Browse code

Merge pull request #34252 from Microsoft/akagup/lcow-remotefs-sandbox

LCOW: Support for docker cp, ADD/COPY on build

Victor Vieux authored on 2017/09/16 08:49:48
Showing 143 changed files
... ...
@@ -2,7 +2,6 @@ package httputils
2 2
 
3 3
 import (
4 4
 	"net/http"
5
-	"path/filepath"
6 5
 	"strconv"
7 6
 	"strings"
8 7
 )
... ...
@@ -69,8 +68,7 @@ func ArchiveFormValues(r *http.Request, vars map[string]string) (ArchiveOptions,
69 69
 	if name == "" {
70 70
 		return ArchiveOptions{}, badParameterError{"name"}
71 71
 	}
72
-
73
-	path := filepath.FromSlash(r.Form.Get("path"))
72
+	path := r.Form.Get("path")
74 73
 	if path == "" {
75 74
 		return ArchiveOptions{}, badParameterError{"path"}
76 75
 	}
... ...
@@ -12,6 +12,7 @@ import (
12 12
 	"github.com/docker/docker/api/types/container"
13 13
 	containerpkg "github.com/docker/docker/container"
14 14
 	"github.com/docker/docker/layer"
15
+	"github.com/docker/docker/pkg/containerfs"
15 16
 	"golang.org/x/net/context"
16 17
 )
17 18
 
... ...
@@ -24,7 +25,7 @@ const (
24 24
 // instructions in the builder.
25 25
 type Source interface {
26 26
 	// Root returns root path for accessing source
27
-	Root() string
27
+	Root() containerfs.ContainerFS
28 28
 	// Close allows to signal that the filesystem tree won't be used anymore.
29 29
 	// For Context implementations using a temporary directory, it is recommended to
30 30
 	// delete the temporary directory in Close().
... ...
@@ -99,7 +100,7 @@ type Image interface {
99 99
 // ReleaseableLayer is an image layer that can be mounted and released
100 100
 type ReleaseableLayer interface {
101 101
 	Release() error
102
-	Mount() (string, error)
102
+	Mount() (containerfs.ContainerFS, error)
103 103
 	Commit(platform string) (ReleaseableLayer, error)
104 104
 	DiffID() layer.DiffID
105 105
 }
... ...
@@ -17,8 +17,6 @@ import (
17 17
 	"github.com/docker/docker/builder/dockerfile/parser"
18 18
 	"github.com/docker/docker/builder/fscache"
19 19
 	"github.com/docker/docker/builder/remotecontext"
20
-	"github.com/docker/docker/pkg/archive"
21
-	"github.com/docker/docker/pkg/chrootarchive"
22 20
 	"github.com/docker/docker/pkg/idtools"
23 21
 	"github.com/docker/docker/pkg/streamformatter"
24 22
 	"github.com/docker/docker/pkg/stringid"
... ...
@@ -50,21 +48,21 @@ type SessionGetter interface {
50 50
 
51 51
 // BuildManager is shared across all Builder objects
52 52
 type BuildManager struct {
53
-	archiver  *archive.Archiver
54
-	backend   builder.Backend
55
-	pathCache pathCache // TODO: make this persistent
56
-	sg        SessionGetter
57
-	fsCache   *fscache.FSCache
53
+	idMappings *idtools.IDMappings
54
+	backend    builder.Backend
55
+	pathCache  pathCache // TODO: make this persistent
56
+	sg         SessionGetter
57
+	fsCache    *fscache.FSCache
58 58
 }
59 59
 
60 60
 // NewBuildManager creates a BuildManager
61 61
 func NewBuildManager(b builder.Backend, sg SessionGetter, fsCache *fscache.FSCache, idMappings *idtools.IDMappings) (*BuildManager, error) {
62 62
 	bm := &BuildManager{
63
-		backend:   b,
64
-		pathCache: &syncmap.Map{},
65
-		sg:        sg,
66
-		archiver:  chrootarchive.NewArchiver(idMappings),
67
-		fsCache:   fsCache,
63
+		backend:    b,
64
+		pathCache:  &syncmap.Map{},
65
+		sg:         sg,
66
+		idMappings: idMappings,
67
+		fsCache:    fsCache,
68 68
 	}
69 69
 	if err := fsCache.RegisterTransport(remotecontext.ClientSessionRemote, NewClientSessionTransport()); err != nil {
70 70
 		return nil, err
... ...
@@ -114,7 +112,7 @@ func (bm *BuildManager) Build(ctx context.Context, config backend.BuildConfig) (
114 114
 		ProgressWriter: config.ProgressWriter,
115 115
 		Backend:        bm.backend,
116 116
 		PathCache:      bm.pathCache,
117
-		Archiver:       bm.archiver,
117
+		IDMappings:     bm.idMappings,
118 118
 		Platform:       dockerfile.Platform,
119 119
 	}
120 120
 
... ...
@@ -160,7 +158,7 @@ type builderOptions struct {
160 160
 	Backend        builder.Backend
161 161
 	ProgressWriter backend.ProgressWriter
162 162
 	PathCache      pathCache
163
-	Archiver       *archive.Archiver
163
+	IDMappings     *idtools.IDMappings
164 164
 	Platform       string
165 165
 }
166 166
 
... ...
@@ -177,7 +175,7 @@ type Builder struct {
177 177
 	docker    builder.Backend
178 178
 	clientCtx context.Context
179 179
 
180
-	archiver         *archive.Archiver
180
+	idMappings       *idtools.IDMappings
181 181
 	buildStages      *buildStages
182 182
 	disableCommit    bool
183 183
 	buildArgs        *buildArgs
... ...
@@ -219,7 +217,7 @@ func newBuilder(clientCtx context.Context, options builderOptions) *Builder {
219 219
 		Aux:              options.ProgressWriter.AuxFormatter,
220 220
 		Output:           options.ProgressWriter.Output,
221 221
 		docker:           options.Backend,
222
-		archiver:         options.Archiver,
222
+		idMappings:       options.IDMappings,
223 223
 		buildArgs:        newBuildArgs(config.BuildArgs),
224 224
 		buildStages:      newBuildStages(),
225 225
 		imageSources:     newImageSources(clientCtx, options),
... ...
@@ -1,6 +1,7 @@
1 1
 package dockerfile
2 2
 
3 3
 import (
4
+	"archive/tar"
4 5
 	"fmt"
5 6
 	"io"
6 7
 	"mime"
... ...
@@ -8,6 +9,7 @@ import (
8 8
 	"net/url"
9 9
 	"os"
10 10
 	"path/filepath"
11
+	"runtime"
11 12
 	"sort"
12 13
 	"strings"
13 14
 	"time"
... ...
@@ -15,11 +17,11 @@ import (
15 15
 	"github.com/docker/docker/builder"
16 16
 	"github.com/docker/docker/builder/remotecontext"
17 17
 	"github.com/docker/docker/pkg/archive"
18
+	"github.com/docker/docker/pkg/containerfs"
18 19
 	"github.com/docker/docker/pkg/idtools"
19 20
 	"github.com/docker/docker/pkg/ioutils"
20 21
 	"github.com/docker/docker/pkg/progress"
21 22
 	"github.com/docker/docker/pkg/streamformatter"
22
-	"github.com/docker/docker/pkg/symlink"
23 23
 	"github.com/docker/docker/pkg/system"
24 24
 	"github.com/docker/docker/pkg/urlutil"
25 25
 	"github.com/pkg/errors"
... ...
@@ -35,14 +37,14 @@ type pathCache interface {
35 35
 // copyInfo is a data object which stores the metadata about each source file in
36 36
 // a copyInstruction
37 37
 type copyInfo struct {
38
-	root         string
38
+	root         containerfs.ContainerFS
39 39
 	path         string
40 40
 	hash         string
41 41
 	noDecompress bool
42 42
 }
43 43
 
44 44
 func (c copyInfo) fullPath() (string, error) {
45
-	return symlink.FollowSymlinkInScope(filepath.Join(c.root, c.path), c.root)
45
+	return c.root.ResolveScopedPath(c.path, true)
46 46
 }
47 47
 
48 48
 func newCopyInfoFromSource(source builder.Source, path string, hash string) copyInfo {
... ...
@@ -71,6 +73,7 @@ type copier struct {
71 71
 	pathCache   pathCache
72 72
 	download    sourceDownloader
73 73
 	tmpPaths    []string
74
+	platform    string
74 75
 }
75 76
 
76 77
 func copierFromDispatchRequest(req dispatchRequest, download sourceDownloader, imageSource *imageMount) copier {
... ...
@@ -79,6 +82,7 @@ func copierFromDispatchRequest(req dispatchRequest, download sourceDownloader, i
79 79
 		pathCache:   req.builder.pathCache,
80 80
 		download:    download,
81 81
 		imageSource: imageSource,
82
+		platform:    req.builder.platform,
82 83
 	}
83 84
 }
84 85
 
... ...
@@ -86,13 +90,14 @@ func (o *copier) createCopyInstruction(args []string, cmdName string) (copyInstr
86 86
 	inst := copyInstruction{cmdName: cmdName}
87 87
 	last := len(args) - 1
88 88
 
89
-	// Work in daemon-specific filepath semantics
90
-	inst.dest = filepath.FromSlash(args[last])
89
+	// Work in platform-specific filepath semantics
90
+	inst.dest = fromSlash(args[last], o.platform)
91
+	separator := string(separator(o.platform))
91 92
 	infos, err := o.getCopyInfosForSourcePaths(args[0:last], inst.dest)
92 93
 	if err != nil {
93 94
 		return inst, errors.Wrapf(err, "%s failed", cmdName)
94 95
 	}
95
-	if len(infos) > 1 && !strings.HasSuffix(inst.dest, string(os.PathSeparator)) {
96
+	if len(infos) > 1 && !strings.HasSuffix(inst.dest, separator) {
96 97
 		return inst, errors.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
97 98
 	}
98 99
 	inst.infos = infos
... ...
@@ -122,6 +127,7 @@ func (o *copier) getCopyInfoForSourcePath(orig, dest string) ([]copyInfo, error)
122 122
 	if !urlutil.IsURL(orig) {
123 123
 		return o.calcCopyInfo(orig, true)
124 124
 	}
125
+
125 126
 	remote, path, err := o.download(orig)
126 127
 	if err != nil {
127 128
 		return nil, err
... ...
@@ -134,7 +140,7 @@ func (o *copier) getCopyInfoForSourcePath(orig, dest string) ([]copyInfo, error)
134 134
 		}
135 135
 		path = unnamedFilename
136 136
 	}
137
-	o.tmpPaths = append(o.tmpPaths, remote.Root())
137
+	o.tmpPaths = append(o.tmpPaths, remote.Root().Path())
138 138
 
139 139
 	hash, err := remote.Hash(path)
140 140
 	ci := newCopyInfoFromSource(remote, path, hash)
... ...
@@ -154,14 +160,6 @@ func (o *copier) Cleanup() {
154 154
 // TODO: allowWildcards can probably be removed by refactoring this function further.
155 155
 func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo, error) {
156 156
 	imageSource := o.imageSource
157
-	if err := validateCopySourcePath(imageSource, origPath); err != nil {
158
-		return nil, err
159
-	}
160
-
161
-	// Work in daemon-specific OS filepath semantics
162
-	origPath = filepath.FromSlash(origPath)
163
-	origPath = strings.TrimPrefix(origPath, string(os.PathSeparator))
164
-	origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator))
165 157
 
166 158
 	// TODO: do this when creating copier. Requires validateCopySourcePath
167 159
 	// (and other below) to be aware of the difference sources. Why is it only
... ...
@@ -178,8 +176,20 @@ func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo,
178 178
 		return nil, errors.Errorf("missing build context")
179 179
 	}
180 180
 
181
+	root := o.source.Root()
182
+
183
+	if err := validateCopySourcePath(imageSource, origPath, root.OS()); err != nil {
184
+		return nil, err
185
+	}
186
+
187
+	// Work in source OS specific filepath semantics
188
+	// For LCOW, this is NOT the daemon OS.
189
+	origPath = root.FromSlash(origPath)
190
+	origPath = strings.TrimPrefix(origPath, string(root.Separator()))
191
+	origPath = strings.TrimPrefix(origPath, "."+string(root.Separator()))
192
+
181 193
 	// Deal with wildcards
182
-	if allowWildcards && containsWildcards(origPath) {
194
+	if allowWildcards && containsWildcards(origPath, root.OS()) {
183 195
 		return o.copyWithWildcards(origPath)
184 196
 	}
185 197
 
... ...
@@ -211,6 +221,19 @@ func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo,
211 211
 	return newCopyInfos(newCopyInfoFromSource(o.source, origPath, hash)), nil
212 212
 }
213 213
 
214
+func containsWildcards(name, platform string) bool {
215
+	isWindows := platform == "windows"
216
+	for i := 0; i < len(name); i++ {
217
+		ch := name[i]
218
+		if ch == '\\' && !isWindows {
219
+			i++
220
+		} else if ch == '*' || ch == '?' || ch == '[' {
221
+			return true
222
+		}
223
+	}
224
+	return false
225
+}
226
+
214 227
 func (o *copier) storeInPathCache(im *imageMount, path string, hash string) {
215 228
 	if im != nil {
216 229
 		o.pathCache.Store(im.ImageID()+path, hash)
... ...
@@ -218,12 +241,13 @@ func (o *copier) storeInPathCache(im *imageMount, path string, hash string) {
218 218
 }
219 219
 
220 220
 func (o *copier) copyWithWildcards(origPath string) ([]copyInfo, error) {
221
+	root := o.source.Root()
221 222
 	var copyInfos []copyInfo
222
-	if err := filepath.Walk(o.source.Root(), func(path string, info os.FileInfo, err error) error {
223
+	if err := root.Walk(root.Path(), func(path string, info os.FileInfo, err error) error {
223 224
 		if err != nil {
224 225
 			return err
225 226
 		}
226
-		rel, err := remotecontext.Rel(o.source.Root(), path)
227
+		rel, err := remotecontext.Rel(root, path)
227 228
 		if err != nil {
228 229
 			return err
229 230
 		}
... ...
@@ -231,7 +255,7 @@ func (o *copier) copyWithWildcards(origPath string) ([]copyInfo, error) {
231 231
 		if rel == "." {
232 232
 			return nil
233 233
 		}
234
-		if match, _ := filepath.Match(origPath, rel); !match {
234
+		if match, _ := root.Match(origPath, rel); !match {
235 235
 			return nil
236 236
 		}
237 237
 
... ...
@@ -273,7 +297,7 @@ func walkSource(source builder.Source, origPath string) ([]string, error) {
273 273
 	}
274 274
 	// Must be a dir
275 275
 	var subfiles []string
276
-	err = filepath.Walk(fp, func(path string, info os.FileInfo, err error) error {
276
+	err = source.Root().Walk(fp, func(path string, info os.FileInfo, err error) error {
277 277
 		if err != nil {
278 278
 			return err
279 279
 		}
... ...
@@ -398,14 +422,19 @@ func downloadSource(output io.Writer, stdout io.Writer, srcURL string) (remote b
398 398
 		return
399 399
 	}
400 400
 
401
-	lc, err := remotecontext.NewLazySource(tmpDir)
401
+	lc, err := remotecontext.NewLazySource(containerfs.NewLocalContainerFS(tmpDir))
402 402
 	return lc, filename, err
403 403
 }
404 404
 
405 405
 type copyFileOptions struct {
406 406
 	decompress bool
407
-	archiver   *archive.Archiver
408 407
 	chownPair  idtools.IDPair
408
+	archiver   Archiver
409
+}
410
+
411
+type copyEndpoint struct {
412
+	driver containerfs.Driver
413
+	path   string
409 414
 }
410 415
 
411 416
 func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions) error {
... ...
@@ -413,6 +442,7 @@ func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions)
413 413
 	if err != nil {
414 414
 		return err
415 415
 	}
416
+
416 417
 	destPath, err := dest.fullPath()
417 418
 	if err != nil {
418 419
 		return err
... ...
@@ -420,59 +450,90 @@ func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions)
420 420
 
421 421
 	archiver := options.archiver
422 422
 
423
-	src, err := os.Stat(srcPath)
423
+	srcEndpoint := &copyEndpoint{driver: source.root, path: srcPath}
424
+	destEndpoint := &copyEndpoint{driver: dest.root, path: destPath}
425
+
426
+	src, err := source.root.Stat(srcPath)
424 427
 	if err != nil {
425 428
 		return errors.Wrapf(err, "source path not found")
426 429
 	}
427 430
 	if src.IsDir() {
428
-		return copyDirectory(archiver, srcPath, destPath, options.chownPair)
431
+		return copyDirectory(archiver, srcEndpoint, destEndpoint, options.chownPair)
429 432
 	}
430
-	if options.decompress && archive.IsArchivePath(srcPath) && !source.noDecompress {
433
+	if options.decompress && isArchivePath(source.root, srcPath) && !source.noDecompress {
431 434
 		return archiver.UntarPath(srcPath, destPath)
432 435
 	}
433 436
 
434
-	destExistsAsDir, err := isExistingDirectory(destPath)
437
+	destExistsAsDir, err := isExistingDirectory(destEndpoint)
435 438
 	if err != nil {
436 439
 		return err
437 440
 	}
438 441
 	// dest.path must be used because destPath has already been cleaned of any
439 442
 	// trailing slash
440
-	if endsInSlash(dest.path) || destExistsAsDir {
443
+	if endsInSlash(dest.root, dest.path) || destExistsAsDir {
441 444
 		// source.path must be used to get the correct filename when the source
442 445
 		// is a symlink
443
-		destPath = filepath.Join(destPath, filepath.Base(source.path))
446
+		destPath = dest.root.Join(destPath, source.root.Base(source.path))
447
+		destEndpoint = &copyEndpoint{driver: dest.root, path: destPath}
444 448
 	}
445
-	return copyFile(archiver, srcPath, destPath, options.chownPair)
449
+	return copyFile(archiver, srcEndpoint, destEndpoint, options.chownPair)
446 450
 }
447 451
 
448
-func copyDirectory(archiver *archive.Archiver, source, dest string, chownPair idtools.IDPair) error {
452
+func isArchivePath(driver containerfs.ContainerFS, path string) bool {
453
+	file, err := driver.Open(path)
454
+	if err != nil {
455
+		return false
456
+	}
457
+	defer file.Close()
458
+	rdr, err := archive.DecompressStream(file)
459
+	if err != nil {
460
+		return false
461
+	}
462
+	r := tar.NewReader(rdr)
463
+	_, err = r.Next()
464
+	return err == nil
465
+}
466
+
467
+func copyDirectory(archiver Archiver, source, dest *copyEndpoint, chownPair idtools.IDPair) error {
449 468
 	destExists, err := isExistingDirectory(dest)
450 469
 	if err != nil {
451 470
 		return errors.Wrapf(err, "failed to query destination path")
452 471
 	}
453
-	if err := archiver.CopyWithTar(source, dest); err != nil {
472
+
473
+	if err := archiver.CopyWithTar(source.path, dest.path); err != nil {
454 474
 		return errors.Wrapf(err, "failed to copy directory")
455 475
 	}
456
-	return fixPermissions(source, dest, chownPair, !destExists)
476
+	// TODO: @gupta-ak. Investigate how LCOW permission mappings will work.
477
+	return fixPermissions(source.path, dest.path, chownPair, !destExists)
457 478
 }
458 479
 
459
-func copyFile(archiver *archive.Archiver, source, dest string, chownPair idtools.IDPair) error {
460
-	if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest), 0755, chownPair); err != nil {
461
-		return errors.Wrapf(err, "failed to create new directory")
480
+func copyFile(archiver Archiver, source, dest *copyEndpoint, chownPair idtools.IDPair) error {
481
+	if runtime.GOOS == "windows" && dest.driver.OS() == "linux" {
482
+		// LCOW
483
+		if err := dest.driver.MkdirAll(dest.driver.Dir(dest.path), 0755); err != nil {
484
+			return errors.Wrapf(err, "failed to create new directory")
485
+		}
486
+	} else {
487
+		if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest.path), 0755, chownPair); err != nil {
488
+			// Normal containers
489
+			return errors.Wrapf(err, "failed to create new directory")
490
+		}
462 491
 	}
463
-	if err := archiver.CopyFileWithTar(source, dest); err != nil {
492
+
493
+	if err := archiver.CopyFileWithTar(source.path, dest.path); err != nil {
464 494
 		return errors.Wrapf(err, "failed to copy file")
465 495
 	}
466
-	return fixPermissions(source, dest, chownPair, false)
496
+	// TODO: @gupta-ak. Investigate how LCOW permission mappings will work.
497
+	return fixPermissions(source.path, dest.path, chownPair, false)
467 498
 }
468 499
 
469
-func endsInSlash(path string) bool {
470
-	return strings.HasSuffix(path, string(os.PathSeparator))
500
+func endsInSlash(driver containerfs.Driver, path string) bool {
501
+	return strings.HasSuffix(path, string(driver.Separator()))
471 502
 }
472 503
 
473 504
 // isExistingDirectory returns true if the path exists and is a directory
474
-func isExistingDirectory(path string) (bool, error) {
475
-	destStat, err := os.Stat(path)
505
+func isExistingDirectory(point *copyEndpoint) (bool, error) {
506
+	destStat, err := point.driver.Stat(point.path)
476 507
 	switch {
477 508
 	case os.IsNotExist(err):
478 509
 		return false, nil
... ...
@@ -4,6 +4,7 @@ import (
4 4
 	"net/http"
5 5
 	"testing"
6 6
 
7
+	"github.com/docker/docker/pkg/containerfs"
7 8
 	"github.com/gotestyourself/gotestyourself/fs"
8 9
 	"github.com/stretchr/testify/assert"
9 10
 )
... ...
@@ -37,7 +38,7 @@ func TestIsExistingDirectory(t *testing.T) {
37 37
 	}
38 38
 
39 39
 	for _, testcase := range testcases {
40
-		result, err := isExistingDirectory(testcase.path)
40
+		result, err := isExistingDirectory(&copyEndpoint{driver: containerfs.NewLocalDriver(), path: testcase.path})
41 41
 		if !assert.NoError(t, err) {
42 42
 			continue
43 43
 		}
... ...
@@ -6,6 +6,7 @@ import (
6 6
 	"os"
7 7
 	"path/filepath"
8 8
 
9
+	"github.com/docker/docker/pkg/containerfs"
9 10
 	"github.com/docker/docker/pkg/idtools"
10 11
 )
11 12
 
... ...
@@ -15,7 +16,8 @@ func fixPermissions(source, destination string, rootIDs idtools.IDPair, override
15 15
 		err           error
16 16
 	)
17 17
 	if !overrideSkip {
18
-		skipChownRoot, err = isExistingDirectory(destination)
18
+		destEndpoint := &copyEndpoint{driver: containerfs.NewLocalDriver(), path: destination}
19
+		skipChownRoot, err = isExistingDirectory(destEndpoint)
19 20
 		if err != nil {
20 21
 			return err
21 22
 		}
... ...
@@ -40,3 +42,7 @@ func fixPermissions(source, destination string, rootIDs idtools.IDPair, override
40 40
 		return os.Lchown(fullpath, rootIDs.UID, rootIDs.GID)
41 41
 	})
42 42
 }
43
+
44
+func validateCopySourcePath(imageSource *imageMount, origPath, platform string) error {
45
+	return nil
46
+}
... ...
@@ -1,8 +1,38 @@
1 1
 package dockerfile
2 2
 
3
-import "github.com/docker/docker/pkg/idtools"
3
+import (
4
+	"errors"
5
+	"path/filepath"
6
+	"strings"
7
+
8
+	"github.com/docker/docker/pkg/idtools"
9
+)
4 10
 
5 11
 func fixPermissions(source, destination string, rootIDs idtools.IDPair, overrideSkip bool) error {
6 12
 	// chown is not supported on Windows
7 13
 	return nil
8 14
 }
15
+
16
+func validateCopySourcePath(imageSource *imageMount, origPath, platform string) error {
17
+	// validate windows paths from other images + LCOW
18
+	if imageSource == nil || platform != "windows" {
19
+		return nil
20
+	}
21
+
22
+	origPath = filepath.FromSlash(origPath)
23
+	p := strings.ToLower(filepath.Clean(origPath))
24
+	if !filepath.IsAbs(p) {
25
+		if filepath.VolumeName(p) != "" {
26
+			if p[len(p)-2:] == ":." { // case where clean returns weird c:. paths
27
+				p = p[:len(p)-1]
28
+			}
29
+			p += "\\"
30
+		} else {
31
+			p = filepath.Join("c:\\", p)
32
+		}
33
+	}
34
+	if _, blacklisted := pathBlacklist[p]; blacklisted {
35
+		return errors.New("copy from c:\\ or c:\\windows is not allowed on windows")
36
+	}
37
+	return nil
38
+}
... ...
@@ -7,6 +7,9 @@ import (
7 7
 	"crypto/sha256"
8 8
 	"encoding/hex"
9 9
 	"fmt"
10
+	"io"
11
+	"os"
12
+	"path"
10 13
 	"path/filepath"
11 14
 	"strconv"
12 15
 	"strings"
... ...
@@ -15,13 +18,69 @@ import (
15 15
 	"github.com/docker/docker/api/types/backend"
16 16
 	"github.com/docker/docker/api/types/container"
17 17
 	"github.com/docker/docker/image"
18
+	"github.com/docker/docker/pkg/archive"
19
+	"github.com/docker/docker/pkg/chrootarchive"
20
+	"github.com/docker/docker/pkg/containerfs"
18 21
 	"github.com/docker/docker/pkg/idtools"
19 22
 	"github.com/docker/docker/pkg/stringid"
20 23
 	"github.com/docker/docker/pkg/symlink"
24
+	"github.com/docker/docker/pkg/system"
21 25
 	lcUser "github.com/opencontainers/runc/libcontainer/user"
22 26
 	"github.com/pkg/errors"
23 27
 )
24 28
 
29
+// For Windows only
30
+var pathBlacklist = map[string]bool{
31
+	"c:\\":        true,
32
+	"c:\\windows": true,
33
+}
34
+
35
+// Archiver defines an interface for copying files from one destination to
36
+// another using Tar/Untar.
37
+type Archiver interface {
38
+	TarUntar(src, dst string) error
39
+	UntarPath(src, dst string) error
40
+	CopyWithTar(src, dst string) error
41
+	CopyFileWithTar(src, dst string) error
42
+	IDMappings() *idtools.IDMappings
43
+}
44
+
45
+// The builder will use the following interfaces if the container fs implements
46
+// these for optimized copies to and from the container.
47
+type extractor interface {
48
+	ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error
49
+}
50
+
51
+type archiver interface {
52
+	ArchivePath(src string, opts *archive.TarOptions) (io.ReadCloser, error)
53
+}
54
+
55
+// helper functions to get tar/untar func
56
+func untarFunc(i interface{}) containerfs.UntarFunc {
57
+	if ea, ok := i.(extractor); ok {
58
+		return ea.ExtractArchive
59
+	}
60
+	return chrootarchive.Untar
61
+}
62
+
63
+func tarFunc(i interface{}) containerfs.TarFunc {
64
+	if ap, ok := i.(archiver); ok {
65
+		return ap.ArchivePath
66
+	}
67
+	return archive.TarWithOptions
68
+}
69
+
70
+func (b *Builder) getArchiver(src, dst containerfs.Driver) Archiver {
71
+	t, u := tarFunc(src), untarFunc(dst)
72
+	return &containerfs.Archiver{
73
+		SrcDriver:     src,
74
+		DstDriver:     dst,
75
+		Tar:           t,
76
+		Untar:         u,
77
+		IDMappingsVar: b.idMappings,
78
+	}
79
+}
80
+
25 81
 func (b *Builder) commit(dispatchState *dispatchState, comment string) error {
26 82
 	if b.disableCommit {
27 83
 		return nil
... ...
@@ -131,28 +190,29 @@ func (b *Builder) performCopy(state *dispatchState, inst copyInstruction) error
131 131
 	if err != nil {
132 132
 		return errors.Wrapf(err, "failed to get destination image %q", state.imageID)
133 133
 	}
134
-	destInfo, err := createDestInfo(state.runConfig.WorkingDir, inst, imageMount)
134
+
135
+	destInfo, err := createDestInfo(state.runConfig.WorkingDir, inst, imageMount, b.platform)
135 136
 	if err != nil {
136 137
 		return err
137 138
 	}
138 139
 
139
-	chownPair := b.archiver.IDMappings.RootPair()
140
+	chownPair := b.idMappings.RootPair()
140 141
 	// if a chown was requested, perform the steps to get the uid, gid
141 142
 	// translated (if necessary because of user namespaces), and replace
142 143
 	// the root pair with the chown pair for copy operations
143 144
 	if inst.chownStr != "" {
144
-		chownPair, err = parseChownFlag(inst.chownStr, destInfo.root, b.archiver.IDMappings)
145
+		chownPair, err = parseChownFlag(inst.chownStr, destInfo.root.Path(), b.idMappings)
145 146
 		if err != nil {
146 147
 			return errors.Wrapf(err, "unable to convert uid/gid chown string to host mapping")
147 148
 		}
148 149
 	}
149 150
 
150
-	opts := copyFileOptions{
151
-		decompress: inst.allowLocalDecompression,
152
-		archiver:   b.archiver,
153
-		chownPair:  chownPair,
154
-	}
155 151
 	for _, info := range inst.infos {
152
+		opts := copyFileOptions{
153
+			decompress: inst.allowLocalDecompression,
154
+			archiver:   b.getArchiver(info.root, destInfo.root),
155
+			chownPair:  chownPair,
156
+		}
156 157
 		if err := performCopyForInfo(destInfo, info, opts); err != nil {
157 158
 			return errors.Wrapf(err, "failed to copy files")
158 159
 		}
... ...
@@ -236,10 +296,10 @@ func lookupGroup(groupStr, filepath string) (int, error) {
236 236
 	return groups[0].Gid, nil
237 237
 }
238 238
 
239
-func createDestInfo(workingDir string, inst copyInstruction, imageMount *imageMount) (copyInfo, error) {
239
+func createDestInfo(workingDir string, inst copyInstruction, imageMount *imageMount, platform string) (copyInfo, error) {
240 240
 	// Twiddle the destination when it's a relative path - meaning, make it
241 241
 	// relative to the WORKINGDIR
242
-	dest, err := normalizeDest(workingDir, inst.dest)
242
+	dest, err := normalizeDest(workingDir, inst.dest, platform)
243 243
 	if err != nil {
244 244
 		return copyInfo{}, errors.Wrapf(err, "invalid %s", inst.cmdName)
245 245
 	}
... ...
@@ -252,6 +312,63 @@ func createDestInfo(workingDir string, inst copyInstruction, imageMount *imageMo
252 252
 	return newCopyInfoFromSource(destMount, dest, ""), nil
253 253
 }
254 254
 
255
+// normalizeDest normalises the destination of a COPY/ADD command in a
256
+// platform semantically consistent way.
257
+func normalizeDest(workingDir, requested string, platform string) (string, error) {
258
+	dest := fromSlash(requested, platform)
259
+	endsInSlash := strings.HasSuffix(dest, string(separator(platform)))
260
+
261
+	if platform != "windows" {
262
+		if !path.IsAbs(requested) {
263
+			dest = path.Join("/", filepath.ToSlash(workingDir), dest)
264
+			// Make sure we preserve any trailing slash
265
+			if endsInSlash {
266
+				dest += "/"
267
+			}
268
+		}
269
+		return dest, nil
270
+	}
271
+
272
+	// We are guaranteed that the working directory is already consistent,
273
+	// However, Windows also has, for now, the limitation that ADD/COPY can
274
+	// only be done to the system drive, not any drives that might be present
275
+	// as a result of a bind mount.
276
+	//
277
+	// So... if the path requested is Linux-style absolute (/foo or \\foo),
278
+	// we assume it is the system drive. If it is a Windows-style absolute
279
+	// (DRIVE:\\foo), error if DRIVE is not C. And finally, ensure we
280
+	// strip any configured working directories drive letter so that it
281
+	// can be subsequently legitimately converted to a Windows volume-style
282
+	// pathname.
283
+
284
+	// Not a typo - filepath.IsAbs, not system.IsAbs on this next check as
285
+	// we only want to validate where the DriveColon part has been supplied.
286
+	if filepath.IsAbs(dest) {
287
+		if strings.ToUpper(string(dest[0])) != "C" {
288
+			return "", fmt.Errorf("Windows does not support destinations not on the system drive (C:)")
289
+		}
290
+		dest = dest[2:] // Strip the drive letter
291
+	}
292
+
293
+	// Cannot handle relative where WorkingDir is not the system drive.
294
+	if len(workingDir) > 0 {
295
+		if ((len(workingDir) > 1) && !system.IsAbs(workingDir[2:])) || (len(workingDir) == 1) {
296
+			return "", fmt.Errorf("Current WorkingDir %s is not platform consistent", workingDir)
297
+		}
298
+		if !system.IsAbs(dest) {
299
+			if string(workingDir[0]) != "C" {
300
+				return "", fmt.Errorf("Windows does not support relative paths when WORKDIR is not the system drive")
301
+			}
302
+			dest = filepath.Join(string(os.PathSeparator), workingDir[2:], dest)
303
+			// Make sure we preserve any trailing slash
304
+			if endsInSlash {
305
+				dest += string(os.PathSeparator)
306
+			}
307
+		}
308
+	}
309
+	return dest, nil
310
+}
311
+
255 312
 // For backwards compat, if there's just one info then use it as the
256 313
 // cache look-up string, otherwise hash 'em all into one
257 314
 func getSourceHashFromInfos(infos []copyInfo) string {
... ...
@@ -397,3 +514,19 @@ func hostConfigFromOptions(options *types.ImageBuildOptions) *container.HostConf
397 397
 		ExtraHosts: options.ExtraHosts,
398 398
 	}
399 399
 }
400
+
401
+// fromSlash works like filepath.FromSlash but with a given OS platform field
402
+func fromSlash(path, platform string) string {
403
+	if platform == "windows" {
404
+		return strings.Replace(path, "/", "\\", -1)
405
+	}
406
+	return path
407
+}
408
+
409
+// separator returns a OS path separator for the given OS platform
410
+func separator(platform string) byte {
411
+	if platform == "windows" {
412
+		return '\\'
413
+	}
414
+	return '/'
415
+}
400 416
deleted file mode 100644
... ...
@@ -1,42 +0,0 @@
1
-// +build !windows
2
-
3
-package dockerfile
4
-
5
-import (
6
-	"os"
7
-	"path/filepath"
8
-	"strings"
9
-
10
-	"github.com/docker/docker/pkg/system"
11
-)
12
-
13
-// normalizeDest normalizes the destination of a COPY/ADD command in a
14
-// platform semantically consistent way.
15
-func normalizeDest(workingDir, requested string) (string, error) {
16
-	dest := filepath.FromSlash(requested)
17
-	endsInSlash := strings.HasSuffix(requested, string(os.PathSeparator))
18
-	if !system.IsAbs(requested) {
19
-		dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(workingDir), dest)
20
-		// Make sure we preserve any trailing slash
21
-		if endsInSlash {
22
-			dest += string(os.PathSeparator)
23
-		}
24
-	}
25
-	return dest, nil
26
-}
27
-
28
-func containsWildcards(name string) bool {
29
-	for i := 0; i < len(name); i++ {
30
-		ch := name[i]
31
-		if ch == '\\' {
32
-			i++
33
-		} else if ch == '*' || ch == '?' || ch == '[' {
34
-			return true
35
-		}
36
-	}
37
-	return false
38
-}
39
-
40
-func validateCopySourcePath(imageSource *imageMount, origPath string) error {
41
-	return nil
42
-}
43 1
deleted file mode 100644
... ...
@@ -1,95 +0,0 @@
1
-package dockerfile
2
-
3
-import (
4
-	"fmt"
5
-	"os"
6
-	"path/filepath"
7
-	"strings"
8
-
9
-	"github.com/docker/docker/pkg/system"
10
-	"github.com/pkg/errors"
11
-)
12
-
13
-// normalizeDest normalizes the destination of a COPY/ADD command in a
14
-// platform semantically consistent way.
15
-func normalizeDest(workingDir, requested string) (string, error) {
16
-	dest := filepath.FromSlash(requested)
17
-	endsInSlash := strings.HasSuffix(dest, string(os.PathSeparator))
18
-
19
-	// We are guaranteed that the working directory is already consistent,
20
-	// However, Windows also has, for now, the limitation that ADD/COPY can
21
-	// only be done to the system drive, not any drives that might be present
22
-	// as a result of a bind mount.
23
-	//
24
-	// So... if the path requested is Linux-style absolute (/foo or \\foo),
25
-	// we assume it is the system drive. If it is a Windows-style absolute
26
-	// (DRIVE:\\foo), error if DRIVE is not C. And finally, ensure we
27
-	// strip any configured working directories drive letter so that it
28
-	// can be subsequently legitimately converted to a Windows volume-style
29
-	// pathname.
30
-
31
-	// Not a typo - filepath.IsAbs, not system.IsAbs on this next check as
32
-	// we only want to validate where the DriveColon part has been supplied.
33
-	if filepath.IsAbs(dest) {
34
-		if strings.ToUpper(string(dest[0])) != "C" {
35
-			return "", fmt.Errorf("Windows does not support destinations not on the system drive (C:)")
36
-		}
37
-		dest = dest[2:] // Strip the drive letter
38
-	}
39
-
40
-	// Cannot handle relative where WorkingDir is not the system drive.
41
-	if len(workingDir) > 0 {
42
-		if ((len(workingDir) > 1) && !system.IsAbs(workingDir[2:])) || (len(workingDir) == 1) {
43
-			return "", fmt.Errorf("Current WorkingDir %s is not platform consistent", workingDir)
44
-		}
45
-		if !system.IsAbs(dest) {
46
-			if string(workingDir[0]) != "C" {
47
-				return "", fmt.Errorf("Windows does not support relative paths when WORKDIR is not the system drive")
48
-			}
49
-			dest = filepath.Join(string(os.PathSeparator), workingDir[2:], dest)
50
-			// Make sure we preserve any trailing slash
51
-			if endsInSlash {
52
-				dest += string(os.PathSeparator)
53
-			}
54
-		}
55
-	}
56
-	return dest, nil
57
-}
58
-
59
-func containsWildcards(name string) bool {
60
-	for i := 0; i < len(name); i++ {
61
-		ch := name[i]
62
-		if ch == '*' || ch == '?' || ch == '[' {
63
-			return true
64
-		}
65
-	}
66
-	return false
67
-}
68
-
69
-var pathBlacklist = map[string]bool{
70
-	"c:\\":        true,
71
-	"c:\\windows": true,
72
-}
73
-
74
-func validateCopySourcePath(imageSource *imageMount, origPath string) error {
75
-	// validate windows paths from other images
76
-	if imageSource == nil {
77
-		return nil
78
-	}
79
-	origPath = filepath.FromSlash(origPath)
80
-	p := strings.ToLower(filepath.Clean(origPath))
81
-	if !filepath.IsAbs(p) {
82
-		if filepath.VolumeName(p) != "" {
83
-			if p[len(p)-2:] == ":." { // case where clean returns weird c:. paths
84
-				p = p[:len(p)-1]
85
-			}
86
-			p += "\\"
87
-		} else {
88
-			p = filepath.Join("c:\\", p)
89
-		}
90
-	}
91
-	if _, blacklisted := pathBlacklist[p]; blacklisted {
92
-		return errors.New("copy from c:\\ or c:\\windows is not allowed on windows")
93
-	}
94
-	return nil
95
-}
... ...
@@ -40,7 +40,7 @@ func TestNormalizeDest(t *testing.T) {
40 40
 	}
41 41
 	for _, testcase := range tests {
42 42
 		msg := fmt.Sprintf("Input: %s, %s", testcase.current, testcase.requested)
43
-		actual, err := normalizeDest(testcase.current, testcase.requested)
43
+		actual, err := normalizeDest(testcase.current, testcase.requested, "windows")
44 44
 		if testcase.etext == "" {
45 45
 			if !assert.NoError(t, err, msg) {
46 46
 				continue
... ...
@@ -10,6 +10,7 @@ import (
10 10
 	"github.com/docker/docker/builder"
11 11
 	containerpkg "github.com/docker/docker/container"
12 12
 	"github.com/docker/docker/layer"
13
+	"github.com/docker/docker/pkg/containerfs"
13 14
 	"golang.org/x/net/context"
14 15
 )
15 16
 
... ...
@@ -117,8 +118,8 @@ func (l *mockLayer) Release() error {
117 117
 	return nil
118 118
 }
119 119
 
120
-func (l *mockLayer) Mount() (string, error) {
121
-	return "mountPath", nil
120
+func (l *mockLayer) Mount() (containerfs.ContainerFS, error) {
121
+	return containerfs.NewLocalContainerFS("mountPath"), nil
122 122
 }
123 123
 
124 124
 func (l *mockLayer) Commit(string) (builder.ReleaseableLayer, error) {
... ...
@@ -36,25 +36,25 @@ func TestFSCache(t *testing.T) {
36 36
 	src1, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo", "data", "bar"})
37 37
 	assert.Nil(t, err)
38 38
 
39
-	dt, err := ioutil.ReadFile(filepath.Join(src1.Root(), "foo"))
39
+	dt, err := ioutil.ReadFile(filepath.Join(src1.Root().Path(), "foo"))
40 40
 	assert.Nil(t, err)
41 41
 	assert.Equal(t, string(dt), "data")
42 42
 
43 43
 	// same id doesn't recalculate anything
44 44
 	src2, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo", "data2", "bar"})
45 45
 	assert.Nil(t, err)
46
-	assert.Equal(t, src1.Root(), src2.Root())
46
+	assert.Equal(t, src1.Root().Path(), src2.Root().Path())
47 47
 
48
-	dt, err = ioutil.ReadFile(filepath.Join(src1.Root(), "foo"))
48
+	dt, err = ioutil.ReadFile(filepath.Join(src1.Root().Path(), "foo"))
49 49
 	assert.Nil(t, err)
50 50
 	assert.Equal(t, string(dt), "data")
51 51
 	assert.Nil(t, src2.Close())
52 52
 
53 53
 	src3, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo2", "data2", "bar"})
54 54
 	assert.Nil(t, err)
55
-	assert.NotEqual(t, src1.Root(), src3.Root())
55
+	assert.NotEqual(t, src1.Root().Path(), src3.Root().Path())
56 56
 
57
-	dt, err = ioutil.ReadFile(filepath.Join(src3.Root(), "foo2"))
57
+	dt, err = ioutil.ReadFile(filepath.Join(src3.Root().Path(), "foo2"))
58 58
 	assert.Nil(t, err)
59 59
 	assert.Equal(t, string(dt), "data2")
60 60
 
... ...
@@ -71,12 +71,12 @@ func TestFSCache(t *testing.T) {
71 71
 	// new upload with the same shared key shoutl overwrite
72 72
 	src4, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo3", "data3", "bar"})
73 73
 	assert.Nil(t, err)
74
-	assert.NotEqual(t, src1.Root(), src3.Root())
74
+	assert.NotEqual(t, src1.Root().Path(), src3.Root().Path())
75 75
 
76
-	dt, err = ioutil.ReadFile(filepath.Join(src3.Root(), "foo3"))
76
+	dt, err = ioutil.ReadFile(filepath.Join(src3.Root().Path(), "foo3"))
77 77
 	assert.Nil(t, err)
78 78
 	assert.Equal(t, string(dt), "data3")
79
-	assert.Equal(t, src4.Root(), src3.Root())
79
+	assert.Equal(t, src4.Root().Path(), src3.Root().Path())
80 80
 	assert.Nil(t, src4.Close())
81 81
 
82 82
 	s, err = fscache.DiskUsage()
... ...
@@ -8,19 +8,19 @@ import (
8 8
 	"github.com/docker/docker/builder"
9 9
 	"github.com/docker/docker/pkg/archive"
10 10
 	"github.com/docker/docker/pkg/chrootarchive"
11
+	"github.com/docker/docker/pkg/containerfs"
11 12
 	"github.com/docker/docker/pkg/ioutils"
12
-	"github.com/docker/docker/pkg/symlink"
13 13
 	"github.com/docker/docker/pkg/tarsum"
14 14
 	"github.com/pkg/errors"
15 15
 )
16 16
 
17 17
 type archiveContext struct {
18
-	root string
18
+	root containerfs.ContainerFS
19 19
 	sums tarsum.FileInfoSums
20 20
 }
21 21
 
22 22
 func (c *archiveContext) Close() error {
23
-	return os.RemoveAll(c.root)
23
+	return c.root.RemoveAll(c.root.Path())
24 24
 }
25 25
 
26 26
 func convertPathError(err error, cleanpath string) error {
... ...
@@ -52,7 +52,8 @@ func FromArchive(tarStream io.Reader) (builder.Source, error) {
52 52
 		return nil, err
53 53
 	}
54 54
 
55
-	tsc := &archiveContext{root: root}
55
+	// Assume local file system. Since it's coming from a tar file.
56
+	tsc := &archiveContext{root: containerfs.NewLocalContainerFS(root)}
56 57
 
57 58
 	// Make sure we clean-up upon error.  In the happy case the caller
58 59
 	// is expected to manage the clean-up
... ...
@@ -82,7 +83,7 @@ func FromArchive(tarStream io.Reader) (builder.Source, error) {
82 82
 	return tsc, nil
83 83
 }
84 84
 
85
-func (c *archiveContext) Root() string {
85
+func (c *archiveContext) Root() containerfs.ContainerFS {
86 86
 	return c.root
87 87
 }
88 88
 
... ...
@@ -91,7 +92,7 @@ func (c *archiveContext) Remove(path string) error {
91 91
 	if err != nil {
92 92
 		return err
93 93
 	}
94
-	return os.RemoveAll(fullpath)
94
+	return c.root.RemoveAll(fullpath)
95 95
 }
96 96
 
97 97
 func (c *archiveContext) Hash(path string) (string, error) {
... ...
@@ -100,7 +101,7 @@ func (c *archiveContext) Hash(path string) (string, error) {
100 100
 		return "", err
101 101
 	}
102 102
 
103
-	rel, err := filepath.Rel(c.root, fullpath)
103
+	rel, err := c.root.Rel(c.root.Path(), fullpath)
104 104
 	if err != nil {
105 105
 		return "", convertPathError(err, cleanpath)
106 106
 	}
... ...
@@ -115,13 +116,13 @@ func (c *archiveContext) Hash(path string) (string, error) {
115 115
 	return path, nil // backwards compat TODO: see if really needed
116 116
 }
117 117
 
118
-func normalize(path, root string) (cleanPath, fullPath string, err error) {
119
-	cleanPath = filepath.Clean(string(os.PathSeparator) + path)[1:]
120
-	fullPath, err = symlink.FollowSymlinkInScope(filepath.Join(root, path), root)
118
+func normalize(path string, root containerfs.ContainerFS) (cleanPath, fullPath string, err error) {
119
+	cleanPath = root.Clean(string(root.Separator()) + path)[1:]
120
+	fullPath, err = root.ResolveScopedPath(path, true)
121 121
 	if err != nil {
122 122
 		return "", "", errors.Wrapf(err, "forbidden path outside the build context: %s (%s)", path, cleanPath)
123 123
 	}
124
-	if _, err := os.Lstat(fullPath); err != nil {
124
+	if _, err := root.Lstat(fullPath); err != nil {
125 125
 		return "", "", errors.WithStack(convertPathError(err, path))
126 126
 	}
127 127
 	return
... ...
@@ -5,15 +5,14 @@ import (
5 5
 	"fmt"
6 6
 	"io"
7 7
 	"os"
8
-	"path/filepath"
9 8
 	"strings"
10 9
 
10
+	"github.com/containerd/continuity/driver"
11 11
 	"github.com/docker/docker/api/types/backend"
12 12
 	"github.com/docker/docker/builder"
13 13
 	"github.com/docker/docker/builder/dockerfile/parser"
14 14
 	"github.com/docker/docker/builder/dockerignore"
15 15
 	"github.com/docker/docker/pkg/fileutils"
16
-	"github.com/docker/docker/pkg/symlink"
17 16
 	"github.com/docker/docker/pkg/urlutil"
18 17
 	"github.com/pkg/errors"
19 18
 	"github.com/sirupsen/logrus"
... ...
@@ -157,12 +156,12 @@ func readAndParseDockerfile(name string, rc io.Reader) (*parser.Result, error) {
157 157
 	return parser.Parse(br)
158 158
 }
159 159
 
160
-func openAt(remote builder.Source, path string) (*os.File, error) {
160
+func openAt(remote builder.Source, path string) (driver.File, error) {
161 161
 	fullPath, err := FullPath(remote, path)
162 162
 	if err != nil {
163 163
 		return nil, err
164 164
 	}
165
-	return os.Open(fullPath)
165
+	return remote.Root().Open(fullPath)
166 166
 }
167 167
 
168 168
 // StatAt is a helper for calling Stat on a path from a source
... ...
@@ -171,12 +170,12 @@ func StatAt(remote builder.Source, path string) (os.FileInfo, error) {
171 171
 	if err != nil {
172 172
 		return nil, err
173 173
 	}
174
-	return os.Stat(fullPath)
174
+	return remote.Root().Stat(fullPath)
175 175
 }
176 176
 
177 177
 // FullPath is a helper for getting a full path for a path from a source
178 178
 func FullPath(remote builder.Source, path string) (string, error) {
179
-	fullPath, err := symlink.FollowSymlinkInScope(filepath.Join(remote.Root(), path), remote.Root())
179
+	fullPath, err := remote.Root().ResolveScopedPath(path, true)
180 180
 	if err != nil {
181 181
 		return "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullPath) // backwards compat with old error
182 182
 	}
... ...
@@ -5,11 +5,11 @@ import (
5 5
 	"io/ioutil"
6 6
 	"log"
7 7
 	"os"
8
-	"path/filepath"
9 8
 	"sort"
10 9
 	"testing"
11 10
 
12 11
 	"github.com/docker/docker/builder"
12
+	"github.com/docker/docker/pkg/containerfs"
13 13
 )
14 14
 
15 15
 const (
... ...
@@ -53,7 +53,7 @@ func checkDirectory(t *testing.T, dir string, expectedFiles []string) {
53 53
 }
54 54
 
55 55
 func executeProcess(t *testing.T, contextDir string) {
56
-	modifiableCtx := &stubRemote{root: contextDir}
56
+	modifiableCtx := &stubRemote{root: containerfs.NewLocalContainerFS(contextDir)}
57 57
 
58 58
 	err := removeDockerfile(modifiableCtx, builder.DefaultDockerfileName)
59 59
 
... ...
@@ -105,19 +105,19 @@ func TestProcessShouldLeaveAllFiles(t *testing.T) {
105 105
 
106 106
 // TODO: remove after moving to a separate pkg
107 107
 type stubRemote struct {
108
-	root string
108
+	root containerfs.ContainerFS
109 109
 }
110 110
 
111 111
 func (r *stubRemote) Hash(path string) (string, error) {
112 112
 	return "", errors.New("not implemented")
113 113
 }
114 114
 
115
-func (r *stubRemote) Root() string {
115
+func (r *stubRemote) Root() containerfs.ContainerFS {
116 116
 	return r.root
117 117
 }
118 118
 func (r *stubRemote) Close() error {
119 119
 	return errors.New("not implemented")
120 120
 }
121 121
 func (r *stubRemote) Remove(p string) error {
122
-	return os.Remove(filepath.Join(r.root, p))
122
+	return r.root.Remove(r.root.Join(r.root.Path(), p))
123 123
 }
... ...
@@ -3,11 +3,10 @@ package remotecontext
3 3
 import (
4 4
 	"encoding/hex"
5 5
 	"os"
6
-	"path/filepath"
7
-	"runtime"
8 6
 	"strings"
9 7
 
10 8
 	"github.com/docker/docker/builder"
9
+	"github.com/docker/docker/pkg/containerfs"
11 10
 	"github.com/docker/docker/pkg/pools"
12 11
 	"github.com/pkg/errors"
13 12
 )
... ...
@@ -15,7 +14,7 @@ import (
15 15
 // NewLazySource creates a new LazyContext. LazyContext defines a hashed build
16 16
 // context based on a root directory. Individual files are hashed first time
17 17
 // they are asked. It is not safe to call methods of LazyContext concurrently.
18
-func NewLazySource(root string) (builder.Source, error) {
18
+func NewLazySource(root containerfs.ContainerFS) (builder.Source, error) {
19 19
 	return &lazySource{
20 20
 		root: root,
21 21
 		sums: make(map[string]string),
... ...
@@ -23,11 +22,11 @@ func NewLazySource(root string) (builder.Source, error) {
23 23
 }
24 24
 
25 25
 type lazySource struct {
26
-	root string
26
+	root containerfs.ContainerFS
27 27
 	sums map[string]string
28 28
 }
29 29
 
30
-func (c *lazySource) Root() string {
30
+func (c *lazySource) Root() containerfs.ContainerFS {
31 31
 	return c.root
32 32
 }
33 33
 
... ...
@@ -41,7 +40,7 @@ func (c *lazySource) Hash(path string) (string, error) {
41 41
 		return "", err
42 42
 	}
43 43
 
44
-	fi, err := os.Lstat(fullPath)
44
+	fi, err := c.root.Lstat(fullPath)
45 45
 	if err != nil {
46 46
 		return "", errors.WithStack(err)
47 47
 	}
... ...
@@ -63,13 +62,13 @@ func (c *lazySource) Hash(path string) (string, error) {
63 63
 }
64 64
 
65 65
 func (c *lazySource) prepareHash(relPath string, fi os.FileInfo) (string, error) {
66
-	p := filepath.Join(c.root, relPath)
66
+	p := c.root.Join(c.root.Path(), relPath)
67 67
 	h, err := NewFileHash(p, relPath, fi)
68 68
 	if err != nil {
69 69
 		return "", errors.Wrapf(err, "failed to create hash for %s", relPath)
70 70
 	}
71 71
 	if fi.Mode().IsRegular() && fi.Size() > 0 {
72
-		f, err := os.Open(p)
72
+		f, err := c.root.Open(p)
73 73
 		if err != nil {
74 74
 			return "", errors.Wrapf(err, "failed to open %s", relPath)
75 75
 		}
... ...
@@ -85,10 +84,10 @@ func (c *lazySource) prepareHash(relPath string, fi os.FileInfo) (string, error)
85 85
 
86 86
 // Rel makes a path relative to base path. Same as `filepath.Rel` but can also
87 87
 // handle UUID paths in windows.
88
-func Rel(basepath, targpath string) (string, error) {
88
+func Rel(basepath containerfs.ContainerFS, targpath string) (string, error) {
89 89
 	// filepath.Rel can't handle UUID paths in windows
90
-	if runtime.GOOS == "windows" {
91
-		pfx := basepath + `\`
90
+	if basepath.OS() == "windows" {
91
+		pfx := basepath.Path() + `\`
92 92
 		if strings.HasPrefix(targpath, pfx) {
93 93
 			p := strings.TrimPrefix(targpath, pfx)
94 94
 			if p == "" {
... ...
@@ -97,5 +96,5 @@ func Rel(basepath, targpath string) (string, error) {
97 97
 			return p, nil
98 98
 		}
99 99
 	}
100
-	return filepath.Rel(basepath, targpath)
100
+	return basepath.Rel(basepath.Path(), targpath)
101 101
 }
... ...
@@ -3,11 +3,11 @@ package remotecontext
3 3
 import (
4 4
 	"fmt"
5 5
 	"os"
6
-	"path/filepath"
7 6
 	"sync"
8 7
 
9
-	"github.com/docker/docker/pkg/symlink"
10 8
 	iradix "github.com/hashicorp/go-immutable-radix"
9
+
10
+	"github.com/docker/docker/pkg/containerfs"
11 11
 	"github.com/pkg/errors"
12 12
 	"github.com/tonistiigi/fsutil"
13 13
 )
... ...
@@ -19,7 +19,7 @@ type hashed interface {
19 19
 // CachableSource is a source that contains cache records for its contents
20 20
 type CachableSource struct {
21 21
 	mu   sync.Mutex
22
-	root string
22
+	root containerfs.ContainerFS
23 23
 	tree *iradix.Tree
24 24
 	txn  *iradix.Txn
25 25
 }
... ...
@@ -28,7 +28,7 @@ type CachableSource struct {
28 28
 func NewCachableSource(root string) *CachableSource {
29 29
 	ts := &CachableSource{
30 30
 		tree: iradix.New(),
31
-		root: root,
31
+		root: containerfs.NewLocalContainerFS(root),
32 32
 	}
33 33
 	return ts
34 34
 }
... ...
@@ -67,7 +67,7 @@ func (cs *CachableSource) Scan() error {
67 67
 		return err
68 68
 	}
69 69
 	txn := iradix.New().Txn()
70
-	err = filepath.Walk(cs.root, func(path string, info os.FileInfo, err error) error {
70
+	err = cs.root.Walk(cs.root.Path(), func(path string, info os.FileInfo, err error) error {
71 71
 		if err != nil {
72 72
 			return errors.Wrapf(err, "failed to walk %s", path)
73 73
 		}
... ...
@@ -134,12 +134,12 @@ func (cs *CachableSource) Close() error {
134 134
 }
135 135
 
136 136
 func (cs *CachableSource) normalize(path string) (cleanpath, fullpath string, err error) {
137
-	cleanpath = filepath.Clean(string(os.PathSeparator) + path)[1:]
138
-	fullpath, err = symlink.FollowSymlinkInScope(filepath.Join(cs.root, path), cs.root)
137
+	cleanpath = cs.root.Clean(string(cs.root.Separator()) + path)[1:]
138
+	fullpath, err = cs.root.ResolveScopedPath(path, true)
139 139
 	if err != nil {
140 140
 		return "", "", fmt.Errorf("Forbidden path outside the context: %s (%s)", path, fullpath)
141 141
 	}
142
-	_, err = os.Lstat(fullpath)
142
+	_, err = cs.root.Lstat(fullpath)
143 143
 	if err != nil {
144 144
 		return "", "", convertPathError(err, path)
145 145
 	}
... ...
@@ -158,7 +158,7 @@ func (cs *CachableSource) Hash(path string) (string, error) {
158 158
 }
159 159
 
160 160
 // Root returns a root directory for the source
161
-func (cs *CachableSource) Root() string {
161
+func (cs *CachableSource) Root() containerfs.ContainerFS {
162 162
 	return cs.root
163 163
 }
164 164
 
... ...
@@ -94,7 +94,7 @@ func (this *TarsumBackup) GoString() string {
94 94
 	s := make([]string, 0, 5)
95 95
 	s = append(s, "&remotecontext.TarsumBackup{")
96 96
 	keysForHashes := make([]string, 0, len(this.Hashes))
97
-	for k, _ := range this.Hashes {
97
+	for k := range this.Hashes {
98 98
 		keysForHashes = append(keysForHashes, k)
99 99
 	}
100 100
 	github_com_gogo_protobuf_sortkeys.Strings(keysForHashes)
... ...
@@ -133,7 +133,7 @@ func (m *TarsumBackup) MarshalTo(dAtA []byte) (int, error) {
133 133
 	var l int
134 134
 	_ = l
135 135
 	if len(m.Hashes) > 0 {
136
-		for k, _ := range m.Hashes {
136
+		for k := range m.Hashes {
137 137
 			dAtA[i] = 0xa
138 138
 			i++
139 139
 			v := m.Hashes[k]
... ...
@@ -211,7 +211,7 @@ func (this *TarsumBackup) String() string {
211 211
 		return "nil"
212 212
 	}
213 213
 	keysForHashes := make([]string, 0, len(this.Hashes))
214
-	for k, _ := range this.Hashes {
214
+	for k := range this.Hashes {
215 215
 		keysForHashes = append(keysForHashes, k)
216 216
 	}
217 217
 	github_com_gogo_protobuf_sortkeys.Strings(keysForHashes)
... ...
@@ -35,7 +35,7 @@ func TestCloseRootDirectory(t *testing.T) {
35 35
 		t.Fatalf("Error while executing Close: %s", err)
36 36
 	}
37 37
 
38
-	_, err = os.Stat(src.Root())
38
+	_, err = os.Stat(src.Root().Path())
39 39
 
40 40
 	if !os.IsNotExist(err) {
41 41
 		t.Fatal("Directory should not exist at this point")
... ...
@@ -99,6 +99,8 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
99 99
 		FullTimestamp:   true,
100 100
 	})
101 101
 
102
+	system.InitLCOW(cli.Config.Experimental)
103
+
102 104
 	if err := setDefaultUmask(); err != nil {
103 105
 		return fmt.Errorf("Failed to set umask: %v", err)
104 106
 	}
... ...
@@ -2,7 +2,6 @@ package container
2 2
 
3 3
 import (
4 4
 	"os"
5
-	"path/filepath"
6 5
 
7 6
 	"github.com/docker/docker/api/types"
8 7
 	"github.com/docker/docker/pkg/archive"
... ...
@@ -15,17 +14,20 @@ import (
15 15
 // an error if the path points to outside the container's rootfs.
16 16
 func (container *Container) ResolvePath(path string) (resolvedPath, absPath string, err error) {
17 17
 	// Check if a drive letter supplied, it must be the system drive. No-op except on Windows
18
-	path, err = system.CheckSystemDriveAndRemoveDriveLetter(path)
18
+	path, err = system.CheckSystemDriveAndRemoveDriveLetter(path, container.BaseFS)
19 19
 	if err != nil {
20 20
 		return "", "", err
21 21
 	}
22 22
 
23 23
 	// Consider the given path as an absolute path in the container.
24
-	absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path)
24
+	absPath = archive.PreserveTrailingDotOrSeparator(
25
+		container.BaseFS.Join(string(container.BaseFS.Separator()), path),
26
+		path,
27
+		container.BaseFS.Separator())
25 28
 
26 29
 	// Split the absPath into its Directory and Base components. We will
27 30
 	// resolve the dir in the scope of the container then append the base.
28
-	dirPath, basePath := filepath.Split(absPath)
31
+	dirPath, basePath := container.BaseFS.Split(absPath)
29 32
 
30 33
 	resolvedDirPath, err := container.GetResourcePath(dirPath)
31 34
 	if err != nil {
... ...
@@ -34,8 +36,7 @@ func (container *Container) ResolvePath(path string) (resolvedPath, absPath stri
34 34
 
35 35
 	// resolvedDirPath will have been cleaned (no trailing path separators) so
36 36
 	// we can manually join it with the base path element.
37
-	resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
38
-
37
+	resolvedPath = resolvedDirPath + string(container.BaseFS.Separator()) + basePath
39 38
 	return resolvedPath, absPath, nil
40 39
 }
41 40
 
... ...
@@ -44,7 +45,9 @@ func (container *Container) ResolvePath(path string) (resolvedPath, absPath stri
44 44
 // resolved to a path on the host corresponding to the given absolute path
45 45
 // inside the container.
46 46
 func (container *Container) StatPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) {
47
-	lstat, err := os.Lstat(resolvedPath)
47
+	driver := container.BaseFS
48
+
49
+	lstat, err := driver.Lstat(resolvedPath)
48 50
 	if err != nil {
49 51
 		return nil, err
50 52
 	}
... ...
@@ -57,17 +60,17 @@ func (container *Container) StatPath(resolvedPath, absPath string) (stat *types.
57 57
 			return nil, err
58 58
 		}
59 59
 
60
-		linkTarget, err = filepath.Rel(container.BaseFS, hostPath)
60
+		linkTarget, err = driver.Rel(driver.Path(), hostPath)
61 61
 		if err != nil {
62 62
 			return nil, err
63 63
 		}
64 64
 
65 65
 		// Make it an absolute path.
66
-		linkTarget = filepath.Join(string(filepath.Separator), linkTarget)
66
+		linkTarget = driver.Join(string(driver.Separator()), linkTarget)
67 67
 	}
68 68
 
69 69
 	return &types.ContainerPathStat{
70
-		Name:       filepath.Base(absPath),
70
+		Name:       driver.Base(absPath),
71 71
 		Size:       lstat.Size(),
72 72
 		Mode:       lstat.Mode(),
73 73
 		Mtime:      lstat.ModTime(),
... ...
@@ -28,6 +28,7 @@ import (
28 28
 	"github.com/docker/docker/layer"
29 29
 	"github.com/docker/docker/libcontainerd"
30 30
 	"github.com/docker/docker/opts"
31
+	"github.com/docker/docker/pkg/containerfs"
31 32
 	"github.com/docker/docker/pkg/idtools"
32 33
 	"github.com/docker/docker/pkg/ioutils"
33 34
 	"github.com/docker/docker/pkg/signal"
... ...
@@ -64,10 +65,10 @@ var (
64 64
 type Container struct {
65 65
 	StreamConfig *stream.Config
66 66
 	// embed for Container to support states directly.
67
-	*State          `json:"State"` // Needed for Engine API version <= 1.11
68
-	Root            string         `json:"-"` // Path to the "home" of the container, including metadata.
69
-	BaseFS          string         `json:"-"` // Path to the graphdriver mountpoint
70
-	RWLayer         layer.RWLayer  `json:"-"`
67
+	*State          `json:"State"`          // Needed for Engine API version <= 1.11
68
+	Root            string                  `json:"-"` // Path to the "home" of the container, including metadata.
69
+	BaseFS          containerfs.ContainerFS `json:"-"` // interface containing graphdriver mount
70
+	RWLayer         layer.RWLayer           `json:"-"`
71 71
 	ID              string
72 72
 	Created         time.Time
73 73
 	Managed         bool
... ...
@@ -305,15 +306,13 @@ func (container *Container) SetupWorkingDirectory(rootIDs idtools.IDPair) error
305 305
 func (container *Container) GetResourcePath(path string) (string, error) {
306 306
 	// IMPORTANT - These are paths on the OS where the daemon is running, hence
307 307
 	// any filepath operations must be done in an OS agnostic way.
308
-
309
-	cleanPath := cleanResourcePath(path)
310
-	r, e := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, cleanPath), container.BaseFS)
308
+	r, e := container.BaseFS.ResolveScopedPath(path, false)
311 309
 
312 310
 	// Log this here on the daemon side as there's otherwise no indication apart
313 311
 	// from the error being propagated all the way back to the client. This makes
314 312
 	// debugging significantly easier and clearly indicates the error comes from the daemon.
315 313
 	if e != nil {
316
-		logrus.Errorf("Failed to FollowSymlinkInScope BaseFS %s cleanPath %s path %s %s\n", container.BaseFS, cleanPath, path, e)
314
+		logrus.Errorf("Failed to ResolveScopedPath BaseFS %s path %s %s\n", container.BaseFS.Path(), path, e)
317 315
 	}
318 316
 	return r, e
319 317
 }
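To make the new path handling concrete, here is a minimal sketch (not part of the change; the helper name is made up) of the flow the daemon-side callers follow in this PR: the user-supplied path is normalized with FromSlash, anchored at the container root with the driver's own separator, and then resolved inside the mount with ResolveScopedPath rather than the host's path/filepath plus symlink.FollowSymlinkInScope.

package example

import "github.com/docker/docker/pkg/containerfs"

// scopedResourcePath is an illustrative helper only. It combines the steps
// the daemon performs across containerStatPath/containerExtractToDir and
// GetResourcePath: slash normalization, rooting, and scoped resolution.
func scopedResourcePath(fs containerfs.ContainerFS, userPath string) (string, error) {
	cleaned := fs.Join(string(fs.Separator()), fs.FromSlash(userPath))
	return fs.ResolveScopedPath(cleaned, false)
}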
... ...
@@ -5,7 +5,6 @@ package container
5 5
 import (
6 6
 	"io/ioutil"
7 7
 	"os"
8
-	"path/filepath"
9 8
 
10 9
 	"github.com/docker/docker/api/types"
11 10
 	containertypes "github.com/docker/docker/api/types/container"
... ...
@@ -13,7 +12,6 @@ import (
13 13
 	"github.com/docker/docker/pkg/chrootarchive"
14 14
 	"github.com/docker/docker/pkg/mount"
15 15
 	"github.com/docker/docker/pkg/stringid"
16
-	"github.com/docker/docker/pkg/symlink"
17 16
 	"github.com/docker/docker/pkg/system"
18 17
 	"github.com/docker/docker/volume"
19 18
 	"github.com/opencontainers/selinux/go-selinux/label"
... ...
@@ -131,7 +129,7 @@ func (container *Container) NetworkMounts() []Mount {
131 131
 
132 132
 // CopyImagePathContent copies files in destination to the volume.
133 133
 func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error {
134
-	rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, destination), container.BaseFS)
134
+	rootfs, err := container.GetResourcePath(destination)
135 135
 	if err != nil {
136 136
 		return err
137 137
 	}
... ...
@@ -456,11 +454,6 @@ func (container *Container) TmpfsMounts() ([]Mount, error) {
456 456
 	return mounts, nil
457 457
 }
458 458
 
459
-// cleanResourcePath cleans a resource path and prepares to combine with mnt path
460
-func cleanResourcePath(path string) string {
461
-	return filepath.Join(string(os.PathSeparator), path)
462
-}
463
-
464 459
 // EnableServiceDiscoveryOnDefaultNetwork Enable service discovery on default network
465 460
 func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool {
466 461
 	return false
... ...
@@ -172,18 +172,6 @@ func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfi
172 172
 	return nil
173 173
 }
174 174
 
175
-// cleanResourcePath cleans a resource path by removing C:\ syntax, and prepares
176
-// to combine with a volume path
177
-func cleanResourcePath(path string) string {
178
-	if len(path) >= 2 {
179
-		c := path[0]
180
-		if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {
181
-			path = path[2:]
182
-		}
183
-	}
184
-	return filepath.Join(string(os.PathSeparator), path)
185
-}
186
-
187 175
 // BuildHostnameFile writes the container's hostname file.
188 176
 func (container *Container) BuildHostnameFile() error {
189 177
 	return nil
... ...
@@ -3,7 +3,6 @@ package daemon
3 3
 import (
4 4
 	"io"
5 5
 	"os"
6
-	"path/filepath"
7 6
 	"strings"
8 7
 
9 8
 	"github.com/docker/docker/api/types"
... ...
@@ -20,6 +19,31 @@ import (
20 20
 // path does not refer to a directory.
21 21
 var ErrExtractPointNotDirectory = errors.New("extraction point is not a directory")
22 22
 
23
+// The daemon will use the following interfaces if the container fs implements
24
+// these for optimized copies to and from the container.
25
+type extractor interface {
26
+	ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error
27
+}
28
+
29
+type archiver interface {
30
+	ArchivePath(src string, opts *archive.TarOptions) (io.ReadCloser, error)
31
+}
32
+
33
+// helper functions to extract or archive
34
+func extractArchive(i interface{}, src io.Reader, dst string, opts *archive.TarOptions) error {
35
+	if ea, ok := i.(extractor); ok {
36
+		return ea.ExtractArchive(src, dst, opts)
37
+	}
38
+	return chrootarchive.Untar(src, dst, opts)
39
+}
40
+
41
+func archivePath(i interface{}, src string, opts *archive.TarOptions) (io.ReadCloser, error) {
42
+	if ap, ok := i.(archiver); ok {
43
+		return ap.ArchivePath(src, opts)
44
+	}
45
+	return archive.TarWithOptions(src, opts)
46
+}
47
+
23 48
 // ContainerCopy performs a deprecated operation of archiving the resource at
24 49
 // the specified path in the container identified by the given name.
25 50
 func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) {
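To illustrate the dispatch above, here is a hedged sketch (all names are illustrative, not from this change) of a filesystem type that would take the optimized path: because it implements both ExtractArchive and ArchivePath, extractArchive and archivePath call into it instead of falling back to chrootarchive.Untar and archive.TarWithOptions.

package example

import (
	"errors"
	"io"

	"github.com/docker/docker/pkg/archive"
)

// streamingFS is a hypothetical container filesystem whose contents live
// behind a utility VM, so tar streams are forwarded rather than unpacked on
// the host.
type streamingFS struct{}

func (streamingFS) ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error {
	// Sketch only: forward src to the remote side and unpack it at dst there.
	return errors.New("not implemented in this sketch")
}

func (streamingFS) ArchivePath(src string, opts *archive.TarOptions) (io.ReadCloser, error) {
	// Sketch only: ask the remote side to tar up src and stream it back.
	return nil, errors.New("not implemented in this sketch")
}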
... ...
@@ -138,6 +162,9 @@ func (daemon *Daemon) containerStatPath(container *container.Container, path str
138 138
 		return nil, err
139 139
 	}
140 140
 
141
+	// Normalize path before sending to rootfs
142
+	path = container.BaseFS.FromSlash(path)
143
+
141 144
 	resolvedPath, absPath, err := container.ResolvePath(path)
142 145
 	if err != nil {
143 146
 		return nil, err
... ...
@@ -178,6 +205,9 @@ func (daemon *Daemon) containerArchivePath(container *container.Container, path
178 178
 		return nil, nil, err
179 179
 	}
180 180
 
181
+	// Normalize path before sending to rootfs
182
+	path = container.BaseFS.FromSlash(path)
183
+
181 184
 	resolvedPath, absPath, err := container.ResolvePath(path)
182 185
 	if err != nil {
183 186
 		return nil, nil, err
... ...
@@ -196,7 +226,18 @@ func (daemon *Daemon) containerArchivePath(container *container.Container, path
196 196
 	// also catches the case when the root directory of the container is
197 197
 	// requested: we want the archive entries to start with "/" and not the
198 198
 	// container ID.
199
-	data, err := archive.TarResourceRebase(resolvedPath, filepath.Base(absPath))
199
+	driver := container.BaseFS
200
+
201
+	// Get the source and the base paths of the container resolved path in order
202
+	// to get the proper tar options for the rebase tar.
203
+	resolvedPath = driver.Clean(resolvedPath)
204
+	if driver.Base(resolvedPath) == "." {
205
+		resolvedPath += string(driver.Separator()) + "."
206
+	}
207
+	sourceDir, sourceBase := driver.Dir(resolvedPath), driver.Base(resolvedPath)
208
+	opts := archive.TarResourceRebaseOpts(sourceBase, driver.Base(absPath))
209
+
210
+	data, err := archivePath(driver, sourceDir, opts)
200 211
 	if err != nil {
201 212
 		return nil, nil, err
202 213
 	}
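A short worked example of the bookkeeping above, with assumed values (the mount path is hypothetical):

// Suppose a client asks for /etc from a Linux container whose graphdriver
// mount is at /var/lib/docker/overlay2/abc/merged (illustrative path only):
//
//	absPath      = "/etc"
//	resolvedPath = "/var/lib/docker/overlay2/abc/merged/etc"
//	sourceDir    = driver.Dir(resolvedPath)   // ".../abc/merged"
//	sourceBase   = driver.Base(resolvedPath)  // "etc"
//	opts         = archive.TarResourceRebaseOpts("etc", driver.Base(absPath))
//
// archivePath then tars sourceDir with those options, so the archive entries
// are rooted at "etc/..." rather than at the graphdriver mount path, matching
// what the old TarResourceRebase call produced for local filesystems.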
... ...
@@ -235,8 +276,12 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path
235 235
 		return err
236 236
 	}
237 237
 
238
+	// Normalize path before sending to rootfs
239
+	path = container.BaseFS.FromSlash(path)
240
+	driver := container.BaseFS
241
+
238 242
 	// Check if a drive letter supplied, it must be the system drive. No-op except on Windows
239
-	path, err = system.CheckSystemDriveAndRemoveDriveLetter(path)
243
+	path, err = system.CheckSystemDriveAndRemoveDriveLetter(path, driver)
240 244
 	if err != nil {
241 245
 		return err
242 246
 	}
... ...
@@ -248,7 +293,10 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path
248 248
 	// that you can extract an archive to a symlink that points to a directory.
249 249
 
250 250
 	// Consider the given path as an absolute path in the container.
251
-	absPath := archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path)
251
+	absPath := archive.PreserveTrailingDotOrSeparator(
252
+		driver.Join(string(driver.Separator()), path),
253
+		path,
254
+		driver.Separator())
252 255
 
253 256
 	// This will evaluate the last path element if it is a symlink.
254 257
 	resolvedPath, err := container.GetResourcePath(absPath)
... ...
@@ -256,7 +304,7 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path
256 256
 		return err
257 257
 	}
258 258
 
259
-	stat, err := os.Lstat(resolvedPath)
259
+	stat, err := driver.Lstat(resolvedPath)
260 260
 	if err != nil {
261 261
 		return err
262 262
 	}
... ...
@@ -279,21 +327,24 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path
279 279
 	// a volume file path.
280 280
 	var baseRel string
281 281
 	if strings.HasPrefix(resolvedPath, `\\?\Volume{`) {
282
-		if strings.HasPrefix(resolvedPath, container.BaseFS) {
283
-			baseRel = resolvedPath[len(container.BaseFS):]
282
+		if strings.HasPrefix(resolvedPath, driver.Path()) {
283
+			baseRel = resolvedPath[len(driver.Path()):]
284 284
 			if baseRel[:1] == `\` {
285 285
 				baseRel = baseRel[1:]
286 286
 			}
287 287
 		}
288 288
 	} else {
289
-		baseRel, err = filepath.Rel(container.BaseFS, resolvedPath)
289
+		baseRel, err = driver.Rel(driver.Path(), resolvedPath)
290 290
 	}
291 291
 	if err != nil {
292 292
 		return err
293 293
 	}
294 294
 	// Make it an absolute path.
295
-	absPath = filepath.Join(string(filepath.Separator), baseRel)
295
+	absPath = driver.Join(string(driver.Separator()), baseRel)
296 296
 
297
+	// @ TODO: gupta-ak: Technically, this works since it no-ops
298
+	// on Windows and the file system is local anyway on linux.
299
+	// But eventually, it should be made driver aware.
297 300
 	toVolume, err := checkIfPathIsInAVolume(container, absPath)
298 301
 	if err != nil {
299 302
 		return err
... ...
@@ -315,7 +366,7 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path
315 315
 		}
316 316
 	}
317 317
 
318
-	if err := chrootarchive.Untar(content, resolvedPath, options); err != nil {
318
+	if err := extractArchive(driver, content, resolvedPath, options); err != nil {
319 319
 		return err
320 320
 	}
321 321
 
... ...
@@ -356,24 +407,28 @@ func (daemon *Daemon) containerCopy(container *container.Container, resource str
356 356
 		return nil, err
357 357
 	}
358 358
 
359
+	// Normalize path before sending to rootfs
360
+	resource = container.BaseFS.FromSlash(resource)
361
+	driver := container.BaseFS
362
+
359 363
 	basePath, err := container.GetResourcePath(resource)
360 364
 	if err != nil {
361 365
 		return nil, err
362 366
 	}
363
-	stat, err := os.Stat(basePath)
367
+	stat, err := driver.Stat(basePath)
364 368
 	if err != nil {
365 369
 		return nil, err
366 370
 	}
367 371
 	var filter []string
368 372
 	if !stat.IsDir() {
369
-		d, f := filepath.Split(basePath)
373
+		d, f := driver.Split(basePath)
370 374
 		basePath = d
371 375
 		filter = []string{f}
372 376
 	} else {
373
-		filter = []string{filepath.Base(basePath)}
374
-		basePath = filepath.Dir(basePath)
377
+		filter = []string{driver.Base(basePath)}
378
+		basePath = driver.Dir(basePath)
375 379
 	}
376
-	archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{
380
+	archive, err := archivePath(driver, basePath, &archive.TarOptions{
377 381
 		Compression:  archive.Uncompressed,
378 382
 		IncludeFiles: filter,
379 383
 	})
... ...
@@ -10,6 +10,7 @@ import (
10 10
 	"github.com/docker/docker/builder"
11 11
 	"github.com/docker/docker/image"
12 12
 	"github.com/docker/docker/layer"
13
+	"github.com/docker/docker/pkg/containerfs"
13 14
 	"github.com/docker/docker/pkg/idtools"
14 15
 	"github.com/docker/docker/pkg/stringid"
15 16
 	"github.com/docker/docker/registry"
... ...
@@ -25,9 +26,9 @@ type releaseableLayer struct {
25 25
 	rwLayer    layer.RWLayer
26 26
 }
27 27
 
28
-func (rl *releaseableLayer) Mount() (string, error) {
28
+func (rl *releaseableLayer) Mount() (containerfs.ContainerFS, error) {
29 29
 	var err error
30
-	var mountPath string
30
+	var mountPath containerfs.ContainerFS
31 31
 	var chainID layer.ChainID
32 32
 	if rl.roLayer != nil {
33 33
 		chainID = rl.roLayer.ChainID()
... ...
@@ -36,7 +37,7 @@ func (rl *releaseableLayer) Mount() (string, error) {
36 36
 	mountID := stringid.GenerateRandomID()
37 37
 	rl.rwLayer, err = rl.layerStore.CreateRWLayer(mountID, chainID, nil)
38 38
 	if err != nil {
39
-		return "", errors.Wrap(err, "failed to create rwlayer")
39
+		return nil, errors.Wrap(err, "failed to create rwlayer")
40 40
 	}
41 41
 
42 42
 	mountPath, err = rl.rwLayer.Mount("")
... ...
@@ -48,7 +49,7 @@ func (rl *releaseableLayer) Mount() (string, error) {
48 48
 			logrus.Errorf("Failed to release RWLayer: %s", err)
49 49
 		}
50 50
 		rl.rwLayer = nil
51
-		return "", err
51
+		return nil, err
52 52
 	}
53 53
 
54 54
 	return mountPath, nil
... ...
@@ -40,6 +40,7 @@ import (
40 40
 	"github.com/docker/docker/layer"
41 41
 	"github.com/docker/docker/libcontainerd"
42 42
 	"github.com/docker/docker/migrate/v1"
43
+	"github.com/docker/docker/pkg/containerfs"
43 44
 	"github.com/docker/docker/pkg/idtools"
44 45
 	"github.com/docker/docker/pkg/plugingetter"
45 46
 	"github.com/docker/docker/pkg/sysinfo"
... ...
@@ -966,11 +967,11 @@ func (daemon *Daemon) Mount(container *container.Container) error {
966 966
 	}
967 967
 	logrus.Debugf("container mounted via layerStore: %v", dir)
968 968
 
969
-	if container.BaseFS != dir {
969
+	if container.BaseFS != nil && container.BaseFS.Path() != dir.Path() {
970 970
 		// The mount path reported by the graph driver should always be trusted on Windows, since the
971 971
 		// volume path for a given mounted layer may change over time.  This should only be an error
972 972
 		// on non-Windows operating systems.
973
-		if container.BaseFS != "" && runtime.GOOS != "windows" {
973
+		if runtime.GOOS != "windows" {
974 974
 			daemon.Unmount(container)
975 975
 			return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
976 976
 				daemon.GraphDriverName(container.Platform), container.ID, container.BaseFS, dir)
... ...
@@ -1045,7 +1046,7 @@ func prepareTempDir(rootDir string, rootIDs idtools.IDPair) (string, error) {
1045 1045
 	return tmpDir, idtools.MkdirAllAndChown(tmpDir, 0700, rootIDs)
1046 1046
 }
1047 1047
 
1048
-func (daemon *Daemon) setupInitLayer(initPath string) error {
1048
+func (daemon *Daemon) setupInitLayer(initPath containerfs.ContainerFS) error {
1049 1049
 	rootIDs := daemon.idMappings.RootPair()
1050 1050
 	return initlayer.Setup(initPath, rootIDs)
1051 1051
 }
... ...
@@ -12,6 +12,7 @@ import (
12 12
 	"github.com/docker/docker/container"
13 13
 	"github.com/docker/docker/daemon/config"
14 14
 	"github.com/docker/docker/image"
15
+	"github.com/docker/docker/pkg/containerfs"
15 16
 	"github.com/docker/docker/pkg/fileutils"
16 17
 	"github.com/docker/docker/pkg/idtools"
17 18
 	"github.com/docker/docker/pkg/parsers/kernel"
... ...
@@ -97,7 +98,7 @@ func setupDaemonRoot(config *config.Config, rootDir string, rootIDs idtools.IDPa
97 97
 	return nil
98 98
 }
99 99
 
100
-func (daemon *Daemon) getLayerInit() func(string) error {
100
+func (daemon *Daemon) getLayerInit() func(containerfs.ContainerFS) error {
101 101
 	return nil
102 102
 }
103 103
 
... ...
@@ -24,6 +24,7 @@ import (
24 24
 	"github.com/docker/docker/daemon/config"
25 25
 	"github.com/docker/docker/image"
26 26
 	"github.com/docker/docker/opts"
27
+	"github.com/docker/docker/pkg/containerfs"
27 28
 	"github.com/docker/docker/pkg/idtools"
28 29
 	"github.com/docker/docker/pkg/parsers"
29 30
 	"github.com/docker/docker/pkg/parsers/kernel"
... ...
@@ -988,7 +989,7 @@ func removeDefaultBridgeInterface() {
988 988
 	}
989 989
 }
990 990
 
991
-func (daemon *Daemon) getLayerInit() func(string) error {
991
+func (daemon *Daemon) getLayerInit() func(containerfs.ContainerFS) error {
992 992
 	return daemon.setupInitLayer
993 993
 }
994 994
 
... ...
@@ -12,6 +12,7 @@ import (
12 12
 	"github.com/docker/docker/container"
13 13
 	"github.com/docker/docker/daemon/config"
14 14
 	"github.com/docker/docker/image"
15
+	"github.com/docker/docker/pkg/containerfs"
15 16
 	"github.com/docker/docker/pkg/fileutils"
16 17
 	"github.com/docker/docker/pkg/idtools"
17 18
 	"github.com/docker/docker/pkg/parsers"
... ...
@@ -56,7 +57,7 @@ func parseSecurityOpt(container *container.Container, config *containertypes.Hos
56 56
 	return nil
57 57
 }
58 58
 
59
-func (daemon *Daemon) getLayerInit() func(string) error {
59
+func (daemon *Daemon) getLayerInit() func(containerfs.ContainerFS) error {
60 60
 	return nil
61 61
 }
62 62
 
... ...
@@ -40,7 +40,7 @@ func (daemon *Daemon) containerExport(container *container.Container) (io.ReadCl
40 40
 		return nil, err
41 41
 	}
42 42
 
43
-	archive, err := archive.TarWithOptions(container.BaseFS, &archive.TarOptions{
43
+	archive, err := archivePath(container.BaseFS, container.BaseFS.Path(), &archive.TarOptions{
44 44
 		Compression: archive.Uncompressed,
45 45
 		UIDMaps:     daemon.idMappings.UIDs(),
46 46
 		GIDMaps:     daemon.idMappings.GIDs(),
... ...
@@ -38,6 +38,7 @@ import (
38 38
 	"github.com/docker/docker/daemon/graphdriver"
39 39
 	"github.com/docker/docker/pkg/archive"
40 40
 	"github.com/docker/docker/pkg/chrootarchive"
41
+	"github.com/docker/docker/pkg/containerfs"
41 42
 	"github.com/docker/docker/pkg/directory"
42 43
 	"github.com/docker/docker/pkg/idtools"
43 44
 	"github.com/docker/docker/pkg/locker"
... ...
@@ -388,12 +389,12 @@ func atomicRemove(source string) error {
388 388
 
389 389
 // Get returns the rootfs path for the id.
390 390
 // This will mount the dir at its given path
391
-func (a *Driver) Get(id, mountLabel string) (string, error) {
391
+func (a *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
392 392
 	a.locker.Lock(id)
393 393
 	defer a.locker.Unlock(id)
394 394
 	parents, err := a.getParentLayerPaths(id)
395 395
 	if err != nil && !os.IsNotExist(err) {
396
-		return "", err
396
+		return nil, err
397 397
 	}
398 398
 
399 399
 	a.pathCacheLock.Lock()
... ...
@@ -407,21 +408,21 @@ func (a *Driver) Get(id, mountLabel string) (string, error) {
407 407
 		}
408 408
 	}
409 409
 	if count := a.ctr.Increment(m); count > 1 {
410
-		return m, nil
410
+		return containerfs.NewLocalContainerFS(m), nil
411 411
 	}
412 412
 
413 413
 	// If a dir does not have a parent ( no layers )do not try to mount
414 414
 	// just return the diff path to the data
415 415
 	if len(parents) > 0 {
416 416
 		if err := a.mount(id, m, mountLabel, parents); err != nil {
417
-			return "", err
417
+			return nil, err
418 418
 		}
419 419
 	}
420 420
 
421 421
 	a.pathCacheLock.Lock()
422 422
 	a.pathCache[id] = m
423 423
 	a.pathCacheLock.Unlock()
424
-	return m, nil
424
+	return containerfs.NewLocalContainerFS(m), nil
425 425
 }
426 426
 
427 427
 // Put unmounts and updates list of active mounts.
... ...
@@ -9,11 +9,10 @@ import (
9 9
 	"io/ioutil"
10 10
 	"os"
11 11
 	"path"
12
+	"path/filepath"
12 13
 	"sync"
13 14
 	"testing"
14 15
 
15
-	"path/filepath"
16
-
17 16
 	"github.com/docker/docker/daemon/graphdriver"
18 17
 	"github.com/docker/docker/pkg/archive"
19 18
 	"github.com/docker/docker/pkg/reexec"
... ...
@@ -43,6 +42,14 @@ func testInit(dir string, t testing.TB) graphdriver.Driver {
43 43
 	return d
44 44
 }
45 45
 
46
+func driverGet(d *Driver, id string, mntLabel string) (string, error) {
47
+	mnt, err := d.Get(id, mntLabel)
48
+	if err != nil {
49
+		return "", err
50
+	}
51
+	return mnt.Path(), nil
52
+}
53
+
46 54
 func newDriver(t testing.TB) *Driver {
47 55
 	if err := os.MkdirAll(tmp, 0755); err != nil {
48 56
 		t.Fatal(err)
... ...
@@ -172,7 +179,7 @@ func TestGetWithoutParent(t *testing.T) {
172 172
 		t.Fatal(err)
173 173
 	}
174 174
 	expected := path.Join(tmp, "diff", "1")
175
-	if diffPath != expected {
175
+	if diffPath.Path() != expected {
176 176
 		t.Fatalf("Expected path %s got %s", expected, diffPath)
177 177
 	}
178 178
 }
... ...
@@ -249,13 +256,13 @@ func TestMountWithParent(t *testing.T) {
249 249
 	if err != nil {
250 250
 		t.Fatal(err)
251 251
 	}
252
-	if mntPath == "" {
253
-		t.Fatal("mntPath should not be empty string")
252
+	if mntPath == nil {
253
+		t.Fatal("mntPath should not be nil")
254 254
 	}
255 255
 
256 256
 	expected := path.Join(tmp, "mnt", "2")
257
-	if mntPath != expected {
258
-		t.Fatalf("Expected %s got %s", expected, mntPath)
257
+	if mntPath.Path() != expected {
258
+		t.Fatalf("Expected %s got %s", expected, mntPath.Path())
259 259
 	}
260 260
 }
261 261
 
... ...
@@ -280,8 +287,8 @@ func TestRemoveMountedDir(t *testing.T) {
280 280
 	if err != nil {
281 281
 		t.Fatal(err)
282 282
 	}
283
-	if mntPath == "" {
284
-		t.Fatal("mntPath should not be empty string")
283
+	if mntPath == nil {
284
+		t.Fatal("mntPath should not be nil")
285 285
 	}
286 286
 
287 287
 	mounted, err := d.mounted(d.pathCache["2"])
... ...
@@ -315,7 +322,7 @@ func TestGetDiff(t *testing.T) {
315 315
 		t.Fatal(err)
316 316
 	}
317 317
 
318
-	diffPath, err := d.Get("1", "")
318
+	diffPath, err := driverGet(d, "1", "")
319 319
 	if err != nil {
320 320
 		t.Fatal(err)
321 321
 	}
... ...
@@ -359,7 +366,7 @@ func TestChanges(t *testing.T) {
359 359
 		}
360 360
 	}()
361 361
 
362
-	mntPoint, err := d.Get("2", "")
362
+	mntPoint, err := driverGet(d, "2", "")
363 363
 	if err != nil {
364 364
 		t.Fatal(err)
365 365
 	}
... ...
@@ -398,7 +405,7 @@ func TestChanges(t *testing.T) {
398 398
 	if err := d.CreateReadWrite("3", "2", nil); err != nil {
399 399
 		t.Fatal(err)
400 400
 	}
401
-	mntPoint, err = d.Get("3", "")
401
+	mntPoint, err = driverGet(d, "3", "")
402 402
 	if err != nil {
403 403
 		t.Fatal(err)
404 404
 	}
... ...
@@ -444,7 +451,7 @@ func TestDiffSize(t *testing.T) {
444 444
 		t.Fatal(err)
445 445
 	}
446 446
 
447
-	diffPath, err := d.Get("1", "")
447
+	diffPath, err := driverGet(d, "1", "")
448 448
 	if err != nil {
449 449
 		t.Fatal(err)
450 450
 	}
... ...
@@ -486,7 +493,7 @@ func TestChildDiffSize(t *testing.T) {
486 486
 		t.Fatal(err)
487 487
 	}
488 488
 
489
-	diffPath, err := d.Get("1", "")
489
+	diffPath, err := driverGet(d, "1", "")
490 490
 	if err != nil {
491 491
 		t.Fatal(err)
492 492
 	}
... ...
@@ -587,7 +594,7 @@ func TestApplyDiff(t *testing.T) {
587 587
 		t.Fatal(err)
588 588
 	}
589 589
 
590
-	diffPath, err := d.Get("1", "")
590
+	diffPath, err := driverGet(d, "1", "")
591 591
 	if err != nil {
592 592
 		t.Fatal(err)
593 593
 	}
... ...
@@ -622,7 +629,7 @@ func TestApplyDiff(t *testing.T) {
622 622
 
623 623
 	// Ensure that the file is in the mount point for id 3
624 624
 
625
-	mountPoint, err := d.Get("3", "")
625
+	mountPoint, err := driverGet(d, "3", "")
626 626
 	if err != nil {
627 627
 		t.Fatal(err)
628 628
 	}
... ...
@@ -665,7 +672,7 @@ func testMountMoreThan42Layers(t *testing.T, mountPath string) {
665 665
 		err := d.CreateReadWrite(current, parent, nil)
666 666
 		require.NoError(t, err, "current layer %d", i)
667 667
 
668
-		point, err := d.Get(current, "")
668
+		point, err := driverGet(d, current, "")
669 669
 		require.NoError(t, err, "current layer %d", i)
670 670
 
671 671
 		f, err := os.Create(path.Join(point, current))
... ...
@@ -681,7 +688,7 @@ func testMountMoreThan42Layers(t *testing.T, mountPath string) {
681 681
 	}
682 682
 
683 683
 	// Perform the actual mount for the top most image
684
-	point, err := d.Get(last, "")
684
+	point, err := driverGet(d, last, "")
685 685
 	require.NoError(t, err)
686 686
 	files, err := ioutil.ReadDir(point)
687 687
 	require.NoError(t, err)
... ...
@@ -27,6 +27,7 @@ import (
27 27
 	"unsafe"
28 28
 
29 29
 	"github.com/docker/docker/daemon/graphdriver"
30
+	"github.com/docker/docker/pkg/containerfs"
30 31
 	"github.com/docker/docker/pkg/idtools"
31 32
 	"github.com/docker/docker/pkg/mount"
32 33
 	"github.com/docker/docker/pkg/parsers"
... ...
@@ -631,29 +632,29 @@ func (d *Driver) Remove(id string) error {
631 631
 }
632 632
 
633 633
 // Get the requested filesystem id.
634
-func (d *Driver) Get(id, mountLabel string) (string, error) {
634
+func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
635 635
 	dir := d.subvolumesDirID(id)
636 636
 	st, err := os.Stat(dir)
637 637
 	if err != nil {
638
-		return "", err
638
+		return nil, err
639 639
 	}
640 640
 
641 641
 	if !st.IsDir() {
642
-		return "", fmt.Errorf("%s: not a directory", dir)
642
+		return nil, fmt.Errorf("%s: not a directory", dir)
643 643
 	}
644 644
 
645 645
 	if quota, err := ioutil.ReadFile(d.quotasDirID(id)); err == nil {
646 646
 		if size, err := strconv.ParseUint(string(quota), 10, 64); err == nil && size >= d.options.minSpace {
647 647
 			if err := d.subvolEnableQuota(); err != nil {
648
-				return "", err
648
+				return nil, err
649 649
 			}
650 650
 			if err := subvolLimitQgroup(dir, size); err != nil {
651
-				return "", err
651
+				return nil, err
652 652
 			}
653 653
 		}
654 654
 	}
655 655
 
656
-	return dir, nil
656
+	return containerfs.NewLocalContainerFS(dir), nil
657 657
 }
658 658
 
659 659
 // Put is not implemented for BTRFS as there is no cleanup required for the id.
... ...
@@ -35,12 +35,14 @@ func TestBtrfsSubvolDelete(t *testing.T) {
35 35
 	}
36 36
 	defer graphtest.PutDriver(t)
37 37
 
38
-	dir, err := d.Get("test", "")
38
+	dirFS, err := d.Get("test", "")
39 39
 	if err != nil {
40 40
 		t.Fatal(err)
41 41
 	}
42 42
 	defer d.Put("test")
43 43
 
44
+	dir := dirFS.Path()
45
+
44 46
 	if err := subvolCreate(dir, "subvoltest"); err != nil {
45 47
 		t.Fatal(err)
46 48
 	}
... ...
@@ -12,6 +12,7 @@ import (
12 12
 	"github.com/sirupsen/logrus"
13 13
 
14 14
 	"github.com/docker/docker/daemon/graphdriver"
15
+	"github.com/docker/docker/pkg/containerfs"
15 16
 	"github.com/docker/docker/pkg/devicemapper"
16 17
 	"github.com/docker/docker/pkg/idtools"
17 18
 	"github.com/docker/docker/pkg/locker"
... ...
@@ -163,41 +164,41 @@ func (d *Driver) Remove(id string) error {
163 163
 }
164 164
 
165 165
 // Get mounts a device with given id into the root filesystem
166
-func (d *Driver) Get(id, mountLabel string) (string, error) {
166
+func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
167 167
 	d.locker.Lock(id)
168 168
 	defer d.locker.Unlock(id)
169 169
 	mp := path.Join(d.home, "mnt", id)
170 170
 	rootFs := path.Join(mp, "rootfs")
171 171
 	if count := d.ctr.Increment(mp); count > 1 {
172
-		return rootFs, nil
172
+		return containerfs.NewLocalContainerFS(rootFs), nil
173 173
 	}
174 174
 
175 175
 	uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
176 176
 	if err != nil {
177 177
 		d.ctr.Decrement(mp)
178
-		return "", err
178
+		return nil, err
179 179
 	}
180 180
 
181 181
 	// Create the target directories if they don't exist
182 182
 	if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil && !os.IsExist(err) {
183 183
 		d.ctr.Decrement(mp)
184
-		return "", err
184
+		return nil, err
185 185
 	}
186 186
 	if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) {
187 187
 		d.ctr.Decrement(mp)
188
-		return "", err
188
+		return nil, err
189 189
 	}
190 190
 
191 191
 	// Mount the device
192 192
 	if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil {
193 193
 		d.ctr.Decrement(mp)
194
-		return "", err
194
+		return nil, err
195 195
 	}
196 196
 
197 197
 	if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil && !os.IsExist(err) {
198 198
 		d.ctr.Decrement(mp)
199 199
 		d.DeviceSet.UnmountDevice(id, mp)
200
-		return "", err
200
+		return nil, err
201 201
 	}
202 202
 
203 203
 	idFile := path.Join(mp, "id")
... ...
@@ -207,11 +208,11 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
207 207
 		if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil {
208 208
 			d.ctr.Decrement(mp)
209 209
 			d.DeviceSet.UnmountDevice(id, mp)
210
-			return "", err
210
+			return nil, err
211 211
 		}
212 212
 	}
213 213
 
214
-	return rootFs, nil
214
+	return containerfs.NewLocalContainerFS(rootFs), nil
215 215
 }
216 216
 
217 217
 // Put unmounts a device and removes it.
... ...
@@ -12,6 +12,7 @@ import (
12 12
 	"github.com/vbatts/tar-split/tar/storage"
13 13
 
14 14
 	"github.com/docker/docker/pkg/archive"
15
+	"github.com/docker/docker/pkg/containerfs"
15 16
 	"github.com/docker/docker/pkg/idtools"
16 17
 	"github.com/docker/docker/pkg/plugingetter"
17 18
 )
... ...
@@ -68,7 +69,7 @@ type ProtoDriver interface {
68 68
 	// Get returns the mountpoint for the layered filesystem referred
69 69
 	// to by this id. You can optionally specify a mountLabel or "".
70 70
 	// Returns the absolute path to the mounted layered filesystem.
71
-	Get(id, mountLabel string) (dir string, err error)
71
+	Get(id, mountLabel string) (fs containerfs.ContainerFS, err error)
72 72
 	// Put releases the system resources for the specified id,
73 73
 	// e.g, unmounting layered filesystem.
74 74
 	Put(id string) error
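A minimal sketch of the new Get contract, assuming a driver whose layers are plain local directories; the btrfs and devicemapper hunks below follow the same shape. The vfsLikeDriver type here is illustrative only.

package example

import (
	"os"
	"path/filepath"

	"github.com/docker/docker/pkg/containerfs"
)

type vfsLikeDriver struct{ home string }

// Get wraps the local mount directory in a ContainerFS. Note that error
// returns now pair with a nil interface rather than an empty string.
func (d *vfsLikeDriver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
	dir := filepath.Join(d.home, "dir", id)
	if _, err := os.Stat(dir); err != nil {
		return nil, err
	}
	return containerfs.NewLocalContainerFS(dir), nil
}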
... ...
@@ -18,9 +18,9 @@ var (
18 18
 )
19 19
 
20 20
 // NaiveDiffDriver takes a ProtoDriver and adds the
21
-// capability of the Diffing methods which it may or may not
22
-// support on its own. See the comment on the exported
23
-// NewNaiveDiffDriver function below.
21
+// capability of the Diffing methods on the local file system,
22
+// which it may or may not support on its own. See the comment
23
+// on the exported NewNaiveDiffDriver function below.
24 24
 // Notably, the AUFS driver doesn't need to be wrapped like this.
25 25
 type NaiveDiffDriver struct {
26 26
 	ProtoDriver
... ...
@@ -47,10 +47,11 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err err
47 47
 	startTime := time.Now()
48 48
 	driver := gdw.ProtoDriver
49 49
 
50
-	layerFs, err := driver.Get(id, "")
50
+	layerRootFs, err := driver.Get(id, "")
51 51
 	if err != nil {
52 52
 		return nil, err
53 53
 	}
54
+	layerFs := layerRootFs.Path()
54 55
 
55 56
 	defer func() {
56 57
 		if err != nil {
... ...
@@ -70,12 +71,14 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err err
70 70
 		}), nil
71 71
 	}
72 72
 
73
-	parentFs, err := driver.Get(parent, "")
73
+	parentRootFs, err := driver.Get(parent, "")
74 74
 	if err != nil {
75 75
 		return nil, err
76 76
 	}
77 77
 	defer driver.Put(parent)
78 78
 
79
+	parentFs := parentRootFs.Path()
80
+
79 81
 	changes, err := archive.ChangesDirs(layerFs, parentFs)
80 82
 	if err != nil {
81 83
 		return nil, err
... ...
@@ -104,20 +107,22 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err err
104 104
 func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) {
105 105
 	driver := gdw.ProtoDriver
106 106
 
107
-	layerFs, err := driver.Get(id, "")
107
+	layerRootFs, err := driver.Get(id, "")
108 108
 	if err != nil {
109 109
 		return nil, err
110 110
 	}
111 111
 	defer driver.Put(id)
112 112
 
113
+	layerFs := layerRootFs.Path()
113 114
 	parentFs := ""
114 115
 
115 116
 	if parent != "" {
116
-		parentFs, err = driver.Get(parent, "")
117
+		parentRootFs, err := driver.Get(parent, "")
117 118
 		if err != nil {
118 119
 			return nil, err
119 120
 		}
120 121
 		defer driver.Put(parent)
122
+		parentFs = parentRootFs.Path()
121 123
 	}
122 124
 
123 125
 	return archive.ChangesDirs(layerFs, parentFs)
... ...
@@ -130,12 +135,13 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size i
130 130
 	driver := gdw.ProtoDriver
131 131
 
132 132
 	// Mount the root filesystem so we can apply the diff/layer.
133
-	layerFs, err := driver.Get(id, "")
133
+	layerRootFs, err := driver.Get(id, "")
134 134
 	if err != nil {
135 135
 		return
136 136
 	}
137 137
 	defer driver.Put(id)
138 138
 
139
+	layerFs := layerRootFs.Path()
139 140
 	options := &archive.TarOptions{UIDMaps: gdw.uidMaps,
140 141
 		GIDMaps: gdw.gidMaps}
141 142
 	start := time.Now().UTC()
... ...
@@ -165,5 +171,5 @@ func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error)
165 165
 	}
166 166
 	defer driver.Put(id)
167 167
 
168
-	return archive.ChangesSize(layerFs, changes), nil
168
+	return archive.ChangesSize(layerFs.Path(), changes), nil
169 169
 }
... ...
@@ -5,9 +5,9 @@ package graphtest
5 5
 import (
6 6
 	"io"
7 7
 	"io/ioutil"
8
-	"path/filepath"
9 8
 	"testing"
10 9
 
10
+	contdriver "github.com/containerd/continuity/driver"
11 11
 	"github.com/docker/docker/pkg/stringid"
12 12
 	"github.com/stretchr/testify/require"
13 13
 )
... ...
@@ -245,7 +245,7 @@ func DriverBenchDeepLayerRead(b *testing.B, layerCount int, drivername string, d
245 245
 	for i := 0; i < b.N; i++ {
246 246
 
247 247
 		// Read content
248
-		c, err := ioutil.ReadFile(filepath.Join(root, "testfile.txt"))
248
+		c, err := contdriver.ReadFile(root, root.Join(root.Path(), "testfile.txt"))
249 249
 		if err != nil {
250 250
 			b.Fatal(err)
251 251
 		}
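The tests now go through the continuity driver helpers rather than ioutil and path/filepath. A self-contained sketch of that pattern follows; the /tmp path is illustrative and assumed to already exist.

package main

import (
	"fmt"

	contdriver "github.com/containerd/continuity/driver"
	"github.com/docker/docker/pkg/containerfs"
)

func main() {
	root := containerfs.NewLocalContainerFS("/tmp/layer-root") // assumed directory
	file := root.Join(root.Path(), "hello.txt")
	if err := contdriver.WriteFile(root, file, []byte("hi"), 0644); err != nil {
		panic(err)
	}
	data, err := contdriver.ReadFile(root, file)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // "hi"
}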
... ...
@@ -97,10 +97,10 @@ func DriverTestCreateEmpty(t testing.TB, drivername string, driverOptions ...str
97 97
 	dir, err := driver.Get("empty", "")
98 98
 	require.NoError(t, err)
99 99
 
100
-	verifyFile(t, dir, 0755|os.ModeDir, 0, 0)
100
+	verifyFile(t, dir.Path(), 0755|os.ModeDir, 0, 0)
101 101
 
102 102
 	// Verify that the directory is empty
103
-	fis, err := readDir(dir)
103
+	fis, err := readDir(dir, dir.Path())
104 104
 	require.NoError(t, err)
105 105
 	assert.Len(t, fis, 0)
106 106
 
... ...
@@ -328,9 +328,9 @@ func DriverTestSetQuota(t *testing.T, drivername string) {
328 328
 	}
329 329
 
330 330
 	quota := uint64(50 * units.MiB)
331
-	err = writeRandomFile(path.Join(mountPath, "file"), quota*2)
331
+
332
+	err = writeRandomFile(path.Join(mountPath.Path(), "file"), quota*2)
332 333
 	if pathError, ok := err.(*os.PathError); ok && pathError.Err != unix.EDQUOT {
333 334
 		t.Fatalf("expect write() to fail with %v, got %v", unix.EDQUOT, err)
334 335
 	}
335
-
336 336
 }
... ...
@@ -3,12 +3,11 @@ package graphtest
3 3
 import (
4 4
 	"bytes"
5 5
 	"fmt"
6
-	"io/ioutil"
7 6
 	"math/rand"
8 7
 	"os"
9
-	"path"
10 8
 	"sort"
11 9
 
10
+	"github.com/containerd/continuity/driver"
12 11
 	"github.com/docker/docker/daemon/graphdriver"
13 12
 	"github.com/docker/docker/pkg/archive"
14 13
 	"github.com/docker/docker/pkg/stringid"
... ...
@@ -36,17 +35,17 @@ func addFiles(drv graphdriver.Driver, layer string, seed int64) error {
36 36
 	}
37 37
 	defer drv.Put(layer)
38 38
 
39
-	if err := ioutil.WriteFile(path.Join(root, "file-a"), randomContent(64, seed), 0755); err != nil {
39
+	if err := driver.WriteFile(root, root.Join(root.Path(), "file-a"), randomContent(64, seed), 0755); err != nil {
40 40
 		return err
41 41
 	}
42
-	if err := os.MkdirAll(path.Join(root, "dir-b"), 0755); err != nil {
42
+	if err := root.MkdirAll(root.Join(root.Path(), "dir-b"), 0755); err != nil {
43 43
 		return err
44 44
 	}
45
-	if err := ioutil.WriteFile(path.Join(root, "dir-b", "file-b"), randomContent(128, seed+1), 0755); err != nil {
45
+	if err := driver.WriteFile(root, root.Join(root.Path(), "dir-b", "file-b"), randomContent(128, seed+1), 0755); err != nil {
46 46
 		return err
47 47
 	}
48 48
 
49
-	return ioutil.WriteFile(path.Join(root, "file-c"), randomContent(128*128, seed+2), 0755)
49
+	return driver.WriteFile(root, root.Join(root.Path(), "file-c"), randomContent(128*128, seed+2), 0755)
50 50
 }
51 51
 
52 52
 func checkFile(drv graphdriver.Driver, layer, filename string, content []byte) error {
... ...
@@ -56,7 +55,7 @@ func checkFile(drv graphdriver.Driver, layer, filename string, content []byte) e
56 56
 	}
57 57
 	defer drv.Put(layer)
58 58
 
59
-	fileContent, err := ioutil.ReadFile(path.Join(root, filename))
59
+	fileContent, err := driver.ReadFile(root, root.Join(root.Path(), filename))
60 60
 	if err != nil {
61 61
 		return err
62 62
 	}
... ...
@@ -75,7 +74,7 @@ func addFile(drv graphdriver.Driver, layer, filename string, content []byte) err
75 75
 	}
76 76
 	defer drv.Put(layer)
77 77
 
78
-	return ioutil.WriteFile(path.Join(root, filename), content, 0755)
78
+	return driver.WriteFile(root, root.Join(root.Path(), filename), content, 0755)
79 79
 }
80 80
 
81 81
 func addDirectory(drv graphdriver.Driver, layer, dir string) error {
... ...
@@ -85,7 +84,7 @@ func addDirectory(drv graphdriver.Driver, layer, dir string) error {
85 85
 	}
86 86
 	defer drv.Put(layer)
87 87
 
88
-	return os.MkdirAll(path.Join(root, dir), 0755)
88
+	return root.MkdirAll(root.Join(root.Path(), dir), 0755)
89 89
 }
90 90
 
91 91
 func removeAll(drv graphdriver.Driver, layer string, names ...string) error {
... ...
@@ -96,7 +95,7 @@ func removeAll(drv graphdriver.Driver, layer string, names ...string) error {
96 96
 	defer drv.Put(layer)
97 97
 
98 98
 	for _, filename := range names {
99
-		if err := os.RemoveAll(path.Join(root, filename)); err != nil {
99
+		if err := root.RemoveAll(root.Join(root.Path(), filename)); err != nil {
100 100
 			return err
101 101
 		}
102 102
 	}
... ...
@@ -110,8 +109,8 @@ func checkFileRemoved(drv graphdriver.Driver, layer, filename string) error {
110 110
 	}
111 111
 	defer drv.Put(layer)
112 112
 
113
-	if _, err := os.Stat(path.Join(root, filename)); err == nil {
114
-		return fmt.Errorf("file still exists: %s", path.Join(root, filename))
113
+	if _, err := root.Stat(root.Join(root.Path(), filename)); err == nil {
114
+		return fmt.Errorf("file still exists: %s", root.Join(root.Path(), filename))
115 115
 	} else if !os.IsNotExist(err) {
116 116
 		return err
117 117
 	}
... ...
@@ -127,13 +126,13 @@ func addManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) e
127 127
 	defer drv.Put(layer)
128 128
 
129 129
 	for i := 0; i < count; i += 100 {
130
-		dir := path.Join(root, fmt.Sprintf("directory-%d", i))
131
-		if err := os.MkdirAll(dir, 0755); err != nil {
130
+		dir := root.Join(root.Path(), fmt.Sprintf("directory-%d", i))
131
+		if err := root.MkdirAll(dir, 0755); err != nil {
132 132
 			return err
133 133
 		}
134 134
 		for j := 0; i+j < count && j < 100; j++ {
135
-			file := path.Join(dir, fmt.Sprintf("file-%d", i+j))
136
-			if err := ioutil.WriteFile(file, randomContent(64, seed+int64(i+j)), 0755); err != nil {
135
+			file := root.Join(dir, fmt.Sprintf("file-%d", i+j))
136
+			if err := driver.WriteFile(root, file, randomContent(64, seed+int64(i+j)), 0755); err != nil {
137 137
 				return err
138 138
 			}
139 139
 		}
... ...
@@ -152,7 +151,7 @@ func changeManyFiles(drv graphdriver.Driver, layer string, count int, seed int64
152 152
 	changes := []archive.Change{}
153 153
 	for i := 0; i < count; i += 100 {
154 154
 		archiveRoot := fmt.Sprintf("/directory-%d", i)
155
-		if err := os.MkdirAll(path.Join(root, archiveRoot), 0755); err != nil {
155
+		if err := root.MkdirAll(root.Join(root.Path(), archiveRoot), 0755); err != nil {
156 156
 			return nil, err
157 157
 		}
158 158
 		for j := 0; i+j < count && j < 100; j++ {
... ...
@@ -166,23 +165,23 @@ func changeManyFiles(drv graphdriver.Driver, layer string, count int, seed int64
166 166
 			switch j % 3 {
167 167
 			// Update file
168 168
 			case 0:
169
-				change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d", i+j))
169
+				change.Path = root.Join(archiveRoot, fmt.Sprintf("file-%d", i+j))
170 170
 				change.Kind = archive.ChangeModify
171
-				if err := ioutil.WriteFile(path.Join(root, change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil {
171
+				if err := driver.WriteFile(root, root.Join(root.Path(), change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil {
172 172
 					return nil, err
173 173
 				}
174 174
 			// Add file
175 175
 			case 1:
176
-				change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d-%d", seed, i+j))
176
+				change.Path = root.Join(archiveRoot, fmt.Sprintf("file-%d-%d", seed, i+j))
177 177
 				change.Kind = archive.ChangeAdd
178
-				if err := ioutil.WriteFile(path.Join(root, change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil {
178
+				if err := driver.WriteFile(root, root.Join(root.Path(), change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil {
179 179
 					return nil, err
180 180
 				}
181 181
 			// Remove file
182 182
 			case 2:
183
-				change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d", i+j))
183
+				change.Path = root.Join(archiveRoot, fmt.Sprintf("file-%d", i+j))
184 184
 				change.Kind = archive.ChangeDelete
185
-				if err := os.Remove(path.Join(root, change.Path)); err != nil {
185
+				if err := root.Remove(root.Join(root.Path(), change.Path)); err != nil {
186 186
 					return nil, err
187 187
 				}
188 188
 			}
... ...
@@ -201,10 +200,10 @@ func checkManyFiles(drv graphdriver.Driver, layer string, count int, seed int64)
201 201
 	defer drv.Put(layer)
202 202
 
203 203
 	for i := 0; i < count; i += 100 {
204
-		dir := path.Join(root, fmt.Sprintf("directory-%d", i))
204
+		dir := root.Join(root.Path(), fmt.Sprintf("directory-%d", i))
205 205
 		for j := 0; i+j < count && j < 100; j++ {
206
-			file := path.Join(dir, fmt.Sprintf("file-%d", i+j))
207
-			fileContent, err := ioutil.ReadFile(file)
206
+			file := root.Join(dir, fmt.Sprintf("file-%d", i+j))
207
+			fileContent, err := driver.ReadFile(root, file)
208 208
 			if err != nil {
209 209
 				return err
210 210
 			}
... ...
@@ -254,17 +253,17 @@ func addLayerFiles(drv graphdriver.Driver, layer, parent string, i int) error {
254 254
 	}
255 255
 	defer drv.Put(layer)
256 256
 
257
-	if err := ioutil.WriteFile(path.Join(root, "top-id"), []byte(layer), 0755); err != nil {
257
+	if err := driver.WriteFile(root, root.Join(root.Path(), "top-id"), []byte(layer), 0755); err != nil {
258 258
 		return err
259 259
 	}
260
-	layerDir := path.Join(root, fmt.Sprintf("layer-%d", i))
261
-	if err := os.MkdirAll(layerDir, 0755); err != nil {
260
+	layerDir := root.Join(root.Path(), fmt.Sprintf("layer-%d", i))
261
+	if err := root.MkdirAll(layerDir, 0755); err != nil {
262 262
 		return err
263 263
 	}
264
-	if err := ioutil.WriteFile(path.Join(layerDir, "layer-id"), []byte(layer), 0755); err != nil {
264
+	if err := driver.WriteFile(root, root.Join(layerDir, "layer-id"), []byte(layer), 0755); err != nil {
265 265
 		return err
266 266
 	}
267
-	if err := ioutil.WriteFile(path.Join(layerDir, "parent-id"), []byte(parent), 0755); err != nil {
267
+	if err := driver.WriteFile(root, root.Join(layerDir, "parent-id"), []byte(parent), 0755); err != nil {
268 268
 		return err
269 269
 	}
270 270
 
... ...
@@ -295,7 +294,7 @@ func checkManyLayers(drv graphdriver.Driver, layer string, count int) error {
295 295
 	}
296 296
 	defer drv.Put(layer)
297 297
 
298
-	layerIDBytes, err := ioutil.ReadFile(path.Join(root, "top-id"))
298
+	layerIDBytes, err := driver.ReadFile(root, root.Join(root.Path(), "top-id"))
299 299
 	if err != nil {
300 300
 		return err
301 301
 	}
... ...
@@ -305,16 +304,16 @@ func checkManyLayers(drv graphdriver.Driver, layer string, count int) error {
305 305
 	}
306 306
 
307 307
 	for i := count; i > 0; i-- {
308
-		layerDir := path.Join(root, fmt.Sprintf("layer-%d", i))
308
+		layerDir := root.Join(root.Path(), fmt.Sprintf("layer-%d", i))
309 309
 
310
-		thisLayerIDBytes, err := ioutil.ReadFile(path.Join(layerDir, "layer-id"))
310
+		thisLayerIDBytes, err := driver.ReadFile(root, root.Join(layerDir, "layer-id"))
311 311
 		if err != nil {
312 312
 			return err
313 313
 		}
314 314
 		if !bytes.Equal(thisLayerIDBytes, layerIDBytes) {
315 315
 			return fmt.Errorf("mismatched file content %v, expecting %v", thisLayerIDBytes, layerIDBytes)
316 316
 		}
317
-		layerIDBytes, err = ioutil.ReadFile(path.Join(layerDir, "parent-id"))
317
+		layerIDBytes, err = driver.ReadFile(root, root.Join(layerDir, "parent-id"))
318 318
 		if err != nil {
319 319
 			return err
320 320
 		}
... ...
@@ -322,11 +321,11 @@ func checkManyLayers(drv graphdriver.Driver, layer string, count int) error {
322 322
 	return nil
323 323
 }
324 324
 
325
-// readDir reads a directory just like ioutil.ReadDir()
325
+// readDir reads a directory just like driver.ReadDir()
326 326
 // then hides specific files (currently "lost+found")
327 327
 // so the tests don't "see" it
328
-func readDir(dir string) ([]os.FileInfo, error) {
329
-	a, err := ioutil.ReadDir(dir)
328
+func readDir(r driver.Driver, dir string) ([]os.FileInfo, error) {
329
+	a, err := driver.ReadDir(r, dir)
330 330
 	if err != nil {
331 331
 		return nil, err
332 332
 	}
... ...
@@ -3,12 +3,11 @@
3 3
 package graphtest
4 4
 
5 5
 import (
6
-	"io/ioutil"
7 6
 	"os"
8
-	"path"
9 7
 	"syscall"
10 8
 	"testing"
11 9
 
10
+	contdriver "github.com/containerd/continuity/driver"
12 11
 	"github.com/docker/docker/daemon/graphdriver"
13 12
 	"github.com/stretchr/testify/assert"
14 13
 	"github.com/stretchr/testify/require"
... ...
@@ -40,31 +39,31 @@ func createBase(t testing.TB, driver graphdriver.Driver, name string) {
40 40
 	err := driver.CreateReadWrite(name, "", nil)
41 41
 	require.NoError(t, err)
42 42
 
43
-	dir, err := driver.Get(name, "")
43
+	dirFS, err := driver.Get(name, "")
44 44
 	require.NoError(t, err)
45 45
 	defer driver.Put(name)
46 46
 
47
-	subdir := path.Join(dir, "a subdir")
48
-	require.NoError(t, os.Mkdir(subdir, 0705|os.ModeSticky))
49
-	require.NoError(t, os.Chown(subdir, 1, 2))
47
+	subdir := dirFS.Join(dirFS.Path(), "a subdir")
48
+	require.NoError(t, dirFS.Mkdir(subdir, 0705|os.ModeSticky))
49
+	require.NoError(t, dirFS.Lchown(subdir, 1, 2))
50 50
 
51
-	file := path.Join(dir, "a file")
52
-	err = ioutil.WriteFile(file, []byte("Some data"), 0222|os.ModeSetuid)
51
+	file := dirFS.Join(dirFS.Path(), "a file")
52
+	err = contdriver.WriteFile(dirFS, file, []byte("Some data"), 0222|os.ModeSetuid)
53 53
 	require.NoError(t, err)
54 54
 }
55 55
 
56 56
 func verifyBase(t testing.TB, driver graphdriver.Driver, name string) {
57
-	dir, err := driver.Get(name, "")
57
+	dirFS, err := driver.Get(name, "")
58 58
 	require.NoError(t, err)
59 59
 	defer driver.Put(name)
60 60
 
61
-	subdir := path.Join(dir, "a subdir")
61
+	subdir := dirFS.Join(dirFS.Path(), "a subdir")
62 62
 	verifyFile(t, subdir, 0705|os.ModeDir|os.ModeSticky, 1, 2)
63 63
 
64
-	file := path.Join(dir, "a file")
64
+	file := dirFS.Join(dirFS.Path(), "a file")
65 65
 	verifyFile(t, file, 0222|os.ModeSetuid, 0, 0)
66 66
 
67
-	files, err := readDir(dir)
67
+	files, err := readDir(dirFS, dirFS.Path())
68 68
 	require.NoError(t, err)
69 69
 	assert.Len(t, files, 2)
70 70
 }
... ...
@@ -65,12 +65,14 @@ import (
65 65
 	"strconv"
66 66
 	"strings"
67 67
 	"sync"
68
+	"syscall"
68 69
 	"time"
69 70
 
70 71
 	"github.com/Microsoft/hcsshim"
71 72
 	"github.com/Microsoft/opengcs/client"
72 73
 	"github.com/docker/docker/daemon/graphdriver"
73 74
 	"github.com/docker/docker/pkg/archive"
75
+	"github.com/docker/docker/pkg/containerfs"
74 76
 	"github.com/docker/docker/pkg/idtools"
75 77
 	"github.com/docker/docker/pkg/ioutils"
76 78
 	"github.com/docker/docker/pkg/system"
... ...
@@ -106,72 +108,24 @@ const (
106 106
 
107 107
 	// scratchDirectory is the sub-folder under the driver's data-root used for scratch VHDs in service VMs
108 108
 	scratchDirectory = "scratch"
109
-)
110
-
111
-// cacheItem is our internal structure representing an item in our local cache
112
-// of things that have been mounted.
113
-type cacheItem struct {
114
-	sync.Mutex        // Protects operations performed on this item
115
-	uvmPath    string // Path in utility VM
116
-	hostPath   string // Path on host
117
-	refCount   int    // How many times its been mounted
118
-	isSandbox  bool   // True if a sandbox
119
-	isMounted  bool   // True when mounted in a service VM
120
-}
121
-
122
-// setIsMounted is a helper function for a cacheItem which does exactly what it says
123
-func (ci *cacheItem) setIsMounted() {
124
-	logrus.Debugf("locking cache item for set isMounted")
125
-	ci.Lock()
126
-	defer ci.Unlock()
127
-	ci.isMounted = true
128
-	logrus.Debugf("set isMounted on cache item")
129
-}
130 109
 
131
-// incrementRefCount is a helper function for a cacheItem which does exactly what it says
132
-func (ci *cacheItem) incrementRefCount() {
133
-	logrus.Debugf("locking cache item for increment")
134
-	ci.Lock()
135
-	defer ci.Unlock()
136
-	ci.refCount++
137
-	logrus.Debugf("incremented refcount on cache item %+v", ci)
138
-}
139
-
140
-// decrementRefCount is a helper function for a cacheItem which does exactly what it says
141
-func (ci *cacheItem) decrementRefCount() int {
142
-	logrus.Debugf("locking cache item for decrement")
143
-	ci.Lock()
144
-	defer ci.Unlock()
145
-	ci.refCount--
146
-	logrus.Debugf("decremented refcount on cache item %+v", ci)
147
-	return ci.refCount
148
-}
149
-
150
-// serviceVMItem is our internal structure representing an item in our
151
-// map of service VMs we are maintaining.
152
-type serviceVMItem struct {
153
-	sync.Mutex                     // Serialises operations being performed in this service VM.
154
-	scratchAttached bool           // Has a scratch been attached?
155
-	config          *client.Config // Represents the service VM item.
156
-}
110
+	// errOperationPending is the HRESULT returned by the HCS when the VM termination operation is still pending.
111
+	errOperationPending syscall.Errno = 0xc0370103
112
+)
157 113
 
158 114
 // Driver represents an LCOW graph driver.
159 115
 type Driver struct {
160
-	dataRoot           string                    // Root path on the host where we are storing everything.
161
-	cachedSandboxFile  string                    // Location of the local default-sized cached sandbox.
162
-	cachedSandboxMutex sync.Mutex                // Protects race conditions from multiple threads creating the cached sandbox.
163
-	cachedScratchFile  string                    // Location of the local cached empty scratch space.
164
-	cachedScratchMutex sync.Mutex                // Protects race conditions from multiple threads creating the cached scratch.
165
-	options            []string                  // Graphdriver options we are initialised with.
166
-	serviceVmsMutex    sync.Mutex                // Protects add/updates/delete to the serviceVMs map.
167
-	serviceVms         map[string]*serviceVMItem // Map of the configs representing the service VM(s) we are running.
168
-	globalMode         bool                      // Indicates if running in an unsafe/global service VM mode.
116
+	dataRoot           string     // Root path on the host where we are storing everything.
117
+	cachedSandboxFile  string     // Location of the local default-sized cached sandbox.
118
+	cachedSandboxMutex sync.Mutex // Protects race conditions from multiple threads creating the cached sandbox.
119
+	cachedScratchFile  string     // Location of the local cached empty scratch space.
120
+	cachedScratchMutex sync.Mutex // Protects race conditions from multiple threads creating the cached scratch.
121
+	options            []string   // Graphdriver options we are initialised with.
122
+	globalMode         bool       // Indicates if running in an unsafe/global service VM mode.
169 123
 
170 124
 	// NOTE: It is OK to use a cache here because Windows does not support
171 125
 	// restoring containers when the daemon dies.
172
-
173
-	cacheMutex sync.Mutex            // Protects add/update/deletes to cache.
174
-	cache      map[string]*cacheItem // Map holding a cache of all the IDs we've mounted/unmounted.
126
+	serviceVms *serviceVMMap // Map of the configs representing the service VM(s) we are running.
175 127
 }
176 128
 
177 129
 // layerDetails is the structure returned by a helper function `getLayerDetails`
... ...
@@ -204,9 +158,10 @@ func InitDriver(dataRoot string, options []string, _, _ []idtools.IDMap) (graphd
204 204
 		options:           options,
205 205
 		cachedSandboxFile: filepath.Join(cd, sandboxFilename),
206 206
 		cachedScratchFile: filepath.Join(cd, scratchFilename),
207
-		cache:             make(map[string]*cacheItem),
208
-		serviceVms:        make(map[string]*serviceVMItem),
209
-		globalMode:        false,
207
+		serviceVms: &serviceVMMap{
208
+			svms: make(map[string]*serviceVMMapItem),
209
+		},
210
+		globalMode: false,
210 211
 	}
211 212
 
212 213
 	// Looks for relevant options
... ...
@@ -248,53 +203,59 @@ func InitDriver(dataRoot string, options []string, _, _ []idtools.IDMap) (graphd
248 248
 	return d, nil
249 249
 }
250 250
 
251
+func (d *Driver) getVMID(id string) string {
252
+	if d.globalMode {
253
+		return svmGlobalID
254
+	}
255
+	return id
256
+}
257
+
251 258
 // startServiceVMIfNotRunning starts a service utility VM if it is not currently running.
252 259
 // It can optionally be started with a mapped virtual disk. Returns an opengcs config structure
253 260
 // representing the VM.
254
-func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd *hcsshim.MappedVirtualDisk, context string) (*serviceVMItem, error) {
261
+func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd []hcsshim.MappedVirtualDisk, context string) (_ *serviceVM, err error) {
255 262
 	// Use the global ID if in global mode
256
-	if d.globalMode {
257
-		id = svmGlobalID
258
-	}
263
+	id = d.getVMID(id)
259 264
 
260 265
 	title := fmt.Sprintf("lcowdriver: startservicevmifnotrunning %s:", id)
261 266
 
262
-	// Make sure thread-safe when interrogating the map
263
-	logrus.Debugf("%s taking serviceVmsMutex", title)
264
-	d.serviceVmsMutex.Lock()
265
-
266
-	// Nothing to do if it's already running except add the mapped drive if supplied.
267
-	if svm, ok := d.serviceVms[id]; ok {
268
-		logrus.Debugf("%s exists, releasing serviceVmsMutex", title)
269
-		d.serviceVmsMutex.Unlock()
270
-
271
-		if mvdToAdd != nil {
272
-			logrus.Debugf("hot-adding %s to %s", mvdToAdd.HostPath, mvdToAdd.ContainerPath)
273
-
274
-			// Ensure the item is locked while doing this
275
-			logrus.Debugf("%s locking serviceVmItem %s", title, svm.config.Name)
276
-			svm.Lock()
277
-
278
-			if err := svm.config.HotAddVhd(mvdToAdd.HostPath, mvdToAdd.ContainerPath, false, true); err != nil {
279
-				logrus.Debugf("%s releasing serviceVmItem %s on hot-add failure %s", title, svm.config.Name, err)
280
-				svm.Unlock()
281
-				return nil, fmt.Errorf("%s hot add %s to %s failed: %s", title, mvdToAdd.HostPath, mvdToAdd.ContainerPath, err)
282
-			}
267
+	// Attempt to add ID to the service vm map
268
+	logrus.Debugf("%s: Adding entry to service vm map", title)
269
+	svm, exists, err := d.serviceVms.add(id)
270
+	if err != nil && err == errVMisTerminating {
271
+		// VM is in the process of terminating. Wait until it's done and then try again
272
+		logrus.Debugf("%s: VM with current ID still in the process of terminating: %s", title, id)
273
+		if err := svm.getStopError(); err != nil {
274
+			logrus.Debugf("%s: VM %s did not stop succesfully: %s", title, id, err)
275
+			return nil, err
276
+		}
277
+		return d.startServiceVMIfNotRunning(id, mvdToAdd, context)
278
+	} else if err != nil {
279
+		logrus.Debugf("%s: failed to add service vm to map: %s", err)
280
+		return nil, fmt.Errorf("%s: failed to add to service vm map: %s", title, err)
281
+	}
283 282
 
284
-			logrus.Debugf("%s releasing serviceVmItem %s", title, svm.config.Name)
285
-			svm.Unlock()
283
+	if exists {
284
+		// Service VM is already up and running. In this case, just hot add the vhds.
285
+		logrus.Debugf("%s: service vm already exists. Just hot adding: %+v", title, mvdToAdd)
286
+		if err := svm.hotAddVHDs(mvdToAdd...); err != nil {
287
+			logrus.Debugf("%s: failed to hot add vhds on service vm creation: %s", title, err)
288
+			return nil, fmt.Errorf("%s: failed to hot add vhds on service vm: %s", title, err)
286 289
 		}
287 290
 		return svm, nil
288 291
 	}
289 292
 
290
-	// Release the lock early
291
-	logrus.Debugf("%s releasing serviceVmsMutex", title)
292
-	d.serviceVmsMutex.Unlock()
293
+	// We are the first service for this id, so we need to start it
294
+	logrus.Debugf("%s: service vm doesn't exist. Now starting it up: %s", title, id)
293 295
 
294
-	// So we are starting one. First need an enpty structure.
295
-	svm := &serviceVMItem{
296
-		config: &client.Config{},
297
-	}
296
+	defer func() {
297
+		// Signal that start has finished, passing in the error if any.
298
+		svm.signalStartFinished(err)
299
+		if err != nil {
300
+			// We added a ref to the VM, since we failed, we should delete the ref.
301
+			d.terminateServiceVM(id, "error path on startServiceVMIfNotRunning", false)
302
+		}
303
+	}()
298 304
 
299 305
 	// Generate a default configuration
300 306
 	if err := svm.config.GenerateDefault(d.options); err != nil {
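The locking strategy above replaces the old per-item mutex juggling with a single map type whose add/terminate calls carry reference counts and an errVMisTerminating signal. The toy below is a self-contained illustration of that shape, not the PR's serviceVMMap; every name in it is made up.

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errTerminating = errors.New("entry is terminating")

type entry struct {
	refCount    int
	terminating bool
}

type refMap struct {
	mu sync.Mutex
	m  map[string]*entry
}

// add returns the entry for id, whether it already existed, and an error if
// the entry is currently being torn down (the caller then waits and retries,
// as startServiceVMIfNotRunning does above).
func (r *refMap) add(id string) (*entry, bool, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if e, ok := r.m[id]; ok {
		if e.terminating {
			return e, true, errTerminating
		}
		e.refCount++
		return e, true, nil
	}
	e := &entry{refCount: 1}
	r.m[id] = e
	return e, false, nil
}

func main() {
	r := &refMap{m: map[string]*entry{}}
	_, existed, _ := r.add("svm")
	fmt.Println(existed) // false: first reference, so the caller boots the VM
	_, existed, _ = r.add("svm")
	fmt.Println(existed) // true: VM already up, caller just hot-adds disks
}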
... ...
@@ -335,12 +296,14 @@ func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd *hcsshim.MappedV
335 335
 		svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, mvd)
336 336
 		svm.scratchAttached = true
337 337
 	}
338
+
338 339
 	logrus.Debugf("%s releasing cachedScratchMutex", title)
339 340
 	d.cachedScratchMutex.Unlock()
340 341
 
341 342
 	// If requested to start it with a mapped virtual disk, add it now.
342
-	if mvdToAdd != nil {
343
-		svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, *mvdToAdd)
343
+	svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, mvdToAdd...)
344
+	for _, mvd := range svm.config.MappedVirtualDisks {
345
+		svm.attachedVHDs[mvd.HostPath] = 1
344 346
 	}
345 347
 
346 348
 	// Start it.
... ...
@@ -349,108 +312,80 @@ func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd *hcsshim.MappedV
349 349
 		return nil, fmt.Errorf("failed to start service utility VM (%s): %s", context, err)
350 350
 	}
351 351
 
352
-	// As it's now running, add it to the map, checking for a race where another
353
-	// thread has simultaneously tried to start it.
354
-	logrus.Debugf("%s locking serviceVmsMutex for insertion", title)
355
-	d.serviceVmsMutex.Lock()
356
-	if svm, ok := d.serviceVms[id]; ok {
357
-		logrus.Debugf("%s releasing serviceVmsMutex after insertion but exists", title)
358
-		d.serviceVmsMutex.Unlock()
359
-		return svm, nil
360
-	}
361
-	d.serviceVms[id] = svm
362
-	logrus.Debugf("%s releasing serviceVmsMutex after insertion", title)
363
-	d.serviceVmsMutex.Unlock()
352
+	// defer function to terminate the VM if the next steps fail
353
+	defer func() {
354
+		if err != nil {
355
+			waitTerminate(svm, fmt.Sprintf("startServiceVmIfNotRunning: %s (%s)", id, context))
356
+		}
357
+	}()
364 358
 
365 359
 	// Now we have a running service VM, we can create the cached scratch file if it doesn't exist.
366 360
 	logrus.Debugf("%s locking cachedScratchMutex", title)
367 361
 	d.cachedScratchMutex.Lock()
368 362
 	if _, err := os.Stat(d.cachedScratchFile); err != nil {
369
-		logrus.Debugf("%s (%s): creating an SVM scratch - locking serviceVM", title, context)
370
-		svm.Lock()
363
+		logrus.Debugf("%s (%s): creating an SVM scratch", title, context)
364
+
365
+		// Don't use svm.createExt4VHDX since that only works once the service vm is set up,
366
+		// but we're still in that process right now.
371 367
 		if err := svm.config.CreateExt4Vhdx(scratchTargetFile, client.DefaultVhdxSizeGB, d.cachedScratchFile); err != nil {
372
-			logrus.Debugf("%s (%s): releasing serviceVM on error path from CreateExt4Vhdx: %s", title, context, err)
373
-			svm.Unlock()
374 368
 			logrus.Debugf("%s (%s): releasing cachedScratchMutex on error path", title, context)
375 369
 			d.cachedScratchMutex.Unlock()
376
-
377
-			// Do a force terminate and remove it from the map on failure, ignoring any errors
378
-			if err2 := d.terminateServiceVM(id, "error path from CreateExt4Vhdx", true); err2 != nil {
379
-				logrus.Warnf("failed to terminate service VM on error path from CreateExt4Vhdx: %s", err2)
380
-			}
381
-
370
+			logrus.Debugf("%s: failed to create vm scratch %s: %s", title, scratchTargetFile, err)
382 371
 			return nil, fmt.Errorf("failed to create SVM scratch VHDX (%s): %s", context, err)
383 372
 		}
384
-		logrus.Debugf("%s (%s): releasing serviceVM after %s created and cached to %s", title, context, scratchTargetFile, d.cachedScratchFile)
385
-		svm.Unlock()
386 373
 	}
387 374
 	logrus.Debugf("%s (%s): releasing cachedScratchMutex", title, context)
388 375
 	d.cachedScratchMutex.Unlock()
389 376
 
390 377
 	// Hot-add the scratch-space if not already attached
391 378
 	if !svm.scratchAttached {
392
-		logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) hot-adding scratch %s - locking serviceVM", context, scratchTargetFile)
393
-		svm.Lock()
394
-		if err := svm.config.HotAddVhd(scratchTargetFile, toolsScratchPath, false, true); err != nil {
395
-			logrus.Debugf("%s (%s): releasing serviceVM on error path of HotAddVhd: %s", title, context, err)
396
-			svm.Unlock()
397
-
398
-			// Do a force terminate and remove it from the map on failure, ignoring any errors
399
-			if err2 := d.terminateServiceVM(id, "error path from HotAddVhd", true); err2 != nil {
400
-				logrus.Warnf("failed to terminate service VM on error path from HotAddVhd: %s", err2)
401
-			}
402
-
379
+		logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) hot-adding scratch %s", context, scratchTargetFile)
380
+		if err := svm.hotAddVHDsAtStart(hcsshim.MappedVirtualDisk{
381
+			HostPath:          scratchTargetFile,
382
+			ContainerPath:     toolsScratchPath,
383
+			CreateInUtilityVM: true,
384
+		}); err != nil {
385
+			logrus.Debugf("%s: failed to hot-add scratch %s: %s", title, scratchTargetFile, err)
403 386
 			return nil, fmt.Errorf("failed to hot-add %s failed: %s", scratchTargetFile, err)
404 387
 		}
405
-		logrus.Debugf("%s (%s): releasing serviceVM", title, context)
406
-		svm.Unlock()
388
+		svm.scratchAttached = true
407 389
 	}
408 390
 
409 391
 	logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) success", context)
410 392
 	return svm, nil
411 393
 }
412 394
 
413
-// getServiceVM returns the appropriate service utility VM instance, optionally
414
-// deleting it from the map (but not the global one)
415
-func (d *Driver) getServiceVM(id string, deleteFromMap bool) (*serviceVMItem, error) {
416
-	logrus.Debugf("lcowdriver: getservicevm:locking serviceVmsMutex")
417
-	d.serviceVmsMutex.Lock()
418
-	defer func() {
419
-		logrus.Debugf("lcowdriver: getservicevm:releasing serviceVmsMutex")
420
-		d.serviceVmsMutex.Unlock()
421
-	}()
422
-	if d.globalMode {
423
-		id = svmGlobalID
424
-	}
425
-	if _, ok := d.serviceVms[id]; !ok {
426
-		return nil, fmt.Errorf("getservicevm for %s failed as not found", id)
427
-	}
428
-	svm := d.serviceVms[id]
429
-	if deleteFromMap && id != svmGlobalID {
430
-		logrus.Debugf("lcowdriver: getservicevm: removing %s from map", id)
431
-		delete(d.serviceVms, id)
395
+// terminateServiceVM terminates a service utility VM if it's running and
396
+// not being used by any goroutine, but does nothing when in global mode as its
397
+// lifetime is limited to that of the daemon. If the force flag is set, then
398
+// the VM will be killed regardless of the ref count or whether it's global.
399
+func (d *Driver) terminateServiceVM(id, context string, force bool) (err error) {
400
+	// We don't do anything in safe mode unless the force flag has been passed, which
401
+	// is only the case for cleanup at driver termination.
402
+	if d.globalMode && !force {
403
+		logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - doing nothing as in global mode", id, context)
404
+		return nil
432 405
 	}
433
-	return svm, nil
434
-}
435 406
 
436
-// terminateServiceVM terminates a service utility VM if its running, but does nothing
437
-// when in global mode as it's lifetime is limited to that of the daemon.
438
-func (d *Driver) terminateServiceVM(id, context string, force bool) error {
407
+	id = d.getVMID(id)
439 408
 
440
-	// We don't do anything in safe mode unless the force flag has been passed, which
441
-	// is only the case for cleanup at driver termination.
442
-	if d.globalMode {
443
-		if !force {
444
-			logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - doing nothing as in global mode", id, context)
445
-			return nil
446
-		}
447
-		id = svmGlobalID
409
+	var svm *serviceVM
410
+	var lastRef bool
411
+	if !force {
412
+		// In the not force case, we ref count
413
+		svm, lastRef, err = d.serviceVms.decrementRefCount(id)
414
+	} else {
415
+		// In the force case, we ignore the ref count and just set it to 0
416
+		svm, err = d.serviceVms.setRefCountZero(id)
417
+		lastRef = true
448 418
 	}
449 419
 
450
-	// Get the service VM and delete it from the map
451
-	svm, err := d.getServiceVM(id, true)
452
-	if err != nil {
453
-		return err
420
+	if err == errVMUnknown {
421
+		return nil
422
+	} else if err == errVMisTerminating {
423
+		return svm.getStopError()
424
+	} else if !lastRef {
425
+		return nil
454 426
 	}
455 427
 
456 428
 	// We run the deletion of the scratch as a deferred function to at least attempt
... ...
@@ -459,26 +394,64 @@ func (d *Driver) terminateServiceVM(id, context string, force bool) error {
459 459
 		if svm.scratchAttached {
460 460
 			scratchTargetFile := filepath.Join(d.dataRoot, scratchDirectory, fmt.Sprintf("%s.vhdx", id))
461 461
 			logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - deleting scratch %s", id, context, scratchTargetFile)
462
-			if err := os.Remove(scratchTargetFile); err != nil {
463
-				logrus.Warnf("failed to remove scratch file %s (%s): %s", scratchTargetFile, context, err)
462
+			if errRemove := os.Remove(scratchTargetFile); errRemove != nil {
463
+				logrus.Warnf("failed to remove scratch file %s (%s): %s", scratchTargetFile, context, errRemove)
464
+				err = errRemove
464 465
 			}
465 466
 		}
467
+
468
+		// This function shouldn't actually return an error unless there is a bug
469
+		if errDelete := d.serviceVms.deleteID(id); errDelete != nil {
470
+			logrus.Warnf("failed to delete service vm from svm map %s (%s): %s", id, context, errDelete)
471
+		}
472
+
473
+		// Signal that this VM has stopped
474
+		svm.signalStopFinished(err)
466 475
 	}()
467 476
 
468
-	// Nothing to do if it's not running
469
-	if svm.config.Uvm != nil {
470
-		logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - calling terminate", id, context)
471
-		if err := svm.config.Uvm.Terminate(); err != nil {
472
-			return fmt.Errorf("failed to terminate utility VM (%s): %s", context, err)
477
+	// It's possible that the service VM failed to start and we are now trying to terminate it.
478
+	// In this case, we will relay the error to the goroutines waiting for this vm to stop.
479
+	if err := svm.getStartError(); err != nil {
480
+		logrus.Debugf("lcowdriver: terminateservicevm: %s had failed to start up: %s", id, err)
481
+		return err
482
+	}
483
+
484
+	if err := waitTerminate(svm, fmt.Sprintf("terminateservicevm: %s (%s)", id, context)); err != nil {
485
+		return err
486
+	}
487
+
488
+	logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - success", id, context)
489
+	return nil
490
+}
491
+
492
+func waitTerminate(svm *serviceVM, context string) error {
493
+	if svm.config == nil {
494
+		return fmt.Errorf("lcowdriver: waitTermiante: Nil utility VM. %s", context)
495
+	}
496
+
497
+	logrus.Debugf("lcowdriver: waitTerminate: Calling terminate: %s", context)
498
+	if err := svm.config.Uvm.Terminate(); err != nil {
499
+		// We might get operation still pending from the HCS. In that case, we shouldn't return
500
+		// an error since we call wait right after.
501
+		underlyingError := err
502
+		if conterr, ok := err.(*hcsshim.ContainerError); ok {
503
+			underlyingError = conterr.Err
473 504
 		}
474 505
 
475
-		logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - waiting for utility VM to terminate", id, context)
476
-		if err := svm.config.Uvm.WaitTimeout(time.Duration(svm.config.UvmTimeoutSeconds) * time.Second); err != nil {
477
-			return fmt.Errorf("failed waiting for utility VM to terminate (%s): %s", context, err)
506
+		if syscallErr, ok := underlyingError.(syscall.Errno); ok {
507
+			underlyingError = syscallErr
508
+		}
509
+
510
+		if underlyingError != errOperationPending {
511
+			return fmt.Errorf("failed to terminate utility VM (%s): %s", context, err)
478 512
 		}
513
+		logrus.Debugf("lcowdriver: waitTerminate: uvm.Terminate() returned operation pending (%s)", context)
479 514
 	}
480 515
 
481
-	logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - success", id, context)
516
+	logrus.Debugf("lcowdriver: waitTerminate: (%s) - waiting for utility VM to terminate", context)
517
+	if err := svm.config.Uvm.WaitTimeout(time.Duration(svm.config.UvmTimeoutSeconds) * time.Second); err != nil {
518
+		return fmt.Errorf("failed waiting for utility VM to terminate (%s): %s", context, err)
519
+	}
482 520
 	return nil
483 521
 }
484 522
 
... ...
@@ -571,25 +544,18 @@ func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts
571 571
 		}()
572 572
 	}
573 573
 
574
-	// Synchronise the operation in the service VM.
575
-	logrus.Debugf("%s: locking svm for sandbox creation", title)
576
-	svm.Lock()
577
-	defer func() {
578
-		logrus.Debugf("%s: releasing svm for sandbox creation", title)
579
-		svm.Unlock()
580
-	}()
581
-
582 574
 	// Make sure we don't write to our local cached copy if this is for a non-default size request.
583 575
 	targetCacheFile := d.cachedSandboxFile
584 576
 	if sandboxSize != client.DefaultVhdxSizeGB {
585 577
 		targetCacheFile = ""
586 578
 	}
587 579
 
588
-	// Actually do the creation.
589
-	if err := svm.config.CreateExt4Vhdx(filepath.Join(d.dir(id), sandboxFilename), uint32(sandboxSize), targetCacheFile); err != nil {
580
+	// Create the ext4 vhdx
581
+	logrus.Debugf("%s: creating sandbox ext4 vhdx", title)
582
+	if err := svm.createExt4VHDX(filepath.Join(d.dir(id), sandboxFilename), uint32(sandboxSize), targetCacheFile); err != nil {
583
+		logrus.Debugf("%s: failed to create sandbox vhdx for %s: %s", title, id, err)
590 584
 		return err
591 585
 	}
592
-
593 586
 	return nil
594 587
 }
595 588
 
... ...
@@ -638,6 +604,21 @@ func (d *Driver) Remove(id string) error {
638 638
 	layerPath := d.dir(id)
639 639
 
640 640
 	logrus.Debugf("lcowdriver: remove: id %s: layerPath %s", id, layerPath)
641
+
642
+	// Unmount all the layers
643
+	err := d.Put(id)
644
+	if err != nil {
645
+		logrus.Debugf("lcowdriver: remove id %s: failed to unmount: %s", id, err)
646
+		return err
647
+	}
648
+
649
+	// For the non-global case, just kill the VM.
650
+	if !d.globalMode {
651
+		if err := d.terminateServiceVM(id, fmt.Sprintf("Remove %s", id), true); err != nil {
652
+			return err
653
+		}
654
+	}
655
+
641 656
 	if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) {
642 657
 		return err
643 658
 	}
... ...
@@ -659,43 +640,24 @@ func (d *Driver) Remove(id string) error {
659 659
 // For optimisation, we don't actually mount the filesystem (which in our
660 660
 // case means [hot-]adding it to a service VM. But we track that and defer
661 661
 // the actual adding to the point we need to access it.
662
-func (d *Driver) Get(id, mountLabel string) (string, error) {
662
+func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
663 663
 	title := fmt.Sprintf("lcowdriver: get: %s", id)
664 664
 	logrus.Debugf(title)
665 665
 
666
-	// Work out what we are working on
667
-	ld, err := getLayerDetails(d.dir(id))
666
+	// Generate the mounts needed for the deferred operation.
667
+	disks, err := d.getAllMounts(id)
668 668
 	if err != nil {
669
-		logrus.Debugf("%s failed to get layer details from %s: %s", title, d.dir(id), err)
670
-		return "", fmt.Errorf("%s failed to open layer or sandbox VHD to open in %s: %s", title, d.dir(id), err)
671
-	}
672
-	logrus.Debugf("%s %s, size %d, isSandbox %t", title, ld.filename, ld.size, ld.isSandbox)
673
-
674
-	// Add item to cache, or update existing item, but ensure we have the
675
-	// lock while updating items.
676
-	logrus.Debugf("%s: locking cacheMutex", title)
677
-	d.cacheMutex.Lock()
678
-	var ci *cacheItem
679
-	if item, ok := d.cache[id]; !ok {
680
-		// The item is not currently in the cache.
681
-		ci = &cacheItem{
682
-			refCount:  1,
683
-			isSandbox: ld.isSandbox,
684
-			hostPath:  ld.filename,
685
-			uvmPath:   fmt.Sprintf("/mnt/%s", id),
686
-			isMounted: false, // we defer this as an optimisation
687
-		}
688
-		d.cache[id] = ci
689
-		logrus.Debugf("%s: added cache item %+v", title, ci)
690
-	} else {
691
-		// Increment the reference counter in the cache.
692
-		item.incrementRefCount()
669
+		logrus.Debugf("%s failed to get all layer details for %s: %s", title, d.dir(id), err)
670
+		return nil, fmt.Errorf("%s failed to get layer details for %s: %s", title, d.dir(id), err)
693 671
 	}
694
-	logrus.Debugf("%s: releasing cacheMutex", title)
695
-	d.cacheMutex.Unlock()
696 672
 
697
-	logrus.Debugf("%s %s success. %s: %+v: size %d", title, id, d.dir(id), ci, ld.size)
698
-	return d.dir(id), nil
673
+	logrus.Debugf("%s: got layer mounts: %+v", title, disks)
674
+	return &lcowfs{
675
+		root:        unionMountName(disks),
676
+		d:           d,
677
+		mappedDisks: disks,
678
+		vmID:        d.getVMID(id),
679
+	}, nil
699 680
 }
700 681
 
701 682
 // Put does the reverse of get. If there are no more references to
... ...
@@ -703,56 +665,45 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
703 703
 func (d *Driver) Put(id string) error {
704 704
 	title := fmt.Sprintf("lcowdriver: put: %s", id)
705 705
 
706
-	logrus.Debugf("%s: locking cacheMutex", title)
707
-	d.cacheMutex.Lock()
708
-	item, ok := d.cache[id]
709
-	if !ok {
710
-		logrus.Debugf("%s: releasing cacheMutex on error path", title)
711
-		d.cacheMutex.Unlock()
712
-		return fmt.Errorf("%s possible ref-count error, or invalid id was passed to the graphdriver. Cannot handle id %s as it's not in the cache", title, id)
713
-	}
714
-
715
-	// Decrement the ref-count, and nothing more to do if still in use.
716
-	if item.decrementRefCount() > 0 {
717
-		logrus.Debugf("%s: releasing cacheMutex. Cache item is still in use", title)
718
-		d.cacheMutex.Unlock()
706
+	// Get the service VM that we need to remove from
707
+	svm, err := d.serviceVms.get(d.getVMID(id))
708
+	if err == errVMUnknown {
719 709
 		return nil
710
+	} else if err == errVMisTerminating {
711
+		return svm.getStopError()
720 712
 	}
721 713
 
722
-	// Remove from the cache map.
723
-	delete(d.cache, id)
724
-	logrus.Debugf("%s: releasing cacheMutex. Ref count on cache item has dropped to zero, removed from cache", title)
725
-	d.cacheMutex.Unlock()
714
+	// Generate the mounts that Get() might have mounted
715
+	disks, err := d.getAllMounts(id)
716
+	if err != nil {
717
+		logrus.Debugf("%s failed to get all layer details for %s: %s", title, d.dir(id), err)
718
+		return fmt.Errorf("%s failed to get layer details for %s: %s", title, d.dir(id), err)
719
+	}
726 720
 
727
-	// If we have done a mount and we are in global mode, then remove it. We don't
728
-	// need to remove in safe mode as the service VM is going to be torn down anyway.
729
-	if d.globalMode {
730
-		logrus.Debugf("%s: locking cache item at zero ref-count", title)
731
-		item.Lock()
732
-		defer func() {
733
-			logrus.Debugf("%s: releasing cache item at zero ref-count", title)
734
-			item.Unlock()
735
-		}()
736
-		if item.isMounted {
737
-			svm, err := d.getServiceVM(id, false)
738
-			if err != nil {
739
-				return err
740
-			}
721
+	// Now, we want to perform the unmounts, hot-remove and stop the service vm.
722
+	// We want to go through all the steps, even on error, so we clean up properly.
723
+	err = svm.deleteUnionMount(unionMountName(disks), disks...)
724
+	if err != nil {
725
+		logrus.Debugf("%s failed to delete union mount %s: %s", title, id, err)
726
+	}
741 727
 
742
-			logrus.Debugf("%s: Hot-Removing %s. Locking svm", title, item.hostPath)
743
-			svm.Lock()
744
-			if err := svm.config.HotRemoveVhd(item.hostPath); err != nil {
745
-				logrus.Debugf("%s: releasing svm on error path", title)
746
-				svm.Unlock()
747
-				return fmt.Errorf("%s failed to hot-remove %s from global service utility VM: %s", title, item.hostPath, err)
748
-			}
749
-			logrus.Debugf("%s: releasing svm", title)
750
-			svm.Unlock()
728
+	err1 := svm.hotRemoveVHDs(disks...)
729
+	if err1 != nil {
730
+		logrus.Debugf("%s failed to hot remove vhds %s: %s", title, id, err)
731
+		if err == nil {
732
+			err = err1
751 733
 		}
752 734
 	}
753 735
 
754
-	logrus.Debugf("%s %s: refCount 0. %s (%s) completed successfully", title, id, item.hostPath, item.uvmPath)
755
-	return nil
736
+	err1 = d.terminateServiceVM(id, fmt.Sprintf("Put %s", id), false)
737
+	if err1 != nil {
738
+		logrus.Debugf("%s failed to terminate service vm %s: %s", title, id, err1)
739
+		if err == nil {
740
+			err = err1
741
+		}
742
+	}
743
+	logrus.Debugf("Put succeeded on id %s", id)
744
+	return err
756 745
 }
757 746
 
758 747
 // Cleanup ensures the information the driver stores is properly removed.
... ...
@@ -761,15 +712,6 @@ func (d *Driver) Put(id string) error {
761 761
 func (d *Driver) Cleanup() error {
762 762
 	title := "lcowdriver: cleanup"
763 763
 
764
-	d.cacheMutex.Lock()
765
-	for k, v := range d.cache {
766
-		logrus.Debugf("%s cache item: %s: %+v", title, k, v)
767
-		if v.refCount > 0 {
768
-			logrus.Warnf("%s leaked %s: %+v", title, k, v)
769
-		}
770
-	}
771
-	d.cacheMutex.Unlock()
772
-
773 764
 	items, err := ioutil.ReadDir(d.dataRoot)
774 765
 	if err != nil {
775 766
 		if os.IsNotExist(err) {
... ...
@@ -794,8 +736,8 @@ func (d *Driver) Cleanup() error {
794 794
 
795 795
 	// Cleanup any service VMs we have running, along with their scratch spaces.
796 796
 	// We don't take the lock for this as it's taken in terminateServiceVm.
797
-	for k, v := range d.serviceVms {
798
-		logrus.Debugf("%s svm: %s: %+v", title, k, v)
797
+	for k, v := range d.serviceVms.svms {
798
+		logrus.Debugf("%s svm entry: %s: %+v", title, k, v)
799 799
 		d.terminateServiceVM(k, "cleanup", true)
800 800
 	}
801 801
 
... ...
@@ -812,65 +754,41 @@ func (d *Driver) Cleanup() error {
812 812
 func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
813 813
 	title := fmt.Sprintf("lcowdriver: diff: %s", id)
814 814
 
815
-	logrus.Debugf("%s: locking cacheMutex", title)
816
-	d.cacheMutex.Lock()
817
-	if _, ok := d.cache[id]; !ok {
818
-		logrus.Debugf("%s: releasing cacheMutex on error path", title)
819
-		d.cacheMutex.Unlock()
820
-		return nil, fmt.Errorf("%s fail as %s is not in the cache", title, id)
821
-	}
822
-	ci := d.cache[id]
823
-	logrus.Debugf("%s: releasing cacheMutex", title)
824
-	d.cacheMutex.Unlock()
825
-
826
-	// Stat to get size
827
-	logrus.Debugf("%s: locking cacheItem", title)
828
-	ci.Lock()
829
-	fileInfo, err := os.Stat(ci.hostPath)
815
+	// Get VHDX info
816
+	ld, err := getLayerDetails(d.dir(id))
830 817
 	if err != nil {
831
-		logrus.Debugf("%s: releasing cacheItem on error path", title)
832
-		ci.Unlock()
833
-		return nil, fmt.Errorf("%s failed to stat %s: %s", title, ci.hostPath, err)
818
+		logrus.Debugf("%s: failed to get vhdx information of %s: %s", title, d.dir(id), err)
819
+		return nil, err
834 820
 	}
835
-	logrus.Debugf("%s: releasing cacheItem", title)
836
-	ci.Unlock()
837 821
 
838 822
 	// Start the SVM with a mapped virtual disk. Note that if the SVM is
839 823
 	// already running and we are in global mode, this will be
840 824
 	// hot-added.
841
-	mvd := &hcsshim.MappedVirtualDisk{
842
-		HostPath:          ci.hostPath,
843
-		ContainerPath:     ci.uvmPath,
825
+	mvd := hcsshim.MappedVirtualDisk{
826
+		HostPath:          ld.filename,
827
+		ContainerPath:     hostToGuest(ld.filename),
844 828
 		CreateInUtilityVM: true,
845 829
 		ReadOnly:          true,
846 830
 	}
847 831
 
848 832
 	logrus.Debugf("%s: starting service VM", title)
849
-	svm, err := d.startServiceVMIfNotRunning(id, mvd, fmt.Sprintf("diff %s", id))
833
+	svm, err := d.startServiceVMIfNotRunning(id, []hcsshim.MappedVirtualDisk{mvd}, fmt.Sprintf("diff %s", id))
850 834
 	if err != nil {
851 835
 		return nil, err
852 836
 	}
853 837
 
854
-	// Set `isMounted` for the cache item. Note that we re-scan the cache
855
-	// at this point as it's possible the cacheItem changed during the long-
856
-	// running operation above when we weren't holding the cacheMutex lock.
857
-	logrus.Debugf("%s: locking cacheMutex for updating isMounted", title)
858
-	d.cacheMutex.Lock()
859
-	if _, ok := d.cache[id]; !ok {
860
-		logrus.Debugf("%s: releasing cacheMutex on error path of isMounted", title)
861
-		d.cacheMutex.Unlock()
838
+	logrus.Debugf("lcowdriver: diff: waiting for svm to finish booting")
839
+	err = svm.getStartError()
840
+	if err != nil {
862 841
 		d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
863
-		return nil, fmt.Errorf("%s fail as %s is not in the cache when updating isMounted", title, id)
842
+		return nil, fmt.Errorf("lcowdriver: diff: svm failed to boot: %s", err)
864 843
 	}
865
-	ci = d.cache[id]
866
-	ci.setIsMounted()
867
-	logrus.Debugf("%s: releasing cacheMutex for updating isMounted", title)
868
-	d.cacheMutex.Unlock()
869 844
 
870 845
 	// Obtain the tar stream for it
871
-	logrus.Debugf("%s %s, size %d, isSandbox %t", title, ci.hostPath, fileInfo.Size(), ci.isSandbox)
872
-	tarReadCloser, err := svm.config.VhdToTar(ci.hostPath, ci.uvmPath, ci.isSandbox, fileInfo.Size())
846
+	logrus.Debugf("%s: %s %s, size %d, ReadOnly %t", title, ld.filename, mvd.ContainerPath, ld.size, ld.isSandbox)
847
+	tarReadCloser, err := svm.config.VhdToTar(mvd.HostPath, mvd.ContainerPath, ld.isSandbox, ld.size)
873 848
 	if err != nil {
849
+		svm.hotRemoveVHDs(mvd)
874 850
 		d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
875 851
 		return nil, fmt.Errorf("%s failed to export layer to tar stream for id: %s, parent: %s : %s", title, id, parent, err)
876 852
 	}
... ...
@@ -878,14 +796,12 @@ func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
878 878
 	logrus.Debugf("%s id %s parent %s completed successfully", title, id, parent)
879 879
 
880 880
 	// In safe/non-global mode, we can't tear down the service VM until things have been read.
881
-	if !d.globalMode {
882
-		return ioutils.NewReadCloserWrapper(tarReadCloser, func() error {
883
-			tarReadCloser.Close()
884
-			d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
885
-			return nil
886
-		}), nil
887
-	}
888
-	return tarReadCloser, nil
881
+	return ioutils.NewReadCloserWrapper(tarReadCloser, func() error {
882
+		tarReadCloser.Close()
883
+		svm.hotRemoveVHDs(mvd)
884
+		d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
885
+		return nil
886
+	}), nil
889 887
 }
890 888
 
891 889
 // ApplyDiff extracts the changeset from the given diff into the
... ...
@@ -902,6 +818,12 @@ func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
902 902
 	}
903 903
 	defer d.terminateServiceVM(id, fmt.Sprintf("applydiff %s", id), false)
904 904
 
905
+	logrus.Debugf("lcowdriver: applydiff: waiting for svm to finish booting")
906
+	err = svm.getStartError()
907
+	if err != nil {
908
+		return 0, fmt.Errorf("lcowdriver: applydiff: svm failed to boot: %s", err)
909
+	}
910
+
905 911
 	// TODO @jhowardmsft - the retries are temporary to overcome platform reliablity issues.
906 912
 	// Obviously this will be removed as platform bugs are fixed.
907 913
 	retries := 0
... ...
@@ -944,6 +866,11 @@ func (d *Driver) GetMetadata(id string) (map[string]string, error) {
944 944
 	return m, nil
945 945
 }
946 946
 
947
+// GetLayerPath gets the layer path on host (path to VHD/VHDX)
948
+func (d *Driver) GetLayerPath(id string) (string, error) {
949
+	return d.dir(id), nil
950
+}
951
+
947 952
 // dir returns the absolute path to the layer.
948 953
 func (d *Driver) dir(id string) string {
949 954
 	return filepath.Join(d.dataRoot, filepath.Base(id))
... ...
@@ -1006,3 +933,34 @@ func getLayerDetails(folder string) (*layerDetails, error) {
1006 1006
 
1007 1007
 	return ld, nil
1008 1008
 }
1009
+
1010
+func (d *Driver) getAllMounts(id string) ([]hcsshim.MappedVirtualDisk, error) {
1011
+	layerChain, err := d.getLayerChain(id)
1012
+	if err != nil {
1013
+		return nil, err
1014
+	}
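+	// Prepend the layer's own directory so it comes before its parent chain.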
1015
+	layerChain = append([]string{d.dir(id)}, layerChain...)
1016
+
1017
+	logrus.Debugf("getting all  layers: %v", layerChain)
1018
+	disks := make([]hcsshim.MappedVirtualDisk, len(layerChain), len(layerChain))
1019
+	for i := range layerChain {
1020
+		ld, err := getLayerDetails(layerChain[i])
1021
+		if err != nil {
1022
+			logrus.Debugf("Failed to get LayerVhdDetails from %s: %s", layerChain[i], err)
1023
+			return nil, err
1024
+		}
1025
+		disks[i].HostPath = ld.filename
1026
+		disks[i].ContainerPath = hostToGuest(ld.filename)
1027
+		disks[i].CreateInUtilityVM = true
1028
+		disks[i].ReadOnly = !ld.isSandbox
1029
+	}
1030
+	return disks, nil
1031
+}
1032
+
1033
+func hostToGuest(hostpath string) string {
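+	// Map a host VHD path to a guest path: the VHD's parent directory name (the layer ID) becomes a directory under /tmp in the utility VM.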
1034
+	return fmt.Sprintf("/tmp/%s", filepath.Base(filepath.Dir(hostpath)))
1035
+}
1036
+
1037
+func unionMountName(disks []hcsshim.MappedVirtualDisk) string {
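+	// The union mount point is named after the guest path of the first (topmost) disk in the list.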
1038
+	return fmt.Sprintf("%s-mount", disks[0].ContainerPath)
1039
+}
1009 1040
new file mode 100644
... ...
@@ -0,0 +1,373 @@
0
+// +build windows
1
+
2
+package lcow
3
+
4
+import (
5
+	"errors"
6
+	"fmt"
7
+	"io"
8
+	"strings"
9
+	"sync"
10
+	"time"
11
+
12
+	"github.com/Microsoft/hcsshim"
13
+	"github.com/Microsoft/opengcs/client"
14
+	"github.com/sirupsen/logrus"
15
+)
16
+
17
+// Code for all the service VM management for the LCOW graphdriver
18
+
19
+var errVMisTerminating = errors.New("service VM is shutting down")
20
+var errVMUnknown = errors.New("service vm id is unknown")
21
+var errVMStillHasReference = errors.New("Attemping to delete a VM that is still being used")
22
+
23
+// serviceVMMap is the struct representing the id -> service VM mapping.
24
+type serviceVMMap struct {
25
+	sync.Mutex
26
+	svms map[string]*serviceVMMapItem
27
+}
28
+
29
+// serviceVMMapItem is our internal structure representing an item in our
30
+// map of service VMs we are maintaining.
31
+type serviceVMMapItem struct {
32
+	svm      *serviceVM // actual service vm object
33
+	refCount int        // refcount for VM
34
+}
35
+
36
+type serviceVM struct {
37
+	sync.Mutex                     // Serialises operations being performed in this service VM.
38
+	scratchAttached bool           // Has a scratch been attached?
39
+	config          *client.Config // Represents the service VM item.
40
+
41
+	// Indicates that the vm is started
42
+	startStatus chan interface{}
43
+	startError  error
44
+
45
+	// Indicates that the vm is stopped
46
+	stopStatus chan interface{}
47
+	stopError  error
48
+
49
+	attachedVHDs map[string]int // Map ref counting all the VHDs we've hot-added/hot-removed.
50
+	unionMounts  map[string]int // Map ref counting all the union filesystems we mounted.
51
+}
52
+
53
+// add will add an id to the service vm map. There are three cases:
54
+// 	- entry doesn't exist:
55
+// 		- add id to map and return a new vm that the caller can manually configure+start
56
+//	- entry does exist
57
+//  	- return vm in map and increment ref count
58
+//  - entry does exist but the ref count is 0
59
+//		- return the svm and errVMisTerminating. Caller can call svm.getStopError() to wait for stop
60
+func (svmMap *serviceVMMap) add(id string) (svm *serviceVM, alreadyExists bool, err error) {
61
+	svmMap.Lock()
62
+	defer svmMap.Unlock()
63
+	if svm, ok := svmMap.svms[id]; ok {
64
+		if svm.refCount == 0 {
65
+			return svm.svm, true, errVMisTerminating
66
+		}
67
+		svm.refCount++
68
+		return svm.svm, true, nil
69
+	}
70
+
71
+	// Doesn't exist, so create an empty svm to put into map and return
72
+	newSVM := &serviceVM{
73
+		startStatus:  make(chan interface{}),
74
+		stopStatus:   make(chan interface{}),
75
+		attachedVHDs: make(map[string]int),
76
+		unionMounts:  make(map[string]int),
77
+		config:       &client.Config{},
78
+	}
79
+	svmMap.svms[id] = &serviceVMMapItem{
80
+		svm:      newSVM,
81
+		refCount: 1,
82
+	}
83
+	return newSVM, false, nil
84
+}
85
+
86
+// get will get the service vm from the map. There are three cases:
87
+// 	- entry doesn't exist:
88
+// 		- return errVMUnknown
89
+//	- entry does exist
90
+//  	- return vm with no error
91
+//  - entry does exist but the ref count is 0
92
+//		- return the svm and errVMisTerminating. Caller can call svm.getStopError() to wait for stop
93
+func (svmMap *serviceVMMap) get(id string) (*serviceVM, error) {
94
+	svmMap.Lock()
95
+	defer svmMap.Unlock()
96
+	svm, ok := svmMap.svms[id]
97
+	if !ok {
98
+		return nil, errVMUnknown
99
+	}
100
+	if svm.refCount == 0 {
101
+		return svm.svm, errVMisTerminating
102
+	}
103
+	return svm.svm, nil
104
+}
105
+
106
+// decrementRefCount decrements the ref count of the given ID from the map. There are four cases:
107
+// 	- entry doesn't exist:
108
+// 		- return errVMUnknown
109
+//  - entry does exist but the ref count is 0
110
+//		- return the svm and errVMisTerminating. Caller can call svm.getStopError() to wait for stop
111
+//	- entry does exist but ref count is 1
112
+//  	- return vm and set lastRef to true. The caller can then stop the vm, delete the id from this map
113
+//        and execute svm.signalStopFinished to signal the threads that the svm has been terminated.
114
+//	- entry does exist and ref count > 1
115
+//		- just reduce ref count and return svm
116
+func (svmMap *serviceVMMap) decrementRefCount(id string) (_ *serviceVM, lastRef bool, _ error) {
117
+	svmMap.Lock()
118
+	defer svmMap.Unlock()
119
+
120
+	svm, ok := svmMap.svms[id]
121
+	if !ok {
122
+		return nil, false, errVMUnknown
123
+	}
124
+	if svm.refCount == 0 {
125
+		return svm.svm, false, errVMisTerminating
126
+	}
127
+	svm.refCount--
128
+	return svm.svm, svm.refCount == 0, nil
129
+}
130
+
131
+// setRefCountZero works the same way as decrementRefCount, but sets ref count to 0 instead of decrementing it.
132
+func (svmMap *serviceVMMap) setRefCountZero(id string) (*serviceVM, error) {
133
+	svmMap.Lock()
134
+	defer svmMap.Unlock()
135
+
136
+	svm, ok := svmMap.svms[id]
137
+	if !ok {
138
+		return nil, errVMUnknown
139
+	}
140
+	if svm.refCount == 0 {
141
+		return svm.svm, errVMisTerminating
142
+	}
143
+	svm.refCount = 0
144
+	return svm.svm, nil
145
+}
146
+
147
+// deleteID deletes the given ID from the map. If the refcount is not 0 or the
148
+// VM does not exist, then this function returns an error.
149
+func (svmMap *serviceVMMap) deleteID(id string) error {
150
+	svmMap.Lock()
151
+	defer svmMap.Unlock()
152
+	svm, ok := svmMap.svms[id]
153
+	if !ok {
154
+		return errVMUnknown
155
+	}
156
+	if svm.refCount != 0 {
157
+		return errVMStillHasReference
158
+	}
159
+	delete(svmMap.svms, id)
160
+	return nil
161
+}
162
+
163
+func (svm *serviceVM) signalStartFinished(err error) {
164
+	svm.Lock()
165
+	svm.startError = err
166
+	svm.Unlock()
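+	// Closing the channel releases every goroutine blocked in getStartError; a close broadcasts to all receivers.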
167
+	close(svm.startStatus)
168
+}
169
+
170
+func (svm *serviceVM) getStartError() error {
171
+	<-svm.startStatus
172
+	svm.Lock()
173
+	defer svm.Unlock()
174
+	return svm.startError
175
+}
176
+
177
+func (svm *serviceVM) signalStopFinished(err error) {
178
+	svm.Lock()
179
+	svm.stopError = err
180
+	svm.Unlock()
181
+	close(svm.stopStatus)
182
+}
183
+
184
+func (svm *serviceVM) getStopError() error {
185
+	<-svm.stopStatus
186
+	svm.Lock()
187
+	defer svm.Unlock()
188
+	return svm.stopError
189
+}
190
+
191
+// hotAddVHDs waits for the service vm to start and then attaches the vhds.
192
+func (svm *serviceVM) hotAddVHDs(mvds ...hcsshim.MappedVirtualDisk) error {
193
+	if err := svm.getStartError(); err != nil {
194
+		return err
195
+	}
196
+	return svm.hotAddVHDsAtStart(mvds...)
197
+}
198
+
199
+// hotAddVHDsAtStart works the same way as hotAddVHDs but does not wait for the VM to start.
200
+func (svm *serviceVM) hotAddVHDsAtStart(mvds ...hcsshim.MappedVirtualDisk) error {
201
+	svm.Lock()
202
+	defer svm.Unlock()
203
+	for i, mvd := range mvds {
204
+		if _, ok := svm.attachedVHDs[mvd.HostPath]; ok {
205
+			svm.attachedVHDs[mvd.HostPath]++
206
+			continue
207
+		}
208
+
209
+		if err := svm.config.HotAddVhd(mvd.HostPath, mvd.ContainerPath, mvd.ReadOnly, !mvd.AttachOnly); err != nil {
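+			// Undo the attachments made earlier in this call (decrement or hot-remove) before returning the error.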
210
+			svm.hotRemoveVHDsAtStart(mvds[:i]...)
211
+			return err
212
+		}
213
+		svm.attachedVHDs[mvd.HostPath] = 1
214
+	}
215
+	return nil
216
+}
217
+
218
+// hotRemoveVHDs waits for the service vm to start and then removes the vhds.
219
+func (svm *serviceVM) hotRemoveVHDs(mvds ...hcsshim.MappedVirtualDisk) error {
220
+	if err := svm.getStartError(); err != nil {
221
+		return err
222
+	}
223
+	return svm.hotRemoveVHDsAtStart(mvds...)
224
+}
225
+
226
+// hotRemoveVHDsAtStart works the same way as hotRemoveVHDs but does not wait for the VM to start.
227
+func (svm *serviceVM) hotRemoveVHDsAtStart(mvds ...hcsshim.MappedVirtualDisk) error {
228
+	svm.Lock()
229
+	defer svm.Unlock()
230
+	var retErr error
231
+	for _, mvd := range mvds {
232
+		if _, ok := svm.attachedVHDs[mvd.HostPath]; !ok {
233
+			// We continue instead of returning an error if we try to hot remove a non-existent VHD.
234
+			// This is because one of the callers of the function is graphdriver.Put(). Since graphdriver.Get()
235
+			// defers the VM start to the first operation, it's possible that nothing has been hot-added
236
+			// when Put() is called. To avoid Put returning an error in that case, we simply continue if we
237
+			// don't find the vhd attached.
238
+			continue
239
+		}
240
+
241
+		if svm.attachedVHDs[mvd.HostPath] > 1 {
242
+			svm.attachedVHDs[mvd.HostPath]--
243
+			continue
244
+		}
245
+
246
+		// last VHD, so remove from VM and map
247
+		if err := svm.config.HotRemoveVhd(mvd.HostPath); err == nil {
248
+			delete(svm.attachedVHDs, mvd.HostPath)
249
+		} else {
250
+			// Take note of the error, but still continue to remove the other VHDs
251
+			logrus.Warnf("Failed to hot remove %s: %s", mvd.HostPath, err)
252
+			if retErr == nil {
253
+				retErr = err
254
+			}
255
+		}
256
+	}
257
+	return retErr
258
+}
259
+
260
+func (svm *serviceVM) createExt4VHDX(destFile string, sizeGB uint32, cacheFile string) error {
261
+	if err := svm.getStartError(); err != nil {
262
+		return err
263
+	}
264
+
265
+	svm.Lock()
266
+	defer svm.Unlock()
267
+	return svm.config.CreateExt4Vhdx(destFile, sizeGB, cacheFile)
268
+}
269
+
270
+func (svm *serviceVM) createUnionMount(mountName string, mvds ...hcsshim.MappedVirtualDisk) (err error) {
271
+	if len(mvds) == 0 {
272
+		return fmt.Errorf("createUnionMount: error must have at least 1 layer")
273
+	}
274
+
275
+	if err = svm.getStartError(); err != nil {
276
+		return err
277
+	}
278
+
279
+	svm.Lock()
280
+	defer svm.Unlock()
281
+	if _, ok := svm.unionMounts[mountName]; ok {
282
+		svm.unionMounts[mountName]++
283
+		return nil
284
+	}
285
+
286
+	var lowerLayers []string
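+	// The first disk is the topmost layer: if it is read-only it joins the lower layers, otherwise it becomes the overlay upperdir below.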
287
+	if mvds[0].ReadOnly {
288
+		lowerLayers = append(lowerLayers, mvds[0].ContainerPath)
289
+	}
290
+
291
+	for i := 1; i < len(mvds); i++ {
292
+		lowerLayers = append(lowerLayers, mvds[i].ContainerPath)
293
+	}
294
+
295
+	logrus.Debugf("Doing the overlay mount with union directory=%s", mountName)
296
+	if err = svm.runProcess(fmt.Sprintf("mkdir -p %s", mountName), nil, nil, nil); err != nil {
297
+		return err
298
+	}
299
+
300
+	var cmd string
301
+	if mvds[0].ReadOnly {
302
+		// Readonly overlay
303
+		cmd = fmt.Sprintf("mount -t overlay overlay -olowerdir=%s %s",
304
+			strings.Join(lowerLayers, ","),
305
+			mountName)
306
+	} else {
307
+		upper := fmt.Sprintf("%s/upper", mvds[0].ContainerPath)
308
+		work := fmt.Sprintf("%s/work", mvds[0].ContainerPath)
309
+
310
+		if err = svm.runProcess(fmt.Sprintf("mkdir -p %s %s", upper, work), nil, nil, nil); err != nil {
311
+			return err
312
+		}
313
+
314
+		cmd = fmt.Sprintf("mount -t overlay overlay -olowerdir=%s,upperdir=%s,workdir=%s %s",
315
+			strings.Join(lowerLayers, ":"),
316
+			upper,
317
+			work,
318
+			mountName)
319
+	}
320
+
321
+	logrus.Debugf("createUnionMount: Executing mount=%s", cmd)
322
+	if err = svm.runProcess(cmd, nil, nil, nil); err != nil {
323
+		return err
324
+	}
325
+
326
+	svm.unionMounts[mountName] = 1
327
+	return nil
328
+}
329
+
330
+func (svm *serviceVM) deleteUnionMount(mountName string, disks ...hcsshim.MappedVirtualDisk) error {
331
+	if err := svm.getStartError(); err != nil {
332
+		return err
333
+	}
334
+
335
+	svm.Lock()
336
+	defer svm.Unlock()
337
+	if _, ok := svm.unionMounts[mountName]; !ok {
338
+		return nil
339
+	}
340
+
341
+	if svm.unionMounts[mountName] > 1 {
342
+		svm.unionMounts[mountName]--
343
+		return nil
344
+	}
345
+
346
+	logrus.Debugf("Removing union mount %s", mountName)
347
+	if err := svm.runProcess(fmt.Sprintf("umount %s", mountName), nil, nil, nil); err != nil {
348
+		return err
349
+	}
350
+
351
+	delete(svm.unionMounts, mountName)
352
+	return nil
353
+}
354
+
355
+func (svm *serviceVM) runProcess(command string, stdin io.Reader, stdout io.Writer, stderr io.Writer) error {
356
+	process, err := svm.config.RunProcess(command, stdin, stdout, stderr)
357
+	if err != nil {
358
+		return err
359
+	}
360
+	defer process.Close()
361
+
362
+	process.WaitTimeout(time.Duration(int(time.Second) * svm.config.UvmTimeoutSeconds))
363
+	exitCode, err := process.ExitCode()
364
+	if err != nil {
365
+		return err
366
+	}
367
+
368
+	if exitCode != 0 {
369
+		return fmt.Errorf("svm.runProcess: command %s failed with exit code %d", command, exitCode)
370
+	}
371
+	return nil
372
+}
0 373
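To make the lifecycle above easier to follow, here is a minimal, self-contained Go sketch (illustrative only; vmMap, entry, acquire and release are not part of this change) of the two mechanisms lcow_svm.go combines: a ref-counted map entry shared by concurrent users of the same service VM, and a channel closed exactly once to broadcast "stopped" to every waiter.

package main

import (
	"fmt"
	"sync"
)

// entry plays the role of serviceVMMapItem: a shared value plus a reference count.
type entry struct {
	refCount int
	stopped  chan struct{} // closed exactly once when the last reference is released
}

type vmMap struct {
	sync.Mutex
	vms map[string]*entry
}

// acquire mirrors serviceVMMap.add: the first caller creates the entry,
// later callers only bump the reference count.
func (m *vmMap) acquire(id string) (*entry, bool) {
	m.Lock()
	defer m.Unlock()
	if e, ok := m.vms[id]; ok {
		e.refCount++
		return e, true
	}
	e := &entry{refCount: 1, stopped: make(chan struct{})}
	m.vms[id] = e
	return e, false
}

// release mirrors decrementRefCount plus deleteID: the last caller removes the
// entry and closes the channel, which releases every goroutine waiting on it.
func (m *vmMap) release(id string) {
	m.Lock()
	e, ok := m.vms[id]
	if !ok {
		m.Unlock()
		return
	}
	e.refCount--
	last := e.refCount == 0
	if last {
		delete(m.vms, id)
	}
	m.Unlock()
	if last {
		close(e.stopped) // a close broadcasts to all receivers, like signalStopFinished
	}
}

func main() {
	m := &vmMap{vms: make(map[string]*entry)}

	e, existed := m.acquire("layer1")
	fmt.Println("already running:", existed) // false: the first user would start the VM

	done := make(chan struct{})
	go func() {
		<-e.stopped // blocks until the last reference is gone
		fmt.Println("vm stopped")
		close(done)
	}()

	m.acquire("layer1") // a second user of the same VM: ref count 2
	m.release("layer1") // ref count 2 -> 1, the VM stays up
	m.release("layer1") // ref count 1 -> 0: entry deleted, waiters released
	<-done
}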
new file mode 100644
... ...
@@ -0,0 +1,139 @@
0
+// +build windows
1
+
2
+package lcow
3
+
4
+import (
5
+	"bytes"
6
+	"fmt"
7
+	"io"
8
+	"runtime"
9
+	"strings"
10
+	"sync"
11
+
12
+	"github.com/Microsoft/hcsshim"
13
+	"github.com/Microsoft/opengcs/service/gcsutils/remotefs"
14
+	"github.com/docker/docker/pkg/archive"
15
+	"github.com/docker/docker/pkg/containerfs"
16
+	"github.com/sirupsen/logrus"
17
+)
18
+
19
+type lcowfs struct {
20
+	root        string
21
+	d           *Driver
22
+	mappedDisks []hcsshim.MappedVirtualDisk
23
+	vmID        string
24
+	currentSVM  *serviceVM
25
+	sync.Mutex
26
+}
27
+
28
+var _ containerfs.ContainerFS = &lcowfs{}
29
+
30
+// ErrNotSupported is an error for unsupported operations in the remotefs
31
+var ErrNotSupported = fmt.Errorf("not supported")
32
+
33
+// Functions to implement the ContainerFS interface
34
+func (l *lcowfs) Path() string {
35
+	return l.root
36
+}
37
+
38
+func (l *lcowfs) ResolveScopedPath(path string, rawPath bool) (string, error) {
39
+	logrus.Debugf("remotefs.resolvescopedpath inputs: %s %s ", path, l.root)
40
+
41
+	arg1 := l.Join(l.root, path)
42
+	if !rawPath {
43
+		// The l.Join("/", path) will make path an absolute path and then clean it
44
+		// so if path = ../../X, it will become /X.
45
+		arg1 = l.Join(l.root, l.Join("/", path))
46
+	}
47
+	arg2 := l.root
48
+
49
+	output := &bytes.Buffer{}
50
+	if err := l.runRemoteFSProcess(nil, output, remotefs.ResolvePathCmd, arg1, arg2); err != nil {
51
+		return "", err
52
+	}
53
+
54
+	logrus.Debugf("remotefs.resolvescopedpath success. Output: %s\n", output.String())
55
+	return output.String(), nil
56
+}
57
+
58
+func (l *lcowfs) OS() string {
59
+	return "linux"
60
+}
61
+
62
+func (l *lcowfs) Architecture() string {
63
+	return runtime.GOARCH
64
+}
65
+
66
+// Other functions that are used by docker like the daemon Archiver/Extractor
67
+func (l *lcowfs) ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error {
68
+	logrus.Debugf("remotefs.ExtractArchve inputs: %s %+v", dst, opts)
69
+
70
+	tarBuf := &bytes.Buffer{}
71
+	if err := remotefs.WriteTarOptions(tarBuf, opts); err != nil {
72
+		return fmt.Errorf("failed to marshall tar opts: %s", err)
73
+	}
74
+
75
+	input := io.MultiReader(tarBuf, src)
76
+	if err := l.runRemoteFSProcess(input, nil, remotefs.ExtractArchiveCmd, dst); err != nil {
77
+		return fmt.Errorf("failed to extract archive to %s: %s", dst, err)
78
+	}
79
+	return nil
80
+}
81
+
82
+func (l *lcowfs) ArchivePath(src string, opts *archive.TarOptions) (io.ReadCloser, error) {
83
+	logrus.Debugf("remotefs.ArchivePath: %s %+v", src, opts)
84
+
85
+	tarBuf := &bytes.Buffer{}
86
+	if err := remotefs.WriteTarOptions(tarBuf, opts); err != nil {
87
+		return nil, fmt.Errorf("failed to marshall tar opts: %s", err)
88
+	}
89
+
90
+	r, w := io.Pipe()
91
+	go func() {
92
+		defer w.Close()
93
+		if err := l.runRemoteFSProcess(tarBuf, w, remotefs.ArchivePathCmd, src); err != nil {
94
+			logrus.Debugf("REMOTEFS: Failed to extract archive: %s %+v %s", src, opts, err)
95
+		}
96
+	}()
97
+	return r, nil
98
+}
99
+
100
+// Helper functions
101
+func (l *lcowfs) startVM() error {
102
+	l.Lock()
103
+	defer l.Unlock()
104
+	if l.currentSVM != nil {
105
+		return nil
106
+	}
107
+
108
+	svm, err := l.d.startServiceVMIfNotRunning(l.vmID, l.mappedDisks, "lcowfs.startVM")
109
+	if err != nil {
110
+		return err
111
+	}
112
+
113
+	if err = svm.createUnionMount(l.root, l.mappedDisks...); err != nil {
114
+		return err
115
+	}
116
+	l.currentSVM = svm
117
+	return nil
118
+}
119
+
120
+func (l *lcowfs) runRemoteFSProcess(stdin io.Reader, stdout io.Writer, args ...string) error {
121
+	if err := l.startVM(); err != nil {
122
+		return err
123
+	}
124
+
125
+	// Append remotefs prefix and setup as a command line string
126
+	cmd := fmt.Sprintf("%s %s", remotefs.RemotefsCmd, strings.Join(args, " "))
127
+	stderr := &bytes.Buffer{}
128
+	if err := l.currentSVM.runProcess(cmd, stdin, stdout, stderr); err != nil {
129
+		return err
130
+	}
131
+
132
+	eerr, err := remotefs.ReadError(stderr)
133
+	if eerr != nil {
134
+		// Process returned an error so return that.
135
+		return remotefs.ExportedToError(eerr)
136
+	}
137
+	return err
138
+}
0 139
new file mode 100644
... ...
@@ -0,0 +1,211 @@
0
+// +build windows
1
+
2
+package lcow
3
+
4
+import (
5
+	"bytes"
6
+	"encoding/binary"
7
+	"encoding/json"
8
+	"fmt"
9
+	"io"
10
+	"os"
11
+	"strconv"
12
+
13
+	"github.com/Microsoft/hcsshim"
14
+	"github.com/Microsoft/opengcs/service/gcsutils/remotefs"
15
+	"github.com/containerd/continuity/driver"
16
+)
17
+
18
+type lcowfile struct {
19
+	process   hcsshim.Process
20
+	stdin     io.WriteCloser
21
+	stdout    io.ReadCloser
22
+	stderr    io.ReadCloser
23
+	fs        *lcowfs
24
+	guestPath string
25
+}
26
+
27
+func (l *lcowfs) Open(path string) (driver.File, error) {
28
+	return l.OpenFile(path, os.O_RDONLY, 0)
29
+}
30
+
31
+func (l *lcowfs) OpenFile(path string, flag int, perm os.FileMode) (_ driver.File, err error) {
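+	// Each opened file is backed by a long-lived remotefs process in the utility VM; later Read/Write/Seek/Close calls are framed as FileHeader messages over its stdin/stdout.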
32
+	flagStr := strconv.FormatInt(int64(flag), 10)
33
+	permStr := strconv.FormatUint(uint64(perm), 8)
34
+
35
+	commandLine := fmt.Sprintf("%s %s %s %s", remotefs.RemotefsCmd, remotefs.OpenFileCmd, flagStr, permStr)
36
+	env := make(map[string]string)
37
+	env["PATH"] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:"
38
+	processConfig := &hcsshim.ProcessConfig{
39
+		EmulateConsole:    false,
40
+		CreateStdInPipe:   true,
41
+		CreateStdOutPipe:  true,
42
+		CreateStdErrPipe:  true,
43
+		CreateInUtilityVm: true,
44
+		WorkingDirectory:  "/bin",
45
+		Environment:       env,
46
+		CommandLine:       commandLine,
47
+	}
48
+
49
+	process, err := l.currentSVM.config.Uvm.CreateProcess(processConfig)
50
+	if err != nil {
51
+		return nil, fmt.Errorf("failed to open file %s: %s", path, err)
52
+	}
53
+
54
+	stdin, stdout, stderr, err := process.Stdio()
55
+	if err != nil {
56
+		process.Kill()
57
+		process.Close()
58
+		return nil, fmt.Errorf("failed to open file pipes %s: %s", path, err)
59
+	}
60
+
61
+	lf := &lcowfile{
62
+		process:   process,
63
+		stdin:     stdin,
64
+		stdout:    stdout,
65
+		stderr:    stderr,
66
+		fs:        l,
67
+		guestPath: path,
68
+	}
69
+
70
+	if _, err := lf.getResponse(); err != nil {
71
+		return nil, fmt.Errorf("failed to open file %s: %s", path, err)
72
+	}
73
+	return lf, nil
74
+}
75
+
76
+func (l *lcowfile) Read(b []byte) (int, error) {
77
+	hdr := &remotefs.FileHeader{
78
+		Cmd:  remotefs.Read,
79
+		Size: uint64(len(b)),
80
+	}
81
+
82
+	if err := remotefs.WriteFileHeader(l.stdin, hdr, nil); err != nil {
83
+		return 0, err
84
+	}
85
+
86
+	buf, err := l.getResponse()
87
+	if err != nil {
88
+		return 0, err
89
+	}
90
+
91
+	n := copy(b, buf)
92
+	return n, nil
93
+}
94
+
95
+func (l *lcowfile) Write(b []byte) (int, error) {
96
+	hdr := &remotefs.FileHeader{
97
+		Cmd:  remotefs.Write,
98
+		Size: uint64(len(b)),
99
+	}
100
+
101
+	if err := remotefs.WriteFileHeader(l.stdin, hdr, b); err != nil {
102
+		return 0, err
103
+	}
104
+
105
+	_, err := l.getResponse()
106
+	if err != nil {
107
+		return 0, err
108
+	}
109
+
110
+	return len(b), nil
111
+}
112
+
113
+func (l *lcowfile) Seek(offset int64, whence int) (int64, error) {
114
+	seekHdr := &remotefs.SeekHeader{
115
+		Offset: offset,
116
+		Whence: int32(whence),
117
+	}
118
+
119
+	buf := &bytes.Buffer{}
120
+	if err := binary.Write(buf, binary.BigEndian, seekHdr); err != nil {
121
+		return 0, err
122
+	}
123
+
124
+	hdr := &remotefs.FileHeader{
125
+		Cmd:  remotefs.Write,
126
+		Size: uint64(buf.Len()),
127
+	}
128
+	if err := remotefs.WriteFileHeader(l.stdin, hdr, buf.Bytes()); err != nil {
129
+		return 0, err
130
+	}
131
+
132
+	resBuf, err := l.getResponse()
133
+	if err != nil {
134
+		return 0, err
135
+	}
136
+
137
+	var res int64
138
+	if err := binary.Read(bytes.NewBuffer(resBuf), binary.BigEndian, &res); err != nil {
139
+		return 0, err
140
+	}
141
+	return res, nil
142
+}
143
+
144
+func (l *lcowfile) Close() error {
145
+	hdr := &remotefs.FileHeader{
146
+		Cmd:  remotefs.Close,
147
+		Size: 0,
148
+	}
149
+
150
+	if err := remotefs.WriteFileHeader(l.stdin, hdr, nil); err != nil {
151
+		return err
152
+	}
153
+
154
+	_, err := l.getResponse()
155
+	return err
156
+}
157
+
158
+func (l *lcowfile) Readdir(n int) ([]os.FileInfo, error) {
159
+	nStr := strconv.FormatInt(int64(n), 10)
160
+
161
+	// Unlike the other File functions, this one can just be run without maintaining state,
162
+	// so just do the normal runRemoteFSProcess way.
163
+	buf := &bytes.Buffer{}
164
+	if err := l.fs.runRemoteFSProcess(nil, buf, remotefs.ReadDirCmd, l.guestPath, nStr); err != nil {
165
+		return nil, err
166
+	}
167
+
168
+	var info []remotefs.FileInfo
169
+	if err := json.Unmarshal(buf.Bytes(), &info); err != nil {
170
+		return nil, err
171
+	}
172
+
173
+	osInfo := make([]os.FileInfo, len(info))
174
+	for i := range info {
175
+		osInfo[i] = &info[i]
176
+	}
177
+	return osInfo, nil
178
+}
179
+
180
+func (l *lcowfile) getResponse() ([]byte, error) {
181
+	hdr, err := remotefs.ReadFileHeader(l.stdout)
182
+	if err != nil {
183
+		return nil, err
184
+	}
185
+
186
+	if hdr.Cmd != remotefs.CmdOK {
187
+		// Something went wrong during the openfile in the server.
188
+		// Parse stderr and return that as an error
189
+		eerr, err := remotefs.ReadError(l.stderr)
190
+		if eerr != nil {
191
+			return nil, remotefs.ExportedToError(eerr)
192
+		}
193
+
194
+		// Maybe the parsing went wrong?
195
+		if err != nil {
196
+			return nil, err
197
+		}
198
+
199
+		// At this point, we know something went wrong in the remotefs program, but
200
+		// we don't know why.
201
+		return nil, fmt.Errorf("unknown error")
202
+	}
203
+
204
+	// Successful command, we might have some data to read (for Read + Seek)
205
+	buf := make([]byte, hdr.Size, hdr.Size)
206
+	if _, err := io.ReadFull(l.stdout, buf); err != nil {
207
+		return nil, err
208
+	}
209
+	return buf, nil
210
+}
0 211
new file mode 100644
... ...
@@ -0,0 +1,123 @@
0
+// +build windows
1
+
2
+package lcow
3
+
4
+import (
5
+	"bytes"
6
+	"encoding/json"
7
+	"os"
8
+	"strconv"
9
+
10
+	"github.com/Microsoft/opengcs/service/gcsutils/remotefs"
11
+
12
+	"github.com/containerd/continuity/driver"
13
+	"github.com/sirupsen/logrus"
14
+)
15
+
16
+var _ driver.Driver = &lcowfs{}
17
+
18
+func (l *lcowfs) Readlink(p string) (string, error) {
19
+	logrus.Debugf("removefs.readlink args: %s", p)
20
+
21
+	result := &bytes.Buffer{}
22
+	if err := l.runRemoteFSProcess(nil, result, remotefs.ReadlinkCmd, p); err != nil {
23
+		return "", err
24
+	}
25
+	return result.String(), nil
26
+}
27
+
28
+func (l *lcowfs) Mkdir(path string, mode os.FileMode) error {
29
+	return l.mkdir(path, mode, remotefs.MkdirCmd)
30
+}
31
+
32
+func (l *lcowfs) MkdirAll(path string, mode os.FileMode) error {
33
+	return l.mkdir(path, mode, remotefs.MkdirAllCmd)
34
+}
35
+
36
+func (l *lcowfs) mkdir(path string, mode os.FileMode, cmd string) error {
37
+	modeStr := strconv.FormatUint(uint64(mode), 8)
38
+	logrus.Debugf("remotefs.%s args: %s %s", cmd, path, modeStr)
39
+	return l.runRemoteFSProcess(nil, nil, cmd, path, modeStr)
40
+}
41
+
42
+func (l *lcowfs) Remove(path string) error {
43
+	return l.remove(path, remotefs.RemoveCmd)
44
+}
45
+
46
+func (l *lcowfs) RemoveAll(path string) error {
47
+	return l.remove(path, remotefs.RemoveAllCmd)
48
+}
49
+
50
+func (l *lcowfs) remove(path string, cmd string) error {
51
+	logrus.Debugf("remotefs.%s args: %s", cmd, path)
52
+	return l.runRemoteFSProcess(nil, nil, cmd, path)
53
+}
54
+
55
+func (l *lcowfs) Link(oldname, newname string) error {
56
+	return l.link(oldname, newname, remotefs.LinkCmd)
57
+}
58
+
59
+func (l *lcowfs) Symlink(oldname, newname string) error {
60
+	return l.link(oldname, newname, remotefs.SymlinkCmd)
61
+}
62
+
63
+func (l *lcowfs) link(oldname, newname, cmd string) error {
64
+	logrus.Debugf("remotefs.%s args: %s %s", cmd, oldname, newname)
65
+	return l.runRemoteFSProcess(nil, nil, cmd, oldname, newname)
66
+}
67
+
68
+func (l *lcowfs) Lchown(name string, uid, gid int64) error {
69
+	uidStr := strconv.FormatInt(uid, 10)
70
+	gidStr := strconv.FormatInt(gid, 10)
71
+
72
+	logrus.Debugf("remotefs.lchown args: %s %s %s", name, uidStr, gidStr)
73
+	return l.runRemoteFSProcess(nil, nil, remotefs.LchownCmd, name, uidStr, gidStr)
74
+}
75
+
76
+// Lchmod changes the mode of a file without following symlinks.
77
+func (l *lcowfs) Lchmod(path string, mode os.FileMode) error {
78
+	modeStr := strconv.FormatUint(uint64(mode), 8)
79
+	logrus.Debugf("remotefs.lchmod args: %s %s", path, modeStr)
80
+	return l.runRemoteFSProcess(nil, nil, remotefs.LchmodCmd, path, modeStr)
81
+}
82
+
83
+func (l *lcowfs) Mknod(path string, mode os.FileMode, major, minor int) error {
84
+	modeStr := strconv.FormatUint(uint64(mode), 8)
85
+	majorStr := strconv.FormatUint(uint64(major), 10)
86
+	minorStr := strconv.FormatUint(uint64(minor), 10)
87
+
88
+	logrus.Debugf("remotefs.mknod args: %s %s %s %s", path, modeStr, majorStr, minorStr)
89
+	return l.runRemoteFSProcess(nil, nil, remotefs.MknodCmd, path, modeStr, majorStr, minorStr)
90
+}
91
+
92
+func (l *lcowfs) Mkfifo(path string, mode os.FileMode) error {
93
+	modeStr := strconv.FormatUint(uint64(mode), 8)
94
+	logrus.Debugf("remotefs.mkfifo args: %s %s", path, modeStr)
95
+	return l.runRemoteFSProcess(nil, nil, remotefs.MkfifoCmd, path, modeStr)
96
+}
97
+
98
+func (l *lcowfs) Stat(p string) (os.FileInfo, error) {
99
+	return l.stat(p, remotefs.StatCmd)
100
+}
101
+
102
+func (l *lcowfs) Lstat(p string) (os.FileInfo, error) {
103
+	return l.stat(p, remotefs.LstatCmd)
104
+}
105
+
106
+func (l *lcowfs) stat(path string, cmd string) (os.FileInfo, error) {
107
+	logrus.Debugf("remotefs.stat inputs: %s %s", cmd, path)
108
+
109
+	output := &bytes.Buffer{}
110
+	err := l.runRemoteFSProcess(nil, output, cmd, path)
111
+	if err != nil {
112
+		return nil, err
113
+	}
114
+
115
+	var fi remotefs.FileInfo
116
+	if err := json.Unmarshal(output.Bytes(), &fi); err != nil {
117
+		return nil, err
118
+	}
119
+
120
+	logrus.Debugf("remotefs.stat success. got: %v\n", fi)
121
+	return &fi, nil
122
+}
0 123
new file mode 100644
... ...
@@ -0,0 +1,212 @@
0
+// +build windows
1
+
2
+package lcow
3
+
4
+import (
5
+	"errors"
6
+	"os"
7
+	pathpkg "path"
8
+	"path/filepath"
9
+	"sort"
10
+	"strings"
11
+
12
+	"github.com/containerd/continuity/pathdriver"
13
+)
14
+
15
+var _ pathdriver.PathDriver = &lcowfs{}
16
+
17
+// Continuity Path functions can be done locally
18
+func (l *lcowfs) Join(path ...string) string {
19
+	return pathpkg.Join(path...)
20
+}
21
+
22
+func (l *lcowfs) IsAbs(path string) bool {
23
+	return pathpkg.IsAbs(path)
24
+}
25
+
26
+func sameWord(a, b string) bool {
27
+	return a == b
28
+}
29
+
30
+// Implementation taken from the Go standard library
31
+func (l *lcowfs) Rel(basepath, targpath string) (string, error) {
32
+	baseVol := ""
33
+	targVol := ""
34
+	base := l.Clean(basepath)
35
+	targ := l.Clean(targpath)
36
+	if sameWord(targ, base) {
37
+		return ".", nil
38
+	}
39
+	base = base[len(baseVol):]
40
+	targ = targ[len(targVol):]
41
+	if base == "." {
42
+		base = ""
43
+	}
44
+	// Can't use IsAbs - `\a` and `a` are both relative in Windows.
45
+	baseSlashed := len(base) > 0 && base[0] == l.Separator()
46
+	targSlashed := len(targ) > 0 && targ[0] == l.Separator()
47
+	if baseSlashed != targSlashed || !sameWord(baseVol, targVol) {
48
+		return "", errors.New("Rel: can't make " + targpath + " relative to " + basepath)
49
+	}
50
+	// Position base[b0:bi] and targ[t0:ti] at the first differing elements.
51
+	bl := len(base)
52
+	tl := len(targ)
53
+	var b0, bi, t0, ti int
54
+	for {
55
+		for bi < bl && base[bi] != l.Separator() {
56
+			bi++
57
+		}
58
+		for ti < tl && targ[ti] != l.Separator() {
59
+			ti++
60
+		}
61
+		if !sameWord(targ[t0:ti], base[b0:bi]) {
62
+			break
63
+		}
64
+		if bi < bl {
65
+			bi++
66
+		}
67
+		if ti < tl {
68
+			ti++
69
+		}
70
+		b0 = bi
71
+		t0 = ti
72
+	}
73
+	if base[b0:bi] == ".." {
74
+		return "", errors.New("Rel: can't make " + targpath + " relative to " + basepath)
75
+	}
76
+	if b0 != bl {
77
+		// Base elements left. Must go up before going down.
78
+		seps := strings.Count(base[b0:bl], string(l.Separator()))
79
+		size := 2 + seps*3
80
+		if tl != t0 {
81
+			size += 1 + tl - t0
82
+		}
83
+		buf := make([]byte, size)
84
+		n := copy(buf, "..")
85
+		for i := 0; i < seps; i++ {
86
+			buf[n] = l.Separator()
87
+			copy(buf[n+1:], "..")
88
+			n += 3
89
+		}
90
+		if t0 != tl {
91
+			buf[n] = l.Separator()
92
+			copy(buf[n+1:], targ[t0:])
93
+		}
94
+		return string(buf), nil
95
+	}
96
+	return targ[t0:], nil
97
+}
98
+
99
+func (l *lcowfs) Base(path string) string {
100
+	return pathpkg.Base(path)
101
+}
102
+
103
+func (l *lcowfs) Dir(path string) string {
104
+	return pathpkg.Dir(path)
105
+}
106
+
107
+func (l *lcowfs) Clean(path string) string {
108
+	return pathpkg.Clean(path)
109
+}
110
+
111
+func (l *lcowfs) Split(path string) (dir, file string) {
112
+	return pathpkg.Split(path)
113
+}
114
+
115
+func (l *lcowfs) Separator() byte {
116
+	return '/'
117
+}
118
+
119
+func (l *lcowfs) Abs(path string) (string, error) {
120
+	// Abs is supposed to add the current working directory, which is meaningless in lcow.
121
+	// So, return an error.
122
+	return "", ErrNotSupported
123
+}
124
+
125
+// Implementation taken from the Go standard library
126
+func (l *lcowfs) Walk(root string, walkFn filepath.WalkFunc) error {
127
+	info, err := l.Lstat(root)
128
+	if err != nil {
129
+		err = walkFn(root, nil, err)
130
+	} else {
131
+		err = l.walk(root, info, walkFn)
132
+	}
133
+	if err == filepath.SkipDir {
134
+		return nil
135
+	}
136
+	return err
137
+}
138
+
139
+// walk recursively descends path, calling w.
140
+func (l *lcowfs) walk(path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
141
+	err := walkFn(path, info, nil)
142
+	if err != nil {
143
+		if info.IsDir() && err == filepath.SkipDir {
144
+			return nil
145
+		}
146
+		return err
147
+	}
148
+
149
+	if !info.IsDir() {
150
+		return nil
151
+	}
152
+
153
+	names, err := l.readDirNames(path)
154
+	if err != nil {
155
+		return walkFn(path, info, err)
156
+	}
157
+
158
+	for _, name := range names {
159
+		filename := l.Join(path, name)
160
+		fileInfo, err := l.Lstat(filename)
161
+		if err != nil {
162
+			if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
163
+				return err
164
+			}
165
+		} else {
166
+			err = l.walk(filename, fileInfo, walkFn)
167
+			if err != nil {
168
+				if !fileInfo.IsDir() || err != filepath.SkipDir {
169
+					return err
170
+				}
171
+			}
172
+		}
173
+	}
174
+	return nil
175
+}
176
+
177
+// readDirNames reads the directory named by dirname and returns
178
+// a sorted list of directory entries.
179
+func (l *lcowfs) readDirNames(dirname string) ([]string, error) {
180
+	f, err := l.Open(dirname)
181
+	if err != nil {
182
+		return nil, err
183
+	}
184
+	files, err := f.Readdir(-1)
185
+	f.Close()
186
+	if err != nil {
187
+		return nil, err
188
+	}
189
+
190
+	names := make([]string, len(files))
191
+	for i := range files {
192
+		names[i] = files[i].Name()
193
+	}
194
+
195
+	sort.Strings(names)
196
+	return names, nil
197
+}
198
+
199
+// Note that Go's filepath.FromSlash/ToSlash convert between OS paths and '/'. Since the path separator
200
+// for LCOW (and Unix) is '/', they are no-ops.
201
+func (l *lcowfs) FromSlash(path string) string {
202
+	return path
203
+}
204
+
205
+func (l *lcowfs) ToSlash(path string) string {
206
+	return path
207
+}
208
+
209
+func (l *lcowfs) Match(pattern, name string) (matched bool, err error) {
210
+	return pathpkg.Match(pattern, name)
211
+}
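Since every path method above forwards to the slash-separated path package, the LCOW path driver gives the same answers on a Windows host as it would on Linux. A minimal sketch of the equivalent calls (pure path-package semantics, nothing docker-specific assumed):

package main

import (
	"fmt"
	pathpkg "path"
)

func main() {
	// lcowfs.Join, IsAbs, Base, Dir, Clean and Split all delegate to the
	// slash-separated path package, so these results hold regardless of the
	// host's own path separator.
	fmt.Println(pathpkg.Join("/var", "lib", "docker")) // /var/lib/docker
	fmt.Println(pathpkg.IsAbs(`\a`))                   // false: '\' is not a separator for LCOW paths
	fmt.Println(pathpkg.Clean("/a/b/../c/"))           // /a/c
	// Rel above follows the filepath.Rel algorithm with '/' as the only
	// separator, e.g. Rel("/a/b", "/a/b/c/d") yields "c/d".
}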
... ...
@@ -15,6 +15,7 @@ import (
15 15
 	"github.com/docker/docker/daemon/graphdriver"
16 16
 	"github.com/docker/docker/daemon/graphdriver/overlayutils"
17 17
 	"github.com/docker/docker/pkg/archive"
18
+	"github.com/docker/docker/pkg/containerfs"
18 19
 	"github.com/docker/docker/pkg/fsutils"
19 20
 	"github.com/docker/docker/pkg/idtools"
20 21
 	"github.com/docker/docker/pkg/locker"
... ...
@@ -341,21 +342,21 @@ func (d *Driver) Remove(id string) error {
341 341
 }
342 342
 
343 343
 // Get creates and mounts the required file system for the given id and returns the mount path.
344
-func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
344
+func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, err error) {
345 345
 	d.locker.Lock(id)
346 346
 	defer d.locker.Unlock(id)
347 347
 	dir := d.dir(id)
348 348
 	if _, err := os.Stat(dir); err != nil {
349
-		return "", err
349
+		return nil, err
350 350
 	}
351 351
 	// If id has a root, just return it
352 352
 	rootDir := path.Join(dir, "root")
353 353
 	if _, err := os.Stat(rootDir); err == nil {
354
-		return rootDir, nil
354
+		return containerfs.NewLocalContainerFS(rootDir), nil
355 355
 	}
356 356
 	mergedDir := path.Join(dir, "merged")
357 357
 	if count := d.ctr.Increment(mergedDir); count > 1 {
358
-		return mergedDir, nil
358
+		return containerfs.NewLocalContainerFS(mergedDir), nil
359 359
 	}
360 360
 	defer func() {
361 361
 		if err != nil {
... ...
@@ -366,7 +367,7 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
366 366
 	}()
367 367
 	lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id"))
368 368
 	if err != nil {
369
-		return "", err
369
+		return nil, err
370 370
 	}
371 371
 	var (
372 372
 		lowerDir = path.Join(d.dir(string(lowerID)), "root")
... ...
@@ -375,18 +376,18 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
375 375
 		opts     = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir)
376 376
 	)
377 377
 	if err := unix.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil {
378
-		return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
378
+		return nil, fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
379 379
 	}
380 380
 	// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
381 381
 	// user namespace requires this to move a directory from lower to upper.
382 382
 	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
383 383
 	if err != nil {
384
-		return "", err
384
+		return nil, err
385 385
 	}
386 386
 	if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
387
-		return "", err
387
+		return nil, err
388 388
 	}
389
-	return mergedDir, nil
389
+	return containerfs.NewLocalContainerFS(mergedDir), nil
390 390
 }
391 391
 
392 392
 // Put unmounts the mount path created for the given id.
... ...
@@ -23,6 +23,7 @@ import (
23 23
 	"github.com/docker/docker/daemon/graphdriver/quota"
24 24
 	"github.com/docker/docker/pkg/archive"
25 25
 	"github.com/docker/docker/pkg/chrootarchive"
26
+	"github.com/docker/docker/pkg/containerfs"
26 27
 	"github.com/docker/docker/pkg/directory"
27 28
 	"github.com/docker/docker/pkg/fsutils"
28 29
 	"github.com/docker/docker/pkg/idtools"
... ...
@@ -514,12 +515,12 @@ func (d *Driver) Remove(id string) error {
514 514
 }
515 515
 
516 516
 // Get creates and mounts the required file system for the given id and returns the mount path.
517
-func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
517
+func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
518 518
 	d.locker.Lock(id)
519 519
 	defer d.locker.Unlock(id)
520 520
 	dir := d.dir(id)
521 521
 	if _, err := os.Stat(dir); err != nil {
522
-		return "", err
522
+		return nil, err
523 523
 	}
524 524
 
525 525
 	diffDir := path.Join(dir, "diff")
... ...
@@ -527,14 +528,14 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
527 527
 	if err != nil {
528 528
 		// If no lower, just return diff directory
529 529
 		if os.IsNotExist(err) {
530
-			return diffDir, nil
530
+			return containerfs.NewLocalContainerFS(diffDir), nil
531 531
 		}
532
-		return "", err
532
+		return nil, err
533 533
 	}
534 534
 
535 535
 	mergedDir := path.Join(dir, "merged")
536 536
 	if count := d.ctr.Increment(mergedDir); count > 1 {
537
-		return mergedDir, nil
537
+		return containerfs.NewLocalContainerFS(mergedDir), nil
538 538
 	}
539 539
 	defer func() {
540 540
 		if err != nil {
... ...
@@ -574,7 +575,7 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
574 574
 		opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", string(lowers), path.Join(id, "diff"), path.Join(id, "work"))
575 575
 		mountData = label.FormatMountLabel(opts, mountLabel)
576 576
 		if len(mountData) > pageSize {
577
-			return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData))
577
+			return nil, fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData))
578 578
 		}
579 579
 
580 580
 		mount = func(source string, target string, mType string, flags uintptr, label string) error {
... ...
@@ -584,21 +585,21 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
584 584
 	}
585 585
 
586 586
 	if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil {
587
-		return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
587
+		return nil, fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
588 588
 	}
589 589
 
590 590
 	// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
591 591
 	// user namespace requires this to move a directory from lower to upper.
592 592
 	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
593 593
 	if err != nil {
594
-		return "", err
594
+		return nil, err
595 595
 	}
596 596
 
597 597
 	if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
598
-		return "", err
598
+		return nil, err
599 599
 	}
600 600
 
601
-	return mergedDir, nil
601
+	return containerfs.NewLocalContainerFS(mergedDir), nil
602 602
 }
603 603
 
604 604
 // Put unmounts the mount path created for the given id.
... ...
@@ -7,6 +7,7 @@ import (
7 7
 	"path/filepath"
8 8
 
9 9
 	"github.com/docker/docker/pkg/archive"
10
+	"github.com/docker/docker/pkg/containerfs"
10 11
 	"github.com/docker/docker/pkg/idtools"
11 12
 	"github.com/docker/docker/pkg/plugingetter"
12 13
 	"github.com/docker/docker/pkg/plugins"
... ...
@@ -129,20 +130,20 @@ func (d *graphDriverProxy) Remove(id string) error {
129 129
 	return nil
130 130
 }
131 131
 
132
-func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) {
132
+func (d *graphDriverProxy) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
133 133
 	args := &graphDriverRequest{
134 134
 		ID:         id,
135 135
 		MountLabel: mountLabel,
136 136
 	}
137 137
 	var ret graphDriverResponse
138 138
 	if err := d.p.Client().Call("GraphDriver.Get", args, &ret); err != nil {
139
-		return "", err
139
+		return nil, err
140 140
 	}
141 141
 	var err error
142 142
 	if ret.Err != "" {
143 143
 		err = errors.New(ret.Err)
144 144
 	}
145
-	return filepath.Join(d.p.BasePath(), ret.Dir), err
145
+	return containerfs.NewLocalContainerFS(filepath.Join(d.p.BasePath(), ret.Dir)), err
146 146
 }
147 147
 
148 148
 func (d *graphDriverProxy) Put(id string) error {
... ...
@@ -7,6 +7,7 @@ import (
7 7
 
8 8
 	"github.com/docker/docker/daemon/graphdriver"
9 9
 	"github.com/docker/docker/pkg/chrootarchive"
10
+	"github.com/docker/docker/pkg/containerfs"
10 11
 	"github.com/docker/docker/pkg/idtools"
11 12
 	"github.com/docker/docker/pkg/system"
12 13
 	"github.com/opencontainers/selinux/go-selinux/label"
... ...
@@ -94,7 +95,7 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
94 94
 	if err != nil {
95 95
 		return fmt.Errorf("%s: %s", parent, err)
96 96
 	}
97
-	return CopyWithTar(parentDir, dir)
97
+	return CopyWithTar(parentDir.Path(), dir)
98 98
 }
99 99
 
100 100
 func (d *Driver) dir(id string) string {
... ...
@@ -107,14 +108,14 @@ func (d *Driver) Remove(id string) error {
107 107
 }
108 108
 
109 109
 // Get returns the directory for the given id.
110
-func (d *Driver) Get(id, mountLabel string) (string, error) {
110
+func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
111 111
 	dir := d.dir(id)
112 112
 	if st, err := os.Stat(dir); err != nil {
113
-		return "", err
113
+		return nil, err
114 114
 	} else if !st.IsDir() {
115
-		return "", fmt.Errorf("%s: not a directory", dir)
115
+		return nil, fmt.Errorf("%s: not a directory", dir)
116 116
 	}
117
-	return dir, nil
117
+	return containerfs.NewLocalContainerFS(dir), nil
118 118
 }
119 119
 
120 120
 // Put is a noop for vfs that returns nil for the error, since this driver has no runtime resources to clean up.
... ...
@@ -26,6 +26,7 @@ import (
26 26
 	"github.com/Microsoft/hcsshim"
27 27
 	"github.com/docker/docker/daemon/graphdriver"
28 28
 	"github.com/docker/docker/pkg/archive"
29
+	"github.com/docker/docker/pkg/containerfs"
29 30
 	"github.com/docker/docker/pkg/idtools"
30 31
 	"github.com/docker/docker/pkg/ioutils"
31 32
 	"github.com/docker/docker/pkg/longpath"
... ...
@@ -354,36 +355,36 @@ func (d *Driver) Remove(id string) error {
354 354
 }
355 355
 
356 356
 // Get returns the rootfs path for the id. This will mount the dir at its given path.
357
-func (d *Driver) Get(id, mountLabel string) (string, error) {
357
+func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
358 358
 	panicIfUsedByLcow()
359 359
 	logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel)
360 360
 	var dir string
361 361
 
362 362
 	rID, err := d.resolveID(id)
363 363
 	if err != nil {
364
-		return "", err
364
+		return nil, err
365 365
 	}
366 366
 	if count := d.ctr.Increment(rID); count > 1 {
367
-		return d.cache[rID], nil
367
+		return containerfs.NewLocalContainerFS(d.cache[rID]), nil
368 368
 	}
369 369
 
370 370
 	// Getting the layer paths must be done outside of the lock.
371 371
 	layerChain, err := d.getLayerChain(rID)
372 372
 	if err != nil {
373 373
 		d.ctr.Decrement(rID)
374
-		return "", err
374
+		return nil, err
375 375
 	}
376 376
 
377 377
 	if err := hcsshim.ActivateLayer(d.info, rID); err != nil {
378 378
 		d.ctr.Decrement(rID)
379
-		return "", err
379
+		return nil, err
380 380
 	}
381 381
 	if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
382 382
 		d.ctr.Decrement(rID)
383 383
 		if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
384 384
 			logrus.Warnf("Failed to Deactivate %s: %s", id, err)
385 385
 		}
386
-		return "", err
386
+		return nil, err
387 387
 	}
388 388
 
389 389
 	mountPath, err := hcsshim.GetLayerMountPath(d.info, rID)
... ...
@@ -395,7 +396,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
395 395
 		if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
396 396
 			logrus.Warnf("Failed to Deactivate %s: %s", id, err)
397 397
 		}
398
-		return "", err
398
+		return nil, err
399 399
 	}
400 400
 	d.cacheMu.Lock()
401 401
 	d.cache[rID] = mountPath
... ...
@@ -409,7 +410,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
409 409
 		dir = d.dir(id)
410 410
 	}
411 411
 
412
-	return dir, nil
412
+	return containerfs.NewLocalContainerFS(dir), nil
413 413
 }
414 414
 
415 415
 // Put adds a new layer to the driver.
... ...
@@ -618,7 +619,7 @@ func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
618 618
 	}
619 619
 	defer d.Put(id)
620 620
 
621
-	return archive.ChangesSize(layerFs, changes), nil
621
+	return archive.ChangesSize(layerFs.Path(), changes), nil
622 622
 }
623 623
 
624 624
 // GetMetadata returns custom driver information.
... ...
@@ -13,6 +13,7 @@ import (
13 13
 	"time"
14 14
 
15 15
 	"github.com/docker/docker/daemon/graphdriver"
16
+	"github.com/docker/docker/pkg/containerfs"
16 17
 	"github.com/docker/docker/pkg/idtools"
17 18
 	"github.com/docker/docker/pkg/mount"
18 19
 	"github.com/docker/docker/pkg/parsers"
... ...
@@ -356,10 +357,10 @@ func (d *Driver) Remove(id string) error {
356 356
 }
357 357
 
358 358
 // Get returns the mountpoint for the given id after creating the target directories if necessary.
359
-func (d *Driver) Get(id, mountLabel string) (string, error) {
359
+func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
360 360
 	mountpoint := d.mountPath(id)
361 361
 	if count := d.ctr.Increment(mountpoint); count > 1 {
362
-		return mountpoint, nil
362
+		return containerfs.NewLocalContainerFS(mountpoint), nil
363 363
 	}
364 364
 
365 365
 	filesystem := d.zfsPath(id)
... ...
@@ -369,17 +370,17 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
369 369
 	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
370 370
 	if err != nil {
371 371
 		d.ctr.Decrement(mountpoint)
372
-		return "", err
372
+		return nil, err
373 373
 	}
374 374
 	// Create the target directories if they don't exist
375 375
 	if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil {
376 376
 		d.ctr.Decrement(mountpoint)
377
-		return "", err
377
+		return nil, err
378 378
 	}
379 379
 
380 380
 	if err := mount.Mount(filesystem, mountpoint, "zfs", options); err != nil {
381 381
 		d.ctr.Decrement(mountpoint)
382
-		return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err)
382
+		return nil, fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err)
383 383
 	}
384 384
 
385 385
 	// this could be our first mount after creation of the filesystem, and the root dir may still have root
... ...
@@ -387,10 +388,10 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
387 387
 	if err := os.Chown(mountpoint, rootUID, rootGID); err != nil {
388 388
 		mount.Unmount(mountpoint)
389 389
 		d.ctr.Decrement(mountpoint)
390
-		return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err)
390
+		return nil, fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err)
391 391
 	}
392 392
 
393
-	return mountpoint, nil
393
+	return containerfs.NewLocalContainerFS(mountpoint), nil
394 394
 }
395 395
 
396 396
 // Put removes the existing mountpoint for the given id if it exists.
... ...
@@ -2,12 +2,14 @@
2 2
 
3 3
 package initlayer
4 4
 
5
+import "github.com/docker/docker/pkg/containerfs"
6
+
5 7
 // Setup populates a directory with mountpoints suitable
6 8
 // for bind-mounting dockerinit into the container. The mountpoint is simply an
7 9
 // empty file at /.dockerinit
8 10
 //
9 11
 // This extra layer is used by all containers as the top-most ro layer. It protects
10 12
 // the container from unwanted side-effects on the rw layer.
11
-func Setup(initLayer string, rootUID, rootGID int) error {
13
+func Setup(initLayer containerfs.ContainerFS, rootUID, rootGID int) error {
12 14
 	return nil
13 15
 }
... ...
@@ -7,6 +7,7 @@ import (
7 7
 	"path/filepath"
8 8
 	"strings"
9 9
 
10
+	"github.com/docker/docker/pkg/containerfs"
10 11
 	"github.com/docker/docker/pkg/idtools"
11 12
 	"golang.org/x/sys/unix"
12 13
 )
... ...
@@ -16,7 +17,10 @@ import (
16 16
 //
17 17
 // This extra layer is used by all containers as the top-most ro layer. It protects
18 18
 // the container from unwanted side-effects on the rw layer.
19
-func Setup(initLayer string, rootIDs idtools.IDPair) error {
19
+func Setup(initLayerFs containerfs.ContainerFS, rootIDs idtools.IDPair) error {
20
+	// Since all paths are local to the container, we can just extract initLayerFs.Path()
21
+	initLayer := initLayerFs.Path()
22
+
20 23
 	for pth, typ := range map[string]string{
21 24
 		"/dev/pts":         "dir",
22 25
 		"/dev/shm":         "dir",
... ...
@@ -3,6 +3,7 @@
3 3
 package initlayer
4 4
 
5 5
 import (
6
+	"github.com/docker/docker/pkg/containerfs"
6 7
 	"github.com/docker/docker/pkg/idtools"
7 8
 )
8 9
 
... ...
@@ -12,6 +13,6 @@ import (
12 12
 //
13 13
 // This extra layer is used by all containers as the top-most ro layer. It protects
14 14
 // the container from unwanted side-effects on the rw layer.
15
-func Setup(initLayer string, rootIDs idtools.IDPair) error {
15
+func Setup(initLayer containerfs.ContainerFS, rootIDs idtools.IDPair) error {
16 16
 	return nil
17 17
 }
... ...
@@ -19,7 +19,6 @@ import (
19 19
 	"github.com/docker/docker/pkg/idtools"
20 20
 	"github.com/docker/docker/pkg/mount"
21 21
 	"github.com/docker/docker/pkg/stringutils"
22
-	"github.com/docker/docker/pkg/symlink"
23 22
 	"github.com/docker/docker/volume"
24 23
 	"github.com/opencontainers/runc/libcontainer/apparmor"
25 24
 	"github.com/opencontainers/runc/libcontainer/cgroups"
... ...
@@ -187,7 +186,7 @@ func setUser(s *specs.Spec, c *container.Container) error {
187 187
 }
188 188
 
189 189
 func readUserFile(c *container.Container, p string) (io.ReadCloser, error) {
190
-	fp, err := symlink.FollowSymlinkInScope(filepath.Join(c.BaseFS, p), c.BaseFS)
190
+	fp, err := c.GetResourcePath(p)
191 191
 	if err != nil {
192 192
 		return nil, err
193 193
 	}
... ...
@@ -634,7 +633,7 @@ func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container)
634 634
 		return err
635 635
 	}
636 636
 	s.Root = &specs.Root{
637
-		Path:     c.BaseFS,
637
+		Path:     c.BaseFS.Path(),
638 638
 		Readonly: c.HostConfig.ReadonlyRootfs,
639 639
 	}
640 640
 	if err := c.SetupWorkingDirectory(daemon.idMappings.RootPair()); err != nil {
... ...
@@ -2,7 +2,6 @@ package daemon
2 2
 
3 3
 import (
4 4
 	"fmt"
5
-	"path/filepath"
6 5
 	"sort"
7 6
 	"strconv"
8 7
 
... ...
@@ -127,7 +126,7 @@ func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container)
127 127
 		return err
128 128
 	}
129 129
 	s.Root = specs.Root{
130
-		Path:     filepath.Dir(c.BaseFS),
130
+		Path:     c.BaseFS.Dir(c.BaseFS.Path()),
131 131
 		Readonly: c.HostConfig.ReadonlyRootfs,
132 132
 	}
133 133
 	if err := c.SetupWorkingDirectory(daemon.idMappings.RootPair()); err != nil {
... ...
@@ -239,7 +239,7 @@ func (daemon *Daemon) createSpecWindowsFields(c *container.Container, s *specs.S
239 239
 
240 240
 	s.Root.Readonly = false // Windows does not support a read-only root filesystem
241 241
 	if !isHyperV {
242
-		s.Root.Path = c.BaseFS // This is not set for Hyper-V containers
242
+		s.Root.Path = c.BaseFS.Path() // This is not set for Hyper-V containers
243 243
 		if !strings.HasSuffix(s.Root.Path, `\`) {
244 244
 			s.Root.Path = s.Root.Path + `\` // Ensure a correctly formatted volume GUID path \\?\Volume{GUID}\
245 245
 		}
... ...
@@ -204,7 +204,7 @@ func (daemon *Daemon) Cleanup(container *container.Container) {
204 204
 		daemon.unregisterExecCommand(container, eConfig)
205 205
 	}
206 206
 
207
-	if container.BaseFS != "" {
207
+	if container.BaseFS != nil && container.BaseFS.Path() != "" {
208 208
 		if err := container.UnmountVolumes(daemon.LogVolumeEvent); err != nil {
209 209
 			logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err)
210 210
 		}
... ...
@@ -198,12 +198,13 @@ func (s *DockerExternalGraphdriverSuite) setUpPlugin(c *check.C, name string, ex
198 198
 			return
199 199
 		}
200 200
 
201
+		// TODO @gupta-ak: Figure out what to do here.
201 202
 		dir, err := driver.Get(req.ID, req.MountLabel)
202 203
 		if err != nil {
203 204
 			respond(w, err)
204 205
 			return
205 206
 		}
206
-		respond(w, &graphDriverResponse{Dir: dir})
207
+		respond(w, &graphDriverResponse{Dir: dir.Path()})
207 208
 	})
208 209
 
209 210
 	mux.HandleFunc("/GraphDriver.Put", func(w http.ResponseWriter, r *http.Request) {
... ...
@@ -15,6 +15,7 @@ import (
15 15
 
16 16
 	"github.com/docker/distribution"
17 17
 	"github.com/docker/docker/pkg/archive"
18
+	"github.com/docker/docker/pkg/containerfs"
18 19
 	"github.com/opencontainers/go-digest"
19 20
 	"github.com/sirupsen/logrus"
20 21
 )
... ...
@@ -137,7 +138,7 @@ type RWLayer interface {
137 137
 
138 138
 	// Mount mounts the RWLayer and returns the filesystem path
139 139
 	// the to the writable layer.
140
-	Mount(mountLabel string) (string, error)
140
+	Mount(mountLabel string) (containerfs.ContainerFS, error)
141 141
 
142 142
 	// Unmount unmounts the RWLayer. This should be called
143 143
 	// for every mount. If there are multiple mount calls
... ...
@@ -178,7 +179,7 @@ type Metadata struct {
178 178
 // writable mount. Changes made here will
179 179
 // not be included in the Tar stream of the
180 180
 // RWLayer.
181
-type MountInit func(root string) error
181
+type MountInit func(root containerfs.ContainerFS) error
182 182
 
183 183
 // CreateRWLayerOpts contains optional arguments to be passed to CreateRWLayer
184 184
 type CreateRWLayerOpts struct {
... ...
@@ -749,5 +749,5 @@ func (n *naiveDiffPathDriver) DiffGetter(id string) (graphdriver.FileGetCloser,
749 749
 	if err != nil {
750 750
 		return nil, err
751 751
 	}
752
-	return &fileGetPutter{storage.NewPathFileGetter(p), n.Driver, id}, nil
752
+	return &fileGetPutter{storage.NewPathFileGetter(p.Path()), n.Driver, id}, nil
753 753
 }
... ...
@@ -10,9 +10,11 @@ import (
10 10
 	"strings"
11 11
 	"testing"
12 12
 
13
+	"github.com/containerd/continuity/driver"
13 14
 	"github.com/docker/docker/daemon/graphdriver"
14 15
 	"github.com/docker/docker/daemon/graphdriver/vfs"
15 16
 	"github.com/docker/docker/pkg/archive"
17
+	"github.com/docker/docker/pkg/containerfs"
16 18
 	"github.com/docker/docker/pkg/idtools"
17 19
 	"github.com/docker/docker/pkg/stringid"
18 20
 	"github.com/opencontainers/go-digest"
... ...
@@ -82,7 +84,7 @@ func newTestStore(t *testing.T) (Store, string, func()) {
82 82
 	}
83 83
 }
84 84
 
85
-type layerInit func(root string) error
85
+type layerInit func(root containerfs.ContainerFS) error
86 86
 
87 87
 func createLayer(ls Store, parent ChainID, layerFunc layerInit) (Layer, error) {
88 88
 	containerID := stringid.GenerateRandomID()
... ...
@@ -91,12 +93,12 @@ func createLayer(ls Store, parent ChainID, layerFunc layerInit) (Layer, error) {
91 91
 		return nil, err
92 92
 	}
93 93
 
94
-	path, err := mount.Mount("")
94
+	pathFS, err := mount.Mount("")
95 95
 	if err != nil {
96 96
 		return nil, err
97 97
 	}
98 98
 
99
-	if err := layerFunc(path); err != nil {
99
+	if err := layerFunc(pathFS); err != nil {
100 100
 		return nil, err
101 101
 	}
102 102
 
... ...
@@ -123,7 +125,7 @@ func createLayer(ls Store, parent ChainID, layerFunc layerInit) (Layer, error) {
123 123
 }
124 124
 
125 125
 type FileApplier interface {
126
-	ApplyFile(root string) error
126
+	ApplyFile(root containerfs.ContainerFS) error
127 127
 }
128 128
 
129 129
 type testFile struct {
... ...
@@ -140,25 +142,25 @@ func newTestFile(name string, content []byte, perm os.FileMode) FileApplier {
140 140
 	}
141 141
 }
142 142
 
143
-func (tf *testFile) ApplyFile(root string) error {
144
-	fullPath := filepath.Join(root, tf.name)
145
-	if err := os.MkdirAll(filepath.Dir(fullPath), 0755); err != nil {
143
+func (tf *testFile) ApplyFile(root containerfs.ContainerFS) error {
144
+	fullPath := root.Join(root.Path(), tf.name)
145
+	if err := root.MkdirAll(root.Dir(fullPath), 0755); err != nil {
146 146
 		return err
147 147
 	}
148 148
 	// Check if already exists
149
-	if stat, err := os.Stat(fullPath); err == nil && stat.Mode().Perm() != tf.permission {
150
-		if err := os.Chmod(fullPath, tf.permission); err != nil {
149
+	if stat, err := root.Stat(fullPath); err == nil && stat.Mode().Perm() != tf.permission {
150
+		if err := root.Lchmod(fullPath, tf.permission); err != nil {
151 151
 			return err
152 152
 		}
153 153
 	}
154
-	if err := ioutil.WriteFile(fullPath, tf.content, tf.permission); err != nil {
154
+	if err := driver.WriteFile(root, fullPath, tf.content, tf.permission); err != nil {
155 155
 		return err
156 156
 	}
157 157
 	return nil
158 158
 }
159 159
 
160 160
 func initWithFiles(files ...FileApplier) layerInit {
161
-	return func(root string) error {
161
+	return func(root containerfs.ContainerFS) error {
162 162
 		for _, f := range files {
163 163
 			if err := f.ApplyFile(root); err != nil {
164 164
 				return err
... ...
@@ -288,7 +290,7 @@ func TestMountAndRegister(t *testing.T) {
288 288
 		t.Fatal(err)
289 289
 	}
290 290
 
291
-	b, err := ioutil.ReadFile(filepath.Join(path2, "testfile.txt"))
291
+	b, err := driver.ReadFile(path2, path2.Join(path2.Path(), "testfile.txt"))
292 292
 	if err != nil {
293 293
 		t.Fatal(err)
294 294
 	}
... ...
@@ -391,12 +393,12 @@ func TestStoreRestore(t *testing.T) {
391 391
 		t.Fatal(err)
392 392
 	}
393 393
 
394
-	path, err := m.Mount("")
394
+	pathFS, err := m.Mount("")
395 395
 	if err != nil {
396 396
 		t.Fatal(err)
397 397
 	}
398 398
 
399
-	if err := ioutil.WriteFile(filepath.Join(path, "testfile.txt"), []byte("nothing here"), 0644); err != nil {
399
+	if err := driver.WriteFile(pathFS, pathFS.Join(pathFS.Path(), "testfile.txt"), []byte("nothing here"), 0644); err != nil {
400 400
 		t.Fatal(err)
401 401
 	}
402 402
 
... ...
@@ -430,20 +432,20 @@ func TestStoreRestore(t *testing.T) {
430 430
 
431 431
 	if mountPath, err := m2.Mount(""); err != nil {
432 432
 		t.Fatal(err)
433
-	} else if path != mountPath {
434
-		t.Fatalf("Unexpected path %s, expected %s", mountPath, path)
433
+	} else if pathFS.Path() != mountPath.Path() {
434
+		t.Fatalf("Unexpected path %s, expected %s", mountPath.Path(), pathFS.Path())
435 435
 	}
436 436
 
437 437
 	if mountPath, err := m2.Mount(""); err != nil {
438 438
 		t.Fatal(err)
439
-	} else if path != mountPath {
440
-		t.Fatalf("Unexpected path %s, expected %s", mountPath, path)
439
+	} else if pathFS.Path() != mountPath.Path() {
440
+		t.Fatalf("Unexpected path %s, expected %s", mountPath.Path(), pathFS.Path())
441 441
 	}
442 442
 	if err := m2.Unmount(); err != nil {
443 443
 		t.Fatal(err)
444 444
 	}
445 445
 
446
-	b, err := ioutil.ReadFile(filepath.Join(path, "testfile.txt"))
446
+	b, err := driver.ReadFile(pathFS, pathFS.Join(pathFS.Path(), "testfile.txt"))
447 447
 	if err != nil {
448 448
 		t.Fatal(err)
449 449
 	}
... ...
@@ -618,7 +620,7 @@ func tarFromFiles(files ...FileApplier) ([]byte, error) {
618 618
 	defer os.RemoveAll(td)
619 619
 
620 620
 	for _, f := range files {
621
-		if err := f.ApplyFile(td); err != nil {
621
+		if err := f.ApplyFile(containerfs.NewLocalContainerFS(td)); err != nil {
622 622
 			return nil, err
623 623
 		}
624 624
 	}
... ...
@@ -1,6 +1,15 @@
1 1
 package layer
2 2
 
3
-import "errors"
3
+import (
4
+	"errors"
5
+)
6
+
7
+// Getter is an interface to get the path to a layer on the host.
8
+type Getter interface {
9
+	// GetLayerPath gets the path for the layer. This is different from Get()
10
+	// since that returns an interface to account for unmountable layers.
11
+	GetLayerPath(id string) (string, error)
12
+}
4 13
 
5 14
 // GetLayerPath returns the path to a layer
6 15
 func GetLayerPath(s Store, layer ChainID) (string, error) {
... ...
@@ -16,6 +25,10 @@ func GetLayerPath(s Store, layer ChainID) (string, error) {
16 16
 		return "", ErrLayerDoesNotExist
17 17
 	}
18 18
 
19
+	if layerGetter, ok := ls.driver.(Getter); ok {
20
+		return layerGetter.GetLayerPath(rl.cacheID)
21
+	}
22
+
19 23
 	path, err := ls.driver.Get(rl.cacheID, "")
20 24
 	if err != nil {
21 25
 		return "", err
... ...
@@ -25,7 +38,7 @@ func GetLayerPath(s Store, layer ChainID) (string, error) {
25 25
 		return "", err
26 26
 	}
27 27
 
28
-	return path, nil
28
+	return path.Path(), nil
29 29
 }
30 30
 
31 31
 func (ls *layerStore) mountID(name string) string {
... ...
@@ -2,13 +2,13 @@ package layer
2 2
 
3 3
 import (
4 4
 	"io/ioutil"
5
-	"os"
6
-	"path/filepath"
7 5
 	"runtime"
8 6
 	"sort"
9 7
 	"testing"
10 8
 
9
+	"github.com/containerd/continuity/driver"
11 10
 	"github.com/docker/docker/pkg/archive"
11
+	"github.com/docker/docker/pkg/containerfs"
12 12
 )
13 13
 
14 14
 func TestMountInit(t *testing.T) {
... ...
@@ -28,7 +28,7 @@ func TestMountInit(t *testing.T) {
28 28
 		t.Fatal(err)
29 29
 	}
30 30
 
31
-	mountInit := func(root string) error {
31
+	mountInit := func(root containerfs.ContainerFS) error {
32 32
 		return initfile.ApplyFile(root)
33 33
 	}
34 34
 
... ...
@@ -40,21 +40,21 @@ func TestMountInit(t *testing.T) {
40 40
 		t.Fatal(err)
41 41
 	}
42 42
 
43
-	path, err := m.Mount("")
43
+	pathFS, err := m.Mount("")
44 44
 	if err != nil {
45 45
 		t.Fatal(err)
46 46
 	}
47 47
 
48
-	f, err := os.Open(filepath.Join(path, "testfile.txt"))
48
+	fi, err := pathFS.Stat(pathFS.Join(pathFS.Path(), "testfile.txt"))
49 49
 	if err != nil {
50 50
 		t.Fatal(err)
51 51
 	}
52
-	defer f.Close()
53 52
 
54
-	fi, err := f.Stat()
53
+	f, err := pathFS.Open(pathFS.Join(pathFS.Path(), "testfile.txt"))
55 54
 	if err != nil {
56 55
 		t.Fatal(err)
57 56
 	}
57
+	defer f.Close()
58 58
 
59 59
 	b, err := ioutil.ReadAll(f)
60 60
 	if err != nil {
... ...
@@ -88,7 +88,7 @@ func TestMountSize(t *testing.T) {
88 88
 		t.Fatal(err)
89 89
 	}
90 90
 
91
-	mountInit := func(root string) error {
91
+	mountInit := func(root containerfs.ContainerFS) error {
92 92
 		return newTestFile("file-init", contentInit, 0777).ApplyFile(root)
93 93
 	}
94 94
 	rwLayerOpts := &CreateRWLayerOpts{
... ...
@@ -100,12 +100,12 @@ func TestMountSize(t *testing.T) {
100 100
 		t.Fatal(err)
101 101
 	}
102 102
 
103
-	path, err := m.Mount("")
103
+	pathFS, err := m.Mount("")
104 104
 	if err != nil {
105 105
 		t.Fatal(err)
106 106
 	}
107 107
 
108
-	if err := ioutil.WriteFile(filepath.Join(path, "file2"), content2, 0755); err != nil {
108
+	if err := driver.WriteFile(pathFS, pathFS.Join(pathFS.Path(), "file2"), content2, 0755); err != nil {
109 109
 		t.Fatal(err)
110 110
 	}
111 111
 
... ...
@@ -140,7 +140,7 @@ func TestMountChanges(t *testing.T) {
140 140
 		t.Fatal(err)
141 141
 	}
142 142
 
143
-	mountInit := func(root string) error {
143
+	mountInit := func(root containerfs.ContainerFS) error {
144 144
 		return initfile.ApplyFile(root)
145 145
 	}
146 146
 	rwLayerOpts := &CreateRWLayerOpts{
... ...
@@ -152,28 +152,28 @@ func TestMountChanges(t *testing.T) {
152 152
 		t.Fatal(err)
153 153
 	}
154 154
 
155
-	path, err := m.Mount("")
155
+	pathFS, err := m.Mount("")
156 156
 	if err != nil {
157 157
 		t.Fatal(err)
158 158
 	}
159 159
 
160
-	if err := os.Chmod(filepath.Join(path, "testfile1.txt"), 0755); err != nil {
160
+	if err := pathFS.Lchmod(pathFS.Join(pathFS.Path(), "testfile1.txt"), 0755); err != nil {
161 161
 		t.Fatal(err)
162 162
 	}
163 163
 
164
-	if err := ioutil.WriteFile(filepath.Join(path, "testfile1.txt"), []byte("mount data!"), 0755); err != nil {
164
+	if err := driver.WriteFile(pathFS, pathFS.Join(pathFS.Path(), "testfile1.txt"), []byte("mount data!"), 0755); err != nil {
165 165
 		t.Fatal(err)
166 166
 	}
167 167
 
168
-	if err := os.Remove(filepath.Join(path, "testfile2.txt")); err != nil {
168
+	if err := pathFS.Remove(pathFS.Join(pathFS.Path(), "testfile2.txt")); err != nil {
169 169
 		t.Fatal(err)
170 170
 	}
171 171
 
172
-	if err := os.Chmod(filepath.Join(path, "testfile3.txt"), 0755); err != nil {
172
+	if err := pathFS.Lchmod(pathFS.Join(pathFS.Path(), "testfile3.txt"), 0755); err != nil {
173 173
 		t.Fatal(err)
174 174
 	}
175 175
 
176
-	if err := ioutil.WriteFile(filepath.Join(path, "testfile4.txt"), []byte("mount data!"), 0644); err != nil {
176
+	if err := driver.WriteFile(pathFS, pathFS.Join(pathFS.Path(), "testfile4.txt"), []byte("mount data!"), 0644); err != nil {
177 177
 		t.Fatal(err)
178 178
 	}
179 179
 
... ...
@@ -4,6 +4,7 @@ import (
4 4
 	"io"
5 5
 
6 6
 	"github.com/docker/docker/pkg/archive"
7
+	"github.com/docker/docker/pkg/containerfs"
7 8
 )
8 9
 
9 10
 type mountedLayer struct {
... ...
@@ -88,7 +89,7 @@ type referencedRWLayer struct {
88 88
 	*mountedLayer
89 89
 }
90 90
 
91
-func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) {
91
+func (rl *referencedRWLayer) Mount(mountLabel string) (containerfs.ContainerFS, error) {
92 92
 	return rl.layerStore.driver.Get(rl.mountedLayer.mountID, mountLabel)
93 93
 }
94 94
 
... ...
@@ -55,18 +55,17 @@ type (
55 55
 	}
56 56
 )
57 57
 
58
-// Archiver allows the reuse of most utility functions of this package
59
-// with a pluggable Untar function. Also, to facilitate the passing of
60
-// specific id mappings for untar, an archiver can be created with maps
61
-// which will then be passed to Untar operations
58
+// Archiver implements the Archiver interface and allows the reuse of most utility functions of
59
+// this package with a pluggable Untar function. Also, to facilitate the passing of specific id
60
+// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations.
62 61
 type Archiver struct {
63
-	Untar      func(io.Reader, string, *TarOptions) error
64
-	IDMappings *idtools.IDMappings
62
+	Untar         func(io.Reader, string, *TarOptions) error
63
+	IDMappingsVar *idtools.IDMappings
65 64
 }
66 65
 
67 66
 // NewDefaultArchiver returns a new Archiver without any IDMappings
68 67
 func NewDefaultArchiver() *Archiver {
69
-	return &Archiver{Untar: Untar, IDMappings: &idtools.IDMappings{}}
68
+	return &Archiver{Untar: Untar, IDMappingsVar: &idtools.IDMappings{}}
70 69
 }
71 70
 
72 71
 // breakoutError is used to differentiate errors related to breaking out
... ...
@@ -1025,8 +1024,8 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
1025 1025
 	}
1026 1026
 	defer archive.Close()
1027 1027
 	options := &TarOptions{
1028
-		UIDMaps: archiver.IDMappings.UIDs(),
1029
-		GIDMaps: archiver.IDMappings.GIDs(),
1028
+		UIDMaps: archiver.IDMappingsVar.UIDs(),
1029
+		GIDMaps: archiver.IDMappingsVar.GIDs(),
1030 1030
 	}
1031 1031
 	return archiver.Untar(archive, dst, options)
1032 1032
 }
... ...
@@ -1039,8 +1038,8 @@ func (archiver *Archiver) UntarPath(src, dst string) error {
1039 1039
 	}
1040 1040
 	defer archive.Close()
1041 1041
 	options := &TarOptions{
1042
-		UIDMaps: archiver.IDMappings.UIDs(),
1043
-		GIDMaps: archiver.IDMappings.GIDs(),
1042
+		UIDMaps: archiver.IDMappingsVar.UIDs(),
1043
+		GIDMaps: archiver.IDMappingsVar.GIDs(),
1044 1044
 	}
1045 1045
 	return archiver.Untar(archive, dst, options)
1046 1046
 }
... ...
@@ -1058,10 +1057,10 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error {
1058 1058
 		return archiver.CopyFileWithTar(src, dst)
1059 1059
 	}
1060 1060
 
1061
-	// if this archiver is set up with ID mapping we need to create
1061
+	// if this Archiver is set up with ID mapping we need to create
1062 1062
 	// the new destination directory with the remapped root UID/GID pair
1063 1063
 	// as owner
1064
-	rootIDs := archiver.IDMappings.RootPair()
1064
+	rootIDs := archiver.IDMappingsVar.RootPair()
1065 1065
 	// Create dst, copy src's content into it
1066 1066
 	logrus.Debugf("Creating dest directory: %s", dst)
1067 1067
 	if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil {
... ...
@@ -1112,7 +1111,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
1112 1112
 		hdr.Name = filepath.Base(dst)
1113 1113
 		hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
1114 1114
 
1115
-		if err := remapIDs(archiver.IDMappings, hdr); err != nil {
1115
+		if err := remapIDs(archiver.IDMappingsVar, hdr); err != nil {
1116 1116
 			return err
1117 1117
 		}
1118 1118
 
... ...
@@ -1139,6 +1138,11 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
1139 1139
 	return err
1140 1140
 }
1141 1141
 
1142
+// IDMappings returns the IDMappings of the archiver.
1143
+func (archiver *Archiver) IDMappings() *idtools.IDMappings {
1144
+	return archiver.IDMappingsVar
1145
+}
1146
+
1142 1147
 func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error {
1143 1148
 	ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid})
1144 1149
 	hdr.Uid, hdr.Gid = ids.UID, ids.GID
... ...
@@ -27,23 +27,23 @@ var (
27 27
 // path (from before being processed by utility functions from the path or
28 28
 // filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
29 29
 // path already ends in a `.` path segment, then another is not added. If the
30
-// clean path already ends in a path separator, then another is not added.
31
-func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string {
30
+// clean path already ends in the separator, then another is not added.
31
+func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string {
32 32
 	// Ensure paths are in platform semantics
33
-	cleanedPath = normalizePath(cleanedPath)
34
-	originalPath = normalizePath(originalPath)
33
+	cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1)
34
+	originalPath = strings.Replace(originalPath, "/", string(sep), -1)
35 35
 
36 36
 	if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
37
-		if !hasTrailingPathSeparator(cleanedPath) {
37
+		if !hasTrailingPathSeparator(cleanedPath, sep) {
38 38
 			// Add a separator if it doesn't already end with one (a cleaned
39 39
 			// path would only end in a separator if it is the root).
40
-			cleanedPath += string(filepath.Separator)
40
+			cleanedPath += string(sep)
41 41
 		}
42 42
 		cleanedPath += "."
43 43
 	}
44 44
 
45
-	if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) {
46
-		cleanedPath += string(filepath.Separator)
45
+	if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) {
46
+		cleanedPath += string(sep)
47 47
 	}
48 48
 
49 49
 	return cleanedPath
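With the separator now passed explicitly, the same helper serves both the local platform and an LCOW destination. A minimal sketch of the behaviour, assuming a Linux host and a build of this tree:

package main

import (
	"fmt"
	"path/filepath"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// filepath.Clean drops a trailing "/." or "/"; the helper restores it
	// based on what the caller originally typed.
	for _, p := range []string{"/foo/bar/.", "/foo/bar/", "/foo/bar"} {
		cleaned := archive.PreserveTrailingDotOrSeparator(filepath.Clean(p), p, '/')
		fmt.Printf("%q -> %q\n", p, cleaned)
	}
	// Expected output:
	//   "/foo/bar/." -> "/foo/bar/."
	//   "/foo/bar/"  -> "/foo/bar/"
	//   "/foo/bar"   -> "/foo/bar"
}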
... ...
@@ -52,14 +52,14 @@ func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string {
52 52
 // assertsDirectory returns whether the given path is
53 53
 // asserted to be a directory, i.e., the path ends with
54 54
 // a trailing '/' or `/.`, assuming a path separator of `/`.
55
-func assertsDirectory(path string) bool {
56
-	return hasTrailingPathSeparator(path) || specifiesCurrentDir(path)
55
+func assertsDirectory(path string, sep byte) bool {
56
+	return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path)
57 57
 }
58 58
 
59 59
 // hasTrailingPathSeparator returns whether the given
60 60
 // path ends with the system's path separator character.
61
-func hasTrailingPathSeparator(path string) bool {
62
-	return len(path) > 0 && os.IsPathSeparator(path[len(path)-1])
61
+func hasTrailingPathSeparator(path string, sep byte) bool {
62
+	return len(path) > 0 && path[len(path)-1] == sep
63 63
 }
64 64
 
65 65
 // specifiesCurrentDir returns whether the given path specifies
... ...
@@ -72,10 +72,10 @@ func specifiesCurrentDir(path string) bool {
72 72
 // basename by first cleaning the path but preserves a trailing "." if the
73 73
 // original path specified the current directory.
74 74
 func SplitPathDirEntry(path string) (dir, base string) {
75
-	cleanedPath := filepath.Clean(normalizePath(path))
75
+	cleanedPath := filepath.Clean(filepath.FromSlash(path))
76 76
 
77 77
 	if specifiesCurrentDir(path) {
78
-		cleanedPath += string(filepath.Separator) + "."
78
+		cleanedPath += string(os.PathSeparator) + "."
79 79
 	}
80 80
 
81 81
 	return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
... ...
@@ -106,19 +106,24 @@ func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, er
106 106
 	// Separate the source path between its directory and
107 107
 	// the entry in that directory which we are archiving.
108 108
 	sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
109
-
110
-	filter := []string{sourceBase}
109
+	opts := TarResourceRebaseOpts(sourceBase, rebaseName)
111 110
 
112 111
 	logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
112
+	return TarWithOptions(sourceDir, opts)
113
+}
113 114
 
114
-	return TarWithOptions(sourceDir, &TarOptions{
115
+// TarResourceRebaseOpts does not perform the Tar, but instead just creates the rebase
116
+// parameters to be sent to TarWithOptions (the TarOptions struct)
117
+func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
118
+	filter := []string{sourceBase}
119
+	return &TarOptions{
115 120
 		Compression:      Uncompressed,
116 121
 		IncludeFiles:     filter,
117 122
 		IncludeSourceDir: true,
118 123
 		RebaseNames: map[string]string{
119 124
 			sourceBase: rebaseName,
120 125
 		},
121
-	})
126
+	}
122 127
 }
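Splitting the options out lets a caller that already has a source directory reuse the rebase logic without going through TarResourceRebase. A minimal sketch of the composition, assuming a build of this tree:

package main

import (
	"io"
	"io/ioutil"
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Archive the entry "app" under /src, renaming it to "web" inside the
	// tar stream - the same composition TarResourceRebase performs above.
	sourceDir, sourceBase := archive.SplitPathDirEntry("/src/app")
	opts := archive.TarResourceRebaseOpts(sourceBase, "web")

	rc, err := archive.TarWithOptions(sourceDir, opts)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	n, err := io.Copy(ioutil.Discard, rc) // a real caller would stream this somewhere useful
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("tarred %d bytes", n)
}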
123 128
 
124 129
 // CopyInfo holds basic info about the source
... ...
@@ -281,7 +286,7 @@ func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir
281 281
 			srcBase = srcInfo.RebaseName
282 282
 		}
283 283
 		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
284
-	case assertsDirectory(dstInfo.Path):
284
+	case assertsDirectory(dstInfo.Path, os.PathSeparator):
285 285
 		// The destination does not exist and is asserted to be created as a
286 286
 		// directory, but the source content is not a directory. This is an
287 287
 		// error condition since you cannot create a directory from a file
... ...
@@ -351,6 +356,9 @@ func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.Read
351 351
 	return rebased
352 352
 }
353 353
 
354
+// TODO @gupta-ak. These might have to be changed in the future to be
355
+// continuity driver aware as well to support LCOW.
356
+
354 357
 // CopyResource performs an archive copy from the given source path to the
355 358
 // given destination path. The source path MUST exist and the destination
356 359
 // path's parent directory must exist.
... ...
@@ -365,8 +373,8 @@ func CopyResource(srcPath, dstPath string, followLink bool) error {
365 365
 	dstPath = normalizePath(dstPath)
366 366
 
367 367
 	// Clean the source and destination paths.
368
-	srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
369
-	dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
368
+	srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator)
369
+	dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator)
370 370
 
371 371
 	if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
372 372
 		return err
... ...
@@ -429,7 +437,8 @@ func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseNa
429 429
 		// resolvedDirPath will have been cleaned (no trailing path separators) so
430 430
 		// we can manually join it with the base path element.
431 431
 		resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
432
-		if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
432
+		if hasTrailingPathSeparator(path, os.PathSeparator) &&
433
+			filepath.Base(path) != filepath.Base(resolvedPath) {
433 434
 			rebaseName = filepath.Base(path)
434 435
 		}
435 436
 	}
... ...
@@ -442,11 +451,13 @@ func GetRebaseName(path, resolvedPath string) (string, string) {
442 442
 	// linkTarget will have been cleaned (no trailing path separators and dot) so
443 443
 	// we can manually join it with them
444 444
 	var rebaseName string
445
-	if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) {
445
+	if specifiesCurrentDir(path) &&
446
+		!specifiesCurrentDir(resolvedPath) {
446 447
 		resolvedPath += string(filepath.Separator) + "."
447 448
 	}
448 449
 
449
-	if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) {
450
+	if hasTrailingPathSeparator(path, os.PathSeparator) &&
451
+		!hasTrailingPathSeparator(resolvedPath, os.PathSeparator) {
450 452
 		resolvedPath += string(filepath.Separator)
451 453
 	}
452 454
 
... ...
@@ -16,7 +16,10 @@ func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver {
16 16
 	if idMappings == nil {
17 17
 		idMappings = &idtools.IDMappings{}
18 18
 	}
19
-	return &archive.Archiver{Untar: Untar, IDMappings: idMappings}
19
+	return &archive.Archiver{
20
+		Untar:         Untar,
21
+		IDMappingsVar: idMappings,
22
+	}
20 23
 }
21 24
 
22 25
 // Untar reads a stream of bytes from `archive`, parses it as a tar archive,
23 26
new file mode 100644
... ...
@@ -0,0 +1,194 @@
0
+package containerfs
1
+
2
+import (
3
+	"archive/tar"
4
+	"fmt"
5
+	"io"
6
+	"os"
7
+	"path/filepath"
8
+
9
+	"github.com/docker/docker/pkg/archive"
10
+	"github.com/docker/docker/pkg/idtools"
11
+	"github.com/docker/docker/pkg/promise"
12
+	"github.com/docker/docker/pkg/system"
13
+	"github.com/sirupsen/logrus"
14
+)
15
+
16
+// TarFunc provides a function definition for a custom Tar function
17
+type TarFunc func(string, *archive.TarOptions) (io.ReadCloser, error)
18
+
19
+// UntarFunc provides a function definition for a custom Untar function
20
+type UntarFunc func(io.Reader, string, *archive.TarOptions) error
21
+
22
+// Archiver provides an implementation similar to archive.Archiver, but built on top of the rootfs (ContainerFS) abstraction
23
+type Archiver struct {
24
+	SrcDriver     Driver
25
+	DstDriver     Driver
26
+	Tar           TarFunc
27
+	Untar         UntarFunc
28
+	IDMappingsVar *idtools.IDMappings
29
+}
30
+
31
+// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
32
+// If either Tar or Untar fails, TarUntar aborts and returns the error.
33
+func (archiver *Archiver) TarUntar(src, dst string) error {
34
+	logrus.Debugf("TarUntar(%s %s)", src, dst)
35
+	tarArchive, err := archiver.Tar(src, &archive.TarOptions{Compression: archive.Uncompressed})
36
+	if err != nil {
37
+		return err
38
+	}
39
+	defer tarArchive.Close()
40
+	options := &archive.TarOptions{
41
+		UIDMaps: archiver.IDMappingsVar.UIDs(),
42
+		GIDMaps: archiver.IDMappingsVar.GIDs(),
43
+	}
44
+	return archiver.Untar(tarArchive, dst, options)
45
+}
46
+
47
+// UntarPath untars the tar archive at the source path src into the destination directory dst.
48
+func (archiver *Archiver) UntarPath(src, dst string) error {
49
+	tarArchive, err := archiver.SrcDriver.Open(src)
50
+	if err != nil {
51
+		return err
52
+	}
53
+	defer tarArchive.Close()
54
+	options := &archive.TarOptions{
55
+		UIDMaps: archiver.IDMappingsVar.UIDs(),
56
+		GIDMaps: archiver.IDMappingsVar.GIDs(),
57
+	}
58
+	return archiver.Untar(tarArchive, dst, options)
59
+}
60
+
61
+// CopyWithTar creates a tar archive of filesystem path `src`, and
62
+// unpacks it at filesystem path `dst`.
63
+// The archive is streamed directly with fixed buffering and no
64
+// intermediary disk IO.
65
+func (archiver *Archiver) CopyWithTar(src, dst string) error {
66
+	srcSt, err := archiver.SrcDriver.Stat(src)
67
+	if err != nil {
68
+		return err
69
+	}
70
+	if !srcSt.IsDir() {
71
+		return archiver.CopyFileWithTar(src, dst)
72
+	}
73
+
74
+	// if this archiver is set up with ID mapping we need to create
75
+	// the new destination directory with the remapped root UID/GID pair
76
+	// as owner
77
+	rootIDs := archiver.IDMappingsVar.RootPair()
78
+	// Create dst, copy src's content into it
79
+	if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil {
80
+		return err
81
+	}
82
+	logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
83
+	return archiver.TarUntar(src, dst)
84
+}
85
+
86
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
87
+// for a single file. It copies a regular file from path `src` to
88
+// path `dst`, and preserves all its metadata.
89
+func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
90
+	logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
91
+	srcDriver := archiver.SrcDriver
92
+	dstDriver := archiver.DstDriver
93
+
94
+	srcSt, err := srcDriver.Stat(src)
95
+	if err != nil {
96
+		return err
97
+	}
98
+
99
+	if srcSt.IsDir() {
100
+		return fmt.Errorf("Can't copy a directory")
101
+	}
102
+
103
+	// Clean up the trailing slash. This must be done in an operating
104
+	// system specific manner.
105
+	if dst[len(dst)-1] == dstDriver.Separator() {
106
+		dst = dstDriver.Join(dst, srcDriver.Base(src))
107
+	}
108
+
109
+	// The original call was system.MkdirAll, which is just
110
+	// os.MkdirAll on non-Windows platforms and a Windows-specific variant on Windows.
111
+	if dstDriver.OS() == "windows" {
112
+		// Now we are WCOW
113
+		if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil {
114
+			return err
115
+		}
116
+	} else {
117
+		// We can just use the driver.MkdirAll function
118
+		if err := dstDriver.MkdirAll(dstDriver.Dir(dst), 0700); err != nil {
119
+			return err
120
+		}
121
+	}
122
+
123
+	r, w := io.Pipe()
124
+	errC := promise.Go(func() error {
125
+		defer w.Close()
126
+
127
+		srcF, err := srcDriver.Open(src)
128
+		if err != nil {
129
+			return err
130
+		}
131
+		defer srcF.Close()
132
+
133
+		hdr, err := tar.FileInfoHeader(srcSt, "")
134
+		if err != nil {
135
+			return err
136
+		}
137
+		hdr.Name = dstDriver.Base(dst)
138
+		if dstDriver.OS() == "windows" {
139
+			hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
140
+		} else {
141
+			hdr.Mode = int64(os.FileMode(hdr.Mode))
142
+		}
143
+
144
+		if err := remapIDs(archiver.IDMappingsVar, hdr); err != nil {
145
+			return err
146
+		}
147
+
148
+		tw := tar.NewWriter(w)
149
+		defer tw.Close()
150
+		if err := tw.WriteHeader(hdr); err != nil {
151
+			return err
152
+		}
153
+		if _, err := io.Copy(tw, srcF); err != nil {
154
+			return err
155
+		}
156
+		return nil
157
+	})
158
+	defer func() {
159
+		if er := <-errC; err == nil && er != nil {
160
+			err = er
161
+		}
162
+	}()
163
+
164
+	err = archiver.Untar(r, dstDriver.Dir(dst), nil)
165
+	if err != nil {
166
+		r.CloseWithError(err)
167
+	}
168
+	return err
169
+}
170
+
171
+// IDMappings returns the IDMappings of the archiver.
172
+func (archiver *Archiver) IDMappings() *idtools.IDMappings {
173
+	return archiver.IDMappingsVar
174
+}
175
+
176
+func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error {
177
+	ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid})
178
+	hdr.Uid, hdr.Gid = ids.UID, ids.GID
179
+	return err
180
+}
181
+
182
+// chmodTarEntry is used to adjust the file permissions used in tar header based
183
+// on the platform the archival is done.
184
+func chmodTarEntry(perm os.FileMode) os.FileMode {
185
+	//perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
186
+	permPart := perm & os.ModePerm
187
+	noPermPart := perm &^ os.ModePerm
188
+	// Add the x bit: make everything +x from windows
189
+	permPart |= 0111
190
+	permPart &= 0755
191
+
192
+	return noPermPart | permPart
193
+}
0 194
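For the purely local case the new Archiver can be assembled from the local drivers plus the existing pkg/archive Tar and Untar functions; the daemon itself wires in chroot-based untar and, for LCOW, remotefs-backed drivers. A minimal sketch, assuming a build of this tree:

package main

import (
	"log"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/containerfs"
	"github.com/docker/docker/pkg/idtools"
)

func main() {
	// Both sides are local here, so the plain archive functions satisfy the
	// TarFunc/UntarFunc signatures directly.
	a := &containerfs.Archiver{
		SrcDriver:     containerfs.NewLocalDriver(),
		DstDriver:     containerfs.NewLocalDriver(),
		Tar:           archive.TarWithOptions,
		Untar:         archive.Untar,
		IDMappingsVar: &idtools.IDMappings{},
	}
	if err := a.CopyWithTar("/tmp/src", "/tmp/dst"); err != nil {
		log.Fatal(err)
	}
}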
new file mode 100644
... ...
@@ -0,0 +1,87 @@
0
+package containerfs
1
+
2
+import (
3
+	"path/filepath"
4
+	"runtime"
5
+
6
+	"github.com/containerd/continuity/driver"
7
+	"github.com/containerd/continuity/pathdriver"
8
+	"github.com/docker/docker/pkg/symlink"
9
+)
10
+
11
+// ContainerFS is an interface that represents the root file system of a container
12
+type ContainerFS interface {
13
+	// Path returns the path to the root. Note that this may not exist
14
+	// on the local system, so the continuity operations must be used
15
+	Path() string
16
+
17
+	// ResolveScopedPath evaluates the given path scoped to the root.
18
+	// For example, if root=/a, and path=/b/c, then this function would return /a/b/c.
19
+	// If rawPath is true, then the function will not perform any modifications
20
+	// before path resolution. Otherwise, the function will clean the given path
21
+	// by making it an absolute path.
22
+	ResolveScopedPath(path string, rawPath bool) (string, error)
23
+
24
+	Driver
25
+}
26
+
27
+// Driver combines both continuity's Driver and PathDriver interfaces with OS and
28
+// Architecture accessors that report where the rootfs is located.
29
+type Driver interface {
30
+	// OS returns the OS where the rootfs is located. Essentially,
31
+	// runtime.GOOS for everything aside from LCOW, which is "linux"
32
+	OS() string
33
+
34
+	// Architecture returns the hardware architecture where the
35
+	// container is located.
36
+	Architecture() string
37
+
38
+	// Driver & PathDriver provide methods to manipulate files & paths
39
+	driver.Driver
40
+	pathdriver.PathDriver
41
+}
42
+
43
+// NewLocalContainerFS is a helper function to implement daemon's Mount interface
44
+// when the graphdriver mount point is a local path on the machine.
45
+func NewLocalContainerFS(path string) ContainerFS {
46
+	return &local{
47
+		path:       path,
48
+		Driver:     driver.LocalDriver,
49
+		PathDriver: pathdriver.LocalPathDriver,
50
+	}
51
+}
52
+
53
+// NewLocalDriver provides file and path drivers for a local file system. They are
54
+// essentially a wrapper around the `os` and `filepath` functions.
55
+func NewLocalDriver() Driver {
56
+	return &local{
57
+		Driver:     driver.LocalDriver,
58
+		PathDriver: pathdriver.LocalPathDriver,
59
+	}
60
+}
61
+
62
+type local struct {
63
+	path string
64
+	driver.Driver
65
+	pathdriver.PathDriver
66
+}
67
+
68
+func (l *local) Path() string {
69
+	return l.path
70
+}
71
+
72
+func (l *local) ResolveScopedPath(path string, rawPath bool) (string, error) {
73
+	cleanedPath := path
74
+	if !rawPath {
75
+		cleanedPath = cleanScopedPath(path)
76
+	}
77
+	return symlink.FollowSymlinkInScope(filepath.Join(l.path, cleanedPath), l.path)
78
+}
79
+
80
+func (l *local) OS() string {
81
+	return runtime.GOOS
82
+}
83
+
84
+func (l *local) Architecture() string {
85
+	return runtime.GOARCH
86
+}
0 87
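A short sketch of how a caller is expected to use the abstraction: wrap a graphdriver mount point with NewLocalContainerFS, then do all path manipulation and scoped resolution through the interface. The mount path below is made up.

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/containerfs"
)

func main() {
	// Hypothetical overlay2 mount point.
	root := containerfs.NewLocalContainerFS("/var/lib/docker/overlay2/abc/merged")

	// Path manipulation goes through the embedded PathDriver, so the same
	// code works whether the rootfs is local or remote (LCOW).
	p := root.Join(root.Path(), "etc", "hosts")
	fmt.Println(p)

	// Symlink-safe resolution scoped to the rootfs.
	resolved, err := root.ResolveScopedPath("/etc/hosts", false)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resolved)
}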
new file mode 100644
... ...
@@ -0,0 +1,10 @@
0
+// +build !windows
1
+
2
+package containerfs
3
+
4
+import "path/filepath"
5
+
6
+// cleanScopedPath prepends a '/' to the path so it can be combined with a mount path.
7
+func cleanScopedPath(path string) string {
8
+	return filepath.Join(string(filepath.Separator), path)
9
+}
0 10
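Because filepath.Join cleans its result, rooting the path this way also squashes leading ".." elements, which is what keeps scoped resolution inside the rootfs. A small sketch of the behaviour on a Unix host; the helper is unexported, so it is re-declared here for illustration:

package main

import (
	"fmt"
	"path/filepath"
)

// Same logic as the unexported cleanScopedPath above (Unix build).
func cleanScopedPath(path string) string {
	return filepath.Join(string(filepath.Separator), path)
}

func main() {
	fmt.Println(cleanScopedPath("etc/hosts"))        // /etc/hosts
	fmt.Println(cleanScopedPath("/etc/hosts"))       // /etc/hosts
	fmt.Println(cleanScopedPath("../../etc/passwd")) // /etc/passwd - ".." cannot escape the root
}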
new file mode 100644
... ...
@@ -0,0 +1,15 @@
0
+package containerfs
1
+
2
+import "path/filepath"
3
+
4
+// cleanScopedPath removes the C:\ drive-letter prefix and prepares the path to combine
5
+// with a volume path
6
+func cleanScopedPath(path string) string {
7
+	if len(path) >= 2 {
8
+		c := path[0]
9
+		if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {
10
+			path = path[2:]
11
+		}
12
+	}
13
+	return filepath.Join(string(filepath.Separator), path)
14
+}
0 15
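Since both variants of `cleanScopedPath` are unexported, their behaviour is easiest to show with a package-level test sketch. The test below is hypothetical (it is not part of this change) and covers only the Unix variant to avoid build-tag juggling; the Windows variant behaves the same way after stripping the drive-letter prefix.

```go
// +build !windows

package containerfs

import "testing"

// TestCleanScopedPathExamples is a hypothetical table test illustrating
// cleanScopedPath on Unix: the path is rooted at "/" and lexically cleaned.
func TestCleanScopedPathExamples(t *testing.T) {
	cases := map[string]string{
		"a/../b":   "/b",
		"/etc/../": "/",
		"foo":      "/foo",
	}
	for in, want := range cases {
		if got := cleanScopedPath(in); got != want {
			t.Fatalf("cleanScopedPath(%q) = %q, want %q", in, got, want)
		}
	}
}
```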
new file mode 100644
... ...
@@ -0,0 +1,7 @@
0
+// +build !windows
1
+
2
+package system
3
+
4
+// InitLCOW does nothing since LCOW is a Windows-only feature
5
+func InitLCOW(experimental bool) {
6
+}
... ...
@@ -8,9 +8,10 @@ import "os"
8 8
 // on build number. @jhowardmsft
9 9
 var lcowSupported = false
10 10
 
11
-func init() {
11
+// InitLCOW sets whether LCOW is supported or not
12
+func InitLCOW(experimental bool) {
12 13
 	// LCOW initialization
13
-	if os.Getenv("LCOW_SUPPORTED") != "" {
14
+	if experimental && os.Getenv("LCOW_SUPPORTED") != "" {
14 15
 		lcowSupported = true
15 16
 	}
16 17
 
... ...
@@ -1,6 +1,13 @@
1 1
 package system
2 2
 
3
-import "runtime"
3
+import (
4
+	"fmt"
5
+	"path/filepath"
6
+	"runtime"
7
+	"strings"
8
+
9
+	"github.com/containerd/continuity/pathdriver"
10
+)
4 11
 
5 12
 const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
6 13
 
... ...
@@ -19,3 +26,35 @@ func DefaultPathEnv(platform string) string {
19 19
 	return defaultUnixPathEnv
20 20
 
21 21
 }
22
+
23
+// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
24
+// is the system drive.
25
+// On Linux: this is a no-op.
26
+// On Windows: this does the following:
27
+// it verifies and manipulates a Windows path.
28
+// This is used, for example, when validating a user provided path in docker cp.
29
+// If a drive letter is supplied, it must be the system drive. The drive letter
30
+// is always removed. Also, it translates it to OS semantics (IOW / to \). We
31
+// need the path in this syntax so that it can ultimately be concatenated with
32
+// a Windows long-path which doesn't support drive-letters. Examples:
33
+// C:			--> Fail
34
+// C:\			--> \
35
+// a			--> a
36
+// /a			--> \a
37
+// d:\			--> Fail
38
+func CheckSystemDriveAndRemoveDriveLetter(path string, driver pathdriver.PathDriver) (string, error) {
39
+	if runtime.GOOS != "windows" || LCOWSupported() {
40
+		return path, nil
41
+	}
42
+
43
+	if len(path) == 2 && string(path[1]) == ":" {
44
+		return "", fmt.Errorf("No relative path specified in %q", path)
45
+	}
46
+	if !driver.IsAbs(path) || len(path) < 2 {
47
+		return filepath.FromSlash(path), nil
48
+	}
49
+	if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
50
+		return "", fmt.Errorf("The specified path is not on the system drive (C:)")
51
+	}
52
+	return filepath.FromSlash(path[2:]), nil
53
+}
22 54
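A short sketch of the new signature in use. The behaviour shown in the comments applies when running on Windows without LCOW; on Linux, or when LCOW is enabled, the function returns the path unchanged.

```go
package main

import (
	"fmt"

	"github.com/containerd/continuity/pathdriver"
	"github.com/docker/docker/pkg/system"
)

func main() {
	// On Windows (non-LCOW) the drive letter is stripped and the result
	// uses backslash separators: `c:\windows` -> `\windows`.
	p, err := system.CheckSystemDriveAndRemoveDriveLetter(`c:\windows`, pathdriver.LocalPathDriver)
	fmt.Println(p, err)

	// Paths on any drive other than the system drive are rejected.
	_, err = system.CheckSystemDriveAndRemoveDriveLetter(`d:\data`, pathdriver.LocalPathDriver)
	fmt.Println(err)
}
```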
deleted file mode 100644
... ...
@@ -1,9 +0,0 @@
1
-// +build !windows
2
-
3
-package system
4
-
5
-// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
6
-// is the system drive. This is a no-op on Linux.
7
-func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
8
-	return path, nil
9
-}
10 1
deleted file mode 100644
... ...
@@ -1,33 +0,0 @@
1
-// +build windows
2
-
3
-package system
4
-
5
-import (
6
-	"fmt"
7
-	"path/filepath"
8
-	"strings"
9
-)
10
-
11
-// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
12
-// This is used, for example, when validating a user provided path in docker cp.
13
-// If a drive letter is supplied, it must be the system drive. The drive letter
14
-// is always removed. Also, it translates it to OS semantics (IOW / to \). We
15
-// need the path in this syntax so that it can ultimately be concatenated with
16
-// a Windows long-path which doesn't support drive-letters. Examples:
17
-// C:			--> Fail
18
-// C:\			--> \
19
-// a			--> a
20
-// /a			--> \a
21
-// d:\			--> Fail
22
-func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
23
-	if len(path) == 2 && string(path[1]) == ":" {
24
-		return "", fmt.Errorf("No relative path specified in %q", path)
25
-	}
26
-	if !filepath.IsAbs(path) || len(path) < 2 {
27
-		return filepath.FromSlash(path), nil
28
-	}
29
-	if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
30
-		return "", fmt.Errorf("The specified path is not on the system drive (C:)")
31
-	}
32
-	return filepath.FromSlash(path[2:]), nil
33
-}
... ...
@@ -2,19 +2,23 @@
2 2
 
3 3
 package system
4 4
 
5
-import "testing"
5
+import (
6
+	"testing"
7
+
8
+	"github.com/containerd/continuity/pathdriver"
9
+)
6 10
 
7 11
 // TestCheckSystemDriveAndRemoveDriveLetter tests CheckSystemDriveAndRemoveDriveLetter
8 12
 func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) {
9 13
 	// Fails if not C drive.
10
-	_, err := CheckSystemDriveAndRemoveDriveLetter(`d:\`)
14
+	_, err := CheckSystemDriveAndRemoveDriveLetter(`d:\`, pathdriver.LocalPathDriver)
11 15
 	if err == nil || (err != nil && err.Error() != "The specified path is not on the system drive (C:)") {
12 16
 		t.Fatalf("Expected error for d:")
13 17
 	}
14 18
 
15 19
 	// Single character is unchanged
16 20
 	var path string
17
-	if path, err = CheckSystemDriveAndRemoveDriveLetter("z"); err != nil {
21
+	if path, err = CheckSystemDriveAndRemoveDriveLetter("z", pathdriver.LocalPathDriver); err != nil {
18 22
 		t.Fatalf("Single character should pass")
19 23
 	}
20 24
 	if path != "z" {
... ...
@@ -22,7 +26,7 @@ func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) {
22 22
 	}
23 23
 
24 24
 	// Two characters without colon is unchanged
25
-	if path, err = CheckSystemDriveAndRemoveDriveLetter("AB"); err != nil {
25
+	if path, err = CheckSystemDriveAndRemoveDriveLetter("AB", pathdriver.LocalPathDriver); err != nil {
26 26
 		t.Fatalf("2 characters without colon should pass")
27 27
 	}
28 28
 	if path != "AB" {
... ...
@@ -30,7 +34,7 @@ func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) {
30 30
 	}
31 31
 
32 32
 	// Abs path without drive letter
33
-	if path, err = CheckSystemDriveAndRemoveDriveLetter(`\l`); err != nil {
33
+	if path, err = CheckSystemDriveAndRemoveDriveLetter(`\l`, pathdriver.LocalPathDriver); err != nil {
34 34
 		t.Fatalf("abs path no drive letter should pass")
35 35
 	}
36 36
 	if path != `\l` {
... ...
@@ -38,7 +42,7 @@ func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) {
38 38
 	}
39 39
 
40 40
 	// Abs path without drive letter, linux style
41
-	if path, err = CheckSystemDriveAndRemoveDriveLetter(`/l`); err != nil {
41
+	if path, err = CheckSystemDriveAndRemoveDriveLetter(`/l`, pathdriver.LocalPathDriver); err != nil {
42 42
 		t.Fatalf("abs path no drive letter linux style should pass")
43 43
 	}
44 44
 	if path != `\l` {
... ...
@@ -46,7 +50,7 @@ func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) {
46 46
 	}
47 47
 
48 48
 	// Drive-colon should be stripped
49
-	if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:\`); err != nil {
49
+	if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:\`, pathdriver.LocalPathDriver); err != nil {
50 50
 		t.Fatalf("An absolute path should pass")
51 51
 	}
52 52
 	if path != `\` {
... ...
@@ -54,7 +58,7 @@ func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) {
54 54
 	}
55 55
 
56 56
 	// Verify with a linux-style path
57
-	if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:/`); err != nil {
57
+	if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:/`, pathdriver.LocalPathDriver); err != nil {
58 58
 		t.Fatalf("An absolute path should pass")
59 59
 	}
60 60
 	if path != `\` {
... ...
@@ -62,7 +66,7 @@ func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) {
62 62
 	}
63 63
 
64 64
 	// Failure on c:
65
-	if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:`); err == nil {
65
+	if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:`, pathdriver.LocalPathDriver); err == nil {
66 66
 		t.Fatalf("c: should fail")
67 67
 	}
68 68
 	if err.Error() != `No relative path specified in "c:"` {
... ...
@@ -70,7 +74,7 @@ func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) {
70 70
 	}
71 71
 
72 72
 	// Failure on d:
73
-	if path, err = CheckSystemDriveAndRemoveDriveLetter(`d:`); err == nil {
73
+	if path, err = CheckSystemDriveAndRemoveDriveLetter(`d:`, pathdriver.LocalPathDriver); err == nil {
74 74
 		t.Fatalf("c: should fail")
75 75
 	}
76 76
 	if err.Error() != `No relative path specified in "d:"` {
... ...
@@ -12,6 +12,7 @@ import (
12 12
 	"github.com/docker/docker/api/types"
13 13
 	"github.com/docker/docker/daemon/initlayer"
14 14
 	"github.com/docker/docker/libcontainerd"
15
+	"github.com/docker/docker/pkg/containerfs"
15 16
 	"github.com/docker/docker/pkg/idtools"
16 17
 	"github.com/docker/docker/pkg/mount"
17 18
 	"github.com/docker/docker/pkg/plugins"
... ...
@@ -57,7 +58,8 @@ func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error {
57 57
 		}
58 58
 	}
59 59
 
60
-	if err := initlayer.Setup(filepath.Join(pm.config.Root, p.PluginObj.ID, rootFSFileName), idtools.IDPair{0, 0}); err != nil {
60
+	rootFS := containerfs.NewLocalContainerFS(filepath.Join(pm.config.Root, p.PluginObj.ID, rootFSFileName))
61
+	if err := initlayer.Setup(rootFS, idtools.IDPair{0, 0}); err != nil {
61 62
 		return errors.WithStack(err)
62 63
 	}
63 64
 
... ...
@@ -27,6 +27,8 @@ github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5
27 27
 github.com/imdario/mergo 0.2.1
28 28
 golang.org/x/sync de49d9dcd27d4f764488181bea099dfe6179bcf0
29 29
 
30
+github.com/containerd/continuity 22694c680ee48fb8f50015b44618517e2bde77e8
31
+
30 32
 #get libnetwork packages
31 33
 github.com/docker/libnetwork d5c822319097cc01cc9bd5ffedd74c7ce7c894f2
32 34
 github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
33 35
new file mode 100644
... ...
@@ -0,0 +1,4 @@
0
+1. This program only runs on Linux, so first copy the files over to a Linux machine.
1
+2. Install Go and then run make get-deps && make. This sets the $GOPATH for you and builds the binaries.
2
+3. vhd_to_tar and tar_to_vhd are the standalone executables that read/write to stdin/out and do the tar <-> vhd conversion.
3
+   tar2vhd_server is the service VM server that takes client requests over hvsock.
0 4
new file mode 100644
... ...
@@ -0,0 +1,109 @@
0
+package remotefs
1
+
2
+import (
3
+	"errors"
4
+	"os"
5
+	"time"
6
+)
7
+
8
+// RemotefsCmd is the name of the remotefs meta command
9
+const RemotefsCmd = "remotefs"
10
+
11
+// Names of the commands when called from the CLI context (remotefs <CMD> ...)
12
+const (
13
+	StatCmd           = "stat"
14
+	LstatCmd          = "lstat"
15
+	ReadlinkCmd       = "readlink"
16
+	MkdirCmd          = "mkdir"
17
+	MkdirAllCmd       = "mkdirall"
18
+	RemoveCmd         = "remove"
19
+	RemoveAllCmd      = "removeall"
20
+	LinkCmd           = "link"
21
+	SymlinkCmd        = "symlink"
22
+	LchmodCmd         = "lchmod"
23
+	LchownCmd         = "lchown"
24
+	MknodCmd          = "mknod"
25
+	MkfifoCmd         = "mkfifo"
26
+	OpenFileCmd       = "openfile"
27
+	ReadFileCmd       = "readfile"
28
+	WriteFileCmd      = "writefile"
29
+	ReadDirCmd        = "readdir"
30
+	ResolvePathCmd    = "resolvepath"
31
+	ExtractArchiveCmd = "extractarchive"
32
+	ArchivePathCmd    = "archivepath"
33
+)
34
+
35
+// ErrInvalid is returned if the parameters are invalid
36
+var ErrInvalid = errors.New("invalid arguments")
37
+
38
+// ErrUnknown is returned for an unknown remotefs command
39
+var ErrUnknown = errors.New("unknown command")
40
+
41
+// ExportedError is the serialized version of a Go error.
42
+// It also provides a trivial implementation of the error interface.
43
+type ExportedError struct {
44
+	ErrString string
45
+	ErrNum    int `json:",omitempty"`
46
+}
47
+
48
+// Error returns an error string
49
+func (ee *ExportedError) Error() string {
50
+	return ee.ErrString
51
+}
52
+
53
+// FileInfo is the stat struct returned by the remotefs system. It
54
+// fulfills the os.FileInfo interface.
55
+type FileInfo struct {
56
+	NameVar    string
57
+	SizeVar    int64
58
+	ModeVar    os.FileMode
59
+	ModTimeVar int64 // Serialization of time.Time breaks in travis, so use an int
60
+	IsDirVar   bool
61
+}
62
+
63
+var _ os.FileInfo = &FileInfo{}
64
+
65
+// Name returns the filename from a FileInfo structure
66
+func (f *FileInfo) Name() string { return f.NameVar }
67
+
68
+// Size returns the size from a FileInfo structure
69
+func (f *FileInfo) Size() int64 { return f.SizeVar }
70
+
71
+// Mode returns the mode from a FileInfo structure
72
+func (f *FileInfo) Mode() os.FileMode { return f.ModeVar }
73
+
74
+// ModTime returns the modification time from a FileInfo structure
75
+func (f *FileInfo) ModTime() time.Time { return time.Unix(0, f.ModTimeVar) }
76
+
77
+// IsDir returns the is-directory indicator from a FileInfo structure
78
+func (f *FileInfo) IsDir() bool { return f.IsDirVar }
79
+
80
+// Sys provides an interface to a FileInfo structure
81
+func (f *FileInfo) Sys() interface{} { return nil }
82
+
83
+// FileHeader is a header for remote *os.File operations for remotefs.OpenFile
84
+type FileHeader struct {
85
+	Cmd  uint32
86
+	Size uint64
87
+}
88
+
89
+const (
90
+	// Read request command.
91
+	Read uint32 = iota
92
+	// Write request command.
93
+	Write
94
+	// Seek request command.
95
+	Seek
96
+	// Close request command.
97
+	Close
98
+	// CmdOK is a response meaning request succeeded.
99
+	CmdOK
100
+	// CmdFailed is a response meaning request failed.
101
+	CmdFailed
102
+)
103
+
104
+// SeekHeader is the header for the Seek operation for remotefs.OpenFile
105
+type SeekHeader struct {
106
+	Offset int64
107
+	Whence int32
108
+}
0 109
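The `FileHeader` and `SeekHeader` structs are written to the wire with `encoding/binary` in big-endian order (see `ReadFileHeader`/`WriteFileHeader` later in this change). Below is a minimal sketch of what a Seek request looks like on the wire; it uses local copies of the structs purely for illustration.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// Local copies of the remotefs structs, for illustration only.
type FileHeader struct {
	Cmd  uint32
	Size uint64
}

type SeekHeader struct {
	Offset int64
	Whence int32
}

func main() {
	// A Seek request: a FileHeader (Cmd=Seek, Size=0) followed immediately
	// by a SeekHeader, all encoded big-endian.
	const seekCmd = 2 // iota order in defs.go: Read=0, Write=1, Seek=2
	buf := &bytes.Buffer{}
	binary.Write(buf, binary.BigEndian, &FileHeader{Cmd: seekCmd})
	binary.Write(buf, binary.BigEndian, &SeekHeader{Offset: 128, Whence: 0})
	fmt.Printf("% x\n", buf.Bytes()) // 12 header bytes + 12 seek bytes
}
```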
new file mode 100644
... ...
@@ -0,0 +1,546 @@
0
+// +build !windows
1
+
2
+package remotefs
3
+
4
+import (
5
+	"bytes"
6
+	"encoding/binary"
7
+	"encoding/json"
8
+	"io"
9
+	"os"
10
+	"path/filepath"
11
+	"strconv"
12
+
13
+	"github.com/docker/docker/pkg/archive"
14
+	"github.com/docker/docker/pkg/symlink"
15
+	"golang.org/x/sys/unix"
16
+)
17
+
18
+// Func is the function definition for a generic remotefs function.
19
+// The input to the function is any serialized structs / data from in and the string slice
20
+// from args. The output of the function will be serialized and written to out.
21
+type Func func(stdin io.Reader, stdout io.Writer, args []string) error
22
+
23
+// Commands provide a string -> remotefs function mapping.
24
+// This is useful for commandline programs that will receive a string
25
+// as the function to execute.
26
+var Commands = map[string]Func{
27
+	StatCmd:           Stat,
28
+	LstatCmd:          Lstat,
29
+	ReadlinkCmd:       Readlink,
30
+	MkdirCmd:          Mkdir,
31
+	MkdirAllCmd:       MkdirAll,
32
+	RemoveCmd:         Remove,
33
+	RemoveAllCmd:      RemoveAll,
34
+	LinkCmd:           Link,
35
+	SymlinkCmd:        Symlink,
36
+	LchmodCmd:         Lchmod,
37
+	LchownCmd:         Lchown,
38
+	MknodCmd:          Mknod,
39
+	MkfifoCmd:         Mkfifo,
40
+	OpenFileCmd:       OpenFile,
41
+	ReadFileCmd:       ReadFile,
42
+	WriteFileCmd:      WriteFile,
43
+	ReadDirCmd:        ReadDir,
44
+	ResolvePathCmd:    ResolvePath,
45
+	ExtractArchiveCmd: ExtractArchive,
46
+	ArchivePathCmd:    ArchivePath,
47
+}
48
+
49
+// Stat functions like os.Stat.
50
+// Args:
51
+// - args[0] is the path
52
+// Out:
53
+// - out = FileInfo object
54
+func Stat(in io.Reader, out io.Writer, args []string) error {
55
+	return stat(in, out, args, os.Stat)
56
+}
57
+
58
+// Lstat functions like os.Lstat.
59
+// Args:
60
+// - args[0] is the path
61
+// Out:
62
+// - out = FileInfo object
63
+func Lstat(in io.Reader, out io.Writer, args []string) error {
64
+	return stat(in, out, args, os.Lstat)
65
+}
66
+
67
+func stat(in io.Reader, out io.Writer, args []string, statfunc func(string) (os.FileInfo, error)) error {
68
+	if len(args) < 1 {
69
+		return ErrInvalid
70
+	}
71
+
72
+	fi, err := statfunc(args[0])
73
+	if err != nil {
74
+		return err
75
+	}
76
+
77
+	info := FileInfo{
78
+		NameVar:    fi.Name(),
79
+		SizeVar:    fi.Size(),
80
+		ModeVar:    fi.Mode(),
81
+		ModTimeVar: fi.ModTime().UnixNano(),
82
+		IsDirVar:   fi.IsDir(),
83
+	}
84
+
85
+	buf, err := json.Marshal(info)
86
+	if err != nil {
87
+		return err
88
+	}
89
+
90
+	if _, err := out.Write(buf); err != nil {
91
+		return err
92
+	}
93
+	return nil
94
+}
95
+
96
+// Readlink works like os.Readlink
97
+// In:
98
+//  - args[0] is path
99
+// Out:
100
+//  - Write link result to out
101
+func Readlink(in io.Reader, out io.Writer, args []string) error {
102
+	if len(args) < 1 {
103
+		return ErrInvalid
104
+	}
105
+
106
+	l, err := os.Readlink(args[0])
107
+	if err != nil {
108
+		return err
109
+	}
110
+
111
+	if _, err := out.Write([]byte(l)); err != nil {
112
+		return err
113
+	}
114
+	return nil
115
+}
116
+
117
+// Mkdir works like os.Mkdir
118
+// Args:
119
+// - args[0] is the path
120
+// - args[1] is the permissions in octal (like 0755)
121
+func Mkdir(in io.Reader, out io.Writer, args []string) error {
122
+	return mkdir(in, out, args, os.Mkdir)
123
+}
124
+
125
+// MkdirAll works like os.MkdirAll.
126
+// Args:
127
+// - args[0] is the path
128
+// - args[1] is the permissions in octal (like 0755)
129
+func MkdirAll(in io.Reader, out io.Writer, args []string) error {
130
+	return mkdir(in, out, args, os.MkdirAll)
131
+}
132
+
133
+func mkdir(in io.Reader, out io.Writer, args []string, mkdirFunc func(string, os.FileMode) error) error {
134
+	if len(args) < 2 {
135
+		return ErrInvalid
136
+	}
137
+
138
+	perm, err := strconv.ParseUint(args[1], 8, 32)
139
+	if err != nil {
140
+		return err
141
+	}
142
+	return mkdirFunc(args[0], os.FileMode(perm))
143
+}
144
+
145
+// Remove works like os.Remove
146
+// Args:
147
+//	- args[0] is the path
148
+func Remove(in io.Reader, out io.Writer, args []string) error {
149
+	return remove(in, out, args, os.Remove)
150
+}
151
+
152
+// RemoveAll works like os.RemoveAll
153
+// Args:
154
+//  - args[0] is the path
155
+func RemoveAll(in io.Reader, out io.Writer, args []string) error {
156
+	return remove(in, out, args, os.RemoveAll)
157
+}
158
+
159
+func remove(in io.Reader, out io.Writer, args []string, removefunc func(string) error) error {
160
+	if len(args) < 1 {
161
+		return ErrInvalid
162
+	}
163
+	return removefunc(args[0])
164
+}
165
+
166
+// Link works like os.Link
167
+// Args:
168
+//  - args[0] = old path name (link source)
169
+//  - args[1] = new path name (link dest)
170
+func Link(in io.Reader, out io.Writer, args []string) error {
171
+	return link(in, out, args, os.Link)
172
+}
173
+
174
+// Symlink works like os.Symlink
175
+// Args:
176
+//  - args[0] = old path name (link source)
177
+//  - args[1] = new path name (link dest)
178
+func Symlink(in io.Reader, out io.Writer, args []string) error {
179
+	return link(in, out, args, os.Symlink)
180
+}
181
+
182
+func link(in io.Reader, out io.Writer, args []string, linkfunc func(string, string) error) error {
183
+	if len(args) < 2 {
184
+		return ErrInvalid
185
+	}
186
+	return linkfunc(args[0], args[1])
187
+}
188
+
189
+// Lchmod changes permission of the given file without following symlinks
190
+// Args:
191
+//  - args[0] = path
192
+//  - args[1] = permission mode in octal (like 0755)
193
+func Lchmod(in io.Reader, out io.Writer, args []string) error {
194
+	if len(args) < 2 {
195
+		return ErrInvalid
196
+	}
197
+
198
+	perm, err := strconv.ParseUint(args[1], 8, 32)
199
+	if err != nil {
200
+		return err
201
+	}
202
+
203
+	path := args[0]
204
+	if !filepath.IsAbs(path) {
205
+		path, err = filepath.Abs(path)
206
+		if err != nil {
207
+			return err
208
+		}
209
+	}
210
+	return unix.Fchmodat(0, path, uint32(perm), unix.AT_SYMLINK_NOFOLLOW)
211
+}
212
+
213
+// Lchown works like os.Lchown
214
+// Args:
215
+//  - args[0] = path
216
+//  - args[1] = uid in base 10
217
+//  - args[2] = gid in base 10
218
+func Lchown(in io.Reader, out io.Writer, args []string) error {
219
+	if len(args) < 3 {
220
+		return ErrInvalid
221
+	}
222
+
223
+	uid, err := strconv.ParseInt(args[1], 10, 64)
224
+	if err != nil {
225
+		return err
226
+	}
227
+
228
+	gid, err := strconv.ParseInt(args[2], 10, 64)
229
+	if err != nil {
230
+		return err
231
+	}
232
+	return os.Lchown(args[0], int(uid), int(gid))
233
+}
234
+
235
+// Mknod works like syscall.Mknod
236
+// Args:
237
+//  - args[0] = path
238
+//  - args[1] = permission mode in octal (like 0755)
239
+//  - args[2] = major device number in base 10
240
+//  - args[3] = minor device number in base 10
241
+func Mknod(in io.Reader, out io.Writer, args []string) error {
242
+	if len(args) < 4 {
243
+		return ErrInvalid
244
+	}
245
+
246
+	perm, err := strconv.ParseUint(args[1], 8, 32)
247
+	if err != nil {
248
+		return err
249
+	}
250
+
251
+	major, err := strconv.ParseInt(args[2], 10, 32)
252
+	if err != nil {
253
+		return err
254
+	}
255
+
256
+	minor, err := strconv.ParseInt(args[3], 10, 32)
257
+	if err != nil {
258
+		return err
259
+	}
260
+
261
+	dev := unix.Mkdev(uint32(major), uint32(minor))
262
+	return unix.Mknod(args[0], uint32(perm), int(dev))
263
+}
264
+
265
+// Mkfifo creates a FIFO special file with the given path name and permissions
266
+// Args:
267
+// 	- args[0] = path
268
+//  - args[1] = permission mode in octal (like 0755)
269
+func Mkfifo(in io.Reader, out io.Writer, args []string) error {
270
+	if len(args) < 2 {
271
+		return ErrInvalid
272
+	}
273
+
274
+	perm, err := strconv.ParseUint(args[1], 8, 32)
275
+	if err != nil {
276
+		return err
277
+	}
278
+	return unix.Mkfifo(args[0], uint32(perm))
279
+}
280
+
281
+// OpenFile works like os.OpenFile. To manage the file pointer state,
282
+// this function acts as a single file "file server" with Read/Write/Close
283
+// being serialized control codes from in.
284
+// Args:
285
+//  - args[0] = path
286
+//  - args[1] = flag in base 10
287
+//  - args[2] = permission mode in octal (like 0755)
288
+func OpenFile(in io.Reader, out io.Writer, args []string) (err error) {
289
+	defer func() {
290
+		if err != nil {
291
+			// error code will be serialized by the caller, so don't write it here
292
+			WriteFileHeader(out, &FileHeader{Cmd: CmdFailed}, nil)
293
+		}
294
+	}()
295
+
296
+	if len(args) < 3 {
297
+		return ErrInvalid
298
+	}
299
+
300
+	flag, err := strconv.ParseInt(args[1], 10, 32)
301
+	if err != nil {
302
+		return err
303
+	}
304
+
305
+	perm, err := strconv.ParseUint(args[2], 8, 32)
306
+	if err != nil {
307
+		return err
308
+	}
309
+
310
+	f, err := os.OpenFile(args[0], int(flag), os.FileMode(perm))
311
+	if err != nil {
312
+		return err
313
+	}
314
+
315
+	// Signal the client that OpenFile succeeded
316
+	if err := WriteFileHeader(out, &FileHeader{Cmd: CmdOK}, nil); err != nil {
317
+		return err
318
+	}
319
+
320
+	for {
321
+		hdr, err := ReadFileHeader(in)
322
+		if err != nil {
323
+			return err
324
+		}
325
+
326
+		var buf []byte
327
+		switch hdr.Cmd {
328
+		case Read:
329
+			buf = make([]byte, hdr.Size, hdr.Size)
330
+			n, err := f.Read(buf)
331
+			if err != nil {
332
+				return err
333
+			}
334
+			buf = buf[:n]
335
+		case Write:
336
+			if _, err := io.CopyN(f, in, int64(hdr.Size)); err != nil {
337
+				return err
338
+			}
339
+		case Seek:
340
+			seekHdr := &SeekHeader{}
341
+			if err := binary.Read(in, binary.BigEndian, seekHdr); err != nil {
342
+				return err
343
+			}
344
+			res, err := f.Seek(seekHdr.Offset, int(seekHdr.Whence))
345
+			if err != nil {
346
+				return err
347
+			}
348
+			buffer := &bytes.Buffer{}
349
+			if err := binary.Write(buffer, binary.BigEndian, res); err != nil {
350
+				return err
351
+			}
352
+			buf = buffer.Bytes()
353
+		case Close:
354
+			if err := f.Close(); err != nil {
355
+				return err
356
+			}
357
+		default:
358
+			return ErrUnknown
359
+		}
360
+
361
+		retHdr := &FileHeader{
362
+			Cmd:  CmdOK,
363
+			Size: uint64(len(buf)),
364
+		}
365
+		if err := WriteFileHeader(out, retHdr, buf); err != nil {
366
+			return err
367
+		}
368
+
369
+		if hdr.Cmd == Close {
370
+			break
371
+		}
372
+	}
373
+	return nil
374
+}
375
+
376
+// ReadFile works like ioutil.ReadFile but instead writes the file to a writer
377
+// Args:
378
+//  - args[0] = path
379
+// Out:
380
+//  - Write file contents to out
381
+func ReadFile(in io.Reader, out io.Writer, args []string) error {
382
+	if len(args) < 1 {
383
+		return ErrInvalid
384
+	}
385
+
386
+	f, err := os.Open(args[0])
387
+	if err != nil {
388
+		return err
389
+	}
390
+	defer f.Close()
391
+
392
+	if _, err := io.Copy(out, f); err != nil {
393
+		return err
394
+	}
395
+	return nil
396
+}
397
+
398
+// WriteFile works like ioutil.WriteFile but instead reads the file from a reader
399
+// Args:
400
+//  - args[0] = path
401
+//  - args[1] = permission mode in octal (like 0755)
402
+//  - input data stream from in
403
+func WriteFile(in io.Reader, out io.Writer, args []string) error {
404
+	if len(args) < 2 {
405
+		return ErrInvalid
406
+	}
407
+
408
+	perm, err := strconv.ParseUint(args[1], 8, 32)
409
+	if err != nil {
410
+		return err
411
+	}
412
+
413
+	f, err := os.OpenFile(args[0], os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.FileMode(perm))
414
+	if err != nil {
415
+		return err
416
+	}
417
+	defer f.Close()
418
+
419
+	if _, err := io.Copy(f, in); err != nil {
420
+		return err
421
+	}
422
+	return nil
423
+}
424
+
425
+// ReadDir works like *os.File.Readdir but instead writes the result to a writer
426
+// Args:
427
+//  - args[0] = path
428
+//  - args[1] = number of directory entries to return. If <= 0, return all entries in directory
429
+func ReadDir(in io.Reader, out io.Writer, args []string) error {
430
+	if len(args) < 2 {
431
+		return ErrInvalid
432
+	}
433
+
434
+	n, err := strconv.ParseInt(args[1], 10, 32)
435
+	if err != nil {
436
+		return err
437
+	}
438
+
439
+	f, err := os.Open(args[0])
440
+	if err != nil {
441
+		return err
442
+	}
443
+	defer f.Close()
444
+
445
+	infos, err := f.Readdir(int(n))
446
+	if err != nil {
447
+		return err
448
+	}
449
+
450
+	fileInfos := make([]FileInfo, len(infos))
451
+	for i := range infos {
452
+		fileInfos[i] = FileInfo{
453
+			NameVar:    infos[i].Name(),
454
+			SizeVar:    infos[i].Size(),
455
+			ModeVar:    infos[i].Mode(),
456
+			ModTimeVar: infos[i].ModTime().UnixNano(),
457
+			IsDirVar:   infos[i].IsDir(),
458
+		}
459
+	}
460
+
461
+	buf, err := json.Marshal(fileInfos)
462
+	if err != nil {
463
+		return err
464
+	}
465
+
466
+	if _, err := out.Write(buf); err != nil {
467
+		return err
468
+	}
469
+	return nil
470
+}
471
+
472
+// ResolvePath works like docker's symlink.FollowSymlinkInScope.
473
+// It takes in a `path` and a `root` and evaluates symlinks in `path`
474
+// as if they were scoped in `root`. `path` must be a child path of `root`.
475
+// In other words, `path` must have `root` as a prefix.
476
+// Example:
477
+// path=/foo/bar -> /baz
478
+// root=/foo,
479
+// Expected result = /foo/baz
480
+//
481
+// Args:
482
+// - args[0] is `path`
483
+// - args[1] is `root`
484
+// Out:
485
+// - Write resolved path to stdout
486
+func ResolvePath(in io.Reader, out io.Writer, args []string) error {
487
+	if len(args) < 2 {
488
+		return ErrInvalid
489
+	}
490
+	res, err := symlink.FollowSymlinkInScope(args[0], args[1])
491
+	if err != nil {
492
+		return err
493
+	}
494
+	if _, err = out.Write([]byte(res)); err != nil {
495
+		return err
496
+	}
497
+	return nil
498
+}
499
+
500
+// ExtractArchive extracts the archive read from in.
501
+// Args:
502
+// - in = size of json | json of archive.TarOptions | input tar stream
503
+// - args[0] = extract directory name
504
+func ExtractArchive(in io.Reader, out io.Writer, args []string) error {
505
+	if len(args) < 1 {
506
+		return ErrInvalid
507
+	}
508
+
509
+	opts, err := ReadTarOptions(in)
510
+	if err != nil {
511
+		return err
512
+	}
513
+
514
+	if err := archive.Untar(in, args[0], opts); err != nil {
515
+		return err
516
+	}
517
+	return nil
518
+}
519
+
520
+// ArchivePath archives the given directory and writes it to out.
521
+// Args:
522
+// - in = size of json | json of archive.TarOptions
523
+// - args[0] = source directory name
524
+// Out:
525
+// - out = tar file of the archive
526
+func ArchivePath(in io.Reader, out io.Writer, args []string) error {
527
+	if len(args) < 1 {
528
+		return ErrInvalid
529
+	}
530
+
531
+	opts, err := ReadTarOptions(in)
532
+	if err != nil {
533
+		return err
534
+	}
535
+
536
+	r, err := archive.TarWithOptions(args[0], opts)
537
+	if err != nil {
538
+		return err
539
+	}
540
+
541
+	if _, err := io.Copy(out, r); err != nil {
542
+		return err
543
+	}
544
+	return nil
545
+}
0 546
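Each command reads any extra input from `in`, writes its result to `out`, and reports problems through its return error, which the caller serializes. A hedged in-package sketch of calling `Stat` directly and decoding the JSON result (hypothetical example code, not part of the change):

```go
package remotefs

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// ExampleStat is a hypothetical illustration of the command calling
// convention; it assumes it lives in a test file of this package.
func ExampleStat() {
	out := &bytes.Buffer{}
	if err := Stat(nil, out, []string{"/etc/hostname"}); err != nil {
		fmt.Println("stat failed:", err)
		return
	}

	var fi FileInfo
	if err := json.Unmarshal(out.Bytes(), &fi); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println(fi.Name(), fi.Size(), fi.IsDir())
}
```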
new file mode 100644
... ...
@@ -0,0 +1,168 @@
0
+package remotefs
1
+
2
+import (
3
+	"bytes"
4
+	"encoding/binary"
5
+	"encoding/json"
6
+	"io"
7
+	"io/ioutil"
8
+	"os"
9
+	"syscall"
10
+
11
+	"github.com/docker/docker/pkg/archive"
12
+)
13
+
14
+// ReadError is a utility function that reads a serialized error from the given reader
15
+// and deserializes it.
16
+func ReadError(in io.Reader) (*ExportedError, error) {
17
+	b, err := ioutil.ReadAll(in)
18
+	if err != nil {
19
+		return nil, err
20
+	}
21
+
22
+	// No error
23
+	if len(b) == 0 {
24
+		return nil, nil
25
+	}
26
+
27
+	var exportedErr ExportedError
28
+	if err := json.Unmarshal(b, &exportedErr); err != nil {
29
+		return nil, err
30
+	}
31
+
32
+	return &exportedErr, nil
33
+}
34
+
35
+// ExportedToError will convert an ExportedError to an error. It will try to match
36
+// the error to any existing known error like os.ErrNotExist. Otherwise, it will just
37
+// return an implementation of the error interface.
38
+func ExportedToError(ee *ExportedError) error {
39
+	if ee.Error() == os.ErrNotExist.Error() {
40
+		return os.ErrNotExist
41
+	} else if ee.Error() == os.ErrExist.Error() {
42
+		return os.ErrExist
43
+	} else if ee.Error() == os.ErrPermission.Error() {
44
+		return os.ErrPermission
45
+	}
46
+	return ee
47
+}
48
+
49
+// WriteError is a utility function that serializes the error
50
+// and writes it to the output writer.
51
+func WriteError(err error, out io.Writer) error {
52
+	if err == nil {
53
+		return nil
54
+	}
55
+	err = fixOSError(err)
56
+
57
+	var errno int
58
+	switch typedError := err.(type) {
59
+	case *os.PathError:
60
+		if se, ok := typedError.Err.(syscall.Errno); ok {
61
+			errno = int(se)
62
+		}
63
+	case *os.LinkError:
64
+		if se, ok := typedError.Err.(syscall.Errno); ok {
65
+			errno = int(se)
66
+		}
67
+	case *os.SyscallError:
68
+		if se, ok := typedError.Err.(syscall.Errno); ok {
69
+			errno = int(se)
70
+		}
71
+	}
72
+
73
+	exportedError := &ExportedError{
74
+		ErrString: err.Error(),
75
+		ErrNum:    errno,
76
+	}
77
+
78
+	b, err1 := json.Marshal(exportedError)
79
+	if err1 != nil {
80
+		return err1
81
+	}
82
+
83
+	_, err1 = out.Write(b)
84
+	if err1 != nil {
85
+		return err1
86
+	}
87
+	return nil
88
+}
89
+
90
+// fixOSError converts a platform-dependent error into one of the portable
91
+// errors in the Go os package, if possible.
92
+func fixOSError(err error) error {
93
+	// The os.IsExist, os.IsNotExist, and os.IsPermission functions are platform
94
+	// dependent, so sending the raw error might break those functions on a different OS.
95
+	// Go defines portable errors for these.
96
+	if os.IsExist(err) {
97
+		return os.ErrExist
98
+	} else if os.IsNotExist(err) {
99
+		return os.ErrNotExist
100
+	} else if os.IsPermission(err) {
101
+		return os.ErrPermission
102
+	}
103
+	return err
104
+}
105
+
106
+// ReadTarOptions reads from the specified reader and deserializes an archive.TarOptions struct.
107
+func ReadTarOptions(r io.Reader) (*archive.TarOptions, error) {
108
+	var size uint64
109
+	if err := binary.Read(r, binary.BigEndian, &size); err != nil {
110
+		return nil, err
111
+	}
112
+
113
+	rawJSON := make([]byte, size)
114
+	if _, err := io.ReadFull(r, rawJSON); err != nil {
115
+		return nil, err
116
+	}
117
+
118
+	var opts archive.TarOptions
119
+	if err := json.Unmarshal(rawJSON, &opts); err != nil {
120
+		return nil, err
121
+	}
122
+	return &opts, nil
123
+}
124
+
125
+// WriteTarOptions serializes an archive.TarOptions struct and writes it to the writer.
126
+func WriteTarOptions(w io.Writer, opts *archive.TarOptions) error {
127
+	optsBuf, err := json.Marshal(opts)
128
+	if err != nil {
129
+		return err
130
+	}
131
+
132
+	optsSize := uint64(len(optsBuf))
133
+	optsSizeBuf := &bytes.Buffer{}
134
+	if err := binary.Write(optsSizeBuf, binary.BigEndian, optsSize); err != nil {
135
+		return err
136
+	}
137
+
138
+	if _, err := optsSizeBuf.WriteTo(w); err != nil {
139
+		return err
140
+	}
141
+
142
+	if _, err := w.Write(optsBuf); err != nil {
143
+		return err
144
+	}
145
+
146
+	return nil
147
+}
148
+
149
+// ReadFileHeader reads from r and returns a deserialized FileHeader
150
+func ReadFileHeader(r io.Reader) (*FileHeader, error) {
151
+	hdr := &FileHeader{}
152
+	if err := binary.Read(r, binary.BigEndian, hdr); err != nil {
153
+		return nil, err
154
+	}
155
+	return hdr, nil
156
+}
157
+
158
+// WriteFileHeader serializes a FileHeader and writes it to w, along with any extra data
159
+func WriteFileHeader(w io.Writer, hdr *FileHeader, extraData []byte) error {
160
+	if err := binary.Write(w, binary.BigEndian, hdr); err != nil {
161
+		return err
162
+	}
163
+	if _, err := w.Write(extraData); err != nil {
164
+		return err
165
+	}
166
+	return nil
167
+}
0 168
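The error path is symmetric: the server serializes with `WriteError`, the client reads it back with `ReadError` and maps it onto a portable error with `ExportedToError`. A minimal in-package sketch of that round trip (hypothetical example code, not part of the change):

```go
package remotefs

import (
	"bytes"
	"fmt"
	"os"
)

// ExampleErrorRoundTrip is a hypothetical illustration of the error
// serialization round trip; it assumes it lives in a test file of this package.
func ExampleErrorRoundTrip() {
	buf := &bytes.Buffer{}

	// Server side: a platform-specific "not exist" error is normalized to
	// os.ErrNotExist by fixOSError and serialized as JSON.
	_ = WriteError(os.ErrNotExist, buf)

	// Client side: read it back and map it onto the portable error.
	ee, err := ReadError(buf)
	if err != nil || ee == nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Println(ExportedToError(ee) == os.ErrNotExist) // true
}
```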
new file mode 100644
... ...
@@ -0,0 +1,202 @@
0
+                                 Apache License
1
+                           Version 2.0, January 2004
2
+                        http://www.apache.org/licenses/
3
+
4
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
5
+
6
+   1. Definitions.
7
+
8
+      "License" shall mean the terms and conditions for use, reproduction,
9
+      and distribution as defined by Sections 1 through 9 of this document.
10
+
11
+      "Licensor" shall mean the copyright owner or entity authorized by
12
+      the copyright owner that is granting the License.
13
+
14
+      "Legal Entity" shall mean the union of the acting entity and all
15
+      other entities that control, are controlled by, or are under common
16
+      control with that entity. For the purposes of this definition,
17
+      "control" means (i) the power, direct or indirect, to cause the
18
+      direction or management of such entity, whether by contract or
19
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
20
+      outstanding shares, or (iii) beneficial ownership of such entity.
21
+
22
+      "You" (or "Your") shall mean an individual or Legal Entity
23
+      exercising permissions granted by this License.
24
+
25
+      "Source" form shall mean the preferred form for making modifications,
26
+      including but not limited to software source code, documentation
27
+      source, and configuration files.
28
+
29
+      "Object" form shall mean any form resulting from mechanical
30
+      transformation or translation of a Source form, including but
31
+      not limited to compiled object code, generated documentation,
32
+      and conversions to other media types.
33
+
34
+      "Work" shall mean the work of authorship, whether in Source or
35
+      Object form, made available under the License, as indicated by a
36
+      copyright notice that is included in or attached to the work
37
+      (an example is provided in the Appendix below).
38
+
39
+      "Derivative Works" shall mean any work, whether in Source or Object
40
+      form, that is based on (or derived from) the Work and for which the
41
+      editorial revisions, annotations, elaborations, or other modifications
42
+      represent, as a whole, an original work of authorship. For the purposes
43
+      of this License, Derivative Works shall not include works that remain
44
+      separable from, or merely link (or bind by name) to the interfaces of,
45
+      the Work and Derivative Works thereof.
46
+
47
+      "Contribution" shall mean any work of authorship, including
48
+      the original version of the Work and any modifications or additions
49
+      to that Work or Derivative Works thereof, that is intentionally
50
+      submitted to Licensor for inclusion in the Work by the copyright owner
51
+      or by an individual or Legal Entity authorized to submit on behalf of
52
+      the copyright owner. For the purposes of this definition, "submitted"
53
+      means any form of electronic, verbal, or written communication sent
54
+      to the Licensor or its representatives, including but not limited to
55
+      communication on electronic mailing lists, source code control systems,
56
+      and issue tracking systems that are managed by, or on behalf of, the
57
+      Licensor for the purpose of discussing and improving the Work, but
58
+      excluding communication that is conspicuously marked or otherwise
59
+      designated in writing by the copyright owner as "Not a Contribution."
60
+
61
+      "Contributor" shall mean Licensor and any individual or Legal Entity
62
+      on behalf of whom a Contribution has been received by Licensor and
63
+      subsequently incorporated within the Work.
64
+
65
+   2. Grant of Copyright License. Subject to the terms and conditions of
66
+      this License, each Contributor hereby grants to You a perpetual,
67
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
68
+      copyright license to reproduce, prepare Derivative Works of,
69
+      publicly display, publicly perform, sublicense, and distribute the
70
+      Work and such Derivative Works in Source or Object form.
71
+
72
+   3. Grant of Patent License. Subject to the terms and conditions of
73
+      this License, each Contributor hereby grants to You a perpetual,
74
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
75
+      (except as stated in this section) patent license to make, have made,
76
+      use, offer to sell, sell, import, and otherwise transfer the Work,
77
+      where such license applies only to those patent claims licensable
78
+      by such Contributor that are necessarily infringed by their
79
+      Contribution(s) alone or by combination of their Contribution(s)
80
+      with the Work to which such Contribution(s) was submitted. If You
81
+      institute patent litigation against any entity (including a
82
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
83
+      or a Contribution incorporated within the Work constitutes direct
84
+      or contributory patent infringement, then any patent licenses
85
+      granted to You under this License for that Work shall terminate
86
+      as of the date such litigation is filed.
87
+
88
+   4. Redistribution. You may reproduce and distribute copies of the
89
+      Work or Derivative Works thereof in any medium, with or without
90
+      modifications, and in Source or Object form, provided that You
91
+      meet the following conditions:
92
+
93
+      (a) You must give any other recipients of the Work or
94
+          Derivative Works a copy of this License; and
95
+
96
+      (b) You must cause any modified files to carry prominent notices
97
+          stating that You changed the files; and
98
+
99
+      (c) You must retain, in the Source form of any Derivative Works
100
+          that You distribute, all copyright, patent, trademark, and
101
+          attribution notices from the Source form of the Work,
102
+          excluding those notices that do not pertain to any part of
103
+          the Derivative Works; and
104
+
105
+      (d) If the Work includes a "NOTICE" text file as part of its
106
+          distribution, then any Derivative Works that You distribute must
107
+          include a readable copy of the attribution notices contained
108
+          within such NOTICE file, excluding those notices that do not
109
+          pertain to any part of the Derivative Works, in at least one
110
+          of the following places: within a NOTICE text file distributed
111
+          as part of the Derivative Works; within the Source form or
112
+          documentation, if provided along with the Derivative Works; or,
113
+          within a display generated by the Derivative Works, if and
114
+          wherever such third-party notices normally appear. The contents
115
+          of the NOTICE file are for informational purposes only and
116
+          do not modify the License. You may add Your own attribution
117
+          notices within Derivative Works that You distribute, alongside
118
+          or as an addendum to the NOTICE text from the Work, provided
119
+          that such additional attribution notices cannot be construed
120
+          as modifying the License.
121
+
122
+      You may add Your own copyright statement to Your modifications and
123
+      may provide additional or different license terms and conditions
124
+      for use, reproduction, or distribution of Your modifications, or
125
+      for any such Derivative Works as a whole, provided Your use,
126
+      reproduction, and distribution of the Work otherwise complies with
127
+      the conditions stated in this License.
128
+
129
+   5. Submission of Contributions. Unless You explicitly state otherwise,
130
+      any Contribution intentionally submitted for inclusion in the Work
131
+      by You to the Licensor shall be under the terms and conditions of
132
+      this License, without any additional terms or conditions.
133
+      Notwithstanding the above, nothing herein shall supersede or modify
134
+      the terms of any separate license agreement you may have executed
135
+      with Licensor regarding such Contributions.
136
+
137
+   6. Trademarks. This License does not grant permission to use the trade
138
+      names, trademarks, service marks, or product names of the Licensor,
139
+      except as required for reasonable and customary use in describing the
140
+      origin of the Work and reproducing the content of the NOTICE file.
141
+
142
+   7. Disclaimer of Warranty. Unless required by applicable law or
143
+      agreed to in writing, Licensor provides the Work (and each
144
+      Contributor provides its Contributions) on an "AS IS" BASIS,
145
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
146
+      implied, including, without limitation, any warranties or conditions
147
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
148
+      PARTICULAR PURPOSE. You are solely responsible for determining the
149
+      appropriateness of using or redistributing the Work and assume any
150
+      risks associated with Your exercise of permissions under this License.
151
+
152
+   8. Limitation of Liability. In no event and under no legal theory,
153
+      whether in tort (including negligence), contract, or otherwise,
154
+      unless required by applicable law (such as deliberate and grossly
155
+      negligent acts) or agreed to in writing, shall any Contributor be
156
+      liable to You for damages, including any direct, indirect, special,
157
+      incidental, or consequential damages of any character arising as a
158
+      result of this License or out of the use or inability to use the
159
+      Work (including but not limited to damages for loss of goodwill,
160
+      work stoppage, computer failure or malfunction, or any and all
161
+      other commercial damages or losses), even if such Contributor
162
+      has been advised of the possibility of such damages.
163
+
164
+   9. Accepting Warranty or Additional Liability. While redistributing
165
+      the Work or Derivative Works thereof, You may choose to offer,
166
+      and charge a fee for, acceptance of support, warranty, indemnity,
167
+      or other liability obligations and/or rights consistent with this
168
+      License. However, in accepting such obligations, You may act only
169
+      on Your own behalf and on Your sole responsibility, not on behalf
170
+      of any other Contributor, and only if You agree to indemnify,
171
+      defend, and hold each Contributor harmless for any liability
172
+      incurred by, or claims asserted against, such Contributor by reason
173
+      of your accepting any such warranty or additional liability.
174
+
175
+   END OF TERMS AND CONDITIONS
176
+
177
+   APPENDIX: How to apply the Apache License to your work.
178
+
179
+      To apply the Apache License to your work, attach the following
180
+      boilerplate notice, with the fields enclosed by brackets "{}"
181
+      replaced with your own identifying information. (Don't include
182
+      the brackets!)  The text should be enclosed in the appropriate
183
+      comment syntax for the file format. We also recommend that a
184
+      file or class name and description of purpose be included on the
185
+      same "printed page" as the copyright notice for easier
186
+      identification within third-party archives.
187
+
188
+   Copyright {yyyy} {name of copyright owner}
189
+
190
+   Licensed under the Apache License, Version 2.0 (the "License");
191
+   you may not use this file except in compliance with the License.
192
+   You may obtain a copy of the License at
193
+
194
+       http://www.apache.org/licenses/LICENSE-2.0
195
+
196
+   Unless required by applicable law or agreed to in writing, software
197
+   distributed under the License is distributed on an "AS IS" BASIS,
198
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
199
+   See the License for the specific language governing permissions and
200
+   limitations under the License.
201
+
0 202
new file mode 100644
... ...
@@ -0,0 +1,74 @@
0
+# continuity
1
+
2
+[![GoDoc](https://godoc.org/github.com/containerd/continuity?status.svg)](https://godoc.org/github.com/containerd/continuity)
3
+[![Build Status](https://travis-ci.org/containerd/continuity.svg?branch=master)](https://travis-ci.org/containerd/continuity)
4
+
5
+A transport-agnostic, filesystem metadata manifest system
6
+
7
+This project is a staging area for experiments in providing transport agnostic
8
+metadata storage.
9
+
10
+Please see https://github.com/opencontainers/specs/issues/11 for more details.
11
+
12
+## Manifest Format
13
+
14
+A continuity manifest encodes filesystem metadata in Protocol Buffers.
15
+Please refer to [proto/manifest.proto](proto/manifest.proto).
16
+
17
+## Usage
18
+
19
+Build:
20
+
21
+```console
22
+$ make
23
+```
24
+
25
+Create a manifest (of this repo itself):
26
+
27
+```console
28
+$ ./bin/continuity build . > /tmp/a.pb
29
+```
30
+
31
+Dump a manifest:
32
+
33
+```console
34
+$ ./bin/continuity ls /tmp/a.pb
35
+...
36
+-rw-rw-r--      270 B   /.gitignore
37
+-rw-rw-r--      88 B    /.mailmap
38
+-rw-rw-r--      187 B   /.travis.yml
39
+-rw-rw-r--      359 B   /AUTHORS
40
+-rw-rw-r--      11 kB   /LICENSE
41
+-rw-rw-r--      1.5 kB  /Makefile
42
+...
43
+-rw-rw-r--      986 B   /testutil_test.go
44
+drwxrwxr-x      0 B     /version
45
+-rw-rw-r--      478 B   /version/version.go
46
+```
47
+
48
+Verify a manifest:
49
+
50
+```console
51
+$ ./bin/continuity verify . /tmp/a.pb
52
+```
53
+
54
+Break the directory and restore using the manifest:
55
+```console
56
+$ chmod 777 Makefile
57
+$ ./bin/continuity verify . /tmp/a.pb
58
+2017/06/23 08:00:34 error verifying manifest: resource "/Makefile" has incorrect mode: -rwxrwxrwx != -rw-rw-r--
59
+$ ./bin/continuity apply . /tmp/a.pb
60
+$ stat -c %a Makefile
61
+664
62
+$ ./bin/continuity verify . /tmp/a.pb
63
+```
64
+
65
+
66
+## Contribution Guide
67
+### Building Proto Package
68
+
69
+If you change the proto file you will need to rebuild the generated Go with `go generate`.
70
+
71
+```console
72
+$ go generate ./proto
73
+```
0 74
new file mode 100644
... ...
@@ -0,0 +1,5 @@
0
+package devices
1
+
2
+import "fmt"
3
+
4
+var ErrNotSupported = fmt.Errorf("not supported")
0 5
new file mode 100644
... ...
@@ -0,0 +1,15 @@
0
+package devices
1
+
2
+// from /usr/include/sys/types.h
3
+
4
+func getmajor(dev int32) uint64 {
5
+	return (uint64(dev) >> 24) & 0xff
6
+}
7
+
8
+func getminor(dev int32) uint64 {
9
+	return uint64(dev) & 0xffffff
10
+}
11
+
12
+func makedev(major int, minor int) int {
13
+	return ((major << 24) | minor)
14
+}
0 15
new file mode 100644
... ...
@@ -0,0 +1,23 @@
0
+// +build solaris,!cgo
1
+
2
+//
3
+// Implementing the functions below requires cgo support.  Non-cgo stubs
4
+// versions are defined below to enable cross-compilation of source code
5
+// that depends on these functions, but the resultant cross-compiled
6
+// binaries cannot actually be used.  If the stub function(s) below are
7
+// actually invoked they will cause the calling process to exit.
8
+//
9
+
10
+package devices
11
+
12
+func getmajor(dev uint64) uint64 {
13
+	panic("getmajor() support requires cgo.")
14
+}
15
+
16
+func getminor(dev uint64) uint64 {
17
+	panic("getminor() support requires cgo.")
18
+}
19
+
20
+func makedev(major int, minor int) int {
21
+	panic("makedev() support requires cgo.")
22
+}
0 23
new file mode 100644
... ...
@@ -0,0 +1,15 @@
0
+package devices
1
+
2
+// from /usr/include/sys/types.h
3
+
4
+func getmajor(dev uint32) uint64 {
5
+	return (uint64(dev) >> 24) & 0xff
6
+}
7
+
8
+func getminor(dev uint32) uint64 {
9
+	return uint64(dev) & 0xffffff
10
+}
11
+
12
+func makedev(major int, minor int) int {
13
+	return ((major << 24) | minor)
14
+}
0 15
new file mode 100644
... ...
@@ -0,0 +1,15 @@
0
+package devices
1
+
2
+// from /usr/include/linux/kdev_t.h
3
+
4
+func getmajor(dev uint64) uint64 {
5
+	return dev >> 8
6
+}
7
+
8
+func getminor(dev uint64) uint64 {
9
+	return dev & 0xff
10
+}
11
+
12
+func makedev(major int, minor int) int {
13
+	return ((major << 8) | minor)
14
+}
0 15
new file mode 100644
... ...
@@ -0,0 +1,18 @@
0
+// +build cgo
1
+
2
+package devices
3
+
4
+//#include <sys/mkdev.h>
5
+import "C"
6
+
7
+func getmajor(dev uint64) uint64 {
8
+	return uint64(C.major(C.dev_t(dev)))
9
+}
10
+
11
+func getminor(dev uint64) uint64 {
12
+	return uint64(C.minor(C.dev_t(dev)))
13
+}
14
+
15
+func makedev(major int, minor int) int {
16
+	return int(C.makedev(C.major_t(major), C.minor_t(minor)))
17
+}
0 18
new file mode 100644
... ...
@@ -0,0 +1,55 @@
0
+// +build linux darwin freebsd solaris
1
+
2
+package devices
3
+
4
+import (
5
+	"fmt"
6
+	"os"
7
+	"syscall"
8
+)
9
+
10
+func DeviceInfo(fi os.FileInfo) (uint64, uint64, error) {
11
+	sys, ok := fi.Sys().(*syscall.Stat_t)
12
+	if !ok {
13
+		return 0, 0, fmt.Errorf("cannot extract device from os.FileInfo")
14
+	}
15
+
16
+	return getmajor(sys.Rdev), getminor(sys.Rdev), nil
17
+}
18
+
19
+// mknod provides a shortcut for syscall.Mknod
20
+func Mknod(p string, mode os.FileMode, maj, min int) error {
21
+	var (
22
+		m   = syscallMode(mode.Perm())
23
+		dev int
24
+	)
25
+
26
+	if mode&os.ModeDevice != 0 {
27
+		dev = makedev(maj, min)
28
+
29
+		if mode&os.ModeCharDevice != 0 {
30
+			m |= syscall.S_IFCHR
31
+		} else {
32
+			m |= syscall.S_IFBLK
33
+		}
34
+	} else if mode&os.ModeNamedPipe != 0 {
35
+		m |= syscall.S_IFIFO
36
+	}
37
+
38
+	return syscall.Mknod(p, m, dev)
39
+}
40
+
41
+// syscallMode returns the syscall-specific mode bits from Go's portable mode bits.
42
+func syscallMode(i os.FileMode) (o uint32) {
43
+	o |= uint32(i.Perm())
44
+	if i&os.ModeSetuid != 0 {
45
+		o |= syscall.S_ISUID
46
+	}
47
+	if i&os.ModeSetgid != 0 {
48
+		o |= syscall.S_ISGID
49
+	}
50
+	if i&os.ModeSticky != 0 {
51
+		o |= syscall.S_ISVTX
52
+	}
53
+	return
54
+}
0 55
new file mode 100644
... ...
@@ -0,0 +1,11 @@
0
+package devices
1
+
2
+import (
3
+	"os"
4
+
5
+	"github.com/pkg/errors"
6
+)
7
+
8
+func DeviceInfo(fi os.FileInfo) (uint64, uint64, error) {
9
+	return 0, 0, errors.Wrap(ErrNotSupported, "cannot get device info on windows")
10
+}
0 11
new file mode 100644
... ...
@@ -0,0 +1,162 @@
0
+package driver
1
+
2
+import (
3
+	"fmt"
4
+	"io"
5
+	"os"
6
+)
7
+
8
+var ErrNotSupported = fmt.Errorf("not supported")
9
+
10
+// Driver provides all of the system-level functions in a common interface.
11
+// The context should call these with full paths and should never use the `os`
12
+// package or any other package to access resources on the filesystem. This
13
+// mechanism let's us carefully control access to the context and maintain
14
+// path and resource integrity. It also gives us an interface to reason about
15
+// direct resource access.
16
+//
17
+// Implementations don't need to do much other than meet the interface. For
18
+// example, it is not required to wrap os.FileInfo to return correct paths for
19
+// the call to Name().
20
+type Driver interface {
21
+	// Note that Open() returns a File interface instead of *os.File. This
22
+	// is because os.File is a struct, so if Open was to return *os.File,
23
+	// the only way to fulfill the interface would be to call os.Open()
24
+	Open(path string) (File, error)
25
+	OpenFile(path string, flag int, perm os.FileMode) (File, error)
26
+
27
+	Stat(path string) (os.FileInfo, error)
28
+	Lstat(path string) (os.FileInfo, error)
29
+	Readlink(p string) (string, error)
30
+	Mkdir(path string, mode os.FileMode) error
31
+	Remove(path string) error
32
+
33
+	Link(oldname, newname string) error
34
+	Lchmod(path string, mode os.FileMode) error
35
+	Lchown(path string, uid, gid int64) error
36
+	Symlink(oldname, newname string) error
37
+
38
+	MkdirAll(path string, perm os.FileMode) error
39
+	RemoveAll(path string) error
40
+
41
+	// TODO(aaronl): These methods might move outside the main Driver
42
+	// interface in the future as more platforms are added.
43
+	Mknod(path string, mode os.FileMode, major int, minor int) error
44
+	Mkfifo(path string, mode os.FileMode) error
45
+}
46
+
47
+// File is the interface for interacting with files returned by continuity's Open
48
+// This is needed since os.File is a struct, instead of an interface, so it can't
49
+// be used.
50
+type File interface {
51
+	io.ReadWriteCloser
52
+	io.Seeker
53
+	Readdir(n int) ([]os.FileInfo, error)
54
+}
55
+
56
+func NewSystemDriver() (Driver, error) {
57
+	// TODO(stevvooe): Consider having this take a "hint" path argument, which
58
+	// would be the context root. The hint could be used to resolve required
59
+	// filesystem support when assembling the driver to use.
60
+	return &driver{}, nil
61
+}
62
+
63
+// XAttrDriver should be implemented on operation systems and filesystems that
64
+// have xattr support for regular files and directories.
65
+type XAttrDriver interface {
66
+	// Getxattr returns all of the extended attributes for the file at path.
67
+	// Typically, this takes a syscall call to Listxattr and Getxattr.
68
+	Getxattr(path string) (map[string][]byte, error)
69
+
70
+	// Setxattr sets all of the extended attributes on file at path, following
71
+	// any symbolic links, if necessary. All attributes on the target are
72
+	// replaced by the values from attr. If the operation fails to set any
73
+	// attribute, those already applied will not be rolled back.
74
+	Setxattr(path string, attr map[string][]byte) error
75
+}
76
+
77
+// LXAttrDriver should be implemented by drivers on operating systems and
78
+// filesystems that support setting and getting extended attributes on
79
+// symbolic links. If this is not implemented, extended attributes will be
80
+// ignored on symbolic links.
81
+type LXAttrDriver interface {
82
+	// LGetxattr returns all of the extended attributes for the file at path
83
+	// and does not follow symlinks. Typically, this takes a syscall call to
84
+	// Llistxattr and Lgetxattr.
85
+	LGetxattr(path string) (map[string][]byte, error)
86
+
87
+	// LSetxattr sets all of the extended attributes on file at path, without
88
+	// following symbolic links. All attributes on the target are replaced by
89
+	// the values from attr. If the operation fails to set any attribute,
90
+	// those already applied will not be rolled back.
91
+	LSetxattr(path string, attr map[string][]byte) error
92
+}
93
+
94
+type DeviceInfoDriver interface {
95
+	DeviceInfo(fi os.FileInfo) (maj uint64, min uint64, err error)
96
+}
97
+
98
+// driver is a simple default implementation that sends calls out to the "os"
99
+// package. Extend the "driver" type in system-specific files to add support,
100
+// such as xattrs, which can add support at compile time.
101
+type driver struct{}
102
+
103
+var _ File = &os.File{}
104
+
105
+// LocalDriver is the exported Driver struct for convenience.
106
+var LocalDriver Driver = &driver{}
107
+
108
+func (d *driver) Open(p string) (File, error) {
109
+	return os.Open(p)
110
+}
111
+
112
+func (d *driver) OpenFile(path string, flag int, perm os.FileMode) (File, error) {
113
+	return os.OpenFile(path, flag, perm)
114
+}
115
+
116
+func (d *driver) Stat(p string) (os.FileInfo, error) {
117
+	return os.Stat(p)
118
+}
119
+
120
+func (d *driver) Lstat(p string) (os.FileInfo, error) {
121
+	return os.Lstat(p)
122
+}
123
+
124
+func (d *driver) Readlink(p string) (string, error) {
125
+	return os.Readlink(p)
126
+}
127
+
128
+func (d *driver) Mkdir(p string, mode os.FileMode) error {
129
+	return os.Mkdir(p, mode)
130
+}
131
+
132
+// Remove is used to unlink files and remove directories.
133
+// This follows the Go os package API, which
134
+// combines the operations into a higher level Remove
135
+// function. If explicit unlinking or directory removal
136
+// that mirrors the system calls is required, they should be
137
+// split up at that time.
138
+func (d *driver) Remove(path string) error {
139
+	return os.Remove(path)
140
+}
141
+
142
+func (d *driver) Link(oldname, newname string) error {
143
+	return os.Link(oldname, newname)
144
+}
145
+
146
+func (d *driver) Lchown(name string, uid, gid int64) error {
147
+	// TODO: error out if uid exceeds int bit width?
148
+	return os.Lchown(name, int(uid), int(gid))
149
+}
150
+
151
+func (d *driver) Symlink(oldname, newname string) error {
152
+	return os.Symlink(oldname, newname)
153
+}
154
+
155
+func (d *driver) MkdirAll(path string, perm os.FileMode) error {
156
+	return os.MkdirAll(path, perm)
157
+}
158
+
159
+func (d *driver) RemoveAll(path string) error {
160
+	return os.RemoveAll(path)
161
+}
0 162
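
For orientation (not part of the vendored code): a minimal sketch of how a caller could use this Driver abstraction, assuming the import path github.com/containerd/continuity/driver; the /tmp path is illustrative.

package main

import (
	"fmt"

	"github.com/containerd/continuity/driver"
)

func main() {
	// NewSystemDriver returns the default, os-backed driver.
	d, err := driver.NewSystemDriver()
	if err != nil {
		panic(err)
	}
	// All filesystem access goes through the Driver interface instead of
	// calling the "os" package directly.
	if err := d.MkdirAll("/tmp/driver-demo/dir", 0755); err != nil {
		panic(err)
	}
	fi, err := d.Stat("/tmp/driver-demo/dir")
	if err != nil {
		panic(err)
	}
	fmt.Println(fi.Name(), fi.IsDir())
}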
new file mode 100644
... ...
@@ -0,0 +1,122 @@
0
+// +build linux darwin freebsd solaris
1
+
2
+package driver
3
+
4
+import (
5
+	"errors"
6
+	"fmt"
7
+	"os"
8
+	"path/filepath"
9
+	"sort"
10
+
11
+	"github.com/containerd/continuity/devices"
12
+	"github.com/containerd/continuity/sysx"
13
+)
14
+
15
+func (d *driver) Mknod(path string, mode os.FileMode, major, minor int) error {
16
+	return devices.Mknod(path, mode, major, minor)
17
+}
18
+
19
+func (d *driver) Mkfifo(path string, mode os.FileMode) error {
20
+	if mode&os.ModeNamedPipe == 0 {
21
+		return errors.New("mode passed to Mkfifo does not have the named pipe bit set")
22
+	}
23
+	// mknod with a mode that has ModeNamedPipe set creates a fifo, not a
24
+	// device.
25
+	return devices.Mknod(path, mode, 0, 0)
26
+}
27
+
28
+// Lchmod changes the mode of a file without following symlinks.
29
+func (d *driver) Lchmod(path string, mode os.FileMode) (err error) {
30
+	if !filepath.IsAbs(path) {
31
+		path, err = filepath.Abs(path)
32
+		if err != nil {
33
+			return
34
+		}
35
+	}
36
+
37
+	return sysx.Fchmodat(0, path, uint32(mode), sysx.AtSymlinkNofollow)
38
+}
39
+
40
+// Getxattr returns all of the extended attributes for the file at path p.
41
+func (d *driver) Getxattr(p string) (map[string][]byte, error) {
42
+	xattrs, err := sysx.Listxattr(p)
43
+	if err != nil {
44
+		return nil, fmt.Errorf("listing %s xattrs: %v", p, err)
45
+	}
46
+
47
+	sort.Strings(xattrs)
48
+	m := make(map[string][]byte, len(xattrs))
49
+
50
+	for _, attr := range xattrs {
51
+		value, err := sysx.Getxattr(p, attr)
52
+		if err != nil {
53
+			return nil, fmt.Errorf("getting %q xattr on %s: %v", attr, p, err)
54
+		}
55
+
56
+		// NOTE(stevvooe): This append/copy trick relies on unique
57
+		// xattrs. Break this out into an alloc/copy if xattrs are no
58
+		// longer unique.
59
+		m[attr] = append(m[attr], value...)
60
+	}
61
+
62
+	return m, nil
63
+}
64
+
65
+// Setxattr sets all of the extended attributes on file at path, following
66
+// any symbolic links, if necessary. All attributes on the target are
67
+// replaced by the values from attr. If the operation fails to set any
68
+// attribute, those already applied will not be rolled back.
69
+func (d *driver) Setxattr(path string, attrMap map[string][]byte) error {
70
+	for attr, value := range attrMap {
71
+		if err := sysx.Setxattr(path, attr, value, 0); err != nil {
72
+			return fmt.Errorf("error setting xattr %q on %s: %v", attr, path, err)
73
+		}
74
+	}
75
+
76
+	return nil
77
+}
78
+
79
+// LGetxattr returns all of the extended attributes for the file at path p
80
+// not following symbolic links.
81
+func (d *driver) LGetxattr(p string) (map[string][]byte, error) {
82
+	xattrs, err := sysx.LListxattr(p)
83
+	if err != nil {
84
+		return nil, fmt.Errorf("listing %s xattrs: %v", p, err)
85
+	}
86
+
87
+	sort.Strings(xattrs)
88
+	m := make(map[string][]byte, len(xattrs))
89
+
90
+	for _, attr := range xattrs {
91
+		value, err := sysx.LGetxattr(p, attr)
92
+		if err != nil {
93
+			return nil, fmt.Errorf("getting %q xattr on %s: %v", attr, p, err)
94
+		}
95
+
96
+		// NOTE(stevvooe): This append/copy trick relies on unique
97
+		// xattrs. Break this out into an alloc/copy if xattrs are no
98
+		// longer unique.
99
+		m[attr] = append(m[attr], value...)
100
+	}
101
+
102
+	return m, nil
103
+}
104
+
105
+// LSetxattr sets all of the extended attributes on file at path, not
106
+// following any symbolic links. All attributes on the target are
107
+// replaced by the values from attr. If the operation fails to set any
108
+// attribute, those already applied will not be rolled back.
109
+func (d *driver) LSetxattr(path string, attrMap map[string][]byte) error {
110
+	for attr, value := range attrMap {
111
+		if err := sysx.LSetxattr(path, attr, value, 0); err != nil {
112
+			return fmt.Errorf("error setting xattr %q on %s: %v", attr, path, err)
113
+		}
114
+	}
115
+
116
+	return nil
117
+}
118
+
119
+func (d *driver) DeviceInfo(fi os.FileInfo) (maj uint64, min uint64, err error) {
120
+	return devices.DeviceInfo(fi)
121
+}
0 122
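
A hedged sketch (not in the PR) of how xattr support is intended to be discovered: the base Driver carries no xattr methods, so callers type-assert to the optional XAttrDriver interface and simply skip xattrs when it is absent. The import path and paths below are illustrative assumptions.

package main

import "github.com/containerd/continuity/driver"

// copyXattrs copies extended attributes from src to dst when the driver
// supports them; otherwise it is a no-op, matching the interface docs above.
func copyXattrs(d driver.Driver, src, dst string) error {
	xd, ok := d.(driver.XAttrDriver)
	if !ok {
		return nil // platform without xattr support
	}
	attrs, err := xd.Getxattr(src)
	if err != nil {
		return err
	}
	return xd.Setxattr(dst, attrs)
}

func main() {
	d, _ := driver.NewSystemDriver()
	_ = copyXattrs(d, "/tmp/a", "/tmp/b") // illustrative paths
}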
new file mode 100644
... ...
@@ -0,0 +1,21 @@
0
+package driver
1
+
2
+import (
3
+	"os"
4
+
5
+	"github.com/pkg/errors"
6
+)
7
+
8
+func (d *driver) Mknod(path string, mode os.FileMode, major, minor int) error {
9
+	return errors.Wrap(ErrNotSupported, "cannot create device node on Windows")
10
+}
11
+
12
+func (d *driver) Mkfifo(path string, mode os.FileMode) error {
13
+	return errors.Wrap(ErrNotSupported, "cannot create fifo on Windows")
14
+}
15
+
16
+// Lchmod changes the mode of a file without following symlinks.
17
+func (d *driver) Lchmod(path string, mode os.FileMode) (err error) {
18
+	// TODO: Use the Windows equivalent
19
+	return os.Chmod(path, mode)
20
+}
0 21
new file mode 100644
... ...
@@ -0,0 +1,74 @@
0
+package driver
1
+
2
+import (
3
+	"io"
4
+	"io/ioutil"
5
+	"os"
6
+	"sort"
7
+)
8
+
9
+// ReadFile works the same as ioutil.ReadFile with the Driver abstraction
10
+func ReadFile(r Driver, filename string) ([]byte, error) {
11
+	f, err := r.Open(filename)
12
+	if err != nil {
13
+		return nil, err
14
+	}
15
+	defer f.Close()
16
+
17
+	data, err := ioutil.ReadAll(f)
18
+	if err != nil {
19
+		return nil, err
20
+	}
21
+
22
+	return data, nil
23
+}
24
+
25
+// WriteFile works the same as ioutil.WriteFile with the Driver abstraction
26
+func WriteFile(r Driver, filename string, data []byte, perm os.FileMode) error {
27
+	f, err := r.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
28
+	if err != nil {
29
+		return err
30
+	}
31
+	defer f.Close()
32
+
33
+	n, err := f.Write(data)
34
+	if err != nil {
35
+		return err
36
+	} else if n != len(data) {
37
+		return io.ErrShortWrite
38
+	}
39
+
40
+	return nil
41
+}
42
+
43
+// ReadDir works the same as ioutil.ReadDir with the Driver abstraction
44
+func ReadDir(r Driver, dirname string) ([]os.FileInfo, error) {
45
+	f, err := r.Open(dirname)
46
+	if err != nil {
47
+		return nil, err
48
+	}
49
+	defer f.Close()
50
+
51
+	dirs, err := f.Readdir(-1)
52
+	if err != nil {
53
+		return nil, err
54
+	}
55
+
56
+	sort.Sort(fileInfos(dirs))
57
+	return dirs, nil
58
+}
59
+
60
+// Simple implementation of the sort.Interface for os.FileInfo
61
+type fileInfos []os.FileInfo
62
+
63
+func (fis fileInfos) Len() int {
64
+	return len(fis)
65
+}
66
+
67
+func (fis fileInfos) Less(i, j int) bool {
68
+	return fis[i].Name() < fis[j].Name()
69
+}
70
+
71
+func (fis fileInfos) Swap(i, j int) {
72
+	fis[i], fis[j] = fis[j], fis[i]
73
+}
0 74
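
An illustrative use of these helpers (same assumed import path as above): they mirror ioutil but route every read and write through whatever Driver is supplied, so the same code works against a remote or container filesystem driver.

package main

import (
	"fmt"

	"github.com/containerd/continuity/driver"
)

func main() {
	d := driver.LocalDriver       // the plain os-backed driver exported above
	path := "/tmp/driver-demo.txt" // illustrative path
	if err := driver.WriteFile(d, path, []byte("hello"), 0644); err != nil {
		panic(err)
	}
	data, err := driver.ReadFile(d, path)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // hello
}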
new file mode 100644
... ...
@@ -0,0 +1,85 @@
0
+package pathdriver
1
+
2
+import (
3
+	"path/filepath"
4
+)
5
+
6
+// PathDriver provides all of the path manipulation functions in a common
7
+// interface. The context should call these and never use the `filepath`
8
+// package or any other package to manipulate paths.
9
+type PathDriver interface {
10
+	Join(paths ...string) string
11
+	IsAbs(path string) bool
12
+	Rel(base, target string) (string, error)
13
+	Base(path string) string
14
+	Dir(path string) string
15
+	Clean(path string) string
16
+	Split(path string) (dir, file string)
17
+	Separator() byte
18
+	Abs(path string) (string, error)
19
+	Walk(string, filepath.WalkFunc) error
20
+	FromSlash(path string) string
21
+	ToSlash(path string) string
22
+	Match(pattern, name string) (matched bool, err error)
23
+}
24
+
25
+// pathDriver is a simple default implementation that calls the filepath package.
26
+type pathDriver struct{}
27
+
28
+// LocalPathDriver is the exported pathDriver struct for convenience.
29
+var LocalPathDriver PathDriver = &pathDriver{}
30
+
31
+func (*pathDriver) Join(paths ...string) string {
32
+	return filepath.Join(paths...)
33
+}
34
+
35
+func (*pathDriver) IsAbs(path string) bool {
36
+	return filepath.IsAbs(path)
37
+}
38
+
39
+func (*pathDriver) Rel(base, target string) (string, error) {
40
+	return filepath.Rel(base, target)
41
+}
42
+
43
+func (*pathDriver) Base(path string) string {
44
+	return filepath.Base(path)
45
+}
46
+
47
+func (*pathDriver) Dir(path string) string {
48
+	return filepath.Dir(path)
49
+}
50
+
51
+func (*pathDriver) Clean(path string) string {
52
+	return filepath.Clean(path)
53
+}
54
+
55
+func (*pathDriver) Split(path string) (dir, file string) {
56
+	return filepath.Split(path)
57
+}
58
+
59
+func (*pathDriver) Separator() byte {
60
+	return filepath.Separator
61
+}
62
+
63
+func (*pathDriver) Abs(path string) (string, error) {
64
+	return filepath.Abs(path)
65
+}
66
+
67
+// Note that filepath.Walk calls os.Stat, so if the context wants to
68
+// call Driver.Stat() for Walk, it needs to create a new struct that
69
+// overrides this method.
70
+func (*pathDriver) Walk(root string, walkFn filepath.WalkFunc) error {
71
+	return filepath.Walk(root, walkFn)
72
+}
73
+
74
+func (*pathDriver) FromSlash(path string) string {
75
+	return filepath.FromSlash(path)
76
+}
77
+
78
+func (*pathDriver) ToSlash(path string) string {
79
+	return filepath.ToSlash(path)
80
+}
81
+
82
+func (*pathDriver) Match(pattern, name string) (bool, error) {
83
+	return filepath.Match(pattern, name)
84
+}
0 85
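
A short sketch of the PathDriver in use (import path assumed to be github.com/containerd/continuity/pathdriver). The point of the indirection, in the context of this PR, is that path semantics travel with the driver rather than being fixed to the daemon host's filepath behavior.

package main

import (
	"fmt"

	"github.com/containerd/continuity/pathdriver"
)

func main() {
	p := pathdriver.LocalPathDriver
	joined := p.Join("a", "b", "c")
	fmt.Println(joined)                // "a/b/c" on Linux, "a\b\c" on Windows
	fmt.Println(p.IsAbs(joined))       // false
	fmt.Println(string(p.Separator())) // the host-specific separator
}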
new file mode 100644
... ...
@@ -0,0 +1,10 @@
0
+// Copyright 2014 The Go Authors.  All rights reserved.
1
+// Use of this source code is governed by a BSD-style
2
+// license that can be found in the LICENSE file.
3
+
4
+// +build !gccgo
5
+
6
+#include "textflag.h"
7
+
8
+TEXT ·use(SB),NOSPLIT,$0
9
+	RET
0 10
new file mode 100644
... ...
@@ -0,0 +1,18 @@
0
+package sysx
1
+
2
+const (
3
+	// AtSymlinkNofollow defined from AT_SYMLINK_NOFOLLOW in <sys/fcntl.h>
4
+	AtSymlinkNofollow = 0x20
5
+)
6
+
7
+const (
8
+
9
+	// SYS_FCHMODAT defined from golang.org/sys/unix
10
+	SYS_FCHMODAT = 467
11
+)
12
+
13
+// These functions will be generated by generate.sh
14
+//    $ GOOS=darwin GOARCH=386 ./generate.sh chmod
15
+//    $ GOOS=darwin GOARCH=amd64 ./generate.sh chmod
16
+
17
+//sys  Fchmodat(dirfd int, path string, mode uint32, flags int) (err error)
0 18
new file mode 100644
... ...
@@ -0,0 +1,25 @@
0
+// mksyscall.pl -l32 chmod_darwin.go
1
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
2
+
3
+package sysx
4
+
5
+import (
6
+	"syscall"
7
+	"unsafe"
8
+)
9
+
10
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
11
+
12
+func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
13
+	var _p0 *byte
14
+	_p0, err = syscall.BytePtrFromString(path)
15
+	if err != nil {
16
+		return
17
+	}
18
+	_, _, e1 := syscall.Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
19
+	use(unsafe.Pointer(_p0))
20
+	if e1 != 0 {
21
+		err = errnoErr(e1)
22
+	}
23
+	return
24
+}
0 25
new file mode 100644
... ...
@@ -0,0 +1,25 @@
0
+// mksyscall.pl chmod_darwin.go
1
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
2
+
3
+package sysx
4
+
5
+import (
6
+	"syscall"
7
+	"unsafe"
8
+)
9
+
10
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
11
+
12
+func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
13
+	var _p0 *byte
14
+	_p0, err = syscall.BytePtrFromString(path)
15
+	if err != nil {
16
+		return
17
+	}
18
+	_, _, e1 := syscall.Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
19
+	use(unsafe.Pointer(_p0))
20
+	if e1 != 0 {
21
+		err = errnoErr(e1)
22
+	}
23
+	return
24
+}
0 25
new file mode 100644
... ...
@@ -0,0 +1,17 @@
0
+package sysx
1
+
2
+const (
3
+	// AtSymlinkNofollow defined from AT_SYMLINK_NOFOLLOW in <sys/fcntl.h>
4
+	AtSymlinkNofollow = 0x200
5
+)
6
+
7
+const (
8
+
9
+	// SYS_FCHMODAT defined from golang.org/sys/unix
10
+	SYS_FCHMODAT = 490
11
+)
12
+
13
+// These functions will be generated by generate.sh
14
+//    $ GOOS=freebsd GOARCH=amd64 ./generate.sh chmod
15
+
16
+//sys  Fchmodat(dirfd int, path string, mode uint32, flags int) (err error)
0 17
new file mode 100644
... ...
@@ -0,0 +1,25 @@
0
+// mksyscall.pl chmod_freebsd.go
1
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
2
+
3
+package sysx
4
+
5
+import (
6
+	"syscall"
7
+	"unsafe"
8
+)
9
+
10
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
11
+
12
+func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
13
+	var _p0 *byte
14
+	_p0, err = syscall.BytePtrFromString(path)
15
+	if err != nil {
16
+		return
17
+	}
18
+	_, _, e1 := syscall.Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
19
+	use(unsafe.Pointer(_p0))
20
+	if e1 != 0 {
21
+		err = errnoErr(e1)
22
+	}
23
+	return
24
+}
0 25
new file mode 100644
... ...
@@ -0,0 +1,12 @@
0
+package sysx
1
+
2
+import "syscall"
3
+
4
+const (
5
+	// AtSymlinkNofollow defined from AT_SYMLINK_NOFOLLOW in /usr/include/linux/fcntl.h
6
+	AtSymlinkNofollow = 0x100
7
+)
8
+
9
+func Fchmodat(dirfd int, path string, mode uint32, flags int) error {
10
+	return syscall.Fchmodat(dirfd, path, mode, flags)
11
+}
0 12
new file mode 100644
... ...
@@ -0,0 +1,11 @@
0
+package sysx
1
+
2
+import "golang.org/x/sys/unix"
3
+
4
+const (
5
+	AtSymlinkNofollow = unix.AT_SYMLINK_NOFOLLOW
6
+)
7
+
8
+func Fchmodat(dirfd int, path string, mode uint32, flags int) error {
9
+	return unix.Fchmodat(dirfd, path, mode, flags)
10
+}
0 11
new file mode 100644
... ...
@@ -0,0 +1,11 @@
0
+package sysx
1
+
2
+// These functions will be generated by generate.sh
3
+//    $ GOOS=linux GOARCH=386 ./generate.sh copy
4
+//    $ GOOS=linux GOARCH=amd64 ./generate.sh copy
5
+//    $ GOOS=linux GOARCH=arm ./generate.sh copy
6
+//    $ GOOS=linux GOARCH=arm64 ./generate.sh copy
7
+//    $ GOOS=linux GOARCH=ppc64le ./generate.sh copy
8
+//    $ GOOS=linux GOARCH=s390x ./generate.sh copy
9
+
10
+//sys CopyFileRange(fdin uintptr, offin *int64, fdout uintptr, offout *int64, len int, flags int) (n int, err error)
0 11
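
A minimal sketch of calling the generated wrapper (import path assumed; requires Linux >= 4.5 and illustrative paths). Passing nil offsets tells the kernel to use and advance each file's current offset.

package main

import (
	"fmt"
	"os"

	"github.com/containerd/continuity/sysx"
)

func main() {
	src, err := os.Open("/tmp/src")
	if err != nil {
		panic(err)
	}
	defer src.Close()
	dst, err := os.Create("/tmp/dst")
	if err != nil {
		panic(err)
	}
	defer dst.Close()

	fi, _ := src.Stat()
	// Single call for brevity; copy_file_range(2) may copy fewer bytes than
	// requested, so production code would loop over the remainder.
	n, err := sysx.CopyFileRange(src.Fd(), nil, dst.Fd(), nil, int(fi.Size()), 0)
	if err != nil {
		panic(err)
	}
	fmt.Println("copied", n, "bytes")
}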
new file mode 100644
... ...
@@ -0,0 +1,20 @@
0
+// mksyscall.pl -l32 copy_linux.go
1
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
2
+
3
+package sysx
4
+
5
+import (
6
+	"syscall"
7
+	"unsafe"
8
+)
9
+
10
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
11
+
12
+func CopyFileRange(fdin uintptr, offin *int64, fdout uintptr, offout *int64, len int, flags int) (n int, err error) {
13
+	r0, _, e1 := syscall.Syscall6(SYS_COPY_FILE_RANGE, uintptr(fdin), uintptr(unsafe.Pointer(offin)), uintptr(fdout), uintptr(unsafe.Pointer(offout)), uintptr(len), uintptr(flags))
14
+	n = int(r0)
15
+	if e1 != 0 {
16
+		err = errnoErr(e1)
17
+	}
18
+	return
19
+}
0 20
new file mode 100644
... ...
@@ -0,0 +1,20 @@
0
+// mksyscall.pl copy_linux.go
1
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
2
+
3
+package sysx
4
+
5
+import (
6
+	"syscall"
7
+	"unsafe"
8
+)
9
+
10
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
11
+
12
+func CopyFileRange(fdin uintptr, offin *int64, fdout uintptr, offout *int64, len int, flags int) (n int, err error) {
13
+	r0, _, e1 := syscall.Syscall6(SYS_COPY_FILE_RANGE, uintptr(fdin), uintptr(unsafe.Pointer(offin)), uintptr(fdout), uintptr(unsafe.Pointer(offout)), uintptr(len), uintptr(flags))
14
+	n = int(r0)
15
+	if e1 != 0 {
16
+		err = errnoErr(e1)
17
+	}
18
+	return
19
+}
0 20
new file mode 100644
... ...
@@ -0,0 +1,20 @@
0
+// mksyscall.pl -l32 copy_linux.go
1
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
2
+
3
+package sysx
4
+
5
+import (
6
+	"syscall"
7
+	"unsafe"
8
+)
9
+
10
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
11
+
12
+func CopyFileRange(fdin uintptr, offin *int64, fdout uintptr, offout *int64, len int, flags int) (n int, err error) {
13
+	r0, _, e1 := syscall.Syscall6(SYS_COPY_FILE_RANGE, uintptr(fdin), uintptr(unsafe.Pointer(offin)), uintptr(fdout), uintptr(unsafe.Pointer(offout)), uintptr(len), uintptr(flags))
14
+	n = int(r0)
15
+	if e1 != 0 {
16
+		err = errnoErr(e1)
17
+	}
18
+	return
19
+}
0 20
new file mode 100644
... ...
@@ -0,0 +1,20 @@
0
+// mksyscall.pl copy_linux.go
1
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
2
+
3
+package sysx
4
+
5
+import (
6
+	"syscall"
7
+	"unsafe"
8
+)
9
+
10
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
11
+
12
+func CopyFileRange(fdin uintptr, offin *int64, fdout uintptr, offout *int64, len int, flags int) (n int, err error) {
13
+	r0, _, e1 := syscall.Syscall6(SYS_COPY_FILE_RANGE, uintptr(fdin), uintptr(unsafe.Pointer(offin)), uintptr(fdout), uintptr(unsafe.Pointer(offout)), uintptr(len), uintptr(flags))
14
+	n = int(r0)
15
+	if e1 != 0 {
16
+		err = errnoErr(e1)
17
+	}
18
+	return
19
+}
0 20
new file mode 100644
... ...
@@ -0,0 +1,20 @@
0
+// mksyscall.pl copy_linux.go
1
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
2
+
3
+package sysx
4
+
5
+import (
6
+	"syscall"
7
+	"unsafe"
8
+)
9
+
10
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
11
+
12
+func CopyFileRange(fdin uintptr, offin *int64, fdout uintptr, offout *int64, len int, flags int) (n int, err error) {
13
+	r0, _, e1 := syscall.Syscall6(SYS_COPY_FILE_RANGE, uintptr(fdin), uintptr(unsafe.Pointer(offin)), uintptr(fdout), uintptr(unsafe.Pointer(offout)), uintptr(len), uintptr(flags))
14
+	n = int(r0)
15
+	if e1 != 0 {
16
+		err = errnoErr(e1)
17
+	}
18
+	return
19
+}
0 20
new file mode 100644
... ...
@@ -0,0 +1,20 @@
0
+// mksyscall.pl copy_linux.go
1
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
2
+
3
+package sysx
4
+
5
+import (
6
+	"syscall"
7
+	"unsafe"
8
+)
9
+
10
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
11
+
12
+func CopyFileRange(fdin uintptr, offin *int64, fdout uintptr, offout *int64, len int, flags int) (n int, err error) {
13
+	r0, _, e1 := syscall.Syscall6(SYS_COPY_FILE_RANGE, uintptr(fdin), uintptr(unsafe.Pointer(offin)), uintptr(fdout), uintptr(unsafe.Pointer(offout)), uintptr(len), uintptr(flags))
14
+	n = int(r0)
15
+	if e1 != 0 {
16
+		err = errnoErr(e1)
17
+	}
18
+	return
19
+}
0 20
new file mode 100644
... ...
@@ -0,0 +1,7 @@
0
+package sysx
1
+
2
+import (
3
+	"syscall"
4
+)
5
+
6
+const ENODATA = syscall.ENODATA
0 7
new file mode 100644
... ...
@@ -0,0 +1,8 @@
0
+package sysx
1
+
2
+import (
3
+	"syscall"
4
+)
5
+
6
+// This should actually be a set that contains ENOENT and EPERM
7
+const ENODATA = syscall.ENOENT
0 8
new file mode 100644
... ...
@@ -0,0 +1,9 @@
0
+// +build darwin freebsd
1
+
2
+package sysx
3
+
4
+import (
5
+	"syscall"
6
+)
7
+
8
+const ENODATA = syscall.ENOATTR
0 9
new file mode 100644
... ...
@@ -0,0 +1,37 @@
0
+package sysx
1
+
2
+import (
3
+	"syscall"
4
+	"unsafe"
5
+)
6
+
7
+var _zero uintptr
8
+
9
+// use is a no-op, but the compiler cannot see that it is.
10
+// Calling use(p) ensures that p is kept live until that point.
11
+//go:noescape
12
+func use(p unsafe.Pointer)
13
+
14
+// Do the interface allocations only once for common
15
+// Errno values.
16
+var (
17
+	errEAGAIN error = syscall.EAGAIN
18
+	errEINVAL error = syscall.EINVAL
19
+	errENOENT error = syscall.ENOENT
20
+)
21
+
22
+// errnoErr returns common boxed Errno values, to prevent
23
+// allocations at runtime.
24
+func errnoErr(e syscall.Errno) error {
25
+	switch e {
26
+	case 0:
27
+		return nil
28
+	case syscall.EAGAIN:
29
+		return errEAGAIN
30
+	case syscall.EINVAL:
31
+		return errEINVAL
32
+	case syscall.ENOENT:
33
+		return errENOENT
34
+	}
35
+	return e
36
+}
0 37
new file mode 100644
... ...
@@ -0,0 +1,7 @@
0
+package sysx
1
+
2
+const (
3
+	// SYS_COPY_FILE_RANGE defined in Kernel 4.5+
4
+	// Number defined in /usr/include/asm/unistd_32.h
5
+	SYS_COPY_FILE_RANGE = 377
6
+)
0 7
new file mode 100644
... ...
@@ -0,0 +1,7 @@
0
+package sysx
1
+
2
+const (
3
+	// SYS_COPY_FILE_RANGE defined in Kernel 4.5+
4
+	// Number defined in /usr/include/asm/unistd_64.h
5
+	SYS_COPY_FILE_RANGE = 326
6
+)
0 7
new file mode 100644
... ...
@@ -0,0 +1,7 @@
0
+package sysx
1
+
2
+const (
3
+	// SYS_COPY_FILE_RANGE defined in Kernel 4.5+
4
+	// Number defined in /usr/include/arm-linux-gnueabihf/asm/unistd.h
5
+	SYS_COPY_FILE_RANGE = 391
6
+)
0 7
new file mode 100644
... ...
@@ -0,0 +1,7 @@
0
+package sysx
1
+
2
+const (
3
+	// SYS_COPY_FILE_RANGE defined in Kernel 4.5+
4
+	// Number defined in /usr/include/asm-generic/unistd.h
5
+	SYS_COPY_FILE_RANGE = 285
6
+)
0 7
new file mode 100644
... ...
@@ -0,0 +1,7 @@
0
+package sysx
1
+
2
+const (
3
+	// SYS_COPY_FILE_RANGE defined in Kernel 4.5+
4
+	// Number defined in /usr/include/asm/unistd_64.h
5
+	SYS_COPY_FILE_RANGE = 379
6
+)
0 7
new file mode 100644
... ...
@@ -0,0 +1,7 @@
0
+package sysx
1
+
2
+const (
3
+	// SYS_COPY_FILE_RANGE defined in Kernel 4.5+
4
+	// Number defined in /usr/include/asm/unistd_64.h
5
+	SYS_COPY_FILE_RANGE = 375
6
+)
0 7
new file mode 100644
... ...
@@ -0,0 +1,67 @@
0
+package sysx
1
+
2
+import (
3
+	"bytes"
4
+	"fmt"
5
+	"syscall"
6
+)
7
+
8
+const defaultXattrBufferSize = 5
9
+
10
+var ErrNotSupported = fmt.Errorf("not supported")
11
+
12
+type listxattrFunc func(path string, dest []byte) (int, error)
13
+
14
+func listxattrAll(path string, listFunc listxattrFunc) ([]string, error) {
15
+	var p []byte // nil on first execution
16
+
17
+	for {
18
+		n, err := listFunc(path, p) // first call gets buffer size.
19
+		if err != nil {
20
+			return nil, err
21
+		}
22
+
23
+		if n > len(p) {
24
+			p = make([]byte, n)
25
+			continue
26
+		}
27
+
28
+		p = p[:n]
29
+
30
+		ps := bytes.Split(bytes.TrimSuffix(p, []byte{0}), []byte{0})
31
+		var entries []string
32
+		for _, p := range ps {
33
+			s := string(p)
34
+			if s != "" {
35
+				entries = append(entries, s)
36
+			}
37
+		}
38
+
39
+		return entries, nil
40
+	}
41
+}
42
+
43
+type getxattrFunc func(string, string, []byte) (int, error)
44
+
45
+func getxattrAll(path, attr string, getFunc getxattrFunc) ([]byte, error) {
46
+	p := make([]byte, defaultXattrBufferSize)
47
+	for {
48
+		n, err := getFunc(path, attr, p)
49
+		if err != nil {
50
+			if errno, ok := err.(syscall.Errno); ok && errno == syscall.ERANGE {
51
+				p = make([]byte, len(p)*2) // this can't be ideal.
52
+				continue                   // try again!
53
+			}
54
+
55
+			return nil, err
56
+		}
57
+
58
+		// realloc to correct size and repeat
59
+		if n > len(p) {
60
+			p = make([]byte, n)
61
+			continue
62
+		}
63
+
64
+		return p[:n], nil
65
+	}
66
+}
0 67
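
A hypothetical test-style sketch (placed in package sysx purely for illustration; none of these names exist in the PR) of the probe-then-retry contract listxattrAll expects from its listFunc: an empty destination buffer reports the size needed, and a sufficiently large one receives the NUL-separated names.

package sysx

import "testing"

// fakeList mimics listxattr(2): with an empty buffer it reports the size
// required; otherwise it copies the NUL-separated attribute names.
func fakeList(path string, dest []byte) (int, error) {
	payload := []byte("user.a\x00user.b\x00")
	if len(dest) == 0 {
		return len(payload), nil
	}
	return copy(dest, payload), nil
}

func TestListxattrAllSketch(t *testing.T) {
	names, err := listxattrAll("/ignored", fakeList)
	if err != nil || len(names) != 2 {
		t.Fatalf("unexpected result: %v, %v", names, err)
	}
}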
new file mode 100644
... ...
@@ -0,0 +1,71 @@
0
+package sysx
1
+
2
+// These functions will be generated by generate.sh
3
+//    $ GOOS=darwin GOARCH=386 ./generate.sh xattr
4
+//    $ GOOS=darwin GOARCH=amd64 ./generate.sh xattr
5
+
6
+//sys  getxattr(path string, attr string, dest []byte, pos int, options int) (sz int, err error)
7
+//sys  setxattr(path string, attr string, data []byte, flags int) (err error)
8
+//sys  removexattr(path string, attr string, options int) (err error)
9
+//sys  listxattr(path string, dest []byte, options int) (sz int, err error)
10
+//sys  Fchmodat(dirfd int, path string, mode uint32, flags int) (err error)
11
+
12
+const (
13
+	xattrNoFollow = 0x01
14
+)
15
+
16
+func listxattrFollow(path string, dest []byte) (sz int, err error) {
17
+	return listxattr(path, dest, 0)
18
+}
19
+
20
+// Listxattr calls syscall listxattr
21
+func Listxattr(path string) ([]string, error) {
22
+	return listxattrAll(path, listxattrFollow)
23
+}
24
+
25
+// Removexattr calls syscall removexattr
26
+func Removexattr(path string, attr string) (err error) {
27
+	return removexattr(path, attr, 0)
28
+}
29
+
30
+// Setxattr calls syscall setxattr
31
+func Setxattr(path string, attr string, data []byte, flags int) (err error) {
32
+	return setxattr(path, attr, data, flags)
33
+}
34
+
35
+func getxattrFollow(path, attr string, dest []byte) (sz int, err error) {
36
+	return getxattr(path, attr, dest, 0, 0)
37
+}
38
+
39
+// Getxattr calls syscall getxattr
40
+func Getxattr(path, attr string) ([]byte, error) {
41
+	return getxattrAll(path, attr, getxattrFollow)
42
+}
43
+
44
+func listxattrNoFollow(path string, dest []byte) (sz int, err error) {
45
+	return listxattr(path, dest, xattrNoFollow)
46
+}
47
+
48
+// LListxattr calls syscall listxattr with XATTR_NOFOLLOW
49
+func LListxattr(path string) ([]string, error) {
50
+	return listxattrAll(path, listxattrNoFollow)
51
+}
52
+
53
+// LRemovexattr calls syscall removexattr with XATTR_NOFOLLOW
54
+func LRemovexattr(path string, attr string) (err error) {
55
+	return removexattr(path, attr, xattrNoFollow)
56
+}
57
+
58
+// LSetxattr calls syscall setxattr with XATTR_NOFOLLOW
59
+func LSetxattr(path string, attr string, data []byte, flags int) (err error) {
60
+	return setxattr(path, attr, data, flags|xattrNoFollow)
61
+}
62
+
63
+func getxattrNoFollow(path, attr string, dest []byte) (sz int, err error) {
64
+	return getxattr(path, attr, dest, 0, xattrNoFollow)
65
+}
66
+
67
+// LGetxattr calls syscall getxattr with XATTR_NOFOLLOW
68
+func LGetxattr(path, attr string) ([]byte, error) {
69
+	return getxattrAll(path, attr, getxattrNoFollow)
70
+}
0 71
new file mode 100644
... ...
@@ -0,0 +1,111 @@
0
+// mksyscall.pl -l32 xattr_darwin.go
1
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
2
+
3
+package sysx
4
+
5
+import (
6
+	"syscall"
7
+	"unsafe"
8
+)
9
+
10
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
11
+
12
+func getxattr(path string, attr string, dest []byte, pos int, options int) (sz int, err error) {
13
+	var _p0 *byte
14
+	_p0, err = syscall.BytePtrFromString(path)
15
+	if err != nil {
16
+		return
17
+	}
18
+	var _p1 *byte
19
+	_p1, err = syscall.BytePtrFromString(attr)
20
+	if err != nil {
21
+		return
22
+	}
23
+	var _p2 unsafe.Pointer
24
+	if len(dest) > 0 {
25
+		_p2 = unsafe.Pointer(&dest[0])
26
+	} else {
27
+		_p2 = unsafe.Pointer(&_zero)
28
+	}
29
+	r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), uintptr(pos), uintptr(options))
30
+	use(unsafe.Pointer(_p0))
31
+	use(unsafe.Pointer(_p1))
32
+	sz = int(r0)
33
+	if e1 != 0 {
34
+		err = errnoErr(e1)
35
+	}
36
+	return
37
+}
38
+
39
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
40
+
41
+func setxattr(path string, attr string, data []byte, flags int) (err error) {
42
+	var _p0 *byte
43
+	_p0, err = syscall.BytePtrFromString(path)
44
+	if err != nil {
45
+		return
46
+	}
47
+	var _p1 *byte
48
+	_p1, err = syscall.BytePtrFromString(attr)
49
+	if err != nil {
50
+		return
51
+	}
52
+	var _p2 unsafe.Pointer
53
+	if len(data) > 0 {
54
+		_p2 = unsafe.Pointer(&data[0])
55
+	} else {
56
+		_p2 = unsafe.Pointer(&_zero)
57
+	}
58
+	_, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
59
+	use(unsafe.Pointer(_p0))
60
+	use(unsafe.Pointer(_p1))
61
+	if e1 != 0 {
62
+		err = errnoErr(e1)
63
+	}
64
+	return
65
+}
66
+
67
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
68
+
69
+func removexattr(path string, attr string, options int) (err error) {
70
+	var _p0 *byte
71
+	_p0, err = syscall.BytePtrFromString(path)
72
+	if err != nil {
73
+		return
74
+	}
75
+	var _p1 *byte
76
+	_p1, err = syscall.BytePtrFromString(attr)
77
+	if err != nil {
78
+		return
79
+	}
80
+	_, _, e1 := syscall.Syscall(syscall.SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options))
81
+	use(unsafe.Pointer(_p0))
82
+	use(unsafe.Pointer(_p1))
83
+	if e1 != 0 {
84
+		err = errnoErr(e1)
85
+	}
86
+	return
87
+}
88
+
89
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
90
+
91
+func listxattr(path string, dest []byte, options int) (sz int, err error) {
92
+	var _p0 *byte
93
+	_p0, err = syscall.BytePtrFromString(path)
94
+	if err != nil {
95
+		return
96
+	}
97
+	var _p1 unsafe.Pointer
98
+	if len(dest) > 0 {
99
+		_p1 = unsafe.Pointer(&dest[0])
100
+	} else {
101
+		_p1 = unsafe.Pointer(&_zero)
102
+	}
103
+	r0, _, e1 := syscall.Syscall6(syscall.SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(options), 0, 0)
104
+	use(unsafe.Pointer(_p0))
105
+	sz = int(r0)
106
+	if e1 != 0 {
107
+		err = errnoErr(e1)
108
+	}
109
+	return
110
+}
0 111
new file mode 100644
... ...
@@ -0,0 +1,111 @@
0
+// mksyscall.pl xattr_darwin.go
1
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
2
+
3
+package sysx
4
+
5
+import (
6
+	"syscall"
7
+	"unsafe"
8
+)
9
+
10
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
11
+
12
+func getxattr(path string, attr string, dest []byte, pos int, options int) (sz int, err error) {
13
+	var _p0 *byte
14
+	_p0, err = syscall.BytePtrFromString(path)
15
+	if err != nil {
16
+		return
17
+	}
18
+	var _p1 *byte
19
+	_p1, err = syscall.BytePtrFromString(attr)
20
+	if err != nil {
21
+		return
22
+	}
23
+	var _p2 unsafe.Pointer
24
+	if len(dest) > 0 {
25
+		_p2 = unsafe.Pointer(&dest[0])
26
+	} else {
27
+		_p2 = unsafe.Pointer(&_zero)
28
+	}
29
+	r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), uintptr(pos), uintptr(options))
30
+	use(unsafe.Pointer(_p0))
31
+	use(unsafe.Pointer(_p1))
32
+	sz = int(r0)
33
+	if e1 != 0 {
34
+		err = errnoErr(e1)
35
+	}
36
+	return
37
+}
38
+
39
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
40
+
41
+func setxattr(path string, attr string, data []byte, flags int) (err error) {
42
+	var _p0 *byte
43
+	_p0, err = syscall.BytePtrFromString(path)
44
+	if err != nil {
45
+		return
46
+	}
47
+	var _p1 *byte
48
+	_p1, err = syscall.BytePtrFromString(attr)
49
+	if err != nil {
50
+		return
51
+	}
52
+	var _p2 unsafe.Pointer
53
+	if len(data) > 0 {
54
+		_p2 = unsafe.Pointer(&data[0])
55
+	} else {
56
+		_p2 = unsafe.Pointer(&_zero)
57
+	}
58
+	_, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
59
+	use(unsafe.Pointer(_p0))
60
+	use(unsafe.Pointer(_p1))
61
+	if e1 != 0 {
62
+		err = errnoErr(e1)
63
+	}
64
+	return
65
+}
66
+
67
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
68
+
69
+func removexattr(path string, attr string, options int) (err error) {
70
+	var _p0 *byte
71
+	_p0, err = syscall.BytePtrFromString(path)
72
+	if err != nil {
73
+		return
74
+	}
75
+	var _p1 *byte
76
+	_p1, err = syscall.BytePtrFromString(attr)
77
+	if err != nil {
78
+		return
79
+	}
80
+	_, _, e1 := syscall.Syscall(syscall.SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options))
81
+	use(unsafe.Pointer(_p0))
82
+	use(unsafe.Pointer(_p1))
83
+	if e1 != 0 {
84
+		err = errnoErr(e1)
85
+	}
86
+	return
87
+}
88
+
89
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
90
+
91
+func listxattr(path string, dest []byte, options int) (sz int, err error) {
92
+	var _p0 *byte
93
+	_p0, err = syscall.BytePtrFromString(path)
94
+	if err != nil {
95
+		return
96
+	}
97
+	var _p1 unsafe.Pointer
98
+	if len(dest) > 0 {
99
+		_p1 = unsafe.Pointer(&dest[0])
100
+	} else {
101
+		_p1 = unsafe.Pointer(&_zero)
102
+	}
103
+	r0, _, e1 := syscall.Syscall6(syscall.SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(options), 0, 0)
104
+	use(unsafe.Pointer(_p0))
105
+	sz = int(r0)
106
+	if e1 != 0 {
107
+		err = errnoErr(e1)
108
+	}
109
+	return
110
+}
0 111
new file mode 100644
... ...
@@ -0,0 +1,12 @@
0
+package sysx
1
+
2
+import (
3
+	"errors"
4
+)
5
+
6
+// Initial stub version for FreeBSD. FreeBSD has a different
7
+// syscall API from Darwin and Linux for extended attributes;
8
+// it is also not widely used. It is not exposed at all by the
9
+// Go syscall package, so we will eventually need to implement it directly.
10
+
11
+var unsupported = errors.New("extended attributes unsupported on FreeBSD")
0 12
new file mode 100644
... ...
@@ -0,0 +1,61 @@
0
+package sysx
1
+
2
+import "syscall"
3
+
4
+// These functions will be generated by generate.sh
5
+//    $ GOOS=linux GOARCH=386 ./generate.sh xattr
6
+//    $ GOOS=linux GOARCH=amd64 ./generate.sh xattr
7
+//    $ GOOS=linux GOARCH=arm ./generate.sh xattr
8
+//    $ GOOS=linux GOARCH=arm64 ./generate.sh xattr
9
+//    $ GOOS=linux GOARCH=ppc64 ./generate.sh xattr
10
+//    $ GOOS=linux GOARCH=ppc64le ./generate.sh xattr
11
+//    $ GOOS=linux GOARCH=s390x ./generate.sh xattr
12
+
13
+// Listxattr calls syscall listxattr and reads all content
14
+// and returns a string array
15
+func Listxattr(path string) ([]string, error) {
16
+	return listxattrAll(path, syscall.Listxattr)
17
+}
18
+
19
+// Removexattr calls syscall removexattr
20
+func Removexattr(path string, attr string) (err error) {
21
+	return syscall.Removexattr(path, attr)
22
+}
23
+
24
+// Setxattr calls syscall setxattr
25
+func Setxattr(path string, attr string, data []byte, flags int) (err error) {
26
+	return syscall.Setxattr(path, attr, data, flags)
27
+}
28
+
29
+// Getxattr calls syscall getxattr
30
+func Getxattr(path, attr string) ([]byte, error) {
31
+	return getxattrAll(path, attr, syscall.Getxattr)
32
+}
33
+
34
+//sys llistxattr(path string, dest []byte) (sz int, err error)
35
+
36
+// LListxattr lists xattrs, not following symlinks
37
+func LListxattr(path string) ([]string, error) {
38
+	return listxattrAll(path, llistxattr)
39
+}
40
+
41
+//sys lremovexattr(path string, attr string) (err error)
42
+
43
+// LRemovexattr removes an xattr, not following symlinks
44
+func LRemovexattr(path string, attr string) (err error) {
45
+	return lremovexattr(path, attr)
46
+}
47
+
48
+//sys lsetxattr(path string, attr string, data []byte, flags int) (err error)
49
+
50
+// LSetxattr sets an xattr, not following symlinks
51
+func LSetxattr(path string, attr string, data []byte, flags int) (err error) {
52
+	return lsetxattr(path, attr, data, flags)
53
+}
54
+
55
+//sys lgetxattr(path string, attr string, dest []byte) (sz int, err error)
56
+
57
+// LGetxattr gets an xattr, not following symlinks
58
+func LGetxattr(path, attr string) ([]byte, error) {
59
+	return getxattrAll(path, attr, lgetxattr)
60
+}
0 61
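
An end-to-end sketch of the Linux wrappers (import path assumed; the path and attribute name are illustrative, and user.* attributes need a filesystem mounted with xattr support).

package main

import (
	"fmt"
	"os"

	"github.com/containerd/continuity/sysx"
)

func main() {
	path := "/tmp/xattr-demo"
	f, err := os.Create(path)
	if err != nil {
		panic(err)
	}
	f.Close()

	if err := sysx.Setxattr(path, "user.demo", []byte("value"), 0); err != nil {
		panic(err) // e.g. a filesystem without xattr support
	}
	names, _ := sysx.Listxattr(path)
	val, _ := sysx.Getxattr(path, "user.demo")
	fmt.Println(names, string(val)) // [user.demo] value
}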
new file mode 100644
... ...
@@ -0,0 +1,111 @@
0
+// mksyscall.pl -l32 xattr_linux.go
1
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
2
+
3
+package sysx
4
+
5
+import (
6
+	"syscall"
7
+	"unsafe"
8
+)
9
+
10
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
11
+
12
+func llistxattr(path string, dest []byte) (sz int, err error) {
13
+	var _p0 *byte
14
+	_p0, err = syscall.BytePtrFromString(path)
15
+	if err != nil {
16
+		return
17
+	}
18
+	var _p1 unsafe.Pointer
19
+	if len(dest) > 0 {
20
+		_p1 = unsafe.Pointer(&dest[0])
21
+	} else {
22
+		_p1 = unsafe.Pointer(&_zero)
23
+	}
24
+	r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
25
+	use(unsafe.Pointer(_p0))
26
+	sz = int(r0)
27
+	if e1 != 0 {
28
+		err = errnoErr(e1)
29
+	}
30
+	return
31
+}
32
+
33
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
34
+
35
+func lremovexattr(path string, attr string) (err error) {
36
+	var _p0 *byte
37
+	_p0, err = syscall.BytePtrFromString(path)
38
+	if err != nil {
39
+		return
40
+	}
41
+	var _p1 *byte
42
+	_p1, err = syscall.BytePtrFromString(attr)
43
+	if err != nil {
44
+		return
45
+	}
46
+	_, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
47
+	use(unsafe.Pointer(_p0))
48
+	use(unsafe.Pointer(_p1))
49
+	if e1 != 0 {
50
+		err = errnoErr(e1)
51
+	}
52
+	return
53
+}
54
+
55
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
56
+
57
+func lsetxattr(path string, attr string, data []byte, flags int) (err error) {
58
+	var _p0 *byte
59
+	_p0, err = syscall.BytePtrFromString(path)
60
+	if err != nil {
61
+		return
62
+	}
63
+	var _p1 *byte
64
+	_p1, err = syscall.BytePtrFromString(attr)
65
+	if err != nil {
66
+		return
67
+	}
68
+	var _p2 unsafe.Pointer
69
+	if len(data) > 0 {
70
+		_p2 = unsafe.Pointer(&data[0])
71
+	} else {
72
+		_p2 = unsafe.Pointer(&_zero)
73
+	}
74
+	_, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
75
+	use(unsafe.Pointer(_p0))
76
+	use(unsafe.Pointer(_p1))
77
+	if e1 != 0 {
78
+		err = errnoErr(e1)
79
+	}
80
+	return
81
+}
82
+
83
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
84
+
85
+func lgetxattr(path string, attr string, dest []byte) (sz int, err error) {
86
+	var _p0 *byte
87
+	_p0, err = syscall.BytePtrFromString(path)
88
+	if err != nil {
89
+		return
90
+	}
91
+	var _p1 *byte
92
+	_p1, err = syscall.BytePtrFromString(attr)
93
+	if err != nil {
94
+		return
95
+	}
96
+	var _p2 unsafe.Pointer
97
+	if len(dest) > 0 {
98
+		_p2 = unsafe.Pointer(&dest[0])
99
+	} else {
100
+		_p2 = unsafe.Pointer(&_zero)
101
+	}
102
+	r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
103
+	use(unsafe.Pointer(_p0))
104
+	use(unsafe.Pointer(_p1))
105
+	sz = int(r0)
106
+	if e1 != 0 {
107
+		err = errnoErr(e1)
108
+	}
109
+	return
110
+}
0 111
new file mode 100644
... ...
@@ -0,0 +1,111 @@
0
+// mksyscall.pl xattr_linux.go
1
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
2
+
3
+package sysx
4
+
5
+import (
6
+	"syscall"
7
+	"unsafe"
8
+)
9
+
10
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
11
+
12
+func llistxattr(path string, dest []byte) (sz int, err error) {
13
+	var _p0 *byte
14
+	_p0, err = syscall.BytePtrFromString(path)
15
+	if err != nil {
16
+		return
17
+	}
18
+	var _p1 unsafe.Pointer
19
+	if len(dest) > 0 {
20
+		_p1 = unsafe.Pointer(&dest[0])
21
+	} else {
22
+		_p1 = unsafe.Pointer(&_zero)
23
+	}
24
+	r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
25
+	use(unsafe.Pointer(_p0))
26
+	sz = int(r0)
27
+	if e1 != 0 {
28
+		err = errnoErr(e1)
29
+	}
30
+	return
31
+}
32
+
33
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
34
+
35
+func lremovexattr(path string, attr string) (err error) {
36
+	var _p0 *byte
37
+	_p0, err = syscall.BytePtrFromString(path)
38
+	if err != nil {
39
+		return
40
+	}
41
+	var _p1 *byte
42
+	_p1, err = syscall.BytePtrFromString(attr)
43
+	if err != nil {
44
+		return
45
+	}
46
+	_, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
47
+	use(unsafe.Pointer(_p0))
48
+	use(unsafe.Pointer(_p1))
49
+	if e1 != 0 {
50
+		err = errnoErr(e1)
51
+	}
52
+	return
53
+}
54
+
55
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
56
+
57
+func lsetxattr(path string, attr string, data []byte, flags int) (err error) {
58
+	var _p0 *byte
59
+	_p0, err = syscall.BytePtrFromString(path)
60
+	if err != nil {
61
+		return
62
+	}
63
+	var _p1 *byte
64
+	_p1, err = syscall.BytePtrFromString(attr)
65
+	if err != nil {
66
+		return
67
+	}
68
+	var _p2 unsafe.Pointer
69
+	if len(data) > 0 {
70
+		_p2 = unsafe.Pointer(&data[0])
71
+	} else {
72
+		_p2 = unsafe.Pointer(&_zero)
73
+	}
74
+	_, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
75
+	use(unsafe.Pointer(_p0))
76
+	use(unsafe.Pointer(_p1))
77
+	if e1 != 0 {
78
+		err = errnoErr(e1)
79
+	}
80
+	return
81
+}
82
+
83
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
84
+
85
+func lgetxattr(path string, attr string, dest []byte) (sz int, err error) {
86
+	var _p0 *byte
87
+	_p0, err = syscall.BytePtrFromString(path)
88
+	if err != nil {
89
+		return
90
+	}
91
+	var _p1 *byte
92
+	_p1, err = syscall.BytePtrFromString(attr)
93
+	if err != nil {
94
+		return
95
+	}
96
+	var _p2 unsafe.Pointer
97
+	if len(dest) > 0 {
98
+		_p2 = unsafe.Pointer(&dest[0])
99
+	} else {
100
+		_p2 = unsafe.Pointer(&_zero)
101
+	}
102
+	r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
103
+	use(unsafe.Pointer(_p0))
104
+	use(unsafe.Pointer(_p1))
105
+	sz = int(r0)
106
+	if e1 != 0 {
107
+		err = errnoErr(e1)
108
+	}
109
+	return
110
+}
0 111
new file mode 100644
... ...
@@ -0,0 +1,111 @@
0
+// mksyscall.pl -l32 xattr_linux.go
1
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
2
+
3
+package sysx
4
+
5
+import (
6
+	"syscall"
7
+	"unsafe"
8
+)
9
+
10
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
11
+
12
+func llistxattr(path string, dest []byte) (sz int, err error) {
13
+	var _p0 *byte
14
+	_p0, err = syscall.BytePtrFromString(path)
15
+	if err != nil {
16
+		return
17
+	}
18
+	var _p1 unsafe.Pointer
19
+	if len(dest) > 0 {
20
+		_p1 = unsafe.Pointer(&dest[0])
21
+	} else {
22
+		_p1 = unsafe.Pointer(&_zero)
23
+	}
24
+	r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
25
+	use(unsafe.Pointer(_p0))
26
+	sz = int(r0)
27
+	if e1 != 0 {
28
+		err = errnoErr(e1)
29
+	}
30
+	return
31
+}
32
+
33
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
34
+
35
+func lremovexattr(path string, attr string) (err error) {
36
+	var _p0 *byte
37
+	_p0, err = syscall.BytePtrFromString(path)
38
+	if err != nil {
39
+		return
40
+	}
41
+	var _p1 *byte
42
+	_p1, err = syscall.BytePtrFromString(attr)
43
+	if err != nil {
44
+		return
45
+	}
46
+	_, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
47
+	use(unsafe.Pointer(_p0))
48
+	use(unsafe.Pointer(_p1))
49
+	if e1 != 0 {
50
+		err = errnoErr(e1)
51
+	}
52
+	return
53
+}
54
+
55
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
56
+
57
+func lsetxattr(path string, attr string, data []byte, flags int) (err error) {
58
+	var _p0 *byte
59
+	_p0, err = syscall.BytePtrFromString(path)
60
+	if err != nil {
61
+		return
62
+	}
63
+	var _p1 *byte
64
+	_p1, err = syscall.BytePtrFromString(attr)
65
+	if err != nil {
66
+		return
67
+	}
68
+	var _p2 unsafe.Pointer
69
+	if len(data) > 0 {
70
+		_p2 = unsafe.Pointer(&data[0])
71
+	} else {
72
+		_p2 = unsafe.Pointer(&_zero)
73
+	}
74
+	_, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
75
+	use(unsafe.Pointer(_p0))
76
+	use(unsafe.Pointer(_p1))
77
+	if e1 != 0 {
78
+		err = errnoErr(e1)
79
+	}
80
+	return
81
+}
82
+
83
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
84
+
85
+func lgetxattr(path string, attr string, dest []byte) (sz int, err error) {
86
+	var _p0 *byte
87
+	_p0, err = syscall.BytePtrFromString(path)
88
+	if err != nil {
89
+		return
90
+	}
91
+	var _p1 *byte
92
+	_p1, err = syscall.BytePtrFromString(attr)
93
+	if err != nil {
94
+		return
95
+	}
96
+	var _p2 unsafe.Pointer
97
+	if len(dest) > 0 {
98
+		_p2 = unsafe.Pointer(&dest[0])
99
+	} else {
100
+		_p2 = unsafe.Pointer(&_zero)
101
+	}
102
+	r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
103
+	use(unsafe.Pointer(_p0))
104
+	use(unsafe.Pointer(_p1))
105
+	sz = int(r0)
106
+	if e1 != 0 {
107
+		err = errnoErr(e1)
108
+	}
109
+	return
110
+}
0 111
new file mode 100644
... ...
@@ -0,0 +1,111 @@
0
+// mksyscall.pl xattr_linux.go
1
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
2
+
3
+package sysx
4
+
5
+import (
6
+	"syscall"
7
+	"unsafe"
8
+)
9
+
10
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
11
+
12
+func llistxattr(path string, dest []byte) (sz int, err error) {
13
+	var _p0 *byte
14
+	_p0, err = syscall.BytePtrFromString(path)
15
+	if err != nil {
16
+		return
17
+	}
18
+	var _p1 unsafe.Pointer
19
+	if len(dest) > 0 {
20
+		_p1 = unsafe.Pointer(&dest[0])
21
+	} else {
22
+		_p1 = unsafe.Pointer(&_zero)
23
+	}
24
+	r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
25
+	use(unsafe.Pointer(_p0))
26
+	sz = int(r0)
27
+	if e1 != 0 {
28
+		err = errnoErr(e1)
29
+	}
30
+	return
31
+}
32
+
33
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
34
+
35
+func lremovexattr(path string, attr string) (err error) {
36
+	var _p0 *byte
37
+	_p0, err = syscall.BytePtrFromString(path)
38
+	if err != nil {
39
+		return
40
+	}
41
+	var _p1 *byte
42
+	_p1, err = syscall.BytePtrFromString(attr)
43
+	if err != nil {
44
+		return
45
+	}
46
+	_, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
47
+	use(unsafe.Pointer(_p0))
48
+	use(unsafe.Pointer(_p1))
49
+	if e1 != 0 {
50
+		err = errnoErr(e1)
51
+	}
52
+	return
53
+}
54
+
55
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
56
+
57
+func lsetxattr(path string, attr string, data []byte, flags int) (err error) {
58
+	var _p0 *byte
59
+	_p0, err = syscall.BytePtrFromString(path)
60
+	if err != nil {
61
+		return
62
+	}
63
+	var _p1 *byte
64
+	_p1, err = syscall.BytePtrFromString(attr)
65
+	if err != nil {
66
+		return
67
+	}
68
+	var _p2 unsafe.Pointer
69
+	if len(data) > 0 {
70
+		_p2 = unsafe.Pointer(&data[0])
71
+	} else {
72
+		_p2 = unsafe.Pointer(&_zero)
73
+	}
74
+	_, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
75
+	use(unsafe.Pointer(_p0))
76
+	use(unsafe.Pointer(_p1))
77
+	if e1 != 0 {
78
+		err = errnoErr(e1)
79
+	}
80
+	return
81
+}
82
+
83
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
84
+
85
+func lgetxattr(path string, attr string, dest []byte) (sz int, err error) {
86
+	var _p0 *byte
87
+	_p0, err = syscall.BytePtrFromString(path)
88
+	if err != nil {
89
+		return
90
+	}
91
+	var _p1 *byte
92
+	_p1, err = syscall.BytePtrFromString(attr)
93
+	if err != nil {
94
+		return
95
+	}
96
+	var _p2 unsafe.Pointer
97
+	if len(dest) > 0 {
98
+		_p2 = unsafe.Pointer(&dest[0])
99
+	} else {
100
+		_p2 = unsafe.Pointer(&_zero)
101
+	}
102
+	r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
103
+	use(unsafe.Pointer(_p0))
104
+	use(unsafe.Pointer(_p1))
105
+	sz = int(r0)
106
+	if e1 != 0 {
107
+		err = errnoErr(e1)
108
+	}
109
+	return
110
+}
0 111
new file mode 100644
... ...
@@ -0,0 +1,111 @@
0
+// mksyscall.pl xattr_linux.go
1
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
2
+
3
+package sysx
4
+
5
+import (
6
+	"syscall"
7
+	"unsafe"
8
+)
9
+
10
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
11
+
12
+func llistxattr(path string, dest []byte) (sz int, err error) {
13
+	var _p0 *byte
14
+	_p0, err = syscall.BytePtrFromString(path)
15
+	if err != nil {
16
+		return
17
+	}
18
+	var _p1 unsafe.Pointer
19
+	if len(dest) > 0 {
20
+		_p1 = unsafe.Pointer(&dest[0])
21
+	} else {
22
+		_p1 = unsafe.Pointer(&_zero)
23
+	}
24
+	r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
25
+	use(unsafe.Pointer(_p0))
26
+	sz = int(r0)
27
+	if e1 != 0 {
28
+		err = errnoErr(e1)
29
+	}
30
+	return
31
+}
32
+
33
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
34
+
35
+func lremovexattr(path string, attr string) (err error) {
36
+	var _p0 *byte
37
+	_p0, err = syscall.BytePtrFromString(path)
38
+	if err != nil {
39
+		return
40
+	}
41
+	var _p1 *byte
42
+	_p1, err = syscall.BytePtrFromString(attr)
43
+	if err != nil {
44
+		return
45
+	}
46
+	_, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
47
+	use(unsafe.Pointer(_p0))
48
+	use(unsafe.Pointer(_p1))
49
+	if e1 != 0 {
50
+		err = errnoErr(e1)
51
+	}
52
+	return
53
+}
54
+
55
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
56
+
57
+func lsetxattr(path string, attr string, data []byte, flags int) (err error) {
58
+	var _p0 *byte
59
+	_p0, err = syscall.BytePtrFromString(path)
60
+	if err != nil {
61
+		return
62
+	}
63
+	var _p1 *byte
64
+	_p1, err = syscall.BytePtrFromString(attr)
65
+	if err != nil {
66
+		return
67
+	}
68
+	var _p2 unsafe.Pointer
69
+	if len(data) > 0 {
70
+		_p2 = unsafe.Pointer(&data[0])
71
+	} else {
72
+		_p2 = unsafe.Pointer(&_zero)
73
+	}
74
+	_, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
75
+	use(unsafe.Pointer(_p0))
76
+	use(unsafe.Pointer(_p1))
77
+	if e1 != 0 {
78
+		err = errnoErr(e1)
79
+	}
80
+	return
81
+}
82
+
83
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
84
+
85
+func lgetxattr(path string, attr string, dest []byte) (sz int, err error) {
86
+	var _p0 *byte
87
+	_p0, err = syscall.BytePtrFromString(path)
88
+	if err != nil {
89
+		return
90
+	}
91
+	var _p1 *byte
92
+	_p1, err = syscall.BytePtrFromString(attr)
93
+	if err != nil {
94
+		return
95
+	}
96
+	var _p2 unsafe.Pointer
97
+	if len(dest) > 0 {
98
+		_p2 = unsafe.Pointer(&dest[0])
99
+	} else {
100
+		_p2 = unsafe.Pointer(&_zero)
101
+	}
102
+	r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
103
+	use(unsafe.Pointer(_p0))
104
+	use(unsafe.Pointer(_p1))
105
+	sz = int(r0)
106
+	if e1 != 0 {
107
+		err = errnoErr(e1)
108
+	}
109
+	return
110
+}
0 111
new file mode 100644
... ...
@@ -0,0 +1,111 @@
0
+// mksyscall.pl xattr_linux.go
1
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
2
+
3
+package sysx
4
+
5
+import (
6
+	"syscall"
7
+	"unsafe"
8
+)
9
+
10
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
11
+
12
+func llistxattr(path string, dest []byte) (sz int, err error) {
13
+	var _p0 *byte
14
+	_p0, err = syscall.BytePtrFromString(path)
15
+	if err != nil {
16
+		return
17
+	}
18
+	var _p1 unsafe.Pointer
19
+	if len(dest) > 0 {
20
+		_p1 = unsafe.Pointer(&dest[0])
21
+	} else {
22
+		_p1 = unsafe.Pointer(&_zero)
23
+	}
24
+	r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
25
+	use(unsafe.Pointer(_p0))
26
+	sz = int(r0)
27
+	if e1 != 0 {
28
+		err = errnoErr(e1)
29
+	}
30
+	return
31
+}
32
+
33
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
34
+
35
+func lremovexattr(path string, attr string) (err error) {
36
+	var _p0 *byte
37
+	_p0, err = syscall.BytePtrFromString(path)
38
+	if err != nil {
39
+		return
40
+	}
41
+	var _p1 *byte
42
+	_p1, err = syscall.BytePtrFromString(attr)
43
+	if err != nil {
44
+		return
45
+	}
46
+	_, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
47
+	use(unsafe.Pointer(_p0))
48
+	use(unsafe.Pointer(_p1))
49
+	if e1 != 0 {
50
+		err = errnoErr(e1)
51
+	}
52
+	return
53
+}
54
+
55
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
56
+
57
+func lsetxattr(path string, attr string, data []byte, flags int) (err error) {
58
+	var _p0 *byte
59
+	_p0, err = syscall.BytePtrFromString(path)
60
+	if err != nil {
61
+		return
62
+	}
63
+	var _p1 *byte
64
+	_p1, err = syscall.BytePtrFromString(attr)
65
+	if err != nil {
66
+		return
67
+	}
68
+	var _p2 unsafe.Pointer
69
+	if len(data) > 0 {
70
+		_p2 = unsafe.Pointer(&data[0])
71
+	} else {
72
+		_p2 = unsafe.Pointer(&_zero)
73
+	}
74
+	_, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
75
+	use(unsafe.Pointer(_p0))
76
+	use(unsafe.Pointer(_p1))
77
+	if e1 != 0 {
78
+		err = errnoErr(e1)
79
+	}
80
+	return
81
+}
82
+
83
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
84
+
85
+func lgetxattr(path string, attr string, dest []byte) (sz int, err error) {
86
+	var _p0 *byte
87
+	_p0, err = syscall.BytePtrFromString(path)
88
+	if err != nil {
89
+		return
90
+	}
91
+	var _p1 *byte
92
+	_p1, err = syscall.BytePtrFromString(attr)
93
+	if err != nil {
94
+		return
95
+	}
96
+	var _p2 unsafe.Pointer
97
+	if len(dest) > 0 {
98
+		_p2 = unsafe.Pointer(&dest[0])
99
+	} else {
100
+		_p2 = unsafe.Pointer(&_zero)
101
+	}
102
+	r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
103
+	use(unsafe.Pointer(_p0))
104
+	use(unsafe.Pointer(_p1))
105
+	sz = int(r0)
106
+	if e1 != 0 {
107
+		err = errnoErr(e1)
108
+	}
109
+	return
110
+}
0 111
new file mode 100644
... ...
@@ -0,0 +1,111 @@
0
+// mksyscall.pl xattr_linux.go
1
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
2
+
3
+package sysx
4
+
5
+import (
6
+	"syscall"
7
+	"unsafe"
8
+)
9
+
10
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
11
+
12
+func llistxattr(path string, dest []byte) (sz int, err error) {
13
+	var _p0 *byte
14
+	_p0, err = syscall.BytePtrFromString(path)
15
+	if err != nil {
16
+		return
17
+	}
18
+	var _p1 unsafe.Pointer
19
+	if len(dest) > 0 {
20
+		_p1 = unsafe.Pointer(&dest[0])
21
+	} else {
22
+		_p1 = unsafe.Pointer(&_zero)
23
+	}
24
+	r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
25
+	use(unsafe.Pointer(_p0))
26
+	sz = int(r0)
27
+	if e1 != 0 {
28
+		err = errnoErr(e1)
29
+	}
30
+	return
31
+}
32
+
33
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
34
+
35
+func lremovexattr(path string, attr string) (err error) {
36
+	var _p0 *byte
37
+	_p0, err = syscall.BytePtrFromString(path)
38
+	if err != nil {
39
+		return
40
+	}
41
+	var _p1 *byte
42
+	_p1, err = syscall.BytePtrFromString(attr)
43
+	if err != nil {
44
+		return
45
+	}
46
+	_, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
47
+	use(unsafe.Pointer(_p0))
48
+	use(unsafe.Pointer(_p1))
49
+	if e1 != 0 {
50
+		err = errnoErr(e1)
51
+	}
52
+	return
53
+}
54
+
55
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
56
+
57
+func lsetxattr(path string, attr string, data []byte, flags int) (err error) {
58
+	var _p0 *byte
59
+	_p0, err = syscall.BytePtrFromString(path)
60
+	if err != nil {
61
+		return
62
+	}
63
+	var _p1 *byte
64
+	_p1, err = syscall.BytePtrFromString(attr)
65
+	if err != nil {
66
+		return
67
+	}
68
+	var _p2 unsafe.Pointer
69
+	if len(data) > 0 {
70
+		_p2 = unsafe.Pointer(&data[0])
71
+	} else {
72
+		_p2 = unsafe.Pointer(&_zero)
73
+	}
74
+	_, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
75
+	use(unsafe.Pointer(_p0))
76
+	use(unsafe.Pointer(_p1))
77
+	if e1 != 0 {
78
+		err = errnoErr(e1)
79
+	}
80
+	return
81
+}
82
+
83
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
84
+
85
+func lgetxattr(path string, attr string, dest []byte) (sz int, err error) {
86
+	var _p0 *byte
87
+	_p0, err = syscall.BytePtrFromString(path)
88
+	if err != nil {
89
+		return
90
+	}
91
+	var _p1 *byte
92
+	_p1, err = syscall.BytePtrFromString(attr)
93
+	if err != nil {
94
+		return
95
+	}
96
+	var _p2 unsafe.Pointer
97
+	if len(dest) > 0 {
98
+		_p2 = unsafe.Pointer(&dest[0])
99
+	} else {
100
+		_p2 = unsafe.Pointer(&_zero)
101
+	}
102
+	r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
103
+	use(unsafe.Pointer(_p0))
104
+	use(unsafe.Pointer(_p1))
105
+	sz = int(r0)
106
+	if e1 != 0 {
107
+		err = errnoErr(e1)
108
+	}
109
+	return
110
+}
0 111
new file mode 100644
... ...
@@ -0,0 +1,12 @@
0
+package sysx
1
+
2
+import (
3
+	"errors"
4
+)
5
+
6
+// Initial stub version for Solaris. Solaris has a different
7
+// syscall API from Darwin and Linux for extended attributes;
8
+// it is also not widely used. It is not exposed at all by the
9
+// Go syscall package, so we need to implement directly eventually.
10
+
11
+var unsupported = errors.New("extended attributes unsupported on Solaris")
0 12
new file mode 100644
... ...
@@ -0,0 +1,44 @@
0
+// +build freebsd solaris
1
+
2
+package sysx
3
+
4
+// Listxattr is a stub on this platform; it returns an empty list
5
+// and no error
6
+func Listxattr(path string) ([]string, error) {
7
+	return []string{}, nil
8
+}
9
+
10
+// Removexattr is unsupported on this platform
11
+func Removexattr(path string, attr string) (err error) {
12
+	return unsupported
13
+}
14
+
15
+// Setxattr is unsupported on this platform
16
+func Setxattr(path string, attr string, data []byte, flags int) (err error) {
17
+	return unsupported
18
+}
19
+
20
+// Getxattr is unsupported on this platform
21
+func Getxattr(path, attr string) ([]byte, error) {
22
+	return []byte{}, unsupported
23
+}
24
+
25
+// LListxattr lists xattrs, not following symlinks
26
+func LListxattr(path string) ([]string, error) {
27
+	return []string{}, nil
28
+}
29
+
30
+// LRemovexattr removes an xattr, not following symlinks
31
+func LRemovexattr(path string, attr string) (err error) {
32
+	return unsupported
33
+}
34
+
35
+// LSetxattr sets an xattr, not following symlinks
36
+func LSetxattr(path string, attr string, data []byte, flags int) (err error) {
37
+	return unsupported
38
+}
39
+
40
+// LGetxattr gets an xattr, not following symlinks
41
+func LGetxattr(path, attr string) ([]byte, error) {
42
+	return []byte{}, nil
43
+}