
Merge pull request #21454 from anusha-ragunathan/fix_overlay

Move layer mount refcounts to mountedLayer

Brian Goff authored on 2016/03/24 11:39:38
Showing 14 changed files
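
This merge removes the per-driver mount reference counting that the aufs, devicemapper, overlay, windows, and zfs graphdrivers each kept in their own `active` maps, and moves that bookkeeping up into the layer store (`mountedLayer` / `referencedRWLayer`). The drivers now keep only a simple path cache, share a statfs-based `graphdriver.Mounted` helper to decide whether anything is actually mounted, and their `Get`/`Put`/`Remove`/`Cleanup` paths become idempotent. `DeviceSet.Shutdown` takes the driver's home directory so it can find mounts by walking `mnt/` instead of consulting counters, and a benchmark exercising concurrent container actions is added.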
... ...
@@ -228,6 +228,7 @@ func (daemon *Daemon) exportContainerRw(container *container.Container) (archive
 
 	archive, err := container.RWLayer.TarStream()
 	if err != nil {
+		daemon.Unmount(container) // logging is already handled in the `Unmount` function
 		return nil, err
 	}
 	return ioutils.NewReadCloserWrapper(archive, func() error {
... ...
@@ -29,6 +29,7 @@ import (
 	"os"
 	"os/exec"
 	"path"
+	"path/filepath"
 	"strings"
 	"sync"
 	"syscall"
... ...
@@ -64,21 +65,13 @@ func init() {
 	graphdriver.Register("aufs", Init)
 }
 
-type data struct {
-	referenceCount int
-	path           string
-}
-
 // Driver contains information about the filesystem mounted.
-// root of the filesystem
-// sync.Mutex to protect against concurrent modifications
-// active maps mount id to the count
 type Driver struct {
-	root       string
-	uidMaps    []idtools.IDMap
-	gidMaps    []idtools.IDMap
-	sync.Mutex // Protects concurrent modification to active
-	active     map[string]*data
+	root          string
+	uidMaps       []idtools.IDMap
+	gidMaps       []idtools.IDMap
+	pathCacheLock sync.Mutex
+	pathCache     map[string]string
 }
 
 // Init returns a new AUFS driver.
... ...
@@ -111,10 +104,10 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
 	}
 
 	a := &Driver{
-		root:    root,
-		active:  make(map[string]*data),
-		uidMaps: uidMaps,
-		gidMaps: gidMaps,
+		root:      root,
+		uidMaps:   uidMaps,
+		gidMaps:   gidMaps,
+		pathCache: make(map[string]string),
 	}
 
 	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
... ...
@@ -228,9 +221,7 @@ func (a *Driver) Create(id, parent, mountLabel string) error {
 			}
 		}
 	}
-	a.Lock()
-	a.active[id] = &data{}
-	a.Unlock()
+
 	return nil
 }
 
... ...
@@ -259,108 +250,91 @@ func (a *Driver) createDirsFor(id string) error {
 
 // Remove will unmount and remove the given id.
 func (a *Driver) Remove(id string) error {
-	// Protect the a.active from concurrent access
-	a.Lock()
-	defer a.Unlock()
-
-	m := a.active[id]
-	if m != nil {
-		if m.referenceCount > 0 {
-			return nil
-		}
-		// Make sure the dir is umounted first
-		if err := a.unmount(m); err != nil {
-			return err
-		}
+	a.pathCacheLock.Lock()
+	mountpoint, exists := a.pathCache[id]
+	a.pathCacheLock.Unlock()
+	if !exists {
+		mountpoint = a.getMountpoint(id)
 	}
-	tmpDirs := []string{
-		"mnt",
-		"diff",
+	if err := a.unmount(mountpoint); err != nil {
+		// no need to return here, we can still try to remove since the `Rename` will fail below if still mounted
+		logrus.Debugf("aufs: error while unmounting %s: %v", mountpoint, err)
 	}
 
 	// Atomically remove each directory in turn by first moving it out of the
 	// way (so that docker doesn't find it anymore) before doing removal of
 	// the whole tree.
-	for _, p := range tmpDirs {
-		realPath := path.Join(a.rootPath(), p, id)
-		tmpPath := path.Join(a.rootPath(), p, fmt.Sprintf("%s-removing", id))
-		if err := os.Rename(realPath, tmpPath); err != nil && !os.IsNotExist(err) {
-			return err
-		}
-		defer os.RemoveAll(tmpPath)
+	tmpMntPath := path.Join(a.mntPath(), fmt.Sprintf("%s-removing", id))
+	if err := os.Rename(mountpoint, tmpMntPath); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	defer os.RemoveAll(tmpMntPath)
+
+	tmpDiffpath := path.Join(a.diffPath(), fmt.Sprintf("%s-removing", id))
+	if err := os.Rename(a.getDiffPath(id), tmpDiffpath); err != nil && !os.IsNotExist(err) {
+		return err
 	}
+	defer os.RemoveAll(tmpDiffpath)
+
 	// Remove the layers file for the id
 	if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) {
 		return err
 	}
-	if m != nil {
-		delete(a.active, id)
-	}
+
+	a.pathCacheLock.Lock()
+	delete(a.pathCache, id)
+	a.pathCacheLock.Unlock()
 	return nil
 }
 
 // Get returns the rootfs path for the id.
 // This will mount the dir at it's given path
 func (a *Driver) Get(id, mountLabel string) (string, error) {
-	// Protect the a.active from concurrent access
-	a.Lock()
-	defer a.Unlock()
-
-	m := a.active[id]
-	if m == nil {
-		m = &data{}
-		a.active[id] = m
-	}
-
 	parents, err := a.getParentLayerPaths(id)
 	if err != nil && !os.IsNotExist(err) {
 		return "", err
 	}
 
+	a.pathCacheLock.Lock()
+	m, exists := a.pathCache[id]
+	a.pathCacheLock.Unlock()
+
+	if !exists {
+		m = a.getDiffPath(id)
+		if len(parents) > 0 {
+			m = a.getMountpoint(id)
+		}
+	}
+
 	// If a dir does not have a parent ( no layers )do not try to mount
 	// just return the diff path to the data
-	m.path = path.Join(a.rootPath(), "diff", id)
 	if len(parents) > 0 {
-		m.path = path.Join(a.rootPath(), "mnt", id)
-		if m.referenceCount == 0 {
-			if err := a.mount(id, m, mountLabel, parents); err != nil {
-				return "", err
-			}
+		if err := a.mount(id, m, mountLabel, parents); err != nil {
+			return "", err
 		}
 	}
-	m.referenceCount++
-	return m.path, nil
+
+	a.pathCacheLock.Lock()
+	a.pathCache[id] = m
+	a.pathCacheLock.Unlock()
+	return m, nil
 }
 
 // Put unmounts and updates list of active mounts.
 func (a *Driver) Put(id string) error {
-	// Protect the a.active from concurrent access
-	a.Lock()
-	defer a.Unlock()
-
-	m := a.active[id]
-	if m == nil {
-		// but it might be still here
-		if a.Exists(id) {
-			path := path.Join(a.rootPath(), "mnt", id)
-			err := Unmount(path)
-			if err != nil {
-				logrus.Debugf("Failed to unmount %s aufs: %v", id, err)
-			}
-		}
-		return nil
+	a.pathCacheLock.Lock()
+	m, exists := a.pathCache[id]
+	if !exists {
+		m = a.getMountpoint(id)
+		a.pathCache[id] = m
 	}
-	if count := m.referenceCount; count > 1 {
-		m.referenceCount = count - 1
-	} else {
-		ids, _ := getParentIds(a.rootPath(), id)
-		// We only mounted if there are any parents
-		if ids != nil && len(ids) > 0 {
-			a.unmount(m)
-		}
-		delete(a.active, id)
+	a.pathCacheLock.Unlock()
+
+	err := a.unmount(m)
+	if err != nil {
+		logrus.Debugf("Failed to unmount %s aufs: %v", id, err)
 	}
-	return nil
+	return err
 }
 
 // Diff produces an archive of the changes between the specified
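
Note: with the `referenceCount` bookkeeping gone, the aufs driver relies on the layer store above it to keep `Get` and `Put` balanced; `Put` and `Remove` simply unmount whenever the cached path is still mounted. A minimal caller-side sketch of that contract (the helper name and wrapper are illustrative, not part of this change):

	// withMount pairs one Get with exactly one Put, which is what the layer
	// store now guarantees for every driver.
	func withMount(driver graphdriver.Driver, id, mountLabel string, fn func(dir string) error) error {
		dir, err := driver.Get(id, mountLabel)
		if err != nil {
			return err
		}
		// Put is idempotent after this change: it unmounts only if the path
		// is still mounted, so there is no per-driver count to corrupt.
		defer driver.Put(id)
		return fn(dir)
	}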
... ...
@@ -443,16 +417,13 @@ func (a *Driver) getParentLayerPaths(id string) ([]string, error) {
 	return layers, nil
 }
 
-func (a *Driver) mount(id string, m *data, mountLabel string, layers []string) error {
+func (a *Driver) mount(id string, target string, mountLabel string, layers []string) error {
 	// If the id is mounted or we get an error return
-	if mounted, err := a.mounted(m); err != nil || mounted {
+	if mounted, err := a.mounted(target); err != nil || mounted {
 		return err
 	}
 
-	var (
-		target = m.path
-		rw     = path.Join(a.rootPath(), "diff", id)
-	)
+	rw := a.getDiffPath(id)
 
 	if err := a.aufsMount(layers, rw, target, mountLabel); err != nil {
 		return fmt.Errorf("error creating aufs mount to %s: %v", target, err)
... ...
@@ -460,26 +431,39 @@ func (a *Driver) mount(id string, m *data, mountLabel string, layers []string) e
 	return nil
 }
 
-func (a *Driver) unmount(m *data) error {
-	if mounted, err := a.mounted(m); err != nil || !mounted {
+func (a *Driver) unmount(mountPath string) error {
+	if mounted, err := a.mounted(mountPath); err != nil || !mounted {
+		return err
+	}
+	if err := Unmount(mountPath); err != nil {
 		return err
 	}
-	return Unmount(m.path)
+	return nil
 }
 
-func (a *Driver) mounted(m *data) (bool, error) {
-	var buf syscall.Statfs_t
-	if err := syscall.Statfs(m.path, &buf); err != nil {
-		return false, nil
-	}
-	return graphdriver.FsMagic(buf.Type) == graphdriver.FsMagicAufs, nil
+func (a *Driver) mounted(mountpoint string) (bool, error) {
+	return graphdriver.Mounted(graphdriver.FsMagicAufs, mountpoint)
 }
 
 // Cleanup aufs and unmount all mountpoints
 func (a *Driver) Cleanup() error {
-	for id, m := range a.active {
+	var dirs []string
+	if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		if !info.IsDir() {
+			return nil
+		}
+		dirs = append(dirs, path)
+		return nil
+	}); err != nil {
+		return err
+	}
+
+	for _, m := range dirs {
 		if err := a.unmount(m); err != nil {
-			logrus.Errorf("Unmounting %s: %s", stringid.TruncateID(id), err)
+			logrus.Debugf("aufs error unmounting %s: %s", stringid.TruncateID(m), err)
 		}
 	}
 	return mountpk.Unmount(a.root)
... ...
@@ -200,7 +200,7 @@ func TestMountedFalseResponse(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	response, err := d.mounted(d.active["1"])
+	response, err := d.mounted(d.getDiffPath("1"))
 	if err != nil {
 		t.Fatal(err)
 	}
... ...
@@ -227,7 +227,7 @@ func TestMountedTrueReponse(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	response, err := d.mounted(d.active["2"])
+	response, err := d.mounted(d.pathCache["2"])
 	if err != nil {
 		t.Fatal(err)
 	}
... ...
@@ -293,7 +293,7 @@ func TestRemoveMountedDir(t *testing.T) {
 		t.Fatal("mntPath should not be empty string")
 	}
 
-	mounted, err := d.mounted(d.active["2"])
+	mounted, err := d.mounted(d.pathCache["2"])
 	if err != nil {
 		t.Fatal(err)
 	}
... ...
@@ -46,3 +46,19 @@ func getParentIds(root, id string) ([]string, error) {
 	}
 	return out, s.Err()
 }
+
+func (a *Driver) getMountpoint(id string) string {
+	return path.Join(a.mntPath(), id)
+}
+
+func (a *Driver) mntPath() string {
+	return path.Join(a.rootPath(), "mnt")
+}
+
+func (a *Driver) getDiffPath(id string) string {
+	return path.Join(a.diffPath(), id)
+}
+
+func (a *Driver) diffPath() string {
+	return path.Join(a.rootPath(), "diff")
+}
... ...
@@ -69,9 +69,6 @@ type devInfo struct {
 	Deleted       bool   `json:"deleted"`
 	devices       *DeviceSet
 
-	mountCount int
-	mountPath  string
-
 	// The global DeviceSet lock guarantees that we serialize all
 	// the calls to libdevmapper (which is not threadsafe), but we
 	// sometimes release that lock while sleeping. In that case
... ...
@@ -1991,13 +1988,6 @@ func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error {
 	devices.Lock()
 	defer devices.Unlock()
 
-	// If mountcount is not zero, that means devices is still in use
-	// or has not been Put() properly. Fail device deletion.
-
-	if info.mountCount != 0 {
-		return fmt.Errorf("devmapper: Can't delete device %v as it is still mounted. mntCount=%v", info.Hash, info.mountCount)
-	}
-
 	return devices.deleteDevice(info, syncDelete)
 }
 
... ...
@@ -2116,13 +2106,11 @@ func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
 }
 
 // Shutdown shuts down the device by unmounting the root.
-func (devices *DeviceSet) Shutdown() error {
+func (devices *DeviceSet) Shutdown(home string) error {
 	logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix)
 	logrus.Debugf("devmapper: Shutting down DeviceSet: %s", devices.root)
 	defer logrus.Debugf("devmapper: [deviceset %s] Shutdown() END", devices.devicePrefix)
 
-	var devs []*devInfo
-
 	// Stop deletion worker. This should start delivering new events to
 	// ticker channel. That means no new instance of cleanupDeletedDevice()
 	// will run after this call. If one instance is already running at
... ...
@@ -2139,30 +2127,46 @@ func (devices *DeviceSet) Shutdown() error {
 	// metadata. Hence save this early before trying to deactivate devices.
 	devices.saveDeviceSetMetaData()
 
-	for _, info := range devices.Devices {
-		devs = append(devs, info)
+	// ignore the error since it's just a best effort to not try to unmount something that's mounted
+	mounts, _ := mount.GetMounts()
+	mounted := make(map[string]bool, len(mounts))
+	for _, mnt := range mounts {
+		mounted[mnt.Mountpoint] = true
 	}
-	devices.Unlock()
 
-	for _, info := range devs {
-		info.lock.Lock()
-		if info.mountCount > 0 {
+	if err := filepath.Walk(path.Join(home, "mnt"), func(p string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		if !info.IsDir() {
+			return nil
+		}
+
+		if mounted[p] {
 			// We use MNT_DETACH here in case it is still busy in some running
 			// container. This means it'll go away from the global scope directly,
 			// and the device will be released when that container dies.
-			if err := syscall.Unmount(info.mountPath, syscall.MNT_DETACH); err != nil {
-				logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", info.mountPath, err)
+			if err := syscall.Unmount(p, syscall.MNT_DETACH); err != nil {
+				logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", p, err)
 			}
+		}
 
-			devices.Lock()
-			if err := devices.deactivateDevice(info); err != nil {
-				logrus.Debugf("devmapper: Shutdown deactivate %s , error: %s", info.Hash, err)
+		if devInfo, err := devices.lookupDevice(path.Base(p)); err != nil {
+			logrus.Debugf("devmapper: Shutdown lookup device %s, error: %s", path.Base(p), err)
+		} else {
+			if err := devices.deactivateDevice(devInfo); err != nil {
+				logrus.Debugf("devmapper: Shutdown deactivate %s , error: %s", devInfo.Hash, err)
 			}
-			devices.Unlock()
 		}
-		info.lock.Unlock()
+
+		return nil
+	}); err != nil && !os.IsNotExist(err) {
+		devices.Unlock()
+		return err
 	}
 
+	devices.Unlock()
+
 	info, _ := devices.lookupDeviceWithLock("")
 	if info != nil {
 		info.lock.Lock()
... ...
@@ -2202,15 +2206,6 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
 	devices.Lock()
 	defer devices.Unlock()
 
-	if info.mountCount > 0 {
-		if path != info.mountPath {
-			return fmt.Errorf("devmapper: Trying to mount devmapper device in multiple places (%s, %s)", info.mountPath, path)
-		}
-
-		info.mountCount++
-		return nil
-	}
-
 	if err := devices.activateDeviceIfNeeded(info, false); err != nil {
 		return fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err)
 	}
... ...
@@ -2234,9 +2229,6 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
 		return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s", info.DevName(), path, err)
 	}
 
-	info.mountCount = 1
-	info.mountPath = path
-
 	return nil
 }
 
... ...
@@ -2256,20 +2248,6 @@ func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {
 	devices.Lock()
 	defer devices.Unlock()
 
-	// If there are running containers when daemon crashes, during daemon
-	// restarting, it will kill running containers and will finally call
-	// Put() without calling Get(). So info.MountCount may become negative.
-	// if info.mountCount goes negative, we do the unmount and assign
-	// it to 0.
-
-	info.mountCount--
-	if info.mountCount > 0 {
-		return nil
-	} else if info.mountCount < 0 {
-		logrus.Warnf("devmapper: Mount count of device went negative. Put() called without matching Get(). Resetting count to 0")
-		info.mountCount = 0
-	}
-
 	logrus.Debugf("devmapper: Unmount(%s)", mountPath)
 	if err := syscall.Unmount(mountPath, syscall.MNT_DETACH); err != nil {
 		return err
... ...
@@ -2280,8 +2258,6 @@ func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {
 		return err
 	}
 
-	info.mountPath = ""
-
 	return nil
 }
 
... ...
@@ -108,7 +108,7 @@ func (d *Driver) GetMetadata(id string) (map[string]string, error) {
 
 // Cleanup unmounts a device.
 func (d *Driver) Cleanup() error {
-	err := d.DeviceSet.Shutdown()
+	err := d.DeviceSet.Shutdown(d.home)
 
 	if err2 := mount.Unmount(d.home); err == nil {
 		err = err2
... ...
@@ -1,8 +1,19 @@
 package graphdriver
 
+import "syscall"
+
 var (
 	// Slice of drivers that should be used in an order
 	priority = []string{
 		"zfs",
 	}
 )
+
+// Mounted checks if the given path is mounted as the fs type
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+	var buf syscall.Statfs_t
+	if err := syscall.Statfs(mountPath, &buf); err != nil {
+		return false, err
+	}
+	return FsMagic(buf.Type) == fsType, nil
+}
... ...
@@ -42,6 +42,8 @@ const (
 	FsMagicXfs = FsMagic(0x58465342)
 	// FsMagicZfs filesystem id for Zfs
 	FsMagicZfs = FsMagic(0x2fc12fc1)
+	// FsMagicOverlay filesystem id for overlay
+	FsMagicOverlay = FsMagic(0x794C7630)
 )
 
 var (
... ...
@@ -86,3 +88,12 @@ func GetFSMagic(rootpath string) (FsMagic, error) {
 	}
 	return FsMagic(buf.Type), nil
 }
+
+// Mounted checks if the given path is mounted as the fs type
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+	var buf syscall.Statfs_t
+	if err := syscall.Statfs(mountPath, &buf); err != nil {
+		return false, err
+	}
+	return FsMagic(buf.Type) == fsType, nil
+}
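
Note: the same statfs-based helper is added to both platform variants of the graphdriver package so every driver can share one mount check. A sketch of the intended usage, mirroring the aufs `unmount` and overlay `Put` code in this diff (the helper name is illustrative; `FsMagicAufs` stands in for whichever filesystem the driver manages):

	func unmountIfMounted(mountPath string) error {
		mounted, err := graphdriver.Mounted(graphdriver.FsMagicAufs, mountPath)
		if err != nil || !mounted {
			return err // statfs failed, or nothing is mounted there
		}
		return syscall.Unmount(mountPath, 0)
	}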
... ...
@@ -88,21 +88,13 @@ func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff archive.Rea
 // of that. This means all child images share file (but not directory)
 // data with the parent.
 
-// ActiveMount contains information about the count, path and whether is mounted or not.
-// This information is part of the Driver, that contains list of active mounts that are part of this overlay.
-type ActiveMount struct {
-	count   int
-	path    string
-	mounted bool
-}
-
 // Driver contains information about the home directory and the list of active mounts that are created using this driver.
 type Driver struct {
-	home       string
-	sync.Mutex // Protects concurrent modification to active
-	active     map[string]*ActiveMount
-	uidMaps    []idtools.IDMap
-	gidMaps    []idtools.IDMap
+	home          string
+	pathCacheLock sync.Mutex
+	pathCache     map[string]string
+	uidMaps       []idtools.IDMap
+	gidMaps       []idtools.IDMap
 }
 
 var backingFs = "<unknown>"
... ...
@@ -151,10 +143,10 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
 	}
 
 	d := &Driver{
-		home:    home,
-		active:  make(map[string]*ActiveMount),
-		uidMaps: uidMaps,
-		gidMaps: gidMaps,
+		home:      home,
+		pathCache: make(map[string]string),
+		uidMaps:   uidMaps,
+		gidMaps:   gidMaps,
 	}
 
 	return NaiveDiffDriverWithApply(d, uidMaps, gidMaps), nil
... ...
@@ -325,23 +317,14 @@ func (d *Driver) Remove(id string) error {
 	if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) {
 		return err
 	}
+	d.pathCacheLock.Lock()
+	delete(d.pathCache, id)
+	d.pathCacheLock.Unlock()
 	return nil
 }
 
 // Get creates and mounts the required file system for the given id and returns the mount path.
 func (d *Driver) Get(id string, mountLabel string) (string, error) {
-	// Protect the d.active from concurrent access
-	d.Lock()
-	defer d.Unlock()
-
-	mount := d.active[id]
-	if mount != nil {
-		mount.count++
-		return mount.path, nil
-	}
-
-	mount = &ActiveMount{count: 1}
-
 	dir := d.dir(id)
 	if _, err := os.Stat(dir); err != nil {
 		return "", err
... ...
@@ -350,9 +333,10 @@ func (d *Driver) Get(id string, mountLabel string) (string, error) {
 	// If id has a root, just return it
 	rootDir := path.Join(dir, "root")
 	if _, err := os.Stat(rootDir); err == nil {
-		mount.path = rootDir
-		d.active[id] = mount
-		return mount.path, nil
+		d.pathCacheLock.Lock()
+		d.pathCache[id] = rootDir
+		d.pathCacheLock.Unlock()
+		return rootDir, nil
 	}
 
 	lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id"))
... ...
@@ -365,6 +349,16 @@ func (d *Driver) Get(id string, mountLabel string) (string, error) {
 	mergedDir := path.Join(dir, "merged")
 
 	opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir)
+
+	// if it's mounted already, just return
+	mounted, err := d.mounted(mergedDir)
+	if err != nil {
+		return "", err
+	}
+	if mounted {
+		return mergedDir, nil
+	}
+
 	if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil {
 		return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
 	}
... ...
@@ -378,42 +372,38 @@ func (d *Driver) Get(id string, mountLabel string) (string, error) {
 	if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
 		return "", err
 	}
-	mount.path = mergedDir
-	mount.mounted = true
-	d.active[id] = mount
 
-	return mount.path, nil
+	d.pathCacheLock.Lock()
+	d.pathCache[id] = mergedDir
+	d.pathCacheLock.Unlock()
+
+	return mergedDir, nil
+}
+
+func (d *Driver) mounted(dir string) (bool, error) {
+	return graphdriver.Mounted(graphdriver.FsMagicOverlay, dir)
 }
 
 // Put unmounts the mount path created for the give id.
 func (d *Driver) Put(id string) error {
-	// Protect the d.active from concurrent access
-	d.Lock()
-	defer d.Unlock()
+	d.pathCacheLock.Lock()
+	mountpoint, exists := d.pathCache[id]
+	d.pathCacheLock.Unlock()
 
-	mount := d.active[id]
-	if mount == nil {
+	if !exists {
 		logrus.Debugf("Put on a non-mounted device %s", id)
 		// but it might be still here
 		if d.Exists(id) {
-			mergedDir := path.Join(d.dir(id), "merged")
-			err := syscall.Unmount(mergedDir, 0)
-			if err != nil {
-				logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
-			}
+			mountpoint = path.Join(d.dir(id), "merged")
 		}
-		return nil
-	}
 
-	mount.count--
-	if mount.count > 0 {
-		return nil
+		d.pathCacheLock.Lock()
+		d.pathCache[id] = mountpoint
+		d.pathCacheLock.Unlock()
 	}
 
-	defer delete(d.active, id)
-	if mount.mounted {
-		err := syscall.Unmount(mount.path, 0)
-		if err != nil {
+	if mounted, err := d.mounted(mountpoint); mounted || err != nil {
+		if err = syscall.Unmount(mountpoint, 0); err != nil {
 			logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
 		}
 		return err
... ...
@@ -13,7 +13,6 @@ import (
 	"path"
 	"path/filepath"
 	"strings"
-	"sync"
 	"syscall"
 	"time"
 
... ...
@@ -47,10 +46,6 @@
 type Driver struct {
 	// info stores the shim driver information
 	info hcsshim.DriverInfo
-	// Mutex protects concurrent modification to active
-	sync.Mutex
-	// active stores references to the activated layers
-	active map[string]int
 }
 
 var _ graphdriver.DiffGetterDriver = &Driver{}
... ...
@@ -63,7 +58,6 @@ func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap)
 			HomeDir: home,
 			Flavour: filterDriver,
 		},
-		active: make(map[string]int),
 	}
 	return d, nil
 }
... ...
@@ -76,7 +70,6 @@ func InitDiff(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (
 			HomeDir: home,
 			Flavour: diffDriver,
 		},
-		active: make(map[string]int),
 	}
 	return d, nil
 }
... ...
@@ -189,9 +182,6 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
 	logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel)
 	var dir string
 
-	d.Lock()
-	defer d.Unlock()
-
 	rID, err := d.resolveID(id)
 	if err != nil {
 		return "", err
... ...
@@ -203,16 +193,14 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
 		return "", err
 	}
 
-	if d.active[rID] == 0 {
-		if err := hcsshim.ActivateLayer(d.info, rID); err != nil {
-			return "", err
-		}
-		if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
-			if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
-				logrus.Warnf("Failed to Deactivate %s: %s", id, err)
-			}
-			return "", err
+	if err := hcsshim.ActivateLayer(d.info, rID); err != nil {
+		return "", err
+	}
+	if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
+		if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
+			logrus.Warnf("Failed to Deactivate %s: %s", id, err)
 		}
+		return "", err
 	}
 
 	mountPath, err := hcsshim.GetLayerMountPath(d.info, rID)
... ...
@@ -223,8 +211,6 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
 		return "", err
 	}
 
-	d.active[rID]++
-
 	// If the layer has a mount path, use that. Otherwise, use the
 	// folder path.
 	if mountPath != "" {
... ...
@@ -245,22 +231,10 @@ func (d *Driver) Put(id string) error {
 		return err
 	}
 
-	d.Lock()
-	defer d.Unlock()
-
-	if d.active[rID] > 1 {
-		d.active[rID]--
-	} else if d.active[rID] == 1 {
-		if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
-			return err
-		}
-		if err := hcsshim.DeactivateLayer(d.info, rID); err != nil {
-			return err
-		}
-		delete(d.active, rID)
+	if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
+		return err
 	}
-
-	return nil
+	return hcsshim.DeactivateLayer(d.info, rID)
 }
 
 // Cleanup ensures the information the driver stores is properly removed.
... ...
@@ -270,62 +244,40 @@ func (d *Driver) Cleanup() error {
 
 // Diff produces an archive of the changes between the specified
 // layer and its parent layer which may be "".
+// The layer should be mounted when calling this function
 func (d *Driver) Diff(id, parent string) (_ archive.Archive, err error) {
 	rID, err := d.resolveID(id)
 	if err != nil {
 		return
 	}
 
-	// Getting the layer paths must be done outside of the lock.
 	layerChain, err := d.getLayerChain(rID)
 	if err != nil {
 		return
 	}
 
-	var undo func()
-
-	d.Lock()
-
-	// To support export, a layer must be activated but not prepared.
-	if d.info.Flavour == filterDriver {
-		if d.active[rID] == 0 {
-			if err = hcsshim.ActivateLayer(d.info, rID); err != nil {
-				d.Unlock()
-				return
-			}
-			undo = func() {
-				if err := hcsshim.DeactivateLayer(d.info, rID); err != nil {
-					logrus.Warnf("Failed to Deactivate %s: %s", rID, err)
-				}
-			}
-		} else {
-			if err = hcsshim.UnprepareLayer(d.info, rID); err != nil {
-				d.Unlock()
-				return
-			}
-			undo = func() {
-				if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
-					logrus.Warnf("Failed to re-PrepareLayer %s: %s", rID, err)
-				}
-			}
-		}
+	// this is assuming that the layer is unmounted
+	if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
+		return nil, err
 	}
-
-	d.Unlock()
+	defer func() {
+		if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
+			logrus.Warnf("Failed to Deactivate %s: %s", rID, err)
+		}
+	}()
 
 	arch, err := d.exportLayer(rID, layerChain)
 	if err != nil {
-		undo()
 		return
 	}
 	return ioutils.NewReadCloserWrapper(arch, func() error {
-		defer undo()
 		return arch.Close()
 	}), nil
 }
 
 // Changes produces a list of changes between the specified layer
 // and its parent layer. If parent is "", then all changes will be ADD changes.
+// The layer should be mounted when calling this function
 func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
 	rID, err := d.resolveID(id)
 	if err != nil {
... ...
@@ -336,31 +288,15 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
 		return nil, err
 	}
 
-	d.Lock()
-	if d.info.Flavour == filterDriver {
-		if d.active[rID] == 0 {
-			if err = hcsshim.ActivateLayer(d.info, rID); err != nil {
-				d.Unlock()
-				return nil, err
-			}
-			defer func() {
-				if err := hcsshim.DeactivateLayer(d.info, rID); err != nil {
-					logrus.Warnf("Failed to Deactivate %s: %s", rID, err)
-				}
-			}()
-		} else {
-			if err = hcsshim.UnprepareLayer(d.info, rID); err != nil {
-				d.Unlock()
-				return nil, err
-			}
-			defer func() {
-				if err := hcsshim.PrepareLayer(d.info, rID, parentChain); err != nil {
-					logrus.Warnf("Failed to re-PrepareLayer %s: %s", rID, err)
-				}
-			}()
-		}
+	// this is assuming that the layer is unmounted
+	if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
+		return nil, err
 	}
-	d.Unlock()
+	defer func() {
+		if err := hcsshim.PrepareLayer(d.info, rID, parentChain); err != nil {
+			logrus.Warnf("Failed to Deactivate %s: %s", rID, err)
+		}
+	}()
 
 	r, err := hcsshim.NewLayerReader(d.info, id, parentChain)
 	if err != nil {
... ...
@@ -391,6 +327,7 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
 // ApplyDiff extracts the changeset from the given diff into the
 // layer with the specified id and parent, returning the size of the
 // new layer in bytes.
+// The layer should not be mounted when calling this function
 func (d *Driver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) {
 	rPId, err := d.resolveID(parent)
 	if err != nil {
... ...
@@ -22,12 +22,6 @@ import (
 	"github.com/opencontainers/runc/libcontainer/label"
 )
 
-type activeMount struct {
-	count   int
-	path    string
-	mounted bool
-}
-
 type zfsOptions struct {
 	fsName    string
 	mountPath string
... ...
@@ -109,7 +103,6 @@ func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdri
 		dataset:          rootDataset,
 		options:          options,
 		filesystemsCache: filesystemsCache,
-		active:           make(map[string]*activeMount),
 		uidMaps:          uidMaps,
 		gidMaps:          gidMaps,
 	}
... ...
@@ -166,7 +159,6 @@ type Driver struct {
 	options          zfsOptions
 	sync.Mutex       // protects filesystem cache against concurrent access
 	filesystemsCache map[string]bool
-	active           map[string]*activeMount
 	uidMaps          []idtools.IDMap
 	gidMaps          []idtools.IDMap
 }
... ...
@@ -302,17 +294,6 @@ func (d *Driver) Remove(id string) error {
 
 // Get returns the mountpoint for the given id after creating the target directories if necessary.
 func (d *Driver) Get(id, mountLabel string) (string, error) {
-	d.Lock()
-	defer d.Unlock()
-
-	mnt := d.active[id]
-	if mnt != nil {
-		mnt.count++
-		return mnt.path, nil
-	}
-
-	mnt = &activeMount{count: 1}
-
 	mountpoint := d.mountPath(id)
 	filesystem := d.zfsPath(id)
 	options := label.FormatMountLabel("", mountLabel)
... ...
@@ -335,48 +316,29 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
 	if err := os.Chown(mountpoint, rootUID, rootGID); err != nil {
 		return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err)
 	}
-	mnt.path = mountpoint
-	mnt.mounted = true
-	d.active[id] = mnt
 
 	return mountpoint, nil
 }
 
 // Put removes the existing mountpoint for the given id if it exists.
 func (d *Driver) Put(id string) error {
-	d.Lock()
-	defer d.Unlock()
-
-	mnt := d.active[id]
-	if mnt == nil {
-		logrus.Debugf("[zfs] Put on a non-mounted device %s", id)
-		// but it might be still here
-		if d.Exists(id) {
-			err := mount.Unmount(d.mountPath(id))
-			if err != nil {
-				logrus.Debugf("[zfs] Failed to unmount %s zfs fs: %v", id, err)
-			}
-		}
-		return nil
-	}
-
-	mnt.count--
-	if mnt.count > 0 {
-		return nil
+	mountpoint := d.mountPath(id)
+	mounted, err := graphdriver.Mounted(graphdriver.FsMagicZfs, mountpoint)
+	if err != nil || !mounted {
+		return err
 	}
 
-	defer delete(d.active, id)
-	if mnt.mounted {
-		logrus.Debugf(`[zfs] unmount("%s")`, mnt.path)
+	logrus.Debugf(`[zfs] unmount("%s")`, mountpoint)
 
-		if err := mount.Unmount(mnt.path); err != nil {
-			return fmt.Errorf("error unmounting to %s: %v", mnt.path, err)
-		}
+	if err := mount.Unmount(mountpoint); err != nil {
+		return fmt.Errorf("error unmounting to %s: %v", mountpoint, err)
 	}
 	return nil
 }
 
 // Exists checks to see if the cache entry exists for the given id.
 func (d *Driver) Exists(id string) bool {
+	d.Lock()
+	defer d.Unlock()
 	return d.filesystemsCache[d.zfsPath(id)] == true
 }
new file mode 100644
... ...
@@ -0,0 +1,95 @@
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"runtime"
+	"strings"
+	"sync"
+
+	"github.com/docker/docker/pkg/integration/checker"
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) BenchmarkConcurrentContainerActions(c *check.C) {
+	maxConcurrency := runtime.GOMAXPROCS(0)
+	numIterations := c.N
+	outerGroup := &sync.WaitGroup{}
+	outerGroup.Add(maxConcurrency)
+	chErr := make(chan error, numIterations*2*maxConcurrency)
+
+	for i := 0; i < maxConcurrency; i++ {
+		go func() {
+			defer outerGroup.Done()
+			innerGroup := &sync.WaitGroup{}
+			innerGroup.Add(2)
+
+			go func() {
+				defer innerGroup.Done()
+				for i := 0; i < numIterations; i++ {
+					args := []string{"run", "-d", defaultSleepImage}
+					args = append(args, defaultSleepCommand...)
+					out, _, err := dockerCmdWithError(args...)
+					if err != nil {
+						chErr <- fmt.Errorf(out)
+						return
+					}
+
+					id := strings.TrimSpace(out)
+					tmpDir, err := ioutil.TempDir("", "docker-concurrent-test-"+id)
+					if err != nil {
+						chErr <- err
+						return
+					}
+					defer os.RemoveAll(tmpDir)
+					out, _, err = dockerCmdWithError("cp", id+":/tmp", tmpDir)
+					if err != nil {
+						chErr <- fmt.Errorf(out)
+						return
+					}
+
+					out, _, err = dockerCmdWithError("kill", id)
+					if err != nil {
+						chErr <- fmt.Errorf(out)
+					}
+
+					out, _, err = dockerCmdWithError("start", id)
+					if err != nil {
+						chErr <- fmt.Errorf(out)
+					}
+
+					out, _, err = dockerCmdWithError("kill", id)
+					if err != nil {
+						chErr <- fmt.Errorf(out)
+					}
+
+					// don't do an rm -f here since it can potentially ignore errors from the graphdriver
+					out, _, err = dockerCmdWithError("rm", id)
+					if err != nil {
+						chErr <- fmt.Errorf(out)
+					}
+				}
+			}()
+
+			go func() {
+				defer innerGroup.Done()
+				for i := 0; i < numIterations; i++ {
+					out, _, err := dockerCmdWithError("ps")
+					if err != nil {
+						chErr <- fmt.Errorf(out)
+					}
+				}
+			}()
+
+			innerGroup.Wait()
+		}()
+	}
+
+	outerGroup.Wait()
+	close(chErr)
+
+	for err := range chErr {
+		c.Assert(err, checker.IsNil)
+	}
+}
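
Note: this benchmark is written against the go-check suite used by the integration tests, so it runs through the check framework's benchmark mode rather than plain `go test -bench`. Assuming the usual go-check flags (the exact test harness wrapper is not shown in this diff), an invocation would look roughly like:

	go test -check.b -check.f BenchmarkConcurrentContainerActions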
... ...
@@ -49,6 +49,10 @@ var (
 	// to be created which would result in a layer depth
 	// greater than the 125 max.
 	ErrMaxDepthExceeded = errors.New("max depth exceeded")
+
+	// ErrNotSupported is used when the action is not supppoted
+	// on the current platform
+	ErrNotSupported = errors.New("not support on this platform")
 )
 
 // ChainID is the content-addressable ID of a layer.
... ...
@@ -12,6 +12,7 @@ type mountedLayer struct {
 	mountID    string
 	initID     string
 	parent     *roLayer
+	path       string
 	layerStore *layerStore
 
 	references map[RWLayer]*referencedRWLayer
... ...
@@ -131,10 +132,21 @@ func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) {
 		return "", ErrLayerNotRetained
 	}
 
-	rl.activityCount++
-	return rl.mountedLayer.Mount(mountLabel)
+	if rl.activityCount > 0 {
+		rl.activityCount++
+		return rl.path, nil
+	}
+
+	m, err := rl.mountedLayer.Mount(mountLabel)
+	if err == nil {
+		rl.activityCount++
+		rl.path = m
+	}
+	return m, err
 }
 
+// Unmount decrements the activity count and unmounts the underlying layer
+// Callers should only call `Unmount` once per call to `Mount`, even on error.
 func (rl *referencedRWLayer) Unmount() error {
 	rl.activityL.Lock()
 	defer rl.activityL.Unlock()
... ...
@@ -145,7 +157,11 @@ func (rl *referencedRWLayer) Unmount() error {
 	if rl.activityCount == -1 {
 		return ErrLayerNotRetained
 	}
+
 	rl.activityCount--
+	if rl.activityCount > 0 {
+		return nil
+	}
 
 	return rl.mountedLayer.Unmount()
 }
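
Note: taken together, the two hunks above put the whole mount reference count in one place. A sketch of the resulting `Mount`/`Unmount` pair (the locking in `Mount` is inferred from the `Unmount` context above, and the retention/error checks shown earlier are elided):

	func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) {
		rl.activityL.Lock()
		defer rl.activityL.Unlock()

		if rl.activityCount > 0 {
			rl.activityCount++
			return rl.path, nil // already mounted: hand back the cached path
		}

		m, err := rl.mountedLayer.Mount(mountLabel)
		if err == nil {
			rl.activityCount++
			rl.path = m // remember the mountpoint for later callers
		}
		return m, err
	}

	func (rl *referencedRWLayer) Unmount() error {
		rl.activityL.Lock()
		defer rl.activityL.Unlock()

		rl.activityCount--
		if rl.activityCount > 0 {
			return nil // other holders still need the mount
		}
		return rl.mountedLayer.Unmount()
	}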