@@ -100,6 +100,25 @@ Here is the list of supported options:
 
     ``docker -d --storage-opt dm.mountopt=nodiscard``
 
+ * `dm.thinpooldev`
+
+    Specifies a custom blockdevice to use for the thin pool.
+
+    If using a block device for device mapper storage, ideally lvm2
+    would be used to create/manage the thin-pool volume that is then
+    handed to docker to exclusively create/manage the thin and thin
+    snapshot volumes needed for its containers. Managing the thin-pool
+    outside of docker makes for the most feature-rich method of having
+    docker utilize device mapper thin provisioning as the backing
+    storage for docker's containers. Highlights of lvm2-based thin-pool
+    management include: automatic or interactive thin-pool resize
+    support, dynamic changes to thin-pool features, and automatic
+    thinp metadata checking when lvm2 activates the thin-pool.
+
+    Example use:
+
+    ``docker -d --storage-opt dm.thinpooldev=/dev/mapper/thin-pool``
+
 * `dm.datadev`
 
     Specifies a custom blockdevice to use for data for the thin pool.
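For concreteness (not part of the patch): a minimal lvm2 setup of the kind the new text recommends might look like the following, where the device `/dev/sdb` and the `docker`/`thinpool` volume-group and volume names are illustrative, not defaults:

``pvcreate /dev/sdb``

``vgcreate docker /dev/sdb``

``lvcreate --type thin-pool -l 95%VG -n thinpool docker``

``docker -d --storage-opt dm.thinpooldev=/dev/mapper/docker-thinpool``

lvm2 exposes the `docker/thinpool` logical volume as `/dev/mapper/docker-thinpool`, which is the form of name `dm.thinpooldev` expects.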
@@ -84,6 +84,7 @@ type DeviceSet struct {
 	metadataDevice   string
 	doBlkDiscard     bool
 	thinpBlockSize   uint32
+	thinPoolDevice   string
 }
 
 type DiskUsage struct {
@@ -150,7 +151,10 @@ func (devices *DeviceSet) oldMetadataFile() string {
 }
 
 func (devices *DeviceSet) getPoolName() string {
-	return devices.devicePrefix + "-pool"
+	if devices.thinPoolDevice == "" {
+		return devices.devicePrefix + "-pool"
+	}
+	return devices.thinPoolDevice
 }
 
 func (devices *DeviceSet) getPoolDevName() string {
@@ -411,7 +415,22 @@ func (devices *DeviceSet) setupBaseImage() error {
 		}
 	}
 
-	log.Debugf("Initializing base device-manager snapshot")
+	if devices.thinPoolDevice != "" && oldInfo == nil {
+		_, transactionId, dataUsed, _, _, _, err := devices.poolStatus()
+		if err != nil {
+			return err
+		}
+		if dataUsed != 0 {
+			return fmt.Errorf("Unable to take ownership of thin-pool (%s) that already has used data blocks",
+				devices.thinPoolDevice)
+		}
+		if transactionId != 0 {
+			return fmt.Errorf("Unable to take ownership of thin-pool (%s) with non-zero transaction Id",
+				devices.thinPoolDevice)
+		}
+	}
+
+	log.Debugf("Initializing base device-mapper thin volume")
 
 	id := devices.NextDeviceId
 
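Context for the ownership check above (not part of the patch): `poolStatus()` reports the fields of the device-mapper thin-pool status line, whose params are documented in the kernel's thin-provisioning docs as `<transaction id> <used metadata>/<total metadata> <used data>/<total data> ...`. A minimal sketch of how the two tested values could be read from such a line; the `parseThinPoolStatus` helper and the sample status string are illustrative, not the driver's actual code:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseThinPoolStatus pulls the transaction id and used-data-block count
// out of the params string of a `dmsetup status` line for a thin-pool
// target (hypothetical helper for illustration).
func parseThinPoolStatus(params string) (transactionId, dataUsed uint64, err error) {
	fields := strings.Fields(params)
	if len(fields) < 3 {
		return 0, 0, fmt.Errorf("invalid thin-pool status params: %q", params)
	}
	if transactionId, err = strconv.ParseUint(fields[0], 10, 64); err != nil {
		return 0, 0, err
	}
	// fields[2] is "<used data blocks>/<total data blocks>".
	dataUsed, err = strconv.ParseUint(strings.SplitN(fields[2], "/", 2)[0], 10, 64)
	if err != nil {
		return 0, 0, err
	}
	return transactionId, dataUsed, nil
}

func main() {
	// A freshly created, never-used pool reports transaction id 0 and zero
	// used data blocks -- exactly the state the ownership check requires
	// before docker will take over an externally provided pool.
	txId, used, err := parseThinPoolStatus("0 17/4096 0/16384 - rw discard_passdown queue_if_no_space")
	fmt.Println(txId, used, err) // 0 0 <nil>
}
```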
@@ -430,7 +449,7 @@ func (devices *DeviceSet) setupBaseImage() error {
 		return err
 	}
 
-	log.Debugf("Creating filesystem on base device-manager snapshot")
+	log.Debugf("Creating filesystem on base device-mapper thin volume")
 
 	if err = devices.activateDeviceIfNeeded(info); err != nil {
 		return err
@@ -605,7 +624,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
 	devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino)
 	log.Debugf("Generated prefix: %s", devices.devicePrefix)
 
-	// Check for the existence of the device <prefix>-pool
+	// Check for the existence of the thin-pool device
 	log.Debugf("Checking for existence of the pool '%s'", devices.getPoolName())
 	info, err := devicemapper.GetInfo(devices.getPoolName())
 	if info == nil {
@@ -624,7 +643,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
 	createdLoopback := false
 
 	// If the pool doesn't exist, create it
-	if info.Exists == 0 {
+	if info.Exists == 0 && devices.thinPoolDevice == "" {
 		log.Debugf("Pool doesn't exist. Creating it.")
 
 		var (
@@ -988,8 +1007,10 @@ func (devices *DeviceSet) Shutdown() error {
 	}
 
 	devices.Lock()
-	if err := devices.deactivatePool(); err != nil {
-		log.Debugf("Shutdown deactivate pool , error: %s", err)
+	if devices.thinPoolDevice == "" {
+		if err := devices.deactivatePool(); err != nil {
+			log.Debugf("Shutdown deactivate pool, error: %s", err)
+		}
 	}
 
 	devices.saveDeviceSetMetaData()
@@ -1275,6 +1296,8 @@ func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error
 			devices.metadataDevice = val
 		case "dm.datadev":
 			devices.dataDevice = val
+		case "dm.thinpooldev":
+			devices.thinPoolDevice = strings.TrimPrefix(val, "/dev/mapper/")
 		case "dm.blkdiscard":
 			foundBlkDiscard = true
 			devices.doBlkDiscard, err = strconv.ParseBool(val)
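A small illustration (not part of the patch) of the normalization the new `dm.thinpooldev` case performs: a bare device-mapper name and a full `/dev/mapper` path are both accepted, and both reduce to the same pool name that `getPoolName()` later returns.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Either spelling of the option value yields the same stored name.
	for _, val := range []string{"thin-pool", "/dev/mapper/thin-pool"} {
		fmt.Println(strings.TrimPrefix(val, "/dev/mapper/")) // thin-pool
	}
}
```

Note that `strings.TrimPrefix` only strips the prefix when present, so values given outside `/dev/mapper/` pass through untouched.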
@@ -1294,7 +1317,7 @@ func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error
 	}
 
 	// By default, don't do blk discard hack on raw devices, its rarely useful and is expensive
-	if !foundBlkDiscard && devices.dataDevice != "" {
+	if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") {
 		devices.doBlkDiscard = false
 	}
 
@@ -384,7 +384,8 @@ func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize
 	}
 
 	var cookie uint = 0
-	if err := task.SetCookie(&cookie, 0); err != nil {
+	var flags uint16 = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag
+	if err := task.SetCookie(&cookie, flags); err != nil {
 		return fmt.Errorf("Can't set cookie %s", err)
 	}
 	defer UdevWait(cookie)
@@ -82,6 +82,12 @@ const (
 	LoNameSize = C.LO_NAME_SIZE
 )
 
+const (
+	DmUdevDisableSubsystemRulesFlag = C.DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG
+	DmUdevDisableDiskRulesFlag      = C.DM_UDEV_DISABLE_DISK_RULES_FLAG
+	DmUdevDisableOtherRulesFlag     = C.DM_UDEV_DISABLE_OTHER_RULES_FLAG
+)
+
 var (
 	DmGetLibraryVersion = dmGetLibraryVersionFct
 	DmGetNextTarget     = dmGetNextTargetFct
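Commentary, not part of the patch: libdevmapper carries these `DM_UDEV_DISABLE_*` flags alongside the udev cookie that `SetCookie` registers, so the standard device-mapper udev rules skip their symlink and scanning work for the pool device docker creates and activates itself. A sketch of how the combined mask behaves, with illustrative flag values; the real bit values come from libdevmapper's headers via cgo, as in the const block above:

```go
package main

import "fmt"

// Illustrative stand-ins for the cgo constants above; the actual bit
// values are defined by libdevmapper, not by this sketch.
const (
	DmUdevDisableSubsystemRulesFlag uint16 = 1 << iota
	DmUdevDisableDiskRulesFlag
	DmUdevDisableOtherRulesFlag
)

func main() {
	// CreatePool ORs the three independent flag bits together before
	// handing them to task.SetCookie along with the cookie.
	flags := DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag
	fmt.Printf("udev cookie flags: %#04x\n", flags)
	fmt.Println(flags&DmUdevDisableDiskRulesFlag != 0) // true: disk rules suppressed
}
```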