Add dm.blocksize option that you can use with --storage-opt to set a
specific blocksize for the thin provisioning pool.
Also change the default dm-thin-pool blocksize from 64K to 512K. This
strikes a balance between the desire for a smaller blocksize, given
docker's use of snapshots, and the desire for the better performance
that comes with using a larger blocksize. However, if very small files
will be used on average, the user is encouraged to override this default.
Docker-DCO-1.1-Signed-off-by: Mike Snitzer <snitzer@redhat.com> (github: snitm)
... | ... |
@@ -126,6 +126,14 @@ Here is the list of supported options: |
126 | 126 |
|
127 | 127 |
``docker -d --storage-opt dm.datadev=/dev/sdb1 --storage-opt dm.metadatadev=/dev/sdc1`` |
128 | 128 |
|
129 |
+ * `dm.blocksize` |
|
130 |
+ |
|
131 |
Specifies a custom blocksize to use for the thin pool. The default blocksize is 512K. |
|
132 |
+ |
|
133 |
+ Example use: |
|
134 |
+ |
|
135 |
+ ``docker -d --storage-opt dm.blocksize=64K`` |
|
136 |
+ |
|
129 | 137 |
* `dm.blkdiscard` |
130 | 138 |
|
131 | 139 |
Enables or disables the use of blkdiscard when removing |
... | ... |
@@ -28,6 +28,7 @@ var ( |
28 | 28 |
DefaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 |
29 | 29 |
DefaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 |
30 | 30 |
DefaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 |
31 |
+ DefaultThinpBlockSize uint32 = 1024 // 512K = 1024 512b sectors |
|
31 | 32 |
) |
32 | 33 |
|
33 | 34 |
type DevInfo struct { |
... | ... |
@@ -78,6 +79,7 @@ type DeviceSet struct { |
78 | 78 |
dataDevice string |
79 | 79 |
metadataDevice string |
80 | 80 |
doBlkDiscard bool |
81 |
+ thinpBlockSize uint32 |
|
81 | 82 |
} |
82 | 83 |
|
83 | 84 |
type DiskUsage struct { |
... | ... |
@@ -510,7 +512,7 @@ func (devices *DeviceSet) ResizePool(size int64) error { |
510 | 510 |
} |
511 | 511 |
|
512 | 512 |
// Reload with the new block sizes |
513 |
- if err := reloadPool(devices.getPoolName(), dataloopback, metadataloopback); err != nil { |
|
513 |
+ if err := reloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { |
|
514 | 514 |
return fmt.Errorf("Unable to reload pool: %s", err) |
515 | 515 |
} |
516 | 516 |
|
... | ... |
@@ -640,7 +642,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { |
640 | 640 |
} |
641 | 641 |
defer metadataFile.Close() |
642 | 642 |
|
643 |
- if err := createPool(devices.getPoolName(), dataFile, metadataFile); err != nil { |
|
643 |
+ if err := createPool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil { |
|
644 | 644 |
return err |
645 | 645 |
} |
646 | 646 |
} |
... | ... |
@@ -1159,6 +1161,7 @@ func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error |
1159 | 1159 |
baseFsSize: DefaultBaseFsSize, |
1160 | 1160 |
filesystem: "ext4", |
1161 | 1161 |
doBlkDiscard: true, |
1162 |
+ thinpBlockSize: DefaultThinpBlockSize, |
|
1162 | 1163 |
} |
1163 | 1164 |
|
1164 | 1165 |
foundBlkDiscard := false |
... | ... |
@@ -1206,6 +1209,13 @@ func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error |
1206 | 1206 |
if err != nil { |
1207 | 1207 |
return nil, err |
1208 | 1208 |
} |
1209 |
+ case "dm.blocksize": |
|
1210 |
+ size, err := units.RAMInBytes(val) |
|
1211 |
+ if err != nil { |
|
1212 |
+ return nil, err |
|
1213 |
+ } |
|
1214 |
+ // convert to 512b sectors |
|
1215 |
+ devices.thinpBlockSize = uint32(size) >> 9 |
|
1209 | 1216 |
default: |
1210 | 1217 |
return nil, fmt.Errorf("Unknown option %s\n", key) |
1211 | 1218 |
} |
... | ... |
@@ -328,7 +328,7 @@ func BlockDeviceDiscard(path string) error { |
328 | 328 |
} |
329 | 329 |
|
330 | 330 |
// This is the programmatic example of "dmsetup create" |
331 |
-func createPool(poolName string, dataFile, metadataFile *os.File) error { |
|
331 |
+func createPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { |
|
332 | 332 |
task, err := createTask(DeviceCreate, poolName) |
333 | 333 |
if task == nil { |
334 | 334 |
return err |
... | ... |
@@ -339,7 +339,7 @@ func createPool(poolName string, dataFile, metadataFile *os.File) error { |
339 | 339 |
return fmt.Errorf("Can't get data size %s", err) |
340 | 340 |
} |
341 | 341 |
|
342 |
- params := metadataFile.Name() + " " + dataFile.Name() + " 128 32768 1 skip_block_zeroing" |
|
342 |
+ params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) |
|
343 | 343 |
if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil { |
344 | 344 |
return fmt.Errorf("Can't add target %s", err) |
345 | 345 |
} |
... | ... |
@@ -358,7 +358,7 @@ func createPool(poolName string, dataFile, metadataFile *os.File) error { |
358 | 358 |
return nil |
359 | 359 |
} |
360 | 360 |
|
361 |
-func reloadPool(poolName string, dataFile, metadataFile *os.File) error { |
|
361 |
+func reloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { |
|
362 | 362 |
task, err := createTask(DeviceReload, poolName) |
363 | 363 |
if task == nil { |
364 | 364 |
return err |
... | ... |
@@ -369,7 +369,7 @@ func reloadPool(poolName string, dataFile, metadataFile *os.File) error { |
369 | 369 |
return fmt.Errorf("Can't get data size %s", err) |
370 | 370 |
} |
371 | 371 |
|
372 |
- params := metadataFile.Name() + " " + dataFile.Name() + " 128 32768 1 skip_block_zeroing" |
|
372 |
+ params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) |
|
373 | 373 |
if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil { |
374 | 374 |
return fmt.Errorf("Can't add target %s", err) |
375 | 375 |
} |