author		Serapheim Dimitropoulos <[email protected]>	2016-12-16 14:11:29 -0800
committer	Brian Behlendorf <[email protected]>	2018-06-26 10:07:42 -0700
commit		d2734cce68cf740e015312314415f9034c67851c (patch)
tree		b7a140a3cf2a19bb7c88f2d277f3b5a33c121cea	/module/zfs/vdev_removal.c
parent		88eaf610d9c7056f0946e5090cba1e6288ff2b70 (diff)
OpenZFS 9166 - zfs storage pool checkpoint
Details about the motivation of this feature and its usage can
be found in this blogpost:

    https://sdimitro.github.io/post/zpool-checkpoint/

A lightning talk of this feature can be found here:
https://www.youtube.com/watch?v=fPQA8K40jAM

Implementation details can be found in the big block comment of
spa_checkpoint.c

Side-changes that are relevant to this commit but not explained
elsewhere:

* Renames members of "struct metaslab" trees to be shorter without
  losing meaning.

* space_map_{alloc,truncate}() accept a block size as a parameter.
  The reason is that in the current state all space maps that we
  allocate through the DMU use a global tunable (space_map_blksz)
  which defaults to 4KB. This is ok for metaslab space maps in
  terms of bandwidth since they are scattered all over the disk.
  But for other space maps this default is probably not what we
  want. Examples are device removal's vdev_obsolete_sm or
  vdev_checkpoint_sm from this review. Both of these have a 1:1
  relationship with each vdev and could benefit from a bigger
  block size.

Porting notes:

* The part of dsl_scan_sync() which handles async destroys has
  been moved into the new dsl_process_async_destroys() function.

* Remove "VERIFY(!(flags & FWRITE))" in "kernel.c" so zhack can
  write to block-device-backed pools.

* ZTS:

  * Fix get_txg() in zpool_sync_001_pos due to "checkpoint_txg".

  * Don't use large dd block sizes on /dev/urandom under Linux in
    checkpoint_capacity.

  * Adopt Delphix-OS's setting of 4 (spa_asize_inflation =
    SPA_DVAS_PER_BP + 1) for the checkpoint_capacity test to
    speed up its attempts to fill the pool.

  * Create the base and nested pools with sync=disabled to speed
    up the "setup" phase.

  * Clear labels in the test pool between checkpoint tests to
    avoid duplicate-pool issues.

  * The import_rewind_device_replaced test has been marked as
    "known to fail" for the reasons listed in its DISCLAIMER.

* New module parameters:

    zfs_spa_discard_memory_limit
    zfs_remove_max_bytes_pause (not documented - debugging only)
    vdev_max_ms_count (formerly metaslabs_per_vdev)
    vdev_min_ms_count

Authored by: Serapheim Dimitropoulos <[email protected]>
Reviewed by: Matthew Ahrens <[email protected]>
Reviewed by: John Kennedy <[email protected]>
Reviewed by: Dan Kimmel <[email protected]>
Reviewed by: Brian Behlendorf <[email protected]>
Approved by: Richard Lowe <[email protected]>
Ported-by: Tim Chase <[email protected]>
Signed-off-by: Tim Chase <[email protected]>
OpenZFS-issue: https://illumos.org/issues/9166
OpenZFS-commit: https://github.com/openzfs/openzfs/commit/7159fdb8
Closes #7570
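As a quick illustration of the space_map_{alloc,truncate}() change
described above: a minimal sketch, assuming the post-change
space_map_alloc(os, blocksize, tx) signature; the
CHECKPOINT_SM_BLKSZ constant and the alloc_checkpoint_sm() helper
are invented for illustration and are not code from this commit.

/*
 * Hypothetical sketch: with the new calling convention each caller
 * picks a block size instead of every space map inheriting the
 * global space_map_blksz tunable (4KB default).
 */
#define	CHECKPOINT_SM_BLKSZ	(128 * 1024)	/* one map per vdev */

static uint64_t
alloc_checkpoint_sm(objset_t *os, dmu_tx_t *tx)
{
	return (space_map_alloc(os, CHECKPOINT_SM_BLKSZ, tx));
}

A larger block size suits vdev-wide maps such as vdev_checkpoint_sm,
which have a 1:1 relationship with each vdev and can therefore
benefit from a bigger block size, as the commit message notes.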
Diffstat (limited to 'module/zfs/vdev_removal.c')
-rw-r--r--	module/zfs/vdev_removal.c	85
1 file changed, 64 insertions(+), 21 deletions(-)
diff --git a/module/zfs/vdev_removal.c b/module/zfs/vdev_removal.c
index f9084e8cf..f2bdd6389 100644
--- a/module/zfs/vdev_removal.c
+++ b/module/zfs/vdev_removal.c
@@ -117,6 +117,12 @@ int zfs_remove_max_segment = SPA_MAXBLOCKSIZE;
*/
int vdev_removal_max_span = 32 * 1024;
+/*
+ * This is used by the test suite so that it can ensure that certain
+ * actions happen while in the middle of a removal.
+ */
+unsigned long zfs_remove_max_bytes_pause = -1UL;
+
#define VDEV_REMOVAL_ZAP_OBJS "lzap"
static void spa_vdev_remove_thread(void *arg);
@@ -286,11 +292,11 @@ vdev_remove_initiate_sync(void *arg, dmu_tx_t *tx)
* be copied.
*/
spa->spa_removing_phys.sr_to_copy -=
- range_tree_space(ms->ms_freeingtree);
+ range_tree_space(ms->ms_freeing);
- ASSERT0(range_tree_space(ms->ms_freedtree));
+ ASSERT0(range_tree_space(ms->ms_freed));
for (int t = 0; t < TXG_SIZE; t++)
- ASSERT0(range_tree_space(ms->ms_alloctree[t]));
+ ASSERT0(range_tree_space(ms->ms_allocating[t]));
}
/*
@@ -467,19 +473,18 @@ spa_restart_removal(spa_t *spa)
* and we correctly free already-copied data.
*/
void
-free_from_removing_vdev(vdev_t *vd, uint64_t offset, uint64_t size,
- uint64_t txg)
+free_from_removing_vdev(vdev_t *vd, uint64_t offset, uint64_t size)
{
spa_t *spa = vd->vdev_spa;
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
+ uint64_t txg = spa_syncing_txg(spa);
uint64_t max_offset_yet = 0;
ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, ==,
vdev_indirect_mapping_object(vim));
ASSERT3U(vd->vdev_id, ==, svr->svr_vdev_id);
- ASSERT3U(spa_syncing_txg(spa), ==, txg);
mutex_enter(&svr->svr_lock);
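For context on the hunk above: free_from_removing_vdev() is only
called from syncing context, so the txg argument was redundant; the
function now derives it internally via spa_syncing_txg(spa), and the
old ASSERT3U comparing the two goes away. A hypothetical caller
(sketch only, not part of this diff) would simplify like this:

static void
example_free_during_removal(vdev_t *vd, uint64_t offset, uint64_t size)
{
	/*
	 * Old interface: free_from_removing_vdev(vd, offset, size,
	 * spa_syncing_txg(vd->vdev_spa));
	 * New interface: the txg is looked up inside the callee.
	 */
	free_from_removing_vdev(vd, offset, size);
}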
@@ -494,8 +499,13 @@ free_from_removing_vdev(vdev_t *vd, uint64_t offset, uint64_t size,
* held, so that the remove_thread can not load this metaslab and then
* visit this offset between the time that we metaslab_free_concrete()
* and when we check to see if it has been visited.
+ *
+ * Note: The checkpoint flag is set to false as having/taking
+ * a checkpoint and removing a device can't happen at the same
+ * time.
*/
- metaslab_free_concrete(vd, offset, size, txg);
+ ASSERT(!spa_has_checkpoint(spa));
+ metaslab_free_concrete(vd, offset, size, B_FALSE);
uint64_t synced_size = 0;
uint64_t synced_offset = 0;
@@ -627,16 +637,17 @@ free_from_removing_vdev(vdev_t *vd, uint64_t offset, uint64_t size,
* of this free.
*/
if (synced_size > 0) {
- vdev_indirect_mark_obsolete(vd, synced_offset, synced_size,
- txg);
+ vdev_indirect_mark_obsolete(vd, synced_offset, synced_size);
+
/*
* Note: this can only be called from syncing context,
* and the vdev_indirect_mapping is only changed from the
* sync thread, so we don't need svr_lock while doing
* metaslab_free_impl_cb.
*/
+ boolean_t checkpoint = B_FALSE;
vdev_indirect_ops.vdev_op_remap(vd, synced_offset, synced_size,
- metaslab_free_impl_cb, &txg);
+ metaslab_free_impl_cb, &checkpoint);
}
}
@@ -684,10 +695,10 @@ static void
free_mapped_segment_cb(void *arg, uint64_t offset, uint64_t size)
{
vdev_t *vd = arg;
- vdev_indirect_mark_obsolete(vd, offset, size,
- vd->vdev_spa->spa_syncing_txg);
+ vdev_indirect_mark_obsolete(vd, offset, size);
+ boolean_t checkpoint = B_FALSE;
vdev_indirect_ops.vdev_op_remap(vd, offset, size,
- metaslab_free_impl_cb, &vd->vdev_spa->spa_syncing_txg);
+ metaslab_free_impl_cb, &checkpoint);
}
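The two preceding hunks make the same substitution: the opaque arg
handed to metaslab_free_impl_cb through vdev_op_remap() used to
point at the syncing txg and now points at a checkpoint flag, which
is always B_FALSE on this path because device removal and pool
checkpoints are mutually exclusive. A minimal sketch of the
receiving side, assuming only that the callback unpacks its
void *arg (the example_cb name is invented):

/*
 * Sketch (not from this diff): how the remap callback now
 * interprets its opaque argument.
 */
static void
example_cb(void *arg)
{
	boolean_t *checkpoint = arg;	/* was: uint64_t *txg */

	/* On the removal path the flag is always B_FALSE. */
	ASSERT(!*checkpoint);
}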
/*
@@ -1363,7 +1374,7 @@ spa_vdev_remove_thread(void *arg)
* Assert nothing in flight -- ms_*tree is empty.
*/
for (int i = 0; i < TXG_SIZE; i++) {
- ASSERT0(range_tree_space(msp->ms_alloctree[i]));
+ ASSERT0(range_tree_space(msp->ms_allocating[i]));
}
/*
@@ -1393,7 +1404,7 @@ spa_vdev_remove_thread(void *arg)
SM_ALLOC));
space_map_close(sm);
- range_tree_walk(msp->ms_freeingtree,
+ range_tree_walk(msp->ms_freeing,
range_tree_remove, svr->svr_allocd_segs);
/*
@@ -1412,7 +1423,7 @@ spa_vdev_remove_thread(void *arg)
msp->ms_id);
while (!svr->svr_thread_exit &&
- range_tree_space(svr->svr_allocd_segs) != 0) {
+ !range_tree_is_empty(svr->svr_allocd_segs)) {
mutex_exit(&svr->svr_lock);
@@ -1427,6 +1438,19 @@ spa_vdev_remove_thread(void *arg)
*/
spa_config_exit(spa, SCL_CONFIG, FTAG);
+ /*
+ * This delay will pause the removal around the point
+ * specified by zfs_remove_max_bytes_pause. We do this
+ * solely from the test suite or during debugging.
+ */
+ uint64_t bytes_copied =
+ spa->spa_removing_phys.sr_copied;
+ for (int i = 0; i < TXG_SIZE; i++)
+ bytes_copied += svr->svr_bytes_done[i];
+ while (zfs_remove_max_bytes_pause <= bytes_copied &&
+ !svr->svr_thread_exit)
+ delay(hz);
+
mutex_enter(&vca.vca_lock);
while (vca.vca_outstanding_bytes >
zfs_remove_max_copy_bytes) {
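The loop added above is the entire pause mechanism:
zfs_remove_max_bytes_pause defaults to -1UL, i.e. ULONG_MAX, so the
predicate can never hold in normal operation, and removal only
stalls once the test suite lowers the threshold (the module_param()
declaration at the bottom of this diff exposes it at runtime; on
Linux that conventionally means
/sys/module/zfs/parameters/zfs_remove_max_bytes_pause). A minimal
standalone sketch of the predicate, using nothing beyond standard C:

#include <stdio.h>

int
main(void)
{
	unsigned long pause_at = -1UL;		/* default: never pause */
	unsigned long copied = 1UL << 30;	/* pretend 1 GiB copied */

	/* Same comparison as the kernel loop above (exit check omitted). */
	printf("paused: %d\n", pause_at <= copied);	/* 0 -- ULONG_MAX */

	pause_at = 1UL << 20;	/* test suite: pause after 1 MiB */
	printf("paused: %d\n", pause_at <= copied);	/* 1 -- would delay(hz) */
	return (0);
}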
@@ -1567,10 +1591,10 @@ spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx)
* Assert nothing in flight -- ms_*tree is empty.
*/
for (int i = 0; i < TXG_SIZE; i++)
- ASSERT0(range_tree_space(msp->ms_alloctree[i]));
+ ASSERT0(range_tree_space(msp->ms_allocating[i]));
for (int i = 0; i < TXG_DEFER_SIZE; i++)
- ASSERT0(range_tree_space(msp->ms_defertree[i]));
- ASSERT0(range_tree_space(msp->ms_freedtree));
+ ASSERT0(range_tree_space(msp->ms_defer[i]));
+ ASSERT0(range_tree_space(msp->ms_freed));
if (msp->ms_sm != NULL) {
/*
@@ -1586,7 +1610,7 @@ spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx)
mutex_enter(&svr->svr_lock);
VERIFY0(space_map_load(msp->ms_sm,
svr->svr_allocd_segs, SM_ALLOC));
- range_tree_walk(msp->ms_freeingtree,
+ range_tree_walk(msp->ms_freeing,
range_tree_remove, svr->svr_allocd_segs);
/*
@@ -1662,7 +1686,8 @@ spa_vdev_remove_cancel(spa_t *spa)
uint64_t vdid = spa->spa_vdev_removal->svr_vdev_id;
int error = dsl_sync_task(spa->spa_name, spa_vdev_remove_cancel_check,
- spa_vdev_remove_cancel_sync, NULL, 0, ZFS_SPACE_CHECK_NONE);
+ spa_vdev_remove_cancel_sync, NULL, 0,
+ ZFS_SPACE_CHECK_EXTRA_RESERVED);
if (error == 0) {
spa_config_enter(spa, SCL_ALLOC | SCL_VDEV, FTAG, RW_WRITER);
@@ -1999,6 +2024,17 @@ spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
if (!locked)
txg = spa_vdev_enter(spa);
+ ASSERT(MUTEX_HELD(&spa_namespace_lock));
+ if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
+ error = (spa_has_checkpoint(spa)) ?
+ ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
+
+ if (!locked)
+ return (spa_vdev_exit(spa, NULL, txg, error));
+
+ return (error);
+ }
+
vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (spa->spa_spares.sav_vdevs != NULL &&
@@ -2111,6 +2147,13 @@ module_param(vdev_removal_max_span, int, 0644);
MODULE_PARM_DESC(vdev_removal_max_span,
"Largest span of free chunks a remap segment can span");
+/* BEGIN CSTYLED */
+module_param(zfs_remove_max_bytes_pause, ulong, 0644);
+MODULE_PARM_DESC(zfs_remove_max_bytes_pause,
+ "Pause device removal after this many bytes are copied "
+ "(debug use only - causes removal to hang)");
+/* END CSTYLED */
+
EXPORT_SYMBOL(free_from_removing_vdev);
EXPORT_SYMBOL(spa_removal_get_stats);
EXPORT_SYMBOL(spa_remove_init);