author      Olaf Faaland <[email protected]>        2019-03-12 10:37:06 -0700
committer   Brian Behlendorf <[email protected]>    2019-03-12 10:37:06 -0700
commit      3d31aad83e6420d7a2f661ca077afdac13f50b77 (patch)
tree        7eb5fc97bdc07188d2d415464ad9c39176bd3fd4 /module
parent      b1b94e9644ee6af27ce71d127618b7d5323561c6 (diff)
MMP writes rotate over leaves
Instead of choosing a leaf vdev quasi-randomly, by starting at the root
vdev and randomly choosing children, rotate over leaves to issue MMP
writes. This fixes an issue in a pool whose top-level vdevs have
different numbers of leaves.
The issue is that the frequency at which an individual leaf is chosen for
MMP writes depends not on the total number of leaves in the pool, but on
how many siblings that leaf has.
For example, in a pool like this:
             root-vdev
      +-----------+----------+
    vdev1                  vdev2
      |                      |
      |        +------+------+------+------+
    disk1    disk2  disk3  disk4  disk5  disk6
vdev1 and vdev2 will each be chosen 50% of the time. Every time vdev1 is
chosen, disk1 will be chosen. However, every time vdev2 is chosen, disk2
is chosen only 20% of the time. Overall, disk1 receives 50% of all MMP
writes while disk2 receives 50% * 20% = 10%, so disk1 is sent 5x as many
MMP writes as disk2.
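
To make the skew concrete, here is a small user-space simulation (an
illustrative sketch, not ZFS code; the coin flip and uniform pick are
stand-ins for the old random descent over this specific layout). Over
many trials disk1 converges to about 50% of the writes and each of
disk2..disk6 to about 10%, the 5x ratio described above.

	#include <stdio.h>
	#include <stdlib.h>

	int
	main(void)
	{
		int hits[6] = { 0 };
		int trials = 1000000;

		srand(12345);
		for (int i = 0; i < trials; i++) {
			if (rand() % 2 == 0)
				hits[0]++;		/* vdev1 -> disk1 */
			else
				hits[1 + rand() % 5]++;	/* vdev2 -> disk2..6 */
		}
		for (int d = 0; d < 6; d++)
			printf("disk%d: %.1f%%\n", d + 1,
			    100.0 * hits[d] / trials);
		return (0);
	}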
This may create wear issues in the case of SSDs. It also reduces the
effectiveness of MMP, which depends on the writes being evenly
distributed in order to handle the case where some devices fail or are
partitioned.
The new code maintains a list of leaf vdevs in the pool. MMP records
the last leaf used for an MMP write in mmp->mmp_last_leaf. To choose
the next leaf, MMP starts at mmp->mmp_last_leaf and traverses the list,
continuing from the head if the tail is reached. It stops when a
suitable leaf is found or all leaves have been examined.
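
A minimal sketch of that rotation (illustrative only, not the kernel
code): the struct leaf type and its hypothetical "usable" flag stand in
for the real vdev_t checks, vdev_writeable() and vdev_mmp_pending, and a
circular next pointer replaces the head/tail wrap-around of the real
list_t.

	#include <stdbool.h>
	#include <stdio.h>

	struct leaf {
		const char *name;
		struct leaf *next;	/* circular: tail points to head */
		bool usable;
	};

	static struct leaf *last_leaf;	/* analogous to mmp_last_leaf */

	static struct leaf *
	next_leaf(void)
	{
		struct leaf *l = last_leaf;

		do {
			l = l->next;
			if (l->usable) {
				last_leaf = l;	/* resume here next time */
				return (l);
			}
		} while (l != last_leaf);	/* all leaves examined */

		return (NULL);
	}

	int
	main(void)
	{
		struct leaf d3 = { "disk3", NULL, true };
		struct leaf d2 = { "disk2", &d3, false };  /* write pending */
		struct leaf d1 = { "disk1", &d2, true };

		d3.next = &d1;		/* close the circle */
		last_leaf = &d1;

		/* Rotation skips the unusable leaf: disk3 disk1 disk3 disk1 */
		for (int i = 0; i < 4; i++)
			printf("%s\n", next_leaf()->name);
		return (0);
	}

The real mmp_next_leaf() in the diff below additionally revalidates
mmp_last_leaf against a generation counter, spa_leaf_list_gen, so a stale
pointer is discarded whenever the leaf list changes.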
Added a test to verify MMP write distribution is even.
Reviewed-by: Tom Caputi <[email protected]>
Reviewed-by: Kash Pande <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Reviewed-by: loli10K <[email protected]>
Signed-off-by: Olaf Faaland <[email protected]>
Closes #7953
Diffstat (limited to 'module')
-rw-r--r--   module/zfs/mmp.c        110
-rw-r--r--   module/zfs/spa_misc.c     4
-rw-r--r--   module/zfs/vdev.c        13
3 files changed, 61 insertions, 66 deletions
diff --git a/module/zfs/mmp.c b/module/zfs/mmp.c
index 746ee0f77..16975dd98 100644
--- a/module/zfs/mmp.c
+++ b/module/zfs/mmp.c
@@ -205,80 +205,57 @@ typedef enum mmp_vdev_state_flag {
 	MMP_FAIL_WRITE_PENDING = (1 << 1),
 } mmp_vdev_state_flag_t;
 
-static vdev_t *
-mmp_random_leaf_impl(vdev_t *vd, int *fail_mask)
-{
-	int child_idx;
-
-	if (vd->vdev_ops->vdev_op_leaf) {
-		vdev_t *ret;
-
-		if (!vdev_writeable(vd)) {
-			*fail_mask |= MMP_FAIL_NOT_WRITABLE;
-			ret = NULL;
-		} else if (vd->vdev_mmp_pending != 0) {
-			*fail_mask |= MMP_FAIL_WRITE_PENDING;
-			ret = NULL;
-		} else {
-			ret = vd;
-		}
-
-		return (ret);
-	}
-
-	if (vd->vdev_children == 0)
-		return (NULL);
-
-	child_idx = spa_get_random(vd->vdev_children);
-	for (int offset = vd->vdev_children; offset > 0; offset--) {
-		vdev_t *leaf;
-		vdev_t *child = vd->vdev_child[(child_idx + offset) %
-		    vd->vdev_children];
-
-		leaf = mmp_random_leaf_impl(child, fail_mask);
-		if (leaf)
-			return (leaf);
-	}
-
-	return (NULL);
-}
-
 /*
  * Find a leaf vdev to write an MMP block to.  It must not have an outstanding
  * mmp write (if so a new write will also likely block).  If there is no usable
- * leaf in the tree rooted at in_vd, a nonzero error value is returned, and
- * *out_vd is unchanged.
- *
- * The error value returned is a bit field.
- *
- * MMP_FAIL_WRITE_PENDING
- * If set, one or more leaf vdevs are writeable, but have an MMP write which has
- * not yet completed.
- *
- * MMP_FAIL_NOT_WRITABLE
- * If set, one or more vdevs are not writeable.  The children of those vdevs
- * were not examined.
+ * leaf, a nonzero error value is returned. The error value returned is a bit
+ * field.
  *
- * Assuming in_vd points to a tree, a random subtree will be chosen to start.
- * That subtree, and successive ones, will be walked until a usable leaf has
- * been found, or all subtrees have been examined (except that the children of
- * un-writeable vdevs are not examined).
- *
- * If the leaf vdevs in the tree are healthy, the distribution of returned leaf
- * vdevs will be even.  If there are unhealthy leaves, the following leaves
- * (child_index % index_children) will be chosen more often.
+ * MMP_FAIL_WRITE_PENDING   One or more leaf vdevs are writeable, but have an
+ *                          outstanding MMP write.
+ * MMP_FAIL_NOT_WRITABLE    One or more leaf vdevs are not writeable.
  */
 static int
-mmp_random_leaf(vdev_t *in_vd, vdev_t **out_vd)
+mmp_next_leaf(spa_t *spa)
 {
-	int error_mask = 0;
-	vdev_t *vd = mmp_random_leaf_impl(in_vd, &error_mask);
+	vdev_t *leaf;
+	vdev_t *starting_leaf;
+	int fail_mask = 0;
+
+	ASSERT(MUTEX_HELD(&spa->spa_mmp.mmp_io_lock));
+	ASSERT(spa_config_held(spa, SCL_STATE, RW_READER));
+	ASSERT(list_link_active(&spa->spa_leaf_list.list_head) == B_TRUE);
+	ASSERT(!list_is_empty(&spa->spa_leaf_list));
+
+	if (spa->spa_mmp.mmp_leaf_last_gen != spa->spa_leaf_list_gen) {
+		spa->spa_mmp.mmp_last_leaf = list_head(&spa->spa_leaf_list);
+		spa->spa_mmp.mmp_leaf_last_gen = spa->spa_leaf_list_gen;
+	}
+
+	leaf = spa->spa_mmp.mmp_last_leaf;
+	if (leaf == NULL)
+		leaf = list_head(&spa->spa_leaf_list);
+	starting_leaf = leaf;
 
-	if (error_mask == 0)
-		*out_vd = vd;
+	do {
+		leaf = list_next(&spa->spa_leaf_list, leaf);
+		if (leaf == NULL)
+			leaf = list_head(&spa->spa_leaf_list);
 
-	return (error_mask);
+		if (!vdev_writeable(leaf)) {
+			fail_mask |= MMP_FAIL_NOT_WRITABLE;
+		} else if (leaf->vdev_mmp_pending != 0) {
+			fail_mask |= MMP_FAIL_WRITE_PENDING;
+		} else {
+			spa->spa_mmp.mmp_last_leaf = leaf;
+			return (0);
+		}
+	} while (leaf != starting_leaf);
+
+	ASSERT(fail_mask);
+
+	return (fail_mask);
 }
 
 /*
@@ -398,10 +375,10 @@ mmp_write_uberblock(spa_t *spa)
 		zfs_dbgmsg("SCL_STATE acquisition took %llu ns\n",
 		    (u_longlong_t)lock_acquire_time);
 
-	error = mmp_random_leaf(spa->spa_root_vdev, &vd);
-
 	mutex_enter(&mmp->mmp_io_lock);
 
+	error = mmp_next_leaf(spa);
+
 	/*
 	 * spa_mmp_history has two types of entries:
 	 * Issued MMP write: records time issued, error status, etc.
@@ -425,6 +402,7 @@ mmp_write_uberblock(spa_t *spa)
 		return;
 	}
 
+	vd = spa->spa_mmp.mmp_last_leaf;
 	mmp->mmp_skip_error = 0;
 
 	if (mmp->mmp_zio_root == NULL)
diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c
index 0976cc49c..71221b21b 100644
--- a/module/zfs/spa_misc.c
+++ b/module/zfs/spa_misc.c
@@ -730,6 +730,9 @@ spa_add(const char *name, nvlist_t *config, const char *altroot)
 		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
 	}
 
+	list_create(&spa->spa_leaf_list, sizeof (vdev_t),
+	    offsetof(vdev_t, vdev_leaf_node));
+
 	return (spa);
 }
 
@@ -772,6 +775,7 @@ spa_remove(spa_t *spa)
 	    sizeof (avl_tree_t));
 
 	list_destroy(&spa->spa_config_list);
+	list_destroy(&spa->spa_leaf_list);
 
 	nvlist_free(spa->spa_label_features);
 	nvlist_free(spa->spa_load_info);
diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c
index 1332c720f..890bb1135 100644
--- a/module/zfs/vdev.c
+++ b/module/zfs/vdev.c
@@ -398,6 +398,11 @@ vdev_add_child(vdev_t *pvd, vdev_t *cvd)
 	 */
 	for (; pvd != NULL; pvd = pvd->vdev_parent)
 		pvd->vdev_guid_sum += cvd->vdev_guid_sum;
+
+	if (cvd->vdev_ops->vdev_op_leaf) {
+		list_insert_head(&cvd->vdev_spa->spa_leaf_list, cvd);
+		cvd->vdev_spa->spa_leaf_list_gen++;
+	}
 }
 
 void
@@ -427,6 +432,12 @@ vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
 		pvd->vdev_children = 0;
 	}
 
+	if (cvd->vdev_ops->vdev_op_leaf) {
+		spa_t *spa = cvd->vdev_spa;
+		list_remove(&spa->spa_leaf_list, cvd);
+		spa->spa_leaf_list_gen++;
+	}
+
 	/*
 	 * Walk up all ancestors to update guid sum.
 	 */
@@ -531,6 +542,7 @@ vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
 	list_link_init(&vd->vdev_config_dirty_node);
 	list_link_init(&vd->vdev_state_dirty_node);
 	list_link_init(&vd->vdev_initialize_node);
+	list_link_init(&vd->vdev_leaf_node);
 	mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_NOLOCKDEP, NULL);
 	mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
 	mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
@@ -914,6 +926,7 @@ vdev_free(vdev_t *vd)
 		vdev_remove_child(vd->vdev_parent, vd);
 
 	ASSERT(vd->vdev_parent == NULL);
+	ASSERT(!list_link_active(&vd->vdev_leaf_node));
 
 	/*
 	 * Clean up vdev structure.