author    Alexander Motin <[email protected]>    2021-07-21 08:40:36 -0400
committer GitHub <[email protected]>    2021-07-21 06:40:36 -0600
commit    1b50749ce9757030ddd4dba9f03ee18182cda82e (patch)
tree      e00d31d71014a3e9164369807ee5bccbbe046bd6 /module/zfs/zio.c
parent    bc93935ef0294e559bd0b46809a33c717070a8ce (diff)
Optimize allocation throttling
Remove mc_lock use from metaslab_class_throttle_*(). The math there is based on refcounts and so is atomic; the only race possible is between zfs_refcount_count() and zfs_refcount_add(). But in most cases metaslab_class_throttle_reserve() is called with the allocator lock held, which covers the race. In cases where the lock is not held, GANG_ALLOCATION() or METASLAB_MUST_RESERVE are set, and so we do not use zfs_refcount_count(). And even if we assume some other, non-existent scenario, the worst that may happen from this race is that a few more I/Os get to allocation earlier, which is not a problem.

Move locks and data of different allocators into different cache lines to avoid false sharing. Group the spa_alloc_* arrays together into a single array of aligned struct spa_alloc spa_allocs. Align struct metaslab_class_allocator.

Reviewed-by: Paul Dagnelie <[email protected]>
Reviewed-by: Ryan Moeller <[email protected]>
Reviewed-by: Don Brady <[email protected]>
Signed-off-by: Alexander Motin <[email protected]>
Sponsored-By: iXsystems, Inc.
Closes #12314
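The refcount argument above can be seen in a small model. The sketch below is illustrative only: the type and field names (alloc_throttle_t, mca_alloc_slots, mca_alloc_max_slots) and the must_reserve flag are stand-ins for the real OpenZFS structures, not their actual definitions.

/*
 * A rough model of why dropping mc_lock is safe: the reservation check
 * is a read-then-add on an atomic counter, so the only race is an
 * admission decision made on a slightly stale count.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct {
	atomic_uint_fast64_t	mca_alloc_slots;	/* reserved slots */
	uint64_t		mca_alloc_max_slots;	/* throttle limit */
} alloc_throttle_t;

static bool
throttle_reserve(alloc_throttle_t *at, uint64_t slots, bool must_reserve)
{
	if (must_reserve ||
	    atomic_load(&at->mca_alloc_slots) + slots <=
	    at->mca_alloc_max_slots) {
		/*
		 * Another thread may reserve between the load above and
		 * this add; the worst case is admitting a few extra I/Os,
		 * which the commit message argues is harmless.
		 */
		atomic_fetch_add(&at->mca_alloc_slots, slots);
		return (true);
	}
	return (false);
}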
Diffstat (limited to 'module/zfs/zio.c')
-rw-r--r--    module/zfs/zio.c    33
1 file changed, 16 insertions(+), 17 deletions(-)
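Before reading the diff, it may help to picture the layout change the commit message describes. This is a minimal sketch, not the OpenZFS definition: the member names mirror the patch (spa_alloc_t, spaa_lock, spaa_tree), but the mutex and tree types and the 64-byte line size are assumptions for illustration.

#include <pthread.h>

#define	CACHE_LINE_SIZE	64

typedef struct spa_alloc {
	pthread_mutex_t	spaa_lock;	/* protects spaa_tree */
	void		*spaa_tree;	/* stand-in for the AVL tree of queued zios */
} __attribute__((aligned(CACHE_LINE_SIZE))) spa_alloc_t;

/*
 * Before the patch, locks and trees lived in two parallel arrays
 * (spa_alloc_locks[i], spa_alloc_trees[i]); adjacent allocators could
 * share a cache line, so lock traffic on one allocator invalidated the
 * line holding its neighbor's data.  One aligned array of structs keeps
 * each allocator's hot data on its own line:
 */
struct spa_like {
	spa_alloc_t	*spa_allocs;	/* one aligned element per allocator */
	int		spa_alloc_count;
};

The diff below then replaces every spa_alloc_locks[i]/spa_alloc_trees[i] pair with spa_allocs[i].spaa_lock and spa_allocs[i].spaa_tree.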
diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index 6030b3813..76ed4fad4 100644
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -877,8 +877,7 @@ zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
zio->io_bookmark = *zb;
if (pio != NULL) {
- if (zio->io_metaslab_class == NULL)
- zio->io_metaslab_class = pio->io_metaslab_class;
+ zio->io_metaslab_class = pio->io_metaslab_class;
if (zio->io_logical == NULL)
zio->io_logical = pio->io_logical;
if (zio->io_child_type == ZIO_CHILD_GANG)
@@ -3380,9 +3379,9 @@ zio_io_to_allocate(spa_t *spa, int allocator)
{
zio_t *zio;
- ASSERT(MUTEX_HELD(&spa->spa_alloc_locks[allocator]));
+ ASSERT(MUTEX_HELD(&spa->spa_allocs[allocator].spaa_lock));
- zio = avl_first(&spa->spa_alloc_trees[allocator]);
+ zio = avl_first(&spa->spa_allocs[allocator].spaa_tree);
if (zio == NULL)
return (NULL);
@@ -3394,11 +3393,11 @@ zio_io_to_allocate(spa_t *spa, int allocator)
*/
ASSERT3U(zio->io_allocator, ==, allocator);
if (!metaslab_class_throttle_reserve(zio->io_metaslab_class,
- zio->io_prop.zp_copies, zio->io_allocator, zio, 0)) {
+ zio->io_prop.zp_copies, allocator, zio, 0)) {
return (NULL);
}
- avl_remove(&spa->spa_alloc_trees[allocator], zio);
+ avl_remove(&spa->spa_allocs[allocator].spaa_tree, zio);
ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE);
return (zio);
@@ -3422,8 +3421,8 @@ zio_dva_throttle(zio_t *zio)
return (zio);
}
+ ASSERT(zio->io_type == ZIO_TYPE_WRITE);
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
-
ASSERT3U(zio->io_queued_timestamp, >, 0);
ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE);
@@ -3435,14 +3434,14 @@ zio_dva_throttle(zio_t *zio)
* into 2^20 block regions, and then hash based on the objset, object,
* level, and region to accomplish both of these goals.
*/
- zio->io_allocator = cityhash4(bm->zb_objset, bm->zb_object,
+ int allocator = (uint_t)cityhash4(bm->zb_objset, bm->zb_object,
bm->zb_level, bm->zb_blkid >> 20) % spa->spa_alloc_count;
- mutex_enter(&spa->spa_alloc_locks[zio->io_allocator]);
- ASSERT(zio->io_type == ZIO_TYPE_WRITE);
+ zio->io_allocator = allocator;
zio->io_metaslab_class = mc;
- avl_add(&spa->spa_alloc_trees[zio->io_allocator], zio);
- nio = zio_io_to_allocate(spa, zio->io_allocator);
- mutex_exit(&spa->spa_alloc_locks[zio->io_allocator]);
+ mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
+ avl_add(&spa->spa_allocs[allocator].spaa_tree, zio);
+ nio = zio_io_to_allocate(spa, allocator);
+ mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
return (nio);
}
@@ -3451,9 +3450,9 @@ zio_allocate_dispatch(spa_t *spa, int allocator)
{
zio_t *zio;
- mutex_enter(&spa->spa_alloc_locks[allocator]);
+ mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
zio = zio_io_to_allocate(spa, allocator);
- mutex_exit(&spa->spa_alloc_locks[allocator]);
+ mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
if (zio == NULL)
return;
@@ -3643,8 +3642,8 @@ zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
* some parallelism.
*/
int flags = METASLAB_FASTWRITE | METASLAB_ZIL;
- int allocator = cityhash4(0, 0, 0, os->os_dsl_dataset->ds_object) %
- spa->spa_alloc_count;
+ int allocator = (uint_t)cityhash4(0, 0, 0,
+ os->os_dsl_dataset->ds_object) % spa->spa_alloc_count;
error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1,
txg, NULL, flags, &io_alloc_list, NULL, allocator);
*slog = (error == 0);
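For reference, the bookmark-to-allocator mapping that the zio_dva_throttle() and zio_alloc_zil() hunks above rely on can be sketched as follows. cityhash4() is OpenZFS-internal, so a hypothetical mixer stands in for it here; only the shape of the computation (hash of objset/object/level/region, 32-bit cast, modulo spa_alloc_count) follows the patch.

#include <stdint.h>

static uint64_t
mix4(uint64_t a, uint64_t b, uint64_t c, uint64_t d)	/* cityhash4() stand-in */
{
	uint64_t h = a * 0x9e3779b97f4a7c15ULL ^ b;
	h = (h ^ (h >> 30)) * 0xbf58476d1ce4e5b9ULL ^ c;
	h = (h ^ (h >> 27)) * 0x94d049bb133111ebULL ^ d;
	return (h ^ (h >> 31));
}

static int
pick_allocator(uint64_t objset, uint64_t object, uint64_t level,
    uint64_t blkid, int alloc_count)
{
	/*
	 * Blocks are grouped into 2^20-block regions so nearby blocks
	 * land on the same allocator; the 32-bit cast before the modulo
	 * mirrors the patch's (uint_t) cast, keeping the division narrow
	 * and the index non-negative.
	 */
	return ((uint32_t)mix4(objset, object, level, blkid >> 20) %
	    alloc_count);
}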