aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAlexander Motin <[email protected]>2023-10-20 15:37:16 -0400
committerGitHub <[email protected]>2023-10-20 12:37:16 -0700
commit4fbc52495552b7e8b3337c94d0b7080db65657b8 (patch)
treed0b2ccca968b7bd1b82022b456b5b330fa8b9190
parentde7b1ae30ab83c37978902dfd53c1ac783ddbc6e (diff)
Remove lock from dsl_pool_need_dirty_delay()
Torn reads/writes of dp_dirty_total are unlikely: on 64-bit systems due to register size, while on 32-bit due to memory constraints. And even if we hit some race, the code implementing the delay takes the lock anyway. Removal of the pool-wide lock acquisition saves ~1% of CPU time on 8-thread 8KB write workload. Reviewed-by: Brian Behlendorf <[email protected]> Signed-off-by: Alexander Motin <[email protected]> Sponsored by: iXsystems, Inc. Closes #15390
-rw-r--r--module/zfs/dsl_pool.c14
1 file changed, 7 insertions, 7 deletions
diff --git a/module/zfs/dsl_pool.c b/module/zfs/dsl_pool.c
index 9120fef93..17b971248 100644
--- a/module/zfs/dsl_pool.c
+++ b/module/zfs/dsl_pool.c
@@ -965,18 +965,18 @@ dsl_pool_need_dirty_delay(dsl_pool_t *dp)
uint64_t delay_min_bytes =
zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
- mutex_enter(&dp->dp_lock);
- uint64_t dirty = dp->dp_dirty_total;
- mutex_exit(&dp->dp_lock);
-
- return (dirty > delay_min_bytes);
+ /*
+ * We are not taking the dp_lock here and few other places, since torn
+ * reads are unlikely: on 64-bit systems due to register size and on
+ * 32-bit due to memory constraints. Pool-wide locks in hot path may
+ * be too expensive, while we do not need a precise result here.
+ */
+ return (dp->dp_dirty_total > delay_min_bytes);
}
static boolean_t
dsl_pool_need_dirty_sync(dsl_pool_t *dp, uint64_t txg)
{
- ASSERT(MUTEX_HELD(&dp->dp_lock));
-
uint64_t dirty_min_bytes =
zfs_dirty_data_max * zfs_dirty_data_sync_percent / 100;
uint64_t dirty = dp->dp_dirty_pertxg[txg & TXG_MASK];