aboutsummaryrefslogtreecommitdiffstats
path: root/module/zfs/dbuf_stats.c
diff options
context:
space:
mode:
authorBrian Behlendorf <[email protected]>2022-09-19 11:07:15 -0700
committerBrian Behlendorf <[email protected]>2022-09-22 12:59:41 -0700
commit223b04d23d6e84dfb6c983c46f69d5986625125c (patch)
tree29696c28679e279b6dc87b5a87c4a32caf7064f6 /module/zfs/dbuf_stats.c
parente506a0ce40bd777a84ba1de8ed40df2154f7afb1 (diff)
Revert "Reduce dbuf_find() lock contention"
This reverts commit 34dbc618f50cfcd392f90af80c140398c38cbcd1. While this change resolved the lock contention observed for certain workloads, it inadvertently reduced the maximum hash inserts/removes per second. This appears to be due to the slightly higher acquisition cost of a rwlock vs a mutex. Reviewed-by: Brian Behlendorf <[email protected]>
Diffstat (limited to 'module/zfs/dbuf_stats.c')
-rw-r--r--module/zfs/dbuf_stats.c4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/module/zfs/dbuf_stats.c b/module/zfs/dbuf_stats.c
index 747fc337d..e5dc2df30 100644
--- a/module/zfs/dbuf_stats.c
+++ b/module/zfs/dbuf_stats.c
@@ -137,7 +137,7 @@ dbuf_stats_hash_table_data(char *buf, size_t size, void *data)
if (size)
buf[0] = 0;
- rw_enter(DBUF_HASH_RWLOCK(h, dsh->idx), RW_READER);
+ mutex_enter(DBUF_HASH_MUTEX(h, dsh->idx));
for (db = h->hash_table[dsh->idx]; db != NULL; db = db->db_hash_next) {
/*
* Returning ENOMEM will cause the data and header functions
@@ -158,7 +158,7 @@ dbuf_stats_hash_table_data(char *buf, size_t size, void *data)
mutex_exit(&db->db_mtx);
}
- rw_exit(DBUF_HASH_RWLOCK(h, dsh->idx));
+ mutex_exit(DBUF_HASH_MUTEX(h, dsh->idx));
return (error);
}