author		Alexander Motin <[email protected]>	2023-06-05 14:51:44 -0400
committer	GitHub <[email protected]>		2023-06-05 11:51:44 -0700
commit		5ba4025a8d94699d2638938a0cdf790113ff0531 (patch)
tree		12839b3feb9a8b4f5502798317b5a9e91da6d649 /module/zfs
parent		dae3c549f59a4650edd07b86707166765c240310 (diff)
Introduce zfs_refcount_(add|remove)_few().
There are two places where we need to add/remove several references
with the semantics of zfs_refcount_(add|remove). But when debug/tracing
is disabled, it is a crime to run multiple atomic_inc() calls in a loop,
especially under the congested pool-wide allocator lock. The new
functions introduced here implement the same semantics as the loop,
but without the overhead in production builds.

Reviewed-by: Rich Ercolani <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: Alexander Motin <[email protected]>
Sponsored by: iXsystems, Inc.
Closes #14934
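To make the win concrete, here is a minimal, self-contained C sketch of
the pattern this commit applies. The refcount_t struct and function
names below are simplified stand-ins for illustration, not the actual
OpenZFS definitions: when per-holder tracking is off, N separate atomic
increments collapse into a single atomic add.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* Simplified stand-in for zfs_refcount_t (hypothetical). */
	typedef struct {
		_Atomic uint64_t rc_count;
		bool rc_tracked;	/* per-holder tracking, debug builds only */
	} refcount_t;

	/* Old pattern: one atomic read-modify-write per reference. */
	static void
	refcount_add_loop(refcount_t *rc, uint64_t number)
	{
		for (; number > 0; number--)
			atomic_fetch_add(&rc->rc_count, 1);
	}

	/* New pattern: a single atomic read-modify-write covers all N. */
	static void
	refcount_add_few(refcount_t *rc, uint64_t number)
	{
		if (!rc->rc_tracked)
			atomic_fetch_add(&rc->rc_count, number);
		else	/* tracking needs one recorded entry per holder */
			refcount_add_loop(rc, number);
	}

Under a congested lock, the difference is N bus-locked operations
versus one.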
Diffstat (limited to 'module/zfs')
-rw-r--r--	module/zfs/dmu_zfetch.c	 3
-rw-r--r--	module/zfs/metaslab.c	 6
-rw-r--r--	module/zfs/refcount.c	18
3 files changed, 21 insertions(+), 6 deletions(-)
diff --git a/module/zfs/dmu_zfetch.c b/module/zfs/dmu_zfetch.c
index ffc012e6c..b70459380 100644
--- a/module/zfs/dmu_zfetch.c
+++ b/module/zfs/dmu_zfetch.c
@@ -520,8 +520,7 @@ dmu_zfetch_run(zstream_t *zs, boolean_t missed, boolean_t have_lock)
 	issued = pf_end - pf_start + ipf_end - ipf_start;
 	if (issued > 1) {
 		/* More references on top of taken in dmu_zfetch_prepare(). */
-		for (int i = 0; i < issued - 1; i++)
-			zfs_refcount_add(&zs->zs_refs, NULL);
+		zfs_refcount_add_few(&zs->zs_refs, issued - 1, NULL);
 	} else if (issued == 0) {
 		/* Some other thread has done our work, so drop the ref. */
 		if (zfs_refcount_remove(&zs->zs_refs, NULL) == 0)
diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c
index 94b131fcd..176247d63 100644
--- a/module/zfs/metaslab.c
+++ b/module/zfs/metaslab.c
@@ -5650,8 +5650,7 @@ metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
 		 * We reserve the slots individually so that we can unreserve
 		 * them individually when an I/O completes.
 		 */
-		for (int d = 0; d < slots; d++)
-			zfs_refcount_add(&mca->mca_alloc_slots, zio);
+		zfs_refcount_add_few(&mca->mca_alloc_slots, slots, zio);
 		zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
 		return (B_TRUE);
 	}
@@ -5665,8 +5664,7 @@ metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
 	metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
 
 	ASSERT(mc->mc_alloc_throttle_enabled);
-	for (int d = 0; d < slots; d++)
-		zfs_refcount_remove(&mca->mca_alloc_slots, zio);
+	zfs_refcount_remove_few(&mca->mca_alloc_slots, slots, zio);
 }
 
 static int
diff --git a/module/zfs/refcount.c b/module/zfs/refcount.c
index 62ec03e10..c9a504f67 100644
--- a/module/zfs/refcount.c
+++ b/module/zfs/refcount.c
@@ -151,6 +151,15 @@ zfs_refcount_add(zfs_refcount_t *rc, const void *holder)
 	return (zfs_refcount_add_many(rc, 1, holder));
 }
 
+void
+zfs_refcount_add_few(zfs_refcount_t *rc, uint64_t number, const void *holder)
+{
+	if (!rc->rc_tracked)
+		(void) zfs_refcount_add_many(rc, number, holder);
+	else for (; number > 0; number--)
+		(void) zfs_refcount_add(rc, holder);
+}
+
 int64_t
 zfs_refcount_remove_many(zfs_refcount_t *rc, uint64_t number,
     const void *holder)
@@ -205,6 +214,15 @@ zfs_refcount_remove(zfs_refcount_t *rc, const void *holder)
 }
 
 void
+zfs_refcount_remove_few(zfs_refcount_t *rc, uint64_t number, const void *holder)
+{
+	if (!rc->rc_tracked)
+		(void) zfs_refcount_remove_many(rc, number, holder);
+	else for (; number > 0; number--)
+		(void) zfs_refcount_remove(rc, holder);
+}
+
+void
 zfs_refcount_transfer(zfs_refcount_t *dst, zfs_refcount_t *src)
 {
 	int64_t count, removed_count;
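For callers, the new helpers are drop-in replacements for the
open-coded loops, as the two call sites above show. Note the asymmetry
inside them: with rc_tracked set (debug builds), references are still
added or removed one at a time so that every hold is individually
tracked; with tracking off, both helpers collapse to a single call to
the _many variant, i.e. one atomic operation. A sketch of how a caller
might use the pair, modeled on the dmu_zfetch.c call site above
(`extra` is an illustrative variable, not from the patch):

	zfs_refcount_add_few(&zs->zs_refs, extra, NULL);
	/* ... asynchronous work holding the extra references ... */
	zfs_refcount_remove_few(&zs->zs_refs, extra, NULL);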