author		Alexander Motin <mav@FreeBSD.org>	2020-02-13 14:20:42 -0500
committer	GitHub <noreply@github.com>	2020-02-13 11:20:42 -0800
commit		465e4e795ee3cbdc5de862b26d81b2f1116733df (patch)
tree		0848a222d5ae27a1a623856540aca148ff3c031a /module/zfs
parent		610eec452d723bc53ce531095aff9577a2e0dc93 (diff)
Remove duplicate dbufs accounting
Since the AVL tree already has an embedded element counter, use dn_dbufs_count
only for dbufs not counted there (bonus buffers) and just add the two counts
together. This removes two atomics per dbuf life cycle. According to the
profiler, it reduces the time spent by dbuf_destroy() inside the bottlenecked
dbuf_evict_thread() from 13.36% to 9.20% of the core.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Matt Ahrens <matt@delphix.com>
Signed-off-by: Alexander Motin <mav@FreeBSD.org>
Sponsored-By: iXsystems, Inc.
Closes #9949
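The DN_DBUFS_COUNT() helper referenced by the dnode.c hunks below lives in the
dnode header, which is outside this diffstat (limited to 'module/zfs'). A
minimal sketch of how it could be defined, assuming bonus dbufs stay in
dn_dbufs_count while all other dbufs are tracked by the dn_dbufs AVL tree:

/*
 * Sketch only, not the exact header change from this commit: bonus dbufs
 * are not linked into the dn_dbufs AVL tree, so they remain in
 * dn_dbufs_count; everything else is counted by the tree itself.
 */
#define	DN_DBUFS_COUNT(dn)	((dn)->dn_dbufs_count + \
	avl_numnodes(&(dn)->dn_dbufs))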
Diffstat (limited to 'module/zfs')
-rw-r--r--	module/zfs/dbuf.c	2
-rw-r--r--	module/zfs/dnode.c	4
2 files changed, 2 insertions, 4 deletions
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index bafc30e62..1a6098849 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -2688,7 +2688,6 @@ dbuf_destroy(dmu_buf_impl_t *db)
 			mutex_enter_nested(&dn->dn_dbufs_mtx,
 			    NESTED_SINGLE);
 		avl_remove(&dn->dn_dbufs, db);
-		atomic_dec_32(&dn->dn_dbufs_count);
 		membar_producer();
 		DB_DNODE_EXIT(db);
 		if (needlock)
@@ -2912,7 +2911,6 @@ dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
 	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
 	    zfs_refcount_count(&dn->dn_holds) > 0);
 	(void) zfs_refcount_add(&dn->dn_holds, db);
-	atomic_inc_32(&dn->dn_dbufs_count);
 
 	dprintf_dbuf(db, "db=%p\n", db);
 
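The two hunks above can drop the per-dbuf atomics because, as the commit
message notes, the AVL tree already carries an embedded element counter. A
hypothetical helper (dn_cached_dbufs() does not exist in the tree; it only
illustrates the point) reading that counter via avl_numnodes():

	/*
	 * Illustration only, not part of this commit: the dbufs linked
	 * into dn_dbufs are counted by the AVL tree itself, so reading
	 * avl_numnodes() replaces the removed atomics for them.
	 */
	static uint32_t
	dn_cached_dbufs(dnode_t *dn)
	{
		return ((uint32_t)avl_numnodes(&dn->dn_dbufs));
	}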
diff --git a/module/zfs/dnode.c b/module/zfs/dnode.c
index 167ab8677..3116a59bb 100644
--- a/module/zfs/dnode.c
+++ b/module/zfs/dnode.c
@@ -1004,7 +1004,7 @@ dnode_move(void *buf, void *newbuf, size_t size, void *arg)
 	 */
 	refcount = zfs_refcount_count(&odn->dn_holds);
 	ASSERT(refcount >= 0);
-	dbufs = odn->dn_dbufs_count;
+	dbufs = DN_DBUFS_COUNT(odn);
 
 	/* We can't have more dbufs than dnode holds. */
 	ASSERT3U(dbufs, <=, refcount);
@@ -1031,7 +1031,7 @@ dnode_move(void *buf, void *newbuf, size_t size, void *arg)
 	list_link_replace(&odn->dn_link, &ndn->dn_link);
 	/* If the dnode was safe to move, the refcount cannot have changed. */
 	ASSERT(refcount == zfs_refcount_count(&ndn->dn_holds));
-	ASSERT(dbufs == ndn->dn_dbufs_count);
+	ASSERT(dbufs == DN_DBUFS_COUNT(ndn));
 	zrl_exit(&ndn->dn_handle->dnh_zrlock);	/* handle has moved */
 	mutex_exit(&os->os_lock);