author      Matthew Ahrens <[email protected]>        2019-07-30 09:18:30 -0700
committer   Brian Behlendorf <[email protected]>      2019-07-30 09:18:30 -0700
commit      0eb8ba6ab68801d4b0e6280e7388514a6355fc86 (patch)
tree        0c93c95bc24643bb354a3f4ebfd511b3ac3d82fa /module
parent      1e620c98727a5a5cff1af70fef9bc25626b4e9d8 (diff)
Improve performance by using dmu_tx_hold_*_by_dnode()
In zfs_write() and dmu_tx_hold_sa(), we can use dmu_tx_hold_*_by_dnode()
instead of dmu_tx_hold_*(), since we already have a dbuf from the target
dnode in hand. This eliminates some calls to dnode_hold(), which can be
expensive. This is especially impactful if several threads are accessing
objects that are in the same block of dnodes, because they will contend
for that dbuf's lock.

We are seeing 10-20% performance wins for the sequential_writes tests in
the performance test suite, when doing >=128K writes to files with
recordsize=8K.

This also removes some unnecessary casts that are in the area.

Reviewed-by: Brian Behlendorf <[email protected]>
Reviewed-by: Tony Nguyen <[email protected]>
Signed-off-by: Matthew Ahrens <[email protected]>
Closes #9081
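For illustration, here is a minimal sketch of the pattern the patch applies in
zfs_write() and dmu_tx_hold_sa(): instead of letting dmu_tx_hold_write() or
dmu_tx_hold_bonus() re-resolve the dnode with dnode_hold(), borrow the dnode
from the bonus dbuf that is already held. The helper example_hold_write() and
its arguments are hypothetical; only the DB_DNODE_ENTER() /
dmu_tx_hold_write_by_dnode() / DB_DNODE_EXIT() sequence comes from the commit.

    /*
     * Illustration only, not part of the patch.  Assumes the caller
     * already has an SA handle whose bonus dbuf is held.
     */
    #include <sys/dmu.h>
    #include <sys/dmu_tx.h>
    #include <sys/dbuf.h>
    #include <sys/sa.h>

    static void
    example_hold_write(dmu_tx_t *tx, sa_handle_t *hdl, uint64_t off, uint64_t len)
    {
            /*
             * Old path: dmu_tx_hold_write() looks the dnode up again via
             * dnode_hold(), contending on the dnode block's dbuf lock.
             *
             * dmu_tx_hold_write(tx, sa_handle_object(hdl), off, len);
             */

            /* New path: borrow the dnode from the bonus dbuf we already hold. */
            dmu_buf_impl_t *db = (dmu_buf_impl_t *)sa_get_db(hdl);
            DB_DNODE_ENTER(db);
            dmu_tx_hold_write_by_dnode(tx, DB_DNODE(db), off, len);
            DB_DNODE_EXIT(db);
    }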
Diffstat (limited to 'module')
-rw-r--r--   module/zfs/dmu_tx.c      6
-rw-r--r--   module/zfs/sa.c         10
-rw-r--r--   module/zfs/zfs_vnops.c   8
3 files changed, 15 insertions, 9 deletions
diff --git a/module/zfs/dmu_tx.c b/module/zfs/dmu_tx.c
index 4f489de5f..fcbe30287 100644
--- a/module/zfs/dmu_tx.c
+++ b/module/zfs/dmu_tx.c
@@ -1321,7 +1321,10 @@ dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
object = sa_handle_object(hdl);
- dmu_tx_hold_bonus(tx, object);
+ dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
+ DB_DNODE_ENTER(db);
+ dmu_tx_hold_bonus_by_dnode(tx, DB_DNODE(db));
+ DB_DNODE_EXIT(db);
if (tx->tx_objset->os_sa->sa_master_obj == 0)
return;
@@ -1343,7 +1346,6 @@ dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
ASSERT(tx->tx_txg == 0);
dmu_tx_hold_spill(tx, object);
} else {
- dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
dnode_t *dn;
DB_DNODE_ENTER(db);
diff --git a/module/zfs/sa.c b/module/zfs/sa.c
index 56a606962..4999fef34 100644
--- a/module/zfs/sa.c
+++ b/module/zfs/sa.c
@@ -1380,7 +1380,7 @@ sa_handle_destroy(sa_handle_t *hdl)
dmu_buf_rele(hdl->sa_bonus, NULL);
if (hdl->sa_spill)
- dmu_buf_rele((dmu_buf_t *)hdl->sa_spill, NULL);
+ dmu_buf_rele(hdl->sa_spill, NULL);
mutex_exit(&hdl->sa_lock);
kmem_cache_free(sa_cache, hdl);
@@ -2028,7 +2028,7 @@ sa_bulk_update_impl(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count,
hdl->sa_spill_tab = NULL;
}
- dmu_buf_rele((dmu_buf_t *)hdl->sa_spill, NULL);
+ dmu_buf_rele(hdl->sa_spill, NULL);
hdl->sa_spill = NULL;
}
@@ -2131,13 +2131,13 @@ sa_remove(sa_handle_t *hdl, sa_attr_type_t attr, dmu_tx_t *tx)
void
sa_object_info(sa_handle_t *hdl, dmu_object_info_t *doi)
{
- dmu_object_info_from_db((dmu_buf_t *)hdl->sa_bonus, doi);
+ dmu_object_info_from_db(hdl->sa_bonus, doi);
}
void
sa_object_size(sa_handle_t *hdl, uint32_t *blksize, u_longlong_t *nblocks)
{
- dmu_object_size_from_db((dmu_buf_t *)hdl->sa_bonus,
+ dmu_object_size_from_db(hdl->sa_bonus,
blksize, nblocks);
}
@@ -2150,7 +2150,7 @@ sa_set_userp(sa_handle_t *hdl, void *ptr)
dmu_buf_t *
sa_get_db(sa_handle_t *hdl)
{
- return ((dmu_buf_t *)hdl->sa_bonus);
+ return (hdl->sa_bonus);
}
void *
diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c
index 2a49293c2..7f33aea43 100644
--- a/module/zfs/zfs_vnops.c
+++ b/module/zfs/zfs_vnops.c
@@ -775,7 +775,11 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
*/
dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
- dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
+ dmu_buf_impl_t *db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
+ DB_DNODE_ENTER(db);
+ dmu_tx_hold_write_by_dnode(tx, DB_DNODE(db), woff,
+ MIN(n, max_blksz));
+ DB_DNODE_EXIT(db);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
@@ -1048,7 +1052,7 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
return (SET_ERROR(ENOENT));
}
- zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
+ zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
zgd->zgd_lwb = lwb;
zgd->zgd_private = zp;