Diffstat (limited to 'module/zfs/dbuf.c')
 module/zfs/dbuf.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index ace862637..c3127ee0e 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -2337,7 +2337,7 @@ dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx)
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
/*
- * Quick check for dirtyness. For already dirty blocks, this
+ * Quick check for dirtiness. For already dirty blocks, this
* reduces runtime of this function by >90%, and overall performance
* by 50% for some workloads (e.g. file deletion with indirect blocks
* cached).
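
As an aside on the pattern the comment in this hunk describes: the function scans the buffer's existing dirty records and bails out early when one already covers the current transaction group, only falling through to the expensive bookkeeping otherwise. Below is a minimal, standalone sketch of that shape; the types and helpers (db_t, dirty_record_t, full_dirty_path) are hypothetical stand-ins, not the real ZFS structures.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for the real dbuf / dirty-record structures. */
typedef struct dirty_record {
    uint64_t dr_txg;                  /* txg this record belongs to */
    struct dirty_record *dr_next;
} dirty_record_t;

typedef struct db {
    dirty_record_t *db_dirty_records; /* outstanding dirty records */
} db_t;

/* Expensive path: allocate a record, adjust accounting, etc. (elided). */
static void
full_dirty_path(db_t *db, uint64_t txg)
{
    (void)db;
    printf("taking full dirty path for txg %llu\n",
        (unsigned long long)txg);
}

/*
 * Quick check for dirtiness: if a dirty record for this txg already
 * exists, return immediately instead of running the full path.
 */
static void
will_dirty(db_t *db, uint64_t txg)
{
    for (dirty_record_t *dr = db->db_dirty_records; dr != NULL;
        dr = dr->dr_next) {
        if (dr->dr_txg == txg)
            return;                   /* already dirty in this txg */
    }
    full_dirty_path(db, txg);
}

int
main(void)
{
    dirty_record_t dr = { .dr_txg = 100, .dr_next = NULL };
    db_t db = { .db_dirty_records = &dr };

    will_dirty(&db, 100);             /* early return, no output */
    will_dirty(&db, 101);             /* falls through to the full path */
    return (0);
}
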
@@ -2892,7 +2892,7 @@ dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
* Hold the dn_dbufs_mtx while we get the new dbuf
* in the hash table *and* added to the dbufs list.
* This prevents a possible deadlock with someone
- * trying to look up this dbuf before its added to the
+ * trying to look up this dbuf before it's added to the
* dn_dbufs list.
*/
mutex_enter(&dn->dn_dbufs_mtx);
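
The comment being fixed in this hunk describes a lock-ordering rule: the new dbuf must become visible in the hash table and on the per-dnode list under the same lock, so a concurrent lookup can never find it in one place before it exists in the other. A simplified sketch of that insert-both-under-one-lock shape follows, using pthreads and made-up structures; the real code uses kernel mutexes and separate hash-bucket locks, so this is only an illustration of the ordering idea.

#include <pthread.h>
#include <stddef.h>

/* Hypothetical containers standing in for the dbuf hash table and dn_dbufs list. */
struct table { struct buf *head; };
struct list  { struct buf *head; };

struct buf {
    struct buf *hash_next;            /* link in the hash table */
    struct buf *list_next;            /* link in the per-dnode list */
};

static pthread_mutex_t dbufs_lock = PTHREAD_MUTEX_INITIALIZER;
static struct table hash_table;
static struct list  dbufs_list;

/*
 * Insert the new buffer into the hash table *and* the list while the
 * same lock is held, so a concurrent lookup can never observe it in
 * the hash table before it is also on the list.
 */
static void
insert_buf(struct buf *b)
{
    pthread_mutex_lock(&dbufs_lock);
    b->hash_next = hash_table.head;
    hash_table.head = b;
    b->list_next = dbufs_list.head;
    dbufs_list.head = b;
    pthread_mutex_unlock(&dbufs_lock);
}

int
main(void)
{
    struct buf b = { NULL, NULL };
    insert_buf(&b);
    return (0);
}
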
@@ -3337,7 +3337,7 @@ dbuf_hold_impl_arg(struct dbuf_hold_arg *dh)
ASSERT(dh->dh_db->db_buf == NULL || arc_referenced(dh->dh_db->db_buf));
/*
- * If this buffer is currently syncing out, and we are are
+ * If this buffer is currently syncing out, and we are
* still referencing it from db_data, we need to make a copy
* of it in case we decide we want to dirty it again in this txg.
*/
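
The comment in this hunk is about detaching from data that is still being written out before dirtying it again in the same txg. The following rough sketch illustrates that copy-before-modify idea; the fields and helper (db_syncing, make_private_copy) are invented for the example and stand in for the actual dbuf/ARC machinery.

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical buffer: db_data may be shared with an in-flight write. */
struct buf {
    void   *db_data;
    size_t  db_size;
    bool    db_syncing;               /* data is being written out right now */
};

/*
 * Before dirtying the buffer again, detach from the copy that is still
 * being synced out so the in-flight write is not modified underneath
 * the I/O path.
 */
static int
make_private_copy(struct buf *db)
{
    if (db->db_syncing) {
        void *copy = malloc(db->db_size);
        if (copy == NULL)
            return (-1);
        memcpy(copy, db->db_data, db->db_size);
        db->db_data = copy;           /* the syncing side keeps the old pointer */
        db->db_syncing = false;
    }
    return (0);
}

int
main(void)
{
    char data[16] = "hello";
    struct buf db = { .db_data = data, .db_size = sizeof (data),
        .db_syncing = true };
    int err = make_private_copy(&db);
    if (db.db_data != (void *)data)
        free(db.db_data);             /* in this sketch the caller owns the copy */
    return (err);
}
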
@@ -3812,7 +3812,7 @@ dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
/*
* This buffer was allocated at a time when there was
* no available blkptrs from the dnode, or it was
- * inappropriate to hook it in (i.e., nlevels mis-match).
+ * inappropriate to hook it in (i.e., nlevels mismatch).
*/
ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
ASSERT(db->db_parent == NULL);
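
The comment and asserts in this last hunk say that a parentless buffer may be hooked directly into the dnode only when a block pointer slot is available and the level matches. A loose, self-contained sketch of that kind of check is below; the structures are simplified stand-ins, not the real dnode_t / dmu_buf_impl_t layout.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical, simplified dnode/dbuf fields used by this check. */
struct dnode {
    int dn_nlevels;                   /* depth of the block tree */
    int dn_nblkptr;                   /* block pointers embedded in the dnode */
};

struct dbuf {
    int          db_level;            /* 0 == data block */
    uint64_t     db_blkid;
    struct dbuf *db_parent;           /* indirect parent, if hooked up */
};

/*
 * A buffer can point straight at one of the dnode's embedded block
 * pointers only when it sits directly below the dnode (its level is
 * nlevels - 1) and its block id indexes an embedded pointer.
 */
static bool
can_hook_to_dnode(const struct dnode *dn, const struct dbuf *db)
{
    return (db->db_level == dn->dn_nlevels - 1 &&
        db->db_blkid < (uint64_t)dn->dn_nblkptr);
}

int
main(void)
{
    struct dnode dn = { .dn_nlevels = 1, .dn_nblkptr = 3 };
    struct dbuf db = { .db_level = 0, .db_blkid = 1, .db_parent = NULL };

    /* Mirrors the asserts in the hunk above: parentless and in range. */
    assert(db.db_parent == NULL);
    assert(can_hook_to_dnode(&dn, &db));
    return (0);
}
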