author     Matthew Ahrens <[email protected]>      2013-08-20 20:11:52 -0800
committer  Brian Behlendorf <[email protected]>    2013-11-05 12:23:35 -0800
commit     b663a23d36d805dd5e9d1b4663dbf5966944002d (patch)
tree       1b50cc016c2e7658fa66360125e2e02e7ca57a93 /module/zfs/dbuf.c
parent     46ba1e59d3ae7e374c7a98f15f4bef21ee3fcded (diff)
Illumos #4047
4047 panic from dbuf_free_range() from dmu_free_object() while doing zfs receive

Reviewed by: Adam Leventhal <[email protected]>
Reviewed by: George Wilson <[email protected]>
Approved by: Dan McDonald <[email protected]>

References:
  https://www.illumos.org/issues/4047
  illumos/illumos-gate@713d6c208802cfbb806329ec0d154b641b80c355

Ported-by: Richard Yao <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Issue #1775

Porting notes:
1. The exported symbol dmu_free_object() was renamed to
   dmu_free_long_object() in Illumos.
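The core of the change is a per-dnode watermark, dn_unlisted_l0_blkid, which dbuf_create() keeps one past the highest level-0 block ID ever added to dn_dbufs; dbuf_free_range() can then return immediately when the range being freed starts beyond anything that could be on the list. Below is a minimal user-space sketch of that watermark technique, assuming byte-offset arguments and hypothetical names (range_tracker_t, rt_note_insert(), rt_can_skip_search()) rather than the real ZFS interfaces:

/*
 * Sketch of the watermark idea behind dn_unlisted_l0_blkid.
 * The names and units below are illustrative assumptions, not ZFS code.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct range_tracker {
	uint64_t rt_unlisted_l0_blkid;	/* one past the highest blkid ever inserted */
	uint64_t rt_datablksz;		/* block size in bytes */
} range_tracker_t;

/* Called when a new level-0 buffer for 'blkid' is added to the list. */
static void
rt_note_insert(range_tracker_t *rt, uint64_t blkid)
{
	if (blkid >= rt->rt_unlisted_l0_blkid)
		rt->rt_unlisted_l0_blkid = blkid + 1;
}

/* Returns 1 if freeing a range starting at 'start_offset' can skip the list walk. */
static int
rt_can_skip_search(const range_tracker_t *rt, uint64_t start_offset)
{
	return (start_offset >= rt->rt_unlisted_l0_blkid * rt->rt_datablksz);
}

int
main(void)
{
	range_tracker_t rt = { .rt_unlisted_l0_blkid = 0, .rt_datablksz = 4096 };

	rt_note_insert(&rt, 7);		/* highest listed level-0 block is now 7 */
	printf("skip at offset 64K? %d\n", rt_can_skip_search(&rt, 64 * 1024));	/* 1 */
	printf("skip at offset 16K? %d\n", rt_can_skip_search(&rt, 16 * 1024));	/* 0 */
	return (0);
}

The point of the design is that the fast-path test is a single comparison taken under the same dn_dbufs_mtx, so the common receive case never pays for a full walk of dn_dbufs.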
Diffstat (limited to 'module/zfs/dbuf.c')
-rw-r--r--  module/zfs/dbuf.c  31
1 file changed, 21 insertions, 10 deletions
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index c1d0b294c..9ad3d1d30 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -64,6 +64,12 @@ static void __dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh,
void *tag, dmu_buf_impl_t **dbp, int depth);
static int __dbuf_hold_impl(struct dbuf_hold_impl_data *dh);
+/*
+ * Number of times that zfs_free_range() took the slow path while doing
+ * a zfs receive. A nonzero value indicates a potential performance problem.
+ */
+uint64_t zfs_free_range_recv_miss;
+
static void dbuf_destroy(dmu_buf_impl_t *db);
static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
@@ -869,20 +875,22 @@ dbuf_free_range(dnode_t *dn, uint64_t start, uint64_t end, dmu_tx_t *tx)
}
dprintf_dnode(dn, "start=%llu end=%llu\n", start, end);
- if (dmu_objset_is_receiving(dn->dn_objset)) {
+ mutex_enter(&dn->dn_dbufs_mtx);
+ if (start >= dn->dn_unlisted_l0_blkid * dn->dn_datablksz) {
+ /* There can't be any dbufs in this range; no need to search. */
+ mutex_exit(&dn->dn_dbufs_mtx);
+ return;
+ } else if (dmu_objset_is_receiving(dn->dn_objset)) {
/*
- * When processing a free record from a zfs receive,
- * there should have been no previous modifications to the
- * data in this range. Therefore there should be no dbufs
- * in the range. Searching dn_dbufs for these non-existent
- * dbufs can be very expensive, so simply ignore this.
+ * If we are receiving, we expect there to be no dbufs in
+ * the range to be freed, because receive modifies each
+ * block at most once, and in offset order. If this is
+ * not the case, it can lead to performance problems,
+ * so note that we unexpectedly took the slow path.
*/
- VERIFY3P(dbuf_find(dn, 0, start), ==, NULL);
- VERIFY3P(dbuf_find(dn, 0, end), ==, NULL);
- return;
+ atomic_inc_64(&zfs_free_range_recv_miss);
}
- mutex_enter(&dn->dn_dbufs_mtx);
for (db = list_head(&dn->dn_dbufs); db; db = db_next) {
db_next = list_next(&dn->dn_dbufs, db);
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
@@ -1781,6 +1789,9 @@ dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
return (odb);
}
list_insert_head(&dn->dn_dbufs, db);
+ if (db->db_level == 0 && db->db_blkid >=
+ dn->dn_unlisted_l0_blkid)
+ dn->dn_unlisted_l0_blkid = db->db_blkid + 1;
db->db_state = DB_UNCACHED;
mutex_exit(&dn->dn_dbufs_mtx);
arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
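
Where the old code VERIFY'd that no dbufs existed in the freed range during a receive, and panicked when that expectation was violated (the crash reported in #4047), the new code only counts the unexpected event in zfs_free_range_recv_miss and falls through to the normal list walk. A self-contained sketch of that count-the-slow-path pattern follows, using C11 atomics in place of the kernel's atomic_inc_64() and hypothetical names (slow_path_hits, free_range()):

/*
 * Sketch of "count the unexpected slow path instead of asserting".
 * C11 atomics stand in for atomic_inc_64(); names are hypothetical.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t slow_path_hits;	/* analogous to zfs_free_range_recv_miss */

static void
free_range(bool receiving, bool range_clearly_empty)
{
	if (range_clearly_empty)
		return;			/* fast path: nothing can be in the range */
	if (receiving) {
		/* Unexpected during a receive; note it and keep going. */
		atomic_fetch_add(&slow_path_hits, 1);
	}
	/* ... slow path: walk the buffer list and free what falls in range ... */
}

int
main(void)
{
	free_range(true, false);
	printf("slow path taken %llu time(s) during a receive\n",
	    (unsigned long long)atomic_load(&slow_path_hits));
	return (0);
}

This matches the reasoning in the new comment: a receive modifies each block at most once and in offset order, so taking this path signals a potential performance problem rather than a correctness bug, and a counter rather than an assertion is the appropriate response.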