-rw-r--r--  include/sys/dmu.h       |  3 +--
-rw-r--r--  module/zfs/brt.c        | 14 +++++++-------
-rw-r--r--  module/zfs/dmu.c        |  6 ++----
-rw-r--r--  module/zfs/zfs_vnops.c  |  4 ++--
4 files changed, 12 insertions(+), 15 deletions(-)
diff --git a/include/sys/dmu.h b/include/sys/dmu.h
index 615ba8fe7..06b4dc27d 100644
--- a/include/sys/dmu.h
+++ b/include/sys/dmu.h
@@ -1072,8 +1072,7 @@ int dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole,
int dmu_read_l0_bps(objset_t *os, uint64_t object, uint64_t offset,
uint64_t length, struct blkptr *bps, size_t *nbpsp);
int dmu_brt_clone(objset_t *os, uint64_t object, uint64_t offset,
- uint64_t length, dmu_tx_t *tx, const struct blkptr *bps, size_t nbps,
- boolean_t replay);
+ uint64_t length, dmu_tx_t *tx, const struct blkptr *bps, size_t nbps);
/*
* Initial setup and final teardown.
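
Taken together, the prototype change means every call site drops its trailing
flag. A minimal sketch of the shape change, assuming illustrative locals
(os, object, off, len; error, tx, bps and nbps as in the callers below):

    /* Before this patch: the caller had to state whether it was replaying. */
    error = dmu_brt_clone(os, object, off, len, tx, bps, nbps, B_FALSE);

    /* After: one call shape for live clones and ZIL replay alike. */
    error = dmu_brt_clone(os, object, off, len, tx, bps, nbps);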
diff --git a/module/zfs/brt.c b/module/zfs/brt.c
index b0529521e..759bc8d2e 100644
--- a/module/zfs/brt.c
+++ b/module/zfs/brt.c
@@ -235,13 +235,13 @@
 * destination dataset is mounted and its ZIL replayed.
 * To address this situation we leverage the zil_claim() mechanism, where ZFS
 * will parse all the ZILs on pool import. When we come across TX_CLONE_RANGE
- * entries, we will bump reference counters for their BPs in the BRT and then
- * on mount and ZIL replay we will just attach BPs to the file without
- * bumping reference counters.
- * Note it is still possible that after zil_claim() we never mount the
- * destination, so we never replay its ZIL and we destroy it. This way we would
- * end up with leaked references in BRT. We address that too as ZFS gives us
- * a chance to clean this up on dataset destroy (see zil_free_clone_range()).
+ * entries, we bump the reference counters for their BPs in the BRT. Then,
+ * on mount and ZIL replay, we bump the reference counters once more, while
+ * the claim-time references are dropped during ZIL destroy by
+ * zil_free_clone_range(). It is still possible that after zil_claim() we
+ * never mount the destination, so we never replay its ZIL and just destroy
+ * it. In that case the only references taken will be dropped by
+ * zil_free_clone_range(), since the cloning will never take place.
*/
static kmem_cache_t *brt_entry_cache;
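
The accounting described in the comment above can be reduced to a standalone
userland model. A sketch only, not ZFS code: one BP, one counter, and
claim()/replay()/zil_free() standing in for zil_claim(), ZIL replay and
zil_free_clone_range():

    #include <assert.h>
    #include <stdio.h>

    static int brt_refcount;	/* models the BRT entry for a single BP */

    static void claim(void)    { brt_refcount++; }	/* zil_claim() on import */
    static void replay(void)   { brt_refcount++; }	/* ZIL replay on mount */
    static void zil_free(void) { brt_refcount--; }	/* zil_free_clone_range() */

    int
    main(void)
    {
    	/* Mounted and replayed: the cloned file keeps one reference. */
    	brt_refcount = 0;
    	claim();
    	replay();
    	zil_free();		/* ZIL destroy drops the claim-time hold */
    	assert(brt_refcount == 1);

    	/* Never mounted: ZIL destroy drops the only hold, nothing leaks. */
    	brt_refcount = 0;
    	claim();
    	zil_free();
    	assert(brt_refcount == 0);

    	printf("both paths balance\n");
    	return (0);
    }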
diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c
index ddb29020b..3f626031d 100644
--- a/module/zfs/dmu.c
+++ b/module/zfs/dmu.c
@@ -2267,7 +2267,7 @@ out:
int
dmu_brt_clone(objset_t *os, uint64_t object, uint64_t offset, uint64_t length,
- dmu_tx_t *tx, const blkptr_t *bps, size_t nbps, boolean_t replay)
+ dmu_tx_t *tx, const blkptr_t *bps, size_t nbps)
{
spa_t *spa;
dmu_buf_t **dbp, *dbuf;
@@ -2341,10 +2341,8 @@ dmu_brt_clone(objset_t *os, uint64_t object, uint64_t offset, uint64_t length,
* When the data is embedded into the BP there is no need to
* create a BRT entry as there is no data block. Just copy the
* BP as it contains the data.
- * Also, when replaying ZIL we don't want to bump references
- * in the BRT as it was already done during ZIL claim.
*/
- if (!replay && !BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp)) {
+ if (!BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp)) {
brt_pending_add(spa, bp, tx);
}
}
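
In isolation, the loop body after this hunk behaves roughly as sketched
below. copy_bp() is a placeholder for the real dbuf copy step and the
surrounding bookkeeping is omitted; BP_IS_HOLE(), BP_IS_EMBEDDED() and
brt_pending_add() are the actual names from the hunk:

    for (i = 0; i < nbps; i++) {
    	const blkptr_t *bp = &bps[i];

    	/* The BP is always copied; an embedded BP *is* the data. */
    	copy_bp(dbuf, bp);	/* placeholder for the real copy step */

    	/*
    	 * Holes have no block and embedded BPs carry their data
    	 * inline, so only ordinary BPs get a BRT reference.  With
    	 * the replay flag gone this now happens on ZIL replay too;
    	 * the extra hold is what the brt.c comment above accounts for.
    	 */
    	if (!BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp))
    		brt_pending_add(spa, bp, tx);
    }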
diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c
index 84e6b10ef..3a5fa75df 100644
--- a/module/zfs/zfs_vnops.c
+++ b/module/zfs/zfs_vnops.c
@@ -1333,7 +1333,7 @@ zfs_clone_range(znode_t *inzp, uint64_t *inoffp, znode_t *outzp,
}
error = dmu_brt_clone(outos, outzp->z_id, outoff, size, tx,
- bps, nbps, B_FALSE);
+ bps, nbps);
if (error != 0) {
dmu_tx_commit(tx);
break;
@@ -1467,7 +1467,7 @@ zfs_clone_range_replay(znode_t *zp, uint64_t off, uint64_t len, uint64_t blksz,
if (zp->z_blksz < blksz)
zfs_grow_blocksize(zp, blksz, tx);
- dmu_brt_clone(zfsvfs->z_os, zp->z_id, off, len, tx, bps, nbps, B_TRUE);
+ dmu_brt_clone(zfsvfs->z_os, zp->z_id, off, len, tx, bps, nbps);
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);