author     Brian Behlendorf <[email protected]>   2019-04-05 17:32:56 -0700
committer  GitHub <[email protected]>             2019-04-05 17:32:56 -0700
commit     d93d4b1acdf53a25ad21e20ddfca3b0d58a06cdf (patch)
tree       4981ce2ec4724699f55cf3fd1dad2c4222528b13 /module
parent     944a37248a0db8a3f8c5aee3864d9857a3486672 (diff)
Revert "Fix issues with truncated files in raw sends"
This partially reverts commit 5dbf8b4ed. This change resolved the issues
observed with truncated files in raw sends. However, the required changes
to dnode_allocate() introduced a regression for non-raw streams which
needs to be understood.

The additional debugging improvements from the original patch were not
reverted.

Reviewed-by: Tom Caputi <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Issue #7378
Issue #8528
Issue #8540
Issue #8565
Closes #8584
Diffstat (limited to 'module')
-rw-r--r--  module/zfs/dmu.c        1
-rw-r--r--  module/zfs/dmu_recv.c  15
-rw-r--r--  module/zfs/dnode.c     11
3 files changed, 14 insertions, 13 deletions
diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c
index 18328042c..b047a93ef 100644
--- a/module/zfs/dmu.c
+++ b/module/zfs/dmu.c
@@ -1737,7 +1737,6 @@ dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf,
/* compressed bufs must always be assignable to their dbuf */
ASSERT3U(arc_get_compression(buf), ==, ZIO_COMPRESS_OFF);
ASSERT(!(buf->b_flags & ARC_BUF_FLAG_COMPRESSED));
- ASSERT(!arc_is_encrypted(buf));
dbuf_rele(db, FTAG);
dmu_write(os, object, offset, blksz, buf->b_data, tx);
diff --git a/module/zfs/dmu_recv.c b/module/zfs/dmu_recv.c
index e534540cb..0fa3dfad3 100644
--- a/module/zfs/dmu_recv.c
+++ b/module/zfs/dmu_recv.c
@@ -1235,13 +1235,11 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
* processed. However, for raw receives we manually set the
* maxblkid from the drr_maxblkid and so we must first free
* everything above that blkid to ensure the DMU is always
- * consistent with itself. We will never free the first block
- * of the object here because a maxblkid of 0 could indicate
- * an object with a single block or one with no blocks.
+ * consistent with itself.
*/
- if (rwa->raw && object != DMU_NEW_OBJECT) {
+ if (rwa->raw) {
err = dmu_free_long_range(rwa->os, drro->drr_object,
- (drro->drr_maxblkid + 1) * doi.doi_data_block_size,
+ (drro->drr_maxblkid + 1) * drro->drr_blksz,
DMU_OBJECT_END);
if (err != 0)
return (SET_ERROR(EINVAL));
@@ -1377,8 +1375,11 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
drro->drr_nlevels, tx));
/*
- * Set the maxblkid. This will always succeed because
- * we freed all blocks beyond the new maxblkid above.
+ * Set the maxblkid. We will never free the first block of
+ * an object here because a maxblkid of 0 could indicate
+ * an object with a single block or one with no blocks.
+ * This will always succeed because we freed all blocks
+ * beyond the new maxblkid above.
*/
VERIFY0(dmu_object_set_maxblkid(rwa->os, drro->drr_object,
drro->drr_maxblkid, tx));
diff --git a/module/zfs/dnode.c b/module/zfs/dnode.c
index 952ec95ae..2903bc78d 100644
--- a/module/zfs/dnode.c
+++ b/module/zfs/dnode.c
@@ -689,9 +689,12 @@ dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
dnode_setdirty(dn, tx);
if (dn->dn_datablksz != blocksize) {
- ASSERT0(dn->dn_maxblkid);
- ASSERT(BP_IS_HOLE(&dn->dn_phys->dn_blkptr[0]) ||
- dnode_block_freed(dn, 0));
+ /* change blocksize */
+ ASSERT(dn->dn_maxblkid == 0 &&
+ (BP_IS_HOLE(&dn->dn_phys->dn_blkptr[0]) ||
+ dnode_block_freed(dn, 0)));
+ dnode_setdblksz(dn, blocksize);
+ dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = blocksize;
}
if (dn->dn_bonuslen != bonuslen)
dn->dn_next_bonuslen[tx->tx_txg&TXG_MASK] = bonuslen;
@@ -712,8 +715,6 @@ dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
}
rw_exit(&dn->dn_struct_rwlock);
- VERIFY0(dnode_set_blksz(dn, blocksize, 0, tx));
-
/* change type */
dn->dn_type = ot;