summary refs log tree commit diff stats
path: root/module
diff options
context:
space:
mode:
Diffstat (limited to 'module')
-rw-r--r--module/zfs/arc.c3
-rw-r--r--module/zfs/dbuf.c3
-rw-r--r--module/zfs/dmu.c21
-rw-r--r--module/zfs/zio.c14
4 files changed, 24 insertions(+), 17 deletions(-)
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index 7aa221b00..a78a029c2 100644
--- a/module/zfs/arc.c
+++ b/module/zfs/arc.c
@@ -6776,6 +6776,9 @@ arc_write_ready(zio_t *zio)
buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
if (BP_GET_COMPRESS(bp) == ZIO_COMPRESS_OFF)
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
+ } else if (BP_IS_HOLE(bp) && ARC_BUF_ENCRYPTED(buf)) {
+ buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
+ buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
}
/* this must be done after the buffer flags are adjusted */
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index 51cb0c982..e885a2756 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -3465,7 +3465,8 @@ dbuf_check_crypt(dbuf_dirty_record_t *dr)
* Writing raw encrypted data requires the db's arc buffer
* to be converted to raw by the caller.
*/
- ASSERT(arc_is_encrypted(db->db_buf));
+ ASSERT(arc_is_encrypted(db->db_buf) ||
+ db->db.db_object == DMU_META_DNODE_OBJECT);
}
}
diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c
index b7c5bba15..e0114e659 100644
--- a/module/zfs/dmu.c
+++ b/module/zfs/dmu.c
@@ -2107,8 +2107,6 @@ dmu_object_dirty_raw(objset_t *os, uint64_t object, dmu_tx_t *tx)
return (err);
}
-int zfs_mdcomp_disable = 0;
-
/*
* When the "redundant_metadata" property is set to "most", only indirect
* blocks of this level and higher will have an additional ditto block.
@@ -2138,16 +2136,12 @@ dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
* 3. all other level 0 blocks
*/
if (ismd) {
- if (zfs_mdcomp_disable) {
- compress = ZIO_COMPRESS_EMPTY;
- } else {
- /*
- * XXX -- we should design a compression algorithm
- * that specializes in arrays of bps.
- */
- compress = zio_compress_select(os->os_spa,
- ZIO_COMPRESS_ON, ZIO_COMPRESS_ON);
- }
+ /*
+ * XXX -- we should design a compression algorithm
+ * that specializes in arrays of bps.
+ */
+ compress = zio_compress_select(os->os_spa,
+ ZIO_COMPRESS_ON, ZIO_COMPRESS_ON);
/*
* Metadata always gets checksummed. If the data
@@ -2523,9 +2517,6 @@ EXPORT_SYMBOL(dmu_buf_hold);
EXPORT_SYMBOL(dmu_ot);
/* BEGIN CSTYLED */
-module_param(zfs_mdcomp_disable, int, 0644);
-MODULE_PARM_DESC(zfs_mdcomp_disable, "Disable meta data compression");
-
module_param(zfs_nopwrite_enabled, int, 0644);
MODULE_PARM_DESC(zfs_nopwrite_enabled, "Enable NOP writes");
diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index 9458f3924..7544cf4e3 100644
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -1532,9 +1532,21 @@ zio_write_compress(zio_t *zio)
*bp = zio->io_bp_orig;
zio->io_pipeline = zio->io_orig_pipeline;
+ } else if ((zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) != 0 &&
+ zp->zp_type == DMU_OT_DNODE) {
+ /*
+ * The DMU actually relies on the zio layer's compression
+ * to free metadnode blocks that have had all contained
+ * dnodes freed. As a result, even when doing a raw
+ * receive, we must check whether the block can be compressed
+ * to a hole.
+ */
+ psize = zio_compress_data(ZIO_COMPRESS_EMPTY,
+ zio->io_abd, NULL, lsize);
+ if (psize == 0)
+ compress = ZIO_COMPRESS_OFF;
} else {
ASSERT3U(psize, !=, 0);
-
}
/*