Diffstat (limited to 'module')
-rw-r--r--  module/icp/algs/sha2/sha2.c |  13
-rw-r--r--  module/zfs/Makefile.in      |   1
-rw-r--r--  module/zfs/arc.c            |  77
-rw-r--r--  module/zfs/dbuf.c           |  12
-rw-r--r--  module/zfs/dmu.c            | 103
-rw-r--r--  module/zfs/dmu_objset.c     |  25
-rw-r--r--  module/zfs/dmu_send.c       |  55
-rw-r--r--  module/zfs/dmu_traverse.c   |   2
-rw-r--r--  module/zfs/dsl_crypt.c      | 116
-rw-r--r--  module/zfs/hkdf.c           | 171
-rw-r--r--  module/zfs/metaslab.c       |   3
-rw-r--r--  module/zfs/zfs_vnops.c      |   4
-rw-r--r--  module/zfs/zil.c            |  29
-rw-r--r--  module/zfs/zio.c            |   3
-rw-r--r--  module/zfs/zio_crypt.c      | 233
15 files changed, 518 insertions(+), 329 deletions(-)
diff --git a/module/icp/algs/sha2/sha2.c b/module/icp/algs/sha2/sha2.c
index 6f7971afd..05a2e6ad1 100644
--- a/module/icp/algs/sha2/sha2.c
+++ b/module/icp/algs/sha2/sha2.c
@@ -52,7 +52,8 @@
static void Encode(uint8_t *, uint32_t *, size_t);
static void Encode64(uint8_t *, uint64_t *, size_t);
-#if defined(__amd64)
+/* userspace only supports the generic version */
+#if defined(__amd64) && defined(_KERNEL)
#define SHA512Transform(ctx, in) SHA512TransformBlocks((ctx), (in), 1)
#define SHA256Transform(ctx, in) SHA256TransformBlocks((ctx), (in), 1)
@@ -62,7 +63,7 @@ void SHA256TransformBlocks(SHA2_CTX *ctx, const void *in, size_t num);
#else
static void SHA256Transform(SHA2_CTX *, const uint8_t *);
static void SHA512Transform(SHA2_CTX *, const uint8_t *);
-#endif /* __amd64 */
+#endif /* __amd64 && _KERNEL */
static uint8_t PADDING[128] = { 0x80, /* all zeros */ };
@@ -142,7 +143,7 @@ static uint8_t PADDING[128] = { 0x80, /* all zeros */ };
#endif /* _BIG_ENDIAN */
-#if !defined(__amd64)
+#if !defined(__amd64) || !defined(_KERNEL)
/* SHA256 Transform */
static void
@@ -600,7 +601,7 @@ SHA512Transform(SHA2_CTX *ctx, const uint8_t *blk)
ctx->state.s64[7] += h;
}
-#endif /* !__amd64 */
+#endif /* !__amd64 || !_KERNEL */
/*
@@ -838,7 +839,7 @@ SHA2Update(SHA2_CTX *ctx, const void *inptr, size_t input_len)
i = buf_len;
}
-#if !defined(__amd64)
+#if !defined(__amd64) || !defined(_KERNEL)
if (algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE) {
for (; i + buf_limit - 1 < input_len; i += buf_limit) {
SHA256Transform(ctx, &input[i]);
@@ -866,7 +867,7 @@ SHA2Update(SHA2_CTX *ctx, const void *inptr, size_t input_len)
i += block_count << 7;
}
}
-#endif /* !__amd64 */
+#endif /* !__amd64 || !_KERNEL */
/*
* general optimization:
diff --git a/module/zfs/Makefile.in b/module/zfs/Makefile.in
index 72f28a89d..606b0a47c 100644
--- a/module/zfs/Makefile.in
+++ b/module/zfs/Makefile.in
@@ -41,6 +41,7 @@ $(MODULE)-objs += dsl_synctask.o
$(MODULE)-objs += edonr_zfs.o
$(MODULE)-objs += fm.o
$(MODULE)-objs += gzip.o
+$(MODULE)-objs += hkdf.o
$(MODULE)-objs += lzjb.o
$(MODULE)-objs += lz4.o
$(MODULE)-objs += metaslab.o
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index 992e57ce6..6256fed2b 100644
--- a/module/zfs/arc.c
+++ b/module/zfs/arc.c
@@ -3155,13 +3155,14 @@ arc_buf_destroy_impl(arc_buf_t *buf)
hdr->b_crypt_hdr.b_ebufcnt -= 1;
/*
- * if we have no more encrypted buffers and we've already
+ * If we have no more encrypted buffers and we've already
* gotten a copy of the decrypted data we can free b_rabd to
* save some space.
*/
if (hdr->b_crypt_hdr.b_ebufcnt == 0 && HDR_HAS_RABD(hdr) &&
- hdr->b_l1hdr.b_pabd != NULL)
+ hdr->b_l1hdr.b_pabd != NULL && !HDR_IO_IN_PROGRESS(hdr)) {
arc_hdr_free_abd(hdr, B_TRUE);
+ }
}
arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);
@@ -3716,9 +3717,8 @@ arc_hdr_destroy(arc_buf_hdr_t *hdr)
arc_hdr_free_abd(hdr, B_FALSE);
}
- if (HDR_HAS_RABD(hdr)) {
+ if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
- }
}
ASSERT3P(hdr->b_hash_next, ==, NULL);
@@ -5746,16 +5746,15 @@ arc_read_done(zio_t *zio)
callback_cnt++;
int error = arc_buf_alloc_impl(hdr, zio->io_spa,
- zio->io_bookmark.zb_objset, acb->acb_private,
- acb->acb_encrypted, acb->acb_compressed, acb->acb_noauth,
- no_zio_error, &acb->acb_buf);
+ acb->acb_dsobj, acb->acb_private, acb->acb_encrypted,
+ acb->acb_compressed, acb->acb_noauth, no_zio_error,
+ &acb->acb_buf);
/*
- * assert non-speculative zios didn't fail because an
+ * Assert non-speculative zios didn't fail because an
* encryption key wasn't loaded
*/
- ASSERT((zio->io_flags & ZIO_FLAG_SPECULATIVE) ||
- error == 0 || error != ENOENT);
+ ASSERT((zio->io_flags & ZIO_FLAG_SPECULATIVE) || error == 0);
/*
* If we failed to decrypt, report an error now (as the zio
@@ -5778,10 +5777,8 @@ arc_read_done(zio_t *zio)
}
hdr->b_l1hdr.b_acb = NULL;
arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
- if (callback_cnt == 0) {
- ASSERT(HDR_PREFETCH(hdr) || HDR_HAS_RABD(hdr));
+ if (callback_cnt == 0)
ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
- }
ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt) ||
callback_list != NULL);
@@ -5943,6 +5940,9 @@ top:
acb->acb_done = done;
acb->acb_private = private;
acb->acb_compressed = compressed_read;
+ acb->acb_encrypted = encrypted_read;
+ acb->acb_noauth = noauth_read;
+ acb->acb_dsobj = zb->zb_objset;
if (pio != NULL)
acb->acb_zio_dummy = zio_null(pio,
spa, NULL, NULL, NULL, zio_flags);
@@ -5981,9 +5981,7 @@ top:
rc = arc_buf_alloc_impl(hdr, spa, zb->zb_objset,
private, encrypted_read, compressed_read,
noauth_read, B_TRUE, &buf);
-
- ASSERT((zio_flags & ZIO_FLAG_SPECULATIVE) ||
- rc == 0 || rc != ENOENT);
+ ASSERT((zio_flags & ZIO_FLAG_SPECULATIVE) || rc == 0);
} else if (*arc_flags & ARC_FLAG_PREFETCH &&
refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
@@ -6008,7 +6006,7 @@ top:
uint64_t addr = 0;
boolean_t devw = B_FALSE;
uint64_t size;
- void *hdr_abd;
+ abd_t *hdr_abd;
/*
* Gracefully handle a damaged logical block size as a
@@ -6131,6 +6129,7 @@ top:
acb->acb_compressed = compressed_read;
acb->acb_encrypted = encrypted_read;
acb->acb_noauth = noauth_read;
+ acb->acb_dsobj = zb->zb_objset;
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
hdr->b_l1hdr.b_acb = acb;
@@ -6698,6 +6697,9 @@ arc_write_ready(zio_t *zio)
HDR_SET_PSIZE(hdr, psize);
arc_hdr_set_compress(hdr, compress);
+ if (zio->io_error != 0 || psize == 0)
+ goto out;
+
/*
* Fill the hdr with data. If the buffer is encrypted we have no choice
* but to copy the data into b_radb. If the hdr is compressed, the data
@@ -6713,6 +6715,7 @@ arc_write_ready(zio_t *zio)
* the data into it; otherwise, we share the data directly if we can.
*/
if (ARC_BUF_ENCRYPTED(buf)) {
+ ASSERT3U(psize, >, 0);
ASSERT(ARC_BUF_COMPRESSED(buf));
arc_hdr_alloc_abd(hdr, B_TRUE);
abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize);
@@ -6745,6 +6748,7 @@ arc_write_ready(zio_t *zio)
arc_share_buf(hdr, buf);
}
+out:
arc_hdr_verify(hdr, bp);
spl_fstrans_unmark(cookie);
}
@@ -7956,9 +7960,15 @@ l2arc_untransform(zio_t *zio, l2arc_read_callback_t *cb)
*/
ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_INTENT_LOG);
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
+ ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
- /* If the data was encrypted, decrypt it now */
- if (HDR_ENCRYPTED(hdr)) {
+ /*
+ * If the data was encrypted, decrypt it now. Note that
+ * we must check the bp here and not the hdr, since the
+ * hdr does not have its encryption parameters updated
+ * until arc_read_done().
+ */
+ if (BP_IS_ENCRYPTED(bp)) {
abd_t *eabd = arc_get_data_abd(hdr,
arc_hdr_size(hdr), hdr);
@@ -8084,7 +8094,16 @@ l2arc_read_done(zio_t *zio)
*/
abd_free(cb->l2rcb_abd);
zio->io_size = zio->io_orig_size = arc_hdr_size(hdr);
- zio->io_abd = zio->io_orig_abd = hdr->b_l1hdr.b_pabd;
+
+ if (BP_IS_ENCRYPTED(&cb->l2rcb_bp) &&
+ (cb->l2rcb_flags & ZIO_FLAG_RAW_ENCRYPT)) {
+ ASSERT(HDR_HAS_RABD(hdr));
+ zio->io_abd = zio->io_orig_abd =
+ hdr->b_crypt_hdr.b_rabd;
+ } else {
+ ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
+ zio->io_abd = zio->io_orig_abd = hdr->b_l1hdr.b_pabd;
+ }
}
ASSERT3P(zio->io_abd, !=, NULL);
@@ -8321,7 +8340,7 @@ l2arc_apply_transforms(spa_t *spa, arc_buf_hdr_t *hdr, uint64_t asize,
boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
dsl_crypto_key_t *dck = NULL;
uint8_t mac[ZIO_DATA_MAC_LEN] = { 0 };
- boolean_t no_crypt;
+ boolean_t no_crypt = B_FALSE;
ASSERT((HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) ||
@@ -8333,6 +8352,15 @@ l2arc_apply_transforms(spa_t *spa, arc_buf_hdr_t *hdr, uint64_t asize,
 * and copy the data. This may be done to eliminate a dependency on a
* shared buffer or to reallocate the buffer to match asize.
*/
+ if (HDR_HAS_RABD(hdr) && asize != psize) {
+ ASSERT3U(size, ==, psize);
+ to_write = abd_alloc_for_io(asize, ismd);
+ abd_copy(to_write, hdr->b_crypt_hdr.b_rabd, size);
+ if (size != asize)
+ abd_zero_off(to_write, size, asize - size);
+ goto out;
+ }
+
if ((compress == ZIO_COMPRESS_OFF || HDR_COMPRESSION_ENABLED(hdr)) &&
!HDR_ENCRYPTED(hdr)) {
ASSERT3U(size, ==, psize);
@@ -8377,11 +8405,8 @@ l2arc_apply_transforms(spa_t *spa, arc_buf_hdr_t *hdr, uint64_t asize,
if (ret != 0)
goto error;
- if (no_crypt) {
- spa_keystore_dsl_key_rele(spa, dck, FTAG);
- abd_free(eabd);
- goto out;
- }
+ if (no_crypt)
+ abd_copy(eabd, to_write, psize);
if (psize != asize)
abd_zero_off(eabd, psize, asize - psize);
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index 1ea4c757e..c954dec1b 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -1175,7 +1175,7 @@ dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
 * or (if there are no active holders)
* just null out the current db_data pointer.
*/
- ASSERT(dr->dr_txg >= txg - 2);
+ ASSERT3U(dr->dr_txg, >=, txg - 2);
if (db->db_blkid == DMU_BONUS_BLKID) {
dnode_t *dn = DB_DNODE(db);
int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
@@ -2153,6 +2153,13 @@ dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
if (db->db_state == DB_CACHED &&
refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
+ /*
+ * In practice, we will never have a case where we have an
+ * encrypted arc buffer while additional holds exist on the
+ * dbuf. We don't handle this here so we simply assert that
+ * fact instead.
+ */
+ ASSERT(!arc_is_encrypted(buf));
mutex_exit(&db->db_mtx);
(void) dbuf_dirty(db, tx);
bcopy(buf->b_data, db->db.db_data, db->db.db_size);
@@ -2168,6 +2175,8 @@ dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
ASSERT(db->db_buf != NULL);
if (dr != NULL && dr->dr_txg == tx->tx_txg) {
ASSERT(dr->dt.dl.dr_data == db->db_buf);
+ IMPLY(arc_is_encrypted(buf), dr->dt.dl.dr_raw);
+
if (!arc_released(db->db_buf)) {
ASSERT(dr->dt.dl.dr_override_state ==
DR_OVERRIDDEN);
@@ -3458,7 +3467,6 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
dn->dn_type, psize, lsize, compress_type);
} else if (compress_type != ZIO_COMPRESS_OFF) {
ASSERT3U(type, ==, ARC_BUFC_DATA);
- int lsize = arc_buf_lsize(*datap);
*datap = arc_alloc_compressed_buf(os->os_spa, db,
psize, lsize, compress_type);
} else {
diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c
index 1eb35b935..1aba0b133 100644
--- a/module/zfs/dmu.c
+++ b/module/zfs/dmu.c
@@ -761,7 +761,7 @@ dmu_objset_zfs_unmounting(objset_t *os)
static int
dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
- uint64_t length)
+ uint64_t length, boolean_t raw)
{
uint64_t object_size;
int err;
@@ -844,6 +844,17 @@ dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
uint64_t, long_free_dirty_all_txgs, uint64_t, chunk_len,
uint64_t, dmu_tx_get_txg(tx));
dnode_free_range(dn, chunk_begin, chunk_len, tx);
+
+ /* if this is a raw free, mark the dirty record as such */
+ if (raw) {
+ dbuf_dirty_record_t *dr = dn->dn_dbuf->db_last_dirty;
+
+ while (dr != NULL && dr->dr_txg > tx->tx_txg)
+ dr = dr->dr_next;
+ if (dr != NULL && dr->dr_txg == tx->tx_txg)
+ dr->dt.dl.dr_raw = B_TRUE;
+ }
+
dmu_tx_commit(tx);
length -= chunk_len;
@@ -861,7 +872,7 @@ dmu_free_long_range(objset_t *os, uint64_t object,
err = dnode_hold(os, object, FTAG, &dn);
if (err != 0)
return (err);
- err = dmu_free_long_range_impl(os, dn, offset, length);
+ err = dmu_free_long_range_impl(os, dn, offset, length, B_FALSE);
/*
* It is important to zero out the maxblkid when freeing the entire
@@ -876,8 +887,37 @@ dmu_free_long_range(objset_t *os, uint64_t object,
return (err);
}
+/*
+ * This function is equivalent to dmu_free_long_range(), but also
+ * marks the new dirty record as a raw write.
+ */
int
-dmu_free_long_object(objset_t *os, uint64_t object)
+dmu_free_long_range_raw(objset_t *os, uint64_t object,
+ uint64_t offset, uint64_t length)
+{
+ dnode_t *dn;
+ int err;
+
+ err = dnode_hold(os, object, FTAG, &dn);
+ if (err != 0)
+ return (err);
+ err = dmu_free_long_range_impl(os, dn, offset, length, B_TRUE);
+
+ /*
+ * It is important to zero out the maxblkid when freeing the entire
+ * file, so that (a) subsequent calls to dmu_free_long_range_impl()
+ * will take the fast path, and (b) dnode_reallocate() can verify
+ * that the entire file has been freed.
+ */
+ if (err == 0 && offset == 0 && length == DMU_OBJECT_END)
+ dn->dn_maxblkid = 0;
+
+ dnode_rele(dn, FTAG);
+ return (err);
+}
+
+static int
+dmu_free_long_object_impl(objset_t *os, uint64_t object, boolean_t raw)
{
dmu_tx_t *tx;
int err;
@@ -893,6 +933,9 @@ dmu_free_long_object(objset_t *os, uint64_t object)
err = dmu_tx_assign(tx, TXG_WAIT);
if (err == 0) {
err = dmu_object_free(os, object, tx);
+ if (err == 0 && raw)
+ VERIFY0(dmu_object_dirty_raw(os, object, tx));
+
dmu_tx_commit(tx);
} else {
dmu_tx_abort(tx);
@@ -902,6 +945,19 @@ dmu_free_long_object(objset_t *os, uint64_t object)
}
int
+dmu_free_long_object(objset_t *os, uint64_t object)
+{
+ return (dmu_free_long_object_impl(os, object, B_FALSE));
+}
+
+int
+dmu_free_long_object_raw(objset_t *os, uint64_t object)
+{
+ return (dmu_free_long_object_impl(os, object, B_TRUE));
+}
+
+
+int
dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
uint64_t size, dmu_tx_t *tx)
{
@@ -1487,13 +1543,6 @@ dmu_return_arcbuf(arc_buf_t *buf)
}
void
-dmu_assign_arcbuf_impl(dmu_buf_t *handle, arc_buf_t *buf, dmu_tx_t *tx)
-{
- dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle;
- dbuf_assign_arcbuf(db, buf, tx);
-}
-
-void
dmu_convert_to_raw(dmu_buf_t *handle, boolean_t byteorder, const uint8_t *salt,
const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx)
{
@@ -1569,22 +1618,19 @@ dmu_copy_from_buf(objset_t *os, uint64_t object, uint64_t offset,
* dmu_write().
*/
void
-dmu_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
+dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf,
dmu_tx_t *tx)
{
- dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle;
- dnode_t *dn;
dmu_buf_impl_t *db;
+ objset_t *os = dn->dn_objset;
+ uint64_t object = dn->dn_object;
uint32_t blksz = (uint32_t)arc_buf_lsize(buf);
uint64_t blkid;
- DB_DNODE_ENTER(dbuf);
- dn = DB_DNODE(dbuf);
rw_enter(&dn->dn_struct_rwlock, RW_READER);
blkid = dbuf_whichblock(dn, 0, offset);
VERIFY((db = dbuf_hold(dn, blkid, FTAG)) != NULL);
rw_exit(&dn->dn_struct_rwlock);
- DB_DNODE_EXIT(dbuf);
/*
* We can only assign if the offset is aligned, the arc buf is the
@@ -1594,19 +1640,10 @@ dmu_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
dbuf_assign_arcbuf(db, buf, tx);
dbuf_rele(db, FTAG);
} else {
- objset_t *os;
- uint64_t object;
-
/* compressed bufs must always be assignable to their dbuf */
ASSERT3U(arc_get_compression(buf), ==, ZIO_COMPRESS_OFF);
ASSERT(!(buf->b_flags & ARC_BUF_FLAG_COMPRESSED));
- DB_DNODE_ENTER(dbuf);
- dn = DB_DNODE(dbuf);
- os = dn->dn_objset;
- object = dn->dn_object;
- DB_DNODE_EXIT(dbuf);
-
dbuf_rele(db, FTAG);
dmu_write(os, object, offset, blksz, buf->b_data, tx);
dmu_return_arcbuf(buf);
@@ -1614,6 +1651,17 @@ dmu_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
}
}
+void
+dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
+ dmu_tx_t *tx)
+{
+ dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle;
+
+ DB_DNODE_ENTER(dbuf);
+ dmu_assign_arcbuf_by_dnode(DB_DNODE(dbuf), offset, buf, tx);
+ DB_DNODE_EXIT(dbuf);
+}
+
typedef struct {
dbuf_dirty_record_t *dsa_dr;
dmu_sync_cb_t *dsa_done;
@@ -2424,7 +2472,9 @@ EXPORT_SYMBOL(dmu_buf_rele_array);
EXPORT_SYMBOL(dmu_prefetch);
EXPORT_SYMBOL(dmu_free_range);
EXPORT_SYMBOL(dmu_free_long_range);
+EXPORT_SYMBOL(dmu_free_long_range_raw);
EXPORT_SYMBOL(dmu_free_long_object);
+EXPORT_SYMBOL(dmu_free_long_object_raw);
EXPORT_SYMBOL(dmu_read);
EXPORT_SYMBOL(dmu_read_by_dnode);
EXPORT_SYMBOL(dmu_write);
@@ -2443,7 +2493,8 @@ EXPORT_SYMBOL(dmu_write_policy);
EXPORT_SYMBOL(dmu_sync);
EXPORT_SYMBOL(dmu_request_arcbuf);
EXPORT_SYMBOL(dmu_return_arcbuf);
-EXPORT_SYMBOL(dmu_assign_arcbuf);
+EXPORT_SYMBOL(dmu_assign_arcbuf_by_dnode);
+EXPORT_SYMBOL(dmu_assign_arcbuf_by_dbuf);
EXPORT_SYMBOL(dmu_buf_hold);
EXPORT_SYMBOL(dmu_ot);
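For reference, a minimal caller sketch of the interfaces added above. The signatures come from this diff; the handles ("os", "obj", "off", "len", "dn", "buf", "tx") and the receiving_raw flag are hypothetical context, not code from the patch.

	/*
	 * The raw variants (used when receiving a raw send stream) mark
	 * the resulting dirty records as raw so the data on disk is not
	 * re-encrypted with the local keys.
	 */
	if (receiving_raw)
		err = dmu_free_long_range_raw(os, obj, off, len);
	else
		err = dmu_free_long_range(os, obj, off, len);

	/*
	 * dmu_assign_arcbuf() is split in two: callers that can hold the
	 * dnode directly now skip the bonus-buffer indirection.
	 */
	VERIFY0(dnode_hold(os, obj, FTAG, &dn));
	dmu_assign_arcbuf_by_dnode(dn, off, buf, tx);
	dnode_rele(dn, FTAG);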
diff --git a/module/zfs/dmu_objset.c b/module/zfs/dmu_objset.c
index 4445121a0..609e43fe8 100644
--- a/module/zfs/dmu_objset.c
+++ b/module/zfs/dmu_objset.c
@@ -706,7 +706,9 @@ dmu_objset_own(const char *name, dmu_objset_type_t type,
dsl_pool_rele(dp, FTAG);
- if (dmu_objset_userobjspace_upgradable(*osp))
+ /* user accounting requires the dataset to be decrypted */
+ if (dmu_objset_userobjspace_upgradable(*osp) &&
+ (ds->ds_dir->dd_crypto_obj == 0 || decrypt))
dmu_objset_userobjspace_upgrade(*osp);
return (0);
@@ -932,7 +934,7 @@ dmu_objset_create_impl_dnstats(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
if (blksz == 0)
blksz = DNODE_BLOCK_SIZE;
- if (blksz == 0)
+ if (ibs == 0)
ibs = DN_MAX_INDBLKSHIFT;
if (ds != NULL)
@@ -1096,7 +1098,7 @@ dmu_objset_create_sync(void *arg, dmu_tx_t *tx)
}
/*
- * The doca_userfunc() will write out some data that needs to be
+ * The doca_userfunc() may write out some data that needs to be
* encrypted if the dataset is encrypted (specifically the root
* directory). This data must be written out before the encryption
* key mapping is removed by dsl_dataset_rele_flags(). Force the
@@ -1107,10 +1109,14 @@ dmu_objset_create_sync(void *arg, dmu_tx_t *tx)
dsl_dataset_t *tmpds = NULL;
boolean_t need_sync_done = B_FALSE;
+ mutex_enter(&ds->ds_lock);
+ ds->ds_owner = FTAG;
+ mutex_exit(&ds->ds_lock);
+
rzio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
- tmpds = txg_list_remove(&dp->dp_dirty_datasets, tx->tx_txg);
+ tmpds = txg_list_remove_this(&dp->dp_dirty_datasets, ds,
+ tx->tx_txg);
if (tmpds != NULL) {
- ASSERT3P(ds, ==, tmpds);
dsl_dataset_sync(ds, rzio, tx);
need_sync_done = B_TRUE;
}
@@ -1120,9 +1126,9 @@ dmu_objset_create_sync(void *arg, dmu_tx_t *tx)
taskq_wait(dp->dp_sync_taskq);
rzio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
- tmpds = txg_list_remove(&dp->dp_dirty_datasets, tx->tx_txg);
+ tmpds = txg_list_remove_this(&dp->dp_dirty_datasets, ds,
+ tx->tx_txg);
if (tmpds != NULL) {
- ASSERT3P(ds, ==, tmpds);
dmu_buf_rele(ds->ds_dbuf, ds);
dsl_dataset_sync(ds, rzio, tx);
}
@@ -1130,6 +1136,10 @@ dmu_objset_create_sync(void *arg, dmu_tx_t *tx)
if (need_sync_done)
dsl_dataset_sync_done(ds, tx);
+
+ mutex_enter(&ds->ds_lock);
+ ds->ds_owner = NULL;
+ mutex_exit(&ds->ds_lock);
}
spa_history_log_internal_ds(ds, "create", tx, "");
@@ -1336,6 +1346,7 @@ dmu_objset_upgrade_stop(objset_t *os)
mutex_exit(&os->os_upgrade_lock);
taskq_cancel_id(os->os_spa->spa_upgrade_taskq, id);
+ txg_wait_synced(os->os_spa->spa_dsl_pool, 0);
} else {
mutex_exit(&os->os_upgrade_lock);
}
diff --git a/module/zfs/dmu_send.c b/module/zfs/dmu_send.c
index c63ab43e1..cc6b97d53 100644
--- a/module/zfs/dmu_send.c
+++ b/module/zfs/dmu_send.c
@@ -517,7 +517,7 @@ dump_dnode(dmu_sendarg_t *dsp, const blkptr_t *bp, uint64_t object,
dnode_phys_t *dnp)
{
struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);
- int bonuslen = P2ROUNDUP(dnp->dn_bonuslen, 8);
+ int bonuslen;
if (object < dsp->dsa_resume_object) {
/*
@@ -558,6 +558,8 @@ dump_dnode(dmu_sendarg_t *dsp, const blkptr_t *bp, uint64_t object,
drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;
+ bonuslen = P2ROUNDUP(dnp->dn_bonuslen, 8);
+
if ((dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW)) {
ASSERT(BP_IS_ENCRYPTED(bp));
@@ -571,7 +573,7 @@ dump_dnode(dmu_sendarg_t *dsp, const blkptr_t *bp, uint64_t object,
/*
* Since we encrypt the entire bonus area, the (raw) part
- * beyond the the bonuslen is actually nonzero, so we need
+ * beyond the bonuslen is actually nonzero, so we need
* to send it.
*/
if (bonuslen != 0) {
@@ -2062,7 +2064,8 @@ dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx)
dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
- ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)));
+ ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)) ||
+ drba->drba_cookie->drc_raw);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
drba->drba_cookie->drc_ds = ds;
@@ -2590,7 +2593,11 @@ receive_freeobjects(struct receive_writer_arg *rwa,
else if (err != 0)
return (err);
- err = dmu_free_long_object(rwa->os, obj);
+ if (rwa->raw)
+ err = dmu_free_long_object_raw(rwa->os, obj);
+ else
+ err = dmu_free_long_object(rwa->os, obj);
+
if (err != 0)
return (err);
@@ -2606,9 +2613,9 @@ noinline static int
receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw,
arc_buf_t *abuf)
{
- dmu_tx_t *tx;
- dmu_buf_t *bonus;
int err;
+ dmu_tx_t *tx;
+ dnode_t *dn;
if (drrw->drr_offset + drrw->drr_logical_size < drrw->drr_offset ||
!DMU_OT_IS_VALID(drrw->drr_type))
@@ -2633,7 +2640,6 @@ receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw,
return (SET_ERROR(EINVAL));
tx = dmu_tx_create(rwa->os);
-
dmu_tx_hold_write(tx, drrw->drr_object,
drrw->drr_offset, drrw->drr_logical_size);
err = dmu_tx_assign(tx, TXG_WAIT);
@@ -2653,10 +2659,9 @@ receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw,
DRR_WRITE_PAYLOAD_SIZE(drrw));
}
- /* use the bonus buf to look up the dnode in dmu_assign_arcbuf */
- if (dmu_bonus_hold(rwa->os, drrw->drr_object, FTAG, &bonus) != 0)
- return (SET_ERROR(EINVAL));
- dmu_assign_arcbuf(bonus, drrw->drr_offset, abuf, tx);
+ VERIFY0(dnode_hold(rwa->os, drrw->drr_object, FTAG, &dn));
+ dmu_assign_arcbuf_by_dnode(dn, drrw->drr_offset, abuf, tx);
+ dnode_rele(dn, FTAG);
/*
* Note: If the receive fails, we want the resume stream to start
@@ -2666,7 +2671,6 @@ receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw,
*/
save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
dmu_tx_commit(tx);
- dmu_buf_rele(bonus, FTAG);
return (0);
}
@@ -2765,6 +2769,8 @@ receive_write_embedded(struct receive_writer_arg *rwa,
return (SET_ERROR(EINVAL));
if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
return (SET_ERROR(EINVAL));
+ if (rwa->raw)
+ return (SET_ERROR(EINVAL));
if (drrwe->drr_object > rwa->max_object)
rwa->max_object = drrwe->drr_object;
@@ -2839,7 +2845,7 @@ receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
if (db_spill->db_size < drrs->drr_length)
VERIFY(0 == dbuf_spill_set_blksz(db_spill,
drrs->drr_length, tx));
- dmu_assign_arcbuf_impl(db_spill, abuf, tx);
+ dbuf_assign_arcbuf((dmu_buf_impl_t *)db_spill, abuf, tx);
dmu_buf_rele(db, FTAG);
dmu_buf_rele(db_spill, FTAG);
@@ -2864,8 +2870,13 @@ receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
if (drrf->drr_object > rwa->max_object)
rwa->max_object = drrf->drr_object;
- err = dmu_free_long_range(rwa->os, drrf->drr_object,
- drrf->drr_offset, drrf->drr_length);
+ if (rwa->raw) {
+ err = dmu_free_long_range_raw(rwa->os, drrf->drr_object,
+ drrf->drr_offset, drrf->drr_length);
+ } else {
+ err = dmu_free_long_range(rwa->os, drrf->drr_object,
+ drrf->drr_offset, drrf->drr_length);
+ }
return (err);
}
@@ -2948,6 +2959,7 @@ receive_object_range(struct receive_writer_arg *rwa,
static void
dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
{
+ dsl_dataset_t *ds = drc->drc_ds;
ds_hold_flags_t dsflags = (drc->drc_raw) ? 0 : DS_HOLD_FLAG_DECRYPT;
/*
@@ -2957,14 +2969,17 @@ dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
* that the user accounting code will not attempt to do anything
* after we stopped receiving the dataset.
*/
- txg_wait_synced(drc->drc_ds->ds_dir->dd_pool, 0);
+ txg_wait_synced(ds->ds_dir->dd_pool, 0);
- if (drc->drc_resumable) {
- dsl_dataset_disown(drc->drc_ds, dsflags, dmu_recv_tag);
+ rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
+ if (drc->drc_resumable && !BP_IS_HOLE(dsl_dataset_get_blkptr(ds))) {
+ rrw_exit(&ds->ds_bp_rwlock, FTAG);
+ dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
} else {
char name[ZFS_MAX_DATASET_NAME_LEN];
- dsl_dataset_name(drc->drc_ds, name);
- dsl_dataset_disown(drc->drc_ds, dsflags, dmu_recv_tag);
+ rrw_exit(&ds->ds_bp_rwlock, FTAG);
+ dsl_dataset_name(ds, name);
+ dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
(void) dsl_destroy_head(name);
}
}
diff --git a/module/zfs/dmu_traverse.c b/module/zfs/dmu_traverse.c
index a6c27b4be..268d82ce4 100644
--- a/module/zfs/dmu_traverse.c
+++ b/module/zfs/dmu_traverse.c
@@ -181,7 +181,7 @@ traverse_prefetch_metadata(traverse_data_t *td,
const blkptr_t *bp, const zbookmark_phys_t *zb)
{
arc_flags_t flags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;
- int zio_flags = ZIO_FLAG_CANFAIL;
+ int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
if (!(td->td_flags & TRAVERSE_PREFETCH_METADATA))
return;
diff --git a/module/zfs/dsl_crypt.c b/module/zfs/dsl_crypt.c
index af46dd753..a71142682 100644
--- a/module/zfs/dsl_crypt.c
+++ b/module/zfs/dsl_crypt.c
@@ -90,9 +90,9 @@ dsl_wrapping_key_free(dsl_wrapping_key_t *wkey)
if (wkey->wk_key.ck_data) {
bzero(wkey->wk_key.ck_data,
- BITS_TO_BYTES(wkey->wk_key.ck_length));
+ CRYPTO_BITS2BYTES(wkey->wk_key.ck_length));
kmem_free(wkey->wk_key.ck_data,
- BITS_TO_BYTES(wkey->wk_key.ck_length));
+ CRYPTO_BITS2BYTES(wkey->wk_key.ck_length));
}
refcount_destroy(&wkey->wk_refcnt);
@@ -119,7 +119,7 @@ dsl_wrapping_key_create(uint8_t *wkeydata, zfs_keyformat_t keyformat,
}
wkey->wk_key.ck_format = CRYPTO_KEY_RAW;
- wkey->wk_key.ck_length = BYTES_TO_BITS(WRAPPING_KEY_LEN);
+ wkey->wk_key.ck_length = CRYPTO_BYTES2BITS(WRAPPING_KEY_LEN);
bcopy(wkeydata, wkey->wk_key.ck_data, WRAPPING_KEY_LEN);
/* initialize the rest of the struct */
@@ -433,7 +433,6 @@ dsl_crypto_can_set_keylocation(const char *dsname, const char *keylocation)
int ret = 0;
dsl_dir_t *dd = NULL;
dsl_pool_t *dp = NULL;
- dsl_wrapping_key_t *wkey = NULL;
uint64_t rddobj;
/* hold the dsl dir */
@@ -472,16 +471,12 @@ dsl_crypto_can_set_keylocation(const char *dsname, const char *keylocation)
goto out;
}
- if (wkey != NULL)
- dsl_wrapping_key_rele(wkey, FTAG);
dsl_dir_rele(dd, FTAG);
dsl_pool_rele(dp, FTAG);
return (0);
out:
- if (wkey != NULL)
- dsl_wrapping_key_rele(wkey, FTAG);
if (dd != NULL)
dsl_dir_rele(dd, FTAG);
if (dp != NULL)
@@ -728,6 +723,7 @@ spa_keystore_load_wkey(const char *dsname, dsl_crypto_params_t *dcp,
dsl_crypto_key_t *dck = NULL;
dsl_wrapping_key_t *wkey = dcp->cp_wkey;
dsl_pool_t *dp = NULL;
+ uint64_t keyformat, salt, iters;
/*
* We don't validate the wrapping key's keyformat, salt, or iters
@@ -762,8 +758,36 @@ spa_keystore_load_wkey(const char *dsname, dsl_crypto_params_t *dcp,
if (ret != 0)
goto error;
+ /* initialize the wkey encryption parameters from the DSL Crypto Key */
+ ret = zap_lookup(dp->dp_meta_objset, dd->dd_crypto_obj,
+ zfs_prop_to_name(ZFS_PROP_KEYFORMAT), 8, 1, &keyformat);
+ if (ret != 0)
+ goto error;
+
+ ret = zap_lookup(dp->dp_meta_objset, dd->dd_crypto_obj,
+ zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT), 8, 1, &salt);
+ if (ret != 0)
+ goto error;
+
+ ret = zap_lookup(dp->dp_meta_objset, dd->dd_crypto_obj,
+ zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS), 8, 1, &iters);
+ if (ret != 0)
+ goto error;
+
+ ASSERT3U(keyformat, <, ZFS_KEYFORMAT_FORMATS);
+ ASSERT3U(keyformat, !=, ZFS_KEYFORMAT_NONE);
+ IMPLY(keyformat == ZFS_KEYFORMAT_PASSPHRASE, iters != 0);
+ IMPLY(keyformat == ZFS_KEYFORMAT_PASSPHRASE, salt != 0);
+ IMPLY(keyformat != ZFS_KEYFORMAT_PASSPHRASE, iters == 0);
+ IMPLY(keyformat != ZFS_KEYFORMAT_PASSPHRASE, salt == 0);
+
+ wkey->wk_keyformat = keyformat;
+ wkey->wk_salt = salt;
+ wkey->wk_iters = iters;
+
/*
- * At this point we have verified the key. We can simply cleanup and
+ * At this point we have verified the wkey and confirmed that it can
+ * be used to decrypt a DSL Crypto Key. We can simply cleanup and
* return if this is all the user wanted to do.
*/
if (noop)
@@ -1326,10 +1350,12 @@ spa_keystore_change_key_sync_impl(uint64_t rddobj, uint64_t ddobj,
return;
}
- /* stop recursing if this dsl dir didn't inherit from the root */
+ /*
+ * Stop recursing if this dsl dir didn't inherit from the root
+ * or if this dd is a clone.
+ */
VERIFY0(dsl_dir_get_encryption_root_ddobj(dd, &curr_rddobj));
-
- if (curr_rddobj != rddobj) {
+ if (curr_rddobj != rddobj || dsl_dir_is_clone(dd)) {
dsl_dir_rele(dd, FTAG);
return;
}
@@ -1355,7 +1381,7 @@ spa_keystore_change_key_sync_impl(uint64_t rddobj, uint64_t ddobj,
zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
- /* Recurse into all child and clone dsl dirs. */
+ /* Recurse into all child dsl dirs. */
for (zap_cursor_init(zc, dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_child_dir_zapobj);
zap_cursor_retrieve(zc, za) == 0;
@@ -1365,20 +1391,6 @@ spa_keystore_change_key_sync_impl(uint64_t rddobj, uint64_t ddobj,
}
zap_cursor_fini(zc);
- for (zap_cursor_init(zc, dp->dp_meta_objset,
- dsl_dir_phys(dd)->dd_clones);
- zap_cursor_retrieve(zc, za) == 0;
- zap_cursor_advance(zc)) {
- dsl_dataset_t *clone;
-
- VERIFY0(dsl_dataset_hold_obj(dp,
- za->za_first_integer, FTAG, &clone));
- spa_keystore_change_key_sync_impl(rddobj,
- clone->ds_dir->dd_object, new_rddobj, wkey, tx);
- dsl_dataset_rele(clone, FTAG);
- }
- zap_cursor_fini(zc);
-
kmem_free(za, sizeof (zap_attribute_t));
kmem_free(zc, sizeof (zap_cursor_t));
@@ -1831,6 +1843,8 @@ dsl_dataset_create_crypt_sync(uint64_t dsobj, dsl_dir_t *dd,
wkey->wk_ddobj = dd->dd_object;
}
+ ASSERT3P(wkey, !=, NULL);
+
/* Create or clone the DSL crypto key and activate the feature */
dd->dd_crypto_obj = dsl_crypto_key_create_sync(crypt, wkey, tx);
VERIFY0(zap_add(dp->dp_meta_objset, dd->dd_object,
@@ -2191,6 +2205,7 @@ dsl_crypto_populate_key_nvlist(dsl_dataset_t *ds, nvlist_t **nvl_out)
uint64_t rddobj;
nvlist_t *nvl = NULL;
uint64_t dckobj = ds->ds_dir->dd_crypto_obj;
+ dsl_dir_t *rdd = NULL;
dsl_pool_t *dp = ds->ds_dir->dd_pool;
objset_t *mos = dp->dp_meta_objset;
uint64_t crypt = 0, guid = 0, format = 0, iters = 0, salt = 0;
@@ -2209,10 +2224,6 @@ dsl_crypto_populate_key_nvlist(dsl_dataset_t *ds, nvlist_t **nvl_out)
goto error;
/* lookup values from the DSL Crypto Key */
- ret = dsl_dir_get_encryption_root_ddobj(ds->ds_dir, &rddobj);
- if (ret != 0)
- goto error;
-
ret = zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_CRYPTO_SUITE, 8, 1,
&crypt);
if (ret != 0)
@@ -2242,24 +2253,43 @@ dsl_crypto_populate_key_nvlist(dsl_dataset_t *ds, nvlist_t **nvl_out)
if (ret != 0)
goto error;
- /* lookup wrapping key properties */
- ret = zap_lookup(dp->dp_meta_objset, dckobj,
- zfs_prop_to_name(ZFS_PROP_KEYFORMAT), 8, 1, &format);
+ /*
+ * Lookup wrapping key properties. An early version of the code did
+ * not correctly add these values to the wrapping key or the DSL
+	 * Crypto Key on disk for non-encryption roots, so to be safe we
+ * always take the slightly circuitous route of looking it up from
+ * the encryption root's key.
+ */
+ ret = dsl_dir_get_encryption_root_ddobj(ds->ds_dir, &rddobj);
if (ret != 0)
goto error;
+ dsl_pool_config_enter(dp, FTAG);
+
+ ret = dsl_dir_hold_obj(dp, rddobj, NULL, FTAG, &rdd);
+ if (ret != 0)
+ goto error_unlock;
+
+ ret = zap_lookup(dp->dp_meta_objset, rdd->dd_crypto_obj,
+ zfs_prop_to_name(ZFS_PROP_KEYFORMAT), 8, 1, &format);
+ if (ret != 0)
+ goto error_unlock;
+
if (format == ZFS_KEYFORMAT_PASSPHRASE) {
- ret = zap_lookup(dp->dp_meta_objset, dckobj,
+ ret = zap_lookup(dp->dp_meta_objset, rdd->dd_crypto_obj,
zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS), 8, 1, &iters);
if (ret != 0)
- goto error;
+ goto error_unlock;
- ret = zap_lookup(dp->dp_meta_objset, dckobj,
+ ret = zap_lookup(dp->dp_meta_objset, rdd->dd_crypto_obj,
zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT), 8, 1, &salt);
if (ret != 0)
- goto error;
+ goto error_unlock;
}
+ dsl_dir_rele(rdd, FTAG);
+ dsl_pool_config_exit(dp, FTAG);
+
fnvlist_add_uint64(nvl, DSL_CRYPTO_KEY_CRYPTO_SUITE, crypt);
fnvlist_add_uint64(nvl, DSL_CRYPTO_KEY_GUID, guid);
VERIFY0(nvlist_add_uint8_array(nvl, DSL_CRYPTO_KEY_MASTER_KEY,
@@ -2285,7 +2315,11 @@ dsl_crypto_populate_key_nvlist(dsl_dataset_t *ds, nvlist_t **nvl_out)
*nvl_out = nvl;
return (0);
+error_unlock:
+ dsl_pool_config_exit(dp, FTAG);
error:
+ if (rdd != NULL)
+ dsl_dir_rele(rdd, FTAG);
nvlist_free(nvl);
*nvl_out = NULL;
@@ -2488,7 +2522,8 @@ spa_do_crypt_mac_abd(boolean_t generate, spa_t *spa, uint64_t dsobj, abd_t *abd,
goto error;
/* perform the hmac */
- ret = zio_crypt_do_hmac(&dck->dck_key, buf, datalen, digestbuf);
+ ret = zio_crypt_do_hmac(&dck->dck_key, buf, datalen,
+ digestbuf, ZIO_DATA_MAC_LEN);
if (ret != 0)
goto error;
@@ -2604,8 +2639,7 @@ error:
abd_return_buf(cabd, cipherbuf, datalen);
}
- if (dck != NULL)
- spa_keystore_dsl_key_rele(spa, dck, FTAG);
+ spa_keystore_dsl_key_rele(spa, dck, FTAG);
return (ret);
}
diff --git a/module/zfs/hkdf.c b/module/zfs/hkdf.c
new file mode 100644
index 000000000..14265472d
--- /dev/null
+++ b/module/zfs/hkdf.c
@@ -0,0 +1,171 @@
+/*
+ * CDDL HEADER START
+ *
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2017, Datto, Inc. All rights reserved.
+ */
+
+#include <sys/crypto/api.h>
+#include <sys/sha2.h>
+#include <sys/hkdf.h>
+
+static int
+hkdf_sha512_extract(uint8_t *salt, uint_t salt_len, uint8_t *key_material,
+ uint_t km_len, uint8_t *out_buf)
+{
+ int ret;
+ crypto_mechanism_t mech;
+ crypto_key_t key;
+ crypto_data_t input_cd, output_cd;
+
+ /* initialize HMAC mechanism */
+ mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC);
+ mech.cm_param = NULL;
+ mech.cm_param_len = 0;
+
+ /* initialize the salt as a crypto key */
+ key.ck_format = CRYPTO_KEY_RAW;
+ key.ck_length = CRYPTO_BYTES2BITS(salt_len);
+ key.ck_data = salt;
+
+ /* initialize crypto data for the input and output data */
+ input_cd.cd_format = CRYPTO_DATA_RAW;
+ input_cd.cd_offset = 0;
+ input_cd.cd_length = km_len;
+ input_cd.cd_raw.iov_base = (char *)key_material;
+ input_cd.cd_raw.iov_len = input_cd.cd_length;
+
+ output_cd.cd_format = CRYPTO_DATA_RAW;
+ output_cd.cd_offset = 0;
+ output_cd.cd_length = SHA512_DIGEST_LENGTH;
+ output_cd.cd_raw.iov_base = (char *)out_buf;
+ output_cd.cd_raw.iov_len = output_cd.cd_length;
+
+ ret = crypto_mac(&mech, &input_cd, &key, NULL, &output_cd, NULL);
+ if (ret != CRYPTO_SUCCESS)
+ return (SET_ERROR(EIO));
+
+ return (0);
+}
+
+static int
+hkdf_sha512_expand(uint8_t *extract_key, uint8_t *info, uint_t info_len,
+ uint8_t *out_buf, uint_t out_len)
+{
+ int ret;
+ crypto_mechanism_t mech;
+ crypto_context_t ctx;
+ crypto_key_t key;
+ crypto_data_t T_cd, info_cd, c_cd;
+ uint_t i, T_len = 0, pos = 0;
+ uint8_t c;
+ uint_t N = (out_len + SHA512_DIGEST_LENGTH) / SHA512_DIGEST_LENGTH;
+ uint8_t T[SHA512_DIGEST_LENGTH];
+
+ if (N > 255)
+ return (SET_ERROR(EINVAL));
+
+ /* initialize HMAC mechanism */
+ mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC);
+ mech.cm_param = NULL;
+ mech.cm_param_len = 0;
+
+ /* initialize the salt as a crypto key */
+ key.ck_format = CRYPTO_KEY_RAW;
+ key.ck_length = CRYPTO_BYTES2BITS(SHA512_DIGEST_LENGTH);
+ key.ck_data = extract_key;
+
+ /* initialize crypto data for the input and output data */
+ T_cd.cd_format = CRYPTO_DATA_RAW;
+ T_cd.cd_offset = 0;
+ T_cd.cd_raw.iov_base = (char *)T;
+
+ c_cd.cd_format = CRYPTO_DATA_RAW;
+ c_cd.cd_offset = 0;
+ c_cd.cd_length = 1;
+ c_cd.cd_raw.iov_base = (char *)&c;
+ c_cd.cd_raw.iov_len = c_cd.cd_length;
+
+ info_cd.cd_format = CRYPTO_DATA_RAW;
+ info_cd.cd_offset = 0;
+ info_cd.cd_length = info_len;
+ info_cd.cd_raw.iov_base = (char *)info;
+ info_cd.cd_raw.iov_len = info_cd.cd_length;
+
+ for (i = 1; i <= N; i++) {
+ c = i;
+
+ T_cd.cd_length = T_len;
+ T_cd.cd_raw.iov_len = T_cd.cd_length;
+
+ ret = crypto_mac_init(&mech, &key, NULL, &ctx, NULL);
+ if (ret != CRYPTO_SUCCESS)
+ return (SET_ERROR(EIO));
+
+ ret = crypto_mac_update(ctx, &T_cd, NULL);
+ if (ret != CRYPTO_SUCCESS)
+ return (SET_ERROR(EIO));
+
+ ret = crypto_mac_update(ctx, &info_cd, NULL);
+ if (ret != CRYPTO_SUCCESS)
+ return (SET_ERROR(EIO));
+
+ ret = crypto_mac_update(ctx, &c_cd, NULL);
+ if (ret != CRYPTO_SUCCESS)
+ return (SET_ERROR(EIO));
+
+ T_len = SHA512_DIGEST_LENGTH;
+ T_cd.cd_length = T_len;
+ T_cd.cd_raw.iov_len = T_cd.cd_length;
+
+ ret = crypto_mac_final(ctx, &T_cd, NULL);
+ if (ret != CRYPTO_SUCCESS)
+ return (SET_ERROR(EIO));
+
+ bcopy(T, out_buf + pos,
+ (i != N) ? SHA512_DIGEST_LENGTH : (out_len - pos));
+ pos += SHA512_DIGEST_LENGTH;
+ }
+
+ return (0);
+}
+
+/*
+ * HKDF is designed to be a relatively fast function for deriving keys from a
+ * master key + a salt. We use this function to generate new encryption keys
+ * so as to avoid hitting the cryptographic limits of the underlying
+ * encryption modes. Note that, for the sake of deriving encryption keys, the
+ * info parameter is called the "salt" everywhere else in the code.
+ */
+int
+hkdf_sha512(uint8_t *key_material, uint_t km_len, uint8_t *salt,
+ uint_t salt_len, uint8_t *info, uint_t info_len, uint8_t *output_key,
+ uint_t out_len)
+{
+ int ret;
+ uint8_t extract_key[SHA512_DIGEST_LENGTH];
+
+ ret = hkdf_sha512_extract(salt, salt_len, key_material, km_len,
+ extract_key);
+ if (ret != 0)
+ return (ret);
+
+ ret = hkdf_sha512_expand(extract_key, info, info_len, output_key,
+ out_len);
+ if (ret != 0)
+ return (ret);
+
+ return (0);
+}
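The new file implements RFC 5869 HKDF-SHA512: the extract step computes PRK = HMAC-SHA512(salt, key_material), and the expand step iterates T(i) = HMAC-SHA512(PRK, T(i-1) || info || i) until out_len bytes have been produced, which is why N is limited to 255 blocks. A minimal caller sketch of the exported function follows; the 32-byte key and 8-byte salt lengths are illustrative assumptions, not values taken from this patch.

	#include <sys/hkdf.h>

	uint8_t derived[32];
	int err;

	/*
	 * Hypothetical caller: derive a 256-bit key from a master key and
	 * an on-disk salt. Note that what hkdf_sha512() calls "info" is
	 * the value the rest of the code refers to as the "salt"; the
	 * RFC 5869 extract-step salt is left empty here.
	 */
	err = hkdf_sha512(master_key, 32,	/* input key material */
	    NULL, 0,				/* extract salt (none) */
	    disk_salt, 8,			/* expand "info" */
	    derived, sizeof (derived));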
diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c
index 5e413c065..01e5234c7 100644
--- a/module/zfs/metaslab.c
+++ b/module/zfs/metaslab.c
@@ -1937,7 +1937,8 @@ metaslab_passivate(metaslab_t *msp, uint64_t weight)
* this metaslab again. In that case, it had better be empty,
* or we would be leaving space on the table.
*/
- ASSERT(size >= SPA_MINBLOCKSIZE ||
+ ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) ||
+ size >= SPA_MINBLOCKSIZE ||
range_tree_space(msp->ms_tree) == 0);
ASSERT0(weight & METASLAB_ACTIVE_MASK);
diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c
index 62241a46b..85dd4a049 100644
--- a/module/zfs/zfs_vnops.c
+++ b/module/zfs/zfs_vnops.c
@@ -841,8 +841,8 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
xuio_stat_wbuf_copied();
} else {
ASSERT(xuio || tx_bytes == max_blksz);
- dmu_assign_arcbuf(sa_get_db(zp->z_sa_hdl),
- woff, abuf, tx);
+ dmu_assign_arcbuf_by_dbuf(
+ sa_get_db(zp->z_sa_hdl), woff, abuf, tx);
}
ASSERT(tx_bytes <= uio->uio_resid);
uioskip(uio, tx_bytes);
diff --git a/module/zfs/zil.c b/module/zfs/zil.c
index f15e8cddb..e90e583ae 100644
--- a/module/zfs/zil.c
+++ b/module/zfs/zil.c
@@ -2115,6 +2115,21 @@ zil_suspend(const char *osname, void **cookiep)
return (0);
}
+ /*
+ * The ZIL has work to do. Ensure that the associated encryption
+ * key will remain mapped while we are committing the log by
+ * grabbing a reference to it. If the key isn't loaded we have no
+ * choice but to return an error until the wrapping key is loaded.
+ */
+ if (os->os_encrypted && spa_keystore_create_mapping(os->os_spa,
+ dmu_objset_ds(os), FTAG) != 0) {
+ zilog->zl_suspend--;
+ mutex_exit(&zilog->zl_lock);
+ dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
+ dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
+ return (SET_ERROR(EBUSY));
+ }
+
zilog->zl_suspending = B_TRUE;
mutex_exit(&zilog->zl_lock);
@@ -2127,6 +2142,20 @@ zil_suspend(const char *osname, void **cookiep)
cv_broadcast(&zilog->zl_cv_suspend);
mutex_exit(&zilog->zl_lock);
+ if (os->os_encrypted) {
+ /*
+ * Encrypted datasets need to wait for all data to be
+ * synced out before removing the mapping.
+ *
+ * XXX: Depending on the number of datasets with
+ * outstanding ZIL data on a given log device, this
+ * might cause spa_offline_log() to take a long time.
+ */
+ txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
+ VERIFY0(spa_keystore_remove_mapping(os->os_spa,
+ dmu_objset_id(os), FTAG));
+ }
+
if (cookiep == NULL)
zil_resume(os);
else
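Condensed, the suspend path above brackets the log commit with a keystore mapping reference so the encryption key cannot be unloaded while ZIL blocks are still being written; a sketch with the error paths trimmed (identifiers as in the diff):

	if (os->os_encrypted && spa_keystore_create_mapping(os->os_spa,
	    dmu_objset_ds(os), FTAG) != 0)
		return (SET_ERROR(EBUSY));	/* wrapping key not loaded */

	/* ... commit the log ... */

	if (os->os_encrypted) {
		/* all ZIL data must sync out before dropping the mapping */
		txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
		VERIFY0(spa_keystore_remove_mapping(os->os_spa,
		    dmu_objset_id(os), FTAG));
	}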
diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index 057a1405f..508011a86 100644
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -2518,13 +2518,14 @@ zio_write_gang_block(zio_t *pio)
zp.zp_checksum = gio->io_prop.zp_checksum;
zp.zp_compress = ZIO_COMPRESS_OFF;
- zp.zp_encrypt = gio->io_prop.zp_encrypt;
zp.zp_type = DMU_OT_NONE;
zp.zp_level = 0;
zp.zp_copies = gio->io_prop.zp_copies;
zp.zp_dedup = B_FALSE;
zp.zp_dedup_verify = B_FALSE;
zp.zp_nopwrite = B_FALSE;
+ zp.zp_encrypt = gio->io_prop.zp_encrypt;
+ zp.zp_byteorder = gio->io_prop.zp_byteorder;
bzero(zp.zp_salt, ZIO_DATA_SALT_LEN);
bzero(zp.zp_iv, ZIO_DATA_IV_LEN);
bzero(zp.zp_mac, ZIO_DATA_MAC_LEN);
diff --git a/module/zfs/zio_crypt.c b/module/zfs/zio_crypt.c
index 8fcf51550..5ffa1e8b0 100644
--- a/module/zfs/zio_crypt.c
+++ b/module/zfs/zio_crypt.c
@@ -25,6 +25,7 @@
#include <sys/zio.h>
#include <sys/zil.h>
#include <sys/sha2.h>
+#include <sys/hkdf.h>
/*
* This file is responsible for handling all of the details of generating
@@ -198,176 +199,6 @@ zio_crypt_info_t zio_crypt_table[ZIO_CRYPT_FUNCTIONS] = {
{SUN_CKM_AES_GCM, ZC_TYPE_GCM, 32, "aes-256-gcm"}
};
-static int
-hkdf_sha512_extract(uint8_t *salt, uint_t salt_len, uint8_t *key_material,
- uint_t km_len, uint8_t *out_buf)
-{
- int ret;
- crypto_mechanism_t mech;
- crypto_key_t key;
- crypto_data_t input_cd, output_cd;
-
- /* initialize HMAC mechanism */
- mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC);
- mech.cm_param = NULL;
- mech.cm_param_len = 0;
-
- /* initialize the salt as a crypto key */
- key.ck_format = CRYPTO_KEY_RAW;
- key.ck_length = BYTES_TO_BITS(salt_len);
- key.ck_data = salt;
-
- /* initialize crypto data for the input and output data */
- input_cd.cd_format = CRYPTO_DATA_RAW;
- input_cd.cd_offset = 0;
- input_cd.cd_length = km_len;
- input_cd.cd_raw.iov_base = (char *)key_material;
- input_cd.cd_raw.iov_len = input_cd.cd_length;
-
- output_cd.cd_format = CRYPTO_DATA_RAW;
- output_cd.cd_offset = 0;
- output_cd.cd_length = SHA512_DIGEST_LEN;
- output_cd.cd_raw.iov_base = (char *)out_buf;
- output_cd.cd_raw.iov_len = output_cd.cd_length;
-
- ret = crypto_mac(&mech, &input_cd, &key, NULL, &output_cd, NULL);
- if (ret != CRYPTO_SUCCESS) {
- ret = SET_ERROR(EIO);
- goto error;
- }
-
- return (0);
-
-error:
- return (ret);
-}
-
-static int
-hkdf_sha512_expand(uint8_t *extract_key, uint8_t *info, uint_t info_len,
- uint8_t *out_buf, uint_t out_len)
-{
- int ret;
- crypto_mechanism_t mech;
- crypto_context_t ctx;
- crypto_key_t key;
- crypto_data_t T_cd, info_cd, c_cd;
- uint_t i, T_len = 0, pos = 0;
- uint8_t c;
- uint_t N = (out_len + SHA512_DIGEST_LEN) / SHA512_DIGEST_LEN;
- uint8_t T[SHA512_DIGEST_LEN];
-
- if (N > 255)
- return (SET_ERROR(EINVAL));
-
- /* initialize HMAC mechanism */
- mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC);
- mech.cm_param = NULL;
- mech.cm_param_len = 0;
-
- /* initialize the salt as a crypto key */
- key.ck_format = CRYPTO_KEY_RAW;
- key.ck_length = BYTES_TO_BITS(SHA512_DIGEST_LEN);
- key.ck_data = extract_key;
-
- /* initialize crypto data for the input and output data */
- T_cd.cd_format = CRYPTO_DATA_RAW;
- T_cd.cd_offset = 0;
- T_cd.cd_raw.iov_base = (char *)T;
-
- c_cd.cd_format = CRYPTO_DATA_RAW;
- c_cd.cd_offset = 0;
- c_cd.cd_length = 1;
- c_cd.cd_raw.iov_base = (char *)&c;
- c_cd.cd_raw.iov_len = c_cd.cd_length;
-
- info_cd.cd_format = CRYPTO_DATA_RAW;
- info_cd.cd_offset = 0;
- info_cd.cd_length = info_len;
- info_cd.cd_raw.iov_base = (char *)info;
- info_cd.cd_raw.iov_len = info_cd.cd_length;
-
- for (i = 1; i <= N; i++) {
- c = i;
-
- T_cd.cd_length = T_len;
- T_cd.cd_raw.iov_len = T_cd.cd_length;
-
- ret = crypto_mac_init(&mech, &key, NULL, &ctx, NULL);
- if (ret != CRYPTO_SUCCESS) {
- ret = SET_ERROR(EIO);
- goto error;
- }
-
- ret = crypto_mac_update(ctx, &T_cd, NULL);
- if (ret != CRYPTO_SUCCESS) {
- ret = SET_ERROR(EIO);
- goto error;
- }
-
- ret = crypto_mac_update(ctx, &info_cd, NULL);
- if (ret != CRYPTO_SUCCESS) {
- ret = SET_ERROR(EIO);
- goto error;
- }
-
- ret = crypto_mac_update(ctx, &c_cd, NULL);
- if (ret != CRYPTO_SUCCESS) {
- ret = SET_ERROR(EIO);
- goto error;
- }
-
- T_len = SHA512_DIGEST_LEN;
- T_cd.cd_length = T_len;
- T_cd.cd_raw.iov_len = T_cd.cd_length;
-
- ret = crypto_mac_final(ctx, &T_cd, NULL);
- if (ret != CRYPTO_SUCCESS) {
- ret = SET_ERROR(EIO);
- goto error;
- }
-
- bcopy(T, out_buf + pos,
- (i != N) ? SHA512_DIGEST_LEN : (out_len - pos));
- pos += SHA512_DIGEST_LEN;
- }
-
- return (0);
-
-error:
- return (ret);
-}
-
-/*
- * HKDF is designed to be a relatively fast function for deriving keys from a
- * master key + a salt. We use this function to generate new encryption keys
- * so as to avoid hitting the cryptographic limits of the underlying
- * encryption modes. Note that, for the sake of deriving encryption keys, the
- * info parameter is called the "salt" everywhere else in the code.
- */
-static int
-hkdf_sha512(uint8_t *key_material, uint_t km_len, uint8_t *salt,
- uint_t salt_len, uint8_t *info, uint_t info_len, uint8_t *output_key,
- uint_t out_len)
-{
- int ret;
- uint8_t extract_key[SHA512_DIGEST_LEN];
-
- ret = hkdf_sha512_extract(salt, salt_len, key_material, km_len,
- extract_key);
- if (ret != 0)
- goto error;
-
- ret = hkdf_sha512_expand(extract_key, info, info_len, output_key,
- out_len);
- if (ret != 0)
- goto error;
-
- return (0);
-
-error:
- return (ret);
-}
-
void
zio_crypt_key_destroy(zio_crypt_key_t *key)
{
@@ -421,11 +252,11 @@ zio_crypt_key_init(uint64_t crypt, zio_crypt_key_t *key)
/* initialize keys for the ICP */
key->zk_current_key.ck_format = CRYPTO_KEY_RAW;
key->zk_current_key.ck_data = key->zk_current_keydata;
- key->zk_current_key.ck_length = BYTES_TO_BITS(keydata_len);
+ key->zk_current_key.ck_length = CRYPTO_BYTES2BITS(keydata_len);
key->zk_hmac_key.ck_format = CRYPTO_KEY_RAW;
key->zk_hmac_key.ck_data = &key->zk_hmac_key;
- key->zk_hmac_key.ck_length = BYTES_TO_BITS(SHA512_HMAC_KEYLEN);
+ key->zk_hmac_key.ck_length = CRYPTO_BYTES2BITS(SHA512_HMAC_KEYLEN);
/*
* Initialize the crypto templates. It's ok if this fails because
@@ -588,10 +419,10 @@ zio_do_crypt_uio(boolean_t encrypt, uint64_t crypt, crypto_key_t *key,
mech.cm_param_len = sizeof (CK_AES_CCM_PARAMS);
} else {
gcmp.ulIvLen = ZIO_DATA_IV_LEN;
- gcmp.ulIvBits = BYTES_TO_BITS(ZIO_DATA_IV_LEN);
+ gcmp.ulIvBits = CRYPTO_BYTES2BITS(ZIO_DATA_IV_LEN);
gcmp.ulAADLen = auth_len;
gcmp.pAAD = authbuf;
- gcmp.ulTagBits = BYTES_TO_BITS(maclen);
+ gcmp.ulTagBits = CRYPTO_BYTES2BITS(maclen);
gcmp.pIv = ivbuf;
mech.cm_param = (char *)(&gcmp);
@@ -748,11 +579,11 @@ zio_crypt_key_unwrap(crypto_key_t *cwkey, uint64_t crypt, uint64_t guid,
/* initialize keys for ICP */
key->zk_current_key.ck_format = CRYPTO_KEY_RAW;
key->zk_current_key.ck_data = key->zk_current_keydata;
- key->zk_current_key.ck_length = BYTES_TO_BITS(keydata_len);
+ key->zk_current_key.ck_length = CRYPTO_BYTES2BITS(keydata_len);
key->zk_hmac_key.ck_format = CRYPTO_KEY_RAW;
key->zk_hmac_key.ck_data = key->zk_hmac_keydata;
- key->zk_hmac_key.ck_length = BYTES_TO_BITS(SHA512_HMAC_KEYLEN);
+ key->zk_hmac_key.ck_length = CRYPTO_BYTES2BITS(SHA512_HMAC_KEYLEN);
/*
* Initialize the crypto templates. It's ok if this fails because
@@ -801,12 +632,14 @@ error:
int
zio_crypt_do_hmac(zio_crypt_key_t *key, uint8_t *data, uint_t datalen,
- uint8_t *digestbuf)
+ uint8_t *digestbuf, uint_t digestlen)
{
int ret;
crypto_mechanism_t mech;
crypto_data_t in_data, digest_data;
- uint8_t raw_digestbuf[SHA512_DIGEST_LEN];
+ uint8_t raw_digestbuf[SHA512_DIGEST_LENGTH];
+
+ ASSERT3U(digestlen, <=, SHA512_DIGEST_LENGTH);
/* initialize sha512-hmac mechanism and crypto data */
mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC);
@@ -822,7 +655,7 @@ zio_crypt_do_hmac(zio_crypt_key_t *key, uint8_t *data, uint_t datalen,
digest_data.cd_format = CRYPTO_DATA_RAW;
digest_data.cd_offset = 0;
- digest_data.cd_length = SHA512_DIGEST_LEN;
+ digest_data.cd_length = SHA512_DIGEST_LENGTH;
digest_data.cd_raw.iov_base = (char *)raw_digestbuf;
digest_data.cd_raw.iov_len = digest_data.cd_length;
@@ -834,12 +667,12 @@ zio_crypt_do_hmac(zio_crypt_key_t *key, uint8_t *data, uint_t datalen,
goto error;
}
- bcopy(raw_digestbuf, digestbuf, ZIO_DATA_MAC_LEN);
+ bcopy(raw_digestbuf, digestbuf, digestlen);
return (0);
error:
- bzero(digestbuf, ZIO_DATA_MAC_LEN);
+ bzero(digestbuf, digestlen);
return (ret);
}
@@ -848,9 +681,10 @@ zio_crypt_generate_iv_salt_dedup(zio_crypt_key_t *key, uint8_t *data,
uint_t datalen, uint8_t *ivbuf, uint8_t *salt)
{
int ret;
- uint8_t digestbuf[SHA512_DIGEST_LEN];
+ uint8_t digestbuf[SHA512_DIGEST_LENGTH];
- ret = zio_crypt_do_hmac(key, data, datalen, digestbuf);
+ ret = zio_crypt_do_hmac(key, data, datalen,
+ digestbuf, SHA512_DIGEST_LENGTH);
if (ret != 0)
return (ret);
@@ -1212,8 +1046,8 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
objset_phys_t *osp = data;
uint64_t intval;
boolean_t le_bswap = (should_bswap == ZFS_HOST_BYTEORDER);
- uint8_t raw_portable_mac[SHA512_DIGEST_LEN];
- uint8_t raw_local_mac[SHA512_DIGEST_LEN];
+ uint8_t raw_portable_mac[SHA512_DIGEST_LENGTH];
+ uint8_t raw_local_mac[SHA512_DIGEST_LENGTH];
/* initialize HMAC mechanism */
mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC);
@@ -1267,7 +1101,7 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
goto error;
/* store the final digest in a temporary buffer and copy what we need */
- cd.cd_length = SHA512_DIGEST_LEN;
+ cd.cd_length = SHA512_DIGEST_LENGTH;
cd.cd_raw.iov_base = (char *)raw_portable_mac;
cd.cd_raw.iov_len = cd.cd_length;
@@ -1284,7 +1118,7 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
* objects are not present, the local MAC is zeroed out.
*/
if (osp->os_userused_dnode.dn_type == DMU_OT_NONE &&
- osp->os_userused_dnode.dn_type == DMU_OT_NONE) {
+ osp->os_groupused_dnode.dn_type == DMU_OT_NONE) {
bzero(local_mac, ZIO_OBJSET_MAC_LEN);
return (0);
}
@@ -1326,7 +1160,7 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
goto error;
/* store the final digest in a temporary buffer and copy what we need */
- cd.cd_length = SHA512_DIGEST_LEN;
+ cd.cd_length = SHA512_DIGEST_LENGTH;
cd.cd_raw.iov_base = (char *)raw_local_mac;
cd.cd_raw.iov_len = cd.cd_length;
@@ -1367,7 +1201,7 @@ zio_crypt_do_indirect_mac_checksum(boolean_t generate, void *buf,
blkptr_t *bp;
int i, epb = datalen >> SPA_BLKPTRSHIFT;
SHA2_CTX ctx;
- uint8_t digestbuf[SHA512_DIGEST_LEN];
+ uint8_t digestbuf[SHA512_DIGEST_LENGTH];
/* checksum all of the MACs from the layer below */
SHA2Init(SHA512, &ctx);
@@ -1417,8 +1251,8 @@ zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
boolean_t *no_crypt)
{
int ret;
- uint64_t txtype;
- uint_t nr_src, nr_dst, lr_len, crypt_len;
+ uint64_t txtype, lr_len;
+ uint_t nr_src, nr_dst, crypt_len;
uint_t aad_len = 0, nr_iovecs = 0, total_len = 0;
iovec_t *src_iovecs = NULL, *dst_iovecs = NULL;
uint8_t *src, *dst, *slrp, *dlrp, *blkend, *aadp;
@@ -1468,7 +1302,7 @@ zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
/* allocate the iovec arrays */
if (nr_src != 0) {
src_iovecs = kmem_alloc(nr_src * sizeof (iovec_t), KM_SLEEP);
- if (!src_iovecs) {
+ if (src_iovecs == NULL) {
ret = SET_ERROR(ENOMEM);
goto error;
}
@@ -1476,7 +1310,7 @@ zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
if (nr_dst != 0) {
dst_iovecs = kmem_alloc(nr_dst * sizeof (iovec_t), KM_SLEEP);
- if (!dst_iovecs) {
+ if (dst_iovecs == NULL) {
ret = SET_ERROR(ENOMEM);
goto error;
}
@@ -1515,6 +1349,9 @@ zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
aadp += sizeof (lr_t);
aad_len += sizeof (lr_t);
+ ASSERT3P(src_iovecs, !=, NULL);
+ ASSERT3P(dst_iovecs, !=, NULL);
+
/*
* If this is a TX_WRITE record we want to encrypt everything
* except the bp if exists. If the bp does exist we want to
@@ -1655,7 +1492,7 @@ zio_crypt_init_uios_dnode(boolean_t encrypt, uint8_t *plainbuf,
if (nr_src != 0) {
src_iovecs = kmem_alloc(nr_src * sizeof (iovec_t), KM_SLEEP);
- if (!src_iovecs) {
+ if (src_iovecs == NULL) {
ret = SET_ERROR(ENOMEM);
goto error;
}
@@ -1663,7 +1500,7 @@ zio_crypt_init_uios_dnode(boolean_t encrypt, uint8_t *plainbuf,
if (nr_dst != 0) {
dst_iovecs = kmem_alloc(nr_dst * sizeof (iovec_t), KM_SLEEP);
- if (!dst_iovecs) {
+ if (dst_iovecs == NULL) {
ret = SET_ERROR(ENOMEM);
goto error;
}
@@ -1729,6 +1566,10 @@ zio_crypt_init_uios_dnode(boolean_t encrypt, uint8_t *plainbuf,
if (dnp->dn_type != DMU_OT_NONE &&
DMU_OT_IS_ENCRYPTED(dnp->dn_bonustype) &&
dnp->dn_bonuslen != 0) {
+ ASSERT3U(nr_iovecs, <, nr_src);
+ ASSERT3U(nr_iovecs, <, nr_dst);
+ ASSERT3P(src_iovecs, !=, NULL);
+ ASSERT3P(dst_iovecs, !=, NULL);
src_iovecs[nr_iovecs].iov_base = DN_BONUS(dnp);
src_iovecs[nr_iovecs].iov_len = crypt_len;
dst_iovecs[nr_iovecs].iov_base = DN_BONUS(&ddnp[i]);
@@ -1942,7 +1783,7 @@ zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key, uint8_t *salt,
tmp_ckey.ck_format = CRYPTO_KEY_RAW;
tmp_ckey.ck_data = enc_keydata;
- tmp_ckey.ck_length = BYTES_TO_BITS(keydata_len);
+ tmp_ckey.ck_length = CRYPTO_BYTES2BITS(keydata_len);
ckey = &tmp_ckey;
tmpl = NULL;