Diffstat (limited to 'module')
-rw-r--r--  module/icp/algs/sha2/sha2.c |  13
-rw-r--r--  module/zfs/Makefile.in      |   1
-rw-r--r--  module/zfs/arc.c            |  23
-rw-r--r--  module/zfs/dbuf.c           |   3
-rw-r--r--  module/zfs/dmu_objset.c     |  25
-rw-r--r--  module/zfs/dmu_send.c       |   6
-rw-r--r--  module/zfs/dsl_crypt.c      |  19
-rw-r--r--  module/zfs/hkdf.c           | 171
-rw-r--r--  module/zfs/zil.c            |  29
-rw-r--r--  module/zfs/zio.c            |   3
-rw-r--r--  module/zfs/zio_crypt.c      | 229
11 files changed, 293 insertions, 229 deletions
diff --git a/module/icp/algs/sha2/sha2.c b/module/icp/algs/sha2/sha2.c index 6f7971afd..05a2e6ad1 100644 --- a/module/icp/algs/sha2/sha2.c +++ b/module/icp/algs/sha2/sha2.c @@ -52,7 +52,8 @@ static void Encode(uint8_t *, uint32_t *, size_t); static void Encode64(uint8_t *, uint64_t *, size_t); -#if defined(__amd64) +/* userspace only supports the generic version */ +#if defined(__amd64) && defined(_KERNEL) #define SHA512Transform(ctx, in) SHA512TransformBlocks((ctx), (in), 1) #define SHA256Transform(ctx, in) SHA256TransformBlocks((ctx), (in), 1) @@ -62,7 +63,7 @@ void SHA256TransformBlocks(SHA2_CTX *ctx, const void *in, size_t num); #else static void SHA256Transform(SHA2_CTX *, const uint8_t *); static void SHA512Transform(SHA2_CTX *, const uint8_t *); -#endif /* __amd64 */ +#endif /* __amd64 && _KERNEL */ static uint8_t PADDING[128] = { 0x80, /* all zeros */ }; @@ -142,7 +143,7 @@ static uint8_t PADDING[128] = { 0x80, /* all zeros */ }; #endif /* _BIG_ENDIAN */ -#if !defined(__amd64) +#if !defined(__amd64) || !defined(_KERNEL) /* SHA256 Transform */ static void @@ -600,7 +601,7 @@ SHA512Transform(SHA2_CTX *ctx, const uint8_t *blk) ctx->state.s64[7] += h; } -#endif /* !__amd64 */ +#endif /* !__amd64 || !_KERNEL */ /* @@ -838,7 +839,7 @@ SHA2Update(SHA2_CTX *ctx, const void *inptr, size_t input_len) i = buf_len; } -#if !defined(__amd64) +#if !defined(__amd64) || !defined(_KERNEL) if (algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE) { for (; i + buf_limit - 1 < input_len; i += buf_limit) { SHA256Transform(ctx, &input[i]); @@ -866,7 +867,7 @@ SHA2Update(SHA2_CTX *ctx, const void *inptr, size_t input_len) i += block_count << 7; } } -#endif /* !__amd64 */ +#endif /* !__amd64 || !_KERNEL */ /* * general optimization: diff --git a/module/zfs/Makefile.in b/module/zfs/Makefile.in index 72f28a89d..606b0a47c 100644 --- a/module/zfs/Makefile.in +++ b/module/zfs/Makefile.in @@ -41,6 +41,7 @@ $(MODULE)-objs += dsl_synctask.o $(MODULE)-objs += edonr_zfs.o $(MODULE)-objs += fm.o $(MODULE)-objs += gzip.o +$(MODULE)-objs += hkdf.o $(MODULE)-objs += lzjb.o $(MODULE)-objs += lz4.o $(MODULE)-objs += metaslab.o diff --git a/module/zfs/arc.c b/module/zfs/arc.c index 992e57ce6..1329e8e83 100644 --- a/module/zfs/arc.c +++ b/module/zfs/arc.c @@ -6698,6 +6698,9 @@ arc_write_ready(zio_t *zio) HDR_SET_PSIZE(hdr, psize); arc_hdr_set_compress(hdr, compress); + if (zio->io_error != 0 || psize == 0) + goto out; + /* * Fill the hdr with data. If the buffer is encrypted we have no choice * but to copy the data into b_radb. If the hdr is compressed, the data @@ -6713,6 +6716,7 @@ arc_write_ready(zio_t *zio) * the data into it; otherwise, we share the data directly if we can. */ if (ARC_BUF_ENCRYPTED(buf)) { + ASSERT3U(psize, >, 0); ASSERT(ARC_BUF_COMPRESSED(buf)); arc_hdr_alloc_abd(hdr, B_TRUE); abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize); @@ -6745,6 +6749,7 @@ arc_write_ready(zio_t *zio) arc_share_buf(hdr, buf); } +out: arc_hdr_verify(hdr, bp); spl_fstrans_unmark(cookie); } @@ -8321,7 +8326,7 @@ l2arc_apply_transforms(spa_t *spa, arc_buf_hdr_t *hdr, uint64_t asize, boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS); dsl_crypto_key_t *dck = NULL; uint8_t mac[ZIO_DATA_MAC_LEN] = { 0 }; - boolean_t no_crypt; + boolean_t no_crypt = B_FALSE; ASSERT((HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF && !HDR_COMPRESSION_ENABLED(hdr)) || @@ -8333,6 +8338,15 @@ l2arc_apply_transforms(spa_t *spa, arc_buf_hdr_t *hdr, uint64_t asize, * and copy the data. 
This may be done to elimiate a depedency on a * shared buffer or to reallocate the buffer to match asize. */ + if (HDR_HAS_RABD(hdr) && asize != psize) { + ASSERT3U(size, ==, psize); + to_write = abd_alloc_for_io(asize, ismd); + abd_copy(to_write, hdr->b_crypt_hdr.b_rabd, size); + if (size != asize) + abd_zero_off(to_write, size, asize - size); + goto out; + } + if ((compress == ZIO_COMPRESS_OFF || HDR_COMPRESSION_ENABLED(hdr)) && !HDR_ENCRYPTED(hdr)) { ASSERT3U(size, ==, psize); @@ -8377,11 +8391,8 @@ l2arc_apply_transforms(spa_t *spa, arc_buf_hdr_t *hdr, uint64_t asize, if (ret != 0) goto error; - if (no_crypt) { - spa_keystore_dsl_key_rele(spa, dck, FTAG); - abd_free(eabd); - goto out; - } + if (no_crypt) + abd_copy(eabd, to_write, psize); if (psize != asize) abd_zero_off(eabd, psize, asize - psize); diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c index 1ea4c757e..537f22011 100644 --- a/module/zfs/dbuf.c +++ b/module/zfs/dbuf.c @@ -1175,7 +1175,7 @@ dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg) * or (if there a no active holders) * just null out the current db_data pointer. */ - ASSERT(dr->dr_txg >= txg - 2); + ASSERT3U(dr->dr_txg, >=, txg - 2); if (db->db_blkid == DMU_BONUS_BLKID) { dnode_t *dn = DB_DNODE(db); int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots); @@ -3458,7 +3458,6 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx) dn->dn_type, psize, lsize, compress_type); } else if (compress_type != ZIO_COMPRESS_OFF) { ASSERT3U(type, ==, ARC_BUFC_DATA); - int lsize = arc_buf_lsize(*datap); *datap = arc_alloc_compressed_buf(os->os_spa, db, psize, lsize, compress_type); } else { diff --git a/module/zfs/dmu_objset.c b/module/zfs/dmu_objset.c index 4445121a0..609e43fe8 100644 --- a/module/zfs/dmu_objset.c +++ b/module/zfs/dmu_objset.c @@ -706,7 +706,9 @@ dmu_objset_own(const char *name, dmu_objset_type_t type, dsl_pool_rele(dp, FTAG); - if (dmu_objset_userobjspace_upgradable(*osp)) + /* user accounting requires the dataset to be decrypted */ + if (dmu_objset_userobjspace_upgradable(*osp) && + (ds->ds_dir->dd_crypto_obj == 0 || decrypt)) dmu_objset_userobjspace_upgrade(*osp); return (0); @@ -932,7 +934,7 @@ dmu_objset_create_impl_dnstats(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp, if (blksz == 0) blksz = DNODE_BLOCK_SIZE; - if (blksz == 0) + if (ibs == 0) ibs = DN_MAX_INDBLKSHIFT; if (ds != NULL) @@ -1096,7 +1098,7 @@ dmu_objset_create_sync(void *arg, dmu_tx_t *tx) } /* - * The doca_userfunc() will write out some data that needs to be + * The doca_userfunc() may write out some data that needs to be * encrypted if the dataset is encrypted (specifically the root * directory). This data must be written out before the encryption * key mapping is removed by dsl_dataset_rele_flags(). 
Force the @@ -1107,10 +1109,14 @@ dmu_objset_create_sync(void *arg, dmu_tx_t *tx) dsl_dataset_t *tmpds = NULL; boolean_t need_sync_done = B_FALSE; + mutex_enter(&ds->ds_lock); + ds->ds_owner = FTAG; + mutex_exit(&ds->ds_lock); + rzio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED); - tmpds = txg_list_remove(&dp->dp_dirty_datasets, tx->tx_txg); + tmpds = txg_list_remove_this(&dp->dp_dirty_datasets, ds, + tx->tx_txg); if (tmpds != NULL) { - ASSERT3P(ds, ==, tmpds); dsl_dataset_sync(ds, rzio, tx); need_sync_done = B_TRUE; } @@ -1120,9 +1126,9 @@ dmu_objset_create_sync(void *arg, dmu_tx_t *tx) taskq_wait(dp->dp_sync_taskq); rzio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED); - tmpds = txg_list_remove(&dp->dp_dirty_datasets, tx->tx_txg); + tmpds = txg_list_remove_this(&dp->dp_dirty_datasets, ds, + tx->tx_txg); if (tmpds != NULL) { - ASSERT3P(ds, ==, tmpds); dmu_buf_rele(ds->ds_dbuf, ds); dsl_dataset_sync(ds, rzio, tx); } @@ -1130,6 +1136,10 @@ dmu_objset_create_sync(void *arg, dmu_tx_t *tx) if (need_sync_done) dsl_dataset_sync_done(ds, tx); + + mutex_enter(&ds->ds_lock); + ds->ds_owner = NULL; + mutex_exit(&ds->ds_lock); } spa_history_log_internal_ds(ds, "create", tx, ""); @@ -1336,6 +1346,7 @@ dmu_objset_upgrade_stop(objset_t *os) mutex_exit(&os->os_upgrade_lock); taskq_cancel_id(os->os_spa->spa_upgrade_taskq, id); + txg_wait_synced(os->os_spa->spa_dsl_pool, 0); } else { mutex_exit(&os->os_upgrade_lock); } diff --git a/module/zfs/dmu_send.c b/module/zfs/dmu_send.c index c63ab43e1..235e832d7 100644 --- a/module/zfs/dmu_send.c +++ b/module/zfs/dmu_send.c @@ -517,7 +517,7 @@ dump_dnode(dmu_sendarg_t *dsp, const blkptr_t *bp, uint64_t object, dnode_phys_t *dnp) { struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object); - int bonuslen = P2ROUNDUP(dnp->dn_bonuslen, 8); + int bonuslen; if (object < dsp->dsa_resume_object) { /* @@ -558,6 +558,8 @@ dump_dnode(dmu_sendarg_t *dsp, const blkptr_t *bp, uint64_t object, drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE) drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE; + bonuslen = P2ROUNDUP(dnp->dn_bonuslen, 8); + if ((dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW)) { ASSERT(BP_IS_ENCRYPTED(bp)); @@ -571,7 +573,7 @@ dump_dnode(dmu_sendarg_t *dsp, const blkptr_t *bp, uint64_t object, /* * Since we encrypt the entire bonus area, the (raw) part - * beyond the the bonuslen is actually nonzero, so we need + * beyond the bonuslen is actually nonzero, so we need * to send it. 
*/ if (bonuslen != 0) { diff --git a/module/zfs/dsl_crypt.c b/module/zfs/dsl_crypt.c index af46dd753..3c2babfda 100644 --- a/module/zfs/dsl_crypt.c +++ b/module/zfs/dsl_crypt.c @@ -90,9 +90,9 @@ dsl_wrapping_key_free(dsl_wrapping_key_t *wkey) if (wkey->wk_key.ck_data) { bzero(wkey->wk_key.ck_data, - BITS_TO_BYTES(wkey->wk_key.ck_length)); + CRYPTO_BITS2BYTES(wkey->wk_key.ck_length)); kmem_free(wkey->wk_key.ck_data, - BITS_TO_BYTES(wkey->wk_key.ck_length)); + CRYPTO_BITS2BYTES(wkey->wk_key.ck_length)); } refcount_destroy(&wkey->wk_refcnt); @@ -119,7 +119,7 @@ dsl_wrapping_key_create(uint8_t *wkeydata, zfs_keyformat_t keyformat, } wkey->wk_key.ck_format = CRYPTO_KEY_RAW; - wkey->wk_key.ck_length = BYTES_TO_BITS(WRAPPING_KEY_LEN); + wkey->wk_key.ck_length = CRYPTO_BYTES2BITS(WRAPPING_KEY_LEN); bcopy(wkeydata, wkey->wk_key.ck_data, WRAPPING_KEY_LEN); /* initialize the rest of the struct */ @@ -433,7 +433,6 @@ dsl_crypto_can_set_keylocation(const char *dsname, const char *keylocation) int ret = 0; dsl_dir_t *dd = NULL; dsl_pool_t *dp = NULL; - dsl_wrapping_key_t *wkey = NULL; uint64_t rddobj; /* hold the dsl dir */ @@ -472,16 +471,12 @@ dsl_crypto_can_set_keylocation(const char *dsname, const char *keylocation) goto out; } - if (wkey != NULL) - dsl_wrapping_key_rele(wkey, FTAG); dsl_dir_rele(dd, FTAG); dsl_pool_rele(dp, FTAG); return (0); out: - if (wkey != NULL) - dsl_wrapping_key_rele(wkey, FTAG); if (dd != NULL) dsl_dir_rele(dd, FTAG); if (dp != NULL) @@ -1831,6 +1826,8 @@ dsl_dataset_create_crypt_sync(uint64_t dsobj, dsl_dir_t *dd, wkey->wk_ddobj = dd->dd_object; } + ASSERT3P(wkey, !=, NULL); + /* Create or clone the DSL crypto key and activate the feature */ dd->dd_crypto_obj = dsl_crypto_key_create_sync(crypt, wkey, tx); VERIFY0(zap_add(dp->dp_meta_objset, dd->dd_object, @@ -2488,7 +2485,8 @@ spa_do_crypt_mac_abd(boolean_t generate, spa_t *spa, uint64_t dsobj, abd_t *abd, goto error; /* perform the hmac */ - ret = zio_crypt_do_hmac(&dck->dck_key, buf, datalen, digestbuf); + ret = zio_crypt_do_hmac(&dck->dck_key, buf, datalen, + digestbuf, ZIO_DATA_MAC_LEN); if (ret != 0) goto error; @@ -2604,8 +2602,7 @@ error: abd_return_buf(cabd, cipherbuf, datalen); } - if (dck != NULL) - spa_keystore_dsl_key_rele(spa, dck, FTAG); + spa_keystore_dsl_key_rele(spa, dck, FTAG); return (ret); } diff --git a/module/zfs/hkdf.c b/module/zfs/hkdf.c new file mode 100644 index 000000000..14265472d --- /dev/null +++ b/module/zfs/hkdf.c @@ -0,0 +1,171 @@ +/* + * CDDL HEADER START + * + * This file and its contents are supplied under the terms of the + * Common Development and Distribution License ("CDDL"), version 1.0. + * You may only use this file in accordance with the terms of version + * 1.0 of the CDDL. + * + * A full copy of the text of the CDDL should have accompanied this + * source. A copy of the CDDL is also available via the Internet at + * http://www.illumos.org/license/CDDL. + * + * CDDL HEADER END + */ + +/* + * Copyright (c) 2017, Datto, Inc. All rights reserved. 
+ */ + +#include <sys/crypto/api.h> +#include <sys/sha2.h> +#include <sys/hkdf.h> + +static int +hkdf_sha512_extract(uint8_t *salt, uint_t salt_len, uint8_t *key_material, + uint_t km_len, uint8_t *out_buf) +{ + int ret; + crypto_mechanism_t mech; + crypto_key_t key; + crypto_data_t input_cd, output_cd; + + /* initialize HMAC mechanism */ + mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC); + mech.cm_param = NULL; + mech.cm_param_len = 0; + + /* initialize the salt as a crypto key */ + key.ck_format = CRYPTO_KEY_RAW; + key.ck_length = CRYPTO_BYTES2BITS(salt_len); + key.ck_data = salt; + + /* initialize crypto data for the input and output data */ + input_cd.cd_format = CRYPTO_DATA_RAW; + input_cd.cd_offset = 0; + input_cd.cd_length = km_len; + input_cd.cd_raw.iov_base = (char *)key_material; + input_cd.cd_raw.iov_len = input_cd.cd_length; + + output_cd.cd_format = CRYPTO_DATA_RAW; + output_cd.cd_offset = 0; + output_cd.cd_length = SHA512_DIGEST_LENGTH; + output_cd.cd_raw.iov_base = (char *)out_buf; + output_cd.cd_raw.iov_len = output_cd.cd_length; + + ret = crypto_mac(&mech, &input_cd, &key, NULL, &output_cd, NULL); + if (ret != CRYPTO_SUCCESS) + return (SET_ERROR(EIO)); + + return (0); +} + +static int +hkdf_sha512_expand(uint8_t *extract_key, uint8_t *info, uint_t info_len, + uint8_t *out_buf, uint_t out_len) +{ + int ret; + crypto_mechanism_t mech; + crypto_context_t ctx; + crypto_key_t key; + crypto_data_t T_cd, info_cd, c_cd; + uint_t i, T_len = 0, pos = 0; + uint8_t c; + uint_t N = (out_len + SHA512_DIGEST_LENGTH) / SHA512_DIGEST_LENGTH; + uint8_t T[SHA512_DIGEST_LENGTH]; + + if (N > 255) + return (SET_ERROR(EINVAL)); + + /* initialize HMAC mechanism */ + mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC); + mech.cm_param = NULL; + mech.cm_param_len = 0; + + /* initialize the salt as a crypto key */ + key.ck_format = CRYPTO_KEY_RAW; + key.ck_length = CRYPTO_BYTES2BITS(SHA512_DIGEST_LENGTH); + key.ck_data = extract_key; + + /* initialize crypto data for the input and output data */ + T_cd.cd_format = CRYPTO_DATA_RAW; + T_cd.cd_offset = 0; + T_cd.cd_raw.iov_base = (char *)T; + + c_cd.cd_format = CRYPTO_DATA_RAW; + c_cd.cd_offset = 0; + c_cd.cd_length = 1; + c_cd.cd_raw.iov_base = (char *)&c; + c_cd.cd_raw.iov_len = c_cd.cd_length; + + info_cd.cd_format = CRYPTO_DATA_RAW; + info_cd.cd_offset = 0; + info_cd.cd_length = info_len; + info_cd.cd_raw.iov_base = (char *)info; + info_cd.cd_raw.iov_len = info_cd.cd_length; + + for (i = 1; i <= N; i++) { + c = i; + + T_cd.cd_length = T_len; + T_cd.cd_raw.iov_len = T_cd.cd_length; + + ret = crypto_mac_init(&mech, &key, NULL, &ctx, NULL); + if (ret != CRYPTO_SUCCESS) + return (SET_ERROR(EIO)); + + ret = crypto_mac_update(ctx, &T_cd, NULL); + if (ret != CRYPTO_SUCCESS) + return (SET_ERROR(EIO)); + + ret = crypto_mac_update(ctx, &info_cd, NULL); + if (ret != CRYPTO_SUCCESS) + return (SET_ERROR(EIO)); + + ret = crypto_mac_update(ctx, &c_cd, NULL); + if (ret != CRYPTO_SUCCESS) + return (SET_ERROR(EIO)); + + T_len = SHA512_DIGEST_LENGTH; + T_cd.cd_length = T_len; + T_cd.cd_raw.iov_len = T_cd.cd_length; + + ret = crypto_mac_final(ctx, &T_cd, NULL); + if (ret != CRYPTO_SUCCESS) + return (SET_ERROR(EIO)); + + bcopy(T, out_buf + pos, + (i != N) ? SHA512_DIGEST_LENGTH : (out_len - pos)); + pos += SHA512_DIGEST_LENGTH; + } + + return (0); +} + +/* + * HKDF is designed to be a relatively fast function for deriving keys from a + * master key + a salt. 
We use this function to generate new encryption keys + * so as to avoid hitting the cryptographic limits of the underlying + * encryption modes. Note that, for the sake of deriving encryption keys, the + * info parameter is called the "salt" everywhere else in the code. + */ +int +hkdf_sha512(uint8_t *key_material, uint_t km_len, uint8_t *salt, + uint_t salt_len, uint8_t *info, uint_t info_len, uint8_t *output_key, + uint_t out_len) +{ + int ret; + uint8_t extract_key[SHA512_DIGEST_LENGTH]; + + ret = hkdf_sha512_extract(salt, salt_len, key_material, km_len, + extract_key); + if (ret != 0) + return (ret); + + ret = hkdf_sha512_expand(extract_key, info, info_len, output_key, + out_len); + if (ret != 0) + return (ret); + + return (0); +} diff --git a/module/zfs/zil.c b/module/zfs/zil.c index f15e8cddb..e90e583ae 100644 --- a/module/zfs/zil.c +++ b/module/zfs/zil.c @@ -2115,6 +2115,21 @@ zil_suspend(const char *osname, void **cookiep) return (0); } + /* + * The ZIL has work to do. Ensure that the associated encryption + * key will remain mapped while we are committing the log by + * grabbing a reference to it. If the key isn't loaded we have no + * choice but to return an error until the wrapping key is loaded. + */ + if (os->os_encrypted && spa_keystore_create_mapping(os->os_spa, + dmu_objset_ds(os), FTAG) != 0) { + zilog->zl_suspend--; + mutex_exit(&zilog->zl_lock); + dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag); + dsl_dataset_rele(dmu_objset_ds(os), suspend_tag); + return (SET_ERROR(EBUSY)); + } + zilog->zl_suspending = B_TRUE; mutex_exit(&zilog->zl_lock); @@ -2127,6 +2142,20 @@ zil_suspend(const char *osname, void **cookiep) cv_broadcast(&zilog->zl_cv_suspend); mutex_exit(&zilog->zl_lock); + if (os->os_encrypted) { + /* + * Encrypted datasets need to wait for all data to be + * synced out before removing the mapping. + * + * XXX: Depending on the number of datasets with + * outstanding ZIL data on a given log device, this + * might cause spa_offline_log() to take a long time. 
+ */ + txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg); + VERIFY0(spa_keystore_remove_mapping(os->os_spa, + dmu_objset_id(os), FTAG)); + } + if (cookiep == NULL) zil_resume(os); else diff --git a/module/zfs/zio.c b/module/zfs/zio.c index 057a1405f..508011a86 100644 --- a/module/zfs/zio.c +++ b/module/zfs/zio.c @@ -2518,13 +2518,14 @@ zio_write_gang_block(zio_t *pio) zp.zp_checksum = gio->io_prop.zp_checksum; zp.zp_compress = ZIO_COMPRESS_OFF; - zp.zp_encrypt = gio->io_prop.zp_encrypt; zp.zp_type = DMU_OT_NONE; zp.zp_level = 0; zp.zp_copies = gio->io_prop.zp_copies; zp.zp_dedup = B_FALSE; zp.zp_dedup_verify = B_FALSE; zp.zp_nopwrite = B_FALSE; + zp.zp_encrypt = gio->io_prop.zp_encrypt; + zp.zp_byteorder = gio->io_prop.zp_byteorder; bzero(zp.zp_salt, ZIO_DATA_SALT_LEN); bzero(zp.zp_iv, ZIO_DATA_IV_LEN); bzero(zp.zp_mac, ZIO_DATA_MAC_LEN); diff --git a/module/zfs/zio_crypt.c b/module/zfs/zio_crypt.c index 8fcf51550..6238e6f74 100644 --- a/module/zfs/zio_crypt.c +++ b/module/zfs/zio_crypt.c @@ -25,6 +25,7 @@ #include <sys/zio.h> #include <sys/zil.h> #include <sys/sha2.h> +#include <sys/hkdf.h> /* * This file is responsible for handling all of the details of generating @@ -198,176 +199,6 @@ zio_crypt_info_t zio_crypt_table[ZIO_CRYPT_FUNCTIONS] = { {SUN_CKM_AES_GCM, ZC_TYPE_GCM, 32, "aes-256-gcm"} }; -static int -hkdf_sha512_extract(uint8_t *salt, uint_t salt_len, uint8_t *key_material, - uint_t km_len, uint8_t *out_buf) -{ - int ret; - crypto_mechanism_t mech; - crypto_key_t key; - crypto_data_t input_cd, output_cd; - - /* initialize HMAC mechanism */ - mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC); - mech.cm_param = NULL; - mech.cm_param_len = 0; - - /* initialize the salt as a crypto key */ - key.ck_format = CRYPTO_KEY_RAW; - key.ck_length = BYTES_TO_BITS(salt_len); - key.ck_data = salt; - - /* initialize crypto data for the input and output data */ - input_cd.cd_format = CRYPTO_DATA_RAW; - input_cd.cd_offset = 0; - input_cd.cd_length = km_len; - input_cd.cd_raw.iov_base = (char *)key_material; - input_cd.cd_raw.iov_len = input_cd.cd_length; - - output_cd.cd_format = CRYPTO_DATA_RAW; - output_cd.cd_offset = 0; - output_cd.cd_length = SHA512_DIGEST_LEN; - output_cd.cd_raw.iov_base = (char *)out_buf; - output_cd.cd_raw.iov_len = output_cd.cd_length; - - ret = crypto_mac(&mech, &input_cd, &key, NULL, &output_cd, NULL); - if (ret != CRYPTO_SUCCESS) { - ret = SET_ERROR(EIO); - goto error; - } - - return (0); - -error: - return (ret); -} - -static int -hkdf_sha512_expand(uint8_t *extract_key, uint8_t *info, uint_t info_len, - uint8_t *out_buf, uint_t out_len) -{ - int ret; - crypto_mechanism_t mech; - crypto_context_t ctx; - crypto_key_t key; - crypto_data_t T_cd, info_cd, c_cd; - uint_t i, T_len = 0, pos = 0; - uint8_t c; - uint_t N = (out_len + SHA512_DIGEST_LEN) / SHA512_DIGEST_LEN; - uint8_t T[SHA512_DIGEST_LEN]; - - if (N > 255) - return (SET_ERROR(EINVAL)); - - /* initialize HMAC mechanism */ - mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC); - mech.cm_param = NULL; - mech.cm_param_len = 0; - - /* initialize the salt as a crypto key */ - key.ck_format = CRYPTO_KEY_RAW; - key.ck_length = BYTES_TO_BITS(SHA512_DIGEST_LEN); - key.ck_data = extract_key; - - /* initialize crypto data for the input and output data */ - T_cd.cd_format = CRYPTO_DATA_RAW; - T_cd.cd_offset = 0; - T_cd.cd_raw.iov_base = (char *)T; - - c_cd.cd_format = CRYPTO_DATA_RAW; - c_cd.cd_offset = 0; - c_cd.cd_length = 1; - c_cd.cd_raw.iov_base = (char *)&c; - c_cd.cd_raw.iov_len = c_cd.cd_length; - - 
info_cd.cd_format = CRYPTO_DATA_RAW; - info_cd.cd_offset = 0; - info_cd.cd_length = info_len; - info_cd.cd_raw.iov_base = (char *)info; - info_cd.cd_raw.iov_len = info_cd.cd_length; - - for (i = 1; i <= N; i++) { - c = i; - - T_cd.cd_length = T_len; - T_cd.cd_raw.iov_len = T_cd.cd_length; - - ret = crypto_mac_init(&mech, &key, NULL, &ctx, NULL); - if (ret != CRYPTO_SUCCESS) { - ret = SET_ERROR(EIO); - goto error; - } - - ret = crypto_mac_update(ctx, &T_cd, NULL); - if (ret != CRYPTO_SUCCESS) { - ret = SET_ERROR(EIO); - goto error; - } - - ret = crypto_mac_update(ctx, &info_cd, NULL); - if (ret != CRYPTO_SUCCESS) { - ret = SET_ERROR(EIO); - goto error; - } - - ret = crypto_mac_update(ctx, &c_cd, NULL); - if (ret != CRYPTO_SUCCESS) { - ret = SET_ERROR(EIO); - goto error; - } - - T_len = SHA512_DIGEST_LEN; - T_cd.cd_length = T_len; - T_cd.cd_raw.iov_len = T_cd.cd_length; - - ret = crypto_mac_final(ctx, &T_cd, NULL); - if (ret != CRYPTO_SUCCESS) { - ret = SET_ERROR(EIO); - goto error; - } - - bcopy(T, out_buf + pos, - (i != N) ? SHA512_DIGEST_LEN : (out_len - pos)); - pos += SHA512_DIGEST_LEN; - } - - return (0); - -error: - return (ret); -} - -/* - * HKDF is designed to be a relatively fast function for deriving keys from a - * master key + a salt. We use this function to generate new encryption keys - * so as to avoid hitting the cryptographic limits of the underlying - * encryption modes. Note that, for the sake of deriving encryption keys, the - * info parameter is called the "salt" everywhere else in the code. - */ -static int -hkdf_sha512(uint8_t *key_material, uint_t km_len, uint8_t *salt, - uint_t salt_len, uint8_t *info, uint_t info_len, uint8_t *output_key, - uint_t out_len) -{ - int ret; - uint8_t extract_key[SHA512_DIGEST_LEN]; - - ret = hkdf_sha512_extract(salt, salt_len, key_material, km_len, - extract_key); - if (ret != 0) - goto error; - - ret = hkdf_sha512_expand(extract_key, info, info_len, output_key, - out_len); - if (ret != 0) - goto error; - - return (0); - -error: - return (ret); -} - void zio_crypt_key_destroy(zio_crypt_key_t *key) { @@ -421,11 +252,11 @@ zio_crypt_key_init(uint64_t crypt, zio_crypt_key_t *key) /* initialize keys for the ICP */ key->zk_current_key.ck_format = CRYPTO_KEY_RAW; key->zk_current_key.ck_data = key->zk_current_keydata; - key->zk_current_key.ck_length = BYTES_TO_BITS(keydata_len); + key->zk_current_key.ck_length = CRYPTO_BYTES2BITS(keydata_len); key->zk_hmac_key.ck_format = CRYPTO_KEY_RAW; key->zk_hmac_key.ck_data = &key->zk_hmac_key; - key->zk_hmac_key.ck_length = BYTES_TO_BITS(SHA512_HMAC_KEYLEN); + key->zk_hmac_key.ck_length = CRYPTO_BYTES2BITS(SHA512_HMAC_KEYLEN); /* * Initialize the crypto templates. 
It's ok if this fails because @@ -588,10 +419,10 @@ zio_do_crypt_uio(boolean_t encrypt, uint64_t crypt, crypto_key_t *key, mech.cm_param_len = sizeof (CK_AES_CCM_PARAMS); } else { gcmp.ulIvLen = ZIO_DATA_IV_LEN; - gcmp.ulIvBits = BYTES_TO_BITS(ZIO_DATA_IV_LEN); + gcmp.ulIvBits = CRYPTO_BYTES2BITS(ZIO_DATA_IV_LEN); gcmp.ulAADLen = auth_len; gcmp.pAAD = authbuf; - gcmp.ulTagBits = BYTES_TO_BITS(maclen); + gcmp.ulTagBits = CRYPTO_BYTES2BITS(maclen); gcmp.pIv = ivbuf; mech.cm_param = (char *)(&gcmp); @@ -748,11 +579,11 @@ zio_crypt_key_unwrap(crypto_key_t *cwkey, uint64_t crypt, uint64_t guid, /* initialize keys for ICP */ key->zk_current_key.ck_format = CRYPTO_KEY_RAW; key->zk_current_key.ck_data = key->zk_current_keydata; - key->zk_current_key.ck_length = BYTES_TO_BITS(keydata_len); + key->zk_current_key.ck_length = CRYPTO_BYTES2BITS(keydata_len); key->zk_hmac_key.ck_format = CRYPTO_KEY_RAW; key->zk_hmac_key.ck_data = key->zk_hmac_keydata; - key->zk_hmac_key.ck_length = BYTES_TO_BITS(SHA512_HMAC_KEYLEN); + key->zk_hmac_key.ck_length = CRYPTO_BYTES2BITS(SHA512_HMAC_KEYLEN); /* * Initialize the crypto templates. It's ok if this fails because @@ -801,12 +632,14 @@ error: int zio_crypt_do_hmac(zio_crypt_key_t *key, uint8_t *data, uint_t datalen, - uint8_t *digestbuf) + uint8_t *digestbuf, uint_t digestlen) { int ret; crypto_mechanism_t mech; crypto_data_t in_data, digest_data; - uint8_t raw_digestbuf[SHA512_DIGEST_LEN]; + uint8_t raw_digestbuf[SHA512_DIGEST_LENGTH]; + + ASSERT3U(digestlen, <=, SHA512_DIGEST_LENGTH); /* initialize sha512-hmac mechanism and crypto data */ mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC); @@ -822,7 +655,7 @@ zio_crypt_do_hmac(zio_crypt_key_t *key, uint8_t *data, uint_t datalen, digest_data.cd_format = CRYPTO_DATA_RAW; digest_data.cd_offset = 0; - digest_data.cd_length = SHA512_DIGEST_LEN; + digest_data.cd_length = SHA512_DIGEST_LENGTH; digest_data.cd_raw.iov_base = (char *)raw_digestbuf; digest_data.cd_raw.iov_len = digest_data.cd_length; @@ -834,12 +667,12 @@ zio_crypt_do_hmac(zio_crypt_key_t *key, uint8_t *data, uint_t datalen, goto error; } - bcopy(raw_digestbuf, digestbuf, ZIO_DATA_MAC_LEN); + bcopy(raw_digestbuf, digestbuf, digestlen); return (0); error: - bzero(digestbuf, ZIO_DATA_MAC_LEN); + bzero(digestbuf, digestlen); return (ret); } @@ -848,9 +681,10 @@ zio_crypt_generate_iv_salt_dedup(zio_crypt_key_t *key, uint8_t *data, uint_t datalen, uint8_t *ivbuf, uint8_t *salt) { int ret; - uint8_t digestbuf[SHA512_DIGEST_LEN]; + uint8_t digestbuf[SHA512_DIGEST_LENGTH]; - ret = zio_crypt_do_hmac(key, data, datalen, digestbuf); + ret = zio_crypt_do_hmac(key, data, datalen, + digestbuf, SHA512_DIGEST_LENGTH); if (ret != 0) return (ret); @@ -1212,8 +1046,8 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen, objset_phys_t *osp = data; uint64_t intval; boolean_t le_bswap = (should_bswap == ZFS_HOST_BYTEORDER); - uint8_t raw_portable_mac[SHA512_DIGEST_LEN]; - uint8_t raw_local_mac[SHA512_DIGEST_LEN]; + uint8_t raw_portable_mac[SHA512_DIGEST_LENGTH]; + uint8_t raw_local_mac[SHA512_DIGEST_LENGTH]; /* initialize HMAC mechanism */ mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC); @@ -1267,7 +1101,7 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen, goto error; /* store the final digest in a temporary buffer and copy what we need */ - cd.cd_length = SHA512_DIGEST_LEN; + cd.cd_length = SHA512_DIGEST_LENGTH; cd.cd_raw.iov_base = (char *)raw_portable_mac; cd.cd_raw.iov_len = cd.cd_length; @@ -1284,7 +1118,7 @@ 
zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen, * objects are not present, the local MAC is zeroed out. */ if (osp->os_userused_dnode.dn_type == DMU_OT_NONE && - osp->os_userused_dnode.dn_type == DMU_OT_NONE) { + osp->os_groupused_dnode.dn_type == DMU_OT_NONE) { bzero(local_mac, ZIO_OBJSET_MAC_LEN); return (0); } @@ -1326,7 +1160,7 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen, goto error; /* store the final digest in a temporary buffer and copy what we need */ - cd.cd_length = SHA512_DIGEST_LEN; + cd.cd_length = SHA512_DIGEST_LENGTH; cd.cd_raw.iov_base = (char *)raw_local_mac; cd.cd_raw.iov_len = cd.cd_length; @@ -1367,7 +1201,7 @@ zio_crypt_do_indirect_mac_checksum(boolean_t generate, void *buf, blkptr_t *bp; int i, epb = datalen >> SPA_BLKPTRSHIFT; SHA2_CTX ctx; - uint8_t digestbuf[SHA512_DIGEST_LEN]; + uint8_t digestbuf[SHA512_DIGEST_LENGTH]; /* checksum all of the MACs from the layer below */ SHA2Init(SHA512, &ctx); @@ -1468,7 +1302,7 @@ zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf, /* allocate the iovec arrays */ if (nr_src != 0) { src_iovecs = kmem_alloc(nr_src * sizeof (iovec_t), KM_SLEEP); - if (!src_iovecs) { + if (src_iovecs == NULL) { ret = SET_ERROR(ENOMEM); goto error; } @@ -1476,7 +1310,7 @@ zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf, if (nr_dst != 0) { dst_iovecs = kmem_alloc(nr_dst * sizeof (iovec_t), KM_SLEEP); - if (!dst_iovecs) { + if (dst_iovecs == NULL) { ret = SET_ERROR(ENOMEM); goto error; } @@ -1515,6 +1349,9 @@ zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf, aadp += sizeof (lr_t); aad_len += sizeof (lr_t); + ASSERT3P(src_iovecs, !=, NULL); + ASSERT3P(dst_iovecs, !=, NULL); + /* * If this is a TX_WRITE record we want to encrypt everything * except the bp if exists. If the bp does exist we want to @@ -1655,7 +1492,7 @@ zio_crypt_init_uios_dnode(boolean_t encrypt, uint8_t *plainbuf, if (nr_src != 0) { src_iovecs = kmem_alloc(nr_src * sizeof (iovec_t), KM_SLEEP); - if (!src_iovecs) { + if (src_iovecs == NULL) { ret = SET_ERROR(ENOMEM); goto error; } @@ -1663,7 +1500,7 @@ zio_crypt_init_uios_dnode(boolean_t encrypt, uint8_t *plainbuf, if (nr_dst != 0) { dst_iovecs = kmem_alloc(nr_dst * sizeof (iovec_t), KM_SLEEP); - if (!dst_iovecs) { + if (dst_iovecs == NULL) { ret = SET_ERROR(ENOMEM); goto error; } @@ -1729,6 +1566,10 @@ zio_crypt_init_uios_dnode(boolean_t encrypt, uint8_t *plainbuf, if (dnp->dn_type != DMU_OT_NONE && DMU_OT_IS_ENCRYPTED(dnp->dn_bonustype) && dnp->dn_bonuslen != 0) { + ASSERT3U(nr_iovecs, <, nr_src); + ASSERT3U(nr_iovecs, <, nr_dst); + ASSERT3P(src_iovecs, !=, NULL); + ASSERT3P(dst_iovecs, !=, NULL); src_iovecs[nr_iovecs].iov_base = DN_BONUS(dnp); src_iovecs[nr_iovecs].iov_len = crypt_len; dst_iovecs[nr_iovecs].iov_base = DN_BONUS(&ddnp[i]); @@ -1942,7 +1783,7 @@ zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key, uint8_t *salt, tmp_ckey.ck_format = CRYPTO_KEY_RAW; tmp_ckey.ck_data = enc_keydata; - tmp_ckey.ck_length = BYTES_TO_BITS(keydata_len); + tmp_ckey.ck_length = CRYPTO_BYTES2BITS(keydata_len); ckey = &tmp_ckey; tmpl = NULL; |
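
Note: the bulk of this change factors the HKDF-SHA512 code out of zio_crypt.c into the new module/zfs/hkdf.c and exposes it as hkdf_sha512(), which zio_crypt.c now consumes through <sys/hkdf.h>. As a rough, hypothetical illustration of the new interface (not code from this commit; the key material, salt, info label, and output length below are made-up example values):

/*
 * Hypothetical caller of the hkdf_sha512() helper added by this change.
 * All input values are illustrative; ZFS supplies its own key material,
 * salt, and "info" label when deriving encryption keys.
 */
#include <sys/hkdf.h>
#include <sys/sha2.h>

static int
derive_example_key(uint8_t *key_material, uint_t km_len,
    uint8_t *out_key, uint_t out_len)
{
        uint8_t salt[8] = { 0 };                /* example salt */
        uint8_t info[] = "EXAMPLE LABEL";       /* example "info" input */

        /*
         * hkdf_sha512() runs HKDF-Extract over (salt, key_material) and
         * then HKDF-Expand with the info label until out_len bytes have
         * been written to out_key. It returns 0 on success, EINVAL when
         * the request would need more than 255 expansion rounds, or EIO
         * if an underlying crypto_mac*() call fails.
         */
        return (hkdf_sha512(key_material, km_len, salt, sizeof (salt),
            info, sizeof (info) - 1, out_key, out_len));
}

As the comment in hkdf.c points out, the info parameter passed here is what the rest of the encryption code refers to as the "salt" when deriving per-dataset encryption keys.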