summaryrefslogtreecommitdiffstats
path: root/module/icp/io
diff options
context:
space:
mode:
authorTom Caputi <[email protected]>2016-05-12 10:51:24 -0400
committerBrian Behlendorf <[email protected]>2016-07-20 10:43:30 -0700
commit0b04990a5de594659d2cf20458965277dd6efeb1 (patch)
tree74369a3236e03359f7276cb9b19687e28c7f6d59 /module/icp/io
parentbe88e733a634ad0d7f20350e1a17ede51922d3ff (diff)
Illumos Crypto Port module added to enable native encryption in zfs
A port of the Illumos Crypto Framework to a Linux kernel module (found in module/icp). This is needed to do the actual encryption work. We cannot use the Linux kernel's built in crypto api because it is only exported to GPL-licensed modules. Having the ICP also means the crypto code can run on any of the other kernels under OpenZFS. I ended up porting over most of the internals of the framework, which means that porting over other API calls (if we need them) should be fairly easy. Specifically, I have ported over the API functions related to encryption, digests, macs, and crypto templates. The ICP is able to use assembly-accelerated encryption on amd64 machines and AES-NI instructions on Intel chips that support it. There are place-holder directories for similar assembly optimizations for other architectures (although they have not been written). Signed-off-by: Tom Caputi <[email protected]> Signed-off-by: Tony Hutter <[email protected]> Signed-off-by: Brian Behlendorf <[email protected]> Issue #4329
Diffstat (limited to 'module/icp/io')
-rw-r--r--module/icp/io/aes.c1437
-rw-r--r--module/icp/io/sha1_mod.c1239
-rw-r--r--module/icp/io/sha2_mod.c1307
3 files changed, 3983 insertions, 0 deletions
diff --git a/module/icp/io/aes.c b/module/icp/io/aes.c
new file mode 100644
index 000000000..ada697eb6
--- /dev/null
+++ b/module/icp/io/aes.c
@@ -0,0 +1,1437 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * AES provider for the Kernel Cryptographic Framework (KCF)
+ */
+
+#include <sys/zfs_context.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/impl.h>
+#include <sys/crypto/spi.h>
+#include <sys/crypto/icp.h>
+#include <modes/modes.h>
+#include <sys/modctl.h>
+#define _AES_IMPL
+#include <aes/aes_impl.h>
+
+#define CRYPTO_PROVIDER_NAME "aes"
+
+extern struct mod_ops mod_cryptoops;
+
/*
 * Module linkage information for the kernel.
 */
/* Crypto-class module descriptor handed to the module framework. */
static struct modlcrypto modlcrypto = {
	&mod_cryptoops,
	"AES Kernel SW Provider"
};

/* Module linkage; carries the single crypto descriptor above. */
static struct modlinkage modlinkage = {
	MODREV_1, { (void *)&modlcrypto, NULL }
};
+
/*
 * Mechanism info structure passed to KCF during registration.
 * Each entry lists: mechanism name, mechanism type, the function-group
 * flags describing which operations are supported, and the min/max key
 * sizes (units given by the final field, here bytes).
 */
static crypto_mech_info_t aes_mech_info_tab[] = {
	/* AES_ECB */
	{SUN_CKM_AES_ECB, AES_ECB_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
	    CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
	    AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
	/* AES_CBC */
	{SUN_CKM_AES_CBC, AES_CBC_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
	    CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
	    AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
	/* AES_CTR */
	{SUN_CKM_AES_CTR, AES_CTR_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
	    CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
	    AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
	/* AES_CCM */
	{SUN_CKM_AES_CCM, AES_CCM_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
	    CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
	    AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
	/* AES_GCM */
	{SUN_CKM_AES_GCM, AES_GCM_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
	    CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
	    AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
	/* AES_GMAC: the only entry that also advertises MAC/sign/verify */
	{SUN_CKM_AES_GMAC, AES_GMAC_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
	    CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC |
	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC |
	    CRYPTO_FG_SIGN | CRYPTO_FG_SIGN_ATOMIC |
	    CRYPTO_FG_VERIFY | CRYPTO_FG_VERIFY_ATOMIC,
	    AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES}
};
+
/*
 * Operations are performed in-place when the caller passes a NULL
 * output buffer: the input buffer then doubles as the output.
 *
 * Wrapped in do/while (0) so the macro expands to exactly one
 * statement and stays safe inside unbraced if/else bodies
 * (the bare-if form was a dangling-else hazard).
 */
#define	AES_ARG_INPLACE(input, output)				\
	do {							\
		if ((output) == NULL)				\
			(output) = (input);			\
	} while (0)
+
static void aes_provider_status(crypto_provider_handle_t, uint_t *);

/* Control ops vector: only the provider-status query is implemented. */
static crypto_control_ops_t aes_control_ops = {
	aes_provider_status
};
+
static int aes_encrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int aes_decrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int aes_common_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t, boolean_t);
static int aes_common_init_ctx(aes_ctx_t *, crypto_spi_ctx_template_t *,
    crypto_mechanism_t *, crypto_key_t *, int, boolean_t);
static int aes_encrypt_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int aes_decrypt_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);

static int aes_encrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int aes_encrypt_update(crypto_ctx_t *, crypto_data_t *,
    crypto_data_t *, crypto_req_handle_t);
static int aes_encrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

static int aes_decrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int aes_decrypt_update(crypto_ctx_t *, crypto_data_t *,
    crypto_data_t *, crypto_req_handle_t);
static int aes_decrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

/* Cipher ops vector: full init/update/final/atomic coverage. */
static crypto_cipher_ops_t aes_cipher_ops = {
	aes_encrypt_init,
	aes_encrypt,
	aes_encrypt_update,
	aes_encrypt_final,
	aes_encrypt_atomic,
	aes_decrypt_init,
	aes_decrypt,
	aes_decrypt_update,
	aes_decrypt_final,
	aes_decrypt_atomic
};
+
static int aes_mac_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
    crypto_spi_ctx_template_t, crypto_req_handle_t);
static int aes_mac_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
    crypto_spi_ctx_template_t, crypto_req_handle_t);

/*
 * MAC ops vector: only the atomic (single-call) MAC and MAC-verify
 * entry points are provided; the multi-part slots are NULL.
 */
static crypto_mac_ops_t aes_mac_ops = {
	NULL,
	NULL,
	NULL,
	NULL,
	aes_mac_atomic,
	aes_mac_verify_atomic
};
+
static int aes_create_ctx_template(crypto_provider_handle_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *,
    size_t *, crypto_req_handle_t);
static int aes_free_context(crypto_ctx_t *);

/* Context ops vector: template creation and context teardown. */
static crypto_ctx_ops_t aes_ctx_ops = {
	aes_create_ctx_template,
	aes_free_context
};
+
/*
 * Full crypto ops vector registered with KCF.  Only the control,
 * cipher, MAC, and context ops families are implemented; all other
 * slots are NULL (unsupported by this provider).
 */
static crypto_ops_t aes_crypto_ops = {{{{{
	&aes_control_ops,
	NULL,
	&aes_cipher_ops,
	&aes_mac_ops,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	&aes_ctx_ops
}}}}};
+
/* Provider descriptor handed to crypto_register_provider() at load. */
static crypto_provider_info_t aes_prov_info = {{{{
	CRYPTO_SPI_VERSION_1,
	"AES Software Provider",
	CRYPTO_SW_PROVIDER,
	NULL,
	&aes_crypto_ops,
	sizeof (aes_mech_info_tab)/sizeof (crypto_mech_info_t),
	aes_mech_info_tab
}}}};

/* KCF registration handle; 0 means "not currently registered". */
static crypto_kcf_provider_handle_t aes_prov_handle = 0;
/*
 * Empty RAW crypto_data_t.  NOTE(review): not referenced in this part
 * of the file; presumably used by the GMAC MAC entry points further
 * down -- confirm against the rest of the file.
 */
static crypto_data_t null_crypto_data = { CRYPTO_DATA_RAW };
+
+int
+aes_mod_init(void)
+{
+ int ret;
+
+ if ((ret = mod_install(&modlinkage)) != 0)
+ return (ret);
+
+ /* Register with KCF. If the registration fails, remove the module. */
+ if (crypto_register_provider(&aes_prov_info, &aes_prov_handle)) {
+ (void) mod_remove(&modlinkage);
+ return (EACCES);
+ }
+
+ return (0);
+}
+
+int
+aes_mod_fini(void)
+{
+ /* Unregister from KCF if module is registered */
+ if (aes_prov_handle != 0) {
+ if (crypto_unregister_provider(aes_prov_handle))
+ return (EBUSY);
+
+ aes_prov_handle = 0;
+ }
+
+ return (mod_remove(&modlinkage));
+}
+
+static int
+aes_check_mech_param(crypto_mechanism_t *mechanism, aes_ctx_t **ctx, int kmflag)
+{
+ void *p = NULL;
+ boolean_t param_required = B_TRUE;
+ size_t param_len;
+ void *(*alloc_fun)(int);
+ int rv = CRYPTO_SUCCESS;
+
+ switch (mechanism->cm_type) {
+ case AES_ECB_MECH_INFO_TYPE:
+ param_required = B_FALSE;
+ alloc_fun = ecb_alloc_ctx;
+ break;
+ case AES_CBC_MECH_INFO_TYPE:
+ param_len = AES_BLOCK_LEN;
+ alloc_fun = cbc_alloc_ctx;
+ break;
+ case AES_CTR_MECH_INFO_TYPE:
+ param_len = sizeof (CK_AES_CTR_PARAMS);
+ alloc_fun = ctr_alloc_ctx;
+ break;
+ case AES_CCM_MECH_INFO_TYPE:
+ param_len = sizeof (CK_AES_CCM_PARAMS);
+ alloc_fun = ccm_alloc_ctx;
+ break;
+ case AES_GCM_MECH_INFO_TYPE:
+ param_len = sizeof (CK_AES_GCM_PARAMS);
+ alloc_fun = gcm_alloc_ctx;
+ break;
+ case AES_GMAC_MECH_INFO_TYPE:
+ param_len = sizeof (CK_AES_GMAC_PARAMS);
+ alloc_fun = gmac_alloc_ctx;
+ break;
+ default:
+ rv = CRYPTO_MECHANISM_INVALID;
+ return (rv);
+ }
+ if (param_required && mechanism->cm_param != NULL &&
+ mechanism->cm_param_len != param_len) {
+ rv = CRYPTO_MECHANISM_PARAM_INVALID;
+ }
+ if (ctx != NULL) {
+ p = (alloc_fun)(kmflag);
+ *ctx = p;
+ }
+ return (rv);
+}
+
+/*
+ * Initialize key schedules for AES
+ */
+static int
+init_keysched(crypto_key_t *key, void *newbie)
+{
+ /*
+ * Only keys by value are supported by this module.
+ */
+ switch (key->ck_format) {
+ case CRYPTO_KEY_RAW:
+ if (key->ck_length < AES_MINBITS ||
+ key->ck_length > AES_MAXBITS) {
+ return (CRYPTO_KEY_SIZE_RANGE);
+ }
+
+ /* key length must be either 128, 192, or 256 */
+ if ((key->ck_length & 63) != 0)
+ return (CRYPTO_KEY_SIZE_RANGE);
+ break;
+ default:
+ return (CRYPTO_KEY_TYPE_INCONSISTENT);
+ }
+
+ aes_init_keysched(key->ck_data, key->ck_length, newbie);
+ return (CRYPTO_SUCCESS);
+}
+
/*
 * KCF software provider control entry points.
 */
/* ARGSUSED */
static void
aes_provider_status(crypto_provider_handle_t provider, uint_t *status)
{
	/* A software provider has no hardware state; always ready. */
	*status = CRYPTO_PROVIDER_READY;
}
+
+static int
+aes_encrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_spi_ctx_template_t template,
+ crypto_req_handle_t req) {
+ return (aes_common_init(ctx, mechanism, key, template, req, B_TRUE));
+}
+
+static int
+aes_decrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_spi_ctx_template_t template,
+ crypto_req_handle_t req) {
+ return (aes_common_init(ctx, mechanism, key, template, req, B_FALSE));
+}
+
+
+
/*
 * KCF software provider encrypt entry points.
 */
/*
 * Shared init path for encrypt and decrypt: verify the key is passed
 * by value, allocate a mode-specific context via
 * aes_check_mech_param(), initialize it, and stash it in
 * ctx->cc_provider_private for the subsequent update/final calls.
 */
static int
aes_common_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t template,
    crypto_req_handle_t req, boolean_t is_encrypt_init)
{
	aes_ctx_t *aes_ctx;
	int rv;
	int kmflag;

	/*
	 * Only keys by value are supported by this module.
	 */
	if (key->ck_format != CRYPTO_KEY_RAW) {
		return (CRYPTO_KEY_TYPE_INCONSISTENT);
	}

	kmflag = crypto_kmflag(req);
	/* validates the mechanism parameter and allocates aes_ctx */
	if ((rv = aes_check_mech_param(mechanism, &aes_ctx, kmflag))
	    != CRYPTO_SUCCESS)
		return (rv);

	rv = aes_common_init_ctx(aes_ctx, template, mechanism, key, kmflag,
	    is_encrypt_init);
	if (rv != CRYPTO_SUCCESS) {
		/* init failed: release the context we just allocated */
		crypto_free_mode_ctx(aes_ctx);
		return (rv);
	}

	ctx->cc_provider_private = aes_ctx;

	return (CRYPTO_SUCCESS);
}
+
/*
 * Copy one 16-byte AES block from 'in' into the two 64-bit words at
 * 'out'.  Uses direct 64-bit loads when 'in' is 8-byte aligned and
 * falls back to a byte-wise AES_COPY_BLOCK otherwise.
 */
static void
aes_copy_block64(uint8_t *in, uint64_t *out)
{
	if (IS_P2ALIGNED(in, sizeof (uint64_t))) {
		/* LINTED: pointer alignment */
		out[0] = *(uint64_t *)&in[0];
		/* LINTED: pointer alignment */
		out[1] = *(uint64_t *)&in[8];
	} else {
		uint8_t *iv8 = (uint8_t *)&out[0];

		AES_COPY_BLOCK(in, iv8);
	}
}
+
+
/*
 * Single-part encrypt entry point.  Checks output buffer capacity
 * (reporting the needed size via ciphertext->cd_length on
 * CRYPTO_BUFFER_TOO_SMALL, without destroying the context), runs an
 * update over the whole input, then finalizes CCM/GCM/GMAC by
 * appending the MAC/tag.  On a completed operation the context is
 * freed before returning.
 */
static int
aes_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext,
    crypto_data_t *ciphertext, crypto_req_handle_t req)
{
	int ret = CRYPTO_FAILED;

	aes_ctx_t *aes_ctx;
	/*
	 * NOTE(review): saved_offset is size_t here but off_t in
	 * aes_decrypt() -- consider unifying the types.
	 */
	size_t saved_length, saved_offset, length_needed;

	ASSERT(ctx->cc_provider_private != NULL);
	aes_ctx = ctx->cc_provider_private;

	/*
	 * For block ciphers, plaintext must be a multiple of AES block size.
	 * This test is only valid for ciphers whose blocksize is a power of 2.
	 */
	if (((aes_ctx->ac_flags & (CTR_MODE|CCM_MODE|GCM_MODE|GMAC_MODE))
	    == 0) && (plaintext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
		return (CRYPTO_DATA_LEN_RANGE);

	AES_ARG_INPLACE(plaintext, ciphertext);

	/*
	 * We need to just return the length needed to store the output.
	 * We should not destroy the context for the following case.
	 */
	switch (aes_ctx->ac_flags & (CCM_MODE|GCM_MODE|GMAC_MODE)) {
	case CCM_MODE:
		length_needed = plaintext->cd_length + aes_ctx->ac_mac_len;
		break;
	case GCM_MODE:
		length_needed = plaintext->cd_length + aes_ctx->ac_tag_len;
		break;
	case GMAC_MODE:
		/* GMAC authenticates AAD only; no plaintext is allowed */
		if (plaintext->cd_length != 0)
			return (CRYPTO_ARGUMENTS_BAD);

		length_needed = aes_ctx->ac_tag_len;
		break;
	default:
		length_needed = plaintext->cd_length;
	}

	if (ciphertext->cd_length < length_needed) {
		ciphertext->cd_length = length_needed;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	saved_length = ciphertext->cd_length;
	saved_offset = ciphertext->cd_offset;

	/*
	 * Do an update on the specified input data.
	 */
	ret = aes_encrypt_update(ctx, plaintext, ciphertext, req);
	if (ret != CRYPTO_SUCCESS) {
		return (ret);
	}

	/*
	 * For CCM mode, aes_ccm_encrypt_final() will take care of any
	 * left-over unprocessed data, and compute the MAC
	 */
	if (aes_ctx->ac_flags & CCM_MODE) {
		/*
		 * ccm_encrypt_final() will compute the MAC and append
		 * it to existing ciphertext. So, need to adjust the left over
		 * length value accordingly
		 */

		/* order of following 2 lines MUST not be reversed */
		ciphertext->cd_offset = ciphertext->cd_length;
		ciphertext->cd_length = saved_length - ciphertext->cd_length;
		ret = ccm_encrypt_final((ccm_ctx_t *)aes_ctx, ciphertext,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
		if (ret != CRYPTO_SUCCESS) {
			return (ret);
		}

		if (plaintext != ciphertext) {
			ciphertext->cd_length =
			    ciphertext->cd_offset - saved_offset;
		}
		ciphertext->cd_offset = saved_offset;
	} else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
		/*
		 * gcm_encrypt_final() will compute the MAC and append
		 * it to existing ciphertext. So, need to adjust the left over
		 * length value accordingly
		 */

		/* order of following 2 lines MUST not be reversed */
		ciphertext->cd_offset = ciphertext->cd_length;
		ciphertext->cd_length = saved_length - ciphertext->cd_length;
		ret = gcm_encrypt_final((gcm_ctx_t *)aes_ctx, ciphertext,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
		    aes_xor_block);
		if (ret != CRYPTO_SUCCESS) {
			return (ret);
		}

		if (plaintext != ciphertext) {
			ciphertext->cd_length =
			    ciphertext->cd_offset - saved_offset;
		}
		ciphertext->cd_offset = saved_offset;
	}

	ASSERT(aes_ctx->ac_remainder_len == 0);
	(void) aes_free_context(ctx);

	return (ret);
}
+
+
/*
 * Single-part decrypt entry point.  Mirrors aes_encrypt(): checks the
 * plaintext buffer capacity (reporting the needed size on
 * CRYPTO_BUFFER_TOO_SMALL without destroying the context), runs a
 * full update, then finalizes CCM/GCM/GMAC, which verifies the
 * MAC/tag.  The context is freed at 'cleanup' on both success and
 * update failure.
 */
static int
aes_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
    crypto_data_t *plaintext, crypto_req_handle_t req)
{
	int ret = CRYPTO_FAILED;

	aes_ctx_t *aes_ctx;
	off_t saved_offset;
	size_t saved_length, length_needed;

	ASSERT(ctx->cc_provider_private != NULL);
	aes_ctx = ctx->cc_provider_private;

	/*
	 * For block ciphers, plaintext must be a multiple of AES block size.
	 * This test is only valid for ciphers whose blocksize is a power of 2.
	 */
	if (((aes_ctx->ac_flags & (CTR_MODE|CCM_MODE|GCM_MODE|GMAC_MODE))
	    == 0) && (ciphertext->cd_length & (AES_BLOCK_LEN - 1)) != 0) {
		return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
	}

	AES_ARG_INPLACE(ciphertext, plaintext);

	/*
	 * Return length needed to store the output.
	 * Do not destroy context when plaintext buffer is too small.
	 *
	 * CCM:  plaintext is MAC len smaller than cipher text
	 * GCM:  plaintext is TAG len smaller than cipher text
	 * GMAC: plaintext length must be zero
	 */
	switch (aes_ctx->ac_flags & (CCM_MODE|GCM_MODE|GMAC_MODE)) {
	case CCM_MODE:
		length_needed = aes_ctx->ac_processed_data_len;
		break;
	case GCM_MODE:
		length_needed = ciphertext->cd_length - aes_ctx->ac_tag_len;
		break;
	case GMAC_MODE:
		if (plaintext->cd_length != 0)
			return (CRYPTO_ARGUMENTS_BAD);

		length_needed = 0;
		break;
	default:
		length_needed = ciphertext->cd_length;
	}

	if (plaintext->cd_length < length_needed) {
		plaintext->cd_length = length_needed;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	saved_offset = plaintext->cd_offset;
	saved_length = plaintext->cd_length;

	/*
	 * Do an update on the specified input data.
	 */
	ret = aes_decrypt_update(ctx, ciphertext, plaintext, req);
	if (ret != CRYPTO_SUCCESS) {
		goto cleanup;
	}

	if (aes_ctx->ac_flags & CCM_MODE) {
		/* all data and the full MAC must have been processed */
		ASSERT(aes_ctx->ac_processed_data_len == aes_ctx->ac_data_len);
		ASSERT(aes_ctx->ac_processed_mac_len == aes_ctx->ac_mac_len);

		/* order of following 2 lines MUST not be reversed */
		plaintext->cd_offset = plaintext->cd_length;
		plaintext->cd_length = saved_length - plaintext->cd_length;

		ret = ccm_decrypt_final((ccm_ctx_t *)aes_ctx, plaintext,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
		    aes_xor_block);
		if (ret == CRYPTO_SUCCESS) {
			if (plaintext != ciphertext) {
				plaintext->cd_length =
				    plaintext->cd_offset - saved_offset;
			}
		} else {
			/* MAC verification failed: restore caller's length */
			plaintext->cd_length = saved_length;
		}

		plaintext->cd_offset = saved_offset;
	} else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
		/* order of following 2 lines MUST not be reversed */
		plaintext->cd_offset = plaintext->cd_length;
		plaintext->cd_length = saved_length - plaintext->cd_length;

		ret = gcm_decrypt_final((gcm_ctx_t *)aes_ctx, plaintext,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
		if (ret == CRYPTO_SUCCESS) {
			if (plaintext != ciphertext) {
				plaintext->cd_length =
				    plaintext->cd_offset - saved_offset;
			}
		} else {
			/* tag verification failed: restore caller's length */
			plaintext->cd_length = saved_length;
		}

		plaintext->cd_offset = saved_offset;
	}

	ASSERT(aes_ctx->ac_remainder_len == 0);

cleanup:
	(void) aes_free_context(ctx);

	return (ret);
}
+
+
/* ARGSUSED */
/*
 * Multi-part encrypt: process as many full AES blocks as the combined
 * carried-over remainder plus new input allows, buffering any partial
 * block in the context.  CTR mode is a stream cipher, so its
 * remainder is flushed immediately via ctr_mode_final().  On success,
 * ciphertext->cd_length is set to the number of bytes produced.
 */
static int
aes_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext,
    crypto_data_t *ciphertext, crypto_req_handle_t req)
{
	off_t saved_offset;
	size_t saved_length, out_len;
	int ret = CRYPTO_SUCCESS;
	aes_ctx_t *aes_ctx;

	ASSERT(ctx->cc_provider_private != NULL);
	aes_ctx = ctx->cc_provider_private;

	AES_ARG_INPLACE(plaintext, ciphertext);

	/* compute number of bytes that will hold the ciphertext */
	out_len = aes_ctx->ac_remainder_len;
	out_len += plaintext->cd_length;
	out_len &= ~(AES_BLOCK_LEN - 1);

	/* return length needed to store the output */
	if (ciphertext->cd_length < out_len) {
		ciphertext->cd_length = out_len;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	saved_offset = ciphertext->cd_offset;
	saved_length = ciphertext->cd_length;

	/*
	 * Do the AES update on the specified input data.
	 */
	switch (plaintext->cd_format) {
	case CRYPTO_DATA_RAW:
		ret = crypto_update_iov(ctx->cc_provider_private,
		    plaintext, ciphertext, aes_encrypt_contiguous_blocks,
		    aes_copy_block64);
		break;
	case CRYPTO_DATA_UIO:
		ret = crypto_update_uio(ctx->cc_provider_private,
		    plaintext, ciphertext, aes_encrypt_contiguous_blocks,
		    aes_copy_block64);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	/*
	 * Since AES counter mode is a stream cipher, we call
	 * ctr_mode_final() to pick up any remaining bytes.
	 * It is an internal function that does not destroy
	 * the context like *normal* final routines.
	 */
	if ((aes_ctx->ac_flags & CTR_MODE) && (aes_ctx->ac_remainder_len > 0)) {
		ret = ctr_mode_final((ctr_ctx_t *)aes_ctx,
		    ciphertext, aes_encrypt_block);
	}

	if (ret == CRYPTO_SUCCESS) {
		/* report bytes produced; in-place ops keep caller's length */
		if (plaintext != ciphertext)
			ciphertext->cd_length =
			    ciphertext->cd_offset - saved_offset;
	} else {
		ciphertext->cd_length = saved_length;
	}
	ciphertext->cd_offset = saved_offset;

	return (ret);
}
+
+
/*
 * Multi-part decrypt: counterpart of aes_encrypt_update().  CCM, GCM,
 * and GMAC buffer all input internally and return no plaintext until
 * the final call, so the output-capacity check is skipped for them.
 * CTR-mode remainders are flushed immediately (stream cipher).
 */
static int
aes_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
    crypto_data_t *plaintext, crypto_req_handle_t req)
{
	off_t saved_offset;
	size_t saved_length, out_len;
	int ret = CRYPTO_SUCCESS;
	aes_ctx_t *aes_ctx;

	ASSERT(ctx->cc_provider_private != NULL);
	aes_ctx = ctx->cc_provider_private;

	AES_ARG_INPLACE(ciphertext, plaintext);

	/*
	 * Compute number of bytes that will hold the plaintext.
	 * This is not necessary for CCM, GCM, and GMAC since these
	 * mechanisms never return plaintext for update operations.
	 */
	if ((aes_ctx->ac_flags & (CCM_MODE|GCM_MODE|GMAC_MODE)) == 0) {
		out_len = aes_ctx->ac_remainder_len;
		out_len += ciphertext->cd_length;
		out_len &= ~(AES_BLOCK_LEN - 1);

		/* return length needed to store the output */
		if (plaintext->cd_length < out_len) {
			plaintext->cd_length = out_len;
			return (CRYPTO_BUFFER_TOO_SMALL);
		}
	}

	saved_offset = plaintext->cd_offset;
	saved_length = plaintext->cd_length;

	/* GCM/GMAC buffer ciphertext internally; tell them how to allocate */
	if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE))
		gcm_set_kmflag((gcm_ctx_t *)aes_ctx, crypto_kmflag(req));

	/*
	 * Do the AES update on the specified input data.
	 */
	switch (ciphertext->cd_format) {
	case CRYPTO_DATA_RAW:
		ret = crypto_update_iov(ctx->cc_provider_private,
		    ciphertext, plaintext, aes_decrypt_contiguous_blocks,
		    aes_copy_block64);
		break;
	case CRYPTO_DATA_UIO:
		ret = crypto_update_uio(ctx->cc_provider_private,
		    ciphertext, plaintext, aes_decrypt_contiguous_blocks,
		    aes_copy_block64);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	/*
	 * Since AES counter mode is a stream cipher, we call
	 * ctr_mode_final() to pick up any remaining bytes.
	 * It is an internal function that does not destroy
	 * the context like *normal* final routines.
	 */
	if ((aes_ctx->ac_flags & CTR_MODE) && (aes_ctx->ac_remainder_len > 0)) {
		ret = ctr_mode_final((ctr_ctx_t *)aes_ctx, plaintext,
		    aes_encrypt_block);
		/* decrypt callers expect the "encrypted data" error code */
		if (ret == CRYPTO_DATA_LEN_RANGE)
			ret = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
	}

	if (ret == CRYPTO_SUCCESS) {
		if (ciphertext != plaintext)
			plaintext->cd_length =
			    plaintext->cd_offset - saved_offset;
	} else {
		plaintext->cd_length = saved_length;
	}
	plaintext->cd_offset = saved_offset;


	return (ret);
}
+
/* ARGSUSED */
/*
 * Finish a multi-part encrypt.  CTR flushes any buffered partial
 * block; CCM/GCM/GMAC emit the MAC/tag; plain block modes require
 * that no partial block remains.  Frees the context on success.
 */
static int
aes_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *data,
    crypto_req_handle_t req)
{
	aes_ctx_t *aes_ctx;
	int ret;

	ASSERT(ctx->cc_provider_private != NULL);
	aes_ctx = ctx->cc_provider_private;

	if (data->cd_format != CRYPTO_DATA_RAW &&
	    data->cd_format != CRYPTO_DATA_UIO) {
		return (CRYPTO_ARGUMENTS_BAD);
	}

	if (aes_ctx->ac_flags & CTR_MODE) {
		if (aes_ctx->ac_remainder_len > 0) {
			ret = ctr_mode_final((ctr_ctx_t *)aes_ctx, data,
			    aes_encrypt_block);
			if (ret != CRYPTO_SUCCESS)
				return (ret);
		}
	} else if (aes_ctx->ac_flags & CCM_MODE) {
		ret = ccm_encrypt_final((ccm_ctx_t *)aes_ctx, data,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
		if (ret != CRYPTO_SUCCESS) {
			return (ret);
		}
	} else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
		size_t saved_offset = data->cd_offset;

		ret = gcm_encrypt_final((gcm_ctx_t *)aes_ctx, data,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
		    aes_xor_block);
		if (ret != CRYPTO_SUCCESS) {
			return (ret);
		}
		data->cd_length = data->cd_offset - saved_offset;
		data->cd_offset = saved_offset;
	} else {
		/*
		 * There must be no unprocessed plaintext.
		 * This happens if the length of the last data is
		 * not a multiple of the AES block length.
		 */
		if (aes_ctx->ac_remainder_len > 0) {
			return (CRYPTO_DATA_LEN_RANGE);
		}
		data->cd_length = 0;
	}

	(void) aes_free_context(ctx);

	return (CRYPTO_SUCCESS);
}
+
+/* ARGSUSED */
+static int
+aes_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *data,
+ crypto_req_handle_t req)
+{
+ aes_ctx_t *aes_ctx;
+ int ret;
+ off_t saved_offset;
+ size_t saved_length;
+
+ ASSERT(ctx->cc_provider_private != NULL);
+ aes_ctx = ctx->cc_provider_private;
+
+ if (data->cd_format != CRYPTO_DATA_RAW &&
+ data->cd_format != CRYPTO_DATA_UIO) {
+ return (CRYPTO_ARGUMENTS_BAD);
+ }
+
+ /*
+ * There must be no unprocessed ciphertext.
+ * This happens if the length of the last ciphertext is
+ * not a multiple of the AES block length.
+ */
+ if (aes_ctx->ac_remainder_len > 0) {
+ if ((aes_ctx->ac_flags & CTR_MODE) == 0)
+ return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
+ else {
+ ret = ctr_mode_final((ctr_ctx_t *)aes_ctx, data,
+ aes_encrypt_block);
+ if (ret == CRYPTO_DATA_LEN_RANGE)
+ ret = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
+ if (ret != CRYPTO_SUCCESS)
+ return (ret);
+ }
+ }
+
+ if (aes_ctx->ac_flags & CCM_MODE) {
+ /*
+ * This is where all the plaintext is returned, make sure
+ * the plaintext buffer is big enough
+ */
+ size_t pt_len = aes_ctx->ac_data_len;
+ if (data->cd_length < pt_len) {
+ data->cd_length = pt_len;
+ return (CRYPTO_BUFFER_TOO_SMALL);
+ }
+
+ ASSERT(aes_ctx->ac_processed_data_len == pt_len);
+ ASSERT(aes_ctx->ac_processed_mac_len == aes_ctx->ac_mac_len);
+ saved_offset = data->cd_offset;
+ saved_length = data->cd_length;
+ ret = ccm_decrypt_final((ccm_ctx_t *)aes_ctx, data,
+ AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
+ aes_xor_block);
+ if (ret == CRYPTO_SUCCESS) {
+ data->cd_length = data->cd_offset - saved_offset;
+ } else {
+ data->cd_length = saved_length;
+ }
+
+ data->cd_offset = saved_offset;
+ if (ret != CRYPTO_SUCCESS) {
+ return (ret);
+ }
+ } else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
+ /*
+ * This is where all the plaintext is returned, make sure
+ * the plaintext buffer is big enough
+ */
+ gcm_ctx_t *ctx = (gcm_ctx_t *)aes_ctx;
+ size_t pt_len = ctx->gcm_processed_data_len - ctx->gcm_tag_len;
+
+ if (data->cd_length < pt_len) {
+ data->cd_length = pt_len;
+ return (CRYPTO_BUFFER_TOO_SMALL);
+ }
+
+ saved_offset = data->cd_offset;
+ saved_length = data->cd_length;
+ ret = gcm_decrypt_final((gcm_ctx_t *)aes_ctx, data,
+ AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
+ if (ret == CRYPTO_SUCCESS) {
+ data->cd_length = data->cd_offset - saved_offset;
+ } else {
+ data->cd_length = saved_length;
+ }
+
+ data->cd_offset = saved_offset;
+ if (ret != CRYPTO_SUCCESS) {
+ return (ret);
+ }
+ }
+
+
+ if ((aes_ctx->ac_flags & (CTR_MODE|CCM_MODE|GCM_MODE|GMAC_MODE)) == 0) {
+ data->cd_length = 0;
+ }
+
+ (void) aes_free_context(ctx);
+
+ return (CRYPTO_SUCCESS);
+}
+
+/* ARGSUSED */
+static int
+aes_encrypt_atomic(crypto_provider_handle_t provider,
+ crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_data_t *plaintext, crypto_data_t *ciphertext,
+ crypto_spi_ctx_template_t template, crypto_req_handle_t req)
+{
+ aes_ctx_t aes_ctx; /* on the stack */
+ off_t saved_offset;
+ size_t saved_length;
+ size_t length_needed;
+ int ret;
+
+ AES_ARG_INPLACE(plaintext, ciphertext);
+
+ /*
+ * CTR, CCM, GCM, and GMAC modes do not require that plaintext
+ * be a multiple of AES block size.
+ */
+ switch (mechanism->cm_type) {
+ case AES_CTR_MECH_INFO_TYPE:
+ case AES_CCM_MECH_INFO_TYPE:
+ case AES_GCM_MECH_INFO_TYPE:
+ case AES_GMAC_MECH_INFO_TYPE:
+ break;
+ default:
+ if ((plaintext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+
+ if ((ret = aes_check_mech_param(mechanism, NULL, 0)) != CRYPTO_SUCCESS)
+ return (ret);
+
+ bzero(&aes_ctx, sizeof (aes_ctx_t));
+
+ ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key,
+ crypto_kmflag(req), B_TRUE);
+ if (ret != CRYPTO_SUCCESS)
+ return (ret);
+
+ switch (mechanism->cm_type) {
+ case AES_CCM_MECH_INFO_TYPE:
+ length_needed = plaintext->cd_length + aes_ctx.ac_mac_len;
+ break;
+ case AES_GMAC_MECH_INFO_TYPE:
+ if (plaintext->cd_length != 0)
+ return (CRYPTO_ARGUMENTS_BAD);
+ /* FALLTHRU */
+ case AES_GCM_MECH_INFO_TYPE:
+ length_needed = plaintext->cd_length + aes_ctx.ac_tag_len;
+ break;
+ default:
+ length_needed = plaintext->cd_length;
+ }
+
+ /* return size of buffer needed to store output */
+ if (ciphertext->cd_length < length_needed) {
+ ciphertext->cd_length = length_needed;
+ ret = CRYPTO_BUFFER_TOO_SMALL;
+ goto out;
+ }
+
+ saved_offset = ciphertext->cd_offset;
+ saved_length = ciphertext->cd_length;
+
+ /*
+ * Do an update on the specified input data.
+ */
+ switch (plaintext->cd_format) {
+ case CRYPTO_DATA_RAW:
+ ret = crypto_update_iov(&aes_ctx, plaintext, ciphertext,
+ aes_encrypt_contiguous_blocks, aes_copy_block64);
+ break;
+ case CRYPTO_DATA_UIO:
+ ret = crypto_update_uio(&aes_ctx, plaintext, ciphertext,
+ aes_encrypt_contiguous_blocks, aes_copy_block64);
+ break;
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ if (ret == CRYPTO_SUCCESS) {
+ if (mechanism->cm_type == AES_CCM_MECH_INFO_TYPE) {
+ ret = ccm_encrypt_final((ccm_ctx_t *)&aes_ctx,
+ ciphertext, AES_BLOCK_LEN, aes_encrypt_block,
+ aes_xor_block);
+ if (ret != CRYPTO_SUCCESS)
+ goto out;
+ ASSERT(aes_ctx.ac_remainder_len == 0);
+ } else if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE ||
+ mechanism->cm_type == AES_GMAC_MECH_INFO_TYPE) {
+ ret = gcm_encrypt_final((gcm_ctx_t *)&aes_ctx,
+ ciphertext, AES_BLOCK_LEN, aes_encrypt_block,
+ aes_copy_block, aes_xor_block);
+ if (ret != CRYPTO_SUCCESS)
+ goto out;
+ ASSERT(aes_ctx.ac_remainder_len == 0);
+ } else if (mechanism->cm_type == AES_CTR_MECH_INFO_TYPE) {
+ if (aes_ctx.ac_remainder_len > 0) {
+ ret = ctr_mode_final((ctr_ctx_t *)&aes_ctx,
+ ciphertext, aes_encrypt_block);
+ if (ret != CRYPTO_SUCCESS)
+ goto out;
+ }
+ } else {
+ ASSERT(aes_ctx.ac_remainder_len == 0);
+ }
+
+ if (plaintext != ciphertext) {
+ ciphertext->cd_length =
+ ciphertext->cd_offset - saved_offset;
+ }
+ } else {
+ ciphertext->cd_length = saved_length;
+ }
+ ciphertext->cd_offset = saved_offset;
+
+out:
+ if (aes_ctx.ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
+ bzero(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
+ kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
+ }
+
+ return (ret);
+}
+
+/* ARGSUSED */
+/*
+ * Single-shot AES decryption entry point (KCF SPI).  Builds a temporary
+ * aes_ctx_t on the stack, runs one update pass over `ciphertext` into
+ * `plaintext`, and finalizes whichever mode the mechanism selects.
+ * `template` is an optional pre-expanded key schedule created by
+ * aes_create_ctx_template(); when absent, a schedule is allocated and
+ * owned by this call (freed and scrubbed at `out`).
+ */
+static int
+aes_decrypt_atomic(crypto_provider_handle_t provider,
+    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
+    crypto_key_t *key, crypto_data_t *ciphertext, crypto_data_t *plaintext,
+    crypto_spi_ctx_template_t template, crypto_req_handle_t req)
+{
+	aes_ctx_t aes_ctx;	/* on the stack */
+	off_t saved_offset;
+	size_t saved_length;
+	size_t length_needed;
+	int ret;
+
+	AES_ARG_INPLACE(ciphertext, plaintext);
+
+	/*
+	 * CCM, GCM, CTR, and GMAC modes do not require that ciphertext
+	 * be a multiple of AES block size.
+	 */
+	switch (mechanism->cm_type) {
+	case AES_CTR_MECH_INFO_TYPE:
+	case AES_CCM_MECH_INFO_TYPE:
+	case AES_GCM_MECH_INFO_TYPE:
+	case AES_GMAC_MECH_INFO_TYPE:
+		break;
+	default:
+		if ((ciphertext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
+			return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
+	}
+
+	if ((ret = aes_check_mech_param(mechanism, NULL, 0)) != CRYPTO_SUCCESS)
+		return (ret);
+
+	bzero(&aes_ctx, sizeof (aes_ctx_t));
+
+	ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key,
+	    crypto_kmflag(req), B_FALSE);
+	if (ret != CRYPTO_SUCCESS)
+		return (ret);
+
+	/*
+	 * Compute how much plaintext the caller's buffer must hold.
+	 * NOTE(review): for GCM this assumes cd_length >= ac_tag_len
+	 * (otherwise the subtraction wraps) — presumably guaranteed by
+	 * aes_common_init_ctx()/callers; confirm against gcm_init_ctx().
+	 */
+	switch (mechanism->cm_type) {
+	case AES_CCM_MECH_INFO_TYPE:
+		length_needed = aes_ctx.ac_data_len;
+		break;
+	case AES_GCM_MECH_INFO_TYPE:
+		length_needed = ciphertext->cd_length - aes_ctx.ac_tag_len;
+		break;
+	case AES_GMAC_MECH_INFO_TYPE:
+		if (plaintext->cd_length != 0)
+			return (CRYPTO_ARGUMENTS_BAD);
+		length_needed = 0;
+		break;
+	default:
+		length_needed = ciphertext->cd_length;
+	}
+
+	/* return size of buffer needed to store output */
+	if (plaintext->cd_length < length_needed) {
+		plaintext->cd_length = length_needed;
+		ret = CRYPTO_BUFFER_TOO_SMALL;
+		goto out;
+	}
+
+	saved_offset = plaintext->cd_offset;
+	saved_length = plaintext->cd_length;
+
+	if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE ||
+	    mechanism->cm_type == AES_GMAC_MECH_INFO_TYPE)
+		gcm_set_kmflag((gcm_ctx_t *)&aes_ctx, crypto_kmflag(req));
+
+	/*
+	 * Do an update on the specified input data.
+	 */
+	switch (ciphertext->cd_format) {
+	case CRYPTO_DATA_RAW:
+		ret = crypto_update_iov(&aes_ctx, ciphertext, plaintext,
+		    aes_decrypt_contiguous_blocks, aes_copy_block64);
+		break;
+	case CRYPTO_DATA_UIO:
+		ret = crypto_update_uio(&aes_ctx, ciphertext, plaintext,
+		    aes_decrypt_contiguous_blocks, aes_copy_block64);
+		break;
+	default:
+		ret = CRYPTO_ARGUMENTS_BAD;
+	}
+
+	/*
+	 * Mode-specific finalization.  On success, cd_length is rewritten
+	 * to the number of bytes actually produced (offset delta), except
+	 * for in-place operation where the original length is restored.
+	 */
+	if (ret == CRYPTO_SUCCESS) {
+		if (mechanism->cm_type == AES_CCM_MECH_INFO_TYPE) {
+			ASSERT(aes_ctx.ac_processed_data_len
+			    == aes_ctx.ac_data_len);
+			ASSERT(aes_ctx.ac_processed_mac_len
+			    == aes_ctx.ac_mac_len);
+			ret = ccm_decrypt_final((ccm_ctx_t *)&aes_ctx,
+			    plaintext, AES_BLOCK_LEN, aes_encrypt_block,
+			    aes_copy_block, aes_xor_block);
+			ASSERT(aes_ctx.ac_remainder_len == 0);
+			if ((ret == CRYPTO_SUCCESS) &&
+			    (ciphertext != plaintext)) {
+				plaintext->cd_length =
+				    plaintext->cd_offset - saved_offset;
+			} else {
+				plaintext->cd_length = saved_length;
+			}
+		} else if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE ||
+		    mechanism->cm_type == AES_GMAC_MECH_INFO_TYPE) {
+			ret = gcm_decrypt_final((gcm_ctx_t *)&aes_ctx,
+			    plaintext, AES_BLOCK_LEN, aes_encrypt_block,
+			    aes_xor_block);
+			ASSERT(aes_ctx.ac_remainder_len == 0);
+			if ((ret == CRYPTO_SUCCESS) &&
+			    (ciphertext != plaintext)) {
+				plaintext->cd_length =
+				    plaintext->cd_offset - saved_offset;
+			} else {
+				plaintext->cd_length = saved_length;
+			}
+		} else if (mechanism->cm_type != AES_CTR_MECH_INFO_TYPE) {
+			ASSERT(aes_ctx.ac_remainder_len == 0);
+			if (ciphertext != plaintext)
+				plaintext->cd_length =
+				    plaintext->cd_offset - saved_offset;
+		} else {
+			/* CTR: flush any partial final block. */
+			if (aes_ctx.ac_remainder_len > 0) {
+				ret = ctr_mode_final((ctr_ctx_t *)&aes_ctx,
+				    plaintext, aes_encrypt_block);
+				if (ret == CRYPTO_DATA_LEN_RANGE)
+					ret = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
+				if (ret != CRYPTO_SUCCESS)
+					goto out;
+			}
+			if (ciphertext != plaintext)
+				plaintext->cd_length =
+				    plaintext->cd_offset - saved_offset;
+		}
+	} else {
+		plaintext->cd_length = saved_length;
+	}
+	plaintext->cd_offset = saved_offset;
+
+out:
+	/* Scrub and release the key schedule if this call allocated it. */
+	if (aes_ctx.ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
+		bzero(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
+		kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
+	}
+
+	/* CCM/GCM/GMAC buffer decrypted data internally; free it here. */
+	if (aes_ctx.ac_flags & CCM_MODE) {
+		if (aes_ctx.ac_pt_buf != NULL) {
+			vmem_free(aes_ctx.ac_pt_buf, aes_ctx.ac_data_len);
+		}
+	} else if (aes_ctx.ac_flags & (GCM_MODE|GMAC_MODE)) {
+		if (((gcm_ctx_t *)&aes_ctx)->gcm_pt_buf != NULL) {
+			vmem_free(((gcm_ctx_t *)&aes_ctx)->gcm_pt_buf,
+			    ((gcm_ctx_t *)&aes_ctx)->gcm_pt_buf_len);
+		}
+	}
+
+	return (ret);
+}
+
+/*
+ * KCF software provider context template entry points.
+ */
+/* ARGSUSED */
+/*
+ * Expand `key` into a reusable AES key schedule and hand ownership of it
+ * to the caller via *tmpl / *tmpl_size.  The template can then be passed
+ * to later init/atomic calls to skip repeated key expansion.
+ */
+static int
+aes_create_ctx_template(crypto_provider_handle_t provider,
+    crypto_mechanism_t *mechanism, crypto_key_t *key,
+    crypto_spi_ctx_template_t *tmpl, size_t *tmpl_size, crypto_req_handle_t req)
+{
+	void *sched;
+	size_t sched_size;
+	int err;
+
+	/* Only the supported AES mechanisms may share a key schedule. */
+	switch (mechanism->cm_type) {
+	case AES_ECB_MECH_INFO_TYPE:
+	case AES_CBC_MECH_INFO_TYPE:
+	case AES_CTR_MECH_INFO_TYPE:
+	case AES_CCM_MECH_INFO_TYPE:
+	case AES_GCM_MECH_INFO_TYPE:
+	case AES_GMAC_MECH_INFO_TYPE:
+		break;
+	default:
+		return (CRYPTO_MECHANISM_INVALID);
+	}
+
+	sched = aes_alloc_keysched(&sched_size, crypto_kmflag(req));
+	if (sched == NULL)
+		return (CRYPTO_HOST_MEMORY);
+
+	/*
+	 * Initialize the key schedule; key length information is carried
+	 * in `key` itself.  On failure, scrub before freeing so no key
+	 * material lingers in released memory.
+	 */
+	err = init_keysched(key, sched);
+	if (err != CRYPTO_SUCCESS) {
+		bzero(sched, sched_size);
+		kmem_free(sched, sched_size);
+		return (err);
+	}
+
+	*tmpl = sched;
+	*tmpl_size = sched_size;
+
+	return (CRYPTO_SUCCESS);
+}
+
+
+/*
+ * Release the per-operation AES context attached to `ctx`, scrubbing the
+ * key schedule first when this provider allocated it.  Safe to call when
+ * no private context is attached.
+ */
+static int
+aes_free_context(crypto_ctx_t *ctx)
+{
+	aes_ctx_t *ac = ctx->cc_provider_private;
+
+	if (ac == NULL)
+		return (CRYPTO_SUCCESS);
+
+	if (ac->ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
+		ASSERT(ac->ac_keysched_len != 0);
+		/* Wipe key material before returning the memory. */
+		bzero(ac->ac_keysched, ac->ac_keysched_len);
+		kmem_free(ac->ac_keysched, ac->ac_keysched_len);
+	}
+	crypto_free_mode_ctx(ac);
+	ctx->cc_provider_private = NULL;
+
+	return (CRYPTO_SUCCESS);
+}
+
+
+/*
+ * Common context setup shared by the encrypt/decrypt init and atomic
+ * entry points: installs (or allocates and expands) the key schedule and
+ * performs mode-specific initialization from the mechanism parameters.
+ *
+ * Fix vs. original: the CTR/CCM/GCM/GMAC parameter-validation paths used
+ * to `return` directly, leaking the key schedule allocated above when no
+ * template was supplied.  They now set `rv` and break so the common
+ * error cleanup scrubs and frees it.  The init_keysched() failure path
+ * also scrubs before freeing (matching aes_create_ctx_template()).
+ */
+static int
+aes_common_init_ctx(aes_ctx_t *aes_ctx, crypto_spi_ctx_template_t *template,
+    crypto_mechanism_t *mechanism, crypto_key_t *key, int kmflag,
+    boolean_t is_encrypt_init)
+{
+	int rv = CRYPTO_SUCCESS;
+	void *keysched;
+	size_t size;
+
+	if (template == NULL) {
+		if ((keysched = aes_alloc_keysched(&size, kmflag)) == NULL)
+			return (CRYPTO_HOST_MEMORY);
+		/*
+		 * Initialize key schedule.
+		 * Key length is stored in the key.
+		 */
+		if ((rv = init_keysched(key, keysched)) != CRYPTO_SUCCESS) {
+			bzero(keysched, size);
+			kmem_free(keysched, size);
+			return (rv);
+		}
+
+		aes_ctx->ac_flags |= PROVIDER_OWNS_KEY_SCHEDULE;
+		aes_ctx->ac_keysched_len = size;
+	} else {
+		keysched = template;
+	}
+	aes_ctx->ac_keysched = keysched;
+
+	switch (mechanism->cm_type) {
+	case AES_CBC_MECH_INFO_TYPE:
+		rv = cbc_init_ctx((cbc_ctx_t *)aes_ctx, mechanism->cm_param,
+		    mechanism->cm_param_len, AES_BLOCK_LEN, aes_copy_block64);
+		break;
+	case AES_CTR_MECH_INFO_TYPE: {
+		CK_AES_CTR_PARAMS *pp;
+
+		if (mechanism->cm_param == NULL ||
+		    mechanism->cm_param_len != sizeof (CK_AES_CTR_PARAMS)) {
+			rv = CRYPTO_MECHANISM_PARAM_INVALID;
+			break;
+		}
+		pp = (CK_AES_CTR_PARAMS *)(void *)mechanism->cm_param;
+		rv = ctr_init_ctx((ctr_ctx_t *)aes_ctx, pp->ulCounterBits,
+		    pp->cb, aes_copy_block);
+		break;
+	}
+	case AES_CCM_MECH_INFO_TYPE:
+		if (mechanism->cm_param == NULL ||
+		    mechanism->cm_param_len != sizeof (CK_AES_CCM_PARAMS)) {
+			rv = CRYPTO_MECHANISM_PARAM_INVALID;
+			break;
+		}
+		rv = ccm_init_ctx((ccm_ctx_t *)aes_ctx, mechanism->cm_param,
+		    kmflag, is_encrypt_init, AES_BLOCK_LEN, aes_encrypt_block,
+		    aes_xor_block);
+		break;
+	case AES_GCM_MECH_INFO_TYPE:
+		if (mechanism->cm_param == NULL ||
+		    mechanism->cm_param_len != sizeof (CK_AES_GCM_PARAMS)) {
+			rv = CRYPTO_MECHANISM_PARAM_INVALID;
+			break;
+		}
+		rv = gcm_init_ctx((gcm_ctx_t *)aes_ctx, mechanism->cm_param,
+		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
+		    aes_xor_block);
+		break;
+	case AES_GMAC_MECH_INFO_TYPE:
+		if (mechanism->cm_param == NULL ||
+		    mechanism->cm_param_len != sizeof (CK_AES_GMAC_PARAMS)) {
+			rv = CRYPTO_MECHANISM_PARAM_INVALID;
+			break;
+		}
+		rv = gmac_init_ctx((gcm_ctx_t *)aes_ctx, mechanism->cm_param,
+		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
+		    aes_xor_block);
+		break;
+	case AES_ECB_MECH_INFO_TYPE:
+		aes_ctx->ac_flags |= ECB_MODE;
+	}
+
+	if (rv != CRYPTO_SUCCESS) {
+		if (aes_ctx->ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
+			bzero(keysched, size);
+			kmem_free(keysched, size);
+			/* Don't leave a dangling pointer/flag behind. */
+			aes_ctx->ac_keysched = NULL;
+			aes_ctx->ac_flags &= ~PROVIDER_OWNS_KEY_SCHEDULE;
+		}
+	}
+
+	return (rv);
+}
+
+/*
+ * Translate a GMAC mechanism into the equivalent GCM parameters.
+ * `data`, when non-NULL, supplies the bytes to authenticate and becomes
+ * the GCM AAD; GMAC itself carries no payload.
+ */
+static int
+process_gmac_mech(crypto_mechanism_t *mech, crypto_data_t *data,
+    CK_AES_GCM_PARAMS *gcm_params)
+{
+	/* LINTED: pointer alignment */
+	CK_AES_GMAC_PARAMS *gmac = (CK_AES_GMAC_PARAMS *)mech->cm_param;
+
+	if (mech->cm_type != AES_GMAC_MECH_INFO_TYPE)
+		return (CRYPTO_MECHANISM_INVALID);
+
+	if (mech->cm_param_len != sizeof (CK_AES_GMAC_PARAMS))
+		return (CRYPTO_MECHANISM_PARAM_INVALID);
+
+	if (gmac->pIv == NULL)
+		return (CRYPTO_MECHANISM_PARAM_INVALID);
+
+	/* GMAC is GCM with a fixed IV length and tag size. */
+	gcm_params->pIv = gmac->pIv;
+	gcm_params->ulIvLen = AES_GMAC_IV_LEN;
+	gcm_params->ulTagBits = AES_GMAC_TAG_BITS;
+
+	if (data == NULL)
+		return (CRYPTO_SUCCESS);
+
+	if (data->cd_format != CRYPTO_DATA_RAW)
+		return (CRYPTO_ARGUMENTS_BAD);
+
+	/* The message to authenticate is carried as GCM AAD. */
+	gcm_params->pAAD = (uchar_t *)data->cd_raw.iov_base;
+	gcm_params->ulAADLen = data->cd_length;
+	return (CRYPTO_SUCCESS);
+}
+
+/*
+ * Single-shot GMAC: compute a MAC over `data` into `mac`.  Implemented
+ * by rewriting the GMAC mechanism as GCM and running an encryption with
+ * an empty plaintext, so only the authentication tag is produced.
+ */
+static int
+aes_mac_atomic(crypto_provider_handle_t provider,
+    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
+    crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
+    crypto_spi_ctx_template_t template, crypto_req_handle_t req)
+{
+	CK_AES_GCM_PARAMS gcm_params;
+	crypto_mechanism_t gcm_mech;
+	int err;
+
+	err = process_gmac_mech(mechanism, data, &gcm_params);
+	if (err != CRYPTO_SUCCESS)
+		return (err);
+
+	gcm_mech.cm_type = AES_GCM_MECH_INFO_TYPE;
+	gcm_mech.cm_param_len = sizeof (CK_AES_GCM_PARAMS);
+	gcm_mech.cm_param = (char *)&gcm_params;
+
+	return (aes_encrypt_atomic(provider, session_id, &gcm_mech,
+	    key, &null_crypto_data, mac, template, req));
+}
+
+/*
+ * Single-shot GMAC verification: check that `mac` authenticates `data`.
+ * Mirrors aes_mac_atomic() but runs a GCM decryption with an empty
+ * plaintext, which fails unless the supplied tag matches.
+ */
+static int
+aes_mac_verify_atomic(crypto_provider_handle_t provider,
+    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
+    crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
+    crypto_spi_ctx_template_t template, crypto_req_handle_t req)
+{
+	CK_AES_GCM_PARAMS gcm_params;
+	crypto_mechanism_t gcm_mech;
+	int err;
+
+	err = process_gmac_mech(mechanism, data, &gcm_params);
+	if (err != CRYPTO_SUCCESS)
+		return (err);
+
+	gcm_mech.cm_type = AES_GCM_MECH_INFO_TYPE;
+	gcm_mech.cm_param_len = sizeof (CK_AES_GCM_PARAMS);
+	gcm_mech.cm_param = (char *)&gcm_params;
+
+	return (aes_decrypt_atomic(provider, session_id, &gcm_mech,
+	    key, mac, &null_crypto_data, template, req));
+}
diff --git a/module/icp/io/sha1_mod.c b/module/icp/io/sha1_mod.c
new file mode 100644
index 000000000..a278dac7f
--- /dev/null
+++ b/module/icp/io/sha1_mod.c
@@ -0,0 +1,1239 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/zfs_context.h>
+#include <sys/modctl.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/spi.h>
+
+#include <sha1/sha1.h>
+#include <sha1/sha1_impl.h>
+
+/*
+ * The sha1 module is created with two modlinkages:
+ * - a modlmisc that allows consumers to directly call the entry points
+ * SHA1Init, SHA1Update, and SHA1Final.
+ * - a modlcrypto that allows the module to register with the Kernel
+ * Cryptographic Framework (KCF) as a software provider for the SHA1
+ * mechanisms.
+ */
+
+/* Crypto-framework linkage record for this module. */
+static struct modlcrypto modlcrypto = {
+	&mod_cryptoops,
+	"SHA1 Kernel SW Provider 1.1"
+};
+
+/* Single modlinkage entry; the modlmisc linkage is not ported here. */
+static struct modlinkage modlinkage = {
+	MODREV_1, { &modlcrypto, NULL }
+};
+
+
+/*
+ * Macros to access the SHA1 or SHA1-HMAC contexts from a context passed
+ * by KCF to one of the entry points.
+ */
+
+#define	PROV_SHA1_CTX(ctx)	((sha1_ctx_t *)(ctx)->cc_provider_private)
+#define	PROV_SHA1_HMAC_CTX(ctx)	((sha1_hmac_ctx_t *)(ctx)->cc_provider_private)
+
+/*
+ * Extract the digest length passed as the mechanism parameter (an
+ * ulong_t), handling a misaligned cm_param via bcopy.
+ *
+ * Fix vs. original: the aligned branch dereferenced the identifier
+ * `mechanism` instead of the macro parameter `(m)` — it only compiled
+ * because every call site happened to pass a variable literally named
+ * `mechanism`.  Use the parameter so the macro is hygienic.
+ */
+#define	PROV_SHA1_GET_DIGEST_LEN(m, len) {				\
+	if (IS_P2ALIGNED((m)->cm_param, sizeof (ulong_t)))		\
+		(len) = (uint32_t)*((ulong_t *)(void *)(m)->cm_param);	\
+	else {								\
+		ulong_t tmp_ulong;					\
+		bcopy((m)->cm_param, &tmp_ulong, sizeof (ulong_t));	\
+		(len) = (uint32_t)tmp_ulong;				\
+	}								\
+}
+
+/* Digest `key` (len bytes) into `digest` using the scratch SHA1 ctx. */
+#define	PROV_SHA1_DIGEST_KEY(ctx, key, len, digest) {	\
+	SHA1Init(ctx);					\
+	SHA1Update(ctx, key, len);			\
+	SHA1Final(digest, ctx);				\
+}
+
+/*
+ * Mechanism info structure passed to KCF during registration.
+ */
+static crypto_mech_info_t sha1_mech_info_tab[] = {
+	/* SHA1 */
+	{SUN_CKM_SHA1, SHA1_MECH_INFO_TYPE,
+	    CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
+	    0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS},
+	/* SHA1-HMAC */
+	{SUN_CKM_SHA1_HMAC, SHA1_HMAC_MECH_INFO_TYPE,
+	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
+	    SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN,
+	    CRYPTO_KEYSIZE_UNIT_IN_BYTES},
+	/* SHA1-HMAC GENERAL (caller-selectable, truncated digest length) */
+	{SUN_CKM_SHA1_HMAC_GENERAL, SHA1_HMAC_GEN_MECH_INFO_TYPE,
+	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
+	    SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN,
+	    CRYPTO_KEYSIZE_UNIT_IN_BYTES}
+};
+
+static void sha1_provider_status(crypto_provider_handle_t, uint_t *);
+
+static crypto_control_ops_t sha1_control_ops = {
+	sha1_provider_status
+};
+
+/* Forward declarations for the digest SPI entry points below. */
+static int sha1_digest_init(crypto_ctx_t *, crypto_mechanism_t *,
+    crypto_req_handle_t);
+static int sha1_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
+    crypto_req_handle_t);
+static int sha1_digest_update(crypto_ctx_t *, crypto_data_t *,
+    crypto_req_handle_t);
+static int sha1_digest_final(crypto_ctx_t *, crypto_data_t *,
+    crypto_req_handle_t);
+static int sha1_digest_atomic(crypto_provider_handle_t, crypto_session_id_t,
+    crypto_mechanism_t *, crypto_data_t *, crypto_data_t *,
+    crypto_req_handle_t);
+
+static crypto_digest_ops_t sha1_digest_ops = {
+	sha1_digest_init,
+	sha1_digest,
+	sha1_digest_update,
+	NULL,		/* digest_key: not supported */
+	sha1_digest_final,
+	sha1_digest_atomic
+};
+
+/* Forward declarations for the MAC (HMAC) SPI entry points below. */
+static int sha1_mac_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
+    crypto_spi_ctx_template_t, crypto_req_handle_t);
+static int sha1_mac_update(crypto_ctx_t *, crypto_data_t *,
+    crypto_req_handle_t);
+static int sha1_mac_final(crypto_ctx_t *, crypto_data_t *, crypto_req_handle_t);
+static int sha1_mac_atomic(crypto_provider_handle_t, crypto_session_id_t,
+    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
+    crypto_spi_ctx_template_t, crypto_req_handle_t);
+static int sha1_mac_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
+    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
+    crypto_spi_ctx_template_t, crypto_req_handle_t);
+
+static crypto_mac_ops_t sha1_mac_ops = {
+	sha1_mac_init,
+	NULL,		/* mac: single-part not provided; use update/final */
+	sha1_mac_update,
+	sha1_mac_final,
+	sha1_mac_atomic,
+	sha1_mac_verify_atomic
+};
+
+static int sha1_create_ctx_template(crypto_provider_handle_t,
+    crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *,
+    size_t *, crypto_req_handle_t);
+static int sha1_free_context(crypto_ctx_t *);
+
+static crypto_ctx_ops_t sha1_ctx_ops = {
+	sha1_create_ctx_template,
+	sha1_free_context
+};
+
+/* Only control, digest, mac and ctx ops are provided; rest are NULL. */
+static crypto_ops_t sha1_crypto_ops = {{{{{
+	&sha1_control_ops,
+	&sha1_digest_ops,
+	NULL,
+	&sha1_mac_ops,
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+	&sha1_ctx_ops,
+}}}}};
+
+static crypto_provider_info_t sha1_prov_info = {{{{
+	CRYPTO_SPI_VERSION_1,
+	"SHA1 Software Provider",
+	CRYPTO_SW_PROVIDER,
+	NULL,
+	&sha1_crypto_ops,
+	sizeof (sha1_mech_info_tab)/sizeof (crypto_mech_info_t),
+	sha1_mech_info_tab
+}}}};
+
+/* Handle returned by KCF registration; 0 means "not registered". */
+static crypto_kcf_provider_handle_t sha1_prov_handle = 0;
+
+/*
+ * Module entry point: install the module and register it with KCF as a
+ * software provider.  Returns 0 even when KCF registration fails, since
+ * the direct SHA1Init/Update/Final entry points remain usable.
+ */
+int
+sha1_mod_init(void)
+{
+	int ret;
+
+	if ((ret = mod_install(&modlinkage)) != 0)
+		return (ret);
+
+	/*
+	 * Register with KCF. If the registration fails, log an
+	 * error but do not uninstall the module, since the functionality
+	 * provided by misc/sha1 should still be available.
+	 */
+	if ((ret = crypto_register_provider(&sha1_prov_info,
+	    &sha1_prov_handle)) != CRYPTO_SUCCESS)
+		cmn_err(CE_WARN, "sha1 _init: "
+		    "crypto_register_provider() failed (0x%x)", ret);
+
+	return (0);
+}
+
+/*
+ * Module exit point: unregister from KCF (if registered) and remove the
+ * module.  Returns EBUSY when KCF still has outstanding references.
+ */
+int
+sha1_mod_fini(void)
+{
+	int err;
+
+	if (sha1_prov_handle != 0) {
+		err = crypto_unregister_provider(sha1_prov_handle);
+		if (err != CRYPTO_SUCCESS) {
+			cmn_err(CE_WARN,
+			    "sha1 _fini: crypto_unregister_provider() "
+			    "failed (0x%x)", err);
+			return (EBUSY);
+		}
+		sha1_prov_handle = 0;
+	}
+
+	return (mod_remove(&modlinkage));
+}
+
+/*
+ * KCF software provider control entry points.
+ */
+/* ARGSUSED */
+/* This software provider has no hardware to fail: always ready. */
+static void
+sha1_provider_status(crypto_provider_handle_t provider, uint_t *status)
+{
+	*status = CRYPTO_PROVIDER_READY;
+}
+
+/*
+ * KCF software provider digest entry points.
+ */
+
+/*
+ * Begin a multi-part SHA1 digest: allocate the per-operation context
+ * and attach it to `ctx`.  Freed by sha1_digest()/sha1_digest_final()
+ * or sha1_free_context().
+ */
+static int
+sha1_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
+    crypto_req_handle_t req)
+{
+	sha1_ctx_t *sc;
+
+	if (mechanism->cm_type != SHA1_MECH_INFO_TYPE)
+		return (CRYPTO_MECHANISM_INVALID);
+
+	/* Allocate and initialize the SHA1 context. */
+	sc = kmem_alloc(sizeof (sha1_ctx_t), crypto_kmflag(req));
+	if (sc == NULL)
+		return (CRYPTO_HOST_MEMORY);
+
+	sc->sc_mech_type = SHA1_MECH_INFO_TYPE;
+	SHA1Init(&sc->sc_sha1_ctx);
+	ctx->cc_provider_private = sc;
+
+	return (CRYPTO_SUCCESS);
+}
+
+/*
+ * Helper SHA1 digest update function for uio data.
+ * Feeds cd_length bytes starting at cd_offset (a logical offset across
+ * the concatenated iovecs) into `sha1_ctx`.  Returns
+ * CRYPTO_DATA_LEN_RANGE if the uio does not actually contain that span.
+ */
+static int
+sha1_digest_update_uio(SHA1_CTX *sha1_ctx, crypto_data_t *data)
+{
+	off_t offset = data->cd_offset;
+	size_t length = data->cd_length;
+	uint_t vec_idx;
+	size_t cur_len;
+
+	/* we support only kernel buffer */
+	if (data->cd_uio->uio_segflg != UIO_SYSSPACE)
+		return (CRYPTO_ARGUMENTS_BAD);
+
+	/*
+	 * Jump to the first iovec containing data to be
+	 * digested.
+	 */
+	for (vec_idx = 0; vec_idx < data->cd_uio->uio_iovcnt &&
+	    offset >= data->cd_uio->uio_iov[vec_idx].iov_len;
+	    offset -= data->cd_uio->uio_iov[vec_idx++].iov_len)
+		;
+	if (vec_idx == data->cd_uio->uio_iovcnt) {
+		/*
+		 * The caller specified an offset that is larger than the
+		 * total size of the buffers it provided.
+		 */
+		return (CRYPTO_DATA_LEN_RANGE);
+	}
+
+	/*
+	 * Now do the digesting on the iovecs.
+	 */
+	while (vec_idx < data->cd_uio->uio_iovcnt && length > 0) {
+		/* `offset` is nonzero only for the first iovec touched. */
+		cur_len = MIN(data->cd_uio->uio_iov[vec_idx].iov_len -
+		    offset, length);
+
+		SHA1Update(sha1_ctx,
+		    (uint8_t *)data->cd_uio->uio_iov[vec_idx].iov_base + offset,
+		    cur_len);
+
+		length -= cur_len;
+		vec_idx++;
+		offset = 0;
+	}
+
+	if (vec_idx == data->cd_uio->uio_iovcnt && length > 0) {
+		/*
+		 * The end of the specified iovec's was reached but
+		 * the length requested could not be processed, i.e.
+		 * The caller requested to digest more data than it provided.
+		 */
+		return (CRYPTO_DATA_LEN_RANGE);
+	}
+
+	return (CRYPTO_SUCCESS);
+}
+
+/*
+ * Helper SHA1 digest final function for uio data.
+ * digest_len is the length of the desired digest. If digest_len
+ * is smaller than the default SHA1 digest length, the caller
+ * must pass a scratch buffer, digest_scratch, which must
+ * be at least SHA1_DIGEST_LENGTH bytes.
+ *
+ * Fix vs. original: the iovec-seek loop tested
+ * `offset >= iov[vec_idx].iov_len` BEFORE `vec_idx < uio_iovcnt`, so a
+ * too-large offset read one element past the end of the iovec array
+ * before the bound check fired.  The conjuncts are now ordered as in
+ * sha1_digest_update_uio().
+ */
+static int
+sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
+    ulong_t digest_len, uchar_t *digest_scratch)
+{
+	off_t offset = digest->cd_offset;
+	uint_t vec_idx;
+
+	/* we support only kernel buffer */
+	if (digest->cd_uio->uio_segflg != UIO_SYSSPACE)
+		return (CRYPTO_ARGUMENTS_BAD);
+
+	/*
+	 * Jump to the first iovec containing ptr to the digest to
+	 * be returned.  Bound-check vec_idx before dereferencing the
+	 * iovec.
+	 */
+	for (vec_idx = 0; vec_idx < digest->cd_uio->uio_iovcnt &&
+	    offset >= digest->cd_uio->uio_iov[vec_idx].iov_len;
+	    offset -= digest->cd_uio->uio_iov[vec_idx++].iov_len)
+		;
+	if (vec_idx == digest->cd_uio->uio_iovcnt) {
+		/*
+		 * The caller specified an offset that is
+		 * larger than the total size of the buffers
+		 * it provided.
+		 */
+		return (CRYPTO_DATA_LEN_RANGE);
+	}
+
+	if (offset + digest_len <=
+	    digest->cd_uio->uio_iov[vec_idx].iov_len) {
+		/*
+		 * The computed SHA1 digest will fit in the current
+		 * iovec.
+		 */
+		if (digest_len != SHA1_DIGEST_LENGTH) {
+			/*
+			 * The caller requested a short digest. Digest
+			 * into a scratch buffer and return to
+			 * the user only what was requested.
+			 */
+			SHA1Final(digest_scratch, sha1_ctx);
+			bcopy(digest_scratch, (uchar_t *)digest->
+			    cd_uio->uio_iov[vec_idx].iov_base + offset,
+			    digest_len);
+		} else {
+			SHA1Final((uchar_t *)digest->
+			    cd_uio->uio_iov[vec_idx].iov_base + offset,
+			    sha1_ctx);
+		}
+	} else {
+		/*
+		 * The computed digest will be crossing one or more iovec's.
+		 * This is bad performance-wise but we need to support it.
+		 * Allocate a small scratch buffer on the stack and
+		 * copy it piece meal to the specified digest iovec's.
+		 */
+		uchar_t digest_tmp[SHA1_DIGEST_LENGTH];
+		off_t scratch_offset = 0;
+		size_t length = digest_len;
+		size_t cur_len;
+
+		SHA1Final(digest_tmp, sha1_ctx);
+
+		while (vec_idx < digest->cd_uio->uio_iovcnt && length > 0) {
+			cur_len = MIN(digest->cd_uio->uio_iov[vec_idx].iov_len -
+			    offset, length);
+			bcopy(digest_tmp + scratch_offset,
+			    digest->cd_uio->uio_iov[vec_idx].iov_base + offset,
+			    cur_len);
+
+			length -= cur_len;
+			vec_idx++;
+			scratch_offset += cur_len;
+			offset = 0;
+		}
+
+		if (vec_idx == digest->cd_uio->uio_iovcnt && length > 0) {
+			/*
+			 * The end of the specified iovec's was reached but
+			 * the length requested could not be processed, i.e.
+			 * The caller requested to digest more data than it
+			 * provided.
+			 */
+			return (CRYPTO_DATA_LEN_RANGE);
+		}
+	}
+
+	return (CRYPTO_SUCCESS);
+}
+
+/* ARGSUSED */
+/*
+ * Single-part digest over an initialized context: update with `data`,
+ * finalize into `digest`, and free the per-operation context (the
+ * context is consumed even on failure, except the buffer-too-small
+ * probe below, which leaves it intact for a retry).
+ */
+static int
+sha1_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest,
+    crypto_req_handle_t req)
+{
+	int ret = CRYPTO_SUCCESS;
+
+	ASSERT(ctx->cc_provider_private != NULL);
+
+	/*
+	 * We need to just return the length needed to store the output.
+	 * We should not destroy the context for the following cases.
+	 */
+	if ((digest->cd_length == 0) ||
+	    (digest->cd_length < SHA1_DIGEST_LENGTH)) {
+		digest->cd_length = SHA1_DIGEST_LENGTH;
+		return (CRYPTO_BUFFER_TOO_SMALL);
+	}
+
+	/*
+	 * Do the SHA1 update on the specified input data.
+	 */
+	switch (data->cd_format) {
+	case CRYPTO_DATA_RAW:
+		SHA1Update(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
+		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
+		    data->cd_length);
+		break;
+	case CRYPTO_DATA_UIO:
+		ret = sha1_digest_update_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
+		    data);
+		break;
+	default:
+		ret = CRYPTO_ARGUMENTS_BAD;
+	}
+
+	if (ret != CRYPTO_SUCCESS) {
+		/* the update failed, free context and bail */
+		kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
+		ctx->cc_provider_private = NULL;
+		digest->cd_length = 0;
+		return (ret);
+	}
+
+	/*
+	 * Do a SHA1 final, must be done separately since the digest
+	 * type can be different than the input data type.
+	 */
+	switch (digest->cd_format) {
+	case CRYPTO_DATA_RAW:
+		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
+		    digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
+		break;
+	case CRYPTO_DATA_UIO:
+		ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
+		    digest, SHA1_DIGEST_LENGTH, NULL);
+		break;
+	default:
+		ret = CRYPTO_ARGUMENTS_BAD;
+	}
+
+	/* all done, free context and return */
+
+	if (ret == CRYPTO_SUCCESS) {
+		digest->cd_length = SHA1_DIGEST_LENGTH;
+	} else {
+		digest->cd_length = 0;
+	}
+
+	kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
+	ctx->cc_provider_private = NULL;
+	return (ret);
+}
+
+/* ARGSUSED */
+/*
+ * Multi-part digest update: absorb `data` (raw or uio) into the running
+ * SHA1 context.  The context is left attached for further updates.
+ */
+static int
+sha1_digest_update(crypto_ctx_t *ctx, crypto_data_t *data,
+    crypto_req_handle_t req)
+{
+	SHA1_CTX *sha1_ctx;
+	int err = CRYPTO_SUCCESS;
+
+	ASSERT(ctx->cc_provider_private != NULL);
+	sha1_ctx = &PROV_SHA1_CTX(ctx)->sc_sha1_ctx;
+
+	switch (data->cd_format) {
+	case CRYPTO_DATA_RAW:
+		SHA1Update(sha1_ctx,
+		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
+		    data->cd_length);
+		break;
+	case CRYPTO_DATA_UIO:
+		err = sha1_digest_update_uio(sha1_ctx, data);
+		break;
+	default:
+		err = CRYPTO_ARGUMENTS_BAD;
+	}
+
+	return (err);
+}
+
+/* ARGSUSED */
+/*
+ * Multi-part digest finalization: emit the digest into `digest` and free
+ * the per-operation context.  A too-small output buffer returns the
+ * required size without consuming the context.
+ */
+static int
+sha1_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest,
+    crypto_req_handle_t req)
+{
+	int ret = CRYPTO_SUCCESS;
+
+	ASSERT(ctx->cc_provider_private != NULL);
+
+	/*
+	 * We need to just return the length needed to store the output.
+	 * We should not destroy the context for the following cases.
+	 */
+	if ((digest->cd_length == 0) ||
+	    (digest->cd_length < SHA1_DIGEST_LENGTH)) {
+		digest->cd_length = SHA1_DIGEST_LENGTH;
+		return (CRYPTO_BUFFER_TOO_SMALL);
+	}
+
+	/*
+	 * Do a SHA1 final.
+	 */
+	switch (digest->cd_format) {
+	case CRYPTO_DATA_RAW:
+		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
+		    digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
+		break;
+	case CRYPTO_DATA_UIO:
+		ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
+		    digest, SHA1_DIGEST_LENGTH, NULL);
+		break;
+	default:
+		ret = CRYPTO_ARGUMENTS_BAD;
+	}
+
+	/* all done, free context and return */
+
+	if (ret == CRYPTO_SUCCESS) {
+		digest->cd_length = SHA1_DIGEST_LENGTH;
+	} else {
+		digest->cd_length = 0;
+	}
+
+	kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
+	ctx->cc_provider_private = NULL;
+
+	return (ret);
+}
+
+/* ARGSUSED */
+/*
+ * Single-shot digest with no persistent context: init/update/final over
+ * an on-stack SHA1_CTX.  On any failure, cd_length is set to 0.
+ */
+static int
+sha1_digest_atomic(crypto_provider_handle_t provider,
+    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
+    crypto_data_t *data, crypto_data_t *digest,
+    crypto_req_handle_t req)
+{
+	int ret = CRYPTO_SUCCESS;
+	SHA1_CTX sha1_ctx;
+
+	if (mechanism->cm_type != SHA1_MECH_INFO_TYPE)
+		return (CRYPTO_MECHANISM_INVALID);
+
+	/*
+	 * Do the SHA1 init.
+	 */
+	SHA1Init(&sha1_ctx);
+
+	/*
+	 * Do the SHA1 update on the specified input data.
+	 */
+	switch (data->cd_format) {
+	case CRYPTO_DATA_RAW:
+		SHA1Update(&sha1_ctx,
+		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
+		    data->cd_length);
+		break;
+	case CRYPTO_DATA_UIO:
+		ret = sha1_digest_update_uio(&sha1_ctx, data);
+		break;
+	default:
+		ret = CRYPTO_ARGUMENTS_BAD;
+	}
+
+	if (ret != CRYPTO_SUCCESS) {
+		/* the update failed, bail */
+		digest->cd_length = 0;
+		return (ret);
+	}
+
+	/*
+	 * Do a SHA1 final, must be done separately since the digest
+	 * type can be different than the input data type.
+	 */
+	switch (digest->cd_format) {
+	case CRYPTO_DATA_RAW:
+		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
+		    digest->cd_offset, &sha1_ctx);
+		break;
+	case CRYPTO_DATA_UIO:
+		ret = sha1_digest_final_uio(&sha1_ctx, digest,
+		    SHA1_DIGEST_LENGTH, NULL);
+		break;
+	default:
+		ret = CRYPTO_ARGUMENTS_BAD;
+	}
+
+	if (ret == CRYPTO_SUCCESS) {
+		digest->cd_length = SHA1_DIGEST_LENGTH;
+	} else {
+		digest->cd_length = 0;
+	}
+
+	return (ret);
+}
+
+/*
+ * KCF software provider mac entry points.
+ *
+ * SHA1 HMAC is: SHA1(key XOR opad, SHA1(key XOR ipad, text))
+ *
+ * Init:
+ * The initialization routine initializes what we denote
+ * as the inner and outer contexts by doing
+ * - for inner context: SHA1(key XOR ipad)
+ * - for outer context: SHA1(key XOR opad)
+ *
+ * Update:
+ * Each subsequent SHA1 HMAC update will result in an
+ * update of the inner context with the specified data.
+ *
+ * Final:
+ * The SHA1 HMAC final will do a SHA1 final operation on the
+ * inner context, and the resulting digest will be used
+ * as the data for an update on the outer context. Last
+ * but not least, a SHA1 final on the outer context will
+ * be performed to obtain the SHA1 HMAC digest to return
+ * to the user.
+ */
+
+/*
+ * Initialize a SHA1-HMAC context: absorb (key XOR ipad) into the inner
+ * SHA1 context and (key XOR opad) into the outer one.  `keyval` must be
+ * at most SHA1_HMAC_BLOCK_SIZE bytes (callers pre-digest longer keys).
+ */
+static void
+sha1_mac_init_ctx(sha1_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
+{
+	uint32_t ipad[SHA1_HMAC_INTS_PER_BLOCK];
+	uint32_t opad[SHA1_HMAC_INTS_PER_BLOCK];
+	uint_t idx;
+
+	/* Copy the key into both pads, zero-filling to the block size. */
+	bzero(ipad, SHA1_HMAC_BLOCK_SIZE);
+	bzero(opad, SHA1_HMAC_BLOCK_SIZE);
+	bcopy(keyval, ipad, length_in_bytes);
+	bcopy(keyval, opad, length_in_bytes);
+
+	/* XOR key with ipad (0x36) and opad (0x5c), one word at a time */
+	for (idx = 0; idx < SHA1_HMAC_INTS_PER_BLOCK; idx++) {
+		ipad[idx] ^= 0x36363636;
+		opad[idx] ^= 0x5c5c5c5c;
+	}
+
+	/* inner context: SHA1(key XOR ipad) */
+	SHA1Init(&ctx->hc_icontext);
+	SHA1Update(&ctx->hc_icontext, (uint8_t *)ipad, SHA1_HMAC_BLOCK_SIZE);
+
+	/* outer context: SHA1(key XOR opad) */
+	SHA1Init(&ctx->hc_ocontext);
+	SHA1Update(&ctx->hc_ocontext, (uint8_t *)opad, SHA1_HMAC_BLOCK_SIZE);
+}
+
+/*
+ * Begin a multi-part SHA1-HMAC: allocate the HMAC context, key it (from
+ * the template or from `key`, pre-digesting keys longer than one block),
+ * and record the requested digest length for the GENERAL mechanism.
+ *
+ * Fix vs. original: when cm_param was NULL the code set the error but
+ * still ran PROV_SHA1_GET_DIGEST_LEN, dereferencing the NULL parameter.
+ * The macro is now only evaluated after the parameter is validated.
+ */
+static int
+sha1_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
+    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
+    crypto_req_handle_t req)
+{
+	int ret = CRYPTO_SUCCESS;
+	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
+
+	if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
+	    mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
+		return (CRYPTO_MECHANISM_INVALID);
+
+	/* Add support for key by attributes (RFE 4706552) */
+	if (key->ck_format != CRYPTO_KEY_RAW)
+		return (CRYPTO_ARGUMENTS_BAD);
+
+	ctx->cc_provider_private = kmem_alloc(sizeof (sha1_hmac_ctx_t),
+	    crypto_kmflag(req));
+	if (ctx->cc_provider_private == NULL)
+		return (CRYPTO_HOST_MEMORY);
+
+	if (ctx_template != NULL) {
+		/* reuse context template */
+		bcopy(ctx_template, PROV_SHA1_HMAC_CTX(ctx),
+		    sizeof (sha1_hmac_ctx_t));
+	} else {
+		/* no context template, compute context */
+		if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
+			uchar_t digested_key[SHA1_DIGEST_LENGTH];
+			sha1_hmac_ctx_t *hmac_ctx = ctx->cc_provider_private;
+
+			/*
+			 * Hash the passed-in key to get a smaller key.
+			 * The inner context is used since it hasn't been
+			 * initialized yet.
+			 */
+			PROV_SHA1_DIGEST_KEY(&hmac_ctx->hc_icontext,
+			    key->ck_data, keylen_in_bytes, digested_key);
+			sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx),
+			    digested_key, SHA1_DIGEST_LENGTH);
+		} else {
+			sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx),
+			    key->ck_data, keylen_in_bytes);
+		}
+	}
+
+	/*
+	 * Get the mechanism parameters, if applicable.
+	 */
+	PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type = mechanism->cm_type;
+	if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
+		if (mechanism->cm_param == NULL ||
+		    mechanism->cm_param_len != sizeof (ulong_t)) {
+			ret = CRYPTO_MECHANISM_PARAM_INVALID;
+		} else {
+			/* Read the digest length only from a valid param. */
+			PROV_SHA1_GET_DIGEST_LEN(mechanism,
+			    PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len);
+			if (PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len >
+			    SHA1_DIGEST_LENGTH)
+				ret = CRYPTO_MECHANISM_PARAM_INVALID;
+		}
+	}
+
+	if (ret != CRYPTO_SUCCESS) {
+		/* Scrub: the context holds keyed SHA1 state. */
+		bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
+		kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
+		ctx->cc_provider_private = NULL;
+	}
+
+	return (ret);
+}
+
+/* ARGSUSED */
+/*
+ * Multi-part HMAC update: feed `data` into the inner SHA1 context only;
+ * the outer context is untouched until sha1_mac_final().
+ */
+static int
+sha1_mac_update(crypto_ctx_t *ctx, crypto_data_t *data, crypto_req_handle_t req)
+{
+	SHA1_CTX *icontext;
+	int err = CRYPTO_SUCCESS;
+
+	ASSERT(ctx->cc_provider_private != NULL);
+	icontext = &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext;
+
+	switch (data->cd_format) {
+	case CRYPTO_DATA_RAW:
+		SHA1Update(icontext,
+		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
+		    data->cd_length);
+		break;
+	case CRYPTO_DATA_UIO:
+		err = sha1_digest_update_uio(icontext, data);
+		break;
+	default:
+		err = CRYPTO_ARGUMENTS_BAD;
+	}
+
+	return (err);
+}
+
+/* ARGSUSED */
+/*
+ * Multi-part HMAC finalization: finalize the inner context, feed the
+ * result into the outer context, and emit the (possibly truncated)
+ * outer digest into `mac`.  Frees the per-operation context except on
+ * the buffer-too-small probe, which leaves it intact for a retry.
+ */
+static int
+sha1_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac, crypto_req_handle_t req)
+{
+	int ret = CRYPTO_SUCCESS;
+	uchar_t digest[SHA1_DIGEST_LENGTH];
+	uint32_t digest_len = SHA1_DIGEST_LENGTH;
+
+	ASSERT(ctx->cc_provider_private != NULL);
+
+	/* GENERAL mechanism may have requested a truncated digest. */
+	if (PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type ==
+	    SHA1_HMAC_GEN_MECH_INFO_TYPE)
+		digest_len = PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len;
+
+	/*
+	 * We need to just return the length needed to store the output.
+	 * We should not destroy the context for the following cases.
+	 */
+	if ((mac->cd_length == 0) || (mac->cd_length < digest_len)) {
+		mac->cd_length = digest_len;
+		return (CRYPTO_BUFFER_TOO_SMALL);
+	}
+
+	/*
+	 * Do a SHA1 final on the inner context.
+	 */
+	SHA1Final(digest, &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext);
+
+	/*
+	 * Do a SHA1 update on the outer context, feeding the inner
+	 * digest as data.
+	 */
+	SHA1Update(&PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, digest,
+	    SHA1_DIGEST_LENGTH);
+
+	/*
+	 * Do a SHA1 final on the outer context, storing the computing
+	 * digest in the users buffer.
+	 */
+	switch (mac->cd_format) {
+	case CRYPTO_DATA_RAW:
+		if (digest_len != SHA1_DIGEST_LENGTH) {
+			/*
+			 * The caller requested a short digest. Digest
+			 * into a scratch buffer and return to
+			 * the user only what was requested.
+			 */
+			SHA1Final(digest,
+			    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext);
+			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
+			    mac->cd_offset, digest_len);
+		} else {
+			SHA1Final((unsigned char *)mac->cd_raw.iov_base +
+			    mac->cd_offset,
+			    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext);
+		}
+		break;
+	case CRYPTO_DATA_UIO:
+		ret = sha1_digest_final_uio(
+		    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, mac,
+		    digest_len, digest);
+		break;
+	default:
+		ret = CRYPTO_ARGUMENTS_BAD;
+	}
+
+	if (ret == CRYPTO_SUCCESS) {
+		mac->cd_length = digest_len;
+	} else {
+		mac->cd_length = 0;
+	}
+
+	/* Scrub keyed state before releasing the context. */
+	bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
+	kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
+	ctx->cc_provider_private = NULL;
+
+	return (ret);
+}
+
/*
 * Feed `data' (raw or uio format) into the inner SHA1 context of the
 * stack-allocated HMAC context `ctx', setting `ret' on failure.
 * NOTE: `ret' is not written on the raw path, so callers must
 * initialize it to CRYPTO_SUCCESS beforehand.
 */
#define	SHA1_MAC_UPDATE(data, ctx, ret) {				\
	switch (data->cd_format) {					\
	case CRYPTO_DATA_RAW:						\
		SHA1Update(&(ctx).hc_icontext,				\
		    (uint8_t *)data->cd_raw.iov_base +			\
		    data->cd_offset, data->cd_length);			\
		break;							\
	case CRYPTO_DATA_UIO:						\
		ret = sha1_digest_update_uio(&(ctx).hc_icontext, data); \
		break;							\
	default:							\
		ret = CRYPTO_ARGUMENTS_BAD;				\
	}								\
}
+
/*
 * One-shot SHA1-HMAC: initialize (from key or precomputed template),
 * update with `data', and finalize into `mac', all with a context on
 * the stack.  The stack context is zeroized on every exit path since
 * it contains key-derived state.
 */
/* ARGSUSED */
static int
sha1_mac_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;
	uchar_t digest[SHA1_DIGEST_LENGTH];
	sha1_hmac_ctx_t sha1_hmac_ctx;
	uint32_t digest_len = SHA1_DIGEST_LENGTH;
	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);

	if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
	    mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
		return (CRYPTO_MECHANISM_INVALID);

	/* Add support for key by attributes (RFE 4706552) */
	if (key->ck_format != CRYPTO_KEY_RAW)
		return (CRYPTO_ARGUMENTS_BAD);

	if (ctx_template != NULL) {
		/* reuse context template */
		bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
	} else {
		/* no context template, initialize context */
		if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
			/*
			 * Hash the passed-in key to get a smaller key.
			 * The inner context is used since it hasn't been
			 * initialized yet.
			 */
			PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext,
			    key->ck_data, keylen_in_bytes, digest);
			sha1_mac_init_ctx(&sha1_hmac_ctx, digest,
			    SHA1_DIGEST_LENGTH);
		} else {
			sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data,
			    keylen_in_bytes);
		}
	}

	/* get the mechanism parameters, if applicable */
	if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
		if (mechanism->cm_param == NULL ||
		    mechanism->cm_param_len != sizeof (ulong_t)) {
			ret = CRYPTO_MECHANISM_PARAM_INVALID;
			goto bail;
		}
		PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len);
		if (digest_len > SHA1_DIGEST_LENGTH) {
			ret = CRYPTO_MECHANISM_PARAM_INVALID;
			goto bail;
		}
	}

	/* do a SHA1 update of the inner context using the specified data */
	SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret);
	if (ret != CRYPTO_SUCCESS)
		/* the update failed, free context and bail */
		goto bail;

	/*
	 * Do a SHA1 final on the inner context.
	 */
	SHA1Final(digest, &sha1_hmac_ctx.hc_icontext);

	/*
	 * Do an SHA1 update on the outer context, feeding the inner
	 * digest as data.
	 */
	SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH);

	/*
	 * Do a SHA1 final on the outer context, storing the computed
	 * digest in the users buffer.
	 */
	switch (mac->cd_format) {
	case CRYPTO_DATA_RAW:
		if (digest_len != SHA1_DIGEST_LENGTH) {
			/*
			 * The caller requested a short digest. Digest
			 * into a scratch buffer and return to
			 * the user only what was requested.
			 */
			SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext);
			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset, digest_len);
		} else {
			SHA1Final((unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset, &sha1_hmac_ctx.hc_ocontext);
		}
		break;
	case CRYPTO_DATA_UIO:
		ret = sha1_digest_final_uio(&sha1_hmac_ctx.hc_ocontext, mac,
		    digest_len, digest);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret == CRYPTO_SUCCESS) {
		mac->cd_length = digest_len;
	} else {
		mac->cd_length = 0;
	}
	/* Extra paranoia: zeroize the context on the stack */
	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));

	return (ret);
bail:
	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
	mac->cd_length = 0;
	return (ret);
}
+
+/* ARGSUSED */
+static int
+sha1_mac_verify_atomic(crypto_provider_handle_t provider,
+ crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
+ crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
+{
+ int ret = CRYPTO_SUCCESS;
+ uchar_t digest[SHA1_DIGEST_LENGTH];
+ sha1_hmac_ctx_t sha1_hmac_ctx;
+ uint32_t digest_len = SHA1_DIGEST_LENGTH;
+ uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
+
+ if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
+ mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
+ return (CRYPTO_MECHANISM_INVALID);
+
+ /* Add support for key by attributes (RFE 4706552) */
+ if (key->ck_format != CRYPTO_KEY_RAW)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ if (ctx_template != NULL) {
+ /* reuse context template */
+ bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
+ } else {
+ /* no context template, initialize context */
+ if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
+ /*
+ * Hash the passed-in key to get a smaller key.
+ * The inner context is used since it hasn't been
+ * initialized yet.
+ */
+ PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext,
+ key->ck_data, keylen_in_bytes, digest);
+ sha1_mac_init_ctx(&sha1_hmac_ctx, digest,
+ SHA1_DIGEST_LENGTH);
+ } else {
+ sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data,
+ keylen_in_bytes);
+ }
+ }
+
+ /* get the mechanism parameters, if applicable */
+ if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
+ if (mechanism->cm_param == NULL ||
+ mechanism->cm_param_len != sizeof (ulong_t)) {
+ ret = CRYPTO_MECHANISM_PARAM_INVALID;
+ goto bail;
+ }
+ PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len);
+ if (digest_len > SHA1_DIGEST_LENGTH) {
+ ret = CRYPTO_MECHANISM_PARAM_INVALID;
+ goto bail;
+ }
+ }
+
+ if (mac->cd_length != digest_len) {
+ ret = CRYPTO_INVALID_MAC;
+ goto bail;
+ }
+
+ /* do a SHA1 update of the inner context using the specified data */
+ SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret);
+ if (ret != CRYPTO_SUCCESS)
+ /* the update failed, free context and bail */
+ goto bail;
+
+ /* do a SHA1 final on the inner context */
+ SHA1Final(digest, &sha1_hmac_ctx.hc_icontext);
+
+ /*
+ * Do an SHA1 update on the outer context, feeding the inner
+ * digest as data.
+ */
+ SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH);
+
+ /*
+ * Do a SHA1 final on the outer context, storing the computed
+ * digest in the users buffer.
+ */
+ SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext);
+
+ /*
+ * Compare the computed digest against the expected digest passed
+ * as argument.
+ */
+
+ switch (mac->cd_format) {
+
+ case CRYPTO_DATA_RAW:
+ if (bcmp(digest, (unsigned char *)mac->cd_raw.iov_base +
+ mac->cd_offset, digest_len) != 0)
+ ret = CRYPTO_INVALID_MAC;
+ break;
+
+ case CRYPTO_DATA_UIO: {
+ off_t offset = mac->cd_offset;
+ uint_t vec_idx;
+ off_t scratch_offset = 0;
+ size_t length = digest_len;
+ size_t cur_len;
+
+ /* we support only kernel buffer */
+ if (mac->cd_uio->uio_segflg != UIO_SYSSPACE)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ /* jump to the first iovec containing the expected digest */
+ for (vec_idx = 0;
+ offset >= mac->cd_uio->uio_iov[vec_idx].iov_len &&
+ vec_idx < mac->cd_uio->uio_iovcnt;
+ offset -= mac->cd_uio->uio_iov[vec_idx++].iov_len)
+ ;
+ if (vec_idx == mac->cd_uio->uio_iovcnt) {
+ /*
+ * The caller specified an offset that is
+ * larger than the total size of the buffers
+ * it provided.
+ */
+ ret = CRYPTO_DATA_LEN_RANGE;
+ break;
+ }
+
+ /* do the comparison of computed digest vs specified one */
+ while (vec_idx < mac->cd_uio->uio_iovcnt && length > 0) {
+ cur_len = MIN(mac->cd_uio->uio_iov[vec_idx].iov_len -
+ offset, length);
+
+ if (bcmp(digest + scratch_offset,
+ mac->cd_uio->uio_iov[vec_idx].iov_base + offset,
+ cur_len) != 0) {
+ ret = CRYPTO_INVALID_MAC;
+ break;
+ }
+
+ length -= cur_len;
+ vec_idx++;
+ scratch_offset += cur_len;
+ offset = 0;
+ }
+ break;
+ }
+
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
+ return (ret);
+bail:
+ bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
+ mac->cd_length = 0;
+ return (ret);
+}
+
+/*
+ * KCF software provider context management entry points.
+ */
+
+/* ARGSUSED */
+static int
+sha1_create_ctx_template(crypto_provider_handle_t provider,
+ crypto_mechanism_t *mechanism, crypto_key_t *key,
+ crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size,
+ crypto_req_handle_t req)
+{
+ sha1_hmac_ctx_t *sha1_hmac_ctx_tmpl;
+ uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
+
+ if ((mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE) &&
+ (mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)) {
+ return (CRYPTO_MECHANISM_INVALID);
+ }
+
+ /* Add support for key by attributes (RFE 4706552) */
+ if (key->ck_format != CRYPTO_KEY_RAW)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ /*
+ * Allocate and initialize SHA1 context.
+ */
+ sha1_hmac_ctx_tmpl = kmem_alloc(sizeof (sha1_hmac_ctx_t),
+ crypto_kmflag(req));
+ if (sha1_hmac_ctx_tmpl == NULL)
+ return (CRYPTO_HOST_MEMORY);
+
+ if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
+ uchar_t digested_key[SHA1_DIGEST_LENGTH];
+
+ /*
+ * Hash the passed-in key to get a smaller key.
+ * The inner context is used since it hasn't been
+ * initialized yet.
+ */
+ PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx_tmpl->hc_icontext,
+ key->ck_data, keylen_in_bytes, digested_key);
+ sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, digested_key,
+ SHA1_DIGEST_LENGTH);
+ } else {
+ sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, key->ck_data,
+ keylen_in_bytes);
+ }
+
+ sha1_hmac_ctx_tmpl->hc_mech_type = mechanism->cm_type;
+ *ctx_template = (crypto_spi_ctx_template_t)sha1_hmac_ctx_tmpl;
+ *ctx_template_size = sizeof (sha1_hmac_ctx_t);
+
+
+ return (CRYPTO_SUCCESS);
+}
+
+static int
+sha1_free_context(crypto_ctx_t *ctx)
+{
+ uint_t ctx_len;
+ sha1_mech_type_t mech_type;
+
+ if (ctx->cc_provider_private == NULL)
+ return (CRYPTO_SUCCESS);
+
+ /*
+ * We have to free either SHA1 or SHA1-HMAC contexts, which
+ * have different lengths.
+ */
+
+ mech_type = PROV_SHA1_CTX(ctx)->sc_mech_type;
+ if (mech_type == SHA1_MECH_INFO_TYPE)
+ ctx_len = sizeof (sha1_ctx_t);
+ else {
+ ASSERT(mech_type == SHA1_HMAC_MECH_INFO_TYPE ||
+ mech_type == SHA1_HMAC_GEN_MECH_INFO_TYPE);
+ ctx_len = sizeof (sha1_hmac_ctx_t);
+ }
+
+ bzero(ctx->cc_provider_private, ctx_len);
+ kmem_free(ctx->cc_provider_private, ctx_len);
+ ctx->cc_provider_private = NULL;
+
+ return (CRYPTO_SUCCESS);
+}
diff --git a/module/icp/io/sha2_mod.c b/module/icp/io/sha2_mod.c
new file mode 100644
index 000000000..4466fcff0
--- /dev/null
+++ b/module/icp/io/sha2_mod.c
@@ -0,0 +1,1307 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/zfs_context.h>
+#include <sys/modctl.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/spi.h>
+#include <sys/crypto/icp.h>
+#define _SHA2_IMPL
+#include <sha2/sha2.h>
+#include <sha2/sha2_impl.h>
+
+/*
+ * The sha2 module is created with two modlinkages:
+ * - a modlmisc that allows consumers to directly call the entry points
+ * SHA2Init, SHA2Update, and SHA2Final.
+ * - a modlcrypto that allows the module to register with the Kernel
+ * Cryptographic Framework (KCF) as a software provider for the SHA2
+ * mechanisms.
+ */
+
/* Module linkage: register this module with KCF as a crypto provider. */
static struct modlcrypto modlcrypto = {
	&mod_cryptoops,
	"SHA2 Kernel SW Provider"
};

static struct modlinkage modlinkage = {
	MODREV_1, {&modlcrypto, NULL}
};
+
+/*
+ * Macros to access the SHA2 or SHA2-HMAC contexts from a context passed
+ * by KCF to one of the entry points.
+ */
+
+#define PROV_SHA2_CTX(ctx) ((sha2_ctx_t *)(ctx)->cc_provider_private)
+#define PROV_SHA2_HMAC_CTX(ctx) ((sha2_hmac_ctx_t *)(ctx)->cc_provider_private)
+
+/* to extract the digest length passed as mechanism parameter */
+#define PROV_SHA2_GET_DIGEST_LEN(m, len) { \
+ if (IS_P2ALIGNED((m)->cm_param, sizeof (ulong_t))) \
+ (len) = (uint32_t)*((ulong_t *)(m)->cm_param); \
+ else { \
+ ulong_t tmp_ulong; \
+ bcopy((m)->cm_param, &tmp_ulong, sizeof (ulong_t)); \
+ (len) = (uint32_t)tmp_ulong; \
+ } \
+}
+
/*
 * One-shot SHA2 digest of `key' into `digest', used to shrink HMAC keys
 * longer than one block down to digest size (per the HMAC spec).
 */
#define	PROV_SHA2_DIGEST_KEY(mech, ctx, key, len, digest) {	\
	SHA2Init(mech, ctx);					\
	SHA2Update(ctx, key, len);				\
	SHA2Final(digest, ctx);					\
}
+
+/*
+ * Mechanism info structure passed to KCF during registration.
+ */
/*
 * The three SHA256 mechanisms offered: plain digest, HMAC, and
 * "general" HMAC with caller-selectable (truncated) output length.
 * Note the digest entry reports key sizes in bits, the HMAC entries
 * in bytes.
 */
static crypto_mech_info_t sha2_mech_info_tab[] = {
	/* SHA256 */
	{SUN_CKM_SHA256, SHA256_MECH_INFO_TYPE,
	    CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
	    0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS},
	/* SHA256-HMAC */
	{SUN_CKM_SHA256_HMAC, SHA256_HMAC_MECH_INFO_TYPE,
	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
	    SHA2_HMAC_MIN_KEY_LEN, SHA2_HMAC_MAX_KEY_LEN,
	    CRYPTO_KEYSIZE_UNIT_IN_BYTES},
	/* SHA256-HMAC GENERAL */
	{SUN_CKM_SHA256_HMAC_GENERAL, SHA256_HMAC_GEN_MECH_INFO_TYPE,
	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
	    SHA2_HMAC_MIN_KEY_LEN, SHA2_HMAC_MAX_KEY_LEN,
	    CRYPTO_KEYSIZE_UNIT_IN_BYTES}
};
+
+static void sha2_provider_status(crypto_provider_handle_t, uint_t *);
+
+static crypto_control_ops_t sha2_control_ops = {
+ sha2_provider_status
+};
+
+static int sha2_digest_init(crypto_ctx_t *, crypto_mechanism_t *,
+ crypto_req_handle_t);
+static int sha2_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
+ crypto_req_handle_t);
+static int sha2_digest_update(crypto_ctx_t *, crypto_data_t *,
+ crypto_req_handle_t);
+static int sha2_digest_final(crypto_ctx_t *, crypto_data_t *,
+ crypto_req_handle_t);
+static int sha2_digest_atomic(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_data_t *, crypto_data_t *,
+ crypto_req_handle_t);
+
+static crypto_digest_ops_t sha2_digest_ops = {
+ sha2_digest_init,
+ sha2_digest,
+ sha2_digest_update,
+ NULL,
+ sha2_digest_final,
+ sha2_digest_atomic
+};
+
+static int sha2_mac_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
+ crypto_spi_ctx_template_t, crypto_req_handle_t);
+static int sha2_mac_update(crypto_ctx_t *, crypto_data_t *,
+ crypto_req_handle_t);
+static int sha2_mac_final(crypto_ctx_t *, crypto_data_t *, crypto_req_handle_t);
+static int sha2_mac_atomic(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
+ crypto_spi_ctx_template_t, crypto_req_handle_t);
+static int sha2_mac_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
+ crypto_spi_ctx_template_t, crypto_req_handle_t);
+
+static crypto_mac_ops_t sha2_mac_ops = {
+ sha2_mac_init,
+ NULL,
+ sha2_mac_update,
+ sha2_mac_final,
+ sha2_mac_atomic,
+ sha2_mac_verify_atomic
+};
+
+static int sha2_create_ctx_template(crypto_provider_handle_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *,
+ size_t *, crypto_req_handle_t);
+static int sha2_free_context(crypto_ctx_t *);
+
+static crypto_ctx_ops_t sha2_ctx_ops = {
+ sha2_create_ctx_template,
+ sha2_free_context
+};
+
/*
 * Operations vector handed to KCF.  Only control, digest, mac and
 * context-management families are implemented; all other op families
 * (cipher, sign, key management, ...) are NULL.
 */
static crypto_ops_t sha2_crypto_ops = {{{{{
	&sha2_control_ops,
	&sha2_digest_ops,
	NULL,
	&sha2_mac_ops,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	&sha2_ctx_ops
}}}}};
+
/* Provider description registered with KCF at module load. */
static crypto_provider_info_t sha2_prov_info = {{{{
	CRYPTO_SPI_VERSION_1,
	"SHA2 Software Provider",
	CRYPTO_SW_PROVIDER,
	NULL,
	&sha2_crypto_ops,
	sizeof (sha2_mech_info_tab)/sizeof (crypto_mech_info_t),
	sha2_mech_info_tab
}}}};
+
+static crypto_kcf_provider_handle_t sha2_prov_handle = 0;
+
/*
 * Module init: install the module, then register it with KCF.
 * Returns non-zero only if mod_install() itself fails.
 */
int
sha2_mod_init(void)
{
	int ret;

	if ((ret = mod_install(&modlinkage)) != 0)
		return (ret);

	/*
	 * Register with KCF. If the registration fails, log an
	 * error but do not uninstall the module, since the functionality
	 * provided by misc/sha2 should still be available.
	 */
	if ((ret = crypto_register_provider(&sha2_prov_info,
	    &sha2_prov_handle)) != CRYPTO_SUCCESS)
		cmn_err(CE_WARN, "sha2 _init: "
		    "crypto_register_provider() failed (0x%x)", ret);

	/* deliberately report success even if KCF registration failed */
	return (0);
}
+
+int
+sha2_mod_fini(void)
+{
+ int ret;
+
+ if (sha2_prov_handle != 0) {
+ if ((ret = crypto_unregister_provider(sha2_prov_handle)) !=
+ CRYPTO_SUCCESS) {
+ cmn_err(CE_WARN,
+ "sha2 _fini: crypto_unregister_provider() "
+ "failed (0x%x)", ret);
+ return (EBUSY);
+ }
+ sha2_prov_handle = 0;
+ }
+
+ return (mod_remove(&modlinkage));
+}
+
+/*
+ * KCF software provider control entry points.
+ */
/* ARGSUSED */
static void
sha2_provider_status(crypto_provider_handle_t provider, uint_t *status)
{
	/* software provider: always ready, there is no device to poll */
	*status = CRYPTO_PROVIDER_READY;
}
+
+/*
+ * KCF software provider digest entry points.
+ */
+
+static int
+sha2_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
+ crypto_req_handle_t req)
+{
+
+ /*
+ * Allocate and initialize SHA2 context.
+ */
+ ctx->cc_provider_private = kmem_alloc(sizeof (sha2_ctx_t),
+ crypto_kmflag(req));
+ if (ctx->cc_provider_private == NULL)
+ return (CRYPTO_HOST_MEMORY);
+
+ PROV_SHA2_CTX(ctx)->sc_mech_type = mechanism->cm_type;
+ SHA2Init(mechanism->cm_type, &PROV_SHA2_CTX(ctx)->sc_sha2_ctx);
+
+ return (CRYPTO_SUCCESS);
+}
+
/*
 * Helper SHA2 digest update function for uio data.
 * Walks the iovec array starting at cd_offset and feeds cd_length
 * bytes into the SHA2 context.  Returns CRYPTO_DATA_LEN_RANGE if the
 * offset or length exceed the data the caller actually provided.
 */
static int
sha2_digest_update_uio(SHA2_CTX *sha2_ctx, crypto_data_t *data)
{
	off_t offset = data->cd_offset;
	size_t length = data->cd_length;
	uint_t vec_idx;
	size_t cur_len;

	/* we support only kernel buffer */
	if (data->cd_uio->uio_segflg != UIO_SYSSPACE)
		return (CRYPTO_ARGUMENTS_BAD);

	/*
	 * Jump to the first iovec containing data to be
	 * digested.  (vec_idx is bounds-checked before each
	 * uio_iov[vec_idx] access.)
	 */
	for (vec_idx = 0; vec_idx < data->cd_uio->uio_iovcnt &&
	    offset >= data->cd_uio->uio_iov[vec_idx].iov_len;
	    offset -= data->cd_uio->uio_iov[vec_idx++].iov_len)
		;
	if (vec_idx == data->cd_uio->uio_iovcnt) {
		/*
		 * The caller specified an offset that is larger than the
		 * total size of the buffers it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	/*
	 * Now do the digesting on the iovecs.
	 */
	while (vec_idx < data->cd_uio->uio_iovcnt && length > 0) {
		cur_len = MIN(data->cd_uio->uio_iov[vec_idx].iov_len -
		    offset, length);

		SHA2Update(sha2_ctx, (uint8_t *)data->cd_uio->
		    uio_iov[vec_idx].iov_base + offset, cur_len);
		length -= cur_len;
		vec_idx++;
		offset = 0;
	}

	if (vec_idx == data->cd_uio->uio_iovcnt && length > 0) {
		/*
		 * The end of the specified iovec's was reached but
		 * the length requested could not be processed, i.e.
		 * The caller requested to digest more data than it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	return (CRYPTO_SUCCESS);
}
+
+/*
+ * Helper SHA2 digest final function for uio data.
+ * digest_len is the length of the desired digest. If digest_len
+ * is smaller than the default SHA2 digest length, the caller
+ * must pass a scratch buffer, digest_scratch, which must
+ * be at least the algorithm's digest length bytes.
+ */
+static int
+sha2_digest_final_uio(SHA2_CTX *sha2_ctx, crypto_data_t *digest,
+ ulong_t digest_len, uchar_t *digest_scratch)
+{
+ off_t offset = digest->cd_offset;
+ uint_t vec_idx;
+
+ /* we support only kernel buffer */
+ if (digest->cd_uio->uio_segflg != UIO_SYSSPACE)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ /*
+ * Jump to the first iovec containing ptr to the digest to
+ * be returned.
+ */
+ for (vec_idx = 0; offset >= digest->cd_uio->uio_iov[vec_idx].iov_len &&
+ vec_idx < digest->cd_uio->uio_iovcnt;
+ offset -= digest->cd_uio->uio_iov[vec_idx++].iov_len)
+ ;
+ if (vec_idx == digest->cd_uio->uio_iovcnt) {
+ /*
+ * The caller specified an offset that is
+ * larger than the total size of the buffers
+ * it provided.
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+
+ if (offset + digest_len <=
+ digest->cd_uio->uio_iov[vec_idx].iov_len) {
+ /*
+ * The computed SHA2 digest will fit in the current
+ * iovec.
+ */
+ if (((sha2_ctx->algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE) &&
+ (digest_len != SHA256_DIGEST_LENGTH))) {
+ /*
+ * The caller requested a short digest. Digest
+ * into a scratch buffer and return to
+ * the user only what was requested.
+ */
+ SHA2Final(digest_scratch, sha2_ctx);
+
+ bcopy(digest_scratch, (uchar_t *)digest->
+ cd_uio->uio_iov[vec_idx].iov_base + offset,
+ digest_len);
+ } else {
+ SHA2Final((uchar_t *)digest->
+ cd_uio->uio_iov[vec_idx].iov_base + offset,
+ sha2_ctx);
+
+ }
+ } else {
+ /*
+ * The computed digest will be crossing one or more iovec's.
+ * This is bad performance-wise but we need to support it.
+ * Allocate a small scratch buffer on the stack and
+ * copy it piece meal to the specified digest iovec's.
+ */
+ uchar_t digest_tmp[SHA256_DIGEST_LENGTH];
+ off_t scratch_offset = 0;
+ size_t length = digest_len;
+ size_t cur_len;
+
+ SHA2Final(digest_tmp, sha2_ctx);
+
+ while (vec_idx < digest->cd_uio->uio_iovcnt && length > 0) {
+ cur_len =
+ MIN(digest->cd_uio->uio_iov[vec_idx].iov_len -
+ offset, length);
+ bcopy(digest_tmp + scratch_offset,
+ digest->cd_uio->uio_iov[vec_idx].iov_base + offset,
+ cur_len);
+
+ length -= cur_len;
+ vec_idx++;
+ scratch_offset += cur_len;
+ offset = 0;
+ }
+
+ if (vec_idx == digest->cd_uio->uio_iovcnt && length > 0) {
+ /*
+ * The end of the specified iovec's was reached but
+ * the length requested could not be processed, i.e.
+ * The caller requested to digest more data than it
+ * provided.
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+ }
+
+ return (CRYPTO_SUCCESS);
+}
+
/*
 * KCF single-part digest entry point: update with all of `data' and
 * finalize into `digest' in one call.  The per-request context is
 * always freed before returning, except when the output buffer is too
 * small (CRYPTO_BUFFER_TOO_SMALL), so the caller may retry.
 */
/* ARGSUSED */
static int
sha2_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest,
    crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;
	uint_t sha_digest_len;

	ASSERT(ctx->cc_provider_private != NULL);

	/* only the SHA256 mechanism is registered for plain digests */
	switch (PROV_SHA2_CTX(ctx)->sc_mech_type) {
	case SHA256_MECH_INFO_TYPE:
		sha_digest_len = SHA256_DIGEST_LENGTH;
		break;
	default:
		return (CRYPTO_MECHANISM_INVALID);
	}

	/*
	 * We need to just return the length needed to store the output.
	 * We should not destroy the context for the following cases.
	 */
	if ((digest->cd_length == 0) ||
	    (digest->cd_length < sha_digest_len)) {
		digest->cd_length = sha_digest_len;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	/*
	 * Do the SHA2 update on the specified input data.
	 */
	switch (data->cd_format) {
	case CRYPTO_DATA_RAW:
		SHA2Update(&PROV_SHA2_CTX(ctx)->sc_sha2_ctx,
		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
		    data->cd_length);
		break;
	case CRYPTO_DATA_UIO:
		ret = sha2_digest_update_uio(&PROV_SHA2_CTX(ctx)->sc_sha2_ctx,
		    data);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret != CRYPTO_SUCCESS) {
		/* the update failed, free context and bail */
		kmem_free(ctx->cc_provider_private, sizeof (sha2_ctx_t));
		ctx->cc_provider_private = NULL;
		digest->cd_length = 0;
		return (ret);
	}

	/*
	 * Do a SHA2 final, must be done separately since the digest
	 * type can be different than the input data type.
	 */
	switch (digest->cd_format) {
	case CRYPTO_DATA_RAW:
		SHA2Final((unsigned char *)digest->cd_raw.iov_base +
		    digest->cd_offset, &PROV_SHA2_CTX(ctx)->sc_sha2_ctx);
		break;
	case CRYPTO_DATA_UIO:
		ret = sha2_digest_final_uio(&PROV_SHA2_CTX(ctx)->sc_sha2_ctx,
		    digest, sha_digest_len, NULL);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	/* all done, free context and return */

	if (ret == CRYPTO_SUCCESS)
		digest->cd_length = sha_digest_len;
	else
		digest->cd_length = 0;

	kmem_free(ctx->cc_provider_private, sizeof (sha2_ctx_t));
	ctx->cc_provider_private = NULL;
	return (ret);
}
+
+/* ARGSUSED */
+static int
+sha2_digest_update(crypto_ctx_t *ctx, crypto_data_t *data,
+ crypto_req_handle_t req)
+{
+ int ret = CRYPTO_SUCCESS;
+
+ ASSERT(ctx->cc_provider_private != NULL);
+
+ /*
+ * Do the SHA2 update on the specified input data.
+ */
+ switch (data->cd_format) {
+ case CRYPTO_DATA_RAW:
+ SHA2Update(&PROV_SHA2_CTX(ctx)->sc_sha2_ctx,
+ (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
+ data->cd_length);
+ break;
+ case CRYPTO_DATA_UIO:
+ ret = sha2_digest_update_uio(&PROV_SHA2_CTX(ctx)->sc_sha2_ctx,
+ data);
+ break;
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ return (ret);
+}
+
/*
 * KCF multi-part digest final entry point: write the finished digest
 * into `digest' and free the per-request context.  The context is kept
 * alive only when the output buffer is too small, so the caller can
 * retry with a bigger buffer.
 */
/* ARGSUSED */
static int
sha2_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest,
    crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;
	uint_t sha_digest_len;

	ASSERT(ctx->cc_provider_private != NULL);

	/* only the SHA256 mechanism is registered for plain digests */
	switch (PROV_SHA2_CTX(ctx)->sc_mech_type) {
	case SHA256_MECH_INFO_TYPE:
		sha_digest_len = SHA256_DIGEST_LENGTH;
		break;
	default:
		return (CRYPTO_MECHANISM_INVALID);
	}

	/*
	 * We need to just return the length needed to store the output.
	 * We should not destroy the context for the following cases.
	 */
	if ((digest->cd_length == 0) ||
	    (digest->cd_length < sha_digest_len)) {
		digest->cd_length = sha_digest_len;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	/*
	 * Do a SHA2 final.
	 */
	switch (digest->cd_format) {
	case CRYPTO_DATA_RAW:
		SHA2Final((unsigned char *)digest->cd_raw.iov_base +
		    digest->cd_offset, &PROV_SHA2_CTX(ctx)->sc_sha2_ctx);
		break;
	case CRYPTO_DATA_UIO:
		ret = sha2_digest_final_uio(&PROV_SHA2_CTX(ctx)->sc_sha2_ctx,
		    digest, sha_digest_len, NULL);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	/* all done, free context and return */

	if (ret == CRYPTO_SUCCESS)
		digest->cd_length = sha_digest_len;
	else
		digest->cd_length = 0;

	kmem_free(ctx->cc_provider_private, sizeof (sha2_ctx_t));
	ctx->cc_provider_private = NULL;

	return (ret);
}
+
+/* ARGSUSED */
+static int
+sha2_digest_atomic(crypto_provider_handle_t provider,
+ crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
+ crypto_data_t *data, crypto_data_t *digest,
+ crypto_req_handle_t req)
+{
+ int ret = CRYPTO_SUCCESS;
+ SHA2_CTX sha2_ctx;
+ uint32_t sha_digest_len;
+
+ /*
+ * Do the SHA inits.
+ */
+
+ SHA2Init(mechanism->cm_type, &sha2_ctx);
+
+ switch (data->cd_format) {
+ case CRYPTO_DATA_RAW:
+ SHA2Update(&sha2_ctx, (uint8_t *)data->
+ cd_raw.iov_base + data->cd_offset, data->cd_length);
+ break;
+ case CRYPTO_DATA_UIO:
+ ret = sha2_digest_update_uio(&sha2_ctx, data);
+ break;
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ /*
+ * Do the SHA updates on the specified input data.
+ */
+
+ if (ret != CRYPTO_SUCCESS) {
+ /* the update failed, bail */
+ digest->cd_length = 0;
+ return (ret);
+ }
+
+ if (mechanism->cm_type <= SHA256_HMAC_GEN_MECH_INFO_TYPE)
+ sha_digest_len = SHA256_DIGEST_LENGTH;
+
+ /*
+ * Do a SHA2 final, must be done separately since the digest
+ * type can be different than the input data type.
+ */
+ switch (digest->cd_format) {
+ case CRYPTO_DATA_RAW:
+ SHA2Final((unsigned char *)digest->cd_raw.iov_base +
+ digest->cd_offset, &sha2_ctx);
+ break;
+ case CRYPTO_DATA_UIO:
+ ret = sha2_digest_final_uio(&sha2_ctx, digest,
+ sha_digest_len, NULL);
+ break;
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ if (ret == CRYPTO_SUCCESS)
+ digest->cd_length = sha_digest_len;
+ else
+ digest->cd_length = 0;
+
+ return (ret);
+}
+
+/*
+ * KCF software provider mac entry points.
+ *
+ * SHA2 HMAC is: SHA2(key XOR opad, SHA2(key XOR ipad, text))
+ *
+ * Init:
+ * The initialization routine initializes what we denote
+ * as the inner and outer contexts by doing
+ * - for inner context: SHA2(key XOR ipad)
+ * - for outer context: SHA2(key XOR opad)
+ *
+ * Update:
+ * Each subsequent SHA2 HMAC update will result in an
+ * update of the inner context with the specified data.
+ *
+ * Final:
+ * The SHA2 HMAC final will do a SHA2 final operation on the
+ * inner context, and the resulting digest will be used
+ * as the data for an update on the outer context. Last
+ * but not least, a SHA2 final on the outer context will
+ * be performed to obtain the SHA2 HMAC digest to return
+ * to the user.
+ */
+
+/*
+ * Initialize a SHA2-HMAC context.
+ */
+static void
+sha2_mac_init_ctx(sha2_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
+{
+ uint64_t ipad[SHA256_HMAC_BLOCK_SIZE / sizeof (uint64_t)];
+ uint64_t opad[SHA256_HMAC_BLOCK_SIZE / sizeof (uint64_t)];
+ int i, block_size, blocks_per_int64;
+
+ /* Determine the block size */
+ if (ctx->hc_mech_type <= SHA256_HMAC_GEN_MECH_INFO_TYPE) {
+ block_size = SHA256_HMAC_BLOCK_SIZE;
+ blocks_per_int64 = SHA256_HMAC_BLOCK_SIZE / sizeof (uint64_t);
+ }
+
+ (void) bzero(ipad, block_size);
+ (void) bzero(opad, block_size);
+ (void) bcopy(keyval, ipad, length_in_bytes);
+ (void) bcopy(keyval, opad, length_in_bytes);
+
+ /* XOR key with ipad (0x36) and opad (0x5c) */
+ for (i = 0; i < blocks_per_int64; i ++) {
+ ipad[i] ^= 0x3636363636363636;
+ opad[i] ^= 0x5c5c5c5c5c5c5c5c;
+ }
+
+ /* perform SHA2 on ipad */
+ SHA2Init(ctx->hc_mech_type, &ctx->hc_icontext);
+ SHA2Update(&ctx->hc_icontext, (uint8_t *)ipad, block_size);
+
+ /* perform SHA2 on opad */
+ SHA2Init(ctx->hc_mech_type, &ctx->hc_ocontext);
+ SHA2Update(&ctx->hc_ocontext, (uint8_t *)opad, block_size);
+
+}
+
+/*
+ */
+static int
+sha2_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
+ crypto_req_handle_t req)
+{
+ int ret = CRYPTO_SUCCESS;
+ uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
+ uint_t sha_digest_len, sha_hmac_block_size;
+
+ /*
+ * Set the digest length and block size to values approriate to the
+ * mechanism
+ */
+ switch (mechanism->cm_type) {
+ case SHA256_HMAC_MECH_INFO_TYPE:
+ case SHA256_HMAC_GEN_MECH_INFO_TYPE:
+ sha_digest_len = SHA256_DIGEST_LENGTH;
+ sha_hmac_block_size = SHA256_HMAC_BLOCK_SIZE;
+ break;
+ default:
+ return (CRYPTO_MECHANISM_INVALID);
+ }
+
+ if (key->ck_format != CRYPTO_KEY_RAW)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ ctx->cc_provider_private = kmem_alloc(sizeof (sha2_hmac_ctx_t),
+ crypto_kmflag(req));
+ if (ctx->cc_provider_private == NULL)
+ return (CRYPTO_HOST_MEMORY);
+
+ PROV_SHA2_HMAC_CTX(ctx)->hc_mech_type = mechanism->cm_type;
+ if (ctx_template != NULL) {
+ /* reuse context template */
+ bcopy(ctx_template, PROV_SHA2_HMAC_CTX(ctx),
+ sizeof (sha2_hmac_ctx_t));
+ } else {
+ /* no context template, compute context */
+ if (keylen_in_bytes > sha_hmac_block_size) {
+ uchar_t digested_key[SHA256_DIGEST_LENGTH];
+ sha2_hmac_ctx_t *hmac_ctx = ctx->cc_provider_private;
+
+ /*
+ * Hash the passed-in key to get a smaller key.
+ * The inner context is used since it hasn't been
+ * initialized yet.
+ */
+ PROV_SHA2_DIGEST_KEY(mechanism->cm_type / 3,
+ &hmac_ctx->hc_icontext,
+ key->ck_data, keylen_in_bytes, digested_key);
+ sha2_mac_init_ctx(PROV_SHA2_HMAC_CTX(ctx),
+ digested_key, sha_digest_len);
+ } else {
+ sha2_mac_init_ctx(PROV_SHA2_HMAC_CTX(ctx),
+ key->ck_data, keylen_in_bytes);
+ }
+ }
+
+ /*
+ * Get the mechanism parameters, if applicable.
+ */
+ if (mechanism->cm_type % 3 == 2) {
+ if (mechanism->cm_param == NULL ||
+ mechanism->cm_param_len != sizeof (ulong_t))
+ ret = CRYPTO_MECHANISM_PARAM_INVALID;
+ PROV_SHA2_GET_DIGEST_LEN(mechanism,
+ PROV_SHA2_HMAC_CTX(ctx)->hc_digest_len);
+ if (PROV_SHA2_HMAC_CTX(ctx)->hc_digest_len > sha_digest_len)
+ ret = CRYPTO_MECHANISM_PARAM_INVALID;
+ }
+
+ if (ret != CRYPTO_SUCCESS) {
+ bzero(ctx->cc_provider_private, sizeof (sha2_hmac_ctx_t));
+ kmem_free(ctx->cc_provider_private, sizeof (sha2_hmac_ctx_t));
+ ctx->cc_provider_private = NULL;
+ }
+
+ return (ret);
+}
+
+/* ARGSUSED */
+static int
+sha2_mac_update(crypto_ctx_t *ctx, crypto_data_t *data,
+ crypto_req_handle_t req)
+{
+ int ret = CRYPTO_SUCCESS;
+
+ ASSERT(ctx->cc_provider_private != NULL);
+
+ /*
+ * Do a SHA2 update of the inner context using the specified
+ * data.
+ */
+ switch (data->cd_format) {
+ case CRYPTO_DATA_RAW:
+ SHA2Update(&PROV_SHA2_HMAC_CTX(ctx)->hc_icontext,
+ (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
+ data->cd_length);
+ break;
+ case CRYPTO_DATA_UIO:
+ ret = sha2_digest_update_uio(
+ &PROV_SHA2_HMAC_CTX(ctx)->hc_icontext, data);
+ break;
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ return (ret);
+}
+
/* ARGSUSED */
/*
 * Finish a multi-part HMAC operation: finalize the inner context, feed the
 * inner digest into the outer context, finalize the outer context into the
 * caller's mac buffer, and destroy the operation context.
 *
 * If mac->cd_length is too small, the required length is stored in
 * mac->cd_length, CRYPTO_BUFFER_TOO_SMALL is returned, and the context is
 * preserved so the caller can retry with a bigger buffer.
 */
static int
sha2_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac, crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;
	uchar_t digest[SHA256_DIGEST_LENGTH];
	uint32_t digest_len = 0, sha_digest_len = 0;

	ASSERT(ctx->cc_provider_private != NULL);

	/* Set the digest lengths to values appropriate to the mechanism */
	switch (PROV_SHA2_HMAC_CTX(ctx)->hc_mech_type) {
	case SHA256_HMAC_MECH_INFO_TYPE:
		sha_digest_len = digest_len = SHA256_DIGEST_LENGTH;
		break;
	case SHA256_HMAC_GEN_MECH_INFO_TYPE:
		/*
		 * "General" variant: the caller chose the (possibly
		 * truncated) output length at init time; it was validated
		 * there to be <= sha_digest_len.
		 */
		sha_digest_len = SHA256_DIGEST_LENGTH;
		digest_len = PROV_SHA2_HMAC_CTX(ctx)->hc_digest_len;
		break;
	default:
		break;
	}

	/*
	 * We need to just return the length needed to store the output.
	 * We should not destroy the context for the following cases.
	 */
	if ((mac->cd_length == 0) || (mac->cd_length < digest_len)) {
		mac->cd_length = digest_len;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	/*
	 * Do a SHA2 final on the inner context.
	 */
	SHA2Final(digest, &PROV_SHA2_HMAC_CTX(ctx)->hc_icontext);

	/*
	 * Do a SHA2 update on the outer context, feeding the inner
	 * digest as data.
	 */
	SHA2Update(&PROV_SHA2_HMAC_CTX(ctx)->hc_ocontext, digest,
	    sha_digest_len);

	/*
	 * Do a SHA2 final on the outer context, storing the computed
	 * digest in the user's buffer.
	 */
	switch (mac->cd_format) {
	case CRYPTO_DATA_RAW:
		if (digest_len != sha_digest_len) {
			/*
			 * The caller requested a short digest. Digest
			 * into a scratch buffer and return to
			 * the user only what was requested.
			 */
			SHA2Final(digest,
			    &PROV_SHA2_HMAC_CTX(ctx)->hc_ocontext);
			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset, digest_len);
		} else {
			/* Full-length digest: finalize straight into place. */
			SHA2Final((unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset,
			    &PROV_SHA2_HMAC_CTX(ctx)->hc_ocontext);
		}
		break;
	case CRYPTO_DATA_UIO:
		ret = sha2_digest_final_uio(
		    &PROV_SHA2_HMAC_CTX(ctx)->hc_ocontext, mac,
		    digest_len, digest);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	/* Report the produced length, or 0 on failure. */
	if (ret == CRYPTO_SUCCESS)
		mac->cd_length = digest_len;
	else
		mac->cd_length = 0;

	/* Scrub key material, then tear down the operation context. */
	bzero(ctx->cc_provider_private, sizeof (sha2_hmac_ctx_t));
	kmem_free(ctx->cc_provider_private, sizeof (sha2_hmac_ctx_t));
	ctx->cc_provider_private = NULL;

	return (ret);
}
+
/*
 * Absorb `data` (CRYPTO_DATA_RAW or CRYPTO_DATA_UIO) into the inner SHA2
 * context of the hmac context `ctx`, setting `ret` to CRYPTO_ARGUMENTS_BAD
 * for any other data format.  Wrapped in do/while (0) so the macro expands
 * to a single statement and is safe inside an unbraced if/else; the call
 * sites' trailing semicolons are unaffected.
 */
#define	SHA2_MAC_UPDATE(data, ctx, ret)	do {				\
	switch (data->cd_format) {					\
	case CRYPTO_DATA_RAW:						\
		SHA2Update(&(ctx).hc_icontext,				\
		    (uint8_t *)data->cd_raw.iov_base +			\
		    data->cd_offset, data->cd_length);			\
		break;							\
	case CRYPTO_DATA_UIO:						\
		ret = sha2_digest_update_uio(&(ctx).hc_icontext, data);	\
		break;							\
	default:							\
		ret = CRYPTO_ARGUMENTS_BAD;				\
	}								\
} while (0)
+
/* ARGSUSED */
/*
 * Single-shot HMAC: key setup, data absorption, and finalization in one
 * call, using a stack-allocated context.  The context template, if given,
 * replaces key setup.  On any failure the stack context is scrubbed and
 * mac->cd_length is set to 0.
 */
static int
sha2_mac_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;
	uchar_t digest[SHA256_DIGEST_LENGTH];
	sha2_hmac_ctx_t sha2_hmac_ctx;
	uint32_t sha_digest_len, digest_len, sha_hmac_block_size;
	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);

	/*
	 * Set the digest length and block size to values appropriate to the
	 * mechanism
	 */
	switch (mechanism->cm_type) {
	case SHA256_HMAC_MECH_INFO_TYPE:
	case SHA256_HMAC_GEN_MECH_INFO_TYPE:
		sha_digest_len = digest_len = SHA256_DIGEST_LENGTH;
		sha_hmac_block_size = SHA256_HMAC_BLOCK_SIZE;
		break;
	default:
		return (CRYPTO_MECHANISM_INVALID);
	}

	/* Add support for key by attributes (RFE 4706552) */
	if (key->ck_format != CRYPTO_KEY_RAW)
		return (CRYPTO_ARGUMENTS_BAD);

	if (ctx_template != NULL) {
		/* reuse context template */
		bcopy(ctx_template, &sha2_hmac_ctx, sizeof (sha2_hmac_ctx_t));
	} else {
		sha2_hmac_ctx.hc_mech_type = mechanism->cm_type;
		/* no context template, initialize context */
		if (keylen_in_bytes > sha_hmac_block_size) {
			/*
			 * Hash the passed-in key to get a smaller key.
			 * The inner context is used since it hasn't been
			 * initialized yet.
			 */
			PROV_SHA2_DIGEST_KEY(mechanism->cm_type / 3,
			    &sha2_hmac_ctx.hc_icontext,
			    key->ck_data, keylen_in_bytes, digest);
			sha2_mac_init_ctx(&sha2_hmac_ctx, digest,
			    sha_digest_len);
		} else {
			sha2_mac_init_ctx(&sha2_hmac_ctx, key->ck_data,
			    keylen_in_bytes);
		}
	}

	/*
	 * Get the mechanism parameters, if applicable: the "general"
	 * variant (cm_type % 3 == 2) carries the requested (possibly
	 * truncated) output length in cm_param.
	 */
	if ((mechanism->cm_type % 3) == 2) {
		if (mechanism->cm_param == NULL ||
		    mechanism->cm_param_len != sizeof (ulong_t)) {
			ret = CRYPTO_MECHANISM_PARAM_INVALID;
			goto bail;
		}
		PROV_SHA2_GET_DIGEST_LEN(mechanism, digest_len);
		if (digest_len > sha_digest_len) {
			ret = CRYPTO_MECHANISM_PARAM_INVALID;
			goto bail;
		}
	}

	/* do a SHA2 update of the inner context using the specified data */
	SHA2_MAC_UPDATE(data, sha2_hmac_ctx, ret);
	if (ret != CRYPTO_SUCCESS)
		/* the update failed, free context and bail */
		goto bail;

	/*
	 * Do a SHA2 final on the inner context.
	 */
	SHA2Final(digest, &sha2_hmac_ctx.hc_icontext);

	/*
	 * Do an SHA2 update on the outer context, feeding the inner
	 * digest as data.
	 */
	SHA2Update(&sha2_hmac_ctx.hc_ocontext, digest, sha_digest_len);

	/*
	 * Do a SHA2 final on the outer context, storing the computed
	 * digest in the users buffer.
	 */
	switch (mac->cd_format) {
	case CRYPTO_DATA_RAW:
		if (digest_len != sha_digest_len) {
			/*
			 * The caller requested a short digest. Digest
			 * into a scratch buffer and return to
			 * the user only what was requested.
			 */
			SHA2Final(digest, &sha2_hmac_ctx.hc_ocontext);
			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset, digest_len);
		} else {
			SHA2Final((unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset, &sha2_hmac_ctx.hc_ocontext);
		}
		break;
	case CRYPTO_DATA_UIO:
		ret = sha2_digest_final_uio(&sha2_hmac_ctx.hc_ocontext, mac,
		    digest_len, digest);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret == CRYPTO_SUCCESS) {
		mac->cd_length = digest_len;
		return (CRYPTO_SUCCESS);
	}
bail:
	/* Scrub key material from the stack context before returning. */
	bzero(&sha2_hmac_ctx, sizeof (sha2_hmac_ctx_t));
	mac->cd_length = 0;
	return (ret);
}
+
+/* ARGSUSED */
+static int
+sha2_mac_verify_atomic(crypto_provider_handle_t provider,
+ crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
+ crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
+{
+ int ret = CRYPTO_SUCCESS;
+ uchar_t digest[SHA256_DIGEST_LENGTH];
+ sha2_hmac_ctx_t sha2_hmac_ctx;
+ uint32_t sha_digest_len, digest_len, sha_hmac_block_size;
+ uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
+
+ /*
+ * Set the digest length and block size to values appropriate to the
+ * mechanism
+ */
+ switch (mechanism->cm_type) {
+ case SHA256_HMAC_MECH_INFO_TYPE:
+ case SHA256_HMAC_GEN_MECH_INFO_TYPE:
+ sha_digest_len = digest_len = SHA256_DIGEST_LENGTH;
+ sha_hmac_block_size = SHA256_HMAC_BLOCK_SIZE;
+ break;
+ default:
+ return (CRYPTO_MECHANISM_INVALID);
+ }
+
+ /* Add support for key by attributes (RFE 4706552) */
+ if (key->ck_format != CRYPTO_KEY_RAW)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ if (ctx_template != NULL) {
+ /* reuse context template */
+ bcopy(ctx_template, &sha2_hmac_ctx, sizeof (sha2_hmac_ctx_t));
+ } else {
+ sha2_hmac_ctx.hc_mech_type = mechanism->cm_type;
+ /* no context template, initialize context */
+ if (keylen_in_bytes > sha_hmac_block_size) {
+ /*
+ * Hash the passed-in key to get a smaller key.
+ * The inner context is used since it hasn't been
+ * initialized yet.
+ */
+ PROV_SHA2_DIGEST_KEY(mechanism->cm_type / 3,
+ &sha2_hmac_ctx.hc_icontext,
+ key->ck_data, keylen_in_bytes, digest);
+ sha2_mac_init_ctx(&sha2_hmac_ctx, digest,
+ sha_digest_len);
+ } else {
+ sha2_mac_init_ctx(&sha2_hmac_ctx, key->ck_data,
+ keylen_in_bytes);
+ }
+ }
+
+ /* get the mechanism parameters, if applicable */
+ if (mechanism->cm_type % 3 == 2) {
+ if (mechanism->cm_param == NULL ||
+ mechanism->cm_param_len != sizeof (ulong_t)) {
+ ret = CRYPTO_MECHANISM_PARAM_INVALID;
+ goto bail;
+ }
+ PROV_SHA2_GET_DIGEST_LEN(mechanism, digest_len);
+ if (digest_len > sha_digest_len) {
+ ret = CRYPTO_MECHANISM_PARAM_INVALID;
+ goto bail;
+ }
+ }
+
+ if (mac->cd_length != digest_len) {
+ ret = CRYPTO_INVALID_MAC;
+ goto bail;
+ }
+
+ /* do a SHA2 update of the inner context using the specified data */
+ SHA2_MAC_UPDATE(data, sha2_hmac_ctx, ret);
+ if (ret != CRYPTO_SUCCESS)
+ /* the update failed, free context and bail */
+ goto bail;
+
+ /* do a SHA2 final on the inner context */
+ SHA2Final(digest, &sha2_hmac_ctx.hc_icontext);
+
+ /*
+ * Do an SHA2 update on the outer context, feeding the inner
+ * digest as data.
+ */
+ SHA2Update(&sha2_hmac_ctx.hc_ocontext, digest, sha_digest_len);
+
+ /*
+ * Do a SHA2 final on the outer context, storing the computed
+ * digest in the users buffer.
+ */
+ SHA2Final(digest, &sha2_hmac_ctx.hc_ocontext);
+
+ /*
+ * Compare the computed digest against the expected digest passed
+ * as argument.
+ */
+
+ switch (mac->cd_format) {
+
+ case CRYPTO_DATA_RAW:
+ if (bcmp(digest, (unsigned char *)mac->cd_raw.iov_base +
+ mac->cd_offset, digest_len) != 0)
+ ret = CRYPTO_INVALID_MAC;
+ break;
+
+ case CRYPTO_DATA_UIO: {
+ off_t offset = mac->cd_offset;
+ uint_t vec_idx;
+ off_t scratch_offset = 0;
+ size_t length = digest_len;
+ size_t cur_len;
+
+ /* we support only kernel buffer */
+ if (mac->cd_uio->uio_segflg != UIO_SYSSPACE)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ /* jump to the first iovec containing the expected digest */
+ for (vec_idx = 0;
+ offset >= mac->cd_uio->uio_iov[vec_idx].iov_len &&
+ vec_idx < mac->cd_uio->uio_iovcnt;
+ offset -= mac->cd_uio->uio_iov[vec_idx++].iov_len)
+ ;
+ if (vec_idx == mac->cd_uio->uio_iovcnt) {
+ /*
+ * The caller specified an offset that is
+ * larger than the total size of the buffers
+ * it provided.
+ */
+ ret = CRYPTO_DATA_LEN_RANGE;
+ break;
+ }
+
+ /* do the comparison of computed digest vs specified one */
+ while (vec_idx < mac->cd_uio->uio_iovcnt && length > 0) {
+ cur_len = MIN(mac->cd_uio->uio_iov[vec_idx].iov_len -
+ offset, length);
+
+ if (bcmp(digest + scratch_offset,
+ mac->cd_uio->uio_iov[vec_idx].iov_base + offset,
+ cur_len) != 0) {
+ ret = CRYPTO_INVALID_MAC;
+ break;
+ }
+
+ length -= cur_len;
+ vec_idx++;
+ scratch_offset += cur_len;
+ offset = 0;
+ }
+ break;
+ }
+
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ return (ret);
+bail:
+ bzero(&sha2_hmac_ctx, sizeof (sha2_hmac_ctx_t));
+ mac->cd_length = 0;
+ return (ret);
+}
+
+/*
+ * KCF software provider context management entry points.
+ */
+
+/* ARGSUSED */
+static int
+sha2_create_ctx_template(crypto_provider_handle_t provider,
+ crypto_mechanism_t *mechanism, crypto_key_t *key,
+ crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size,
+ crypto_req_handle_t req)
+{
+ sha2_hmac_ctx_t *sha2_hmac_ctx_tmpl;
+ uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
+ uint32_t sha_digest_len, sha_hmac_block_size;
+
+ /*
+ * Set the digest length and block size to values appropriate to the
+ * mechanism
+ */
+ switch (mechanism->cm_type) {
+ case SHA256_HMAC_MECH_INFO_TYPE:
+ case SHA256_HMAC_GEN_MECH_INFO_TYPE:
+ sha_digest_len = SHA256_DIGEST_LENGTH;
+ sha_hmac_block_size = SHA256_HMAC_BLOCK_SIZE;
+ break;
+ default:
+ return (CRYPTO_MECHANISM_INVALID);
+ }
+
+ /* Add support for key by attributes (RFE 4706552) */
+ if (key->ck_format != CRYPTO_KEY_RAW)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ /*
+ * Allocate and initialize SHA2 context.
+ */
+ sha2_hmac_ctx_tmpl = kmem_alloc(sizeof (sha2_hmac_ctx_t),
+ crypto_kmflag(req));
+ if (sha2_hmac_ctx_tmpl == NULL)
+ return (CRYPTO_HOST_MEMORY);
+
+ sha2_hmac_ctx_tmpl->hc_mech_type = mechanism->cm_type;
+
+ if (keylen_in_bytes > sha_hmac_block_size) {
+ uchar_t digested_key[SHA256_DIGEST_LENGTH];
+
+ /*
+ * Hash the passed-in key to get a smaller key.
+ * The inner context is used since it hasn't been
+ * initialized yet.
+ */
+ PROV_SHA2_DIGEST_KEY(mechanism->cm_type / 3,
+ &sha2_hmac_ctx_tmpl->hc_icontext,
+ key->ck_data, keylen_in_bytes, digested_key);
+ sha2_mac_init_ctx(sha2_hmac_ctx_tmpl, digested_key,
+ sha_digest_len);
+ } else {
+ sha2_mac_init_ctx(sha2_hmac_ctx_tmpl, key->ck_data,
+ keylen_in_bytes);
+ }
+
+ *ctx_template = (crypto_spi_ctx_template_t)sha2_hmac_ctx_tmpl;
+ *ctx_template_size = sizeof (sha2_hmac_ctx_t);
+
+ return (CRYPTO_SUCCESS);
+}
+
+static int
+sha2_free_context(crypto_ctx_t *ctx)
+{
+ uint_t ctx_len;
+
+ if (ctx->cc_provider_private == NULL)
+ return (CRYPTO_SUCCESS);
+
+ /*
+ * We have to free either SHA2 or SHA2-HMAC contexts, which
+ * have different lengths.
+ *
+ * Note: Below is dependent on the mechanism ordering.
+ */
+
+ if (PROV_SHA2_CTX(ctx)->sc_mech_type % 3 == 0)
+ ctx_len = sizeof (sha2_ctx_t);
+ else
+ ctx_len = sizeof (sha2_hmac_ctx_t);
+
+ bzero(ctx->cc_provider_private, ctx_len);
+ kmem_free(ctx->cc_provider_private, ctx_len);
+ ctx->cc_provider_private = NULL;
+
+ return (CRYPTO_SUCCESS);
+}