author    наб <[email protected]>              2021-12-22 23:29:25 +0100
committer Brian Behlendorf <[email protected]>  2022-02-15 16:23:28 -0800
commit    464700ae0293a3bb5d84d35eb7fc771ec22f3fad
tree      add44730a05d959049485cce3b21d58bcb0d4aca  /module/icp/core
parent    f5896e2bdf9d8824befe8660c7fe1f77ff773e3b
module: icp: spi: crypto_ops_t: remove unused op types
Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: Ahelenia Ziemiańska <[email protected]>
Closes #12901
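The crypto_ops_t structure itself lives in include/sys/crypto/spi.h and is outside this diffstat; as a rough orientation only, the shape it is left with can be reconstructed from the four vectors that allocate_ops() and kcf_free_provider_desc() below still handle. Everything in the sketch is inferred from those call sites, so the real header may order or annotate the members differently:

/*
 * Illustrative sketch only -- reconstructed from the members that
 * module/icp/core still allocates and frees after this change.  The
 * authoritative definition is in include/sys/crypto/spi.h, which is
 * not part of this diff.
 */
typedef struct crypto_ops {
	crypto_digest_ops_t	*co_digest_ops;	/* digest init/update/final */
	crypto_cipher_ops_t	*co_cipher_ops;	/* encrypt/decrypt */
	crypto_mac_ops_t	*co_mac_ops;	/* MAC init/update/final */
	crypto_ctx_ops_t	*co_ctx_ops;	/* context create/free */
} crypto_ops_t;

The sign, verify, dual, dual-cipher/MAC, random, session, object, key, provider-management, mech, and nostore-key vectors are the ones the hunks below stop allocating, copying, and freeing.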
Diffstat (limited to 'module/icp/core')
-rw-r--r--  module/icp/core/kcf_callprov.c  | 1002
-rw-r--r--  module/icp/core/kcf_mech_tabs.c |  121
-rw-r--r--  module/icp/core/kcf_prov_tabs.c |  216
-rw-r--r--  module/icp/core/kcf_sched.c     |  574
4 files changed, 24 insertions, 1889 deletions
diff --git a/module/icp/core/kcf_callprov.c b/module/icp/core/kcf_callprov.c
index 345014d0a..1468e0a1a 100644
--- a/module/icp/core/kcf_callprov.c
+++ b/module/icp/core/kcf_callprov.c
@@ -27,9 +27,6 @@
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
-static int kcf_emulate_dual(kcf_provider_desc_t *, crypto_ctx_t *,
- kcf_req_params_t *);
-
void
kcf_free_triedlist(kcf_prov_tried_t *list)
{
@@ -349,144 +346,6 @@ kcf_get_mech_provider(crypto_mech_type_t mech_type, kcf_mech_entry_t **mepp,
}
/*
- * Very similar to kcf_get_mech_provider(). Finds the best provider capable of
- * a dual operation with both me1 and me2.
- * When no dual-ops capable providers are available, return the best provider
- * for me1 only, and sets *prov_mt2 to CRYPTO_INVALID_MECHID;
- * We assume/expect that a slower HW capable of the dual is still
- * faster than the 2 fastest providers capable of the individual ops
- * separately.
- */
-kcf_provider_desc_t *
-kcf_get_dual_provider(crypto_mechanism_t *mech1, crypto_mechanism_t *mech2,
- kcf_mech_entry_t **mepp, crypto_mech_type_t *prov_mt1,
- crypto_mech_type_t *prov_mt2, int *error, kcf_prov_tried_t *triedl,
- crypto_func_group_t fg1, crypto_func_group_t fg2, boolean_t call_restrict,
- size_t data_size)
-{
- kcf_provider_desc_t *pd = NULL, *pdm1 = NULL, *pdm1m2 = NULL;
- kcf_prov_mech_desc_t *prov_chain, *mdesc;
- int len, gqlen = INT_MAX, dgqlen = INT_MAX;
- crypto_mech_info_list_t *mil;
- crypto_mech_type_t m2id = mech2->cm_type;
- kcf_mech_entry_t *me;
-
- /* when mech is a valid mechanism, me will be its mech_entry */
- if (kcf_get_mech_entry(mech1->cm_type, &me) != KCF_SUCCESS) {
- *error = CRYPTO_MECHANISM_INVALID;
- return (NULL);
- }
-
- *prov_mt2 = CRYPTO_MECH_INVALID;
-
- if (mepp != NULL)
- *mepp = me;
- mutex_enter(&me->me_mutex);
-
- prov_chain = me->me_hw_prov_chain;
- /*
- * We check the threshold for using a hardware provider for
- * this amount of data. If there is no software provider available
- * for the first mechanism, then the threshold is ignored.
- */
- if ((prov_chain != NULL) &&
- ((data_size == 0) || (me->me_threshold == 0) ||
- (data_size >= me->me_threshold) ||
- ((mdesc = me->me_sw_prov) == NULL) ||
- (!IS_FG_SUPPORTED(mdesc, fg1)) ||
- (!KCF_IS_PROV_USABLE(mdesc->pm_prov_desc)))) {
- /* there is at least one provider */
- ASSERT(me->me_num_hwprov > 0);
-
- /*
- * Find the least loaded provider capable of the combo
- * me1 + me2, and save a pointer to the least loaded
- * provider capable of me1 only.
- */
- while (prov_chain != NULL) {
- pd = prov_chain->pm_prov_desc;
- len = KCF_PROV_LOAD(pd);
-
- if (!IS_FG_SUPPORTED(prov_chain, fg1) ||
- !KCF_IS_PROV_USABLE(pd) ||
- IS_PROVIDER_TRIED(pd, triedl) ||
- (call_restrict &&
- (pd->pd_flags & KCF_PROV_RESTRICTED))) {
- prov_chain = prov_chain->pm_next;
- continue;
- }
-
- /* Save the best provider capable of m1 */
- if (len < gqlen) {
- *prov_mt1 =
- prov_chain->pm_mech_info.cm_mech_number;
- gqlen = len;
- pdm1 = pd;
- }
-
- /* See if pd can do me2 too */
- for (mil = prov_chain->pm_mi_list;
- mil != NULL; mil = mil->ml_next) {
- if ((mil->ml_mech_info.cm_func_group_mask &
- fg2) == 0)
- continue;
-
- if ((mil->ml_kcf_mechid == m2id) &&
- (len < dgqlen)) {
- /* Bingo! */
- dgqlen = len;
- pdm1m2 = pd;
- *prov_mt2 =
- mil->ml_mech_info.cm_mech_number;
- *prov_mt1 = prov_chain->
- pm_mech_info.cm_mech_number;
- break;
- }
- }
-
- prov_chain = prov_chain->pm_next;
- }
-
- pd = (pdm1m2 != NULL) ? pdm1m2 : pdm1;
- }
-
- /* no HW provider for this mech, is there a SW provider? */
- if (pd == NULL && (mdesc = me->me_sw_prov) != NULL) {
- pd = mdesc->pm_prov_desc;
- if (!IS_FG_SUPPORTED(mdesc, fg1) ||
- !KCF_IS_PROV_USABLE(pd) ||
- IS_PROVIDER_TRIED(pd, triedl) ||
- (call_restrict && (pd->pd_flags & KCF_PROV_RESTRICTED)))
- pd = NULL;
- else {
- /* See if pd can do me2 too */
- for (mil = me->me_sw_prov->pm_mi_list;
- mil != NULL; mil = mil->ml_next) {
- if ((mil->ml_mech_info.cm_func_group_mask &
- fg2) == 0)
- continue;
-
- if (mil->ml_kcf_mechid == m2id) {
- /* Bingo! */
- *prov_mt2 =
- mil->ml_mech_info.cm_mech_number;
- break;
- }
- }
- *prov_mt1 = me->me_sw_prov->pm_mech_info.cm_mech_number;
- }
- }
-
- if (pd == NULL)
- *error = CRYPTO_MECH_NOT_SUPPORTED;
- else
- KCF_PROV_REFHOLD(pd);
-
- mutex_exit(&me->me_mutex);
- return (pd);
-}
-
-/*
* Do the actual work of calling the provider routines.
*
* pd - Provider structure
@@ -697,605 +556,6 @@ common_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
}
break;
}
-
- case KCF_OG_SIGN: {
- kcf_sign_ops_params_t *sops = &params->rp_u.sign_params;
-
- switch (optype) {
- case KCF_OP_INIT:
- KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype,
- pd, &sops->so_mech);
-
- err = KCF_PROV_SIGN_INIT(pd, ctx, &sops->so_mech,
- sops->so_key, sops->so_templ, rhndl);
- break;
-
- case KCF_OP_SIGN_RECOVER_INIT:
- KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype,
- pd, &sops->so_mech);
-
- err = KCF_PROV_SIGN_RECOVER_INIT(pd, ctx,
- &sops->so_mech, sops->so_key, sops->so_templ,
- rhndl);
- break;
-
- case KCF_OP_SINGLE:
- err = KCF_PROV_SIGN(pd, ctx, sops->so_data,
- sops->so_signature, rhndl);
- break;
-
- case KCF_OP_SIGN_RECOVER:
- err = KCF_PROV_SIGN_RECOVER(pd, ctx,
- sops->so_data, sops->so_signature, rhndl);
- break;
-
- case KCF_OP_UPDATE:
- err = KCF_PROV_SIGN_UPDATE(pd, ctx, sops->so_data,
- rhndl);
- break;
-
- case KCF_OP_FINAL:
- err = KCF_PROV_SIGN_FINAL(pd, ctx, sops->so_signature,
- rhndl);
- break;
-
- case KCF_OP_ATOMIC:
- ASSERT(ctx == NULL);
- KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype,
- pd, &sops->so_mech);
-
- err = KCF_PROV_SIGN_ATOMIC(pd, sops->so_sid,
- &sops->so_mech, sops->so_key, sops->so_data,
- sops->so_templ, sops->so_signature, rhndl);
- break;
-
- case KCF_OP_SIGN_RECOVER_ATOMIC:
- ASSERT(ctx == NULL);
- KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype,
- pd, &sops->so_mech);
-
- err = KCF_PROV_SIGN_RECOVER_ATOMIC(pd, sops->so_sid,
- &sops->so_mech, sops->so_key, sops->so_data,
- sops->so_templ, sops->so_signature, rhndl);
- break;
-
- default:
- break;
- }
- break;
- }
-
- case KCF_OG_VERIFY: {
- kcf_verify_ops_params_t *vops = &params->rp_u.verify_params;
-
- switch (optype) {
- case KCF_OP_INIT:
- KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype,
- pd, &vops->vo_mech);
-
- err = KCF_PROV_VERIFY_INIT(pd, ctx, &vops->vo_mech,
- vops->vo_key, vops->vo_templ, rhndl);
- break;
-
- case KCF_OP_VERIFY_RECOVER_INIT:
- KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype,
- pd, &vops->vo_mech);
-
- err = KCF_PROV_VERIFY_RECOVER_INIT(pd, ctx,
- &vops->vo_mech, vops->vo_key, vops->vo_templ,
- rhndl);
- break;
-
- case KCF_OP_SINGLE:
- err = KCF_PROV_VERIFY(pd, ctx, vops->vo_data,
- vops->vo_signature, rhndl);
- break;
-
- case KCF_OP_VERIFY_RECOVER:
- err = KCF_PROV_VERIFY_RECOVER(pd, ctx,
- vops->vo_signature, vops->vo_data, rhndl);
- break;
-
- case KCF_OP_UPDATE:
- err = KCF_PROV_VERIFY_UPDATE(pd, ctx, vops->vo_data,
- rhndl);
- break;
-
- case KCF_OP_FINAL:
- err = KCF_PROV_VERIFY_FINAL(pd, ctx, vops->vo_signature,
- rhndl);
- break;
-
- case KCF_OP_ATOMIC:
- ASSERT(ctx == NULL);
- KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype,
- pd, &vops->vo_mech);
-
- err = KCF_PROV_VERIFY_ATOMIC(pd, vops->vo_sid,
- &vops->vo_mech, vops->vo_key, vops->vo_data,
- vops->vo_templ, vops->vo_signature, rhndl);
- break;
-
- case KCF_OP_VERIFY_RECOVER_ATOMIC:
- ASSERT(ctx == NULL);
- KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype,
- pd, &vops->vo_mech);
-
- err = KCF_PROV_VERIFY_RECOVER_ATOMIC(pd, vops->vo_sid,
- &vops->vo_mech, vops->vo_key, vops->vo_signature,
- vops->vo_templ, vops->vo_data, rhndl);
- break;
-
- default:
- break;
- }
- break;
- }
-
- case KCF_OG_ENCRYPT_MAC: {
- kcf_encrypt_mac_ops_params_t *eops =
- &params->rp_u.encrypt_mac_params;
- kcf_context_t *kcf_secondctx;
-
- switch (optype) {
- case KCF_OP_INIT:
- kcf_secondctx = ((kcf_context_t *)
- (ctx->cc_framework_private))->kc_secondctx;
-
- if (kcf_secondctx != NULL) {
- err = kcf_emulate_dual(pd, ctx, params);
- break;
- }
- KCF_SET_PROVIDER_MECHNUM(
- eops->em_framework_encr_mechtype,
- pd, &eops->em_encr_mech);
-
- KCF_SET_PROVIDER_MECHNUM(
- eops->em_framework_mac_mechtype,
- pd, &eops->em_mac_mech);
-
- err = KCF_PROV_ENCRYPT_MAC_INIT(pd, ctx,
- &eops->em_encr_mech, eops->em_encr_key,
- &eops->em_mac_mech, eops->em_mac_key,
- eops->em_encr_templ, eops->em_mac_templ,
- rhndl);
-
- break;
-
- case KCF_OP_SINGLE:
- err = KCF_PROV_ENCRYPT_MAC(pd, ctx,
- eops->em_plaintext, eops->em_ciphertext,
- eops->em_mac, rhndl);
- break;
-
- case KCF_OP_UPDATE:
- kcf_secondctx = ((kcf_context_t *)
- (ctx->cc_framework_private))->kc_secondctx;
- if (kcf_secondctx != NULL) {
- err = kcf_emulate_dual(pd, ctx, params);
- break;
- }
- err = KCF_PROV_ENCRYPT_MAC_UPDATE(pd, ctx,
- eops->em_plaintext, eops->em_ciphertext, rhndl);
- break;
-
- case KCF_OP_FINAL:
- kcf_secondctx = ((kcf_context_t *)
- (ctx->cc_framework_private))->kc_secondctx;
- if (kcf_secondctx != NULL) {
- err = kcf_emulate_dual(pd, ctx, params);
- break;
- }
- err = KCF_PROV_ENCRYPT_MAC_FINAL(pd, ctx,
- eops->em_ciphertext, eops->em_mac, rhndl);
- break;
-
- case KCF_OP_ATOMIC:
- ASSERT(ctx == NULL);
-
- KCF_SET_PROVIDER_MECHNUM(
- eops->em_framework_encr_mechtype,
- pd, &eops->em_encr_mech);
-
- KCF_SET_PROVIDER_MECHNUM(
- eops->em_framework_mac_mechtype,
- pd, &eops->em_mac_mech);
-
- err = KCF_PROV_ENCRYPT_MAC_ATOMIC(pd, eops->em_sid,
- &eops->em_encr_mech, eops->em_encr_key,
- &eops->em_mac_mech, eops->em_mac_key,
- eops->em_plaintext, eops->em_ciphertext,
- eops->em_mac,
- eops->em_encr_templ, eops->em_mac_templ,
- rhndl);
-
- break;
-
- default:
- break;
- }
- break;
- }
-
- case KCF_OG_MAC_DECRYPT: {
- kcf_mac_decrypt_ops_params_t *dops =
- &params->rp_u.mac_decrypt_params;
- kcf_context_t *kcf_secondctx;
-
- switch (optype) {
- case KCF_OP_INIT:
- kcf_secondctx = ((kcf_context_t *)
- (ctx->cc_framework_private))->kc_secondctx;
-
- if (kcf_secondctx != NULL) {
- err = kcf_emulate_dual(pd, ctx, params);
- break;
- }
- KCF_SET_PROVIDER_MECHNUM(
- dops->md_framework_mac_mechtype,
- pd, &dops->md_mac_mech);
-
- KCF_SET_PROVIDER_MECHNUM(
- dops->md_framework_decr_mechtype,
- pd, &dops->md_decr_mech);
-
- err = KCF_PROV_MAC_DECRYPT_INIT(pd, ctx,
- &dops->md_mac_mech, dops->md_mac_key,
- &dops->md_decr_mech, dops->md_decr_key,
- dops->md_mac_templ, dops->md_decr_templ,
- rhndl);
-
- break;
-
- case KCF_OP_SINGLE:
- err = KCF_PROV_MAC_DECRYPT(pd, ctx,
- dops->md_ciphertext, dops->md_mac,
- dops->md_plaintext, rhndl);
- break;
-
- case KCF_OP_UPDATE:
- kcf_secondctx = ((kcf_context_t *)
- (ctx->cc_framework_private))->kc_secondctx;
- if (kcf_secondctx != NULL) {
- err = kcf_emulate_dual(pd, ctx, params);
- break;
- }
- err = KCF_PROV_MAC_DECRYPT_UPDATE(pd, ctx,
- dops->md_ciphertext, dops->md_plaintext, rhndl);
- break;
-
- case KCF_OP_FINAL:
- kcf_secondctx = ((kcf_context_t *)
- (ctx->cc_framework_private))->kc_secondctx;
- if (kcf_secondctx != NULL) {
- err = kcf_emulate_dual(pd, ctx, params);
- break;
- }
- err = KCF_PROV_MAC_DECRYPT_FINAL(pd, ctx,
- dops->md_mac, dops->md_plaintext, rhndl);
- break;
-
- case KCF_OP_ATOMIC:
- ASSERT(ctx == NULL);
-
- KCF_SET_PROVIDER_MECHNUM(
- dops->md_framework_mac_mechtype,
- pd, &dops->md_mac_mech);
-
- KCF_SET_PROVIDER_MECHNUM(
- dops->md_framework_decr_mechtype,
- pd, &dops->md_decr_mech);
-
- err = KCF_PROV_MAC_DECRYPT_ATOMIC(pd, dops->md_sid,
- &dops->md_mac_mech, dops->md_mac_key,
- &dops->md_decr_mech, dops->md_decr_key,
- dops->md_ciphertext, dops->md_mac,
- dops->md_plaintext,
- dops->md_mac_templ, dops->md_decr_templ,
- rhndl);
-
- break;
-
- case KCF_OP_MAC_VERIFY_DECRYPT_ATOMIC:
- ASSERT(ctx == NULL);
-
- KCF_SET_PROVIDER_MECHNUM(
- dops->md_framework_mac_mechtype,
- pd, &dops->md_mac_mech);
-
- KCF_SET_PROVIDER_MECHNUM(
- dops->md_framework_decr_mechtype,
- pd, &dops->md_decr_mech);
-
- err = KCF_PROV_MAC_VERIFY_DECRYPT_ATOMIC(pd,
- dops->md_sid, &dops->md_mac_mech, dops->md_mac_key,
- &dops->md_decr_mech, dops->md_decr_key,
- dops->md_ciphertext, dops->md_mac,
- dops->md_plaintext,
- dops->md_mac_templ, dops->md_decr_templ,
- rhndl);
-
- break;
-
- default:
- break;
- }
- break;
- }
-
- case KCF_OG_KEY: {
- kcf_key_ops_params_t *kops = &params->rp_u.key_params;
-
- ASSERT(ctx == NULL);
- KCF_SET_PROVIDER_MECHNUM(kops->ko_framework_mechtype, pd,
- &kops->ko_mech);
-
- switch (optype) {
- case KCF_OP_KEY_GENERATE:
- err = KCF_PROV_KEY_GENERATE(pd, kops->ko_sid,
- &kops->ko_mech,
- kops->ko_key_template, kops->ko_key_attribute_count,
- kops->ko_key_object_id_ptr, rhndl);
- break;
-
- case KCF_OP_KEY_GENERATE_PAIR:
- err = KCF_PROV_KEY_GENERATE_PAIR(pd, kops->ko_sid,
- &kops->ko_mech,
- kops->ko_key_template, kops->ko_key_attribute_count,
- kops->ko_private_key_template,
- kops->ko_private_key_attribute_count,
- kops->ko_key_object_id_ptr,
- kops->ko_private_key_object_id_ptr, rhndl);
- break;
-
- case KCF_OP_KEY_WRAP:
- err = KCF_PROV_KEY_WRAP(pd, kops->ko_sid,
- &kops->ko_mech,
- kops->ko_key, kops->ko_key_object_id_ptr,
- kops->ko_wrapped_key, kops->ko_wrapped_key_len_ptr,
- rhndl);
- break;
-
- case KCF_OP_KEY_UNWRAP:
- err = KCF_PROV_KEY_UNWRAP(pd, kops->ko_sid,
- &kops->ko_mech,
- kops->ko_key, kops->ko_wrapped_key,
- kops->ko_wrapped_key_len_ptr,
- kops->ko_key_template, kops->ko_key_attribute_count,
- kops->ko_key_object_id_ptr, rhndl);
- break;
-
- case KCF_OP_KEY_DERIVE:
- err = KCF_PROV_KEY_DERIVE(pd, kops->ko_sid,
- &kops->ko_mech,
- kops->ko_key, kops->ko_key_template,
- kops->ko_key_attribute_count,
- kops->ko_key_object_id_ptr, rhndl);
- break;
-
- default:
- break;
- }
- break;
- }
-
- case KCF_OG_RANDOM: {
- kcf_random_number_ops_params_t *rops =
- &params->rp_u.random_number_params;
-
- ASSERT(ctx == NULL);
-
- switch (optype) {
- case KCF_OP_RANDOM_SEED:
- err = KCF_PROV_SEED_RANDOM(pd, rops->rn_sid,
- rops->rn_buf, rops->rn_buflen, rops->rn_entropy_est,
- rops->rn_flags, rhndl);
- break;
-
- case KCF_OP_RANDOM_GENERATE:
- err = KCF_PROV_GENERATE_RANDOM(pd, rops->rn_sid,
- rops->rn_buf, rops->rn_buflen, rhndl);
- break;
-
- default:
- break;
- }
- break;
- }
-
- case KCF_OG_SESSION: {
- kcf_session_ops_params_t *sops = &params->rp_u.session_params;
-
- ASSERT(ctx == NULL);
- switch (optype) {
- case KCF_OP_SESSION_OPEN:
- /*
- * so_pd may be a logical provider, in which case
- * we need to check whether it has been removed.
- */
- if (KCF_IS_PROV_REMOVED(sops->so_pd)) {
- err = CRYPTO_DEVICE_ERROR;
- break;
- }
- err = KCF_PROV_SESSION_OPEN(pd, sops->so_sid_ptr,
- rhndl, sops->so_pd);
- break;
-
- case KCF_OP_SESSION_CLOSE:
- /*
- * so_pd may be a logical provider, in which case
- * we need to check whether it has been removed.
- */
- if (KCF_IS_PROV_REMOVED(sops->so_pd)) {
- err = CRYPTO_DEVICE_ERROR;
- break;
- }
- err = KCF_PROV_SESSION_CLOSE(pd, sops->so_sid,
- rhndl, sops->so_pd);
- break;
-
- case KCF_OP_SESSION_LOGIN:
- err = KCF_PROV_SESSION_LOGIN(pd, sops->so_sid,
- sops->so_user_type, sops->so_pin,
- sops->so_pin_len, rhndl);
- break;
-
- case KCF_OP_SESSION_LOGOUT:
- err = KCF_PROV_SESSION_LOGOUT(pd, sops->so_sid, rhndl);
- break;
-
- default:
- break;
- }
- break;
- }
-
- case KCF_OG_OBJECT: {
- kcf_object_ops_params_t *jops = &params->rp_u.object_params;
-
- ASSERT(ctx == NULL);
- switch (optype) {
- case KCF_OP_OBJECT_CREATE:
- err = KCF_PROV_OBJECT_CREATE(pd, jops->oo_sid,
- jops->oo_template, jops->oo_attribute_count,
- jops->oo_object_id_ptr, rhndl);
- break;
-
- case KCF_OP_OBJECT_COPY:
- err = KCF_PROV_OBJECT_COPY(pd, jops->oo_sid,
- jops->oo_object_id,
- jops->oo_template, jops->oo_attribute_count,
- jops->oo_object_id_ptr, rhndl);
- break;
-
- case KCF_OP_OBJECT_DESTROY:
- err = KCF_PROV_OBJECT_DESTROY(pd, jops->oo_sid,
- jops->oo_object_id, rhndl);
- break;
-
- case KCF_OP_OBJECT_GET_SIZE:
- err = KCF_PROV_OBJECT_GET_SIZE(pd, jops->oo_sid,
- jops->oo_object_id, jops->oo_object_size, rhndl);
- break;
-
- case KCF_OP_OBJECT_GET_ATTRIBUTE_VALUE:
- err = KCF_PROV_OBJECT_GET_ATTRIBUTE_VALUE(pd,
- jops->oo_sid, jops->oo_object_id,
- jops->oo_template, jops->oo_attribute_count, rhndl);
- break;
-
- case KCF_OP_OBJECT_SET_ATTRIBUTE_VALUE:
- err = KCF_PROV_OBJECT_SET_ATTRIBUTE_VALUE(pd,
- jops->oo_sid, jops->oo_object_id,
- jops->oo_template, jops->oo_attribute_count, rhndl);
- break;
-
- case KCF_OP_OBJECT_FIND_INIT:
- err = KCF_PROV_OBJECT_FIND_INIT(pd, jops->oo_sid,
- jops->oo_template, jops->oo_attribute_count,
- jops->oo_find_init_pp_ptr, rhndl);
- break;
-
- case KCF_OP_OBJECT_FIND:
- err = KCF_PROV_OBJECT_FIND(pd, jops->oo_find_pp,
- jops->oo_object_id_ptr, jops->oo_max_object_count,
- jops->oo_object_count_ptr, rhndl);
- break;
-
- case KCF_OP_OBJECT_FIND_FINAL:
- err = KCF_PROV_OBJECT_FIND_FINAL(pd, jops->oo_find_pp,
- rhndl);
- break;
-
- default:
- break;
- }
- break;
- }
-
- case KCF_OG_PROVMGMT: {
- kcf_provmgmt_ops_params_t *pops = &params->rp_u.provmgmt_params;
-
- ASSERT(ctx == NULL);
- switch (optype) {
- case KCF_OP_MGMT_EXTINFO:
- /*
- * po_pd may be a logical provider, in which case
- * we need to check whether it has been removed.
- */
- if (KCF_IS_PROV_REMOVED(pops->po_pd)) {
- err = CRYPTO_DEVICE_ERROR;
- break;
- }
- err = KCF_PROV_EXT_INFO(pd, pops->po_ext_info, rhndl,
- pops->po_pd);
- break;
-
- case KCF_OP_MGMT_INITTOKEN:
- err = KCF_PROV_INIT_TOKEN(pd, pops->po_pin,
- pops->po_pin_len, pops->po_label, rhndl);
- break;
-
- case KCF_OP_MGMT_INITPIN:
- err = KCF_PROV_INIT_PIN(pd, pops->po_sid, pops->po_pin,
- pops->po_pin_len, rhndl);
- break;
-
- case KCF_OP_MGMT_SETPIN:
- err = KCF_PROV_SET_PIN(pd, pops->po_sid,
- pops->po_old_pin, pops->po_old_pin_len,
- pops->po_pin, pops->po_pin_len, rhndl);
- break;
-
- default:
- break;
- }
- break;
- }
-
- case KCF_OG_NOSTORE_KEY: {
- kcf_key_ops_params_t *kops = &params->rp_u.key_params;
-
- ASSERT(ctx == NULL);
- KCF_SET_PROVIDER_MECHNUM(kops->ko_framework_mechtype, pd,
- &kops->ko_mech);
-
- switch (optype) {
- case KCF_OP_KEY_GENERATE:
- err = KCF_PROV_NOSTORE_KEY_GENERATE(pd, kops->ko_sid,
- &kops->ko_mech, kops->ko_key_template,
- kops->ko_key_attribute_count,
- kops->ko_out_template1,
- kops->ko_out_attribute_count1, rhndl);
- break;
-
- case KCF_OP_KEY_GENERATE_PAIR:
- err = KCF_PROV_NOSTORE_KEY_GENERATE_PAIR(pd,
- kops->ko_sid, &kops->ko_mech,
- kops->ko_key_template, kops->ko_key_attribute_count,
- kops->ko_private_key_template,
- kops->ko_private_key_attribute_count,
- kops->ko_out_template1,
- kops->ko_out_attribute_count1,
- kops->ko_out_template2,
- kops->ko_out_attribute_count2,
- rhndl);
- break;
-
- case KCF_OP_KEY_DERIVE:
- err = KCF_PROV_NOSTORE_KEY_DERIVE(pd, kops->ko_sid,
- &kops->ko_mech, kops->ko_key,
- kops->ko_key_template,
- kops->ko_key_attribute_count,
- kops->ko_out_template1,
- kops->ko_out_attribute_count1, rhndl);
- break;
-
- default:
- break;
- }
- break;
- }
default:
break;
} /* end of switch(params->rp_opgrp) */
@@ -1303,265 +563,3 @@ common_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
KCF_PROV_INCRSTATS(pd, err);
return (err);
}
-
-
-/*
- * Emulate the call for a multipart dual ops with 2 single steps.
- * This routine is always called in the context of a working thread
- * running kcf_svc_do_run().
- * The single steps are submitted in a pure synchronous way (blocking).
- * When this routine returns, kcf_svc_do_run() will call kcf_aop_done()
- * so the originating consumer's callback gets invoked. kcf_aop_done()
- * takes care of freeing the operation context. So, this routine does
- * not free the operation context.
- *
- * The provider descriptor is assumed held by the callers.
- */
-static int
-kcf_emulate_dual(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
- kcf_req_params_t *params)
-{
- int err = CRYPTO_ARGUMENTS_BAD;
- kcf_op_type_t optype;
- size_t save_len;
- off_t save_offset;
-
- optype = params->rp_optype;
-
- switch (params->rp_opgrp) {
- case KCF_OG_ENCRYPT_MAC: {
- kcf_encrypt_mac_ops_params_t *cmops =
- &params->rp_u.encrypt_mac_params;
- kcf_context_t *encr_kcf_ctx;
- crypto_ctx_t *mac_ctx;
- kcf_req_params_t encr_params;
-
- encr_kcf_ctx = (kcf_context_t *)(ctx->cc_framework_private);
-
- switch (optype) {
- case KCF_OP_INIT: {
- encr_kcf_ctx->kc_secondctx = NULL;
-
- KCF_WRAP_ENCRYPT_OPS_PARAMS(&encr_params, KCF_OP_INIT,
- pd->pd_sid, &cmops->em_encr_mech,
- cmops->em_encr_key, NULL, NULL,
- cmops->em_encr_templ);
-
- err = kcf_submit_request(pd, ctx, NULL, &encr_params,
- B_FALSE);
-
- /* It can't be CRYPTO_QUEUED */
- if (err != CRYPTO_SUCCESS) {
- break;
- }
-
- err = crypto_mac_init(&cmops->em_mac_mech,
- cmops->em_mac_key, cmops->em_mac_templ,
- (crypto_context_t *)&mac_ctx, NULL);
-
- if (err == CRYPTO_SUCCESS) {
- encr_kcf_ctx->kc_secondctx = (kcf_context_t *)
- mac_ctx->cc_framework_private;
- KCF_CONTEXT_REFHOLD((kcf_context_t *)
- mac_ctx->cc_framework_private);
- }
-
- break;
-
- }
- case KCF_OP_UPDATE: {
- crypto_dual_data_t *ct = cmops->em_ciphertext;
- crypto_data_t *pt = cmops->em_plaintext;
- kcf_context_t *mac_kcf_ctx = encr_kcf_ctx->kc_secondctx;
- crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;
-
- KCF_WRAP_ENCRYPT_OPS_PARAMS(&encr_params, KCF_OP_UPDATE,
- pd->pd_sid, NULL, NULL, pt, (crypto_data_t *)ct,
- NULL);
-
- err = kcf_submit_request(pd, ctx, NULL, &encr_params,
- B_FALSE);
-
- /* It can't be CRYPTO_QUEUED */
- if (err != CRYPTO_SUCCESS) {
- break;
- }
-
- save_offset = ct->dd_offset1;
- save_len = ct->dd_len1;
- if (ct->dd_len2 == 0) {
- /*
- * The previous encrypt step was an
- * accumulation only and didn't produce any
- * partial output
- */
- if (ct->dd_len1 == 0)
- break;
-
- } else {
- ct->dd_offset1 = ct->dd_offset2;
- ct->dd_len1 = ct->dd_len2;
- }
- err = crypto_mac_update((crypto_context_t)mac_ctx,
- (crypto_data_t *)ct, NULL);
-
- ct->dd_offset1 = save_offset;
- ct->dd_len1 = save_len;
-
- break;
- }
- case KCF_OP_FINAL: {
- crypto_dual_data_t *ct = cmops->em_ciphertext;
- crypto_data_t *mac = cmops->em_mac;
- kcf_context_t *mac_kcf_ctx = encr_kcf_ctx->kc_secondctx;
- crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;
- crypto_context_t mac_context = mac_ctx;
-
- KCF_WRAP_ENCRYPT_OPS_PARAMS(&encr_params, KCF_OP_FINAL,
- pd->pd_sid, NULL, NULL, NULL, (crypto_data_t *)ct,
- NULL);
-
- err = kcf_submit_request(pd, ctx, NULL, &encr_params,
- B_FALSE);
-
- /* It can't be CRYPTO_QUEUED */
- if (err != CRYPTO_SUCCESS) {
- crypto_cancel_ctx(mac_context);
- break;
- }
-
- if (ct->dd_len2 > 0) {
- save_offset = ct->dd_offset1;
- save_len = ct->dd_len1;
- ct->dd_offset1 = ct->dd_offset2;
- ct->dd_len1 = ct->dd_len2;
-
- err = crypto_mac_update(mac_context,
- (crypto_data_t *)ct, NULL);
-
- ct->dd_offset1 = save_offset;
- ct->dd_len1 = save_len;
-
- if (err != CRYPTO_SUCCESS) {
- crypto_cancel_ctx(mac_context);
- return (err);
- }
- }
-
- /* and finally, collect the MAC */
- err = crypto_mac_final(mac_context, mac, NULL);
- break;
- }
-
- default:
- break;
- }
- KCF_PROV_INCRSTATS(pd, err);
- break;
- }
- case KCF_OG_MAC_DECRYPT: {
- kcf_mac_decrypt_ops_params_t *mdops =
- &params->rp_u.mac_decrypt_params;
- kcf_context_t *decr_kcf_ctx;
- crypto_ctx_t *mac_ctx;
- kcf_req_params_t decr_params;
-
- decr_kcf_ctx = (kcf_context_t *)(ctx->cc_framework_private);
-
- switch (optype) {
- case KCF_OP_INIT: {
- decr_kcf_ctx->kc_secondctx = NULL;
-
- err = crypto_mac_init(&mdops->md_mac_mech,
- mdops->md_mac_key, mdops->md_mac_templ,
- (crypto_context_t *)&mac_ctx, NULL);
-
- /* It can't be CRYPTO_QUEUED */
- if (err != CRYPTO_SUCCESS) {
- break;
- }
-
- KCF_WRAP_DECRYPT_OPS_PARAMS(&decr_params, KCF_OP_INIT,
- pd->pd_sid, &mdops->md_decr_mech,
- mdops->md_decr_key, NULL, NULL,
- mdops->md_decr_templ);
-
- err = kcf_submit_request(pd, ctx, NULL, &decr_params,
- B_FALSE);
-
- /* It can't be CRYPTO_QUEUED */
- if (err != CRYPTO_SUCCESS) {
- crypto_cancel_ctx((crypto_context_t)mac_ctx);
- break;
- }
-
- decr_kcf_ctx->kc_secondctx = (kcf_context_t *)
- mac_ctx->cc_framework_private;
- KCF_CONTEXT_REFHOLD((kcf_context_t *)
- mac_ctx->cc_framework_private);
-
- break;
- default:
- break;
-
- }
- case KCF_OP_UPDATE: {
- crypto_dual_data_t *ct = mdops->md_ciphertext;
- crypto_data_t *pt = mdops->md_plaintext;
- kcf_context_t *mac_kcf_ctx = decr_kcf_ctx->kc_secondctx;
- crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;
-
- err = crypto_mac_update((crypto_context_t)mac_ctx,
- (crypto_data_t *)ct, NULL);
-
- if (err != CRYPTO_SUCCESS)
- break;
-
- save_offset = ct->dd_offset1;
- save_len = ct->dd_len1;
-
- /* zero ct->dd_len2 means decrypt everything */
- if (ct->dd_len2 > 0) {
- ct->dd_offset1 = ct->dd_offset2;
- ct->dd_len1 = ct->dd_len2;
- }
-
- err = crypto_decrypt_update((crypto_context_t)ctx,
- (crypto_data_t *)ct, pt, NULL);
-
- ct->dd_offset1 = save_offset;
- ct->dd_len1 = save_len;
-
- break;
- }
- case KCF_OP_FINAL: {
- crypto_data_t *pt = mdops->md_plaintext;
- crypto_data_t *mac = mdops->md_mac;
- kcf_context_t *mac_kcf_ctx = decr_kcf_ctx->kc_secondctx;
- crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;
-
- err = crypto_mac_final((crypto_context_t)mac_ctx,
- mac, NULL);
-
- if (err != CRYPTO_SUCCESS) {
- crypto_cancel_ctx(ctx);
- break;
- }
-
- /* Get the last chunk of plaintext */
- KCF_CONTEXT_REFHOLD(decr_kcf_ctx);
- err = crypto_decrypt_final((crypto_context_t)ctx, pt,
- NULL);
-
- break;
- }
- }
- break;
- }
- default:
-
- break;
- } /* end of switch(params->rp_opgrp) */
-
- return (err);
-}
diff --git a/module/icp/core/kcf_mech_tabs.c b/module/icp/core/kcf_mech_tabs.c
index 9df5f0734..4f2e04e37 100644
--- a/module/icp/core/kcf_mech_tabs.c
+++ b/module/icp/core/kcf_mech_tabs.c
@@ -85,18 +85,12 @@
static kcf_mech_entry_t kcf_digest_mechs_tab[KCF_MAXDIGEST];
static kcf_mech_entry_t kcf_cipher_mechs_tab[KCF_MAXCIPHER];
static kcf_mech_entry_t kcf_mac_mechs_tab[KCF_MAXMAC];
-static kcf_mech_entry_t kcf_sign_mechs_tab[KCF_MAXSIGN];
-static kcf_mech_entry_t kcf_keyops_mechs_tab[KCF_MAXKEYOPS];
-static kcf_mech_entry_t kcf_misc_mechs_tab[KCF_MAXMISC];
const kcf_mech_entry_tab_t kcf_mech_tabs_tab[KCF_LAST_OPSCLASS + 1] = {
{0, NULL}, /* No class zero */
{KCF_MAXDIGEST, kcf_digest_mechs_tab},
{KCF_MAXCIPHER, kcf_cipher_mechs_tab},
{KCF_MAXMAC, kcf_mac_mechs_tab},
- {KCF_MAXSIGN, kcf_sign_mechs_tab},
- {KCF_MAXKEYOPS, kcf_keyops_mechs_tab},
- {KCF_MAXMISC, kcf_misc_mechs_tab}
};
/*
@@ -240,10 +234,6 @@ kcf_init_mech_tabs(void)
kcf_mac_mechs_tab[3].me_threshold = kcf_sha1_threshold;
- /* 1 random number generation pseudo mechanism */
- (void) strncpy(kcf_misc_mechs_tab[0].me_name, SUN_RANDOM,
- CRYPTO_MAX_MECH_NAME);
-
kcf_mech_hash = mod_hash_create_strhash_nodtr("kcf mech2id hash",
kcf_mech_hash_size, mod_hash_null_valdtor);
@@ -376,13 +366,8 @@ kcf_add_mech_provider(short mech_indx,
int error;
kcf_mech_entry_t *mech_entry = NULL;
crypto_mech_info_t *mech_info;
- crypto_mech_type_t kcf_mech_type, mt;
- kcf_prov_mech_desc_t *prov_mech, *prov_mech2;
- crypto_func_group_t simple_fg_mask, dual_fg_mask;
- crypto_mech_info_t *dmi;
- crypto_mech_info_list_t *mil, *mil2;
- kcf_mech_entry_t *me;
- int i;
+ crypto_mech_type_t kcf_mech_type;
+ kcf_prov_mech_desc_t *prov_mech;
ASSERT(prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
@@ -406,19 +391,8 @@ kcf_add_mech_provider(short mech_indx,
class = KCF_CIPHER_CLASS;
else if (fg & CRYPTO_FG_MAC || fg & CRYPTO_FG_MAC_ATOMIC)
class = KCF_MAC_CLASS;
- else if (fg & CRYPTO_FG_SIGN || fg & CRYPTO_FG_VERIFY ||
- fg & CRYPTO_FG_SIGN_ATOMIC ||
- fg & CRYPTO_FG_VERIFY_ATOMIC ||
- fg & CRYPTO_FG_SIGN_RECOVER ||
- fg & CRYPTO_FG_VERIFY_RECOVER)
- class = KCF_SIGN_CLASS;
- else if (fg & CRYPTO_FG_GENERATE ||
- fg & CRYPTO_FG_GENERATE_KEY_PAIR ||
- fg & CRYPTO_FG_WRAP || fg & CRYPTO_FG_UNWRAP ||
- fg & CRYPTO_FG_DERIVE)
- class = KCF_KEYOPS_CLASS;
else
- class = KCF_MISC_CLASS;
+ __builtin_unreachable();
/*
* Attempt to create a new mech_entry for the specified
@@ -447,95 +421,6 @@ kcf_add_mech_provider(short mech_indx,
KCF_PROV_REFHOLD(prov_desc);
KCF_PROV_IREFHOLD(prov_desc);
- dual_fg_mask = mech_info->cm_func_group_mask & CRYPTO_FG_DUAL_MASK;
-
- if (dual_fg_mask == ((crypto_func_group_t)0))
- goto add_entry;
-
- simple_fg_mask = (mech_info->cm_func_group_mask &
- CRYPTO_FG_SIMPLEOP_MASK) | CRYPTO_FG_RANDOM;
-
- for (i = 0; i < prov_desc->pd_mech_list_count; i++) {
- dmi = &prov_desc->pd_mechanisms[i];
-
- /* skip self */
- if (dmi->cm_mech_number == mech_info->cm_mech_number)
- continue;
-
- /* skip if not a dual operation mechanism */
- if (!(dmi->cm_func_group_mask & dual_fg_mask) ||
- (dmi->cm_func_group_mask & simple_fg_mask))
- continue;
-
- mt = kcf_mech_hash_find(dmi->cm_mech_name);
- if (mt == CRYPTO_MECH_INVALID)
- continue;
-
- if (kcf_get_mech_entry(mt, &me) != KCF_SUCCESS)
- continue;
-
- mil = kmem_zalloc(sizeof (*mil), KM_SLEEP);
- mil2 = kmem_zalloc(sizeof (*mil2), KM_SLEEP);
-
- /*
- * Ignore hard-coded entries in the mech table
- * if the provider hasn't registered.
- */
- mutex_enter(&me->me_mutex);
- if (me->me_hw_prov_chain == NULL && me->me_sw_prov == NULL) {
- mutex_exit(&me->me_mutex);
- kmem_free(mil, sizeof (*mil));
- kmem_free(mil2, sizeof (*mil2));
- continue;
- }
-
- /*
- * Add other dual mechanisms that have registered
- * with the framework to this mechanism's
- * cross-reference list.
- */
- mil->ml_mech_info = *dmi; /* struct assignment */
- mil->ml_kcf_mechid = mt;
-
- /* add to head of list */
- mil->ml_next = prov_mech->pm_mi_list;
- prov_mech->pm_mi_list = mil;
-
- if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
- prov_mech2 = me->me_hw_prov_chain;
- else
- prov_mech2 = me->me_sw_prov;
-
- if (prov_mech2 == NULL) {
- kmem_free(mil2, sizeof (*mil2));
- mutex_exit(&me->me_mutex);
- continue;
- }
-
- /*
- * Update all other cross-reference lists by
- * adding this new mechanism.
- */
- while (prov_mech2 != NULL) {
- if (prov_mech2->pm_prov_desc == prov_desc) {
- /* struct assignment */
- mil2->ml_mech_info = *mech_info;
- mil2->ml_kcf_mechid = kcf_mech_type;
-
- /* add to head of list */
- mil2->ml_next = prov_mech2->pm_mi_list;
- prov_mech2->pm_mi_list = mil2;
- break;
- }
- prov_mech2 = prov_mech2->pm_next;
- }
- if (prov_mech2 == NULL)
- kmem_free(mil2, sizeof (*mil2));
-
- mutex_exit(&me->me_mutex);
- }
-
-add_entry:
/*
* Add new kcf_prov_mech_desc at the front of HW providers
* chain.
diff --git a/module/icp/core/kcf_prov_tabs.c b/module/icp/core/kcf_prov_tabs.c
index 734bf457c..482bd267c 100644
--- a/module/icp/core/kcf_prov_tabs.c
+++ b/module/icp/core/kcf_prov_tabs.c
@@ -205,8 +205,7 @@ kcf_prov_tab_lookup(crypto_provider_id_t prov_id)
}
static void
-allocate_ops(const crypto_ops_t *src, crypto_ops_t *dst,
- uint_t *mech_list_count)
+allocate_ops(const crypto_ops_t *src, crypto_ops_t *dst)
{
if (src->co_digest_ops != NULL)
dst->co_digest_ops = kmem_alloc(sizeof (crypto_digest_ops_t),
@@ -220,62 +219,9 @@ allocate_ops(const crypto_ops_t *src, crypto_ops_t *dst,
dst->co_mac_ops = kmem_alloc(sizeof (crypto_mac_ops_t),
KM_SLEEP);
- if (src->co_sign_ops != NULL)
- dst->co_sign_ops = kmem_alloc(sizeof (crypto_sign_ops_t),
- KM_SLEEP);
-
- if (src->co_verify_ops != NULL)
- dst->co_verify_ops = kmem_alloc(sizeof (crypto_verify_ops_t),
- KM_SLEEP);
-
- if (src->co_dual_ops != NULL)
- dst->co_dual_ops = kmem_alloc(sizeof (crypto_dual_ops_t),
- KM_SLEEP);
-
- if (src->co_dual_cipher_mac_ops != NULL)
- dst->co_dual_cipher_mac_ops = kmem_alloc(
- sizeof (crypto_dual_cipher_mac_ops_t), KM_SLEEP);
-
- if (src->co_random_ops != NULL) {
- dst->co_random_ops = kmem_alloc(
- sizeof (crypto_random_number_ops_t), KM_SLEEP);
-
- /*
- * Allocate storage to store the array of supported mechanisms
- * specified by provider. We allocate extra mechanism storage
- * if the provider has random_ops since we keep an internal
- * mechanism, SUN_RANDOM, in this case.
- */
- (*mech_list_count)++;
- }
-
- if (src->co_session_ops != NULL)
- dst->co_session_ops = kmem_alloc(sizeof (crypto_session_ops_t),
- KM_SLEEP);
-
- if (src->co_object_ops != NULL)
- dst->co_object_ops = kmem_alloc(sizeof (crypto_object_ops_t),
- KM_SLEEP);
-
- if (src->co_key_ops != NULL)
- dst->co_key_ops = kmem_alloc(sizeof (crypto_key_ops_t),
- KM_SLEEP);
-
- if (src->co_provider_ops != NULL)
- dst->co_provider_ops = kmem_alloc(
- sizeof (crypto_provider_management_ops_t), KM_SLEEP);
-
if (src->co_ctx_ops != NULL)
dst->co_ctx_ops = kmem_alloc(sizeof (crypto_ctx_ops_t),
KM_SLEEP);
-
- if (src->co_mech_ops != NULL)
- dst->co_mech_ops = kmem_alloc(sizeof (crypto_mech_ops_t),
- KM_SLEEP);
-
- if (src->co_nostore_key_ops != NULL)
- dst->co_nostore_key_ops =
- kmem_alloc(sizeof (crypto_nostore_key_ops_t), KM_SLEEP);
}
/*
@@ -289,7 +235,6 @@ kcf_provider_desc_t *
kcf_alloc_provider_desc(const crypto_provider_info_t *info)
{
kcf_provider_desc_t *desc;
- uint_t mech_list_count = info->pi_mech_list_count;
const crypto_ops_t *src_ops = info->pi_ops_vector;
desc = kmem_zalloc(sizeof (kcf_provider_desc_t), KM_SLEEP);
@@ -319,15 +264,13 @@ kcf_alloc_provider_desc(const crypto_provider_info_t *info)
* vectors are copied.
*/
crypto_ops_t *opvec = kmem_zalloc(sizeof (crypto_ops_t), KM_SLEEP);
-
- if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
- allocate_ops(src_ops, opvec, &mech_list_count);
- }
+ if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER)
+ allocate_ops(src_ops, opvec);
desc->pd_ops_vector = opvec;
- desc->pd_mech_list_count = mech_list_count;
+ desc->pd_mech_list_count = info->pi_mech_list_count;
desc->pd_mechanisms = kmem_zalloc(sizeof (crypto_mech_info_t) *
- mech_list_count, KM_SLEEP);
+ info->pi_mech_list_count, KM_SLEEP);
for (int i = 0; i < KCF_OPS_CLASSSIZE; i++)
for (int j = 0; j < KCF_MAXMECHTAB; j++)
desc->pd_mech_indx[i][j] = KCF_INVALID_INDX;
@@ -408,54 +351,10 @@ kcf_free_provider_desc(kcf_provider_desc_t *desc)
kmem_free(desc->pd_ops_vector->co_mac_ops,
sizeof (crypto_mac_ops_t));
- if (desc->pd_ops_vector->co_sign_ops != NULL)
- kmem_free(desc->pd_ops_vector->co_sign_ops,
- sizeof (crypto_sign_ops_t));
-
- if (desc->pd_ops_vector->co_verify_ops != NULL)
- kmem_free(desc->pd_ops_vector->co_verify_ops,
- sizeof (crypto_verify_ops_t));
-
- if (desc->pd_ops_vector->co_dual_ops != NULL)
- kmem_free(desc->pd_ops_vector->co_dual_ops,
- sizeof (crypto_dual_ops_t));
-
- if (desc->pd_ops_vector->co_dual_cipher_mac_ops != NULL)
- kmem_free(desc->pd_ops_vector->co_dual_cipher_mac_ops,
- sizeof (crypto_dual_cipher_mac_ops_t));
-
- if (desc->pd_ops_vector->co_random_ops != NULL)
- kmem_free(desc->pd_ops_vector->co_random_ops,
- sizeof (crypto_random_number_ops_t));
-
- if (desc->pd_ops_vector->co_session_ops != NULL)
- kmem_free(desc->pd_ops_vector->co_session_ops,
- sizeof (crypto_session_ops_t));
-
- if (desc->pd_ops_vector->co_object_ops != NULL)
- kmem_free(desc->pd_ops_vector->co_object_ops,
- sizeof (crypto_object_ops_t));
-
- if (desc->pd_ops_vector->co_key_ops != NULL)
- kmem_free(desc->pd_ops_vector->co_key_ops,
- sizeof (crypto_key_ops_t));
-
- if (desc->pd_ops_vector->co_provider_ops != NULL)
- kmem_free(desc->pd_ops_vector->co_provider_ops,
- sizeof (crypto_provider_management_ops_t));
-
if (desc->pd_ops_vector->co_ctx_ops != NULL)
kmem_free(desc->pd_ops_vector->co_ctx_ops,
sizeof (crypto_ctx_ops_t));
- if (desc->pd_ops_vector->co_mech_ops != NULL)
- kmem_free(desc->pd_ops_vector->co_mech_ops,
- sizeof (crypto_mech_ops_t));
-
- if (desc->pd_ops_vector->co_nostore_key_ops != NULL)
- kmem_free(desc->pd_ops_vector->co_nostore_key_ops,
- sizeof (crypto_nostore_key_ops_t));
-
kmem_free(desc->pd_ops_vector, sizeof (crypto_ops_t));
}
@@ -475,111 +374,6 @@ kcf_free_provider_desc(kcf_provider_desc_t *desc)
}
/*
- * Returns an array of hardware and logical provider descriptors,
- * a.k.a the PKCS#11 slot list. A REFHOLD is done on each descriptor
- * before the array is returned. The entire table can be freed by
- * calling kcf_free_provider_tab().
- */
-int
-kcf_get_slot_list(uint_t *count, kcf_provider_desc_t ***array,
- boolean_t unverified)
-{
- kcf_provider_desc_t *prov_desc;
- kcf_provider_desc_t **p = NULL;
- char *last;
- uint_t cnt = 0;
- uint_t i, j;
- int rval = CRYPTO_SUCCESS;
- size_t n, final_size;
-
- /* count the providers */
- mutex_enter(&prov_tab_mutex);
- for (i = 0; i < KCF_MAX_PROVIDERS; i++) {
- if ((prov_desc = prov_tab[i]) != NULL &&
- ((prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER &&
- (prov_desc->pd_flags & CRYPTO_HIDE_PROVIDER) == 0) ||
- prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)) {
- if (KCF_IS_PROV_USABLE(prov_desc) ||
- (unverified && KCF_IS_PROV_UNVERIFIED(prov_desc))) {
- cnt++;
- }
- }
- }
- mutex_exit(&prov_tab_mutex);
-
- if (cnt == 0)
- goto out;
-
- n = cnt * sizeof (kcf_provider_desc_t *);
-again:
- p = kmem_zalloc(n, KM_SLEEP);
-
- /* pointer to last entry in the array */
- last = (char *)&p[cnt-1];
-
- mutex_enter(&prov_tab_mutex);
- /* fill the slot list */
- for (i = 0, j = 0; i < KCF_MAX_PROVIDERS; i++) {
- if ((prov_desc = prov_tab[i]) != NULL &&
- ((prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER &&
- (prov_desc->pd_flags & CRYPTO_HIDE_PROVIDER) == 0) ||
- prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)) {
- if (KCF_IS_PROV_USABLE(prov_desc) ||
- (unverified && KCF_IS_PROV_UNVERIFIED(prov_desc))) {
- if ((char *)&p[j] > last) {
- mutex_exit(&prov_tab_mutex);
- kcf_free_provider_tab(cnt, p);
- n = n << 1;
- cnt = cnt << 1;
- goto again;
- }
- p[j++] = prov_desc;
- KCF_PROV_REFHOLD(prov_desc);
- }
- }
- }
- mutex_exit(&prov_tab_mutex);
-
- final_size = j * sizeof (kcf_provider_desc_t *);
- cnt = j;
- ASSERT(final_size <= n);
-
- /* check if buffer we allocated is too large */
- if (final_size < n) {
- char *final_buffer = NULL;
-
- if (final_size > 0) {
- final_buffer = kmem_alloc(final_size, KM_SLEEP);
- bcopy(p, final_buffer, final_size);
- }
- kmem_free(p, n);
- p = (kcf_provider_desc_t **)final_buffer;
- }
-out:
- *count = cnt;
- *array = p;
- return (rval);
-}
-
-/*
- * Free an array of hardware provider descriptors. A REFRELE
- * is done on each descriptor before the table is freed.
- */
-void
-kcf_free_provider_tab(uint_t count, kcf_provider_desc_t **array)
-{
- kcf_provider_desc_t *prov_desc;
- int i;
-
- for (i = 0; i < count; i++) {
- if ((prov_desc = array[i]) != NULL) {
- KCF_PROV_REFRELE(prov_desc);
- }
- }
- kmem_free(array, count * sizeof (kcf_provider_desc_t *));
-}
-
-/*
* Returns in the location pointed to by pd a pointer to the descriptor
* for the software provider for the specified mechanism.
* The provider descriptor is returned held and it is the caller's
diff --git a/module/icp/core/kcf_sched.c b/module/icp/core/kcf_sched.c
index ee0fe0ac6..b50e80529 100644
--- a/module/icp/core/kcf_sched.c
+++ b/module/icp/core/kcf_sched.c
@@ -66,8 +66,6 @@ static kcf_stats_t kcf_ksdata = {
static kstat_t *kcf_misc_kstat = NULL;
ulong_t kcf_swprov_hndl = 0;
-static kcf_areq_node_t *kcf_areqnode_alloc(kcf_provider_desc_t *,
- kcf_context_t *, crypto_call_req_t *, kcf_req_params_t *, boolean_t);
static int kcf_disp_sw_request(kcf_areq_node_t *);
static void process_req_hwp(void *);
static int kcf_enqueue(kcf_areq_node_t *);
@@ -121,7 +119,7 @@ kcf_new_ctx(crypto_call_req_t *crq, kcf_provider_desc_t *pd,
*/
static kcf_areq_node_t *
kcf_areqnode_alloc(kcf_provider_desc_t *pd, kcf_context_t *ictx,
- crypto_call_req_t *crq, kcf_req_params_t *req, boolean_t isdual)
+ crypto_call_req_t *crq, kcf_req_params_t *req)
{
kcf_areq_node_t *arptr, *areq;
@@ -134,7 +132,6 @@ kcf_areqnode_alloc(kcf_provider_desc_t *pd, kcf_context_t *ictx,
arptr->an_reqarg = *crq;
arptr->an_params = *req;
arptr->an_context = ictx;
- arptr->an_isdual = isdual;
arptr->an_next = arptr->an_prev = NULL;
KCF_PROV_REFHOLD(pd);
@@ -342,17 +339,16 @@ bail:
/*
* This routine checks if a request can be retried on another
* provider. If true, mech1 is initialized to point to the mechanism
- * structure. mech2 is also initialized in case of a dual operation. fg
- * is initialized to the correct crypto_func_group_t bit flag. They are
- * initialized by this routine, so that the caller can pass them to a
- * kcf_get_mech_provider() or kcf_get_dual_provider() with no further change.
+ * structure. fg is initialized to the correct crypto_func_group_t bit flag.
+ * They are initialized by this routine, so that the caller can pass them to
+ * kcf_get_mech_provider() with no further change.
*
* We check that the request is for a init or atomic routine and that
* it is for one of the operation groups used from k-api .
*/
static boolean_t
can_resubmit(kcf_areq_node_t *areq, crypto_mechanism_t **mech1,
- crypto_mechanism_t **mech2, crypto_func_group_t *fg)
+ crypto_func_group_t *fg)
{
kcf_req_params_t *params;
kcf_op_type_t optype;
@@ -384,44 +380,6 @@ can_resubmit(kcf_areq_node_t *areq, crypto_mechanism_t **mech1,
break;
}
- case KCF_OG_SIGN: {
- kcf_sign_ops_params_t *sops = &params->rp_u.sign_params;
-
- sops->so_mech.cm_type = sops->so_framework_mechtype;
- *mech1 = &sops->so_mech;
- switch (optype) {
- case KCF_OP_INIT:
- *fg = CRYPTO_FG_SIGN;
- break;
- case KCF_OP_ATOMIC:
- *fg = CRYPTO_FG_SIGN_ATOMIC;
- break;
- default:
- ASSERT(optype == KCF_OP_SIGN_RECOVER_ATOMIC);
- *fg = CRYPTO_FG_SIGN_RECOVER_ATOMIC;
- }
- break;
- }
-
- case KCF_OG_VERIFY: {
- kcf_verify_ops_params_t *vops = &params->rp_u.verify_params;
-
- vops->vo_mech.cm_type = vops->vo_framework_mechtype;
- *mech1 = &vops->vo_mech;
- switch (optype) {
- case KCF_OP_INIT:
- *fg = CRYPTO_FG_VERIFY;
- break;
- case KCF_OP_ATOMIC:
- *fg = CRYPTO_FG_VERIFY_ATOMIC;
- break;
- default:
- ASSERT(optype == KCF_OP_VERIFY_RECOVER_ATOMIC);
- *fg = CRYPTO_FG_VERIFY_RECOVER_ATOMIC;
- }
- break;
- }
-
case KCF_OG_ENCRYPT: {
kcf_encrypt_ops_params_t *eops = &params->rp_u.encrypt_params;
@@ -442,32 +400,6 @@ can_resubmit(kcf_areq_node_t *areq, crypto_mechanism_t **mech1,
break;
}
- case KCF_OG_ENCRYPT_MAC: {
- kcf_encrypt_mac_ops_params_t *eops =
- &params->rp_u.encrypt_mac_params;
-
- eops->em_encr_mech.cm_type = eops->em_framework_encr_mechtype;
- *mech1 = &eops->em_encr_mech;
- eops->em_mac_mech.cm_type = eops->em_framework_mac_mechtype;
- *mech2 = &eops->em_mac_mech;
- *fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_ENCRYPT_MAC :
- CRYPTO_FG_ENCRYPT_MAC_ATOMIC;
- break;
- }
-
- case KCF_OG_MAC_DECRYPT: {
- kcf_mac_decrypt_ops_params_t *dops =
- &params->rp_u.mac_decrypt_params;
-
- dops->md_mac_mech.cm_type = dops->md_framework_mac_mechtype;
- *mech1 = &dops->md_mac_mech;
- dops->md_decr_mech.cm_type = dops->md_framework_decr_mechtype;
- *mech2 = &dops->md_decr_mech;
- *fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_MAC_DECRYPT :
- CRYPTO_FG_MAC_DECRYPT_ATOMIC;
- break;
- }
-
default:
return (B_FALSE);
}
@@ -491,11 +423,10 @@ kcf_resubmit_request(kcf_areq_node_t *areq)
kcf_context_t *ictx;
kcf_provider_desc_t *old_pd;
kcf_provider_desc_t *new_pd;
- crypto_mechanism_t *mech1 = NULL, *mech2 = NULL;
- crypto_mech_type_t prov_mt1, prov_mt2;
+ crypto_mechanism_t *mech1 = NULL;
crypto_func_group_t fg = 0;
- if (!can_resubmit(areq, &mech1, &mech2, &fg))
+ if (!can_resubmit(areq, &mech1, &fg))
return (error);
old_pd = areq->an_provider;
@@ -508,17 +439,9 @@ kcf_resubmit_request(kcf_areq_node_t *areq)
KM_NOSLEEP) == NULL)
return (error);
- if (mech1 && !mech2) {
- new_pd = kcf_get_mech_provider(mech1->cm_type, NULL, &error,
- areq->an_tried_plist, fg,
- (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
- } else {
- ASSERT(mech1 != NULL && mech2 != NULL);
-
- new_pd = kcf_get_dual_provider(mech1, mech2, NULL, &prov_mt1,
- &prov_mt2, &error, areq->an_tried_plist, fg, fg,
- (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
- }
+ new_pd = kcf_get_mech_provider(mech1->cm_type, NULL, &error,
+ areq->an_tried_plist, fg,
+ (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
if (new_pd == NULL)
return (error);
@@ -588,7 +511,7 @@ kcf_resubmit_request(kcf_areq_node_t *areq)
*/
int
kcf_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
- crypto_call_req_t *crq, kcf_req_params_t *params, boolean_t cont)
+ crypto_call_req_t *crq, kcf_req_params_t *params)
{
int error = CRYPTO_SUCCESS;
kcf_areq_node_t *areq;
@@ -703,16 +626,14 @@ kcf_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
* queue the request and return.
*/
areq = kcf_areqnode_alloc(pd, kcf_ctx, crq,
- params, cont);
+ params);
if (areq == NULL)
error = CRYPTO_HOST_MEMORY;
else {
if (!(crq->cr_flag
& CRYPTO_SKIP_REQID)) {
/*
- * Set the request handle. This handle
- * is used for any crypto_cancel_req(9f)
- * calls from the consumer. We have to
+ * Set the request handle. We have to
* do this before dispatching the
* request.
*/
@@ -739,8 +660,7 @@ kcf_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
/*
* We need to queue the request and return.
*/
- areq = kcf_areqnode_alloc(pd, kcf_ctx, crq, params,
- cont);
+ areq = kcf_areqnode_alloc(pd, kcf_ctx, crq, params);
if (areq == NULL) {
error = CRYPTO_HOST_MEMORY;
goto done;
@@ -760,10 +680,8 @@ kcf_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
if (!(crq->cr_flag & CRYPTO_SKIP_REQID)) {
/*
- * Set the request handle. This handle is used
- * for any crypto_cancel_req(9f) calls from the
- * consumer. We have to do this before dispatching
- * the request.
+ * Set the request handle. We have to do this
+ * before dispatching the request.
*/
crq->cr_reqid = kcf_reqid_insert(areq);
}
@@ -857,66 +775,6 @@ kcf_free_req(kcf_areq_node_t *areq)
}
/*
- * Utility routine to remove a request from the chain of requests
- * hanging off a context.
- */
-static void
-kcf_removereq_in_ctxchain(kcf_context_t *ictx, kcf_areq_node_t *areq)
-{
- kcf_areq_node_t *cur, *prev;
-
- /*
- * Get context lock, search for areq in the chain and remove it.
- */
- ASSERT(ictx != NULL);
- mutex_enter(&ictx->kc_in_use_lock);
- prev = cur = ictx->kc_req_chain_first;
-
- while (cur != NULL) {
- if (cur == areq) {
- if (prev == cur) {
- if ((ictx->kc_req_chain_first =
- cur->an_ctxchain_next) == NULL)
- ictx->kc_req_chain_last = NULL;
- } else {
- if (cur == ictx->kc_req_chain_last)
- ictx->kc_req_chain_last = prev;
- prev->an_ctxchain_next = cur->an_ctxchain_next;
- }
-
- break;
- }
- prev = cur;
- cur = cur->an_ctxchain_next;
- }
- mutex_exit(&ictx->kc_in_use_lock);
-}
-
-/*
- * Remove the specified node from the global software queue.
- *
- * The caller must hold the queue lock and request lock (an_lock).
- */
-static void
-kcf_remove_node(kcf_areq_node_t *node)
-{
- kcf_areq_node_t *nextp = node->an_next;
- kcf_areq_node_t *prevp = node->an_prev;
-
- if (nextp != NULL)
- nextp->an_prev = prevp;
- else
- gswq->gs_last = prevp;
-
- if (prevp != NULL)
- prevp->an_next = nextp;
- else
- gswq->gs_first = nextp;
-
- node->an_state = REQ_CANCELED;
-}
-
-/*
* Add the request node to the end of the global software queue.
*
* The caller should not hold the queue lock. Returns 0 if the
@@ -1224,19 +1082,6 @@ kcf_aop_done(kcf_areq_node_t *areq, int error)
}
}
- /* Deal with the internal continuation to this request first */
-
- if (areq->an_isdual) {
- kcf_dual_req_t *next_arg;
- next_arg = (kcf_dual_req_t *)areq->an_reqarg.cr_callback_arg;
- next_arg->kr_areq = areq;
- KCF_AREQ_REFHOLD(areq);
- areq->an_isdual = B_FALSE;
-
- NOTIFY_CLIENT(areq, error);
- return;
- }
-
/*
* If CRYPTO_NOTIFY_OPDONE flag is set, we should notify
* always. If this flag is clear, we skip the notification
@@ -1345,146 +1190,6 @@ kcf_reqid_delete(kcf_areq_node_t *areq)
}
/*
- * Cancel a single asynchronous request.
- *
- * We guarantee that no problems will result from calling
- * crypto_cancel_req() for a request which is either running, or
- * has already completed. We remove the request from any queues
- * if it is possible. We wait for request completion if the
- * request is dispatched to a provider.
- *
- * Calling context:
- * Can be called from user context only.
- *
- * NOTE: We acquire the following locks in this routine (in order):
- * - rt_lock (kcf_reqid_table_t)
- * - gswq->gs_lock
- * - areq->an_lock
- * - ictx->kc_in_use_lock (from kcf_removereq_in_ctxchain())
- *
- * This locking order MUST be maintained in code every where else.
- */
-void
-crypto_cancel_req(crypto_req_id_t id)
-{
- int indx;
- kcf_areq_node_t *areq;
- kcf_provider_desc_t *pd;
- kcf_context_t *ictx;
- kcf_reqid_table_t *rt;
-
- rt = kcf_reqid_table[id & REQID_TABLE_MASK];
- indx = REQID_HASH(id);
-
- mutex_enter(&rt->rt_lock);
- for (areq = rt->rt_idhash[indx]; areq; areq = areq->an_idnext) {
- if (GET_REQID(areq) == id) {
- /*
- * We found the request. It is either still waiting
- * in the framework queues or running at the provider.
- */
- pd = areq->an_provider;
- ASSERT(pd != NULL);
-
- switch (pd->pd_prov_type) {
- case CRYPTO_SW_PROVIDER:
- mutex_enter(&gswq->gs_lock);
- mutex_enter(&areq->an_lock);
-
- /* This request can be safely canceled. */
- if (areq->an_state <= REQ_WAITING) {
- /* Remove from gswq, global software queue. */
- kcf_remove_node(areq);
- if ((ictx = areq->an_context) != NULL)
- kcf_removereq_in_ctxchain(ictx, areq);
-
- mutex_exit(&areq->an_lock);
- mutex_exit(&gswq->gs_lock);
- mutex_exit(&rt->rt_lock);
-
- /* Remove areq from hash table and free it. */
- kcf_reqid_delete(areq);
- KCF_AREQ_REFRELE(areq);
- return;
- }
-
- mutex_exit(&areq->an_lock);
- mutex_exit(&gswq->gs_lock);
- break;
-
- case CRYPTO_HW_PROVIDER:
- /*
- * There is no interface to remove an entry
- * once it is on the taskq. So, we do not do
- * anything for a hardware provider.
- */
- break;
- default:
- break;
- }
-
- /*
- * The request is running. Wait for the request completion
- * to notify us.
- */
- KCF_AREQ_REFHOLD(areq);
- while (GET_REQID(areq) == id)
- cv_wait(&areq->an_done, &rt->rt_lock);
- KCF_AREQ_REFRELE(areq);
- break;
- }
- }
-
- mutex_exit(&rt->rt_lock);
-}
-
-/*
- * Cancel all asynchronous requests associated with the
- * passed in crypto context and free it.
- *
- * A client SHOULD NOT call this routine after calling a crypto_*_final
- * routine. This routine is called only during intermediate operations.
- * The client should not use the crypto context after this function returns
- * since we destroy it.
- *
- * Calling context:
- * Can be called from user context only.
- */
-void
-crypto_cancel_ctx(crypto_context_t ctx)
-{
- kcf_context_t *ictx;
- kcf_areq_node_t *areq;
-
- if (ctx == NULL)
- return;
-
- ictx = (kcf_context_t *)((crypto_ctx_t *)ctx)->cc_framework_private;
-
- mutex_enter(&ictx->kc_in_use_lock);
-
- /* Walk the chain and cancel each request */
- while ((areq = ictx->kc_req_chain_first) != NULL) {
- /*
- * We have to drop the lock here as we may have
- * to wait for request completion. We hold the
- * request before dropping the lock though, so that it
- * won't be freed underneath us.
- */
- KCF_AREQ_REFHOLD(areq);
- mutex_exit(&ictx->kc_in_use_lock);
-
- crypto_cancel_req(GET_REQID(areq));
- KCF_AREQ_REFRELE(areq);
-
- mutex_enter(&ictx->kc_in_use_lock);
- }
-
- mutex_exit(&ictx->kc_in_use_lock);
- KCF_CONTEXT_REFRELE(ictx);
-}
-
-/*
* Update kstats.
*/
static int
@@ -1517,250 +1222,3 @@ kcf_misc_kstat_update(kstat_t *ksp, int rw)
return (0);
}
-
-/*
- * Allocate and initialize a kcf_dual_req, used for saving the arguments of
- * a dual operation or an atomic operation that has to be internally
- * simulated with multiple single steps.
- * crq determines the memory allocation flags.
- */
-
-kcf_dual_req_t *
-kcf_alloc_req(crypto_call_req_t *crq)
-{
- kcf_dual_req_t *kcr;
-
- kcr = kmem_alloc(sizeof (kcf_dual_req_t), KCF_KMFLAG(crq));
-
- if (kcr == NULL)
- return (NULL);
-
- /* Copy the whole crypto_call_req struct, as it isn't persistent */
- if (crq != NULL)
- kcr->kr_callreq = *crq;
- else
- bzero(&(kcr->kr_callreq), sizeof (crypto_call_req_t));
- kcr->kr_areq = NULL;
- kcr->kr_saveoffset = 0;
- kcr->kr_savelen = 0;
-
- return (kcr);
-}
-
-/*
- * Callback routine for the next part of a simulated dual part.
- * Schedules the next step.
- *
- * This routine can be called from interrupt context.
- */
-void
-kcf_next_req(void *next_req_arg, int status)
-{
- kcf_dual_req_t *next_req = (kcf_dual_req_t *)next_req_arg;
- kcf_req_params_t *params = &(next_req->kr_params);
- kcf_areq_node_t *areq = next_req->kr_areq;
- int error = status;
- kcf_provider_desc_t *pd = NULL;
- crypto_dual_data_t *ct = NULL;
-
- /* Stop the processing if an error occurred at this step */
- if (error != CRYPTO_SUCCESS) {
-out:
- areq->an_reqarg = next_req->kr_callreq;
- KCF_AREQ_REFRELE(areq);
- kmem_free(next_req, sizeof (kcf_dual_req_t));
- areq->an_isdual = B_FALSE;
- kcf_aop_done(areq, error);
- return;
- }
-
- switch (params->rp_opgrp) {
- case KCF_OG_MAC: {
-
- /*
- * The next req is submitted with the same reqid as the
- * first part. The consumer only got back that reqid, and
- * should still be able to cancel the operation during its
- * second step.
- */
- kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);
- crypto_ctx_template_t mac_tmpl;
- kcf_mech_entry_t *me;
-
- ct = (crypto_dual_data_t *)mops->mo_data;
- mac_tmpl = (crypto_ctx_template_t)mops->mo_templ;
-
- /* No expected recoverable failures, so no retry list */
- pd = kcf_get_mech_provider(mops->mo_framework_mechtype,
- &me, &error, NULL, CRYPTO_FG_MAC_ATOMIC,
- (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), ct->dd_len2);
-
- if (pd == NULL) {
- error = CRYPTO_MECH_NOT_SUPPORTED;
- goto out;
- }
- /* Validate the MAC context template here */
- if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
- (mac_tmpl != NULL)) {
- kcf_ctx_template_t *ctx_mac_tmpl;
-
- ctx_mac_tmpl = (kcf_ctx_template_t *)mac_tmpl;
-
- if (ctx_mac_tmpl->ct_generation != me->me_gen_swprov) {
- KCF_PROV_REFRELE(pd);
- error = CRYPTO_OLD_CTX_TEMPLATE;
- goto out;
- }
- mops->mo_templ = ctx_mac_tmpl->ct_prov_tmpl;
- }
-
- break;
- }
- case KCF_OG_DECRYPT: {
- kcf_decrypt_ops_params_t *dcrops =
- &(params->rp_u.decrypt_params);
-
- ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
- /* No expected recoverable failures, so no retry list */
- pd = kcf_get_mech_provider(dcrops->dop_framework_mechtype,
- NULL, &error, NULL, CRYPTO_FG_DECRYPT_ATOMIC,
- (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), ct->dd_len1);
-
- if (pd == NULL) {
- error = CRYPTO_MECH_NOT_SUPPORTED;
- goto out;
- }
- break;
- }
- default:
- break;
- }
-
- /* The second step uses len2 and offset2 of the dual_data */
- next_req->kr_saveoffset = ct->dd_offset1;
- next_req->kr_savelen = ct->dd_len1;
- ct->dd_offset1 = ct->dd_offset2;
- ct->dd_len1 = ct->dd_len2;
-
- /* preserve if the caller is restricted */
- if (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED) {
- areq->an_reqarg.cr_flag = CRYPTO_RESTRICTED;
- } else {
- areq->an_reqarg.cr_flag = 0;
- }
-
- areq->an_reqarg.cr_callback_func = kcf_last_req;
- areq->an_reqarg.cr_callback_arg = next_req;
- areq->an_isdual = B_TRUE;
-
- /*
- * We would like to call kcf_submit_request() here. But,
- * that is not possible as that routine allocates a new
- * kcf_areq_node_t request structure, while we need to
- * reuse the existing request structure.
- */
- switch (pd->pd_prov_type) {
- case CRYPTO_SW_PROVIDER:
- error = common_submit_request(pd, NULL, params,
- KCF_RHNDL(KM_NOSLEEP));
- break;
-
- case CRYPTO_HW_PROVIDER: {
- kcf_provider_desc_t *old_pd;
- taskq_t *taskq = pd->pd_sched_info.ks_taskq;
-
- /*
- * Set the params for the second step in the
- * dual-ops.
- */
- areq->an_params = *params;
- old_pd = areq->an_provider;
- KCF_PROV_REFRELE(old_pd);
- KCF_PROV_REFHOLD(pd);
- areq->an_provider = pd;
-
- /*
- * Note that we have to do a taskq_dispatch()
- * here as we may be in interrupt context.
- */
- if (taskq_dispatch(taskq, process_req_hwp, areq,
- TQ_NOSLEEP) == (taskqid_t)0) {
- error = CRYPTO_HOST_MEMORY;
- } else {
- error = CRYPTO_QUEUED;
- }
- break;
- }
- default:
- break;
- }
-
- /*
- * We have to release the holds on the request and the provider
- * in all cases.
- */
- KCF_AREQ_REFRELE(areq);
- KCF_PROV_REFRELE(pd);
-
- if (error != CRYPTO_QUEUED) {
- /* restore, clean up, and invoke the client's callback */
-
- ct->dd_offset1 = next_req->kr_saveoffset;
- ct->dd_len1 = next_req->kr_savelen;
- areq->an_reqarg = next_req->kr_callreq;
- kmem_free(next_req, sizeof (kcf_dual_req_t));
- areq->an_isdual = B_FALSE;
- kcf_aop_done(areq, error);
- }
-}
-
-/*
- * Last part of an emulated dual operation.
- * Clean up and restore ...
- */
-void
-kcf_last_req(void *last_req_arg, int status)
-{
- kcf_dual_req_t *last_req = (kcf_dual_req_t *)last_req_arg;
-
- kcf_req_params_t *params = &(last_req->kr_params);
- kcf_areq_node_t *areq = last_req->kr_areq;
- crypto_dual_data_t *ct = NULL;
-
- switch (params->rp_opgrp) {
- case KCF_OG_MAC: {
- kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);
-
- ct = (crypto_dual_data_t *)mops->mo_data;
- break;
- }
- case KCF_OG_DECRYPT: {
- kcf_decrypt_ops_params_t *dcrops =
- &(params->rp_u.decrypt_params);
-
- ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
- break;
- }
- default: {
- panic("invalid kcf_op_group_t %d", (int)params->rp_opgrp);
- return;
- }
- }
- ct->dd_offset1 = last_req->kr_saveoffset;
- ct->dd_len1 = last_req->kr_savelen;
-
- /* The submitter used kcf_last_req as its callback */
-
- if (areq == NULL) {
- crypto_call_req_t *cr = &last_req->kr_callreq;
-
- (*(cr->cr_callback_func))(cr->cr_callback_arg, status);
- kmem_free(last_req, sizeof (kcf_dual_req_t));
- return;
- }
- areq->an_reqarg = last_req->kr_callreq;
- KCF_AREQ_REFRELE(areq);
- kmem_free(last_req, sizeof (kcf_dual_req_t));
- areq->an_isdual = B_FALSE;
- kcf_aop_done(areq, status);
-}
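With the dual-operation plumbing gone, the submit path that remains is the one visible in the kcf_sched.c hunks above: kcf_resubmit_request() only ever consults kcf_get_mech_provider(), and kcf_submit_request() drops its trailing boolean_t continuation argument. A minimal caller sketch follows; the function name and local setup are hypothetical, the parameter types follow the obvious reading of the removed call sites, and only the two signatures and the KCF_WRAP_ENCRYPT_OPS_PARAMS argument shape are taken from the hunks above:

#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>

/*
 * Hypothetical example: start a synchronous, single-mechanism encrypt
 * operation through the slimmed-down scheduler.  pd, ctx, mech, key,
 * and tmpl are assumed to be set up by the caller.
 */
static int
example_encrypt_init(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
    crypto_mechanism_t *mech, crypto_key_t *key, crypto_ctx_template_t tmpl)
{
	kcf_req_params_t params;

	/* Same argument shape as the KCF_OP_INIT wrapping removed above. */
	KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_INIT, pd->pd_sid,
	    mech, key, NULL, NULL, tmpl);

	/*
	 * Passing a NULL crypto_call_req_t makes the request synchronous;
	 * note there is no longer a boolean_t `cont' argument.
	 */
	return (kcf_submit_request(pd, ctx, NULL, &params));
}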