| author | наб <[email protected]> | 2021-12-23 19:51:00 +0100 |
|---|---|---|
| committer | Brian Behlendorf <[email protected]> | 2022-02-15 16:23:53 -0800 |
| commit | 710657f51d65b8fb1f389bf6a973e67a8243502f (patch) | |
| tree | 3d43afb1b735af9c07ef15c32ead2f62ac6c0bb3 /module/icp/core | |
| parent | 167ced3fb10ce9c2336828cb041420d96de8cf67 (diff) | |
module: icp: remove other provider types
Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: Ahelenia Ziemiańska <[email protected]>
Closes #12901
Diffstat (limited to 'module/icp/core')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | module/icp/core/kcf_callprov.c | 214 |
| -rw-r--r-- | module/icp/core/kcf_mech_tabs.c | 162 |
| -rw-r--r-- | module/icp/core/kcf_prov_tabs.c | 37 |
| -rw-r--r-- | module/icp/core/kcf_sched.c | 352 |

4 files changed, 91 insertions(+), 674 deletions(-)
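Before the full diff, a condensed sketch of what the dispatch path reduces to: with hardware and logical providers gone, kcf_submit_request() keeps only the former software-provider branches. This is an illustrative condensation of the '+' side of the kcf_sched.c hunk below, not the verbatim function; the sketch's name is hypothetical and the request-ID bookkeeping for CRYPTO_ALWAYS_QUEUE is omitted.

/*
 * Illustrative condensation (not verbatim) of the post-patch
 * kcf_submit_request() from the kcf_sched.c hunk below. One provider
 * type remains, so there is no switch on pd->pd_prov_type and no
 * per-provider taskq. Request-ID handling is omitted for brevity.
 */
static int
kcf_submit_request_sketch(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
    crypto_call_req_t *crq, kcf_req_params_t *params)
{
    kcf_context_t *kcf_ctx = ctx ?
        (kcf_context_t *)ctx->cc_framework_private : NULL;

    if (crq == NULL)    /* synchronous: call the provider in-line */
        return (common_submit_request(pd, ctx, params,
            KCF_RHNDL(KM_SLEEP)));

    if (!(crq->cr_flag & CRYPTO_ALWAYS_QUEUE))    /* async, direct */
        return (common_submit_request(pd, ctx, params,
            KCF_RHNDL(KM_NOSLEEP)));

    /* async with CRYPTO_ALWAYS_QUEUE: enqueue on the global queue */
    kcf_areq_node_t *areq = kcf_areqnode_alloc(pd, kcf_ctx, crq, params);
    if (areq == NULL)
        return (CRYPTO_HOST_MEMORY);
    return (kcf_disp_sw_request(areq));    /* CRYPTO_QUEUED on success */
}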
diff --git a/module/icp/core/kcf_callprov.c b/module/icp/core/kcf_callprov.c
index 1468e0a1a..3fe6e3ebc 100644
--- a/module/icp/core/kcf_callprov.c
+++ b/module/icp/core/kcf_callprov.c
@@ -69,168 +69,6 @@ is_in_triedlist(kcf_provider_desc_t *pd, kcf_prov_tried_t *triedl)
 }
 
 /*
- * Search a mech entry's hardware provider list for the specified
- * provider. Return true if found.
- */
-static boolean_t
-is_valid_provider_for_mech(kcf_provider_desc_t *pd, kcf_mech_entry_t *me,
-    crypto_func_group_t fg)
-{
-    kcf_prov_mech_desc_t *prov_chain;
-
-    prov_chain = me->me_hw_prov_chain;
-    if (prov_chain != NULL) {
-        ASSERT(me->me_num_hwprov > 0);
-        for (; prov_chain != NULL; prov_chain = prov_chain->pm_next) {
-            if (prov_chain->pm_prov_desc == pd &&
-                IS_FG_SUPPORTED(prov_chain, fg)) {
-                return (B_TRUE);
-            }
-        }
-    }
-    return (B_FALSE);
-}
-
-/*
- * This routine, given a logical provider, returns the least loaded
- * provider belonging to the logical provider. The provider must be
- * able to do the specified mechanism, i.e. check that the mechanism
- * hasn't been disabled. In addition, just in case providers are not
- * entirely equivalent, the provider's entry point is checked for
- * non-nullness. This is accomplished by having the caller pass, as
- * arguments, the offset of the function group (offset_1), and the
- * offset of the function within the function group (offset_2).
- * Returns NULL if no provider can be found.
- */
-int
-kcf_get_hardware_provider(crypto_mech_type_t mech_type_1,
-    crypto_mech_type_t mech_type_2, boolean_t call_restrict,
-    kcf_provider_desc_t *old, kcf_provider_desc_t **new, crypto_func_group_t fg)
-{
-    kcf_provider_desc_t *provider, *real_pd = old;
-    kcf_provider_desc_t *gpd = NULL;    /* good provider */
-    kcf_provider_desc_t *bpd = NULL;    /* busy provider */
-    kcf_provider_list_t *p;
-    kcf_ops_class_t class;
-    kcf_mech_entry_t *me;
-    const kcf_mech_entry_tab_t *me_tab;
-    int index, len, gqlen = INT_MAX, rv = CRYPTO_SUCCESS;
-
-    /* get the mech entry for the specified mechanism */
-    class = KCF_MECH2CLASS(mech_type_1);
-    if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS)) {
-        return (CRYPTO_MECHANISM_INVALID);
-    }
-
-    me_tab = &kcf_mech_tabs_tab[class];
-    index = KCF_MECH2INDEX(mech_type_1);
-    if ((index < 0) || (index >= me_tab->met_size)) {
-        return (CRYPTO_MECHANISM_INVALID);
-    }
-
-    me = &((me_tab->met_tab)[index]);
-    mutex_enter(&me->me_mutex);
-
-    /*
-     * We assume the provider descriptor will not go away because
-     * it is being held somewhere, i.e. its reference count has been
-     * incremented. In the case of the crypto module, the provider
-     * descriptor is held by the session structure.
-     */
-    if (old->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
-        if (old->pd_provider_list == NULL) {
-            real_pd = NULL;
-            rv = CRYPTO_DEVICE_ERROR;
-            goto out;
-        }
-        /*
-         * Find the least loaded real provider. KCF_PROV_LOAD gives
-         * the load (number of pending requests) of the provider.
-         */
-        mutex_enter(&old->pd_lock);
-        p = old->pd_provider_list;
-        while (p != NULL) {
-            provider = p->pl_provider;
-
-            ASSERT(provider->pd_prov_type !=
-                CRYPTO_LOGICAL_PROVIDER);
-
-            if (call_restrict &&
-                (provider->pd_flags & KCF_PROV_RESTRICTED)) {
-                p = p->pl_next;
-                continue;
-            }
-
-            if (!is_valid_provider_for_mech(provider, me, fg)) {
-                p = p->pl_next;
-                continue;
-            }
-
-            /* provider does second mech */
-            if (mech_type_2 != CRYPTO_MECH_INVALID) {
-                int i;
-
-                i = KCF_TO_PROV_MECH_INDX(provider,
-                    mech_type_2);
-                if (i == KCF_INVALID_INDX) {
-                    p = p->pl_next;
-                    continue;
-                }
-            }
-
-            if (provider->pd_state != KCF_PROV_READY) {
-                /* choose BUSY if no READY providers */
-                if (provider->pd_state == KCF_PROV_BUSY)
-                    bpd = provider;
-                p = p->pl_next;
-                continue;
-            }
-
-            len = KCF_PROV_LOAD(provider);
-            if (len < gqlen) {
-                gqlen = len;
-                gpd = provider;
-            }
-
-            p = p->pl_next;
-        }
-
-        if (gpd != NULL) {
-            real_pd = gpd;
-            KCF_PROV_REFHOLD(real_pd);
-        } else if (bpd != NULL) {
-            real_pd = bpd;
-            KCF_PROV_REFHOLD(real_pd);
-        } else {
-            /* can't find provider */
-            real_pd = NULL;
-            rv = CRYPTO_MECHANISM_INVALID;
-        }
-        mutex_exit(&old->pd_lock);
-
-    } else {
-        if (!KCF_IS_PROV_USABLE(old) ||
-            (call_restrict && (old->pd_flags & KCF_PROV_RESTRICTED))) {
-            real_pd = NULL;
-            rv = CRYPTO_DEVICE_ERROR;
-            goto out;
-        }
-
-        if (!is_valid_provider_for_mech(old, me, fg)) {
-            real_pd = NULL;
-            rv = CRYPTO_MECHANISM_INVALID;
-            goto out;
-        }
-
-        KCF_PROV_REFHOLD(real_pd);
-    }
-out:
-    mutex_exit(&me->me_mutex);
-    *new = real_pd;
-    return (rv);
-}
-
-/*
  * Return the best provider for the specified mechanism. The provider
  * is held and it is the caller's responsibility to release it when done.
  * The fg input argument is used as a search criterion to pick a provider.
@@ -247,11 +85,10 @@ out:
 kcf_provider_desc_t *
 kcf_get_mech_provider(crypto_mech_type_t mech_type, kcf_mech_entry_t **mepp,
     int *error, kcf_prov_tried_t *triedl, crypto_func_group_t fg,
-    boolean_t call_restrict, size_t data_size)
+    boolean_t call_restrict)
 {
-    kcf_provider_desc_t *pd = NULL, *gpd = NULL;
-    kcf_prov_mech_desc_t *prov_chain, *mdesc;
-    int len, gqlen = INT_MAX;
+    kcf_provider_desc_t *pd = NULL;
+    kcf_prov_mech_desc_t *mdesc;
     kcf_ops_class_t class;
     int index;
     kcf_mech_entry_t *me;
@@ -276,50 +113,7 @@ kcf_get_mech_provider(crypto_mech_type_t mech_type, kcf_mech_entry_t **mepp,
 
     mutex_enter(&me->me_mutex);
 
-    prov_chain = me->me_hw_prov_chain;
-
-    /*
-     * We check for the threshold for using a hardware provider for
-     * this amount of data. If there is no software provider available
-     * for the mechanism, then the threshold is ignored.
-     */
-    if ((prov_chain != NULL) &&
-        ((data_size == 0) || (me->me_threshold == 0) ||
-        (data_size >= me->me_threshold) ||
-        ((mdesc = me->me_sw_prov) == NULL) ||
-        (!IS_FG_SUPPORTED(mdesc, fg)) ||
-        (!KCF_IS_PROV_USABLE(mdesc->pm_prov_desc)))) {
-        ASSERT(me->me_num_hwprov > 0);
-        /* there is at least one provider */
-
-        /*
-         * Find the least loaded real provider. KCF_PROV_LOAD gives
-         * the load (number of pending requests) of the provider.
-         */
-        while (prov_chain != NULL) {
-            pd = prov_chain->pm_prov_desc;
-
-            if (!IS_FG_SUPPORTED(prov_chain, fg) ||
-                !KCF_IS_PROV_USABLE(pd) ||
-                IS_PROVIDER_TRIED(pd, triedl) ||
-                (call_restrict &&
-                (pd->pd_flags & KCF_PROV_RESTRICTED))) {
-                prov_chain = prov_chain->pm_next;
-                continue;
-            }
-
-            if ((len = KCF_PROV_LOAD(pd)) < gqlen) {
-                gqlen = len;
-                gpd = pd;
-            }
-
-            prov_chain = prov_chain->pm_next;
-        }
-
-        pd = gpd;
-    }
-
-    /* No HW provider for this mech, is there a SW provider? */
+    /* Is there a provider? */
     if (pd == NULL && (mdesc = me->me_sw_prov) != NULL) {
         pd = mdesc->pm_prov_desc;
         if (!IS_FG_SUPPORTED(mdesc, fg) ||
diff --git a/module/icp/core/kcf_mech_tabs.c b/module/icp/core/kcf_mech_tabs.c
index 3d551afed..beed581a5 100644
--- a/module/icp/core/kcf_mech_tabs.c
+++ b/module/icp/core/kcf_mech_tabs.c
@@ -369,8 +369,6 @@ kcf_add_mech_provider(short mech_indx,
     crypto_mech_type_t kcf_mech_type;
     kcf_prov_mech_desc_t *prov_mech;
 
-    ASSERT(prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
-
     mech_info = &prov_desc->pd_mechanisms[mech_indx];
 
     /*
@@ -425,50 +423,34 @@ kcf_add_mech_provider(short mech_indx,
      * Add new kcf_prov_mech_desc at the front of HW providers
      * chain.
      */
-    switch (prov_desc->pd_prov_type) {
-
-    case CRYPTO_HW_PROVIDER:
-        mutex_enter(&mech_entry->me_mutex);
-        prov_mech->pm_me = mech_entry;
-        prov_mech->pm_next = mech_entry->me_hw_prov_chain;
-        mech_entry->me_hw_prov_chain = prov_mech;
-        mech_entry->me_num_hwprov++;
-        mutex_exit(&mech_entry->me_mutex);
-        break;
-
-    case CRYPTO_SW_PROVIDER:
-        mutex_enter(&mech_entry->me_mutex);
-        if (mech_entry->me_sw_prov != NULL) {
-            /*
-             * There is already a SW provider for this mechanism.
-             * Since we allow only one SW provider per mechanism,
-             * report this condition.
-             */
-            cmn_err(CE_WARN, "The cryptographic software provider "
-                "\"%s\" will not be used for %s. The provider "
-                "\"%s\" will be used for this mechanism "
-                "instead.", prov_desc->pd_description,
-                mech_info->cm_mech_name,
-                mech_entry->me_sw_prov->pm_prov_desc->
-                pd_description);
-            KCF_PROV_REFRELE(prov_desc);
-            kmem_free(prov_mech, sizeof (kcf_prov_mech_desc_t));
-            prov_mech = NULL;
-        } else {
-            /*
-             * Set the provider as the software provider for
-             * this mechanism.
-             */
-            mech_entry->me_sw_prov = prov_mech;
+    mutex_enter(&mech_entry->me_mutex);
+    if (mech_entry->me_sw_prov != NULL) {
+        /*
+         * There is already a provider for this mechanism.
+         * Since we allow only one provider per mechanism,
+         * report this condition.
+         */
+        cmn_err(CE_WARN, "The cryptographic provider "
+            "\"%s\" will not be used for %s. The provider "
+            "\"%s\" will be used for this mechanism "
+            "instead.", prov_desc->pd_description,
+            mech_info->cm_mech_name,
+            mech_entry->me_sw_prov->pm_prov_desc->
+            pd_description);
+        KCF_PROV_REFRELE(prov_desc);
+        kmem_free(prov_mech, sizeof (kcf_prov_mech_desc_t));
+        prov_mech = NULL;
+    } else {
+        /*
+         * Set the provider as the provider for
+         * this mechanism.
+         */
+        mech_entry->me_sw_prov = prov_mech;
 
-            /* We'll wrap around after 4 billion registrations! */
-            mech_entry->me_gen_swprov = kcf_gen_swprov++;
-        }
-        mutex_exit(&mech_entry->me_mutex);
-        break;
-    default:
-        break;
+        /* We'll wrap around after 4 billion registrations! */
+        mech_entry->me_gen_swprov = kcf_gen_swprov++;
     }
+    mutex_exit(&mech_entry->me_mutex);
 
     *pmdpp = prov_mech;
 
@@ -494,12 +476,8 @@ void
 kcf_remove_mech_provider(const char *mech_name, kcf_provider_desc_t *prov_desc)
 {
     crypto_mech_type_t mech_type;
-    kcf_prov_mech_desc_t *prov_mech = NULL, *prov_chain;
-    kcf_prov_mech_desc_t **prev_entry_next;
+    kcf_prov_mech_desc_t *prov_mech = NULL;
     kcf_mech_entry_t *mech_entry;
-    crypto_mech_info_list_t *mil, *mil2, *next, **prev_next;
-
-    ASSERT(prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
 
     /* get the KCF mech type that was assigned to the mechanism */
     if ((mech_type = kcf_mech_hash_find(mech_name)) ==
@@ -521,88 +499,16 @@ kcf_remove_mech_provider(const char *mech_name, kcf_provider_desc_t *prov_desc)
     }
 
     mutex_enter(&mech_entry->me_mutex);
-
-    switch (prov_desc->pd_prov_type) {
-
-    case CRYPTO_HW_PROVIDER:
-        /* find the provider in the mech_entry chain */
-        prev_entry_next = &mech_entry->me_hw_prov_chain;
-        prov_mech = mech_entry->me_hw_prov_chain;
-        while (prov_mech != NULL &&
-            prov_mech->pm_prov_desc != prov_desc) {
-            prev_entry_next = &prov_mech->pm_next;
-            prov_mech = prov_mech->pm_next;
-        }
-
-        if (prov_mech == NULL) {
-            /* entry not found, simply return */
-            mutex_exit(&mech_entry->me_mutex);
-            return;
-        }
-
-        /* remove provider entry from mech_entry chain */
-        *prev_entry_next = prov_mech->pm_next;
-        ASSERT(mech_entry->me_num_hwprov > 0);
-        mech_entry->me_num_hwprov--;
-        break;
-
-    case CRYPTO_SW_PROVIDER:
-        if (mech_entry->me_sw_prov == NULL ||
-            mech_entry->me_sw_prov->pm_prov_desc != prov_desc) {
-            /* not the software provider for this mechanism */
-            mutex_exit(&mech_entry->me_mutex);
-            return;
-        }
-        prov_mech = mech_entry->me_sw_prov;
-        mech_entry->me_sw_prov = NULL;
-        break;
-    default:
-        /* unexpected crypto_provider_type_t */
+    if (mech_entry->me_sw_prov == NULL ||
+        mech_entry->me_sw_prov->pm_prov_desc != prov_desc) {
+        /* not the provider for this mechanism */
         mutex_exit(&mech_entry->me_mutex);
         return;
     }
-
+    prov_mech = mech_entry->me_sw_prov;
+    mech_entry->me_sw_prov = NULL;
     mutex_exit(&mech_entry->me_mutex);
 
-    /* Free the dual ops cross-reference lists */
-    mil = prov_mech->pm_mi_list;
-    while (mil != NULL) {
-        next = mil->ml_next;
-        if (kcf_get_mech_entry(mil->ml_kcf_mechid,
-            &mech_entry) != KCF_SUCCESS) {
-            mil = next;
-            continue;
-        }
-
-        mutex_enter(&mech_entry->me_mutex);
-        if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
-            prov_chain = mech_entry->me_hw_prov_chain;
-        else
-            prov_chain = mech_entry->me_sw_prov;
-
-        while (prov_chain != NULL) {
-            if (prov_chain->pm_prov_desc == prov_desc) {
-                prev_next = &prov_chain->pm_mi_list;
-                mil2 = prov_chain->pm_mi_list;
-                while (mil2 != NULL &&
-                    mil2->ml_kcf_mechid != mech_type) {
-                    prev_next = &mil2->ml_next;
-                    mil2 = mil2->ml_next;
-                }
-                if (mil2 != NULL) {
-                    *prev_next = mil2->ml_next;
-                    kmem_free(mil2, sizeof (*mil2));
-                }
-                break;
-            }
-            prov_chain = prov_chain->pm_next;
-        }
-
-        mutex_exit(&mech_entry->me_mutex);
-        kmem_free(mil, sizeof (crypto_mech_info_list_t));
-        mil = next;
-    }
-
     /* free entry */
     KCF_PROV_REFRELE(prov_mech->pm_prov_desc);
     KCF_PROV_IREFRELE(prov_mech->pm_prov_desc);
@@ -656,8 +562,8 @@ kcf_get_mech_entry(crypto_mech_type_t mech_type, kcf_mech_entry_t **mep)
 
 /*
  * Lookup the hash table for an entry that matches the mechname.
- * If there are no hardware or software providers for the mechanism,
- * but there is an unloaded software provider, this routine will attempt
+ * If there are no providers for the mechanism,
+ * but there is an unloaded provider, this routine will attempt
  * to load it.
  */
 crypto_mech_type_t
diff --git a/module/icp/core/kcf_prov_tabs.c b/module/icp/core/kcf_prov_tabs.c
index 525872aab..25d9908d1 100644
--- a/module/icp/core/kcf_prov_tabs.c
+++ b/module/icp/core/kcf_prov_tabs.c
@@ -201,7 +201,7 @@ kcf_prov_tab_lookup(crypto_provider_id_t prov_id)
  * since it is invoked from user context during provider registration.
  */
 kcf_provider_desc_t *
-kcf_alloc_provider_desc(const crypto_provider_info_t *info)
+kcf_alloc_provider_desc(void)
 {
     kcf_provider_desc_t *desc =
         kmem_zalloc(sizeof (kcf_provider_desc_t), KM_SLEEP);
@@ -223,7 +223,7 @@
 /*
  * Called by KCF_PROV_REFRELE when a provider's reference count drops
  * to zero. We free the descriptor when the last reference is released.
- * However, for software providers, we do not free it when there is an
+ * However, for providers, we do not free it when there is an
  * unregister thread waiting. We signal that thread in this case and
  * that thread is responsible for freeing the descriptor.
  */
@@ -231,22 +231,16 @@ void
 kcf_provider_zero_refcnt(kcf_provider_desc_t *desc)
 {
     mutex_enter(&desc->pd_lock);
-    switch (desc->pd_prov_type) {
-    case CRYPTO_SW_PROVIDER:
-        if (desc->pd_state == KCF_PROV_REMOVED ||
-            desc->pd_state == KCF_PROV_DISABLED) {
-            desc->pd_state = KCF_PROV_FREED;
-            cv_broadcast(&desc->pd_remove_cv);
-            mutex_exit(&desc->pd_lock);
-            break;
-        }
-        zfs_fallthrough;
-
-    case CRYPTO_HW_PROVIDER:
-    case CRYPTO_LOGICAL_PROVIDER:
+    if (desc->pd_state == KCF_PROV_REMOVED ||
+        desc->pd_state == KCF_PROV_DISABLED) {
+        desc->pd_state = KCF_PROV_FREED;
+        cv_broadcast(&desc->pd_remove_cv);
         mutex_exit(&desc->pd_lock);
-        kcf_free_provider_desc(desc);
+        return;
     }
+
+    mutex_exit(&desc->pd_lock);
+    kcf_free_provider_desc(desc);
 }
 
 /*
@@ -269,9 +263,6 @@ kcf_free_provider_desc(kcf_provider_desc_t *desc)
 
     /* free the kernel memory associated with the provider descriptor */
 
-    if (desc->pd_sched_info.ks_taskq != NULL)
-        taskq_destroy(desc->pd_sched_info.ks_taskq);
-
     mutex_destroy(&desc->pd_lock);
     cv_destroy(&desc->pd_resume_cv);
    cv_destroy(&desc->pd_remove_cv);
@@ -281,7 +272,7 @@ kcf_free_provider_desc(kcf_provider_desc_t *desc)
 
 /*
  * Returns in the location pointed to by pd a pointer to the descriptor
- * for the software provider for the specified mechanism.
+ * for the provider for the specified mechanism.
  * The provider descriptor is returned held and it is the caller's
 * responsibility to release it when done. The mechanism entry
 * is returned if the optional argument mep is non NULL.
@@ -300,16 +291,16 @@ kcf_get_sw_prov(crypto_mech_type_t mech_type, kcf_provider_desc_t **pd,
        return (CRYPTO_MECHANISM_INVALID);
 
     /*
-     * Get the software provider for this mechanism.
+     * Get the provider for this mechanism.
      * Lock the mech_entry until we grab the 'pd'.
      */
     mutex_enter(&me->me_mutex);
     if (me->me_sw_prov == NULL ||
         (*pd = me->me_sw_prov->pm_prov_desc) == NULL) {
-        /* no SW provider for this mechanism */
+        /* no provider for this mechanism */
         if (log_warn)
-            cmn_err(CE_WARN, "no SW provider for \"%s\"\n",
+            cmn_err(CE_WARN, "no provider for \"%s\"\n",
                 me->me_name);
         mutex_exit(&me->me_mutex);
         return (CRYPTO_MECH_NOT_SUPPORTED);
diff --git a/module/icp/core/kcf_sched.c b/module/icp/core/kcf_sched.c
index b50e80529..b1149072f 100644
--- a/module/icp/core/kcf_sched.c
+++ b/module/icp/core/kcf_sched.c
@@ -35,7 +35,7 @@
 #include <sys/crypto/sched_impl.h>
 #include <sys/crypto/api.h>
 
-static kcf_global_swq_t *gswq;    /* Global software queue */
+static kcf_global_swq_t *gswq;    /* Global queue */
 
 /* Thread pool related variables */
 static kcf_pool_t *kcfpool;    /* Thread pool of kcfd LWPs */
@@ -58,16 +58,13 @@ static kcf_stats_t kcf_ksdata = {
     { "max threads in pool", KSTAT_DATA_UINT32},
     { "requests in gswq", KSTAT_DATA_UINT32},
     { "max requests in gswq", KSTAT_DATA_UINT32},
-    { "threads for HW taskq", KSTAT_DATA_UINT32},
-    { "minalloc for HW taskq", KSTAT_DATA_UINT32},
-    { "maxalloc for HW taskq", KSTAT_DATA_UINT32}
+    { "maxalloc for gwsq", KSTAT_DATA_UINT32}
 };
 
 static kstat_t *kcf_misc_kstat = NULL;
 ulong_t kcf_swprov_hndl = 0;
 
 static int kcf_disp_sw_request(kcf_areq_node_t *);
-static void process_req_hwp(void *);
 static int kcf_enqueue(kcf_areq_node_t *);
 static void kcfpool_alloc(void);
 static void kcf_reqid_delete(kcf_areq_node_t *areq);
@@ -225,118 +222,6 @@ kcf_disp_sw_request(kcf_areq_node_t *areq)
 }
 
 /*
- * This routine is called by the taskq associated with
- * each hardware provider. We notify the kernel consumer
- * via the callback routine in case of CRYPTO_SUCCESS or
- * a failure.
- *
- * A request can be of type kcf_areq_node_t or of type
- * kcf_sreq_node_t.
- */
-static void
-process_req_hwp(void *ireq)
-{
-    int error = 0;
-    crypto_ctx_t *ctx;
-    kcf_call_type_t ctype;
-    kcf_provider_desc_t *pd;
-    kcf_areq_node_t *areq = (kcf_areq_node_t *)ireq;
-    kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)ireq;
-
-    pd = ((ctype = GET_REQ_TYPE(ireq)) == CRYPTO_SYNCH) ?
-        sreq->sn_provider : areq->an_provider;
-
-    /*
-     * Wait if flow control is in effect for the provider. A
-     * CRYPTO_PROVIDER_READY or CRYPTO_PROVIDER_FAILED
-     * notification will signal us. We also get signaled if
-     * the provider is unregistering.
-     */
-    if (pd->pd_state == KCF_PROV_BUSY) {
-        mutex_enter(&pd->pd_lock);
-        while (pd->pd_state == KCF_PROV_BUSY)
-            cv_wait(&pd->pd_resume_cv, &pd->pd_lock);
-        mutex_exit(&pd->pd_lock);
-    }
-
-    /*
-     * Bump the internal reference count while the request is being
-     * processed. This is how we know when it's safe to unregister
-     * a provider. This step must precede the pd_state check below.
-     */
-    KCF_PROV_IREFHOLD(pd);
-
-    /*
-     * Fail the request if the provider has failed. We return a
-     * recoverable error and the notified clients attempt any
-     * recovery. For async clients this is done in kcf_aop_done()
-     * and for sync clients it is done in the k-api routines.
-     */
-    if (pd->pd_state >= KCF_PROV_FAILED) {
-        error = CRYPTO_DEVICE_ERROR;
-        goto bail;
-    }
-
-    if (ctype == CRYPTO_SYNCH) {
-        mutex_enter(&sreq->sn_lock);
-        sreq->sn_state = REQ_INPROGRESS;
-        mutex_exit(&sreq->sn_lock);
-
-        ctx = sreq->sn_context ? &sreq->sn_context->kc_glbl_ctx : NULL;
-        error = common_submit_request(sreq->sn_provider, ctx,
-            sreq->sn_params, sreq);
-    } else {
-        kcf_context_t *ictx;
-        ASSERT(ctype == CRYPTO_ASYNCH);
-
-        /*
-         * We are in the per-hardware provider thread context and
-         * hence can sleep. Note that the caller would have done
-         * a taskq_dispatch(..., TQ_NOSLEEP) and would have returned.
-         */
-        ctx = (ictx = areq->an_context) ? &ictx->kc_glbl_ctx : NULL;
-
-        mutex_enter(&areq->an_lock);
-        /*
-         * We need to maintain ordering for multi-part requests.
-         * an_is_my_turn is set to B_TRUE initially for a request
-         * when it is enqueued and there are no other requests
-         * for that context. It is set later from kcf_aop_done() when
-         * the request before us in the chain of requests for the
-         * context completes. We get signaled at that point.
-         */
-        if (ictx != NULL) {
-            ASSERT(ictx->kc_prov_desc == areq->an_provider);
-
-            while (areq->an_is_my_turn == B_FALSE) {
-                cv_wait(&areq->an_turn_cv, &areq->an_lock);
-            }
-        }
-        areq->an_state = REQ_INPROGRESS;
-        mutex_exit(&areq->an_lock);
-
-        error = common_submit_request(areq->an_provider, ctx,
-            &areq->an_params, areq);
-    }
-
-bail:
-    if (error == CRYPTO_QUEUED) {
-        /*
-         * The request is queued by the provider and we should
-         * get a crypto_op_notification() from the provider later.
-         * We notify the consumer at that time.
-         */
-        return;
-    } else {    /* CRYPTO_SUCCESS or other failure */
-        KCF_PROV_IREFRELE(pd);
-        if (ctype == CRYPTO_SYNCH)
-            kcf_sop_done(sreq, error);
-        else
-            kcf_aop_done(areq, error);
-    }
-}
-
-/*
  * This routine checks if a request can be retried on another
 * provider. If true, mech1 is initialized to point to the mechanism
 * structure. fg is initialized to the correct crypto_func_group_t bit flag.
@@ -441,7 +326,7 @@ kcf_resubmit_request(kcf_areq_node_t *areq)
 
     new_pd = kcf_get_mech_provider(mech1->cm_type, NULL, &error,
         areq->an_tried_plist, fg,
-        (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
+        (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED));
 
     if (new_pd == NULL)
         return (error);
@@ -472,26 +357,7 @@ kcf_resubmit_request(kcf_areq_node_t *areq)
     areq->an_state = REQ_WAITING;
     mutex_exit(&areq->an_lock);
 
-    switch (new_pd->pd_prov_type) {
-    case CRYPTO_SW_PROVIDER:
-        error = kcf_disp_sw_request(areq);
-        break;
-
-    case CRYPTO_HW_PROVIDER: {
-        taskq_t *taskq = new_pd->pd_sched_info.ks_taskq;
-
-        if (taskq_dispatch(taskq, process_req_hwp, areq, TQ_NOSLEEP) ==
-            TASKQID_INVALID) {
-            error = CRYPTO_HOST_MEMORY;
-        } else {
-            error = CRYPTO_QUEUED;
-        }
-
-        break;
-    }
-    default:
-        break;
-    }
+    error = kcf_disp_sw_request(areq);
 
     return (error);
 }
@@ -515,196 +381,58 @@ kcf_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
 {
     int error = CRYPTO_SUCCESS;
     kcf_areq_node_t *areq;
-    kcf_sreq_node_t *sreq;
     kcf_context_t *kcf_ctx;
-    taskq_t *taskq = pd->pd_sched_info.ks_taskq;
 
     kcf_ctx = ctx ? (kcf_context_t *)ctx->cc_framework_private : NULL;
 
-    /* Synchronous cases */
+    /* Synchronous */
     if (crq == NULL) {
-        switch (pd->pd_prov_type) {
-        case CRYPTO_SW_PROVIDER:
-            error = common_submit_request(pd, ctx, params,
-                KCF_RHNDL(KM_SLEEP));
-            break;
-
-        case CRYPTO_HW_PROVIDER:
+        error = common_submit_request(pd, ctx, params,
+            KCF_RHNDL(KM_SLEEP));
+    } else {    /* Asynchronous */
+        if (!(crq->cr_flag & CRYPTO_ALWAYS_QUEUE)) {
             /*
-             * Special case for CRYPTO_SYNCHRONOUS providers that
-             * never return a CRYPTO_QUEUED error. We skip any
-             * request allocation and call the SPI directly.
+             * This case has less overhead since there is
+             * no switching of context.
              */
-            if ((pd->pd_flags & CRYPTO_SYNCHRONOUS) &&
-                taskq_empty(taskq)) {
-                KCF_PROV_IREFHOLD(pd);
-                if (pd->pd_state == KCF_PROV_READY) {
-                    error = common_submit_request(pd, ctx,
-                        params, KCF_RHNDL(KM_SLEEP));
-                    KCF_PROV_IREFRELE(pd);
-                    ASSERT(error != CRYPTO_QUEUED);
-                    break;
-                }
-                KCF_PROV_IREFRELE(pd);
-            }
-
-            sreq = kmem_cache_alloc(kcf_sreq_cache, KM_SLEEP);
-            sreq->sn_state = REQ_ALLOCATED;
-            sreq->sn_rv = CRYPTO_FAILED;
-            sreq->sn_params = params;
-
-            /*
-             * Note that we do not need to hold the context
-             * for synchronous case as the context will never
-             * become invalid underneath us. We do not need to hold
-             * the provider here either as the caller has a hold.
-             */
-            sreq->sn_context = kcf_ctx;
-            ASSERT(KCF_PROV_REFHELD(pd));
-            sreq->sn_provider = pd;
-
-            ASSERT(taskq != NULL);
+            error = common_submit_request(pd, ctx, params,
+                KCF_RHNDL(KM_NOSLEEP));
+        } else {
             /*
-             * Call the SPI directly if the taskq is empty and the
-             * provider is not busy, else dispatch to the taskq.
-             * Calling directly is fine as this is the synchronous
-             * case. This is unlike the asynchronous case where we
-             * must always dispatch to the taskq.
+             * CRYPTO_ALWAYS_QUEUE is set. We need to
+             * queue the request and return.
             */
-            if (taskq_empty(taskq) &&
-                pd->pd_state == KCF_PROV_READY) {
-                process_req_hwp(sreq);
-            } else {
+            areq = kcf_areqnode_alloc(pd, kcf_ctx, crq,
+                params);
+            if (areq == NULL)
+                error = CRYPTO_HOST_MEMORY;
+            else {
+                if (!(crq->cr_flag
+                    & CRYPTO_SKIP_REQID)) {
                 /*
-                 * We can not tell from taskq_dispatch() return
-                 * value if we exceeded maxalloc. Hence the
-                 * check here. Since we are allowed to wait in
-                 * the synchronous case, we wait for the taskq
-                 * to become empty.
+                 * Set the request handle. We have to
+                 * do this before dispatching the
+                 * request.
                 */
-                if (taskq->tq_nalloc >= crypto_taskq_maxalloc) {
-                    taskq_wait(taskq);
+                crq->cr_reqid = kcf_reqid_insert(areq);
                 }
 
-                (void) taskq_dispatch(taskq, process_req_hwp,
-                    sreq, TQ_SLEEP);
-            }
-
-            /*
-             * Wait for the notification to arrive,
-             * if the operation is not done yet.
-             * Bug# 4722589 will make the wait a cv_wait_sig().
-             */
-            mutex_enter(&sreq->sn_lock);
-            while (sreq->sn_state < REQ_DONE)
-                cv_wait(&sreq->sn_cv, &sreq->sn_lock);
-            mutex_exit(&sreq->sn_lock);
-
-            error = sreq->sn_rv;
-            kmem_cache_free(kcf_sreq_cache, sreq);
-
-            break;
-
-        default:
-            error = CRYPTO_FAILED;
-            break;
-        }
-
-    } else {    /* Asynchronous cases */
-        switch (pd->pd_prov_type) {
-        case CRYPTO_SW_PROVIDER:
-            if (!(crq->cr_flag & CRYPTO_ALWAYS_QUEUE)) {
+                error = kcf_disp_sw_request(areq);
                 /*
-                 * This case has less overhead since there is
-                 * no switching of context.
+                 * There is an error processing this
+                 * request. Remove the handle and
+                 * release the request structure.
                 */
-                error = common_submit_request(pd, ctx, params,
-                    KCF_RHNDL(KM_NOSLEEP));
-            } else {
-                /*
-                 * CRYPTO_ALWAYS_QUEUE is set. We need to
-                 * queue the request and return.
-                 */
-                areq = kcf_areqnode_alloc(pd, kcf_ctx, crq,
-                    params);
-                if (areq == NULL)
-                    error = CRYPTO_HOST_MEMORY;
-                else {
+                if (error != CRYPTO_QUEUED) {
                     if (!(crq->cr_flag
-                        & CRYPTO_SKIP_REQID)) {
-                    /*
-                     * Set the request handle. We have to
-                     * do this before dispatching the
-                     * request.
-                     */
-                    crq->cr_reqid = kcf_reqid_insert(areq);
-                    }
-
-                    error = kcf_disp_sw_request(areq);
-                    /*
-                     * There is an error processing this
-                     * request. Remove the handle and
-                     * release the request structure.
-                     */
-                    if (error != CRYPTO_QUEUED) {
-                        if (!(crq->cr_flag
-                            & CRYPTO_SKIP_REQID))
-                            kcf_reqid_delete(areq);
-                        KCF_AREQ_REFRELE(areq);
-                    }
+                        & CRYPTO_SKIP_REQID))
+                        kcf_reqid_delete(areq);
+                    KCF_AREQ_REFRELE(areq);
                 }
             }
-            break;
-
-        case CRYPTO_HW_PROVIDER:
-            /*
-             * We need to queue the request and return.
-             */
-            areq = kcf_areqnode_alloc(pd, kcf_ctx, crq, params);
-            if (areq == NULL) {
-                error = CRYPTO_HOST_MEMORY;
-                goto done;
-            }
-
-            ASSERT(taskq != NULL);
-            /*
-             * We can not tell from taskq_dispatch() return
-             * value if we exceeded maxalloc. Hence the check
-             * here.
-             */
-            if (taskq->tq_nalloc >= crypto_taskq_maxalloc) {
-                error = CRYPTO_BUSY;
-                KCF_AREQ_REFRELE(areq);
-                goto done;
-            }
-
-            if (!(crq->cr_flag & CRYPTO_SKIP_REQID)) {
-                /*
-                 * Set the request handle. We have to do this
-                 * before dispatching the request.
-                 */
-                crq->cr_reqid = kcf_reqid_insert(areq);
-            }
-
-            if (taskq_dispatch(taskq,
-                process_req_hwp, areq, TQ_NOSLEEP) ==
-                TASKQID_INVALID) {
-                error = CRYPTO_HOST_MEMORY;
-                if (!(crq->cr_flag & CRYPTO_SKIP_REQID))
-                    kcf_reqid_delete(areq);
-                KCF_AREQ_REFRELE(areq);
-            } else {
-                error = CRYPTO_QUEUED;
-            }
-            break;
-
-        default:
-            error = CRYPTO_FAILED;
-            break;
         }
     }
 
-done:
     return (error);
 }
@@ -750,7 +478,7 @@ kcf_free_context(kcf_context_t *kcf_ctx)
 
     /* kcf_ctx->kc_prov_desc has a hold on pd */
     KCF_PROV_REFRELE(kcf_ctx->kc_prov_desc);
 
-    /* check if this context is shared with a software provider */
+    /* check if this context is shared with a provider */
     if ((gctx->cc_flags & CRYPTO_INIT_OPSTATE) &&
         kcf_ctx->kc_sw_prov_desc != NULL) {
         KCF_PROV_REFRELE(kcf_ctx->kc_sw_prov_desc);
@@ -775,7 +503,7 @@ kcf_free_req(kcf_areq_node_t *areq)
 }
 
 /*
- * Add the request node to the end of the global software queue.
+ * Add the request node to the end of the global queue.
  *
 * The caller should not hold the queue lock. Returns 0 if the
 * request is successfully queued. Returns CRYPTO_BUSY if the limit
@@ -969,7 +697,7 @@ kcf_sched_init(void)
     mutex_init(&gswq->gs_lock, NULL, MUTEX_DEFAULT, NULL);
     cv_init(&gswq->gs_cv, NULL, CV_DEFAULT, NULL);
     gswq->gs_njobs = 0;
-    gswq->gs_maxjobs = kcf_maxthreads * crypto_taskq_maxalloc;
+    gswq->gs_maxjobs = kcf_maxthreads * CRYPTO_TASKQ_MAX;
     gswq->gs_first = gswq->gs_last = NULL;
 
     /* Initialize the global reqid table */
@@ -1216,9 +944,7 @@ kcf_misc_kstat_update(kstat_t *ksp, int rw)
     ks_data->ks_maxthrs.value.ui32 = kcf_maxthreads;
     ks_data->ks_swq_njobs.value.ui32 = gswq->gs_njobs;
     ks_data->ks_swq_maxjobs.value.ui32 = gswq->gs_maxjobs;
-    ks_data->ks_taskq_threads.value.ui32 = crypto_taskq_threads;
-    ks_data->ks_taskq_minalloc.value.ui32 = crypto_taskq_minalloc;
-    ks_data->ks_taskq_maxalloc.value.ui32 = crypto_taskq_maxalloc;
+    ks_data->ks_swq_maxalloc.value.ui32 = CRYPTO_TASKQ_MAX;
 
     return (0);
 }
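With kcf_get_hardware_provider() removed, provider selection reduces to a single kcf_get_mech_provider() call. A minimal sketch of a call site under the new signature, modeled on the kcf_resubmit_request() call above; the mechanism variable and the CRYPTO_FG_ENCRYPT_ATOMIC function-group flag are illustrative assumptions, not taken from this commit:

/*
 * Hypothetical call site for the new kcf_get_mech_provider() signature
 * (the trailing data_size threshold argument is gone along with the
 * HW provider chains). 'mech' is an assumed crypto_mechanism_t *.
 */
int error = CRYPTO_SUCCESS;
kcf_mech_entry_t *me = NULL;
kcf_provider_desc_t *pd;

pd = kcf_get_mech_provider(mech->cm_type, &me, &error,
    NULL /* no tried list */, CRYPTO_FG_ENCRYPT_ATOMIC,
    B_FALSE /* call_restrict */);
if (pd == NULL)
    return (error);
/* ... submit the request to pd ... */
KCF_PROV_REFRELE(pd);    /* the provider is returned held */

As the retained comment in kcf_callprov.c states, the provider is returned held, so every caller must release it with KCF_PROV_REFRELE() when done.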