Diffstat (limited to 'module/icp/algs/modes/gcm.c')
-rw-r--r--  module/icp/algs/modes/gcm.c  41
1 file changed, 26 insertions, 15 deletions
diff --git a/module/icp/algs/modes/gcm.c b/module/icp/algs/modes/gcm.c
index 13bceef0f..f6f8434de 100644
--- a/module/icp/algs/modes/gcm.c
+++ b/module/icp/algs/modes/gcm.c
@@ -29,6 +29,7 @@
#include <sys/crypto/impl.h>
#include <sys/byteorder.h>
#include <modes/gcm_impl.h>
+#include <linux/simd.h>
#define GHASH(c, d, t, o) \
xor_block((uint8_t *)(d), (uint8_t *)(c)->gcm_ghash); \
@@ -46,7 +47,7 @@ gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
void (*copy_block)(uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *))
{
- gcm_impl_ops_t *gops;
+ const gcm_impl_ops_t *gops;
size_t remainder = length;
size_t need = 0;
uint8_t *datap = (uint8_t *)data;
@@ -168,7 +169,7 @@ gcm_encrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
void (*copy_block)(uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *))
{
- gcm_impl_ops_t *gops;
+ const gcm_impl_ops_t *gops;
uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
uint8_t *ghash, *macp = NULL;
int i, rv;
@@ -320,7 +321,7 @@ gcm_decrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *))
{
- gcm_impl_ops_t *gops;
+ const gcm_impl_ops_t *gops;
size_t pt_len;
size_t remainder;
uint8_t *ghash;
@@ -427,7 +428,7 @@ gcm_format_initial_blocks(uchar_t *iv, ulong_t iv_len,
void (*copy_block)(uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *))
{
- gcm_impl_ops_t *gops;
+ const gcm_impl_ops_t *gops;
uint8_t *cb;
ulong_t remainder = iv_len;
ulong_t processed = 0;
@@ -481,7 +482,7 @@ gcm_init(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
void (*copy_block)(uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *))
{
- gcm_impl_ops_t *gops;
+ const gcm_impl_ops_t *gops;
uint8_t *ghash, *datap, *authp;
size_t remainder, processed;
@@ -660,12 +661,17 @@ static size_t gcm_supp_impl_cnt = 0;
static gcm_impl_ops_t *gcm_supp_impl[ARRAY_SIZE(gcm_all_impl)];
/*
- * Selects the gcm operation
+ * Returns the GCM operations for encrypt/decrypt/key setup. When a
+ * SIMD implementation is not allowed in the current context, fall
+ * back to the fastest generic implementation.
*/
-gcm_impl_ops_t *
+const gcm_impl_ops_t *
gcm_impl_get_ops()
{
- gcm_impl_ops_t *ops = NULL;
+ if (!kfpu_allowed())
+ return (&gcm_generic_impl);
+
+ const gcm_impl_ops_t *ops = NULL;
const uint32_t impl = GCM_IMPL_READ(icp_gcm_impl);
switch (impl) {
@@ -674,15 +680,13 @@ gcm_impl_get_ops()
ops = &gcm_fastest_impl;
break;
case IMPL_CYCLE:
- {
+ /* Cycle through supported implementations */
ASSERT(gcm_impl_initialized);
ASSERT3U(gcm_supp_impl_cnt, >, 0);
- /* Cycle through supported implementations */
static size_t cycle_impl_idx = 0;
size_t idx = (++cycle_impl_idx) % gcm_supp_impl_cnt;
ops = gcm_supp_impl[idx];
- }
- break;
+ break;
default:
ASSERT3U(impl, <, gcm_supp_impl_cnt);
ASSERT3U(gcm_supp_impl_cnt, >, 0);
@@ -696,13 +700,17 @@ gcm_impl_get_ops()
return (ops);
}
+/*
+ * Initialize all supported implementations.
+ */
+/* ARGSUSED */
void
-gcm_impl_init(void)
+gcm_impl_init(void *arg)
{
gcm_impl_ops_t *curr_impl;
int i, c;
- /* move supported impl into aes_supp_impls */
+ /* Move supported implementations into gcm_supp_impl */
for (i = 0, c = 0; i < ARRAY_SIZE(gcm_all_impl); i++) {
curr_impl = (gcm_impl_ops_t *)gcm_all_impl[i];
@@ -711,7 +719,10 @@ gcm_impl_init(void)
}
gcm_supp_impl_cnt = c;
- /* set fastest implementation. assume hardware accelerated is fastest */
+ /*
+ * Set the fastest implementation given the assumption that the
+ * hardware accelerated version is the fastest.
+ */
#if defined(__x86_64) && defined(HAVE_PCLMULQDQ)
if (gcm_pclmulqdq_impl.is_supported())
memcpy(&gcm_fastest_impl, &gcm_pclmulqdq_impl,