author     Tom Caputi <[email protected]>          2016-05-12 10:51:24 -0400
committer  Brian Behlendorf <[email protected]>    2016-07-20 10:43:30 -0700
commit     0b04990a5de594659d2cf20458965277dd6efeb1 (patch)
tree       74369a3236e03359f7276cb9b19687e28c7f6d59 /module/icp
parent     be88e733a634ad0d7f20350e1a17ede51922d3ff (diff)
Illumos Crypto Port module added to enable native encryption in zfs
A port of the Illumos Crypto Framework to a Linux kernel module (found in module/icp). This is needed to do the actual encryption work. We cannot use the Linux kernel's built-in crypto API because it is only exported to GPL-licensed modules. Having the ICP also means the crypto code can run on any of the other kernels under OpenZFS.

I ended up porting over most of the internals of the framework, which means that porting over other API calls (if we need them) should be fairly easy. Specifically, I have ported over the API functions related to encryption, digests, MACs, and crypto templates.

The ICP is able to use assembly-accelerated encryption on amd64 machines and AES-NI instructions on Intel chips that support it. There are placeholder directories for similar assembly optimizations for other architectures (although they have not been written).

Signed-off-by: Tom Caputi <[email protected]>
Signed-off-by: Tony Hutter <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Issue #4329
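For reference, a minimal sketch of how a consumer drives the ported KCF cipher API (not part of this commit; the struct fields and the SUN_CKM_AES_CCM mechanism name follow the Illumos sys/crypto headers, and the encrypt_oneshot() wrapper is a hypothetical name for illustration only):

#include <sys/crypto/api.h>	/* crypto_mech2id(), crypto_encrypt() */

static int
encrypt_oneshot(uint8_t *keybuf, size_t keylen, CK_AES_CCM_PARAMS *ccmp,
    crypto_data_t *plaintext, crypto_data_t *ciphertext)
{
	crypto_mechanism_t mech;
	crypto_key_t key;

	mech.cm_type = crypto_mech2id(SUN_CKM_AES_CCM);
	mech.cm_param = (char *)ccmp;
	mech.cm_param_len = sizeof (*ccmp);

	key.ck_format = CRYPTO_KEY_RAW;
	key.ck_data = keybuf;
	key.ck_length = keylen * 8;	/* KCF raw key lengths are in bits */

	/* NULL template and NULL call_req: synchronous, no cached schedule */
	return (crypto_encrypt(&mech, plaintext, &key, NULL, ciphertext, NULL));
}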
Diffstat (limited to 'module/icp')
-rw-r--r--  module/icp/Makefile.in | 82
-rw-r--r--  module/icp/algs/aes/aes_impl.c | 1618
-rw-r--r--  module/icp/algs/aes/aes_modes.c | 135
-rw-r--r--  module/icp/algs/modes/cbc.c | 305
-rw-r--r--  module/icp/algs/modes/ccm.c | 920
-rw-r--r--  module/icp/algs/modes/ctr.c | 238
-rw-r--r--  module/icp/algs/modes/ecb.c | 143
-rw-r--r--  module/icp/algs/modes/gcm.c | 748
-rw-r--r--  module/icp/algs/modes/modes.c | 159
-rw-r--r--  module/icp/algs/sha1/sha1.c | 663
-rw-r--r--  module/icp/algs/sha2/sha2.c | 495
-rw-r--r--  module/icp/api/kcf_cipher.c | 935
-rw-r--r--  module/icp/api/kcf_ctxops.c | 151
-rw-r--r--  module/icp/api/kcf_digest.c | 494
-rw-r--r--  module/icp/api/kcf_mac.c | 648
-rw-r--r--  module/icp/api/kcf_miscapi.c | 127
-rw-r--r--  module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.gladman | 23
-rw-r--r--  module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.gladman.descrip | 1
-rw-r--r--  module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.openssl | 127
-rw-r--r--  module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.openssl.descrip | 1
-rw-r--r--  module/icp/asm-x86_64/aes/aes_amd64.S | 900
-rw-r--r--  module/icp/asm-x86_64/aes/aes_intel.S | 851
-rw-r--r--  module/icp/asm-x86_64/aes/aeskey.c | 580
-rw-r--r--  module/icp/asm-x86_64/aes/aesopt.h | 770
-rw-r--r--  module/icp/asm-x86_64/aes/aestab.h | 165
-rw-r--r--  module/icp/asm-x86_64/aes/aestab2.h | 594
-rw-r--r--  module/icp/asm-x86_64/modes/gcm_intel.S | 334
-rw-r--r--  module/icp/asm-x86_64/sha1/sha1-x86_64.S | 1346
-rw-r--r--  module/icp/asm-x86_64/sha2/sha256_impl.S | 2060
-rw-r--r--  module/icp/core/kcf_callprov.c | 1567
-rw-r--r--  module/icp/core/kcf_mech_tabs.c | 775
-rw-r--r--  module/icp/core/kcf_prov_lib.c | 229
-rw-r--r--  module/icp/core/kcf_prov_tabs.c | 638
-rw-r--r--  module/icp/core/kcf_sched.c | 1763
-rw-r--r--  module/icp/illumos-crypto.c | 152
-rw-r--r--  module/icp/include/aes/aes_impl.h | 170
-rw-r--r--  module/icp/include/modes/modes.h | 385
-rw-r--r--  module/icp/include/sha1/sha1.h | 61
-rw-r--r--  module/icp/include/sha1/sha1_consts.h | 65
-rw-r--r--  module/icp/include/sha1/sha1_impl.h | 73
-rw-r--r--  module/icp/include/sha2/sha2.h | 116
-rw-r--r--  module/icp/include/sha2/sha2_consts.h | 219
-rw-r--r--  module/icp/include/sha2/sha2_impl.h | 62
-rw-r--r--  module/icp/include/sys/asm_linkage.h | 36
-rw-r--r--  module/icp/include/sys/bitmap.h | 183
-rw-r--r--  module/icp/include/sys/crypto/elfsign.h | 137
-rw-r--r--  module/icp/include/sys/crypto/impl.h | 1370
-rw-r--r--  module/icp/include/sys/crypto/ioctl.h | 1483
-rw-r--r--  module/icp/include/sys/crypto/ioctladmin.h | 136
-rw-r--r--  module/icp/include/sys/crypto/ops_impl.h | 630
-rw-r--r--  module/icp/include/sys/crypto/sched_impl.h | 531
-rw-r--r--  module/icp/include/sys/crypto/spi.h | 721
-rw-r--r--  module/icp/include/sys/ia32/asm_linkage.h | 307
-rw-r--r--  module/icp/include/sys/ia32/stack.h | 160
-rw-r--r--  module/icp/include/sys/ia32/trap.h | 107
-rw-r--r--  module/icp/include/sys/modctl.h | 477
-rw-r--r--  module/icp/include/sys/modhash.h | 147
-rw-r--r--  module/icp/include/sys/modhash_impl.h | 108
-rw-r--r--  module/icp/include/sys/stack.h | 36
-rw-r--r--  module/icp/include/sys/trap.h | 36
-rw-r--r--  module/icp/io/aes.c | 1437
-rw-r--r--  module/icp/io/sha1_mod.c | 1239
-rw-r--r--  module/icp/io/sha2_mod.c | 1307
-rw-r--r--  module/icp/os/modconf.c | 171
-rw-r--r--  module/icp/os/modhash.c | 925
-rw-r--r--  module/icp/spi/kcf_spi.c | 927
66 files changed, 34499 insertions, 0 deletions
diff --git a/module/icp/Makefile.in b/module/icp/Makefile.in
new file mode 100644
index 000000000..4be03dbae
--- /dev/null
+++ b/module/icp/Makefile.in
@@ -0,0 +1,82 @@
+src = @abs_top_srcdir@/module/icp
+obj = @abs_builddir@
+
+MODULE := icp
+
+TARGET_ASM_DIR = @TARGET_ASM_DIR@
+
+ifeq ($(TARGET_ASM_DIR), asm-x86_64)
+ASM_SOURCES := asm-x86_64/aes/aeskey.o
+ASM_SOURCES += asm-x86_64/aes/aes_amd64.o
+ASM_SOURCES += asm-x86_64/aes/aes_intel.o
+ASM_SOURCES += asm-x86_64/modes/gcm_intel.o
+ASM_SOURCES += asm-x86_64/sha1/sha1-x86_64.o
+ASM_SOURCES += asm-x86_64/sha2/sha256_impl.o
+endif
+
+ifeq ($(TARGET_ASM_DIR), asm-i386)
+ASM_SOURCES :=
+endif
+
+ifeq ($(TARGET_ASM_DIR), asm-generic)
+ASM_SOURCES :=
+endif
+
+EXTRA_CFLAGS = $(ZFS_MODULE_CFLAGS) @KERNELCPPFLAGS@
+
+obj-$(CONFIG_ZFS) := $(MODULE).o
+
+ccflags-y += -I$(src)/include
+asflags-y += -I$(src)/include
+asflags-y += $(ZFS_MODULE_CFLAGS)
+
+$(MODULE)-objs += illumos-crypto.o
+$(MODULE)-objs += api/kcf_cipher.o
+$(MODULE)-objs += api/kcf_digest.o
+$(MODULE)-objs += api/kcf_mac.o
+$(MODULE)-objs += api/kcf_miscapi.o
+$(MODULE)-objs += api/kcf_ctxops.o
+$(MODULE)-objs += core/kcf_callprov.o
+$(MODULE)-objs += core/kcf_prov_tabs.o
+$(MODULE)-objs += core/kcf_sched.o
+$(MODULE)-objs += core/kcf_mech_tabs.o
+$(MODULE)-objs += core/kcf_prov_lib.o
+$(MODULE)-objs += spi/kcf_spi.o
+$(MODULE)-objs += io/aes.o
+$(MODULE)-objs += io/sha1_mod.o
+$(MODULE)-objs += io/sha2_mod.o
+$(MODULE)-objs += os/modhash.o
+$(MODULE)-objs += os/modconf.o
+$(MODULE)-objs += algs/modes/cbc.o
+$(MODULE)-objs += algs/modes/ccm.o
+$(MODULE)-objs += algs/modes/ctr.o
+$(MODULE)-objs += algs/modes/ecb.o
+$(MODULE)-objs += algs/modes/gcm.o
+$(MODULE)-objs += algs/modes/modes.o
+$(MODULE)-objs += algs/aes/aes_impl.o
+$(MODULE)-objs += algs/aes/aes_modes.o
+$(MODULE)-objs += algs/sha1/sha1.o
+$(MODULE)-objs += algs/sha2/sha2.o
+$(MODULE)-objs += $(ASM_SOURCES)
+
+ICP_DIRS = \
+ api \
+ core \
+ spi \
+ io \
+ os \
+ algs \
+ algs/aes \
+ algs/modes \
+ algs/sha1 \
+ algs/sha2 \
+ asm-x86_64 \
+ asm-x86_64/aes \
+ asm-x86_64/modes \
+ asm-x86_64/sha1 \
+ asm-x86_64/sha2 \
+ asm-i386 \
+ asm-generic
+
+all:
+ mkdir -p $(ICP_DIRS)
diff --git a/module/icp/algs/aes/aes_impl.c b/module/icp/algs/aes/aes_impl.c
new file mode 100644
index 000000000..9c53964f0
--- /dev/null
+++ b/module/icp/algs/aes/aes_impl.c
@@ -0,0 +1,1618 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#include <sys/zfs_context.h>
+#include <sys/crypto/spi.h>
+#include <modes/modes.h>
+#include <aes/aes_impl.h>
+
+#ifdef __amd64
+
+#ifdef _KERNEL
+/* Workaround for no XMM kernel thread save/restore */
+#define KPREEMPT_DISABLE kpreempt_disable()
+#define KPREEMPT_ENABLE kpreempt_enable()
+
+#else
+#define KPREEMPT_DISABLE
+#define KPREEMPT_ENABLE
+#endif /* _KERNEL */
+#endif /* __amd64 */
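The kpreempt pair above matters because the Linux scheduler does not save or restore FPU/XMM state for kernel threads; a sketch of the pattern every SIMD call site in this file follows (illustrative, mirroring aes_setupkeys() and the rijndael_* wrappers below):

/*
 * Preemption between two SSE/AES-NI instructions could let another
 * thread clobber %xmm0-%xmm15, corrupting the key schedule or the
 * block in flight, so the CPU is pinned for the duration.
 */
KPREEMPT_DISABLE;	/* no preemption while XMM registers hold live data */
aes_encrypt_intel(rk, Nr, pt, ct);
KPREEMPT_ENABLE;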
+
+
+/*
+ * This file is derived from the file rijndael-alg-fst.c taken from the
+ * "optimized C code v3.0" on the "rijndael home page"
+ * http://www.iaik.tu-graz.ac.at/research/krypto/AES/old/~rijmen/rijndael/
+ * pointed to by the NIST web-site http://csrc.nist.gov/archive/aes/
+ *
+ * The following note is from the original file:
+ */
+
+/*
+ * rijndael-alg-fst.c
+ *
+ * @version 3.0 (December 2000)
+ *
+ * Optimised ANSI C code for the Rijndael cipher (now AES)
+ *
+ * @author Vincent Rijmen <[email protected]>
+ * @author Antoon Bosselaers <[email protected]>
+ * @author Paulo Barreto <[email protected]>
+ *
+ * This code is hereby placed in the public domain.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__amd64)
+
+/* These functions are used to execute amd64 instructions for AMD or Intel: */
+extern int rijndael_key_setup_enc_amd64(uint32_t rk[],
+ const uint32_t cipherKey[], int keyBits);
+extern int rijndael_key_setup_dec_amd64(uint32_t rk[],
+ const uint32_t cipherKey[], int keyBits);
+extern void aes_encrypt_amd64(const uint32_t rk[], int Nr,
+ const uint32_t pt[4], uint32_t ct[4]);
+extern void aes_decrypt_amd64(const uint32_t rk[], int Nr,
+ const uint32_t ct[4], uint32_t pt[4]);
+
+/* These functions are used to execute Intel-specific AES-NI instructions: */
+extern int rijndael_key_setup_enc_intel(uint32_t rk[],
+ const uint32_t cipherKey[], uint64_t keyBits);
+extern int rijndael_key_setup_dec_intel(uint32_t rk[],
+ const uint32_t cipherKey[], uint64_t keyBits);
+extern void aes_encrypt_intel(const uint32_t rk[], int Nr,
+ const uint32_t pt[4], uint32_t ct[4]);
+extern void aes_decrypt_intel(const uint32_t rk[], int Nr,
+ const uint32_t ct[4], uint32_t pt[4]);
+
+static int intel_aes_instructions_present(void);
+
+#define AES_ENCRYPT_IMPL(a, b, c, d, e) rijndael_encrypt(a, b, c, d, e)
+#define AES_DECRYPT_IMPL(a, b, c, d, e) rijndael_decrypt(a, b, c, d, e)
+
+#else /* Generic C implementation */
+
+#define AES_ENCRYPT_IMPL(a, b, c, d, e) rijndael_encrypt(a, b, c, d)
+#define AES_DECRYPT_IMPL(a, b, c, d, e) rijndael_decrypt(a, b, c, d)
+#define rijndael_key_setup_enc_raw rijndael_key_setup_enc
+#endif /* __amd64 */
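intel_aes_instructions_present(), declared above and defined later in this file, reduces to a CPUID feature test; a condensed userspace-style sketch of the idea (GCC inline assembly assumed; AES-NI support is CPUID leaf 1, ECX bit 25):

static int
aesni_present_sketch(void)
{
	uint32_t eax = 1, ebx, ecx, edx;

	__asm__ __volatile__("cpuid"
	    : "+a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx));
	return ((ecx >> 25) & 1);	/* ECX bit 25 = AES-NI */
}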
+
+#if defined(_LITTLE_ENDIAN) && !defined(__amd64)
+#define AES_BYTE_SWAP
+#endif
+
+
+#if !defined(__amd64)
+/*
+ * Constant tables
+ */
+
+/*
+ * Te0[x] = S [x].[02, 01, 01, 03];
+ * Te1[x] = S [x].[03, 02, 01, 01];
+ * Te2[x] = S [x].[01, 03, 02, 01];
+ * Te3[x] = S [x].[01, 01, 03, 02];
+ * Te4[x] = S [x].[01, 01, 01, 01];
+ *
+ * Td0[x] = Si[x].[0e, 09, 0d, 0b];
+ * Td1[x] = Si[x].[0b, 0e, 09, 0d];
+ * Td2[x] = Si[x].[0d, 0b, 0e, 09];
+ * Td3[x] = Si[x].[09, 0d, 0b, 0e];
+ * Td4[x] = Si[x].[01, 01, 01, 01];
+ */
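As a concrete check of the construction above (a hand computation, not code from this commit): S[0x00] = 0x63, so Te0[0x00] must be 0x63 scaled by {02, 01, 01, 03} in GF(2^8), computed here with a hypothetical xtime() doubling helper:

static uint8_t
xtime(uint8_t b)	/* multiply by 0x02 in GF(2^8), reduce by 0x11b */
{
	return ((b << 1) ^ ((b & 0x80) ? 0x1b : 0x00));
}

/*
 * xtime(0x63) = 0xc6 and 0x03 * 0x63 = xtime(0x63) ^ 0x63 = 0xa5, so
 * Te0[0x00] = {0xc6, 0x63, 0x63, 0xa5} = 0xc66363a5U -- which is the
 * first entry of the table below.
 */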
+
+/* Encrypt Sbox constants (for the substitute bytes operation) */
+
+static const uint32_t Te0[256] =
+{
+ 0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU,
+ 0xfff2f20dU, 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U,
+ 0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU,
+ 0xe7fefe19U, 0xb5d7d762U, 0x4dababe6U, 0xec76769aU,
+ 0x8fcaca45U, 0x1f82829dU, 0x89c9c940U, 0xfa7d7d87U,
+ 0xeffafa15U, 0xb25959ebU, 0x8e4747c9U, 0xfbf0f00bU,
+ 0x41adadecU, 0xb3d4d467U, 0x5fa2a2fdU, 0x45afafeaU,
+ 0x239c9cbfU, 0x53a4a4f7U, 0xe4727296U, 0x9bc0c05bU,
+ 0x75b7b7c2U, 0xe1fdfd1cU, 0x3d9393aeU, 0x4c26266aU,
+ 0x6c36365aU, 0x7e3f3f41U, 0xf5f7f702U, 0x83cccc4fU,
+ 0x6834345cU, 0x51a5a5f4U, 0xd1e5e534U, 0xf9f1f108U,
+ 0xe2717193U, 0xabd8d873U, 0x62313153U, 0x2a15153fU,
+ 0x0804040cU, 0x95c7c752U, 0x46232365U, 0x9dc3c35eU,
+ 0x30181828U, 0x379696a1U, 0x0a05050fU, 0x2f9a9ab5U,
+ 0x0e070709U, 0x24121236U, 0x1b80809bU, 0xdfe2e23dU,
+ 0xcdebeb26U, 0x4e272769U, 0x7fb2b2cdU, 0xea75759fU,
+ 0x1209091bU, 0x1d83839eU, 0x582c2c74U, 0x341a1a2eU,
+ 0x361b1b2dU, 0xdc6e6eb2U, 0xb45a5aeeU, 0x5ba0a0fbU,
+ 0xa45252f6U, 0x763b3b4dU, 0xb7d6d661U, 0x7db3b3ceU,
+ 0x5229297bU, 0xdde3e33eU, 0x5e2f2f71U, 0x13848497U,
+ 0xa65353f5U, 0xb9d1d168U, 0x00000000U, 0xc1eded2cU,
+ 0x40202060U, 0xe3fcfc1fU, 0x79b1b1c8U, 0xb65b5bedU,
+ 0xd46a6abeU, 0x8dcbcb46U, 0x67bebed9U, 0x7239394bU,
+ 0x944a4adeU, 0x984c4cd4U, 0xb05858e8U, 0x85cfcf4aU,
+ 0xbbd0d06bU, 0xc5efef2aU, 0x4faaaae5U, 0xedfbfb16U,
+ 0x864343c5U, 0x9a4d4dd7U, 0x66333355U, 0x11858594U,
+ 0x8a4545cfU, 0xe9f9f910U, 0x04020206U, 0xfe7f7f81U,
+ 0xa05050f0U, 0x783c3c44U, 0x259f9fbaU, 0x4ba8a8e3U,
+ 0xa25151f3U, 0x5da3a3feU, 0x804040c0U, 0x058f8f8aU,
+ 0x3f9292adU, 0x219d9dbcU, 0x70383848U, 0xf1f5f504U,
+ 0x63bcbcdfU, 0x77b6b6c1U, 0xafdada75U, 0x42212163U,
+ 0x20101030U, 0xe5ffff1aU, 0xfdf3f30eU, 0xbfd2d26dU,
+ 0x81cdcd4cU, 0x180c0c14U, 0x26131335U, 0xc3ecec2fU,
+ 0xbe5f5fe1U, 0x359797a2U, 0x884444ccU, 0x2e171739U,
+ 0x93c4c457U, 0x55a7a7f2U, 0xfc7e7e82U, 0x7a3d3d47U,
+ 0xc86464acU, 0xba5d5de7U, 0x3219192bU, 0xe6737395U,
+ 0xc06060a0U, 0x19818198U, 0x9e4f4fd1U, 0xa3dcdc7fU,
+ 0x44222266U, 0x542a2a7eU, 0x3b9090abU, 0x0b888883U,
+ 0x8c4646caU, 0xc7eeee29U, 0x6bb8b8d3U, 0x2814143cU,
+ 0xa7dede79U, 0xbc5e5ee2U, 0x160b0b1dU, 0xaddbdb76U,
+ 0xdbe0e03bU, 0x64323256U, 0x743a3a4eU, 0x140a0a1eU,
+ 0x924949dbU, 0x0c06060aU, 0x4824246cU, 0xb85c5ce4U,
+ 0x9fc2c25dU, 0xbdd3d36eU, 0x43acacefU, 0xc46262a6U,
+ 0x399191a8U, 0x319595a4U, 0xd3e4e437U, 0xf279798bU,
+ 0xd5e7e732U, 0x8bc8c843U, 0x6e373759U, 0xda6d6db7U,
+ 0x018d8d8cU, 0xb1d5d564U, 0x9c4e4ed2U, 0x49a9a9e0U,
+ 0xd86c6cb4U, 0xac5656faU, 0xf3f4f407U, 0xcfeaea25U,
+ 0xca6565afU, 0xf47a7a8eU, 0x47aeaee9U, 0x10080818U,
+ 0x6fbabad5U, 0xf0787888U, 0x4a25256fU, 0x5c2e2e72U,
+ 0x381c1c24U, 0x57a6a6f1U, 0x73b4b4c7U, 0x97c6c651U,
+ 0xcbe8e823U, 0xa1dddd7cU, 0xe874749cU, 0x3e1f1f21U,
+ 0x964b4bddU, 0x61bdbddcU, 0x0d8b8b86U, 0x0f8a8a85U,
+ 0xe0707090U, 0x7c3e3e42U, 0x71b5b5c4U, 0xcc6666aaU,
+ 0x904848d8U, 0x06030305U, 0xf7f6f601U, 0x1c0e0e12U,
+ 0xc26161a3U, 0x6a35355fU, 0xae5757f9U, 0x69b9b9d0U,
+ 0x17868691U, 0x99c1c158U, 0x3a1d1d27U, 0x279e9eb9U,
+ 0xd9e1e138U, 0xebf8f813U, 0x2b9898b3U, 0x22111133U,
+ 0xd26969bbU, 0xa9d9d970U, 0x078e8e89U, 0x339494a7U,
+ 0x2d9b9bb6U, 0x3c1e1e22U, 0x15878792U, 0xc9e9e920U,
+ 0x87cece49U, 0xaa5555ffU, 0x50282878U, 0xa5dfdf7aU,
+ 0x038c8c8fU, 0x59a1a1f8U, 0x09898980U, 0x1a0d0d17U,
+ 0x65bfbfdaU, 0xd7e6e631U, 0x844242c6U, 0xd06868b8U,
+ 0x824141c3U, 0x299999b0U, 0x5a2d2d77U, 0x1e0f0f11U,
+ 0x7bb0b0cbU, 0xa85454fcU, 0x6dbbbbd6U, 0x2c16163aU
+};
+
+
+static const uint32_t Te1[256] =
+{
+ 0xa5c66363U, 0x84f87c7cU, 0x99ee7777U, 0x8df67b7bU,
+ 0x0dfff2f2U, 0xbdd66b6bU, 0xb1de6f6fU, 0x5491c5c5U,
+ 0x50603030U, 0x03020101U, 0xa9ce6767U, 0x7d562b2bU,
+ 0x19e7fefeU, 0x62b5d7d7U, 0xe64dababU, 0x9aec7676U,
+ 0x458fcacaU, 0x9d1f8282U, 0x4089c9c9U, 0x87fa7d7dU,
+ 0x15effafaU, 0xebb25959U, 0xc98e4747U, 0x0bfbf0f0U,
+ 0xec41adadU, 0x67b3d4d4U, 0xfd5fa2a2U, 0xea45afafU,
+ 0xbf239c9cU, 0xf753a4a4U, 0x96e47272U, 0x5b9bc0c0U,
+ 0xc275b7b7U, 0x1ce1fdfdU, 0xae3d9393U, 0x6a4c2626U,
+ 0x5a6c3636U, 0x417e3f3fU, 0x02f5f7f7U, 0x4f83ccccU,
+ 0x5c683434U, 0xf451a5a5U, 0x34d1e5e5U, 0x08f9f1f1U,
+ 0x93e27171U, 0x73abd8d8U, 0x53623131U, 0x3f2a1515U,
+ 0x0c080404U, 0x5295c7c7U, 0x65462323U, 0x5e9dc3c3U,
+ 0x28301818U, 0xa1379696U, 0x0f0a0505U, 0xb52f9a9aU,
+ 0x090e0707U, 0x36241212U, 0x9b1b8080U, 0x3ddfe2e2U,
+ 0x26cdebebU, 0x694e2727U, 0xcd7fb2b2U, 0x9fea7575U,
+ 0x1b120909U, 0x9e1d8383U, 0x74582c2cU, 0x2e341a1aU,
+ 0x2d361b1bU, 0xb2dc6e6eU, 0xeeb45a5aU, 0xfb5ba0a0U,
+ 0xf6a45252U, 0x4d763b3bU, 0x61b7d6d6U, 0xce7db3b3U,
+ 0x7b522929U, 0x3edde3e3U, 0x715e2f2fU, 0x97138484U,
+ 0xf5a65353U, 0x68b9d1d1U, 0x00000000U, 0x2cc1ededU,
+ 0x60402020U, 0x1fe3fcfcU, 0xc879b1b1U, 0xedb65b5bU,
+ 0xbed46a6aU, 0x468dcbcbU, 0xd967bebeU, 0x4b723939U,
+ 0xde944a4aU, 0xd4984c4cU, 0xe8b05858U, 0x4a85cfcfU,
+ 0x6bbbd0d0U, 0x2ac5efefU, 0xe54faaaaU, 0x16edfbfbU,
+ 0xc5864343U, 0xd79a4d4dU, 0x55663333U, 0x94118585U,
+ 0xcf8a4545U, 0x10e9f9f9U, 0x06040202U, 0x81fe7f7fU,
+ 0xf0a05050U, 0x44783c3cU, 0xba259f9fU, 0xe34ba8a8U,
+ 0xf3a25151U, 0xfe5da3a3U, 0xc0804040U, 0x8a058f8fU,
+ 0xad3f9292U, 0xbc219d9dU, 0x48703838U, 0x04f1f5f5U,
+ 0xdf63bcbcU, 0xc177b6b6U, 0x75afdadaU, 0x63422121U,
+ 0x30201010U, 0x1ae5ffffU, 0x0efdf3f3U, 0x6dbfd2d2U,
+ 0x4c81cdcdU, 0x14180c0cU, 0x35261313U, 0x2fc3ececU,
+ 0xe1be5f5fU, 0xa2359797U, 0xcc884444U, 0x392e1717U,
+ 0x5793c4c4U, 0xf255a7a7U, 0x82fc7e7eU, 0x477a3d3dU,
+ 0xacc86464U, 0xe7ba5d5dU, 0x2b321919U, 0x95e67373U,
+ 0xa0c06060U, 0x98198181U, 0xd19e4f4fU, 0x7fa3dcdcU,
+ 0x66442222U, 0x7e542a2aU, 0xab3b9090U, 0x830b8888U,
+ 0xca8c4646U, 0x29c7eeeeU, 0xd36bb8b8U, 0x3c281414U,
+ 0x79a7dedeU, 0xe2bc5e5eU, 0x1d160b0bU, 0x76addbdbU,
+ 0x3bdbe0e0U, 0x56643232U, 0x4e743a3aU, 0x1e140a0aU,
+ 0xdb924949U, 0x0a0c0606U, 0x6c482424U, 0xe4b85c5cU,
+ 0x5d9fc2c2U, 0x6ebdd3d3U, 0xef43acacU, 0xa6c46262U,
+ 0xa8399191U, 0xa4319595U, 0x37d3e4e4U, 0x8bf27979U,
+ 0x32d5e7e7U, 0x438bc8c8U, 0x596e3737U, 0xb7da6d6dU,
+ 0x8c018d8dU, 0x64b1d5d5U, 0xd29c4e4eU, 0xe049a9a9U,
+ 0xb4d86c6cU, 0xfaac5656U, 0x07f3f4f4U, 0x25cfeaeaU,
+ 0xafca6565U, 0x8ef47a7aU, 0xe947aeaeU, 0x18100808U,
+ 0xd56fbabaU, 0x88f07878U, 0x6f4a2525U, 0x725c2e2eU,
+ 0x24381c1cU, 0xf157a6a6U, 0xc773b4b4U, 0x5197c6c6U,
+ 0x23cbe8e8U, 0x7ca1ddddU, 0x9ce87474U, 0x213e1f1fU,
+ 0xdd964b4bU, 0xdc61bdbdU, 0x860d8b8bU, 0x850f8a8aU,
+ 0x90e07070U, 0x427c3e3eU, 0xc471b5b5U, 0xaacc6666U,
+ 0xd8904848U, 0x05060303U, 0x01f7f6f6U, 0x121c0e0eU,
+ 0xa3c26161U, 0x5f6a3535U, 0xf9ae5757U, 0xd069b9b9U,
+ 0x91178686U, 0x5899c1c1U, 0x273a1d1dU, 0xb9279e9eU,
+ 0x38d9e1e1U, 0x13ebf8f8U, 0xb32b9898U, 0x33221111U,
+ 0xbbd26969U, 0x70a9d9d9U, 0x89078e8eU, 0xa7339494U,
+ 0xb62d9b9bU, 0x223c1e1eU, 0x92158787U, 0x20c9e9e9U,
+ 0x4987ceceU, 0xffaa5555U, 0x78502828U, 0x7aa5dfdfU,
+ 0x8f038c8cU, 0xf859a1a1U, 0x80098989U, 0x171a0d0dU,
+ 0xda65bfbfU, 0x31d7e6e6U, 0xc6844242U, 0xb8d06868U,
+ 0xc3824141U, 0xb0299999U, 0x775a2d2dU, 0x111e0f0fU,
+ 0xcb7bb0b0U, 0xfca85454U, 0xd66dbbbbU, 0x3a2c1616U
+};
+
+
+static const uint32_t Te2[256] =
+{
+ 0x63a5c663U, 0x7c84f87cU, 0x7799ee77U, 0x7b8df67bU,
+ 0xf20dfff2U, 0x6bbdd66bU, 0x6fb1de6fU, 0xc55491c5U,
+ 0x30506030U, 0x01030201U, 0x67a9ce67U, 0x2b7d562bU,
+ 0xfe19e7feU, 0xd762b5d7U, 0xabe64dabU, 0x769aec76U,
+ 0xca458fcaU, 0x829d1f82U, 0xc94089c9U, 0x7d87fa7dU,
+ 0xfa15effaU, 0x59ebb259U, 0x47c98e47U, 0xf00bfbf0U,
+ 0xadec41adU, 0xd467b3d4U, 0xa2fd5fa2U, 0xafea45afU,
+ 0x9cbf239cU, 0xa4f753a4U, 0x7296e472U, 0xc05b9bc0U,
+ 0xb7c275b7U, 0xfd1ce1fdU, 0x93ae3d93U, 0x266a4c26U,
+ 0x365a6c36U, 0x3f417e3fU, 0xf702f5f7U, 0xcc4f83ccU,
+ 0x345c6834U, 0xa5f451a5U, 0xe534d1e5U, 0xf108f9f1U,
+ 0x7193e271U, 0xd873abd8U, 0x31536231U, 0x153f2a15U,
+ 0x040c0804U, 0xc75295c7U, 0x23654623U, 0xc35e9dc3U,
+ 0x18283018U, 0x96a13796U, 0x050f0a05U, 0x9ab52f9aU,
+ 0x07090e07U, 0x12362412U, 0x809b1b80U, 0xe23ddfe2U,
+ 0xeb26cdebU, 0x27694e27U, 0xb2cd7fb2U, 0x759fea75U,
+ 0x091b1209U, 0x839e1d83U, 0x2c74582cU, 0x1a2e341aU,
+ 0x1b2d361bU, 0x6eb2dc6eU, 0x5aeeb45aU, 0xa0fb5ba0U,
+ 0x52f6a452U, 0x3b4d763bU, 0xd661b7d6U, 0xb3ce7db3U,
+ 0x297b5229U, 0xe33edde3U, 0x2f715e2fU, 0x84971384U,
+ 0x53f5a653U, 0xd168b9d1U, 0x00000000U, 0xed2cc1edU,
+ 0x20604020U, 0xfc1fe3fcU, 0xb1c879b1U, 0x5bedb65bU,
+ 0x6abed46aU, 0xcb468dcbU, 0xbed967beU, 0x394b7239U,
+ 0x4ade944aU, 0x4cd4984cU, 0x58e8b058U, 0xcf4a85cfU,
+ 0xd06bbbd0U, 0xef2ac5efU, 0xaae54faaU, 0xfb16edfbU,
+ 0x43c58643U, 0x4dd79a4dU, 0x33556633U, 0x85941185U,
+ 0x45cf8a45U, 0xf910e9f9U, 0x02060402U, 0x7f81fe7fU,
+ 0x50f0a050U, 0x3c44783cU, 0x9fba259fU, 0xa8e34ba8U,
+ 0x51f3a251U, 0xa3fe5da3U, 0x40c08040U, 0x8f8a058fU,
+ 0x92ad3f92U, 0x9dbc219dU, 0x38487038U, 0xf504f1f5U,
+ 0xbcdf63bcU, 0xb6c177b6U, 0xda75afdaU, 0x21634221U,
+ 0x10302010U, 0xff1ae5ffU, 0xf30efdf3U, 0xd26dbfd2U,
+ 0xcd4c81cdU, 0x0c14180cU, 0x13352613U, 0xec2fc3ecU,
+ 0x5fe1be5fU, 0x97a23597U, 0x44cc8844U, 0x17392e17U,
+ 0xc45793c4U, 0xa7f255a7U, 0x7e82fc7eU, 0x3d477a3dU,
+ 0x64acc864U, 0x5de7ba5dU, 0x192b3219U, 0x7395e673U,
+ 0x60a0c060U, 0x81981981U, 0x4fd19e4fU, 0xdc7fa3dcU,
+ 0x22664422U, 0x2a7e542aU, 0x90ab3b90U, 0x88830b88U,
+ 0x46ca8c46U, 0xee29c7eeU, 0xb8d36bb8U, 0x143c2814U,
+ 0xde79a7deU, 0x5ee2bc5eU, 0x0b1d160bU, 0xdb76addbU,
+ 0xe03bdbe0U, 0x32566432U, 0x3a4e743aU, 0x0a1e140aU,
+ 0x49db9249U, 0x060a0c06U, 0x246c4824U, 0x5ce4b85cU,
+ 0xc25d9fc2U, 0xd36ebdd3U, 0xacef43acU, 0x62a6c462U,
+ 0x91a83991U, 0x95a43195U, 0xe437d3e4U, 0x798bf279U,
+ 0xe732d5e7U, 0xc8438bc8U, 0x37596e37U, 0x6db7da6dU,
+ 0x8d8c018dU, 0xd564b1d5U, 0x4ed29c4eU, 0xa9e049a9U,
+ 0x6cb4d86cU, 0x56faac56U, 0xf407f3f4U, 0xea25cfeaU,
+ 0x65afca65U, 0x7a8ef47aU, 0xaee947aeU, 0x08181008U,
+ 0xbad56fbaU, 0x7888f078U, 0x256f4a25U, 0x2e725c2eU,
+ 0x1c24381cU, 0xa6f157a6U, 0xb4c773b4U, 0xc65197c6U,
+ 0xe823cbe8U, 0xdd7ca1ddU, 0x749ce874U, 0x1f213e1fU,
+ 0x4bdd964bU, 0xbddc61bdU, 0x8b860d8bU, 0x8a850f8aU,
+ 0x7090e070U, 0x3e427c3eU, 0xb5c471b5U, 0x66aacc66U,
+ 0x48d89048U, 0x03050603U, 0xf601f7f6U, 0x0e121c0eU,
+ 0x61a3c261U, 0x355f6a35U, 0x57f9ae57U, 0xb9d069b9U,
+ 0x86911786U, 0xc15899c1U, 0x1d273a1dU, 0x9eb9279eU,
+ 0xe138d9e1U, 0xf813ebf8U, 0x98b32b98U, 0x11332211U,
+ 0x69bbd269U, 0xd970a9d9U, 0x8e89078eU, 0x94a73394U,
+ 0x9bb62d9bU, 0x1e223c1eU, 0x87921587U, 0xe920c9e9U,
+ 0xce4987ceU, 0x55ffaa55U, 0x28785028U, 0xdf7aa5dfU,
+ 0x8c8f038cU, 0xa1f859a1U, 0x89800989U, 0x0d171a0dU,
+ 0xbfda65bfU, 0xe631d7e6U, 0x42c68442U, 0x68b8d068U,
+ 0x41c38241U, 0x99b02999U, 0x2d775a2dU, 0x0f111e0fU,
+ 0xb0cb7bb0U, 0x54fca854U, 0xbbd66dbbU, 0x163a2c16U
+};
+
+
+static const uint32_t Te3[256] =
+{
+ 0x6363a5c6U, 0x7c7c84f8U, 0x777799eeU, 0x7b7b8df6U,
+ 0xf2f20dffU, 0x6b6bbdd6U, 0x6f6fb1deU, 0xc5c55491U,
+ 0x30305060U, 0x01010302U, 0x6767a9ceU, 0x2b2b7d56U,
+ 0xfefe19e7U, 0xd7d762b5U, 0xababe64dU, 0x76769aecU,
+ 0xcaca458fU, 0x82829d1fU, 0xc9c94089U, 0x7d7d87faU,
+ 0xfafa15efU, 0x5959ebb2U, 0x4747c98eU, 0xf0f00bfbU,
+ 0xadadec41U, 0xd4d467b3U, 0xa2a2fd5fU, 0xafafea45U,
+ 0x9c9cbf23U, 0xa4a4f753U, 0x727296e4U, 0xc0c05b9bU,
+ 0xb7b7c275U, 0xfdfd1ce1U, 0x9393ae3dU, 0x26266a4cU,
+ 0x36365a6cU, 0x3f3f417eU, 0xf7f702f5U, 0xcccc4f83U,
+ 0x34345c68U, 0xa5a5f451U, 0xe5e534d1U, 0xf1f108f9U,
+ 0x717193e2U, 0xd8d873abU, 0x31315362U, 0x15153f2aU,
+ 0x04040c08U, 0xc7c75295U, 0x23236546U, 0xc3c35e9dU,
+ 0x18182830U, 0x9696a137U, 0x05050f0aU, 0x9a9ab52fU,
+ 0x0707090eU, 0x12123624U, 0x80809b1bU, 0xe2e23ddfU,
+ 0xebeb26cdU, 0x2727694eU, 0xb2b2cd7fU, 0x75759feaU,
+ 0x09091b12U, 0x83839e1dU, 0x2c2c7458U, 0x1a1a2e34U,
+ 0x1b1b2d36U, 0x6e6eb2dcU, 0x5a5aeeb4U, 0xa0a0fb5bU,
+ 0x5252f6a4U, 0x3b3b4d76U, 0xd6d661b7U, 0xb3b3ce7dU,
+ 0x29297b52U, 0xe3e33eddU, 0x2f2f715eU, 0x84849713U,
+ 0x5353f5a6U, 0xd1d168b9U, 0x00000000U, 0xeded2cc1U,
+ 0x20206040U, 0xfcfc1fe3U, 0xb1b1c879U, 0x5b5bedb6U,
+ 0x6a6abed4U, 0xcbcb468dU, 0xbebed967U, 0x39394b72U,
+ 0x4a4ade94U, 0x4c4cd498U, 0x5858e8b0U, 0xcfcf4a85U,
+ 0xd0d06bbbU, 0xefef2ac5U, 0xaaaae54fU, 0xfbfb16edU,
+ 0x4343c586U, 0x4d4dd79aU, 0x33335566U, 0x85859411U,
+ 0x4545cf8aU, 0xf9f910e9U, 0x02020604U, 0x7f7f81feU,
+ 0x5050f0a0U, 0x3c3c4478U, 0x9f9fba25U, 0xa8a8e34bU,
+ 0x5151f3a2U, 0xa3a3fe5dU, 0x4040c080U, 0x8f8f8a05U,
+ 0x9292ad3fU, 0x9d9dbc21U, 0x38384870U, 0xf5f504f1U,
+ 0xbcbcdf63U, 0xb6b6c177U, 0xdada75afU, 0x21216342U,
+ 0x10103020U, 0xffff1ae5U, 0xf3f30efdU, 0xd2d26dbfU,
+ 0xcdcd4c81U, 0x0c0c1418U, 0x13133526U, 0xecec2fc3U,
+ 0x5f5fe1beU, 0x9797a235U, 0x4444cc88U, 0x1717392eU,
+ 0xc4c45793U, 0xa7a7f255U, 0x7e7e82fcU, 0x3d3d477aU,
+ 0x6464acc8U, 0x5d5de7baU, 0x19192b32U, 0x737395e6U,
+ 0x6060a0c0U, 0x81819819U, 0x4f4fd19eU, 0xdcdc7fa3U,
+ 0x22226644U, 0x2a2a7e54U, 0x9090ab3bU, 0x8888830bU,
+ 0x4646ca8cU, 0xeeee29c7U, 0xb8b8d36bU, 0x14143c28U,
+ 0xdede79a7U, 0x5e5ee2bcU, 0x0b0b1d16U, 0xdbdb76adU,
+ 0xe0e03bdbU, 0x32325664U, 0x3a3a4e74U, 0x0a0a1e14U,
+ 0x4949db92U, 0x06060a0cU, 0x24246c48U, 0x5c5ce4b8U,
+ 0xc2c25d9fU, 0xd3d36ebdU, 0xacacef43U, 0x6262a6c4U,
+ 0x9191a839U, 0x9595a431U, 0xe4e437d3U, 0x79798bf2U,
+ 0xe7e732d5U, 0xc8c8438bU, 0x3737596eU, 0x6d6db7daU,
+ 0x8d8d8c01U, 0xd5d564b1U, 0x4e4ed29cU, 0xa9a9e049U,
+ 0x6c6cb4d8U, 0x5656faacU, 0xf4f407f3U, 0xeaea25cfU,
+ 0x6565afcaU, 0x7a7a8ef4U, 0xaeaee947U, 0x08081810U,
+ 0xbabad56fU, 0x787888f0U, 0x25256f4aU, 0x2e2e725cU,
+ 0x1c1c2438U, 0xa6a6f157U, 0xb4b4c773U, 0xc6c65197U,
+ 0xe8e823cbU, 0xdddd7ca1U, 0x74749ce8U, 0x1f1f213eU,
+ 0x4b4bdd96U, 0xbdbddc61U, 0x8b8b860dU, 0x8a8a850fU,
+ 0x707090e0U, 0x3e3e427cU, 0xb5b5c471U, 0x6666aaccU,
+ 0x4848d890U, 0x03030506U, 0xf6f601f7U, 0x0e0e121cU,
+ 0x6161a3c2U, 0x35355f6aU, 0x5757f9aeU, 0xb9b9d069U,
+ 0x86869117U, 0xc1c15899U, 0x1d1d273aU, 0x9e9eb927U,
+ 0xe1e138d9U, 0xf8f813ebU, 0x9898b32bU, 0x11113322U,
+ 0x6969bbd2U, 0xd9d970a9U, 0x8e8e8907U, 0x9494a733U,
+ 0x9b9bb62dU, 0x1e1e223cU, 0x87879215U, 0xe9e920c9U,
+ 0xcece4987U, 0x5555ffaaU, 0x28287850U, 0xdfdf7aa5U,
+ 0x8c8c8f03U, 0xa1a1f859U, 0x89898009U, 0x0d0d171aU,
+ 0xbfbfda65U, 0xe6e631d7U, 0x4242c684U, 0x6868b8d0U,
+ 0x4141c382U, 0x9999b029U, 0x2d2d775aU, 0x0f0f111eU,
+ 0xb0b0cb7bU, 0x5454fca8U, 0xbbbbd66dU, 0x16163a2cU
+};
+
+static const uint32_t Te4[256] =
+{
+ 0x63636363U, 0x7c7c7c7cU, 0x77777777U, 0x7b7b7b7bU,
+ 0xf2f2f2f2U, 0x6b6b6b6bU, 0x6f6f6f6fU, 0xc5c5c5c5U,
+ 0x30303030U, 0x01010101U, 0x67676767U, 0x2b2b2b2bU,
+ 0xfefefefeU, 0xd7d7d7d7U, 0xababababU, 0x76767676U,
+ 0xcacacacaU, 0x82828282U, 0xc9c9c9c9U, 0x7d7d7d7dU,
+ 0xfafafafaU, 0x59595959U, 0x47474747U, 0xf0f0f0f0U,
+ 0xadadadadU, 0xd4d4d4d4U, 0xa2a2a2a2U, 0xafafafafU,
+ 0x9c9c9c9cU, 0xa4a4a4a4U, 0x72727272U, 0xc0c0c0c0U,
+ 0xb7b7b7b7U, 0xfdfdfdfdU, 0x93939393U, 0x26262626U,
+ 0x36363636U, 0x3f3f3f3fU, 0xf7f7f7f7U, 0xccccccccU,
+ 0x34343434U, 0xa5a5a5a5U, 0xe5e5e5e5U, 0xf1f1f1f1U,
+ 0x71717171U, 0xd8d8d8d8U, 0x31313131U, 0x15151515U,
+ 0x04040404U, 0xc7c7c7c7U, 0x23232323U, 0xc3c3c3c3U,
+ 0x18181818U, 0x96969696U, 0x05050505U, 0x9a9a9a9aU,
+ 0x07070707U, 0x12121212U, 0x80808080U, 0xe2e2e2e2U,
+ 0xebebebebU, 0x27272727U, 0xb2b2b2b2U, 0x75757575U,
+ 0x09090909U, 0x83838383U, 0x2c2c2c2cU, 0x1a1a1a1aU,
+ 0x1b1b1b1bU, 0x6e6e6e6eU, 0x5a5a5a5aU, 0xa0a0a0a0U,
+ 0x52525252U, 0x3b3b3b3bU, 0xd6d6d6d6U, 0xb3b3b3b3U,
+ 0x29292929U, 0xe3e3e3e3U, 0x2f2f2f2fU, 0x84848484U,
+ 0x53535353U, 0xd1d1d1d1U, 0x00000000U, 0xededededU,
+ 0x20202020U, 0xfcfcfcfcU, 0xb1b1b1b1U, 0x5b5b5b5bU,
+ 0x6a6a6a6aU, 0xcbcbcbcbU, 0xbebebebeU, 0x39393939U,
+ 0x4a4a4a4aU, 0x4c4c4c4cU, 0x58585858U, 0xcfcfcfcfU,
+ 0xd0d0d0d0U, 0xefefefefU, 0xaaaaaaaaU, 0xfbfbfbfbU,
+ 0x43434343U, 0x4d4d4d4dU, 0x33333333U, 0x85858585U,
+ 0x45454545U, 0xf9f9f9f9U, 0x02020202U, 0x7f7f7f7fU,
+ 0x50505050U, 0x3c3c3c3cU, 0x9f9f9f9fU, 0xa8a8a8a8U,
+ 0x51515151U, 0xa3a3a3a3U, 0x40404040U, 0x8f8f8f8fU,
+ 0x92929292U, 0x9d9d9d9dU, 0x38383838U, 0xf5f5f5f5U,
+ 0xbcbcbcbcU, 0xb6b6b6b6U, 0xdadadadaU, 0x21212121U,
+ 0x10101010U, 0xffffffffU, 0xf3f3f3f3U, 0xd2d2d2d2U,
+ 0xcdcdcdcdU, 0x0c0c0c0cU, 0x13131313U, 0xececececU,
+ 0x5f5f5f5fU, 0x97979797U, 0x44444444U, 0x17171717U,
+ 0xc4c4c4c4U, 0xa7a7a7a7U, 0x7e7e7e7eU, 0x3d3d3d3dU,
+ 0x64646464U, 0x5d5d5d5dU, 0x19191919U, 0x73737373U,
+ 0x60606060U, 0x81818181U, 0x4f4f4f4fU, 0xdcdcdcdcU,
+ 0x22222222U, 0x2a2a2a2aU, 0x90909090U, 0x88888888U,
+ 0x46464646U, 0xeeeeeeeeU, 0xb8b8b8b8U, 0x14141414U,
+ 0xdedededeU, 0x5e5e5e5eU, 0x0b0b0b0bU, 0xdbdbdbdbU,
+ 0xe0e0e0e0U, 0x32323232U, 0x3a3a3a3aU, 0x0a0a0a0aU,
+ 0x49494949U, 0x06060606U, 0x24242424U, 0x5c5c5c5cU,
+ 0xc2c2c2c2U, 0xd3d3d3d3U, 0xacacacacU, 0x62626262U,
+ 0x91919191U, 0x95959595U, 0xe4e4e4e4U, 0x79797979U,
+ 0xe7e7e7e7U, 0xc8c8c8c8U, 0x37373737U, 0x6d6d6d6dU,
+ 0x8d8d8d8dU, 0xd5d5d5d5U, 0x4e4e4e4eU, 0xa9a9a9a9U,
+ 0x6c6c6c6cU, 0x56565656U, 0xf4f4f4f4U, 0xeaeaeaeaU,
+ 0x65656565U, 0x7a7a7a7aU, 0xaeaeaeaeU, 0x08080808U,
+ 0xbabababaU, 0x78787878U, 0x25252525U, 0x2e2e2e2eU,
+ 0x1c1c1c1cU, 0xa6a6a6a6U, 0xb4b4b4b4U, 0xc6c6c6c6U,
+ 0xe8e8e8e8U, 0xddddddddU, 0x74747474U, 0x1f1f1f1fU,
+ 0x4b4b4b4bU, 0xbdbdbdbdU, 0x8b8b8b8bU, 0x8a8a8a8aU,
+ 0x70707070U, 0x3e3e3e3eU, 0xb5b5b5b5U, 0x66666666U,
+ 0x48484848U, 0x03030303U, 0xf6f6f6f6U, 0x0e0e0e0eU,
+ 0x61616161U, 0x35353535U, 0x57575757U, 0xb9b9b9b9U,
+ 0x86868686U, 0xc1c1c1c1U, 0x1d1d1d1dU, 0x9e9e9e9eU,
+ 0xe1e1e1e1U, 0xf8f8f8f8U, 0x98989898U, 0x11111111U,
+ 0x69696969U, 0xd9d9d9d9U, 0x8e8e8e8eU, 0x94949494U,
+ 0x9b9b9b9bU, 0x1e1e1e1eU, 0x87878787U, 0xe9e9e9e9U,
+ 0xcecececeU, 0x55555555U, 0x28282828U, 0xdfdfdfdfU,
+ 0x8c8c8c8cU, 0xa1a1a1a1U, 0x89898989U, 0x0d0d0d0dU,
+ 0xbfbfbfbfU, 0xe6e6e6e6U, 0x42424242U, 0x68686868U,
+ 0x41414141U, 0x99999999U, 0x2d2d2d2dU, 0x0f0f0f0fU,
+ 0xb0b0b0b0U, 0x54545454U, 0xbbbbbbbbU, 0x16161616U
+};
+
+/* Decrypt Sbox constants (for the substitute bytes operation) */
+
+static const uint32_t Td0[256] =
+{
+ 0x51f4a750U, 0x7e416553U, 0x1a17a4c3U, 0x3a275e96U,
+ 0x3bab6bcbU, 0x1f9d45f1U, 0xacfa58abU, 0x4be30393U,
+ 0x2030fa55U, 0xad766df6U, 0x88cc7691U, 0xf5024c25U,
+ 0x4fe5d7fcU, 0xc52acbd7U, 0x26354480U, 0xb562a38fU,
+ 0xdeb15a49U, 0x25ba1b67U, 0x45ea0e98U, 0x5dfec0e1U,
+ 0xc32f7502U, 0x814cf012U, 0x8d4697a3U, 0x6bd3f9c6U,
+ 0x038f5fe7U, 0x15929c95U, 0xbf6d7aebU, 0x955259daU,
+ 0xd4be832dU, 0x587421d3U, 0x49e06929U, 0x8ec9c844U,
+ 0x75c2896aU, 0xf48e7978U, 0x99583e6bU, 0x27b971ddU,
+ 0xbee14fb6U, 0xf088ad17U, 0xc920ac66U, 0x7dce3ab4U,
+ 0x63df4a18U, 0xe51a3182U, 0x97513360U, 0x62537f45U,
+ 0xb16477e0U, 0xbb6bae84U, 0xfe81a01cU, 0xf9082b94U,
+ 0x70486858U, 0x8f45fd19U, 0x94de6c87U, 0x527bf8b7U,
+ 0xab73d323U, 0x724b02e2U, 0xe31f8f57U, 0x6655ab2aU,
+ 0xb2eb2807U, 0x2fb5c203U, 0x86c57b9aU, 0xd33708a5U,
+ 0x302887f2U, 0x23bfa5b2U, 0x02036abaU, 0xed16825cU,
+ 0x8acf1c2bU, 0xa779b492U, 0xf307f2f0U, 0x4e69e2a1U,
+ 0x65daf4cdU, 0x0605bed5U, 0xd134621fU, 0xc4a6fe8aU,
+ 0x342e539dU, 0xa2f355a0U, 0x058ae132U, 0xa4f6eb75U,
+ 0x0b83ec39U, 0x4060efaaU, 0x5e719f06U, 0xbd6e1051U,
+ 0x3e218af9U, 0x96dd063dU, 0xdd3e05aeU, 0x4de6bd46U,
+ 0x91548db5U, 0x71c45d05U, 0x0406d46fU, 0x605015ffU,
+ 0x1998fb24U, 0xd6bde997U, 0x894043ccU, 0x67d99e77U,
+ 0xb0e842bdU, 0x07898b88U, 0xe7195b38U, 0x79c8eedbU,
+ 0xa17c0a47U, 0x7c420fe9U, 0xf8841ec9U, 0x00000000U,
+ 0x09808683U, 0x322bed48U, 0x1e1170acU, 0x6c5a724eU,
+ 0xfd0efffbU, 0x0f853856U, 0x3daed51eU, 0x362d3927U,
+ 0x0a0fd964U, 0x685ca621U, 0x9b5b54d1U, 0x24362e3aU,
+ 0x0c0a67b1U, 0x9357e70fU, 0xb4ee96d2U, 0x1b9b919eU,
+ 0x80c0c54fU, 0x61dc20a2U, 0x5a774b69U, 0x1c121a16U,
+ 0xe293ba0aU, 0xc0a02ae5U, 0x3c22e043U, 0x121b171dU,
+ 0x0e090d0bU, 0xf28bc7adU, 0x2db6a8b9U, 0x141ea9c8U,
+ 0x57f11985U, 0xaf75074cU, 0xee99ddbbU, 0xa37f60fdU,
+ 0xf701269fU, 0x5c72f5bcU, 0x44663bc5U, 0x5bfb7e34U,
+ 0x8b432976U, 0xcb23c6dcU, 0xb6edfc68U, 0xb8e4f163U,
+ 0xd731dccaU, 0x42638510U, 0x13972240U, 0x84c61120U,
+ 0x854a247dU, 0xd2bb3df8U, 0xaef93211U, 0xc729a16dU,
+ 0x1d9e2f4bU, 0xdcb230f3U, 0x0d8652ecU, 0x77c1e3d0U,
+ 0x2bb3166cU, 0xa970b999U, 0x119448faU, 0x47e96422U,
+ 0xa8fc8cc4U, 0xa0f03f1aU, 0x567d2cd8U, 0x223390efU,
+ 0x87494ec7U, 0xd938d1c1U, 0x8ccaa2feU, 0x98d40b36U,
+ 0xa6f581cfU, 0xa57ade28U, 0xdab78e26U, 0x3fadbfa4U,
+ 0x2c3a9de4U, 0x5078920dU, 0x6a5fcc9bU, 0x547e4662U,
+ 0xf68d13c2U, 0x90d8b8e8U, 0x2e39f75eU, 0x82c3aff5U,
+ 0x9f5d80beU, 0x69d0937cU, 0x6fd52da9U, 0xcf2512b3U,
+ 0xc8ac993bU, 0x10187da7U, 0xe89c636eU, 0xdb3bbb7bU,
+ 0xcd267809U, 0x6e5918f4U, 0xec9ab701U, 0x834f9aa8U,
+ 0xe6956e65U, 0xaaffe67eU, 0x21bccf08U, 0xef15e8e6U,
+ 0xbae79bd9U, 0x4a6f36ceU, 0xea9f09d4U, 0x29b07cd6U,
+ 0x31a4b2afU, 0x2a3f2331U, 0xc6a59430U, 0x35a266c0U,
+ 0x744ebc37U, 0xfc82caa6U, 0xe090d0b0U, 0x33a7d815U,
+ 0xf104984aU, 0x41ecdaf7U, 0x7fcd500eU, 0x1791f62fU,
+ 0x764dd68dU, 0x43efb04dU, 0xccaa4d54U, 0xe49604dfU,
+ 0x9ed1b5e3U, 0x4c6a881bU, 0xc12c1fb8U, 0x4665517fU,
+ 0x9d5eea04U, 0x018c355dU, 0xfa877473U, 0xfb0b412eU,
+ 0xb3671d5aU, 0x92dbd252U, 0xe9105633U, 0x6dd64713U,
+ 0x9ad7618cU, 0x37a10c7aU, 0x59f8148eU, 0xeb133c89U,
+ 0xcea927eeU, 0xb761c935U, 0xe11ce5edU, 0x7a47b13cU,
+ 0x9cd2df59U, 0x55f2733fU, 0x1814ce79U, 0x73c737bfU,
+ 0x53f7cdeaU, 0x5ffdaa5bU, 0xdf3d6f14U, 0x7844db86U,
+ 0xcaaff381U, 0xb968c43eU, 0x3824342cU, 0xc2a3405fU,
+ 0x161dc372U, 0xbce2250cU, 0x283c498bU, 0xff0d9541U,
+ 0x39a80171U, 0x080cb3deU, 0xd8b4e49cU, 0x6456c190U,
+ 0x7bcb8461U, 0xd532b670U, 0x486c5c74U, 0xd0b85742U
+};
+
+static const uint32_t Td1[256] =
+{
+ 0x5051f4a7U, 0x537e4165U, 0xc31a17a4U, 0x963a275eU,
+ 0xcb3bab6bU, 0xf11f9d45U, 0xabacfa58U, 0x934be303U,
+ 0x552030faU, 0xf6ad766dU, 0x9188cc76U, 0x25f5024cU,
+ 0xfc4fe5d7U, 0xd7c52acbU, 0x80263544U, 0x8fb562a3U,
+ 0x49deb15aU, 0x6725ba1bU, 0x9845ea0eU, 0xe15dfec0U,
+ 0x02c32f75U, 0x12814cf0U, 0xa38d4697U, 0xc66bd3f9U,
+ 0xe7038f5fU, 0x9515929cU, 0xebbf6d7aU, 0xda955259U,
+ 0x2dd4be83U, 0xd3587421U, 0x2949e069U, 0x448ec9c8U,
+ 0x6a75c289U, 0x78f48e79U, 0x6b99583eU, 0xdd27b971U,
+ 0xb6bee14fU, 0x17f088adU, 0x66c920acU, 0xb47dce3aU,
+ 0x1863df4aU, 0x82e51a31U, 0x60975133U, 0x4562537fU,
+ 0xe0b16477U, 0x84bb6baeU, 0x1cfe81a0U, 0x94f9082bU,
+ 0x58704868U, 0x198f45fdU, 0x8794de6cU, 0xb7527bf8U,
+ 0x23ab73d3U, 0xe2724b02U, 0x57e31f8fU, 0x2a6655abU,
+ 0x07b2eb28U, 0x032fb5c2U, 0x9a86c57bU, 0xa5d33708U,
+ 0xf2302887U, 0xb223bfa5U, 0xba02036aU, 0x5ced1682U,
+ 0x2b8acf1cU, 0x92a779b4U, 0xf0f307f2U, 0xa14e69e2U,
+ 0xcd65daf4U, 0xd50605beU, 0x1fd13462U, 0x8ac4a6feU,
+ 0x9d342e53U, 0xa0a2f355U, 0x32058ae1U, 0x75a4f6ebU,
+ 0x390b83ecU, 0xaa4060efU, 0x065e719fU, 0x51bd6e10U,
+ 0xf93e218aU, 0x3d96dd06U, 0xaedd3e05U, 0x464de6bdU,
+ 0xb591548dU, 0x0571c45dU, 0x6f0406d4U, 0xff605015U,
+ 0x241998fbU, 0x97d6bde9U, 0xcc894043U, 0x7767d99eU,
+ 0xbdb0e842U, 0x8807898bU, 0x38e7195bU, 0xdb79c8eeU,
+ 0x47a17c0aU, 0xe97c420fU, 0xc9f8841eU, 0x00000000U,
+ 0x83098086U, 0x48322bedU, 0xac1e1170U, 0x4e6c5a72U,
+ 0xfbfd0effU, 0x560f8538U, 0x1e3daed5U, 0x27362d39U,
+ 0x640a0fd9U, 0x21685ca6U, 0xd19b5b54U, 0x3a24362eU,
+ 0xb10c0a67U, 0x0f9357e7U, 0xd2b4ee96U, 0x9e1b9b91U,
+ 0x4f80c0c5U, 0xa261dc20U, 0x695a774bU, 0x161c121aU,
+ 0x0ae293baU, 0xe5c0a02aU, 0x433c22e0U, 0x1d121b17U,
+ 0x0b0e090dU, 0xadf28bc7U, 0xb92db6a8U, 0xc8141ea9U,
+ 0x8557f119U, 0x4caf7507U, 0xbbee99ddU, 0xfda37f60U,
+ 0x9ff70126U, 0xbc5c72f5U, 0xc544663bU, 0x345bfb7eU,
+ 0x768b4329U, 0xdccb23c6U, 0x68b6edfcU, 0x63b8e4f1U,
+ 0xcad731dcU, 0x10426385U, 0x40139722U, 0x2084c611U,
+ 0x7d854a24U, 0xf8d2bb3dU, 0x11aef932U, 0x6dc729a1U,
+ 0x4b1d9e2fU, 0xf3dcb230U, 0xec0d8652U, 0xd077c1e3U,
+ 0x6c2bb316U, 0x99a970b9U, 0xfa119448U, 0x2247e964U,
+ 0xc4a8fc8cU, 0x1aa0f03fU, 0xd8567d2cU, 0xef223390U,
+ 0xc787494eU, 0xc1d938d1U, 0xfe8ccaa2U, 0x3698d40bU,
+ 0xcfa6f581U, 0x28a57adeU, 0x26dab78eU, 0xa43fadbfU,
+ 0xe42c3a9dU, 0x0d507892U, 0x9b6a5fccU, 0x62547e46U,
+ 0xc2f68d13U, 0xe890d8b8U, 0x5e2e39f7U, 0xf582c3afU,
+ 0xbe9f5d80U, 0x7c69d093U, 0xa96fd52dU, 0xb3cf2512U,
+ 0x3bc8ac99U, 0xa710187dU, 0x6ee89c63U, 0x7bdb3bbbU,
+ 0x09cd2678U, 0xf46e5918U, 0x01ec9ab7U, 0xa8834f9aU,
+ 0x65e6956eU, 0x7eaaffe6U, 0x0821bccfU, 0xe6ef15e8U,
+ 0xd9bae79bU, 0xce4a6f36U, 0xd4ea9f09U, 0xd629b07cU,
+ 0xaf31a4b2U, 0x312a3f23U, 0x30c6a594U, 0xc035a266U,
+ 0x37744ebcU, 0xa6fc82caU, 0xb0e090d0U, 0x1533a7d8U,
+ 0x4af10498U, 0xf741ecdaU, 0x0e7fcd50U, 0x2f1791f6U,
+ 0x8d764dd6U, 0x4d43efb0U, 0x54ccaa4dU, 0xdfe49604U,
+ 0xe39ed1b5U, 0x1b4c6a88U, 0xb8c12c1fU, 0x7f466551U,
+ 0x049d5eeaU, 0x5d018c35U, 0x73fa8774U, 0x2efb0b41U,
+ 0x5ab3671dU, 0x5292dbd2U, 0x33e91056U, 0x136dd647U,
+ 0x8c9ad761U, 0x7a37a10cU, 0x8e59f814U, 0x89eb133cU,
+ 0xeecea927U, 0x35b761c9U, 0xede11ce5U, 0x3c7a47b1U,
+ 0x599cd2dfU, 0x3f55f273U, 0x791814ceU, 0xbf73c737U,
+ 0xea53f7cdU, 0x5b5ffdaaU, 0x14df3d6fU, 0x867844dbU,
+ 0x81caaff3U, 0x3eb968c4U, 0x2c382434U, 0x5fc2a340U,
+ 0x72161dc3U, 0x0cbce225U, 0x8b283c49U, 0x41ff0d95U,
+ 0x7139a801U, 0xde080cb3U, 0x9cd8b4e4U, 0x906456c1U,
+ 0x617bcb84U, 0x70d532b6U, 0x74486c5cU, 0x42d0b857U
+};
+
+static const uint32_t Td2[256] =
+{
+ 0xa75051f4U, 0x65537e41U, 0xa4c31a17U, 0x5e963a27U,
+ 0x6bcb3babU, 0x45f11f9dU, 0x58abacfaU, 0x03934be3U,
+ 0xfa552030U, 0x6df6ad76U, 0x769188ccU, 0x4c25f502U,
+ 0xd7fc4fe5U, 0xcbd7c52aU, 0x44802635U, 0xa38fb562U,
+ 0x5a49deb1U, 0x1b6725baU, 0x0e9845eaU, 0xc0e15dfeU,
+ 0x7502c32fU, 0xf012814cU, 0x97a38d46U, 0xf9c66bd3U,
+ 0x5fe7038fU, 0x9c951592U, 0x7aebbf6dU, 0x59da9552U,
+ 0x832dd4beU, 0x21d35874U, 0x692949e0U, 0xc8448ec9U,
+ 0x896a75c2U, 0x7978f48eU, 0x3e6b9958U, 0x71dd27b9U,
+ 0x4fb6bee1U, 0xad17f088U, 0xac66c920U, 0x3ab47dceU,
+ 0x4a1863dfU, 0x3182e51aU, 0x33609751U, 0x7f456253U,
+ 0x77e0b164U, 0xae84bb6bU, 0xa01cfe81U, 0x2b94f908U,
+ 0x68587048U, 0xfd198f45U, 0x6c8794deU, 0xf8b7527bU,
+ 0xd323ab73U, 0x02e2724bU, 0x8f57e31fU, 0xab2a6655U,
+ 0x2807b2ebU, 0xc2032fb5U, 0x7b9a86c5U, 0x08a5d337U,
+ 0x87f23028U, 0xa5b223bfU, 0x6aba0203U, 0x825ced16U,
+ 0x1c2b8acfU, 0xb492a779U, 0xf2f0f307U, 0xe2a14e69U,
+ 0xf4cd65daU, 0xbed50605U, 0x621fd134U, 0xfe8ac4a6U,
+ 0x539d342eU, 0x55a0a2f3U, 0xe132058aU, 0xeb75a4f6U,
+ 0xec390b83U, 0xefaa4060U, 0x9f065e71U, 0x1051bd6eU,
+ 0x8af93e21U, 0x063d96ddU, 0x05aedd3eU, 0xbd464de6U,
+ 0x8db59154U, 0x5d0571c4U, 0xd46f0406U, 0x15ff6050U,
+ 0xfb241998U, 0xe997d6bdU, 0x43cc8940U, 0x9e7767d9U,
+ 0x42bdb0e8U, 0x8b880789U, 0x5b38e719U, 0xeedb79c8U,
+ 0x0a47a17cU, 0x0fe97c42U, 0x1ec9f884U, 0x00000000U,
+ 0x86830980U, 0xed48322bU, 0x70ac1e11U, 0x724e6c5aU,
+ 0xfffbfd0eU, 0x38560f85U, 0xd51e3daeU, 0x3927362dU,
+ 0xd9640a0fU, 0xa621685cU, 0x54d19b5bU, 0x2e3a2436U,
+ 0x67b10c0aU, 0xe70f9357U, 0x96d2b4eeU, 0x919e1b9bU,
+ 0xc54f80c0U, 0x20a261dcU, 0x4b695a77U, 0x1a161c12U,
+ 0xba0ae293U, 0x2ae5c0a0U, 0xe0433c22U, 0x171d121bU,
+ 0x0d0b0e09U, 0xc7adf28bU, 0xa8b92db6U, 0xa9c8141eU,
+ 0x198557f1U, 0x074caf75U, 0xddbbee99U, 0x60fda37fU,
+ 0x269ff701U, 0xf5bc5c72U, 0x3bc54466U, 0x7e345bfbU,
+ 0x29768b43U, 0xc6dccb23U, 0xfc68b6edU, 0xf163b8e4U,
+ 0xdccad731U, 0x85104263U, 0x22401397U, 0x112084c6U,
+ 0x247d854aU, 0x3df8d2bbU, 0x3211aef9U, 0xa16dc729U,
+ 0x2f4b1d9eU, 0x30f3dcb2U, 0x52ec0d86U, 0xe3d077c1U,
+ 0x166c2bb3U, 0xb999a970U, 0x48fa1194U, 0x642247e9U,
+ 0x8cc4a8fcU, 0x3f1aa0f0U, 0x2cd8567dU, 0x90ef2233U,
+ 0x4ec78749U, 0xd1c1d938U, 0xa2fe8ccaU, 0x0b3698d4U,
+ 0x81cfa6f5U, 0xde28a57aU, 0x8e26dab7U, 0xbfa43fadU,
+ 0x9de42c3aU, 0x920d5078U, 0xcc9b6a5fU, 0x4662547eU,
+ 0x13c2f68dU, 0xb8e890d8U, 0xf75e2e39U, 0xaff582c3U,
+ 0x80be9f5dU, 0x937c69d0U, 0x2da96fd5U, 0x12b3cf25U,
+ 0x993bc8acU, 0x7da71018U, 0x636ee89cU, 0xbb7bdb3bU,
+ 0x7809cd26U, 0x18f46e59U, 0xb701ec9aU, 0x9aa8834fU,
+ 0x6e65e695U, 0xe67eaaffU, 0xcf0821bcU, 0xe8e6ef15U,
+ 0x9bd9bae7U, 0x36ce4a6fU, 0x09d4ea9fU, 0x7cd629b0U,
+ 0xb2af31a4U, 0x23312a3fU, 0x9430c6a5U, 0x66c035a2U,
+ 0xbc37744eU, 0xcaa6fc82U, 0xd0b0e090U, 0xd81533a7U,
+ 0x984af104U, 0xdaf741ecU, 0x500e7fcdU, 0xf62f1791U,
+ 0xd68d764dU, 0xb04d43efU, 0x4d54ccaaU, 0x04dfe496U,
+ 0xb5e39ed1U, 0x881b4c6aU, 0x1fb8c12cU, 0x517f4665U,
+ 0xea049d5eU, 0x355d018cU, 0x7473fa87U, 0x412efb0bU,
+ 0x1d5ab367U, 0xd25292dbU, 0x5633e910U, 0x47136dd6U,
+ 0x618c9ad7U, 0x0c7a37a1U, 0x148e59f8U, 0x3c89eb13U,
+ 0x27eecea9U, 0xc935b761U, 0xe5ede11cU, 0xb13c7a47U,
+ 0xdf599cd2U, 0x733f55f2U, 0xce791814U, 0x37bf73c7U,
+ 0xcdea53f7U, 0xaa5b5ffdU, 0x6f14df3dU, 0xdb867844U,
+ 0xf381caafU, 0xc43eb968U, 0x342c3824U, 0x405fc2a3U,
+ 0xc372161dU, 0x250cbce2U, 0x498b283cU, 0x9541ff0dU,
+ 0x017139a8U, 0xb3de080cU, 0xe49cd8b4U, 0xc1906456U,
+ 0x84617bcbU, 0xb670d532U, 0x5c74486cU, 0x5742d0b8U
+};
+
+static const uint32_t Td3[256] =
+{
+ 0xf4a75051U, 0x4165537eU, 0x17a4c31aU, 0x275e963aU,
+ 0xab6bcb3bU, 0x9d45f11fU, 0xfa58abacU, 0xe303934bU,
+ 0x30fa5520U, 0x766df6adU, 0xcc769188U, 0x024c25f5U,
+ 0xe5d7fc4fU, 0x2acbd7c5U, 0x35448026U, 0x62a38fb5U,
+ 0xb15a49deU, 0xba1b6725U, 0xea0e9845U, 0xfec0e15dU,
+ 0x2f7502c3U, 0x4cf01281U, 0x4697a38dU, 0xd3f9c66bU,
+ 0x8f5fe703U, 0x929c9515U, 0x6d7aebbfU, 0x5259da95U,
+ 0xbe832dd4U, 0x7421d358U, 0xe0692949U, 0xc9c8448eU,
+ 0xc2896a75U, 0x8e7978f4U, 0x583e6b99U, 0xb971dd27U,
+ 0xe14fb6beU, 0x88ad17f0U, 0x20ac66c9U, 0xce3ab47dU,
+ 0xdf4a1863U, 0x1a3182e5U, 0x51336097U, 0x537f4562U,
+ 0x6477e0b1U, 0x6bae84bbU, 0x81a01cfeU, 0x082b94f9U,
+ 0x48685870U, 0x45fd198fU, 0xde6c8794U, 0x7bf8b752U,
+ 0x73d323abU, 0x4b02e272U, 0x1f8f57e3U, 0x55ab2a66U,
+ 0xeb2807b2U, 0xb5c2032fU, 0xc57b9a86U, 0x3708a5d3U,
+ 0x2887f230U, 0xbfa5b223U, 0x036aba02U, 0x16825cedU,
+ 0xcf1c2b8aU, 0x79b492a7U, 0x07f2f0f3U, 0x69e2a14eU,
+ 0xdaf4cd65U, 0x05bed506U, 0x34621fd1U, 0xa6fe8ac4U,
+ 0x2e539d34U, 0xf355a0a2U, 0x8ae13205U, 0xf6eb75a4U,
+ 0x83ec390bU, 0x60efaa40U, 0x719f065eU, 0x6e1051bdU,
+ 0x218af93eU, 0xdd063d96U, 0x3e05aeddU, 0xe6bd464dU,
+ 0x548db591U, 0xc45d0571U, 0x06d46f04U, 0x5015ff60U,
+ 0x98fb2419U, 0xbde997d6U, 0x4043cc89U, 0xd99e7767U,
+ 0xe842bdb0U, 0x898b8807U, 0x195b38e7U, 0xc8eedb79U,
+ 0x7c0a47a1U, 0x420fe97cU, 0x841ec9f8U, 0x00000000U,
+ 0x80868309U, 0x2bed4832U, 0x1170ac1eU, 0x5a724e6cU,
+ 0x0efffbfdU, 0x8538560fU, 0xaed51e3dU, 0x2d392736U,
+ 0x0fd9640aU, 0x5ca62168U, 0x5b54d19bU, 0x362e3a24U,
+ 0x0a67b10cU, 0x57e70f93U, 0xee96d2b4U, 0x9b919e1bU,
+ 0xc0c54f80U, 0xdc20a261U, 0x774b695aU, 0x121a161cU,
+ 0x93ba0ae2U, 0xa02ae5c0U, 0x22e0433cU, 0x1b171d12U,
+ 0x090d0b0eU, 0x8bc7adf2U, 0xb6a8b92dU, 0x1ea9c814U,
+ 0xf1198557U, 0x75074cafU, 0x99ddbbeeU, 0x7f60fda3U,
+ 0x01269ff7U, 0x72f5bc5cU, 0x663bc544U, 0xfb7e345bU,
+ 0x4329768bU, 0x23c6dccbU, 0xedfc68b6U, 0xe4f163b8U,
+ 0x31dccad7U, 0x63851042U, 0x97224013U, 0xc6112084U,
+ 0x4a247d85U, 0xbb3df8d2U, 0xf93211aeU, 0x29a16dc7U,
+ 0x9e2f4b1dU, 0xb230f3dcU, 0x8652ec0dU, 0xc1e3d077U,
+ 0xb3166c2bU, 0x70b999a9U, 0x9448fa11U, 0xe9642247U,
+ 0xfc8cc4a8U, 0xf03f1aa0U, 0x7d2cd856U, 0x3390ef22U,
+ 0x494ec787U, 0x38d1c1d9U, 0xcaa2fe8cU, 0xd40b3698U,
+ 0xf581cfa6U, 0x7ade28a5U, 0xb78e26daU, 0xadbfa43fU,
+ 0x3a9de42cU, 0x78920d50U, 0x5fcc9b6aU, 0x7e466254U,
+ 0x8d13c2f6U, 0xd8b8e890U, 0x39f75e2eU, 0xc3aff582U,
+ 0x5d80be9fU, 0xd0937c69U, 0xd52da96fU, 0x2512b3cfU,
+ 0xac993bc8U, 0x187da710U, 0x9c636ee8U, 0x3bbb7bdbU,
+ 0x267809cdU, 0x5918f46eU, 0x9ab701ecU, 0x4f9aa883U,
+ 0x956e65e6U, 0xffe67eaaU, 0xbccf0821U, 0x15e8e6efU,
+ 0xe79bd9baU, 0x6f36ce4aU, 0x9f09d4eaU, 0xb07cd629U,
+ 0xa4b2af31U, 0x3f23312aU, 0xa59430c6U, 0xa266c035U,
+ 0x4ebc3774U, 0x82caa6fcU, 0x90d0b0e0U, 0xa7d81533U,
+ 0x04984af1U, 0xecdaf741U, 0xcd500e7fU, 0x91f62f17U,
+ 0x4dd68d76U, 0xefb04d43U, 0xaa4d54ccU, 0x9604dfe4U,
+ 0xd1b5e39eU, 0x6a881b4cU, 0x2c1fb8c1U, 0x65517f46U,
+ 0x5eea049dU, 0x8c355d01U, 0x877473faU, 0x0b412efbU,
+ 0x671d5ab3U, 0xdbd25292U, 0x105633e9U, 0xd647136dU,
+ 0xd7618c9aU, 0xa10c7a37U, 0xf8148e59U, 0x133c89ebU,
+ 0xa927eeceU, 0x61c935b7U, 0x1ce5ede1U, 0x47b13c7aU,
+ 0xd2df599cU, 0xf2733f55U, 0x14ce7918U, 0xc737bf73U,
+ 0xf7cdea53U, 0xfdaa5b5fU, 0x3d6f14dfU, 0x44db8678U,
+ 0xaff381caU, 0x68c43eb9U, 0x24342c38U, 0xa3405fc2U,
+ 0x1dc37216U, 0xe2250cbcU, 0x3c498b28U, 0x0d9541ffU,
+ 0xa8017139U, 0x0cb3de08U, 0xb4e49cd8U, 0x56c19064U,
+ 0xcb84617bU, 0x32b670d5U, 0x6c5c7448U, 0xb85742d0U
+};
+
+static const uint32_t Td4[256] =
+{
+ 0x52525252U, 0x09090909U, 0x6a6a6a6aU, 0xd5d5d5d5U,
+ 0x30303030U, 0x36363636U, 0xa5a5a5a5U, 0x38383838U,
+ 0xbfbfbfbfU, 0x40404040U, 0xa3a3a3a3U, 0x9e9e9e9eU,
+ 0x81818181U, 0xf3f3f3f3U, 0xd7d7d7d7U, 0xfbfbfbfbU,
+ 0x7c7c7c7cU, 0xe3e3e3e3U, 0x39393939U, 0x82828282U,
+ 0x9b9b9b9bU, 0x2f2f2f2fU, 0xffffffffU, 0x87878787U,
+ 0x34343434U, 0x8e8e8e8eU, 0x43434343U, 0x44444444U,
+ 0xc4c4c4c4U, 0xdedededeU, 0xe9e9e9e9U, 0xcbcbcbcbU,
+ 0x54545454U, 0x7b7b7b7bU, 0x94949494U, 0x32323232U,
+ 0xa6a6a6a6U, 0xc2c2c2c2U, 0x23232323U, 0x3d3d3d3dU,
+ 0xeeeeeeeeU, 0x4c4c4c4cU, 0x95959595U, 0x0b0b0b0bU,
+ 0x42424242U, 0xfafafafaU, 0xc3c3c3c3U, 0x4e4e4e4eU,
+ 0x08080808U, 0x2e2e2e2eU, 0xa1a1a1a1U, 0x66666666U,
+ 0x28282828U, 0xd9d9d9d9U, 0x24242424U, 0xb2b2b2b2U,
+ 0x76767676U, 0x5b5b5b5bU, 0xa2a2a2a2U, 0x49494949U,
+ 0x6d6d6d6dU, 0x8b8b8b8bU, 0xd1d1d1d1U, 0x25252525U,
+ 0x72727272U, 0xf8f8f8f8U, 0xf6f6f6f6U, 0x64646464U,
+ 0x86868686U, 0x68686868U, 0x98989898U, 0x16161616U,
+ 0xd4d4d4d4U, 0xa4a4a4a4U, 0x5c5c5c5cU, 0xccccccccU,
+ 0x5d5d5d5dU, 0x65656565U, 0xb6b6b6b6U, 0x92929292U,
+ 0x6c6c6c6cU, 0x70707070U, 0x48484848U, 0x50505050U,
+ 0xfdfdfdfdU, 0xededededU, 0xb9b9b9b9U, 0xdadadadaU,
+ 0x5e5e5e5eU, 0x15151515U, 0x46464646U, 0x57575757U,
+ 0xa7a7a7a7U, 0x8d8d8d8dU, 0x9d9d9d9dU, 0x84848484U,
+ 0x90909090U, 0xd8d8d8d8U, 0xababababU, 0x00000000U,
+ 0x8c8c8c8cU, 0xbcbcbcbcU, 0xd3d3d3d3U, 0x0a0a0a0aU,
+ 0xf7f7f7f7U, 0xe4e4e4e4U, 0x58585858U, 0x05050505U,
+ 0xb8b8b8b8U, 0xb3b3b3b3U, 0x45454545U, 0x06060606U,
+ 0xd0d0d0d0U, 0x2c2c2c2cU, 0x1e1e1e1eU, 0x8f8f8f8fU,
+ 0xcacacacaU, 0x3f3f3f3fU, 0x0f0f0f0fU, 0x02020202U,
+ 0xc1c1c1c1U, 0xafafafafU, 0xbdbdbdbdU, 0x03030303U,
+ 0x01010101U, 0x13131313U, 0x8a8a8a8aU, 0x6b6b6b6bU,
+ 0x3a3a3a3aU, 0x91919191U, 0x11111111U, 0x41414141U,
+ 0x4f4f4f4fU, 0x67676767U, 0xdcdcdcdcU, 0xeaeaeaeaU,
+ 0x97979797U, 0xf2f2f2f2U, 0xcfcfcfcfU, 0xcecececeU,
+ 0xf0f0f0f0U, 0xb4b4b4b4U, 0xe6e6e6e6U, 0x73737373U,
+ 0x96969696U, 0xacacacacU, 0x74747474U, 0x22222222U,
+ 0xe7e7e7e7U, 0xadadadadU, 0x35353535U, 0x85858585U,
+ 0xe2e2e2e2U, 0xf9f9f9f9U, 0x37373737U, 0xe8e8e8e8U,
+ 0x1c1c1c1cU, 0x75757575U, 0xdfdfdfdfU, 0x6e6e6e6eU,
+ 0x47474747U, 0xf1f1f1f1U, 0x1a1a1a1aU, 0x71717171U,
+ 0x1d1d1d1dU, 0x29292929U, 0xc5c5c5c5U, 0x89898989U,
+ 0x6f6f6f6fU, 0xb7b7b7b7U, 0x62626262U, 0x0e0e0e0eU,
+ 0xaaaaaaaaU, 0x18181818U, 0xbebebebeU, 0x1b1b1b1bU,
+ 0xfcfcfcfcU, 0x56565656U, 0x3e3e3e3eU, 0x4b4b4b4bU,
+ 0xc6c6c6c6U, 0xd2d2d2d2U, 0x79797979U, 0x20202020U,
+ 0x9a9a9a9aU, 0xdbdbdbdbU, 0xc0c0c0c0U, 0xfefefefeU,
+ 0x78787878U, 0xcdcdcdcdU, 0x5a5a5a5aU, 0xf4f4f4f4U,
+ 0x1f1f1f1fU, 0xddddddddU, 0xa8a8a8a8U, 0x33333333U,
+ 0x88888888U, 0x07070707U, 0xc7c7c7c7U, 0x31313131U,
+ 0xb1b1b1b1U, 0x12121212U, 0x10101010U, 0x59595959U,
+ 0x27272727U, 0x80808080U, 0xececececU, 0x5f5f5f5fU,
+ 0x60606060U, 0x51515151U, 0x7f7f7f7fU, 0xa9a9a9a9U,
+ 0x19191919U, 0xb5b5b5b5U, 0x4a4a4a4aU, 0x0d0d0d0dU,
+ 0x2d2d2d2dU, 0xe5e5e5e5U, 0x7a7a7a7aU, 0x9f9f9f9fU,
+ 0x93939393U, 0xc9c9c9c9U, 0x9c9c9c9cU, 0xefefefefU,
+ 0xa0a0a0a0U, 0xe0e0e0e0U, 0x3b3b3b3bU, 0x4d4d4d4dU,
+ 0xaeaeaeaeU, 0x2a2a2a2aU, 0xf5f5f5f5U, 0xb0b0b0b0U,
+ 0xc8c8c8c8U, 0xebebebebU, 0xbbbbbbbbU, 0x3c3c3c3cU,
+ 0x83838383U, 0x53535353U, 0x99999999U, 0x61616161U,
+ 0x17171717U, 0x2b2b2b2bU, 0x04040404U, 0x7e7e7e7eU,
+ 0xbabababaU, 0x77777777U, 0xd6d6d6d6U, 0x26262626U,
+ 0xe1e1e1e1U, 0x69696969U, 0x14141414U, 0x63636363U,
+ 0x55555555U, 0x21212121U, 0x0c0c0c0cU, 0x7d7d7d7dU
+};
+
+/* Rcon is Round Constant; used for encryption key expansion */
+static const uint32_t rcon[RC_LENGTH] =
+{
+ /* for 128-bit blocks, Rijndael never uses more than 10 rcon values */
+ 0x01000000, 0x02000000, 0x04000000, 0x08000000,
+ 0x10000000, 0x20000000, 0x40000000, 0x80000000,
+ 0x1B000000, 0x36000000
+};
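Each rcon entry is the previous one doubled in GF(2^8) and kept in the most significant byte; an illustrative generator (reusing the hypothetical xtime() helper sketched earlier, with rcon_check standing in for a scratch array):

uint32_t rc = 0x01000000U;
for (int i = 0; i < RC_LENGTH; i++) {
	rcon_check[i] = rc;			/* matches rcon[i] above */
	rc = (uint32_t)xtime(rc >> 24) << 24;	/* ... 0x80 -> 0x1b -> 0x36 */
}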
+
+
+/*
+ * Expand the cipher key into the encryption key schedule.
+ *
+ * Return the number of rounds for the given cipher key size.
+ * The size of the key schedule depends on the number of rounds
+ * (which can be computed from the size of the key), i.e. 4*(Nr + 1).
+ *
+ * Parameters:
+ * rk AES key schedule 32-bit array to be initialized
+ * cipherKey User key
+ * keyBits AES key size (128, 192, or 256 bits)
+ */
+static int
+rijndael_key_setup_enc_raw(uint32_t rk[], const uint32_t cipherKey[],
+ int keyBits)
+{
+ int i = 0;
+ uint32_t temp;
+
+ rk[0] = cipherKey[0];
+ rk[1] = cipherKey[1];
+ rk[2] = cipherKey[2];
+ rk[3] = cipherKey[3];
+
+ if (keyBits == 128) {
+ for (;;) {
+ temp = rk[3];
+ rk[4] = rk[0] ^
+ (Te4[(temp >> 16) & 0xff] & 0xff000000) ^
+ (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^
+ (Te4[temp & 0xff] & 0x0000ff00) ^
+ (Te4[temp >> 24] & 0x000000ff) ^
+ rcon[i];
+ rk[5] = rk[1] ^ rk[4];
+ rk[6] = rk[2] ^ rk[5];
+ rk[7] = rk[3] ^ rk[6];
+
+ if (++i == 10) {
+ return (10);
+ }
+ rk += 4;
+ }
+ }
+
+ rk[4] = cipherKey[4];
+ rk[5] = cipherKey[5];
+
+ if (keyBits == 192) {
+ for (;;) {
+ temp = rk[5];
+ rk[6] = rk[0] ^
+ (Te4[(temp >> 16) & 0xff] & 0xff000000) ^
+ (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^
+ (Te4[temp & 0xff] & 0x0000ff00) ^
+ (Te4[temp >> 24] & 0x000000ff) ^
+ rcon[i];
+ rk[7] = rk[1] ^ rk[6];
+ rk[8] = rk[2] ^ rk[7];
+ rk[9] = rk[3] ^ rk[8];
+
+ if (++i == 8) {
+ return (12);
+ }
+
+ rk[10] = rk[4] ^ rk[9];
+ rk[11] = rk[5] ^ rk[10];
+ rk += 6;
+ }
+ }
+
+ rk[6] = cipherKey[6];
+ rk[7] = cipherKey[7];
+
+ if (keyBits == 256) {
+ for (;;) {
+ temp = rk[7];
+ rk[8] = rk[0] ^
+ (Te4[(temp >> 16) & 0xff] & 0xff000000) ^
+ (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^
+ (Te4[temp & 0xff] & 0x0000ff00) ^
+ (Te4[temp >> 24] & 0x000000ff) ^
+ rcon[i];
+ rk[9] = rk[1] ^ rk[8];
+ rk[10] = rk[2] ^ rk[9];
+ rk[11] = rk[3] ^ rk[10];
+
+ if (++i == 7) {
+ return (14);
+ }
+ temp = rk[11];
+ rk[12] = rk[4] ^
+ (Te4[temp >> 24] & 0xff000000) ^
+ (Te4[(temp >> 16) & 0xff] & 0x00ff0000) ^
+ (Te4[(temp >> 8) & 0xff] & 0x0000ff00) ^
+ (Te4[temp & 0xff] & 0x000000ff);
+ rk[13] = rk[5] ^ rk[12];
+ rk[14] = rk[6] ^ rk[13];
+ rk[15] = rk[7] ^ rk[14];
+
+ rk += 8;
+ }
+ }
+
+ return (0);
+}
+#endif /* !__amd64 */
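A known-answer sanity check for the expansion above (a sketch against FIPS-197 Appendix A.1; only meaningful where this generic C path is compiled, i.e. non-amd64 builds):

uint32_t rk[4 * (10 + 1)];
static const uint32_t key128[4] = {
	0x2b7e1516, 0x28aed2a6, 0xabf71588, 0x09cf4f3c
};
int nr = rijndael_key_setup_enc_raw(rk, key128, 128);
/*
 * FIPS-197 A.1 expects nr == 10 and rk[4] == 0xa0fafe17,
 * i.e. w4 = w0 ^ SubWord(RotWord(w3)) ^ rcon[0].
 */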
+
+#if defined(__amd64)
+
+/*
+ * Expand the 32-bit AES cipher key array into the encryption and decryption
+ * key schedules.
+ *
+ * Parameters:
+ * key AES key schedule to be initialized
+ * keyarr32 User key
+ * keyBits AES key size (128, 192, or 256 bits)
+ */
+static void
+aes_setupkeys(aes_key_t *key, const uint32_t *keyarr32, int keybits)
+{
+ if (intel_aes_instructions_present()) {
+ key->flags = INTEL_AES_NI_CAPABLE;
+ KPREEMPT_DISABLE;
+ key->nr = rijndael_key_setup_enc_intel(&(key->encr_ks.ks32[0]),
+ keyarr32, keybits);
+ key->nr = rijndael_key_setup_dec_intel(&(key->decr_ks.ks32[0]),
+ keyarr32, keybits);
+ KPREEMPT_ENABLE;
+ } else {
+ key->flags = 0;
+ key->nr = rijndael_key_setup_enc_amd64(&(key->encr_ks.ks32[0]),
+ keyarr32, keybits);
+ key->nr = rijndael_key_setup_dec_amd64(&(key->decr_ks.ks32[0]),
+ keyarr32, keybits);
+ }
+
+ key->type = AES_32BIT_KS;
+}
+
+/*
+ * Encrypt one block of data. The block is assumed to be an array
+ * of four uint32_t values, so copying for alignment (and byte-order
+ * reversal on little-endian systems) might be necessary on the
+ * input and output byte streams.
+ * The size of the key schedule depends on the number of rounds
+ * (which can be computed from the size of the key), i.e. 4*(Nr + 1).
+ *
+ * Parameters:
+ * rk Key schedule, of aes_ks_t (60 32-bit integers)
+ * Nr Number of rounds
+ * pt Input block (plain text)
+ * ct Output block (crypto text). Can overlap with pt
+ * flags Indicates whether we're on Intel AES-NI-capable hardware
+ */
+static void
+rijndael_encrypt(const uint32_t rk[], int Nr, const uint32_t pt[4],
+ uint32_t ct[4], int flags) {
+ if (flags & INTEL_AES_NI_CAPABLE) {
+ KPREEMPT_DISABLE;
+ aes_encrypt_intel(rk, Nr, pt, ct);
+ KPREEMPT_ENABLE;
+ } else {
+ aes_encrypt_amd64(rk, Nr, pt, ct);
+ }
+}
+
+/*
+ * Decrypt one block of data. The block is assumed to be an array
+ * of four uint32_t values, so copying for alignment (and byte-order
+ * reversal on little-endian systems) might be necessary on the
+ * input and output byte streams.
+ * The size of the key schedule depends on the number of rounds
+ * (which can be computed from the size of the key), i.e. 4*(Nr + 1).
+ *
+ * Parameters:
+ * rk Key schedule, of aes_ks_t (60 32-bit integers)
+ * Nr Number of rounds
+ * ct Input block (crypto text)
+ * pt Output block (plain text). Can overlap with ct
+ * flags Indicates whether we're on Intel AES-NI-capable hardware
+ */
+static void
+rijndael_decrypt(const uint32_t rk[], int Nr, const uint32_t ct[4],
+ uint32_t pt[4], int flags) {
+ if (flags & INTEL_AES_NI_CAPABLE) {
+ KPREEMPT_DISABLE;
+ aes_decrypt_intel(rk, Nr, ct, pt);
+ KPREEMPT_ENABLE;
+ } else {
+ aes_decrypt_amd64(rk, Nr, ct, pt);
+ }
+}
+
+
+#else /* generic C implementation */
+
+/*
+ * Expand the cipher key into the decryption key schedule.
+ * Return the number of rounds for the given cipher key size.
+ * The size of the key schedule depends on the number of rounds
+ * (which can be computed from the size of the key), i.e. 4*(Nr + 1).
+ *
+ * Parameters:
+ * rk AES key schedule 32-bit array to be initialized
+ * cipherKey User key
+ * keyBits AES key size (128, 192, or 256 bits)
+ */
+static int
+rijndael_key_setup_dec(uint32_t rk[], const uint32_t cipherKey[], int keyBits)
+{
+ int Nr, i, j;
+ uint32_t temp;
+
+ /* expand the cipher key: */
+ Nr = rijndael_key_setup_enc_raw(rk, cipherKey, keyBits);
+
+ /* invert the order of the round keys: */
+ for (i = 0, j = 4 * Nr; i < j; i += 4, j -= 4) {
+ temp = rk[i];
+ rk[i] = rk[j];
+ rk[j] = temp;
+ temp = rk[i + 1];
+ rk[i + 1] = rk[j + 1];
+ rk[j + 1] = temp;
+ temp = rk[i + 2];
+ rk[i + 2] = rk[j + 2];
+ rk[j + 2] = temp;
+ temp = rk[i + 3];
+ rk[i + 3] = rk[j + 3];
+ rk[j + 3] = temp;
+ }
+
+ /*
+ * apply the inverse MixColumn transform to all
+ * round keys but the first and the last:
+ */
+ for (i = 1; i < Nr; i++) {
+ rk += 4;
+ rk[0] = Td0[Te4[rk[0] >> 24] & 0xff] ^
+ Td1[Te4[(rk[0] >> 16) & 0xff] & 0xff] ^
+ Td2[Te4[(rk[0] >> 8) & 0xff] & 0xff] ^
+ Td3[Te4[rk[0] & 0xff] & 0xff];
+ rk[1] = Td0[Te4[rk[1] >> 24] & 0xff] ^
+ Td1[Te4[(rk[1] >> 16) & 0xff] & 0xff] ^
+ Td2[Te4[(rk[1] >> 8) & 0xff] & 0xff] ^
+ Td3[Te4[rk[1] & 0xff] & 0xff];
+ rk[2] = Td0[Te4[rk[2] >> 24] & 0xff] ^
+ Td1[Te4[(rk[2] >> 16) & 0xff] & 0xff] ^
+ Td2[Te4[(rk[2] >> 8) & 0xff] & 0xff] ^
+ Td3[Te4[rk[2] & 0xff] & 0xff];
+ rk[3] = Td0[Te4[rk[3] >> 24] & 0xff] ^
+ Td1[Te4[(rk[3] >> 16) & 0xff] & 0xff] ^
+ Td2[Te4[(rk[3] >> 8) & 0xff] & 0xff] ^
+ Td3[Te4[rk[3] & 0xff] & 0xff];
+ }
+
+ return (Nr);
+}
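The Td0[Te4[x] & 0xff] composition in the loop above is worth unpacking (a restatement of the table definitions at the top of this file, not new behavior):

/*
 * Te4[b] & 0xff = S[b]			(Te4 repeats S[b] in each byte)
 * Td0[S[b]] = Si[S[b]] . {0e,09,0d,0b}	(definition of Td0)
 *           = b . {0e,09,0d,0b}	(Si inverts S)
 *
 * so the four-table XOR applies InvMixColumns to each round-key word
 * without needing a separate inverse-MixColumns routine.
 */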
+
+
+/*
+ * Expand the 32-bit AES cipher key array into the encryption and decryption
+ * key schedules.
+ *
+ * Parameters:
+ * key AES key schedule to be initialized
+ * keyarr32 User key
+ * keyBits AES key size (128, 192, or 256 bits)
+ */
+static void
+aes_setupkeys(aes_key_t *key, const uint32_t *keyarr32, int keybits)
+{
+ key->nr = rijndael_key_setup_enc(&(key->encr_ks.ks32[0]), keyarr32,
+ keybits);
+ key->nr = rijndael_key_setup_dec(&(key->decr_ks.ks32[0]), keyarr32,
+ keybits);
+ key->type = AES_32BIT_KS;
+}
+
+
+/*
+ * Encrypt one block of data. The block is assumed to be an array
+ * of four uint32_t values, so copying for alignment (and byte-order
+ * reversal on little-endian systems) might be necessary on the
+ * input and output byte streams.
+ * The size of the key schedule depends on the number of rounds
+ * (which can be computed from the size of the key), i.e. 4*(Nr + 1).
+ *
+ * Parameters:
+ * rk Key schedule, of aes_ks_t (60 32-bit integers)
+ * Nr Number of rounds
+ * pt Input block (plain text)
+ * ct Output block (crypto text). Can overlap with pt
+ */
+static void
+rijndael_encrypt(const uint32_t rk[], int Nr, const uint32_t pt[4],
+ uint32_t ct[4])
+{
+ uint32_t s0, s1, s2, s3, t0, t1, t2, t3;
+ int r;
+
+ /*
+ * map byte array block to cipher state
+ * and add initial round key:
+ */
+
+ s0 = pt[0] ^ rk[0];
+ s1 = pt[1] ^ rk[1];
+ s2 = pt[2] ^ rk[2];
+ s3 = pt[3] ^ rk[3];
+
+ /*
+ * Nr - 1 full rounds:
+ */
+
+ r = Nr >> 1;
+
+ for (;;) {
+ t0 = Te0[s0 >> 24] ^
+ Te1[(s1 >> 16) & 0xff] ^
+ Te2[(s2 >> 8) & 0xff] ^
+ Te3[s3 & 0xff] ^
+ rk[4];
+
+ t1 = Te0[s1 >> 24] ^
+ Te1[(s2 >> 16) & 0xff] ^
+ Te2[(s3 >> 8) & 0xff] ^
+ Te3[s0 & 0xff] ^
+ rk[5];
+
+ t2 = Te0[s2 >> 24] ^
+ Te1[(s3 >> 16) & 0xff] ^
+ Te2[(s0 >> 8) & 0xff] ^
+ Te3[s1 & 0xff] ^
+ rk[6];
+
+ t3 = Te0[s3 >> 24] ^
+ Te1[(s0 >> 16) & 0xff] ^
+ Te2[(s1 >> 8) & 0xff] ^
+ Te3[s2 & 0xff] ^
+ rk[7];
+
+ rk += 8;
+
+ if (--r == 0) {
+ break;
+ }
+
+ s0 = Te0[t0 >> 24] ^
+ Te1[(t1 >> 16) & 0xff] ^
+ Te2[(t2 >> 8) & 0xff] ^
+ Te3[t3 & 0xff] ^
+ rk[0];
+
+ s1 = Te0[t1 >> 24] ^
+ Te1[(t2 >> 16) & 0xff] ^
+ Te2[(t3 >> 8) & 0xff] ^
+ Te3[t0 & 0xff] ^
+ rk[1];
+
+ s2 = Te0[t2 >> 24] ^
+ Te1[(t3 >> 16) & 0xff] ^
+ Te2[(t0 >> 8) & 0xff] ^
+ Te3[t1 & 0xff] ^
+ rk[2];
+
+ s3 = Te0[t3 >> 24] ^
+ Te1[(t0 >> 16) & 0xff] ^
+ Te2[(t1 >> 8) & 0xff] ^
+ Te3[t2 & 0xff] ^
+ rk[3];
+ }
+
+ /*
+ * apply last round and
+ * map cipher state to byte array block:
+ */
+
+ s0 = (Te4[(t0 >> 24)] & 0xff000000) ^
+ (Te4[(t1 >> 16) & 0xff] & 0x00ff0000) ^
+ (Te4[(t2 >> 8) & 0xff] & 0x0000ff00) ^
+ (Te4[t3 & 0xff] & 0x000000ff) ^
+ rk[0];
+ ct[0] = s0;
+
+ s1 = (Te4[(t1 >> 24)] & 0xff000000) ^
+ (Te4[(t2 >> 16) & 0xff] & 0x00ff0000) ^
+ (Te4[(t3 >> 8) & 0xff] & 0x0000ff00) ^
+ (Te4[t0 & 0xff] & 0x000000ff) ^
+ rk[1];
+ ct[1] = s1;
+
+ s2 = (Te4[(t2 >> 24)] & 0xff000000) ^
+ (Te4[(t3 >> 16) & 0xff] & 0x00ff0000) ^
+ (Te4[(t0 >> 8) & 0xff] & 0x0000ff00) ^
+ (Te4[t1 & 0xff] & 0x000000ff) ^
+ rk[2];
+ ct[2] = s2;
+
+ s3 = (Te4[(t3 >> 24)] & 0xff000000) ^
+ (Te4[(t0 >> 16) & 0xff] & 0x00ff0000) ^
+ (Te4[(t1 >> 8) & 0xff] & 0x0000ff00) ^
+ (Te4[t2 & 0xff] & 0x000000ff) ^
+ rk[3];
+ ct[3] = s3;
+}
+
+
+/*
+ * Decrypt one block of data. The block is assumed to be an array
+ * of four uint32_t values, so copying for alignment (and byte-order
+ * reversal on little endian systems) might be necessary on the
+ * input and output byte streams.
+ * The size of the key schedule depends on the number of rounds
+ * (which can be computed from the size of the key), i.e. 4*(Nr + 1).
+ *
+ * Parameters:
+ * rk Key schedule, of aes_ks_t (60 32-bit integers)
+ * Nr Number of rounds
+ * ct Input block (crypto text)
+ * pt	Output block (plain text). Can overlap with ct
+ */
+static void
+rijndael_decrypt(const uint32_t rk[], int Nr, const uint32_t ct[4],
+ uint32_t pt[4])
+{
+ uint32_t s0, s1, s2, s3, t0, t1, t2, t3;
+ int r;
+
+ /*
+ * map byte array block to cipher state
+ * and add initial round key:
+ */
+ s0 = ct[0] ^ rk[0];
+ s1 = ct[1] ^ rk[1];
+ s2 = ct[2] ^ rk[2];
+ s3 = ct[3] ^ rk[3];
+
+ /*
+ * Nr - 1 full rounds:
+ */
+
+ r = Nr >> 1;
+
+ for (;;) {
+ t0 = Td0[s0 >> 24] ^
+ Td1[(s3 >> 16) & 0xff] ^
+ Td2[(s2 >> 8) & 0xff] ^
+ Td3[s1 & 0xff] ^
+ rk[4];
+
+ t1 = Td0[s1 >> 24] ^
+ Td1[(s0 >> 16) & 0xff] ^
+ Td2[(s3 >> 8) & 0xff] ^
+ Td3[s2 & 0xff] ^
+ rk[5];
+
+ t2 = Td0[s2 >> 24] ^
+ Td1[(s1 >> 16) & 0xff] ^
+ Td2[(s0 >> 8) & 0xff] ^
+ Td3[s3 & 0xff] ^
+ rk[6];
+
+ t3 = Td0[s3 >> 24] ^
+ Td1[(s2 >> 16) & 0xff] ^
+ Td2[(s1 >> 8) & 0xff] ^
+ Td3[s0 & 0xff] ^
+ rk[7];
+
+ rk += 8;
+
+ if (--r == 0) {
+ break;
+ }
+
+ s0 = Td0[t0 >> 24] ^
+ Td1[(t3 >> 16) & 0xff] ^
+ Td2[(t2 >> 8) & 0xff] ^
+ Td3[t1 & 0xff] ^
+ rk[0];
+
+ s1 = Td0[t1 >> 24] ^
+ Td1[(t0 >> 16) & 0xff] ^
+ Td2[(t3 >> 8) & 0xff] ^
+ Td3[t2 & 0xff] ^
+ rk[1];
+
+ s2 = Td0[t2 >> 24] ^
+ Td1[(t1 >> 16) & 0xff] ^
+ Td2[(t0 >> 8) & 0xff] ^
+ Td3[t3 & 0xff] ^
+ rk[2];
+
+ s3 = Td0[t3 >> 24] ^
+ Td1[(t2 >> 16) & 0xff] ^
+ Td2[(t1 >> 8) & 0xff] ^
+ Td3[t0 & 0xff] ^
+ rk[3];
+ }
+
+ /*
+ * apply last round and
+ * map cipher state to byte array block:
+ */
+
+ s0 = (Td4[t0 >> 24] & 0xff000000) ^
+ (Td4[(t3 >> 16) & 0xff] & 0x00ff0000) ^
+ (Td4[(t2 >> 8) & 0xff] & 0x0000ff00) ^
+ (Td4[t1 & 0xff] & 0x000000ff) ^
+ rk[0];
+ pt[0] = s0;
+
+ s1 = (Td4[t1 >> 24] & 0xff000000) ^
+ (Td4[(t0 >> 16) & 0xff] & 0x00ff0000) ^
+ (Td4[(t3 >> 8) & 0xff] & 0x0000ff00) ^
+ (Td4[t2 & 0xff] & 0x000000ff) ^
+ rk[1];
+ pt[1] = s1;
+
+ s2 = (Td4[t2 >> 24] & 0xff000000) ^
+ (Td4[(t1 >> 16) & 0xff] & 0x00ff0000) ^
+ (Td4[(t0 >> 8) & 0xff] & 0x0000ff00) ^
+ (Td4[t3 & 0xff] & 0x000000ff) ^
+ rk[2];
+ pt[2] = s2;
+
+ s3 = (Td4[t3 >> 24] & 0xff000000) ^
+ (Td4[(t2 >> 16) & 0xff] & 0x00ff0000) ^
+ (Td4[(t1 >> 8) & 0xff] & 0x0000ff00) ^
+ (Td4[t0 & 0xff] & 0x000000ff) ^
+ rk[3];
+ pt[3] = s3;
+}
+#endif /* __amd64 */
+
+
+/*
+ * Initialize AES encryption and decryption key schedules.
+ *
+ * Parameters:
+ * cipherKey User key
+ * keyBits AES key size (128, 192, or 256 bits)
+ * keysched AES key schedule to be initialized, of type aes_key_t.
+ * Allocated by aes_alloc_keysched().
+ */
+void
+aes_init_keysched(const uint8_t *cipherKey, uint_t keyBits, void *keysched)
+{
+ aes_key_t *newbie = keysched;
+ uint_t keysize, i, j;
+ union {
+ uint64_t ka64[4];
+ uint32_t ka32[8];
+ } keyarr;
+
+ switch (keyBits) {
+ case 128:
+ newbie->nr = 10;
+ break;
+
+ case 192:
+ newbie->nr = 12;
+ break;
+
+ case 256:
+ newbie->nr = 14;
+ break;
+
+ default:
+ /* should never get here */
+ return;
+ }
+ keysize = CRYPTO_BITS2BYTES(keyBits);
+
+ /*
+ * For _LITTLE_ENDIAN machines (except AMD64), reverse every
+ * 4 bytes in the key. On _BIG_ENDIAN and AMD64, copy the key
+ * without reversing bytes.
+ * For AMD64, do not byte swap for aes_setupkeys().
+ *
+ * SPARCv8/v9 uses a key schedule array with 64-bit elements.
+ * X86/AMD64 uses a key schedule array with 32-bit elements.
+ */
+#ifndef AES_BYTE_SWAP
+ if (IS_P2ALIGNED(cipherKey, sizeof (uint64_t))) {
+ for (i = 0, j = 0; j < keysize; i++, j += 8) {
+ /* LINTED: pointer alignment */
+ keyarr.ka64[i] = *((uint64_t *)&cipherKey[j]);
+ }
+ } else {
+ bcopy(cipherKey, keyarr.ka32, keysize);
+ }
+
+#else /* byte swap */
+ for (i = 0, j = 0; j < keysize; i++, j += 4) {
+ keyarr.ka32[i] = htonl(*(uint32_t *)(void *)&cipherKey[j]);
+ }
+#endif
+
+ aes_setupkeys(newbie, keyarr.ka32, keyBits);
+}
+
+
+/*
+ * Encrypt one block using AES.
+ * Align if needed and (for x86 32-bit only) byte-swap.
+ *
+ * Parameters:
+ * ks Key schedule, of type aes_key_t
+ * pt Input block (plain text)
+ * ct Output block (crypto text). Can overlap with pt
+ */
+int
+aes_encrypt_block(const void *ks, const uint8_t *pt, uint8_t *ct)
+{
+ aes_key_t *ksch = (aes_key_t *)ks;
+
+#ifndef AES_BYTE_SWAP
+ if (IS_P2ALIGNED2(pt, ct, sizeof (uint32_t))) {
+ /* LINTED: pointer alignment */
+ AES_ENCRYPT_IMPL(&ksch->encr_ks.ks32[0], ksch->nr,
+ /* LINTED: pointer alignment */
+ (uint32_t *)pt, (uint32_t *)ct, ksch->flags);
+ } else {
+#endif
+ uint32_t buffer[AES_BLOCK_LEN / sizeof (uint32_t)];
+
+ /* Copy input block into buffer */
+#ifndef AES_BYTE_SWAP
+ bcopy(pt, &buffer, AES_BLOCK_LEN);
+
+#else /* byte swap */
+ buffer[0] = htonl(*(uint32_t *)(void *)&pt[0]);
+ buffer[1] = htonl(*(uint32_t *)(void *)&pt[4]);
+ buffer[2] = htonl(*(uint32_t *)(void *)&pt[8]);
+ buffer[3] = htonl(*(uint32_t *)(void *)&pt[12]);
+#endif
+
+ AES_ENCRYPT_IMPL(&ksch->encr_ks.ks32[0], ksch->nr,
+ buffer, buffer, ksch->flags);
+
+ /* Copy result from buffer to output block */
+#ifndef AES_BYTE_SWAP
+ bcopy(&buffer, ct, AES_BLOCK_LEN);
+ }
+
+#else /* byte swap */
+ *(uint32_t *)(void *)&ct[0] = htonl(buffer[0]);
+ *(uint32_t *)(void *)&ct[4] = htonl(buffer[1]);
+ *(uint32_t *)(void *)&ct[8] = htonl(buffer[2]);
+ *(uint32_t *)(void *)&ct[12] = htonl(buffer[3]);
+#endif
+ return (CRYPTO_SUCCESS);
+}
+
+
+/*
+ * Decrypt one block using AES.
+ * Align and byte-swap if needed.
+ *
+ * Parameters:
+ * ks Key schedule, of type aes_key_t
+ * ct Input block (crypto text)
+ * pt	Output block (plain text). Can overlap with ct
+ */
+int
+aes_decrypt_block(const void *ks, const uint8_t *ct, uint8_t *pt)
+{
+ aes_key_t *ksch = (aes_key_t *)ks;
+
+#ifndef AES_BYTE_SWAP
+ if (IS_P2ALIGNED2(ct, pt, sizeof (uint32_t))) {
+ /* LINTED: pointer alignment */
+ AES_DECRYPT_IMPL(&ksch->decr_ks.ks32[0], ksch->nr,
+ /* LINTED: pointer alignment */
+ (uint32_t *)ct, (uint32_t *)pt, ksch->flags);
+ } else {
+#endif
+ uint32_t buffer[AES_BLOCK_LEN / sizeof (uint32_t)];
+
+ /* Copy input block into buffer */
+#ifndef AES_BYTE_SWAP
+ bcopy(ct, &buffer, AES_BLOCK_LEN);
+
+#else /* byte swap */
+ buffer[0] = htonl(*(uint32_t *)(void *)&ct[0]);
+ buffer[1] = htonl(*(uint32_t *)(void *)&ct[4]);
+ buffer[2] = htonl(*(uint32_t *)(void *)&ct[8]);
+ buffer[3] = htonl(*(uint32_t *)(void *)&ct[12]);
+#endif
+
+ AES_DECRYPT_IMPL(&ksch->decr_ks.ks32[0], ksch->nr,
+ buffer, buffer, ksch->flags);
+
+ /* Copy result from buffer to output block */
+#ifndef AES_BYTE_SWAP
+ bcopy(&buffer, pt, AES_BLOCK_LEN);
+ }
+
+#else /* byte swap */
+ *(uint32_t *)(void *)&pt[0] = htonl(buffer[0]);
+ *(uint32_t *)(void *)&pt[4] = htonl(buffer[1]);
+ *(uint32_t *)(void *)&pt[8] = htonl(buffer[2]);
+ *(uint32_t *)(void *)&pt[12] = htonl(buffer[3]);
+#endif
+
+ return (CRYPTO_SUCCESS);
+}
+
+
+/*
+ * Allocate key schedule for AES.
+ *
+ * Return the pointer and set size to the number of bytes allocated.
+ * Memory allocated must be freed by the caller when done.
+ *
+ * Parameters:
+ * size Size of key schedule allocated, in bytes
+ * kmflag Flag passed to kmem_alloc(9F); ignored in userland.
+ */
+/* ARGSUSED */
+void *
+aes_alloc_keysched(size_t *size, int kmflag)
+{
+ aes_key_t *keysched;
+
+ keysched = (aes_key_t *)kmem_alloc(sizeof (aes_key_t), kmflag);
+ if (keysched != NULL) {
+ *size = sizeof (aes_key_t);
+ return (keysched);
+ }
+ return (NULL);
+}
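+
+/*
+ * Illustrative sketch (not part of this port): typical use of the
+ * allocation, key-setup, and single-block functions above. Error
+ * handling is elided; kept under #if 0 so it is never compiled.
+ */
+#if 0
+static void
+example_single_block(const uint8_t key[16], const uint8_t pt[16],
+    uint8_t ct[16])
+{
+	size_t size;
+	void *ks = aes_alloc_keysched(&size, KM_SLEEP);
+
+	if (ks != NULL) {
+		aes_init_keysched(key, 128, ks);
+		(void) aes_encrypt_block(ks, pt, ct);
+		kmem_free(ks, size);
+	}
+}
+#endif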
+
+
+#ifdef __amd64
+
+#define INTEL_AESNI_FLAG (1 << 25)
+
+/*
+ * Return 1 if executing on Intel with AES-NI instructions,
+ * otherwise 0 (i.e., Intel without AES-NI, or a non-Intel CPU).
+ * Cache the result, as the CPU can't change.
+ */
+static int
+intel_aes_instructions_present(void)
+{
+ static int cached_result = -1;
+ unsigned eax, ebx, ecx, edx;
+ unsigned func, subfunc;
+
+ if (cached_result == -1) { /* first time */
+ /* check for an intel cpu */
+ func = 0;
+ subfunc = 0;
+
+ __asm__ __volatile__(
+ "cpuid"
+ : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+ : "a"(func), "c"(subfunc));
+
+ if (memcmp((char *) (&ebx), "Genu", 4) == 0 &&
+ memcmp((char *) (&edx), "ineI", 4) == 0 &&
+ memcmp((char *) (&ecx), "ntel", 4) == 0) {
+
+ func = 1;
+ subfunc = 0;
+
+ /* check for aes-ni instruction set */
+ __asm__ __volatile__(
+ "cpuid"
+ : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+ : "a"(func), "c"(subfunc));
+
+ cached_result = !!(ecx & INTEL_AESNI_FLAG);
+ } else {
+ cached_result = 0;
+ }
+ }
+
+ return (cached_result);
+}
+
+#endif /* __amd64 */
diff --git a/module/icp/algs/aes/aes_modes.c b/module/icp/algs/aes/aes_modes.c
new file mode 100644
index 000000000..9e4b498ff
--- /dev/null
+++ b/module/icp/algs/aes/aes_modes.c
@@ -0,0 +1,135 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/zfs_context.h>
+#include <modes/modes.h>
+#include <aes/aes_impl.h>
+
+/* Copy a 16-byte AES block from "in" to "out" */
+void
+aes_copy_block(uint8_t *in, uint8_t *out)
+{
+ if (IS_P2ALIGNED2(in, out, sizeof (uint32_t))) {
+ /* LINTED: pointer alignment */
+ *(uint32_t *)&out[0] = *(uint32_t *)&in[0];
+ /* LINTED: pointer alignment */
+ *(uint32_t *)&out[4] = *(uint32_t *)&in[4];
+ /* LINTED: pointer alignment */
+ *(uint32_t *)&out[8] = *(uint32_t *)&in[8];
+ /* LINTED: pointer alignment */
+ *(uint32_t *)&out[12] = *(uint32_t *)&in[12];
+ } else {
+ AES_COPY_BLOCK(in, out);
+ }
+}
+
+
+/* XOR a 16-byte AES block of data into dst */
+void
+aes_xor_block(uint8_t *data, uint8_t *dst)
+{
+ if (IS_P2ALIGNED2(dst, data, sizeof (uint32_t))) {
+ /* LINTED: pointer alignment */
+ *(uint32_t *)&dst[0] ^= *(uint32_t *)&data[0];
+ /* LINTED: pointer alignment */
+ *(uint32_t *)&dst[4] ^= *(uint32_t *)&data[4];
+ /* LINTED: pointer alignment */
+ *(uint32_t *)&dst[8] ^= *(uint32_t *)&data[8];
+ /* LINTED: pointer alignment */
+ *(uint32_t *)&dst[12] ^= *(uint32_t *)&data[12];
+ } else {
+ AES_XOR_BLOCK(data, dst);
+ }
+}
+
+
+/*
+ * Encrypt multiple blocks of data according to mode.
+ */
+int
+aes_encrypt_contiguous_blocks(void *ctx, char *data, size_t length,
+ crypto_data_t *out)
+{
+ aes_ctx_t *aes_ctx = ctx;
+ int rv;
+
+ if (aes_ctx->ac_flags & CTR_MODE) {
+ rv = ctr_mode_contiguous_blocks(ctx, data, length, out,
+ AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
+ } else if (aes_ctx->ac_flags & CCM_MODE) {
+ rv = ccm_mode_encrypt_contiguous_blocks(ctx, data, length,
+ out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
+ aes_xor_block);
+ } else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
+ rv = gcm_mode_encrypt_contiguous_blocks(ctx, data, length,
+ out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
+ aes_xor_block);
+ } else if (aes_ctx->ac_flags & CBC_MODE) {
+ rv = cbc_encrypt_contiguous_blocks(ctx,
+ data, length, out, AES_BLOCK_LEN, aes_encrypt_block,
+ aes_copy_block, aes_xor_block);
+ } else {
+ rv = ecb_cipher_contiguous_blocks(ctx, data, length, out,
+ AES_BLOCK_LEN, aes_encrypt_block);
+ }
+ return (rv);
+}
+
+
+/*
+ * Decrypt multiple blocks of data according to mode.
+ */
+int
+aes_decrypt_contiguous_blocks(void *ctx, char *data, size_t length,
+ crypto_data_t *out)
+{
+ aes_ctx_t *aes_ctx = ctx;
+ int rv;
+
+ if (aes_ctx->ac_flags & CTR_MODE) {
+ rv = ctr_mode_contiguous_blocks(ctx, data, length, out,
+ AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
+ if (rv == CRYPTO_DATA_LEN_RANGE)
+ rv = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
+ } else if (aes_ctx->ac_flags & CCM_MODE) {
+ rv = ccm_mode_decrypt_contiguous_blocks(ctx, data, length,
+ out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
+ aes_xor_block);
+ } else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
+ rv = gcm_mode_decrypt_contiguous_blocks(ctx, data, length,
+ out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
+ aes_xor_block);
+ } else if (aes_ctx->ac_flags & CBC_MODE) {
+ rv = cbc_decrypt_contiguous_blocks(ctx, data, length, out,
+ AES_BLOCK_LEN, aes_decrypt_block, aes_copy_block,
+ aes_xor_block);
+ } else {
+ rv = ecb_cipher_contiguous_blocks(ctx, data, length, out,
+ AES_BLOCK_LEN, aes_decrypt_block);
+ if (rv == CRYPTO_DATA_LEN_RANGE)
+ rv = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
+ }
+ return (rv);
+}
diff --git a/module/icp/algs/modes/cbc.c b/module/icp/algs/modes/cbc.c
new file mode 100644
index 000000000..2cc94ec72
--- /dev/null
+++ b/module/icp/algs/modes/cbc.c
@@ -0,0 +1,305 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/zfs_context.h>
+#include <modes/modes.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/impl.h>
+
+/*
+ * Algorithm independent CBC functions.
+ */
+int
+cbc_encrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
+ crypto_data_t *out, size_t block_size,
+ int (*encrypt)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ size_t remainder = length;
+ size_t need = 0;
+ uint8_t *datap = (uint8_t *)data;
+ uint8_t *blockp;
+ uint8_t *lastp;
+ void *iov_or_mp;
+ offset_t offset;
+ uint8_t *out_data_1;
+ uint8_t *out_data_2;
+ size_t out_data_1_len;
+
+ if (length + ctx->cbc_remainder_len < block_size) {
+ /* accumulate bytes here and return */
+ bcopy(datap,
+ (uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len,
+ length);
+ ctx->cbc_remainder_len += length;
+ ctx->cbc_copy_to = datap;
+ return (CRYPTO_SUCCESS);
+ }
+
+ lastp = (uint8_t *)ctx->cbc_iv;
+ if (out != NULL)
+ crypto_init_ptrs(out, &iov_or_mp, &offset);
+
+ do {
+ /* Unprocessed data from last call. */
+ if (ctx->cbc_remainder_len > 0) {
+ need = block_size - ctx->cbc_remainder_len;
+
+ if (need > remainder)
+ return (CRYPTO_DATA_LEN_RANGE);
+
+ bcopy(datap, &((uint8_t *)ctx->cbc_remainder)
+ [ctx->cbc_remainder_len], need);
+
+ blockp = (uint8_t *)ctx->cbc_remainder;
+ } else {
+ blockp = datap;
+ }
+
+ if (out == NULL) {
+ /*
+ * XOR the previous cipher block or IV with the
+ * current clear block.
+ */
+ xor_block(lastp, blockp);
+ encrypt(ctx->cbc_keysched, blockp, blockp);
+
+ ctx->cbc_lastp = blockp;
+ lastp = blockp;
+
+ if (ctx->cbc_remainder_len > 0) {
+ bcopy(blockp, ctx->cbc_copy_to,
+ ctx->cbc_remainder_len);
+ bcopy(blockp + ctx->cbc_remainder_len, datap,
+ need);
+ }
+ } else {
+ /*
+ * XOR the previous cipher block or IV with the
+ * current clear block.
+ */
+ xor_block(blockp, lastp);
+ encrypt(ctx->cbc_keysched, lastp, lastp);
+ crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
+ &out_data_1_len, &out_data_2, block_size);
+
+ /* copy block to where it belongs */
+ if (out_data_1_len == block_size) {
+ copy_block(lastp, out_data_1);
+ } else {
+ bcopy(lastp, out_data_1, out_data_1_len);
+ if (out_data_2 != NULL) {
+ bcopy(lastp + out_data_1_len,
+ out_data_2,
+ block_size - out_data_1_len);
+ }
+ }
+ /* update offset */
+ out->cd_offset += block_size;
+ }
+
+ /* Update pointer to next block of data to be processed. */
+ if (ctx->cbc_remainder_len != 0) {
+ datap += need;
+ ctx->cbc_remainder_len = 0;
+ } else {
+ datap += block_size;
+ }
+
+ remainder = (size_t)&data[length] - (size_t)datap;
+
+ /* Incomplete last block. */
+ if (remainder > 0 && remainder < block_size) {
+ bcopy(datap, ctx->cbc_remainder, remainder);
+ ctx->cbc_remainder_len = remainder;
+ ctx->cbc_copy_to = datap;
+ goto out;
+ }
+ ctx->cbc_copy_to = NULL;
+
+ } while (remainder > 0);
+
+out:
+ /*
+ * Save the last encrypted block in the context.
+ */
+ if (ctx->cbc_lastp != NULL) {
+ copy_block((uint8_t *)ctx->cbc_lastp, (uint8_t *)ctx->cbc_iv);
+ ctx->cbc_lastp = (uint8_t *)ctx->cbc_iv;
+ }
+
+ return (CRYPTO_SUCCESS);
+}
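+
+/*
+ * Illustrative sketch (not part of this port): the chaining rule the
+ * function above implements, C[i] = E(P[i] ^ C[i-1]) with C[-1] = IV,
+ * written out for a single contiguous, block-aligned, in-place buffer.
+ * The encrypt/xor callbacks mirror those the AES wrappers pass in;
+ * kept under #if 0 so it is never compiled.
+ */
+#if 0
+static void
+cbc_encrypt_inplace_sketch(const void *ks, uint8_t *buf, size_t nblocks,
+    uint8_t *iv, size_t block_size,
+    int (*encrypt)(const void *, const uint8_t *, uint8_t *),
+    void (*xor_block)(uint8_t *, uint8_t *))
+{
+	uint8_t *prev = iv;
+	size_t i;
+
+	for (i = 0; i < nblocks; i++) {
+		uint8_t *blk = buf + i * block_size;
+
+		xor_block(prev, blk);		/* P[i] ^= C[i-1] */
+		(void) encrypt(ks, blk, blk);	/* C[i] = E(P[i] ^ C[i-1]) */
+		prev = blk;
+	}
+}
+#endif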
+
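+/*
+ * During decryption the previous ciphertext block must survive until the
+ * current block has been handled, so two context buffers (cbc_lastblock
+ * and cbc_iv) are ping-ponged: OTHER() yields whichever of the two the
+ * given pointer is not currently referencing.
+ */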
+#define OTHER(a, ctx) \
+ (((a) == (ctx)->cbc_lastblock) ? (ctx)->cbc_iv : (ctx)->cbc_lastblock)
+
+/* ARGSUSED */
+int
+cbc_decrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
+ crypto_data_t *out, size_t block_size,
+ int (*decrypt)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ size_t remainder = length;
+ size_t need = 0;
+ uint8_t *datap = (uint8_t *)data;
+ uint8_t *blockp;
+ uint8_t *lastp;
+ void *iov_or_mp;
+ offset_t offset;
+ uint8_t *out_data_1;
+ uint8_t *out_data_2;
+ size_t out_data_1_len;
+
+ if (length + ctx->cbc_remainder_len < block_size) {
+ /* accumulate bytes here and return */
+ bcopy(datap,
+ (uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len,
+ length);
+ ctx->cbc_remainder_len += length;
+ ctx->cbc_copy_to = datap;
+ return (CRYPTO_SUCCESS);
+ }
+
+ lastp = ctx->cbc_lastp;
+ if (out != NULL)
+ crypto_init_ptrs(out, &iov_or_mp, &offset);
+
+ do {
+ /* Unprocessed data from last call. */
+ if (ctx->cbc_remainder_len > 0) {
+ need = block_size - ctx->cbc_remainder_len;
+
+ if (need > remainder)
+ return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
+
+ bcopy(datap, &((uint8_t *)ctx->cbc_remainder)
+ [ctx->cbc_remainder_len], need);
+
+ blockp = (uint8_t *)ctx->cbc_remainder;
+ } else {
+ blockp = datap;
+ }
+
+ /* LINTED: pointer alignment */
+ copy_block(blockp, (uint8_t *)OTHER((uint64_t *)lastp, ctx));
+
+ if (out != NULL) {
+ decrypt(ctx->cbc_keysched, blockp,
+ (uint8_t *)ctx->cbc_remainder);
+ blockp = (uint8_t *)ctx->cbc_remainder;
+ } else {
+ decrypt(ctx->cbc_keysched, blockp, blockp);
+ }
+
+ /*
+ * XOR the previous cipher block or IV with the
+ * currently decrypted block.
+ */
+ xor_block(lastp, blockp);
+
+ /* LINTED: pointer alignment */
+ lastp = (uint8_t *)OTHER((uint64_t *)lastp, ctx);
+
+ if (out != NULL) {
+ crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
+ &out_data_1_len, &out_data_2, block_size);
+
+ bcopy(blockp, out_data_1, out_data_1_len);
+ if (out_data_2 != NULL) {
+ bcopy(blockp + out_data_1_len, out_data_2,
+ block_size - out_data_1_len);
+ }
+
+ /* update offset */
+ out->cd_offset += block_size;
+
+ } else if (ctx->cbc_remainder_len > 0) {
+ /* copy temporary block to where it belongs */
+ bcopy(blockp, ctx->cbc_copy_to, ctx->cbc_remainder_len);
+ bcopy(blockp + ctx->cbc_remainder_len, datap, need);
+ }
+
+ /* Update pointer to next block of data to be processed. */
+ if (ctx->cbc_remainder_len != 0) {
+ datap += need;
+ ctx->cbc_remainder_len = 0;
+ } else {
+ datap += block_size;
+ }
+
+ remainder = (size_t)&data[length] - (size_t)datap;
+
+ /* Incomplete last block. */
+ if (remainder > 0 && remainder < block_size) {
+ bcopy(datap, ctx->cbc_remainder, remainder);
+ ctx->cbc_remainder_len = remainder;
+ ctx->cbc_lastp = lastp;
+ ctx->cbc_copy_to = datap;
+ return (CRYPTO_SUCCESS);
+ }
+ ctx->cbc_copy_to = NULL;
+
+ } while (remainder > 0);
+
+ ctx->cbc_lastp = lastp;
+ return (CRYPTO_SUCCESS);
+}
+
+int
+cbc_init_ctx(cbc_ctx_t *cbc_ctx, char *param, size_t param_len,
+ size_t block_size, void (*copy_block)(uint8_t *, uint64_t *))
+{
+ /*
+ * Copy IV into context.
+ *
+	 * If param == NULL then the IV comes from the
+ * cd_miscdata field in the crypto_data structure.
+ */
+ if (param != NULL) {
+ ASSERT(param_len == block_size);
+ copy_block((uchar_t *)param, cbc_ctx->cbc_iv);
+ }
+
+ cbc_ctx->cbc_lastp = (uint8_t *)&cbc_ctx->cbc_iv[0];
+ cbc_ctx->cbc_flags |= CBC_MODE;
+ return (CRYPTO_SUCCESS);
+}
+
+/* ARGSUSED */
+void *
+cbc_alloc_ctx(int kmflag)
+{
+ cbc_ctx_t *cbc_ctx;
+
+ if ((cbc_ctx = kmem_zalloc(sizeof (cbc_ctx_t), kmflag)) == NULL)
+ return (NULL);
+
+ cbc_ctx->cbc_flags = CBC_MODE;
+ return (cbc_ctx);
+}
diff --git a/module/icp/algs/modes/ccm.c b/module/icp/algs/modes/ccm.c
new file mode 100644
index 000000000..22aeb0a6a
--- /dev/null
+++ b/module/icp/algs/modes/ccm.c
@@ -0,0 +1,920 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/zfs_context.h>
+#include <modes/modes.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/impl.h>
+
+#if defined(__i386) || defined(__amd64)
+#include <sys/byteorder.h>
+#define UNALIGNED_POINTERS_PERMITTED
+#endif
+
+/*
+ * Encrypt multiple blocks of data in CCM mode. Decrypt for CCM mode
+ * is done in another function.
+ */
+int
+ccm_mode_encrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
+ crypto_data_t *out, size_t block_size,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ size_t remainder = length;
+ size_t need = 0;
+ uint8_t *datap = (uint8_t *)data;
+ uint8_t *blockp;
+ uint8_t *lastp;
+ void *iov_or_mp;
+ offset_t offset;
+ uint8_t *out_data_1;
+ uint8_t *out_data_2;
+ size_t out_data_1_len;
+ uint64_t counter;
+ uint8_t *mac_buf;
+
+ if (length + ctx->ccm_remainder_len < block_size) {
+ /* accumulate bytes here and return */
+ bcopy(datap,
+ (uint8_t *)ctx->ccm_remainder + ctx->ccm_remainder_len,
+ length);
+ ctx->ccm_remainder_len += length;
+ ctx->ccm_copy_to = datap;
+ return (CRYPTO_SUCCESS);
+ }
+
+ lastp = (uint8_t *)ctx->ccm_cb;
+ if (out != NULL)
+ crypto_init_ptrs(out, &iov_or_mp, &offset);
+
+ mac_buf = (uint8_t *)ctx->ccm_mac_buf;
+
+ do {
+ /* Unprocessed data from last call. */
+ if (ctx->ccm_remainder_len > 0) {
+ need = block_size - ctx->ccm_remainder_len;
+
+ if (need > remainder)
+ return (CRYPTO_DATA_LEN_RANGE);
+
+ bcopy(datap, &((uint8_t *)ctx->ccm_remainder)
+ [ctx->ccm_remainder_len], need);
+
+ blockp = (uint8_t *)ctx->ccm_remainder;
+ } else {
+ blockp = datap;
+ }
+
+ /*
+ * do CBC MAC
+ *
+		 * XOR the previous cipher block with the current clear block.
+		 * mac_buf always contains the previous cipher block.
+ */
+ xor_block(blockp, mac_buf);
+ encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
+
+ /* ccm_cb is the counter block */
+ encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb,
+ (uint8_t *)ctx->ccm_tmp);
+
+ lastp = (uint8_t *)ctx->ccm_tmp;
+
+ /*
+ * Increment counter. Counter bits are confined
+ * to the bottom 64 bits of the counter block.
+ */
+#ifdef _LITTLE_ENDIAN
+ counter = ntohll(ctx->ccm_cb[1] & ctx->ccm_counter_mask);
+ counter = htonll(counter + 1);
+#else
+ counter = ctx->ccm_cb[1] & ctx->ccm_counter_mask;
+ counter++;
+#endif /* _LITTLE_ENDIAN */
+ counter &= ctx->ccm_counter_mask;
+ ctx->ccm_cb[1] =
+ (ctx->ccm_cb[1] & ~(ctx->ccm_counter_mask)) | counter;
+
+ /*
+ * XOR encrypted counter block with the current clear block.
+ */
+ xor_block(blockp, lastp);
+
+ ctx->ccm_processed_data_len += block_size;
+
+ if (out == NULL) {
+ if (ctx->ccm_remainder_len > 0) {
+ bcopy(blockp, ctx->ccm_copy_to,
+ ctx->ccm_remainder_len);
+ bcopy(blockp + ctx->ccm_remainder_len, datap,
+ need);
+ }
+ } else {
+ crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
+ &out_data_1_len, &out_data_2, block_size);
+
+ /* copy block to where it belongs */
+ if (out_data_1_len == block_size) {
+ copy_block(lastp, out_data_1);
+ } else {
+ bcopy(lastp, out_data_1, out_data_1_len);
+ if (out_data_2 != NULL) {
+ bcopy(lastp + out_data_1_len,
+ out_data_2,
+ block_size - out_data_1_len);
+ }
+ }
+ /* update offset */
+ out->cd_offset += block_size;
+ }
+
+ /* Update pointer to next block of data to be processed. */
+ if (ctx->ccm_remainder_len != 0) {
+ datap += need;
+ ctx->ccm_remainder_len = 0;
+ } else {
+ datap += block_size;
+ }
+
+ remainder = (size_t)&data[length] - (size_t)datap;
+
+ /* Incomplete last block. */
+ if (remainder > 0 && remainder < block_size) {
+ bcopy(datap, ctx->ccm_remainder, remainder);
+ ctx->ccm_remainder_len = remainder;
+ ctx->ccm_copy_to = datap;
+ goto out;
+ }
+ ctx->ccm_copy_to = NULL;
+
+ } while (remainder > 0);
+
+out:
+ return (CRYPTO_SUCCESS);
+}
+
+void
+calculate_ccm_mac(ccm_ctx_t *ctx, uint8_t *ccm_mac,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *))
+{
+ uint64_t counter;
+ uint8_t *counterp, *mac_buf;
+ int i;
+
+ mac_buf = (uint8_t *)ctx->ccm_mac_buf;
+
+	/* first counter block starts with index 0 */
+ counter = 0;
+ ctx->ccm_cb[1] = (ctx->ccm_cb[1] & ~(ctx->ccm_counter_mask)) | counter;
+
+ counterp = (uint8_t *)ctx->ccm_tmp;
+ encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb, counterp);
+
+ /* calculate XOR of MAC with first counter block */
+ for (i = 0; i < ctx->ccm_mac_len; i++) {
+ ccm_mac[i] = mac_buf[i] ^ counterp[i];
+ }
+}
+
+/* ARGSUSED */
+int
+ccm_encrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ uint8_t *lastp, *mac_buf, *ccm_mac_p, *macp = NULL;
+ void *iov_or_mp;
+ offset_t offset;
+ uint8_t *out_data_1;
+ uint8_t *out_data_2;
+ size_t out_data_1_len;
+ int i;
+
+ if (out->cd_length < (ctx->ccm_remainder_len + ctx->ccm_mac_len)) {
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+
+ /*
+	 * When we get here, the number of bytes of payload processed,
+	 * plus whatever data remains, should be the same as the data
+	 * length that was passed in at init time.
+ */
+ if ((ctx->ccm_processed_data_len + ctx->ccm_remainder_len)
+ != (ctx->ccm_data_len)) {
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+
+ mac_buf = (uint8_t *)ctx->ccm_mac_buf;
+
+ if (ctx->ccm_remainder_len > 0) {
+
+ /* ccm_mac_input_buf is not used for encryption */
+ macp = (uint8_t *)ctx->ccm_mac_input_buf;
+ bzero(macp, block_size);
+
+ /* copy remainder to temporary buffer */
+ bcopy(ctx->ccm_remainder, macp, ctx->ccm_remainder_len);
+
+ /* calculate the CBC MAC */
+ xor_block(macp, mac_buf);
+ encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
+
+ /* calculate the counter mode */
+ lastp = (uint8_t *)ctx->ccm_tmp;
+ encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb, lastp);
+
+ /* XOR with counter block */
+ for (i = 0; i < ctx->ccm_remainder_len; i++) {
+ macp[i] ^= lastp[i];
+ }
+ ctx->ccm_processed_data_len += ctx->ccm_remainder_len;
+ }
+
+ /* Calculate the CCM MAC */
+ ccm_mac_p = (uint8_t *)ctx->ccm_tmp;
+ calculate_ccm_mac(ctx, ccm_mac_p, encrypt_block);
+
+ crypto_init_ptrs(out, &iov_or_mp, &offset);
+ crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
+ &out_data_1_len, &out_data_2,
+ ctx->ccm_remainder_len + ctx->ccm_mac_len);
+
+ if (ctx->ccm_remainder_len > 0) {
+
+ /* copy temporary block to where it belongs */
+ if (out_data_2 == NULL) {
+ /* everything will fit in out_data_1 */
+ bcopy(macp, out_data_1, ctx->ccm_remainder_len);
+ bcopy(ccm_mac_p, out_data_1 + ctx->ccm_remainder_len,
+ ctx->ccm_mac_len);
+ } else {
+
+ if (out_data_1_len < ctx->ccm_remainder_len) {
+
+ size_t data_2_len_used;
+
+ bcopy(macp, out_data_1, out_data_1_len);
+
+ data_2_len_used = ctx->ccm_remainder_len
+ - out_data_1_len;
+
+ bcopy((uint8_t *)macp + out_data_1_len,
+ out_data_2, data_2_len_used);
+ bcopy(ccm_mac_p, out_data_2 + data_2_len_used,
+ ctx->ccm_mac_len);
+ } else {
+ bcopy(macp, out_data_1, out_data_1_len);
+ if (out_data_1_len == ctx->ccm_remainder_len) {
+ /* mac will be in out_data_2 */
+ bcopy(ccm_mac_p, out_data_2,
+ ctx->ccm_mac_len);
+ } else {
+ size_t len_not_used = out_data_1_len -
+ ctx->ccm_remainder_len;
+ /*
+					 * part of the mac will be in
+					 * out_data_1, and the rest will
+					 * be in out_data_2
+ */
+ bcopy(ccm_mac_p,
+ out_data_1 + ctx->ccm_remainder_len,
+ len_not_used);
+ bcopy(ccm_mac_p + len_not_used,
+ out_data_2,
+ ctx->ccm_mac_len - len_not_used);
+
+ }
+ }
+ }
+ } else {
+ /* copy block to where it belongs */
+ bcopy(ccm_mac_p, out_data_1, out_data_1_len);
+ if (out_data_2 != NULL) {
+ bcopy(ccm_mac_p + out_data_1_len, out_data_2,
+ block_size - out_data_1_len);
+ }
+ }
+ out->cd_offset += ctx->ccm_remainder_len + ctx->ccm_mac_len;
+ ctx->ccm_remainder_len = 0;
+ return (CRYPTO_SUCCESS);
+}
+
+/*
+ * This will only deal with decrypting the last block of the input that
+ * might not be a multiple of block length.
+ */
+void
+ccm_decrypt_incomplete_block(ccm_ctx_t *ctx,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *))
+{
+ uint8_t *datap, *outp, *counterp;
+ int i;
+
+ datap = (uint8_t *)ctx->ccm_remainder;
+ outp = &((ctx->ccm_pt_buf)[ctx->ccm_processed_data_len]);
+
+ counterp = (uint8_t *)ctx->ccm_tmp;
+ encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb, counterp);
+
+ /* XOR with counter block */
+ for (i = 0; i < ctx->ccm_remainder_len; i++) {
+ outp[i] = datap[i] ^ counterp[i];
+ }
+}
+
+/*
+ * This will decrypt the cipher text. However, the plaintext won't be
+ * returned to the caller. It will be returned when decrypt_final() is
+ * called, provided the MAC matches.
+ */
+/* ARGSUSED */
+int
+ccm_mode_decrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
+ crypto_data_t *out, size_t block_size,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ size_t remainder = length;
+ size_t need = 0;
+ uint8_t *datap = (uint8_t *)data;
+ uint8_t *blockp;
+ uint8_t *cbp;
+ uint64_t counter;
+ size_t pt_len, total_decrypted_len, mac_len, pm_len, pd_len;
+ uint8_t *resultp;
+
+
+ pm_len = ctx->ccm_processed_mac_len;
+
+ if (pm_len > 0) {
+ uint8_t *tmp;
+ /*
+ * all ciphertext has been processed, just waiting for
+		 * the rest of the MAC value
+ */
+ if ((pm_len + length) > ctx->ccm_mac_len) {
+ return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
+ }
+ tmp = (uint8_t *)ctx->ccm_mac_input_buf;
+
+ bcopy(datap, tmp + pm_len, length);
+
+ ctx->ccm_processed_mac_len += length;
+ return (CRYPTO_SUCCESS);
+ }
+
+ /*
+ * If we decrypt the given data, what total amount of data would
+ * have been decrypted?
+ */
+ pd_len = ctx->ccm_processed_data_len;
+ total_decrypted_len = pd_len + length + ctx->ccm_remainder_len;
+
+ if (total_decrypted_len >
+ (ctx->ccm_data_len + ctx->ccm_mac_len)) {
+ return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
+ }
+
+ pt_len = ctx->ccm_data_len;
+
+ if (total_decrypted_len > pt_len) {
+ /*
+ * part of the input will be the MAC, need to isolate that
+ * to be dealt with later. The left-over data in
+ * ccm_remainder_len from last time will not be part of the
+		 * MAC. Otherwise, it would already have been taken out
+		 * during the previous call.
+ */
+ size_t pt_part = pt_len - pd_len - ctx->ccm_remainder_len;
+
+ mac_len = length - pt_part;
+
+ ctx->ccm_processed_mac_len = mac_len;
+ bcopy(data + pt_part, ctx->ccm_mac_input_buf, mac_len);
+
+ if (pt_part + ctx->ccm_remainder_len < block_size) {
+ /*
+			 * since this is the last of the ciphertext,
+			 * just decrypt it here
+ */
+ bcopy(datap, &((uint8_t *)ctx->ccm_remainder)
+ [ctx->ccm_remainder_len], pt_part);
+ ctx->ccm_remainder_len += pt_part;
+ ccm_decrypt_incomplete_block(ctx, encrypt_block);
+ ctx->ccm_processed_data_len += ctx->ccm_remainder_len;
+ ctx->ccm_remainder_len = 0;
+ return (CRYPTO_SUCCESS);
+ } else {
+ /* let rest of the code handle this */
+ length = pt_part;
+ }
+ } else if (length + ctx->ccm_remainder_len < block_size) {
+ /* accumulate bytes here and return */
+ bcopy(datap,
+ (uint8_t *)ctx->ccm_remainder + ctx->ccm_remainder_len,
+ length);
+ ctx->ccm_remainder_len += length;
+ ctx->ccm_copy_to = datap;
+ return (CRYPTO_SUCCESS);
+ }
+
+ do {
+ /* Unprocessed data from last call. */
+ if (ctx->ccm_remainder_len > 0) {
+ need = block_size - ctx->ccm_remainder_len;
+
+ if (need > remainder)
+ return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
+
+ bcopy(datap, &((uint8_t *)ctx->ccm_remainder)
+ [ctx->ccm_remainder_len], need);
+
+ blockp = (uint8_t *)ctx->ccm_remainder;
+ } else {
+ blockp = datap;
+ }
+
+ /* Calculate the counter mode, ccm_cb is the counter block */
+ cbp = (uint8_t *)ctx->ccm_tmp;
+ encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb, cbp);
+
+ /*
+ * Increment counter.
+ * Counter bits are confined to the bottom 64 bits
+ */
+#ifdef _LITTLE_ENDIAN
+ counter = ntohll(ctx->ccm_cb[1] & ctx->ccm_counter_mask);
+ counter = htonll(counter + 1);
+#else
+ counter = ctx->ccm_cb[1] & ctx->ccm_counter_mask;
+ counter++;
+#endif /* _LITTLE_ENDIAN */
+ counter &= ctx->ccm_counter_mask;
+ ctx->ccm_cb[1] =
+ (ctx->ccm_cb[1] & ~(ctx->ccm_counter_mask)) | counter;
+
+ /* XOR with the ciphertext */
+ xor_block(blockp, cbp);
+
+ /* Copy the plaintext to the "holding buffer" */
+ resultp = (uint8_t *)ctx->ccm_pt_buf +
+ ctx->ccm_processed_data_len;
+ copy_block(cbp, resultp);
+
+ ctx->ccm_processed_data_len += block_size;
+
+ ctx->ccm_lastp = blockp;
+
+ /* Update pointer to next block of data to be processed. */
+ if (ctx->ccm_remainder_len != 0) {
+ datap += need;
+ ctx->ccm_remainder_len = 0;
+ } else {
+ datap += block_size;
+ }
+
+ remainder = (size_t)&data[length] - (size_t)datap;
+
+ /* Incomplete last block */
+ if (remainder > 0 && remainder < block_size) {
+ bcopy(datap, ctx->ccm_remainder, remainder);
+ ctx->ccm_remainder_len = remainder;
+ ctx->ccm_copy_to = datap;
+ if (ctx->ccm_processed_mac_len > 0) {
+ /*
+				 * not expecting any more ciphertext, just
+ * compute plaintext for the remaining input
+ */
+ ccm_decrypt_incomplete_block(ctx,
+ encrypt_block);
+ ctx->ccm_processed_data_len += remainder;
+ ctx->ccm_remainder_len = 0;
+ }
+ goto out;
+ }
+ ctx->ccm_copy_to = NULL;
+
+ } while (remainder > 0);
+
+out:
+ return (CRYPTO_SUCCESS);
+}
+
+int
+ccm_decrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ size_t mac_remain, pt_len;
+ uint8_t *pt, *mac_buf, *macp, *ccm_mac_p;
+ int rv;
+
+ pt_len = ctx->ccm_data_len;
+
+ /* Make sure output buffer can fit all of the plaintext */
+ if (out->cd_length < pt_len) {
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+
+ pt = ctx->ccm_pt_buf;
+ mac_remain = ctx->ccm_processed_data_len;
+ mac_buf = (uint8_t *)ctx->ccm_mac_buf;
+
+ macp = (uint8_t *)ctx->ccm_tmp;
+
+ while (mac_remain > 0) {
+
+ if (mac_remain < block_size) {
+ bzero(macp, block_size);
+ bcopy(pt, macp, mac_remain);
+ mac_remain = 0;
+ } else {
+ copy_block(pt, macp);
+ mac_remain -= block_size;
+ pt += block_size;
+ }
+
+ /* calculate the CBC MAC */
+ xor_block(macp, mac_buf);
+ encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
+ }
+
+ /* Calculate the CCM MAC */
+ ccm_mac_p = (uint8_t *)ctx->ccm_tmp;
+ calculate_ccm_mac((ccm_ctx_t *)ctx, ccm_mac_p, encrypt_block);
+
+ /* compare the input CCM MAC value with what we calculated */
+ if (bcmp(ctx->ccm_mac_input_buf, ccm_mac_p, ctx->ccm_mac_len)) {
+ /* They don't match */
+ return (CRYPTO_INVALID_MAC);
+ } else {
+ rv = crypto_put_output_data(ctx->ccm_pt_buf, out, pt_len);
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ out->cd_offset += pt_len;
+ }
+ return (CRYPTO_SUCCESS);
+}
+
+int
+ccm_validate_args(CK_AES_CCM_PARAMS *ccm_param, boolean_t is_encrypt_init)
+{
+ size_t macSize, nonceSize;
+ uint8_t q;
+ uint64_t maxValue;
+
+ /*
+ * Check the length of the MAC. The only valid
+ * lengths for the MAC are: 4, 6, 8, 10, 12, 14, 16
+ */
+ macSize = ccm_param->ulMACSize;
+ if ((macSize < 4) || (macSize > 16) || ((macSize % 2) != 0)) {
+ return (CRYPTO_MECHANISM_PARAM_INVALID);
+ }
+
+ /* Check the nonce length. Valid values are 7, 8, 9, 10, 11, 12, 13 */
+ nonceSize = ccm_param->ulNonceSize;
+ if ((nonceSize < 7) || (nonceSize > 13)) {
+ return (CRYPTO_MECHANISM_PARAM_INVALID);
+ }
+
+ /* q is the length of the field storing the length, in bytes */
+ q = (uint8_t)((15 - nonceSize) & 0xFF);
+
+
+ /*
+	 * For decryption, make sure the size of the ciphertext is at
+	 * least as large as the MAC length
+ */
+ if ((!is_encrypt_init) && (ccm_param->ulDataSize < macSize)) {
+ return (CRYPTO_MECHANISM_PARAM_INVALID);
+ }
+
+ /*
+ * Check to make sure the length of the payload is within the
+ * range of values allowed by q
+ */
+ if (q < 8) {
+ maxValue = (1ULL << (q * 8)) - 1;
+ } else {
+ maxValue = ULONG_MAX;
+ }
+
+ if (ccm_param->ulDataSize > maxValue) {
+ return (CRYPTO_MECHANISM_PARAM_INVALID);
+ }
+ return (CRYPTO_SUCCESS);
+}
+
+/*
+ * Format the first block used in CBC-MAC (B0) and the initial counter
+ * block based on formatting functions and counter generation functions
+ * specified in RFC 3610 and NIST publication 800-38C, appendix A
+ *
+ * b0 is the first block used in CBC-MAC
+ * cb0 is the first counter block
+ *
+ * It's assumed that the arguments b0 and cb0 are preallocated AES blocks
+ *
+ */
+static void
+ccm_format_initial_blocks(uchar_t *nonce, ulong_t nonceSize,
+ ulong_t authDataSize, uint8_t *b0, ccm_ctx_t *aes_ctx)
+{
+ uint64_t payloadSize;
+ uint8_t t, q, have_adata = 0;
+ size_t limit;
+ int i, j, k;
+ uint64_t mask = 0;
+ uint8_t *cb;
+
+ q = (uint8_t)((15 - nonceSize) & 0xFF);
+ t = (uint8_t)((aes_ctx->ccm_mac_len) & 0xFF);
+
+ /* Construct the first octet of b0 */
+ if (authDataSize > 0) {
+ have_adata = 1;
+ }
+ b0[0] = (have_adata << 6) | (((t - 2) / 2) << 3) | (q - 1);
+
+ /* copy the nonce value into b0 */
+ bcopy(nonce, &(b0[1]), nonceSize);
+
+ /* store the length of the payload into b0 */
+ bzero(&(b0[1+nonceSize]), q);
+
+ payloadSize = aes_ctx->ccm_data_len;
+ limit = 8 < q ? 8 : q;
+
+ for (i = 0, j = 0, k = 15; i < limit; i++, j += 8, k--) {
+ b0[k] = (uint8_t)((payloadSize >> j) & 0xFF);
+ }
+
+ /* format the counter block */
+
+ cb = (uint8_t *)aes_ctx->ccm_cb;
+
+ cb[0] = 0x07 & (q-1); /* first byte */
+
+ /* copy the nonce value into the counter block */
+ bcopy(nonce, &(cb[1]), nonceSize);
+
+ bzero(&(cb[1+nonceSize]), q);
+
+ /* Create the mask for the counter field based on the size of nonce */
+ q <<= 3;
+ while (q-- > 0) {
+ mask |= (1ULL << q);
+ }
+
+#ifdef _LITTLE_ENDIAN
+ mask = htonll(mask);
+#endif
+ aes_ctx->ccm_counter_mask = mask;
+
+ /*
+	 * During calculation, we start using counter block 1; we set
+	 * it up right here.
+ * We can just set the last byte to have the value 1, because
+ * even with the biggest nonce of 13, the last byte of the
+ * counter block will be used for the counter value.
+ */
+ cb[15] = 0x01;
+}
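+
+/*
+ * Worked example for the flags octet built above (values chosen for
+ * illustration): with associated data present, a 16-byte MAC (t = 16)
+ * and a 12-byte nonce (q = 15 - 12 = 3),
+ *
+ *	b0[0] = (1 << 6) | (((16 - 2) / 2) << 3) | (3 - 1)
+ *	      = 0x40 | 0x38 | 0x02 = 0x7a
+ *
+ * and the first octet of the counter block is 0x07 & (q - 1) = 0x02.
+ */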
+
+/*
+ * Encode the length of the associated data as
+ * specified in RFC 3610 and NIST publication 800-38C, appendix A
+ */
+static void
+encode_adata_len(ulong_t auth_data_len, uint8_t *encoded, size_t *encoded_len)
+{
+#ifdef UNALIGNED_POINTERS_PERMITTED
+ uint32_t *lencoded_ptr;
+#ifdef _LP64
+ uint64_t *llencoded_ptr;
+#endif
+#endif /* UNALIGNED_POINTERS_PERMITTED */
+
+ if (auth_data_len < ((1ULL<<16) - (1ULL<<8))) {
+ /* 0 < a < (2^16-2^8) */
+ *encoded_len = 2;
+ encoded[0] = (auth_data_len & 0xff00) >> 8;
+ encoded[1] = auth_data_len & 0xff;
+
+ } else if ((auth_data_len >= ((1ULL<<16) - (1ULL<<8))) &&
+ (auth_data_len < (1ULL << 31))) {
+		/* (2^16-2^8) <= a < 2^31 */
+ *encoded_len = 6;
+ encoded[0] = 0xff;
+ encoded[1] = 0xfe;
+#ifdef UNALIGNED_POINTERS_PERMITTED
+ lencoded_ptr = (uint32_t *)&encoded[2];
+ *lencoded_ptr = htonl(auth_data_len);
+#else
+ encoded[2] = (auth_data_len & 0xff000000) >> 24;
+ encoded[3] = (auth_data_len & 0xff0000) >> 16;
+ encoded[4] = (auth_data_len & 0xff00) >> 8;
+ encoded[5] = auth_data_len & 0xff;
+#endif /* UNALIGNED_POINTERS_PERMITTED */
+
+#ifdef _LP64
+ } else {
+		/* 2^31 <= a < 2^64 */
+ *encoded_len = 10;
+ encoded[0] = 0xff;
+ encoded[1] = 0xff;
+#ifdef UNALIGNED_POINTERS_PERMITTED
+ llencoded_ptr = (uint64_t *)&encoded[2];
+		*llencoded_ptr = htonll(auth_data_len);
+#else
+ encoded[2] = (auth_data_len & 0xff00000000000000) >> 56;
+ encoded[3] = (auth_data_len & 0xff000000000000) >> 48;
+ encoded[4] = (auth_data_len & 0xff0000000000) >> 40;
+ encoded[5] = (auth_data_len & 0xff00000000) >> 32;
+ encoded[6] = (auth_data_len & 0xff000000) >> 24;
+ encoded[7] = (auth_data_len & 0xff0000) >> 16;
+ encoded[8] = (auth_data_len & 0xff00) >> 8;
+ encoded[9] = auth_data_len & 0xff;
+#endif /* UNALIGNED_POINTERS_PERMITTED */
+#endif /* _LP64 */
+ }
+}
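+
+/*
+ * Illustrative examples of the encoding above: an associated data length
+ * of 0x1234 octets is below 2^16 - 2^8 and encodes in two octets as
+ * 0x12 0x34; a length of 100000 (0x000186a0) crosses that boundary and
+ * becomes the six octets 0xff 0xfe 0x00 0x01 0x86 0xa0.
+ */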
+
+/*
+ * The following function should be called at encrypt or decrypt init time
+ * for AES CCM mode.
+ */
+int
+ccm_init(ccm_ctx_t *ctx, unsigned char *nonce, size_t nonce_len,
+ unsigned char *auth_data, size_t auth_data_len, size_t block_size,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ uint8_t *mac_buf, *datap, *ivp, *authp;
+ size_t remainder, processed;
+ uint8_t encoded_a[10]; /* max encoded auth data length is 10 octets */
+ size_t encoded_a_len = 0;
+
+ mac_buf = (uint8_t *)&(ctx->ccm_mac_buf);
+
+ /*
+ * Format the 1st block for CBC-MAC and construct the
+ * 1st counter block.
+ *
+	 * ctx->ccm_cb is used for storing the counter block;
+	 * mac_buf will store b0 at this time.
+ */
+ ccm_format_initial_blocks(nonce, nonce_len,
+ auth_data_len, mac_buf, ctx);
+
+ /* The IV for CBC MAC for AES CCM mode is always zero */
+ ivp = (uint8_t *)ctx->ccm_tmp;
+ bzero(ivp, block_size);
+
+ xor_block(ivp, mac_buf);
+
+	/* encrypt b0, which contains the nonce */
+ encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
+
+ /* take care of the associated data, if any */
+ if (auth_data_len == 0) {
+ return (CRYPTO_SUCCESS);
+ }
+
+ encode_adata_len(auth_data_len, encoded_a, &encoded_a_len);
+
+ remainder = auth_data_len;
+
+ /* 1st block: it contains encoded associated data, and some data */
+ authp = (uint8_t *)ctx->ccm_tmp;
+ bzero(authp, block_size);
+ bcopy(encoded_a, authp, encoded_a_len);
+ processed = block_size - encoded_a_len;
+ if (processed > auth_data_len) {
+ /* in case auth_data is very small */
+ processed = auth_data_len;
+ }
+ bcopy(auth_data, authp+encoded_a_len, processed);
+ /* xor with previous buffer */
+ xor_block(authp, mac_buf);
+ encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
+ remainder -= processed;
+ if (remainder == 0) {
+ /* a small amount of associated data, it's all done now */
+ return (CRYPTO_SUCCESS);
+ }
+
+ do {
+ if (remainder < block_size) {
+ /*
+			 * There's less than a full block of data; pad
+			 * the rest of the buffer with zeros
+ */
+ bzero(authp, block_size);
+ bcopy(&(auth_data[processed]), authp, remainder);
+ datap = (uint8_t *)authp;
+ remainder = 0;
+ } else {
+ datap = (uint8_t *)(&(auth_data[processed]));
+ processed += block_size;
+ remainder -= block_size;
+ }
+
+ xor_block(datap, mac_buf);
+ encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
+
+ } while (remainder > 0);
+
+ return (CRYPTO_SUCCESS);
+}
+
+int
+ccm_init_ctx(ccm_ctx_t *ccm_ctx, char *param, int kmflag,
+ boolean_t is_encrypt_init, size_t block_size,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ int rv;
+ CK_AES_CCM_PARAMS *ccm_param;
+
+ if (param != NULL) {
+ ccm_param = (CK_AES_CCM_PARAMS *)param;
+
+ if ((rv = ccm_validate_args(ccm_param,
+ is_encrypt_init)) != 0) {
+ return (rv);
+ }
+
+ ccm_ctx->ccm_mac_len = ccm_param->ulMACSize;
+ if (is_encrypt_init) {
+ ccm_ctx->ccm_data_len = ccm_param->ulDataSize;
+ } else {
+ ccm_ctx->ccm_data_len =
+ ccm_param->ulDataSize - ccm_ctx->ccm_mac_len;
+ ccm_ctx->ccm_processed_mac_len = 0;
+ }
+ ccm_ctx->ccm_processed_data_len = 0;
+
+ ccm_ctx->ccm_flags |= CCM_MODE;
+ } else {
+ rv = CRYPTO_MECHANISM_PARAM_INVALID;
+ goto out;
+ }
+
+ if (ccm_init(ccm_ctx, ccm_param->nonce, ccm_param->ulNonceSize,
+ ccm_param->authData, ccm_param->ulAuthDataSize, block_size,
+ encrypt_block, xor_block) != 0) {
+ rv = CRYPTO_MECHANISM_PARAM_INVALID;
+ goto out;
+ }
+ if (!is_encrypt_init) {
+ /* allocate buffer for storing decrypted plaintext */
+ ccm_ctx->ccm_pt_buf = vmem_alloc(ccm_ctx->ccm_data_len,
+ kmflag);
+ if (ccm_ctx->ccm_pt_buf == NULL) {
+ rv = CRYPTO_HOST_MEMORY;
+ }
+ }
+out:
+ return (rv);
+}
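+
+/*
+ * Illustrative sketch (not part of this port): populating
+ * CK_AES_CCM_PARAMS before an encrypt-side ccm_init_ctx() call. Only
+ * the fields referenced above are shown; the buffer arguments are
+ * hypothetical. Kept under #if 0 so it is never compiled.
+ */
+#if 0
+static void
+example_ccm_params(CK_AES_CCM_PARAMS *p, uchar_t *nonce_buf,
+    uchar_t *aad_buf, ulong_t aad_len, ulong_t payload_len)
+{
+	p->ulMACSize = 16;		/* even value in [4, 16] */
+	p->ulNonceSize = 12;		/* value in [7, 13] */
+	p->nonce = nonce_buf;
+	p->ulAuthDataSize = aad_len;
+	p->authData = aad_buf;
+	p->ulDataSize = payload_len;	/* plaintext length on encrypt */
+}
+#endif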
+
+void *
+ccm_alloc_ctx(int kmflag)
+{
+ ccm_ctx_t *ccm_ctx;
+
+ if ((ccm_ctx = kmem_zalloc(sizeof (ccm_ctx_t), kmflag)) == NULL)
+ return (NULL);
+
+ ccm_ctx->ccm_flags = CCM_MODE;
+ return (ccm_ctx);
+}
diff --git a/module/icp/algs/modes/ctr.c b/module/icp/algs/modes/ctr.c
new file mode 100644
index 000000000..77ba28ddd
--- /dev/null
+++ b/module/icp/algs/modes/ctr.c
@@ -0,0 +1,238 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/zfs_context.h>
+#include <modes/modes.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/impl.h>
+#include <sys/byteorder.h>
+
+/*
+ * Encrypt and decrypt multiple blocks of data in counter mode.
+ */
+int
+ctr_mode_contiguous_blocks(ctr_ctx_t *ctx, char *data, size_t length,
+ crypto_data_t *out, size_t block_size,
+ int (*cipher)(const void *ks, const uint8_t *pt, uint8_t *ct),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ size_t remainder = length;
+ size_t need = 0;
+ uint8_t *datap = (uint8_t *)data;
+ uint8_t *blockp;
+ uint8_t *lastp;
+ void *iov_or_mp;
+ offset_t offset;
+ uint8_t *out_data_1;
+ uint8_t *out_data_2;
+ size_t out_data_1_len;
+ uint64_t lower_counter, upper_counter;
+
+ if (length + ctx->ctr_remainder_len < block_size) {
+ /* accumulate bytes here and return */
+ bcopy(datap,
+ (uint8_t *)ctx->ctr_remainder + ctx->ctr_remainder_len,
+ length);
+ ctx->ctr_remainder_len += length;
+ ctx->ctr_copy_to = datap;
+ return (CRYPTO_SUCCESS);
+ }
+
+ lastp = (uint8_t *)ctx->ctr_cb;
+ if (out != NULL)
+ crypto_init_ptrs(out, &iov_or_mp, &offset);
+
+ do {
+ /* Unprocessed data from last call. */
+ if (ctx->ctr_remainder_len > 0) {
+ need = block_size - ctx->ctr_remainder_len;
+
+ if (need > remainder)
+ return (CRYPTO_DATA_LEN_RANGE);
+
+ bcopy(datap, &((uint8_t *)ctx->ctr_remainder)
+ [ctx->ctr_remainder_len], need);
+
+ blockp = (uint8_t *)ctx->ctr_remainder;
+ } else {
+ blockp = datap;
+ }
+
+ /* ctr_cb is the counter block */
+ cipher(ctx->ctr_keysched, (uint8_t *)ctx->ctr_cb,
+ (uint8_t *)ctx->ctr_tmp);
+
+ lastp = (uint8_t *)ctx->ctr_tmp;
+
+ /*
+ * Increment Counter.
+ */
+ lower_counter = ntohll(ctx->ctr_cb[1] & ctx->ctr_lower_mask);
+ lower_counter = htonll(lower_counter + 1);
+ lower_counter &= ctx->ctr_lower_mask;
+ ctx->ctr_cb[1] = (ctx->ctr_cb[1] & ~(ctx->ctr_lower_mask)) |
+ lower_counter;
+
+ /* wrap around */
+ if (lower_counter == 0) {
+ upper_counter =
+ ntohll(ctx->ctr_cb[0] & ctx->ctr_upper_mask);
+ upper_counter = htonll(upper_counter + 1);
+ upper_counter &= ctx->ctr_upper_mask;
+ ctx->ctr_cb[0] =
+ (ctx->ctr_cb[0] & ~(ctx->ctr_upper_mask)) |
+ upper_counter;
+ }
+
+ /*
+ * XOR encrypted counter block with the current clear block.
+ */
+ xor_block(blockp, lastp);
+
+ if (out == NULL) {
+ if (ctx->ctr_remainder_len > 0) {
+ bcopy(lastp, ctx->ctr_copy_to,
+ ctx->ctr_remainder_len);
+ bcopy(lastp + ctx->ctr_remainder_len, datap,
+ need);
+ }
+ } else {
+ crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
+ &out_data_1_len, &out_data_2, block_size);
+
+ /* copy block to where it belongs */
+ bcopy(lastp, out_data_1, out_data_1_len);
+ if (out_data_2 != NULL) {
+ bcopy(lastp + out_data_1_len, out_data_2,
+ block_size - out_data_1_len);
+ }
+ /* update offset */
+ out->cd_offset += block_size;
+ }
+
+ /* Update pointer to next block of data to be processed. */
+ if (ctx->ctr_remainder_len != 0) {
+ datap += need;
+ ctx->ctr_remainder_len = 0;
+ } else {
+ datap += block_size;
+ }
+
+ remainder = (size_t)&data[length] - (size_t)datap;
+
+ /* Incomplete last block. */
+ if (remainder > 0 && remainder < block_size) {
+ bcopy(datap, ctx->ctr_remainder, remainder);
+ ctx->ctr_remainder_len = remainder;
+ ctx->ctr_copy_to = datap;
+ goto out;
+ }
+ ctx->ctr_copy_to = NULL;
+
+ } while (remainder > 0);
+
+out:
+ return (CRYPTO_SUCCESS);
+}
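+
+/*
+ * Note on the increment above (explanatory, not from the original
+ * sources): the counter block is kept in big-endian byte order, so the
+ * low 64 bits are converted to host order with ntohll(), incremented,
+ * masked back to the counter-field width chosen at ctr_init_ctx() time,
+ * and converted back. When the counter field is wider than 64 bits, a
+ * wrap of the lower half carries into the upper half via ctr_cb[0].
+ */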
+
+int
+ctr_mode_final(ctr_ctx_t *ctx, crypto_data_t *out,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *))
+{
+ uint8_t *lastp;
+ void *iov_or_mp;
+ offset_t offset;
+ uint8_t *out_data_1;
+ uint8_t *out_data_2;
+ size_t out_data_1_len;
+ uint8_t *p;
+ int i;
+
+ if (out->cd_length < ctx->ctr_remainder_len)
+ return (CRYPTO_DATA_LEN_RANGE);
+
+ encrypt_block(ctx->ctr_keysched, (uint8_t *)ctx->ctr_cb,
+ (uint8_t *)ctx->ctr_tmp);
+
+ lastp = (uint8_t *)ctx->ctr_tmp;
+ p = (uint8_t *)ctx->ctr_remainder;
+ for (i = 0; i < ctx->ctr_remainder_len; i++) {
+ p[i] ^= lastp[i];
+ }
+
+ crypto_init_ptrs(out, &iov_or_mp, &offset);
+ crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
+ &out_data_1_len, &out_data_2, ctx->ctr_remainder_len);
+
+ bcopy(p, out_data_1, out_data_1_len);
+ if (out_data_2 != NULL) {
+ bcopy((uint8_t *)p + out_data_1_len,
+ out_data_2, ctx->ctr_remainder_len - out_data_1_len);
+ }
+ out->cd_offset += ctx->ctr_remainder_len;
+ ctx->ctr_remainder_len = 0;
+ return (CRYPTO_SUCCESS);
+}
+
+int
+ctr_init_ctx(ctr_ctx_t *ctr_ctx, ulong_t count, uint8_t *cb,
+    void (*copy_block)(uint8_t *, uint8_t *))
+{
+ uint64_t upper_mask = 0;
+ uint64_t lower_mask = 0;
+
+ if (count == 0 || count > 128) {
+ return (CRYPTO_MECHANISM_PARAM_INVALID);
+ }
+ /* upper 64 bits of the mask */
+ if (count >= 64) {
+ count -= 64;
+ upper_mask = (count == 64) ? UINT64_MAX : (1ULL << count) - 1;
+ lower_mask = UINT64_MAX;
+ } else {
+ /* now the lower 63 bits */
+ lower_mask = (1ULL << count) - 1;
+ }
+ ctr_ctx->ctr_lower_mask = htonll(lower_mask);
+ ctr_ctx->ctr_upper_mask = htonll(upper_mask);
+
+ copy_block(cb, (uchar_t *)ctr_ctx->ctr_cb);
+ ctr_ctx->ctr_lastp = (uint8_t *)&ctr_ctx->ctr_cb[0];
+ ctr_ctx->ctr_flags |= CTR_MODE;
+ return (CRYPTO_SUCCESS);
+}
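+
+/*
+ * Example of the masks built above (illustrative): ctr_init_ctx() with
+ * count = 32 yields upper_mask = 0 and lower_mask = 0xffffffff before
+ * the htonll() conversion, so only the last four bytes of the counter
+ * block act as the counter and the remaining twelve hold the nonce.
+ */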
+
+/* ARGSUSED */
+void *
+ctr_alloc_ctx(int kmflag)
+{
+ ctr_ctx_t *ctr_ctx;
+
+ if ((ctr_ctx = kmem_zalloc(sizeof (ctr_ctx_t), kmflag)) == NULL)
+ return (NULL);
+
+ ctr_ctx->ctr_flags = CTR_MODE;
+ return (ctr_ctx);
+}
diff --git a/module/icp/algs/modes/ecb.c b/module/icp/algs/modes/ecb.c
new file mode 100644
index 000000000..04e6c5eaa
--- /dev/null
+++ b/module/icp/algs/modes/ecb.c
@@ -0,0 +1,143 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/zfs_context.h>
+#include <modes/modes.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/impl.h>
+
+/*
+ * Algorithm independent ECB functions.
+ */
+int
+ecb_cipher_contiguous_blocks(ecb_ctx_t *ctx, char *data, size_t length,
+ crypto_data_t *out, size_t block_size,
+ int (*cipher)(const void *ks, const uint8_t *pt, uint8_t *ct))
+{
+ size_t remainder = length;
+ size_t need = 0;
+ uint8_t *datap = (uint8_t *)data;
+ uint8_t *blockp;
+ uint8_t *lastp;
+ void *iov_or_mp;
+ offset_t offset;
+ uint8_t *out_data_1;
+ uint8_t *out_data_2;
+ size_t out_data_1_len;
+
+ if (length + ctx->ecb_remainder_len < block_size) {
+ /* accumulate bytes here and return */
+ bcopy(datap,
+ (uint8_t *)ctx->ecb_remainder + ctx->ecb_remainder_len,
+ length);
+ ctx->ecb_remainder_len += length;
+ ctx->ecb_copy_to = datap;
+ return (CRYPTO_SUCCESS);
+ }
+
+ lastp = (uint8_t *)ctx->ecb_iv;
+ if (out != NULL)
+ crypto_init_ptrs(out, &iov_or_mp, &offset);
+
+ do {
+ /* Unprocessed data from last call. */
+ if (ctx->ecb_remainder_len > 0) {
+ need = block_size - ctx->ecb_remainder_len;
+
+ if (need > remainder)
+ return (CRYPTO_DATA_LEN_RANGE);
+
+ bcopy(datap, &((uint8_t *)ctx->ecb_remainder)
+ [ctx->ecb_remainder_len], need);
+
+ blockp = (uint8_t *)ctx->ecb_remainder;
+ } else {
+ blockp = datap;
+ }
+
+ if (out == NULL) {
+ cipher(ctx->ecb_keysched, blockp, blockp);
+
+ ctx->ecb_lastp = blockp;
+ lastp = blockp;
+
+ if (ctx->ecb_remainder_len > 0) {
+ bcopy(blockp, ctx->ecb_copy_to,
+ ctx->ecb_remainder_len);
+ bcopy(blockp + ctx->ecb_remainder_len, datap,
+ need);
+ }
+ } else {
+ cipher(ctx->ecb_keysched, blockp, lastp);
+ crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
+ &out_data_1_len, &out_data_2, block_size);
+
+ /* copy block to where it belongs */
+ bcopy(lastp, out_data_1, out_data_1_len);
+ if (out_data_2 != NULL) {
+ bcopy(lastp + out_data_1_len, out_data_2,
+ block_size - out_data_1_len);
+ }
+ /* update offset */
+ out->cd_offset += block_size;
+ }
+
+ /* Update pointer to next block of data to be processed. */
+ if (ctx->ecb_remainder_len != 0) {
+ datap += need;
+ ctx->ecb_remainder_len = 0;
+ } else {
+ datap += block_size;
+ }
+
+ remainder = (size_t)&data[length] - (size_t)datap;
+
+ /* Incomplete last block. */
+ if (remainder > 0 && remainder < block_size) {
+ bcopy(datap, ctx->ecb_remainder, remainder);
+ ctx->ecb_remainder_len = remainder;
+ ctx->ecb_copy_to = datap;
+ goto out;
+ }
+ ctx->ecb_copy_to = NULL;
+
+ } while (remainder > 0);
+
+out:
+ return (CRYPTO_SUCCESS);
+}
+
+/* ARGSUSED */
+void *
+ecb_alloc_ctx(int kmflag)
+{
+ ecb_ctx_t *ecb_ctx;
+
+ if ((ecb_ctx = kmem_zalloc(sizeof (ecb_ctx_t), kmflag)) == NULL)
+ return (NULL);
+
+ ecb_ctx->ecb_flags = ECB_MODE;
+ return (ecb_ctx);
+}
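+
+/*
+ * Illustrative sketch (not from the original Illumos sources): driving
+ * ecb_cipher_contiguous_blocks() with a caller-supplied block cipher.
+ * The my_cipher() callback and the 16-byte block size are assumptions
+ * for the example.
+ */
+#if 0
+static int
+my_cipher(const void *ks, const uint8_t *pt, uint8_t *ct)
+{
+	/* stand-in for a real block cipher; copies plaintext through */
+	bcopy(pt, ct, 16);
+	return (CRYPTO_SUCCESS);
+}
+
+static void
+ecb_example(char *buf, size_t len)
+{
+	ecb_ctx_t *ctx = ecb_alloc_ctx(KM_SLEEP);
+
+	ctx->ecb_keysched = NULL;	/* real code: expanded cipher key */
+	/* out == NULL encrypts in place, writing back into buf */
+	(void) ecb_cipher_contiguous_blocks(ctx, buf, len, NULL, 16,
+	    my_cipher);
+	crypto_free_mode_ctx(ctx);
+}
+#endif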
diff --git a/module/icp/algs/modes/gcm.c b/module/icp/algs/modes/gcm.c
new file mode 100644
index 000000000..9cd8ab1e9
--- /dev/null
+++ b/module/icp/algs/modes/gcm.c
@@ -0,0 +1,748 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#include <sys/zfs_context.h>
+#include <modes/modes.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/impl.h>
+#include <sys/byteorder.h>
+
+#ifdef __amd64
+
+#ifdef _KERNEL
+/* Workaround for no XMM kernel thread save/restore */
+#define KPREEMPT_DISABLE kpreempt_disable()
+#define KPREEMPT_ENABLE kpreempt_enable()
+
+#else
+#define KPREEMPT_DISABLE
+#define KPREEMPT_ENABLE
+#endif /* _KERNEL */
+
+extern void gcm_mul_pclmulqdq(uint64_t *x_in, uint64_t *y, uint64_t *res);
+static int intel_pclmulqdq_instruction_present(void);
+#endif /* __amd64 */
+
+struct aes_block {
+ uint64_t a;
+ uint64_t b;
+};
+
+
+/*
+ * gcm_mul()
+ * Perform a carry-less multiplication (that is, XOR the partial
+ * products instead of adding them) on *x_in and *y and place the
+ * result in *res.
+ *
+ * Byte swap the input (*x_in and *y) and the output (*res).
+ *
+ * Note: x_in, y, and res all point to 16-byte numbers (an array of two
+ * 64-bit integers).
+ */
+void
+gcm_mul(uint64_t *x_in, uint64_t *y, uint64_t *res)
+{
+#ifdef __amd64
+ if (intel_pclmulqdq_instruction_present()) {
+ KPREEMPT_DISABLE;
+ gcm_mul_pclmulqdq(x_in, y, res);
+ KPREEMPT_ENABLE;
+ } else
+#endif /* __amd64 */
+ {
+ static const uint64_t R = 0xe100000000000000ULL;
+ struct aes_block z = {0, 0};
+ struct aes_block v;
+ uint64_t x;
+ int i, j;
+
+ v.a = ntohll(y[0]);
+ v.b = ntohll(y[1]);
+
+ for (j = 0; j < 2; j++) {
+ x = ntohll(x_in[j]);
+ for (i = 0; i < 64; i++, x <<= 1) {
+ if (x & 0x8000000000000000ULL) {
+ z.a ^= v.a;
+ z.b ^= v.b;
+ }
+ if (v.b & 1ULL) {
+ v.b = (v.a << 63)|(v.b >> 1);
+ v.a = (v.a >> 1) ^ R;
+ } else {
+ v.b = (v.a << 63)|(v.b >> 1);
+ v.a = v.a >> 1;
+ }
+ }
+ }
+ res[0] = htonll(z.a);
+ res[1] = htonll(z.b);
+ }
+}
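+
+/*
+ * Illustrative sketch (not from the original Illumos sources): gcm_mul()
+ * multiplies in GF(2^128) using GCM's bit-reflected encoding, where the
+ * reduction polynomial x^128 + x^7 + x^2 + x + 1 appears as the
+ * 0xe1... constant above.  In that encoding the multiplicative identity
+ * is the block 0x80 00 ... 00, so multiplying by it returns the input:
+ */
+#if 0
+static void
+gcm_mul_identity_check(void)
+{
+	uint64_t x[2] = { htonll(0x0123456789abcdefULL), 0 };
+	uint64_t one[2] = { htonll(0x8000000000000000ULL), 0 };
+	uint64_t res[2];
+
+	gcm_mul(x, one, res);
+	ASSERT(res[0] == x[0] && res[1] == x[1]);
+}
+#endif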
+
+
+#define GHASH(c, d, t) \
+ xor_block((uint8_t *)(d), (uint8_t *)(c)->gcm_ghash); \
+ gcm_mul((uint64_t *)(void *)(c)->gcm_ghash, (c)->gcm_H, \
+ (uint64_t *)(void *)(t));
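+
+/*
+ * Illustrative note (not from the original Illumos sources): one GHASH
+ * step.  With Y the running hash, D a 16-byte input block, and H the
+ * hash subkey, the macro computes
+ *
+ *	Y = (Y XOR D) * H	in GF(2^128)
+ *
+ * writing the product to t (usually the gcm_ghash field itself).
+ */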
+
+
+/*
+ * Encrypt multiple blocks of data in GCM mode.  Decryption for GCM
+ * mode is handled by a separate function.
+ */
+int
+gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
+ crypto_data_t *out, size_t block_size,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ size_t remainder = length;
+ size_t need = 0;
+ uint8_t *datap = (uint8_t *)data;
+ uint8_t *blockp;
+ uint8_t *lastp;
+ void *iov_or_mp;
+ offset_t offset;
+ uint8_t *out_data_1;
+ uint8_t *out_data_2;
+ size_t out_data_1_len;
+ uint64_t counter;
+ uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
+
+ if (length + ctx->gcm_remainder_len < block_size) {
+ /* accumulate bytes here and return */
+ bcopy(datap,
+ (uint8_t *)ctx->gcm_remainder + ctx->gcm_remainder_len,
+ length);
+ ctx->gcm_remainder_len += length;
+ ctx->gcm_copy_to = datap;
+ return (CRYPTO_SUCCESS);
+ }
+
+ lastp = (uint8_t *)ctx->gcm_cb;
+ if (out != NULL)
+ crypto_init_ptrs(out, &iov_or_mp, &offset);
+
+ do {
+ /* Unprocessed data from last call. */
+ if (ctx->gcm_remainder_len > 0) {
+ need = block_size - ctx->gcm_remainder_len;
+
+ if (need > remainder)
+ return (CRYPTO_DATA_LEN_RANGE);
+
+ bcopy(datap, &((uint8_t *)ctx->gcm_remainder)
+ [ctx->gcm_remainder_len], need);
+
+ blockp = (uint8_t *)ctx->gcm_remainder;
+ } else {
+ blockp = datap;
+ }
+
+ /*
+ * Increment counter. Counter bits are confined
+ * to the bottom 32 bits of the counter block.
+ */
+ counter = ntohll(ctx->gcm_cb[1] & counter_mask);
+ counter = htonll(counter + 1);
+ counter &= counter_mask;
+ ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;
+
+ encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb,
+ (uint8_t *)ctx->gcm_tmp);
+ xor_block(blockp, (uint8_t *)ctx->gcm_tmp);
+
+ lastp = (uint8_t *)ctx->gcm_tmp;
+
+ ctx->gcm_processed_data_len += block_size;
+
+ if (out == NULL) {
+ if (ctx->gcm_remainder_len > 0) {
+ bcopy(blockp, ctx->gcm_copy_to,
+ ctx->gcm_remainder_len);
+ bcopy(blockp + ctx->gcm_remainder_len, datap,
+ need);
+ }
+ } else {
+ crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
+ &out_data_1_len, &out_data_2, block_size);
+
+ /* copy block to where it belongs */
+ if (out_data_1_len == block_size) {
+ copy_block(lastp, out_data_1);
+ } else {
+ bcopy(lastp, out_data_1, out_data_1_len);
+ if (out_data_2 != NULL) {
+ bcopy(lastp + out_data_1_len,
+ out_data_2,
+ block_size - out_data_1_len);
+ }
+ }
+ /* update offset */
+ out->cd_offset += block_size;
+ }
+
+ /* add ciphertext to the hash */
+ GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash);
+
+ /* Update pointer to next block of data to be processed. */
+ if (ctx->gcm_remainder_len != 0) {
+ datap += need;
+ ctx->gcm_remainder_len = 0;
+ } else {
+ datap += block_size;
+ }
+
+ remainder = (size_t)&data[length] - (size_t)datap;
+
+ /* Incomplete last block. */
+ if (remainder > 0 && remainder < block_size) {
+ bcopy(datap, ctx->gcm_remainder, remainder);
+ ctx->gcm_remainder_len = remainder;
+ ctx->gcm_copy_to = datap;
+ goto out;
+ }
+ ctx->gcm_copy_to = NULL;
+
+ } while (remainder > 0);
+out:
+ return (CRYPTO_SUCCESS);
+}
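+
+/*
+ * Illustrative note (not from the original Illumos sources): gcm_cb[]
+ * stores the 16-byte counter block as two native words holding
+ * big-endian byte patterns, so the 32-bit block counter occupies the
+ * last four bytes of gcm_cb[1].  The ntohll(0x00000000ffffffffULL) mask
+ * selects exactly those bytes on either endianness, and the
+ * ntohll/increment/htonll sequence above wraps the counter modulo 2^32
+ * without disturbing the IV-derived upper 96 bits.
+ */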
+
+/* ARGSUSED */
+int
+gcm_encrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
+ uint8_t *ghash, *macp = NULL;
+ int i, rv;
+
+ if (out->cd_length <
+ (ctx->gcm_remainder_len + ctx->gcm_tag_len)) {
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+
+ ghash = (uint8_t *)ctx->gcm_ghash;
+
+ if (ctx->gcm_remainder_len > 0) {
+ uint64_t counter;
+ uint8_t *tmpp = (uint8_t *)ctx->gcm_tmp;
+
+ /*
+ * Here is where we deal with data that is not a
+ * multiple of the block size.
+ */
+
+ /*
+ * Increment counter.
+ */
+ counter = ntohll(ctx->gcm_cb[1] & counter_mask);
+ counter = htonll(counter + 1);
+ counter &= counter_mask;
+ ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;
+
+ encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb,
+ (uint8_t *)ctx->gcm_tmp);
+
+ macp = (uint8_t *)ctx->gcm_remainder;
+ bzero(macp + ctx->gcm_remainder_len,
+ block_size - ctx->gcm_remainder_len);
+
+ /* XOR with counter block */
+ for (i = 0; i < ctx->gcm_remainder_len; i++) {
+ macp[i] ^= tmpp[i];
+ }
+
+ /* add ciphertext to the hash */
+ GHASH(ctx, macp, ghash);
+
+ ctx->gcm_processed_data_len += ctx->gcm_remainder_len;
+ }
+
+ ctx->gcm_len_a_len_c[1] =
+ htonll(CRYPTO_BYTES2BITS(ctx->gcm_processed_data_len));
+ GHASH(ctx, ctx->gcm_len_a_len_c, ghash);
+ encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0,
+ (uint8_t *)ctx->gcm_J0);
+ xor_block((uint8_t *)ctx->gcm_J0, ghash);
+
+ if (ctx->gcm_remainder_len > 0) {
+ rv = crypto_put_output_data(macp, out, ctx->gcm_remainder_len);
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ }
+ out->cd_offset += ctx->gcm_remainder_len;
+ ctx->gcm_remainder_len = 0;
+ rv = crypto_put_output_data(ghash, out, ctx->gcm_tag_len);
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ out->cd_offset += ctx->gcm_tag_len;
+
+ return (CRYPTO_SUCCESS);
+}
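+
+/*
+ * Illustrative note (not from the original Illumos sources): the final
+ * step above is the tag computation of NIST SP 800-38D,
+ *
+ *	S = GHASH_H(A || C || len(A) || len(C))
+ *	T = MSB_tag_len(E(K, J0) XOR S)
+ *
+ * with the AAD A folded in by gcm_init(), the ciphertext C folded in
+ * block by block during encryption, and J0 the pre-counter block saved
+ * by gcm_format_initial_blocks().
+ */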
+
+/*
+ * This only deals with decrypting the last block of input, which
+ * may not be a full block.
+ */
+static void
+gcm_decrypt_incomplete_block(gcm_ctx_t *ctx, size_t block_size, size_t index,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ uint8_t *datap, *outp, *counterp;
+ uint64_t counter;
+ uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
+ int i;
+
+ /*
+ * Increment counter.
+ * Counter bits are confined to the bottom 32 bits
+ */
+ counter = ntohll(ctx->gcm_cb[1] & counter_mask);
+ counter = htonll(counter + 1);
+ counter &= counter_mask;
+ ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;
+
+ datap = (uint8_t *)ctx->gcm_remainder;
+ outp = &((ctx->gcm_pt_buf)[index]);
+ counterp = (uint8_t *)ctx->gcm_tmp;
+
+ /* authentication tag */
+ bzero((uint8_t *)ctx->gcm_tmp, block_size);
+ bcopy(datap, (uint8_t *)ctx->gcm_tmp, ctx->gcm_remainder_len);
+
+ /* add ciphertext to the hash */
+ GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash);
+
+ /* decrypt remaining ciphertext */
+ encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, counterp);
+
+ /* XOR with counter block */
+ for (i = 0; i < ctx->gcm_remainder_len; i++) {
+ outp[i] = datap[i] ^ counterp[i];
+ }
+}
+
+/* ARGSUSED */
+int
+gcm_mode_decrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
+ crypto_data_t *out, size_t block_size,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ size_t new_len;
+ uint8_t *new;
+
+ /*
+ * Copy contiguous ciphertext input blocks to plaintext buffer.
+	 * Ciphertext will be decrypted in the final call.
+ */
+ if (length > 0) {
+ new_len = ctx->gcm_pt_buf_len + length;
+		new = vmem_alloc(new_len, ctx->gcm_kmflag);
+		if (new == NULL)
+			return (CRYPTO_HOST_MEMORY);
+
+		bcopy(ctx->gcm_pt_buf, new, ctx->gcm_pt_buf_len);
+		vmem_free(ctx->gcm_pt_buf, ctx->gcm_pt_buf_len);
+		ctx->gcm_pt_buf = new;
+ ctx->gcm_pt_buf_len = new_len;
+ bcopy(data, &ctx->gcm_pt_buf[ctx->gcm_processed_data_len],
+ length);
+ ctx->gcm_processed_data_len += length;
+ }
+
+ ctx->gcm_remainder_len = 0;
+ return (CRYPTO_SUCCESS);
+}
+
+int
+gcm_decrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ size_t pt_len;
+ size_t remainder;
+ uint8_t *ghash;
+ uint8_t *blockp;
+ uint8_t *cbp;
+ uint64_t counter;
+ uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
+ int processed = 0, rv;
+
+ ASSERT(ctx->gcm_processed_data_len == ctx->gcm_pt_buf_len);
+
+ pt_len = ctx->gcm_processed_data_len - ctx->gcm_tag_len;
+ ghash = (uint8_t *)ctx->gcm_ghash;
+ blockp = ctx->gcm_pt_buf;
+ remainder = pt_len;
+ while (remainder > 0) {
+ /* Incomplete last block */
+ if (remainder < block_size) {
+ bcopy(blockp, ctx->gcm_remainder, remainder);
+ ctx->gcm_remainder_len = remainder;
+ /*
+			 * not expecting any more ciphertext; just
+ * compute plaintext for the remaining input
+ */
+ gcm_decrypt_incomplete_block(ctx, block_size,
+ processed, encrypt_block, xor_block);
+ ctx->gcm_remainder_len = 0;
+ goto out;
+ }
+ /* add ciphertext to the hash */
+ GHASH(ctx, blockp, ghash);
+
+ /*
+ * Increment counter.
+ * Counter bits are confined to the bottom 32 bits
+ */
+ counter = ntohll(ctx->gcm_cb[1] & counter_mask);
+ counter = htonll(counter + 1);
+ counter &= counter_mask;
+ ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;
+
+ cbp = (uint8_t *)ctx->gcm_tmp;
+ encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, cbp);
+
+ /* XOR with ciphertext */
+ xor_block(cbp, blockp);
+
+ processed += block_size;
+ blockp += block_size;
+ remainder -= block_size;
+ }
+out:
+ ctx->gcm_len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(pt_len));
+ GHASH(ctx, ctx->gcm_len_a_len_c, ghash);
+ encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0,
+ (uint8_t *)ctx->gcm_J0);
+ xor_block((uint8_t *)ctx->gcm_J0, ghash);
+
+ /* compare the input authentication tag with what we calculated */
+ if (bcmp(&ctx->gcm_pt_buf[pt_len], ghash, ctx->gcm_tag_len)) {
+ /* They don't match */
+ return (CRYPTO_INVALID_MAC);
+ } else {
+ rv = crypto_put_output_data(ctx->gcm_pt_buf, out, pt_len);
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ out->cd_offset += pt_len;
+ }
+ return (CRYPTO_SUCCESS);
+}
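+
+/*
+ * Illustrative sketch (not from the original Illumos sources): the
+ * decrypt side stages all ciphertext, including the trailing tag, in
+ * gcm_pt_buf and only decrypts and verifies in the final call.  The
+ * callback names and 16-byte block size are assumptions.
+ */
+#if 0
+static int
+gcm_decrypt_example(gcm_ctx_t *ctx, char *ct, size_t ct_len,
+    crypto_data_t *out)
+{
+	int rv;
+
+	gcm_set_kmflag(ctx, KM_SLEEP);
+	rv = gcm_mode_decrypt_contiguous_blocks(ctx, ct, ct_len, out, 16,
+	    my_encrypt_block, my_copy_block, my_xor_block);
+	if (rv != CRYPTO_SUCCESS)
+		return (rv);
+	/* decrypts, verifies the tag, and emits plaintext on success */
+	return (gcm_decrypt_final(ctx, out, 16, my_encrypt_block,
+	    my_xor_block));
+}
+#endif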
+
+static int
+gcm_validate_args(CK_AES_GCM_PARAMS *gcm_param)
+{
+ size_t tag_len;
+
+ /*
+ * Check the length of the authentication tag (in bits).
+ */
+ tag_len = gcm_param->ulTagBits;
+ switch (tag_len) {
+ case 32:
+ case 64:
+ case 96:
+ case 104:
+ case 112:
+ case 120:
+ case 128:
+ break;
+ default:
+ return (CRYPTO_MECHANISM_PARAM_INVALID);
+ }
+
+ if (gcm_param->ulIvLen == 0)
+ return (CRYPTO_MECHANISM_PARAM_INVALID);
+
+ return (CRYPTO_SUCCESS);
+}
+
+static void
+gcm_format_initial_blocks(uchar_t *iv, ulong_t iv_len,
+ gcm_ctx_t *ctx, size_t block_size,
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ uint8_t *cb;
+ ulong_t remainder = iv_len;
+ ulong_t processed = 0;
+ uint8_t *datap, *ghash;
+ uint64_t len_a_len_c[2];
+
+ ghash = (uint8_t *)ctx->gcm_ghash;
+ cb = (uint8_t *)ctx->gcm_cb;
+ if (iv_len == 12) {
+ bcopy(iv, cb, 12);
+ cb[12] = 0;
+ cb[13] = 0;
+ cb[14] = 0;
+ cb[15] = 1;
+ /* J0 will be used again in the final */
+ copy_block(cb, (uint8_t *)ctx->gcm_J0);
+ } else {
+ /* GHASH the IV */
+ do {
+ if (remainder < block_size) {
+ bzero(cb, block_size);
+ bcopy(&(iv[processed]), cb, remainder);
+ datap = (uint8_t *)cb;
+ remainder = 0;
+ } else {
+ datap = (uint8_t *)(&(iv[processed]));
+ processed += block_size;
+ remainder -= block_size;
+ }
+ GHASH(ctx, datap, ghash);
+ } while (remainder > 0);
+
+ len_a_len_c[0] = 0;
+ len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(iv_len));
+ GHASH(ctx, len_a_len_c, ctx->gcm_J0);
+
+ /* J0 will be used again in the final */
+ copy_block((uint8_t *)ctx->gcm_J0, (uint8_t *)cb);
+ }
+}
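+
+/*
+ * Illustrative note (not from the original Illumos sources): for the
+ * common 96-bit IV, J0 is simply IV || 0x00000001, so a zero IV yields
+ * the counter block 00...00 01 and the first data block is encrypted
+ * with counter value 2.  Every other IV length takes the GHASH path
+ * above, as required by NIST SP 800-38D.
+ */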
+
+/*
+ * The following function is called at encrypt or decrypt init time
+ * for AES GCM mode.
+ */
+int
+gcm_init(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
+ unsigned char *auth_data, size_t auth_data_len, size_t block_size,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ uint8_t *ghash, *datap, *authp;
+ size_t remainder, processed;
+
+ /* encrypt zero block to get subkey H */
+ bzero(ctx->gcm_H, sizeof (ctx->gcm_H));
+ encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_H,
+ (uint8_t *)ctx->gcm_H);
+
+ gcm_format_initial_blocks(iv, iv_len, ctx, block_size,
+ copy_block, xor_block);
+
+ authp = (uint8_t *)ctx->gcm_tmp;
+ ghash = (uint8_t *)ctx->gcm_ghash;
+ bzero(authp, block_size);
+ bzero(ghash, block_size);
+
+ processed = 0;
+ remainder = auth_data_len;
+ do {
+ if (remainder < block_size) {
+ /*
+			 * There isn't a full block of data; pad the rest
+			 * of the buffer with zeros.
+ */
+ bzero(authp, block_size);
+ bcopy(&(auth_data[processed]), authp, remainder);
+ datap = (uint8_t *)authp;
+ remainder = 0;
+ } else {
+ datap = (uint8_t *)(&(auth_data[processed]));
+ processed += block_size;
+ remainder -= block_size;
+ }
+
+ /* add auth data to the hash */
+ GHASH(ctx, datap, ghash);
+
+ } while (remainder > 0);
+
+ return (CRYPTO_SUCCESS);
+}
+
+int
+gcm_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ int rv;
+ CK_AES_GCM_PARAMS *gcm_param;
+
+ if (param != NULL) {
+ gcm_param = (CK_AES_GCM_PARAMS *)(void *)param;
+
+ if ((rv = gcm_validate_args(gcm_param)) != 0) {
+ return (rv);
+ }
+
+ gcm_ctx->gcm_tag_len = gcm_param->ulTagBits;
+ gcm_ctx->gcm_tag_len >>= 3;
+ gcm_ctx->gcm_processed_data_len = 0;
+
+ /* these values are in bits */
+ gcm_ctx->gcm_len_a_len_c[0]
+ = htonll(CRYPTO_BYTES2BITS(gcm_param->ulAADLen));
+
+ rv = CRYPTO_SUCCESS;
+ gcm_ctx->gcm_flags |= GCM_MODE;
+ } else {
+ rv = CRYPTO_MECHANISM_PARAM_INVALID;
+ goto out;
+ }
+
+ if (gcm_init(gcm_ctx, gcm_param->pIv, gcm_param->ulIvLen,
+ gcm_param->pAAD, gcm_param->ulAADLen, block_size,
+ encrypt_block, copy_block, xor_block) != 0) {
+ rv = CRYPTO_MECHANISM_PARAM_INVALID;
+ }
+out:
+ return (rv);
+}
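+
+/*
+ * Illustrative sketch (not from the original Illumos sources):
+ * populating CK_AES_GCM_PARAMS for gcm_init_ctx().  The field values
+ * and callback names are example assumptions; the struct itself is
+ * declared in the crypto headers included above.
+ */
+#if 0
+static int
+gcm_setup_example(gcm_ctx_t *ctx, uchar_t *iv, uchar_t *aad,
+    size_t aad_len)
+{
+	CK_AES_GCM_PARAMS p;
+
+	p.pIv = iv;
+	p.ulIvLen = 12;		/* 96-bit IV takes the fast J0 path */
+	p.ulIvBits = 96;
+	p.pAAD = aad;
+	p.ulAADLen = aad_len;
+	p.ulTagBits = 128;	/* full-length tag */
+
+	return (gcm_init_ctx(ctx, (char *)&p, 16, my_encrypt_block,
+	    my_copy_block, my_xor_block));
+}
+#endif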
+
+int
+gmac_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ int rv;
+ CK_AES_GMAC_PARAMS *gmac_param;
+
+ if (param != NULL) {
+ gmac_param = (CK_AES_GMAC_PARAMS *)(void *)param;
+
+ gcm_ctx->gcm_tag_len = CRYPTO_BITS2BYTES(AES_GMAC_TAG_BITS);
+ gcm_ctx->gcm_processed_data_len = 0;
+
+ /* these values are in bits */
+ gcm_ctx->gcm_len_a_len_c[0]
+ = htonll(CRYPTO_BYTES2BITS(gmac_param->ulAADLen));
+
+ rv = CRYPTO_SUCCESS;
+ gcm_ctx->gcm_flags |= GMAC_MODE;
+ } else {
+ rv = CRYPTO_MECHANISM_PARAM_INVALID;
+ goto out;
+ }
+
+ if (gcm_init(gcm_ctx, gmac_param->pIv, AES_GMAC_IV_LEN,
+ gmac_param->pAAD, gmac_param->ulAADLen, block_size,
+ encrypt_block, copy_block, xor_block) != 0) {
+ rv = CRYPTO_MECHANISM_PARAM_INVALID;
+ }
+out:
+ return (rv);
+}
+
+void *
+gcm_alloc_ctx(int kmflag)
+{
+ gcm_ctx_t *gcm_ctx;
+
+ if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL)
+ return (NULL);
+
+ gcm_ctx->gcm_flags = GCM_MODE;
+ return (gcm_ctx);
+}
+
+void *
+gmac_alloc_ctx(int kmflag)
+{
+ gcm_ctx_t *gcm_ctx;
+
+ if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL)
+ return (NULL);
+
+ gcm_ctx->gcm_flags = GMAC_MODE;
+ return (gcm_ctx);
+}
+
+void
+gcm_set_kmflag(gcm_ctx_t *ctx, int kmflag)
+{
+ ctx->gcm_kmflag = kmflag;
+}
+
+
+#ifdef __amd64
+
+#define INTEL_PCLMULQDQ_FLAG (1 << 1)
+
+/*
+ * Return 1 if executing on Intel with PCLMULQDQ instructions,
+ * otherwise 0 (i.e., Intel without PCLMULQDQ or a non-Intel CPU).
+ * Cache the result, as the CPU can't change.
+ *
+ * Note: the original Illumos code used getisax() in userland and
+ * is_x86_featureset() in the kernel; this port issues cpuid directly.
+ */
+static int
+intel_pclmulqdq_instruction_present(void)
+{
+ static int cached_result = -1;
+ unsigned eax, ebx, ecx, edx;
+ unsigned func, subfunc;
+
+ if (cached_result == -1) { /* first time */
+ /* check for an intel cpu */
+ func = 0;
+ subfunc = 0;
+
+ __asm__ __volatile__(
+ "cpuid"
+ : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+ : "a"(func), "c"(subfunc));
+
+ if (memcmp((char *) (&ebx), "Genu", 4) == 0 &&
+ memcmp((char *) (&edx), "ineI", 4) == 0 &&
+ memcmp((char *) (&ecx), "ntel", 4) == 0) {
+
+ func = 1;
+ subfunc = 0;
+
+			/* check for the pclmulqdq instruction */
+ __asm__ __volatile__(
+ "cpuid"
+ : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+ : "a"(func), "c"(subfunc));
+
+ cached_result = !!(ecx & INTEL_PCLMULQDQ_FLAG);
+ } else {
+ cached_result = 0;
+ }
+ }
+
+ return (cached_result);
+}
+
+#endif /* __amd64 */
diff --git a/module/icp/algs/modes/modes.c b/module/icp/algs/modes/modes.c
new file mode 100644
index 000000000..1d33c4268
--- /dev/null
+++ b/module/icp/algs/modes/modes.c
@@ -0,0 +1,159 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/zfs_context.h>
+#include <modes/modes.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/impl.h>
+
+/*
+ * Initialize by setting iov_or_mp to point to the current iovec or mp,
+ * and by setting current_offset to an offset within the current iovec or mp.
+ */
+void
+crypto_init_ptrs(crypto_data_t *out, void **iov_or_mp, offset_t *current_offset)
+{
+ offset_t offset;
+
+ switch (out->cd_format) {
+ case CRYPTO_DATA_RAW:
+ *current_offset = out->cd_offset;
+ break;
+
+ case CRYPTO_DATA_UIO: {
+ uio_t *uiop = out->cd_uio;
+ uintptr_t vec_idx;
+
+ offset = out->cd_offset;
+ for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
+ offset >= uiop->uio_iov[vec_idx].iov_len;
+ offset -= uiop->uio_iov[vec_idx++].iov_len)
+ ;
+
+ *current_offset = offset;
+ *iov_or_mp = (void *)vec_idx;
+ break;
+ }
+ } /* end switch */
+}
+
+/*
+ * Get pointers for where in the output to copy a block of encrypted or
+ * decrypted data. The iov_or_mp argument stores a pointer to the current
+ * iovec or mp, and offset stores an offset into the current iovec or mp.
+ */
+void
+crypto_get_ptrs(crypto_data_t *out, void **iov_or_mp, offset_t *current_offset,
+ uint8_t **out_data_1, size_t *out_data_1_len, uint8_t **out_data_2,
+ size_t amt)
+{
+ offset_t offset;
+
+ switch (out->cd_format) {
+ case CRYPTO_DATA_RAW: {
+ iovec_t *iov;
+
+ offset = *current_offset;
+ iov = &out->cd_raw;
+ if ((offset + amt) <= iov->iov_len) {
+ /* one block fits */
+ *out_data_1 = (uint8_t *)iov->iov_base + offset;
+ *out_data_1_len = amt;
+ *out_data_2 = NULL;
+ *current_offset = offset + amt;
+ }
+ break;
+ }
+
+ case CRYPTO_DATA_UIO: {
+ uio_t *uio = out->cd_uio;
+ iovec_t *iov;
+ offset_t offset;
+ uintptr_t vec_idx;
+ uint8_t *p;
+
+ offset = *current_offset;
+ vec_idx = (uintptr_t)(*iov_or_mp);
+ iov = (iovec_t *)&uio->uio_iov[vec_idx];
+ p = (uint8_t *)iov->iov_base + offset;
+ *out_data_1 = p;
+
+ if (offset + amt <= iov->iov_len) {
+ /* can fit one block into this iov */
+ *out_data_1_len = amt;
+ *out_data_2 = NULL;
+ *current_offset = offset + amt;
+ } else {
+ /* one block spans two iovecs */
+ *out_data_1_len = iov->iov_len - offset;
+			if (vec_idx == uio->uio_iovcnt - 1) {
+				/* no next iovec to spill into */
+				*out_data_2 = NULL;
+				return;
+			}
+ vec_idx++;
+ iov = (iovec_t *)&uio->uio_iov[vec_idx];
+ *out_data_2 = (uint8_t *)iov->iov_base;
+ *current_offset = amt - *out_data_1_len;
+ }
+ *iov_or_mp = (void *)vec_idx;
+ break;
+ }
+ } /* end switch */
+}
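+
+/*
+ * Illustrative sketch (not from the original Illumos sources): the walk
+ * pattern the cipher modes use to emit one block into a (possibly
+ * scattered) output buffer.
+ */
+#if 0
+static void
+emit_block_example(crypto_data_t *out, uint8_t *block, size_t block_size)
+{
+	void *iov_or_mp;
+	offset_t offset;
+	uint8_t *d1, *d2;
+	size_t d1_len;
+
+	crypto_init_ptrs(out, &iov_or_mp, &offset);
+	crypto_get_ptrs(out, &iov_or_mp, &offset, &d1, &d1_len, &d2,
+	    block_size);
+	bcopy(block, d1, d1_len);
+	if (d2 != NULL)			/* block straddles two iovecs */
+		bcopy(block + d1_len, d2, block_size - d1_len);
+	out->cd_offset += block_size;
+}
+#endif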
+
+void
+crypto_free_mode_ctx(void *ctx)
+{
+ common_ctx_t *common_ctx = (common_ctx_t *)ctx;
+
+ switch (common_ctx->cc_flags &
+ (ECB_MODE|CBC_MODE|CTR_MODE|CCM_MODE|GCM_MODE|GMAC_MODE)) {
+ case ECB_MODE:
+ kmem_free(common_ctx, sizeof (ecb_ctx_t));
+ break;
+
+ case CBC_MODE:
+ kmem_free(common_ctx, sizeof (cbc_ctx_t));
+ break;
+
+ case CTR_MODE:
+ kmem_free(common_ctx, sizeof (ctr_ctx_t));
+ break;
+
+ case CCM_MODE:
+ if (((ccm_ctx_t *)ctx)->ccm_pt_buf != NULL)
+ vmem_free(((ccm_ctx_t *)ctx)->ccm_pt_buf,
+ ((ccm_ctx_t *)ctx)->ccm_data_len);
+
+ kmem_free(ctx, sizeof (ccm_ctx_t));
+ break;
+
+ case GCM_MODE:
+ case GMAC_MODE:
+ if (((gcm_ctx_t *)ctx)->gcm_pt_buf != NULL)
+ vmem_free(((gcm_ctx_t *)ctx)->gcm_pt_buf,
+ ((gcm_ctx_t *)ctx)->gcm_pt_buf_len);
+
+ kmem_free(ctx, sizeof (gcm_ctx_t));
+ }
+}
diff --git a/module/icp/algs/sha1/sha1.c b/module/icp/algs/sha1/sha1.c
new file mode 100644
index 000000000..b826c54ad
--- /dev/null
+++ b/module/icp/algs/sha1/sha1.c
@@ -0,0 +1,663 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * The basic framework for this code came from the reference
+ * implementation for MD5. That implementation is Copyright (C)
+ * 1991-2, RSA Data Security, Inc. Created 1991. All rights reserved.
+ *
+ * License to copy and use this software is granted provided that it
+ * is identified as the "RSA Data Security, Inc. MD5 Message-Digest
+ * Algorithm" in all material mentioning or referencing this software
+ * or this function.
+ *
+ * License is also granted to make and use derivative works provided
+ * that such works are identified as "derived from the RSA Data
+ * Security, Inc. MD5 Message-Digest Algorithm" in all material
+ * mentioning or referencing the derived work.
+ *
+ * RSA Data Security, Inc. makes no representations concerning either
+ * the merchantability of this software or the suitability of this
+ * software for any particular purpose. It is provided "as is"
+ * without express or implied warranty of any kind.
+ *
+ * These notices must be retained in any copies of any part of this
+ * documentation and/or software.
+ *
+ * NOTE: Cleaned-up and optimized version of SHA1, based on the FIPS 180-1
+ * standard, available at http://www.itl.nist.gov/fipspubs/fip180-1.htm
+ * Not as fast as one would like -- further optimizations are encouraged
+ * and appreciated.
+ */
+
+#include <sys/zfs_context.h>
+#include <sha1/sha1.h>
+#include <sha1/sha1_consts.h>
+
+#ifdef _LITTLE_ENDIAN
+#include <sys/byteorder.h>
+#define HAVE_HTONL
+#endif
+
+#define _RESTRICT_KYWD
+
+static void Encode(uint8_t *, const uint32_t *, size_t);
+
+#if defined(__amd64)
+
+#define SHA1_TRANSFORM(ctx, in) sha1_block_data_order((ctx), (in), 1)
+#define SHA1_TRANSFORM_BLOCKS(ctx, in, num) sha1_block_data_order((ctx), \
+ (in), (num))
+
+void sha1_block_data_order(SHA1_CTX *ctx, const void *inpp, size_t num_blocks);
+
+#else
+
+#define SHA1_TRANSFORM(ctx, in) SHA1Transform((ctx), (in))
+
+static void SHA1Transform(SHA1_CTX *, const uint8_t *);
+
+#endif
+
+
+static uint8_t PADDING[64] = { 0x80, /* all zeros */ };
+
+/*
+ * F, G, and H are the basic SHA1 functions.
+ */
+#define F(b, c, d) (((b) & (c)) | ((~b) & (d)))
+#define G(b, c, d) ((b) ^ (c) ^ (d))
+#define H(b, c, d) (((b) & (c)) | (((b)|(c)) & (d)))
+
+/*
+ * ROTATE_LEFT rotates x left n bits.
+ */
+
+#if defined(__GNUC__) && defined(_LP64)
+static __inline__ uint64_t
+ROTATE_LEFT(uint64_t value, uint32_t n)
+{
+ uint32_t t32;
+
+ t32 = (uint32_t)value;
+ return ((t32 << n) | (t32 >> (32 - n)));
+}
+
+#else
+
+#define ROTATE_LEFT(x, n) \
+ (((x) << (n)) | ((x) >> ((sizeof (x) * NBBY)-(n))))
+
+#endif
+
+
+/*
+ * SHA1Init()
+ *
+ * purpose: initializes the sha1 context and begins an sha1 digest operation
+ * input: SHA1_CTX * : the context to initialize.
+ * output: void
+ */
+
+void
+SHA1Init(SHA1_CTX *ctx)
+{
+ ctx->count[0] = ctx->count[1] = 0;
+
+ /*
+ * load magic initialization constants. Tell lint
+ * that these constants are unsigned by using U.
+ */
+
+ ctx->state[0] = 0x67452301U;
+ ctx->state[1] = 0xefcdab89U;
+ ctx->state[2] = 0x98badcfeU;
+ ctx->state[3] = 0x10325476U;
+ ctx->state[4] = 0xc3d2e1f0U;
+}
+
+void
+SHA1Update(SHA1_CTX *ctx, const void *inptr, size_t input_len)
+{
+ uint32_t i, buf_index, buf_len;
+ const uint8_t *input = inptr;
+#if defined(__amd64)
+ uint32_t block_count;
+#endif /* __amd64 */
+
+ /* check for noop */
+ if (input_len == 0)
+ return;
+
+ /* compute number of bytes mod 64 */
+ buf_index = (ctx->count[1] >> 3) & 0x3F;
+
+ /* update number of bits */
+ if ((ctx->count[1] += (input_len << 3)) < (input_len << 3))
+ ctx->count[0]++;
+
+ ctx->count[0] += (input_len >> 29);
+
+ buf_len = 64 - buf_index;
+
+ /* transform as many times as possible */
+ i = 0;
+ if (input_len >= buf_len) {
+
+ /*
+ * general optimization:
+ *
+ * only do initial bcopy() and SHA1Transform() if
+ * buf_index != 0. if buf_index == 0, we're just
+ * wasting our time doing the bcopy() since there
+ * wasn't any data left over from a previous call to
+ * SHA1Update().
+ */
+
+ if (buf_index) {
+ bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len);
+ SHA1_TRANSFORM(ctx, ctx->buf_un.buf8);
+ i = buf_len;
+ }
+
+#if !defined(__amd64)
+ for (; i + 63 < input_len; i += 64)
+ SHA1_TRANSFORM(ctx, &input[i]);
+#else
+ block_count = (input_len - i) >> 6;
+ if (block_count > 0) {
+ SHA1_TRANSFORM_BLOCKS(ctx, &input[i], block_count);
+ i += block_count << 6;
+ }
+#endif /* !__amd64 */
+
+ /*
+ * general optimization:
+ *
+ * if i and input_len are the same, return now instead
+ * of calling bcopy(), since the bcopy() in this case
+ * will be an expensive nop.
+ */
+
+ if (input_len == i)
+ return;
+
+ buf_index = 0;
+ }
+
+ /* buffer remaining input */
+ bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i);
+}
+
+/*
+ * SHA1Final()
+ *
+ * purpose: ends an sha1 digest operation, finalizing the message digest and
+ * zeroing the context.
+ * input: uchar_t * : A buffer to store the digest.
+ * : The function actually uses void* because many
+ * : callers pass things other than uchar_t here.
+ * SHA1_CTX * : the context to finalize, save, and zero
+ * output: void
+ */
+
+void
+SHA1Final(void *digest, SHA1_CTX *ctx)
+{
+ uint8_t bitcount_be[sizeof (ctx->count)];
+ uint32_t index = (ctx->count[1] >> 3) & 0x3f;
+
+ /* store bit count, big endian */
+ Encode(bitcount_be, ctx->count, sizeof (bitcount_be));
+
+ /* pad out to 56 mod 64 */
+ SHA1Update(ctx, PADDING, ((index < 56) ? 56 : 120) - index);
+
+ /* append length (before padding) */
+ SHA1Update(ctx, bitcount_be, sizeof (bitcount_be));
+
+ /* store state in digest */
+ Encode(digest, ctx->state, sizeof (ctx->state));
+
+ /* zeroize sensitive information */
+ bzero(ctx, sizeof (*ctx));
+}
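+
+/*
+ * Illustrative sketch (not from the original Illumos sources): one-shot
+ * digest using the three entry points above.
+ */
+#if 0
+static void
+sha1_example(const void *msg, size_t len, uint8_t digest[20])
+{
+	SHA1_CTX ctx;
+
+	SHA1Init(&ctx);
+	SHA1Update(&ctx, msg, len);
+	SHA1Final(digest, &ctx);	/* also zeroizes ctx */
+}
+#endif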
+
+
+#if !defined(__amd64)
+
+typedef uint32_t sha1word;
+
+/*
+ * sparc optimization:
+ *
+ * on the sparc, we can load big endian 32-bit data easily. note that
+ * special care must be taken to ensure the address is 32-bit aligned.
+ * in the interest of speed, we don't check to make sure, since
+ * careful programming can guarantee this for us.
+ */
+
+#if defined(_BIG_ENDIAN)
+#define LOAD_BIG_32(addr) (*(uint32_t *)(addr))
+
+#elif defined(HAVE_HTONL)
+#define LOAD_BIG_32(addr) htonl(*((uint32_t *)(addr)))
+
+#else
+/* little endian -- will work on big endian, but slowly */
+#define LOAD_BIG_32(addr) \
+ (((addr)[0] << 24) | ((addr)[1] << 16) | ((addr)[2] << 8) | (addr)[3])
+#endif /* _BIG_ENDIAN */
+
+/*
+ * SHA1Transform()
+ */
+#if defined(W_ARRAY)
+#define W(n) w[n]
+#else /* !defined(W_ARRAY) */
+#define W(n) w_ ## n
+#endif /* !defined(W_ARRAY) */
+
+void /* CSTYLED */
+SHA1Transform(SHA1_CTX *ctx, const uint8_t blk[64])
+{
+ /* CSTYLED */
+ sha1word a = ctx->state[0];
+ sha1word b = ctx->state[1];
+ sha1word c = ctx->state[2];
+ sha1word d = ctx->state[3];
+ sha1word e = ctx->state[4];
+
+#if defined(W_ARRAY)
+ sha1word w[16];
+#else /* !defined(W_ARRAY) */
+ sha1word w_0, w_1, w_2, w_3, w_4, w_5, w_6, w_7;
+ sha1word w_8, w_9, w_10, w_11, w_12, w_13, w_14, w_15;
+#endif /* !defined(W_ARRAY) */
+
+ W(0) = LOAD_BIG_32((void *)(blk + 0));
+ W(1) = LOAD_BIG_32((void *)(blk + 4));
+ W(2) = LOAD_BIG_32((void *)(blk + 8));
+ W(3) = LOAD_BIG_32((void *)(blk + 12));
+ W(4) = LOAD_BIG_32((void *)(blk + 16));
+ W(5) = LOAD_BIG_32((void *)(blk + 20));
+ W(6) = LOAD_BIG_32((void *)(blk + 24));
+ W(7) = LOAD_BIG_32((void *)(blk + 28));
+ W(8) = LOAD_BIG_32((void *)(blk + 32));
+ W(9) = LOAD_BIG_32((void *)(blk + 36));
+ W(10) = LOAD_BIG_32((void *)(blk + 40));
+ W(11) = LOAD_BIG_32((void *)(blk + 44));
+ W(12) = LOAD_BIG_32((void *)(blk + 48));
+ W(13) = LOAD_BIG_32((void *)(blk + 52));
+ W(14) = LOAD_BIG_32((void *)(blk + 56));
+ W(15) = LOAD_BIG_32((void *)(blk + 60));
+
+ /*
+ * general optimization:
+ *
+ * even though this approach is described in the standard as
+ * being slower algorithmically, it is 30-40% faster than the
+ * "faster" version under SPARC, because this version has more
+ * of the constraints specified at compile-time and uses fewer
+ * variables (and therefore has better register utilization)
+ * than its "speedier" brother. (i've tried both, trust me)
+ *
+ * for either method given in the spec, there is an "assignment"
+ * phase where the following takes place:
+ *
+ * tmp = (main_computation);
+ * e = d; d = c; c = rotate_left(b, 30); b = a; a = tmp;
+ *
+ * we can make the algorithm go faster by not doing this work,
+ * but just pretending that `d' is now `e', etc. this works
+ * really well and obviates the need for a temporary variable.
+ * however, we still explicitly perform the rotate action,
+ * since it is cheaper on SPARC to do it once than to have to
+ * do it over and over again.
+ */
+
+ /* round 1 */
+ e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(0) + SHA1_CONST(0); /* 0 */
+ b = ROTATE_LEFT(b, 30);
+
+ d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(1) + SHA1_CONST(0); /* 1 */
+ a = ROTATE_LEFT(a, 30);
+
+ c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(2) + SHA1_CONST(0); /* 2 */
+ e = ROTATE_LEFT(e, 30);
+
+ b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(3) + SHA1_CONST(0); /* 3 */
+ d = ROTATE_LEFT(d, 30);
+
+ a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(4) + SHA1_CONST(0); /* 4 */
+ c = ROTATE_LEFT(c, 30);
+
+ e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(5) + SHA1_CONST(0); /* 5 */
+ b = ROTATE_LEFT(b, 30);
+
+ d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(6) + SHA1_CONST(0); /* 6 */
+ a = ROTATE_LEFT(a, 30);
+
+ c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(7) + SHA1_CONST(0); /* 7 */
+ e = ROTATE_LEFT(e, 30);
+
+ b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(8) + SHA1_CONST(0); /* 8 */
+ d = ROTATE_LEFT(d, 30);
+
+ a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(9) + SHA1_CONST(0); /* 9 */
+ c = ROTATE_LEFT(c, 30);
+
+ e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(10) + SHA1_CONST(0); /* 10 */
+ b = ROTATE_LEFT(b, 30);
+
+ d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(11) + SHA1_CONST(0); /* 11 */
+ a = ROTATE_LEFT(a, 30);
+
+ c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(12) + SHA1_CONST(0); /* 12 */
+ e = ROTATE_LEFT(e, 30);
+
+ b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(13) + SHA1_CONST(0); /* 13 */
+ d = ROTATE_LEFT(d, 30);
+
+ a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(14) + SHA1_CONST(0); /* 14 */
+ c = ROTATE_LEFT(c, 30);
+
+ e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(15) + SHA1_CONST(0); /* 15 */
+ b = ROTATE_LEFT(b, 30);
+
+ W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1); /* 16 */
+ d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(0) + SHA1_CONST(0);
+ a = ROTATE_LEFT(a, 30);
+
+ W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1); /* 17 */
+ c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(1) + SHA1_CONST(0);
+ e = ROTATE_LEFT(e, 30);
+
+ W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1); /* 18 */
+ b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(2) + SHA1_CONST(0);
+ d = ROTATE_LEFT(d, 30);
+
+ W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1); /* 19 */
+ a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(3) + SHA1_CONST(0);
+ c = ROTATE_LEFT(c, 30);
+
+ /* round 2 */
+ W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1); /* 20 */
+ e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(4) + SHA1_CONST(1);
+ b = ROTATE_LEFT(b, 30);
+
+ W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1); /* 21 */
+ d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(5) + SHA1_CONST(1);
+ a = ROTATE_LEFT(a, 30);
+
+ W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1); /* 22 */
+ c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(6) + SHA1_CONST(1);
+ e = ROTATE_LEFT(e, 30);
+
+ W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1); /* 23 */
+ b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(7) + SHA1_CONST(1);
+ d = ROTATE_LEFT(d, 30);
+
+ W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1); /* 24 */
+ a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(8) + SHA1_CONST(1);
+ c = ROTATE_LEFT(c, 30);
+
+ W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1); /* 25 */
+ e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(9) + SHA1_CONST(1);
+ b = ROTATE_LEFT(b, 30);
+
+ W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1); /* 26 */
+ d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(10) + SHA1_CONST(1);
+ a = ROTATE_LEFT(a, 30);
+
+ W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1); /* 27 */
+ c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(11) + SHA1_CONST(1);
+ e = ROTATE_LEFT(e, 30);
+
+ W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1); /* 28 */
+ b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(12) + SHA1_CONST(1);
+ d = ROTATE_LEFT(d, 30);
+
+ W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1); /* 29 */
+ a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(13) + SHA1_CONST(1);
+ c = ROTATE_LEFT(c, 30);
+
+ W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1); /* 30 */
+ e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(14) + SHA1_CONST(1);
+ b = ROTATE_LEFT(b, 30);
+
+ W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1); /* 31 */
+ d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(15) + SHA1_CONST(1);
+ a = ROTATE_LEFT(a, 30);
+
+ W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1); /* 32 */
+ c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(0) + SHA1_CONST(1);
+ e = ROTATE_LEFT(e, 30);
+
+ W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1); /* 33 */
+ b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(1) + SHA1_CONST(1);
+ d = ROTATE_LEFT(d, 30);
+
+ W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1); /* 34 */
+ a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(2) + SHA1_CONST(1);
+ c = ROTATE_LEFT(c, 30);
+
+ W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1); /* 35 */
+ e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(3) + SHA1_CONST(1);
+ b = ROTATE_LEFT(b, 30);
+
+ W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1); /* 36 */
+ d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(4) + SHA1_CONST(1);
+ a = ROTATE_LEFT(a, 30);
+
+ W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1); /* 37 */
+ c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(5) + SHA1_CONST(1);
+ e = ROTATE_LEFT(e, 30);
+
+ W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1); /* 38 */
+ b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(6) + SHA1_CONST(1);
+ d = ROTATE_LEFT(d, 30);
+
+ W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1); /* 39 */
+ a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(7) + SHA1_CONST(1);
+ c = ROTATE_LEFT(c, 30);
+
+ /* round 3 */
+ W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1); /* 40 */
+ e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(8) + SHA1_CONST(2);
+ b = ROTATE_LEFT(b, 30);
+
+ W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1); /* 41 */
+ d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(9) + SHA1_CONST(2);
+ a = ROTATE_LEFT(a, 30);
+
+ W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1); /* 42 */
+ c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(10) + SHA1_CONST(2);
+ e = ROTATE_LEFT(e, 30);
+
+ W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1); /* 43 */
+ b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(11) + SHA1_CONST(2);
+ d = ROTATE_LEFT(d, 30);
+
+ W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1); /* 44 */
+ a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(12) + SHA1_CONST(2);
+ c = ROTATE_LEFT(c, 30);
+
+ W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1); /* 45 */
+ e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(13) + SHA1_CONST(2);
+ b = ROTATE_LEFT(b, 30);
+
+ W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1); /* 46 */
+ d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(14) + SHA1_CONST(2);
+ a = ROTATE_LEFT(a, 30);
+
+ W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1); /* 47 */
+ c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(15) + SHA1_CONST(2);
+ e = ROTATE_LEFT(e, 30);
+
+ W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1); /* 48 */
+ b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(0) + SHA1_CONST(2);
+ d = ROTATE_LEFT(d, 30);
+
+ W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1); /* 49 */
+ a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(1) + SHA1_CONST(2);
+ c = ROTATE_LEFT(c, 30);
+
+ W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1); /* 50 */
+ e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(2) + SHA1_CONST(2);
+ b = ROTATE_LEFT(b, 30);
+
+ W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1); /* 51 */
+ d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(3) + SHA1_CONST(2);
+ a = ROTATE_LEFT(a, 30);
+
+ W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1); /* 52 */
+ c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(4) + SHA1_CONST(2);
+ e = ROTATE_LEFT(e, 30);
+
+ W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1); /* 53 */
+ b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(5) + SHA1_CONST(2);
+ d = ROTATE_LEFT(d, 30);
+
+ W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1); /* 54 */
+ a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(6) + SHA1_CONST(2);
+ c = ROTATE_LEFT(c, 30);
+
+ W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1); /* 55 */
+ e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(7) + SHA1_CONST(2);
+ b = ROTATE_LEFT(b, 30);
+
+ W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1); /* 56 */
+ d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(8) + SHA1_CONST(2);
+ a = ROTATE_LEFT(a, 30);
+
+ W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1); /* 57 */
+ c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(9) + SHA1_CONST(2);
+ e = ROTATE_LEFT(e, 30);
+
+ W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1); /* 58 */
+ b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(10) + SHA1_CONST(2);
+ d = ROTATE_LEFT(d, 30);
+
+ W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1); /* 59 */
+ a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(11) + SHA1_CONST(2);
+ c = ROTATE_LEFT(c, 30);
+
+ /* round 4 */
+ W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1); /* 60 */
+ e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(12) + SHA1_CONST(3);
+ b = ROTATE_LEFT(b, 30);
+
+ W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1); /* 61 */
+ d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(13) + SHA1_CONST(3);
+ a = ROTATE_LEFT(a, 30);
+
+ W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1); /* 62 */
+ c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(14) + SHA1_CONST(3);
+ e = ROTATE_LEFT(e, 30);
+
+ W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1); /* 63 */
+ b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(15) + SHA1_CONST(3);
+ d = ROTATE_LEFT(d, 30);
+
+ W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1); /* 64 */
+ a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(0) + SHA1_CONST(3);
+ c = ROTATE_LEFT(c, 30);
+
+ W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1); /* 65 */
+ e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(1) + SHA1_CONST(3);
+ b = ROTATE_LEFT(b, 30);
+
+ W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1); /* 66 */
+ d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(2) + SHA1_CONST(3);
+ a = ROTATE_LEFT(a, 30);
+
+ W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1); /* 67 */
+ c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(3) + SHA1_CONST(3);
+ e = ROTATE_LEFT(e, 30);
+
+ W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1); /* 68 */
+ b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(4) + SHA1_CONST(3);
+ d = ROTATE_LEFT(d, 30);
+
+ W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1); /* 69 */
+ a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(5) + SHA1_CONST(3);
+ c = ROTATE_LEFT(c, 30);
+
+ W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1); /* 70 */
+ e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(6) + SHA1_CONST(3);
+ b = ROTATE_LEFT(b, 30);
+
+ W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1); /* 71 */
+ d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(7) + SHA1_CONST(3);
+ a = ROTATE_LEFT(a, 30);
+
+ W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1); /* 72 */
+ c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(8) + SHA1_CONST(3);
+ e = ROTATE_LEFT(e, 30);
+
+ W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1); /* 73 */
+ b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(9) + SHA1_CONST(3);
+ d = ROTATE_LEFT(d, 30);
+
+ W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1); /* 74 */
+ a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(10) + SHA1_CONST(3);
+ c = ROTATE_LEFT(c, 30);
+
+ W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1); /* 75 */
+ e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(11) + SHA1_CONST(3);
+ b = ROTATE_LEFT(b, 30);
+
+ W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1); /* 76 */
+ d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(12) + SHA1_CONST(3);
+ a = ROTATE_LEFT(a, 30);
+
+ W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1); /* 77 */
+ c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(13) + SHA1_CONST(3);
+ e = ROTATE_LEFT(e, 30);
+
+ W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1); /* 78 */
+ b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(14) + SHA1_CONST(3);
+ d = ROTATE_LEFT(d, 30);
+
+ W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1); /* 79 */
+
+ ctx->state[0] += ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(15) +
+ SHA1_CONST(3);
+ ctx->state[1] += b;
+ ctx->state[2] += ROTATE_LEFT(c, 30);
+ ctx->state[3] += d;
+ ctx->state[4] += e;
+
+ /* zeroize sensitive information */
+ W(0) = W(1) = W(2) = W(3) = W(4) = W(5) = W(6) = W(7) = W(8) = 0;
+ W(9) = W(10) = W(11) = W(12) = W(13) = W(14) = W(15) = 0;
+}
+#endif /* !__amd64 */
+
+
+/*
+ * Encode()
+ *
+ * purpose: to convert a list of numbers from host byte order to big endian
+ * input: uint8_t * : place to store the converted big endian numbers
+ * uint32_t * : place to get numbers to convert from
+ * size_t : the length of the input in bytes
+ * output: void
+ */
+
+static void
+Encode(uint8_t *_RESTRICT_KYWD output, const uint32_t *_RESTRICT_KYWD input,
+ size_t len)
+{
+ size_t i, j;
+
+ for (i = 0, j = 0; j < len; i++, j += 4) {
+ output[j] = (input[i] >> 24) & 0xff;
+ output[j + 1] = (input[i] >> 16) & 0xff;
+ output[j + 2] = (input[i] >> 8) & 0xff;
+ output[j + 3] = input[i] & 0xff;
+ }
+}
diff --git a/module/icp/algs/sha2/sha2.c b/module/icp/algs/sha2/sha2.c
new file mode 100644
index 000000000..792ca8825
--- /dev/null
+++ b/module/icp/algs/sha2/sha2.c
@@ -0,0 +1,495 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * Copyright 2013 Saso Kiselkov. All rights reserved.
+ */
+
+/*
+ * The basic framework for this code came from the reference
+ * implementation for MD5. That implementation is Copyright (C)
+ * 1991-2, RSA Data Security, Inc. Created 1991. All rights reserved.
+ *
+ * License to copy and use this software is granted provided that it
+ * is identified as the "RSA Data Security, Inc. MD5 Message-Digest
+ * Algorithm" in all material mentioning or referencing this software
+ * or this function.
+ *
+ * License is also granted to make and use derivative works provided
+ * that such works are identified as "derived from the RSA Data
+ * Security, Inc. MD5 Message-Digest Algorithm" in all material
+ * mentioning or referencing the derived work.
+ *
+ * RSA Data Security, Inc. makes no representations concerning either
+ * the merchantability of this software or the suitability of this
+ * software for any particular purpose. It is provided "as is"
+ * without express or implied warranty of any kind.
+ *
+ * These notices must be retained in any copies of any part of this
+ * documentation and/or software.
+ *
+ * NOTE: Cleaned-up and optimized version of SHA2, based on the FIPS 180-2
+ * standard, available at
+ * http://csrc.nist.gov/publications/fips/fips180-2/fips180-2.pdf
+ * Not as fast as one would like -- further optimizations are encouraged
+ * and appreciated.
+ */
+
+#include <sys/zfs_context.h>
+#define _SHA2_IMPL
+#include <sha2/sha2.h>
+#include <sha2/sha2_consts.h>
+
+#define _RESTRICT_KYWD
+
+#ifdef _LITTLE_ENDIAN
+#include <sys/byteorder.h>
+#define HAVE_HTONL
+#endif
+
+static void Encode(uint8_t *, uint32_t *, size_t);
+
+#if defined(__amd64)
+#define SHA256Transform(ctx, in) SHA256TransformBlocks((ctx), (in), 1)
+void SHA256TransformBlocks(SHA2_CTX *ctx, const void *in, size_t num);
+#else
+static void SHA256Transform(SHA2_CTX *, const uint8_t *);
+#endif /* __amd64 */
+
+static uint8_t PADDING[128] = { 0x80, /* all zeros */ };
+
+/* Ch and Maj are the basic SHA2 functions. */
+#define Ch(b, c, d) (((b) & (c)) ^ ((~b) & (d)))
+#define Maj(b, c, d) (((b) & (c)) ^ ((b) & (d)) ^ ((c) & (d)))
+
+/* Rotates x right n bits. */
+#define ROTR(x, n) \
+ (((x) >> (n)) | ((x) << ((sizeof (x) * NBBY)-(n))))
+
+/* Shift x right n bits */
+#define SHR(x, n) ((x) >> (n))
+
+/* SHA256 Functions */
+#define BIGSIGMA0_256(x) (ROTR((x), 2) ^ ROTR((x), 13) ^ ROTR((x), 22))
+#define BIGSIGMA1_256(x) (ROTR((x), 6) ^ ROTR((x), 11) ^ ROTR((x), 25))
+#define SIGMA0_256(x) (ROTR((x), 7) ^ ROTR((x), 18) ^ SHR((x), 3))
+#define SIGMA1_256(x) (ROTR((x), 17) ^ ROTR((x), 19) ^ SHR((x), 10))
+
+#define SHA256ROUND(a, b, c, d, e, f, g, h, i, w) \
+ T1 = h + BIGSIGMA1_256(e) + Ch(e, f, g) + SHA256_CONST(i) + w; \
+ d += T1; \
+ T2 = BIGSIGMA0_256(a) + Maj(a, b, c); \
+ h = T1 + T2
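+
+/*
+ * Illustrative note (not from the original Illumos sources): this is
+ * one FIPS 180-2 round,
+ *
+ *	T1 = h + Sigma1(e) + Ch(e, f, g) + K[i] + W[i]
+ *	T2 = Sigma0(a) + Maj(a, b, c)
+ *	d += T1; h = T1 + T2
+ *
+ * with the usual a..h register rotation done by permuting the macro
+ * arguments at each call site instead of copying variables.
+ */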
+
+/*
+ * sparc optimization:
+ *
+ * on the sparc, we can load big endian 32-bit data easily. note that
+ * special care must be taken to ensure the address is 32-bit aligned.
+ * in the interest of speed, we don't check to make sure, since
+ * careful programming can guarantee this for us.
+ */
+
+#if defined(_BIG_ENDIAN)
+#define LOAD_BIG_32(addr) (*(uint32_t *)(addr))
+#define LOAD_BIG_64(addr) (*(uint64_t *)(addr))
+
+#elif defined(HAVE_HTONL)
+#define LOAD_BIG_32(addr) htonl(*((uint32_t *)(addr)))
+#define LOAD_BIG_64(addr) htonll(*((uint64_t *)(addr)))
+
+#else
+/* little endian -- will work on big endian, but slowly */
+#define LOAD_BIG_32(addr) \
+ (((addr)[0] << 24) | ((addr)[1] << 16) | ((addr)[2] << 8) | (addr)[3])
+#define LOAD_BIG_64(addr) \
+ (((uint64_t)(addr)[0] << 56) | ((uint64_t)(addr)[1] << 48) | \
+ ((uint64_t)(addr)[2] << 40) | ((uint64_t)(addr)[3] << 32) | \
+ ((uint64_t)(addr)[4] << 24) | ((uint64_t)(addr)[5] << 16) | \
+ ((uint64_t)(addr)[6] << 8) | (uint64_t)(addr)[7])
+#endif /* _BIG_ENDIAN */
+
+
+#if !defined(__amd64)
+/* SHA256 Transform */
+
+static void
+SHA256Transform(SHA2_CTX *ctx, const uint8_t *blk)
+{
+ uint32_t a = ctx->state.s32[0];
+ uint32_t b = ctx->state.s32[1];
+ uint32_t c = ctx->state.s32[2];
+ uint32_t d = ctx->state.s32[3];
+ uint32_t e = ctx->state.s32[4];
+ uint32_t f = ctx->state.s32[5];
+ uint32_t g = ctx->state.s32[6];
+ uint32_t h = ctx->state.s32[7];
+
+ uint32_t w0, w1, w2, w3, w4, w5, w6, w7;
+ uint32_t w8, w9, w10, w11, w12, w13, w14, w15;
+ uint32_t T1, T2;
+
+ if ((uintptr_t)blk & 0x3) { /* not 4-byte aligned? */
+ bcopy(blk, ctx->buf_un.buf32, sizeof (ctx->buf_un.buf32));
+ blk = (uint8_t *)ctx->buf_un.buf32;
+ }
+
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w0 = LOAD_BIG_32(blk + 4 * 0);
+ SHA256ROUND(a, b, c, d, e, f, g, h, 0, w0);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w1 = LOAD_BIG_32(blk + 4 * 1);
+ SHA256ROUND(h, a, b, c, d, e, f, g, 1, w1);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w2 = LOAD_BIG_32(blk + 4 * 2);
+ SHA256ROUND(g, h, a, b, c, d, e, f, 2, w2);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w3 = LOAD_BIG_32(blk + 4 * 3);
+ SHA256ROUND(f, g, h, a, b, c, d, e, 3, w3);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w4 = LOAD_BIG_32(blk + 4 * 4);
+ SHA256ROUND(e, f, g, h, a, b, c, d, 4, w4);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w5 = LOAD_BIG_32(blk + 4 * 5);
+ SHA256ROUND(d, e, f, g, h, a, b, c, 5, w5);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w6 = LOAD_BIG_32(blk + 4 * 6);
+ SHA256ROUND(c, d, e, f, g, h, a, b, 6, w6);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w7 = LOAD_BIG_32(blk + 4 * 7);
+ SHA256ROUND(b, c, d, e, f, g, h, a, 7, w7);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w8 = LOAD_BIG_32(blk + 4 * 8);
+ SHA256ROUND(a, b, c, d, e, f, g, h, 8, w8);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w9 = LOAD_BIG_32(blk + 4 * 9);
+ SHA256ROUND(h, a, b, c, d, e, f, g, 9, w9);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w10 = LOAD_BIG_32(blk + 4 * 10);
+ SHA256ROUND(g, h, a, b, c, d, e, f, 10, w10);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w11 = LOAD_BIG_32(blk + 4 * 11);
+ SHA256ROUND(f, g, h, a, b, c, d, e, 11, w11);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w12 = LOAD_BIG_32(blk + 4 * 12);
+ SHA256ROUND(e, f, g, h, a, b, c, d, 12, w12);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w13 = LOAD_BIG_32(blk + 4 * 13);
+ SHA256ROUND(d, e, f, g, h, a, b, c, 13, w13);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w14 = LOAD_BIG_32(blk + 4 * 14);
+ SHA256ROUND(c, d, e, f, g, h, a, b, 14, w14);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w15 = LOAD_BIG_32(blk + 4 * 15);
+ SHA256ROUND(b, c, d, e, f, g, h, a, 15, w15);
+
+ w0 = SIGMA1_256(w14) + w9 + SIGMA0_256(w1) + w0;
+ SHA256ROUND(a, b, c, d, e, f, g, h, 16, w0);
+ w1 = SIGMA1_256(w15) + w10 + SIGMA0_256(w2) + w1;
+ SHA256ROUND(h, a, b, c, d, e, f, g, 17, w1);
+ w2 = SIGMA1_256(w0) + w11 + SIGMA0_256(w3) + w2;
+ SHA256ROUND(g, h, a, b, c, d, e, f, 18, w2);
+ w3 = SIGMA1_256(w1) + w12 + SIGMA0_256(w4) + w3;
+ SHA256ROUND(f, g, h, a, b, c, d, e, 19, w3);
+ w4 = SIGMA1_256(w2) + w13 + SIGMA0_256(w5) + w4;
+ SHA256ROUND(e, f, g, h, a, b, c, d, 20, w4);
+ w5 = SIGMA1_256(w3) + w14 + SIGMA0_256(w6) + w5;
+ SHA256ROUND(d, e, f, g, h, a, b, c, 21, w5);
+ w6 = SIGMA1_256(w4) + w15 + SIGMA0_256(w7) + w6;
+ SHA256ROUND(c, d, e, f, g, h, a, b, 22, w6);
+ w7 = SIGMA1_256(w5) + w0 + SIGMA0_256(w8) + w7;
+ SHA256ROUND(b, c, d, e, f, g, h, a, 23, w7);
+ w8 = SIGMA1_256(w6) + w1 + SIGMA0_256(w9) + w8;
+ SHA256ROUND(a, b, c, d, e, f, g, h, 24, w8);
+ w9 = SIGMA1_256(w7) + w2 + SIGMA0_256(w10) + w9;
+ SHA256ROUND(h, a, b, c, d, e, f, g, 25, w9);
+ w10 = SIGMA1_256(w8) + w3 + SIGMA0_256(w11) + w10;
+ SHA256ROUND(g, h, a, b, c, d, e, f, 26, w10);
+ w11 = SIGMA1_256(w9) + w4 + SIGMA0_256(w12) + w11;
+ SHA256ROUND(f, g, h, a, b, c, d, e, 27, w11);
+ w12 = SIGMA1_256(w10) + w5 + SIGMA0_256(w13) + w12;
+ SHA256ROUND(e, f, g, h, a, b, c, d, 28, w12);
+ w13 = SIGMA1_256(w11) + w6 + SIGMA0_256(w14) + w13;
+ SHA256ROUND(d, e, f, g, h, a, b, c, 29, w13);
+ w14 = SIGMA1_256(w12) + w7 + SIGMA0_256(w15) + w14;
+ SHA256ROUND(c, d, e, f, g, h, a, b, 30, w14);
+ w15 = SIGMA1_256(w13) + w8 + SIGMA0_256(w0) + w15;
+ SHA256ROUND(b, c, d, e, f, g, h, a, 31, w15);
+
+ w0 = SIGMA1_256(w14) + w9 + SIGMA0_256(w1) + w0;
+ SHA256ROUND(a, b, c, d, e, f, g, h, 32, w0);
+ w1 = SIGMA1_256(w15) + w10 + SIGMA0_256(w2) + w1;
+ SHA256ROUND(h, a, b, c, d, e, f, g, 33, w1);
+ w2 = SIGMA1_256(w0) + w11 + SIGMA0_256(w3) + w2;
+ SHA256ROUND(g, h, a, b, c, d, e, f, 34, w2);
+ w3 = SIGMA1_256(w1) + w12 + SIGMA0_256(w4) + w3;
+ SHA256ROUND(f, g, h, a, b, c, d, e, 35, w3);
+ w4 = SIGMA1_256(w2) + w13 + SIGMA0_256(w5) + w4;
+ SHA256ROUND(e, f, g, h, a, b, c, d, 36, w4);
+ w5 = SIGMA1_256(w3) + w14 + SIGMA0_256(w6) + w5;
+ SHA256ROUND(d, e, f, g, h, a, b, c, 37, w5);
+ w6 = SIGMA1_256(w4) + w15 + SIGMA0_256(w7) + w6;
+ SHA256ROUND(c, d, e, f, g, h, a, b, 38, w6);
+ w7 = SIGMA1_256(w5) + w0 + SIGMA0_256(w8) + w7;
+ SHA256ROUND(b, c, d, e, f, g, h, a, 39, w7);
+ w8 = SIGMA1_256(w6) + w1 + SIGMA0_256(w9) + w8;
+ SHA256ROUND(a, b, c, d, e, f, g, h, 40, w8);
+ w9 = SIGMA1_256(w7) + w2 + SIGMA0_256(w10) + w9;
+ SHA256ROUND(h, a, b, c, d, e, f, g, 41, w9);
+ w10 = SIGMA1_256(w8) + w3 + SIGMA0_256(w11) + w10;
+ SHA256ROUND(g, h, a, b, c, d, e, f, 42, w10);
+ w11 = SIGMA1_256(w9) + w4 + SIGMA0_256(w12) + w11;
+ SHA256ROUND(f, g, h, a, b, c, d, e, 43, w11);
+ w12 = SIGMA1_256(w10) + w5 + SIGMA0_256(w13) + w12;
+ SHA256ROUND(e, f, g, h, a, b, c, d, 44, w12);
+ w13 = SIGMA1_256(w11) + w6 + SIGMA0_256(w14) + w13;
+ SHA256ROUND(d, e, f, g, h, a, b, c, 45, w13);
+ w14 = SIGMA1_256(w12) + w7 + SIGMA0_256(w15) + w14;
+ SHA256ROUND(c, d, e, f, g, h, a, b, 46, w14);
+ w15 = SIGMA1_256(w13) + w8 + SIGMA0_256(w0) + w15;
+ SHA256ROUND(b, c, d, e, f, g, h, a, 47, w15);
+
+ w0 = SIGMA1_256(w14) + w9 + SIGMA0_256(w1) + w0;
+ SHA256ROUND(a, b, c, d, e, f, g, h, 48, w0);
+ w1 = SIGMA1_256(w15) + w10 + SIGMA0_256(w2) + w1;
+ SHA256ROUND(h, a, b, c, d, e, f, g, 49, w1);
+ w2 = SIGMA1_256(w0) + w11 + SIGMA0_256(w3) + w2;
+ SHA256ROUND(g, h, a, b, c, d, e, f, 50, w2);
+ w3 = SIGMA1_256(w1) + w12 + SIGMA0_256(w4) + w3;
+ SHA256ROUND(f, g, h, a, b, c, d, e, 51, w3);
+ w4 = SIGMA1_256(w2) + w13 + SIGMA0_256(w5) + w4;
+ SHA256ROUND(e, f, g, h, a, b, c, d, 52, w4);
+ w5 = SIGMA1_256(w3) + w14 + SIGMA0_256(w6) + w5;
+ SHA256ROUND(d, e, f, g, h, a, b, c, 53, w5);
+ w6 = SIGMA1_256(w4) + w15 + SIGMA0_256(w7) + w6;
+ SHA256ROUND(c, d, e, f, g, h, a, b, 54, w6);
+ w7 = SIGMA1_256(w5) + w0 + SIGMA0_256(w8) + w7;
+ SHA256ROUND(b, c, d, e, f, g, h, a, 55, w7);
+ w8 = SIGMA1_256(w6) + w1 + SIGMA0_256(w9) + w8;
+ SHA256ROUND(a, b, c, d, e, f, g, h, 56, w8);
+ w9 = SIGMA1_256(w7) + w2 + SIGMA0_256(w10) + w9;
+ SHA256ROUND(h, a, b, c, d, e, f, g, 57, w9);
+ w10 = SIGMA1_256(w8) + w3 + SIGMA0_256(w11) + w10;
+ SHA256ROUND(g, h, a, b, c, d, e, f, 58, w10);
+ w11 = SIGMA1_256(w9) + w4 + SIGMA0_256(w12) + w11;
+ SHA256ROUND(f, g, h, a, b, c, d, e, 59, w11);
+ w12 = SIGMA1_256(w10) + w5 + SIGMA0_256(w13) + w12;
+ SHA256ROUND(e, f, g, h, a, b, c, d, 60, w12);
+ w13 = SIGMA1_256(w11) + w6 + SIGMA0_256(w14) + w13;
+ SHA256ROUND(d, e, f, g, h, a, b, c, 61, w13);
+ w14 = SIGMA1_256(w12) + w7 + SIGMA0_256(w15) + w14;
+ SHA256ROUND(c, d, e, f, g, h, a, b, 62, w14);
+ w15 = SIGMA1_256(w13) + w8 + SIGMA0_256(w0) + w15;
+ SHA256ROUND(b, c, d, e, f, g, h, a, 63, w15);
+
+ ctx->state.s32[0] += a;
+ ctx->state.s32[1] += b;
+ ctx->state.s32[2] += c;
+ ctx->state.s32[3] += d;
+ ctx->state.s32[4] += e;
+ ctx->state.s32[5] += f;
+ ctx->state.s32[6] += g;
+ ctx->state.s32[7] += h;
+}
+#endif /* !__amd64 */
+
+
+/*
+ * Encode()
+ *
+ * purpose: to store a list of 32-bit words in big endian byte order,
+ *          whatever the host byte order is
+ * input: uint8_t * : place to store the converted big endian bytes
+ *        uint32_t * : place to get the 32-bit words to convert from
+ * size_t : the length of the input in bytes
+ * output: void
+ */
+
+static void
+Encode(uint8_t *_RESTRICT_KYWD output, uint32_t *_RESTRICT_KYWD input,
+ size_t len)
+{
+ size_t i, j;
+
+ for (i = 0, j = 0; j < len; i++, j += 4) {
+ output[j] = (input[i] >> 24) & 0xff;
+ output[j + 1] = (input[i] >> 16) & 0xff;
+ output[j + 2] = (input[i] >> 8) & 0xff;
+ output[j + 3] = input[i] & 0xff;
+ }
+}
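A minimal sketch of the byte order Encode() produces (illustrative only; 'word' and 'be' are hypothetical names):

	uint32_t word = 0x01020304;
	uint8_t be[4];

	Encode(be, &word, sizeof (be));
	/* be[] now holds { 0x01, 0x02, 0x03, 0x04 }: most significant
	 * byte first, regardless of the host's in-memory layout. */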
+
+void
+SHA2Init(uint64_t mech, SHA2_CTX *ctx)
+{
+
+ switch (mech) {
+ case SHA256_MECH_INFO_TYPE:
+ case SHA256_HMAC_MECH_INFO_TYPE:
+ case SHA256_HMAC_GEN_MECH_INFO_TYPE:
+ ctx->state.s32[0] = 0x6a09e667U;
+ ctx->state.s32[1] = 0xbb67ae85U;
+ ctx->state.s32[2] = 0x3c6ef372U;
+ ctx->state.s32[3] = 0xa54ff53aU;
+ ctx->state.s32[4] = 0x510e527fU;
+ ctx->state.s32[5] = 0x9b05688cU;
+ ctx->state.s32[6] = 0x1f83d9abU;
+ ctx->state.s32[7] = 0x5be0cd19U;
+ break;
+ default:
+ cmn_err(CE_PANIC,
+ "sha2_init: failed to find a supported algorithm: 0x%x",
+ (uint32_t)mech);
+ }
+
+ ctx->algotype = (uint32_t)mech;
+ ctx->count.c64[0] = ctx->count.c64[1] = 0;
+}
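The eight words loaded above are the SHA-256 initial hash value from FIPS 180-4: the first 32 bits of the fractional parts of the square roots of the first eight primes. A stand-alone userland sketch checking the first word (link with -lm):

	#include <math.h>
	#include <stdint.h>
	#include <assert.h>

	int
	main(void)
	{
		/* frac(sqrt(2)) scaled by 2^32 and truncated */
		uint32_t h0 = (uint32_t)((sqrt(2.0) - 1.0) * 4294967296.0);

		assert(h0 == 0x6a09e667U);
		return (0);
	}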
+
+void
+SHA256Init(SHA256_CTX *ctx)
+{
+ SHA2Init(SHA256, ctx);
+}
+
+/*
+ * SHA2Update()
+ *
+ * purpose: continues a SHA2 digest operation, using the message block
+ * to update the context.
+ * input: SHA2_CTX * : the context to update
+ * void * : the message block
+ * size_t : the length of the message block, in bytes
+ * output: void
+ */
+
+void
+SHA2Update(SHA2_CTX *ctx, const void *inptr, size_t input_len)
+{
+ uint32_t i, buf_index, buf_len, buf_limit;
+ const uint8_t *input = inptr;
+ uint32_t algotype = ctx->algotype;
+#if defined(__amd64)
+ uint32_t block_count;
+#endif /* __amd64 */
+
+ /* check for noop */
+ if (input_len == 0)
+ return;
+
+ if (algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE) {
+ buf_limit = 64;
+
+ /* compute number of bytes mod 64 */
+ buf_index = (ctx->count.c32[1] >> 3) & 0x3F;
+
+ /* update number of bits */
+ if ((ctx->count.c32[1] += (input_len << 3)) < (input_len << 3))
+ ctx->count.c32[0]++;
+
+ ctx->count.c32[0] += (input_len >> 29);
+
+ } else {
+ buf_limit = 128;
+
+ /* compute number of bytes mod 128 */
+ buf_index = (ctx->count.c64[1] >> 3) & 0x7F;
+
+ /* update number of bits */
+ if ((ctx->count.c64[1] += (input_len << 3)) < (input_len << 3))
+ ctx->count.c64[0]++;
+
+ ctx->count.c64[0] += (input_len >> 61);
+ }
+
+ buf_len = buf_limit - buf_index;
+
+ /* transform as many times as possible */
+ i = 0;
+ if (input_len >= buf_len) {
+
+ /*
+ * general optimization:
+ *
+ * only do initial bcopy() and SHA2Transform() if
+ * buf_index != 0. if buf_index == 0, we're just
+ * wasting our time doing the bcopy() since there
+ * wasn't any data left over from a previous call to
+ * SHA2Update().
+ */
+ if (buf_index) {
+ bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len);
+ if (algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE)
+ SHA256Transform(ctx, ctx->buf_un.buf8);
+
+ i = buf_len;
+ }
+
+#if !defined(__amd64)
+ if (algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE) {
+ for (; i + buf_limit - 1 < input_len; i += buf_limit) {
+ SHA256Transform(ctx, &input[i]);
+ }
+ }
+
+#else
+ if (algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE) {
+ block_count = (input_len - i) >> 6;
+ if (block_count > 0) {
+ SHA256TransformBlocks(ctx, &input[i],
+ block_count);
+ i += block_count << 6;
+ }
+ }
+#endif /* !__amd64 */
+
+ /*
+ * general optimization:
+ *
+ * if i and input_len are the same, return now instead
+ * of calling bcopy(), since the bcopy() in this case
+ * will be an expensive noop.
+ */
+
+ if (input_len == i)
+ return;
+
+ buf_index = 0;
+ }
+
+ /* buffer remaining input */
+ bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i);
+}
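To make the buffering arithmetic above concrete, a short SHA-256 trace (a sketch; 'msg' is a hypothetical 130-byte buffer):

	SHA2_CTX ctx;
	uint8_t msg[130] = { 0 };

	SHA2Init(SHA256_MECH_INFO_TYPE, &ctx);

	/* 100 bytes: one 64-byte block is transformed; the trailing
	 * 100 % 64 = 36 bytes stay behind in ctx.buf_un.buf8. */
	SHA2Update(&ctx, msg, 100);

	/* 30 more bytes: buf_index = (800 >> 3) & 0x3F = 36, so
	 * buf_len = 28 bytes complete a second block; the remaining
	 * 2 bytes are buffered for a later update or SHA2Final(). */
	SHA2Update(&ctx, msg + 100, 30);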
+
+
+/*
+ * SHA2Final()
+ *
+ * purpose: ends a SHA2 digest operation, finalizing the message digest and
+ * zeroing the context.
+ * input: uchar_t * : a buffer to store the digest
+ * : The function actually uses void* because many
+ * : callers pass things other than uchar_t here.
+ * SHA2_CTX * : the context to finalize, save, and zero
+ * output: void
+ */
+
+void
+SHA2Final(void *digest, SHA2_CTX *ctx)
+{
+ uint8_t bitcount_be[sizeof (ctx->count.c32)];
+ uint32_t index;
+ uint32_t algotype = ctx->algotype;
+
+ if (algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE) {
+ index = (ctx->count.c32[1] >> 3) & 0x3f;
+ Encode(bitcount_be, ctx->count.c32, sizeof (bitcount_be));
+ SHA2Update(ctx, PADDING, ((index < 56) ? 56 : 120) - index);
+ SHA2Update(ctx, bitcount_be, sizeof (bitcount_be));
+ Encode(digest, ctx->state.s32, sizeof (ctx->state.s32));
+ }
+
+ /* zeroize sensitive information */
+ bzero(ctx, sizeof (*ctx));
+}
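Putting the three calls together, a sketch of the raw (non-KCF) digest interface; the include path is an assumption based on the module layout under module/icp/include:

	#include <sha2/sha2.h>

	static void
	digest_message(const void *msg, size_t len,
	    uint8_t digest[SHA256_DIGEST_LENGTH])
	{
		SHA2_CTX ctx;

		SHA2Init(SHA256_MECH_INFO_TYPE, &ctx);
		SHA2Update(&ctx, msg, len);
		SHA2Final(digest, &ctx);	/* also zeroizes the context */
	}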
diff --git a/module/icp/api/kcf_cipher.c b/module/icp/api/kcf_cipher.c
new file mode 100644
index 000000000..2585b7fed
--- /dev/null
+++ b/module/icp/api/kcf_cipher.c
@@ -0,0 +1,935 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/zfs_context.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/impl.h>
+#include <sys/crypto/api.h>
+#include <sys/crypto/spi.h>
+#include <sys/crypto/sched_impl.h>
+
+#define CRYPTO_OPS_OFFSET(f) offsetof(crypto_ops_t, co_##f)
+#define CRYPTO_CIPHER_OFFSET(f) offsetof(crypto_cipher_ops_t, f)
+
+/*
+ * Encryption and decryption routines.
+ */
+
+/*
+ * The following are the possible returned values common to all the routines
+ * below. The applicability of some of these return values depends on the
+ * presence of the arguments.
+ *
+ * CRYPTO_SUCCESS: The operation completed successfully.
+ * CRYPTO_QUEUED: A request was submitted successfully. The callback
+ * routine will be called when the operation is done.
+ * CRYPTO_INVALID_MECH_NUMBER, CRYPTO_INVALID_MECH_PARAM, or
+ * CRYPTO_INVALID_MECH for problems with the 'mech'.
+ * CRYPTO_INVALID_DATA for bogus 'data'
+ * CRYPTO_HOST_MEMORY for failure to allocate memory to handle this work.
+ * CRYPTO_INVALID_CONTEXT: Not a valid context.
+ * CRYPTO_BUSY: Cannot process the request now. Schedule a
+ * crypto_bufcall(), or try later.
+ * CRYPTO_NOT_SUPPORTED and CRYPTO_MECH_NOT_SUPPORTED: No provider is
+ * capable of a function or a mechanism.
+ * CRYPTO_INVALID_KEY: bogus 'key' argument.
+ * CRYPTO_INVALID_PLAINTEXT: bogus 'plaintext' argument.
+ * CRYPTO_INVALID_CIPHERTEXT: bogus 'ciphertext' argument.
+ */
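The CRYPTO_SUCCESS/CRYPTO_QUEUED pair implies two calling styles, sketched below against crypto_encrypt() (defined later in this file); the crypto_call_req_t field names follow the ported sys/crypto/common.h and should be treated as assumptions:

	static void
	my_done(void *arg, int rv)
	{
		/* runs when a CRYPTO_QUEUED request completes; rv is its status */
	}

	static int
	encrypt_both_ways(crypto_mechanism_t *mech, crypto_key_t *key,
	    crypto_data_t *pt, crypto_data_t *ct)
	{
		crypto_call_req_t req = { 0 };
		int rv;

		/* Synchronous: a NULL call_req blocks until the result is known. */
		rv = crypto_encrypt(mech, pt, key, NULL, ct, NULL);

		/* Asynchronous: CRYPTO_QUEUED means my_done() will fire later. */
		req.cr_callback_func = my_done;
		req.cr_callback_arg = NULL;
		rv = crypto_encrypt(mech, pt, key, NULL, ct, &req);
		return (rv);
	}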
+
+/*
+ * crypto_cipher_init_prov()
+ *
+ * Arguments:
+ *
+ * pd: provider descriptor
+ * sid: session id
+ * mech: crypto_mechanism_t pointer.
+ * mech_type is a valid value previously returned by
+ * crypto_mech2id();
+ * When the mech's parameter is not NULL, its definition depends
+ * on the standard definition of the mechanism.
+ * key: pointer to a crypto_key_t structure.
+ * tmpl: a crypto_ctx_template_t, opaque template of a context of an
+ * encryption or decryption with the 'mech' using 'key'.
+ * 'tmpl' is created by a previous call to
+ * crypto_create_ctx_template().
+ * ctxp: Pointer to a crypto_context_t.
+ * func: CRYPTO_FG_ENCRYPT or CRYPTO_FG_DECRYPT.
+ * cr: crypto_call_req_t calling conditions and call back info.
+ *
+ * Description:
+ * This is a common function invoked internally by both
+ * crypto_encrypt_init() and crypto_decrypt_init().
+ * Asynchronously submits a request for, or synchronously performs the
+ * initialization of an encryption or a decryption operation.
+ * When possible and applicable, will internally use the pre-expanded key
+ * schedule from the context template, tmpl.
+ * When complete and successful, 'ctxp' will contain a crypto_context_t
+ * valid for later calls to encrypt_update() and encrypt_final(), or
+ * decrypt_update() and decrypt_final().
+ * The caller should hold a reference on the specified provider
+ * descriptor before calling this function.
+ *
+ * Context:
+ * Process or interrupt, according to the semantics dictated by the 'cr'.
+ *
+ * Returns:
+ * See comment in the beginning of the file.
+ */
+static int
+crypto_cipher_init_prov(crypto_provider_t provider, crypto_session_id_t sid,
+ crypto_mechanism_t *mech, crypto_key_t *key,
+ crypto_spi_ctx_template_t tmpl, crypto_context_t *ctxp,
+ crypto_call_req_t *crq, crypto_func_group_t func)
+{
+ int error;
+ crypto_ctx_t *ctx;
+ kcf_req_params_t params;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+
+ ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ if (func == CRYPTO_FG_ENCRYPT) {
+ error = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd,
+ &real_provider, CRYPTO_FG_ENCRYPT);
+ } else {
+ error = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd,
+ &real_provider, CRYPTO_FG_DECRYPT);
+ }
+
+ if (error != CRYPTO_SUCCESS)
+ return (error);
+ }
+
+ /* Allocate and initialize the canonical context */
+ if ((ctx = kcf_new_ctx(crq, real_provider, sid)) == NULL) {
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
+ return (CRYPTO_HOST_MEMORY);
+ }
+
+ /* The fast path for SW providers. */
+ if (CHECK_FASTPATH(crq, pd)) {
+ crypto_mechanism_t lmech;
+
+ lmech = *mech;
+ KCF_SET_PROVIDER_MECHNUM(mech->cm_type, real_provider, &lmech);
+
+ if (func == CRYPTO_FG_ENCRYPT)
+ error = KCF_PROV_ENCRYPT_INIT(real_provider, ctx,
+ &lmech, key, tmpl, KCF_SWFP_RHNDL(crq));
+ else {
+ ASSERT(func == CRYPTO_FG_DECRYPT);
+
+ error = KCF_PROV_DECRYPT_INIT(real_provider, ctx,
+ &lmech, key, tmpl, KCF_SWFP_RHNDL(crq));
+ }
+ KCF_PROV_INCRSTATS(pd, error);
+
+ goto done;
+ }
+
+ /* Check if context sharing is possible */
+ if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
+ key->ck_format == CRYPTO_KEY_RAW &&
+ KCF_CAN_SHARE_OPSTATE(pd, mech->cm_type)) {
+ kcf_context_t *tctxp = (kcf_context_t *)ctx;
+ kcf_provider_desc_t *tpd = NULL;
+ crypto_mech_info_t *sinfo;
+
+ if ((kcf_get_sw_prov(mech->cm_type, &tpd, &tctxp->kc_mech,
+ B_FALSE) == CRYPTO_SUCCESS)) {
+ int tlen;
+
+ sinfo = &(KCF_TO_PROV_MECHINFO(tpd, mech->cm_type));
+ /*
+ * key->ck_length from the consumer is always in bits.
+ * We convert it to be in the same unit registered by
+ * the provider in order to do a comparison.
+ */
+ if (sinfo->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BYTES)
+ tlen = key->ck_length >> 3;
+ else
+ tlen = key->ck_length;
+ /*
+ * Check if the software provider can support context
+ * sharing and support this key length.
+ */
+ if ((sinfo->cm_mech_flags & CRYPTO_CAN_SHARE_OPSTATE) &&
+ (tlen >= sinfo->cm_min_key_length) &&
+ (tlen <= sinfo->cm_max_key_length)) {
+ ctx->cc_flags = CRYPTO_INIT_OPSTATE;
+ tctxp->kc_sw_prov_desc = tpd;
+ } else
+ KCF_PROV_REFRELE(tpd);
+ }
+ }
+
+ if (func == CRYPTO_FG_ENCRYPT) {
+ KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_INIT, sid,
+ mech, key, NULL, NULL, tmpl);
+ } else {
+ ASSERT(func == CRYPTO_FG_DECRYPT);
+ KCF_WRAP_DECRYPT_OPS_PARAMS(&params, KCF_OP_INIT, sid,
+ mech, key, NULL, NULL, tmpl);
+ }
+
+ error = kcf_submit_request(real_provider, ctx, crq, &params,
+ B_FALSE);
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
+
+done:
+ if ((error == CRYPTO_SUCCESS) || (error == CRYPTO_QUEUED))
+ *ctxp = (crypto_context_t)ctx;
+ else {
+ /* Release the hold done in kcf_new_ctx(). */
+ KCF_CONTEXT_REFRELE((kcf_context_t *)ctx->cc_framework_private);
+ }
+
+ return (error);
+}
+
+/*
+ * Same as crypto_cipher_init_prov(), but relies on the scheduler to pick
+ * an appropriate provider. See crypto_cipher_init_prov() comments for more
+ * details.
+ */
+static int
+crypto_cipher_init(crypto_mechanism_t *mech, crypto_key_t *key,
+ crypto_ctx_template_t tmpl, crypto_context_t *ctxp,
+ crypto_call_req_t *crq, crypto_func_group_t func)
+{
+ int error;
+ kcf_mech_entry_t *me;
+ kcf_provider_desc_t *pd;
+ kcf_ctx_template_t *ctx_tmpl;
+ crypto_spi_ctx_template_t spi_ctx_tmpl = NULL;
+ kcf_prov_tried_t *list = NULL;
+
+retry:
+ /* pd is returned held */
+ if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error,
+ list, func, CHECK_RESTRICT(crq), 0)) == NULL) {
+ if (list != NULL)
+ kcf_free_triedlist(list);
+ return (error);
+ }
+
+ /*
+	 * For SW providers, check the validity of the context template.
+	 * It is very rare that the generation number mismatches, so it
+	 * is acceptable to fail here and let the consumer recover by
+	 * freeing this tmpl and creating a new one for the key and the
+	 * new SW provider.
+ */
+ if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
+ ((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
+ if (ctx_tmpl->ct_generation != me->me_gen_swprov) {
+ if (list != NULL)
+ kcf_free_triedlist(list);
+ KCF_PROV_REFRELE(pd);
+ return (CRYPTO_OLD_CTX_TEMPLATE);
+ } else {
+ spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl;
+ }
+ }
+
+ error = crypto_cipher_init_prov(pd, pd->pd_sid, mech, key,
+ spi_ctx_tmpl, ctxp, crq, func);
+ if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
+ IS_RECOVERABLE(error)) {
+ /* Add pd to the linked list of providers tried. */
+ if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
+ goto retry;
+ }
+
+ if (list != NULL)
+ kcf_free_triedlist(list);
+
+ KCF_PROV_REFRELE(pd);
+ return (error);
+}
+
+/*
+ * crypto_encrypt_prov()
+ *
+ * Arguments:
+ * pd: provider descriptor
+ * sid: session id
+ * mech: crypto_mechanism_t pointer.
+ * mech_type is a valid value previously returned by
+ * crypto_mech2id();
+ * When the mech's parameter is not NULL, its definition depends
+ * on the standard definition of the mechanism.
+ * key: pointer to a crypto_key_t structure.
+ * plaintext: The message to be encrypted
+ * ciphertext: Storage for the encrypted message. The length needed
+ * depends on the mechanism, and the plaintext's size.
+ * tmpl: a crypto_ctx_template_t, opaque template of a context of an
+ * encryption with the 'mech' using 'key'. 'tmpl' is created by
+ * a previous call to crypto_create_ctx_template().
+ * cr: crypto_call_req_t calling conditions and call back info.
+ *
+ * Description:
+ * Asynchronously submits a request for, or synchronously performs a
+ * single-part encryption of 'plaintext' with the mechanism 'mech', using
+ * the key 'key'.
+ * When complete and successful, 'ciphertext' will contain the encrypted
+ * message.
+ *
+ * Context:
+ * Process or interrupt, according to the semantics dictated by the 'cr'.
+ *
+ * Returns:
+ * See comment in the beginning of the file.
+ */
+int
+crypto_encrypt_prov(crypto_provider_t provider, crypto_session_id_t sid,
+ crypto_mechanism_t *mech, crypto_data_t *plaintext, crypto_key_t *key,
+ crypto_ctx_template_t tmpl, crypto_data_t *ciphertext,
+ crypto_call_req_t *crq)
+{
+ kcf_req_params_t params;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+ int error;
+
+ ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ error = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd,
+ &real_provider, CRYPTO_FG_ENCRYPT_ATOMIC);
+
+ if (error != CRYPTO_SUCCESS)
+ return (error);
+ }
+
+ KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_ATOMIC, sid, mech, key,
+ plaintext, ciphertext, tmpl);
+
+ error = kcf_submit_request(real_provider, NULL, crq, &params, B_FALSE);
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
+
+ return (error);
+}
+
+/*
+ * Same as crypto_encrypt_prov(), but relies on the scheduler to pick
+ * a provider. See crypto_encrypt_prov() for more details.
+ */
+int
+crypto_encrypt(crypto_mechanism_t *mech, crypto_data_t *plaintext,
+ crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *ciphertext,
+ crypto_call_req_t *crq)
+{
+ int error;
+ kcf_mech_entry_t *me;
+ kcf_req_params_t params;
+ kcf_provider_desc_t *pd;
+ kcf_ctx_template_t *ctx_tmpl;
+ crypto_spi_ctx_template_t spi_ctx_tmpl = NULL;
+ kcf_prov_tried_t *list = NULL;
+
+retry:
+ /* pd is returned held */
+ if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error,
+ list, CRYPTO_FG_ENCRYPT_ATOMIC, CHECK_RESTRICT(crq),
+ plaintext->cd_length)) == NULL) {
+ if (list != NULL)
+ kcf_free_triedlist(list);
+ return (error);
+ }
+
+ /*
+	 * For SW providers, check the validity of the context template.
+	 * It is very rare that the generation number mismatches, so it
+	 * is acceptable to fail here and let the consumer recover by
+	 * freeing this tmpl and creating a new one for the key and the
+	 * new SW provider.
+ */
+ if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
+ ((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
+ if (ctx_tmpl->ct_generation != me->me_gen_swprov) {
+ if (list != NULL)
+ kcf_free_triedlist(list);
+ KCF_PROV_REFRELE(pd);
+ return (CRYPTO_OLD_CTX_TEMPLATE);
+ } else {
+ spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl;
+ }
+ }
+
+ /* The fast path for SW providers. */
+ if (CHECK_FASTPATH(crq, pd)) {
+ crypto_mechanism_t lmech;
+
+ lmech = *mech;
+ KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech);
+
+ error = KCF_PROV_ENCRYPT_ATOMIC(pd, pd->pd_sid, &lmech, key,
+ plaintext, ciphertext, spi_ctx_tmpl, KCF_SWFP_RHNDL(crq));
+ KCF_PROV_INCRSTATS(pd, error);
+ } else {
+ KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_ATOMIC, pd->pd_sid,
+ mech, key, plaintext, ciphertext, spi_ctx_tmpl);
+ error = kcf_submit_request(pd, NULL, crq, &params, B_FALSE);
+ }
+
+ if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
+ IS_RECOVERABLE(error)) {
+ /* Add pd to the linked list of providers tried. */
+ if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
+ goto retry;
+ }
+
+ if (list != NULL)
+ kcf_free_triedlist(list);
+
+ KCF_PROV_REFRELE(pd);
+ return (error);
+}
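A sketch of a single-part caller for the routine above; the crypto_data_t and crypto_key_t field names come from the ported common.h, and SUN_CKM_AES_ECB is assumed to be one of the mechanism names the ICP registers:

	crypto_mechanism_t mech;
	crypto_key_t key;
	crypto_data_t pt, ct;
	uint8_t keybuf[16], ptbuf[16], ctbuf[16];
	int rv;

	mech.cm_type = crypto_mech2id(SUN_CKM_AES_ECB);
	mech.cm_param = NULL;
	mech.cm_param_len = 0;

	key.ck_format = CRYPTO_KEY_RAW;
	key.ck_data = keybuf;
	key.ck_length = sizeof (keybuf) * 8;	/* ck_length is in bits */

	pt.cd_format = CRYPTO_DATA_RAW;
	pt.cd_offset = 0;
	pt.cd_length = sizeof (ptbuf);
	pt.cd_raw.iov_base = (char *)ptbuf;
	pt.cd_raw.iov_len = sizeof (ptbuf);
	/* ct is set up the same way over ctbuf */

	rv = crypto_encrypt(&mech, &pt, &key, NULL, &ct, NULL);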
+
+/*
+ * crypto_encrypt_init_prov()
+ *
+ * Calls crypto_cipher_init_prov() to initialize an encryption operation.
+ */
+int
+crypto_encrypt_init_prov(crypto_provider_t pd, crypto_session_id_t sid,
+ crypto_mechanism_t *mech, crypto_key_t *key,
+ crypto_ctx_template_t tmpl, crypto_context_t *ctxp,
+ crypto_call_req_t *crq)
+{
+ return (crypto_cipher_init_prov(pd, sid, mech, key, tmpl, ctxp, crq,
+ CRYPTO_FG_ENCRYPT));
+}
+
+/*
+ * crypto_encrypt_init()
+ *
+ * Calls crypto_cipher_init() to initialize an encryption operation
+ */
+int
+crypto_encrypt_init(crypto_mechanism_t *mech, crypto_key_t *key,
+ crypto_ctx_template_t tmpl, crypto_context_t *ctxp,
+ crypto_call_req_t *crq)
+{
+ return (crypto_cipher_init(mech, key, tmpl, ctxp, crq,
+ CRYPTO_FG_ENCRYPT));
+}
+
+/*
+ * crypto_encrypt_update()
+ *
+ * Arguments:
+ * context: A crypto_context_t initialized by encrypt_init().
+ * plaintext: The message part to be encrypted
+ * ciphertext: Storage for the encrypted message part.
+ * cr: crypto_call_req_t calling conditions and call back info.
+ *
+ * Description:
+ * Asynchronously submits a request for, or synchronously performs a
+ * part of an encryption operation.
+ *
+ * Context:
+ * Process or interrupt, according to the semantics dictated by the 'cr'.
+ *
+ * Returns:
+ * See comment in the beginning of the file.
+ */
+int
+crypto_encrypt_update(crypto_context_t context, crypto_data_t *plaintext,
+ crypto_data_t *ciphertext, crypto_call_req_t *cr)
+{
+ crypto_ctx_t *ctx = (crypto_ctx_t *)context;
+ kcf_context_t *kcf_ctx;
+ kcf_provider_desc_t *pd;
+ int error;
+ kcf_req_params_t params;
+
+ if ((ctx == NULL) ||
+ ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
+ ((pd = kcf_ctx->kc_prov_desc) == NULL)) {
+ return (CRYPTO_INVALID_CONTEXT);
+ }
+
+ ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
+
+ /* The fast path for SW providers. */
+ if (CHECK_FASTPATH(cr, pd)) {
+ error = KCF_PROV_ENCRYPT_UPDATE(pd, ctx, plaintext,
+ ciphertext, NULL);
+ KCF_PROV_INCRSTATS(pd, error);
+ return (error);
+ }
+
+ /* Check if we should use a software provider for small jobs */
+ if ((ctx->cc_flags & CRYPTO_USE_OPSTATE) && cr == NULL) {
+ if (plaintext->cd_length < kcf_ctx->kc_mech->me_threshold &&
+ kcf_ctx->kc_sw_prov_desc != NULL &&
+ KCF_IS_PROV_USABLE(kcf_ctx->kc_sw_prov_desc)) {
+ pd = kcf_ctx->kc_sw_prov_desc;
+ }
+ }
+
+ KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_UPDATE,
+ ctx->cc_session, NULL, NULL, plaintext, ciphertext, NULL);
+ error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
+
+ return (error);
+}
+
+/*
+ * crypto_encrypt_final()
+ *
+ * Arguments:
+ * context: A crypto_context_t initialized by encrypt_init().
+ * ciphertext: Storage for the last part of encrypted message
+ * cr: crypto_call_req_t calling conditions and call back info.
+ *
+ * Description:
+ * Asynchronously submits a request for, or synchronously performs the
+ * final part of an encryption operation.
+ *
+ * Context:
+ * Process or interrupt, according to the semantics dictated by the 'cr'.
+ *
+ * Returns:
+ * See comment in the beginning of the file.
+ */
+int
+crypto_encrypt_final(crypto_context_t context, crypto_data_t *ciphertext,
+ crypto_call_req_t *cr)
+{
+ crypto_ctx_t *ctx = (crypto_ctx_t *)context;
+ kcf_context_t *kcf_ctx;
+ kcf_provider_desc_t *pd;
+ int error;
+ kcf_req_params_t params;
+
+ if ((ctx == NULL) ||
+ ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
+ ((pd = kcf_ctx->kc_prov_desc) == NULL)) {
+ return (CRYPTO_INVALID_CONTEXT);
+ }
+
+ ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
+
+ /* The fast path for SW providers. */
+ if (CHECK_FASTPATH(cr, pd)) {
+ error = KCF_PROV_ENCRYPT_FINAL(pd, ctx, ciphertext, NULL);
+ KCF_PROV_INCRSTATS(pd, error);
+ } else {
+ KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_FINAL,
+ ctx->cc_session, NULL, NULL, NULL, ciphertext, NULL);
+ error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
+ }
+
+ /* Release the hold done in kcf_new_ctx() during init step. */
+ KCF_CONTEXT_COND_RELEASE(error, kcf_ctx);
+ return (error);
+}
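For streaming consumers the init/update/final triple composes as below (a happy-path sketch; a failed update would still leave the context to be disposed of):

	crypto_context_t cctx;

	rv = crypto_encrypt_init(&mech, &key, NULL, &cctx, NULL);
	if (rv != CRYPTO_SUCCESS)
		return (rv);

	/* one update per fragment of the message */
	rv = crypto_encrypt_update(cctx, &pt_part, &ct_part, NULL);

	/* the final call also drops the context hold taken at init */
	if (rv == CRYPTO_SUCCESS)
		rv = crypto_encrypt_final(cctx, &ct_last, NULL);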
+
+/*
+ * crypto_decrypt_prov()
+ *
+ * Arguments:
+ * pd: provider descriptor
+ * sid: session id
+ * mech: crypto_mechanism_t pointer.
+ * mech_type is a valid value previously returned by
+ * crypto_mech2id();
+ * When the mech's parameter is not NULL, its definition depends
+ * on the standard definition of the mechanism.
+ * key: pointer to a crypto_key_t structure.
+ * ciphertext: The message to be decrypted
+ * plaintext: Storage for the decrypted message. The length needed
+ * depends on the mechanism and the ciphertext's size.
+ * tmpl: a crypto_ctx_template_t, opaque template of a context of a
+ * decryption with the 'mech' using 'key'. 'tmpl' is created by
+ * a previous call to crypto_create_ctx_template().
+ * cr: crypto_call_req_t calling conditions and call back info.
+ *
+ * Description:
+ * Asynchronously submits a request for, or synchronously performs a
+ * single-part decryption of 'ciphertext' with the mechanism 'mech', using
+ * the key 'key'.
+ * When complete and successful, 'plaintext' will contain the decrypted
+ * message.
+ *
+ * Context:
+ * Process or interrupt, according to the semantics dictated by the 'cr'.
+ *
+ * Returns:
+ * See comment in the beginning of the file.
+ */
+int
+crypto_decrypt_prov(crypto_provider_t provider, crypto_session_id_t sid,
+ crypto_mechanism_t *mech, crypto_data_t *ciphertext, crypto_key_t *key,
+ crypto_ctx_template_t tmpl, crypto_data_t *plaintext,
+ crypto_call_req_t *crq)
+{
+ kcf_req_params_t params;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+ int rv;
+
+ ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ rv = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd,
+ &real_provider, CRYPTO_FG_DECRYPT_ATOMIC);
+
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ }
+
+ KCF_WRAP_DECRYPT_OPS_PARAMS(&params, KCF_OP_ATOMIC, sid, mech, key,
+ ciphertext, plaintext, tmpl);
+
+ rv = kcf_submit_request(real_provider, NULL, crq, &params, B_FALSE);
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
+
+ return (rv);
+}
+
+/*
+ * Same as crypto_decrypt_prov(), but relies on the KCF scheduler to
+ * choose a provider. See crypto_decrypt_prov() comments for more
+ * information.
+ */
+int
+crypto_decrypt(crypto_mechanism_t *mech, crypto_data_t *ciphertext,
+ crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *plaintext,
+ crypto_call_req_t *crq)
+{
+ int error;
+ kcf_mech_entry_t *me;
+ kcf_req_params_t params;
+ kcf_provider_desc_t *pd;
+ kcf_ctx_template_t *ctx_tmpl;
+ crypto_spi_ctx_template_t spi_ctx_tmpl = NULL;
+ kcf_prov_tried_t *list = NULL;
+
+retry:
+ /* pd is returned held */
+ if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error,
+ list, CRYPTO_FG_DECRYPT_ATOMIC, CHECK_RESTRICT(crq),
+ ciphertext->cd_length)) == NULL) {
+ if (list != NULL)
+ kcf_free_triedlist(list);
+ return (error);
+ }
+
+ /*
+	 * For SW providers, check the validity of the context template.
+	 * It is very rare that the generation number mismatches, so it
+	 * is acceptable to fail here and let the consumer recover by
+	 * freeing this tmpl and creating a new one for the key and the
+	 * new SW provider.
+ */
+ if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
+ ((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
+ if (ctx_tmpl->ct_generation != me->me_gen_swprov) {
+ if (list != NULL)
+ kcf_free_triedlist(list);
+ KCF_PROV_REFRELE(pd);
+ return (CRYPTO_OLD_CTX_TEMPLATE);
+ } else {
+ spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl;
+ }
+ }
+
+ /* The fast path for SW providers. */
+ if (CHECK_FASTPATH(crq, pd)) {
+ crypto_mechanism_t lmech;
+
+ lmech = *mech;
+ KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech);
+
+ error = KCF_PROV_DECRYPT_ATOMIC(pd, pd->pd_sid, &lmech, key,
+ ciphertext, plaintext, spi_ctx_tmpl, KCF_SWFP_RHNDL(crq));
+ KCF_PROV_INCRSTATS(pd, error);
+ } else {
+ KCF_WRAP_DECRYPT_OPS_PARAMS(&params, KCF_OP_ATOMIC, pd->pd_sid,
+ mech, key, ciphertext, plaintext, spi_ctx_tmpl);
+ error = kcf_submit_request(pd, NULL, crq, &params, B_FALSE);
+ }
+
+ if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
+ IS_RECOVERABLE(error)) {
+ /* Add pd to the linked list of providers tried. */
+ if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
+ goto retry;
+ }
+
+ if (list != NULL)
+ kcf_free_triedlist(list);
+
+ KCF_PROV_REFRELE(pd);
+ return (error);
+}
+
+/*
+ * crypto_decrypt_init_prov()
+ *
+ * Calls crypto_cipher_init_prov() to initialize a decryption operation
+ */
+int
+crypto_decrypt_init_prov(crypto_provider_t pd, crypto_session_id_t sid,
+ crypto_mechanism_t *mech, crypto_key_t *key,
+ crypto_ctx_template_t tmpl, crypto_context_t *ctxp,
+ crypto_call_req_t *crq)
+{
+ return (crypto_cipher_init_prov(pd, sid, mech, key, tmpl, ctxp, crq,
+ CRYPTO_FG_DECRYPT));
+}
+
+/*
+ * crypto_decrypt_init()
+ *
+ * Calls crypto_cipher_init() to initialize a decryption operation
+ */
+int
+crypto_decrypt_init(crypto_mechanism_t *mech, crypto_key_t *key,
+ crypto_ctx_template_t tmpl, crypto_context_t *ctxp,
+ crypto_call_req_t *crq)
+{
+ return (crypto_cipher_init(mech, key, tmpl, ctxp, crq,
+ CRYPTO_FG_DECRYPT));
+}
+
+/*
+ * crypto_decrypt_update()
+ *
+ * Arguments:
+ * context: A crypto_context_t initialized by decrypt_init().
+ * ciphertext: The message part to be decrypted
+ * plaintext: Storage for the decrypted message part.
+ * cr: crypto_call_req_t calling conditions and call back info.
+ *
+ * Description:
+ * Asynchronously submits a request for, or synchronously performs a
+ * part of a decryption operation.
+ *
+ * Context:
+ * Process or interrupt, according to the semantics dictated by the 'cr'.
+ *
+ * Returns:
+ * See comment in the beginning of the file.
+ */
+int
+crypto_decrypt_update(crypto_context_t context, crypto_data_t *ciphertext,
+ crypto_data_t *plaintext, crypto_call_req_t *cr)
+{
+ crypto_ctx_t *ctx = (crypto_ctx_t *)context;
+ kcf_context_t *kcf_ctx;
+ kcf_provider_desc_t *pd;
+ int error;
+ kcf_req_params_t params;
+
+ if ((ctx == NULL) ||
+ ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
+ ((pd = kcf_ctx->kc_prov_desc) == NULL)) {
+ return (CRYPTO_INVALID_CONTEXT);
+ }
+
+ ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
+
+ /* The fast path for SW providers. */
+ if (CHECK_FASTPATH(cr, pd)) {
+ error = KCF_PROV_DECRYPT_UPDATE(pd, ctx, ciphertext,
+ plaintext, NULL);
+ KCF_PROV_INCRSTATS(pd, error);
+ return (error);
+ }
+
+ /* Check if we should use a software provider for small jobs */
+ if ((ctx->cc_flags & CRYPTO_USE_OPSTATE) && cr == NULL) {
+ if (ciphertext->cd_length < kcf_ctx->kc_mech->me_threshold &&
+ kcf_ctx->kc_sw_prov_desc != NULL &&
+ KCF_IS_PROV_USABLE(kcf_ctx->kc_sw_prov_desc)) {
+ pd = kcf_ctx->kc_sw_prov_desc;
+ }
+ }
+
+ KCF_WRAP_DECRYPT_OPS_PARAMS(&params, KCF_OP_UPDATE,
+ ctx->cc_session, NULL, NULL, ciphertext, plaintext, NULL);
+ error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
+
+ return (error);
+}
+
+/*
+ * crypto_decrypt_final()
+ *
+ * Arguments:
+ * context: A crypto_context_t initialized by decrypt_init().
+ * plaintext: Storage for the last part of the decrypted message
+ * cr: crypto_call_req_t calling conditions and call back info.
+ *
+ * Description:
+ * Asynchronously submits a request for, or synchronously performs the
+ * final part of a decryption operation.
+ *
+ * Context:
+ * Process or interrupt, according to the semantics dictated by the 'cr'.
+ *
+ * Returns:
+ * See comment in the beginning of the file.
+ */
+int
+crypto_decrypt_final(crypto_context_t context, crypto_data_t *plaintext,
+ crypto_call_req_t *cr)
+{
+ crypto_ctx_t *ctx = (crypto_ctx_t *)context;
+ kcf_context_t *kcf_ctx;
+ kcf_provider_desc_t *pd;
+ int error;
+ kcf_req_params_t params;
+
+ if ((ctx == NULL) ||
+ ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
+ ((pd = kcf_ctx->kc_prov_desc) == NULL)) {
+ return (CRYPTO_INVALID_CONTEXT);
+ }
+
+ ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
+
+ /* The fast path for SW providers. */
+ if (CHECK_FASTPATH(cr, pd)) {
+ error = KCF_PROV_DECRYPT_FINAL(pd, ctx, plaintext,
+ NULL);
+ KCF_PROV_INCRSTATS(pd, error);
+ } else {
+ KCF_WRAP_DECRYPT_OPS_PARAMS(&params, KCF_OP_FINAL,
+ ctx->cc_session, NULL, NULL, NULL, plaintext, NULL);
+ error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
+ }
+
+ /* Release the hold done in kcf_new_ctx() during init step. */
+ KCF_CONTEXT_COND_RELEASE(error, kcf_ctx);
+ return (error);
+}
+
+/*
+ * See comments for crypto_encrypt_update().
+ */
+int
+crypto_encrypt_single(crypto_context_t context, crypto_data_t *plaintext,
+ crypto_data_t *ciphertext, crypto_call_req_t *cr)
+{
+ crypto_ctx_t *ctx = (crypto_ctx_t *)context;
+ kcf_context_t *kcf_ctx;
+ kcf_provider_desc_t *pd;
+ int error;
+ kcf_req_params_t params;
+
+ if ((ctx == NULL) ||
+ ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
+ ((pd = kcf_ctx->kc_prov_desc) == NULL)) {
+ return (CRYPTO_INVALID_CONTEXT);
+ }
+
+ /* The fast path for SW providers. */
+ if (CHECK_FASTPATH(cr, pd)) {
+ error = KCF_PROV_ENCRYPT(pd, ctx, plaintext,
+ ciphertext, NULL);
+ KCF_PROV_INCRSTATS(pd, error);
+ } else {
+ KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_SINGLE, pd->pd_sid,
+ NULL, NULL, plaintext, ciphertext, NULL);
+ error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
+ }
+
+ /* Release the hold done in kcf_new_ctx() during init step. */
+ KCF_CONTEXT_COND_RELEASE(error, kcf_ctx);
+ return (error);
+}
+
+/*
+ * See comments for crypto_decrypt_update().
+ */
+int
+crypto_decrypt_single(crypto_context_t context, crypto_data_t *ciphertext,
+ crypto_data_t *plaintext, crypto_call_req_t *cr)
+{
+ crypto_ctx_t *ctx = (crypto_ctx_t *)context;
+ kcf_context_t *kcf_ctx;
+ kcf_provider_desc_t *pd;
+ int error;
+ kcf_req_params_t params;
+
+ if ((ctx == NULL) ||
+ ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
+ ((pd = kcf_ctx->kc_prov_desc) == NULL)) {
+ return (CRYPTO_INVALID_CONTEXT);
+ }
+
+ /* The fast path for SW providers. */
+ if (CHECK_FASTPATH(cr, pd)) {
+ error = KCF_PROV_DECRYPT(pd, ctx, ciphertext,
+ plaintext, NULL);
+ KCF_PROV_INCRSTATS(pd, error);
+ } else {
+ KCF_WRAP_DECRYPT_OPS_PARAMS(&params, KCF_OP_SINGLE, pd->pd_sid,
+ NULL, NULL, ciphertext, plaintext, NULL);
+ error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
+ }
+
+ /* Release the hold done in kcf_new_ctx() during init step. */
+ KCF_CONTEXT_COND_RELEASE(error, kcf_ctx);
+ return (error);
+}
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+EXPORT_SYMBOL(crypto_cipher_init_prov);
+EXPORT_SYMBOL(crypto_cipher_init);
+EXPORT_SYMBOL(crypto_encrypt_prov);
+EXPORT_SYMBOL(crypto_encrypt);
+EXPORT_SYMBOL(crypto_encrypt_init_prov);
+EXPORT_SYMBOL(crypto_encrypt_init);
+EXPORT_SYMBOL(crypto_encrypt_update);
+EXPORT_SYMBOL(crypto_encrypt_final);
+EXPORT_SYMBOL(crypto_decrypt_prov);
+EXPORT_SYMBOL(crypto_decrypt);
+EXPORT_SYMBOL(crypto_decrypt_init_prov);
+EXPORT_SYMBOL(crypto_decrypt_init);
+EXPORT_SYMBOL(crypto_decrypt_update);
+EXPORT_SYMBOL(crypto_decrypt_final);
+EXPORT_SYMBOL(crypto_encrypt_single);
+EXPORT_SYMBOL(crypto_decrypt_single);
+#endif
diff --git a/module/icp/api/kcf_ctxops.c b/module/icp/api/kcf_ctxops.c
new file mode 100644
index 000000000..3f90674b0
--- /dev/null
+++ b/module/icp/api/kcf_ctxops.c
@@ -0,0 +1,151 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/zfs_context.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/impl.h>
+#include <sys/crypto/api.h>
+#include <sys/crypto/spi.h>
+#include <sys/crypto/sched_impl.h>
+
+/*
+ * Crypto contexts manipulation routines
+ */
+
+/*
+ * crypto_create_ctx_template()
+ *
+ * Arguments:
+ *
+ * mech: crypto_mechanism_t pointer.
+ * mech_type is a valid value previously returned by
+ * crypto_mech2id();
+ * When the mech's parameter is not NULL, its definition depends
+ * on the standard definition of the mechanism.
+ * key: pointer to a crypto_key_t structure.
+ * ptmpl: storage for the opaque crypto_ctx_template_t, allocated and
+ * initialized by the software provider this routine is
+ * dispatched to.
+ * kmflag: KM_SLEEP/KM_NOSLEEP mem. alloc. flag.
+ *
+ * Description:
+ * Redirects the call to the software provider of the specified
+ * mechanism. That provider will allocate and pre-compute/pre-expand
+ * the context template, reusable by later calls to crypto_xxx_init().
+ * The size and address of that provider context template are stored
+ * in an internal structure, kcf_ctx_template_t. The address of that
+ * structure is given back to the caller in *ptmpl.
+ *
+ * Context:
+ * Process or interrupt.
+ *
+ * Returns:
+ * CRYPTO_SUCCESS when the context template is successfully created.
+ * CRYPTO_HOST_MEMORY: memory allocation failure.
+ * CRYPTO_ARGUMENTS_BAD: NULL storage for the ctx template.
+ * CRYPTO_MECHANISM_INVALID: invalid mechanism 'mech'.
+ */
+int
+crypto_create_ctx_template(crypto_mechanism_t *mech, crypto_key_t *key,
+ crypto_ctx_template_t *ptmpl, int kmflag)
+{
+ int error;
+ kcf_mech_entry_t *me;
+ kcf_provider_desc_t *pd;
+ kcf_ctx_template_t *ctx_tmpl;
+ crypto_mechanism_t prov_mech;
+
+ /* A few args validation */
+
+ if (ptmpl == NULL)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ if (mech == NULL)
+ return (CRYPTO_MECHANISM_INVALID);
+
+ error = kcf_get_sw_prov(mech->cm_type, &pd, &me, B_TRUE);
+ if (error != CRYPTO_SUCCESS)
+ return (error);
+
+ if ((ctx_tmpl = (kcf_ctx_template_t *)kmem_alloc(
+ sizeof (kcf_ctx_template_t), kmflag)) == NULL) {
+ KCF_PROV_REFRELE(pd);
+ return (CRYPTO_HOST_MEMORY);
+ }
+
+ /* Pass a mechtype that the provider understands */
+ prov_mech.cm_type = KCF_TO_PROV_MECHNUM(pd, mech->cm_type);
+ prov_mech.cm_param = mech->cm_param;
+ prov_mech.cm_param_len = mech->cm_param_len;
+
+ error = KCF_PROV_CREATE_CTX_TEMPLATE(pd, &prov_mech, key,
+ &(ctx_tmpl->ct_prov_tmpl), &(ctx_tmpl->ct_size), KCF_RHNDL(kmflag));
+
+ if (error == CRYPTO_SUCCESS) {
+ ctx_tmpl->ct_generation = me->me_gen_swprov;
+ *ptmpl = ctx_tmpl;
+ } else {
+ kmem_free(ctx_tmpl, sizeof (kcf_ctx_template_t));
+ }
+ KCF_PROV_REFRELE(pd);
+
+ return (error);
+}
+
+/*
+ * crypto_destroy_ctx_template()
+ *
+ * Arguments:
+ *
+ * tmpl: an opaque crypto_ctx_template_t previously created by
+ * crypto_create_ctx_template()
+ *
+ * Description:
+ * Frees the embedded crypto_spi_ctx_template_t, then the
+ * kcf_ctx_template_t.
+ *
+ * Context:
+ * Process or interrupt.
+ *
+ */
+void
+crypto_destroy_ctx_template(crypto_ctx_template_t tmpl)
+{
+ kcf_ctx_template_t *ctx_tmpl = (kcf_ctx_template_t *)tmpl;
+
+ if (ctx_tmpl == NULL)
+ return;
+
+ ASSERT(ctx_tmpl->ct_prov_tmpl != NULL);
+
+ bzero(ctx_tmpl->ct_prov_tmpl, ctx_tmpl->ct_size);
+ kmem_free(ctx_tmpl->ct_prov_tmpl, ctx_tmpl->ct_size);
+ kmem_free(ctx_tmpl, sizeof (kcf_ctx_template_t));
+}
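A sketch of the intended template lifecycle, tying the two routines together ('nblocks', 'pt' and 'ct' are hypothetical):

	crypto_ctx_template_t tmpl = NULL;
	int i, rv;

	/* Pre-expand the key schedule once for 'key'... */
	rv = crypto_create_ctx_template(&mech, &key, &tmpl, KM_SLEEP);
	if (rv != CRYPTO_SUCCESS)
		return (rv);

	/* ...then reuse it across many operations with the same key.
	 * CRYPTO_OLD_CTX_TEMPLATE from a consumer call means the SW
	 * provider changed: destroy this template and create a new one. */
	for (i = 0; i < nblocks; i++)
		rv = crypto_encrypt(&mech, &pt[i], &key, tmpl, &ct[i], NULL);

	crypto_destroy_ctx_template(tmpl);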
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+EXPORT_SYMBOL(crypto_create_ctx_template);
+EXPORT_SYMBOL(crypto_destroy_ctx_template);
+#endif
diff --git a/module/icp/api/kcf_digest.c b/module/icp/api/kcf_digest.c
new file mode 100644
index 000000000..b58d3b452
--- /dev/null
+++ b/module/icp/api/kcf_digest.c
@@ -0,0 +1,494 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/zfs_context.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/impl.h>
+#include <sys/crypto/api.h>
+#include <sys/crypto/spi.h>
+#include <sys/crypto/sched_impl.h>
+
+#define CRYPTO_OPS_OFFSET(f) offsetof(crypto_ops_t, co_##f)
+#define CRYPTO_DIGEST_OFFSET(f) offsetof(crypto_digest_ops_t, f)
+
+/*
+ * Message digest routines
+ */
+
+/*
+ * The following are the possible returned values common to all the routines
+ * below. The applicability of some of these return values depends on the
+ * presence of the arguments.
+ *
+ * CRYPTO_SUCCESS: The operation completed successfully.
+ * CRYPTO_QUEUED: A request was submitted successfully. The callback
+ * routine will be called when the operation is done.
+ * CRYPTO_MECHANISM_INVALID or CRYPTO_INVALID_MECH_PARAM
+ * for problems with the 'mech'.
+ * CRYPTO_INVALID_DATA for bogus 'data'
+ * CRYPTO_HOST_MEMORY for failure to allocate memory to handle this work.
+ * CRYPTO_INVALID_CONTEXT: Not a valid context.
+ * CRYPTO_BUSY: Cannot process the request now. Schedule a
+ * crypto_bufcall(), or try later.
+ * CRYPTO_NOT_SUPPORTED and CRYPTO_MECH_NOT_SUPPORTED:
+ * No provider is capable of a function or a mechanism.
+ */
+
+
+/*
+ * crypto_digest_prov()
+ *
+ * Arguments:
+ * pd: pointer to the descriptor of the provider to use for this
+ * operation.
+ * sid: provider session id.
+ * mech: crypto_mechanism_t pointer.
+ * mech_type is a valid value previously returned by
+ * crypto_mech2id();
+ * When the mech's parameter is not NULL, its definition depends
+ * on the standard definition of the mechanism.
+ * data: The message to be digested.
+ * digest: Storage for the digest. The length needed depends on the
+ * mechanism.
+ * cr: crypto_call_req_t calling conditions and call back info.
+ *
+ * Description:
+ * Asynchronously submits a request for, or synchronously performs the
+ * digesting operation of 'data' on the specified
+ * provider with the specified session.
+ * When complete and successful, 'digest' will contain the digest value.
+ * The caller should hold a reference on the specified provider
+ * descriptor before calling this function.
+ *
+ * Context:
+ * Process or interrupt, according to the semantics dictated by the 'cr'.
+ *
+ * Returns:
+ * See comment in the beginning of the file.
+ */
+int
+crypto_digest_prov(crypto_provider_t provider, crypto_session_id_t sid,
+ crypto_mechanism_t *mech, crypto_data_t *data, crypto_data_t *digest,
+ crypto_call_req_t *crq)
+{
+ kcf_req_params_t params;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+ int rv;
+
+ ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ rv = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq),
+ pd, &real_provider, CRYPTO_FG_DIGEST_ATOMIC);
+
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ }
+ KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_ATOMIC, sid, mech, NULL,
+ data, digest);
+
+ /* no crypto context to carry between multiple parts. */
+ rv = kcf_submit_request(real_provider, NULL, crq, &params, B_FALSE);
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
+
+ return (rv);
+}
+
+
+/*
+ * Same as crypto_digest_prov(), but relies on the KCF scheduler to
+ * choose a provider. See crypto_digest_prov() comments for more information.
+ */
+int
+crypto_digest(crypto_mechanism_t *mech, crypto_data_t *data,
+ crypto_data_t *digest, crypto_call_req_t *crq)
+{
+ int error;
+ kcf_provider_desc_t *pd;
+ kcf_req_params_t params;
+ kcf_prov_tried_t *list = NULL;
+
+retry:
+ /* The pd is returned held */
+ if ((pd = kcf_get_mech_provider(mech->cm_type, NULL, &error, list,
+ CRYPTO_FG_DIGEST_ATOMIC, CHECK_RESTRICT(crq),
+ data->cd_length)) == NULL) {
+ if (list != NULL)
+ kcf_free_triedlist(list);
+ return (error);
+ }
+
+ /* The fast path for SW providers. */
+ if (CHECK_FASTPATH(crq, pd)) {
+ crypto_mechanism_t lmech;
+
+ lmech = *mech;
+ KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech);
+ error = KCF_PROV_DIGEST_ATOMIC(pd, pd->pd_sid, &lmech, data,
+ digest, KCF_SWFP_RHNDL(crq));
+ KCF_PROV_INCRSTATS(pd, error);
+ } else {
+ if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
+ (pd->pd_flags & CRYPTO_HASH_NO_UPDATE) &&
+ (data->cd_length > pd->pd_hash_limit)) {
+ error = CRYPTO_BUFFER_TOO_BIG;
+ } else {
+ KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_ATOMIC,
+ pd->pd_sid, mech, NULL, data, digest);
+
+ /* no crypto context to carry between multiple parts. */
+ error = kcf_submit_request(pd, NULL, crq, &params,
+ B_FALSE);
+ }
+ }
+
+ if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
+ IS_RECOVERABLE(error)) {
+ /* Add pd to the linked list of providers tried. */
+ if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
+ goto retry;
+ }
+
+ if (list != NULL)
+ kcf_free_triedlist(list);
+
+ KCF_PROV_REFRELE(pd);
+ return (error);
+}
+
+/*
+ * crypto_digest_init_prov()
+ *
+ * pd: pointer to the descriptor of the provider to use for this
+ * operation.
+ * sid: provider session id.
+ * mech: crypto_mechanism_t pointer.
+ * mech_type is a valid value previously returned by
+ * crypto_mech2id();
+ * When the mech's parameter is not NULL, its definition depends
+ * on the standard definition of the mechanism.
+ * ctxp: Pointer to a crypto_context_t.
+ * cr: crypto_call_req_t calling conditions and call back info.
+ *
+ * Description:
+ * Asynchronously submits a request for, or synchronously performs the
+ * initialization of a message digest operation on the specified
+ * provider with the specified session.
+ * When complete and successful, 'ctxp' will contain a crypto_context_t
+ * valid for later calls to digest_update() and digest_final().
+ * The caller should hold a reference on the specified provider
+ * descriptor before calling this function.
+ */
+int
+crypto_digest_init_prov(crypto_provider_t provider, crypto_session_id_t sid,
+ crypto_mechanism_t *mech, crypto_context_t *ctxp, crypto_call_req_t *crq)
+{
+ int error;
+ crypto_ctx_t *ctx;
+ kcf_req_params_t params;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+
+ ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ error = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd,
+ &real_provider, CRYPTO_FG_DIGEST);
+
+ if (error != CRYPTO_SUCCESS)
+ return (error);
+ }
+
+ /* Allocate and initialize the canonical context */
+ if ((ctx = kcf_new_ctx(crq, real_provider, sid)) == NULL) {
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
+ return (CRYPTO_HOST_MEMORY);
+ }
+
+ /* The fast path for SW providers. */
+ if (CHECK_FASTPATH(crq, pd)) {
+ crypto_mechanism_t lmech;
+
+ lmech = *mech;
+ KCF_SET_PROVIDER_MECHNUM(mech->cm_type, real_provider, &lmech);
+ error = KCF_PROV_DIGEST_INIT(real_provider, ctx, &lmech,
+ KCF_SWFP_RHNDL(crq));
+ KCF_PROV_INCRSTATS(pd, error);
+ } else {
+ KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_INIT, sid,
+ mech, NULL, NULL, NULL);
+ error = kcf_submit_request(real_provider, ctx, crq, &params,
+ B_FALSE);
+ }
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
+
+ if ((error == CRYPTO_SUCCESS) || (error == CRYPTO_QUEUED))
+ *ctxp = (crypto_context_t)ctx;
+ else {
+ /* Release the hold done in kcf_new_ctx(). */
+ KCF_CONTEXT_REFRELE((kcf_context_t *)ctx->cc_framework_private);
+ }
+
+ return (error);
+}
+
+/*
+ * Same as crypto_digest_init_prov(), but relies on the KCF scheduler
+ * to choose a provider. See crypto_digest_init_prov() comments for
+ * more information.
+ */
+int
+crypto_digest_init(crypto_mechanism_t *mech, crypto_context_t *ctxp,
+ crypto_call_req_t *crq)
+{
+ int error;
+ kcf_provider_desc_t *pd;
+ kcf_prov_tried_t *list = NULL;
+
+retry:
+ /* The pd is returned held */
+ if ((pd = kcf_get_mech_provider(mech->cm_type, NULL, &error,
+ list, CRYPTO_FG_DIGEST, CHECK_RESTRICT(crq), 0)) == NULL) {
+ if (list != NULL)
+ kcf_free_triedlist(list);
+ return (error);
+ }
+
+ if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
+ (pd->pd_flags & CRYPTO_HASH_NO_UPDATE)) {
+ /*
+ * The hardware provider has limited digest support.
+ * So, we fallback early here to using a software provider.
+ *
+ * XXX - need to enhance to do the fallback later in
+ * crypto_digest_update() if the size of accumulated input data
+ * exceeds the maximum size digestable by hardware provider.
+ */
+ error = CRYPTO_BUFFER_TOO_BIG;
+ } else {
+ error = crypto_digest_init_prov(pd, pd->pd_sid,
+ mech, ctxp, crq);
+ }
+
+ if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
+ IS_RECOVERABLE(error)) {
+ /* Add pd to the linked list of providers tried. */
+ if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
+ goto retry;
+ }
+
+ if (list != NULL)
+ kcf_free_triedlist(list);
+ KCF_PROV_REFRELE(pd);
+ return (error);
+}
+
+/*
+ * crypto_digest_update()
+ *
+ * Arguments:
+ * context: A crypto_context_t initialized by digest_init().
+ * data: The part of message to be digested.
+ * cr: crypto_call_req_t calling conditions and call back info.
+ *
+ * Description:
+ * Asynchronously submits a request for, or synchronously performs a
+ * part of a message digest operation.
+ *
+ * Context:
+ * Process or interrupt, according to the semantics dictated by the 'cr'.
+ *
+ * Returns:
+ * See comment in the beginning of the file.
+ */
+int
+crypto_digest_update(crypto_context_t context, crypto_data_t *data,
+ crypto_call_req_t *cr)
+{
+ crypto_ctx_t *ctx = (crypto_ctx_t *)context;
+ kcf_context_t *kcf_ctx;
+ kcf_provider_desc_t *pd;
+ int error;
+ kcf_req_params_t params;
+
+ if ((ctx == NULL) ||
+ ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
+ ((pd = kcf_ctx->kc_prov_desc) == NULL)) {
+ return (CRYPTO_INVALID_CONTEXT);
+ }
+
+ ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
+
+ /* The fast path for SW providers. */
+ if (CHECK_FASTPATH(cr, pd)) {
+ error = KCF_PROV_DIGEST_UPDATE(pd, ctx, data, NULL);
+ KCF_PROV_INCRSTATS(pd, error);
+ } else {
+ KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_UPDATE,
+ ctx->cc_session, NULL, NULL, data, NULL);
+ error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
+ }
+
+ return (error);
+}
+
+/*
+ * crypto_digest_final()
+ *
+ * Arguments:
+ * context: A crypto_context_t initialized by digest_init().
+ * digest: The storage for the digest.
+ * cr: crypto_call_req_t calling conditions and call back info.
+ *
+ * Description:
+ * Asynchronously submits a request for, or synchronously performs the
+ * final part of a message digest operation.
+ *
+ * Context:
+ * Process or interrupt, according to the semantics dictated by the 'cr'.
+ *
+ * Returns:
+ * See comment in the beginning of the file.
+ */
+int
+crypto_digest_final(crypto_context_t context, crypto_data_t *digest,
+ crypto_call_req_t *cr)
+{
+ crypto_ctx_t *ctx = (crypto_ctx_t *)context;
+ kcf_context_t *kcf_ctx;
+ kcf_provider_desc_t *pd;
+ int error;
+ kcf_req_params_t params;
+
+ if ((ctx == NULL) ||
+ ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
+ ((pd = kcf_ctx->kc_prov_desc) == NULL)) {
+ return (CRYPTO_INVALID_CONTEXT);
+ }
+
+ ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
+
+ /* The fast path for SW providers. */
+ if (CHECK_FASTPATH(cr, pd)) {
+ error = KCF_PROV_DIGEST_FINAL(pd, ctx, digest, NULL);
+ KCF_PROV_INCRSTATS(pd, error);
+ } else {
+ KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_FINAL,
+ ctx->cc_session, NULL, NULL, NULL, digest);
+ error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
+ }
+
+ /* Release the hold done in kcf_new_ctx() during init step. */
+ KCF_CONTEXT_COND_RELEASE(error, kcf_ctx);
+ return (error);
+}
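The digest side composes the same way as the cipher side; a happy-path sketch with the scheduler choosing the provider (SUN_CKM_SHA256 is an assumed mechanism name):

	crypto_mechanism_t mech;
	crypto_context_t dctx;
	crypto_data_t part, digest;
	int rv;

	mech.cm_type = crypto_mech2id(SUN_CKM_SHA256);
	mech.cm_param = NULL;
	mech.cm_param_len = 0;

	rv = crypto_digest_init(&mech, &dctx, NULL);
	if (rv == CRYPTO_SUCCESS)
		rv = crypto_digest_update(dctx, &part, NULL);
	if (rv == CRYPTO_SUCCESS)
		rv = crypto_digest_final(dctx, &digest, NULL);	/* drops the hold */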
+
+/*
+ * Performs a digest update on the specified key. Note that there is
+ * no k-API crypto_digest_key() equivalent of this function.
+ */
+int
+crypto_digest_key_prov(crypto_context_t context, crypto_key_t *key,
+ crypto_call_req_t *cr)
+{
+ crypto_ctx_t *ctx = (crypto_ctx_t *)context;
+ kcf_context_t *kcf_ctx;
+ kcf_provider_desc_t *pd;
+ int error;
+ kcf_req_params_t params;
+
+ if ((ctx == NULL) ||
+ ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
+ ((pd = kcf_ctx->kc_prov_desc) == NULL)) {
+ return (CRYPTO_INVALID_CONTEXT);
+ }
+
+ ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
+
+ /* The fast path for SW providers. */
+ if (CHECK_FASTPATH(cr, pd)) {
+ error = KCF_PROV_DIGEST_KEY(pd, ctx, key, NULL);
+ KCF_PROV_INCRSTATS(pd, error);
+ } else {
+ KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_DIGEST_KEY,
+ ctx->cc_session, NULL, key, NULL, NULL);
+ error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
+ }
+
+ return (error);
+}
+
+/*
+ * See comments for crypto_digest_update() and crypto_digest_final().
+ */
+int
+crypto_digest_single(crypto_context_t context, crypto_data_t *data,
+ crypto_data_t *digest, crypto_call_req_t *cr)
+{
+ crypto_ctx_t *ctx = (crypto_ctx_t *)context;
+ kcf_context_t *kcf_ctx;
+ kcf_provider_desc_t *pd;
+ int error;
+ kcf_req_params_t params;
+
+ if ((ctx == NULL) ||
+ ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
+ ((pd = kcf_ctx->kc_prov_desc) == NULL)) {
+ return (CRYPTO_INVALID_CONTEXT);
+ }
+
+ /* The fast path for SW providers. */
+ if (CHECK_FASTPATH(cr, pd)) {
+ error = KCF_PROV_DIGEST(pd, ctx, data, digest, NULL);
+ KCF_PROV_INCRSTATS(pd, error);
+ } else {
+ KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_SINGLE, pd->pd_sid,
+ NULL, NULL, data, digest);
+ error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
+ }
+
+ /* Release the hold done in kcf_new_ctx() during init step. */
+ KCF_CONTEXT_COND_RELEASE(error, kcf_ctx);
+ return (error);
+}
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+EXPORT_SYMBOL(crypto_digest_prov);
+EXPORT_SYMBOL(crypto_digest);
+EXPORT_SYMBOL(crypto_digest_init_prov);
+EXPORT_SYMBOL(crypto_digest_init);
+EXPORT_SYMBOL(crypto_digest_update);
+EXPORT_SYMBOL(crypto_digest_final);
+EXPORT_SYMBOL(crypto_digest_key_prov);
+EXPORT_SYMBOL(crypto_digest_single);
+#endif
diff --git a/module/icp/api/kcf_mac.c b/module/icp/api/kcf_mac.c
new file mode 100644
index 000000000..2b4691c03
--- /dev/null
+++ b/module/icp/api/kcf_mac.c
@@ -0,0 +1,648 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/zfs_context.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/impl.h>
+#include <sys/crypto/api.h>
+#include <sys/crypto/spi.h>
+#include <sys/crypto/sched_impl.h>
+
+#define CRYPTO_OPS_OFFSET(f) offsetof(crypto_ops_t, co_##f)
+#define CRYPTO_MAC_OFFSET(f) offsetof(crypto_mac_ops_t, f)
+
+/*
+ * Message authentication codes routines.
+ */
+
+/*
+ * The following are the possible returned values common to all the routines
+ * below. The applicability of some of these return values depends on the
+ * presence of the arguments.
+ *
+ * CRYPTO_SUCCESS: The operation completed successfully.
+ * CRYPTO_QUEUED: A request was submitted successfully. The callback
+ * routine will be called when the operation is done.
+ * CRYPTO_INVALID_MECH_NUMBER, CRYPTO_INVALID_MECH_PARAM, or
+ * CRYPTO_INVALID_MECH for problems with the 'mech'.
+ * CRYPTO_INVALID_DATA for bogus 'data'
+ * CRYPTO_HOST_MEMORY for failure to allocate memory to handle this work.
+ * CRYPTO_INVALID_CONTEXT: Not a valid context.
+ * CRYPTO_BUSY: Cannot process the request now. Schedule a
+ * crypto_bufcall(), or try later.
+ * CRYPTO_NOT_SUPPORTED and CRYPTO_MECH_NOT_SUPPORTED: No provider is
+ * capable of a function or a mechanism.
+ * CRYPTO_INVALID_KEY: bogus 'key' argument.
+ * CRYPTO_INVALID_MAC: bogus 'mac' argument.
+ */
+
+/*
+ * crypto_mac_prov()
+ *
+ * Arguments:
+ * mech: crypto_mechanism_t pointer.
+ * mech_type is a valid value previously returned by
+ * crypto_mech2id();
+ * When the mech's parameter is not NULL, its definition depends
+ * on the standard definition of the mechanism.
+ * key: pointer to a crypto_key_t structure.
+ * data: The message to compute the MAC for.
+ * mac: Storage for the MAC. The length needed depends on the mechanism.
+ * tmpl: a crypto_ctx_template_t, opaque template of a context of a
+ * MAC with the 'mech' using 'key'. 'tmpl' is created by
+ * a previous call to crypto_create_ctx_template().
+ * crq: crypto_call_req_t calling conditions and call back info.
+ *
+ * Description:
+ * Asynchronously submits a request for, or synchronously performs a
+ * single-part message authentication of 'data' with the mechanism
+ * 'mech', using the key 'key', on the specified provider with
+ * the specified session id.
+ * When complete and successful, 'mac' will contain the message
+ * authentication code.
+ *
+ * Context:
+ * Process or interrupt, according to the semantics dictated by the 'crq'.
+ *
+ * Returns:
+ * See comment in the beginning of the file.
+ */
+int
+crypto_mac_prov(crypto_provider_t provider, crypto_session_id_t sid,
+ crypto_mechanism_t *mech, crypto_data_t *data, crypto_key_t *key,
+ crypto_ctx_template_t tmpl, crypto_data_t *mac, crypto_call_req_t *crq)
+{
+ kcf_req_params_t params;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+ int rv;
+
+ ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ rv = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd,
+ &real_provider, CRYPTO_FG_MAC_ATOMIC);
+
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ }
+
+ KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_ATOMIC, sid, mech, key,
+ data, mac, tmpl);
+ rv = kcf_submit_request(real_provider, NULL, crq, &params, B_FALSE);
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
+
+ return (rv);
+}
+
+/*
+ * Same as crypto_mac_prov(), but relies on the KCF scheduler to choose
+ * a provider. See crypto_mac_prov() comments for more information.
+ */
+int
+crypto_mac(crypto_mechanism_t *mech, crypto_data_t *data,
+ crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *mac,
+ crypto_call_req_t *crq)
+{
+ int error;
+ kcf_mech_entry_t *me;
+ kcf_req_params_t params;
+ kcf_provider_desc_t *pd;
+ kcf_ctx_template_t *ctx_tmpl;
+ crypto_spi_ctx_template_t spi_ctx_tmpl = NULL;
+ kcf_prov_tried_t *list = NULL;
+
+retry:
+ /* The pd is returned held */
+ if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error,
+ list, CRYPTO_FG_MAC_ATOMIC, CHECK_RESTRICT(crq),
+ data->cd_length)) == NULL) {
+ if (list != NULL)
+ kcf_free_triedlist(list);
+ return (error);
+ }
+
+	/*
+	 * For SW providers, check the validity of the context template.
+	 * It is very rare for the generation number to mismatch, so it
+	 * is acceptable to fail here and let the consumer recover by
+	 * freeing this tmpl and creating a new one for the key and the
+	 * new SW provider.
+	 */
+ if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
+ ((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
+ if (ctx_tmpl->ct_generation != me->me_gen_swprov) {
+ if (list != NULL)
+ kcf_free_triedlist(list);
+ KCF_PROV_REFRELE(pd);
+ return (CRYPTO_OLD_CTX_TEMPLATE);
+ } else {
+ spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl;
+ }
+ }
+
+ /* The fast path for SW providers. */
+ if (CHECK_FASTPATH(crq, pd)) {
+ crypto_mechanism_t lmech;
+
+ lmech = *mech;
+ KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech);
+
+ error = KCF_PROV_MAC_ATOMIC(pd, pd->pd_sid, &lmech, key, data,
+ mac, spi_ctx_tmpl, KCF_SWFP_RHNDL(crq));
+ KCF_PROV_INCRSTATS(pd, error);
+ } else {
+ if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
+ (pd->pd_flags & CRYPTO_HASH_NO_UPDATE) &&
+ (data->cd_length > pd->pd_hash_limit)) {
+			/*
+			 * XXX - We need a check to see if this is indeed
+			 * an HMAC. So far, all kernel clients use this
+			 * interface only for HMAC, so this is fine for now.
+			 */
+ error = CRYPTO_BUFFER_TOO_BIG;
+ } else {
+ KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_ATOMIC,
+ pd->pd_sid, mech, key, data, mac, spi_ctx_tmpl);
+
+ error = kcf_submit_request(pd, NULL, crq, &params,
+ KCF_ISDUALREQ(crq));
+ }
+ }
+
+ if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
+ IS_RECOVERABLE(error)) {
+ /* Add pd to the linked list of providers tried. */
+ if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
+ goto retry;
+ }
+
+ if (list != NULL)
+ kcf_free_triedlist(list);
+
+ KCF_PROV_REFRELE(pd);
+ return (error);
+}
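+
+/*
+ * Example (illustrative only, not compiled): a minimal sketch of how a
+ * kernel consumer might compute a single-part HMAC synchronously through
+ * crypto_mac(). The function name and buffer handling here are assumptions
+ * for illustration; only the crypto_*() calls are part of this API.
+ */
+#if 0
+static int
+example_hmac_sha256(crypto_key_t *key, void *msg, size_t msglen,
+    void *digest, size_t dlen)
+{
+	crypto_mechanism_t mech;
+	crypto_data_t data, mac;
+
+	mech.cm_type = crypto_mech2id(SUN_CKM_SHA256_HMAC);
+	if (mech.cm_type == CRYPTO_MECH_INVALID)
+		return (CRYPTO_MECHANISM_INVALID);
+	mech.cm_param = NULL;
+	mech.cm_param_len = 0;
+
+	data.cd_format = CRYPTO_DATA_RAW;
+	data.cd_offset = 0;
+	data.cd_length = msglen;
+	data.cd_raw.iov_base = (char *)msg;
+	data.cd_raw.iov_len = msglen;
+
+	mac.cd_format = CRYPTO_DATA_RAW;
+	mac.cd_offset = 0;
+	mac.cd_length = dlen;
+	mac.cd_raw.iov_base = (char *)digest;
+	mac.cd_raw.iov_len = dlen;
+
+	/* NULL crypto_call_req_t => synchronous; no template cached */
+	return (crypto_mac(&mech, &data, key, NULL, &mac, NULL));
+}
+#endif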
+
+/*
+ * Single part operation to compute the MAC corresponding to the specified
+ * 'data' and to verify that it matches the MAC specified by 'mac'.
+ * The other arguments are the same as the function crypto_mac_prov().
+ */
+int
+crypto_mac_verify_prov(crypto_provider_t provider, crypto_session_id_t sid,
+ crypto_mechanism_t *mech, crypto_data_t *data, crypto_key_t *key,
+ crypto_ctx_template_t tmpl, crypto_data_t *mac, crypto_call_req_t *crq)
+{
+ kcf_req_params_t params;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+ int rv;
+
+ ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ rv = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd,
+ &real_provider, CRYPTO_FG_MAC_ATOMIC);
+
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ }
+
+ KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_MAC_VERIFY_ATOMIC, sid, mech,
+ key, data, mac, tmpl);
+ rv = kcf_submit_request(real_provider, NULL, crq, &params, B_FALSE);
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
+
+ return (rv);
+}
+
+/*
+ * Same as crypto_mac_verify_prov(), but relies on the KCF scheduler to choose
+ * a provider. See crypto_mac_verify_prov() comments for more information.
+ */
+int
+crypto_mac_verify(crypto_mechanism_t *mech, crypto_data_t *data,
+ crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *mac,
+ crypto_call_req_t *crq)
+{
+ int error;
+ kcf_mech_entry_t *me;
+ kcf_req_params_t params;
+ kcf_provider_desc_t *pd;
+ kcf_ctx_template_t *ctx_tmpl;
+ crypto_spi_ctx_template_t spi_ctx_tmpl = NULL;
+ kcf_prov_tried_t *list = NULL;
+
+retry:
+ /* The pd is returned held */
+ if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error,
+ list, CRYPTO_FG_MAC_ATOMIC, CHECK_RESTRICT(crq),
+ data->cd_length)) == NULL) {
+ if (list != NULL)
+ kcf_free_triedlist(list);
+ return (error);
+ }
+
+	/*
+	 * For SW providers, check the validity of the context template.
+	 * It is very rare for the generation number to mismatch, so it
+	 * is acceptable to fail here and let the consumer recover by
+	 * freeing this tmpl and creating a new one for the key and the
+	 * new SW provider.
+	 */
+ if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
+ ((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
+ if (ctx_tmpl->ct_generation != me->me_gen_swprov) {
+ if (list != NULL)
+ kcf_free_triedlist(list);
+ KCF_PROV_REFRELE(pd);
+ return (CRYPTO_OLD_CTX_TEMPLATE);
+ } else {
+ spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl;
+ }
+ }
+
+ /* The fast path for SW providers. */
+ if (CHECK_FASTPATH(crq, pd)) {
+ crypto_mechanism_t lmech;
+
+ lmech = *mech;
+ KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech);
+
+ error = KCF_PROV_MAC_VERIFY_ATOMIC(pd, pd->pd_sid, &lmech, key,
+ data, mac, spi_ctx_tmpl, KCF_SWFP_RHNDL(crq));
+ KCF_PROV_INCRSTATS(pd, error);
+ } else {
+ if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
+ (pd->pd_flags & CRYPTO_HASH_NO_UPDATE) &&
+ (data->cd_length > pd->pd_hash_limit)) {
+ /* see comments in crypto_mac() */
+ error = CRYPTO_BUFFER_TOO_BIG;
+ } else {
+ KCF_WRAP_MAC_OPS_PARAMS(&params,
+ KCF_OP_MAC_VERIFY_ATOMIC, pd->pd_sid, mech,
+ key, data, mac, spi_ctx_tmpl);
+
+ error = kcf_submit_request(pd, NULL, crq, &params,
+ KCF_ISDUALREQ(crq));
+ }
+ }
+
+ if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
+ IS_RECOVERABLE(error)) {
+ /* Add pd to the linked list of providers tried. */
+ if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
+ goto retry;
+ }
+
+ if (list != NULL)
+ kcf_free_triedlist(list);
+
+ KCF_PROV_REFRELE(pd);
+ return (error);
+}
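+
+/*
+ * Example (illustrative only, not compiled): verifying a received MAC in
+ * one call. Buffer setup is as in the crypto_mac() sketch above, except
+ * that 'mac' holds the value to check rather than storage to fill.
+ */
+#if 0
+	rv = crypto_mac_verify(&mech, &data, key, NULL, &mac, NULL);
+	if (rv == CRYPTO_INVALID_MAC)
+		cmn_err(CE_WARN, "MAC does not match the data");
+#endif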
+
+/*
+ * crypto_mac_init_prov()
+ *
+ * Arguments:
+ * pd: pointer to the descriptor of the provider to use for this
+ * operation.
+ * sid: provider session id.
+ * mech: crypto_mechanism_t pointer.
+ * mech_type is a valid value previously returned by
+ * crypto_mech2id();
+ * When the mech's parameter is not NULL, its definition depends
+ * on the standard definition of the mechanism.
+ * key: pointer to a crypto_key_t structure.
+ * tmpl: a crypto_ctx_template_t, opaque template of a context of a
+ * MAC with the 'mech' using 'key'. 'tmpl' is created by
+ * a previous call to crypto_create_ctx_template().
+ * ctxp: Pointer to a crypto_context_t.
+ * crq: crypto_call_req_t calling conditions and call back info.
+ *
+ * Description:
+ * Asynchronously submits a request for, or synchronously performs the
+ * initialization of a MAC operation on the specified provider with
+ * the specified session.
+ * When possible and applicable, this routine will internally use the
+ * pre-computed MAC context from the context template, tmpl.
+ * When complete and successful, 'ctxp' will contain a crypto_context_t
+ * valid for later calls to crypto_mac_update() and crypto_mac_final().
+ * The caller should hold a reference on the specified provider
+ * descriptor before calling this function.
+ *
+ * Context:
+ * Process or interrupt, according to the semantics dictated by the 'crq'.
+ *
+ * Returns:
+ * See comment in the beginning of the file.
+ */
+int
+crypto_mac_init_prov(crypto_provider_t provider, crypto_session_id_t sid,
+ crypto_mechanism_t *mech, crypto_key_t *key, crypto_spi_ctx_template_t tmpl,
+ crypto_context_t *ctxp, crypto_call_req_t *crq)
+{
+ int rv;
+ crypto_ctx_t *ctx;
+ kcf_req_params_t params;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+
+ ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ rv = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd,
+ &real_provider, CRYPTO_FG_MAC);
+
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ }
+
+ /* Allocate and initialize the canonical context */
+ if ((ctx = kcf_new_ctx(crq, real_provider, sid)) == NULL) {
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
+ return (CRYPTO_HOST_MEMORY);
+ }
+
+ /* The fast path for SW providers. */
+ if (CHECK_FASTPATH(crq, pd)) {
+ crypto_mechanism_t lmech;
+
+ lmech = *mech;
+ KCF_SET_PROVIDER_MECHNUM(mech->cm_type, real_provider, &lmech);
+ rv = KCF_PROV_MAC_INIT(real_provider, ctx, &lmech, key, tmpl,
+ KCF_SWFP_RHNDL(crq));
+ KCF_PROV_INCRSTATS(pd, rv);
+ } else {
+ KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_INIT, sid, mech, key,
+ NULL, NULL, tmpl);
+ rv = kcf_submit_request(real_provider, ctx, crq, &params,
+ B_FALSE);
+ }
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
+
+ if ((rv == CRYPTO_SUCCESS) || (rv == CRYPTO_QUEUED))
+ *ctxp = (crypto_context_t)ctx;
+ else {
+ /* Release the hold done in kcf_new_ctx(). */
+ KCF_CONTEXT_REFRELE((kcf_context_t *)ctx->cc_framework_private);
+ }
+
+ return (rv);
+}
+
+/*
+ * Same as crypto_mac_init_prov(), but relies on the KCF scheduler to
+ * choose a provider. See crypto_mac_init_prov() comments for more
+ * information.
+ */
+int
+crypto_mac_init(crypto_mechanism_t *mech, crypto_key_t *key,
+ crypto_ctx_template_t tmpl, crypto_context_t *ctxp,
+ crypto_call_req_t *crq)
+{
+ int error;
+ kcf_mech_entry_t *me;
+ kcf_provider_desc_t *pd;
+ kcf_ctx_template_t *ctx_tmpl;
+ crypto_spi_ctx_template_t spi_ctx_tmpl = NULL;
+ kcf_prov_tried_t *list = NULL;
+
+retry:
+ /* The pd is returned held */
+ if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error,
+ list, CRYPTO_FG_MAC, CHECK_RESTRICT(crq), 0)) == NULL) {
+ if (list != NULL)
+ kcf_free_triedlist(list);
+ return (error);
+ }
+
+	/*
+	 * For SW providers, check the validity of the context template.
+	 * It is very rare for the generation number to mismatch, so it
+	 * is acceptable to fail here and let the consumer recover by
+	 * freeing this tmpl and creating a new one for the key and the
+	 * new SW provider.
+	 */
+
+ if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
+ ((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
+ if (ctx_tmpl->ct_generation != me->me_gen_swprov) {
+ if (list != NULL)
+ kcf_free_triedlist(list);
+ KCF_PROV_REFRELE(pd);
+ return (CRYPTO_OLD_CTX_TEMPLATE);
+ } else {
+ spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl;
+ }
+ }
+
+ if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
+ (pd->pd_flags & CRYPTO_HASH_NO_UPDATE)) {
+		/*
+		 * The hardware provider has limited HMAC support,
+		 * so we fall back early here to using a software provider.
+		 *
+		 * XXX - need to enhance this to do the fallback later in
+		 * crypto_mac_update() if the size of the accumulated input
+		 * data exceeds the maximum size digestible by the hardware
+		 * provider.
+		 */
+ error = CRYPTO_BUFFER_TOO_BIG;
+ } else {
+ error = crypto_mac_init_prov(pd, pd->pd_sid, mech, key,
+ spi_ctx_tmpl, ctxp, crq);
+ }
+ if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
+ IS_RECOVERABLE(error)) {
+ /* Add pd to the linked list of providers tried. */
+ if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
+ goto retry;
+ }
+
+ if (list != NULL)
+ kcf_free_triedlist(list);
+
+ KCF_PROV_REFRELE(pd);
+ return (error);
+}
+
+/*
+ * crypto_mac_update()
+ *
+ * Arguments:
+ * context: A crypto_context_t initialized by crypto_mac_init() or
+ * crypto_mac_init_prov().
+ * data: The message part to be MAC'ed
+ * cr: crypto_call_req_t calling conditions and call back info.
+ *
+ * Description:
+ * Asynchronously submits a request for, or synchronously performs a
+ * part of a MAC operation.
+ *
+ * Context:
+ * Process or interrupt, according to the semantics dictated by the 'cr'.
+ *
+ * Returns:
+ * See comment in the beginning of the file.
+ */
+int
+crypto_mac_update(crypto_context_t context, crypto_data_t *data,
+ crypto_call_req_t *cr)
+{
+ crypto_ctx_t *ctx = (crypto_ctx_t *)context;
+ kcf_context_t *kcf_ctx;
+ kcf_provider_desc_t *pd;
+ kcf_req_params_t params;
+ int rv;
+
+ if ((ctx == NULL) ||
+ ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
+ ((pd = kcf_ctx->kc_prov_desc) == NULL)) {
+ return (CRYPTO_INVALID_CONTEXT);
+ }
+
+ ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
+
+ /* The fast path for SW providers. */
+ if (CHECK_FASTPATH(cr, pd)) {
+ rv = KCF_PROV_MAC_UPDATE(pd, ctx, data, NULL);
+ KCF_PROV_INCRSTATS(pd, rv);
+ } else {
+ KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_UPDATE,
+ ctx->cc_session, NULL, NULL, data, NULL, NULL);
+ rv = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
+ }
+
+ return (rv);
+}
+
+/*
+ * crypto_mac_final()
+ *
+ * Arguments:
+ * context: A crypto_context_t initialized by crypto_mac_init() or
+ * crypto_mac_init_prov().
+ * mac: Storage for the message authentication code.
+ * cr: crypto_call_req_t calling conditions and call back info.
+ *
+ * Description:
+ * Asynchronously submits a request for, or synchronously performs the
+ * final part of a message authentication operation.
+ *
+ * Context:
+ * Process or interrupt, according to the semantics dictated by the 'cr'.
+ *
+ * Returns:
+ * See comment in the beginning of the file.
+ */
+int
+crypto_mac_final(crypto_context_t context, crypto_data_t *mac,
+ crypto_call_req_t *cr)
+{
+ crypto_ctx_t *ctx = (crypto_ctx_t *)context;
+ kcf_context_t *kcf_ctx;
+ kcf_provider_desc_t *pd;
+ kcf_req_params_t params;
+ int rv;
+
+ if ((ctx == NULL) ||
+ ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
+ ((pd = kcf_ctx->kc_prov_desc) == NULL)) {
+ return (CRYPTO_INVALID_CONTEXT);
+ }
+
+ ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
+
+ /* The fast path for SW providers. */
+ if (CHECK_FASTPATH(cr, pd)) {
+ rv = KCF_PROV_MAC_FINAL(pd, ctx, mac, NULL);
+ KCF_PROV_INCRSTATS(pd, rv);
+ } else {
+ KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_FINAL,
+ ctx->cc_session, NULL, NULL, NULL, mac, NULL);
+ rv = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
+ }
+
+ /* Release the hold done in kcf_new_ctx() during init step. */
+ KCF_CONTEXT_COND_RELEASE(rv, kcf_ctx);
+ return (rv);
+}
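+
+/*
+ * Example (illustrative only, not compiled): a multi-part MAC over two
+ * buffers using the init/update/final entry points synchronously.
+ * 'part1', 'part2', 'mech', 'key' and 'mac' are hypothetical; cleanup of
+ * an abandoned context on the error path is omitted for brevity.
+ */
+#if 0
+	crypto_context_t cctx;
+	int rv;
+
+	rv = crypto_mac_init(&mech, key, NULL, &cctx, NULL);
+	if (rv != CRYPTO_SUCCESS)
+		return (rv);
+	rv = crypto_mac_update(cctx, &part1, NULL);
+	if (rv == CRYPTO_SUCCESS)
+		rv = crypto_mac_update(cctx, &part2, NULL);
+	if (rv == CRYPTO_SUCCESS)
+		rv = crypto_mac_final(cctx, &mac, NULL);	/* releases cctx */
+	return (rv);
+#endif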
+
+/*
+ * See comments for crypto_mac_update() and crypto_mac_final().
+ */
+int
+crypto_mac_single(crypto_context_t context, crypto_data_t *data,
+ crypto_data_t *mac, crypto_call_req_t *cr)
+{
+ crypto_ctx_t *ctx = (crypto_ctx_t *)context;
+ kcf_context_t *kcf_ctx;
+ kcf_provider_desc_t *pd;
+ int error;
+ kcf_req_params_t params;
+
+ if ((ctx == NULL) ||
+ ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
+ ((pd = kcf_ctx->kc_prov_desc) == NULL)) {
+ return (CRYPTO_INVALID_CONTEXT);
+ }
+
+ /* The fast path for SW providers. */
+ if (CHECK_FASTPATH(cr, pd)) {
+ error = KCF_PROV_MAC(pd, ctx, data, mac, NULL);
+ KCF_PROV_INCRSTATS(pd, error);
+ } else {
+ KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_SINGLE, pd->pd_sid,
+ NULL, NULL, data, mac, NULL);
+ error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
+ }
+
+ /* Release the hold done in kcf_new_ctx() during init step. */
+ KCF_CONTEXT_COND_RELEASE(error, kcf_ctx);
+ return (error);
+}
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+EXPORT_SYMBOL(crypto_mac_prov);
+EXPORT_SYMBOL(crypto_mac);
+EXPORT_SYMBOL(crypto_mac_verify_prov);
+EXPORT_SYMBOL(crypto_mac_verify);
+EXPORT_SYMBOL(crypto_mac_init_prov);
+EXPORT_SYMBOL(crypto_mac_init);
+EXPORT_SYMBOL(crypto_mac_update);
+EXPORT_SYMBOL(crypto_mac_final);
+EXPORT_SYMBOL(crypto_mac_single);
+#endif
diff --git a/module/icp/api/kcf_miscapi.c b/module/icp/api/kcf_miscapi.c
new file mode 100644
index 000000000..09d50f7be
--- /dev/null
+++ b/module/icp/api/kcf_miscapi.c
@@ -0,0 +1,127 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/zfs_context.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/api.h>
+#include <sys/crypto/impl.h>
+#include <sys/crypto/sched_impl.h>
+
+/*
+ * All event subscribers are put on a list. ntfy_list_lock
+ * protects changes to this list.
+ *
+ * The following locking order is maintained in the code - the
+ * global ntfy_list_lock followed by the individual lock
+ * in a kcf_ntfy_elem structure (kn_lock).
+ */
+kmutex_t ntfy_list_lock;
+kcondvar_t ntfy_list_cv; /* cv the service thread waits on */
+static kcf_ntfy_elem_t *ntfy_list_head;
+
+/*
+ * crypto_mech2id()
+ *
+ * Arguments:
+ * . mechname: A null-terminated string identifying the mechanism name.
+ *
+ * Description:
+ * Walks the mechanism tables, looking for an entry that matches the
+ * mechname. Once it finds it, it builds the 64-bit mech_type and returns
+ * it. If there are no hardware or software providers for the mechanism,
+ * but there is an unloaded software provider, this routine will attempt
+ * to load it.
+ *
+ * Context:
+ * Process or interrupt.
+ *
+ * Returns:
+ * The unique mechanism identified by 'mechname', if found.
+ * CRYPTO_MECH_INVALID otherwise.
+ */
+crypto_mech_type_t
+crypto_mech2id(char *mechname)
+{
+ return (crypto_mech2id_common(mechname, B_TRUE));
+}
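+
+/*
+ * Example (illustrative only, not compiled): mechanism names map to the
+ * opaque ids the rest of this API consumes; a failed lookup yields
+ * CRYPTO_MECH_INVALID.
+ */
+#if 0
+	crypto_mech_type_t t;
+
+	t = crypto_mech2id(SUN_CKM_AES_CCM);
+	if (t == CRYPTO_MECH_INVALID)
+		return (ENOTSUP);	/* no provider implements it */
+#endif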
+
+/*
+ * We walk the notification list and do the callbacks.
+ */
+void
+kcf_walk_ntfylist(uint32_t event, void *event_arg)
+{
+ kcf_ntfy_elem_t *nep;
+ int nelem = 0;
+
+ mutex_enter(&ntfy_list_lock);
+
+	/*
+	 * Count how many clients are on the notification list. We need
+	 * this count to ensure that clients that joined the list after we
+	 * started this walk are not wrongly notified.
+	 */
+ for (nep = ntfy_list_head; nep != NULL; nep = nep->kn_next)
+ nelem++;
+
+ for (nep = ntfy_list_head; (nep != NULL && nelem); nep = nep->kn_next) {
+ nelem--;
+
+ /*
+ * Check if this client is interested in the
+ * event.
+ */
+ if (!(nep->kn_event_mask & event))
+ continue;
+
+ mutex_enter(&nep->kn_lock);
+ nep->kn_state = NTFY_RUNNING;
+ mutex_exit(&nep->kn_lock);
+ mutex_exit(&ntfy_list_lock);
+
+		/*
+		 * We invoke the callback routine with no locks held. Another
+		 * client could have joined the list meanwhile. This is fine
+		 * as we maintain nelem as stated above. The NULL check in the
+		 * for loop guards against shrinkage. Also, any caller of
+		 * crypto_unnotify_events() at this point cv_wait()s until
+		 * kn_state changes to NTFY_WAITING, so nep is assured to be
+		 * valid.
+		 */
+ (*nep->kn_func)(event, event_arg);
+
+ mutex_enter(&nep->kn_lock);
+ nep->kn_state = NTFY_WAITING;
+ cv_broadcast(&nep->kn_cv);
+ mutex_exit(&nep->kn_lock);
+
+ mutex_enter(&ntfy_list_lock);
+ }
+
+ mutex_exit(&ntfy_list_lock);
+}
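+
+/*
+ * Illustrative scenario for the nelem guard above: if the list holds
+ * A -> B when the walk starts (nelem == 2) and a new element joins the
+ * list while A's callback runs with ntfy_list_lock dropped, the walk
+ * still notifies at most the two original elements because nelem reaches
+ * zero, so the newcomer is not notified of an event that predates its
+ * subscription.
+ */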
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+EXPORT_SYMBOL(crypto_mech2id);
+#endif
diff --git a/module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.gladman b/module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.gladman
new file mode 100644
index 000000000..48fea7bb3
--- /dev/null
+++ b/module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.gladman
@@ -0,0 +1,23 @@
+ ---------------------------------------------------------------------------
+ Copyright (c) 1998-2007, Brian Gladman, Worcester, UK. All rights reserved.
+
+ LICENSE TERMS
+
+ The free distribution and use of this software is allowed (with or without
+ changes) provided that:
+
+ 1. source code distributions include the above copyright notice, this
+ list of conditions and the following disclaimer;
+
+ 2. binary distributions include the above copyright notice, this list
+ of conditions and the following disclaimer in their documentation;
+
+ 3. the name of the copyright holder is not used to endorse products
+ built using this software without specific written permission.
+
+ DISCLAIMER
+
+ This software is provided 'as is' with no explicit or implied warranties
+ in respect of its properties, including, but not limited to, correctness
+ and/or fitness for purpose.
+ ---------------------------------------------------------------------------
diff --git a/module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.gladman.descrip b/module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.gladman.descrip
new file mode 100644
index 000000000..5f822cf27
--- /dev/null
+++ b/module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.gladman.descrip
@@ -0,0 +1 @@
+PORTIONS OF AES FUNCTIONALITY
diff --git a/module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.openssl b/module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.openssl
new file mode 100644
index 000000000..a2c4adcbe
--- /dev/null
+++ b/module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.openssl
@@ -0,0 +1,127 @@
+
+ LICENSE ISSUES
+ ==============
+
+ The OpenSSL toolkit stays under a dual license, i.e. both the conditions of
+ the OpenSSL License and the original SSLeay license apply to the toolkit.
+ See below for the actual license texts. Actually both licenses are BSD-style
+ Open Source licenses. In case of any license issues related to OpenSSL
+ please contact [email protected].
+
+ OpenSSL License
+ ---------------
+
+/* ====================================================================
+ * Copyright (c) 1998-2008 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * ([email protected]). This product includes software written by Tim
+ * Hudson ([email protected]).
+ *
+ */
+
+ Original SSLeay License
+ -----------------------
+
+/* Copyright (C) 1995-1998 Eric Young ([email protected])
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young ([email protected]).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson ([email protected]).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young ([email protected])"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson ([email protected])"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
diff --git a/module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.openssl.descrip b/module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.openssl.descrip
new file mode 100644
index 000000000..5f822cf27
--- /dev/null
+++ b/module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.openssl.descrip
@@ -0,0 +1 @@
+PORTIONS OF AES FUNCTIONALITY
diff --git a/module/icp/asm-x86_64/aes/aes_amd64.S b/module/icp/asm-x86_64/aes/aes_amd64.S
new file mode 100644
index 000000000..fb6444119
--- /dev/null
+++ b/module/icp/asm-x86_64/aes/aes_amd64.S
@@ -0,0 +1,900 @@
+/*
+ * ---------------------------------------------------------------------------
+ * Copyright (c) 1998-2007, Brian Gladman, Worcester, UK. All rights reserved.
+ *
+ * LICENSE TERMS
+ *
+ * The free distribution and use of this software is allowed (with or without
+ * changes) provided that:
+ *
+ * 1. source code distributions include the above copyright notice, this
+ * list of conditions and the following disclaimer;
+ *
+ * 2. binary distributions include the above copyright notice, this list
+ * of conditions and the following disclaimer in their documentation;
+ *
+ * 3. the name of the copyright holder is not used to endorse products
+ * built using this software without specific written permission.
+ *
+ * DISCLAIMER
+ *
+ * This software is provided 'as is' with no explicit or implied warranties
+ * in respect of its properties, including, but not limited to, correctness
+ * and/or fitness for purpose.
+ * ---------------------------------------------------------------------------
+ * Issue 20/12/2007
+ *
+ * I am grateful to Dag Arne Osvik for many discussions of the techniques that
+ * can be used to optimise AES assembler code on AMD64/EM64T architectures.
+ * Some of the techniques used in this implementation are the result of
+ * suggestions made by him for which I am most grateful.
+ *
+ * An AES implementation for AMD64 processors using the YASM assembler. This
+ * implementation provides only encryption and decryption, and hence requires
+ * key scheduling support in C. It uses 8k bytes of tables, but its encryption
+ * and decryption performance is very close to that obtained using large tables.
+ * It can use either MS Windows or Gnu/Linux/OpenSolaris OS calling conventions,
+ * which are as follows:
+ * ms windows gnu/linux/opensolaris os
+ *
+ * in_blk rcx rdi
+ * out_blk rdx rsi
+ * context (cx) r8 rdx
+ *
+ * preserved rsi - + rbx, rbp, rsp, r12, r13, r14 & r15
+ * registers rdi - on both
+ *
+ * destroyed - rsi + rax, rcx, rdx, r8, r9, r10 & r11
+ * registers - rdi on both
+ *
+ * The convention used here is that for gnu/linux/opensolaris os.
+ *
+ * This code provides the standard AES block size (128 bits, 16 bytes) and the
+ * three standard AES key sizes (128, 192 and 256 bits). It has the same call
+ * interface as my C implementation. It uses the Microsoft C AMD64 calling
+ * conventions in which the three parameters are placed in rcx, rdx and r8
+ * respectively. The rbx, rsi, rdi, rbp and r12..r15 registers are preserved.
+ *
+ * OpenSolaris Note:
+ * Modified to use GNU/Linux/Solaris calling conventions.
+ * That is parameters are placed in rdi, rsi, rdx, and rcx, respectively.
+ *
+ *	AES_RETURN aes_encrypt(const unsigned char in_blk[],
+ *			unsigned char out_blk[], const aes_encrypt_ctx cx[1]);
+ *
+ *	AES_RETURN aes_decrypt(const unsigned char in_blk[],
+ *			unsigned char out_blk[], const aes_decrypt_ctx cx[1]);
+ *
+ *	AES_RETURN aes_encrypt_key<NNN>(const unsigned char key[],
+ *			const aes_encrypt_ctx cx[1]);
+ *
+ *	AES_RETURN aes_decrypt_key<NNN>(const unsigned char key[],
+ *			const aes_decrypt_ctx cx[1]);
+ *
+ *	AES_RETURN aes_encrypt_key(const unsigned char key[],
+ *			unsigned int len, const aes_encrypt_ctx cx[1]);
+ *
+ *	AES_RETURN aes_decrypt_key(const unsigned char key[],
+ *			unsigned int len, const aes_decrypt_ctx cx[1]);
+ *
+ * where <NNN> is 128, 192 or 256. In the last two calls the length can be in
+ * either bits or bytes.
+ *
+ * Comment in/out the following lines to obtain the desired subroutines. These
+ * selections MUST match those in the C header file aesopt.h
+ */
+#define AES_REV_DKS /* define if key decryption schedule is reversed */
+
+#define LAST_ROUND_TABLES /* define for the faster version using extra tables */
+
+/*
+ * The encryption key schedule has the following in-memory layout, where N
+ * is the number of rounds (10, 12 or 14):
+ *
+ * lo: | input key (round 0) | / each round is four 32-bit words
+ * | encryption round 1 |
+ * | encryption round 2 |
+ * ....
+ * | encryption round N-1 |
+ * hi: | encryption round N |
+ *
+ * The decryption key schedule is normally set up so that it has the same
+ * layout as above by actually reversing the order of the encryption key
+ * schedule in memory (this happens when AES_REV_DKS is set):
+ *
+ * lo: | decryption round 0 | = | encryption round N |
+ * | decryption round 1 | = INV_MIX_COL[ | encryption round N-1 | ]
+ * | decryption round 2 | = INV_MIX_COL[ | encryption round N-2 | ]
+ * .... ....
+ * | decryption round N-1 | = INV_MIX_COL[ | encryption round 1 | ]
+ * hi: | decryption round N | = | input key (round 0) |
+ *
+ * with all rounds except the first and last modified using inv_mix_column().
+ * But if AES_REV_DKS is NOT set, the order of keys is left as it is for
+ * encryption, so the schedule has to be accessed in reverse when used for
+ * decryption (although the inverse mix column modifications are still done):
+ *
+ * lo: | decryption round 0 | = | input key (round 0) |
+ * | decryption round 1 | = INV_MIX_COL[ | encryption round 1 | ]
+ * | decryption round 2 | = INV_MIX_COL[ | encryption round 2 | ]
+ * .... ....
+ * | decryption round N-1 | = INV_MIX_COL[ | encryption round N-1 | ]
+ * hi: | decryption round N | = | encryption round N |
+ *
+ * This layout is faster when the assembler key scheduling provided here
+ * is used.
+ *
+ * End of user defines
+ */
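+
+/*
+ * Illustrative instance of the layout above: for AES-128 (N = 10) with
+ * AES_REV_DKS set, the first 16 bytes of the decryption schedule hold the
+ * round-10 encryption key and the last 16 bytes hold the original cipher
+ * key; the ik_ref() macro below indexes the schedule with a negative
+ * stride in that case and a positive stride otherwise.
+ */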
+
+/*
+ * ---------------------------------------------------------------------------
+ * OpenSolaris OS modifications
+ *
+ * This source originates from Brian Gladman's file aes_amd64.asm
+ * in http://fp.gladman.plus.com/AES/aes-src-04-03-08.zip
+ * with these changes:
+ *
+ * 1. Removed MS Windows-specific code within DLL_EXPORT, _SEH_, and
+ * !__GNUC__ ifdefs. Also removed ENCRYPTION, DECRYPTION,
+ * AES_128, AES_192, AES_256, AES_VAR ifdefs.
+ *
+ * 2. Translate yasm/nasm %define and .macro definitions to cpp(1) #define
+ *
+ * 3. Translate yasm/nasm %ifdef/%ifndef to cpp(1) #ifdef
+ *
+ * 4. Translate Intel/yasm/nasm syntax to ATT/OpenSolaris as(1) syntax
+ *	(operands reversed, literals prefixed with "$", registers prefixed
+ *	with "%", "[register+offset]" addressing changed to "offset(register)",
+ *	parentheses in constant expressions "()" changed to square brackets
+ *	"[]", "." removed from local (numeric) labels, and other changes).
+ * Examples:
+ * Intel/yasm/nasm Syntax ATT/OpenSolaris Syntax
+ * mov rax,(4*20h) mov $[4*0x20],%rax
+ * mov rax,[ebx+20h] mov 0x20(%ebx),%rax
+ * lea rax,[ebx+ecx] lea (%ebx,%ecx),%rax
+ * sub rax,[ebx+ecx*4-20h] sub -0x20(%ebx,%ecx,4),%rax
+ *
+ * 5. Added OpenSolaris ENTRY_NP/SET_SIZE macros from
+ * /usr/include/sys/asm_linkage.h, lint(1B) guards, and dummy C function
+ * definitions for lint.
+ *
+ * 6. Renamed functions and reordered parameters to match OpenSolaris:
+ *	Original Gladman interface:
+ *	int aes_encrypt(const unsigned char *in,
+ *		unsigned char *out, const aes_encrypt_ctx cx[1]);
+ *	int aes_decrypt(const unsigned char *in,
+ *		unsigned char *out, const aes_decrypt_ctx cx[1]);
+ *	Note: aes_encrypt_ctx contains ks, a 60-element array of uint32_t,
+ *	and a union type, inf, containing inf.l, a uint32_t, and inf.b, a
+ *	4-element array of uint32_t. Only b[0] in the array (aka "l") is
+ *	used and contains the key schedule length * 16, where the key
+ *	schedule length is 10, 12, or 14 rounds.
+ *
+ * OpenSolaris OS interface:
+ *	void aes_encrypt_amd64(const aes_ks_t *ks, int Nr,
+ *		const uint32_t pt[4], uint32_t ct[4]);
+ *	void aes_decrypt_amd64(const aes_ks_t *ks, int Nr,
+ *		const uint32_t ct[4], uint32_t pt[4]);
+ *	typedef union {uint64_t ks64[(MAX_AES_NR + 1) * 4];
+ *		uint32_t ks32[(MAX_AES_NR + 1) * 4]; } aes_ks_t;
+ * Note: ks is the AES key schedule, Nr is the number of rounds, pt is
+ * plaintext, ct is ciphertext, and MAX_AES_NR is 14.
+ * For the x86 64-bit architecture, OpenSolaris OS uses ks32 instead of ks64.
+ */
+
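+/*
+ * Illustrative call from C (a sketch; assumes the key schedule was already
+ * expanded by the C key-setup code, e.g. in aeskey.c):
+ *
+ *	uint32_t pt[4], ct[4];
+ *	aes_ks_t ks;
+ *
+ *	aes_encrypt_amd64(&ks, 14, pt, ct);	// AES-256 => Nr = 14
+ */
+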
+#if defined(lint) || defined(__lint)
+
+#include <sys/types.h>
+/* ARGSUSED */
+void
+aes_encrypt_amd64(const uint32_t rk[], int Nr, const uint32_t pt[4],
+ uint32_t ct[4]) {
+}
+/* ARGSUSED */
+void
+aes_decrypt_amd64(const uint32_t rk[], int Nr, const uint32_t ct[4],
+ uint32_t pt[4]) {
+}
+
+
+#else
+
+#define _ASM
+#include <sys/asm_linkage.h>
+
+#define KS_LENGTH 60
+
+#define raxd eax
+#define rdxd edx
+#define rcxd ecx
+#define rbxd ebx
+#define rsid esi
+#define rdid edi
+
+#define raxb al
+#define rdxb dl
+#define rcxb cl
+#define rbxb bl
+#define rsib sil
+#define rdib dil
+
+// finite field multiplies by {02}, {04} and {08}
+
+#define f2(x) [[x<<1]^[[[x>>7]&1]*0x11b]]
+#define f4(x) [[x<<2]^[[[x>>6]&1]*0x11b]^[[[x>>6]&2]*0x11b]]
+#define f8(x) [[x<<3]^[[[x>>5]&1]*0x11b]^[[[x>>5]&2]*0x11b]^[[[x>>5]&4]*0x11b]]
+
+// finite field multiplies required in table generation
+
+#define f3(x) [[f2(x)] ^ [x]]
+#define f9(x) [[f8(x)] ^ [x]]
+#define fb(x) [[f8(x)] ^ [f2(x)] ^ [x]]
+#define fd(x) [[f8(x)] ^ [f4(x)] ^ [x]]
+#define fe(x) [[f8(x)] ^ [f4(x)] ^ [f2(x)]]
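+
+// Worked example (illustrative): f2 implements multiplication by {02} in
+// GF(2^8) modulo x^8+x^4+x^3+x+1 (0x11b). For x = 0x80: 0x80<<1 = 0x100,
+// and since the high bit of x was set we reduce: 0x100 ^ 0x11b = 0x1b.
+// Likewise f4(0x80) = 0x200 ^ (2*0x11b) = 0x36, i.e. {04}.{80} = {36}.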
+
+// macros for expanding S-box data
+
+#define u8(x) [f2(x)], [x], [x], [f3(x)], [f2(x)], [x], [x], [f3(x)]
+#define v8(x) [fe(x)], [f9(x)], [fd(x)], [fb(x)], [fe(x)], [f9(x)], [fd(x)], [x]
+#define w8(x) [x], 0, 0, 0, [x], 0, 0, 0
+
+#define enc_vals(x) \
+ .byte x(0x63),x(0x7c),x(0x77),x(0x7b),x(0xf2),x(0x6b),x(0x6f),x(0xc5); \
+ .byte x(0x30),x(0x01),x(0x67),x(0x2b),x(0xfe),x(0xd7),x(0xab),x(0x76); \
+ .byte x(0xca),x(0x82),x(0xc9),x(0x7d),x(0xfa),x(0x59),x(0x47),x(0xf0); \
+ .byte x(0xad),x(0xd4),x(0xa2),x(0xaf),x(0x9c),x(0xa4),x(0x72),x(0xc0); \
+ .byte x(0xb7),x(0xfd),x(0x93),x(0x26),x(0x36),x(0x3f),x(0xf7),x(0xcc); \
+ .byte x(0x34),x(0xa5),x(0xe5),x(0xf1),x(0x71),x(0xd8),x(0x31),x(0x15); \
+ .byte x(0x04),x(0xc7),x(0x23),x(0xc3),x(0x18),x(0x96),x(0x05),x(0x9a); \
+ .byte x(0x07),x(0x12),x(0x80),x(0xe2),x(0xeb),x(0x27),x(0xb2),x(0x75); \
+ .byte x(0x09),x(0x83),x(0x2c),x(0x1a),x(0x1b),x(0x6e),x(0x5a),x(0xa0); \
+ .byte x(0x52),x(0x3b),x(0xd6),x(0xb3),x(0x29),x(0xe3),x(0x2f),x(0x84); \
+ .byte x(0x53),x(0xd1),x(0x00),x(0xed),x(0x20),x(0xfc),x(0xb1),x(0x5b); \
+ .byte x(0x6a),x(0xcb),x(0xbe),x(0x39),x(0x4a),x(0x4c),x(0x58),x(0xcf); \
+ .byte x(0xd0),x(0xef),x(0xaa),x(0xfb),x(0x43),x(0x4d),x(0x33),x(0x85); \
+ .byte x(0x45),x(0xf9),x(0x02),x(0x7f),x(0x50),x(0x3c),x(0x9f),x(0xa8); \
+ .byte x(0x51),x(0xa3),x(0x40),x(0x8f),x(0x92),x(0x9d),x(0x38),x(0xf5); \
+ .byte x(0xbc),x(0xb6),x(0xda),x(0x21),x(0x10),x(0xff),x(0xf3),x(0xd2); \
+ .byte x(0xcd),x(0x0c),x(0x13),x(0xec),x(0x5f),x(0x97),x(0x44),x(0x17); \
+ .byte x(0xc4),x(0xa7),x(0x7e),x(0x3d),x(0x64),x(0x5d),x(0x19),x(0x73); \
+ .byte x(0x60),x(0x81),x(0x4f),x(0xdc),x(0x22),x(0x2a),x(0x90),x(0x88); \
+ .byte x(0x46),x(0xee),x(0xb8),x(0x14),x(0xde),x(0x5e),x(0x0b),x(0xdb); \
+ .byte x(0xe0),x(0x32),x(0x3a),x(0x0a),x(0x49),x(0x06),x(0x24),x(0x5c); \
+ .byte x(0xc2),x(0xd3),x(0xac),x(0x62),x(0x91),x(0x95),x(0xe4),x(0x79); \
+ .byte x(0xe7),x(0xc8),x(0x37),x(0x6d),x(0x8d),x(0xd5),x(0x4e),x(0xa9); \
+ .byte x(0x6c),x(0x56),x(0xf4),x(0xea),x(0x65),x(0x7a),x(0xae),x(0x08); \
+ .byte x(0xba),x(0x78),x(0x25),x(0x2e),x(0x1c),x(0xa6),x(0xb4),x(0xc6); \
+ .byte x(0xe8),x(0xdd),x(0x74),x(0x1f),x(0x4b),x(0xbd),x(0x8b),x(0x8a); \
+ .byte x(0x70),x(0x3e),x(0xb5),x(0x66),x(0x48),x(0x03),x(0xf6),x(0x0e); \
+ .byte x(0x61),x(0x35),x(0x57),x(0xb9),x(0x86),x(0xc1),x(0x1d),x(0x9e); \
+ .byte x(0xe1),x(0xf8),x(0x98),x(0x11),x(0x69),x(0xd9),x(0x8e),x(0x94); \
+ .byte x(0x9b),x(0x1e),x(0x87),x(0xe9),x(0xce),x(0x55),x(0x28),x(0xdf); \
+ .byte x(0x8c),x(0xa1),x(0x89),x(0x0d),x(0xbf),x(0xe6),x(0x42),x(0x68); \
+ .byte x(0x41),x(0x99),x(0x2d),x(0x0f),x(0xb0),x(0x54),x(0xbb),x(0x16)
+
+#define dec_vals(x) \
+ .byte x(0x52),x(0x09),x(0x6a),x(0xd5),x(0x30),x(0x36),x(0xa5),x(0x38); \
+ .byte x(0xbf),x(0x40),x(0xa3),x(0x9e),x(0x81),x(0xf3),x(0xd7),x(0xfb); \
+ .byte x(0x7c),x(0xe3),x(0x39),x(0x82),x(0x9b),x(0x2f),x(0xff),x(0x87); \
+ .byte x(0x34),x(0x8e),x(0x43),x(0x44),x(0xc4),x(0xde),x(0xe9),x(0xcb); \
+ .byte x(0x54),x(0x7b),x(0x94),x(0x32),x(0xa6),x(0xc2),x(0x23),x(0x3d); \
+ .byte x(0xee),x(0x4c),x(0x95),x(0x0b),x(0x42),x(0xfa),x(0xc3),x(0x4e); \
+ .byte x(0x08),x(0x2e),x(0xa1),x(0x66),x(0x28),x(0xd9),x(0x24),x(0xb2); \
+ .byte x(0x76),x(0x5b),x(0xa2),x(0x49),x(0x6d),x(0x8b),x(0xd1),x(0x25); \
+ .byte x(0x72),x(0xf8),x(0xf6),x(0x64),x(0x86),x(0x68),x(0x98),x(0x16); \
+ .byte x(0xd4),x(0xa4),x(0x5c),x(0xcc),x(0x5d),x(0x65),x(0xb6),x(0x92); \
+ .byte x(0x6c),x(0x70),x(0x48),x(0x50),x(0xfd),x(0xed),x(0xb9),x(0xda); \
+ .byte x(0x5e),x(0x15),x(0x46),x(0x57),x(0xa7),x(0x8d),x(0x9d),x(0x84); \
+ .byte x(0x90),x(0xd8),x(0xab),x(0x00),x(0x8c),x(0xbc),x(0xd3),x(0x0a); \
+ .byte x(0xf7),x(0xe4),x(0x58),x(0x05),x(0xb8),x(0xb3),x(0x45),x(0x06); \
+ .byte x(0xd0),x(0x2c),x(0x1e),x(0x8f),x(0xca),x(0x3f),x(0x0f),x(0x02); \
+ .byte x(0xc1),x(0xaf),x(0xbd),x(0x03),x(0x01),x(0x13),x(0x8a),x(0x6b); \
+ .byte x(0x3a),x(0x91),x(0x11),x(0x41),x(0x4f),x(0x67),x(0xdc),x(0xea); \
+ .byte x(0x97),x(0xf2),x(0xcf),x(0xce),x(0xf0),x(0xb4),x(0xe6),x(0x73); \
+ .byte x(0x96),x(0xac),x(0x74),x(0x22),x(0xe7),x(0xad),x(0x35),x(0x85); \
+ .byte x(0xe2),x(0xf9),x(0x37),x(0xe8),x(0x1c),x(0x75),x(0xdf),x(0x6e); \
+ .byte x(0x47),x(0xf1),x(0x1a),x(0x71),x(0x1d),x(0x29),x(0xc5),x(0x89); \
+ .byte x(0x6f),x(0xb7),x(0x62),x(0x0e),x(0xaa),x(0x18),x(0xbe),x(0x1b); \
+ .byte x(0xfc),x(0x56),x(0x3e),x(0x4b),x(0xc6),x(0xd2),x(0x79),x(0x20); \
+ .byte x(0x9a),x(0xdb),x(0xc0),x(0xfe),x(0x78),x(0xcd),x(0x5a),x(0xf4); \
+ .byte x(0x1f),x(0xdd),x(0xa8),x(0x33),x(0x88),x(0x07),x(0xc7),x(0x31); \
+ .byte x(0xb1),x(0x12),x(0x10),x(0x59),x(0x27),x(0x80),x(0xec),x(0x5f); \
+ .byte x(0x60),x(0x51),x(0x7f),x(0xa9),x(0x19),x(0xb5),x(0x4a),x(0x0d); \
+ .byte x(0x2d),x(0xe5),x(0x7a),x(0x9f),x(0x93),x(0xc9),x(0x9c),x(0xef); \
+ .byte x(0xa0),x(0xe0),x(0x3b),x(0x4d),x(0xae),x(0x2a),x(0xf5),x(0xb0); \
+ .byte x(0xc8),x(0xeb),x(0xbb),x(0x3c),x(0x83),x(0x53),x(0x99),x(0x61); \
+ .byte x(0x17),x(0x2b),x(0x04),x(0x7e),x(0xba),x(0x77),x(0xd6),x(0x26); \
+ .byte x(0xe1),x(0x69),x(0x14),x(0x63),x(0x55),x(0x21),x(0x0c),x(0x7d)
+
+#define tptr %rbp /* table pointer */
+#define kptr %r8 /* key schedule pointer */
+#define fofs 128 /* adjust offset in key schedule to keep |disp| < 128 */
+#define fk_ref(x, y) -16*x+fofs+4*y(kptr)
+
+#ifdef AES_REV_DKS
+#define rofs 128
+#define ik_ref(x, y) -16*x+rofs+4*y(kptr)
+
+#else
+#define rofs -128
+#define ik_ref(x, y) 16*x+rofs+4*y(kptr)
+#endif /* AES_REV_DKS */
+
+#define tab_0(x) (tptr,x,8)
+#define tab_1(x) 3(tptr,x,8)
+#define tab_2(x) 2(tptr,x,8)
+#define tab_3(x) 1(tptr,x,8)
+#define tab_f(x) 1(tptr,x,8)
+#define tab_i(x) 7(tptr,x,8)
+
+#define ff_rnd(p1, p2, p3, p4, round) /* normal forward round */ \
+ mov fk_ref(round,0), p1; \
+ mov fk_ref(round,1), p2; \
+ mov fk_ref(round,2), p3; \
+ mov fk_ref(round,3), p4; \
+ \
+ movzx %al, %esi; \
+ movzx %ah, %edi; \
+ shr $16, %eax; \
+ xor tab_0(%rsi), p1; \
+ xor tab_1(%rdi), p4; \
+ movzx %al, %esi; \
+ movzx %ah, %edi; \
+ xor tab_2(%rsi), p3; \
+ xor tab_3(%rdi), p2; \
+ \
+ movzx %bl, %esi; \
+ movzx %bh, %edi; \
+ shr $16, %ebx; \
+ xor tab_0(%rsi), p2; \
+ xor tab_1(%rdi), p1; \
+ movzx %bl, %esi; \
+ movzx %bh, %edi; \
+ xor tab_2(%rsi), p4; \
+ xor tab_3(%rdi), p3; \
+ \
+ movzx %cl, %esi; \
+ movzx %ch, %edi; \
+ shr $16, %ecx; \
+ xor tab_0(%rsi), p3; \
+ xor tab_1(%rdi), p2; \
+ movzx %cl, %esi; \
+ movzx %ch, %edi; \
+ xor tab_2(%rsi), p1; \
+ xor tab_3(%rdi), p4; \
+ \
+ movzx %dl, %esi; \
+ movzx %dh, %edi; \
+ shr $16, %edx; \
+ xor tab_0(%rsi), p4; \
+ xor tab_1(%rdi), p3; \
+ movzx %dl, %esi; \
+ movzx %dh, %edi; \
+ xor tab_2(%rsi), p2; \
+ xor tab_3(%rdi), p1; \
+ \
+ mov p1, %eax; \
+ mov p2, %ebx; \
+ mov p3, %ecx; \
+ mov p4, %edx
+
+#ifdef LAST_ROUND_TABLES
+
+#define fl_rnd(p1, p2, p3, p4, round) /* last forward round */ \
+ add $2048, tptr; \
+ mov fk_ref(round,0), p1; \
+ mov fk_ref(round,1), p2; \
+ mov fk_ref(round,2), p3; \
+ mov fk_ref(round,3), p4; \
+ \
+ movzx %al, %esi; \
+ movzx %ah, %edi; \
+ shr $16, %eax; \
+ xor tab_0(%rsi), p1; \
+ xor tab_1(%rdi), p4; \
+ movzx %al, %esi; \
+ movzx %ah, %edi; \
+ xor tab_2(%rsi), p3; \
+ xor tab_3(%rdi), p2; \
+ \
+ movzx %bl, %esi; \
+ movzx %bh, %edi; \
+ shr $16, %ebx; \
+ xor tab_0(%rsi), p2; \
+ xor tab_1(%rdi), p1; \
+ movzx %bl, %esi; \
+ movzx %bh, %edi; \
+ xor tab_2(%rsi), p4; \
+ xor tab_3(%rdi), p3; \
+ \
+ movzx %cl, %esi; \
+ movzx %ch, %edi; \
+ shr $16, %ecx; \
+ xor tab_0(%rsi), p3; \
+ xor tab_1(%rdi), p2; \
+ movzx %cl, %esi; \
+ movzx %ch, %edi; \
+ xor tab_2(%rsi), p1; \
+ xor tab_3(%rdi), p4; \
+ \
+ movzx %dl, %esi; \
+ movzx %dh, %edi; \
+ shr $16, %edx; \
+ xor tab_0(%rsi), p4; \
+ xor tab_1(%rdi), p3; \
+ movzx %dl, %esi; \
+ movzx %dh, %edi; \
+ xor tab_2(%rsi), p2; \
+ xor tab_3(%rdi), p1
+
+#else
+
+#define fl_rnd(p1, p2, p3, p4, round) /* last forward round */ \
+ mov fk_ref(round,0), p1; \
+ mov fk_ref(round,1), p2; \
+ mov fk_ref(round,2), p3; \
+ mov fk_ref(round,3), p4; \
+ \
+ movzx %al, %esi; \
+ movzx %ah, %edi; \
+ shr $16, %eax; \
+ movzx tab_f(%rsi), %esi; \
+ movzx tab_f(%rdi), %edi; \
+ xor %esi, p1; \
+ rol $8, %edi; \
+ xor %edi, p4; \
+ movzx %al, %esi; \
+ movzx %ah, %edi; \
+ movzx tab_f(%rsi), %esi; \
+ movzx tab_f(%rdi), %edi; \
+ rol $16, %esi; \
+ rol $24, %edi; \
+ xor %esi, p3; \
+ xor %edi, p2; \
+ \
+ movzx %bl, %esi; \
+ movzx %bh, %edi; \
+ shr $16, %ebx; \
+ movzx tab_f(%rsi), %esi; \
+ movzx tab_f(%rdi), %edi; \
+ xor %esi, p2; \
+ rol $8, %edi; \
+ xor %edi, p1; \
+ movzx %bl, %esi; \
+ movzx %bh, %edi; \
+ movzx tab_f(%rsi), %esi; \
+ movzx tab_f(%rdi), %edi; \
+ rol $16, %esi; \
+ rol $24, %edi; \
+ xor %esi, p4; \
+ xor %edi, p3; \
+ \
+ movzx %cl, %esi; \
+ movzx %ch, %edi; \
+ movzx tab_f(%rsi), %esi; \
+ movzx tab_f(%rdi), %edi; \
+ shr $16, %ecx; \
+ xor %esi, p3; \
+ rol $8, %edi; \
+ xor %edi, p2; \
+ movzx %cl, %esi; \
+ movzx %ch, %edi; \
+ movzx tab_f(%rsi), %esi; \
+ movzx tab_f(%rdi), %edi; \
+ rol $16, %esi; \
+ rol $24, %edi; \
+ xor %esi, p1; \
+ xor %edi, p4; \
+ \
+ movzx %dl, %esi; \
+ movzx %dh, %edi; \
+ movzx tab_f(%rsi), %esi; \
+ movzx tab_f(%rdi), %edi; \
+ shr $16, %edx; \
+ xor %esi, p4; \
+ rol $8, %edi; \
+ xor %edi, p3; \
+ movzx %dl, %esi; \
+ movzx %dh, %edi; \
+ movzx tab_f(%rsi), %esi; \
+ movzx tab_f(%rdi), %edi; \
+ rol $16, %esi; \
+ rol $24, %edi; \
+ xor %esi, p2; \
+ xor %edi, p1
+
+#endif /* LAST_ROUND_TABLES */
+
+#define ii_rnd(p1, p2, p3, p4, round) /* normal inverse round */ \
+ mov ik_ref(round,0), p1; \
+ mov ik_ref(round,1), p2; \
+ mov ik_ref(round,2), p3; \
+ mov ik_ref(round,3), p4; \
+ \
+ movzx %al, %esi; \
+ movzx %ah, %edi; \
+ shr $16, %eax; \
+ xor tab_0(%rsi), p1; \
+ xor tab_1(%rdi), p2; \
+ movzx %al, %esi; \
+ movzx %ah, %edi; \
+ xor tab_2(%rsi), p3; \
+ xor tab_3(%rdi), p4; \
+ \
+ movzx %bl, %esi; \
+ movzx %bh, %edi; \
+ shr $16, %ebx; \
+ xor tab_0(%rsi), p2; \
+ xor tab_1(%rdi), p3; \
+ movzx %bl, %esi; \
+ movzx %bh, %edi; \
+ xor tab_2(%rsi), p4; \
+ xor tab_3(%rdi), p1; \
+ \
+ movzx %cl, %esi; \
+ movzx %ch, %edi; \
+ shr $16, %ecx; \
+ xor tab_0(%rsi), p3; \
+ xor tab_1(%rdi), p4; \
+ movzx %cl, %esi; \
+ movzx %ch, %edi; \
+ xor tab_2(%rsi), p1; \
+ xor tab_3(%rdi), p2; \
+ \
+ movzx %dl, %esi; \
+ movzx %dh, %edi; \
+ shr $16, %edx; \
+ xor tab_0(%rsi), p4; \
+ xor tab_1(%rdi), p1; \
+ movzx %dl, %esi; \
+ movzx %dh, %edi; \
+ xor tab_2(%rsi), p2; \
+ xor tab_3(%rdi), p3; \
+ \
+ mov p1, %eax; \
+ mov p2, %ebx; \
+ mov p3, %ecx; \
+ mov p4, %edx
+
+#ifdef LAST_ROUND_TABLES
+
+#define il_rnd(p1, p2, p3, p4, round) /* last inverse round */ \
+ add $2048, tptr; \
+ mov ik_ref(round,0), p1; \
+ mov ik_ref(round,1), p2; \
+ mov ik_ref(round,2), p3; \
+ mov ik_ref(round,3), p4; \
+ \
+ movzx %al, %esi; \
+ movzx %ah, %edi; \
+ shr $16, %eax; \
+ xor tab_0(%rsi), p1; \
+ xor tab_1(%rdi), p2; \
+ movzx %al, %esi; \
+ movzx %ah, %edi; \
+ xor tab_2(%rsi), p3; \
+ xor tab_3(%rdi), p4; \
+ \
+ movzx %bl, %esi; \
+ movzx %bh, %edi; \
+ shr $16, %ebx; \
+ xor tab_0(%rsi), p2; \
+ xor tab_1(%rdi), p3; \
+ movzx %bl, %esi; \
+ movzx %bh, %edi; \
+ xor tab_2(%rsi), p4; \
+ xor tab_3(%rdi), p1; \
+ \
+ movzx %cl, %esi; \
+ movzx %ch, %edi; \
+ shr $16, %ecx; \
+ xor tab_0(%rsi), p3; \
+ xor tab_1(%rdi), p4; \
+ movzx %cl, %esi; \
+ movzx %ch, %edi; \
+ xor tab_2(%rsi), p1; \
+ xor tab_3(%rdi), p2; \
+ \
+ movzx %dl, %esi; \
+ movzx %dh, %edi; \
+ shr $16, %edx; \
+ xor tab_0(%rsi), p4; \
+ xor tab_1(%rdi), p1; \
+ movzx %dl, %esi; \
+ movzx %dh, %edi; \
+ xor tab_2(%rsi), p2; \
+ xor tab_3(%rdi), p3
+
+#else
+
+#define il_rnd(p1, p2, p3, p4, round) /* last inverse round */ \
+ mov ik_ref(round,0), p1; \
+ mov ik_ref(round,1), p2; \
+ mov ik_ref(round,2), p3; \
+ mov ik_ref(round,3), p4; \
+ \
+ movzx %al, %esi; \
+ movzx %ah, %edi; \
+ movzx tab_i(%rsi), %esi; \
+ movzx tab_i(%rdi), %edi; \
+ shr $16, %eax; \
+ xor %esi, p1; \
+ rol $8, %edi; \
+ xor %edi, p2; \
+ movzx %al, %esi; \
+ movzx %ah, %edi; \
+ movzx tab_i(%rsi), %esi; \
+ movzx tab_i(%rdi), %edi; \
+ rol $16, %esi; \
+ rol $24, %edi; \
+ xor %esi, p3; \
+ xor %edi, p4; \
+ \
+ movzx %bl, %esi; \
+ movzx %bh, %edi; \
+ movzx tab_i(%rsi), %esi; \
+ movzx tab_i(%rdi), %edi; \
+ shr $16, %ebx; \
+ xor %esi, p2; \
+ rol $8, %edi; \
+ xor %edi, p3; \
+ movzx %bl, %esi; \
+ movzx %bh, %edi; \
+ movzx tab_i(%rsi), %esi; \
+ movzx tab_i(%rdi), %edi; \
+ rol $16, %esi; \
+ rol $24, %edi; \
+ xor %esi, p4; \
+ xor %edi, p1; \
+ \
+ movzx %cl, %esi; \
+ movzx %ch, %edi; \
+ movzx tab_i(%rsi), %esi; \
+ movzx tab_i(%rdi), %edi; \
+ shr $16, %ecx; \
+ xor %esi, p3; \
+ rol $8, %edi; \
+ xor %edi, p4; \
+ movzx %cl, %esi; \
+ movzx %ch, %edi; \
+ movzx tab_i(%rsi), %esi; \
+ movzx tab_i(%rdi), %edi; \
+ rol $16, %esi; \
+ rol $24, %edi; \
+ xor %esi, p1; \
+ xor %edi, p2; \
+ \
+ movzx %dl, %esi; \
+ movzx %dh, %edi; \
+ movzx tab_i(%rsi), %esi; \
+ movzx tab_i(%rdi), %edi; \
+ shr $16, %edx; \
+ xor %esi, p4; \
+ rol $8, %edi; \
+ xor %edi, p1; \
+ movzx %dl, %esi; \
+ movzx %dh, %edi; \
+ movzx tab_i(%rsi), %esi; \
+ movzx tab_i(%rdi), %edi; \
+ rol $16, %esi; \
+ rol $24, %edi; \
+ xor %esi, p2; \
+ xor %edi, p3
+
+#endif /* LAST_ROUND_TABLES */
+
+/*
+ * OpenSolaris OS:
+ *	void aes_encrypt_amd64(const aes_ks_t *ks, int Nr,
+ *		const uint32_t pt[4], uint32_t ct[4]);
+ *
+ * Original interface:
+ *	int aes_encrypt(const unsigned char *in,
+ *		unsigned char *out, const aes_encrypt_ctx cx[1]);
+ */
+ .align 64
+enc_tab:
+ enc_vals(u8)
+#ifdef LAST_ROUND_TABLES
+ // Last Round Tables:
+ enc_vals(w8)
+#endif
+
+
+ ENTRY_NP(aes_encrypt_amd64)
+#ifdef GLADMAN_INTERFACE
+ // Original interface
+ sub $[4*8], %rsp // gnu/linux/opensolaris binary interface
+ mov %rsi, (%rsp) // output pointer (P2)
+ mov %rdx, %r8 // context (P3)
+
+ mov %rbx, 1*8(%rsp) // P1: input pointer in rdi
+ mov %rbp, 2*8(%rsp) // P2: output pointer in (rsp)
+ mov %r12, 3*8(%rsp) // P3: context in r8
+ movzx 4*KS_LENGTH(kptr), %esi // Get byte key length * 16
+
+#else
+ // OpenSolaris OS interface
+ sub $[4*8], %rsp // Make room on stack to save registers
+ mov %rcx, (%rsp) // Save output pointer (P4) on stack
+ mov %rdi, %r8 // context (P1)
+ mov %rdx, %rdi // P3: save input pointer
+ shl $4, %esi // P2: esi byte key length * 16
+
+ mov %rbx, 1*8(%rsp) // Save registers
+ mov %rbp, 2*8(%rsp)
+ mov %r12, 3*8(%rsp)
+ // P1: context in r8
+ // P2: byte key length * 16 in esi
+ // P3: input pointer in rdi
+ // P4: output pointer in (rsp)
+#endif /* GLADMAN_INTERFACE */
+
+ lea enc_tab(%rip), tptr
+ sub $fofs, kptr
+
+ // Load input block into registers
+ mov (%rdi), %eax
+ mov 1*4(%rdi), %ebx
+ mov 2*4(%rdi), %ecx
+ mov 3*4(%rdi), %edx
+
+ xor fofs(kptr), %eax
+ xor fofs+4(kptr), %ebx
+ xor fofs+8(kptr), %ecx
+ xor fofs+12(kptr), %edx
+
+ lea (kptr,%rsi), kptr
+ // Jump based on byte key length * 16:
+ cmp $[10*16], %esi
+ je 3f
+ cmp $[12*16], %esi
+ je 2f
+ cmp $[14*16], %esi
+ je 1f
+ mov $-1, %rax // error
+ jmp 4f
+
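+
+	// Worked dispatch (illustrative): for AES-128 the caller passes
+	// Nr = 10, so %esi = 10*16 = 160 after the shl in the prologue, and
+	// we enter at label 3 to run nine ff_rnd rounds plus the final fl_rnd.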
+ // Perform normal forward rounds
+1: ff_rnd(%r9d, %r10d, %r11d, %r12d, 13)
+ ff_rnd(%r9d, %r10d, %r11d, %r12d, 12)
+2: ff_rnd(%r9d, %r10d, %r11d, %r12d, 11)
+ ff_rnd(%r9d, %r10d, %r11d, %r12d, 10)
+3: ff_rnd(%r9d, %r10d, %r11d, %r12d, 9)
+ ff_rnd(%r9d, %r10d, %r11d, %r12d, 8)
+ ff_rnd(%r9d, %r10d, %r11d, %r12d, 7)
+ ff_rnd(%r9d, %r10d, %r11d, %r12d, 6)
+ ff_rnd(%r9d, %r10d, %r11d, %r12d, 5)
+ ff_rnd(%r9d, %r10d, %r11d, %r12d, 4)
+ ff_rnd(%r9d, %r10d, %r11d, %r12d, 3)
+ ff_rnd(%r9d, %r10d, %r11d, %r12d, 2)
+ ff_rnd(%r9d, %r10d, %r11d, %r12d, 1)
+ fl_rnd(%r9d, %r10d, %r11d, %r12d, 0)
+
+ // Copy results
+ mov (%rsp), %rbx
+ mov %r9d, (%rbx)
+ mov %r10d, 4(%rbx)
+ mov %r11d, 8(%rbx)
+ mov %r12d, 12(%rbx)
+ xor %rax, %rax
+4: // Restore registers
+ mov 1*8(%rsp), %rbx
+ mov 2*8(%rsp), %rbp
+ mov 3*8(%rsp), %r12
+ add $[4*8], %rsp
+ ret
+
+ SET_SIZE(aes_encrypt_amd64)
+
+/*
+ * OpenSolaris OS:
+ *	void aes_decrypt_amd64(const aes_ks_t *ks, int Nr,
+ *		const uint32_t ct[4], uint32_t pt[4]);
+ *
+ * Original interface:
+ *	int aes_decrypt(const unsigned char *in,
+ *		unsigned char *out, const aes_decrypt_ctx cx[1]);
+ */
+ .align 64
+dec_tab:
+ dec_vals(v8)
+#ifdef LAST_ROUND_TABLES
+ // Last Round Tables:
+ dec_vals(w8)
+#endif
+
+
+ ENTRY_NP(aes_decrypt_amd64)
+#ifdef GLADMAN_INTERFACE
+ // Original interface
+ sub $[4*8], %rsp // gnu/linux/opensolaris binary interface
+ mov %rsi, (%rsp) // output pointer (P2)
+ mov %rdx, %r8 // context (P3)
+
+ mov %rbx, 1*8(%rsp) // P1: input pointer in rdi
+ mov %rbp, 2*8(%rsp) // P2: output pointer in (rsp)
+ mov %r12, 3*8(%rsp) // P3: context in r8
+ movzx 4*KS_LENGTH(kptr), %esi // Get byte key length * 16
+
+#else
+ // OpenSolaris OS interface
+ sub $[4*8], %rsp // Make room on stack to save registers
+ mov %rcx, (%rsp) // Save output pointer (P4) on stack
+ mov %rdi, %r8 // context (P1)
+ mov %rdx, %rdi // P3: save input pointer
+ shl $4, %esi // P2: esi byte key length * 16
+
+ mov %rbx, 1*8(%rsp) // Save registers
+ mov %rbp, 2*8(%rsp)
+ mov %r12, 3*8(%rsp)
+ // P1: context in r8
+ // P2: byte key length * 16 in esi
+ // P3: input pointer in rdi
+ // P4: output pointer in (rsp)
+#endif /* GLADMAN_INTERFACE */
+
+ lea dec_tab(%rip), tptr
+ sub $rofs, kptr
+
+ // Load input block into registers
+ mov (%rdi), %eax
+ mov 1*4(%rdi), %ebx
+ mov 2*4(%rdi), %ecx
+ mov 3*4(%rdi), %edx
+
+#ifdef AES_REV_DKS
+ mov kptr, %rdi
+ lea (kptr,%rsi), kptr
+#else
+ lea (kptr,%rsi), %rdi
+#endif
+
+ xor rofs(%rdi), %eax
+ xor rofs+4(%rdi), %ebx
+ xor rofs+8(%rdi), %ecx
+ xor rofs+12(%rdi), %edx
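+
+	// Note (illustrative): decryption round 0 uses encryption round N's
+	// key. With AES_REV_DKS that key was stored at the start of the
+	// schedule, otherwise it sits at the end, which is why %rdi is set
+	// differently in the two branches above before the initial XOR.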
+
+ // Jump based on byte key length * 16:
+ cmp $[10*16], %esi
+ je 3f
+ cmp $[12*16], %esi
+ je 2f
+ cmp $[14*16], %esi
+ je 1f
+ mov $-1, %rax // error
+ jmp 4f
+
+ // Perform normal inverse rounds
+1: ii_rnd(%r9d, %r10d, %r11d, %r12d, 13)
+ ii_rnd(%r9d, %r10d, %r11d, %r12d, 12)
+2: ii_rnd(%r9d, %r10d, %r11d, %r12d, 11)
+ ii_rnd(%r9d, %r10d, %r11d, %r12d, 10)
+3: ii_rnd(%r9d, %r10d, %r11d, %r12d, 9)
+ ii_rnd(%r9d, %r10d, %r11d, %r12d, 8)
+ ii_rnd(%r9d, %r10d, %r11d, %r12d, 7)
+ ii_rnd(%r9d, %r10d, %r11d, %r12d, 6)
+ ii_rnd(%r9d, %r10d, %r11d, %r12d, 5)
+ ii_rnd(%r9d, %r10d, %r11d, %r12d, 4)
+ ii_rnd(%r9d, %r10d, %r11d, %r12d, 3)
+ ii_rnd(%r9d, %r10d, %r11d, %r12d, 2)
+ ii_rnd(%r9d, %r10d, %r11d, %r12d, 1)
+ il_rnd(%r9d, %r10d, %r11d, %r12d, 0)
+
+ // Copy results
+ mov (%rsp), %rbx
+ mov %r9d, (%rbx)
+ mov %r10d, 4(%rbx)
+ mov %r11d, 8(%rbx)
+ mov %r12d, 12(%rbx)
+ xor %rax, %rax
+4: // Restore registers
+ mov 1*8(%rsp), %rbx
+ mov 2*8(%rsp), %rbp
+ mov 3*8(%rsp), %r12
+ add $[4*8], %rsp
+ ret
+
+ SET_SIZE(aes_decrypt_amd64)
+#endif /* lint || __lint */
diff --git a/module/icp/asm-x86_64/aes/aes_intel.S b/module/icp/asm-x86_64/aes/aes_intel.S
new file mode 100644
index 000000000..0b4700f96
--- /dev/null
+++ b/module/icp/asm-x86_64/aes/aes_intel.S
@@ -0,0 +1,851 @@
+/*
+ * ====================================================================
+ * Written by Intel Corporation for the OpenSSL project to add support
+ * for Intel AES-NI instructions. Rights for redistribution and usage
+ * in source and binary forms are granted according to the OpenSSL
+ * license.
+ *
+ * Author: Huang Ying <ying.huang at intel dot com>
+ * Vinodh Gopal <vinodh.gopal at intel dot com>
+ * Kahraman Akdemir
+ *
+ * Intel AES-NI is a new set of Single Instruction Multiple Data (SIMD)
+ * instructions that are going to be introduced in the next generation
+ * of Intel processors, as of 2009. These instructions enable fast and
+ * secure data encryption and decryption, using the Advanced Encryption
+ * Standard (AES), defined by FIPS Publication number 197. The
+ * architecture introduces six instructions that offer full hardware
+ * support for AES. Four of them support high performance data
+ * encryption and decryption, and the other two instructions support
+ * the AES key expansion procedure.
+ * ====================================================================
+ */
+
+/*
+ * ====================================================================
+ * Copyright (c) 1998-2008 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ */
+
+/*
+ * ====================================================================
+ * OpenSolaris OS modifications
+ *
+ * This source originates as files aes-intel.S and eng_aesni_asm.pl, in
+ * patches sent Dec. 9, 2008 and Dec. 24, 2008, respectively, by
+ * Huang Ying of Intel to the openssl-dev mailing list under the subject
+ * of "Add support to Intel AES-NI instruction set for x86_64 platform".
+ *
+ * This OpenSolaris version has these major changes from the original source:
+ *
+ * 1. Added OpenSolaris ENTRY_NP/SET_SIZE macros from
+ * /usr/include/sys/asm_linkage.h, lint(1B) guards, and dummy C function
+ * definitions for lint.
+ *
+ * 2. Formatted code, added comments, and added #includes and #defines.
+ *
+ * 3. If bit CR0.TS is set, clear and set the TS bit, after and before
+ * calling kpreempt_disable() and kpreempt_enable().
+ * If the TS bit is not set, save and restore %xmm registers at the beginning
+ * and end of function calls (%xmm* registers are not saved and restored
+ * during kernel thread preemption).
+ *
+ * 4. Renamed functions, reordered parameters, and changed return value
+ * to match OpenSolaris:
+ *
+ * OpenSSL interface:
+ * int intel_AES_set_encrypt_key(const unsigned char *userKey,
+ * const int bits, AES_KEY *key);
+ * int intel_AES_set_decrypt_key(const unsigned char *userKey,
+ * const int bits, AES_KEY *key);
+ * Return values for above are non-zero on error, 0 on success.
+ *
+ * void intel_AES_encrypt(const unsigned char *in, unsigned char *out,
+ * const AES_KEY *key);
+ * void intel_AES_decrypt(const unsigned char *in, unsigned char *out,
+ * const AES_KEY *key);
+ * typedef struct aes_key_st {
+ * unsigned int rd_key[4 *(AES_MAXNR + 1)];
+ * int rounds;
+ * unsigned int pad[3];
+ * } AES_KEY;
+ * Note: AES_LONG is undefined (that is, Intel uses 32-bit key schedules
+ * (ks32) instead of 64-bit (ks64)).
+ * Number of rounds (aka round count) is at offset 240 of AES_KEY.
+ *
+ * OpenSolaris OS interface (#ifdefs removed for readability):
+ * int rijndael_key_setup_dec_intel(uint32_t rk[],
+ * const uint32_t cipherKey[], uint64_t keyBits);
+ * int rijndael_key_setup_enc_intel(uint32_t rk[],
+ * const uint32_t cipherKey[], uint64_t keyBits);
+ * Return values for above are 0 on error, number of rounds on success.
+ *
+ * void aes_encrypt_intel(const aes_ks_t *ks, int Nr,
+ * const uint32_t pt[4], uint32_t ct[4]);
+ * void aes_decrypt_intel(const aes_ks_t *ks, int Nr,
+ * const uint32_t ct[4], uint32_t pt[4]);
+ * typedef union {uint64_t ks64[(MAX_AES_NR + 1) * 4];
+ * uint32_t ks32[(MAX_AES_NR + 1) * 4]; } aes_ks_t;
+ *
+ * typedef union {
+ * uint32_t ks32[((MAX_AES_NR) + 1) * (MAX_AES_NB)];
+ * } aes_ks_t;
+ * typedef struct aes_key {
+ * aes_ks_t encr_ks, decr_ks;
+ * long double align128;
+ * int flags, nr, type;
+ * } aes_key_t;
+ *
+ * Note: ks is the AES key schedule, Nr is number of rounds, pt is plain text,
+ * ct is cipher text, and MAX_AES_NR is 14.
+ * For the x86 64-bit architecture, OpenSolaris OS uses ks32 instead of ks64.
+ *
+ * Note2: aes_ks_t must be aligned on a 0 mod 128 byte boundary.
+ *
+ * ====================================================================
+ */
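+
+/*
+ * Illustrative call sequence (a sketch only, using the OpenSolaris
+ * interface described above; rk stands in for a properly aligned
+ * aes_ks_t, and error handling is elided):
+ *
+ *	uint32_t rk[(MAX_AES_NR + 1) * 4];	// largest schedule (Nr = 14)
+ *	int nr;
+ *
+ *	nr = rijndael_key_setup_enc_intel(rk, cipherKey, 256);
+ *	if (nr == 0)
+ *		return;				// 0 indicates an error
+ *	aes_encrypt_intel((const aes_ks_t *)rk, nr, pt, ct);
+ */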
+
+#if defined(lint) || defined(__lint)
+
+#include <sys/types.h>
+
+/* ARGSUSED */
+void
+aes_encrypt_intel(const uint32_t rk[], int Nr, const uint32_t pt[4],
+ uint32_t ct[4]) {
+}
+/* ARGSUSED */
+void
+aes_decrypt_intel(const uint32_t rk[], int Nr, const uint32_t ct[4],
+ uint32_t pt[4]) {
+}
+/* ARGSUSED */
+int
+rijndael_key_setup_enc_intel(uint32_t rk[], const uint32_t cipherKey[],
+ uint64_t keyBits) {
+ return (0);
+}
+/* ARGSUSED */
+int
+rijndael_key_setup_dec_intel(uint32_t rk[], const uint32_t cipherKey[],
+ uint64_t keyBits) {
+ return (0);
+}
+
+
+#else /* lint */
+
+#define _ASM
+#include <sys/asm_linkage.h>
+
+#ifdef _KERNEL
+ /*
+ * Note: the CLTS macro clobbers P2 (%rsi) under i86xpv. That is,
+ * it calls HYPERVISOR_fpu_taskswitch() which modifies %rsi when it
+ * uses it to pass P2 to syscall.
+ * This also occurs with the STTS macro, but we don't care if
+ * P2 (%rsi) is modified just before function exit.
+ * The CLTS and STTS macros push and pop P1 (%rdi) already.
+ */
+#ifdef __xpv
+#define PROTECTED_CLTS \
+ push %rsi; \
+ CLTS; \
+ pop %rsi
+#else
+#define PROTECTED_CLTS \
+ CLTS
+#endif /* __xpv */
+
+#define CLEAR_TS_OR_PUSH_XMM0_XMM1(tmpreg) \
+ push %rbp; \
+ mov %rsp, %rbp; \
+ movq %cr0, tmpreg; \
+ testq $CR0_TS, tmpreg; \
+ jnz 1f; \
+ and $-XMM_ALIGN, %rsp; \
+ sub $[XMM_SIZE * 2], %rsp; \
+ movaps %xmm0, 16(%rsp); \
+ movaps %xmm1, (%rsp); \
+ jmp 2f; \
+1: \
+ PROTECTED_CLTS; \
+2:
+
+ /*
+ * If CR0_TS was not set above, pop %xmm0 and %xmm1 off stack,
+ * otherwise set CR0_TS.
+ */
+#define SET_TS_OR_POP_XMM0_XMM1(tmpreg) \
+ testq $CR0_TS, tmpreg; \
+ jnz 1f; \
+ movaps (%rsp), %xmm1; \
+ movaps 16(%rsp), %xmm0; \
+ jmp 2f; \
+1: \
+ STTS(tmpreg); \
+2: \
+ mov %rbp, %rsp; \
+ pop %rbp
+
+ /*
+ * If CR0_TS is not set, align stack (with push %rbp) and push
+ * %xmm0 - %xmm6 on stack, otherwise clear CR0_TS
+ */
+#define CLEAR_TS_OR_PUSH_XMM0_TO_XMM6(tmpreg) \
+ push %rbp; \
+ mov %rsp, %rbp; \
+ movq %cr0, tmpreg; \
+ testq $CR0_TS, tmpreg; \
+ jnz 1f; \
+ and $-XMM_ALIGN, %rsp; \
+ sub $[XMM_SIZE * 7], %rsp; \
+ movaps %xmm0, 96(%rsp); \
+ movaps %xmm1, 80(%rsp); \
+ movaps %xmm2, 64(%rsp); \
+ movaps %xmm3, 48(%rsp); \
+ movaps %xmm4, 32(%rsp); \
+ movaps %xmm5, 16(%rsp); \
+ movaps %xmm6, (%rsp); \
+ jmp 2f; \
+1: \
+ PROTECTED_CLTS; \
+2:
+
+
+ /*
+ * If CR0_TS was not set above, pop %xmm0 - %xmm6 off stack,
+ * otherwise set CR0_TS.
+ */
+#define SET_TS_OR_POP_XMM0_TO_XMM6(tmpreg) \
+ testq $CR0_TS, tmpreg; \
+ jnz 1f; \
+ movaps (%rsp), %xmm6; \
+ movaps 16(%rsp), %xmm5; \
+ movaps 32(%rsp), %xmm4; \
+ movaps 48(%rsp), %xmm3; \
+ movaps 64(%rsp), %xmm2; \
+ movaps 80(%rsp), %xmm1; \
+ movaps 96(%rsp), %xmm0; \
+ jmp 2f; \
+1: \
+ STTS(tmpreg); \
+2: \
+ mov %rbp, %rsp; \
+ pop %rbp
+
+
+#else
+#define PROTECTED_CLTS
+#define CLEAR_TS_OR_PUSH_XMM0_XMM1(tmpreg)
+#define SET_TS_OR_POP_XMM0_XMM1(tmpreg)
+#define CLEAR_TS_OR_PUSH_XMM0_TO_XMM6(tmpreg)
+#define SET_TS_OR_POP_XMM0_TO_XMM6(tmpreg)
+#endif /* _KERNEL */
+
+
+/*
+ * _key_expansion_128(), _key_expansion_192a(), _key_expansion_192b(),
+ * _key_expansion_256a(), _key_expansion_256b()
+ *
+ * Helper functions called by rijndael_key_setup_enc_intel().
+ * Also used indirectly by rijndael_key_setup_dec_intel().
+ *
+ * Input:
+ * %xmm0 User-provided cipher key
+ * %xmm1 Round constant
+ * Output:
+ * (%rcx) AES key
+ */
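+
+/*
+ * For orientation, a C-level sketch of what one 128-bit expansion step
+ * below computes (FIPS-197 notation; w[] is the word array of the key
+ * schedule):
+ *
+ *	w[i]   = w[i-4] ^ SubWord(RotWord(w[i-1])) ^ Rcon[i/4];
+ *	w[i+1] = w[i-3] ^ w[i];
+ *	w[i+2] = w[i-2] ^ w[i+1];
+ *	w[i+3] = w[i-1] ^ w[i+2];
+ *
+ * aeskeygenassist supplies the SubWord/RotWord/Rcon term in %xmm1; the
+ * shufps/pxor pairs perform the xor folding of the previous words.
+ */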
+
+.align 16
+_key_expansion_128:
+_key_expansion_256a:
+ pshufd $0b11111111, %xmm1, %xmm1
+ shufps $0b00010000, %xmm0, %xmm4
+ pxor %xmm4, %xmm0
+ shufps $0b10001100, %xmm0, %xmm4
+ pxor %xmm4, %xmm0
+ pxor %xmm1, %xmm0
+ movaps %xmm0, (%rcx)
+ add $0x10, %rcx
+ ret
+ SET_SIZE(_key_expansion_128)
+ SET_SIZE(_key_expansion_256a)
+
+.align 16
+_key_expansion_192a:
+ pshufd $0b01010101, %xmm1, %xmm1
+ shufps $0b00010000, %xmm0, %xmm4
+ pxor %xmm4, %xmm0
+ shufps $0b10001100, %xmm0, %xmm4
+ pxor %xmm4, %xmm0
+ pxor %xmm1, %xmm0
+
+ movaps %xmm2, %xmm5
+ movaps %xmm2, %xmm6
+ pslldq $4, %xmm5
+ pshufd $0b11111111, %xmm0, %xmm3
+ pxor %xmm3, %xmm2
+ pxor %xmm5, %xmm2
+
+ movaps %xmm0, %xmm1
+ shufps $0b01000100, %xmm0, %xmm6
+ movaps %xmm6, (%rcx)
+ shufps $0b01001110, %xmm2, %xmm1
+ movaps %xmm1, 0x10(%rcx)
+ add $0x20, %rcx
+ ret
+ SET_SIZE(_key_expansion_192a)
+
+.align 16
+_key_expansion_192b:
+ pshufd $0b01010101, %xmm1, %xmm1
+ shufps $0b00010000, %xmm0, %xmm4
+ pxor %xmm4, %xmm0
+ shufps $0b10001100, %xmm0, %xmm4
+ pxor %xmm4, %xmm0
+ pxor %xmm1, %xmm0
+
+ movaps %xmm2, %xmm5
+ pslldq $4, %xmm5
+ pshufd $0b11111111, %xmm0, %xmm3
+ pxor %xmm3, %xmm2
+ pxor %xmm5, %xmm2
+
+ movaps %xmm0, (%rcx)
+ add $0x10, %rcx
+ ret
+ SET_SIZE(_key_expansion_192b)
+
+.align 16
+_key_expansion_256b:
+ pshufd $0b10101010, %xmm1, %xmm1
+ shufps $0b00010000, %xmm2, %xmm4
+ pxor %xmm4, %xmm2
+ shufps $0b10001100, %xmm2, %xmm4
+ pxor %xmm4, %xmm2
+ pxor %xmm1, %xmm2
+ movaps %xmm2, (%rcx)
+ add $0x10, %rcx
+ ret
+ SET_SIZE(_key_expansion_256b)
+
+
+/*
+ * rijndael_key_setup_enc_intel()
+ * Expand the cipher key into the encryption key schedule.
+ *
+ * For kernel code, caller is responsible for ensuring kpreempt_disable()
+ * has been called. This is because %xmm registers are not saved/restored.
+ * Clear and set the CR0.TS bit on entry and exit, respectively, if TS is set
+ * on entry. Otherwise, if TS is not set, save and restore %xmm registers
+ * on the stack.
+ *
+ * OpenSolaris interface:
+ * int rijndael_key_setup_enc_intel(uint32_t rk[], const uint32_t cipherKey[],
+ * uint64_t keyBits);
+ * Return value is 0 on error, number of rounds on success.
+ *
+ * Original Intel OpenSSL interface:
+ * int intel_AES_set_encrypt_key(const unsigned char *userKey,
+ * const int bits, AES_KEY *key);
+ * Return value is non-zero on error, 0 on success.
+ */
+
+#ifdef OPENSSL_INTERFACE
+#define rijndael_key_setup_enc_intel intel_AES_set_encrypt_key
+#define rijndael_key_setup_dec_intel intel_AES_set_decrypt_key
+
+#define USERCIPHERKEY rdi /* P1, 64 bits */
+#define KEYSIZE32 esi /* P2, 32 bits */
+#define KEYSIZE64 rsi /* P2, 64 bits */
+#define AESKEY rdx /* P3, 64 bits */
+
+#else /* OpenSolaris Interface */
+#define AESKEY rdi /* P1, 64 bits */
+#define USERCIPHERKEY rsi /* P2, 64 bits */
+#define KEYSIZE32 edx /* P3, 32 bits */
+#define KEYSIZE64 rdx /* P3, 64 bits */
+#endif /* OPENSSL_INTERFACE */
+
+#define ROUNDS32 KEYSIZE32 /* temp */
+#define ROUNDS64 KEYSIZE64 /* temp */
+#define ENDAESKEY USERCIPHERKEY /* temp */
+
+ENTRY_NP(rijndael_key_setup_enc_intel)
+rijndael_key_setup_enc_intel_local:
+ CLEAR_TS_OR_PUSH_XMM0_TO_XMM6(%r10)
+
+ // NULL pointer sanity check
+ test %USERCIPHERKEY, %USERCIPHERKEY
+ jz .Lenc_key_invalid_param
+ test %AESKEY, %AESKEY
+ jz .Lenc_key_invalid_param
+
+ movups (%USERCIPHERKEY), %xmm0 // user key (first 16 bytes)
+ movaps %xmm0, (%AESKEY)
+ lea 0x10(%AESKEY), %rcx // key addr
+ pxor %xmm4, %xmm4 // xmm4 is assumed 0 in _key_expansion_x
+
+ cmp $256, %KEYSIZE32
+ jnz .Lenc_key192
+
+ // AES 256: 14 rounds in encryption key schedule
+#ifdef OPENSSL_INTERFACE
+ mov $14, %ROUNDS32
+ movl %ROUNDS32, 240(%AESKEY) // key.rounds = 14
+#endif /* OPENSSL_INTERFACE */
+
+ movups 0x10(%USERCIPHERKEY), %xmm2 // other user key (2nd 16 bytes)
+ movaps %xmm2, (%rcx)
+ add $0x10, %rcx
+
+ aeskeygenassist $0x1, %xmm2, %xmm1 // expand the key
+ call _key_expansion_256a
+ aeskeygenassist $0x1, %xmm0, %xmm1
+ call _key_expansion_256b
+ aeskeygenassist $0x2, %xmm2, %xmm1 // expand the key
+ call _key_expansion_256a
+ aeskeygenassist $0x2, %xmm0, %xmm1
+ call _key_expansion_256b
+ aeskeygenassist $0x4, %xmm2, %xmm1 // expand the key
+ call _key_expansion_256a
+ aeskeygenassist $0x4, %xmm0, %xmm1
+ call _key_expansion_256b
+ aeskeygenassist $0x8, %xmm2, %xmm1 // expand the key
+ call _key_expansion_256a
+ aeskeygenassist $0x8, %xmm0, %xmm1
+ call _key_expansion_256b
+ aeskeygenassist $0x10, %xmm2, %xmm1 // expand the key
+ call _key_expansion_256a
+ aeskeygenassist $0x10, %xmm0, %xmm1
+ call _key_expansion_256b
+ aeskeygenassist $0x20, %xmm2, %xmm1 // expand the key
+ call _key_expansion_256a
+ aeskeygenassist $0x20, %xmm0, %xmm1
+ call _key_expansion_256b
+ aeskeygenassist $0x40, %xmm2, %xmm1 // expand the key
+ call _key_expansion_256a
+
+ SET_TS_OR_POP_XMM0_TO_XMM6(%r10)
+#ifdef OPENSSL_INTERFACE
+ xor %rax, %rax // return 0 (OK)
+#else /* OpenSolaris Interface */
+ mov $14, %rax // return # rounds = 14
+#endif
+ ret
+
+.align 4
+.Lenc_key192:
+ cmp $192, %KEYSIZE32
+ jnz .Lenc_key128
+
+ // AES 192: 12 rounds in encryption key schedule
+#ifdef OPENSSL_INTERFACE
+ mov $12, %ROUNDS32
+ movl %ROUNDS32, 240(%AESKEY) // key.rounds = 12
+#endif /* OPENSSL_INTERFACE */
+
+ movq 0x10(%USERCIPHERKEY), %xmm2 // other user key
+ aeskeygenassist $0x1, %xmm2, %xmm1 // expand the key
+ call _key_expansion_192a
+ aeskeygenassist $0x2, %xmm2, %xmm1 // expand the key
+ call _key_expansion_192b
+ aeskeygenassist $0x4, %xmm2, %xmm1 // expand the key
+ call _key_expansion_192a
+ aeskeygenassist $0x8, %xmm2, %xmm1 // expand the key
+ call _key_expansion_192b
+ aeskeygenassist $0x10, %xmm2, %xmm1 // expand the key
+ call _key_expansion_192a
+ aeskeygenassist $0x20, %xmm2, %xmm1 // expand the key
+ call _key_expansion_192b
+ aeskeygenassist $0x40, %xmm2, %xmm1 // expand the key
+ call _key_expansion_192a
+ aeskeygenassist $0x80, %xmm2, %xmm1 // expand the key
+ call _key_expansion_192b
+
+ SET_TS_OR_POP_XMM0_TO_XMM6(%r10)
+#ifdef OPENSSL_INTERFACE
+ xor %rax, %rax // return 0 (OK)
+#else /* OpenSolaris Interface */
+ mov $12, %rax // return # rounds = 12
+#endif
+ ret
+
+.align 4
+.Lenc_key128:
+ cmp $128, %KEYSIZE32
+ jnz .Lenc_key_invalid_key_bits
+
+ // AES 128: 10 rounds in encryption key schedule
+#ifdef OPENSSL_INTERFACE
+ mov $10, %ROUNDS32
+ movl %ROUNDS32, 240(%AESKEY) // key.rounds = 10
+#endif /* OPENSSL_INTERFACE */
+
+ aeskeygenassist $0x1, %xmm0, %xmm1 // expand the key
+ call _key_expansion_128
+ aeskeygenassist $0x2, %xmm0, %xmm1 // expand the key
+ call _key_expansion_128
+ aeskeygenassist $0x4, %xmm0, %xmm1 // expand the key
+ call _key_expansion_128
+ aeskeygenassist $0x8, %xmm0, %xmm1 // expand the key
+ call _key_expansion_128
+ aeskeygenassist $0x10, %xmm0, %xmm1 // expand the key
+ call _key_expansion_128
+ aeskeygenassist $0x20, %xmm0, %xmm1 // expand the key
+ call _key_expansion_128
+ aeskeygenassist $0x40, %xmm0, %xmm1 // expand the key
+ call _key_expansion_128
+ aeskeygenassist $0x80, %xmm0, %xmm1 // expand the key
+ call _key_expansion_128
+ aeskeygenassist $0x1b, %xmm0, %xmm1 // expand the key
+ call _key_expansion_128
+ aeskeygenassist $0x36, %xmm0, %xmm1 // expand the key
+ call _key_expansion_128
+
+ SET_TS_OR_POP_XMM0_TO_XMM6(%r10)
+#ifdef OPENSSL_INTERFACE
+ xor %rax, %rax // return 0 (OK)
+#else /* OpenSolaris Interface */
+ mov $10, %rax // return # rounds = 10
+#endif
+ ret
+
+.Lenc_key_invalid_param:
+#ifdef OPENSSL_INTERFACE
+ SET_TS_OR_POP_XMM0_TO_XMM6(%r10)
+ mov $-1, %rax // user key or AES key pointer is NULL
+ ret
+#else
+ /* FALLTHROUGH */
+#endif /* OPENSSL_INTERFACE */
+
+.Lenc_key_invalid_key_bits:
+ SET_TS_OR_POP_XMM0_TO_XMM6(%r10)
+#ifdef OPENSSL_INTERFACE
+ mov $-2, %rax // keysize is invalid
+#else /* OpenSolaris Interface */
+ xor %rax, %rax // a key pointer is NULL or invalid keysize
+#endif /* OPENSSL_INTERFACE */
+
+ ret
+ SET_SIZE(rijndael_key_setup_enc_intel)
+
+
+/*
+ * rijndael_key_setup_dec_intel()
+ * Expand the cipher key into the decryption key schedule.
+ *
+ * For kernel code, caller is responsible for ensuring kpreempt_disable()
+ * has been called. This is because %xmm registers are not saved/restored.
+ * Clear and set the CR0.TS bit on entry and exit, respectively, if TS is set
+ * on entry. Otherwise, if TS is not set, save and restore %xmm registers
+ * on the stack.
+ *
+ * OpenSolaris interface:
+ * int rijndael_key_setup_dec_intel(uint32_t rk[], const uint32_t cipherKey[],
+ * uint64_t keyBits);
+ * Return value is 0 on error, number of rounds on success.
+ * P1->P2, P2->P3, P3->P1
+ *
+ * Original Intel OpenSSL interface:
+ * int intel_AES_set_decrypt_key(const unsigned char *userKey,
+ * const int bits, AES_KEY *key);
+ * Return value is non-zero on error, 0 on success.
+ */
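+/*
+ * Sketch of the transformation performed below (C-like pseudocode):
+ *
+ *	build the encryption schedule rk[0 .. Nr] (the call above);
+ *	reverse the order of the (Nr + 1) round keys in place;
+ *	for (i = 1; i < Nr; i++)
+ *		rk[i] = InvMixColumns(rk[i]);	// the aesimc instruction
+ *
+ * The first and last round keys are used unchanged.
+ */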
+ENTRY_NP(rijndael_key_setup_dec_intel)
+ // Generate round keys used for encryption
+ call rijndael_key_setup_enc_intel_local
+ test %rax, %rax
+#ifdef OPENSSL_INTERFACE
+ jnz .Ldec_key_exit // Failed if returned non-0
+#else /* OpenSolaris Interface */
+ jz .Ldec_key_exit // Failed if returned 0
+#endif /* OPENSSL_INTERFACE */
+
+ CLEAR_TS_OR_PUSH_XMM0_XMM1(%r10)
+
+ /*
+ * Convert round keys used for encryption
+ * to a form usable for decryption
+ */
+#ifndef OPENSSL_INTERFACE /* OpenSolaris Interface */
+ mov %rax, %ROUNDS64 // set # rounds (10, 12, or 14)
+ // (already set for OpenSSL)
+#endif
+
+ lea 0x10(%AESKEY), %rcx // key addr
+ shl $4, %ROUNDS32
+ add %AESKEY, %ROUNDS64
+ mov %ROUNDS64, %ENDAESKEY
+
+.align 4
+.Ldec_key_reorder_loop:
+ movaps (%AESKEY), %xmm0
+ movaps (%ROUNDS64), %xmm1
+ movaps %xmm0, (%ROUNDS64)
+ movaps %xmm1, (%AESKEY)
+ lea 0x10(%AESKEY), %AESKEY
+ lea -0x10(%ROUNDS64), %ROUNDS64
+ cmp %AESKEY, %ROUNDS64
+ ja .Ldec_key_reorder_loop
+
+.align 4
+.Ldec_key_inv_loop:
+ movaps (%rcx), %xmm0
+ // Convert an encryption round key to a form usable for decryption
+ // with the "AES Inverse Mix Columns" instruction
+ aesimc %xmm0, %xmm1
+ movaps %xmm1, (%rcx)
+ lea 0x10(%rcx), %rcx
+ cmp %ENDAESKEY, %rcx
+ jnz .Ldec_key_inv_loop
+
+ SET_TS_OR_POP_XMM0_XMM1(%r10)
+
+.Ldec_key_exit:
+ // OpenSolaris: rax = # rounds (10, 12, or 14) or 0 for error
+ // OpenSSL: rax = 0 for OK, or non-zero for error
+ ret
+ SET_SIZE(rijndael_key_setup_dec_intel)
+
+
+/*
+ * aes_encrypt_intel()
+ * Encrypt a single block (in and out can overlap).
+ *
+ * For kernel code, caller is responsible for ensuring kpreempt_disable()
+ * has been called. This is because %xmm registers are not saved/restored.
+ * Clear and set the CR0.TS bit on entry and exit, respectively, if TS is set
+ * on entry. Otherwise, if TS is not set, save and restore %xmm registers
+ * on the stack.
+ *
+ * Temporary register usage:
+ * %xmm0 State
+ * %xmm1 Key
+ *
+ * Original OpenSolaris Interface:
+ * void aes_encrypt_intel(const aes_ks_t *ks, int Nr,
+ * const uint32_t pt[4], uint32_t ct[4])
+ *
+ * Original Intel OpenSSL Interface:
+ * void intel_AES_encrypt(const unsigned char *in, unsigned char *out,
+ * const AES_KEY *key)
+ */
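+
+/*
+ * Flow of the code below, as a C-like sketch (for orientation only):
+ *
+ *	state = in ^ rk[0];			// round 0
+ *	for (i = 1; i < Nr; i++)
+ *		state = aesenc(state, rk[i]);	// rounds 1 .. Nr - 1
+ *	out = aesenclast(state, rk[Nr]);	// final round
+ *
+ * KEYP is biased up front so that a single instruction tail serves all
+ * three key sizes.
+ */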
+
+#ifdef OPENSSL_INTERFACE
+#define aes_encrypt_intel intel_AES_encrypt
+#define aes_decrypt_intel intel_AES_decrypt
+
+#define INP rdi /* P1, 64 bits */
+#define OUTP rsi /* P2, 64 bits */
+#define KEYP rdx /* P3, 64 bits */
+
+/* No NROUNDS parameter--offset 240 from KEYP saved in %ecx: */
+#define NROUNDS32 ecx /* temporary, 32 bits */
+#define NROUNDS cl /* temporary, 8 bits */
+
+#else /* OpenSolaris Interface */
+#define KEYP rdi /* P1, 64 bits */
+#define NROUNDS esi /* P2, 32 bits */
+#define INP rdx /* P3, 64 bits */
+#define OUTP rcx /* P4, 64 bits */
+#endif /* OPENSSL_INTERFACE */
+
+#define STATE xmm0 /* temporary, 128 bits */
+#define KEY xmm1 /* temporary, 128 bits */
+
+ENTRY_NP(aes_encrypt_intel)
+ CLEAR_TS_OR_PUSH_XMM0_XMM1(%r10)
+
+ movups (%INP), %STATE // input
+ movaps (%KEYP), %KEY // key
+#ifdef OPENSSL_INTERFACE
+ mov 240(%KEYP), %NROUNDS32 // round count
+#else /* OpenSolaris Interface */
+ /* Round count is already present as P2 in %rsi/%esi */
+#endif /* OPENSSL_INTERFACE */
+
+ pxor %KEY, %STATE // round 0
+ lea 0x30(%KEYP), %KEYP
+ cmp $12, %NROUNDS
+ jb .Lenc128
+ lea 0x20(%KEYP), %KEYP
+ je .Lenc192
+
+ // AES 256
+ lea 0x20(%KEYP), %KEYP
+ movaps -0x60(%KEYP), %KEY
+ aesenc %KEY, %STATE
+ movaps -0x50(%KEYP), %KEY
+ aesenc %KEY, %STATE
+
+.align 4
+.Lenc192:
+ // AES 192 and 256
+ movaps -0x40(%KEYP), %KEY
+ aesenc %KEY, %STATE
+ movaps -0x30(%KEYP), %KEY
+ aesenc %KEY, %STATE
+
+.align 4
+.Lenc128:
+ // AES 128, 192, and 256
+ movaps -0x20(%KEYP), %KEY
+ aesenc %KEY, %STATE
+ movaps -0x10(%KEYP), %KEY
+ aesenc %KEY, %STATE
+ movaps (%KEYP), %KEY
+ aesenc %KEY, %STATE
+ movaps 0x10(%KEYP), %KEY
+ aesenc %KEY, %STATE
+ movaps 0x20(%KEYP), %KEY
+ aesenc %KEY, %STATE
+ movaps 0x30(%KEYP), %KEY
+ aesenc %KEY, %STATE
+ movaps 0x40(%KEYP), %KEY
+ aesenc %KEY, %STATE
+ movaps 0x50(%KEYP), %KEY
+ aesenc %KEY, %STATE
+ movaps 0x60(%KEYP), %KEY
+ aesenc %KEY, %STATE
+ movaps 0x70(%KEYP), %KEY
+ aesenclast %KEY, %STATE // last round
+ movups %STATE, (%OUTP) // output
+
+ SET_TS_OR_POP_XMM0_XMM1(%r10)
+ ret
+ SET_SIZE(aes_encrypt_intel)
+
+
+/*
+ * aes_decrypt_intel()
+ * Decrypt a single block (in and out can overlap).
+ *
+ * For kernel code, caller is responsible for ensuring kpreempt_disable()
+ * has been called. This is because %xmm registers are not saved/restored.
+ * Clear and set the CR0.TS bit on entry and exit, respectively, if TS is set
+ * on entry. Otherwise, if TS is not set, save and restore %xmm registers
+ * on the stack.
+ *
+ * Temporary register usage:
+ * %xmm0 State
+ * %xmm1 Key
+ *
+ * Original OpenSolaris Interface:
+ * void aes_decrypt_intel(const aes_ks_t *ks, int Nr,
+ * const uint32_t ct[4], uint32_t pt[4])
+ *
+ * Original Intel OpenSSL Interface:
+ * void intel_AES_decrypt(const unsigned char *in, unsigned char *out,
+ * const AES_KEY *key);
+ */
+ENTRY_NP(aes_decrypt_intel)
+ CLEAR_TS_OR_PUSH_XMM0_XMM1(%r10)
+
+ movups (%INP), %STATE // input
+ movaps (%KEYP), %KEY // key
+#ifdef OPENSSL_INTERFACE
+ mov 240(%KEYP), %NROUNDS32 // round count
+#else /* OpenSolaris Interface */
+ /* Round count is already present as P2 in %rsi/%esi */
+#endif /* OPENSSL_INTERFACE */
+
+ pxor %KEY, %STATE // round 0
+ lea 0x30(%KEYP), %KEYP
+ cmp $12, %NROUNDS
+ jb .Ldec128
+ lea 0x20(%KEYP), %KEYP
+ je .Ldec192
+
+ // AES 256
+ lea 0x20(%KEYP), %KEYP
+ movaps -0x60(%KEYP), %KEY
+ aesdec %KEY, %STATE
+ movaps -0x50(%KEYP), %KEY
+ aesdec %KEY, %STATE
+
+.align 4
+.Ldec192:
+ // AES 192 and 256
+ movaps -0x40(%KEYP), %KEY
+ aesdec %KEY, %STATE
+ movaps -0x30(%KEYP), %KEY
+ aesdec %KEY, %STATE
+
+.align 4
+.Ldec128:
+ // AES 128, 192, and 256
+ movaps -0x20(%KEYP), %KEY
+ aesdec %KEY, %STATE
+ movaps -0x10(%KEYP), %KEY
+ aesdec %KEY, %STATE
+ movaps (%KEYP), %KEY
+ aesdec %KEY, %STATE
+ movaps 0x10(%KEYP), %KEY
+ aesdec %KEY, %STATE
+ movaps 0x20(%KEYP), %KEY
+ aesdec %KEY, %STATE
+ movaps 0x30(%KEYP), %KEY
+ aesdec %KEY, %STATE
+ movaps 0x40(%KEYP), %KEY
+ aesdec %KEY, %STATE
+ movaps 0x50(%KEYP), %KEY
+ aesdec %KEY, %STATE
+ movaps 0x60(%KEYP), %KEY
+ aesdec %KEY, %STATE
+ movaps 0x70(%KEYP), %KEY
+ aesdeclast %KEY, %STATE // last round
+ movups %STATE, (%OUTP) // output
+
+ SET_TS_OR_POP_XMM0_XMM1(%r10)
+ ret
+ SET_SIZE(aes_decrypt_intel)
+
+#endif /* lint || __lint */
diff --git a/module/icp/asm-x86_64/aes/aeskey.c b/module/icp/asm-x86_64/aes/aeskey.c
new file mode 100644
index 000000000..96767fbea
--- /dev/null
+++ b/module/icp/asm-x86_64/aes/aeskey.c
@@ -0,0 +1,580 @@
+/*
+ * ---------------------------------------------------------------------------
+ * Copyright (c) 1998-2007, Brian Gladman, Worcester, UK. All rights reserved.
+ *
+ * LICENSE TERMS
+ *
+ * The free distribution and use of this software is allowed (with or without
+ * changes) provided that:
+ *
+ * 1. source code distributions include the above copyright notice, this
+ * list of conditions and the following disclaimer;
+ *
+ * 2. binary distributions include the above copyright notice, this list
+ * of conditions and the following disclaimer in their documentation;
+ *
+ * 3. the name of the copyright holder is not used to endorse products
+ * built using this software without specific written permission.
+ *
+ * DISCLAIMER
+ *
+ * This software is provided 'as is' with no explicit or implied warranties
+ * in respect of its properties, including, but not limited to, correctness
+ * and/or fitness for purpose.
+ * ---------------------------------------------------------------------------
+ * Issue Date: 20/12/2007
+ */
+
+#include <aes/aes_impl.h>
+#include "aesopt.h"
+#include "aestab.h"
+#include "aestab2.h"
+
+/*
+ * Initialise the key schedule from the user supplied key. The key
+ * length can be specified in bytes, with legal values of 16, 24
+ * and 32, or in bits, with legal values of 128, 192 and 256. These
+ * values correspond with Nk values of 4, 6 and 8 respectively.
+ *
+ * The following macros implement a single cycle in the key
+ * schedule generation process. The number of cycles needed
+ * for each cx->n_col and nk value is:
+ *
+ * nk =            4    5    6    7    8
+ * -------------------------------------
+ * cx->n_col = 4  10    9    8    7    7
+ * cx->n_col = 5  14   11   10    9    9
+ * cx->n_col = 6  19   15   12   11   11
+ * cx->n_col = 7  21   19   16   13   14
+ * cx->n_col = 8  29   23   19   17   14
+ */
+
+/*
+ * OpenSolaris changes
+ * 1. Added header files aes_impl.h and aestab2.h
+ * 2. Changed uint_8t and uint_32t to uint8_t and uint32_t
+ * 3. Removed code under ifdef USE_VIA_ACE_IF_PRESENT (always undefined)
+ * 4. Removed the always-defined ifdefs FUNCS_IN_C, ENC_KEYING_IN_C,
+ * AES_128, AES_192, AES_256, and AES_VAR
+ * 5. Changed aes_encrypt_key* aes_decrypt_key* functions to "static void"
+ * 6. Changed N_COLS to MAX_AES_NB
+ * 7. Replaced functions aes_encrypt_key and aes_decrypt_key with
+ * OpenSolaris-compatible functions rijndael_key_setup_enc_amd64 and
+ * rijndael_key_setup_dec_amd64
+ * 8. cstyled code and removed lint warnings
+ */
+
+#if defined(REDUCE_CODE_SIZE)
+#define ls_box ls_sub
+ uint32_t ls_sub(const uint32_t t, const uint32_t n);
+#define inv_mcol im_sub
+ uint32_t im_sub(const uint32_t x);
+#ifdef ENC_KS_UNROLL
+#undef ENC_KS_UNROLL
+#endif
+#ifdef DEC_KS_UNROLL
+#undef DEC_KS_UNROLL
+#endif
+#endif /* REDUCE_CODE_SIZE */
+
+
+#define ke4(k, i) \
+{ k[4 * (i) + 4] = ss[0] ^= ls_box(ss[3], 3) ^ t_use(r, c)[i]; \
+ k[4 * (i) + 5] = ss[1] ^= ss[0]; \
+ k[4 * (i) + 6] = ss[2] ^= ss[1]; \
+ k[4 * (i) + 7] = ss[3] ^= ss[2]; \
+}
+
+static void
+aes_encrypt_key128(const unsigned char *key, uint32_t rk[])
+{
+ uint32_t ss[4];
+
+ rk[0] = ss[0] = word_in(key, 0);
+ rk[1] = ss[1] = word_in(key, 1);
+ rk[2] = ss[2] = word_in(key, 2);
+ rk[3] = ss[3] = word_in(key, 3);
+
+#ifdef ENC_KS_UNROLL
+ ke4(rk, 0); ke4(rk, 1);
+ ke4(rk, 2); ke4(rk, 3);
+ ke4(rk, 4); ke4(rk, 5);
+ ke4(rk, 6); ke4(rk, 7);
+ ke4(rk, 8);
+#else
+ {
+ uint32_t i;
+ for (i = 0; i < 9; ++i)
+ ke4(rk, i);
+ }
+#endif /* ENC_KS_UNROLL */
+ ke4(rk, 9);
+}
+
+
+#define kef6(k, i) \
+{ k[6 * (i) + 6] = ss[0] ^= ls_box(ss[5], 3) ^ t_use(r, c)[i]; \
+ k[6 * (i) + 7] = ss[1] ^= ss[0]; \
+ k[6 * (i) + 8] = ss[2] ^= ss[1]; \
+ k[6 * (i) + 9] = ss[3] ^= ss[2]; \
+}
+
+#define ke6(k, i) \
+{ kef6(k, i); \
+ k[6 * (i) + 10] = ss[4] ^= ss[3]; \
+ k[6 * (i) + 11] = ss[5] ^= ss[4]; \
+}
+
+static void
+aes_encrypt_key192(const unsigned char *key, uint32_t rk[])
+{
+ uint32_t ss[6];
+
+ rk[0] = ss[0] = word_in(key, 0);
+ rk[1] = ss[1] = word_in(key, 1);
+ rk[2] = ss[2] = word_in(key, 2);
+ rk[3] = ss[3] = word_in(key, 3);
+ rk[4] = ss[4] = word_in(key, 4);
+ rk[5] = ss[5] = word_in(key, 5);
+
+#ifdef ENC_KS_UNROLL
+ ke6(rk, 0); ke6(rk, 1);
+ ke6(rk, 2); ke6(rk, 3);
+ ke6(rk, 4); ke6(rk, 5);
+ ke6(rk, 6);
+#else
+ {
+ uint32_t i;
+ for (i = 0; i < 7; ++i)
+ ke6(rk, i);
+ }
+#endif /* ENC_KS_UNROLL */
+ kef6(rk, 7);
+}
+
+
+
+#define kef8(k, i) \
+{ k[8 * (i) + 8] = ss[0] ^= ls_box(ss[7], 3) ^ t_use(r, c)[i]; \
+ k[8 * (i) + 9] = ss[1] ^= ss[0]; \
+ k[8 * (i) + 10] = ss[2] ^= ss[1]; \
+ k[8 * (i) + 11] = ss[3] ^= ss[2]; \
+}
+
+#define ke8(k, i) \
+{ kef8(k, i); \
+ k[8 * (i) + 12] = ss[4] ^= ls_box(ss[3], 0); \
+ k[8 * (i) + 13] = ss[5] ^= ss[4]; \
+ k[8 * (i) + 14] = ss[6] ^= ss[5]; \
+ k[8 * (i) + 15] = ss[7] ^= ss[6]; \
+}
+
+static void
+aes_encrypt_key256(const unsigned char *key, uint32_t rk[])
+{
+ uint32_t ss[8];
+
+ rk[0] = ss[0] = word_in(key, 0);
+ rk[1] = ss[1] = word_in(key, 1);
+ rk[2] = ss[2] = word_in(key, 2);
+ rk[3] = ss[3] = word_in(key, 3);
+ rk[4] = ss[4] = word_in(key, 4);
+ rk[5] = ss[5] = word_in(key, 5);
+ rk[6] = ss[6] = word_in(key, 6);
+ rk[7] = ss[7] = word_in(key, 7);
+
+#ifdef ENC_KS_UNROLL
+ ke8(rk, 0); ke8(rk, 1);
+ ke8(rk, 2); ke8(rk, 3);
+ ke8(rk, 4); ke8(rk, 5);
+#else
+ {
+ uint32_t i;
+ for (i = 0; i < 6; ++i)
+ ke8(rk, i);
+ }
+#endif /* ENC_KS_UNROLL */
+ kef8(rk, 6);
+}
+
+
+/*
+ * Expand the cipher key into the encryption key schedule.
+ *
+ * Return the number of rounds for the given cipher key size.
+ * The size of the key schedule depends on the number of rounds
+ * (which can be computed from the size of the key), i.e. 4 * (Nr + 1).
+ *
+ * Parameters:
+ * rk AES key schedule 32-bit array to be initialized
+ * cipherKey User key
+ * keyBits AES key size (128, 192, or 256 bits)
+ */
+int
+rijndael_key_setup_enc_amd64(uint32_t rk[], const uint32_t cipherKey[],
+ int keyBits)
+{
+ switch (keyBits) {
+ case 128:
+ aes_encrypt_key128((unsigned char *)&cipherKey[0], rk);
+ return (10);
+ case 192:
+ aes_encrypt_key192((unsigned char *)&cipherKey[0], rk);
+ return (12);
+ case 256:
+ aes_encrypt_key256((unsigned char *)&cipherKey[0], rk);
+ return (14);
+ default: /* should never get here */
+ break;
+ }
+
+ return (0);
+}
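+
+/*
+ * Illustrative use (a sketch; real callers size rk per aes_impl.h):
+ *
+ *	uint32_t rk[4 * (14 + 1)];	// room for the largest schedule
+ *	int nr = rijndael_key_setup_enc_amd64(rk, cipherKey, 256);
+ *	// nr is now 14 and rk[0 .. 4 * (nr + 1) - 1] holds the schedule
+ */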
+
+
+/*
+ * v(n, i) is used to store the decryption round keys in forward or
+ * reverse order.
+ */
+
+#ifdef AES_REV_DKS
+#define v(n, i) ((n) - (i) + 2 * ((i) & 3))
+#else
+#define v(n, i) (i)
+#endif
+
+#if DEC_ROUND == NO_TABLES
+#define ff(x) (x)
+#else
+#define ff(x) inv_mcol(x)
+#if defined(dec_imvars)
+#define d_vars dec_imvars
+#endif
+#endif /* DEC_ROUND == NO_TABLES */
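+
+/*
+ * Note (a sketch of the intent): when decryption tables are in use,
+ * ff() is inv_mcol(), so the middle decryption round keys built below
+ * are stored pre-multiplied by InvMixColumns, as the table-driven
+ * inverse rounds expect; the first and last round keys are unchanged.
+ */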
+
+
+#define k4e(k, i) \
+{ k[v(40, (4 * (i)) + 4)] = ss[0] ^= ls_box(ss[3], 3) ^ t_use(r, c)[i]; \
+ k[v(40, (4 * (i)) + 5)] = ss[1] ^= ss[0]; \
+ k[v(40, (4 * (i)) + 6)] = ss[2] ^= ss[1]; \
+ k[v(40, (4 * (i)) + 7)] = ss[3] ^= ss[2]; \
+}
+
+#if 1
+
+#define kdf4(k, i) \
+{ ss[0] = ss[0] ^ ss[2] ^ ss[1] ^ ss[3]; \
+ ss[1] = ss[1] ^ ss[3]; \
+ ss[2] = ss[2] ^ ss[3]; \
+ ss[4] = ls_box(ss[(i + 3) % 4], 3) ^ t_use(r, c)[i]; \
+ ss[i % 4] ^= ss[4]; \
+ ss[4] ^= k[v(40, (4 * (i)))]; k[v(40, (4 * (i)) + 4)] = ff(ss[4]); \
+ ss[4] ^= k[v(40, (4 * (i)) + 1)]; k[v(40, (4 * (i)) + 5)] = ff(ss[4]); \
+ ss[4] ^= k[v(40, (4 * (i)) + 2)]; k[v(40, (4 * (i)) + 6)] = ff(ss[4]); \
+ ss[4] ^= k[v(40, (4 * (i)) + 3)]; k[v(40, (4 * (i)) + 7)] = ff(ss[4]); \
+}
+
+#define kd4(k, i) \
+{ ss[4] = ls_box(ss[(i + 3) % 4], 3) ^ t_use(r, c)[i]; \
+ ss[i % 4] ^= ss[4]; ss[4] = ff(ss[4]); \
+ k[v(40, (4 * (i)) + 4)] = ss[4] ^= k[v(40, (4 * (i)))]; \
+ k[v(40, (4 * (i)) + 5)] = ss[4] ^= k[v(40, (4 * (i)) + 1)]; \
+ k[v(40, (4 * (i)) + 6)] = ss[4] ^= k[v(40, (4 * (i)) + 2)]; \
+ k[v(40, (4 * (i)) + 7)] = ss[4] ^= k[v(40, (4 * (i)) + 3)]; \
+}
+
+#define kdl4(k, i) \
+{ ss[4] = ls_box(ss[(i + 3) % 4], 3) ^ t_use(r, c)[i]; \
+ ss[i % 4] ^= ss[4]; \
+ k[v(40, (4 * (i)) + 4)] = (ss[0] ^= ss[1]) ^ ss[2] ^ ss[3]; \
+ k[v(40, (4 * (i)) + 5)] = ss[1] ^ ss[3]; \
+ k[v(40, (4 * (i)) + 6)] = ss[0]; \
+ k[v(40, (4 * (i)) + 7)] = ss[1]; \
+}
+
+#else
+
+#define kdf4(k, i) \
+{ ss[0] ^= ls_box(ss[3], 3) ^ t_use(r, c)[i]; \
+ k[v(40, (4 * (i)) + 4)] = ff(ss[0]); \
+ ss[1] ^= ss[0]; k[v(40, (4 * (i)) + 5)] = ff(ss[1]); \
+ ss[2] ^= ss[1]; k[v(40, (4 * (i)) + 6)] = ff(ss[2]); \
+ ss[3] ^= ss[2]; k[v(40, (4 * (i)) + 7)] = ff(ss[3]); \
+}
+
+#define kd4(k, i) \
+{ ss[4] = ls_box(ss[3], 3) ^ t_use(r, c)[i]; \
+ ss[0] ^= ss[4]; \
+ ss[4] = ff(ss[4]); \
+ k[v(40, (4 * (i)) + 4)] = ss[4] ^= k[v(40, (4 * (i)))]; \
+ ss[1] ^= ss[0]; \
+ k[v(40, (4 * (i)) + 5)] = ss[4] ^= k[v(40, (4 * (i)) + 1)]; \
+ ss[2] ^= ss[1]; \
+ k[v(40, (4 * (i)) + 6)] = ss[4] ^= k[v(40, (4 * (i)) + 2)]; \
+ ss[3] ^= ss[2]; \
+ k[v(40, (4 * (i)) + 7)] = ss[4] ^= k[v(40, (4 * (i)) + 3)]; \
+}
+
+#define kdl4(k, i) \
+{ ss[0] ^= ls_box(ss[3], 3) ^ t_use(r, c)[i]; \
+ k[v(40, (4 * (i)) + 4)] = ss[0]; \
+ ss[1] ^= ss[0]; k[v(40, (4 * (i)) + 5)] = ss[1]; \
+ ss[2] ^= ss[1]; k[v(40, (4 * (i)) + 6)] = ss[2]; \
+ ss[3] ^= ss[2]; k[v(40, (4 * (i)) + 7)] = ss[3]; \
+}
+
+#endif
+
+static void
+aes_decrypt_key128(const unsigned char *key, uint32_t rk[])
+{
+ uint32_t ss[5];
+#if defined(d_vars)
+ d_vars;
+#endif
+ rk[v(40, (0))] = ss[0] = word_in(key, 0);
+ rk[v(40, (1))] = ss[1] = word_in(key, 1);
+ rk[v(40, (2))] = ss[2] = word_in(key, 2);
+ rk[v(40, (3))] = ss[3] = word_in(key, 3);
+
+#ifdef DEC_KS_UNROLL
+ kdf4(rk, 0); kd4(rk, 1);
+ kd4(rk, 2); kd4(rk, 3);
+ kd4(rk, 4); kd4(rk, 5);
+ kd4(rk, 6); kd4(rk, 7);
+ kd4(rk, 8); kdl4(rk, 9);
+#else
+ {
+ uint32_t i;
+ for (i = 0; i < 10; ++i)
+ k4e(rk, i);
+#if !(DEC_ROUND == NO_TABLES)
+ for (i = MAX_AES_NB; i < 10 * MAX_AES_NB; ++i)
+ rk[i] = inv_mcol(rk[i]);
+#endif
+ }
+#endif /* DEC_KS_UNROLL */
+}
+
+
+
+#define k6ef(k, i) \
+{ k[v(48, (6 * (i)) + 6)] = ss[0] ^= ls_box(ss[5], 3) ^ t_use(r, c)[i]; \
+ k[v(48, (6 * (i)) + 7)] = ss[1] ^= ss[0]; \
+ k[v(48, (6 * (i)) + 8)] = ss[2] ^= ss[1]; \
+ k[v(48, (6 * (i)) + 9)] = ss[3] ^= ss[2]; \
+}
+
+#define k6e(k, i) \
+{ k6ef(k, i); \
+ k[v(48, (6 * (i)) + 10)] = ss[4] ^= ss[3]; \
+ k[v(48, (6 * (i)) + 11)] = ss[5] ^= ss[4]; \
+}
+
+#define kdf6(k, i) \
+{ ss[0] ^= ls_box(ss[5], 3) ^ t_use(r, c)[i]; \
+ k[v(48, (6 * (i)) + 6)] = ff(ss[0]); \
+ ss[1] ^= ss[0]; k[v(48, (6 * (i)) + 7)] = ff(ss[1]); \
+ ss[2] ^= ss[1]; k[v(48, (6 * (i)) + 8)] = ff(ss[2]); \
+ ss[3] ^= ss[2]; k[v(48, (6 * (i)) + 9)] = ff(ss[3]); \
+ ss[4] ^= ss[3]; k[v(48, (6 * (i)) + 10)] = ff(ss[4]); \
+ ss[5] ^= ss[4]; k[v(48, (6 * (i)) + 11)] = ff(ss[5]); \
+}
+
+#define kd6(k, i) \
+{ ss[6] = ls_box(ss[5], 3) ^ t_use(r, c)[i]; \
+ ss[0] ^= ss[6]; ss[6] = ff(ss[6]); \
+ k[v(48, (6 * (i)) + 6)] = ss[6] ^= k[v(48, (6 * (i)))]; \
+ ss[1] ^= ss[0]; \
+ k[v(48, (6 * (i)) + 7)] = ss[6] ^= k[v(48, (6 * (i)) + 1)]; \
+ ss[2] ^= ss[1]; \
+ k[v(48, (6 * (i)) + 8)] = ss[6] ^= k[v(48, (6 * (i)) + 2)]; \
+ ss[3] ^= ss[2]; \
+ k[v(48, (6 * (i)) + 9)] = ss[6] ^= k[v(48, (6 * (i)) + 3)]; \
+ ss[4] ^= ss[3]; \
+ k[v(48, (6 * (i)) + 10)] = ss[6] ^= k[v(48, (6 * (i)) + 4)]; \
+ ss[5] ^= ss[4]; \
+ k[v(48, (6 * (i)) + 11)] = ss[6] ^= k[v(48, (6 * (i)) + 5)]; \
+}
+
+#define kdl6(k, i) \
+{ ss[0] ^= ls_box(ss[5], 3) ^ t_use(r, c)[i]; \
+ k[v(48, (6 * (i)) + 6)] = ss[0]; \
+ ss[1] ^= ss[0]; k[v(48, (6 * (i)) + 7)] = ss[1]; \
+ ss[2] ^= ss[1]; k[v(48, (6 * (i)) + 8)] = ss[2]; \
+ ss[3] ^= ss[2]; k[v(48, (6 * (i)) + 9)] = ss[3]; \
+}
+
+static void
+aes_decrypt_key192(const unsigned char *key, uint32_t rk[])
+{
+ uint32_t ss[7];
+#if defined(d_vars)
+ d_vars;
+#endif
+ rk[v(48, (0))] = ss[0] = word_in(key, 0);
+ rk[v(48, (1))] = ss[1] = word_in(key, 1);
+ rk[v(48, (2))] = ss[2] = word_in(key, 2);
+ rk[v(48, (3))] = ss[3] = word_in(key, 3);
+
+#ifdef DEC_KS_UNROLL
+ ss[4] = word_in(key, 4);
+ rk[v(48, (4))] = ff(ss[4]);
+ ss[5] = word_in(key, 5);
+ rk[v(48, (5))] = ff(ss[5]);
+ kdf6(rk, 0); kd6(rk, 1);
+ kd6(rk, 2); kd6(rk, 3);
+ kd6(rk, 4); kd6(rk, 5);
+ kd6(rk, 6); kdl6(rk, 7);
+#else
+ rk[v(48, (4))] = ss[4] = word_in(key, 4);
+ rk[v(48, (5))] = ss[5] = word_in(key, 5);
+ {
+ uint32_t i;
+
+ for (i = 0; i < 7; ++i)
+ k6e(rk, i);
+ k6ef(rk, 7);
+#if !(DEC_ROUND == NO_TABLES)
+ for (i = MAX_AES_NB; i < 12 * MAX_AES_NB; ++i)
+ rk[i] = inv_mcol(rk[i]);
+#endif
+ }
+#endif
+}
+
+
+
+#define k8ef(k, i) \
+{ k[v(56, (8 * (i)) + 8)] = ss[0] ^= ls_box(ss[7], 3) ^ t_use(r, c)[i]; \
+ k[v(56, (8 * (i)) + 9)] = ss[1] ^= ss[0]; \
+ k[v(56, (8 * (i)) + 10)] = ss[2] ^= ss[1]; \
+ k[v(56, (8 * (i)) + 11)] = ss[3] ^= ss[2]; \
+}
+
+#define k8e(k, i) \
+{ k8ef(k, i); \
+ k[v(56, (8 * (i)) + 12)] = ss[4] ^= ls_box(ss[3], 0); \
+ k[v(56, (8 * (i)) + 13)] = ss[5] ^= ss[4]; \
+ k[v(56, (8 * (i)) + 14)] = ss[6] ^= ss[5]; \
+ k[v(56, (8 * (i)) + 15)] = ss[7] ^= ss[6]; \
+}
+
+#define kdf8(k, i) \
+{ ss[0] ^= ls_box(ss[7], 3) ^ t_use(r, c)[i]; \
+ k[v(56, (8 * (i)) + 8)] = ff(ss[0]); \
+ ss[1] ^= ss[0]; k[v(56, (8 * (i)) + 9)] = ff(ss[1]); \
+ ss[2] ^= ss[1]; k[v(56, (8 * (i)) + 10)] = ff(ss[2]); \
+ ss[3] ^= ss[2]; k[v(56, (8 * (i)) + 11)] = ff(ss[3]); \
+ ss[4] ^= ls_box(ss[3], 0); k[v(56, (8 * (i)) + 12)] = ff(ss[4]); \
+ ss[5] ^= ss[4]; k[v(56, (8 * (i)) + 13)] = ff(ss[5]); \
+ ss[6] ^= ss[5]; k[v(56, (8 * (i)) + 14)] = ff(ss[6]); \
+ ss[7] ^= ss[6]; k[v(56, (8 * (i)) + 15)] = ff(ss[7]); \
+}
+
+#define kd8(k, i) \
+{ ss[8] = ls_box(ss[7], 3) ^ t_use(r, c)[i]; \
+ ss[0] ^= ss[8]; \
+ ss[8] = ff(ss[8]); \
+ k[v(56, (8 * (i)) + 8)] = ss[8] ^= k[v(56, (8 * (i)))]; \
+ ss[1] ^= ss[0]; \
+ k[v(56, (8 * (i)) + 9)] = ss[8] ^= k[v(56, (8 * (i)) + 1)]; \
+ ss[2] ^= ss[1]; \
+ k[v(56, (8 * (i)) + 10)] = ss[8] ^= k[v(56, (8 * (i)) + 2)]; \
+ ss[3] ^= ss[2]; \
+ k[v(56, (8 * (i)) + 11)] = ss[8] ^= k[v(56, (8 * (i)) + 3)]; \
+ ss[8] = ls_box(ss[3], 0); \
+ ss[4] ^= ss[8]; \
+ ss[8] = ff(ss[8]); \
+ k[v(56, (8 * (i)) + 12)] = ss[8] ^= k[v(56, (8 * (i)) + 4)]; \
+ ss[5] ^= ss[4]; \
+ k[v(56, (8 * (i)) + 13)] = ss[8] ^= k[v(56, (8 * (i)) + 5)]; \
+ ss[6] ^= ss[5]; \
+ k[v(56, (8 * (i)) + 14)] = ss[8] ^= k[v(56, (8 * (i)) + 6)]; \
+ ss[7] ^= ss[6]; \
+ k[v(56, (8 * (i)) + 15)] = ss[8] ^= k[v(56, (8 * (i)) + 7)]; \
+}
+
+#define kdl8(k, i) \
+{ ss[0] ^= ls_box(ss[7], 3) ^ t_use(r, c)[i]; \
+ k[v(56, (8 * (i)) + 8)] = ss[0]; \
+ ss[1] ^= ss[0]; k[v(56, (8 * (i)) + 9)] = ss[1]; \
+ ss[2] ^= ss[1]; k[v(56, (8 * (i)) + 10)] = ss[2]; \
+ ss[3] ^= ss[2]; k[v(56, (8 * (i)) + 11)] = ss[3]; \
+}
+
+static void
+aes_decrypt_key256(const unsigned char *key, uint32_t rk[])
+{
+ uint32_t ss[9];
+#if defined(d_vars)
+ d_vars;
+#endif
+ rk[v(56, (0))] = ss[0] = word_in(key, 0);
+ rk[v(56, (1))] = ss[1] = word_in(key, 1);
+ rk[v(56, (2))] = ss[2] = word_in(key, 2);
+ rk[v(56, (3))] = ss[3] = word_in(key, 3);
+
+#ifdef DEC_KS_UNROLL
+ ss[4] = word_in(key, 4);
+ rk[v(56, (4))] = ff(ss[4]);
+ ss[5] = word_in(key, 5);
+ rk[v(56, (5))] = ff(ss[5]);
+ ss[6] = word_in(key, 6);
+ rk[v(56, (6))] = ff(ss[6]);
+ ss[7] = word_in(key, 7);
+ rk[v(56, (7))] = ff(ss[7]);
+ kdf8(rk, 0); kd8(rk, 1);
+ kd8(rk, 2); kd8(rk, 3);
+ kd8(rk, 4); kd8(rk, 5);
+ kdl8(rk, 6);
+#else
+ rk[v(56, (4))] = ss[4] = word_in(key, 4);
+ rk[v(56, (5))] = ss[5] = word_in(key, 5);
+ rk[v(56, (6))] = ss[6] = word_in(key, 6);
+ rk[v(56, (7))] = ss[7] = word_in(key, 7);
+ {
+ uint32_t i;
+
+ for (i = 0; i < 6; ++i)
+ k8e(rk, i);
+ k8ef(rk, 6);
+#if !(DEC_ROUND == NO_TABLES)
+ for (i = MAX_AES_NB; i < 14 * MAX_AES_NB; ++i)
+ rk[i] = inv_mcol(rk[i]);
+#endif
+ }
+#endif /* DEC_KS_UNROLL */
+}
+
+
+/*
+ * Expand the cipher key into the decryption key schedule.
+ *
+ * Return the number of rounds for the given cipher key size.
+ * The size of the key schedule depends on the number of rounds
+ * (which can be computed from the size of the key), i.e. 4 * (Nr + 1).
+ *
+ * Parameters:
+ * rk AES key schedule 32-bit array to be initialized
+ * cipherKey User key
+ * keyBits AES key size (128, 192, or 256 bits)
+ */
+int
+rijndael_key_setup_dec_amd64(uint32_t rk[], const uint32_t cipherKey[],
+ int keyBits)
+{
+ switch (keyBits) {
+ case 128:
+ aes_decrypt_key128((unsigned char *)&cipherKey[0], rk);
+ return (10);
+ case 192:
+ aes_decrypt_key192((unsigned char *)&cipherKey[0], rk);
+ return (12);
+ case 256:
+ aes_decrypt_key256((unsigned char *)&cipherKey[0], rk);
+ return (14);
+ default: /* should never get here */
+ break;
+ }
+
+ return (0);
+}
diff --git a/module/icp/asm-x86_64/aes/aesopt.h b/module/icp/asm-x86_64/aes/aesopt.h
new file mode 100644
index 000000000..6aa61db82
--- /dev/null
+++ b/module/icp/asm-x86_64/aes/aesopt.h
@@ -0,0 +1,770 @@
+/*
+ * ---------------------------------------------------------------------------
+ * Copyright (c) 1998-2007, Brian Gladman, Worcester, UK. All rights reserved.
+ *
+ * LICENSE TERMS
+ *
+ * The free distribution and use of this software is allowed (with or without
+ * changes) provided that:
+ *
+ * 1. source code distributions include the above copyright notice, this
+ * list of conditions and the following disclaimer;
+ *
+ * 2. binary distributions include the above copyright notice, this list
+ * of conditions and the following disclaimer in their documentation;
+ *
+ * 3. the name of the copyright holder is not used to endorse products
+ * built using this software without specific written permission.
+ *
+ * DISCLAIMER
+ *
+ * This software is provided 'as is' with no explicit or implied warranties
+ * in respect of its properties, including, but not limited to, correctness
+ * and/or fitness for purpose.
+ * ---------------------------------------------------------------------------
+ * Issue Date: 20/12/2007
+ *
+ * This file contains the compilation options for AES (Rijndael) and code
+ * that is common across encryption, key scheduling and table generation.
+ *
+ * OPERATION
+ *
+ * These source code files implement the AES algorithm Rijndael designed by
+ * Joan Daemen and Vincent Rijmen. This version is designed for the standard
+ * block size of 16 bytes and for key sizes of 128, 192 and 256 bits (16, 24
+ * and 32 bytes).
+ *
+ * This version is designed for flexibility and speed using operations on
+ * 32-bit words rather than operations on bytes. It can be compiled with
+ * either big or little endian internal byte order but is faster when the
+ * native byte order for the processor is used.
+ *
+ * THE CIPHER INTERFACE
+ *
+ * The cipher interface is implemented as an array of bytes in which lower
+ * AES bit sequence indexes map to higher numeric significance within bytes.
+ */
+
+/*
+ * OpenSolaris changes
+ * 1. Added __cplusplus and _AESTAB_H header guards
+ * 2. Added header files sys/types.h and aes_impl.h
+ * 3. Added defines for AES_ENCRYPT, AES_DECRYPT, AES_REV_DKS, and ASM_AMD64_C
+ * 4. Moved defines for IS_BIG_ENDIAN, IS_LITTLE_ENDIAN, PLATFORM_BYTE_ORDER
+ * from brg_endian.h
+ * 5. Undefined VIA_ACE_POSSIBLE and ASSUME_VIA_ACE_PRESENT
+ * 6. Changed uint_8t and uint_32t to uint8_t and uint32_t
+ * 7. Defined aes_sw32 as htonl() for byte swapping
+ * 8. Cstyled and hdrchk code
+ *
+ */
+
+#ifndef _AESOPT_H
+#define _AESOPT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/zfs_context.h>
+#include <aes/aes_impl.h>
+
+/* SUPPORT FEATURES */
+#define AES_ENCRYPT /* if support for encryption is needed */
+#define AES_DECRYPT /* if support for decryption is needed */
+
+/* PLATFORM-SPECIFIC FEATURES */
+#define IS_BIG_ENDIAN 4321 /* byte 0 is most significant (mc68k) */
+#define IS_LITTLE_ENDIAN 1234 /* byte 0 is least significant (i386) */
+#define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN
+#define AES_REV_DKS /* define to reverse decryption key schedule */
+
+
+/*
+ * CONFIGURATION - THE USE OF DEFINES
+ * Later in this section there are a number of defines that control the
+ * operation of the code. In each section, the purpose of each define is
+ * explained so that the relevant form can be included or excluded by
+ * setting either 1's or 0's respectively on the branches of the related
+ * #if clauses. The following local defines should not be changed.
+ */
+
+#define ENCRYPTION_IN_C 1
+#define DECRYPTION_IN_C 2
+#define ENC_KEYING_IN_C 4
+#define DEC_KEYING_IN_C 8
+
+#define NO_TABLES 0
+#define ONE_TABLE 1
+#define FOUR_TABLES 4
+#define NONE 0
+#define PARTIAL 1
+#define FULL 2
+
+/* --- START OF USER CONFIGURED OPTIONS --- */
+
+/*
+ * 1. BYTE ORDER WITHIN 32 BIT WORDS
+ *
+ * The fundamental data processing units in Rijndael are 8-bit bytes. The
+ * input, output and key input are all enumerated arrays of bytes in which
+ * bytes are numbered starting at zero and increasing to one less than the
+ * number of bytes in the array in question. This enumeration is only used
+ * for naming bytes and does not imply any adjacency or order relationship
+ * from one byte to another. When these inputs and outputs are considered
+ * as bit sequences, bits 8*n to 8*n+7 of the bit sequence are mapped to
+ * byte[n] with bit 8n+i in the sequence mapped to bit 7-i within the byte.
+ * In this implementation bits are numbered from 0 to 7 starting at the
+ * numerically least significant end of each byte. Bit n represents 2^n.
+ *
+ * However, Rijndael can be implemented more efficiently using 32-bit
+ * words by packing bytes into words so that bytes 4*n to 4*n+3 are placed
+ * into word[n]. While in principle these bytes can be assembled into words
+ * in any positions, this implementation only supports the two formats in
+ * which bytes in adjacent positions within words also have adjacent byte
+ * numbers. This order is called big-endian if the lowest numbered bytes
+ * in words have the highest numeric significance and little-endian if the
+ * opposite applies.
+ *
+ * This code can work in either order irrespective of the order used by the
+ * machine on which it runs. Normally the internal byte order will be set
+ * to the order of the processor on which the code is to be run but this
+ * define can be used to reverse this in special situations
+ *
+ * WARNING: Assembler code versions rely on PLATFORM_BYTE_ORDER being set.
+ * This define will hence be redefined later (in section 4) if necessary
+ */
+
+#if 1
+#define ALGORITHM_BYTE_ORDER PLATFORM_BYTE_ORDER
+#elif 0
+#define ALGORITHM_BYTE_ORDER IS_LITTLE_ENDIAN
+#elif 0
+#define ALGORITHM_BYTE_ORDER IS_BIG_ENDIAN
+#else
+#error The algorithm byte order is not defined
+#endif
+
+/* 2. VIA ACE SUPPORT */
+
+#if defined(__GNUC__) && defined(__i386__) || \
+ defined(_WIN32) && defined(_M_IX86) && \
+ !(defined(_WIN64) || defined(_WIN32_WCE) || \
+ defined(_MSC_VER) && (_MSC_VER <= 800))
+#define VIA_ACE_POSSIBLE
+#endif
+
+/*
+ * Define this option if support for the VIA ACE is required. This uses
+ * inline assembler instructions and is only implemented for the Microsoft,
+ * Intel and GCC compilers. If VIA ACE is known to be present, then defining
+ * ASSUME_VIA_ACE_PRESENT will remove the ordinary encryption/decryption
+ * code. If USE_VIA_ACE_IF_PRESENT is defined then VIA ACE will be used if
+ * it is detected (both present and enabled) but the normal AES code will
+ * also be present.
+ *
+ * When VIA ACE is to be used, all AES encryption contexts MUST be 16 byte
+ * aligned; other input/output buffers do not need to be 16 byte aligned
+ * but there are very large performance gains if this can be arranged.
+ * VIA ACE also requires the decryption key schedule to be in reverse
+ * order (which later checks below ensure).
+ */
+
+/* VIA ACE is not used here for OpenSolaris: */
+#undef VIA_ACE_POSSIBLE
+#undef ASSUME_VIA_ACE_PRESENT
+
+#if 0 && defined(VIA_ACE_POSSIBLE) && !defined(USE_VIA_ACE_IF_PRESENT)
+#define USE_VIA_ACE_IF_PRESENT
+#endif
+
+#if 0 && defined(VIA_ACE_POSSIBLE) && !defined(ASSUME_VIA_ACE_PRESENT)
+#define ASSUME_VIA_ACE_PRESENT
+#endif
+
+
+/*
+ * 3. ASSEMBLER SUPPORT
+ *
+ * This define (which can be on the command line) enables the use of the
+ * assembler code routines for encryption, decryption and key scheduling
+ * as follows:
+ *
+ * ASM_X86_V1C uses the assembler (aes_x86_v1.asm) with large tables for
+ * encryption and decryption but with key scheduling in C
+ * ASM_X86_V2 uses assembler (aes_x86_v2.asm) with compressed tables for
+ * encryption, decryption and key scheduling
+ * ASM_X86_V2C uses assembler (aes_x86_v2.asm) with compressed tables for
+ * encryption and decryption but with key scheduling in C
+ * ASM_AMD64_C uses assembler (aes_amd64.asm) with compressed tables for
+ * encryption and decryption but with key scheduling in C
+ *
+ * Change one 'if 0' below to 'if 1' to select the version or define
+ * as a compilation option.
+ */
+
+#if 0 && !defined(ASM_X86_V1C)
+#define ASM_X86_V1C
+#elif 0 && !defined(ASM_X86_V2)
+#define ASM_X86_V2
+#elif 0 && !defined(ASM_X86_V2C)
+#define ASM_X86_V2C
+#elif 1 && !defined(ASM_AMD64_C)
+#define ASM_AMD64_C
+#endif
+
+#if (defined(ASM_X86_V1C) || defined(ASM_X86_V2) || defined(ASM_X86_V2C)) && \
+ !defined(_M_IX86) || defined(ASM_AMD64_C) && !defined(_M_X64) && \
+ !defined(__amd64)
+#error Assembler code is only available for x86 and AMD64 systems
+#endif
+
+/*
+ * 4. FAST INPUT/OUTPUT OPERATIONS.
+ *
+ * On some machines it is possible to improve speed by transferring the
+ * bytes in the input and output arrays to and from the internal 32-bit
+ * variables by addressing these arrays as if they are arrays of 32-bit
+ * words. On some machines this will always be possible but there may
+ * be a large performance penalty if the byte arrays are not aligned on
+ * the normal word boundaries. On other machines this technique will
+ * lead to memory access errors when such 32-bit word accesses are not
+ * properly aligned. The option SAFE_IO avoids such problems but will
+ * often be slower on those machines that support misaligned access
+ * (especially so if care is taken to align the input and output byte
+ * arrays on 32-bit word boundaries). If SAFE_IO is not defined it is
+ * assumed that access to byte arrays as if they are arrays of 32-bit
+ * words will not cause problems when such accesses are misaligned.
+ */
+#if 1 && !defined(_MSC_VER)
+#define SAFE_IO
+#endif
+
+/*
+ * 5. LOOP UNROLLING
+ *
+ * The code for encryption and decryption cycles through a number of rounds
+ * that can be implemented either in a loop or by expanding the code into a
+ * long sequence of instructions, the latter producing a larger program but
+ * one that will often be much faster. The latter is called loop unrolling.
+ * There are also potential speed advantages in expanding two iterations in
+ * a loop with half the number of iterations, which is called partial loop
+ * unrolling. The following options allow partial or full loop unrolling
+ * to be set independently for encryption and decryption
+ */
+#if 1
+#define ENC_UNROLL FULL
+#elif 0
+#define ENC_UNROLL PARTIAL
+#else
+#define ENC_UNROLL NONE
+#endif
+
+#if 1
+#define DEC_UNROLL FULL
+#elif 0
+#define DEC_UNROLL PARTIAL
+#else
+#define DEC_UNROLL NONE
+#endif
+
+#if 1
+#define ENC_KS_UNROLL
+#endif
+
+#if 1
+#define DEC_KS_UNROLL
+#endif
+
+/*
+ * 6. FAST FINITE FIELD OPERATIONS
+ *
+ * If this section is included, tables are used to provide faster finite
+ * field arithmetic. This has no effect if FIXED_TABLES is defined.
+ */
+#if 1
+#define FF_TABLES
+#endif
+
+/*
+ * 7. INTERNAL STATE VARIABLE FORMAT
+ *
+ * The internal state of Rijndael is stored in a number of local 32-bit
+ * word variables which can be defined either as an array or as individual
+ * names variables. Include this section if you want to store these local
+ * variables in arrays. Otherwise individual local variables will be used.
+ */
+#if 1
+#define ARRAYS
+#endif
+
+/*
+ * 8. FIXED OR DYNAMIC TABLES
+ *
+ * When this section is included the tables used by the code are compiled
+ * statically into the binary file. Otherwise the subroutine aes_init()
+ * must be called to compute them before the code is first used.
+ */
+#if 1 && !(defined(_MSC_VER) && (_MSC_VER <= 800))
+#define FIXED_TABLES
+#endif
+
+/*
+ * 9. MASKING OR CASTING FROM LONGER VALUES TO BYTES
+ *
+ * In some systems it is better to mask longer values to extract bytes
+ * rather than using a cast. This option allows this choice.
+ */
+#if 0
+#define to_byte(x) ((uint8_t)(x))
+#else
+#define to_byte(x) ((x) & 0xff)
+#endif
+
+/*
+ * 10. TABLE ALIGNMENT
+ *
+ * On some systems speed will be improved by aligning the AES large lookup
+ * tables on particular boundaries. This define should be set to a power of
+ * two giving the desired alignment. It can be left undefined if alignment
+ * is not needed. This option is specific to the Microsoft VC++ compiler -
+ * it seems to sometimes cause trouble for the VC++ version 6 compiler.
+ */
+
+#if 1 && defined(_MSC_VER) && (_MSC_VER >= 1300)
+#define TABLE_ALIGN 32
+#endif
+
+/*
+ * 11. REDUCE CODE AND TABLE SIZE
+ *
+ * This replaces some expanded macros with function calls if ASM_X86_V2 or
+ * ASM_X86_V2C is defined.
+ */
+
+#if 1 && (defined(ASM_X86_V2) || defined(ASM_X86_V2C))
+#define REDUCE_CODE_SIZE
+#endif
+
+/*
+ * 12. TABLE OPTIONS
+ *
+ * This cipher proceeds through a number of cycles known as rounds, which
+ * are implemented by a round function that can optionally be speeded
+ * up using tables. The basic tables are 256 32-bit words, with either
+ * one or four tables being required for each round function depending on
+ * how much speed is required. Encryption and decryption round functions
+ * are different and the last encryption and decryption round functions are
+ * different again, making four different round functions in all.
+ *
+ * This means that:
+ * 1. Normal encryption and decryption rounds can each use either 0, 1
+ * or 4 tables and table spaces of 0, 1024 or 4096 bytes each.
+ * 2. The last encryption and decryption rounds can also use either 0, 1
+ * or 4 tables and table spaces of 0, 1024 or 4096 bytes each.
+ *
+ * Include or exclude the appropriate definitions below to set the number
+ * of tables used by this implementation.
+ */
+
+#if 1 /* set tables for the normal encryption round */
+#define ENC_ROUND FOUR_TABLES
+#elif 0
+#define ENC_ROUND ONE_TABLE
+#else
+#define ENC_ROUND NO_TABLES
+#endif
+
+#if 1 /* set tables for the last encryption round */
+#define LAST_ENC_ROUND FOUR_TABLES
+#elif 0
+#define LAST_ENC_ROUND ONE_TABLE
+#else
+#define LAST_ENC_ROUND NO_TABLES
+#endif
+
+#if 1 /* set tables for the normal decryption round */
+#define DEC_ROUND FOUR_TABLES
+#elif 0
+#define DEC_ROUND ONE_TABLE
+#else
+#define DEC_ROUND NO_TABLES
+#endif
+
+#if 1 /* set tables for the last decryption round */
+#define LAST_DEC_ROUND FOUR_TABLES
+#elif 0
+#define LAST_DEC_ROUND ONE_TABLE
+#else
+#define LAST_DEC_ROUND NO_TABLES
+#endif
+
+/*
+ * The decryption key schedule can be speeded up with tables in the same
+ * way that the round functions can. Include or exclude the following
+ * defines to set this requirement.
+ */
+#if 1
+#define KEY_SCHED FOUR_TABLES
+#elif 0
+#define KEY_SCHED ONE_TABLE
+#else
+#define KEY_SCHED NO_TABLES
+#endif
+
+/* ---- END OF USER CONFIGURED OPTIONS ---- */
+
+/* VIA ACE support is only available for VC++ and GCC */
+
+#if !defined(_MSC_VER) && !defined(__GNUC__)
+#if defined(ASSUME_VIA_ACE_PRESENT)
+#undef ASSUME_VIA_ACE_PRESENT
+#endif
+#if defined(USE_VIA_ACE_IF_PRESENT)
+#undef USE_VIA_ACE_IF_PRESENT
+#endif
+#endif
+
+#if defined(ASSUME_VIA_ACE_PRESENT) && !defined(USE_VIA_ACE_IF_PRESENT)
+#define USE_VIA_ACE_IF_PRESENT
+#endif
+
+#if defined(USE_VIA_ACE_IF_PRESENT) && !defined(AES_REV_DKS)
+#define AES_REV_DKS
+#endif
+
+/* Assembler support requires the use of platform byte order */
+
+#if (defined(ASM_X86_V1C) || defined(ASM_X86_V2C) || defined(ASM_AMD64_C)) && \
+ (ALGORITHM_BYTE_ORDER != PLATFORM_BYTE_ORDER)
+#undef ALGORITHM_BYTE_ORDER
+#define ALGORITHM_BYTE_ORDER PLATFORM_BYTE_ORDER
+#endif
+
+/*
+ * In this implementation the columns of the state array are each held in
+ * 32-bit words. The state array can be held in various ways: in an array
+ * of words, in a number of individual word variables or in a number of
+ * processor registers. The following define maps a variable name x and
+ * a column number c to the way the state array variable is to be held.
+ * The first define below maps the state into an array x[c] whereas the
+ * second form maps the state into a number of individual variables x0,
+ * x1, etc. Another form could map individual state columns to machine
+ * register names.
+ */
+
+#if defined(ARRAYS)
+#define s(x, c) x[c]
+#else
+#define s(x, c) x##c
+#endif
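+
+/*
+ * Illustrative note (not in the original source): with ARRAYS defined, a
+ * round expression such as s(y, 2) expands to the array element y[2];
+ * without it, the same expression token-pastes to the individual variable
+ * y2. For example:
+ *
+ *	uint32_t y[4];			s(y, 2) ^= k;	-> y[2] ^= k;
+ *	uint32_t y0, y1, y2, y3;	s(y, 2) ^= k;	-> y2 ^= k;
+ */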
+
+/*
+ * This implementation provides subroutines for encryption, decryption
+ * and for setting the three key lengths (separately) for encryption
+ * and decryption. Since not all functions are needed, masks are set
+ * up here to determine which will be implemented in C
+ */
+
+#if !defined(AES_ENCRYPT)
+#define EFUNCS_IN_C 0
+#elif defined(ASSUME_VIA_ACE_PRESENT) || defined(ASM_X86_V1C) || \
+ defined(ASM_X86_V2C) || defined(ASM_AMD64_C)
+#define EFUNCS_IN_C ENC_KEYING_IN_C
+#elif !defined(ASM_X86_V2)
+#define EFUNCS_IN_C (ENCRYPTION_IN_C | ENC_KEYING_IN_C)
+#else
+#define EFUNCS_IN_C 0
+#endif
+
+#if !defined(AES_DECRYPT)
+#define DFUNCS_IN_C 0
+#elif defined(ASSUME_VIA_ACE_PRESENT) || defined(ASM_X86_V1C) || \
+ defined(ASM_X86_V2C) || defined(ASM_AMD64_C)
+#define DFUNCS_IN_C DEC_KEYING_IN_C
+#elif !defined(ASM_X86_V2)
+#define DFUNCS_IN_C (DECRYPTION_IN_C | DEC_KEYING_IN_C)
+#else
+#define DFUNCS_IN_C 0
+#endif
+
+#define FUNCS_IN_C (EFUNCS_IN_C | DFUNCS_IN_C)
+
+/* END OF CONFIGURATION OPTIONS */
+
+/* Disable or report errors on some combinations of options */
+
+#if ENC_ROUND == NO_TABLES && LAST_ENC_ROUND != NO_TABLES
+#undef LAST_ENC_ROUND
+#define LAST_ENC_ROUND NO_TABLES
+#elif ENC_ROUND == ONE_TABLE && LAST_ENC_ROUND == FOUR_TABLES
+#undef LAST_ENC_ROUND
+#define LAST_ENC_ROUND ONE_TABLE
+#endif
+
+#if ENC_ROUND == NO_TABLES && ENC_UNROLL != NONE
+#undef ENC_UNROLL
+#define ENC_UNROLL NONE
+#endif
+
+#if DEC_ROUND == NO_TABLES && LAST_DEC_ROUND != NO_TABLES
+#undef LAST_DEC_ROUND
+#define LAST_DEC_ROUND NO_TABLES
+#elif DEC_ROUND == ONE_TABLE && LAST_DEC_ROUND == FOUR_TABLES
+#undef LAST_DEC_ROUND
+#define LAST_DEC_ROUND ONE_TABLE
+#endif
+
+#if DEC_ROUND == NO_TABLES && DEC_UNROLL != NONE
+#undef DEC_UNROLL
+#define DEC_UNROLL NONE
+#endif
+
+#if (ALGORITHM_BYTE_ORDER == IS_LITTLE_ENDIAN)
+#define aes_sw32 htonl
+#elif defined(bswap32)
+#define aes_sw32 bswap32
+#elif defined(bswap_32)
+#define aes_sw32 bswap_32
+#else
+#define brot(x, n) (((uint32_t)(x) << (n)) | ((uint32_t)(x) >> (32 - (n))))
+#define aes_sw32(x) ((brot((x), 8) & 0x00ff00ff) | (brot((x), 24) & 0xff00ff00))
+#endif
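+
+/*
+ * Worked example (a reviewer sketch of the fallback case above): when
+ * neither htonl nor a bswap intrinsic is available, the two-rotate form
+ * still performs a full byte swap. For x = 0x11223344:
+ *
+ *	brot(x, 8)  = 0x22334411, masked with 0x00ff00ff -> 0x00330011
+ *	brot(x, 24) = 0x44112233, masked with 0xff00ff00 -> 0x44002200
+ *	aes_sw32(x) = 0x00330011 | 0x44002200 = 0x44332211
+ */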
+
+
+/*
+ * upr(x, n): rotates bytes within words by n positions, moving bytes to
+ * higher index positions with wrap around into low positions
+ * ups(x, n): moves bytes by n positions to higher index positions in
+ * words but without wrap around
+ * bval(x, n): extracts a byte from a word
+ *
+ * WARNING: The definitions given here are intended only for use with
+ * unsigned variables and with shift counts that are compile
+ * time constants
+ */
+
+#if (ALGORITHM_BYTE_ORDER == IS_LITTLE_ENDIAN)
+#define upr(x, n) (((uint32_t)(x) << (8 * (n))) | \
+ ((uint32_t)(x) >> (32 - 8 * (n))))
+#define ups(x, n) ((uint32_t)(x) << (8 * (n)))
+#define bval(x, n) to_byte((x) >> (8 * (n)))
+#define bytes2word(b0, b1, b2, b3) \
+ (((uint32_t)(b3) << 24) | ((uint32_t)(b2) << 16) | \
+ ((uint32_t)(b1) << 8) | (b0))
+#endif
+
+#if (ALGORITHM_BYTE_ORDER == IS_BIG_ENDIAN)
+#define upr(x, n) (((uint32_t)(x) >> (8 * (n))) | \
+ ((uint32_t)(x) << (32 - 8 * (n))))
+#define ups(x, n) ((uint32_t)(x) >> (8 * (n)))
+#define bval(x, n) to_byte((x) >> (24 - 8 * (n)))
+#define bytes2word(b0, b1, b2, b3) \
+ (((uint32_t)(b0) << 24) | ((uint32_t)(b1) << 16) | \
+ ((uint32_t)(b2) << 8) | (b3))
+#endif
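+
+/*
+ * Worked example (a reviewer note, not in the original source): on a
+ * little-endian target, bytes2word(0x44, 0x33, 0x22, 0x11) packs to
+ * 0x11223344, so byte index 0 is the low-order byte and
+ * bval(0x11223344, 0) recovers 0x44. A rotation upr(0x11223344, 1)
+ * yields 0x22334411: every byte moves one index higher and the top byte
+ * wraps into index 0. The two definition sets above give the same
+ * byte-index semantics on either byte order, with the shift directions
+ * reversed.
+ */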
+
+#if defined(SAFE_IO)
+#define word_in(x, c) bytes2word(((const uint8_t *)(x) + 4 * c)[0], \
+ ((const uint8_t *)(x) + 4 * c)[1], \
+ ((const uint8_t *)(x) + 4 * c)[2], \
+ ((const uint8_t *)(x) + 4 * c)[3])
+#define word_out(x, c, v) { ((uint8_t *)(x) + 4 * c)[0] = bval(v, 0); \
+ ((uint8_t *)(x) + 4 * c)[1] = bval(v, 1); \
+ ((uint8_t *)(x) + 4 * c)[2] = bval(v, 2); \
+ ((uint8_t *)(x) + 4 * c)[3] = bval(v, 3); }
+#elif (ALGORITHM_BYTE_ORDER == PLATFORM_BYTE_ORDER)
+#define word_in(x, c) (*((uint32_t *)(x) + (c)))
+#define word_out(x, c, v) (*((uint32_t *)(x) + (c)) = (v))
+#else
+#define word_in(x, c) aes_sw32(*((uint32_t *)(x) + (c)))
+#define word_out(x, c, v) (*((uint32_t *)(x) + (c)) = aes_sw32(v))
+#endif
+
+/* the finite field modular polynomial and elements */
+
+#define WPOLY 0x011b
+#define BPOLY 0x1b
+
+/* multiply four bytes in GF(2^8) by 'x' {02} in parallel */
+
+#define m1 0x80808080
+#define m2 0x7f7f7f7f
+#define gf_mulx(x) ((((x) & m2) << 1) ^ ((((x) & m1) >> 7) * BPOLY))
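+
+/*
+ * Worked example (a reviewer note): for the byte {57} in the low lane the
+ * top bit is clear, so gf_mulx(0x00000057) reduces to a plain left shift
+ * and yields 0x000000ae. For {ae} the top bit is set and the shifted
+ * value must be reduced modulo the field polynomial:
+ *
+ *	gf_mulx(0x000000ae) = (0x2e << 1) ^ 0x1b = 0x5c ^ 0x1b = 0x47
+ *
+ * i.e. {ae} * {02} = {47}, matching the GF(2^8) arithmetic in FIPS-197.
+ * The masks m1 and m2 let the same expression process all four bytes of
+ * a column at once.
+ */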
+
+/*
+ * The following defines provide alternative definitions of gf_mulx that might
+ * give improved performance if a fast 32-bit multiply is not available. Note
+ * that a temporary variable u needs to be defined where gf_mulx is used.
+ *
+ * #define gf_mulx(x) (u = (x) & m1, u |= (u >> 1), ((x) & m2) << 1) ^ \
+ * ((u >> 3) | (u >> 6))
+ * #define m4 (0x01010101 * BPOLY)
+ * #define gf_mulx(x) (u = (x) & m1, ((x) & m2) << 1) ^ ((u - (u >> 7)) \
+ * & m4)
+ */
+
+/* Work out which tables are needed for the different options */
+
+#if defined(ASM_X86_V1C)
+#if defined(ENC_ROUND)
+#undef ENC_ROUND
+#endif
+#define ENC_ROUND FOUR_TABLES
+#if defined(LAST_ENC_ROUND)
+#undef LAST_ENC_ROUND
+#endif
+#define LAST_ENC_ROUND FOUR_TABLES
+#if defined(DEC_ROUND)
+#undef DEC_ROUND
+#endif
+#define DEC_ROUND FOUR_TABLES
+#if defined(LAST_DEC_ROUND)
+#undef LAST_DEC_ROUND
+#endif
+#define LAST_DEC_ROUND FOUR_TABLES
+#if defined(KEY_SCHED)
+#undef KEY_SCHED
+#define KEY_SCHED FOUR_TABLES
+#endif
+#endif
+
+#if (FUNCS_IN_C & ENCRYPTION_IN_C) || defined(ASM_X86_V1C)
+#if ENC_ROUND == ONE_TABLE
+#define FT1_SET
+#elif ENC_ROUND == FOUR_TABLES
+#define FT4_SET
+#else
+#define SBX_SET
+#endif
+#if LAST_ENC_ROUND == ONE_TABLE
+#define FL1_SET
+#elif LAST_ENC_ROUND == FOUR_TABLES
+#define FL4_SET
+#elif !defined(SBX_SET)
+#define SBX_SET
+#endif
+#endif
+
+#if (FUNCS_IN_C & DECRYPTION_IN_C) || defined(ASM_X86_V1C)
+#if DEC_ROUND == ONE_TABLE
+#define IT1_SET
+#elif DEC_ROUND == FOUR_TABLES
+#define IT4_SET
+#else
+#define ISB_SET
+#endif
+#if LAST_DEC_ROUND == ONE_TABLE
+#define IL1_SET
+#elif LAST_DEC_ROUND == FOUR_TABLES
+#define IL4_SET
+#elif !defined(ISB_SET)
+#define ISB_SET
+#endif
+#endif
+
+
+#if !(defined(REDUCE_CODE_SIZE) && (defined(ASM_X86_V2) || \
+ defined(ASM_X86_V2C)))
+#if ((FUNCS_IN_C & ENC_KEYING_IN_C) || (FUNCS_IN_C & DEC_KEYING_IN_C))
+#if KEY_SCHED == ONE_TABLE
+#if !defined(FL1_SET) && !defined(FL4_SET)
+#define LS1_SET
+#endif
+#elif KEY_SCHED == FOUR_TABLES
+#if !defined(FL4_SET)
+#define LS4_SET
+#endif
+#elif !defined(SBX_SET)
+#define SBX_SET
+#endif
+#endif
+#if (FUNCS_IN_C & DEC_KEYING_IN_C)
+#if KEY_SCHED == ONE_TABLE
+#define IM1_SET
+#elif KEY_SCHED == FOUR_TABLES
+#define IM4_SET
+#elif !defined(SBX_SET)
+#define SBX_SET
+#endif
+#endif
+#endif
+
+/* generic definitions of Rijndael macros that use tables */
+
+#define no_table(x, box, vf, rf, c) bytes2word(\
+ box[bval(vf(x, 0, c), rf(0, c))], \
+ box[bval(vf(x, 1, c), rf(1, c))], \
+ box[bval(vf(x, 2, c), rf(2, c))], \
+ box[bval(vf(x, 3, c), rf(3, c))])
+
+#define one_table(x, op, tab, vf, rf, c) \
+ (tab[bval(vf(x, 0, c), rf(0, c))] \
+ ^ op(tab[bval(vf(x, 1, c), rf(1, c))], 1) \
+ ^ op(tab[bval(vf(x, 2, c), rf(2, c))], 2) \
+ ^ op(tab[bval(vf(x, 3, c), rf(3, c))], 3))
+
+#define four_tables(x, tab, vf, rf, c) \
+ (tab[0][bval(vf(x, 0, c), rf(0, c))] \
+ ^ tab[1][bval(vf(x, 1, c), rf(1, c))] \
+ ^ tab[2][bval(vf(x, 2, c), rf(2, c))] \
+ ^ tab[3][bval(vf(x, 3, c), rf(3, c))])
+
+#define vf1(x, r, c) (x)
+#define rf1(r, c) (r)
+#define rf2(r, c) ((8+r-c)&3)
+
+/*
+ * Perform the forward or inverse column mix operation on the four bytes of
+ * long word x in parallel. NOTE: x must be a simple variable, NOT an
+ * expression, in these macros.
+ */
+
+#if !(defined(REDUCE_CODE_SIZE) && (defined(ASM_X86_V2) || \
+ defined(ASM_X86_V2C)))
+
+#if defined(FM4_SET) /* not currently used */
+#define fwd_mcol(x) four_tables(x, t_use(f, m), vf1, rf1, 0)
+#elif defined(FM1_SET) /* not currently used */
+#define fwd_mcol(x) one_table(x, upr, t_use(f, m), vf1, rf1, 0)
+#else
+#define dec_fmvars uint32_t g2
+#define fwd_mcol(x) (g2 = gf_mulx(x), g2 ^ upr((x) ^ g2, 3) ^ \
+ upr((x), 2) ^ upr((x), 1))
+#endif
+
+#if defined(IM4_SET)
+#define inv_mcol(x) four_tables(x, t_use(i, m), vf1, rf1, 0)
+#elif defined(IM1_SET)
+#define inv_mcol(x) one_table(x, upr, t_use(i, m), vf1, rf1, 0)
+#else
+#define dec_imvars uint32_t g2, g4, g9
+#define inv_mcol(x) (g2 = gf_mulx(x), g4 = gf_mulx(g2), g9 = \
+ (x) ^ gf_mulx(g4), g4 ^= g9, \
+ (x) ^ g2 ^ g4 ^ upr(g2 ^ g9, 3) ^ \
+ upr(g4, 2) ^ upr(g9, 1))
+#endif
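+
+/*
+ * Derivation note (a reviewer sketch for the table-free form above):
+ * g2 = {02}x, g4 = {04}x and g9 = x ^ {08}x = {09}x, so after g4 ^= g9
+ * we have g4 = {0d}x. The final XOR therefore combines
+ *
+ *	x ^ g2 ^ g4      = ({01} ^ {02} ^ {0d})x = {0e}x
+ *	upr(g2 ^ g9, 3)  = {0b}x rotated three byte positions
+ *	upr(g4, 2)       = {0d}x rotated two byte positions
+ *	upr(g9, 1)       = {09}x rotated one byte position
+ *
+ * which is exactly the AES InvMixColumns multiply of a column by
+ * {0e, 0b, 0d, 09}.
+ */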
+
+#if defined(FL4_SET)
+#define ls_box(x, c) four_tables(x, t_use(f, l), vf1, rf2, c)
+#elif defined(LS4_SET)
+#define ls_box(x, c) four_tables(x, t_use(l, s), vf1, rf2, c)
+#elif defined(FL1_SET)
+#define ls_box(x, c) one_table(x, upr, t_use(f, l), vf1, rf2, c)
+#elif defined(LS1_SET)
+#define ls_box(x, c) one_table(x, upr, t_use(l, s), vf1, rf2, c)
+#else
+#define ls_box(x, c) no_table(x, t_use(s, box), vf1, rf2, c)
+#endif
+
+#endif
+
+#if defined(ASM_X86_V1C) && defined(AES_DECRYPT) && !defined(ISB_SET)
+#define ISB_SET
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _AESOPT_H */
diff --git a/module/icp/asm-x86_64/aes/aestab.h b/module/icp/asm-x86_64/aes/aestab.h
new file mode 100644
index 000000000..33cdb6c6f
--- /dev/null
+++ b/module/icp/asm-x86_64/aes/aestab.h
@@ -0,0 +1,165 @@
+/*
+ * ---------------------------------------------------------------------------
+ * Copyright (c) 1998-2007, Brian Gladman, Worcester, UK. All rights reserved.
+ *
+ * LICENSE TERMS
+ *
+ * The free distribution and use of this software is allowed (with or without
+ * changes) provided that:
+ *
+ * 1. source code distributions include the above copyright notice, this
+ * list of conditions and the following disclaimer;
+ *
+ * 2. binary distributions include the above copyright notice, this list
+ * of conditions and the following disclaimer in their documentation;
+ *
+ * 3. the name of the copyright holder is not used to endorse products
+ * built using this software without specific written permission.
+ *
+ * DISCLAIMER
+ *
+ * This software is provided 'as is' with no explicit or implied warranties
+ * in respect of its properties, including, but not limited to, correctness
+ * and/or fitness for purpose.
+ * ---------------------------------------------------------------------------
+ * Issue Date: 20/12/2007
+ *
+ * This file contains the code for declaring the tables needed to implement
+ * AES. The file aesopt.h is assumed to be included before this header file.
+ * If there are no global variables, the definitions here can be used to put
+ * the AES tables in a structure so that a pointer can then be added to the
+ * AES context to pass them to the AES routines that need them. If this
+ * facility is used, the calling program has to ensure that this pointer is
+ * managed appropriately. In particular, the value of the t_dec(in, it) item
+ * in the table structure must be set to zero in order to ensure that the
+ * tables are initialised. In practice the three code sequences in aeskey.c
+ * that control the calls to aes_init() and the aes_init() routine itself will
+ * have to be changed for a specific implementation. If global variables are
+ * available it will generally be preferable to use them with the precomputed
+ * FIXED_TABLES option that uses static global tables.
+ *
+ * The following defines can be used to control the way the tables
+ * are defined, initialised and used in embedded environments that
+ * require special features for these purposes
+ *
+ * the 't_dec' construction is used to declare fixed table arrays
+ * the 't_set' construction is used to set fixed table values
+ * the 't_use' construction is used to access fixed table values
+ *
+ * 256 byte tables:
+ *
+ * t_xxx(s, box) => forward S box
+ * t_xxx(i, box) => inverse S box
+ *
+ * 256 32-bit word OR 4 x 256 32-bit word tables:
+ *
+ * t_xxx(f, n) => forward normal round
+ * t_xxx(f, l) => forward last round
+ * t_xxx(i, n) => inverse normal round
+ * t_xxx(i, l) => inverse last round
+ * t_xxx(l, s) => key schedule table
+ * t_xxx(i, m) => key schedule table
+ *
+ * Other variables and tables:
+ *
+ * t_xxx(r, c) => the rcon table
+ */
+
+/*
+ * OpenSolaris OS modifications
+ *
+ * 1. Added __cplusplus and _AESTAB_H header guards
+ * 2. Added header file sys/types.h
+ * 3. Removed code defined for _MSC_VER
+ * 4. Changed all variables to "static const"
+ * 5. Changed uint_8t and uint_32t to uint8_t and uint32_t
+ * 6. Cstyled and hdrchk code
+ */
+
+#ifndef _AESTAB_H
+#define _AESTAB_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/types.h>
+
+#define t_dec(m, n) t_##m##n
+#define t_set(m, n) t_##m##n
+#define t_use(m, n) t_##m##n
+
+#if defined(DO_TABLES) && defined(FIXED_TABLES)
+#define d_1(t, n, b, e) static const t n[256] = b(e)
+#define d_4(t, n, b, e, f, g, h) static const t n[4][256] = \
+ {b(e), b(f), b(g), b(h)}
+static const uint32_t t_dec(r, c)[RC_LENGTH] = rc_data(w0);
+#else
+#define d_1(t, n, b, e) static const t n[256]
+#define d_4(t, n, b, e, f, g, h) static const t n[4][256]
+static const uint32_t t_dec(r, c)[RC_LENGTH];
+#endif
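+
+/*
+ * Illustrative expansion (a reviewer note): with DO_TABLES and
+ * FIXED_TABLES both defined, a declaration such as
+ *
+ *	d_4(uint32_t, t_dec(f, n), sb_data, u0, u1, u2, u3);
+ *
+ * expands to
+ *
+ *	static const uint32_t t_fn[4][256] =
+ *	    {sb_data(u0), sb_data(u1), sb_data(u2), sb_data(u3)};
+ *
+ * so the four forward round tables are filled at compile time. Without
+ * those defines only the declarations are emitted and, per section 8 of
+ * aesopt.h, aes_init() must compute the tables before first use.
+ */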
+
+#if defined(SBX_SET)
+ d_1(uint8_t, t_dec(s, box), sb_data, h0);
+#endif
+#if defined(ISB_SET)
+ d_1(uint8_t, t_dec(i, box), isb_data, h0);
+#endif
+
+#if defined(FT1_SET)
+ d_1(uint32_t, t_dec(f, n), sb_data, u0);
+#endif
+#if defined(FT4_SET)
+ d_4(uint32_t, t_dec(f, n), sb_data, u0, u1, u2, u3);
+#endif
+
+#if defined(FL1_SET)
+ d_1(uint32_t, t_dec(f, l), sb_data, w0);
+#endif
+#if defined(FL4_SET)
+ d_4(uint32_t, t_dec(f, l), sb_data, w0, w1, w2, w3);
+#endif
+
+#if defined(IT1_SET)
+ d_1(uint32_t, t_dec(i, n), isb_data, v0);
+#endif
+#if defined(IT4_SET)
+ d_4(uint32_t, t_dec(i, n), isb_data, v0, v1, v2, v3);
+#endif
+
+#if defined(IL1_SET)
+ d_1(uint32_t, t_dec(i, l), isb_data, w0);
+#endif
+#if defined(IL4_SET)
+ d_4(uint32_t, t_dec(i, l), isb_data, w0, w1, w2, w3);
+#endif
+
+#if defined(LS1_SET)
+#if defined(FL1_SET)
+#undef LS1_SET
+#else
+ d_1(uint32_t, t_dec(l, s), sb_data, w0);
+#endif
+#endif
+
+#if defined(LS4_SET)
+#if defined(FL4_SET)
+#undef LS4_SET
+#else
+ d_4(uint32_t, t_dec(l, s), sb_data, w0, w1, w2, w3);
+#endif
+#endif
+
+#if defined(IM1_SET)
+ d_1(uint32_t, t_dec(i, m), mm_data, v0);
+#endif
+#if defined(IM4_SET)
+ d_4(uint32_t, t_dec(i, m), mm_data, v0, v1, v2, v3);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _AESTAB_H */
diff --git a/module/icp/asm-x86_64/aes/aestab2.h b/module/icp/asm-x86_64/aes/aestab2.h
new file mode 100644
index 000000000..eb13f72b1
--- /dev/null
+++ b/module/icp/asm-x86_64/aes/aestab2.h
@@ -0,0 +1,594 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _AESTAB2_H
+#define _AESTAB2_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * To create this file for OpenSolaris:
+ * 1. Compile and run tablegen.c, from aes-src-04-03-08.zip,
+ * after defining ASM_AMD64_C
+ * 2. mv aestab2.c aestab2.h
+ * 3. Add __cplusplus and _AESTAB2_H header guards
+ * 4. Add #include <aes_impl.h>
+ * 5. Change "uint_32t" to "uint32_t"
+ * 6. Change all variables to "static const"
+ * 7. Cstyle and hdrchk this file
+ */
+
+#include <aes/aes_impl.h>
+
+static const uint32_t t_rc[RC_LENGTH] =
+{
+ 0x00000001, 0x00000002, 0x00000004, 0x00000008,
+ 0x00000010, 0x00000020, 0x00000040, 0x00000080,
+ 0x0000001b, 0x00000036
+};
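+
+/*
+ * Note (a reviewer sketch): these round constants are the successive
+ * powers {02}^i in GF(2^8); doubling {80} overflows and is reduced by
+ * the field polynomial 0x11b, giving 0x1b and then 0x36. The table can
+ * be reproduced with:
+ *
+ *	uint32_t rc = 0x01;
+ *	for (int i = 0; i < RC_LENGTH; i++) {
+ *		ASSERT(t_rc[i] == rc);
+ *		rc = (rc << 1) ^ ((rc & 0x80) ? 0x11b : 0);
+ *	}
+ */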
+
+static const uint32_t t_ls[4][256] =
+{
+ {
+ 0x00000063, 0x0000007c, 0x00000077, 0x0000007b,
+ 0x000000f2, 0x0000006b, 0x0000006f, 0x000000c5,
+ 0x00000030, 0x00000001, 0x00000067, 0x0000002b,
+ 0x000000fe, 0x000000d7, 0x000000ab, 0x00000076,
+ 0x000000ca, 0x00000082, 0x000000c9, 0x0000007d,
+ 0x000000fa, 0x00000059, 0x00000047, 0x000000f0,
+ 0x000000ad, 0x000000d4, 0x000000a2, 0x000000af,
+ 0x0000009c, 0x000000a4, 0x00000072, 0x000000c0,
+ 0x000000b7, 0x000000fd, 0x00000093, 0x00000026,
+ 0x00000036, 0x0000003f, 0x000000f7, 0x000000cc,
+ 0x00000034, 0x000000a5, 0x000000e5, 0x000000f1,
+ 0x00000071, 0x000000d8, 0x00000031, 0x00000015,
+ 0x00000004, 0x000000c7, 0x00000023, 0x000000c3,
+ 0x00000018, 0x00000096, 0x00000005, 0x0000009a,
+ 0x00000007, 0x00000012, 0x00000080, 0x000000e2,
+ 0x000000eb, 0x00000027, 0x000000b2, 0x00000075,
+ 0x00000009, 0x00000083, 0x0000002c, 0x0000001a,
+ 0x0000001b, 0x0000006e, 0x0000005a, 0x000000a0,
+ 0x00000052, 0x0000003b, 0x000000d6, 0x000000b3,
+ 0x00000029, 0x000000e3, 0x0000002f, 0x00000084,
+ 0x00000053, 0x000000d1, 0x00000000, 0x000000ed,
+ 0x00000020, 0x000000fc, 0x000000b1, 0x0000005b,
+ 0x0000006a, 0x000000cb, 0x000000be, 0x00000039,
+ 0x0000004a, 0x0000004c, 0x00000058, 0x000000cf,
+ 0x000000d0, 0x000000ef, 0x000000aa, 0x000000fb,
+ 0x00000043, 0x0000004d, 0x00000033, 0x00000085,
+ 0x00000045, 0x000000f9, 0x00000002, 0x0000007f,
+ 0x00000050, 0x0000003c, 0x0000009f, 0x000000a8,
+ 0x00000051, 0x000000a3, 0x00000040, 0x0000008f,
+ 0x00000092, 0x0000009d, 0x00000038, 0x000000f5,
+ 0x000000bc, 0x000000b6, 0x000000da, 0x00000021,
+ 0x00000010, 0x000000ff, 0x000000f3, 0x000000d2,
+ 0x000000cd, 0x0000000c, 0x00000013, 0x000000ec,
+ 0x0000005f, 0x00000097, 0x00000044, 0x00000017,
+ 0x000000c4, 0x000000a7, 0x0000007e, 0x0000003d,
+ 0x00000064, 0x0000005d, 0x00000019, 0x00000073,
+ 0x00000060, 0x00000081, 0x0000004f, 0x000000dc,
+ 0x00000022, 0x0000002a, 0x00000090, 0x00000088,
+ 0x00000046, 0x000000ee, 0x000000b8, 0x00000014,
+ 0x000000de, 0x0000005e, 0x0000000b, 0x000000db,
+ 0x000000e0, 0x00000032, 0x0000003a, 0x0000000a,
+ 0x00000049, 0x00000006, 0x00000024, 0x0000005c,
+ 0x000000c2, 0x000000d3, 0x000000ac, 0x00000062,
+ 0x00000091, 0x00000095, 0x000000e4, 0x00000079,
+ 0x000000e7, 0x000000c8, 0x00000037, 0x0000006d,
+ 0x0000008d, 0x000000d5, 0x0000004e, 0x000000a9,
+ 0x0000006c, 0x00000056, 0x000000f4, 0x000000ea,
+ 0x00000065, 0x0000007a, 0x000000ae, 0x00000008,
+ 0x000000ba, 0x00000078, 0x00000025, 0x0000002e,
+ 0x0000001c, 0x000000a6, 0x000000b4, 0x000000c6,
+ 0x000000e8, 0x000000dd, 0x00000074, 0x0000001f,
+ 0x0000004b, 0x000000bd, 0x0000008b, 0x0000008a,
+ 0x00000070, 0x0000003e, 0x000000b5, 0x00000066,
+ 0x00000048, 0x00000003, 0x000000f6, 0x0000000e,
+ 0x00000061, 0x00000035, 0x00000057, 0x000000b9,
+ 0x00000086, 0x000000c1, 0x0000001d, 0x0000009e,
+ 0x000000e1, 0x000000f8, 0x00000098, 0x00000011,
+ 0x00000069, 0x000000d9, 0x0000008e, 0x00000094,
+ 0x0000009b, 0x0000001e, 0x00000087, 0x000000e9,
+ 0x000000ce, 0x00000055, 0x00000028, 0x000000df,
+ 0x0000008c, 0x000000a1, 0x00000089, 0x0000000d,
+ 0x000000bf, 0x000000e6, 0x00000042, 0x00000068,
+ 0x00000041, 0x00000099, 0x0000002d, 0x0000000f,
+ 0x000000b0, 0x00000054, 0x000000bb, 0x00000016
+ },
+ {
+ 0x00006300, 0x00007c00, 0x00007700, 0x00007b00,
+ 0x0000f200, 0x00006b00, 0x00006f00, 0x0000c500,
+ 0x00003000, 0x00000100, 0x00006700, 0x00002b00,
+ 0x0000fe00, 0x0000d700, 0x0000ab00, 0x00007600,
+ 0x0000ca00, 0x00008200, 0x0000c900, 0x00007d00,
+ 0x0000fa00, 0x00005900, 0x00004700, 0x0000f000,
+ 0x0000ad00, 0x0000d400, 0x0000a200, 0x0000af00,
+ 0x00009c00, 0x0000a400, 0x00007200, 0x0000c000,
+ 0x0000b700, 0x0000fd00, 0x00009300, 0x00002600,
+ 0x00003600, 0x00003f00, 0x0000f700, 0x0000cc00,
+ 0x00003400, 0x0000a500, 0x0000e500, 0x0000f100,
+ 0x00007100, 0x0000d800, 0x00003100, 0x00001500,
+ 0x00000400, 0x0000c700, 0x00002300, 0x0000c300,
+ 0x00001800, 0x00009600, 0x00000500, 0x00009a00,
+ 0x00000700, 0x00001200, 0x00008000, 0x0000e200,
+ 0x0000eb00, 0x00002700, 0x0000b200, 0x00007500,
+ 0x00000900, 0x00008300, 0x00002c00, 0x00001a00,
+ 0x00001b00, 0x00006e00, 0x00005a00, 0x0000a000,
+ 0x00005200, 0x00003b00, 0x0000d600, 0x0000b300,
+ 0x00002900, 0x0000e300, 0x00002f00, 0x00008400,
+ 0x00005300, 0x0000d100, 0x00000000, 0x0000ed00,
+ 0x00002000, 0x0000fc00, 0x0000b100, 0x00005b00,
+ 0x00006a00, 0x0000cb00, 0x0000be00, 0x00003900,
+ 0x00004a00, 0x00004c00, 0x00005800, 0x0000cf00,
+ 0x0000d000, 0x0000ef00, 0x0000aa00, 0x0000fb00,
+ 0x00004300, 0x00004d00, 0x00003300, 0x00008500,
+ 0x00004500, 0x0000f900, 0x00000200, 0x00007f00,
+ 0x00005000, 0x00003c00, 0x00009f00, 0x0000a800,
+ 0x00005100, 0x0000a300, 0x00004000, 0x00008f00,
+ 0x00009200, 0x00009d00, 0x00003800, 0x0000f500,
+ 0x0000bc00, 0x0000b600, 0x0000da00, 0x00002100,
+ 0x00001000, 0x0000ff00, 0x0000f300, 0x0000d200,
+ 0x0000cd00, 0x00000c00, 0x00001300, 0x0000ec00,
+ 0x00005f00, 0x00009700, 0x00004400, 0x00001700,
+ 0x0000c400, 0x0000a700, 0x00007e00, 0x00003d00,
+ 0x00006400, 0x00005d00, 0x00001900, 0x00007300,
+ 0x00006000, 0x00008100, 0x00004f00, 0x0000dc00,
+ 0x00002200, 0x00002a00, 0x00009000, 0x00008800,
+ 0x00004600, 0x0000ee00, 0x0000b800, 0x00001400,
+ 0x0000de00, 0x00005e00, 0x00000b00, 0x0000db00,
+ 0x0000e000, 0x00003200, 0x00003a00, 0x00000a00,
+ 0x00004900, 0x00000600, 0x00002400, 0x00005c00,
+ 0x0000c200, 0x0000d300, 0x0000ac00, 0x00006200,
+ 0x00009100, 0x00009500, 0x0000e400, 0x00007900,
+ 0x0000e700, 0x0000c800, 0x00003700, 0x00006d00,
+ 0x00008d00, 0x0000d500, 0x00004e00, 0x0000a900,
+ 0x00006c00, 0x00005600, 0x0000f400, 0x0000ea00,
+ 0x00006500, 0x00007a00, 0x0000ae00, 0x00000800,
+ 0x0000ba00, 0x00007800, 0x00002500, 0x00002e00,
+ 0x00001c00, 0x0000a600, 0x0000b400, 0x0000c600,
+ 0x0000e800, 0x0000dd00, 0x00007400, 0x00001f00,
+ 0x00004b00, 0x0000bd00, 0x00008b00, 0x00008a00,
+ 0x00007000, 0x00003e00, 0x0000b500, 0x00006600,
+ 0x00004800, 0x00000300, 0x0000f600, 0x00000e00,
+ 0x00006100, 0x00003500, 0x00005700, 0x0000b900,
+ 0x00008600, 0x0000c100, 0x00001d00, 0x00009e00,
+ 0x0000e100, 0x0000f800, 0x00009800, 0x00001100,
+ 0x00006900, 0x0000d900, 0x00008e00, 0x00009400,
+ 0x00009b00, 0x00001e00, 0x00008700, 0x0000e900,
+ 0x0000ce00, 0x00005500, 0x00002800, 0x0000df00,
+ 0x00008c00, 0x0000a100, 0x00008900, 0x00000d00,
+ 0x0000bf00, 0x0000e600, 0x00004200, 0x00006800,
+ 0x00004100, 0x00009900, 0x00002d00, 0x00000f00,
+ 0x0000b000, 0x00005400, 0x0000bb00, 0x00001600
+ },
+ {
+ 0x00630000, 0x007c0000, 0x00770000, 0x007b0000,
+ 0x00f20000, 0x006b0000, 0x006f0000, 0x00c50000,
+ 0x00300000, 0x00010000, 0x00670000, 0x002b0000,
+ 0x00fe0000, 0x00d70000, 0x00ab0000, 0x00760000,
+ 0x00ca0000, 0x00820000, 0x00c90000, 0x007d0000,
+ 0x00fa0000, 0x00590000, 0x00470000, 0x00f00000,
+ 0x00ad0000, 0x00d40000, 0x00a20000, 0x00af0000,
+ 0x009c0000, 0x00a40000, 0x00720000, 0x00c00000,
+ 0x00b70000, 0x00fd0000, 0x00930000, 0x00260000,
+ 0x00360000, 0x003f0000, 0x00f70000, 0x00cc0000,
+ 0x00340000, 0x00a50000, 0x00e50000, 0x00f10000,
+ 0x00710000, 0x00d80000, 0x00310000, 0x00150000,
+ 0x00040000, 0x00c70000, 0x00230000, 0x00c30000,
+ 0x00180000, 0x00960000, 0x00050000, 0x009a0000,
+ 0x00070000, 0x00120000, 0x00800000, 0x00e20000,
+ 0x00eb0000, 0x00270000, 0x00b20000, 0x00750000,
+ 0x00090000, 0x00830000, 0x002c0000, 0x001a0000,
+ 0x001b0000, 0x006e0000, 0x005a0000, 0x00a00000,
+ 0x00520000, 0x003b0000, 0x00d60000, 0x00b30000,
+ 0x00290000, 0x00e30000, 0x002f0000, 0x00840000,
+ 0x00530000, 0x00d10000, 0x00000000, 0x00ed0000,
+ 0x00200000, 0x00fc0000, 0x00b10000, 0x005b0000,
+ 0x006a0000, 0x00cb0000, 0x00be0000, 0x00390000,
+ 0x004a0000, 0x004c0000, 0x00580000, 0x00cf0000,
+ 0x00d00000, 0x00ef0000, 0x00aa0000, 0x00fb0000,
+ 0x00430000, 0x004d0000, 0x00330000, 0x00850000,
+ 0x00450000, 0x00f90000, 0x00020000, 0x007f0000,
+ 0x00500000, 0x003c0000, 0x009f0000, 0x00a80000,
+ 0x00510000, 0x00a30000, 0x00400000, 0x008f0000,
+ 0x00920000, 0x009d0000, 0x00380000, 0x00f50000,
+ 0x00bc0000, 0x00b60000, 0x00da0000, 0x00210000,
+ 0x00100000, 0x00ff0000, 0x00f30000, 0x00d20000,
+ 0x00cd0000, 0x000c0000, 0x00130000, 0x00ec0000,
+ 0x005f0000, 0x00970000, 0x00440000, 0x00170000,
+ 0x00c40000, 0x00a70000, 0x007e0000, 0x003d0000,
+ 0x00640000, 0x005d0000, 0x00190000, 0x00730000,
+ 0x00600000, 0x00810000, 0x004f0000, 0x00dc0000,
+ 0x00220000, 0x002a0000, 0x00900000, 0x00880000,
+ 0x00460000, 0x00ee0000, 0x00b80000, 0x00140000,
+ 0x00de0000, 0x005e0000, 0x000b0000, 0x00db0000,
+ 0x00e00000, 0x00320000, 0x003a0000, 0x000a0000,
+ 0x00490000, 0x00060000, 0x00240000, 0x005c0000,
+ 0x00c20000, 0x00d30000, 0x00ac0000, 0x00620000,
+ 0x00910000, 0x00950000, 0x00e40000, 0x00790000,
+ 0x00e70000, 0x00c80000, 0x00370000, 0x006d0000,
+ 0x008d0000, 0x00d50000, 0x004e0000, 0x00a90000,
+ 0x006c0000, 0x00560000, 0x00f40000, 0x00ea0000,
+ 0x00650000, 0x007a0000, 0x00ae0000, 0x00080000,
+ 0x00ba0000, 0x00780000, 0x00250000, 0x002e0000,
+ 0x001c0000, 0x00a60000, 0x00b40000, 0x00c60000,
+ 0x00e80000, 0x00dd0000, 0x00740000, 0x001f0000,
+ 0x004b0000, 0x00bd0000, 0x008b0000, 0x008a0000,
+ 0x00700000, 0x003e0000, 0x00b50000, 0x00660000,
+ 0x00480000, 0x00030000, 0x00f60000, 0x000e0000,
+ 0x00610000, 0x00350000, 0x00570000, 0x00b90000,
+ 0x00860000, 0x00c10000, 0x001d0000, 0x009e0000,
+ 0x00e10000, 0x00f80000, 0x00980000, 0x00110000,
+ 0x00690000, 0x00d90000, 0x008e0000, 0x00940000,
+ 0x009b0000, 0x001e0000, 0x00870000, 0x00e90000,
+ 0x00ce0000, 0x00550000, 0x00280000, 0x00df0000,
+ 0x008c0000, 0x00a10000, 0x00890000, 0x000d0000,
+ 0x00bf0000, 0x00e60000, 0x00420000, 0x00680000,
+ 0x00410000, 0x00990000, 0x002d0000, 0x000f0000,
+ 0x00b00000, 0x00540000, 0x00bb0000, 0x00160000
+ },
+ {
+ 0x63000000, 0x7c000000, 0x77000000, 0x7b000000,
+ 0xf2000000, 0x6b000000, 0x6f000000, 0xc5000000,
+ 0x30000000, 0x01000000, 0x67000000, 0x2b000000,
+ 0xfe000000, 0xd7000000, 0xab000000, 0x76000000,
+ 0xca000000, 0x82000000, 0xc9000000, 0x7d000000,
+ 0xfa000000, 0x59000000, 0x47000000, 0xf0000000,
+ 0xad000000, 0xd4000000, 0xa2000000, 0xaf000000,
+ 0x9c000000, 0xa4000000, 0x72000000, 0xc0000000,
+ 0xb7000000, 0xfd000000, 0x93000000, 0x26000000,
+ 0x36000000, 0x3f000000, 0xf7000000, 0xcc000000,
+ 0x34000000, 0xa5000000, 0xe5000000, 0xf1000000,
+ 0x71000000, 0xd8000000, 0x31000000, 0x15000000,
+ 0x04000000, 0xc7000000, 0x23000000, 0xc3000000,
+ 0x18000000, 0x96000000, 0x05000000, 0x9a000000,
+ 0x07000000, 0x12000000, 0x80000000, 0xe2000000,
+ 0xeb000000, 0x27000000, 0xb2000000, 0x75000000,
+ 0x09000000, 0x83000000, 0x2c000000, 0x1a000000,
+ 0x1b000000, 0x6e000000, 0x5a000000, 0xa0000000,
+ 0x52000000, 0x3b000000, 0xd6000000, 0xb3000000,
+ 0x29000000, 0xe3000000, 0x2f000000, 0x84000000,
+ 0x53000000, 0xd1000000, 0x00000000, 0xed000000,
+ 0x20000000, 0xfc000000, 0xb1000000, 0x5b000000,
+ 0x6a000000, 0xcb000000, 0xbe000000, 0x39000000,
+ 0x4a000000, 0x4c000000, 0x58000000, 0xcf000000,
+ 0xd0000000, 0xef000000, 0xaa000000, 0xfb000000,
+ 0x43000000, 0x4d000000, 0x33000000, 0x85000000,
+ 0x45000000, 0xf9000000, 0x02000000, 0x7f000000,
+ 0x50000000, 0x3c000000, 0x9f000000, 0xa8000000,
+ 0x51000000, 0xa3000000, 0x40000000, 0x8f000000,
+ 0x92000000, 0x9d000000, 0x38000000, 0xf5000000,
+ 0xbc000000, 0xb6000000, 0xda000000, 0x21000000,
+ 0x10000000, 0xff000000, 0xf3000000, 0xd2000000,
+ 0xcd000000, 0x0c000000, 0x13000000, 0xec000000,
+ 0x5f000000, 0x97000000, 0x44000000, 0x17000000,
+ 0xc4000000, 0xa7000000, 0x7e000000, 0x3d000000,
+ 0x64000000, 0x5d000000, 0x19000000, 0x73000000,
+ 0x60000000, 0x81000000, 0x4f000000, 0xdc000000,
+ 0x22000000, 0x2a000000, 0x90000000, 0x88000000,
+ 0x46000000, 0xee000000, 0xb8000000, 0x14000000,
+ 0xde000000, 0x5e000000, 0x0b000000, 0xdb000000,
+ 0xe0000000, 0x32000000, 0x3a000000, 0x0a000000,
+ 0x49000000, 0x06000000, 0x24000000, 0x5c000000,
+ 0xc2000000, 0xd3000000, 0xac000000, 0x62000000,
+ 0x91000000, 0x95000000, 0xe4000000, 0x79000000,
+ 0xe7000000, 0xc8000000, 0x37000000, 0x6d000000,
+ 0x8d000000, 0xd5000000, 0x4e000000, 0xa9000000,
+ 0x6c000000, 0x56000000, 0xf4000000, 0xea000000,
+ 0x65000000, 0x7a000000, 0xae000000, 0x08000000,
+ 0xba000000, 0x78000000, 0x25000000, 0x2e000000,
+ 0x1c000000, 0xa6000000, 0xb4000000, 0xc6000000,
+ 0xe8000000, 0xdd000000, 0x74000000, 0x1f000000,
+ 0x4b000000, 0xbd000000, 0x8b000000, 0x8a000000,
+ 0x70000000, 0x3e000000, 0xb5000000, 0x66000000,
+ 0x48000000, 0x03000000, 0xf6000000, 0x0e000000,
+ 0x61000000, 0x35000000, 0x57000000, 0xb9000000,
+ 0x86000000, 0xc1000000, 0x1d000000, 0x9e000000,
+ 0xe1000000, 0xf8000000, 0x98000000, 0x11000000,
+ 0x69000000, 0xd9000000, 0x8e000000, 0x94000000,
+ 0x9b000000, 0x1e000000, 0x87000000, 0xe9000000,
+ 0xce000000, 0x55000000, 0x28000000, 0xdf000000,
+ 0x8c000000, 0xa1000000, 0x89000000, 0x0d000000,
+ 0xbf000000, 0xe6000000, 0x42000000, 0x68000000,
+ 0x41000000, 0x99000000, 0x2d000000, 0x0f000000,
+ 0xb0000000, 0x54000000, 0xbb000000, 0x16000000
+ }
+};
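+
+/*
+ * Note (a reviewer observation): the four t_ls planes hold the same
+ * forward S-box output shifted into each byte lane, i.e.
+ * t_ls[i][b] == sbox[b] << (8 * i). This lets the key-schedule ls_box()
+ * macro assemble a substituted, byte-rotated word with four table
+ * lookups and three XORs instead of separate byte extractions,
+ * substitutions and shifts.
+ */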
+
+static const uint32_t t_im[4][256] =
+{
+ {
+ 0x00000000, 0x0b0d090e, 0x161a121c, 0x1d171b12,
+ 0x2c342438, 0x27392d36, 0x3a2e3624, 0x31233f2a,
+ 0x58684870, 0x5365417e, 0x4e725a6c, 0x457f5362,
+ 0x745c6c48, 0x7f516546, 0x62467e54, 0x694b775a,
+ 0xb0d090e0, 0xbbdd99ee, 0xa6ca82fc, 0xadc78bf2,
+ 0x9ce4b4d8, 0x97e9bdd6, 0x8afea6c4, 0x81f3afca,
+ 0xe8b8d890, 0xe3b5d19e, 0xfea2ca8c, 0xf5afc382,
+ 0xc48cfca8, 0xcf81f5a6, 0xd296eeb4, 0xd99be7ba,
+ 0x7bbb3bdb, 0x70b632d5, 0x6da129c7, 0x66ac20c9,
+ 0x578f1fe3, 0x5c8216ed, 0x41950dff, 0x4a9804f1,
+ 0x23d373ab, 0x28de7aa5, 0x35c961b7, 0x3ec468b9,
+ 0x0fe75793, 0x04ea5e9d, 0x19fd458f, 0x12f04c81,
+ 0xcb6bab3b, 0xc066a235, 0xdd71b927, 0xd67cb029,
+ 0xe75f8f03, 0xec52860d, 0xf1459d1f, 0xfa489411,
+ 0x9303e34b, 0x980eea45, 0x8519f157, 0x8e14f859,
+ 0xbf37c773, 0xb43ace7d, 0xa92dd56f, 0xa220dc61,
+ 0xf66d76ad, 0xfd607fa3, 0xe07764b1, 0xeb7a6dbf,
+ 0xda595295, 0xd1545b9b, 0xcc434089, 0xc74e4987,
+ 0xae053edd, 0xa50837d3, 0xb81f2cc1, 0xb31225cf,
+ 0x82311ae5, 0x893c13eb, 0x942b08f9, 0x9f2601f7,
+ 0x46bde64d, 0x4db0ef43, 0x50a7f451, 0x5baafd5f,
+ 0x6a89c275, 0x6184cb7b, 0x7c93d069, 0x779ed967,
+ 0x1ed5ae3d, 0x15d8a733, 0x08cfbc21, 0x03c2b52f,
+ 0x32e18a05, 0x39ec830b, 0x24fb9819, 0x2ff69117,
+ 0x8dd64d76, 0x86db4478, 0x9bcc5f6a, 0x90c15664,
+ 0xa1e2694e, 0xaaef6040, 0xb7f87b52, 0xbcf5725c,
+ 0xd5be0506, 0xdeb30c08, 0xc3a4171a, 0xc8a91e14,
+ 0xf98a213e, 0xf2872830, 0xef903322, 0xe49d3a2c,
+ 0x3d06dd96, 0x360bd498, 0x2b1ccf8a, 0x2011c684,
+ 0x1132f9ae, 0x1a3ff0a0, 0x0728ebb2, 0x0c25e2bc,
+ 0x656e95e6, 0x6e639ce8, 0x737487fa, 0x78798ef4,
+ 0x495ab1de, 0x4257b8d0, 0x5f40a3c2, 0x544daacc,
+ 0xf7daec41, 0xfcd7e54f, 0xe1c0fe5d, 0xeacdf753,
+ 0xdbeec879, 0xd0e3c177, 0xcdf4da65, 0xc6f9d36b,
+ 0xafb2a431, 0xa4bfad3f, 0xb9a8b62d, 0xb2a5bf23,
+ 0x83868009, 0x888b8907, 0x959c9215, 0x9e919b1b,
+ 0x470a7ca1, 0x4c0775af, 0x51106ebd, 0x5a1d67b3,
+ 0x6b3e5899, 0x60335197, 0x7d244a85, 0x7629438b,
+ 0x1f6234d1, 0x146f3ddf, 0x097826cd, 0x02752fc3,
+ 0x335610e9, 0x385b19e7, 0x254c02f5, 0x2e410bfb,
+ 0x8c61d79a, 0x876cde94, 0x9a7bc586, 0x9176cc88,
+ 0xa055f3a2, 0xab58faac, 0xb64fe1be, 0xbd42e8b0,
+ 0xd4099fea, 0xdf0496e4, 0xc2138df6, 0xc91e84f8,
+ 0xf83dbbd2, 0xf330b2dc, 0xee27a9ce, 0xe52aa0c0,
+ 0x3cb1477a, 0x37bc4e74, 0x2aab5566, 0x21a65c68,
+ 0x10856342, 0x1b886a4c, 0x069f715e, 0x0d927850,
+ 0x64d90f0a, 0x6fd40604, 0x72c31d16, 0x79ce1418,
+ 0x48ed2b32, 0x43e0223c, 0x5ef7392e, 0x55fa3020,
+ 0x01b79aec, 0x0aba93e2, 0x17ad88f0, 0x1ca081fe,
+ 0x2d83bed4, 0x268eb7da, 0x3b99acc8, 0x3094a5c6,
+ 0x59dfd29c, 0x52d2db92, 0x4fc5c080, 0x44c8c98e,
+ 0x75ebf6a4, 0x7ee6ffaa, 0x63f1e4b8, 0x68fcedb6,
+ 0xb1670a0c, 0xba6a0302, 0xa77d1810, 0xac70111e,
+ 0x9d532e34, 0x965e273a, 0x8b493c28, 0x80443526,
+ 0xe90f427c, 0xe2024b72, 0xff155060, 0xf418596e,
+ 0xc53b6644, 0xce366f4a, 0xd3217458, 0xd82c7d56,
+ 0x7a0ca137, 0x7101a839, 0x6c16b32b, 0x671bba25,
+ 0x5638850f, 0x5d358c01, 0x40229713, 0x4b2f9e1d,
+ 0x2264e947, 0x2969e049, 0x347efb5b, 0x3f73f255,
+ 0x0e50cd7f, 0x055dc471, 0x184adf63, 0x1347d66d,
+ 0xcadc31d7, 0xc1d138d9, 0xdcc623cb, 0xd7cb2ac5,
+ 0xe6e815ef, 0xede51ce1, 0xf0f207f3, 0xfbff0efd,
+ 0x92b479a7, 0x99b970a9, 0x84ae6bbb, 0x8fa362b5,
+ 0xbe805d9f, 0xb58d5491, 0xa89a4f83, 0xa397468d
+ },
+ {
+ 0x00000000, 0x0d090e0b, 0x1a121c16, 0x171b121d,
+ 0x3424382c, 0x392d3627, 0x2e36243a, 0x233f2a31,
+ 0x68487058, 0x65417e53, 0x725a6c4e, 0x7f536245,
+ 0x5c6c4874, 0x5165467f, 0x467e5462, 0x4b775a69,
+ 0xd090e0b0, 0xdd99eebb, 0xca82fca6, 0xc78bf2ad,
+ 0xe4b4d89c, 0xe9bdd697, 0xfea6c48a, 0xf3afca81,
+ 0xb8d890e8, 0xb5d19ee3, 0xa2ca8cfe, 0xafc382f5,
+ 0x8cfca8c4, 0x81f5a6cf, 0x96eeb4d2, 0x9be7bad9,
+ 0xbb3bdb7b, 0xb632d570, 0xa129c76d, 0xac20c966,
+ 0x8f1fe357, 0x8216ed5c, 0x950dff41, 0x9804f14a,
+ 0xd373ab23, 0xde7aa528, 0xc961b735, 0xc468b93e,
+ 0xe757930f, 0xea5e9d04, 0xfd458f19, 0xf04c8112,
+ 0x6bab3bcb, 0x66a235c0, 0x71b927dd, 0x7cb029d6,
+ 0x5f8f03e7, 0x52860dec, 0x459d1ff1, 0x489411fa,
+ 0x03e34b93, 0x0eea4598, 0x19f15785, 0x14f8598e,
+ 0x37c773bf, 0x3ace7db4, 0x2dd56fa9, 0x20dc61a2,
+ 0x6d76adf6, 0x607fa3fd, 0x7764b1e0, 0x7a6dbfeb,
+ 0x595295da, 0x545b9bd1, 0x434089cc, 0x4e4987c7,
+ 0x053eddae, 0x0837d3a5, 0x1f2cc1b8, 0x1225cfb3,
+ 0x311ae582, 0x3c13eb89, 0x2b08f994, 0x2601f79f,
+ 0xbde64d46, 0xb0ef434d, 0xa7f45150, 0xaafd5f5b,
+ 0x89c2756a, 0x84cb7b61, 0x93d0697c, 0x9ed96777,
+ 0xd5ae3d1e, 0xd8a73315, 0xcfbc2108, 0xc2b52f03,
+ 0xe18a0532, 0xec830b39, 0xfb981924, 0xf691172f,
+ 0xd64d768d, 0xdb447886, 0xcc5f6a9b, 0xc1566490,
+ 0xe2694ea1, 0xef6040aa, 0xf87b52b7, 0xf5725cbc,
+ 0xbe0506d5, 0xb30c08de, 0xa4171ac3, 0xa91e14c8,
+ 0x8a213ef9, 0x872830f2, 0x903322ef, 0x9d3a2ce4,
+ 0x06dd963d, 0x0bd49836, 0x1ccf8a2b, 0x11c68420,
+ 0x32f9ae11, 0x3ff0a01a, 0x28ebb207, 0x25e2bc0c,
+ 0x6e95e665, 0x639ce86e, 0x7487fa73, 0x798ef478,
+ 0x5ab1de49, 0x57b8d042, 0x40a3c25f, 0x4daacc54,
+ 0xdaec41f7, 0xd7e54ffc, 0xc0fe5de1, 0xcdf753ea,
+ 0xeec879db, 0xe3c177d0, 0xf4da65cd, 0xf9d36bc6,
+ 0xb2a431af, 0xbfad3fa4, 0xa8b62db9, 0xa5bf23b2,
+ 0x86800983, 0x8b890788, 0x9c921595, 0x919b1b9e,
+ 0x0a7ca147, 0x0775af4c, 0x106ebd51, 0x1d67b35a,
+ 0x3e58996b, 0x33519760, 0x244a857d, 0x29438b76,
+ 0x6234d11f, 0x6f3ddf14, 0x7826cd09, 0x752fc302,
+ 0x5610e933, 0x5b19e738, 0x4c02f525, 0x410bfb2e,
+ 0x61d79a8c, 0x6cde9487, 0x7bc5869a, 0x76cc8891,
+ 0x55f3a2a0, 0x58faacab, 0x4fe1beb6, 0x42e8b0bd,
+ 0x099fead4, 0x0496e4df, 0x138df6c2, 0x1e84f8c9,
+ 0x3dbbd2f8, 0x30b2dcf3, 0x27a9ceee, 0x2aa0c0e5,
+ 0xb1477a3c, 0xbc4e7437, 0xab55662a, 0xa65c6821,
+ 0x85634210, 0x886a4c1b, 0x9f715e06, 0x9278500d,
+ 0xd90f0a64, 0xd406046f, 0xc31d1672, 0xce141879,
+ 0xed2b3248, 0xe0223c43, 0xf7392e5e, 0xfa302055,
+ 0xb79aec01, 0xba93e20a, 0xad88f017, 0xa081fe1c,
+ 0x83bed42d, 0x8eb7da26, 0x99acc83b, 0x94a5c630,
+ 0xdfd29c59, 0xd2db9252, 0xc5c0804f, 0xc8c98e44,
+ 0xebf6a475, 0xe6ffaa7e, 0xf1e4b863, 0xfcedb668,
+ 0x670a0cb1, 0x6a0302ba, 0x7d1810a7, 0x70111eac,
+ 0x532e349d, 0x5e273a96, 0x493c288b, 0x44352680,
+ 0x0f427ce9, 0x024b72e2, 0x155060ff, 0x18596ef4,
+ 0x3b6644c5, 0x366f4ace, 0x217458d3, 0x2c7d56d8,
+ 0x0ca1377a, 0x01a83971, 0x16b32b6c, 0x1bba2567,
+ 0x38850f56, 0x358c015d, 0x22971340, 0x2f9e1d4b,
+ 0x64e94722, 0x69e04929, 0x7efb5b34, 0x73f2553f,
+ 0x50cd7f0e, 0x5dc47105, 0x4adf6318, 0x47d66d13,
+ 0xdc31d7ca, 0xd138d9c1, 0xc623cbdc, 0xcb2ac5d7,
+ 0xe815efe6, 0xe51ce1ed, 0xf207f3f0, 0xff0efdfb,
+ 0xb479a792, 0xb970a999, 0xae6bbb84, 0xa362b58f,
+ 0x805d9fbe, 0x8d5491b5, 0x9a4f83a8, 0x97468da3
+ },
+ {
+ 0x00000000, 0x090e0b0d, 0x121c161a, 0x1b121d17,
+ 0x24382c34, 0x2d362739, 0x36243a2e, 0x3f2a3123,
+ 0x48705868, 0x417e5365, 0x5a6c4e72, 0x5362457f,
+ 0x6c48745c, 0x65467f51, 0x7e546246, 0x775a694b,
+ 0x90e0b0d0, 0x99eebbdd, 0x82fca6ca, 0x8bf2adc7,
+ 0xb4d89ce4, 0xbdd697e9, 0xa6c48afe, 0xafca81f3,
+ 0xd890e8b8, 0xd19ee3b5, 0xca8cfea2, 0xc382f5af,
+ 0xfca8c48c, 0xf5a6cf81, 0xeeb4d296, 0xe7bad99b,
+ 0x3bdb7bbb, 0x32d570b6, 0x29c76da1, 0x20c966ac,
+ 0x1fe3578f, 0x16ed5c82, 0x0dff4195, 0x04f14a98,
+ 0x73ab23d3, 0x7aa528de, 0x61b735c9, 0x68b93ec4,
+ 0x57930fe7, 0x5e9d04ea, 0x458f19fd, 0x4c8112f0,
+ 0xab3bcb6b, 0xa235c066, 0xb927dd71, 0xb029d67c,
+ 0x8f03e75f, 0x860dec52, 0x9d1ff145, 0x9411fa48,
+ 0xe34b9303, 0xea45980e, 0xf1578519, 0xf8598e14,
+ 0xc773bf37, 0xce7db43a, 0xd56fa92d, 0xdc61a220,
+ 0x76adf66d, 0x7fa3fd60, 0x64b1e077, 0x6dbfeb7a,
+ 0x5295da59, 0x5b9bd154, 0x4089cc43, 0x4987c74e,
+ 0x3eddae05, 0x37d3a508, 0x2cc1b81f, 0x25cfb312,
+ 0x1ae58231, 0x13eb893c, 0x08f9942b, 0x01f79f26,
+ 0xe64d46bd, 0xef434db0, 0xf45150a7, 0xfd5f5baa,
+ 0xc2756a89, 0xcb7b6184, 0xd0697c93, 0xd967779e,
+ 0xae3d1ed5, 0xa73315d8, 0xbc2108cf, 0xb52f03c2,
+ 0x8a0532e1, 0x830b39ec, 0x981924fb, 0x91172ff6,
+ 0x4d768dd6, 0x447886db, 0x5f6a9bcc, 0x566490c1,
+ 0x694ea1e2, 0x6040aaef, 0x7b52b7f8, 0x725cbcf5,
+ 0x0506d5be, 0x0c08deb3, 0x171ac3a4, 0x1e14c8a9,
+ 0x213ef98a, 0x2830f287, 0x3322ef90, 0x3a2ce49d,
+ 0xdd963d06, 0xd498360b, 0xcf8a2b1c, 0xc6842011,
+ 0xf9ae1132, 0xf0a01a3f, 0xebb20728, 0xe2bc0c25,
+ 0x95e6656e, 0x9ce86e63, 0x87fa7374, 0x8ef47879,
+ 0xb1de495a, 0xb8d04257, 0xa3c25f40, 0xaacc544d,
+ 0xec41f7da, 0xe54ffcd7, 0xfe5de1c0, 0xf753eacd,
+ 0xc879dbee, 0xc177d0e3, 0xda65cdf4, 0xd36bc6f9,
+ 0xa431afb2, 0xad3fa4bf, 0xb62db9a8, 0xbf23b2a5,
+ 0x80098386, 0x8907888b, 0x9215959c, 0x9b1b9e91,
+ 0x7ca1470a, 0x75af4c07, 0x6ebd5110, 0x67b35a1d,
+ 0x58996b3e, 0x51976033, 0x4a857d24, 0x438b7629,
+ 0x34d11f62, 0x3ddf146f, 0x26cd0978, 0x2fc30275,
+ 0x10e93356, 0x19e7385b, 0x02f5254c, 0x0bfb2e41,
+ 0xd79a8c61, 0xde94876c, 0xc5869a7b, 0xcc889176,
+ 0xf3a2a055, 0xfaacab58, 0xe1beb64f, 0xe8b0bd42,
+ 0x9fead409, 0x96e4df04, 0x8df6c213, 0x84f8c91e,
+ 0xbbd2f83d, 0xb2dcf330, 0xa9ceee27, 0xa0c0e52a,
+ 0x477a3cb1, 0x4e7437bc, 0x55662aab, 0x5c6821a6,
+ 0x63421085, 0x6a4c1b88, 0x715e069f, 0x78500d92,
+ 0x0f0a64d9, 0x06046fd4, 0x1d1672c3, 0x141879ce,
+ 0x2b3248ed, 0x223c43e0, 0x392e5ef7, 0x302055fa,
+ 0x9aec01b7, 0x93e20aba, 0x88f017ad, 0x81fe1ca0,
+ 0xbed42d83, 0xb7da268e, 0xacc83b99, 0xa5c63094,
+ 0xd29c59df, 0xdb9252d2, 0xc0804fc5, 0xc98e44c8,
+ 0xf6a475eb, 0xffaa7ee6, 0xe4b863f1, 0xedb668fc,
+ 0x0a0cb167, 0x0302ba6a, 0x1810a77d, 0x111eac70,
+ 0x2e349d53, 0x273a965e, 0x3c288b49, 0x35268044,
+ 0x427ce90f, 0x4b72e202, 0x5060ff15, 0x596ef418,
+ 0x6644c53b, 0x6f4ace36, 0x7458d321, 0x7d56d82c,
+ 0xa1377a0c, 0xa8397101, 0xb32b6c16, 0xba25671b,
+ 0x850f5638, 0x8c015d35, 0x97134022, 0x9e1d4b2f,
+ 0xe9472264, 0xe0492969, 0xfb5b347e, 0xf2553f73,
+ 0xcd7f0e50, 0xc471055d, 0xdf63184a, 0xd66d1347,
+ 0x31d7cadc, 0x38d9c1d1, 0x23cbdcc6, 0x2ac5d7cb,
+ 0x15efe6e8, 0x1ce1ede5, 0x07f3f0f2, 0x0efdfbff,
+ 0x79a792b4, 0x70a999b9, 0x6bbb84ae, 0x62b58fa3,
+ 0x5d9fbe80, 0x5491b58d, 0x4f83a89a, 0x468da397
+ },
+ {
+ 0x00000000, 0x0e0b0d09, 0x1c161a12, 0x121d171b,
+ 0x382c3424, 0x3627392d, 0x243a2e36, 0x2a31233f,
+ 0x70586848, 0x7e536541, 0x6c4e725a, 0x62457f53,
+ 0x48745c6c, 0x467f5165, 0x5462467e, 0x5a694b77,
+ 0xe0b0d090, 0xeebbdd99, 0xfca6ca82, 0xf2adc78b,
+ 0xd89ce4b4, 0xd697e9bd, 0xc48afea6, 0xca81f3af,
+ 0x90e8b8d8, 0x9ee3b5d1, 0x8cfea2ca, 0x82f5afc3,
+ 0xa8c48cfc, 0xa6cf81f5, 0xb4d296ee, 0xbad99be7,
+ 0xdb7bbb3b, 0xd570b632, 0xc76da129, 0xc966ac20,
+ 0xe3578f1f, 0xed5c8216, 0xff41950d, 0xf14a9804,
+ 0xab23d373, 0xa528de7a, 0xb735c961, 0xb93ec468,
+ 0x930fe757, 0x9d04ea5e, 0x8f19fd45, 0x8112f04c,
+ 0x3bcb6bab, 0x35c066a2, 0x27dd71b9, 0x29d67cb0,
+ 0x03e75f8f, 0x0dec5286, 0x1ff1459d, 0x11fa4894,
+ 0x4b9303e3, 0x45980eea, 0x578519f1, 0x598e14f8,
+ 0x73bf37c7, 0x7db43ace, 0x6fa92dd5, 0x61a220dc,
+ 0xadf66d76, 0xa3fd607f, 0xb1e07764, 0xbfeb7a6d,
+ 0x95da5952, 0x9bd1545b, 0x89cc4340, 0x87c74e49,
+ 0xddae053e, 0xd3a50837, 0xc1b81f2c, 0xcfb31225,
+ 0xe582311a, 0xeb893c13, 0xf9942b08, 0xf79f2601,
+ 0x4d46bde6, 0x434db0ef, 0x5150a7f4, 0x5f5baafd,
+ 0x756a89c2, 0x7b6184cb, 0x697c93d0, 0x67779ed9,
+ 0x3d1ed5ae, 0x3315d8a7, 0x2108cfbc, 0x2f03c2b5,
+ 0x0532e18a, 0x0b39ec83, 0x1924fb98, 0x172ff691,
+ 0x768dd64d, 0x7886db44, 0x6a9bcc5f, 0x6490c156,
+ 0x4ea1e269, 0x40aaef60, 0x52b7f87b, 0x5cbcf572,
+ 0x06d5be05, 0x08deb30c, 0x1ac3a417, 0x14c8a91e,
+ 0x3ef98a21, 0x30f28728, 0x22ef9033, 0x2ce49d3a,
+ 0x963d06dd, 0x98360bd4, 0x8a2b1ccf, 0x842011c6,
+ 0xae1132f9, 0xa01a3ff0, 0xb20728eb, 0xbc0c25e2,
+ 0xe6656e95, 0xe86e639c, 0xfa737487, 0xf478798e,
+ 0xde495ab1, 0xd04257b8, 0xc25f40a3, 0xcc544daa,
+ 0x41f7daec, 0x4ffcd7e5, 0x5de1c0fe, 0x53eacdf7,
+ 0x79dbeec8, 0x77d0e3c1, 0x65cdf4da, 0x6bc6f9d3,
+ 0x31afb2a4, 0x3fa4bfad, 0x2db9a8b6, 0x23b2a5bf,
+ 0x09838680, 0x07888b89, 0x15959c92, 0x1b9e919b,
+ 0xa1470a7c, 0xaf4c0775, 0xbd51106e, 0xb35a1d67,
+ 0x996b3e58, 0x97603351, 0x857d244a, 0x8b762943,
+ 0xd11f6234, 0xdf146f3d, 0xcd097826, 0xc302752f,
+ 0xe9335610, 0xe7385b19, 0xf5254c02, 0xfb2e410b,
+ 0x9a8c61d7, 0x94876cde, 0x869a7bc5, 0x889176cc,
+ 0xa2a055f3, 0xacab58fa, 0xbeb64fe1, 0xb0bd42e8,
+ 0xead4099f, 0xe4df0496, 0xf6c2138d, 0xf8c91e84,
+ 0xd2f83dbb, 0xdcf330b2, 0xceee27a9, 0xc0e52aa0,
+ 0x7a3cb147, 0x7437bc4e, 0x662aab55, 0x6821a65c,
+ 0x42108563, 0x4c1b886a, 0x5e069f71, 0x500d9278,
+ 0x0a64d90f, 0x046fd406, 0x1672c31d, 0x1879ce14,
+ 0x3248ed2b, 0x3c43e022, 0x2e5ef739, 0x2055fa30,
+ 0xec01b79a, 0xe20aba93, 0xf017ad88, 0xfe1ca081,
+ 0xd42d83be, 0xda268eb7, 0xc83b99ac, 0xc63094a5,
+ 0x9c59dfd2, 0x9252d2db, 0x804fc5c0, 0x8e44c8c9,
+ 0xa475ebf6, 0xaa7ee6ff, 0xb863f1e4, 0xb668fced,
+ 0x0cb1670a, 0x02ba6a03, 0x10a77d18, 0x1eac7011,
+ 0x349d532e, 0x3a965e27, 0x288b493c, 0x26804435,
+ 0x7ce90f42, 0x72e2024b, 0x60ff1550, 0x6ef41859,
+ 0x44c53b66, 0x4ace366f, 0x58d32174, 0x56d82c7d,
+ 0x377a0ca1, 0x397101a8, 0x2b6c16b3, 0x25671bba,
+ 0x0f563885, 0x015d358c, 0x13402297, 0x1d4b2f9e,
+ 0x472264e9, 0x492969e0, 0x5b347efb, 0x553f73f2,
+ 0x7f0e50cd, 0x71055dc4, 0x63184adf, 0x6d1347d6,
+ 0xd7cadc31, 0xd9c1d138, 0xcbdcc623, 0xc5d7cb2a,
+ 0xefe6e815, 0xe1ede51c, 0xf3f0f207, 0xfdfbff0e,
+ 0xa792b479, 0xa999b970, 0xbb84ae6b, 0xb58fa362,
+ 0x9fbe805d, 0x91b58d54, 0x83a89a4f, 0x8da39746
+ }
+};
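+
+/*
+ * Note (a reviewer observation): t_im implements the InvMixColumns
+ * transform used when turning encryption round keys into decryption
+ * round keys. Each entry is the GF(2^8) column product of a single input
+ * byte: t_im[0][0x01] == 0x0b0d090e packs the coefficients {0e}, {09},
+ * {0d}, {0b} from low byte to high, and the other three planes hold the
+ * same products rotated into the remaining lanes, so
+ *
+ *	inv_mcol(x) == t_im[0][b0] ^ t_im[1][b1] ^ t_im[2][b2] ^ t_im[3][b3]
+ *
+ * where b0..b3 are the bytes of x from low to high.
+ */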
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _AESTAB2_H */
diff --git a/module/icp/asm-x86_64/modes/gcm_intel.S b/module/icp/asm-x86_64/modes/gcm_intel.S
new file mode 100644
index 000000000..9bb40bf23
--- /dev/null
+++ b/module/icp/asm-x86_64/modes/gcm_intel.S
@@ -0,0 +1,334 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2009 Intel Corporation
+ * All Rights Reserved.
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * Accelerated GHASH implementation with Intel PCLMULQDQ-NI
+ * instructions. This file contains an accelerated
+ * Galois Field Multiplication implementation.
+ *
+ * PCLMULQDQ is used to accelerate the most time-consuming part of GHASH,
+ * carry-less multiplication. More information about PCLMULQDQ can be
+ * found at:
+ * http://software.intel.com/en-us/articles/
+ * carry-less-multiplication-and-its-usage-for-computing-the-gcm-mode/
+ *
+ */
+
+/*
+ * ====================================================================
+ * OpenSolaris OS modifications
+ *
+ * This source originates as file galois_hash_asm.c from
+ * Intel Corporation dated September 21, 2009.
+ *
+ * This OpenSolaris version has these major changes from the original source:
+ *
+ * 1. Added OpenSolaris ENTRY_NP/SET_SIZE macros from
+ * /usr/include/sys/asm_linkage.h, lint(1B) guards, and a dummy C function
+ * definition for lint.
+ *
+ * 2. Formatted code, added comments, and added #includes and #defines.
+ *
+ * 3. If bit CR0.TS is set, clear and set the TS bit after and before
+ * calling kpreempt_disable() and kpreempt_enable().
+ * If the TS bit is not set, save and restore the %xmm registers at the
+ * beginning and end of function calls (the %xmm* registers are not saved
+ * and restored during kernel thread preemption).
+ *
+ * 4. Removed code to perform hashing. This is already done with C macro
+ * GHASH in gcm.c. For better performance, this removed code should be
+ * reintegrated in the future to replace the C GHASH macro.
+ *
+ * 5. Added code to byte swap 16-byte input and output.
+ *
+ * 6. Folded in comments from the original C source with embedded assembly
+ * (SB_w_shift_xor.c)
+ *
+ * 7. Renamed function and reordered parameters to match OpenSolaris:
+ * Intel interface:
+ * void galois_hash_asm(unsigned char *hk, unsigned char *s,
+ * unsigned char *d, int length)
+ * OpenSolaris OS interface:
+ * void gcm_mul_pclmulqdq(uint64_t *x_in, uint64_t *y, uint64_t *res);
+ * ====================================================================
+ */
+
+
+#if defined(lint) || defined(__lint)
+
+#include <sys/types.h>
+
+/* ARGSUSED */
+void
+gcm_mul_pclmulqdq(uint64_t *x_in, uint64_t *y, uint64_t *res) {
+}
+
+#else /* lint */
+
+#define _ASM
+#include <sys/asm_linkage.h>
+
+#ifdef _KERNEL
+ /*
+ * Note: the CLTS macro clobbers P2 (%rsi) under i86xpv. That is,
+ * it calls HYPERVISOR_fpu_taskswitch() which modifies %rsi when it
+ * uses it to pass P2 to syscall.
+ * This also occurs with the STTS macro, but we don't care if
+ * P2 (%rsi) is modified just before function exit.
+ * The CLTS and STTS macros push and pop P1 (%rdi) already.
+ */
+#ifdef __xpv
+#define PROTECTED_CLTS \
+ push %rsi; \
+ CLTS; \
+ pop %rsi
+#else
+#define PROTECTED_CLTS \
+ CLTS
+#endif /* __xpv */
+
+ /*
+ * If CR0_TS is not set, align the stack (with push %rbp) and push
+ * %xmm0 - %xmm10 on the stack; otherwise clear CR0_TS.
+ */
+#define CLEAR_TS_OR_PUSH_XMM_REGISTERS(tmpreg) \
+ push %rbp; \
+ mov %rsp, %rbp; \
+ movq %cr0, tmpreg; \
+ testq $CR0_TS, tmpreg; \
+ jnz 1f; \
+ and $-XMM_ALIGN, %rsp; \
+ sub $[XMM_SIZE * 11], %rsp; \
+ movaps %xmm0, 160(%rsp); \
+ movaps %xmm1, 144(%rsp); \
+ movaps %xmm2, 128(%rsp); \
+ movaps %xmm3, 112(%rsp); \
+ movaps %xmm4, 96(%rsp); \
+ movaps %xmm5, 80(%rsp); \
+ movaps %xmm6, 64(%rsp); \
+ movaps %xmm7, 48(%rsp); \
+ movaps %xmm8, 32(%rsp); \
+ movaps %xmm9, 16(%rsp); \
+ movaps %xmm10, (%rsp); \
+ jmp 2f; \
+1: \
+ PROTECTED_CLTS; \
+2:
+
+
+ /*
+ * If CR0_TS was not set above, pop %xmm0 - %xmm10 off the stack;
+ * otherwise set CR0_TS.
+ */
+#define SET_TS_OR_POP_XMM_REGISTERS(tmpreg) \
+ testq $CR0_TS, tmpreg; \
+ jnz 1f; \
+ movaps (%rsp), %xmm10; \
+ movaps 16(%rsp), %xmm9; \
+ movaps 32(%rsp), %xmm8; \
+ movaps 48(%rsp), %xmm7; \
+ movaps 64(%rsp), %xmm6; \
+ movaps 80(%rsp), %xmm5; \
+ movaps 96(%rsp), %xmm4; \
+ movaps 112(%rsp), %xmm3; \
+ movaps 128(%rsp), %xmm2; \
+ movaps 144(%rsp), %xmm1; \
+ movaps 160(%rsp), %xmm0; \
+ jmp 2f; \
+1: \
+ STTS(tmpreg); \
+2: \
+ mov %rbp, %rsp; \
+ pop %rbp
+
+
+#else
+#define PROTECTED_CLTS
+#define CLEAR_TS_OR_PUSH_XMM_REGISTERS(tmpreg)
+#define SET_TS_OR_POP_XMM_REGISTERS(tmpreg)
+#endif /* _KERNEL */
+
+/*
+ * Use this mask to byte-swap a 16-byte integer with the pshufb instruction
+ */
+
+// static uint8_t byte_swap16_mask[] = {
+// 15, 14, 13, 12, 11, 10, 9, 8, 7, 6 ,5, 4, 3, 2, 1, 0 };
+.text
+.align XMM_ALIGN
+.Lbyte_swap16_mask:
+ .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+
+
+
+/*
+ * void gcm_mul_pclmulqdq(uint64_t *x_in, uint64_t *y, uint64_t *res);
+ *
+ * Perform a carry-less multiplication (that is, combine the partial
+ * products with XOR rather than addition with carries) of P1 and P2 and
+ * place the result in P3.
+ *
+ * Byte swap the input and the output.
+ *
+ * Note: x_in, y, and res each point to a 16-byte block
+ * (an array of two 64-bit integers).
+ *
+ * Note2: For kernel code, the caller is responsible for ensuring
+ * kpreempt_disable() has been called. This is because %xmm registers are
+ * not saved/restored. Clear and set the CR0.TS bit on entry and exit,
+ * respectively, if TS is set on entry. Otherwise, if TS is not set,
+ * save and restore %xmm registers on the stack.
+ *
+ * Note3: Original Intel definition:
+ * void galois_hash_asm(unsigned char *hk, unsigned char *s,
+ * unsigned char *d, int length)
+ *
+ * Note4: Register/parameter mapping:
+ * Intel:
+ * Parameter 1: %rcx (copied to %xmm0) hk or x_in
+ * Parameter 2: %rdx (copied to %xmm1) s or y
+ * Parameter 3: %rdi (result) d or res
+ * OpenSolaris:
+ * Parameter 1: %rdi (copied to %xmm0) x_in
+ * Parameter 2: %rsi (copied to %xmm1) y
+ * Parameter 3: %rdx (result) res
+ */
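+
+/*
+ * Usage sketch (a reviewer illustration; the gcm_ghash and gcm_H field
+ * names are assumptions based on the ICP gcm context, not a fixed
+ * interface). A kernel caller pairs the call with the preemption guard
+ * required by Note2:
+ *
+ *	uint64_t res[2];
+ *	kpreempt_disable();
+ *	gcm_mul_pclmulqdq(ctx->gcm_ghash, ctx->gcm_H, res);
+ *	kpreempt_enable();
+ */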
+
+ENTRY_NP(gcm_mul_pclmulqdq)
+ CLEAR_TS_OR_PUSH_XMM_REGISTERS(%r10)
+
+ //
+ // Copy Parameters
+ //
+ movdqu (%rdi), %xmm0 // P1
+ movdqu (%rsi), %xmm1 // P2
+
+ //
+ // Byte swap 16-byte input
+ //
+ lea .Lbyte_swap16_mask(%rip), %rax
+ movaps (%rax), %xmm10
+ pshufb %xmm10, %xmm0
+ pshufb %xmm10, %xmm1
+
+
+ //
+ // Multiply with the hash key
+ //
+ movdqu %xmm0, %xmm3
+ pclmulqdq $0, %xmm1, %xmm3 // xmm3 holds a0*b0
+
+ movdqu %xmm0, %xmm4
+ pclmulqdq $16, %xmm1, %xmm4 // xmm4 holds a0*b1
+
+ movdqu %xmm0, %xmm5
+ pclmulqdq $1, %xmm1, %xmm5 // xmm5 holds a1*b0
+ movdqu %xmm0, %xmm6
+ pclmulqdq $17, %xmm1, %xmm6 // xmm6 holds a1*b1
+
+ pxor %xmm5, %xmm4 // xmm4 holds a0*b1 + a1*b0
+
+ movdqu %xmm4, %xmm5 // move the contents of xmm4 to xmm5
+ psrldq $8, %xmm4 // shift xmm4 right by 64 bits
+ pslldq $8, %xmm5 // shift xmm5 left by 64 bits
+ pxor %xmm5, %xmm3
+ pxor %xmm4, %xmm6 // Register pair <xmm6:xmm3> holds the result
+ // of the carry-less multiplication of
+ // xmm0 by xmm1.
+
+ // We shift the result of the multiplication by one bit position
+ // to the left to compensate for the fact that the bits are reversed.
+ movdqu %xmm3, %xmm7
+ movdqu %xmm6, %xmm8
+ pslld $1, %xmm3
+ pslld $1, %xmm6
+ psrld $31, %xmm7
+ psrld $31, %xmm8
+ movdqu %xmm7, %xmm9
+ pslldq $4, %xmm8
+ pslldq $4, %xmm7
+ psrldq $12, %xmm9
+ por %xmm7, %xmm3
+ por %xmm8, %xmm6
+ por %xmm9, %xmm6
+
+ //
+ // First phase of the reduction
+ //
+ // Move xmm3 into xmm7, xmm8, xmm9 in order to perform the shifts
+ // independently.
+ movdqu %xmm3, %xmm7
+ movdqu %xmm3, %xmm8
+ movdqu %xmm3, %xmm9
+ pslld $31, %xmm7 // packed left shift, shifting << 31
+ pslld $30, %xmm8 // packed left shift, shifting << 30
+ pslld $25, %xmm9 // packed left shift, shifting << 25
+ pxor %xmm8, %xmm7 // xor the shifted versions
+ pxor %xmm9, %xmm7
+ movdqu %xmm7, %xmm8
+ pslldq $12, %xmm7
+ psrldq $4, %xmm8
+ pxor %xmm7, %xmm3 // first phase of the reduction complete
+
+ //
+ // Second phase of the reduction
+ //
+ // Make 3 copies of xmm3 in xmm2, xmm4, xmm5 for doing these
+ // shift operations.
+ movdqu %xmm3, %xmm2
+ movdqu %xmm3, %xmm4
+ movdqu %xmm3, %xmm5
+ psrld $1, %xmm2 // packed right shift, shifting >> 1
+ psrld $2, %xmm4 // packed right shift, shifting >> 2
+ psrld $7, %xmm5 // packed right shift, shifting >> 7
+ pxor %xmm4, %xmm2 // xor the shifted versions
+ pxor %xmm5, %xmm2
+ pxor %xmm8, %xmm2
+ pxor %xmm2, %xmm3
+ pxor %xmm3, %xmm6 // the result is in xmm6
+
+ //
+ // Byte swap 16-byte result
+ //
+ pshufb %xmm10, %xmm6 // %xmm10 has the swap mask
+
+ //
+ // Store the result
+ //
+ movdqu %xmm6, (%rdx) // P3
+
+
+ //
+ // Cleanup and Return
+ //
+ SET_TS_OR_POP_XMM_REGISTERS(%r10)
+ ret
+ SET_SIZE(gcm_mul_pclmulqdq)
+
+#endif /* lint || __lint */
diff --git a/module/icp/asm-x86_64/sha1/sha1-x86_64.S b/module/icp/asm-x86_64/sha1/sha1-x86_64.S
new file mode 100644
index 000000000..53cc156a7
--- /dev/null
+++ b/module/icp/asm-x86_64/sha1/sha1-x86_64.S
@@ -0,0 +1,1346 @@
+/*
+ * ====================================================================
+ * Written by Andy Polyakov <[email protected]> for the OpenSSL
+ * project. The module is, however, dual licensed under OpenSSL and
+ * CRYPTOGAMS licenses depending on where you obtain it. For further
+ * details see http://www.openssl.org/~appro/cryptogams/.
+ * ====================================================================
+ *
+ * sha1_block procedure for x86_64.
+ *
+ * It was brought to my attention that on EM64T compiler-generated code
+ * was far behind the 32-bit assembler implementation. This is unlike
+ * Opteron, where compiler-generated code was only 15% behind the 32-bit
+ * assembler, which originally made it hard to motivate the effort.
+ * There was a suggestion to mechanically translate the 32-bit code, but I
+ * dismissed it, reasoning that x86_64 offers enough register bank
+ * capacity to fully utilize SHA-1 parallelism. Therefore this fresh
+ * implementation:-) However! While the 64-bit code does perform better
+ * on Opteron, I failed to beat the 32-bit assembler on an EM64T core.
+ * Well, x86_64 does offer a larger *addressable* register bank, but the
+ * out-of-order core reaches for even more registers through dynamic
+ * aliasing, and the EM64T core must have managed to run-time optimize
+ * even 32-bit code just as well as the 64-bit code. The performance
+ * improvement is summarized in the
+ * following table:
+ *
+ * gcc 3.4 32-bit asm cycles/byte
+ * Opteron +45% +20% 6.8
+ * Xeon P4 +65% +0% 9.9
+ * Core2 +60% +10% 7.0
+ *
+ *
+ * OpenSolaris OS modifications
+ *
+ * Sun elects to use this software under the BSD license.
+ *
+ * This source originates from OpenSSL file sha1-x86_64.pl at
+ * ftp://ftp.openssl.org/snapshot/openssl-0.9.8-stable-SNAP-20080131.tar.gz
+ * (presumably for future OpenSSL release 0.9.8h), with these changes:
+ *
+ * 1. Added perl "use strict" and declared variables.
+ *
+ * 2. Added OpenSolaris ENTRY_NP/SET_SIZE macros from
+ * /usr/include/sys/asm_linkage.h, .ident keywords, and lint(1B) guards.
+ *
+ * 3. Removed x86_64-xlate.pl script (not needed for as(1) or gas(1)
+ * assemblers).
+ *
+ */
+
+/*
+ * This file was generated by a perl script (sha1-x86_64.pl). The comments from
+ * the original file have been pasted above.
+ */
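+
+/*
+ * Reference sketch (a reviewer note): each unrolled block below computes
+ * one SHA-1 round. For rounds 0-19 the C equivalent is, with W[] the
+ * byte-swapped message schedule kept on the stack:
+ *
+ *	f = ((c ^ d) & b) ^ d;
+ *	e += rol32(a, 5) + f + 0x5a827999 + W[i];
+ *	b = rol32(b, 30);
+ *
+ * and from round 16 onward the schedule word is first recomputed as
+ *
+ *	W[i & 15] = rol32(W[(i - 3) & 15] ^ W[(i - 8) & 15] ^
+ *	    W[(i - 14) & 15] ^ W[(i - 16) & 15], 1);
+ *
+ * which matches the xor/rol $1 sequences that appear below.
+ */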
+
+#if defined(lint) || defined(__lint)
+#include <sys/stdint.h>
+#include <sys/sha1.h>
+
+/* ARGSUSED */
+void
+sha1_block_data_order(SHA1_CTX *ctx, const void *inpp, size_t blocks)
+{
+}
+
+#else
+#define _ASM
+#include <sys/asm_linkage.h>
+ENTRY_NP(sha1_block_data_order)
+ push %rbx
+ push %rbp
+ push %r12
+ mov %rsp,%rax
+ mov %rdi,%r8 # reassigned argument
+ sub $72,%rsp
+ mov %rsi,%r9 # reassigned argument
+ and $-64,%rsp
+ mov %rdx,%r10 # reassigned argument
+ mov %rax,64(%rsp)
+
+ mov 0(%r8),%edx
+ mov 4(%r8),%esi
+ mov 8(%r8),%edi
+ mov 12(%r8),%ebp
+ mov 16(%r8),%r11d
+.align 4
+.Lloop:
+ mov 0(%r9),%eax
+ bswap %eax
+ mov %eax,0(%rsp)
+ lea 0x5a827999(%eax,%r11d),%r12d
+ mov %edi,%ebx
+ mov 4(%r9),%eax
+ mov %edx,%r11d
+ xor %ebp,%ebx
+ bswap %eax
+ rol $5,%r11d
+ and %esi,%ebx
+ mov %eax,4(%rsp)
+ add %r11d,%r12d
+ xor %ebp,%ebx
+ rol $30,%esi
+ add %ebx,%r12d
+ lea 0x5a827999(%eax,%ebp),%r11d
+ mov %esi,%ebx
+ mov 8(%r9),%eax
+ mov %r12d,%ebp
+ xor %edi,%ebx
+ bswap %eax
+ rol $5,%ebp
+ and %edx,%ebx
+ mov %eax,8(%rsp)
+ add %ebp,%r11d
+ xor %edi,%ebx
+ rol $30,%edx
+ add %ebx,%r11d
+ lea 0x5a827999(%eax,%edi),%ebp
+ mov %edx,%ebx
+ mov 12(%r9),%eax
+ mov %r11d,%edi
+ xor %esi,%ebx
+ bswap %eax
+ rol $5,%edi
+ and %r12d,%ebx
+ mov %eax,12(%rsp)
+ add %edi,%ebp
+ xor %esi,%ebx
+ rol $30,%r12d
+ add %ebx,%ebp
+ lea 0x5a827999(%eax,%esi),%edi
+ mov %r12d,%ebx
+ mov 16(%r9),%eax
+ mov %ebp,%esi
+ xor %edx,%ebx
+ bswap %eax
+ rol $5,%esi
+ and %r11d,%ebx
+ mov %eax,16(%rsp)
+ add %esi,%edi
+ xor %edx,%ebx
+ rol $30,%r11d
+ add %ebx,%edi
+ lea 0x5a827999(%eax,%edx),%esi
+ mov %r11d,%ebx
+ mov 20(%r9),%eax
+ mov %edi,%edx
+ xor %r12d,%ebx
+ bswap %eax
+ rol $5,%edx
+ and %ebp,%ebx
+ mov %eax,20(%rsp)
+ add %edx,%esi
+ xor %r12d,%ebx
+ rol $30,%ebp
+ add %ebx,%esi
+ lea 0x5a827999(%eax,%r12d),%edx
+ mov %ebp,%ebx
+ mov 24(%r9),%eax
+ mov %esi,%r12d
+ xor %r11d,%ebx
+ bswap %eax
+ rol $5,%r12d
+ and %edi,%ebx
+ mov %eax,24(%rsp)
+ add %r12d,%edx
+ xor %r11d,%ebx
+ rol $30,%edi
+ add %ebx,%edx
+ lea 0x5a827999(%eax,%r11d),%r12d
+ mov %edi,%ebx
+ mov 28(%r9),%eax
+ mov %edx,%r11d
+ xor %ebp,%ebx
+ bswap %eax
+ rol $5,%r11d
+ and %esi,%ebx
+ mov %eax,28(%rsp)
+ add %r11d,%r12d
+ xor %ebp,%ebx
+ rol $30,%esi
+ add %ebx,%r12d
+ lea 0x5a827999(%eax,%ebp),%r11d
+ mov %esi,%ebx
+ mov 32(%r9),%eax
+ mov %r12d,%ebp
+ xor %edi,%ebx
+ bswap %eax
+ rol $5,%ebp
+ and %edx,%ebx
+ mov %eax,32(%rsp)
+ add %ebp,%r11d
+ xor %edi,%ebx
+ rol $30,%edx
+ add %ebx,%r11d
+ lea 0x5a827999(%eax,%edi),%ebp
+ mov %edx,%ebx
+ mov 36(%r9),%eax
+ mov %r11d,%edi
+ xor %esi,%ebx
+ bswap %eax
+ rol $5,%edi
+ and %r12d,%ebx
+ mov %eax,36(%rsp)
+ add %edi,%ebp
+ xor %esi,%ebx
+ rol $30,%r12d
+ add %ebx,%ebp
+ lea 0x5a827999(%eax,%esi),%edi
+ mov %r12d,%ebx
+ mov 40(%r9),%eax
+ mov %ebp,%esi
+ xor %edx,%ebx
+ bswap %eax
+ rol $5,%esi
+ and %r11d,%ebx
+ mov %eax,40(%rsp)
+ add %esi,%edi
+ xor %edx,%ebx
+ rol $30,%r11d
+ add %ebx,%edi
+ lea 0x5a827999(%eax,%edx),%esi
+ mov %r11d,%ebx
+ mov 44(%r9),%eax
+ mov %edi,%edx
+ xor %r12d,%ebx
+ bswap %eax
+ rol $5,%edx
+ and %ebp,%ebx
+ mov %eax,44(%rsp)
+ add %edx,%esi
+ xor %r12d,%ebx
+ rol $30,%ebp
+ add %ebx,%esi
+ lea 0x5a827999(%eax,%r12d),%edx
+ mov %ebp,%ebx
+ mov 48(%r9),%eax
+ mov %esi,%r12d
+ xor %r11d,%ebx
+ bswap %eax
+ rol $5,%r12d
+ and %edi,%ebx
+ mov %eax,48(%rsp)
+ add %r12d,%edx
+ xor %r11d,%ebx
+ rol $30,%edi
+ add %ebx,%edx
+ lea 0x5a827999(%eax,%r11d),%r12d
+ mov %edi,%ebx
+ mov 52(%r9),%eax
+ mov %edx,%r11d
+ xor %ebp,%ebx
+ bswap %eax
+ rol $5,%r11d
+ and %esi,%ebx
+ mov %eax,52(%rsp)
+ add %r11d,%r12d
+ xor %ebp,%ebx
+ rol $30,%esi
+ add %ebx,%r12d
+ lea 0x5a827999(%eax,%ebp),%r11d
+ mov %esi,%ebx
+ mov 56(%r9),%eax
+ mov %r12d,%ebp
+ xor %edi,%ebx
+ bswap %eax
+ rol $5,%ebp
+ and %edx,%ebx
+ mov %eax,56(%rsp)
+ add %ebp,%r11d
+ xor %edi,%ebx
+ rol $30,%edx
+ add %ebx,%r11d
+ lea 0x5a827999(%eax,%edi),%ebp
+ mov %edx,%ebx
+ mov 60(%r9),%eax
+ mov %r11d,%edi
+ xor %esi,%ebx
+ bswap %eax
+ rol $5,%edi
+ and %r12d,%ebx
+ mov %eax,60(%rsp)
+ add %edi,%ebp
+ xor %esi,%ebx
+ rol $30,%r12d
+ add %ebx,%ebp
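+	# Rounds 16-19: same f and k, but the message words now come from
+	# the schedule W[t] = rol(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1),
+	# updated in place in the 64-byte stack window.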
+ lea 0x5a827999(%eax,%esi),%edi
+ mov 0(%rsp),%eax
+ mov %r12d,%ebx
+ mov %ebp,%esi
+ xor 8(%rsp),%eax
+ xor %edx,%ebx
+ rol $5,%esi
+ xor 32(%rsp),%eax
+ and %r11d,%ebx
+ add %esi,%edi
+ xor 52(%rsp),%eax
+ xor %edx,%ebx
+ rol $30,%r11d
+ add %ebx,%edi
+ rol $1,%eax
+ mov %eax,0(%rsp)
+ lea 0x5a827999(%eax,%edx),%esi
+ mov 4(%rsp),%eax
+ mov %r11d,%ebx
+ mov %edi,%edx
+ xor 12(%rsp),%eax
+ xor %r12d,%ebx
+ rol $5,%edx
+ xor 36(%rsp),%eax
+ and %ebp,%ebx
+ add %edx,%esi
+ xor 56(%rsp),%eax
+ xor %r12d,%ebx
+ rol $30,%ebp
+ add %ebx,%esi
+ rol $1,%eax
+ mov %eax,4(%rsp)
+ lea 0x5a827999(%eax,%r12d),%edx
+ mov 8(%rsp),%eax
+ mov %ebp,%ebx
+ mov %esi,%r12d
+ xor 16(%rsp),%eax
+ xor %r11d,%ebx
+ rol $5,%r12d
+ xor 40(%rsp),%eax
+ and %edi,%ebx
+ add %r12d,%edx
+ xor 60(%rsp),%eax
+ xor %r11d,%ebx
+ rol $30,%edi
+ add %ebx,%edx
+ rol $1,%eax
+ mov %eax,8(%rsp)
+ lea 0x5a827999(%eax,%r11d),%r12d
+ mov 12(%rsp),%eax
+ mov %edi,%ebx
+ mov %edx,%r11d
+ xor 20(%rsp),%eax
+ xor %ebp,%ebx
+ rol $5,%r11d
+ xor 44(%rsp),%eax
+ and %esi,%ebx
+ add %r11d,%r12d
+ xor 0(%rsp),%eax
+ xor %ebp,%ebx
+ rol $30,%esi
+ add %ebx,%r12d
+ rol $1,%eax
+ mov %eax,12(%rsp)
+ lea 0x5a827999(%eax,%ebp),%r11d
+ mov 16(%rsp),%eax
+ mov %esi,%ebx
+ mov %r12d,%ebp
+ xor 24(%rsp),%eax
+ xor %edi,%ebx
+ rol $5,%ebp
+ xor 48(%rsp),%eax
+ and %edx,%ebx
+ add %ebp,%r11d
+ xor 4(%rsp),%eax
+ xor %edi,%ebx
+ rol $30,%edx
+ add %ebx,%r11d
+ rol $1,%eax
+ mov %eax,16(%rsp)
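+	# Rounds 20-39: f = b ^ c ^ d (parity), k = 0x6ed9eba1.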
+ lea 0x6ed9eba1(%eax,%edi),%ebp
+ mov 20(%rsp),%eax
+ mov %edx,%ebx
+ mov %r11d,%edi
+ xor 28(%rsp),%eax
+ xor %r12d,%ebx
+ rol $5,%edi
+ xor 52(%rsp),%eax
+ xor %esi,%ebx
+ add %edi,%ebp
+ xor 8(%rsp),%eax
+ rol $30,%r12d
+ add %ebx,%ebp
+ rol $1,%eax
+ mov %eax,20(%rsp)
+ lea 0x6ed9eba1(%eax,%esi),%edi
+ mov 24(%rsp),%eax
+ mov %r12d,%ebx
+ mov %ebp,%esi
+ xor 32(%rsp),%eax
+ xor %r11d,%ebx
+ rol $5,%esi
+ xor 56(%rsp),%eax
+ xor %edx,%ebx
+ add %esi,%edi
+ xor 12(%rsp),%eax
+ rol $30,%r11d
+ add %ebx,%edi
+ rol $1,%eax
+ mov %eax,24(%rsp)
+ lea 0x6ed9eba1(%eax,%edx),%esi
+ mov 28(%rsp),%eax
+ mov %r11d,%ebx
+ mov %edi,%edx
+ xor 36(%rsp),%eax
+ xor %ebp,%ebx
+ rol $5,%edx
+ xor 60(%rsp),%eax
+ xor %r12d,%ebx
+ add %edx,%esi
+ xor 16(%rsp),%eax
+ rol $30,%ebp
+ add %ebx,%esi
+ rol $1,%eax
+ mov %eax,28(%rsp)
+ lea 0x6ed9eba1(%eax,%r12d),%edx
+ mov 32(%rsp),%eax
+ mov %ebp,%ebx
+ mov %esi,%r12d
+ xor 40(%rsp),%eax
+ xor %edi,%ebx
+ rol $5,%r12d
+ xor 0(%rsp),%eax
+ xor %r11d,%ebx
+ add %r12d,%edx
+ xor 20(%rsp),%eax
+ rol $30,%edi
+ add %ebx,%edx
+ rol $1,%eax
+ mov %eax,32(%rsp)
+ lea 0x6ed9eba1(%eax,%r11d),%r12d
+ mov 36(%rsp),%eax
+ mov %edi,%ebx
+ mov %edx,%r11d
+ xor 44(%rsp),%eax
+ xor %esi,%ebx
+ rol $5,%r11d
+ xor 4(%rsp),%eax
+ xor %ebp,%ebx
+ add %r11d,%r12d
+ xor 24(%rsp),%eax
+ rol $30,%esi
+ add %ebx,%r12d
+ rol $1,%eax
+ mov %eax,36(%rsp)
+ lea 0x6ed9eba1(%eax,%ebp),%r11d
+ mov 40(%rsp),%eax
+ mov %esi,%ebx
+ mov %r12d,%ebp
+ xor 48(%rsp),%eax
+ xor %edx,%ebx
+ rol $5,%ebp
+ xor 8(%rsp),%eax
+ xor %edi,%ebx
+ add %ebp,%r11d
+ xor 28(%rsp),%eax
+ rol $30,%edx
+ add %ebx,%r11d
+ rol $1,%eax
+ mov %eax,40(%rsp)
+ lea 0x6ed9eba1(%eax,%edi),%ebp
+ mov 44(%rsp),%eax
+ mov %edx,%ebx
+ mov %r11d,%edi
+ xor 52(%rsp),%eax
+ xor %r12d,%ebx
+ rol $5,%edi
+ xor 12(%rsp),%eax
+ xor %esi,%ebx
+ add %edi,%ebp
+ xor 32(%rsp),%eax
+ rol $30,%r12d
+ add %ebx,%ebp
+ rol $1,%eax
+ mov %eax,44(%rsp)
+ lea 0x6ed9eba1(%eax,%esi),%edi
+ mov 48(%rsp),%eax
+ mov %r12d,%ebx
+ mov %ebp,%esi
+ xor 56(%rsp),%eax
+ xor %r11d,%ebx
+ rol $5,%esi
+ xor 16(%rsp),%eax
+ xor %edx,%ebx
+ add %esi,%edi
+ xor 36(%rsp),%eax
+ rol $30,%r11d
+ add %ebx,%edi
+ rol $1,%eax
+ mov %eax,48(%rsp)
+ lea 0x6ed9eba1(%eax,%edx),%esi
+ mov 52(%rsp),%eax
+ mov %r11d,%ebx
+ mov %edi,%edx
+ xor 60(%rsp),%eax
+ xor %ebp,%ebx
+ rol $5,%edx
+ xor 20(%rsp),%eax
+ xor %r12d,%ebx
+ add %edx,%esi
+ xor 40(%rsp),%eax
+ rol $30,%ebp
+ add %ebx,%esi
+ rol $1,%eax
+ mov %eax,52(%rsp)
+ lea 0x6ed9eba1(%eax,%r12d),%edx
+ mov 56(%rsp),%eax
+ mov %ebp,%ebx
+ mov %esi,%r12d
+ xor 0(%rsp),%eax
+ xor %edi,%ebx
+ rol $5,%r12d
+ xor 24(%rsp),%eax
+ xor %r11d,%ebx
+ add %r12d,%edx
+ xor 44(%rsp),%eax
+ rol $30,%edi
+ add %ebx,%edx
+ rol $1,%eax
+ mov %eax,56(%rsp)
+ lea 0x6ed9eba1(%eax,%r11d),%r12d
+ mov 60(%rsp),%eax
+ mov %edi,%ebx
+ mov %edx,%r11d
+ xor 4(%rsp),%eax
+ xor %esi,%ebx
+ rol $5,%r11d
+ xor 28(%rsp),%eax
+ xor %ebp,%ebx
+ add %r11d,%r12d
+ xor 48(%rsp),%eax
+ rol $30,%esi
+ add %ebx,%r12d
+ rol $1,%eax
+ mov %eax,60(%rsp)
+ lea 0x6ed9eba1(%eax,%ebp),%r11d
+ mov 0(%rsp),%eax
+ mov %esi,%ebx
+ mov %r12d,%ebp
+ xor 8(%rsp),%eax
+ xor %edx,%ebx
+ rol $5,%ebp
+ xor 32(%rsp),%eax
+ xor %edi,%ebx
+ add %ebp,%r11d
+ xor 52(%rsp),%eax
+ rol $30,%edx
+ add %ebx,%r11d
+ rol $1,%eax
+ mov %eax,0(%rsp)
+ lea 0x6ed9eba1(%eax,%edi),%ebp
+ mov 4(%rsp),%eax
+ mov %edx,%ebx
+ mov %r11d,%edi
+ xor 12(%rsp),%eax
+ xor %r12d,%ebx
+ rol $5,%edi
+ xor 36(%rsp),%eax
+ xor %esi,%ebx
+ add %edi,%ebp
+ xor 56(%rsp),%eax
+ rol $30,%r12d
+ add %ebx,%ebp
+ rol $1,%eax
+ mov %eax,4(%rsp)
+ lea 0x6ed9eba1(%eax,%esi),%edi
+ mov 8(%rsp),%eax
+ mov %r12d,%ebx
+ mov %ebp,%esi
+ xor 16(%rsp),%eax
+ xor %r11d,%ebx
+ rol $5,%esi
+ xor 40(%rsp),%eax
+ xor %edx,%ebx
+ add %esi,%edi
+ xor 60(%rsp),%eax
+ rol $30,%r11d
+ add %ebx,%edi
+ rol $1,%eax
+ mov %eax,8(%rsp)
+ lea 0x6ed9eba1(%eax,%edx),%esi
+ mov 12(%rsp),%eax
+ mov %r11d,%ebx
+ mov %edi,%edx
+ xor 20(%rsp),%eax
+ xor %ebp,%ebx
+ rol $5,%edx
+ xor 44(%rsp),%eax
+ xor %r12d,%ebx
+ add %edx,%esi
+ xor 0(%rsp),%eax
+ rol $30,%ebp
+ add %ebx,%esi
+ rol $1,%eax
+ mov %eax,12(%rsp)
+ lea 0x6ed9eba1(%eax,%r12d),%edx
+ mov 16(%rsp),%eax
+ mov %ebp,%ebx
+ mov %esi,%r12d
+ xor 24(%rsp),%eax
+ xor %edi,%ebx
+ rol $5,%r12d
+ xor 48(%rsp),%eax
+ xor %r11d,%ebx
+ add %r12d,%edx
+ xor 4(%rsp),%eax
+ rol $30,%edi
+ add %ebx,%edx
+ rol $1,%eax
+ mov %eax,16(%rsp)
+ lea 0x6ed9eba1(%eax,%r11d),%r12d
+ mov 20(%rsp),%eax
+ mov %edi,%ebx
+ mov %edx,%r11d
+ xor 28(%rsp),%eax
+ xor %esi,%ebx
+ rol $5,%r11d
+ xor 52(%rsp),%eax
+ xor %ebp,%ebx
+ add %r11d,%r12d
+ xor 8(%rsp),%eax
+ rol $30,%esi
+ add %ebx,%r12d
+ rol $1,%eax
+ mov %eax,20(%rsp)
+ lea 0x6ed9eba1(%eax,%ebp),%r11d
+ mov 24(%rsp),%eax
+ mov %esi,%ebx
+ mov %r12d,%ebp
+ xor 32(%rsp),%eax
+ xor %edx,%ebx
+ rol $5,%ebp
+ xor 56(%rsp),%eax
+ xor %edi,%ebx
+ add %ebp,%r11d
+ xor 12(%rsp),%eax
+ rol $30,%edx
+ add %ebx,%r11d
+ rol $1,%eax
+ mov %eax,24(%rsp)
+ lea 0x6ed9eba1(%eax,%edi),%ebp
+ mov 28(%rsp),%eax
+ mov %edx,%ebx
+ mov %r11d,%edi
+ xor 36(%rsp),%eax
+ xor %r12d,%ebx
+ rol $5,%edi
+ xor 60(%rsp),%eax
+ xor %esi,%ebx
+ add %edi,%ebp
+ xor 16(%rsp),%eax
+ rol $30,%r12d
+ add %ebx,%ebp
+ rol $1,%eax
+ mov %eax,28(%rsp)
+ lea 0x6ed9eba1(%eax,%esi),%edi
+ mov 32(%rsp),%eax
+ mov %r12d,%ebx
+ mov %ebp,%esi
+ xor 40(%rsp),%eax
+ xor %r11d,%ebx
+ rol $5,%esi
+ xor 0(%rsp),%eax
+ xor %edx,%ebx
+ add %esi,%edi
+ xor 20(%rsp),%eax
+ rol $30,%r11d
+ add %ebx,%edi
+ rol $1,%eax
+ mov %eax,32(%rsp)
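+	# Rounds 40-59: f = (b & c) | (b & d) | (c & d) (majority), computed
+	# as (b & c) | ((b | c) & d), k = 0x8f1bbcdc (written below as the
+	# signed constant -0x70e44324).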
+ lea -0x70e44324(%eax,%edx),%esi
+ mov 36(%rsp),%eax
+ mov %ebp,%ebx
+ mov %ebp,%ecx
+ xor 44(%rsp),%eax
+ mov %edi,%edx
+ and %r11d,%ebx
+ xor 4(%rsp),%eax
+ or %r11d,%ecx
+ rol $5,%edx
+ xor 24(%rsp),%eax
+ and %r12d,%ecx
+ add %edx,%esi
+ rol $1,%eax
+ or %ecx,%ebx
+ rol $30,%ebp
+ mov %eax,36(%rsp)
+ add %ebx,%esi
+ lea -0x70e44324(%eax,%r12d),%edx
+ mov 40(%rsp),%eax
+ mov %edi,%ebx
+ mov %edi,%ecx
+ xor 48(%rsp),%eax
+ mov %esi,%r12d
+ and %ebp,%ebx
+ xor 8(%rsp),%eax
+ or %ebp,%ecx
+ rol $5,%r12d
+ xor 28(%rsp),%eax
+ and %r11d,%ecx
+ add %r12d,%edx
+ rol $1,%eax
+ or %ecx,%ebx
+ rol $30,%edi
+ mov %eax,40(%rsp)
+ add %ebx,%edx
+ lea -0x70e44324(%eax,%r11d),%r12d
+ mov 44(%rsp),%eax
+ mov %esi,%ebx
+ mov %esi,%ecx
+ xor 52(%rsp),%eax
+ mov %edx,%r11d
+ and %edi,%ebx
+ xor 12(%rsp),%eax
+ or %edi,%ecx
+ rol $5,%r11d
+ xor 32(%rsp),%eax
+ and %ebp,%ecx
+ add %r11d,%r12d
+ rol $1,%eax
+ or %ecx,%ebx
+ rol $30,%esi
+ mov %eax,44(%rsp)
+ add %ebx,%r12d
+ lea -0x70e44324(%eax,%ebp),%r11d
+ mov 48(%rsp),%eax
+ mov %edx,%ebx
+ mov %edx,%ecx
+ xor 56(%rsp),%eax
+ mov %r12d,%ebp
+ and %esi,%ebx
+ xor 16(%rsp),%eax
+ or %esi,%ecx
+ rol $5,%ebp
+ xor 36(%rsp),%eax
+ and %edi,%ecx
+ add %ebp,%r11d
+ rol $1,%eax
+ or %ecx,%ebx
+ rol $30,%edx
+ mov %eax,48(%rsp)
+ add %ebx,%r11d
+ lea -0x70e44324(%eax,%edi),%ebp
+ mov 52(%rsp),%eax
+ mov %r12d,%ebx
+ mov %r12d,%ecx
+ xor 60(%rsp),%eax
+ mov %r11d,%edi
+ and %edx,%ebx
+ xor 20(%rsp),%eax
+ or %edx,%ecx
+ rol $5,%edi
+ xor 40(%rsp),%eax
+ and %esi,%ecx
+ add %edi,%ebp
+ rol $1,%eax
+ or %ecx,%ebx
+ rol $30,%r12d
+ mov %eax,52(%rsp)
+ add %ebx,%ebp
+ lea -0x70e44324(%eax,%esi),%edi
+ mov 56(%rsp),%eax
+ mov %r11d,%ebx
+ mov %r11d,%ecx
+ xor 0(%rsp),%eax
+ mov %ebp,%esi
+ and %r12d,%ebx
+ xor 24(%rsp),%eax
+ or %r12d,%ecx
+ rol $5,%esi
+ xor 44(%rsp),%eax
+ and %edx,%ecx
+ add %esi,%edi
+ rol $1,%eax
+ or %ecx,%ebx
+ rol $30,%r11d
+ mov %eax,56(%rsp)
+ add %ebx,%edi
+ lea -0x70e44324(%eax,%edx),%esi
+ mov 60(%rsp),%eax
+ mov %ebp,%ebx
+ mov %ebp,%ecx
+ xor 4(%rsp),%eax
+ mov %edi,%edx
+ and %r11d,%ebx
+ xor 28(%rsp),%eax
+ or %r11d,%ecx
+ rol $5,%edx
+ xor 48(%rsp),%eax
+ and %r12d,%ecx
+ add %edx,%esi
+ rol $1,%eax
+ or %ecx,%ebx
+ rol $30,%ebp
+ mov %eax,60(%rsp)
+ add %ebx,%esi
+ lea -0x70e44324(%eax,%r12d),%edx
+ mov 0(%rsp),%eax
+ mov %edi,%ebx
+ mov %edi,%ecx
+ xor 8(%rsp),%eax
+ mov %esi,%r12d
+ and %ebp,%ebx
+ xor 32(%rsp),%eax
+ or %ebp,%ecx
+ rol $5,%r12d
+ xor 52(%rsp),%eax
+ and %r11d,%ecx
+ add %r12d,%edx
+ rol $1,%eax
+ or %ecx,%ebx
+ rol $30,%edi
+ mov %eax,0(%rsp)
+ add %ebx,%edx
+ lea -0x70e44324(%eax,%r11d),%r12d
+ mov 4(%rsp),%eax
+ mov %esi,%ebx
+ mov %esi,%ecx
+ xor 12(%rsp),%eax
+ mov %edx,%r11d
+ and %edi,%ebx
+ xor 36(%rsp),%eax
+ or %edi,%ecx
+ rol $5,%r11d
+ xor 56(%rsp),%eax
+ and %ebp,%ecx
+ add %r11d,%r12d
+ rol $1,%eax
+ or %ecx,%ebx
+ rol $30,%esi
+ mov %eax,4(%rsp)
+ add %ebx,%r12d
+ lea -0x70e44324(%eax,%ebp),%r11d
+ mov 8(%rsp),%eax
+ mov %edx,%ebx
+ mov %edx,%ecx
+ xor 16(%rsp),%eax
+ mov %r12d,%ebp
+ and %esi,%ebx
+ xor 40(%rsp),%eax
+ or %esi,%ecx
+ rol $5,%ebp
+ xor 60(%rsp),%eax
+ and %edi,%ecx
+ add %ebp,%r11d
+ rol $1,%eax
+ or %ecx,%ebx
+ rol $30,%edx
+ mov %eax,8(%rsp)
+ add %ebx,%r11d
+ lea -0x70e44324(%eax,%edi),%ebp
+ mov 12(%rsp),%eax
+ mov %r12d,%ebx
+ mov %r12d,%ecx
+ xor 20(%rsp),%eax
+ mov %r11d,%edi
+ and %edx,%ebx
+ xor 44(%rsp),%eax
+ or %edx,%ecx
+ rol $5,%edi
+ xor 0(%rsp),%eax
+ and %esi,%ecx
+ add %edi,%ebp
+ rol $1,%eax
+ or %ecx,%ebx
+ rol $30,%r12d
+ mov %eax,12(%rsp)
+ add %ebx,%ebp
+ lea -0x70e44324(%eax,%esi),%edi
+ mov 16(%rsp),%eax
+ mov %r11d,%ebx
+ mov %r11d,%ecx
+ xor 24(%rsp),%eax
+ mov %ebp,%esi
+ and %r12d,%ebx
+ xor 48(%rsp),%eax
+ or %r12d,%ecx
+ rol $5,%esi
+ xor 4(%rsp),%eax
+ and %edx,%ecx
+ add %esi,%edi
+ rol $1,%eax
+ or %ecx,%ebx
+ rol $30,%r11d
+ mov %eax,16(%rsp)
+ add %ebx,%edi
+ lea -0x70e44324(%eax,%edx),%esi
+ mov 20(%rsp),%eax
+ mov %ebp,%ebx
+ mov %ebp,%ecx
+ xor 28(%rsp),%eax
+ mov %edi,%edx
+ and %r11d,%ebx
+ xor 52(%rsp),%eax
+ or %r11d,%ecx
+ rol $5,%edx
+ xor 8(%rsp),%eax
+ and %r12d,%ecx
+ add %edx,%esi
+ rol $1,%eax
+ or %ecx,%ebx
+ rol $30,%ebp
+ mov %eax,20(%rsp)
+ add %ebx,%esi
+ lea -0x70e44324(%eax,%r12d),%edx
+ mov 24(%rsp),%eax
+ mov %edi,%ebx
+ mov %edi,%ecx
+ xor 32(%rsp),%eax
+ mov %esi,%r12d
+ and %ebp,%ebx
+ xor 56(%rsp),%eax
+ or %ebp,%ecx
+ rol $5,%r12d
+ xor 12(%rsp),%eax
+ and %r11d,%ecx
+ add %r12d,%edx
+ rol $1,%eax
+ or %ecx,%ebx
+ rol $30,%edi
+ mov %eax,24(%rsp)
+ add %ebx,%edx
+ lea -0x70e44324(%eax,%r11d),%r12d
+ mov 28(%rsp),%eax
+ mov %esi,%ebx
+ mov %esi,%ecx
+ xor 36(%rsp),%eax
+ mov %edx,%r11d
+ and %edi,%ebx
+ xor 60(%rsp),%eax
+ or %edi,%ecx
+ rol $5,%r11d
+ xor 16(%rsp),%eax
+ and %ebp,%ecx
+ add %r11d,%r12d
+ rol $1,%eax
+ or %ecx,%ebx
+ rol $30,%esi
+ mov %eax,28(%rsp)
+ add %ebx,%r12d
+ lea -0x70e44324(%eax,%ebp),%r11d
+ mov 32(%rsp),%eax
+ mov %edx,%ebx
+ mov %edx,%ecx
+ xor 40(%rsp),%eax
+ mov %r12d,%ebp
+ and %esi,%ebx
+ xor 0(%rsp),%eax
+ or %esi,%ecx
+ rol $5,%ebp
+ xor 20(%rsp),%eax
+ and %edi,%ecx
+ add %ebp,%r11d
+ rol $1,%eax
+ or %ecx,%ebx
+ rol $30,%edx
+ mov %eax,32(%rsp)
+ add %ebx,%r11d
+ lea -0x70e44324(%eax,%edi),%ebp
+ mov 36(%rsp),%eax
+ mov %r12d,%ebx
+ mov %r12d,%ecx
+ xor 44(%rsp),%eax
+ mov %r11d,%edi
+ and %edx,%ebx
+ xor 4(%rsp),%eax
+ or %edx,%ecx
+ rol $5,%edi
+ xor 24(%rsp),%eax
+ and %esi,%ecx
+ add %edi,%ebp
+ rol $1,%eax
+ or %ecx,%ebx
+ rol $30,%r12d
+ mov %eax,36(%rsp)
+ add %ebx,%ebp
+ lea -0x70e44324(%eax,%esi),%edi
+ mov 40(%rsp),%eax
+ mov %r11d,%ebx
+ mov %r11d,%ecx
+ xor 48(%rsp),%eax
+ mov %ebp,%esi
+ and %r12d,%ebx
+ xor 8(%rsp),%eax
+ or %r12d,%ecx
+ rol $5,%esi
+ xor 28(%rsp),%eax
+ and %edx,%ecx
+ add %esi,%edi
+ rol $1,%eax
+ or %ecx,%ebx
+ rol $30,%r11d
+ mov %eax,40(%rsp)
+ add %ebx,%edi
+ lea -0x70e44324(%eax,%edx),%esi
+ mov 44(%rsp),%eax
+ mov %ebp,%ebx
+ mov %ebp,%ecx
+ xor 52(%rsp),%eax
+ mov %edi,%edx
+ and %r11d,%ebx
+ xor 12(%rsp),%eax
+ or %r11d,%ecx
+ rol $5,%edx
+ xor 32(%rsp),%eax
+ and %r12d,%ecx
+ add %edx,%esi
+ rol $1,%eax
+ or %ecx,%ebx
+ rol $30,%ebp
+ mov %eax,44(%rsp)
+ add %ebx,%esi
+ lea -0x70e44324(%eax,%r12d),%edx
+ mov 48(%rsp),%eax
+ mov %edi,%ebx
+ mov %edi,%ecx
+ xor 56(%rsp),%eax
+ mov %esi,%r12d
+ and %ebp,%ebx
+ xor 16(%rsp),%eax
+ or %ebp,%ecx
+ rol $5,%r12d
+ xor 36(%rsp),%eax
+ and %r11d,%ecx
+ add %r12d,%edx
+ rol $1,%eax
+ or %ecx,%ebx
+ rol $30,%edi
+ mov %eax,48(%rsp)
+ add %ebx,%edx
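+	# Rounds 60-79: f = b ^ c ^ d again, k = 0xca62c1d6 (-0x359d3e2a).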
+ lea -0x359d3e2a(%eax,%r11d),%r12d
+ mov 52(%rsp),%eax
+ mov %edi,%ebx
+ mov %edx,%r11d
+ xor 60(%rsp),%eax
+ xor %esi,%ebx
+ rol $5,%r11d
+ xor 20(%rsp),%eax
+ xor %ebp,%ebx
+ add %r11d,%r12d
+ xor 40(%rsp),%eax
+ rol $30,%esi
+ add %ebx,%r12d
+ rol $1,%eax
+ mov %eax,52(%rsp)
+ lea -0x359d3e2a(%eax,%ebp),%r11d
+ mov 56(%rsp),%eax
+ mov %esi,%ebx
+ mov %r12d,%ebp
+ xor 0(%rsp),%eax
+ xor %edx,%ebx
+ rol $5,%ebp
+ xor 24(%rsp),%eax
+ xor %edi,%ebx
+ add %ebp,%r11d
+ xor 44(%rsp),%eax
+ rol $30,%edx
+ add %ebx,%r11d
+ rol $1,%eax
+ mov %eax,56(%rsp)
+ lea -0x359d3e2a(%eax,%edi),%ebp
+ mov 60(%rsp),%eax
+ mov %edx,%ebx
+ mov %r11d,%edi
+ xor 4(%rsp),%eax
+ xor %r12d,%ebx
+ rol $5,%edi
+ xor 28(%rsp),%eax
+ xor %esi,%ebx
+ add %edi,%ebp
+ xor 48(%rsp),%eax
+ rol $30,%r12d
+ add %ebx,%ebp
+ rol $1,%eax
+ mov %eax,60(%rsp)
+ lea -0x359d3e2a(%eax,%esi),%edi
+ mov 0(%rsp),%eax
+ mov %r12d,%ebx
+ mov %ebp,%esi
+ xor 8(%rsp),%eax
+ xor %r11d,%ebx
+ rol $5,%esi
+ xor 32(%rsp),%eax
+ xor %edx,%ebx
+ add %esi,%edi
+ xor 52(%rsp),%eax
+ rol $30,%r11d
+ add %ebx,%edi
+ rol $1,%eax
+ mov %eax,0(%rsp)
+ lea -0x359d3e2a(%eax,%edx),%esi
+ mov 4(%rsp),%eax
+ mov %r11d,%ebx
+ mov %edi,%edx
+ xor 12(%rsp),%eax
+ xor %ebp,%ebx
+ rol $5,%edx
+ xor 36(%rsp),%eax
+ xor %r12d,%ebx
+ add %edx,%esi
+ xor 56(%rsp),%eax
+ rol $30,%ebp
+ add %ebx,%esi
+ rol $1,%eax
+ mov %eax,4(%rsp)
+ lea -0x359d3e2a(%eax,%r12d),%edx
+ mov 8(%rsp),%eax
+ mov %ebp,%ebx
+ mov %esi,%r12d
+ xor 16(%rsp),%eax
+ xor %edi,%ebx
+ rol $5,%r12d
+ xor 40(%rsp),%eax
+ xor %r11d,%ebx
+ add %r12d,%edx
+ xor 60(%rsp),%eax
+ rol $30,%edi
+ add %ebx,%edx
+ rol $1,%eax
+ mov %eax,8(%rsp)
+ lea -0x359d3e2a(%eax,%r11d),%r12d
+ mov 12(%rsp),%eax
+ mov %edi,%ebx
+ mov %edx,%r11d
+ xor 20(%rsp),%eax
+ xor %esi,%ebx
+ rol $5,%r11d
+ xor 44(%rsp),%eax
+ xor %ebp,%ebx
+ add %r11d,%r12d
+ xor 0(%rsp),%eax
+ rol $30,%esi
+ add %ebx,%r12d
+ rol $1,%eax
+ mov %eax,12(%rsp)
+ lea -0x359d3e2a(%eax,%ebp),%r11d
+ mov 16(%rsp),%eax
+ mov %esi,%ebx
+ mov %r12d,%ebp
+ xor 24(%rsp),%eax
+ xor %edx,%ebx
+ rol $5,%ebp
+ xor 48(%rsp),%eax
+ xor %edi,%ebx
+ add %ebp,%r11d
+ xor 4(%rsp),%eax
+ rol $30,%edx
+ add %ebx,%r11d
+ rol $1,%eax
+ mov %eax,16(%rsp)
+ lea -0x359d3e2a(%eax,%edi),%ebp
+ mov 20(%rsp),%eax
+ mov %edx,%ebx
+ mov %r11d,%edi
+ xor 28(%rsp),%eax
+ xor %r12d,%ebx
+ rol $5,%edi
+ xor 52(%rsp),%eax
+ xor %esi,%ebx
+ add %edi,%ebp
+ xor 8(%rsp),%eax
+ rol $30,%r12d
+ add %ebx,%ebp
+ rol $1,%eax
+ mov %eax,20(%rsp)
+ lea -0x359d3e2a(%eax,%esi),%edi
+ mov 24(%rsp),%eax
+ mov %r12d,%ebx
+ mov %ebp,%esi
+ xor 32(%rsp),%eax
+ xor %r11d,%ebx
+ rol $5,%esi
+ xor 56(%rsp),%eax
+ xor %edx,%ebx
+ add %esi,%edi
+ xor 12(%rsp),%eax
+ rol $30,%r11d
+ add %ebx,%edi
+ rol $1,%eax
+ mov %eax,24(%rsp)
+ lea -0x359d3e2a(%eax,%edx),%esi
+ mov 28(%rsp),%eax
+ mov %r11d,%ebx
+ mov %edi,%edx
+ xor 36(%rsp),%eax
+ xor %ebp,%ebx
+ rol $5,%edx
+ xor 60(%rsp),%eax
+ xor %r12d,%ebx
+ add %edx,%esi
+ xor 16(%rsp),%eax
+ rol $30,%ebp
+ add %ebx,%esi
+ rol $1,%eax
+ mov %eax,28(%rsp)
+ lea -0x359d3e2a(%eax,%r12d),%edx
+ mov 32(%rsp),%eax
+ mov %ebp,%ebx
+ mov %esi,%r12d
+ xor 40(%rsp),%eax
+ xor %edi,%ebx
+ rol $5,%r12d
+ xor 0(%rsp),%eax
+ xor %r11d,%ebx
+ add %r12d,%edx
+ xor 20(%rsp),%eax
+ rol $30,%edi
+ add %ebx,%edx
+ rol $1,%eax
+ mov %eax,32(%rsp)
+ lea -0x359d3e2a(%eax,%r11d),%r12d
+ mov 36(%rsp),%eax
+ mov %edi,%ebx
+ mov %edx,%r11d
+ xor 44(%rsp),%eax
+ xor %esi,%ebx
+ rol $5,%r11d
+ xor 4(%rsp),%eax
+ xor %ebp,%ebx
+ add %r11d,%r12d
+ xor 24(%rsp),%eax
+ rol $30,%esi
+ add %ebx,%r12d
+ rol $1,%eax
+ mov %eax,36(%rsp)
+ lea -0x359d3e2a(%eax,%ebp),%r11d
+ mov 40(%rsp),%eax
+ mov %esi,%ebx
+ mov %r12d,%ebp
+ xor 48(%rsp),%eax
+ xor %edx,%ebx
+ rol $5,%ebp
+ xor 8(%rsp),%eax
+ xor %edi,%ebx
+ add %ebp,%r11d
+ xor 28(%rsp),%eax
+ rol $30,%edx
+ add %ebx,%r11d
+ rol $1,%eax
+ mov %eax,40(%rsp)
+ lea -0x359d3e2a(%eax,%edi),%ebp
+ mov 44(%rsp),%eax
+ mov %edx,%ebx
+ mov %r11d,%edi
+ xor 52(%rsp),%eax
+ xor %r12d,%ebx
+ rol $5,%edi
+ xor 12(%rsp),%eax
+ xor %esi,%ebx
+ add %edi,%ebp
+ xor 32(%rsp),%eax
+ rol $30,%r12d
+ add %ebx,%ebp
+ rol $1,%eax
+ mov %eax,44(%rsp)
+ lea -0x359d3e2a(%eax,%esi),%edi
+ mov 48(%rsp),%eax
+ mov %r12d,%ebx
+ mov %ebp,%esi
+ xor 56(%rsp),%eax
+ xor %r11d,%ebx
+ rol $5,%esi
+ xor 16(%rsp),%eax
+ xor %edx,%ebx
+ add %esi,%edi
+ xor 36(%rsp),%eax
+ rol $30,%r11d
+ add %ebx,%edi
+ rol $1,%eax
+ mov %eax,48(%rsp)
+ lea -0x359d3e2a(%eax,%edx),%esi
+ mov 52(%rsp),%eax
+ mov %r11d,%ebx
+ mov %edi,%edx
+ xor 60(%rsp),%eax
+ xor %ebp,%ebx
+ rol $5,%edx
+ xor 20(%rsp),%eax
+ xor %r12d,%ebx
+ add %edx,%esi
+ xor 40(%rsp),%eax
+ rol $30,%ebp
+ add %ebx,%esi
+ rol $1,%eax
+ lea -0x359d3e2a(%eax,%r12d),%edx
+ mov 56(%rsp),%eax
+ mov %ebp,%ebx
+ mov %esi,%r12d
+ xor 0(%rsp),%eax
+ xor %edi,%ebx
+ rol $5,%r12d
+ xor 24(%rsp),%eax
+ xor %r11d,%ebx
+ add %r12d,%edx
+ xor 44(%rsp),%eax
+ rol $30,%edi
+ add %ebx,%edx
+ rol $1,%eax
+ lea -0x359d3e2a(%eax,%r11d),%r12d
+ mov 60(%rsp),%eax
+ mov %edi,%ebx
+ mov %edx,%r11d
+ xor 4(%rsp),%eax
+ xor %esi,%ebx
+ rol $5,%r11d
+ xor 28(%rsp),%eax
+ xor %ebp,%ebx
+ add %r11d,%r12d
+ xor 48(%rsp),%eax
+ rol $30,%esi
+ add %ebx,%r12d
+ rol $1,%eax
+ lea -0x359d3e2a(%eax,%ebp),%r11d
+ mov %esi,%ebx
+ mov %r12d,%ebp
+ xor %edx,%ebx
+ rol $5,%ebp
+ xor %edi,%ebx
+ add %ebp,%r11d
+ rol $30,%edx
+ add %ebx,%r11d
+ // Update and save state information in SHA-1 context
+ add 0(%r8),%r11d
+ add 4(%r8),%r12d
+ add 8(%r8),%edx
+ add 12(%r8),%esi
+ add 16(%r8),%edi
+ mov %r11d,0(%r8)
+ mov %r12d,4(%r8)
+ mov %edx,8(%r8)
+ mov %esi,12(%r8)
+ mov %edi,16(%r8)
+
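+	# Permute the updated chaining values back into the registers the
+	# top of .Lloop expects (a..e in %edx, %esi, %edi, %ebp, %r11d);
+	# the equivalent mov sequence is shown in the comments.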
+ xchg %r11d,%edx # mov %r11d,%edx
+ xchg %r12d,%esi # mov %r12d,%esi
+ xchg %r11d,%edi # mov %edx,%edi
+ xchg %r12d,%ebp # mov %esi,%ebp
+ # mov %edi,%r11d
+ lea 64(%r9),%r9
+ sub $1,%r10
+ jnz .Lloop
+ mov 64(%rsp),%rsp
+ pop %r12
+ pop %rbp
+ pop %rbx
+ ret
+SET_SIZE(sha1_block_data_order)
+.asciz "SHA1 block transform for x86_64, CRYPTOGAMS by <[email protected]>"
+
+#endif /* lint || __lint */
diff --git a/module/icp/asm-x86_64/sha2/sha256_impl.S b/module/icp/asm-x86_64/sha2/sha256_impl.S
new file mode 100644
index 000000000..b6a9bbc86
--- /dev/null
+++ b/module/icp/asm-x86_64/sha2/sha256_impl.S
@@ -0,0 +1,2060 @@
+/*
+ * ====================================================================
+ * Written by Andy Polyakov <[email protected]> for the OpenSSL
+ * project. Rights for redistribution and usage in source and binary
+ * forms are granted according to the OpenSSL license.
+ * ====================================================================
+ *
+ * sha256/512_block procedure for x86_64.
+ *
+ * 40% improvement over compiler-generated code on Opteron. On EM64T
+ * sha256 was observed to run >80% faster and sha512 >40% faster. No
+ * magical tricks, just straight implementation... I really wonder why
+ * gcc [being armed with inline assembler] fails to generate as fast
+ * code. The only thing which is cool about this module is that the
+ * very same instruction sequence is used for both SHA-256 and SHA-512.
+ * In the former case the instructions operate on 32-bit operands, in
+ * the latter on 64-bit ones. All I had to do was get one flavor right;
+ * the other one passed the test right away:-)
+ *
+ * sha256_block runs in ~1005 cycles on Opteron, which gives you
+ * asymptotic performance of 64*1000/1005=63.7MBps times CPU clock
+ * frequency in GHz. sha512_block runs in ~1275 cycles, which results
+ * in 128*1000/1275=100MBps per GHz. Is there room for improvement?
+ * Well, if you compare it to the IA-64 implementation, which maintains
+ * X[16] in the register bank[!], tends to 4 instructions per CPU clock
+ * cycle, and runs in 1003 cycles, then 1275 is a very good result for
+ * the 3-way issue Opteron pipeline with X[16] maintained in memory.
+ * So *if* there is a way to improve it, *then* the only way would be
+ * to try to offload the X[16] updates to the SSE unit, but that would
+ * require "deeper" loop unrolling, which in turn would naturally cause
+ * size blow-up, not to mention increased complexity! And once again,
+ * only *if* it's actually possible to noticeably improve the overall
+ * instruction-level parallelism (ILP) on a given CPU implementation.
+ *
+ * Special note on Intel EM64T. While Opteron CPUs exhibit a perfect
+ * performance ratio of 1.5 between the 64- and 32-bit flavors [see
+ * above], [currently available] EM64T CPUs are apparently far from it.
+ * On the contrary, the 64-bit version, sha512_block, is ~30% *slower*
+ * than the 32-bit sha256_block:-( This is presumably because 64-bit
+ * shifts/rotates are not atomic instructions, but are implemented in
+ * microcode.
+ */
+
+/*
+ * OpenSolaris OS modifications
+ *
+ * Sun elects to use this software under the BSD license.
+ *
+ * This source originates from OpenSSL file sha512-x86_64.pl at
+ * ftp://ftp.openssl.org/snapshot/openssl-0.9.8-stable-SNAP-20080131.tar.gz
+ * (presumably for future OpenSSL release 0.9.8h), with these changes:
+ *
+ * 1. Added perl "use strict" and declared variables.
+ *
+ * 2. Added OpenSolaris ENTRY_NP/SET_SIZE macros from
+ * /usr/include/sys/asm_linkage.h, .ident keywords, and lint(1B) guards.
+ *
+ * 3. Removed x86_64-xlate.pl script (not needed for as(1) or gas(1)
+ * assemblers). Replaced the .picmeup macro with assembler code.
+ *
+ * 4. Added 8 to $ctx, as OpenSolaris OS has an extra 4-byte field, "algotype",
+ * at the beginning of SHA2_CTX (the next field is 8-byte aligned).
+ */
+
+/*
+ * This file was generated by a perl script (sha512-x86_64.pl) that could
+ * be used to generate sha256 and sha512 variants from the same code base.
+ * For our purposes, we only need sha256 and so getting the perl script to
+ * run as part of the build process seemed superfluous. The comments from
+ * the original file have been pasted above.
+ */
+
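+/*
+ * For reference while reading the unrolled code below, here is a
+ * minimal C sketch of one SHA-256 round (illustrative only; ror32, W,
+ * K, t, and the variable names are hypothetical and nothing in this
+ * comment is assembled or compiled):
+ *
+ *	uint32_t S1  = ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25);
+ *	uint32_t ch  = (e & f) ^ (~e & g);
+ *	uint32_t T1  = h + S1 + ch + K[t] + W[t];
+ *	uint32_t S0  = ror32(a, 2) ^ ror32(a, 13) ^ ror32(a, 22);
+ *	uint32_t maj = (a & b) ^ (a & c) ^ (b & c);
+ *	uint32_t T2  = S0 + maj;
+ *	h = g; g = f; f = e; e = d + T1;
+ *	d = c; c = b; b = a; a = T1 + T2;
+ *
+ * The assembly computes Ch as ((f ^ g) & e) ^ g and Maj as
+ * ((a | c) & b) | (a & c), which are equivalent but cheaper, and it
+ * renames the registers from round to round instead of moving values.
+ */
+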
+#if defined(lint) || defined(__lint)
+#include <sys/stdint.h>
+#include <sha2/sha2.h>
+
+/* ARGSUSED */
+void
+SHA256TransformBlocks(SHA2_CTX *ctx, const void *in, size_t num)
+{
+}
+
+
+#else
+#define _ASM
+#include <sys/asm_linkage.h>
+
+ENTRY_NP(SHA256TransformBlocks)
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ mov %rsp,%rbp # copy %rsp
+ shl $4,%rdx # num*16
+ sub $16*4+4*8,%rsp
+ lea (%rsi,%rdx,4),%rdx # inp+num*16*4
+ and $-64,%rsp # align stack frame
+ add $8,%rdi # Skip OpenSolaris field, "algotype"
+ mov %rdi,16*4+0*8(%rsp) # save ctx, 1st arg
+ mov %rsi,16*4+1*8(%rsp) # save inp, 2nd arg
+ mov %rdx,16*4+2*8(%rsp) # save end pointer, "3rd" arg
+ mov %rbp,16*4+3*8(%rsp) # save copy of %rsp
+
+ /.picmeup %rbp
+ / The .picmeup pseudo-directive, from perlasm/x86_64_xlate.pl, puts
+ / the address of the "next" instruction into the target register
+ / (%rbp). This generates these 2 instructions:
+ lea .Llea(%rip),%rbp
+ /nop / .picmeup generates a nop for mod 8 alignment--not needed here
+
+.Llea:
+ lea K256-.(%rbp),%rbp
+
+ mov 4*0(%rdi),%eax
+ mov 4*1(%rdi),%ebx
+ mov 4*2(%rdi),%ecx
+ mov 4*3(%rdi),%edx
+ mov 4*4(%rdi),%r8d
+ mov 4*5(%rdi),%r9d
+ mov 4*6(%rdi),%r10d
+ mov 4*7(%rdi),%r11d
+ jmp .Lloop
+
+.align 16
+.Lloop:
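+	# Rounds 0-15: %rdi serves as the round counter and indexes the
+	# K256 table via %rbp; each message word is byte-swapped and saved
+	# on the stack as it is consumed.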
+ xor %rdi,%rdi
+ mov 4*0(%rsi),%r12d
+ bswap %r12d
+ mov %r8d,%r13d
+ mov %r8d,%r14d
+ mov %r9d,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %r10d,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %r8d,%r15d # (f^g)&e
+ mov %r12d,0(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %r10d,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %r11d,%r12d # T1+=h
+
+ mov %eax,%r11d
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %eax,%r13d
+ mov %eax,%r14d
+
+ ror $2,%r11d
+ ror $13,%r13d
+ mov %eax,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%r11d
+ ror $9,%r13d
+ or %ecx,%r14d # a|c
+
+ xor %r13d,%r11d # h=Sigma0(a)
+ and %ecx,%r15d # a&c
+ add %r12d,%edx # d+=T1
+
+ and %ebx,%r14d # (a|c)&b
+ add %r12d,%r11d # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%r11d # h+=Maj(a,b,c)
+ mov 4*1(%rsi),%r12d
+ bswap %r12d
+ mov %edx,%r13d
+ mov %edx,%r14d
+ mov %r8d,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %r9d,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %edx,%r15d # (f^g)&e
+ mov %r12d,4(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %r9d,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %r10d,%r12d # T1+=h
+
+ mov %r11d,%r10d
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %r11d,%r13d
+ mov %r11d,%r14d
+
+ ror $2,%r10d
+ ror $13,%r13d
+ mov %r11d,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%r10d
+ ror $9,%r13d
+ or %ebx,%r14d # a|c
+
+ xor %r13d,%r10d # h=Sigma0(a)
+ and %ebx,%r15d # a&c
+ add %r12d,%ecx # d+=T1
+
+ and %eax,%r14d # (a|c)&b
+ add %r12d,%r10d # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%r10d # h+=Maj(a,b,c)
+ mov 4*2(%rsi),%r12d
+ bswap %r12d
+ mov %ecx,%r13d
+ mov %ecx,%r14d
+ mov %edx,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %r8d,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %ecx,%r15d # (f^g)&e
+ mov %r12d,8(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %r8d,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %r9d,%r12d # T1+=h
+
+ mov %r10d,%r9d
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %r10d,%r13d
+ mov %r10d,%r14d
+
+ ror $2,%r9d
+ ror $13,%r13d
+ mov %r10d,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%r9d
+ ror $9,%r13d
+ or %eax,%r14d # a|c
+
+ xor %r13d,%r9d # h=Sigma0(a)
+ and %eax,%r15d # a&c
+ add %r12d,%ebx # d+=T1
+
+ and %r11d,%r14d # (a|c)&b
+ add %r12d,%r9d # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%r9d # h+=Maj(a,b,c)
+ mov 4*3(%rsi),%r12d
+ bswap %r12d
+ mov %ebx,%r13d
+ mov %ebx,%r14d
+ mov %ecx,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %edx,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %ebx,%r15d # (f^g)&e
+ mov %r12d,12(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %edx,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %r8d,%r12d # T1+=h
+
+ mov %r9d,%r8d
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %r9d,%r13d
+ mov %r9d,%r14d
+
+ ror $2,%r8d
+ ror $13,%r13d
+ mov %r9d,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%r8d
+ ror $9,%r13d
+ or %r11d,%r14d # a|c
+
+ xor %r13d,%r8d # h=Sigma0(a)
+ and %r11d,%r15d # a&c
+ add %r12d,%eax # d+=T1
+
+ and %r10d,%r14d # (a|c)&b
+ add %r12d,%r8d # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%r8d # h+=Maj(a,b,c)
+ mov 4*4(%rsi),%r12d
+ bswap %r12d
+ mov %eax,%r13d
+ mov %eax,%r14d
+ mov %ebx,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %ecx,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %eax,%r15d # (f^g)&e
+ mov %r12d,16(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %ecx,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %edx,%r12d # T1+=h
+
+ mov %r8d,%edx
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %r8d,%r13d
+ mov %r8d,%r14d
+
+ ror $2,%edx
+ ror $13,%r13d
+ mov %r8d,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%edx
+ ror $9,%r13d
+ or %r10d,%r14d # a|c
+
+ xor %r13d,%edx # h=Sigma0(a)
+ and %r10d,%r15d # a&c
+ add %r12d,%r11d # d+=T1
+
+ and %r9d,%r14d # (a|c)&b
+ add %r12d,%edx # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%edx # h+=Maj(a,b,c)
+ mov 4*5(%rsi),%r12d
+ bswap %r12d
+ mov %r11d,%r13d
+ mov %r11d,%r14d
+ mov %eax,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %ebx,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %r11d,%r15d # (f^g)&e
+ mov %r12d,20(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %ebx,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %ecx,%r12d # T1+=h
+
+ mov %edx,%ecx
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %edx,%r13d
+ mov %edx,%r14d
+
+ ror $2,%ecx
+ ror $13,%r13d
+ mov %edx,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%ecx
+ ror $9,%r13d
+ or %r9d,%r14d # a|c
+
+ xor %r13d,%ecx # h=Sigma0(a)
+ and %r9d,%r15d # a&c
+ add %r12d,%r10d # d+=T1
+
+ and %r8d,%r14d # (a|c)&b
+ add %r12d,%ecx # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%ecx # h+=Maj(a,b,c)
+ mov 4*6(%rsi),%r12d
+ bswap %r12d
+ mov %r10d,%r13d
+ mov %r10d,%r14d
+ mov %r11d,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %eax,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %r10d,%r15d # (f^g)&e
+ mov %r12d,24(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %eax,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %ebx,%r12d # T1+=h
+
+ mov %ecx,%ebx
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %ecx,%r13d
+ mov %ecx,%r14d
+
+ ror $2,%ebx
+ ror $13,%r13d
+ mov %ecx,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%ebx
+ ror $9,%r13d
+ or %r8d,%r14d # a|c
+
+ xor %r13d,%ebx # h=Sigma0(a)
+ and %r8d,%r15d # a&c
+ add %r12d,%r9d # d+=T1
+
+ and %edx,%r14d # (a|c)&b
+ add %r12d,%ebx # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%ebx # h+=Maj(a,b,c)
+ mov 4*7(%rsi),%r12d
+ bswap %r12d
+ mov %r9d,%r13d
+ mov %r9d,%r14d
+ mov %r10d,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %r11d,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %r9d,%r15d # (f^g)&e
+ mov %r12d,28(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %r11d,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %eax,%r12d # T1+=h
+
+ mov %ebx,%eax
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %ebx,%r13d
+ mov %ebx,%r14d
+
+ ror $2,%eax
+ ror $13,%r13d
+ mov %ebx,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%eax
+ ror $9,%r13d
+ or %edx,%r14d # a|c
+
+ xor %r13d,%eax # h=Sigma0(a)
+ and %edx,%r15d # a&c
+ add %r12d,%r8d # d+=T1
+
+ and %ecx,%r14d # (a|c)&b
+ add %r12d,%eax # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%eax # h+=Maj(a,b,c)
+ mov 4*8(%rsi),%r12d
+ bswap %r12d
+ mov %r8d,%r13d
+ mov %r8d,%r14d
+ mov %r9d,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %r10d,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %r8d,%r15d # (f^g)&e
+ mov %r12d,32(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %r10d,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %r11d,%r12d # T1+=h
+
+ mov %eax,%r11d
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %eax,%r13d
+ mov %eax,%r14d
+
+ ror $2,%r11d
+ ror $13,%r13d
+ mov %eax,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%r11d
+ ror $9,%r13d
+ or %ecx,%r14d # a|c
+
+ xor %r13d,%r11d # h=Sigma0(a)
+ and %ecx,%r15d # a&c
+ add %r12d,%edx # d+=T1
+
+ and %ebx,%r14d # (a|c)&b
+ add %r12d,%r11d # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%r11d # h+=Maj(a,b,c)
+ mov 4*9(%rsi),%r12d
+ bswap %r12d
+ mov %edx,%r13d
+ mov %edx,%r14d
+ mov %r8d,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %r9d,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %edx,%r15d # (f^g)&e
+ mov %r12d,36(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %r9d,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %r10d,%r12d # T1+=h
+
+ mov %r11d,%r10d
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %r11d,%r13d
+ mov %r11d,%r14d
+
+ ror $2,%r10d
+ ror $13,%r13d
+ mov %r11d,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%r10d
+ ror $9,%r13d
+ or %ebx,%r14d # a|c
+
+ xor %r13d,%r10d # h=Sigma0(a)
+ and %ebx,%r15d # a&c
+ add %r12d,%ecx # d+=T1
+
+ and %eax,%r14d # (a|c)&b
+ add %r12d,%r10d # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%r10d # h+=Maj(a,b,c)
+ mov 4*10(%rsi),%r12d
+ bswap %r12d
+ mov %ecx,%r13d
+ mov %ecx,%r14d
+ mov %edx,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %r8d,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %ecx,%r15d # (f^g)&e
+ mov %r12d,40(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %r8d,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %r9d,%r12d # T1+=h
+
+ mov %r10d,%r9d
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %r10d,%r13d
+ mov %r10d,%r14d
+
+ ror $2,%r9d
+ ror $13,%r13d
+ mov %r10d,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%r9d
+ ror $9,%r13d
+ or %eax,%r14d # a|c
+
+ xor %r13d,%r9d # h=Sigma0(a)
+ and %eax,%r15d # a&c
+ add %r12d,%ebx # d+=T1
+
+ and %r11d,%r14d # (a|c)&b
+ add %r12d,%r9d # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%r9d # h+=Maj(a,b,c)
+ mov 4*11(%rsi),%r12d
+ bswap %r12d
+ mov %ebx,%r13d
+ mov %ebx,%r14d
+ mov %ecx,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %edx,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %ebx,%r15d # (f^g)&e
+ mov %r12d,44(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %edx,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %r8d,%r12d # T1+=h
+
+ mov %r9d,%r8d
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %r9d,%r13d
+ mov %r9d,%r14d
+
+ ror $2,%r8d
+ ror $13,%r13d
+ mov %r9d,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%r8d
+ ror $9,%r13d
+ or %r11d,%r14d # a|c
+
+ xor %r13d,%r8d # h=Sigma0(a)
+ and %r11d,%r15d # a&c
+ add %r12d,%eax # d+=T1
+
+ and %r10d,%r14d # (a|c)&b
+ add %r12d,%r8d # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%r8d # h+=Maj(a,b,c)
+ mov 4*12(%rsi),%r12d
+ bswap %r12d
+ mov %eax,%r13d
+ mov %eax,%r14d
+ mov %ebx,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %ecx,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %eax,%r15d # (f^g)&e
+ mov %r12d,48(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %ecx,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %edx,%r12d # T1+=h
+
+ mov %r8d,%edx
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %r8d,%r13d
+ mov %r8d,%r14d
+
+ ror $2,%edx
+ ror $13,%r13d
+ mov %r8d,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%edx
+ ror $9,%r13d
+ or %r10d,%r14d # a|c
+
+ xor %r13d,%edx # h=Sigma0(a)
+ and %r10d,%r15d # a&c
+ add %r12d,%r11d # d+=T1
+
+ and %r9d,%r14d # (a|c)&b
+ add %r12d,%edx # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%edx # h+=Maj(a,b,c)
+ mov 4*13(%rsi),%r12d
+ bswap %r12d
+ mov %r11d,%r13d
+ mov %r11d,%r14d
+ mov %eax,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %ebx,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %r11d,%r15d # (f^g)&e
+ mov %r12d,52(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %ebx,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %ecx,%r12d # T1+=h
+
+ mov %edx,%ecx
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %edx,%r13d
+ mov %edx,%r14d
+
+ ror $2,%ecx
+ ror $13,%r13d
+ mov %edx,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%ecx
+ ror $9,%r13d
+ or %r9d,%r14d # a|c
+
+ xor %r13d,%ecx # h=Sigma0(a)
+ and %r9d,%r15d # a&c
+ add %r12d,%r10d # d+=T1
+
+ and %r8d,%r14d # (a|c)&b
+ add %r12d,%ecx # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%ecx # h+=Maj(a,b,c)
+ mov 4*14(%rsi),%r12d
+ bswap %r12d
+ mov %r10d,%r13d
+ mov %r10d,%r14d
+ mov %r11d,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %eax,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %r10d,%r15d # (f^g)&e
+ mov %r12d,56(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %eax,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %ebx,%r12d # T1+=h
+
+ mov %ecx,%ebx
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %ecx,%r13d
+ mov %ecx,%r14d
+
+ ror $2,%ebx
+ ror $13,%r13d
+ mov %ecx,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%ebx
+ ror $9,%r13d
+ or %r8d,%r14d # a|c
+
+ xor %r13d,%ebx # h=Sigma0(a)
+ and %r8d,%r15d # a&c
+ add %r12d,%r9d # d+=T1
+
+ and %edx,%r14d # (a|c)&b
+ add %r12d,%ebx # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%ebx # h+=Maj(a,b,c)
+ mov 4*15(%rsi),%r12d
+ bswap %r12d
+ mov %r9d,%r13d
+ mov %r9d,%r14d
+ mov %r10d,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %r11d,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %r9d,%r15d # (f^g)&e
+ mov %r12d,60(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %r11d,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %eax,%r12d # T1+=h
+
+ mov %ebx,%eax
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %ebx,%r13d
+ mov %ebx,%r14d
+
+ ror $2,%eax
+ ror $13,%r13d
+ mov %ebx,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%eax
+ ror $9,%r13d
+ or %edx,%r14d # a|c
+
+ xor %r13d,%eax # h=Sigma0(a)
+ and %edx,%r15d # a&c
+ add %r12d,%r8d # d+=T1
+
+ and %ecx,%r14d # (a|c)&b
+ add %r12d,%eax # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%eax # h+=Maj(a,b,c)
+ jmp .Lrounds_16_xx
+.align 16
+.Lrounds_16_xx:
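+	# Rounds 16-63: the schedule W[t] = sigma1(W[t-2]) + W[t-7] +
+	#	sigma0(W[t-15]) + W[t-16] is computed in place, where
+	#	sigma0(x) = ror7(x) ^ ror18(x) ^ shr3(x) and
+	#	sigma1(x) = ror17(x) ^ ror19(x) ^ shr10(x).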
+ mov 4(%rsp),%r13d
+ mov 56(%rsp),%r12d
+
+ mov %r13d,%r15d
+
+ shr $3,%r13d
+ ror $7,%r15d
+
+ xor %r15d,%r13d
+ ror $11,%r15d
+
+ xor %r15d,%r13d # sigma0(X[(i+1)&0xf])
+ mov %r12d,%r14d
+
+ shr $10,%r12d
+ ror $17,%r14d
+
+ xor %r14d,%r12d
+ ror $2,%r14d
+
+ xor %r14d,%r12d # sigma1(X[(i+14)&0xf])
+
+ add %r13d,%r12d
+
+ add 36(%rsp),%r12d
+
+ add 0(%rsp),%r12d
+ mov %r8d,%r13d
+ mov %r8d,%r14d
+ mov %r9d,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %r10d,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %r8d,%r15d # (f^g)&e
+ mov %r12d,0(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %r10d,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %r11d,%r12d # T1+=h
+
+ mov %eax,%r11d
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %eax,%r13d
+ mov %eax,%r14d
+
+ ror $2,%r11d
+ ror $13,%r13d
+ mov %eax,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%r11d
+ ror $9,%r13d
+ or %ecx,%r14d # a|c
+
+ xor %r13d,%r11d # h=Sigma0(a)
+ and %ecx,%r15d # a&c
+ add %r12d,%edx # d+=T1
+
+ and %ebx,%r14d # (a|c)&b
+ add %r12d,%r11d # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%r11d # h+=Maj(a,b,c)
+ mov 8(%rsp),%r13d
+ mov 60(%rsp),%r12d
+
+ mov %r13d,%r15d
+
+ shr $3,%r13d
+ ror $7,%r15d
+
+ xor %r15d,%r13d
+ ror $11,%r15d
+
+ xor %r15d,%r13d # sigma0(X[(i+1)&0xf])
+ mov %r12d,%r14d
+
+ shr $10,%r12d
+ ror $17,%r14d
+
+ xor %r14d,%r12d
+ ror $2,%r14d
+
+ xor %r14d,%r12d # sigma1(X[(i+14)&0xf])
+
+ add %r13d,%r12d
+
+ add 40(%rsp),%r12d
+
+ add 4(%rsp),%r12d
+ mov %edx,%r13d
+ mov %edx,%r14d
+ mov %r8d,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %r9d,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %edx,%r15d # (f^g)&e
+ mov %r12d,4(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %r9d,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %r10d,%r12d # T1+=h
+
+ mov %r11d,%r10d
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %r11d,%r13d
+ mov %r11d,%r14d
+
+ ror $2,%r10d
+ ror $13,%r13d
+ mov %r11d,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%r10d
+ ror $9,%r13d
+ or %ebx,%r14d # a|c
+
+ xor %r13d,%r10d # h=Sigma0(a)
+ and %ebx,%r15d # a&c
+ add %r12d,%ecx # d+=T1
+
+ and %eax,%r14d # (a|c)&b
+ add %r12d,%r10d # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%r10d # h+=Maj(a,b,c)
+ mov 12(%rsp),%r13d
+ mov 0(%rsp),%r12d
+
+ mov %r13d,%r15d
+
+ shr $3,%r13d
+ ror $7,%r15d
+
+ xor %r15d,%r13d
+ ror $11,%r15d
+
+ xor %r15d,%r13d # sigma0(X[(i+1)&0xf])
+ mov %r12d,%r14d
+
+ shr $10,%r12d
+ ror $17,%r14d
+
+ xor %r14d,%r12d
+ ror $2,%r14d
+
+ xor %r14d,%r12d # sigma1(X[(i+14)&0xf])
+
+ add %r13d,%r12d
+
+ add 44(%rsp),%r12d
+
+ add 8(%rsp),%r12d
+ mov %ecx,%r13d
+ mov %ecx,%r14d
+ mov %edx,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %r8d,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %ecx,%r15d # (f^g)&e
+ mov %r12d,8(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %r8d,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %r9d,%r12d # T1+=h
+
+ mov %r10d,%r9d
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %r10d,%r13d
+ mov %r10d,%r14d
+
+ ror $2,%r9d
+ ror $13,%r13d
+ mov %r10d,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%r9d
+ ror $9,%r13d
+ or %eax,%r14d # a|c
+
+ xor %r13d,%r9d # h=Sigma0(a)
+ and %eax,%r15d # a&c
+ add %r12d,%ebx # d+=T1
+
+ and %r11d,%r14d # (a|c)&b
+ add %r12d,%r9d # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%r9d # h+=Maj(a,b,c)
+ mov 16(%rsp),%r13d
+ mov 4(%rsp),%r12d
+
+ mov %r13d,%r15d
+
+ shr $3,%r13d
+ ror $7,%r15d
+
+ xor %r15d,%r13d
+ ror $11,%r15d
+
+ xor %r15d,%r13d # sigma0(X[(i+1)&0xf])
+ mov %r12d,%r14d
+
+ shr $10,%r12d
+ ror $17,%r14d
+
+ xor %r14d,%r12d
+ ror $2,%r14d
+
+ xor %r14d,%r12d # sigma1(X[(i+14)&0xf])
+
+ add %r13d,%r12d
+
+ add 48(%rsp),%r12d
+
+ add 12(%rsp),%r12d
+ mov %ebx,%r13d
+ mov %ebx,%r14d
+ mov %ecx,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %edx,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %ebx,%r15d # (f^g)&e
+ mov %r12d,12(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %edx,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %r8d,%r12d # T1+=h
+
+ mov %r9d,%r8d
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %r9d,%r13d
+ mov %r9d,%r14d
+
+ ror $2,%r8d
+ ror $13,%r13d
+ mov %r9d,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%r8d
+ ror $9,%r13d
+ or %r11d,%r14d # a|c
+
+ xor %r13d,%r8d # h=Sigma0(a)
+ and %r11d,%r15d # a&c
+ add %r12d,%eax # d+=T1
+
+ and %r10d,%r14d # (a|c)&b
+ add %r12d,%r8d # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%r8d # h+=Maj(a,b,c)
+ mov 20(%rsp),%r13d
+ mov 8(%rsp),%r12d
+
+ mov %r13d,%r15d
+
+ shr $3,%r13d
+ ror $7,%r15d
+
+ xor %r15d,%r13d
+ ror $11,%r15d
+
+ xor %r15d,%r13d # sigma0(X[(i+1)&0xf])
+ mov %r12d,%r14d
+
+ shr $10,%r12d
+ ror $17,%r14d
+
+ xor %r14d,%r12d
+ ror $2,%r14d
+
+ xor %r14d,%r12d # sigma1(X[(i+14)&0xf])
+
+ add %r13d,%r12d
+
+ add 52(%rsp),%r12d
+
+ add 16(%rsp),%r12d
+ mov %eax,%r13d
+ mov %eax,%r14d
+ mov %ebx,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %ecx,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %eax,%r15d # (f^g)&e
+ mov %r12d,16(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %ecx,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %edx,%r12d # T1+=h
+
+ mov %r8d,%edx
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %r8d,%r13d
+ mov %r8d,%r14d
+
+ ror $2,%edx
+ ror $13,%r13d
+ mov %r8d,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%edx
+ ror $9,%r13d
+ or %r10d,%r14d # a|c
+
+ xor %r13d,%edx # h=Sigma0(a)
+ and %r10d,%r15d # a&c
+ add %r12d,%r11d # d+=T1
+
+ and %r9d,%r14d # (a|c)&b
+ add %r12d,%edx # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%edx # h+=Maj(a,b,c)
+ mov 24(%rsp),%r13d
+ mov 12(%rsp),%r12d
+
+ mov %r13d,%r15d
+
+ shr $3,%r13d
+ ror $7,%r15d
+
+ xor %r15d,%r13d
+ ror $11,%r15d
+
+ xor %r15d,%r13d # sigma0(X[(i+1)&0xf])
+ mov %r12d,%r14d
+
+ shr $10,%r12d
+ ror $17,%r14d
+
+ xor %r14d,%r12d
+ ror $2,%r14d
+
+ xor %r14d,%r12d # sigma1(X[(i+14)&0xf])
+
+ add %r13d,%r12d
+
+ add 56(%rsp),%r12d
+
+ add 20(%rsp),%r12d
+ mov %r11d,%r13d
+ mov %r11d,%r14d
+ mov %eax,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %ebx,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %r11d,%r15d # (f^g)&e
+ mov %r12d,20(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %ebx,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %ecx,%r12d # T1+=h
+
+ mov %edx,%ecx
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %edx,%r13d
+ mov %edx,%r14d
+
+ ror $2,%ecx
+ ror $13,%r13d
+ mov %edx,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%ecx
+ ror $9,%r13d
+ or %r9d,%r14d # a|c
+
+ xor %r13d,%ecx # h=Sigma0(a)
+ and %r9d,%r15d # a&c
+ add %r12d,%r10d # d+=T1
+
+ and %r8d,%r14d # (a|c)&b
+ add %r12d,%ecx # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%ecx # h+=Maj(a,b,c)
+ mov 28(%rsp),%r13d
+ mov 16(%rsp),%r12d
+
+ mov %r13d,%r15d
+
+ shr $3,%r13d
+ ror $7,%r15d
+
+ xor %r15d,%r13d
+ ror $11,%r15d
+
+ xor %r15d,%r13d # sigma0(X[(i+1)&0xf])
+ mov %r12d,%r14d
+
+ shr $10,%r12d
+ ror $17,%r14d
+
+ xor %r14d,%r12d
+ ror $2,%r14d
+
+ xor %r14d,%r12d # sigma1(X[(i+14)&0xf])
+
+ add %r13d,%r12d
+
+ add 60(%rsp),%r12d
+
+ add 24(%rsp),%r12d
+ mov %r10d,%r13d
+ mov %r10d,%r14d
+ mov %r11d,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %eax,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %r10d,%r15d # (f^g)&e
+ mov %r12d,24(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %eax,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %ebx,%r12d # T1+=h
+
+ mov %ecx,%ebx
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %ecx,%r13d
+ mov %ecx,%r14d
+
+ ror $2,%ebx
+ ror $13,%r13d
+ mov %ecx,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%ebx
+ ror $9,%r13d
+ or %r8d,%r14d # a|c
+
+ xor %r13d,%ebx # h=Sigma0(a)
+ and %r8d,%r15d # a&c
+ add %r12d,%r9d # d+=T1
+
+ and %edx,%r14d # (a|c)&b
+ add %r12d,%ebx # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%ebx # h+=Maj(a,b,c)
+ mov 32(%rsp),%r13d
+ mov 20(%rsp),%r12d
+
+ mov %r13d,%r15d
+
+ shr $3,%r13d
+ ror $7,%r15d
+
+ xor %r15d,%r13d
+ ror $11,%r15d
+
+ xor %r15d,%r13d # sigma0(X[(i+1)&0xf])
+ mov %r12d,%r14d
+
+ shr $10,%r12d
+ ror $17,%r14d
+
+ xor %r14d,%r12d
+ ror $2,%r14d
+
+ xor %r14d,%r12d # sigma1(X[(i+14)&0xf])
+
+ add %r13d,%r12d
+
+ add 0(%rsp),%r12d
+
+ add 28(%rsp),%r12d
+ mov %r9d,%r13d
+ mov %r9d,%r14d
+ mov %r10d,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %r11d,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %r9d,%r15d # (f^g)&e
+ mov %r12d,28(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %r11d,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %eax,%r12d # T1+=h
+
+ mov %ebx,%eax
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %ebx,%r13d
+ mov %ebx,%r14d
+
+ ror $2,%eax
+ ror $13,%r13d
+ mov %ebx,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%eax
+ ror $9,%r13d
+ or %edx,%r14d # a|c
+
+ xor %r13d,%eax # h=Sigma0(a)
+ and %edx,%r15d # a&c
+ add %r12d,%r8d # d+=T1
+
+ and %ecx,%r14d # (a|c)&b
+ add %r12d,%eax # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%eax # h+=Maj(a,b,c)
+ mov 36(%rsp),%r13d
+ mov 24(%rsp),%r12d
+
+ mov %r13d,%r15d
+
+ shr $3,%r13d
+ ror $7,%r15d
+
+ xor %r15d,%r13d
+ ror $11,%r15d
+
+ xor %r15d,%r13d # sigma0(X[(i+1)&0xf])
+ mov %r12d,%r14d
+
+ shr $10,%r12d
+ ror $17,%r14d
+
+ xor %r14d,%r12d
+ ror $2,%r14d
+
+ xor %r14d,%r12d # sigma1(X[(i+14)&0xf])
+
+ add %r13d,%r12d
+
+ add 4(%rsp),%r12d
+
+ add 32(%rsp),%r12d
+ mov %r8d,%r13d
+ mov %r8d,%r14d
+ mov %r9d,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %r10d,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %r8d,%r15d # (f^g)&e
+ mov %r12d,32(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %r10d,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %r11d,%r12d # T1+=h
+
+ mov %eax,%r11d
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %eax,%r13d
+ mov %eax,%r14d
+
+ ror $2,%r11d
+ ror $13,%r13d
+ mov %eax,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%r11d
+ ror $9,%r13d
+ or %ecx,%r14d # a|c
+
+ xor %r13d,%r11d # h=Sigma0(a)
+ and %ecx,%r15d # a&c
+ add %r12d,%edx # d+=T1
+
+ and %ebx,%r14d # (a|c)&b
+ add %r12d,%r11d # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%r11d # h+=Maj(a,b,c)
+ mov 40(%rsp),%r13d
+ mov 28(%rsp),%r12d
+
+ mov %r13d,%r15d
+
+ shr $3,%r13d
+ ror $7,%r15d
+
+ xor %r15d,%r13d
+ ror $11,%r15d
+
+ xor %r15d,%r13d # sigma0(X[(i+1)&0xf])
+ mov %r12d,%r14d
+
+ shr $10,%r12d
+ ror $17,%r14d
+
+ xor %r14d,%r12d
+ ror $2,%r14d
+
+ xor %r14d,%r12d # sigma1(X[(i+14)&0xf])
+
+ add %r13d,%r12d
+
+ add 8(%rsp),%r12d
+
+ add 36(%rsp),%r12d
+ mov %edx,%r13d
+ mov %edx,%r14d
+ mov %r8d,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %r9d,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %edx,%r15d # (f^g)&e
+ mov %r12d,36(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %r9d,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %r10d,%r12d # T1+=h
+
+ mov %r11d,%r10d
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %r11d,%r13d
+ mov %r11d,%r14d
+
+ ror $2,%r10d
+ ror $13,%r13d
+ mov %r11d,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%r10d
+ ror $9,%r13d
+ or %ebx,%r14d # a|c
+
+ xor %r13d,%r10d # h=Sigma0(a)
+ and %ebx,%r15d # a&c
+ add %r12d,%ecx # d+=T1
+
+ and %eax,%r14d # (a|c)&b
+ add %r12d,%r10d # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%r10d # h+=Maj(a,b,c)
+ mov 44(%rsp),%r13d
+ mov 32(%rsp),%r12d
+
+ mov %r13d,%r15d
+
+ shr $3,%r13d
+ ror $7,%r15d
+
+ xor %r15d,%r13d
+ ror $11,%r15d
+
+ xor %r15d,%r13d # sigma0(X[(i+1)&0xf])
+ mov %r12d,%r14d
+
+ shr $10,%r12d
+ ror $17,%r14d
+
+ xor %r14d,%r12d
+ ror $2,%r14d
+
+ xor %r14d,%r12d # sigma1(X[(i+14)&0xf])
+
+ add %r13d,%r12d
+
+ add 12(%rsp),%r12d
+
+ add 40(%rsp),%r12d
+ mov %ecx,%r13d
+ mov %ecx,%r14d
+ mov %edx,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %r8d,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %ecx,%r15d # (f^g)&e
+ mov %r12d,40(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %r8d,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %r9d,%r12d # T1+=h
+
+ mov %r10d,%r9d
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %r10d,%r13d
+ mov %r10d,%r14d
+
+ ror $2,%r9d
+ ror $13,%r13d
+ mov %r10d,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%r9d
+ ror $9,%r13d
+ or %eax,%r14d # a|c
+
+ xor %r13d,%r9d # h=Sigma0(a)
+ and %eax,%r15d # a&c
+ add %r12d,%ebx # d+=T1
+
+ and %r11d,%r14d # (a|c)&b
+ add %r12d,%r9d # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%r9d # h+=Maj(a,b,c)
+ mov 48(%rsp),%r13d
+ mov 36(%rsp),%r12d
+
+ mov %r13d,%r15d
+
+ shr $3,%r13d
+ ror $7,%r15d
+
+ xor %r15d,%r13d
+ ror $11,%r15d
+
+ xor %r15d,%r13d # sigma0(X[(i+1)&0xf])
+ mov %r12d,%r14d
+
+ shr $10,%r12d
+ ror $17,%r14d
+
+ xor %r14d,%r12d
+ ror $2,%r14d
+
+ xor %r14d,%r12d # sigma1(X[(i+14)&0xf])
+
+ add %r13d,%r12d
+
+ add 16(%rsp),%r12d
+
+ add 44(%rsp),%r12d
+ mov %ebx,%r13d
+ mov %ebx,%r14d
+ mov %ecx,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %edx,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %ebx,%r15d # (f^g)&e
+ mov %r12d,44(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %edx,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %r8d,%r12d # T1+=h
+
+ mov %r9d,%r8d
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %r9d,%r13d
+ mov %r9d,%r14d
+
+ ror $2,%r8d
+ ror $13,%r13d
+ mov %r9d,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%r8d
+ ror $9,%r13d
+ or %r11d,%r14d # a|c
+
+ xor %r13d,%r8d # h=Sigma0(a)
+ and %r11d,%r15d # a&c
+ add %r12d,%eax # d+=T1
+
+ and %r10d,%r14d # (a|c)&b
+ add %r12d,%r8d # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%r8d # h+=Maj(a,b,c)
+ mov 52(%rsp),%r13d
+ mov 40(%rsp),%r12d
+
+ mov %r13d,%r15d
+
+ shr $3,%r13d
+ ror $7,%r15d
+
+ xor %r15d,%r13d
+ ror $11,%r15d
+
+ xor %r15d,%r13d # sigma0(X[(i+1)&0xf])
+ mov %r12d,%r14d
+
+ shr $10,%r12d
+ ror $17,%r14d
+
+ xor %r14d,%r12d
+ ror $2,%r14d
+
+ xor %r14d,%r12d # sigma1(X[(i+14)&0xf])
+
+ add %r13d,%r12d
+
+ add 20(%rsp),%r12d
+
+ add 48(%rsp),%r12d
+ mov %eax,%r13d
+ mov %eax,%r14d
+ mov %ebx,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %ecx,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %eax,%r15d # (f^g)&e
+ mov %r12d,48(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %ecx,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %edx,%r12d # T1+=h
+
+ mov %r8d,%edx
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %r8d,%r13d
+ mov %r8d,%r14d
+
+ ror $2,%edx
+ ror $13,%r13d
+ mov %r8d,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%edx
+ ror $9,%r13d
+ or %r10d,%r14d # a|c
+
+ xor %r13d,%edx # h=Sigma0(a)
+ and %r10d,%r15d # a&c
+ add %r12d,%r11d # d+=T1
+
+ and %r9d,%r14d # (a|c)&b
+ add %r12d,%edx # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%edx # h+=Maj(a,b,c)
+ mov 56(%rsp),%r13d
+ mov 44(%rsp),%r12d
+
+ mov %r13d,%r15d
+
+ shr $3,%r13d
+ ror $7,%r15d
+
+ xor %r15d,%r13d
+ ror $11,%r15d
+
+ xor %r15d,%r13d # sigma0(X[(i+1)&0xf])
+ mov %r12d,%r14d
+
+ shr $10,%r12d
+ ror $17,%r14d
+
+ xor %r14d,%r12d
+ ror $2,%r14d
+
+ xor %r14d,%r12d # sigma1(X[(i+14)&0xf])
+
+ add %r13d,%r12d
+
+ add 24(%rsp),%r12d
+
+ add 52(%rsp),%r12d
+ mov %r11d,%r13d
+ mov %r11d,%r14d
+ mov %eax,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %ebx,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %r11d,%r15d # (f^g)&e
+ mov %r12d,52(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %ebx,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %ecx,%r12d # T1+=h
+
+ mov %edx,%ecx
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %edx,%r13d
+ mov %edx,%r14d
+
+ ror $2,%ecx
+ ror $13,%r13d
+ mov %edx,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%ecx
+ ror $9,%r13d
+ or %r9d,%r14d # a|c
+
+ xor %r13d,%ecx # h=Sigma0(a)
+ and %r9d,%r15d # a&c
+ add %r12d,%r10d # d+=T1
+
+ and %r8d,%r14d # (a|c)&b
+ add %r12d,%ecx # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%ecx # h+=Maj(a,b,c)
+ mov 60(%rsp),%r13d
+ mov 48(%rsp),%r12d
+
+ mov %r13d,%r15d
+
+ shr $3,%r13d
+ ror $7,%r15d
+
+ xor %r15d,%r13d
+ ror $11,%r15d
+
+ xor %r15d,%r13d # sigma0(X[(i+1)&0xf])
+ mov %r12d,%r14d
+
+ shr $10,%r12d
+ ror $17,%r14d
+
+ xor %r14d,%r12d
+ ror $2,%r14d
+
+ xor %r14d,%r12d # sigma1(X[(i+14)&0xf])
+
+ add %r13d,%r12d
+
+ add 28(%rsp),%r12d
+
+ add 56(%rsp),%r12d
+ mov %r10d,%r13d
+ mov %r10d,%r14d
+ mov %r11d,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %eax,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %r10d,%r15d # (f^g)&e
+ mov %r12d,56(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %eax,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %ebx,%r12d # T1+=h
+
+ mov %ecx,%ebx
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %ecx,%r13d
+ mov %ecx,%r14d
+
+ ror $2,%ebx
+ ror $13,%r13d
+ mov %ecx,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%ebx
+ ror $9,%r13d
+ or %r8d,%r14d # a|c
+
+ xor %r13d,%ebx # h=Sigma0(a)
+ and %r8d,%r15d # a&c
+ add %r12d,%r9d # d+=T1
+
+ and %edx,%r14d # (a|c)&b
+ add %r12d,%ebx # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%ebx # h+=Maj(a,b,c)
+ mov 0(%rsp),%r13d
+ mov 52(%rsp),%r12d
+
+ mov %r13d,%r15d
+
+ shr $3,%r13d
+ ror $7,%r15d
+
+ xor %r15d,%r13d
+ ror $11,%r15d
+
+ xor %r15d,%r13d # sigma0(X[(i+1)&0xf])
+ mov %r12d,%r14d
+
+ shr $10,%r12d
+ ror $17,%r14d
+
+ xor %r14d,%r12d
+ ror $2,%r14d
+
+ xor %r14d,%r12d # sigma1(X[(i+14)&0xf])
+
+ add %r13d,%r12d
+
+ add 32(%rsp),%r12d
+
+ add 60(%rsp),%r12d
+ mov %r9d,%r13d
+ mov %r9d,%r14d
+ mov %r10d,%r15d
+
+ ror $6,%r13d
+ ror $11,%r14d
+ xor %r11d,%r15d # f^g
+
+ xor %r14d,%r13d
+ ror $14,%r14d
+ and %r9d,%r15d # (f^g)&e
+ mov %r12d,60(%rsp)
+
+ xor %r14d,%r13d # Sigma1(e)
+ xor %r11d,%r15d # Ch(e,f,g)=((f^g)&e)^g
+ add %eax,%r12d # T1+=h
+
+ mov %ebx,%eax
+ add %r13d,%r12d # T1+=Sigma1(e)
+
+ add %r15d,%r12d # T1+=Ch(e,f,g)
+ mov %ebx,%r13d
+ mov %ebx,%r14d
+
+ ror $2,%eax
+ ror $13,%r13d
+ mov %ebx,%r15d
+ add (%rbp,%rdi,4),%r12d # T1+=K[round]
+
+ xor %r13d,%eax
+ ror $9,%r13d
+ or %edx,%r14d # a|c
+
+ xor %r13d,%eax # h=Sigma0(a)
+ and %edx,%r15d # a&c
+ add %r12d,%r8d # d+=T1
+
+ and %ecx,%r14d # (a|c)&b
+ add %r12d,%eax # h+=T1
+
+ or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c)
+ lea 1(%rdi),%rdi # round++
+
+ add %r14d,%eax # h+=Maj(a,b,c)
+ cmp $64,%rdi
+ jb .Lrounds_16_xx
+
+ mov 16*4+0*8(%rsp),%rdi
+ lea 16*4(%rsi),%rsi
+
+ add 4*0(%rdi),%eax
+ add 4*1(%rdi),%ebx
+ add 4*2(%rdi),%ecx
+ add 4*3(%rdi),%edx
+ add 4*4(%rdi),%r8d
+ add 4*5(%rdi),%r9d
+ add 4*6(%rdi),%r10d
+ add 4*7(%rdi),%r11d
+
+ cmp 16*4+2*8(%rsp),%rsi
+
+ mov %eax,4*0(%rdi)
+ mov %ebx,4*1(%rdi)
+ mov %ecx,4*2(%rdi)
+ mov %edx,4*3(%rdi)
+ mov %r8d,4*4(%rdi)
+ mov %r9d,4*5(%rdi)
+ mov %r10d,4*6(%rdi)
+ mov %r11d,4*7(%rdi)
+ jb .Lloop
+
+ mov 16*4+3*8(%rsp),%rsp
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+
+ ret
+SET_SIZE(SHA256TransformBlocks)
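+
+/*
+ * Editor's note: a minimal C reference model of the unrolled round logic
+ * above (per FIPS 180-4; the helper names are hypothetical and this is not
+ * part of the build). Each round block computes, with the a..h state
+ * rotating through %eax..%r11d:
+ *
+ *	#define ROR(x, n)	(((x) >> (n)) | ((x) << (32 - (n))))
+ *
+ *	uint32_t S1 = ROR(e, 6) ^ ROR(e, 11) ^ ROR(e, 25);	// Sigma1(e)
+ *	uint32_t ch = (e & f) ^ (~e & g);			// Ch(e,f,g)
+ *	uint32_t S0 = ROR(a, 2) ^ ROR(a, 13) ^ ROR(a, 22);	// Sigma0(a)
+ *	uint32_t maj = ((a | c) & b) | (a & c);	// == (a&b)^(a&c)^(b&c)
+ *	uint32_t T1 = h + S1 + ch + K256[i] + W[i & 0xf];
+ *	uint32_t T2 = S0 + maj;
+ *	h = g; g = f; f = e; e = d + T1;
+ *	d = c; c = b; b = a; a = T1 + T2;
+ *
+ * and rounds 16..63 expand the 16-word message schedule in place, with
+ * w1 = W[(i + 1) & 0xf] and w14 = W[(i + 14) & 0xf]:
+ *
+ *	W[i & 0xf] += W[(i + 9) & 0xf] +
+ *	    (ROR(w1, 7) ^ ROR(w1, 18) ^ (w1 >> 3)) +	// sigma0
+ *	    (ROR(w14, 17) ^ ROR(w14, 19) ^ (w14 >> 10));	// sigma1
+ */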
+
+.align 64
+.type K256,@object
+K256:
+ .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+ .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+ .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+ .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+ .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+ .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+ .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+ .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+ .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+ .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+ .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+ .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+ .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+ .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+ .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+ .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+#endif /* !lint && !__lint */
diff --git a/module/icp/core/kcf_callprov.c b/module/icp/core/kcf_callprov.c
new file mode 100644
index 000000000..38927dcc0
--- /dev/null
+++ b/module/icp/core/kcf_callprov.c
@@ -0,0 +1,1567 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/crypto/common.h>
+#include <sys/crypto/impl.h>
+#include <sys/crypto/sched_impl.h>
+
+static int kcf_emulate_dual(kcf_provider_desc_t *, crypto_ctx_t *,
+ kcf_req_params_t *);
+
+void
+kcf_free_triedlist(kcf_prov_tried_t *list)
+{
+ kcf_prov_tried_t *l;
+
+ while ((l = list) != NULL) {
+ list = list->pt_next;
+ KCF_PROV_REFRELE(l->pt_pd);
+ kmem_free(l, sizeof (kcf_prov_tried_t));
+ }
+}
+
+kcf_prov_tried_t *
+kcf_insert_triedlist(kcf_prov_tried_t **list, kcf_provider_desc_t *pd,
+ int kmflag)
+{
+ kcf_prov_tried_t *l;
+
+ l = kmem_alloc(sizeof (kcf_prov_tried_t), kmflag);
+ if (l == NULL)
+ return (NULL);
+
+ l->pt_pd = pd;
+ l->pt_next = *list;
+ *list = l;
+
+ return (l);
+}
+
+static boolean_t
+is_in_triedlist(kcf_provider_desc_t *pd, kcf_prov_tried_t *triedl)
+{
+ while (triedl != NULL) {
+ if (triedl->pt_pd == pd)
+ return (B_TRUE);
+ triedl = triedl->pt_next;
+	}
+
+ return (B_FALSE);
+}
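+
+/*
+ * Editor's note: an illustrative sketch (not part of this file) of the
+ * retry pattern the tried-list supports. A provider that fails is recorded
+ * so the next lookup skips it; the list entry takes over the hold the
+ * lookup placed on the provider, and kcf_free_triedlist() releases them
+ * all:
+ *
+ *	kcf_prov_tried_t *tried = NULL;
+ *	kcf_provider_desc_t *pd;
+ *	int err;
+ *
+ *	while ((pd = kcf_get_mech_provider(mech_type, NULL, &err, tried,
+ *	    CRYPTO_FG_ENCRYPT, B_FALSE, 0)) != NULL) {
+ *		err = ...;		// submit the request to pd
+ *		if (err != CRYPTO_DEVICE_ERROR)
+ *			break;		// done, or a non-retryable error
+ *		if (kcf_insert_triedlist(&tried, pd, KM_SLEEP) == NULL)
+ *			break;
+ *	}
+ *	if (tried != NULL)
+ *		kcf_free_triedlist(tried);
+ */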
+
+/*
+ * Search a mech entry's hardware provider list for the specified
+ * provider. Return true if found.
+ */
+static boolean_t
+is_valid_provider_for_mech(kcf_provider_desc_t *pd, kcf_mech_entry_t *me,
+ crypto_func_group_t fg)
+{
+ kcf_prov_mech_desc_t *prov_chain;
+
+ prov_chain = me->me_hw_prov_chain;
+ if (prov_chain != NULL) {
+ ASSERT(me->me_num_hwprov > 0);
+ for (; prov_chain != NULL; prov_chain = prov_chain->pm_next) {
+ if (prov_chain->pm_prov_desc == pd &&
+ IS_FG_SUPPORTED(prov_chain, fg)) {
+ return (B_TRUE);
+ }
+ }
+ }
+ return (B_FALSE);
+}
+
+/*
+ * This routine, given a logical provider, returns the least loaded
+ * real provider belonging to the logical provider. The provider must
+ * be able to do the specified mechanism(s), i.e. the mechanism must
+ * not have been disabled for it, and it must support the requested
+ * function group (fg).
+ * On success a hold is taken on the chosen provider, *new is set to
+ * point to it, and CRYPTO_SUCCESS is returned. If no provider can be
+ * found, *new is set to NULL and an error code is returned.
+ */
+int
+kcf_get_hardware_provider(crypto_mech_type_t mech_type_1,
+ crypto_mech_type_t mech_type_2, boolean_t call_restrict,
+ kcf_provider_desc_t *old, kcf_provider_desc_t **new, crypto_func_group_t fg)
+{
+ kcf_provider_desc_t *provider, *real_pd = old;
+ kcf_provider_desc_t *gpd = NULL; /* good provider */
+ kcf_provider_desc_t *bpd = NULL; /* busy provider */
+ kcf_provider_list_t *p;
+ kcf_ops_class_t class;
+ kcf_mech_entry_t *me;
+ kcf_mech_entry_tab_t *me_tab;
+ int index, len, gqlen = INT_MAX, rv = CRYPTO_SUCCESS;
+
+ /* get the mech entry for the specified mechanism */
+ class = KCF_MECH2CLASS(mech_type_1);
+ if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS)) {
+ return (CRYPTO_MECHANISM_INVALID);
+ }
+
+ me_tab = &kcf_mech_tabs_tab[class];
+ index = KCF_MECH2INDEX(mech_type_1);
+ if ((index < 0) || (index >= me_tab->met_size)) {
+ return (CRYPTO_MECHANISM_INVALID);
+ }
+
+ me = &((me_tab->met_tab)[index]);
+ mutex_enter(&me->me_mutex);
+
+ /*
+ * We assume the provider descriptor will not go away because
+ * it is being held somewhere, i.e. its reference count has been
+ * incremented. In the case of the crypto module, the provider
+ * descriptor is held by the session structure.
+ */
+ if (old->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ if (old->pd_provider_list == NULL) {
+ real_pd = NULL;
+ rv = CRYPTO_DEVICE_ERROR;
+ goto out;
+ }
+ /*
+ * Find the least loaded real provider. KCF_PROV_LOAD gives
+ * the load (number of pending requests) of the provider.
+ */
+ mutex_enter(&old->pd_lock);
+ p = old->pd_provider_list;
+ while (p != NULL) {
+ provider = p->pl_provider;
+
+ ASSERT(provider->pd_prov_type !=
+ CRYPTO_LOGICAL_PROVIDER);
+
+ if (call_restrict &&
+ (provider->pd_flags & KCF_PROV_RESTRICTED)) {
+ p = p->pl_next;
+ continue;
+ }
+
+ if (!is_valid_provider_for_mech(provider, me, fg)) {
+ p = p->pl_next;
+ continue;
+ }
+
+ /* provider does second mech */
+ if (mech_type_2 != CRYPTO_MECH_INVALID) {
+ int i;
+
+ i = KCF_TO_PROV_MECH_INDX(provider,
+ mech_type_2);
+ if (i == KCF_INVALID_INDX) {
+ p = p->pl_next;
+ continue;
+ }
+ }
+
+ if (provider->pd_state != KCF_PROV_READY) {
+ /* choose BUSY if no READY providers */
+ if (provider->pd_state == KCF_PROV_BUSY)
+ bpd = provider;
+ p = p->pl_next;
+ continue;
+ }
+
+ len = KCF_PROV_LOAD(provider);
+ if (len < gqlen) {
+ gqlen = len;
+ gpd = provider;
+ }
+
+ p = p->pl_next;
+ }
+
+ if (gpd != NULL) {
+ real_pd = gpd;
+ KCF_PROV_REFHOLD(real_pd);
+ } else if (bpd != NULL) {
+ real_pd = bpd;
+ KCF_PROV_REFHOLD(real_pd);
+ } else {
+ /* can't find provider */
+ real_pd = NULL;
+ rv = CRYPTO_MECHANISM_INVALID;
+ }
+ mutex_exit(&old->pd_lock);
+
+ } else {
+ if (!KCF_IS_PROV_USABLE(old) ||
+ (call_restrict && (old->pd_flags & KCF_PROV_RESTRICTED))) {
+ real_pd = NULL;
+ rv = CRYPTO_DEVICE_ERROR;
+ goto out;
+ }
+
+ if (!is_valid_provider_for_mech(old, me, fg)) {
+ real_pd = NULL;
+ rv = CRYPTO_MECHANISM_INVALID;
+ goto out;
+ }
+
+ KCF_PROV_REFHOLD(real_pd);
+ }
+out:
+ mutex_exit(&me->me_mutex);
+ *new = real_pd;
+ return (rv);
+}
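+
+/*
+ * Editor's note: the selection policy above, distilled into illustrative
+ * pseudo-code (qualifies() is a hypothetical stand-in for the mechanism,
+ * function-group and restriction checks). READY providers compete on load;
+ * a BUSY provider is used only when no READY one qualifies:
+ *
+ *	best = busy = NULL; best_load = INT_MAX;
+ *	for (each real provider p under the logical provider) {
+ *		if (!qualifies(p))
+ *			continue;
+ *		if (p->pd_state == KCF_PROV_BUSY)
+ *			busy = p;
+ *		else if (p->pd_state == KCF_PROV_READY &&
+ *		    KCF_PROV_LOAD(p) < best_load) {
+ *			best_load = KCF_PROV_LOAD(p);
+ *			best = p;
+ *		}
+ *	}
+ *	return (best != NULL ? best : busy);
+ */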
+
+/*
+ * Return the best provider for the specified mechanism. The provider
+ * is held and it is the caller's responsibility to release it when done.
+ * The fg input argument is used as a search criterion to pick a provider.
+ * A provider has to support this function group to be picked.
+ *
+ * Find the least loaded provider in the list of providers. We do a linear
+ * search to find one. This is fine as we assume there are only a small
+ * number of providers in this list. If this assumption ever changes,
+ * we should revisit this.
+ *
+ * call_restrict indicates whether the caller is barred from using
+ * restricted providers.
+ */
+kcf_provider_desc_t *
+kcf_get_mech_provider(crypto_mech_type_t mech_type, kcf_mech_entry_t **mepp,
+ int *error, kcf_prov_tried_t *triedl, crypto_func_group_t fg,
+ boolean_t call_restrict, size_t data_size)
+{
+ kcf_provider_desc_t *pd = NULL, *gpd = NULL;
+ kcf_prov_mech_desc_t *prov_chain, *mdesc;
+ int len, gqlen = INT_MAX;
+ kcf_ops_class_t class;
+ int index;
+ kcf_mech_entry_t *me;
+ kcf_mech_entry_tab_t *me_tab;
+
+ class = KCF_MECH2CLASS(mech_type);
+ if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS)) {
+ *error = CRYPTO_MECHANISM_INVALID;
+ return (NULL);
+ }
+
+ me_tab = &kcf_mech_tabs_tab[class];
+ index = KCF_MECH2INDEX(mech_type);
+ if ((index < 0) || (index >= me_tab->met_size)) {
+ *error = CRYPTO_MECHANISM_INVALID;
+ return (NULL);
+ }
+
+ me = &((me_tab->met_tab)[index]);
+ if (mepp != NULL)
+ *mepp = me;
+
+ mutex_enter(&me->me_mutex);
+
+ prov_chain = me->me_hw_prov_chain;
+
+ /*
+	 * We check the threshold for using a hardware provider for
+ * this amount of data. If there is no software provider available
+ * for the mechanism, then the threshold is ignored.
+ */
+ if ((prov_chain != NULL) &&
+ ((data_size == 0) || (me->me_threshold == 0) ||
+ (data_size >= me->me_threshold) ||
+ ((mdesc = me->me_sw_prov) == NULL) ||
+ (!IS_FG_SUPPORTED(mdesc, fg)) ||
+ (!KCF_IS_PROV_USABLE(mdesc->pm_prov_desc)))) {
+ ASSERT(me->me_num_hwprov > 0);
+ /* there is at least one provider */
+
+ /*
+ * Find the least loaded real provider. KCF_PROV_LOAD gives
+ * the load (number of pending requests) of the provider.
+ */
+ while (prov_chain != NULL) {
+ pd = prov_chain->pm_prov_desc;
+
+ if (!IS_FG_SUPPORTED(prov_chain, fg) ||
+ !KCF_IS_PROV_USABLE(pd) ||
+ IS_PROVIDER_TRIED(pd, triedl) ||
+ (call_restrict &&
+ (pd->pd_flags & KCF_PROV_RESTRICTED))) {
+ prov_chain = prov_chain->pm_next;
+ continue;
+ }
+
+ if ((len = KCF_PROV_LOAD(pd)) < gqlen) {
+ gqlen = len;
+ gpd = pd;
+ }
+
+ prov_chain = prov_chain->pm_next;
+ }
+
+ pd = gpd;
+ }
+
+ /* No HW provider for this mech, is there a SW provider? */
+ if (pd == NULL && (mdesc = me->me_sw_prov) != NULL) {
+ pd = mdesc->pm_prov_desc;
+ if (!IS_FG_SUPPORTED(mdesc, fg) ||
+ !KCF_IS_PROV_USABLE(pd) ||
+ IS_PROVIDER_TRIED(pd, triedl) ||
+ (call_restrict && (pd->pd_flags & KCF_PROV_RESTRICTED)))
+ pd = NULL;
+ }
+
+ if (pd == NULL) {
+ /*
+		 * We do not want to report CRYPTO_MECH_NOT_SUPPORTED when
+ * we are in the "fallback to the next provider" case. Rather
+ * we preserve the error, so that the client gets the right
+ * error code.
+ */
+ if (triedl == NULL)
+ *error = CRYPTO_MECH_NOT_SUPPORTED;
+ } else
+ KCF_PROV_REFHOLD(pd);
+
+ mutex_exit(&me->me_mutex);
+ return (pd);
+}
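+
+/*
+ * Editor's note: the hardware-vs-software dispatch test above reduces to
+ * the following predicate (an illustrative rewrite, not a new function in
+ * this file). Hardware is scanned unless the job is known to be small and
+ * a usable software provider exists to take it instead:
+ *
+ *	static boolean_t
+ *	try_hardware_first(kcf_mech_entry_t *me, size_t data_size,
+ *	    crypto_func_group_t fg)
+ *	{
+ *		kcf_prov_mech_desc_t *sw = me->me_sw_prov;
+ *
+ *		if (me->me_hw_prov_chain == NULL)
+ *			return (B_FALSE);
+ *		if (data_size != 0 && me->me_threshold != 0 &&
+ *		    data_size < me->me_threshold && sw != NULL &&
+ *		    IS_FG_SUPPORTED(sw, fg) &&
+ *		    KCF_IS_PROV_USABLE(sw->pm_prov_desc))
+ *			return (B_FALSE);	// small job; prefer software
+ *		return (B_TRUE);
+ *	}
+ */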
+
+/*
+ * Very similar to kcf_get_mech_provider(). Finds the best provider capable of
+ * a dual operation with both me1 and me2.
+ * When no dual-ops capable providers are available, returns the best provider
+ * for me1 only, and sets *prov_mt2 to CRYPTO_MECH_INVALID.
+ * We assume/expect that a slower HW provider capable of the dual operation
+ * is still faster than the two fastest providers capable of the individual
+ * operations separately.
+ */
+kcf_provider_desc_t *
+kcf_get_dual_provider(crypto_mechanism_t *mech1, crypto_mechanism_t *mech2,
+ kcf_mech_entry_t **mepp, crypto_mech_type_t *prov_mt1,
+ crypto_mech_type_t *prov_mt2, int *error, kcf_prov_tried_t *triedl,
+ crypto_func_group_t fg1, crypto_func_group_t fg2, boolean_t call_restrict,
+ size_t data_size)
+{
+ kcf_provider_desc_t *pd = NULL, *pdm1 = NULL, *pdm1m2 = NULL;
+ kcf_prov_mech_desc_t *prov_chain, *mdesc;
+ int len, gqlen = INT_MAX, dgqlen = INT_MAX;
+ crypto_mech_info_list_t *mil;
+ crypto_mech_type_t m2id = mech2->cm_type;
+ kcf_mech_entry_t *me;
+
+ /* when mech is a valid mechanism, me will be its mech_entry */
+ if (kcf_get_mech_entry(mech1->cm_type, &me) != KCF_SUCCESS) {
+ *error = CRYPTO_MECHANISM_INVALID;
+ return (NULL);
+ }
+
+ *prov_mt2 = CRYPTO_MECH_INVALID;
+
+ if (mepp != NULL)
+ *mepp = me;
+ mutex_enter(&me->me_mutex);
+
+ prov_chain = me->me_hw_prov_chain;
+ /*
+ * We check the threshold for using a hardware provider for
+ * this amount of data. If there is no software provider available
+ * for the first mechanism, then the threshold is ignored.
+ */
+ if ((prov_chain != NULL) &&
+ ((data_size == 0) || (me->me_threshold == 0) ||
+ (data_size >= me->me_threshold) ||
+ ((mdesc = me->me_sw_prov) == NULL) ||
+ (!IS_FG_SUPPORTED(mdesc, fg1)) ||
+ (!KCF_IS_PROV_USABLE(mdesc->pm_prov_desc)))) {
+ /* there is at least one provider */
+ ASSERT(me->me_num_hwprov > 0);
+
+ /*
+ * Find the least loaded provider capable of the combo
+ * me1 + me2, and save a pointer to the least loaded
+ * provider capable of me1 only.
+ */
+ while (prov_chain != NULL) {
+ pd = prov_chain->pm_prov_desc;
+ len = KCF_PROV_LOAD(pd);
+
+ if (!IS_FG_SUPPORTED(prov_chain, fg1) ||
+ !KCF_IS_PROV_USABLE(pd) ||
+ IS_PROVIDER_TRIED(pd, triedl) ||
+ (call_restrict &&
+ (pd->pd_flags & KCF_PROV_RESTRICTED))) {
+ prov_chain = prov_chain->pm_next;
+ continue;
+ }
+
+ /* Save the best provider capable of m1 */
+ if (len < gqlen) {
+ *prov_mt1 =
+ prov_chain->pm_mech_info.cm_mech_number;
+ gqlen = len;
+ pdm1 = pd;
+ }
+
+ /* See if pd can do me2 too */
+ for (mil = prov_chain->pm_mi_list;
+ mil != NULL; mil = mil->ml_next) {
+ if ((mil->ml_mech_info.cm_func_group_mask &
+ fg2) == 0)
+ continue;
+
+ if ((mil->ml_kcf_mechid == m2id) &&
+ (len < dgqlen)) {
+ /* Bingo! */
+ dgqlen = len;
+ pdm1m2 = pd;
+ *prov_mt2 =
+ mil->ml_mech_info.cm_mech_number;
+ *prov_mt1 = prov_chain->
+ pm_mech_info.cm_mech_number;
+ break;
+ }
+ }
+
+ prov_chain = prov_chain->pm_next;
+ }
+
+ pd = (pdm1m2 != NULL) ? pdm1m2 : pdm1;
+ }
+
+ /* no HW provider for this mech, is there a SW provider? */
+ if (pd == NULL && (mdesc = me->me_sw_prov) != NULL) {
+ pd = mdesc->pm_prov_desc;
+ if (!IS_FG_SUPPORTED(mdesc, fg1) ||
+ !KCF_IS_PROV_USABLE(pd) ||
+ IS_PROVIDER_TRIED(pd, triedl) ||
+ (call_restrict && (pd->pd_flags & KCF_PROV_RESTRICTED)))
+ pd = NULL;
+ else {
+ /* See if pd can do me2 too */
+ for (mil = me->me_sw_prov->pm_mi_list;
+ mil != NULL; mil = mil->ml_next) {
+ if ((mil->ml_mech_info.cm_func_group_mask &
+ fg2) == 0)
+ continue;
+
+ if (mil->ml_kcf_mechid == m2id) {
+ /* Bingo! */
+ *prov_mt2 =
+ mil->ml_mech_info.cm_mech_number;
+ break;
+ }
+ }
+ *prov_mt1 = me->me_sw_prov->pm_mech_info.cm_mech_number;
+ }
+ }
+
+ if (pd == NULL)
+ *error = CRYPTO_MECH_NOT_SUPPORTED;
+ else
+ KCF_PROV_REFHOLD(pd);
+
+ mutex_exit(&me->me_mutex);
+ return (pd);
+}
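+
+/*
+ * Editor's note: a sketch of the intended caller pattern (illustrative;
+ * the encr_mech/mac_mech variables are hypothetical). When *prov_mt2 comes
+ * back as CRYPTO_MECH_INVALID, the chosen provider can only do mech1, and
+ * the dual operation must be emulated with two single operations (see
+ * kcf_emulate_dual() at the end of this file):
+ *
+ *	pd = kcf_get_dual_provider(&encr_mech, &mac_mech, NULL, &prov_mt1,
+ *	    &prov_mt2, &err, NULL, CRYPTO_FG_ENCRYPT, CRYPTO_FG_MAC,
+ *	    B_FALSE, 0);
+ *	if (pd == NULL)
+ *		return (err);
+ *	if (prov_mt2 == CRYPTO_MECH_INVALID) {
+ *		// pd handles mech1 only; fall back to emulation
+ *	}
+ */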
+
+/*
+ * Do the actual work of calling the provider routines.
+ *
+ * pd - Provider structure
+ * ctx - Context for this operation
+ * params - Parameters for this operation
+ * rhndl - Request handle to use for notification
+ *
+ * The return values are the same as that of the respective SPI.
+ */
+int
+common_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
+ kcf_req_params_t *params, crypto_req_handle_t rhndl)
+{
+ int err = CRYPTO_ARGUMENTS_BAD;
+ kcf_op_type_t optype;
+
+ optype = params->rp_optype;
+
+ switch (params->rp_opgrp) {
+ case KCF_OG_DIGEST: {
+ kcf_digest_ops_params_t *dops = &params->rp_u.digest_params;
+
+ switch (optype) {
+ case KCF_OP_INIT:
+ /*
+ * We should do this only here and not in KCF_WRAP_*
+ * macros. This is because we may want to try other
+ * providers, in case we recover from a failure.
+ */
+ KCF_SET_PROVIDER_MECHNUM(dops->do_framework_mechtype,
+ pd, &dops->do_mech);
+
+ err = KCF_PROV_DIGEST_INIT(pd, ctx, &dops->do_mech,
+ rhndl);
+ break;
+
+ case KCF_OP_SINGLE:
+ err = KCF_PROV_DIGEST(pd, ctx, dops->do_data,
+ dops->do_digest, rhndl);
+ break;
+
+ case KCF_OP_UPDATE:
+ err = KCF_PROV_DIGEST_UPDATE(pd, ctx,
+ dops->do_data, rhndl);
+ break;
+
+ case KCF_OP_FINAL:
+ err = KCF_PROV_DIGEST_FINAL(pd, ctx,
+ dops->do_digest, rhndl);
+ break;
+
+ case KCF_OP_ATOMIC:
+ ASSERT(ctx == NULL);
+ KCF_SET_PROVIDER_MECHNUM(dops->do_framework_mechtype,
+ pd, &dops->do_mech);
+ err = KCF_PROV_DIGEST_ATOMIC(pd, dops->do_sid,
+ &dops->do_mech, dops->do_data, dops->do_digest,
+ rhndl);
+ break;
+
+ case KCF_OP_DIGEST_KEY:
+ err = KCF_PROV_DIGEST_KEY(pd, ctx, dops->do_digest_key,
+ rhndl);
+ break;
+
+ default:
+ break;
+ }
+ break;
+ }
+
+ case KCF_OG_MAC: {
+ kcf_mac_ops_params_t *mops = &params->rp_u.mac_params;
+
+ switch (optype) {
+ case KCF_OP_INIT:
+ KCF_SET_PROVIDER_MECHNUM(mops->mo_framework_mechtype,
+ pd, &mops->mo_mech);
+
+ err = KCF_PROV_MAC_INIT(pd, ctx, &mops->mo_mech,
+ mops->mo_key, mops->mo_templ, rhndl);
+ break;
+
+ case KCF_OP_SINGLE:
+ err = KCF_PROV_MAC(pd, ctx, mops->mo_data,
+ mops->mo_mac, rhndl);
+ break;
+
+ case KCF_OP_UPDATE:
+ err = KCF_PROV_MAC_UPDATE(pd, ctx, mops->mo_data,
+ rhndl);
+ break;
+
+ case KCF_OP_FINAL:
+ err = KCF_PROV_MAC_FINAL(pd, ctx, mops->mo_mac, rhndl);
+ break;
+
+ case KCF_OP_ATOMIC:
+ ASSERT(ctx == NULL);
+ KCF_SET_PROVIDER_MECHNUM(mops->mo_framework_mechtype,
+ pd, &mops->mo_mech);
+
+ err = KCF_PROV_MAC_ATOMIC(pd, mops->mo_sid,
+ &mops->mo_mech, mops->mo_key, mops->mo_data,
+ mops->mo_mac, mops->mo_templ, rhndl);
+ break;
+
+ case KCF_OP_MAC_VERIFY_ATOMIC:
+ ASSERT(ctx == NULL);
+ KCF_SET_PROVIDER_MECHNUM(mops->mo_framework_mechtype,
+ pd, &mops->mo_mech);
+
+ err = KCF_PROV_MAC_VERIFY_ATOMIC(pd, mops->mo_sid,
+ &mops->mo_mech, mops->mo_key, mops->mo_data,
+ mops->mo_mac, mops->mo_templ, rhndl);
+ break;
+
+ default:
+ break;
+ }
+ break;
+ }
+
+ case KCF_OG_ENCRYPT: {
+ kcf_encrypt_ops_params_t *eops = &params->rp_u.encrypt_params;
+
+ switch (optype) {
+ case KCF_OP_INIT:
+ KCF_SET_PROVIDER_MECHNUM(eops->eo_framework_mechtype,
+ pd, &eops->eo_mech);
+
+ err = KCF_PROV_ENCRYPT_INIT(pd, ctx, &eops->eo_mech,
+ eops->eo_key, eops->eo_templ, rhndl);
+ break;
+
+ case KCF_OP_SINGLE:
+ err = KCF_PROV_ENCRYPT(pd, ctx, eops->eo_plaintext,
+ eops->eo_ciphertext, rhndl);
+ break;
+
+ case KCF_OP_UPDATE:
+ err = KCF_PROV_ENCRYPT_UPDATE(pd, ctx,
+ eops->eo_plaintext, eops->eo_ciphertext, rhndl);
+ break;
+
+ case KCF_OP_FINAL:
+ err = KCF_PROV_ENCRYPT_FINAL(pd, ctx,
+ eops->eo_ciphertext, rhndl);
+ break;
+
+ case KCF_OP_ATOMIC:
+ ASSERT(ctx == NULL);
+ KCF_SET_PROVIDER_MECHNUM(eops->eo_framework_mechtype,
+ pd, &eops->eo_mech);
+
+ err = KCF_PROV_ENCRYPT_ATOMIC(pd, eops->eo_sid,
+ &eops->eo_mech, eops->eo_key, eops->eo_plaintext,
+ eops->eo_ciphertext, eops->eo_templ, rhndl);
+ break;
+
+ default:
+ break;
+ }
+ break;
+ }
+
+ case KCF_OG_DECRYPT: {
+ kcf_decrypt_ops_params_t *dcrops = &params->rp_u.decrypt_params;
+
+ switch (optype) {
+ case KCF_OP_INIT:
+ KCF_SET_PROVIDER_MECHNUM(dcrops->dop_framework_mechtype,
+ pd, &dcrops->dop_mech);
+
+ err = KCF_PROV_DECRYPT_INIT(pd, ctx, &dcrops->dop_mech,
+ dcrops->dop_key, dcrops->dop_templ, rhndl);
+ break;
+
+ case KCF_OP_SINGLE:
+ err = KCF_PROV_DECRYPT(pd, ctx, dcrops->dop_ciphertext,
+ dcrops->dop_plaintext, rhndl);
+ break;
+
+ case KCF_OP_UPDATE:
+ err = KCF_PROV_DECRYPT_UPDATE(pd, ctx,
+ dcrops->dop_ciphertext, dcrops->dop_plaintext,
+ rhndl);
+ break;
+
+ case KCF_OP_FINAL:
+ err = KCF_PROV_DECRYPT_FINAL(pd, ctx,
+ dcrops->dop_plaintext, rhndl);
+ break;
+
+ case KCF_OP_ATOMIC:
+ ASSERT(ctx == NULL);
+ KCF_SET_PROVIDER_MECHNUM(dcrops->dop_framework_mechtype,
+ pd, &dcrops->dop_mech);
+
+ err = KCF_PROV_DECRYPT_ATOMIC(pd, dcrops->dop_sid,
+ &dcrops->dop_mech, dcrops->dop_key,
+ dcrops->dop_ciphertext, dcrops->dop_plaintext,
+ dcrops->dop_templ, rhndl);
+ break;
+
+ default:
+ break;
+ }
+ break;
+ }
+
+ case KCF_OG_SIGN: {
+ kcf_sign_ops_params_t *sops = &params->rp_u.sign_params;
+
+ switch (optype) {
+ case KCF_OP_INIT:
+ KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype,
+ pd, &sops->so_mech);
+
+ err = KCF_PROV_SIGN_INIT(pd, ctx, &sops->so_mech,
+ sops->so_key, sops->so_templ, rhndl);
+ break;
+
+ case KCF_OP_SIGN_RECOVER_INIT:
+ KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype,
+ pd, &sops->so_mech);
+
+ err = KCF_PROV_SIGN_RECOVER_INIT(pd, ctx,
+ &sops->so_mech, sops->so_key, sops->so_templ,
+ rhndl);
+ break;
+
+ case KCF_OP_SINGLE:
+ err = KCF_PROV_SIGN(pd, ctx, sops->so_data,
+ sops->so_signature, rhndl);
+ break;
+
+ case KCF_OP_SIGN_RECOVER:
+ err = KCF_PROV_SIGN_RECOVER(pd, ctx,
+ sops->so_data, sops->so_signature, rhndl);
+ break;
+
+ case KCF_OP_UPDATE:
+ err = KCF_PROV_SIGN_UPDATE(pd, ctx, sops->so_data,
+ rhndl);
+ break;
+
+ case KCF_OP_FINAL:
+ err = KCF_PROV_SIGN_FINAL(pd, ctx, sops->so_signature,
+ rhndl);
+ break;
+
+ case KCF_OP_ATOMIC:
+ ASSERT(ctx == NULL);
+ KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype,
+ pd, &sops->so_mech);
+
+ err = KCF_PROV_SIGN_ATOMIC(pd, sops->so_sid,
+ &sops->so_mech, sops->so_key, sops->so_data,
+ sops->so_templ, sops->so_signature, rhndl);
+ break;
+
+ case KCF_OP_SIGN_RECOVER_ATOMIC:
+ ASSERT(ctx == NULL);
+ KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype,
+ pd, &sops->so_mech);
+
+ err = KCF_PROV_SIGN_RECOVER_ATOMIC(pd, sops->so_sid,
+ &sops->so_mech, sops->so_key, sops->so_data,
+ sops->so_templ, sops->so_signature, rhndl);
+ break;
+
+ default:
+ break;
+ }
+ break;
+ }
+
+ case KCF_OG_VERIFY: {
+ kcf_verify_ops_params_t *vops = &params->rp_u.verify_params;
+
+ switch (optype) {
+ case KCF_OP_INIT:
+ KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype,
+ pd, &vops->vo_mech);
+
+ err = KCF_PROV_VERIFY_INIT(pd, ctx, &vops->vo_mech,
+ vops->vo_key, vops->vo_templ, rhndl);
+ break;
+
+ case KCF_OP_VERIFY_RECOVER_INIT:
+ KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype,
+ pd, &vops->vo_mech);
+
+ err = KCF_PROV_VERIFY_RECOVER_INIT(pd, ctx,
+ &vops->vo_mech, vops->vo_key, vops->vo_templ,
+ rhndl);
+ break;
+
+ case KCF_OP_SINGLE:
+ err = KCF_PROV_VERIFY(pd, ctx, vops->vo_data,
+ vops->vo_signature, rhndl);
+ break;
+
+ case KCF_OP_VERIFY_RECOVER:
+ err = KCF_PROV_VERIFY_RECOVER(pd, ctx,
+ vops->vo_signature, vops->vo_data, rhndl);
+ break;
+
+ case KCF_OP_UPDATE:
+ err = KCF_PROV_VERIFY_UPDATE(pd, ctx, vops->vo_data,
+ rhndl);
+ break;
+
+ case KCF_OP_FINAL:
+ err = KCF_PROV_VERIFY_FINAL(pd, ctx, vops->vo_signature,
+ rhndl);
+ break;
+
+ case KCF_OP_ATOMIC:
+ ASSERT(ctx == NULL);
+ KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype,
+ pd, &vops->vo_mech);
+
+ err = KCF_PROV_VERIFY_ATOMIC(pd, vops->vo_sid,
+ &vops->vo_mech, vops->vo_key, vops->vo_data,
+ vops->vo_templ, vops->vo_signature, rhndl);
+ break;
+
+ case KCF_OP_VERIFY_RECOVER_ATOMIC:
+ ASSERT(ctx == NULL);
+ KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype,
+ pd, &vops->vo_mech);
+
+ err = KCF_PROV_VERIFY_RECOVER_ATOMIC(pd, vops->vo_sid,
+ &vops->vo_mech, vops->vo_key, vops->vo_signature,
+ vops->vo_templ, vops->vo_data, rhndl);
+ break;
+
+ default:
+ break;
+ }
+ break;
+ }
+
+ case KCF_OG_ENCRYPT_MAC: {
+ kcf_encrypt_mac_ops_params_t *eops =
+ &params->rp_u.encrypt_mac_params;
+ kcf_context_t *kcf_secondctx;
+
+ switch (optype) {
+ case KCF_OP_INIT:
+ kcf_secondctx = ((kcf_context_t *)
+ (ctx->cc_framework_private))->kc_secondctx;
+
+ if (kcf_secondctx != NULL) {
+ err = kcf_emulate_dual(pd, ctx, params);
+ break;
+ }
+ KCF_SET_PROVIDER_MECHNUM(
+ eops->em_framework_encr_mechtype,
+ pd, &eops->em_encr_mech);
+
+ KCF_SET_PROVIDER_MECHNUM(
+ eops->em_framework_mac_mechtype,
+ pd, &eops->em_mac_mech);
+
+ err = KCF_PROV_ENCRYPT_MAC_INIT(pd, ctx,
+ &eops->em_encr_mech, eops->em_encr_key,
+ &eops->em_mac_mech, eops->em_mac_key,
+ eops->em_encr_templ, eops->em_mac_templ,
+ rhndl);
+
+ break;
+
+ case KCF_OP_SINGLE:
+ err = KCF_PROV_ENCRYPT_MAC(pd, ctx,
+ eops->em_plaintext, eops->em_ciphertext,
+ eops->em_mac, rhndl);
+ break;
+
+ case KCF_OP_UPDATE:
+ kcf_secondctx = ((kcf_context_t *)
+ (ctx->cc_framework_private))->kc_secondctx;
+ if (kcf_secondctx != NULL) {
+ err = kcf_emulate_dual(pd, ctx, params);
+ break;
+ }
+ err = KCF_PROV_ENCRYPT_MAC_UPDATE(pd, ctx,
+ eops->em_plaintext, eops->em_ciphertext, rhndl);
+ break;
+
+ case KCF_OP_FINAL:
+ kcf_secondctx = ((kcf_context_t *)
+ (ctx->cc_framework_private))->kc_secondctx;
+ if (kcf_secondctx != NULL) {
+ err = kcf_emulate_dual(pd, ctx, params);
+ break;
+ }
+ err = KCF_PROV_ENCRYPT_MAC_FINAL(pd, ctx,
+ eops->em_ciphertext, eops->em_mac, rhndl);
+ break;
+
+ case KCF_OP_ATOMIC:
+ ASSERT(ctx == NULL);
+
+ KCF_SET_PROVIDER_MECHNUM(
+ eops->em_framework_encr_mechtype,
+ pd, &eops->em_encr_mech);
+
+ KCF_SET_PROVIDER_MECHNUM(
+ eops->em_framework_mac_mechtype,
+ pd, &eops->em_mac_mech);
+
+ err = KCF_PROV_ENCRYPT_MAC_ATOMIC(pd, eops->em_sid,
+ &eops->em_encr_mech, eops->em_encr_key,
+ &eops->em_mac_mech, eops->em_mac_key,
+ eops->em_plaintext, eops->em_ciphertext,
+ eops->em_mac,
+ eops->em_encr_templ, eops->em_mac_templ,
+ rhndl);
+
+ break;
+
+ default:
+ break;
+ }
+ break;
+ }
+
+ case KCF_OG_MAC_DECRYPT: {
+ kcf_mac_decrypt_ops_params_t *dops =
+ &params->rp_u.mac_decrypt_params;
+ kcf_context_t *kcf_secondctx;
+
+ switch (optype) {
+ case KCF_OP_INIT:
+ kcf_secondctx = ((kcf_context_t *)
+ (ctx->cc_framework_private))->kc_secondctx;
+
+ if (kcf_secondctx != NULL) {
+ err = kcf_emulate_dual(pd, ctx, params);
+ break;
+ }
+ KCF_SET_PROVIDER_MECHNUM(
+ dops->md_framework_mac_mechtype,
+ pd, &dops->md_mac_mech);
+
+ KCF_SET_PROVIDER_MECHNUM(
+ dops->md_framework_decr_mechtype,
+ pd, &dops->md_decr_mech);
+
+ err = KCF_PROV_MAC_DECRYPT_INIT(pd, ctx,
+ &dops->md_mac_mech, dops->md_mac_key,
+ &dops->md_decr_mech, dops->md_decr_key,
+ dops->md_mac_templ, dops->md_decr_templ,
+ rhndl);
+
+ break;
+
+ case KCF_OP_SINGLE:
+ err = KCF_PROV_MAC_DECRYPT(pd, ctx,
+ dops->md_ciphertext, dops->md_mac,
+ dops->md_plaintext, rhndl);
+ break;
+
+ case KCF_OP_UPDATE:
+ kcf_secondctx = ((kcf_context_t *)
+ (ctx->cc_framework_private))->kc_secondctx;
+ if (kcf_secondctx != NULL) {
+ err = kcf_emulate_dual(pd, ctx, params);
+ break;
+ }
+ err = KCF_PROV_MAC_DECRYPT_UPDATE(pd, ctx,
+ dops->md_ciphertext, dops->md_plaintext, rhndl);
+ break;
+
+ case KCF_OP_FINAL:
+ kcf_secondctx = ((kcf_context_t *)
+ (ctx->cc_framework_private))->kc_secondctx;
+ if (kcf_secondctx != NULL) {
+ err = kcf_emulate_dual(pd, ctx, params);
+ break;
+ }
+ err = KCF_PROV_MAC_DECRYPT_FINAL(pd, ctx,
+ dops->md_mac, dops->md_plaintext, rhndl);
+ break;
+
+ case KCF_OP_ATOMIC:
+ ASSERT(ctx == NULL);
+
+ KCF_SET_PROVIDER_MECHNUM(
+ dops->md_framework_mac_mechtype,
+ pd, &dops->md_mac_mech);
+
+ KCF_SET_PROVIDER_MECHNUM(
+ dops->md_framework_decr_mechtype,
+ pd, &dops->md_decr_mech);
+
+ err = KCF_PROV_MAC_DECRYPT_ATOMIC(pd, dops->md_sid,
+ &dops->md_mac_mech, dops->md_mac_key,
+ &dops->md_decr_mech, dops->md_decr_key,
+ dops->md_ciphertext, dops->md_mac,
+ dops->md_plaintext,
+ dops->md_mac_templ, dops->md_decr_templ,
+ rhndl);
+
+ break;
+
+ case KCF_OP_MAC_VERIFY_DECRYPT_ATOMIC:
+ ASSERT(ctx == NULL);
+
+ KCF_SET_PROVIDER_MECHNUM(
+ dops->md_framework_mac_mechtype,
+ pd, &dops->md_mac_mech);
+
+ KCF_SET_PROVIDER_MECHNUM(
+ dops->md_framework_decr_mechtype,
+ pd, &dops->md_decr_mech);
+
+ err = KCF_PROV_MAC_VERIFY_DECRYPT_ATOMIC(pd,
+ dops->md_sid, &dops->md_mac_mech, dops->md_mac_key,
+ &dops->md_decr_mech, dops->md_decr_key,
+ dops->md_ciphertext, dops->md_mac,
+ dops->md_plaintext,
+ dops->md_mac_templ, dops->md_decr_templ,
+ rhndl);
+
+ break;
+
+ default:
+ break;
+ }
+ break;
+ }
+
+ case KCF_OG_KEY: {
+ kcf_key_ops_params_t *kops = &params->rp_u.key_params;
+
+ ASSERT(ctx == NULL);
+ KCF_SET_PROVIDER_MECHNUM(kops->ko_framework_mechtype, pd,
+ &kops->ko_mech);
+
+ switch (optype) {
+ case KCF_OP_KEY_GENERATE:
+ err = KCF_PROV_KEY_GENERATE(pd, kops->ko_sid,
+ &kops->ko_mech,
+ kops->ko_key_template, kops->ko_key_attribute_count,
+ kops->ko_key_object_id_ptr, rhndl);
+ break;
+
+ case KCF_OP_KEY_GENERATE_PAIR:
+ err = KCF_PROV_KEY_GENERATE_PAIR(pd, kops->ko_sid,
+ &kops->ko_mech,
+ kops->ko_key_template, kops->ko_key_attribute_count,
+ kops->ko_private_key_template,
+ kops->ko_private_key_attribute_count,
+ kops->ko_key_object_id_ptr,
+ kops->ko_private_key_object_id_ptr, rhndl);
+ break;
+
+ case KCF_OP_KEY_WRAP:
+ err = KCF_PROV_KEY_WRAP(pd, kops->ko_sid,
+ &kops->ko_mech,
+ kops->ko_key, kops->ko_key_object_id_ptr,
+ kops->ko_wrapped_key, kops->ko_wrapped_key_len_ptr,
+ rhndl);
+ break;
+
+ case KCF_OP_KEY_UNWRAP:
+ err = KCF_PROV_KEY_UNWRAP(pd, kops->ko_sid,
+ &kops->ko_mech,
+ kops->ko_key, kops->ko_wrapped_key,
+ kops->ko_wrapped_key_len_ptr,
+ kops->ko_key_template, kops->ko_key_attribute_count,
+ kops->ko_key_object_id_ptr, rhndl);
+ break;
+
+ case KCF_OP_KEY_DERIVE:
+ err = KCF_PROV_KEY_DERIVE(pd, kops->ko_sid,
+ &kops->ko_mech,
+ kops->ko_key, kops->ko_key_template,
+ kops->ko_key_attribute_count,
+ kops->ko_key_object_id_ptr, rhndl);
+ break;
+
+ default:
+ break;
+ }
+ break;
+ }
+
+ case KCF_OG_RANDOM: {
+ kcf_random_number_ops_params_t *rops =
+ &params->rp_u.random_number_params;
+
+ ASSERT(ctx == NULL);
+
+ switch (optype) {
+ case KCF_OP_RANDOM_SEED:
+ err = KCF_PROV_SEED_RANDOM(pd, rops->rn_sid,
+ rops->rn_buf, rops->rn_buflen, rops->rn_entropy_est,
+ rops->rn_flags, rhndl);
+ break;
+
+ case KCF_OP_RANDOM_GENERATE:
+ err = KCF_PROV_GENERATE_RANDOM(pd, rops->rn_sid,
+ rops->rn_buf, rops->rn_buflen, rhndl);
+ break;
+
+ default:
+ break;
+ }
+ break;
+ }
+
+ case KCF_OG_SESSION: {
+ kcf_session_ops_params_t *sops = &params->rp_u.session_params;
+
+ ASSERT(ctx == NULL);
+ switch (optype) {
+ case KCF_OP_SESSION_OPEN:
+ /*
+ * so_pd may be a logical provider, in which case
+ * we need to check whether it has been removed.
+ */
+ if (KCF_IS_PROV_REMOVED(sops->so_pd)) {
+ err = CRYPTO_DEVICE_ERROR;
+ break;
+ }
+ err = KCF_PROV_SESSION_OPEN(pd, sops->so_sid_ptr,
+ rhndl, sops->so_pd);
+ break;
+
+ case KCF_OP_SESSION_CLOSE:
+ /*
+ * so_pd may be a logical provider, in which case
+ * we need to check whether it has been removed.
+ */
+ if (KCF_IS_PROV_REMOVED(sops->so_pd)) {
+ err = CRYPTO_DEVICE_ERROR;
+ break;
+ }
+ err = KCF_PROV_SESSION_CLOSE(pd, sops->so_sid,
+ rhndl, sops->so_pd);
+ break;
+
+ case KCF_OP_SESSION_LOGIN:
+ err = KCF_PROV_SESSION_LOGIN(pd, sops->so_sid,
+ sops->so_user_type, sops->so_pin,
+ sops->so_pin_len, rhndl);
+ break;
+
+ case KCF_OP_SESSION_LOGOUT:
+ err = KCF_PROV_SESSION_LOGOUT(pd, sops->so_sid, rhndl);
+ break;
+
+ default:
+ break;
+ }
+ break;
+ }
+
+ case KCF_OG_OBJECT: {
+ kcf_object_ops_params_t *jops = &params->rp_u.object_params;
+
+ ASSERT(ctx == NULL);
+ switch (optype) {
+ case KCF_OP_OBJECT_CREATE:
+ err = KCF_PROV_OBJECT_CREATE(pd, jops->oo_sid,
+ jops->oo_template, jops->oo_attribute_count,
+ jops->oo_object_id_ptr, rhndl);
+ break;
+
+ case KCF_OP_OBJECT_COPY:
+ err = KCF_PROV_OBJECT_COPY(pd, jops->oo_sid,
+ jops->oo_object_id,
+ jops->oo_template, jops->oo_attribute_count,
+ jops->oo_object_id_ptr, rhndl);
+ break;
+
+ case KCF_OP_OBJECT_DESTROY:
+ err = KCF_PROV_OBJECT_DESTROY(pd, jops->oo_sid,
+ jops->oo_object_id, rhndl);
+ break;
+
+ case KCF_OP_OBJECT_GET_SIZE:
+ err = KCF_PROV_OBJECT_GET_SIZE(pd, jops->oo_sid,
+ jops->oo_object_id, jops->oo_object_size, rhndl);
+ break;
+
+ case KCF_OP_OBJECT_GET_ATTRIBUTE_VALUE:
+ err = KCF_PROV_OBJECT_GET_ATTRIBUTE_VALUE(pd,
+ jops->oo_sid, jops->oo_object_id,
+ jops->oo_template, jops->oo_attribute_count, rhndl);
+ break;
+
+ case KCF_OP_OBJECT_SET_ATTRIBUTE_VALUE:
+ err = KCF_PROV_OBJECT_SET_ATTRIBUTE_VALUE(pd,
+ jops->oo_sid, jops->oo_object_id,
+ jops->oo_template, jops->oo_attribute_count, rhndl);
+ break;
+
+ case KCF_OP_OBJECT_FIND_INIT:
+ err = KCF_PROV_OBJECT_FIND_INIT(pd, jops->oo_sid,
+ jops->oo_template, jops->oo_attribute_count,
+ jops->oo_find_init_pp_ptr, rhndl);
+ break;
+
+ case KCF_OP_OBJECT_FIND:
+ err = KCF_PROV_OBJECT_FIND(pd, jops->oo_find_pp,
+ jops->oo_object_id_ptr, jops->oo_max_object_count,
+ jops->oo_object_count_ptr, rhndl);
+ break;
+
+ case KCF_OP_OBJECT_FIND_FINAL:
+ err = KCF_PROV_OBJECT_FIND_FINAL(pd, jops->oo_find_pp,
+ rhndl);
+ break;
+
+ default:
+ break;
+ }
+ break;
+ }
+
+ case KCF_OG_PROVMGMT: {
+ kcf_provmgmt_ops_params_t *pops = &params->rp_u.provmgmt_params;
+
+ ASSERT(ctx == NULL);
+ switch (optype) {
+ case KCF_OP_MGMT_EXTINFO:
+ /*
+ * po_pd may be a logical provider, in which case
+ * we need to check whether it has been removed.
+ */
+ if (KCF_IS_PROV_REMOVED(pops->po_pd)) {
+ err = CRYPTO_DEVICE_ERROR;
+ break;
+ }
+ err = KCF_PROV_EXT_INFO(pd, pops->po_ext_info, rhndl,
+ pops->po_pd);
+ break;
+
+ case KCF_OP_MGMT_INITTOKEN:
+ err = KCF_PROV_INIT_TOKEN(pd, pops->po_pin,
+ pops->po_pin_len, pops->po_label, rhndl);
+ break;
+
+ case KCF_OP_MGMT_INITPIN:
+ err = KCF_PROV_INIT_PIN(pd, pops->po_sid, pops->po_pin,
+ pops->po_pin_len, rhndl);
+ break;
+
+ case KCF_OP_MGMT_SETPIN:
+ err = KCF_PROV_SET_PIN(pd, pops->po_sid,
+ pops->po_old_pin, pops->po_old_pin_len,
+ pops->po_pin, pops->po_pin_len, rhndl);
+ break;
+
+ default:
+ break;
+ }
+ break;
+ }
+
+ case KCF_OG_NOSTORE_KEY: {
+ kcf_key_ops_params_t *kops = &params->rp_u.key_params;
+
+ ASSERT(ctx == NULL);
+ KCF_SET_PROVIDER_MECHNUM(kops->ko_framework_mechtype, pd,
+ &kops->ko_mech);
+
+ switch (optype) {
+ case KCF_OP_KEY_GENERATE:
+ err = KCF_PROV_NOSTORE_KEY_GENERATE(pd, kops->ko_sid,
+ &kops->ko_mech, kops->ko_key_template,
+ kops->ko_key_attribute_count,
+ kops->ko_out_template1,
+ kops->ko_out_attribute_count1, rhndl);
+ break;
+
+ case KCF_OP_KEY_GENERATE_PAIR:
+ err = KCF_PROV_NOSTORE_KEY_GENERATE_PAIR(pd,
+ kops->ko_sid, &kops->ko_mech,
+ kops->ko_key_template, kops->ko_key_attribute_count,
+ kops->ko_private_key_template,
+ kops->ko_private_key_attribute_count,
+ kops->ko_out_template1,
+ kops->ko_out_attribute_count1,
+ kops->ko_out_template2,
+ kops->ko_out_attribute_count2,
+ rhndl);
+ break;
+
+ case KCF_OP_KEY_DERIVE:
+ err = KCF_PROV_NOSTORE_KEY_DERIVE(pd, kops->ko_sid,
+ &kops->ko_mech, kops->ko_key,
+ kops->ko_key_template,
+ kops->ko_key_attribute_count,
+ kops->ko_out_template1,
+ kops->ko_out_attribute_count1, rhndl);
+ break;
+
+ default:
+ break;
+ }
+ break;
+ }
+ default:
+ break;
+ } /* end of switch(params->rp_opgrp) */
+
+ KCF_PROV_INCRSTATS(pd, err);
+ return (err);
+}
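+
+/*
+ * Editor's note: an illustrative sketch of how a synchronous request
+ * reaches the dispatch switch above (modeled on the
+ * KCF_WRAP_ENCRYPT_OPS_PARAMS usage in kcf_emulate_dual() below):
+ *
+ *	kcf_req_params_t params;
+ *
+ *	KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_INIT, pd->pd_sid,
+ *	    &mech, key, NULL, NULL, templ);
+ *	err = kcf_submit_request(pd, ctx, NULL, &params, B_FALSE);
+ *	// kcf_submit_request() routes the request to
+ *	// common_submit_request(), which switches on rp_opgrp/rp_optype.
+ */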
+
+
+/*
+ * Emulate the call for a multipart dual operation with two single steps.
+ * This routine is always called in the context of a working thread
+ * running kcf_svc_do_run().
+ * The single steps are submitted in a pure synchronous way (blocking).
+ * When this routine returns, kcf_svc_do_run() will call kcf_aop_done()
+ * so the originating consumer's callback gets invoked. kcf_aop_done()
+ * takes care of freeing the operation context. So, this routine does
+ * not free the operation context.
+ *
+ * The provider descriptor is assumed held by the callers.
+ */
+static int
+kcf_emulate_dual(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
+ kcf_req_params_t *params)
+{
+ int err = CRYPTO_ARGUMENTS_BAD;
+ kcf_op_type_t optype;
+ size_t save_len;
+ off_t save_offset;
+
+ optype = params->rp_optype;
+
+ switch (params->rp_opgrp) {
+ case KCF_OG_ENCRYPT_MAC: {
+ kcf_encrypt_mac_ops_params_t *cmops =
+ &params->rp_u.encrypt_mac_params;
+ kcf_context_t *encr_kcf_ctx;
+ crypto_ctx_t *mac_ctx;
+ kcf_req_params_t encr_params;
+
+ encr_kcf_ctx = (kcf_context_t *)(ctx->cc_framework_private);
+
+ switch (optype) {
+ case KCF_OP_INIT: {
+ encr_kcf_ctx->kc_secondctx = NULL;
+
+ KCF_WRAP_ENCRYPT_OPS_PARAMS(&encr_params, KCF_OP_INIT,
+ pd->pd_sid, &cmops->em_encr_mech,
+ cmops->em_encr_key, NULL, NULL,
+ cmops->em_encr_templ);
+
+ err = kcf_submit_request(pd, ctx, NULL, &encr_params,
+ B_FALSE);
+
+ /* It can't be CRYPTO_QUEUED */
+ if (err != CRYPTO_SUCCESS) {
+ break;
+ }
+
+ err = crypto_mac_init(&cmops->em_mac_mech,
+ cmops->em_mac_key, cmops->em_mac_templ,
+ (crypto_context_t *)&mac_ctx, NULL);
+
+ if (err == CRYPTO_SUCCESS) {
+ encr_kcf_ctx->kc_secondctx = (kcf_context_t *)
+ mac_ctx->cc_framework_private;
+ KCF_CONTEXT_REFHOLD((kcf_context_t *)
+ mac_ctx->cc_framework_private);
+ }
+
+ break;
+
+ }
+ case KCF_OP_UPDATE: {
+ crypto_dual_data_t *ct = cmops->em_ciphertext;
+ crypto_data_t *pt = cmops->em_plaintext;
+ kcf_context_t *mac_kcf_ctx = encr_kcf_ctx->kc_secondctx;
+ crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;
+
+ KCF_WRAP_ENCRYPT_OPS_PARAMS(&encr_params, KCF_OP_UPDATE,
+ pd->pd_sid, NULL, NULL, pt, (crypto_data_t *)ct,
+ NULL);
+
+ err = kcf_submit_request(pd, ctx, NULL, &encr_params,
+ B_FALSE);
+
+ /* It can't be CRYPTO_QUEUED */
+ if (err != CRYPTO_SUCCESS) {
+ break;
+ }
+
+ save_offset = ct->dd_offset1;
+ save_len = ct->dd_len1;
+ if (ct->dd_len2 == 0) {
+ /*
+ * The previous encrypt step was an
+ * accumulation only and didn't produce any
+ * partial output
+ */
+ if (ct->dd_len1 == 0)
+ break;
+
+ } else {
+ ct->dd_offset1 = ct->dd_offset2;
+ ct->dd_len1 = ct->dd_len2;
+ }
+ err = crypto_mac_update((crypto_context_t)mac_ctx,
+ (crypto_data_t *)ct, NULL);
+
+ ct->dd_offset1 = save_offset;
+ ct->dd_len1 = save_len;
+
+ break;
+ }
+ case KCF_OP_FINAL: {
+ crypto_dual_data_t *ct = cmops->em_ciphertext;
+ crypto_data_t *mac = cmops->em_mac;
+ kcf_context_t *mac_kcf_ctx = encr_kcf_ctx->kc_secondctx;
+ crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;
+ crypto_context_t mac_context = mac_ctx;
+
+ KCF_WRAP_ENCRYPT_OPS_PARAMS(&encr_params, KCF_OP_FINAL,
+ pd->pd_sid, NULL, NULL, NULL, (crypto_data_t *)ct,
+ NULL);
+
+ err = kcf_submit_request(pd, ctx, NULL, &encr_params,
+ B_FALSE);
+
+ /* It can't be CRYPTO_QUEUED */
+ if (err != CRYPTO_SUCCESS) {
+ crypto_cancel_ctx(mac_context);
+ break;
+ }
+
+ if (ct->dd_len2 > 0) {
+ save_offset = ct->dd_offset1;
+ save_len = ct->dd_len1;
+ ct->dd_offset1 = ct->dd_offset2;
+ ct->dd_len1 = ct->dd_len2;
+
+ err = crypto_mac_update(mac_context,
+ (crypto_data_t *)ct, NULL);
+
+ ct->dd_offset1 = save_offset;
+ ct->dd_len1 = save_len;
+
+ if (err != CRYPTO_SUCCESS) {
+ crypto_cancel_ctx(mac_context);
+ return (err);
+ }
+ }
+
+ /* and finally, collect the MAC */
+ err = crypto_mac_final(mac_context, mac, NULL);
+ break;
+ }
+
+ default:
+ break;
+ }
+ KCF_PROV_INCRSTATS(pd, err);
+ break;
+ }
+ case KCF_OG_MAC_DECRYPT: {
+ kcf_mac_decrypt_ops_params_t *mdops =
+ &params->rp_u.mac_decrypt_params;
+ kcf_context_t *decr_kcf_ctx;
+ crypto_ctx_t *mac_ctx;
+ kcf_req_params_t decr_params;
+
+ decr_kcf_ctx = (kcf_context_t *)(ctx->cc_framework_private);
+
+ switch (optype) {
+ case KCF_OP_INIT: {
+ decr_kcf_ctx->kc_secondctx = NULL;
+
+ err = crypto_mac_init(&mdops->md_mac_mech,
+ mdops->md_mac_key, mdops->md_mac_templ,
+ (crypto_context_t *)&mac_ctx, NULL);
+
+ /* It can't be CRYPTO_QUEUED */
+ if (err != CRYPTO_SUCCESS) {
+ break;
+ }
+
+ KCF_WRAP_DECRYPT_OPS_PARAMS(&decr_params, KCF_OP_INIT,
+ pd->pd_sid, &mdops->md_decr_mech,
+ mdops->md_decr_key, NULL, NULL,
+ mdops->md_decr_templ);
+
+ err = kcf_submit_request(pd, ctx, NULL, &decr_params,
+ B_FALSE);
+
+ /* It can't be CRYPTO_QUEUED */
+ if (err != CRYPTO_SUCCESS) {
+ crypto_cancel_ctx((crypto_context_t)mac_ctx);
+ break;
+ }
+
+ decr_kcf_ctx->kc_secondctx = (kcf_context_t *)
+ mac_ctx->cc_framework_private;
+ KCF_CONTEXT_REFHOLD((kcf_context_t *)
+ mac_ctx->cc_framework_private);
+
+ break;
+		}
+ case KCF_OP_UPDATE: {
+ crypto_dual_data_t *ct = mdops->md_ciphertext;
+ crypto_data_t *pt = mdops->md_plaintext;
+ kcf_context_t *mac_kcf_ctx = decr_kcf_ctx->kc_secondctx;
+ crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;
+
+ err = crypto_mac_update((crypto_context_t)mac_ctx,
+ (crypto_data_t *)ct, NULL);
+
+ if (err != CRYPTO_SUCCESS)
+ break;
+
+ save_offset = ct->dd_offset1;
+ save_len = ct->dd_len1;
+
+ /* zero ct->dd_len2 means decrypt everything */
+ if (ct->dd_len2 > 0) {
+ ct->dd_offset1 = ct->dd_offset2;
+ ct->dd_len1 = ct->dd_len2;
+ }
+
+ err = crypto_decrypt_update((crypto_context_t)ctx,
+ (crypto_data_t *)ct, pt, NULL);
+
+ ct->dd_offset1 = save_offset;
+ ct->dd_len1 = save_len;
+
+ break;
+ }
+ case KCF_OP_FINAL: {
+ crypto_data_t *pt = mdops->md_plaintext;
+ crypto_data_t *mac = mdops->md_mac;
+ kcf_context_t *mac_kcf_ctx = decr_kcf_ctx->kc_secondctx;
+ crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;
+
+ err = crypto_mac_final((crypto_context_t)mac_ctx,
+ mac, NULL);
+
+ if (err != CRYPTO_SUCCESS) {
+ crypto_cancel_ctx(ctx);
+ break;
+ }
+
+ /* Get the last chunk of plaintext */
+ KCF_CONTEXT_REFHOLD(decr_kcf_ctx);
+ err = crypto_decrypt_final((crypto_context_t)ctx, pt,
+ NULL);
+
+ break;
+		}
+		default:
+			break;
+		}
+ break;
+ }
+	default:
+		break;
+ } /* end of switch(params->rp_opgrp) */
+
+ return (err);
+}
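+
+/*
+ * Editor's note: the crypto_dual_data_t bookkeeping in the routine above,
+ * distilled (illustrative). dd_offset2/dd_len2 describe only the
+ * ciphertext produced by the latest encrypt step; the (dd_offset1,
+ * dd_len1) window is temporarily re-aimed at that chunk so the MAC
+ * consumes just the fresh bytes, then restored for the caller:
+ *
+ *	save_offset = ct->dd_offset1;
+ *	save_len = ct->dd_len1;
+ *	ct->dd_offset1 = ct->dd_offset2;	// point at the new chunk
+ *	ct->dd_len1 = ct->dd_len2;
+ *	err = crypto_mac_update(mac_context, (crypto_data_t *)ct, NULL);
+ *	ct->dd_offset1 = save_offset;		// restore the caller's view
+ *	ct->dd_len1 = save_len;
+ */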
diff --git a/module/icp/core/kcf_mech_tabs.c b/module/icp/core/kcf_mech_tabs.c
new file mode 100644
index 000000000..3545f03ee
--- /dev/null
+++ b/module/icp/core/kcf_mech_tabs.c
@@ -0,0 +1,775 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/zfs_context.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/api.h>
+#include <sys/crypto/impl.h>
+#include <sys/modhash.h>
+
+/* Cryptographic mechanisms tables and their access functions */
+
+/*
+ * Internal numbers assigned to mechanisms are coded as follows:
+ *
+ * +----------------+----------------+
+ * | mech. class | mech. index |
+ * <--- 32-bits --->+<--- 32-bits --->
+ *
+ * the mech_class identifies the table the mechanism belongs to.
+ * mech_index is the index for that mechanism in the table.
+ * A mechanism belongs to exactly 1 table.
+ * The tables are:
+ * . digest_mechs_tab[] for the msg digest mechs.
+ * . cipher_mechs_tab[] for encrypt/decrypt and wrap/unwrap mechs.
+ * . mac_mechs_tab[] for MAC mechs.
+ * . sign_mechs_tab[] for sign & verify mechs.
+ * . keyops_mechs_tab[] for key/key pair generation, and key derivation.
+ * . misc_mechs_tab[] for mechs that don't belong to any of the above.
+ *
+ * There are no holes in the tables.
+ */
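+
+/*
+ * Editor's note: a worked example (assuming KCF_MECHID() packs the class
+ * in the upper 32 bits and the index in the lower 32 bits, matching the
+ * diagram above):
+ *
+ *	crypto_mech_type_t mt = KCF_MECHID(KCF_DIGEST_CLASS, 1);
+ *	// KCF_MECH2CLASS(mt) == KCF_DIGEST_CLASS -> kcf_digest_mechs_tab
+ *	// KCF_MECH2INDEX(mt) == 1 -> entry [1], SUN_CKM_SHA1 below
+ */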
+
+/*
+ * Locking conventions:
+ * --------------------
+ * A global mutex, kcf_mech_tabs_lock, serializes writes to the
+ * mechanism table via kcf_create_mech_entry().
+ *
+ * A mutex is associated with every entry of the tables.
+ * The mutex is acquired whenever the entry is accessed for
+ * 1) retrieving the mech_id (comparing the mech name)
+ * 2) finding a provider for an xxx_init() or atomic operation.
+ * 3) altering the mech_entry to add or remove a provider.
+ *
+ * In 2), after a provider is chosen, its prov_desc is held and the
+ * entry's mutex must be dropped. The provider's working function (SPI) is
+ * called outside the mech_entry's mutex.
+ *
+ * The number of providers for a particular mechanism is not expected to be
+ * large enough to justify the cost of using rwlocks, so the per-mechanism
+ * entry mutex won't be very *hot*.
+ *
+ * When both kcf_mech_tabs_lock and a mech_entry mutex need to be held,
+ * kcf_mech_tabs_lock must always be acquired first.
+ *
+ */
+
+ /* Mechanisms tables */
+
+
+/* RFE 4687834 Will deal with the extensibility of these tables later */
+
+kcf_mech_entry_t kcf_digest_mechs_tab[KCF_MAXDIGEST];
+kcf_mech_entry_t kcf_cipher_mechs_tab[KCF_MAXCIPHER];
+kcf_mech_entry_t kcf_mac_mechs_tab[KCF_MAXMAC];
+kcf_mech_entry_t kcf_sign_mechs_tab[KCF_MAXSIGN];
+kcf_mech_entry_t kcf_keyops_mechs_tab[KCF_MAXKEYOPS];
+kcf_mech_entry_t kcf_misc_mechs_tab[KCF_MAXMISC];
+
+kcf_mech_entry_tab_t kcf_mech_tabs_tab[KCF_LAST_OPSCLASS + 1] = {
+ {0, NULL}, /* No class zero */
+ {KCF_MAXDIGEST, kcf_digest_mechs_tab},
+ {KCF_MAXCIPHER, kcf_cipher_mechs_tab},
+ {KCF_MAXMAC, kcf_mac_mechs_tab},
+ {KCF_MAXSIGN, kcf_sign_mechs_tab},
+ {KCF_MAXKEYOPS, kcf_keyops_mechs_tab},
+ {KCF_MAXMISC, kcf_misc_mechs_tab}
+};
+
+/*
+ * Per-algorithm internal thresholds for the minimum input size before
+ * offloading to a hardware provider.
+ * Dispatching a crypto operation to a hardware provider entails paying the
+ * cost of an additional context switch. Measurements with the Sun
+ * Accelerator 4000 show that 512-byte jobs or smaller are better handled
+ * in software. There is room for refinement here.
+ */
+int kcf_md5_threshold = 512;
+int kcf_sha1_threshold = 512;
+int kcf_des_threshold = 512;
+int kcf_des3_threshold = 512;
+int kcf_aes_threshold = 512;
+int kcf_bf_threshold = 512;
+int kcf_rc4_threshold = 512;
+
+kmutex_t kcf_mech_tabs_lock;
+static uint32_t kcf_gen_swprov = 0;
+
+int kcf_mech_hash_size = 256;
+mod_hash_t *kcf_mech_hash; /* mech name to id hash */
+
+static crypto_mech_type_t
+kcf_mech_hash_find(char *mechname)
+{
+ mod_hash_val_t hv;
+ crypto_mech_type_t mt;
+
+ mt = CRYPTO_MECH_INVALID;
+ if (mod_hash_find(kcf_mech_hash, (mod_hash_key_t)mechname, &hv) == 0) {
+ mt = *(crypto_mech_type_t *)hv;
+ ASSERT(mt != CRYPTO_MECH_INVALID);
+ }
+
+ return (mt);
+}
+
+void
+kcf_destroy_mech_tabs(void)
+{
+ if (kcf_mech_hash) mod_hash_destroy_hash(kcf_mech_hash);
+}
+
+/*
+ * kcf_init_mech_tabs()
+ *
+ * Called by the misc/kcf's _init() routine to initialize the tables
+ * of mech_entry's.
+ */
+void
+kcf_init_mech_tabs(void)
+{
+ int i, max;
+ kcf_ops_class_t class;
+ kcf_mech_entry_t *me_tab;
+
+ /* Initializes the mutex locks. */
+
+ mutex_init(&kcf_mech_tabs_lock, NULL, MUTEX_DEFAULT, NULL);
+
+ /* Then the pre-defined mechanism entries */
+
+ /* Two digests */
+ (void) strncpy(kcf_digest_mechs_tab[0].me_name, SUN_CKM_MD5,
+ CRYPTO_MAX_MECH_NAME);
+ kcf_digest_mechs_tab[0].me_threshold = kcf_md5_threshold;
+
+ (void) strncpy(kcf_digest_mechs_tab[1].me_name, SUN_CKM_SHA1,
+ CRYPTO_MAX_MECH_NAME);
+ kcf_digest_mechs_tab[1].me_threshold = kcf_sha1_threshold;
+
+ /* The symmetric ciphers in various modes */
+ (void) strncpy(kcf_cipher_mechs_tab[0].me_name, SUN_CKM_DES_CBC,
+ CRYPTO_MAX_MECH_NAME);
+ kcf_cipher_mechs_tab[0].me_threshold = kcf_des_threshold;
+
+ (void) strncpy(kcf_cipher_mechs_tab[1].me_name, SUN_CKM_DES3_CBC,
+ CRYPTO_MAX_MECH_NAME);
+ kcf_cipher_mechs_tab[1].me_threshold = kcf_des3_threshold;
+
+ (void) strncpy(kcf_cipher_mechs_tab[2].me_name, SUN_CKM_DES_ECB,
+ CRYPTO_MAX_MECH_NAME);
+ kcf_cipher_mechs_tab[2].me_threshold = kcf_des_threshold;
+
+ (void) strncpy(kcf_cipher_mechs_tab[3].me_name, SUN_CKM_DES3_ECB,
+ CRYPTO_MAX_MECH_NAME);
+ kcf_cipher_mechs_tab[3].me_threshold = kcf_des3_threshold;
+
+ (void) strncpy(kcf_cipher_mechs_tab[4].me_name, SUN_CKM_BLOWFISH_CBC,
+ CRYPTO_MAX_MECH_NAME);
+ kcf_cipher_mechs_tab[4].me_threshold = kcf_bf_threshold;
+
+ (void) strncpy(kcf_cipher_mechs_tab[5].me_name, SUN_CKM_BLOWFISH_ECB,
+ CRYPTO_MAX_MECH_NAME);
+ kcf_cipher_mechs_tab[5].me_threshold = kcf_bf_threshold;
+
+ (void) strncpy(kcf_cipher_mechs_tab[6].me_name, SUN_CKM_AES_CBC,
+ CRYPTO_MAX_MECH_NAME);
+ kcf_cipher_mechs_tab[6].me_threshold = kcf_aes_threshold;
+
+ (void) strncpy(kcf_cipher_mechs_tab[7].me_name, SUN_CKM_AES_ECB,
+ CRYPTO_MAX_MECH_NAME);
+ kcf_cipher_mechs_tab[7].me_threshold = kcf_aes_threshold;
+
+ (void) strncpy(kcf_cipher_mechs_tab[8].me_name, SUN_CKM_RC4,
+ CRYPTO_MAX_MECH_NAME);
+ kcf_cipher_mechs_tab[8].me_threshold = kcf_rc4_threshold;
+
+
+ /* 4 HMACs */
+ (void) strncpy(kcf_mac_mechs_tab[0].me_name, SUN_CKM_MD5_HMAC,
+ CRYPTO_MAX_MECH_NAME);
+ kcf_mac_mechs_tab[0].me_threshold = kcf_md5_threshold;
+
+ (void) strncpy(kcf_mac_mechs_tab[1].me_name, SUN_CKM_MD5_HMAC_GENERAL,
+ CRYPTO_MAX_MECH_NAME);
+ kcf_mac_mechs_tab[1].me_threshold = kcf_md5_threshold;
+
+ (void) strncpy(kcf_mac_mechs_tab[2].me_name, SUN_CKM_SHA1_HMAC,
+ CRYPTO_MAX_MECH_NAME);
+ kcf_mac_mechs_tab[2].me_threshold = kcf_sha1_threshold;
+
+ (void) strncpy(kcf_mac_mechs_tab[3].me_name, SUN_CKM_SHA1_HMAC_GENERAL,
+ CRYPTO_MAX_MECH_NAME);
+ kcf_mac_mechs_tab[3].me_threshold = kcf_sha1_threshold;
+
+
+ /* 1 random number generation pseudo mechanism */
+ (void) strncpy(kcf_misc_mechs_tab[0].me_name, SUN_RANDOM,
+ CRYPTO_MAX_MECH_NAME);
+
+ kcf_mech_hash = mod_hash_create_strhash_nodtr("kcf mech2id hash",
+ kcf_mech_hash_size, mod_hash_null_valdtor);
+
+ for (class = KCF_FIRST_OPSCLASS; class <= KCF_LAST_OPSCLASS; class++) {
+ max = kcf_mech_tabs_tab[class].met_size;
+ me_tab = kcf_mech_tabs_tab[class].met_tab;
+ for (i = 0; i < max; i++) {
+ mutex_init(&(me_tab[i].me_mutex), NULL,
+ MUTEX_DEFAULT, NULL);
+ if (me_tab[i].me_name[0] != 0) {
+ me_tab[i].me_mechid = KCF_MECHID(class, i);
+ (void) mod_hash_insert(kcf_mech_hash,
+ (mod_hash_key_t)me_tab[i].me_name,
+ (mod_hash_val_t)&(me_tab[i].me_mechid));
+ }
+ }
+ }
+}
+
+/*
+ * kcf_create_mech_entry()
+ *
+ * Arguments:
+ * . The class of mechanism.
+ * . the name of the new mechanism.
+ *	. The name of the new mechanism.
+ * Description:
+ * Creates a new mech_entry for a mechanism not yet known to the
+ * framework.
+ * This routine is called by kcf_add_mech_provider, which is
+ * in turn invoked for each mechanism supported by a provider.
+ *	The 'class' argument depends on the crypto_func_group_t bitmask
+ *	in the registering provider's mech_info struct for this mechanism.
+ *	When there is ambiguity in the mapping between the crypto_func_group_t
+ *	and a class (dual ops, ...), KCF_MISC_CLASS should be used.
+ *
+ * Context:
+ * User context only.
+ *
+ * Returns:
+ * KCF_INVALID_MECH_CLASS or KCF_INVALID_MECH_NAME if the class or
+ * the mechname is bogus.
+ * KCF_MECH_TAB_FULL when there is no room left in the mech. tabs.
+ * KCF_SUCCESS otherwise.
+ */
+static int
+kcf_create_mech_entry(kcf_ops_class_t class, char *mechname)
+{
+ crypto_mech_type_t mt;
+ kcf_mech_entry_t *me_tab;
+ int i = 0, size;
+
+ if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS))
+ return (KCF_INVALID_MECH_CLASS);
+
+ if ((mechname == NULL) || (mechname[0] == 0))
+ return (KCF_INVALID_MECH_NAME);
+ /*
+ * First check if the mechanism is already in one of the tables.
+ * The mech_entry could be in another class.
+ */
+ mutex_enter(&kcf_mech_tabs_lock);
+ mt = kcf_mech_hash_find(mechname);
+ if (mt != CRYPTO_MECH_INVALID) {
+		/* Nothing to do, regardless of the suggested class. */
+ mutex_exit(&kcf_mech_tabs_lock);
+ return (KCF_SUCCESS);
+ }
+ /* Now take the next unused mech entry in the class's tab */
+ me_tab = kcf_mech_tabs_tab[class].met_tab;
+ size = kcf_mech_tabs_tab[class].met_size;
+
+ while (i < size) {
+ mutex_enter(&(me_tab[i].me_mutex));
+ if (me_tab[i].me_name[0] == 0) {
+ /* Found an empty spot */
+ (void) strncpy(me_tab[i].me_name, mechname,
+ CRYPTO_MAX_MECH_NAME);
+ me_tab[i].me_name[CRYPTO_MAX_MECH_NAME-1] = '\0';
+ me_tab[i].me_mechid = KCF_MECHID(class, i);
+ /*
+			 * No a priori information about the new mechanism, so
+ * the threshold is set to zero.
+ */
+ me_tab[i].me_threshold = 0;
+
+ mutex_exit(&(me_tab[i].me_mutex));
+ /* Add the new mechanism to the hash table */
+ (void) mod_hash_insert(kcf_mech_hash,
+ (mod_hash_key_t)me_tab[i].me_name,
+ (mod_hash_val_t)&(me_tab[i].me_mechid));
+ break;
+ }
+ mutex_exit(&(me_tab[i].me_mutex));
+ i++;
+ }
+
+ mutex_exit(&kcf_mech_tabs_lock);
+
+ if (i == size) {
+ return (KCF_MECH_TAB_FULL);
+ }
+
+ return (KCF_SUCCESS);
+}
+
+/*
+ * kcf_add_mech_provider()
+ *
+ * Arguments:
+ *	. An index into the provider mechanism array
+ *	. A pointer to the provider descriptor
+ *	. Storage for the kcf_prov_mech_desc_t at which the entry was added.
+ *
+ * Description:
+ * Adds a new provider of a mechanism to the mechanism's mech_entry
+ * chain.
+ *
+ * Context:
+ * User context only.
+ *
+ * Returns:
+ * KCF_SUCCESS on success
+ * KCF_MECH_TAB_FULL otherwise.
+ */
+int
+kcf_add_mech_provider(short mech_indx,
+ kcf_provider_desc_t *prov_desc, kcf_prov_mech_desc_t **pmdpp)
+{
+ int error;
+ kcf_mech_entry_t *mech_entry = NULL;
+ crypto_mech_info_t *mech_info;
+ crypto_mech_type_t kcf_mech_type, mt;
+ kcf_prov_mech_desc_t *prov_mech, *prov_mech2;
+ crypto_func_group_t simple_fg_mask, dual_fg_mask;
+ crypto_mech_info_t *dmi;
+ crypto_mech_info_list_t *mil, *mil2;
+ kcf_mech_entry_t *me;
+ int i;
+
+ ASSERT(prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
+
+ mech_info = &prov_desc->pd_mechanisms[mech_indx];
+
+ /*
+ * A mechanism belongs to exactly one mechanism table.
+ * Find the class corresponding to the function group flag of
+ * the mechanism.
+ */
+ kcf_mech_type = kcf_mech_hash_find(mech_info->cm_mech_name);
+ if (kcf_mech_type == CRYPTO_MECH_INVALID) {
+ crypto_func_group_t fg = mech_info->cm_func_group_mask;
+ kcf_ops_class_t class;
+
+ if (fg & CRYPTO_FG_DIGEST || fg & CRYPTO_FG_DIGEST_ATOMIC)
+ class = KCF_DIGEST_CLASS;
+ else if (fg & CRYPTO_FG_ENCRYPT || fg & CRYPTO_FG_DECRYPT ||
+ fg & CRYPTO_FG_ENCRYPT_ATOMIC ||
+ fg & CRYPTO_FG_DECRYPT_ATOMIC)
+ class = KCF_CIPHER_CLASS;
+ else if (fg & CRYPTO_FG_MAC || fg & CRYPTO_FG_MAC_ATOMIC)
+ class = KCF_MAC_CLASS;
+ else if (fg & CRYPTO_FG_SIGN || fg & CRYPTO_FG_VERIFY ||
+ fg & CRYPTO_FG_SIGN_ATOMIC ||
+ fg & CRYPTO_FG_VERIFY_ATOMIC ||
+ fg & CRYPTO_FG_SIGN_RECOVER ||
+ fg & CRYPTO_FG_VERIFY_RECOVER)
+ class = KCF_SIGN_CLASS;
+ else if (fg & CRYPTO_FG_GENERATE ||
+ fg & CRYPTO_FG_GENERATE_KEY_PAIR ||
+ fg & CRYPTO_FG_WRAP || fg & CRYPTO_FG_UNWRAP ||
+ fg & CRYPTO_FG_DERIVE)
+ class = KCF_KEYOPS_CLASS;
+ else
+ class = KCF_MISC_CLASS;
+
+ /*
+ * Attempt to create a new mech_entry for the specified
+ * mechanism. kcf_create_mech_entry() can handle the case
+ * where such an entry already exists.
+ */
+ if ((error = kcf_create_mech_entry(class,
+ mech_info->cm_mech_name)) != KCF_SUCCESS) {
+ return (error);
+ }
+ /* get the KCF mech type that was assigned to the mechanism */
+ kcf_mech_type = kcf_mech_hash_find(mech_info->cm_mech_name);
+ ASSERT(kcf_mech_type != CRYPTO_MECH_INVALID);
+ }
+
+ error = kcf_get_mech_entry(kcf_mech_type, &mech_entry);
+ ASSERT(error == KCF_SUCCESS);
+
+ /* allocate and initialize new kcf_prov_mech_desc */
+ prov_mech = kmem_zalloc(sizeof (kcf_prov_mech_desc_t), KM_SLEEP);
+ bcopy(mech_info, &prov_mech->pm_mech_info, sizeof (crypto_mech_info_t));
+ prov_mech->pm_prov_desc = prov_desc;
+ prov_desc->pd_mech_indx[KCF_MECH2CLASS(kcf_mech_type)]
+ [KCF_MECH2INDEX(kcf_mech_type)] = mech_indx;
+
+ KCF_PROV_REFHOLD(prov_desc);
+ KCF_PROV_IREFHOLD(prov_desc);
+
+ dual_fg_mask = mech_info->cm_func_group_mask & CRYPTO_FG_DUAL_MASK;
+
+ if (dual_fg_mask == ((crypto_func_group_t)0))
+ goto add_entry;
+
+ simple_fg_mask = (mech_info->cm_func_group_mask &
+ CRYPTO_FG_SIMPLEOP_MASK) | CRYPTO_FG_RANDOM;
+
+ for (i = 0; i < prov_desc->pd_mech_list_count; i++) {
+ dmi = &prov_desc->pd_mechanisms[i];
+
+ /* skip self */
+ if (dmi->cm_mech_number == mech_info->cm_mech_number)
+ continue;
+
+ /* skip if not a dual operation mechanism */
+ if (!(dmi->cm_func_group_mask & dual_fg_mask) ||
+ (dmi->cm_func_group_mask & simple_fg_mask))
+ continue;
+
+ mt = kcf_mech_hash_find(dmi->cm_mech_name);
+ if (mt == CRYPTO_MECH_INVALID)
+ continue;
+
+ if (kcf_get_mech_entry(mt, &me) != KCF_SUCCESS)
+ continue;
+
+ mil = kmem_zalloc(sizeof (*mil), KM_SLEEP);
+ mil2 = kmem_zalloc(sizeof (*mil2), KM_SLEEP);
+
+ /*
+ * Ignore hard-coded entries in the mech table
+ * if the provider hasn't registered.
+ */
+ mutex_enter(&me->me_mutex);
+ if (me->me_hw_prov_chain == NULL && me->me_sw_prov == NULL) {
+ mutex_exit(&me->me_mutex);
+ kmem_free(mil, sizeof (*mil));
+ kmem_free(mil2, sizeof (*mil2));
+ continue;
+ }
+
+ /*
+ * Add other dual mechanisms that have registered
+ * with the framework to this mechanism's
+ * cross-reference list.
+ */
+ mil->ml_mech_info = *dmi; /* struct assignment */
+ mil->ml_kcf_mechid = mt;
+
+ /* add to head of list */
+ mil->ml_next = prov_mech->pm_mi_list;
+ prov_mech->pm_mi_list = mil;
+
+ if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
+ prov_mech2 = me->me_hw_prov_chain;
+ else
+ prov_mech2 = me->me_sw_prov;
+
+ if (prov_mech2 == NULL) {
+ kmem_free(mil2, sizeof (*mil2));
+ mutex_exit(&me->me_mutex);
+ continue;
+ }
+
+ /*
+ * Update all other cross-reference lists by
+ * adding this new mechanism.
+ */
+ while (prov_mech2 != NULL) {
+ if (prov_mech2->pm_prov_desc == prov_desc) {
+ /* struct assignment */
+ mil2->ml_mech_info = *mech_info;
+ mil2->ml_kcf_mechid = kcf_mech_type;
+
+ /* add to head of list */
+ mil2->ml_next = prov_mech2->pm_mi_list;
+ prov_mech2->pm_mi_list = mil2;
+ break;
+ }
+ prov_mech2 = prov_mech2->pm_next;
+ }
+ if (prov_mech2 == NULL)
+ kmem_free(mil2, sizeof (*mil2));
+
+ mutex_exit(&me->me_mutex);
+ }
+
+add_entry:
+ /*
+ * Add new kcf_prov_mech_desc at the front of HW providers
+ * chain.
+ */
+ switch (prov_desc->pd_prov_type) {
+
+ case CRYPTO_HW_PROVIDER:
+ mutex_enter(&mech_entry->me_mutex);
+ prov_mech->pm_me = mech_entry;
+ prov_mech->pm_next = mech_entry->me_hw_prov_chain;
+ mech_entry->me_hw_prov_chain = prov_mech;
+ mech_entry->me_num_hwprov++;
+ mutex_exit(&mech_entry->me_mutex);
+ break;
+
+ case CRYPTO_SW_PROVIDER:
+ mutex_enter(&mech_entry->me_mutex);
+ if (mech_entry->me_sw_prov != NULL) {
+ /*
+ * There is already a SW provider for this mechanism.
+ * Since we allow only one SW provider per mechanism,
+ * report this condition.
+ */
+ cmn_err(CE_WARN, "The cryptographic software provider "
+ "\"%s\" will not be used for %s. The provider "
+ "\"%s\" will be used for this mechanism "
+ "instead.", prov_desc->pd_description,
+ mech_info->cm_mech_name,
+ mech_entry->me_sw_prov->pm_prov_desc->
+ pd_description);
+ KCF_PROV_REFRELE(prov_desc);
+ kmem_free(prov_mech, sizeof (kcf_prov_mech_desc_t));
+ prov_mech = NULL;
+ } else {
+ /*
+ * Set the provider as the software provider for
+ * this mechanism.
+ */
+ mech_entry->me_sw_prov = prov_mech;
+
+ /* We'll wrap around after 4 billion registrations! */
+ mech_entry->me_gen_swprov = kcf_gen_swprov++;
+ }
+ mutex_exit(&mech_entry->me_mutex);
+ break;
+ default:
+ break;
+ }
+
+ *pmdpp = prov_mech;
+
+ return (KCF_SUCCESS);
+}
+
+/*
+ * kcf_remove_mech_provider()
+ *
+ * Arguments:
+ * . mech_name: the name of the mechanism.
+ * . prov_desc: The provider descriptor
+ *
+ * Description:
+ * Removes a provider from the chain of provider descriptors.
+ * The provider is made unavailable to kernel consumers for the specified
+ * mechanism.
+ *
+ * Context:
+ * User context only.
+ */
+void
+kcf_remove_mech_provider(char *mech_name, kcf_provider_desc_t *prov_desc)
+{
+ crypto_mech_type_t mech_type;
+ kcf_prov_mech_desc_t *prov_mech = NULL, *prov_chain;
+ kcf_prov_mech_desc_t **prev_entry_next;
+ kcf_mech_entry_t *mech_entry;
+ crypto_mech_info_list_t *mil, *mil2, *next, **prev_next;
+
+ ASSERT(prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
+
+ /* get the KCF mech type that was assigned to the mechanism */
+ if ((mech_type = kcf_mech_hash_find(mech_name)) ==
+ CRYPTO_MECH_INVALID) {
+ /*
+ * Provider was not allowed for this mech due to policy or
+ * configuration.
+ */
+ return;
+ }
+
+ /* get a ptr to the mech_entry that was created */
+ if (kcf_get_mech_entry(mech_type, &mech_entry) != KCF_SUCCESS) {
+ /*
+ * Provider was not allowed for this mech due to policy or
+ * configuration.
+ */
+ return;
+ }
+
+ mutex_enter(&mech_entry->me_mutex);
+
+ switch (prov_desc->pd_prov_type) {
+
+ case CRYPTO_HW_PROVIDER:
+ /* find the provider in the mech_entry chain */
+ prev_entry_next = &mech_entry->me_hw_prov_chain;
+ prov_mech = mech_entry->me_hw_prov_chain;
+ while (prov_mech != NULL &&
+ prov_mech->pm_prov_desc != prov_desc) {
+ prev_entry_next = &prov_mech->pm_next;
+ prov_mech = prov_mech->pm_next;
+ }
+
+ if (prov_mech == NULL) {
+ /* entry not found, simply return */
+ mutex_exit(&mech_entry->me_mutex);
+ return;
+ }
+
+ /* remove provider entry from mech_entry chain */
+ *prev_entry_next = prov_mech->pm_next;
+ ASSERT(mech_entry->me_num_hwprov > 0);
+ mech_entry->me_num_hwprov--;
+ break;
+
+ case CRYPTO_SW_PROVIDER:
+ if (mech_entry->me_sw_prov == NULL ||
+ mech_entry->me_sw_prov->pm_prov_desc != prov_desc) {
+ /* not the software provider for this mechanism */
+ mutex_exit(&mech_entry->me_mutex);
+ return;
+ }
+ prov_mech = mech_entry->me_sw_prov;
+ mech_entry->me_sw_prov = NULL;
+ break;
+ default:
+ break;
+ }
+
+ mutex_exit(&mech_entry->me_mutex);
+
+ /* Free the dual ops cross-reference lists */
+ mil = prov_mech->pm_mi_list;
+ while (mil != NULL) {
+ next = mil->ml_next;
+ if (kcf_get_mech_entry(mil->ml_kcf_mechid,
+ &mech_entry) != KCF_SUCCESS) {
+ mil = next;
+ continue;
+ }
+
+ mutex_enter(&mech_entry->me_mutex);
+ if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
+ prov_chain = mech_entry->me_hw_prov_chain;
+ else
+ prov_chain = mech_entry->me_sw_prov;
+
+ while (prov_chain != NULL) {
+ if (prov_chain->pm_prov_desc == prov_desc) {
+ prev_next = &prov_chain->pm_mi_list;
+ mil2 = prov_chain->pm_mi_list;
+ while (mil2 != NULL &&
+ mil2->ml_kcf_mechid != mech_type) {
+ prev_next = &mil2->ml_next;
+ mil2 = mil2->ml_next;
+ }
+ if (mil2 != NULL) {
+ *prev_next = mil2->ml_next;
+ kmem_free(mil2, sizeof (*mil2));
+ }
+ break;
+ }
+ prov_chain = prov_chain->pm_next;
+ }
+
+ mutex_exit(&mech_entry->me_mutex);
+ kmem_free(mil, sizeof (crypto_mech_info_list_t));
+ mil = next;
+ }
+
+ /* free entry */
+ KCF_PROV_REFRELE(prov_mech->pm_prov_desc);
+ KCF_PROV_IREFRELE(prov_mech->pm_prov_desc);
+ kmem_free(prov_mech, sizeof (kcf_prov_mech_desc_t));
+}
+
+/*
+ * kcf_get_mech_entry()
+ *
+ * Arguments:
+ * . The framework mechanism type
+ * . Storage for the mechanism entry
+ *
+ * Description:
+ * Retrieves the mechanism entry for the mech.
+ *
+ * Context:
+ * User and interrupt contexts.
+ *
+ * Returns:
+ * KCF_INVALID_MECH_NUMBER if the mechanism type is invalid.
+ * KCF_SUCCESS otherwise.
+ */
+int
+kcf_get_mech_entry(crypto_mech_type_t mech_type, kcf_mech_entry_t **mep)
+{
+ kcf_ops_class_t class;
+ int index;
+ kcf_mech_entry_tab_t *me_tab;
+
+ ASSERT(mep != NULL);
+
+ class = KCF_MECH2CLASS(mech_type);
+
+ if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS)) {
+ /* the caller won't need to know it's an invalid class */
+ return (KCF_INVALID_MECH_NUMBER);
+ }
+
+ me_tab = &kcf_mech_tabs_tab[class];
+ index = KCF_MECH2INDEX(mech_type);
+
+ if ((index < 0) || (index >= me_tab->met_size)) {
+ return (KCF_INVALID_MECH_NUMBER);
+ }
+
+ *mep = &((me_tab->met_tab)[index]);
+
+ return (KCF_SUCCESS);
+}
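+
+/*
+ * Illustrative sketch (not part of the original framework code): a
+ * framework mechanism id round-trips through the class/index encoding
+ * used above. The actual macro definitions live in the mech tabs
+ * header; this only shows the intended relationship.
+ *
+ *	crypto_mech_type_t mt = KCF_MECHID(KCF_CIPHER_CLASS, 3);
+ *
+ *	ASSERT(KCF_MECH2CLASS(mt) == KCF_CIPHER_CLASS);
+ *	ASSERT(KCF_MECH2INDEX(mt) == 3);
+ */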
+
+/* CURRENTLY UNSUPPORTED: attempting to load the module if it isn't found */
+/*
+ * Lookup the hash table for an entry that matches the mechname.
+ * In the original Illumos implementation, if there were no hardware or
+ * software providers for the mechanism but there was an unloaded software
+ * provider, this routine would attempt to load it (and keep it loaded by
+ * setting MOD_NOAUTOUNLOAD). Module loading is not supported in this
+ * port, so this routine simply performs the hash lookup.
+ */
+crypto_mech_type_t
+crypto_mech2id_common(char *mechname, boolean_t load_module)
+{
+ crypto_mech_type_t mt = kcf_mech_hash_find(mechname);
+ return (mt);
+}
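+
+/*
+ * Illustrative usage sketch (the mechanism name shown is an assumption,
+ * not necessarily one registered with this port): a consumer resolving
+ * a mechanism name to its framework id.
+ *
+ *	crypto_mech_type_t mt;
+ *
+ *	mt = crypto_mech2id_common("CKM_AES_CCM", B_TRUE);
+ *	if (mt == CRYPTO_MECH_INVALID)
+ *		return (CRYPTO_MECHANISM_INVALID);
+ */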
diff --git a/module/icp/core/kcf_prov_lib.c b/module/icp/core/kcf_prov_lib.c
new file mode 100644
index 000000000..dd4cd086d
--- /dev/null
+++ b/module/icp/core/kcf_prov_lib.c
@@ -0,0 +1,229 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/zfs_context.h>
+#include <modes/modes.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/impl.h>
+
+/*
+ * Utility routine to apply the command, 'cmd', to the
+ * data in the uio structure.
+ */
+int
+crypto_uio_data(crypto_data_t *data, uchar_t *buf, int len, cmd_type_t cmd,
+ void *digest_ctx, void (*update)(void))
+{
+ uio_t *uiop = data->cd_uio;
+ off_t offset = data->cd_offset;
+ size_t length = len;
+ uint_t vec_idx;
+ size_t cur_len;
+ uchar_t *datap;
+
+ ASSERT(data->cd_format == CRYPTO_DATA_UIO);
+ if (uiop->uio_segflg != UIO_SYSSPACE) {
+ return (CRYPTO_ARGUMENTS_BAD);
+ }
+
+ /*
+ * Jump to the first iovec containing data to be
+ * processed.
+ */
+ for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
+ offset >= uiop->uio_iov[vec_idx].iov_len;
+ offset -= uiop->uio_iov[vec_idx++].iov_len)
+ ;
+
+ if (vec_idx == uiop->uio_iovcnt) {
+ /*
+ * The caller specified an offset that is larger than
+ * the total size of the buffers it provided.
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+
+ while (vec_idx < uiop->uio_iovcnt && length > 0) {
+ cur_len = MIN(uiop->uio_iov[vec_idx].iov_len -
+ offset, length);
+
+ datap = (uchar_t *)(uiop->uio_iov[vec_idx].iov_base +
+ offset);
+ switch (cmd) {
+ case COPY_FROM_DATA:
+ bcopy(datap, buf, cur_len);
+ buf += cur_len;
+ break;
+ case COPY_TO_DATA:
+ bcopy(buf, datap, cur_len);
+ buf += cur_len;
+ break;
+ case COMPARE_TO_DATA:
+ if (bcmp(datap, buf, cur_len))
+ return (CRYPTO_SIGNATURE_INVALID);
+ buf += cur_len;
+ break;
+ case MD5_DIGEST_DATA:
+ case SHA1_DIGEST_DATA:
+ case SHA2_DIGEST_DATA:
+ case GHASH_DATA:
+ return (CRYPTO_ARGUMENTS_BAD);
+ }
+
+ length -= cur_len;
+ vec_idx++;
+ offset = 0;
+ }
+
+ if (vec_idx == uiop->uio_iovcnt && length > 0) {
+ /*
+		 * The end of the specified iovecs was reached but
+ * the length requested could not be processed.
+ */
+ switch (cmd) {
+ case COPY_TO_DATA:
+ data->cd_length = len;
+ return (CRYPTO_BUFFER_TOO_SMALL);
+ default:
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+ }
+
+ return (CRYPTO_SUCCESS);
+}
+
+int
+crypto_put_output_data(uchar_t *buf, crypto_data_t *output, int len)
+{
+ switch (output->cd_format) {
+ case CRYPTO_DATA_RAW:
+ if (output->cd_raw.iov_len < len) {
+ output->cd_length = len;
+ return (CRYPTO_BUFFER_TOO_SMALL);
+ }
+ bcopy(buf, (uchar_t *)(output->cd_raw.iov_base +
+ output->cd_offset), len);
+ break;
+
+ case CRYPTO_DATA_UIO:
+ return (crypto_uio_data(output, buf, len,
+ COPY_TO_DATA, NULL, NULL));
+ default:
+ return (CRYPTO_ARGUMENTS_BAD);
+ }
+
+ return (CRYPTO_SUCCESS);
+}
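+
+/*
+ * Illustrative usage sketch (hypothetical caller, not part of this
+ * file): copying a computed digest into whatever output buffer the
+ * consumer supplied. The routine above handles both raw and uio
+ * backed crypto_data_t buffers.
+ *
+ *	uchar_t digest[32];	// e.g. a SHA-256 sized result
+ *	int rv;
+ *
+ *	rv = crypto_put_output_data(digest, output, sizeof (digest));
+ *	if (rv == CRYPTO_BUFFER_TOO_SMALL)
+ *		return (rv);	// output->cd_length holds the needed size
+ */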
+
+int
+crypto_update_iov(void *ctx, crypto_data_t *input, crypto_data_t *output,
+ int (*cipher)(void *, caddr_t, size_t, crypto_data_t *),
+ void (*copy_block)(uint8_t *, uint64_t *))
+{
+ common_ctx_t *common_ctx = ctx;
+ int rv;
+
+ if (input->cd_miscdata != NULL) {
+ copy_block((uint8_t *)input->cd_miscdata,
+ &common_ctx->cc_iv[0]);
+ }
+
+ if (input->cd_raw.iov_len < input->cd_length)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ rv = (cipher)(ctx, input->cd_raw.iov_base + input->cd_offset,
+ input->cd_length, (input == output) ? NULL : output);
+
+ return (rv);
+}
+
+int
+crypto_update_uio(void *ctx, crypto_data_t *input, crypto_data_t *output,
+ int (*cipher)(void *, caddr_t, size_t, crypto_data_t *),
+ void (*copy_block)(uint8_t *, uint64_t *))
+{
+ common_ctx_t *common_ctx = ctx;
+ uio_t *uiop = input->cd_uio;
+ off_t offset = input->cd_offset;
+ size_t length = input->cd_length;
+ uint_t vec_idx;
+ size_t cur_len;
+
+ if (input->cd_miscdata != NULL) {
+ copy_block((uint8_t *)input->cd_miscdata,
+ &common_ctx->cc_iv[0]);
+ }
+
+ if (input->cd_uio->uio_segflg != UIO_SYSSPACE) {
+ return (CRYPTO_ARGUMENTS_BAD);
+ }
+
+ /*
+ * Jump to the first iovec containing data to be
+ * processed.
+ */
+ for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
+ offset >= uiop->uio_iov[vec_idx].iov_len;
+ offset -= uiop->uio_iov[vec_idx++].iov_len)
+ ;
+ if (vec_idx == uiop->uio_iovcnt) {
+ /*
+ * The caller specified an offset that is larger than the
+ * total size of the buffers it provided.
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+
+ /*
+ * Now process the iovecs.
+ */
+ while (vec_idx < uiop->uio_iovcnt && length > 0) {
+ cur_len = MIN(uiop->uio_iov[vec_idx].iov_len -
+ offset, length);
+
+ (cipher)(ctx, uiop->uio_iov[vec_idx].iov_base + offset,
+ cur_len, (input == output) ? NULL : output);
+
+ length -= cur_len;
+ vec_idx++;
+ offset = 0;
+ }
+
+ if (vec_idx == uiop->uio_iovcnt && length > 0) {
+ /*
+ * The end of the specified iovec's was reached but
+		 * The end of the specified iovecs was reached but
+		 * the length requested could not be processed, i.e.
+		 * the caller asked to process more data than it provided.
+
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+
+ return (CRYPTO_SUCCESS);
+}
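+
+/*
+ * Illustrative sketch of how a block cipher mode might wire its
+ * contiguous-data callback through the helper above. The callback
+ * names here are assumptions for illustration, not definitions from
+ * this file:
+ *
+ *	rv = crypto_update_uio(aes_ctx, input, output,
+ *	    aes_encrypt_contiguous_blocks, aes_copy_block64);
+ */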
diff --git a/module/icp/core/kcf_prov_tabs.c b/module/icp/core/kcf_prov_tabs.c
new file mode 100644
index 000000000..dca0fc103
--- /dev/null
+++ b/module/icp/core/kcf_prov_tabs.c
@@ -0,0 +1,638 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * This file is part of the core Kernel Cryptographic Framework.
+ * It implements the management of tables of providers. Entries are
+ * added and removed when cryptographic providers register with
+ * and unregister from the framework, respectively. The KCF scheduler
+ * and the ioctl pseudo driver consult these tables to obtain the
+ * list of available providers.
+ *
+ * The provider table is indexed by crypto_provider_id_t. Each
+ * element of the table contains a pointer to a provider descriptor,
+ * or NULL if the entry is free.
+ *
+ * This file also implements helper functions to allocate and free
+ * provider descriptors.
+ */
+
+#include <sys/zfs_context.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/impl.h>
+#include <sys/crypto/sched_impl.h>
+#include <sys/crypto/spi.h>
+
+#define KCF_MAX_PROVIDERS 512 /* max number of providers */
+
+/*
+ * Prov_tab is an array of providers which is updated when
+ * a crypto provider registers with kcf. The provider calls the
+ * SPI routine, crypto_register_provider(), which in turn calls
+ * kcf_prov_tab_add_provider().
+ *
+ * A provider unregisters by calling crypto_unregister_provider()
+ * which triggers the removal of the prov_tab entry.
+ * It also calls kcf_remove_mech_provider().
+ *
+ * prov_tab entries are not updated from kcf.conf or by cryptoadm(1M).
+ */
+static kcf_provider_desc_t **prov_tab = NULL;
+static kmutex_t prov_tab_mutex; /* ensure exclusive access to the table */
+static uint_t prov_tab_num = 0; /* number of providers in table */
+static uint_t prov_tab_max = KCF_MAX_PROVIDERS;
+
+void
+kcf_prov_tab_destroy(void)
+{
+	if (prov_tab)
+		kmem_free(prov_tab, prov_tab_max *
+		    sizeof (kcf_provider_desc_t *));
+}
+
+/*
+ * Initialize a mutex and the KCF providers table, prov_tab.
+ * The providers table is dynamically allocated with prov_tab_max entries.
+ * Called from kcf module _init().
+ */
+void
+kcf_prov_tab_init(void)
+{
+ mutex_init(&prov_tab_mutex, NULL, MUTEX_DEFAULT, NULL);
+
+ prov_tab = kmem_zalloc(prov_tab_max * sizeof (kcf_provider_desc_t *),
+ KM_SLEEP);
+}
+
+/*
+ * Add a provider to the provider table. If no free entry can be found
+ * for the new provider, returns CRYPTO_HOST_MEMORY. Otherwise, add
+ * the provider to the table, initialize the pd_prov_id field
+ * of the specified provider descriptor to the index in that table,
+ * and return CRYPTO_SUCCESS. Note that a REFHOLD is done on the
+ * provider when pointed to by a table entry.
+ */
+int
+kcf_prov_tab_add_provider(kcf_provider_desc_t *prov_desc)
+{
+ uint_t i;
+
+ ASSERT(prov_tab != NULL);
+
+ mutex_enter(&prov_tab_mutex);
+
+ /* find free slot in providers table */
+ for (i = 1; i < KCF_MAX_PROVIDERS && prov_tab[i] != NULL; i++)
+ ;
+ if (i == KCF_MAX_PROVIDERS) {
+		/* ran out of provider table entries */
+		mutex_exit(&prov_tab_mutex);
+		cmn_err(CE_WARN, "out of provider table entries");
+ return (CRYPTO_HOST_MEMORY);
+ }
+
+ /* initialize entry */
+ prov_tab[i] = prov_desc;
+ KCF_PROV_REFHOLD(prov_desc);
+ KCF_PROV_IREFHOLD(prov_desc);
+ prov_tab_num++;
+
+ mutex_exit(&prov_tab_mutex);
+
+ /* update provider descriptor */
+ prov_desc->pd_prov_id = i;
+
+ /*
+ * The KCF-private provider handle is defined as the internal
+ * provider id.
+ */
+ prov_desc->pd_kcf_prov_handle =
+ (crypto_kcf_provider_handle_t)prov_desc->pd_prov_id;
+
+ return (CRYPTO_SUCCESS);
+}
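+
+/*
+ * Illustrative registration flow (a sketch, not the authoritative
+ * sequence): crypto_register_provider() builds a descriptor with
+ * kcf_alloc_provider_desc() and hands it to the table:
+ *
+ *	desc = kcf_alloc_provider_desc(info);
+ *	... fill in desc from info ...
+ *	if (kcf_prov_tab_add_provider(desc) != CRYPTO_SUCCESS)
+ *		kcf_free_provider_desc(desc);
+ *	... desc->pd_prov_id now indexes prov_tab ...
+ */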
+
+/*
+ * Remove the provider specified by its id. A REFRELE is done on the
+ * corresponding provider descriptor before this function returns.
+ * Returns CRYPTO_INVALID_PROVIDER_ID if the provider id is not valid.
+ */
+int
+kcf_prov_tab_rem_provider(crypto_provider_id_t prov_id)
+{
+ kcf_provider_desc_t *prov_desc;
+
+ ASSERT(prov_tab != NULL);
+ ASSERT(prov_tab_num >= 0);
+
+ /*
+ * Validate provider id, since it can be specified by a 3rd-party
+ * provider.
+ */
+
+ mutex_enter(&prov_tab_mutex);
+ if (prov_id >= KCF_MAX_PROVIDERS ||
+ ((prov_desc = prov_tab[prov_id]) == NULL)) {
+ mutex_exit(&prov_tab_mutex);
+ return (CRYPTO_INVALID_PROVIDER_ID);
+ }
+ mutex_exit(&prov_tab_mutex);
+
+ /*
+ * The provider id must remain valid until the associated provider
+ * descriptor is freed. For this reason, we simply release our
+ * reference to the descriptor here. When the reference count
+ * reaches zero, kcf_free_provider_desc() will be invoked and
+ * the associated entry in the providers table will be released
+ * at that time.
+ */
+
+ KCF_PROV_REFRELE(prov_desc);
+ KCF_PROV_IREFRELE(prov_desc);
+
+ return (CRYPTO_SUCCESS);
+}
+
+/*
+ * Returns the provider descriptor corresponding to the specified
+ * provider id. A REFHOLD is done on the descriptor before it is
+ * returned to the caller. It is the responsibility of the caller
+ * to do a REFRELE once it is done with the provider descriptor.
+ */
+kcf_provider_desc_t *
+kcf_prov_tab_lookup(crypto_provider_id_t prov_id)
+{
+ kcf_provider_desc_t *prov_desc;
+
+ mutex_enter(&prov_tab_mutex);
+
+ prov_desc = prov_tab[prov_id];
+
+ if (prov_desc == NULL) {
+ mutex_exit(&prov_tab_mutex);
+ return (NULL);
+ }
+
+ KCF_PROV_REFHOLD(prov_desc);
+
+ mutex_exit(&prov_tab_mutex);
+
+ return (prov_desc);
+}
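+
+/*
+ * Illustrative usage sketch: the lookup returns the descriptor held,
+ * so every successful call must be paired with a release:
+ *
+ *	kcf_provider_desc_t *pd;
+ *
+ *	if ((pd = kcf_prov_tab_lookup(prov_id)) == NULL)
+ *		return (CRYPTO_INVALID_PROVIDER_ID);
+ *	... use pd ...
+ *	KCF_PROV_REFRELE(pd);
+ */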
+
+static void
+allocate_ops_v1(crypto_ops_t *src, crypto_ops_t *dst, uint_t *mech_list_count)
+{
+ if (src->co_control_ops != NULL)
+ dst->co_control_ops = kmem_alloc(sizeof (crypto_control_ops_t),
+ KM_SLEEP);
+
+ if (src->co_digest_ops != NULL)
+ dst->co_digest_ops = kmem_alloc(sizeof (crypto_digest_ops_t),
+ KM_SLEEP);
+
+ if (src->co_cipher_ops != NULL)
+ dst->co_cipher_ops = kmem_alloc(sizeof (crypto_cipher_ops_t),
+ KM_SLEEP);
+
+ if (src->co_mac_ops != NULL)
+ dst->co_mac_ops = kmem_alloc(sizeof (crypto_mac_ops_t),
+ KM_SLEEP);
+
+ if (src->co_sign_ops != NULL)
+ dst->co_sign_ops = kmem_alloc(sizeof (crypto_sign_ops_t),
+ KM_SLEEP);
+
+ if (src->co_verify_ops != NULL)
+ dst->co_verify_ops = kmem_alloc(sizeof (crypto_verify_ops_t),
+ KM_SLEEP);
+
+ if (src->co_dual_ops != NULL)
+ dst->co_dual_ops = kmem_alloc(sizeof (crypto_dual_ops_t),
+ KM_SLEEP);
+
+ if (src->co_dual_cipher_mac_ops != NULL)
+ dst->co_dual_cipher_mac_ops = kmem_alloc(
+ sizeof (crypto_dual_cipher_mac_ops_t), KM_SLEEP);
+
+ if (src->co_random_ops != NULL) {
+ dst->co_random_ops = kmem_alloc(
+ sizeof (crypto_random_number_ops_t), KM_SLEEP);
+
+ /*
+ * Allocate storage to store the array of supported mechanisms
+ * specified by provider. We allocate extra mechanism storage
+ * if the provider has random_ops since we keep an internal
+ * mechanism, SUN_RANDOM, in this case.
+ */
+ (*mech_list_count)++;
+ }
+
+ if (src->co_session_ops != NULL)
+ dst->co_session_ops = kmem_alloc(sizeof (crypto_session_ops_t),
+ KM_SLEEP);
+
+ if (src->co_object_ops != NULL)
+ dst->co_object_ops = kmem_alloc(sizeof (crypto_object_ops_t),
+ KM_SLEEP);
+
+ if (src->co_key_ops != NULL)
+ dst->co_key_ops = kmem_alloc(sizeof (crypto_key_ops_t),
+ KM_SLEEP);
+
+ if (src->co_provider_ops != NULL)
+ dst->co_provider_ops = kmem_alloc(
+ sizeof (crypto_provider_management_ops_t), KM_SLEEP);
+
+ if (src->co_ctx_ops != NULL)
+ dst->co_ctx_ops = kmem_alloc(sizeof (crypto_ctx_ops_t),
+ KM_SLEEP);
+}
+
+static void
+allocate_ops_v2(crypto_ops_t *src, crypto_ops_t *dst)
+{
+ if (src->co_mech_ops != NULL)
+ dst->co_mech_ops = kmem_alloc(sizeof (crypto_mech_ops_t),
+ KM_SLEEP);
+}
+
+static void
+allocate_ops_v3(crypto_ops_t *src, crypto_ops_t *dst)
+{
+ if (src->co_nostore_key_ops != NULL)
+ dst->co_nostore_key_ops =
+ kmem_alloc(sizeof (crypto_nostore_key_ops_t), KM_SLEEP);
+}
+
+/*
+ * Allocate a provider descriptor. mech_list_count specifies the
+ * number of mechanisms supported by the provider, and is used
+ * to allocate storage for the mechanism table.
+ * This function may sleep while allocating memory, which is OK
+ * since it is invoked from user context during provider registration.
+ */
+kcf_provider_desc_t *
+kcf_alloc_provider_desc(crypto_provider_info_t *info)
+{
+ int i, j;
+ kcf_provider_desc_t *desc;
+ uint_t mech_list_count = info->pi_mech_list_count;
+ crypto_ops_t *src_ops = info->pi_ops_vector;
+
+ desc = kmem_zalloc(sizeof (kcf_provider_desc_t), KM_SLEEP);
+
+ /*
+ * pd_description serves two purposes
+ * - Appears as a blank padded PKCS#11 style string, that will be
+ * returned to applications in CK_SLOT_INFO.slotDescription.
+ * This means that we should not have a null character in the
+ * first CRYPTO_PROVIDER_DESCR_MAX_LEN bytes.
+ * - Appears as a null-terminated string that can be used by
+ * other kcf routines.
+ *
+	 * So we allocate enough room for one extra null terminator,
+	 * which keeps everyone happy.
+ */
+ desc->pd_description = kmem_alloc(CRYPTO_PROVIDER_DESCR_MAX_LEN + 1,
+ KM_SLEEP);
+ (void) memset(desc->pd_description, ' ',
+ CRYPTO_PROVIDER_DESCR_MAX_LEN);
+ desc->pd_description[CRYPTO_PROVIDER_DESCR_MAX_LEN] = '\0';
+
+ /*
+ * Since the framework does not require the ops vector specified
+ * by the providers during registration to be persistent,
+	 * KCF allocates its own storage into which the ops
+	 * vectors are copied.
+ */
+ desc->pd_ops_vector = kmem_zalloc(sizeof (crypto_ops_t), KM_SLEEP);
+
+ if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
+ allocate_ops_v1(src_ops, desc->pd_ops_vector, &mech_list_count);
+ if (info->pi_interface_version >= CRYPTO_SPI_VERSION_2)
+ allocate_ops_v2(src_ops, desc->pd_ops_vector);
+ if (info->pi_interface_version == CRYPTO_SPI_VERSION_3)
+ allocate_ops_v3(src_ops, desc->pd_ops_vector);
+ }
+
+ desc->pd_mech_list_count = mech_list_count;
+ desc->pd_mechanisms = kmem_zalloc(sizeof (crypto_mech_info_t) *
+ mech_list_count, KM_SLEEP);
+ for (i = 0; i < KCF_OPS_CLASSSIZE; i++)
+ for (j = 0; j < KCF_MAXMECHTAB; j++)
+ desc->pd_mech_indx[i][j] = KCF_INVALID_INDX;
+
+ desc->pd_prov_id = KCF_PROVID_INVALID;
+ desc->pd_state = KCF_PROV_ALLOCATED;
+
+ mutex_init(&desc->pd_lock, NULL, MUTEX_DEFAULT, NULL);
+ cv_init(&desc->pd_resume_cv, NULL, CV_DEFAULT, NULL);
+ cv_init(&desc->pd_remove_cv, NULL, CV_DEFAULT, NULL);
+
+ return (desc);
+}
+
+/*
+ * Called by KCF_PROV_REFRELE when a provider's reference count drops
+ * to zero. We free the descriptor when the last reference is released.
+ * However, for software providers, we do not free it when there is an
+ * unregister thread waiting. We signal that thread in this case and
+ * that thread is responsible for freeing the descriptor.
+ */
+void
+kcf_provider_zero_refcnt(kcf_provider_desc_t *desc)
+{
+ mutex_enter(&desc->pd_lock);
+ switch (desc->pd_prov_type) {
+ case CRYPTO_SW_PROVIDER:
+ if (desc->pd_state == KCF_PROV_REMOVED ||
+ desc->pd_state == KCF_PROV_DISABLED) {
+ desc->pd_state = KCF_PROV_FREED;
+ cv_broadcast(&desc->pd_remove_cv);
+ mutex_exit(&desc->pd_lock);
+ break;
+ }
+ /* FALLTHRU */
+
+ case CRYPTO_HW_PROVIDER:
+ case CRYPTO_LOGICAL_PROVIDER:
+ mutex_exit(&desc->pd_lock);
+ kcf_free_provider_desc(desc);
+ }
+}
+
+/*
+ * Free a provider descriptor.
+ */
+void
+kcf_free_provider_desc(kcf_provider_desc_t *desc)
+{
+ if (desc == NULL)
+ return;
+
+ mutex_enter(&prov_tab_mutex);
+ if (desc->pd_prov_id != KCF_PROVID_INVALID) {
+ /* release the associated providers table entry */
+ ASSERT(prov_tab[desc->pd_prov_id] != NULL);
+ prov_tab[desc->pd_prov_id] = NULL;
+ prov_tab_num--;
+ }
+ mutex_exit(&prov_tab_mutex);
+
+ /* free the kernel memory associated with the provider descriptor */
+
+ if (desc->pd_description != NULL)
+ kmem_free(desc->pd_description,
+ CRYPTO_PROVIDER_DESCR_MAX_LEN + 1);
+
+ if (desc->pd_ops_vector != NULL) {
+
+ if (desc->pd_ops_vector->co_control_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_control_ops,
+ sizeof (crypto_control_ops_t));
+
+ if (desc->pd_ops_vector->co_digest_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_digest_ops,
+ sizeof (crypto_digest_ops_t));
+
+ if (desc->pd_ops_vector->co_cipher_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_cipher_ops,
+ sizeof (crypto_cipher_ops_t));
+
+ if (desc->pd_ops_vector->co_mac_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_mac_ops,
+ sizeof (crypto_mac_ops_t));
+
+ if (desc->pd_ops_vector->co_sign_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_sign_ops,
+ sizeof (crypto_sign_ops_t));
+
+ if (desc->pd_ops_vector->co_verify_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_verify_ops,
+ sizeof (crypto_verify_ops_t));
+
+ if (desc->pd_ops_vector->co_dual_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_dual_ops,
+ sizeof (crypto_dual_ops_t));
+
+ if (desc->pd_ops_vector->co_dual_cipher_mac_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_dual_cipher_mac_ops,
+ sizeof (crypto_dual_cipher_mac_ops_t));
+
+ if (desc->pd_ops_vector->co_random_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_random_ops,
+ sizeof (crypto_random_number_ops_t));
+
+ if (desc->pd_ops_vector->co_session_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_session_ops,
+ sizeof (crypto_session_ops_t));
+
+ if (desc->pd_ops_vector->co_object_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_object_ops,
+ sizeof (crypto_object_ops_t));
+
+ if (desc->pd_ops_vector->co_key_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_key_ops,
+ sizeof (crypto_key_ops_t));
+
+ if (desc->pd_ops_vector->co_provider_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_provider_ops,
+ sizeof (crypto_provider_management_ops_t));
+
+ if (desc->pd_ops_vector->co_ctx_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_ctx_ops,
+ sizeof (crypto_ctx_ops_t));
+
+ if (desc->pd_ops_vector->co_mech_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_mech_ops,
+ sizeof (crypto_mech_ops_t));
+
+ if (desc->pd_ops_vector->co_nostore_key_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_nostore_key_ops,
+ sizeof (crypto_nostore_key_ops_t));
+
+ kmem_free(desc->pd_ops_vector, sizeof (crypto_ops_t));
+ }
+
+ if (desc->pd_mechanisms != NULL)
+ /* free the memory associated with the mechanism info's */
+ kmem_free(desc->pd_mechanisms, sizeof (crypto_mech_info_t) *
+ desc->pd_mech_list_count);
+
+ if (desc->pd_sched_info.ks_taskq != NULL)
+ taskq_destroy(desc->pd_sched_info.ks_taskq);
+
+ kmem_free(desc, sizeof (kcf_provider_desc_t));
+}
+
+/*
+ * Returns an array of hardware and logical provider descriptors,
+ * a.k.a. the PKCS#11 slot list. A REFHOLD is done on each descriptor
+ * before the array is returned. The entire table can be freed by
+ * calling kcf_free_provider_tab().
+ */
+int
+kcf_get_slot_list(uint_t *count, kcf_provider_desc_t ***array,
+ boolean_t unverified)
+{
+ kcf_provider_desc_t *prov_desc;
+ kcf_provider_desc_t **p = NULL;
+ char *last;
+ uint_t cnt = 0;
+ uint_t i, j;
+ int rval = CRYPTO_SUCCESS;
+ size_t n, final_size;
+
+ /* count the providers */
+ mutex_enter(&prov_tab_mutex);
+ for (i = 0; i < KCF_MAX_PROVIDERS; i++) {
+ if ((prov_desc = prov_tab[i]) != NULL &&
+ ((prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER &&
+ (prov_desc->pd_flags & CRYPTO_HIDE_PROVIDER) == 0) ||
+ prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)) {
+ if (KCF_IS_PROV_USABLE(prov_desc) ||
+ (unverified && KCF_IS_PROV_UNVERIFIED(prov_desc))) {
+ cnt++;
+ }
+ }
+ }
+ mutex_exit(&prov_tab_mutex);
+
+ if (cnt == 0)
+ goto out;
+
+ n = cnt * sizeof (kcf_provider_desc_t *);
+again:
+ p = kmem_zalloc(n, KM_SLEEP);
+
+ /* pointer to last entry in the array */
+ last = (char *)&p[cnt-1];
+
+ mutex_enter(&prov_tab_mutex);
+ /* fill the slot list */
+ for (i = 0, j = 0; i < KCF_MAX_PROVIDERS; i++) {
+ if ((prov_desc = prov_tab[i]) != NULL &&
+ ((prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER &&
+ (prov_desc->pd_flags & CRYPTO_HIDE_PROVIDER) == 0) ||
+ prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)) {
+ if (KCF_IS_PROV_USABLE(prov_desc) ||
+ (unverified && KCF_IS_PROV_UNVERIFIED(prov_desc))) {
+ if ((char *)&p[j] > last) {
+ mutex_exit(&prov_tab_mutex);
+ kcf_free_provider_tab(cnt, p);
+ n = n << 1;
+ cnt = cnt << 1;
+ goto again;
+ }
+ p[j++] = prov_desc;
+ KCF_PROV_REFHOLD(prov_desc);
+ }
+ }
+ }
+ mutex_exit(&prov_tab_mutex);
+
+ final_size = j * sizeof (kcf_provider_desc_t *);
+ cnt = j;
+ ASSERT(final_size <= n);
+
+ /* check if buffer we allocated is too large */
+ if (final_size < n) {
+ char *final_buffer = NULL;
+
+ if (final_size > 0) {
+ final_buffer = kmem_alloc(final_size, KM_SLEEP);
+ bcopy(p, final_buffer, final_size);
+ }
+ kmem_free(p, n);
+ p = (kcf_provider_desc_t **)final_buffer;
+ }
+out:
+ *count = cnt;
+ *array = p;
+ return (rval);
+}
+
+/*
+ * Free an array of hardware provider descriptors. A REFRELE
+ * is done on each descriptor before the table is freed.
+ */
+void
+kcf_free_provider_tab(uint_t count, kcf_provider_desc_t **array)
+{
+ kcf_provider_desc_t *prov_desc;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if ((prov_desc = array[i]) != NULL) {
+ KCF_PROV_REFRELE(prov_desc);
+ }
+ }
+ kmem_free(array, count * sizeof (kcf_provider_desc_t *));
+}
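+
+/*
+ * Illustrative usage sketch: the slot list is obtained and freed as a
+ * pair, since each returned descriptor is held on the caller's behalf:
+ *
+ *	uint_t count;
+ *	kcf_provider_desc_t **slots;
+ *
+ *	if (kcf_get_slot_list(&count, &slots, B_FALSE) == CRYPTO_SUCCESS &&
+ *	    slots != NULL) {
+ *		... walk slots[0 .. count - 1] ...
+ *		kcf_free_provider_tab(count, slots);
+ *	}
+ */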
+
+/*
+ * Returns in the location pointed to by pd a pointer to the descriptor
+ * for the software provider for the specified mechanism.
+ * The provider descriptor is returned held and it is the caller's
+ * responsibility to release it when done. The mechanism entry
+ * is returned if the optional argument mep is non-NULL.
+ *
+ * Returns one of the CRYPTO_* error codes on failure, and
+ * CRYPTO_SUCCESS on success.
+ */
+int
+kcf_get_sw_prov(crypto_mech_type_t mech_type, kcf_provider_desc_t **pd,
+ kcf_mech_entry_t **mep, boolean_t log_warn)
+{
+ kcf_mech_entry_t *me;
+
+ /* get the mechanism entry for this mechanism */
+ if (kcf_get_mech_entry(mech_type, &me) != KCF_SUCCESS)
+ return (CRYPTO_MECHANISM_INVALID);
+
+ /*
+ * Get the software provider for this mechanism.
+ * Lock the mech_entry until we grab the 'pd'.
+ */
+ mutex_enter(&me->me_mutex);
+
+ if (me->me_sw_prov == NULL ||
+ (*pd = me->me_sw_prov->pm_prov_desc) == NULL) {
+ /* no SW provider for this mechanism */
+ if (log_warn)
+ cmn_err(CE_WARN, "no SW provider for \"%s\"\n",
+ me->me_name);
+ mutex_exit(&me->me_mutex);
+ return (CRYPTO_MECH_NOT_SUPPORTED);
+ }
+
+ KCF_PROV_REFHOLD(*pd);
+ mutex_exit(&me->me_mutex);
+
+ if (mep != NULL)
+ *mep = me;
+
+ return (CRYPTO_SUCCESS);
+}
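+
+/*
+ * Illustrative usage sketch: resolving the software provider for a
+ * mechanism and releasing the hold when done:
+ *
+ *	kcf_provider_desc_t *pd;
+ *	kcf_mech_entry_t *me;
+ *
+ *	if (kcf_get_sw_prov(mech_type, &pd, &me, B_TRUE) != CRYPTO_SUCCESS)
+ *		return (CRYPTO_MECH_NOT_SUPPORTED);
+ *	... submit work to pd ...
+ *	KCF_PROV_REFRELE(pd);
+ */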
diff --git a/module/icp/core/kcf_sched.c b/module/icp/core/kcf_sched.c
new file mode 100644
index 000000000..8102d6675
--- /dev/null
+++ b/module/icp/core/kcf_sched.c
@@ -0,0 +1,1763 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * This file contains the core framework routines for the
+ * kernel cryptographic framework. These routines are at the
+ * middle layer, between the kernel API/ioctls and the SPI.
+ */
+
+#include <sys/zfs_context.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/impl.h>
+#include <sys/crypto/sched_impl.h>
+#include <sys/crypto/api.h>
+
+kcf_global_swq_t *gswq; /* Global software queue */
+
+/* Thread pool related variables */
+static kcf_pool_t *kcfpool; /* Thread pool of kcfd LWPs */
+int kcf_maxthreads = 2;
+int kcf_minthreads = 1;
+int kcf_thr_multiple = 2; /* Boot-time tunable for experimentation */
+static ulong_t kcf_idlethr_timeout;
+#define KCF_DEFAULT_THRTIMEOUT 60000000 /* 60 seconds */
+
+/* kmem caches used by the scheduler */
+static kmem_cache_t *kcf_sreq_cache;
+static kmem_cache_t *kcf_areq_cache;
+static kmem_cache_t *kcf_context_cache;
+
+/* Global request ID table */
+static kcf_reqid_table_t *kcf_reqid_table[REQID_TABLES];
+
+/* KCF stats. Not protected. */
+static kcf_stats_t kcf_ksdata = {
+ { "total threads in pool", KSTAT_DATA_UINT32},
+ { "idle threads in pool", KSTAT_DATA_UINT32},
+ { "min threads in pool", KSTAT_DATA_UINT32},
+ { "max threads in pool", KSTAT_DATA_UINT32},
+ { "requests in gswq", KSTAT_DATA_UINT32},
+ { "max requests in gswq", KSTAT_DATA_UINT32},
+ { "threads for HW taskq", KSTAT_DATA_UINT32},
+ { "minalloc for HW taskq", KSTAT_DATA_UINT32},
+ { "maxalloc for HW taskq", KSTAT_DATA_UINT32}
+};
+
+static kstat_t *kcf_misc_kstat = NULL;
+ulong_t kcf_swprov_hndl = 0;
+
+static kcf_areq_node_t *kcf_areqnode_alloc(kcf_provider_desc_t *,
+ kcf_context_t *, crypto_call_req_t *, kcf_req_params_t *, boolean_t);
+static int kcf_disp_sw_request(kcf_areq_node_t *);
+static void process_req_hwp(void *);
+static int kcf_enqueue(kcf_areq_node_t *);
+static void kcfpool_alloc(void);
+static void kcf_reqid_delete(kcf_areq_node_t *areq);
+static crypto_req_id_t kcf_reqid_insert(kcf_areq_node_t *areq);
+static int kcf_misc_kstat_update(kstat_t *ksp, int rw);
+
+/*
+ * Create a new context.
+ */
+crypto_ctx_t *
+kcf_new_ctx(crypto_call_req_t *crq, kcf_provider_desc_t *pd,
+ crypto_session_id_t sid)
+{
+ crypto_ctx_t *ctx;
+ kcf_context_t *kcf_ctx;
+
+ kcf_ctx = kmem_cache_alloc(kcf_context_cache,
+ (crq == NULL) ? KM_SLEEP : KM_NOSLEEP);
+ if (kcf_ctx == NULL)
+ return (NULL);
+
+ /* initialize the context for the consumer */
+ kcf_ctx->kc_refcnt = 1;
+ kcf_ctx->kc_req_chain_first = NULL;
+ kcf_ctx->kc_req_chain_last = NULL;
+ kcf_ctx->kc_secondctx = NULL;
+ KCF_PROV_REFHOLD(pd);
+ kcf_ctx->kc_prov_desc = pd;
+ kcf_ctx->kc_sw_prov_desc = NULL;
+ kcf_ctx->kc_mech = NULL;
+
+ ctx = &kcf_ctx->kc_glbl_ctx;
+ ctx->cc_provider = pd->pd_prov_handle;
+ ctx->cc_session = sid;
+ ctx->cc_provider_private = NULL;
+ ctx->cc_framework_private = (void *)kcf_ctx;
+ ctx->cc_flags = 0;
+ ctx->cc_opstate = NULL;
+
+ return (ctx);
+}
+
+/*
+ * Allocate a new async request node.
+ *
+ * ictx - Framework private context pointer
+ * crq - Has callback function and argument. Should be non-NULL.
+ * req - The parameters to pass to the SPI
+ */
+static kcf_areq_node_t *
+kcf_areqnode_alloc(kcf_provider_desc_t *pd, kcf_context_t *ictx,
+ crypto_call_req_t *crq, kcf_req_params_t *req, boolean_t isdual)
+{
+ kcf_areq_node_t *arptr, *areq;
+
+ ASSERT(crq != NULL);
+ arptr = kmem_cache_alloc(kcf_areq_cache, KM_NOSLEEP);
+ if (arptr == NULL)
+ return (NULL);
+
+ arptr->an_state = REQ_ALLOCATED;
+ arptr->an_reqarg = *crq;
+ arptr->an_params = *req;
+ arptr->an_context = ictx;
+ arptr->an_isdual = isdual;
+
+ arptr->an_next = arptr->an_prev = NULL;
+ KCF_PROV_REFHOLD(pd);
+ arptr->an_provider = pd;
+ arptr->an_tried_plist = NULL;
+ arptr->an_refcnt = 1;
+ arptr->an_idnext = arptr->an_idprev = NULL;
+
+ /*
+	 * Requests for context-less operations do not use the
+	 * an_is_my_turn and an_ctxchain_next fields.
+ */
+ if (ictx == NULL)
+ return (arptr);
+
+ KCF_CONTEXT_REFHOLD(ictx);
+ /*
+ * Chain this request to the context.
+ */
+ mutex_enter(&ictx->kc_in_use_lock);
+ arptr->an_ctxchain_next = NULL;
+ if ((areq = ictx->kc_req_chain_last) == NULL) {
+ arptr->an_is_my_turn = B_TRUE;
+ ictx->kc_req_chain_last =
+ ictx->kc_req_chain_first = arptr;
+ } else {
+ ASSERT(ictx->kc_req_chain_first != NULL);
+ arptr->an_is_my_turn = B_FALSE;
+ /* Insert the new request to the end of the chain. */
+ areq->an_ctxchain_next = arptr;
+ ictx->kc_req_chain_last = arptr;
+ }
+ mutex_exit(&ictx->kc_in_use_lock);
+
+ return (arptr);
+}
+
+/*
+ * Queue the request node and do one of the following:
+ * - If there is an idle thread signal it to run.
+ * - If there is no idle thread and max running threads is not
+ * reached, signal the creator thread for more threads.
+ *
+ * If neither of the two conditions above is met, we don't need to do
+ * anything. The request will be picked up by one of the
+ * worker threads when one becomes available.
+ */
+static int
+kcf_disp_sw_request(kcf_areq_node_t *areq)
+{
+ int err;
+ int cnt = 0;
+
+ if ((err = kcf_enqueue(areq)) != 0)
+ return (err);
+
+ if (kcfpool->kp_idlethreads > 0) {
+ /* Signal an idle thread to run */
+ mutex_enter(&gswq->gs_lock);
+ cv_signal(&gswq->gs_cv);
+ mutex_exit(&gswq->gs_lock);
+
+ return (CRYPTO_QUEUED);
+ }
+
+ /*
+	 * We try to keep the number of unblocked running threads
+	 * at kcf_minthreads to reduce gs_lock contention.
+ */
+ cnt = kcf_minthreads -
+ (kcfpool->kp_threads - kcfpool->kp_blockedthreads);
+ if (cnt > 0) {
+ /*
+ * The following ensures the number of threads in pool
+ * does not exceed kcf_maxthreads.
+ */
+ cnt = MIN(cnt, kcf_maxthreads - (int)kcfpool->kp_threads);
+ if (cnt > 0) {
+ /* Signal the creator thread for more threads */
+ mutex_enter(&kcfpool->kp_user_lock);
+ if (!kcfpool->kp_signal_create_thread) {
+ kcfpool->kp_signal_create_thread = B_TRUE;
+ kcfpool->kp_nthrs = cnt;
+ cv_signal(&kcfpool->kp_user_cv);
+ }
+ mutex_exit(&kcfpool->kp_user_lock);
+ }
+ }
+
+ return (CRYPTO_QUEUED);
+}
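+
+/*
+ * Worked example for the thread-count arithmetic above, using the
+ * default tunables (kcf_minthreads = 1, kcf_maxthreads = 2): if
+ * kp_threads = 2 and kp_blockedthreads = 2, then
+ * cnt = 1 - (2 - 2) = 1, so a new thread looks warranted; but the
+ * clamp MIN(1, 2 - 2) = 0 means the pool is already at its maximum
+ * size, so the creator thread is not signaled.
+ */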
+
+/*
+ * This routine is called by the taskq associated with
+ * each hardware provider. We notify the kernel consumer
+ * via the callback routine in case of CRYPTO_SUCCESS or
+ * a failure.
+ *
+ * A request can be of type kcf_areq_node_t or of type
+ * kcf_sreq_node_t.
+ */
+static void
+process_req_hwp(void *ireq)
+{
+ int error = 0;
+ crypto_ctx_t *ctx;
+ kcf_call_type_t ctype;
+ kcf_provider_desc_t *pd;
+ kcf_areq_node_t *areq = (kcf_areq_node_t *)ireq;
+ kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)ireq;
+
+ pd = ((ctype = GET_REQ_TYPE(ireq)) == CRYPTO_SYNCH) ?
+ sreq->sn_provider : areq->an_provider;
+
+ /*
+ * Wait if flow control is in effect for the provider. A
+ * CRYPTO_PROVIDER_READY or CRYPTO_PROVIDER_FAILED
+ * notification will signal us. We also get signaled if
+ * the provider is unregistering.
+ */
+ if (pd->pd_state == KCF_PROV_BUSY) {
+ mutex_enter(&pd->pd_lock);
+ while (pd->pd_state == KCF_PROV_BUSY)
+ cv_wait(&pd->pd_resume_cv, &pd->pd_lock);
+ mutex_exit(&pd->pd_lock);
+ }
+
+ /*
+ * Bump the internal reference count while the request is being
+ * processed. This is how we know when it's safe to unregister
+ * a provider. This step must precede the pd_state check below.
+ */
+ KCF_PROV_IREFHOLD(pd);
+
+ /*
+ * Fail the request if the provider has failed. We return a
+ * recoverable error and the notified clients attempt any
+ * recovery. For async clients this is done in kcf_aop_done()
+ * and for sync clients it is done in the k-api routines.
+ */
+ if (pd->pd_state >= KCF_PROV_FAILED) {
+ error = CRYPTO_DEVICE_ERROR;
+ goto bail;
+ }
+
+ if (ctype == CRYPTO_SYNCH) {
+ mutex_enter(&sreq->sn_lock);
+ sreq->sn_state = REQ_INPROGRESS;
+ mutex_exit(&sreq->sn_lock);
+
+ ctx = sreq->sn_context ? &sreq->sn_context->kc_glbl_ctx : NULL;
+ error = common_submit_request(sreq->sn_provider, ctx,
+ sreq->sn_params, sreq);
+ } else {
+ kcf_context_t *ictx;
+ ASSERT(ctype == CRYPTO_ASYNCH);
+
+ /*
+ * We are in the per-hardware provider thread context and
+ * hence can sleep. Note that the caller would have done
+ * a taskq_dispatch(..., TQ_NOSLEEP) and would have returned.
+ */
+ ctx = (ictx = areq->an_context) ? &ictx->kc_glbl_ctx : NULL;
+
+ mutex_enter(&areq->an_lock);
+ /*
+ * We need to maintain ordering for multi-part requests.
+ * an_is_my_turn is set to B_TRUE initially for a request
+ * when it is enqueued and there are no other requests
+ * for that context. It is set later from kcf_aop_done() when
+ * the request before us in the chain of requests for the
+ * context completes. We get signaled at that point.
+ */
+ if (ictx != NULL) {
+ ASSERT(ictx->kc_prov_desc == areq->an_provider);
+
+ while (areq->an_is_my_turn == B_FALSE) {
+ cv_wait(&areq->an_turn_cv, &areq->an_lock);
+ }
+ }
+ areq->an_state = REQ_INPROGRESS;
+ mutex_exit(&areq->an_lock);
+
+ error = common_submit_request(areq->an_provider, ctx,
+ &areq->an_params, areq);
+ }
+
+bail:
+ if (error == CRYPTO_QUEUED) {
+ /*
+ * The request is queued by the provider and we should
+ * get a crypto_op_notification() from the provider later.
+ * We notify the consumer at that time.
+ */
+ return;
+ } else { /* CRYPTO_SUCCESS or other failure */
+ KCF_PROV_IREFRELE(pd);
+ if (ctype == CRYPTO_SYNCH)
+ kcf_sop_done(sreq, error);
+ else
+ kcf_aop_done(areq, error);
+ }
+}
+
+/*
+ * This routine checks if a request can be retried on another
+ * provider. If true, mech1 is initialized to point to the mechanism
+ * structure. mech2 is also initialized in case of a dual operation. fg
+ * is initialized to the correct crypto_func_group_t bit flag. They are
+ * initialized by this routine, so that the caller can pass them to a
+ * kcf_get_mech_provider() or kcf_get_dual_provider() with no further change.
+ *
+ * We check that the request is for an init or atomic routine and that
+ * it is for one of the operation groups used from the k-api.
+ */
+static boolean_t
+can_resubmit(kcf_areq_node_t *areq, crypto_mechanism_t **mech1,
+ crypto_mechanism_t **mech2, crypto_func_group_t *fg)
+{
+ kcf_req_params_t *params;
+ kcf_op_type_t optype;
+
+ params = &areq->an_params;
+ optype = params->rp_optype;
+
+ if (!(IS_INIT_OP(optype) || IS_ATOMIC_OP(optype)))
+ return (B_FALSE);
+
+ switch (params->rp_opgrp) {
+ case KCF_OG_DIGEST: {
+ kcf_digest_ops_params_t *dops = &params->rp_u.digest_params;
+
+ dops->do_mech.cm_type = dops->do_framework_mechtype;
+ *mech1 = &dops->do_mech;
+ *fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_DIGEST :
+ CRYPTO_FG_DIGEST_ATOMIC;
+ break;
+ }
+
+ case KCF_OG_MAC: {
+ kcf_mac_ops_params_t *mops = &params->rp_u.mac_params;
+
+ mops->mo_mech.cm_type = mops->mo_framework_mechtype;
+ *mech1 = &mops->mo_mech;
+ *fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_MAC :
+ CRYPTO_FG_MAC_ATOMIC;
+ break;
+ }
+
+ case KCF_OG_SIGN: {
+ kcf_sign_ops_params_t *sops = &params->rp_u.sign_params;
+
+ sops->so_mech.cm_type = sops->so_framework_mechtype;
+ *mech1 = &sops->so_mech;
+ switch (optype) {
+ case KCF_OP_INIT:
+ *fg = CRYPTO_FG_SIGN;
+ break;
+ case KCF_OP_ATOMIC:
+ *fg = CRYPTO_FG_SIGN_ATOMIC;
+ break;
+ default:
+ ASSERT(optype == KCF_OP_SIGN_RECOVER_ATOMIC);
+ *fg = CRYPTO_FG_SIGN_RECOVER_ATOMIC;
+ }
+ break;
+ }
+
+ case KCF_OG_VERIFY: {
+ kcf_verify_ops_params_t *vops = &params->rp_u.verify_params;
+
+ vops->vo_mech.cm_type = vops->vo_framework_mechtype;
+ *mech1 = &vops->vo_mech;
+ switch (optype) {
+ case KCF_OP_INIT:
+ *fg = CRYPTO_FG_VERIFY;
+ break;
+ case KCF_OP_ATOMIC:
+ *fg = CRYPTO_FG_VERIFY_ATOMIC;
+ break;
+ default:
+ ASSERT(optype == KCF_OP_VERIFY_RECOVER_ATOMIC);
+ *fg = CRYPTO_FG_VERIFY_RECOVER_ATOMIC;
+ }
+ break;
+ }
+
+ case KCF_OG_ENCRYPT: {
+ kcf_encrypt_ops_params_t *eops = &params->rp_u.encrypt_params;
+
+ eops->eo_mech.cm_type = eops->eo_framework_mechtype;
+ *mech1 = &eops->eo_mech;
+ *fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_ENCRYPT :
+ CRYPTO_FG_ENCRYPT_ATOMIC;
+ break;
+ }
+
+ case KCF_OG_DECRYPT: {
+ kcf_decrypt_ops_params_t *dcrops = &params->rp_u.decrypt_params;
+
+ dcrops->dop_mech.cm_type = dcrops->dop_framework_mechtype;
+ *mech1 = &dcrops->dop_mech;
+ *fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_DECRYPT :
+ CRYPTO_FG_DECRYPT_ATOMIC;
+ break;
+ }
+
+ case KCF_OG_ENCRYPT_MAC: {
+ kcf_encrypt_mac_ops_params_t *eops =
+ &params->rp_u.encrypt_mac_params;
+
+ eops->em_encr_mech.cm_type = eops->em_framework_encr_mechtype;
+ *mech1 = &eops->em_encr_mech;
+ eops->em_mac_mech.cm_type = eops->em_framework_mac_mechtype;
+ *mech2 = &eops->em_mac_mech;
+ *fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_ENCRYPT_MAC :
+ CRYPTO_FG_ENCRYPT_MAC_ATOMIC;
+ break;
+ }
+
+ case KCF_OG_MAC_DECRYPT: {
+ kcf_mac_decrypt_ops_params_t *dops =
+ &params->rp_u.mac_decrypt_params;
+
+ dops->md_mac_mech.cm_type = dops->md_framework_mac_mechtype;
+ *mech1 = &dops->md_mac_mech;
+ dops->md_decr_mech.cm_type = dops->md_framework_decr_mechtype;
+ *mech2 = &dops->md_decr_mech;
+ *fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_MAC_DECRYPT :
+ CRYPTO_FG_MAC_DECRYPT_ATOMIC;
+ break;
+ }
+
+ default:
+ return (B_FALSE);
+ }
+
+ return (B_TRUE);
+}
+
+/*
+ * This routine is called when a request to a provider has failed
+ * with a recoverable error. This routine tries to find another provider
+ * and dispatches the request to the new provider, if one is available.
+ * We reuse the request structure.
+ *
+ * A return value of NULL from kcf_get_mech_provider() indicates
+ * we have tried the last provider.
+ */
+static int
+kcf_resubmit_request(kcf_areq_node_t *areq)
+{
+ int error = CRYPTO_FAILED;
+ kcf_context_t *ictx;
+ kcf_provider_desc_t *old_pd;
+ kcf_provider_desc_t *new_pd;
+ crypto_mechanism_t *mech1 = NULL, *mech2 = NULL;
+ crypto_mech_type_t prov_mt1, prov_mt2;
+ crypto_func_group_t fg;
+
+ if (!can_resubmit(areq, &mech1, &mech2, &fg))
+ return (error);
+
+ old_pd = areq->an_provider;
+ /*
+ * Add old_pd to the list of providers already tried. We release
+ * the hold on old_pd (from the earlier kcf_get_mech_provider()) in
+ * kcf_free_triedlist().
+ */
+ if (kcf_insert_triedlist(&areq->an_tried_plist, old_pd,
+ KM_NOSLEEP) == NULL)
+ return (error);
+
+ if (mech1 && !mech2) {
+ new_pd = kcf_get_mech_provider(mech1->cm_type, NULL, &error,
+ areq->an_tried_plist, fg,
+ (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
+ } else {
+ ASSERT(mech1 != NULL && mech2 != NULL);
+
+ new_pd = kcf_get_dual_provider(mech1, mech2, NULL, &prov_mt1,
+ &prov_mt2, &error, areq->an_tried_plist, fg, fg,
+ (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
+ }
+
+ if (new_pd == NULL)
+ return (error);
+
+ /*
+ * We reuse the old context by resetting provider specific
+ * fields in it.
+ */
+ if ((ictx = areq->an_context) != NULL) {
+ crypto_ctx_t *ctx;
+
+ ASSERT(old_pd == ictx->kc_prov_desc);
+ KCF_PROV_REFRELE(ictx->kc_prov_desc);
+ KCF_PROV_REFHOLD(new_pd);
+ ictx->kc_prov_desc = new_pd;
+
+ ctx = &ictx->kc_glbl_ctx;
+ ctx->cc_provider = new_pd->pd_prov_handle;
+ ctx->cc_session = new_pd->pd_sid;
+ ctx->cc_provider_private = NULL;
+ }
+
+	/* We reuse areq by resetting the provider and context fields. */
+ KCF_PROV_REFRELE(old_pd);
+ KCF_PROV_REFHOLD(new_pd);
+ areq->an_provider = new_pd;
+ mutex_enter(&areq->an_lock);
+ areq->an_state = REQ_WAITING;
+ mutex_exit(&areq->an_lock);
+
+ switch (new_pd->pd_prov_type) {
+ case CRYPTO_SW_PROVIDER:
+ error = kcf_disp_sw_request(areq);
+ break;
+
+ case CRYPTO_HW_PROVIDER: {
+ taskq_t *taskq = new_pd->pd_sched_info.ks_taskq;
+
+ if (taskq_dispatch(taskq, process_req_hwp, areq, TQ_NOSLEEP) ==
+ (taskqid_t)0) {
+ error = CRYPTO_HOST_MEMORY;
+ } else {
+ error = CRYPTO_QUEUED;
+ }
+
+		break;
+	}
+
+	default:
+		break;
+	}
+
+ return (error);
+}
+
+static inline int
+EMPTY_TASKQ(taskq_t *tq)
+{
+#ifdef _KERNEL
+ return (tq->tq_lowest_id == tq->tq_next_id);
+#else
+ return (tq->tq_task.tqent_next == &tq->tq_task || tq->tq_active == 0);
+#endif
+}
+
+/*
+ * Routine called by both ioctl and k-api. The consumer should
+ * bundle the parameters into a kcf_req_params_t structure. A bunch
+ * of macros are available in ops_impl.h for this bundling. They are:
+ *
+ * KCF_WRAP_DIGEST_OPS_PARAMS()
+ * KCF_WRAP_MAC_OPS_PARAMS()
+ * KCF_WRAP_ENCRYPT_OPS_PARAMS()
+ * KCF_WRAP_DECRYPT_OPS_PARAMS() ... etc.
+ *
+ * It is the caller's responsibility to free the ctx argument when
+ * appropriate. See the KCF_CONTEXT_COND_RELEASE macro for details.
+ */
+int
+kcf_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
+ crypto_call_req_t *crq, kcf_req_params_t *params, boolean_t cont)
+{
+ int error = CRYPTO_SUCCESS;
+ kcf_areq_node_t *areq;
+ kcf_sreq_node_t *sreq;
+ kcf_context_t *kcf_ctx;
+ taskq_t *taskq = pd->pd_sched_info.ks_taskq;
+
+ kcf_ctx = ctx ? (kcf_context_t *)ctx->cc_framework_private : NULL;
+
+ /* Synchronous cases */
+ if (crq == NULL) {
+ switch (pd->pd_prov_type) {
+ case CRYPTO_SW_PROVIDER:
+ error = common_submit_request(pd, ctx, params,
+ KCF_RHNDL(KM_SLEEP));
+ break;
+
+ case CRYPTO_HW_PROVIDER:
+ /*
+ * Special case for CRYPTO_SYNCHRONOUS providers that
+ * never return a CRYPTO_QUEUED error. We skip any
+ * request allocation and call the SPI directly.
+ */
+ if ((pd->pd_flags & CRYPTO_SYNCHRONOUS) &&
+ EMPTY_TASKQ(taskq)) {
+ KCF_PROV_IREFHOLD(pd);
+ if (pd->pd_state == KCF_PROV_READY) {
+ error = common_submit_request(pd, ctx,
+ params, KCF_RHNDL(KM_SLEEP));
+ KCF_PROV_IREFRELE(pd);
+ ASSERT(error != CRYPTO_QUEUED);
+ break;
+ }
+ KCF_PROV_IREFRELE(pd);
+ }
+
+ sreq = kmem_cache_alloc(kcf_sreq_cache, KM_SLEEP);
+ sreq->sn_state = REQ_ALLOCATED;
+ sreq->sn_rv = CRYPTO_FAILED;
+ sreq->sn_params = params;
+
+ /*
+ * Note that we do not need to hold the context
+ * for synchronous case as the context will never
+ * become invalid underneath us. We do not need to hold
+ * the provider here either as the caller has a hold.
+ */
+ sreq->sn_context = kcf_ctx;
+ ASSERT(KCF_PROV_REFHELD(pd));
+ sreq->sn_provider = pd;
+
+ ASSERT(taskq != NULL);
+ /*
+ * Call the SPI directly if the taskq is empty and the
+ * provider is not busy, else dispatch to the taskq.
+ * Calling directly is fine as this is the synchronous
+ * case. This is unlike the asynchronous case where we
+ * must always dispatch to the taskq.
+ */
+ if (EMPTY_TASKQ(taskq) &&
+ pd->pd_state == KCF_PROV_READY) {
+ process_req_hwp(sreq);
+ } else {
+ /*
+ * We can not tell from taskq_dispatch() return
+ * value if we exceeded maxalloc. Hence the
+ * check here. Since we are allowed to wait in
+ * the synchronous case, we wait for the taskq
+ * to become empty.
+ */
+ if (taskq->tq_nalloc >= crypto_taskq_maxalloc) {
+ taskq_wait(taskq);
+ }
+
+ (void) taskq_dispatch(taskq, process_req_hwp,
+ sreq, TQ_SLEEP);
+ }
+
+ /*
+ * Wait for the notification to arrive,
+ * if the operation is not done yet.
+ * Bug# 4722589 will make the wait a cv_wait_sig().
+ */
+ mutex_enter(&sreq->sn_lock);
+ while (sreq->sn_state < REQ_DONE)
+ cv_wait(&sreq->sn_cv, &sreq->sn_lock);
+ mutex_exit(&sreq->sn_lock);
+
+ error = sreq->sn_rv;
+ kmem_cache_free(kcf_sreq_cache, sreq);
+
+ break;
+
+ default:
+ error = CRYPTO_FAILED;
+ break;
+ }
+
+ } else { /* Asynchronous cases */
+ switch (pd->pd_prov_type) {
+ case CRYPTO_SW_PROVIDER:
+ if (!(crq->cr_flag & CRYPTO_ALWAYS_QUEUE)) {
+ /*
+ * This case has less overhead since there is
+ * no switching of context.
+ */
+ error = common_submit_request(pd, ctx, params,
+ KCF_RHNDL(KM_NOSLEEP));
+ } else {
+ /*
+ * CRYPTO_ALWAYS_QUEUE is set. We need to
+ * queue the request and return.
+ */
+ areq = kcf_areqnode_alloc(pd, kcf_ctx, crq,
+ params, cont);
+ if (areq == NULL)
+ error = CRYPTO_HOST_MEMORY;
+ else {
+ if (!(crq->cr_flag
+ & CRYPTO_SKIP_REQID)) {
+ /*
+ * Set the request handle. This handle
+ * is used for any crypto_cancel_req(9f)
+ * calls from the consumer. We have to
+ * do this before dispatching the
+ * request.
+ */
+ crq->cr_reqid = kcf_reqid_insert(areq);
+ }
+
+ error = kcf_disp_sw_request(areq);
+ /*
+ * There is an error processing this
+ * request. Remove the handle and
+ * release the request structure.
+ */
+ if (error != CRYPTO_QUEUED) {
+ if (!(crq->cr_flag
+ & CRYPTO_SKIP_REQID))
+ kcf_reqid_delete(areq);
+ KCF_AREQ_REFRELE(areq);
+ }
+ }
+ }
+ break;
+
+ case CRYPTO_HW_PROVIDER:
+ /*
+ * We need to queue the request and return.
+ */
+ areq = kcf_areqnode_alloc(pd, kcf_ctx, crq, params,
+ cont);
+ if (areq == NULL) {
+ error = CRYPTO_HOST_MEMORY;
+ goto done;
+ }
+
+ ASSERT(taskq != NULL);
+ /*
+ * We can not tell from taskq_dispatch() return
+ * value if we exceeded maxalloc. Hence the check
+ * here.
+ */
+ if (taskq->tq_nalloc >= crypto_taskq_maxalloc) {
+ error = CRYPTO_BUSY;
+ KCF_AREQ_REFRELE(areq);
+ goto done;
+ }
+
+ if (!(crq->cr_flag & CRYPTO_SKIP_REQID)) {
+ /*
+ * Set the request handle. This handle is used
+ * for any crypto_cancel_req(9f) calls from the
+ * consumer. We have to do this before dispatching
+ * the request.
+ */
+ crq->cr_reqid = kcf_reqid_insert(areq);
+ }
+
+ if (taskq_dispatch(taskq,
+ process_req_hwp, areq, TQ_NOSLEEP) ==
+ (taskqid_t)0) {
+ error = CRYPTO_HOST_MEMORY;
+ if (!(crq->cr_flag & CRYPTO_SKIP_REQID))
+ kcf_reqid_delete(areq);
+ KCF_AREQ_REFRELE(areq);
+ } else {
+ error = CRYPTO_QUEUED;
+ }
+ break;
+
+ default:
+ error = CRYPTO_FAILED;
+ break;
+ }
+ }
+
+done:
+ return (error);
+}
+
+/*
+ * We're done with this framework context, so free it. Note that freeing
+ * framework context (kcf_context) frees the global context (crypto_ctx).
+ *
+ * The provider is responsible for freeing provider private context after a
+ * final or single operation and resetting the cc_provider_private field
+ * to NULL. It should do this before it notifies the framework of the
+ * completion. We still need to call KCF_PROV_FREE_CONTEXT to handle cases
+ * like crypto_cancel_ctx(9f).
+ */
+void
+kcf_free_context(kcf_context_t *kcf_ctx)
+{
+ kcf_provider_desc_t *pd = kcf_ctx->kc_prov_desc;
+ crypto_ctx_t *gctx = &kcf_ctx->kc_glbl_ctx;
+ kcf_context_t *kcf_secondctx = kcf_ctx->kc_secondctx;
+
+ /* Release the second context, if any */
+
+ if (kcf_secondctx != NULL)
+ KCF_CONTEXT_REFRELE(kcf_secondctx);
+
+ if (gctx->cc_provider_private != NULL) {
+ mutex_enter(&pd->pd_lock);
+ if (!KCF_IS_PROV_REMOVED(pd)) {
+ /*
+ * Increment the provider's internal refcnt so it
+ * doesn't unregister from the framework while
+ * we're calling the entry point.
+ */
+ KCF_PROV_IREFHOLD(pd);
+ mutex_exit(&pd->pd_lock);
+ (void) KCF_PROV_FREE_CONTEXT(pd, gctx);
+ KCF_PROV_IREFRELE(pd);
+ } else {
+ mutex_exit(&pd->pd_lock);
+ }
+ }
+
+ /* kcf_ctx->kc_prov_desc has a hold on pd */
+ KCF_PROV_REFRELE(kcf_ctx->kc_prov_desc);
+
+ /* check if this context is shared with a software provider */
+ if ((gctx->cc_flags & CRYPTO_INIT_OPSTATE) &&
+ kcf_ctx->kc_sw_prov_desc != NULL) {
+ KCF_PROV_REFRELE(kcf_ctx->kc_sw_prov_desc);
+ }
+
+ kmem_cache_free(kcf_context_cache, kcf_ctx);
+}
+
+/*
+ * Free the request after releasing all the holds.
+ */
+void
+kcf_free_req(kcf_areq_node_t *areq)
+{
+ KCF_PROV_REFRELE(areq->an_provider);
+ if (areq->an_context != NULL)
+ KCF_CONTEXT_REFRELE(areq->an_context);
+
+ if (areq->an_tried_plist != NULL)
+ kcf_free_triedlist(areq->an_tried_plist);
+ kmem_cache_free(kcf_areq_cache, areq);
+}
+
+/*
+ * Utility routine to remove a request from the chain of requests
+ * hanging off a context.
+ */
+void
+kcf_removereq_in_ctxchain(kcf_context_t *ictx, kcf_areq_node_t *areq)
+{
+ kcf_areq_node_t *cur, *prev;
+
+ /*
+ * Get context lock, search for areq in the chain and remove it.
+ */
+ ASSERT(ictx != NULL);
+ mutex_enter(&ictx->kc_in_use_lock);
+ prev = cur = ictx->kc_req_chain_first;
+
+ while (cur != NULL) {
+ if (cur == areq) {
+ if (prev == cur) {
+ if ((ictx->kc_req_chain_first =
+ cur->an_ctxchain_next) == NULL)
+ ictx->kc_req_chain_last = NULL;
+ } else {
+ if (cur == ictx->kc_req_chain_last)
+ ictx->kc_req_chain_last = prev;
+ prev->an_ctxchain_next = cur->an_ctxchain_next;
+ }
+
+ break;
+ }
+ prev = cur;
+ cur = cur->an_ctxchain_next;
+ }
+ mutex_exit(&ictx->kc_in_use_lock);
+}
+
+/*
+ * Remove the specified node from the global software queue.
+ *
+ * The caller must hold the queue lock and request lock (an_lock).
+ */
+void
+kcf_remove_node(kcf_areq_node_t *node)
+{
+ kcf_areq_node_t *nextp = node->an_next;
+ kcf_areq_node_t *prevp = node->an_prev;
+
+ if (nextp != NULL)
+ nextp->an_prev = prevp;
+ else
+ gswq->gs_last = prevp;
+
+ if (prevp != NULL)
+ prevp->an_next = nextp;
+ else
+ gswq->gs_first = nextp;
+
+ node->an_state = REQ_CANCELED;
+}
+
+/*
+ * Add the request node to the end of the global software queue.
+ *
+ * The caller should not hold the queue lock. Returns 0 if the
+ * request is successfully queued. Returns CRYPTO_BUSY if the limit
+ * on the number of jobs is exceeded.
+ */
+static int
+kcf_enqueue(kcf_areq_node_t *node)
+{
+ kcf_areq_node_t *tnode;
+
+ mutex_enter(&gswq->gs_lock);
+
+ if (gswq->gs_njobs >= gswq->gs_maxjobs) {
+ mutex_exit(&gswq->gs_lock);
+ return (CRYPTO_BUSY);
+ }
+
+ if (gswq->gs_last == NULL) {
+ gswq->gs_first = gswq->gs_last = node;
+ } else {
+ ASSERT(gswq->gs_last->an_next == NULL);
+ tnode = gswq->gs_last;
+ tnode->an_next = node;
+ gswq->gs_last = node;
+ node->an_prev = tnode;
+ }
+
+ gswq->gs_njobs++;
+
+ /* an_lock not needed here as we hold gs_lock */
+ node->an_state = REQ_WAITING;
+
+ mutex_exit(&gswq->gs_lock);
+
+ return (0);
+}
+
+/*
+ * kmem_cache_alloc constructor for sync request structure.
+ */
+/* ARGSUSED */
+static int
+kcf_sreq_cache_constructor(void *buf, void *cdrarg, int kmflags)
+{
+ kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)buf;
+
+ sreq->sn_type = CRYPTO_SYNCH;
+ cv_init(&sreq->sn_cv, NULL, CV_DEFAULT, NULL);
+ mutex_init(&sreq->sn_lock, NULL, MUTEX_DEFAULT, NULL);
+
+ return (0);
+}
+
+/* ARGSUSED */
+static void
+kcf_sreq_cache_destructor(void *buf, void *cdrarg)
+{
+ kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)buf;
+
+ mutex_destroy(&sreq->sn_lock);
+ cv_destroy(&sreq->sn_cv);
+}
+
+/*
+ * kmem_cache_alloc constructor for async request structure.
+ */
+/* ARGSUSED */
+static int
+kcf_areq_cache_constructor(void *buf, void *cdrarg, int kmflags)
+{
+ kcf_areq_node_t *areq = (kcf_areq_node_t *)buf;
+
+ areq->an_type = CRYPTO_ASYNCH;
+ areq->an_refcnt = 0;
+ mutex_init(&areq->an_lock, NULL, MUTEX_DEFAULT, NULL);
+ cv_init(&areq->an_done, NULL, CV_DEFAULT, NULL);
+ cv_init(&areq->an_turn_cv, NULL, CV_DEFAULT, NULL);
+
+ return (0);
+}
+
+/* ARGSUSED */
+static void
+kcf_areq_cache_destructor(void *buf, void *cdrarg)
+{
+ kcf_areq_node_t *areq = (kcf_areq_node_t *)buf;
+
+ ASSERT(areq->an_refcnt == 0);
+ mutex_destroy(&areq->an_lock);
+ cv_destroy(&areq->an_done);
+ cv_destroy(&areq->an_turn_cv);
+}
+
+/*
+ * kmem_cache_alloc constructor for kcf_context structure.
+ */
+/* ARGSUSED */
+static int
+kcf_context_cache_constructor(void *buf, void *cdrarg, int kmflags)
+{
+ kcf_context_t *kctx = (kcf_context_t *)buf;
+
+ kctx->kc_refcnt = 0;
+ mutex_init(&kctx->kc_in_use_lock, NULL, MUTEX_DEFAULT, NULL);
+
+ return (0);
+}
+
+/* ARGSUSED */
+static void
+kcf_context_cache_destructor(void *buf, void *cdrarg)
+{
+ kcf_context_t *kctx = (kcf_context_t *)buf;
+
+ ASSERT(kctx->kc_refcnt == 0);
+ mutex_destroy(&kctx->kc_in_use_lock);
+}
+
+void
+kcf_sched_destroy(void)
+{
+ int i;
+
+ if (kcf_misc_kstat)
+ kstat_delete(kcf_misc_kstat);
+
+ if (kcfpool)
+ kmem_free(kcfpool, sizeof (kcf_pool_t));
+
+ for (i = 0; i < REQID_TABLES; i++) {
+ if (kcf_reqid_table[i])
+ kmem_free(kcf_reqid_table[i],
+ sizeof (kcf_reqid_table_t));
+ }
+
+ if (gswq)
+ kmem_free(gswq, sizeof (kcf_global_swq_t));
+
+ if (kcf_context_cache)
+ kmem_cache_destroy(kcf_context_cache);
+ if (kcf_areq_cache)
+ kmem_cache_destroy(kcf_areq_cache);
+ if (kcf_sreq_cache)
+ kmem_cache_destroy(kcf_sreq_cache);
+}
+
+/*
+ * Creates and initializes all the structures needed by the framework.
+ */
+void
+kcf_sched_init(void)
+{
+ int i;
+ kcf_reqid_table_t *rt;
+
+ /*
+ * Create all the kmem caches needed by the framework. We set the
+	 * align argument to 64 to get a slab aligned on a 64-byte boundary,
+	 * and to have the objects (cache_chunksize) be a 64-byte multiple.
+ * This helps to avoid false sharing as this is the size of the
+ * CPU cache line.
+ */
+ kcf_sreq_cache = kmem_cache_create("kcf_sreq_cache",
+ sizeof (struct kcf_sreq_node), 64, kcf_sreq_cache_constructor,
+ kcf_sreq_cache_destructor, NULL, NULL, NULL, 0);
+
+ kcf_areq_cache = kmem_cache_create("kcf_areq_cache",
+ sizeof (struct kcf_areq_node), 64, kcf_areq_cache_constructor,
+ kcf_areq_cache_destructor, NULL, NULL, NULL, 0);
+
+ kcf_context_cache = kmem_cache_create("kcf_context_cache",
+ sizeof (struct kcf_context), 64, kcf_context_cache_constructor,
+ kcf_context_cache_destructor, NULL, NULL, NULL, 0);
+
+ gswq = kmem_alloc(sizeof (kcf_global_swq_t), KM_SLEEP);
+
+ mutex_init(&gswq->gs_lock, NULL, MUTEX_DEFAULT, NULL);
+ cv_init(&gswq->gs_cv, NULL, CV_DEFAULT, NULL);
+ gswq->gs_njobs = 0;
+ gswq->gs_maxjobs = kcf_maxthreads * crypto_taskq_maxalloc;
+ gswq->gs_first = gswq->gs_last = NULL;
+
+ /* Initialize the global reqid table */
+ for (i = 0; i < REQID_TABLES; i++) {
+ rt = kmem_zalloc(sizeof (kcf_reqid_table_t), KM_SLEEP);
+ kcf_reqid_table[i] = rt;
+ mutex_init(&rt->rt_lock, NULL, MUTEX_DEFAULT, NULL);
+ rt->rt_curid = i;
+ }
+
+ /* Allocate and initialize the thread pool */
+ kcfpool_alloc();
+
+ /* Initialize the event notification list variables */
+ mutex_init(&ntfy_list_lock, NULL, MUTEX_DEFAULT, NULL);
+ cv_init(&ntfy_list_cv, NULL, CV_DEFAULT, NULL);
+
+ /* Create the kcf kstat */
+ kcf_misc_kstat = kstat_create("kcf", 0, "framework_stats", "crypto",
+ KSTAT_TYPE_NAMED, sizeof (kcf_stats_t) / sizeof (kstat_named_t),
+ KSTAT_FLAG_VIRTUAL);
+
+ if (kcf_misc_kstat != NULL) {
+ kcf_misc_kstat->ks_data = &kcf_ksdata;
+ kcf_misc_kstat->ks_update = kcf_misc_kstat_update;
+ kstat_install(kcf_misc_kstat);
+ }
+}
+
+/*
+ * Signal the waiting sync client.
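+ * This wakes the thread blocked in the REQ_DONE wait loop of the
+ * synchronous submit path above.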
+ */
+void
+kcf_sop_done(kcf_sreq_node_t *sreq, int error)
+{
+ mutex_enter(&sreq->sn_lock);
+ sreq->sn_state = REQ_DONE;
+ sreq->sn_rv = error;
+ cv_signal(&sreq->sn_cv);
+ mutex_exit(&sreq->sn_lock);
+}
+
+/*
+ * Callback the async client with the operation status.
+ * We free the async request node and possibly the context.
+ * We also handle any chain of requests hanging off of
+ * the context.
+ */
+void
+kcf_aop_done(kcf_areq_node_t *areq, int error)
+{
+ kcf_op_type_t optype;
+ boolean_t skip_notify = B_FALSE;
+ kcf_context_t *ictx;
+ kcf_areq_node_t *nextreq;
+
+ /*
+	 * Handle recoverable errors. This has to be done before
+	 * doing anything else in this routine so that
+ * we do not change the state of the request.
+ */
+ if (error != CRYPTO_SUCCESS && IS_RECOVERABLE(error)) {
+ /*
+ * We try another provider, if one is available. Else
+ * we continue with the failure notification to the
+ * client.
+ */
+ if (kcf_resubmit_request(areq) == CRYPTO_QUEUED)
+ return;
+ }
+
+ mutex_enter(&areq->an_lock);
+ areq->an_state = REQ_DONE;
+ mutex_exit(&areq->an_lock);
+
+ optype = (&areq->an_params)->rp_optype;
+ if ((ictx = areq->an_context) != NULL) {
+ /*
+		 * After a request is removed from the request
+		 * queue, it still stays on the chain of requests
+		 * hanging off its context structure. It needs to be removed
+ * from this chain at this point.
+ */
+ mutex_enter(&ictx->kc_in_use_lock);
+ nextreq = areq->an_ctxchain_next;
+ if (nextreq != NULL) {
+ mutex_enter(&nextreq->an_lock);
+ nextreq->an_is_my_turn = B_TRUE;
+ cv_signal(&nextreq->an_turn_cv);
+ mutex_exit(&nextreq->an_lock);
+ }
+
+ ictx->kc_req_chain_first = nextreq;
+ if (nextreq == NULL)
+ ictx->kc_req_chain_last = NULL;
+ mutex_exit(&ictx->kc_in_use_lock);
+
+ if (IS_SINGLE_OP(optype) || IS_FINAL_OP(optype)) {
+ ASSERT(nextreq == NULL);
+ KCF_CONTEXT_REFRELE(ictx);
+ } else if (error != CRYPTO_SUCCESS && IS_INIT_OP(optype)) {
+ /*
+ * NOTE - We do not release the context in case of update
+ * operations. We require the consumer to free it explicitly,
+ * in case it wants to abandon an update operation. This is done
+ * as there may be mechanisms in ECB mode that can continue
+ * even if an operation on a block fails.
+ */
+ KCF_CONTEXT_REFRELE(ictx);
+ }
+ }
+
+ /* Deal with the internal continuation to this request first */
+
+ if (areq->an_isdual) {
+ kcf_dual_req_t *next_arg;
+ next_arg = (kcf_dual_req_t *)areq->an_reqarg.cr_callback_arg;
+ next_arg->kr_areq = areq;
+ KCF_AREQ_REFHOLD(areq);
+ areq->an_isdual = B_FALSE;
+
+ NOTIFY_CLIENT(areq, error);
+ return;
+ }
+
+ /*
+ * If CRYPTO_NOTIFY_OPDONE flag is set, we should notify
+ * always. If this flag is clear, we skip the notification
+ * provided there are no errors. We check this flag for only
+ * init or update operations. It is ignored for single, final or
+ * atomic operations.
+ */
+ skip_notify = (IS_UPDATE_OP(optype) || IS_INIT_OP(optype)) &&
+ (!(areq->an_reqarg.cr_flag & CRYPTO_NOTIFY_OPDONE)) &&
+ (error == CRYPTO_SUCCESS);
+
+ if (!skip_notify) {
+ NOTIFY_CLIENT(areq, error);
+ }
+
+ if (!(areq->an_reqarg.cr_flag & CRYPTO_SKIP_REQID))
+ kcf_reqid_delete(areq);
+
+ KCF_AREQ_REFRELE(areq);
+}
+
+/*
+ * Allocate the thread pool and initialize all the fields.
+ */
+static void
+kcfpool_alloc(void)
+{
+ kcfpool = kmem_alloc(sizeof (kcf_pool_t), KM_SLEEP);
+
+ kcfpool->kp_threads = kcfpool->kp_idlethreads = 0;
+ kcfpool->kp_blockedthreads = 0;
+ kcfpool->kp_signal_create_thread = B_FALSE;
+ kcfpool->kp_nthrs = 0;
+ kcfpool->kp_user_waiting = B_FALSE;
+
+ mutex_init(&kcfpool->kp_thread_lock, NULL, MUTEX_DEFAULT, NULL);
+ cv_init(&kcfpool->kp_nothr_cv, NULL, CV_DEFAULT, NULL);
+
+ mutex_init(&kcfpool->kp_user_lock, NULL, MUTEX_DEFAULT, NULL);
+ cv_init(&kcfpool->kp_user_cv, NULL, CV_DEFAULT, NULL);
+
+ kcf_idlethr_timeout = KCF_DEFAULT_THRTIMEOUT;
+}
+
+/*
+ * Insert the async request in the hash table after assigning it
+ * an ID. Returns the ID.
+ *
+ * The ID is used by the caller to pass as an argument to a
+ * cancel_req() routine later.
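+ *
+ * The node is linked at the head of a hash chain in a table selected
+ * by CPU id; kcf_reqid_delete() below unlinks it and wakes any waiters.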
+ */
+static crypto_req_id_t
+kcf_reqid_insert(kcf_areq_node_t *areq)
+{
+ int indx;
+ crypto_req_id_t id;
+ kcf_areq_node_t *headp;
+ kcf_reqid_table_t *rt =
+ kcf_reqid_table[CPU_SEQID & REQID_TABLE_MASK];
+
+ mutex_enter(&rt->rt_lock);
+
+ rt->rt_curid = id =
+ (rt->rt_curid - REQID_COUNTER_LOW) | REQID_COUNTER_HIGH;
+ SET_REQID(areq, id);
+ indx = REQID_HASH(id);
+ headp = areq->an_idnext = rt->rt_idhash[indx];
+ areq->an_idprev = NULL;
+ if (headp != NULL)
+ headp->an_idprev = areq;
+
+ rt->rt_idhash[indx] = areq;
+ mutex_exit(&rt->rt_lock);
+
+ return (id);
+}
+
+/*
+ * Delete the async request from the hash table.
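+ * Resetting the request ID and broadcasting an_done lets any
+ * crypto_cancel_req() waiter observe that the request completed.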
+ */
+static void
+kcf_reqid_delete(kcf_areq_node_t *areq)
+{
+ int indx;
+ kcf_areq_node_t *nextp, *prevp;
+ crypto_req_id_t id = GET_REQID(areq);
+ kcf_reqid_table_t *rt;
+
+ rt = kcf_reqid_table[id & REQID_TABLE_MASK];
+ indx = REQID_HASH(id);
+
+ mutex_enter(&rt->rt_lock);
+
+ nextp = areq->an_idnext;
+ prevp = areq->an_idprev;
+ if (nextp != NULL)
+ nextp->an_idprev = prevp;
+ if (prevp != NULL)
+ prevp->an_idnext = nextp;
+ else
+ rt->rt_idhash[indx] = nextp;
+
+ SET_REQID(areq, 0);
+ cv_broadcast(&areq->an_done);
+
+ mutex_exit(&rt->rt_lock);
+}
+
+/*
+ * Cancel a single asynchronous request.
+ *
+ * We guarantee that no problems will result from calling
+ * crypto_cancel_req() for a request which is either running, or
+ * has already completed. We remove the request from the queues
+ * if possible, and we wait for request completion if the
+ * request has been dispatched to a provider.
+ *
+ * Calling context:
+ * Can be called from user context only.
+ *
+ * NOTE: We acquire the following locks in this routine (in order):
+ * - rt_lock (kcf_reqid_table_t)
+ * - gswq->gs_lock
+ * - areq->an_lock
+ * - ictx->kc_in_use_lock (from kcf_removereq_in_ctxchain())
+ *
+ * This locking order MUST be maintained in code everywhere else.
+ */
+void
+crypto_cancel_req(crypto_req_id_t id)
+{
+ int indx;
+ kcf_areq_node_t *areq;
+ kcf_provider_desc_t *pd;
+ kcf_context_t *ictx;
+ kcf_reqid_table_t *rt;
+
+ rt = kcf_reqid_table[id & REQID_TABLE_MASK];
+ indx = REQID_HASH(id);
+
+ mutex_enter(&rt->rt_lock);
+ for (areq = rt->rt_idhash[indx]; areq; areq = areq->an_idnext) {
+ if (GET_REQID(areq) == id) {
+ /*
+ * We found the request. It is either still waiting
+ * in the framework queues or running at the provider.
+ */
+ pd = areq->an_provider;
+ ASSERT(pd != NULL);
+
+ switch (pd->pd_prov_type) {
+ case CRYPTO_SW_PROVIDER:
+ mutex_enter(&gswq->gs_lock);
+ mutex_enter(&areq->an_lock);
+
+ /* This request can be safely canceled. */
+ if (areq->an_state <= REQ_WAITING) {
+ /* Remove from gswq, global software queue. */
+ kcf_remove_node(areq);
+ if ((ictx = areq->an_context) != NULL)
+ kcf_removereq_in_ctxchain(ictx, areq);
+
+ mutex_exit(&areq->an_lock);
+ mutex_exit(&gswq->gs_lock);
+ mutex_exit(&rt->rt_lock);
+
+ /* Remove areq from hash table and free it. */
+ kcf_reqid_delete(areq);
+ KCF_AREQ_REFRELE(areq);
+ return;
+ }
+
+ mutex_exit(&areq->an_lock);
+ mutex_exit(&gswq->gs_lock);
+ break;
+
+ case CRYPTO_HW_PROVIDER:
+ /*
+ * There is no interface to remove an entry
+ * once it is on the taskq. So, we do not do
+				 * anything for a hardware provider.
+ */
+ break;
+ default:
+ break;
+ }
+
+ /*
+			 * The request is running. Wait for the completion
+			 * notification.
+ */
+ KCF_AREQ_REFHOLD(areq);
+ while (GET_REQID(areq) == id)
+ cv_wait(&areq->an_done, &rt->rt_lock);
+ KCF_AREQ_REFRELE(areq);
+ break;
+ }
+ }
+
+ mutex_exit(&rt->rt_lock);
+}
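+
+/*
+ * A minimal caller-side sketch (editorial illustration; the elided
+ * arguments stand for the rest of the crypto_encrypt(9F) parameters,
+ * and cr_flag is assumed not to include CRYPTO_SKIP_REQID so that a
+ * request ID gets assigned):
+ *
+ *	crypto_call_req_t cr;
+ *
+ *	if (crypto_encrypt(..., &cr) == CRYPTO_QUEUED)
+ *		crypto_cancel_req(cr.cr_reqid);
+ */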
+
+/*
+ * Cancel all asynchronous requests associated with the
+ * passed in crypto context and free it.
+ *
+ * A client SHOULD NOT call this routine after calling a crypto_*_final
+ * routine. This routine is called only during intermediate operations.
+ * The client should not use the crypto context after this function returns
+ * since we destroy it.
+ *
+ * Calling context:
+ * Can be called from user context only.
+ */
+void
+crypto_cancel_ctx(crypto_context_t ctx)
+{
+ kcf_context_t *ictx;
+ kcf_areq_node_t *areq;
+
+ if (ctx == NULL)
+ return;
+
+ ictx = (kcf_context_t *)((crypto_ctx_t *)ctx)->cc_framework_private;
+
+ mutex_enter(&ictx->kc_in_use_lock);
+
+ /* Walk the chain and cancel each request */
+ while ((areq = ictx->kc_req_chain_first) != NULL) {
+ /*
+ * We have to drop the lock here as we may have
+ * to wait for request completion. We hold the
+ * request before dropping the lock though, so that it
+ * won't be freed underneath us.
+ */
+ KCF_AREQ_REFHOLD(areq);
+ mutex_exit(&ictx->kc_in_use_lock);
+
+ crypto_cancel_req(GET_REQID(areq));
+ KCF_AREQ_REFRELE(areq);
+
+ mutex_enter(&ictx->kc_in_use_lock);
+ }
+
+ mutex_exit(&ictx->kc_in_use_lock);
+ KCF_CONTEXT_REFRELE(ictx);
+}
+
+/*
+ * Update kstats.
+ */
+static int
+kcf_misc_kstat_update(kstat_t *ksp, int rw)
+{
+ uint_t tcnt;
+ kcf_stats_t *ks_data;
+
+ if (rw == KSTAT_WRITE)
+ return (EACCES);
+
+ ks_data = ksp->ks_data;
+
+ ks_data->ks_thrs_in_pool.value.ui32 = kcfpool->kp_threads;
+ /*
+ * The failover thread is counted in kp_idlethreads in
+ * some corner cases. This is done to avoid doing more checks
+ * when submitting a request. We account for those cases below.
+ */
+ if ((tcnt = kcfpool->kp_idlethreads) == (kcfpool->kp_threads + 1))
+ tcnt--;
+ ks_data->ks_idle_thrs.value.ui32 = tcnt;
+ ks_data->ks_minthrs.value.ui32 = kcf_minthreads;
+ ks_data->ks_maxthrs.value.ui32 = kcf_maxthreads;
+ ks_data->ks_swq_njobs.value.ui32 = gswq->gs_njobs;
+ ks_data->ks_swq_maxjobs.value.ui32 = gswq->gs_maxjobs;
+ ks_data->ks_taskq_threads.value.ui32 = crypto_taskq_threads;
+ ks_data->ks_taskq_minalloc.value.ui32 = crypto_taskq_minalloc;
+ ks_data->ks_taskq_maxalloc.value.ui32 = crypto_taskq_maxalloc;
+
+ return (0);
+}
+
+/*
+ * Allocate and initialize a kcf_dual_req, used for saving the arguments of
+ * a dual operation or an atomic operation that has to be internally
+ * simulated with multiple single steps.
+ * crq determines the memory allocation flags.
+ */
+
+kcf_dual_req_t *
+kcf_alloc_req(crypto_call_req_t *crq)
+{
+ kcf_dual_req_t *kcr;
+
+ kcr = kmem_alloc(sizeof (kcf_dual_req_t), KCF_KMFLAG(crq));
+
+ if (kcr == NULL)
+ return (NULL);
+
+	/* Copy the whole crypto_call_req struct, as it isn't persistent */
+ if (crq != NULL)
+ kcr->kr_callreq = *crq;
+ else
+ bzero(&(kcr->kr_callreq), sizeof (crypto_call_req_t));
+ kcr->kr_areq = NULL;
+ kcr->kr_saveoffset = 0;
+ kcr->kr_savelen = 0;
+
+ return (kcr);
+}
+
+/*
+ * Callback routine for the next part of a simulated dual operation.
+ * Schedules the next step.
+ *
+ * This routine can be called from interrupt context.
+ */
+void
+kcf_next_req(void *next_req_arg, int status)
+{
+ kcf_dual_req_t *next_req = (kcf_dual_req_t *)next_req_arg;
+ kcf_req_params_t *params = &(next_req->kr_params);
+ kcf_areq_node_t *areq = next_req->kr_areq;
+ int error = status;
+ kcf_provider_desc_t *pd = NULL;
+ crypto_dual_data_t *ct = NULL;
+
+	/* Stop the processing if an error occurred at this step */
+ if (error != CRYPTO_SUCCESS) {
+out:
+ areq->an_reqarg = next_req->kr_callreq;
+ KCF_AREQ_REFRELE(areq);
+ kmem_free(next_req, sizeof (kcf_dual_req_t));
+ areq->an_isdual = B_FALSE;
+ kcf_aop_done(areq, error);
+ return;
+ }
+
+ switch (params->rp_opgrp) {
+ case KCF_OG_MAC: {
+
+ /*
+ * The next req is submitted with the same reqid as the
+ * first part. The consumer only got back that reqid, and
+ * should still be able to cancel the operation during its
+ * second step.
+ */
+ kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);
+ crypto_ctx_template_t mac_tmpl;
+ kcf_mech_entry_t *me;
+
+ ct = (crypto_dual_data_t *)mops->mo_data;
+ mac_tmpl = (crypto_ctx_template_t)mops->mo_templ;
+
+ /* No expected recoverable failures, so no retry list */
+ pd = kcf_get_mech_provider(mops->mo_framework_mechtype,
+ &me, &error, NULL, CRYPTO_FG_MAC_ATOMIC,
+ (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), ct->dd_len2);
+
+ if (pd == NULL) {
+ error = CRYPTO_MECH_NOT_SUPPORTED;
+ goto out;
+ }
+ /* Validate the MAC context template here */
+ if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
+ (mac_tmpl != NULL)) {
+ kcf_ctx_template_t *ctx_mac_tmpl;
+
+ ctx_mac_tmpl = (kcf_ctx_template_t *)mac_tmpl;
+
+ if (ctx_mac_tmpl->ct_generation != me->me_gen_swprov) {
+ KCF_PROV_REFRELE(pd);
+ error = CRYPTO_OLD_CTX_TEMPLATE;
+ goto out;
+ }
+ mops->mo_templ = ctx_mac_tmpl->ct_prov_tmpl;
+ }
+
+ break;
+ }
+ case KCF_OG_DECRYPT: {
+ kcf_decrypt_ops_params_t *dcrops =
+ &(params->rp_u.decrypt_params);
+
+ ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
+ /* No expected recoverable failures, so no retry list */
+ pd = kcf_get_mech_provider(dcrops->dop_framework_mechtype,
+ NULL, &error, NULL, CRYPTO_FG_DECRYPT_ATOMIC,
+ (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), ct->dd_len1);
+
+ if (pd == NULL) {
+ error = CRYPTO_MECH_NOT_SUPPORTED;
+ goto out;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ /* The second step uses len2 and offset2 of the dual_data */
+ next_req->kr_saveoffset = ct->dd_offset1;
+ next_req->kr_savelen = ct->dd_len1;
+ ct->dd_offset1 = ct->dd_offset2;
+ ct->dd_len1 = ct->dd_len2;
+
+ /* preserve if the caller is restricted */
+ if (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED) {
+ areq->an_reqarg.cr_flag = CRYPTO_RESTRICTED;
+ } else {
+ areq->an_reqarg.cr_flag = 0;
+ }
+
+ areq->an_reqarg.cr_callback_func = kcf_last_req;
+ areq->an_reqarg.cr_callback_arg = next_req;
+ areq->an_isdual = B_TRUE;
+
+ /*
+ * We would like to call kcf_submit_request() here. But,
+ * that is not possible as that routine allocates a new
+ * kcf_areq_node_t request structure, while we need to
+ * reuse the existing request structure.
+ */
+ switch (pd->pd_prov_type) {
+ case CRYPTO_SW_PROVIDER:
+ error = common_submit_request(pd, NULL, params,
+ KCF_RHNDL(KM_NOSLEEP));
+ break;
+
+ case CRYPTO_HW_PROVIDER: {
+ kcf_provider_desc_t *old_pd;
+ taskq_t *taskq = pd->pd_sched_info.ks_taskq;
+
+ /*
+ * Set the params for the second step in the
+ * dual-ops.
+ */
+ areq->an_params = *params;
+ old_pd = areq->an_provider;
+ KCF_PROV_REFRELE(old_pd);
+ KCF_PROV_REFHOLD(pd);
+ areq->an_provider = pd;
+
+ /*
+ * Note that we have to do a taskq_dispatch()
+ * here as we may be in interrupt context.
+ */
+ if (taskq_dispatch(taskq, process_req_hwp, areq,
+ TQ_NOSLEEP) == (taskqid_t)0) {
+ error = CRYPTO_HOST_MEMORY;
+ } else {
+ error = CRYPTO_QUEUED;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ /*
+ * We have to release the holds on the request and the provider
+ * in all cases.
+ */
+ KCF_AREQ_REFRELE(areq);
+ KCF_PROV_REFRELE(pd);
+
+ if (error != CRYPTO_QUEUED) {
+ /* restore, clean up, and invoke the client's callback */
+
+ ct->dd_offset1 = next_req->kr_saveoffset;
+ ct->dd_len1 = next_req->kr_savelen;
+ areq->an_reqarg = next_req->kr_callreq;
+ kmem_free(next_req, sizeof (kcf_dual_req_t));
+ areq->an_isdual = B_FALSE;
+ kcf_aop_done(areq, error);
+ }
+}
+
+/*
+ * Last part of an emulated dual operation.
+ * Clean up and restore the original dual_data offset and length.
+ */
+void
+kcf_last_req(void *last_req_arg, int status)
+{
+ kcf_dual_req_t *last_req = (kcf_dual_req_t *)last_req_arg;
+
+ kcf_req_params_t *params = &(last_req->kr_params);
+ kcf_areq_node_t *areq = last_req->kr_areq;
+ crypto_dual_data_t *ct = NULL;
+
+ switch (params->rp_opgrp) {
+ case KCF_OG_MAC: {
+ kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);
+
+ ct = (crypto_dual_data_t *)mops->mo_data;
+ break;
+ }
+ case KCF_OG_DECRYPT: {
+ kcf_decrypt_ops_params_t *dcrops =
+ &(params->rp_u.decrypt_params);
+
+ ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
+ break;
+ }
+ default:
+ break;
+ }
+ ct->dd_offset1 = last_req->kr_saveoffset;
+ ct->dd_len1 = last_req->kr_savelen;
+
+ /* The submitter used kcf_last_req as its callback */
+
+ if (areq == NULL) {
+ crypto_call_req_t *cr = &last_req->kr_callreq;
+
+ (*(cr->cr_callback_func))(cr->cr_callback_arg, status);
+ kmem_free(last_req, sizeof (kcf_dual_req_t));
+ return;
+ }
+ areq->an_reqarg = last_req->kr_callreq;
+ KCF_AREQ_REFRELE(areq);
+ kmem_free(last_req, sizeof (kcf_dual_req_t));
+ areq->an_isdual = B_FALSE;
+ kcf_aop_done(areq, status);
+}
diff --git a/module/icp/illumos-crypto.c b/module/icp/illumos-crypto.c
new file mode 100644
index 000000000..63f019fa6
--- /dev/null
+++ b/module/icp/illumos-crypto.c
@@ -0,0 +1,152 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 2016, Datto, Inc. All rights reserved.
+ */
+
+#ifdef _KERNEL
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#else
+#define __exit
+#define __init
+#endif
+
+#include <sys/crypto/common.h>
+#include <sys/crypto/api.h>
+#include <sys/crypto/impl.h>
+#include <sys/crypto/sched_impl.h>
+#include <sys/modhash_impl.h>
+#include <sys/crypto/icp.h>
+
+/*
+ * Changes made to the original Illumos Crypto Layer for the ICP:
+ *
+ * Several changes were needed to allow the Illumos Crypto Layer
+ * to work in the Linux kernel. Almost all of the changes fall into
+ * one of the following categories:
+ *
+ * 1) Moving the syntax to C90: This was mostly a matter of
+ * changing func() definitions to func(void). In a few cases,
+ * initializations of structs with unions needed to have brackets
+ * added.
+ *
+ * 2) Changes to allow userspace compilation: The ICP is meant to be
+ * compiled and used in both userspace and kernel space (for ztest and
+ * libzfs), so the _KERNEL macros did not make sense anymore. For the
+ * same reason, many header includes were also changed to use
+ * sys/zfs_context.h
+ *
+ * 3) Moving to a statically compiled architecture: At some point in
+ * the future it may make sense to have encryption algorithms that are
+ * loadable into the ICP at runtime via separate kernel modules.
+ * However, considering that this code will probably not see much use
+ * outside of zfs and zfs encryption only requires aes and sha256
+ * algorithms, it seemed like more trouble than it was worth to port over
+ * Illumos's kernel module structure to a Linux kernel module. In
+ * addition, the Illumos code related to keeping track of kernel modules
+ * is very much tied to the Illumos OS and proved difficult to port to
+ * Linux. Therefore, the structure of the ICP was simplified to work
+ * statically and several pieces of code responsible for keeping track
+ * of Illumos kernel modules were removed and simplified. All module
+ * initialization and destruction is now called in this file during
+ * Linux kernel module loading and unloading.
+ *
+ * 4) Adding destructors: The Illumos Crypto Layer is built into
+ * the Illumos kernel and is not meant to be unloaded. Some destructors
+ * were added to allow the ICP to be unloaded without leaking
+ * structures.
+ *
+ * 5) Removing CRYPTO_DATA_MBLK related structures and code:
+ * crypto_data_t can have 3 formats, CRYPTO_DATA_RAW, CRYPTO_DATA_UIO,
+ * and CRYPTO_DATA_MBLK. ZFS only requires the first 2 formats, as the
+ * last one is related to streamed data. To simplify the port, code
+ * related to this format was removed.
+ *
+ * 6) Changes for architecture specific code: Some changes were needed
+ * to make architecture specific assembly compile. The biggest change
+ * here was to functions related to detecting CPU capabilities for amd64.
+ * The Illumos Crypto Layer called into the Illumos kernel's API
+ * to discover these. They have been converted to instead use the
+ * 'cpuid' instruction as per the Intel spec. In addition, references to
+ * the 'sun4u' and 'sparc' architectures have been removed so that these
+ * will use the generic implementation.
+ *
+ * 7) Removing sha384 and sha512 code: The sha code was actually very
+ * easy to port. However, the generic sha384 and sha512 code actually
+ * exceeds the stack size on arm and powerpc architectures. In an effort
+ * to remove warnings, this code was removed.
+ *
+ * 8) Change large allocations from kmem_alloc() to vmem_alloc(): In
+ * testing the ICP with the ZFS encryption code, a few allocations were
+ * found that could potentially be very large. These caused the SPL to
+ * throw warnings and so they were changed to use vmem_alloc().
+ *
+ * 9) Makefiles: Makefiles were added that would work with the existing
+ * ZFS Makefiles.
+ */
+
+void __exit
+icp_fini(void)
+{
+ sha2_mod_fini();
+ sha1_mod_fini();
+ aes_mod_fini();
+ kcf_sched_destroy();
+ kcf_prov_tab_destroy();
+ kcf_destroy_mech_tabs();
+ mod_hash_fini();
+}
+
+/* roughly equivalent to kcf.c: _init() */
+int __init
+icp_init(void)
+{
+ /* initialize the mod hash module */
+ mod_hash_init();
+
+ /* initialize the mechanisms tables supported out-of-the-box */
+ kcf_init_mech_tabs();
+
+ /* initialize the providers tables */
+ kcf_prov_tab_init();
+
+ /*
+ * Initialize scheduling structures. Note that this does NOT
+ * start any threads since it might not be safe to do so.
+ */
+ kcf_sched_init();
+
+ /* initialize algorithms */
+ aes_mod_init();
+ sha1_mod_init();
+ sha2_mod_init();
+
+ return (0);
+}
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+module_exit(icp_fini);
+module_init(icp_init);
+MODULE_LICENSE("CDDL");
+#endif
diff --git a/module/icp/include/aes/aes_impl.h b/module/icp/include/aes/aes_impl.h
new file mode 100644
index 000000000..ed15f74e7
--- /dev/null
+++ b/module/icp/include/aes/aes_impl.h
@@ -0,0 +1,170 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _AES_IMPL_H
+#define _AES_IMPL_H
+
+/*
+ * Common definitions used by AES.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/zfs_context.h>
+#include <sys/crypto/common.h>
+
+/* Similar to sysmacros.h IS_P2ALIGNED, but checks two pointers: */
+#define IS_P2ALIGNED2(v, w, a) \
+ ((((uintptr_t)(v) | (uintptr_t)(w)) & ((uintptr_t)(a) - 1)) == 0)
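+/*
+ * For example (illustrative), IS_P2ALIGNED2(src, dst, sizeof (uint32_t))
+ * is true only when both pointers are 4-byte aligned, letting a caller
+ * test two buffers for a word-wise fast path with a single check.
+ */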
+
+#define AES_BLOCK_LEN 16 /* bytes */
+/* Round constant length, in number of 32-bit elements: */
+#define RC_LENGTH (5 * ((AES_BLOCK_LEN) / 4 - 2))
+
+#define AES_COPY_BLOCK(src, dst) \
+ (dst)[0] = (src)[0]; \
+ (dst)[1] = (src)[1]; \
+ (dst)[2] = (src)[2]; \
+ (dst)[3] = (src)[3]; \
+ (dst)[4] = (src)[4]; \
+ (dst)[5] = (src)[5]; \
+ (dst)[6] = (src)[6]; \
+ (dst)[7] = (src)[7]; \
+ (dst)[8] = (src)[8]; \
+ (dst)[9] = (src)[9]; \
+ (dst)[10] = (src)[10]; \
+ (dst)[11] = (src)[11]; \
+ (dst)[12] = (src)[12]; \
+ (dst)[13] = (src)[13]; \
+ (dst)[14] = (src)[14]; \
+ (dst)[15] = (src)[15]
+
+#define AES_XOR_BLOCK(src, dst) \
+ (dst)[0] ^= (src)[0]; \
+ (dst)[1] ^= (src)[1]; \
+ (dst)[2] ^= (src)[2]; \
+ (dst)[3] ^= (src)[3]; \
+ (dst)[4] ^= (src)[4]; \
+ (dst)[5] ^= (src)[5]; \
+ (dst)[6] ^= (src)[6]; \
+ (dst)[7] ^= (src)[7]; \
+ (dst)[8] ^= (src)[8]; \
+ (dst)[9] ^= (src)[9]; \
+ (dst)[10] ^= (src)[10]; \
+ (dst)[11] ^= (src)[11]; \
+ (dst)[12] ^= (src)[12]; \
+ (dst)[13] ^= (src)[13]; \
+ (dst)[14] ^= (src)[14]; \
+ (dst)[15] ^= (src)[15]
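+
+/*
+ * Both block macros above expand to a sequence of statements rather
+ * than a single expression, so a conditional caller must add braces,
+ * e.g. (illustrative):
+ *
+ *	if (need_copy) {
+ *		AES_COPY_BLOCK(src, dst);
+ *	}
+ */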
+
+/* AES key size definitions */
+#define AES_MINBITS 128
+#define AES_MINBYTES ((AES_MINBITS) >> 3)
+#define AES_MAXBITS 256
+#define AES_MAXBYTES ((AES_MAXBITS) >> 3)
+
+#define AES_MIN_KEY_BYTES ((AES_MINBITS) >> 3)
+#define AES_MAX_KEY_BYTES ((AES_MAXBITS) >> 3)
+#define AES_192_KEY_BYTES 24
+#define AES_IV_LEN 16
+
+/* AES key schedule may be implemented with 32- or 64-bit elements: */
+#define AES_32BIT_KS 32
+#define AES_64BIT_KS 64
+
+#define MAX_AES_NR 14 /* Maximum number of rounds */
+#define MAX_AES_NB 4 /* Number of columns comprising a state */
+
+typedef union {
+#ifdef sun4u
+ uint64_t ks64[((MAX_AES_NR) + 1) * (MAX_AES_NB)];
+#endif
+ uint32_t ks32[((MAX_AES_NR) + 1) * (MAX_AES_NB)];
+} aes_ks_t;
+
+/* aes_key.flags value: */
+#define INTEL_AES_NI_CAPABLE 0x1 /* AES-NI instructions present */
+
+typedef struct aes_key aes_key_t;
+struct aes_key {
+ aes_ks_t encr_ks; /* encryption key schedule */
+ aes_ks_t decr_ks; /* decryption key schedule */
+#ifdef __amd64
+ long double align128; /* Align fields above for Intel AES-NI */
+ int flags; /* implementation-dependent flags */
+#endif /* __amd64 */
+ int nr; /* number of rounds (10, 12, or 14) */
+ int type; /* key schedule size (32 or 64 bits) */
+};
+
+/*
+ * Core AES functions.
+ * ks and keysched are pointers to aes_key_t.
+ * They are declared void* as they are intended to be opaque types.
+ * Use function aes_alloc_keysched() to allocate memory for ks and keysched.
+ */
+extern void *aes_alloc_keysched(size_t *size, int kmflag);
+extern void aes_init_keysched(const uint8_t *cipherKey, uint_t keyBits,
+ void *keysched);
+extern int aes_encrypt_block(const void *ks, const uint8_t *pt, uint8_t *ct);
+extern int aes_decrypt_block(const void *ks, const uint8_t *ct, uint8_t *pt);
+
+/*
+ * AES mode functions.
+ * The first 2 functions operate on 16-byte AES blocks.
+ */
+extern void aes_copy_block(uint8_t *in, uint8_t *out);
+extern void aes_xor_block(uint8_t *data, uint8_t *dst);
+
+/* Note: ctx is a pointer to aes_ctx_t defined in modes.h */
+extern int aes_encrypt_contiguous_blocks(void *ctx, char *data, size_t length,
+ crypto_data_t *out);
+extern int aes_decrypt_contiguous_blocks(void *ctx, char *data, size_t length,
+ crypto_data_t *out);
+
+/*
+ * The following definitions and declarations are only used by AES FIPS POST
+ */
+#ifdef _AES_IMPL
+
+typedef enum aes_mech_type {
+ AES_ECB_MECH_INFO_TYPE, /* SUN_CKM_AES_ECB */
+ AES_CBC_MECH_INFO_TYPE, /* SUN_CKM_AES_CBC */
+ AES_CBC_PAD_MECH_INFO_TYPE, /* SUN_CKM_AES_CBC_PAD */
+ AES_CTR_MECH_INFO_TYPE, /* SUN_CKM_AES_CTR */
+ AES_CCM_MECH_INFO_TYPE, /* SUN_CKM_AES_CCM */
+ AES_GCM_MECH_INFO_TYPE, /* SUN_CKM_AES_GCM */
+ AES_GMAC_MECH_INFO_TYPE /* SUN_CKM_AES_GMAC */
+} aes_mech_type_t;
+
+#endif /* _AES_IMPL */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _AES_IMPL_H */
diff --git a/module/icp/include/modes/modes.h b/module/icp/include/modes/modes.h
new file mode 100644
index 000000000..7c1f10b16
--- /dev/null
+++ b/module/icp/include/modes/modes.h
@@ -0,0 +1,385 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _COMMON_CRYPTO_MODES_H
+#define _COMMON_CRYPTO_MODES_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/zfs_context.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/impl.h>
+
+#define ECB_MODE 0x00000002
+#define CBC_MODE 0x00000004
+#define CTR_MODE 0x00000008
+#define CCM_MODE 0x00000010
+#define GCM_MODE 0x00000020
+#define GMAC_MODE 0x00000040
+
+/*
+ * cc_keysched: Pointer to key schedule.
+ *
+ * cc_keysched_len: Length of the key schedule.
+ *
+ * cc_remainder: This is for residual data, i.e. data that can't
+ * be processed because there are too few bytes.
+ * Must wait until more data arrives.
+ *
+ * cc_remainder_len: Number of bytes in cc_remainder.
+ *
+ * cc_iv: Scratch buffer that sometimes contains the IV.
+ *
+ * cc_lastp: Pointer to previous block of ciphertext.
+ *
+ * cc_copy_to: Pointer to where encrypted residual data needs
+ * to be copied.
+ *
+ * cc_flags: PROVIDER_OWNS_KEY_SCHEDULE
+ * When a context is freed, it is necessary
+ * to know whether the key schedule was allocated
+ * by the caller, or internally, e.g. an init routine.
+ * If allocated by the latter, then it needs to be freed.
+ *
+ * ECB_MODE, CBC_MODE, CTR_MODE, or CCM_MODE
+ */
+struct common_ctx {
+ void *cc_keysched;
+ size_t cc_keysched_len;
+ uint64_t cc_iv[2];
+ uint64_t cc_remainder[2];
+ size_t cc_remainder_len;
+ uint8_t *cc_lastp;
+ uint8_t *cc_copy_to;
+ uint32_t cc_flags;
+};
+
+typedef struct common_ctx common_ctx_t;
+
+typedef struct ecb_ctx {
+ struct common_ctx ecb_common;
+ uint64_t ecb_lastblock[2];
+} ecb_ctx_t;
+
+#define ecb_keysched ecb_common.cc_keysched
+#define ecb_keysched_len ecb_common.cc_keysched_len
+#define ecb_iv ecb_common.cc_iv
+#define ecb_remainder ecb_common.cc_remainder
+#define ecb_remainder_len ecb_common.cc_remainder_len
+#define ecb_lastp ecb_common.cc_lastp
+#define ecb_copy_to ecb_common.cc_copy_to
+#define ecb_flags ecb_common.cc_flags
+
+typedef struct cbc_ctx {
+ struct common_ctx cbc_common;
+ uint64_t cbc_lastblock[2];
+} cbc_ctx_t;
+
+#define cbc_keysched cbc_common.cc_keysched
+#define cbc_keysched_len cbc_common.cc_keysched_len
+#define cbc_iv cbc_common.cc_iv
+#define cbc_remainder cbc_common.cc_remainder
+#define cbc_remainder_len cbc_common.cc_remainder_len
+#define cbc_lastp cbc_common.cc_lastp
+#define cbc_copy_to cbc_common.cc_copy_to
+#define cbc_flags cbc_common.cc_flags
+
+/*
+ * ctr_lower_mask Bit-mask for lower 8 bytes of counter block.
+ * ctr_upper_mask Bit-mask for upper 8 bytes of counter block.
+ */
+typedef struct ctr_ctx {
+ struct common_ctx ctr_common;
+ uint64_t ctr_lower_mask;
+ uint64_t ctr_upper_mask;
+ uint32_t ctr_tmp[4];
+} ctr_ctx_t;
+
+/*
+ * ctr_cb Counter block.
+ */
+#define ctr_keysched ctr_common.cc_keysched
+#define ctr_keysched_len ctr_common.cc_keysched_len
+#define ctr_cb ctr_common.cc_iv
+#define ctr_remainder ctr_common.cc_remainder
+#define ctr_remainder_len ctr_common.cc_remainder_len
+#define ctr_lastp ctr_common.cc_lastp
+#define ctr_copy_to ctr_common.cc_copy_to
+#define ctr_flags ctr_common.cc_flags
+
+/*
+ *
+ * ccm_mac_len: Stores length of the MAC in CCM mode.
+ * ccm_mac_buf: Stores the intermediate value for MAC in CCM encrypt.
+ * In CCM decrypt, stores the input MAC value.
+ * ccm_data_len: Length of the plaintext for CCM mode encrypt, or
+ * length of the ciphertext for CCM mode decrypt.
+ * ccm_processed_data_len:
+ * Length of processed plaintext in CCM mode encrypt,
+ * or length of processed ciphertext for CCM mode decrypt.
+ * ccm_processed_mac_len:
+ * Length of MAC data accumulated in CCM mode decrypt.
+ *
+ * ccm_pt_buf: Only used in CCM mode decrypt. It stores the
+ * decrypted plaintext to be returned when
+ * MAC verification succeeds in decrypt_final.
+ * Memory for this should be allocated in the AES module.
+ *
+ */
+typedef struct ccm_ctx {
+ struct common_ctx ccm_common;
+ uint32_t ccm_tmp[4];
+ size_t ccm_mac_len;
+ uint64_t ccm_mac_buf[2];
+ size_t ccm_data_len;
+ size_t ccm_processed_data_len;
+ size_t ccm_processed_mac_len;
+ uint8_t *ccm_pt_buf;
+ uint64_t ccm_mac_input_buf[2];
+ uint64_t ccm_counter_mask;
+} ccm_ctx_t;
+
+#define ccm_keysched ccm_common.cc_keysched
+#define ccm_keysched_len ccm_common.cc_keysched_len
+#define ccm_cb ccm_common.cc_iv
+#define ccm_remainder ccm_common.cc_remainder
+#define ccm_remainder_len ccm_common.cc_remainder_len
+#define ccm_lastp ccm_common.cc_lastp
+#define ccm_copy_to ccm_common.cc_copy_to
+#define ccm_flags ccm_common.cc_flags
+
+/*
+ * gcm_tag_len: Length of authentication tag.
+ *
+ * gcm_ghash: Stores output from the GHASH function.
+ *
+ * gcm_processed_data_len:
+ * Length of processed plaintext (encrypt) or
+ * length of processed ciphertext (decrypt).
+ *
+ * gcm_pt_buf: Stores the decrypted plaintext returned by
+ * decrypt_final when the computed authentication
+ * tag matches the user supplied tag.
+ *
+ * gcm_pt_buf_len: Length of the plaintext buffer.
+ *
+ * gcm_H: Subkey.
+ *
+ * gcm_J0: Pre-counter block generated from the IV.
+ *
+ * gcm_len_a_len_c: 64-bit representations of the bit lengths of
+ * AAD and ciphertext.
+ *
+ * gcm_kmflag: Current value of kmflag. Used only for allocating
+ * the plaintext buffer during decryption.
+ */
+typedef struct gcm_ctx {
+ struct common_ctx gcm_common;
+ size_t gcm_tag_len;
+ size_t gcm_processed_data_len;
+ size_t gcm_pt_buf_len;
+ uint32_t gcm_tmp[4];
+ uint64_t gcm_ghash[2];
+ uint64_t gcm_H[2];
+ uint64_t gcm_J0[2];
+ uint64_t gcm_len_a_len_c[2];
+ uint8_t *gcm_pt_buf;
+ int gcm_kmflag;
+} gcm_ctx_t;
+
+#define gcm_keysched gcm_common.cc_keysched
+#define gcm_keysched_len gcm_common.cc_keysched_len
+#define gcm_cb gcm_common.cc_iv
+#define gcm_remainder gcm_common.cc_remainder
+#define gcm_remainder_len gcm_common.cc_remainder_len
+#define gcm_lastp gcm_common.cc_lastp
+#define gcm_copy_to gcm_common.cc_copy_to
+#define gcm_flags gcm_common.cc_flags
+
+#define AES_GMAC_IV_LEN 12
+#define AES_GMAC_TAG_BITS 128
+
+typedef struct aes_ctx {
+ union {
+ ecb_ctx_t acu_ecb;
+ cbc_ctx_t acu_cbc;
+ ctr_ctx_t acu_ctr;
+ ccm_ctx_t acu_ccm;
+ gcm_ctx_t acu_gcm;
+ } acu;
+} aes_ctx_t;
+
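+/*
+ * Each mode context in the union above begins with a struct common_ctx,
+ * so the accessors below reach the shared fields through any union arm.
+ */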
+#define ac_flags acu.acu_ecb.ecb_common.cc_flags
+#define ac_remainder_len acu.acu_ecb.ecb_common.cc_remainder_len
+#define ac_keysched acu.acu_ecb.ecb_common.cc_keysched
+#define ac_keysched_len acu.acu_ecb.ecb_common.cc_keysched_len
+#define ac_iv acu.acu_ecb.ecb_common.cc_iv
+#define ac_lastp acu.acu_ecb.ecb_common.cc_lastp
+#define ac_pt_buf acu.acu_ccm.ccm_pt_buf
+#define ac_mac_len acu.acu_ccm.ccm_mac_len
+#define ac_data_len acu.acu_ccm.ccm_data_len
+#define ac_processed_mac_len acu.acu_ccm.ccm_processed_mac_len
+#define ac_processed_data_len acu.acu_ccm.ccm_processed_data_len
+#define ac_tag_len acu.acu_gcm.gcm_tag_len
+
+typedef struct blowfish_ctx {
+ union {
+ ecb_ctx_t bcu_ecb;
+ cbc_ctx_t bcu_cbc;
+ } bcu;
+} blowfish_ctx_t;
+
+#define bc_flags bcu.bcu_ecb.ecb_common.cc_flags
+#define bc_remainder_len bcu.bcu_ecb.ecb_common.cc_remainder_len
+#define bc_keysched bcu.bcu_ecb.ecb_common.cc_keysched
+#define bc_keysched_len bcu.bcu_ecb.ecb_common.cc_keysched_len
+#define bc_iv bcu.bcu_ecb.ecb_common.cc_iv
+#define bc_lastp bcu.bcu_ecb.ecb_common.cc_lastp
+
+typedef struct des_ctx {
+ union {
+ ecb_ctx_t dcu_ecb;
+ cbc_ctx_t dcu_cbc;
+ } dcu;
+} des_ctx_t;
+
+#define dc_flags dcu.dcu_ecb.ecb_common.cc_flags
+#define dc_remainder_len dcu.dcu_ecb.ecb_common.cc_remainder_len
+#define dc_keysched dcu.dcu_ecb.ecb_common.cc_keysched
+#define dc_keysched_len dcu.dcu_ecb.ecb_common.cc_keysched_len
+#define dc_iv dcu.dcu_ecb.ecb_common.cc_iv
+#define dc_lastp dcu.dcu_ecb.ecb_common.cc_lastp
+
+extern int ecb_cipher_contiguous_blocks(ecb_ctx_t *, char *, size_t,
+ crypto_data_t *, size_t, int (*cipher)(const void *, const uint8_t *,
+ uint8_t *));
+
+extern int cbc_encrypt_contiguous_blocks(cbc_ctx_t *, char *, size_t,
+ crypto_data_t *, size_t,
+ int (*encrypt)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *));
+
+extern int cbc_decrypt_contiguous_blocks(cbc_ctx_t *, char *, size_t,
+ crypto_data_t *, size_t,
+ int (*decrypt)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *));
+
+extern int ctr_mode_contiguous_blocks(ctr_ctx_t *, char *, size_t,
+ crypto_data_t *, size_t,
+ int (*cipher)(const void *, const uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *));
+
+extern int ccm_mode_encrypt_contiguous_blocks(ccm_ctx_t *, char *, size_t,
+ crypto_data_t *, size_t,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *));
+
+extern int ccm_mode_decrypt_contiguous_blocks(ccm_ctx_t *, char *, size_t,
+ crypto_data_t *, size_t,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *));
+
+extern int gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *, char *, size_t,
+ crypto_data_t *, size_t,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *));
+
+extern int gcm_mode_decrypt_contiguous_blocks(gcm_ctx_t *, char *, size_t,
+ crypto_data_t *, size_t,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *));
+
+int ccm_encrypt_final(ccm_ctx_t *, crypto_data_t *, size_t,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *));
+
+int gcm_encrypt_final(gcm_ctx_t *, crypto_data_t *, size_t,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *));
+
+extern int ccm_decrypt_final(ccm_ctx_t *, crypto_data_t *, size_t,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *));
+
+extern int gcm_decrypt_final(gcm_ctx_t *, crypto_data_t *, size_t,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *));
+
+extern int ctr_mode_final(ctr_ctx_t *, crypto_data_t *,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *));
+
+extern int cbc_init_ctx(cbc_ctx_t *, char *, size_t, size_t,
+ void (*copy_block)(uint8_t *, uint64_t *));
+
+extern int ctr_init_ctx(ctr_ctx_t *, ulong_t, uint8_t *,
+ void (*copy_block)(uint8_t *, uint8_t *));
+
+extern int ccm_init_ctx(ccm_ctx_t *, char *, int, boolean_t, size_t,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *));
+
+extern int gcm_init_ctx(gcm_ctx_t *, char *, size_t,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *));
+
+extern int gmac_init_ctx(gcm_ctx_t *, char *, size_t,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *));
+
+extern void calculate_ccm_mac(ccm_ctx_t *, uint8_t *,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *));
+
+extern void gcm_mul(uint64_t *, uint64_t *, uint64_t *);
+
+extern void crypto_init_ptrs(crypto_data_t *, void **, offset_t *);
+extern void crypto_get_ptrs(crypto_data_t *, void **, offset_t *,
+ uint8_t **, size_t *, uint8_t **, size_t);
+
+extern void *ecb_alloc_ctx(int);
+extern void *cbc_alloc_ctx(int);
+extern void *ctr_alloc_ctx(int);
+extern void *ccm_alloc_ctx(int);
+extern void *gcm_alloc_ctx(int);
+extern void *gmac_alloc_ctx(int);
+extern void crypto_free_mode_ctx(void *);
+extern void gcm_set_kmflag(gcm_ctx_t *, int);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _COMMON_CRYPTO_MODES_H */
diff --git a/module/icp/include/sha1/sha1.h b/module/icp/include/sha1/sha1.h
new file mode 100644
index 000000000..b6ae6b8d2
--- /dev/null
+++ b/module/icp/include/sha1/sha1.h
@@ -0,0 +1,61 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_SHA1_H
+#define _SYS_SHA1_H
+
+#include <sys/types.h> /* for uint_* */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * NOTE: n2rng (Niagara2 RNG driver) accesses the state field of
+ * SHA1_CTX directly. NEVER change this structure without verifying
+ * compatibility with n2rng. The important thing is that the state
+ * must be in a field declared as uint32_t state[5].
+ */
+/* SHA-1 context. */
+typedef struct {
+ uint32_t state[5]; /* state (ABCDE) */
+ uint32_t count[2]; /* number of bits, modulo 2^64 (msb first) */
+ union {
+ uint8_t buf8[64]; /* undigested input */
+ uint32_t buf32[16]; /* realigned input */
+ } buf_un;
+} SHA1_CTX;
+
+#define SHA1_DIGEST_LENGTH 20
+
+void SHA1Init(SHA1_CTX *);
+void SHA1Update(SHA1_CTX *, const void *, size_t);
+void SHA1Final(void *, SHA1_CTX *);
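+
+/*
+ * A minimal usage sketch (editorial illustration; data and datalen are
+ * the caller's input buffer and its length):
+ *
+ *	SHA1_CTX ctx;
+ *	uint8_t digest[SHA1_DIGEST_LENGTH];
+ *
+ *	SHA1Init(&ctx);
+ *	SHA1Update(&ctx, data, datalen);
+ *	SHA1Final(digest, &ctx);
+ */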
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_SHA1_H */
diff --git a/module/icp/include/sha1/sha1_consts.h b/module/icp/include/sha1/sha1_consts.h
new file mode 100644
index 000000000..848d25ef0
--- /dev/null
+++ b/module/icp/include/sha1/sha1_consts.h
@@ -0,0 +1,65 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 1998, by Sun Microsystems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef _SYS_SHA1_CONSTS_H
+#define _SYS_SHA1_CONSTS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * as explained in sha1.c, loading 32-bit constants on a sparc is expensive
+ * since it involves both a `sethi' and an `or'. thus, we instead use `ld'
+ * to load the constants from an array called `sha1_consts'. however, on
+ * intel (and perhaps other processors), it is cheaper to load the constant
+ * directly. thus, the c code in SHA1Transform() uses the macro SHA1_CONST()
+ * which either expands to a constant or an array reference, depending on
+ * the architecture the code is being compiled for.
+ */
+
+#include <sys/types.h> /* uint32_t */
+
+extern const uint32_t sha1_consts[];
+
+#if defined(__sparc)
+#define SHA1_CONST(x) (sha1_consts[x])
+#else
+#define SHA1_CONST(x) (SHA1_CONST_ ## x)
+#endif
+
+/* constants, as provided in FIPS 180-1 */
+
+#define SHA1_CONST_0 0x5a827999U
+#define SHA1_CONST_1 0x6ed9eba1U
+#define SHA1_CONST_2 0x8f1bbcdcU
+#define SHA1_CONST_3 0xca62c1d6U
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_SHA1_CONSTS_H */
diff --git a/module/icp/include/sha1/sha1_impl.h b/module/icp/include/sha1/sha1_impl.h
new file mode 100644
index 000000000..1c1f8728f
--- /dev/null
+++ b/module/icp/include/sha1/sha1_impl.h
@@ -0,0 +1,73 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SHA1_IMPL_H
+#define _SHA1_IMPL_H
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define SHA1_HASH_SIZE 20 /* SHA_1 digest length in bytes */
+#define SHA1_DIGEST_LENGTH 20 /* SHA1 digest length in bytes */
+#define SHA1_HMAC_BLOCK_SIZE 64 /* SHA1-HMAC block size */
+#define SHA1_HMAC_MIN_KEY_LEN 1 /* SHA1-HMAC min key length in bytes */
+#define SHA1_HMAC_MAX_KEY_LEN INT_MAX /* SHA1-HMAC max key length in bytes */
+#define SHA1_HMAC_INTS_PER_BLOCK (SHA1_HMAC_BLOCK_SIZE/sizeof (uint32_t))
+
+/*
+ * CSPI information (entry points, provider info, etc.)
+ */
+typedef enum sha1_mech_type {
+ SHA1_MECH_INFO_TYPE, /* SUN_CKM_SHA1 */
+ SHA1_HMAC_MECH_INFO_TYPE, /* SUN_CKM_SHA1_HMAC */
+ SHA1_HMAC_GEN_MECH_INFO_TYPE /* SUN_CKM_SHA1_HMAC_GENERAL */
+} sha1_mech_type_t;
+
+/*
+ * Context for SHA1 mechanism.
+ */
+typedef struct sha1_ctx {
+ sha1_mech_type_t sc_mech_type; /* type of context */
+ SHA1_CTX sc_sha1_ctx; /* SHA1 context */
+} sha1_ctx_t;
+
+/*
+ * Context for SHA1-HMAC and SHA1-HMAC-GENERAL mechanisms.
+ */
+typedef struct sha1_hmac_ctx {
+ sha1_mech_type_t hc_mech_type; /* type of context */
+ uint32_t hc_digest_len; /* digest len in bytes */
+ SHA1_CTX hc_icontext; /* inner SHA1 context */
+ SHA1_CTX hc_ocontext; /* outer SHA1 context */
+} sha1_hmac_ctx_t;
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SHA1_IMPL_H */
diff --git a/module/icp/include/sha2/sha2.h b/module/icp/include/sha2/sha2.h
new file mode 100644
index 000000000..8e53987a7
--- /dev/null
+++ b/module/icp/include/sha2/sha2.h
@@ -0,0 +1,116 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/* Copyright 2013 Saso Kiselkov. All rights reserved. */
+
+#ifndef _SYS_SHA2_H
+#define _SYS_SHA2_H
+
+#include <sys/types.h> /* for uint_* */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define SHA2_HMAC_MIN_KEY_LEN 1 /* SHA2-HMAC min key length in bytes */
+#define SHA2_HMAC_MAX_KEY_LEN INT_MAX /* SHA2-HMAC max key length in bytes */
+
+#define SHA256_DIGEST_LENGTH 32 /* SHA256 digest length in bytes */
+
+#define SHA256_HMAC_BLOCK_SIZE 64 /* SHA256-HMAC block size */
+
+#define SHA256 0
+#define SHA256_HMAC 1
+#define SHA256_HMAC_GEN 2
+
+/*
+ * SHA2 context.
+ * The contents of this structure are a private interface between the
+ * Init/Update/Final calls of the functions defined below.
+ * Callers must never attempt to read or write any of the fields
+ * in this structure directly.
+ */
+typedef struct {
+ uint32_t algotype; /* Algorithm Type */
+
+ /* state (ABCDEFGH) */
+ union {
+ uint32_t s32[8]; /* for SHA256 */
+ uint64_t s64[8]; /* for SHA384/512 */
+ } state;
+ /* number of bits */
+ union {
+		uint32_t c32[2];	/* for SHA256, modulo 2^64 */
+ uint64_t c64[2]; /* for SHA384/512, modulo 2^128 */
+ } count;
+ union {
+ uint8_t buf8[128]; /* undigested input */
+ uint32_t buf32[32]; /* realigned input */
+ uint64_t buf64[16]; /* realigned input */
+ } buf_un;
+} SHA2_CTX;
+
+typedef SHA2_CTX SHA256_CTX;
+typedef SHA2_CTX SHA384_CTX;
+typedef SHA2_CTX SHA512_CTX;
+
+extern void SHA2Init(uint64_t mech, SHA2_CTX *);
+
+extern void SHA2Update(SHA2_CTX *, const void *, size_t);
+
+extern void SHA2Final(void *, SHA2_CTX *);
+
+extern void SHA256Init(SHA256_CTX *);
+
+extern void SHA256Update(SHA256_CTX *, const void *, size_t);
+
+extern void SHA256Final(void *, SHA256_CTX *);
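
[Editor's note] The Init/Update/Final triple above is the usual streaming-digest pattern. A minimal consumer, assuming this header is reachable as <sha2/sha2.h> in the module's include path:

    #include <sha2/sha2.h>	/* assumed include path for the header above */

    static void
    sha256_example(const void *msg, size_t len,
        uint8_t digest[SHA256_DIGEST_LENGTH])
    {
    	SHA256_CTX ctx;

    	SHA256Init(&ctx);
    	SHA256Update(&ctx, msg, len);	/* may be called once per chunk */
    	SHA256Final(digest, &ctx);	/* writes 32 bytes */
    }

The generic SHA2Init(SHA256, &ctx) form selects the algorithm at run time through the mech argument instead.
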
+
+#ifdef _SHA2_IMPL
+/*
+ * The following types/functions are all private to the implementation
+ * of the SHA2 functions and must not be used by consumers of the interface
+ */
+
+/*
+ * List of supported mechanisms in this module.
+ *
+ * It is important to note that in the module, division or modulus calculations
+ * are used on the enumerated type to determine which mechanism is being used;
+ * therefore, changing the order or adding mechanisms should be done
+ * carefully.
+ */
+typedef enum sha2_mech_type {
+ SHA256_MECH_INFO_TYPE, /* SUN_CKM_SHA256 */
+ SHA256_HMAC_MECH_INFO_TYPE, /* SUN_CKM_SHA256_HMAC */
+ SHA256_HMAC_GEN_MECH_INFO_TYPE, /* SUN_CKM_SHA256_HMAC_GENERAL */
+} sha2_mech_type_t;
+
+#endif /* _SHA2_IMPL */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_SHA2_H */
diff --git a/module/icp/include/sha2/sha2_consts.h b/module/icp/include/sha2/sha2_consts.h
new file mode 100644
index 000000000..3a6645508
--- /dev/null
+++ b/module/icp/include/sha2/sha2_consts.h
@@ -0,0 +1,219 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_SHA2_CONSTS_H
+#define _SYS_SHA2_CONSTS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Loading 32-bit constants on a SPARC is expensive since it involves both
+ * a `sethi' and an `or'.  Thus, we instead use `ld' to load the constants
+ * from an array called `sha2_consts'.  However, on Intel (and perhaps other
+ * processors), it is cheaper to load the constant directly.  Thus, the C
+ * code in SHA transform functions uses the macro SHA2_CONST() which either
+ * expands to a constant or an array reference, depending on
+ * the architecture the code is being compiled for.
+ *
+ * SHA512 constants are used for SHA384
+ */
+
+#include <sys/types.h> /* uint32_t */
+
+extern const uint32_t sha256_consts[];
+extern const uint64_t sha512_consts[];
+
+#if defined(__sparc)
+#define SHA256_CONST(x) (sha256_consts[x])
+#define SHA512_CONST(x) (sha512_consts[x])
+#else
+#define SHA256_CONST(x) (SHA256_CONST_ ## x)
+#define SHA512_CONST(x) (SHA512_CONST_ ## x)
+#endif
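
[Editor's note] Note that the non-sparc branch pastes the index into an identifier, so SHA256_CONST()/SHA512_CONST() must be invoked with literal round numbers; transform code is therefore written unrolled. Schematically, for round 0 (SIGMA1, CH, t1, and w0 stand in for the usual SHA-256 round functions and variables, not names from this module):

    /*
     * Schematic SHA-256 round 0: the constant reference compiles to an
     * immediate on x86 and to an sha256_consts[0] array load on sparc.
     */
    t1 = h + SIGMA1(e) + CH(e, f, g) + SHA256_CONST(0) + w0;
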
+
+/* constants, as provided in FIPS 180-2 */
+
+#define SHA256_CONST_0 0x428a2f98U
+#define SHA256_CONST_1 0x71374491U
+#define SHA256_CONST_2 0xb5c0fbcfU
+#define SHA256_CONST_3 0xe9b5dba5U
+#define SHA256_CONST_4 0x3956c25bU
+#define SHA256_CONST_5 0x59f111f1U
+#define SHA256_CONST_6 0x923f82a4U
+#define SHA256_CONST_7 0xab1c5ed5U
+
+#define SHA256_CONST_8 0xd807aa98U
+#define SHA256_CONST_9 0x12835b01U
+#define SHA256_CONST_10 0x243185beU
+#define SHA256_CONST_11 0x550c7dc3U
+#define SHA256_CONST_12 0x72be5d74U
+#define SHA256_CONST_13 0x80deb1feU
+#define SHA256_CONST_14 0x9bdc06a7U
+#define SHA256_CONST_15 0xc19bf174U
+
+#define SHA256_CONST_16 0xe49b69c1U
+#define SHA256_CONST_17 0xefbe4786U
+#define SHA256_CONST_18 0x0fc19dc6U
+#define SHA256_CONST_19 0x240ca1ccU
+#define SHA256_CONST_20 0x2de92c6fU
+#define SHA256_CONST_21 0x4a7484aaU
+#define SHA256_CONST_22 0x5cb0a9dcU
+#define SHA256_CONST_23 0x76f988daU
+
+#define SHA256_CONST_24 0x983e5152U
+#define SHA256_CONST_25 0xa831c66dU
+#define SHA256_CONST_26 0xb00327c8U
+#define SHA256_CONST_27 0xbf597fc7U
+#define SHA256_CONST_28 0xc6e00bf3U
+#define SHA256_CONST_29 0xd5a79147U
+#define SHA256_CONST_30 0x06ca6351U
+#define SHA256_CONST_31 0x14292967U
+
+#define SHA256_CONST_32 0x27b70a85U
+#define SHA256_CONST_33 0x2e1b2138U
+#define SHA256_CONST_34 0x4d2c6dfcU
+#define SHA256_CONST_35 0x53380d13U
+#define SHA256_CONST_36 0x650a7354U
+#define SHA256_CONST_37 0x766a0abbU
+#define SHA256_CONST_38 0x81c2c92eU
+#define SHA256_CONST_39 0x92722c85U
+
+#define SHA256_CONST_40 0xa2bfe8a1U
+#define SHA256_CONST_41 0xa81a664bU
+#define SHA256_CONST_42 0xc24b8b70U
+#define SHA256_CONST_43 0xc76c51a3U
+#define SHA256_CONST_44 0xd192e819U
+#define SHA256_CONST_45 0xd6990624U
+#define SHA256_CONST_46 0xf40e3585U
+#define SHA256_CONST_47 0x106aa070U
+
+#define SHA256_CONST_48 0x19a4c116U
+#define SHA256_CONST_49 0x1e376c08U
+#define SHA256_CONST_50 0x2748774cU
+#define SHA256_CONST_51 0x34b0bcb5U
+#define SHA256_CONST_52 0x391c0cb3U
+#define SHA256_CONST_53 0x4ed8aa4aU
+#define SHA256_CONST_54 0x5b9cca4fU
+#define SHA256_CONST_55 0x682e6ff3U
+
+#define SHA256_CONST_56 0x748f82eeU
+#define SHA256_CONST_57 0x78a5636fU
+#define SHA256_CONST_58 0x84c87814U
+#define SHA256_CONST_59 0x8cc70208U
+#define SHA256_CONST_60 0x90befffaU
+#define SHA256_CONST_61 0xa4506cebU
+#define SHA256_CONST_62 0xbef9a3f7U
+#define SHA256_CONST_63 0xc67178f2U
+
+#define SHA512_CONST_0 0x428a2f98d728ae22ULL
+#define SHA512_CONST_1 0x7137449123ef65cdULL
+#define SHA512_CONST_2 0xb5c0fbcfec4d3b2fULL
+#define SHA512_CONST_3 0xe9b5dba58189dbbcULL
+#define SHA512_CONST_4 0x3956c25bf348b538ULL
+#define SHA512_CONST_5 0x59f111f1b605d019ULL
+#define SHA512_CONST_6 0x923f82a4af194f9bULL
+#define SHA512_CONST_7 0xab1c5ed5da6d8118ULL
+#define SHA512_CONST_8 0xd807aa98a3030242ULL
+#define SHA512_CONST_9 0x12835b0145706fbeULL
+#define SHA512_CONST_10 0x243185be4ee4b28cULL
+#define SHA512_CONST_11 0x550c7dc3d5ffb4e2ULL
+#define SHA512_CONST_12 0x72be5d74f27b896fULL
+#define SHA512_CONST_13 0x80deb1fe3b1696b1ULL
+#define SHA512_CONST_14 0x9bdc06a725c71235ULL
+#define SHA512_CONST_15 0xc19bf174cf692694ULL
+#define SHA512_CONST_16 0xe49b69c19ef14ad2ULL
+#define SHA512_CONST_17 0xefbe4786384f25e3ULL
+#define SHA512_CONST_18 0x0fc19dc68b8cd5b5ULL
+#define SHA512_CONST_19 0x240ca1cc77ac9c65ULL
+#define SHA512_CONST_20 0x2de92c6f592b0275ULL
+#define SHA512_CONST_21 0x4a7484aa6ea6e483ULL
+#define SHA512_CONST_22 0x5cb0a9dcbd41fbd4ULL
+#define SHA512_CONST_23 0x76f988da831153b5ULL
+#define SHA512_CONST_24 0x983e5152ee66dfabULL
+#define SHA512_CONST_25 0xa831c66d2db43210ULL
+#define SHA512_CONST_26 0xb00327c898fb213fULL
+#define SHA512_CONST_27 0xbf597fc7beef0ee4ULL
+#define SHA512_CONST_28 0xc6e00bf33da88fc2ULL
+#define SHA512_CONST_29 0xd5a79147930aa725ULL
+#define SHA512_CONST_30 0x06ca6351e003826fULL
+#define SHA512_CONST_31 0x142929670a0e6e70ULL
+#define SHA512_CONST_32 0x27b70a8546d22ffcULL
+#define SHA512_CONST_33 0x2e1b21385c26c926ULL
+#define SHA512_CONST_34 0x4d2c6dfc5ac42aedULL
+#define SHA512_CONST_35 0x53380d139d95b3dfULL
+#define SHA512_CONST_36 0x650a73548baf63deULL
+#define SHA512_CONST_37 0x766a0abb3c77b2a8ULL
+#define SHA512_CONST_38 0x81c2c92e47edaee6ULL
+#define SHA512_CONST_39 0x92722c851482353bULL
+#define SHA512_CONST_40 0xa2bfe8a14cf10364ULL
+#define SHA512_CONST_41 0xa81a664bbc423001ULL
+#define SHA512_CONST_42 0xc24b8b70d0f89791ULL
+#define SHA512_CONST_43 0xc76c51a30654be30ULL
+#define SHA512_CONST_44 0xd192e819d6ef5218ULL
+#define SHA512_CONST_45 0xd69906245565a910ULL
+#define SHA512_CONST_46 0xf40e35855771202aULL
+#define SHA512_CONST_47 0x106aa07032bbd1b8ULL
+#define SHA512_CONST_48 0x19a4c116b8d2d0c8ULL
+#define SHA512_CONST_49 0x1e376c085141ab53ULL
+#define SHA512_CONST_50 0x2748774cdf8eeb99ULL
+#define SHA512_CONST_51 0x34b0bcb5e19b48a8ULL
+#define SHA512_CONST_52 0x391c0cb3c5c95a63ULL
+#define SHA512_CONST_53 0x4ed8aa4ae3418acbULL
+#define SHA512_CONST_54 0x5b9cca4f7763e373ULL
+#define SHA512_CONST_55 0x682e6ff3d6b2b8a3ULL
+#define SHA512_CONST_56 0x748f82ee5defb2fcULL
+#define SHA512_CONST_57 0x78a5636f43172f60ULL
+#define SHA512_CONST_58 0x84c87814a1f0ab72ULL
+#define SHA512_CONST_59 0x8cc702081a6439ecULL
+#define SHA512_CONST_60 0x90befffa23631e28ULL
+#define SHA512_CONST_61 0xa4506cebde82bde9ULL
+#define SHA512_CONST_62 0xbef9a3f7b2c67915ULL
+#define SHA512_CONST_63 0xc67178f2e372532bULL
+#define SHA512_CONST_64 0xca273eceea26619cULL
+#define SHA512_CONST_65 0xd186b8c721c0c207ULL
+#define SHA512_CONST_66 0xeada7dd6cde0eb1eULL
+#define SHA512_CONST_67 0xf57d4f7fee6ed178ULL
+#define SHA512_CONST_68 0x06f067aa72176fbaULL
+#define SHA512_CONST_69 0x0a637dc5a2c898a6ULL
+#define SHA512_CONST_70 0x113f9804bef90daeULL
+#define SHA512_CONST_71 0x1b710b35131c471bULL
+#define SHA512_CONST_72 0x28db77f523047d84ULL
+#define SHA512_CONST_73 0x32caab7b40c72493ULL
+#define SHA512_CONST_74 0x3c9ebe0a15c9bebcULL
+#define SHA512_CONST_75 0x431d67c49c100d4cULL
+#define SHA512_CONST_76 0x4cc5d4becb3e42b6ULL
+#define SHA512_CONST_77 0x597f299cfc657e2aULL
+#define SHA512_CONST_78 0x5fcb6fab3ad6faecULL
+#define SHA512_CONST_79 0x6c44198c4a475817ULL
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_SHA2_CONSTS_H */
diff --git a/module/icp/include/sha2/sha2_impl.h b/module/icp/include/sha2/sha2_impl.h
new file mode 100644
index 000000000..bb42c3cd4
--- /dev/null
+++ b/module/icp/include/sha2/sha2_impl.h
@@ -0,0 +1,62 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SHA2_IMPL_H
+#define _SHA2_IMPL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum {
+ SHA1_TYPE,
+ SHA256_TYPE,
+ SHA384_TYPE,
+ SHA512_TYPE
+} sha2_mech_t;
+
+/*
+ * Context for SHA2 mechanism.
+ */
+typedef struct sha2_ctx {
+ sha2_mech_type_t sc_mech_type; /* type of context */
+ SHA2_CTX sc_sha2_ctx; /* SHA2 context */
+} sha2_ctx_t;
+
+/*
+ * Context for SHA2 HMAC and HMAC GENERAL mechanisms.
+ */
+typedef struct sha2_hmac_ctx {
+ sha2_mech_type_t hc_mech_type; /* type of context */
+ uint32_t hc_digest_len; /* digest len in bytes */
+ SHA2_CTX hc_icontext; /* inner SHA2 context */
+ SHA2_CTX hc_ocontext; /* outer SHA2 context */
+} sha2_hmac_ctx_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SHA2_IMPL_H */
diff --git a/module/icp/include/sys/asm_linkage.h b/module/icp/include/sys/asm_linkage.h
new file mode 100644
index 000000000..380597857
--- /dev/null
+++ b/module/icp/include/sys/asm_linkage.h
@@ -0,0 +1,36 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_ASM_LINKAGE_H
+#define _SYS_ASM_LINKAGE_H
+
+#if defined(__i386) || defined(__amd64)
+
+#include <sys/ia32/asm_linkage.h> /* XX64 x86/sys/asm_linkage.h */
+
+#endif
+
+#endif /* _SYS_ASM_LINKAGE_H */
diff --git a/module/icp/include/sys/bitmap.h b/module/icp/include/sys/bitmap.h
new file mode 100644
index 000000000..b1f6823e6
--- /dev/null
+++ b/module/icp/include/sys/bitmap.h
@@ -0,0 +1,183 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
+/* All Rights Reserved */
+
+
+#ifndef _SYS_BITMAP_H
+#define _SYS_BITMAP_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(__GNUC__) && defined(_ASM_INLINES) && \
+ (defined(__i386) || defined(__amd64))
+#include <asm/bitmap.h>
+#endif
+
+/*
+ * Operations on bitmaps of arbitrary size.
+ * A bitmap is a vector of 1 or more ulong_t's.
+ * The user of the package is responsible for range checks and keeping
+ * track of sizes.
+ */
+
+#ifdef _LP64
+#define BT_ULSHIFT 6 /* log base 2 of BT_NBIPUL, to extract word index */
+#define BT_ULSHIFT32 5 /* log base 2 of BT_NBIPUL, to extract word index */
+#else
+#define BT_ULSHIFT 5 /* log base 2 of BT_NBIPUL, to extract word index */
+#endif
+
+#define BT_NBIPUL (1 << BT_ULSHIFT) /* n bits per ulong_t */
+#define BT_ULMASK (BT_NBIPUL - 1) /* to extract bit index */
+
+#ifdef _LP64
+#define BT_NBIPUL32 (1 << BT_ULSHIFT32) /* n bits per ulong_t */
+#define BT_ULMASK32 (BT_NBIPUL32 - 1) /* to extract bit index */
+#define BT_ULMAXMASK 0xffffffffffffffff /* used by bt_getlowbit */
+#else
+#define BT_ULMAXMASK 0xffffffff
+#endif
+
+/*
+ * bitmap is a ulong_t *, bitindex an index_t
+ *
+ * The macros BT_WIM and BT_BIW are internal; there is no need
+ * for users of this package to use them.
+ */
+
+/*
+ * word in map
+ */
+#define BT_WIM(bitmap, bitindex) \
+ ((bitmap)[(bitindex) >> BT_ULSHIFT])
+/*
+ * bit in word
+ */
+#define BT_BIW(bitindex) \
+ (1UL << ((bitindex) & BT_ULMASK))
+
+#ifdef _LP64
+#define BT_WIM32(bitmap, bitindex) \
+ ((bitmap)[(bitindex) >> BT_ULSHIFT32])
+
+#define BT_BIW32(bitindex) \
+ (1UL << ((bitindex) & BT_ULMASK32))
+#endif
+
+/*
+ * These are public macros
+ *
+ * BT_BITOUL == n bits to n ulong_t's
+ */
+#define BT_BITOUL(nbits) \
+ (((nbits) + BT_NBIPUL - 1l) / BT_NBIPUL)
+#define BT_SIZEOFMAP(nbits) \
+ (BT_BITOUL(nbits) * sizeof (ulong_t))
+#define BT_TEST(bitmap, bitindex) \
+ ((BT_WIM((bitmap), (bitindex)) & BT_BIW(bitindex)) ? 1 : 0)
+#define BT_SET(bitmap, bitindex) \
+ { BT_WIM((bitmap), (bitindex)) |= BT_BIW(bitindex); }
+#define BT_CLEAR(bitmap, bitindex) \
+ { BT_WIM((bitmap), (bitindex)) &= ~BT_BIW(bitindex); }
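
[Editor's note] Taken together, the public macros size a map and set, test, and clear individual bits. A small sketch, assuming this header is reachable as <sys/bitmap.h> and that ulong_t comes in via the tree's <sys/types.h>:

    #include <sys/types.h>
    #include <sys/bitmap.h>	/* assumed include path for this header */

    #define	DEMO_NBITS	100

    static ulong_t demo_map[BT_BITOUL(DEMO_NBITS)];	/* rounds up to words */

    static int
    bitmap_demo(void)
    {
    	BT_SET(demo_map, 42);
    	if (BT_TEST(demo_map, 42))	/* 1 after the set */
    		BT_CLEAR(demo_map, 42);
    	return (BT_TEST(demo_map, 42));	/* 0 again */
    }
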
+
+#ifdef _LP64
+#define BT_BITOUL32(nbits) \
+ (((nbits) + BT_NBIPUL32 - 1l) / BT_NBIPUL32)
+#define BT_SIZEOFMAP32(nbits) \
+ (BT_BITOUL32(nbits) * sizeof (uint_t))
+#define BT_TEST32(bitmap, bitindex) \
+ ((BT_WIM32((bitmap), (bitindex)) & BT_BIW32(bitindex)) ? 1 : 0)
+#define BT_SET32(bitmap, bitindex) \
+ { BT_WIM32((bitmap), (bitindex)) |= BT_BIW32(bitindex); }
+#define BT_CLEAR32(bitmap, bitindex) \
+ { BT_WIM32((bitmap), (bitindex)) &= ~BT_BIW32(bitindex); }
+#endif /* _LP64 */
+
+
+/*
+ * BIT_ONLYONESET is a private macro not designed for bitmaps of
+ * arbitrary size. u must be an unsigned integer/long. It returns
+ * true if one and only one bit is set in u.
+ */
+#define BIT_ONLYONESET(u) \
+ ((((u) == 0) ? 0 : ((u) & ((u) - 1)) == 0))
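
[Editor's note] The test relies on the classic trick that u & (u - 1) clears the lowest set bit, so the result is zero exactly when at most one bit was set; the explicit u == 0 guard then excludes the empty case. A few illustrative values (ASSERT() is assumed in scope, as elsewhere in this module):

    static void
    onlyoneset_examples(void)
    {
    	ASSERT(BIT_ONLYONESET(0x10) == 1);	/* 0x10 & 0x0f == 0 */
    	ASSERT(BIT_ONLYONESET(0x18) == 0);	/* 0x18 & 0x17 == 0x10 */
    	ASSERT(BIT_ONLYONESET(0x00) == 0);	/* excluded by the guard */
    }
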
+
+#ifndef _ASM
+
+/*
+ * return next available bit index from map with specified number of bits
+ */
+extern index_t bt_availbit(ulong_t *bitmap, size_t nbits);
+/*
+ * find the highest order bit that is on, and is within or below
+ * the word specified by wx
+ */
+extern int bt_gethighbit(ulong_t *mapp, int wx);
+extern int bt_range(ulong_t *bitmap, size_t *pos1, size_t *pos2,
+ size_t end_pos);
+extern int bt_getlowbit(ulong_t *bitmap, size_t start, size_t stop);
+extern void bt_copy(ulong_t *, ulong_t *, ulong_t);
+
+/*
+ * find the parity
+ */
+extern int odd_parity(ulong_t);
+
+/*
+ * Atomically set/clear bits
+ * Atomic exclusive operations will set "result" to "-1"
+ * if the bit is already set/cleared. "result" will be set
+ * to 0 otherwise.
+ */
+#define BT_ATOMIC_SET(bitmap, bitindex) \
+ { atomic_or_long(&(BT_WIM(bitmap, bitindex)), BT_BIW(bitindex)); }
+#define BT_ATOMIC_CLEAR(bitmap, bitindex) \
+ { atomic_and_long(&(BT_WIM(bitmap, bitindex)), ~BT_BIW(bitindex)); }
+
+#define BT_ATOMIC_SET_EXCL(bitmap, bitindex, result) \
+ { result = atomic_set_long_excl(&(BT_WIM(bitmap, bitindex)), \
+ (bitindex) % BT_NBIPUL); }
+#define BT_ATOMIC_CLEAR_EXCL(bitmap, bitindex, result) \
+ { result = atomic_clear_long_excl(&(BT_WIM(bitmap, bitindex)), \
+ (bitindex) % BT_NBIPUL); }
+
+/*
+ * Extracts bits between index h (high, inclusive) and l (low, inclusive) from
+ * u, which must be an unsigned integer.
+ */
+#define BITX(u, h, l) (((u) >> (l)) & ((1LU << ((h) - (l) + 1LU)) - 1LU))
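
[Editor's note] For example, pulling a 4-bit field out of a register value:

    static uint32_t
    extract_nibble(void)
    {
    	/* bits 7..4 of 0xabcd: (0xabcd >> 4) & 0xf == 0xc */
    	return (BITX(0xabcdU, 7, 4));
    }
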
+
+#endif /* _ASM */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_BITMAP_H */
diff --git a/module/icp/include/sys/crypto/elfsign.h b/module/icp/include/sys/crypto/elfsign.h
new file mode 100644
index 000000000..5432f0c8d
--- /dev/null
+++ b/module/icp/include/sys/crypto/elfsign.h
@@ -0,0 +1,137 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_CRYPTO_ELFSIGN_H
+#define _SYS_CRYPTO_ELFSIGN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Consolidation Private Interface for elfsign/libpkcs11/kcfd
+ */
+
+#include <sys/zfs_context.h>
+
+/*
+ * Project Private structures and types used for communication between kcfd
+ * and KCF over the door.
+ */
+
+typedef enum ELFsign_status_e {
+ ELFSIGN_UNKNOWN,
+ ELFSIGN_SUCCESS,
+ ELFSIGN_FAILED,
+ ELFSIGN_NOTSIGNED,
+ ELFSIGN_INVALID_CERTPATH,
+ ELFSIGN_INVALID_ELFOBJ,
+ ELFSIGN_RESTRICTED
+} ELFsign_status_t;
+
+#define KCF_KCFD_VERSION1 1
+#define SIG_MAX_LENGTH 1024
+
+#define ELF_SIGNATURE_SECTION ".SUNW_signature"
+
+typedef struct kcf_door_arg_s {
+ short da_version;
+ boolean_t da_iskernel;
+
+ union {
+ char filename[MAXPATHLEN]; /* For request */
+
+ struct kcf_door_result_s { /* For response */
+ ELFsign_status_t status;
+ uint32_t siglen;
+ uchar_t signature[1];
+ } result;
+ } da_u;
+} kcf_door_arg_t;
+
+typedef uint32_t filesig_vers_t;
+
+/*
+ * File Signature Structure
+ * Applicable to ELF and other file formats
+ */
+struct filesignatures {
+ uint32_t filesig_cnt; /* count of signatures */
+ uint32_t filesig_pad; /* unused */
+ union {
+ char filesig_data[1];
+ struct filesig { /* one of these for each signature */
+ uint32_t filesig_size;
+ filesig_vers_t filesig_version;
+ union {
+ struct filesig_version1 {
+ uint32_t filesig_v1_dnsize;
+ uint32_t filesig_v1_sigsize;
+ uint32_t filesig_v1_oidsize;
+ char filesig_v1_data[1];
+ } filesig_v1;
+ struct filesig_version3 {
+ uint64_t filesig_v3_time;
+ uint32_t filesig_v3_dnsize;
+ uint32_t filesig_v3_sigsize;
+ uint32_t filesig_v3_oidsize;
+ char filesig_v3_data[1];
+ } filesig_v3;
+ } _u2;
+ } filesig_sig;
+ uint64_t filesig_align;
+ } _u1;
+};
+#define filesig_sig _u1.filesig_sig
+
+#define filesig_v1_dnsize _u2.filesig_v1.filesig_v1_dnsize
+#define filesig_v1_sigsize _u2.filesig_v1.filesig_v1_sigsize
+#define filesig_v1_oidsize _u2.filesig_v1.filesig_v1_oidsize
+#define filesig_v1_data _u2.filesig_v1.filesig_v1_data
+
+#define filesig_v3_time _u2.filesig_v3.filesig_v3_time
+#define filesig_v3_dnsize _u2.filesig_v3.filesig_v3_dnsize
+#define filesig_v3_sigsize _u2.filesig_v3.filesig_v3_sigsize
+#define filesig_v3_oidsize _u2.filesig_v3.filesig_v3_oidsize
+#define filesig_v3_data _u2.filesig_v3.filesig_v3_data
+
+#define filesig_ALIGN(s) (((s) + sizeof (uint64_t) - 1) & \
+ (-sizeof (uint64_t)))
+#define filesig_next(ptr) (struct filesig *)((void *)((char *)(ptr) + \
+ filesig_ALIGN((ptr)->filesig_size)))
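
[Editor's note] Because filesig_ALIGN() rounds every record up to an 8-byte boundary, the filesig_cnt records can be walked in place; a sketch, with fsp assumed to point at an already-mapped signature section:

    static void
    walk_signatures_sketch(struct filesignatures *fsp)
    {
    	struct filesig *fp = &fsp->filesig_sig;	/* first record */
    	uint32_t i;

    	for (i = 0; i < fsp->filesig_cnt; i++) {
    		/* inspect fp->filesig_version, fp->filesig_size, ... */
    		fp = filesig_next(fp);
    	}
    }
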
+
+#define FILESIG_UNKNOWN 0 /* unrecognized version */
+#define FILESIG_VERSION1 1 /* version1, all but sig section */
+#define FILESIG_VERSION2 2 /* version1 format, SHF_ALLOC only */
+#define FILESIG_VERSION3 3 /* version3, all but sig section */
+#define FILESIG_VERSION4 4 /* version3 format, SHF_ALLOC only */
+
+#define _PATH_KCFD_DOOR "/etc/svc/volatile/kcfd_door"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_CRYPTO_ELFSIGN_H */
diff --git a/module/icp/include/sys/crypto/impl.h b/module/icp/include/sys/crypto/impl.h
new file mode 100644
index 000000000..6d8ea8d65
--- /dev/null
+++ b/module/icp/include/sys/crypto/impl.h
@@ -0,0 +1,1370 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_CRYPTO_IMPL_H
+#define _SYS_CRYPTO_IMPL_H
+
+/*
+ * Kernel Cryptographic Framework private implementation definitions.
+ */
+
+#include <sys/zfs_context.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/api.h>
+#include <sys/crypto/spi.h>
+#include <sys/crypto/ioctl.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define KCF_MODULE "kcf"
+
+/*
+ * Prefix convention: structures internal to the kernel cryptographic
+ * framework start with 'kcf_'. Exposed structures start with 'crypto_'.
+ */
+
+/* Provider stats. Not protected. */
+typedef struct kcf_prov_stats {
+ kstat_named_t ps_ops_total;
+ kstat_named_t ps_ops_passed;
+ kstat_named_t ps_ops_failed;
+ kstat_named_t ps_ops_busy_rval;
+} kcf_prov_stats_t;
+
+/* Various kcf stats. Not protected. */
+typedef struct kcf_stats {
+ kstat_named_t ks_thrs_in_pool;
+ kstat_named_t ks_idle_thrs;
+ kstat_named_t ks_minthrs;
+ kstat_named_t ks_maxthrs;
+ kstat_named_t ks_swq_njobs;
+ kstat_named_t ks_swq_maxjobs;
+ kstat_named_t ks_taskq_threads;
+ kstat_named_t ks_taskq_minalloc;
+ kstat_named_t ks_taskq_maxalloc;
+} kcf_stats_t;
+
+/*
+ * Keep all the information needed by the scheduler from
+ * this provider.
+ */
+typedef struct kcf_sched_info {
+ /* The number of operations dispatched. */
+ uint64_t ks_ndispatches;
+
+ /* The number of operations that failed. */
+ uint64_t ks_nfails;
+
+ /* The number of operations that returned CRYPTO_BUSY. */
+ uint64_t ks_nbusy_rval;
+
+ /* taskq used to dispatch crypto requests */
+ taskq_t *ks_taskq;
+} kcf_sched_info_t;
+
+/*
+ * pd_irefcnt approximates the number of inflight requests to the
+ * provider. Though we increment this counter during registration for
+ * other purposes, that base value is mostly the same across all providers.
+ * So, it is a good measure of the load on a provider when it is not
+ * in a busy state. Once a provider notifies it is busy, requests
+ * back up in the taskq. So, we use tq_nalloc in that case, which gives
+ * the number of task entries in the task queue. Note that we do not
+ * acquire any locks here as it is not critical to get the exact number
+ * and the lock contention may be too costly for this code path.
+ */
+#define KCF_PROV_LOAD(pd) ((pd)->pd_state != KCF_PROV_BUSY ? \
+ (pd)->pd_irefcnt : (pd)->pd_sched_info.ks_taskq->tq_nalloc)
+
+#define KCF_PROV_INCRSTATS(pd, error) { \
+ (pd)->pd_sched_info.ks_ndispatches++; \
+ if (error == CRYPTO_BUSY) \
+ (pd)->pd_sched_info.ks_nbusy_rval++; \
+ else if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED) \
+ (pd)->pd_sched_info.ks_nfails++; \
+}
+
+
+/*
+ * The following two macros should be
+ * #define KCF_OPS_CLASSSIZE (KCF_LAST_OPSCLASS - KCF_FIRST_OPSCLASS + 2)
+ * #define KCF_MAXMECHTAB KCF_MAXCIPHER
+ *
+ * However, doing that would involve reorganizing the header file a bit.
+ * When impl.h is broken up (bug# 4703218), this will be done. For now,
+ * we hardcode these values.
+ */
+#define KCF_OPS_CLASSSIZE 8
+#define KCF_MAXMECHTAB 32
+
+/*
+ * Valid values for the state of a provider. The order of
+ * the elements is important.
+ *
+ * Routines which get a provider or the list of providers
+ * should pick only those that are either in KCF_PROV_READY state
+ * or in KCF_PROV_BUSY state.
+ */
+typedef enum {
+ KCF_PROV_ALLOCATED = 1,
+ KCF_PROV_UNVERIFIED,
+ KCF_PROV_VERIFICATION_FAILED,
+ /*
+	 * state < KCF_PROV_READY means the provider cannot
+ * be used at all.
+ */
+ KCF_PROV_READY,
+ KCF_PROV_BUSY,
+ /*
+	 * state > KCF_PROV_BUSY means the provider cannot
+ * be used for new requests.
+ */
+ KCF_PROV_FAILED,
+ /*
+ * Threads setting the following two states should do so only
+ * if the current state < KCF_PROV_DISABLED.
+ */
+ KCF_PROV_DISABLED,
+ KCF_PROV_REMOVED,
+ KCF_PROV_FREED
+} kcf_prov_state_t;
+
+#define KCF_IS_PROV_UNVERIFIED(pd) ((pd)->pd_state == KCF_PROV_UNVERIFIED)
+#define KCF_IS_PROV_USABLE(pd) ((pd)->pd_state == KCF_PROV_READY || \
+ (pd)->pd_state == KCF_PROV_BUSY)
+#define KCF_IS_PROV_REMOVED(pd) ((pd)->pd_state >= KCF_PROV_REMOVED)
+
+/* Internal flags valid for pd_flags field */
+#define KCF_PROV_RESTRICTED 0x40000000
+#define KCF_LPROV_MEMBER 0x80000000 /* is member of a logical provider */
+
+/*
+ * A provider descriptor structure. There is one such structure per
+ * provider. It is allocated and initialized at registration time and
+ * freed when the provider unregisters.
+ *
+ * pd_prov_type: Provider type, hardware or software
+ * pd_sid: Session ID of the provider used by kernel clients.
+ * This is valid only for session-oriented providers.
+ * pd_refcnt: Reference counter to this provider descriptor
+ * pd_irefcnt: References held by the framework internal structs
+ * pd_lock: lock protects pd_state and pd_provider_list
+ * pd_state: State value of the provider
+ * pd_provider_list: Used to cross-reference logical providers and their
+ * members. Not used for software providers.
+ * pd_resume_cv: cv to wait for state to change from KCF_PROV_BUSY
+ * pd_prov_handle: Provider handle specified by provider
+ * pd_ops_vector: The ops vector specified by Provider
+ * pd_mech_indx: Lookup table which maps a core framework mechanism
+ * number to an index in pd_mechanisms array
+ * pd_mechanisms: Array of mechanisms supported by the provider, specified
+ * by the provider during registration
+ * pd_sched_info: Scheduling information associated with the provider
+ * pd_mech_list_count: The number of entries in pi_mechanisms, specified
+ * by the provider during registration
+ * pd_name: Device name or module name
+ * pd_instance: Device instance
+ * pd_module_id: Module ID returned by modload
+ * pd_mctlp: Pointer to modctl structure for this provider
+ * pd_remove_cv: cv to wait on while the provider queue drains
+ * pd_description: Provider description string
+ * pd_flags:	bitwise OR of pi_flags from crypto_provider_info_t
+ *		and other internal flags defined above.
+ * pd_hash_limit: Maximum data size that hash mechanisms of this provider
+ *		can support.
+ * pd_kcf_prov_handle: KCF-private handle assigned by KCF
+ * pd_prov_id: Identification # assigned by KCF to provider
+ * pd_kstat: kstat associated with the provider
+ * pd_ks_data: kstat data
+ */
+typedef struct kcf_provider_desc {
+ crypto_provider_type_t pd_prov_type;
+ crypto_session_id_t pd_sid;
+ uint_t pd_refcnt;
+ uint_t pd_irefcnt;
+ kmutex_t pd_lock;
+ kcf_prov_state_t pd_state;
+ struct kcf_provider_list *pd_provider_list;
+ kcondvar_t pd_resume_cv;
+ crypto_provider_handle_t pd_prov_handle;
+ crypto_ops_t *pd_ops_vector;
+ ushort_t pd_mech_indx[KCF_OPS_CLASSSIZE]\
+ [KCF_MAXMECHTAB];
+ crypto_mech_info_t *pd_mechanisms;
+ kcf_sched_info_t pd_sched_info;
+ uint_t pd_mech_list_count;
+ // char *pd_name;
+ // uint_t pd_instance;
+ // int pd_module_id;
+ // struct modctl *pd_mctlp;
+ kcondvar_t pd_remove_cv;
+ char *pd_description;
+ uint_t pd_flags;
+ uint_t pd_hash_limit;
+ crypto_kcf_provider_handle_t pd_kcf_prov_handle;
+ crypto_provider_id_t pd_prov_id;
+ kstat_t *pd_kstat;
+ kcf_prov_stats_t pd_ks_data;
+} kcf_provider_desc_t;
+
+/* useful for making a list of providers */
+typedef struct kcf_provider_list {
+ struct kcf_provider_list *pl_next;
+ struct kcf_provider_desc *pl_provider;
+} kcf_provider_list_t;
+
+/* atomic operations in Linux implicitly form a memory barrier */
+#define membar_exit()
+
+/*
+ * If a component has a reference to a kcf_provider_desc_t,
+ * it REFHOLD()s. A new provider descriptor which is referenced only
+ * by the providers table has a reference counter of one.
+ */
+#define KCF_PROV_REFHOLD(desc) { \
+ atomic_add_32(&(desc)->pd_refcnt, 1); \
+ ASSERT((desc)->pd_refcnt != 0); \
+}
+
+#define KCF_PROV_IREFHOLD(desc) { \
+ atomic_add_32(&(desc)->pd_irefcnt, 1); \
+ ASSERT((desc)->pd_irefcnt != 0); \
+}
+
+#define KCF_PROV_IREFRELE(desc) { \
+ ASSERT((desc)->pd_irefcnt != 0); \
+ membar_exit(); \
+ if (atomic_add_32_nv(&(desc)->pd_irefcnt, -1) == 0) { \
+ cv_broadcast(&(desc)->pd_remove_cv); \
+ } \
+}
+
+#define KCF_PROV_REFHELD(desc) ((desc)->pd_refcnt >= 1)
+
+#define KCF_PROV_REFRELE(desc) { \
+ ASSERT((desc)->pd_refcnt != 0); \
+ membar_exit(); \
+ if (atomic_add_32_nv(&(desc)->pd_refcnt, -1) == 0) { \
+ kcf_provider_zero_refcnt((desc)); \
+ } \
+}
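
[Editor's note] The hold/release pair brackets any use of a descriptor obtained from a shared table; schematically:

    static void
    use_provider_sketch(kcf_provider_desc_t *pd)
    {
    	KCF_PROV_REFHOLD(pd);
    	/* ... dispatch work to the provider ... */
    	KCF_PROV_REFRELE(pd);	/* last ref runs kcf_provider_zero_refcnt() */
    }
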
+
+
+/* list of crypto_mech_info_t valid as the second mech in a dual operation */
+
+typedef struct crypto_mech_info_list {
+ struct crypto_mech_info_list *ml_next;
+ crypto_mech_type_t ml_kcf_mechid; /* KCF's id */
+ crypto_mech_info_t ml_mech_info;
+} crypto_mech_info_list_t;
+
+/*
+ * An element in a mechanism provider descriptors chain.
+ * The kcf_prov_mech_desc_t is duplicated in every chain the provider belongs
+ * to. This is a small tradeoff of memory vs. mutex spinning time to access
+ * the common provider field.
+ */
+
+typedef struct kcf_prov_mech_desc {
+ struct kcf_mech_entry *pm_me; /* Back to the head */
+ struct kcf_prov_mech_desc *pm_next; /* Next in the chain */
+ crypto_mech_info_t pm_mech_info; /* Provider mech info */
+ crypto_mech_info_list_t *pm_mi_list; /* list for duals */
+ kcf_provider_desc_t *pm_prov_desc; /* Common desc. */
+} kcf_prov_mech_desc_t;
+
+/* and the notation shortcuts ... */
+#define pm_provider_type pm_prov_desc.pd_provider_type
+#define pm_provider_handle pm_prov_desc.pd_provider_handle
+#define pm_ops_vector pm_prov_desc.pd_ops_vector
+
+
+#define KCF_CPU_PAD (128 - sizeof (crypto_mech_name_t) - \
+ sizeof (crypto_mech_type_t) - \
+ sizeof (kmutex_t) - 2 * sizeof (kcf_prov_mech_desc_t *) - \
+ sizeof (int) - sizeof (uint32_t) - sizeof (size_t))
+
+/*
+ * A mechanism entry in an xxx_mech_tab[]. KCF_CPU_PAD needs
+ * to be adjusted if this structure is changed.
+ */
+typedef struct kcf_mech_entry {
+ crypto_mech_name_t me_name; /* mechanism name */
+ crypto_mech_type_t me_mechid; /* Internal id for mechanism */
+ kmutex_t me_mutex; /* access protection */
+ kcf_prov_mech_desc_t *me_hw_prov_chain; /* list of HW providers */
+ kcf_prov_mech_desc_t *me_sw_prov; /* SW provider */
+ /*
+ * Number of HW providers in the chain. There is only one
+ * SW provider. So, we need only a count of HW providers.
+ */
+ int me_num_hwprov;
+ /*
+ * When a SW provider is present, this is the generation number that
+ * ensures no objects from old SW providers are used in the new one
+ */
+ uint32_t me_gen_swprov;
+ /*
+ * threshold for using hardware providers for this mech
+ */
+ size_t me_threshold;
+ uint8_t me_pad[KCF_CPU_PAD];
+} kcf_mech_entry_t;
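
[Editor's note] KCF_CPU_PAD is sized so that, ignoring alignment holes, each entry fills a 128-byte cache line and neighboring table entries do not share one. A compile-time check one could add, assuming an Illumos-style CTASSERT() is available (it is not provided by this header):

    /* assumption: CTASSERT() (compile-time assertion) is in scope */
    CTASSERT(sizeof (kcf_mech_entry_t) == 128);
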
+
+/*
+ * A policy descriptor structure. It is allocated and initialized
+ * when administrative ioctls load disabled mechanisms.
+ *
+ * pd_prov_type: Provider type, hardware or software
+ * pd_name: Device name or module name.
+ * pd_instance: Device instance.
+ * pd_refcnt: Reference counter for this policy descriptor
+ * pd_mutex: Protects array and count of disabled mechanisms.
+ * pd_disabled_count: Count of disabled mechanisms.
+ * pd_disabled_mechs: Array of disabled mechanisms.
+ */
+typedef struct kcf_policy_desc {
+ crypto_provider_type_t pd_prov_type;
+ char *pd_name;
+ uint_t pd_instance;
+ uint_t pd_refcnt;
+ kmutex_t pd_mutex;
+ uint_t pd_disabled_count;
+ crypto_mech_name_t *pd_disabled_mechs;
+} kcf_policy_desc_t;
+
+/*
+ * If a component has a reference to a kcf_policy_desc_t,
+ * it REFHOLD()s. A new policy descriptor which is referenced only
+ * by the policy table has a reference count of one.
+ */
+#define KCF_POLICY_REFHOLD(desc) { \
+ atomic_add_32(&(desc)->pd_refcnt, 1); \
+ ASSERT((desc)->pd_refcnt != 0); \
+}
+
+/*
+ * Releases a reference to a policy descriptor. When the last
+ * reference is released, the descriptor is freed.
+ */
+#define KCF_POLICY_REFRELE(desc) { \
+ ASSERT((desc)->pd_refcnt != 0); \
+ membar_exit(); \
+ if (atomic_add_32_nv(&(desc)->pd_refcnt, -1) == 0) \
+ kcf_policy_free_desc(desc); \
+}
+
+/*
+ * This entry stores the name of a software module and its
+ * mechanisms. The mechanisms are 'hints' that are used to
+ * trigger loading of the module.
+ */
+typedef struct kcf_soft_conf_entry {
+ struct kcf_soft_conf_entry *ce_next;
+ char *ce_name;
+ crypto_mech_name_t *ce_mechs;
+ uint_t ce_count;
+} kcf_soft_conf_entry_t;
+
+extern kmutex_t soft_config_mutex;
+extern kcf_soft_conf_entry_t *soft_config_list;
+
+/*
+ * Global tables. The sizes are from the predefined PKCS#11 v2.20 mechanisms,
+ * with a margin of a few extra empty entries.
+ */
+
+#define KCF_MAXDIGEST 16 /* Digests */
+#define KCF_MAXCIPHER 64 /* Ciphers */
+#define KCF_MAXMAC 40 /* Message authentication codes */
+#define KCF_MAXSIGN 24 /* Sign/Verify */
+#define KCF_MAXKEYOPS 116 /* Key generation and derivation */
+#define KCF_MAXMISC 16 /* Others ... */
+
+#define	KCF_MAXMECHS	(KCF_MAXDIGEST + KCF_MAXCIPHER + KCF_MAXMAC + \
+			KCF_MAXSIGN + KCF_MAXKEYOPS + \
+			KCF_MAXMISC)
+
+extern kcf_mech_entry_t kcf_digest_mechs_tab[];
+extern kcf_mech_entry_t kcf_cipher_mechs_tab[];
+extern kcf_mech_entry_t kcf_mac_mechs_tab[];
+extern kcf_mech_entry_t kcf_sign_mechs_tab[];
+extern kcf_mech_entry_t kcf_keyops_mechs_tab[];
+extern kcf_mech_entry_t kcf_misc_mechs_tab[];
+
+extern kmutex_t kcf_mech_tabs_lock;
+
+typedef enum {
+ KCF_DIGEST_CLASS = 1,
+ KCF_CIPHER_CLASS,
+ KCF_MAC_CLASS,
+ KCF_SIGN_CLASS,
+ KCF_KEYOPS_CLASS,
+ KCF_MISC_CLASS
+} kcf_ops_class_t;
+
+#define KCF_FIRST_OPSCLASS KCF_DIGEST_CLASS
+#define KCF_LAST_OPSCLASS KCF_MISC_CLASS
+
+/* The table of all the kcf_xxx_mech_tab[]s, indexed by kcf_ops_class */
+
+typedef struct kcf_mech_entry_tab {
+ int met_size; /* Size of the met_tab[] */
+ kcf_mech_entry_t *met_tab; /* the table */
+} kcf_mech_entry_tab_t;
+
+extern kcf_mech_entry_tab_t kcf_mech_tabs_tab[];
+
+#define KCF_MECHID(class, index) \
+ (((crypto_mech_type_t)(class) << 32) | (crypto_mech_type_t)(index))
+
+#define KCF_MECH2CLASS(mech_type) ((kcf_ops_class_t)((mech_type) >> 32))
+
+#define KCF_MECH2INDEX(mech_type) ((int)(mech_type))
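
[Editor's note] A mechanism type is therefore a 64-bit value carrying the ops class in the upper 32 bits and the table index in the lower 32; round-tripping one:

    static void
    mechid_roundtrip(void)
    {
    	crypto_mech_type_t mt = KCF_MECHID(KCF_DIGEST_CLASS, 3);

    	ASSERT(KCF_MECH2CLASS(mt) == KCF_DIGEST_CLASS);	/* upper 32 bits */
    	ASSERT(KCF_MECH2INDEX(mt) == 3);		/* lower 32 bits */
    }
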
+
+#define KCF_TO_PROV_MECH_INDX(pd, mech_type) \
+ ((pd)->pd_mech_indx[KCF_MECH2CLASS(mech_type)] \
+ [KCF_MECH2INDEX(mech_type)])
+
+#define KCF_TO_PROV_MECHINFO(pd, mech_type) \
+ ((pd)->pd_mechanisms[KCF_TO_PROV_MECH_INDX(pd, mech_type)])
+
+#define KCF_TO_PROV_MECHNUM(pd, mech_type) \
+ (KCF_TO_PROV_MECHINFO(pd, mech_type).cm_mech_number)
+
+#define KCF_CAN_SHARE_OPSTATE(pd, mech_type) \
+ ((KCF_TO_PROV_MECHINFO(pd, mech_type).cm_mech_flags) & \
+ CRYPTO_CAN_SHARE_OPSTATE)
+
+/* ps_refcnt is protected by cm_lock in the crypto_minor structure */
+typedef struct crypto_provider_session {
+ struct crypto_provider_session *ps_next;
+ crypto_session_id_t ps_session;
+ kcf_provider_desc_t *ps_provider;
+ kcf_provider_desc_t *ps_real_provider;
+ uint_t ps_refcnt;
+} crypto_provider_session_t;
+
+typedef struct crypto_session_data {
+ kmutex_t sd_lock;
+ kcondvar_t sd_cv;
+ uint32_t sd_flags;
+ int sd_pre_approved_amount;
+ crypto_ctx_t *sd_digest_ctx;
+ crypto_ctx_t *sd_encr_ctx;
+ crypto_ctx_t *sd_decr_ctx;
+ crypto_ctx_t *sd_sign_ctx;
+ crypto_ctx_t *sd_verify_ctx;
+ crypto_ctx_t *sd_sign_recover_ctx;
+ crypto_ctx_t *sd_verify_recover_ctx;
+ kcf_provider_desc_t *sd_provider;
+ void *sd_find_init_cookie;
+ crypto_provider_session_t *sd_provider_session;
+} crypto_session_data_t;
+
+#define CRYPTO_SESSION_IN_USE 0x00000001
+#define CRYPTO_SESSION_IS_BUSY 0x00000002
+#define CRYPTO_SESSION_IS_CLOSED 0x00000004
+
+#define KCF_MAX_PIN_LEN 1024
+
+/*
+ * Per-minor info.
+ *
+ * cm_lock protects everything in this structure except for cm_refcnt.
+ */
+typedef struct crypto_minor {
+ uint_t cm_refcnt;
+ kmutex_t cm_lock;
+ kcondvar_t cm_cv;
+ crypto_session_data_t **cm_session_table;
+ uint_t cm_session_table_count;
+ kcf_provider_desc_t **cm_provider_array;
+ uint_t cm_provider_count;
+ crypto_provider_session_t *cm_provider_session;
+} crypto_minor_t;
+
+/*
+ * Return codes for internal functions
+ */
+#define KCF_SUCCESS 0x0 /* Successful call */
+#define KCF_INVALID_MECH_NUMBER 0x1 /* invalid mechanism number */
+#define KCF_INVALID_MECH_NAME 0x2 /* invalid mechanism name */
+#define KCF_INVALID_MECH_CLASS 0x3 /* invalid mechanism class */
+#define KCF_MECH_TAB_FULL 0x4 /* Need more room in the mech tabs. */
+#define KCF_INVALID_INDX ((ushort_t)-1)
+
+/*
+ * KCF internal mechanism and function group for tracking RNG providers.
+ */
+#define SUN_RANDOM "random"
+#define CRYPTO_FG_RANDOM 0x80000000 /* generate_random() */
+
+/*
+ * Wrappers for ops vectors. In the wrapper definitions below, the pd
+ * argument always corresponds to a pointer to a provider descriptor
+ * of type kcf_provider_desc_t.
+ */
+
+#define KCF_PROV_CONTROL_OPS(pd) ((pd)->pd_ops_vector->co_control_ops)
+#define KCF_PROV_CTX_OPS(pd) ((pd)->pd_ops_vector->co_ctx_ops)
+#define KCF_PROV_DIGEST_OPS(pd) ((pd)->pd_ops_vector->co_digest_ops)
+#define KCF_PROV_CIPHER_OPS(pd) ((pd)->pd_ops_vector->co_cipher_ops)
+#define KCF_PROV_MAC_OPS(pd) ((pd)->pd_ops_vector->co_mac_ops)
+#define KCF_PROV_SIGN_OPS(pd) ((pd)->pd_ops_vector->co_sign_ops)
+#define KCF_PROV_VERIFY_OPS(pd) ((pd)->pd_ops_vector->co_verify_ops)
+#define KCF_PROV_DUAL_OPS(pd) ((pd)->pd_ops_vector->co_dual_ops)
+#define KCF_PROV_DUAL_CIPHER_MAC_OPS(pd) \
+ ((pd)->pd_ops_vector->co_dual_cipher_mac_ops)
+#define KCF_PROV_RANDOM_OPS(pd) ((pd)->pd_ops_vector->co_random_ops)
+#define KCF_PROV_SESSION_OPS(pd) ((pd)->pd_ops_vector->co_session_ops)
+#define KCF_PROV_OBJECT_OPS(pd) ((pd)->pd_ops_vector->co_object_ops)
+#define KCF_PROV_KEY_OPS(pd) ((pd)->pd_ops_vector->co_key_ops)
+#define KCF_PROV_PROVIDER_OPS(pd) ((pd)->pd_ops_vector->co_provider_ops)
+#define KCF_PROV_MECH_OPS(pd) ((pd)->pd_ops_vector->co_mech_ops)
+#define KCF_PROV_NOSTORE_KEY_OPS(pd) \
+ ((pd)->pd_ops_vector->co_nostore_key_ops)
+
+/*
+ * Wrappers for crypto_control_ops(9S) entry points.
+ */
+
+#define KCF_PROV_STATUS(pd, status) ( \
+ (KCF_PROV_CONTROL_OPS(pd) && \
+ KCF_PROV_CONTROL_OPS(pd)->provider_status) ? \
+ KCF_PROV_CONTROL_OPS(pd)->provider_status( \
+ (pd)->pd_prov_handle, status) : \
+ CRYPTO_NOT_SUPPORTED)
+
+/*
+ * Wrappers for crypto_ctx_ops(9S) entry points.
+ */
+
+#define KCF_PROV_CREATE_CTX_TEMPLATE(pd, mech, key, template, size, req) ( \
+ (KCF_PROV_CTX_OPS(pd) && KCF_PROV_CTX_OPS(pd)->create_ctx_template) ? \
+ KCF_PROV_CTX_OPS(pd)->create_ctx_template( \
+ (pd)->pd_prov_handle, mech, key, template, size, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_FREE_CONTEXT(pd, ctx) ( \
+ (KCF_PROV_CTX_OPS(pd) && KCF_PROV_CTX_OPS(pd)->free_context) ? \
+ KCF_PROV_CTX_OPS(pd)->free_context(ctx) : CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_COPYIN_MECH(pd, umech, kmech, errorp, mode) ( \
+ (KCF_PROV_MECH_OPS(pd) && KCF_PROV_MECH_OPS(pd)->copyin_mechanism) ? \
+ KCF_PROV_MECH_OPS(pd)->copyin_mechanism( \
+ (pd)->pd_prov_handle, umech, kmech, errorp, mode) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_COPYOUT_MECH(pd, kmech, umech, errorp, mode) ( \
+ (KCF_PROV_MECH_OPS(pd) && KCF_PROV_MECH_OPS(pd)->copyout_mechanism) ? \
+ KCF_PROV_MECH_OPS(pd)->copyout_mechanism( \
+ (pd)->pd_prov_handle, kmech, umech, errorp, mode) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_FREE_MECH(pd, prov_mech) ( \
+ (KCF_PROV_MECH_OPS(pd) && KCF_PROV_MECH_OPS(pd)->free_mechanism) ? \
+ KCF_PROV_MECH_OPS(pd)->free_mechanism( \
+ (pd)->pd_prov_handle, prov_mech) : CRYPTO_NOT_SUPPORTED)
+
+/*
+ * Wrappers for crypto_digest_ops(9S) entry points.
+ */
+
+#define KCF_PROV_DIGEST_INIT(pd, ctx, mech, req) ( \
+ (KCF_PROV_DIGEST_OPS(pd) && KCF_PROV_DIGEST_OPS(pd)->digest_init) ? \
+ KCF_PROV_DIGEST_OPS(pd)->digest_init(ctx, mech, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+/*
+ * The _ (underscore) in _digest is needed to avoid replacing the
+ * function digest().
+ */
+#define KCF_PROV_DIGEST(pd, ctx, data, _digest, req) ( \
+ (KCF_PROV_DIGEST_OPS(pd) && KCF_PROV_DIGEST_OPS(pd)->digest) ? \
+ KCF_PROV_DIGEST_OPS(pd)->digest(ctx, data, _digest, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_DIGEST_UPDATE(pd, ctx, data, req) ( \
+ (KCF_PROV_DIGEST_OPS(pd) && KCF_PROV_DIGEST_OPS(pd)->digest_update) ? \
+ KCF_PROV_DIGEST_OPS(pd)->digest_update(ctx, data, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_DIGEST_KEY(pd, ctx, key, req) ( \
+ (KCF_PROV_DIGEST_OPS(pd) && KCF_PROV_DIGEST_OPS(pd)->digest_key) ? \
+ KCF_PROV_DIGEST_OPS(pd)->digest_key(ctx, key, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_DIGEST_FINAL(pd, ctx, digest, req) ( \
+ (KCF_PROV_DIGEST_OPS(pd) && KCF_PROV_DIGEST_OPS(pd)->digest_final) ? \
+ KCF_PROV_DIGEST_OPS(pd)->digest_final(ctx, digest, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_DIGEST_ATOMIC(pd, session, mech, data, digest, req) ( \
+ (KCF_PROV_DIGEST_OPS(pd) && KCF_PROV_DIGEST_OPS(pd)->digest_atomic) ? \
+ KCF_PROV_DIGEST_OPS(pd)->digest_atomic( \
+ (pd)->pd_prov_handle, session, mech, data, digest, req) : \
+ CRYPTO_NOT_SUPPORTED)
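
[Editor's note] Each wrapper degrades to CRYPTO_NOT_SUPPORTED when a provider leaves an entry point NULL, so dispatch code never tests the ops vector itself. Schematically (argument types as in sys/crypto/spi.h; a sketch, not the module's actual dispatch path):

    static int
    digest_init_sketch(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
        crypto_mechanism_t *mech, crypto_req_handle_t req)
    {
    	int rv;

    	rv = KCF_PROV_DIGEST_INIT(pd, ctx, mech, req);
    	if (rv == CRYPTO_NOT_SUPPORTED) {
    		/* provider lacks digest_init; caller may pick another pd */
    	}
    	return (rv);
    }
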
+
+/*
+ * Wrappers for crypto_cipher_ops(9S) entry points.
+ */
+
+#define KCF_PROV_ENCRYPT_INIT(pd, ctx, mech, key, template, req) ( \
+ (KCF_PROV_CIPHER_OPS(pd) && KCF_PROV_CIPHER_OPS(pd)->encrypt_init) ? \
+ KCF_PROV_CIPHER_OPS(pd)->encrypt_init(ctx, mech, key, template, \
+ req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_ENCRYPT(pd, ctx, plaintext, ciphertext, req) ( \
+ (KCF_PROV_CIPHER_OPS(pd) && KCF_PROV_CIPHER_OPS(pd)->encrypt) ? \
+ KCF_PROV_CIPHER_OPS(pd)->encrypt(ctx, plaintext, ciphertext, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_ENCRYPT_UPDATE(pd, ctx, plaintext, ciphertext, req) ( \
+ (KCF_PROV_CIPHER_OPS(pd) && KCF_PROV_CIPHER_OPS(pd)->encrypt_update) ? \
+ KCF_PROV_CIPHER_OPS(pd)->encrypt_update(ctx, plaintext, \
+ ciphertext, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_ENCRYPT_FINAL(pd, ctx, ciphertext, req) ( \
+ (KCF_PROV_CIPHER_OPS(pd) && KCF_PROV_CIPHER_OPS(pd)->encrypt_final) ? \
+ KCF_PROV_CIPHER_OPS(pd)->encrypt_final(ctx, ciphertext, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_ENCRYPT_ATOMIC(pd, session, mech, key, plaintext, ciphertext, \
+ template, req) ( \
+ (KCF_PROV_CIPHER_OPS(pd) && KCF_PROV_CIPHER_OPS(pd)->encrypt_atomic) ? \
+ KCF_PROV_CIPHER_OPS(pd)->encrypt_atomic( \
+ (pd)->pd_prov_handle, session, mech, key, plaintext, ciphertext, \
+ template, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_DECRYPT_INIT(pd, ctx, mech, key, template, req) ( \
+ (KCF_PROV_CIPHER_OPS(pd) && KCF_PROV_CIPHER_OPS(pd)->decrypt_init) ? \
+ KCF_PROV_CIPHER_OPS(pd)->decrypt_init(ctx, mech, key, template, \
+ req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_DECRYPT(pd, ctx, ciphertext, plaintext, req) ( \
+ (KCF_PROV_CIPHER_OPS(pd) && KCF_PROV_CIPHER_OPS(pd)->decrypt) ? \
+ KCF_PROV_CIPHER_OPS(pd)->decrypt(ctx, ciphertext, plaintext, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_DECRYPT_UPDATE(pd, ctx, ciphertext, plaintext, req) ( \
+ (KCF_PROV_CIPHER_OPS(pd) && KCF_PROV_CIPHER_OPS(pd)->decrypt_update) ? \
+ KCF_PROV_CIPHER_OPS(pd)->decrypt_update(ctx, ciphertext, \
+ plaintext, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_DECRYPT_FINAL(pd, ctx, plaintext, req) ( \
+ (KCF_PROV_CIPHER_OPS(pd) && KCF_PROV_CIPHER_OPS(pd)->decrypt_final) ? \
+ KCF_PROV_CIPHER_OPS(pd)->decrypt_final(ctx, plaintext, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_DECRYPT_ATOMIC(pd, session, mech, key, ciphertext, plaintext, \
+ template, req) ( \
+ (KCF_PROV_CIPHER_OPS(pd) && KCF_PROV_CIPHER_OPS(pd)->decrypt_atomic) ? \
+ KCF_PROV_CIPHER_OPS(pd)->decrypt_atomic( \
+ (pd)->pd_prov_handle, session, mech, key, ciphertext, plaintext, \
+ template, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+/*
+ * Wrappers for crypto_mac_ops(9S) entry points.
+ */
+
+#define KCF_PROV_MAC_INIT(pd, ctx, mech, key, template, req) ( \
+ (KCF_PROV_MAC_OPS(pd) && KCF_PROV_MAC_OPS(pd)->mac_init) ? \
+ KCF_PROV_MAC_OPS(pd)->mac_init(ctx, mech, key, template, req) \
+ : CRYPTO_NOT_SUPPORTED)
+
+/*
+ * The _ (underscore) in _mac is needed to avoid replacing the
+ * function mac().
+ */
+#define KCF_PROV_MAC(pd, ctx, data, _mac, req) ( \
+ (KCF_PROV_MAC_OPS(pd) && KCF_PROV_MAC_OPS(pd)->mac) ? \
+ KCF_PROV_MAC_OPS(pd)->mac(ctx, data, _mac, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_MAC_UPDATE(pd, ctx, data, req) ( \
+ (KCF_PROV_MAC_OPS(pd) && KCF_PROV_MAC_OPS(pd)->mac_update) ? \
+ KCF_PROV_MAC_OPS(pd)->mac_update(ctx, data, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_MAC_FINAL(pd, ctx, mac, req) ( \
+ (KCF_PROV_MAC_OPS(pd) && KCF_PROV_MAC_OPS(pd)->mac_final) ? \
+ KCF_PROV_MAC_OPS(pd)->mac_final(ctx, mac, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_MAC_ATOMIC(pd, session, mech, key, data, mac, template, \
+ req) ( \
+ (KCF_PROV_MAC_OPS(pd) && KCF_PROV_MAC_OPS(pd)->mac_atomic) ? \
+ KCF_PROV_MAC_OPS(pd)->mac_atomic( \
+ (pd)->pd_prov_handle, session, mech, key, data, mac, template, \
+ req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_MAC_VERIFY_ATOMIC(pd, session, mech, key, data, mac, \
+ template, req) ( \
+ (KCF_PROV_MAC_OPS(pd) && KCF_PROV_MAC_OPS(pd)->mac_verify_atomic) ? \
+ KCF_PROV_MAC_OPS(pd)->mac_verify_atomic( \
+ (pd)->pd_prov_handle, session, mech, key, data, mac, template, \
+ req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+/*
+ * Wrappers for crypto_sign_ops(9S) entry points.
+ */
+
+#define KCF_PROV_SIGN_INIT(pd, ctx, mech, key, template, req) ( \
+ (KCF_PROV_SIGN_OPS(pd) && KCF_PROV_SIGN_OPS(pd)->sign_init) ? \
+ KCF_PROV_SIGN_OPS(pd)->sign_init( \
+ ctx, mech, key, template, req) : CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_SIGN(pd, ctx, data, sig, req) ( \
+ (KCF_PROV_SIGN_OPS(pd) && KCF_PROV_SIGN_OPS(pd)->sign) ? \
+ KCF_PROV_SIGN_OPS(pd)->sign(ctx, data, sig, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_SIGN_UPDATE(pd, ctx, data, req) ( \
+ (KCF_PROV_SIGN_OPS(pd) && KCF_PROV_SIGN_OPS(pd)->sign_update) ? \
+ KCF_PROV_SIGN_OPS(pd)->sign_update(ctx, data, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_SIGN_FINAL(pd, ctx, sig, req) ( \
+ (KCF_PROV_SIGN_OPS(pd) && KCF_PROV_SIGN_OPS(pd)->sign_final) ? \
+ KCF_PROV_SIGN_OPS(pd)->sign_final(ctx, sig, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_SIGN_ATOMIC(pd, session, mech, key, data, template, \
+ sig, req) ( \
+ (KCF_PROV_SIGN_OPS(pd) && KCF_PROV_SIGN_OPS(pd)->sign_atomic) ? \
+ KCF_PROV_SIGN_OPS(pd)->sign_atomic( \
+ (pd)->pd_prov_handle, session, mech, key, data, sig, template, \
+ req) : CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_SIGN_RECOVER_INIT(pd, ctx, mech, key, template, \
+ req) ( \
+ (KCF_PROV_SIGN_OPS(pd) && KCF_PROV_SIGN_OPS(pd)->sign_recover_init) ? \
+ KCF_PROV_SIGN_OPS(pd)->sign_recover_init(ctx, mech, key, template, \
+ req) : CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_SIGN_RECOVER(pd, ctx, data, sig, req) ( \
+ (KCF_PROV_SIGN_OPS(pd) && KCF_PROV_SIGN_OPS(pd)->sign_recover) ? \
+ KCF_PROV_SIGN_OPS(pd)->sign_recover(ctx, data, sig, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_SIGN_RECOVER_ATOMIC(pd, session, mech, key, data, template, \
+ sig, req) ( \
+ (KCF_PROV_SIGN_OPS(pd) && \
+ KCF_PROV_SIGN_OPS(pd)->sign_recover_atomic) ? \
+ KCF_PROV_SIGN_OPS(pd)->sign_recover_atomic( \
+ (pd)->pd_prov_handle, session, mech, key, data, sig, template, \
+ req) : CRYPTO_NOT_SUPPORTED)
+
+/*
+ * Wrappers for crypto_verify_ops(9S) entry points.
+ */
+
+#define KCF_PROV_VERIFY_INIT(pd, ctx, mech, key, template, req) ( \
+ (KCF_PROV_VERIFY_OPS(pd) && KCF_PROV_VERIFY_OPS(pd)->verify_init) ? \
+ KCF_PROV_VERIFY_OPS(pd)->verify_init(ctx, mech, key, template, \
+ req) : CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_VERIFY(pd, ctx, data, sig, req) ( \
+ (KCF_PROV_VERIFY_OPS(pd) && KCF_PROV_VERIFY_OPS(pd)->do_verify) ? \
+ KCF_PROV_VERIFY_OPS(pd)->do_verify(ctx, data, sig, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_VERIFY_UPDATE(pd, ctx, data, req) ( \
+ (KCF_PROV_VERIFY_OPS(pd) && KCF_PROV_VERIFY_OPS(pd)->verify_update) ? \
+ KCF_PROV_VERIFY_OPS(pd)->verify_update(ctx, data, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_VERIFY_FINAL(pd, ctx, sig, req) ( \
+ (KCF_PROV_VERIFY_OPS(pd) && KCF_PROV_VERIFY_OPS(pd)->verify_final) ? \
+ KCF_PROV_VERIFY_OPS(pd)->verify_final(ctx, sig, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_VERIFY_ATOMIC(pd, session, mech, key, data, template, sig, \
+ req) ( \
+ (KCF_PROV_VERIFY_OPS(pd) && KCF_PROV_VERIFY_OPS(pd)->verify_atomic) ? \
+ KCF_PROV_VERIFY_OPS(pd)->verify_atomic( \
+ (pd)->pd_prov_handle, session, mech, key, data, sig, template, \
+ req) : CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_VERIFY_RECOVER_INIT(pd, ctx, mech, key, template, \
+ req) ( \
+ (KCF_PROV_VERIFY_OPS(pd) && \
+ KCF_PROV_VERIFY_OPS(pd)->verify_recover_init) ? \
+ KCF_PROV_VERIFY_OPS(pd)->verify_recover_init(ctx, mech, key, \
+ template, req) : CRYPTO_NOT_SUPPORTED)
+
+/* verify_recover() CSPI routine has a different argument order than verify() */
+#define KCF_PROV_VERIFY_RECOVER(pd, ctx, sig, data, req) ( \
+ (KCF_PROV_VERIFY_OPS(pd) && KCF_PROV_VERIFY_OPS(pd)->verify_recover) ? \
+ KCF_PROV_VERIFY_OPS(pd)->verify_recover(ctx, sig, data, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+/*
+ * verify_recover_atomic() CSPI routine has a different argument order
+ * than verify_atomic().
+ */
+#define KCF_PROV_VERIFY_RECOVER_ATOMIC(pd, session, mech, key, sig, \
+ template, data, req) ( \
+ (KCF_PROV_VERIFY_OPS(pd) && \
+ KCF_PROV_VERIFY_OPS(pd)->verify_recover_atomic) ? \
+ KCF_PROV_VERIFY_OPS(pd)->verify_recover_atomic( \
+ (pd)->pd_prov_handle, session, mech, key, sig, data, template, \
+ req) : CRYPTO_NOT_SUPPORTED)
+
+/*
+ * Wrappers for crypto_dual_ops(9S) entry points.
+ */
+
+#define KCF_PROV_DIGEST_ENCRYPT_UPDATE(digest_ctx, encrypt_ctx, plaintext, \
+ ciphertext, req) ( \
+ (KCF_PROV_DUAL_OPS(pd) && \
+ KCF_PROV_DUAL_OPS(pd)->digest_encrypt_update) ? \
+ KCF_PROV_DUAL_OPS(pd)->digest_encrypt_update( \
+ digest_ctx, encrypt_ctx, plaintext, ciphertext, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_DECRYPT_DIGEST_UPDATE(decrypt_ctx, digest_ctx, ciphertext, \
+ plaintext, req) ( \
+ (KCF_PROV_DUAL_OPS(pd) && \
+ KCF_PROV_DUAL_OPS(pd)->decrypt_digest_update) ? \
+ KCF_PROV_DUAL_OPS(pd)->decrypt_digest_update( \
+ decrypt_ctx, digest_ctx, ciphertext, plaintext, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_SIGN_ENCRYPT_UPDATE(sign_ctx, encrypt_ctx, plaintext, \
+ ciphertext, req) ( \
+ (KCF_PROV_DUAL_OPS(pd) && \
+ KCF_PROV_DUAL_OPS(pd)->sign_encrypt_update) ? \
+ KCF_PROV_DUAL_OPS(pd)->sign_encrypt_update( \
+ sign_ctx, encrypt_ctx, plaintext, ciphertext, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_DECRYPT_VERIFY_UPDATE(decrypt_ctx, verify_ctx, ciphertext, \
+ plaintext, req) ( \
+ (KCF_PROV_DUAL_OPS(pd) && \
+ KCF_PROV_DUAL_OPS(pd)->decrypt_verify_update) ? \
+ KCF_PROV_DUAL_OPS(pd)->decrypt_verify_update( \
+ decrypt_ctx, verify_ctx, ciphertext, plaintext, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+/*
+ * Wrappers for crypto_dual_cipher_mac_ops(9S) entry points.
+ */
+
+#define KCF_PROV_ENCRYPT_MAC_INIT(pd, ctx, encr_mech, encr_key, mac_mech, \
+ mac_key, encr_ctx_template, mac_ctx_template, req) ( \
+ (KCF_PROV_DUAL_CIPHER_MAC_OPS(pd) && \
+ KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->encrypt_mac_init) ? \
+ KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->encrypt_mac_init( \
+ ctx, encr_mech, encr_key, mac_mech, mac_key, encr_ctx_template, \
+ mac_ctx_template, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_ENCRYPT_MAC(pd, ctx, plaintext, ciphertext, mac, req) ( \
+ (KCF_PROV_DUAL_CIPHER_MAC_OPS(pd) && \
+ KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->encrypt_mac) ? \
+ KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->encrypt_mac( \
+ ctx, plaintext, ciphertext, mac, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_ENCRYPT_MAC_UPDATE(pd, ctx, plaintext, ciphertext, req) ( \
+ (KCF_PROV_DUAL_CIPHER_MAC_OPS(pd) && \
+ KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->encrypt_mac_update) ? \
+ KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->encrypt_mac_update( \
+ ctx, plaintext, ciphertext, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_ENCRYPT_MAC_FINAL(pd, ctx, ciphertext, mac, req) ( \
+ (KCF_PROV_DUAL_CIPHER_MAC_OPS(pd) && \
+ KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->encrypt_mac_final) ? \
+ KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->encrypt_mac_final( \
+ ctx, ciphertext, mac, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_ENCRYPT_MAC_ATOMIC(pd, session, encr_mech, encr_key, \
+ mac_mech, mac_key, plaintext, ciphertext, mac, \
+ encr_ctx_template, mac_ctx_template, req) ( \
+ (KCF_PROV_DUAL_CIPHER_MAC_OPS(pd) && \
+ KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->encrypt_mac_atomic) ? \
+ KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->encrypt_mac_atomic( \
+ (pd)->pd_prov_handle, session, encr_mech, encr_key, \
+ mac_mech, mac_key, plaintext, ciphertext, mac, \
+ encr_ctx_template, mac_ctx_template, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_MAC_DECRYPT_INIT(pd, ctx, mac_mech, mac_key, decr_mech, \
+ decr_key, mac_ctx_template, decr_ctx_template, req) ( \
+ (KCF_PROV_DUAL_CIPHER_MAC_OPS(pd) && \
+ KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->mac_decrypt_init) ? \
+ KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->mac_decrypt_init( \
+ ctx, mac_mech, mac_key, decr_mech, decr_key, mac_ctx_template, \
+ decr_ctx_template, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_MAC_DECRYPT(pd, ctx, ciphertext, mac, plaintext, req) ( \
+ (KCF_PROV_DUAL_CIPHER_MAC_OPS(pd) && \
+ KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->mac_decrypt) ? \
+ KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->mac_decrypt( \
+ ctx, ciphertext, mac, plaintext, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_MAC_DECRYPT_UPDATE(pd, ctx, ciphertext, plaintext, req) ( \
+ (KCF_PROV_DUAL_CIPHER_MAC_OPS(pd) && \
+ KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->mac_decrypt_update) ? \
+ KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->mac_decrypt_update( \
+ ctx, ciphertext, plaintext, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_MAC_DECRYPT_FINAL(pd, ctx, mac, plaintext, req) ( \
+ (KCF_PROV_DUAL_CIPHER_MAC_OPS(pd) && \
+ KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->mac_decrypt_final) ? \
+ KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->mac_decrypt_final( \
+ ctx, mac, plaintext, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_MAC_DECRYPT_ATOMIC(pd, session, mac_mech, mac_key, \
+ decr_mech, decr_key, ciphertext, mac, plaintext, \
+ mac_ctx_template, decr_ctx_template, req) ( \
+ (KCF_PROV_DUAL_CIPHER_MAC_OPS(pd) && \
+ KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->mac_decrypt_atomic) ? \
+ KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->mac_decrypt_atomic( \
+ (pd)->pd_prov_handle, session, mac_mech, mac_key, \
+ decr_mech, decr_key, ciphertext, mac, plaintext, \
+ mac_ctx_template, decr_ctx_template, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_MAC_VERIFY_DECRYPT_ATOMIC(pd, session, mac_mech, mac_key, \
+ decr_mech, decr_key, ciphertext, mac, plaintext, \
+ mac_ctx_template, decr_ctx_template, req) ( \
+ (KCF_PROV_DUAL_CIPHER_MAC_OPS(pd) && \
+ KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->mac_verify_decrypt_atomic \
+ != NULL) ? \
+ KCF_PROV_DUAL_CIPHER_MAC_OPS(pd)->mac_verify_decrypt_atomic( \
+ (pd)->pd_prov_handle, session, mac_mech, mac_key, \
+ decr_mech, decr_key, ciphertext, mac, plaintext, \
+ mac_ctx_template, decr_ctx_template, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+/*
+ * Wrappers for crypto_random_number_ops(9S) entry points.
+ */
+
+#define KCF_PROV_SEED_RANDOM(pd, session, buf, len, est, flags, req) ( \
+ (KCF_PROV_RANDOM_OPS(pd) && KCF_PROV_RANDOM_OPS(pd)->seed_random) ? \
+ KCF_PROV_RANDOM_OPS(pd)->seed_random((pd)->pd_prov_handle, \
+ session, buf, len, est, flags, req) : CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_GENERATE_RANDOM(pd, session, buf, len, req) ( \
+ (KCF_PROV_RANDOM_OPS(pd) && \
+ KCF_PROV_RANDOM_OPS(pd)->generate_random) ? \
+ KCF_PROV_RANDOM_OPS(pd)->generate_random((pd)->pd_prov_handle, \
+ session, buf, len, req) : CRYPTO_NOT_SUPPORTED)
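+
+/*
+ * Minimal usage sketch (assumes "pd", "sid", "buf", "len" and "req"
+ * are in scope; not part of the original interface documentation):
+ *
+ *	int rv = KCF_PROV_GENERATE_RANDOM(pd, sid, buf, len, req);
+ *	if (rv == CRYPTO_NOT_SUPPORTED)
+ *		... fall back to another provider ...
+ */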
+
+/*
+ * Wrappers for crypto_session_ops(9S) entry points.
+ *
+ * ops_pd is the provider descriptor that supplies the ops vector.
+ * pd is the provider descriptor that supplies the provider handle.
+ * Only session open/close takes both descriptors.
+ */
+
+#define KCF_PROV_SESSION_OPEN(ops_pd, session, req, pd) ( \
+ (KCF_PROV_SESSION_OPS(ops_pd) && \
+ KCF_PROV_SESSION_OPS(ops_pd)->session_open) ? \
+ KCF_PROV_SESSION_OPS(ops_pd)->session_open((pd)->pd_prov_handle, \
+ session, req) : CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_SESSION_CLOSE(ops_pd, session, req, pd) ( \
+ (KCF_PROV_SESSION_OPS(ops_pd) && \
+ KCF_PROV_SESSION_OPS(ops_pd)->session_close) ? \
+ KCF_PROV_SESSION_OPS(ops_pd)->session_close((pd)->pd_prov_handle, \
+ session, req) : CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_SESSION_LOGIN(pd, session, user_type, pin, len, req) ( \
+ (KCF_PROV_SESSION_OPS(pd) && \
+ KCF_PROV_SESSION_OPS(pd)->session_login) ? \
+ KCF_PROV_SESSION_OPS(pd)->session_login((pd)->pd_prov_handle, \
+ session, user_type, pin, len, req) : CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_SESSION_LOGOUT(pd, session, req) ( \
+ (KCF_PROV_SESSION_OPS(pd) && \
+ KCF_PROV_SESSION_OPS(pd)->session_logout) ? \
+ KCF_PROV_SESSION_OPS(pd)->session_logout((pd)->pd_prov_handle, \
+ session, req) : CRYPTO_NOT_SUPPORTED)
+
+/*
+ * Wrappers for crypto_object_ops(9S) entry points.
+ */
+
+#define KCF_PROV_OBJECT_CREATE(pd, session, template, count, object, req) ( \
+ (KCF_PROV_OBJECT_OPS(pd) && KCF_PROV_OBJECT_OPS(pd)->object_create) ? \
+ KCF_PROV_OBJECT_OPS(pd)->object_create((pd)->pd_prov_handle, \
+ session, template, count, object, req) : CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_OBJECT_COPY(pd, session, object, template, count, \
+ new_object, req) ( \
+ (KCF_PROV_OBJECT_OPS(pd) && KCF_PROV_OBJECT_OPS(pd)->object_copy) ? \
+ KCF_PROV_OBJECT_OPS(pd)->object_copy((pd)->pd_prov_handle, \
+ session, object, template, count, new_object, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_OBJECT_DESTROY(pd, session, object, req) ( \
+ (KCF_PROV_OBJECT_OPS(pd) && KCF_PROV_OBJECT_OPS(pd)->object_destroy) ? \
+ KCF_PROV_OBJECT_OPS(pd)->object_destroy((pd)->pd_prov_handle, \
+ session, object, req) : CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_OBJECT_GET_SIZE(pd, session, object, size, req) ( \
+ (KCF_PROV_OBJECT_OPS(pd) && \
+ KCF_PROV_OBJECT_OPS(pd)->object_get_size) ? \
+ KCF_PROV_OBJECT_OPS(pd)->object_get_size((pd)->pd_prov_handle, \
+ session, object, size, req) : CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_OBJECT_GET_ATTRIBUTE_VALUE(pd, session, object, template, \
+ count, req) ( \
+ (KCF_PROV_OBJECT_OPS(pd) && \
+ KCF_PROV_OBJECT_OPS(pd)->object_get_attribute_value) ? \
+ KCF_PROV_OBJECT_OPS(pd)->object_get_attribute_value( \
+ (pd)->pd_prov_handle, session, object, template, count, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_OBJECT_SET_ATTRIBUTE_VALUE(pd, session, object, template, \
+ count, req) ( \
+ (KCF_PROV_OBJECT_OPS(pd) && \
+ KCF_PROV_OBJECT_OPS(pd)->object_set_attribute_value) ? \
+ KCF_PROV_OBJECT_OPS(pd)->object_set_attribute_value( \
+ (pd)->pd_prov_handle, session, object, template, count, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_OBJECT_FIND_INIT(pd, session, template, count, ppriv, \
+ req) ( \
+ (KCF_PROV_OBJECT_OPS(pd) && \
+ KCF_PROV_OBJECT_OPS(pd)->object_find_init) ? \
+ KCF_PROV_OBJECT_OPS(pd)->object_find_init((pd)->pd_prov_handle, \
+ session, template, count, ppriv, req) : CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_OBJECT_FIND(pd, ppriv, objects, max_objects, object_count, \
+ req) ( \
+ (KCF_PROV_OBJECT_OPS(pd) && KCF_PROV_OBJECT_OPS(pd)->object_find) ? \
+ KCF_PROV_OBJECT_OPS(pd)->object_find( \
+ (pd)->pd_prov_handle, ppriv, objects, max_objects, object_count, \
+ req) : CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_OBJECT_FIND_FINAL(pd, ppriv, req) ( \
+ (KCF_PROV_OBJECT_OPS(pd) && \
+ KCF_PROV_OBJECT_OPS(pd)->object_find_final) ? \
+ KCF_PROV_OBJECT_OPS(pd)->object_find_final( \
+ (pd)->pd_prov_handle, ppriv, req) : CRYPTO_NOT_SUPPORTED)
+
+/*
+ * Wrappers for crypto_key_ops(9S) entry points.
+ */
+
+#define KCF_PROV_KEY_GENERATE(pd, session, mech, template, count, object, \
+ req) ( \
+ (KCF_PROV_KEY_OPS(pd) && KCF_PROV_KEY_OPS(pd)->key_generate) ? \
+ KCF_PROV_KEY_OPS(pd)->key_generate((pd)->pd_prov_handle, \
+ session, mech, template, count, object, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_KEY_GENERATE_PAIR(pd, session, mech, pub_template, \
+ pub_count, priv_template, priv_count, pub_key, priv_key, req) ( \
+ (KCF_PROV_KEY_OPS(pd) && KCF_PROV_KEY_OPS(pd)->key_generate_pair) ? \
+ KCF_PROV_KEY_OPS(pd)->key_generate_pair((pd)->pd_prov_handle, \
+ session, mech, pub_template, pub_count, priv_template, \
+ priv_count, pub_key, priv_key, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_KEY_WRAP(pd, session, mech, wrapping_key, key, wrapped_key, \
+ wrapped_key_len, req) ( \
+ (KCF_PROV_KEY_OPS(pd) && KCF_PROV_KEY_OPS(pd)->key_wrap) ? \
+ KCF_PROV_KEY_OPS(pd)->key_wrap((pd)->pd_prov_handle, \
+ session, mech, wrapping_key, key, wrapped_key, wrapped_key_len, \
+ req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_KEY_UNWRAP(pd, session, mech, unwrapping_key, wrapped_key, \
+ wrapped_key_len, template, count, key, req) ( \
+ (KCF_PROV_KEY_OPS(pd) && KCF_PROV_KEY_OPS(pd)->key_unwrap) ? \
+ KCF_PROV_KEY_OPS(pd)->key_unwrap((pd)->pd_prov_handle, \
+ session, mech, unwrapping_key, wrapped_key, wrapped_key_len, \
+ template, count, key, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_KEY_DERIVE(pd, session, mech, base_key, template, count, \
+ key, req) ( \
+ (KCF_PROV_KEY_OPS(pd) && KCF_PROV_KEY_OPS(pd)->key_derive) ? \
+ KCF_PROV_KEY_OPS(pd)->key_derive((pd)->pd_prov_handle, \
+ session, mech, base_key, template, count, key, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_KEY_CHECK(pd, mech, key) ( \
+ (KCF_PROV_KEY_OPS(pd) && KCF_PROV_KEY_OPS(pd)->key_check) ? \
+ KCF_PROV_KEY_OPS(pd)->key_check((pd)->pd_prov_handle, mech, key) : \
+ CRYPTO_NOT_SUPPORTED)
+
+/*
+ * Wrappers for crypto_provider_management_ops(9S) entry points.
+ *
+ * ops_pd is the provider descriptor that supplies the ops vector.
+ * pd is the provider descriptor that supplies the provider handle.
+ * Only ext_info takes both descriptors.
+ */
+
+#define KCF_PROV_EXT_INFO(ops_pd, provext_info, req, pd) ( \
+ (KCF_PROV_PROVIDER_OPS(ops_pd) && \
+ KCF_PROV_PROVIDER_OPS(ops_pd)->ext_info) ? \
+ KCF_PROV_PROVIDER_OPS(ops_pd)->ext_info((pd)->pd_prov_handle, \
+ provext_info, req) : CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_INIT_TOKEN(pd, pin, pin_len, label, req) ( \
+ (KCF_PROV_PROVIDER_OPS(pd) && KCF_PROV_PROVIDER_OPS(pd)->init_token) ? \
+ KCF_PROV_PROVIDER_OPS(pd)->init_token((pd)->pd_prov_handle, \
+ pin, pin_len, label, req) : CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_INIT_PIN(pd, session, pin, pin_len, req) ( \
+ (KCF_PROV_PROVIDER_OPS(pd) && KCF_PROV_PROVIDER_OPS(pd)->init_pin) ? \
+ KCF_PROV_PROVIDER_OPS(pd)->init_pin((pd)->pd_prov_handle, \
+ session, pin, pin_len, req) : CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_SET_PIN(pd, session, old_pin, old_len, new_pin, new_len, \
+ req) ( \
+ (KCF_PROV_PROVIDER_OPS(pd) && KCF_PROV_PROVIDER_OPS(pd)->set_pin) ? \
+ KCF_PROV_PROVIDER_OPS(pd)->set_pin((pd)->pd_prov_handle, \
+ session, old_pin, old_len, new_pin, new_len, req) : \
+ CRYPTO_NOT_SUPPORTED)
+
+/*
+ * Wrappers for crypto_nostore_key_ops(9S) entry points.
+ */
+
+#define KCF_PROV_NOSTORE_KEY_GENERATE(pd, session, mech, template, count, \
+ out_template, out_count, req) ( \
+ (KCF_PROV_NOSTORE_KEY_OPS(pd) && \
+ KCF_PROV_NOSTORE_KEY_OPS(pd)->nostore_key_generate) ? \
+ KCF_PROV_NOSTORE_KEY_OPS(pd)->nostore_key_generate( \
+ (pd)->pd_prov_handle, session, mech, template, count, \
+ out_template, out_count, req) : CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_NOSTORE_KEY_GENERATE_PAIR(pd, session, mech, pub_template, \
+ pub_count, priv_template, priv_count, out_pub_template, \
+ out_pub_count, out_priv_template, out_priv_count, req) ( \
+ (KCF_PROV_NOSTORE_KEY_OPS(pd) && \
+ KCF_PROV_NOSTORE_KEY_OPS(pd)->nostore_key_generate_pair) ? \
+ KCF_PROV_NOSTORE_KEY_OPS(pd)->nostore_key_generate_pair( \
+ (pd)->pd_prov_handle, session, mech, pub_template, pub_count, \
+ priv_template, priv_count, out_pub_template, out_pub_count, \
+ out_priv_template, out_priv_count, req) : CRYPTO_NOT_SUPPORTED)
+
+#define KCF_PROV_NOSTORE_KEY_DERIVE(pd, session, mech, base_key, template, \
+ count, out_template, out_count, req) ( \
+ (KCF_PROV_NOSTORE_KEY_OPS(pd) && \
+ KCF_PROV_NOSTORE_KEY_OPS(pd)->nostore_key_derive) ? \
+ KCF_PROV_NOSTORE_KEY_OPS(pd)->nostore_key_derive( \
+ (pd)->pd_prov_handle, session, mech, base_key, template, count, \
+ out_template, out_count, req) : CRYPTO_NOT_SUPPORTED)
+
+/*
+ * The following routines are exported by the kcf module (/kernel/misc/kcf)
+ * to the crypto and cryptoadmin modules.
+ */
+
+/* Single-part digest/mac/cipher entry points that operate on a context */
+extern int crypto_digest_single(crypto_context_t, crypto_data_t *,
+ crypto_data_t *, crypto_call_req_t *);
+
+extern int crypto_mac_single(crypto_context_t, crypto_data_t *,
+ crypto_data_t *, crypto_call_req_t *);
+
+extern int crypto_encrypt_single(crypto_context_t, crypto_data_t *,
+ crypto_data_t *, crypto_call_req_t *);
+
+extern int crypto_decrypt_single(crypto_context_t, crypto_data_t *,
+ crypto_data_t *, crypto_call_req_t *);
+
+/* Other private digest/mac/cipher entry points not exported through k-API */
+extern int crypto_digest_key_prov(crypto_context_t, crypto_key_t *,
+ crypto_call_req_t *);
+
+/* Private sign entry points exported by KCF */
+extern int crypto_sign_single(crypto_context_t, crypto_data_t *,
+ crypto_data_t *, crypto_call_req_t *);
+
+extern int crypto_sign_recover_single(crypto_context_t, crypto_data_t *,
+ crypto_data_t *, crypto_call_req_t *);
+
+/* Private verify entry points exported by KCF */
+extern int crypto_verify_single(crypto_context_t, crypto_data_t *,
+ crypto_data_t *, crypto_call_req_t *);
+
+extern int crypto_verify_recover_single(crypto_context_t, crypto_data_t *,
+ crypto_data_t *, crypto_call_req_t *);
+
+/* Private dual operations entry points exported by KCF */
+extern int crypto_digest_encrypt_update(crypto_context_t, crypto_context_t,
+ crypto_data_t *, crypto_data_t *, crypto_call_req_t *);
+extern int crypto_decrypt_digest_update(crypto_context_t, crypto_context_t,
+ crypto_data_t *, crypto_data_t *, crypto_call_req_t *);
+extern int crypto_sign_encrypt_update(crypto_context_t, crypto_context_t,
+ crypto_data_t *, crypto_data_t *, crypto_call_req_t *);
+extern int crypto_decrypt_verify_update(crypto_context_t, crypto_context_t,
+ crypto_data_t *, crypto_data_t *, crypto_call_req_t *);
+
+/* Random Number Generation */
+int crypto_seed_random(crypto_provider_handle_t provider, uchar_t *buf,
+ size_t len, crypto_call_req_t *req);
+int crypto_generate_random(crypto_provider_handle_t provider, uchar_t *buf,
+ size_t len, crypto_call_req_t *req);
+
+/* Provider Management */
+int crypto_get_provider_info(crypto_provider_id_t id,
+ crypto_provider_info_t **info, crypto_call_req_t *req);
+int crypto_get_provider_mechanisms(crypto_minor_t *, crypto_provider_id_t id,
+ uint_t *count, crypto_mech_name_t **list);
+int crypto_init_token(crypto_provider_handle_t provider, char *pin,
+ size_t pin_len, char *label, crypto_call_req_t *);
+int crypto_init_pin(crypto_provider_handle_t provider, char *pin,
+ size_t pin_len, crypto_call_req_t *req);
+int crypto_set_pin(crypto_provider_handle_t provider, char *old_pin,
+ size_t old_len, char *new_pin, size_t new_len, crypto_call_req_t *req);
+void crypto_free_provider_list(crypto_provider_entry_t *list, uint_t count);
+void crypto_free_provider_info(crypto_provider_info_t *info);
+
+/* Administrative */
+int crypto_get_dev_list(uint_t *count, crypto_dev_list_entry_t **list);
+int crypto_get_soft_list(uint_t *count, char **list, size_t *len);
+int crypto_get_dev_info(char *name, uint_t instance, uint_t *count,
+ crypto_mech_name_t **list);
+int crypto_get_soft_info(caddr_t name, uint_t *count,
+ crypto_mech_name_t **list);
+int crypto_load_dev_disabled(char *name, uint_t instance, uint_t count,
+ crypto_mech_name_t *list);
+int crypto_load_soft_disabled(caddr_t name, uint_t count,
+ crypto_mech_name_t *list);
+int crypto_unload_soft_module(caddr_t path);
+int crypto_load_soft_config(caddr_t name, uint_t count,
+ crypto_mech_name_t *list);
+int crypto_load_door(uint_t did);
+void crypto_free_mech_list(crypto_mech_name_t *list, uint_t count);
+void crypto_free_dev_list(crypto_dev_list_entry_t *list, uint_t count);
+
+/* Miscellaneous */
+int crypto_get_mechanism_number(caddr_t name, crypto_mech_type_t *number);
+int crypto_get_function_list(crypto_provider_id_t id,
+ crypto_function_list_t **list, int kmflag);
+void crypto_free_function_list(crypto_function_list_t *list);
+int crypto_build_permitted_mech_names(kcf_provider_desc_t *,
+ crypto_mech_name_t **, uint_t *, int);
+extern void kcf_destroy_mech_tabs(void);
+extern void kcf_init_mech_tabs(void);
+extern int kcf_add_mech_provider(short, kcf_provider_desc_t *,
+ kcf_prov_mech_desc_t **);
+extern void kcf_remove_mech_provider(char *, kcf_provider_desc_t *);
+extern int kcf_get_mech_entry(crypto_mech_type_t, kcf_mech_entry_t **);
+extern kcf_provider_desc_t *kcf_alloc_provider_desc(crypto_provider_info_t *);
+extern void kcf_provider_zero_refcnt(kcf_provider_desc_t *);
+extern void kcf_free_provider_desc(kcf_provider_desc_t *);
+extern void kcf_soft_config_init(void);
+extern int get_sw_provider_for_mech(crypto_mech_name_t, char **);
+extern crypto_mech_type_t crypto_mech2id_common(char *, boolean_t);
+extern void undo_register_provider(kcf_provider_desc_t *, boolean_t);
+extern void redo_register_provider(kcf_provider_desc_t *);
+extern void kcf_rnd_init(void);
+extern boolean_t kcf_rngprov_check(void);
+extern int kcf_rnd_get_pseudo_bytes(uint8_t *, size_t);
+extern int kcf_rnd_get_bytes(uint8_t *, size_t, boolean_t, boolean_t);
+extern int random_add_pseudo_entropy(uint8_t *, size_t, uint_t);
+extern void kcf_rnd_schedule_timeout(boolean_t);
+extern int crypto_uio_data(crypto_data_t *, uchar_t *, int, cmd_type_t,
+ void *, void (*update)(void));
+extern int crypto_mblk_data(crypto_data_t *, uchar_t *, int, cmd_type_t,
+ void *, void (*update)(void));
+extern int crypto_put_output_data(uchar_t *, crypto_data_t *, int);
+extern int crypto_get_input_data(crypto_data_t *, uchar_t **, uchar_t *);
+extern int crypto_copy_key_to_ctx(crypto_key_t *, crypto_key_t **, size_t *,
+ int kmflag);
+extern int crypto_digest_data(crypto_data_t *, void *, uchar_t *,
+ void (*update)(void), void (*final)(void), uchar_t);
+extern int crypto_update_iov(void *, crypto_data_t *, crypto_data_t *,
+ int (*cipher)(void *, caddr_t, size_t, crypto_data_t *),
+ void (*copy_block)(uint8_t *, uint64_t *));
+extern int crypto_update_uio(void *, crypto_data_t *, crypto_data_t *,
+ int (*cipher)(void *, caddr_t, size_t, crypto_data_t *),
+ void (*copy_block)(uint8_t *, uint64_t *));
+extern int crypto_update_mp(void *, crypto_data_t *, crypto_data_t *,
+ int (*cipher)(void *, caddr_t, size_t, crypto_data_t *),
+ void (*copy_block)(uint8_t *, uint64_t *));
+extern int crypto_get_key_attr(crypto_key_t *, crypto_attr_type_t, uchar_t **,
+ ssize_t *);
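+
+/*
+ * Sketch of how a cipher mode implementation might use the update
+ * helpers above (the callback names "my_cbc_encrypt_contiguous_blocks"
+ * and "my_copy_block" are hypothetical):
+ *
+ *	switch (input->cd_format) {
+ *	case CRYPTO_DATA_RAW:
+ *		rv = crypto_update_iov(ctx, input, output,
+ *		    my_cbc_encrypt_contiguous_blocks, my_copy_block);
+ *		break;
+ *	case CRYPTO_DATA_UIO:
+ *		rv = crypto_update_uio(ctx, input, output,
+ *		    my_cbc_encrypt_contiguous_blocks, my_copy_block);
+ *		break;
+ *	}
+ */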
+
+/* Access to the provider's table */
+extern void kcf_prov_tab_destroy(void);
+extern void kcf_prov_tab_init(void);
+extern int kcf_prov_tab_add_provider(kcf_provider_desc_t *);
+extern int kcf_prov_tab_rem_provider(crypto_provider_id_t);
+extern kcf_provider_desc_t *kcf_prov_tab_lookup_by_name(char *);
+extern kcf_provider_desc_t *kcf_prov_tab_lookup_by_dev(char *, uint_t);
+extern int kcf_get_hw_prov_tab(uint_t *, kcf_provider_desc_t ***, int,
+ char *, uint_t, boolean_t);
+extern int kcf_get_slot_list(uint_t *, kcf_provider_desc_t ***, boolean_t);
+extern void kcf_free_provider_tab(uint_t, kcf_provider_desc_t **);
+extern kcf_provider_desc_t *kcf_prov_tab_lookup(crypto_provider_id_t);
+extern int kcf_get_sw_prov(crypto_mech_type_t, kcf_provider_desc_t **,
+ kcf_mech_entry_t **, boolean_t);
+
+/* Access to the policy table */
+extern boolean_t is_mech_disabled(kcf_provider_desc_t *, crypto_mech_name_t);
+extern boolean_t is_mech_disabled_byname(crypto_provider_type_t, char *,
+ uint_t, crypto_mech_name_t);
+extern void kcf_policy_tab_init(void);
+extern void kcf_policy_free_desc(kcf_policy_desc_t *);
+extern void kcf_policy_remove_by_name(char *, uint_t *, crypto_mech_name_t **);
+extern void kcf_policy_remove_by_dev(char *, uint_t, uint_t *,
+ crypto_mech_name_t **);
+extern kcf_policy_desc_t *kcf_policy_lookup_by_name(char *);
+extern kcf_policy_desc_t *kcf_policy_lookup_by_dev(char *, uint_t);
+extern int kcf_policy_load_soft_disabled(char *, uint_t, crypto_mech_name_t *,
+ uint_t *, crypto_mech_name_t **);
+extern int kcf_policy_load_dev_disabled(char *, uint_t, uint_t,
+ crypto_mech_name_t *, uint_t *, crypto_mech_name_t **);
+extern boolean_t in_soft_config_list(char *);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_CRYPTO_IMPL_H */
diff --git a/module/icp/include/sys/crypto/ioctl.h b/module/icp/include/sys/crypto/ioctl.h
new file mode 100644
index 000000000..dd59ca7f2
--- /dev/null
+++ b/module/icp/include/sys/crypto/ioctl.h
@@ -0,0 +1,1483 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_CRYPTO_IOCTL_H
+#define _SYS_CRYPTO_IOCTL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/zfs_context.h>
+#include <sys/crypto/api.h>
+#include <sys/crypto/spi.h>
+#include <sys/crypto/common.h>
+
+#define CRYPTO_MAX_ATTRIBUTE_COUNT 128
+
+#define CRYPTO_IOFLAGS_RW_SESSION 0x00000001
+
+#define CRYPTO(x) (('y' << 8) | (x))
+
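+/*
+ * Example: CRYPTO_GET_FUNCTION_LIST below is CRYPTO(20), which expands
+ * to (('y' << 8) | 20), i.e. (0x79 << 8) | 0x14 == 0x7914.
+ */
+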
+#define MAX_NUM_THRESHOLD 7
+
+/* PKCS#11 mechanism numbers */
+#define CKM_RC4 0x00000111
+#define CKM_DES3_ECB 0x00000132
+#define CKM_DES3_CBC 0x00000133
+#define CKM_MD5 0x00000210
+#define CKM_SHA_1 0x00000220
+#define CKM_AES_ECB 0x00001081
+#define CKM_AES_CBC 0x00001082
+
+/*
+ * General Purpose Ioctls
+ */
+
+typedef struct fl_mechs_threshold {
+ int mech_type;
+ uint32_t mech_threshold;
+} fl_mechs_threshold_t;
+
+typedef struct crypto_function_list {
+ boolean_t fl_digest_init;
+ boolean_t fl_digest;
+ boolean_t fl_digest_update;
+ boolean_t fl_digest_key;
+ boolean_t fl_digest_final;
+
+ boolean_t fl_encrypt_init;
+ boolean_t fl_encrypt;
+ boolean_t fl_encrypt_update;
+ boolean_t fl_encrypt_final;
+
+ boolean_t fl_decrypt_init;
+ boolean_t fl_decrypt;
+ boolean_t fl_decrypt_update;
+ boolean_t fl_decrypt_final;
+
+ boolean_t fl_mac_init;
+ boolean_t fl_mac;
+ boolean_t fl_mac_update;
+ boolean_t fl_mac_final;
+
+ boolean_t fl_sign_init;
+ boolean_t fl_sign;
+ boolean_t fl_sign_update;
+ boolean_t fl_sign_final;
+ boolean_t fl_sign_recover_init;
+ boolean_t fl_sign_recover;
+
+ boolean_t fl_verify_init;
+ boolean_t fl_verify;
+ boolean_t fl_verify_update;
+ boolean_t fl_verify_final;
+ boolean_t fl_verify_recover_init;
+ boolean_t fl_verify_recover;
+
+ boolean_t fl_digest_encrypt_update;
+ boolean_t fl_decrypt_digest_update;
+ boolean_t fl_sign_encrypt_update;
+ boolean_t fl_decrypt_verify_update;
+
+ boolean_t fl_seed_random;
+ boolean_t fl_generate_random;
+
+ boolean_t fl_session_open;
+ boolean_t fl_session_close;
+ boolean_t fl_session_login;
+ boolean_t fl_session_logout;
+
+ boolean_t fl_object_create;
+ boolean_t fl_object_copy;
+ boolean_t fl_object_destroy;
+ boolean_t fl_object_get_size;
+ boolean_t fl_object_get_attribute_value;
+ boolean_t fl_object_set_attribute_value;
+ boolean_t fl_object_find_init;
+ boolean_t fl_object_find;
+ boolean_t fl_object_find_final;
+
+ boolean_t fl_key_generate;
+ boolean_t fl_key_generate_pair;
+ boolean_t fl_key_wrap;
+ boolean_t fl_key_unwrap;
+ boolean_t fl_key_derive;
+
+ boolean_t fl_init_token;
+ boolean_t fl_init_pin;
+ boolean_t fl_set_pin;
+
+ boolean_t prov_is_limited;
+ uint32_t prov_hash_threshold;
+ uint32_t prov_hash_limit;
+
+ int total_threshold_count;
+ fl_mechs_threshold_t fl_threshold[MAX_NUM_THRESHOLD];
+} crypto_function_list_t;
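+
+/*
+ * The structure above is filled in by the CRYPTO_GET_FUNCTION_LIST ioctl
+ * (defined below): each fl_* flag reports whether the selected provider
+ * implements the corresponding entry point, and the prov_* fields
+ * describe providers that limit the size of data they can digest.
+ */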
+
+typedef struct crypto_get_function_list {
+ uint_t fl_return_value;
+ crypto_provider_id_t fl_provider_id;
+ crypto_function_list_t fl_list;
+} crypto_get_function_list_t;
+
+typedef struct crypto_get_mechanism_number {
+ uint_t pn_return_value;
+ caddr_t pn_mechanism_string;
+ size_t pn_mechanism_len;
+ crypto_mech_type_t pn_internal_number;
+} crypto_get_mechanism_number_t;
+
+#ifdef _KERNEL
+#ifdef _SYSCALL32
+
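+/*
+ * The *32 structures below mirror their native counterparts for use by
+ * a 64-bit kernel servicing 32-bit callers: pointers become caddr32_t
+ * and sizes become size32_t.  pragma pack(4) is applied where the
+ * native ABI aligns long long on 8 bytes but the 32-bit ABI aligns it
+ * on 4, so the kernel sees the same layout a 32-bit compiler produced.
+ */
+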
+#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
+#pragma pack(4)
+#endif
+
+typedef struct crypto_get_mechanism_number32 {
+ uint32_t pn_return_value;
+ caddr32_t pn_mechanism_string;
+ size32_t pn_mechanism_len;
+ crypto_mech_type_t pn_internal_number;
+} crypto_get_mechanism_number32_t;
+
+#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
+#pragma pack()
+#endif
+
+#endif /* _SYSCALL32 */
+#endif /* _KERNEL */
+
+#define CRYPTO_GET_FUNCTION_LIST CRYPTO(20)
+#define CRYPTO_GET_MECHANISM_NUMBER CRYPTO(21)
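+
+/*
+ * Illustrative userland sketch (assumes the Illumos /dev/crypto device
+ * node, which this port does not create; use() is hypothetical):
+ *
+ *	crypto_get_mechanism_number_t gm = { 0 };
+ *	int fd = open("/dev/crypto", O_RDWR);
+ *
+ *	gm.pn_mechanism_string = (caddr_t)"CKM_AES_CBC";
+ *	gm.pn_mechanism_len = strlen("CKM_AES_CBC") + 1;
+ *	if (ioctl(fd, CRYPTO_GET_MECHANISM_NUMBER, &gm) == 0 &&
+ *	    gm.pn_return_value == CRYPTO_SUCCESS)
+ *		use(gm.pn_internal_number);
+ */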
+
+/*
+ * Session Ioctls
+ */
+
+typedef uint32_t crypto_flags_t;
+
+typedef struct crypto_open_session {
+ uint_t os_return_value;
+ crypto_session_id_t os_session;
+ crypto_flags_t os_flags;
+ crypto_provider_id_t os_provider_id;
+} crypto_open_session_t;
+
+typedef struct crypto_close_session {
+ uint_t cs_return_value;
+ crypto_session_id_t cs_session;
+} crypto_close_session_t;
+
+typedef struct crypto_close_all_sessions {
+ uint_t as_return_value;
+ crypto_provider_id_t as_provider_id;
+} crypto_close_all_sessions_t;
+
+#define CRYPTO_OPEN_SESSION CRYPTO(30)
+#define CRYPTO_CLOSE_SESSION CRYPTO(31)
+#define CRYPTO_CLOSE_ALL_SESSIONS CRYPTO(32)
+
+/*
+ * Login Ioctls
+ */
+typedef struct crypto_login {
+ uint_t co_return_value;
+ crypto_session_id_t co_session;
+ uint_t co_user_type;
+ uint_t co_pin_len;
+ caddr_t co_pin;
+} crypto_login_t;
+
+typedef struct crypto_logout {
+ uint_t cl_return_value;
+ crypto_session_id_t cl_session;
+} crypto_logout_t;
+
+#ifdef _KERNEL
+#ifdef _SYSCALL32
+
+typedef struct crypto_login32 {
+ uint32_t co_return_value;
+ crypto_session_id_t co_session;
+ uint32_t co_user_type;
+ uint32_t co_pin_len;
+ caddr32_t co_pin;
+} crypto_login32_t;
+
+typedef struct crypto_logout32 {
+ uint32_t cl_return_value;
+ crypto_session_id_t cl_session;
+} crypto_logout32_t;
+
+#endif /* _SYSCALL32 */
+#endif /* _KERNEL */
+
+#define CRYPTO_LOGIN CRYPTO(40)
+#define CRYPTO_LOGOUT CRYPTO(41)
+
+/*
+ * Flag for encrypt and decrypt operations; when set, the operation is
+ * performed in place and the data buffer doubles as the output buffer.
+ */
+#define CRYPTO_INPLACE_OPERATION 0x00000001
+
+/*
+ * Cryptographic Ioctls
+ */
+typedef struct crypto_encrypt {
+ uint_t ce_return_value;
+ crypto_session_id_t ce_session;
+ size_t ce_datalen;
+ caddr_t ce_databuf;
+ size_t ce_encrlen;
+ caddr_t ce_encrbuf;
+ uint_t ce_flags;
+} crypto_encrypt_t;
+
+typedef struct crypto_encrypt_init {
+ uint_t ei_return_value;
+ crypto_session_id_t ei_session;
+ crypto_mechanism_t ei_mech;
+ crypto_key_t ei_key;
+} crypto_encrypt_init_t;
+
+typedef struct crypto_encrypt_update {
+ uint_t eu_return_value;
+ crypto_session_id_t eu_session;
+ size_t eu_datalen;
+ caddr_t eu_databuf;
+ size_t eu_encrlen;
+ caddr_t eu_encrbuf;
+} crypto_encrypt_update_t;
+
+typedef struct crypto_encrypt_final {
+ uint_t ef_return_value;
+ crypto_session_id_t ef_session;
+ size_t ef_encrlen;
+ caddr_t ef_encrbuf;
+} crypto_encrypt_final_t;
+
+typedef struct crypto_decrypt {
+ uint_t cd_return_value;
+ crypto_session_id_t cd_session;
+ size_t cd_encrlen;
+ caddr_t cd_encrbuf;
+ size_t cd_datalen;
+ caddr_t cd_databuf;
+ uint_t cd_flags;
+} crypto_decrypt_t;
+
+typedef struct crypto_decrypt_init {
+ uint_t di_return_value;
+ crypto_session_id_t di_session;
+ crypto_mechanism_t di_mech;
+ crypto_key_t di_key;
+} crypto_decrypt_init_t;
+
+typedef struct crypto_decrypt_update {
+ uint_t du_return_value;
+ crypto_session_id_t du_session;
+ size_t du_encrlen;
+ caddr_t du_encrbuf;
+ size_t du_datalen;
+ caddr_t du_databuf;
+} crypto_decrypt_update_t;
+
+typedef struct crypto_decrypt_final {
+ uint_t df_return_value;
+ crypto_session_id_t df_session;
+ size_t df_datalen;
+ caddr_t df_databuf;
+} crypto_decrypt_final_t;
+
+typedef struct crypto_digest {
+ uint_t cd_return_value;
+ crypto_session_id_t cd_session;
+ size_t cd_datalen;
+ caddr_t cd_databuf;
+ size_t cd_digestlen;
+ caddr_t cd_digestbuf;
+} crypto_digest_t;
+
+typedef struct crypto_digest_init {
+ uint_t di_return_value;
+ crypto_session_id_t di_session;
+ crypto_mechanism_t di_mech;
+} crypto_digest_init_t;
+
+typedef struct crypto_digest_update {
+ uint_t du_return_value;
+ crypto_session_id_t du_session;
+ size_t du_datalen;
+ caddr_t du_databuf;
+} crypto_digest_update_t;
+
+typedef struct crypto_digest_key {
+ uint_t dk_return_value;
+ crypto_session_id_t dk_session;
+ crypto_key_t dk_key;
+} crypto_digest_key_t;
+
+typedef struct crypto_digest_final {
+ uint_t df_return_value;
+ crypto_session_id_t df_session;
+ size_t df_digestlen;
+ caddr_t df_digestbuf;
+} crypto_digest_final_t;
+
+typedef struct crypto_mac {
+ uint_t cm_return_value;
+ crypto_session_id_t cm_session;
+ size_t cm_datalen;
+ caddr_t cm_databuf;
+ size_t cm_maclen;
+ caddr_t cm_macbuf;
+} crypto_mac_t;
+
+typedef struct crypto_mac_init {
+ uint_t mi_return_value;
+ crypto_session_id_t mi_session;
+ crypto_mechanism_t mi_mech;
+ crypto_key_t mi_key;
+} crypto_mac_init_t;
+
+typedef struct crypto_mac_update {
+ uint_t mu_return_value;
+ crypto_session_id_t mu_session;
+ size_t mu_datalen;
+ caddr_t mu_databuf;
+} crypto_mac_update_t;
+
+typedef struct crypto_mac_final {
+ uint_t mf_return_value;
+ crypto_session_id_t mf_session;
+ size_t mf_maclen;
+ caddr_t mf_macbuf;
+} crypto_mac_final_t;
+
+typedef struct crypto_sign {
+ uint_t cs_return_value;
+ crypto_session_id_t cs_session;
+ size_t cs_datalen;
+ caddr_t cs_databuf;
+ size_t cs_signlen;
+ caddr_t cs_signbuf;
+} crypto_sign_t;
+
+typedef struct crypto_sign_init {
+ uint_t si_return_value;
+ crypto_session_id_t si_session;
+ crypto_mechanism_t si_mech;
+ crypto_key_t si_key;
+} crypto_sign_init_t;
+
+typedef struct crypto_sign_update {
+ uint_t su_return_value;
+ crypto_session_id_t su_session;
+ size_t su_datalen;
+ caddr_t su_databuf;
+} crypto_sign_update_t;
+
+typedef struct crypto_sign_final {
+ uint_t sf_return_value;
+ crypto_session_id_t sf_session;
+ size_t sf_signlen;
+ caddr_t sf_signbuf;
+} crypto_sign_final_t;
+
+typedef struct crypto_sign_recover_init {
+ uint_t ri_return_value;
+ crypto_session_id_t ri_session;
+ crypto_mechanism_t ri_mech;
+ crypto_key_t ri_key;
+} crypto_sign_recover_init_t;
+
+typedef struct crypto_sign_recover {
+ uint_t sr_return_value;
+ crypto_session_id_t sr_session;
+ size_t sr_datalen;
+ caddr_t sr_databuf;
+ size_t sr_signlen;
+ caddr_t sr_signbuf;
+} crypto_sign_recover_t;
+
+typedef struct crypto_verify {
+ uint_t cv_return_value;
+ crypto_session_id_t cv_session;
+ size_t cv_datalen;
+ caddr_t cv_databuf;
+ size_t cv_signlen;
+ caddr_t cv_signbuf;
+} crypto_verify_t;
+
+typedef struct crypto_verify_init {
+ uint_t vi_return_value;
+ crypto_session_id_t vi_session;
+ crypto_mechanism_t vi_mech;
+ crypto_key_t vi_key;
+} crypto_verify_init_t;
+
+typedef struct crypto_verify_update {
+ uint_t vu_return_value;
+ crypto_session_id_t vu_session;
+ size_t vu_datalen;
+ caddr_t vu_databuf;
+} crypto_verify_update_t;
+
+typedef struct crypto_verify_final {
+ uint_t vf_return_value;
+ crypto_session_id_t vf_session;
+ size_t vf_signlen;
+ caddr_t vf_signbuf;
+} crypto_verify_final_t;
+
+typedef struct crypto_verify_recover_init {
+ uint_t ri_return_value;
+ crypto_session_id_t ri_session;
+ crypto_mechanism_t ri_mech;
+ crypto_key_t ri_key;
+} crypto_verify_recover_init_t;
+
+typedef struct crypto_verify_recover {
+ uint_t vr_return_value;
+ crypto_session_id_t vr_session;
+ size_t vr_signlen;
+ caddr_t vr_signbuf;
+ size_t vr_datalen;
+ caddr_t vr_databuf;
+} crypto_verify_recover_t;
+
+typedef struct crypto_digest_encrypt_update {
+ uint_t eu_return_value;
+ crypto_session_id_t eu_session;
+ size_t eu_datalen;
+ caddr_t eu_databuf;
+ size_t eu_encrlen;
+ caddr_t eu_encrbuf;
+} crypto_digest_encrypt_update_t;
+
+typedef struct crypto_decrypt_digest_update {
+ uint_t du_return_value;
+ crypto_session_id_t du_session;
+ size_t du_encrlen;
+ caddr_t du_encrbuf;
+ size_t du_datalen;
+ caddr_t du_databuf;
+} crypto_decrypt_digest_update_t;
+
+typedef struct crypto_sign_encrypt_update {
+ uint_t eu_return_value;
+ crypto_session_id_t eu_session;
+ size_t eu_datalen;
+ caddr_t eu_databuf;
+ size_t eu_encrlen;
+ caddr_t eu_encrbuf;
+} crypto_sign_encrypt_update_t;
+
+typedef struct crypto_decrypt_verify_update {
+ uint_t vu_return_value;
+ crypto_session_id_t vu_session;
+ size_t vu_encrlen;
+ caddr_t vu_encrbuf;
+ size_t vu_datalen;
+ caddr_t vu_databuf;
+} crypto_decrypt_verify_update_t;
+
+#ifdef _KERNEL
+#ifdef _SYSCALL32
+
+typedef struct crypto_encrypt32 {
+ uint32_t ce_return_value;
+ crypto_session_id_t ce_session;
+ size32_t ce_datalen;
+ caddr32_t ce_databuf;
+ size32_t ce_encrlen;
+ caddr32_t ce_encrbuf;
+ uint32_t ce_flags;
+} crypto_encrypt32_t;
+
+#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
+#pragma pack(4)
+#endif
+
+typedef struct crypto_encrypt_init32 {
+ uint32_t ei_return_value;
+ crypto_session_id_t ei_session;
+ crypto_mechanism32_t ei_mech;
+ crypto_key32_t ei_key;
+} crypto_encrypt_init32_t;
+
+#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
+#pragma pack()
+#endif
+
+typedef struct crypto_encrypt_update32 {
+ uint32_t eu_return_value;
+ crypto_session_id_t eu_session;
+ size32_t eu_datalen;
+ caddr32_t eu_databuf;
+ size32_t eu_encrlen;
+ caddr32_t eu_encrbuf;
+} crypto_encrypt_update32_t;
+
+typedef struct crypto_encrypt_final32 {
+ uint32_t ef_return_value;
+ crypto_session_id_t ef_session;
+ size32_t ef_encrlen;
+ caddr32_t ef_encrbuf;
+} crypto_encrypt_final32_t;
+
+typedef struct crypto_decrypt32 {
+ uint32_t cd_return_value;
+ crypto_session_id_t cd_session;
+ size32_t cd_encrlen;
+ caddr32_t cd_encrbuf;
+ size32_t cd_datalen;
+ caddr32_t cd_databuf;
+ uint32_t cd_flags;
+} crypto_decrypt32_t;
+
+#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
+#pragma pack(4)
+#endif
+
+typedef struct crypto_decrypt_init32 {
+ uint32_t di_return_value;
+ crypto_session_id_t di_session;
+ crypto_mechanism32_t di_mech;
+ crypto_key32_t di_key;
+} crypto_decrypt_init32_t;
+
+#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
+#pragma pack()
+#endif
+
+typedef struct crypto_decrypt_update32 {
+ uint32_t du_return_value;
+ crypto_session_id_t du_session;
+ size32_t du_encrlen;
+ caddr32_t du_encrbuf;
+ size32_t du_datalen;
+ caddr32_t du_databuf;
+} crypto_decrypt_update32_t;
+
+typedef struct crypto_decrypt_final32 {
+ uint32_t df_return_value;
+ crypto_session_id_t df_session;
+ size32_t df_datalen;
+ caddr32_t df_databuf;
+} crypto_decrypt_final32_t;
+
+typedef struct crypto_digest32 {
+ uint32_t cd_return_value;
+ crypto_session_id_t cd_session;
+ size32_t cd_datalen;
+ caddr32_t cd_databuf;
+ size32_t cd_digestlen;
+ caddr32_t cd_digestbuf;
+} crypto_digest32_t;
+
+typedef struct crypto_digest_init32 {
+ uint32_t di_return_value;
+ crypto_session_id_t di_session;
+ crypto_mechanism32_t di_mech;
+} crypto_digest_init32_t;
+
+typedef struct crypto_digest_update32 {
+ uint32_t du_return_value;
+ crypto_session_id_t du_session;
+ size32_t du_datalen;
+ caddr32_t du_databuf;
+} crypto_digest_update32_t;
+
+typedef struct crypto_digest_key32 {
+ uint32_t dk_return_value;
+ crypto_session_id_t dk_session;
+ crypto_key32_t dk_key;
+} crypto_digest_key32_t;
+
+typedef struct crypto_digest_final32 {
+ uint32_t df_return_value;
+ crypto_session_id_t df_session;
+ size32_t df_digestlen;
+ caddr32_t df_digestbuf;
+} crypto_digest_final32_t;
+
+typedef struct crypto_mac32 {
+ uint32_t cm_return_value;
+ crypto_session_id_t cm_session;
+ size32_t cm_datalen;
+ caddr32_t cm_databuf;
+ size32_t cm_maclen;
+ caddr32_t cm_macbuf;
+} crypto_mac32_t;
+
+#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
+#pragma pack(4)
+#endif
+
+typedef struct crypto_mac_init32 {
+ uint32_t mi_return_value;
+ crypto_session_id_t mi_session;
+ crypto_mechanism32_t mi_mech;
+ crypto_key32_t mi_key;
+} crypto_mac_init32_t;
+
+#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
+#pragma pack()
+#endif
+
+typedef struct crypto_mac_update32 {
+ uint32_t mu_return_value;
+ crypto_session_id_t mu_session;
+ size32_t mu_datalen;
+ caddr32_t mu_databuf;
+} crypto_mac_update32_t;
+
+typedef struct crypto_mac_final32 {
+ uint32_t mf_return_value;
+ crypto_session_id_t mf_session;
+ size32_t mf_maclen;
+ caddr32_t mf_macbuf;
+} crypto_mac_final32_t;
+
+typedef struct crypto_sign32 {
+ uint32_t cs_return_value;
+ crypto_session_id_t cs_session;
+ size32_t cs_datalen;
+ caddr32_t cs_databuf;
+ size32_t cs_signlen;
+ caddr32_t cs_signbuf;
+} crypto_sign32_t;
+
+#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
+#pragma pack(4)
+#endif
+
+typedef struct crypto_sign_init32 {
+ uint32_t si_return_value;
+ crypto_session_id_t si_session;
+ crypto_mechanism32_t si_mech;
+ crypto_key32_t si_key;
+} crypto_sign_init32_t;
+
+#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
+#pragma pack()
+#endif
+
+typedef struct crypto_sign_update32 {
+ uint32_t su_return_value;
+ crypto_session_id_t su_session;
+ size32_t su_datalen;
+ caddr32_t su_databuf;
+} crypto_sign_update32_t;
+
+typedef struct crypto_sign_final32 {
+ uint32_t sf_return_value;
+ crypto_session_id_t sf_session;
+ size32_t sf_signlen;
+ caddr32_t sf_signbuf;
+} crypto_sign_final32_t;
+
+#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
+#pragma pack(4)
+#endif
+
+typedef struct crypto_sign_recover_init32 {
+ uint32_t ri_return_value;
+ crypto_session_id_t ri_session;
+ crypto_mechanism32_t ri_mech;
+ crypto_key32_t ri_key;
+} crypto_sign_recover_init32_t;
+
+#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
+#pragma pack()
+#endif
+
+typedef struct crypto_sign_recover32 {
+ uint32_t sr_return_value;
+ crypto_session_id_t sr_session;
+ size32_t sr_datalen;
+ caddr32_t sr_databuf;
+ size32_t sr_signlen;
+ caddr32_t sr_signbuf;
+} crypto_sign_recover32_t;
+
+typedef struct crypto_verify32 {
+ uint32_t cv_return_value;
+ crypto_session_id_t cv_session;
+ size32_t cv_datalen;
+ caddr32_t cv_databuf;
+ size32_t cv_signlen;
+ caddr32_t cv_signbuf;
+} crypto_verify32_t;
+
+#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
+#pragma pack(4)
+#endif
+
+typedef struct crypto_verify_init32 {
+ uint32_t vi_return_value;
+ crypto_session_id_t vi_session;
+ crypto_mechanism32_t vi_mech;
+ crypto_key32_t vi_key;
+} crypto_verify_init32_t;
+
+#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
+#pragma pack()
+#endif
+
+typedef struct crypto_verify_update32 {
+ uint32_t vu_return_value;
+ crypto_session_id_t vu_session;
+ size32_t vu_datalen;
+ caddr32_t vu_databuf;
+} crypto_verify_update32_t;
+
+typedef struct crypto_verify_final32 {
+ uint32_t vf_return_value;
+ crypto_session_id_t vf_session;
+ size32_t vf_signlen;
+ caddr32_t vf_signbuf;
+} crypto_verify_final32_t;
+
+#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
+#pragma pack(4)
+#endif
+
+typedef struct crypto_verify_recover_init32 {
+ uint32_t ri_return_value;
+ crypto_session_id_t ri_session;
+ crypto_mechanism32_t ri_mech;
+ crypto_key32_t ri_key;
+} crypto_verify_recover_init32_t;
+
+#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
+#pragma pack()
+#endif
+
+typedef struct crypto_verify_recover32 {
+ uint32_t vr_return_value;
+ crypto_session_id_t vr_session;
+ size32_t vr_signlen;
+ caddr32_t vr_signbuf;
+ size32_t vr_datalen;
+ caddr32_t vr_databuf;
+} crypto_verify_recover32_t;
+
+typedef struct crypto_digest_encrypt_update32 {
+ uint32_t eu_return_value;
+ crypto_session_id_t eu_session;
+ size32_t eu_datalen;
+ caddr32_t eu_databuf;
+ size32_t eu_encrlen;
+ caddr32_t eu_encrbuf;
+} crypto_digest_encrypt_update32_t;
+
+typedef struct crypto_decrypt_digest_update32 {
+ uint32_t du_return_value;
+ crypto_session_id_t du_session;
+ size32_t du_encrlen;
+ caddr32_t du_encrbuf;
+ size32_t du_datalen;
+ caddr32_t du_databuf;
+} crypto_decrypt_digest_update32_t;
+
+typedef struct crypto_sign_encrypt_update32 {
+ uint32_t eu_return_value;
+ crypto_session_id_t eu_session;
+ size32_t eu_datalen;
+ caddr32_t eu_databuf;
+ size32_t eu_encrlen;
+ caddr32_t eu_encrbuf;
+} crypto_sign_encrypt_update32_t;
+
+typedef struct crypto_decrypt_verify_update32 {
+ uint32_t vu_return_value;
+ crypto_session_id_t vu_session;
+ size32_t vu_encrlen;
+ caddr32_t vu_encrbuf;
+ size32_t vu_datalen;
+ caddr32_t vu_databuf;
+} crypto_decrypt_verify_update32_t;
+
+#endif /* _SYSCALL32 */
+#endif /* _KERNEL */
+
+#define CRYPTO_ENCRYPT CRYPTO(50)
+#define CRYPTO_ENCRYPT_INIT CRYPTO(51)
+#define CRYPTO_ENCRYPT_UPDATE CRYPTO(52)
+#define CRYPTO_ENCRYPT_FINAL CRYPTO(53)
+#define CRYPTO_DECRYPT CRYPTO(54)
+#define CRYPTO_DECRYPT_INIT CRYPTO(55)
+#define CRYPTO_DECRYPT_UPDATE CRYPTO(56)
+#define CRYPTO_DECRYPT_FINAL CRYPTO(57)
+
+#define CRYPTO_DIGEST CRYPTO(58)
+#define CRYPTO_DIGEST_INIT CRYPTO(59)
+#define CRYPTO_DIGEST_UPDATE CRYPTO(60)
+#define CRYPTO_DIGEST_KEY CRYPTO(61)
+#define CRYPTO_DIGEST_FINAL CRYPTO(62)
+#define CRYPTO_MAC CRYPTO(63)
+#define CRYPTO_MAC_INIT CRYPTO(64)
+#define CRYPTO_MAC_UPDATE CRYPTO(65)
+#define CRYPTO_MAC_FINAL CRYPTO(66)
+
+#define CRYPTO_SIGN CRYPTO(67)
+#define CRYPTO_SIGN_INIT CRYPTO(68)
+#define CRYPTO_SIGN_UPDATE CRYPTO(69)
+#define CRYPTO_SIGN_FINAL CRYPTO(70)
+#define CRYPTO_SIGN_RECOVER_INIT CRYPTO(71)
+#define CRYPTO_SIGN_RECOVER CRYPTO(72)
+#define CRYPTO_VERIFY CRYPTO(73)
+#define CRYPTO_VERIFY_INIT CRYPTO(74)
+#define CRYPTO_VERIFY_UPDATE CRYPTO(75)
+#define CRYPTO_VERIFY_FINAL CRYPTO(76)
+#define CRYPTO_VERIFY_RECOVER_INIT CRYPTO(77)
+#define CRYPTO_VERIFY_RECOVER CRYPTO(78)
+
+#define CRYPTO_DIGEST_ENCRYPT_UPDATE CRYPTO(79)
+#define CRYPTO_DECRYPT_DIGEST_UPDATE CRYPTO(80)
+#define CRYPTO_SIGN_ENCRYPT_UPDATE CRYPTO(81)
+#define CRYPTO_DECRYPT_VERIFY_UPDATE CRYPTO(82)
+
+/*
+ * Random Number Ioctls
+ */
+typedef struct crypto_seed_random {
+ uint_t sr_return_value;
+ crypto_session_id_t sr_session;
+ size_t sr_seedlen;
+ caddr_t sr_seedbuf;
+} crypto_seed_random_t;
+
+typedef struct crypto_generate_random {
+ uint_t gr_return_value;
+ crypto_session_id_t gr_session;
+ caddr_t gr_buf;
+ size_t gr_buflen;
+} crypto_generate_random_t;
+
+#ifdef _KERNEL
+#ifdef _SYSCALL32
+
+typedef struct crypto_seed_random32 {
+ uint32_t sr_return_value;
+ crypto_session_id_t sr_session;
+ size32_t sr_seedlen;
+ caddr32_t sr_seedbuf;
+} crypto_seed_random32_t;
+
+typedef struct crypto_generate_random32 {
+ uint32_t gr_return_value;
+ crypto_session_id_t gr_session;
+ caddr32_t gr_buf;
+ size32_t gr_buflen;
+} crypto_generate_random32_t;
+
+#endif /* _SYSCALL32 */
+#endif /* _KERNEL */
+
+#define CRYPTO_SEED_RANDOM CRYPTO(90)
+#define CRYPTO_GENERATE_RANDOM CRYPTO(91)
+
+/*
+ * Object Management Ioctls
+ */
+typedef struct crypto_object_create {
+ uint_t oc_return_value;
+ crypto_session_id_t oc_session;
+ crypto_object_id_t oc_handle;
+ uint_t oc_count;
+ caddr_t oc_attributes;
+} crypto_object_create_t;
+
+typedef struct crypto_object_copy {
+ uint_t oc_return_value;
+ crypto_session_id_t oc_session;
+ crypto_object_id_t oc_handle;
+ crypto_object_id_t oc_new_handle;
+ uint_t oc_count;
+ caddr_t oc_new_attributes;
+} crypto_object_copy_t;
+
+typedef struct crypto_object_destroy {
+ uint_t od_return_value;
+ crypto_session_id_t od_session;
+ crypto_object_id_t od_handle;
+} crypto_object_destroy_t;
+
+typedef struct crypto_object_get_attribute_value {
+ uint_t og_return_value;
+ crypto_session_id_t og_session;
+ crypto_object_id_t og_handle;
+ uint_t og_count;
+ caddr_t og_attributes;
+} crypto_object_get_attribute_value_t;
+
+typedef struct crypto_object_get_size {
+ uint_t gs_return_value;
+ crypto_session_id_t gs_session;
+ crypto_object_id_t gs_handle;
+ size_t gs_size;
+} crypto_object_get_size_t;
+
+typedef struct crypto_object_set_attribute_value {
+ uint_t sa_return_value;
+ crypto_session_id_t sa_session;
+ crypto_object_id_t sa_handle;
+ uint_t sa_count;
+ caddr_t sa_attributes;
+} crypto_object_set_attribute_value_t;
+
+typedef struct crypto_object_find_init {
+ uint_t fi_return_value;
+ crypto_session_id_t fi_session;
+ uint_t fi_count;
+ caddr_t fi_attributes;
+} crypto_object_find_init_t;
+
+typedef struct crypto_object_find_update {
+ uint_t fu_return_value;
+ crypto_session_id_t fu_session;
+ uint_t fu_max_count;
+ uint_t fu_count;
+ caddr_t fu_handles;
+} crypto_object_find_update_t;
+
+typedef struct crypto_object_find_final {
+ uint_t ff_return_value;
+ crypto_session_id_t ff_session;
+} crypto_object_find_final_t;
+
+#ifdef _KERNEL
+#ifdef _SYSCALL32
+
+typedef struct crypto_object_create32 {
+ uint32_t oc_return_value;
+ crypto_session_id_t oc_session;
+ crypto_object_id_t oc_handle;
+ uint32_t oc_count;
+ caddr32_t oc_attributes;
+} crypto_object_create32_t;
+
+typedef struct crypto_object_copy32 {
+ uint32_t oc_return_value;
+ crypto_session_id_t oc_session;
+ crypto_object_id_t oc_handle;
+ crypto_object_id_t oc_new_handle;
+ uint32_t oc_count;
+ caddr32_t oc_new_attributes;
+} crypto_object_copy32_t;
+
+typedef struct crypto_object_destroy32 {
+ uint32_t od_return_value;
+ crypto_session_id_t od_session;
+ crypto_object_id_t od_handle;
+} crypto_object_destroy32_t;
+
+typedef struct crypto_object_get_attribute_value32 {
+ uint32_t og_return_value;
+ crypto_session_id_t og_session;
+ crypto_object_id_t og_handle;
+ uint32_t og_count;
+ caddr32_t og_attributes;
+} crypto_object_get_attribute_value32_t;
+
+typedef struct crypto_object_get_size32 {
+ uint32_t gs_return_value;
+ crypto_session_id_t gs_session;
+ crypto_object_id_t gs_handle;
+ size32_t gs_size;
+} crypto_object_get_size32_t;
+
+typedef struct crypto_object_set_attribute_value32 {
+ uint32_t sa_return_value;
+ crypto_session_id_t sa_session;
+ crypto_object_id_t sa_handle;
+ uint32_t sa_count;
+ caddr32_t sa_attributes;
+} crypto_object_set_attribute_value32_t;
+
+typedef struct crypto_object_find_init32 {
+ uint32_t fi_return_value;
+ crypto_session_id_t fi_session;
+ uint32_t fi_count;
+ caddr32_t fi_attributes;
+} crypto_object_find_init32_t;
+
+typedef struct crypto_object_find_update32 {
+ uint32_t fu_return_value;
+ crypto_session_id_t fu_session;
+ uint32_t fu_max_count;
+ uint32_t fu_count;
+ caddr32_t fu_handles;
+} crypto_object_find_update32_t;
+
+typedef struct crypto_object_find_final32 {
+ uint32_t ff_return_value;
+ crypto_session_id_t ff_session;
+} crypto_object_find_final32_t;
+
+#endif /* _SYSCALL32 */
+#endif /* _KERNEL */
+
+#define CRYPTO_OBJECT_CREATE CRYPTO(100)
+#define CRYPTO_OBJECT_COPY CRYPTO(101)
+#define CRYPTO_OBJECT_DESTROY CRYPTO(102)
+#define CRYPTO_OBJECT_GET_ATTRIBUTE_VALUE CRYPTO(103)
+#define CRYPTO_OBJECT_GET_SIZE CRYPTO(104)
+#define CRYPTO_OBJECT_SET_ATTRIBUTE_VALUE CRYPTO(105)
+#define CRYPTO_OBJECT_FIND_INIT CRYPTO(106)
+#define CRYPTO_OBJECT_FIND_UPDATE CRYPTO(107)
+#define CRYPTO_OBJECT_FIND_FINAL CRYPTO(108)
+
+/*
+ * Key Generation Ioctls
+ */
+typedef struct crypto_object_generate_key {
+ uint_t gk_return_value;
+ crypto_session_id_t gk_session;
+ crypto_object_id_t gk_handle;
+ crypto_mechanism_t gk_mechanism;
+ uint_t gk_count;
+ caddr_t gk_attributes;
+} crypto_object_generate_key_t;
+
+typedef struct crypto_object_generate_key_pair {
+ uint_t kp_return_value;
+ crypto_session_id_t kp_session;
+ crypto_object_id_t kp_public_handle;
+ crypto_object_id_t kp_private_handle;
+ uint_t kp_public_count;
+ uint_t kp_private_count;
+ caddr_t kp_public_attributes;
+ caddr_t kp_private_attributes;
+ crypto_mechanism_t kp_mechanism;
+} crypto_object_generate_key_pair_t;
+
+typedef struct crypto_object_wrap_key {
+ uint_t wk_return_value;
+ crypto_session_id_t wk_session;
+ crypto_mechanism_t wk_mechanism;
+ crypto_key_t wk_wrapping_key;
+ crypto_object_id_t wk_object_handle;
+ size_t wk_wrapped_key_len;
+ caddr_t wk_wrapped_key;
+} crypto_object_wrap_key_t;
+
+typedef struct crypto_object_unwrap_key {
+ uint_t uk_return_value;
+ crypto_session_id_t uk_session;
+ crypto_mechanism_t uk_mechanism;
+ crypto_key_t uk_unwrapping_key;
+ crypto_object_id_t uk_object_handle;
+ size_t uk_wrapped_key_len;
+ caddr_t uk_wrapped_key;
+ uint_t uk_count;
+ caddr_t uk_attributes;
+} crypto_object_unwrap_key_t;
+
+typedef struct crypto_derive_key {
+ uint_t dk_return_value;
+ crypto_session_id_t dk_session;
+ crypto_mechanism_t dk_mechanism;
+ crypto_key_t dk_base_key;
+ crypto_object_id_t dk_object_handle;
+ uint_t dk_count;
+ caddr_t dk_attributes;
+} crypto_derive_key_t;
+
+#ifdef _KERNEL
+#ifdef _SYSCALL32
+
+#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
+#pragma pack(4)
+#endif
+
+typedef struct crypto_object_generate_key32 {
+ uint32_t gk_return_value;
+ crypto_session_id_t gk_session;
+ crypto_object_id_t gk_handle;
+ crypto_mechanism32_t gk_mechanism;
+ uint32_t gk_count;
+ caddr32_t gk_attributes;
+} crypto_object_generate_key32_t;
+
+#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
+#pragma pack()
+#endif
+
+typedef struct crypto_object_generate_key_pair32 {
+ uint32_t kp_return_value;
+ crypto_session_id_t kp_session;
+ crypto_object_id_t kp_public_handle;
+ crypto_object_id_t kp_private_handle;
+ uint32_t kp_public_count;
+ uint32_t kp_private_count;
+ caddr32_t kp_public_attributes;
+ caddr32_t kp_private_attributes;
+ crypto_mechanism32_t kp_mechanism;
+} crypto_object_generate_key_pair32_t;
+
+typedef struct crypto_object_wrap_key32 {
+ uint32_t wk_return_value;
+ crypto_session_id_t wk_session;
+ crypto_mechanism32_t wk_mechanism;
+ crypto_key32_t wk_wrapping_key;
+ crypto_object_id_t wk_object_handle;
+ size32_t wk_wrapped_key_len;
+ caddr32_t wk_wrapped_key;
+} crypto_object_wrap_key32_t;
+
+typedef struct crypto_object_unwrap_key32 {
+ uint32_t uk_return_value;
+ crypto_session_id_t uk_session;
+ crypto_mechanism32_t uk_mechanism;
+ crypto_key32_t uk_unwrapping_key;
+ crypto_object_id_t uk_object_handle;
+ size32_t uk_wrapped_key_len;
+ caddr32_t uk_wrapped_key;
+ uint32_t uk_count;
+ caddr32_t uk_attributes;
+} crypto_object_unwrap_key32_t;
+
+typedef struct crypto_derive_key32 {
+ uint32_t dk_return_value;
+ crypto_session_id_t dk_session;
+ crypto_mechanism32_t dk_mechanism;
+ crypto_key32_t dk_base_key;
+ crypto_object_id_t dk_object_handle;
+ uint32_t dk_count;
+ caddr32_t dk_attributes;
+} crypto_derive_key32_t;
+
+#endif /* _SYSCALL32 */
+#endif /* _KERNEL */
+
+#define CRYPTO_GENERATE_KEY CRYPTO(110)
+#define CRYPTO_GENERATE_KEY_PAIR CRYPTO(111)
+#define CRYPTO_WRAP_KEY CRYPTO(112)
+#define CRYPTO_UNWRAP_KEY CRYPTO(113)
+#define CRYPTO_DERIVE_KEY CRYPTO(114)
+
+/*
+ * Provider Management Ioctls
+ */
+
+typedef struct crypto_get_provider_list {
+ uint_t pl_return_value;
+ uint_t pl_count;
+ crypto_provider_entry_t pl_list[1];
+} crypto_get_provider_list_t;
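+
+/*
+ * pl_list is a variable-length tail: callers allocate
+ * sizeof (crypto_get_provider_list_t) plus (pl_count - 1) *
+ * sizeof (crypto_provider_entry_t) and index past the single declared
+ * element.  The same [1] idiom recurs in the list structures below.
+ */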
+
+typedef struct crypto_provider_data {
+ uchar_t pd_prov_desc[CRYPTO_PROVIDER_DESCR_MAX_LEN];
+ uchar_t pd_label[CRYPTO_EXT_SIZE_LABEL];
+ uchar_t pd_manufacturerID[CRYPTO_EXT_SIZE_MANUF];
+ uchar_t pd_model[CRYPTO_EXT_SIZE_MODEL];
+ uchar_t pd_serial_number[CRYPTO_EXT_SIZE_SERIAL];
+ ulong_t pd_flags;
+ ulong_t pd_max_session_count;
+ ulong_t pd_session_count;
+ ulong_t pd_max_rw_session_count;
+ ulong_t pd_rw_session_count;
+ ulong_t pd_max_pin_len;
+ ulong_t pd_min_pin_len;
+ ulong_t pd_total_public_memory;
+ ulong_t pd_free_public_memory;
+ ulong_t pd_total_private_memory;
+ ulong_t pd_free_private_memory;
+ crypto_version_t pd_hardware_version;
+ crypto_version_t pd_firmware_version;
+ uchar_t pd_time[CRYPTO_EXT_SIZE_TIME];
+} crypto_provider_data_t;
+
+typedef struct crypto_get_provider_info {
+ uint_t gi_return_value;
+ crypto_provider_id_t gi_provider_id;
+ crypto_provider_data_t gi_provider_data;
+} crypto_get_provider_info_t;
+
+typedef struct crypto_get_provider_mechanisms {
+ uint_t pm_return_value;
+ crypto_provider_id_t pm_provider_id;
+ uint_t pm_count;
+ crypto_mech_name_t pm_list[1];
+} crypto_get_provider_mechanisms_t;
+
+typedef struct crypto_get_provider_mechanism_info {
+ uint_t mi_return_value;
+ crypto_provider_id_t mi_provider_id;
+ crypto_mech_name_t mi_mechanism_name;
+ uint32_t mi_min_key_size;
+ uint32_t mi_max_key_size;
+ uint32_t mi_flags;
+} crypto_get_provider_mechanism_info_t;
+
+typedef struct crypto_init_token {
+ uint_t it_return_value;
+ crypto_provider_id_t it_provider_id;
+ caddr_t it_pin;
+ size_t it_pin_len;
+ caddr_t it_label;
+} crypto_init_token_t;
+
+typedef struct crypto_init_pin {
+ uint_t ip_return_value;
+ crypto_session_id_t ip_session;
+ caddr_t ip_pin;
+ size_t ip_pin_len;
+} crypto_init_pin_t;
+
+typedef struct crypto_set_pin {
+ uint_t sp_return_value;
+ crypto_session_id_t sp_session;
+ caddr_t sp_old_pin;
+ size_t sp_old_len;
+ caddr_t sp_new_pin;
+ size_t sp_new_len;
+} crypto_set_pin_t;
+
+#ifdef _KERNEL
+#ifdef _SYSCALL32
+
+typedef struct crypto_get_provider_list32 {
+ uint32_t pl_return_value;
+ uint32_t pl_count;
+ crypto_provider_entry_t pl_list[1];
+} crypto_get_provider_list32_t;
+
+typedef struct crypto_version32 {
+ uchar_t cv_major;
+ uchar_t cv_minor;
+} crypto_version32_t;
+
+typedef struct crypto_provider_data32 {
+ uchar_t pd_prov_desc[CRYPTO_PROVIDER_DESCR_MAX_LEN];
+ uchar_t pd_label[CRYPTO_EXT_SIZE_LABEL];
+ uchar_t pd_manufacturerID[CRYPTO_EXT_SIZE_MANUF];
+ uchar_t pd_model[CRYPTO_EXT_SIZE_MODEL];
+ uchar_t pd_serial_number[CRYPTO_EXT_SIZE_SERIAL];
+ uint32_t pd_flags;
+ uint32_t pd_max_session_count;
+ uint32_t pd_session_count;
+ uint32_t pd_max_rw_session_count;
+ uint32_t pd_rw_session_count;
+ uint32_t pd_max_pin_len;
+ uint32_t pd_min_pin_len;
+ uint32_t pd_total_public_memory;
+ uint32_t pd_free_public_memory;
+ uint32_t pd_total_private_memory;
+ uint32_t pd_free_private_memory;
+ crypto_version32_t pd_hardware_version;
+ crypto_version32_t pd_firmware_version;
+ uchar_t pd_time[CRYPTO_EXT_SIZE_TIME];
+} crypto_provider_data32_t;
+
+typedef struct crypto_get_provider_info32 {
+ uint32_t gi_return_value;
+ crypto_provider_id_t gi_provider_id;
+ crypto_provider_data32_t gi_provider_data;
+} crypto_get_provider_info32_t;
+
+typedef struct crypto_get_provider_mechanisms32 {
+ uint32_t pm_return_value;
+ crypto_provider_id_t pm_provider_id;
+ uint32_t pm_count;
+ crypto_mech_name_t pm_list[1];
+} crypto_get_provider_mechanisms32_t;
+
+typedef struct crypto_init_token32 {
+ uint32_t it_return_value;
+ crypto_provider_id_t it_provider_id;
+ caddr32_t it_pin;
+ size32_t it_pin_len;
+ caddr32_t it_label;
+} crypto_init_token32_t;
+
+typedef struct crypto_init_pin32 {
+ uint32_t ip_return_value;
+ crypto_session_id_t ip_session;
+ caddr32_t ip_pin;
+ size32_t ip_pin_len;
+} crypto_init_pin32_t;
+
+typedef struct crypto_set_pin32 {
+ uint32_t sp_return_value;
+ crypto_session_id_t sp_session;
+ caddr32_t sp_old_pin;
+ size32_t sp_old_len;
+ caddr32_t sp_new_pin;
+ size32_t sp_new_len;
+} crypto_set_pin32_t;
+
+#endif /* _SYSCALL32 */
+#endif /* _KERNEL */
+
+#define CRYPTO_GET_PROVIDER_LIST CRYPTO(120)
+#define CRYPTO_GET_PROVIDER_INFO CRYPTO(121)
+#define CRYPTO_GET_PROVIDER_MECHANISMS CRYPTO(122)
+#define CRYPTO_GET_PROVIDER_MECHANISM_INFO CRYPTO(123)
+#define CRYPTO_INIT_TOKEN CRYPTO(124)
+#define CRYPTO_INIT_PIN CRYPTO(125)
+#define CRYPTO_SET_PIN CRYPTO(126)
+
+/*
+ * No-store ("nostore") Key Generation Ioctls: keys are generated but
+ * not stored by the provider.
+ */
+typedef struct crypto_nostore_generate_key {
+ uint_t ngk_return_value;
+ crypto_session_id_t ngk_session;
+ crypto_mechanism_t ngk_mechanism;
+ uint_t ngk_in_count;
+ uint_t ngk_out_count;
+ caddr_t ngk_in_attributes;
+ caddr_t ngk_out_attributes;
+} crypto_nostore_generate_key_t;
+
+typedef struct crypto_nostore_generate_key_pair {
+ uint_t nkp_return_value;
+ crypto_session_id_t nkp_session;
+ uint_t nkp_in_public_count;
+ uint_t nkp_in_private_count;
+ uint_t nkp_out_public_count;
+ uint_t nkp_out_private_count;
+ caddr_t nkp_in_public_attributes;
+ caddr_t nkp_in_private_attributes;
+ caddr_t nkp_out_public_attributes;
+ caddr_t nkp_out_private_attributes;
+ crypto_mechanism_t nkp_mechanism;
+} crypto_nostore_generate_key_pair_t;
+
+typedef struct crypto_nostore_derive_key {
+ uint_t ndk_return_value;
+ crypto_session_id_t ndk_session;
+ crypto_mechanism_t ndk_mechanism;
+ crypto_key_t ndk_base_key;
+ uint_t ndk_in_count;
+ uint_t ndk_out_count;
+ caddr_t ndk_in_attributes;
+ caddr_t ndk_out_attributes;
+} crypto_nostore_derive_key_t;
+
+#ifdef _KERNEL
+#ifdef _SYSCALL32
+
+typedef struct crypto_nostore_generate_key32 {
+ uint32_t ngk_return_value;
+ crypto_session_id_t ngk_session;
+ crypto_mechanism32_t ngk_mechanism;
+ uint32_t ngk_in_count;
+ uint32_t ngk_out_count;
+ caddr32_t ngk_in_attributes;
+ caddr32_t ngk_out_attributes;
+} crypto_nostore_generate_key32_t;
+
+typedef struct crypto_nostore_generate_key_pair32 {
+ uint32_t nkp_return_value;
+ crypto_session_id_t nkp_session;
+ uint32_t nkp_in_public_count;
+ uint32_t nkp_in_private_count;
+ uint32_t nkp_out_public_count;
+ uint32_t nkp_out_private_count;
+ caddr32_t nkp_in_public_attributes;
+ caddr32_t nkp_in_private_attributes;
+ caddr32_t nkp_out_public_attributes;
+ caddr32_t nkp_out_private_attributes;
+ crypto_mechanism32_t nkp_mechanism;
+} crypto_nostore_generate_key_pair32_t;
+
+#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
+#pragma pack(4)
+#endif
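+/*
+ * Editorial note: the pack(4) above makes the LP64 kernel's layout of
+ * the following 32-bit structure match the ILP32 user view, in which
+ * 64-bit quantities are only 4-byte aligned.
+ */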
+
+typedef struct crypto_nostore_derive_key32 {
+ uint32_t ndk_return_value;
+ crypto_session_id_t ndk_session;
+ crypto_mechanism32_t ndk_mechanism;
+ crypto_key32_t ndk_base_key;
+ uint32_t ndk_in_count;
+ uint32_t ndk_out_count;
+ caddr32_t ndk_in_attributes;
+ caddr32_t ndk_out_attributes;
+} crypto_nostore_derive_key32_t;
+
+#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
+#pragma pack()
+#endif
+
+#endif /* _SYSCALL32 */
+#endif /* _KERNEL */
+
+#define CRYPTO_NOSTORE_GENERATE_KEY CRYPTO(127)
+#define CRYPTO_NOSTORE_GENERATE_KEY_PAIR CRYPTO(128)
+#define CRYPTO_NOSTORE_DERIVE_KEY CRYPTO(129)
+
+/*
+ * Mechanism Ioctls
+ */
+
+typedef struct crypto_get_mechanism_list {
+ uint_t ml_return_value;
+ uint_t ml_count;
+ crypto_mech_name_t ml_list[1];
+} crypto_get_mechanism_list_t;
+
+typedef struct crypto_get_all_mechanism_info {
+ uint_t mi_return_value;
+ crypto_mech_name_t mi_mechanism_name;
+ uint_t mi_count;
+ crypto_mechanism_info_t mi_list[1];
+} crypto_get_all_mechanism_info_t;
+
+#ifdef _KERNEL
+#ifdef _SYSCALL32
+
+typedef struct crypto_get_mechanism_list32 {
+ uint32_t ml_return_value;
+ uint32_t ml_count;
+ crypto_mech_name_t ml_list[1];
+} crypto_get_mechanism_list32_t;
+
+typedef struct crypto_get_all_mechanism_info32 {
+ uint32_t mi_return_value;
+ crypto_mech_name_t mi_mechanism_name;
+ uint32_t mi_count;
+ crypto_mechanism_info32_t mi_list[1];
+} crypto_get_all_mechanism_info32_t;
+
+#endif /* _SYSCALL32 */
+#endif /* _KERNEL */
+
+#define CRYPTO_GET_MECHANISM_LIST CRYPTO(140)
+#define CRYPTO_GET_ALL_MECHANISM_INFO CRYPTO(141)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_CRYPTO_IOCTL_H */
diff --git a/module/icp/include/sys/crypto/ioctladmin.h b/module/icp/include/sys/crypto/ioctladmin.h
new file mode 100644
index 000000000..24babd775
--- /dev/null
+++ b/module/icp/include/sys/crypto/ioctladmin.h
@@ -0,0 +1,136 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_CRYPTO_IOCTLADMIN_H
+#define _SYS_CRYPTO_IOCTLADMIN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/zfs_context.h>
+#include <sys/crypto/common.h>
+
+#define ADMIN_IOCTL_DEVICE "/dev/cryptoadm"
+
+#define CRYPTOADMIN(x) (('y' << 8) | (x))
+
+/*
+ * Administrative IOCTLs
+ */
+
+typedef struct crypto_get_dev_list {
+ uint_t dl_return_value;
+ uint_t dl_dev_count;
+ crypto_dev_list_entry_t dl_devs[1];
+} crypto_get_dev_list_t;
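+
+/*
+ * Illustrative sketch (editorial addition, not part of the original
+ * patch): structures like the one above end in a one-element array, so
+ * a caller sizes the buffer for n entries (n is a hypothetical count)
+ * roughly as follows.
+ */
+#if 0	/* illustrative only */
+	crypto_get_dev_list_t *dl;
+	size_t len = sizeof (*dl) +
+	    (n - 1) * sizeof (crypto_dev_list_entry_t);
+
+	dl = kmem_zalloc(len, KM_SLEEP);	/* room for n dl_devs[] */
+	dl->dl_dev_count = n;
+#endif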
+
+typedef struct crypto_get_soft_list {
+ uint_t sl_return_value;
+ uint_t sl_soft_count;
+ size_t sl_soft_len;
+ caddr_t sl_soft_names;
+} crypto_get_soft_list_t;
+
+typedef struct crypto_get_dev_info {
+ uint_t di_return_value;
+ char di_dev_name[MAXNAMELEN];
+ uint_t di_dev_instance;
+ uint_t di_count;
+ crypto_mech_name_t di_list[1];
+} crypto_get_dev_info_t;
+
+typedef struct crypto_get_soft_info {
+ uint_t si_return_value;
+ char si_name[MAXNAMELEN];
+ uint_t si_count;
+ crypto_mech_name_t si_list[1];
+} crypto_get_soft_info_t;
+
+typedef struct crypto_load_dev_disabled {
+ uint_t dd_return_value;
+ char dd_dev_name[MAXNAMELEN];
+ uint_t dd_dev_instance;
+ uint_t dd_count;
+ crypto_mech_name_t dd_list[1];
+} crypto_load_dev_disabled_t;
+
+typedef struct crypto_load_soft_disabled {
+ uint_t sd_return_value;
+ char sd_name[MAXNAMELEN];
+ uint_t sd_count;
+ crypto_mech_name_t sd_list[1];
+} crypto_load_soft_disabled_t;
+
+typedef struct crypto_unload_soft_module {
+ uint_t sm_return_value;
+ char sm_name[MAXNAMELEN];
+} crypto_unload_soft_module_t;
+
+typedef struct crypto_load_soft_config {
+ uint_t sc_return_value;
+ char sc_name[MAXNAMELEN];
+ uint_t sc_count;
+ crypto_mech_name_t sc_list[1];
+} crypto_load_soft_config_t;
+
+typedef struct crypto_load_door {
+ uint_t ld_return_value;
+ uint_t ld_did;
+} crypto_load_door_t;
+
+#ifdef _KERNEL
+#ifdef _SYSCALL32
+
+typedef struct crypto_get_soft_list32 {
+ uint32_t sl_return_value;
+ uint32_t sl_soft_count;
+ size32_t sl_soft_len;
+ caddr32_t sl_soft_names;
+} crypto_get_soft_list32_t;
+
+#endif /* _SYSCALL32 */
+#endif /* _KERNEL */
+
+#define CRYPTO_GET_VERSION CRYPTOADMIN(1)
+#define CRYPTO_GET_DEV_LIST CRYPTOADMIN(2)
+#define CRYPTO_GET_SOFT_LIST CRYPTOADMIN(3)
+#define CRYPTO_GET_DEV_INFO CRYPTOADMIN(4)
+#define CRYPTO_GET_SOFT_INFO CRYPTOADMIN(5)
+#define CRYPTO_LOAD_DEV_DISABLED CRYPTOADMIN(8)
+#define CRYPTO_LOAD_SOFT_DISABLED CRYPTOADMIN(9)
+#define CRYPTO_UNLOAD_SOFT_MODULE CRYPTOADMIN(10)
+#define CRYPTO_LOAD_SOFT_CONFIG CRYPTOADMIN(11)
+#define CRYPTO_POOL_CREATE CRYPTOADMIN(12)
+#define CRYPTO_POOL_WAIT CRYPTOADMIN(13)
+#define CRYPTO_POOL_RUN CRYPTOADMIN(14)
+#define CRYPTO_LOAD_DOOR CRYPTOADMIN(15)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_CRYPTO_IOCTLADMIN_H */
diff --git a/module/icp/include/sys/crypto/ops_impl.h b/module/icp/include/sys/crypto/ops_impl.h
new file mode 100644
index 000000000..230d74b06
--- /dev/null
+++ b/module/icp/include/sys/crypto/ops_impl.h
@@ -0,0 +1,630 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_CRYPTO_OPS_IMPL_H
+#define _SYS_CRYPTO_OPS_IMPL_H
+
+/*
+ * Scheduler internal structures.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/zfs_context.h>
+#include <sys/crypto/api.h>
+#include <sys/crypto/spi.h>
+#include <sys/crypto/impl.h>
+#include <sys/crypto/common.h>
+
+/*
+ * The parameters needed for each function group are batched
+ * in one structure. This is much simpler than having a
+ * separate structure for each function.
+ *
+ * In some cases, a field is generically named to keep the
+ * structure small. The comments indicate these cases.
+ */
+typedef struct kcf_digest_ops_params {
+ crypto_session_id_t do_sid;
+ crypto_mech_type_t do_framework_mechtype;
+ crypto_mechanism_t do_mech;
+ crypto_data_t *do_data;
+ crypto_data_t *do_digest;
+ crypto_key_t *do_digest_key; /* Argument for digest_key() */
+} kcf_digest_ops_params_t;
+
+typedef struct kcf_mac_ops_params {
+ crypto_session_id_t mo_sid;
+ crypto_mech_type_t mo_framework_mechtype;
+ crypto_mechanism_t mo_mech;
+ crypto_key_t *mo_key;
+ crypto_data_t *mo_data;
+ crypto_data_t *mo_mac;
+ crypto_spi_ctx_template_t mo_templ;
+} kcf_mac_ops_params_t;
+
+typedef struct kcf_encrypt_ops_params {
+ crypto_session_id_t eo_sid;
+ crypto_mech_type_t eo_framework_mechtype;
+ crypto_mechanism_t eo_mech;
+ crypto_key_t *eo_key;
+ crypto_data_t *eo_plaintext;
+ crypto_data_t *eo_ciphertext;
+ crypto_spi_ctx_template_t eo_templ;
+} kcf_encrypt_ops_params_t;
+
+typedef struct kcf_decrypt_ops_params {
+ crypto_session_id_t dop_sid;
+ crypto_mech_type_t dop_framework_mechtype;
+ crypto_mechanism_t dop_mech;
+ crypto_key_t *dop_key;
+ crypto_data_t *dop_ciphertext;
+ crypto_data_t *dop_plaintext;
+ crypto_spi_ctx_template_t dop_templ;
+} kcf_decrypt_ops_params_t;
+
+typedef struct kcf_sign_ops_params {
+ crypto_session_id_t so_sid;
+ crypto_mech_type_t so_framework_mechtype;
+ crypto_mechanism_t so_mech;
+ crypto_key_t *so_key;
+ crypto_data_t *so_data;
+ crypto_data_t *so_signature;
+ crypto_spi_ctx_template_t so_templ;
+} kcf_sign_ops_params_t;
+
+typedef struct kcf_verify_ops_params {
+ crypto_session_id_t vo_sid;
+ crypto_mech_type_t vo_framework_mechtype;
+ crypto_mechanism_t vo_mech;
+ crypto_key_t *vo_key;
+ crypto_data_t *vo_data;
+ crypto_data_t *vo_signature;
+ crypto_spi_ctx_template_t vo_templ;
+} kcf_verify_ops_params_t;
+
+typedef struct kcf_encrypt_mac_ops_params {
+ crypto_session_id_t em_sid;
+ crypto_mech_type_t em_framework_encr_mechtype;
+ crypto_mechanism_t em_encr_mech;
+ crypto_key_t *em_encr_key;
+ crypto_mech_type_t em_framework_mac_mechtype;
+ crypto_mechanism_t em_mac_mech;
+ crypto_key_t *em_mac_key;
+ crypto_data_t *em_plaintext;
+ crypto_dual_data_t *em_ciphertext;
+ crypto_data_t *em_mac;
+ crypto_spi_ctx_template_t em_encr_templ;
+ crypto_spi_ctx_template_t em_mac_templ;
+} kcf_encrypt_mac_ops_params_t;
+
+typedef struct kcf_mac_decrypt_ops_params {
+ crypto_session_id_t md_sid;
+ crypto_mech_type_t md_framework_mac_mechtype;
+ crypto_mechanism_t md_mac_mech;
+ crypto_key_t *md_mac_key;
+ crypto_mech_type_t md_framework_decr_mechtype;
+ crypto_mechanism_t md_decr_mech;
+ crypto_key_t *md_decr_key;
+ crypto_dual_data_t *md_ciphertext;
+ crypto_data_t *md_mac;
+ crypto_data_t *md_plaintext;
+ crypto_spi_ctx_template_t md_mac_templ;
+ crypto_spi_ctx_template_t md_decr_templ;
+} kcf_mac_decrypt_ops_params_t;
+
+typedef struct kcf_random_number_ops_params {
+ crypto_session_id_t rn_sid;
+ uchar_t *rn_buf;
+ size_t rn_buflen;
+ uint_t rn_entropy_est;
+ uint32_t rn_flags;
+} kcf_random_number_ops_params_t;
+
+/*
+ * so_pd is useful when the provider descriptor (pd) supplying the
+ * provider handle is different from the pd supplying the ops vector.
+ * This is the case for session open/close where so_pd can be the pd
+ * of a logical provider. The pd supplying the ops vector is passed
+ * as an argument to kcf_submit_request().
+ */
+typedef struct kcf_session_ops_params {
+ crypto_session_id_t *so_sid_ptr;
+ crypto_session_id_t so_sid;
+ crypto_user_type_t so_user_type;
+ char *so_pin;
+ size_t so_pin_len;
+ kcf_provider_desc_t *so_pd;
+} kcf_session_ops_params_t;
+
+typedef struct kcf_object_ops_params {
+ crypto_session_id_t oo_sid;
+ crypto_object_id_t oo_object_id;
+ crypto_object_attribute_t *oo_template;
+ uint_t oo_attribute_count;
+ crypto_object_id_t *oo_object_id_ptr;
+ size_t *oo_object_size;
+ void **oo_find_init_pp_ptr;
+ void *oo_find_pp;
+ uint_t oo_max_object_count;
+ uint_t *oo_object_count_ptr;
+} kcf_object_ops_params_t;
+
+/*
+ * ko_key is used to encode the wrapping key in key_wrap() and the
+ * unwrapping key in key_unwrap(). ko_key_template and
+ * ko_key_attribute_count are used to encode public template
+ * and public template attr count in key_generate_pair().
+ * kops->ko_key_object_id_ptr is used to encode public key
+ * in key_generate_pair().
+ */
+typedef struct kcf_key_ops_params {
+ crypto_session_id_t ko_sid;
+ crypto_mech_type_t ko_framework_mechtype;
+ crypto_mechanism_t ko_mech;
+ crypto_object_attribute_t *ko_key_template;
+ uint_t ko_key_attribute_count;
+ crypto_object_id_t *ko_key_object_id_ptr;
+ crypto_object_attribute_t *ko_private_key_template;
+ uint_t ko_private_key_attribute_count;
+ crypto_object_id_t *ko_private_key_object_id_ptr;
+ crypto_key_t *ko_key;
+ uchar_t *ko_wrapped_key;
+ size_t *ko_wrapped_key_len_ptr;
+ crypto_object_attribute_t *ko_out_template1;
+ crypto_object_attribute_t *ko_out_template2;
+ uint_t ko_out_attribute_count1;
+ uint_t ko_out_attribute_count2;
+} kcf_key_ops_params_t;
+
+/*
+ * po_pin and po_pin_len are used to encode new_pin and new_pin_len
+ * when wrapping set_pin() function parameters.
+ *
+ * po_pd is useful when the provider descriptor (pd) supplying the
+ * provider handle is different from the pd supplying the ops vector.
+ * This is true for the ext_info provider entry point where po_pd
+ * can be the pd of a logical provider. The pd supplying the ops vector
+ * is passed as an argument to kcf_submit_request().
+ */
+typedef struct kcf_provmgmt_ops_params {
+ crypto_session_id_t po_sid;
+ char *po_pin;
+ size_t po_pin_len;
+ char *po_old_pin;
+ size_t po_old_pin_len;
+ char *po_label;
+ crypto_provider_ext_info_t *po_ext_info;
+ kcf_provider_desc_t *po_pd;
+} kcf_provmgmt_ops_params_t;
+
+/*
+ * The operation type within a function group.
+ */
+typedef enum kcf_op_type {
+ /* common ops for all mechanisms */
+ KCF_OP_INIT = 1,
+ KCF_OP_SINGLE, /* pkcs11 sense. So, INIT is already done */
+ KCF_OP_UPDATE,
+ KCF_OP_FINAL,
+ KCF_OP_ATOMIC,
+
+ /* digest_key op */
+ KCF_OP_DIGEST_KEY,
+
+ /* mac specific op */
+ KCF_OP_MAC_VERIFY_ATOMIC,
+
+ /* mac/cipher specific op */
+ KCF_OP_MAC_VERIFY_DECRYPT_ATOMIC,
+
+ /* sign_recover ops */
+ KCF_OP_SIGN_RECOVER_INIT,
+ KCF_OP_SIGN_RECOVER,
+ KCF_OP_SIGN_RECOVER_ATOMIC,
+
+ /* verify_recover ops */
+ KCF_OP_VERIFY_RECOVER_INIT,
+ KCF_OP_VERIFY_RECOVER,
+ KCF_OP_VERIFY_RECOVER_ATOMIC,
+
+ /* random number ops */
+ KCF_OP_RANDOM_SEED,
+ KCF_OP_RANDOM_GENERATE,
+
+ /* session management ops */
+ KCF_OP_SESSION_OPEN,
+ KCF_OP_SESSION_CLOSE,
+ KCF_OP_SESSION_LOGIN,
+ KCF_OP_SESSION_LOGOUT,
+
+ /* object management ops */
+ KCF_OP_OBJECT_CREATE,
+ KCF_OP_OBJECT_COPY,
+ KCF_OP_OBJECT_DESTROY,
+ KCF_OP_OBJECT_GET_SIZE,
+ KCF_OP_OBJECT_GET_ATTRIBUTE_VALUE,
+ KCF_OP_OBJECT_SET_ATTRIBUTE_VALUE,
+ KCF_OP_OBJECT_FIND_INIT,
+ KCF_OP_OBJECT_FIND,
+ KCF_OP_OBJECT_FIND_FINAL,
+
+ /* key management ops */
+ KCF_OP_KEY_GENERATE,
+ KCF_OP_KEY_GENERATE_PAIR,
+ KCF_OP_KEY_WRAP,
+ KCF_OP_KEY_UNWRAP,
+ KCF_OP_KEY_DERIVE,
+ KCF_OP_KEY_CHECK,
+
+ /* provider management ops */
+ KCF_OP_MGMT_EXTINFO,
+ KCF_OP_MGMT_INITTOKEN,
+ KCF_OP_MGMT_INITPIN,
+ KCF_OP_MGMT_SETPIN
+} kcf_op_type_t;
+
+/*
+ * The operation groups that need wrapping of parameters. This is somewhat
+ * similar to the function group type in spi.h except that this also includes
+ * all the functions that don't have a mechanism.
+ *
+ * The wrapper macros should never take these enum values as an argument.
+ * Rather, they are assigned in the macro itself since they are known
+ * from the macro name.
+ */
+typedef enum kcf_op_group {
+ KCF_OG_DIGEST = 1,
+ KCF_OG_MAC,
+ KCF_OG_ENCRYPT,
+ KCF_OG_DECRYPT,
+ KCF_OG_SIGN,
+ KCF_OG_VERIFY,
+ KCF_OG_ENCRYPT_MAC,
+ KCF_OG_MAC_DECRYPT,
+ KCF_OG_RANDOM,
+ KCF_OG_SESSION,
+ KCF_OG_OBJECT,
+ KCF_OG_KEY,
+ KCF_OG_PROVMGMT,
+ KCF_OG_NOSTORE_KEY
+} kcf_op_group_t;
+
+/*
+ * The kcf_op_type_t enum values used here should be only for those
+ * operations for which there is a k-api routine in sys/crypto/api.h.
+ */
+#define IS_INIT_OP(ftype) ((ftype) == KCF_OP_INIT)
+#define IS_SINGLE_OP(ftype) ((ftype) == KCF_OP_SINGLE)
+#define IS_UPDATE_OP(ftype) ((ftype) == KCF_OP_UPDATE)
+#define IS_FINAL_OP(ftype) ((ftype) == KCF_OP_FINAL)
+#define IS_ATOMIC_OP(ftype) ( \
+ (ftype) == KCF_OP_ATOMIC || (ftype) == KCF_OP_MAC_VERIFY_ATOMIC || \
+ (ftype) == KCF_OP_MAC_VERIFY_DECRYPT_ATOMIC || \
+ (ftype) == KCF_OP_SIGN_RECOVER_ATOMIC || \
+ (ftype) == KCF_OP_VERIFY_RECOVER_ATOMIC)
+
+/*
+ * Keep the parameters associated with a request around.
+ * We need to pass them to the SPI.
+ */
+typedef struct kcf_req_params {
+ kcf_op_group_t rp_opgrp;
+ kcf_op_type_t rp_optype;
+
+ union {
+ kcf_digest_ops_params_t digest_params;
+ kcf_mac_ops_params_t mac_params;
+ kcf_encrypt_ops_params_t encrypt_params;
+ kcf_decrypt_ops_params_t decrypt_params;
+ kcf_sign_ops_params_t sign_params;
+ kcf_verify_ops_params_t verify_params;
+ kcf_encrypt_mac_ops_params_t encrypt_mac_params;
+ kcf_mac_decrypt_ops_params_t mac_decrypt_params;
+ kcf_random_number_ops_params_t random_number_params;
+ kcf_session_ops_params_t session_params;
+ kcf_object_ops_params_t object_params;
+ kcf_key_ops_params_t key_params;
+ kcf_provmgmt_ops_params_t provmgmt_params;
+ } rp_u;
+} kcf_req_params_t;
+
+
+/*
+ * The ioctl/k-api code should bundle the parameters into a kcf_req_params_t
+ * structure before calling a scheduler routine. The following macros are
+ * available for that purpose.
+ *
+ * For the most part, the macro arguments closely correspond to the
+ * function parameters. In some cases, we use generic names. The comments
+ * for the structure should indicate these cases.
+ */
+#define KCF_WRAP_DIGEST_OPS_PARAMS(req, ftype, _sid, _mech, _key, \
+ _data, _digest) { \
+ kcf_digest_ops_params_t *dops = &(req)->rp_u.digest_params; \
+ crypto_mechanism_t *mechp = _mech; \
+ \
+ (req)->rp_opgrp = KCF_OG_DIGEST; \
+ (req)->rp_optype = ftype; \
+ dops->do_sid = _sid; \
+ if (mechp != NULL) { \
+ dops->do_mech = *mechp; \
+ dops->do_framework_mechtype = mechp->cm_type; \
+ } \
+ dops->do_digest_key = _key; \
+ dops->do_data = _data; \
+ dops->do_digest = _digest; \
+}
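+
+/*
+ * Illustrative sketch (editorial addition, not part of the original
+ * patch): a k-api routine bundling an atomic digest request and handing
+ * it to the scheduler. kcf_submit_request() is declared in
+ * sched_impl.h; pd, crq, sid, mechp, data and digest are assumed
+ * caller variables.
+ */
+#if 0	/* illustrative only */
+	kcf_req_params_t params;
+	int error;
+
+	KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_ATOMIC, sid, mechp,
+	    NULL, data, digest);
+	error = kcf_submit_request(pd, NULL, crq, &params, B_FALSE);
+#endif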
+
+#define KCF_WRAP_MAC_OPS_PARAMS(req, ftype, _sid, _mech, _key, \
+ _data, _mac, _templ) { \
+ kcf_mac_ops_params_t *mops = &(req)->rp_u.mac_params; \
+ crypto_mechanism_t *mechp = _mech; \
+ \
+ (req)->rp_opgrp = KCF_OG_MAC; \
+ (req)->rp_optype = ftype; \
+ mops->mo_sid = _sid; \
+ if (mechp != NULL) { \
+ mops->mo_mech = *mechp; \
+ mops->mo_framework_mechtype = mechp->cm_type; \
+ } \
+ mops->mo_key = _key; \
+ mops->mo_data = _data; \
+ mops->mo_mac = _mac; \
+ mops->mo_templ = _templ; \
+}
+
+#define KCF_WRAP_ENCRYPT_OPS_PARAMS(req, ftype, _sid, _mech, _key, \
+ _plaintext, _ciphertext, _templ) { \
+ kcf_encrypt_ops_params_t *cops = &(req)->rp_u.encrypt_params; \
+ crypto_mechanism_t *mechp = _mech; \
+ \
+ (req)->rp_opgrp = KCF_OG_ENCRYPT; \
+ (req)->rp_optype = ftype; \
+ cops->eo_sid = _sid; \
+ if (mechp != NULL) { \
+ cops->eo_mech = *mechp; \
+ cops->eo_framework_mechtype = mechp->cm_type; \
+ } \
+ cops->eo_key = _key; \
+ cops->eo_plaintext = _plaintext; \
+ cops->eo_ciphertext = _ciphertext; \
+ cops->eo_templ = _templ; \
+}
+
+#define KCF_WRAP_DECRYPT_OPS_PARAMS(req, ftype, _sid, _mech, _key, \
+ _ciphertext, _plaintext, _templ) { \
+ kcf_decrypt_ops_params_t *cops = &(req)->rp_u.decrypt_params; \
+ crypto_mechanism_t *mechp = _mech; \
+ \
+ (req)->rp_opgrp = KCF_OG_DECRYPT; \
+ (req)->rp_optype = ftype; \
+ cops->dop_sid = _sid; \
+ if (mechp != NULL) { \
+ cops->dop_mech = *mechp; \
+ cops->dop_framework_mechtype = mechp->cm_type; \
+ } \
+ cops->dop_key = _key; \
+ cops->dop_ciphertext = _ciphertext; \
+ cops->dop_plaintext = _plaintext; \
+ cops->dop_templ = _templ; \
+}
+
+#define KCF_WRAP_SIGN_OPS_PARAMS(req, ftype, _sid, _mech, _key, \
+ _data, _signature, _templ) { \
+ kcf_sign_ops_params_t *sops = &(req)->rp_u.sign_params; \
+ crypto_mechanism_t *mechp = _mech; \
+ \
+ (req)->rp_opgrp = KCF_OG_SIGN; \
+ (req)->rp_optype = ftype; \
+ sops->so_sid = _sid; \
+ if (mechp != NULL) { \
+ sops->so_mech = *mechp; \
+ sops->so_framework_mechtype = mechp->cm_type; \
+ } \
+ sops->so_key = _key; \
+ sops->so_data = _data; \
+ sops->so_signature = _signature; \
+ sops->so_templ = _templ; \
+}
+
+#define KCF_WRAP_VERIFY_OPS_PARAMS(req, ftype, _sid, _mech, _key, \
+ _data, _signature, _templ) { \
+ kcf_verify_ops_params_t *vops = &(req)->rp_u.verify_params; \
+ crypto_mechanism_t *mechp = _mech; \
+ \
+ (req)->rp_opgrp = KCF_OG_VERIFY; \
+ (req)->rp_optype = ftype; \
+ vops->vo_sid = _sid; \
+ if (mechp != NULL) { \
+ vops->vo_mech = *mechp; \
+ vops->vo_framework_mechtype = mechp->cm_type; \
+ } \
+ vops->vo_key = _key; \
+ vops->vo_data = _data; \
+ vops->vo_signature = _signature; \
+ vops->vo_templ = _templ; \
+}
+
+#define KCF_WRAP_ENCRYPT_MAC_OPS_PARAMS(req, ftype, _sid, _encr_key, \
+ _mac_key, _plaintext, _ciphertext, _mac, _encr_templ, _mac_templ) { \
+ kcf_encrypt_mac_ops_params_t *cmops = &(req)->rp_u.encrypt_mac_params; \
+ \
+ (req)->rp_opgrp = KCF_OG_ENCRYPT_MAC; \
+ (req)->rp_optype = ftype; \
+ cmops->em_sid = _sid; \
+ cmops->em_encr_key = _encr_key; \
+ cmops->em_mac_key = _mac_key; \
+ cmops->em_plaintext = _plaintext; \
+ cmops->em_ciphertext = _ciphertext; \
+ cmops->em_mac = _mac; \
+ cmops->em_encr_templ = _encr_templ; \
+ cmops->em_mac_templ = _mac_templ; \
+}
+
+#define KCF_WRAP_MAC_DECRYPT_OPS_PARAMS(req, ftype, _sid, _mac_key, \
+ _decr_key, _ciphertext, _mac, _plaintext, _mac_templ, _decr_templ) { \
+ kcf_mac_decrypt_ops_params_t *cmops = &(req)->rp_u.mac_decrypt_params; \
+ \
+ (req)->rp_opgrp = KCF_OG_MAC_DECRYPT; \
+ (req)->rp_optype = ftype; \
+ cmops->md_sid = _sid; \
+ cmops->md_mac_key = _mac_key; \
+ cmops->md_decr_key = _decr_key; \
+ cmops->md_ciphertext = _ciphertext; \
+ cmops->md_mac = _mac; \
+ cmops->md_plaintext = _plaintext; \
+ cmops->md_mac_templ = _mac_templ; \
+ cmops->md_decr_templ = _decr_templ; \
+}
+
+#define KCF_WRAP_RANDOM_OPS_PARAMS(req, ftype, _sid, _buf, _buflen, \
+ _est, _flags) { \
+ kcf_random_number_ops_params_t *rops = \
+ &(req)->rp_u.random_number_params; \
+ \
+ (req)->rp_opgrp = KCF_OG_RANDOM; \
+ (req)->rp_optype = ftype; \
+ rops->rn_sid = _sid; \
+ rops->rn_buf = _buf; \
+ rops->rn_buflen = _buflen; \
+ rops->rn_entropy_est = _est; \
+ rops->rn_flags = _flags; \
+}
+
+#define KCF_WRAP_SESSION_OPS_PARAMS(req, ftype, _sid_ptr, _sid, \
+ _user_type, _pin, _pin_len, _pd) { \
+ kcf_session_ops_params_t *sops = &(req)->rp_u.session_params; \
+ \
+ (req)->rp_opgrp = KCF_OG_SESSION; \
+ (req)->rp_optype = ftype; \
+ sops->so_sid_ptr = _sid_ptr; \
+ sops->so_sid = _sid; \
+ sops->so_user_type = _user_type; \
+ sops->so_pin = _pin; \
+ sops->so_pin_len = _pin_len; \
+ sops->so_pd = _pd; \
+}
+
+#define KCF_WRAP_OBJECT_OPS_PARAMS(req, ftype, _sid, _object_id, \
+ _template, _attribute_count, _object_id_ptr, _object_size, \
+ _find_init_pp_ptr, _find_pp, _max_object_count, _object_count_ptr) { \
+ kcf_object_ops_params_t *jops = &(req)->rp_u.object_params; \
+ \
+ (req)->rp_opgrp = KCF_OG_OBJECT; \
+ (req)->rp_optype = ftype; \
+ jops->oo_sid = _sid; \
+ jops->oo_object_id = _object_id; \
+ jops->oo_template = _template; \
+ jops->oo_attribute_count = _attribute_count; \
+ jops->oo_object_id_ptr = _object_id_ptr; \
+ jops->oo_object_size = _object_size; \
+ jops->oo_find_init_pp_ptr = _find_init_pp_ptr; \
+ jops->oo_find_pp = _find_pp; \
+ jops->oo_max_object_count = _max_object_count; \
+ jops->oo_object_count_ptr = _object_count_ptr; \
+}
+
+#define KCF_WRAP_KEY_OPS_PARAMS(req, ftype, _sid, _mech, _key_template, \
+ _key_attribute_count, _key_object_id_ptr, _private_key_template, \
+ _private_key_attribute_count, _private_key_object_id_ptr, \
+ _key, _wrapped_key, _wrapped_key_len_ptr) { \
+ kcf_key_ops_params_t *kops = &(req)->rp_u.key_params; \
+ crypto_mechanism_t *mechp = _mech; \
+ \
+ (req)->rp_opgrp = KCF_OG_KEY; \
+ (req)->rp_optype = ftype; \
+ kops->ko_sid = _sid; \
+ if (mechp != NULL) { \
+ kops->ko_mech = *mechp; \
+ kops->ko_framework_mechtype = mechp->cm_type; \
+ } \
+ kops->ko_key_template = _key_template; \
+ kops->ko_key_attribute_count = _key_attribute_count; \
+ kops->ko_key_object_id_ptr = _key_object_id_ptr; \
+ kops->ko_private_key_template = _private_key_template; \
+ kops->ko_private_key_attribute_count = _private_key_attribute_count; \
+ kops->ko_private_key_object_id_ptr = _private_key_object_id_ptr; \
+ kops->ko_key = _key; \
+ kops->ko_wrapped_key = _wrapped_key; \
+ kops->ko_wrapped_key_len_ptr = _wrapped_key_len_ptr; \
+}
+
+#define KCF_WRAP_PROVMGMT_OPS_PARAMS(req, ftype, _sid, _old_pin, \
+ _old_pin_len, _pin, _pin_len, _label, _ext_info, _pd) { \
+ kcf_provmgmt_ops_params_t *pops = &(req)->rp_u.provmgmt_params; \
+ \
+ (req)->rp_opgrp = KCF_OG_PROVMGMT; \
+ (req)->rp_optype = ftype; \
+ pops->po_sid = _sid; \
+ pops->po_pin = _pin; \
+ pops->po_pin_len = _pin_len; \
+ pops->po_old_pin = _old_pin; \
+ pops->po_old_pin_len = _old_pin_len; \
+ pops->po_label = _label; \
+ pops->po_ext_info = _ext_info; \
+ pops->po_pd = _pd; \
+}
+
+#define KCF_WRAP_NOSTORE_KEY_OPS_PARAMS(req, ftype, _sid, _mech, \
+ _key_template, _key_attribute_count, _private_key_template, \
+ _private_key_attribute_count, _key, _out_template1, \
+ _out_attribute_count1, _out_template2, _out_attribute_count2) { \
+ kcf_key_ops_params_t *kops = &(req)->rp_u.key_params; \
+ crypto_mechanism_t *mechp = _mech; \
+ \
+ (req)->rp_opgrp = KCF_OG_NOSTORE_KEY; \
+ (req)->rp_optype = ftype; \
+ kops->ko_sid = _sid; \
+ if (mechp != NULL) { \
+ kops->ko_mech = *mechp; \
+ kops->ko_framework_mechtype = mechp->cm_type; \
+ } \
+ kops->ko_key_template = _key_template; \
+ kops->ko_key_attribute_count = _key_attribute_count; \
+ kops->ko_key_object_id_ptr = NULL; \
+ kops->ko_private_key_template = _private_key_template; \
+ kops->ko_private_key_attribute_count = _private_key_attribute_count; \
+ kops->ko_private_key_object_id_ptr = NULL; \
+ kops->ko_key = _key; \
+ kops->ko_wrapped_key = NULL; \
+	kops->ko_wrapped_key_len_ptr = NULL;				\
+ kops->ko_out_template1 = _out_template1; \
+ kops->ko_out_template2 = _out_template2; \
+ kops->ko_out_attribute_count1 = _out_attribute_count1; \
+ kops->ko_out_attribute_count2 = _out_attribute_count2; \
+}
+
+#define KCF_SET_PROVIDER_MECHNUM(fmtype, pd, mechp) \
+ (mechp)->cm_type = \
+ KCF_TO_PROV_MECHNUM(pd, fmtype);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_CRYPTO_OPS_IMPL_H */
diff --git a/module/icp/include/sys/crypto/sched_impl.h b/module/icp/include/sys/crypto/sched_impl.h
new file mode 100644
index 000000000..32ffa7749
--- /dev/null
+++ b/module/icp/include/sys/crypto/sched_impl.h
@@ -0,0 +1,531 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_CRYPTO_SCHED_IMPL_H
+#define _SYS_CRYPTO_SCHED_IMPL_H
+
+/*
+ * Scheduler internal structures.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/zfs_context.h>
+#include <sys/crypto/api.h>
+#include <sys/crypto/spi.h>
+#include <sys/crypto/impl.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/ops_impl.h>
+
+typedef void (kcf_func_t)(void *, int);
+
+typedef enum kcf_req_status {
+ REQ_ALLOCATED = 1,
+ REQ_WAITING, /* At the framework level */
+ REQ_INPROGRESS, /* At the provider level */
+ REQ_DONE,
+ REQ_CANCELED
+} kcf_req_status_t;
+
+typedef enum kcf_call_type {
+ CRYPTO_SYNCH = 1,
+ CRYPTO_ASYNCH
+} kcf_call_type_t;
+
+#define	CHECK_RESTRICT(crq) ((crq) != NULL && \
+	((crq)->cr_flag & CRYPTO_RESTRICTED))
+
+#define	CHECK_RESTRICT_FALSE	B_FALSE
+
+#define	CHECK_FASTPATH(crq, pd) (((crq) == NULL || \
+	!((crq)->cr_flag & CRYPTO_ALWAYS_QUEUE)) && \
+	(pd)->pd_prov_type == CRYPTO_SW_PROVIDER)
+
+#define KCF_KMFLAG(crq) (((crq) == NULL) ? KM_SLEEP : KM_NOSLEEP)
+
+/*
+ * The framework keeps an internal handle to use in the adaptive
+ * asynchronous case. This is the case when a client has the
+ * CRYPTO_ALWAYS_QUEUE bit clear and a software provider is used for
+ * the request. The request is completed in the context of the calling
+ * thread and kernel memory must be allocated with KM_NOSLEEP.
+ *
+ * The framework passes a pointer to the handle in crypto_req_handle_t
+ * argument when it calls the SPI of the software provider. The macros
+ * KCF_RHNDL() and KCF_SWFP_RHNDL() are used to do this.
+ *
+ * When a provider asks the framework for kmflag value via
+ * crypto_kmflag(9S) we use REQHNDL2_KMFLAG() macro.
+ */
+extern ulong_t kcf_swprov_hndl;
+#define KCF_RHNDL(kmflag) (((kmflag) == KM_SLEEP) ? NULL : &kcf_swprov_hndl)
+#define KCF_SWFP_RHNDL(crq) (((crq) == NULL) ? NULL : &kcf_swprov_hndl)
+#define REQHNDL2_KMFLAG(rhndl) \
+ ((rhndl == &kcf_swprov_hndl) ? KM_NOSLEEP : KM_SLEEP)
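+
+/*
+ * Illustrative sketch (editorial addition, not part of the original
+ * patch): a software provider deriving its kmem flag from the request
+ * handle handed in by the framework, per the comment above.
+ */
+#if 0	/* illustrative only */
+static void *
+example_alloc(crypto_req_handle_t rhndl, size_t len)
+{
+	/* KM_NOSLEEP when completing in the caller's context */
+	return (kmem_alloc(len, REQHNDL2_KMFLAG(rhndl)));
+}
+#endif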
+
+/* Internal call_req flags. They start after the public ones in api.h */
+
+#define CRYPTO_SETDUAL 0x00001000 /* Set the 'cont' boolean before */
+ /* submitting the request */
+#define KCF_ISDUALREQ(crq) \
+ (((crq) == NULL) ? B_FALSE : (crq->cr_flag & CRYPTO_SETDUAL))
+
+typedef struct kcf_prov_tried {
+ kcf_provider_desc_t *pt_pd;
+ struct kcf_prov_tried *pt_next;
+} kcf_prov_tried_t;
+
+#define IS_FG_SUPPORTED(mdesc, fg) \
+ (((mdesc)->pm_mech_info.cm_func_group_mask & (fg)) != 0)
+
+#define IS_PROVIDER_TRIED(pd, tlist) \
+ (tlist != NULL && is_in_triedlist(pd, tlist))
+
+#define IS_RECOVERABLE(error) \
+ (error == CRYPTO_BUFFER_TOO_BIG || \
+ error == CRYPTO_BUSY || \
+ error == CRYPTO_DEVICE_ERROR || \
+ error == CRYPTO_DEVICE_MEMORY || \
+ error == CRYPTO_KEY_SIZE_RANGE || \
+ error == CRYPTO_NO_PERMISSION)
+
+#define KCF_ATOMIC_INCR(x) atomic_add_32(&(x), 1)
+#define KCF_ATOMIC_DECR(x) atomic_add_32(&(x), -1)
+
+/*
+ * Node structure for synchronous requests.
+ */
+typedef struct kcf_sreq_node {
+ /* Should always be the first field in this structure */
+ kcf_call_type_t sn_type;
+ /*
+	 * sn_cv and sn_lock are used to wait for the
+ * operation to complete. sn_lock also protects
+ * the sn_state field.
+ */
+ kcondvar_t sn_cv;
+ kmutex_t sn_lock;
+ kcf_req_status_t sn_state;
+
+ /*
+ * Return value from the operation. This will be
+ * one of the CRYPTO_* errors defined in common.h.
+ */
+ int sn_rv;
+
+ /*
+	 * Parameters to call the SPI with. This can be
+	 * a pointer because the caller's context/stack stays valid.
+ */
+ struct kcf_req_params *sn_params;
+
+ /* Internal context for this request */
+ struct kcf_context *sn_context;
+
+ /* Provider handling this request */
+ kcf_provider_desc_t *sn_provider;
+} kcf_sreq_node_t;
+
+/*
+ * Node structure for asynchronous requests. A node can be on
+ * a chain of requests hanging off the internal context
+ * structure and can be in the global software provider queue.
+ */
+typedef struct kcf_areq_node {
+ /* Should always be the first field in this structure */
+ kcf_call_type_t an_type;
+
+ /* an_lock protects the field an_state */
+ kmutex_t an_lock;
+ kcf_req_status_t an_state;
+ crypto_call_req_t an_reqarg;
+
+ /*
+	 * Parameters to call the SPI with. We need to
+	 * save the params because the caller's stack can go away.
+ */
+ struct kcf_req_params an_params;
+
+ /*
+ * The next two fields should be NULL for operations that
+ * don't need a context.
+ */
+ /* Internal context for this request */
+ struct kcf_context *an_context;
+
+ /* next in chain of requests for context */
+ struct kcf_areq_node *an_ctxchain_next;
+
+ kcondvar_t an_turn_cv;
+ boolean_t an_is_my_turn;
+ boolean_t an_isdual; /* for internal reuse */
+
+ /*
+ * Next and previous nodes in the global software
+ * queue. These fields are NULL for a hardware
+ * provider since we use a taskq there.
+ */
+ struct kcf_areq_node *an_next;
+ struct kcf_areq_node *an_prev;
+
+ /* Provider handling this request */
+ kcf_provider_desc_t *an_provider;
+ kcf_prov_tried_t *an_tried_plist;
+
+ struct kcf_areq_node *an_idnext; /* Next in ID hash */
+ struct kcf_areq_node *an_idprev; /* Prev in ID hash */
+ kcondvar_t an_done; /* Signal request completion */
+ uint_t an_refcnt;
+} kcf_areq_node_t;
+
+#define KCF_AREQ_REFHOLD(areq) { \
+ atomic_add_32(&(areq)->an_refcnt, 1); \
+ ASSERT((areq)->an_refcnt != 0); \
+}
+
+#define KCF_AREQ_REFRELE(areq) { \
+ ASSERT((areq)->an_refcnt != 0); \
+ membar_exit(); \
+ if (atomic_add_32_nv(&(areq)->an_refcnt, -1) == 0) \
+ kcf_free_req(areq); \
+}
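+
+/*
+ * Illustrative sketch (editorial addition, not part of the original
+ * patch): the usual bracket for keeping an areq alive while it is
+ * handed to another thread; the release macro above frees the node
+ * when the last reference drops.
+ */
+#if 0	/* illustrative only */
+	KCF_AREQ_REFHOLD(areq);
+	/* ... areq may now be dereferenced safely ... */
+	KCF_AREQ_REFRELE(areq);
+#endif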
+
+#define GET_REQ_TYPE(arg) *((kcf_call_type_t *)(arg))
+
+#define NOTIFY_CLIENT(areq, err) (*(areq)->an_reqarg.cr_callback_func)(\
+ (areq)->an_reqarg.cr_callback_arg, err);
+
+/* For internally generated call requests for dual operations */
+typedef struct kcf_call_req {
+ crypto_call_req_t kr_callreq; /* external client call req */
+ kcf_req_params_t kr_params; /* Params saved for next call */
+ kcf_areq_node_t *kr_areq; /* Use this areq */
+ off_t kr_saveoffset;
+ size_t kr_savelen;
+} kcf_dual_req_t;
+
+/*
+ * The following are somewhat similar to the macros in callo.h, which
+ * implement callout tables.
+ *
+ * The lower four bits of the ID encode the table to index into. The
+ * REQID_COUNTER_HIGH bit avoids any check for wrap around when
+ * generating an ID. We assume that no request takes longer than
+ * 2^(8 * sizeof (long) - 5) other requests submitted after it, which
+ * ensures there won't be any ID collision.
+#define REQID_COUNTER_HIGH (1UL << (8 * sizeof (long) - 1))
+#define REQID_COUNTER_SHIFT 4
+#define REQID_COUNTER_LOW (1 << REQID_COUNTER_SHIFT)
+#define REQID_TABLES 16
+#define REQID_TABLE_MASK (REQID_TABLES - 1)
+
+#define REQID_BUCKETS 512
+#define REQID_BUCKET_MASK (REQID_BUCKETS - 1)
+#define REQID_HASH(id) (((id) >> REQID_COUNTER_SHIFT) & REQID_BUCKET_MASK)
+
+#define GET_REQID(areq) (areq)->an_reqarg.cr_reqid
+#define SET_REQID(areq, val) GET_REQID(areq) = val
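+
+/*
+ * Illustrative sketch (editorial addition, not part of the original
+ * patch): composing and decoding an ID under the encoding described
+ * above; ctr and tid are assumed counter and table-index values.
+ */
+#if 0	/* illustrative only */
+	crypto_req_id_t id;
+	int table, bucket;
+
+	id = REQID_COUNTER_HIGH | (ctr << REQID_COUNTER_SHIFT) | tid;
+	table = id & REQID_TABLE_MASK;	/* which kcf_reqid_table_t */
+	bucket = REQID_HASH(id);	/* bucket within rt_idhash[] */
+#endif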
+
+/*
+ * Hash table for async requests.
+ */
+typedef struct kcf_reqid_table {
+ kmutex_t rt_lock;
+ crypto_req_id_t rt_curid;
+ kcf_areq_node_t *rt_idhash[REQID_BUCKETS];
+} kcf_reqid_table_t;
+
+/*
+ * Global software provider queue structure. Requests that are to be
+ * handled by a SW provider and that have the ALWAYS_QUEUE flag set
+ * are queued here.
+ */
+typedef struct kcf_global_swq {
+ /*
+ * gs_cv and gs_lock are used to wait for new requests.
+ * gs_lock protects the changes to the queue.
+ */
+ kcondvar_t gs_cv;
+ kmutex_t gs_lock;
+ uint_t gs_njobs;
+ uint_t gs_maxjobs;
+ kcf_areq_node_t *gs_first;
+ kcf_areq_node_t *gs_last;
+} kcf_global_swq_t;
+
+
+/*
+ * Internal representation of a canonical context. It embeds the
+ * crypto_ctx_t structure so that only one memory allocation is needed.
+ * The SPI field ((crypto_ctx_t *)ctx)->cc_framework_private maps to
+ * this structure.
+ */
+typedef struct kcf_context {
+ crypto_ctx_t kc_glbl_ctx;
+ uint_t kc_refcnt;
+ kmutex_t kc_in_use_lock;
+ /*
+ * kc_req_chain_first and kc_req_chain_last are used to chain
+ * multiple async requests using the same context. They should be
+ * NULL for sync requests.
+ */
+ kcf_areq_node_t *kc_req_chain_first;
+ kcf_areq_node_t *kc_req_chain_last;
+ kcf_provider_desc_t *kc_prov_desc; /* Prov. descriptor */
+ kcf_provider_desc_t *kc_sw_prov_desc; /* Prov. descriptor */
+ kcf_mech_entry_t *kc_mech;
+ struct kcf_context *kc_secondctx; /* for dual contexts */
+} kcf_context_t;
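+
+/*
+ * Illustrative sketch (editorial addition, not part of the original
+ * patch): recovering the framework-private context from an SPI context
+ * pointer ctx, per the mapping described above.
+ */
+#if 0	/* illustrative only */
+	kcf_context_t *ictx =
+	    (kcf_context_t *)ctx->cc_framework_private;
+#endif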
+
+/*
+ * Bump up the reference count on the framework private context. A
+ * global context or a request that references this structure should
+ * do a hold.
+ */
+#define KCF_CONTEXT_REFHOLD(ictx) { \
+ atomic_add_32(&(ictx)->kc_refcnt, 1); \
+ ASSERT((ictx)->kc_refcnt != 0); \
+}
+
+/*
+ * Decrement the reference count on the framework private context.
+ * When the last reference is released, the framework private
+ * context structure is freed along with the global context.
+ */
+#define KCF_CONTEXT_REFRELE(ictx) { \
+ ASSERT((ictx)->kc_refcnt != 0); \
+ membar_exit(); \
+ if (atomic_add_32_nv(&(ictx)->kc_refcnt, -1) == 0) \
+ kcf_free_context(ictx); \
+}
+
+/*
+ * Check if we can release the context now. In case of CRYPTO_QUEUED
+ * we do not release it, as we can do so only after the provider has
+ * notified us. In case of CRYPTO_BUSY, the client can retry the
+ * request using the context, so we do not release it.
+ *
+ * This macro should be called only from the final routine in
+ * an init/update/final sequence. We do not release the context in case
+ * of update operations. We require the consumer to free it
+ * explicitly, in case it wants to abandon the operation. This is done
+ * as there may be mechanisms in ECB mode that can continue even if
+ * an operation on a block fails.
+ */
+#define KCF_CONTEXT_COND_RELEASE(rv, kcf_ctx) { \
+ if (KCF_CONTEXT_DONE(rv)) \
+ KCF_CONTEXT_REFRELE(kcf_ctx); \
+}
+
+/*
+ * This macro determines whether we're done with a context.
+ */
+#define KCF_CONTEXT_DONE(rv) \
+ ((rv) != CRYPTO_QUEUED && (rv) != CRYPTO_BUSY && \
+ (rv) != CRYPTO_BUFFER_TOO_SMALL)
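+
+/*
+ * Illustrative sketch (editorial addition, not part of the original
+ * patch): the tail of a final routine releasing the framework-private
+ * context only once the operation has truly completed; error and
+ * kcf_ctx are assumed caller variables.
+ */
+#if 0	/* illustrative only */
+	KCF_CONTEXT_COND_RELEASE(error, kcf_ctx);
+	return (error);
+#endif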
+
+/*
+ * A crypto_ctx_template_t is internally a pointer to this struct
+ */
+typedef struct kcf_ctx_template {
+ crypto_kcf_provider_handle_t ct_prov_handle; /* provider handle */
+ uint_t ct_generation; /* generation # */
+ size_t ct_size; /* for freeing */
+ crypto_spi_ctx_template_t ct_prov_tmpl; /* context template */
+ /* from the SW prov */
+} kcf_ctx_template_t;
+
+/*
+ * Structure for pool of threads working on global software queue.
+ */
+typedef struct kcf_pool {
+ uint32_t kp_threads; /* Number of threads in pool */
+ uint32_t kp_idlethreads; /* Idle threads in pool */
+ uint32_t kp_blockedthreads; /* Blocked threads in pool */
+
+ /*
+ * cv & lock to monitor the condition when no threads
+ * are around. In this case the failover thread kicks in.
+ */
+ kcondvar_t kp_nothr_cv;
+ kmutex_t kp_thread_lock;
+
+ /* Userspace thread creator variables. */
+ boolean_t kp_signal_create_thread; /* Create requested flag */
+ int kp_nthrs; /* # of threads to create */
+ boolean_t kp_user_waiting; /* Thread waiting for work */
+
+ /*
+ * cv & lock for the condition where more threads need to be
+	 * created. kp_user_lock also protects the three fields above.
+ */
+ kcondvar_t kp_user_cv; /* Creator cond. variable */
+ kmutex_t kp_user_lock; /* Creator lock */
+} kcf_pool_t;
+
+
+/*
+ * State of a crypto bufcall element.
+ */
+typedef enum cbuf_state {
+ CBUF_FREE = 1,
+ CBUF_WAITING,
+ CBUF_RUNNING
+} cbuf_state_t;
+
+/*
+ * Structure of a crypto bufcall element.
+ */
+typedef struct kcf_cbuf_elem {
+ /*
+	 * lock and cv to wait for CBUF_RUNNING to be done.
+ * kc_lock also protects kc_state.
+ */
+ kmutex_t kc_lock;
+ kcondvar_t kc_cv;
+ cbuf_state_t kc_state;
+
+ struct kcf_cbuf_elem *kc_next;
+ struct kcf_cbuf_elem *kc_prev;
+
+ void (*kc_func)(void *arg);
+ void *kc_arg;
+} kcf_cbuf_elem_t;
+
+/*
+ * State of a notify element.
+ */
+typedef enum ntfy_elem_state {
+ NTFY_WAITING = 1,
+ NTFY_RUNNING
+} ntfy_elem_state_t;
+
+/*
+ * Structure of a notify list element.
+ */
+typedef struct kcf_ntfy_elem {
+ /*
+ * lock and cv to wait for NTFY_RUNNING to be done.
+ * kn_lock also protects kn_state.
+ */
+ kmutex_t kn_lock;
+ kcondvar_t kn_cv;
+ ntfy_elem_state_t kn_state;
+
+ struct kcf_ntfy_elem *kn_next;
+ struct kcf_ntfy_elem *kn_prev;
+
+ crypto_notify_callback_t kn_func;
+ uint32_t kn_event_mask;
+} kcf_ntfy_elem_t;
+
+
+/*
+ * The following values are based on the assumption that it would
+ * take around eight CPUs to load a hardware provider (this is true for
+ * at least one product) and that a kernel client may come from different
+ * low-priority interrupt levels. We will have CYRPTO_TASKQ_MIN number
+ * of cached taskq entries. The CRYPTO_TASKQ_MAX number is based on
+ * a throughput of 1GB/s using 512-byte buffers. These are just
+ * reasonable estimates and might need to change in the future.
+ */
+#define CRYPTO_TASKQ_THREADS 8
+#define CYRPTO_TASKQ_MIN 64
+#define	CRYPTO_TASKQ_MAX	(2 * 1024 * 1024)
+
+extern int crypto_taskq_threads;
+extern int crypto_taskq_minalloc;
+extern int crypto_taskq_maxalloc;
+extern kcf_global_swq_t *gswq;
+extern int kcf_maxthreads;
+extern int kcf_minthreads;
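+
+/*
+ * Illustrative sketch (editorial addition, not part of the original
+ * patch): how a scheduler might size a taskq from these tunables,
+ * using the SPL's taskq_create() interface; tq is an assumed variable.
+ */
+#if 0	/* illustrative only */
+	tq = taskq_create("crypto_taskq", crypto_taskq_threads,
+	    minclsyspri, crypto_taskq_minalloc, crypto_taskq_maxalloc,
+	    TASKQ_PREPOPULATE);
+#endif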
+
+/*
+ * All pending crypto bufcalls are put on a list. cbuf_list_lock
+ * protects changes to this list.
+ */
+extern kmutex_t cbuf_list_lock;
+extern kcondvar_t cbuf_list_cv;
+
+/*
+ * All event subscribers are put on a list. kcf_notify_list_lock
+ * protects changes to this list.
+ */
+extern kmutex_t ntfy_list_lock;
+extern kcondvar_t ntfy_list_cv;
+
+boolean_t kcf_get_next_logical_provider_member(kcf_provider_desc_t *,
+ kcf_provider_desc_t *, kcf_provider_desc_t **);
+extern int kcf_get_hardware_provider(crypto_mech_type_t, crypto_mech_type_t,
+ boolean_t, kcf_provider_desc_t *, kcf_provider_desc_t **,
+ crypto_func_group_t);
+extern int kcf_get_hardware_provider_nomech(offset_t, offset_t,
+ boolean_t, kcf_provider_desc_t *, kcf_provider_desc_t **);
+extern void kcf_free_triedlist(kcf_prov_tried_t *);
+extern kcf_prov_tried_t *kcf_insert_triedlist(kcf_prov_tried_t **,
+ kcf_provider_desc_t *, int);
+extern kcf_provider_desc_t *kcf_get_mech_provider(crypto_mech_type_t,
+ kcf_mech_entry_t **, int *, kcf_prov_tried_t *, crypto_func_group_t,
+ boolean_t, size_t);
+extern kcf_provider_desc_t *kcf_get_dual_provider(crypto_mechanism_t *,
+ crypto_mechanism_t *, kcf_mech_entry_t **, crypto_mech_type_t *,
+ crypto_mech_type_t *, int *, kcf_prov_tried_t *,
+ crypto_func_group_t, crypto_func_group_t, boolean_t, size_t);
+extern crypto_ctx_t *kcf_new_ctx(crypto_call_req_t *, kcf_provider_desc_t *,
+ crypto_session_id_t);
+extern int kcf_submit_request(kcf_provider_desc_t *, crypto_ctx_t *,
+ crypto_call_req_t *, kcf_req_params_t *, boolean_t);
+extern void kcf_sched_destroy(void);
+extern void kcf_sched_init(void);
+extern void kcf_sched_start(void);
+extern void kcf_sop_done(kcf_sreq_node_t *, int);
+extern void kcf_aop_done(kcf_areq_node_t *, int);
+extern int common_submit_request(kcf_provider_desc_t *,
+ crypto_ctx_t *, kcf_req_params_t *, crypto_req_handle_t);
+extern void kcf_free_context(kcf_context_t *);
+
+extern int kcf_svc_wait(int *);
+extern int kcf_svc_do_run(void);
+extern int kcf_need_signature_verification(kcf_provider_desc_t *);
+extern void kcf_verify_signature(void *);
+extern struct modctl *kcf_get_modctl(crypto_provider_info_t *);
+extern void verify_unverified_providers(void);
+extern void kcf_free_req(kcf_areq_node_t *areq);
+extern void crypto_bufcall_service(void);
+
+extern void kcf_walk_ntfylist(uint32_t, void *);
+extern void kcf_do_notify(kcf_provider_desc_t *, boolean_t);
+
+extern kcf_dual_req_t *kcf_alloc_req(crypto_call_req_t *);
+extern void kcf_next_req(void *, int);
+extern void kcf_last_req(void *, int);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_CRYPTO_SCHED_IMPL_H */
diff --git a/module/icp/include/sys/crypto/spi.h b/module/icp/include/sys/crypto/spi.h
new file mode 100644
index 000000000..b4d6467f9
--- /dev/null
+++ b/module/icp/include/sys/crypto/spi.h
@@ -0,0 +1,721 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_CRYPTO_SPI_H
+#define _SYS_CRYPTO_SPI_H
+
+/*
+ * CSPI: Cryptographic Service Provider Interface.
+ */
+
+#include <sys/zfs_context.h>
+#include <sys/crypto/common.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define CRYPTO_SPI_VERSION_1 1
+#define CRYPTO_SPI_VERSION_2 2
+#define CRYPTO_SPI_VERSION_3 3
+
+/*
+ * Provider-private handle. This handle is specified by a provider
+ * when it registers by means of the pi_provider_handle field of
+ * the crypto_provider_info structure, and passed to the provider
+ * when its entry points are invoked.
+ */
+typedef void *crypto_provider_handle_t;
+
+/*
+ * Context templates can be used by software providers to pre-process
+ * keying material, such as key schedules. They are allocated by
+ * a software provider's create_ctx_template(9E) entry point, and passed
+ * as an argument to initialization and atomic provider entry points.
+ */
+typedef void *crypto_spi_ctx_template_t;
+
+/*
+ * Request handles are used by the kernel to identify an asynchronous
+ * request being processed by a provider. A handle is passed by the
+ * kernel to a hardware provider when submitting a request, and must be
+ * specified by the provider when calling crypto_op_notification(9F).
+ */
+typedef void *crypto_req_handle_t;
+
+/* Values for cc_flags field */
+#define CRYPTO_INIT_OPSTATE 0x00000001 /* allocate and init cc_opstate */
+#define CRYPTO_USE_OPSTATE 0x00000002 /* .. start using it as context */
+
+/*
+ * The context structure is passed from the kernel to a provider.
+ * It contains the information needed to process a multi-part or
+ * single part operation. The context structure is not used
+ * by atomic operations.
+ *
+ * Parameters needed to perform a cryptographic operation, such
+ * as keys, mechanisms, input and output buffers, are passed
+ * as separate arguments to Provider routines.
+ */
+typedef struct crypto_ctx {
+ crypto_provider_handle_t cc_provider;
+ crypto_session_id_t cc_session;
+ void *cc_provider_private; /* owned by provider */
+ void *cc_framework_private; /* owned by framework */
+ uint32_t cc_flags; /* flags */
+ void *cc_opstate; /* state */
+} crypto_ctx_t;
+
+/*
+ * Extended provider information.
+ */
+
+/*
+ * Valid values for the ei_flags field of the extended info structure.
+ * They match the RSA Security, Inc. PKCS#11 tokenInfo flags.
+ */
+#define CRYPTO_EXTF_RNG 0x00000001
+#define CRYPTO_EXTF_WRITE_PROTECTED 0x00000002
+#define CRYPTO_EXTF_LOGIN_REQUIRED 0x00000004
+#define CRYPTO_EXTF_USER_PIN_INITIALIZED 0x00000008
+#define CRYPTO_EXTF_CLOCK_ON_TOKEN 0x00000040
+#define CRYPTO_EXTF_PROTECTED_AUTHENTICATION_PATH 0x00000100
+#define CRYPTO_EXTF_DUAL_CRYPTO_OPERATIONS 0x00000200
+#define CRYPTO_EXTF_TOKEN_INITIALIZED 0x00000400
+#define CRYPTO_EXTF_USER_PIN_COUNT_LOW 0x00010000
+#define CRYPTO_EXTF_USER_PIN_FINAL_TRY 0x00020000
+#define CRYPTO_EXTF_USER_PIN_LOCKED 0x00040000
+#define CRYPTO_EXTF_USER_PIN_TO_BE_CHANGED 0x00080000
+#define CRYPTO_EXTF_SO_PIN_COUNT_LOW 0x00100000
+#define CRYPTO_EXTF_SO_PIN_FINAL_TRY 0x00200000
+#define CRYPTO_EXTF_SO_PIN_LOCKED 0x00400000
+#define CRYPTO_EXTF_SO_PIN_TO_BE_CHANGED 0x00800000
+
+/*
+ * The crypto_control_ops structure contains pointers to control
+ * operations for cryptographic providers. It is passed through
+ * the crypto_ops(9S) structure when providers register with the
+ * kernel using crypto_register_provider(9F).
+ */
+typedef struct crypto_control_ops {
+ void (*provider_status)(crypto_provider_handle_t, uint_t *);
+} crypto_control_ops_t;
+
+/*
+ * The crypto_ctx_ops structure contains pointers to context and context
+ * template management operations for cryptographic providers. It is
+ * passed through the crypto_ops(9S) structure when providers register
+ * with the kernel using crypto_register_provider(9F).
+ */
+typedef struct crypto_ctx_ops {
+ int (*create_ctx_template)(crypto_provider_handle_t,
+ crypto_mechanism_t *, crypto_key_t *,
+ crypto_spi_ctx_template_t *, size_t *, crypto_req_handle_t);
+ int (*free_context)(crypto_ctx_t *);
+} crypto_ctx_ops_t;
+
+/*
+ * The crypto_digest_ops structure contains pointers to digest
+ * operations for cryptographic providers. It is passed through
+ * the crypto_ops(9S) structure when providers register with the
+ * kernel using crypto_register_provider(9F).
+ */
+typedef struct crypto_digest_ops {
+ int (*digest_init)(crypto_ctx_t *, crypto_mechanism_t *,
+ crypto_req_handle_t);
+ int (*digest)(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
+ crypto_req_handle_t);
+ int (*digest_update)(crypto_ctx_t *, crypto_data_t *,
+ crypto_req_handle_t);
+ int (*digest_key)(crypto_ctx_t *, crypto_key_t *, crypto_req_handle_t);
+ int (*digest_final)(crypto_ctx_t *, crypto_data_t *,
+ crypto_req_handle_t);
+ int (*digest_atomic)(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_data_t *,
+ crypto_data_t *, crypto_req_handle_t);
+} crypto_digest_ops_t;
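+
+/*
+ * Illustrative sketch (editorial addition, not part of the original
+ * patch): a software digest provider exporting its entry points
+ * through this vector; the sha1_digest_* names are placeholders.
+ */
+#if 0	/* illustrative only */
+static crypto_digest_ops_t sha1_digest_ops = {
+	sha1_digest_init,
+	sha1_digest,
+	sha1_digest_update,
+	NULL,			/* digest_key */
+	sha1_digest_final,
+	sha1_digest_atomic
+};
+#endif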
+
+/*
+ * The crypto_cipher_ops structure contains pointers to encryption
+ * and decryption operations for cryptographic providers. It is
+ * passed through the crypto_ops(9S) structure when providers register
+ * with the kernel using crypto_register_provider(9F).
+ */
+typedef struct crypto_cipher_ops {
+ int (*encrypt_init)(crypto_ctx_t *,
+ crypto_mechanism_t *, crypto_key_t *,
+ crypto_spi_ctx_template_t, crypto_req_handle_t);
+ int (*encrypt)(crypto_ctx_t *,
+ crypto_data_t *, crypto_data_t *, crypto_req_handle_t);
+ int (*encrypt_update)(crypto_ctx_t *,
+ crypto_data_t *, crypto_data_t *, crypto_req_handle_t);
+ int (*encrypt_final)(crypto_ctx_t *,
+ crypto_data_t *, crypto_req_handle_t);
+ int (*encrypt_atomic)(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
+ crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
+
+ int (*decrypt_init)(crypto_ctx_t *,
+ crypto_mechanism_t *, crypto_key_t *,
+ crypto_spi_ctx_template_t, crypto_req_handle_t);
+ int (*decrypt)(crypto_ctx_t *,
+ crypto_data_t *, crypto_data_t *, crypto_req_handle_t);
+ int (*decrypt_update)(crypto_ctx_t *,
+ crypto_data_t *, crypto_data_t *, crypto_req_handle_t);
+ int (*decrypt_final)(crypto_ctx_t *,
+ crypto_data_t *, crypto_req_handle_t);
+ int (*decrypt_atomic)(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
+ crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
+} crypto_cipher_ops_t;
+
+/*
+ * The crypto_mac_ops structure contains pointers to MAC
+ * operations for cryptographic providers. It is passed through
+ * the crypto_ops(9S) structure when providers register with the
+ * kernel using crypto_register_provider(9F).
+ */
+typedef struct crypto_mac_ops {
+ int (*mac_init)(crypto_ctx_t *,
+ crypto_mechanism_t *, crypto_key_t *,
+ crypto_spi_ctx_template_t, crypto_req_handle_t);
+ int (*mac)(crypto_ctx_t *,
+ crypto_data_t *, crypto_data_t *, crypto_req_handle_t);
+ int (*mac_update)(crypto_ctx_t *,
+ crypto_data_t *, crypto_req_handle_t);
+ int (*mac_final)(crypto_ctx_t *,
+ crypto_data_t *, crypto_req_handle_t);
+ int (*mac_atomic)(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
+ crypto_data_t *, crypto_spi_ctx_template_t,
+ crypto_req_handle_t);
+ int (*mac_verify_atomic)(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
+ crypto_data_t *, crypto_spi_ctx_template_t,
+ crypto_req_handle_t);
+} crypto_mac_ops_t;
+
+/*
+ * The crypto_sign_ops structure contains pointers to signing
+ * operations for cryptographic providers. It is passed through
+ * the crypto_ops(9S) structure when providers register with the
+ * kernel using crypto_register_provider(9F).
+ */
+typedef struct crypto_sign_ops {
+ int (*sign_init)(crypto_ctx_t *,
+ crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t,
+ crypto_req_handle_t);
+ int (*sign)(crypto_ctx_t *,
+ crypto_data_t *, crypto_data_t *, crypto_req_handle_t);
+ int (*sign_update)(crypto_ctx_t *,
+ crypto_data_t *, crypto_req_handle_t);
+ int (*sign_final)(crypto_ctx_t *,
+ crypto_data_t *, crypto_req_handle_t);
+ int (*sign_atomic)(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
+ crypto_data_t *, crypto_spi_ctx_template_t,
+ crypto_req_handle_t);
+ int (*sign_recover_init)(crypto_ctx_t *, crypto_mechanism_t *,
+ crypto_key_t *, crypto_spi_ctx_template_t,
+ crypto_req_handle_t);
+ int (*sign_recover)(crypto_ctx_t *,
+ crypto_data_t *, crypto_data_t *, crypto_req_handle_t);
+ int (*sign_recover_atomic)(crypto_provider_handle_t,
+ crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *,
+ crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t,
+ crypto_req_handle_t);
+} crypto_sign_ops_t;
+
+/*
+ * The crypto_verify_ops structure contains pointers to verify
+ * operations for cryptographic providers. It is passed through
+ * the crypto_ops(9S) structure when providers register with the
+ * kernel using crypto_register_provider(9F).
+ */
+typedef struct crypto_verify_ops {
+ int (*verify_init)(crypto_ctx_t *,
+ crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t,
+ crypto_req_handle_t);
+ int (*do_verify)(crypto_ctx_t *,
+ crypto_data_t *, crypto_data_t *, crypto_req_handle_t);
+ int (*verify_update)(crypto_ctx_t *,
+ crypto_data_t *, crypto_req_handle_t);
+ int (*verify_final)(crypto_ctx_t *,
+ crypto_data_t *, crypto_req_handle_t);
+ int (*verify_atomic)(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
+ crypto_data_t *, crypto_spi_ctx_template_t,
+ crypto_req_handle_t);
+ int (*verify_recover_init)(crypto_ctx_t *, crypto_mechanism_t *,
+ crypto_key_t *, crypto_spi_ctx_template_t,
+ crypto_req_handle_t);
+ int (*verify_recover)(crypto_ctx_t *,
+ crypto_data_t *, crypto_data_t *, crypto_req_handle_t);
+ int (*verify_recover_atomic)(crypto_provider_handle_t,
+ crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *,
+ crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t,
+ crypto_req_handle_t);
+} crypto_verify_ops_t;
+
+/*
+ * The crypto_dual_ops structure contains pointers to dual
+ * cipher and sign/verify operations for cryptographic providers.
+ * It is passed through the crypto_ops(9S) structure when
+ * providers register with the kernel using
+ * crypto_register_provider(9F).
+ */
+typedef struct crypto_dual_ops {
+ int (*digest_encrypt_update)(
+ crypto_ctx_t *, crypto_ctx_t *, crypto_data_t *,
+ crypto_data_t *, crypto_req_handle_t);
+ int (*decrypt_digest_update)(
+ crypto_ctx_t *, crypto_ctx_t *, crypto_data_t *,
+ crypto_data_t *, crypto_req_handle_t);
+ int (*sign_encrypt_update)(
+ crypto_ctx_t *, crypto_ctx_t *, crypto_data_t *,
+ crypto_data_t *, crypto_req_handle_t);
+ int (*decrypt_verify_update)(
+ crypto_ctx_t *, crypto_ctx_t *, crypto_data_t *,
+ crypto_data_t *, crypto_req_handle_t);
+} crypto_dual_ops_t;
+
+/*
+ * The crypto_dual_cipher_mac_ops structure contains pointers to dual
+ * cipher and MAC operations for cryptographic providers.
+ * It is passed through the crypto_ops(9S) structure when
+ * providers register with the kernel using
+ * crypto_register_provider(9F).
+ */
+typedef struct crypto_dual_cipher_mac_ops {
+ int (*encrypt_mac_init)(crypto_ctx_t *,
+ crypto_mechanism_t *, crypto_key_t *, crypto_mechanism_t *,
+ crypto_key_t *, crypto_spi_ctx_template_t,
+ crypto_spi_ctx_template_t, crypto_req_handle_t);
+ int (*encrypt_mac)(crypto_ctx_t *,
+ crypto_data_t *, crypto_dual_data_t *, crypto_data_t *,
+ crypto_req_handle_t);
+ int (*encrypt_mac_update)(crypto_ctx_t *,
+ crypto_data_t *, crypto_dual_data_t *, crypto_req_handle_t);
+ int (*encrypt_mac_final)(crypto_ctx_t *,
+ crypto_dual_data_t *, crypto_data_t *, crypto_req_handle_t);
+ int (*encrypt_mac_atomic)(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_mechanism_t *,
+ crypto_key_t *, crypto_data_t *, crypto_dual_data_t *,
+ crypto_data_t *, crypto_spi_ctx_template_t,
+ crypto_spi_ctx_template_t, crypto_req_handle_t);
+
+ int (*mac_decrypt_init)(crypto_ctx_t *,
+ crypto_mechanism_t *, crypto_key_t *, crypto_mechanism_t *,
+ crypto_key_t *, crypto_spi_ctx_template_t,
+ crypto_spi_ctx_template_t, crypto_req_handle_t);
+ int (*mac_decrypt)(crypto_ctx_t *,
+ crypto_dual_data_t *, crypto_data_t *, crypto_data_t *,
+ crypto_req_handle_t);
+ int (*mac_decrypt_update)(crypto_ctx_t *,
+ crypto_dual_data_t *, crypto_data_t *, crypto_req_handle_t);
+ int (*mac_decrypt_final)(crypto_ctx_t *,
+ crypto_data_t *, crypto_data_t *, crypto_req_handle_t);
+ int (*mac_decrypt_atomic)(crypto_provider_handle_t,
+ crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *,
+ crypto_mechanism_t *, crypto_key_t *, crypto_dual_data_t *,
+ crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t,
+ crypto_spi_ctx_template_t, crypto_req_handle_t);
+ int (*mac_verify_decrypt_atomic)(crypto_provider_handle_t,
+ crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *,
+ crypto_mechanism_t *, crypto_key_t *, crypto_dual_data_t *,
+ crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t,
+ crypto_spi_ctx_template_t, crypto_req_handle_t);
+} crypto_dual_cipher_mac_ops_t;
+
+/*
+ * The crypto_random_number_ops structure contains pointers to random
+ * number operations for cryptographic providers. It is passed through
+ * the crypto_ops(9S) structure when providers register with the
+ * kernel using crypto_register_provider(9F).
+ */
+typedef struct crypto_random_number_ops {
+ int (*seed_random)(crypto_provider_handle_t, crypto_session_id_t,
+ uchar_t *, size_t, uint_t, uint32_t, crypto_req_handle_t);
+ int (*generate_random)(crypto_provider_handle_t, crypto_session_id_t,
+ uchar_t *, size_t, crypto_req_handle_t);
+} crypto_random_number_ops_t;
+
+/*
+ * Flag values for seed_random.
+ */
+#define CRYPTO_SEED_NOW 0x00000001
+
+/*
+ * The crypto_session_ops structure contains pointers to session
+ * operations for cryptographic providers. It is passed through
+ * the crypto_ops(9S) structure when providers register with the
+ * kernel using crypto_register_provider(9F).
+ */
+typedef struct crypto_session_ops {
+ int (*session_open)(crypto_provider_handle_t, crypto_session_id_t *,
+ crypto_req_handle_t);
+ int (*session_close)(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_req_handle_t);
+ int (*session_login)(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_user_type_t, char *, size_t, crypto_req_handle_t);
+ int (*session_logout)(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_req_handle_t);
+} crypto_session_ops_t;
+
+/*
+ * The crypto_object_ops structure contains pointers to object
+ * operations for cryptographic providers. It is passed through
+ * the crypto_ops(9S) structure when providers register with the
+ * kernel using crypto_register_provider(9F).
+ */
+typedef struct crypto_object_ops {
+ int (*object_create)(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_object_attribute_t *, uint_t, crypto_object_id_t *,
+ crypto_req_handle_t);
+ int (*object_copy)(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_object_id_t, crypto_object_attribute_t *, uint_t,
+ crypto_object_id_t *, crypto_req_handle_t);
+ int (*object_destroy)(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_object_id_t, crypto_req_handle_t);
+ int (*object_get_size)(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_object_id_t, size_t *, crypto_req_handle_t);
+ int (*object_get_attribute_value)(crypto_provider_handle_t,
+ crypto_session_id_t, crypto_object_id_t,
+ crypto_object_attribute_t *, uint_t, crypto_req_handle_t);
+ int (*object_set_attribute_value)(crypto_provider_handle_t,
+ crypto_session_id_t, crypto_object_id_t,
+ crypto_object_attribute_t *, uint_t, crypto_req_handle_t);
+ int (*object_find_init)(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_object_attribute_t *, uint_t, void **,
+ crypto_req_handle_t);
+ int (*object_find)(crypto_provider_handle_t, void *,
+ crypto_object_id_t *, uint_t, uint_t *, crypto_req_handle_t);
+ int (*object_find_final)(crypto_provider_handle_t, void *,
+ crypto_req_handle_t);
+} crypto_object_ops_t;
+
+/*
+ * The crypto_key_ops structure contains pointers to key
+ * operations for cryptographic providers. It is passed through
+ * the crypto_ops(9S) structure when providers register with the
+ * kernel using crypto_register_provider(9F).
+ */
+typedef struct crypto_key_ops {
+ int (*key_generate)(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_object_attribute_t *, uint_t,
+ crypto_object_id_t *, crypto_req_handle_t);
+ int (*key_generate_pair)(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_object_attribute_t *, uint_t,
+ crypto_object_attribute_t *, uint_t, crypto_object_id_t *,
+ crypto_object_id_t *, crypto_req_handle_t);
+ int (*key_wrap)(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_object_id_t *,
+ uchar_t *, size_t *, crypto_req_handle_t);
+ int (*key_unwrap)(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_key_t *, uchar_t *, size_t *,
+ crypto_object_attribute_t *, uint_t,
+ crypto_object_id_t *, crypto_req_handle_t);
+ int (*key_derive)(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_object_attribute_t *,
+ uint_t, crypto_object_id_t *, crypto_req_handle_t);
+ int (*key_check)(crypto_provider_handle_t, crypto_mechanism_t *,
+ crypto_key_t *);
+} crypto_key_ops_t;
+
+/*
+ * The crypto_provider_management_ops structure contains pointers
+ * to management operations for cryptographic providers. It is passed
+ * through the crypto_ops(9S) structure when providers register with the
+ * kernel using crypto_register_provider(9F).
+ */
+typedef struct crypto_provider_management_ops {
+ int (*ext_info)(crypto_provider_handle_t,
+ crypto_provider_ext_info_t *, crypto_req_handle_t);
+ int (*init_token)(crypto_provider_handle_t, char *, size_t,
+ char *, crypto_req_handle_t);
+ int (*init_pin)(crypto_provider_handle_t, crypto_session_id_t,
+ char *, size_t, crypto_req_handle_t);
+ int (*set_pin)(crypto_provider_handle_t, crypto_session_id_t,
+ char *, size_t, char *, size_t, crypto_req_handle_t);
+} crypto_provider_management_ops_t;
+
+typedef struct crypto_mech_ops {
+ int (*copyin_mechanism)(crypto_provider_handle_t,
+ crypto_mechanism_t *, crypto_mechanism_t *, int *, int);
+ int (*copyout_mechanism)(crypto_provider_handle_t,
+ crypto_mechanism_t *, crypto_mechanism_t *, int *, int);
+ int (*free_mechanism)(crypto_provider_handle_t, crypto_mechanism_t *);
+} crypto_mech_ops_t;
+
+typedef struct crypto_nostore_key_ops {
+ int (*nostore_key_generate)(crypto_provider_handle_t,
+ crypto_session_id_t, crypto_mechanism_t *,
+ crypto_object_attribute_t *, uint_t, crypto_object_attribute_t *,
+ uint_t, crypto_req_handle_t);
+ int (*nostore_key_generate_pair)(crypto_provider_handle_t,
+ crypto_session_id_t, crypto_mechanism_t *,
+ crypto_object_attribute_t *, uint_t, crypto_object_attribute_t *,
+ uint_t, crypto_object_attribute_t *, uint_t,
+ crypto_object_attribute_t *, uint_t, crypto_req_handle_t);
+ int (*nostore_key_derive)(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_object_attribute_t *,
+ uint_t, crypto_object_attribute_t *, uint_t, crypto_req_handle_t);
+} crypto_nostore_key_ops_t;
+
+/*
+ * The crypto_ops(9S) structure contains pointers to the structures
+ * containing the entry points implemented by cryptographic providers.
+ * It is specified as part of the crypto_provider_info(9S)
+ * supplied by a provider when it registers with the kernel
+ * by calling crypto_register_provider(9F).
+ */
+typedef struct crypto_ops_v1 {
+ crypto_control_ops_t *co_control_ops;
+ crypto_digest_ops_t *co_digest_ops;
+ crypto_cipher_ops_t *co_cipher_ops;
+ crypto_mac_ops_t *co_mac_ops;
+ crypto_sign_ops_t *co_sign_ops;
+ crypto_verify_ops_t *co_verify_ops;
+ crypto_dual_ops_t *co_dual_ops;
+ crypto_dual_cipher_mac_ops_t *co_dual_cipher_mac_ops;
+ crypto_random_number_ops_t *co_random_ops;
+ crypto_session_ops_t *co_session_ops;
+ crypto_object_ops_t *co_object_ops;
+ crypto_key_ops_t *co_key_ops;
+ crypto_provider_management_ops_t *co_provider_ops;
+ crypto_ctx_ops_t *co_ctx_ops;
+} crypto_ops_v1_t;
+
+typedef struct crypto_ops_v2 {
+ crypto_ops_v1_t v1_ops;
+ crypto_mech_ops_t *co_mech_ops;
+} crypto_ops_v2_t;
+
+typedef struct crypto_ops_v3 {
+ crypto_ops_v2_t v2_ops;
+ crypto_nostore_key_ops_t *co_nostore_key_ops;
+} crypto_ops_v3_t;
+
+typedef struct crypto_ops {
+ union {
+ crypto_ops_v3_t cou_v3;
+ crypto_ops_v2_t cou_v2;
+ crypto_ops_v1_t cou_v1;
+ } cou;
+} crypto_ops_t;
+
+#define co_control_ops cou.cou_v1.co_control_ops
+#define co_digest_ops cou.cou_v1.co_digest_ops
+#define co_cipher_ops cou.cou_v1.co_cipher_ops
+#define co_mac_ops cou.cou_v1.co_mac_ops
+#define co_sign_ops cou.cou_v1.co_sign_ops
+#define co_verify_ops cou.cou_v1.co_verify_ops
+#define co_dual_ops cou.cou_v1.co_dual_ops
+#define co_dual_cipher_mac_ops cou.cou_v1.co_dual_cipher_mac_ops
+#define co_random_ops cou.cou_v1.co_random_ops
+#define co_session_ops cou.cou_v1.co_session_ops
+#define co_object_ops cou.cou_v1.co_object_ops
+#define co_key_ops cou.cou_v1.co_key_ops
+#define co_provider_ops cou.cou_v1.co_provider_ops
+#define co_ctx_ops cou.cou_v1.co_ctx_ops
+#define co_mech_ops cou.cou_v2.co_mech_ops
+#define co_nostore_key_ops cou.cou_v3.co_nostore_key_ops
+
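+/*
+ * The macros above let code address any version's members directly
+ * through a crypto_ops_t. A minimal sketch (my_cipher_ops is a
+ * hypothetical crypto_cipher_ops_t supplied by the provider):
+ *
+ * static crypto_cipher_ops_t my_cipher_ops;
+ * static crypto_ops_t my_ops;
+ *
+ * my_ops.co_cipher_ops = &my_cipher_ops;
+ * my_ops.co_mech_ops = NULL;
+ * my_ops.co_nostore_key_ops = NULL;
+ */
+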
+/*
+ * The mechanism info structure crypto_mech_info_t contains a function group
+ * bit mask cm_func_group_mask. This field, of type crypto_func_group_t,
+ * specifies the provider entry points that can be used with a particular
+ * mechanism. The function group mask is a combination of the following values.
+ */
+
+typedef uint32_t crypto_func_group_t;
+
+
+#define CRYPTO_FG_ENCRYPT 0x00000001 /* encrypt_init() */
+#define CRYPTO_FG_DECRYPT 0x00000002 /* decrypt_init() */
+#define CRYPTO_FG_DIGEST 0x00000004 /* digest_init() */
+#define CRYPTO_FG_SIGN 0x00000008 /* sign_init() */
+#define CRYPTO_FG_SIGN_RECOVER 0x00000010 /* sign_recover_init() */
+#define CRYPTO_FG_VERIFY 0x00000020 /* verify_init() */
+#define CRYPTO_FG_VERIFY_RECOVER 0x00000040 /* verify_recover_init() */
+#define CRYPTO_FG_GENERATE 0x00000080 /* key_generate() */
+#define CRYPTO_FG_GENERATE_KEY_PAIR 0x00000100 /* key_generate_pair() */
+#define CRYPTO_FG_WRAP 0x00000200 /* key_wrap() */
+#define CRYPTO_FG_UNWRAP 0x00000400 /* key_unwrap() */
+#define CRYPTO_FG_DERIVE 0x00000800 /* key_derive() */
+#define CRYPTO_FG_MAC 0x00001000 /* mac_init() */
+#define CRYPTO_FG_ENCRYPT_MAC 0x00002000 /* encrypt_mac_init() */
+#define CRYPTO_FG_MAC_DECRYPT 0x00004000 /* mac_decrypt_init() */
+#define CRYPTO_FG_ENCRYPT_ATOMIC 0x00008000 /* encrypt_atomic() */
+#define CRYPTO_FG_DECRYPT_ATOMIC 0x00010000 /* decrypt_atomic() */
+#define CRYPTO_FG_MAC_ATOMIC 0x00020000 /* mac_atomic() */
+#define CRYPTO_FG_DIGEST_ATOMIC 0x00040000 /* digest_atomic() */
+#define CRYPTO_FG_SIGN_ATOMIC 0x00080000 /* sign_atomic() */
+#define CRYPTO_FG_SIGN_RECOVER_ATOMIC 0x00100000 /* sign_recover_atomic() */
+#define CRYPTO_FG_VERIFY_ATOMIC 0x00200000 /* verify_atomic() */
+#define CRYPTO_FG_VERIFY_RECOVER_ATOMIC 0x00400000 /* verify_recover_atomic() */
+#define CRYPTO_FG_ENCRYPT_MAC_ATOMIC 0x00800000 /* encrypt_mac_atomic() */
+#define CRYPTO_FG_MAC_DECRYPT_ATOMIC 0x01000000 /* mac_decrypt_atomic() */
+#define CRYPTO_FG_RESERVED 0x80000000
+
+/*
+ * Maximum length of the pi_provider_description field of the
+ * crypto_provider_info structure.
+ */
+#define CRYPTO_PROVIDER_DESCR_MAX_LEN 64
+
+
+/* Bit mask for all the simple operations */
+#define CRYPTO_FG_SIMPLEOP_MASK (CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | \
+ CRYPTO_FG_DIGEST | CRYPTO_FG_SIGN | CRYPTO_FG_VERIFY | CRYPTO_FG_MAC | \
+ CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC | \
+ CRYPTO_FG_MAC_ATOMIC | CRYPTO_FG_DIGEST_ATOMIC | CRYPTO_FG_SIGN_ATOMIC | \
+ CRYPTO_FG_VERIFY_ATOMIC)
+
+/* Bit mask for all the dual operations */
+#define CRYPTO_FG_MAC_CIPHER_MASK (CRYPTO_FG_ENCRYPT_MAC | \
+ CRYPTO_FG_MAC_DECRYPT | CRYPTO_FG_ENCRYPT_MAC_ATOMIC | \
+ CRYPTO_FG_MAC_DECRYPT_ATOMIC)
+
+/* Add other combos to CRYPTO_FG_DUAL_MASK */
+#define CRYPTO_FG_DUAL_MASK CRYPTO_FG_MAC_CIPHER_MASK
+
+/*
+ * The crypto_mech_info structure specifies one of the mechanisms
+ * supported by a cryptographic provider. The pi_mechanisms field of
+ * the crypto_provider_info structure contains a pointer to an array
+ * of crypto_mech_info's.
+ */
+typedef struct crypto_mech_info {
+ crypto_mech_name_t cm_mech_name;
+ crypto_mech_type_t cm_mech_number;
+ crypto_func_group_t cm_func_group_mask;
+ ssize_t cm_min_key_length;
+ ssize_t cm_max_key_length;
+ uint32_t cm_mech_flags;
+} crypto_mech_info_t;
+
+/* Alias the old name to the new name for compatibility. */
+#define cm_keysize_unit cm_mech_flags
+
+/*
+ * The following is used by a provider that sets
+ * CRYPTO_HASH_NO_UPDATE. It needs to specify the maximum
+ * input data size it can digest in this field.
+ */
+#define cm_max_input_length cm_max_key_length
+
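+/*
+ * For illustration, a software cipher provider's table entry might
+ * look like the following sketch (the mechanism number and the key
+ * sizes are hypothetical; the AES provider in this patch builds its
+ * table the same way):
+ *
+ * static crypto_mech_info_t my_mech_tab[] = {
+ *     {"CKM_AES_CBC", MY_AES_CBC_MECH_TYPE,
+ *         CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT |
+ *         CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC,
+ *         16, 32, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
+ * };
+ */
+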
+/*
+ * crypto_kcf_provider_handle_t is a handle allocated by the kernel.
+ * It is returned after the provider registers with
+ * crypto_register_provider(), and must be specified by the provider
+ * when calling crypto_unregister_provider(), and
+ * crypto_provider_notification().
+ */
+typedef uint_t crypto_kcf_provider_handle_t;
+
+/*
+ * Provider information. Passed as argument to crypto_register_provider(9F).
+ * Describes the provider and its capabilities. Multiple providers can
+ * register for the same device instance. In this case, the same
+ * pi_provider_dev must be specified with a different pi_provider_handle.
+ */
+typedef struct crypto_provider_info_v1 {
+ uint_t pi_interface_version;
+ char *pi_provider_description;
+ crypto_provider_type_t pi_provider_type;
+ crypto_provider_handle_t pi_provider_handle;
+ crypto_ops_t *pi_ops_vector;
+ uint_t pi_mech_list_count;
+ crypto_mech_info_t *pi_mechanisms;
+ uint_t pi_logical_provider_count;
+ crypto_kcf_provider_handle_t *pi_logical_providers;
+} crypto_provider_info_v1_t;
+
+typedef struct crypto_provider_info_v2 {
+ crypto_provider_info_v1_t v1_info;
+ uint_t pi_flags;
+} crypto_provider_info_v2_t;
+
+typedef struct crypto_provider_info {
+ union {
+ crypto_provider_info_v2_t piu_v2;
+ crypto_provider_info_v1_t piu_v1;
+ } piu;
+} crypto_provider_info_t;
+
+#define pi_interface_version piu.piu_v1.pi_interface_version
+#define pi_provider_description piu.piu_v1.pi_provider_description
+#define pi_provider_type piu.piu_v1.pi_provider_type
+#define pi_provider_handle piu.piu_v1.pi_provider_handle
+#define pi_ops_vector piu.piu_v1.pi_ops_vector
+#define pi_mech_list_count piu.piu_v1.pi_mech_list_count
+#define pi_mechanisms piu.piu_v1.pi_mechanisms
+#define pi_logical_provider_count piu.piu_v1.pi_logical_provider_count
+#define pi_logical_providers piu.piu_v1.pi_logical_providers
+#define pi_flags piu.piu_v2.pi_flags
+
+/* hidden providers can only be accessed via a logical provider */
+#define CRYPTO_HIDE_PROVIDER 0x00000001
+/*
+ * provider cannot do multi-part digests (updates) and has a limit
+ * on the maximum input data size that it can digest.
+ */
+#define CRYPTO_HASH_NO_UPDATE 0x00000002
+
+/* provider can handle the request without returning CRYPTO_QUEUED */
+#define CRYPTO_SYNCHRONOUS 0x00000004
+
+#define CRYPTO_PIFLAGS_RESERVED2 0x40000000
+#define CRYPTO_PIFLAGS_RESERVED1 0x80000000
+
+/*
+ * Provider status passed by a provider to crypto_provider_notification(9F)
+ * and returned by the provider_status(9E) entry point.
+ */
+#define CRYPTO_PROVIDER_READY 0
+#define CRYPTO_PROVIDER_BUSY 1
+#define CRYPTO_PROVIDER_FAILED 2
+
+/*
+ * Functions exported by Solaris to cryptographic providers. Providers
+ * call these functions to register and unregister, notify the kernel
+ * of state changes, and notify the kernel when an asynchronous request
+ * has completed.
+ */
+extern int crypto_register_provider(crypto_provider_info_t *,
+ crypto_kcf_provider_handle_t *);
+extern int crypto_unregister_provider(crypto_kcf_provider_handle_t);
+extern void crypto_provider_notification(crypto_kcf_provider_handle_t, uint_t);
+extern void crypto_op_notification(crypto_req_handle_t, int);
+extern int crypto_kmflag(crypto_req_handle_t);
+
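+/*
+ * Putting the pieces together, a provider's attach path reduces to a
+ * sketch like the following (the my_* names are hypothetical, and
+ * CRYPTO_SPI_VERSION_1 and CRYPTO_SW_PROVIDER are assumed from earlier
+ * in this header; the AES provider in module/icp/io/aes.c follows the
+ * same pattern):
+ *
+ * static crypto_provider_info_t my_info;
+ * static crypto_kcf_provider_handle_t my_handle;
+ *
+ * my_info.pi_interface_version = CRYPTO_SPI_VERSION_1;
+ * my_info.pi_provider_description = "example SW provider";
+ * my_info.pi_provider_type = CRYPTO_SW_PROVIDER;
+ * my_info.pi_ops_vector = &my_ops;
+ * my_info.pi_mech_list_count = 1;
+ * my_info.pi_mechanisms = my_mech_tab;
+ *
+ * if (crypto_register_provider(&my_info, &my_handle) != CRYPTO_SUCCESS)
+ *     return (EACCES);
+ * ...
+ * (void) crypto_unregister_provider(my_handle);
+ */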
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_CRYPTO_SPI_H */
diff --git a/module/icp/include/sys/ia32/asm_linkage.h b/module/icp/include/sys/ia32/asm_linkage.h
new file mode 100644
index 000000000..f2dae7093
--- /dev/null
+++ b/module/icp/include/sys/ia32/asm_linkage.h
@@ -0,0 +1,307 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _IA32_SYS_ASM_LINKAGE_H
+#define _IA32_SYS_ASM_LINKAGE_H
+
+#include <sys/stack.h>
+#include <sys/trap.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef _ASM /* The remainder of this file is only for assembly files */
+
+/*
+ * make annoying differences in assembler syntax go away
+ */
+
+/*
+ * D16 and A16 are used to insert instruction prefixes; the
+ * macros help the assembler code be slightly more portable.
+ */
+#if !defined(__GNUC_AS__)
+/*
+ * /usr/ccs/bin/as prefixes are parsed as separate instructions
+ */
+#define D16 data16;
+#define A16 addr16;
+
+/*
+ * (There are some weird constructs in constant expressions)
+ */
+#define _CONST(const) [const]
+#define _BITNOT(const) -1!_CONST(const)
+#define _MUL(a, b) _CONST(a \* b)
+
+#else
+/*
+ * Why not use the 'data16' and 'addr16' prefixes? Well, the
+ * assembler doesn't quite believe in real mode, and thus argues with
+ * us about what we're trying to do.
+ */
+#define D16 .byte 0x66;
+#define A16 .byte 0x67;
+
+#define _CONST(const) (const)
+#define _BITNOT(const) ~_CONST(const)
+#define _MUL(a, b) _CONST(a * b)
+
+#endif
+
+/*
+ * C pointers are different sizes between i386 and amd64.
+ * These constants can be used to compute offsets into pointer arrays.
+ */
+#if defined(__amd64)
+#define CLONGSHIFT 3
+#define CLONGSIZE 8
+#define CLONGMASK 7
+#elif defined(__i386)
+#define CLONGSHIFT 2
+#define CLONGSIZE 4
+#define CLONGMASK 3
+#endif
+
+/*
+ * Since we know we're either ILP32 or LP64 ..
+ */
+#define CPTRSHIFT CLONGSHIFT
+#define CPTRSIZE CLONGSIZE
+#define CPTRMASK CLONGMASK
+
+#if CPTRSIZE != (1 << CPTRSHIFT) || CLONGSIZE != (1 << CLONGSHIFT)
+#error "inconsistent shift constants"
+#endif
+
+#if CPTRMASK != (CPTRSIZE - 1) || CLONGMASK != (CLONGSIZE - 1)
+#error "inconsistent mask constants"
+#endif
+
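+/*
+ * For example, assembly that walks a C pointer array scales the index
+ * by CPTRSIZE, or equivalently shifts it by CPTRSHIFT; the C analogue
+ * of the address computation is (a sketch):
+ *
+ * elem = *(void **)((char *)base + (n << CPTRSHIFT));
+ */
+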
+#define ASM_ENTRY_ALIGN 16
+
+/*
+ * SSE register alignment and save areas
+ */
+
+#define XMM_SIZE 16
+#define XMM_ALIGN 16
+
+#if defined(__amd64)
+
+#define SAVE_XMM_PROLOG(sreg, nreg) \
+ subq $_CONST(_MUL(XMM_SIZE, nreg)), %rsp; \
+ movq %rsp, sreg
+
+#define RSTOR_XMM_EPILOG(sreg, nreg) \
+ addq $_CONST(_MUL(XMM_SIZE, nreg)), %rsp
+
+#elif defined(__i386)
+
+#define SAVE_XMM_PROLOG(sreg, nreg) \
+ subl $_CONST(_MUL(XMM_SIZE, nreg) + XMM_ALIGN), %esp; \
+ movl %esp, sreg; \
+ addl $XMM_ALIGN, sreg; \
+ andl $_BITNOT(XMM_ALIGN-1), sreg
+
+#define RSTOR_XMM_EPILOG(sreg, nreg) \
+ addl $_CONST(_MUL(XMM_SIZE, nreg) + XMM_ALIGN), %esp;
+
+#endif /* __i386 */
+
+/*
+ * Profiling causes definitions of MCOUNT and RTMCOUNT
+ * particular to the type of profiling in effect (PROF or GPROF).
+ */
+#ifdef GPROF
+
+#define MCOUNT(x) \
+ pushl %ebp; \
+ movl %esp, %ebp; \
+ call _mcount; \
+ popl %ebp
+
+#endif /* GPROF */
+
+#ifdef PROF
+
+#define MCOUNT(x) \
+/* CSTYLED */ \
+ .lcomm .L_/**/x/**/1, 4, 4; \
+ pushl %ebp; \
+ movl %esp, %ebp; \
+/* CSTYLED */ \
+ movl $.L_/**/x/**/1, %edx; \
+ call _mcount; \
+ popl %ebp
+
+#endif /* PROF */
+
+/*
+ * if we are not profiling, MCOUNT should be defined to nothing
+ */
+#if !defined(PROF) && !defined(GPROF)
+#define MCOUNT(x)
+#endif /* !defined(PROF) && !defined(GPROF) */
+
+#define RTMCOUNT(x) MCOUNT(x)
+
+/*
+ * Macro to define weak symbol aliases. These are similar to the ANSI-C
+ * #pragma weak _name = name
+ * except a compiler can determine type. The assembler must be told. Hence,
+ * the second parameter must be the type of the symbol (i.e.: function,...)
+ */
+#define ANSI_PRAGMA_WEAK(sym, stype) \
+/* CSTYLED */ \
+ .weak _/**/sym; \
+/* CSTYLED */ \
+ .type _/**/sym, @stype; \
+/* CSTYLED */ \
+_/**/sym = sym
+
+/*
+ * Like ANSI_PRAGMA_WEAK(), but for unrelated names, as in:
+ * #pragma weak sym1 = sym2
+ */
+#define ANSI_PRAGMA_WEAK2(sym1, sym2, stype) \
+ .weak sym1; \
+ .type sym1, @stype; \
+sym1 = sym2
+
+/*
+ * ENTRY provides the standard procedure entry code and an easy way to
+ * insert the calls to mcount for profiling. ENTRY_NP is identical, but
+ * never calls mcount.
+ */
+#define ENTRY(x) \
+ .text; \
+ .align ASM_ENTRY_ALIGN; \
+ .globl x; \
+ .type x, @function; \
+x: MCOUNT(x)
+
+#define ENTRY_NP(x) \
+ .text; \
+ .align ASM_ENTRY_ALIGN; \
+ .globl x; \
+ .type x, @function; \
+x:
+
+#define RTENTRY(x) \
+ .text; \
+ .align ASM_ENTRY_ALIGN; \
+ .globl x; \
+ .type x, @function; \
+x: RTMCOUNT(x)
+
+/*
+ * ENTRY2 is identical to ENTRY but provides two labels for the entry point.
+ */
+#define ENTRY2(x, y) \
+ .text; \
+ .align ASM_ENTRY_ALIGN; \
+ .globl x, y; \
+ .type x, @function; \
+ .type y, @function; \
+/* CSTYLED */ \
+x: ; \
+y: MCOUNT(x)
+
+#define ENTRY_NP2(x, y) \
+ .text; \
+ .align ASM_ENTRY_ALIGN; \
+ .globl x, y; \
+ .type x, @function; \
+ .type y, @function; \
+/* CSTYLED */ \
+x: ; \
+y:
+
+
+/*
+ * ALTENTRY provides for additional entry points.
+ */
+#define ALTENTRY(x) \
+ .globl x; \
+ .type x, @function; \
+x:
+
+/*
+ * DGDEF and DGDEF2 provide global data declarations.
+ *
+ * DGDEF provides a word-aligned word of storage.
+ *
+ * DGDEF2 allocates "sz" bytes of storage with **NO** alignment. This
+ * implies this macro is best used for byte arrays.
+ *
+ * DGDEF3 allocates "sz" bytes of storage with "algn" alignment.
+ */
+#define DGDEF2(name, sz) \
+ .data; \
+ .globl name; \
+ .type name, @object; \
+ .size name, sz; \
+name:
+
+#define DGDEF3(name, sz, algn) \
+ .data; \
+ .align algn; \
+ .globl name; \
+ .type name, @object; \
+ .size name, sz; \
+name:
+
+#define DGDEF(name) DGDEF3(name, 4, 4)
+
+/*
+ * SET_SIZE trails a function and sets the size for the ELF symbol table.
+ */
+#define SET_SIZE(x) \
+ .size x, [.-x]
+
+/*
+ * NWORD provides the native word value.
+ */
+#if defined(__amd64)
+
+/*CSTYLED*/
+#define NWORD quad
+
+#elif defined(__i386)
+
+#define NWORD long
+
+#endif /* __i386 */
+
+#endif /* _ASM */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _IA32_SYS_ASM_LINKAGE_H */
diff --git a/module/icp/include/sys/ia32/stack.h b/module/icp/include/sys/ia32/stack.h
new file mode 100644
index 000000000..c4deb7bca
--- /dev/null
+++ b/module/icp/include/sys/ia32/stack.h
@@ -0,0 +1,160 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _IA32_SYS_STACK_H
+#define _IA32_SYS_STACK_H
+
+#if !defined(_ASM)
+
+#include <sys/types.h>
+
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * In the x86 world, a stack frame looks like this:
+ *
+ * |--------------------------|
+ * 4n+8(%ebp) ->| argument word n |
+ * | ... | (Previous frame)
+ * 8(%ebp) ->| argument word 0 |
+ * |--------------------------|--------------------
+ * 4(%ebp) ->| return address |
+ * |--------------------------|
+ * 0(%ebp) ->| previous %ebp (optional) |
+ * |--------------------------|
+ * -4(%ebp) ->| unspecified | (Current frame)
+ * | ... |
+ * 0(%esp) ->| variable size |
+ * |--------------------------|
+ */
+
+/*
+ * Stack alignment macros.
+ */
+
+#define STACK_ALIGN32 4
+#define STACK_ENTRY_ALIGN32 4
+#define STACK_BIAS32 0
+#define SA32(x) (((x)+(STACK_ALIGN32-1)) & ~(STACK_ALIGN32-1))
+#define STACK_RESERVE32 0
+#define MINFRAME32 0
+
+#if defined(__amd64)
+
+/*
+ * In the amd64 world, a stack frame looks like this:
+ *
+ * |--------------------------|
+ * 8n+16(%rbp)->| argument word n |
+ * | ... | (Previous frame)
+ * 16(%rbp) ->| argument word 0 |
+ * |--------------------------|--------------------
+ * 8(%rbp) ->| return address |
+ * |--------------------------|
+ * 0(%rbp) ->| previous %rbp |
+ * |--------------------------|
+ * -8(%rbp) ->| unspecified | (Current frame)
+ * | ... |
+ * 0(%rsp) ->| variable size |
+ * |--------------------------|
+ * -128(%rsp) ->| reserved for function |
+ * |--------------------------|
+ *
+ * The end of the input argument area must be aligned on a 16-byte
+ * boundary; i.e. (%rsp - 8) % 16 == 0 at function entry.
+ *
+ * The 128-byte area beyond %rsp is considered to be reserved for
+ * functions and is NOT modified by signal handlers. It can be used
+ * to store temporary data that is not needed across function calls.
+ */
+
+/*
+ * Stack alignment macros.
+ */
+
+#define STACK_ALIGN64 16
+#define STACK_ENTRY_ALIGN64 8
+#define STACK_BIAS64 0
+#define SA64(x) (((x)+(STACK_ALIGN64-1)) & ~(STACK_ALIGN64-1))
+#define STACK_RESERVE64 128
+#define MINFRAME64 0
+
+#define STACK_ALIGN STACK_ALIGN64
+#define STACK_ENTRY_ALIGN STACK_ENTRY_ALIGN64
+#define STACK_BIAS STACK_BIAS64
+#define SA(x) SA64(x)
+#define STACK_RESERVE STACK_RESERVE64
+#define MINFRAME MINFRAME64
+
+#elif defined(__i386)
+
+#define STACK_ALIGN STACK_ALIGN32
+#define STACK_ENTRY_ALIGN STACK_ENTRY_ALIGN32
+#define STACK_BIAS STACK_BIAS32
+#define SA(x) SA32(x)
+#define STACK_RESERVE STACK_RESERVE32
+#define MINFRAME MINFRAME32
+
+#endif /* __i386 */
+
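+/*
+ * For example, with STACK_ALIGN64 of 16, SA64(24) evaluates to
+ * (24 + 15) & ~15 == 32, padding a 24-byte local area out to the next
+ * 16-byte boundary; with STACK_ALIGN32 of 4, SA32(10) rounds up to 12.
+ */
+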
+#if defined(_KERNEL) && !defined(_ASM)
+
+#if defined(DEBUG)
+#if STACK_ALIGN == 4
+#define ASSERT_STACK_ALIGNED() \
+ { \
+ uint32_t __tmp; \
+ ASSERT((((uintptr_t)&__tmp) & (STACK_ALIGN - 1)) == 0); \
+ }
+#elif (STACK_ALIGN == 16) && (_LONG_DOUBLE_ALIGNMENT == 16)
+#define ASSERT_STACK_ALIGNED() \
+ { \
+ long double __tmp; \
+ ASSERT((((uintptr_t)&__tmp) & (STACK_ALIGN - 1)) == 0); \
+ }
+#endif
+#else /* DEBUG */
+#define ASSERT_STACK_ALIGNED()
+#endif /* DEBUG */
+
+struct regs;
+
+void traceregs(struct regs *);
+void traceback(caddr_t);
+
+#endif /* defined(_KERNEL) && !defined(_ASM) */
+
+#define STACK_GROWTH_DOWN /* stacks grow from high to low addresses */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _IA32_SYS_STACK_H */
diff --git a/module/icp/include/sys/ia32/trap.h b/module/icp/include/sys/ia32/trap.h
new file mode 100644
index 000000000..55b94969b
--- /dev/null
+++ b/module/icp/include/sys/ia32/trap.h
@@ -0,0 +1,107 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/* Copyright (c) 1990, 1991 UNIX System Laboratories, Inc. */
+/* Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T */
+/* All Rights Reserved */
+
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _IA32_SYS_TRAP_H
+#define _IA32_SYS_TRAP_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Trap type values
+ */
+
+#define T_ZERODIV 0x0 /* #de divide by 0 error */
+#define T_SGLSTP 0x1 /* #db single step */
+#define T_NMIFLT 0x2 /* NMI */
+#define T_BPTFLT 0x3 /* #bp breakpoint fault, INT3 insn */
+#define T_OVFLW 0x4 /* #of INTO overflow fault */
+#define T_BOUNDFLT 0x5 /* #br BOUND insn fault */
+#define T_ILLINST 0x6 /* #ud invalid opcode fault */
+#define T_NOEXTFLT 0x7 /* #nm device not available: x87 */
+#define T_DBLFLT 0x8 /* #df double fault */
+#define T_EXTOVRFLT 0x9 /* [not generated: 386 only] */
+#define T_TSSFLT 0xa /* #ts invalid TSS fault */
+#define T_SEGFLT 0xb /* #np segment not present fault */
+#define T_STKFLT 0xc /* #ss stack fault */
+#define T_GPFLT 0xd /* #gp general protection fault */
+#define T_PGFLT 0xe /* #pf page fault */
+#define T_EXTERRFLT 0x10 /* #mf x87 FPU error fault */
+#define T_ALIGNMENT 0x11 /* #ac alignment check error */
+#define T_MCE 0x12 /* #mc machine check exception */
+#define T_SIMDFPE 0x13 /* #xm SSE/SSE2 exception */
+#define T_DBGENTR 0x14 /* debugger entry */
+#define T_ENDPERR 0x21 /* emulated extension error flt */
+#define T_ENOEXTFLT 0x20 /* emulated ext not present */
+#define T_FASTTRAP 0xd2 /* fast system call */
+#define T_SYSCALLINT 0x91 /* general system call */
+#define T_DTRACE_RET 0x7f /* DTrace pid return */
+#define T_INT80 0x80 /* int80 handler for linux emulation */
+#define T_SOFTINT 0x50fd /* pseudo softint trap type */
+
+/*
+ * Pseudo traps.
+ */
+#define T_INTERRUPT 0x100
+#define T_FAULT 0x200
+#define T_AST 0x400
+#define T_SYSCALL 0x180
+
+
+/*
+ * Values of error code on stack in case of page fault
+ */
+
+#define PF_ERR_MASK 0x01 /* Mask for error bit */
+#define PF_ERR_PAGE 0x00 /* page not present */
+#define PF_ERR_PROT 0x01 /* protection error */
+#define PF_ERR_WRITE 0x02 /* fault caused by write (else read) */
+#define PF_ERR_USER 0x04 /* processor was in user mode */
+ /* (else supervisor) */
+#define PF_ERR_EXEC 0x10 /* attempt to execute a No eXec page (AMD) */
+
+/*
+ * Definitions for fast system call subfunctions
+ */
+#define T_FNULL 0 /* Null trap for testing */
+#define T_FGETFP 1 /* Get emulated FP context */
+#define T_FSETFP 2 /* Set emulated FP context */
+#define T_GETHRTIME 3 /* Get high resolution time */
+#define T_GETHRVTIME 4 /* Get high resolution virtual time */
+#define T_GETHRESTIME 5 /* Get high resolution time */
+#define T_GETLGRP 6 /* Get home lgrpid */
+
+#define T_LASTFAST 6 /* Last valid subfunction */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _IA32_SYS_TRAP_H */
diff --git a/module/icp/include/sys/modctl.h b/module/icp/include/sys/modctl.h
new file mode 100644
index 000000000..a0b94ef39
--- /dev/null
+++ b/module/icp/include/sys/modctl.h
@@ -0,0 +1,477 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_MODCTL_H
+#define _SYS_MODCTL_H
+
+/*
+ * loadable module support.
+ */
+
+#include <sys/zfs_context.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct modlmisc;
+struct modlinkage;
+
+/*
+ * The following structure defines the operations used by modctl
+ * to load and unload modules. Each supported loadable module type
+ * requires a set of mod_ops.
+ */
+struct mod_ops {
+ int (*modm_install)(struct modlmisc *, struct modlinkage *);
+ int (*modm_remove)(struct modlmisc *, struct modlinkage *);
+ int (*modm_info)(void *, struct modlinkage *, int *);
+};
+
+/*
+ * The defined set of mod_ops structures for each loadable module type.
+ * Defined in modctl.c.
+ */
+extern struct mod_ops mod_brandops;
+#if defined(__i386) || defined(__amd64)
+extern struct mod_ops mod_cpuops;
+#endif
+extern struct mod_ops mod_cryptoops;
+extern struct mod_ops mod_driverops;
+extern struct mod_ops mod_execops;
+extern struct mod_ops mod_fsops;
+extern struct mod_ops mod_miscops;
+extern struct mod_ops mod_schedops;
+extern struct mod_ops mod_strmodops;
+extern struct mod_ops mod_syscallops;
+extern struct mod_ops mod_sockmodops;
+#ifdef _SYSCALL32_IMPL
+extern struct mod_ops mod_syscallops32;
+#endif
+extern struct mod_ops mod_dacfops;
+extern struct mod_ops mod_ippops;
+extern struct mod_ops mod_pcbeops;
+extern struct mod_ops mod_devfsops;
+extern struct mod_ops mod_kiconvops;
+
+/*
+ * Definitions for the module specific linkage structures.
+ * The first two fields are the same in all of the structures.
+ * The linkinfo is for informational purposes only and is returned by
+ * modctl with the MODINFO cmd.
+ */
+
+/* For cryptographic providers */
+struct modlcrypto {
+ struct mod_ops *crypto_modops;
+ char *crypto_linkinfo;
+};
+
+/* For misc */
+struct modlmisc {
+ struct mod_ops *misc_modops;
+ char *misc_linkinfo;
+};
+
+/*
+ * Revision number of loadable modules support. This is the value
+ * that must be used in the modlinkage structure.
+ */
+#define MODREV_1 1
+
+/*
+ * The modlinkage structure is the structure that the module writer
+ * provides to the routines to install, remove, and stat a module.
+ * The ml_linkage element is an array of pointers to linkage structures.
+ * For most modules there is only one linkage structure. We allocate
+ * enough space for 3 linkage structures which happens to be the most
+ * we have in any Sun-supplied module. For those modules with more
+ * than 3 linkage structures (which is very unlikely), a modlinkage
+ * structure must be kmem_alloc'd in the module wrapper to be big enough
+ * for all of the linkage structures.
+ */
+struct modlinkage {
+ int ml_rev; /* rev of loadable modules system */
+#ifdef _LP64
+ void *ml_linkage[7]; /* more space in 64-bit OS */
+#else
+ void *ml_linkage[4]; /* NULL terminated list of */
+ /* linkage structures */
+#endif
+};
+
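+/*
+ * For a typical single-linkage module, the wrapper reduces to a sketch
+ * like this (my_modlmisc and its linkinfo string are hypothetical; the
+ * AES provider in this patch does the same with a struct modlcrypto):
+ *
+ * static struct modlmisc my_modlmisc = {
+ *     &mod_miscops, "example misc module"
+ * };
+ *
+ * static struct modlinkage my_modlinkage = {
+ *     MODREV_1, { (void *)&my_modlmisc, NULL }
+ * };
+ */
+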
+/*
+ * Commands supported by the modctl system call.
+ */
+#define MODLOAD 0
+#define MODUNLOAD 1
+#define MODINFO 2
+#define MODRESERVED 3
+#define MODSETMINIROOT 4
+#define MODADDMAJBIND 5
+#define MODGETPATH 6
+#define MODREADSYSBIND 7
+#define MODGETMAJBIND 8
+#define MODGETNAME 9
+#define MODSIZEOF_DEVID 10
+#define MODGETDEVID 11
+#define MODSIZEOF_MINORNAME 12
+#define MODGETMINORNAME 13
+#define MODGETPATHLEN 14
+#define MODEVENTS 15
+#define MODGETFBNAME 16
+#define MODREREADDACF 17
+#define MODLOADDRVCONF 18
+#define MODUNLOADDRVCONF 19
+#define MODREMMAJBIND 20
+#define MODDEVT2INSTANCE 21
+#define MODGETDEVFSPATH_LEN 22
+#define MODGETDEVFSPATH 23
+#define MODDEVID2PATHS 24
+#define MODSETDEVPOLICY 26
+#define MODGETDEVPOLICY 27
+#define MODALLOCPRIV 28
+#define MODGETDEVPOLICYBYNAME 29
+#define MODLOADMINORPERM 31
+#define MODADDMINORPERM 32
+#define MODREMMINORPERM 33
+#define MODREMDRVCLEANUP 34
+#define MODDEVEXISTS 35
+#define MODDEVREADDIR 36
+#define MODDEVNAME 37
+#define MODGETDEVFSPATH_MI_LEN 38
+#define MODGETDEVFSPATH_MI 39
+#define MODRETIRE 40
+#define MODUNRETIRE 41
+#define MODISRETIRED 42
+#define MODDEVEMPTYDIR 43
+#define MODREMDRVALIAS 44
+
+/*
+ * sub cmds for MODEVENTS
+ */
+#define MODEVENTS_FLUSH 0
+#define MODEVENTS_FLUSH_DUMP 1
+#define MODEVENTS_SET_DOOR_UPCALL_FILENAME 2
+#define MODEVENTS_GETDATA 3
+#define MODEVENTS_FREEDATA 4
+#define MODEVENTS_POST_EVENT 5
+#define MODEVENTS_REGISTER_EVENT 6
+
+/*
+ * devname subcmds for MODDEVNAME
+ */
+#define MODDEVNAME_LOOKUPDOOR 0
+#define MODDEVNAME_DEVFSADMNODE 1
+#define MODDEVNAME_NSMAPS 2
+#define MODDEVNAME_PROFILE 3
+#define MODDEVNAME_RECONFIG 4
+#define MODDEVNAME_SYSAVAIL 5
+
+
+/*
+ * Data structure passed to the modconfig command in the kernel to build
+ * the devfs tree.
+ */
+
+struct aliases {
+ struct aliases *a_next;
+ char *a_name;
+ int a_len;
+};
+
+#define MAXMODCONFNAME 256
+
+struct modconfig {
+ char drvname[MAXMODCONFNAME];
+ char drvclass[MAXMODCONFNAME];
+ int major;
+ int flags;
+ int num_aliases;
+ struct aliases *ap;
+};
+
+#if defined(_SYSCALL32)
+
+struct aliases32 {
+ caddr32_t a_next;
+ caddr32_t a_name;
+ int32_t a_len;
+};
+
+struct modconfig32 {
+ char drvname[MAXMODCONFNAME];
+ char drvclass[MAXMODCONFNAME];
+ int32_t major;
+ int32_t flags;
+ int32_t num_aliases;
+ caddr32_t ap;
+};
+
+#endif /* _SYSCALL32 */
+
+/* flags for modconfig */
+#define MOD_UNBIND_OVERRIDE 0x01 /* fail unbind if in use */
+
+/*
+ * Max module path length
+ */
+#define MOD_MAXPATH 256
+
+/*
+ * Default search path for modules, ADDITIONAL to the directory
+ * holding the kernel components we booted from.
+ *
+ * Most often, this will be "/platform/{platform}/kernel /kernel /usr/kernel",
+ * but we don't wire it down here.
+ */
+#define MOD_DEFPATH "/kernel /usr/kernel"
+
+/*
+ * Default file name extension for autoloading modules.
+ */
+#define MOD_DEFEXT ""
+
+/*
+ * Parameters for modinfo
+ */
+#define MODMAXNAMELEN 32 /* max module name length */
+#define MODMAXLINKINFOLEN 32 /* max link info length */
+
+/*
+ * Module specific information.
+ */
+struct modspecific_info {
+ char msi_linkinfo[MODMAXLINKINFOLEN]; /* name in linkage struct */
+ int msi_p0; /* module specific information */
+};
+
+/*
+ * Structure returned by modctl with MODINFO command.
+ */
+#define MODMAXLINK 10 /* max linkages modinfo can handle */
+
+struct modinfo {
+ int mi_info; /* Flags for info wanted */
+ int mi_state; /* Flags for module state */
+ int mi_id; /* id of this loaded module */
+ int mi_nextid; /* id of next module or -1 */
+ caddr_t mi_base; /* virtual addr of text */
+ size_t mi_size; /* size of module in bytes */
+ int mi_rev; /* loadable modules rev */
+ int mi_loadcnt; /* # of times loaded */
+ char mi_name[MODMAXNAMELEN]; /* name of module */
+ struct modspecific_info mi_msinfo[MODMAXLINK];
+ /* mod specific info */
+};
+
+
+#if defined(_SYSCALL32)
+
+#define MODMAXNAMELEN32 32 /* max module name length */
+#define MODMAXLINKINFOLEN32 32 /* max link info length */
+#define MODMAXLINK32 10 /* max linkages modinfo can handle */
+
+struct modspecific_info32 {
+ char msi_linkinfo[MODMAXLINKINFOLEN32]; /* name in linkage struct */
+ int32_t msi_p0; /* module specific information */
+};
+
+struct modinfo32 {
+ int32_t mi_info; /* Flags for info wanted */
+ int32_t mi_state; /* Flags for module state */
+ int32_t mi_id; /* id of this loaded module */
+ int32_t mi_nextid; /* id of next module or -1 */
+ caddr32_t mi_base; /* virtual addr of text */
+ uint32_t mi_size; /* size of module in bytes */
+ int32_t mi_rev; /* loadable modules rev */
+ int32_t mi_loadcnt; /* # of times loaded */
+ char mi_name[MODMAXNAMELEN32]; /* name of module */
+ struct modspecific_info32 mi_msinfo[MODMAXLINK32];
+ /* mod specific info */
+};
+
+#endif /* _SYSCALL32 */
+
+/* Values for mi_info flags */
+#define MI_INFO_ONE 1
+#define MI_INFO_ALL 2
+#define MI_INFO_CNT 4
+#define MI_INFO_LINKAGE 8 /* used internally to extract modlinkage */
+/*
+ * MI_INFO_NOBASE indicates caller does not need mi_base. Failure to use this
+ * flag may lead 32-bit apps to receive an EOVERFLOW error from modctl(MODINFO)
+ * when used with a 64-bit kernel.
+ */
+#define MI_INFO_NOBASE 16
+
+/* Values for mi_state */
+#define MI_LOADED 1
+#define MI_INSTALLED 2
+
+/*
+ * Macros to vector to the appropriate module specific routine.
+ */
+#define MODL_INSTALL(MODL, MODLP) \
+ (*(MODL)->misc_modops->modm_install)(MODL, MODLP)
+#define MODL_REMOVE(MODL, MODLP) \
+ (*(MODL)->misc_modops->modm_remove)(MODL, MODLP)
+#define MODL_INFO(MODL, MODLP, P0) \
+ (*(MODL)->misc_modops->modm_info)(MODL, MODLP, P0)
+
+/*
+ * Definitions for stubs
+ */
+struct mod_stub_info {
+ uintptr_t mods_func_adr;
+ struct mod_modinfo *mods_modinfo;
+ uintptr_t mods_stub_adr;
+ int (*mods_errfcn)(void);
+ int mods_flag; /* flags defined below */
+};
+
+/*
+ * Definitions for mods_flag.
+ */
+#define MODS_WEAK 0x01 /* weak stub (not loaded if called) */
+#define MODS_NOUNLOAD 0x02 /* module not unloadable (no _fini()) */
+#define MODS_INSTALLED 0x10 /* module installed */
+
+struct mod_modinfo {
+ char *modm_module_name;
+ struct modctl *mp;
+ struct mod_stub_info modm_stubs[1];
+};
+
+struct modctl_list {
+ struct modctl_list *modl_next;
+ struct modctl *modl_modp;
+};
+
+/*
+ * Structure to manage a loadable module.
+ * Note: the module (mod_mp) structure's "text" and "text_size" fields
+ * are replicated in the modctl structure so that mod_containing_pc()
+ * doesn't have to grab any locks (modctls are persistent; modules are not).
+ */
+typedef struct modctl {
+ struct modctl *mod_next; /* &modules based list */
+ struct modctl *mod_prev;
+ int mod_id;
+ void *mod_mp;
+ kthread_t *mod_inprogress_thread;
+ struct mod_modinfo *mod_modinfo;
+ struct modlinkage *mod_linkage;
+ char *mod_filename;
+ char *mod_modname;
+
+ char mod_busy; /* inprogress_thread has locked */
+ char mod_want; /* someone waiting for unlock */
+ char mod_prim; /* primary module */
+
+ int mod_ref; /* ref count - from dependent or stub */
+
+ char mod_loaded; /* module in memory */
+ char mod_installed; /* post _init pre _fini */
+ char mod_loadflags;
+ char mod_delay_unload; /* deferred unload */
+
+ struct modctl_list *mod_requisites; /* mods this one depends on. */
+ void *__unused; /* NOTE: reuse (same size) is OK, */
+ /* deletion causes mdb.vs.core issues */
+ int mod_loadcnt; /* number of times mod was loaded */
+ int mod_nenabled; /* # of enabled DTrace probes in mod */
+ char *mod_text;
+ size_t mod_text_size;
+
+ int mod_gencount; /* # times loaded/unloaded */
+ struct modctl *mod_requisite_loading; /* mod circular dependency */
+} modctl_t;
+
+/*
+ * mod_loadflags
+ */
+
+#define MOD_NOAUTOUNLOAD 0x1 /* Auto mod-unloader skips this mod */
+#define MOD_NONOTIFY 0x2 /* No krtld notifications on (un)load */
+#define MOD_NOUNLOAD 0x4 /* Assume EBUSY for all _fini's */
+
+#define MOD_BIND_HASHSIZE 64
+#define MOD_BIND_HASHMASK (MOD_BIND_HASHSIZE-1)
+
+typedef int modid_t;
+
+/*
+ * global function and data declarations
+ */
+extern kmutex_t mod_lock;
+
+extern char *systemfile;
+extern char **syscallnames;
+extern int moddebug;
+
+/*
+ * This is the head of a doubly linked list; only the next and prev
+ * pointers are used.
+ */
+extern modctl_t modules;
+
+/*
+ * Only the following are part of the DDI/DKI
+ */
+extern int mod_install(struct modlinkage *);
+extern int mod_remove(struct modlinkage *);
+extern int mod_info(struct modlinkage *, struct modinfo *);
+
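+/*
+ * A module wrapper's entry points are thin shims over these three
+ * functions; a minimal sketch, given a modlinkage my_modlinkage as in
+ * the sketch above:
+ *
+ * int
+ * _init(void)
+ * {
+ *     return (mod_install(&my_modlinkage));
+ * }
+ *
+ * int
+ * _fini(void)
+ * {
+ *     return (mod_remove(&my_modlinkage));
+ * }
+ *
+ * int
+ * _info(struct modinfo *modinfop)
+ * {
+ *     return (mod_info(&my_modlinkage, modinfop));
+ * }
+ */
+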
+/*
+ * bit definitions for moddebug.
+ */
+#define MODDEBUG_LOADMSG 0x80000000 /* print "[un]loading..." msg */
+#define MODDEBUG_ERRMSG 0x40000000 /* print detailed error msgs */
+#define MODDEBUG_LOADMSG2 0x20000000 /* print 2nd level msgs */
+#define MODDEBUG_RETIRE 0x10000000 /* print retire msgs */
+#define MODDEBUG_BINDING 0x00040000 /* driver/alias binding */
+#define MODDEBUG_FINI_EBUSY 0x00020000 /* pretend fini returns EBUSY */
+#define MODDEBUG_NOAUL_IPP 0x00010000 /* no Autounloading ipp mods */
+#define MODDEBUG_NOAUL_DACF 0x00008000 /* no Autounloading dacf mods */
+#define MODDEBUG_KEEPTEXT 0x00004000 /* keep text after unloading */
+#define MODDEBUG_NOAUL_DRV 0x00001000 /* no Autounloading Drivers */
+#define MODDEBUG_NOAUL_EXEC 0x00000800 /* no Autounloading Execs */
+#define MODDEBUG_NOAUL_FS 0x00000400 /* no Autounloading File sys */
+#define MODDEBUG_NOAUL_MISC 0x00000200 /* no Autounloading misc */
+#define MODDEBUG_NOAUL_SCHED 0x00000100 /* no Autounloading scheds */
+#define MODDEBUG_NOAUL_STR 0x00000080 /* no Autounloading streams */
+#define MODDEBUG_NOAUL_SYS 0x00000040 /* no Autounloading syscalls */
+#define MODDEBUG_NOCTF 0x00000020 /* do not load CTF debug data */
+#define MODDEBUG_NOAUTOUNLOAD 0x00000010 /* no autounloading at all */
+#define MODDEBUG_DDI_MOD 0x00000008 /* ddi_mod{open,sym,close} */
+#define MODDEBUG_MP_MATCH 0x00000004 /* dev_minorperm */
+#define MODDEBUG_MINORPERM 0x00000002 /* minor perm modctls */
+#define MODDEBUG_USERDEBUG 0x00000001 /* bpt after init_module() */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_MODCTL_H */
diff --git a/module/icp/include/sys/modhash.h b/module/icp/include/sys/modhash.h
new file mode 100644
index 000000000..06b52ff02
--- /dev/null
+++ b/module/icp/include/sys/modhash.h
@@ -0,0 +1,147 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_MODHASH_H
+#define _SYS_MODHASH_H
+
+/*
+ * Generic hash implementation for the kernel.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/zfs_context.h>
+
+/*
+ * Opaque data types for storing keys and values
+ */
+typedef void *mod_hash_val_t;
+typedef void *mod_hash_key_t;
+
+/*
+ * Opaque data type for reservation
+ */
+typedef void *mod_hash_hndl_t;
+
+/*
+ * Opaque type for hash itself.
+ */
+struct mod_hash;
+typedef struct mod_hash mod_hash_t;
+
+/*
+ * String hash table
+ */
+mod_hash_t *mod_hash_create_strhash_nodtr(char *, size_t,
+ void (*)(mod_hash_val_t));
+mod_hash_t *mod_hash_create_strhash(char *, size_t, void (*)(mod_hash_val_t));
+void mod_hash_destroy_strhash(mod_hash_t *);
+int mod_hash_strkey_cmp(mod_hash_key_t, mod_hash_key_t);
+void mod_hash_strkey_dtor(mod_hash_key_t);
+void mod_hash_strval_dtor(mod_hash_val_t);
+uint_t mod_hash_bystr(void *, mod_hash_key_t);
+
+/*
+ * Pointer hash table
+ */
+mod_hash_t *mod_hash_create_ptrhash(char *, size_t, void (*)(mod_hash_val_t),
+ size_t);
+void mod_hash_destroy_ptrhash(mod_hash_t *);
+int mod_hash_ptrkey_cmp(mod_hash_key_t, mod_hash_key_t);
+uint_t mod_hash_byptr(void *, mod_hash_key_t);
+
+/*
+ * ID hash table
+ */
+mod_hash_t *mod_hash_create_idhash(char *, size_t, void (*)(mod_hash_val_t));
+void mod_hash_destroy_idhash(mod_hash_t *);
+int mod_hash_idkey_cmp(mod_hash_key_t, mod_hash_key_t);
+uint_t mod_hash_byid(void *, mod_hash_key_t);
+uint_t mod_hash_iddata_gen(size_t);
+
+/*
+ * Hash management functions
+ */
+mod_hash_t *mod_hash_create_extended(char *, size_t, void (*)(mod_hash_key_t),
+ void (*)(mod_hash_val_t), uint_t (*)(void *, mod_hash_key_t), void *,
+ int (*)(mod_hash_key_t, mod_hash_key_t), int);
+
+void mod_hash_destroy_hash(mod_hash_t *);
+void mod_hash_clear(mod_hash_t *);
+
+/*
+ * Null key and value destructors
+ */
+void mod_hash_null_keydtor(mod_hash_key_t);
+void mod_hash_null_valdtor(mod_hash_val_t);
+
+/*
+ * Basic hash operations
+ */
+
+/*
+ * Error codes for insert, remove, find, destroy.
+ */
+#define MH_ERR_NOMEM -1
+#define MH_ERR_DUPLICATE -2
+#define MH_ERR_NOTFOUND -3
+
+/*
+ * Return codes for hash walkers
+ */
+#define MH_WALK_CONTINUE 0
+#define MH_WALK_TERMINATE 1
+
+/*
+ * Basic hash operations
+ */
+int mod_hash_insert(mod_hash_t *, mod_hash_key_t, mod_hash_val_t);
+int mod_hash_replace(mod_hash_t *, mod_hash_key_t, mod_hash_val_t);
+int mod_hash_remove(mod_hash_t *, mod_hash_key_t, mod_hash_val_t *);
+int mod_hash_destroy(mod_hash_t *, mod_hash_key_t);
+int mod_hash_find(mod_hash_t *, mod_hash_key_t, mod_hash_val_t *);
+int mod_hash_find_cb(mod_hash_t *, mod_hash_key_t, mod_hash_val_t *,
+ void (*)(mod_hash_key_t, mod_hash_val_t));
+int mod_hash_find_cb_rval(mod_hash_t *, mod_hash_key_t, mod_hash_val_t *,
+ int (*)(mod_hash_key_t, mod_hash_val_t), int *);
+void mod_hash_walk(mod_hash_t *,
+ uint_t (*)(mod_hash_key_t, mod_hash_val_t *, void *), void *);
+
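+/*
+ * Typical use of the string hash, sketched (the names are hypothetical;
+ * the _nodtr variant is assumed to install no key destructor, so string
+ * literals may be used as keys):
+ *
+ * mod_hash_t *h;
+ * mod_hash_val_t val;
+ *
+ * h = mod_hash_create_strhash_nodtr("example hash", 64,
+ *     mod_hash_null_valdtor);
+ * (void) mod_hash_insert(h, (mod_hash_key_t)"aes", (mod_hash_val_t)ptr);
+ * if (mod_hash_find(h, (mod_hash_key_t)"aes", &val) == 0)
+ *     ...use val, which now holds ptr...
+ * mod_hash_destroy_strhash(h);
+ */
+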
+/*
+ * Reserving hash operations
+ */
+int mod_hash_reserve(mod_hash_t *, mod_hash_hndl_t *);
+int mod_hash_reserve_nosleep(mod_hash_t *, mod_hash_hndl_t *);
+void mod_hash_cancel(mod_hash_t *, mod_hash_hndl_t *);
+int mod_hash_insert_reserve(mod_hash_t *, mod_hash_key_t, mod_hash_val_t,
+ mod_hash_hndl_t);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_MODHASH_H */
diff --git a/module/icp/include/sys/modhash_impl.h b/module/icp/include/sys/modhash_impl.h
new file mode 100644
index 000000000..3130773aa
--- /dev/null
+++ b/module/icp/include/sys/modhash_impl.h
@@ -0,0 +1,108 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_MODHASH_IMPL_H
+#define _SYS_MODHASH_IMPL_H
+
+/*
+ * Internal details for the kernel's generic hash implementation.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/zfs_context.h>
+#include <sys/modhash.h>
+
+struct mod_hash_entry {
+ mod_hash_key_t mhe_key; /* stored hash key */
+ mod_hash_val_t mhe_val; /* stored hash value */
+ struct mod_hash_entry *mhe_next; /* next item in chain */
+};
+
+struct mod_hash_stat {
+ ulong_t mhs_hit; /* tried a 'find' and it succeeded */
+ ulong_t mhs_miss; /* tried a 'find' but it failed */
+ ulong_t mhs_coll; /* insert failed because of a duplicate key */
+ ulong_t mhs_nelems; /* total number of stored key/value pairs */
+ ulong_t mhs_nomem; /* number of times kmem_alloc failed */
+};
+
+struct mod_hash {
+ krwlock_t mh_contents; /* lock protecting contents */
+ char *mh_name; /* hash name */
+ int mh_sleep; /* kmem_alloc flag */
+ size_t mh_nchains; /* # of elements in mh_entries */
+
+ /* key and val destructor */
+ void (*mh_kdtor)(mod_hash_key_t);
+ void (*mh_vdtor)(mod_hash_val_t);
+
+ /* key comparator */
+ int (*mh_keycmp)(mod_hash_key_t, mod_hash_key_t);
+
+ /* hash algorithm, and algorithm-private data */
+ uint_t (*mh_hashalg)(void *, mod_hash_key_t);
+ void *mh_hashalg_data;
+
+ struct mod_hash *mh_next; /* next hash in list */
+
+ struct mod_hash_stat mh_stat;
+
+ struct mod_hash_entry *mh_entries[1];
+};
+
+/*
+ * MH_SIZE()
+ * Compute the size of a mod_hash_t, in bytes, given the number of
+ * elements it contains.
+ */
+#define MH_SIZE(n) \
+ (sizeof (mod_hash_t) + ((n) - 1) * (sizeof (struct mod_hash_entry *)))
+
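+/*
+ * For example, a hash created with 64 chains occupies
+ * MH_SIZE(64) == sizeof (mod_hash_t) + 63 * sizeof (struct mod_hash_entry *)
+ * bytes: the one-element mh_entries array at the end of mod_hash supplies
+ * the first chain head, and the remaining 63 heads are allocated
+ * contiguously after it.
+ */
+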
+/*
+ * Module initialization and teardown; each called once.
+ */
+void mod_hash_fini(void);
+void mod_hash_init(void);
+
+/*
+ * Internal routines. Use directly with care.
+ */
+uint_t i_mod_hash(mod_hash_t *, mod_hash_key_t);
+int i_mod_hash_insert_nosync(mod_hash_t *, mod_hash_key_t, mod_hash_val_t,
+ mod_hash_hndl_t);
+int i_mod_hash_remove_nosync(mod_hash_t *, mod_hash_key_t, mod_hash_val_t *);
+int i_mod_hash_find_nosync(mod_hash_t *, mod_hash_key_t, mod_hash_val_t *);
+void i_mod_hash_walk_nosync(mod_hash_t *, uint_t (*)(mod_hash_key_t,
+ mod_hash_val_t *, void *), void *);
+void i_mod_hash_clear_nosync(mod_hash_t *hash);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_MODHASH_IMPL_H */
diff --git a/module/icp/include/sys/stack.h b/module/icp/include/sys/stack.h
new file mode 100644
index 000000000..64fecf409
--- /dev/null
+++ b/module/icp/include/sys/stack.h
@@ -0,0 +1,36 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_STACK_H
+#define _SYS_STACK_H
+
+#if defined(__i386) || defined(__amd64)
+
+#include <sys/ia32/stack.h> /* XX64 x86/sys/stack.h */
+
+#endif
+
+#endif /* _SYS_STACK_H */
diff --git a/module/icp/include/sys/trap.h b/module/icp/include/sys/trap.h
new file mode 100644
index 000000000..7f9fd3758
--- /dev/null
+++ b/module/icp/include/sys/trap.h
@@ -0,0 +1,36 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_TRAP_H
+#define _SYS_TRAP_H
+
+#if defined(__i386) || defined(__amd64)
+
+#include <sys/ia32/trap.h> /* XX64 x86/sys/trap.h */
+
+#endif
+
+#endif /* _SYS_TRAP_H */
diff --git a/module/icp/io/aes.c b/module/icp/io/aes.c
new file mode 100644
index 000000000..ada697eb6
--- /dev/null
+++ b/module/icp/io/aes.c
@@ -0,0 +1,1437 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * AES provider for the Kernel Cryptographic Framework (KCF)
+ */
+
+#include <sys/zfs_context.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/impl.h>
+#include <sys/crypto/spi.h>
+#include <sys/crypto/icp.h>
+#include <modes/modes.h>
+#include <sys/modctl.h>
+#define _AES_IMPL
+#include <aes/aes_impl.h>
+
+#define CRYPTO_PROVIDER_NAME "aes"
+
+extern struct mod_ops mod_cryptoops;
+
+/*
+ * Module linkage information for the kernel.
+ */
+static struct modlcrypto modlcrypto = {
+ &mod_cryptoops,
+ "AES Kernel SW Provider"
+};
+
+static struct modlinkage modlinkage = {
+ MODREV_1, { (void *)&modlcrypto, NULL }
+};
+
+/*
+ * Mechanism info structure passed to KCF during registration.
+ */
+static crypto_mech_info_t aes_mech_info_tab[] = {
+ /* AES_ECB */
+ {SUN_CKM_AES_ECB, AES_ECB_MECH_INFO_TYPE,
+ CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
+ CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
+ AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
+ /* AES_CBC */
+ {SUN_CKM_AES_CBC, AES_CBC_MECH_INFO_TYPE,
+ CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
+ CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
+ AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
+ /* AES_CTR */
+ {SUN_CKM_AES_CTR, AES_CTR_MECH_INFO_TYPE,
+ CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
+ CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
+ AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
+ /* AES_CCM */
+ {SUN_CKM_AES_CCM, AES_CCM_MECH_INFO_TYPE,
+ CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
+ CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
+ AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
+ /* AES_GCM */
+ {SUN_CKM_AES_GCM, AES_GCM_MECH_INFO_TYPE,
+ CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
+ CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
+ AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
+ /* AES_GMAC */
+ {SUN_CKM_AES_GMAC, AES_GMAC_MECH_INFO_TYPE,
+ CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
+ CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC |
+ CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC |
+ CRYPTO_FG_SIGN | CRYPTO_FG_SIGN_ATOMIC |
+ CRYPTO_FG_VERIFY | CRYPTO_FG_VERIFY_ATOMIC,
+ AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES}
+};
+
+/* operations are in-place if the output buffer is NULL */
+#define AES_ARG_INPLACE(input, output) \
+ if ((output) == NULL) \
+ (output) = (input);
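+
+/*
+ * Illustrative example (hypothetical caller): the cipher entry points below
+ * apply this macro to their buffer arguments, so passing a NULL output
+ * requests in-place operation:
+ *
+ *	aes_encrypt(ctx, plaintext, NULL, req);
+ *
+ * after which the macro aliases ciphertext to plaintext inside aes_encrypt().
+ */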
+
+static void aes_provider_status(crypto_provider_handle_t, uint_t *);
+
+static crypto_control_ops_t aes_control_ops = {
+ aes_provider_status
+};
+
+static int aes_encrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
+ crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
+static int aes_decrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
+ crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
+static int aes_common_init(crypto_ctx_t *, crypto_mechanism_t *,
+ crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t, boolean_t);
+static int aes_common_init_ctx(aes_ctx_t *, crypto_spi_ctx_template_t *,
+ crypto_mechanism_t *, crypto_key_t *, int, boolean_t);
+static int aes_encrypt_final(crypto_ctx_t *, crypto_data_t *,
+ crypto_req_handle_t);
+static int aes_decrypt_final(crypto_ctx_t *, crypto_data_t *,
+ crypto_req_handle_t);
+
+static int aes_encrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
+ crypto_req_handle_t);
+static int aes_encrypt_update(crypto_ctx_t *, crypto_data_t *,
+ crypto_data_t *, crypto_req_handle_t);
+static int aes_encrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
+ crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
+
+static int aes_decrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
+ crypto_req_handle_t);
+static int aes_decrypt_update(crypto_ctx_t *, crypto_data_t *,
+ crypto_data_t *, crypto_req_handle_t);
+static int aes_decrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
+ crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
+
+static crypto_cipher_ops_t aes_cipher_ops = {
+ aes_encrypt_init,
+ aes_encrypt,
+ aes_encrypt_update,
+ aes_encrypt_final,
+ aes_encrypt_atomic,
+ aes_decrypt_init,
+ aes_decrypt,
+ aes_decrypt_update,
+ aes_decrypt_final,
+ aes_decrypt_atomic
+};
+
+static int aes_mac_atomic(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
+ crypto_spi_ctx_template_t, crypto_req_handle_t);
+static int aes_mac_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
+ crypto_spi_ctx_template_t, crypto_req_handle_t);
+
+static crypto_mac_ops_t aes_mac_ops = {
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ aes_mac_atomic,
+ aes_mac_verify_atomic
+};
+
+static int aes_create_ctx_template(crypto_provider_handle_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *,
+ size_t *, crypto_req_handle_t);
+static int aes_free_context(crypto_ctx_t *);
+
+static crypto_ctx_ops_t aes_ctx_ops = {
+ aes_create_ctx_template,
+ aes_free_context
+};
+
+static crypto_ops_t aes_crypto_ops = {{{{{
+ &aes_control_ops,
+ NULL,
+ &aes_cipher_ops,
+ &aes_mac_ops,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ &aes_ctx_ops
+}}}}};
+
+static crypto_provider_info_t aes_prov_info = {{{{
+ CRYPTO_SPI_VERSION_1,
+ "AES Software Provider",
+ CRYPTO_SW_PROVIDER,
+ NULL,
+ &aes_crypto_ops,
+ sizeof (aes_mech_info_tab)/sizeof (crypto_mech_info_t),
+ aes_mech_info_tab
+}}}};
+
+static crypto_kcf_provider_handle_t aes_prov_handle = 0;
+/* zero-length payload handed to the GMAC wrappers at the end of this file */
+static crypto_data_t null_crypto_data = { CRYPTO_DATA_RAW };
+
+int
+aes_mod_init(void)
+{
+ int ret;
+
+ if ((ret = mod_install(&modlinkage)) != 0)
+ return (ret);
+
+ /* Register with KCF. If the registration fails, remove the module. */
+ if (crypto_register_provider(&aes_prov_info, &aes_prov_handle)) {
+ (void) mod_remove(&modlinkage);
+ return (EACCES);
+ }
+
+ return (0);
+}
+
+int
+aes_mod_fini(void)
+{
+ /* Unregister from KCF if module is registered */
+ if (aes_prov_handle != 0) {
+ if (crypto_unregister_provider(aes_prov_handle))
+ return (EBUSY);
+
+ aes_prov_handle = 0;
+ }
+
+ return (mod_remove(&modlinkage));
+}
+
+static int
+aes_check_mech_param(crypto_mechanism_t *mechanism, aes_ctx_t **ctx, int kmflag)
+{
+ void *p = NULL;
+ boolean_t param_required = B_TRUE;
+ size_t param_len;
+ void *(*alloc_fun)(int);
+ int rv = CRYPTO_SUCCESS;
+
+ switch (mechanism->cm_type) {
+ case AES_ECB_MECH_INFO_TYPE:
+ param_required = B_FALSE;
+ alloc_fun = ecb_alloc_ctx;
+ break;
+ case AES_CBC_MECH_INFO_TYPE:
+ param_len = AES_BLOCK_LEN;
+ alloc_fun = cbc_alloc_ctx;
+ break;
+ case AES_CTR_MECH_INFO_TYPE:
+ param_len = sizeof (CK_AES_CTR_PARAMS);
+ alloc_fun = ctr_alloc_ctx;
+ break;
+ case AES_CCM_MECH_INFO_TYPE:
+ param_len = sizeof (CK_AES_CCM_PARAMS);
+ alloc_fun = ccm_alloc_ctx;
+ break;
+ case AES_GCM_MECH_INFO_TYPE:
+ param_len = sizeof (CK_AES_GCM_PARAMS);
+ alloc_fun = gcm_alloc_ctx;
+ break;
+ case AES_GMAC_MECH_INFO_TYPE:
+ param_len = sizeof (CK_AES_GMAC_PARAMS);
+ alloc_fun = gmac_alloc_ctx;
+ break;
+ default:
+ rv = CRYPTO_MECHANISM_INVALID;
+ return (rv);
+ }
+ if (param_required && mechanism->cm_param != NULL &&
+ mechanism->cm_param_len != param_len) {
+ rv = CRYPTO_MECHANISM_PARAM_INVALID;
+ }
+ if (ctx != NULL) {
+ p = (alloc_fun)(kmflag);
+ *ctx = p;
+ }
+ return (rv);
+}
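+
+/*
+ * Illustrative usage (mirroring the callers below): the routine both
+ * validates the mechanism parameter and, when ctx is non-NULL, allocates
+ * the matching mode context:
+ *
+ *	aes_ctx_t *actx;
+ *	rv = aes_check_mech_param(mechanism, &actx, crypto_kmflag(req));
+ *
+ * or, to validate only, as the atomic entry points do:
+ *
+ *	rv = aes_check_mech_param(mechanism, NULL, 0);
+ */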
+
+/*
+ * Initialize key schedules for AES
+ */
+static int
+init_keysched(crypto_key_t *key, void *newbie)
+{
+ /*
+ * Only keys by value are supported by this module.
+ */
+ switch (key->ck_format) {
+ case CRYPTO_KEY_RAW:
+ if (key->ck_length < AES_MINBITS ||
+ key->ck_length > AES_MAXBITS) {
+ return (CRYPTO_KEY_SIZE_RANGE);
+ }
+
+		/* key length must be either 128, 192, or 256 bits */
+ if ((key->ck_length & 63) != 0)
+ return (CRYPTO_KEY_SIZE_RANGE);
+ break;
+ default:
+ return (CRYPTO_KEY_TYPE_INCONSISTENT);
+ }
+
+ aes_init_keysched(key->ck_data, key->ck_length, newbie);
+ return (CRYPTO_SUCCESS);
+}
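+
+/*
+ * For reference, assuming AES_MINBITS == 128 and AES_MAXBITS == 256 as in
+ * aes_impl.h: the range check plus the (ck_length & 63) test above admit
+ * exactly the three standard AES key sizes, since
+ *
+ *	128 & 63 == 0, 192 & 63 == 0, 256 & 63 == 0,
+ *
+ * while an in-range size such as 160 (160 & 63 == 32) is rejected.
+ */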
+
+/*
+ * KCF software provider control entry points.
+ */
+/* ARGSUSED */
+static void
+aes_provider_status(crypto_provider_handle_t provider, uint_t *status)
+{
+ *status = CRYPTO_PROVIDER_READY;
+}
+
+static int
+aes_encrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_spi_ctx_template_t template,
+    crypto_req_handle_t req)
+{
+ return (aes_common_init(ctx, mechanism, key, template, req, B_TRUE));
+}
+
+static int
+aes_decrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_spi_ctx_template_t template,
+    crypto_req_handle_t req)
+{
+ return (aes_common_init(ctx, mechanism, key, template, req, B_FALSE));
+}
+
+/*
+ * KCF software provider encrypt entry points.
+ */
+static int
+aes_common_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_spi_ctx_template_t template,
+ crypto_req_handle_t req, boolean_t is_encrypt_init)
+{
+ aes_ctx_t *aes_ctx;
+ int rv;
+ int kmflag;
+
+ /*
+ * Only keys by value are supported by this module.
+ */
+ if (key->ck_format != CRYPTO_KEY_RAW) {
+ return (CRYPTO_KEY_TYPE_INCONSISTENT);
+ }
+
+ kmflag = crypto_kmflag(req);
+ if ((rv = aes_check_mech_param(mechanism, &aes_ctx, kmflag))
+ != CRYPTO_SUCCESS)
+ return (rv);
+
+ rv = aes_common_init_ctx(aes_ctx, template, mechanism, key, kmflag,
+ is_encrypt_init);
+ if (rv != CRYPTO_SUCCESS) {
+ crypto_free_mode_ctx(aes_ctx);
+ return (rv);
+ }
+
+ ctx->cc_provider_private = aes_ctx;
+
+ return (CRYPTO_SUCCESS);
+}
+
+static void
+aes_copy_block64(uint8_t *in, uint64_t *out)
+{
+ if (IS_P2ALIGNED(in, sizeof (uint64_t))) {
+ /* LINTED: pointer alignment */
+ out[0] = *(uint64_t *)&in[0];
+ /* LINTED: pointer alignment */
+ out[1] = *(uint64_t *)&in[8];
+ } else {
+ uint8_t *iv8 = (uint8_t *)&out[0];
+
+ AES_COPY_BLOCK(in, iv8);
+ }
+}
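+
+/*
+ * Note on the helper above: when the source is 8-byte aligned the 16-byte
+ * block is copied as two 64-bit loads; otherwise AES_COPY_BLOCK() falls
+ * back to a byte-wise copy, since unaligned 64-bit accesses fault on some
+ * architectures.
+ */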
+
+static int
+aes_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext,
+ crypto_data_t *ciphertext, crypto_req_handle_t req)
+{
+ int ret = CRYPTO_FAILED;
+
+ aes_ctx_t *aes_ctx;
+	off_t saved_offset;
+	size_t saved_length, length_needed;
+
+ ASSERT(ctx->cc_provider_private != NULL);
+ aes_ctx = ctx->cc_provider_private;
+
+	/*
+	 * For block ciphers, plaintext must be a multiple of the AES
+	 * block size. This test is only valid for ciphers whose block
+	 * size is a power of 2.
+	 */
+ if (((aes_ctx->ac_flags & (CTR_MODE|CCM_MODE|GCM_MODE|GMAC_MODE))
+ == 0) && (plaintext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
+ return (CRYPTO_DATA_LEN_RANGE);
+
+ AES_ARG_INPLACE(plaintext, ciphertext);
+
+	/*
+	 * If the output buffer is too small, we must return the length
+	 * needed to store the output without destroying the context.
+	 */
+ switch (aes_ctx->ac_flags & (CCM_MODE|GCM_MODE|GMAC_MODE)) {
+ case CCM_MODE:
+ length_needed = plaintext->cd_length + aes_ctx->ac_mac_len;
+ break;
+ case GCM_MODE:
+ length_needed = plaintext->cd_length + aes_ctx->ac_tag_len;
+ break;
+ case GMAC_MODE:
+ if (plaintext->cd_length != 0)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ length_needed = aes_ctx->ac_tag_len;
+ break;
+ default:
+ length_needed = plaintext->cd_length;
+ }
+
+ if (ciphertext->cd_length < length_needed) {
+ ciphertext->cd_length = length_needed;
+ return (CRYPTO_BUFFER_TOO_SMALL);
+ }
+
+ saved_length = ciphertext->cd_length;
+ saved_offset = ciphertext->cd_offset;
+
+ /*
+ * Do an update on the specified input data.
+ */
+ ret = aes_encrypt_update(ctx, plaintext, ciphertext, req);
+ if (ret != CRYPTO_SUCCESS) {
+ return (ret);
+ }
+
+ /*
+ * For CCM mode, aes_ccm_encrypt_final() will take care of any
+ * left-over unprocessed data, and compute the MAC
+ */
+ if (aes_ctx->ac_flags & CCM_MODE) {
+		/*
+		 * ccm_encrypt_final() will compute the MAC and append
+		 * it to the existing ciphertext, so we need to adjust
+		 * the leftover length value accordingly.
+		 */
+
+		/*
+		 * The order of the following two lines MUST NOT be
+		 * reversed: cd_length is read by both assignments and
+		 * overwritten by the second.
+		 */
+ ciphertext->cd_offset = ciphertext->cd_length;
+ ciphertext->cd_length = saved_length - ciphertext->cd_length;
+ ret = ccm_encrypt_final((ccm_ctx_t *)aes_ctx, ciphertext,
+ AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
+ if (ret != CRYPTO_SUCCESS) {
+ return (ret);
+ }
+
+ if (plaintext != ciphertext) {
+ ciphertext->cd_length =
+ ciphertext->cd_offset - saved_offset;
+ }
+ ciphertext->cd_offset = saved_offset;
+ } else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
+		/*
+		 * gcm_encrypt_final() will compute the MAC and append
+		 * it to the existing ciphertext, so we need to adjust
+		 * the leftover length value accordingly.
+		 */
+
+		/* the order of the following two lines MUST NOT be reversed */
+ ciphertext->cd_offset = ciphertext->cd_length;
+ ciphertext->cd_length = saved_length - ciphertext->cd_length;
+ ret = gcm_encrypt_final((gcm_ctx_t *)aes_ctx, ciphertext,
+ AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
+ aes_xor_block);
+ if (ret != CRYPTO_SUCCESS) {
+ return (ret);
+ }
+
+ if (plaintext != ciphertext) {
+ ciphertext->cd_length =
+ ciphertext->cd_offset - saved_offset;
+ }
+ ciphertext->cd_offset = saved_offset;
+ }
+
+ ASSERT(aes_ctx->ac_remainder_len == 0);
+ (void) aes_free_context(ctx);
+
+ return (ret);
+}
+
+
+static int
+aes_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
+ crypto_data_t *plaintext, crypto_req_handle_t req)
+{
+ int ret = CRYPTO_FAILED;
+
+ aes_ctx_t *aes_ctx;
+ off_t saved_offset;
+ size_t saved_length, length_needed;
+
+ ASSERT(ctx->cc_provider_private != NULL);
+ aes_ctx = ctx->cc_provider_private;
+
+	/*
+	 * For block ciphers, the ciphertext must be a multiple of the
+	 * AES block size. This test is only valid for ciphers whose
+	 * block size is a power of 2.
+	 */
+ if (((aes_ctx->ac_flags & (CTR_MODE|CCM_MODE|GCM_MODE|GMAC_MODE))
+ == 0) && (ciphertext->cd_length & (AES_BLOCK_LEN - 1)) != 0) {
+ return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
+ }
+
+ AES_ARG_INPLACE(ciphertext, plaintext);
+
+ /*
+ * Return length needed to store the output.
+ * Do not destroy context when plaintext buffer is too small.
+ *
+	 * CCM: plaintext is MAC len smaller than the ciphertext
+	 * GCM: plaintext is TAG len smaller than the ciphertext
+ * GMAC: plaintext length must be zero
+ */
+ switch (aes_ctx->ac_flags & (CCM_MODE|GCM_MODE|GMAC_MODE)) {
+ case CCM_MODE:
+ length_needed = aes_ctx->ac_processed_data_len;
+ break;
+ case GCM_MODE:
+ length_needed = ciphertext->cd_length - aes_ctx->ac_tag_len;
+ break;
+ case GMAC_MODE:
+ if (plaintext->cd_length != 0)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ length_needed = 0;
+ break;
+ default:
+ length_needed = ciphertext->cd_length;
+ }
+
+ if (plaintext->cd_length < length_needed) {
+ plaintext->cd_length = length_needed;
+ return (CRYPTO_BUFFER_TOO_SMALL);
+ }
+
+ saved_offset = plaintext->cd_offset;
+ saved_length = plaintext->cd_length;
+
+ /*
+ * Do an update on the specified input data.
+ */
+ ret = aes_decrypt_update(ctx, ciphertext, plaintext, req);
+ if (ret != CRYPTO_SUCCESS) {
+ goto cleanup;
+ }
+
+ if (aes_ctx->ac_flags & CCM_MODE) {
+ ASSERT(aes_ctx->ac_processed_data_len == aes_ctx->ac_data_len);
+ ASSERT(aes_ctx->ac_processed_mac_len == aes_ctx->ac_mac_len);
+
+		/* the order of the following two lines MUST NOT be reversed */
+ plaintext->cd_offset = plaintext->cd_length;
+ plaintext->cd_length = saved_length - plaintext->cd_length;
+
+ ret = ccm_decrypt_final((ccm_ctx_t *)aes_ctx, plaintext,
+ AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
+ aes_xor_block);
+ if (ret == CRYPTO_SUCCESS) {
+ if (plaintext != ciphertext) {
+ plaintext->cd_length =
+ plaintext->cd_offset - saved_offset;
+ }
+ } else {
+ plaintext->cd_length = saved_length;
+ }
+
+ plaintext->cd_offset = saved_offset;
+ } else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
+		/* the order of the following two lines MUST NOT be reversed */
+ plaintext->cd_offset = plaintext->cd_length;
+ plaintext->cd_length = saved_length - plaintext->cd_length;
+
+ ret = gcm_decrypt_final((gcm_ctx_t *)aes_ctx, plaintext,
+ AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
+ if (ret == CRYPTO_SUCCESS) {
+ if (plaintext != ciphertext) {
+ plaintext->cd_length =
+ plaintext->cd_offset - saved_offset;
+ }
+ } else {
+ plaintext->cd_length = saved_length;
+ }
+
+ plaintext->cd_offset = saved_offset;
+ }
+
+ ASSERT(aes_ctx->ac_remainder_len == 0);
+
+cleanup:
+ (void) aes_free_context(ctx);
+
+ return (ret);
+}
+
+
+/* ARGSUSED */
+static int
+aes_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext,
+ crypto_data_t *ciphertext, crypto_req_handle_t req)
+{
+ off_t saved_offset;
+ size_t saved_length, out_len;
+ int ret = CRYPTO_SUCCESS;
+ aes_ctx_t *aes_ctx;
+
+ ASSERT(ctx->cc_provider_private != NULL);
+ aes_ctx = ctx->cc_provider_private;
+
+ AES_ARG_INPLACE(plaintext, ciphertext);
+
+ /* compute number of bytes that will hold the ciphertext */
+ out_len = aes_ctx->ac_remainder_len;
+ out_len += plaintext->cd_length;
+ out_len &= ~(AES_BLOCK_LEN - 1);
+
+ /* return length needed to store the output */
+ if (ciphertext->cd_length < out_len) {
+ ciphertext->cd_length = out_len;
+ return (CRYPTO_BUFFER_TOO_SMALL);
+ }
+
+ saved_offset = ciphertext->cd_offset;
+ saved_length = ciphertext->cd_length;
+
+ /*
+ * Do the AES update on the specified input data.
+ */
+ switch (plaintext->cd_format) {
+ case CRYPTO_DATA_RAW:
+ ret = crypto_update_iov(ctx->cc_provider_private,
+ plaintext, ciphertext, aes_encrypt_contiguous_blocks,
+ aes_copy_block64);
+ break;
+ case CRYPTO_DATA_UIO:
+ ret = crypto_update_uio(ctx->cc_provider_private,
+ plaintext, ciphertext, aes_encrypt_contiguous_blocks,
+ aes_copy_block64);
+ break;
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ /*
+ * Since AES counter mode is a stream cipher, we call
+ * ctr_mode_final() to pick up any remaining bytes.
+ * It is an internal function that does not destroy
+ * the context like *normal* final routines.
+ */
+ if ((aes_ctx->ac_flags & CTR_MODE) && (aes_ctx->ac_remainder_len > 0)) {
+ ret = ctr_mode_final((ctr_ctx_t *)aes_ctx,
+ ciphertext, aes_encrypt_block);
+ }
+
+ if (ret == CRYPTO_SUCCESS) {
+ if (plaintext != ciphertext)
+ ciphertext->cd_length =
+ ciphertext->cd_offset - saved_offset;
+ } else {
+ ciphertext->cd_length = saved_length;
+ }
+ ciphertext->cd_offset = saved_offset;
+
+ return (ret);
+}
+
+
+static int
+aes_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
+ crypto_data_t *plaintext, crypto_req_handle_t req)
+{
+ off_t saved_offset;
+ size_t saved_length, out_len;
+ int ret = CRYPTO_SUCCESS;
+ aes_ctx_t *aes_ctx;
+
+ ASSERT(ctx->cc_provider_private != NULL);
+ aes_ctx = ctx->cc_provider_private;
+
+ AES_ARG_INPLACE(ciphertext, plaintext);
+
+ /*
+ * Compute number of bytes that will hold the plaintext.
+ * This is not necessary for CCM, GCM, and GMAC since these
+ * mechanisms never return plaintext for update operations.
+ */
+ if ((aes_ctx->ac_flags & (CCM_MODE|GCM_MODE|GMAC_MODE)) == 0) {
+ out_len = aes_ctx->ac_remainder_len;
+ out_len += ciphertext->cd_length;
+ out_len &= ~(AES_BLOCK_LEN - 1);
+
+ /* return length needed to store the output */
+ if (plaintext->cd_length < out_len) {
+ plaintext->cd_length = out_len;
+ return (CRYPTO_BUFFER_TOO_SMALL);
+ }
+ }
+
+ saved_offset = plaintext->cd_offset;
+ saved_length = plaintext->cd_length;
+
+ if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE))
+ gcm_set_kmflag((gcm_ctx_t *)aes_ctx, crypto_kmflag(req));
+
+ /*
+ * Do the AES update on the specified input data.
+ */
+ switch (ciphertext->cd_format) {
+ case CRYPTO_DATA_RAW:
+ ret = crypto_update_iov(ctx->cc_provider_private,
+ ciphertext, plaintext, aes_decrypt_contiguous_blocks,
+ aes_copy_block64);
+ break;
+ case CRYPTO_DATA_UIO:
+ ret = crypto_update_uio(ctx->cc_provider_private,
+ ciphertext, plaintext, aes_decrypt_contiguous_blocks,
+ aes_copy_block64);
+ break;
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ /*
+ * Since AES counter mode is a stream cipher, we call
+ * ctr_mode_final() to pick up any remaining bytes.
+ * It is an internal function that does not destroy
+ * the context like *normal* final routines.
+ */
+ if ((aes_ctx->ac_flags & CTR_MODE) && (aes_ctx->ac_remainder_len > 0)) {
+ ret = ctr_mode_final((ctr_ctx_t *)aes_ctx, plaintext,
+ aes_encrypt_block);
+ if (ret == CRYPTO_DATA_LEN_RANGE)
+ ret = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
+ }
+
+ if (ret == CRYPTO_SUCCESS) {
+ if (ciphertext != plaintext)
+ plaintext->cd_length =
+ plaintext->cd_offset - saved_offset;
+ } else {
+ plaintext->cd_length = saved_length;
+ }
+ plaintext->cd_offset = saved_offset;
+
+ return (ret);
+}
+
+/* ARGSUSED */
+static int
+aes_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *data,
+ crypto_req_handle_t req)
+{
+ aes_ctx_t *aes_ctx;
+ int ret;
+
+ ASSERT(ctx->cc_provider_private != NULL);
+ aes_ctx = ctx->cc_provider_private;
+
+ if (data->cd_format != CRYPTO_DATA_RAW &&
+ data->cd_format != CRYPTO_DATA_UIO) {
+ return (CRYPTO_ARGUMENTS_BAD);
+ }
+
+ if (aes_ctx->ac_flags & CTR_MODE) {
+ if (aes_ctx->ac_remainder_len > 0) {
+ ret = ctr_mode_final((ctr_ctx_t *)aes_ctx, data,
+ aes_encrypt_block);
+ if (ret != CRYPTO_SUCCESS)
+ return (ret);
+ }
+ } else if (aes_ctx->ac_flags & CCM_MODE) {
+ ret = ccm_encrypt_final((ccm_ctx_t *)aes_ctx, data,
+ AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
+ if (ret != CRYPTO_SUCCESS) {
+ return (ret);
+ }
+ } else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
+ size_t saved_offset = data->cd_offset;
+
+ ret = gcm_encrypt_final((gcm_ctx_t *)aes_ctx, data,
+ AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
+ aes_xor_block);
+ if (ret != CRYPTO_SUCCESS) {
+ return (ret);
+ }
+ data->cd_length = data->cd_offset - saved_offset;
+ data->cd_offset = saved_offset;
+ } else {
+ /*
+		 * There must be no unprocessed plaintext; a remainder
+		 * is left over if the length of the last data chunk is
+		 * not a multiple of the AES block length.
+ */
+ if (aes_ctx->ac_remainder_len > 0) {
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+ data->cd_length = 0;
+ }
+
+ (void) aes_free_context(ctx);
+
+ return (CRYPTO_SUCCESS);
+}
+
+/* ARGSUSED */
+static int
+aes_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *data,
+ crypto_req_handle_t req)
+{
+ aes_ctx_t *aes_ctx;
+ int ret;
+ off_t saved_offset;
+ size_t saved_length;
+
+ ASSERT(ctx->cc_provider_private != NULL);
+ aes_ctx = ctx->cc_provider_private;
+
+ if (data->cd_format != CRYPTO_DATA_RAW &&
+ data->cd_format != CRYPTO_DATA_UIO) {
+ return (CRYPTO_ARGUMENTS_BAD);
+ }
+
+ /*
+	 * There must be no unprocessed ciphertext; a remainder is
+	 * left over if the length of the last ciphertext chunk is
+	 * not a multiple of the AES block length.
+ */
+ if (aes_ctx->ac_remainder_len > 0) {
+ if ((aes_ctx->ac_flags & CTR_MODE) == 0)
+ return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
+ else {
+ ret = ctr_mode_final((ctr_ctx_t *)aes_ctx, data,
+ aes_encrypt_block);
+ if (ret == CRYPTO_DATA_LEN_RANGE)
+ ret = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
+ if (ret != CRYPTO_SUCCESS)
+ return (ret);
+ }
+ }
+
+ if (aes_ctx->ac_flags & CCM_MODE) {
+ /*
+ * This is where all the plaintext is returned, make sure
+ * the plaintext buffer is big enough
+ */
+ size_t pt_len = aes_ctx->ac_data_len;
+ if (data->cd_length < pt_len) {
+ data->cd_length = pt_len;
+ return (CRYPTO_BUFFER_TOO_SMALL);
+ }
+
+ ASSERT(aes_ctx->ac_processed_data_len == pt_len);
+ ASSERT(aes_ctx->ac_processed_mac_len == aes_ctx->ac_mac_len);
+ saved_offset = data->cd_offset;
+ saved_length = data->cd_length;
+ ret = ccm_decrypt_final((ccm_ctx_t *)aes_ctx, data,
+ AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
+ aes_xor_block);
+ if (ret == CRYPTO_SUCCESS) {
+ data->cd_length = data->cd_offset - saved_offset;
+ } else {
+ data->cd_length = saved_length;
+ }
+
+ data->cd_offset = saved_offset;
+ if (ret != CRYPTO_SUCCESS) {
+ return (ret);
+ }
+ } else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
+ /*
+ * This is where all the plaintext is returned, make sure
+ * the plaintext buffer is big enough
+ */
+ gcm_ctx_t *ctx = (gcm_ctx_t *)aes_ctx;
+ size_t pt_len = ctx->gcm_processed_data_len - ctx->gcm_tag_len;
+
+ if (data->cd_length < pt_len) {
+ data->cd_length = pt_len;
+ return (CRYPTO_BUFFER_TOO_SMALL);
+ }
+
+ saved_offset = data->cd_offset;
+ saved_length = data->cd_length;
+ ret = gcm_decrypt_final((gcm_ctx_t *)aes_ctx, data,
+ AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
+ if (ret == CRYPTO_SUCCESS) {
+ data->cd_length = data->cd_offset - saved_offset;
+ } else {
+ data->cd_length = saved_length;
+ }
+
+ data->cd_offset = saved_offset;
+ if (ret != CRYPTO_SUCCESS) {
+ return (ret);
+ }
+ }
+
+ if ((aes_ctx->ac_flags & (CTR_MODE|CCM_MODE|GCM_MODE|GMAC_MODE)) == 0) {
+ data->cd_length = 0;
+ }
+
+ (void) aes_free_context(ctx);
+
+ return (CRYPTO_SUCCESS);
+}
+
+/* ARGSUSED */
+static int
+aes_encrypt_atomic(crypto_provider_handle_t provider,
+ crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_data_t *plaintext, crypto_data_t *ciphertext,
+ crypto_spi_ctx_template_t template, crypto_req_handle_t req)
+{
+ aes_ctx_t aes_ctx; /* on the stack */
+ off_t saved_offset;
+ size_t saved_length;
+ size_t length_needed;
+ int ret;
+
+ AES_ARG_INPLACE(plaintext, ciphertext);
+
+ /*
+ * CTR, CCM, GCM, and GMAC modes do not require that plaintext
+ * be a multiple of AES block size.
+ */
+ switch (mechanism->cm_type) {
+ case AES_CTR_MECH_INFO_TYPE:
+ case AES_CCM_MECH_INFO_TYPE:
+ case AES_GCM_MECH_INFO_TYPE:
+ case AES_GMAC_MECH_INFO_TYPE:
+ break;
+ default:
+ if ((plaintext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+
+ if ((ret = aes_check_mech_param(mechanism, NULL, 0)) != CRYPTO_SUCCESS)
+ return (ret);
+
+ bzero(&aes_ctx, sizeof (aes_ctx_t));
+
+ ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key,
+ crypto_kmflag(req), B_TRUE);
+ if (ret != CRYPTO_SUCCESS)
+ return (ret);
+
+ switch (mechanism->cm_type) {
+ case AES_CCM_MECH_INFO_TYPE:
+ length_needed = plaintext->cd_length + aes_ctx.ac_mac_len;
+ break;
+ case AES_GMAC_MECH_INFO_TYPE:
+		if (plaintext->cd_length != 0) {
+			/* do not leak the key schedule allocated above */
+			ret = CRYPTO_ARGUMENTS_BAD;
+			goto out;
+		}
+ /* FALLTHRU */
+ case AES_GCM_MECH_INFO_TYPE:
+ length_needed = plaintext->cd_length + aes_ctx.ac_tag_len;
+ break;
+ default:
+ length_needed = plaintext->cd_length;
+ }
+
+ /* return size of buffer needed to store output */
+ if (ciphertext->cd_length < length_needed) {
+ ciphertext->cd_length = length_needed;
+ ret = CRYPTO_BUFFER_TOO_SMALL;
+ goto out;
+ }
+
+ saved_offset = ciphertext->cd_offset;
+ saved_length = ciphertext->cd_length;
+
+ /*
+ * Do an update on the specified input data.
+ */
+ switch (plaintext->cd_format) {
+ case CRYPTO_DATA_RAW:
+ ret = crypto_update_iov(&aes_ctx, plaintext, ciphertext,
+ aes_encrypt_contiguous_blocks, aes_copy_block64);
+ break;
+ case CRYPTO_DATA_UIO:
+ ret = crypto_update_uio(&aes_ctx, plaintext, ciphertext,
+ aes_encrypt_contiguous_blocks, aes_copy_block64);
+ break;
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ if (ret == CRYPTO_SUCCESS) {
+ if (mechanism->cm_type == AES_CCM_MECH_INFO_TYPE) {
+ ret = ccm_encrypt_final((ccm_ctx_t *)&aes_ctx,
+ ciphertext, AES_BLOCK_LEN, aes_encrypt_block,
+ aes_xor_block);
+ if (ret != CRYPTO_SUCCESS)
+ goto out;
+ ASSERT(aes_ctx.ac_remainder_len == 0);
+ } else if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE ||
+ mechanism->cm_type == AES_GMAC_MECH_INFO_TYPE) {
+ ret = gcm_encrypt_final((gcm_ctx_t *)&aes_ctx,
+ ciphertext, AES_BLOCK_LEN, aes_encrypt_block,
+ aes_copy_block, aes_xor_block);
+ if (ret != CRYPTO_SUCCESS)
+ goto out;
+ ASSERT(aes_ctx.ac_remainder_len == 0);
+ } else if (mechanism->cm_type == AES_CTR_MECH_INFO_TYPE) {
+ if (aes_ctx.ac_remainder_len > 0) {
+ ret = ctr_mode_final((ctr_ctx_t *)&aes_ctx,
+ ciphertext, aes_encrypt_block);
+ if (ret != CRYPTO_SUCCESS)
+ goto out;
+ }
+ } else {
+ ASSERT(aes_ctx.ac_remainder_len == 0);
+ }
+
+ if (plaintext != ciphertext) {
+ ciphertext->cd_length =
+ ciphertext->cd_offset - saved_offset;
+ }
+ } else {
+ ciphertext->cd_length = saved_length;
+ }
+ ciphertext->cd_offset = saved_offset;
+
+out:
+ if (aes_ctx.ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
+ bzero(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
+ kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
+ }
+
+ return (ret);
+}
+
+/* ARGSUSED */
+static int
+aes_decrypt_atomic(crypto_provider_handle_t provider,
+ crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_data_t *ciphertext, crypto_data_t *plaintext,
+ crypto_spi_ctx_template_t template, crypto_req_handle_t req)
+{
+ aes_ctx_t aes_ctx; /* on the stack */
+ off_t saved_offset;
+ size_t saved_length;
+ size_t length_needed;
+ int ret;
+
+ AES_ARG_INPLACE(ciphertext, plaintext);
+
+ /*
+ * CCM, GCM, CTR, and GMAC modes do not require that ciphertext
+ * be a multiple of AES block size.
+ */
+ switch (mechanism->cm_type) {
+ case AES_CTR_MECH_INFO_TYPE:
+ case AES_CCM_MECH_INFO_TYPE:
+ case AES_GCM_MECH_INFO_TYPE:
+ case AES_GMAC_MECH_INFO_TYPE:
+ break;
+ default:
+ if ((ciphertext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
+ return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
+ }
+
+ if ((ret = aes_check_mech_param(mechanism, NULL, 0)) != CRYPTO_SUCCESS)
+ return (ret);
+
+ bzero(&aes_ctx, sizeof (aes_ctx_t));
+
+ ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key,
+ crypto_kmflag(req), B_FALSE);
+ if (ret != CRYPTO_SUCCESS)
+ return (ret);
+
+ switch (mechanism->cm_type) {
+ case AES_CCM_MECH_INFO_TYPE:
+ length_needed = aes_ctx.ac_data_len;
+ break;
+ case AES_GCM_MECH_INFO_TYPE:
+ length_needed = ciphertext->cd_length - aes_ctx.ac_tag_len;
+ break;
+ case AES_GMAC_MECH_INFO_TYPE:
+		if (plaintext->cd_length != 0) {
+			/* do not leak the key schedule allocated above */
+			ret = CRYPTO_ARGUMENTS_BAD;
+			goto out;
+		}
+ length_needed = 0;
+ break;
+ default:
+ length_needed = ciphertext->cd_length;
+ }
+
+ /* return size of buffer needed to store output */
+ if (plaintext->cd_length < length_needed) {
+ plaintext->cd_length = length_needed;
+ ret = CRYPTO_BUFFER_TOO_SMALL;
+ goto out;
+ }
+
+ saved_offset = plaintext->cd_offset;
+ saved_length = plaintext->cd_length;
+
+ if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE ||
+ mechanism->cm_type == AES_GMAC_MECH_INFO_TYPE)
+ gcm_set_kmflag((gcm_ctx_t *)&aes_ctx, crypto_kmflag(req));
+
+ /*
+ * Do an update on the specified input data.
+ */
+ switch (ciphertext->cd_format) {
+ case CRYPTO_DATA_RAW:
+ ret = crypto_update_iov(&aes_ctx, ciphertext, plaintext,
+ aes_decrypt_contiguous_blocks, aes_copy_block64);
+ break;
+ case CRYPTO_DATA_UIO:
+ ret = crypto_update_uio(&aes_ctx, ciphertext, plaintext,
+ aes_decrypt_contiguous_blocks, aes_copy_block64);
+ break;
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ if (ret == CRYPTO_SUCCESS) {
+ if (mechanism->cm_type == AES_CCM_MECH_INFO_TYPE) {
+ ASSERT(aes_ctx.ac_processed_data_len
+ == aes_ctx.ac_data_len);
+ ASSERT(aes_ctx.ac_processed_mac_len
+ == aes_ctx.ac_mac_len);
+ ret = ccm_decrypt_final((ccm_ctx_t *)&aes_ctx,
+ plaintext, AES_BLOCK_LEN, aes_encrypt_block,
+ aes_copy_block, aes_xor_block);
+ ASSERT(aes_ctx.ac_remainder_len == 0);
+ if ((ret == CRYPTO_SUCCESS) &&
+ (ciphertext != plaintext)) {
+ plaintext->cd_length =
+ plaintext->cd_offset - saved_offset;
+ } else {
+ plaintext->cd_length = saved_length;
+ }
+ } else if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE ||
+ mechanism->cm_type == AES_GMAC_MECH_INFO_TYPE) {
+ ret = gcm_decrypt_final((gcm_ctx_t *)&aes_ctx,
+ plaintext, AES_BLOCK_LEN, aes_encrypt_block,
+ aes_xor_block);
+ ASSERT(aes_ctx.ac_remainder_len == 0);
+ if ((ret == CRYPTO_SUCCESS) &&
+ (ciphertext != plaintext)) {
+ plaintext->cd_length =
+ plaintext->cd_offset - saved_offset;
+ } else {
+ plaintext->cd_length = saved_length;
+ }
+ } else if (mechanism->cm_type != AES_CTR_MECH_INFO_TYPE) {
+ ASSERT(aes_ctx.ac_remainder_len == 0);
+ if (ciphertext != plaintext)
+ plaintext->cd_length =
+ plaintext->cd_offset - saved_offset;
+ } else {
+ if (aes_ctx.ac_remainder_len > 0) {
+ ret = ctr_mode_final((ctr_ctx_t *)&aes_ctx,
+ plaintext, aes_encrypt_block);
+ if (ret == CRYPTO_DATA_LEN_RANGE)
+ ret = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
+ if (ret != CRYPTO_SUCCESS)
+ goto out;
+ }
+ if (ciphertext != plaintext)
+ plaintext->cd_length =
+ plaintext->cd_offset - saved_offset;
+ }
+ } else {
+ plaintext->cd_length = saved_length;
+ }
+ plaintext->cd_offset = saved_offset;
+
+out:
+ if (aes_ctx.ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
+ bzero(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
+ kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
+ }
+
+ if (aes_ctx.ac_flags & CCM_MODE) {
+ if (aes_ctx.ac_pt_buf != NULL) {
+ vmem_free(aes_ctx.ac_pt_buf, aes_ctx.ac_data_len);
+ }
+ } else if (aes_ctx.ac_flags & (GCM_MODE|GMAC_MODE)) {
+ if (((gcm_ctx_t *)&aes_ctx)->gcm_pt_buf != NULL) {
+ vmem_free(((gcm_ctx_t *)&aes_ctx)->gcm_pt_buf,
+ ((gcm_ctx_t *)&aes_ctx)->gcm_pt_buf_len);
+ }
+ }
+
+ return (ret);
+}
+
+/*
+ * KCF software provider context template entry points.
+ */
+/* ARGSUSED */
+static int
+aes_create_ctx_template(crypto_provider_handle_t provider,
+ crypto_mechanism_t *mechanism, crypto_key_t *key,
+ crypto_spi_ctx_template_t *tmpl, size_t *tmpl_size, crypto_req_handle_t req)
+{
+ void *keysched;
+ size_t size;
+ int rv;
+
+ if (mechanism->cm_type != AES_ECB_MECH_INFO_TYPE &&
+ mechanism->cm_type != AES_CBC_MECH_INFO_TYPE &&
+ mechanism->cm_type != AES_CTR_MECH_INFO_TYPE &&
+ mechanism->cm_type != AES_CCM_MECH_INFO_TYPE &&
+ mechanism->cm_type != AES_GCM_MECH_INFO_TYPE &&
+ mechanism->cm_type != AES_GMAC_MECH_INFO_TYPE)
+ return (CRYPTO_MECHANISM_INVALID);
+
+ if ((keysched = aes_alloc_keysched(&size,
+ crypto_kmflag(req))) == NULL) {
+ return (CRYPTO_HOST_MEMORY);
+ }
+
+ /*
+ * Initialize key schedule. Key length information is stored
+ * in the key.
+ */
+ if ((rv = init_keysched(key, keysched)) != CRYPTO_SUCCESS) {
+ bzero(keysched, size);
+ kmem_free(keysched, size);
+ return (rv);
+ }
+
+ *tmpl = keysched;
+ *tmpl_size = size;
+
+ return (CRYPTO_SUCCESS);
+}
+
+
+static int
+aes_free_context(crypto_ctx_t *ctx)
+{
+ aes_ctx_t *aes_ctx = ctx->cc_provider_private;
+
+ if (aes_ctx != NULL) {
+ if (aes_ctx->ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
+ ASSERT(aes_ctx->ac_keysched_len != 0);
+ bzero(aes_ctx->ac_keysched, aes_ctx->ac_keysched_len);
+ kmem_free(aes_ctx->ac_keysched,
+ aes_ctx->ac_keysched_len);
+ }
+ crypto_free_mode_ctx(aes_ctx);
+ ctx->cc_provider_private = NULL;
+ }
+
+ return (CRYPTO_SUCCESS);
+}
+
+
+static int
+aes_common_init_ctx(aes_ctx_t *aes_ctx, crypto_spi_ctx_template_t *template,
+ crypto_mechanism_t *mechanism, crypto_key_t *key, int kmflag,
+ boolean_t is_encrypt_init)
+{
+ int rv = CRYPTO_SUCCESS;
+ void *keysched;
+ size_t size;
+
+ if (template == NULL) {
+ if ((keysched = aes_alloc_keysched(&size, kmflag)) == NULL)
+ return (CRYPTO_HOST_MEMORY);
+ /*
+ * Initialize key schedule.
+ * Key length is stored in the key.
+ */
+ if ((rv = init_keysched(key, keysched)) != CRYPTO_SUCCESS) {
+ kmem_free(keysched, size);
+ return (rv);
+ }
+
+ aes_ctx->ac_flags |= PROVIDER_OWNS_KEY_SCHEDULE;
+ aes_ctx->ac_keysched_len = size;
+ } else {
+ keysched = template;
+ }
+ aes_ctx->ac_keysched = keysched;
+
+ switch (mechanism->cm_type) {
+ case AES_CBC_MECH_INFO_TYPE:
+ rv = cbc_init_ctx((cbc_ctx_t *)aes_ctx, mechanism->cm_param,
+ mechanism->cm_param_len, AES_BLOCK_LEN, aes_copy_block64);
+ break;
+ case AES_CTR_MECH_INFO_TYPE: {
+ CK_AES_CTR_PARAMS *pp;
+
+ if (mechanism->cm_param == NULL ||
+ mechanism->cm_param_len != sizeof (CK_AES_CTR_PARAMS)) {
+ return (CRYPTO_MECHANISM_PARAM_INVALID);
+ }
+ pp = (CK_AES_CTR_PARAMS *)(void *)mechanism->cm_param;
+ rv = ctr_init_ctx((ctr_ctx_t *)aes_ctx, pp->ulCounterBits,
+ pp->cb, aes_copy_block);
+ break;
+ }
+ case AES_CCM_MECH_INFO_TYPE:
+ if (mechanism->cm_param == NULL ||
+ mechanism->cm_param_len != sizeof (CK_AES_CCM_PARAMS)) {
+ return (CRYPTO_MECHANISM_PARAM_INVALID);
+ }
+ rv = ccm_init_ctx((ccm_ctx_t *)aes_ctx, mechanism->cm_param,
+ kmflag, is_encrypt_init, AES_BLOCK_LEN, aes_encrypt_block,
+ aes_xor_block);
+ break;
+ case AES_GCM_MECH_INFO_TYPE:
+ if (mechanism->cm_param == NULL ||
+ mechanism->cm_param_len != sizeof (CK_AES_GCM_PARAMS)) {
+ return (CRYPTO_MECHANISM_PARAM_INVALID);
+ }
+ rv = gcm_init_ctx((gcm_ctx_t *)aes_ctx, mechanism->cm_param,
+ AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
+ aes_xor_block);
+ break;
+ case AES_GMAC_MECH_INFO_TYPE:
+ if (mechanism->cm_param == NULL ||
+ mechanism->cm_param_len != sizeof (CK_AES_GMAC_PARAMS)) {
+ return (CRYPTO_MECHANISM_PARAM_INVALID);
+ }
+ rv = gmac_init_ctx((gcm_ctx_t *)aes_ctx, mechanism->cm_param,
+ AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
+ aes_xor_block);
+ break;
+ case AES_ECB_MECH_INFO_TYPE:
+ aes_ctx->ac_flags |= ECB_MODE;
+ }
+
+ if (rv != CRYPTO_SUCCESS) {
+ if (aes_ctx->ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
+ bzero(keysched, size);
+ kmem_free(keysched, size);
+ }
+ }
+
+ return (rv);
+}
+
+static int
+process_gmac_mech(crypto_mechanism_t *mech, crypto_data_t *data,
+ CK_AES_GCM_PARAMS *gcm_params)
+{
+ /* LINTED: pointer alignment */
+ CK_AES_GMAC_PARAMS *params = (CK_AES_GMAC_PARAMS *)mech->cm_param;
+
+ if (mech->cm_type != AES_GMAC_MECH_INFO_TYPE)
+ return (CRYPTO_MECHANISM_INVALID);
+
+ if (mech->cm_param_len != sizeof (CK_AES_GMAC_PARAMS))
+ return (CRYPTO_MECHANISM_PARAM_INVALID);
+
+ if (params->pIv == NULL)
+ return (CRYPTO_MECHANISM_PARAM_INVALID);
+
+ gcm_params->pIv = params->pIv;
+ gcm_params->ulIvLen = AES_GMAC_IV_LEN;
+ gcm_params->ulTagBits = AES_GMAC_TAG_BITS;
+
+ if (data == NULL)
+ return (CRYPTO_SUCCESS);
+
+ if (data->cd_format != CRYPTO_DATA_RAW)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ gcm_params->pAAD = (uchar_t *)data->cd_raw.iov_base;
+ gcm_params->ulAADLen = data->cd_length;
+ return (CRYPTO_SUCCESS);
+}
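+
+/*
+ * GMAC is GCM restricted to authentication: the wrappers below run a normal
+ * GCM operation whose payload is the zero-length null_crypto_data, so only
+ * the tag is produced (aes_mac_atomic) or checked (aes_mac_verify_atomic),
+ * while the data being authenticated travels as AAD in the GCM parameters.
+ */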
+
+static int
+aes_mac_atomic(crypto_provider_handle_t provider,
+ crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
+ crypto_spi_ctx_template_t template, crypto_req_handle_t req)
+{
+ CK_AES_GCM_PARAMS gcm_params;
+ crypto_mechanism_t gcm_mech;
+ int rv;
+
+ if ((rv = process_gmac_mech(mechanism, data, &gcm_params))
+ != CRYPTO_SUCCESS)
+ return (rv);
+
+ gcm_mech.cm_type = AES_GCM_MECH_INFO_TYPE;
+ gcm_mech.cm_param_len = sizeof (CK_AES_GCM_PARAMS);
+ gcm_mech.cm_param = (char *)&gcm_params;
+
+ return (aes_encrypt_atomic(provider, session_id, &gcm_mech,
+ key, &null_crypto_data, mac, template, req));
+}
+
+static int
+aes_mac_verify_atomic(crypto_provider_handle_t provider,
+ crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
+ crypto_spi_ctx_template_t template, crypto_req_handle_t req)
+{
+ CK_AES_GCM_PARAMS gcm_params;
+ crypto_mechanism_t gcm_mech;
+ int rv;
+
+ if ((rv = process_gmac_mech(mechanism, data, &gcm_params))
+ != CRYPTO_SUCCESS)
+ return (rv);
+
+ gcm_mech.cm_type = AES_GCM_MECH_INFO_TYPE;
+ gcm_mech.cm_param_len = sizeof (CK_AES_GCM_PARAMS);
+ gcm_mech.cm_param = (char *)&gcm_params;
+
+ return (aes_decrypt_atomic(provider, session_id, &gcm_mech,
+ key, mac, &null_crypto_data, template, req));
+}
diff --git a/module/icp/io/sha1_mod.c b/module/icp/io/sha1_mod.c
new file mode 100644
index 000000000..a278dac7f
--- /dev/null
+++ b/module/icp/io/sha1_mod.c
@@ -0,0 +1,1239 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/zfs_context.h>
+#include <sys/modctl.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/spi.h>
+
+#include <sha1/sha1.h>
+#include <sha1/sha1_impl.h>
+
+/*
+ * The sha1 module is created with a single modlinkage:
+ * - a modlcrypto that allows the module to register with the Kernel
+ *   Cryptographic Framework (KCF) as a software provider for the SHA1
+ *   mechanisms.
+ * (The Illumos modlmisc linkage, which exposed SHA1Init, SHA1Update, and
+ * SHA1Final directly, is not carried over in this port.)
+ */
+
+static struct modlcrypto modlcrypto = {
+ &mod_cryptoops,
+ "SHA1 Kernel SW Provider 1.1"
+};
+
+static struct modlinkage modlinkage = {
+ MODREV_1, { &modlcrypto, NULL }
+};
+
+
+/*
+ * Macros to access the SHA1 or SHA1-HMAC contexts from a context passed
+ * by KCF to one of the entry points.
+ */
+
+#define PROV_SHA1_CTX(ctx) ((sha1_ctx_t *)(ctx)->cc_provider_private)
+#define PROV_SHA1_HMAC_CTX(ctx) ((sha1_hmac_ctx_t *)(ctx)->cc_provider_private)
+
+/* to extract the digest length passed as mechanism parameter */
+#define PROV_SHA1_GET_DIGEST_LEN(m, len) { \
+ if (IS_P2ALIGNED((m)->cm_param, sizeof (ulong_t))) \
+		(len) = (uint32_t)*((ulong_t *)(void *)(m)->cm_param); \
+ else { \
+ ulong_t tmp_ulong; \
+ bcopy((m)->cm_param, &tmp_ulong, sizeof (ulong_t)); \
+ (len) = (uint32_t)tmp_ulong; \
+ } \
+}
+
+#define PROV_SHA1_DIGEST_KEY(ctx, key, len, digest) { \
+ SHA1Init(ctx); \
+ SHA1Update(ctx, key, len); \
+ SHA1Final(digest, ctx); \
+}
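+
+/*
+ * Per the HMAC construction, keys longer than the hash block size are first
+ * digested down to SHA1_DIGEST_LENGTH bytes; sha1_mac_init() below uses the
+ * macro above for exactly that case.
+ */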
+
+/*
+ * Mechanism info structure passed to KCF during registration.
+ */
+static crypto_mech_info_t sha1_mech_info_tab[] = {
+ /* SHA1 */
+ {SUN_CKM_SHA1, SHA1_MECH_INFO_TYPE,
+ CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
+ 0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS},
+ /* SHA1-HMAC */
+ {SUN_CKM_SHA1_HMAC, SHA1_HMAC_MECH_INFO_TYPE,
+ CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
+ SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN,
+ CRYPTO_KEYSIZE_UNIT_IN_BYTES},
+ /* SHA1-HMAC GENERAL */
+ {SUN_CKM_SHA1_HMAC_GENERAL, SHA1_HMAC_GEN_MECH_INFO_TYPE,
+ CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
+ SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN,
+ CRYPTO_KEYSIZE_UNIT_IN_BYTES}
+};
+
+static void sha1_provider_status(crypto_provider_handle_t, uint_t *);
+
+static crypto_control_ops_t sha1_control_ops = {
+ sha1_provider_status
+};
+
+static int sha1_digest_init(crypto_ctx_t *, crypto_mechanism_t *,
+ crypto_req_handle_t);
+static int sha1_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
+ crypto_req_handle_t);
+static int sha1_digest_update(crypto_ctx_t *, crypto_data_t *,
+ crypto_req_handle_t);
+static int sha1_digest_final(crypto_ctx_t *, crypto_data_t *,
+ crypto_req_handle_t);
+static int sha1_digest_atomic(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_data_t *, crypto_data_t *,
+ crypto_req_handle_t);
+
+static crypto_digest_ops_t sha1_digest_ops = {
+ sha1_digest_init,
+ sha1_digest,
+ sha1_digest_update,
+ NULL,
+ sha1_digest_final,
+ sha1_digest_atomic
+};
+
+static int sha1_mac_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
+ crypto_spi_ctx_template_t, crypto_req_handle_t);
+static int sha1_mac_update(crypto_ctx_t *, crypto_data_t *,
+ crypto_req_handle_t);
+static int sha1_mac_final(crypto_ctx_t *, crypto_data_t *, crypto_req_handle_t);
+static int sha1_mac_atomic(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
+ crypto_spi_ctx_template_t, crypto_req_handle_t);
+static int sha1_mac_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
+ crypto_spi_ctx_template_t, crypto_req_handle_t);
+
+static crypto_mac_ops_t sha1_mac_ops = {
+ sha1_mac_init,
+ NULL,
+ sha1_mac_update,
+ sha1_mac_final,
+ sha1_mac_atomic,
+ sha1_mac_verify_atomic
+};
+
+static int sha1_create_ctx_template(crypto_provider_handle_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *,
+ size_t *, crypto_req_handle_t);
+static int sha1_free_context(crypto_ctx_t *);
+
+static crypto_ctx_ops_t sha1_ctx_ops = {
+ sha1_create_ctx_template,
+ sha1_free_context
+};
+
+static crypto_ops_t sha1_crypto_ops = {{{{{
+ &sha1_control_ops,
+ &sha1_digest_ops,
+ NULL,
+ &sha1_mac_ops,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ &sha1_ctx_ops,
+}}}}};
+
+static crypto_provider_info_t sha1_prov_info = {{{{
+ CRYPTO_SPI_VERSION_1,
+ "SHA1 Software Provider",
+ CRYPTO_SW_PROVIDER,
+ NULL,
+ &sha1_crypto_ops,
+ sizeof (sha1_mech_info_tab)/sizeof (crypto_mech_info_t),
+ sha1_mech_info_tab
+}}}};
+
+static crypto_kcf_provider_handle_t sha1_prov_handle = 0;
+
+int
+sha1_mod_init(void)
+{
+ int ret;
+
+ if ((ret = mod_install(&modlinkage)) != 0)
+ return (ret);
+
+	/*
+	 * Register with KCF. If the registration fails, log an
+	 * error but do not uninstall the module, since the SHA1Init,
+	 * SHA1Update, and SHA1Final entry points are still directly
+	 * callable by in-kernel consumers.
+	 */
+ if ((ret = crypto_register_provider(&sha1_prov_info,
+ &sha1_prov_handle)) != CRYPTO_SUCCESS)
+ cmn_err(CE_WARN, "sha1 _init: "
+ "crypto_register_provider() failed (0x%x)", ret);
+
+ return (0);
+}
+
+int
+sha1_mod_fini(void)
+{
+ int ret;
+
+ if (sha1_prov_handle != 0) {
+ if ((ret = crypto_unregister_provider(sha1_prov_handle)) !=
+ CRYPTO_SUCCESS) {
+ cmn_err(CE_WARN,
+ "sha1 _fini: crypto_unregister_provider() "
+ "failed (0x%x)", ret);
+ return (EBUSY);
+ }
+ sha1_prov_handle = 0;
+ }
+
+ return (mod_remove(&modlinkage));
+}
+
+/*
+ * KCF software provider control entry points.
+ */
+/* ARGSUSED */
+static void
+sha1_provider_status(crypto_provider_handle_t provider, uint_t *status)
+{
+ *status = CRYPTO_PROVIDER_READY;
+}
+
+/*
+ * KCF software provider digest entry points.
+ */
+
+static int
+sha1_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
+ crypto_req_handle_t req)
+{
+ if (mechanism->cm_type != SHA1_MECH_INFO_TYPE)
+ return (CRYPTO_MECHANISM_INVALID);
+
+ /*
+ * Allocate and initialize SHA1 context.
+ */
+ ctx->cc_provider_private = kmem_alloc(sizeof (sha1_ctx_t),
+ crypto_kmflag(req));
+ if (ctx->cc_provider_private == NULL)
+ return (CRYPTO_HOST_MEMORY);
+
+ PROV_SHA1_CTX(ctx)->sc_mech_type = SHA1_MECH_INFO_TYPE;
+ SHA1Init(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
+
+ return (CRYPTO_SUCCESS);
+}
+
+/*
+ * Helper SHA1 digest update function for uio data.
+ */
+static int
+sha1_digest_update_uio(SHA1_CTX *sha1_ctx, crypto_data_t *data)
+{
+ off_t offset = data->cd_offset;
+ size_t length = data->cd_length;
+ uint_t vec_idx;
+ size_t cur_len;
+
+	/* we support only kernel buffers */
+ if (data->cd_uio->uio_segflg != UIO_SYSSPACE)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ /*
+ * Jump to the first iovec containing data to be
+ * digested.
+ */
+ for (vec_idx = 0; vec_idx < data->cd_uio->uio_iovcnt &&
+ offset >= data->cd_uio->uio_iov[vec_idx].iov_len;
+ offset -= data->cd_uio->uio_iov[vec_idx++].iov_len)
+ ;
+ if (vec_idx == data->cd_uio->uio_iovcnt) {
+ /*
+ * The caller specified an offset that is larger than the
+ * total size of the buffers it provided.
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+
+ /*
+ * Now do the digesting on the iovecs.
+ */
+ while (vec_idx < data->cd_uio->uio_iovcnt && length > 0) {
+ cur_len = MIN(data->cd_uio->uio_iov[vec_idx].iov_len -
+ offset, length);
+
+ SHA1Update(sha1_ctx,
+ (uint8_t *)data->cd_uio->uio_iov[vec_idx].iov_base + offset,
+ cur_len);
+
+ length -= cur_len;
+ vec_idx++;
+ offset = 0;
+ }
+
+ if (vec_idx == data->cd_uio->uio_iovcnt && length > 0) {
+ /*
+		 * The end of the specified iovecs was reached but the
+		 * requested length could not be processed; i.e., the
+		 * caller requested to digest more data than it provided.
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+
+ return (CRYPTO_SUCCESS);
+}
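+
+/*
+ * Worked example (hypothetical values): for a crypto_data_t with
+ * cd_offset = 10 and cd_length = 100 over iovecs of 8, 16 and 200 bytes,
+ * the skip loop above consumes the 8-byte iovec (leaving offset = 2), and
+ * the digest loop then hashes bytes 2..15 of the second iovec (14 bytes)
+ * followed by bytes 0..85 of the third (the remaining 86 bytes).
+ */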
+
+/*
+ * Helper SHA1 digest final function for uio data.
+ * digest_len is the length of the desired digest. If digest_len
+ * is smaller than the default SHA1 digest length, the caller
+ * must pass a scratch buffer, digest_scratch, which must
+ * be at least SHA1_DIGEST_LENGTH bytes.
+ */
+static int
+sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
+ ulong_t digest_len, uchar_t *digest_scratch)
+{
+ off_t offset = digest->cd_offset;
+ uint_t vec_idx;
+
+	/* we support only kernel buffers */
+ if (digest->cd_uio->uio_segflg != UIO_SYSSPACE)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ /*
+ * Jump to the first iovec containing ptr to the digest to
+ * be returned.
+ */
+	for (vec_idx = 0; vec_idx < digest->cd_uio->uio_iovcnt &&
+	    offset >= digest->cd_uio->uio_iov[vec_idx].iov_len;
+	    offset -= digest->cd_uio->uio_iov[vec_idx++].iov_len)
+ ;
+ if (vec_idx == digest->cd_uio->uio_iovcnt) {
+ /*
+ * The caller specified an offset that is
+ * larger than the total size of the buffers
+ * it provided.
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+
+ if (offset + digest_len <=
+ digest->cd_uio->uio_iov[vec_idx].iov_len) {
+ /*
+ * The computed SHA1 digest will fit in the current
+ * iovec.
+ */
+ if (digest_len != SHA1_DIGEST_LENGTH) {
+ /*
+ * The caller requested a short digest. Digest
+ * into a scratch buffer and return to
+ * the user only what was requested.
+ */
+ SHA1Final(digest_scratch, sha1_ctx);
+ bcopy(digest_scratch, (uchar_t *)digest->
+ cd_uio->uio_iov[vec_idx].iov_base + offset,
+ digest_len);
+ } else {
+ SHA1Final((uchar_t *)digest->
+ cd_uio->uio_iov[vec_idx].iov_base + offset,
+ sha1_ctx);
+ }
+ } else {
+ /*
+		 * The computed digest will cross one or more iovecs.
+		 * This is bad for performance, but we need to support it.
+		 * Allocate a small scratch buffer on the stack and
+		 * copy it piecemeal to the specified digest iovecs.
+ */
+ uchar_t digest_tmp[SHA1_DIGEST_LENGTH];
+ off_t scratch_offset = 0;
+ size_t length = digest_len;
+ size_t cur_len;
+
+ SHA1Final(digest_tmp, sha1_ctx);
+
+ while (vec_idx < digest->cd_uio->uio_iovcnt && length > 0) {
+ cur_len = MIN(digest->cd_uio->uio_iov[vec_idx].iov_len -
+ offset, length);
+ bcopy(digest_tmp + scratch_offset,
+ digest->cd_uio->uio_iov[vec_idx].iov_base + offset,
+ cur_len);
+
+ length -= cur_len;
+ vec_idx++;
+ scratch_offset += cur_len;
+ offset = 0;
+ }
+
+ if (vec_idx == digest->cd_uio->uio_iovcnt && length > 0) {
+ /*
+			 * The end of the specified iovecs was reached
+			 * but the requested length could not be
+			 * processed; i.e., the caller requested to
+			 * digest more data than it provided.
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+ }
+
+ return (CRYPTO_SUCCESS);
+}
+
+/* ARGSUSED */
+static int
+sha1_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest,
+ crypto_req_handle_t req)
+{
+ int ret = CRYPTO_SUCCESS;
+
+ ASSERT(ctx->cc_provider_private != NULL);
+
+ /*
+	 * If the output buffer is too small, we must return the length
+	 * needed to store the output without destroying the context.
+ */
+ if ((digest->cd_length == 0) ||
+ (digest->cd_length < SHA1_DIGEST_LENGTH)) {
+ digest->cd_length = SHA1_DIGEST_LENGTH;
+ return (CRYPTO_BUFFER_TOO_SMALL);
+ }
+
+ /*
+ * Do the SHA1 update on the specified input data.
+ */
+ switch (data->cd_format) {
+ case CRYPTO_DATA_RAW:
+ SHA1Update(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
+ (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
+ data->cd_length);
+ break;
+ case CRYPTO_DATA_UIO:
+ ret = sha1_digest_update_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
+ data);
+ break;
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ if (ret != CRYPTO_SUCCESS) {
+ /* the update failed, free context and bail */
+ kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
+ ctx->cc_provider_private = NULL;
+ digest->cd_length = 0;
+ return (ret);
+ }
+
+ /*
+	 * Do a SHA1 final; this must be done separately since the digest
+	 * type can be different from the input data type.
+ */
+ switch (digest->cd_format) {
+ case CRYPTO_DATA_RAW:
+ SHA1Final((unsigned char *)digest->cd_raw.iov_base +
+ digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
+ break;
+ case CRYPTO_DATA_UIO:
+ ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
+ digest, SHA1_DIGEST_LENGTH, NULL);
+ break;
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ /* all done, free context and return */
+
+ if (ret == CRYPTO_SUCCESS) {
+ digest->cd_length = SHA1_DIGEST_LENGTH;
+ } else {
+ digest->cd_length = 0;
+ }
+
+ kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
+ ctx->cc_provider_private = NULL;
+ return (ret);
+}
+
+/* ARGSUSED */
+static int
+sha1_digest_update(crypto_ctx_t *ctx, crypto_data_t *data,
+ crypto_req_handle_t req)
+{
+ int ret = CRYPTO_SUCCESS;
+
+ ASSERT(ctx->cc_provider_private != NULL);
+
+ /*
+ * Do the SHA1 update on the specified input data.
+ */
+ switch (data->cd_format) {
+ case CRYPTO_DATA_RAW:
+ SHA1Update(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
+ (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
+ data->cd_length);
+ break;
+ case CRYPTO_DATA_UIO:
+ ret = sha1_digest_update_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
+ data);
+ break;
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ return (ret);
+}
+
+/* ARGSUSED */
+static int
+sha1_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest,
+ crypto_req_handle_t req)
+{
+ int ret = CRYPTO_SUCCESS;
+
+ ASSERT(ctx->cc_provider_private != NULL);
+
+ /*
+	 * If the output buffer is too small, we must return the length
+	 * needed to store the output without destroying the context.
+ */
+ if ((digest->cd_length == 0) ||
+ (digest->cd_length < SHA1_DIGEST_LENGTH)) {
+ digest->cd_length = SHA1_DIGEST_LENGTH;
+ return (CRYPTO_BUFFER_TOO_SMALL);
+ }
+
+ /*
+ * Do a SHA1 final.
+ */
+ switch (digest->cd_format) {
+ case CRYPTO_DATA_RAW:
+ SHA1Final((unsigned char *)digest->cd_raw.iov_base +
+ digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
+ break;
+ case CRYPTO_DATA_UIO:
+ ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
+ digest, SHA1_DIGEST_LENGTH, NULL);
+ break;
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ /* all done, free context and return */
+
+ if (ret == CRYPTO_SUCCESS) {
+ digest->cd_length = SHA1_DIGEST_LENGTH;
+ } else {
+ digest->cd_length = 0;
+ }
+
+ kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
+ ctx->cc_provider_private = NULL;
+
+ return (ret);
+}
+
+/* ARGSUSED */
+static int
+sha1_digest_atomic(crypto_provider_handle_t provider,
+ crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
+ crypto_data_t *data, crypto_data_t *digest,
+ crypto_req_handle_t req)
+{
+ int ret = CRYPTO_SUCCESS;
+ SHA1_CTX sha1_ctx;
+
+ if (mechanism->cm_type != SHA1_MECH_INFO_TYPE)
+ return (CRYPTO_MECHANISM_INVALID);
+
+ /*
+ * Do the SHA1 init.
+ */
+ SHA1Init(&sha1_ctx);
+
+ /*
+ * Do the SHA1 update on the specified input data.
+ */
+ switch (data->cd_format) {
+ case CRYPTO_DATA_RAW:
+ SHA1Update(&sha1_ctx,
+ (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
+ data->cd_length);
+ break;
+ case CRYPTO_DATA_UIO:
+ ret = sha1_digest_update_uio(&sha1_ctx, data);
+ break;
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ if (ret != CRYPTO_SUCCESS) {
+ /* the update failed, bail */
+ digest->cd_length = 0;
+ return (ret);
+ }
+
+ /*
+	 * Do a SHA1 final; this must be done separately since the digest
+	 * type can be different from the input data type.
+ */
+ switch (digest->cd_format) {
+ case CRYPTO_DATA_RAW:
+ SHA1Final((unsigned char *)digest->cd_raw.iov_base +
+ digest->cd_offset, &sha1_ctx);
+ break;
+ case CRYPTO_DATA_UIO:
+ ret = sha1_digest_final_uio(&sha1_ctx, digest,
+ SHA1_DIGEST_LENGTH, NULL);
+ break;
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ if (ret == CRYPTO_SUCCESS) {
+ digest->cd_length = SHA1_DIGEST_LENGTH;
+ } else {
+ digest->cd_length = 0;
+ }
+
+ return (ret);
+}
+
+/*
+ * KCF software provider mac entry points.
+ *
+ * SHA1 HMAC is: SHA1(key XOR opad, SHA1(key XOR ipad, text))
+ *
+ * Init:
+ * The initialization routine initializes what we denote
+ * as the inner and outer contexts by doing
+ * - for inner context: SHA1(key XOR ipad)
+ * - for outer context: SHA1(key XOR opad)
+ *
+ * Update:
+ * Each subsequent SHA1 HMAC update will result in an
+ * update of the inner context with the specified data.
+ *
+ * Final:
+ * The SHA1 HMAC final will do a SHA1 final operation on the
+ * inner context, and the resulting digest will be used
+ * as the data for an update on the outer context. Last
+ * but not least, a SHA1 final on the outer context will
+ * be performed to obtain the SHA1 HMAC digest to return
+ * to the user.
+ */
+
+/*
+ * Initialize a SHA1-HMAC context.
+ */
+static void
+sha1_mac_init_ctx(sha1_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
+{
+ uint32_t ipad[SHA1_HMAC_INTS_PER_BLOCK];
+ uint32_t opad[SHA1_HMAC_INTS_PER_BLOCK];
+ uint_t i;
+
+ bzero(ipad, SHA1_HMAC_BLOCK_SIZE);
+ bzero(opad, SHA1_HMAC_BLOCK_SIZE);
+
+ bcopy(keyval, ipad, length_in_bytes);
+ bcopy(keyval, opad, length_in_bytes);
+
+ /* XOR key with ipad (0x36) and opad (0x5c) */
+ for (i = 0; i < SHA1_HMAC_INTS_PER_BLOCK; i++) {
+ ipad[i] ^= 0x36363636;
+ opad[i] ^= 0x5c5c5c5c;
+ }
+
+ /* perform SHA1 on ipad */
+ SHA1Init(&ctx->hc_icontext);
+ SHA1Update(&ctx->hc_icontext, (uint8_t *)ipad, SHA1_HMAC_BLOCK_SIZE);
+
+ /* perform SHA1 on opad */
+ SHA1Init(&ctx->hc_ocontext);
+ SHA1Update(&ctx->hc_ocontext, (uint8_t *)opad, SHA1_HMAC_BLOCK_SIZE);
+}
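+
+/*
+ * Illustrative sketch (not from the original code): the full HMAC
+ * computation described above, written as one straight-line sequence
+ * over the inner and outer contexts set up by sha1_mac_init_ctx().
+ */
+#if 0
+static void
+sha1_hmac_example(sha1_hmac_ctx_t *ctx, void *key, uint_t keylen_in_bytes,
+    uint8_t *text, size_t textlen, uchar_t mac[SHA1_DIGEST_LENGTH])
+{
+	uchar_t inner[SHA1_DIGEST_LENGTH];
+
+	/* assumes keylen_in_bytes <= SHA1_HMAC_BLOCK_SIZE */
+	sha1_mac_init_ctx(ctx, key, keylen_in_bytes);
+	SHA1Update(&ctx->hc_icontext, text, textlen);
+	SHA1Final(inner, &ctx->hc_icontext);	/* SHA1(K ^ ipad, text) */
+	SHA1Update(&ctx->hc_ocontext, inner, SHA1_DIGEST_LENGTH);
+	SHA1Final(mac, &ctx->hc_ocontext);	/* SHA1(K ^ opad, inner) */
+}
+#endif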
+
+/*
+ * Initialize a SHA1-HMAC operation, reusing a context template when one
+ * is supplied.
+ */
+static int
+sha1_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
+ crypto_req_handle_t req)
+{
+ int ret = CRYPTO_SUCCESS;
+ uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
+
+ if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
+ mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
+ return (CRYPTO_MECHANISM_INVALID);
+
+ /* Add support for key by attributes (RFE 4706552) */
+ if (key->ck_format != CRYPTO_KEY_RAW)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ ctx->cc_provider_private = kmem_alloc(sizeof (sha1_hmac_ctx_t),
+ crypto_kmflag(req));
+ if (ctx->cc_provider_private == NULL)
+ return (CRYPTO_HOST_MEMORY);
+
+ if (ctx_template != NULL) {
+ /* reuse context template */
+ bcopy(ctx_template, PROV_SHA1_HMAC_CTX(ctx),
+ sizeof (sha1_hmac_ctx_t));
+ } else {
+ /* no context template, compute context */
+ if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
+ uchar_t digested_key[SHA1_DIGEST_LENGTH];
+ sha1_hmac_ctx_t *hmac_ctx = ctx->cc_provider_private;
+
+ /*
+ * Hash the passed-in key to get a smaller key.
+ * The inner context is used since it hasn't been
+ * initialized yet.
+ */
+ PROV_SHA1_DIGEST_KEY(&hmac_ctx->hc_icontext,
+ key->ck_data, keylen_in_bytes, digested_key);
+ sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx),
+ digested_key, SHA1_DIGEST_LENGTH);
+ } else {
+ sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx),
+ key->ck_data, keylen_in_bytes);
+ }
+ }
+
+ /*
+ * Get the mechanism parameters, if applicable.
+ */
+ PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type = mechanism->cm_type;
+ if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
+		if (mechanism->cm_param == NULL ||
+		    mechanism->cm_param_len != sizeof (ulong_t)) {
+			/* don't read a missing or malformed parameter */
+			ret = CRYPTO_MECHANISM_PARAM_INVALID;
+		} else {
+			PROV_SHA1_GET_DIGEST_LEN(mechanism,
+			    PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len);
+			if (PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len >
+			    SHA1_DIGEST_LENGTH)
+				ret = CRYPTO_MECHANISM_PARAM_INVALID;
+		}
+ }
+
+ if (ret != CRYPTO_SUCCESS) {
+ bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
+ kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
+ ctx->cc_provider_private = NULL;
+ }
+
+ return (ret);
+}
+
+/* ARGSUSED */
+static int
+sha1_mac_update(crypto_ctx_t *ctx, crypto_data_t *data, crypto_req_handle_t req)
+{
+ int ret = CRYPTO_SUCCESS;
+
+ ASSERT(ctx->cc_provider_private != NULL);
+
+ /*
+ * Do a SHA1 update of the inner context using the specified
+ * data.
+ */
+ switch (data->cd_format) {
+ case CRYPTO_DATA_RAW:
+ SHA1Update(&PROV_SHA1_HMAC_CTX(ctx)->hc_icontext,
+ (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
+ data->cd_length);
+ break;
+ case CRYPTO_DATA_UIO:
+ ret = sha1_digest_update_uio(
+ &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext, data);
+ break;
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ return (ret);
+}
+
+/* ARGSUSED */
+static int
+sha1_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac, crypto_req_handle_t req)
+{
+ int ret = CRYPTO_SUCCESS;
+ uchar_t digest[SHA1_DIGEST_LENGTH];
+ uint32_t digest_len = SHA1_DIGEST_LENGTH;
+
+ ASSERT(ctx->cc_provider_private != NULL);
+
+ if (PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type ==
+ SHA1_HMAC_GEN_MECH_INFO_TYPE)
+ digest_len = PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len;
+
+ /*
+	 * If the output buffer is too small, just return the length
+	 * needed to store the output and leave the context intact for
+	 * a retry.
+ */
+ if ((mac->cd_length == 0) || (mac->cd_length < digest_len)) {
+ mac->cd_length = digest_len;
+ return (CRYPTO_BUFFER_TOO_SMALL);
+ }
+
+ /*
+ * Do a SHA1 final on the inner context.
+ */
+ SHA1Final(digest, &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext);
+
+ /*
+ * Do a SHA1 update on the outer context, feeding the inner
+ * digest as data.
+ */
+ SHA1Update(&PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, digest,
+ SHA1_DIGEST_LENGTH);
+
+ /*
+	 * Do a SHA1 final on the outer context, storing the computed
+	 * digest in the user's buffer.
+ */
+ switch (mac->cd_format) {
+ case CRYPTO_DATA_RAW:
+ if (digest_len != SHA1_DIGEST_LENGTH) {
+ /*
+ * The caller requested a short digest. Digest
+ * into a scratch buffer and return to
+ * the user only what was requested.
+ */
+ SHA1Final(digest,
+ &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext);
+ bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
+ mac->cd_offset, digest_len);
+ } else {
+ SHA1Final((unsigned char *)mac->cd_raw.iov_base +
+ mac->cd_offset,
+ &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext);
+ }
+ break;
+ case CRYPTO_DATA_UIO:
+ ret = sha1_digest_final_uio(
+ &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, mac,
+ digest_len, digest);
+ break;
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ if (ret == CRYPTO_SUCCESS) {
+ mac->cd_length = digest_len;
+ } else {
+ mac->cd_length = 0;
+ }
+
+ bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
+ kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
+ ctx->cc_provider_private = NULL;
+
+ return (ret);
+}
+
+#define SHA1_MAC_UPDATE(data, ctx, ret) { \
+ switch (data->cd_format) { \
+ case CRYPTO_DATA_RAW: \
+ SHA1Update(&(ctx).hc_icontext, \
+ (uint8_t *)data->cd_raw.iov_base + \
+ data->cd_offset, data->cd_length); \
+ break; \
+ case CRYPTO_DATA_UIO: \
+ ret = sha1_digest_update_uio(&(ctx).hc_icontext, data); \
+ break; \
+ default: \
+ ret = CRYPTO_ARGUMENTS_BAD; \
+ } \
+}
+
+/* ARGSUSED */
+static int
+sha1_mac_atomic(crypto_provider_handle_t provider,
+ crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
+ crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
+{
+ int ret = CRYPTO_SUCCESS;
+ uchar_t digest[SHA1_DIGEST_LENGTH];
+ sha1_hmac_ctx_t sha1_hmac_ctx;
+ uint32_t digest_len = SHA1_DIGEST_LENGTH;
+ uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
+
+ if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
+ mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
+ return (CRYPTO_MECHANISM_INVALID);
+
+ /* Add support for key by attributes (RFE 4706552) */
+ if (key->ck_format != CRYPTO_KEY_RAW)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ if (ctx_template != NULL) {
+ /* reuse context template */
+ bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
+ } else {
+ /* no context template, initialize context */
+ if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
+ /*
+ * Hash the passed-in key to get a smaller key.
+ * The inner context is used since it hasn't been
+ * initialized yet.
+ */
+ PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext,
+ key->ck_data, keylen_in_bytes, digest);
+ sha1_mac_init_ctx(&sha1_hmac_ctx, digest,
+ SHA1_DIGEST_LENGTH);
+ } else {
+ sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data,
+ keylen_in_bytes);
+ }
+ }
+
+ /* get the mechanism parameters, if applicable */
+ if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
+ if (mechanism->cm_param == NULL ||
+ mechanism->cm_param_len != sizeof (ulong_t)) {
+ ret = CRYPTO_MECHANISM_PARAM_INVALID;
+ goto bail;
+ }
+ PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len);
+ if (digest_len > SHA1_DIGEST_LENGTH) {
+ ret = CRYPTO_MECHANISM_PARAM_INVALID;
+ goto bail;
+ }
+ }
+
+ /* do a SHA1 update of the inner context using the specified data */
+ SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret);
+ if (ret != CRYPTO_SUCCESS)
+		/* the update failed, zeroize the context and bail */
+ goto bail;
+
+ /*
+ * Do a SHA1 final on the inner context.
+ */
+ SHA1Final(digest, &sha1_hmac_ctx.hc_icontext);
+
+ /*
+	 * Do a SHA1 update on the outer context, feeding the inner
+ * digest as data.
+ */
+ SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH);
+
+ /*
+ * Do a SHA1 final on the outer context, storing the computed
+	 * digest in the user's buffer.
+ */
+ switch (mac->cd_format) {
+ case CRYPTO_DATA_RAW:
+ if (digest_len != SHA1_DIGEST_LENGTH) {
+ /*
+ * The caller requested a short digest. Digest
+ * into a scratch buffer and return to
+ * the user only what was requested.
+ */
+ SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext);
+ bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
+ mac->cd_offset, digest_len);
+ } else {
+ SHA1Final((unsigned char *)mac->cd_raw.iov_base +
+ mac->cd_offset, &sha1_hmac_ctx.hc_ocontext);
+ }
+ break;
+ case CRYPTO_DATA_UIO:
+ ret = sha1_digest_final_uio(&sha1_hmac_ctx.hc_ocontext, mac,
+ digest_len, digest);
+ break;
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ if (ret == CRYPTO_SUCCESS) {
+ mac->cd_length = digest_len;
+ } else {
+ mac->cd_length = 0;
+ }
+ /* Extra paranoia: zeroize the context on the stack */
+ bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
+
+ return (ret);
+bail:
+ bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
+ mac->cd_length = 0;
+ return (ret);
+}
+
+/* ARGSUSED */
+static int
+sha1_mac_verify_atomic(crypto_provider_handle_t provider,
+ crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
+ crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
+{
+ int ret = CRYPTO_SUCCESS;
+ uchar_t digest[SHA1_DIGEST_LENGTH];
+ sha1_hmac_ctx_t sha1_hmac_ctx;
+ uint32_t digest_len = SHA1_DIGEST_LENGTH;
+ uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
+
+ if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
+ mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
+ return (CRYPTO_MECHANISM_INVALID);
+
+ /* Add support for key by attributes (RFE 4706552) */
+ if (key->ck_format != CRYPTO_KEY_RAW)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ if (ctx_template != NULL) {
+ /* reuse context template */
+ bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
+ } else {
+ /* no context template, initialize context */
+ if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
+ /*
+ * Hash the passed-in key to get a smaller key.
+ * The inner context is used since it hasn't been
+ * initialized yet.
+ */
+ PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext,
+ key->ck_data, keylen_in_bytes, digest);
+ sha1_mac_init_ctx(&sha1_hmac_ctx, digest,
+ SHA1_DIGEST_LENGTH);
+ } else {
+ sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data,
+ keylen_in_bytes);
+ }
+ }
+
+ /* get the mechanism parameters, if applicable */
+ if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
+ if (mechanism->cm_param == NULL ||
+ mechanism->cm_param_len != sizeof (ulong_t)) {
+ ret = CRYPTO_MECHANISM_PARAM_INVALID;
+ goto bail;
+ }
+ PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len);
+ if (digest_len > SHA1_DIGEST_LENGTH) {
+ ret = CRYPTO_MECHANISM_PARAM_INVALID;
+ goto bail;
+ }
+ }
+
+ if (mac->cd_length != digest_len) {
+ ret = CRYPTO_INVALID_MAC;
+ goto bail;
+ }
+
+ /* do a SHA1 update of the inner context using the specified data */
+ SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret);
+ if (ret != CRYPTO_SUCCESS)
+		/* the update failed, zeroize the context and bail */
+ goto bail;
+
+ /* do a SHA1 final on the inner context */
+ SHA1Final(digest, &sha1_hmac_ctx.hc_icontext);
+
+ /*
+	 * Do a SHA1 update on the outer context, feeding the inner
+ * digest as data.
+ */
+ SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH);
+
+ /*
+ * Do a SHA1 final on the outer context, storing the computed
+	 * digest in the user's buffer.
+ */
+ SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext);
+
+ /*
+ * Compare the computed digest against the expected digest passed
+ * as argument.
+ */
+
+ switch (mac->cd_format) {
+
+ case CRYPTO_DATA_RAW:
+ if (bcmp(digest, (unsigned char *)mac->cd_raw.iov_base +
+ mac->cd_offset, digest_len) != 0)
+ ret = CRYPTO_INVALID_MAC;
+ break;
+
+ case CRYPTO_DATA_UIO: {
+ off_t offset = mac->cd_offset;
+ uint_t vec_idx;
+ off_t scratch_offset = 0;
+ size_t length = digest_len;
+ size_t cur_len;
+
+ /* we support only kernel buffer */
+ if (mac->cd_uio->uio_segflg != UIO_SYSSPACE)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ /* jump to the first iovec containing the expected digest */
+ for (vec_idx = 0;
+		    vec_idx < mac->cd_uio->uio_iovcnt &&
+		    offset >= mac->cd_uio->uio_iov[vec_idx].iov_len;
+ offset -= mac->cd_uio->uio_iov[vec_idx++].iov_len)
+ ;
+ if (vec_idx == mac->cd_uio->uio_iovcnt) {
+ /*
+ * The caller specified an offset that is
+ * larger than the total size of the buffers
+ * it provided.
+ */
+ ret = CRYPTO_DATA_LEN_RANGE;
+ break;
+ }
+
+ /* do the comparison of computed digest vs specified one */
+ while (vec_idx < mac->cd_uio->uio_iovcnt && length > 0) {
+ cur_len = MIN(mac->cd_uio->uio_iov[vec_idx].iov_len -
+ offset, length);
+
+ if (bcmp(digest + scratch_offset,
+ mac->cd_uio->uio_iov[vec_idx].iov_base + offset,
+ cur_len) != 0) {
+ ret = CRYPTO_INVALID_MAC;
+ break;
+ }
+
+ length -= cur_len;
+ vec_idx++;
+ scratch_offset += cur_len;
+ offset = 0;
+ }
+ break;
+ }
+
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
+ return (ret);
+bail:
+ bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
+ mac->cd_length = 0;
+ return (ret);
+}
+
+/*
+ * KCF software provider context management entry points.
+ */
+
+/* ARGSUSED */
+static int
+sha1_create_ctx_template(crypto_provider_handle_t provider,
+ crypto_mechanism_t *mechanism, crypto_key_t *key,
+ crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size,
+ crypto_req_handle_t req)
+{
+ sha1_hmac_ctx_t *sha1_hmac_ctx_tmpl;
+ uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
+
+ if ((mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE) &&
+ (mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)) {
+ return (CRYPTO_MECHANISM_INVALID);
+ }
+
+ /* Add support for key by attributes (RFE 4706552) */
+ if (key->ck_format != CRYPTO_KEY_RAW)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ /*
+	 * Allocate and initialize the SHA1-HMAC context template.
+ */
+ sha1_hmac_ctx_tmpl = kmem_alloc(sizeof (sha1_hmac_ctx_t),
+ crypto_kmflag(req));
+ if (sha1_hmac_ctx_tmpl == NULL)
+ return (CRYPTO_HOST_MEMORY);
+
+ if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
+ uchar_t digested_key[SHA1_DIGEST_LENGTH];
+
+ /*
+ * Hash the passed-in key to get a smaller key.
+ * The inner context is used since it hasn't been
+ * initialized yet.
+ */
+ PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx_tmpl->hc_icontext,
+ key->ck_data, keylen_in_bytes, digested_key);
+ sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, digested_key,
+ SHA1_DIGEST_LENGTH);
+ } else {
+ sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, key->ck_data,
+ keylen_in_bytes);
+ }
+
+ sha1_hmac_ctx_tmpl->hc_mech_type = mechanism->cm_type;
+ *ctx_template = (crypto_spi_ctx_template_t)sha1_hmac_ctx_tmpl;
+ *ctx_template_size = sizeof (sha1_hmac_ctx_t);
+
+ return (CRYPTO_SUCCESS);
+}
+
+static int
+sha1_free_context(crypto_ctx_t *ctx)
+{
+ uint_t ctx_len;
+ sha1_mech_type_t mech_type;
+
+ if (ctx->cc_provider_private == NULL)
+ return (CRYPTO_SUCCESS);
+
+ /*
+ * We have to free either SHA1 or SHA1-HMAC contexts, which
+ * have different lengths.
+ */
+
+ mech_type = PROV_SHA1_CTX(ctx)->sc_mech_type;
+ if (mech_type == SHA1_MECH_INFO_TYPE)
+ ctx_len = sizeof (sha1_ctx_t);
+ else {
+ ASSERT(mech_type == SHA1_HMAC_MECH_INFO_TYPE ||
+ mech_type == SHA1_HMAC_GEN_MECH_INFO_TYPE);
+ ctx_len = sizeof (sha1_hmac_ctx_t);
+ }
+
+ bzero(ctx->cc_provider_private, ctx_len);
+ kmem_free(ctx->cc_provider_private, ctx_len);
+ ctx->cc_provider_private = NULL;
+
+ return (CRYPTO_SUCCESS);
+}
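+
+/*
+ * Note: HMAC contexts are bzero()ed before kmem_free() throughout this
+ * file because they hold key-derived state (the ipad/opad digests);
+ * clearing them keeps that material from lingering in freed kernel
+ * memory.
+ */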
diff --git a/module/icp/io/sha2_mod.c b/module/icp/io/sha2_mod.c
new file mode 100644
index 000000000..4466fcff0
--- /dev/null
+++ b/module/icp/io/sha2_mod.c
@@ -0,0 +1,1307 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/zfs_context.h>
+#include <sys/modctl.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/spi.h>
+#include <sys/crypto/icp.h>
+#define _SHA2_IMPL
+#include <sha2/sha2.h>
+#include <sha2/sha2_impl.h>
+
+/*
+ * The sha2 module is created with a single modlinkage:
+ * - a modlcrypto that allows the module to register with the Kernel
+ *   Cryptographic Framework (KCF) as a software provider for the SHA2
+ *   mechanisms. The SHA2Init, SHA2Update, and SHA2Final entry points
+ *   remain directly callable within the ICP.
+ */
+
+static struct modlcrypto modlcrypto = {
+ &mod_cryptoops,
+ "SHA2 Kernel SW Provider"
+};
+
+static struct modlinkage modlinkage = {
+ MODREV_1, {&modlcrypto, NULL}
+};
+
+/*
+ * Macros to access the SHA2 or SHA2-HMAC contexts from a context passed
+ * by KCF to one of the entry points.
+ */
+
+#define PROV_SHA2_CTX(ctx) ((sha2_ctx_t *)(ctx)->cc_provider_private)
+#define PROV_SHA2_HMAC_CTX(ctx) ((sha2_hmac_ctx_t *)(ctx)->cc_provider_private)
+
+/* to extract the digest length passed as mechanism parameter */
+#define PROV_SHA2_GET_DIGEST_LEN(m, len) { \
+ if (IS_P2ALIGNED((m)->cm_param, sizeof (ulong_t))) \
+ (len) = (uint32_t)*((ulong_t *)(m)->cm_param); \
+ else { \
+ ulong_t tmp_ulong; \
+ bcopy((m)->cm_param, &tmp_ulong, sizeof (ulong_t)); \
+ (len) = (uint32_t)tmp_ulong; \
+ } \
+}
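+
+/*
+ * Note: the IS_P2ALIGNED() test above matters on strict-alignment
+ * architectures. cm_param is a caller-supplied byte buffer, so it is
+ * only safe to read it through an ulong_t pointer when it happens to be
+ * suitably aligned; otherwise the value is staged through bcopy() into
+ * an aligned local.
+ */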
+
+#define PROV_SHA2_DIGEST_KEY(mech, ctx, key, len, digest) { \
+ SHA2Init(mech, ctx); \
+ SHA2Update(ctx, key, len); \
+ SHA2Final(digest, ctx); \
+}
+
+/*
+ * Mechanism info structure passed to KCF during registration.
+ */
+static crypto_mech_info_t sha2_mech_info_tab[] = {
+ /* SHA256 */
+ {SUN_CKM_SHA256, SHA256_MECH_INFO_TYPE,
+ CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
+ 0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS},
+ /* SHA256-HMAC */
+ {SUN_CKM_SHA256_HMAC, SHA256_HMAC_MECH_INFO_TYPE,
+ CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
+ SHA2_HMAC_MIN_KEY_LEN, SHA2_HMAC_MAX_KEY_LEN,
+ CRYPTO_KEYSIZE_UNIT_IN_BYTES},
+ /* SHA256-HMAC GENERAL */
+ {SUN_CKM_SHA256_HMAC_GENERAL, SHA256_HMAC_GEN_MECH_INFO_TYPE,
+ CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
+ SHA2_HMAC_MIN_KEY_LEN, SHA2_HMAC_MAX_KEY_LEN,
+ CRYPTO_KEYSIZE_UNIT_IN_BYTES}
+};
+
+static void sha2_provider_status(crypto_provider_handle_t, uint_t *);
+
+static crypto_control_ops_t sha2_control_ops = {
+ sha2_provider_status
+};
+
+static int sha2_digest_init(crypto_ctx_t *, crypto_mechanism_t *,
+ crypto_req_handle_t);
+static int sha2_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
+ crypto_req_handle_t);
+static int sha2_digest_update(crypto_ctx_t *, crypto_data_t *,
+ crypto_req_handle_t);
+static int sha2_digest_final(crypto_ctx_t *, crypto_data_t *,
+ crypto_req_handle_t);
+static int sha2_digest_atomic(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_data_t *, crypto_data_t *,
+ crypto_req_handle_t);
+
+static crypto_digest_ops_t sha2_digest_ops = {
+ sha2_digest_init,
+ sha2_digest,
+ sha2_digest_update,
+ NULL,
+ sha2_digest_final,
+ sha2_digest_atomic
+};
+
+static int sha2_mac_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
+ crypto_spi_ctx_template_t, crypto_req_handle_t);
+static int sha2_mac_update(crypto_ctx_t *, crypto_data_t *,
+ crypto_req_handle_t);
+static int sha2_mac_final(crypto_ctx_t *, crypto_data_t *, crypto_req_handle_t);
+static int sha2_mac_atomic(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
+ crypto_spi_ctx_template_t, crypto_req_handle_t);
+static int sha2_mac_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
+ crypto_spi_ctx_template_t, crypto_req_handle_t);
+
+static crypto_mac_ops_t sha2_mac_ops = {
+ sha2_mac_init,
+ NULL,
+ sha2_mac_update,
+ sha2_mac_final,
+ sha2_mac_atomic,
+ sha2_mac_verify_atomic
+};
+
+static int sha2_create_ctx_template(crypto_provider_handle_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *,
+ size_t *, crypto_req_handle_t);
+static int sha2_free_context(crypto_ctx_t *);
+
+static crypto_ctx_ops_t sha2_ctx_ops = {
+ sha2_create_ctx_template,
+ sha2_free_context
+};
+
+static crypto_ops_t sha2_crypto_ops = {{{{{
+ &sha2_control_ops,
+ &sha2_digest_ops,
+ NULL,
+ &sha2_mac_ops,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ &sha2_ctx_ops
+}}}}};
+
+static crypto_provider_info_t sha2_prov_info = {{{{
+ CRYPTO_SPI_VERSION_1,
+ "SHA2 Software Provider",
+ CRYPTO_SW_PROVIDER,
+ NULL,
+ &sha2_crypto_ops,
+ sizeof (sha2_mech_info_tab)/sizeof (crypto_mech_info_t),
+ sha2_mech_info_tab
+}}}};
+
+static crypto_kcf_provider_handle_t sha2_prov_handle = 0;
+
+int
+sha2_mod_init(void)
+{
+ int ret;
+
+ if ((ret = mod_install(&modlinkage)) != 0)
+ return (ret);
+
+ /*
+ * Register with KCF. If the registration fails, log an
+ * error but do not uninstall the module, since the functionality
+ * provided by misc/sha2 should still be available.
+ */
+ if ((ret = crypto_register_provider(&sha2_prov_info,
+ &sha2_prov_handle)) != CRYPTO_SUCCESS)
+ cmn_err(CE_WARN, "sha2 _init: "
+ "crypto_register_provider() failed (0x%x)", ret);
+
+ return (0);
+}
+
+int
+sha2_mod_fini(void)
+{
+ int ret;
+
+ if (sha2_prov_handle != 0) {
+ if ((ret = crypto_unregister_provider(sha2_prov_handle)) !=
+ CRYPTO_SUCCESS) {
+ cmn_err(CE_WARN,
+ "sha2 _fini: crypto_unregister_provider() "
+ "failed (0x%x)", ret);
+ return (EBUSY);
+ }
+ sha2_prov_handle = 0;
+ }
+
+ return (mod_remove(&modlinkage));
+}
+
+/*
+ * KCF software provider control entry points.
+ */
+/* ARGSUSED */
+static void
+sha2_provider_status(crypto_provider_handle_t provider, uint_t *status)
+{
+ *status = CRYPTO_PROVIDER_READY;
+}
+
+/*
+ * KCF software provider digest entry points.
+ */
+
+static int
+sha2_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
+ crypto_req_handle_t req)
+{
+
+ /*
+ * Allocate and initialize SHA2 context.
+ */
+ ctx->cc_provider_private = kmem_alloc(sizeof (sha2_ctx_t),
+ crypto_kmflag(req));
+ if (ctx->cc_provider_private == NULL)
+ return (CRYPTO_HOST_MEMORY);
+
+ PROV_SHA2_CTX(ctx)->sc_mech_type = mechanism->cm_type;
+ SHA2Init(mechanism->cm_type, &PROV_SHA2_CTX(ctx)->sc_sha2_ctx);
+
+ return (CRYPTO_SUCCESS);
+}
+
+/*
+ * Helper SHA2 digest update function for uio data.
+ */
+static int
+sha2_digest_update_uio(SHA2_CTX *sha2_ctx, crypto_data_t *data)
+{
+ off_t offset = data->cd_offset;
+ size_t length = data->cd_length;
+ uint_t vec_idx;
+ size_t cur_len;
+
+ /* we support only kernel buffer */
+ if (data->cd_uio->uio_segflg != UIO_SYSSPACE)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ /*
+ * Jump to the first iovec containing data to be
+ * digested.
+ */
+ for (vec_idx = 0; vec_idx < data->cd_uio->uio_iovcnt &&
+ offset >= data->cd_uio->uio_iov[vec_idx].iov_len;
+ offset -= data->cd_uio->uio_iov[vec_idx++].iov_len)
+ ;
+ if (vec_idx == data->cd_uio->uio_iovcnt) {
+ /*
+ * The caller specified an offset that is larger than the
+ * total size of the buffers it provided.
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+
+ /*
+ * Now do the digesting on the iovecs.
+ */
+ while (vec_idx < data->cd_uio->uio_iovcnt && length > 0) {
+ cur_len = MIN(data->cd_uio->uio_iov[vec_idx].iov_len -
+ offset, length);
+
+ SHA2Update(sha2_ctx, (uint8_t *)data->cd_uio->
+ uio_iov[vec_idx].iov_base + offset, cur_len);
+ length -= cur_len;
+ vec_idx++;
+ offset = 0;
+ }
+
+ if (vec_idx == data->cd_uio->uio_iovcnt && length > 0) {
+ /*
+		 * The end of the specified iovecs was reached but the
+		 * length requested could not be processed, i.e. the
+		 * caller requested to digest more data than it provided.
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+
+ return (CRYPTO_SUCCESS);
+}
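+
+/*
+ * Illustrative sketch (not from the original code): the shape of a
+ * uio-backed crypto_data_t accepted by the helpers above. Only
+ * UIO_SYSSPACE (kernel) buffers are supported; cd_offset and cd_length
+ * select a window across the iovecs. The helper name is hypothetical.
+ */
+#if 0
+static void
+example_uio_data(crypto_data_t *cd, uio_t *uio, iovec_t iov[2],
+    char *buf1, size_t len1, char *buf2, size_t len2)
+{
+	iov[0].iov_base = buf1;
+	iov[0].iov_len = len1;
+	iov[1].iov_base = buf2;
+	iov[1].iov_len = len2;
+
+	uio->uio_segflg = UIO_SYSSPACE;	/* kernel buffers only */
+	uio->uio_iov = iov;
+	uio->uio_iovcnt = 2;
+
+	cd->cd_format = CRYPTO_DATA_UIO;
+	cd->cd_offset = 0;
+	cd->cd_length = len1 + len2;	/* digest both buffers */
+	cd->cd_uio = uio;
+}
+#endif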
+
+/*
+ * Helper SHA2 digest final function for uio data.
+ * digest_len is the length of the desired digest. If digest_len
+ * is smaller than the default SHA2 digest length, the caller
+ * must pass a scratch buffer, digest_scratch, which must
+ * be at least the algorithm's digest length bytes.
+ */
+static int
+sha2_digest_final_uio(SHA2_CTX *sha2_ctx, crypto_data_t *digest,
+ ulong_t digest_len, uchar_t *digest_scratch)
+{
+ off_t offset = digest->cd_offset;
+ uint_t vec_idx;
+
+ /* we support only kernel buffer */
+ if (digest->cd_uio->uio_segflg != UIO_SYSSPACE)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ /*
+ * Jump to the first iovec containing ptr to the digest to
+	 * Jump to the first iovec containing the pointer to the digest to
+ */
+	for (vec_idx = 0; vec_idx < digest->cd_uio->uio_iovcnt &&
+	    offset >= digest->cd_uio->uio_iov[vec_idx].iov_len;
+ offset -= digest->cd_uio->uio_iov[vec_idx++].iov_len)
+ ;
+ if (vec_idx == digest->cd_uio->uio_iovcnt) {
+ /*
+ * The caller specified an offset that is
+ * larger than the total size of the buffers
+ * it provided.
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+
+ if (offset + digest_len <=
+ digest->cd_uio->uio_iov[vec_idx].iov_len) {
+ /*
+ * The computed SHA2 digest will fit in the current
+ * iovec.
+ */
+		if ((sha2_ctx->algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE) &&
+		    (digest_len != SHA256_DIGEST_LENGTH)) {
+ /*
+ * The caller requested a short digest. Digest
+ * into a scratch buffer and return to
+ * the user only what was requested.
+ */
+ SHA2Final(digest_scratch, sha2_ctx);
+
+ bcopy(digest_scratch, (uchar_t *)digest->
+ cd_uio->uio_iov[vec_idx].iov_base + offset,
+ digest_len);
+ } else {
+ SHA2Final((uchar_t *)digest->
+ cd_uio->uio_iov[vec_idx].iov_base + offset,
+ sha2_ctx);
+		}
+ } else {
+ /*
+		 * The computed digest will cross one or more iovecs.
+		 * This is bad performance-wise but we need to support it.
+		 * Digest into a small scratch buffer on the stack and
+		 * copy it piecemeal to the specified digest iovecs.
+ */
+ uchar_t digest_tmp[SHA256_DIGEST_LENGTH];
+ off_t scratch_offset = 0;
+ size_t length = digest_len;
+ size_t cur_len;
+
+ SHA2Final(digest_tmp, sha2_ctx);
+
+ while (vec_idx < digest->cd_uio->uio_iovcnt && length > 0) {
+ cur_len =
+ MIN(digest->cd_uio->uio_iov[vec_idx].iov_len -
+ offset, length);
+ bcopy(digest_tmp + scratch_offset,
+ digest->cd_uio->uio_iov[vec_idx].iov_base + offset,
+ cur_len);
+
+ length -= cur_len;
+ vec_idx++;
+ scratch_offset += cur_len;
+ offset = 0;
+ }
+
+ if (vec_idx == digest->cd_uio->uio_iovcnt && length > 0) {
+ /*
+			 * The end of the specified iovecs was reached but
+			 * the length requested could not be processed, i.e.
+			 * the caller requested to digest more data than it
+			 * provided.
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+ }
+
+ return (CRYPTO_SUCCESS);
+}
+
+/* ARGSUSED */
+static int
+sha2_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest,
+ crypto_req_handle_t req)
+{
+ int ret = CRYPTO_SUCCESS;
+ uint_t sha_digest_len;
+
+ ASSERT(ctx->cc_provider_private != NULL);
+
+ switch (PROV_SHA2_CTX(ctx)->sc_mech_type) {
+ case SHA256_MECH_INFO_TYPE:
+ sha_digest_len = SHA256_DIGEST_LENGTH;
+ break;
+ default:
+ return (CRYPTO_MECHANISM_INVALID);
+ }
+
+ /*
+	 * If the output buffer is too small, just return the length
+	 * needed to store the output and leave the context intact for
+	 * a retry.
+ */
+ if ((digest->cd_length == 0) ||
+ (digest->cd_length < sha_digest_len)) {
+ digest->cd_length = sha_digest_len;
+ return (CRYPTO_BUFFER_TOO_SMALL);
+ }
+
+ /*
+ * Do the SHA2 update on the specified input data.
+ */
+ switch (data->cd_format) {
+ case CRYPTO_DATA_RAW:
+ SHA2Update(&PROV_SHA2_CTX(ctx)->sc_sha2_ctx,
+ (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
+ data->cd_length);
+ break;
+ case CRYPTO_DATA_UIO:
+ ret = sha2_digest_update_uio(&PROV_SHA2_CTX(ctx)->sc_sha2_ctx,
+ data);
+ break;
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ if (ret != CRYPTO_SUCCESS) {
+ /* the update failed, free context and bail */
+ kmem_free(ctx->cc_provider_private, sizeof (sha2_ctx_t));
+ ctx->cc_provider_private = NULL;
+ digest->cd_length = 0;
+ return (ret);
+ }
+
+ /*
+	 * Do a SHA2 final; this must be done separately since the digest
+	 * type can differ from the input data type.
+ */
+ switch (digest->cd_format) {
+ case CRYPTO_DATA_RAW:
+ SHA2Final((unsigned char *)digest->cd_raw.iov_base +
+ digest->cd_offset, &PROV_SHA2_CTX(ctx)->sc_sha2_ctx);
+ break;
+ case CRYPTO_DATA_UIO:
+ ret = sha2_digest_final_uio(&PROV_SHA2_CTX(ctx)->sc_sha2_ctx,
+ digest, sha_digest_len, NULL);
+ break;
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ /* all done, free context and return */
+
+ if (ret == CRYPTO_SUCCESS)
+ digest->cd_length = sha_digest_len;
+ else
+ digest->cd_length = 0;
+
+ kmem_free(ctx->cc_provider_private, sizeof (sha2_ctx_t));
+ ctx->cc_provider_private = NULL;
+ return (ret);
+}
+
+/* ARGSUSED */
+static int
+sha2_digest_update(crypto_ctx_t *ctx, crypto_data_t *data,
+ crypto_req_handle_t req)
+{
+ int ret = CRYPTO_SUCCESS;
+
+ ASSERT(ctx->cc_provider_private != NULL);
+
+ /*
+ * Do the SHA2 update on the specified input data.
+ */
+ switch (data->cd_format) {
+ case CRYPTO_DATA_RAW:
+ SHA2Update(&PROV_SHA2_CTX(ctx)->sc_sha2_ctx,
+ (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
+ data->cd_length);
+ break;
+ case CRYPTO_DATA_UIO:
+ ret = sha2_digest_update_uio(&PROV_SHA2_CTX(ctx)->sc_sha2_ctx,
+ data);
+ break;
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ return (ret);
+}
+
+/* ARGSUSED */
+static int
+sha2_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest,
+ crypto_req_handle_t req)
+{
+ int ret = CRYPTO_SUCCESS;
+ uint_t sha_digest_len;
+
+ ASSERT(ctx->cc_provider_private != NULL);
+
+ switch (PROV_SHA2_CTX(ctx)->sc_mech_type) {
+ case SHA256_MECH_INFO_TYPE:
+ sha_digest_len = SHA256_DIGEST_LENGTH;
+ break;
+ default:
+ return (CRYPTO_MECHANISM_INVALID);
+ }
+
+ /*
+	 * If the output buffer is too small, just return the length
+	 * needed to store the output and leave the context intact for
+	 * a retry.
+ */
+ if ((digest->cd_length == 0) ||
+ (digest->cd_length < sha_digest_len)) {
+ digest->cd_length = sha_digest_len;
+ return (CRYPTO_BUFFER_TOO_SMALL);
+ }
+
+ /*
+ * Do a SHA2 final.
+ */
+ switch (digest->cd_format) {
+ case CRYPTO_DATA_RAW:
+ SHA2Final((unsigned char *)digest->cd_raw.iov_base +
+ digest->cd_offset, &PROV_SHA2_CTX(ctx)->sc_sha2_ctx);
+ break;
+ case CRYPTO_DATA_UIO:
+ ret = sha2_digest_final_uio(&PROV_SHA2_CTX(ctx)->sc_sha2_ctx,
+ digest, sha_digest_len, NULL);
+ break;
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ /* all done, free context and return */
+
+ if (ret == CRYPTO_SUCCESS)
+ digest->cd_length = sha_digest_len;
+ else
+ digest->cd_length = 0;
+
+ kmem_free(ctx->cc_provider_private, sizeof (sha2_ctx_t));
+ ctx->cc_provider_private = NULL;
+
+ return (ret);
+}
+
+/* ARGSUSED */
+static int
+sha2_digest_atomic(crypto_provider_handle_t provider,
+ crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
+ crypto_data_t *data, crypto_data_t *digest,
+ crypto_req_handle_t req)
+{
+ int ret = CRYPTO_SUCCESS;
+ SHA2_CTX sha2_ctx;
+ uint32_t sha_digest_len;
+
+	/*
+	 * Do the SHA2 init.
+	 */
+	SHA2Init(mechanism->cm_type, &sha2_ctx);
+
+	/*
+	 * Do the SHA2 update on the specified input data.
+	 */
+	switch (data->cd_format) {
+	case CRYPTO_DATA_RAW:
+		SHA2Update(&sha2_ctx, (uint8_t *)data->
+		    cd_raw.iov_base + data->cd_offset, data->cd_length);
+		break;
+	case CRYPTO_DATA_UIO:
+		ret = sha2_digest_update_uio(&sha2_ctx, data);
+		break;
+	default:
+		ret = CRYPTO_ARGUMENTS_BAD;
+	}
+
+ if (ret != CRYPTO_SUCCESS) {
+ /* the update failed, bail */
+ digest->cd_length = 0;
+ return (ret);
+ }
+
+	if (mechanism->cm_type <= SHA256_HMAC_GEN_MECH_INFO_TYPE) {
+		sha_digest_len = SHA256_DIGEST_LENGTH;
+	} else {
+		/* don't use an uninitialized digest length below */
+		digest->cd_length = 0;
+		return (CRYPTO_MECHANISM_INVALID);
+	}
+
+ /*
+	 * Do a SHA2 final; this must be done separately since the digest
+	 * type can differ from the input data type.
+ */
+ switch (digest->cd_format) {
+ case CRYPTO_DATA_RAW:
+ SHA2Final((unsigned char *)digest->cd_raw.iov_base +
+ digest->cd_offset, &sha2_ctx);
+ break;
+ case CRYPTO_DATA_UIO:
+ ret = sha2_digest_final_uio(&sha2_ctx, digest,
+ sha_digest_len, NULL);
+ break;
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ if (ret == CRYPTO_SUCCESS)
+ digest->cd_length = sha_digest_len;
+ else
+ digest->cd_length = 0;
+
+ return (ret);
+}
+
+/*
+ * KCF software provider mac entry points.
+ *
+ * SHA2 HMAC is: SHA2(key XOR opad, SHA2(key XOR ipad, text))
+ *
+ * Init:
+ * The initialization routine initializes what we denote
+ * as the inner and outer contexts by doing
+ * - for inner context: SHA2(key XOR ipad)
+ * - for outer context: SHA2(key XOR opad)
+ *
+ * Update:
+ * Each subsequent SHA2 HMAC update will result in an
+ * update of the inner context with the specified data.
+ *
+ * Final:
+ * The SHA2 HMAC final will do a SHA2 final operation on the
+ * inner context, and the resulting digest will be used
+ * as the data for an update on the outer context. Last
+ * but not least, a SHA2 final on the outer context will
+ * be performed to obtain the SHA2 HMAC digest to return
+ * to the user.
+ */
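+
+/*
+ * Illustrative sketch (not from the original code): requesting a
+ * truncated MAC through the SHA256-HMAC GENERAL mechanism. The desired
+ * length is passed as an ulong_t mechanism parameter and is checked
+ * against SHA256_DIGEST_LENGTH by the init/atomic entry points below.
+ * The helper name is hypothetical.
+ */
+#if 0
+static void
+example_truncated_hmac_mech(crypto_mechanism_t *mech, ulong_t *maclen)
+{
+	*maclen = 16;	/* e.g. a 128-bit HMAC-SHA256 tag */
+	mech->cm_type = SHA256_HMAC_GEN_MECH_INFO_TYPE;
+	mech->cm_param = (char *)maclen;
+	mech->cm_param_len = sizeof (ulong_t);
+}
+#endif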
+
+/*
+ * Initialize a SHA2-HMAC context.
+ */
+static void
+sha2_mac_init_ctx(sha2_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
+{
+ uint64_t ipad[SHA256_HMAC_BLOCK_SIZE / sizeof (uint64_t)];
+ uint64_t opad[SHA256_HMAC_BLOCK_SIZE / sizeof (uint64_t)];
+ int i, block_size, blocks_per_int64;
+
+ /* Determine the block size */
+ if (ctx->hc_mech_type <= SHA256_HMAC_GEN_MECH_INFO_TYPE) {
+ block_size = SHA256_HMAC_BLOCK_SIZE;
+ blocks_per_int64 = SHA256_HMAC_BLOCK_SIZE / sizeof (uint64_t);
+ }
+
+ (void) bzero(ipad, block_size);
+ (void) bzero(opad, block_size);
+ (void) bcopy(keyval, ipad, length_in_bytes);
+ (void) bcopy(keyval, opad, length_in_bytes);
+
+ /* XOR key with ipad (0x36) and opad (0x5c) */
+	for (i = 0; i < blocks_per_int64; i++) {
+ ipad[i] ^= 0x3636363636363636;
+ opad[i] ^= 0x5c5c5c5c5c5c5c5c;
+ }
+
+ /* perform SHA2 on ipad */
+ SHA2Init(ctx->hc_mech_type, &ctx->hc_icontext);
+ SHA2Update(&ctx->hc_icontext, (uint8_t *)ipad, block_size);
+
+ /* perform SHA2 on opad */
+ SHA2Init(ctx->hc_mech_type, &ctx->hc_ocontext);
+ SHA2Update(&ctx->hc_ocontext, (uint8_t *)opad, block_size);
+}
+
+/*
+ * Initialize a SHA2-HMAC operation, reusing a context template when one
+ * is supplied.
+ */
+static int
+sha2_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
+ crypto_req_handle_t req)
+{
+ int ret = CRYPTO_SUCCESS;
+ uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
+ uint_t sha_digest_len, sha_hmac_block_size;
+
+ /*
+	 * Set the digest length and block size to values appropriate to the
+ * mechanism
+ */
+ switch (mechanism->cm_type) {
+ case SHA256_HMAC_MECH_INFO_TYPE:
+ case SHA256_HMAC_GEN_MECH_INFO_TYPE:
+ sha_digest_len = SHA256_DIGEST_LENGTH;
+ sha_hmac_block_size = SHA256_HMAC_BLOCK_SIZE;
+ break;
+ default:
+ return (CRYPTO_MECHANISM_INVALID);
+ }
+
+ if (key->ck_format != CRYPTO_KEY_RAW)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ ctx->cc_provider_private = kmem_alloc(sizeof (sha2_hmac_ctx_t),
+ crypto_kmflag(req));
+ if (ctx->cc_provider_private == NULL)
+ return (CRYPTO_HOST_MEMORY);
+
+ PROV_SHA2_HMAC_CTX(ctx)->hc_mech_type = mechanism->cm_type;
+ if (ctx_template != NULL) {
+ /* reuse context template */
+ bcopy(ctx_template, PROV_SHA2_HMAC_CTX(ctx),
+ sizeof (sha2_hmac_ctx_t));
+ } else {
+ /* no context template, compute context */
+ if (keylen_in_bytes > sha_hmac_block_size) {
+ uchar_t digested_key[SHA256_DIGEST_LENGTH];
+ sha2_hmac_ctx_t *hmac_ctx = ctx->cc_provider_private;
+
+ /*
+ * Hash the passed-in key to get a smaller key.
+ * The inner context is used since it hasn't been
+ * initialized yet.
+ */
+ PROV_SHA2_DIGEST_KEY(mechanism->cm_type / 3,
+ &hmac_ctx->hc_icontext,
+ key->ck_data, keylen_in_bytes, digested_key);
+ sha2_mac_init_ctx(PROV_SHA2_HMAC_CTX(ctx),
+ digested_key, sha_digest_len);
+ } else {
+ sha2_mac_init_ctx(PROV_SHA2_HMAC_CTX(ctx),
+ key->ck_data, keylen_in_bytes);
+ }
+ }
+
+ /*
+ * Get the mechanism parameters, if applicable.
+ */
+ if (mechanism->cm_type % 3 == 2) {
+		if (mechanism->cm_param == NULL ||
+		    mechanism->cm_param_len != sizeof (ulong_t)) {
+			/* don't read a missing or malformed parameter */
+			ret = CRYPTO_MECHANISM_PARAM_INVALID;
+		} else {
+			PROV_SHA2_GET_DIGEST_LEN(mechanism,
+			    PROV_SHA2_HMAC_CTX(ctx)->hc_digest_len);
+			if (PROV_SHA2_HMAC_CTX(ctx)->hc_digest_len >
+			    sha_digest_len)
+				ret = CRYPTO_MECHANISM_PARAM_INVALID;
+		}
+ }
+
+ if (ret != CRYPTO_SUCCESS) {
+ bzero(ctx->cc_provider_private, sizeof (sha2_hmac_ctx_t));
+ kmem_free(ctx->cc_provider_private, sizeof (sha2_hmac_ctx_t));
+ ctx->cc_provider_private = NULL;
+ }
+
+ return (ret);
+}
+
+/* ARGSUSED */
+static int
+sha2_mac_update(crypto_ctx_t *ctx, crypto_data_t *data,
+ crypto_req_handle_t req)
+{
+ int ret = CRYPTO_SUCCESS;
+
+ ASSERT(ctx->cc_provider_private != NULL);
+
+ /*
+ * Do a SHA2 update of the inner context using the specified
+ * data.
+ */
+ switch (data->cd_format) {
+ case CRYPTO_DATA_RAW:
+ SHA2Update(&PROV_SHA2_HMAC_CTX(ctx)->hc_icontext,
+ (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
+ data->cd_length);
+ break;
+ case CRYPTO_DATA_UIO:
+ ret = sha2_digest_update_uio(
+ &PROV_SHA2_HMAC_CTX(ctx)->hc_icontext, data);
+ break;
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ return (ret);
+}
+
+/* ARGSUSED */
+static int
+sha2_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac, crypto_req_handle_t req)
+{
+ int ret = CRYPTO_SUCCESS;
+ uchar_t digest[SHA256_DIGEST_LENGTH];
+ uint32_t digest_len = 0, sha_digest_len = 0;
+
+ ASSERT(ctx->cc_provider_private != NULL);
+
+	/* Set the digest lengths to values appropriate to the mechanism */
+ switch (PROV_SHA2_HMAC_CTX(ctx)->hc_mech_type) {
+ case SHA256_HMAC_MECH_INFO_TYPE:
+ sha_digest_len = digest_len = SHA256_DIGEST_LENGTH;
+ break;
+ case SHA256_HMAC_GEN_MECH_INFO_TYPE:
+ sha_digest_len = SHA256_DIGEST_LENGTH;
+ digest_len = PROV_SHA2_HMAC_CTX(ctx)->hc_digest_len;
+ break;
+ default:
+ break;
+ }
+
+ /*
+	 * If the output buffer is too small, just return the length
+	 * needed to store the output and leave the context intact for
+	 * a retry.
+ */
+ if ((mac->cd_length == 0) || (mac->cd_length < digest_len)) {
+ mac->cd_length = digest_len;
+ return (CRYPTO_BUFFER_TOO_SMALL);
+ }
+
+ /*
+ * Do a SHA2 final on the inner context.
+ */
+ SHA2Final(digest, &PROV_SHA2_HMAC_CTX(ctx)->hc_icontext);
+
+ /*
+ * Do a SHA2 update on the outer context, feeding the inner
+ * digest as data.
+ */
+ SHA2Update(&PROV_SHA2_HMAC_CTX(ctx)->hc_ocontext, digest,
+ sha_digest_len);
+
+ /*
+	 * Do a SHA2 final on the outer context, storing the computed
+	 * digest in the user's buffer.
+ */
+ switch (mac->cd_format) {
+ case CRYPTO_DATA_RAW:
+ if (digest_len != sha_digest_len) {
+ /*
+ * The caller requested a short digest. Digest
+ * into a scratch buffer and return to
+ * the user only what was requested.
+ */
+ SHA2Final(digest,
+ &PROV_SHA2_HMAC_CTX(ctx)->hc_ocontext);
+ bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
+ mac->cd_offset, digest_len);
+ } else {
+ SHA2Final((unsigned char *)mac->cd_raw.iov_base +
+ mac->cd_offset,
+ &PROV_SHA2_HMAC_CTX(ctx)->hc_ocontext);
+ }
+ break;
+ case CRYPTO_DATA_UIO:
+ ret = sha2_digest_final_uio(
+ &PROV_SHA2_HMAC_CTX(ctx)->hc_ocontext, mac,
+ digest_len, digest);
+ break;
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ if (ret == CRYPTO_SUCCESS)
+ mac->cd_length = digest_len;
+ else
+ mac->cd_length = 0;
+
+ bzero(ctx->cc_provider_private, sizeof (sha2_hmac_ctx_t));
+ kmem_free(ctx->cc_provider_private, sizeof (sha2_hmac_ctx_t));
+ ctx->cc_provider_private = NULL;
+
+ return (ret);
+}
+
+#define SHA2_MAC_UPDATE(data, ctx, ret) { \
+ switch (data->cd_format) { \
+ case CRYPTO_DATA_RAW: \
+ SHA2Update(&(ctx).hc_icontext, \
+ (uint8_t *)data->cd_raw.iov_base + \
+ data->cd_offset, data->cd_length); \
+ break; \
+ case CRYPTO_DATA_UIO: \
+ ret = sha2_digest_update_uio(&(ctx).hc_icontext, data); \
+ break; \
+ default: \
+ ret = CRYPTO_ARGUMENTS_BAD; \
+ } \
+}
+
+/* ARGSUSED */
+static int
+sha2_mac_atomic(crypto_provider_handle_t provider,
+ crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
+ crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
+{
+ int ret = CRYPTO_SUCCESS;
+ uchar_t digest[SHA256_DIGEST_LENGTH];
+ sha2_hmac_ctx_t sha2_hmac_ctx;
+ uint32_t sha_digest_len, digest_len, sha_hmac_block_size;
+ uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
+
+ /*
+ * Set the digest length and block size to values appropriate to the
+ * mechanism
+ */
+ switch (mechanism->cm_type) {
+ case SHA256_HMAC_MECH_INFO_TYPE:
+ case SHA256_HMAC_GEN_MECH_INFO_TYPE:
+ sha_digest_len = digest_len = SHA256_DIGEST_LENGTH;
+ sha_hmac_block_size = SHA256_HMAC_BLOCK_SIZE;
+ break;
+ default:
+ return (CRYPTO_MECHANISM_INVALID);
+ }
+
+ /* Add support for key by attributes (RFE 4706552) */
+ if (key->ck_format != CRYPTO_KEY_RAW)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ if (ctx_template != NULL) {
+ /* reuse context template */
+ bcopy(ctx_template, &sha2_hmac_ctx, sizeof (sha2_hmac_ctx_t));
+ } else {
+ sha2_hmac_ctx.hc_mech_type = mechanism->cm_type;
+ /* no context template, initialize context */
+ if (keylen_in_bytes > sha_hmac_block_size) {
+ /*
+ * Hash the passed-in key to get a smaller key.
+ * The inner context is used since it hasn't been
+ * initialized yet.
+ */
+ PROV_SHA2_DIGEST_KEY(mechanism->cm_type / 3,
+ &sha2_hmac_ctx.hc_icontext,
+ key->ck_data, keylen_in_bytes, digest);
+ sha2_mac_init_ctx(&sha2_hmac_ctx, digest,
+ sha_digest_len);
+ } else {
+ sha2_mac_init_ctx(&sha2_hmac_ctx, key->ck_data,
+ keylen_in_bytes);
+ }
+ }
+
+ /* get the mechanism parameters, if applicable */
+ if ((mechanism->cm_type % 3) == 2) {
+ if (mechanism->cm_param == NULL ||
+ mechanism->cm_param_len != sizeof (ulong_t)) {
+ ret = CRYPTO_MECHANISM_PARAM_INVALID;
+ goto bail;
+ }
+ PROV_SHA2_GET_DIGEST_LEN(mechanism, digest_len);
+ if (digest_len > sha_digest_len) {
+ ret = CRYPTO_MECHANISM_PARAM_INVALID;
+ goto bail;
+ }
+ }
+
+ /* do a SHA2 update of the inner context using the specified data */
+ SHA2_MAC_UPDATE(data, sha2_hmac_ctx, ret);
+ if (ret != CRYPTO_SUCCESS)
+		/* the update failed, zeroize the context and bail */
+ goto bail;
+
+ /*
+ * Do a SHA2 final on the inner context.
+ */
+ SHA2Final(digest, &sha2_hmac_ctx.hc_icontext);
+
+ /*
+	 * Do a SHA2 update on the outer context, feeding the inner
+ * digest as data.
+ */
+ SHA2Update(&sha2_hmac_ctx.hc_ocontext, digest, sha_digest_len);
+
+ /*
+ * Do a SHA2 final on the outer context, storing the computed
+	 * digest in the user's buffer.
+ */
+ switch (mac->cd_format) {
+ case CRYPTO_DATA_RAW:
+ if (digest_len != sha_digest_len) {
+ /*
+ * The caller requested a short digest. Digest
+ * into a scratch buffer and return to
+ * the user only what was requested.
+ */
+ SHA2Final(digest, &sha2_hmac_ctx.hc_ocontext);
+ bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
+ mac->cd_offset, digest_len);
+ } else {
+ SHA2Final((unsigned char *)mac->cd_raw.iov_base +
+ mac->cd_offset, &sha2_hmac_ctx.hc_ocontext);
+ }
+ break;
+ case CRYPTO_DATA_UIO:
+ ret = sha2_digest_final_uio(&sha2_hmac_ctx.hc_ocontext, mac,
+ digest_len, digest);
+ break;
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+	if (ret == CRYPTO_SUCCESS) {
+		mac->cd_length = digest_len;
+		/* zeroize the stack context, as the sha1 provider does */
+		bzero(&sha2_hmac_ctx, sizeof (sha2_hmac_ctx_t));
+		return (CRYPTO_SUCCESS);
+	}
+bail:
+ bzero(&sha2_hmac_ctx, sizeof (sha2_hmac_ctx_t));
+ mac->cd_length = 0;
+ return (ret);
+}
+
+/* ARGSUSED */
+static int
+sha2_mac_verify_atomic(crypto_provider_handle_t provider,
+ crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
+ crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
+{
+ int ret = CRYPTO_SUCCESS;
+ uchar_t digest[SHA256_DIGEST_LENGTH];
+ sha2_hmac_ctx_t sha2_hmac_ctx;
+ uint32_t sha_digest_len, digest_len, sha_hmac_block_size;
+ uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
+
+ /*
+ * Set the digest length and block size to values appropriate to the
+ * mechanism
+ */
+ switch (mechanism->cm_type) {
+ case SHA256_HMAC_MECH_INFO_TYPE:
+ case SHA256_HMAC_GEN_MECH_INFO_TYPE:
+ sha_digest_len = digest_len = SHA256_DIGEST_LENGTH;
+ sha_hmac_block_size = SHA256_HMAC_BLOCK_SIZE;
+ break;
+ default:
+ return (CRYPTO_MECHANISM_INVALID);
+ }
+
+ /* Add support for key by attributes (RFE 4706552) */
+ if (key->ck_format != CRYPTO_KEY_RAW)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ if (ctx_template != NULL) {
+ /* reuse context template */
+ bcopy(ctx_template, &sha2_hmac_ctx, sizeof (sha2_hmac_ctx_t));
+ } else {
+ sha2_hmac_ctx.hc_mech_type = mechanism->cm_type;
+ /* no context template, initialize context */
+ if (keylen_in_bytes > sha_hmac_block_size) {
+ /*
+ * Hash the passed-in key to get a smaller key.
+ * The inner context is used since it hasn't been
+ * initialized yet.
+ */
+ PROV_SHA2_DIGEST_KEY(mechanism->cm_type / 3,
+ &sha2_hmac_ctx.hc_icontext,
+ key->ck_data, keylen_in_bytes, digest);
+ sha2_mac_init_ctx(&sha2_hmac_ctx, digest,
+ sha_digest_len);
+ } else {
+ sha2_mac_init_ctx(&sha2_hmac_ctx, key->ck_data,
+ keylen_in_bytes);
+ }
+ }
+
+ /* get the mechanism parameters, if applicable */
+ if (mechanism->cm_type % 3 == 2) {
+ if (mechanism->cm_param == NULL ||
+ mechanism->cm_param_len != sizeof (ulong_t)) {
+ ret = CRYPTO_MECHANISM_PARAM_INVALID;
+ goto bail;
+ }
+ PROV_SHA2_GET_DIGEST_LEN(mechanism, digest_len);
+ if (digest_len > sha_digest_len) {
+ ret = CRYPTO_MECHANISM_PARAM_INVALID;
+ goto bail;
+ }
+ }
+
+ if (mac->cd_length != digest_len) {
+ ret = CRYPTO_INVALID_MAC;
+ goto bail;
+ }
+
+ /* do a SHA2 update of the inner context using the specified data */
+ SHA2_MAC_UPDATE(data, sha2_hmac_ctx, ret);
+ if (ret != CRYPTO_SUCCESS)
+		/* the update failed, zeroize the context and bail */
+ goto bail;
+
+ /* do a SHA2 final on the inner context */
+ SHA2Final(digest, &sha2_hmac_ctx.hc_icontext);
+
+ /*
+	 * Do a SHA2 update on the outer context, feeding the inner
+ * digest as data.
+ */
+ SHA2Update(&sha2_hmac_ctx.hc_ocontext, digest, sha_digest_len);
+
+ /*
+ * Do a SHA2 final on the outer context, storing the computed
+	 * digest in the user's buffer.
+ */
+ SHA2Final(digest, &sha2_hmac_ctx.hc_ocontext);
+
+ /*
+ * Compare the computed digest against the expected digest passed
+ * as argument.
+ */
+
+ switch (mac->cd_format) {
+
+ case CRYPTO_DATA_RAW:
+ if (bcmp(digest, (unsigned char *)mac->cd_raw.iov_base +
+ mac->cd_offset, digest_len) != 0)
+ ret = CRYPTO_INVALID_MAC;
+ break;
+
+ case CRYPTO_DATA_UIO: {
+ off_t offset = mac->cd_offset;
+ uint_t vec_idx;
+ off_t scratch_offset = 0;
+ size_t length = digest_len;
+ size_t cur_len;
+
+ /* we support only kernel buffer */
+ if (mac->cd_uio->uio_segflg != UIO_SYSSPACE)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ /* jump to the first iovec containing the expected digest */
+ for (vec_idx = 0;
+		    vec_idx < mac->cd_uio->uio_iovcnt &&
+		    offset >= mac->cd_uio->uio_iov[vec_idx].iov_len;
+ offset -= mac->cd_uio->uio_iov[vec_idx++].iov_len)
+ ;
+ if (vec_idx == mac->cd_uio->uio_iovcnt) {
+ /*
+ * The caller specified an offset that is
+ * larger than the total size of the buffers
+ * it provided.
+ */
+ ret = CRYPTO_DATA_LEN_RANGE;
+ break;
+ }
+
+ /* do the comparison of computed digest vs specified one */
+ while (vec_idx < mac->cd_uio->uio_iovcnt && length > 0) {
+ cur_len = MIN(mac->cd_uio->uio_iov[vec_idx].iov_len -
+ offset, length);
+
+ if (bcmp(digest + scratch_offset,
+ mac->cd_uio->uio_iov[vec_idx].iov_base + offset,
+ cur_len) != 0) {
+ ret = CRYPTO_INVALID_MAC;
+ break;
+ }
+
+ length -= cur_len;
+ vec_idx++;
+ scratch_offset += cur_len;
+ offset = 0;
+ }
+ break;
+ }
+
+ default:
+ ret = CRYPTO_ARGUMENTS_BAD;
+ }
+
+	/* zeroize the stack context, as the sha1 provider does */
+	bzero(&sha2_hmac_ctx, sizeof (sha2_hmac_ctx_t));
+	return (ret);
+bail:
+ bzero(&sha2_hmac_ctx, sizeof (sha2_hmac_ctx_t));
+ mac->cd_length = 0;
+ return (ret);
+}
+
+/*
+ * KCF software provider context management entry points.
+ */
+
+/* ARGSUSED */
+static int
+sha2_create_ctx_template(crypto_provider_handle_t provider,
+ crypto_mechanism_t *mechanism, crypto_key_t *key,
+ crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size,
+ crypto_req_handle_t req)
+{
+ sha2_hmac_ctx_t *sha2_hmac_ctx_tmpl;
+ uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
+ uint32_t sha_digest_len, sha_hmac_block_size;
+
+ /*
+ * Set the digest length and block size to values appropriate to the
+ * mechanism
+ */
+ switch (mechanism->cm_type) {
+ case SHA256_HMAC_MECH_INFO_TYPE:
+ case SHA256_HMAC_GEN_MECH_INFO_TYPE:
+ sha_digest_len = SHA256_DIGEST_LENGTH;
+ sha_hmac_block_size = SHA256_HMAC_BLOCK_SIZE;
+ break;
+ default:
+ return (CRYPTO_MECHANISM_INVALID);
+ }
+
+ /* Add support for key by attributes (RFE 4706552) */
+ if (key->ck_format != CRYPTO_KEY_RAW)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ /*
+	 * Allocate and initialize the SHA2-HMAC context template.
+ */
+ sha2_hmac_ctx_tmpl = kmem_alloc(sizeof (sha2_hmac_ctx_t),
+ crypto_kmflag(req));
+ if (sha2_hmac_ctx_tmpl == NULL)
+ return (CRYPTO_HOST_MEMORY);
+
+ sha2_hmac_ctx_tmpl->hc_mech_type = mechanism->cm_type;
+
+ if (keylen_in_bytes > sha_hmac_block_size) {
+ uchar_t digested_key[SHA256_DIGEST_LENGTH];
+
+ /*
+ * Hash the passed-in key to get a smaller key.
+ * The inner context is used since it hasn't been
+ * initialized yet.
+ */
+ PROV_SHA2_DIGEST_KEY(mechanism->cm_type / 3,
+ &sha2_hmac_ctx_tmpl->hc_icontext,
+ key->ck_data, keylen_in_bytes, digested_key);
+ sha2_mac_init_ctx(sha2_hmac_ctx_tmpl, digested_key,
+ sha_digest_len);
+ } else {
+ sha2_mac_init_ctx(sha2_hmac_ctx_tmpl, key->ck_data,
+ keylen_in_bytes);
+ }
+
+ *ctx_template = (crypto_spi_ctx_template_t)sha2_hmac_ctx_tmpl;
+ *ctx_template_size = sizeof (sha2_hmac_ctx_t);
+
+ return (CRYPTO_SUCCESS);
+}
+
+static int
+sha2_free_context(crypto_ctx_t *ctx)
+{
+ uint_t ctx_len;
+
+ if (ctx->cc_provider_private == NULL)
+ return (CRYPTO_SUCCESS);
+
+ /*
+ * We have to free either SHA2 or SHA2-HMAC contexts, which
+ * have different lengths.
+ *
+ * Note: Below is dependent on the mechanism ordering.
+ */
+
+ if (PROV_SHA2_CTX(ctx)->sc_mech_type % 3 == 0)
+ ctx_len = sizeof (sha2_ctx_t);
+ else
+ ctx_len = sizeof (sha2_hmac_ctx_t);
+
+ bzero(ctx->cc_provider_private, ctx_len);
+ kmem_free(ctx->cc_provider_private, ctx_len);
+ ctx->cc_provider_private = NULL;
+
+ return (CRYPTO_SUCCESS);
+}
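+
+/*
+ * Illustrative note (not from the original code): the "% 3" and "/ 3"
+ * arithmetic used throughout this file relies on the mechanism enum
+ * being laid out in triples, e.g. for SHA256:
+ *
+ *	SHA256_MECH_INFO_TYPE		cm_type % 3 == 0  (plain digest)
+ *	SHA256_HMAC_MECH_INFO_TYPE	cm_type % 3 == 1  (HMAC)
+ *	SHA256_HMAC_GEN_MECH_INFO_TYPE	cm_type % 3 == 2  (truncated HMAC)
+ *
+ * "cm_type % 3" therefore classifies the operation, while "cm_type / 3"
+ * recovers the digest variant handed to SHA2Init(). Inserting a new
+ * value into the middle of the enum would silently break both
+ * computations.
+ */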
diff --git a/module/icp/os/modconf.c b/module/icp/os/modconf.c
new file mode 100644
index 000000000..e0cd7f4ad
--- /dev/null
+++ b/module/icp/os/modconf.c
@@ -0,0 +1,171 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/zfs_context.h>
+#include <sys/modctl.h>
+
+/*
+ * Null operations; used for uninitialized and "misc" modules.
+ */
+static int mod_null(struct modlmisc *, struct modlinkage *);
+static int mod_infonull(void *, struct modlinkage *, int *);
+
+/*
+ * Cryptographic Modules
+ */
+struct mod_ops mod_cryptoops = {
+ mod_null, mod_null, mod_infonull
+};
+
+/*
+ * Null operation; return 0.
+ */
+static int
+mod_null(struct modlmisc *modl, struct modlinkage *modlp)
+{
+ return (0);
+}
+
+/*
+ * Status for User modules.
+ */
+static int
+mod_infonull(void *modl, struct modlinkage *modlp, int *p0)
+{
+ *p0 = -1; /* for modinfo display */
+ return (0);
+}
+
+/*
+ * Install a module.
+ * (This routine is in the Solaris SPARC DDI/DKI)
+ */
+int
+mod_install(struct modlinkage *modlp)
+{
+ int retval = -1; /* No linkage structures */
+ struct modlmisc **linkpp;
+ struct modlmisc **linkpp1;
+
+ if (modlp->ml_rev != MODREV_1) {
+ cmn_err(CE_WARN, "mod_install: "
+ "modlinkage structure is not MODREV_1\n");
+ return (EINVAL);
+ }
+ linkpp = (struct modlmisc **)&modlp->ml_linkage[0];
+
+ while (*linkpp != NULL) {
+ if ((retval = MODL_INSTALL(*linkpp, modlp)) != 0) {
+ linkpp1 = (struct modlmisc **)&modlp->ml_linkage[0];
+
+ while (linkpp1 != linkpp) {
+ MODL_REMOVE(*linkpp1, modlp); /* clean up */
+ linkpp1++;
+ }
+ break;
+ }
+ linkpp++;
+ }
+ return (retval);
+}
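+
+/*
+ * Note: installation is all-or-nothing. If MODL_INSTALL() fails for any
+ * linkage, every linkage installed before it is backed out again with
+ * MODL_REMOVE() before the error is returned, so a failed mod_install()
+ * leaves no partially registered module behind.
+ */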
+
+static char *reins_err =
+ "Could not reinstall %s\nReboot to correct the problem";
+
+/*
+ * Remove a module. This is called by the module wrapper routine.
+ * (This routine is in the Solaris SPARC DDI/DKI)
+ */
+int
+mod_remove(struct modlinkage *modlp)
+{
+ int retval = 0;
+ struct modlmisc **linkpp, *last_linkp;
+
+ linkpp = (struct modlmisc **)&modlp->ml_linkage[0];
+
+ while (*linkpp != NULL) {
+ if ((retval = MODL_REMOVE(*linkpp, modlp)) != 0) {
+ last_linkp = *linkpp;
+ linkpp = (struct modlmisc **)&modlp->ml_linkage[0];
+ while (*linkpp != last_linkp) {
+ if (MODL_INSTALL(*linkpp, modlp) != 0) {
+ cmn_err(CE_WARN, reins_err,
+ (*linkpp)->misc_linkinfo);
+ break;
+ }
+ linkpp++;
+ }
+ break;
+ }
+ linkpp++;
+ }
+ return (retval);
+}
+
+/*
+ * Get module status.
+ * (This routine is in the Solaris SPARC DDI/DKI)
+ */
+int
+mod_info(struct modlinkage *modlp, struct modinfo *modinfop)
+{
+ int i;
+ int retval = 0;
+ struct modspecific_info *msip;
+ struct modlmisc **linkpp;
+
+ modinfop->mi_rev = modlp->ml_rev;
+
+ linkpp = (struct modlmisc **)modlp->ml_linkage;
+ msip = &modinfop->mi_msinfo[0];
+
+ for (i = 0; i < MODMAXLINK; i++) {
+ if (*linkpp == NULL) {
+ msip->msi_linkinfo[0] = '\0';
+ } else {
+ (void) strncpy(msip->msi_linkinfo,
+ (*linkpp)->misc_linkinfo, MODMAXLINKINFOLEN);
+ retval = MODL_INFO(*linkpp, modlp, &msip->msi_p0);
+ if (retval != 0)
+ break;
+ linkpp++;
+ }
+ msip++;
+ }
+
+ if (modinfop->mi_info == MI_INFO_LINKAGE) {
+ /*
+ * Slight kludge used to extract the address of the
+ * modlinkage structure from the module (just after
+ * loading a module for the very first time)
+ */
+ modinfop->mi_base = (void *)modlp;
+ }
+
+ if (retval == 0)
+ return (1);
+ return (0);
+} \ No newline at end of file
diff --git a/module/icp/os/modhash.c b/module/icp/os/modhash.c
new file mode 100644
index 000000000..1ff782afc
--- /dev/null
+++ b/module/icp/os/modhash.c
@@ -0,0 +1,925 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * mod_hash: flexible hash table implementation.
+ *
+ * This is a reasonably fast, reasonably flexible hash table implementation
+ * which features pluggable hash algorithms to support storing arbitrary keys
+ * and values. It is designed to handle small (< 100,000 items) amounts of
+ * data. The hash uses chaining to resolve collisions, and does not feature a
+ * mechanism to grow the hash. Care must be taken to pick nchains to be large
+ * enough for the application at hand, or lots of time will be wasted searching
+ * hash chains.
+ *
+ * The client of the hash is required to supply a number of items to support
+ * the various hash functions:
+ *
+ * - Destructor functions for the key and value being hashed.
+ * A destructor is responsible for freeing an object when the hash
+ * table is no longer storing it. Since keys and values can be of
+ * arbitrary type, separate destructors for keys & values are used.
+ * These may be mod_hash_null_keydtor and mod_hash_null_valdtor if no
+ * destructor is needed for either a key or value.
+ *
+ * - A hashing algorithm which returns a uint_t representing a hash index.
+ * The number returned need _not_ be between 0 and nchains. The mod_hash
+ * code will take care of doing that. The second argument (after the
+ * key) to the hashing function is a void * that represents
+ * hash_alg_data -- this is provided so that the hashing algorithm can
+ * maintain some state across calls, or keep algorithm-specific
+ * constants associated with the hash table.
+ *
+ * A pointer-hashing and a string-hashing algorithm are supplied in
+ * this file.
+ *
+ * - A key comparator (a la qsort).
+ * This is used when searching the hash chain. The key comparator
+ * determines if two keys match. It should follow the return value
+ * semantics of strcmp.
+ *
+ * string and pointer comparators are supplied in this file.
+ *
+ * mod_hash_create_strhash() and mod_hash_create_ptrhash() provide good
+ * examples of how to create a customized hash table.
+ *
+ * Basic hash operations:
+ *
+ * mod_hash_create_strhash(name, nchains, dtor),
+ * create a hash using strings as keys.
+ * NOTE: This creates a hash which automatically cleans up the string
+ * values it is given for keys.
+ *
+ * mod_hash_create_ptrhash(name, nchains, dtor, key_elem_size):
+ * create a hash using pointers as keys.
+ *
+ * mod_hash_create_extended(name, nchains, kdtor, vdtor,
+ * hash_alg, hash_alg_data,
+ * keycmp, sleep)
+ * create a customized hash table.
+ *
+ * mod_hash_destroy_hash(hash):
+ * destroy the given hash table, calling the key and value destructors
+ * on each key-value pair stored in the hash.
+ *
+ * mod_hash_insert(hash, key, val):
+ * place a key, value pair into the given hash.
+ * duplicate keys are rejected.
+ *
+ * mod_hash_insert_reserve(hash, key, val, handle):
+ * place a key, value pair into the given hash, using handle to indicate
+ * the reserved storage for the pair. (no memory allocation is needed
+ * during a mod_hash_insert_reserve.) duplicate keys are rejected.
+ *
+ * mod_hash_reserve(hash, *handle):
+ * reserve storage for a key-value pair using the memory allocation
+ * policy of 'hash', returning the storage handle in 'handle'.
+ *
+ * mod_hash_reserve_nosleep(hash, *handle): reserve storage for a key-value
+ * pair, ignoring the memory allocation policy of 'hash' and never
+ * sleeping, returning the storage handle in 'handle'.
+ *
+ * mod_hash_remove(hash, key, *val):
+ * remove a key-value pair with key 'key' from 'hash', destroying the
+ * stored key, and returning the value in val.
+ *
+ * mod_hash_replace(hash, key, val)
+ * atomically remove an existing key-value pair from a hash, and replace
+ * the key and value with the ones supplied. The removed key and value
+ * (if any) are destroyed.
+ *
+ * mod_hash_destroy(hash, key):
+ * remove a key-value pair with key 'key' from 'hash', destroying both
+ * stored key and stored value.
+ *
+ * mod_hash_find(hash, key, val):
+ * find a value in the hash table corresponding to the given key.
+ *
+ * mod_hash_find_cb(hash, key, val, found_callback)
+ * find a value in the hash table corresponding to the given key.
+ * If a value is found, call specified callback passing key and val to it.
+ * The callback is called with the hash lock held.
+ * It is intended to be used in situations where the act of locating the
+ * data must also modify it - such as in reference counting schemes.
+ *
+ * mod_hash_walk(hash, callback(key, elem, arg), arg)
+ * walks all the elements in the hashtable and invokes the callback
+ * function with the key/value pair for each element. the hashtable
+ * is locked for readers so the callback function should not attempt
+ * to do any updates to the hashtable. the callback function should
+ * return MH_WALK_CONTINUE to continue walking the hashtable or
+ * MH_WALK_TERMINATE to abort the walk of the hashtable.
+ *
+ * mod_hash_clear(hash):
+ * clears the given hash table of entries, calling the key and value
+ * destructors for every element in the hash.
+ */
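+
+/*
+ * A minimal usage sketch of the string hash (my_val_dtor, key and val are
+ * hypothetical; keys handed to a strhash must be kmem-allocated, since
+ * the hash's key destructor will free them):
+ *
+ *	mod_hash_t *h;
+ *	mod_hash_val_t v;
+ *
+ *	h = mod_hash_create_strhash("example hash", 128, my_val_dtor);
+ *	(void) mod_hash_insert(h, (mod_hash_key_t)key, (mod_hash_val_t)val);
+ *	if (mod_hash_find(h, (mod_hash_key_t)key, &v) == 0)
+ *		... v now holds val ...
+ *	mod_hash_destroy_strhash(h);
+ */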
+
+#include <sys/zfs_context.h>
+#include <sys/bitmap.h>
+#include <sys/modhash_impl.h>
+#include <sys/sysmacros.h>
+
+/*
+ * MH_KEY_DESTROY()
+ * Invoke the key destructor.
+ */
+#define MH_KEY_DESTROY(hash, key) ((hash->mh_kdtor)(key))
+
+/*
+ * MH_VAL_DESTROY()
+ * Invoke the value destructor.
+ */
+#define MH_VAL_DESTROY(hash, val) ((hash->mh_vdtor)(val))
+
+/*
+ * MH_KEYCMP()
+ * Call the key comparator for the given hash keys.
+ */
+#define MH_KEYCMP(hash, key1, key2) ((hash->mh_keycmp)(key1, key2))
+
+/*
+ * Cache for struct mod_hash_entry
+ */
+kmem_cache_t *mh_e_cache = NULL;
+mod_hash_t *mh_head = NULL;
+kmutex_t mh_head_lock;
+
+/*
+ * mod_hash_null_keydtor()
+ * mod_hash_null_valdtor()
+ * no-op key and value destructors.
+ */
+/*ARGSUSED*/
+void
+mod_hash_null_keydtor(mod_hash_key_t key)
+{
+}
+
+/*ARGSUSED*/
+void
+mod_hash_null_valdtor(mod_hash_val_t val)
+{
+}
+
+/*
+ * mod_hash_bystr()
+ * mod_hash_strkey_cmp()
+ * mod_hash_strkey_dtor()
+ * mod_hash_strval_dtor()
+ * Hash and key comparison routines for hashes with string keys.
+ *
+ * mod_hash_create_strhash()
+ * Create a hash using strings as keys
+ *
+ * The string hashing algorithm is from the "Dragon Book" --
+ * "Compilers: Principles, Tools & Techniques", by Aho, Sethi, Ullman
+ */
+
+/*ARGSUSED*/
+uint_t
+mod_hash_bystr(void *hash_data, mod_hash_key_t key)
+{
+ uint_t hash = 0;
+ uint_t g;
+ char *p, *k = (char *)key;
+
+ ASSERT(k);
+ for (p = k; *p != '\0'; p++) {
+ hash = (hash << 4) + *p;
+ if ((g = (hash & 0xf0000000)) != 0) {
+ hash ^= (g >> 24);
+ hash ^= g;
+ }
+ }
+ return (hash);
+}
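+
+/*
+ * As a worked example of the above: for the key "ab", the loop computes
+ * hash = ('a' << 4) + 'b' = (97 << 4) + 98 = 1650. The g-folding step
+ * only fires once hash exceeds 28 bits (i.e. for longer keys), where it
+ * mixes the top nibble back into the low-order bits.
+ */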
+
+int
+mod_hash_strkey_cmp(mod_hash_key_t key1, mod_hash_key_t key2)
+{
+ return (strcmp((char *)key1, (char *)key2));
+}
+
+void
+mod_hash_strkey_dtor(mod_hash_key_t key)
+{
+ char *c = (char *)key;
+ kmem_free(c, strlen(c) + 1);
+}
+
+void
+mod_hash_strval_dtor(mod_hash_val_t val)
+{
+ char *c = (char *)val;
+ kmem_free(c, strlen(c) + 1);
+}
+
+mod_hash_t *
+mod_hash_create_strhash_nodtr(char *name, size_t nchains,
+ void (*val_dtor)(mod_hash_val_t))
+{
+	return (mod_hash_create_extended(name, nchains, mod_hash_null_keydtor,
+	    val_dtor, mod_hash_bystr, NULL, mod_hash_strkey_cmp, KM_SLEEP));
+}
+
+mod_hash_t *
+mod_hash_create_strhash(char *name, size_t nchains,
+ void (*val_dtor)(mod_hash_val_t))
+{
+	return (mod_hash_create_extended(name, nchains, mod_hash_strkey_dtor,
+	    val_dtor, mod_hash_bystr, NULL, mod_hash_strkey_cmp, KM_SLEEP));
+}
+
+void
+mod_hash_destroy_strhash(mod_hash_t *strhash)
+{
+ ASSERT(strhash);
+ mod_hash_destroy_hash(strhash);
+}
+
+/*
+ * mod_hash_byptr()
+ * mod_hash_ptrkey_cmp()
+ * Hash and key comparison routines for hashes with pointer keys.
+ *
+ * mod_hash_create_ptrhash()
+ * mod_hash_destroy_ptrhash()
+ * Create a hash that uses pointers as keys. This hash algorithm
+ * picks an appropriate set of middle bits in the address to hash on
+ * based on the size of the hash table and a hint about the size of
+ * the items pointed at.
+ */
+uint_t
+mod_hash_byptr(void *hash_data, mod_hash_key_t key)
+{
+ uintptr_t k = (uintptr_t)key;
+ k >>= (int)(uintptr_t)hash_data;
+
+ return ((uint_t)k);
+}
+
+int
+mod_hash_ptrkey_cmp(mod_hash_key_t key1, mod_hash_key_t key2)
+{
+ uintptr_t k1 = (uintptr_t)key1;
+ uintptr_t k2 = (uintptr_t)key2;
+ if (k1 > k2)
+ return (-1);
+ else if (k1 < k2)
+ return (1);
+ else
+ return (0);
+}
+
+mod_hash_t *
+mod_hash_create_ptrhash(char *name, size_t nchains,
+ void (*val_dtor)(mod_hash_val_t), size_t key_elem_size)
+{
+ size_t rshift;
+
+ /*
+ * We want to hash on the bits in the middle of the address word
+ * Bits far to the right in the word have little significance, and
+ * are likely to all look the same (for example, an array of
+ * 256-byte structures will have the bottom 8 bits of address
+ * words the same). So we want to right-shift each address to
+ * ignore the bottom bits.
+ *
+ * The high bits, which are also unused, will get taken out when
+ * mod_hash takes hashkey % nchains.
+ */
+ rshift = highbit(key_elem_size);
+
+	return (mod_hash_create_extended(name, nchains, mod_hash_null_keydtor,
+	    val_dtor, mod_hash_byptr, (void *)rshift, mod_hash_ptrkey_cmp,
+	    KM_SLEEP));
+}
+
+void
+mod_hash_destroy_ptrhash(mod_hash_t *hash)
+{
+ ASSERT(hash);
+ mod_hash_destroy_hash(hash);
+}
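+
+/*
+ * A worked example of the shift above (entry_t and dtor are hypothetical):
+ * for a table keyed by pointers to 256-byte structures,
+ * mod_hash_create_ptrhash("tbl", 64, dtor, sizeof (entry_t)) yields
+ * rshift = highbit(256) = 9, so mod_hash_byptr() hashes on
+ * (uintptr_t)key >> 9, discarding the low address bits shared by all
+ * such allocations.
+ */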
+
+/*
+ * mod_hash_byid()
+ * mod_hash_idkey_cmp()
+ * Hash and key comparison routines for hashes with 32-bit unsigned keys.
+ *
+ * mod_hash_create_idhash()
+ * mod_hash_destroy_idhash()
+ * mod_hash_iddata_gen()
+ * Create a hash that uses numeric keys.
+ *
+ * The hash algorithm is documented in "Introduction to Algorithms"
+ * (Cormen, Leiserson, Rivest); when the hash table is created, it
+ * attempts to find the next largest prime above the number of hash
+ * slots. The hash index is then this number times the key modulo
+ * the hash size, or (key * prime) % nchains.
+ */
+uint_t
+mod_hash_byid(void *hash_data, mod_hash_key_t key)
+{
+ uint_t kval = (uint_t)(uintptr_t)hash_data;
+ return ((uint_t)(uintptr_t)key * (uint_t)kval);
+}
+
+int
+mod_hash_idkey_cmp(mod_hash_key_t key1, mod_hash_key_t key2)
+{
+ return ((uint_t)(uintptr_t)key1 - (uint_t)(uintptr_t)key2);
+}
+
+/*
+ * Generate the next largest prime number greater than nchains; this value
+ * is intended to be later passed in to mod_hash_create_extended() as the
+ * hash_data.
+ */
+uint_t
+mod_hash_iddata_gen(size_t nchains)
+{
+ uint_t kval, i, prime;
+
+ /*
+ * Pick the first (odd) prime greater than nchains. Make sure kval is
+ * odd (so start with nchains +1 or +2 as appropriate).
+ */
+ kval = (nchains % 2 == 0) ? nchains + 1 : nchains + 2;
+
+ for (;;) {
+ prime = 1;
+ for (i = 3; i * i <= kval; i += 2) {
+ if (kval % i == 0)
+ prime = 0;
+ }
+ if (prime == 1)
+ break;
+ kval += 2;
+ }
+ return (kval);
+}
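+
+/*
+ * For example, mod_hash_iddata_gen(100) starts at kval = 101, which is
+ * prime, so 101 is returned; mod_hash_byid() then computes key * 101 and
+ * i_mod_hash() reduces that product to a bucket index.
+ */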
+
+mod_hash_t *
+mod_hash_create_idhash(char *name, size_t nchains,
+ void (*val_dtor)(mod_hash_val_t))
+{
+ uint_t kval = mod_hash_iddata_gen(nchains);
+
+ return (mod_hash_create_extended(name, nchains, mod_hash_null_keydtor,
+ val_dtor, mod_hash_byid, (void *)(uintptr_t)kval,
+ mod_hash_idkey_cmp, KM_SLEEP));
+}
+
+void
+mod_hash_destroy_idhash(mod_hash_t *hash)
+{
+ ASSERT(hash);
+ mod_hash_destroy_hash(hash);
+}
+
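+/*
+ * mod_hash_fini()
+ * tears down the globals set up by mod_hash_init()
+ */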
+void
+mod_hash_fini(void)
+{
+ mutex_destroy(&mh_head_lock);
+
+ if (mh_e_cache) {
+ kmem_cache_destroy(mh_e_cache);
+ mh_e_cache = NULL;
+ }
+}
+
+/*
+ * mod_hash_init()
+ * sets up globals, etc., for mod_hash_*
+ */
+void
+mod_hash_init(void)
+{
+ ASSERT(mh_e_cache == NULL);
+ mh_e_cache = kmem_cache_create("mod_hash_entries",
+ sizeof (struct mod_hash_entry), 0, NULL, NULL, NULL, NULL,
+ NULL, 0);
+
+ mutex_init(&mh_head_lock, NULL, MUTEX_DEFAULT, NULL);
+}
+
+/*
+ * mod_hash_create_extended()
+ * The full-blown hash creation function.
+ *
+ * notes:
+ * nchains - how many hash slots to create. More hash slots will
+ * result in shorter hash chains, but will consume
+ * slightly more memory up front.
+ * sleep - should be KM_SLEEP or KM_NOSLEEP, to indicate whether
+ * to sleep for memory, or fail in low-memory conditions.
+ *
+ * Fails only if KM_NOSLEEP was specified, and no memory was available.
+ */
+mod_hash_t *
+mod_hash_create_extended(
+ char *hname, /* descriptive name for hash */
+ size_t nchains, /* number of hash slots */
+ void (*kdtor)(mod_hash_key_t), /* key destructor */
+ void (*vdtor)(mod_hash_val_t), /* value destructor */
+ uint_t (*hash_alg)(void *, mod_hash_key_t), /* hash algorithm */
+ void *hash_alg_data, /* pass-thru arg for hash_alg */
+ int (*keycmp)(mod_hash_key_t, mod_hash_key_t), /* key comparator */
+ int sleep) /* whether to sleep for mem */
+{
+ mod_hash_t *mod_hash;
+ ASSERT(hname && keycmp && hash_alg && vdtor && kdtor);
+
+ if ((mod_hash = kmem_zalloc(MH_SIZE(nchains), sleep)) == NULL)
+ return (NULL);
+
+ mod_hash->mh_name = kmem_alloc(strlen(hname) + 1, sleep);
+ if (mod_hash->mh_name == NULL) {
+ kmem_free(mod_hash, MH_SIZE(nchains));
+ return (NULL);
+ }
+ (void) strcpy(mod_hash->mh_name, hname);
+
+ rw_init(&mod_hash->mh_contents, NULL, RW_DEFAULT, NULL);
+ mod_hash->mh_sleep = sleep;
+ mod_hash->mh_nchains = nchains;
+ mod_hash->mh_kdtor = kdtor;
+ mod_hash->mh_vdtor = vdtor;
+ mod_hash->mh_hashalg = hash_alg;
+ mod_hash->mh_hashalg_data = hash_alg_data;
+ mod_hash->mh_keycmp = keycmp;
+
+ /*
+ * Link the hash up on the list of hashes
+ */
+ mutex_enter(&mh_head_lock);
+ mod_hash->mh_next = mh_head;
+ mh_head = mod_hash;
+ mutex_exit(&mh_head_lock);
+
+ return (mod_hash);
+}
+
+/*
+ * mod_hash_destroy_hash()
+ * destroy a hash table, destroying all of its stored keys and values
+ * as well.
+ */
+void
+mod_hash_destroy_hash(mod_hash_t *hash)
+{
+ mod_hash_t *mhp, *mhpp;
+
+ mutex_enter(&mh_head_lock);
+ /*
+ * Remove the hash from the hash list
+ */
+ if (hash == mh_head) { /* removing 1st list elem */
+ mh_head = mh_head->mh_next;
+ } else {
+ /*
+ * mhpp can start out NULL since we know the 1st elem isn't the
+ * droid we're looking for.
+ */
+ mhpp = NULL;
+ for (mhp = mh_head; mhp != NULL; mhp = mhp->mh_next) {
+ if (mhp == hash) {
+ mhpp->mh_next = mhp->mh_next;
+ break;
+ }
+ mhpp = mhp;
+ }
+ }
+ mutex_exit(&mh_head_lock);
+
+ /*
+ * Clean out keys and values.
+ */
+ mod_hash_clear(hash);
+
+ rw_destroy(&hash->mh_contents);
+ kmem_free(hash->mh_name, strlen(hash->mh_name) + 1);
+ kmem_free(hash, MH_SIZE(hash->mh_nchains));
+}
+
+/*
+ * i_mod_hash()
+ * Call the hashing algorithm for this hash table, with the given key.
+ */
+uint_t
+i_mod_hash(mod_hash_t *hash, mod_hash_key_t key)
+{
+ uint_t h;
+ /*
+ * Prevent div by 0 problems;
+ * Also a nice shortcut when using a hash as a list
+ */
+ if (hash->mh_nchains == 1)
+ return (0);
+
+ h = (hash->mh_hashalg)(hash->mh_hashalg_data, key);
+ return (h % (hash->mh_nchains - 1));
+}
+
+/*
+ * i_mod_hash_insert_nosync()
+ * mod_hash_insert()
+ * mod_hash_insert_reserve()
+ * insert 'val' into the hash table, using 'key' as its key. If 'key' is
+ * already a key in the hash, an error will be returned, and the key-val
+ * pair will not be inserted. i_mod_hash_insert_nosync() supports a simple
+ * handle abstraction, allowing hash entry allocation to be separated from
+ * the hash insertion. this abstraction allows simple use of the mod_hash
+ * structure in situations where mod_hash_insert() with a KM_SLEEP
+ * allocation policy would otherwise be unsafe.
+ */
+int
+i_mod_hash_insert_nosync(mod_hash_t *hash, mod_hash_key_t key,
+ mod_hash_val_t val, mod_hash_hndl_t handle)
+{
+ uint_t hashidx;
+ struct mod_hash_entry *entry;
+
+ ASSERT(hash);
+
+ /*
+ * If we've not been given reserved storage, allocate storage directly,
+ * using the hash's allocation policy.
+ */
+ if (handle == (mod_hash_hndl_t)0) {
+ entry = kmem_cache_alloc(mh_e_cache, hash->mh_sleep);
+ if (entry == NULL) {
+ hash->mh_stat.mhs_nomem++;
+ return (MH_ERR_NOMEM);
+ }
+ } else {
+ entry = (struct mod_hash_entry *)handle;
+ }
+
+ hashidx = i_mod_hash(hash, key);
+ entry->mhe_key = key;
+ entry->mhe_val = val;
+ entry->mhe_next = hash->mh_entries[hashidx];
+
+ hash->mh_entries[hashidx] = entry;
+ hash->mh_stat.mhs_nelems++;
+
+ return (0);
+}
+
+int
+mod_hash_insert(mod_hash_t *hash, mod_hash_key_t key, mod_hash_val_t val)
+{
+ int res;
+ mod_hash_val_t v;
+
+ rw_enter(&hash->mh_contents, RW_WRITER);
+
+ /*
+ * Disallow duplicate keys in the hash
+ */
+ if (i_mod_hash_find_nosync(hash, key, &v) == 0) {
+ rw_exit(&hash->mh_contents);
+ hash->mh_stat.mhs_coll++;
+ return (MH_ERR_DUPLICATE);
+ }
+
+ res = i_mod_hash_insert_nosync(hash, key, val, (mod_hash_hndl_t)0);
+ rw_exit(&hash->mh_contents);
+
+ return (res);
+}
+
+int
+mod_hash_insert_reserve(mod_hash_t *hash, mod_hash_key_t key,
+ mod_hash_val_t val, mod_hash_hndl_t handle)
+{
+ int res;
+ mod_hash_val_t v;
+
+ rw_enter(&hash->mh_contents, RW_WRITER);
+
+ /*
+ * Disallow duplicate keys in the hash
+ */
+ if (i_mod_hash_find_nosync(hash, key, &v) == 0) {
+ rw_exit(&hash->mh_contents);
+ hash->mh_stat.mhs_coll++;
+ return (MH_ERR_DUPLICATE);
+ }
+ res = i_mod_hash_insert_nosync(hash, key, val, handle);
+ rw_exit(&hash->mh_contents);
+
+ return (res);
+}
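+
+/*
+ * A minimal sketch of the handle abstraction (the caller and its error
+ * handling are hypothetical): the entry is reserved up front, where
+ * sleeping is safe, so the later insert needs no allocation:
+ *
+ *	mod_hash_hndl_t hndl;
+ *
+ *	if (mod_hash_reserve(h, &hndl) != 0)
+ *		return (MH_ERR_NOMEM);
+ *	... later, e.g. from a context that must not sleep ...
+ *	if (mod_hash_insert_reserve(h, key, val, hndl) != 0)
+ *		mod_hash_cancel(h, &hndl);
+ */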
+
+/*
+ * mod_hash_reserve()
+ * mod_hash_reserve_nosleep()
+ * mod_hash_cancel()
+ * Make or cancel a mod_hash_entry_t reservation. Reservations are used in
+ * mod_hash_insert_reserve() above.
+ */
+int
+mod_hash_reserve(mod_hash_t *hash, mod_hash_hndl_t *handlep)
+{
+ *handlep = kmem_cache_alloc(mh_e_cache, hash->mh_sleep);
+ if (*handlep == NULL) {
+ hash->mh_stat.mhs_nomem++;
+ return (MH_ERR_NOMEM);
+ }
+
+ return (0);
+}
+
+int
+mod_hash_reserve_nosleep(mod_hash_t *hash, mod_hash_hndl_t *handlep)
+{
+ *handlep = kmem_cache_alloc(mh_e_cache, KM_NOSLEEP);
+ if (*handlep == NULL) {
+ hash->mh_stat.mhs_nomem++;
+ return (MH_ERR_NOMEM);
+ }
+
+	return (0);
+}
+
+/*ARGSUSED*/
+void
+mod_hash_cancel(mod_hash_t *hash, mod_hash_hndl_t *handlep)
+{
+ kmem_cache_free(mh_e_cache, *handlep);
+ *handlep = (mod_hash_hndl_t)0;
+}
+
+/*
+ * i_mod_hash_remove_nosync()
+ * mod_hash_remove()
+ * Remove an element from the hash table.
+ */
+int
+i_mod_hash_remove_nosync(mod_hash_t *hash, mod_hash_key_t key,
+ mod_hash_val_t *val)
+{
+ int hashidx;
+ struct mod_hash_entry *e, *ep;
+
+ hashidx = i_mod_hash(hash, key);
+ ep = NULL; /* e's parent */
+
+ for (e = hash->mh_entries[hashidx]; e != NULL; e = e->mhe_next) {
+ if (MH_KEYCMP(hash, e->mhe_key, key) == 0)
+ break;
+ ep = e;
+ }
+
+ if (e == NULL) { /* not found */
+ return (MH_ERR_NOTFOUND);
+ }
+
+ if (ep == NULL) /* special case 1st element in bucket */
+ hash->mh_entries[hashidx] = e->mhe_next;
+ else
+ ep->mhe_next = e->mhe_next;
+
+ /*
+ * Clean up resources used by the node's key.
+ */
+ MH_KEY_DESTROY(hash, e->mhe_key);
+
+ *val = e->mhe_val;
+ kmem_cache_free(mh_e_cache, e);
+ hash->mh_stat.mhs_nelems--;
+
+ return (0);
+}
+
+int
+mod_hash_remove(mod_hash_t *hash, mod_hash_key_t key, mod_hash_val_t *val)
+{
+ int res;
+
+ rw_enter(&hash->mh_contents, RW_WRITER);
+ res = i_mod_hash_remove_nosync(hash, key, val);
+ rw_exit(&hash->mh_contents);
+
+ return (res);
+}
+
+/*
+ * mod_hash_replace()
+ * atomically remove an existing key-value pair from a hash, and replace
+ * the key and value with the ones supplied. The removed key and value
+ * (if any) are destroyed.
+ */
+int
+mod_hash_replace(mod_hash_t *hash, mod_hash_key_t key, mod_hash_val_t val)
+{
+ int res;
+ mod_hash_val_t v;
+
+ rw_enter(&hash->mh_contents, RW_WRITER);
+
+ if (i_mod_hash_remove_nosync(hash, key, &v) == 0) {
+ /*
+ * mod_hash_remove() takes care of freeing up the key resources.
+ */
+ MH_VAL_DESTROY(hash, v);
+ }
+ res = i_mod_hash_insert_nosync(hash, key, val, (mod_hash_hndl_t)0);
+
+ rw_exit(&hash->mh_contents);
+
+ return (res);
+}
+
+/*
+ * mod_hash_destroy()
+ * Remove an element from the hash table matching 'key', and destroy it.
+ */
+int
+mod_hash_destroy(mod_hash_t *hash, mod_hash_key_t key)
+{
+ mod_hash_val_t val;
+ int rv;
+
+ rw_enter(&hash->mh_contents, RW_WRITER);
+
+ if ((rv = i_mod_hash_remove_nosync(hash, key, &val)) == 0) {
+ /*
+ * mod_hash_remove() takes care of freeing up the key resources.
+ */
+ MH_VAL_DESTROY(hash, val);
+ }
+
+ rw_exit(&hash->mh_contents);
+ return (rv);
+}
+
+/*
+ * i_mod_hash_find_nosync()
+ * mod_hash_find()
+ * Find a value in the hash table corresponding to the given key.
+ */
+int
+i_mod_hash_find_nosync(mod_hash_t *hash, mod_hash_key_t key,
+ mod_hash_val_t *val)
+{
+ uint_t hashidx;
+ struct mod_hash_entry *e;
+
+ hashidx = i_mod_hash(hash, key);
+
+ for (e = hash->mh_entries[hashidx]; e != NULL; e = e->mhe_next) {
+ if (MH_KEYCMP(hash, e->mhe_key, key) == 0) {
+ *val = e->mhe_val;
+ hash->mh_stat.mhs_hit++;
+ return (0);
+ }
+ }
+ hash->mh_stat.mhs_miss++;
+ return (MH_ERR_NOTFOUND);
+}
+
+int
+mod_hash_find(mod_hash_t *hash, mod_hash_key_t key, mod_hash_val_t *val)
+{
+ int res;
+
+ rw_enter(&hash->mh_contents, RW_READER);
+ res = i_mod_hash_find_nosync(hash, key, val);
+ rw_exit(&hash->mh_contents);
+
+ return (res);
+}
+
+int
+mod_hash_find_cb(mod_hash_t *hash, mod_hash_key_t key, mod_hash_val_t *val,
+ void (*find_cb)(mod_hash_key_t, mod_hash_val_t))
+{
+ int res;
+
+ rw_enter(&hash->mh_contents, RW_READER);
+ res = i_mod_hash_find_nosync(hash, key, val);
+ if (res == 0) {
+ find_cb(key, *val);
+ }
+ rw_exit(&hash->mh_contents);
+
+ return (res);
+}
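+
+/*
+ * A minimal find_cb sketch (my_entry_t and its reference count are
+ * hypothetical), taking a hold on the value while the hash lock is
+ * still held:
+ *
+ *	static void
+ *	my_hold_cb(mod_hash_key_t key, mod_hash_val_t val)
+ *	{
+ *		((my_entry_t *)val)->me_refcnt++;
+ *	}
+ *
+ *	if (mod_hash_find_cb(h, key, &v, my_hold_cb) == 0)
+ *		... v was returned with a hold already taken ...
+ */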
+
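+/*
+ * mod_hash_find_cb_rval()
+ * Like mod_hash_find_cb(), except the callback returns a value, which
+ * is passed back to the caller through 'cb_rval'.
+ */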
+int
+mod_hash_find_cb_rval(mod_hash_t *hash, mod_hash_key_t key, mod_hash_val_t *val,
+ int (*find_cb)(mod_hash_key_t, mod_hash_val_t), int *cb_rval)
+{
+ int res;
+
+ rw_enter(&hash->mh_contents, RW_READER);
+ res = i_mod_hash_find_nosync(hash, key, val);
+ if (res == 0) {
+ *cb_rval = find_cb(key, *val);
+ }
+ rw_exit(&hash->mh_contents);
+
+ return (res);
+}
+
+void
+i_mod_hash_walk_nosync(mod_hash_t *hash,
+ uint_t (*callback)(mod_hash_key_t, mod_hash_val_t *, void *), void *arg)
+{
+ struct mod_hash_entry *e;
+ uint_t hashidx;
+ int res = MH_WALK_CONTINUE;
+
+ for (hashidx = 0;
+ (hashidx < (hash->mh_nchains - 1)) && (res == MH_WALK_CONTINUE);
+ hashidx++) {
+ e = hash->mh_entries[hashidx];
+ while ((e != NULL) && (res == MH_WALK_CONTINUE)) {
+ res = callback(e->mhe_key, e->mhe_val, arg);
+ e = e->mhe_next;
+ }
+ }
+}
+
+/*
+ * mod_hash_walk()
+ * Walks all the elements in the hashtable and invokes the callback
+ * function with the key/value pair for each element. The hashtable
+ * is locked for readers so the callback function should not attempt
+ * to do any updates to the hashtable. The callback function should
+ * return MH_WALK_CONTINUE to continue walking the hashtable or
+ * MH_WALK_TERMINATE to abort the walk of the hashtable.
+ */
+void
+mod_hash_walk(mod_hash_t *hash,
+ uint_t (*callback)(mod_hash_key_t, mod_hash_val_t *, void *), void *arg)
+{
+ rw_enter(&hash->mh_contents, RW_READER);
+ i_mod_hash_walk_nosync(hash, callback, arg);
+ rw_exit(&hash->mh_contents);
+}
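+
+/*
+ * A minimal walk callback sketch (count_cb and the counter are
+ * hypothetical), counting the elements in a hash:
+ *
+ *	static uint_t
+ *	count_cb(mod_hash_key_t key, mod_hash_val_t *val, void *arg)
+ *	{
+ *		(*(size_t *)arg)++;
+ *		return (MH_WALK_CONTINUE);
+ *	}
+ *
+ *	size_t n = 0;
+ *	mod_hash_walk(h, count_cb, &n);
+ */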
+
+/*
+ * i_mod_hash_clear_nosync()
+ * mod_hash_clear()
+ * Clears the given hash table by calling the destructor of every hash
+ * element and freeing up all mod_hash_entry's.
+ */
+void
+i_mod_hash_clear_nosync(mod_hash_t *hash)
+{
+ int i;
+ struct mod_hash_entry *e, *old_e;
+
+ for (i = 0; i < hash->mh_nchains; i++) {
+ e = hash->mh_entries[i];
+ while (e != NULL) {
+ MH_KEY_DESTROY(hash, e->mhe_key);
+ MH_VAL_DESTROY(hash, e->mhe_val);
+ old_e = e;
+ e = e->mhe_next;
+ kmem_cache_free(mh_e_cache, old_e);
+ }
+ hash->mh_entries[i] = NULL;
+ }
+ hash->mh_stat.mhs_nelems = 0;
+}
+
+void
+mod_hash_clear(mod_hash_t *hash)
+{
+ ASSERT(hash);
+ rw_enter(&hash->mh_contents, RW_WRITER);
+ i_mod_hash_clear_nosync(hash);
+ rw_exit(&hash->mh_contents);
+}
diff --git a/module/icp/spi/kcf_spi.c b/module/icp/spi/kcf_spi.c
new file mode 100644
index 000000000..e6e463a62
--- /dev/null
+++ b/module/icp/spi/kcf_spi.c
@@ -0,0 +1,927 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * This file is part of the core Kernel Cryptographic Framework.
+ * It implements the SPI functions exported to cryptographic
+ * providers.
+ */
+
+
+#include <sys/zfs_context.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/impl.h>
+#include <sys/crypto/sched_impl.h>
+#include <sys/crypto/spi.h>
+
+/*
+ * minalloc and maxalloc values to be used for taskq_create().
+ */
+int crypto_taskq_threads = CRYPTO_TASKQ_THREADS;
+int crypto_taskq_minalloc = CRYPTO_TASKQ_MIN;
+int crypto_taskq_maxalloc = CRYPTO_TASKQ_MAX;
+
+static void remove_provider(kcf_provider_desc_t *);
+static void process_logical_providers(crypto_provider_info_t *,
+ kcf_provider_desc_t *);
+static int init_prov_mechs(crypto_provider_info_t *, kcf_provider_desc_t *);
+static int kcf_prov_kstat_update(kstat_t *, int);
+static void delete_kstat(kcf_provider_desc_t *);
+
+static kcf_prov_stats_t kcf_stats_ks_data_template = {
+ { "kcf_ops_total", KSTAT_DATA_UINT64 },
+ { "kcf_ops_passed", KSTAT_DATA_UINT64 },
+ { "kcf_ops_failed", KSTAT_DATA_UINT64 },
+ { "kcf_ops_returned_busy", KSTAT_DATA_UINT64 }
+};
+
+#define KCF_SPI_COPY_OPS(src, dst, ops) if ((src)->ops != NULL) \
+ *((dst)->ops) = *((src)->ops);
+
+/*
+ * Copy an ops vector from src to dst. Used during provider registration
+ * to copy the ops vector from the provider info structure to the
+ * provider descriptor maintained by KCF.
+ * Copying the ops vector specified by the provider is needed since the
+ * framework does not require the provider info structure to be
+ * persistent.
+ */
+static void
+copy_ops_vector_v1(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
+{
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_control_ops);
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_digest_ops);
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_cipher_ops);
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mac_ops);
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_sign_ops);
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_verify_ops);
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_ops);
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_cipher_mac_ops);
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_random_ops);
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_session_ops);
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_object_ops);
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_key_ops);
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_provider_ops);
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_ctx_ops);
+}
+
+static void
+copy_ops_vector_v2(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
+{
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mech_ops);
+}
+
+static void
+copy_ops_vector_v3(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
+{
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_nostore_key_ops);
+}
+
+/*
+ * This routine is used to add cryptographic providers to the KCF framework.
+ * Providers pass a crypto_provider_info structure to crypto_register_provider()
+ * and get back a handle. The crypto_provider_info structure contains a
+ * list of mechanisms supported by the provider and an ops vector containing
+ * provider entry points. Hardware providers call this routine in their attach
+ * routines. Software providers call this routine in their _init() routine.
+ */
+int
+crypto_register_provider(crypto_provider_info_t *info,
+ crypto_kcf_provider_handle_t *handle)
+{
+ char ks_name[KSTAT_STRLEN];
+
+ kcf_provider_desc_t *prov_desc = NULL;
+ int ret = CRYPTO_ARGUMENTS_BAD;
+
+ if (info->pi_interface_version > CRYPTO_SPI_VERSION_3)
+ return (CRYPTO_VERSION_MISMATCH);
+
+ /*
+ * Check provider type, must be software, hardware, or logical.
+ */
+ if (info->pi_provider_type != CRYPTO_HW_PROVIDER &&
+ info->pi_provider_type != CRYPTO_SW_PROVIDER &&
+ info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ /*
+ * Allocate and initialize a new provider descriptor. We also
+ * hold it and release it when done.
+ */
+ prov_desc = kcf_alloc_provider_desc(info);
+ KCF_PROV_REFHOLD(prov_desc);
+
+ prov_desc->pd_prov_type = info->pi_provider_type;
+
+ /* provider-private handle, opaque to KCF */
+ prov_desc->pd_prov_handle = info->pi_provider_handle;
+
+ /* copy provider description string */
+ if (info->pi_provider_description != NULL) {
+ /*
+		 * pi_provider_description is a string that can contain
+ * up to CRYPTO_PROVIDER_DESCR_MAX_LEN + 1 characters
+ * INCLUDING the terminating null character. A bcopy()
+ * is necessary here as pd_description should not have
+ * a null character. See comments in kcf_alloc_provider_desc()
+ * for details on pd_description field.
+ */
+ bcopy(info->pi_provider_description, prov_desc->pd_description,
+ MIN(strlen(info->pi_provider_description),
+ (size_t)CRYPTO_PROVIDER_DESCR_MAX_LEN));
+ }
+
+ if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
+ if (info->pi_ops_vector == NULL) {
+ goto bail;
+ }
+ copy_ops_vector_v1(info->pi_ops_vector,
+ prov_desc->pd_ops_vector);
+ if (info->pi_interface_version >= CRYPTO_SPI_VERSION_2) {
+ copy_ops_vector_v2(info->pi_ops_vector,
+ prov_desc->pd_ops_vector);
+ prov_desc->pd_flags = info->pi_flags;
+ }
+ if (info->pi_interface_version == CRYPTO_SPI_VERSION_3) {
+ copy_ops_vector_v3(info->pi_ops_vector,
+ prov_desc->pd_ops_vector);
+ }
+ }
+
+ /* object_ops and nostore_key_ops are mutually exclusive */
+ if (prov_desc->pd_ops_vector->co_object_ops &&
+ prov_desc->pd_ops_vector->co_nostore_key_ops) {
+ goto bail;
+ }
+
+ /* process the mechanisms supported by the provider */
+ if ((ret = init_prov_mechs(info, prov_desc)) != CRYPTO_SUCCESS)
+ goto bail;
+
+ /*
+ * Add provider to providers tables, also sets the descriptor
+ * pd_prov_id field.
+ */
+ if ((ret = kcf_prov_tab_add_provider(prov_desc)) != CRYPTO_SUCCESS) {
+ undo_register_provider(prov_desc, B_FALSE);
+ goto bail;
+ }
+
+ /*
+ * We create a taskq only for a hardware provider. The global
+ * software queue is used for software providers. We handle ordering
+ * of multi-part requests in the taskq routine. So, it is safe to
+ * have multiple threads for the taskq. We pass TASKQ_PREPOPULATE flag
+ * to keep some entries cached to improve performance.
+ */
+ if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
+ prov_desc->pd_sched_info.ks_taskq = taskq_create("kcf_taskq",
+ crypto_taskq_threads, minclsyspri,
+ crypto_taskq_minalloc, crypto_taskq_maxalloc,
+ TASKQ_PREPOPULATE);
+ else
+ prov_desc->pd_sched_info.ks_taskq = NULL;
+
+ /* no kernel session to logical providers */
+ if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
+ /*
+ * Open a session for session-oriented providers. This session
+ * is used for all kernel consumers. This is fine as a provider
+ * is required to support multiple thread access to a session.
+ * We can do this only after the taskq has been created as we
+ * do a kcf_submit_request() to open the session.
+ */
+ if (KCF_PROV_SESSION_OPS(prov_desc) != NULL) {
+ kcf_req_params_t params;
+
+ KCF_WRAP_SESSION_OPS_PARAMS(&params,
+ KCF_OP_SESSION_OPEN, &prov_desc->pd_sid, 0,
+ CRYPTO_USER, NULL, 0, prov_desc);
+ ret = kcf_submit_request(prov_desc, NULL, NULL, &params,
+ B_FALSE);
+
+ if (ret != CRYPTO_SUCCESS) {
+ undo_register_provider(prov_desc, B_TRUE);
+ ret = CRYPTO_FAILED;
+ goto bail;
+ }
+ }
+ }
+
+ if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
+ /*
+ * Create the kstat for this provider. There is a kstat
+ * installed for each successfully registered provider.
+ * This kstat is deleted, when the provider unregisters.
+ */
+ if (prov_desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
+ (void) snprintf(ks_name, KSTAT_STRLEN, "%s_%s",
+ "NONAME", "provider_stats");
+ } else {
+ (void) snprintf(ks_name, KSTAT_STRLEN, "%s_%d_%u_%s",
+ "NONAME", 0,
+ prov_desc->pd_prov_id, "provider_stats");
+ }
+
+ prov_desc->pd_kstat = kstat_create("kcf", 0, ks_name, "crypto",
+ KSTAT_TYPE_NAMED, sizeof (kcf_prov_stats_t) /
+ sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
+
+ if (prov_desc->pd_kstat != NULL) {
+ bcopy(&kcf_stats_ks_data_template,
+ &prov_desc->pd_ks_data,
+ sizeof (kcf_stats_ks_data_template));
+ prov_desc->pd_kstat->ks_data = &prov_desc->pd_ks_data;
+ KCF_PROV_REFHOLD(prov_desc);
+ KCF_PROV_IREFHOLD(prov_desc);
+ prov_desc->pd_kstat->ks_private = prov_desc;
+ prov_desc->pd_kstat->ks_update = kcf_prov_kstat_update;
+ kstat_install(prov_desc->pd_kstat);
+ }
+ }
+
+ if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
+ process_logical_providers(info, prov_desc);
+
+ mutex_enter(&prov_desc->pd_lock);
+ prov_desc->pd_state = KCF_PROV_READY;
+ mutex_exit(&prov_desc->pd_lock);
+ kcf_do_notify(prov_desc, B_TRUE);
+
+ *handle = prov_desc->pd_kcf_prov_handle;
+ ret = CRYPTO_SUCCESS;
+
+bail:
+ KCF_PROV_REFRELE(prov_desc);
+ return (ret);
+}
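+
+/*
+ * A minimal registration sketch for a software provider (my_crypto_ops,
+ * my_mech_tab and MY_MECH_COUNT are hypothetical, and the remaining
+ * crypto_provider_info_t fields follow the SPI definitions):
+ *
+ *	crypto_provider_info_t info;
+ *	crypto_kcf_provider_handle_t handle;
+ *
+ *	bzero(&info, sizeof (info));
+ *	info.pi_interface_version = CRYPTO_SPI_VERSION_1;
+ *	info.pi_provider_type = CRYPTO_SW_PROVIDER;
+ *	info.pi_provider_description = "example provider";
+ *	info.pi_ops_vector = &my_crypto_ops;
+ *	info.pi_mechanisms = my_mech_tab;
+ *	info.pi_mech_list_count = MY_MECH_COUNT;
+ *
+ *	if (crypto_register_provider(&info, &handle) != CRYPTO_SUCCESS)
+ *		... fail the module's _init() ...
+ */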
+
+/*
+ * This routine is used to notify the framework when a provider is being
+ * removed. Hardware providers call this routine in their detach routines.
+ * Software providers call this routine in their _fini() routine.
+ */
+int
+crypto_unregister_provider(crypto_kcf_provider_handle_t handle)
+{
+ uint_t mech_idx;
+ kcf_provider_desc_t *desc;
+ kcf_prov_state_t saved_state;
+
+ /* lookup provider descriptor */
+ if ((desc = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
+ return (CRYPTO_UNKNOWN_PROVIDER);
+
+ mutex_enter(&desc->pd_lock);
+ /*
+ * Check if any other thread is disabling or removing
+ * this provider. We return if this is the case.
+ */
+ if (desc->pd_state >= KCF_PROV_DISABLED) {
+ mutex_exit(&desc->pd_lock);
+ /* Release reference held by kcf_prov_tab_lookup(). */
+ KCF_PROV_REFRELE(desc);
+ return (CRYPTO_BUSY);
+ }
+
+ saved_state = desc->pd_state;
+ desc->pd_state = KCF_PROV_REMOVED;
+
+ if (saved_state == KCF_PROV_BUSY) {
+ /*
+ * The per-provider taskq threads may be waiting. We
+ * signal them so that they can start failing requests.
+ */
+ cv_broadcast(&desc->pd_resume_cv);
+ }
+
+ if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
+ /*
+ * Check if this provider is currently being used.
+ * pd_irefcnt is the number of holds from the internal
+ * structures. We add one to account for the above lookup.
+ */
+ if (desc->pd_refcnt > desc->pd_irefcnt + 1) {
+ desc->pd_state = saved_state;
+ mutex_exit(&desc->pd_lock);
+ /* Release reference held by kcf_prov_tab_lookup(). */
+ KCF_PROV_REFRELE(desc);
+ /*
+			 * The administrator, on getting the busy return
+			 * value, will presumably stop the clients, removing
+			 * the holds. Any retry will then succeed.
+ */
+ return (CRYPTO_BUSY);
+ }
+ }
+ mutex_exit(&desc->pd_lock);
+
+ if (desc->pd_prov_type != CRYPTO_SW_PROVIDER) {
+ remove_provider(desc);
+ }
+
+ if (desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
+ /* remove the provider from the mechanisms tables */
+ for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
+ mech_idx++) {
+ kcf_remove_mech_provider(
+ desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
+ }
+ }
+
+ /* remove provider from providers table */
+ if (kcf_prov_tab_rem_provider((crypto_provider_id_t)handle) !=
+ CRYPTO_SUCCESS) {
+ /* Release reference held by kcf_prov_tab_lookup(). */
+ KCF_PROV_REFRELE(desc);
+ return (CRYPTO_UNKNOWN_PROVIDER);
+ }
+
+ delete_kstat(desc);
+
+ if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
+ /* Release reference held by kcf_prov_tab_lookup(). */
+ KCF_PROV_REFRELE(desc);
+
+ /*
+		 * Wait until the existing requests complete.
+ */
+ mutex_enter(&desc->pd_lock);
+ while (desc->pd_state != KCF_PROV_FREED)
+ cv_wait(&desc->pd_remove_cv, &desc->pd_lock);
+ mutex_exit(&desc->pd_lock);
+ } else {
+ /*
+ * Wait until requests that have been sent to the provider
+ * complete.
+ */
+ mutex_enter(&desc->pd_lock);
+ while (desc->pd_irefcnt > 0)
+ cv_wait(&desc->pd_remove_cv, &desc->pd_lock);
+ mutex_exit(&desc->pd_lock);
+ }
+
+ kcf_do_notify(desc, B_FALSE);
+
+ if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
+ /*
+ * This is the only place where kcf_free_provider_desc()
+ * is called directly. KCF_PROV_REFRELE() should free the
+ * structure in all other places.
+ */
+ ASSERT(desc->pd_state == KCF_PROV_FREED &&
+ desc->pd_refcnt == 0);
+ kcf_free_provider_desc(desc);
+ } else {
+ KCF_PROV_REFRELE(desc);
+ }
+
+ return (CRYPTO_SUCCESS);
+}
+
+/*
+ * This routine is used to notify the framework that the state of
+ * a cryptographic provider has changed. Valid state codes are:
+ *
+ * CRYPTO_PROVIDER_READY
+ * The provider indicates that it can process more requests. A provider
+ * will notify with this event if it has previously notified us with a
+ * CRYPTO_PROVIDER_BUSY.
+ *
+ * CRYPTO_PROVIDER_BUSY
+ * The provider cannot take more requests.
+ *
+ * CRYPTO_PROVIDER_FAILED
+ * The provider encountered an internal error. The framework will not
+ * be sending any more requests to the provider. The provider may notify
+ * with a CRYPTO_PROVIDER_READY if it is able to recover from the error.
+ *
+ * This routine can be called from user or interrupt context.
+ */
+void
+crypto_provider_notification(crypto_kcf_provider_handle_t handle, uint_t state)
+{
+ kcf_provider_desc_t *pd;
+
+ /* lookup the provider from the given handle */
+ if ((pd = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
+ return;
+
+ mutex_enter(&pd->pd_lock);
+
+ if (pd->pd_state <= KCF_PROV_VERIFICATION_FAILED)
+ goto out;
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ cmn_err(CE_WARN, "crypto_provider_notification: "
+ "logical provider (%x) ignored\n", handle);
+ goto out;
+ }
+ switch (state) {
+ case CRYPTO_PROVIDER_READY:
+ switch (pd->pd_state) {
+ case KCF_PROV_BUSY:
+ pd->pd_state = KCF_PROV_READY;
+ /*
+ * Signal the per-provider taskq threads that they
+ * can start submitting requests.
+ */
+ cv_broadcast(&pd->pd_resume_cv);
+ break;
+
+ case KCF_PROV_FAILED:
+ /*
+ * The provider recovered from the error. Let us
+ * use it now.
+ */
+ pd->pd_state = KCF_PROV_READY;
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case CRYPTO_PROVIDER_BUSY:
+ switch (pd->pd_state) {
+ case KCF_PROV_READY:
+ pd->pd_state = KCF_PROV_BUSY;
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case CRYPTO_PROVIDER_FAILED:
+ /*
+ * We note the failure and return. The per-provider taskq
+ * threads check this flag and start failing the
+		 * threads check this flag and, if it is set, start failing
+		 * requests. See process_req_hwp() for details.
+ switch (pd->pd_state) {
+ case KCF_PROV_READY:
+ pd->pd_state = KCF_PROV_FAILED;
+ break;
+
+ case KCF_PROV_BUSY:
+ pd->pd_state = KCF_PROV_FAILED;
+ /*
+ * The per-provider taskq threads may be waiting. We
+ * signal them so that they can start failing requests.
+ */
+ cv_broadcast(&pd->pd_resume_cv);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+out:
+ mutex_exit(&pd->pd_lock);
+ KCF_PROV_REFRELE(pd);
+}
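+
+/*
+ * For example, a hardware provider whose request queue fills up might
+ * call (handle being the value returned by crypto_register_provider()):
+ *
+ *	crypto_provider_notification(handle, CRYPTO_PROVIDER_BUSY);
+ *	... and, once the queue drains ...
+ *	crypto_provider_notification(handle, CRYPTO_PROVIDER_READY);
+ */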
+
+/*
+ * This routine is used to notify the framework the result of
+ * an asynchronous request handled by a provider. Valid error
+ * codes are the same as the CRYPTO_* errors defined in common.h.
+ *
+ * This routine can be called from user or interrupt context.
+ */
+void
+crypto_op_notification(crypto_req_handle_t handle, int error)
+{
+ kcf_call_type_t ctype;
+
+ if (handle == NULL)
+ return;
+
+ if ((ctype = GET_REQ_TYPE(handle)) == CRYPTO_SYNCH) {
+ kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)handle;
+
+ if (error != CRYPTO_SUCCESS)
+ sreq->sn_provider->pd_sched_info.ks_nfails++;
+ KCF_PROV_IREFRELE(sreq->sn_provider);
+ kcf_sop_done(sreq, error);
+ } else {
+ kcf_areq_node_t *areq = (kcf_areq_node_t *)handle;
+
+ ASSERT(ctype == CRYPTO_ASYNCH);
+ if (error != CRYPTO_SUCCESS)
+ areq->an_provider->pd_sched_info.ks_nfails++;
+ KCF_PROV_IREFRELE(areq->an_provider);
+ kcf_aop_done(areq, error);
+ }
+}
+
+/*
+ * This routine is used by software providers to determine
+ * whether to use KM_SLEEP or KM_NOSLEEP during memory allocation.
+ * Note that hardware providers can always use KM_SLEEP. So,
+ * they do not need to call this routine.
+ *
+ * This routine can be called from user or interrupt context.
+ */
+int
+crypto_kmflag(crypto_req_handle_t handle)
+{
+ return (REQHNDL2_KMFLAG(handle));
+}
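+
+/*
+ * For example, a software provider allocating per-request scratch
+ * space would typically write:
+ *
+ *	buf = kmem_alloc(len, crypto_kmflag(req));
+ *
+ * so that the allocation sleeps only when the request context allows.
+ */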
+
+/*
+ * Process the mechanism info structures specified by the provider
+ * during registration. A NULL crypto_provider_info_t indicates
+ * an already initialized provider descriptor.
+ *
+ * Mechanisms are not added to the kernel's mechanism table if the
+ * provider is a logical provider.
+ *
+ * Returns CRYPTO_SUCCESS on success, CRYPTO_ARGUMENTS_BAD if one
+ * of the specified mechanisms was malformed, or CRYPTO_HOST_MEMORY
+ * if the table of mechanisms is full.
+ */
+static int
+init_prov_mechs(crypto_provider_info_t *info, kcf_provider_desc_t *desc)
+{
+ uint_t mech_idx;
+ uint_t cleanup_idx;
+ int err = CRYPTO_SUCCESS;
+ kcf_prov_mech_desc_t *pmd;
+ int desc_use_count = 0;
+ int mcount = desc->pd_mech_list_count;
+
+ if (desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ if (info != NULL) {
+ ASSERT(info->pi_mechanisms != NULL);
+ bcopy(info->pi_mechanisms, desc->pd_mechanisms,
+ sizeof (crypto_mech_info_t) * mcount);
+ }
+ return (CRYPTO_SUCCESS);
+ }
+
+ /*
+ * Copy the mechanism list from the provider info to the provider
+ * descriptor. desc->pd_mechanisms has an extra crypto_mech_info_t
+ * element if the provider has random_ops since we keep an internal
+ * mechanism, SUN_RANDOM, in this case.
+ */
+ if (info != NULL) {
+ if (info->pi_ops_vector->co_random_ops != NULL) {
+ crypto_mech_info_t *rand_mi;
+
+ /*
+ * Need the following check as it is possible to have
+ * a provider that implements just random_ops and has
+ * pi_mechanisms == NULL.
+ */
+ if (info->pi_mechanisms != NULL) {
+ bcopy(info->pi_mechanisms, desc->pd_mechanisms,
+ sizeof (crypto_mech_info_t) * (mcount - 1));
+ }
+ rand_mi = &desc->pd_mechanisms[mcount - 1];
+
+ bzero(rand_mi, sizeof (crypto_mech_info_t));
+ (void) strncpy(rand_mi->cm_mech_name, SUN_RANDOM,
+ CRYPTO_MAX_MECH_NAME);
+ rand_mi->cm_func_group_mask = CRYPTO_FG_RANDOM;
+ } else {
+ ASSERT(info->pi_mechanisms != NULL);
+ bcopy(info->pi_mechanisms, desc->pd_mechanisms,
+ sizeof (crypto_mech_info_t) * mcount);
+ }
+ }
+
+ /*
+	 * For each mechanism supported by the provider, add the provider
+ * to the corresponding KCF mechanism mech_entry chain.
+ */
+ for (mech_idx = 0; mech_idx < desc->pd_mech_list_count; mech_idx++) {
+ crypto_mech_info_t *mi = &desc->pd_mechanisms[mech_idx];
+
+ if ((mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BITS) &&
+ (mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BYTES)) {
+ err = CRYPTO_ARGUMENTS_BAD;
+ break;
+ }
+
+ if (desc->pd_flags & CRYPTO_HASH_NO_UPDATE &&
+ mi->cm_func_group_mask & CRYPTO_FG_DIGEST) {
+ /*
+ * We ask the provider to specify the limit
+ * per hash mechanism. But, in practice, a
+ * hardware limitation means all hash mechanisms
+ * will have the same maximum size allowed for
+ * input data. So, we make it a per provider
+ * limit to keep it simple.
+ */
+ if (mi->cm_max_input_length == 0) {
+ err = CRYPTO_ARGUMENTS_BAD;
+ break;
+ } else {
+ desc->pd_hash_limit = mi->cm_max_input_length;
+ }
+ }
+
+ if ((err = kcf_add_mech_provider(mech_idx, desc, &pmd)) !=
+ KCF_SUCCESS)
+ break;
+
+ if (pmd == NULL)
+ continue;
+
+ /* The provider will be used for this mechanism */
+ desc_use_count++;
+ }
+
+ /*
+ * Don't allow multiple software providers with disabled mechanisms
+ * to register. Subsequent enabling of mechanisms will result in
+ * an unsupported configuration, i.e. multiple software providers
+ * per mechanism.
+ */
+ if (desc_use_count == 0 && desc->pd_prov_type == CRYPTO_SW_PROVIDER)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ if (err == KCF_SUCCESS)
+ return (CRYPTO_SUCCESS);
+
+ /*
+ * An error occurred while adding the mechanism, cleanup
+ * and bail.
+ */
+ for (cleanup_idx = 0; cleanup_idx < mech_idx; cleanup_idx++) {
+ kcf_remove_mech_provider(
+ desc->pd_mechanisms[cleanup_idx].cm_mech_name, desc);
+ }
+
+ if (err == KCF_MECH_TAB_FULL)
+ return (CRYPTO_HOST_MEMORY);
+
+ return (CRYPTO_ARGUMENTS_BAD);
+}
+
+/*
+ * Update routine for the provider kstat. Only privileged users are
+ * allowed to access these statistics, since they are sensitive; some
+ * cryptographic attacks (e.g. traffic analysis) can make use of them.
+ */
+static int
+kcf_prov_kstat_update(kstat_t *ksp, int rw)
+{
+ kcf_prov_stats_t *ks_data;
+ kcf_provider_desc_t *pd = (kcf_provider_desc_t *)ksp->ks_private;
+
+ if (rw == KSTAT_WRITE)
+ return (EACCES);
+
+ ks_data = ksp->ks_data;
+
+ ks_data->ps_ops_total.value.ui64 =
+ pd->pd_sched_info.ks_ndispatches;
+ ks_data->ps_ops_failed.value.ui64 =
+ pd->pd_sched_info.ks_nfails;
+ ks_data->ps_ops_busy_rval.value.ui64 =
+ pd->pd_sched_info.ks_nbusy_rval;
+ ks_data->ps_ops_passed.value.ui64 =
+ pd->pd_sched_info.ks_ndispatches -
+ pd->pd_sched_info.ks_nfails -
+ pd->pd_sched_info.ks_nbusy_rval;
+
+ return (0);
+}
+
+/*
+ * Utility routine called from failure paths in crypto_register_provider()
+ * and from crypto_load_soft_disabled().
+ */
+void
+undo_register_provider(kcf_provider_desc_t *desc, boolean_t remove_prov)
+{
+ uint_t mech_idx;
+
+ /* remove the provider from the mechanisms tables */
+ for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
+ mech_idx++) {
+ kcf_remove_mech_provider(
+ desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
+ }
+
+ /* remove provider from providers table */
+ if (remove_prov)
+ (void) kcf_prov_tab_rem_provider(desc->pd_prov_id);
+}
+
+/*
+ * Utility routine called from crypto_load_soft_disabled(). Callers
+ * should have done a prior undo_register_provider().
+ */
+void
+redo_register_provider(kcf_provider_desc_t *pd)
+{
+ /* process the mechanisms supported by the provider */
+ (void) init_prov_mechs(NULL, pd);
+
+ /*
+ * Hold provider in providers table. We should not call
+ * kcf_prov_tab_add_provider() here as the provider descriptor
+ * is still valid which means it has an entry in the provider
+ * table.
+ */
+ KCF_PROV_REFHOLD(pd);
+ KCF_PROV_IREFHOLD(pd);
+}
+
+/*
+ * Add provider (p1) to another provider's array of providers (p2).
+ * Hardware and logical providers use this array to cross-reference
+ * each other.
+ */
+static void
+add_provider_to_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
+{
+ kcf_provider_list_t *new;
+
+ new = kmem_alloc(sizeof (kcf_provider_list_t), KM_SLEEP);
+ mutex_enter(&p2->pd_lock);
+ new->pl_next = p2->pd_provider_list;
+ p2->pd_provider_list = new;
+ KCF_PROV_IREFHOLD(p1);
+ new->pl_provider = p1;
+ mutex_exit(&p2->pd_lock);
+}
+
+/*
+ * Remove provider (p1) from another provider's array of providers (p2).
+ * Hardware and logical providers use this array to cross-reference
+ * each other.
+ */
+static void
+remove_provider_from_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
+	kcf_provider_list_t *pl = NULL, **prev;
+ kcf_provider_list_t *pl = NULL, **prev;
+
+ mutex_enter(&p2->pd_lock);
+ for (pl = p2->pd_provider_list, prev = &p2->pd_provider_list;
+ pl != NULL; prev = &pl->pl_next, pl = pl->pl_next) {
+ if (pl->pl_provider == p1) {
+ break;
+ }
+ }
+
+	if (pl == NULL) {	/* p1 was not found on p2's list */
+ mutex_exit(&p2->pd_lock);
+ return;
+ }
+
+ /* detach and free kcf_provider_list structure */
+ KCF_PROV_IREFRELE(p1);
+ *prev = pl->pl_next;
+ kmem_free(pl, sizeof (*pl));
+ mutex_exit(&p2->pd_lock);
+}
+
+/*
+ * Convert an array of logical provider handles (crypto_provider_id)
+ * stored in a crypto_provider_info structure into an array of provider
+ * descriptors (kcf_provider_desc_t) attached to a logical provider.
+ */
+static void
+process_logical_providers(crypto_provider_info_t *info, kcf_provider_desc_t *hp)
+{
+ kcf_provider_desc_t *lp;
+ crypto_provider_id_t handle;
+ int count = info->pi_logical_provider_count;
+ int i;
+
+ /* add hardware provider to each logical provider */
+ for (i = 0; i < count; i++) {
+ handle = info->pi_logical_providers[i];
+ lp = kcf_prov_tab_lookup((crypto_provider_id_t)handle);
+ if (lp == NULL) {
+ continue;
+ }
+ add_provider_to_array(hp, lp);
+ hp->pd_flags |= KCF_LPROV_MEMBER;
+
+ /*
+ * A hardware provider has to have the provider descriptor of
+ * every logical provider it belongs to, so it can be removed
+ * from the logical provider if the hardware provider
+ * unregisters from the framework.
+ */
+ add_provider_to_array(lp, hp);
+ KCF_PROV_REFRELE(lp);
+ }
+}
+
+/*
+ * This routine removes a provider from all of the logical or
+ * hardware providers it belongs to, and frees the provider's
+ * array of pointers to providers.
+ */
+static void
+remove_provider(kcf_provider_desc_t *pp)
+{
+ kcf_provider_desc_t *p;
+ kcf_provider_list_t *e, *next;
+
+ mutex_enter(&pp->pd_lock);
+ for (e = pp->pd_provider_list; e != NULL; e = next) {
+ p = e->pl_provider;
+ remove_provider_from_array(pp, p);
+ if (p->pd_prov_type == CRYPTO_HW_PROVIDER &&
+ p->pd_provider_list == NULL)
+ p->pd_flags &= ~KCF_LPROV_MEMBER;
+ KCF_PROV_IREFRELE(p);
+ next = e->pl_next;
+ kmem_free(e, sizeof (*e));
+ }
+ pp->pd_provider_list = NULL;
+ mutex_exit(&pp->pd_lock);
+}
+
+/*
+ * Dispatch events as needed for a provider. The is_added flag tells
+ * whether the provider is registering or unregistering.
+ */
+void
+kcf_do_notify(kcf_provider_desc_t *prov_desc, boolean_t is_added)
+{
+ int i;
+ crypto_notify_event_change_t ec;
+
+ ASSERT(prov_desc->pd_state > KCF_PROV_VERIFICATION_FAILED);
+
+ /*
+ * Inform interested clients of the mechanisms becoming
+ * available/unavailable. We skip this for logical providers
+ * as they do not affect mechanisms.
+ */
+ if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
+ ec.ec_provider_type = prov_desc->pd_prov_type;
+ ec.ec_change = is_added ? CRYPTO_MECH_ADDED :
+ CRYPTO_MECH_REMOVED;
+ for (i = 0; i < prov_desc->pd_mech_list_count; i++) {
+ (void) strncpy(ec.ec_mech_name,
+ prov_desc->pd_mechanisms[i].cm_mech_name,
+ CRYPTO_MAX_MECH_NAME);
+ kcf_walk_ntfylist(CRYPTO_EVENT_MECHS_CHANGED, &ec);
+ }
+	}
+
+ /*
+ * Inform interested clients about the new or departing provider.
+	 * In the case of a logical provider, we send the event only
+	 * for the logical provider itself and not for the underlying
+	 * providers, which are identified by the KCF_LPROV_MEMBER bit.
+ */
+ if (prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER ||
+ (prov_desc->pd_flags & KCF_LPROV_MEMBER) == 0) {
+ kcf_walk_ntfylist(is_added ? CRYPTO_EVENT_PROVIDER_REGISTERED :
+ CRYPTO_EVENT_PROVIDER_UNREGISTERED, prov_desc);
+ }
+}
+
+static void
+delete_kstat(kcf_provider_desc_t *desc)
+{
+ /* destroy the kstat created for this provider */
+ if (desc->pd_kstat != NULL) {
+ kcf_provider_desc_t *kspd = desc->pd_kstat->ks_private;
+
+ /* release reference held by desc->pd_kstat->ks_private */
+ ASSERT(desc == kspd);
+ kstat_delete(kspd->pd_kstat);
+ desc->pd_kstat = NULL;
+ KCF_PROV_REFRELE(kspd);
+ KCF_PROV_IREFRELE(kspd);
+ }
+}