author     Tom Caputi <[email protected]>          2016-05-12 10:51:24 -0400
committer  Brian Behlendorf <[email protected]>    2016-07-20 10:43:30 -0700
commit     0b04990a5de594659d2cf20458965277dd6efeb1 (patch)
tree       74369a3236e03359f7276cb9b19687e28c7f6d59 /module/icp/algs
parent     be88e733a634ad0d7f20350e1a17ede51922d3ff (diff)
Illumos Crypto Port module added to enable native encryption in zfs
A port of the Illumos Crypto Framework to a Linux kernel module (found in
module/icp). This is needed to do the actual encryption work. We cannot use
the Linux kernel's built-in crypto API because it is only exported to
GPL-licensed modules. Having the ICP also means the crypto code can run on
any of the other kernels under OpenZFS.

I ended up porting over most of the internals of the framework, which means
that porting over other API calls (if we need them) should be fairly easy.
Specifically, I have ported over the API functions related to encryption,
digests, MACs, and crypto templates. The ICP is able to use
assembly-accelerated encryption on amd64 machines and AES-NI instructions on
Intel chips that support it. There are placeholder directories for similar
assembly optimizations for other architectures (although they have not been
written).

Signed-off-by: Tom Caputi <[email protected]>
Signed-off-by: Tony Hutter <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Issue #4329
Diffstat (limited to 'module/icp/algs')
-rw-r--r--  module/icp/algs/aes/aes_impl.c    1618
-rw-r--r--  module/icp/algs/aes/aes_modes.c    135
-rw-r--r--  module/icp/algs/modes/cbc.c        305
-rw-r--r--  module/icp/algs/modes/ccm.c        920
-rw-r--r--  module/icp/algs/modes/ctr.c        238
-rw-r--r--  module/icp/algs/modes/ecb.c        143
-rw-r--r--  module/icp/algs/modes/gcm.c        748
-rw-r--r--  module/icp/algs/modes/modes.c      159
-rw-r--r--  module/icp/algs/sha1/sha1.c        663
-rw-r--r--  module/icp/algs/sha2/sha2.c        495
10 files changed, 5424 insertions, 0 deletions
diff --git a/module/icp/algs/aes/aes_impl.c b/module/icp/algs/aes/aes_impl.c
new file mode 100644
index 000000000..9c53964f0
--- /dev/null
+++ b/module/icp/algs/aes/aes_impl.c
@@ -0,0 +1,1618 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#include <sys/zfs_context.h>
+#include <sys/crypto/spi.h>
+#include <modes/modes.h>
+#include <aes/aes_impl.h>
+
+#ifdef __amd64
+
+#ifdef _KERNEL
+/*
+ * Workaround: the kernel does not save/restore XMM register state for
+ * kernel threads, so disable preemption while those registers are in use.
+ */
+#define KPREEMPT_DISABLE kpreempt_disable()
+#define KPREEMPT_ENABLE kpreempt_enable()
+
+#else
+#define KPREEMPT_DISABLE
+#define KPREEMPT_ENABLE
+#endif /* _KERNEL */
+#endif /* __amd64 */
+
+
+/*
+ * This file is derived from the file rijndael-alg-fst.c taken from the
+ * "optimized C code v3.0" on the "rijndael home page"
+ * http://www.iaik.tu-graz.ac.at/research/krypto/AES/old/~rijmen/rijndael/
+ * pointed to by the NIST web site http://csrc.nist.gov/archive/aes/
+ *
+ * The following note is from the original file:
+ */
+
+/*
+ * rijndael-alg-fst.c
+ *
+ * @version 3.0 (December 2000)
+ *
+ * Optimised ANSI C code for the Rijndael cipher (now AES)
+ *
+ * @author Vincent Rijmen <[email protected]>
+ * @author Antoon Bosselaers <[email protected]>
+ * @author Paulo Barreto <[email protected]>
+ *
+ * This code is hereby placed in the public domain.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__amd64)
+
+/* These functions execute generic amd64 instructions, usable on both AMD and Intel CPUs: */
+extern int rijndael_key_setup_enc_amd64(uint32_t rk[],
+ const uint32_t cipherKey[], int keyBits);
+extern int rijndael_key_setup_dec_amd64(uint32_t rk[],
+ const uint32_t cipherKey[], int keyBits);
+extern void aes_encrypt_amd64(const uint32_t rk[], int Nr,
+ const uint32_t pt[4], uint32_t ct[4]);
+extern void aes_decrypt_amd64(const uint32_t rk[], int Nr,
+ const uint32_t ct[4], uint32_t pt[4]);
+
+/* These functions are used to execute Intel-specific AES-NI instructions: */
+extern int rijndael_key_setup_enc_intel(uint32_t rk[],
+ const uint32_t cipherKey[], uint64_t keyBits);
+extern int rijndael_key_setup_dec_intel(uint32_t rk[],
+ const uint32_t cipherKey[], uint64_t keyBits);
+extern void aes_encrypt_intel(const uint32_t rk[], int Nr,
+ const uint32_t pt[4], uint32_t ct[4]);
+extern void aes_decrypt_intel(const uint32_t rk[], int Nr,
+ const uint32_t ct[4], uint32_t pt[4]);
+
+static int intel_aes_instructions_present(void);
+
+#define AES_ENCRYPT_IMPL(a, b, c, d, e) rijndael_encrypt(a, b, c, d, e)
+#define AES_DECRYPT_IMPL(a, b, c, d, e) rijndael_decrypt(a, b, c, d, e)
+
+#else /* Generic C implementation */
+
+#define AES_ENCRYPT_IMPL(a, b, c, d, e) rijndael_encrypt(a, b, c, d)
+#define AES_DECRYPT_IMPL(a, b, c, d, e) rijndael_decrypt(a, b, c, d)
+#define rijndael_key_setup_enc_raw rijndael_key_setup_enc
+#endif /* __amd64 */
+
+#if defined(_LITTLE_ENDIAN) && !defined(__amd64)
+#define AES_BYTE_SWAP
+#endif
+
+
+#if !defined(__amd64)
+/*
+ * Constant tables
+ */
+
+/*
+ * Te0[x] = S [x].[02, 01, 01, 03];
+ * Te1[x] = S [x].[03, 02, 01, 01];
+ * Te2[x] = S [x].[01, 03, 02, 01];
+ * Te3[x] = S [x].[01, 01, 03, 02];
+ * Te4[x] = S [x].[01, 01, 01, 01];
+ *
+ * Td0[x] = Si[x].[0e, 09, 0d, 0b];
+ * Td1[x] = Si[x].[0b, 0e, 09, 0d];
+ * Td2[x] = Si[x].[0d, 0b, 0e, 09];
+ * Td3[x] = Si[x].[09, 0d, 0b, 0e];
+ * Td4[x] = Si[x].[01, 01, 01, 01];
+ */
+
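The tables defined below are fully determined by the AES S-box and the
GF(2^8) column multipliers listed in the comment above, so they can be
regenerated rather than transcribed. A minimal generator sketch follows; it
is an editorial illustration, not code from this patch, and `sbox` is
assumed to hold the standard 256-byte AES S-box:

	static uint8_t
	xtime(uint8_t b)	/* multiply by x (i.e., by 2) in GF(2^8) */
	{
		return ((uint8_t)((b << 1) ^ ((b & 0x80) ? 0x1b : 0x00)));
	}

	static void
	gen_encrypt_tables(uint32_t te0[256], uint32_t te1[256],
	    uint32_t te2[256], uint32_t te3[256], uint32_t te4[256],
	    const uint8_t sbox[256])
	{
		int x;

		for (x = 0; x < 256; x++) {
			uint8_t s = sbox[x];
			uint8_t s2 = xtime(s);			/* 02.S[x] */
			uint8_t s3 = (uint8_t)(s2 ^ s);		/* 03.S[x] */

			/* Te0[x] = S[x].[02, 01, 01, 03] */
			te0[x] = ((uint32_t)s2 << 24) | ((uint32_t)s << 16) |
			    ((uint32_t)s << 8) | s3;
			/* Te1..Te3 are successive 8-bit right-rotations of Te0 */
			te1[x] = (te0[x] >> 8) | (te0[x] << 24);
			te2[x] = (te0[x] >> 16) | (te0[x] << 16);
			te3[x] = (te0[x] >> 24) | (te0[x] << 8);
			/* Te4[x] = S[x].[01, 01, 01, 01] */
			te4[x] = ((uint32_t)s << 24) | ((uint32_t)s << 16) |
			    ((uint32_t)s << 8) | (uint32_t)s;
		}
	}

The Td tables follow the same pattern with the inverse S-box and the
[0e, 09, 0d, 0b] multipliers; comparing generated output against the
constants below is a quick sanity check when porting.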
+/* Encrypt Sbox constants (for the substitute bytes operation) */
+
+static const uint32_t Te0[256] =
+{
+ 0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU,
+ 0xfff2f20dU, 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U,
+ 0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU,
+ 0xe7fefe19U, 0xb5d7d762U, 0x4dababe6U, 0xec76769aU,
+ 0x8fcaca45U, 0x1f82829dU, 0x89c9c940U, 0xfa7d7d87U,
+ 0xeffafa15U, 0xb25959ebU, 0x8e4747c9U, 0xfbf0f00bU,
+ 0x41adadecU, 0xb3d4d467U, 0x5fa2a2fdU, 0x45afafeaU,
+ 0x239c9cbfU, 0x53a4a4f7U, 0xe4727296U, 0x9bc0c05bU,
+ 0x75b7b7c2U, 0xe1fdfd1cU, 0x3d9393aeU, 0x4c26266aU,
+ 0x6c36365aU, 0x7e3f3f41U, 0xf5f7f702U, 0x83cccc4fU,
+ 0x6834345cU, 0x51a5a5f4U, 0xd1e5e534U, 0xf9f1f108U,
+ 0xe2717193U, 0xabd8d873U, 0x62313153U, 0x2a15153fU,
+ 0x0804040cU, 0x95c7c752U, 0x46232365U, 0x9dc3c35eU,
+ 0x30181828U, 0x379696a1U, 0x0a05050fU, 0x2f9a9ab5U,
+ 0x0e070709U, 0x24121236U, 0x1b80809bU, 0xdfe2e23dU,
+ 0xcdebeb26U, 0x4e272769U, 0x7fb2b2cdU, 0xea75759fU,
+ 0x1209091bU, 0x1d83839eU, 0x582c2c74U, 0x341a1a2eU,
+ 0x361b1b2dU, 0xdc6e6eb2U, 0xb45a5aeeU, 0x5ba0a0fbU,
+ 0xa45252f6U, 0x763b3b4dU, 0xb7d6d661U, 0x7db3b3ceU,
+ 0x5229297bU, 0xdde3e33eU, 0x5e2f2f71U, 0x13848497U,
+ 0xa65353f5U, 0xb9d1d168U, 0x00000000U, 0xc1eded2cU,
+ 0x40202060U, 0xe3fcfc1fU, 0x79b1b1c8U, 0xb65b5bedU,
+ 0xd46a6abeU, 0x8dcbcb46U, 0x67bebed9U, 0x7239394bU,
+ 0x944a4adeU, 0x984c4cd4U, 0xb05858e8U, 0x85cfcf4aU,
+ 0xbbd0d06bU, 0xc5efef2aU, 0x4faaaae5U, 0xedfbfb16U,
+ 0x864343c5U, 0x9a4d4dd7U, 0x66333355U, 0x11858594U,
+ 0x8a4545cfU, 0xe9f9f910U, 0x04020206U, 0xfe7f7f81U,
+ 0xa05050f0U, 0x783c3c44U, 0x259f9fbaU, 0x4ba8a8e3U,
+ 0xa25151f3U, 0x5da3a3feU, 0x804040c0U, 0x058f8f8aU,
+ 0x3f9292adU, 0x219d9dbcU, 0x70383848U, 0xf1f5f504U,
+ 0x63bcbcdfU, 0x77b6b6c1U, 0xafdada75U, 0x42212163U,
+ 0x20101030U, 0xe5ffff1aU, 0xfdf3f30eU, 0xbfd2d26dU,
+ 0x81cdcd4cU, 0x180c0c14U, 0x26131335U, 0xc3ecec2fU,
+ 0xbe5f5fe1U, 0x359797a2U, 0x884444ccU, 0x2e171739U,
+ 0x93c4c457U, 0x55a7a7f2U, 0xfc7e7e82U, 0x7a3d3d47U,
+ 0xc86464acU, 0xba5d5de7U, 0x3219192bU, 0xe6737395U,
+ 0xc06060a0U, 0x19818198U, 0x9e4f4fd1U, 0xa3dcdc7fU,
+ 0x44222266U, 0x542a2a7eU, 0x3b9090abU, 0x0b888883U,
+ 0x8c4646caU, 0xc7eeee29U, 0x6bb8b8d3U, 0x2814143cU,
+ 0xa7dede79U, 0xbc5e5ee2U, 0x160b0b1dU, 0xaddbdb76U,
+ 0xdbe0e03bU, 0x64323256U, 0x743a3a4eU, 0x140a0a1eU,
+ 0x924949dbU, 0x0c06060aU, 0x4824246cU, 0xb85c5ce4U,
+ 0x9fc2c25dU, 0xbdd3d36eU, 0x43acacefU, 0xc46262a6U,
+ 0x399191a8U, 0x319595a4U, 0xd3e4e437U, 0xf279798bU,
+ 0xd5e7e732U, 0x8bc8c843U, 0x6e373759U, 0xda6d6db7U,
+ 0x018d8d8cU, 0xb1d5d564U, 0x9c4e4ed2U, 0x49a9a9e0U,
+ 0xd86c6cb4U, 0xac5656faU, 0xf3f4f407U, 0xcfeaea25U,
+ 0xca6565afU, 0xf47a7a8eU, 0x47aeaee9U, 0x10080818U,
+ 0x6fbabad5U, 0xf0787888U, 0x4a25256fU, 0x5c2e2e72U,
+ 0x381c1c24U, 0x57a6a6f1U, 0x73b4b4c7U, 0x97c6c651U,
+ 0xcbe8e823U, 0xa1dddd7cU, 0xe874749cU, 0x3e1f1f21U,
+ 0x964b4bddU, 0x61bdbddcU, 0x0d8b8b86U, 0x0f8a8a85U,
+ 0xe0707090U, 0x7c3e3e42U, 0x71b5b5c4U, 0xcc6666aaU,
+ 0x904848d8U, 0x06030305U, 0xf7f6f601U, 0x1c0e0e12U,
+ 0xc26161a3U, 0x6a35355fU, 0xae5757f9U, 0x69b9b9d0U,
+ 0x17868691U, 0x99c1c158U, 0x3a1d1d27U, 0x279e9eb9U,
+ 0xd9e1e138U, 0xebf8f813U, 0x2b9898b3U, 0x22111133U,
+ 0xd26969bbU, 0xa9d9d970U, 0x078e8e89U, 0x339494a7U,
+ 0x2d9b9bb6U, 0x3c1e1e22U, 0x15878792U, 0xc9e9e920U,
+ 0x87cece49U, 0xaa5555ffU, 0x50282878U, 0xa5dfdf7aU,
+ 0x038c8c8fU, 0x59a1a1f8U, 0x09898980U, 0x1a0d0d17U,
+ 0x65bfbfdaU, 0xd7e6e631U, 0x844242c6U, 0xd06868b8U,
+ 0x824141c3U, 0x299999b0U, 0x5a2d2d77U, 0x1e0f0f11U,
+ 0x7bb0b0cbU, 0xa85454fcU, 0x6dbbbbd6U, 0x2c16163aU
+};
+
+
+static const uint32_t Te1[256] =
+{
+ 0xa5c66363U, 0x84f87c7cU, 0x99ee7777U, 0x8df67b7bU,
+ 0x0dfff2f2U, 0xbdd66b6bU, 0xb1de6f6fU, 0x5491c5c5U,
+ 0x50603030U, 0x03020101U, 0xa9ce6767U, 0x7d562b2bU,
+ 0x19e7fefeU, 0x62b5d7d7U, 0xe64dababU, 0x9aec7676U,
+ 0x458fcacaU, 0x9d1f8282U, 0x4089c9c9U, 0x87fa7d7dU,
+ 0x15effafaU, 0xebb25959U, 0xc98e4747U, 0x0bfbf0f0U,
+ 0xec41adadU, 0x67b3d4d4U, 0xfd5fa2a2U, 0xea45afafU,
+ 0xbf239c9cU, 0xf753a4a4U, 0x96e47272U, 0x5b9bc0c0U,
+ 0xc275b7b7U, 0x1ce1fdfdU, 0xae3d9393U, 0x6a4c2626U,
+ 0x5a6c3636U, 0x417e3f3fU, 0x02f5f7f7U, 0x4f83ccccU,
+ 0x5c683434U, 0xf451a5a5U, 0x34d1e5e5U, 0x08f9f1f1U,
+ 0x93e27171U, 0x73abd8d8U, 0x53623131U, 0x3f2a1515U,
+ 0x0c080404U, 0x5295c7c7U, 0x65462323U, 0x5e9dc3c3U,
+ 0x28301818U, 0xa1379696U, 0x0f0a0505U, 0xb52f9a9aU,
+ 0x090e0707U, 0x36241212U, 0x9b1b8080U, 0x3ddfe2e2U,
+ 0x26cdebebU, 0x694e2727U, 0xcd7fb2b2U, 0x9fea7575U,
+ 0x1b120909U, 0x9e1d8383U, 0x74582c2cU, 0x2e341a1aU,
+ 0x2d361b1bU, 0xb2dc6e6eU, 0xeeb45a5aU, 0xfb5ba0a0U,
+ 0xf6a45252U, 0x4d763b3bU, 0x61b7d6d6U, 0xce7db3b3U,
+ 0x7b522929U, 0x3edde3e3U, 0x715e2f2fU, 0x97138484U,
+ 0xf5a65353U, 0x68b9d1d1U, 0x00000000U, 0x2cc1ededU,
+ 0x60402020U, 0x1fe3fcfcU, 0xc879b1b1U, 0xedb65b5bU,
+ 0xbed46a6aU, 0x468dcbcbU, 0xd967bebeU, 0x4b723939U,
+ 0xde944a4aU, 0xd4984c4cU, 0xe8b05858U, 0x4a85cfcfU,
+ 0x6bbbd0d0U, 0x2ac5efefU, 0xe54faaaaU, 0x16edfbfbU,
+ 0xc5864343U, 0xd79a4d4dU, 0x55663333U, 0x94118585U,
+ 0xcf8a4545U, 0x10e9f9f9U, 0x06040202U, 0x81fe7f7fU,
+ 0xf0a05050U, 0x44783c3cU, 0xba259f9fU, 0xe34ba8a8U,
+ 0xf3a25151U, 0xfe5da3a3U, 0xc0804040U, 0x8a058f8fU,
+ 0xad3f9292U, 0xbc219d9dU, 0x48703838U, 0x04f1f5f5U,
+ 0xdf63bcbcU, 0xc177b6b6U, 0x75afdadaU, 0x63422121U,
+ 0x30201010U, 0x1ae5ffffU, 0x0efdf3f3U, 0x6dbfd2d2U,
+ 0x4c81cdcdU, 0x14180c0cU, 0x35261313U, 0x2fc3ececU,
+ 0xe1be5f5fU, 0xa2359797U, 0xcc884444U, 0x392e1717U,
+ 0x5793c4c4U, 0xf255a7a7U, 0x82fc7e7eU, 0x477a3d3dU,
+ 0xacc86464U, 0xe7ba5d5dU, 0x2b321919U, 0x95e67373U,
+ 0xa0c06060U, 0x98198181U, 0xd19e4f4fU, 0x7fa3dcdcU,
+ 0x66442222U, 0x7e542a2aU, 0xab3b9090U, 0x830b8888U,
+ 0xca8c4646U, 0x29c7eeeeU, 0xd36bb8b8U, 0x3c281414U,
+ 0x79a7dedeU, 0xe2bc5e5eU, 0x1d160b0bU, 0x76addbdbU,
+ 0x3bdbe0e0U, 0x56643232U, 0x4e743a3aU, 0x1e140a0aU,
+ 0xdb924949U, 0x0a0c0606U, 0x6c482424U, 0xe4b85c5cU,
+ 0x5d9fc2c2U, 0x6ebdd3d3U, 0xef43acacU, 0xa6c46262U,
+ 0xa8399191U, 0xa4319595U, 0x37d3e4e4U, 0x8bf27979U,
+ 0x32d5e7e7U, 0x438bc8c8U, 0x596e3737U, 0xb7da6d6dU,
+ 0x8c018d8dU, 0x64b1d5d5U, 0xd29c4e4eU, 0xe049a9a9U,
+ 0xb4d86c6cU, 0xfaac5656U, 0x07f3f4f4U, 0x25cfeaeaU,
+ 0xafca6565U, 0x8ef47a7aU, 0xe947aeaeU, 0x18100808U,
+ 0xd56fbabaU, 0x88f07878U, 0x6f4a2525U, 0x725c2e2eU,
+ 0x24381c1cU, 0xf157a6a6U, 0xc773b4b4U, 0x5197c6c6U,
+ 0x23cbe8e8U, 0x7ca1ddddU, 0x9ce87474U, 0x213e1f1fU,
+ 0xdd964b4bU, 0xdc61bdbdU, 0x860d8b8bU, 0x850f8a8aU,
+ 0x90e07070U, 0x427c3e3eU, 0xc471b5b5U, 0xaacc6666U,
+ 0xd8904848U, 0x05060303U, 0x01f7f6f6U, 0x121c0e0eU,
+ 0xa3c26161U, 0x5f6a3535U, 0xf9ae5757U, 0xd069b9b9U,
+ 0x91178686U, 0x5899c1c1U, 0x273a1d1dU, 0xb9279e9eU,
+ 0x38d9e1e1U, 0x13ebf8f8U, 0xb32b9898U, 0x33221111U,
+ 0xbbd26969U, 0x70a9d9d9U, 0x89078e8eU, 0xa7339494U,
+ 0xb62d9b9bU, 0x223c1e1eU, 0x92158787U, 0x20c9e9e9U,
+ 0x4987ceceU, 0xffaa5555U, 0x78502828U, 0x7aa5dfdfU,
+ 0x8f038c8cU, 0xf859a1a1U, 0x80098989U, 0x171a0d0dU,
+ 0xda65bfbfU, 0x31d7e6e6U, 0xc6844242U, 0xb8d06868U,
+ 0xc3824141U, 0xb0299999U, 0x775a2d2dU, 0x111e0f0fU,
+ 0xcb7bb0b0U, 0xfca85454U, 0xd66dbbbbU, 0x3a2c1616U
+};
+
+
+static const uint32_t Te2[256] =
+{
+ 0x63a5c663U, 0x7c84f87cU, 0x7799ee77U, 0x7b8df67bU,
+ 0xf20dfff2U, 0x6bbdd66bU, 0x6fb1de6fU, 0xc55491c5U,
+ 0x30506030U, 0x01030201U, 0x67a9ce67U, 0x2b7d562bU,
+ 0xfe19e7feU, 0xd762b5d7U, 0xabe64dabU, 0x769aec76U,
+ 0xca458fcaU, 0x829d1f82U, 0xc94089c9U, 0x7d87fa7dU,
+ 0xfa15effaU, 0x59ebb259U, 0x47c98e47U, 0xf00bfbf0U,
+ 0xadec41adU, 0xd467b3d4U, 0xa2fd5fa2U, 0xafea45afU,
+ 0x9cbf239cU, 0xa4f753a4U, 0x7296e472U, 0xc05b9bc0U,
+ 0xb7c275b7U, 0xfd1ce1fdU, 0x93ae3d93U, 0x266a4c26U,
+ 0x365a6c36U, 0x3f417e3fU, 0xf702f5f7U, 0xcc4f83ccU,
+ 0x345c6834U, 0xa5f451a5U, 0xe534d1e5U, 0xf108f9f1U,
+ 0x7193e271U, 0xd873abd8U, 0x31536231U, 0x153f2a15U,
+ 0x040c0804U, 0xc75295c7U, 0x23654623U, 0xc35e9dc3U,
+ 0x18283018U, 0x96a13796U, 0x050f0a05U, 0x9ab52f9aU,
+ 0x07090e07U, 0x12362412U, 0x809b1b80U, 0xe23ddfe2U,
+ 0xeb26cdebU, 0x27694e27U, 0xb2cd7fb2U, 0x759fea75U,
+ 0x091b1209U, 0x839e1d83U, 0x2c74582cU, 0x1a2e341aU,
+ 0x1b2d361bU, 0x6eb2dc6eU, 0x5aeeb45aU, 0xa0fb5ba0U,
+ 0x52f6a452U, 0x3b4d763bU, 0xd661b7d6U, 0xb3ce7db3U,
+ 0x297b5229U, 0xe33edde3U, 0x2f715e2fU, 0x84971384U,
+ 0x53f5a653U, 0xd168b9d1U, 0x00000000U, 0xed2cc1edU,
+ 0x20604020U, 0xfc1fe3fcU, 0xb1c879b1U, 0x5bedb65bU,
+ 0x6abed46aU, 0xcb468dcbU, 0xbed967beU, 0x394b7239U,
+ 0x4ade944aU, 0x4cd4984cU, 0x58e8b058U, 0xcf4a85cfU,
+ 0xd06bbbd0U, 0xef2ac5efU, 0xaae54faaU, 0xfb16edfbU,
+ 0x43c58643U, 0x4dd79a4dU, 0x33556633U, 0x85941185U,
+ 0x45cf8a45U, 0xf910e9f9U, 0x02060402U, 0x7f81fe7fU,
+ 0x50f0a050U, 0x3c44783cU, 0x9fba259fU, 0xa8e34ba8U,
+ 0x51f3a251U, 0xa3fe5da3U, 0x40c08040U, 0x8f8a058fU,
+ 0x92ad3f92U, 0x9dbc219dU, 0x38487038U, 0xf504f1f5U,
+ 0xbcdf63bcU, 0xb6c177b6U, 0xda75afdaU, 0x21634221U,
+ 0x10302010U, 0xff1ae5ffU, 0xf30efdf3U, 0xd26dbfd2U,
+ 0xcd4c81cdU, 0x0c14180cU, 0x13352613U, 0xec2fc3ecU,
+ 0x5fe1be5fU, 0x97a23597U, 0x44cc8844U, 0x17392e17U,
+ 0xc45793c4U, 0xa7f255a7U, 0x7e82fc7eU, 0x3d477a3dU,
+ 0x64acc864U, 0x5de7ba5dU, 0x192b3219U, 0x7395e673U,
+ 0x60a0c060U, 0x81981981U, 0x4fd19e4fU, 0xdc7fa3dcU,
+ 0x22664422U, 0x2a7e542aU, 0x90ab3b90U, 0x88830b88U,
+ 0x46ca8c46U, 0xee29c7eeU, 0xb8d36bb8U, 0x143c2814U,
+ 0xde79a7deU, 0x5ee2bc5eU, 0x0b1d160bU, 0xdb76addbU,
+ 0xe03bdbe0U, 0x32566432U, 0x3a4e743aU, 0x0a1e140aU,
+ 0x49db9249U, 0x060a0c06U, 0x246c4824U, 0x5ce4b85cU,
+ 0xc25d9fc2U, 0xd36ebdd3U, 0xacef43acU, 0x62a6c462U,
+ 0x91a83991U, 0x95a43195U, 0xe437d3e4U, 0x798bf279U,
+ 0xe732d5e7U, 0xc8438bc8U, 0x37596e37U, 0x6db7da6dU,
+ 0x8d8c018dU, 0xd564b1d5U, 0x4ed29c4eU, 0xa9e049a9U,
+ 0x6cb4d86cU, 0x56faac56U, 0xf407f3f4U, 0xea25cfeaU,
+ 0x65afca65U, 0x7a8ef47aU, 0xaee947aeU, 0x08181008U,
+ 0xbad56fbaU, 0x7888f078U, 0x256f4a25U, 0x2e725c2eU,
+ 0x1c24381cU, 0xa6f157a6U, 0xb4c773b4U, 0xc65197c6U,
+ 0xe823cbe8U, 0xdd7ca1ddU, 0x749ce874U, 0x1f213e1fU,
+ 0x4bdd964bU, 0xbddc61bdU, 0x8b860d8bU, 0x8a850f8aU,
+ 0x7090e070U, 0x3e427c3eU, 0xb5c471b5U, 0x66aacc66U,
+ 0x48d89048U, 0x03050603U, 0xf601f7f6U, 0x0e121c0eU,
+ 0x61a3c261U, 0x355f6a35U, 0x57f9ae57U, 0xb9d069b9U,
+ 0x86911786U, 0xc15899c1U, 0x1d273a1dU, 0x9eb9279eU,
+ 0xe138d9e1U, 0xf813ebf8U, 0x98b32b98U, 0x11332211U,
+ 0x69bbd269U, 0xd970a9d9U, 0x8e89078eU, 0x94a73394U,
+ 0x9bb62d9bU, 0x1e223c1eU, 0x87921587U, 0xe920c9e9U,
+ 0xce4987ceU, 0x55ffaa55U, 0x28785028U, 0xdf7aa5dfU,
+ 0x8c8f038cU, 0xa1f859a1U, 0x89800989U, 0x0d171a0dU,
+ 0xbfda65bfU, 0xe631d7e6U, 0x42c68442U, 0x68b8d068U,
+ 0x41c38241U, 0x99b02999U, 0x2d775a2dU, 0x0f111e0fU,
+ 0xb0cb7bb0U, 0x54fca854U, 0xbbd66dbbU, 0x163a2c16U
+};
+
+
+static const uint32_t Te3[256] =
+{
+ 0x6363a5c6U, 0x7c7c84f8U, 0x777799eeU, 0x7b7b8df6U,
+ 0xf2f20dffU, 0x6b6bbdd6U, 0x6f6fb1deU, 0xc5c55491U,
+ 0x30305060U, 0x01010302U, 0x6767a9ceU, 0x2b2b7d56U,
+ 0xfefe19e7U, 0xd7d762b5U, 0xababe64dU, 0x76769aecU,
+ 0xcaca458fU, 0x82829d1fU, 0xc9c94089U, 0x7d7d87faU,
+ 0xfafa15efU, 0x5959ebb2U, 0x4747c98eU, 0xf0f00bfbU,
+ 0xadadec41U, 0xd4d467b3U, 0xa2a2fd5fU, 0xafafea45U,
+ 0x9c9cbf23U, 0xa4a4f753U, 0x727296e4U, 0xc0c05b9bU,
+ 0xb7b7c275U, 0xfdfd1ce1U, 0x9393ae3dU, 0x26266a4cU,
+ 0x36365a6cU, 0x3f3f417eU, 0xf7f702f5U, 0xcccc4f83U,
+ 0x34345c68U, 0xa5a5f451U, 0xe5e534d1U, 0xf1f108f9U,
+ 0x717193e2U, 0xd8d873abU, 0x31315362U, 0x15153f2aU,
+ 0x04040c08U, 0xc7c75295U, 0x23236546U, 0xc3c35e9dU,
+ 0x18182830U, 0x9696a137U, 0x05050f0aU, 0x9a9ab52fU,
+ 0x0707090eU, 0x12123624U, 0x80809b1bU, 0xe2e23ddfU,
+ 0xebeb26cdU, 0x2727694eU, 0xb2b2cd7fU, 0x75759feaU,
+ 0x09091b12U, 0x83839e1dU, 0x2c2c7458U, 0x1a1a2e34U,
+ 0x1b1b2d36U, 0x6e6eb2dcU, 0x5a5aeeb4U, 0xa0a0fb5bU,
+ 0x5252f6a4U, 0x3b3b4d76U, 0xd6d661b7U, 0xb3b3ce7dU,
+ 0x29297b52U, 0xe3e33eddU, 0x2f2f715eU, 0x84849713U,
+ 0x5353f5a6U, 0xd1d168b9U, 0x00000000U, 0xeded2cc1U,
+ 0x20206040U, 0xfcfc1fe3U, 0xb1b1c879U, 0x5b5bedb6U,
+ 0x6a6abed4U, 0xcbcb468dU, 0xbebed967U, 0x39394b72U,
+ 0x4a4ade94U, 0x4c4cd498U, 0x5858e8b0U, 0xcfcf4a85U,
+ 0xd0d06bbbU, 0xefef2ac5U, 0xaaaae54fU, 0xfbfb16edU,
+ 0x4343c586U, 0x4d4dd79aU, 0x33335566U, 0x85859411U,
+ 0x4545cf8aU, 0xf9f910e9U, 0x02020604U, 0x7f7f81feU,
+ 0x5050f0a0U, 0x3c3c4478U, 0x9f9fba25U, 0xa8a8e34bU,
+ 0x5151f3a2U, 0xa3a3fe5dU, 0x4040c080U, 0x8f8f8a05U,
+ 0x9292ad3fU, 0x9d9dbc21U, 0x38384870U, 0xf5f504f1U,
+ 0xbcbcdf63U, 0xb6b6c177U, 0xdada75afU, 0x21216342U,
+ 0x10103020U, 0xffff1ae5U, 0xf3f30efdU, 0xd2d26dbfU,
+ 0xcdcd4c81U, 0x0c0c1418U, 0x13133526U, 0xecec2fc3U,
+ 0x5f5fe1beU, 0x9797a235U, 0x4444cc88U, 0x1717392eU,
+ 0xc4c45793U, 0xa7a7f255U, 0x7e7e82fcU, 0x3d3d477aU,
+ 0x6464acc8U, 0x5d5de7baU, 0x19192b32U, 0x737395e6U,
+ 0x6060a0c0U, 0x81819819U, 0x4f4fd19eU, 0xdcdc7fa3U,
+ 0x22226644U, 0x2a2a7e54U, 0x9090ab3bU, 0x8888830bU,
+ 0x4646ca8cU, 0xeeee29c7U, 0xb8b8d36bU, 0x14143c28U,
+ 0xdede79a7U, 0x5e5ee2bcU, 0x0b0b1d16U, 0xdbdb76adU,
+ 0xe0e03bdbU, 0x32325664U, 0x3a3a4e74U, 0x0a0a1e14U,
+ 0x4949db92U, 0x06060a0cU, 0x24246c48U, 0x5c5ce4b8U,
+ 0xc2c25d9fU, 0xd3d36ebdU, 0xacacef43U, 0x6262a6c4U,
+ 0x9191a839U, 0x9595a431U, 0xe4e437d3U, 0x79798bf2U,
+ 0xe7e732d5U, 0xc8c8438bU, 0x3737596eU, 0x6d6db7daU,
+ 0x8d8d8c01U, 0xd5d564b1U, 0x4e4ed29cU, 0xa9a9e049U,
+ 0x6c6cb4d8U, 0x5656faacU, 0xf4f407f3U, 0xeaea25cfU,
+ 0x6565afcaU, 0x7a7a8ef4U, 0xaeaee947U, 0x08081810U,
+ 0xbabad56fU, 0x787888f0U, 0x25256f4aU, 0x2e2e725cU,
+ 0x1c1c2438U, 0xa6a6f157U, 0xb4b4c773U, 0xc6c65197U,
+ 0xe8e823cbU, 0xdddd7ca1U, 0x74749ce8U, 0x1f1f213eU,
+ 0x4b4bdd96U, 0xbdbddc61U, 0x8b8b860dU, 0x8a8a850fU,
+ 0x707090e0U, 0x3e3e427cU, 0xb5b5c471U, 0x6666aaccU,
+ 0x4848d890U, 0x03030506U, 0xf6f601f7U, 0x0e0e121cU,
+ 0x6161a3c2U, 0x35355f6aU, 0x5757f9aeU, 0xb9b9d069U,
+ 0x86869117U, 0xc1c15899U, 0x1d1d273aU, 0x9e9eb927U,
+ 0xe1e138d9U, 0xf8f813ebU, 0x9898b32bU, 0x11113322U,
+ 0x6969bbd2U, 0xd9d970a9U, 0x8e8e8907U, 0x9494a733U,
+ 0x9b9bb62dU, 0x1e1e223cU, 0x87879215U, 0xe9e920c9U,
+ 0xcece4987U, 0x5555ffaaU, 0x28287850U, 0xdfdf7aa5U,
+ 0x8c8c8f03U, 0xa1a1f859U, 0x89898009U, 0x0d0d171aU,
+ 0xbfbfda65U, 0xe6e631d7U, 0x4242c684U, 0x6868b8d0U,
+ 0x4141c382U, 0x9999b029U, 0x2d2d775aU, 0x0f0f111eU,
+ 0xb0b0cb7bU, 0x5454fca8U, 0xbbbbd66dU, 0x16163a2cU
+};
+
+static const uint32_t Te4[256] =
+{
+ 0x63636363U, 0x7c7c7c7cU, 0x77777777U, 0x7b7b7b7bU,
+ 0xf2f2f2f2U, 0x6b6b6b6bU, 0x6f6f6f6fU, 0xc5c5c5c5U,
+ 0x30303030U, 0x01010101U, 0x67676767U, 0x2b2b2b2bU,
+ 0xfefefefeU, 0xd7d7d7d7U, 0xababababU, 0x76767676U,
+ 0xcacacacaU, 0x82828282U, 0xc9c9c9c9U, 0x7d7d7d7dU,
+ 0xfafafafaU, 0x59595959U, 0x47474747U, 0xf0f0f0f0U,
+ 0xadadadadU, 0xd4d4d4d4U, 0xa2a2a2a2U, 0xafafafafU,
+ 0x9c9c9c9cU, 0xa4a4a4a4U, 0x72727272U, 0xc0c0c0c0U,
+ 0xb7b7b7b7U, 0xfdfdfdfdU, 0x93939393U, 0x26262626U,
+ 0x36363636U, 0x3f3f3f3fU, 0xf7f7f7f7U, 0xccccccccU,
+ 0x34343434U, 0xa5a5a5a5U, 0xe5e5e5e5U, 0xf1f1f1f1U,
+ 0x71717171U, 0xd8d8d8d8U, 0x31313131U, 0x15151515U,
+ 0x04040404U, 0xc7c7c7c7U, 0x23232323U, 0xc3c3c3c3U,
+ 0x18181818U, 0x96969696U, 0x05050505U, 0x9a9a9a9aU,
+ 0x07070707U, 0x12121212U, 0x80808080U, 0xe2e2e2e2U,
+ 0xebebebebU, 0x27272727U, 0xb2b2b2b2U, 0x75757575U,
+ 0x09090909U, 0x83838383U, 0x2c2c2c2cU, 0x1a1a1a1aU,
+ 0x1b1b1b1bU, 0x6e6e6e6eU, 0x5a5a5a5aU, 0xa0a0a0a0U,
+ 0x52525252U, 0x3b3b3b3bU, 0xd6d6d6d6U, 0xb3b3b3b3U,
+ 0x29292929U, 0xe3e3e3e3U, 0x2f2f2f2fU, 0x84848484U,
+ 0x53535353U, 0xd1d1d1d1U, 0x00000000U, 0xededededU,
+ 0x20202020U, 0xfcfcfcfcU, 0xb1b1b1b1U, 0x5b5b5b5bU,
+ 0x6a6a6a6aU, 0xcbcbcbcbU, 0xbebebebeU, 0x39393939U,
+ 0x4a4a4a4aU, 0x4c4c4c4cU, 0x58585858U, 0xcfcfcfcfU,
+ 0xd0d0d0d0U, 0xefefefefU, 0xaaaaaaaaU, 0xfbfbfbfbU,
+ 0x43434343U, 0x4d4d4d4dU, 0x33333333U, 0x85858585U,
+ 0x45454545U, 0xf9f9f9f9U, 0x02020202U, 0x7f7f7f7fU,
+ 0x50505050U, 0x3c3c3c3cU, 0x9f9f9f9fU, 0xa8a8a8a8U,
+ 0x51515151U, 0xa3a3a3a3U, 0x40404040U, 0x8f8f8f8fU,
+ 0x92929292U, 0x9d9d9d9dU, 0x38383838U, 0xf5f5f5f5U,
+ 0xbcbcbcbcU, 0xb6b6b6b6U, 0xdadadadaU, 0x21212121U,
+ 0x10101010U, 0xffffffffU, 0xf3f3f3f3U, 0xd2d2d2d2U,
+ 0xcdcdcdcdU, 0x0c0c0c0cU, 0x13131313U, 0xececececU,
+ 0x5f5f5f5fU, 0x97979797U, 0x44444444U, 0x17171717U,
+ 0xc4c4c4c4U, 0xa7a7a7a7U, 0x7e7e7e7eU, 0x3d3d3d3dU,
+ 0x64646464U, 0x5d5d5d5dU, 0x19191919U, 0x73737373U,
+ 0x60606060U, 0x81818181U, 0x4f4f4f4fU, 0xdcdcdcdcU,
+ 0x22222222U, 0x2a2a2a2aU, 0x90909090U, 0x88888888U,
+ 0x46464646U, 0xeeeeeeeeU, 0xb8b8b8b8U, 0x14141414U,
+ 0xdedededeU, 0x5e5e5e5eU, 0x0b0b0b0bU, 0xdbdbdbdbU,
+ 0xe0e0e0e0U, 0x32323232U, 0x3a3a3a3aU, 0x0a0a0a0aU,
+ 0x49494949U, 0x06060606U, 0x24242424U, 0x5c5c5c5cU,
+ 0xc2c2c2c2U, 0xd3d3d3d3U, 0xacacacacU, 0x62626262U,
+ 0x91919191U, 0x95959595U, 0xe4e4e4e4U, 0x79797979U,
+ 0xe7e7e7e7U, 0xc8c8c8c8U, 0x37373737U, 0x6d6d6d6dU,
+ 0x8d8d8d8dU, 0xd5d5d5d5U, 0x4e4e4e4eU, 0xa9a9a9a9U,
+ 0x6c6c6c6cU, 0x56565656U, 0xf4f4f4f4U, 0xeaeaeaeaU,
+ 0x65656565U, 0x7a7a7a7aU, 0xaeaeaeaeU, 0x08080808U,
+ 0xbabababaU, 0x78787878U, 0x25252525U, 0x2e2e2e2eU,
+ 0x1c1c1c1cU, 0xa6a6a6a6U, 0xb4b4b4b4U, 0xc6c6c6c6U,
+ 0xe8e8e8e8U, 0xddddddddU, 0x74747474U, 0x1f1f1f1fU,
+ 0x4b4b4b4bU, 0xbdbdbdbdU, 0x8b8b8b8bU, 0x8a8a8a8aU,
+ 0x70707070U, 0x3e3e3e3eU, 0xb5b5b5b5U, 0x66666666U,
+ 0x48484848U, 0x03030303U, 0xf6f6f6f6U, 0x0e0e0e0eU,
+ 0x61616161U, 0x35353535U, 0x57575757U, 0xb9b9b9b9U,
+ 0x86868686U, 0xc1c1c1c1U, 0x1d1d1d1dU, 0x9e9e9e9eU,
+ 0xe1e1e1e1U, 0xf8f8f8f8U, 0x98989898U, 0x11111111U,
+ 0x69696969U, 0xd9d9d9d9U, 0x8e8e8e8eU, 0x94949494U,
+ 0x9b9b9b9bU, 0x1e1e1e1eU, 0x87878787U, 0xe9e9e9e9U,
+ 0xcecececeU, 0x55555555U, 0x28282828U, 0xdfdfdfdfU,
+ 0x8c8c8c8cU, 0xa1a1a1a1U, 0x89898989U, 0x0d0d0d0dU,
+ 0xbfbfbfbfU, 0xe6e6e6e6U, 0x42424242U, 0x68686868U,
+ 0x41414141U, 0x99999999U, 0x2d2d2d2dU, 0x0f0f0f0fU,
+ 0xb0b0b0b0U, 0x54545454U, 0xbbbbbbbbU, 0x16161616U
+};
+
+/* Decrypt Sbox constants (for the substitute bytes operation) */
+
+static const uint32_t Td0[256] =
+{
+ 0x51f4a750U, 0x7e416553U, 0x1a17a4c3U, 0x3a275e96U,
+ 0x3bab6bcbU, 0x1f9d45f1U, 0xacfa58abU, 0x4be30393U,
+ 0x2030fa55U, 0xad766df6U, 0x88cc7691U, 0xf5024c25U,
+ 0x4fe5d7fcU, 0xc52acbd7U, 0x26354480U, 0xb562a38fU,
+ 0xdeb15a49U, 0x25ba1b67U, 0x45ea0e98U, 0x5dfec0e1U,
+ 0xc32f7502U, 0x814cf012U, 0x8d4697a3U, 0x6bd3f9c6U,
+ 0x038f5fe7U, 0x15929c95U, 0xbf6d7aebU, 0x955259daU,
+ 0xd4be832dU, 0x587421d3U, 0x49e06929U, 0x8ec9c844U,
+ 0x75c2896aU, 0xf48e7978U, 0x99583e6bU, 0x27b971ddU,
+ 0xbee14fb6U, 0xf088ad17U, 0xc920ac66U, 0x7dce3ab4U,
+ 0x63df4a18U, 0xe51a3182U, 0x97513360U, 0x62537f45U,
+ 0xb16477e0U, 0xbb6bae84U, 0xfe81a01cU, 0xf9082b94U,
+ 0x70486858U, 0x8f45fd19U, 0x94de6c87U, 0x527bf8b7U,
+ 0xab73d323U, 0x724b02e2U, 0xe31f8f57U, 0x6655ab2aU,
+ 0xb2eb2807U, 0x2fb5c203U, 0x86c57b9aU, 0xd33708a5U,
+ 0x302887f2U, 0x23bfa5b2U, 0x02036abaU, 0xed16825cU,
+ 0x8acf1c2bU, 0xa779b492U, 0xf307f2f0U, 0x4e69e2a1U,
+ 0x65daf4cdU, 0x0605bed5U, 0xd134621fU, 0xc4a6fe8aU,
+ 0x342e539dU, 0xa2f355a0U, 0x058ae132U, 0xa4f6eb75U,
+ 0x0b83ec39U, 0x4060efaaU, 0x5e719f06U, 0xbd6e1051U,
+ 0x3e218af9U, 0x96dd063dU, 0xdd3e05aeU, 0x4de6bd46U,
+ 0x91548db5U, 0x71c45d05U, 0x0406d46fU, 0x605015ffU,
+ 0x1998fb24U, 0xd6bde997U, 0x894043ccU, 0x67d99e77U,
+ 0xb0e842bdU, 0x07898b88U, 0xe7195b38U, 0x79c8eedbU,
+ 0xa17c0a47U, 0x7c420fe9U, 0xf8841ec9U, 0x00000000U,
+ 0x09808683U, 0x322bed48U, 0x1e1170acU, 0x6c5a724eU,
+ 0xfd0efffbU, 0x0f853856U, 0x3daed51eU, 0x362d3927U,
+ 0x0a0fd964U, 0x685ca621U, 0x9b5b54d1U, 0x24362e3aU,
+ 0x0c0a67b1U, 0x9357e70fU, 0xb4ee96d2U, 0x1b9b919eU,
+ 0x80c0c54fU, 0x61dc20a2U, 0x5a774b69U, 0x1c121a16U,
+ 0xe293ba0aU, 0xc0a02ae5U, 0x3c22e043U, 0x121b171dU,
+ 0x0e090d0bU, 0xf28bc7adU, 0x2db6a8b9U, 0x141ea9c8U,
+ 0x57f11985U, 0xaf75074cU, 0xee99ddbbU, 0xa37f60fdU,
+ 0xf701269fU, 0x5c72f5bcU, 0x44663bc5U, 0x5bfb7e34U,
+ 0x8b432976U, 0xcb23c6dcU, 0xb6edfc68U, 0xb8e4f163U,
+ 0xd731dccaU, 0x42638510U, 0x13972240U, 0x84c61120U,
+ 0x854a247dU, 0xd2bb3df8U, 0xaef93211U, 0xc729a16dU,
+ 0x1d9e2f4bU, 0xdcb230f3U, 0x0d8652ecU, 0x77c1e3d0U,
+ 0x2bb3166cU, 0xa970b999U, 0x119448faU, 0x47e96422U,
+ 0xa8fc8cc4U, 0xa0f03f1aU, 0x567d2cd8U, 0x223390efU,
+ 0x87494ec7U, 0xd938d1c1U, 0x8ccaa2feU, 0x98d40b36U,
+ 0xa6f581cfU, 0xa57ade28U, 0xdab78e26U, 0x3fadbfa4U,
+ 0x2c3a9de4U, 0x5078920dU, 0x6a5fcc9bU, 0x547e4662U,
+ 0xf68d13c2U, 0x90d8b8e8U, 0x2e39f75eU, 0x82c3aff5U,
+ 0x9f5d80beU, 0x69d0937cU, 0x6fd52da9U, 0xcf2512b3U,
+ 0xc8ac993bU, 0x10187da7U, 0xe89c636eU, 0xdb3bbb7bU,
+ 0xcd267809U, 0x6e5918f4U, 0xec9ab701U, 0x834f9aa8U,
+ 0xe6956e65U, 0xaaffe67eU, 0x21bccf08U, 0xef15e8e6U,
+ 0xbae79bd9U, 0x4a6f36ceU, 0xea9f09d4U, 0x29b07cd6U,
+ 0x31a4b2afU, 0x2a3f2331U, 0xc6a59430U, 0x35a266c0U,
+ 0x744ebc37U, 0xfc82caa6U, 0xe090d0b0U, 0x33a7d815U,
+ 0xf104984aU, 0x41ecdaf7U, 0x7fcd500eU, 0x1791f62fU,
+ 0x764dd68dU, 0x43efb04dU, 0xccaa4d54U, 0xe49604dfU,
+ 0x9ed1b5e3U, 0x4c6a881bU, 0xc12c1fb8U, 0x4665517fU,
+ 0x9d5eea04U, 0x018c355dU, 0xfa877473U, 0xfb0b412eU,
+ 0xb3671d5aU, 0x92dbd252U, 0xe9105633U, 0x6dd64713U,
+ 0x9ad7618cU, 0x37a10c7aU, 0x59f8148eU, 0xeb133c89U,
+ 0xcea927eeU, 0xb761c935U, 0xe11ce5edU, 0x7a47b13cU,
+ 0x9cd2df59U, 0x55f2733fU, 0x1814ce79U, 0x73c737bfU,
+ 0x53f7cdeaU, 0x5ffdaa5bU, 0xdf3d6f14U, 0x7844db86U,
+ 0xcaaff381U, 0xb968c43eU, 0x3824342cU, 0xc2a3405fU,
+ 0x161dc372U, 0xbce2250cU, 0x283c498bU, 0xff0d9541U,
+ 0x39a80171U, 0x080cb3deU, 0xd8b4e49cU, 0x6456c190U,
+ 0x7bcb8461U, 0xd532b670U, 0x486c5c74U, 0xd0b85742U
+};
+
+static const uint32_t Td1[256] =
+{
+ 0x5051f4a7U, 0x537e4165U, 0xc31a17a4U, 0x963a275eU,
+ 0xcb3bab6bU, 0xf11f9d45U, 0xabacfa58U, 0x934be303U,
+ 0x552030faU, 0xf6ad766dU, 0x9188cc76U, 0x25f5024cU,
+ 0xfc4fe5d7U, 0xd7c52acbU, 0x80263544U, 0x8fb562a3U,
+ 0x49deb15aU, 0x6725ba1bU, 0x9845ea0eU, 0xe15dfec0U,
+ 0x02c32f75U, 0x12814cf0U, 0xa38d4697U, 0xc66bd3f9U,
+ 0xe7038f5fU, 0x9515929cU, 0xebbf6d7aU, 0xda955259U,
+ 0x2dd4be83U, 0xd3587421U, 0x2949e069U, 0x448ec9c8U,
+ 0x6a75c289U, 0x78f48e79U, 0x6b99583eU, 0xdd27b971U,
+ 0xb6bee14fU, 0x17f088adU, 0x66c920acU, 0xb47dce3aU,
+ 0x1863df4aU, 0x82e51a31U, 0x60975133U, 0x4562537fU,
+ 0xe0b16477U, 0x84bb6baeU, 0x1cfe81a0U, 0x94f9082bU,
+ 0x58704868U, 0x198f45fdU, 0x8794de6cU, 0xb7527bf8U,
+ 0x23ab73d3U, 0xe2724b02U, 0x57e31f8fU, 0x2a6655abU,
+ 0x07b2eb28U, 0x032fb5c2U, 0x9a86c57bU, 0xa5d33708U,
+ 0xf2302887U, 0xb223bfa5U, 0xba02036aU, 0x5ced1682U,
+ 0x2b8acf1cU, 0x92a779b4U, 0xf0f307f2U, 0xa14e69e2U,
+ 0xcd65daf4U, 0xd50605beU, 0x1fd13462U, 0x8ac4a6feU,
+ 0x9d342e53U, 0xa0a2f355U, 0x32058ae1U, 0x75a4f6ebU,
+ 0x390b83ecU, 0xaa4060efU, 0x065e719fU, 0x51bd6e10U,
+ 0xf93e218aU, 0x3d96dd06U, 0xaedd3e05U, 0x464de6bdU,
+ 0xb591548dU, 0x0571c45dU, 0x6f0406d4U, 0xff605015U,
+ 0x241998fbU, 0x97d6bde9U, 0xcc894043U, 0x7767d99eU,
+ 0xbdb0e842U, 0x8807898bU, 0x38e7195bU, 0xdb79c8eeU,
+ 0x47a17c0aU, 0xe97c420fU, 0xc9f8841eU, 0x00000000U,
+ 0x83098086U, 0x48322bedU, 0xac1e1170U, 0x4e6c5a72U,
+ 0xfbfd0effU, 0x560f8538U, 0x1e3daed5U, 0x27362d39U,
+ 0x640a0fd9U, 0x21685ca6U, 0xd19b5b54U, 0x3a24362eU,
+ 0xb10c0a67U, 0x0f9357e7U, 0xd2b4ee96U, 0x9e1b9b91U,
+ 0x4f80c0c5U, 0xa261dc20U, 0x695a774bU, 0x161c121aU,
+ 0x0ae293baU, 0xe5c0a02aU, 0x433c22e0U, 0x1d121b17U,
+ 0x0b0e090dU, 0xadf28bc7U, 0xb92db6a8U, 0xc8141ea9U,
+ 0x8557f119U, 0x4caf7507U, 0xbbee99ddU, 0xfda37f60U,
+ 0x9ff70126U, 0xbc5c72f5U, 0xc544663bU, 0x345bfb7eU,
+ 0x768b4329U, 0xdccb23c6U, 0x68b6edfcU, 0x63b8e4f1U,
+ 0xcad731dcU, 0x10426385U, 0x40139722U, 0x2084c611U,
+ 0x7d854a24U, 0xf8d2bb3dU, 0x11aef932U, 0x6dc729a1U,
+ 0x4b1d9e2fU, 0xf3dcb230U, 0xec0d8652U, 0xd077c1e3U,
+ 0x6c2bb316U, 0x99a970b9U, 0xfa119448U, 0x2247e964U,
+ 0xc4a8fc8cU, 0x1aa0f03fU, 0xd8567d2cU, 0xef223390U,
+ 0xc787494eU, 0xc1d938d1U, 0xfe8ccaa2U, 0x3698d40bU,
+ 0xcfa6f581U, 0x28a57adeU, 0x26dab78eU, 0xa43fadbfU,
+ 0xe42c3a9dU, 0x0d507892U, 0x9b6a5fccU, 0x62547e46U,
+ 0xc2f68d13U, 0xe890d8b8U, 0x5e2e39f7U, 0xf582c3afU,
+ 0xbe9f5d80U, 0x7c69d093U, 0xa96fd52dU, 0xb3cf2512U,
+ 0x3bc8ac99U, 0xa710187dU, 0x6ee89c63U, 0x7bdb3bbbU,
+ 0x09cd2678U, 0xf46e5918U, 0x01ec9ab7U, 0xa8834f9aU,
+ 0x65e6956eU, 0x7eaaffe6U, 0x0821bccfU, 0xe6ef15e8U,
+ 0xd9bae79bU, 0xce4a6f36U, 0xd4ea9f09U, 0xd629b07cU,
+ 0xaf31a4b2U, 0x312a3f23U, 0x30c6a594U, 0xc035a266U,
+ 0x37744ebcU, 0xa6fc82caU, 0xb0e090d0U, 0x1533a7d8U,
+ 0x4af10498U, 0xf741ecdaU, 0x0e7fcd50U, 0x2f1791f6U,
+ 0x8d764dd6U, 0x4d43efb0U, 0x54ccaa4dU, 0xdfe49604U,
+ 0xe39ed1b5U, 0x1b4c6a88U, 0xb8c12c1fU, 0x7f466551U,
+ 0x049d5eeaU, 0x5d018c35U, 0x73fa8774U, 0x2efb0b41U,
+ 0x5ab3671dU, 0x5292dbd2U, 0x33e91056U, 0x136dd647U,
+ 0x8c9ad761U, 0x7a37a10cU, 0x8e59f814U, 0x89eb133cU,
+ 0xeecea927U, 0x35b761c9U, 0xede11ce5U, 0x3c7a47b1U,
+ 0x599cd2dfU, 0x3f55f273U, 0x791814ceU, 0xbf73c737U,
+ 0xea53f7cdU, 0x5b5ffdaaU, 0x14df3d6fU, 0x867844dbU,
+ 0x81caaff3U, 0x3eb968c4U, 0x2c382434U, 0x5fc2a340U,
+ 0x72161dc3U, 0x0cbce225U, 0x8b283c49U, 0x41ff0d95U,
+ 0x7139a801U, 0xde080cb3U, 0x9cd8b4e4U, 0x906456c1U,
+ 0x617bcb84U, 0x70d532b6U, 0x74486c5cU, 0x42d0b857U
+};
+
+static const uint32_t Td2[256] =
+{
+ 0xa75051f4U, 0x65537e41U, 0xa4c31a17U, 0x5e963a27U,
+ 0x6bcb3babU, 0x45f11f9dU, 0x58abacfaU, 0x03934be3U,
+ 0xfa552030U, 0x6df6ad76U, 0x769188ccU, 0x4c25f502U,
+ 0xd7fc4fe5U, 0xcbd7c52aU, 0x44802635U, 0xa38fb562U,
+ 0x5a49deb1U, 0x1b6725baU, 0x0e9845eaU, 0xc0e15dfeU,
+ 0x7502c32fU, 0xf012814cU, 0x97a38d46U, 0xf9c66bd3U,
+ 0x5fe7038fU, 0x9c951592U, 0x7aebbf6dU, 0x59da9552U,
+ 0x832dd4beU, 0x21d35874U, 0x692949e0U, 0xc8448ec9U,
+ 0x896a75c2U, 0x7978f48eU, 0x3e6b9958U, 0x71dd27b9U,
+ 0x4fb6bee1U, 0xad17f088U, 0xac66c920U, 0x3ab47dceU,
+ 0x4a1863dfU, 0x3182e51aU, 0x33609751U, 0x7f456253U,
+ 0x77e0b164U, 0xae84bb6bU, 0xa01cfe81U, 0x2b94f908U,
+ 0x68587048U, 0xfd198f45U, 0x6c8794deU, 0xf8b7527bU,
+ 0xd323ab73U, 0x02e2724bU, 0x8f57e31fU, 0xab2a6655U,
+ 0x2807b2ebU, 0xc2032fb5U, 0x7b9a86c5U, 0x08a5d337U,
+ 0x87f23028U, 0xa5b223bfU, 0x6aba0203U, 0x825ced16U,
+ 0x1c2b8acfU, 0xb492a779U, 0xf2f0f307U, 0xe2a14e69U,
+ 0xf4cd65daU, 0xbed50605U, 0x621fd134U, 0xfe8ac4a6U,
+ 0x539d342eU, 0x55a0a2f3U, 0xe132058aU, 0xeb75a4f6U,
+ 0xec390b83U, 0xefaa4060U, 0x9f065e71U, 0x1051bd6eU,
+ 0x8af93e21U, 0x063d96ddU, 0x05aedd3eU, 0xbd464de6U,
+ 0x8db59154U, 0x5d0571c4U, 0xd46f0406U, 0x15ff6050U,
+ 0xfb241998U, 0xe997d6bdU, 0x43cc8940U, 0x9e7767d9U,
+ 0x42bdb0e8U, 0x8b880789U, 0x5b38e719U, 0xeedb79c8U,
+ 0x0a47a17cU, 0x0fe97c42U, 0x1ec9f884U, 0x00000000U,
+ 0x86830980U, 0xed48322bU, 0x70ac1e11U, 0x724e6c5aU,
+ 0xfffbfd0eU, 0x38560f85U, 0xd51e3daeU, 0x3927362dU,
+ 0xd9640a0fU, 0xa621685cU, 0x54d19b5bU, 0x2e3a2436U,
+ 0x67b10c0aU, 0xe70f9357U, 0x96d2b4eeU, 0x919e1b9bU,
+ 0xc54f80c0U, 0x20a261dcU, 0x4b695a77U, 0x1a161c12U,
+ 0xba0ae293U, 0x2ae5c0a0U, 0xe0433c22U, 0x171d121bU,
+ 0x0d0b0e09U, 0xc7adf28bU, 0xa8b92db6U, 0xa9c8141eU,
+ 0x198557f1U, 0x074caf75U, 0xddbbee99U, 0x60fda37fU,
+ 0x269ff701U, 0xf5bc5c72U, 0x3bc54466U, 0x7e345bfbU,
+ 0x29768b43U, 0xc6dccb23U, 0xfc68b6edU, 0xf163b8e4U,
+ 0xdccad731U, 0x85104263U, 0x22401397U, 0x112084c6U,
+ 0x247d854aU, 0x3df8d2bbU, 0x3211aef9U, 0xa16dc729U,
+ 0x2f4b1d9eU, 0x30f3dcb2U, 0x52ec0d86U, 0xe3d077c1U,
+ 0x166c2bb3U, 0xb999a970U, 0x48fa1194U, 0x642247e9U,
+ 0x8cc4a8fcU, 0x3f1aa0f0U, 0x2cd8567dU, 0x90ef2233U,
+ 0x4ec78749U, 0xd1c1d938U, 0xa2fe8ccaU, 0x0b3698d4U,
+ 0x81cfa6f5U, 0xde28a57aU, 0x8e26dab7U, 0xbfa43fadU,
+ 0x9de42c3aU, 0x920d5078U, 0xcc9b6a5fU, 0x4662547eU,
+ 0x13c2f68dU, 0xb8e890d8U, 0xf75e2e39U, 0xaff582c3U,
+ 0x80be9f5dU, 0x937c69d0U, 0x2da96fd5U, 0x12b3cf25U,
+ 0x993bc8acU, 0x7da71018U, 0x636ee89cU, 0xbb7bdb3bU,
+ 0x7809cd26U, 0x18f46e59U, 0xb701ec9aU, 0x9aa8834fU,
+ 0x6e65e695U, 0xe67eaaffU, 0xcf0821bcU, 0xe8e6ef15U,
+ 0x9bd9bae7U, 0x36ce4a6fU, 0x09d4ea9fU, 0x7cd629b0U,
+ 0xb2af31a4U, 0x23312a3fU, 0x9430c6a5U, 0x66c035a2U,
+ 0xbc37744eU, 0xcaa6fc82U, 0xd0b0e090U, 0xd81533a7U,
+ 0x984af104U, 0xdaf741ecU, 0x500e7fcdU, 0xf62f1791U,
+ 0xd68d764dU, 0xb04d43efU, 0x4d54ccaaU, 0x04dfe496U,
+ 0xb5e39ed1U, 0x881b4c6aU, 0x1fb8c12cU, 0x517f4665U,
+ 0xea049d5eU, 0x355d018cU, 0x7473fa87U, 0x412efb0bU,
+ 0x1d5ab367U, 0xd25292dbU, 0x5633e910U, 0x47136dd6U,
+ 0x618c9ad7U, 0x0c7a37a1U, 0x148e59f8U, 0x3c89eb13U,
+ 0x27eecea9U, 0xc935b761U, 0xe5ede11cU, 0xb13c7a47U,
+ 0xdf599cd2U, 0x733f55f2U, 0xce791814U, 0x37bf73c7U,
+ 0xcdea53f7U, 0xaa5b5ffdU, 0x6f14df3dU, 0xdb867844U,
+ 0xf381caafU, 0xc43eb968U, 0x342c3824U, 0x405fc2a3U,
+ 0xc372161dU, 0x250cbce2U, 0x498b283cU, 0x9541ff0dU,
+ 0x017139a8U, 0xb3de080cU, 0xe49cd8b4U, 0xc1906456U,
+ 0x84617bcbU, 0xb670d532U, 0x5c74486cU, 0x5742d0b8U
+};
+
+static const uint32_t Td3[256] =
+{
+ 0xf4a75051U, 0x4165537eU, 0x17a4c31aU, 0x275e963aU,
+ 0xab6bcb3bU, 0x9d45f11fU, 0xfa58abacU, 0xe303934bU,
+ 0x30fa5520U, 0x766df6adU, 0xcc769188U, 0x024c25f5U,
+ 0xe5d7fc4fU, 0x2acbd7c5U, 0x35448026U, 0x62a38fb5U,
+ 0xb15a49deU, 0xba1b6725U, 0xea0e9845U, 0xfec0e15dU,
+ 0x2f7502c3U, 0x4cf01281U, 0x4697a38dU, 0xd3f9c66bU,
+ 0x8f5fe703U, 0x929c9515U, 0x6d7aebbfU, 0x5259da95U,
+ 0xbe832dd4U, 0x7421d358U, 0xe0692949U, 0xc9c8448eU,
+ 0xc2896a75U, 0x8e7978f4U, 0x583e6b99U, 0xb971dd27U,
+ 0xe14fb6beU, 0x88ad17f0U, 0x20ac66c9U, 0xce3ab47dU,
+ 0xdf4a1863U, 0x1a3182e5U, 0x51336097U, 0x537f4562U,
+ 0x6477e0b1U, 0x6bae84bbU, 0x81a01cfeU, 0x082b94f9U,
+ 0x48685870U, 0x45fd198fU, 0xde6c8794U, 0x7bf8b752U,
+ 0x73d323abU, 0x4b02e272U, 0x1f8f57e3U, 0x55ab2a66U,
+ 0xeb2807b2U, 0xb5c2032fU, 0xc57b9a86U, 0x3708a5d3U,
+ 0x2887f230U, 0xbfa5b223U, 0x036aba02U, 0x16825cedU,
+ 0xcf1c2b8aU, 0x79b492a7U, 0x07f2f0f3U, 0x69e2a14eU,
+ 0xdaf4cd65U, 0x05bed506U, 0x34621fd1U, 0xa6fe8ac4U,
+ 0x2e539d34U, 0xf355a0a2U, 0x8ae13205U, 0xf6eb75a4U,
+ 0x83ec390bU, 0x60efaa40U, 0x719f065eU, 0x6e1051bdU,
+ 0x218af93eU, 0xdd063d96U, 0x3e05aeddU, 0xe6bd464dU,
+ 0x548db591U, 0xc45d0571U, 0x06d46f04U, 0x5015ff60U,
+ 0x98fb2419U, 0xbde997d6U, 0x4043cc89U, 0xd99e7767U,
+ 0xe842bdb0U, 0x898b8807U, 0x195b38e7U, 0xc8eedb79U,
+ 0x7c0a47a1U, 0x420fe97cU, 0x841ec9f8U, 0x00000000U,
+ 0x80868309U, 0x2bed4832U, 0x1170ac1eU, 0x5a724e6cU,
+ 0x0efffbfdU, 0x8538560fU, 0xaed51e3dU, 0x2d392736U,
+ 0x0fd9640aU, 0x5ca62168U, 0x5b54d19bU, 0x362e3a24U,
+ 0x0a67b10cU, 0x57e70f93U, 0xee96d2b4U, 0x9b919e1bU,
+ 0xc0c54f80U, 0xdc20a261U, 0x774b695aU, 0x121a161cU,
+ 0x93ba0ae2U, 0xa02ae5c0U, 0x22e0433cU, 0x1b171d12U,
+ 0x090d0b0eU, 0x8bc7adf2U, 0xb6a8b92dU, 0x1ea9c814U,
+ 0xf1198557U, 0x75074cafU, 0x99ddbbeeU, 0x7f60fda3U,
+ 0x01269ff7U, 0x72f5bc5cU, 0x663bc544U, 0xfb7e345bU,
+ 0x4329768bU, 0x23c6dccbU, 0xedfc68b6U, 0xe4f163b8U,
+ 0x31dccad7U, 0x63851042U, 0x97224013U, 0xc6112084U,
+ 0x4a247d85U, 0xbb3df8d2U, 0xf93211aeU, 0x29a16dc7U,
+ 0x9e2f4b1dU, 0xb230f3dcU, 0x8652ec0dU, 0xc1e3d077U,
+ 0xb3166c2bU, 0x70b999a9U, 0x9448fa11U, 0xe9642247U,
+ 0xfc8cc4a8U, 0xf03f1aa0U, 0x7d2cd856U, 0x3390ef22U,
+ 0x494ec787U, 0x38d1c1d9U, 0xcaa2fe8cU, 0xd40b3698U,
+ 0xf581cfa6U, 0x7ade28a5U, 0xb78e26daU, 0xadbfa43fU,
+ 0x3a9de42cU, 0x78920d50U, 0x5fcc9b6aU, 0x7e466254U,
+ 0x8d13c2f6U, 0xd8b8e890U, 0x39f75e2eU, 0xc3aff582U,
+ 0x5d80be9fU, 0xd0937c69U, 0xd52da96fU, 0x2512b3cfU,
+ 0xac993bc8U, 0x187da710U, 0x9c636ee8U, 0x3bbb7bdbU,
+ 0x267809cdU, 0x5918f46eU, 0x9ab701ecU, 0x4f9aa883U,
+ 0x956e65e6U, 0xffe67eaaU, 0xbccf0821U, 0x15e8e6efU,
+ 0xe79bd9baU, 0x6f36ce4aU, 0x9f09d4eaU, 0xb07cd629U,
+ 0xa4b2af31U, 0x3f23312aU, 0xa59430c6U, 0xa266c035U,
+ 0x4ebc3774U, 0x82caa6fcU, 0x90d0b0e0U, 0xa7d81533U,
+ 0x04984af1U, 0xecdaf741U, 0xcd500e7fU, 0x91f62f17U,
+ 0x4dd68d76U, 0xefb04d43U, 0xaa4d54ccU, 0x9604dfe4U,
+ 0xd1b5e39eU, 0x6a881b4cU, 0x2c1fb8c1U, 0x65517f46U,
+ 0x5eea049dU, 0x8c355d01U, 0x877473faU, 0x0b412efbU,
+ 0x671d5ab3U, 0xdbd25292U, 0x105633e9U, 0xd647136dU,
+ 0xd7618c9aU, 0xa10c7a37U, 0xf8148e59U, 0x133c89ebU,
+ 0xa927eeceU, 0x61c935b7U, 0x1ce5ede1U, 0x47b13c7aU,
+ 0xd2df599cU, 0xf2733f55U, 0x14ce7918U, 0xc737bf73U,
+ 0xf7cdea53U, 0xfdaa5b5fU, 0x3d6f14dfU, 0x44db8678U,
+ 0xaff381caU, 0x68c43eb9U, 0x24342c38U, 0xa3405fc2U,
+ 0x1dc37216U, 0xe2250cbcU, 0x3c498b28U, 0x0d9541ffU,
+ 0xa8017139U, 0x0cb3de08U, 0xb4e49cd8U, 0x56c19064U,
+ 0xcb84617bU, 0x32b670d5U, 0x6c5c7448U, 0xb85742d0U
+};
+
+static const uint32_t Td4[256] =
+{
+ 0x52525252U, 0x09090909U, 0x6a6a6a6aU, 0xd5d5d5d5U,
+ 0x30303030U, 0x36363636U, 0xa5a5a5a5U, 0x38383838U,
+ 0xbfbfbfbfU, 0x40404040U, 0xa3a3a3a3U, 0x9e9e9e9eU,
+ 0x81818181U, 0xf3f3f3f3U, 0xd7d7d7d7U, 0xfbfbfbfbU,
+ 0x7c7c7c7cU, 0xe3e3e3e3U, 0x39393939U, 0x82828282U,
+ 0x9b9b9b9bU, 0x2f2f2f2fU, 0xffffffffU, 0x87878787U,
+ 0x34343434U, 0x8e8e8e8eU, 0x43434343U, 0x44444444U,
+ 0xc4c4c4c4U, 0xdedededeU, 0xe9e9e9e9U, 0xcbcbcbcbU,
+ 0x54545454U, 0x7b7b7b7bU, 0x94949494U, 0x32323232U,
+ 0xa6a6a6a6U, 0xc2c2c2c2U, 0x23232323U, 0x3d3d3d3dU,
+ 0xeeeeeeeeU, 0x4c4c4c4cU, 0x95959595U, 0x0b0b0b0bU,
+ 0x42424242U, 0xfafafafaU, 0xc3c3c3c3U, 0x4e4e4e4eU,
+ 0x08080808U, 0x2e2e2e2eU, 0xa1a1a1a1U, 0x66666666U,
+ 0x28282828U, 0xd9d9d9d9U, 0x24242424U, 0xb2b2b2b2U,
+ 0x76767676U, 0x5b5b5b5bU, 0xa2a2a2a2U, 0x49494949U,
+ 0x6d6d6d6dU, 0x8b8b8b8bU, 0xd1d1d1d1U, 0x25252525U,
+ 0x72727272U, 0xf8f8f8f8U, 0xf6f6f6f6U, 0x64646464U,
+ 0x86868686U, 0x68686868U, 0x98989898U, 0x16161616U,
+ 0xd4d4d4d4U, 0xa4a4a4a4U, 0x5c5c5c5cU, 0xccccccccU,
+ 0x5d5d5d5dU, 0x65656565U, 0xb6b6b6b6U, 0x92929292U,
+ 0x6c6c6c6cU, 0x70707070U, 0x48484848U, 0x50505050U,
+ 0xfdfdfdfdU, 0xededededU, 0xb9b9b9b9U, 0xdadadadaU,
+ 0x5e5e5e5eU, 0x15151515U, 0x46464646U, 0x57575757U,
+ 0xa7a7a7a7U, 0x8d8d8d8dU, 0x9d9d9d9dU, 0x84848484U,
+ 0x90909090U, 0xd8d8d8d8U, 0xababababU, 0x00000000U,
+ 0x8c8c8c8cU, 0xbcbcbcbcU, 0xd3d3d3d3U, 0x0a0a0a0aU,
+ 0xf7f7f7f7U, 0xe4e4e4e4U, 0x58585858U, 0x05050505U,
+ 0xb8b8b8b8U, 0xb3b3b3b3U, 0x45454545U, 0x06060606U,
+ 0xd0d0d0d0U, 0x2c2c2c2cU, 0x1e1e1e1eU, 0x8f8f8f8fU,
+ 0xcacacacaU, 0x3f3f3f3fU, 0x0f0f0f0fU, 0x02020202U,
+ 0xc1c1c1c1U, 0xafafafafU, 0xbdbdbdbdU, 0x03030303U,
+ 0x01010101U, 0x13131313U, 0x8a8a8a8aU, 0x6b6b6b6bU,
+ 0x3a3a3a3aU, 0x91919191U, 0x11111111U, 0x41414141U,
+ 0x4f4f4f4fU, 0x67676767U, 0xdcdcdcdcU, 0xeaeaeaeaU,
+ 0x97979797U, 0xf2f2f2f2U, 0xcfcfcfcfU, 0xcecececeU,
+ 0xf0f0f0f0U, 0xb4b4b4b4U, 0xe6e6e6e6U, 0x73737373U,
+ 0x96969696U, 0xacacacacU, 0x74747474U, 0x22222222U,
+ 0xe7e7e7e7U, 0xadadadadU, 0x35353535U, 0x85858585U,
+ 0xe2e2e2e2U, 0xf9f9f9f9U, 0x37373737U, 0xe8e8e8e8U,
+ 0x1c1c1c1cU, 0x75757575U, 0xdfdfdfdfU, 0x6e6e6e6eU,
+ 0x47474747U, 0xf1f1f1f1U, 0x1a1a1a1aU, 0x71717171U,
+ 0x1d1d1d1dU, 0x29292929U, 0xc5c5c5c5U, 0x89898989U,
+ 0x6f6f6f6fU, 0xb7b7b7b7U, 0x62626262U, 0x0e0e0e0eU,
+ 0xaaaaaaaaU, 0x18181818U, 0xbebebebeU, 0x1b1b1b1bU,
+ 0xfcfcfcfcU, 0x56565656U, 0x3e3e3e3eU, 0x4b4b4b4bU,
+ 0xc6c6c6c6U, 0xd2d2d2d2U, 0x79797979U, 0x20202020U,
+ 0x9a9a9a9aU, 0xdbdbdbdbU, 0xc0c0c0c0U, 0xfefefefeU,
+ 0x78787878U, 0xcdcdcdcdU, 0x5a5a5a5aU, 0xf4f4f4f4U,
+ 0x1f1f1f1fU, 0xddddddddU, 0xa8a8a8a8U, 0x33333333U,
+ 0x88888888U, 0x07070707U, 0xc7c7c7c7U, 0x31313131U,
+ 0xb1b1b1b1U, 0x12121212U, 0x10101010U, 0x59595959U,
+ 0x27272727U, 0x80808080U, 0xececececU, 0x5f5f5f5fU,
+ 0x60606060U, 0x51515151U, 0x7f7f7f7fU, 0xa9a9a9a9U,
+ 0x19191919U, 0xb5b5b5b5U, 0x4a4a4a4aU, 0x0d0d0d0dU,
+ 0x2d2d2d2dU, 0xe5e5e5e5U, 0x7a7a7a7aU, 0x9f9f9f9fU,
+ 0x93939393U, 0xc9c9c9c9U, 0x9c9c9c9cU, 0xefefefefU,
+ 0xa0a0a0a0U, 0xe0e0e0e0U, 0x3b3b3b3bU, 0x4d4d4d4dU,
+ 0xaeaeaeaeU, 0x2a2a2a2aU, 0xf5f5f5f5U, 0xb0b0b0b0U,
+ 0xc8c8c8c8U, 0xebebebebU, 0xbbbbbbbbU, 0x3c3c3c3cU,
+ 0x83838383U, 0x53535353U, 0x99999999U, 0x61616161U,
+ 0x17171717U, 0x2b2b2b2bU, 0x04040404U, 0x7e7e7e7eU,
+ 0xbabababaU, 0x77777777U, 0xd6d6d6d6U, 0x26262626U,
+ 0xe1e1e1e1U, 0x69696969U, 0x14141414U, 0x63636363U,
+ 0x55555555U, 0x21212121U, 0x0c0c0c0cU, 0x7d7d7d7dU
+};
+
+/* Rcon is Round Constant; used for encryption key expansion */
+static const uint32_t rcon[RC_LENGTH] =
+{
+ /* for 128-bit blocks, Rijndael never uses more than 10 rcon values */
+ 0x01000000, 0x02000000, 0x04000000, 0x08000000,
+ 0x10000000, 0x20000000, 0x40000000, 0x80000000,
+ 0x1B000000, 0x36000000
+};
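The rcon entries are successive powers of x in GF(2^8), placed in the most
significant byte of each word, so they can be derived with the same
hypothetical xtime() helper sketched earlier (again an illustration, not code
from this patch):

	static void
	gen_rcon(uint32_t rc[RC_LENGTH])
	{
		uint8_t c = 0x01;
		int i;

		for (i = 0; i < RC_LENGTH; i++) {
			rc[i] = (uint32_t)c << 24;	/* x^i in the top byte */
			c = xtime(c);			/* 0x80 reduces to 0x1b */
		}
	}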
+
+
+/*
+ * Expand the cipher key into the encryption key schedule.
+ *
+ * Return the number of rounds for the given cipher key size.
+ * The size of the key schedule depends on the number of rounds
+ * (which can be computed from the size of the key), i.e. 4*(Nr + 1).
+ *
+ * Parameters:
+ * rk AES key schedule 32-bit array to be initialized
+ * cipherKey User key
+ * keyBits AES key size (128, 192, or 256 bits)
+ */
+static int
+rijndael_key_setup_enc_raw(uint32_t rk[], const uint32_t cipherKey[],
+ int keyBits)
+{
+ int i = 0;
+ uint32_t temp;
+
+ rk[0] = cipherKey[0];
+ rk[1] = cipherKey[1];
+ rk[2] = cipherKey[2];
+ rk[3] = cipherKey[3];
+
+ if (keyBits == 128) {
+ for (;;) {
+ temp = rk[3];
+ rk[4] = rk[0] ^
+ (Te4[(temp >> 16) & 0xff] & 0xff000000) ^
+ (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^
+ (Te4[temp & 0xff] & 0x0000ff00) ^
+ (Te4[temp >> 24] & 0x000000ff) ^
+ rcon[i];
+ rk[5] = rk[1] ^ rk[4];
+ rk[6] = rk[2] ^ rk[5];
+ rk[7] = rk[3] ^ rk[6];
+
+ if (++i == 10) {
+ return (10);
+ }
+ rk += 4;
+ }
+ }
+
+ rk[4] = cipherKey[4];
+ rk[5] = cipherKey[5];
+
+ if (keyBits == 192) {
+ for (;;) {
+ temp = rk[5];
+ rk[6] = rk[0] ^
+ (Te4[(temp >> 16) & 0xff] & 0xff000000) ^
+ (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^
+ (Te4[temp & 0xff] & 0x0000ff00) ^
+ (Te4[temp >> 24] & 0x000000ff) ^
+ rcon[i];
+ rk[7] = rk[1] ^ rk[6];
+ rk[8] = rk[2] ^ rk[7];
+ rk[9] = rk[3] ^ rk[8];
+
+ if (++i == 8) {
+ return (12);
+ }
+
+ rk[10] = rk[4] ^ rk[9];
+ rk[11] = rk[5] ^ rk[10];
+ rk += 6;
+ }
+ }
+
+ rk[6] = cipherKey[6];
+ rk[7] = cipherKey[7];
+
+ if (keyBits == 256) {
+ for (;;) {
+ temp = rk[7];
+ rk[8] = rk[0] ^
+ (Te4[(temp >> 16) & 0xff] & 0xff000000) ^
+ (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^
+ (Te4[temp & 0xff] & 0x0000ff00) ^
+ (Te4[temp >> 24] & 0x000000ff) ^
+ rcon[i];
+ rk[9] = rk[1] ^ rk[8];
+ rk[10] = rk[2] ^ rk[9];
+ rk[11] = rk[3] ^ rk[10];
+
+ if (++i == 7) {
+ return (14);
+ }
+ temp = rk[11];
+ rk[12] = rk[4] ^
+ (Te4[temp >> 24] & 0xff000000) ^
+ (Te4[(temp >> 16) & 0xff] & 0x00ff0000) ^
+ (Te4[(temp >> 8) & 0xff] & 0x0000ff00) ^
+ (Te4[temp & 0xff] & 0x000000ff);
+ rk[13] = rk[5] ^ rk[12];
+ rk[14] = rk[6] ^ rk[13];
+ rk[15] = rk[7] ^ rk[14];
+
+ rk += 8;
+ }
+ }
+
+ return (0);
+}
+#endif /* !__amd64 */
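As a worked check of the 4*(Nr + 1) schedule-size formula used in these
comments: a 128-bit key gives Nr = 10 and a 44-word schedule, 192 bits gives
Nr = 12 and 52 words, and 256 bits gives Nr = 14 and 60 words; the
"60 32-bit integers" mentioned for aes_ks_t in the comments below correspond
to this largest case.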
+
+#if defined(__amd64)
+
+/*
+ * Expand the 32-bit AES cipher key array into the encryption and decryption
+ * key schedules.
+ *
+ * Parameters:
+ * key AES key schedule to be initialized
+ * keyarr32 User key
+ * keyBits AES key size (128, 192, or 256 bits)
+ */
+static void
+aes_setupkeys(aes_key_t *key, const uint32_t *keyarr32, int keybits)
+{
+ if (intel_aes_instructions_present()) {
+ key->flags = INTEL_AES_NI_CAPABLE;
+ KPREEMPT_DISABLE;
+ key->nr = rijndael_key_setup_enc_intel(&(key->encr_ks.ks32[0]),
+ keyarr32, keybits);
+ key->nr = rijndael_key_setup_dec_intel(&(key->decr_ks.ks32[0]),
+ keyarr32, keybits);
+ KPREEMPT_ENABLE;
+ } else {
+ key->flags = 0;
+ key->nr = rijndael_key_setup_enc_amd64(&(key->encr_ks.ks32[0]),
+ keyarr32, keybits);
+ key->nr = rijndael_key_setup_dec_amd64(&(key->decr_ks.ks32[0]),
+ keyarr32, keybits);
+ }
+
+ key->type = AES_32BIT_KS;
+}
+
+/*
+ * Encrypt one block of data. The block is assumed to be an array
+ * of four uint32_t values, so a copy for alignment (and byte-order
+ * reversal on little-endian systems) might be necessary on the
+ * input and output byte streams.
+ * The size of the key schedule depends on the number of rounds
+ * (which can be computed from the size of the key), i.e. 4*(Nr + 1).
+ *
+ * Parameters:
+ * rk Key schedule, of aes_ks_t (60 32-bit integers)
+ * Nr Number of rounds
+ * pt Input block (plain text)
+ * ct Output block (crypto text). Can overlap with pt
+ * flags Indicates whether we're on Intel AES-NI-capable hardware
+ */
+static void
+rijndael_encrypt(const uint32_t rk[], int Nr, const uint32_t pt[4],
+ uint32_t ct[4], int flags) {
+ if (flags & INTEL_AES_NI_CAPABLE) {
+ KPREEMPT_DISABLE;
+ aes_encrypt_intel(rk, Nr, pt, ct);
+ KPREEMPT_ENABLE;
+ } else {
+ aes_encrypt_amd64(rk, Nr, pt, ct);
+ }
+}
+
+/*
+ * Decrypt one block of data. The block is assumed to be an array
+ * of four uint32_t values, so a copy for alignment (and byte-order
+ * reversal on little-endian systems) might be necessary on the
+ * input and output byte streams.
+ * The size of the key schedule depends on the number of rounds
+ * (which can be computed from the size of the key), i.e. 4*(Nr + 1).
+ *
+ * Parameters:
+ * rk Key schedule, of aes_ks_t (60 32-bit integers)
+ * Nr Number of rounds
+ * ct Input block (crypto text)
+ * pt Output block (plain text). Can overlap with ct
+ * flags Indicates whether we're on Intel AES-NI-capable hardware
+ */
+static void
+rijndael_decrypt(const uint32_t rk[], int Nr, const uint32_t ct[4],
+ uint32_t pt[4], int flags) {
+ if (flags & INTEL_AES_NI_CAPABLE) {
+ KPREEMPT_DISABLE;
+ aes_decrypt_intel(rk, Nr, ct, pt);
+ KPREEMPT_ENABLE;
+ } else {
+ aes_decrypt_amd64(rk, Nr, ct, pt);
+ }
+}
+
+
+#else /* generic C implementation */
+
+/*
+ * Expand the cipher key into the decryption key schedule.
+ * Return the number of rounds for the given cipher key size.
+ * The size of the key schedule depends on the number of rounds
+ * (which can be computed from the size of the key), i.e. 4*(Nr + 1).
+ *
+ * Parameters:
+ * rk AES key schedule 32-bit array to be initialized
+ * cipherKey User key
+ * keyBits AES key size (128, 192, or 256 bits)
+ */
+static int
+rijndael_key_setup_dec(uint32_t rk[], const uint32_t cipherKey[], int keyBits)
+{
+ int Nr, i, j;
+ uint32_t temp;
+
+ /* expand the cipher key: */
+ Nr = rijndael_key_setup_enc_raw(rk, cipherKey, keyBits);
+
+ /* invert the order of the round keys: */
+ for (i = 0, j = 4 * Nr; i < j; i += 4, j -= 4) {
+ temp = rk[i];
+ rk[i] = rk[j];
+ rk[j] = temp;
+ temp = rk[i + 1];
+ rk[i + 1] = rk[j + 1];
+ rk[j + 1] = temp;
+ temp = rk[i + 2];
+ rk[i + 2] = rk[j + 2];
+ rk[j + 2] = temp;
+ temp = rk[i + 3];
+ rk[i + 3] = rk[j + 3];
+ rk[j + 3] = temp;
+ }
+
+ /*
+ * apply the inverse MixColumn transform to all
+ * round keys but the first and the last:
+ */
+ for (i = 1; i < Nr; i++) {
+ rk += 4;
+ rk[0] = Td0[Te4[rk[0] >> 24] & 0xff] ^
+ Td1[Te4[(rk[0] >> 16) & 0xff] & 0xff] ^
+ Td2[Te4[(rk[0] >> 8) & 0xff] & 0xff] ^
+ Td3[Te4[rk[0] & 0xff] & 0xff];
+ rk[1] = Td0[Te4[rk[1] >> 24] & 0xff] ^
+ Td1[Te4[(rk[1] >> 16) & 0xff] & 0xff] ^
+ Td2[Te4[(rk[1] >> 8) & 0xff] & 0xff] ^
+ Td3[Te4[rk[1] & 0xff] & 0xff];
+ rk[2] = Td0[Te4[rk[2] >> 24] & 0xff] ^
+ Td1[Te4[(rk[2] >> 16) & 0xff] & 0xff] ^
+ Td2[Te4[(rk[2] >> 8) & 0xff] & 0xff] ^
+ Td3[Te4[rk[2] & 0xff] & 0xff];
+ rk[3] = Td0[Te4[rk[3] >> 24] & 0xff] ^
+ Td1[Te4[(rk[3] >> 16) & 0xff] & 0xff] ^
+ Td2[Te4[(rk[3] >> 8) & 0xff] & 0xff] ^
+ Td3[Te4[rk[3] & 0xff] & 0xff];
+ }
+
+ return (Nr);
+}
+
+
+/*
+ * Expand the 32-bit AES cipher key array into the encryption and decryption
+ * key schedules.
+ *
+ * Parameters:
+ * key AES key schedule to be initialized
+ * keyarr32 User key
+ * keyBits AES key size (128, 192, or 256 bits)
+ */
+static void
+aes_setupkeys(aes_key_t *key, const uint32_t *keyarr32, int keybits)
+{
+ key->nr = rijndael_key_setup_enc(&(key->encr_ks.ks32[0]), keyarr32,
+ keybits);
+ key->nr = rijndael_key_setup_dec(&(key->decr_ks.ks32[0]), keyarr32,
+ keybits);
+ key->type = AES_32BIT_KS;
+}
+
+
+/*
+ * Encrypt one block of data. The block is assumed to be an array
+ * of four uint32_t values, so a copy for alignment (and byte-order
+ * reversal on little-endian systems) might be necessary on the
+ * input and output byte streams.
+ * The size of the key schedule depends on the number of rounds
+ * (which can be computed from the size of the key), i.e. 4*(Nr + 1).
+ *
+ * Parameters:
+ * rk Key schedule, of aes_ks_t (60 32-bit integers)
+ * Nr Number of rounds
+ * pt Input block (plain text)
+ * ct Output block (crypto text). Can overlap with pt
+ */
+static void
+rijndael_encrypt(const uint32_t rk[], int Nr, const uint32_t pt[4],
+ uint32_t ct[4])
+{
+ uint32_t s0, s1, s2, s3, t0, t1, t2, t3;
+ int r;
+
+ /*
+ * map byte array block to cipher state
+ * and add initial round key:
+ */
+
+ s0 = pt[0] ^ rk[0];
+ s1 = pt[1] ^ rk[1];
+ s2 = pt[2] ^ rk[2];
+ s3 = pt[3] ^ rk[3];
+
+ /*
+ * Nr - 1 full rounds:
+ */
+
+ r = Nr >> 1;
+
+ for (;;) {
+ t0 = Te0[s0 >> 24] ^
+ Te1[(s1 >> 16) & 0xff] ^
+ Te2[(s2 >> 8) & 0xff] ^
+ Te3[s3 & 0xff] ^
+ rk[4];
+
+ t1 = Te0[s1 >> 24] ^
+ Te1[(s2 >> 16) & 0xff] ^
+ Te2[(s3 >> 8) & 0xff] ^
+ Te3[s0 & 0xff] ^
+ rk[5];
+
+ t2 = Te0[s2 >> 24] ^
+ Te1[(s3 >> 16) & 0xff] ^
+ Te2[(s0 >> 8) & 0xff] ^
+ Te3[s1 & 0xff] ^
+ rk[6];
+
+ t3 = Te0[s3 >> 24] ^
+ Te1[(s0 >> 16) & 0xff] ^
+ Te2[(s1 >> 8) & 0xff] ^
+ Te3[s2 & 0xff] ^
+ rk[7];
+
+ rk += 8;
+
+ if (--r == 0) {
+ break;
+ }
+
+ s0 = Te0[t0 >> 24] ^
+ Te1[(t1 >> 16) & 0xff] ^
+ Te2[(t2 >> 8) & 0xff] ^
+ Te3[t3 & 0xff] ^
+ rk[0];
+
+ s1 = Te0[t1 >> 24] ^
+ Te1[(t2 >> 16) & 0xff] ^
+ Te2[(t3 >> 8) & 0xff] ^
+ Te3[t0 & 0xff] ^
+ rk[1];
+
+ s2 = Te0[t2 >> 24] ^
+ Te1[(t3 >> 16) & 0xff] ^
+ Te2[(t0 >> 8) & 0xff] ^
+ Te3[t1 & 0xff] ^
+ rk[2];
+
+ s3 = Te0[t3 >> 24] ^
+ Te1[(t0 >> 16) & 0xff] ^
+ Te2[(t1 >> 8) & 0xff] ^
+ Te3[t2 & 0xff] ^
+ rk[3];
+ }
+
+ /*
+ * apply last round and
+ * map cipher state to byte array block:
+ */
+
+ s0 = (Te4[(t0 >> 24)] & 0xff000000) ^
+ (Te4[(t1 >> 16) & 0xff] & 0x00ff0000) ^
+ (Te4[(t2 >> 8) & 0xff] & 0x0000ff00) ^
+ (Te4[t3 & 0xff] & 0x000000ff) ^
+ rk[0];
+ ct[0] = s0;
+
+ s1 = (Te4[(t1 >> 24)] & 0xff000000) ^
+ (Te4[(t2 >> 16) & 0xff] & 0x00ff0000) ^
+ (Te4[(t3 >> 8) & 0xff] & 0x0000ff00) ^
+ (Te4[t0 & 0xff] & 0x000000ff) ^
+ rk[1];
+ ct[1] = s1;
+
+ s2 = (Te4[(t2 >> 24)] & 0xff000000) ^
+ (Te4[(t3 >> 16) & 0xff] & 0x00ff0000) ^
+ (Te4[(t0 >> 8) & 0xff] & 0x0000ff00) ^
+ (Te4[t1 & 0xff] & 0x000000ff) ^
+ rk[2];
+ ct[2] = s2;
+
+ s3 = (Te4[(t3 >> 24)] & 0xff000000) ^
+ (Te4[(t0 >> 16) & 0xff] & 0x00ff0000) ^
+ (Te4[(t1 >> 8) & 0xff] & 0x0000ff00) ^
+ (Te4[t2 & 0xff] & 0x000000ff) ^
+ rk[3];
+ ct[3] = s3;
+}
+
+
+/*
+ * Decrypt one block of data. The block is assumed to be an array
+ * of four uint32_t values, so a copy for alignment (and byte-order
+ * reversal on little-endian systems) might be necessary on the
+ * input and output byte streams.
+ * The size of the key schedule depends on the number of rounds
+ * (which can be computed from the size of the key), i.e. 4*(Nr + 1).
+ *
+ * Parameters:
+ * rk Key schedule, of aes_ks_t (60 32-bit integers)
+ * Nr Number of rounds
+ * ct Input block (crypto text)
+ * pt Output block (plain text). Can overlap with ct
+ */
+static void
+rijndael_decrypt(const uint32_t rk[], int Nr, const uint32_t ct[4],
+ uint32_t pt[4])
+{
+ uint32_t s0, s1, s2, s3, t0, t1, t2, t3;
+ int r;
+
+ /*
+ * map byte array block to cipher state
+ * and add initial round key:
+ */
+ s0 = ct[0] ^ rk[0];
+ s1 = ct[1] ^ rk[1];
+ s2 = ct[2] ^ rk[2];
+ s3 = ct[3] ^ rk[3];
+
+ /*
+ * Nr - 1 full rounds:
+ */
+
+ r = Nr >> 1;
+
+ for (;;) {
+ t0 = Td0[s0 >> 24] ^
+ Td1[(s3 >> 16) & 0xff] ^
+ Td2[(s2 >> 8) & 0xff] ^
+ Td3[s1 & 0xff] ^
+ rk[4];
+
+ t1 = Td0[s1 >> 24] ^
+ Td1[(s0 >> 16) & 0xff] ^
+ Td2[(s3 >> 8) & 0xff] ^
+ Td3[s2 & 0xff] ^
+ rk[5];
+
+ t2 = Td0[s2 >> 24] ^
+ Td1[(s1 >> 16) & 0xff] ^
+ Td2[(s0 >> 8) & 0xff] ^
+ Td3[s3 & 0xff] ^
+ rk[6];
+
+ t3 = Td0[s3 >> 24] ^
+ Td1[(s2 >> 16) & 0xff] ^
+ Td2[(s1 >> 8) & 0xff] ^
+ Td3[s0 & 0xff] ^
+ rk[7];
+
+ rk += 8;
+
+ if (--r == 0) {
+ break;
+ }
+
+ s0 = Td0[t0 >> 24] ^
+ Td1[(t3 >> 16) & 0xff] ^
+ Td2[(t2 >> 8) & 0xff] ^
+ Td3[t1 & 0xff] ^
+ rk[0];
+
+ s1 = Td0[t1 >> 24] ^
+ Td1[(t0 >> 16) & 0xff] ^
+ Td2[(t3 >> 8) & 0xff] ^
+ Td3[t2 & 0xff] ^
+ rk[1];
+
+ s2 = Td0[t2 >> 24] ^
+ Td1[(t1 >> 16) & 0xff] ^
+ Td2[(t0 >> 8) & 0xff] ^
+ Td3[t3 & 0xff] ^
+ rk[2];
+
+ s3 = Td0[t3 >> 24] ^
+ Td1[(t2 >> 16) & 0xff] ^
+ Td2[(t1 >> 8) & 0xff] ^
+ Td3[t0 & 0xff] ^
+ rk[3];
+ }
+
+ /*
+ * apply last round and
+ * map cipher state to byte array block:
+ */
+
+ s0 = (Td4[t0 >> 24] & 0xff000000) ^
+ (Td4[(t3 >> 16) & 0xff] & 0x00ff0000) ^
+ (Td4[(t2 >> 8) & 0xff] & 0x0000ff00) ^
+ (Td4[t1 & 0xff] & 0x000000ff) ^
+ rk[0];
+ pt[0] = s0;
+
+ s1 = (Td4[t1 >> 24] & 0xff000000) ^
+ (Td4[(t0 >> 16) & 0xff] & 0x00ff0000) ^
+ (Td4[(t3 >> 8) & 0xff] & 0x0000ff00) ^
+ (Td4[t2 & 0xff] & 0x000000ff) ^
+ rk[1];
+ pt[1] = s1;
+
+ s2 = (Td4[t2 >> 24] & 0xff000000) ^
+ (Td4[(t1 >> 16) & 0xff] & 0x00ff0000) ^
+ (Td4[(t0 >> 8) & 0xff] & 0x0000ff00) ^
+ (Td4[t3 & 0xff] & 0x000000ff) ^
+ rk[2];
+ pt[2] = s2;
+
+ s3 = (Td4[t3 >> 24] & 0xff000000) ^
+ (Td4[(t2 >> 16) & 0xff] & 0x00ff0000) ^
+ (Td4[(t1 >> 8) & 0xff] & 0x0000ff00) ^
+ (Td4[t0 & 0xff] & 0x000000ff) ^
+ rk[3];
+ pt[3] = s3;
+}
+#endif /* __amd64 */
+
+
+/*
+ * Initialize AES encryption and decryption key schedules.
+ *
+ * Parameters:
+ * cipherKey User key
+ * keyBits AES key size (128, 192, or 256 bits)
+ * keysched AES key schedule to be initialized, of type aes_key_t.
+ * Allocated by aes_alloc_keysched().
+ */
+void
+aes_init_keysched(const uint8_t *cipherKey, uint_t keyBits, void *keysched)
+{
+ aes_key_t *newbie = keysched;
+ uint_t keysize, i, j;
+ union {
+ uint64_t ka64[4];
+ uint32_t ka32[8];
+ } keyarr;
+
+ switch (keyBits) {
+ case 128:
+ newbie->nr = 10;
+ break;
+
+ case 192:
+ newbie->nr = 12;
+ break;
+
+ case 256:
+ newbie->nr = 14;
+ break;
+
+ default:
+ /* should never get here */
+ return;
+ }
+ keysize = CRYPTO_BITS2BYTES(keyBits);
+
+ /*
+ * For _LITTLE_ENDIAN machines (except AMD64), reverse every
+ * 4 bytes in the key. On _BIG_ENDIAN and AMD64, copy the key
+ * without reversing bytes.
+ * For AMD64, do not byte swap for aes_setupkeys().
+ *
+ * SPARCv8/v9 uses a key schedule array with 64-bit elements.
+ * X86/AMD64 uses a key schedule array with 32-bit elements.
+ */
+#ifndef AES_BYTE_SWAP
+ if (IS_P2ALIGNED(cipherKey, sizeof (uint64_t))) {
+ for (i = 0, j = 0; j < keysize; i++, j += 8) {
+ /* LINTED: pointer alignment */
+ keyarr.ka64[i] = *((uint64_t *)&cipherKey[j]);
+ }
+ } else {
+ bcopy(cipherKey, keyarr.ka32, keysize);
+ }
+
+#else /* byte swap */
+ for (i = 0, j = 0; j < keysize; i++, j += 4) {
+ keyarr.ka32[i] = htonl(*(uint32_t *)(void *)&cipherKey[j]);
+ }
+#endif
+
+ aes_setupkeys(newbie, keyarr.ka32, keyBits);
+}
+
+
+/*
+ * Encrypt one block using AES.
+ * Align if needed and (for x86 32-bit only) byte-swap.
+ *
+ * Parameters:
+ * ks Key schedule, of type aes_key_t
+ * pt Input block (plain text)
+ * ct Output block (crypto text). Can overlap with pt
+ */
+int
+aes_encrypt_block(const void *ks, const uint8_t *pt, uint8_t *ct)
+{
+ aes_key_t *ksch = (aes_key_t *)ks;
+
+#ifndef AES_BYTE_SWAP
+ if (IS_P2ALIGNED2(pt, ct, sizeof (uint32_t))) {
+ /* LINTED: pointer alignment */
+ AES_ENCRYPT_IMPL(&ksch->encr_ks.ks32[0], ksch->nr,
+ /* LINTED: pointer alignment */
+ (uint32_t *)pt, (uint32_t *)ct, ksch->flags);
+ } else {
+#endif
+ uint32_t buffer[AES_BLOCK_LEN / sizeof (uint32_t)];
+
+ /* Copy input block into buffer */
+#ifndef AES_BYTE_SWAP
+ bcopy(pt, &buffer, AES_BLOCK_LEN);
+
+#else /* byte swap */
+ buffer[0] = htonl(*(uint32_t *)(void *)&pt[0]);
+ buffer[1] = htonl(*(uint32_t *)(void *)&pt[4]);
+ buffer[2] = htonl(*(uint32_t *)(void *)&pt[8]);
+ buffer[3] = htonl(*(uint32_t *)(void *)&pt[12]);
+#endif
+
+ AES_ENCRYPT_IMPL(&ksch->encr_ks.ks32[0], ksch->nr,
+ buffer, buffer, ksch->flags);
+
+ /* Copy result from buffer to output block */
+#ifndef AES_BYTE_SWAP
+ bcopy(&buffer, ct, AES_BLOCK_LEN);
+ }
+
+#else /* byte swap */
+ *(uint32_t *)(void *)&ct[0] = htonl(buffer[0]);
+ *(uint32_t *)(void *)&ct[4] = htonl(buffer[1]);
+ *(uint32_t *)(void *)&ct[8] = htonl(buffer[2]);
+ *(uint32_t *)(void *)&ct[12] = htonl(buffer[3]);
+#endif
+ return (CRYPTO_SUCCESS);
+}
+
+
+/*
+ * Decrypt one block using AES.
+ * Align and byte-swap if needed.
+ *
+ * Parameters:
+ * ks Key schedule, of type aes_key_t
+ * ct Input block (crypto text)
+ * pt Output block (plain text). Can overlap with ct
+ */
+int
+aes_decrypt_block(const void *ks, const uint8_t *ct, uint8_t *pt)
+{
+ aes_key_t *ksch = (aes_key_t *)ks;
+
+#ifndef AES_BYTE_SWAP
+ if (IS_P2ALIGNED2(ct, pt, sizeof (uint32_t))) {
+ /* LINTED: pointer alignment */
+ AES_DECRYPT_IMPL(&ksch->decr_ks.ks32[0], ksch->nr,
+ /* LINTED: pointer alignment */
+ (uint32_t *)ct, (uint32_t *)pt, ksch->flags);
+ } else {
+#endif
+ uint32_t buffer[AES_BLOCK_LEN / sizeof (uint32_t)];
+
+ /* Copy input block into buffer */
+#ifndef AES_BYTE_SWAP
+ bcopy(ct, &buffer, AES_BLOCK_LEN);
+
+#else /* byte swap */
+ buffer[0] = htonl(*(uint32_t *)(void *)&ct[0]);
+ buffer[1] = htonl(*(uint32_t *)(void *)&ct[4]);
+ buffer[2] = htonl(*(uint32_t *)(void *)&ct[8]);
+ buffer[3] = htonl(*(uint32_t *)(void *)&ct[12]);
+#endif
+
+ AES_DECRYPT_IMPL(&ksch->decr_ks.ks32[0], ksch->nr,
+ buffer, buffer, ksch->flags);
+
+ /* Copy result from buffer to output block */
+#ifndef AES_BYTE_SWAP
+ bcopy(&buffer, pt, AES_BLOCK_LEN);
+ }
+
+#else /* byte swap */
+ *(uint32_t *)(void *)&pt[0] = htonl(buffer[0]);
+ *(uint32_t *)(void *)&pt[4] = htonl(buffer[1]);
+ *(uint32_t *)(void *)&pt[8] = htonl(buffer[2]);
+ *(uint32_t *)(void *)&pt[12] = htonl(buffer[3]);
+#endif
+
+ return (CRYPTO_SUCCESS);
+}
+
+
+/*
+ * Allocate key schedule for AES.
+ *
+ * Return the pointer and set size to the number of bytes allocated.
+ * Memory allocated must be freed by the caller when done.
+ *
+ * Parameters:
+ * size Size of key schedule allocated, in bytes
+ * kmflag Flag passed to kmem_alloc(9F); ignored in userland.
+ */
+/* ARGSUSED */
+void *
+aes_alloc_keysched(size_t *size, int kmflag)
+{
+ aes_key_t *keysched;
+
+ keysched = (aes_key_t *)kmem_alloc(sizeof (aes_key_t), kmflag);
+ if (keysched != NULL) {
+ *size = sizeof (aes_key_t);
+ return (keysched);
+ }
+ return (NULL);
+}
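+
+/*
+ * Illustrative usage (a minimal sketch, not part of the original
+ * source; "key", "pt" and "ct" stand for hypothetical caller-supplied
+ * buffers, and error handling is elided):
+ *
+ *	size_t size;
+ *	void *ks = aes_alloc_keysched(&size, KM_SLEEP);
+ *
+ *	if (ks != NULL) {
+ *		aes_init_keysched(key, 256, ks);
+ *		(void) aes_encrypt_block(ks, pt, ct);
+ *		bzero(ks, size);
+ *		kmem_free(ks, size);
+ *	}
+ */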
+
+
+#ifdef __amd64
+
+#define INTEL_AESNI_FLAG (1 << 25)
+
+/*
+ * Return 1 if executing on Intel with AES-NI instructions,
+ * otherwise 0 (i.e., an Intel CPU without AES-NI, or a non-Intel CPU).
+ * Cache the result, as the CPU can't change.
+ */
+static int
+intel_aes_instructions_present(void)
+{
+ static int cached_result = -1;
+ unsigned eax, ebx, ecx, edx;
+ unsigned func, subfunc;
+
+ if (cached_result == -1) { /* first time */
+ /* check for an intel cpu */
+ func = 0;
+ subfunc = 0;
+
+ __asm__ __volatile__(
+ "cpuid"
+ : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+ : "a"(func), "c"(subfunc));
+
+ if (memcmp((char *) (&ebx), "Genu", 4) == 0 &&
+ memcmp((char *) (&edx), "ineI", 4) == 0 &&
+ memcmp((char *) (&ecx), "ntel", 4) == 0) {
+
+ func = 1;
+ subfunc = 0;
+
+ /* check for aes-ni instruction set */
+ __asm__ __volatile__(
+ "cpuid"
+ : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+ : "a"(func), "c"(subfunc));
+
+ cached_result = !!(ecx & INTEL_AESNI_FLAG);
+ } else {
+ cached_result = 0;
+ }
+ }
+
+ return (cached_result);
+}
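+
+/*
+ * Worked example (register values are illustrative only): if CPUID
+ * leaf 1 returned ecx = 0x02982203, then ecx & INTEL_AESNI_FLAG ==
+ * 0x02000000, i.e. bit 25 is set, and cached_result becomes 1.
+ */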
+
+#endif /* __amd64 */
diff --git a/module/icp/algs/aes/aes_modes.c b/module/icp/algs/aes/aes_modes.c
new file mode 100644
index 000000000..9e4b498ff
--- /dev/null
+++ b/module/icp/algs/aes/aes_modes.c
@@ -0,0 +1,135 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/zfs_context.h>
+#include <modes/modes.h>
+#include <aes/aes_impl.h>
+
+/* Copy a 16-byte AES block from "in" to "out" */
+void
+aes_copy_block(uint8_t *in, uint8_t *out)
+{
+ if (IS_P2ALIGNED2(in, out, sizeof (uint32_t))) {
+ /* LINTED: pointer alignment */
+ *(uint32_t *)&out[0] = *(uint32_t *)&in[0];
+ /* LINTED: pointer alignment */
+ *(uint32_t *)&out[4] = *(uint32_t *)&in[4];
+ /* LINTED: pointer alignment */
+ *(uint32_t *)&out[8] = *(uint32_t *)&in[8];
+ /* LINTED: pointer alignment */
+ *(uint32_t *)&out[12] = *(uint32_t *)&in[12];
+ } else {
+ AES_COPY_BLOCK(in, out);
+ }
+}
+
+
+/* XOR a 16-byte AES block of data into dst */
+void
+aes_xor_block(uint8_t *data, uint8_t *dst)
+{
+ if (IS_P2ALIGNED2(dst, data, sizeof (uint32_t))) {
+ /* LINTED: pointer alignment */
+ *(uint32_t *)&dst[0] ^= *(uint32_t *)&data[0];
+ /* LINTED: pointer alignment */
+ *(uint32_t *)&dst[4] ^= *(uint32_t *)&data[4];
+ /* LINTED: pointer alignment */
+ *(uint32_t *)&dst[8] ^= *(uint32_t *)&data[8];
+ /* LINTED: pointer alignment */
+ *(uint32_t *)&dst[12] ^= *(uint32_t *)&data[12];
+ } else {
+ AES_XOR_BLOCK(data, dst);
+ }
+}
+
+
+/*
+ * Encrypt multiple blocks of data according to mode.
+ */
+int
+aes_encrypt_contiguous_blocks(void *ctx, char *data, size_t length,
+ crypto_data_t *out)
+{
+ aes_ctx_t *aes_ctx = ctx;
+ int rv;
+
+ if (aes_ctx->ac_flags & CTR_MODE) {
+ rv = ctr_mode_contiguous_blocks(ctx, data, length, out,
+ AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
+ } else if (aes_ctx->ac_flags & CCM_MODE) {
+ rv = ccm_mode_encrypt_contiguous_blocks(ctx, data, length,
+ out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
+ aes_xor_block);
+ } else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
+ rv = gcm_mode_encrypt_contiguous_blocks(ctx, data, length,
+ out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
+ aes_xor_block);
+ } else if (aes_ctx->ac_flags & CBC_MODE) {
+ rv = cbc_encrypt_contiguous_blocks(ctx,
+ data, length, out, AES_BLOCK_LEN, aes_encrypt_block,
+ aes_copy_block, aes_xor_block);
+ } else {
+ rv = ecb_cipher_contiguous_blocks(ctx, data, length, out,
+ AES_BLOCK_LEN, aes_encrypt_block);
+ }
+ return (rv);
+}
+
+
+/*
+ * Decrypt multiple blocks of data according to mode.
+ */
+int
+aes_decrypt_contiguous_blocks(void *ctx, char *data, size_t length,
+ crypto_data_t *out)
+{
+ aes_ctx_t *aes_ctx = ctx;
+ int rv;
+
+ if (aes_ctx->ac_flags & CTR_MODE) {
+ rv = ctr_mode_contiguous_blocks(ctx, data, length, out,
+ AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
+ if (rv == CRYPTO_DATA_LEN_RANGE)
+ rv = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
+ } else if (aes_ctx->ac_flags & CCM_MODE) {
+ rv = ccm_mode_decrypt_contiguous_blocks(ctx, data, length,
+ out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
+ aes_xor_block);
+ } else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
+ rv = gcm_mode_decrypt_contiguous_blocks(ctx, data, length,
+ out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
+ aes_xor_block);
+ } else if (aes_ctx->ac_flags & CBC_MODE) {
+ rv = cbc_decrypt_contiguous_blocks(ctx, data, length, out,
+ AES_BLOCK_LEN, aes_decrypt_block, aes_copy_block,
+ aes_xor_block);
+ } else {
+ rv = ecb_cipher_contiguous_blocks(ctx, data, length, out,
+ AES_BLOCK_LEN, aes_decrypt_block);
+ if (rv == CRYPTO_DATA_LEN_RANGE)
+ rv = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
+ }
+ return (rv);
+}
diff --git a/module/icp/algs/modes/cbc.c b/module/icp/algs/modes/cbc.c
new file mode 100644
index 000000000..2cc94ec72
--- /dev/null
+++ b/module/icp/algs/modes/cbc.c
@@ -0,0 +1,305 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/zfs_context.h>
+#include <modes/modes.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/impl.h>
+
+/*
+ * Algorithm independent CBC functions.
+ */
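+
+/*
+ * For reference, both functions below implement the standard CBC
+ * recurrence:
+ *
+ *	encrypt:  C[i] = E_K(P[i] ^ C[i-1]),  with C[-1] = IV
+ *	decrypt:  P[i] = D_K(C[i]) ^ C[i-1],  with C[-1] = IV
+ *
+ * which is why decryption must remember the previous *ciphertext*
+ * block (the cbc_lastblock/cbc_iv ping-pong via the OTHER() macro
+ * below).
+ */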
+int
+cbc_encrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
+ crypto_data_t *out, size_t block_size,
+ int (*encrypt)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ size_t remainder = length;
+ size_t need = 0;
+ uint8_t *datap = (uint8_t *)data;
+ uint8_t *blockp;
+ uint8_t *lastp;
+ void *iov_or_mp;
+ offset_t offset;
+ uint8_t *out_data_1;
+ uint8_t *out_data_2;
+ size_t out_data_1_len;
+
+ if (length + ctx->cbc_remainder_len < block_size) {
+ /* accumulate bytes here and return */
+ bcopy(datap,
+ (uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len,
+ length);
+ ctx->cbc_remainder_len += length;
+ ctx->cbc_copy_to = datap;
+ return (CRYPTO_SUCCESS);
+ }
+
+ lastp = (uint8_t *)ctx->cbc_iv;
+ if (out != NULL)
+ crypto_init_ptrs(out, &iov_or_mp, &offset);
+
+ do {
+ /* Unprocessed data from last call. */
+ if (ctx->cbc_remainder_len > 0) {
+ need = block_size - ctx->cbc_remainder_len;
+
+ if (need > remainder)
+ return (CRYPTO_DATA_LEN_RANGE);
+
+ bcopy(datap, &((uint8_t *)ctx->cbc_remainder)
+ [ctx->cbc_remainder_len], need);
+
+ blockp = (uint8_t *)ctx->cbc_remainder;
+ } else {
+ blockp = datap;
+ }
+
+ if (out == NULL) {
+ /*
+ * XOR the previous cipher block or IV with the
+ * current clear block.
+ */
+ xor_block(lastp, blockp);
+ encrypt(ctx->cbc_keysched, blockp, blockp);
+
+ ctx->cbc_lastp = blockp;
+ lastp = blockp;
+
+ if (ctx->cbc_remainder_len > 0) {
+ bcopy(blockp, ctx->cbc_copy_to,
+ ctx->cbc_remainder_len);
+ bcopy(blockp + ctx->cbc_remainder_len, datap,
+ need);
+ }
+ } else {
+ /*
+ * XOR the previous cipher block or IV with the
+ * current clear block.
+ */
+ xor_block(blockp, lastp);
+ encrypt(ctx->cbc_keysched, lastp, lastp);
+ crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
+ &out_data_1_len, &out_data_2, block_size);
+
+ /* copy block to where it belongs */
+ if (out_data_1_len == block_size) {
+ copy_block(lastp, out_data_1);
+ } else {
+ bcopy(lastp, out_data_1, out_data_1_len);
+ if (out_data_2 != NULL) {
+ bcopy(lastp + out_data_1_len,
+ out_data_2,
+ block_size - out_data_1_len);
+ }
+ }
+ /* update offset */
+ out->cd_offset += block_size;
+ }
+
+ /* Update pointer to next block of data to be processed. */
+ if (ctx->cbc_remainder_len != 0) {
+ datap += need;
+ ctx->cbc_remainder_len = 0;
+ } else {
+ datap += block_size;
+ }
+
+ remainder = (size_t)&data[length] - (size_t)datap;
+
+ /* Incomplete last block. */
+ if (remainder > 0 && remainder < block_size) {
+ bcopy(datap, ctx->cbc_remainder, remainder);
+ ctx->cbc_remainder_len = remainder;
+ ctx->cbc_copy_to = datap;
+ goto out;
+ }
+ ctx->cbc_copy_to = NULL;
+
+ } while (remainder > 0);
+
+out:
+ /*
+ * Save the last encrypted block in the context.
+ */
+ if (ctx->cbc_lastp != NULL) {
+ copy_block((uint8_t *)ctx->cbc_lastp, (uint8_t *)ctx->cbc_iv);
+ ctx->cbc_lastp = (uint8_t *)ctx->cbc_iv;
+ }
+
+ return (CRYPTO_SUCCESS);
+}
+
+#define OTHER(a, ctx) \
+ (((a) == (ctx)->cbc_lastblock) ? (ctx)->cbc_iv : (ctx)->cbc_lastblock)
+
+/* ARGSUSED */
+int
+cbc_decrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
+ crypto_data_t *out, size_t block_size,
+ int (*decrypt)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ size_t remainder = length;
+ size_t need = 0;
+ uint8_t *datap = (uint8_t *)data;
+ uint8_t *blockp;
+ uint8_t *lastp;
+ void *iov_or_mp;
+ offset_t offset;
+ uint8_t *out_data_1;
+ uint8_t *out_data_2;
+ size_t out_data_1_len;
+
+ if (length + ctx->cbc_remainder_len < block_size) {
+ /* accumulate bytes here and return */
+ bcopy(datap,
+ (uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len,
+ length);
+ ctx->cbc_remainder_len += length;
+ ctx->cbc_copy_to = datap;
+ return (CRYPTO_SUCCESS);
+ }
+
+ lastp = ctx->cbc_lastp;
+ if (out != NULL)
+ crypto_init_ptrs(out, &iov_or_mp, &offset);
+
+ do {
+ /* Unprocessed data from last call. */
+ if (ctx->cbc_remainder_len > 0) {
+ need = block_size - ctx->cbc_remainder_len;
+
+ if (need > remainder)
+ return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
+
+ bcopy(datap, &((uint8_t *)ctx->cbc_remainder)
+ [ctx->cbc_remainder_len], need);
+
+ blockp = (uint8_t *)ctx->cbc_remainder;
+ } else {
+ blockp = datap;
+ }
+
+ /* LINTED: pointer alignment */
+ copy_block(blockp, (uint8_t *)OTHER((uint64_t *)lastp, ctx));
+
+ if (out != NULL) {
+ decrypt(ctx->cbc_keysched, blockp,
+ (uint8_t *)ctx->cbc_remainder);
+ blockp = (uint8_t *)ctx->cbc_remainder;
+ } else {
+ decrypt(ctx->cbc_keysched, blockp, blockp);
+ }
+
+ /*
+ * XOR the previous cipher block or IV with the
+ * currently decrypted block.
+ */
+ xor_block(lastp, blockp);
+
+ /* LINTED: pointer alignment */
+ lastp = (uint8_t *)OTHER((uint64_t *)lastp, ctx);
+
+ if (out != NULL) {
+ crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
+ &out_data_1_len, &out_data_2, block_size);
+
+ bcopy(blockp, out_data_1, out_data_1_len);
+ if (out_data_2 != NULL) {
+ bcopy(blockp + out_data_1_len, out_data_2,
+ block_size - out_data_1_len);
+ }
+
+ /* update offset */
+ out->cd_offset += block_size;
+
+ } else if (ctx->cbc_remainder_len > 0) {
+ /* copy temporary block to where it belongs */
+ bcopy(blockp, ctx->cbc_copy_to, ctx->cbc_remainder_len);
+ bcopy(blockp + ctx->cbc_remainder_len, datap, need);
+ }
+
+ /* Update pointer to next block of data to be processed. */
+ if (ctx->cbc_remainder_len != 0) {
+ datap += need;
+ ctx->cbc_remainder_len = 0;
+ } else {
+ datap += block_size;
+ }
+
+ remainder = (size_t)&data[length] - (size_t)datap;
+
+ /* Incomplete last block. */
+ if (remainder > 0 && remainder < block_size) {
+ bcopy(datap, ctx->cbc_remainder, remainder);
+ ctx->cbc_remainder_len = remainder;
+ ctx->cbc_lastp = lastp;
+ ctx->cbc_copy_to = datap;
+ return (CRYPTO_SUCCESS);
+ }
+ ctx->cbc_copy_to = NULL;
+
+ } while (remainder > 0);
+
+ ctx->cbc_lastp = lastp;
+ return (CRYPTO_SUCCESS);
+}
+
+int
+cbc_init_ctx(cbc_ctx_t *cbc_ctx, char *param, size_t param_len,
+ size_t block_size, void (*copy_block)(uint8_t *, uint64_t *))
+{
+ /*
+ * Copy IV into context.
+ *
+ * If param == NULL then the IV comes from the
+ * cd_miscdata field in the crypto_data structure.
+ */
+ if (param != NULL) {
+ ASSERT(param_len == block_size);
+ copy_block((uchar_t *)param, cbc_ctx->cbc_iv);
+ }
+
+ cbc_ctx->cbc_lastp = (uint8_t *)&cbc_ctx->cbc_iv[0];
+ cbc_ctx->cbc_flags |= CBC_MODE;
+ return (CRYPTO_SUCCESS);
+}
+
+/* ARGSUSED */
+void *
+cbc_alloc_ctx(int kmflag)
+{
+ cbc_ctx_t *cbc_ctx;
+
+ if ((cbc_ctx = kmem_zalloc(sizeof (cbc_ctx_t), kmflag)) == NULL)
+ return (NULL);
+
+ cbc_ctx->cbc_flags = CBC_MODE;
+ return (cbc_ctx);
+}
diff --git a/module/icp/algs/modes/ccm.c b/module/icp/algs/modes/ccm.c
new file mode 100644
index 000000000..22aeb0a6a
--- /dev/null
+++ b/module/icp/algs/modes/ccm.c
@@ -0,0 +1,920 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/zfs_context.h>
+#include <modes/modes.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/impl.h>
+
+#if defined(__i386) || defined(__amd64)
+#include <sys/byteorder.h>
+#define UNALIGNED_POINTERS_PERMITTED
+#endif
+
+/*
+ * Encrypt multiple blocks of data in CCM mode. Decryption in CCM
+ * mode is handled by a separate function.
+ */
+int
+ccm_mode_encrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
+ crypto_data_t *out, size_t block_size,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ size_t remainder = length;
+ size_t need = 0;
+ uint8_t *datap = (uint8_t *)data;
+ uint8_t *blockp;
+ uint8_t *lastp;
+ void *iov_or_mp;
+ offset_t offset;
+ uint8_t *out_data_1;
+ uint8_t *out_data_2;
+ size_t out_data_1_len;
+ uint64_t counter;
+ uint8_t *mac_buf;
+
+ if (length + ctx->ccm_remainder_len < block_size) {
+ /* accumulate bytes here and return */
+ bcopy(datap,
+ (uint8_t *)ctx->ccm_remainder + ctx->ccm_remainder_len,
+ length);
+ ctx->ccm_remainder_len += length;
+ ctx->ccm_copy_to = datap;
+ return (CRYPTO_SUCCESS);
+ }
+
+ lastp = (uint8_t *)ctx->ccm_cb;
+ if (out != NULL)
+ crypto_init_ptrs(out, &iov_or_mp, &offset);
+
+ mac_buf = (uint8_t *)ctx->ccm_mac_buf;
+
+ do {
+ /* Unprocessed data from last call. */
+ if (ctx->ccm_remainder_len > 0) {
+ need = block_size - ctx->ccm_remainder_len;
+
+ if (need > remainder)
+ return (CRYPTO_DATA_LEN_RANGE);
+
+ bcopy(datap, &((uint8_t *)ctx->ccm_remainder)
+ [ctx->ccm_remainder_len], need);
+
+ blockp = (uint8_t *)ctx->ccm_remainder;
+ } else {
+ blockp = datap;
+ }
+
+ /*
+ * do CBC MAC
+ *
+ * XOR the previous cipher block with the current clear block.
+ * mac_buf always contains the previous cipher block.
+ */
+ xor_block(blockp, mac_buf);
+ encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
+
+ /* ccm_cb is the counter block */
+ encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb,
+ (uint8_t *)ctx->ccm_tmp);
+
+ lastp = (uint8_t *)ctx->ccm_tmp;
+
+ /*
+ * Increment counter. Counter bits are confined
+ * to the bottom 64 bits of the counter block.
+ */
+#ifdef _LITTLE_ENDIAN
+ counter = ntohll(ctx->ccm_cb[1] & ctx->ccm_counter_mask);
+ counter = htonll(counter + 1);
+#else
+ counter = ctx->ccm_cb[1] & ctx->ccm_counter_mask;
+ counter++;
+#endif /* _LITTLE_ENDIAN */
+ counter &= ctx->ccm_counter_mask;
+ ctx->ccm_cb[1] =
+ (ctx->ccm_cb[1] & ~(ctx->ccm_counter_mask)) | counter;
+
+ /*
+ * XOR encrypted counter block with the current clear block.
+ */
+ xor_block(blockp, lastp);
+
+ ctx->ccm_processed_data_len += block_size;
+
+ if (out == NULL) {
+ if (ctx->ccm_remainder_len > 0) {
+ bcopy(blockp, ctx->ccm_copy_to,
+ ctx->ccm_remainder_len);
+ bcopy(blockp + ctx->ccm_remainder_len, datap,
+ need);
+ }
+ } else {
+ crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
+ &out_data_1_len, &out_data_2, block_size);
+
+ /* copy block to where it belongs */
+ if (out_data_1_len == block_size) {
+ copy_block(lastp, out_data_1);
+ } else {
+ bcopy(lastp, out_data_1, out_data_1_len);
+ if (out_data_2 != NULL) {
+ bcopy(lastp + out_data_1_len,
+ out_data_2,
+ block_size - out_data_1_len);
+ }
+ }
+ /* update offset */
+ out->cd_offset += block_size;
+ }
+
+ /* Update pointer to next block of data to be processed. */
+ if (ctx->ccm_remainder_len != 0) {
+ datap += need;
+ ctx->ccm_remainder_len = 0;
+ } else {
+ datap += block_size;
+ }
+
+ remainder = (size_t)&data[length] - (size_t)datap;
+
+ /* Incomplete last block. */
+ if (remainder > 0 && remainder < block_size) {
+ bcopy(datap, ctx->ccm_remainder, remainder);
+ ctx->ccm_remainder_len = remainder;
+ ctx->ccm_copy_to = datap;
+ goto out;
+ }
+ ctx->ccm_copy_to = NULL;
+
+ } while (remainder > 0);
+
+out:
+ return (CRYPTO_SUCCESS);
+}
+
+void
+calculate_ccm_mac(ccm_ctx_t *ctx, uint8_t *ccm_mac,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *))
+{
+ uint64_t counter;
+ uint8_t *counterp, *mac_buf;
+ int i;
+
+ mac_buf = (uint8_t *)ctx->ccm_mac_buf;
+
+ /* the first counter block starts with index 0 */
+ counter = 0;
+ ctx->ccm_cb[1] = (ctx->ccm_cb[1] & ~(ctx->ccm_counter_mask)) | counter;
+
+ counterp = (uint8_t *)ctx->ccm_tmp;
+ encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb, counterp);
+
+ /* calculate XOR of MAC with first counter block */
+ for (i = 0; i < ctx->ccm_mac_len; i++) {
+ ccm_mac[i] = mac_buf[i] ^ counterp[i];
+ }
+}
+
+/* ARGSUSED */
+int
+ccm_encrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ uint8_t *lastp, *mac_buf, *ccm_mac_p, *macp = NULL;
+ void *iov_or_mp;
+ offset_t offset;
+ uint8_t *out_data_1;
+ uint8_t *out_data_2;
+ size_t out_data_1_len;
+ int i;
+
+ if (out->cd_length < (ctx->ccm_remainder_len + ctx->ccm_mac_len)) {
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+
+ /*
+ * When we get here, the number of payload bytes processed,
+ * plus whatever data remains, should equal the payload
+ * length that was passed in at init time.
+ */
+ if ((ctx->ccm_processed_data_len + ctx->ccm_remainder_len)
+ != (ctx->ccm_data_len)) {
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+
+ mac_buf = (uint8_t *)ctx->ccm_mac_buf;
+
+ if (ctx->ccm_remainder_len > 0) {
+
+ /* ccm_mac_input_buf is not used for encryption */
+ macp = (uint8_t *)ctx->ccm_mac_input_buf;
+ bzero(macp, block_size);
+
+ /* copy remainder to temporary buffer */
+ bcopy(ctx->ccm_remainder, macp, ctx->ccm_remainder_len);
+
+ /* calculate the CBC MAC */
+ xor_block(macp, mac_buf);
+ encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
+
+ /* calculate the counter mode */
+ lastp = (uint8_t *)ctx->ccm_tmp;
+ encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb, lastp);
+
+ /* XOR with counter block */
+ for (i = 0; i < ctx->ccm_remainder_len; i++) {
+ macp[i] ^= lastp[i];
+ }
+ ctx->ccm_processed_data_len += ctx->ccm_remainder_len;
+ }
+
+ /* Calculate the CCM MAC */
+ ccm_mac_p = (uint8_t *)ctx->ccm_tmp;
+ calculate_ccm_mac(ctx, ccm_mac_p, encrypt_block);
+
+ crypto_init_ptrs(out, &iov_or_mp, &offset);
+ crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
+ &out_data_1_len, &out_data_2,
+ ctx->ccm_remainder_len + ctx->ccm_mac_len);
+
+ if (ctx->ccm_remainder_len > 0) {
+
+ /* copy temporary block to where it belongs */
+ if (out_data_2 == NULL) {
+ /* everything will fit in out_data_1 */
+ bcopy(macp, out_data_1, ctx->ccm_remainder_len);
+ bcopy(ccm_mac_p, out_data_1 + ctx->ccm_remainder_len,
+ ctx->ccm_mac_len);
+ } else {
+
+ if (out_data_1_len < ctx->ccm_remainder_len) {
+
+ size_t data_2_len_used;
+
+ bcopy(macp, out_data_1, out_data_1_len);
+
+ data_2_len_used = ctx->ccm_remainder_len
+ - out_data_1_len;
+
+ bcopy((uint8_t *)macp + out_data_1_len,
+ out_data_2, data_2_len_used);
+ bcopy(ccm_mac_p, out_data_2 + data_2_len_used,
+ ctx->ccm_mac_len);
+ } else {
+ bcopy(macp, out_data_1, out_data_1_len);
+ if (out_data_1_len == ctx->ccm_remainder_len) {
+ /* mac will be in out_data_2 */
+ bcopy(ccm_mac_p, out_data_2,
+ ctx->ccm_mac_len);
+ } else {
+ size_t len_not_used = out_data_1_len -
+ ctx->ccm_remainder_len;
+ /*
+ * part of the MAC will be in
+ * out_data_1, and the rest of the
+ * MAC will be in out_data_2
+ */
+ bcopy(ccm_mac_p,
+ out_data_1 + ctx->ccm_remainder_len,
+ len_not_used);
+ bcopy(ccm_mac_p + len_not_used,
+ out_data_2,
+ ctx->ccm_mac_len - len_not_used);
+
+ }
+ }
+ }
+ } else {
+ /* copy block to where it belongs */
+ bcopy(ccm_mac_p, out_data_1, out_data_1_len);
+ if (out_data_2 != NULL) {
+ bcopy(ccm_mac_p + out_data_1_len, out_data_2,
+ block_size - out_data_1_len);
+ }
+ }
+ out->cd_offset += ctx->ccm_remainder_len + ctx->ccm_mac_len;
+ ctx->ccm_remainder_len = 0;
+ return (CRYPTO_SUCCESS);
+}
+
+/*
+ * This only deals with decrypting the last portion of the input,
+ * which might not be a multiple of the block length.
+ */
+void
+ccm_decrypt_incomplete_block(ccm_ctx_t *ctx,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *))
+{
+ uint8_t *datap, *outp, *counterp;
+ int i;
+
+ datap = (uint8_t *)ctx->ccm_remainder;
+ outp = &((ctx->ccm_pt_buf)[ctx->ccm_processed_data_len]);
+
+ counterp = (uint8_t *)ctx->ccm_tmp;
+ encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb, counterp);
+
+ /* XOR with counter block */
+ for (i = 0; i < ctx->ccm_remainder_len; i++) {
+ outp[i] = datap[i] ^ counterp[i];
+ }
+}
+
+/*
+ * This will decrypt the cipher text. However, the plaintext won't be
+ * returned to the caller. It will be returned when decrypt_final() is
+ * called, and only if the MAC matches.
+ */
+/* ARGSUSED */
+int
+ccm_mode_decrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
+ crypto_data_t *out, size_t block_size,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ size_t remainder = length;
+ size_t need = 0;
+ uint8_t *datap = (uint8_t *)data;
+ uint8_t *blockp;
+ uint8_t *cbp;
+ uint64_t counter;
+ size_t pt_len, total_decrypted_len, mac_len, pm_len, pd_len;
+ uint8_t *resultp;
+
+
+ pm_len = ctx->ccm_processed_mac_len;
+
+ if (pm_len > 0) {
+ uint8_t *tmp;
+ /*
+ * all ciphertext has been processed; we are just
+ * accumulating the remaining bytes of the MAC
+ */
+ if ((pm_len + length) > ctx->ccm_mac_len) {
+ return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
+ }
+ tmp = (uint8_t *)ctx->ccm_mac_input_buf;
+
+ bcopy(datap, tmp + pm_len, length);
+
+ ctx->ccm_processed_mac_len += length;
+ return (CRYPTO_SUCCESS);
+ }
+
+ /*
+ * If we decrypt the given data, what total amount of data would
+ * have been decrypted?
+ */
+ pd_len = ctx->ccm_processed_data_len;
+ total_decrypted_len = pd_len + length + ctx->ccm_remainder_len;
+
+ if (total_decrypted_len >
+ (ctx->ccm_data_len + ctx->ccm_mac_len)) {
+ return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
+ }
+
+ pt_len = ctx->ccm_data_len;
+
+ if (total_decrypted_len > pt_len) {
+ /*
+ * part of the input will be the MAC; isolate it to be
+ * dealt with later. The left-over data in
+ * ccm_remainder_len from the previous call cannot be part
+ * of the MAC; otherwise it would already have been
+ * consumed by that call.
+ */
+ size_t pt_part = pt_len - pd_len - ctx->ccm_remainder_len;
+
+ mac_len = length - pt_part;
+
+ ctx->ccm_processed_mac_len = mac_len;
+ bcopy(data + pt_part, ctx->ccm_mac_input_buf, mac_len);
+
+ if (pt_part + ctx->ccm_remainder_len < block_size) {
+ /*
+ * since this is the last of the ciphertext,
+ * just decrypt it here
+ */
+ bcopy(datap, &((uint8_t *)ctx->ccm_remainder)
+ [ctx->ccm_remainder_len], pt_part);
+ ctx->ccm_remainder_len += pt_part;
+ ccm_decrypt_incomplete_block(ctx, encrypt_block);
+ ctx->ccm_processed_data_len += ctx->ccm_remainder_len;
+ ctx->ccm_remainder_len = 0;
+ return (CRYPTO_SUCCESS);
+ } else {
+ /* let rest of the code handle this */
+ length = pt_part;
+ }
+ } else if (length + ctx->ccm_remainder_len < block_size) {
+ /* accumulate bytes here and return */
+ bcopy(datap,
+ (uint8_t *)ctx->ccm_remainder + ctx->ccm_remainder_len,
+ length);
+ ctx->ccm_remainder_len += length;
+ ctx->ccm_copy_to = datap;
+ return (CRYPTO_SUCCESS);
+ }
+
+ do {
+ /* Unprocessed data from last call. */
+ if (ctx->ccm_remainder_len > 0) {
+ need = block_size - ctx->ccm_remainder_len;
+
+ if (need > remainder)
+ return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
+
+ bcopy(datap, &((uint8_t *)ctx->ccm_remainder)
+ [ctx->ccm_remainder_len], need);
+
+ blockp = (uint8_t *)ctx->ccm_remainder;
+ } else {
+ blockp = datap;
+ }
+
+ /* Calculate the counter mode, ccm_cb is the counter block */
+ cbp = (uint8_t *)ctx->ccm_tmp;
+ encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb, cbp);
+
+ /*
+ * Increment counter.
+ * Counter bits are confined to the bottom 64 bits
+ */
+#ifdef _LITTLE_ENDIAN
+ counter = ntohll(ctx->ccm_cb[1] & ctx->ccm_counter_mask);
+ counter = htonll(counter + 1);
+#else
+ counter = ctx->ccm_cb[1] & ctx->ccm_counter_mask;
+ counter++;
+#endif /* _LITTLE_ENDIAN */
+ counter &= ctx->ccm_counter_mask;
+ ctx->ccm_cb[1] =
+ (ctx->ccm_cb[1] & ~(ctx->ccm_counter_mask)) | counter;
+
+ /* XOR with the ciphertext */
+ xor_block(blockp, cbp);
+
+ /* Copy the plaintext to the "holding buffer" */
+ resultp = (uint8_t *)ctx->ccm_pt_buf +
+ ctx->ccm_processed_data_len;
+ copy_block(cbp, resultp);
+
+ ctx->ccm_processed_data_len += block_size;
+
+ ctx->ccm_lastp = blockp;
+
+ /* Update pointer to next block of data to be processed. */
+ if (ctx->ccm_remainder_len != 0) {
+ datap += need;
+ ctx->ccm_remainder_len = 0;
+ } else {
+ datap += block_size;
+ }
+
+ remainder = (size_t)&data[length] - (size_t)datap;
+
+ /* Incomplete last block */
+ if (remainder > 0 && remainder < block_size) {
+ bcopy(datap, ctx->ccm_remainder, remainder);
+ ctx->ccm_remainder_len = remainder;
+ ctx->ccm_copy_to = datap;
+ if (ctx->ccm_processed_mac_len > 0) {
+ /*
+ * not expecting any more ciphertext, just
+ * compute plaintext for the remaining input
+ */
+ ccm_decrypt_incomplete_block(ctx,
+ encrypt_block);
+ ctx->ccm_processed_data_len += remainder;
+ ctx->ccm_remainder_len = 0;
+ }
+ goto out;
+ }
+ ctx->ccm_copy_to = NULL;
+
+ } while (remainder > 0);
+
+out:
+ return (CRYPTO_SUCCESS);
+}
+
+int
+ccm_decrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ size_t mac_remain, pt_len;
+ uint8_t *pt, *mac_buf, *macp, *ccm_mac_p;
+ int rv;
+
+ pt_len = ctx->ccm_data_len;
+
+ /* Make sure output buffer can fit all of the plaintext */
+ if (out->cd_length < pt_len) {
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+
+ pt = ctx->ccm_pt_buf;
+ mac_remain = ctx->ccm_processed_data_len;
+ mac_buf = (uint8_t *)ctx->ccm_mac_buf;
+
+ macp = (uint8_t *)ctx->ccm_tmp;
+
+ while (mac_remain > 0) {
+
+ if (mac_remain < block_size) {
+ bzero(macp, block_size);
+ bcopy(pt, macp, mac_remain);
+ mac_remain = 0;
+ } else {
+ copy_block(pt, macp);
+ mac_remain -= block_size;
+ pt += block_size;
+ }
+
+ /* calculate the CBC MAC */
+ xor_block(macp, mac_buf);
+ encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
+ }
+
+ /* Calculate the CCM MAC */
+ ccm_mac_p = (uint8_t *)ctx->ccm_tmp;
+ calculate_ccm_mac((ccm_ctx_t *)ctx, ccm_mac_p, encrypt_block);
+
+ /* compare the input CCM MAC value with what we calculated */
+ if (bcmp(ctx->ccm_mac_input_buf, ccm_mac_p, ctx->ccm_mac_len)) {
+ /* They don't match */
+ return (CRYPTO_INVALID_MAC);
+ } else {
+ rv = crypto_put_output_data(ctx->ccm_pt_buf, out, pt_len);
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ out->cd_offset += pt_len;
+ }
+ return (CRYPTO_SUCCESS);
+}
+
+int
+ccm_validate_args(CK_AES_CCM_PARAMS *ccm_param, boolean_t is_encrypt_init)
+{
+ size_t macSize, nonceSize;
+ uint8_t q;
+ uint64_t maxValue;
+
+ /*
+ * Check the length of the MAC. The only valid
+ * MAC lengths are 4, 6, 8, 10, 12, 14, and 16 bytes
+ */
+ macSize = ccm_param->ulMACSize;
+ if ((macSize < 4) || (macSize > 16) || ((macSize % 2) != 0)) {
+ return (CRYPTO_MECHANISM_PARAM_INVALID);
+ }
+
+ /* Check the nonce length. Valid values are 7, 8, 9, 10, 11, 12, 13 */
+ nonceSize = ccm_param->ulNonceSize;
+ if ((nonceSize < 7) || (nonceSize > 13)) {
+ return (CRYPTO_MECHANISM_PARAM_INVALID);
+ }
+
+ /* q is the size, in bytes, of the field that stores the payload length */
+ q = (uint8_t)((15 - nonceSize) & 0xFF);
+
+
+ /*
+ * For decryption, make sure the size of the ciphertext is at
+ * least as large as the MAC length
+ */
+ if ((!is_encrypt_init) && (ccm_param->ulDataSize < macSize)) {
+ return (CRYPTO_MECHANISM_PARAM_INVALID);
+ }
+
+ /*
+ * Check to make sure the length of the payload is within the
+ * range of values allowed by q
+ */
+ if (q < 8) {
+ maxValue = (1ULL << (q * 8)) - 1;
+ } else {
+ maxValue = ULONG_MAX;
+ }
+
+ if (ccm_param->ulDataSize > maxValue) {
+ return (CRYPTO_MECHANISM_PARAM_INVALID);
+ }
+ return (CRYPTO_SUCCESS);
+}
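+
+/*
+ * Worked example for the range check above: a 13-byte nonce gives
+ * q = 15 - 13 = 2, so the payload length field is 2 bytes wide and
+ * maxValue = (1ULL << 16) - 1 = 65535; any ulDataSize larger than
+ * that is rejected.
+ */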
+
+/*
+ * Format the first block used in CBC-MAC (B0) and the initial counter
+ * block based on formatting functions and counter generation functions
+ * specified in RFC 3610 and NIST publication 800-38C, appendix A
+ *
+ * b0 is the first block used in CBC-MAC
+ * cb0 is the first counter block
+ *
+ * It's assumed that the arguments b0 and cb0 are preallocated AES blocks
+ *
+ */
+static void
+ccm_format_initial_blocks(uchar_t *nonce, ulong_t nonceSize,
+ ulong_t authDataSize, uint8_t *b0, ccm_ctx_t *aes_ctx)
+{
+ uint64_t payloadSize;
+ uint8_t t, q, have_adata = 0;
+ size_t limit;
+ int i, j, k;
+ uint64_t mask = 0;
+ uint8_t *cb;
+
+ q = (uint8_t)((15 - nonceSize) & 0xFF);
+ t = (uint8_t)((aes_ctx->ccm_mac_len) & 0xFF);
+
+ /* Construct the first octet of b0 */
+ if (authDataSize > 0) {
+ have_adata = 1;
+ }
+ b0[0] = (have_adata << 6) | (((t - 2) / 2) << 3) | (q - 1);
+
+ /* copy the nonce value into b0 */
+ bcopy(nonce, &(b0[1]), nonceSize);
+
+ /* store the length of the payload into b0 */
+ bzero(&(b0[1+nonceSize]), q);
+
+ payloadSize = aes_ctx->ccm_data_len;
+ limit = 8 < q ? 8 : q;
+
+ for (i = 0, j = 0, k = 15; i < limit; i++, j += 8, k--) {
+ b0[k] = (uint8_t)((payloadSize >> j) & 0xFF);
+ }
+
+ /* format the counter block */
+
+ cb = (uint8_t *)aes_ctx->ccm_cb;
+
+ cb[0] = 0x07 & (q-1); /* first byte */
+
+ /* copy the nonce value into the counter block */
+ bcopy(nonce, &(cb[1]), nonceSize);
+
+ bzero(&(cb[1+nonceSize]), q);
+
+ /* Create the mask for the counter field based on the size of nonce */
+ q <<= 3;
+ while (q-- > 0) {
+ mask |= (1ULL << q);
+ }
+
+#ifdef _LITTLE_ENDIAN
+ mask = htonll(mask);
+#endif
+ aes_ctx->ccm_counter_mask = mask;
+
+ /*
+ * During calculation, we start using counter block 1, so we
+ * set it up right here.
+ * We can just set the last byte to have the value 1, because
+ * even with the biggest nonce of 13, the last byte of the
+ * counter block will be used for the counter value.
+ */
+ cb[15] = 0x01;
+}
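+
+/*
+ * Illustration (values are assumed, not taken from a caller): with a
+ * 12-byte nonce, a 16-byte MAC, a 0x20-byte payload, and no
+ * associated data, q = 3 and t = 16, so the function above produces
+ *
+ *	b0[0]      = (0 << 6) | (((16 - 2) / 2) << 3) | (3 - 1) = 0x3a
+ *	b0[1..12]  = nonce
+ *	b0[13..15] = 00 00 20	(payload length, big-endian)
+ *
+ * and the counter block becomes cb[0] = 0x02, cb[1..12] = nonce,
+ * cb[13..15] = 00 00 01 (counter, pre-set to 1).
+ */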
+
+/*
+ * Encode the length of the associated data as
+ * specified in RFC 3610 and NIST publication 800-38C, appendix A
+ */
+static void
+encode_adata_len(ulong_t auth_data_len, uint8_t *encoded, size_t *encoded_len)
+{
+#ifdef UNALIGNED_POINTERS_PERMITTED
+ uint32_t *lencoded_ptr;
+#ifdef _LP64
+ uint64_t *llencoded_ptr;
+#endif
+#endif /* UNALIGNED_POINTERS_PERMITTED */
+
+ if (auth_data_len < ((1ULL<<16) - (1ULL<<8))) {
+ /* 0 < a < (2^16-2^8) */
+ *encoded_len = 2;
+ encoded[0] = (auth_data_len & 0xff00) >> 8;
+ encoded[1] = auth_data_len & 0xff;
+
+ } else if ((auth_data_len >= ((1ULL<<16) - (1ULL<<8))) &&
+ (auth_data_len < (1ULL << 31))) {
+ /* (2^16-2^8) <= a < 2^31 */
+ *encoded_len = 6;
+ encoded[0] = 0xff;
+ encoded[1] = 0xfe;
+#ifdef UNALIGNED_POINTERS_PERMITTED
+ lencoded_ptr = (uint32_t *)&encoded[2];
+ *lencoded_ptr = htonl(auth_data_len);
+#else
+ encoded[2] = (auth_data_len & 0xff000000) >> 24;
+ encoded[3] = (auth_data_len & 0xff0000) >> 16;
+ encoded[4] = (auth_data_len & 0xff00) >> 8;
+ encoded[5] = auth_data_len & 0xff;
+#endif /* UNALIGNED_POINTERS_PERMITTED */
+
+#ifdef _LP64
+ } else {
+ /* 2^31 <= a < 2^64 */
+ *encoded_len = 10;
+ encoded[0] = 0xff;
+ encoded[1] = 0xff;
+#ifdef UNALIGNED_POINTERS_PERMITTED
+ llencoded_ptr = (uint64_t *)&encoded[2];
+ *llencoded_ptr = htonll(auth_data_len);
+#else
+ encoded[2] = (auth_data_len & 0xff00000000000000) >> 56;
+ encoded[3] = (auth_data_len & 0xff000000000000) >> 48;
+ encoded[4] = (auth_data_len & 0xff0000000000) >> 40;
+ encoded[5] = (auth_data_len & 0xff00000000) >> 32;
+ encoded[6] = (auth_data_len & 0xff000000) >> 24;
+ encoded[7] = (auth_data_len & 0xff0000) >> 16;
+ encoded[8] = (auth_data_len & 0xff00) >> 8;
+ encoded[9] = auth_data_len & 0xff;
+#endif /* UNALIGNED_POINTERS_PERMITTED */
+#endif /* _LP64 */
+ }
+}
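+
+/*
+ * Example encodings: auth_data_len = 200 uses the 2-octet form
+ * (00 c8), while auth_data_len = 70000 (0x11170) falls in the middle
+ * range and uses the 6-octet form (ff fe 00 01 11 70).
+ */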
+
+/*
+ * The following function should be called at encrypt or decrypt init time
+ * for AES CCM mode.
+ */
+int
+ccm_init(ccm_ctx_t *ctx, unsigned char *nonce, size_t nonce_len,
+ unsigned char *auth_data, size_t auth_data_len, size_t block_size,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ uint8_t *mac_buf, *datap, *ivp, *authp;
+ size_t remainder, processed;
+ uint8_t encoded_a[10]; /* max encoded auth data length is 10 octets */
+ size_t encoded_a_len = 0;
+
+ mac_buf = (uint8_t *)&(ctx->ccm_mac_buf);
+
+ /*
+ * Format the 1st block for CBC-MAC and construct the
+ * 1st counter block.
+ *
+ * ctx->ccm_cb is used for storing the counter block;
+ * mac_buf will store b0 at this time.
+ */
+ ccm_format_initial_blocks(nonce, nonce_len,
+ auth_data_len, mac_buf, ctx);
+
+ /* The IV for CBC MAC for AES CCM mode is always zero */
+ ivp = (uint8_t *)ctx->ccm_tmp;
+ bzero(ivp, block_size);
+
+ xor_block(ivp, mac_buf);
+
+ /* encrypt b0, which holds the flags, nonce, and payload length */
+ encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
+
+ /* take care of the associated data, if any */
+ if (auth_data_len == 0) {
+ return (CRYPTO_SUCCESS);
+ }
+
+ encode_adata_len(auth_data_len, encoded_a, &encoded_a_len);
+
+ remainder = auth_data_len;
+
+ /* 1st block: the encoded length of the associated data, plus as much of the data as fits */
+ authp = (uint8_t *)ctx->ccm_tmp;
+ bzero(authp, block_size);
+ bcopy(encoded_a, authp, encoded_a_len);
+ processed = block_size - encoded_a_len;
+ if (processed > auth_data_len) {
+ /* in case auth_data is very small */
+ processed = auth_data_len;
+ }
+ bcopy(auth_data, authp+encoded_a_len, processed);
+ /* xor with previous buffer */
+ xor_block(authp, mac_buf);
+ encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
+ remainder -= processed;
+ if (remainder == 0) {
+ /* a small amount of associated data, it's all done now */
+ return (CRYPTO_SUCCESS);
+ }
+
+ do {
+ if (remainder < block_size) {
+ /*
+ * There's not a block full of data, pad rest of
+ * buffer with zero
+ */
+ bzero(authp, block_size);
+ bcopy(&(auth_data[processed]), authp, remainder);
+ datap = (uint8_t *)authp;
+ remainder = 0;
+ } else {
+ datap = (uint8_t *)(&(auth_data[processed]));
+ processed += block_size;
+ remainder -= block_size;
+ }
+
+ xor_block(datap, mac_buf);
+ encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
+
+ } while (remainder > 0);
+
+ return (CRYPTO_SUCCESS);
+}
+
+int
+ccm_init_ctx(ccm_ctx_t *ccm_ctx, char *param, int kmflag,
+ boolean_t is_encrypt_init, size_t block_size,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ int rv;
+ CK_AES_CCM_PARAMS *ccm_param;
+
+ if (param != NULL) {
+ ccm_param = (CK_AES_CCM_PARAMS *)param;
+
+ if ((rv = ccm_validate_args(ccm_param,
+ is_encrypt_init)) != 0) {
+ return (rv);
+ }
+
+ ccm_ctx->ccm_mac_len = ccm_param->ulMACSize;
+ if (is_encrypt_init) {
+ ccm_ctx->ccm_data_len = ccm_param->ulDataSize;
+ } else {
+ ccm_ctx->ccm_data_len =
+ ccm_param->ulDataSize - ccm_ctx->ccm_mac_len;
+ ccm_ctx->ccm_processed_mac_len = 0;
+ }
+ ccm_ctx->ccm_processed_data_len = 0;
+
+ ccm_ctx->ccm_flags |= CCM_MODE;
+ } else {
+ rv = CRYPTO_MECHANISM_PARAM_INVALID;
+ goto out;
+ }
+
+ if (ccm_init(ccm_ctx, ccm_param->nonce, ccm_param->ulNonceSize,
+ ccm_param->authData, ccm_param->ulAuthDataSize, block_size,
+ encrypt_block, xor_block) != 0) {
+ rv = CRYPTO_MECHANISM_PARAM_INVALID;
+ goto out;
+ }
+ if (!is_encrypt_init) {
+ /* allocate buffer for storing decrypted plaintext */
+ ccm_ctx->ccm_pt_buf = vmem_alloc(ccm_ctx->ccm_data_len,
+ kmflag);
+ if (ccm_ctx->ccm_pt_buf == NULL) {
+ rv = CRYPTO_HOST_MEMORY;
+ }
+ }
+out:
+ return (rv);
+}
+
+void *
+ccm_alloc_ctx(int kmflag)
+{
+ ccm_ctx_t *ccm_ctx;
+
+ if ((ccm_ctx = kmem_zalloc(sizeof (ccm_ctx_t), kmflag)) == NULL)
+ return (NULL);
+
+ ccm_ctx->ccm_flags = CCM_MODE;
+ return (ccm_ctx);
+}
diff --git a/module/icp/algs/modes/ctr.c b/module/icp/algs/modes/ctr.c
new file mode 100644
index 000000000..77ba28ddd
--- /dev/null
+++ b/module/icp/algs/modes/ctr.c
@@ -0,0 +1,238 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/zfs_context.h>
+#include <modes/modes.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/impl.h>
+#include <sys/byteorder.h>
+
+/*
+ * Encrypt and decrypt multiple blocks of data in counter mode.
+ */
+int
+ctr_mode_contiguous_blocks(ctr_ctx_t *ctx, char *data, size_t length,
+ crypto_data_t *out, size_t block_size,
+ int (*cipher)(const void *ks, const uint8_t *pt, uint8_t *ct),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ size_t remainder = length;
+ size_t need = 0;
+ uint8_t *datap = (uint8_t *)data;
+ uint8_t *blockp;
+ uint8_t *lastp;
+ void *iov_or_mp;
+ offset_t offset;
+ uint8_t *out_data_1;
+ uint8_t *out_data_2;
+ size_t out_data_1_len;
+ uint64_t lower_counter, upper_counter;
+
+ if (length + ctx->ctr_remainder_len < block_size) {
+ /* accumulate bytes here and return */
+ bcopy(datap,
+ (uint8_t *)ctx->ctr_remainder + ctx->ctr_remainder_len,
+ length);
+ ctx->ctr_remainder_len += length;
+ ctx->ctr_copy_to = datap;
+ return (CRYPTO_SUCCESS);
+ }
+
+ lastp = (uint8_t *)ctx->ctr_cb;
+ if (out != NULL)
+ crypto_init_ptrs(out, &iov_or_mp, &offset);
+
+ do {
+ /* Unprocessed data from last call. */
+ if (ctx->ctr_remainder_len > 0) {
+ need = block_size - ctx->ctr_remainder_len;
+
+ if (need > remainder)
+ return (CRYPTO_DATA_LEN_RANGE);
+
+ bcopy(datap, &((uint8_t *)ctx->ctr_remainder)
+ [ctx->ctr_remainder_len], need);
+
+ blockp = (uint8_t *)ctx->ctr_remainder;
+ } else {
+ blockp = datap;
+ }
+
+ /* ctr_cb is the counter block */
+ cipher(ctx->ctr_keysched, (uint8_t *)ctx->ctr_cb,
+ (uint8_t *)ctx->ctr_tmp);
+
+ lastp = (uint8_t *)ctx->ctr_tmp;
+
+ /*
+ * Increment Counter.
+ */
+ lower_counter = ntohll(ctx->ctr_cb[1] & ctx->ctr_lower_mask);
+ lower_counter = htonll(lower_counter + 1);
+ lower_counter &= ctx->ctr_lower_mask;
+ ctx->ctr_cb[1] = (ctx->ctr_cb[1] & ~(ctx->ctr_lower_mask)) |
+ lower_counter;
+
+ /* wrap around */
+ if (lower_counter == 0) {
+ upper_counter =
+ ntohll(ctx->ctr_cb[0] & ctx->ctr_upper_mask);
+ upper_counter = htonll(upper_counter + 1);
+ upper_counter &= ctx->ctr_upper_mask;
+ ctx->ctr_cb[0] =
+ (ctx->ctr_cb[0] & ~(ctx->ctr_upper_mask)) |
+ upper_counter;
+ }
+
+ /*
+ * XOR encrypted counter block with the current clear block.
+ */
+ xor_block(blockp, lastp);
+
+ if (out == NULL) {
+ if (ctx->ctr_remainder_len > 0) {
+ bcopy(lastp, ctx->ctr_copy_to,
+ ctx->ctr_remainder_len);
+ bcopy(lastp + ctx->ctr_remainder_len, datap,
+ need);
+ }
+ } else {
+ crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
+ &out_data_1_len, &out_data_2, block_size);
+
+ /* copy block to where it belongs */
+ bcopy(lastp, out_data_1, out_data_1_len);
+ if (out_data_2 != NULL) {
+ bcopy(lastp + out_data_1_len, out_data_2,
+ block_size - out_data_1_len);
+ }
+ /* update offset */
+ out->cd_offset += block_size;
+ }
+
+ /* Update pointer to next block of data to be processed. */
+ if (ctx->ctr_remainder_len != 0) {
+ datap += need;
+ ctx->ctr_remainder_len = 0;
+ } else {
+ datap += block_size;
+ }
+
+ remainder = (size_t)&data[length] - (size_t)datap;
+
+ /* Incomplete last block. */
+ if (remainder > 0 && remainder < block_size) {
+ bcopy(datap, ctx->ctr_remainder, remainder);
+ ctx->ctr_remainder_len = remainder;
+ ctx->ctr_copy_to = datap;
+ goto out;
+ }
+ ctx->ctr_copy_to = NULL;
+
+ } while (remainder > 0);
+
+out:
+ return (CRYPTO_SUCCESS);
+}
+
+int
+ctr_mode_final(ctr_ctx_t *ctx, crypto_data_t *out,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *))
+{
+ uint8_t *lastp;
+ void *iov_or_mp;
+ offset_t offset;
+ uint8_t *out_data_1;
+ uint8_t *out_data_2;
+ size_t out_data_1_len;
+ uint8_t *p;
+ int i;
+
+ if (out->cd_length < ctx->ctr_remainder_len)
+ return (CRYPTO_DATA_LEN_RANGE);
+
+ encrypt_block(ctx->ctr_keysched, (uint8_t *)ctx->ctr_cb,
+ (uint8_t *)ctx->ctr_tmp);
+
+ lastp = (uint8_t *)ctx->ctr_tmp;
+ p = (uint8_t *)ctx->ctr_remainder;
+ for (i = 0; i < ctx->ctr_remainder_len; i++) {
+ p[i] ^= lastp[i];
+ }
+
+ crypto_init_ptrs(out, &iov_or_mp, &offset);
+ crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
+ &out_data_1_len, &out_data_2, ctx->ctr_remainder_len);
+
+ bcopy(p, out_data_1, out_data_1_len);
+ if (out_data_2 != NULL) {
+ bcopy((uint8_t *)p + out_data_1_len,
+ out_data_2, ctx->ctr_remainder_len - out_data_1_len);
+ }
+ out->cd_offset += ctx->ctr_remainder_len;
+ ctx->ctr_remainder_len = 0;
+ return (CRYPTO_SUCCESS);
+}
+
+int
+ctr_init_ctx(ctr_ctx_t *ctr_ctx, ulong_t count, uint8_t *cb,
+void (*copy_block)(uint8_t *, uint8_t *))
+{
+ uint64_t upper_mask = 0;
+ uint64_t lower_mask = 0;
+
+ if (count == 0 || count > 128) {
+ return (CRYPTO_MECHANISM_PARAM_INVALID);
+ }
+ /* upper 64 bits of the mask */
+ if (count >= 64) {
+ count -= 64;
+ upper_mask = (count == 64) ? UINT64_MAX : (1ULL << count) - 1;
+ lower_mask = UINT64_MAX;
+ } else {
+ /* count < 64: the mask covers at most the lower 63 bits */
+ lower_mask = (1ULL << count) - 1;
+ }
+ ctr_ctx->ctr_lower_mask = htonll(lower_mask);
+ ctr_ctx->ctr_upper_mask = htonll(upper_mask);
+
+ copy_block(cb, (uchar_t *)ctr_ctx->ctr_cb);
+ ctr_ctx->ctr_lastp = (uint8_t *)&ctr_ctx->ctr_cb[0];
+ ctr_ctx->ctr_flags |= CTR_MODE;
+ return (CRYPTO_SUCCESS);
+}
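+
+/*
+ * Example: count = 32 (a common CTR counter width) takes the else
+ * branch above, giving lower_mask = (1ULL << 32) - 1 =
+ * 0x00000000ffffffff and upper_mask = 0; after htonll(), the stored
+ * ctr_lower_mask selects exactly the last four bytes of the counter
+ * block, matching the increment logic in
+ * ctr_mode_contiguous_blocks().
+ */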
+
+/* ARGSUSED */
+void *
+ctr_alloc_ctx(int kmflag)
+{
+ ctr_ctx_t *ctr_ctx;
+
+ if ((ctr_ctx = kmem_zalloc(sizeof (ctr_ctx_t), kmflag)) == NULL)
+ return (NULL);
+
+ ctr_ctx->ctr_flags = CTR_MODE;
+ return (ctr_ctx);
+}
diff --git a/module/icp/algs/modes/ecb.c b/module/icp/algs/modes/ecb.c
new file mode 100644
index 000000000..04e6c5eaa
--- /dev/null
+++ b/module/icp/algs/modes/ecb.c
@@ -0,0 +1,143 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/zfs_context.h>
+#include <modes/modes.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/impl.h>
+
+/*
+ * Algorithm independent ECB functions.
+ */
+int
+ecb_cipher_contiguous_blocks(ecb_ctx_t *ctx, char *data, size_t length,
+ crypto_data_t *out, size_t block_size,
+ int (*cipher)(const void *ks, const uint8_t *pt, uint8_t *ct))
+{
+ size_t remainder = length;
+ size_t need = 0;
+ uint8_t *datap = (uint8_t *)data;
+ uint8_t *blockp;
+ uint8_t *lastp;
+ void *iov_or_mp;
+ offset_t offset;
+ uint8_t *out_data_1;
+ uint8_t *out_data_2;
+ size_t out_data_1_len;
+
+ if (length + ctx->ecb_remainder_len < block_size) {
+ /* accumulate bytes here and return */
+ bcopy(datap,
+ (uint8_t *)ctx->ecb_remainder + ctx->ecb_remainder_len,
+ length);
+ ctx->ecb_remainder_len += length;
+ ctx->ecb_copy_to = datap;
+ return (CRYPTO_SUCCESS);
+ }
+
+ lastp = (uint8_t *)ctx->ecb_iv;
+ if (out != NULL)
+ crypto_init_ptrs(out, &iov_or_mp, &offset);
+
+ do {
+ /* Unprocessed data from last call. */
+ if (ctx->ecb_remainder_len > 0) {
+ need = block_size - ctx->ecb_remainder_len;
+
+ if (need > remainder)
+ return (CRYPTO_DATA_LEN_RANGE);
+
+ bcopy(datap, &((uint8_t *)ctx->ecb_remainder)
+ [ctx->ecb_remainder_len], need);
+
+ blockp = (uint8_t *)ctx->ecb_remainder;
+ } else {
+ blockp = datap;
+ }
+
+ if (out == NULL) {
+ cipher(ctx->ecb_keysched, blockp, blockp);
+
+ ctx->ecb_lastp = blockp;
+ lastp = blockp;
+
+ if (ctx->ecb_remainder_len > 0) {
+ bcopy(blockp, ctx->ecb_copy_to,
+ ctx->ecb_remainder_len);
+ bcopy(blockp + ctx->ecb_remainder_len, datap,
+ need);
+ }
+ } else {
+ cipher(ctx->ecb_keysched, blockp, lastp);
+ crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
+ &out_data_1_len, &out_data_2, block_size);
+
+ /* copy block to where it belongs */
+ bcopy(lastp, out_data_1, out_data_1_len);
+ if (out_data_2 != NULL) {
+ bcopy(lastp + out_data_1_len, out_data_2,
+ block_size - out_data_1_len);
+ }
+ /* update offset */
+ out->cd_offset += block_size;
+ }
+
+ /* Update pointer to next block of data to be processed. */
+ if (ctx->ecb_remainder_len != 0) {
+ datap += need;
+ ctx->ecb_remainder_len = 0;
+ } else {
+ datap += block_size;
+ }
+
+ remainder = (size_t)&data[length] - (size_t)datap;
+
+ /* Incomplete last block. */
+ if (remainder > 0 && remainder < block_size) {
+ bcopy(datap, ctx->ecb_remainder, remainder);
+ ctx->ecb_remainder_len = remainder;
+ ctx->ecb_copy_to = datap;
+ goto out;
+ }
+ ctx->ecb_copy_to = NULL;
+
+ } while (remainder > 0);
+
+out:
+ return (CRYPTO_SUCCESS);
+}
+
+/* ARGSUSED */
+void *
+ecb_alloc_ctx(int kmflag)
+{
+ ecb_ctx_t *ecb_ctx;
+
+ if ((ecb_ctx = kmem_zalloc(sizeof (ecb_ctx_t), kmflag)) == NULL)
+ return (NULL);
+
+ ecb_ctx->ecb_flags = ECB_MODE;
+ return (ecb_ctx);
+}
diff --git a/module/icp/algs/modes/gcm.c b/module/icp/algs/modes/gcm.c
new file mode 100644
index 000000000..9cd8ab1e9
--- /dev/null
+++ b/module/icp/algs/modes/gcm.c
@@ -0,0 +1,748 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#include <sys/zfs_context.h>
+#include <modes/modes.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/impl.h>
+#include <sys/byteorder.h>
+
+#ifdef __amd64
+
+#ifdef _KERNEL
+/* Workaround for no XMM kernel thread save/restore */
+#define KPREEMPT_DISABLE kpreempt_disable()
+#define KPREEMPT_ENABLE kpreempt_enable()
+
+#else
+#define KPREEMPT_DISABLE
+#define KPREEMPT_ENABLE
+#endif /* _KERNEL */
+
+extern void gcm_mul_pclmulqdq(uint64_t *x_in, uint64_t *y, uint64_t *res);
+static int intel_pclmulqdq_instruction_present(void);
+#endif /* __amd64 */
+
+struct aes_block {
+ uint64_t a;
+ uint64_t b;
+};
+
+
+/*
+ * gcm_mul()
+ * Perform a carry-less multiplication (that is, use XOR instead of the
+ * multiply operator) on *x_in and *y and place the result in *res.
+ *
+ * Byte swap the input (*x_in and *y) and the output (*res).
+ *
+ * Note: x_in, y, and res all point to 16-byte numbers (an array of two
+ * 64-bit integers).
+ */
+void
+gcm_mul(uint64_t *x_in, uint64_t *y, uint64_t *res)
+{
+#ifdef __amd64
+ if (intel_pclmulqdq_instruction_present()) {
+ KPREEMPT_DISABLE;
+ gcm_mul_pclmulqdq(x_in, y, res);
+ KPREEMPT_ENABLE;
+ } else
+#endif /* __amd64 */
+ {
+ static const uint64_t R = 0xe100000000000000ULL;
+ struct aes_block z = {0, 0};
+ struct aes_block v;
+ uint64_t x;
+ int i, j;
+
+ v.a = ntohll(y[0]);
+ v.b = ntohll(y[1]);
+
+ for (j = 0; j < 2; j++) {
+ x = ntohll(x_in[j]);
+ for (i = 0; i < 64; i++, x <<= 1) {
+ if (x & 0x8000000000000000ULL) {
+ z.a ^= v.a;
+ z.b ^= v.b;
+ }
+ if (v.b & 1ULL) {
+ v.b = (v.a << 63)|(v.b >> 1);
+ v.a = (v.a >> 1) ^ R;
+ } else {
+ v.b = (v.a << 63)|(v.b >> 1);
+ v.a = v.a >> 1;
+ }
+ }
+ }
+ res[0] = htonll(z.a);
+ res[1] = htonll(z.b);
+ }
+}
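+
+/*
+ * The constant R above is the GCM reduction polynomial
+ * x^128 + x^7 + x^2 + x + 1 in the bit-reflected representation the
+ * mode uses, so the conditional "shift right one, XOR in R" step is a
+ * multiplication by x followed by modular reduction.
+ */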
+
+
+#define GHASH(c, d, t) \
+ xor_block((uint8_t *)(d), (uint8_t *)(c)->gcm_ghash); \
+ gcm_mul((uint64_t *)(void *)(c)->gcm_ghash, (c)->gcm_H, \
+ (uint64_t *)(void *)(t));
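+
+/*
+ * GHASH(c, d, t) computes one step of the standard GHASH recurrence
+ * over GF(2^128):
+ *
+ *	Y[i] = (Y[i-1] ^ X[i]) * H
+ *
+ * where Y accumulates in gcm_ghash, X[i] is the current block of
+ * ciphertext (or AAD), and H = E_K(0^128) is the hash subkey.
+ */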
+
+
+/*
+ * Encrypt multiple blocks of data in GCM mode. Decryption in GCM
+ * mode is handled by a separate function.
+ */
+int
+gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
+ crypto_data_t *out, size_t block_size,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ size_t remainder = length;
+ size_t need = 0;
+ uint8_t *datap = (uint8_t *)data;
+ uint8_t *blockp;
+ uint8_t *lastp;
+ void *iov_or_mp;
+ offset_t offset;
+ uint8_t *out_data_1;
+ uint8_t *out_data_2;
+ size_t out_data_1_len;
+ uint64_t counter;
+ uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
+
+ if (length + ctx->gcm_remainder_len < block_size) {
+ /* accumulate bytes here and return */
+ bcopy(datap,
+ (uint8_t *)ctx->gcm_remainder + ctx->gcm_remainder_len,
+ length);
+ ctx->gcm_remainder_len += length;
+ ctx->gcm_copy_to = datap;
+ return (CRYPTO_SUCCESS);
+ }
+
+ lastp = (uint8_t *)ctx->gcm_cb;
+ if (out != NULL)
+ crypto_init_ptrs(out, &iov_or_mp, &offset);
+
+ do {
+ /* Unprocessed data from last call. */
+ if (ctx->gcm_remainder_len > 0) {
+ need = block_size - ctx->gcm_remainder_len;
+
+ if (need > remainder)
+ return (CRYPTO_DATA_LEN_RANGE);
+
+ bcopy(datap, &((uint8_t *)ctx->gcm_remainder)
+ [ctx->gcm_remainder_len], need);
+
+ blockp = (uint8_t *)ctx->gcm_remainder;
+ } else {
+ blockp = datap;
+ }
+
+ /*
+ * Increment counter. Counter bits are confined
+ * to the bottom 32 bits of the counter block.
+ */
+ counter = ntohll(ctx->gcm_cb[1] & counter_mask);
+ counter = htonll(counter + 1);
+ counter &= counter_mask;
+ ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;
+
+ encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb,
+ (uint8_t *)ctx->gcm_tmp);
+ xor_block(blockp, (uint8_t *)ctx->gcm_tmp);
+
+ lastp = (uint8_t *)ctx->gcm_tmp;
+
+ ctx->gcm_processed_data_len += block_size;
+
+ if (out == NULL) {
+ if (ctx->gcm_remainder_len > 0) {
+ bcopy(blockp, ctx->gcm_copy_to,
+ ctx->gcm_remainder_len);
+ bcopy(blockp + ctx->gcm_remainder_len, datap,
+ need);
+ }
+ } else {
+ crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
+ &out_data_1_len, &out_data_2, block_size);
+
+ /* copy block to where it belongs */
+ if (out_data_1_len == block_size) {
+ copy_block(lastp, out_data_1);
+ } else {
+ bcopy(lastp, out_data_1, out_data_1_len);
+ if (out_data_2 != NULL) {
+ bcopy(lastp + out_data_1_len,
+ out_data_2,
+ block_size - out_data_1_len);
+ }
+ }
+ /* update offset */
+ out->cd_offset += block_size;
+ }
+
+ /* add ciphertext to the hash */
+ GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash);
+
+ /* Update pointer to next block of data to be processed. */
+ if (ctx->gcm_remainder_len != 0) {
+ datap += need;
+ ctx->gcm_remainder_len = 0;
+ } else {
+ datap += block_size;
+ }
+
+ remainder = (size_t)&data[length] - (size_t)datap;
+
+ /* Incomplete last block. */
+ if (remainder > 0 && remainder < block_size) {
+ bcopy(datap, ctx->gcm_remainder, remainder);
+ ctx->gcm_remainder_len = remainder;
+ ctx->gcm_copy_to = datap;
+ goto out;
+ }
+ ctx->gcm_copy_to = NULL;
+
+ } while (remainder > 0);
+out:
+ return (CRYPTO_SUCCESS);
+}
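+
+/*
+ * Illustrative sketch (not part of the original port): the 32-bit
+ * counter increment that the GCM routines in this file repeat inline.
+ * Only the low 32 bits of the second counter-block word may change;
+ * the rest of the block is the fixed, IV-derived prefix.
+ */
+#if 0
+static void
+gcm_incr_counter_example(gcm_ctx_t *ctx)
+{
+	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
+	uint64_t counter;
+
+	counter = ntohll(ctx->gcm_cb[1] & counter_mask);
+	counter = htonll(counter + 1);
+	counter &= counter_mask;
+	ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;
+}
+#endif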
+
+/* ARGSUSED */
+int
+gcm_encrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
+ uint8_t *ghash, *macp = NULL;
+ int i, rv;
+
+ if (out->cd_length <
+ (ctx->gcm_remainder_len + ctx->gcm_tag_len)) {
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+
+ ghash = (uint8_t *)ctx->gcm_ghash;
+
+ if (ctx->gcm_remainder_len > 0) {
+ uint64_t counter;
+ uint8_t *tmpp = (uint8_t *)ctx->gcm_tmp;
+
+ /*
+ * Here is where we deal with data that is not a
+ * multiple of the block size.
+ */
+
+ /*
+ * Increment counter.
+ */
+ counter = ntohll(ctx->gcm_cb[1] & counter_mask);
+ counter = htonll(counter + 1);
+ counter &= counter_mask;
+ ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;
+
+ encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb,
+ (uint8_t *)ctx->gcm_tmp);
+
+ macp = (uint8_t *)ctx->gcm_remainder;
+ bzero(macp + ctx->gcm_remainder_len,
+ block_size - ctx->gcm_remainder_len);
+
+ /* XOR with counter block */
+ for (i = 0; i < ctx->gcm_remainder_len; i++) {
+ macp[i] ^= tmpp[i];
+ }
+
+ /* add ciphertext to the hash */
+ GHASH(ctx, macp, ghash);
+
+ ctx->gcm_processed_data_len += ctx->gcm_remainder_len;
+ }
+
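+	/*
+	 * Compute the final tag: hash in the AAD and ciphertext bit
+	 * lengths, then XOR the GHASH value with E(K, J0), as GCM
+	 * specifies (NIST SP 800-38D).  The tag is left in gcm_ghash.
+	 */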
+ ctx->gcm_len_a_len_c[1] =
+ htonll(CRYPTO_BYTES2BITS(ctx->gcm_processed_data_len));
+ GHASH(ctx, ctx->gcm_len_a_len_c, ghash);
+ encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0,
+ (uint8_t *)ctx->gcm_J0);
+ xor_block((uint8_t *)ctx->gcm_J0, ghash);
+
+ if (ctx->gcm_remainder_len > 0) {
+ rv = crypto_put_output_data(macp, out, ctx->gcm_remainder_len);
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ }
+ out->cd_offset += ctx->gcm_remainder_len;
+ ctx->gcm_remainder_len = 0;
+ rv = crypto_put_output_data(ghash, out, ctx->gcm_tag_len);
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ out->cd_offset += ctx->gcm_tag_len;
+
+ return (CRYPTO_SUCCESS);
+}
+
+/*
+ * This only handles decrypting the last block of the input, which
+ * might not be a full block in length.
+ */
+static void
+gcm_decrypt_incomplete_block(gcm_ctx_t *ctx, size_t block_size, size_t index,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ uint8_t *datap, *outp, *counterp;
+ uint64_t counter;
+ uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
+ int i;
+
+ /*
+ * Increment counter.
+ * Counter bits are confined to the bottom 32 bits
+ */
+ counter = ntohll(ctx->gcm_cb[1] & counter_mask);
+ counter = htonll(counter + 1);
+ counter &= counter_mask;
+ ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;
+
+ datap = (uint8_t *)ctx->gcm_remainder;
+ outp = &((ctx->gcm_pt_buf)[index]);
+ counterp = (uint8_t *)ctx->gcm_tmp;
+
+	/* zero-pad the final partial ciphertext block for the hash */
+ bzero((uint8_t *)ctx->gcm_tmp, block_size);
+ bcopy(datap, (uint8_t *)ctx->gcm_tmp, ctx->gcm_remainder_len);
+
+ /* add ciphertext to the hash */
+ GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash);
+
+ /* decrypt remaining ciphertext */
+ encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, counterp);
+
+ /* XOR with counter block */
+ for (i = 0; i < ctx->gcm_remainder_len; i++) {
+ outp[i] = datap[i] ^ counterp[i];
+ }
+}
+
+/* ARGSUSED */
+int
+gcm_mode_decrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
+ crypto_data_t *out, size_t block_size,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ size_t new_len;
+ uint8_t *new;
+
+ /*
+	 * Copy the contiguous ciphertext input blocks into the plaintext
+	 * buffer.  The ciphertext is decrypted in gcm_decrypt_final().
+ */
+ if (length > 0) {
+ new_len = ctx->gcm_pt_buf_len + length;
+		new = vmem_alloc(new_len, ctx->gcm_kmflag);
+		if (new == NULL)
+			return (CRYPTO_HOST_MEMORY);
+
+		bcopy(ctx->gcm_pt_buf, new, ctx->gcm_pt_buf_len);
+		vmem_free(ctx->gcm_pt_buf, ctx->gcm_pt_buf_len);
+
+ ctx->gcm_pt_buf = new;
+ ctx->gcm_pt_buf_len = new_len;
+ bcopy(data, &ctx->gcm_pt_buf[ctx->gcm_processed_data_len],
+ length);
+ ctx->gcm_processed_data_len += length;
+ }
+
+ ctx->gcm_remainder_len = 0;
+ return (CRYPTO_SUCCESS);
+}
+
+int
+gcm_decrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ size_t pt_len;
+ size_t remainder;
+ uint8_t *ghash;
+ uint8_t *blockp;
+ uint8_t *cbp;
+ uint64_t counter;
+ uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
+ int processed = 0, rv;
+
+ ASSERT(ctx->gcm_processed_data_len == ctx->gcm_pt_buf_len);
+
+ pt_len = ctx->gcm_processed_data_len - ctx->gcm_tag_len;
+ ghash = (uint8_t *)ctx->gcm_ghash;
+ blockp = ctx->gcm_pt_buf;
+ remainder = pt_len;
+ while (remainder > 0) {
+ /* Incomplete last block */
+ if (remainder < block_size) {
+ bcopy(blockp, ctx->gcm_remainder, remainder);
+ ctx->gcm_remainder_len = remainder;
+ /*
+			 * not expecting any more ciphertext, just
+ * compute plaintext for the remaining input
+ */
+ gcm_decrypt_incomplete_block(ctx, block_size,
+ processed, encrypt_block, xor_block);
+ ctx->gcm_remainder_len = 0;
+ goto out;
+ }
+ /* add ciphertext to the hash */
+ GHASH(ctx, blockp, ghash);
+
+ /*
+ * Increment counter.
+ * Counter bits are confined to the bottom 32 bits
+ */
+ counter = ntohll(ctx->gcm_cb[1] & counter_mask);
+ counter = htonll(counter + 1);
+ counter &= counter_mask;
+ ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;
+
+ cbp = (uint8_t *)ctx->gcm_tmp;
+ encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, cbp);
+
+ /* XOR with ciphertext */
+ xor_block(cbp, blockp);
+
+ processed += block_size;
+ blockp += block_size;
+ remainder -= block_size;
+ }
+out:
+ ctx->gcm_len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(pt_len));
+ GHASH(ctx, ctx->gcm_len_a_len_c, ghash);
+ encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0,
+ (uint8_t *)ctx->gcm_J0);
+ xor_block((uint8_t *)ctx->gcm_J0, ghash);
+
+ /* compare the input authentication tag with what we calculated */
+ if (bcmp(&ctx->gcm_pt_buf[pt_len], ghash, ctx->gcm_tag_len)) {
+ /* They don't match */
+ return (CRYPTO_INVALID_MAC);
+ } else {
+ rv = crypto_put_output_data(ctx->gcm_pt_buf, out, pt_len);
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ out->cd_offset += pt_len;
+ }
+ return (CRYPTO_SUCCESS);
+}
+
+static int
+gcm_validate_args(CK_AES_GCM_PARAMS *gcm_param)
+{
+ size_t tag_len;
+
+ /*
+ * Check the length of the authentication tag (in bits).
+ */
+ tag_len = gcm_param->ulTagBits;
+ switch (tag_len) {
+ case 32:
+ case 64:
+ case 96:
+ case 104:
+ case 112:
+ case 120:
+ case 128:
+ break;
+ default:
+ return (CRYPTO_MECHANISM_PARAM_INVALID);
+ }
+
+ if (gcm_param->ulIvLen == 0)
+ return (CRYPTO_MECHANISM_PARAM_INVALID);
+
+ return (CRYPTO_SUCCESS);
+}
+
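+/*
+ * Derive the pre-counter block J0 from the IV, per NIST SP 800-38D:
+ * for the common 96-bit IV, J0 = IV || 0^31 || 1; for any other IV
+ * length, J0 = GHASH(IV zero-padded to a block multiple || 64 zero
+ * bits || the 64-bit IV bit length).  J0 seeds the counter block and
+ * is saved for the final tag computation.
+ */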
+static void
+gcm_format_initial_blocks(uchar_t *iv, ulong_t iv_len,
+ gcm_ctx_t *ctx, size_t block_size,
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ uint8_t *cb;
+ ulong_t remainder = iv_len;
+ ulong_t processed = 0;
+ uint8_t *datap, *ghash;
+ uint64_t len_a_len_c[2];
+
+ ghash = (uint8_t *)ctx->gcm_ghash;
+ cb = (uint8_t *)ctx->gcm_cb;
+ if (iv_len == 12) {
+ bcopy(iv, cb, 12);
+ cb[12] = 0;
+ cb[13] = 0;
+ cb[14] = 0;
+ cb[15] = 1;
+ /* J0 will be used again in the final */
+ copy_block(cb, (uint8_t *)ctx->gcm_J0);
+ } else {
+ /* GHASH the IV */
+ do {
+ if (remainder < block_size) {
+ bzero(cb, block_size);
+ bcopy(&(iv[processed]), cb, remainder);
+ datap = (uint8_t *)cb;
+ remainder = 0;
+ } else {
+ datap = (uint8_t *)(&(iv[processed]));
+ processed += block_size;
+ remainder -= block_size;
+ }
+ GHASH(ctx, datap, ghash);
+ } while (remainder > 0);
+
+ len_a_len_c[0] = 0;
+ len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(iv_len));
+ GHASH(ctx, len_a_len_c, ctx->gcm_J0);
+
+ /* J0 will be used again in the final */
+ copy_block((uint8_t *)ctx->gcm_J0, (uint8_t *)cb);
+ }
+}
+
+/*
+ * The following function is called at encrypt or decrypt init time
+ * for AES GCM mode.
+ */
+int
+gcm_init(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
+ unsigned char *auth_data, size_t auth_data_len, size_t block_size,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ uint8_t *ghash, *datap, *authp;
+ size_t remainder, processed;
+
+ /* encrypt zero block to get subkey H */
+ bzero(ctx->gcm_H, sizeof (ctx->gcm_H));
+ encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_H,
+ (uint8_t *)ctx->gcm_H);
+
+ gcm_format_initial_blocks(iv, iv_len, ctx, block_size,
+ copy_block, xor_block);
+
+ authp = (uint8_t *)ctx->gcm_tmp;
+ ghash = (uint8_t *)ctx->gcm_ghash;
+ bzero(authp, block_size);
+ bzero(ghash, block_size);
+
+ processed = 0;
+ remainder = auth_data_len;
+ do {
+ if (remainder < block_size) {
+ /*
+ * There's not a block full of data, pad rest of
+ * buffer with zero
+ */
+ bzero(authp, block_size);
+ bcopy(&(auth_data[processed]), authp, remainder);
+ datap = (uint8_t *)authp;
+ remainder = 0;
+ } else {
+ datap = (uint8_t *)(&(auth_data[processed]));
+ processed += block_size;
+ remainder -= block_size;
+ }
+
+ /* add auth data to the hash */
+ GHASH(ctx, datap, ghash);
+
+ } while (remainder > 0);
+
+ return (CRYPTO_SUCCESS);
+}
+
+int
+gcm_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ int rv;
+ CK_AES_GCM_PARAMS *gcm_param;
+
+ if (param != NULL) {
+ gcm_param = (CK_AES_GCM_PARAMS *)(void *)param;
+
+ if ((rv = gcm_validate_args(gcm_param)) != 0) {
+ return (rv);
+ }
+
+ gcm_ctx->gcm_tag_len = gcm_param->ulTagBits;
+ gcm_ctx->gcm_tag_len >>= 3;
+ gcm_ctx->gcm_processed_data_len = 0;
+
+ /* these values are in bits */
+ gcm_ctx->gcm_len_a_len_c[0]
+ = htonll(CRYPTO_BYTES2BITS(gcm_param->ulAADLen));
+
+ rv = CRYPTO_SUCCESS;
+ gcm_ctx->gcm_flags |= GCM_MODE;
+ } else {
+ rv = CRYPTO_MECHANISM_PARAM_INVALID;
+ goto out;
+ }
+
+ if (gcm_init(gcm_ctx, gcm_param->pIv, gcm_param->ulIvLen,
+ gcm_param->pAAD, gcm_param->ulAADLen, block_size,
+ encrypt_block, copy_block, xor_block) != 0) {
+ rv = CRYPTO_MECHANISM_PARAM_INVALID;
+ }
+out:
+ return (rv);
+}
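+
+/*
+ * Illustrative sketch (not part of the original port): driving
+ * gcm_init_ctx() with a PKCS#11-style parameter block.  The key
+ * schedule (gcm_ctx->gcm_keysched) and the block callbacks would come
+ * from the AES provider; they are left abstract here, and the helper
+ * name is hypothetical.
+ */
+#if 0
+static int
+gcm_init_ctx_example(gcm_ctx_t *gcm_ctx, uint8_t *iv, uint8_t *aad,
+    size_t aad_len,
+    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+    void (*copy_block)(uint8_t *, uint8_t *),
+    void (*xor_block)(uint8_t *, uint8_t *))
+{
+	CK_AES_GCM_PARAMS params;
+
+	params.pIv = iv;
+	params.ulIvLen = 12;		/* 96-bit IV, the common case */
+	params.pAAD = aad;
+	params.ulAADLen = aad_len;
+	params.ulTagBits = 128;		/* full-length tag */
+
+	return (gcm_init_ctx(gcm_ctx, (char *)&params, 16,
+	    encrypt_block, copy_block, xor_block));
+}
+#endif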
+
+int
+gmac_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
+ void (*copy_block)(uint8_t *, uint8_t *),
+ void (*xor_block)(uint8_t *, uint8_t *))
+{
+ int rv;
+ CK_AES_GMAC_PARAMS *gmac_param;
+
+ if (param != NULL) {
+ gmac_param = (CK_AES_GMAC_PARAMS *)(void *)param;
+
+ gcm_ctx->gcm_tag_len = CRYPTO_BITS2BYTES(AES_GMAC_TAG_BITS);
+ gcm_ctx->gcm_processed_data_len = 0;
+
+ /* these values are in bits */
+ gcm_ctx->gcm_len_a_len_c[0]
+ = htonll(CRYPTO_BYTES2BITS(gmac_param->ulAADLen));
+
+ rv = CRYPTO_SUCCESS;
+ gcm_ctx->gcm_flags |= GMAC_MODE;
+ } else {
+ rv = CRYPTO_MECHANISM_PARAM_INVALID;
+ goto out;
+ }
+
+ if (gcm_init(gcm_ctx, gmac_param->pIv, AES_GMAC_IV_LEN,
+ gmac_param->pAAD, gmac_param->ulAADLen, block_size,
+ encrypt_block, copy_block, xor_block) != 0) {
+ rv = CRYPTO_MECHANISM_PARAM_INVALID;
+ }
+out:
+ return (rv);
+}
+
+void *
+gcm_alloc_ctx(int kmflag)
+{
+ gcm_ctx_t *gcm_ctx;
+
+ if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL)
+ return (NULL);
+
+ gcm_ctx->gcm_flags = GCM_MODE;
+ return (gcm_ctx);
+}
+
+void *
+gmac_alloc_ctx(int kmflag)
+{
+ gcm_ctx_t *gcm_ctx;
+
+ if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL)
+ return (NULL);
+
+ gcm_ctx->gcm_flags = GMAC_MODE;
+ return (gcm_ctx);
+}
+
+void
+gcm_set_kmflag(gcm_ctx_t *ctx, int kmflag)
+{
+ ctx->gcm_kmflag = kmflag;
+}
+
+
+#ifdef __amd64
+
+#define INTEL_PCLMULQDQ_FLAG (1 << 1)
+
+/*
+ * Return 1 if executing on Intel with PCLMULQDQ instructions,
+ * otherwise 0 (i.e., Intel without PCLMULQDQ, or a non-Intel CPU
+ * such as AMD).  Cache the result, as the CPU can't change.
+ *
+ * Note: the Illumos original uses getisax() in userland and
+ * is_x86_featureset() in the kernel; this port queries cpuid directly.
+ */
+static int
+intel_pclmulqdq_instruction_present(void)
+{
+ static int cached_result = -1;
+ unsigned eax, ebx, ecx, edx;
+ unsigned func, subfunc;
+
+ if (cached_result == -1) { /* first time */
+ /* check for an intel cpu */
+ func = 0;
+ subfunc = 0;
+
+ __asm__ __volatile__(
+ "cpuid"
+ : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+ : "a"(func), "c"(subfunc));
+
+ if (memcmp((char *) (&ebx), "Genu", 4) == 0 &&
+ memcmp((char *) (&edx), "ineI", 4) == 0 &&
+ memcmp((char *) (&ecx), "ntel", 4) == 0) {
+
+ func = 1;
+ subfunc = 0;
+
+			/* check for the pclmulqdq instruction */
+ __asm__ __volatile__(
+ "cpuid"
+ : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+ : "a"(func), "c"(subfunc));
+
+ cached_result = !!(ecx & INTEL_PCLMULQDQ_FLAG);
+ } else {
+ cached_result = 0;
+ }
+ }
+
+ return (cached_result);
+}
+
+#endif /* __amd64 */
diff --git a/module/icp/algs/modes/modes.c b/module/icp/algs/modes/modes.c
new file mode 100644
index 000000000..1d33c4268
--- /dev/null
+++ b/module/icp/algs/modes/modes.c
@@ -0,0 +1,159 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/zfs_context.h>
+#include <modes/modes.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/impl.h>
+
+/*
+ * Initialize by setting iov_or_mp to point to the current iovec or mp,
+ * and by setting current_offset to an offset within the current iovec or mp.
+ */
+void
+crypto_init_ptrs(crypto_data_t *out, void **iov_or_mp, offset_t *current_offset)
+{
+ offset_t offset;
+
+ switch (out->cd_format) {
+ case CRYPTO_DATA_RAW:
+ *current_offset = out->cd_offset;
+ break;
+
+ case CRYPTO_DATA_UIO: {
+ uio_t *uiop = out->cd_uio;
+ uintptr_t vec_idx;
+
+ offset = out->cd_offset;
+ for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
+ offset >= uiop->uio_iov[vec_idx].iov_len;
+ offset -= uiop->uio_iov[vec_idx++].iov_len)
+ ;
+
+ *current_offset = offset;
+ *iov_or_mp = (void *)vec_idx;
+ break;
+ }
+ } /* end switch */
+}
+
+/*
+ * Get pointers for where in the output to copy a block of encrypted or
+ * decrypted data. The iov_or_mp argument stores a pointer to the current
+ * iovec or mp, and offset stores an offset into the current iovec or mp.
+ */
+void
+crypto_get_ptrs(crypto_data_t *out, void **iov_or_mp, offset_t *current_offset,
+ uint8_t **out_data_1, size_t *out_data_1_len, uint8_t **out_data_2,
+ size_t amt)
+{
+ offset_t offset;
+
+ switch (out->cd_format) {
+ case CRYPTO_DATA_RAW: {
+ iovec_t *iov;
+
+ offset = *current_offset;
+ iov = &out->cd_raw;
+ if ((offset + amt) <= iov->iov_len) {
+ /* one block fits */
+ *out_data_1 = (uint8_t *)iov->iov_base + offset;
+ *out_data_1_len = amt;
+ *out_data_2 = NULL;
+ *current_offset = offset + amt;
+ }
+ break;
+ }
+
+ case CRYPTO_DATA_UIO: {
+ uio_t *uio = out->cd_uio;
+ iovec_t *iov;
+ offset_t offset;
+ uintptr_t vec_idx;
+ uint8_t *p;
+
+ offset = *current_offset;
+ vec_idx = (uintptr_t)(*iov_or_mp);
+ iov = (iovec_t *)&uio->uio_iov[vec_idx];
+ p = (uint8_t *)iov->iov_base + offset;
+ *out_data_1 = p;
+
+ if (offset + amt <= iov->iov_len) {
+ /* can fit one block into this iov */
+ *out_data_1_len = amt;
+ *out_data_2 = NULL;
+ *current_offset = offset + amt;
+ } else {
+ /* one block spans two iovecs */
+ *out_data_1_len = iov->iov_len - offset;
+ if (vec_idx == uio->uio_iovcnt)
+ return;
+ vec_idx++;
+ iov = (iovec_t *)&uio->uio_iov[vec_idx];
+ *out_data_2 = (uint8_t *)iov->iov_base;
+ *current_offset = amt - *out_data_1_len;
+ }
+ *iov_or_mp = (void *)vec_idx;
+ break;
+ }
+ } /* end switch */
+}
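+
+/*
+ * Illustrative sketch (not part of the original port): the calling
+ * pattern the mode implementations use with these helpers.  The
+ * cursor is initialized once; each block then asks for block_size
+ * bytes of output space, which may span two iovecs (out_data_2
+ * non-NULL).  The helper name is hypothetical.
+ */
+#if 0
+static void
+crypto_ptrs_example(crypto_data_t *out, uint8_t *block, size_t block_size)
+{
+	void *iov_or_mp;
+	offset_t offset;
+	uint8_t *out_data_1, *out_data_2;
+	size_t out_data_1_len;
+
+	crypto_init_ptrs(out, &iov_or_mp, &offset);
+	crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
+	    &out_data_1_len, &out_data_2, block_size);
+
+	bcopy(block, out_data_1, out_data_1_len);
+	if (out_data_2 != NULL)
+		bcopy(block + out_data_1_len, out_data_2,
+		    block_size - out_data_1_len);
+	out->cd_offset += block_size;
+}
+#endif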
+
+void
+crypto_free_mode_ctx(void *ctx)
+{
+ common_ctx_t *common_ctx = (common_ctx_t *)ctx;
+
+ switch (common_ctx->cc_flags &
+ (ECB_MODE|CBC_MODE|CTR_MODE|CCM_MODE|GCM_MODE|GMAC_MODE)) {
+ case ECB_MODE:
+ kmem_free(common_ctx, sizeof (ecb_ctx_t));
+ break;
+
+ case CBC_MODE:
+ kmem_free(common_ctx, sizeof (cbc_ctx_t));
+ break;
+
+ case CTR_MODE:
+ kmem_free(common_ctx, sizeof (ctr_ctx_t));
+ break;
+
+ case CCM_MODE:
+ if (((ccm_ctx_t *)ctx)->ccm_pt_buf != NULL)
+ vmem_free(((ccm_ctx_t *)ctx)->ccm_pt_buf,
+ ((ccm_ctx_t *)ctx)->ccm_data_len);
+
+ kmem_free(ctx, sizeof (ccm_ctx_t));
+ break;
+
+ case GCM_MODE:
+ case GMAC_MODE:
+ if (((gcm_ctx_t *)ctx)->gcm_pt_buf != NULL)
+ vmem_free(((gcm_ctx_t *)ctx)->gcm_pt_buf,
+ ((gcm_ctx_t *)ctx)->gcm_pt_buf_len);
+
+ kmem_free(ctx, sizeof (gcm_ctx_t));
+ }
+}
diff --git a/module/icp/algs/sha1/sha1.c b/module/icp/algs/sha1/sha1.c
new file mode 100644
index 000000000..b826c54ad
--- /dev/null
+++ b/module/icp/algs/sha1/sha1.c
@@ -0,0 +1,663 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * The basic framework for this code came from the reference
+ * implementation for MD5. That implementation is Copyright (C)
+ * 1991-2, RSA Data Security, Inc. Created 1991. All rights reserved.
+ *
+ * License to copy and use this software is granted provided that it
+ * is identified as the "RSA Data Security, Inc. MD5 Message-Digest
+ * Algorithm" in all material mentioning or referencing this software
+ * or this function.
+ *
+ * License is also granted to make and use derivative works provided
+ * that such works are identified as "derived from the RSA Data
+ * Security, Inc. MD5 Message-Digest Algorithm" in all material
+ * mentioning or referencing the derived work.
+ *
+ * RSA Data Security, Inc. makes no representations concerning either
+ * the merchantability of this software or the suitability of this
+ * software for any particular purpose. It is provided "as is"
+ * without express or implied warranty of any kind.
+ *
+ * These notices must be retained in any copies of any part of this
+ * documentation and/or software.
+ *
+ * NOTE: Cleaned-up and optimized version of SHA1, based on the FIPS 180-1
+ * standard, available at http://www.itl.nist.gov/fipspubs/fip180-1.htm
+ * Not as fast as one would like -- further optimizations are encouraged
+ * and appreciated.
+ */
+
+#include <sys/zfs_context.h>
+#include <sha1/sha1.h>
+#include <sha1/sha1_consts.h>
+
+#ifdef _LITTLE_ENDIAN
+#include <sys/byteorder.h>
+#define HAVE_HTONL
+#endif
+
+#define _RESTRICT_KYWD
+
+static void Encode(uint8_t *, const uint32_t *, size_t);
+
+#if defined(__amd64)
+
+#define SHA1_TRANSFORM(ctx, in) sha1_block_data_order((ctx), (in), 1)
+#define SHA1_TRANSFORM_BLOCKS(ctx, in, num) sha1_block_data_order((ctx), \
+ (in), (num))
+
+void sha1_block_data_order(SHA1_CTX *ctx, const void *inpp, size_t num_blocks);
+
+#else
+
+#define SHA1_TRANSFORM(ctx, in) SHA1Transform((ctx), (in))
+
+static void SHA1Transform(SHA1_CTX *, const uint8_t *);
+
+#endif
+
+
+static uint8_t PADDING[64] = { 0x80, /* all zeros */ };
+
+/*
+ * F, G, and H are the basic SHA1 functions.
+ */
+#define F(b, c, d) (((b) & (c)) | ((~b) & (d)))
+#define G(b, c, d) ((b) ^ (c) ^ (d))
+#define H(b, c, d) (((b) & (c)) | (((b)|(c)) & (d)))
+
+/*
+ * ROTATE_LEFT rotates x left n bits.
+ */
+
+#if defined(__GNUC__) && defined(_LP64)
+static __inline__ uint64_t
+ROTATE_LEFT(uint64_t value, uint32_t n)
+{
+ uint32_t t32;
+
+ t32 = (uint32_t)value;
+ return ((t32 << n) | (t32 >> (32 - n)));
+}
+
+#else
+
+#define ROTATE_LEFT(x, n) \
+ (((x) << (n)) | ((x) >> ((sizeof (x) * NBBY)-(n))))
+
+#endif
+
+
+/*
+ * SHA1Init()
+ *
+ * purpose: initializes the sha1 context and begins an sha1 digest operation
+ * input: SHA1_CTX * : the context to initialize.
+ * output: void
+ */
+
+void
+SHA1Init(SHA1_CTX *ctx)
+{
+ ctx->count[0] = ctx->count[1] = 0;
+
+ /*
+ * load magic initialization constants. Tell lint
+ * that these constants are unsigned by using U.
+ */
+
+ ctx->state[0] = 0x67452301U;
+ ctx->state[1] = 0xefcdab89U;
+ ctx->state[2] = 0x98badcfeU;
+ ctx->state[3] = 0x10325476U;
+ ctx->state[4] = 0xc3d2e1f0U;
+}
+
+void
+SHA1Update(SHA1_CTX *ctx, const void *inptr, size_t input_len)
+{
+ uint32_t i, buf_index, buf_len;
+ const uint8_t *input = inptr;
+#if defined(__amd64)
+ uint32_t block_count;
+#endif /* __amd64 */
+
+ /* check for noop */
+ if (input_len == 0)
+ return;
+
+ /* compute number of bytes mod 64 */
+ buf_index = (ctx->count[1] >> 3) & 0x3F;
+
+	/*
+	 * Update the number of bits: the 64-bit bit count lives in two
+	 * 32-bit words.  The comparison below detects carry out of the
+	 * low word, and (input_len >> 29) adds the high-order bits of
+	 * input_len * 8 that do not fit in the low word.
+	 */
+ if ((ctx->count[1] += (input_len << 3)) < (input_len << 3))
+ ctx->count[0]++;
+
+ ctx->count[0] += (input_len >> 29);
+
+ buf_len = 64 - buf_index;
+
+ /* transform as many times as possible */
+ i = 0;
+ if (input_len >= buf_len) {
+
+ /*
+ * general optimization:
+ *
+ * only do initial bcopy() and SHA1Transform() if
+ * buf_index != 0. if buf_index == 0, we're just
+ * wasting our time doing the bcopy() since there
+ * wasn't any data left over from a previous call to
+ * SHA1Update().
+ */
+
+ if (buf_index) {
+ bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len);
+ SHA1_TRANSFORM(ctx, ctx->buf_un.buf8);
+ i = buf_len;
+ }
+
+#if !defined(__amd64)
+ for (; i + 63 < input_len; i += 64)
+ SHA1_TRANSFORM(ctx, &input[i]);
+#else
+ block_count = (input_len - i) >> 6;
+ if (block_count > 0) {
+ SHA1_TRANSFORM_BLOCKS(ctx, &input[i], block_count);
+ i += block_count << 6;
+ }
+#endif /* !__amd64 */
+
+ /*
+ * general optimization:
+ *
+ * if i and input_len are the same, return now instead
+ * of calling bcopy(), since the bcopy() in this case
+ * will be an expensive nop.
+ */
+
+ if (input_len == i)
+ return;
+
+ buf_index = 0;
+ }
+
+ /* buffer remaining input */
+ bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i);
+}
+
+/*
+ * SHA1Final()
+ *
+ * purpose: ends an sha1 digest operation, finalizing the message digest and
+ * zeroing the context.
+ * input: uchar_t * : A buffer to store the digest.
+ * : The function actually uses void* because many
+ * : callers pass things other than uchar_t here.
+ * SHA1_CTX * : the context to finalize, save, and zero
+ * output: void
+ */
+
+void
+SHA1Final(void *digest, SHA1_CTX *ctx)
+{
+ uint8_t bitcount_be[sizeof (ctx->count)];
+ uint32_t index = (ctx->count[1] >> 3) & 0x3f;
+
+ /* store bit count, big endian */
+ Encode(bitcount_be, ctx->count, sizeof (bitcount_be));
+
+ /* pad out to 56 mod 64 */
+ SHA1Update(ctx, PADDING, ((index < 56) ? 56 : 120) - index);
+
+ /* append length (before padding) */
+ SHA1Update(ctx, bitcount_be, sizeof (bitcount_be));
+
+ /* store state in digest */
+ Encode(digest, ctx->state, sizeof (ctx->state));
+
+ /* zeroize sensitive information */
+ bzero(ctx, sizeof (*ctx));
+}
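+
+/*
+ * Illustrative sketch (not part of the original port): the standard
+ * init/update/final calling sequence for this SHA1 implementation.
+ * The helper name is hypothetical.
+ */
+#if 0
+static void
+sha1_digest_example(const void *msg, size_t msg_len, uint8_t digest[20])
+{
+	SHA1_CTX ctx;
+
+	SHA1Init(&ctx);
+	SHA1Update(&ctx, msg, msg_len);	/* may be called repeatedly */
+	SHA1Final(digest, &ctx);	/* also zeroizes the context */
+}
+#endif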
+
+
+#if !defined(__amd64)
+
+typedef uint32_t sha1word;
+
+/*
+ * sparc optimization:
+ *
+ * on the sparc, we can load big endian 32-bit data easily. note that
+ * special care must be taken to ensure the address is 32-bit aligned.
+ * in the interest of speed, we don't check to make sure, since
+ * careful programming can guarantee this for us.
+ */
+
+#if defined(_BIG_ENDIAN)
+#define LOAD_BIG_32(addr) (*(uint32_t *)(addr))
+
+#elif defined(HAVE_HTONL)
+#define LOAD_BIG_32(addr) htonl(*((uint32_t *)(addr)))
+
+#else
+/* little endian -- will work on big endian, but slowly */
+#define LOAD_BIG_32(addr) \
+ (((addr)[0] << 24) | ((addr)[1] << 16) | ((addr)[2] << 8) | (addr)[3])
+#endif /* _BIG_ENDIAN */
+
+/*
+ * SHA1Transform()
+ */
+#if defined(W_ARRAY)
+#define W(n) w[n]
+#else /* !defined(W_ARRAY) */
+#define W(n) w_ ## n
+#endif /* !defined(W_ARRAY) */
+
+void /* CSTYLED */
+SHA1Transform(SHA1_CTX *ctx, const uint8_t blk[64])
+{
+ /* CSTYLED */
+ sha1word a = ctx->state[0];
+ sha1word b = ctx->state[1];
+ sha1word c = ctx->state[2];
+ sha1word d = ctx->state[3];
+ sha1word e = ctx->state[4];
+
+#if defined(W_ARRAY)
+ sha1word w[16];
+#else /* !defined(W_ARRAY) */
+ sha1word w_0, w_1, w_2, w_3, w_4, w_5, w_6, w_7;
+ sha1word w_8, w_9, w_10, w_11, w_12, w_13, w_14, w_15;
+#endif /* !defined(W_ARRAY) */
+
+ W(0) = LOAD_BIG_32((void *)(blk + 0));
+ W(1) = LOAD_BIG_32((void *)(blk + 4));
+ W(2) = LOAD_BIG_32((void *)(blk + 8));
+ W(3) = LOAD_BIG_32((void *)(blk + 12));
+ W(4) = LOAD_BIG_32((void *)(blk + 16));
+ W(5) = LOAD_BIG_32((void *)(blk + 20));
+ W(6) = LOAD_BIG_32((void *)(blk + 24));
+ W(7) = LOAD_BIG_32((void *)(blk + 28));
+ W(8) = LOAD_BIG_32((void *)(blk + 32));
+ W(9) = LOAD_BIG_32((void *)(blk + 36));
+ W(10) = LOAD_BIG_32((void *)(blk + 40));
+ W(11) = LOAD_BIG_32((void *)(blk + 44));
+ W(12) = LOAD_BIG_32((void *)(blk + 48));
+ W(13) = LOAD_BIG_32((void *)(blk + 52));
+ W(14) = LOAD_BIG_32((void *)(blk + 56));
+ W(15) = LOAD_BIG_32((void *)(blk + 60));
+
+ /*
+ * general optimization:
+ *
+ * even though this approach is described in the standard as
+ * being slower algorithmically, it is 30-40% faster than the
+ * "faster" version under SPARC, because this version has more
+ * of the constraints specified at compile-time and uses fewer
+ * variables (and therefore has better register utilization)
+ * than its "speedier" brother. (i've tried both, trust me)
+ *
+ * for either method given in the spec, there is an "assignment"
+ * phase where the following takes place:
+ *
+ * tmp = (main_computation);
+ * e = d; d = c; c = rotate_left(b, 30); b = a; a = tmp;
+ *
+ * we can make the algorithm go faster by not doing this work,
+ * but just pretending that `d' is now `e', etc. this works
+ * really well and obviates the need for a temporary variable.
+ * however, we still explicitly perform the rotate action,
+ * since it is cheaper on SPARC to do it once than to have to
+ * do it over and over again.
+ */
+
+ /* round 1 */
+ e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(0) + SHA1_CONST(0); /* 0 */
+ b = ROTATE_LEFT(b, 30);
+
+ d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(1) + SHA1_CONST(0); /* 1 */
+ a = ROTATE_LEFT(a, 30);
+
+ c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(2) + SHA1_CONST(0); /* 2 */
+ e = ROTATE_LEFT(e, 30);
+
+ b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(3) + SHA1_CONST(0); /* 3 */
+ d = ROTATE_LEFT(d, 30);
+
+ a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(4) + SHA1_CONST(0); /* 4 */
+ c = ROTATE_LEFT(c, 30);
+
+ e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(5) + SHA1_CONST(0); /* 5 */
+ b = ROTATE_LEFT(b, 30);
+
+ d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(6) + SHA1_CONST(0); /* 6 */
+ a = ROTATE_LEFT(a, 30);
+
+ c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(7) + SHA1_CONST(0); /* 7 */
+ e = ROTATE_LEFT(e, 30);
+
+ b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(8) + SHA1_CONST(0); /* 8 */
+ d = ROTATE_LEFT(d, 30);
+
+ a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(9) + SHA1_CONST(0); /* 9 */
+ c = ROTATE_LEFT(c, 30);
+
+ e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(10) + SHA1_CONST(0); /* 10 */
+ b = ROTATE_LEFT(b, 30);
+
+ d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(11) + SHA1_CONST(0); /* 11 */
+ a = ROTATE_LEFT(a, 30);
+
+ c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(12) + SHA1_CONST(0); /* 12 */
+ e = ROTATE_LEFT(e, 30);
+
+ b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(13) + SHA1_CONST(0); /* 13 */
+ d = ROTATE_LEFT(d, 30);
+
+ a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(14) + SHA1_CONST(0); /* 14 */
+ c = ROTATE_LEFT(c, 30);
+
+ e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(15) + SHA1_CONST(0); /* 15 */
+ b = ROTATE_LEFT(b, 30);
+
+ W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1); /* 16 */
+ d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(0) + SHA1_CONST(0);
+ a = ROTATE_LEFT(a, 30);
+
+ W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1); /* 17 */
+ c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(1) + SHA1_CONST(0);
+ e = ROTATE_LEFT(e, 30);
+
+ W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1); /* 18 */
+ b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(2) + SHA1_CONST(0);
+ d = ROTATE_LEFT(d, 30);
+
+ W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1); /* 19 */
+ a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(3) + SHA1_CONST(0);
+ c = ROTATE_LEFT(c, 30);
+
+ /* round 2 */
+ W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1); /* 20 */
+ e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(4) + SHA1_CONST(1);
+ b = ROTATE_LEFT(b, 30);
+
+ W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1); /* 21 */
+ d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(5) + SHA1_CONST(1);
+ a = ROTATE_LEFT(a, 30);
+
+ W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1); /* 22 */
+ c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(6) + SHA1_CONST(1);
+ e = ROTATE_LEFT(e, 30);
+
+ W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1); /* 23 */
+ b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(7) + SHA1_CONST(1);
+ d = ROTATE_LEFT(d, 30);
+
+ W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1); /* 24 */
+ a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(8) + SHA1_CONST(1);
+ c = ROTATE_LEFT(c, 30);
+
+ W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1); /* 25 */
+ e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(9) + SHA1_CONST(1);
+ b = ROTATE_LEFT(b, 30);
+
+ W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1); /* 26 */
+ d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(10) + SHA1_CONST(1);
+ a = ROTATE_LEFT(a, 30);
+
+ W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1); /* 27 */
+ c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(11) + SHA1_CONST(1);
+ e = ROTATE_LEFT(e, 30);
+
+ W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1); /* 28 */
+ b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(12) + SHA1_CONST(1);
+ d = ROTATE_LEFT(d, 30);
+
+ W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1); /* 29 */
+ a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(13) + SHA1_CONST(1);
+ c = ROTATE_LEFT(c, 30);
+
+ W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1); /* 30 */
+ e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(14) + SHA1_CONST(1);
+ b = ROTATE_LEFT(b, 30);
+
+ W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1); /* 31 */
+ d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(15) + SHA1_CONST(1);
+ a = ROTATE_LEFT(a, 30);
+
+ W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1); /* 32 */
+ c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(0) + SHA1_CONST(1);
+ e = ROTATE_LEFT(e, 30);
+
+ W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1); /* 33 */
+ b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(1) + SHA1_CONST(1);
+ d = ROTATE_LEFT(d, 30);
+
+ W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1); /* 34 */
+ a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(2) + SHA1_CONST(1);
+ c = ROTATE_LEFT(c, 30);
+
+ W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1); /* 35 */
+ e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(3) + SHA1_CONST(1);
+ b = ROTATE_LEFT(b, 30);
+
+ W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1); /* 36 */
+ d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(4) + SHA1_CONST(1);
+ a = ROTATE_LEFT(a, 30);
+
+ W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1); /* 37 */
+ c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(5) + SHA1_CONST(1);
+ e = ROTATE_LEFT(e, 30);
+
+ W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1); /* 38 */
+ b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(6) + SHA1_CONST(1);
+ d = ROTATE_LEFT(d, 30);
+
+ W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1); /* 39 */
+ a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(7) + SHA1_CONST(1);
+ c = ROTATE_LEFT(c, 30);
+
+ /* round 3 */
+ W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1); /* 40 */
+ e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(8) + SHA1_CONST(2);
+ b = ROTATE_LEFT(b, 30);
+
+ W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1); /* 41 */
+ d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(9) + SHA1_CONST(2);
+ a = ROTATE_LEFT(a, 30);
+
+ W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1); /* 42 */
+ c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(10) + SHA1_CONST(2);
+ e = ROTATE_LEFT(e, 30);
+
+ W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1); /* 43 */
+ b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(11) + SHA1_CONST(2);
+ d = ROTATE_LEFT(d, 30);
+
+ W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1); /* 44 */
+ a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(12) + SHA1_CONST(2);
+ c = ROTATE_LEFT(c, 30);
+
+ W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1); /* 45 */
+ e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(13) + SHA1_CONST(2);
+ b = ROTATE_LEFT(b, 30);
+
+ W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1); /* 46 */
+ d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(14) + SHA1_CONST(2);
+ a = ROTATE_LEFT(a, 30);
+
+ W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1); /* 47 */
+ c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(15) + SHA1_CONST(2);
+ e = ROTATE_LEFT(e, 30);
+
+ W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1); /* 48 */
+ b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(0) + SHA1_CONST(2);
+ d = ROTATE_LEFT(d, 30);
+
+ W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1); /* 49 */
+ a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(1) + SHA1_CONST(2);
+ c = ROTATE_LEFT(c, 30);
+
+ W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1); /* 50 */
+ e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(2) + SHA1_CONST(2);
+ b = ROTATE_LEFT(b, 30);
+
+ W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1); /* 51 */
+ d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(3) + SHA1_CONST(2);
+ a = ROTATE_LEFT(a, 30);
+
+ W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1); /* 52 */
+ c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(4) + SHA1_CONST(2);
+ e = ROTATE_LEFT(e, 30);
+
+ W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1); /* 53 */
+ b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(5) + SHA1_CONST(2);
+ d = ROTATE_LEFT(d, 30);
+
+ W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1); /* 54 */
+ a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(6) + SHA1_CONST(2);
+ c = ROTATE_LEFT(c, 30);
+
+ W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1); /* 55 */
+ e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(7) + SHA1_CONST(2);
+ b = ROTATE_LEFT(b, 30);
+
+ W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1); /* 56 */
+ d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(8) + SHA1_CONST(2);
+ a = ROTATE_LEFT(a, 30);
+
+ W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1); /* 57 */
+ c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(9) + SHA1_CONST(2);
+ e = ROTATE_LEFT(e, 30);
+
+ W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1); /* 58 */
+ b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(10) + SHA1_CONST(2);
+ d = ROTATE_LEFT(d, 30);
+
+ W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1); /* 59 */
+ a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(11) + SHA1_CONST(2);
+ c = ROTATE_LEFT(c, 30);
+
+ /* round 4 */
+ W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1); /* 60 */
+ e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(12) + SHA1_CONST(3);
+ b = ROTATE_LEFT(b, 30);
+
+ W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1); /* 61 */
+ d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(13) + SHA1_CONST(3);
+ a = ROTATE_LEFT(a, 30);
+
+ W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1); /* 62 */
+ c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(14) + SHA1_CONST(3);
+ e = ROTATE_LEFT(e, 30);
+
+ W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1); /* 63 */
+ b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(15) + SHA1_CONST(3);
+ d = ROTATE_LEFT(d, 30);
+
+ W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1); /* 64 */
+ a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(0) + SHA1_CONST(3);
+ c = ROTATE_LEFT(c, 30);
+
+ W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1); /* 65 */
+ e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(1) + SHA1_CONST(3);
+ b = ROTATE_LEFT(b, 30);
+
+ W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1); /* 66 */
+ d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(2) + SHA1_CONST(3);
+ a = ROTATE_LEFT(a, 30);
+
+ W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1); /* 67 */
+ c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(3) + SHA1_CONST(3);
+ e = ROTATE_LEFT(e, 30);
+
+ W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1); /* 68 */
+ b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(4) + SHA1_CONST(3);
+ d = ROTATE_LEFT(d, 30);
+
+ W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1); /* 69 */
+ a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(5) + SHA1_CONST(3);
+ c = ROTATE_LEFT(c, 30);
+
+ W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1); /* 70 */
+ e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(6) + SHA1_CONST(3);
+ b = ROTATE_LEFT(b, 30);
+
+ W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1); /* 71 */
+ d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(7) + SHA1_CONST(3);
+ a = ROTATE_LEFT(a, 30);
+
+ W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1); /* 72 */
+ c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(8) + SHA1_CONST(3);
+ e = ROTATE_LEFT(e, 30);
+
+ W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1); /* 73 */
+ b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(9) + SHA1_CONST(3);
+ d = ROTATE_LEFT(d, 30);
+
+ W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1); /* 74 */
+ a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(10) + SHA1_CONST(3);
+ c = ROTATE_LEFT(c, 30);
+
+ W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1); /* 75 */
+ e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(11) + SHA1_CONST(3);
+ b = ROTATE_LEFT(b, 30);
+
+ W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1); /* 76 */
+ d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(12) + SHA1_CONST(3);
+ a = ROTATE_LEFT(a, 30);
+
+ W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1); /* 77 */
+ c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(13) + SHA1_CONST(3);
+ e = ROTATE_LEFT(e, 30);
+
+ W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1); /* 78 */
+ b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(14) + SHA1_CONST(3);
+ d = ROTATE_LEFT(d, 30);
+
+ W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1); /* 79 */
+
+ ctx->state[0] += ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(15) +
+ SHA1_CONST(3);
+ ctx->state[1] += b;
+ ctx->state[2] += ROTATE_LEFT(c, 30);
+ ctx->state[3] += d;
+ ctx->state[4] += e;
+
+ /* zeroize sensitive information */
+ W(0) = W(1) = W(2) = W(3) = W(4) = W(5) = W(6) = W(7) = W(8) = 0;
+ W(9) = W(10) = W(11) = W(12) = W(13) = W(14) = W(15) = 0;
+}
+#endif /* !__amd64 */
+
+
+/*
+ * Encode()
+ *
+ * purpose: to convert a list of numbers from native byte order to big endian
+ * input: uint8_t * : place to store the converted big endian numbers
+ * uint32_t * : place to get numbers to convert from
+ * size_t : the length of the input in bytes
+ * output: void
+ */
+
+static void
+Encode(uint8_t *_RESTRICT_KYWD output, const uint32_t *_RESTRICT_KYWD input,
+ size_t len)
+{
+ size_t i, j;
+
+ for (i = 0, j = 0; j < len; i++, j += 4) {
+ output[j] = (input[i] >> 24) & 0xff;
+ output[j + 1] = (input[i] >> 16) & 0xff;
+ output[j + 2] = (input[i] >> 8) & 0xff;
+ output[j + 3] = input[i] & 0xff;
+ }
+}
diff --git a/module/icp/algs/sha2/sha2.c b/module/icp/algs/sha2/sha2.c
new file mode 100644
index 000000000..792ca8825
--- /dev/null
+++ b/module/icp/algs/sha2/sha2.c
@@ -0,0 +1,495 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * Copyright 2013 Saso Kiselkov. All rights reserved.
+ */
+
+/*
+ * The basic framework for this code came from the reference
+ * implementation for MD5. That implementation is Copyright (C)
+ * 1991-2, RSA Data Security, Inc. Created 1991. All rights reserved.
+ *
+ * License to copy and use this software is granted provided that it
+ * is identified as the "RSA Data Security, Inc. MD5 Message-Digest
+ * Algorithm" in all material mentioning or referencing this software
+ * or this function.
+ *
+ * License is also granted to make and use derivative works provided
+ * that such works are identified as "derived from the RSA Data
+ * Security, Inc. MD5 Message-Digest Algorithm" in all material
+ * mentioning or referencing the derived work.
+ *
+ * RSA Data Security, Inc. makes no representations concerning either
+ * the merchantability of this software or the suitability of this
+ * software for any particular purpose. It is provided "as is"
+ * without express or implied warranty of any kind.
+ *
+ * These notices must be retained in any copies of any part of this
+ * documentation and/or software.
+ *
+ * NOTE: Cleaned-up and optimized version of SHA2, based on the FIPS 180-2
+ * standard, available at
+ * http://csrc.nist.gov/publications/fips/fips180-2/fips180-2.pdf
+ * Not as fast as one would like -- further optimizations are encouraged
+ * and appreciated.
+ */
+
+#include <sys/zfs_context.h>
+#define _SHA2_IMPL
+#include <sha2/sha2.h>
+#include <sha2/sha2_consts.h>
+
+#define _RESTRICT_KYWD
+
+#ifdef _LITTLE_ENDIAN
+#include <sys/byteorder.h>
+#define HAVE_HTONL
+#endif
+
+static void Encode(uint8_t *, uint32_t *, size_t);
+
+#if defined(__amd64)
+#define SHA256Transform(ctx, in) SHA256TransformBlocks((ctx), (in), 1)
+void SHA256TransformBlocks(SHA2_CTX *ctx, const void *in, size_t num);
+#else
+static void SHA256Transform(SHA2_CTX *, const uint8_t *);
+#endif /* __amd64 */
+
+static uint8_t PADDING[128] = { 0x80, /* all zeros */ };
+
+/* Ch and Maj are the basic SHA2 functions. */
+#define Ch(b, c, d) (((b) & (c)) ^ ((~b) & (d)))
+#define Maj(b, c, d) (((b) & (c)) ^ ((b) & (d)) ^ ((c) & (d)))
+
+/* Rotates x right n bits. */
+#define ROTR(x, n) \
+ (((x) >> (n)) | ((x) << ((sizeof (x) * NBBY)-(n))))
+
+/* Shift x right n bits */
+#define SHR(x, n) ((x) >> (n))
+
+/* SHA256 Functions */
+#define BIGSIGMA0_256(x) (ROTR((x), 2) ^ ROTR((x), 13) ^ ROTR((x), 22))
+#define BIGSIGMA1_256(x) (ROTR((x), 6) ^ ROTR((x), 11) ^ ROTR((x), 25))
+#define SIGMA0_256(x) (ROTR((x), 7) ^ ROTR((x), 18) ^ SHR((x), 3))
+#define SIGMA1_256(x) (ROTR((x), 17) ^ ROTR((x), 19) ^ SHR((x), 10))
+
+#define SHA256ROUND(a, b, c, d, e, f, g, h, i, w) \
+ T1 = h + BIGSIGMA1_256(e) + Ch(e, f, g) + SHA256_CONST(i) + w; \
+ d += T1; \
+ T2 = BIGSIGMA0_256(a) + Maj(a, b, c); \
+ h = T1 + T2
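+
+/*
+ * SHA256ROUND updates only d and h; rather than shifting all eight
+ * working variables every round, the callers below rotate the
+ * variable names across successive invocations, implementing the
+ * same FIPS 180-2 round function without the per-round assignments.
+ */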
+
+/*
+ * sparc optimization:
+ *
+ * on the sparc, we can load big endian 32-bit data easily. note that
+ * special care must be taken to ensure the address is 32-bit aligned.
+ * in the interest of speed, we don't check to make sure, since
+ * careful programming can guarantee this for us.
+ */
+
+#if defined(_BIG_ENDIAN)
+#define LOAD_BIG_32(addr) (*(uint32_t *)(addr))
+#define LOAD_BIG_64(addr) (*(uint64_t *)(addr))
+
+#elif defined(HAVE_HTONL)
+#define LOAD_BIG_32(addr) htonl(*((uint32_t *)(addr)))
+#define LOAD_BIG_64(addr) htonll(*((uint64_t *)(addr)))
+
+#else
+/* little endian -- will work on big endian, but slowly */
+#define LOAD_BIG_32(addr) \
+ (((addr)[0] << 24) | ((addr)[1] << 16) | ((addr)[2] << 8) | (addr)[3])
+#define LOAD_BIG_64(addr) \
+ (((uint64_t)(addr)[0] << 56) | ((uint64_t)(addr)[1] << 48) | \
+ ((uint64_t)(addr)[2] << 40) | ((uint64_t)(addr)[3] << 32) | \
+ ((uint64_t)(addr)[4] << 24) | ((uint64_t)(addr)[5] << 16) | \
+ ((uint64_t)(addr)[6] << 8) | (uint64_t)(addr)[7])
+#endif /* _BIG_ENDIAN */
+
+
+#if !defined(__amd64)
+/* SHA256 Transform */
+
+static void
+SHA256Transform(SHA2_CTX *ctx, const uint8_t *blk)
+{
+ uint32_t a = ctx->state.s32[0];
+ uint32_t b = ctx->state.s32[1];
+ uint32_t c = ctx->state.s32[2];
+ uint32_t d = ctx->state.s32[3];
+ uint32_t e = ctx->state.s32[4];
+ uint32_t f = ctx->state.s32[5];
+ uint32_t g = ctx->state.s32[6];
+ uint32_t h = ctx->state.s32[7];
+
+ uint32_t w0, w1, w2, w3, w4, w5, w6, w7;
+ uint32_t w8, w9, w10, w11, w12, w13, w14, w15;
+ uint32_t T1, T2;
+
+ if ((uintptr_t)blk & 0x3) { /* not 4-byte aligned? */
+ bcopy(blk, ctx->buf_un.buf32, sizeof (ctx->buf_un.buf32));
+ blk = (uint8_t *)ctx->buf_un.buf32;
+ }
+
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w0 = LOAD_BIG_32(blk + 4 * 0);
+ SHA256ROUND(a, b, c, d, e, f, g, h, 0, w0);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w1 = LOAD_BIG_32(blk + 4 * 1);
+ SHA256ROUND(h, a, b, c, d, e, f, g, 1, w1);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w2 = LOAD_BIG_32(blk + 4 * 2);
+ SHA256ROUND(g, h, a, b, c, d, e, f, 2, w2);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w3 = LOAD_BIG_32(blk + 4 * 3);
+ SHA256ROUND(f, g, h, a, b, c, d, e, 3, w3);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w4 = LOAD_BIG_32(blk + 4 * 4);
+ SHA256ROUND(e, f, g, h, a, b, c, d, 4, w4);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w5 = LOAD_BIG_32(blk + 4 * 5);
+ SHA256ROUND(d, e, f, g, h, a, b, c, 5, w5);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w6 = LOAD_BIG_32(blk + 4 * 6);
+ SHA256ROUND(c, d, e, f, g, h, a, b, 6, w6);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w7 = LOAD_BIG_32(blk + 4 * 7);
+ SHA256ROUND(b, c, d, e, f, g, h, a, 7, w7);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w8 = LOAD_BIG_32(blk + 4 * 8);
+ SHA256ROUND(a, b, c, d, e, f, g, h, 8, w8);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w9 = LOAD_BIG_32(blk + 4 * 9);
+ SHA256ROUND(h, a, b, c, d, e, f, g, 9, w9);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w10 = LOAD_BIG_32(blk + 4 * 10);
+ SHA256ROUND(g, h, a, b, c, d, e, f, 10, w10);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w11 = LOAD_BIG_32(blk + 4 * 11);
+ SHA256ROUND(f, g, h, a, b, c, d, e, 11, w11);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w12 = LOAD_BIG_32(blk + 4 * 12);
+ SHA256ROUND(e, f, g, h, a, b, c, d, 12, w12);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w13 = LOAD_BIG_32(blk + 4 * 13);
+ SHA256ROUND(d, e, f, g, h, a, b, c, 13, w13);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w14 = LOAD_BIG_32(blk + 4 * 14);
+ SHA256ROUND(c, d, e, f, g, h, a, b, 14, w14);
+ /* LINTED E_BAD_PTR_CAST_ALIGN */
+ w15 = LOAD_BIG_32(blk + 4 * 15);
+ SHA256ROUND(b, c, d, e, f, g, h, a, 15, w15);
+
+ w0 = SIGMA1_256(w14) + w9 + SIGMA0_256(w1) + w0;
+ SHA256ROUND(a, b, c, d, e, f, g, h, 16, w0);
+ w1 = SIGMA1_256(w15) + w10 + SIGMA0_256(w2) + w1;
+ SHA256ROUND(h, a, b, c, d, e, f, g, 17, w1);
+ w2 = SIGMA1_256(w0) + w11 + SIGMA0_256(w3) + w2;
+ SHA256ROUND(g, h, a, b, c, d, e, f, 18, w2);
+ w3 = SIGMA1_256(w1) + w12 + SIGMA0_256(w4) + w3;
+ SHA256ROUND(f, g, h, a, b, c, d, e, 19, w3);
+ w4 = SIGMA1_256(w2) + w13 + SIGMA0_256(w5) + w4;
+ SHA256ROUND(e, f, g, h, a, b, c, d, 20, w4);
+ w5 = SIGMA1_256(w3) + w14 + SIGMA0_256(w6) + w5;
+ SHA256ROUND(d, e, f, g, h, a, b, c, 21, w5);
+ w6 = SIGMA1_256(w4) + w15 + SIGMA0_256(w7) + w6;
+ SHA256ROUND(c, d, e, f, g, h, a, b, 22, w6);
+ w7 = SIGMA1_256(w5) + w0 + SIGMA0_256(w8) + w7;
+ SHA256ROUND(b, c, d, e, f, g, h, a, 23, w7);
+ w8 = SIGMA1_256(w6) + w1 + SIGMA0_256(w9) + w8;
+ SHA256ROUND(a, b, c, d, e, f, g, h, 24, w8);
+ w9 = SIGMA1_256(w7) + w2 + SIGMA0_256(w10) + w9;
+ SHA256ROUND(h, a, b, c, d, e, f, g, 25, w9);
+ w10 = SIGMA1_256(w8) + w3 + SIGMA0_256(w11) + w10;
+ SHA256ROUND(g, h, a, b, c, d, e, f, 26, w10);
+ w11 = SIGMA1_256(w9) + w4 + SIGMA0_256(w12) + w11;
+ SHA256ROUND(f, g, h, a, b, c, d, e, 27, w11);
+ w12 = SIGMA1_256(w10) + w5 + SIGMA0_256(w13) + w12;
+ SHA256ROUND(e, f, g, h, a, b, c, d, 28, w12);
+ w13 = SIGMA1_256(w11) + w6 + SIGMA0_256(w14) + w13;
+ SHA256ROUND(d, e, f, g, h, a, b, c, 29, w13);
+ w14 = SIGMA1_256(w12) + w7 + SIGMA0_256(w15) + w14;
+ SHA256ROUND(c, d, e, f, g, h, a, b, 30, w14);
+ w15 = SIGMA1_256(w13) + w8 + SIGMA0_256(w0) + w15;
+ SHA256ROUND(b, c, d, e, f, g, h, a, 31, w15);
+
+ w0 = SIGMA1_256(w14) + w9 + SIGMA0_256(w1) + w0;
+ SHA256ROUND(a, b, c, d, e, f, g, h, 32, w0);
+ w1 = SIGMA1_256(w15) + w10 + SIGMA0_256(w2) + w1;
+ SHA256ROUND(h, a, b, c, d, e, f, g, 33, w1);
+ w2 = SIGMA1_256(w0) + w11 + SIGMA0_256(w3) + w2;
+ SHA256ROUND(g, h, a, b, c, d, e, f, 34, w2);
+ w3 = SIGMA1_256(w1) + w12 + SIGMA0_256(w4) + w3;
+ SHA256ROUND(f, g, h, a, b, c, d, e, 35, w3);
+ w4 = SIGMA1_256(w2) + w13 + SIGMA0_256(w5) + w4;
+ SHA256ROUND(e, f, g, h, a, b, c, d, 36, w4);
+ w5 = SIGMA1_256(w3) + w14 + SIGMA0_256(w6) + w5;
+ SHA256ROUND(d, e, f, g, h, a, b, c, 37, w5);
+ w6 = SIGMA1_256(w4) + w15 + SIGMA0_256(w7) + w6;
+ SHA256ROUND(c, d, e, f, g, h, a, b, 38, w6);
+ w7 = SIGMA1_256(w5) + w0 + SIGMA0_256(w8) + w7;
+ SHA256ROUND(b, c, d, e, f, g, h, a, 39, w7);
+ w8 = SIGMA1_256(w6) + w1 + SIGMA0_256(w9) + w8;
+ SHA256ROUND(a, b, c, d, e, f, g, h, 40, w8);
+ w9 = SIGMA1_256(w7) + w2 + SIGMA0_256(w10) + w9;
+ SHA256ROUND(h, a, b, c, d, e, f, g, 41, w9);
+ w10 = SIGMA1_256(w8) + w3 + SIGMA0_256(w11) + w10;
+ SHA256ROUND(g, h, a, b, c, d, e, f, 42, w10);
+ w11 = SIGMA1_256(w9) + w4 + SIGMA0_256(w12) + w11;
+ SHA256ROUND(f, g, h, a, b, c, d, e, 43, w11);
+ w12 = SIGMA1_256(w10) + w5 + SIGMA0_256(w13) + w12;
+ SHA256ROUND(e, f, g, h, a, b, c, d, 44, w12);
+ w13 = SIGMA1_256(w11) + w6 + SIGMA0_256(w14) + w13;
+ SHA256ROUND(d, e, f, g, h, a, b, c, 45, w13);
+ w14 = SIGMA1_256(w12) + w7 + SIGMA0_256(w15) + w14;
+ SHA256ROUND(c, d, e, f, g, h, a, b, 46, w14);
+ w15 = SIGMA1_256(w13) + w8 + SIGMA0_256(w0) + w15;
+ SHA256ROUND(b, c, d, e, f, g, h, a, 47, w15);
+
+ w0 = SIGMA1_256(w14) + w9 + SIGMA0_256(w1) + w0;
+ SHA256ROUND(a, b, c, d, e, f, g, h, 48, w0);
+ w1 = SIGMA1_256(w15) + w10 + SIGMA0_256(w2) + w1;
+ SHA256ROUND(h, a, b, c, d, e, f, g, 49, w1);
+ w2 = SIGMA1_256(w0) + w11 + SIGMA0_256(w3) + w2;
+ SHA256ROUND(g, h, a, b, c, d, e, f, 50, w2);
+ w3 = SIGMA1_256(w1) + w12 + SIGMA0_256(w4) + w3;
+ SHA256ROUND(f, g, h, a, b, c, d, e, 51, w3);
+ w4 = SIGMA1_256(w2) + w13 + SIGMA0_256(w5) + w4;
+ SHA256ROUND(e, f, g, h, a, b, c, d, 52, w4);
+ w5 = SIGMA1_256(w3) + w14 + SIGMA0_256(w6) + w5;
+ SHA256ROUND(d, e, f, g, h, a, b, c, 53, w5);
+ w6 = SIGMA1_256(w4) + w15 + SIGMA0_256(w7) + w6;
+ SHA256ROUND(c, d, e, f, g, h, a, b, 54, w6);
+ w7 = SIGMA1_256(w5) + w0 + SIGMA0_256(w8) + w7;
+ SHA256ROUND(b, c, d, e, f, g, h, a, 55, w7);
+ w8 = SIGMA1_256(w6) + w1 + SIGMA0_256(w9) + w8;
+ SHA256ROUND(a, b, c, d, e, f, g, h, 56, w8);
+ w9 = SIGMA1_256(w7) + w2 + SIGMA0_256(w10) + w9;
+ SHA256ROUND(h, a, b, c, d, e, f, g, 57, w9);
+ w10 = SIGMA1_256(w8) + w3 + SIGMA0_256(w11) + w10;
+ SHA256ROUND(g, h, a, b, c, d, e, f, 58, w10);
+ w11 = SIGMA1_256(w9) + w4 + SIGMA0_256(w12) + w11;
+ SHA256ROUND(f, g, h, a, b, c, d, e, 59, w11);
+ w12 = SIGMA1_256(w10) + w5 + SIGMA0_256(w13) + w12;
+ SHA256ROUND(e, f, g, h, a, b, c, d, 60, w12);
+ w13 = SIGMA1_256(w11) + w6 + SIGMA0_256(w14) + w13;
+ SHA256ROUND(d, e, f, g, h, a, b, c, 61, w13);
+ w14 = SIGMA1_256(w12) + w7 + SIGMA0_256(w15) + w14;
+ SHA256ROUND(c, d, e, f, g, h, a, b, 62, w14);
+ w15 = SIGMA1_256(w13) + w8 + SIGMA0_256(w0) + w15;
+ SHA256ROUND(b, c, d, e, f, g, h, a, 63, w15);
+
+ ctx->state.s32[0] += a;
+ ctx->state.s32[1] += b;
+ ctx->state.s32[2] += c;
+ ctx->state.s32[3] += d;
+ ctx->state.s32[4] += e;
+ ctx->state.s32[5] += f;
+ ctx->state.s32[6] += g;
+ ctx->state.s32[7] += h;
+}
+#endif /* !__amd64 */
+
+
+/*
+ * Encode()
+ *
+ * purpose: to convert a list of numbers from native byte order to big endian
+ * input: uint8_t * : place to store the converted big endian numbers
+ * uint32_t * : place to get numbers to convert from
+ * size_t : the length of the input in bytes
+ * output: void
+ */
+
+static void
+Encode(uint8_t *_RESTRICT_KYWD output, uint32_t *_RESTRICT_KYWD input,
+ size_t len)
+{
+ size_t i, j;
+
+ for (i = 0, j = 0; j < len; i++, j += 4) {
+ output[j] = (input[i] >> 24) & 0xff;
+ output[j + 1] = (input[i] >> 16) & 0xff;
+ output[j + 2] = (input[i] >> 8) & 0xff;
+ output[j + 3] = input[i] & 0xff;
+ }
+}
+
+void
+SHA2Init(uint64_t mech, SHA2_CTX *ctx)
+{
+
+ switch (mech) {
+ case SHA256_MECH_INFO_TYPE:
+ case SHA256_HMAC_MECH_INFO_TYPE:
+ case SHA256_HMAC_GEN_MECH_INFO_TYPE:
+ ctx->state.s32[0] = 0x6a09e667U;
+ ctx->state.s32[1] = 0xbb67ae85U;
+ ctx->state.s32[2] = 0x3c6ef372U;
+ ctx->state.s32[3] = 0xa54ff53aU;
+ ctx->state.s32[4] = 0x510e527fU;
+ ctx->state.s32[5] = 0x9b05688cU;
+ ctx->state.s32[6] = 0x1f83d9abU;
+ ctx->state.s32[7] = 0x5be0cd19U;
+ break;
+ default:
+ cmn_err(CE_PANIC,
+ "sha2_init: failed to find a supported algorithm: 0x%x",
+ (uint32_t)mech);
+ }
+
+ ctx->algotype = (uint32_t)mech;
+ ctx->count.c64[0] = ctx->count.c64[1] = 0;
+}
+
+void
+SHA256Init(SHA256_CTX *ctx)
+{
+ SHA2Init(SHA256, ctx);
+}
+
+/*
+ * SHA2Update()
+ *
+ * purpose: continues an sha2 digest operation, using the message block
+ * to update the context.
+ * input: SHA2_CTX * : the context to update
+ * void * : the message block
+ * size_t : the length of the message block, in bytes
+ * output: void
+ */
+
+void
+SHA2Update(SHA2_CTX *ctx, const void *inptr, size_t input_len)
+{
+ uint32_t i, buf_index, buf_len, buf_limit;
+ const uint8_t *input = inptr;
+ uint32_t algotype = ctx->algotype;
+#if defined(__amd64)
+ uint32_t block_count;
+#endif	/* __amd64 */
+
+
+ /* check for noop */
+ if (input_len == 0)
+ return;
+
+ if (algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE) {
+ buf_limit = 64;
+
+ /* compute number of bytes mod 64 */
+ buf_index = (ctx->count.c32[1] >> 3) & 0x3F;
+
+ /* update number of bits */
+ if ((ctx->count.c32[1] += (input_len << 3)) < (input_len << 3))
+ ctx->count.c32[0]++;
+
+ ctx->count.c32[0] += (input_len >> 29);
+
+ } else {
+ buf_limit = 128;
+
+ /* compute number of bytes mod 128 */
+ buf_index = (ctx->count.c64[1] >> 3) & 0x7F;
+
+ /* update number of bits */
+ if ((ctx->count.c64[1] += (input_len << 3)) < (input_len << 3))
+ ctx->count.c64[0]++;
+
+		ctx->count.c64[0] += (input_len >> 61);
+ }
+
+ buf_len = buf_limit - buf_index;
+
+ /* transform as many times as possible */
+ i = 0;
+ if (input_len >= buf_len) {
+
+ /*
+ * general optimization:
+ *
+ * only do initial bcopy() and SHA2Transform() if
+ * buf_index != 0. if buf_index == 0, we're just
+ * wasting our time doing the bcopy() since there
+ * wasn't any data left over from a previous call to
+ * SHA2Update().
+ */
+ if (buf_index) {
+ bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len);
+ if (algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE)
+ SHA256Transform(ctx, ctx->buf_un.buf8);
+
+ i = buf_len;
+ }
+
+#if !defined(__amd64)
+ if (algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE) {
+ for (; i + buf_limit - 1 < input_len; i += buf_limit) {
+ SHA256Transform(ctx, &input[i]);
+ }
+ }
+
+#else
+ if (algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE) {
+ block_count = (input_len - i) >> 6;
+ if (block_count > 0) {
+ SHA256TransformBlocks(ctx, &input[i],
+ block_count);
+ i += block_count << 6;
+ }
+ }
+#endif /* !__amd64 */
+
+ /*
+ * general optimization:
+ *
+ * if i and input_len are the same, return now instead
+ * of calling bcopy(), since the bcopy() in this case
+ * will be an expensive noop.
+ */
+
+ if (input_len == i)
+ return;
+
+ buf_index = 0;
+ }
+
+ /* buffer remaining input */
+ bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i);
+}
+
+
+/*
+ * SHA2Final()
+ *
+ * purpose: ends an sha2 digest operation, finalizing the message digest and
+ * zeroing the context.
+ * input: uchar_t * : a buffer to store the digest
+ * : The function actually uses void* because many
+ * : callers pass things other than uchar_t here.
+ * SHA2_CTX * : the context to finalize, save, and zero
+ * output: void
+ */
+
+void
+SHA2Final(void *digest, SHA2_CTX *ctx)
+{
+ uint8_t bitcount_be[sizeof (ctx->count.c32)];
+ uint32_t index;
+ uint32_t algotype = ctx->algotype;
+
+ if (algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE) {
+ index = (ctx->count.c32[1] >> 3) & 0x3f;
+ Encode(bitcount_be, ctx->count.c32, sizeof (bitcount_be));
+ SHA2Update(ctx, PADDING, ((index < 56) ? 56 : 120) - index);
+ SHA2Update(ctx, bitcount_be, sizeof (bitcount_be));
+ Encode(digest, ctx->state.s32, sizeof (ctx->state.s32));
+ }
+
+ /* zeroize sensitive information */
+ bzero(ctx, sizeof (*ctx));
+}
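+
+/*
+ * Illustrative sketch (not part of the original port): computing a
+ * SHA-256 digest with this implementation.  Only the SHA256 mechanism
+ * types are supported by this port; anything else makes SHA2Init()
+ * panic.  The helper name is hypothetical.
+ */
+#if 0
+static void
+sha256_digest_example(const void *msg, size_t msg_len, uint8_t digest[32])
+{
+	SHA2_CTX ctx;
+
+	SHA2Init(SHA256_MECH_INFO_TYPE, &ctx);
+	SHA2Update(&ctx, msg, msg_len);	/* may be called repeatedly */
+	SHA2Final(digest, &ctx);	/* also zeroizes the context */
+}
+#endif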