Diffstat (limited to 'src')
-rw-r--r--  src/build-data/arch/x86_64.txt                     1
-rw-r--r--  src/cli/speed.cpp                                   2
-rw-r--r--  src/lib/hash/sha1/sha160.cpp                        7
-rw-r--r--  src/lib/hash/sha1/sha160.h                          7
-rw-r--r--  src/lib/hash/sha1/sha1_x86/info.txt                11
-rw-r--r--  src/lib/hash/sha1/sha1_x86/sha1_x86.cpp           214
-rw-r--r--  src/lib/hash/sha2_32/sha2_32.cpp                   10
-rw-r--r--  src/lib/hash/sha2_32/sha2_32.h                      8
-rw-r--r--  src/lib/hash/sha2_32/sha2_32_x86/info.txt          11
-rw-r--r--  src/lib/hash/sha2_32/sha2_32_x86/sha2_32_x86.cpp  210
-rw-r--r--  src/tests/data/hash/sha1.vec                        2
-rw-r--r--  src/tests/data/hash/sha2_32.vec                     2
-rw-r--r--  src/tests/tests.cpp                                 2
13 files changed, 485 insertions, 2 deletions
diff --git a/src/build-data/arch/x86_64.txt b/src/build-data/arch/x86_64.txt
index 33d9f5bd5..1e85df34b 100644
--- a/src/build-data/arch/x86_64.txt
+++ b/src/build-data/arch/x86_64.txt
@@ -47,4 +47,5 @@ rdrand
rdseed
sha
bmi2
+sha
</isa_extensions>
diff --git a/src/cli/speed.cpp b/src/cli/speed.cpp
index 30a01a934..58f12ef92 100644
--- a/src/cli/speed.cpp
+++ b/src/cli/speed.cpp
@@ -434,6 +434,8 @@ class Speed final : public Command
Botan::CPUID::clear_cpuid_bit(Botan::CPUID::CPUID_AVX2_BIT);
else if(cpuid_to_clear == "sse2")
Botan::CPUID::clear_cpuid_bit(Botan::CPUID::CPUID_SSE2_BIT);
+ else if(cpuid_to_clear == "sha")
+ Botan::CPUID::clear_cpuid_bit(Botan::CPUID::CPUID_SHA_BIT);
#endif
}
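
For context, the CPUID_SHA_BIT cleared above mirrors the SHA feature flag that x86 CPUs report in CPUID leaf 7 (EBX bit 29). Below is a minimal standalone sketch of that check, assuming a recent GCC or Clang with <cpuid.h>; it is illustrative only, and Botan's CPUID::has_intel_sha() wraps its own equivalent probe.

// Sketch: detect the Intel SHA extensions directly via CPUID leaf 7, subleaf 0
#include <cpuid.h>
#include <cstdio>

static bool cpu_has_sha_ni()
   {
   unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
   if(__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) == 0)
      return false; // CPUID leaf 7 not supported
   return (ebx & (1u << 29)) != 0; // EBX bit 29 = SHA extensions
   }

int main()
   {
   std::printf("SHA-NI available: %s\n", cpu_has_sha_ni() ? "yes" : "no");
   return 0;
   }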
diff --git a/src/lib/hash/sha1/sha160.cpp b/src/lib/hash/sha1/sha160.cpp
index 735789cab..13f9c24d7 100644
--- a/src/lib/hash/sha1/sha160.cpp
+++ b/src/lib/hash/sha1/sha160.cpp
@@ -61,6 +61,13 @@ void SHA_160::compress_n(const uint8_t input[], size_t blocks)
{
using namespace SHA1_F;
+#if defined(BOTAN_HAS_SHA1_X86_SHA_NI)
+ if(CPUID::has_intel_sha())
+ {
+ return sha1_compress_x86(m_digest, input, blocks);
+ }
+#endif
+
#if defined(BOTAN_HAS_SHA1_SSE2)
if(CPUID::has_sse2())
{
diff --git a/src/lib/hash/sha1/sha160.h b/src/lib/hash/sha1/sha160.h
index 7ba7257af..7333ca827 100644
--- a/src/lib/hash/sha1/sha160.h
+++ b/src/lib/hash/sha1/sha160.h
@@ -38,6 +38,13 @@ class BOTAN_DLL SHA_160 final : public MDx_HashFunction
size_t block_count);
#endif
+#if defined(BOTAN_HAS_SHA1_X86_SHA_NI)
+ // Using x86 SHA instructions in Intel Goldmont and Cannonlake
+ static void sha1_compress_x86(secure_vector<uint32_t>& digest,
+ const uint8_t blocks[],
+ size_t block_count);
+#endif
+
void copy_out(uint8_t[]) override;
diff --git a/src/lib/hash/sha1/sha1_x86/info.txt b/src/lib/hash/sha1/sha1_x86/info.txt
new file mode 100644
index 000000000..fa329ae77
--- /dev/null
+++ b/src/lib/hash/sha1/sha1_x86/info.txt
@@ -0,0 +1,11 @@
+<defines>
+SHA1_X86_SHA_NI -> 20170518
+</defines>
+
+need_isa sha,ssse3,sse4.1
+
+<cc>
+clang:3.9
+gcc:5.0
+msvc:2015
+</cc>
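
The compiler minimums above track when each toolchain gained usable SHA-NI intrinsics and per-function target attributes. The following is a hedged sketch of the mechanism that the BOTAN_FUNC_ISA("sha") annotation in sha1_x86.cpp below relies on with GCC and Clang; the function name here is purely illustrative.

// Illustration: a per-function target attribute lets one function emit
// SHA-NI instructions without compiling the whole file with -msha.
#include <immintrin.h>

__attribute__((target("sha,ssse3,sse4.1")))
__m128i four_sha1_rounds(__m128i abcd, __m128i e_and_msg)
   {
   // Four SHA-1 rounds using round function/constant selector 0
   return _mm_sha1rnds4_epu32(abcd, e_and_msg, 0);
   }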
diff --git a/src/lib/hash/sha1/sha1_x86/sha1_x86.cpp b/src/lib/hash/sha1/sha1_x86/sha1_x86.cpp
new file mode 100644
index 000000000..0d774662b
--- /dev/null
+++ b/src/lib/hash/sha1/sha1_x86/sha1_x86.cpp
@@ -0,0 +1,214 @@
+/*
+* SHA-1 using Intel SHA intrinsics
+*
+* Based on public domain code by Sean Gulley
+* (https://github.com/mitls/hacl-star/tree/master/experimental/hash)
+* Adapted to Botan by Jeffrey Walton.
+*
+* Further changes
+*
+* (C) 2017 Jack Lloyd
+*
+* Botan is released under the Simplified BSD License (see license.txt)
+*/
+
+#include <botan/sha160.h>
+#include <immintrin.h>
+
+namespace Botan {
+
+BOTAN_FUNC_ISA("sha")
+void SHA_160::sha1_compress_x86(secure_vector<uint32_t>& digest,
+ const uint8_t input[],
+ size_t blocks)
+ {
+ const __m128i MASK = _mm_set_epi64x(0x0001020304050607ULL, 0x08090a0b0c0d0e0fULL);
+ const __m128i* input_mm = reinterpret_cast<const __m128i*>(input);
+
+ uint32_t* state = digest.data();
+
+ // Load initial values
+ __m128i ABCD = _mm_loadu_si128((__m128i*) state);
+ __m128i E0 = _mm_set_epi32(state[4], 0, 0, 0);
+ ABCD = _mm_shuffle_epi32(ABCD, 0x1B);
+
+ while (blocks)
+ {
+ // Save current hash
+ const __m128i ABCD_SAVE = ABCD;
+ const __m128i E0_SAVE = E0;
+
+ __m128i MSG0, MSG1, MSG2, MSG3;
+ __m128i E1;
+
+ // Rounds 0-3
+ MSG0 = _mm_loadu_si128(input_mm+0);
+ MSG0 = _mm_shuffle_epi8(MSG0, MASK);
+ E0 = _mm_add_epi32(E0, MSG0);
+ E1 = ABCD;
+ ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 0);
+
+ // Rounds 4-7
+ MSG1 = _mm_loadu_si128(input_mm+1);
+ MSG1 = _mm_shuffle_epi8(MSG1, MASK);
+ E1 = _mm_sha1nexte_epu32(E1, MSG1);
+ E0 = ABCD;
+ ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 0);
+ MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1);
+
+ // Rounds 8-11
+ MSG2 = _mm_loadu_si128(input_mm+2);
+ MSG2 = _mm_shuffle_epi8(MSG2, MASK);
+ E0 = _mm_sha1nexte_epu32(E0, MSG2);
+ E1 = ABCD;
+ ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 0);
+ MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2);
+ MSG0 = _mm_xor_si128(MSG0, MSG2);
+
+ // Rounds 12-15
+ MSG3 = _mm_loadu_si128(input_mm+3);
+ MSG3 = _mm_shuffle_epi8(MSG3, MASK);
+ E1 = _mm_sha1nexte_epu32(E1, MSG3);
+ E0 = ABCD;
+ MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3);
+ ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 0);
+ MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3);
+ MSG1 = _mm_xor_si128(MSG1, MSG3);
+
+ // Rounds 16-19
+ E0 = _mm_sha1nexte_epu32(E0, MSG0);
+ E1 = ABCD;
+ MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0);
+ ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 0);
+ MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0);
+ MSG2 = _mm_xor_si128(MSG2, MSG0);
+
+ // Rounds 20-23
+ E1 = _mm_sha1nexte_epu32(E1, MSG1);
+ E0 = ABCD;
+ MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1);
+ ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 1);
+ MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1);
+ MSG3 = _mm_xor_si128(MSG3, MSG1);
+
+ // Rounds 24-27
+ E0 = _mm_sha1nexte_epu32(E0, MSG2);
+ E1 = ABCD;
+ MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2);
+ ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 1);
+ MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2);
+ MSG0 = _mm_xor_si128(MSG0, MSG2);
+
+ // Rounds 28-31
+ E1 = _mm_sha1nexte_epu32(E1, MSG3);
+ E0 = ABCD;
+ MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3);
+ ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 1);
+ MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3);
+ MSG1 = _mm_xor_si128(MSG1, MSG3);
+
+ // Rounds 32-35
+ E0 = _mm_sha1nexte_epu32(E0, MSG0);
+ E1 = ABCD;
+ MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0);
+ ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 1);
+ MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0);
+ MSG2 = _mm_xor_si128(MSG2, MSG0);
+
+ // Rounds 36-39
+ E1 = _mm_sha1nexte_epu32(E1, MSG1);
+ E0 = ABCD;
+ MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1);
+ ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 1);
+ MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1);
+ MSG3 = _mm_xor_si128(MSG3, MSG1);
+
+ // Rounds 40-43
+ E0 = _mm_sha1nexte_epu32(E0, MSG2);
+ E1 = ABCD;
+ MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2);
+ ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 2);
+ MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2);
+ MSG0 = _mm_xor_si128(MSG0, MSG2);
+
+ // Rounds 44-47
+ E1 = _mm_sha1nexte_epu32(E1, MSG3);
+ E0 = ABCD;
+ MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3);
+ ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 2);
+ MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3);
+ MSG1 = _mm_xor_si128(MSG1, MSG3);
+
+ // Rounds 48-51
+ E0 = _mm_sha1nexte_epu32(E0, MSG0);
+ E1 = ABCD;
+ MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0);
+ ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 2);
+ MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0);
+ MSG2 = _mm_xor_si128(MSG2, MSG0);
+
+ // Rounds 52-55
+ E1 = _mm_sha1nexte_epu32(E1, MSG1);
+ E0 = ABCD;
+ MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1);
+ ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 2);
+ MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1);
+ MSG3 = _mm_xor_si128(MSG3, MSG1);
+
+ // Rounds 56-59
+ E0 = _mm_sha1nexte_epu32(E0, MSG2);
+ E1 = ABCD;
+ MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2);
+ ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 2);
+ MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2);
+ MSG0 = _mm_xor_si128(MSG0, MSG2);
+
+ // Rounds 60-63
+ E1 = _mm_sha1nexte_epu32(E1, MSG3);
+ E0 = ABCD;
+ MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3);
+ ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 3);
+ MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3);
+ MSG1 = _mm_xor_si128(MSG1, MSG3);
+
+ // Rounds 64-67
+ E0 = _mm_sha1nexte_epu32(E0, MSG0);
+ E1 = ABCD;
+ MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0);
+ ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 3);
+ MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0);
+ MSG2 = _mm_xor_si128(MSG2, MSG0);
+
+ // Rounds 68-71
+ E1 = _mm_sha1nexte_epu32(E1, MSG1);
+ E0 = ABCD;
+ MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1);
+ ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 3);
+ MSG3 = _mm_xor_si128(MSG3, MSG1);
+
+ // Rounds 72-75
+ E0 = _mm_sha1nexte_epu32(E0, MSG2);
+ E1 = ABCD;
+ MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2);
+ ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 3);
+
+ // Rounds 76-79
+ E1 = _mm_sha1nexte_epu32(E1, MSG3);
+ E0 = ABCD;
+ ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 3);
+
+ // Add values back to state
+ E0 = _mm_sha1nexte_epu32(E0, E0_SAVE);
+ ABCD = _mm_add_epi32(ABCD, ABCD_SAVE);
+
+ input_mm += 4;
+ blocks--;
+ }
+
+ // Save state
+ ABCD = _mm_shuffle_epi32(ABCD, 0x1B);
+ _mm_storeu_si128((__m128i*) state, ABCD);
+ state[4] = _mm_extract_epi32(E0, 3);
+ }
+
+}
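
Since the dispatch to sha1_compress_x86 happens inside SHA_160::compress_n, the intrinsics path can be sanity-checked from the public API with a known-answer test. A sketch, assuming the library was built with this module enabled:

// Verify SHA-1 against the FIPS 180 "abc" test vector
// (expected digest: A9993E364706816ABA3E25717850C26C9CD0D89D)
#include <botan/hash.h>
#include <botan/hex.h>
#include <iostream>

int main()
   {
   auto sha1 = Botan::HashFunction::create("SHA-160");
   if(!sha1)
      return 1;
   sha1->update("abc");
   std::cout << Botan::hex_encode(sha1->final()) << "\n";
   return 0;
   }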
diff --git a/src/lib/hash/sha2_32/sha2_32.cpp b/src/lib/hash/sha2_32/sha2_32.cpp
index 2a748a6aa..c4d76b0f6 100644
--- a/src/lib/hash/sha2_32/sha2_32.cpp
+++ b/src/lib/hash/sha2_32/sha2_32.cpp
@@ -1,12 +1,13 @@
/*
* SHA-{224,256}
-* (C) 1999-2010 Jack Lloyd
+* (C) 1999-2010,2017 Jack Lloyd
* 2007 FlexSecure GmbH
*
* Botan is released under the Simplified BSD License (see license.txt)
*/
#include <botan/sha2_32.h>
+#include <botan/cpuid.h>
namespace Botan {
@@ -51,6 +52,13 @@ inline uint32_t sigma(uint32_t X, uint32_t rot1, uint32_t rot2, uint32_t shift)
void compress(secure_vector<uint32_t>& digest,
const uint8_t input[], size_t blocks)
{
+#if defined(BOTAN_HAS_SHA2_32_X86)
+ if(CPUID::has_intel_sha())
+ {
+ return sha2_compress_x86(digest.data(), input, blocks);
+ }
+#endif
+
uint32_t A = digest[0], B = digest[1], C = digest[2],
D = digest[3], E = digest[4], F = digest[5],
G = digest[6], H = digest[7];
diff --git a/src/lib/hash/sha2_32/sha2_32.h b/src/lib/hash/sha2_32/sha2_32.h
index 78e08c97a..5a687efbe 100644
--- a/src/lib/hash/sha2_32/sha2_32.h
+++ b/src/lib/hash/sha2_32/sha2_32.h
@@ -48,6 +48,7 @@ class BOTAN_DLL SHA_256 final : public MDx_HashFunction
SHA_256() : MDx_HashFunction(64, true, true), m_digest(8)
{ clear(); }
+
private:
void compress_n(const uint8_t[], size_t blocks) override;
void copy_out(uint8_t[]) override;
@@ -55,6 +56,13 @@ class BOTAN_DLL SHA_256 final : public MDx_HashFunction
secure_vector<uint32_t> m_digest;
};
+#if defined(BOTAN_HAS_SHA2_32_X86)
+/*
+* SHA-256 compression using Goldmont x86 extensions. Not for public consumption.
+*/
+void sha2_compress_x86(uint32_t digest[8], const uint8_t input[], size_t blocks);
+#endif
+
}
#endif
diff --git a/src/lib/hash/sha2_32/sha2_32_x86/info.txt b/src/lib/hash/sha2_32/sha2_32_x86/info.txt
new file mode 100644
index 000000000..c0b159ab2
--- /dev/null
+++ b/src/lib/hash/sha2_32/sha2_32_x86/info.txt
@@ -0,0 +1,11 @@
+<defines>
+SHA2_32_X86 -> 20170518
+</defines>
+
+need_isa sha,sse4.1
+
+<cc>
+gcc:5.0
+clang:3.9
+msvc:2015
+</cc>
diff --git a/src/lib/hash/sha2_32/sha2_32_x86/sha2_32_x86.cpp b/src/lib/hash/sha2_32/sha2_32_x86/sha2_32_x86.cpp
new file mode 100644
index 000000000..8f90ec5a9
--- /dev/null
+++ b/src/lib/hash/sha2_32/sha2_32_x86/sha2_32_x86.cpp
@@ -0,0 +1,210 @@
+/*
+* Support for SHA-256 x86 intrinsics
+* Based on public domain code by Sean Gulley
+* (https://github.com/mitls/hacl-star/tree/master/experimental/hash)
+*
+* Botan is released under the Simplified BSD License (see license.txt)
+*/
+
+#include <botan/sha2_32.h>
+#include <immintrin.h>
+
+namespace Botan {
+
+// called from sha2_32.cpp
+void sha2_compress_x86(uint32_t digest[8], const uint8_t input[], size_t blocks)
+ {
+ __m128i STATE0, STATE1;
+ __m128i MSG, TMP, MASK;
+ __m128i TMSG0, TMSG1, TMSG2, TMSG3;
+ __m128i ABEF_SAVE, CDGH_SAVE;
+
+ uint32_t* state = &digest[0];
+
+ // Load initial values
+ TMP = _mm_loadu_si128((__m128i*) &state[0]);
+ STATE1 = _mm_loadu_si128((__m128i*) &state[4]);
+ MASK = _mm_set_epi64x(0x0c0d0e0f08090a0bULL, 0x0405060700010203ULL);
+
+ TMP = _mm_shuffle_epi32(TMP, 0xB1); // CDAB
+ STATE1 = _mm_shuffle_epi32(STATE1, 0x1B); // EFGH
+ STATE0 = _mm_alignr_epi8(TMP, STATE1, 8); // ABEF
+ STATE1 = _mm_blend_epi16(STATE1, TMP, 0xF0); // CDGH
+
+ while (blocks)
+ {
+ // Save current hash
+ ABEF_SAVE = STATE0;
+ CDGH_SAVE = STATE1;
+
+ // Rounds 0-3
+ MSG = _mm_loadu_si128((const __m128i*) (input+0));
+ TMSG0 = _mm_shuffle_epi8(MSG, MASK);
+ MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(0xE9B5DBA5B5C0FBCFULL, 0x71374491428A2F98ULL));
+ STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+ MSG = _mm_shuffle_epi32(MSG, 0x0E);
+ STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
+
+ // Rounds 4-7
+ TMSG1 = _mm_loadu_si128((const __m128i*) (input+16));
+ TMSG1 = _mm_shuffle_epi8(TMSG1, MASK);
+ MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(0xAB1C5ED5923F82A4ULL, 0x59F111F13956C25BULL));
+ STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+ MSG = _mm_shuffle_epi32(MSG, 0x0E);
+ STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
+ TMSG0 = _mm_sha256msg1_epu32(TMSG0, TMSG1);
+
+ // Rounds 8-11
+ TMSG2 = _mm_loadu_si128((const __m128i*) (input+32));
+ TMSG2 = _mm_shuffle_epi8(TMSG2, MASK);
+ MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(0x550C7DC3243185BEULL, 0x12835B01D807AA98ULL));
+ STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+ MSG = _mm_shuffle_epi32(MSG, 0x0E);
+ STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
+ TMSG1 = _mm_sha256msg1_epu32(TMSG1, TMSG2);
+
+ // Rounds 12-15
+ TMSG3 = _mm_loadu_si128((const __m128i*) (input+48));
+ TMSG3 = _mm_shuffle_epi8(TMSG3, MASK);
+ MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(0xC19BF1749BDC06A7ULL, 0x80DEB1FE72BE5D74ULL));
+ STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+ TMP = _mm_alignr_epi8(TMSG3, TMSG2, 4);
+ TMSG0 = _mm_add_epi32(TMSG0, TMP);
+ TMSG0 = _mm_sha256msg2_epu32(TMSG0, TMSG3);
+ MSG = _mm_shuffle_epi32(MSG, 0x0E);
+ STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
+ TMSG2 = _mm_sha256msg1_epu32(TMSG2, TMSG3);
+
+ // Rounds 16-19
+ MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(0x240CA1CC0FC19DC6ULL, 0xEFBE4786E49B69C1ULL));
+ STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+ TMP = _mm_alignr_epi8(TMSG0, TMSG3, 4);
+ TMSG1 = _mm_add_epi32(TMSG1, TMP);
+ TMSG1 = _mm_sha256msg2_epu32(TMSG1, TMSG0);
+ MSG = _mm_shuffle_epi32(MSG, 0x0E);
+ STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
+ TMSG3 = _mm_sha256msg1_epu32(TMSG3, TMSG0);
+
+ // Rounds 20-23
+ MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(0x76F988DA5CB0A9DCULL, 0x4A7484AA2DE92C6FULL));
+ STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+ TMP = _mm_alignr_epi8(TMSG1, TMSG0, 4);
+ TMSG2 = _mm_add_epi32(TMSG2, TMP);
+ TMSG2 = _mm_sha256msg2_epu32(TMSG2, TMSG1);
+ MSG = _mm_shuffle_epi32(MSG, 0x0E);
+ STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
+ TMSG0 = _mm_sha256msg1_epu32(TMSG0, TMSG1);
+
+ // Rounds 24-27
+ MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(0xBF597FC7B00327C8ULL, 0xA831C66D983E5152ULL));
+ STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+ TMP = _mm_alignr_epi8(TMSG2, TMSG1, 4);
+ TMSG3 = _mm_add_epi32(TMSG3, TMP);
+ TMSG3 = _mm_sha256msg2_epu32(TMSG3, TMSG2);
+ MSG = _mm_shuffle_epi32(MSG, 0x0E);
+ STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
+ TMSG1 = _mm_sha256msg1_epu32(TMSG1, TMSG2);
+
+ // Rounds 28-31
+ MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(0x1429296706CA6351ULL, 0xD5A79147C6E00BF3ULL));
+ STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+ TMP = _mm_alignr_epi8(TMSG3, TMSG2, 4);
+ TMSG0 = _mm_add_epi32(TMSG0, TMP);
+ TMSG0 = _mm_sha256msg2_epu32(TMSG0, TMSG3);
+ MSG = _mm_shuffle_epi32(MSG, 0x0E);
+ STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
+ TMSG2 = _mm_sha256msg1_epu32(TMSG2, TMSG3);
+
+ // Rounds 32-35
+ MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(0x53380D134D2C6DFCULL, 0x2E1B213827B70A85ULL));
+ STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+ TMP = _mm_alignr_epi8(TMSG0, TMSG3, 4);
+ TMSG1 = _mm_add_epi32(TMSG1, TMP);
+ TMSG1 = _mm_sha256msg2_epu32(TMSG1, TMSG0);
+ MSG = _mm_shuffle_epi32(MSG, 0x0E);
+ STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
+ TMSG3 = _mm_sha256msg1_epu32(TMSG3, TMSG0);
+
+ // Rounds 36-39
+ MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(0x92722C8581C2C92EULL, 0x766A0ABB650A7354ULL));
+ STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+ TMP = _mm_alignr_epi8(TMSG1, TMSG0, 4);
+ TMSG2 = _mm_add_epi32(TMSG2, TMP);
+ TMSG2 = _mm_sha256msg2_epu32(TMSG2, TMSG1);
+ MSG = _mm_shuffle_epi32(MSG, 0x0E);
+ STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
+ TMSG0 = _mm_sha256msg1_epu32(TMSG0, TMSG1);
+
+ // Rounds 40-43
+ MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(0xC76C51A3C24B8B70ULL, 0xA81A664BA2BFE8A1ULL));
+ STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+ TMP = _mm_alignr_epi8(TMSG2, TMSG1, 4);
+ TMSG3 = _mm_add_epi32(TMSG3, TMP);
+ TMSG3 = _mm_sha256msg2_epu32(TMSG3, TMSG2);
+ MSG = _mm_shuffle_epi32(MSG, 0x0E);
+ STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
+ TMSG1 = _mm_sha256msg1_epu32(TMSG1, TMSG2);
+
+ // Rounds 44-47
+ MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(0x106AA070F40E3585ULL, 0xD6990624D192E819ULL));
+ STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+ TMP = _mm_alignr_epi8(TMSG3, TMSG2, 4);
+ TMSG0 = _mm_add_epi32(TMSG0, TMP);
+ TMSG0 = _mm_sha256msg2_epu32(TMSG0, TMSG3);
+ MSG = _mm_shuffle_epi32(MSG, 0x0E);
+ STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
+ TMSG2 = _mm_sha256msg1_epu32(TMSG2, TMSG3);
+
+ // Rounds 48-51
+ MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(0x34B0BCB52748774CULL, 0x1E376C0819A4C116ULL));
+ STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+ TMP = _mm_alignr_epi8(TMSG0, TMSG3, 4);
+ TMSG1 = _mm_add_epi32(TMSG1, TMP);
+ TMSG1 = _mm_sha256msg2_epu32(TMSG1, TMSG0);
+ MSG = _mm_shuffle_epi32(MSG, 0x0E);
+ STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
+ TMSG3 = _mm_sha256msg1_epu32(TMSG3, TMSG0);
+
+ // Rounds 52-55
+ MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(0x682E6FF35B9CCA4FULL, 0x4ED8AA4A391C0CB3ULL));
+ STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+ TMP = _mm_alignr_epi8(TMSG1, TMSG0, 4);
+ TMSG2 = _mm_add_epi32(TMSG2, TMP);
+ TMSG2 = _mm_sha256msg2_epu32(TMSG2, TMSG1);
+ MSG = _mm_shuffle_epi32(MSG, 0x0E);
+ STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
+
+ // Rounds 56-59
+ MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(0x8CC7020884C87814ULL, 0x78A5636F748F82EEULL));
+ STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+ TMP = _mm_alignr_epi8(TMSG2, TMSG1, 4);
+ TMSG3 = _mm_add_epi32(TMSG3, TMP);
+ TMSG3 = _mm_sha256msg2_epu32(TMSG3, TMSG2);
+ MSG = _mm_shuffle_epi32(MSG, 0x0E);
+ STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
+
+ // Rounds 60-63
+ MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(0xC67178F2BEF9A3F7ULL, 0xA4506CEB90BEFFFAULL));
+ STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+ MSG = _mm_shuffle_epi32(MSG, 0x0E);
+ STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
+
+ // Add values back to state
+ STATE0 = _mm_add_epi32(STATE0, ABEF_SAVE);
+ STATE1 = _mm_add_epi32(STATE1, CDGH_SAVE);
+
+ input += 64;
+ blocks--;
+ }
+
+ TMP = _mm_shuffle_epi32(STATE0, 0x1B); // FEBA
+ STATE1 = _mm_shuffle_epi32(STATE1, 0xB1); // DCHG
+ STATE0 = _mm_blend_epi16(TMP, STATE1, 0xF0); // DCBA
+ STATE1 = _mm_alignr_epi8(STATE1, TMP, 8); // ABEF
+
+ // Save state
+ _mm_storeu_si128((__m128i*) &state[0], STATE0);
+ _mm_storeu_si128((__m128i*) &state[4], STATE1);
+ }
+
+}
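
As with SHA-1, the SHA-NI code is reached only through the generic compress() dispatcher, so the public API can again be used for a quick known-answer check. A sketch; the expected value is the FIPS 180 SHA-256("abc") digest:

// Check SHA-256 against the "abc" test vector; on CPUs reporting the SHA
// extensions this exercises sha2_compress_x86
#include <botan/hash.h>
#include <botan/hex.h>
#include <iostream>

int main()
   {
   auto sha256 = Botan::HashFunction::create("SHA-256");
   if(!sha256)
      return 1;
   sha256->update("abc");
   const std::string expected =
      "BA7816BF8F01CFEA414140DE5DAE2223B00361A396177A9CB410FF61F20015AD";
   std::cout << (Botan::hex_encode(sha256->final()) == expected ? "ok" : "MISMATCH") << "\n";
   return 0;
   }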
diff --git a/src/tests/data/hash/sha1.vec b/src/tests/data/hash/sha1.vec
index e86650f30..395de6efc 100644
--- a/src/tests/data/hash/sha1.vec
+++ b/src/tests/data/hash/sha1.vec
@@ -1,4 +1,4 @@
-#test cpuid sse2
+#test cpuid sse2 sha
[SHA-160]
In =
diff --git a/src/tests/data/hash/sha2_32.vec b/src/tests/data/hash/sha2_32.vec
index e1f32bfb2..d71f49f20 100644
--- a/src/tests/data/hash/sha2_32.vec
+++ b/src/tests/data/hash/sha2_32.vec
@@ -1,3 +1,5 @@
+#test cpuid sha
+
[SHA-224]
In =
Out = D14A028C2A3A2BC9476102BB288234C415A2B01F828EA62AC5B3E42F
diff --git a/src/tests/tests.cpp b/src/tests/tests.cpp
index 2252cb221..1886aa122 100644
--- a/src/tests/tests.cpp
+++ b/src/tests/tests.cpp
@@ -885,6 +885,8 @@ std::vector<Botan::CPUID::CPUID_bits> map_cpuid_string(const std::string& tok)
return {Botan::CPUID::CPUID_CLMUL_BIT};
if(tok == "avx2")
return {Botan::CPUID::CPUID_AVX2_BIT};
+ if(tok == "sha")
+ return {Botan::CPUID::CPUID_SHA_BIT};
#endif
#if defined(BOTAN_TARGET_CPU_IS_PPC_FAMILY)
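
The "#test cpuid sha" headers added to the .vec files make the test runner repeat those vectors with the named CPUID bit cleared, so both the SHA-NI and portable implementations are exercised. The same cross-check can be done by hand with the APIs that appear in this diff; a sketch follows (on hardware without SHA-NI both runs simply take the portable path):

// Hash the same input with and without the SHA CPUID bit and confirm the
// two code paths agree. Clearing the bit only affects dispatch within this
// process; it does not change the hardware.
#include <botan/hash.h>
#include <botan/cpuid.h>
#include <iostream>

int main()
   {
   const std::string msg = "The quick brown fox jumps over the lazy dog";

   auto h1 = Botan::HashFunction::create("SHA-256");
   h1->update(msg);
   const auto with_sha_ni = h1->final();

   Botan::CPUID::clear_cpuid_bit(Botan::CPUID::CPUID_SHA_BIT);

   auto h2 = Botan::HashFunction::create("SHA-256");
   h2->update(msg);
   const auto portable = h2->final();

   std::cout << (with_sha_ni == portable ? "paths agree" : "MISMATCH") << "\n";
   return 0;
   }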