author    Jack Lloyd <[email protected]>  2017-01-17 16:28:20 -0500
committer Jack Lloyd <[email protected]>  2017-05-20 11:11:27 -0400
commit    455a39f70e6de8376f78f318f04d07af7d245be3
tree      d71becdefdb712c529bc04218ae51b44b46f1b59
parent    b7200c05c6fe841cd5f4a5942a5be3d63124914e
Add ARMv8 SHA-1 support
Based on a patch from Jeffrey Walton in GH #840. Only tested in qemu so far.
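For context, a minimal usage sketch (not part of this commit): callers reach
the new compress function through the ordinary HashFunction interface, and
CPUID::has_arm_sha1() selects the ARMv8 path at runtime. The sketch assumes
the usual Botan 2.x API (HashFunction::create, hex_encode).

#include <botan/hash.h>
#include <botan/hex.h>
#include <iostream>

int main()
   {
   // "SHA-1" resolves to SHA_160; on ARMv8 with the SHA-1 extension,
   // compress_n dispatches to sha1_armv8_compress_n from this patch
   auto sha1 = Botan::HashFunction::create("SHA-1");
   sha1->update("abc");
   std::cout << Botan::hex_encode(sha1->final()) << "\n";
   return 0;
   }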
-rw-r--r--  src/lib/hash/sha1/sha160.cpp                    7
-rw-r--r--  src/lib/hash/sha1/sha160.h                      6
-rw-r--r--  src/lib/hash/sha1/sha1_armv8/info.txt           6
-rw-r--r--  src/lib/hash/sha1/sha1_armv8/sha1_armv8.cpp   203
4 files changed, 222 insertions(+), 0 deletions(-)
diff --git a/src/lib/hash/sha1/sha160.cpp b/src/lib/hash/sha1/sha160.cpp
index 13f9c24d7..6ebdba73f 100644
--- a/src/lib/hash/sha1/sha160.cpp
+++ b/src/lib/hash/sha1/sha160.cpp
@@ -68,6 +68,13 @@ void SHA_160::compress_n(const uint8_t input[], size_t blocks)
}
#endif
+#if defined(BOTAN_HAS_SHA1_ARMV8)
+ if(CPUID::has_arm_sha1())
+ {
+ return sha1_armv8_compress_n(m_digest, input, blocks);
+ }
+#endif
+
#if defined(BOTAN_HAS_SHA1_SSE2)
if(CPUID::has_sse2())
{
diff --git a/src/lib/hash/sha1/sha160.h b/src/lib/hash/sha1/sha160.h
index 7333ca827..f2ed61b64 100644
--- a/src/lib/hash/sha1/sha160.h
+++ b/src/lib/hash/sha1/sha160.h
@@ -32,6 +32,12 @@ class BOTAN_DLL SHA_160 final : public MDx_HashFunction
private:
void compress_n(const uint8_t[], size_t blocks) override;
+#if defined(BOTAN_HAS_SHA1_ARMV8)
+ static void sha1_armv8_compress_n(secure_vector<uint32_t>& digest,
+ const uint8_t blocks[],
+ size_t block_count);
+#endif
+
#if defined(BOTAN_HAS_SHA1_SSE2)
static void sse2_compress_n(secure_vector<uint32_t>& digest,
const uint8_t blocks[],
diff --git a/src/lib/hash/sha1/sha1_armv8/info.txt b/src/lib/hash/sha1/sha1_armv8/info.txt
new file mode 100644
index 000000000..7377a938a
--- /dev/null
+++ b/src/lib/hash/sha1/sha1_armv8/info.txt
@@ -0,0 +1,6 @@
+define SHA1_ARMV8 20170117
+
+<arch>
+arm32
+arm64
+</arch>
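The info.txt above registers the module with the build system: "define
SHA1_ARMV8 20170117" becomes the BOTAN_HAS_SHA1_ARMV8 macro tested in
sha160.cpp and sha160.h, and the <arch> block limits the module to ARM
targets. Assuming the usual configure.py options, the module could also be
excluded at build time, e.g.:

./configure.py --disable-modules=sha1_armv8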
diff --git a/src/lib/hash/sha1/sha1_armv8/sha1_armv8.cpp b/src/lib/hash/sha1/sha1_armv8/sha1_armv8.cpp
new file mode 100644
index 000000000..5ff921003
--- /dev/null
+++ b/src/lib/hash/sha1/sha1_armv8/sha1_armv8.cpp
@@ -0,0 +1,203 @@
+/*
+* SHA-1 using CPU instructions in ARMv8
+*
+* Contributed by Jeffrey Walton. Based on public domain code by
+* Johannes Schneiders, Skip Hovsmith and Barry O'Rourke.
+*
+* Botan is released under the Simplified BSD License (see license.txt)
+*/
+
+#include <botan/sha160.h>
+#include <arm_neon.h>
+
+namespace Botan {
+
+/*
+* SHA-1 using CPU instructions in ARMv8
+*/
+//static
+BOTAN_FUNC_ISA("+crypto")
+void SHA_160::sha1_armv8_compress_n(secure_vector<uint32_t>& digest, const uint8_t input[], size_t blocks)
+ {
+ uint32x4_t C0, C1, C2, C3;
+ uint32x4_t ABCD, ABCD_SAVED;
+ uint32_t E0, E0_SAVED, E1;
+
+ // Load initial values
+ C0 = vdupq_n_u32(0x5A827999);
+ C1 = vdupq_n_u32(0x6ED9EBA1);
+ C2 = vdupq_n_u32(0x8F1BBCDC);
+ C3 = vdupq_n_u32(0xCA62C1D6);
+
+ ABCD = vld1q_u32(&digest[0]);
+ E0 = digest[4];
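+ // Lanes 0..3 of ABCD hold the state words a,b,c,d; e is carried
+ // separately, since the SHA1C/SHA1P/SHA1M instructions take it as
+ // a scalar operand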
+
+ while (blocks)
+ {
+ uint32x4_t MSG0, MSG1, MSG2, MSG3;
+ uint32x4_t TMP0, TMP1;
+
+ // Save current hash
+ ABCD_SAVED = ABCD;
+ E0_SAVED = E0;
+
+ // Intermediate void* cast due to http://llvm.org/bugs/show_bug.cgi?id=20670
+ MSG0 = vld1q_u32((const uint32_t*)(const void*)(input + 0));
+ MSG1 = vld1q_u32((const uint32_t*)(const void*)(input + 16));
+ MSG2 = vld1q_u32((const uint32_t*)(const void*)(input + 32));
+ MSG3 = vld1q_u32((const uint32_t*)(const void*)(input + 48));
+
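+ // SHA-1 message words are big-endian, so byte-swap each 32-bit word
+ // (this path assumes a little-endian target)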
+ MSG0 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG0)));
+ MSG1 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG1)));
+ MSG2 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG2)));
+ MSG3 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG3)));
+
+ TMP0 = vaddq_u32(MSG0, C0);
+ TMP1 = vaddq_u32(MSG1, C0);
+
+ // Rounds 0-3
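+ // vsha1h_u32(a) computes rol(a,30), which becomes the e input of the
+ // next four-round group; vsha1cq_u32 performs four rounds using the
+ // Choose function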
+ E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
+ ABCD = vsha1cq_u32(ABCD, E0, TMP0);
+ TMP0 = vaddq_u32(MSG2, C0);
+ MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);
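+ // vsha1su0q/vsha1su1q extend the message schedule, deriving the next
+ // four W words from the previous sixteen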
+
+ // Rounds 4-7
+ E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
+ ABCD = vsha1cq_u32(ABCD, E1, TMP1);
+ TMP1 = vaddq_u32(MSG3, C0);
+ MSG0 = vsha1su1q_u32(MSG0, MSG3);
+ MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);
+
+ // Rounds 8-11
+ E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
+ ABCD = vsha1cq_u32(ABCD, E0, TMP0);
+ TMP0 = vaddq_u32(MSG0, C0);
+ MSG1 = vsha1su1q_u32(MSG1, MSG0);
+ MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);
+
+ // Rounds 12-15
+ E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
+ ABCD = vsha1cq_u32(ABCD, E1, TMP1);
+ TMP1 = vaddq_u32(MSG1, C1);
+ MSG2 = vsha1su1q_u32(MSG2, MSG1);
+ MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);
+
+ // Rounds 16-19
+ E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
+ ABCD = vsha1cq_u32(ABCD, E0, TMP0);
+ TMP0 = vaddq_u32(MSG2, C1);
+ MSG3 = vsha1su1q_u32(MSG3, MSG2);
+ MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);
+
+ // Rounds 20-23
+ E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
+ ABCD = vsha1pq_u32(ABCD, E1, TMP1);
+ TMP1 = vaddq_u32(MSG3, C1);
+ MSG0 = vsha1su1q_u32(MSG0, MSG3);
+ MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);
+
+ // Rounds 24-27
+ E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
+ ABCD = vsha1pq_u32(ABCD, E0, TMP0);
+ TMP0 = vaddq_u32(MSG0, C1);
+ MSG1 = vsha1su1q_u32(MSG1, MSG0);
+ MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);
+
+ // Rounds 28-31
+ E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
+ ABCD = vsha1pq_u32(ABCD, E1, TMP1);
+ TMP1 = vaddq_u32(MSG1, C1);
+ MSG2 = vsha1su1q_u32(MSG2, MSG1);
+ MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);
+
+ // Rounds 32-35
+ E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
+ ABCD = vsha1pq_u32(ABCD, E0, TMP0);
+ TMP0 = vaddq_u32(MSG2, C2);
+ MSG3 = vsha1su1q_u32(MSG3, MSG2);
+ MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);
+
+ // Rounds 36-39
+ E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
+ ABCD = vsha1pq_u32(ABCD, E1, TMP1);
+ TMP1 = vaddq_u32(MSG3, C2);
+ MSG0 = vsha1su1q_u32(MSG0, MSG3);
+ MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);
+
+ // Rounds 40-43
+ E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
+ ABCD = vsha1mq_u32(ABCD, E0, TMP0);
+ TMP0 = vaddq_u32(MSG0, C2);
+ MSG1 = vsha1su1q_u32(MSG1, MSG0);
+ MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);
+
+ // Rounds 44-47
+ E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
+ ABCD = vsha1mq_u32(ABCD, E1, TMP1);
+ TMP1 = vaddq_u32(MSG1, C2);
+ MSG2 = vsha1su1q_u32(MSG2, MSG1);
+ MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);
+
+ // Rounds 48-51
+ E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
+ ABCD = vsha1mq_u32(ABCD, E0, TMP0);
+ TMP0 = vaddq_u32(MSG2, C2);
+ MSG3 = vsha1su1q_u32(MSG3, MSG2);
+ MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);
+
+ // Rounds 52-55
+ E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
+ ABCD = vsha1mq_u32(ABCD, E1, TMP1);
+ TMP1 = vaddq_u32(MSG3, C3);
+ MSG0 = vsha1su1q_u32(MSG0, MSG3);
+ MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);
+
+ // Rounds 56-59
+ E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
+ ABCD = vsha1mq_u32(ABCD, E0, TMP0);
+ TMP0 = vaddq_u32(MSG0, C3);
+ MSG1 = vsha1su1q_u32(MSG1, MSG0);
+ MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);
+
+ // Rounds 60-63
+ E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
+ ABCD = vsha1pq_u32(ABCD, E1, TMP1);
+ TMP1 = vaddq_u32(MSG1, C3);
+ MSG2 = vsha1su1q_u32(MSG2, MSG1);
+ MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);
+
+ // Rounds 64-67
+ E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
+ ABCD = vsha1pq_u32(ABCD, E0, TMP0);
+ TMP0 = vaddq_u32(MSG2, C3);
+ MSG3 = vsha1su1q_u32(MSG3, MSG2);
+ MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);
+
+ // Rounds 68-71
+ E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
+ ABCD = vsha1pq_u32(ABCD, E1, TMP1);
+ TMP1 = vaddq_u32(MSG3, C3);
+ MSG0 = vsha1su1q_u32(MSG0, MSG3);
+
+ // Rounds 72-75
+ E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
+ ABCD = vsha1pq_u32(ABCD, E0, TMP0);
+
+ // Rounds 76-79
+ E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
+ ABCD = vsha1pq_u32(ABCD, E1, TMP1);
+
+ // Add state back
+ E0 += E0_SAVED;
+ ABCD = vaddq_u32(ABCD_SAVED, ABCD);
+
+ input += 64;
+ blocks--;
+ }
+
+ // Save digest
+ vst1q_u32(&digest[0], ABCD);
+ digest[4] = E0;
+ }
+
+}
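For reference, a scalar model (an illustration, not part of the patch) of the
four-round group computed by one vsha1cq_u32 call, following the ARMv8
instruction semantics: lane 0 of ABCD is a, and the e output that SHA1C does
not return equals rol(a,30) of the input a, which is what vsha1h_u32
recovers. SHA1P and SHA1M differ only in the round function (Parity b^c^d
and Majority, respectively).

#include <cstdint>

static inline uint32_t rol32(uint32_t x, int n)
   {
   return (x << n) | (x >> (32 - n));
   }

// Four SHA-1 rounds with the Choose function, as one SHA1C performs;
// wk holds the four message words with the round constant already added
void sha1c_model(uint32_t abcd[4], uint32_t e, const uint32_t wk[4])
   {
   uint32_t a = abcd[0], b = abcd[1], c = abcd[2], d = abcd[3];
   for(int i = 0; i != 4; ++i)
      {
      const uint32_t f = (b & c) | (~b & d); // Choose
      const uint32_t t = rol32(a, 5) + f + e + wk[i];
      e = d;
      d = c;
      c = rol32(b, 30);
      b = a;
      a = t;
      }
   abcd[0] = a; abcd[1] = b; abcd[2] = c; abcd[3] = d;
   }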