author     Jack Lloyd <[email protected]>  2019-08-31 08:46:16 -0400
committer  Jack Lloyd <[email protected]>  2019-08-31 13:57:03 -0400
commit     142db684186c6f25fe439d89e1dedbded8e5a715 (patch)
tree       af26fbe0a335bb4aac989f4b0e0904bed54e1b4c /src/lib/utils/simd
parent     6d291015d67c264a92e64d5f4c4e1295f8128435 (diff)
Abstract the AES SSSE3 implementation to support other SIMD
Diffstat (limited to 'src/lib/utils/simd')
-rw-r--r--  src/lib/utils/simd/simd_32.h | 343
1 file changed, 179 insertions(+), 164 deletions(-)
diff --git a/src/lib/utils/simd/simd_32.h b/src/lib/utils/simd/simd_32.h
index 304770587..7f04546ed 100644
--- a/src/lib/utils/simd/simd_32.h
+++ b/src/lib/utils/simd/simd_32.h
@@ -24,7 +24,7 @@
#define BOTAN_SIMD_USE_ALTIVEC
#elif defined(BOTAN_TARGET_SUPPORTS_NEON)
 #include <arm_neon.h>
#define BOTAN_SIMD_USE_NEON
#else
@@ -33,6 +33,16 @@
namespace Botan {
+#if defined(BOTAN_SIMD_USE_SSE2)
+ typedef __m128i native_simd_type;
+#elif defined(BOTAN_SIMD_USE_ALTIVEC)
+ typedef __vector unsigned int native_simd_type;
+#elif defined(BOTAN_SIMD_USE_NEON)
+ typedef uint32x4_t native_simd_type;
+#else
+ typedef struct { uint32_t val[4]; } native_simd_type;
+#endif
+
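
This typedef is the heart of the change: each backend now keeps its register in a single m_simd member of type native_simd_type, so the per-ISA members (m_sse, m_vmx, m_neon, m_scalar) collapse into one name, as every hunk below shows. A minimal sketch of how calling code can mix the wrapper with native intrinsics through the raw() accessor and public native constructor added at the end of this diff (illustrative only, not part of the patch):

   SIMD_4x32 xor_values(const SIMD_4x32& a, const SIMD_4x32& b)
      {
#if defined(BOTAN_SIMD_USE_SSE2)
      // Drop to the native type and back again
      return SIMD_4x32(_mm_xor_si128(a.raw(), b.raw()));
#else
      SIMD_4x32 r = a;
      r ^= b;   // operator^= is defined for every backend
      return r;
#endif
      }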
/**
* 4x32 bit SIMD register
*
@@ -59,16 +69,16 @@ class SIMD_4x32 final
SIMD_4x32() // zero initialized
{
#if defined(BOTAN_SIMD_USE_SSE2)
- m_sse = _mm_setzero_si128();
+ m_simd = _mm_setzero_si128();
#elif defined(BOTAN_SIMD_USE_ALTIVEC)
- m_vmx = vec_splat_u32(0);
+ m_simd = vec_splat_u32(0);
#elif defined(BOTAN_SIMD_USE_NEON)
- m_neon = vdupq_n_u32(0);
+ m_simd = vdupq_n_u32(0);
#else
- m_scalar[0] = 0;
- m_scalar[1] = 0;
- m_scalar[2] = 0;
- m_scalar[3] = 0;
+ m_simd.val[0] = 0;
+ m_simd.val[1] = 0;
+ m_simd.val[2] = 0;
+ m_simd.val[3] = 0;
#endif
}
@@ -78,16 +88,16 @@ class SIMD_4x32 final
explicit SIMD_4x32(const uint32_t B[4])
{
#if defined(BOTAN_SIMD_USE_SSE2)
- m_sse = _mm_loadu_si128(reinterpret_cast<const __m128i*>(B));
+ m_simd = _mm_loadu_si128(reinterpret_cast<const __m128i*>(B));
#elif defined(BOTAN_SIMD_USE_ALTIVEC)
- m_vmx = (__vector unsigned int){B[0], B[1], B[2], B[3]};
+ m_simd = (__vector unsigned int){B[0], B[1], B[2], B[3]};
#elif defined(BOTAN_SIMD_USE_NEON)
- m_neon = vld1q_u32(B);
+ m_simd = vld1q_u32(B);
#else
- m_scalar[0] = B[0];
- m_scalar[1] = B[1];
- m_scalar[2] = B[2];
- m_scalar[3] = B[3];
+ m_simd.val[0] = B[0];
+ m_simd.val[1] = B[1];
+ m_simd.val[2] = B[2];
+ m_simd.val[3] = B[3];
#endif
}
@@ -97,18 +107,18 @@ class SIMD_4x32 final
SIMD_4x32(uint32_t B0, uint32_t B1, uint32_t B2, uint32_t B3)
{
#if defined(BOTAN_SIMD_USE_SSE2)
- m_sse = _mm_set_epi32(B3, B2, B1, B0);
+ m_simd = _mm_set_epi32(B3, B2, B1, B0);
#elif defined(BOTAN_SIMD_USE_ALTIVEC)
- m_vmx = (__vector unsigned int){B0, B1, B2, B3};
+ m_simd = (__vector unsigned int){B0, B1, B2, B3};
#elif defined(BOTAN_SIMD_USE_NEON)
// Better way to do this?
const uint32_t B[4] = { B0, B1, B2, B3 };
- m_neon = vld1q_u32(B);
+ m_simd = vld1q_u32(B);
#else
- m_scalar[0] = B0;
- m_scalar[1] = B1;
- m_scalar[2] = B2;
- m_scalar[3] = B3;
+ m_simd.val[0] = B0;
+ m_simd.val[1] = B1;
+ m_simd.val[2] = B2;
+ m_simd.val[3] = B3;
#endif
}
@@ -127,6 +137,21 @@ class SIMD_4x32 final
}
/**
+ * Load SIMD register with one 8-bit element repeated
+ */
+ static SIMD_4x32 splat_u8(uint8_t B)
+ {
+#if defined(BOTAN_SIMD_USE_SSE2)
+ return SIMD_4x32(_mm_set1_epi8(B));
+#elif defined(BOTAN_SIMD_USE_NEON)
+ return SIMD_4x32(vreinterpretq_u32_u8(vdupq_n_u8(B)));
+#else
+ const uint32_t B4 = make_uint32(B, B, B, B);
+ return SIMD_4x32(B4, B4, B4, B4);
+#endif
+ }
+
+ /**
* Load a SIMD register with little-endian convention
*/
static SIMD_4x32 load_le(const void* in)
@@ -145,7 +170,7 @@ class SIMD_4x32 final
return CPUID::is_big_endian() ? l.bswap() : l;
#else
SIMD_4x32 out;
- Botan::load_le(out.m_scalar, static_cast<const uint8_t*>(in), 4);
+ Botan::load_le(out.m_simd.val, static_cast<const uint8_t*>(in), 4);
return out;
#endif
}
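
A hedged usage sketch for the loaders just shown (splat_u8 and load_le), together with the byte-oriented store_le that appears further down; the values are illustrative:

   const SIMD_4x32 k = SIMD_4x32::splat_u8(0x63);   // every word == 0x63636363

   const uint8_t in[16] = { 1,2,3,4, 5,6,7,8, 9,10,11,12, 13,14,15,16 };
   const SIMD_4x32 v = SIMD_4x32::load_le(in);      // word 0 == 0x04030201

   uint8_t out[16];
   v.store_le(out);                                 // out[i] == in[i] for all i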
@@ -172,11 +197,16 @@ class SIMD_4x32 final
#else
SIMD_4x32 out;
- Botan::load_be(out.m_scalar, static_cast<const uint8_t*>(in), 4);
+ Botan::load_be(out.m_simd.val, static_cast<const uint8_t*>(in), 4);
return out;
#endif
}
+ void store_le(uint32_t out[]) const
+ {
+ this->store_le(reinterpret_cast<uint8_t*>(out));
+ }
+
/**
* Store a SIMD register with little-endian convention
*/
@@ -184,7 +214,7 @@ class SIMD_4x32 final
{
#if defined(BOTAN_SIMD_USE_SSE2)
- _mm_storeu_si128(reinterpret_cast<__m128i*>(out), m_sse);
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(out), m_simd);
#elif defined(BOTAN_SIMD_USE_ALTIVEC)
@@ -192,7 +222,7 @@ class SIMD_4x32 final
__vector unsigned int V;
uint32_t R[4];
} vec;
- vec.V = m_vmx;
+ vec.V = m_simd;
Botan::store_le(out, vec.R[0], vec.R[1], vec.R[2], vec.R[3]);
#elif defined(BOTAN_SIMD_USE_NEON)
@@ -203,10 +233,10 @@ class SIMD_4x32 final
}
else
{
- vst1q_u8(out, vreinterpretq_u8_u32(m_neon));
+ vst1q_u8(out, vreinterpretq_u8_u32(m_simd));
}
#else
- Botan::store_le(out, m_scalar[0], m_scalar[1], m_scalar[2], m_scalar[3]);
+ Botan::store_le(out, m_simd.val[0], m_simd.val[1], m_simd.val[2], m_simd.val[3]);
#endif
}
@@ -225,7 +255,7 @@ class SIMD_4x32 final
__vector unsigned int V;
uint32_t R[4];
} vec;
- vec.V = m_vmx;
+ vec.V = m_simd;
Botan::store_be(out, vec.R[0], vec.R[1], vec.R[2], vec.R[3]);
#elif defined(BOTAN_SIMD_USE_NEON)
@@ -236,15 +266,14 @@ class SIMD_4x32 final
}
else
{
- vst1q_u8(out, vreinterpretq_u8_u32(m_neon));
+ vst1q_u8(out, vreinterpretq_u8_u32(m_simd));
}
#else
- Botan::store_be(out, m_scalar[0], m_scalar[1], m_scalar[2], m_scalar[3]);
+ Botan::store_be(out, m_simd.val[0], m_simd.val[1], m_simd.val[2], m_simd.val[3]);
#endif
}
-
/*
* This is used for SHA-2/SHACAL2
* Return rotr(ROT1) ^ rotr(ROT2) ^ rotr(ROT3)
@@ -268,20 +297,20 @@ class SIMD_4x32 final
#if defined(BOTAN_SIMD_USE_SSE2)
- return SIMD_4x32(_mm_or_si128(_mm_slli_epi32(m_sse, static_cast<int>(ROT)),
- _mm_srli_epi32(m_sse, static_cast<int>(32-ROT))));
+ return SIMD_4x32(_mm_or_si128(_mm_slli_epi32(m_simd, static_cast<int>(ROT)),
+ _mm_srli_epi32(m_simd, static_cast<int>(32-ROT))));
#elif defined(BOTAN_SIMD_USE_ALTIVEC)
const unsigned int r = static_cast<unsigned int>(ROT);
- return SIMD_4x32(vec_rl(m_vmx, (__vector unsigned int){r, r, r, r}));
+ return SIMD_4x32(vec_rl(m_simd, (__vector unsigned int){r, r, r, r}));
#elif defined(BOTAN_SIMD_USE_NEON)
#if defined(BOTAN_TARGET_ARCH_IS_ARM32)
- return SIMD_4x32(vorrq_u32(vshlq_n_u32(m_neon, static_cast<int>(ROT)),
- vshrq_n_u32(m_neon, static_cast<int>(32-ROT))));
+ return SIMD_4x32(vorrq_u32(vshlq_n_u32(m_simd, static_cast<int>(ROT)),
+ vshrq_n_u32(m_simd, static_cast<int>(32-ROT))));
#else
@@ -289,25 +318,25 @@ class SIMD_4x32 final
{
const uint8_t maskb[16] = { 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14 };
const uint8x16_t mask = vld1q_u8(maskb);
- return SIMD_4x32(vreinterpretq_u32_u8(vqtbl1q_u8(vreinterpretq_u8_u32(m_neon), mask)));
+ return SIMD_4x32(vreinterpretq_u32_u8(vqtbl1q_u8(vreinterpretq_u8_u32(m_simd), mask)));
}
else BOTAN_IF_CONSTEXPR(ROT == 16)
{
- return SIMD_4x32(vreinterpretq_u32_u16(vrev32q_u16(vreinterpretq_u16_u32(m_neon))));
+ return SIMD_4x32(vreinterpretq_u32_u16(vrev32q_u16(vreinterpretq_u16_u32(m_simd))));
}
else
{
- return SIMD_4x32(vorrq_u32(vshlq_n_u32(m_neon, static_cast<int>(ROT)),
- vshrq_n_u32(m_neon, static_cast<int>(32-ROT))));
+ return SIMD_4x32(vorrq_u32(vshlq_n_u32(m_simd, static_cast<int>(ROT)),
+ vshrq_n_u32(m_simd, static_cast<int>(32-ROT))));
}
#endif
#else
- return SIMD_4x32(Botan::rotl<ROT>(m_scalar[0]),
- Botan::rotl<ROT>(m_scalar[1]),
- Botan::rotl<ROT>(m_scalar[2]),
- Botan::rotl<ROT>(m_scalar[3]));
+ return SIMD_4x32(Botan::rotl<ROT>(m_simd.val[0]),
+ Botan::rotl<ROT>(m_simd.val[1]),
+ Botan::rotl<ROT>(m_simd.val[2]),
+ Botan::rotl<ROT>(m_simd.val[3]));
#endif
}
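
The matching right rotation is not visible in this excerpt; assuming it is defined in terms of rotl, as elsewhere in Botan, a sketch valid for 0 < ROT < 32:

   template<size_t ROT>
   SIMD_4x32 rotr() const
      {
      return this->rotl<32 - ROT>();
      }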
@@ -373,81 +402,81 @@ class SIMD_4x32 final
void operator+=(const SIMD_4x32& other)
{
#if defined(BOTAN_SIMD_USE_SSE2)
- m_sse = _mm_add_epi32(m_sse, other.m_sse);
+ m_simd = _mm_add_epi32(m_simd, other.m_simd);
#elif defined(BOTAN_SIMD_USE_ALTIVEC)
- m_vmx = vec_add(m_vmx, other.m_vmx);
+ m_simd = vec_add(m_simd, other.m_simd);
#elif defined(BOTAN_SIMD_USE_NEON)
- m_neon = vaddq_u32(m_neon, other.m_neon);
+ m_simd = vaddq_u32(m_simd, other.m_simd);
#else
- m_scalar[0] += other.m_scalar[0];
- m_scalar[1] += other.m_scalar[1];
- m_scalar[2] += other.m_scalar[2];
- m_scalar[3] += other.m_scalar[3];
+ m_simd.val[0] += other.m_simd.val[0];
+ m_simd.val[1] += other.m_simd.val[1];
+ m_simd.val[2] += other.m_simd.val[2];
+ m_simd.val[3] += other.m_simd.val[3];
#endif
}
void operator-=(const SIMD_4x32& other)
{
#if defined(BOTAN_SIMD_USE_SSE2)
- m_sse = _mm_sub_epi32(m_sse, other.m_sse);
+ m_simd = _mm_sub_epi32(m_simd, other.m_simd);
#elif defined(BOTAN_SIMD_USE_ALTIVEC)
- m_vmx = vec_sub(m_vmx, other.m_vmx);
+ m_simd = vec_sub(m_simd, other.m_simd);
#elif defined(BOTAN_SIMD_USE_NEON)
- m_neon = vsubq_u32(m_neon, other.m_neon);
+ m_simd = vsubq_u32(m_simd, other.m_simd);
#else
- m_scalar[0] -= other.m_scalar[0];
- m_scalar[1] -= other.m_scalar[1];
- m_scalar[2] -= other.m_scalar[2];
- m_scalar[3] -= other.m_scalar[3];
+ m_simd.val[0] -= other.m_simd.val[0];
+ m_simd.val[1] -= other.m_simd.val[1];
+ m_simd.val[2] -= other.m_simd.val[2];
+ m_simd.val[3] -= other.m_simd.val[3];
#endif
}
void operator^=(const SIMD_4x32& other)
{
#if defined(BOTAN_SIMD_USE_SSE2)
- m_sse = _mm_xor_si128(m_sse, other.m_sse);
+ m_simd = _mm_xor_si128(m_simd, other.m_simd);
#elif defined(BOTAN_SIMD_USE_ALTIVEC)
- m_vmx = vec_xor(m_vmx, other.m_vmx);
+ m_simd = vec_xor(m_simd, other.m_simd);
#elif defined(BOTAN_SIMD_USE_NEON)
- m_neon = veorq_u32(m_neon, other.m_neon);
+ m_simd = veorq_u32(m_simd, other.m_simd);
#else
- m_scalar[0] ^= other.m_scalar[0];
- m_scalar[1] ^= other.m_scalar[1];
- m_scalar[2] ^= other.m_scalar[2];
- m_scalar[3] ^= other.m_scalar[3];
+ m_simd.val[0] ^= other.m_simd.val[0];
+ m_simd.val[1] ^= other.m_simd.val[1];
+ m_simd.val[2] ^= other.m_simd.val[2];
+ m_simd.val[3] ^= other.m_simd.val[3];
#endif
}
void operator|=(const SIMD_4x32& other)
{
#if defined(BOTAN_SIMD_USE_SSE2)
- m_sse = _mm_or_si128(m_sse, other.m_sse);
+ m_simd = _mm_or_si128(m_simd, other.m_simd);
#elif defined(BOTAN_SIMD_USE_ALTIVEC)
- m_vmx = vec_or(m_vmx, other.m_vmx);
+ m_simd = vec_or(m_simd, other.m_simd);
#elif defined(BOTAN_SIMD_USE_NEON)
- m_neon = vorrq_u32(m_neon, other.m_neon);
+ m_simd = vorrq_u32(m_simd, other.m_simd);
#else
- m_scalar[0] |= other.m_scalar[0];
- m_scalar[1] |= other.m_scalar[1];
- m_scalar[2] |= other.m_scalar[2];
- m_scalar[3] |= other.m_scalar[3];
+ m_simd.val[0] |= other.m_simd.val[0];
+ m_simd.val[1] |= other.m_simd.val[1];
+ m_simd.val[2] |= other.m_simd.val[2];
+ m_simd.val[3] |= other.m_simd.val[3];
#endif
}
void operator&=(const SIMD_4x32& other)
{
#if defined(BOTAN_SIMD_USE_SSE2)
- m_sse = _mm_and_si128(m_sse, other.m_sse);
+ m_simd = _mm_and_si128(m_simd, other.m_simd);
#elif defined(BOTAN_SIMD_USE_ALTIVEC)
- m_vmx = vec_and(m_vmx, other.m_vmx);
+ m_simd = vec_and(m_simd, other.m_simd);
#elif defined(BOTAN_SIMD_USE_NEON)
- m_neon = vandq_u32(m_neon, other.m_neon);
+ m_simd = vandq_u32(m_simd, other.m_simd);
#else
- m_scalar[0] &= other.m_scalar[0];
- m_scalar[1] &= other.m_scalar[1];
- m_scalar[2] &= other.m_scalar[2];
- m_scalar[3] &= other.m_scalar[3];
+ m_simd.val[0] &= other.m_simd.val[0];
+ m_simd.val[1] &= other.m_simd.val[1];
+ m_simd.val[2] &= other.m_simd.val[2];
+ m_simd.val[3] &= other.m_simd.val[3];
#endif
}
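
With the compound operators reduced to the single m_simd member, the SHA-2/SHACAL2 helper described earlier (rotr(ROT1) ^ rotr(ROT2) ^ rotr(ROT3)) can be written portably against the public interface alone. A sketch, with the helper name illustrative and rotr as sketched above; the in-class version may use tighter per-ISA sequences:

   template<size_t R1, size_t R2, size_t R3>
   SIMD_4x32 sha2_sigma(const SIMD_4x32& x)
      {
      SIMD_4x32 r = x.rotr<R1>();
      r ^= x.rotr<R2>();
      r ^= x.rotr<R3>();
      return r;
      }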
@@ -455,34 +484,34 @@ class SIMD_4x32 final
template<int SHIFT> SIMD_4x32 shl() const
{
#if defined(BOTAN_SIMD_USE_SSE2)
- return SIMD_4x32(_mm_slli_epi32(m_sse, SHIFT));
+ return SIMD_4x32(_mm_slli_epi32(m_simd, SHIFT));
#elif defined(BOTAN_SIMD_USE_ALTIVEC)
const unsigned int s = static_cast<unsigned int>(SHIFT);
- return SIMD_4x32(vec_sl(m_vmx, (__vector unsigned int){s, s, s, s}));
+ return SIMD_4x32(vec_sl(m_simd, (__vector unsigned int){s, s, s, s}));
#elif defined(BOTAN_SIMD_USE_NEON)
- return SIMD_4x32(vshlq_n_u32(m_neon, SHIFT));
+ return SIMD_4x32(vshlq_n_u32(m_simd, SHIFT));
#else
- return SIMD_4x32(m_scalar[0] << SHIFT,
- m_scalar[1] << SHIFT,
- m_scalar[2] << SHIFT,
- m_scalar[3] << SHIFT);
+ return SIMD_4x32(m_simd.val[0] << SHIFT,
+ m_simd.val[1] << SHIFT,
+ m_simd.val[2] << SHIFT,
+ m_simd.val[3] << SHIFT);
#endif
}
template<int SHIFT> SIMD_4x32 shr() const
{
#if defined(BOTAN_SIMD_USE_SSE2)
- return SIMD_4x32(_mm_srli_epi32(m_sse, SHIFT));
+ return SIMD_4x32(_mm_srli_epi32(m_simd, SHIFT));
#elif defined(BOTAN_SIMD_USE_ALTIVEC)
const unsigned int s = static_cast<unsigned int>(SHIFT);
- return SIMD_4x32(vec_sr(m_vmx, (__vector unsigned int){s, s, s, s}));
+ return SIMD_4x32(vec_sr(m_simd, (__vector unsigned int){s, s, s, s}));
#elif defined(BOTAN_SIMD_USE_NEON)
- return SIMD_4x32(vshrq_n_u32(m_neon, SHIFT));
+ return SIMD_4x32(vshrq_n_u32(m_simd, SHIFT));
#else
- return SIMD_4x32(m_scalar[0] >> SHIFT, m_scalar[1] >> SHIFT,
- m_scalar[2] >> SHIFT, m_scalar[3] >> SHIFT);
+ return SIMD_4x32(m_simd.val[0] >> SHIFT, m_simd.val[1] >> SHIFT,
+ m_simd.val[2] >> SHIFT, m_simd.val[3] >> SHIFT);
#endif
}
@@ -490,13 +519,13 @@ class SIMD_4x32 final
SIMD_4x32 operator~() const
{
#if defined(BOTAN_SIMD_USE_SSE2)
- return SIMD_4x32(_mm_xor_si128(m_sse, _mm_set1_epi32(0xFFFFFFFF)));
+ return SIMD_4x32(_mm_xor_si128(m_simd, _mm_set1_epi32(0xFFFFFFFF)));
#elif defined(BOTAN_SIMD_USE_ALTIVEC)
- return SIMD_4x32(vec_nor(m_vmx, m_vmx));
+ return SIMD_4x32(vec_nor(m_simd, m_simd));
#elif defined(BOTAN_SIMD_USE_NEON)
- return SIMD_4x32(vmvnq_u32(m_neon));
+ return SIMD_4x32(vmvnq_u32(m_simd));
#else
- return SIMD_4x32(~m_scalar[0], ~m_scalar[1], ~m_scalar[2], ~m_scalar[3]);
+ return SIMD_4x32(~m_simd.val[0], ~m_simd.val[1], ~m_simd.val[2], ~m_simd.val[3]);
#endif
}
@@ -504,21 +533,21 @@ class SIMD_4x32 final
SIMD_4x32 andc(const SIMD_4x32& other) const
{
#if defined(BOTAN_SIMD_USE_SSE2)
- return SIMD_4x32(_mm_andnot_si128(m_sse, other.m_sse));
+ return SIMD_4x32(_mm_andnot_si128(m_simd, other.m_simd));
#elif defined(BOTAN_SIMD_USE_ALTIVEC)
/*
AltiVec does arg1 & ~arg2 rather than SSE's ~arg1 & arg2
so swap the arguments
*/
- return SIMD_4x32(vec_andc(other.m_vmx, m_vmx));
+ return SIMD_4x32(vec_andc(other.m_simd, m_simd));
#elif defined(BOTAN_SIMD_USE_NEON)
// NEON is also a & ~b
- return SIMD_4x32(vbicq_u32(other.m_neon, m_neon));
+ return SIMD_4x32(vbicq_u32(other.m_simd, m_simd));
#else
- return SIMD_4x32((~m_scalar[0]) & other.m_scalar[0],
- (~m_scalar[1]) & other.m_scalar[1],
- (~m_scalar[2]) & other.m_scalar[2],
- (~m_scalar[3]) & other.m_scalar[3]);
+ return SIMD_4x32((~m_simd.val[0]) & other.m_simd.val[0],
+ (~m_simd.val[1]) & other.m_simd.val[1],
+ (~m_simd.val[2]) & other.m_simd.val[2],
+ (~m_simd.val[3]) & other.m_simd.val[3]);
#endif
}
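
Since the three ISAs disagree on argument order for and-not, the convention andc() settles on is worth pinning down: a.andc(b) computes (~a) & b on every backend. A sketch:

   const SIMD_4x32 a(0xF0F0F0F0, 0, 0, 0);
   const SIMD_4x32 b(0xFFFFFFFF, 0, 0, 0);

   uint8_t r[16];
   a.andc(b).store_le(r);   // r[0..3] == 0x0F, i.e. (~a) & b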
@@ -529,7 +558,7 @@ class SIMD_4x32 final
{
#if defined(BOTAN_SIMD_USE_SSE2)
- __m128i T = m_sse;
+ __m128i T = m_simd;
T = _mm_shufflehi_epi16(T, _MM_SHUFFLE(2, 3, 0, 1));
T = _mm_shufflelo_epi16(T, _MM_SHUFFLE(2, 3, 0, 1));
return SIMD_4x32(_mm_or_si128(_mm_srli_epi16(T, 8), _mm_slli_epi16(T, 8)));
@@ -541,20 +570,20 @@ class SIMD_4x32 final
uint32_t R[4];
} vec;
- vec.V = m_vmx;
+ vec.V = m_simd;
bswap_4(vec.R);
return SIMD_4x32(vec.R[0], vec.R[1], vec.R[2], vec.R[3]);
#elif defined(BOTAN_SIMD_USE_NEON)
- return SIMD_4x32(vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(m_neon))));
+ return SIMD_4x32(vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(m_simd))));
#else
// scalar
- return SIMD_4x32(reverse_bytes(m_scalar[0]),
- reverse_bytes(m_scalar[1]),
- reverse_bytes(m_scalar[2]),
- reverse_bytes(m_scalar[3]));
+ return SIMD_4x32(reverse_bytes(m_simd.val[0]),
+ reverse_bytes(m_simd.val[1]),
+ reverse_bytes(m_simd.val[2]),
+ reverse_bytes(m_simd.val[3]));
#endif
}
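
A sketch of the invariant bswap() provides, relating the two load conventions (values illustrative):

   const uint8_t buf[16] = { 1,2,3,4, 5,6,7,8, 9,10,11,12, 13,14,15,16 };
   const SIMD_4x32 le = SIMD_4x32::load_le(buf);   // word 0 == 0x04030201
   const SIMD_4x32 be = SIMD_4x32::load_be(buf);   // word 0 == 0x01020304
   // le.bswap() holds the same four words as be, on every backend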
@@ -565,59 +594,59 @@ class SIMD_4x32 final
SIMD_4x32& B2, SIMD_4x32& B3)
{
#if defined(BOTAN_SIMD_USE_SSE2)
- const __m128i T0 = _mm_unpacklo_epi32(B0.m_sse, B1.m_sse);
- const __m128i T1 = _mm_unpacklo_epi32(B2.m_sse, B3.m_sse);
- const __m128i T2 = _mm_unpackhi_epi32(B0.m_sse, B1.m_sse);
- const __m128i T3 = _mm_unpackhi_epi32(B2.m_sse, B3.m_sse);
-
- B0.m_sse = _mm_unpacklo_epi64(T0, T1);
- B1.m_sse = _mm_unpackhi_epi64(T0, T1);
- B2.m_sse = _mm_unpacklo_epi64(T2, T3);
- B3.m_sse = _mm_unpackhi_epi64(T2, T3);
+ const __m128i T0 = _mm_unpacklo_epi32(B0.m_simd, B1.m_simd);
+ const __m128i T1 = _mm_unpacklo_epi32(B2.m_simd, B3.m_simd);
+ const __m128i T2 = _mm_unpackhi_epi32(B0.m_simd, B1.m_simd);
+ const __m128i T3 = _mm_unpackhi_epi32(B2.m_simd, B3.m_simd);
+
+ B0.m_simd = _mm_unpacklo_epi64(T0, T1);
+ B1.m_simd = _mm_unpackhi_epi64(T0, T1);
+ B2.m_simd = _mm_unpacklo_epi64(T2, T3);
+ B3.m_simd = _mm_unpackhi_epi64(T2, T3);
#elif defined(BOTAN_SIMD_USE_ALTIVEC)
- const __vector unsigned int T0 = vec_mergeh(B0.m_vmx, B2.m_vmx);
- const __vector unsigned int T1 = vec_mergeh(B1.m_vmx, B3.m_vmx);
- const __vector unsigned int T2 = vec_mergel(B0.m_vmx, B2.m_vmx);
- const __vector unsigned int T3 = vec_mergel(B1.m_vmx, B3.m_vmx);
-
- B0.m_vmx = vec_mergeh(T0, T1);
- B1.m_vmx = vec_mergel(T0, T1);
- B2.m_vmx = vec_mergeh(T2, T3);
- B3.m_vmx = vec_mergel(T2, T3);
+ const __vector unsigned int T0 = vec_mergeh(B0.m_simd, B2.m_simd);
+ const __vector unsigned int T1 = vec_mergeh(B1.m_simd, B3.m_simd);
+ const __vector unsigned int T2 = vec_mergel(B0.m_simd, B2.m_simd);
+ const __vector unsigned int T3 = vec_mergel(B1.m_simd, B3.m_simd);
+
+ B0.m_simd = vec_mergeh(T0, T1);
+ B1.m_simd = vec_mergel(T0, T1);
+ B2.m_simd = vec_mergeh(T2, T3);
+ B3.m_simd = vec_mergel(T2, T3);
#elif defined(BOTAN_SIMD_USE_NEON)
#if defined(BOTAN_TARGET_ARCH_IS_ARM32)
- const uint32x4x2_t T0 = vzipq_u32(B0.m_neon, B2.m_neon);
- const uint32x4x2_t T1 = vzipq_u32(B1.m_neon, B3.m_neon);
+ const uint32x4x2_t T0 = vzipq_u32(B0.m_simd, B2.m_simd);
+ const uint32x4x2_t T1 = vzipq_u32(B1.m_simd, B3.m_simd);
const uint32x4x2_t O0 = vzipq_u32(T0.val[0], T1.val[0]);
const uint32x4x2_t O1 = vzipq_u32(T0.val[1], T1.val[1]);
- B0.m_neon = O0.val[0];
- B1.m_neon = O0.val[1];
- B2.m_neon = O1.val[0];
- B3.m_neon = O1.val[1];
+ B0.m_simd = O0.val[0];
+ B1.m_simd = O0.val[1];
+ B2.m_simd = O1.val[0];
+ B3.m_simd = O1.val[1];
#elif defined(BOTAN_TARGET_ARCH_IS_ARM64)
- const uint32x4_t T0 = vzip1q_u32(B0.m_neon, B2.m_neon);
- const uint32x4_t T2 = vzip2q_u32(B0.m_neon, B2.m_neon);
+ const uint32x4_t T0 = vzip1q_u32(B0.m_simd, B2.m_simd);
+ const uint32x4_t T2 = vzip2q_u32(B0.m_simd, B2.m_simd);
- const uint32x4_t T1 = vzip1q_u32(B1.m_neon, B3.m_neon);
- const uint32x4_t T3 = vzip2q_u32(B1.m_neon, B3.m_neon);
+ const uint32x4_t T1 = vzip1q_u32(B1.m_simd, B3.m_simd);
+ const uint32x4_t T3 = vzip2q_u32(B1.m_simd, B3.m_simd);
- B0.m_neon = vzip1q_u32(T0, T1);
- B1.m_neon = vzip2q_u32(T0, T1);
+ B0.m_simd = vzip1q_u32(T0, T1);
+ B1.m_simd = vzip2q_u32(T0, T1);
- B2.m_neon = vzip1q_u32(T2, T3);
- B3.m_neon = vzip2q_u32(T2, T3);
+ B2.m_simd = vzip1q_u32(T2, T3);
+ B3.m_simd = vzip2q_u32(T2, T3);
#endif
#else
// scalar
- SIMD_4x32 T0(B0.m_scalar[0], B1.m_scalar[0], B2.m_scalar[0], B3.m_scalar[0]);
- SIMD_4x32 T1(B0.m_scalar[1], B1.m_scalar[1], B2.m_scalar[1], B3.m_scalar[1]);
- SIMD_4x32 T2(B0.m_scalar[2], B1.m_scalar[2], B2.m_scalar[2], B3.m_scalar[2]);
- SIMD_4x32 T3(B0.m_scalar[3], B1.m_scalar[3], B2.m_scalar[3], B3.m_scalar[3]);
+ SIMD_4x32 T0(B0.m_simd.val[0], B1.m_simd.val[0], B2.m_simd.val[0], B3.m_simd.val[0]);
+ SIMD_4x32 T1(B0.m_simd.val[1], B1.m_simd.val[1], B2.m_simd.val[1], B3.m_simd.val[1]);
+ SIMD_4x32 T2(B0.m_simd.val[2], B1.m_simd.val[2], B2.m_simd.val[2], B3.m_simd.val[2]);
+ SIMD_4x32 T3(B0.m_simd.val[3], B1.m_simd.val[3], B2.m_simd.val[3], B3.m_simd.val[3]);
B0 = T0;
B1 = T1;
@@ -626,25 +655,11 @@ class SIMD_4x32 final
#endif
}
- private:
+ native_simd_type raw() const { return m_simd; }
-#if defined(BOTAN_SIMD_USE_SSE2)
- explicit SIMD_4x32(__m128i in) : m_sse(in) {}
-#elif defined(BOTAN_SIMD_USE_ALTIVEC)
- explicit SIMD_4x32(__vector unsigned int in) : m_vmx(in) {}
-#elif defined(BOTAN_SIMD_USE_NEON)
- explicit SIMD_4x32(uint32x4_t in) : m_neon(in) {}
-#endif
-
-#if defined(BOTAN_SIMD_USE_SSE2)
- __m128i m_sse;
-#elif defined(BOTAN_SIMD_USE_ALTIVEC)
- __vector unsigned int m_vmx;
-#elif defined(BOTAN_SIMD_USE_NEON)
- uint32x4_t m_neon;
-#else
- uint32_t m_scalar[4];
-#endif
+ explicit SIMD_4x32(native_simd_type x) : m_simd(x) {}
+ private:
+ native_simd_type m_simd;
};
}
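
Finally, a hedged sketch of the 4x4 transpose and the newly public raw()/native constructor that this abstraction exists for (the vperm AES code built on top of them lives in other files of this commit; the shuffle below is illustrative only):

   SIMD_4x32 r0( 0,  1,  2,  3);
   SIMD_4x32 r1( 4,  5,  6,  7);
   SIMD_4x32 r2( 8,  9, 10, 11);
   SIMD_4x32 r3(12, 13, 14, 15);

   SIMD_4x32::transpose(r0, r1, r2, r3);
   // r0 == {0,4,8,12}, r1 == {1,5,9,13}, r2 == {2,6,10,14}, r3 == {3,7,11,15}

#if defined(BOTAN_SIMD_USE_SSE2)
   // Native code can now take and return the wrapper without friend access
   const __m128i t = _mm_shuffle_epi32(r0.raw(), 0x1B);   // reverse word order
   const SIMD_4x32 rev(t);                                // {12, 8, 4, 0}
#endif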