author     lloyd <[email protected]>    2009-10-29 07:54:52 +0000
committer  lloyd <[email protected]>    2009-10-29 07:54:52 +0000
commit     077ad13c6f97ab79ecb585c0f584bb8963096098 (patch)
tree       6129c91e2944f831098ae7cbf52e85ff32e7620c
parent     0cf1ad7aca8506bbd541641cb1ebc0c73c62bcd2 (diff)
parent     1419961fbc1ef832940d0f5184c3d71797270ef9 (diff)

propagate from branch 'net.randombit.botan' (head 4fd7eb9630271d3c1dfed21987ef864680d4ce7b)
to branch 'net.randombit.botan.general-simd' (head 91df868149cdc4754d340e6103028acc82182609)
-rw-r--r--  doc/examples/cpuid.cpp | 2
-rw-r--r--  doc/log.txt | 2
-rw-r--r--  src/algo_factory/prov_weight.cpp | 2
-rw-r--r--  src/block/serpent_simd/info.txt | 7
-rw-r--r--  src/block/serpent_simd/serp_simd.cpp (renamed from src/block/serpent_sse2/serp_sse2.cpp) | 154
-rw-r--r--  src/block/serpent_simd/serp_simd.h (renamed from src/block/serpent_sse2/serp_sse2.h) | 10
-rw-r--r--  src/block/serpent_simd/serp_simd_sbox.h | 426
-rw-r--r--  src/block/serpent_sse2/info.txt | 6
-rw-r--r--  src/block/serpent_sse2/serp_sse2_sbox.h | 434
-rw-r--r--  src/block/xtea/xtea.h | 2
-rw-r--r--  src/block/xtea_simd/info.txt | 14
-rw-r--r--  src/block/xtea_simd/xtea_simd.cpp | 124
-rw-r--r--  src/block/xtea_simd/xtea_simd.h | 28
-rw-r--r--  src/engine/simd_engine/info.txt | 3
-rw-r--r--  src/engine/simd_engine/simd_engine.cpp | 54
-rw-r--r--  src/engine/simd_engine/simd_engine.h (renamed from src/engine/sse2_eng/eng_sse2.h) | 10
-rw-r--r--  src/engine/sse2_eng/eng_sse2.cpp | 51
-rw-r--r--  src/engine/sse2_eng/info.txt | 21
-rw-r--r--  src/hash/sha1/sha160.cpp | 74
-rw-r--r--  src/hash/sha1_sse2/info.txt | 15
-rw-r--r--  src/hash/sha1_sse2/sha1_sse2.cpp | 267
-rw-r--r--  src/hash/sha1_sse2/sha1_sse2_imp.cpp | 304
-rw-r--r--  src/libstate/libstate.cpp | 8
-rw-r--r--  src/utils/cpuid.cpp | 60
-rw-r--r--  src/utils/cpuid.h | 1
-rw-r--r--  src/utils/simd_32/info.txt | 16
-rw-r--r--  src/utils/simd_32/simd_32.h | 32
-rw-r--r--  src/utils/simd_32/simd_altivec.h | 202
-rw-r--r--  src/utils/simd_32/simd_scalar.h | 202
-rw-r--r--  src/utils/simd_32/simd_sse.h | 156
30 files changed, 1723 insertions(+), 964 deletions(-)
diff --git a/doc/examples/cpuid.cpp b/doc/examples/cpuid.cpp
index 59940b500..1bdee787c 100644
--- a/doc/examples/cpuid.cpp
+++ b/doc/examples/cpuid.cpp
@@ -12,4 +12,6 @@ int main()
printf("SSSE3 %d\n", CPUID::has_ssse3());
printf("SSE41 %d\n", CPUID::has_sse41());
printf("SSE42 %d\n", CPUID::has_sse42());
+
+ printf("AltiVec %d\n", CPUID::has_altivec());
}
diff --git a/doc/log.txt b/doc/log.txt
index 1d219eb1c..84379c3e8 100644
--- a/doc/log.txt
+++ b/doc/log.txt
@@ -1,5 +1,7 @@
* 1.9.2-dev, ????-??-??
+ - Add SIMD version of XTEA
+ - Support both SSE2 and AltiVec SIMD for Serpent and XTEA
* 1.9.1, 2009-10-23
- Better support for Python and Perl wrappers
diff --git a/src/algo_factory/prov_weight.cpp b/src/algo_factory/prov_weight.cpp
index a55a8b1e6..d7e84a323 100644
--- a/src/algo_factory/prov_weight.cpp
+++ b/src/algo_factory/prov_weight.cpp
@@ -22,7 +22,7 @@ u32bit static_provider_weight(const std::string& prov_name)
if(prov_name == "core") return 5;
if(prov_name == "ia32") return 6;
if(prov_name == "amd64") return 7;
- if(prov_name == "sse2") return 8;
+ if(prov_name == "simd") return 8;
if(prov_name == "openssl") return 2;
if(prov_name == "gmp") return 1;
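
The weight table above is what ultimately picks between several registered implementations of the same algorithm: the renamed "simd" provider keeps the old "sse2" weight of 8, so it is preferred over "core", "ia32" and "amd64" whenever a SIMD build of a cipher exists. A self-contained sketch of that highest-weight-wins rule follows; it is illustrative only (provider_weight() and best_provider() are stand-ins, not Botan functions, and the real selection happens inside Algorithm_Factory):

#include <string>
#include <vector>

/*
* Illustrative only: a self-contained stand-in for the weights above,
* showing the highest-weight-wins rule. Not Botan's actual factory code.
*/
unsigned provider_weight(const std::string& prov)
   {
   if(prov == "core")    return 5;
   if(prov == "ia32")    return 6;
   if(prov == "amd64")   return 7;
   if(prov == "simd")    return 8;
   if(prov == "openssl") return 2;
   if(prov == "gmp")     return 1;
   return 0; // unknown providers rank last
   }

std::string best_provider(const std::vector<std::string>& providers)
   {
   std::string best;
   unsigned best_weight = 0;
   for(std::vector<std::string>::size_type i = 0; i != providers.size(); ++i)
      {
      const unsigned w = provider_weight(providers[i]);
      if(w > best_weight) { best_weight = w; best = providers[i]; }
      }
   return best;
   }

With { "core", "simd" } both offering Serpent, best_provider() returns "simd", which is why the SIMD implementation is picked up automatically once the simd_engine module is compiled in.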
diff --git a/src/block/serpent_simd/info.txt b/src/block/serpent_simd/info.txt
new file mode 100644
index 000000000..d65b41235
--- /dev/null
+++ b/src/block/serpent_simd/info.txt
@@ -0,0 +1,7 @@
+define SERPENT_SIMD
+
+<requires>
+serpent
+simd_32
+simd_engine
+</requires>
diff --git a/src/block/serpent_sse2/serp_sse2.cpp b/src/block/serpent_simd/serp_simd.cpp
index c51bb69ab..b394b0c26 100644
--- a/src/block/serpent_sse2/serp_sse2.cpp
+++ b/src/block/serpent_simd/serp_simd.cpp
@@ -1,99 +1,71 @@
/*
-* Serpent (SSE2)
+* Serpent (SIMD)
* (C) 2009 Jack Lloyd
*
* Distributed under the terms of the Botan license
*/
-#include <botan/serp_sse2.h>
-#include <botan/serp_sse2_sbox.h>
+#include <botan/serp_simd.h>
+#include <botan/serp_simd_sbox.h>
+#include <botan/simd_32.h>
#include <botan/loadstor.h>
-#include <emmintrin.h>
namespace Botan {
namespace {
-#define key_xor(round, B0, B1, B2, B3) \
- do { \
- __m128i key = _mm_loadu_si128(keys + round); \
- B0 = _mm_xor_si128(B0, _mm_shuffle_epi32(key, _MM_SHUFFLE(0,0,0,0))); \
- B1 = _mm_xor_si128(B1, _mm_shuffle_epi32(key, _MM_SHUFFLE(1,1,1,1))); \
- B2 = _mm_xor_si128(B2, _mm_shuffle_epi32(key, _MM_SHUFFLE(2,2,2,2))); \
- B3 = _mm_xor_si128(B3, _mm_shuffle_epi32(key, _MM_SHUFFLE(3,3,3,3))); \
+#define key_xor(round, B0, B1, B2, B3) \
+ do { \
+ B0 ^= SIMD_32(keys[4*round ]); \
+ B1 ^= SIMD_32(keys[4*round+1]); \
+ B2 ^= SIMD_32(keys[4*round+2]); \
+ B3 ^= SIMD_32(keys[4*round+3]); \
} while(0);
/*
* Serpent's linear transformations
*/
-#define rotate_left_m128(vec, rot) \
- _mm_or_si128(_mm_slli_epi32(vec, rot), _mm_srli_epi32(vec, 32-rot))
-
-#define rotate_right_m128(vec, rot) \
- _mm_or_si128(_mm_srli_epi32(vec, rot), _mm_slli_epi32(vec, 32-rot))
-
-#define transform(B0, B1, B2, B3) \
- do { \
- B0 = rotate_left_m128(B0, 13); \
- B2 = rotate_left_m128(B2, 3); \
- B1 = _mm_xor_si128(B1, _mm_xor_si128(B0, B2)); \
- B3 = _mm_xor_si128(B3, _mm_xor_si128(B2, _mm_slli_epi32(B0, 3))); \
- B1 = rotate_left_m128(B1, 1); \
- B3 = rotate_left_m128(B3, 7); \
- B0 = _mm_xor_si128(B0, _mm_xor_si128(B1, B3)); \
- B2 = _mm_xor_si128(B2, _mm_xor_si128(B3, _mm_slli_epi32(B1, 7))); \
- B0 = rotate_left_m128(B0, 5); \
- B2 = rotate_left_m128(B2, 22); \
+#define transform(B0, B1, B2, B3) \
+ do { \
+ B0.rotate_left(13); \
+ B2.rotate_left(3); \
+ B1 ^= B0 ^ B2; \
+ B3 ^= B2 ^ (B0 << 3); \
+ B1.rotate_left(1); \
+ B3.rotate_left(7); \
+ B0 ^= B1 ^ B3; \
+ B2 ^= B3 ^ (B1 << 7); \
+ B0.rotate_left(5); \
+ B2.rotate_left(22); \
} while(0);
-#define i_transform(B0, B1, B2, B3) \
- do { \
- B2 = rotate_right_m128(B2, 22); \
- B0 = rotate_right_m128(B0, 5); \
- B2 = _mm_xor_si128(B2, _mm_xor_si128(B3, _mm_slli_epi32(B1, 7))); \
- B0 = _mm_xor_si128(B0, _mm_xor_si128(B1, B3)); \
- B3 = rotate_right_m128(B3, 7); \
- B1 = rotate_right_m128(B1, 1); \
- B3 = _mm_xor_si128(B3, _mm_xor_si128(B2, _mm_slli_epi32(B0, 3))); \
- B1 = _mm_xor_si128(B1, _mm_xor_si128(B0, B2)); \
- B2 = rotate_right_m128(B2, 3); \
- B0 = rotate_right_m128(B0, 13); \
+#define i_transform(B0, B1, B2, B3) \
+ do { \
+ B2.rotate_right(22); \
+ B0.rotate_right(5); \
+ B2 ^= B3 ^ (B1 << 7); \
+ B0 ^= B1 ^ B3; \
+ B3.rotate_right(7); \
+ B1.rotate_right(1); \
+ B3 ^= B2 ^ (B0 << 3); \
+ B1 ^= B0 ^ B2; \
+ B2.rotate_right(3); \
+ B0.rotate_right(13); \
} while(0);
/*
-* 4x4 SSE2 integer matrix transpose
-*/
-#define transpose(B0, B1, B2, B3) \
- do { \
- __m128i T0 = _mm_unpacklo_epi32(B0, B1); \
- __m128i T1 = _mm_unpacklo_epi32(B2, B3); \
- __m128i T2 = _mm_unpackhi_epi32(B0, B1); \
- __m128i T3 = _mm_unpackhi_epi32(B2, B3); \
- B0 = _mm_unpacklo_epi64(T0, T1); \
- B1 = _mm_unpackhi_epi64(T0, T1); \
- B2 = _mm_unpacklo_epi64(T2, T3); \
- B3 = _mm_unpackhi_epi64(T2, T3); \
- } while(0);
-
-/*
-* SSE2 Serpent Encryption of 4 blocks in parallel
+* SIMD Serpent Encryption of 4 blocks in parallel
*/
void serpent_encrypt_4(const byte in[64],
byte out[64],
- const u32bit keys_32[132])
+ const u32bit keys[132])
{
- const __m128i all_ones = _mm_set1_epi8(0xFF);
+ SIMD_32 B0 = SIMD_32::load_le(in);
+ SIMD_32 B1 = SIMD_32::load_le(in + 16);
+ SIMD_32 B2 = SIMD_32::load_le(in + 32);
+ SIMD_32 B3 = SIMD_32::load_le(in + 48);
- const __m128i* keys = (const __m128i*)(keys_32);
- __m128i* out_mm = (__m128i*)(out);
- __m128i* in_mm = (__m128i*)(in);
-
- __m128i B0 = _mm_loadu_si128(in_mm);
- __m128i B1 = _mm_loadu_si128(in_mm + 1);
- __m128i B2 = _mm_loadu_si128(in_mm + 2);
- __m128i B3 = _mm_loadu_si128(in_mm + 3);
-
- transpose(B0, B1, B2, B3);
+ SIMD_32::transpose(B0, B1, B2, B3);
key_xor( 0,B0,B1,B2,B3); SBoxE1(B0,B1,B2,B3); transform(B0,B1,B2,B3);
key_xor( 1,B0,B1,B2,B3); SBoxE2(B0,B1,B2,B3); transform(B0,B1,B2,B3);
@@ -131,33 +103,27 @@ void serpent_encrypt_4(const byte in[64],
key_xor(30,B0,B1,B2,B3); SBoxE7(B0,B1,B2,B3); transform(B0,B1,B2,B3);
key_xor(31,B0,B1,B2,B3); SBoxE8(B0,B1,B2,B3); key_xor(32,B0,B1,B2,B3);
- transpose(B0, B1, B2, B3);
+ SIMD_32::transpose(B0, B1, B2, B3);
- _mm_storeu_si128(out_mm , B0);
- _mm_storeu_si128(out_mm + 1, B1);
- _mm_storeu_si128(out_mm + 2, B2);
- _mm_storeu_si128(out_mm + 3, B3);
+ B0.store_le(out);
+ B1.store_le(out + 16);
+ B2.store_le(out + 32);
+ B3.store_le(out + 48);
}
/*
-* SSE2 Serpent Decryption of 4 blocks in parallel
+* SIMD Serpent Decryption of 4 blocks in parallel
*/
void serpent_decrypt_4(const byte in[64],
byte out[64],
- const u32bit keys_32[132])
+ const u32bit keys[132])
{
- const __m128i all_ones = _mm_set1_epi8(0xFF);
-
- const __m128i* keys = (const __m128i*)(keys_32);
- __m128i* out_mm = (__m128i*)(out);
- __m128i* in_mm = (__m128i*)(in);
-
- __m128i B0 = _mm_loadu_si128(in_mm);
- __m128i B1 = _mm_loadu_si128(in_mm + 1);
- __m128i B2 = _mm_loadu_si128(in_mm + 2);
- __m128i B3 = _mm_loadu_si128(in_mm + 3);
+ SIMD_32 B0 = SIMD_32::load_le(in);
+ SIMD_32 B1 = SIMD_32::load_le(in + 16);
+ SIMD_32 B2 = SIMD_32::load_le(in + 32);
+ SIMD_32 B3 = SIMD_32::load_le(in + 48);
- transpose(B0, B1, B2, B3);
+ SIMD_32::transpose(B0, B1, B2, B3);
key_xor(32,B0,B1,B2,B3); SBoxD8(B0,B1,B2,B3); key_xor(31,B0,B1,B2,B3);
i_transform(B0,B1,B2,B3); SBoxD7(B0,B1,B2,B3); key_xor(30,B0,B1,B2,B3);
@@ -195,12 +161,12 @@ void serpent_decrypt_4(const byte in[64],
i_transform(B0,B1,B2,B3); SBoxD2(B0,B1,B2,B3); key_xor( 1,B0,B1,B2,B3);
i_transform(B0,B1,B2,B3); SBoxD1(B0,B1,B2,B3); key_xor( 0,B0,B1,B2,B3);
- transpose(B0, B1, B2, B3);
+ SIMD_32::transpose(B0, B1, B2, B3);
- _mm_storeu_si128(out_mm , B0);
- _mm_storeu_si128(out_mm + 1, B1);
- _mm_storeu_si128(out_mm + 2, B2);
- _mm_storeu_si128(out_mm + 3, B3);
+ B0.store_le(out);
+ B1.store_le(out + 16);
+ B2.store_le(out + 32);
+ B3.store_le(out + 48);
}
}
@@ -208,7 +174,7 @@ void serpent_decrypt_4(const byte in[64],
/*
* Serpent Encryption
*/
-void Serpent_SSE2::encrypt_n(const byte in[], byte out[], u32bit blocks) const
+void Serpent_SIMD::encrypt_n(const byte in[], byte out[], u32bit blocks) const
{
while(blocks >= 4)
{
@@ -224,7 +190,7 @@ void Serpent_SSE2::encrypt_n(const byte in[], byte out[], u32bit blocks) const
/*
* Serpent Decryption
*/
-void Serpent_SSE2::decrypt_n(const byte in[], byte out[], u32bit blocks) const
+void Serpent_SIMD::decrypt_n(const byte in[], byte out[], u32bit blocks) const
{
while(blocks >= 4)
{
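
The rewritten macros above no longer mention __m128i at all; they are written against a small wrapper type, SIMD_32, that hides whether the four 32-bit lanes live in an SSE2 register, an AltiVec register, or plain scalars (simd_sse.h, simd_altivec.h and simd_scalar.h in the diffstat). A minimal scalar sketch of the parts of that interface the Serpent code calls is shown here; the member names follow the calls made above, but the bodies are illustrative and assume a little-endian host, so this is not the library's actual simd_scalar.h:

#include <stdint.h>
#include <string.h>

/*
* Illustrative scalar stand-in for the SIMD_32 wrapper used above.
* Not Botan's actual simd_scalar.h; assumes a little-endian host.
*/
class SIMD_32
   {
   public:
      SIMD_32() { R[0] = R[1] = R[2] = R[3] = 0; }

      explicit SIMD_32(uint32_t x)             // broadcast, as in key_xor()
         { R[0] = R[1] = R[2] = R[3] = x; }

      static SIMD_32 load_le(const void* in)   // 16 little-endian bytes
         { SIMD_32 out; memcpy(out.R, in, 16); return out; }

      void store_le(void* out) const { memcpy(out, R, 16); }

      void rotate_left(uint32_t rot)           // valid for rot in 1..31
         { for(int i = 0; i != 4; ++i) R[i] = (R[i] << rot) | (R[i] >> (32 - rot)); }

      void rotate_right(uint32_t rot) { rotate_left(32 - rot); }

      SIMD_32& operator^=(const SIMD_32& o)
         { for(int i = 0; i != 4; ++i) R[i] ^= o.R[i]; return *this; }

      SIMD_32 operator^(const SIMD_32& o) const
         { SIMD_32 t(*this); t ^= o; return t; }

      SIMD_32 operator<<(uint32_t shift) const
         { SIMD_32 t(*this); for(int i = 0; i != 4; ++i) t.R[i] <<= shift; return t; }

      // View the four registers as a 4x4 matrix of words and flip it, so that
      // after the call lane i of every register belongs to input block i.
      static void transpose(SIMD_32& B0, SIMD_32& B1, SIMD_32& B2, SIMD_32& B3)
         {
         const SIMD_32 T[4] = { B0, B1, B2, B3 };
         SIMD_32* out[4] = { &B0, &B1, &B2, &B3 };
         for(int row = 0; row != 4; ++row)
            for(int col = 0; col != 4; ++col)
               out[row]->R[col] = T[col].R[row];
         }

   private:
      uint32_t R[4];
   };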
diff --git a/src/block/serpent_sse2/serp_sse2.h b/src/block/serpent_simd/serp_simd.h
index f1e5c2028..1ecb70159 100644
--- a/src/block/serpent_sse2/serp_sse2.h
+++ b/src/block/serpent_simd/serp_simd.h
@@ -1,12 +1,12 @@
/*
-* Serpent (SSE2)
+* Serpent (SIMD)
* (C) 2009 Jack Lloyd
*
* Distributed under the terms of the Botan license
*/
-#ifndef BOTAN_SERPENT_SSE2_H__
-#define BOTAN_SERPENT_SSE2_H__
+#ifndef BOTAN_SERPENT_SIMD_H__
+#define BOTAN_SERPENT_SIMD_H__
#include <botan/serpent.h>
@@ -15,13 +15,13 @@ namespace Botan {
/*
* Serpent
*/
-class BOTAN_DLL Serpent_SSE2 : public Serpent
+class BOTAN_DLL Serpent_SIMD : public Serpent
{
public:
void encrypt_n(const byte in[], byte out[], u32bit blocks) const;
void decrypt_n(const byte in[], byte out[], u32bit blocks) const;
- BlockCipher* clone() const { return new Serpent_SSE2; }
+ BlockCipher* clone() const { return new Serpent_SIMD; }
};
}
diff --git a/src/block/serpent_simd/serp_simd_sbox.h b/src/block/serpent_simd/serp_simd_sbox.h
new file mode 100644
index 000000000..6e3da7359
--- /dev/null
+++ b/src/block/serpent_simd/serp_simd_sbox.h
@@ -0,0 +1,426 @@
+/*
+* Serpent Sboxes in SIMD form
+* (C) 2009 Jack Lloyd
+*
+* Distributed under the terms of the Botan license
+*/
+
+#ifndef SERPENT_SIMD_SBOXES_H__
+#define SERPENT_SIMD_SBOXES_H__
+
+#define SBoxE1(B0, B1, B2, B3) \
+ do { \
+ B3 ^= B0; \
+ SIMD_32 B4 = B1; \
+ B1 &= B3; \
+ B4 ^= B2; \
+ B1 ^= B0; \
+ B0 |= B3; \
+ B0 ^= B4; \
+ B4 ^= B3; \
+ B3 ^= B2; \
+ B2 |= B1; \
+ B2 ^= B4; \
+ B4 = ~B4; \
+ B4 |= B1; \
+ B1 ^= B3; \
+ B1 ^= B4; \
+ B3 |= B0; \
+ B1 ^= B3; \
+ B4 ^= B3; \
+ B3 = B0; \
+ B0 = B1; \
+ B1 = B4; \
+ } while(0);
+
+#define SBoxE2(B0, B1, B2, B3) \
+ do { \
+ B0 = ~B0; \
+ B2 = ~B2; \
+ SIMD_32 B4 = B0; \
+ B0 &= B1; \
+ B2 ^= B0; \
+ B0 |= B3; \
+ B3 ^= B2; \
+ B1 ^= B0; \
+ B0 ^= B4; \
+ B4 |= B1; \
+ B1 ^= B3; \
+ B2 |= B0; \
+ B2 &= B4; \
+ B0 ^= B1; \
+ B1 &= B2; \
+ B1 ^= B0; \
+ B0 &= B2; \
+ B4 ^= B0; \
+ B0 = B2; \
+ B2 = B3; \
+ B3 = B1; \
+ B1 = B4; \
+ } while(0);
+
+#define SBoxE3(B0, B1, B2, B3) \
+ do { \
+ SIMD_32 B4 = B0; \
+ B0 &= B2; \
+ B0 ^= B3; \
+ B2 ^= B1; \
+ B2 ^= B0; \
+ B3 |= B4; \
+ B3 ^= B1; \
+ B4 ^= B2; \
+ B1 = B3; \
+ B3 |= B4; \
+ B3 ^= B0; \
+ B0 &= B1; \
+ B4 ^= B0; \
+ B1 ^= B3; \
+ B1 ^= B4; \
+ B4 = ~B4; \
+ B0 = B2; \
+ B2 = B1; \
+ B1 = B3; \
+ B3 = B4; \
+ } while(0);
+
+#define SBoxE4(B0, B1, B2, B3) \
+ do { \
+ SIMD_32 B4 = B0; \
+ B0 |= B3; \
+ B3 ^= B1; \
+ B1 &= B4; \
+ B4 ^= B2; \
+ B2 ^= B3; \
+ B3 &= B0; \
+ B4 |= B1; \
+ B3 ^= B4; \
+ B0 ^= B1; \
+ B4 &= B0; \
+ B1 ^= B3; \
+ B4 ^= B2; \
+ B1 |= B0; \
+ B1 ^= B2; \
+ B0 ^= B3; \
+ B2 = B1; \
+ B1 |= B3; \
+ B0 ^= B1; \
+ B1 = B2; \
+ B2 = B3; \
+ B3 = B4; \
+ } while(0);
+
+#define SBoxE5(B0, B1, B2, B3) \
+ do { \
+ B1 ^= B3; \
+ B3 = ~B3; \
+ B2 ^= B3; \
+ B3 ^= B0; \
+ SIMD_32 B4 = B1; \
+ B1 &= B3; \
+ B1 ^= B2; \
+ B4 ^= B3; \
+ B0 ^= B4; \
+ B2 &= B4; \
+ B2 ^= B0; \
+ B0 &= B1; \
+ B3 ^= B0; \
+ B4 |= B1; \
+ B4 ^= B0; \
+ B0 |= B3; \
+ B0 ^= B2; \
+ B2 &= B3; \
+ B0 = ~B0; \
+ B4 ^= B2; \
+ B2 = B0; \
+ B0 = B1; \
+ B1 = B4; \
+ } while(0);
+
+#define SBoxE6(B0, B1, B2, B3) \
+ do { \
+ B0 ^= B1; \
+ B1 ^= B3; \
+ B3 = ~B3; \
+ SIMD_32 B4 = B1; \
+ B1 &= B0; \
+ B2 ^= B3; \
+ B1 ^= B2; \
+ B2 |= B4; \
+ B4 ^= B3; \
+ B3 &= B1; \
+ B3 ^= B0; \
+ B4 ^= B1; \
+ B4 ^= B2; \
+ B2 ^= B0; \
+ B0 &= B3; \
+ B2 = ~B2; \
+ B0 ^= B4; \
+ B4 |= B3; \
+ B4 ^= B2; \
+ B2 = B0; \
+ B0 = B1; \
+ B1 = B3; \
+ B3 = B4; \
+ } while(0);
+
+#define SBoxE7(B0, B1, B2, B3) \
+ do { \
+ B2 = ~B2; \
+ SIMD_32 B4 = B3; \
+ B3 &= B0; \
+ B0 ^= B4; \
+ B3 ^= B2; \
+ B2 |= B4; \
+ B1 ^= B3; \
+ B2 ^= B0; \
+ B0 |= B1; \
+ B2 ^= B1; \
+ B4 ^= B0; \
+ B0 |= B3; \
+ B0 ^= B2; \
+ B4 ^= B3; \
+ B4 ^= B0; \
+ B3 = ~B3; \
+ B2 &= B4; \
+ B3 ^= B2; \
+ B2 = B4; \
+ } while(0);
+
+#define SBoxE8(B0, B1, B2, B3) \
+ do { \
+ SIMD_32 B4 = B1; \
+ B1 |= B2; \
+ B1 ^= B3; \
+ B4 ^= B2; \
+ B2 ^= B1; \
+ B3 |= B4; \
+ B3 &= B0; \
+ B4 ^= B2; \
+ B3 ^= B1; \
+ B1 |= B4; \
+ B1 ^= B0; \
+ B0 |= B4; \
+ B0 ^= B2; \
+ B1 ^= B4; \
+ B2 ^= B1; \
+ B1 &= B0; \
+ B1 ^= B4; \
+ B2 = ~B2; \
+ B2 |= B0; \
+ B4 ^= B2; \
+ B2 = B1; \
+ B1 = B3; \
+ B3 = B0; \
+ B0 = B4; \
+ } while(0);
+
+#define SBoxD1(B0, B1, B2, B3) \
+ do { \
+ B2 = ~B2; \
+ SIMD_32 B4 = B1; \
+ B1 |= B0; \
+ B4 = ~B4; \
+ B1 ^= B2; \
+ B2 |= B4; \
+ B1 ^= B3; \
+ B0 ^= B4; \
+ B2 ^= B0; \
+ B0 &= B3; \
+ B4 ^= B0; \
+ B0 |= B1; \
+ B0 ^= B2; \
+ B3 ^= B4; \
+ B2 ^= B1; \
+ B3 ^= B0; \
+ B3 ^= B1; \
+ B2 &= B3; \
+ B4 ^= B2; \
+ B2 = B1; \
+ B1 = B4; \
+ } while(0);
+
+#define SBoxD2(B0, B1, B2, B3) \
+ do { \
+ SIMD_32 B4 = B1; \
+ B1 ^= B3; \
+ B3 &= B1; \
+ B4 ^= B2; \
+ B3 ^= B0; \
+ B0 |= B1; \
+ B2 ^= B3; \
+ B0 ^= B4; \
+ B0 |= B2; \
+ B1 ^= B3; \
+ B0 ^= B1; \
+ B1 |= B3; \
+ B1 ^= B0; \
+ B4 = ~B4; \
+ B4 ^= B1; \
+ B1 |= B0; \
+ B1 ^= B0; \
+ B1 |= B4; \
+ B3 ^= B1; \
+ B1 = B0; \
+ B0 = B4; \
+ B4 = B2; \
+ B2 = B3; \
+ B3 = B4; \
+ } while(0);
+
+#define SBoxD3(B0, B1, B2, B3) \
+ do { \
+ B2 ^= B3; \
+ B3 ^= B0; \
+ SIMD_32 B4 = B3; \
+ B3 &= B2; \
+ B3 ^= B1; \
+ B1 |= B2; \
+ B1 ^= B4; \
+ B4 &= B3; \
+ B2 ^= B3; \
+ B4 &= B0; \
+ B4 ^= B2; \
+ B2 &= B1; \
+ B2 |= B0; \
+ B3 = ~B3; \
+ B2 ^= B3; \
+ B0 ^= B3; \
+ B0 &= B1; \
+ B3 ^= B4; \
+ B3 ^= B0; \
+ B0 = B1; \
+ B1 = B4; \
+ } while(0);
+
+#define SBoxD4(B0, B1, B2, B3) \
+ do { \
+ SIMD_32 B4 = B2; \
+ B2 ^= B1; \
+ B0 ^= B2; \
+ B4 &= B2; \
+ B4 ^= B0; \
+ B0 &= B1; \
+ B1 ^= B3; \
+ B3 |= B4; \
+ B2 ^= B3; \
+ B0 ^= B3; \
+ B1 ^= B4; \
+ B3 &= B2; \
+ B3 ^= B1; \
+ B1 ^= B0; \
+ B1 |= B2; \
+ B0 ^= B3; \
+ B1 ^= B4; \
+ B0 ^= B1; \
+ B4 = B0; \
+ B0 = B2; \
+ B2 = B3; \
+ B3 = B4; \
+ } while(0);
+
+#define SBoxD5(B0, B1, B2, B3) \
+ do { \
+ SIMD_32 B4 = B2; \
+ B2 &= B3; \
+ B2 ^= B1; \
+ B1 |= B3; \
+ B1 &= B0; \
+ B4 ^= B2; \
+ B4 ^= B1; \
+ B1 &= B2; \
+ B0 = ~B0; \
+ B3 ^= B4; \
+ B1 ^= B3; \
+ B3 &= B0; \
+ B3 ^= B2; \
+ B0 ^= B1; \
+ B2 &= B0; \
+ B3 ^= B0; \
+ B2 ^= B4; \
+ B2 |= B3; \
+ B3 ^= B0; \
+ B2 ^= B1; \
+ B1 = B3; \
+ B3 = B4; \
+ } while(0);
+
+#define SBoxD6(B0, B1, B2, B3) \
+ do { \
+ B1 = ~B1; \
+ SIMD_32 B4 = B3; \
+ B2 ^= B1; \
+ B3 |= B0; \
+ B3 ^= B2; \
+ B2 |= B1; \
+ B2 &= B0; \
+ B4 ^= B3; \
+ B2 ^= B4; \
+ B4 |= B0; \
+ B4 ^= B1; \
+ B1 &= B2; \
+ B1 ^= B3; \
+ B4 ^= B2; \
+ B3 &= B4; \
+ B4 ^= B1; \
+ B3 ^= B4; \
+ B4 = ~B4; \
+ B3 ^= B0; \
+ B0 = B1; \
+ B1 = B4; \
+ B4 = B3; \
+ B3 = B2; \
+ B2 = B4; \
+ } while(0);
+
+#define SBoxD7(B0, B1, B2, B3) \
+ do { \
+ B0 ^= B2; \
+ SIMD_32 B4 = B2; \
+ B2 &= B0; \
+ B4 ^= B3; \
+ B2 = ~B2; \
+ B3 ^= B1; \
+ B2 ^= B3; \
+ B4 |= B0; \
+ B0 ^= B2; \
+ B3 ^= B4; \
+ B4 ^= B1; \
+ B1 &= B3; \
+ B1 ^= B0; \
+ B0 ^= B3; \
+ B0 |= B2; \
+ B3 ^= B1; \
+ B4 ^= B0; \
+ B0 = B1; \
+ B1 = B2; \
+ B2 = B4; \
+ } while(0);
+
+#define SBoxD8(B0, B1, B2, B3) \
+ do { \
+ SIMD_32 B4 = B2; \
+ B2 ^= B0; \
+ B0 &= B3; \
+ B4 |= B3; \
+ B2 = ~B2; \
+ B3 ^= B1; \
+ B1 |= B0; \
+ B0 ^= B2; \
+ B2 &= B4; \
+ B3 &= B4; \
+ B1 ^= B2; \
+ B2 ^= B0; \
+ B0 |= B2; \
+ B4 ^= B1; \
+ B0 ^= B3; \
+ B3 ^= B4; \
+ B4 |= B0; \
+ B3 ^= B2; \
+ B4 ^= B2; \
+ B2 = B1; \
+ B1 = B0; \
+ B0 = B3; \
+ B3 = B4; \
+ } while(0);
+
+#endif
diff --git a/src/block/serpent_sse2/info.txt b/src/block/serpent_sse2/info.txt
deleted file mode 100644
index a4ec561a8..000000000
--- a/src/block/serpent_sse2/info.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-define SERPENT_SSE2
-
-<requires>
-serpent
-sse2_eng
-</requires>
diff --git a/src/block/serpent_sse2/serp_sse2_sbox.h b/src/block/serpent_sse2/serp_sse2_sbox.h
deleted file mode 100644
index 40c552e87..000000000
--- a/src/block/serpent_sse2/serp_sse2_sbox.h
+++ /dev/null
@@ -1,434 +0,0 @@
-/*
-* Serpent Sboxes in SSE2 form
-* (C) 2009 Jack Lloyd
-*
-* Distributed under the terms of the Botan license
-*/
-
-#ifndef SERPENT_SSE2_SBOXES_H__
-#define SERPENT_SSE2_SBOXES_H__
-
-#define SBoxE1(B0, B1, B2, B3) \
- do { \
- B3 = _mm_xor_si128(B3, B0); \
- __m128i B4 = B1; \
- B1 = _mm_and_si128(B1, B3); \
- B4 = _mm_xor_si128(B4, B2); \
- B1 = _mm_xor_si128(B1, B0); \
- B0 = _mm_or_si128(B0, B3); \
- B0 = _mm_xor_si128(B0, B4); \
- B4 = _mm_xor_si128(B4, B3); \
- B3 = _mm_xor_si128(B3, B2); \
- B2 = _mm_or_si128(B2, B1); \
- B2 = _mm_xor_si128(B2, B4); \
- B4 = _mm_xor_si128(B4, all_ones); \
- B4 = _mm_or_si128(B4, B1); \
- B1 = _mm_xor_si128(B1, B3); \
- B1 = _mm_xor_si128(B1, B4); \
- B3 = _mm_or_si128(B3, B0); \
- B1 = _mm_xor_si128(B1, B3); \
- B4 = _mm_xor_si128(B4, B3); \
- B3 = B0; \
- B0 = B1; \
- B1 = B4; \
- } while(0);
-
-#define SBoxE2(B0, B1, B2, B3) \
- do { \
- B0 = _mm_xor_si128(B0, all_ones); \
- B2 = _mm_xor_si128(B2, all_ones); \
- __m128i B4 = B0; \
- B0 = _mm_and_si128(B0, B1); \
- B2 = _mm_xor_si128(B2, B0); \
- B0 = _mm_or_si128(B0, B3); \
- B3 = _mm_xor_si128(B3, B2); \
- B1 = _mm_xor_si128(B1, B0); \
- B0 = _mm_xor_si128(B0, B4); \
- B4 = _mm_or_si128(B4, B1); \
- B1 = _mm_xor_si128(B1, B3); \
- B2 = _mm_or_si128(B2, B0); \
- B2 = _mm_and_si128(B2, B4); \
- B0 = _mm_xor_si128(B0, B1); \
- B1 = _mm_and_si128(B1, B2); \
- B1 = _mm_xor_si128(B1, B0); \
- B0 = _mm_and_si128(B0, B2); \
- B4 = _mm_xor_si128(B4, B0); \
- B0 = B2; \
- B2 = B3; \
- B3 = B1; \
- B1 = B4; \
- } while(0);
-
-#define SBoxE3(B0, B1, B2, B3) \
- do { \
- __m128i B4 = B0; \
- B0 = _mm_and_si128(B0, B2); \
- B0 = _mm_xor_si128(B0, B3); \
- B2 = _mm_xor_si128(B2, B1); \
- B2 = _mm_xor_si128(B2, B0); \
- B3 = _mm_or_si128(B3, B4); \
- B3 = _mm_xor_si128(B3, B1); \
- B4 = _mm_xor_si128(B4, B2); \
- B1 = B3; \
- B3 = _mm_or_si128(B3, B4); \
- B3 = _mm_xor_si128(B3, B0); \
- B0 = _mm_and_si128(B0, B1); \
- B4 = _mm_xor_si128(B4, B0); \
- B1 = _mm_xor_si128(B1, B3); \
- B1 = _mm_xor_si128(B1, B4); \
- B4 = _mm_xor_si128(B4, all_ones); \
- B0 = B2; \
- B2 = B1; \
- B1 = B3; \
- B3 = B4; \
- } while(0);
-
-#define SBoxE4(B0, B1, B2, B3) \
- do { \
- __m128i B4 = B0; \
- B0 = _mm_or_si128(B0, B3); \
- B3 = _mm_xor_si128(B3, B1); \
- B1 = _mm_and_si128(B1, B4); \
- B4 = _mm_xor_si128(B4, B2); \
- B2 = _mm_xor_si128(B2, B3); \
- B3 = _mm_and_si128(B3, B0); \
- B4 = _mm_or_si128(B4, B1); \
- B3 = _mm_xor_si128(B3, B4); \
- B0 = _mm_xor_si128(B0, B1); \
- B4 = _mm_and_si128(B4, B0); \
- B1 = _mm_xor_si128(B1, B3); \
- B4 = _mm_xor_si128(B4, B2); \
- B1 = _mm_or_si128(B1, B0); \
- B1 = _mm_xor_si128(B1, B2); \
- B0 = _mm_xor_si128(B0, B3); \
- B2 = B1; \
- B1 = _mm_or_si128(B1, B3); \
- B0 = _mm_xor_si128(B0, B1); \
- B1 = B2; \
- B2 = B3; \
- B3 = B4; \
- } while(0);
-
-#define SBoxE5(B0, B1, B2, B3) \
- do { \
- B1 = _mm_xor_si128(B1, B3); \
- B3 = _mm_xor_si128(B3, all_ones); \
- B2 = _mm_xor_si128(B2, B3); \
- B3 = _mm_xor_si128(B3, B0); \
- __m128i B4 = B1; \
- B1 = _mm_and_si128(B1, B3); \
- B1 = _mm_xor_si128(B1, B2); \
- B4 = _mm_xor_si128(B4, B3); \
- B0 = _mm_xor_si128(B0, B4); \
- B2 = _mm_and_si128(B2, B4); \
- B2 = _mm_xor_si128(B2, B0); \
- B0 = _mm_and_si128(B0, B1); \
- B3 = _mm_xor_si128(B3, B0); \
- B4 = _mm_or_si128(B4, B1); \
- B4 = _mm_xor_si128(B4, B0); \
- B0 = _mm_or_si128(B0, B3); \
- B0 = _mm_xor_si128(B0, B2); \
- B2 = _mm_and_si128(B2, B3); \
- B0 = _mm_xor_si128(B0, all_ones); \
- B4 = _mm_xor_si128(B4, B2); \
- B2 = B0; \
- B0 = B1; \
- B1 = B4; \
- } while(0);
-
-#define SBoxE6(B0, B1, B2, B3) \
- do { \
- B0 = _mm_xor_si128(B0, B1); \
- B1 = _mm_xor_si128(B1, B3); \
- B3 = _mm_xor_si128(B3, all_ones); \
- __m128i B4 = B1; \
- B1 = _mm_and_si128(B1, B0); \
- B2 = _mm_xor_si128(B2, B3); \
- B1 = _mm_xor_si128(B1, B2); \
- B2 = _mm_or_si128(B2, B4); \
- B4 = _mm_xor_si128(B4, B3); \
- B3 = _mm_and_si128(B3, B1); \
- B3 = _mm_xor_si128(B3, B0); \
- B4 = _mm_xor_si128(B4, B1); \
- B4 = _mm_xor_si128(B4, B2); \
- B2 = _mm_xor_si128(B2, B0); \
- B0 = _mm_and_si128(B0, B3); \
- B2 = _mm_xor_si128(B2, all_ones); \
- B0 = _mm_xor_si128(B0, B4); \
- B4 = _mm_or_si128(B4, B3); \
- B4 = _mm_xor_si128(B4, B2); \
- B2 = B0; \
- B0 = B1; \
- B1 = B3; \
- B3 = B4; \
- } while(0);
-
-#define SBoxE7(B0, B1, B2, B3) \
- do { \
- B2 = _mm_xor_si128(B2, all_ones); \
- __m128i B4 = B3; \
- B3 = _mm_and_si128(B3, B0); \
- B0 = _mm_xor_si128(B0, B4); \
- B3 = _mm_xor_si128(B3, B2); \
- B2 = _mm_or_si128(B2, B4); \
- B1 = _mm_xor_si128(B1, B3); \
- B2 = _mm_xor_si128(B2, B0); \
- B0 = _mm_or_si128(B0, B1); \
- B2 = _mm_xor_si128(B2, B1); \
- B4 = _mm_xor_si128(B4, B0); \
- B0 = _mm_or_si128(B0, B3); \
- B0 = _mm_xor_si128(B0, B2); \
- B4 = _mm_xor_si128(B4, B3); \
- B4 = _mm_xor_si128(B4, B0); \
- B3 = _mm_xor_si128(B3, all_ones); \
- B2 = _mm_and_si128(B2, B4); \
- B3 = _mm_xor_si128(B3, B2); \
- B2 = B4; \
- } while(0);
-
-#define SBoxE8(B0, B1, B2, B3) \
- do { \
- __m128i B4 = B1; \
- B1 = _mm_or_si128(B1, B2); \
- B1 = _mm_xor_si128(B1, B3); \
- B4 = _mm_xor_si128(B4, B2); \
- B2 = _mm_xor_si128(B2, B1); \
- B3 = _mm_or_si128(B3, B4); \
- B3 = _mm_and_si128(B3, B0); \
- B4 = _mm_xor_si128(B4, B2); \
- B3 = _mm_xor_si128(B3, B1); \
- B1 = _mm_or_si128(B1, B4); \
- B1 = _mm_xor_si128(B1, B0); \
- B0 = _mm_or_si128(B0, B4); \
- B0 = _mm_xor_si128(B0, B2); \
- B1 = _mm_xor_si128(B1, B4); \
- B2 = _mm_xor_si128(B2, B1); \
- B1 = _mm_and_si128(B1, B0); \
- B1 = _mm_xor_si128(B1, B4); \
- B2 = _mm_xor_si128(B2, all_ones); \
- B2 = _mm_or_si128(B2, B0); \
- B4 = _mm_xor_si128(B4, B2); \
- B2 = B1; \
- B1 = B3; \
- B3 = B0; \
- B0 = B4; \
- } while(0);
-
-#define SBoxD1(B0, B1, B2, B3) \
- do \
- { \
- B2 = _mm_xor_si128(B2, all_ones); \
- __m128i B4 = B1; \
- B1 = _mm_or_si128(B1, B0); \
- B4 = _mm_xor_si128(B4, all_ones); \
- B1 = _mm_xor_si128(B1, B2); \
- B2 = _mm_or_si128(B2, B4); \
- B1 = _mm_xor_si128(B1, B3); \
- B0 = _mm_xor_si128(B0, B4); \
- B2 = _mm_xor_si128(B2, B0); \
- B0 = _mm_and_si128(B0, B3); \
- B4 = _mm_xor_si128(B4, B0); \
- B0 = _mm_or_si128(B0, B1); \
- B0 = _mm_xor_si128(B0, B2); \
- B3 = _mm_xor_si128(B3, B4); \
- B2 = _mm_xor_si128(B2, B1); \
- B3 = _mm_xor_si128(B3, B0); \
- B3 = _mm_xor_si128(B3, B1); \
- B2 = _mm_and_si128(B2, B3); \
- B4 = _mm_xor_si128(B4, B2); \
- B2 = B1; \
- B1 = B4; \
- } while(0);
-
-#define SBoxD2(B0, B1, B2, B3) \
- do \
- { \
- __m128i B4 = B1; \
- B1 = _mm_xor_si128(B1, B3); \
- B3 = _mm_and_si128(B3, B1); \
- B4 = _mm_xor_si128(B4, B2); \
- B3 = _mm_xor_si128(B3, B0); \
- B0 = _mm_or_si128(B0, B1); \
- B2 = _mm_xor_si128(B2, B3); \
- B0 = _mm_xor_si128(B0, B4); \
- B0 = _mm_or_si128(B0, B2); \
- B1 = _mm_xor_si128(B1, B3); \
- B0 = _mm_xor_si128(B0, B1); \
- B1 = _mm_or_si128(B1, B3); \
- B1 = _mm_xor_si128(B1, B0); \
- B4 = _mm_xor_si128(B4, all_ones); \
- B4 = _mm_xor_si128(B4, B1); \
- B1 = _mm_or_si128(B1, B0); \
- B1 = _mm_xor_si128(B1, B0); \
- B1 = _mm_or_si128(B1, B4); \
- B3 = _mm_xor_si128(B3, B1); \
- B1 = B0; \
- B0 = B4; \
- B4 = B2; \
- B2 = B3; \
- B3 = B4; \
- } while(0);
-
-#define SBoxD3(B0, B1, B2, B3) \
- do \
- { \
- B2 = _mm_xor_si128(B2, B3); \
- B3 = _mm_xor_si128(B3, B0); \
- __m128i B4 = B3; \
- B3 = _mm_and_si128(B3, B2); \
- B3 = _mm_xor_si128(B3, B1); \
- B1 = _mm_or_si128(B1, B2); \
- B1 = _mm_xor_si128(B1, B4); \
- B4 = _mm_and_si128(B4, B3); \
- B2 = _mm_xor_si128(B2, B3); \
- B4 = _mm_and_si128(B4, B0); \
- B4 = _mm_xor_si128(B4, B2); \
- B2 = _mm_and_si128(B2, B1); \
- B2 = _mm_or_si128(B2, B0); \
- B3 = _mm_xor_si128(B3, all_ones); \
- B2 = _mm_xor_si128(B2, B3); \
- B0 = _mm_xor_si128(B0, B3); \
- B0 = _mm_and_si128(B0, B1); \
- B3 = _mm_xor_si128(B3, B4); \
- B3 = _mm_xor_si128(B3, B0); \
- B0 = B1; \
- B1 = B4; \
- } while(0);
-
-#define SBoxD4(B0, B1, B2, B3) \
- do \
- { \
- __m128i B4 = B2; \
- B2 = _mm_xor_si128(B2, B1); \
- B0 = _mm_xor_si128(B0, B2); \
- B4 = _mm_and_si128(B4, B2); \
- B4 = _mm_xor_si128(B4, B0); \
- B0 = _mm_and_si128(B0, B1); \
- B1 = _mm_xor_si128(B1, B3); \
- B3 = _mm_or_si128(B3, B4); \
- B2 = _mm_xor_si128(B2, B3); \
- B0 = _mm_xor_si128(B0, B3); \
- B1 = _mm_xor_si128(B1, B4); \
- B3 = _mm_and_si128(B3, B2); \
- B3 = _mm_xor_si128(B3, B1); \
- B1 = _mm_xor_si128(B1, B0); \
- B1 = _mm_or_si128(B1, B2); \
- B0 = _mm_xor_si128(B0, B3); \
- B1 = _mm_xor_si128(B1, B4); \
- B0 = _mm_xor_si128(B0, B1); \
- B4 = B0; \
- B0 = B2; \
- B2 = B3; \
- B3 = B4; \
- } while(0);
-
-#define SBoxD5(B0, B1, B2, B3) \
- do \
- { \
- __m128i B4 = B2; \
- B2 = _mm_and_si128(B2, B3); \
- B2 = _mm_xor_si128(B2, B1); \
- B1 = _mm_or_si128(B1, B3); \
- B1 = _mm_and_si128(B1, B0); \
- B4 = _mm_xor_si128(B4, B2); \
- B4 = _mm_xor_si128(B4, B1); \
- B1 = _mm_and_si128(B1, B2); \
- B0 = _mm_xor_si128(B0, all_ones); \
- B3 = _mm_xor_si128(B3, B4); \
- B1 = _mm_xor_si128(B1, B3); \
- B3 = _mm_and_si128(B3, B0); \
- B3 = _mm_xor_si128(B3, B2); \
- B0 = _mm_xor_si128(B0, B1); \
- B2 = _mm_and_si128(B2, B0); \
- B3 = _mm_xor_si128(B3, B0); \
- B2 = _mm_xor_si128(B2, B4); \
- B2 = _mm_or_si128(B2, B3); \
- B3 = _mm_xor_si128(B3, B0); \
- B2 = _mm_xor_si128(B2, B1); \
- B1 = B3; \
- B3 = B4; \
- } while(0);
-
-#define SBoxD6(B0, B1, B2, B3) \
- do \
- { \
- B1 = _mm_xor_si128(B1, all_ones); \
- __m128i B4 = B3; \
- B2 = _mm_xor_si128(B2, B1); \
- B3 = _mm_or_si128(B3, B0); \
- B3 = _mm_xor_si128(B3, B2); \
- B2 = _mm_or_si128(B2, B1); \
- B2 = _mm_and_si128(B2, B0); \
- B4 = _mm_xor_si128(B4, B3); \
- B2 = _mm_xor_si128(B2, B4); \
- B4 = _mm_or_si128(B4, B0); \
- B4 = _mm_xor_si128(B4, B1); \
- B1 = _mm_and_si128(B1, B2); \
- B1 = _mm_xor_si128(B1, B3); \
- B4 = _mm_xor_si128(B4, B2); \
- B3 = _mm_and_si128(B3, B4); \
- B4 = _mm_xor_si128(B4, B1); \
- B3 = _mm_xor_si128(B3, B4); \
- B4 = _mm_xor_si128(B4, all_ones); \
- B3 = _mm_xor_si128(B3, B0); \
- B0 = B1; \
- B1 = B4; \
- B4 = B3; \
- B3 = B2; \
- B2 = B4; \
- } while(0);
-
-#define SBoxD7(B0, B1, B2, B3) \
- do \
- { \
- B0 = _mm_xor_si128(B0, B2); \
- __m128i B4 = B2; \
- B2 = _mm_and_si128(B2, B0); \
- B4 = _mm_xor_si128(B4, B3); \
- B2 = _mm_xor_si128(B2, all_ones); \
- B3 = _mm_xor_si128(B3, B1); \
- B2 = _mm_xor_si128(B2, B3); \
- B4 = _mm_or_si128(B4, B0); \
- B0 = _mm_xor_si128(B0, B2); \
- B3 = _mm_xor_si128(B3, B4); \
- B4 = _mm_xor_si128(B4, B1); \
- B1 = _mm_and_si128(B1, B3); \
- B1 = _mm_xor_si128(B1, B0); \
- B0 = _mm_xor_si128(B0, B3); \
- B0 = _mm_or_si128(B0, B2); \
- B3 = _mm_xor_si128(B3, B1); \
- B4 = _mm_xor_si128(B4, B0); \
- B0 = B1; \
- B1 = B2; \
- B2 = B4; \
- } while(0);
-
-#define SBoxD8(B0, B1, B2, B3) \
- do \
- { \
- __m128i B4 = B2; \
- B2 = _mm_xor_si128(B2, B0); \
- B0 = _mm_and_si128(B0, B3); \
- B4 = _mm_or_si128(B4, B3); \
- B2 = _mm_xor_si128(B2, all_ones); \
- B3 = _mm_xor_si128(B3, B1); \
- B1 = _mm_or_si128(B1, B0); \
- B0 = _mm_xor_si128(B0, B2); \
- B2 = _mm_and_si128(B2, B4); \
- B3 = _mm_and_si128(B3, B4); \
- B1 = _mm_xor_si128(B1, B2); \
- B2 = _mm_xor_si128(B2, B0); \
- B0 = _mm_or_si128(B0, B2); \
- B4 = _mm_xor_si128(B4, B1); \
- B0 = _mm_xor_si128(B0, B3); \
- B3 = _mm_xor_si128(B3, B4); \
- B4 = _mm_or_si128(B4, B0); \
- B3 = _mm_xor_si128(B3, B2); \
- B4 = _mm_xor_si128(B4, B2); \
- B2 = B1; \
- B1 = B0; \
- B0 = B3; \
- B3 = B4; \
- } while(0);
-
-#endif
diff --git a/src/block/xtea/xtea.h b/src/block/xtea/xtea.h
index f3b554edb..9982d0712 100644
--- a/src/block/xtea/xtea.h
+++ b/src/block/xtea/xtea.h
@@ -26,7 +26,7 @@ class BOTAN_DLL XTEA : public BlockCipher
BlockCipher* clone() const { return new XTEA; }
XTEA() : BlockCipher(8, 16) {}
- private:
+ protected:
void key_schedule(const byte[], u32bit);
SecureBuffer<u32bit, 64> EK;
};
diff --git a/src/block/xtea_simd/info.txt b/src/block/xtea_simd/info.txt
new file mode 100644
index 000000000..98a6e941f
--- /dev/null
+++ b/src/block/xtea_simd/info.txt
@@ -0,0 +1,14 @@
+define XTEA_SIMD
+
+load_on auto
+
+<add>
+xtea_simd.cpp
+xtea_simd.h
+</add>
+
+<requires>
+xtea
+simd_32
+simd_engine
+</requires>
diff --git a/src/block/xtea_simd/xtea_simd.cpp b/src/block/xtea_simd/xtea_simd.cpp
new file mode 100644
index 000000000..6151c355c
--- /dev/null
+++ b/src/block/xtea_simd/xtea_simd.cpp
@@ -0,0 +1,124 @@
+/*
+* XTEA in SIMD
+* (C) 2009 Jack Lloyd
+*
+* Distributed under the terms of the Botan license
+*/
+
+#include <botan/xtea_simd.h>
+#include <botan/loadstor.h>
+#include <botan/simd_32.h>
+
+namespace Botan {
+
+namespace {
+
+void xtea_encrypt_8(const byte in[64], byte out[64], const u32bit EK[64])
+ {
+ SIMD_32 L0 = SIMD_32::load_be(in );
+ SIMD_32 R0 = SIMD_32::load_be(in + 16);
+ SIMD_32 L1 = SIMD_32::load_be(in + 32);
+ SIMD_32 R1 = SIMD_32::load_be(in + 48);
+
+ SIMD_32::transpose(L0, R0, L1, R1);
+
+ for(u32bit i = 0; i != 32; i += 2)
+ {
+ SIMD_32 K0(EK[2*i ]);
+ SIMD_32 K1(EK[2*i+1]);
+ SIMD_32 K2(EK[2*i+2]);
+ SIMD_32 K3(EK[2*i+3]);
+
+ L0 += (((R0 << 4) ^ (R0 >> 5)) + R0) ^ K0;
+ L1 += (((R1 << 4) ^ (R1 >> 5)) + R1) ^ K0;
+
+ R0 += (((L0 << 4) ^ (L0 >> 5)) + L0) ^ K1;
+ R1 += (((L1 << 4) ^ (L1 >> 5)) + L1) ^ K1;
+
+ L0 += (((R0 << 4) ^ (R0 >> 5)) + R0) ^ K2;
+ L1 += (((R1 << 4) ^ (R1 >> 5)) + R1) ^ K2;
+
+ R0 += (((L0 << 4) ^ (L0 >> 5)) + L0) ^ K3;
+ R1 += (((L1 << 4) ^ (L1 >> 5)) + L1) ^ K3;
+ }
+
+ SIMD_32::transpose(L0, R0, L1, R1);
+
+ L0.store_be(out);
+ R0.store_be(out + 16);
+ L1.store_be(out + 32);
+ R1.store_be(out + 48);
+ }
+
+void xtea_decrypt_8(const byte in[64], byte out[64], const u32bit EK[64])
+ {
+ SIMD_32 L0 = SIMD_32::load_be(in );
+ SIMD_32 R0 = SIMD_32::load_be(in + 16);
+ SIMD_32 L1 = SIMD_32::load_be(in + 32);
+ SIMD_32 R1 = SIMD_32::load_be(in + 48);
+
+ SIMD_32::transpose(L0, R0, L1, R1);
+
+ for(u32bit i = 0; i != 32; i += 2)
+ {
+ SIMD_32 K0(EK[63 - 2*i]);
+ SIMD_32 K1(EK[62 - 2*i]);
+ SIMD_32 K2(EK[61 - 2*i]);
+ SIMD_32 K3(EK[60 - 2*i]);
+
+ R0 -= (((L0 << 4) ^ (L0 >> 5)) + L0) ^ K0;
+ R1 -= (((L1 << 4) ^ (L1 >> 5)) + L1) ^ K0;
+
+ L0 -= (((R0 << 4) ^ (R0 >> 5)) + R0) ^ K1;
+ L1 -= (((R1 << 4) ^ (R1 >> 5)) + R1) ^ K1;
+
+ R0 -= (((L0 << 4) ^ (L0 >> 5)) + L0) ^ K2;
+ R1 -= (((L1 << 4) ^ (L1 >> 5)) + L1) ^ K2;
+
+ L0 -= (((R0 << 4) ^ (R0 >> 5)) + R0) ^ K3;
+ L1 -= (((R1 << 4) ^ (R1 >> 5)) + R1) ^ K3;
+ }
+
+ SIMD_32::transpose(L0, R0, L1, R1);
+
+ L0.store_be(out);
+ R0.store_be(out + 16);
+ L1.store_be(out + 32);
+ R1.store_be(out + 48);
+ }
+
+}
+
+/*
+* XTEA Encryption
+*/
+void XTEA_SIMD::encrypt_n(const byte in[], byte out[], u32bit blocks) const
+ {
+ while(blocks >= 8)
+ {
+ xtea_encrypt_8(in, out, this->EK);
+ in += 8 * BLOCK_SIZE;
+ out += 8 * BLOCK_SIZE;
+ blocks -= 8;
+ }
+
+ XTEA::encrypt_n(in, out, blocks);
+ }
+
+/*
+* XTEA Decryption
+*/
+void XTEA_SIMD::decrypt_n(const byte in[], byte out[], u32bit blocks) const
+ {
+ while(blocks >= 8)
+ {
+ xtea_decrypt_8(in, out, this->EK);
+ in += 8 * BLOCK_SIZE;
+ out += 8 * BLOCK_SIZE;
+ blocks -= 8;
+ }
+
+ XTEA::decrypt_n(in, out, blocks);
+ }
+
+}
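
Each SIMD lane in the loops above runs the same two Feistel half-rounds as scalar XTEA, just in four lanes at once (and two register groups per call, so eight blocks). For reference, a scalar sketch of one block under the same assumption the SIMD code makes about its input, namely a 64-word expanded schedule EK[] indexed as EK[2*i] and EK[2*i+1] per round, would be:

#include <stdint.h>

/*
* Scalar reference for the round structure the SIMD loops parallelise.
* EK is assumed to be the 64-word expanded schedule (with the sum constant
* already folded in), matching how xtea_simd.cpp indexes EK[2*i] and EK[2*i+1].
*/
void xtea_encrypt_one_block(uint32_t& L, uint32_t& R, const uint32_t EK[64])
   {
   for(uint32_t i = 0; i != 32; ++i)
      {
      L += (((R << 4) ^ (R >> 5)) + R) ^ EK[2*i];
      R += (((L << 4) ^ (L >> 5)) + L) ^ EK[2*i+1];
      }
   }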
diff --git a/src/block/xtea_simd/xtea_simd.h b/src/block/xtea_simd/xtea_simd.h
new file mode 100644
index 000000000..e4ce734ed
--- /dev/null
+++ b/src/block/xtea_simd/xtea_simd.h
@@ -0,0 +1,28 @@
+/*
+* XTEA in SIMD
+* (C) 2009 Jack Lloyd
+*
+* Distributed under the terms of the Botan license
+*/
+
+#ifndef BOTAN_XTEA_SIMD_H__
+#define BOTAN_XTEA_SIMD_H__
+
+#include <botan/xtea.h>
+
+namespace Botan {
+
+/*
+* XTEA (SIMD variant)
+*/
+class BOTAN_DLL XTEA_SIMD : public XTEA
+ {
+ public:
+ void encrypt_n(const byte in[], byte out[], u32bit blocks) const;
+ void decrypt_n(const byte in[], byte out[], u32bit blocks) const;
+ BlockCipher* clone() const { return new XTEA_SIMD; }
+ };
+
+}
+
+#endif
diff --git a/src/engine/simd_engine/info.txt b/src/engine/simd_engine/info.txt
new file mode 100644
index 000000000..b0523285f
--- /dev/null
+++ b/src/engine/simd_engine/info.txt
@@ -0,0 +1,3 @@
+define ENGINE_SIMD
+
+load_on dep
diff --git a/src/engine/simd_engine/simd_engine.cpp b/src/engine/simd_engine/simd_engine.cpp
new file mode 100644
index 000000000..7e15f9ec1
--- /dev/null
+++ b/src/engine/simd_engine/simd_engine.cpp
@@ -0,0 +1,54 @@
+/**
+* SIMD Engine
+* (C) 1999-2009 Jack Lloyd
+*
+* Distributed under the terms of the Botan license
+*/
+
+#include <botan/simd_engine.h>
+#include <botan/cpuid.h>
+
+#if defined(BOTAN_HAS_SERPENT_SIMD)
+ #include <botan/serp_simd.h>
+#endif
+
+#if defined(BOTAN_HAS_XTEA_SIMD)
+ #include <botan/xtea_simd.h>
+#endif
+
+#if defined(BOTAN_HAS_SHA1_SSE2)
+ #include <botan/sha1_sse2.h>
+#endif
+
+namespace Botan {
+
+BlockCipher*
+SIMD_Engine::find_block_cipher(const SCAN_Name& request,
+ Algorithm_Factory&) const
+ {
+#if defined(BOTAN_HAS_SERPENT_SIMD)
+ if(request.algo_name() == "Serpent")
+ return new Serpent_SIMD;
+#endif
+
+#if defined(BOTAN_HAS_XTEA_SIMD)
+ if(request.algo_name() == "XTEA")
+ return new XTEA_SIMD;
+#endif
+
+ return 0;
+ }
+
+HashFunction*
+SIMD_Engine::find_hash(const SCAN_Name& request,
+ Algorithm_Factory&) const
+ {
+#if defined(BOTAN_HAS_SHA1_SSE2)
+ if(request.algo_name() == "SHA-160" && CPUID::has_sse2())
+ return new SHA_160_SSE2;
+#endif
+
+ return 0;
+ }
+
+}
diff --git a/src/engine/sse2_eng/eng_sse2.h b/src/engine/simd_engine/simd_engine.h
index c6b0ce889..22a58e203 100644
--- a/src/engine/sse2_eng/eng_sse2.h
+++ b/src/engine/simd_engine/simd_engine.h
@@ -1,21 +1,21 @@
/**
-* SSE2 Assembly Engine
+* SIMD Assembly Engine
* (C) 1999-2009 Jack Lloyd
*
* Distributed under the terms of the Botan license
*/
-#ifndef BOTAN_SSE2_ASM_ENGINE_H__
-#define BOTAN_SSE2_ASM_ENGINE_H__
+#ifndef BOTAN_SIMD_ENGINE_H__
+#define BOTAN_SIMD_ENGINE_H__
#include <botan/engine.h>
namespace Botan {
-class BOTAN_DLL SSE2_Assembler_Engine : public Engine
+class BOTAN_DLL SIMD_Engine : public Engine
{
public:
- std::string provider_name() const { return "sse2"; }
+ std::string provider_name() const { return "simd"; }
private:
BlockCipher* find_block_cipher(const SCAN_Name&,
Algorithm_Factory&) const;
diff --git a/src/engine/sse2_eng/eng_sse2.cpp b/src/engine/sse2_eng/eng_sse2.cpp
deleted file mode 100644
index 07c625c7c..000000000
--- a/src/engine/sse2_eng/eng_sse2.cpp
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
-* SSE2 Assembly Engine
-* (C) 1999-2009 Jack Lloyd
-*
-* Distributed under the terms of the Botan license
-*/
-
-#include <botan/eng_sse2.h>
-#include <botan/cpuid.h>
-
-#if defined(BOTAN_HAS_SHA1_SSE2)
- #include <botan/sha1_sse2.h>
-#endif
-
-#if defined(BOTAN_HAS_SERPENT_SSE2)
- #include <botan/serp_sse2.h>
-#endif
-
-namespace Botan {
-
-BlockCipher*
-SSE2_Assembler_Engine::find_block_cipher(const SCAN_Name& request,
- Algorithm_Factory&) const
- {
- if(!CPUID::has_sse2())
- return 0;
-
-#if defined(BOTAN_HAS_SERPENT_SSE2)
- if(request.algo_name() == "Serpent")
- return new Serpent_SSE2;
-#endif
-
- return 0;
- }
-
-HashFunction*
-SSE2_Assembler_Engine::find_hash(const SCAN_Name& request,
- Algorithm_Factory&) const
- {
- if(!CPUID::has_sse2())
- return 0;
-
-#if defined(BOTAN_HAS_SHA1_SSE2)
- if(request.algo_name() == "SHA-160")
- return new SHA_160_SSE2;
-#endif
-
- return 0;
- }
-
-}
diff --git a/src/engine/sse2_eng/info.txt b/src/engine/sse2_eng/info.txt
deleted file mode 100644
index 43df92343..000000000
--- a/src/engine/sse2_eng/info.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-define ENGINE_SSE2_ASSEMBLER
-
-load_on dep
-
-<add>
-eng_sse2.cpp
-eng_sse2.h
-</add>
-
-<arch>
-pentium-m
-pentium4
-prescott
-amd64
-</arch>
-
-<cc>
-gcc
-icc
-msvc
-</cc>
diff --git a/src/hash/sha1/sha160.cpp b/src/hash/sha1/sha160.cpp
index 92f4f5f11..88f2161e2 100644
--- a/src/hash/sha1/sha160.cpp
+++ b/src/hash/sha1/sha160.cpp
@@ -82,37 +82,49 @@ void SHA_160::compress_n(const byte input[], u32bit blocks)
W[j+7] = rotate_left((W[j+4] ^ W[j-1] ^ W[j- 7] ^ W[j- 9]), 1);
}
- F1(A,B,C,D,E,W[ 0]); F1(E,A,B,C,D,W[ 1]); F1(D,E,A,B,C,W[ 2]);
- F1(C,D,E,A,B,W[ 3]); F1(B,C,D,E,A,W[ 4]); F1(A,B,C,D,E,W[ 5]);
- F1(E,A,B,C,D,W[ 6]); F1(D,E,A,B,C,W[ 7]); F1(C,D,E,A,B,W[ 8]);
- F1(B,C,D,E,A,W[ 9]); F1(A,B,C,D,E,W[10]); F1(E,A,B,C,D,W[11]);
- F1(D,E,A,B,C,W[12]); F1(C,D,E,A,B,W[13]); F1(B,C,D,E,A,W[14]);
- F1(A,B,C,D,E,W[15]); F1(E,A,B,C,D,W[16]); F1(D,E,A,B,C,W[17]);
- F1(C,D,E,A,B,W[18]); F1(B,C,D,E,A,W[19]);
-
- F2(A,B,C,D,E,W[20]); F2(E,A,B,C,D,W[21]); F2(D,E,A,B,C,W[22]);
- F2(C,D,E,A,B,W[23]); F2(B,C,D,E,A,W[24]); F2(A,B,C,D,E,W[25]);
- F2(E,A,B,C,D,W[26]); F2(D,E,A,B,C,W[27]); F2(C,D,E,A,B,W[28]);
- F2(B,C,D,E,A,W[29]); F2(A,B,C,D,E,W[30]); F2(E,A,B,C,D,W[31]);
- F2(D,E,A,B,C,W[32]); F2(C,D,E,A,B,W[33]); F2(B,C,D,E,A,W[34]);
- F2(A,B,C,D,E,W[35]); F2(E,A,B,C,D,W[36]); F2(D,E,A,B,C,W[37]);
- F2(C,D,E,A,B,W[38]); F2(B,C,D,E,A,W[39]);
-
- F3(A,B,C,D,E,W[40]); F3(E,A,B,C,D,W[41]); F3(D,E,A,B,C,W[42]);
- F3(C,D,E,A,B,W[43]); F3(B,C,D,E,A,W[44]); F3(A,B,C,D,E,W[45]);
- F3(E,A,B,C,D,W[46]); F3(D,E,A,B,C,W[47]); F3(C,D,E,A,B,W[48]);
- F3(B,C,D,E,A,W[49]); F3(A,B,C,D,E,W[50]); F3(E,A,B,C,D,W[51]);
- F3(D,E,A,B,C,W[52]); F3(C,D,E,A,B,W[53]); F3(B,C,D,E,A,W[54]);
- F3(A,B,C,D,E,W[55]); F3(E,A,B,C,D,W[56]); F3(D,E,A,B,C,W[57]);
- F3(C,D,E,A,B,W[58]); F3(B,C,D,E,A,W[59]);
-
- F4(A,B,C,D,E,W[60]); F4(E,A,B,C,D,W[61]); F4(D,E,A,B,C,W[62]);
- F4(C,D,E,A,B,W[63]); F4(B,C,D,E,A,W[64]); F4(A,B,C,D,E,W[65]);
- F4(E,A,B,C,D,W[66]); F4(D,E,A,B,C,W[67]); F4(C,D,E,A,B,W[68]);
- F4(B,C,D,E,A,W[69]); F4(A,B,C,D,E,W[70]); F4(E,A,B,C,D,W[71]);
- F4(D,E,A,B,C,W[72]); F4(C,D,E,A,B,W[73]); F4(B,C,D,E,A,W[74]);
- F4(A,B,C,D,E,W[75]); F4(E,A,B,C,D,W[76]); F4(D,E,A,B,C,W[77]);
- F4(C,D,E,A,B,W[78]); F4(B,C,D,E,A,W[79]);
+ F1(A, B, C, D, E, W[ 0]); F1(E, A, B, C, D, W[ 1]);
+ F1(D, E, A, B, C, W[ 2]); F1(C, D, E, A, B, W[ 3]);
+ F1(B, C, D, E, A, W[ 4]); F1(A, B, C, D, E, W[ 5]);
+ F1(E, A, B, C, D, W[ 6]); F1(D, E, A, B, C, W[ 7]);
+ F1(C, D, E, A, B, W[ 8]); F1(B, C, D, E, A, W[ 9]);
+ F1(A, B, C, D, E, W[10]); F1(E, A, B, C, D, W[11]);
+ F1(D, E, A, B, C, W[12]); F1(C, D, E, A, B, W[13]);
+ F1(B, C, D, E, A, W[14]); F1(A, B, C, D, E, W[15]);
+ F1(E, A, B, C, D, W[16]); F1(D, E, A, B, C, W[17]);
+ F1(C, D, E, A, B, W[18]); F1(B, C, D, E, A, W[19]);
+
+ F2(A, B, C, D, E, W[20]); F2(E, A, B, C, D, W[21]);
+ F2(D, E, A, B, C, W[22]); F2(C, D, E, A, B, W[23]);
+ F2(B, C, D, E, A, W[24]); F2(A, B, C, D, E, W[25]);
+ F2(E, A, B, C, D, W[26]); F2(D, E, A, B, C, W[27]);
+ F2(C, D, E, A, B, W[28]); F2(B, C, D, E, A, W[29]);
+ F2(A, B, C, D, E, W[30]); F2(E, A, B, C, D, W[31]);
+ F2(D, E, A, B, C, W[32]); F2(C, D, E, A, B, W[33]);
+ F2(B, C, D, E, A, W[34]); F2(A, B, C, D, E, W[35]);
+ F2(E, A, B, C, D, W[36]); F2(D, E, A, B, C, W[37]);
+ F2(C, D, E, A, B, W[38]); F2(B, C, D, E, A, W[39]);
+
+ F3(A, B, C, D, E, W[40]); F3(E, A, B, C, D, W[41]);
+ F3(D, E, A, B, C, W[42]); F3(C, D, E, A, B, W[43]);
+ F3(B, C, D, E, A, W[44]); F3(A, B, C, D, E, W[45]);
+ F3(E, A, B, C, D, W[46]); F3(D, E, A, B, C, W[47]);
+ F3(C, D, E, A, B, W[48]); F3(B, C, D, E, A, W[49]);
+ F3(A, B, C, D, E, W[50]); F3(E, A, B, C, D, W[51]);
+ F3(D, E, A, B, C, W[52]); F3(C, D, E, A, B, W[53]);
+ F3(B, C, D, E, A, W[54]); F3(A, B, C, D, E, W[55]);
+ F3(E, A, B, C, D, W[56]); F3(D, E, A, B, C, W[57]);
+ F3(C, D, E, A, B, W[58]); F3(B, C, D, E, A, W[59]);
+
+ F4(A, B, C, D, E, W[60]); F4(E, A, B, C, D, W[61]);
+ F4(D, E, A, B, C, W[62]); F4(C, D, E, A, B, W[63]);
+ F4(B, C, D, E, A, W[64]); F4(A, B, C, D, E, W[65]);
+ F4(E, A, B, C, D, W[66]); F4(D, E, A, B, C, W[67]);
+ F4(C, D, E, A, B, W[68]); F4(B, C, D, E, A, W[69]);
+ F4(A, B, C, D, E, W[70]); F4(E, A, B, C, D, W[71]);
+ F4(D, E, A, B, C, W[72]); F4(C, D, E, A, B, W[73]);
+ F4(B, C, D, E, A, W[74]); F4(A, B, C, D, E, W[75]);
+ F4(E, A, B, C, D, W[76]); F4(D, E, A, B, C, W[77]);
+ F4(C, D, E, A, B, W[78]); F4(B, C, D, E, A, W[79]);
A = (digest[0] += A);
B = (digest[1] += B);
diff --git a/src/hash/sha1_sse2/info.txt b/src/hash/sha1_sse2/info.txt
index ad61aa5fa..ee61076b4 100644
--- a/src/hash/sha1_sse2/info.txt
+++ b/src/hash/sha1_sse2/info.txt
@@ -2,5 +2,18 @@ define SHA1_SSE2
<requires>
sha1
-sse2_eng
+simd_engine
</requires>
+
+<arch>
+pentium-m
+pentium4
+prescott
+amd64
+</arch>
+
+<cc>
+gcc
+icc
+msvc
+</cc>
diff --git a/src/hash/sha1_sse2/sha1_sse2.cpp b/src/hash/sha1_sse2/sha1_sse2.cpp
index dddc06b7b..fc6466dd0 100644
--- a/src/hash/sha1_sse2/sha1_sse2.cpp
+++ b/src/hash/sha1_sse2/sha1_sse2.cpp
@@ -1,23 +1,278 @@
/*
-* SHA-160 (SSE2)
-* (C) 1999-2007 Jack Lloyd
+* SHA-1 using SSE2
+* (C) 2009 Jack Lloyd
*
* Distributed under the terms of the Botan license
+*
+* Based on public domain code by Dean Gaudet <[email protected]>
+* Source - http://arctic.org/~dean/crypto/sha1.html
*/
#include <botan/sha1_sse2.h>
+#include <botan/rotate.h>
+#include <emmintrin.h>
namespace Botan {
+namespace {
+
+/*
+First 16 bytes just need byte swapping. Preparing just means
+adding in the round constants.
+*/
+
+#define prep00_15(P, W) \
+ do { \
+ W = _mm_shufflehi_epi16(W, _MM_SHUFFLE(2, 3, 0, 1)); \
+ W = _mm_shufflelo_epi16(W, _MM_SHUFFLE(2, 3, 0, 1)); \
+ W = _mm_or_si128(_mm_slli_epi16(W, 8), \
+ _mm_srli_epi16(W, 8)); \
+ P.u128 = _mm_add_epi32(W, K00_19); \
+ } while(0)
+
+/*
+for each multiple of 4, t, we want to calculate this:
+
+W[t+0] = rol(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1);
+W[t+1] = rol(W[t-2] ^ W[t-7] ^ W[t-13] ^ W[t-15], 1);
+W[t+2] = rol(W[t-1] ^ W[t-6] ^ W[t-12] ^ W[t-14], 1);
+W[t+3] = rol(W[t] ^ W[t-5] ^ W[t-11] ^ W[t-13], 1);
+
+we'll actually calculate this:
+
+W[t+0] = rol(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1);
+W[t+1] = rol(W[t-2] ^ W[t-7] ^ W[t-13] ^ W[t-15], 1);
+W[t+2] = rol(W[t-1] ^ W[t-6] ^ W[t-12] ^ W[t-14], 1);
+W[t+3] = rol( 0 ^ W[t-5] ^ W[t-11] ^ W[t-13], 1);
+W[t+3] ^= rol(W[t+0], 1);
+
+the parameters are:
+
+W0 = &W[t-16];
+W1 = &W[t-12];
+W2 = &W[t- 8];
+W3 = &W[t- 4];
+
+and on output:
+prepared = W0 + K
+W0 = W[t]..W[t+3]
+*/
+
+/* note that there is a step here where i want to do a rol by 1, which
+* normally would look like this:
+*
+* r1 = psrld r0,$31
+* r0 = pslld r0,$1
+* r0 = por r0,r1
+*
+* but instead i do this:
+*
+* r1 = pcmpltd r0,zero
+* r0 = paddd r0,r0
+* r0 = psub r0,r1
+*
+* because pcmpltd and paddd are available in both MMX units on
+* efficeon, pentium-m, and opteron but shifts are available in
+* only one unit.
+*/
+#define prep(prep, XW0, XW1, XW2, XW3, K) \
+ do { \
+ __m128i r0, r1, r2, r3; \
+ \
+ /* load W[t-4] 16-byte aligned, and shift */ \
+ r3 = _mm_srli_si128((XW3), 4); \
+ r0 = (XW0); \
+ /* get high 64-bits of XW0 into low 64-bits */ \
+ r1 = _mm_shuffle_epi32((XW0), _MM_SHUFFLE(1,0,3,2)); \
+ /* load high 64-bits of r1 */ \
+ r1 = _mm_unpacklo_epi64(r1, (XW1)); \
+ r2 = (XW2); \
+ \
+ r0 = _mm_xor_si128(r1, r0); \
+ r2 = _mm_xor_si128(r3, r2); \
+ r0 = _mm_xor_si128(r2, r0); \
+ /* unrotated W[t]..W[t+2] in r0 ... still need W[t+3] */ \
+ \
+ r2 = _mm_slli_si128(r0, 12); \
+ r1 = _mm_cmplt_epi32(r0, _mm_setzero_si128()); \
+ r0 = _mm_add_epi32(r0, r0); /* shift left by 1 */ \
+ r0 = _mm_sub_epi32(r0, r1); /* r0 has W[t]..W[t+2] */ \
+ \
+ r3 = _mm_srli_epi32(r2, 30); \
+ r2 = _mm_slli_epi32(r2, 2); \
+ \
+ r0 = _mm_xor_si128(r0, r3); \
+ r0 = _mm_xor_si128(r0, r2); /* r0 now has W[t+3] */ \
+ \
+ (XW0) = r0; \
+ (prep).u128 = _mm_add_epi32(r0, K); \
+ } while(0)
+
+/*
+* SHA-160 F1 Function
+*/
+inline void F1(u32bit A, u32bit& B, u32bit C, u32bit D, u32bit& E, u32bit msg)
+ {
+ E += (D ^ (B & (C ^ D))) + msg + rotate_left(A, 5);
+ B = rotate_left(B, 30);
+ }
+
/*
-* SHA-160 Compression Function
+* SHA-160 F2 Function
*/
-void SHA_160_SSE2::compress_n(const byte input[], u32bit blocks)
+inline void F2(u32bit A, u32bit& B, u32bit C, u32bit D, u32bit& E, u32bit msg)
{
+ E += (B ^ C ^ D) + msg + rotate_left(A, 5);
+ B = rotate_left(B, 30);
+ }
+
+/*
+* SHA-160 F3 Function
+*/
+inline void F3(u32bit A, u32bit& B, u32bit C, u32bit D, u32bit& E, u32bit msg)
+ {
+ E += ((B & C) | ((B | C) & D)) + msg + rotate_left(A, 5);
+ B = rotate_left(B, 30);
+ }
+
+/*
+* SHA-160 F4 Function
+*/
+inline void F4(u32bit A, u32bit& B, u32bit C, u32bit D, u32bit& E, u32bit msg)
+ {
+ E += (B ^ C ^ D) + msg + rotate_left(A, 5);
+ B = rotate_left(B, 30);
+ }
+
+}
+
+/*
+* SHA-160 Compression Function using SSE for message expansion
+*/
+void SHA_160_SSE2::compress_n(const byte input_bytes[], u32bit blocks)
+ {
+ const __m128i K00_19 = _mm_set1_epi32(0x5A827999);
+ const __m128i K20_39 = _mm_set1_epi32(0x6ED9EBA1);
+ const __m128i K40_59 = _mm_set1_epi32(0x8F1BBCDC);
+ const __m128i K60_79 = _mm_set1_epi32(0xCA62C1D6);
+
+ u32bit A = digest[0], B = digest[1], C = digest[2],
+ D = digest[3], E = digest[4];
+
+ const __m128i* input = (const __m128i *)input_bytes;
+
for(u32bit i = 0; i != blocks; ++i)
{
- botan_sha1_sse2_compress(digest, reinterpret_cast<const u32bit*>(input));
- input += HASH_BLOCK_SIZE;
+
+ /* I've tried arranging the SSE2 code to be 4, 8, 12, and 16
+ * steps ahead of the integer code. 12 steps ahead seems to
+ * produce the best performance. -dean
+ *
+ * Todo: check this is still true on Barcelona and Core2 -Jack
+ */
+
+ union v4si {
+ u32bit u32[4];
+ __m128i u128;
+ };
+
+ v4si P0, P1, P2;
+
+ __m128i W0 = _mm_loadu_si128(&input[0]);
+ prep00_15(P0, W0);
+
+ __m128i W1 = _mm_loadu_si128(&input[1]);
+ prep00_15(P1, W1);
+
+ __m128i W2 = _mm_loadu_si128(&input[2]);
+ prep00_15(P2, W2);
+
+ __m128i W3 = _mm_loadu_si128(&input[3]);
+
+ F1(A, B, C, D, E, P0.u32[0]); F1(E, A, B, C, D, P0.u32[1]);
+ F1(D, E, A, B, C, P0.u32[2]); F1(C, D, E, A, B, P0.u32[3]);
+ prep00_15(P0, W3);
+
+ F1(B, C, D, E, A, P1.u32[0]); F1(A, B, C, D, E, P1.u32[1]);
+ F1(E, A, B, C, D, P1.u32[2]); F1(D, E, A, B, C, P1.u32[3]);
+ prep(P1, W0, W1, W2, W3, K00_19);
+
+ F1(C, D, E, A, B, P2.u32[0]); F1(B, C, D, E, A, P2.u32[1]);
+ F1(A, B, C, D, E, P2.u32[2]); F1(E, A, B, C, D, P2.u32[3]);
+ prep(P2, W1, W2, W3, W0, K20_39);
+
+ F1(D, E, A, B, C, P0.u32[0]); F1(C, D, E, A, B, P0.u32[1]);
+ F1(B, C, D, E, A, P0.u32[2]); F1(A, B, C, D, E, P0.u32[3]);
+ prep(P0, W2, W3, W0, W1, K20_39);
+
+ F1(E, A, B, C, D, P1.u32[0]); F1(D, E, A, B, C, P1.u32[1]);
+ F1(C, D, E, A, B, P1.u32[2]); F1(B, C, D, E, A, P1.u32[3]);
+ prep(P1, W3, W0, W1, W2, K20_39);
+
+ F2(A, B, C, D, E, P2.u32[0]); F2(E, A, B, C, D, P2.u32[1]);
+ F2(D, E, A, B, C, P2.u32[2]); F2(C, D, E, A, B, P2.u32[3]);
+ prep(P2, W0, W1, W2, W3, K20_39);
+
+ F2(B, C, D, E, A, P0.u32[0]); F2(A, B, C, D, E, P0.u32[1]);
+ F2(E, A, B, C, D, P0.u32[2]); F2(D, E, A, B, C, P0.u32[3]);
+ prep(P0, W1, W2, W3, W0, K20_39);
+
+ F2(C, D, E, A, B, P1.u32[0]); F2(B, C, D, E, A, P1.u32[1]);
+ F2(A, B, C, D, E, P1.u32[2]); F2(E, A, B, C, D, P1.u32[3]);
+ prep(P1, W2, W3, W0, W1, K40_59);
+
+ F2(D, E, A, B, C, P2.u32[0]); F2(C, D, E, A, B, P2.u32[1]);
+ F2(B, C, D, E, A, P2.u32[2]); F2(A, B, C, D, E, P2.u32[3]);
+ prep(P2, W3, W0, W1, W2, K40_59);
+
+ F2(E, A, B, C, D, P0.u32[0]); F2(D, E, A, B, C, P0.u32[1]);
+ F2(C, D, E, A, B, P0.u32[2]); F2(B, C, D, E, A, P0.u32[3]);
+ prep(P0, W0, W1, W2, W3, K40_59);
+
+ F3(A, B, C, D, E, P1.u32[0]); F3(E, A, B, C, D, P1.u32[1]);
+ F3(D, E, A, B, C, P1.u32[2]); F3(C, D, E, A, B, P1.u32[3]);
+ prep(P1, W1, W2, W3, W0, K40_59);
+
+ F3(B, C, D, E, A, P2.u32[0]); F3(A, B, C, D, E, P2.u32[1]);
+ F3(E, A, B, C, D, P2.u32[2]); F3(D, E, A, B, C, P2.u32[3]);
+ prep(P2, W2, W3, W0, W1, K40_59);
+
+ F3(C, D, E, A, B, P0.u32[0]); F3(B, C, D, E, A, P0.u32[1]);
+ F3(A, B, C, D, E, P0.u32[2]); F3(E, A, B, C, D, P0.u32[3]);
+ prep(P0, W3, W0, W1, W2, K60_79);
+
+ F3(D, E, A, B, C, P1.u32[0]); F3(C, D, E, A, B, P1.u32[1]);
+ F3(B, C, D, E, A, P1.u32[2]); F3(A, B, C, D, E, P1.u32[3]);
+ prep(P1, W0, W1, W2, W3, K60_79);
+
+ F3(E, A, B, C, D, P2.u32[0]); F3(D, E, A, B, C, P2.u32[1]);
+ F3(C, D, E, A, B, P2.u32[2]); F3(B, C, D, E, A, P2.u32[3]);
+ prep(P2, W1, W2, W3, W0, K60_79);
+
+ F4(A, B, C, D, E, P0.u32[0]); F4(E, A, B, C, D, P0.u32[1]);
+ F4(D, E, A, B, C, P0.u32[2]); F4(C, D, E, A, B, P0.u32[3]);
+ prep(P0, W2, W3, W0, W1, K60_79);
+
+ F4(B, C, D, E, A, P1.u32[0]); F4(A, B, C, D, E, P1.u32[1]);
+ F4(E, A, B, C, D, P1.u32[2]); F4(D, E, A, B, C, P1.u32[3]);
+ prep(P1, W3, W0, W1, W2, K60_79);
+
+ F4(C, D, E, A, B, P2.u32[0]); F4(B, C, D, E, A, P2.u32[1]);
+ F4(A, B, C, D, E, P2.u32[2]); F4(E, A, B, C, D, P2.u32[3]);
+
+ F4(D, E, A, B, C, P0.u32[0]); F4(C, D, E, A, B, P0.u32[1]);
+ F4(B, C, D, E, A, P0.u32[2]); F4(A, B, C, D, E, P0.u32[3]);
+
+ F4(E, A, B, C, D, P1.u32[0]); F4(D, E, A, B, C, P1.u32[1]);
+ F4(C, D, E, A, B, P1.u32[2]); F4(B, C, D, E, A, P1.u32[3]);
+
+ A = (digest[0] += A);
+ B = (digest[1] += B);
+ C = (digest[2] += C);
+ D = (digest[3] += D);
+ E = (digest[4] += E);
+
+ input += (HASH_BLOCK_SIZE / 16);
}
}
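
The prep() macro above relies on the rotate-left-by-one identity spelled out in its comment: doubling each 32-bit lane and then subtracting the compare-less-than-zero mask (which is -1 exactly when the top bit was set) gives the same result as a 1-bit rotate, while using instructions that dispatch to more execution units. A purely illustrative scalar check of that identity:

#include <assert.h>
#include <stdint.h>

/*
* Scalar demonstration of the rotate-left-by-1 trick used in prep():
* rol1(x) == 2*x - (x < 0 ? -1 : 0) with x viewed as a signed 32-bit lane.
*/
uint32_t rol1_shift(uint32_t x)
   {
   return (x << 1) | (x >> 31);
   }

uint32_t rol1_cmp_add(uint32_t x)
   {
   uint32_t mask = (int32_t(x) < 0) ? 0xFFFFFFFF : 0; // pcmpltd against zero
   uint32_t doubled = x + x;                           // paddd x,x == shift left by 1
   return doubled - mask;                              // psubd: -(-1) puts the carried bit back
   }

int main()
   {
   const uint32_t tests[] = { 0, 1, 0x7FFFFFFF, 0x80000000, 0xFFFFFFFF, 0x5A827999 };
   for(unsigned i = 0; i != sizeof(tests) / sizeof(tests[0]); ++i)
      assert(rol1_shift(tests[i]) == rol1_cmp_add(tests[i]));
   return 0;
   }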
diff --git a/src/hash/sha1_sse2/sha1_sse2_imp.cpp b/src/hash/sha1_sse2/sha1_sse2_imp.cpp
deleted file mode 100644
index 5ee222cca..000000000
--- a/src/hash/sha1_sse2/sha1_sse2_imp.cpp
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
-* SHA-1 using SSE2
-* (C) 2009 Jack Lloyd
-*
-* Distributed under the terms of the Botan license
-*/
-
-/*
-* Based on public domain code by Dean Gaudet <[email protected]>
-* http://arctic.org/~dean/crypto/sha1.html
-*/
-
-#include <botan/sha1_sse2.h>
-#include <botan/rotate.h>
-#include <emmintrin.h>
-
-namespace Botan {
-
-namespace {
-
-typedef union {
- u32bit u32[4];
- __m128i u128;
- } v4si;
-
-static const v4si K00_19 = { { 0x5a827999, 0x5a827999, 0x5a827999, 0x5a827999 } };
-static const v4si K20_39 = { { 0x6ed9eba1, 0x6ed9eba1, 0x6ed9eba1, 0x6ed9eba1 } };
-static const v4si K40_59 = { { 0x8f1bbcdc, 0x8f1bbcdc, 0x8f1bbcdc, 0x8f1bbcdc } };
-static const v4si K60_79 = { { 0xca62c1d6, 0xca62c1d6, 0xca62c1d6, 0xca62c1d6 } };
-
-/*
-the first 16 bytes only need byte swapping
-
-prepared points to 4x u32bit, 16-byte aligned
-
-W points to the 4 dwords which need preparing --
-and is overwritten with the swapped bytes
-*/
-#define prep00_15(prep, W) do { \
- __m128i r1, r2; \
- \
- r1 = (W); \
- r1 = _mm_shufflehi_epi16(r1, _MM_SHUFFLE(2, 3, 0, 1)); \
- r1 = _mm_shufflelo_epi16(r1, _MM_SHUFFLE(2, 3, 0, 1)); \
- r2 = _mm_slli_epi16(r1, 8); \
- r1 = _mm_srli_epi16(r1, 8); \
- r1 = _mm_or_si128(r1, r2); \
- (W) = r1; \
- (prep).u128 = _mm_add_epi32(K00_19.u128, r1); \
- } while(0)
-
-/*
-for each multiple of 4, t, we want to calculate this:
-
-W[t+0] = rol(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1);
-W[t+1] = rol(W[t-2] ^ W[t-7] ^ W[t-13] ^ W[t-15], 1);
-W[t+2] = rol(W[t-1] ^ W[t-6] ^ W[t-12] ^ W[t-14], 1);
-W[t+3] = rol(W[t] ^ W[t-5] ^ W[t-11] ^ W[t-13], 1);
-
-we'll actually calculate this:
-
-W[t+0] = rol(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1);
-W[t+1] = rol(W[t-2] ^ W[t-7] ^ W[t-13] ^ W[t-15], 1);
-W[t+2] = rol(W[t-1] ^ W[t-6] ^ W[t-12] ^ W[t-14], 1);
-W[t+3] = rol( 0 ^ W[t-5] ^ W[t-11] ^ W[t-13], 1);
-W[t+3] ^= rol(W[t+0], 1);
-
-the parameters are:
-
-W0 = &W[t-16];
-W1 = &W[t-12];
-W2 = &W[t- 8];
-W3 = &W[t- 4];
-
-and on output:
-prepared = W0 + K
-W0 = W[t]..W[t+3]
-*/
-
-/* note that there is a step here where i want to do a rol by 1, which
-* normally would look like this:
-*
-* r1 = psrld r0,$31
-* r0 = pslld r0,$1
-* r0 = por r0,r1
-*
-* but instead i do this:
-*
-* r1 = pcmpltd r0,zero
-* r0 = paddd r0,r0
-* r0 = psub r0,r1
-*
-* because pcmpltd and paddd are availabe in both MMX units on
-* efficeon, pentium-m, and opteron but shifts are available in
-* only one unit.
-*/
-#define prep(prep, XW0, XW1, XW2, XW3, K) \
- do { \
- __m128i r0, r1, r2, r3; \
- \
- /* load W[t-4] 16-byte aligned, and shift */ \
- r3 = _mm_srli_si128((XW3), 4); \
- r0 = (XW0); \
- /* get high 64-bits of XW0 into low 64-bits */ \
- r1 = _mm_shuffle_epi32((XW0), _MM_SHUFFLE(1,0,3,2)); \
- /* load high 64-bits of r1 */ \
- r1 = _mm_unpacklo_epi64(r1, (XW1)); \
- r2 = (XW2); \
- \
- r0 = _mm_xor_si128(r1, r0); \
- r2 = _mm_xor_si128(r3, r2); \
- r0 = _mm_xor_si128(r2, r0); \
- /* unrotated W[t]..W[t+2] in r0 ... still need W[t+3] */ \
- \
- r2 = _mm_slli_si128(r0, 12); \
- r1 = _mm_cmplt_epi32(r0, _mm_setzero_si128()); \
- r0 = _mm_add_epi32(r0, r0); /* shift left by 1 */ \
- r0 = _mm_sub_epi32(r0, r1); /* r0 has W[t]..W[t+2] */ \
- \
- r3 = _mm_srli_epi32(r2, 30); \
- r2 = _mm_slli_epi32(r2, 2); \
- \
- r0 = _mm_xor_si128(r0, r3); \
- r0 = _mm_xor_si128(r0, r2); /* r0 now has W[t+3] */ \
- \
- (XW0) = r0; \
- (prep).u128 = _mm_add_epi32(r0, (K).u128); \
- } while(0)
-
-static inline u32bit f00_19(u32bit x, u32bit y, u32bit z)
- {
- /* FIPS 180-2 says this: (x & y) ^ (~x & z)
- * but we can calculate it in fewer steps.
- */
- return ((y ^ z) & x) ^ z;
- }
-
-
-static inline u32bit f20_39(u32bit x, u32bit y, u32bit z)
- {
- return (x ^ z) ^ y;
- }
-
-
-static inline u32bit f40_59(u32bit x, u32bit y, u32bit z)
- {
- /* FIPS 180-2 says this: (x & y) ^ (x & z) ^ (y & z)
- * but we can calculate it in fewer steps.
- */
- return (x & z) | ((x | z) & y);
- }
-
-
-static inline u32bit f60_79(u32bit x, u32bit y, u32bit z)
- {
- return f20_39(x, y, z);
- }
-
-#define step(nn_mm, xa, xb, xc, xd, xe, xt, input) \
- do { \
- (xt) = (input) + f##nn_mm((xb), (xc), (xd)); \
- (xb) = rotate_left((xb), 30); \
- (xt) += ((xe) + rotate_left((xa), 5)); \
- } while(0)
-
-}
-
-extern "C" void botan_sha1_sse2_compress(u32bit H[5],
- const u32bit* inputu)
- {
- const __m128i * input = (const __m128i *)inputu;
- __m128i W0, W1, W2, W3;
- v4si prep0, prep1, prep2;
- u32bit a, b, c, d, e, t;
-
- a = H[0];
- b = H[1];
- c = H[2];
- d = H[3];
- e = H[4];
-
- /* i've tried arranging the SSE2 code to be 4, 8, 12, and 16
- * steps ahead of the integer code. 12 steps ahead seems
- * to produce the best performance. -dean
- */
- W0 = _mm_loadu_si128(&input[0]);
- prep00_15(prep0, W0); /* prepare for 00 through 03 */
- W1 = _mm_loadu_si128(&input[1]);
- prep00_15(prep1, W1); /* prepare for 04 through 07 */
- W2 = _mm_loadu_si128(&input[2]);
- prep00_15(prep2, W2); /* prepare for 08 through 11 */
-
- W3 = _mm_loadu_si128(&input[3]);
- step(00_19, a, b, c, d, e, t, prep0.u32[0]); /* 00 */
- step(00_19, t, a, b, c, d, e, prep0.u32[1]); /* 01 */
- step(00_19, e, t, a, b, c, d, prep0.u32[2]); /* 02 */
- step(00_19, d, e, t, a, b, c, prep0.u32[3]); /* 03 */
- prep00_15(prep0, W3);
- step(00_19, c, d, e, t, a, b, prep1.u32[0]); /* 04 */
- step(00_19, b, c, d, e, t, a, prep1.u32[1]); /* 05 */
- step(00_19, a, b, c, d, e, t, prep1.u32[2]); /* 06 */
- step(00_19, t, a, b, c, d, e, prep1.u32[3]); /* 07 */
- prep(prep1, W0, W1, W2, W3, K00_19); /* prepare for 16 through 19 */
- step(00_19, e, t, a, b, c, d, prep2.u32[0]); /* 08 */
- step(00_19, d, e, t, a, b, c, prep2.u32[1]); /* 09 */
- step(00_19, c, d, e, t, a, b, prep2.u32[2]); /* 10 */
- step(00_19, b, c, d, e, t, a, prep2.u32[3]); /* 11 */
- prep(prep2, W1, W2, W3, W0, K20_39); /* prepare for 20 through 23 */
- step(00_19, a, b, c, d, e, t, prep0.u32[0]); /* 12 */
- step(00_19, t, a, b, c, d, e, prep0.u32[1]); /* 13 */
- step(00_19, e, t, a, b, c, d, prep0.u32[2]); /* 14 */
- step(00_19, d, e, t, a, b, c, prep0.u32[3]); /* 15 */
- prep(prep0, W2, W3, W0, W1, K20_39);
- step(00_19, c, d, e, t, a, b, prep1.u32[0]); /* 16 */
- step(00_19, b, c, d, e, t, a, prep1.u32[1]); /* 17 */
- step(00_19, a, b, c, d, e, t, prep1.u32[2]); /* 18 */
- step(00_19, t, a, b, c, d, e, prep1.u32[3]); /* 19 */
-
- prep(prep1, W3, W0, W1, W2, K20_39);
- step(20_39, e, t, a, b, c, d, prep2.u32[0]); /* 20 */
- step(20_39, d, e, t, a, b, c, prep2.u32[1]); /* 21 */
- step(20_39, c, d, e, t, a, b, prep2.u32[2]); /* 22 */
- step(20_39, b, c, d, e, t, a, prep2.u32[3]); /* 23 */
- prep(prep2, W0, W1, W2, W3, K20_39);
- step(20_39, a, b, c, d, e, t, prep0.u32[0]); /* 24 */
- step(20_39, t, a, b, c, d, e, prep0.u32[1]); /* 25 */
- step(20_39, e, t, a, b, c, d, prep0.u32[2]); /* 26 */
- step(20_39, d, e, t, a, b, c, prep0.u32[3]); /* 27 */
- prep(prep0, W1, W2, W3, W0, K20_39);
- step(20_39, c, d, e, t, a, b, prep1.u32[0]); /* 28 */
- step(20_39, b, c, d, e, t, a, prep1.u32[1]); /* 29 */
- step(20_39, a, b, c, d, e, t, prep1.u32[2]); /* 30 */
- step(20_39, t, a, b, c, d, e, prep1.u32[3]); /* 31 */
- prep(prep1, W2, W3, W0, W1, K40_59);
- step(20_39, e, t, a, b, c, d, prep2.u32[0]); /* 32 */
- step(20_39, d, e, t, a, b, c, prep2.u32[1]); /* 33 */
- step(20_39, c, d, e, t, a, b, prep2.u32[2]); /* 34 */
- step(20_39, b, c, d, e, t, a, prep2.u32[3]); /* 35 */
- prep(prep2, W3, W0, W1, W2, K40_59);
- step(20_39, a, b, c, d, e, t, prep0.u32[0]); /* 36 */
- step(20_39, t, a, b, c, d, e, prep0.u32[1]); /* 37 */
- step(20_39, e, t, a, b, c, d, prep0.u32[2]); /* 38 */
- step(20_39, d, e, t, a, b, c, prep0.u32[3]); /* 39 */
-
- prep(prep0, W0, W1, W2, W3, K40_59);
- step(40_59, c, d, e, t, a, b, prep1.u32[0]); /* 40 */
- step(40_59, b, c, d, e, t, a, prep1.u32[1]); /* 41 */
- step(40_59, a, b, c, d, e, t, prep1.u32[2]); /* 42 */
- step(40_59, t, a, b, c, d, e, prep1.u32[3]); /* 43 */
- prep(prep1, W1, W2, W3, W0, K40_59);
- step(40_59, e, t, a, b, c, d, prep2.u32[0]); /* 44 */
- step(40_59, d, e, t, a, b, c, prep2.u32[1]); /* 45 */
- step(40_59, c, d, e, t, a, b, prep2.u32[2]); /* 46 */
- step(40_59, b, c, d, e, t, a, prep2.u32[3]); /* 47 */
- prep(prep2, W2, W3, W0, W1, K40_59);
- step(40_59, a, b, c, d, e, t, prep0.u32[0]); /* 48 */
- step(40_59, t, a, b, c, d, e, prep0.u32[1]); /* 49 */
- step(40_59, e, t, a, b, c, d, prep0.u32[2]); /* 50 */
- step(40_59, d, e, t, a, b, c, prep0.u32[3]); /* 51 */
- prep(prep0, W3, W0, W1, W2, K60_79);
- step(40_59, c, d, e, t, a, b, prep1.u32[0]); /* 52 */
- step(40_59, b, c, d, e, t, a, prep1.u32[1]); /* 53 */
- step(40_59, a, b, c, d, e, t, prep1.u32[2]); /* 54 */
- step(40_59, t, a, b, c, d, e, prep1.u32[3]); /* 55 */
- prep(prep1, W0, W1, W2, W3, K60_79);
- step(40_59, e, t, a, b, c, d, prep2.u32[0]); /* 56 */
- step(40_59, d, e, t, a, b, c, prep2.u32[1]); /* 57 */
- step(40_59, c, d, e, t, a, b, prep2.u32[2]); /* 58 */
- step(40_59, b, c, d, e, t, a, prep2.u32[3]); /* 59 */
-
- prep(prep2, W1, W2, W3, W0, K60_79);
- step(60_79, a, b, c, d, e, t, prep0.u32[0]); /* 60 */
- step(60_79, t, a, b, c, d, e, prep0.u32[1]); /* 61 */
- step(60_79, e, t, a, b, c, d, prep0.u32[2]); /* 62 */
- step(60_79, d, e, t, a, b, c, prep0.u32[3]); /* 63 */
- prep(prep0, W2, W3, W0, W1, K60_79);
- step(60_79, c, d, e, t, a, b, prep1.u32[0]); /* 64 */
- step(60_79, b, c, d, e, t, a, prep1.u32[1]); /* 65 */
- step(60_79, a, b, c, d, e, t, prep1.u32[2]); /* 66 */
- step(60_79, t, a, b, c, d, e, prep1.u32[3]); /* 67 */
- prep(prep1, W3, W0, W1, W2, K60_79);
- step(60_79, e, t, a, b, c, d, prep2.u32[0]); /* 68 */
- step(60_79, d, e, t, a, b, c, prep2.u32[1]); /* 69 */
- step(60_79, c, d, e, t, a, b, prep2.u32[2]); /* 70 */
- step(60_79, b, c, d, e, t, a, prep2.u32[3]); /* 71 */
-
- step(60_79, a, b, c, d, e, t, prep0.u32[0]); /* 72 */
- step(60_79, t, a, b, c, d, e, prep0.u32[1]); /* 73 */
- step(60_79, e, t, a, b, c, d, prep0.u32[2]); /* 74 */
- step(60_79, d, e, t, a, b, c, prep0.u32[3]); /* 75 */
- /* no more input to prepare */
- step(60_79, c, d, e, t, a, b, prep1.u32[0]); /* 76 */
- step(60_79, b, c, d, e, t, a, prep1.u32[1]); /* 77 */
- step(60_79, a, b, c, d, e, t, prep1.u32[2]); /* 78 */
- step(60_79, t, a, b, c, d, e, prep1.u32[3]); /* 79 */
- /* e, t, a, b, c, d */
- H[0] += e;
- H[1] += t;
- H[2] += a;
- H[3] += b;
- H[4] += c;
- }
-
-}
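For reference (not part of this patch): the f00_19 and f40_59 helpers removed above are the reduced forms of the FIPS 180-2 round functions described in their comments. Since every operation involved is bitwise, checking single bits is enough to confirm the equivalence; a minimal standalone check:

#include <cassert>
#include <cstdint>

int main()
   {
   for(uint32_t x = 0; x <= 1; ++x)
      for(uint32_t y = 0; y <= 1; ++y)
         for(uint32_t z = 0; z <= 1; ++z)
            {
            uint32_t ch_fips  = (x & y) ^ (~x & z);          // FIPS 180-2, rounds 0-19
            uint32_t ch_fast  = ((y ^ z) & x) ^ z;           // f00_19 above
            uint32_t maj_fips = (x & y) ^ (x & z) ^ (y & z); // FIPS 180-2, rounds 40-59
            uint32_t maj_fast = (x & z) | ((x | z) & y);     // f40_59 above

            assert((ch_fips & 1) == (ch_fast & 1));
            assert((maj_fips & 1) == (maj_fast & 1));
            }
   return 0;
   }
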
diff --git a/src/libstate/libstate.cpp b/src/libstate/libstate.cpp
index ca454458b..c78bce62d 100644
--- a/src/libstate/libstate.cpp
+++ b/src/libstate/libstate.cpp
@@ -37,8 +37,8 @@
#include <botan/eng_amd64.h>
#endif
-#if defined(BOTAN_HAS_ENGINE_SSE2_ASSEMBLER)
- #include <botan/eng_sse2.h>
+#if defined(BOTAN_HAS_ENGINE_SIMD)
+ #include <botan/simd_engine.h>
#endif
#if defined(BOTAN_HAS_ENGINE_GNU_MP)
@@ -288,8 +288,8 @@ void Library_State::initialize(bool thread_safe)
engines.push_back(new OpenSSL_Engine);
#endif
-#if defined(BOTAN_HAS_ENGINE_SSE2_ASSEMBLER)
- engines.push_back(new SSE2_Assembler_Engine);
+#if defined(BOTAN_HAS_ENGINE_SIMD)
+ engines.push_back(new SIMD_Engine);
#endif
#if defined(BOTAN_HAS_ENGINE_AMD64_ASSEMBLER)
diff --git a/src/utils/cpuid.cpp b/src/utils/cpuid.cpp
index f79e3a912..2d3b5d92c 100644
--- a/src/utils/cpuid.cpp
+++ b/src/utils/cpuid.cpp
@@ -98,4 +98,64 @@ u32bit CPUID::cache_line_size()
return cl_size;
}
+bool CPUID::has_altivec()
+ {
+ static bool first_time = true;
+ static bool altivec_capable = false;
+
+ if(first_time)
+ {
+#if defined(BOTAN_TARGET_ARCH_IS_PPC) || defined(BOTAN_TARGET_ARCH_IS_PPC64)
+
+ /*
+ PVR identifiers for various AltiVec-enabled CPUs. Taken from
+ PearPC and Linux sources, mostly.
+ */
+ const u16bit PVR_G4_7400 = 0x000C;
+ const u16bit PVR_G5_970 = 0x0039;
+ const u16bit PVR_G5_970FX = 0x003C;
+ const u16bit PVR_G5_970MP = 0x0044;
+ const u16bit PVR_G5_970GX = 0x0045;
+ const u16bit PVR_POWER6 = 0x003E;
+ const u16bit PVR_CELL_PPU = 0x0070;
+
+ // Motorola produced G4s with PVR 0x800[0123C] (at least)
+ const u16bit PVR_G4_74xx_24 = 0x800;
+
+ u32bit pvr = 0;
+ /*
+ On PowerPC, SPR 287 is the PVR, the Processor Version Register
+
+ Normally it is only accessible to ring 0, but Linux and NetBSD
+ (at least) will trap and emulate it for us. This is roughly 20x
+ saner than every other approach I've seen for this (all of which
+ are entirely OS specific, to boot).
+
+ Apparently OS X doesn't support this, but then again OS X
+ doesn't really support PPC anymore, so I'm not worrying about it.
+
+ For OSes that aren't known to support the emulation, leave pvr
+ as 0, which will cause all subsequent model number checks to fail.
+ */
+#if defined(BOTAN_TARGET_OS_IS_LINUX) || defined(BOTAN_TARGET_OS_IS_NETBSD)
+ asm volatile("mfspr %0, 287" : "=r" (pvr));
+#endif
+ // The top 16 bits suffice to identify the model
+ pvr >>= 16;
+
+ altivec_capable |= (pvr == PVR_G4_7400);
+ altivec_capable |= ((pvr >> 8) == PVR_G4_74xx_24);
+ altivec_capable |= (pvr == PVR_G5_970);
+ altivec_capable |= (pvr == PVR_G5_970FX);
+ altivec_capable |= (pvr == PVR_G5_970MP);
+ altivec_capable |= (pvr == PVR_G5_970GX);
+ altivec_capable |= (pvr == PVR_CELL_PPU);
+#endif
+
+ first_time = false;
+ }
+
+ return altivec_capable;
+ }
+
}
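As the comment above notes, the mfspr emulation is not available on OS X; there the usual approach is to ask sysctl instead. A sketch of that alternative (not part of this patch, shown only for illustration):

#include <sys/types.h>
#include <sys/sysctl.h>

bool altivec_via_sysctl()
   {
   int has_altivec = 0;
   size_t len = sizeof(has_altivec);

   // hw.optional.altivec reads 1 on PPC Macs with AltiVec, and is absent or 0 otherwise
   if(sysctlbyname("hw.optional.altivec", &has_altivec, &len, 0, 0) == 0)
      return (has_altivec != 0);

   return false;
   }
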
diff --git a/src/utils/cpuid.h b/src/utils/cpuid.h
index 0b210768a..8b8021754 100644
--- a/src/utils/cpuid.h
+++ b/src/utils/cpuid.h
@@ -65,6 +65,7 @@ class CPUID
static bool has_intel_aes()
{ return ((x86_processor_flags() >> CPUID_INTEL_AES_BIT) & 1); }
+ static bool has_altivec();
private:
static u64bit x86_processor_flags();
};
diff --git a/src/utils/simd_32/info.txt b/src/utils/simd_32/info.txt
new file mode 100644
index 000000000..64707c1e4
--- /dev/null
+++ b/src/utils/simd_32/info.txt
@@ -0,0 +1,16 @@
+define SIMD_32
+
+load_on always
+
+<arch>
+pentium-m
+pentium4
+prescott
+amd64
+</arch>
+
+<cc>
+gcc
+icc
+msvc
+</cc>
diff --git a/src/utils/simd_32/simd_32.h b/src/utils/simd_32/simd_32.h
new file mode 100644
index 000000000..be426efd6
--- /dev/null
+++ b/src/utils/simd_32/simd_32.h
@@ -0,0 +1,32 @@
+/**
+* Lightweight wrappers for SIMD operations
+* (C) 2009 Jack Lloyd
+*
+* Distributed under the terms of the Botan license
+*/
+
+#ifndef BOTAN_SIMD_32_H__
+#define BOTAN_SIMD_32_H__
+
+#include <botan/types.h>
+
+//#define BOTAN_TARGET_CPU_HAS_SSE2
+
+#if defined(BOTAN_TARGET_CPU_HAS_SSE2)
+
+ #include <botan/simd_sse.h>
+ namespace Botan { typedef SIMD_SSE2 SIMD_32; }
+
+#elif defined(BOTAN_TARGET_CPU_HAS_ALTIVEC)
+
+ #include <botan/simd_altivec.h>
+ namespace Botan { typedef SIMD_Altivec SIMD_32; }
+
+#else
+
+ #include <botan/simd_scalar.h>
+ namespace Botan { typedef SIMD_Scalar SIMD_32; }
+
+#endif
+
+#endif
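The point of this header is that cipher code is written once against the SIMD_32 typedef and picks up whichever backend was selected at build time. A rough usage sketch (not taken from the patch; the function and its round operation are invented for illustration), using only operations the wrappers below provide:

#include <botan/simd_32.h>

namespace Botan {

// Hypothetical helper: process four 16-byte blocks with one splatted round key
void demo_round(const byte in[64], byte out[64], u32bit round_key)
   {
   SIMD_32 B0 = SIMD_32::load_le(in);
   SIMD_32 B1 = SIMD_32::load_le(in + 16);
   SIMD_32 B2 = SIMD_32::load_le(in + 32);
   SIMD_32 B3 = SIMD_32::load_le(in + 48);

   SIMD_32 K(round_key); // broadcast the key into all four 32-bit lanes

   B0 ^= K; B0.rotate_left(3);
   B1 ^= K; B1.rotate_left(3);
   B2 ^= K; B2.rotate_left(3);
   B3 ^= K; B3.rotate_left(3);

   B0.store_le(out);
   B1.store_le(out + 16);
   B2.store_le(out + 32);
   B3.store_le(out + 48);
   }

}
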
diff --git a/src/utils/simd_32/simd_altivec.h b/src/utils/simd_32/simd_altivec.h
new file mode 100644
index 000000000..e1aa62002
--- /dev/null
+++ b/src/utils/simd_32/simd_altivec.h
@@ -0,0 +1,202 @@
+/**
+* Lightweight wrappers around AltiVec for 32-bit operations
+* (C) 2009 Jack Lloyd
+*
+* Distributed under the terms of the Botan license
+*/
+
+#ifndef BOTAN_SIMD_ALTIVEC_H__
+#define BOTAN_SIMD_ALTIVEC_H__
+
+#include <botan/loadstor.h>
+#include <botan/cpuid.h>
+
+#include <altivec.h>
+#undef vector
+
+namespace Botan {
+
+class SIMD_Altivec
+ {
+ public:
+ bool enabled() const { return CPUID::has_altivec(); }
+
+ SIMD_Altivec(const u32bit B[4])
+ {
+ reg = (__vector unsigned int){B[0], B[1], B[2], B[3]};
+ }
+
+ SIMD_Altivec(u32bit B0, u32bit B1, u32bit B2, u32bit B3)
+ {
+ reg = (__vector unsigned int){B0, B1, B2, B3};
+ }
+
+ SIMD_Altivec(u32bit B)
+ {
+ reg = (__vector unsigned int){B, B, B, B};
+ }
+
+ static SIMD_Altivec load_le(const void* in)
+ {
+ const u32bit* in_32 = static_cast<const u32bit*>(in);
+
+ __vector unsigned int R0 = vec_ld(0, in_32);
+ __vector unsigned int R1 = vec_ld(12, in_32);
+
+ __vector unsigned char perm = vec_lvsl(0, in_32);
+
+ perm = vec_xor(perm, vec_splat_u8(3));
+
+ R0 = vec_perm(R0, R1, perm);
+
+ return SIMD_Altivec(R0);
+ }
+
+ static SIMD_Altivec load_be(const void* in)
+ {
+ const u32bit* in_32 = static_cast<const u32bit*>(in);
+
+ __vector unsigned int R0 = vec_ld(0, in_32);
+ __vector unsigned int R1 = vec_ld(12, in_32);
+
+ __vector unsigned char perm = vec_lvsl(0, in_32);
+
+ R0 = vec_perm(R0, R1, perm);
+
+ return SIMD_Altivec(R0);
+ }
+
+ void store_le(byte out[]) const
+ {
+ __vector unsigned char perm = vec_lvsl(0, (u32bit*)0);
+
+ perm = vec_xor(perm, vec_splat_u8(3));
+
+ union {
+ __vector unsigned int V;
+ u32bit R[4];
+ } vec;
+
+ vec.V = vec_perm(reg, reg, perm);
+
+ Botan::store_be(out, vec.R[0], vec.R[1], vec.R[2], vec.R[3]);
+ }
+
+ void store_be(byte out[]) const
+ {
+ union {
+ __vector unsigned int V;
+ u32bit R[4];
+ } vec;
+
+ vec.V = reg;
+
+ Botan::store_be(out, vec.R[0], vec.R[1], vec.R[2], vec.R[3]);
+ }
+
+ void rotate_left(u32bit rot)
+ {
+ __vector unsigned int rot_vec =
+ (__vector unsigned int){rot, rot, rot, rot};
+
+ reg = vec_rl(reg, rot_vec);
+ }
+
+ void rotate_right(u32bit rot)
+ {
+ rotate_left(32 - rot);
+ }
+
+ void operator+=(const SIMD_Altivec& other)
+ {
+ reg = vec_add(reg, other.reg);
+ }
+
+ SIMD_Altivec operator+(const SIMD_Altivec& other) const
+ {
+ return vec_add(reg, other.reg);
+ }
+
+ void operator-=(const SIMD_Altivec& other)
+ {
+ reg = vec_sub(reg, other.reg);
+ }
+
+ SIMD_Altivec operator-(const SIMD_Altivec& other) const
+ {
+ return vec_sub(reg, other.reg);
+ }
+
+ void operator^=(const SIMD_Altivec& other)
+ {
+ reg = vec_xor(reg, other.reg);
+ }
+
+ SIMD_Altivec operator^(const SIMD_Altivec& other) const
+ {
+ return vec_xor(reg, other.reg);
+ }
+
+ void operator|=(const SIMD_Altivec& other)
+ {
+ reg = vec_or(reg, other.reg);
+ }
+
+ void operator&=(const SIMD_Altivec& other)
+ {
+ reg = vec_and(reg, other.reg);
+ }
+
+ SIMD_Altivec operator<<(u32bit shift) const
+ {
+ __vector unsigned int shift_vec =
+ (__vector unsigned int){shift, shift, shift, shift};
+
+ return vec_sl(reg, shift_vec);
+ }
+
+ SIMD_Altivec operator>>(u32bit shift) const
+ {
+ __vector unsigned int shift_vec =
+ (__vector unsigned int){shift, shift, shift, shift};
+
+ return vec_sr(reg, shift_vec);
+ }
+
+ SIMD_Altivec operator~() const
+ {
+ return vec_nor(reg, reg);
+ }
+
+ SIMD_Altivec bswap() const
+ {
+ __vector unsigned char perm = vec_lvsl(0, (u32bit*)0);
+
+ perm = vec_xor(perm, vec_splat_u8(3));
+
+ return SIMD_Altivec(vec_perm(reg, reg, perm));
+ }
+
+ static void transpose(SIMD_Altivec& B0, SIMD_Altivec& B1,
+ SIMD_Altivec& B2, SIMD_Altivec& B3)
+ {
+ __vector unsigned int T0 = vec_mergeh(B0.reg, B2.reg);
+ __vector unsigned int T1 = vec_mergel(B0.reg, B2.reg);
+ __vector unsigned int T2 = vec_mergeh(B1.reg, B3.reg);
+ __vector unsigned int T3 = vec_mergel(B1.reg, B3.reg);
+
+ B0.reg = vec_mergeh(T0, T2);
+ B1.reg = vec_mergel(T0, T2);
+ B2.reg = vec_mergeh(T1, T3);
+ B3.reg = vec_mergel(T1, T3);
+ }
+
+ private:
+ SIMD_Altivec(__vector unsigned int input) { reg = input; }
+
+ __vector unsigned int reg;
+ };
+
+}
+
+#endif
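The load_le/store_le/bswap routines above all lean on the same trick: vec_lvsl yields the shift permutation for an unaligned load, and XORing each index with 3 additionally reverses the bytes within every 32-bit word, so a single vec_perm handles both the alignment fixup and the endian swap. A small scalar illustration of the index math (not part of the patch), assuming a 16-byte aligned pointer so that vec_lvsl returns the identity permutation:

#include <cstdio>

int main()
   {
   unsigned char perm[16];

   // vec_lvsl(0, p) for an aligned p is {0,1,...,15}; vec_xor with
   // vec_splat_u8(3) flips the low two bits of every index
   for(int i = 0; i != 16; ++i)
      perm[i] = static_cast<unsigned char>(i ^ 3);

   // Prints 3 2 1 0 7 6 5 4 11 10 9 8 15 14 13 12: each group of four
   // reverses one 32-bit word, i.e. a per-word byte swap
   for(int i = 0; i != 16; ++i)
      printf("%d ", perm[i]);
   printf("\n");

   return 0;
   }
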
diff --git a/src/utils/simd_32/simd_scalar.h b/src/utils/simd_32/simd_scalar.h
new file mode 100644
index 000000000..5fc20b462
--- /dev/null
+++ b/src/utils/simd_32/simd_scalar.h
@@ -0,0 +1,202 @@
+/**
+* Scalar emulation of SIMD 32-bit operations
+* (C) 2009 Jack Lloyd
+*
+* Distributed under the terms of the Botan license
+*/
+
+#ifndef BOTAN_SIMD_SCALAR_H__
+#define BOTAN_SIMD_SCALAR_H__
+
+#include <botan/loadstor.h>
+#include <botan/bswap.h>
+
+namespace Botan {
+
+class SIMD_Scalar
+ {
+ public:
+ bool enabled() const { return true; }
+
+ SIMD_Scalar(const u32bit B[4])
+ {
+ R0 = B[0];
+ R1 = B[1];
+ R2 = B[2];
+ R3 = B[3];
+ }
+
+ SIMD_Scalar(u32bit B0, u32bit B1, u32bit B2, u32bit B3)
+ {
+ R0 = B0;
+ R1 = B1;
+ R2 = B2;
+ R3 = B3;
+ }
+
+ SIMD_Scalar(u32bit B)
+ {
+ R0 = B;
+ R1 = B;
+ R2 = B;
+ R3 = B;
+ }
+
+ static SIMD_Scalar load_le(const void* in)
+ {
+ const byte* in_b = static_cast<const byte*>(in);
+ return SIMD_Scalar(Botan::load_le<u32bit>(in_b, 0),
+ Botan::load_le<u32bit>(in_b, 1),
+ Botan::load_le<u32bit>(in_b, 2),
+ Botan::load_le<u32bit>(in_b, 3));
+ }
+
+ static SIMD_Scalar load_be(const void* in)
+ {
+ const byte* in_b = static_cast<const byte*>(in);
+ return SIMD_Scalar(Botan::load_be<u32bit>(in_b, 0),
+ Botan::load_be<u32bit>(in_b, 1),
+ Botan::load_be<u32bit>(in_b, 2),
+ Botan::load_be<u32bit>(in_b, 3));
+ }
+
+ void store_le(byte out[]) const
+ {
+ Botan::store_le(out, R0, R1, R2, R3);
+ }
+
+ void store_be(byte out[]) const
+ {
+ Botan::store_be(out, R0, R1, R2, R3);
+ }
+
+ void rotate_left(u32bit rot)
+ {
+ R0 = Botan::rotate_left(R0, rot);
+ R1 = Botan::rotate_left(R1, rot);
+ R2 = Botan::rotate_left(R2, rot);
+ R3 = Botan::rotate_left(R3, rot);
+ }
+
+ void rotate_right(u32bit rot)
+ {
+ R0 = Botan::rotate_right(R0, rot);
+ R1 = Botan::rotate_right(R1, rot);
+ R2 = Botan::rotate_right(R2, rot);
+ R3 = Botan::rotate_right(R3, rot);
+ }
+
+ void operator+=(const SIMD_Scalar& other)
+ {
+ R0 += other.R0;
+ R1 += other.R1;
+ R2 += other.R2;
+ R3 += other.R3;
+ }
+
+ SIMD_Scalar operator+(const SIMD_Scalar& other) const
+ {
+ return SIMD_Scalar(R0 + other.R0,
+ R1 + other.R1,
+ R2 + other.R2,
+ R3 + other.R3);
+ }
+
+ void operator-=(const SIMD_Scalar& other)
+ {
+ R0 -= other.R0;
+ R1 -= other.R1;
+ R2 -= other.R2;
+ R3 -= other.R3;
+ }
+
+ SIMD_Scalar operator-(const SIMD_Scalar& other) const
+ {
+ return SIMD_Scalar(R0 - other.R0,
+ R1 - other.R1,
+ R2 - other.R2,
+ R3 - other.R3);
+ }
+
+ void operator^=(const SIMD_Scalar& other)
+ {
+ R0 ^= other.R0;
+ R1 ^= other.R1;
+ R2 ^= other.R2;
+ R3 ^= other.R3;
+ }
+
+ SIMD_Scalar operator^(const SIMD_Scalar& other) const
+ {
+ return SIMD_Scalar(R0 ^ other.R0,
+ R1 ^ other.R1,
+ R2 ^ other.R2,
+ R3 ^ other.R3);
+ }
+
+ void operator|=(const SIMD_Scalar& other)
+ {
+ R0 |= other.R0;
+ R1 |= other.R1;
+ R2 |= other.R2;
+ R3 |= other.R3;
+ }
+
+ void operator&=(const SIMD_Scalar& other)
+ {
+ R0 &= other.R0;
+ R1 &= other.R1;
+ R2 &= other.R2;
+ R3 &= other.R3;
+ }
+
+ SIMD_Scalar operator<<(u32bit shift) const
+ {
+ return SIMD_Scalar(R0 << shift,
+ R1 << shift,
+ R2 << shift,
+ R3 << shift);
+ }
+
+ SIMD_Scalar operator>>(u32bit shift) const
+ {
+ return SIMD_Scalar(R0 >> shift,
+ R1 >> shift,
+ R2 >> shift,
+ R3 >> shift);
+ }
+
+ SIMD_Scalar operator~() const
+ {
+ return SIMD_Scalar(~R0, ~R1, ~R2, ~R3);
+ }
+
+ SIMD_Scalar bswap() const
+ {
+ return SIMD_Scalar(reverse_bytes(R0),
+ reverse_bytes(R1),
+ reverse_bytes(R2),
+ reverse_bytes(R3));
+ }
+
+ static void transpose(SIMD_Scalar& B0, SIMD_Scalar& B1,
+ SIMD_Scalar& B2, SIMD_Scalar& B3)
+ {
+ SIMD_Scalar T0(B0.R0, B1.R0, B2.R0, B3.R0);
+ SIMD_Scalar T1(B0.R1, B1.R1, B2.R1, B3.R1);
+ SIMD_Scalar T2(B0.R2, B1.R2, B2.R2, B3.R2);
+ SIMD_Scalar T3(B0.R3, B1.R3, B2.R3, B3.R3);
+
+ B0 = T0;
+ B1 = T1;
+ B2 = T2;
+ B3 = T3;
+ }
+
+ private:
+ u32bit R0, R1, R2, R3;
+ };
+
+}
+
+#endif
diff --git a/src/utils/simd_32/simd_sse.h b/src/utils/simd_32/simd_sse.h
new file mode 100644
index 000000000..c45d8032f
--- /dev/null
+++ b/src/utils/simd_32/simd_sse.h
@@ -0,0 +1,156 @@
+/**
+* Lightweight wrappers for SSE2 intrinsics for 32-bit operations
+* (C) 2009 Jack Lloyd
+*
+* Distributed under the terms of the Botan license
+*/
+
+#ifndef BOTAN_SIMD_SSE_H__
+#define BOTAN_SIMD_SSE_H__
+
+#include <botan/cpuid.h>
+
+#include <emmintrin.h>
+
+namespace Botan {
+
+class SIMD_SSE2
+ {
+ public:
+ bool enabled() const { return CPUID::has_sse2(); }
+
+ SIMD_SSE2(const u32bit B[4])
+ {
+ reg = _mm_loadu_si128((const __m128i*)B);
+ }
+
+ SIMD_SSE2(u32bit B0, u32bit B1, u32bit B2, u32bit B3)
+ {
+ reg = _mm_set_epi32(B0, B1, B2, B3);
+ }
+
+ SIMD_SSE2(u32bit B)
+ {
+ reg = _mm_set1_epi32(B);
+ }
+
+ static SIMD_SSE2 load_le(const void* in)
+ {
+ return _mm_loadu_si128((const __m128i*)in);
+ }
+
+ static SIMD_SSE2 load_be(const void* in)
+ {
+ return load_le(in).bswap();
+ }
+
+ void store_le(byte out[]) const
+ {
+ _mm_storeu_si128((__m128i*)out, reg);
+ }
+
+ void store_be(byte out[]) const
+ {
+ bswap().store_le(out);
+ }
+
+ void rotate_left(u32bit rot)
+ {
+ reg = _mm_or_si128(_mm_slli_epi32(reg, rot),
+ _mm_srli_epi32(reg, 32-rot));
+ }
+
+ void rotate_right(u32bit rot)
+ {
+ rotate_left(32 - rot);
+ }
+
+ void operator+=(const SIMD_SSE2& other)
+ {
+ reg = _mm_add_epi32(reg, other.reg);
+ }
+
+ SIMD_SSE2 operator+(const SIMD_SSE2& other) const
+ {
+ return _mm_add_epi32(reg, other.reg);
+ }
+
+ void operator-=(const SIMD_SSE2& other)
+ {
+ reg = _mm_sub_epi32(reg, other.reg);
+ }
+
+ SIMD_SSE2 operator-(const SIMD_SSE2& other) const
+ {
+ return _mm_sub_epi32(reg, other.reg);
+ }
+
+ void operator^=(const SIMD_SSE2& other)
+ {
+ reg = _mm_xor_si128(reg, other.reg);
+ }
+
+ SIMD_SSE2 operator^(const SIMD_SSE2& other) const
+ {
+ return _mm_xor_si128(reg, other.reg);
+ }
+
+ void operator|=(const SIMD_SSE2& other)
+ {
+ reg = _mm_or_si128(reg, other.reg);
+ }
+
+ void operator&=(const SIMD_SSE2& other)
+ {
+ reg = _mm_and_si128(reg, other.reg);
+ }
+
+ SIMD_SSE2 operator<<(u32bit shift) const
+ {
+ return _mm_slli_epi32(reg, shift);
+ }
+
+ SIMD_SSE2 operator>>(u32bit shift) const
+ {
+ return _mm_srli_epi32(reg, shift);
+ }
+
+ SIMD_SSE2 operator~() const
+ {
+ static const __m128i all_ones = _mm_set1_epi32(0xFFFFFFFF);
+ return _mm_xor_si128(reg, all_ones);
+ }
+
+ SIMD_SSE2 bswap() const
+ {
+ __m128i T = reg;
+
+ T = _mm_shufflehi_epi16(T, _MM_SHUFFLE(2, 3, 0, 1));
+ T = _mm_shufflelo_epi16(T, _MM_SHUFFLE(2, 3, 0, 1));
+
+ return _mm_or_si128(_mm_srli_epi16(T, 8),
+ _mm_slli_epi16(T, 8));
+ }
+
+ static void transpose(SIMD_SSE2& B0, SIMD_SSE2& B1,
+ SIMD_SSE2& B2, SIMD_SSE2& B3)
+ {
+ __m128i T0 = _mm_unpacklo_epi32(B0.reg, B1.reg);
+ __m128i T1 = _mm_unpacklo_epi32(B2.reg, B3.reg);
+ __m128i T2 = _mm_unpackhi_epi32(B0.reg, B1.reg);
+ __m128i T3 = _mm_unpackhi_epi32(B2.reg, B3.reg);
+ B0.reg = _mm_unpacklo_epi64(T0, T1);
+ B1.reg = _mm_unpackhi_epi64(T0, T1);
+ B2.reg = _mm_unpacklo_epi64(T2, T3);
+ B3.reg = _mm_unpackhi_epi64(T2, T3);
+ }
+
+ private:
+ SIMD_SSE2(__m128i in) { reg = in; }
+
+ __m128i reg;
+ };
+
+}
+
+#endif
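The bswap above splits the byte reversal into a 16-bit shuffle (shufflehi/shufflelo swap the two halves of each 32-bit word) followed by 8-bit shifts (which swap the bytes within each 16-bit half). The same two steps applied to a single scalar word, as a quick sanity check (not part of the patch):

#include <cstdint>
#include <cstdio>

int main()
   {
   uint32_t x = 0x01234567;

   // Step 1 (the shufflehi/shufflelo step): swap the two 16-bit halves
   uint32_t t = (x << 16) | (x >> 16);                              // 0x45670123

   // Step 2 (the _mm_srli_epi16/_mm_slli_epi16 step): swap the bytes
   // within each 16-bit half
   uint32_t r = ((t & 0x00FF00FF) << 8) | ((t >> 8) & 0x00FF00FF);  // 0x67452301

   printf("%08x -> %08x\n", x, r);
   return 0;
   }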