Diffstat (limited to 'src')
-rw-r--r--  src/block/serpent_sse2/serp_sse2.cpp  142
-rw-r--r--  src/block/serpent_sse2/serp_sse2_sbox.h  622
-rw-r--r--  src/block/xtea/xtea.h  2
-rw-r--r--  src/block/xtea_sse2/info.txt  15
-rw-r--r--  src/block/xtea_sse2/xtea_sse2.cpp  124
-rw-r--r--  src/block/xtea_sse2/xtea_sse2.h  28
-rw-r--r--  src/engine/sse2_eng/eng_sse2.cpp  9
-rw-r--r--  src/utils/simd_32/info.txt  18
-rw-r--r--  src/utils/simd_32/simd_32.h  29
-rw-r--r--  src/utils/simd_32/simd_altivec.h  178
-rw-r--r--  src/utils/simd_32/simd_scalar.h  188
-rw-r--r--  src/utils/simd_32/simd_sse.h  150
12 files changed, 1101 insertions, 404 deletions
diff --git a/src/block/serpent_sse2/serp_sse2.cpp b/src/block/serpent_sse2/serp_sse2.cpp
index c51bb69ab..be79e870d 100644
--- a/src/block/serpent_sse2/serp_sse2.cpp
+++ b/src/block/serpent_sse2/serp_sse2.cpp
@@ -1,5 +1,5 @@
/*
-* Serpent (SSE2)
+* Serpent (SIMD)
* (C) 2009 Jack Lloyd
*
* Distributed under the terms of the Botan license
@@ -7,72 +7,50 @@
#include <botan/serp_sse2.h>
#include <botan/serp_sse2_sbox.h>
+#include <botan/simd_32.h>
#include <botan/loadstor.h>
-#include <emmintrin.h>
namespace Botan {
namespace {
-#define key_xor(round, B0, B1, B2, B3) \
- do { \
- __m128i key = _mm_loadu_si128(keys + round); \
- B0 = _mm_xor_si128(B0, _mm_shuffle_epi32(key, _MM_SHUFFLE(0,0,0,0))); \
- B1 = _mm_xor_si128(B1, _mm_shuffle_epi32(key, _MM_SHUFFLE(1,1,1,1))); \
- B2 = _mm_xor_si128(B2, _mm_shuffle_epi32(key, _MM_SHUFFLE(2,2,2,2))); \
- B3 = _mm_xor_si128(B3, _mm_shuffle_epi32(key, _MM_SHUFFLE(3,3,3,3))); \
+#define key_xor(round, B0, B1, B2, B3) \
+ do { \
+ B0 ^= SIMD_32(keys[4*round ]); \
+ B1 ^= SIMD_32(keys[4*round+1]); \
+ B2 ^= SIMD_32(keys[4*round+2]); \
+ B3 ^= SIMD_32(keys[4*round+3]); \
} while(0);
/*
* Serpent's linear transformations
*/
-#define rotate_left_m128(vec, rot) \
- _mm_or_si128(_mm_slli_epi32(vec, rot), _mm_srli_epi32(vec, 32-rot))
-
-#define rotate_right_m128(vec, rot) \
- _mm_or_si128(_mm_srli_epi32(vec, rot), _mm_slli_epi32(vec, 32-rot))
-
-#define transform(B0, B1, B2, B3) \
- do { \
- B0 = rotate_left_m128(B0, 13); \
- B2 = rotate_left_m128(B2, 3); \
- B1 = _mm_xor_si128(B1, _mm_xor_si128(B0, B2)); \
- B3 = _mm_xor_si128(B3, _mm_xor_si128(B2, _mm_slli_epi32(B0, 3))); \
- B1 = rotate_left_m128(B1, 1); \
- B3 = rotate_left_m128(B3, 7); \
- B0 = _mm_xor_si128(B0, _mm_xor_si128(B1, B3)); \
- B2 = _mm_xor_si128(B2, _mm_xor_si128(B3, _mm_slli_epi32(B1, 7))); \
- B0 = rotate_left_m128(B0, 5); \
- B2 = rotate_left_m128(B2, 22); \
+#define transform(B0, B1, B2, B3) \
+ do { \
+ B0.rotate_left(13); \
+ B2.rotate_left(3); \
+ B1 ^= B0 ^ B2; \
+ B3 ^= B2 ^ (B0 << 3); \
+ B1.rotate_left(1); \
+ B3.rotate_left(7); \
+ B0 ^= B1 ^ B3; \
+ B2 ^= B3 ^ (B1 << 7); \
+ B0.rotate_left(5); \
+ B2.rotate_left(22); \
} while(0);
-#define i_transform(B0, B1, B2, B3) \
- do { \
- B2 = rotate_right_m128(B2, 22); \
- B0 = rotate_right_m128(B0, 5); \
- B2 = _mm_xor_si128(B2, _mm_xor_si128(B3, _mm_slli_epi32(B1, 7))); \
- B0 = _mm_xor_si128(B0, _mm_xor_si128(B1, B3)); \
- B3 = rotate_right_m128(B3, 7); \
- B1 = rotate_right_m128(B1, 1); \
- B3 = _mm_xor_si128(B3, _mm_xor_si128(B2, _mm_slli_epi32(B0, 3))); \
- B1 = _mm_xor_si128(B1, _mm_xor_si128(B0, B2)); \
- B2 = rotate_right_m128(B2, 3); \
- B0 = rotate_right_m128(B0, 13); \
- } while(0);
-
-/*
-* 4x4 SSE2 integer matrix transpose
-*/
-#define transpose(B0, B1, B2, B3) \
- do { \
- __m128i T0 = _mm_unpacklo_epi32(B0, B1); \
- __m128i T1 = _mm_unpacklo_epi32(B2, B3); \
- __m128i T2 = _mm_unpackhi_epi32(B0, B1); \
- __m128i T3 = _mm_unpackhi_epi32(B2, B3); \
- B0 = _mm_unpacklo_epi64(T0, T1); \
- B1 = _mm_unpackhi_epi64(T0, T1); \
- B2 = _mm_unpacklo_epi64(T2, T3); \
- B3 = _mm_unpackhi_epi64(T2, T3); \
+#define i_transform(B0, B1, B2, B3) \
+ do { \
+ B2.rotate_right(22); \
+ B0.rotate_right(5); \
+ B2 ^= B3 ^ (B1 << 7); \
+ B0 ^= B1 ^ B3; \
+ B3.rotate_right(7); \
+ B1.rotate_right(1); \
+ B3 ^= B2 ^ (B0 << 3); \
+ B1 ^= B0 ^ B2; \
+ B2.rotate_right(3); \
+ B0.rotate_right(13); \
} while(0);
/*
@@ -80,20 +58,14 @@ namespace {
*/
void serpent_encrypt_4(const byte in[64],
byte out[64],
- const u32bit keys_32[132])
+ const u32bit keys[132])
{
- const __m128i all_ones = _mm_set1_epi8(0xFF);
+ SIMD_32 B0 = SIMD_32::load_le(in);
+ SIMD_32 B1 = SIMD_32::load_le(in + 16);
+ SIMD_32 B2 = SIMD_32::load_le(in + 32);
+ SIMD_32 B3 = SIMD_32::load_le(in + 48);
- const __m128i* keys = (const __m128i*)(keys_32);
- __m128i* out_mm = (__m128i*)(out);
- __m128i* in_mm = (__m128i*)(in);
-
- __m128i B0 = _mm_loadu_si128(in_mm);
- __m128i B1 = _mm_loadu_si128(in_mm + 1);
- __m128i B2 = _mm_loadu_si128(in_mm + 2);
- __m128i B3 = _mm_loadu_si128(in_mm + 3);
-
- transpose(B0, B1, B2, B3);
+ SIMD_32::transpose(B0, B1, B2, B3);
key_xor( 0,B0,B1,B2,B3); SBoxE1(B0,B1,B2,B3); transform(B0,B1,B2,B3);
key_xor( 1,B0,B1,B2,B3); SBoxE2(B0,B1,B2,B3); transform(B0,B1,B2,B3);
@@ -131,12 +103,12 @@ void serpent_encrypt_4(const byte in[64],
key_xor(30,B0,B1,B2,B3); SBoxE7(B0,B1,B2,B3); transform(B0,B1,B2,B3);
key_xor(31,B0,B1,B2,B3); SBoxE8(B0,B1,B2,B3); key_xor(32,B0,B1,B2,B3);
- transpose(B0, B1, B2, B3);
+ SIMD_32::transpose(B0, B1, B2, B3);
- _mm_storeu_si128(out_mm , B0);
- _mm_storeu_si128(out_mm + 1, B1);
- _mm_storeu_si128(out_mm + 2, B2);
- _mm_storeu_si128(out_mm + 3, B3);
+ B0.store_le(out);
+ B1.store_le(out + 16);
+ B2.store_le(out + 32);
+ B3.store_le(out + 48);
}
/*
@@ -144,20 +116,14 @@ void serpent_encrypt_4(const byte in[64],
*/
void serpent_decrypt_4(const byte in[64],
byte out[64],
- const u32bit keys_32[132])
+ const u32bit keys[132])
{
- const __m128i all_ones = _mm_set1_epi8(0xFF);
-
- const __m128i* keys = (const __m128i*)(keys_32);
- __m128i* out_mm = (__m128i*)(out);
- __m128i* in_mm = (__m128i*)(in);
-
- __m128i B0 = _mm_loadu_si128(in_mm);
- __m128i B1 = _mm_loadu_si128(in_mm + 1);
- __m128i B2 = _mm_loadu_si128(in_mm + 2);
- __m128i B3 = _mm_loadu_si128(in_mm + 3);
+ SIMD_32 B0 = SIMD_32::load_le(in);
+ SIMD_32 B1 = SIMD_32::load_le(in + 16);
+ SIMD_32 B2 = SIMD_32::load_le(in + 32);
+ SIMD_32 B3 = SIMD_32::load_le(in + 48);
- transpose(B0, B1, B2, B3);
+ SIMD_32::transpose(B0, B1, B2, B3);
key_xor(32,B0,B1,B2,B3); SBoxD8(B0,B1,B2,B3); key_xor(31,B0,B1,B2,B3);
i_transform(B0,B1,B2,B3); SBoxD7(B0,B1,B2,B3); key_xor(30,B0,B1,B2,B3);
@@ -195,12 +161,12 @@ void serpent_decrypt_4(const byte in[64],
i_transform(B0,B1,B2,B3); SBoxD2(B0,B1,B2,B3); key_xor( 1,B0,B1,B2,B3);
i_transform(B0,B1,B2,B3); SBoxD1(B0,B1,B2,B3); key_xor( 0,B0,B1,B2,B3);
- transpose(B0, B1, B2, B3);
+ SIMD_32::transpose(B0, B1, B2, B3);
- _mm_storeu_si128(out_mm , B0);
- _mm_storeu_si128(out_mm + 1, B1);
- _mm_storeu_si128(out_mm + 2, B2);
- _mm_storeu_si128(out_mm + 3, B3);
+ B0.store_le(out);
+ B1.store_le(out + 16);
+ B2.store_le(out + 32);
+ B3.store_le(out + 48);
}
}
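
Note: the transform() macro above applies Serpent's linear transform to each 32-bit lane. A minimal scalar sketch of the same sequence, operating on one block's four 32-bit words (the SIMD version runs it on four blocks at once, one block per lane), derived directly from the macro:

#include <cstdint>

inline uint32_t rotl32(uint32_t x, int r)
   {
   return (x << r) | (x >> (32 - r)); // r is always in 1..31 here
   }

void serpent_linear_transform(uint32_t& B0, uint32_t& B1,
                              uint32_t& B2, uint32_t& B3)
   {
   B0 = rotl32(B0, 13);
   B2 = rotl32(B2, 3);
   B1 ^= B0 ^ B2;
   B3 ^= B2 ^ (B0 << 3);
   B1 = rotl32(B1, 1);
   B3 = rotl32(B3, 7);
   B0 ^= B1 ^ B3;
   B2 ^= B3 ^ (B1 << 7);
   B0 = rotl32(B0, 5);
   B2 = rotl32(B2, 22);
   }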
diff --git a/src/block/serpent_sse2/serp_sse2_sbox.h b/src/block/serpent_sse2/serp_sse2_sbox.h
index 40c552e87..6e3da7359 100644
--- a/src/block/serpent_sse2/serp_sse2_sbox.h
+++ b/src/block/serpent_sse2/serp_sse2_sbox.h
@@ -1,33 +1,33 @@
/*
-* Serpent Sboxes in SSE2 form
+* Serpent Sboxes in SIMD form
* (C) 2009 Jack Lloyd
*
* Distributed under the terms of the Botan license
*/
-#ifndef SERPENT_SSE2_SBOXES_H__
-#define SERPENT_SSE2_SBOXES_H__
+#ifndef SERPENT_SIMD_SBOXES_H__
+#define SERPENT_SIMD_SBOXES_H__
#define SBoxE1(B0, B1, B2, B3) \
do { \
- B3 = _mm_xor_si128(B3, B0); \
- __m128i B4 = B1; \
- B1 = _mm_and_si128(B1, B3); \
- B4 = _mm_xor_si128(B4, B2); \
- B1 = _mm_xor_si128(B1, B0); \
- B0 = _mm_or_si128(B0, B3); \
- B0 = _mm_xor_si128(B0, B4); \
- B4 = _mm_xor_si128(B4, B3); \
- B3 = _mm_xor_si128(B3, B2); \
- B2 = _mm_or_si128(B2, B1); \
- B2 = _mm_xor_si128(B2, B4); \
- B4 = _mm_xor_si128(B4, all_ones); \
- B4 = _mm_or_si128(B4, B1); \
- B1 = _mm_xor_si128(B1, B3); \
- B1 = _mm_xor_si128(B1, B4); \
- B3 = _mm_or_si128(B3, B0); \
- B1 = _mm_xor_si128(B1, B3); \
- B4 = _mm_xor_si128(B4, B3); \
+ B3 ^= B0; \
+ SIMD_32 B4 = B1; \
+ B1 &= B3; \
+ B4 ^= B2; \
+ B1 ^= B0; \
+ B0 |= B3; \
+ B0 ^= B4; \
+ B4 ^= B3; \
+ B3 ^= B2; \
+ B2 |= B1; \
+ B2 ^= B4; \
+ B4 = ~B4; \
+ B4 |= B1; \
+ B1 ^= B3; \
+ B1 ^= B4; \
+ B3 |= B0; \
+ B1 ^= B3; \
+ B4 ^= B3; \
B3 = B0; \
B0 = B1; \
B1 = B4; \
@@ -35,24 +35,24 @@
#define SBoxE2(B0, B1, B2, B3) \
do { \
- B0 = _mm_xor_si128(B0, all_ones); \
- B2 = _mm_xor_si128(B2, all_ones); \
- __m128i B4 = B0; \
- B0 = _mm_and_si128(B0, B1); \
- B2 = _mm_xor_si128(B2, B0); \
- B0 = _mm_or_si128(B0, B3); \
- B3 = _mm_xor_si128(B3, B2); \
- B1 = _mm_xor_si128(B1, B0); \
- B0 = _mm_xor_si128(B0, B4); \
- B4 = _mm_or_si128(B4, B1); \
- B1 = _mm_xor_si128(B1, B3); \
- B2 = _mm_or_si128(B2, B0); \
- B2 = _mm_and_si128(B2, B4); \
- B0 = _mm_xor_si128(B0, B1); \
- B1 = _mm_and_si128(B1, B2); \
- B1 = _mm_xor_si128(B1, B0); \
- B0 = _mm_and_si128(B0, B2); \
- B4 = _mm_xor_si128(B4, B0); \
+ B0 = ~B0; \
+ B2 = ~B2; \
+ SIMD_32 B4 = B0; \
+ B0 &= B1; \
+ B2 ^= B0; \
+ B0 |= B3; \
+ B3 ^= B2; \
+ B1 ^= B0; \
+ B0 ^= B4; \
+ B4 |= B1; \
+ B1 ^= B3; \
+ B2 |= B0; \
+ B2 &= B4; \
+ B0 ^= B1; \
+ B1 &= B2; \
+ B1 ^= B0; \
+ B0 &= B2; \
+ B4 ^= B0; \
B0 = B2; \
B2 = B3; \
B3 = B1; \
@@ -61,22 +61,22 @@
#define SBoxE3(B0, B1, B2, B3) \
do { \
- __m128i B4 = B0; \
- B0 = _mm_and_si128(B0, B2); \
- B0 = _mm_xor_si128(B0, B3); \
- B2 = _mm_xor_si128(B2, B1); \
- B2 = _mm_xor_si128(B2, B0); \
- B3 = _mm_or_si128(B3, B4); \
- B3 = _mm_xor_si128(B3, B1); \
- B4 = _mm_xor_si128(B4, B2); \
+ SIMD_32 B4 = B0; \
+ B0 &= B2; \
+ B0 ^= B3; \
+ B2 ^= B1; \
+ B2 ^= B0; \
+ B3 |= B4; \
+ B3 ^= B1; \
+ B4 ^= B2; \
B1 = B3; \
- B3 = _mm_or_si128(B3, B4); \
- B3 = _mm_xor_si128(B3, B0); \
- B0 = _mm_and_si128(B0, B1); \
- B4 = _mm_xor_si128(B4, B0); \
- B1 = _mm_xor_si128(B1, B3); \
- B1 = _mm_xor_si128(B1, B4); \
- B4 = _mm_xor_si128(B4, all_ones); \
+ B3 |= B4; \
+ B3 ^= B0; \
+ B0 &= B1; \
+ B4 ^= B0; \
+ B1 ^= B3; \
+ B1 ^= B4; \
+ B4 = ~B4; \
B0 = B2; \
B2 = B1; \
B1 = B3; \
@@ -85,25 +85,25 @@
#define SBoxE4(B0, B1, B2, B3) \
do { \
- __m128i B4 = B0; \
- B0 = _mm_or_si128(B0, B3); \
- B3 = _mm_xor_si128(B3, B1); \
- B1 = _mm_and_si128(B1, B4); \
- B4 = _mm_xor_si128(B4, B2); \
- B2 = _mm_xor_si128(B2, B3); \
- B3 = _mm_and_si128(B3, B0); \
- B4 = _mm_or_si128(B4, B1); \
- B3 = _mm_xor_si128(B3, B4); \
- B0 = _mm_xor_si128(B0, B1); \
- B4 = _mm_and_si128(B4, B0); \
- B1 = _mm_xor_si128(B1, B3); \
- B4 = _mm_xor_si128(B4, B2); \
- B1 = _mm_or_si128(B1, B0); \
- B1 = _mm_xor_si128(B1, B2); \
- B0 = _mm_xor_si128(B0, B3); \
+ SIMD_32 B4 = B0; \
+ B0 |= B3; \
+ B3 ^= B1; \
+ B1 &= B4; \
+ B4 ^= B2; \
+ B2 ^= B3; \
+ B3 &= B0; \
+ B4 |= B1; \
+ B3 ^= B4; \
+ B0 ^= B1; \
+ B4 &= B0; \
+ B1 ^= B3; \
+ B4 ^= B2; \
+ B1 |= B0; \
+ B1 ^= B2; \
+ B0 ^= B3; \
B2 = B1; \
- B1 = _mm_or_si128(B1, B3); \
- B0 = _mm_xor_si128(B0, B1); \
+ B1 |= B3; \
+ B0 ^= B1; \
B1 = B2; \
B2 = B3; \
B3 = B4; \
@@ -111,26 +111,26 @@
#define SBoxE5(B0, B1, B2, B3) \
do { \
- B1 = _mm_xor_si128(B1, B3); \
- B3 = _mm_xor_si128(B3, all_ones); \
- B2 = _mm_xor_si128(B2, B3); \
- B3 = _mm_xor_si128(B3, B0); \
- __m128i B4 = B1; \
- B1 = _mm_and_si128(B1, B3); \
- B1 = _mm_xor_si128(B1, B2); \
- B4 = _mm_xor_si128(B4, B3); \
- B0 = _mm_xor_si128(B0, B4); \
- B2 = _mm_and_si128(B2, B4); \
- B2 = _mm_xor_si128(B2, B0); \
- B0 = _mm_and_si128(B0, B1); \
- B3 = _mm_xor_si128(B3, B0); \
- B4 = _mm_or_si128(B4, B1); \
- B4 = _mm_xor_si128(B4, B0); \
- B0 = _mm_or_si128(B0, B3); \
- B0 = _mm_xor_si128(B0, B2); \
- B2 = _mm_and_si128(B2, B3); \
- B0 = _mm_xor_si128(B0, all_ones); \
- B4 = _mm_xor_si128(B4, B2); \
+ B1 ^= B3; \
+ B3 = ~B3; \
+ B2 ^= B3; \
+ B3 ^= B0; \
+ SIMD_32 B4 = B1; \
+ B1 &= B3; \
+ B1 ^= B2; \
+ B4 ^= B3; \
+ B0 ^= B4; \
+ B2 &= B4; \
+ B2 ^= B0; \
+ B0 &= B1; \
+ B3 ^= B0; \
+ B4 |= B1; \
+ B4 ^= B0; \
+ B0 |= B3; \
+ B0 ^= B2; \
+ B2 &= B3; \
+ B0 = ~B0; \
+ B4 ^= B2; \
B2 = B0; \
B0 = B1; \
B1 = B4; \
@@ -138,25 +138,25 @@
#define SBoxE6(B0, B1, B2, B3) \
do { \
- B0 = _mm_xor_si128(B0, B1); \
- B1 = _mm_xor_si128(B1, B3); \
- B3 = _mm_xor_si128(B3, all_ones); \
- __m128i B4 = B1; \
- B1 = _mm_and_si128(B1, B0); \
- B2 = _mm_xor_si128(B2, B3); \
- B1 = _mm_xor_si128(B1, B2); \
- B2 = _mm_or_si128(B2, B4); \
- B4 = _mm_xor_si128(B4, B3); \
- B3 = _mm_and_si128(B3, B1); \
- B3 = _mm_xor_si128(B3, B0); \
- B4 = _mm_xor_si128(B4, B1); \
- B4 = _mm_xor_si128(B4, B2); \
- B2 = _mm_xor_si128(B2, B0); \
- B0 = _mm_and_si128(B0, B3); \
- B2 = _mm_xor_si128(B2, all_ones); \
- B0 = _mm_xor_si128(B0, B4); \
- B4 = _mm_or_si128(B4, B3); \
- B4 = _mm_xor_si128(B4, B2); \
+ B0 ^= B1; \
+ B1 ^= B3; \
+ B3 = ~B3; \
+ SIMD_32 B4 = B1; \
+ B1 &= B0; \
+ B2 ^= B3; \
+ B1 ^= B2; \
+ B2 |= B4; \
+ B4 ^= B3; \
+ B3 &= B1; \
+ B3 ^= B0; \
+ B4 ^= B1; \
+ B4 ^= B2; \
+ B2 ^= B0; \
+ B0 &= B3; \
+ B2 = ~B2; \
+ B0 ^= B4; \
+ B4 |= B3; \
+ B4 ^= B2; \
B2 = B0; \
B0 = B1; \
B1 = B3; \
@@ -165,49 +165,49 @@
#define SBoxE7(B0, B1, B2, B3) \
do { \
- B2 = _mm_xor_si128(B2, all_ones); \
- __m128i B4 = B3; \
- B3 = _mm_and_si128(B3, B0); \
- B0 = _mm_xor_si128(B0, B4); \
- B3 = _mm_xor_si128(B3, B2); \
- B2 = _mm_or_si128(B2, B4); \
- B1 = _mm_xor_si128(B1, B3); \
- B2 = _mm_xor_si128(B2, B0); \
- B0 = _mm_or_si128(B0, B1); \
- B2 = _mm_xor_si128(B2, B1); \
- B4 = _mm_xor_si128(B4, B0); \
- B0 = _mm_or_si128(B0, B3); \
- B0 = _mm_xor_si128(B0, B2); \
- B4 = _mm_xor_si128(B4, B3); \
- B4 = _mm_xor_si128(B4, B0); \
- B3 = _mm_xor_si128(B3, all_ones); \
- B2 = _mm_and_si128(B2, B4); \
- B3 = _mm_xor_si128(B3, B2); \
+ B2 = ~B2; \
+ SIMD_32 B4 = B3; \
+ B3 &= B0; \
+ B0 ^= B4; \
+ B3 ^= B2; \
+ B2 |= B4; \
+ B1 ^= B3; \
+ B2 ^= B0; \
+ B0 |= B1; \
+ B2 ^= B1; \
+ B4 ^= B0; \
+ B0 |= B3; \
+ B0 ^= B2; \
+ B4 ^= B3; \
+ B4 ^= B0; \
+ B3 = ~B3; \
+ B2 &= B4; \
+ B3 ^= B2; \
B2 = B4; \
} while(0);
#define SBoxE8(B0, B1, B2, B3) \
do { \
- __m128i B4 = B1; \
- B1 = _mm_or_si128(B1, B2); \
- B1 = _mm_xor_si128(B1, B3); \
- B4 = _mm_xor_si128(B4, B2); \
- B2 = _mm_xor_si128(B2, B1); \
- B3 = _mm_or_si128(B3, B4); \
- B3 = _mm_and_si128(B3, B0); \
- B4 = _mm_xor_si128(B4, B2); \
- B3 = _mm_xor_si128(B3, B1); \
- B1 = _mm_or_si128(B1, B4); \
- B1 = _mm_xor_si128(B1, B0); \
- B0 = _mm_or_si128(B0, B4); \
- B0 = _mm_xor_si128(B0, B2); \
- B1 = _mm_xor_si128(B1, B4); \
- B2 = _mm_xor_si128(B2, B1); \
- B1 = _mm_and_si128(B1, B0); \
- B1 = _mm_xor_si128(B1, B4); \
- B2 = _mm_xor_si128(B2, all_ones); \
- B2 = _mm_or_si128(B2, B0); \
- B4 = _mm_xor_si128(B4, B2); \
+ SIMD_32 B4 = B1; \
+ B1 |= B2; \
+ B1 ^= B3; \
+ B4 ^= B2; \
+ B2 ^= B1; \
+ B3 |= B4; \
+ B3 &= B0; \
+ B4 ^= B2; \
+ B3 ^= B1; \
+ B1 |= B4; \
+ B1 ^= B0; \
+ B0 |= B4; \
+ B0 ^= B2; \
+ B1 ^= B4; \
+ B2 ^= B1; \
+ B1 &= B0; \
+ B1 ^= B4; \
+ B2 = ~B2; \
+ B2 |= B0; \
+ B4 ^= B2; \
B2 = B1; \
B1 = B3; \
B3 = B0; \
@@ -215,53 +215,51 @@
} while(0);
#define SBoxD1(B0, B1, B2, B3) \
- do \
- { \
- B2 = _mm_xor_si128(B2, all_ones); \
- __m128i B4 = B1; \
- B1 = _mm_or_si128(B1, B0); \
- B4 = _mm_xor_si128(B4, all_ones); \
- B1 = _mm_xor_si128(B1, B2); \
- B2 = _mm_or_si128(B2, B4); \
- B1 = _mm_xor_si128(B1, B3); \
- B0 = _mm_xor_si128(B0, B4); \
- B2 = _mm_xor_si128(B2, B0); \
- B0 = _mm_and_si128(B0, B3); \
- B4 = _mm_xor_si128(B4, B0); \
- B0 = _mm_or_si128(B0, B1); \
- B0 = _mm_xor_si128(B0, B2); \
- B3 = _mm_xor_si128(B3, B4); \
- B2 = _mm_xor_si128(B2, B1); \
- B3 = _mm_xor_si128(B3, B0); \
- B3 = _mm_xor_si128(B3, B1); \
- B2 = _mm_and_si128(B2, B3); \
- B4 = _mm_xor_si128(B4, B2); \
+ do { \
+ B2 = ~B2; \
+ SIMD_32 B4 = B1; \
+ B1 |= B0; \
+ B4 = ~B4; \
+ B1 ^= B2; \
+ B2 |= B4; \
+ B1 ^= B3; \
+ B0 ^= B4; \
+ B2 ^= B0; \
+ B0 &= B3; \
+ B4 ^= B0; \
+ B0 |= B1; \
+ B0 ^= B2; \
+ B3 ^= B4; \
+ B2 ^= B1; \
+ B3 ^= B0; \
+ B3 ^= B1; \
+ B2 &= B3; \
+ B4 ^= B2; \
B2 = B1; \
B1 = B4; \
} while(0);
#define SBoxD2(B0, B1, B2, B3) \
- do \
- { \
- __m128i B4 = B1; \
- B1 = _mm_xor_si128(B1, B3); \
- B3 = _mm_and_si128(B3, B1); \
- B4 = _mm_xor_si128(B4, B2); \
- B3 = _mm_xor_si128(B3, B0); \
- B0 = _mm_or_si128(B0, B1); \
- B2 = _mm_xor_si128(B2, B3); \
- B0 = _mm_xor_si128(B0, B4); \
- B0 = _mm_or_si128(B0, B2); \
- B1 = _mm_xor_si128(B1, B3); \
- B0 = _mm_xor_si128(B0, B1); \
- B1 = _mm_or_si128(B1, B3); \
- B1 = _mm_xor_si128(B1, B0); \
- B4 = _mm_xor_si128(B4, all_ones); \
- B4 = _mm_xor_si128(B4, B1); \
- B1 = _mm_or_si128(B1, B0); \
- B1 = _mm_xor_si128(B1, B0); \
- B1 = _mm_or_si128(B1, B4); \
- B3 = _mm_xor_si128(B3, B1); \
+ do { \
+ SIMD_32 B4 = B1; \
+ B1 ^= B3; \
+ B3 &= B1; \
+ B4 ^= B2; \
+ B3 ^= B0; \
+ B0 |= B1; \
+ B2 ^= B3; \
+ B0 ^= B4; \
+ B0 |= B2; \
+ B1 ^= B3; \
+ B0 ^= B1; \
+ B1 |= B3; \
+ B1 ^= B0; \
+ B4 = ~B4; \
+ B4 ^= B1; \
+ B1 |= B0; \
+ B1 ^= B0; \
+ B1 |= B4; \
+ B3 ^= B1; \
B1 = B0; \
B0 = B4; \
B4 = B2; \
@@ -270,52 +268,50 @@
} while(0);
#define SBoxD3(B0, B1, B2, B3) \
- do \
- { \
- B2 = _mm_xor_si128(B2, B3); \
- B3 = _mm_xor_si128(B3, B0); \
- __m128i B4 = B3; \
- B3 = _mm_and_si128(B3, B2); \
- B3 = _mm_xor_si128(B3, B1); \
- B1 = _mm_or_si128(B1, B2); \
- B1 = _mm_xor_si128(B1, B4); \
- B4 = _mm_and_si128(B4, B3); \
- B2 = _mm_xor_si128(B2, B3); \
- B4 = _mm_and_si128(B4, B0); \
- B4 = _mm_xor_si128(B4, B2); \
- B2 = _mm_and_si128(B2, B1); \
- B2 = _mm_or_si128(B2, B0); \
- B3 = _mm_xor_si128(B3, all_ones); \
- B2 = _mm_xor_si128(B2, B3); \
- B0 = _mm_xor_si128(B0, B3); \
- B0 = _mm_and_si128(B0, B1); \
- B3 = _mm_xor_si128(B3, B4); \
- B3 = _mm_xor_si128(B3, B0); \
+ do { \
+ B2 ^= B3; \
+ B3 ^= B0; \
+ SIMD_32 B4 = B3; \
+ B3 &= B2; \
+ B3 ^= B1; \
+ B1 |= B2; \
+ B1 ^= B4; \
+ B4 &= B3; \
+ B2 ^= B3; \
+ B4 &= B0; \
+ B4 ^= B2; \
+ B2 &= B1; \
+ B2 |= B0; \
+ B3 = ~B3; \
+ B2 ^= B3; \
+ B0 ^= B3; \
+ B0 &= B1; \
+ B3 ^= B4; \
+ B3 ^= B0; \
B0 = B1; \
B1 = B4; \
} while(0);
#define SBoxD4(B0, B1, B2, B3) \
- do \
- { \
- __m128i B4 = B2; \
- B2 = _mm_xor_si128(B2, B1); \
- B0 = _mm_xor_si128(B0, B2); \
- B4 = _mm_and_si128(B4, B2); \
- B4 = _mm_xor_si128(B4, B0); \
- B0 = _mm_and_si128(B0, B1); \
- B1 = _mm_xor_si128(B1, B3); \
- B3 = _mm_or_si128(B3, B4); \
- B2 = _mm_xor_si128(B2, B3); \
- B0 = _mm_xor_si128(B0, B3); \
- B1 = _mm_xor_si128(B1, B4); \
- B3 = _mm_and_si128(B3, B2); \
- B3 = _mm_xor_si128(B3, B1); \
- B1 = _mm_xor_si128(B1, B0); \
- B1 = _mm_or_si128(B1, B2); \
- B0 = _mm_xor_si128(B0, B3); \
- B1 = _mm_xor_si128(B1, B4); \
- B0 = _mm_xor_si128(B0, B1); \
+ do { \
+ SIMD_32 B4 = B2; \
+ B2 ^= B1; \
+ B0 ^= B2; \
+ B4 &= B2; \
+ B4 ^= B0; \
+ B0 &= B1; \
+ B1 ^= B3; \
+ B3 |= B4; \
+ B2 ^= B3; \
+ B0 ^= B3; \
+ B1 ^= B4; \
+ B3 &= B2; \
+ B3 ^= B1; \
+ B1 ^= B0; \
+ B1 |= B2; \
+ B0 ^= B3; \
+ B1 ^= B4; \
+ B0 ^= B1; \
B4 = B0; \
B0 = B2; \
B2 = B3; \
@@ -323,54 +319,52 @@
} while(0);
#define SBoxD5(B0, B1, B2, B3) \
- do \
- { \
- __m128i B4 = B2; \
- B2 = _mm_and_si128(B2, B3); \
- B2 = _mm_xor_si128(B2, B1); \
- B1 = _mm_or_si128(B1, B3); \
- B1 = _mm_and_si128(B1, B0); \
- B4 = _mm_xor_si128(B4, B2); \
- B4 = _mm_xor_si128(B4, B1); \
- B1 = _mm_and_si128(B1, B2); \
- B0 = _mm_xor_si128(B0, all_ones); \
- B3 = _mm_xor_si128(B3, B4); \
- B1 = _mm_xor_si128(B1, B3); \
- B3 = _mm_and_si128(B3, B0); \
- B3 = _mm_xor_si128(B3, B2); \
- B0 = _mm_xor_si128(B0, B1); \
- B2 = _mm_and_si128(B2, B0); \
- B3 = _mm_xor_si128(B3, B0); \
- B2 = _mm_xor_si128(B2, B4); \
- B2 = _mm_or_si128(B2, B3); \
- B3 = _mm_xor_si128(B3, B0); \
- B2 = _mm_xor_si128(B2, B1); \
+ do { \
+ SIMD_32 B4 = B2; \
+ B2 &= B3; \
+ B2 ^= B1; \
+ B1 |= B3; \
+ B1 &= B0; \
+ B4 ^= B2; \
+ B4 ^= B1; \
+ B1 &= B2; \
+ B0 = ~B0; \
+ B3 ^= B4; \
+ B1 ^= B3; \
+ B3 &= B0; \
+ B3 ^= B2; \
+ B0 ^= B1; \
+ B2 &= B0; \
+ B3 ^= B0; \
+ B2 ^= B4; \
+ B2 |= B3; \
+ B3 ^= B0; \
+ B2 ^= B1; \
B1 = B3; \
B3 = B4; \
} while(0);
#define SBoxD6(B0, B1, B2, B3) \
- do \
- { \
- B1 = _mm_xor_si128(B1, all_ones); \
- __m128i B4 = B3; \
- B2 = _mm_xor_si128(B2, B1); \
- B3 = _mm_or_si128(B3, B0); \
- B3 = _mm_xor_si128(B3, B2); \
- B2 = _mm_or_si128(B2, B1); \
- B2 = _mm_and_si128(B2, B0); \
- B4 = _mm_xor_si128(B4, B3); \
- B2 = _mm_xor_si128(B2, B4); \
- B4 = _mm_or_si128(B4, B0); \
- B4 = _mm_xor_si128(B4, B1); \
- B1 = _mm_and_si128(B1, B2); \
- B1 = _mm_xor_si128(B1, B3); \
- B4 = _mm_xor_si128(B4, B2); \
- B3 = _mm_and_si128(B3, B4); \
- B4 = _mm_xor_si128(B4, B1); \
- B3 = _mm_xor_si128(B3, B4); \
- B4 = _mm_xor_si128(B4, all_ones); \
- B3 = _mm_xor_si128(B3, B0); \
+ do { \
+ B1 = ~B1; \
+ SIMD_32 B4 = B3; \
+ B2 ^= B1; \
+ B3 |= B0; \
+ B3 ^= B2; \
+ B2 |= B1; \
+ B2 &= B0; \
+ B4 ^= B3; \
+ B2 ^= B4; \
+ B4 |= B0; \
+ B4 ^= B1; \
+ B1 &= B2; \
+ B1 ^= B3; \
+ B4 ^= B2; \
+ B3 &= B4; \
+ B4 ^= B1; \
+ B3 ^= B4; \
+ B4 = ~B4; \
+ B3 ^= B0; \
B0 = B1; \
B1 = B4; \
B4 = B3; \
@@ -379,52 +373,50 @@
} while(0);
#define SBoxD7(B0, B1, B2, B3) \
- do \
- { \
- B0 = _mm_xor_si128(B0, B2); \
- __m128i B4 = B2; \
- B2 = _mm_and_si128(B2, B0); \
- B4 = _mm_xor_si128(B4, B3); \
- B2 = _mm_xor_si128(B2, all_ones); \
- B3 = _mm_xor_si128(B3, B1); \
- B2 = _mm_xor_si128(B2, B3); \
- B4 = _mm_or_si128(B4, B0); \
- B0 = _mm_xor_si128(B0, B2); \
- B3 = _mm_xor_si128(B3, B4); \
- B4 = _mm_xor_si128(B4, B1); \
- B1 = _mm_and_si128(B1, B3); \
- B1 = _mm_xor_si128(B1, B0); \
- B0 = _mm_xor_si128(B0, B3); \
- B0 = _mm_or_si128(B0, B2); \
- B3 = _mm_xor_si128(B3, B1); \
- B4 = _mm_xor_si128(B4, B0); \
+ do { \
+ B0 ^= B2; \
+ SIMD_32 B4 = B2; \
+ B2 &= B0; \
+ B4 ^= B3; \
+ B2 = ~B2; \
+ B3 ^= B1; \
+ B2 ^= B3; \
+ B4 |= B0; \
+ B0 ^= B2; \
+ B3 ^= B4; \
+ B4 ^= B1; \
+ B1 &= B3; \
+ B1 ^= B0; \
+ B0 ^= B3; \
+ B0 |= B2; \
+ B3 ^= B1; \
+ B4 ^= B0; \
B0 = B1; \
B1 = B2; \
B2 = B4; \
} while(0);
#define SBoxD8(B0, B1, B2, B3) \
- do \
- { \
- __m128i B4 = B2; \
- B2 = _mm_xor_si128(B2, B0); \
- B0 = _mm_and_si128(B0, B3); \
- B4 = _mm_or_si128(B4, B3); \
- B2 = _mm_xor_si128(B2, all_ones); \
- B3 = _mm_xor_si128(B3, B1); \
- B1 = _mm_or_si128(B1, B0); \
- B0 = _mm_xor_si128(B0, B2); \
- B2 = _mm_and_si128(B2, B4); \
- B3 = _mm_and_si128(B3, B4); \
- B1 = _mm_xor_si128(B1, B2); \
- B2 = _mm_xor_si128(B2, B0); \
- B0 = _mm_or_si128(B0, B2); \
- B4 = _mm_xor_si128(B4, B1); \
- B0 = _mm_xor_si128(B0, B3); \
- B3 = _mm_xor_si128(B3, B4); \
- B4 = _mm_or_si128(B4, B0); \
- B3 = _mm_xor_si128(B3, B2); \
- B4 = _mm_xor_si128(B4, B2); \
+ do { \
+ SIMD_32 B4 = B2; \
+ B2 ^= B0; \
+ B0 &= B3; \
+ B4 |= B3; \
+ B2 = ~B2; \
+ B3 ^= B1; \
+ B1 |= B0; \
+ B0 ^= B2; \
+ B2 &= B4; \
+ B3 &= B4; \
+ B1 ^= B2; \
+ B2 ^= B0; \
+ B0 |= B2; \
+ B4 ^= B1; \
+ B0 ^= B3; \
+ B3 ^= B4; \
+ B4 |= B0; \
+ B3 ^= B2; \
+ B4 ^= B2; \
B2 = B1; \
B1 = B0; \
B0 = B3; \
diff --git a/src/block/xtea/xtea.h b/src/block/xtea/xtea.h
index f3b554edb..9982d0712 100644
--- a/src/block/xtea/xtea.h
+++ b/src/block/xtea/xtea.h
@@ -26,7 +26,7 @@ class BOTAN_DLL XTEA : public BlockCipher
BlockCipher* clone() const { return new XTEA; }
XTEA() : BlockCipher(8, 16) {}
- private:
+ protected:
void key_schedule(const byte[], u32bit);
SecureBuffer<u32bit, 64> EK;
};
diff --git a/src/block/xtea_sse2/info.txt b/src/block/xtea_sse2/info.txt
new file mode 100644
index 000000000..edcdd114d
--- /dev/null
+++ b/src/block/xtea_sse2/info.txt
@@ -0,0 +1,15 @@
+realname "XTEA (SSE2)"
+
+define XTEA_SSE2
+
+load_on auto
+
+<add>
+xtea_sse2.cpp
+xtea_sse2.h
+</add>
+
+<requires>
+xtea
+sse2_eng
+</requires>
diff --git a/src/block/xtea_sse2/xtea_sse2.cpp b/src/block/xtea_sse2/xtea_sse2.cpp
new file mode 100644
index 000000000..5d9c7dd05
--- /dev/null
+++ b/src/block/xtea_sse2/xtea_sse2.cpp
@@ -0,0 +1,124 @@
+/*
+* XTEA in SIMD
+* (C) 2009 Jack Lloyd
+*
+* Distributed under the terms of the Botan license
+*/
+
+#include <botan/xtea_sse2.h>
+#include <botan/loadstor.h>
+#include <botan/simd_32.h>
+
+namespace Botan {
+
+namespace {
+
+void xtea_encrypt_8(const byte in[64], byte out[64], const u32bit EK[64])
+ {
+ SIMD_32 L0 = SIMD_32::load_be(in );
+ SIMD_32 R0 = SIMD_32::load_be(in + 16);
+ SIMD_32 L1 = SIMD_32::load_be(in + 32);
+ SIMD_32 R1 = SIMD_32::load_be(in + 48);
+
+ SIMD_32::transpose(L0, R0, L1, R1);
+
+ for(u32bit i = 0; i != 32; i += 2)
+ {
+ SIMD_32 K0(EK[2*i ]);
+ SIMD_32 K1(EK[2*i+1]);
+ SIMD_32 K2(EK[2*i+2]);
+ SIMD_32 K3(EK[2*i+3]);
+
+ L0 += (((R0 << 4) ^ (R0 >> 5)) + R0) ^ K0;
+ L1 += (((R1 << 4) ^ (R1 >> 5)) + R1) ^ K0;
+
+ R0 += (((L0 << 4) ^ (L0 >> 5)) + L0) ^ K1;
+ R1 += (((L1 << 4) ^ (L1 >> 5)) + L1) ^ K1;
+
+ L0 += (((R0 << 4) ^ (R0 >> 5)) + R0) ^ K2;
+ L1 += (((R1 << 4) ^ (R1 >> 5)) + R1) ^ K2;
+
+ R0 += (((L0 << 4) ^ (L0 >> 5)) + L0) ^ K3;
+ R1 += (((L1 << 4) ^ (L1 >> 5)) + L1) ^ K3;
+ }
+
+ SIMD_32::transpose(L0, R0, L1, R1);
+
+ L0.store_be(out);
+ R0.store_be(out + 16);
+ L1.store_be(out + 32);
+ R1.store_be(out + 48);
+ }
+
+void xtea_decrypt_8(const byte in[64], byte out[64], const u32bit EK[64])
+ {
+ SIMD_32 L0 = SIMD_32::load_be(in );
+ SIMD_32 R0 = SIMD_32::load_be(in + 16);
+ SIMD_32 L1 = SIMD_32::load_be(in + 32);
+ SIMD_32 R1 = SIMD_32::load_be(in + 48);
+
+ SIMD_32::transpose(L0, R0, L1, R1);
+
+ for(u32bit i = 0; i != 32; i += 2)
+ {
+ SIMD_32 K0(EK[63 - 2*i]);
+ SIMD_32 K1(EK[62 - 2*i]);
+ SIMD_32 K2(EK[61 - 2*i]);
+ SIMD_32 K3(EK[60 - 2*i]);
+
+ R0 -= (((L0 << 4) ^ (L0 >> 5)) + L0) ^ K0;
+ R1 -= (((L1 << 4) ^ (L1 >> 5)) + L1) ^ K0;
+
+ L0 -= (((R0 << 4) ^ (R0 >> 5)) + R0) ^ K1;
+ L1 -= (((R1 << 4) ^ (R1 >> 5)) + R1) ^ K1;
+
+ R0 -= (((L0 << 4) ^ (L0 >> 5)) + L0) ^ K2;
+ R1 -= (((L1 << 4) ^ (L1 >> 5)) + L1) ^ K2;
+
+ L0 -= (((R0 << 4) ^ (R0 >> 5)) + R0) ^ K3;
+ L1 -= (((R1 << 4) ^ (R1 >> 5)) + R1) ^ K3;
+ }
+
+ SIMD_32::transpose(L0, R0, L1, R1);
+
+ L0.store_be(out);
+ R0.store_be(out + 16);
+ L1.store_be(out + 32);
+ R1.store_be(out + 48);
+ }
+
+}
+
+/*
+* XTEA Encryption
+*/
+void XTEA_SSE2::encrypt_n(const byte in[], byte out[], u32bit blocks) const
+ {
+ while(blocks >= 8)
+ {
+ xtea_encrypt_8(in, out, this->EK);
+ in += 8 * BLOCK_SIZE;
+ out += 8 * BLOCK_SIZE;
+ blocks -= 8;
+ }
+
+ XTEA::encrypt_n(in, out, blocks);
+ }
+
+/*
+* XTEA Decryption
+*/
+void XTEA_SSE2::decrypt_n(const byte in[], byte out[], u32bit blocks) const
+ {
+ while(blocks >= 8)
+ {
+ xtea_decrypt_8(in, out, this->EK);
+ in += 8 * BLOCK_SIZE;
+ out += 8 * BLOCK_SIZE;
+ blocks -= 8;
+ }
+
+ XTEA::decrypt_n(in, out, blocks);
+ }
+
+}
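
Note: the loop above unrolls two XTEA rounds per iteration. A scalar sketch of the same round structure for a single block, assuming (as the indexing above implies, though the key schedule itself is not shown) that EK[] holds 64 precomputed subkeys with EK[2*i] and EK[2*i+1] belonging to round i:

#include <cstdint>

// One block (L, R); EK[2*i] and EK[2*i+1] are assumed to be the two
// per-round additions (the sum + key[] terms of the XTEA definition).
void xtea_encrypt_block(uint32_t& L, uint32_t& R, const uint32_t EK[64])
   {
   for(uint32_t i = 0; i != 32; ++i)
      {
      L += (((R << 4) ^ (R >> 5)) + R) ^ EK[2*i];
      R += (((L << 4) ^ (L >> 5)) + L) ^ EK[2*i+1];
      }
   }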
diff --git a/src/block/xtea_sse2/xtea_sse2.h b/src/block/xtea_sse2/xtea_sse2.h
new file mode 100644
index 000000000..e691e5c40
--- /dev/null
+++ b/src/block/xtea_sse2/xtea_sse2.h
@@ -0,0 +1,28 @@
+/*
+* XTEA in SSE2
+* (C) 2009 Jack Lloyd
+*
+* Distributed under the terms of the Botan license
+*/
+
+#ifndef BOTAN_XTEA_SSE2_H__
+#define BOTAN_XTEA_SSE2_H__
+
+#include <botan/xtea.h>
+
+namespace Botan {
+
+/*
+* XTEA (SSE2 variant)
+*/
+class BOTAN_DLL XTEA_SSE2 : public XTEA
+ {
+ public:
+ void encrypt_n(const byte in[], byte out[], u32bit blocks) const;
+ void decrypt_n(const byte in[], byte out[], u32bit blocks) const;
+ BlockCipher* clone() const { return new XTEA_SSE2; }
+ };
+
+}
+
+#endif
diff --git a/src/engine/sse2_eng/eng_sse2.cpp b/src/engine/sse2_eng/eng_sse2.cpp
index 07c625c7c..51b7d04e3 100644
--- a/src/engine/sse2_eng/eng_sse2.cpp
+++ b/src/engine/sse2_eng/eng_sse2.cpp
@@ -16,6 +16,10 @@
#include <botan/serp_sse2.h>
#endif
+#if defined(BOTAN_HAS_XTEA_SSE2)
+ #include <botan/xtea_sse2.h>
+#endif
+
namespace Botan {
BlockCipher*
@@ -30,6 +34,11 @@ SSE2_Assembler_Engine::find_block_cipher(const SCAN_Name& request,
return new Serpent_SSE2;
#endif
+#if defined(BOTAN_HAS_XTEA_SSE2)
+ if(request.algo_name() == "XTEA")
+ return new XTEA_SSE2;
+#endif
+
return 0;
}
diff --git a/src/utils/simd_32/info.txt b/src/utils/simd_32/info.txt
new file mode 100644
index 000000000..c72f2a6ed
--- /dev/null
+++ b/src/utils/simd_32/info.txt
@@ -0,0 +1,18 @@
+realname "SIMD"
+
+define SIMD_32
+
+load_on always
+
+<arch>
+pentium-m
+pentium4
+prescott
+amd64
+</arch>
+
+<cc>
+gcc
+icc
+msvc
+</cc>
diff --git a/src/utils/simd_32/simd_32.h b/src/utils/simd_32/simd_32.h
new file mode 100644
index 000000000..d9fac0d3d
--- /dev/null
+++ b/src/utils/simd_32/simd_32.h
@@ -0,0 +1,29 @@
+/**
+* Lightweight wrappers for SIMD operations
+*/
+
+#ifndef BOTAN_SIMD_32_H__
+#define BOTAN_SIMD_32_H__
+
+#include <botan/types.h>
+
+//#define BOTAN_TARGET_CPU_HAS_SSE2
+
+#if defined(BOTAN_TARGET_CPU_HAS_SSE2)
+
+ #include <botan/simd_sse.h>
+ namespace Botan { typedef SIMD_SSE2 SIMD_32; }
+
+#elif defined(BOTAN_TARGET_CPU_HAS_ALTIVEC)
+
+ #include <botan/simd_altivec.h>
+ namespace Botan { typedef SIMD_Altivec SIMD_32; }
+
+#else
+
+ #include <botan/simd_scalar.h>
+ namespace Botan { typedef SIMD_Scalar SIMD_32; }
+
+#endif
+
+#endif
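
Note: whichever backend the preprocessor selects, callers see a single SIMD_32 type with the same interface. A minimal usage sketch, assuming a Botan tree with this module enabled (the function name and parameters are illustrative only):

#include <botan/simd_32.h>

namespace Botan {

void xor_rotate_lanes(const byte in[16], byte out[16], u32bit mask)
   {
   SIMD_32 B = SIMD_32::load_le(in); // four 32-bit words, little-endian
   B ^= SIMD_32(mask);               // splat mask into all four lanes
   B.rotate_left(7);                 // rotate each 32-bit lane by 7
   B.store_le(out);
   }

}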
diff --git a/src/utils/simd_32/simd_altivec.h b/src/utils/simd_32/simd_altivec.h
new file mode 100644
index 000000000..d6aaa699d
--- /dev/null
+++ b/src/utils/simd_32/simd_altivec.h
@@ -0,0 +1,178 @@
+/**
+* Altivec SIMD
+*/
+
+#ifndef BOTAN_SIMD_ALTIVEC_H__
+#define BOTAN_SIMD_ALTIVEC_H__
+
+#include <botan/loadstor.h>
+#include <altivec.h>
+#undef vector
+
+namespace Botan {
+
+class SIMD_Altivec
+ {
+ public:
+
+ SIMD_Altivec(const u32bit B[4])
+ {
+ reg = (__vector unsigned int){B[0], B[1], B[2], B[3]};
+ }
+
+ SIMD_Altivec(u32bit B0, u32bit B1, u32bit B2, u32bit B3)
+ {
+ reg = (__vector unsigned int){B0, B1, B2, B3};
+ }
+
+ SIMD_Altivec(u32bit B)
+ {
+ reg = (__vector unsigned int){B, B, B, B};
+ }
+
+ static SIMD_Altivec load_le(const void* in)
+ {
+ const u32bit* in_32 = static_cast<const u32bit*>(in);
+
+ __vector unsigned int R0 = vec_ld(0, in_32);
+ __vector unsigned int R1 = vec_ld(12, in_32);
+
+ __vector unsigned char perm = vec_lvsl(0, in_32);
+
+ perm = vec_xor(perm, vec_splat_u8(3));
+
+ R0 = vec_perm(R0, R1, perm);
+
+ return SIMD_Altivec(R0);
+ }
+
+ static SIMD_Altivec load_be(const void* in)
+ {
+ const u32bit* in_32 = static_cast<const u32bit*>(in);
+
+ __vector unsigned int R0 = vec_ld(0, in_32);
+ __vector unsigned int R1 = vec_ld(12, in_32);
+
+ __vector unsigned char perm = vec_lvsl(0, in_32);
+
+ R0 = vec_perm(R0, R1, perm);
+
+ return SIMD_Altivec(R0);
+ }
+
+ void store_le(byte out[]) const
+ {
+ u32bit* out_32 = reinterpret_cast<u32bit*>(out);
+
+ __vector unsigned char perm = vec_lvsl(0, (int*)0);
+
+ perm = vec_xor(perm, vec_splat_u8(3));
+
+ __vector unsigned int swapped = vec_perm(reg, reg, perm);
+
+ vec_st(swapped, 0, out_32);
+ }
+
+ void store_be(byte out[]) const
+ {
+ u32bit* out_32 = reinterpret_cast<u32bit*>(out);
+ vec_st(reg, 0, out_32);
+ }
+
+ void rotate_left(u32bit rot)
+ {
+ __vector unsigned int rot_vec =
+ (__vector unsigned int){rot, rot, rot, rot};
+
+ reg = vec_rl(reg, rot_vec);
+ }
+
+ void rotate_right(u32bit rot)
+ {
+ rotate_left(32 - rot);
+ }
+
+ void operator+=(const SIMD_Altivec& other)
+ {
+ reg = vec_add(reg, other.reg);
+ }
+
+ SIMD_Altivec operator+(const SIMD_Altivec& other) const
+ {
+ return vec_add(reg, other.reg);
+ }
+
+ void operator-=(const SIMD_Altivec& other)
+ {
+ reg = vec_sub(reg, other.reg);
+ }
+
+ SIMD_Altivec operator-(const SIMD_Altivec& other) const
+ {
+ return vec_sub(reg, other.reg);
+ }
+
+ void operator^=(const SIMD_Altivec& other)
+ {
+ reg = vec_xor(reg, other.reg);
+ }
+
+ SIMD_Altivec operator^(const SIMD_Altivec& other) const
+ {
+ return vec_xor(reg, other.reg);
+ }
+
+ void operator|=(const SIMD_Altivec& other)
+ {
+ reg = vec_or(reg, other.reg);
+ }
+
+ void operator&=(const SIMD_Altivec& other)
+ {
+ reg = vec_and(reg, other.reg);
+ }
+
+ SIMD_Altivec operator<<(u32bit shift) const
+ {
+ __vector unsigned int shift_vec =
+ (__vector unsigned int){shift, shift, shift, shift};
+
+ return vec_sl(reg, shift_vec);
+ }
+
+ SIMD_Altivec operator>>(u32bit shift) const
+ {
+ __vector unsigned int shift_vec =
+ (__vector unsigned int){shift, shift, shift, shift};
+
+ return vec_sr(reg, shift_vec);
+ }
+
+ SIMD_Altivec operator~() const
+ {
+ return vec_nor(reg, reg);
+ }
+
+ static void transpose(SIMD_Altivec& B0, SIMD_Altivec& B1,
+ SIMD_Altivec& B2, SIMD_Altivec& B3)
+ {
+ __vector unsigned int T0 = vec_mergeh(B0.reg, B2.reg);
+ __vector unsigned int T1 = vec_mergel(B0.reg, B2.reg);
+ __vector unsigned int T2 = vec_mergeh(B1.reg, B3.reg);
+ __vector unsigned int T3 = vec_mergel(B1.reg, B3.reg);
+
+ B0.reg = vec_mergeh(T0, T2);
+ B1.reg = vec_mergel(T0, T2);
+ B2.reg = vec_mergeh(T1, T3);
+ B3.reg = vec_mergel(T1, T3);
+ }
+
+ private:
+ SIMD_Altivec(__vector unsigned int input) { reg = input; }
+
+ __vector unsigned int reg;
+ };
+
+}
+
+#endif
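
Note: load_le/store_le above XOR the vec_lvsl permute indices with 3, which on a big-endian machine reverses the bytes within each 32-bit word. A standalone illustration of that index trick (plain C++, independent of AltiVec):

#include <cstdint>
#include <cstdio>

int main()
   {
   const uint8_t in[4] = { 0x01, 0x02, 0x03, 0x04 };
   uint8_t out[4];

   for(int i = 0; i != 4; ++i)
      out[i] = in[i ^ 3]; // same effect as perm = vec_xor(perm, vec_splat_u8(3))

   std::printf("%02X %02X %02X %02X\n", out[0], out[1], out[2], out[3]); // 04 03 02 01
   return 0;
   }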
diff --git a/src/utils/simd_32/simd_scalar.h b/src/utils/simd_32/simd_scalar.h
new file mode 100644
index 000000000..4b81c183b
--- /dev/null
+++ b/src/utils/simd_32/simd_scalar.h
@@ -0,0 +1,188 @@
+/**
+* Scalar emulation of SIMD operations
+*/
+
+#ifndef BOTAN_SIMD_SCALAR_H__
+#define BOTAN_SIMD_SCALAR_H__
+
+#include <botan/loadstor.h>
+
+namespace Botan {
+
+class SIMD_Scalar
+ {
+ public:
+ SIMD_Scalar(const u32bit B[4])
+ {
+ R0 = B[0];
+ R1 = B[1];
+ R2 = B[2];
+ R3 = B[3];
+ }
+
+ SIMD_Scalar(u32bit B0, u32bit B1, u32bit B2, u32bit B3)
+ {
+ R0 = B0;
+ R1 = B1;
+ R2 = B2;
+ R3 = B3;
+ }
+
+ SIMD_Scalar(u32bit B)
+ {
+ R0 = B;
+ R1 = B;
+ R2 = B;
+ R3 = B;
+ }
+
+ static SIMD_Scalar load_le(const void* in)
+ {
+ const byte* in_b = static_cast<const byte*>(in);
+ return SIMD_Scalar(Botan::load_le<u32bit>(in_b, 0),
+ Botan::load_le<u32bit>(in_b, 1),
+ Botan::load_le<u32bit>(in_b, 2),
+ Botan::load_le<u32bit>(in_b, 3));
+ }
+
+ static SIMD_Scalar load_be(const void* in)
+ {
+ const byte* in_b = static_cast<const byte*>(in);
+ return SIMD_Scalar(Botan::load_be<u32bit>(in_b, 0),
+ Botan::load_be<u32bit>(in_b, 1),
+ Botan::load_be<u32bit>(in_b, 2),
+ Botan::load_be<u32bit>(in_b, 3));
+ }
+
+ void store_le(byte out[]) const
+ {
+ Botan::store_le(out, R0, R1, R2, R3);
+ }
+
+ void store_be(byte out[]) const
+ {
+ Botan::store_be(out, R0, R1, R2, R3);
+ }
+
+ void rotate_left(u32bit rot)
+ {
+ R0 = Botan::rotate_left(R0, rot);
+ R1 = Botan::rotate_left(R1, rot);
+ R2 = Botan::rotate_left(R2, rot);
+ R3 = Botan::rotate_left(R3, rot);
+ }
+
+ void rotate_right(u32bit rot)
+ {
+ R0 = Botan::rotate_right(R0, rot);
+ R1 = Botan::rotate_right(R1, rot);
+ R2 = Botan::rotate_right(R2, rot);
+ R3 = Botan::rotate_right(R3, rot);
+ }
+
+ void operator+=(const SIMD_Scalar& other)
+ {
+ R0 += other.R0;
+ R1 += other.R1;
+ R2 += other.R2;
+ R3 += other.R3;
+ }
+
+ SIMD_Scalar operator+(const SIMD_Scalar& other) const
+ {
+ return SIMD_Scalar(R0 + other.R0,
+ R1 + other.R1,
+ R2 + other.R2,
+ R3 + other.R3);
+ }
+
+ void operator-=(const SIMD_Scalar& other)
+ {
+ R0 -= other.R0;
+ R1 -= other.R1;
+ R2 -= other.R2;
+ R3 -= other.R3;
+ }
+
+ SIMD_Scalar operator-(const SIMD_Scalar& other) const
+ {
+ return SIMD_Scalar(R0 - other.R0,
+ R1 - other.R1,
+ R2 - other.R2,
+ R3 - other.R3);
+ }
+
+ void operator^=(const SIMD_Scalar& other)
+ {
+ R0 ^= other.R0;
+ R1 ^= other.R1;
+ R2 ^= other.R2;
+ R3 ^= other.R3;
+ }
+
+ SIMD_Scalar operator^(const SIMD_Scalar& other) const
+ {
+ return SIMD_Scalar(R0 ^ other.R0,
+ R1 ^ other.R1,
+ R2 ^ other.R2,
+ R3 ^ other.R3);
+ }
+
+ void operator|=(const SIMD_Scalar& other)
+ {
+ R0 |= other.R0;
+ R1 |= other.R1;
+ R2 |= other.R2;
+ R3 |= other.R3;
+ }
+
+ void operator&=(const SIMD_Scalar& other)
+ {
+ R0 &= other.R0;
+ R1 &= other.R1;
+ R2 &= other.R2;
+ R3 &= other.R3;
+ }
+
+ SIMD_Scalar operator<<(u32bit shift) const
+ {
+ return SIMD_Scalar(R0 << shift,
+ R1 << shift,
+ R2 << shift,
+ R3 << shift);
+ }
+
+ SIMD_Scalar operator>>(u32bit shift) const
+ {
+ return SIMD_Scalar(R0 >> shift,
+ R1 >> shift,
+ R2 >> shift,
+ R3 >> shift);
+ }
+
+ SIMD_Scalar operator~() const
+ {
+ return SIMD_Scalar(~R0, ~R1, ~R2, ~R3);
+ }
+
+ static void transpose(SIMD_Scalar& B0, SIMD_Scalar& B1,
+ SIMD_Scalar& B2, SIMD_Scalar& B3)
+ {
+ SIMD_Scalar T0(B0.R0, B1.R0, B2.R0, B3.R0);
+ SIMD_Scalar T1(B0.R1, B1.R1, B2.R1, B3.R1);
+ SIMD_Scalar T2(B0.R2, B1.R2, B2.R2, B3.R2);
+ SIMD_Scalar T3(B0.R3, B1.R3, B2.R3, B3.R3);
+
+ B0 = T0;
+ B1 = T1;
+ B2 = T2;
+ B3 = T3;
+ }
+
+ private:
+ u32bit R0, R1, R2, R3;
+ };
+
+}
+
+#endif
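
Note: transpose() regroups four 4-word vectors so that vector i ends up holding word i of each input vector; this is what lets the Serpent and XTEA code above process four blocks with each lane belonging to one block. A standalone sketch of the same 4x4 word transpose:

#include <cstdint>
#include <cstdio>

int main()
   {
   // Four "vectors" of four 32-bit words (e.g. four cipher blocks)
   uint32_t B[4][4] = { {  0,  1,  2,  3 },
                        {  4,  5,  6,  7 },
                        {  8,  9, 10, 11 },
                        { 12, 13, 14, 15 } };
   uint32_t T[4][4];

   for(int i = 0; i != 4; ++i)
      for(int j = 0; j != 4; ++j)
         T[i][j] = B[j][i];

   // T[0] is now { 0, 4, 8, 12 }: word 0 of every block, one per lane
   std::printf("%u %u %u %u\n", T[0][0], T[0][1], T[0][2], T[0][3]);
   return 0;
   }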
diff --git a/src/utils/simd_32/simd_sse.h b/src/utils/simd_32/simd_sse.h
new file mode 100644
index 000000000..d32ffdc2e
--- /dev/null
+++ b/src/utils/simd_32/simd_sse.h
@@ -0,0 +1,150 @@
+/**
+* Lightweight wrappers for SSE2 intrinsics for 32-bit operations
+*/
+
+#ifndef BOTAN_SIMD_SSE_H__
+#define BOTAN_SIMD_SSE_H__
+
+#include <botan/types.h>
+#include <emmintrin.h>
+
+namespace Botan {
+
+class SIMD_SSE2
+ {
+ public:
+ SIMD_SSE2(const u32bit B[4])
+ {
+ reg = _mm_loadu_si128((const __m128i*)B);
+ }
+
+ SIMD_SSE2(u32bit B0, u32bit B1, u32bit B2, u32bit B3)
+ {
+ reg = _mm_set_epi32(B0, B1, B2, B3);
+ }
+
+ SIMD_SSE2(u32bit B)
+ {
+ reg = _mm_set1_epi32(B);
+ }
+
+ static SIMD_SSE2 load_le(const void* in)
+ {
+ return _mm_loadu_si128((const __m128i*)in);
+ }
+
+ static SIMD_SSE2 load_be(const void* in)
+ {
+ return load_le(in).bswap();
+ }
+
+ void store_le(byte out[]) const
+ {
+ _mm_storeu_si128((__m128i*)out, reg);
+ }
+
+ void store_be(byte out[]) const
+ {
+ bswap().store_le(out);
+ }
+
+ void rotate_left(u32bit rot)
+ {
+ reg = _mm_or_si128(_mm_slli_epi32(reg, rot),
+ _mm_srli_epi32(reg, 32-rot));
+ }
+
+ void rotate_right(u32bit rot)
+ {
+ rotate_left(32 - rot);
+ }
+
+ void operator+=(const SIMD_SSE2& other)
+ {
+ reg = _mm_add_epi32(reg, other.reg);
+ }
+
+ SIMD_SSE2 operator+(const SIMD_SSE2& other) const
+ {
+ return _mm_add_epi32(reg, other.reg);
+ }
+
+ void operator-=(const SIMD_SSE2& other)
+ {
+ reg = _mm_sub_epi32(reg, other.reg);
+ }
+
+ SIMD_SSE2 operator-(const SIMD_SSE2& other) const
+ {
+ return _mm_sub_epi32(reg, other.reg);
+ }
+
+ void operator^=(const SIMD_SSE2& other)
+ {
+ reg = _mm_xor_si128(reg, other.reg);
+ }
+
+ SIMD_SSE2 operator^(const SIMD_SSE2& other) const
+ {
+ return _mm_xor_si128(reg, other.reg);
+ }
+
+ void operator|=(const SIMD_SSE2& other)
+ {
+ reg = _mm_or_si128(reg, other.reg);
+ }
+
+ void operator&=(const SIMD_SSE2& other)
+ {
+ reg = _mm_and_si128(reg, other.reg);
+ }
+
+ SIMD_SSE2 operator<<(u32bit shift) const
+ {
+ return _mm_slli_epi32(reg, shift);
+ }
+
+ SIMD_SSE2 operator>>(u32bit shift) const
+ {
+ return _mm_srli_epi32(reg, shift);
+ }
+
+ SIMD_SSE2 operator~() const
+ {
+ static const __m128i all_ones = _mm_set1_epi32(0xFFFFFFFF);
+ return _mm_xor_si128(reg, all_ones);
+ }
+
+ static void transpose(SIMD_SSE2& B0, SIMD_SSE2& B1,
+ SIMD_SSE2& B2, SIMD_SSE2& B3)
+ {
+ __m128i T0 = _mm_unpacklo_epi32(B0.reg, B1.reg);
+ __m128i T1 = _mm_unpacklo_epi32(B2.reg, B3.reg);
+ __m128i T2 = _mm_unpackhi_epi32(B0.reg, B1.reg);
+ __m128i T3 = _mm_unpackhi_epi32(B2.reg, B3.reg);
+ B0.reg = _mm_unpacklo_epi64(T0, T1);
+ B1.reg = _mm_unpackhi_epi64(T0, T1);
+ B2.reg = _mm_unpacklo_epi64(T2, T3);
+ B3.reg = _mm_unpackhi_epi64(T2, T3);
+ }
+
+ private:
+ SIMD_SSE2(__m128i in) { reg = in; }
+
+ SIMD_SSE2 bswap() const
+ {
+ __m128i T = reg;
+
+ T = _mm_shufflehi_epi16(T, _MM_SHUFFLE(2, 3, 0, 1));
+ T = _mm_shufflelo_epi16(T, _MM_SHUFFLE(2, 3, 0, 1));
+
+ return _mm_or_si128(_mm_srli_epi16(T, 8),
+ _mm_slli_epi16(T, 8));
+ }
+
+ __m128i reg;
+ };
+
+}
+
+#endif
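
Note: the private bswap() above byte-swaps each 32-bit lane in two steps: the shufflehi/shufflelo pair swaps the two 16-bit halves of every word, and the 8-bit shift/or pair swaps the bytes inside each half. The scalar equivalent for one lane:

#include <cstdint>

uint32_t bswap32(uint32_t x)
   {
   x = (x << 16) | (x >> 16);                              // swap 16-bit halves
   x = ((x & 0x00FF00FF) << 8) | ((x >> 8) & 0x00FF00FF);  // swap bytes in each half
   return x;
   }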