path: root/src/utils
author     lloyd <[email protected]>   2009-11-13 22:51:01 +0000
committer  lloyd <[email protected]>   2009-11-13 22:51:01 +0000
commit     0972ba645555b0bf283eba71e4b9edacdf424eba (patch)
tree       ec5c59370b7ded3e0cc4e057601bad9df0d5d815 /src/utils
parent     1860807e4ed230f3aeea0831ec180e55d2f0eaa4 (diff)
parent     be9b28137b0de48d3f86c96655fa1bbc5c70973c (diff)
propagate from branch 'net.randombit.botan' (head ac888e57b614c623590d79ab615353ad7c76ef68)
to branch 'net.randombit.botan.c++0x' (head 9bf78ed7e2521a328f6db7acbc1cd81b07718230)
Diffstat (limited to 'src/utils')
-rw-r--r--  src/utils/cpuid.h                | 13
-rw-r--r--  src/utils/loadstor.h             | 35
-rw-r--r--  src/utils/simd_32/info.txt       | 13
-rw-r--r--  src/utils/simd_32/simd_altivec.h |  1
-rw-r--r--  src/utils/xor_buf.h              |  4
5 files changed, 36 insertions(+), 30 deletions(-)
diff --git a/src/utils/cpuid.h b/src/utils/cpuid.h
index 8b8021754..455721af9 100644
--- a/src/utils/cpuid.h
+++ b/src/utils/cpuid.h
@@ -60,11 +60,20 @@ class CPUID
{ return ((x86_processor_flags() >> CPUID_SSE42_BIT) & 1); }
/**
- * Check if the processor supports Intel AES instructions
+ * Check if the processor supports Intel's AES instructions
*/
- static bool has_intel_aes()
+ static bool has_aes_intel()
{ return ((x86_processor_flags() >> CPUID_INTEL_AES_BIT) & 1); }
+ /**
+ * Check if the processor supports VIA's AES instructions
+ * (not implemented)
+ */
+ static bool has_aes_via() { return false; }
+
+ /**
+ * Check if the processor supports AltiVec/VMX
+ */
static bool has_altivec();
private:
static u64bit x86_processor_flags();
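
Note: the hunk above renames has_intel_aes() to has_aes_intel() and adds a has_aes_via() stub.
A minimal dispatch sketch against these accessors, assuming the internal <botan/cpuid.h> header
is visible to the caller; the dispatch itself is hypothetical and not part of this commit:

// Hypothetical sketch: select an AES backend using the CPUID accessors above.
#include <botan/cpuid.h>
#include <iostream>

int main()
   {
   using Botan::CPUID;

   if(CPUID::has_aes_intel())           // was CPUID::has_intel_aes()
      std::cout << "AES-NI available\n";
   else if(CPUID::has_aes_via())        // stub in this commit, always false
      std::cout << "VIA AES available\n";
   else if(CPUID::has_altivec())
      std::cout << "AltiVec/VMX available\n";
   else
      std::cout << "scalar AES only\n";
   return 0;
   }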
diff --git a/src/utils/loadstor.h b/src/utils/loadstor.h
index 8f430f36c..b15cafd2c 100644
--- a/src/utils/loadstor.h
+++ b/src/utils/loadstor.h
@@ -13,8 +13,9 @@
#include <botan/bswap.h>
#include <botan/rotate.h>
#include <botan/prefetch.h>
+#include <cstring>
-#if BOTAN_TARGET_UNALIGNED_LOADSTOR_OK
+#if BOTAN_TARGET_UNALIGNED_MEMORY_ACCESS_OK
#if defined(BOTAN_TARGET_CPU_IS_BIG_ENDIAN)
@@ -101,7 +102,7 @@ inline T load_le(const byte in[], u32bit off)
template<>
inline u16bit load_be<u16bit>(const byte in[], u32bit off)
{
-#if BOTAN_TARGET_UNALIGNED_LOADSTOR_OK
+#if BOTAN_TARGET_UNALIGNED_MEMORY_ACCESS_OK
return BOTAN_ENDIAN_N2B(*(reinterpret_cast<const u16bit*>(in) + off));
#else
in += off * sizeof(u16bit);
@@ -112,7 +113,7 @@ inline u16bit load_be<u16bit>(const byte in[], u32bit off)
template<>
inline u16bit load_le<u16bit>(const byte in[], u32bit off)
{
-#if BOTAN_TARGET_UNALIGNED_LOADSTOR_OK
+#if BOTAN_TARGET_UNALIGNED_MEMORY_ACCESS_OK
return BOTAN_ENDIAN_N2L(*(reinterpret_cast<const u16bit*>(in) + off));
#else
in += off * sizeof(u16bit);
@@ -123,7 +124,7 @@ inline u16bit load_le<u16bit>(const byte in[], u32bit off)
template<>
inline u32bit load_be<u32bit>(const byte in[], u32bit off)
{
-#if BOTAN_TARGET_UNALIGNED_LOADSTOR_OK
+#if BOTAN_TARGET_UNALIGNED_MEMORY_ACCESS_OK
return BOTAN_ENDIAN_N2B(*(reinterpret_cast<const u32bit*>(in) + off));
#else
in += off * sizeof(u32bit);
@@ -134,7 +135,7 @@ inline u32bit load_be<u32bit>(const byte in[], u32bit off)
template<>
inline u32bit load_le<u32bit>(const byte in[], u32bit off)
{
-#if BOTAN_TARGET_UNALIGNED_LOADSTOR_OK
+#if BOTAN_TARGET_UNALIGNED_MEMORY_ACCESS_OK
return BOTAN_ENDIAN_N2L(*(reinterpret_cast<const u32bit*>(in) + off));
#else
in += off * sizeof(u32bit);
@@ -145,7 +146,7 @@ inline u32bit load_le<u32bit>(const byte in[], u32bit off)
template<>
inline u64bit load_be<u64bit>(const byte in[], u32bit off)
{
-#if BOTAN_TARGET_UNALIGNED_LOADSTOR_OK
+#if BOTAN_TARGET_UNALIGNED_MEMORY_ACCESS_OK
return BOTAN_ENDIAN_N2B(*(reinterpret_cast<const u64bit*>(in) + off));
#else
in += off * sizeof(u64bit);
@@ -157,7 +158,7 @@ inline u64bit load_be<u64bit>(const byte in[], u32bit off)
template<>
inline u64bit load_le<u64bit>(const byte in[], u32bit off)
{
-#if BOTAN_TARGET_UNALIGNED_LOADSTOR_OK
+#if BOTAN_TARGET_UNALIGNED_MEMORY_ACCESS_OK
return BOTAN_ENDIAN_N2L(*(reinterpret_cast<const u64bit*>(in) + off));
#else
in += off * sizeof(u64bit);
@@ -203,6 +204,9 @@ inline void load_le(T out[],
const byte in[],
u32bit count)
{
+#if defined(BOTAN_TARGET_CPU_IS_LITTLE_ENDIAN)
+ std::memcpy(out, in, sizeof(T)*count);
+#else
const u32bit blocks = count - (count % 4);
const u32bit left = count - blocks;
@@ -219,6 +223,7 @@ inline void load_le(T out[],
for(u32bit i = 0; i != left; ++i)
out[i] = load_le<T>(in, i);
+#endif
}
template<typename T>
@@ -258,6 +263,9 @@ inline void load_be(T out[],
const byte in[],
u32bit count)
{
+#if defined(BOTAN_TARGET_CPU_IS_BIG_ENDIAN)
+ std::memcpy(out, in, sizeof(T)*count);
+#else
const u32bit blocks = count - (count % 4);
const u32bit left = count - blocks;
@@ -274,6 +282,7 @@ inline void load_be(T out[],
for(u32bit i = 0; i != left; ++i)
out[i] = load_be<T>(in, i);
+#endif
}
/*
@@ -281,7 +290,7 @@ inline void load_be(T out[],
*/
inline void store_be(u16bit in, byte out[2])
{
-#if BOTAN_TARGET_UNALIGNED_LOADSTOR_OK
+#if BOTAN_TARGET_UNALIGNED_MEMORY_ACCESS_OK
*reinterpret_cast<u16bit*>(out) = BOTAN_ENDIAN_B2N(in);
#else
out[0] = get_byte(0, in);
@@ -291,7 +300,7 @@ inline void store_be(u16bit in, byte out[2])
inline void store_le(u16bit in, byte out[2])
{
-#if BOTAN_TARGET_UNALIGNED_LOADSTOR_OK
+#if BOTAN_TARGET_UNALIGNED_MEMORY_ACCESS_OK
*reinterpret_cast<u16bit*>(out) = BOTAN_ENDIAN_L2N(in);
#else
out[0] = get_byte(1, in);
@@ -301,7 +310,7 @@ inline void store_le(u16bit in, byte out[2])
inline void store_be(u32bit in, byte out[4])
{
-#if BOTAN_TARGET_UNALIGNED_LOADSTOR_OK
+#if BOTAN_TARGET_UNALIGNED_MEMORY_ACCESS_OK
*reinterpret_cast<u32bit*>(out) = BOTAN_ENDIAN_B2N(in);
#else
out[0] = get_byte(0, in);
@@ -313,7 +322,7 @@ inline void store_be(u32bit in, byte out[4])
inline void store_le(u32bit in, byte out[4])
{
-#if BOTAN_TARGET_UNALIGNED_LOADSTOR_OK
+#if BOTAN_TARGET_UNALIGNED_MEMORY_ACCESS_OK
*reinterpret_cast<u32bit*>(out) = BOTAN_ENDIAN_L2N(in);
#else
out[0] = get_byte(3, in);
@@ -325,7 +334,7 @@ inline void store_le(u32bit in, byte out[4])
inline void store_be(u64bit in, byte out[8])
{
-#if BOTAN_TARGET_UNALIGNED_LOADSTOR_OK
+#if BOTAN_TARGET_UNALIGNED_MEMORY_ACCESS_OK
*reinterpret_cast<u64bit*>(out) = BOTAN_ENDIAN_B2N(in);
#else
out[0] = get_byte(0, in);
@@ -341,7 +350,7 @@ inline void store_be(u64bit in, byte out[8])
inline void store_le(u64bit in, byte out[8])
{
-#if BOTAN_TARGET_UNALIGNED_LOADSTOR_OK
+#if BOTAN_TARGET_UNALIGNED_MEMORY_ACCESS_OK
*reinterpret_cast<u64bit*>(out) = BOTAN_ENDIAN_L2N(in);
#else
out[0] = get_byte(7, in);
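
The bulk load_le/load_be hunks above add a std::memcpy fast path when the host byte order already
matches the requested order, falling back to per-word loads otherwise. A standalone sketch of the
same idea under hypothetical names (EXAMPLE_HOST_IS_LITTLE_ENDIAN stands in for
BOTAN_TARGET_CPU_IS_LITTLE_ENDIAN; the real code uses Botan's load_le<T>/load_be<T>):

#include <cstring>
#include <cstdint>
#include <cstddef>
#include <cstdio>

// Assume the host is little-endian; mirrors BOTAN_TARGET_CPU_IS_LITTLE_ENDIAN.
#define EXAMPLE_HOST_IS_LITTLE_ENDIAN 1

inline uint32_t load_one_le(const unsigned char in[], size_t off)
   {
   in += off * sizeof(uint32_t);
   return static_cast<uint32_t>(in[0])       |
          static_cast<uint32_t>(in[1]) <<  8 |
          static_cast<uint32_t>(in[2]) << 16 |
          static_cast<uint32_t>(in[3]) << 24;
   }

inline void load_le_bulk(uint32_t out[], const unsigned char in[], size_t count)
   {
#if EXAMPLE_HOST_IS_LITTLE_ENDIAN
   // Host layout already matches: one memcpy replaces per-word byte handling.
   std::memcpy(out, in, sizeof(uint32_t) * count);
#else
   for(size_t i = 0; i != count; ++i)
      out[i] = load_one_le(in, i);
#endif
   }

int main()
   {
   const unsigned char bytes[8] = { 1,0,0,0, 2,0,0,0 };
   uint32_t words[2];
   load_le_bulk(words, bytes, 2);
   std::printf("%u %u\n", static_cast<unsigned>(words[0]),
                          static_cast<unsigned>(words[1]));   // prints "1 2"
   return 0;
   }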
diff --git a/src/utils/simd_32/info.txt b/src/utils/simd_32/info.txt
index 64707c1e4..883096a5d 100644
--- a/src/utils/simd_32/info.txt
+++ b/src/utils/simd_32/info.txt
@@ -1,16 +1,3 @@
define SIMD_32
load_on always
-
-<arch>
-pentium-m
-pentium4
-prescott
-amd64
-</arch>
-
-<cc>
-gcc
-icc
-msvc
-</cc>
diff --git a/src/utils/simd_32/simd_altivec.h b/src/utils/simd_32/simd_altivec.h
index c6dd8a289..3e784a8c4 100644
--- a/src/utils/simd_32/simd_altivec.h
+++ b/src/utils/simd_32/simd_altivec.h
@@ -13,6 +13,7 @@
#include <altivec.h>
#undef vector
+#undef bool
namespace Botan {
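
The extra #undef above matters because GCC's <altivec.h> may define vector, pixel and bool as
macros, which collide with ordinary C++ code included afterwards. An illustrative include pattern,
not part of this commit (requires an AltiVec-capable compiler, e.g. g++ -maltivec on PowerPC):

// <altivec.h> can #define vector and bool; undefine them right after inclusion,
// as the wrapper header above does, so standard C++ names work again.
#include <altivec.h>
#undef vector
#undef bool

#include <vector>   // safe: 'vector' is no longer a macro

int main()
   {
   std::vector<bool> flags(4, true);             // builtin bool again
   __vector unsigned int v = vec_splat_u32(1);   // AltiVec type spelled __vector
   (void)v;
   return flags[0] ? 0 : 1;
   }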
diff --git a/src/utils/xor_buf.h b/src/utils/xor_buf.h
index 39781f017..39c4a493d 100644
--- a/src/utils/xor_buf.h
+++ b/src/utils/xor_buf.h
@@ -22,7 +22,7 @@ inline void xor_buf(byte out[], const byte in[], u32bit length)
{
while(length >= 8)
{
-#if BOTAN_UNALIGNED_LOADSTOR_OK
+#if BOTAN_UNALIGNED_MEMORY_ACCESS_OK
*reinterpret_cast<u64bit*>(out) ^= *reinterpret_cast<const u64bit*>(in);
#else
out[0] ^= in[0]; out[1] ^= in[1];
@@ -51,7 +51,7 @@ inline void xor_buf(byte out[],
{
while(length >= 8)
{
-#if BOTAN_UNALIGNED_LOADSTOR_OK
+#if BOTAN_UNALIGNED_MEMORY_ACCESS_OK
*reinterpret_cast<u64bit*>(out) =
*reinterpret_cast<const u64bit*>(in) ^
*reinterpret_cast<const u64bit*>(in2);
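
Only the guard macro is renamed in this file; the technique is unchanged: XOR eight bytes at a
time through a u64bit access where unaligned memory access is acceptable. A standalone sketch
under hypothetical names (EXAMPLE_UNALIGNED_OK stands in for BOTAN_UNALIGNED_MEMORY_ACCESS_OK;
the real code is the xor_buf shown above):

#include <cstdint>
#include <cstddef>
#include <cstdio>

// Assume unaligned 64-bit access is acceptable on this target.
#define EXAMPLE_UNALIGNED_OK 1

inline void xor_bytes(unsigned char out[], const unsigned char in[], size_t length)
   {
   while(length >= 8)
      {
#if EXAMPLE_UNALIGNED_OK
      // One 64-bit XOR instead of eight byte XORs.
      *reinterpret_cast<uint64_t*>(out) ^= *reinterpret_cast<const uint64_t*>(in);
#else
      for(size_t i = 0; i != 8; ++i)
         out[i] ^= in[i];
#endif
      out += 8; in += 8; length -= 8;
      }
   for(size_t i = 0; i != length; ++i)   // tail bytes
      out[i] ^= in[i];
   }

int main()
   {
   unsigned char a[10] = { 0 }, b[10];
   for(size_t i = 0; i != 10; ++i)
      b[i] = static_cast<unsigned char>(i);
   xor_bytes(a, b, 10);
   std::printf("%u %u\n", static_cast<unsigned>(a[0]),
                          static_cast<unsigned>(a[9]));   // prints "0 9"
   return 0;
   }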