author    | lloyd <[email protected]> | 2009-11-06 14:04:16 +0000
committer | lloyd <[email protected]> | 2009-11-06 14:04:16 +0000
commit    | a495493c4ea42a68290df75e4a24562c6ec19a06 (patch)
tree      | 10ff3fcbdc4d753ec422a0b02f63c704e7e1ee86
parent    | f3c92264410c709e2a3ebf88ea3a67badf298c1c (diff)
Tick to 1.9.3-dev
Rename BOTAN_UNALIGNED_LOADSTOR_OK to BOTAN_UNALIGNED_MEMORY_ACCESS_OK
which states the macro's purpose more clearly.
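Not part of the commit, just for context: the renamed macro guards fast paths like the one in xor_buf.h, where eight bytes are XORed with a single word-sized access whenever the target tolerates loads and stores at arbitrary alignment. Below is a minimal standalone sketch of that pattern; the function name and the local #define are illustrative only (Botan takes the real value from the generated build.h, and uses reinterpret_cast rather than memcpy):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Standalone illustration of the guarded fast path; not Botan code.
// In Botan this macro comes from build.h, set by configure.py.
#define BOTAN_TARGET_UNALIGNED_MEMORY_ACCESS_OK 1

// XOR `length` bytes of `in` into `out`, 8 bytes at a time when the target
// allows word-sized accesses at arbitrary alignment (as xor_buf.h does).
inline void demo_xor_buf(uint8_t out[], const uint8_t in[], size_t length)
   {
   while(length >= 8)
      {
#if BOTAN_TARGET_UNALIGNED_MEMORY_ACCESS_OK
      // Fast path: one 64-bit XOR per iteration. memcpy is used here instead
      // of Botan's reinterpret_cast so the sketch stays well-defined anywhere.
      uint64_t a, b;
      std::memcpy(&a, out, 8);
      std::memcpy(&b, in, 8);
      a ^= b;
      std::memcpy(out, &a, 8);
#else
      // Portable fallback: byte-at-a-time XOR.
      for(size_t i = 0; i != 8; ++i)
         out[i] ^= in[i];
#endif
      out += 8;
      in += 8;
      length -= 8;
      }

   // Handle any tail of fewer than 8 bytes.
   for(size_t i = 0; i != length; ++i)
      out[i] ^= in[i];
   }

int main()
   {
   uint8_t out[11] = { 0 };
   const uint8_t in[11] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };
   demo_xor_buf(out, in, sizeof(out));
   for(size_t i = 0; i != sizeof(out); ++i)
      std::printf("%u ", static_cast<unsigned>(out[i]));   // prints 1..11
   std::printf("\n");
   }
```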
-rwxr-xr-x | configure.py               | 16
-rw-r--r-- | doc/log.txt                |  3
-rw-r--r-- | readme.txt                 |  2
-rw-r--r-- | src/utils/loadstor.h       | 26
-rw-r--r-- | src/utils/simd_32/info.txt | 13
-rw-r--r-- | src/utils/xor_buf.h        |  4
6 files changed, 27 insertions, 37 deletions
diff --git a/configure.py b/configure.py
index a0f9ec9d6..f07c0a087 100755
--- a/configure.py
+++ b/configure.py
@@ -37,9 +37,9 @@ class BuildConfigurationInformation(object):
     """
     version_major = 1
     version_minor = 9
-    version_patch = 2
-    version_so_patch = 2
-    version_suffix = ''
+    version_patch = 3
+    version_so_patch = 3
+    version_suffix = '-dev'
 
     version_string = '%d.%d.%d%s' % (
         version_major, version_minor, version_patch, version_suffix)
@@ -450,7 +450,7 @@ class ArchInfo(object):
     Return the types of SIMD supported by this submodel (if any)
     """
     def simd_in(self, cpu_type):
-        return self.simd.get(cpu_type, []) + self.simd.get('all', [])
+        return sorted(self.simd.get(cpu_type, []) + self.simd.get('all', []))
 
     """
     Return a list of all submodels for this arch
@@ -473,6 +473,9 @@ class ArchInfo(object):
         macros.append('TARGET_CPU_IS_%s' % (
             form_cpu_macro(target_submodel)))
 
+        for simd in self.simd_in(target_submodel):
+            macros.append('TARGET_CPU_HAS_%s' % (simd.upper()))
+
         if with_endian:
             macros.append('TARGET_CPU_IS_%s_ENDIAN' % (with_endian.upper()))
         elif self.endian != None:
@@ -483,10 +486,7 @@ class ArchInfo(object):
         if unaligned_ok:
             logging.info('Assuming unaligned memory access works on this CPU')
 
-        macros.append('TARGET_UNALIGNED_LOADSTOR_OK %d' % (unaligned_ok))
-
-        for simd in self.simd_in(target_submodel):
-            macros.append('TARGET_CPU_HAS_%s' % (simd.upper()))
+        macros.append('TARGET_UNALIGNED_MEMORY_ACCESS_OK %d' % (unaligned_ok))
 
         return macros
diff --git a/doc/log.txt b/doc/log.txt
index 97e40db5e..75fde9a5f 100644
--- a/doc/log.txt
+++ b/doc/log.txt
@@ -1,4 +1,7 @@
+* 1.9.3-dev, ????-??-??
+  - Set macros for available SIMD instructions in build.h
+
 * 1.9.2, 2009-11-03
   - Add SIMD version of XTEA
   - Support both SSE2 and AltiVec SIMD for Serpent and XTEA
 
diff --git a/readme.txt b/readme.txt
index a192a1e1e..44f1f471c 100644
--- a/readme.txt
+++ b/readme.txt
@@ -1,4 +1,4 @@
-Botan 1.9.2, 2009-11-03
+Botan 1.9.3-dev, ????-??-??
 
 Botan is a C++ class library for performing a wide variety of
 cryptographic operations.
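The configure.py hunk above also starts writing TARGET_CPU_HAS_* macros for each SIMD extension the submodel supports (and sorts simd_in() so their order is deterministic). As a rough sketch of how such generated build.h definitions can be consumed, assuming the BOTAN_ prefix and the SSE2/AltiVec names mentioned in the 1.9.2 changelog entry (none of this code appears in the commit):

```cpp
#include <cstdio>

// Hypothetical consumer of the generated macros; the exact names written to
// build.h (e.g. BOTAN_TARGET_CPU_HAS_SSE2) are assumed, not shown in this diff.
#if defined(BOTAN_TARGET_CPU_HAS_SSE2)
   #define DEMO_SIMD_BACKEND "SSE2"
#elif defined(BOTAN_TARGET_CPU_HAS_ALTIVEC)
   #define DEMO_SIMD_BACKEND "AltiVec"
#else
   #define DEMO_SIMD_BACKEND "scalar"
#endif

int main()
   {
   std::printf("SIMD backend selected at build time: %s\n", DEMO_SIMD_BACKEND);
   return 0;
   }
```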
diff --git a/src/utils/loadstor.h b/src/utils/loadstor.h
index 8f430f36c..489a789f4 100644
--- a/src/utils/loadstor.h
+++ b/src/utils/loadstor.h
@@ -14,7 +14,7 @@
 #include <botan/rotate.h>
 #include <botan/prefetch.h>
 
-#if BOTAN_TARGET_UNALIGNED_LOADSTOR_OK
+#if BOTAN_TARGET_UNALIGNED_MEMORY_ACCESS_OK
 
 #if defined(BOTAN_TARGET_CPU_IS_BIG_ENDIAN)
 
@@ -101,7 +101,7 @@ inline T load_le(const byte in[], u32bit off)
 template<>
 inline u16bit load_be<u16bit>(const byte in[], u32bit off)
    {
-#if BOTAN_TARGET_UNALIGNED_LOADSTOR_OK
+#if BOTAN_TARGET_UNALIGNED_MEMORY_ACCESS_OK
    return BOTAN_ENDIAN_N2B(*(reinterpret_cast<const u16bit*>(in) + off));
 #else
    in += off * sizeof(u16bit);
@@ -112,7 +112,7 @@ inline u16bit load_be<u16bit>(const byte in[], u32bit off)
 template<>
 inline u16bit load_le<u16bit>(const byte in[], u32bit off)
    {
-#if BOTAN_TARGET_UNALIGNED_LOADSTOR_OK
+#if BOTAN_TARGET_UNALIGNED_MEMORY_ACCESS_OK
    return BOTAN_ENDIAN_N2L(*(reinterpret_cast<const u16bit*>(in) + off));
 #else
    in += off * sizeof(u16bit);
@@ -123,7 +123,7 @@ inline u16bit load_le<u16bit>(const byte in[], u32bit off)
 template<>
 inline u32bit load_be<u32bit>(const byte in[], u32bit off)
    {
-#if BOTAN_TARGET_UNALIGNED_LOADSTOR_OK
+#if BOTAN_TARGET_UNALIGNED_MEMORY_ACCESS_OK
    return BOTAN_ENDIAN_N2B(*(reinterpret_cast<const u32bit*>(in) + off));
 #else
    in += off * sizeof(u32bit);
@@ -134,7 +134,7 @@ inline u32bit load_be<u32bit>(const byte in[], u32bit off)
 template<>
 inline u32bit load_le<u32bit>(const byte in[], u32bit off)
    {
-#if BOTAN_TARGET_UNALIGNED_LOADSTOR_OK
+#if BOTAN_TARGET_UNALIGNED_MEMORY_ACCESS_OK
    return BOTAN_ENDIAN_N2L(*(reinterpret_cast<const u32bit*>(in) + off));
 #else
    in += off * sizeof(u32bit);
@@ -145,7 +145,7 @@ inline u32bit load_le<u32bit>(const byte in[], u32bit off)
 template<>
 inline u64bit load_be<u64bit>(const byte in[], u32bit off)
    {
-#if BOTAN_TARGET_UNALIGNED_LOADSTOR_OK
+#if BOTAN_TARGET_UNALIGNED_MEMORY_ACCESS_OK
    return BOTAN_ENDIAN_N2B(*(reinterpret_cast<const u64bit*>(in) + off));
 #else
    in += off * sizeof(u64bit);
@@ -157,7 +157,7 @@ inline u64bit load_be<u64bit>(const byte in[], u32bit off)
 template<>
 inline u64bit load_le<u64bit>(const byte in[], u32bit off)
    {
-#if BOTAN_TARGET_UNALIGNED_LOADSTOR_OK
+#if BOTAN_TARGET_UNALIGNED_MEMORY_ACCESS_OK
    return BOTAN_ENDIAN_N2L(*(reinterpret_cast<const u64bit*>(in) + off));
 #else
    in += off * sizeof(u64bit);
@@ -281,7 +281,7 @@ inline void load_be(T out[],
 */
 inline void store_be(u16bit in, byte out[2])
    {
-#if BOTAN_TARGET_UNALIGNED_LOADSTOR_OK
+#if BOTAN_TARGET_UNALIGNED_MEMORY_ACCESS_OK
    *reinterpret_cast<u16bit*>(out) = BOTAN_ENDIAN_B2N(in);
 #else
    out[0] = get_byte(0, in);
@@ -291,7 +291,7 @@ inline void store_be(u16bit in, byte out[2])
 
 inline void store_le(u16bit in, byte out[2])
    {
-#if BOTAN_TARGET_UNALIGNED_LOADSTOR_OK
+#if BOTAN_TARGET_UNALIGNED_MEMORY_ACCESS_OK
    *reinterpret_cast<u16bit*>(out) = BOTAN_ENDIAN_L2N(in);
 #else
    out[0] = get_byte(1, in);
@@ -301,7 +301,7 @@ inline void store_le(u16bit in, byte out[2])
 
 inline void store_be(u32bit in, byte out[4])
    {
-#if BOTAN_TARGET_UNALIGNED_LOADSTOR_OK
+#if BOTAN_TARGET_UNALIGNED_MEMORY_ACCESS_OK
    *reinterpret_cast<u32bit*>(out) = BOTAN_ENDIAN_B2N(in);
 #else
    out[0] = get_byte(0, in);
@@ -313,7 +313,7 @@ inline void store_be(u32bit in, byte out[4])
 
 inline void store_le(u32bit in, byte out[4])
    {
-#if BOTAN_TARGET_UNALIGNED_LOADSTOR_OK
+#if BOTAN_TARGET_UNALIGNED_MEMORY_ACCESS_OK
    *reinterpret_cast<u32bit*>(out) = BOTAN_ENDIAN_L2N(in);
 #else
    out[0] = get_byte(3, in);
@@ -325,7 +325,7 @@ inline void store_le(u32bit in, byte out[4])
 
 inline void store_be(u64bit in, byte out[8])
    {
-#if BOTAN_TARGET_UNALIGNED_LOADSTOR_OK
+#if BOTAN_TARGET_UNALIGNED_MEMORY_ACCESS_OK
    *reinterpret_cast<u64bit*>(out) = BOTAN_ENDIAN_B2N(in);
 #else
    out[0] = get_byte(0, in);
@@ -341,7 +341,7 @@ inline void store_be(u64bit in, byte out[8])
 
 inline void store_le(u64bit in, byte out[8])
    {
-#if BOTAN_TARGET_UNALIGNED_LOADSTOR_OK
+#if BOTAN_TARGET_UNALIGNED_MEMORY_ACCESS_OK
    *reinterpret_cast<u64bit*>(out) = BOTAN_ENDIAN_L2N(in);
 #else
    out[0] = get_byte(7, in);
diff --git a/src/utils/simd_32/info.txt b/src/utils/simd_32/info.txt
index 64707c1e4..883096a5d 100644
--- a/src/utils/simd_32/info.txt
+++ b/src/utils/simd_32/info.txt
@@ -1,16 +1,3 @@
 define SIMD_32
 
 load_on always
-
-<arch>
-pentium-m
-pentium4
-prescott
-amd64
-</arch>
-
-<cc>
-gcc
-icc
-msvc
-</cc>
diff --git a/src/utils/xor_buf.h b/src/utils/xor_buf.h
index 39781f017..39c4a493d 100644
--- a/src/utils/xor_buf.h
+++ b/src/utils/xor_buf.h
@@ -22,7 +22,7 @@ inline void xor_buf(byte out[], const byte in[], u32bit length)
    {
    while(length >= 8)
       {
-#if BOTAN_UNALIGNED_LOADSTOR_OK
+#if BOTAN_UNALIGNED_MEMORY_ACCESS_OK
       *reinterpret_cast<u64bit*>(out) ^= *reinterpret_cast<const u64bit*>(in);
 #else
       out[0] ^= in[0]; out[1] ^= in[1];
@@ -51,7 +51,7 @@ inline void xor_buf(byte out[],
    {
    while(length >= 8)
       {
-#if BOTAN_UNALIGNED_LOADSTOR_OK
+#if BOTAN_UNALIGNED_MEMORY_ACCESS_OK
       *reinterpret_cast<u64bit*>(out) =
          *reinterpret_cast<const u64bit*>(in) ^
          *reinterpret_cast<const u64bit*>(in2);