diff options
author | lloyd <[email protected]> | 2009-12-21 13:51:05 +0000 |
---|---|---|
committer | lloyd <[email protected]> | 2009-12-21 13:51:05 +0000 |
commit | 75f32d61c6a78e4e63cfadd084730f20b5896493 (patch) | |
tree | 4a52d3009f012ec5761f1c1218e18efbe07eef0b /src/benchmark | |
parent | f3f36611db8c3f6c67c818d454973a0165b0fcf2 (diff) |
Un-internal loadstor.h (and its header deps, rotate.h and
bswap.h); too many external apps rely on loadstor.h existing.
Define 64-bit generic bswap in terms of 32-bit bswap, since it's
not much slower if 32-bit is also generic, and much faster if
it's not. This may be quite helpful on 32-bit x86 in particular.
Change formulation of generic 32-bit bswap. It may be faster or
slower depending on the CPU, especially the latency and throughput
of rotate instructions, but should be faster on an ideally
superscalar processor with rotate instructions (i.e., what I expect
future CPUs to look more like).
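The bswap changes themselves live in bswap.h and are not part of the diff shown below (which is limited to src/benchmark). As a rough illustration only, a generic formulation along the lines the message describes might look like the following sketch; the function names and exact constants here are assumptions, not the committed code.

```cpp
#include <cstdint>

// Illustrative sketch only -- not the code from this commit.
// Rotate-based byte swap of a 32-bit word: swap the bytes within each
// 16-bit half, then rotate the whole word by 16 bits.
inline uint32_t generic_bswap_32(uint32_t x)
   {
   x = ((x & 0x00FF00FFu) << 8) | ((x & 0xFF00FF00u) >> 8);
   return (x << 16) | (x >> 16); // rotate by 16
   }

// 64-bit byte swap built from two 32-bit swaps: swap each half,
// then exchange the halves. This is cheap whenever the 32-bit swap
// maps to a native instruction, and not much worse when it doesn't.
inline uint64_t generic_bswap_64(uint64_t x)
   {
   const uint32_t hi = static_cast<uint32_t>(x >> 32);
   const uint32_t lo = static_cast<uint32_t>(x);
   return (static_cast<uint64_t>(generic_bswap_32(lo)) << 32) |
          generic_bswap_32(hi);
   }
```

The rotate-based 32-bit form trades part of a shift-and-mask chain for a single 16-bit rotate, which is the trade-off the message describes: potentially slower where rotates have poor latency or throughput, but favorable on a wide superscalar core with cheap rotates.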
Diffstat (limited to 'src/benchmark')
-rw-r--r-- | src/benchmark/benchmark.cpp | 19 |
1 file changed, 10 insertions, 9 deletions
```diff
diff --git a/src/benchmark/benchmark.cpp b/src/benchmark/benchmark.cpp
index 69d3a40ec..b6060412e 100644
--- a/src/benchmark/benchmark.cpp
+++ b/src/benchmark/benchmark.cpp
@@ -14,6 +14,8 @@
 #include <botan/time.h>
 #include <memory>
 
+#include <iostream>
+
 namespace Botan {
 
 namespace {
@@ -26,15 +28,15 @@ std::pair<u64bit, u64bit> bench_buf_comp(BufferedComputation* buf_comp,
                                          const byte buf[], u32bit buf_len)
    {
    u64bit reps = 0;
-
-   const u64bit start = get_nanoseconds_clock();
    u64bit nanoseconds_used = 0;
 
    while(nanoseconds_used < nanoseconds_max)
       {
+      const u64bit start = get_nanoseconds_clock();
       buf_comp->update(buf, buf_len);
+      nanoseconds_used += get_nanoseconds_clock() - start;
+
       ++reps;
-      nanoseconds_used = get_nanoseconds_clock() - start;
       }
 
    return std::make_pair(reps * buf_len, nanoseconds_used);
@@ -51,18 +53,17 @@ bench_block_cipher(BlockCipher* block_cipher,
    const u32bit in_blocks = buf_len / block_cipher->BLOCK_SIZE;
 
    u64bit reps = 0;
-
-   const u64bit start = get_nanoseconds_clock();
    u64bit nanoseconds_used = 0;
 
    block_cipher->set_key(buf, block_cipher->MAXIMUM_KEYLENGTH);
 
    while(nanoseconds_used < nanoseconds_max)
      {
+      const u64bit start = get_nanoseconds_clock();
      block_cipher->encrypt_n(buf, buf, in_blocks);
+      nanoseconds_used += get_nanoseconds_clock() - start;
      ++reps;
-      nanoseconds_used = get_nanoseconds_clock() - start;
      }
 
    return std::make_pair(reps * in_blocks * block_cipher->BLOCK_SIZE,
@@ -78,17 +79,17 @@ bench_stream_cipher(StreamCipher* stream_cipher,
                    byte buf[], u32bit buf_len)
    {
    u64bit reps = 0;
-
-   const u64bit start = get_nanoseconds_clock();
    u64bit nanoseconds_used = 0;
 
    stream_cipher->set_key(buf, stream_cipher->MAXIMUM_KEYLENGTH);
 
    while(nanoseconds_used < nanoseconds_max)
      {
+      const u64bit start = get_nanoseconds_clock();
      stream_cipher->cipher1(buf, buf_len);
+      nanoseconds_used += get_nanoseconds_clock() - start;
+
      ++reps;
-      nanoseconds_used = get_nanoseconds_clock() - start;
      }
 
    return std::make_pair(reps * buf_len, nanoseconds_used);
```
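For context on the benchmark change above: the clock is now read around each individual call and the elapsed time accumulated, rather than read once before the loop, so only the measured operation contributes to nanoseconds_used. A stripped-down sketch of the resulting pattern is below; the callable and clock function are placeholders, not Botan API.

```cpp
#include <cstdint>
#include <utility>

// Minimal sketch of the accumulate-per-iteration timing loop used above.
// now_ns() and Work are stand-ins; only the call inside the loop is timed,
// so loop bookkeeping and setup no longer inflate the measurement.
template<typename Work>
std::pair<uint64_t, uint64_t> run_timed(Work work, uint64_t (*now_ns)(),
                                        uint64_t ns_max)
   {
   uint64_t reps = 0;
   uint64_t ns_used = 0;

   while(ns_used < ns_max)
      {
      const uint64_t start = now_ns();
      work();                      // the operation being benchmarked
      ns_used += now_ns() - start; // add just this iteration's cost
      ++reps;
      }

   return std::make_pair(reps, ns_used);
   }
```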