author     lloyd <[email protected]>  2007-05-31 03:25:19 +0000
committer  lloyd <[email protected]>  2007-05-31 03:25:19 +0000
commit     55608e7dd1aa593944f967f2549564e4f42b654e (patch)
tree       ec2ec03a762a6dac82eb608487d5394370135624 /src/whrlpool.cpp
parent     22ecdc45a0efa4c444d0b7010b7cd743aeb68c57 (diff)
Write functions to handle loading and saving words a block at a time, taking
into account endian differences.

The current code does not take advantage of knowing which endianness we are
running on; an optimization suggested by Yves Jerschow is to use (unsafe)
casts to speed up the load/store operations. This turns out to provide large
performance increases (30% or more) in some cases. Even without the unsafe
casts, this version seems to average a few percent faster, probably because
the longer loading loops have been partially or fully unrolled.

This also makes the code implementing low-level algorithms like ciphers and
hashes a bit more succinct.
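For reference, a minimal sketch of what portable big-endian load/store
helpers along these lines could look like is below. The names load_be_u64
and store_be_u64 and the HOST_IS_BIG_ENDIAN macro are illustrative
placeholders, not Botan's actual templated load_be/store_be API:

#include <cstdint>
#include <cstring>

typedef unsigned char byte;
typedef uint64_t u64bit;

// Read the off'th big-endian 64-bit word out of a byte array.
inline u64bit load_be_u64(const byte in[], uint32_t off)
   {
#if defined(HOST_IS_BIG_ENDIAN) // hypothetical configuration macro
   // Cast-style fast path: the host byte order already matches, so the
   // load degenerates to a plain 8-byte copy (memcpy sidesteps the
   // alignment problems a raw pointer cast would risk).
   u64bit x;
   std::memcpy(&x, in + 8*off, 8);
   return x;
#else
   // Portable path: assemble the word one byte at a time, which is
   // correct on any host endianness.
   const byte* p = in + 8*off;
   return ((u64bit)p[0] << 56) | ((u64bit)p[1] << 48) |
          ((u64bit)p[2] << 40) | ((u64bit)p[3] << 32) |
          ((u64bit)p[4] << 24) | ((u64bit)p[5] << 16) |
          ((u64bit)p[6] <<  8) |  (u64bit)p[7];
#endif
   }

// Write a 64-bit word to out[0..7], most significant byte first.
inline void store_be_u64(u64bit x, byte out[])
   {
   for(uint32_t i = 0; i != 8; ++i)
      out[i] = (byte)(x >> (56 - 8*i));
   }

With helpers of this shape, the eight-argument make_u64bit call in the diff
below collapses to a single indexed load, and the compiler can fully unroll
the fixed-length store loop, consistent with the small speedup reported even
without the cast trick.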
Diffstat (limited to 'src/whrlpool.cpp')
-rw-r--r--  src/whrlpool.cpp  7
1 file changed, 3 insertions, 4 deletions
diff --git a/src/whrlpool.cpp b/src/whrlpool.cpp
index 48cd79e5f..960095d9b 100644
--- a/src/whrlpool.cpp
+++ b/src/whrlpool.cpp
@@ -22,8 +22,7 @@ void Whirlpool::hash(const byte in[])
       };
 
    for(u32bit j = 0; j != 8; ++j)
-      M[j] = make_u64bit(in[8*j+0], in[8*j+1], in[8*j+2], in[8*j+3],
-                         in[8*j+4], in[8*j+5], in[8*j+6], in[8*j+7]);
+      M[j] = load_be<u64bit>(in, j);
 
    u64bit K0, K1, K2, K3, K4, K5, K6, K7;
    K0 = digest[0]; K1 = digest[1]; K2 = digest[2]; K3 = digest[3];
@@ -124,8 +123,8 @@ void Whirlpool::hash(const byte in[])
 *************************************************/
 void Whirlpool::copy_out(byte output[])
    {
-   for(u32bit j = 0; j != OUTPUT_LENGTH; ++j)
-      output[j] = get_byte(j % 8, digest[j/8]);
+   for(u32bit j = 0; j != OUTPUT_LENGTH; j += 8)
+      store_be(digest[j/8], output + j);
    }
 
 /*************************************************