From 7462977b8e5eb95a81a6253dc6e6224334ad6ae9 Mon Sep 17 00:00:00 2001
From: lloyd
Date: Thu, 29 Oct 2009 17:34:52 +0000
Subject: Add a new looping load_be / load_le for loading large arrays at
 once, and change some of the hash functions to use it as low hanging fruit.

Probably could use further optimization (just unrolls x4 currently), but
merely having it as syntax is good as it allows optimizing many functions at
once (eg using SSE2 to do 4-way byteswaps).
---
 src/hash/sha1/sha160.cpp | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

(limited to 'src/hash/sha1')

diff --git a/src/hash/sha1/sha160.cpp b/src/hash/sha1/sha160.cpp
index 88f2161e2..ff44593f6 100644
--- a/src/hash/sha1/sha160.cpp
+++ b/src/hash/sha1/sha160.cpp
@@ -61,14 +61,7 @@ void SHA_160::compress_n(const byte input[], u32bit blocks)
 
    for(u32bit i = 0; i != blocks; ++i)
       {
-      for(u32bit j = 0; j != 16; j += 4)
-         {
-         W[j  ] = load_be<u32bit>(input, j);
-         W[j+1] = load_be<u32bit>(input, j+1);
-         W[j+2] = load_be<u32bit>(input, j+2);
-         W[j+3] = load_be<u32bit>(input, j+3);
-         }
-      input += HASH_BLOCK_SIZE;
+      load_be(W.begin(), input, 16);
 
       for(u32bit j = 16; j != 80; j += 8)
          {
@@ -131,6 +124,8 @@ void SHA_160::compress_n(const byte input[], u32bit blocks)
       C = (digest[2] += C);
       D = (digest[3] += D);
       E = (digest[4] += E);
+
+      input += HASH_BLOCK_SIZE;
       }
    }
--
cgit v1.2.3
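
For context, below is a minimal sketch of what such a looping big-endian load
could look like. The names here (load_be32, byte, u32bit) and the scalar
helper are chosen for illustration only and are not the actual Botan
implementation; the sketch just shows the x4-unrolled loop form the commit
message describes, where a single loop body becomes the one place to optimize
later (e.g. with an SSE2 4-way byteswap).

   #include <stddef.h>
   #include <stdint.h>

   typedef unsigned char byte;
   typedef uint32_t u32bit;

   // Read one big-endian 32-bit word starting at in + 4*off
   inline u32bit load_be32(const byte in[], size_t off)
      {
      const byte* p = in + 4*off;
      return (u32bit(p[0]) << 24) | (u32bit(p[1]) << 16) |
             (u32bit(p[2]) <<  8) |  u32bit(p[3]);
      }

   // Looping form: load 'count' big-endian words into out[], unrolled x4,
   // with a scalar tail for counts that are not a multiple of 4.
   inline void load_be32(u32bit out[], const byte in[], size_t count)
      {
      size_t i = 0;
      for(; i + 4 <= count; i += 4)
         {
         out[i  ] = load_be32(in, i);
         out[i+1] = load_be32(in, i+1);
         out[i+2] = load_be32(in, i+2);
         out[i+3] = load_be32(in, i+3);
         }
      for(; i != count; ++i)
         out[i] = load_be32(in, i);
      }

   // A caller like SHA-1's compress_n would then replace its per-word
   // loads with a single call, e.g.: load_be32(W, input, 16);

The point of the change is exactly this centralization: once every hash
function loads its message block through the one looping routine, a later
improvement to that routine speeds up all of them at once.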