author    lloyd <[email protected]>    2009-10-29 17:34:52 +0000
committer lloyd <[email protected]>    2009-10-29 17:34:52 +0000
commit    7462977b8e5eb95a81a6253dc6e6224334ad6ae9 (patch)
tree      d14b468f7a04b02635b11bda017cca56259275b5 /src/hash/sha1
parent    5553c5cf54563280a4ffc94baab7b94a83cb0000 (diff)
Add a new looping load_be / load_le for loading large arrays at once, and
change some of the hash functions to use it as low-hanging fruit. Probably could use further optimization (it just unrolls x4 currently), but merely having it as syntax is good, as it allows optimizing many functions at once (e.g. using SSE2 to do 4-way byteswaps).
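For context, a minimal sketch of what such a looping big-endian load might look like for 32-bit words. The call site in the diff below, load_be(W.begin(), input, 16), suggests a (destination, source, count) shape; the helper name load_be32, the typedefs, and the exact body here are illustrative assumptions, not the actual code added elsewhere in the source tree by this commit.

    typedef unsigned char byte;    // stand-ins for Botan's byte / u32bit typedefs
    typedef unsigned int  u32bit;  // (assumes unsigned int is 32 bits)

    // Hypothetical helper: read one big-endian 32-bit word at word offset 'off'
    inline u32bit load_be32(const byte in[], u32bit off)
       {
       in += 4 * off;
       return (static_cast<u32bit>(in[0]) << 24) |
              (static_cast<u32bit>(in[1]) << 16) |
              (static_cast<u32bit>(in[2]) <<  8) |
              (static_cast<u32bit>(in[3]));
       }

    // Looping form, unrolled by 4 as the commit message describes,
    // with a scalar tail for counts that are not a multiple of 4
    inline void load_be(u32bit out[], const byte in[], u32bit count)
       {
       u32bit i = 0;
       for(; i + 4 <= count; i += 4)
          {
          out[i  ] = load_be32(in, i);
          out[i+1] = load_be32(in, i+1);
          out[i+2] = load_be32(in, i+2);
          out[i+3] = load_be32(in, i+3);
          }
       for(; i != count; ++i)
          out[i] = load_be32(in, i);
       }

With a helper of this shape available, the 16-word SHA-1 message schedule load in the diff below collapses from an explicit unrolled loop into a single call.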
Diffstat (limited to 'src/hash/sha1')
-rw-r--r--  src/hash/sha1/sha160.cpp  11
1 file changed, 3 insertions(+), 8 deletions(-)
diff --git a/src/hash/sha1/sha160.cpp b/src/hash/sha1/sha160.cpp
index 88f2161e2..ff44593f6 100644
--- a/src/hash/sha1/sha160.cpp
+++ b/src/hash/sha1/sha160.cpp
@@ -61,14 +61,7 @@ void SHA_160::compress_n(const byte input[], u32bit blocks)
for(u32bit i = 0; i != blocks; ++i)
{
- for(u32bit j = 0; j != 16; j += 4)
- {
- W[j ] = load_be<u32bit>(input, j);
- W[j+1] = load_be<u32bit>(input, j+1);
- W[j+2] = load_be<u32bit>(input, j+2);
- W[j+3] = load_be<u32bit>(input, j+3);
- }
- input += HASH_BLOCK_SIZE;
+ load_be(W.begin(), input, 16);
for(u32bit j = 16; j != 80; j += 8)
{
@@ -131,6 +124,8 @@ void SHA_160::compress_n(const byte input[], u32bit blocks)
C = (digest[2] += C);
D = (digest[3] += D);
E = (digest[4] += E);
+
+ input += HASH_BLOCK_SIZE;
}
}
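The commit message mentions SSE2 4-way byteswaps as one possible further optimization of this bulk load. A hedged sketch (not code from this commit) of how four 32-bit words can be byte-swapped at once with plain SSE2 intrinsics, which lack pshufb:

    #include <emmintrin.h>

    // Byte-swap each of the four 32-bit lanes of v using only SSE2
    static inline __m128i bswap32_x4(__m128i v)
       {
       // swap the two bytes inside every 16-bit lane
       v = _mm_or_si128(_mm_srli_epi16(v, 8), _mm_slli_epi16(v, 8));
       // swap the two 16-bit halves inside every 32-bit lane
       v = _mm_shufflelo_epi16(v, _MM_SHUFFLE(2, 3, 0, 1));
       v = _mm_shufflehi_epi16(v, _MM_SHUFFLE(2, 3, 0, 1));
       return v;
       }

On a little-endian machine, a 16-word load_be could then run as four unaligned 128-bit loads each followed by bswap32_x4, which is the kind of optimize-many-functions-at-once win the commit message alludes to.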