author     lloyd <[email protected]>  2009-11-03 16:47:11 +0000
committer  lloyd <[email protected]>  2009-11-03 16:47:11 +0000
commit     fa8a8437b9ce3b5275133230708af0964427651a (patch)
tree       1e2565f204d1c6b96f067bb980a98822eebf4465  /src/hash
parent     6b617a55bd02bcc4fdb6f76af92e0cb65fd838a2 (diff)
Convert the rest of the hash functions to use the array-based load instructions.
I'm not totally happy with this - in particular, in all cases the size is a compile-time constant, so it would be nice to make use of that via template metaprogramming. Also, for matching-endian loads a straight memcpy would do the job, which would probably be even faster.
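
As a rough illustration of the two ideas mentioned above - this is a standalone sketch with hypothetical names, not Botan's actual load/store interface: making the element count a template parameter lets the compiler see the loop bound as a constant (and unroll it), and on a target whose endianness already matches the wire format the whole load could collapse into a single memcpy.

// Hypothetical sketch only, not Botan's real loadstor API.
#include <cstdint>
#include <cstring>
#include <cstddef>

// Scalar big-endian load of the i'th 32-bit word from a byte buffer.
inline std::uint32_t load_be32(const std::uint8_t in[], std::size_t i)
   {
   in += 4 * i;
   return (std::uint32_t(in[0]) << 24) | (std::uint32_t(in[1]) << 16) |
          (std::uint32_t(in[2]) <<  8) | (std::uint32_t(in[3]));
   }

// Array form with the count as a template parameter deduced from the
// destination array, so the loop bound is a compile-time constant and the
// compiler is free to unroll it completely.
template<std::size_t N>
void load_be_n(std::uint32_t (&out)[N], const std::uint8_t in[])
   {
   for(std::size_t i = 0; i != N; ++i)
      out[i] = load_be32(in, i);
   }

// If the target is already big-endian the byte layout matches, so the load
// degenerates into one memcpy (assuming no alignment or padding concerns).
template<std::size_t N>
void load_be_n_native(std::uint32_t (&out)[N], const std::uint8_t in[])
   {
   std::memcpy(out, in, sizeof(out));
   }

int main()
   {
   const std::uint8_t block[64] = { 0x01, 0x02, 0x03, 0x04 }; // rest zero
   std::uint32_t M[16];
   load_be_n(M, block);
   return (M[0] == 0x01020304) ? 0 : 1;   // same result on any host endianness
   }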
Diffstat (limited to 'src/hash')
-rw-r--r--  src/hash/fork256/fork256.cpp      |  6
-rw-r--r--  src/hash/gost_3411/gost_3411.cpp  |  8
-rw-r--r--  src/hash/md4/md4.cpp              | 55
-rw-r--r--  src/hash/sha2/sha2_64.cpp         |  6
-rw-r--r--  src/hash/whirlpool/whrlpool.cpp   |  6
5 files changed, 41 insertions, 40 deletions
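
Each hunk below follows the same pattern: the per-word load loop is replaced by a single array-based load call, and the input-pointer advance moves from the top of the block loop to the bottom. The array overload is assumed to behave roughly like the standalone sketch here (hypothetical names; load_le would be the little-endian analogue used in the MD4 hunk):

#include <cstdint>
#include <cstddef>

// Load one big-endian word of type T at word index i from a byte buffer.
template<typename T>
T load_be_word(const std::uint8_t in[], std::size_t i)
   {
   in += sizeof(T) * i;
   T out = 0;
   for(std::size_t b = 0; b != sizeof(T); ++b)
      out = (out << 8) | in[b];    // most significant byte first
   return out;
   }

// Array overload: fill 'count' consecutive words from the byte buffer,
// i.e. the assumed drop-in replacement for loops such as
//    for(u32bit j = 0; j != 16; ++j)
//       W[j] = load_be<u64bit>(input, j);
template<typename T>
void load_be_array(T out[], const std::uint8_t in[], std::size_t count)
   {
   for(std::size_t i = 0; i != count; ++i)
      out[i] = load_be_word<T>(in, i);
   }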
diff --git a/src/hash/fork256/fork256.cpp b/src/hash/fork256/fork256.cpp
index 6718f9f97..bd85dfd7c 100644
--- a/src/hash/fork256/fork256.cpp
+++ b/src/hash/fork256/fork256.cpp
@@ -66,9 +66,7 @@ void FORK_256::compress_n(const byte input[], u32bit blocks)
G1 = G2 = G3 = G4 = digest[6];
H1 = H2 = H3 = H4 = digest[7];
- for(u32bit j = 0; j != 16; ++j)
- M[j] = load_be<u32bit>(input, j);
- input += HASH_BLOCK_SIZE;
+ load_be(M.begin(), input, M.size());
step(A1, B1, C1, D1, E1, F1, G1, H1, M[ 0], M[ 1], DELTA[ 0], DELTA[ 1]);
step(A2, B2, C2, D2, E2, F2, G2, H2, M[14], M[15], DELTA[15], DELTA[14]);
@@ -118,6 +116,8 @@ void FORK_256::compress_n(const byte input[], u32bit blocks)
digest[5] += (F1 + F2) ^ (F3 + F4);
digest[6] += (G1 + G2) ^ (G3 + G4);
digest[7] += (H1 + H2) ^ (H3 + H4);
+
+ input += HASH_BLOCK_SIZE;
}
}
diff --git a/src/hash/gost_3411/gost_3411.cpp b/src/hash/gost_3411/gost_3411.cpp
index 90ef3e805..16b1311da 100644
--- a/src/hash/gost_3411/gost_3411.cpp
+++ b/src/hash/gost_3411/gost_3411.cpp
@@ -79,12 +79,8 @@ void GOST_34_11::compress_n(const byte input[], u32bit blocks)
byte S[32] = { 0 };
u64bit U[4], V[4];
-
- for(u32bit j = 0; j != 4; ++j)
- {
- U[j] = load_be<u64bit>(hash, j);
- V[j] = load_be<u64bit>(input + 32*i, j);
- }
+ load_be(U, hash, 4);
+ load_be(V, input + 32*i, 4);
for(u32bit j = 0; j != 4; ++j)
{
diff --git a/src/hash/md4/md4.cpp b/src/hash/md4/md4.cpp
index c50c73a8d..f573dae25 100644
--- a/src/hash/md4/md4.cpp
+++ b/src/hash/md4/md4.cpp
@@ -51,36 +51,41 @@ void MD4::compress_n(const byte input[], u32bit blocks)
for(u32bit i = 0; i != blocks; ++i)
{
- //load_le(M.begin(), input, M.size());
- for(u32bit j = 0; j != 16; ++j)
- M[j] = load_le<u32bit>(input, j);
- input += HASH_BLOCK_SIZE;
-
- FF(A,B,C,D,M[ 0], 3); FF(D,A,B,C,M[ 1], 7); FF(C,D,A,B,M[ 2],11);
- FF(B,C,D,A,M[ 3],19); FF(A,B,C,D,M[ 4], 3); FF(D,A,B,C,M[ 5], 7);
- FF(C,D,A,B,M[ 6],11); FF(B,C,D,A,M[ 7],19); FF(A,B,C,D,M[ 8], 3);
- FF(D,A,B,C,M[ 9], 7); FF(C,D,A,B,M[10],11); FF(B,C,D,A,M[11],19);
- FF(A,B,C,D,M[12], 3); FF(D,A,B,C,M[13], 7); FF(C,D,A,B,M[14],11);
- FF(B,C,D,A,M[15],19);
-
- GG(A,B,C,D,M[ 0], 3); GG(D,A,B,C,M[ 4], 5); GG(C,D,A,B,M[ 8], 9);
- GG(B,C,D,A,M[12],13); GG(A,B,C,D,M[ 1], 3); GG(D,A,B,C,M[ 5], 5);
- GG(C,D,A,B,M[ 9], 9); GG(B,C,D,A,M[13],13); GG(A,B,C,D,M[ 2], 3);
- GG(D,A,B,C,M[ 6], 5); GG(C,D,A,B,M[10], 9); GG(B,C,D,A,M[14],13);
- GG(A,B,C,D,M[ 3], 3); GG(D,A,B,C,M[ 7], 5); GG(C,D,A,B,M[11], 9);
- GG(B,C,D,A,M[15],13);
-
- HH(A,B,C,D,M[ 0], 3); HH(D,A,B,C,M[ 8], 9); HH(C,D,A,B,M[ 4],11);
- HH(B,C,D,A,M[12],15); HH(A,B,C,D,M[ 2], 3); HH(D,A,B,C,M[10], 9);
- HH(C,D,A,B,M[ 6],11); HH(B,C,D,A,M[14],15); HH(A,B,C,D,M[ 1], 3);
- HH(D,A,B,C,M[ 9], 9); HH(C,D,A,B,M[ 5],11); HH(B,C,D,A,M[13],15);
- HH(A,B,C,D,M[ 3], 3); HH(D,A,B,C,M[11], 9); HH(C,D,A,B,M[ 7],11);
- HH(B,C,D,A,M[15],15);
+ load_le(M.begin(), input, M.size());
+
+ FF(A,B,C,D,M[ 0], 3); FF(D,A,B,C,M[ 1], 7);
+ FF(C,D,A,B,M[ 2],11); FF(B,C,D,A,M[ 3],19);
+ FF(A,B,C,D,M[ 4], 3); FF(D,A,B,C,M[ 5], 7);
+ FF(C,D,A,B,M[ 6],11); FF(B,C,D,A,M[ 7],19);
+ FF(A,B,C,D,M[ 8], 3); FF(D,A,B,C,M[ 9], 7);
+ FF(C,D,A,B,M[10],11); FF(B,C,D,A,M[11],19);
+ FF(A,B,C,D,M[12], 3); FF(D,A,B,C,M[13], 7);
+ FF(C,D,A,B,M[14],11); FF(B,C,D,A,M[15],19);
+
+ GG(A,B,C,D,M[ 0], 3); GG(D,A,B,C,M[ 4], 5);
+ GG(C,D,A,B,M[ 8], 9); GG(B,C,D,A,M[12],13);
+ GG(A,B,C,D,M[ 1], 3); GG(D,A,B,C,M[ 5], 5);
+ GG(C,D,A,B,M[ 9], 9); GG(B,C,D,A,M[13],13);
+ GG(A,B,C,D,M[ 2], 3); GG(D,A,B,C,M[ 6], 5);
+ GG(C,D,A,B,M[10], 9); GG(B,C,D,A,M[14],13);
+ GG(A,B,C,D,M[ 3], 3); GG(D,A,B,C,M[ 7], 5);
+ GG(C,D,A,B,M[11], 9); GG(B,C,D,A,M[15],13);
+
+ HH(A,B,C,D,M[ 0], 3); HH(D,A,B,C,M[ 8], 9);
+ HH(C,D,A,B,M[ 4],11); HH(B,C,D,A,M[12],15);
+ HH(A,B,C,D,M[ 2], 3); HH(D,A,B,C,M[10], 9);
+ HH(C,D,A,B,M[ 6],11); HH(B,C,D,A,M[14],15);
+ HH(A,B,C,D,M[ 1], 3); HH(D,A,B,C,M[ 9], 9);
+ HH(C,D,A,B,M[ 5],11); HH(B,C,D,A,M[13],15);
+ HH(A,B,C,D,M[ 3], 3); HH(D,A,B,C,M[11], 9);
+ HH(C,D,A,B,M[ 7],11); HH(B,C,D,A,M[15],15);
A = (digest[0] += A);
B = (digest[1] += B);
C = (digest[2] += C);
D = (digest[3] += D);
+
+ input += HASH_BLOCK_SIZE;
}
}
diff --git a/src/hash/sha2/sha2_64.cpp b/src/hash/sha2/sha2_64.cpp
index e260d8338..3e7c0e228 100644
--- a/src/hash/sha2/sha2_64.cpp
+++ b/src/hash/sha2/sha2_64.cpp
@@ -55,9 +55,7 @@ void SHA_384_512_BASE::compress_n(const byte input[], u32bit blocks)
for(u32bit i = 0; i != blocks; ++i)
{
- for(u32bit j = 0; j != 16; ++j)
- W[j] = load_be<u64bit>(input, j);
- input += HASH_BLOCK_SIZE;
+ load_be(W.begin(), input, 16);
for(u32bit j = 16; j != 80; j += 8)
{
@@ -160,6 +158,8 @@ void SHA_384_512_BASE::compress_n(const byte input[], u32bit blocks)
F = (digest[5] += F);
G = (digest[6] += G);
H = (digest[7] += H);
+
+ input += HASH_BLOCK_SIZE;
}
}
diff --git a/src/hash/whirlpool/whrlpool.cpp b/src/hash/whirlpool/whrlpool.cpp
index b7a02a9b6..06755fe77 100644
--- a/src/hash/whirlpool/whrlpool.cpp
+++ b/src/hash/whirlpool/whrlpool.cpp
@@ -25,9 +25,7 @@ void Whirlpool::compress_n(const byte in[], u32bit blocks)
for(u32bit i = 0; i != blocks; ++i)
{
- for(u32bit j = 0; j != 8; ++j)
- M[j] = load_be<u64bit>(in, j);
- in += HASH_BLOCK_SIZE;
+ load_be(M.begin(), in, M.size());
u64bit K0, K1, K2, K3, K4, K5, K6, K7;
K0 = digest[0]; K1 = digest[1]; K2 = digest[2]; K3 = digest[3];
@@ -121,6 +119,8 @@ void Whirlpool::compress_n(const byte in[], u32bit blocks)
digest[5] ^= B5 ^ M[5];
digest[6] ^= B6 ^ M[6];
digest[7] ^= B7 ^ M[7];
+
+ in += HASH_BLOCK_SIZE;
}
}