path: root/src/lib/block/camellia
author    Jack Lloyd <[email protected]>  2017-10-11 17:02:20 -0400
committer Jack Lloyd <[email protected]>  2017-10-12 11:13:11 -0400
commit    175f09ffd806f2f19cd509017a67ae1384f29ae1 (patch)
tree      6194884467e4720dd79797cd106a45d60211f35f /src/lib/block/camellia
parent    40b3f979723b2b3dfb5c44047d7f786a73fd7f6f (diff)
Add compile-time rotation functions
The problem with asm rol/ror is that the compiler can't schedule it effectively. But we only need asm when the rotation is variable, so distinguish the two cases. If the rotation is a compile-time constant, static_assert that it is in the correct range and use the straightforward expression, knowing the compiler will probably do the right thing. Otherwise use a tricky expression that both GCC and Clang happen to recognize. Avoid the reduction case; instead require that the rotation be in range (this reverts 2b37c13dcf).

Remove the asm rotations (making this branch ill-named), because now both Clang and GCC will generate a rol without any extra help.

Remove the reduction/mask by the word size for the variable case. The compiler can't optimize that out well, but it's easy to ensure it is valid in the callers, especially now that the variable-rotation call sites are easy to grep for.
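For illustration, a minimal sketch of what the two helpers described above could look like. The names and exact signatures are assumptions for this sketch (rotl_var in particular is a hypothetical name for the variable case), not necessarily Botan's implementation.

#include <cstddef>
#include <cstdint>

// Compile-time constant rotation: check the rotation amount with
// static_assert and use the plain shift/or expression, which the
// compiler folds into a single rotate instruction.
template<size_t ROT, typename T>
inline constexpr T rotl(T input)
   {
   static_assert(ROT > 0 && ROT < 8 * sizeof(T), "Invalid rotation constant");
   return static_cast<T>((input << ROT) | (input >> (8 * sizeof(T) - ROT)));
   }

// Variable rotation: the same shift/or pattern, which GCC and Clang
// recognize and turn into rol/ror. There is no masking of rot by the
// word size; the caller must ensure 0 < rot < 8 * sizeof(T).
template<typename T>
inline T rotl_var(T input, size_t rot)
   {
   return static_cast<T>((input << rot) | (input >> (8 * sizeof(T) - rot)));
   }

// Example use, matching the diff below: rotate a byte left by one bit.
//   uint8_t r = rotl<1>(some_byte);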
Diffstat (limited to 'src/lib/block/camellia')
-rw-r--r--  src/lib/block/camellia/camellia.cpp  16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/src/lib/block/camellia/camellia.cpp b/src/lib/block/camellia/camellia.cpp
index ea84fa313..89db6f8b9 100644
--- a/src/lib/block/camellia/camellia.cpp
+++ b/src/lib/block/camellia/camellia.cpp
@@ -577,12 +577,12 @@ uint64_t F_SLOW(uint64_t v, uint64_t K)
const uint64_t x = v ^ K;
const uint8_t t1 = SBOX[get_byte(0, x)];
- const uint8_t t2 = rotate_left(SBOX[get_byte(1, x)], 1);
- const uint8_t t3 = rotate_left(SBOX[get_byte(2, x)], 7);
- const uint8_t t4 = SBOX[rotate_left(get_byte(3, x), 1)];
- const uint8_t t5 = rotate_left(SBOX[get_byte(4, x)], 1);
- const uint8_t t6 = rotate_left(SBOX[get_byte(5, x)], 7);
- const uint8_t t7 = SBOX[rotate_left(get_byte(6, x), 1)];
+ const uint8_t t2 = rotl<1>(SBOX[get_byte(1, x)]);
+ const uint8_t t3 = rotl<7>(SBOX[get_byte(2, x)]);
+ const uint8_t t4 = SBOX[rotl<1>(get_byte(3, x))];
+ const uint8_t t5 = rotl<1>(SBOX[get_byte(4, x)]);
+ const uint8_t t6 = rotl<7>(SBOX[get_byte(5, x)]);
+ const uint8_t t7 = SBOX[rotl<1>(get_byte(6, x))];
const uint8_t t8 = SBOX[get_byte(7, x)];
const uint8_t y1 = t1 ^ t3 ^ t4 ^ t6 ^ t7 ^ t8;
@@ -619,7 +619,7 @@ inline uint64_t FL(uint64_t v, uint64_t K)
const uint32_t k1 = static_cast<uint32_t>(K >> 32);
const uint32_t k2 = static_cast<uint32_t>(K & 0xFFFFFFFF);
- x2 ^= rotate_left(x1 & k1, 1);
+ x2 ^= rotl<1>(x1 & k1);
x1 ^= (x2 | k2);
return ((static_cast<uint64_t>(x1) << 32) | x2);
@@ -634,7 +634,7 @@ inline uint64_t FLINV(uint64_t v, uint64_t K)
const uint32_t k2 = static_cast<uint32_t>(K & 0xFFFFFFFF);
x1 ^= (x2 | k2);
- x2 ^= rotate_left(x1 & k1, 1);
+ x2 ^= rotl<1>(x1 & k1);
return ((static_cast<uint64_t>(x1) << 32) | x2);
}