about summary refs log tree commit diff stats
path: root/src/block/aes
diff options
context:
space:
mode:
authorlloyd <[email protected]>2009-09-29 19:07:31 +0000
committerlloyd <[email protected]>2009-09-29 19:07:31 +0000
commit9a45274e5d7e91fa8cc0a13de3ec9b8195f96611 (patch)
tree335bef1f4338a8057509459c9abcc1b2dd5ee71e /src/block/aes
parent096ed3cfa340aa7c917da7a92ddade6dd69ab758 (diff)
Use prefetching in AES. Nominally, this will help somewhat with preventing
timing attacks, since once all the TE/SE tables are entirely in cache, timing attacks against it become somewhat harder. However, for this to be a full defense it would be necessary to ensure the tables were entirely loaded into cache, which is not guaranteed by the normal SSE prefetch instructions (or prefetch instructions for other CPUs, AFAIK). Much more importantly, it provides a 10% speedup.
Diffstat (limited to 'src/block/aes')
-rw-r--r-- src/block/aes/aes.cpp | 8
1 file changed, 8 insertions, 0 deletions
diff --git a/src/block/aes/aes.cpp b/src/block/aes/aes.cpp
index 34698ae7f..8ef9cd8fe 100644
--- a/src/block/aes/aes.cpp
+++ b/src/block/aes/aes.cpp
@@ -20,6 +20,10 @@ void AES::encrypt_n(const byte in[], byte out[], u32bit blocks) const
const u32bit* TE2 = TE + 512;
const u32bit* TE3 = TE + 768;
+ PREFETCH::readonly(TE, 1024);
+ PREFETCH::readonly(SE, 256);
+ PREFETCH::cipher_fetch(in, out, blocks, this->BLOCK_SIZE);
+
for(u32bit i = 0; i != blocks; ++i)
{
u32bit T0 = load_be<u32bit>(in, 0) ^ EK[0];
@@ -114,6 +118,10 @@ void AES::decrypt_n(const byte in[], byte out[], u32bit blocks) const
const u32bit* TD2 = TD + 512;
const u32bit* TD3 = TD + 768;
+ PREFETCH::readonly(TD, 1024);
+ PREFETCH::readonly(SD, 256);
+ PREFETCH::cipher_fetch(in, out, blocks, this->BLOCK_SIZE);
+
for(u32bit i = 0; i != blocks; ++i)
{
u32bit T0 = load_be<u32bit>(in, 0) ^ DK[0];