aboutsummaryrefslogtreecommitdiffstats
path: root/src/lib/modes/aead/gcm/clmul/clmul.cpp
blob: 33378d833e396939194715a5d84e16e19571a461 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
/*
* CLMUL hook
* (C) 2013 Jack Lloyd
*
* Botan is released under the Simplified BSD License (see license.txt)
*/

#include <botan/internal/clmul.h>
#include <immintrin.h>
#include <wmmintrin.h>

namespace Botan {

/*
* GHASH update using the x86 PCLMULQDQ (carry-less multiply) instruction.
*
* For each of the 'blocks' 16-byte blocks in 'input', computes
*    x = (x ^ block) * H
* in GF(2^128) as defined by GCM, writing the final accumulator back
* into x. H is the GCM hash subkey; x is the running GHASH state.
* All three byte arrays are read/written unaligned (loadu/storeu).
*/
BOTAN_FUNC_ISA("pclmul,ssse3")
void gcm_multiply_clmul(uint8_t x[16], const uint8_t H[16],
                        const uint8_t input[], size_t blocks)
   {
   /*
   * Algorithms 1 and 5 from Intel's CLMUL guide
   */
   // Full 16-byte reversal mask for pshufb: GHASH operands are
   // big-endian byte strings, while SSE loads are little-endian.
   const __m128i BSWAP_MASK = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);

   // Hash subkey H, loaded once and byte-reversed (loop invariant)
   const __m128i b = _mm_shuffle_epi8(_mm_loadu_si128(reinterpret_cast<const __m128i*>(H)), BSWAP_MASK);

   // Running GHASH accumulator, byte-reversed for the same reason
   __m128i a = _mm_loadu_si128(reinterpret_cast<const __m128i*>(x));
   a = _mm_shuffle_epi8(a, BSWAP_MASK);

   for(size_t i = 0; i != blocks; ++i)
      {
      __m128i m = _mm_loadu_si128(reinterpret_cast<const __m128i*>(input) + i);
      m = _mm_shuffle_epi8(m, BSWAP_MASK);

      // Fold the next message block into the accumulator, then
      // multiply the sum by H below
      a = _mm_xor_si128(a, m);

      __m128i T0, T1, T2, T3, T4, T5;

      // Schoolbook 128x128 -> 256 bit carry-less multiply via four
      // 64x64 products; the immediate selects which qword of each
      // operand is multiplied (bit 0 -> a, bit 4 -> b)
      T0 = _mm_clmulepi64_si128(a, b, 0x00); // a_lo * b_lo
      T1 = _mm_clmulepi64_si128(a, b, 0x01); // a_hi * b_lo
      T2 = _mm_clmulepi64_si128(a, b, 0x10); // a_lo * b_hi
      T3 = _mm_clmulepi64_si128(a, b, 0x11); // a_hi * b_hi

      // Combine the two middle (cross) products and distribute them
      // across the low (T0) and high (T3) 128-bit halves of the
      // 256-bit product [T3:T0]
      T1 = _mm_xor_si128(T1, T2);
      T2 = _mm_slli_si128(T1, 8);
      T1 = _mm_srli_si128(T1, 8);
      T0 = _mm_xor_si128(T0, T2);
      T3 = _mm_xor_si128(T3, T1);

      // Shift the whole 256-bit product left by one bit, propagating
      // carries across the 32-bit lanes and the two 128-bit halves;
      // this accounts for GHASH's reflected bit ordering (Algorithm 5)
      T4 = _mm_srli_epi32(T0, 31);
      T0 = _mm_slli_epi32(T0, 1);

      T5 = _mm_srli_epi32(T3, 31);
      T3 = _mm_slli_epi32(T3, 1);

      T2 = _mm_srli_si128(T4, 12); // carry out of the low half ...
      T5 = _mm_slli_si128(T5, 4);
      T4 = _mm_slli_si128(T4, 4);
      T0 = _mm_or_si128(T0, T4);
      T3 = _mm_or_si128(T3, T5);
      T3 = _mm_or_si128(T3, T2); // ... becomes carry into the high half

      // Reduction modulo the GCM polynomial x^128 + x^7 + x^2 + x + 1.
      // Phase 1: shift-left contributions (31, 30, 25) of the low half
      T4 = _mm_slli_epi32(T0, 31);
      T5 = _mm_slli_epi32(T0, 30);
      T2 = _mm_slli_epi32(T0, 25);

      T4 = _mm_xor_si128(T4, T5);
      T4 = _mm_xor_si128(T4, T2);
      T5 = _mm_srli_si128(T4, 4);
      T3 = _mm_xor_si128(T3, T5);
      T4 = _mm_slli_si128(T4, 12);
      T0 = _mm_xor_si128(T0, T4);
      T3 = _mm_xor_si128(T3, T0);

      // Phase 2: shift-right contributions (1, 2, 7) folded into the
      // high half, which becomes the reduced 128-bit result
      T4 = _mm_srli_epi32(T0, 1);
      T1 = _mm_srli_epi32(T0, 2);
      T2 = _mm_srli_epi32(T0, 7);
      T3 = _mm_xor_si128(T3, T1);
      T3 = _mm_xor_si128(T3, T2);
      T3 = _mm_xor_si128(T3, T4);

      // New accumulator value for the next block
      a = T3;
      }

   // Undo the byte reversal and store the updated GHASH state
   a = _mm_shuffle_epi8(a, BSWAP_MASK);
   _mm_storeu_si128(reinterpret_cast<__m128i*>(x), a);
   }

}