/*
 *  Armv8-A Cryptographic Extension support functions for AArch64
 *
 *  Copyright The Mbed TLS Contributors
 *  SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
 */

#if defined(__aarch64__) && !defined(__ARM_FEATURE_CRYPTO) && \
    defined(__clang__) && __clang_major__ >= 4
/* TODO: Reconsider the above once https://reviews.llvm.org/D131064 is merged.
 *
 * The intrinsic declarations are guarded by predefined ACLE macros in clang:
 * these are normally only enabled by the -march option on the command line.
 * By defining the macros ourselves we gain access to those declarations
 * without requiring -march on the command line.
 *
 * `arm_neon.h` could be included by any header file, so we put these defines
 * at the top of this file, before any includes.
 */
#define __ARM_FEATURE_CRYPTO 1
/* See: https://arm-software.github.io/acle/main/acle.html#cryptographic-extensions
 *
 * `__ARM_FEATURE_CRYPTO` is deprecated, but we need to continue to specify it
 * for older compilers.
 */
#define __ARM_FEATURE_AES    1
#define MBEDTLS_ENABLE_ARM_CRYPTO_EXTENSIONS_COMPILER_FLAG
#endif
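
/*
 * Illustrative note (an assumption, not from the original source): defining
 * the ACLE macros above gives access to the same intrinsic declarations as
 * passing the crypto target on the command line would, e.g.:
 *
 *     clang -march=armv8-a+crypto -c aesce.c
 */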

#include <string.h>
#include "common.h"

#if defined(MBEDTLS_AESCE_C)

#include "aesce.h"

#if defined(MBEDTLS_ARCH_IS_ARM64)

/* Compiler version checks. */
#if defined(__clang__)
#   if __clang_major__ < 4
#       error "Minimum version of Clang for MBEDTLS_AESCE_C is 4.0."
#   endif
#elif defined(__GNUC__)
#   if __GNUC__ < 6
#       error "Minimum version of GCC for MBEDTLS_AESCE_C is 6.0."
#   endif
#elif defined(_MSC_VER)
/* TODO: We haven't verified MSVC from 1920 to 1928. If someone verifies that,
 *       please update this check and the documentation of `MBEDTLS_AESCE_C`
 *       in `mbedtls_config.h`. */
#   if _MSC_VER < 1929
#       error "Minimum version of MSVC for MBEDTLS_AESCE_C is 2019 version 16.11.2."
#   endif
#endif

#ifdef __ARM_NEON
#include <arm_neon.h>
#else
#error "Target does not support NEON instructions"
#endif

#if !(defined(__ARM_FEATURE_CRYPTO) || defined(__ARM_FEATURE_AES)) || \
    defined(MBEDTLS_ENABLE_ARM_CRYPTO_EXTENSIONS_COMPILER_FLAG)
#   if defined(__ARMCOMPILER_VERSION)
#       if __ARMCOMPILER_VERSION <= 6090000
#           error "Must use minimum -march=armv8-a+crypto for MBEDTLS_AESCE_C"
#       else
#           pragma clang attribute push (__attribute__((target("aes"))), apply_to=function)
#           define MBEDTLS_POP_TARGET_PRAGMA
#       endif
#   elif defined(__clang__)
#       pragma clang attribute push (__attribute__((target("aes"))), apply_to=function)
#       define MBEDTLS_POP_TARGET_PRAGMA
#   elif defined(__GNUC__)
#       pragma GCC push_options
#       pragma GCC target ("+crypto")
#       define MBEDTLS_POP_TARGET_PRAGMA
#   elif defined(_MSC_VER)
#       error "Required feature(__ARM_FEATURE_AES) is not enabled."
#   endif
#endif /* !(__ARM_FEATURE_CRYPTO || __ARM_FEATURE_AES) ||
          MBEDTLS_ENABLE_ARM_CRYPTO_EXTENSIONS_COMPILER_FLAG */

#if defined(__linux__) && !defined(MBEDTLS_AES_USE_HARDWARE_ONLY)

#include <asm/hwcap.h>
#include <sys/auxv.h>

signed char mbedtls_aesce_has_support_result = -1;

#if !defined(MBEDTLS_AES_USE_HARDWARE_ONLY)
/*
 * AES instruction support detection routine
 */
int mbedtls_aesce_has_support_impl(void)
{
    /* To avoid many calls to getauxval, cache the result. This is
     * thread-safe, because we store the result in a char, so it cannot
     * be vulnerable to non-atomic updates.
     * It is possible that we could end up setting the result more than
     * once, but that is harmless.
     */
    if (mbedtls_aesce_has_support_result == -1) {
        unsigned long auxval = getauxval(AT_HWCAP);
        if ((auxval & (HWCAP_ASIMD | HWCAP_AES)) ==
            (HWCAP_ASIMD | HWCAP_AES)) {
            mbedtls_aesce_has_support_result = 1;
        } else {
            mbedtls_aesce_has_support_result = 0;
        }
    }
    return mbedtls_aesce_has_support_result;
}
#endif

#endif /* defined(__linux__) && !defined(MBEDTLS_AES_USE_HARDWARE_ONLY) */
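
/*
 * Illustrative dispatch sketch (hypothetical caller, not part of this file):
 * higher-level AES code can use the detection above to choose between the
 * hardware path in this file and the portable software implementation.
 *
 *     if (mbedtls_aesce_has_support_impl()) {
 *         // use mbedtls_aesce_crypt_ecb(), mbedtls_aesce_setkey_enc(), ...
 *     } else {
 *         // fall back to the plain C AES implementation
 *     }
 */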

/* Single round of AESCE encryption */
#define AESCE_ENCRYPT_ROUND                   \
    block = vaeseq_u8(block, vld1q_u8(keys)); \
    block = vaesmcq_u8(block);                \
    keys += 16
/* Two rounds of AESCE encryption */
#define AESCE_ENCRYPT_ROUND_X2        AESCE_ENCRYPT_ROUND; AESCE_ENCRYPT_ROUND

MBEDTLS_OPTIMIZE_FOR_PERFORMANCE
static uint8x16_t aesce_encrypt_block(uint8x16_t block,
                                      unsigned char *keys,
                                      int rounds)
{
    /* 10, 12 or 14 rounds. Unroll loop. */
    if (rounds == 10) {
        goto rounds_10;
    }
    if (rounds == 12) {
        goto rounds_12;
    }
    AESCE_ENCRYPT_ROUND_X2;
rounds_12:
    AESCE_ENCRYPT_ROUND_X2;
rounds_10:
    AESCE_ENCRYPT_ROUND_X2;
    AESCE_ENCRYPT_ROUND_X2;
    AESCE_ENCRYPT_ROUND_X2;
    AESCE_ENCRYPT_ROUND_X2;
    AESCE_ENCRYPT_ROUND;

    /* AES AddRoundKey for the previous round.
     * SubBytes, ShiftRows for the final round.  */
    block = vaeseq_u8(block, vld1q_u8(keys));
    keys += 16;

    /* Final round: no MixColumns */

    /* Final AddRoundKey */
    block = veorq_u8(block, vld1q_u8(keys));

    return block;
}
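
/*
 * Worked trace (derived from the labels above, not from the original
 * comments): for AES-128 (rounds == 10) control enters at rounds_10, so the
 * block goes through 9 full rounds (4 * ROUND_X2 + 1 * ROUND), one final
 * vaeseq_u8 without MixColumns, and a closing veorq_u8, i.e. 10 applications
 * of SubBytes/ShiftRows and 11 AddRoundKeys, matching FIPS-197. AES-192 and
 * AES-256 add one and two extra ROUND_X2 pairs respectively.
 */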

/* Single round of AESCE decryption
 *
 * AES AddRoundKey, SubBytes, ShiftRows
 *
 *      block = vaesdq_u8(block, vld1q_u8(keys));
 *
 * AES inverse MixColumns for the next round.
 *
 * This means that we switch the order of the inverse AddRoundKey and
 * inverse MixColumns operations. We have to do this as AddRoundKey is
 * done in a single instruction together with the inverses of SubBytes
 * and ShiftRows.
 *
 * It works because MixColumns is a linear operation over GF(2^8) and
 * AddRoundKey is an exclusive or, which is equivalent to addition over
 * GF(2^8), so InvMixColumns(state ^ roundkey) =
 * InvMixColumns(state) ^ InvMixColumns(roundkey). (The inverse of
 * MixColumns needs to be applied to the affected round keys separately,
 * which has been done when the decryption round keys were calculated.)
 *
 *      block = vaesimcq_u8(block);
 */
#define AESCE_DECRYPT_ROUND                   \
    block = vaesdq_u8(block, vld1q_u8(keys)); \
    block = vaesimcq_u8(block);               \
    keys += 16
/* Two rounds of AESCE decryption */
#define AESCE_DECRYPT_ROUND_X2        AESCE_DECRYPT_ROUND; AESCE_DECRYPT_ROUND

static uint8x16_t aesce_decrypt_block(uint8x16_t block,
                                      unsigned char *keys,
                                      int rounds)
{
    /* 10, 12 or 14 rounds. Unroll loop. */
    if (rounds == 10) {
        goto rounds_10;
    }
    if (rounds == 12) {
        goto rounds_12;
    }
    AESCE_DECRYPT_ROUND_X2;
rounds_12:
    AESCE_DECRYPT_ROUND_X2;
rounds_10:
    AESCE_DECRYPT_ROUND_X2;
    AESCE_DECRYPT_ROUND_X2;
    AESCE_DECRYPT_ROUND_X2;
    AESCE_DECRYPT_ROUND_X2;
    AESCE_DECRYPT_ROUND;

    /* The inverses of AES AddRoundKey, SubBytes, ShiftRows finishing up the
     * last full round. */
    block = vaesdq_u8(block, vld1q_u8(keys));
    keys += 16;

    /* Inverse AddRoundKey for inverting the initial round key addition. */
    block = veorq_u8(block, vld1q_u8(keys));

    return block;
}

/*
 * AES-ECB block en(de)cryption
 */
int mbedtls_aesce_crypt_ecb(mbedtls_aes_context *ctx,
                            int mode,
                            const unsigned char input[16],
                            unsigned char output[16])
{
    uint8x16_t block = vld1q_u8(&input[0]);
    unsigned char *keys = (unsigned char *) (ctx->buf + ctx->rk_offset);

    if (mode == MBEDTLS_AES_ENCRYPT) {
        block = aesce_encrypt_block(block, keys, ctx->nr);
    } else {
        block = aesce_decrypt_block(block, keys, ctx->nr);
    }
    vst1q_u8(&output[0], block);

    return 0;
}
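
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file);
 * it assumes a context already initialised with mbedtls_aes_setkey_enc().
 * Normal application code reaches this function indirectly through
 * mbedtls_aes_crypt_ecb():
 *
 *     unsigned char in[16] = { 0 }, out[16];
 *     if (mbedtls_aesce_crypt_ecb(&ctx, MBEDTLS_AES_ENCRYPT, in, out) == 0) {
 *         // out now holds the encrypted block
 *     }
 */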

/*
 * Compute decryption round keys from encryption round keys
 */
void mbedtls_aesce_inverse_key(unsigned char *invkey,
                               const unsigned char *fwdkey,
                               int nr)
{
    int i, j;
    j = nr;
    vst1q_u8(invkey, vld1q_u8(fwdkey + j * 16));
    for (i = 1, j--; j > 0; i++, j--) {
        vst1q_u8(invkey + i * 16,
                 vaesimcq_u8(vld1q_u8(fwdkey + j * 16)));
    }
    vst1q_u8(invkey + i * 16, vld1q_u8(fwdkey + j * 16));
}
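
/*
 * Worked layout for AES-128 (nr == 10), derived from the loop above (not
 * from the original comments):
 *
 *     invkey round key  0 = fwdkey round key 10 (copied as-is)
 *     invkey round key  i = InvMixColumns(fwdkey round key 10 - i),
 *                           for 1 <= i <= 9
 *     invkey round key 10 = fwdkey round key 0 (copied as-is)
 *
 * i.e. the round keys are reversed and the inner ones pass through
 * vaesimcq_u8(), matching the reordered decryption rounds in
 * aesce_decrypt_block().
 */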

static inline uint32_t aes_rot_word(uint32_t word)
{
    return (word << (32 - 8)) | (word >> 8);
}
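
/*
 * Worked example (not from the original comments): with round-key words
 * stored little-endian, aes_rot_word(0x0A0B0C0D) == 0x0D0A0B0C, which is
 * the one-byte RotWord rotation from FIPS-197 applied to the in-memory
 * byte order.
 */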

static inline uint32_t aes_sub_word(uint32_t in)
{
    uint8x16_t v = vreinterpretq_u8_u32(vdupq_n_u32(in));
    uint8x16_t zero = vdupq_n_u8(0);

    /* vaeseq_u8 does both SubBytes and ShiftRows. Taking the first row yields
     * the correct result as ShiftRows doesn't change the first row. */
    v = vaeseq_u8(zero, v);
    return vgetq_lane_u32(vreinterpretq_u32_u8(v), 0);
}
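
/*
 * Worked example (a standard FIPS-197 S-box fact, not from the original
 * comments): SubBytes maps 0x00 to 0x63, so aes_sub_word(0x00000000)
 * returns 0x63636363.
 */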

/*
 * Key expansion function
 */
static void aesce_setkey_enc(unsigned char *rk,
                             const unsigned char *key,
                             const size_t key_bit_length)
{
    static uint8_t const rcon[] = { 0x01, 0x02, 0x04, 0x08, 0x10,
                                    0x20, 0x40, 0x80, 0x1b, 0x36 };
    /* See https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.197.pdf
     *   - Section 5, Nr = Nk + 6
     *   - Section 5.2, the length of round keys is Nb*(Nr+1)
     */
    const uint32_t key_len_in_words = key_bit_length / 32;  /* Nk */
    const size_t round_key_len_in_words = 4;                /* Nb */
    const size_t rounds_needed = key_len_in_words + 6;      /* Nr */
    const size_t round_keys_len_in_words =
        round_key_len_in_words * (rounds_needed + 1);       /* Nb*(Nr+1) */
    const uint32_t *rko_end = (uint32_t *) rk + round_keys_len_in_words;

    memcpy(rk, key, key_len_in_words * 4);

    for (uint32_t *rki = (uint32_t *) rk;
         rki + key_len_in_words < rko_end;
         rki += key_len_in_words) {

        size_t iteration = (rki - (uint32_t *) rk) / key_len_in_words;
        uint32_t *rko;
        rko = rki + key_len_in_words;
        rko[0] = aes_rot_word(aes_sub_word(rki[key_len_in_words - 1]));
        rko[0] ^= rcon[iteration] ^ rki[0];
        rko[1] = rko[0] ^ rki[1];
        rko[2] = rko[1] ^ rki[2];
        rko[3] = rko[2] ^ rki[3];
        if (rko + key_len_in_words > rko_end) {
            /* Do not write overflow words. */
            continue;
        }
#if !defined(MBEDTLS_AES_ONLY_128_BIT_KEY_LENGTH)
        switch (key_bit_length) {
            case 128:
                break;
            case 192:
                rko[4] = rko[3] ^ rki[4];
                rko[5] = rko[4] ^ rki[5];
                break;
            case 256:
                rko[4] = aes_sub_word(rko[3]) ^ rki[4];
                rko[5] = rko[4] ^ rki[5];
                rko[6] = rko[5] ^ rki[6];
                rko[7] = rko[6] ^ rki[7];
                break;
        }
#endif /* !MBEDTLS_AES_ONLY_128_BIT_KEY_LENGTH */
    }
}
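
/*
 * Worked instances of the FIPS-197 formulas cited above (standard values,
 * not from the original comments):
 *
 *     key bits | Nk | Nr = Nk+6 | Nb*(Nr+1) round-key words
 *     ---------+----+-----------+--------------------------
 *         128  |  4 |    10     |  44
 *         192  |  6 |    12     |  52
 *         256  |  8 |    14     |  60
 */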

/*
 * Key expansion, wrapper
 */
int mbedtls_aesce_setkey_enc(unsigned char *rk,
                             const unsigned char *key,
                             size_t bits)
{
    switch (bits) {
        case 128:
        case 192:
        case 256:
            aesce_setkey_enc(rk, key, bits);
            break;
        default:
            return MBEDTLS_ERR_AES_INVALID_KEY_LENGTH;
    }

    return 0;
}

#if defined(MBEDTLS_GCM_C)

#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ == 5
/* Some intrinsics are not available for GCC 5.X. */
#define vreinterpretq_p64_u8(a) ((poly64x2_t) a)
#define vreinterpretq_u8_p128(a) ((uint8x16_t) a)
static inline poly64_t vget_low_p64(poly64x2_t __a)
{
    uint64x2_t tmp = (uint64x2_t) (__a);
    uint64x1_t lo = vcreate_u64(vgetq_lane_u64(tmp, 0));
    return (poly64_t) (lo);
}
#endif /* !__clang__ && __GNUC__ && __GNUC__ == 5 */

/* vmull_p64/vmull_high_p64 wrappers.
 *
 * Older compilers lack some intrinsics for `poly*_t`, so we use uint8x16_t
 * and uint8x16x3_t as input/output parameters.
 */
#if defined(__GNUC__) && !defined(__clang__)
/* GCC reports an incompatible-type error without the cast: it treats
 * poly64_t and poly64x1_t as distinct types, unlike MSVC and Clang. */
#define MBEDTLS_VMULL_P64(a, b) vmull_p64((poly64_t) a, (poly64_t) b)
#else
/* MSVC reports `error C2440: 'type cast'` with the cast, and Clang accepts
 * the call either way, so no cast is used here. */
#define MBEDTLS_VMULL_P64(a, b) vmull_p64(a, b)
#endif
static inline uint8x16_t pmull_low(uint8x16_t a, uint8x16_t b)
{
    return vreinterpretq_u8_p128(
        MBEDTLS_VMULL_P64(
            vget_low_p64(vreinterpretq_p64_u8(a)),
            vget_low_p64(vreinterpretq_p64_u8(b))
            ));
}

static inline uint8x16_t pmull_high(uint8x16_t a, uint8x16_t b)
{
    return vreinterpretq_u8_p128(
        vmull_high_p64(vreinterpretq_p64_u8(a),
                       vreinterpretq_p64_u8(b)));
}
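
/*
 * Carry-less multiplication example (not from the original comments): PMULL
 * multiplies polynomials over GF(2), where addition is XOR, so cross terms
 * cancel: 0b11 * 0b11 = 0b101, i.e. (x + 1)^2 = x^2 + 1 rather than the
 * integer result 9.
 */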

/* GHASH does 128b polynomial multiplication on blocks in GF(2^128) defined by
 * `x^128 + x^7 + x^2 + x + 1`.
 *
 * Arm64 only has 64b->128b polynomial multipliers, so we need four 64b
 * multiplies to generate one 128b*128b product.
 *
 * `poly_mult_128` performs the polynomial multiplication and outputs the
 * 256b result represented by three 128b vectors, as a code-size
 * optimization.
 *
 * Output layout:
 * |            |             |             |
 * |------------|-------------|-------------|
 * | ret.val[0] | h3:h2:00:00 | high   128b |
 * | ret.val[1] |   :m2:m1:00 | middle 128b |
 * | ret.val[2] |   :  :l1:l0 | low    128b |
 */
static inline uint8x16x3_t poly_mult_128(uint8x16_t a, uint8x16_t b)
{
    uint8x16x3_t ret;
    uint8x16_t h, m, l; /* retval high/middle/low */
    uint8x16_t c, d, e;

    h = pmull_high(a, b);                       /* h3:h2:00:00 = a1*b1 */
    l = pmull_low(a, b);                        /*   :  :l1:l0 = a0*b0 */
    c = vextq_u8(b, b, 8);                      /*      :c1:c0 = b0:b1 */
    d = pmull_high(a, c);                       /*   :d2:d1:00 = a1*b0 */
    e = pmull_low(a, c);                        /*   :e2:e1:00 = a0*b1 */
    m = veorq_u8(d, e);                         /*   :m2:m1:00 = d + e */

    ret.val[0] = h;
    ret.val[1] = m;
    ret.val[2] = l;
    return ret;
}

/*
 * Modulo reduction.
 *
 * See: https://www.researchgate.net/publication/285612706_Implementing_GCM_on_ARMv8
 *
 * Section 4.3
 *
 * Modular reduction is slightly more complex. Write the GCM modulus as
 * f(z) = z^128 + r(z), where r(z) = z^7 + z^2 + z + 1. The well-known
 * approach is to consider that z^128 ≡ r(z) (mod z^128 + r(z)), allowing
 * us to write the 256-bit operand to be reduced as
 * a(z) = h(z)z^128 + l(z) ≡ h(z)r(z) + l(z). That is, we simply multiply
 * the higher part of the operand by r(z) and add it to l(z). If the
 * result is still larger than 128 bits, we reduce again.
 */
static inline uint8x16_t poly_mult_reduce(uint8x16x3_t input)
{
    uint8x16_t const ZERO = vdupq_n_u8(0);

    /* 0x87 is the byte encoding of r(z) = z^7 + z^2 + z + 1. */
    uint64x2_t r = vreinterpretq_u64_u8(vdupq_n_u8(0x87));
#if defined(__GNUC__)
    /* Use 'asm' as an optimisation barrier to prevent loading MODULO from
     * memory. This applies to GNUC-compatible compilers.
     */
    asm ("" : "+w" (r));
#endif
    uint8x16_t const MODULO = vreinterpretq_u8_u64(vshrq_n_u64(r, 64 - 8));
    uint8x16_t h, m, l; /* input high/middle/low 128b */
    uint8x16_t c, d, e, f, g, n, o;
    h = input.val[0];            /* h3:h2:00:00                          */
    m = input.val[1];            /*   :m2:m1:00                          */
    l = input.val[2];            /*   :  :l1:l0                          */
    c = pmull_high(h, MODULO);   /*   :c2:c1:00 = reduction of h3        */
    d = pmull_low(h, MODULO);    /*   :  :d1:d0 = reduction of h2        */
    e = veorq_u8(c, m);          /*   :e2:e1:00 = m2:m1:00 + c2:c1:00    */
    f = pmull_high(e, MODULO);   /*   :  :f1:f0 = reduction of e2        */
    g = vextq_u8(ZERO, e, 8);    /*   :  :g1:00 = e1:00                  */
    n = veorq_u8(d, l);          /*   :  :n1:n0 = d1:d0 + l1:l0          */
    o = veorq_u8(n, f);          /*       o1:o0 = f1:f0 + n1:n0          */
    return veorq_u8(o, g);       /*             = o1:o0 + g1:00          */
}

/*
 * GCM multiplication: c = a times b in GF(2^128)
 */
void mbedtls_aesce_gcm_mult(unsigned char c[16],
                            const unsigned char a[16],
                            const unsigned char b[16])
{
    uint8x16_t va, vb, vc;
    va = vrbitq_u8(vld1q_u8(&a[0]));
    vb = vrbitq_u8(vld1q_u8(&b[0]));
    vc = vrbitq_u8(poly_mult_reduce(poly_mult_128(va, vb)));
    vst1q_u8(&c[0], vc);
}
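
/*
 * Note (derived from the code above, not from the original comments): GHASH
 * defines its field elements with the bit order reversed within each byte,
 * which is why both inputs and the result pass through vrbitq_u8().
 *
 * Illustrative GHASH accumulation sketch (hypothetical caller, not part of
 * this file), where h is the hash subkey E_K(0^128):
 *
 *     unsigned char y[16] = { 0 };
 *     for (size_t i = 0; i < 16; i++) {
 *         y[i] ^= block[i];                // absorb next 16-byte block
 *     }
 *     mbedtls_aesce_gcm_mult(y, y, h);     // y = y * H in GF(2^128)
 */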

#endif /* MBEDTLS_GCM_C */

#if defined(MBEDTLS_POP_TARGET_PRAGMA)
#if defined(__clang__)
#pragma clang attribute pop
#elif defined(__GNUC__)
#pragma GCC pop_options
#endif
#undef MBEDTLS_POP_TARGET_PRAGMA
#endif

#endif /* MBEDTLS_ARCH_IS_ARM64 */

#endif /* MBEDTLS_AESCE_C */