/*
 * Arm64 crypto extension support functions
 *
 * Copyright The Mbed TLS Contributors
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#if defined(__aarch64__) && !defined(__ARM_FEATURE_CRYPTO) && \
    defined(__clang__) && __clang_major__ >= 4
/* TODO: Reconsider the above once https://reviews.llvm.org/D131064 has been merged.
 *
 * The intrinsic declarations are guarded by predefined ACLE macros in clang:
 * these are normally only enabled by the -march option on the command line.
 * By defining the macros ourselves we gain access to those declarations without
 * requiring -march on the command line.
 *
 * `arm_neon.h` could be included by any header file, so we put these defines
 * at the top of this file, before any includes.
 */
#define __ARM_FEATURE_CRYPTO 1
/* See: https://arm-software.github.io/acle/main/acle.html#cryptographic-extensions
 *
 * `__ARM_FEATURE_CRYPTO` is deprecated, but we need to continue to specify it
 * for older compilers.
 */
#define __ARM_FEATURE_AES 1
#define MBEDTLS_NEED_TARGET_OPTIONS
#endif

#include <string.h>
#include "common.h"

#if defined(MBEDTLS_AESCE_C)

#include "aesce.h"

#if defined(MBEDTLS_HAVE_ARM64)

#if defined(__clang__)
#   if __clang_major__ < 4
#       error "A more recent Clang is required for MBEDTLS_AESCE_C"
#   endif
#   pragma clang attribute push (__attribute__((target("crypto"))), apply_to=function)
#   define MBEDTLS_POP_TARGET_PRAGMA
#elif defined(__GNUC__)
#   if __GNUC__ < 6
#       error "A more recent GCC is required for MBEDTLS_AESCE_C"
#   endif
#   pragma GCC push_options
#   pragma GCC target ("arch=armv8-a+crypto")
#   define MBEDTLS_POP_TARGET_PRAGMA
#else
#   error "Only GCC and Clang supported for MBEDTLS_AESCE_C"
#endif

#include <arm_neon.h>

#if defined(__linux__)
#include <asm/hwcap.h>
#include <sys/auxv.h>
#endif

/*
 * AES instruction support detection routine
 */
int mbedtls_aesce_has_support(void)
{
#if defined(__linux__)
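    /* Require the kernel to report both Advanced SIMD (NEON) and the AES
     * instructions: both HWCAP bits must be set in the auxiliary vector. */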
    unsigned long auxval = getauxval(AT_HWCAP);
    return (auxval & (HWCAP_ASIMD | HWCAP_AES)) ==
           (HWCAP_ASIMD | HWCAP_AES);
#else
    /* Assume AES instructions are supported. */
    return 1;
#endif
}

static uint8x16_t aesce_encrypt_block(uint8x16_t block,
                                      unsigned char *keys,
                                      int rounds)
{
    for (int i = 0; i < rounds - 1; i++) {
        /* AES AddRoundKey, SubBytes, ShiftRows (in this order).
         * AddRoundKey adds the round key for the previous round. */
        block = vaeseq_u8(block, vld1q_u8(keys + i * 16));
        /* AES MixColumns */
        block = vaesmcq_u8(block);
    }

    /* AES AddRoundKey for the previous round.
     * SubBytes, ShiftRows for the final round. */
    block = vaeseq_u8(block, vld1q_u8(keys + (rounds - 1) * 16));

    /* Final round: no MixColumns */

    /* Final AddRoundKey */
    block = veorq_u8(block, vld1q_u8(keys + rounds * 16));

    return block;
}

static uint8x16_t aesce_decrypt_block(uint8x16_t block,
                                      unsigned char *keys,
                                      int rounds)
{
    for (int i = 0; i < rounds - 1; i++) {
        /* AES AddRoundKey, SubBytes, ShiftRows */
        block = vaesdq_u8(block, vld1q_u8(keys + i * 16));
        /* AES inverse MixColumns for the next round.
         *
         * This means that we switch the order of the inverse AddRoundKey and
         * inverse MixColumns operations. We have to do this as AddRoundKey is
         * done in a single instruction together with the inverses of SubBytes
         * and ShiftRows.
         *
         * It works because MixColumns is a linear operation over GF(2^8) and
         * AddRoundKey is an exclusive or, which is equivalent to addition over
         * GF(2^8). (The inverse of MixColumns needs to be applied to the
         * affected round keys separately; this has already been done when the
         * decryption round keys were calculated.) */
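        /* Concretely, for any state s and round key k:
         *   InvMixColumns(s ^ k) == InvMixColumns(s) ^ InvMixColumns(k). */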
        block = vaesimcq_u8(block);
    }

    /* The inverses of AES AddRoundKey, SubBytes, ShiftRows finishing up the
     * last full round. */
    block = vaesdq_u8(block, vld1q_u8(keys + (rounds - 1) * 16));

    /* Inverse AddRoundKey for inverting the initial round key addition. */
    block = veorq_u8(block, vld1q_u8(keys + rounds * 16));

    return block;
}

/*
 * AES-ECB block en(de)cryption
 */
int mbedtls_aesce_crypt_ecb(mbedtls_aes_context *ctx,
                            int mode,
                            const unsigned char input[16],
                            unsigned char output[16])
{
    uint8x16_t block = vld1q_u8(&input[0]);
    unsigned char *keys = (unsigned char *) (ctx->buf + ctx->rk_offset);

    if (mode == MBEDTLS_AES_ENCRYPT) {
        block = aesce_encrypt_block(block, keys, ctx->nr);
    } else {
        block = aesce_decrypt_block(block, keys, ctx->nr);
    }
    vst1q_u8(&output[0], block);

    return 0;
}

/*
 * Compute decryption round keys from encryption round keys
 */
void mbedtls_aesce_inverse_key(unsigned char *invkey,
                               const unsigned char *fwdkey,
                               int nr)
{
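    /* The decryption round keys are the encryption round keys in reverse
     * order, with InvMixColumns applied to every key except the first and the
     * last (the "equivalent inverse cipher" construction of FIPS 197). */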
    int i, j;
    j = nr;
    vst1q_u8(invkey, vld1q_u8(fwdkey + j * 16));
    for (i = 1, j--; j > 0; i++, j--) {
        vst1q_u8(invkey + i * 16,
                 vaesimcq_u8(vld1q_u8(fwdkey + j * 16)));
    }
    vst1q_u8(invkey + i * 16, vld1q_u8(fwdkey + j * 16));
}

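/* RotWord (FIPS 197): a cyclic permutation moving the first byte of the word
 * to the end. With the word held in little-endian byte order, this is a
 * rotate right by 8 bits. */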
static inline uint32_t aes_rot_word(uint32_t word)
{
    return (word << (32 - 8)) | (word >> 8);
}

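/* SubWord (FIPS 197): apply the AES S-box to each byte of the word. */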
static inline uint32_t aes_sub_word(uint32_t in)
{
    uint8x16_t v = vreinterpretq_u8_u32(vdupq_n_u32(in));
    uint8x16_t zero = vdupq_n_u8(0);

    /* vaeseq_u8 does both SubBytes and ShiftRows. Taking the first row yields
     * the correct result as ShiftRows doesn't change the first row. */
    v = vaeseq_u8(zero, v);
    return vgetq_lane_u32(vreinterpretq_u32_u8(v), 0);
}

/*
 * Key expansion function
 */
static void aesce_setkey_enc(unsigned char *rk,
                             const unsigned char *key,
                             const size_t key_bit_length)
{
    static uint8_t const rcon[] = { 0x01, 0x02, 0x04, 0x08, 0x10,
                                    0x20, 0x40, 0x80, 0x1b, 0x36 };
    /* See https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.197.pdf
     *   - Section 5, Nr = Nk + 6
     *   - Section 5.2, the key expansion size is Nb*(Nr+1)
     */
    const uint32_t key_len_in_words = key_bit_length / 32; /* Nk */
    const size_t round_key_len_in_words = 4;               /* Nb */
    const size_t round_keys_needed = key_len_in_words + 6; /* Nr */
    const size_t key_expansion_size_in_words =
        round_key_len_in_words * (round_keys_needed + 1);  /* Nb*(Nr+1) */
    const uint32_t *rko_end = (uint32_t *) rk + key_expansion_size_in_words;
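    /* For example, with a 128-bit key: Nk = 4, Nr = 10, so the expansion is
     * 4 * (10 + 1) = 44 words (176 bytes); with a 256-bit key: Nk = 8,
     * Nr = 14, giving 4 * (14 + 1) = 60 words (240 bytes). */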

    memcpy(rk, key, key_len_in_words * 4);

    for (uint32_t *rki = (uint32_t *) rk;
         rki + key_len_in_words < rko_end;
         rki += key_len_in_words) {

        size_t iteration = (rki - (uint32_t *) rk) / key_len_in_words;
        uint32_t *rko;
        rko = rki + key_len_in_words;
        rko[0] = aes_rot_word(aes_sub_word(rki[key_len_in_words - 1]));
        rko[0] ^= rcon[iteration] ^ rki[0];
        rko[1] = rko[0] ^ rki[1];
        rko[2] = rko[1] ^ rki[2];
        rko[3] = rko[2] ^ rki[3];
        if (rko + key_len_in_words > rko_end) {
            /* Do not write words beyond the end of the key schedule. */
            continue;
        }
        switch (key_bit_length) {
            case 128:
                break;
            case 192:
                rko[4] = rko[3] ^ rki[4];
                rko[5] = rko[4] ^ rki[5];
                break;
            case 256:
                rko[4] = aes_sub_word(rko[3]) ^ rki[4];
                rko[5] = rko[4] ^ rki[5];
                rko[6] = rko[5] ^ rki[6];
                rko[7] = rko[6] ^ rki[7];
                break;
        }
    }
}

/*
 * Key expansion, wrapper
 */
int mbedtls_aesce_setkey_enc(unsigned char *rk,
                             const unsigned char *key,
                             size_t bits)
{
    switch (bits) {
        case 128:
        case 192:
        case 256:
            aesce_setkey_enc(rk, key, bits);
            break;
        default:
            return MBEDTLS_ERR_AES_INVALID_KEY_LENGTH;
    }

    return 0;
}

276
Jerry Yu48b999c2023-03-03 15:51:07 +0800277
278#if defined(MBEDTLS_POP_TARGET_PRAGMA)
279#if defined(__clang__)
280#pragma clang attribute pop
281#elif defined(__GNUC__)
282#pragma GCC pop_options
283#endif
284#undef MBEDTLS_POP_TARGET_PRAGMA
285#endif
286
Jerry Yu49231312023-01-10 16:57:21 +0800287#endif /* MBEDTLS_HAVE_ARM64 */
288
289#endif /* MBEDTLS_AESCE_C */