/*
 * Armv8-A Cryptographic Extension support functions for Aarch64
 *
 * Copyright The Mbed TLS Contributors
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#if defined(__aarch64__) && !defined(__ARM_FEATURE_CRYPTO) && \
    defined(__clang__) && __clang_major__ >= 4
/* TODO: Re-consider the above after https://reviews.llvm.org/D131064 is
 * merged.
 *
 * The intrinsic declarations are guarded by predefined ACLE macros in clang:
 * these are normally only enabled by the -march option on the command line.
 * By defining the macros ourselves we gain access to those declarations
 * without requiring -march on the command line.
 *
 * `arm_neon.h` could be included by any header file, so we put these defines
 * at the top of this file, before any includes.
 */
#define __ARM_FEATURE_CRYPTO 1
/* See: https://arm-software.github.io/acle/main/acle.html#cryptographic-extensions
 *
 * `__ARM_FEATURE_CRYPTO` is deprecated, but we need to continue to specify it
 * for older compilers.
 */
#define __ARM_FEATURE_AES 1
#define MBEDTLS_ENABLE_ARM_CRYPTO_EXTENSIONS_COMPILER_FLAG
#endif

#include <string.h>
#include "common.h"

#if defined(MBEDTLS_AESCE_C)

#include "aesce.h"

#if defined(MBEDTLS_HAVE_ARM64)

/* Compiler version checks. */
#if defined(__clang__)
#   if __clang_major__ < 4
#       error "Minimum version of Clang for MBEDTLS_AESCE_C is 4.0."
#   endif
#elif defined(__GNUC__)
#   if __GNUC__ < 6
#       error "Minimum version of GCC for MBEDTLS_AESCE_C is 6.0."
#   endif
#elif defined(_MSC_VER)
/* TODO: We haven't verified MSVC from 1920 to 1928. If someone has verified
 * one of those versions, please update this check and the documentation of
 * `MBEDTLS_AESCE_C` in `mbedtls_config.h`. */
#   if _MSC_VER < 1929
#       error "Minimum version of MSVC for MBEDTLS_AESCE_C is 2019 version 16.11.2."
#   endif
#endif

#if !defined(__ARM_FEATURE_AES) || defined(MBEDTLS_ENABLE_ARM_CRYPTO_EXTENSIONS_COMPILER_FLAG)
#   if defined(__clang__)
#       pragma clang attribute push (__attribute__((target("crypto"))), apply_to=function)
#       define MBEDTLS_POP_TARGET_PRAGMA
#   elif defined(__GNUC__)
#       pragma GCC push_options
#       pragma GCC target ("arch=armv8-a+crypto")
#       define MBEDTLS_POP_TARGET_PRAGMA
#   elif defined(_MSC_VER)
#       error "Required feature (__ARM_FEATURE_AES) is not enabled."
#   endif
#endif /* !__ARM_FEATURE_AES || MBEDTLS_ENABLE_ARM_CRYPTO_EXTENSIONS_COMPILER_FLAG */

#include <arm_neon.h>

#if defined(__linux__)
#include <asm/hwcap.h>
#include <sys/auxv.h>
#endif

/*
 * AES instruction support detection routine
 */
int mbedtls_aesce_has_support(void)
{
#if defined(__linux__)
    unsigned long auxval = getauxval(AT_HWCAP);
    return (auxval & (HWCAP_ASIMD | HWCAP_AES)) ==
           (HWCAP_ASIMD | HWCAP_AES);
#else
    /* Assume AES instructions are supported. */
    return 1;
#endif
}
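
/*
 * Illustrative sketch only (not part of the upstream file): callers are
 * expected to gate use of these routines on the runtime check above,
 * along these lines (in Mbed TLS itself the dispatch typically lives in
 * aes.c):
 *
 *     if (mbedtls_aesce_has_support()) {
 *         mbedtls_aesce_crypt_ecb(ctx, MBEDTLS_AES_ENCRYPT, input, output);
 *     } else {
 *         ... fall back to the portable C implementation ...
 *     }
 */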

static uint8x16_t aesce_encrypt_block(uint8x16_t block,
                                      unsigned char *keys,
                                      int rounds)
{
    /* Assume either 10, 12 or 14 rounds */
    if (rounds == 10) {
        goto rounds_10;
    }
    if (rounds == 12) {
        goto rounds_12;
    }
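    /* The two rounds below run only for 14-round (AES-256) keys; 12-round
     * (AES-192) keys enter at rounds_12 and 10-round (AES-128) keys at
     * rounds_10, so each key size executes rounds - 1 full rounds (with
     * MixColumns) followed by the final round at the bottom. */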
    block = vaeseq_u8(block, vld1q_u8(keys));
    block = vaesmcq_u8(block);
    keys += 16;
    block = vaeseq_u8(block, vld1q_u8(keys));
    block = vaesmcq_u8(block);
    keys += 16;
rounds_12:
    block = vaeseq_u8(block, vld1q_u8(keys));
    block = vaesmcq_u8(block);
    keys += 16;
    block = vaeseq_u8(block, vld1q_u8(keys));
    block = vaesmcq_u8(block);
    keys += 16;
rounds_10:
    block = vaeseq_u8(block, vld1q_u8(keys));
    block = vaesmcq_u8(block);
    keys += 16;
    block = vaeseq_u8(block, vld1q_u8(keys));
    block = vaesmcq_u8(block);
    keys += 16;
    block = vaeseq_u8(block, vld1q_u8(keys));
    block = vaesmcq_u8(block);
    keys += 16;
    block = vaeseq_u8(block, vld1q_u8(keys));
    block = vaesmcq_u8(block);
    keys += 16;
    block = vaeseq_u8(block, vld1q_u8(keys));
    block = vaesmcq_u8(block);
    keys += 16;
    block = vaeseq_u8(block, vld1q_u8(keys));
    block = vaesmcq_u8(block);
    keys += 16;
    block = vaeseq_u8(block, vld1q_u8(keys));
    block = vaesmcq_u8(block);
    keys += 16;
    block = vaeseq_u8(block, vld1q_u8(keys));
    block = vaesmcq_u8(block);
    keys += 16;
    block = vaeseq_u8(block, vld1q_u8(keys));
    block = vaesmcq_u8(block);
    keys += 16;

    /* AES AddRoundKey for the previous round.
     * SubBytes, ShiftRows for the final round. */
    block = vaeseq_u8(block, vld1q_u8(keys));
    keys += 16;

    /* Final round: no MixColumns */

    /* Final AddRoundKey */
    block = veorq_u8(block, vld1q_u8(keys));

    return block;
}

static uint8x16_t aesce_decrypt_block(uint8x16_t block,
                                      unsigned char *keys,
                                      int rounds)
{
    /* Assume either 10, 12 or 14 rounds */
    if (rounds == 10) {
        goto rounds_10;
    }
    if (rounds == 12) {
        goto rounds_12;
    }

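    /* As in aesce_encrypt_block above, the first two rounds below run only
     * for 14-round (AES-256) keys; 12- and 10-round keys jump to the
     * corresponding labels. */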
    /* AES AddRoundKey, SubBytes, ShiftRows */
    block = vaesdq_u8(block, vld1q_u8(keys));
    /* AES inverse MixColumns for the next round.
     *
     * This means that we switch the order of the inverse AddRoundKey and
     * inverse MixColumns operations. We have to do this as AddRoundKey is
     * done in an atomic instruction together with the inverses of SubBytes
     * and ShiftRows.
     *
     * It works because MixColumns is a linear operation over GF(2^8) and
     * AddRoundKey is an exclusive or, which is equivalent to addition over
     * GF(2^8). (The inverse of MixColumns needs to be applied to the
     * affected round keys separately, which has been done when the
     * decryption round keys were calculated.) */
    block = vaesimcq_u8(block);
    keys += 16;

    block = vaesdq_u8(block, vld1q_u8(keys));
    block = vaesimcq_u8(block);
    keys += 16;
rounds_12:
    block = vaesdq_u8(block, vld1q_u8(keys));
    block = vaesimcq_u8(block);
    keys += 16;
    block = vaesdq_u8(block, vld1q_u8(keys));
    block = vaesimcq_u8(block);
    keys += 16;
rounds_10:
    block = vaesdq_u8(block, vld1q_u8(keys));
    block = vaesimcq_u8(block);
    keys += 16;
    block = vaesdq_u8(block, vld1q_u8(keys));
    block = vaesimcq_u8(block);
    keys += 16;
    block = vaesdq_u8(block, vld1q_u8(keys));
    block = vaesimcq_u8(block);
    keys += 16;
    block = vaesdq_u8(block, vld1q_u8(keys));
    block = vaesimcq_u8(block);
    keys += 16;
    block = vaesdq_u8(block, vld1q_u8(keys));
    block = vaesimcq_u8(block);
    keys += 16;
    block = vaesdq_u8(block, vld1q_u8(keys));
    block = vaesimcq_u8(block);
    keys += 16;
    block = vaesdq_u8(block, vld1q_u8(keys));
    block = vaesimcq_u8(block);
    keys += 16;
    block = vaesdq_u8(block, vld1q_u8(keys));
    block = vaesimcq_u8(block);
    keys += 16;
    block = vaesdq_u8(block, vld1q_u8(keys));
    block = vaesimcq_u8(block);
    keys += 16;

    /* The inverses of AES AddRoundKey, SubBytes, ShiftRows finishing up the
     * last full round. */
    block = vaesdq_u8(block, vld1q_u8(keys));
    keys += 16;

    /* Inverse AddRoundKey for inverting the initial round key addition. */
    block = veorq_u8(block, vld1q_u8(keys));

    return block;
}

/*
 * AES-ECB block en(de)cryption
 */
int mbedtls_aesce_crypt_ecb(mbedtls_aes_context *ctx,
                            int mode,
                            const unsigned char input[16],
                            unsigned char output[16])
{
    uint8x16_t block = vld1q_u8(&input[0]);
    unsigned char *keys = (unsigned char *) (ctx->buf + ctx->rk_offset);

    if (mode == MBEDTLS_AES_ENCRYPT) {
        block = aesce_encrypt_block(block, keys, ctx->nr);
    } else {
        block = aesce_decrypt_block(block, keys, ctx->nr);
    }
    vst1q_u8(&output[0], block);

    return 0;
}

/*
 * Compute decryption round keys from encryption round keys
 */
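/* Worked example (derived from the loop below), for nr = 10, where each
 * index denotes one 16-byte round key:
 *   invkey[0]  = fwdkey[10]
 *   invkey[i]  = InvMixColumns(fwdkey[10 - i])   for i = 1..9
 *   invkey[10] = fwdkey[0]
 */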
void mbedtls_aesce_inverse_key(unsigned char *invkey,
                               const unsigned char *fwdkey,
                               int nr)
{
    int i, j;
    j = nr;
    vst1q_u8(invkey, vld1q_u8(fwdkey + j * 16));
    for (i = 1, j--; j > 0; i++, j--) {
        vst1q_u8(invkey + i * 16,
                 vaesimcq_u8(vld1q_u8(fwdkey + j * 16)));
    }
    vst1q_u8(invkey + i * 16, vld1q_u8(fwdkey + j * 16));
}

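/* RotWord from the key schedule: rotate the 32-bit word right by 8 bits,
 * e.g. aes_rot_word(0x12345678) == 0x78123456. */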
static inline uint32_t aes_rot_word(uint32_t word)
{
    return (word << (32 - 8)) | (word >> 8);
}

static inline uint32_t aes_sub_word(uint32_t in)
{
    uint8x16_t v = vreinterpretq_u8_u32(vdupq_n_u32(in));
    uint8x16_t zero = vdupq_n_u8(0);

    /* vaeseq_u8 does both SubBytes and ShiftRows. Taking the first row yields
     * the correct result as ShiftRows doesn't change the first row. */
    v = vaeseq_u8(zero, v);
    return vgetq_lane_u32(vreinterpretq_u32_u8(v), 0);
}

/*
 * Key expansion function
 */
static void aesce_setkey_enc(unsigned char *rk,
                             const unsigned char *key,
                             const size_t key_bit_length)
{
    static uint8_t const rcon[] = { 0x01, 0x02, 0x04, 0x08, 0x10,
                                    0x20, 0x40, 0x80, 0x1b, 0x36 };
    /* See https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.197.pdf
     * - Section 5, Nr = Nk + 6
     * - Section 5.2, the length of round keys is Nb*(Nr+1)
     */
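    /* For example, per FIPS-197: a 128-bit key has Nk = 4 and Nr = 10,
     * giving 4*(10+1) = 44 round-key words; a 256-bit key has Nk = 8 and
     * Nr = 14, giving 60 words. */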
    const uint32_t key_len_in_words = key_bit_length / 32; /* Nk */
    const size_t round_key_len_in_words = 4;               /* Nb */
    const size_t rounds_needed = key_len_in_words + 6;     /* Nr */
    const size_t round_keys_len_in_words =
        round_key_len_in_words * (rounds_needed + 1);      /* Nb*(Nr+1) */
    const uint32_t *rko_end = (uint32_t *) rk + round_keys_len_in_words;

    memcpy(rk, key, key_len_in_words * 4);

    for (uint32_t *rki = (uint32_t *) rk;
         rki + key_len_in_words < rko_end;
         rki += key_len_in_words) {

        size_t iteration = (rki - (uint32_t *) rk) / key_len_in_words;
        uint32_t *rko;
        rko = rki + key_len_in_words;
        rko[0] = aes_rot_word(aes_sub_word(rki[key_len_in_words - 1]));
        rko[0] ^= rcon[iteration] ^ rki[0];
        rko[1] = rko[0] ^ rki[1];
        rko[2] = rko[1] ^ rki[2];
        rko[3] = rko[2] ^ rki[3];
        if (rko + key_len_in_words > rko_end) {
            /* Do not write overflow words. */
            continue;
        }
#if !defined(MBEDTLS_AES_ONLY_128_BIT_KEY_LENGTH)
        switch (key_bit_length) {
            case 128:
                break;
            case 192:
                rko[4] = rko[3] ^ rki[4];
                rko[5] = rko[4] ^ rki[5];
                break;
            case 256:
                rko[4] = aes_sub_word(rko[3]) ^ rki[4];
                rko[5] = rko[4] ^ rki[5];
                rko[6] = rko[5] ^ rki[6];
                rko[7] = rko[6] ^ rki[7];
                break;
        }
#endif /* !MBEDTLS_AES_ONLY_128_BIT_KEY_LENGTH */
    }
}

/*
 * Key expansion, wrapper
 */
int mbedtls_aesce_setkey_enc(unsigned char *rk,
                             const unsigned char *key,
                             size_t bits)
{
    switch (bits) {
        case 128:
        case 192:
        case 256:
            aesce_setkey_enc(rk, key, bits);
            break;
        default:
            return MBEDTLS_ERR_AES_INVALID_KEY_LENGTH;
    }

    return 0;
}

#if defined(MBEDTLS_GCM_C)

#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ == 5
/* Some intrinsics are not available for GCC 5.X. */
#define vreinterpretq_p64_u8(a) ((poly64x2_t) a)
#define vreinterpretq_u8_p128(a) ((uint8x16_t) a)
static inline poly64_t vget_low_p64(poly64x2_t __a)
{
    uint64x2_t tmp = (uint64x2_t) (__a);
    uint64x1_t lo = vcreate_u64(vgetq_lane_u64(tmp, 0));
    return (poly64_t) (lo);
}
#endif /* !__clang__ && __GNUC__ && __GNUC__ == 5 */

/* vmull_p64/vmull_high_p64 wrappers.
 *
 * Older compilers miss some intrinsic functions for `poly*_t`. We use
 * uint8x16_t and uint8x16x3_t as input/output parameters.
 */
#if defined(__GNUC__) && !defined(__clang__)
/* GCC reports an incompatible-type error without the cast: it considers
 * poly64_t and poly64x1_t to be different types, unlike MSVC and Clang. */
#define MBEDTLS_VMULL_P64(a, b) vmull_p64((poly64_t) a, (poly64_t) b)
#else
/* MSVC reports `error C2440: 'type cast'` when the cast is present, while
 * Clang accepts the code either way (it treats poly64_t and poly64x1_t as
 * the same type), so no cast is used here. */
#define MBEDTLS_VMULL_P64(a, b) vmull_p64(a, b)
#endif
static inline uint8x16_t pmull_low(uint8x16_t a, uint8x16_t b)
{
    return vreinterpretq_u8_p128(
        MBEDTLS_VMULL_P64(
            vget_low_p64(vreinterpretq_p64_u8(a)),
            vget_low_p64(vreinterpretq_p64_u8(b))
            ));
}

static inline uint8x16_t pmull_high(uint8x16_t a, uint8x16_t b)
{
    return vreinterpretq_u8_p128(
        vmull_high_p64(vreinterpretq_p64_u8(a),
                       vreinterpretq_p64_u8(b)));
}

/* GHASH does 128-bit polynomial multiplication on blocks in GF(2^128)
 * defined by `x^128 + x^7 + x^2 + x + 1`.
 *
 * Arm64 only has 64-bit -> 128-bit polynomial multipliers, so we need four
 * 64-bit multiplies to implement one 128-bit multiplication.
 *
 * `poly_mult_128` executes the polynomial multiplication and outputs a
 * 256-bit result represented by three 128-bit vectors, as a code-size
 * optimization.
 *
 * Output layout:
 * |            |             |             |
 * |------------|-------------|-------------|
 * | ret.val[0] | h3:h2:00:00 | high 128b   |
 * | ret.val[1] |   :m2:m1:00 | middle 128b |
 * | ret.val[2] |   :  :l1:l0 | low 128b    |
 */
static inline uint8x16x3_t poly_mult_128(uint8x16_t a, uint8x16_t b)
{
    uint8x16x3_t ret;
    uint8x16_t h, m, l; /* retval high/middle/low */
    uint8x16_t c, d, e;

    h = pmull_high(a, b);  /* h3:h2:00:00 = a1*b1 */
    l = pmull_low(a, b);   /*  :  :l1:l0 = a0*b0 */
    c = vextq_u8(b, b, 8); /*  :c1:c0 = b0:b1 */
    d = pmull_high(a, c);  /*  :d2:d1:00 = a1*b0 */
    e = pmull_low(a, c);   /*  :e2:e1:00 = a0*b1 */
    m = veorq_u8(d, e);    /*  :m2:m1:00 = d + e */

    ret.val[0] = h;
    ret.val[1] = m;
    ret.val[2] = l;
    return ret;
}

/*
 * Modulo reduction.
 *
 * See: https://www.researchgate.net/publication/285612706_Implementing_GCM_on_ARMv8
 *
 * Section 4.3
 *
 * Modular reduction is slightly more complex. Write the GCM modulus as
 * f(z) = z^128 + r(z), where r(z) = z^7 + z^2 + z + 1. The well-known
 * approach is to consider that z^128 ≡ r(z) (mod z^128 + r(z)), allowing us
 * to write the 256-bit operand to be reduced as
 * a(z) = h(z)z^128 + l(z) ≡ h(z)r(z) + l(z). That is, we simply multiply
 * the higher part of the operand by r(z) and add it to l(z). If the result
 * is still larger than 128 bits, we reduce again.
 */
| 476 | static inline uint8x16_t poly_mult_reduce(uint8x16x3_t input) |
Jerry Yu | df87a12 | 2023-01-10 18:17:15 +0800 | [diff] [blame] | 477 | { |
Jerry Yu | 1ac7f6b | 2023-03-07 15:44:59 +0800 | [diff] [blame] | 478 | uint8x16_t const ZERO = vdupq_n_u8(0); |
Jerry Yu | 8b6df3f | 2023-03-21 16:59:13 +0800 | [diff] [blame] | 479 | |
Jerry Yu | df87a12 | 2023-01-10 18:17:15 +0800 | [diff] [blame] | 480 | uint64x2_t r = vreinterpretq_u64_u8(vdupq_n_u8(0x87)); |
Jerry Yu | 8b6df3f | 2023-03-21 16:59:13 +0800 | [diff] [blame] | 481 | #if defined(__GNUC__) |
| 482 | /* use 'asm' as an optimisation barrier to prevent loading MODULO from |
| 483 | * memory. It is for GNUC compatible compilers. |
| 484 | */ |
Jerry Yu | df87a12 | 2023-01-10 18:17:15 +0800 | [diff] [blame] | 485 | asm ("" : "+w" (r)); |
Jerry Yu | 8b6df3f | 2023-03-21 16:59:13 +0800 | [diff] [blame] | 486 | #endif |
Jerry Yu | 1ac7f6b | 2023-03-07 15:44:59 +0800 | [diff] [blame] | 487 | uint8x16_t const MODULO = vreinterpretq_u8_u64(vshrq_n_u64(r, 64 - 8)); |
Jerry Yu | 8f81060 | 2023-03-14 17:28:52 +0800 | [diff] [blame] | 488 | uint8x16_t h, m, l; /* input high/middle/low 128b */ |
Jerry Yu | 1ac7f6b | 2023-03-07 15:44:59 +0800 | [diff] [blame] | 489 | uint8x16_t c, d, e, f, g, n, o; |
| 490 | h = input.val[0]; /* h3:h2:00:00 */ |
| 491 | m = input.val[1]; /* :m2:m1:00 */ |
| 492 | l = input.val[2]; /* : :l1:l0 */ |
| 493 | c = pmull_high(h, MODULO); /* :c2:c1:00 = reduction of h3 */ |
| 494 | d = pmull_low(h, MODULO); /* : :d1:d0 = reduction of h2 */ |
| 495 | e = veorq_u8(c, m); /* :e2:e1:00 = m2:m1:00 + c2:c1:00 */ |
| 496 | f = pmull_high(e, MODULO); /* : :f1:f0 = reduction of e2 */ |
| 497 | g = vextq_u8(ZERO, e, 8); /* : :g1:00 = e1:00 */ |
| 498 | n = veorq_u8(d, l); /* : :n1:n0 = d1:d0 + l1:l0 */ |
| 499 | o = veorq_u8(n, f); /* o1:o0 = f1:f0 + n1:n0 */ |
| 500 | return veorq_u8(o, g); /* = o1:o0 + g1:00 */ |
Jerry Yu | df87a12 | 2023-01-10 18:17:15 +0800 | [diff] [blame] | 501 | } |
/*
 * GCM multiplication: c = a times b in GF(2^128)
 */
void mbedtls_aesce_gcm_mult(unsigned char c[16],
                            const unsigned char a[16],
                            const unsigned char b[16])
{
    uint8x16_t va, vb, vc;
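    /* GCM's bit representation is reflected with respect to the polynomial
     * order the PMULL instructions expect; reversing the bits of each byte
     * on input (vrbitq_u8) and again on output lets the carry-less multiply
     * and reduction above compute the GCM product directly. */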
    va = vrbitq_u8(vld1q_u8(&a[0]));
    vb = vrbitq_u8(vld1q_u8(&b[0]));
    vc = vrbitq_u8(poly_mult_reduce(poly_mult_128(va, vb)));
    vst1q_u8(&c[0], vc);
}

#endif /* MBEDTLS_GCM_C */

#if defined(MBEDTLS_POP_TARGET_PRAGMA)
#if defined(__clang__)
#pragma clang attribute pop
#elif defined(__GNUC__)
#pragma GCC pop_options
#endif
#undef MBEDTLS_POP_TARGET_PRAGMA
#endif

#endif /* MBEDTLS_HAVE_ARM64 */

#endif /* MBEDTLS_AESCE_C */