/**
 * Constant-time functions
 *
 * Copyright The Mbed TLS Contributors
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * The following functions are implemented without using comparison operators, as those
 * might be translated to branches by some compilers on some platforms.
 */
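
/* As an illustrative sketch only (the real helpers live in
 * constant_time_internal.h), a branchless "x != 0" test for a uint32_t can
 * be built from arithmetic alone:
 *
 *     uint32_t nonzero = (x | (0u - x)) >> 31;   // 1 if x != 0, else 0
 *
 * For any non-zero x, at least one of x and -x has the top bit set, so no
 * comparison operator (and hence no potential branch) is needed. */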

#include <stdint.h>
#include <limits.h>

#include "common.h"
#include "constant_time_internal.h"
#include "mbedtls/constant_time.h"
#include "mbedtls/error.h"
#include "mbedtls/platform_util.h"

#include <string.h>

#if defined(MBEDTLS_USE_PSA_CRYPTO) && defined(MBEDTLS_SSL_SOME_SUITES_USE_MAC)
#include "psa/crypto.h"
/* Define a local translation function to save code size: it avoids repeating
 * the same long argument list at every translation site. */
static int local_err_translation(psa_status_t status)
{
    return psa_status_to_mbedtls(status, psa_to_ssl_errors,
                                 ARRAY_LENGTH(psa_to_ssl_errors),
                                 psa_generic_status_to_mbedtls);
}
#define PSA_TO_MBEDTLS_ERR(status) local_err_translation(status)
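/* Illustrative (hypothetical) call site, assuming a function returning
 * psa_status_t:
 *
 *     ret = PSA_TO_MBEDTLS_ERR(psa_mac_abort(&operation));
 *
 * so each use costs a single-argument call rather than repeating the full
 * psa_status_to_mbedtls() argument list. */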
#endif

#if !defined(MBEDTLS_CT_ASM)
/*
 * Define an object with the value zero, such that the compiler cannot prove that it
 * has the value zero (because it is volatile, it "may be modified in ways unknown to
 * the implementation").
 */
volatile mbedtls_ct_uint_t mbedtls_ct_zero = 0;
#endif
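
/* A minimal sketch (not necessarily the exact macro used elsewhere in the
 * library) of how such a value lets an expression be made opaque to the
 * optimiser:
 *
 *     #define mbedtls_ct_compiler_opaque(x) ((x) ^ mbedtls_ct_zero ^ mbedtls_ct_zero)
 *
 * XOR-ing with the volatile zero twice leaves the value unchanged, but the
 * compiler must emit both reads and so cannot constant-fold the result. */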

/*
 * Define MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS where assembly is present to
 * perform fast unaligned access to volatile data.
 *
 * This is needed because mbedtls_get_unaligned_uintXX etc. don't support volatile
 * memory accesses.
 *
 * Some of these definitions could be moved into alignment.h but for now they are
 * only used here.
 */
#if defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS) && \
    ((defined(MBEDTLS_CT_ARM_ASM) && (UINTPTR_MAX == 0xfffffffful)) || \
     defined(MBEDTLS_CT_AARCH64_ASM))
/* We check pointer sizes to avoid issues with them not matching register size requirements */
#define MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS

static inline uint32_t mbedtls_get_unaligned_volatile_uint32(volatile const unsigned char *p)
{
    /* This is UB, even where it's safe:
     *    return *((volatile uint32_t*)p);
     * so instead the same thing is expressed in assembly below.
     */
    uint32_t r;
#if defined(MBEDTLS_CT_ARM_ASM)
    asm volatile ("ldr %0, [%1]" : "=r" (r) : "r" (p) :);
#elif defined(MBEDTLS_CT_AARCH64_ASM)
    asm volatile ("ldr %w0, [%1]" : "=r" (r) : MBEDTLS_ASM_AARCH64_PTR_CONSTRAINT(p) :);
#else
#error "No assembly defined for mbedtls_get_unaligned_volatile_uint32"
#endif
    return r;
}
#endif /* defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS) &&
          (defined(MBEDTLS_CT_ARM_ASM) || defined(MBEDTLS_CT_AARCH64_ASM)) */

int mbedtls_ct_memcmp(const void *a,
                      const void *b,
                      size_t n)
{
    size_t i = 0;
    /*
     * `A` and `B` are cast to volatile to ensure that the compiler
     * generates code that always fully reads both buffers.
     * Otherwise it could generate a test to exit early if `diff` has all
     * bits set early in the loop.
     */
    volatile const unsigned char *A = (volatile const unsigned char *) a;
    volatile const unsigned char *B = (volatile const unsigned char *) b;
    uint32_t diff = 0;

#if defined(MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS)
    for (; (i + 4) <= n; i += 4) {
        uint32_t x = mbedtls_get_unaligned_volatile_uint32(A + i);
        uint32_t y = mbedtls_get_unaligned_volatile_uint32(B + i);
        diff |= x ^ y;
    }
#endif

    for (; i < n; i++) {
        /* Read volatile data in order before computing diff.
         * This avoids IAR compiler warning:
         * 'the order of volatile accesses is undefined ..' */
        unsigned char x = A[i], y = B[i];
        diff |= x ^ y;
    }

#if (INT_MAX < INT32_MAX)
    /* We don't support int smaller than 32 bits, but if someone tried to build
     * with this configuration, there is a risk that, for differing data, the
     * only bits set in diff are in the top 16 bits, and would be lost by a
     * simple cast from uint32 to int.
     * This would have significant security implications, so protect against it. */
#error "mbedtls_ct_memcmp() requires minimum 32-bit ints"
#else
    /* The bit-twiddling ensures that when we cast uint32_t to int, we are casting
     * a value that is in the range 0..INT_MAX; a larger value would result in
     * implementation-defined behaviour.
     *
     * This ensures that the value returned by the function is non-zero iff
     * diff is non-zero.
     */
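    /* Worked example: if diff == 0x00010000, then (diff & 0xffff) == 0 and
     * (diff >> 16) == 0x0001, so the function returns 0x0001 rather than
     * losing the difference to truncation. */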
    return (int) ((diff & 0xffff) | (diff >> 16));
#endif
}
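
/* Illustrative use (hypothetical buffers): comparing a computed MAC against a
 * received one, where exiting on the first mismatching byte would leak the
 * mismatch position through timing:
 *
 *     if (mbedtls_ct_memcmp(computed_mac, received_mac, mac_len) != 0) {
 *         return MBEDTLS_ERR_SSL_INVALID_MAC;
 *     }
 */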

#if defined(MBEDTLS_NIST_KW_C)

int mbedtls_ct_memcmp_partial(const void *a,
                              const void *b,
                              size_t n,
                              size_t skip_head,
                              size_t skip_tail)
{
    unsigned int diff = 0;

    volatile const unsigned char *A = (volatile const unsigned char *) a;
    volatile const unsigned char *B = (volatile const unsigned char *) b;

    size_t valid_end = n - skip_tail;

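    /* Only indices in [skip_head, valid_end) contribute to diff: e.g. with
     * n == 10, skip_head == 2 and skip_tail == 3, bytes 2..6 are compared. */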
    for (size_t i = 0; i < n; i++) {
        unsigned char x = A[i], y = B[i];
        unsigned int d = x ^ y;
        mbedtls_ct_condition_t valid = mbedtls_ct_bool_and(mbedtls_ct_uint_ge(i, skip_head),
                                                           mbedtls_ct_uint_lt(i, valid_end));
        diff |= mbedtls_ct_uint_if_else_0(valid, d);
    }

    /* Since we go byte-by-byte, the only bits set will be in the bottom 8 bits, so the
     * cast from uint to int is safe. */
    return (int) diff;
}

#endif

#if defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT)

void mbedtls_ct_memmove_left(void *start, size_t total, size_t offset)
{
    volatile unsigned char *buf = start;
    for (size_t i = 0; i < total; i++) {
        mbedtls_ct_condition_t no_op = mbedtls_ct_uint_gt(total - offset, i);
        /* The first `total - offset` passes are a no-op. The last
         * `offset` passes shift the data one byte to the left and
         * zero out the last byte. */
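        /* E.g. with total == 4, offset == 2 and buf == {a, b, c, d}: passes
         * 0 and 1 leave the buffer unchanged, pass 2 yields {b, c, d, 0} and
         * pass 3 yields {c, d, 0, 0}, i.e. the data has moved left by 2. */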
        for (size_t n = 0; n < total - 1; n++) {
            unsigned char current = buf[n];
            unsigned char next = buf[n+1];
            buf[n] = mbedtls_ct_uint_if(no_op, current, next);
        }
        buf[total-1] = mbedtls_ct_uint_if_else_0(no_op, buf[total-1]);
    }
}

#endif /* MBEDTLS_PKCS1_V15 && MBEDTLS_RSA_C && ! MBEDTLS_RSA_ALT */

void mbedtls_ct_memcpy_if(mbedtls_ct_condition_t condition,
                          unsigned char *dest,
                          const unsigned char *src1,
                          const unsigned char *src2,
                          size_t len)
{
#if defined(MBEDTLS_CT_SIZE_64)
    const uint64_t mask = (uint64_t) condition;
    const uint64_t not_mask = (uint64_t) ~mbedtls_ct_compiler_opaque(condition);
#else
    const uint32_t mask = (uint32_t) condition;
    const uint32_t not_mask = (uint32_t) ~mbedtls_ct_compiler_opaque(condition);
#endif

    /* If src2 is NULL, set up src2 so that we read from the destination address.
     *
     * This means that if src2 == NULL && condition is false, the result will be a
     * no-op because we read from dest and write the same data back into dest.
     */
    if (src2 == NULL) {
        src2 = dest;
    }

    /* dest[i] = condition ? src1[i] : src2[i] */
    size_t i = 0;
#if defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS)
#if defined(MBEDTLS_CT_SIZE_64)
    for (; (i + 8) <= len; i += 8) {
        uint64_t a = mbedtls_get_unaligned_uint64(src1 + i) & mask;
        uint64_t b = mbedtls_get_unaligned_uint64(src2 + i) & not_mask;
        mbedtls_put_unaligned_uint64(dest + i, a | b);
    }
#else
    for (; (i + 4) <= len; i += 4) {
        uint32_t a = mbedtls_get_unaligned_uint32(src1 + i) & mask;
        uint32_t b = mbedtls_get_unaligned_uint32(src2 + i) & not_mask;
        mbedtls_put_unaligned_uint32(dest + i, a | b);
    }
#endif /* defined(MBEDTLS_CT_SIZE_64) */
#endif /* MBEDTLS_EFFICIENT_UNALIGNED_ACCESS */
    for (; i < len; i++) {
        dest[i] = (src1[i] & mask) | (src2[i] & not_mask);
    }
}
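
/* Illustrative use (hypothetical buffers): select between real and dummy data
 * without branching on the secret condition:
 *
 *     mbedtls_ct_memcpy_if(ok, output, plaintext, padding, len);
 *
 * copies plaintext into output when ok is true (all-ones) and padding when it
 * is false (all-zero). */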

void mbedtls_ct_memcpy_offset(unsigned char *dest,
                              const unsigned char *src,
                              size_t offset,
                              size_t offset_min,
                              size_t offset_max,
                              size_t len)
{
    size_t offsetval;

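    /* Illustratively, with offset_min == 0 and offset_max == 2 the loop below
     * makes three passes; only the pass where offsetval == offset copies src
     * data, but the memory access pattern is independent of the secret offset. */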
    for (offsetval = offset_min; offsetval <= offset_max; offsetval++) {
        mbedtls_ct_memcpy_if(mbedtls_ct_uint_eq(offsetval, offset), dest, src + offsetval, NULL,
                             len);
    }
}

#if defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT)

void mbedtls_ct_zeroize_if(mbedtls_ct_condition_t condition, void *buf, size_t len)
{
    uint32_t mask = (uint32_t) ~condition;
    uint8_t *p = (uint8_t *) buf;
    size_t i = 0;
#if defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS)
    for (; (i + 4) <= len; i += 4) {
        mbedtls_put_unaligned_uint32((void *) (p + i),
                                     mbedtls_get_unaligned_uint32((void *) (p + i)) & mask);
    }
#endif
    for (; i < len; i++) {
        p[i] = p[i] & mask;
    }
}

#endif /* defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT) */