blob: 6523ccf1536b674410b8d07d49b8263cca289ddb [file] [log] [blame]
gabor-mezei-armd1125342021-07-12 16:31:22 +02001/**
2 * Constant-time functions
3 *
4 * Copyright The Mbed TLS Contributors
5 * SPDX-License-Identifier: Apache-2.0
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License"); you may
8 * not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
15 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 */
19
Gilles Peskine449bd832023-01-11 14:50:10 +010020/*
Gabor Mezei642eeb22021-11-03 16:13:32 +010021 * The following functions are implemented without using comparison operators, as those
Gabor Mezeieab90bc2021-10-18 16:09:41 +020022 * might be translated to branches by some compilers on some platforms.
23 */
24
Dave Rodgman40a41d02023-05-17 11:59:56 +010025#include <limits.h>
26
gabor-mezei-armd1125342021-07-12 16:31:22 +020027#include "common.h"
Gabor Mezei22c9a6f2021-10-20 12:09:35 +020028#include "constant_time_internal.h"
Gabor Mezei765862c2021-10-19 12:22:25 +020029#include "mbedtls/constant_time.h"
gabor-mezei-arm1349ffd2021-09-27 14:28:31 +020030#include "mbedtls/error.h"
gabor-mezei-arm5b3a32d2021-09-29 10:50:31 +020031#include "mbedtls/platform_util.h"
gabor-mezei-armdb9a38c2021-09-27 11:28:54 +020032
Dave Rodgmanfa5a4bb2023-07-28 16:13:52 +010033#include "../tests/include/test/constant_flow.h"
34
gabor-mezei-armfdb71182021-09-27 16:11:12 +020035#include <string.h>
Andrzej Kurek1c7a9982023-05-30 09:21:20 -040036
37#if defined(MBEDTLS_USE_PSA_CRYPTO) && defined(MBEDTLS_SSL_SOME_SUITES_USE_MAC)
38#include "psa/crypto.h"
Andrzej Kurek00644842023-05-30 05:45:00 -040039/* Define a local translating function to save code size by not using too many
40 * arguments in each translating place. */
41static int local_err_translation(psa_status_t status)
42{
43 return psa_status_to_mbedtls(status, psa_to_ssl_errors,
Andrzej Kurek1e4a0302023-05-30 09:45:17 -040044 ARRAY_LENGTH(psa_to_ssl_errors),
Andrzej Kurek00644842023-05-30 05:45:00 -040045 psa_generic_status_to_mbedtls);
46}
47#define PSA_TO_MBEDTLS_ERR(status) local_err_translation(status)
Andrzej Kurek8a045ce2022-12-23 11:00:06 -050048#endif
gabor-mezei-arm3f90fd52021-09-27 12:55:33 +020049
Dave Rodgman58c80f42023-06-12 18:19:46 +010050#if !defined(MBEDTLS_CT_ASM)
51/*
Dave Rodgman1ab0b482023-06-12 18:22:18 +010052 * Define an object with the value zero, such that the compiler cannot prove that it
53 * has the value zero (because it is volatile, it "may be modified in ways unknown to
54 * the implementation").
55 */
Dave Rodgman58c80f42023-06-12 18:19:46 +010056volatile mbedtls_ct_uint_t mbedtls_ct_zero = 0;
57#endif
58
Dave Rodgman36dfc5a2022-12-22 15:04:43 +000059/*
Dave Rodgman051225d2022-12-30 21:25:35 +000060 * Define MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS where assembly is present to
61 * perform fast unaligned access to volatile data.
Dave Rodgman36dfc5a2022-12-22 15:04:43 +000062 *
63 * This is needed because mbedtls_get_unaligned_uintXX etc don't support volatile
64 * memory accesses.
65 *
Dave Rodgman051225d2022-12-30 21:25:35 +000066 * Some of these definitions could be moved into alignment.h but for now they are
67 * only used here.
Dave Rodgman36dfc5a2022-12-22 15:04:43 +000068 */
Dave Rodgman40a41d02023-05-17 11:59:56 +010069#if defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS) && \
Dave Rodgman9fbb0cf2023-06-28 18:52:02 +010070 ((defined(MBEDTLS_CT_ARM_ASM) && (UINTPTR_MAX == 0xfffffffful)) || \
71 defined(MBEDTLS_CT_AARCH64_ASM))
Dave Rodgman63e89b42023-06-21 11:55:17 +010072/* We check pointer sizes to avoid issues with them not matching register size requirements */
Dave Rodgman40a41d02023-05-17 11:59:56 +010073#define MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS
74
/* Perform a 32-bit unaligned load from volatile memory.
 *
 * \param p  Pointer (not necessarily 4-byte aligned) to volatile data.
 *
 * \return   The 32 bits at \p p, in native byte order.
 *
 * Needed because mbedtls_get_unaligned_uint32() does not support volatile
 * accesses; only compiled where assembly for the target is available (see
 * the MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS guard above). */
static inline uint32_t mbedtls_get_unaligned_volatile_uint32(volatile const unsigned char *p)
{
    /* This is UB, even where it's safe:
     *    return *((volatile uint32_t*)p);
     * so instead the same thing is expressed in assembly below.
     */
    uint32_t r;
#if defined(MBEDTLS_CT_ARM_ASM)
    asm volatile ("ldr %0, [%1]" : "=r" (r) : "r" (p) :);
#elif defined(MBEDTLS_CT_AARCH64_ASM)
    /* %w0 selects the 32-bit form of the destination register on AArch64. */
    asm volatile ("ldr %w0, [%1]" : "=r" (r) : MBEDTLS_ASM_AARCH64_PTR_CONSTRAINT(p) :);
#else
#error No assembly defined for mbedtls_get_unaligned_volatile_uint32
#endif
    return r;
}
Dave Rodgman40a41d02023-05-17 11:59:56 +010091#endif /* defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS) &&
92 (defined(MBEDTLS_CT_ARM_ASM) || defined(MBEDTLS_CT_AARCH64_ASM)) */
Dave Rodgman36dfc5a2022-12-22 15:04:43 +000093
/* Constant-time buffer comparison.
 *
 * \param a  First buffer.
 * \param b  Second buffer.
 * \param n  Number of bytes to compare.
 *
 * \return   0 if the \p n bytes at \p a and \p b are identical, non-zero
 *           otherwise. The non-zero value is the bitwise OR of all byte
 *           (or word) XOR differences, truncated to int — only its
 *           zero/non-zero status is meaningful.
 *
 * Unlike memcmp(), this always reads both buffers in full (no early exit),
 * so its timing depends only on \p n, not on the data. */
int mbedtls_ct_memcmp(const void *a,
                      const void *b,
                      size_t n)
{
    size_t i = 0;
    /*
     * `A` and `B` are cast to volatile to ensure that the compiler
     * generates code that always fully reads both buffers.
     * Otherwise it could generate a test to exit early if `diff` has all
     * bits set early in the loop.
     */
    volatile const unsigned char *A = (volatile const unsigned char *) a;
    volatile const unsigned char *B = (volatile const unsigned char *) b;
    uint32_t diff = 0;

#if defined(MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS)
    /* Fast path: compare 4 bytes at a time where volatile unaligned
     * 32-bit loads are available. */
    for (; (i + 4) <= n; i += 4) {
        uint32_t x = mbedtls_get_unaligned_volatile_uint32(A + i);
        uint32_t y = mbedtls_get_unaligned_volatile_uint32(B + i);
        diff |= x ^ y;
    }
#endif

    /* Byte-by-byte tail (or the whole comparison when the fast path is
     * not compiled in). */
    for (; i < n; i++) {
        /* Read volatile data in order before computing diff.
         * This avoids IAR compiler warning:
         * 'the order of volatile accesses is undefined ..' */
        unsigned char x = A[i], y = B[i];
        diff |= x ^ y;
    }

    return (int) diff;
}
127
Gabor Mezeie2123792021-10-18 17:05:06 +0200128#if defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT)
129
/* Shift the contents of \p start left by \p offset bytes, in code whose
 * memory-access pattern and timing do not depend on \p offset.
 *
 * \param start   Buffer of \p total bytes, updated in place.
 * \param total   Size of the buffer in bytes.
 * \param offset  Number of bytes to shift left by (secret value).
 *
 * After the call, byte i of \p start holds the old byte (i + offset) for
 * i + offset < total; the trailing bytes are set to zero (out-of-range
 * source reads are masked to 0 below). Every iteration reads and writes
 * exactly one byte regardless of \p offset. */
void mbedtls_ct_memmove_left(void *start, size_t total, size_t offset)
{
    /* In case of inlining, ensure that code generated is independent of the value of offset
     * (e.g., if the compiler knows that offset == 0, it might be able to optimise this function
     * to a no-op). */
    size_t hidden_offset = mbedtls_ct_compiler_opaque(offset);

    /* During this loop, j will take every value from [0..total) exactly once,
     * regardless of the value of hidden_offset (it only changes the initial
     * value for j).
     *
     * For this reason, when testing, it is safe to mark hidden_offset as non-secret.
     * This prevents the const-flow checkers from generating a false-positive.
     */
    TEST_CF_PUBLIC(&hidden_offset, sizeof(hidden_offset));

    /* Iterate over the array, reading each byte once and writing each byte once. */
    for (size_t i = 0; i < total; i++) {
        /* Each iteration, read one byte, and write it to start[i].
         *
         * The source address will either be the "true" source address, if it's in the range
         * where data is getting moved, or (if the source address is off the end of the
         * array), it will wrap back to the start.
         *
         * If the source address is out of range, mask it to zero.
         */

        // The offset that we will read from (if in range)
        size_t j = i + hidden_offset;

        // Is the address off the end of the array?
        mbedtls_ct_condition_t not_dummy = mbedtls_ct_bool_lt(j, total);

        // Bring read address into range
        j = j % total;

        // Read a byte
        uint8_t b = ((uint8_t *) start)[j];

        // Set it to zero if it's out of range
        b = mbedtls_ct_uint_if0(not_dummy, b);

        // Write the byte to start[i]
        ((uint8_t *) start)[i] = b;
    }
}
gabor-mezei-armdee0fd32021-09-27 13:34:25 +0200176
Gabor Mezeie2123792021-10-18 17:05:06 +0200177#endif /* MBEDTLS_PKCS1_V15 && MBEDTLS_RSA_C && ! MBEDTLS_RSA_ALT */
178
/* Conditional memcpy without branches on the condition.
 *
 * \param condition  All-ones to select \p src1, all-zeros to select \p src2.
 * \param dest       Destination buffer (\p len bytes).
 * \param src1       Source used when \p condition is true.
 * \param src2       Source used when \p condition is false; may be NULL,
 *                   in which case \p dest is left unchanged when the
 *                   condition is false.
 * \param len        Number of bytes to copy.
 *
 * Selection is done by masking both sources and OR-ing them together, so
 * the same memory accesses happen whichever way the condition goes. */
void mbedtls_ct_memcpy_if(mbedtls_ct_condition_t condition,
                          unsigned char *dest,
                          const unsigned char *src1,
                          const unsigned char *src2,
                          size_t len)
{
    /* not_mask goes through mbedtls_ct_compiler_opaque() so the compiler
     * cannot observe that mask and not_mask are complements and fold the
     * selection back into a branch. */
    const uint32_t mask = (uint32_t) condition;
    const uint32_t not_mask = (uint32_t) ~mbedtls_ct_compiler_opaque(condition);

    /* If src2 is NULL and condition == 0, then this function has no effect.
     * In this case, copy from dest back into dest. */
    if (src2 == NULL) {
        src2 = dest;
    }

    /* dest[i] = c1 == c2 ? src[i] : dest[i] */
    size_t i = 0;
#if defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS)
    /* Fast path: select 4 bytes at a time via unaligned 32-bit accesses. */
    for (; (i + 4) <= len; i += 4) {
        uint32_t a = mbedtls_get_unaligned_uint32(src1 + i) & mask;
        uint32_t b = mbedtls_get_unaligned_uint32(src2 + i) & not_mask;
        mbedtls_put_unaligned_uint32(dest + i, a | b);
    }
#endif /* MBEDTLS_EFFICIENT_UNALIGNED_ACCESS */
    /* Byte-by-byte tail. */
    for (; i < len; i++) {
        dest[i] = (src1[i] & mask) | (src2[i] & not_mask);
    }
}
207
Gilles Peskine449bd832023-01-11 14:50:10 +0100208void mbedtls_ct_memcpy_offset(unsigned char *dest,
209 const unsigned char *src,
210 size_t offset,
211 size_t offset_min,
212 size_t offset_max,
213 size_t len)
gabor-mezei-arm0e7f71e2021-09-27 13:57:45 +0200214{
Gabor Mezei63bbba52021-10-18 16:17:57 +0200215 size_t offsetval;
gabor-mezei-arm0e7f71e2021-09-27 13:57:45 +0200216
Gilles Peskine449bd832023-01-11 14:50:10 +0100217 for (offsetval = offset_min; offsetval <= offset_max; offsetval++) {
Dave Rodgman585f7f72023-05-17 17:45:33 +0100218 mbedtls_ct_memcpy_if(mbedtls_ct_bool_eq(offsetval, offset), dest, src + offsetval, NULL,
219 len);
gabor-mezei-arm0e7f71e2021-09-27 13:57:45 +0200220 }
221}
gabor-mezei-arm1349ffd2021-09-27 14:28:31 +0200222
Dave Rodgmandebf8672023-05-17 12:12:44 +0100223#if defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT)
224
/* Set \p len bytes of \p buf to zero if \p condition is true (all-ones),
 * leave them unchanged if it is false (all-zeros) — without branching on
 * the condition.
 *
 * \param condition  mbedtls_ct_condition_t mask selecting whether to zeroize.
 * \param buf        Buffer to conditionally zeroize.
 * \param len        Number of bytes.
 *
 * Works by AND-ing every byte with ~condition: all-ones condition gives a
 * zero mask (clears), all-zeros condition gives an all-ones mask (no-op),
 * with identical memory accesses either way. */
void mbedtls_ct_zeroize_if(mbedtls_ct_condition_t condition, void *buf, size_t len)
{
    uint32_t mask = (uint32_t) ~condition;
    uint8_t *p = (uint8_t *) buf;
    size_t i = 0;
#if defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS)
    /* Fast path: mask 4 bytes at a time via unaligned 32-bit accesses. */
    for (; (i + 4) <= len; i += 4) {
        mbedtls_put_unaligned_uint32((void *) (p + i),
                                     mbedtls_get_unaligned_uint32((void *) (p + i)) & mask);
    }
#endif
    /* Byte-by-byte tail. */
    for (; i < len; i++) {
        p[i] = p[i] & mask;
    }
}
240
241#endif /* defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT) */