blob: a17001dd91b1247f83213a80442800dbb2614604 [file] [log] [blame]
Dave Rodgmanfbc23222022-11-24 18:07:37 +00001/**
2 * \file alignment.h
3 *
4 * \brief Utility code for dealing with unaligned memory accesses
5 */
6/*
7 * Copyright The Mbed TLS Contributors
Dave Rodgman16799db2023-11-02 19:47:20 +00008 * SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
Dave Rodgmanfbc23222022-11-24 18:07:37 +00009 */
10
11#ifndef MBEDTLS_LIBRARY_ALIGNMENT_H
12#define MBEDTLS_LIBRARY_ALIGNMENT_H
13
14#include <stdint.h>
Dave Rodgman96d61d12022-11-24 19:33:22 +000015#include <string.h>
Dave Rodgmanf7f1f742022-11-28 14:52:45 +000016#include <stdlib.h>
Dave Rodgmanfbc23222022-11-24 18:07:37 +000017
/*
 * Define MBEDTLS_EFFICIENT_UNALIGNED_ACCESS for architectures where unaligned memory
 * accesses are known to be efficient.
 *
 * All functions defined here will behave correctly regardless, but might be less
 * efficient when this is not defined.
 */
#if defined(__ARM_FEATURE_UNALIGNED) \
    || defined(MBEDTLS_ARCH_IS_X86) || defined(MBEDTLS_ARCH_IS_X64) \
    || defined(MBEDTLS_PLATFORM_IS_WINDOWS_ON_ARM64)
/*
 * __ARM_FEATURE_UNALIGNED is defined where appropriate by armcc, gcc 7, clang 9
 * (and later versions) for Arm v7 and later; all x86 platforms should have
 * efficient unaligned access.
 *
 * https://learn.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions?view=msvc-170#alignment
 * specifies that on Windows-on-Arm64, unaligned access is safe (except for uncached
 * device memory).
 */
#define MBEDTLS_EFFICIENT_UNALIGNED_ACCESS
#endif
#if defined(__IAR_SYSTEMS_ICC__) && \
    (defined(MBEDTLS_ARCH_IS_ARM64) || defined(MBEDTLS_ARCH_IS_ARM32) \
    || defined(__ICCRX__) || defined(__ICCRL78__) || defined(__ICCRISCV__))
#pragma language=save
#pragma language=extended
#define MBEDTLS_POP_IAR_LANGUAGE_PRAGMA
/* IAR recommend this technique for accessing unaligned data in
 * https://www.iar.com/knowledge/support/technical-notes/compiler/accessing-unaligned-data
 * This results in a single load / store instruction (if unaligned access is supported).
 * According to that document, this is only supported on certain architectures.
 */
 #define UINT_UNALIGNED
typedef uint16_t __packed mbedtls_uint16_unaligned_t;
typedef uint32_t __packed mbedtls_uint32_unaligned_t;
typedef uint64_t __packed mbedtls_uint64_unaligned_t;
#elif defined(MBEDTLS_COMPILER_IS_GCC) && (MBEDTLS_GCC_VERSION >= 40504) && \
    ((MBEDTLS_GCC_VERSION < 60300) || (!defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS)))
/*
 * gcc may generate a branch to memcpy for calls like `memcpy(dest, src, 4)` rather than
 * generating some LDR or LDRB instructions (similar for stores).
 *
 * This is architecture dependent: x86-64 seems fine even with old gcc; 32-bit Arm
 * is affected. To keep it simple, we enable for all architectures.
 *
 * For versions of gcc < 5.4.0 this issue always happens.
 * For gcc < 6.3.0, this issue happens at -O0
 * For all versions, this issue happens iff unaligned access is not supported.
 *
 * For gcc 4.x, this implementation will generate byte-by-byte loads even if unaligned access is
 * supported, which is correct but not optimal.
 *
 * For performance (and code size, in some cases), we want to avoid the branch and just generate
 * some inline load/store instructions since the access is small and constant-size.
 *
 * The manual states:
 * "The packed attribute specifies that a variable or structure field should have the smallest
 * possible alignment - one byte for a variable"
 * https://gcc.gnu.org/onlinedocs/gcc-4.5.4/gcc/Variable-Attributes.html
 *
 * Previous implementations used __attribute__((__aligned__(1)), but had issues with a gcc bug:
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94662
 *
 * Tested with several versions of GCC from 4.5.0 up to 13.2.0
 * We don't enable for older than 4.5.0 as this has not been tested.
 */
 #define UINT_UNALIGNED_STRUCT
typedef struct {
    uint16_t x;
} __attribute__((packed)) mbedtls_uint16_unaligned_t;
typedef struct {
    uint32_t x;
} __attribute__((packed)) mbedtls_uint32_unaligned_t;
typedef struct {
    uint64_t x;
} __attribute__((packed)) mbedtls_uint64_unaligned_t;
 #endif
/*
 * We try to force mbedtls_(get|put)_unaligned_uintXX to be always inline,
 * because this results in code that is both smaller and faster. IAR and gcc
 * both benefit from this when optimising for size.
 */

/**
 * Read an unsigned 16-bit integer from the given address, which need not
 * be aligned.
 *
 * \param p pointer to 2 bytes of data
 * \return Data at the given address
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline uint16_t mbedtls_get_unaligned_uint16(const void *p)
{
    uint16_t value;
#if defined(UINT_UNALIGNED)
    /* IAR: a packed typedef makes the compiler emit an unaligned load. */
    mbedtls_uint16_unaligned_t *src = (mbedtls_uint16_unaligned_t *) p;
    value = *src;
#elif defined(UINT_UNALIGNED_STRUCT)
    /* gcc: read through a packed struct to avoid a memcpy call. */
    mbedtls_uint16_unaligned_t *src = (mbedtls_uint16_unaligned_t *) p;
    value = src->x;
#else
    /* Portable fallback: compilers typically lower this to a plain load. */
    memcpy(&value, p, sizeof(value));
#endif
    return value;
}
129
/**
 * Write an unsigned 16-bit integer to the given address, which need not
 * be aligned.
 *
 * \param p pointer to 2 bytes of data
 * \param x data to write
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline void mbedtls_put_unaligned_uint16(void *p, uint16_t x)
{
#if defined(UINT_UNALIGNED)
    /* IAR: a packed typedef makes the compiler emit an unaligned store. */
    mbedtls_uint16_unaligned_t *dst = (mbedtls_uint16_unaligned_t *) p;
    *dst = x;
#elif defined(UINT_UNALIGNED_STRUCT)
    /* gcc: write through a packed struct to avoid a memcpy call. */
    mbedtls_uint16_unaligned_t *dst = (mbedtls_uint16_unaligned_t *) p;
    dst->x = x;
#else
    /* Portable fallback: compilers typically lower this to a plain store. */
    memcpy(p, &x, sizeof(x));
#endif
}
154
/**
 * Read an unsigned 32-bit integer from the given address, which need not
 * be aligned.
 *
 * \param p pointer to 4 bytes of data
 * \return Data at the given address
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline uint32_t mbedtls_get_unaligned_uint32(const void *p)
{
    uint32_t value;
#if defined(UINT_UNALIGNED)
    /* IAR: a packed typedef makes the compiler emit an unaligned load. */
    mbedtls_uint32_unaligned_t *src = (mbedtls_uint32_unaligned_t *) p;
    value = *src;
#elif defined(UINT_UNALIGNED_STRUCT)
    /* gcc: read through a packed struct to avoid a memcpy call. */
    mbedtls_uint32_unaligned_t *src = (mbedtls_uint32_unaligned_t *) p;
    value = src->x;
#else
    /* Portable fallback: compilers typically lower this to a plain load. */
    memcpy(&value, p, sizeof(value));
#endif
    return value;
}
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000181
/**
 * Write an unsigned 32-bit integer to the given address, which need not
 * be aligned.
 *
 * \param p pointer to 4 bytes of data
 * \param x data to write
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline void mbedtls_put_unaligned_uint32(void *p, uint32_t x)
{
#if defined(UINT_UNALIGNED)
    /* IAR: a packed typedef makes the compiler emit an unaligned store. */
    mbedtls_uint32_unaligned_t *dst = (mbedtls_uint32_unaligned_t *) p;
    *dst = x;
#elif defined(UINT_UNALIGNED_STRUCT)
    /* gcc: write through a packed struct to avoid a memcpy call. */
    mbedtls_uint32_unaligned_t *dst = (mbedtls_uint32_unaligned_t *) p;
    dst->x = x;
#else
    /* Portable fallback: compilers typically lower this to a plain store. */
    memcpy(p, &x, sizeof(x));
#endif
}
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000206
/**
 * Read an unsigned 64-bit integer from the given address, which need not
 * be aligned.
 *
 * \param p pointer to 8 bytes of data
 * \return Data at the given address
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline uint64_t mbedtls_get_unaligned_uint64(const void *p)
{
    uint64_t value;
#if defined(UINT_UNALIGNED)
    /* IAR: a packed typedef makes the compiler emit an unaligned load. */
    mbedtls_uint64_unaligned_t *src = (mbedtls_uint64_unaligned_t *) p;
    value = *src;
#elif defined(UINT_UNALIGNED_STRUCT)
    /* gcc: read through a packed struct to avoid a memcpy call. */
    mbedtls_uint64_unaligned_t *src = (mbedtls_uint64_unaligned_t *) p;
    value = src->x;
#else
    /* Portable fallback: compilers typically lower this to a plain load. */
    memcpy(&value, p, sizeof(value));
#endif
    return value;
}
233
/**
 * Write an unsigned 64-bit integer to the given address, which need not
 * be aligned.
 *
 * \param p pointer to 8 bytes of data
 * \param x data to write
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline void mbedtls_put_unaligned_uint64(void *p, uint64_t x)
{
#if defined(UINT_UNALIGNED)
    /* IAR: a packed typedef makes the compiler emit an unaligned store. */
    mbedtls_uint64_unaligned_t *dst = (mbedtls_uint64_unaligned_t *) p;
    *dst = x;
#elif defined(UINT_UNALIGNED_STRUCT)
    /* gcc: write through a packed struct to avoid a memcpy call. */
    mbedtls_uint64_unaligned_t *dst = (mbedtls_uint64_unaligned_t *) p;
    dst->x = x;
#else
    /* Portable fallback: compilers typically lower this to a plain store. */
    memcpy(p, &x, sizeof(x));
#endif
}

#if defined(MBEDTLS_POP_IAR_LANGUAGE_PRAGMA)
#pragma language=restore
#endif
262
/** Byte Reading Macros
 *
 * Given a multi-byte integer \p x, MBEDTLS_BYTE_n retrieves the n-th
 * byte from x, where byte 0 is the least significant byte.
 */
#define MBEDTLS_BYTE_0(x) ((uint8_t) ((x) & 0xff))
#define MBEDTLS_BYTE_1(x) ((uint8_t) (((x) >> 8) & 0xff))
#define MBEDTLS_BYTE_2(x) ((uint8_t) (((x) >> 16) & 0xff))
#define MBEDTLS_BYTE_3(x) ((uint8_t) (((x) >> 24) & 0xff))
#define MBEDTLS_BYTE_4(x) ((uint8_t) (((x) >> 32) & 0xff))
#define MBEDTLS_BYTE_5(x) ((uint8_t) (((x) >> 40) & 0xff))
#define MBEDTLS_BYTE_6(x) ((uint8_t) (((x) >> 48) & 0xff))
#define MBEDTLS_BYTE_7(x) ((uint8_t) (((x) >> 56) & 0xff))
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000276
/*
 * Detect GCC built-in byteswap routines
 */
#if defined(__GNUC__) && defined(__GNUC_PREREQ)
#if __GNUC_PREREQ(4, 8)
#define MBEDTLS_BSWAP16 __builtin_bswap16
#endif /* __GNUC_PREREQ(4,8) */
#if __GNUC_PREREQ(4, 3)
#define MBEDTLS_BSWAP32 __builtin_bswap32
#define MBEDTLS_BSWAP64 __builtin_bswap64
#endif /* __GNUC_PREREQ(4,3) */
#endif /* defined(__GNUC__) && defined(__GNUC_PREREQ) */

/*
 * Detect Clang built-in byteswap routines
 */
#if defined(__clang__) && defined(__has_builtin)
#if __has_builtin(__builtin_bswap16) && !defined(MBEDTLS_BSWAP16)
#define MBEDTLS_BSWAP16 __builtin_bswap16
#endif /* __has_builtin(__builtin_bswap16) */
#if __has_builtin(__builtin_bswap32) && !defined(MBEDTLS_BSWAP32)
#define MBEDTLS_BSWAP32 __builtin_bswap32
#endif /* __has_builtin(__builtin_bswap32) */
#if __has_builtin(__builtin_bswap64) && !defined(MBEDTLS_BSWAP64)
#define MBEDTLS_BSWAP64 __builtin_bswap64
#endif /* __has_builtin(__builtin_bswap64) */
#endif /* defined(__clang__) && defined(__has_builtin) */

/*
 * Detect MSVC built-in byteswap routines
 */
#if defined(_MSC_VER)
#if !defined(MBEDTLS_BSWAP16)
#define MBEDTLS_BSWAP16 _byteswap_ushort
#endif
#if !defined(MBEDTLS_BSWAP32)
#define MBEDTLS_BSWAP32 _byteswap_ulong
#endif
#if !defined(MBEDTLS_BSWAP64)
#define MBEDTLS_BSWAP64 _byteswap_uint64
#endif
#endif /* defined(_MSC_VER) */

/* Detect armcc built-in byteswap routine */
#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 410000) && !defined(MBEDTLS_BSWAP32)
#if defined(__ARM_ACLE) /* ARM Compiler 6 - earlier versions don't need a header */
#include <arm_acle.h>
#endif
#define MBEDTLS_BSWAP32 __rev
#endif

/* Detect IAR built-in byteswap routine */
#if defined(__IAR_SYSTEMS_ICC__)
#if defined(__ARM_ACLE)
#include <arm_acle.h>
#define MBEDTLS_BSWAP16(x) ((uint16_t) __rev16((uint32_t) (x)))
#define MBEDTLS_BSWAP32 __rev
#define MBEDTLS_BSWAP64 __revll
#endif
#endif
337
/*
 * Where compiler built-ins are not present, fall back to C code that the
 * compiler may be able to detect and transform into the relevant bswap or
 * similar instruction.
 */
#if !defined(MBEDTLS_BSWAP16)
/* Swap the two bytes of a 16-bit value. */
static inline uint16_t mbedtls_bswap16(uint16_t x)
{
    return (uint16_t) ((x << 8) | (x >> 8));
}
#define MBEDTLS_BSWAP16 mbedtls_bswap16
#endif /* !defined(MBEDTLS_BSWAP16) */
Dave Rodgman6298b242022-11-28 14:51:49 +0000352
#if !defined(MBEDTLS_BSWAP32)
/* Reverse the byte order of a 32-bit value. */
static inline uint32_t mbedtls_bswap32(uint32_t x)
{
    return (x << 24)
           | ((x & 0x0000ff00) << 8)
           | ((x >> 8) & 0x0000ff00)
           | (x >> 24);
}
#define MBEDTLS_BSWAP32 mbedtls_bswap32
#endif /* !defined(MBEDTLS_BSWAP32) */
Dave Rodgman6298b242022-11-28 14:51:49 +0000364
#if !defined(MBEDTLS_BSWAP64)
/* Reverse the byte order of a 64-bit value by swapping progressively
 * smaller pairs: 32-bit halves, then 16-bit lanes, then bytes. */
static inline uint64_t mbedtls_bswap64(uint64_t x)
{
    x = (x >> 32) | (x << 32);
    x = ((x & 0xffff0000ffff0000ULL) >> 16) | ((x & 0x0000ffff0000ffffULL) << 16);
    x = ((x & 0xff00ff00ff00ff00ULL) >> 8) | ((x & 0x00ff00ff00ff00ffULL) << 8);
    return x;
}
#define MBEDTLS_BSWAP64 mbedtls_bswap64
#endif /* !defined(MBEDTLS_BSWAP64) */
Dave Rodgman6298b242022-11-28 14:51:49 +0000380
#if !defined(__BYTE_ORDER__)

#if defined(__LITTLE_ENDIAN__)
/* IAR defines __xxx_ENDIAN__, but not __BYTE_ORDER__ */
#define MBEDTLS_IS_BIG_ENDIAN 0
#elif defined(__BIG_ENDIAN__)
#define MBEDTLS_IS_BIG_ENDIAN 1
#else
/* Last resort: detect byte order at run time by inspecting the first
 * byte of a known 16-bit constant. */
static const uint16_t mbedtls_byte_order_detector = { 0x100 };
#define MBEDTLS_IS_BIG_ENDIAN (*((unsigned char *) (&mbedtls_byte_order_detector)) == 0x01)
#endif

#else

#if (__BYTE_ORDER__) == (__ORDER_BIG_ENDIAN__)
#define MBEDTLS_IS_BIG_ENDIAN 1
#else
#define MBEDTLS_IS_BIG_ENDIAN 0
#endif

#endif /* !defined(__BYTE_ORDER__) */
402
/**
 * Get the unsigned 32 bits integer corresponding to four bytes in
 * big-endian order (MSB first).
 *
 * \param data Base address of the memory to get the four bytes from.
 * \param offset Offset from \p data of the first and most significant
 * byte of the four bytes to build the 32 bits unsigned
 * integer from.
 */
#define MBEDTLS_GET_UINT32_BE(data, offset) \
    ((MBEDTLS_IS_BIG_ENDIAN) \
        ? mbedtls_get_unaligned_uint32((data) + (offset)) \
        : MBEDTLS_BSWAP32(mbedtls_get_unaligned_uint32((data) + (offset))) \
    )

/**
 * Put in memory a 32 bits unsigned integer in big-endian order.
 *
 * \param n 32 bits unsigned integer to put in memory.
 * \param data Base address of the memory where to put the 32
 * bits unsigned integer in.
 * \param offset Offset from \p data where to put the most significant
 * byte of the 32 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT32_BE(n, data, offset) \
    { \
        if (MBEDTLS_IS_BIG_ENDIAN) \
        { \
            mbedtls_put_unaligned_uint32((data) + (offset), (uint32_t) (n)); \
        } \
        else \
        { \
            mbedtls_put_unaligned_uint32((data) + (offset), MBEDTLS_BSWAP32((uint32_t) (n))); \
        } \
    }

/**
 * Get the unsigned 32 bits integer corresponding to four bytes in
 * little-endian order (LSB first).
 *
 * \param data Base address of the memory to get the four bytes from.
 * \param offset Offset from \p data of the first and least significant
 * byte of the four bytes to build the 32 bits unsigned
 * integer from.
 */
#define MBEDTLS_GET_UINT32_LE(data, offset) \
    ((MBEDTLS_IS_BIG_ENDIAN) \
        ? MBEDTLS_BSWAP32(mbedtls_get_unaligned_uint32((data) + (offset))) \
        : mbedtls_get_unaligned_uint32((data) + (offset)) \
    )


/**
 * Put in memory a 32 bits unsigned integer in little-endian order.
 *
 * \param n 32 bits unsigned integer to put in memory.
 * \param data Base address of the memory where to put the 32
 * bits unsigned integer in.
 * \param offset Offset from \p data where to put the least significant
 * byte of the 32 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT32_LE(n, data, offset) \
    { \
        if (MBEDTLS_IS_BIG_ENDIAN) \
        { \
            mbedtls_put_unaligned_uint32((data) + (offset), MBEDTLS_BSWAP32((uint32_t) (n))); \
        } \
        else \
        { \
            mbedtls_put_unaligned_uint32((data) + (offset), ((uint32_t) (n))); \
        } \
    }
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000475
/**
 * Get the unsigned 16 bits integer corresponding to two bytes in
 * little-endian order (LSB first).
 *
 * \param data Base address of the memory to get the two bytes from.
 * \param offset Offset from \p data of the first and least significant
 * byte of the two bytes to build the 16 bits unsigned
 * integer from.
 */
#define MBEDTLS_GET_UINT16_LE(data, offset) \
    ((MBEDTLS_IS_BIG_ENDIAN) \
        ? MBEDTLS_BSWAP16(mbedtls_get_unaligned_uint16((data) + (offset))) \
        : mbedtls_get_unaligned_uint16((data) + (offset)) \
    )

/**
 * Put in memory a 16 bits unsigned integer in little-endian order.
 *
 * \param n 16 bits unsigned integer to put in memory.
 * \param data Base address of the memory where to put the 16
 * bits unsigned integer in.
 * \param offset Offset from \p data where to put the least significant
 * byte of the 16 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT16_LE(n, data, offset) \
    { \
        if (MBEDTLS_IS_BIG_ENDIAN) \
        { \
            mbedtls_put_unaligned_uint16((data) + (offset), MBEDTLS_BSWAP16((uint16_t) (n))); \
        } \
        else \
        { \
            mbedtls_put_unaligned_uint16((data) + (offset), (uint16_t) (n)); \
        } \
    }

/**
 * Get the unsigned 16 bits integer corresponding to two bytes in
 * big-endian order (MSB first).
 *
 * \param data Base address of the memory to get the two bytes from.
 * \param offset Offset from \p data of the first and most significant
 * byte of the two bytes to build the 16 bits unsigned
 * integer from.
 */
#define MBEDTLS_GET_UINT16_BE(data, offset) \
    ((MBEDTLS_IS_BIG_ENDIAN) \
        ? mbedtls_get_unaligned_uint16((data) + (offset)) \
        : MBEDTLS_BSWAP16(mbedtls_get_unaligned_uint16((data) + (offset))) \
    )

/**
 * Put in memory a 16 bits unsigned integer in big-endian order.
 *
 * \param n 16 bits unsigned integer to put in memory.
 * \param data Base address of the memory where to put the 16
 * bits unsigned integer in.
 * \param offset Offset from \p data where to put the most significant
 * byte of the 16 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT16_BE(n, data, offset) \
    { \
        if (MBEDTLS_IS_BIG_ENDIAN) \
        { \
            mbedtls_put_unaligned_uint16((data) + (offset), (uint16_t) (n)); \
        } \
        else \
        { \
            mbedtls_put_unaligned_uint16((data) + (offset), MBEDTLS_BSWAP16((uint16_t) (n))); \
        } \
    }
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000547
/**
 * Get the unsigned 24 bits integer corresponding to three bytes in
 * big-endian order (MSB first).
 *
 * \param data Base address of the memory to get the three bytes from.
 * \param offset Offset from \p data of the first and most significant
 * byte of the three bytes to build the 24 bits unsigned
 * integer from.
 */
#define MBEDTLS_GET_UINT24_BE(data, offset) \
    ( \
        ((uint32_t) (data)[(offset)] << 16) \
        | ((uint32_t) (data)[(offset) + 1] << 8) \
        | ((uint32_t) (data)[(offset) + 2]) \
    )

/**
 * Put in memory a 24 bits unsigned integer in big-endian order.
 *
 * \param n 24 bits unsigned integer to put in memory.
 * \param data Base address of the memory where to put the 24
 * bits unsigned integer in.
 * \param offset Offset from \p data where to put the most significant
 * byte of the 24 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT24_BE(n, data, offset) \
    { \
        (data)[(offset)] = MBEDTLS_BYTE_2(n); \
        (data)[(offset) + 1] = MBEDTLS_BYTE_1(n); \
        (data)[(offset) + 2] = MBEDTLS_BYTE_0(n); \
    }

/**
 * Get the unsigned 24 bits integer corresponding to three bytes in
 * little-endian order (LSB first).
 *
 * \param data Base address of the memory to get the three bytes from.
 * \param offset Offset from \p data of the first and least significant
 * byte of the three bytes to build the 24 bits unsigned
 * integer from.
 */
#define MBEDTLS_GET_UINT24_LE(data, offset) \
    ( \
        ((uint32_t) (data)[(offset)]) \
        | ((uint32_t) (data)[(offset) + 1] << 8) \
        | ((uint32_t) (data)[(offset) + 2] << 16) \
    )

/**
 * Put in memory a 24 bits unsigned integer in little-endian order.
 *
 * \param n 24 bits unsigned integer to put in memory.
 * \param data Base address of the memory where to put the 24
 * bits unsigned integer in.
 * \param offset Offset from \p data where to put the least significant
 * byte of the 24 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT24_LE(n, data, offset) \
    { \
        (data)[(offset)] = MBEDTLS_BYTE_0(n); \
        (data)[(offset) + 1] = MBEDTLS_BYTE_1(n); \
        (data)[(offset) + 2] = MBEDTLS_BYTE_2(n); \
    }
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000611
/**
 * Get the unsigned 64 bits integer corresponding to eight bytes in
 * big-endian order (MSB first).
 *
 * \param data Base address of the memory to get the eight bytes from.
 * \param offset Offset from \p data of the first and most significant
 * byte of the eight bytes to build the 64 bits unsigned
 * integer from.
 */
#define MBEDTLS_GET_UINT64_BE(data, offset) \
    ((MBEDTLS_IS_BIG_ENDIAN) \
        ? mbedtls_get_unaligned_uint64((data) + (offset)) \
        : MBEDTLS_BSWAP64(mbedtls_get_unaligned_uint64((data) + (offset))) \
    )

/**
 * Put in memory a 64 bits unsigned integer in big-endian order.
 *
 * \param n 64 bits unsigned integer to put in memory.
 * \param data Base address of the memory where to put the 64
 * bits unsigned integer in.
 * \param offset Offset from \p data where to put the most significant
 * byte of the 64 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT64_BE(n, data, offset) \
    { \
        if (MBEDTLS_IS_BIG_ENDIAN) \
        { \
            mbedtls_put_unaligned_uint64((data) + (offset), (uint64_t) (n)); \
        } \
        else \
        { \
            mbedtls_put_unaligned_uint64((data) + (offset), MBEDTLS_BSWAP64((uint64_t) (n))); \
        } \
    }

/**
 * Get the unsigned 64 bits integer corresponding to eight bytes in
 * little-endian order (LSB first).
 *
 * \param data Base address of the memory to get the eight bytes from.
 * \param offset Offset from \p data of the first and least significant
 * byte of the eight bytes to build the 64 bits unsigned
 * integer from.
 */
#define MBEDTLS_GET_UINT64_LE(data, offset) \
    ((MBEDTLS_IS_BIG_ENDIAN) \
        ? MBEDTLS_BSWAP64(mbedtls_get_unaligned_uint64((data) + (offset))) \
        : mbedtls_get_unaligned_uint64((data) + (offset)) \
    )

/**
 * Put in memory a 64 bits unsigned integer in little-endian order.
 *
 * \param n 64 bits unsigned integer to put in memory.
 * \param data Base address of the memory where to put the 64
 * bits unsigned integer in.
 * \param offset Offset from \p data where to put the least significant
 * byte of the 64 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT64_LE(n, data, offset) \
    { \
        if (MBEDTLS_IS_BIG_ENDIAN) \
        { \
            mbedtls_put_unaligned_uint64((data) + (offset), MBEDTLS_BSWAP64((uint64_t) (n))); \
        } \
        else \
        { \
            mbedtls_put_unaligned_uint64((data) + (offset), (uint64_t) (n)); \
        } \
    }
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000683
684#endif /* MBEDTLS_LIBRARY_ALIGNMENT_H */