/**
 * \file alignment.h
 *
 * \brief Utility code for dealing with unaligned memory accesses
 */
/*
 * Copyright The Mbed TLS Contributors
 * SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
 */

#ifndef MBEDTLS_LIBRARY_ALIGNMENT_H
#define MBEDTLS_LIBRARY_ALIGNMENT_H

#include <stdint.h>
#include <string.h>
#include <stdlib.h>

#include "mbedtls/build_info.h"

/*
 * Define MBEDTLS_EFFICIENT_UNALIGNED_ACCESS for architectures where unaligned memory
 * accesses are known to be efficient.
 *
 * All functions defined here will behave correctly regardless, but might be less
 * efficient when this is not defined.
 */
#if defined(__ARM_FEATURE_UNALIGNED) \
    || defined(MBEDTLS_ARCH_IS_X86) || defined(MBEDTLS_ARCH_IS_X64) \
    || defined(MBEDTLS_PLATFORM_IS_WINDOWS_ON_ARM64)
/*
 * __ARM_FEATURE_UNALIGNED is defined where appropriate by armcc, gcc 7, clang 9
 * (and later versions) for Arm v7 and later; all x86 platforms should have
 * efficient unaligned access.
 *
 * https://learn.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions?view=msvc-170#alignment
 * specifies that on Windows-on-Arm64, unaligned access is safe (except for uncached
 * device memory).
 */
#define MBEDTLS_EFFICIENT_UNALIGNED_ACCESS
#endif

#if defined(__IAR_SYSTEMS_ICC__) && \
    (defined(MBEDTLS_ARCH_IS_ARM64) || defined(MBEDTLS_ARCH_IS_ARM32) \
    || defined(__ICCRX__) || defined(__ICCRL78__) || defined(__ICCRISCV__))
#pragma language=save
#pragma language=extended
#define MBEDTLS_POP_IAR_LANGUAGE_PRAGMA
/* IAR recommends this technique for accessing unaligned data in
 * https://www.iar.com/knowledge/support/technical-notes/compiler/accessing-unaligned-data
 * It results in a single load/store instruction (if unaligned access is supported).
 * According to that document, this is only supported on certain architectures.
 */
 #define UINT_UNALIGNED
typedef uint16_t __packed mbedtls_uint16_unaligned_t;
typedef uint32_t __packed mbedtls_uint32_unaligned_t;
typedef uint64_t __packed mbedtls_uint64_unaligned_t;
#elif defined(MBEDTLS_COMPILER_IS_GCC) && (MBEDTLS_GCC_VERSION >= 40504) && \
    ((MBEDTLS_GCC_VERSION < 90300) || (!defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS)))
/*
 * Old versions of gcc, depending on how the target is specified, may generate a branch to memcpy
 * for calls like `memcpy(dest, src, 4)` rather than generating some LDR or LDRB instructions
 * (similar for stores).
 * Recent versions also do this when unaligned access is not enabled.
 *
 * For performance (and code size, in some cases), we want to avoid the branch and just generate
 * some inline load/store instructions since the access is small and constant-size.
 *
 * The manual states:
 * "The aligned attribute specifies a minimum alignment for the variable or structure field,
 * measured in bytes."
 * https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html
 *
 * Tested with several versions of GCC from 4.5.0 up to 9.3.0.
 * We don't enable this for GCC older than 4.5.0 as it has not been tested there.
 */
 #define UINT_UNALIGNED
typedef uint16_t __attribute__((__aligned__(1))) mbedtls_uint16_unaligned_t;
typedef uint32_t __attribute__((__aligned__(1))) mbedtls_uint32_unaligned_t;
typedef uint64_t __attribute__((__aligned__(1))) mbedtls_uint64_unaligned_t;
 #endif

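/*
 * Illustrative sketch (example code, not part of this header's interface) of
 * what the mbedtls_uintXX_unaligned_t typedefs buy us: dereferencing a plain
 * uint32_t * that is not 4-byte aligned is undefined behaviour, whereas the
 * __packed / __attribute__((aligned(1))) typedefs tell the compiler that the
 * pointer may be misaligned, so it emits whatever access sequence the target
 * needs (a single unaligned load where that is supported).
 *
 *     uint32_t load32(const unsigned char *p)  // p may be unaligned
 *     {
 *     #if defined(UINT_UNALIGNED)
 *         return *(const mbedtls_uint32_unaligned_t *) p;
 *     #else
 *         uint32_t r;
 *         memcpy(&r, p, sizeof(r));  // portable fallback
 *         return r;
 *     #endif
 *     }
 *
 * This is the same pattern used by mbedtls_get_unaligned_uint32() below.
 */
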
/*
 * We try to force mbedtls_(get|put)_unaligned_uintXX to be always inline, because this results
 * in code that is both smaller and faster. IAR and gcc both benefit from this when optimising
 * for size.
 */

/**
 * Read an unsigned 16-bit integer from the given address, which need not
 * be aligned.
 *
 * \param p pointer to 2 bytes of data
 * \return Data at the given address
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline uint16_t mbedtls_get_unaligned_uint16(const void *p)
{
    uint16_t r;
#if defined(UINT_UNALIGNED)
    mbedtls_uint16_unaligned_t *p16 = (mbedtls_uint16_unaligned_t *) p;
    r = *p16;
#else
    memcpy(&r, p, sizeof(r));
#endif
    return r;
}

/**
 * Write an unsigned 16-bit integer to the given address, which need not
 * be aligned.
 *
 * \param p pointer to 2 bytes of data
 * \param x data to write
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline void mbedtls_put_unaligned_uint16(void *p, uint16_t x)
{
#if defined(UINT_UNALIGNED)
    mbedtls_uint16_unaligned_t *p16 = (mbedtls_uint16_unaligned_t *) p;
    *p16 = x;
#else
    memcpy(p, &x, sizeof(x));
#endif
}
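
/*
 * Usage sketch (buffer contents are made up for the example): read and write
 * a 16-bit value at an odd, and therefore potentially unaligned, address.
 *
 *     unsigned char buf[4] = { 0xaa, 0x34, 0x12, 0xbb };
 *     uint16_t v = mbedtls_get_unaligned_uint16(buf + 1);  // 0x1234 on LE hosts, 0x3412 on BE
 *     mbedtls_put_unaligned_uint16(buf + 1, (uint16_t) (v + 1));
 *
 * Both calls are safe on strict-alignment targets: they become a 2-byte
 * memcpy (or an unaligned-tolerant access where UINT_UNALIGNED is defined),
 * and a single 16-bit load/store where unaligned access is efficient.
 */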

/**
 * Read an unsigned 32-bit integer from the given address, which need not
 * be aligned.
 *
 * \param p pointer to 4 bytes of data
 * \return Data at the given address
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline uint32_t mbedtls_get_unaligned_uint32(const void *p)
{
    uint32_t r;
#if defined(UINT_UNALIGNED)
    mbedtls_uint32_unaligned_t *p32 = (mbedtls_uint32_unaligned_t *) p;
    r = *p32;
#else
    memcpy(&r, p, sizeof(r));
#endif
    return r;
}

/**
 * Write an unsigned 32-bit integer to the given address, which need not
 * be aligned.
 *
 * \param p pointer to 4 bytes of data
 * \param x data to write
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline void mbedtls_put_unaligned_uint32(void *p, uint32_t x)
{
#if defined(UINT_UNALIGNED)
    mbedtls_uint32_unaligned_t *p32 = (mbedtls_uint32_unaligned_t *) p;
    *p32 = x;
#else
    memcpy(p, &x, sizeof(x));
#endif
}

/**
 * Read an unsigned 64-bit integer from the given address, which need not
 * be aligned.
 *
 * \param p pointer to 8 bytes of data
 * \return Data at the given address
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline uint64_t mbedtls_get_unaligned_uint64(const void *p)
{
    uint64_t r;
#if defined(UINT_UNALIGNED)
    mbedtls_uint64_unaligned_t *p64 = (mbedtls_uint64_unaligned_t *) p;
    r = *p64;
#else
    memcpy(&r, p, sizeof(r));
#endif
    return r;
}

/**
 * Write an unsigned 64-bit integer to the given address, which need not
 * be aligned.
 *
 * \param p pointer to 8 bytes of data
 * \param x data to write
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline void mbedtls_put_unaligned_uint64(void *p, uint64_t x)
{
#if defined(UINT_UNALIGNED)
    mbedtls_uint64_unaligned_t *p64 = (mbedtls_uint64_unaligned_t *) p;
    *p64 = x;
#else
    memcpy(p, &x, sizeof(x));
#endif
}

#if defined(MBEDTLS_POP_IAR_LANGUAGE_PRAGMA)
#pragma language=restore
#endif

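/*
 * Note (illustrative, not a statement of the API contract beyond what the
 * comments above say): the mbedtls_(get|put)_unaligned_uintXX helpers use the
 * host's native byte order. When a specific wire order is needed, use the
 * MBEDTLS_GET_/MBEDTLS_PUT_UINTxx_BE/LE macros defined further down, e.g.:
 *
 *     unsigned char out[8];
 *     uint64_t counter = 42;                       // example value
 *     mbedtls_put_unaligned_uint64(out, counter);  // host byte order
 *     MBEDTLS_PUT_UINT64_BE(counter, out, 0);      // big-endian on any host
 */
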
/** Byte Reading Macros
 *
 * Given a multi-byte integer \p x, MBEDTLS_BYTE_n retrieves the n-th
 * byte from x, where byte 0 is the least significant byte.
 */
#define MBEDTLS_BYTE_0(x) ((uint8_t) ((x) & 0xff))
#define MBEDTLS_BYTE_1(x) ((uint8_t) (((x) >> 8) & 0xff))
#define MBEDTLS_BYTE_2(x) ((uint8_t) (((x) >> 16) & 0xff))
#define MBEDTLS_BYTE_3(x) ((uint8_t) (((x) >> 24) & 0xff))
#define MBEDTLS_BYTE_4(x) ((uint8_t) (((x) >> 32) & 0xff))
#define MBEDTLS_BYTE_5(x) ((uint8_t) (((x) >> 40) & 0xff))
#define MBEDTLS_BYTE_6(x) ((uint8_t) (((x) >> 48) & 0xff))
#define MBEDTLS_BYTE_7(x) ((uint8_t) (((x) >> 56) & 0xff))

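/*
 * Example (values are illustrative only):
 *
 *     uint32_t x = 0x12345678;
 *     uint8_t b0 = MBEDTLS_BYTE_0(x);  // 0x78, least significant byte
 *     uint8_t b3 = MBEDTLS_BYTE_3(x);  // 0x12, most significant byte
 *
 * MBEDTLS_BYTE_4 to MBEDTLS_BYTE_7 extract the upper bytes of 64-bit values
 * in the same way.
 */
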
/*
 * Detect GCC built-in byteswap routines
 */
#if defined(__GNUC__) && defined(__GNUC_PREREQ)
#if __GNUC_PREREQ(4, 8)
#define MBEDTLS_BSWAP16 __builtin_bswap16
#endif /* __GNUC_PREREQ(4,8) */
#if __GNUC_PREREQ(4, 3)
#define MBEDTLS_BSWAP32 __builtin_bswap32
#define MBEDTLS_BSWAP64 __builtin_bswap64
#endif /* __GNUC_PREREQ(4,3) */
#endif /* defined(__GNUC__) && defined(__GNUC_PREREQ) */

/*
 * Detect Clang built-in byteswap routines
 */
#if defined(__clang__) && defined(__has_builtin)
#if __has_builtin(__builtin_bswap16) && !defined(MBEDTLS_BSWAP16)
#define MBEDTLS_BSWAP16 __builtin_bswap16
#endif /* __has_builtin(__builtin_bswap16) */
#if __has_builtin(__builtin_bswap32) && !defined(MBEDTLS_BSWAP32)
#define MBEDTLS_BSWAP32 __builtin_bswap32
#endif /* __has_builtin(__builtin_bswap32) */
#if __has_builtin(__builtin_bswap64) && !defined(MBEDTLS_BSWAP64)
#define MBEDTLS_BSWAP64 __builtin_bswap64
#endif /* __has_builtin(__builtin_bswap64) */
#endif /* defined(__clang__) && defined(__has_builtin) */

/*
 * Detect MSVC built-in byteswap routines
 */
#if defined(_MSC_VER)
#if !defined(MBEDTLS_BSWAP16)
#define MBEDTLS_BSWAP16 _byteswap_ushort
#endif
#if !defined(MBEDTLS_BSWAP32)
#define MBEDTLS_BSWAP32 _byteswap_ulong
#endif
#if !defined(MBEDTLS_BSWAP64)
#define MBEDTLS_BSWAP64 _byteswap_uint64
#endif
#endif /* defined(_MSC_VER) */

/* Detect armcc built-in byteswap routine */
#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 410000) && !defined(MBEDTLS_BSWAP32)
#if defined(__ARM_ACLE)  /* ARM Compiler 6 - earlier versions don't need a header */
#include <arm_acle.h>
#endif
#define MBEDTLS_BSWAP32 __rev
#endif

/* Detect IAR built-in byteswap routine */
#if defined(__IAR_SYSTEMS_ICC__)
#if defined(__ARM_ACLE)
#include <arm_acle.h>
#define MBEDTLS_BSWAP16(x) ((uint16_t) __rev16((uint32_t) (x)))
#define MBEDTLS_BSWAP32 __rev
#define MBEDTLS_BSWAP64 __revll
#endif
#endif

/*
 * Where compiler built-ins are not present, fall back to C code that the
 * compiler may be able to detect and transform into the relevant bswap or
 * similar instruction.
 */
#if !defined(MBEDTLS_BSWAP16)
static inline uint16_t mbedtls_bswap16(uint16_t x)
{
    return
        (x & 0x00ff) << 8 |
        (x & 0xff00) >> 8;
}
#define MBEDTLS_BSWAP16 mbedtls_bswap16
#endif /* !defined(MBEDTLS_BSWAP16) */

#if !defined(MBEDTLS_BSWAP32)
static inline uint32_t mbedtls_bswap32(uint32_t x)
{
    return
        (x & 0x000000ff) << 24 |
        (x & 0x0000ff00) << 8 |
        (x & 0x00ff0000) >> 8 |
        (x & 0xff000000) >> 24;
}
#define MBEDTLS_BSWAP32 mbedtls_bswap32
#endif /* !defined(MBEDTLS_BSWAP32) */

#if !defined(MBEDTLS_BSWAP64)
static inline uint64_t mbedtls_bswap64(uint64_t x)
{
    return
        (x & 0x00000000000000ffULL) << 56 |
        (x & 0x000000000000ff00ULL) << 40 |
        (x & 0x0000000000ff0000ULL) << 24 |
        (x & 0x00000000ff000000ULL) << 8 |
        (x & 0x000000ff00000000ULL) >> 8 |
        (x & 0x0000ff0000000000ULL) >> 24 |
        (x & 0x00ff000000000000ULL) >> 40 |
        (x & 0xff00000000000000ULL) >> 56;
}
#define MBEDTLS_BSWAP64 mbedtls_bswap64
#endif /* !defined(MBEDTLS_BSWAP64) */

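/*
 * Whichever definition is selected above, the MBEDTLS_BSWAPxx macros reverse
 * the byte order of their argument. Illustrative values:
 *
 *     MBEDTLS_BSWAP16(0x1234)               == 0x3412
 *     MBEDTLS_BSWAP32(0x01020304)           == 0x04030201
 *     MBEDTLS_BSWAP64(0x0102030405060708)   == 0x0807060504030201
 */
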
#if !defined(__BYTE_ORDER__)

#if defined(__LITTLE_ENDIAN__)
/* IAR defines __xxx_ENDIAN__, but not __BYTE_ORDER__ */
#define MBEDTLS_IS_BIG_ENDIAN 0
#elif defined(__BIG_ENDIAN__)
#define MBEDTLS_IS_BIG_ENDIAN 1
#else
static const uint16_t mbedtls_byte_order_detector = { 0x100 };
#define MBEDTLS_IS_BIG_ENDIAN (*((unsigned char *) (&mbedtls_byte_order_detector)) == 0x01)
#endif

#else

#if (__BYTE_ORDER__) == (__ORDER_BIG_ENDIAN__)
#define MBEDTLS_IS_BIG_ENDIAN 1
#else
#define MBEDTLS_IS_BIG_ENDIAN 0
#endif

#endif /* !defined(__BYTE_ORDER__) */

/**
 * Get the unsigned 32-bit integer corresponding to four bytes in
 * big-endian order (MSB first).
 *
 * \param data    Base address of the memory to get the four bytes from.
 * \param offset  Offset from \p data of the first and most significant
 *                byte of the four bytes to build the 32-bit unsigned
 *                integer from.
 */
#define MBEDTLS_GET_UINT32_BE(data, offset) \
    ((MBEDTLS_IS_BIG_ENDIAN) \
        ? mbedtls_get_unaligned_uint32((data) + (offset)) \
        : MBEDTLS_BSWAP32(mbedtls_get_unaligned_uint32((data) + (offset))) \
    )

/**
 * Put a 32-bit unsigned integer in memory in big-endian order.
 *
 * \param n       32-bit unsigned integer to put in memory.
 * \param data    Base address of the memory to put the 32-bit
 *                unsigned integer in.
 * \param offset  Offset from \p data at which to put the most significant
 *                byte of the 32-bit unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT32_BE(n, data, offset) \
    { \
        if (MBEDTLS_IS_BIG_ENDIAN) \
        { \
            mbedtls_put_unaligned_uint32((data) + (offset), (uint32_t) (n)); \
        } \
        else \
        { \
            mbedtls_put_unaligned_uint32((data) + (offset), MBEDTLS_BSWAP32((uint32_t) (n))); \
        } \
    }

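/*
 * Usage sketch (example values only): serialise and re-read a 32-bit length
 * field in network (big-endian) order.
 *
 *     unsigned char hdr[4];
 *     MBEDTLS_PUT_UINT32_BE(0x01020304, hdr, 0);
 *     // hdr now holds { 0x01, 0x02, 0x03, 0x04 } on any host
 *     uint32_t len = MBEDTLS_GET_UINT32_BE(hdr, 0);  // len == 0x01020304
 */
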
/**
 * Get the unsigned 32-bit integer corresponding to four bytes in
 * little-endian order (LSB first).
 *
 * \param data    Base address of the memory to get the four bytes from.
 * \param offset  Offset from \p data of the first and least significant
 *                byte of the four bytes to build the 32-bit unsigned
 *                integer from.
 */
#define MBEDTLS_GET_UINT32_LE(data, offset) \
    ((MBEDTLS_IS_BIG_ENDIAN) \
        ? MBEDTLS_BSWAP32(mbedtls_get_unaligned_uint32((data) + (offset))) \
        : mbedtls_get_unaligned_uint32((data) + (offset)) \
    )

/**
 * Put a 32-bit unsigned integer in memory in little-endian order.
 *
 * \param n       32-bit unsigned integer to put in memory.
 * \param data    Base address of the memory to put the 32-bit
 *                unsigned integer in.
 * \param offset  Offset from \p data at which to put the least significant
 *                byte of the 32-bit unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT32_LE(n, data, offset) \
    { \
        if (MBEDTLS_IS_BIG_ENDIAN) \
        { \
            mbedtls_put_unaligned_uint32((data) + (offset), MBEDTLS_BSWAP32((uint32_t) (n))); \
        } \
        else \
        { \
            mbedtls_put_unaligned_uint32((data) + (offset), ((uint32_t) (n))); \
        } \
    }

/**
 * Get the unsigned 16-bit integer corresponding to two bytes in
 * little-endian order (LSB first).
 *
 * \param data    Base address of the memory to get the two bytes from.
 * \param offset  Offset from \p data of the first and least significant
 *                byte of the two bytes to build the 16-bit unsigned
 *                integer from.
 */
#define MBEDTLS_GET_UINT16_LE(data, offset) \
    ((MBEDTLS_IS_BIG_ENDIAN) \
        ? MBEDTLS_BSWAP16(mbedtls_get_unaligned_uint16((data) + (offset))) \
        : mbedtls_get_unaligned_uint16((data) + (offset)) \
    )

/**
 * Put a 16-bit unsigned integer in memory in little-endian order.
 *
 * \param n       16-bit unsigned integer to put in memory.
 * \param data    Base address of the memory to put the 16-bit
 *                unsigned integer in.
 * \param offset  Offset from \p data at which to put the least significant
 *                byte of the 16-bit unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT16_LE(n, data, offset) \
    { \
        if (MBEDTLS_IS_BIG_ENDIAN) \
        { \
            mbedtls_put_unaligned_uint16((data) + (offset), MBEDTLS_BSWAP16((uint16_t) (n))); \
        } \
        else \
        { \
            mbedtls_put_unaligned_uint16((data) + (offset), (uint16_t) (n)); \
        } \
    }

/**
 * Get the unsigned 16-bit integer corresponding to two bytes in
 * big-endian order (MSB first).
 *
 * \param data    Base address of the memory to get the two bytes from.
 * \param offset  Offset from \p data of the first and most significant
 *                byte of the two bytes to build the 16-bit unsigned
 *                integer from.
 */
#define MBEDTLS_GET_UINT16_BE(data, offset) \
    ((MBEDTLS_IS_BIG_ENDIAN) \
        ? mbedtls_get_unaligned_uint16((data) + (offset)) \
        : MBEDTLS_BSWAP16(mbedtls_get_unaligned_uint16((data) + (offset))) \
    )

/**
 * Put a 16-bit unsigned integer in memory in big-endian order.
 *
 * \param n       16-bit unsigned integer to put in memory.
 * \param data    Base address of the memory to put the 16-bit
 *                unsigned integer in.
 * \param offset  Offset from \p data at which to put the most significant
 *                byte of the 16-bit unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT16_BE(n, data, offset) \
    { \
        if (MBEDTLS_IS_BIG_ENDIAN) \
        { \
            mbedtls_put_unaligned_uint16((data) + (offset), (uint16_t) (n)); \
        } \
        else \
        { \
            mbedtls_put_unaligned_uint16((data) + (offset), MBEDTLS_BSWAP16((uint16_t) (n))); \
        } \
    }

/**
 * Get the unsigned 24-bit integer corresponding to three bytes in
 * big-endian order (MSB first).
 *
 * \param data    Base address of the memory to get the three bytes from.
 * \param offset  Offset from \p data of the first and most significant
 *                byte of the three bytes to build the 24-bit unsigned
 *                integer from.
 */
#define MBEDTLS_GET_UINT24_BE(data, offset) \
    ( \
        ((uint32_t) (data)[(offset)] << 16) \
        | ((uint32_t) (data)[(offset) + 1] << 8) \
        | ((uint32_t) (data)[(offset) + 2]) \
    )

/**
 * Put a 24-bit unsigned integer in memory in big-endian order.
 *
 * \param n       24-bit unsigned integer to put in memory.
 * \param data    Base address of the memory to put the 24-bit
 *                unsigned integer in.
 * \param offset  Offset from \p data at which to put the most significant
 *                byte of the 24-bit unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT24_BE(n, data, offset) \
    { \
        (data)[(offset)] = MBEDTLS_BYTE_2(n); \
        (data)[(offset) + 1] = MBEDTLS_BYTE_1(n); \
        (data)[(offset) + 2] = MBEDTLS_BYTE_0(n); \
    }

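/*
 * Example (illustrative; 3-byte big-endian lengths appear, for instance, in
 * TLS handshake message headers):
 *
 *     unsigned char len_bytes[3];
 *     MBEDTLS_PUT_UINT24_BE(0x000102, len_bytes, 0);
 *     // len_bytes == { 0x00, 0x01, 0x02 }
 *     uint32_t len = MBEDTLS_GET_UINT24_BE(len_bytes, 0);  // len == 0x000102
 */
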
/**
 * Get the unsigned 24-bit integer corresponding to three bytes in
 * little-endian order (LSB first).
 *
 * \param data    Base address of the memory to get the three bytes from.
 * \param offset  Offset from \p data of the first and least significant
 *                byte of the three bytes to build the 24-bit unsigned
 *                integer from.
 */
#define MBEDTLS_GET_UINT24_LE(data, offset) \
    ( \
        ((uint32_t) (data)[(offset)]) \
        | ((uint32_t) (data)[(offset) + 1] << 8) \
        | ((uint32_t) (data)[(offset) + 2] << 16) \
    )

/**
 * Put a 24-bit unsigned integer in memory in little-endian order.
 *
 * \param n       24-bit unsigned integer to put in memory.
 * \param data    Base address of the memory to put the 24-bit
 *                unsigned integer in.
 * \param offset  Offset from \p data at which to put the least significant
 *                byte of the 24-bit unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT24_LE(n, data, offset) \
    { \
        (data)[(offset)] = MBEDTLS_BYTE_0(n); \
        (data)[(offset) + 1] = MBEDTLS_BYTE_1(n); \
        (data)[(offset) + 2] = MBEDTLS_BYTE_2(n); \
    }

/**
 * Get the unsigned 64-bit integer corresponding to eight bytes in
 * big-endian order (MSB first).
 *
 * \param data    Base address of the memory to get the eight bytes from.
 * \param offset  Offset from \p data of the first and most significant
 *                byte of the eight bytes to build the 64-bit unsigned
 *                integer from.
 */
#define MBEDTLS_GET_UINT64_BE(data, offset) \
    ((MBEDTLS_IS_BIG_ENDIAN) \
        ? mbedtls_get_unaligned_uint64((data) + (offset)) \
        : MBEDTLS_BSWAP64(mbedtls_get_unaligned_uint64((data) + (offset))) \
    )

/**
 * Put a 64-bit unsigned integer in memory in big-endian order.
 *
 * \param n       64-bit unsigned integer to put in memory.
 * \param data    Base address of the memory to put the 64-bit
 *                unsigned integer in.
 * \param offset  Offset from \p data at which to put the most significant
 *                byte of the 64-bit unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT64_BE(n, data, offset) \
    { \
        if (MBEDTLS_IS_BIG_ENDIAN) \
        { \
            mbedtls_put_unaligned_uint64((data) + (offset), (uint64_t) (n)); \
        } \
        else \
        { \
            mbedtls_put_unaligned_uint64((data) + (offset), MBEDTLS_BSWAP64((uint64_t) (n))); \
        } \
    }

/**
 * Get the unsigned 64-bit integer corresponding to eight bytes in
 * little-endian order (LSB first).
 *
 * \param data    Base address of the memory to get the eight bytes from.
 * \param offset  Offset from \p data of the first and least significant
 *                byte of the eight bytes to build the 64-bit unsigned
 *                integer from.
 */
#define MBEDTLS_GET_UINT64_LE(data, offset) \
    ((MBEDTLS_IS_BIG_ENDIAN) \
        ? MBEDTLS_BSWAP64(mbedtls_get_unaligned_uint64((data) + (offset))) \
        : mbedtls_get_unaligned_uint64((data) + (offset)) \
    )

/**
 * Put a 64-bit unsigned integer in memory in little-endian order.
 *
 * \param n       64-bit unsigned integer to put in memory.
 * \param data    Base address of the memory to put the 64-bit
 *                unsigned integer in.
 * \param offset  Offset from \p data at which to put the least significant
 *                byte of the 64-bit unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT64_LE(n, data, offset) \
    { \
        if (MBEDTLS_IS_BIG_ENDIAN) \
        { \
            mbedtls_put_unaligned_uint64((data) + (offset), MBEDTLS_BSWAP64((uint64_t) (n))); \
        } \
        else \
        { \
            mbedtls_put_unaligned_uint64((data) + (offset), (uint64_t) (n)); \
        } \
    }

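/*
 * Usage sketch (example values only): little-endian round trip of a 64-bit
 * value.
 *
 *     unsigned char out[8];
 *     MBEDTLS_PUT_UINT64_LE(0x0102030405060708ULL, out, 0);
 *     // out == { 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01 }
 *     uint64_t v = MBEDTLS_GET_UINT64_LE(out, 0);  // v == 0x0102030405060708
 */
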
#endif /* MBEDTLS_LIBRARY_ALIGNMENT_H */