blob: 7dec5489379f5d0e786afcd838f1b7119620fcc2 [file] [log] [blame]
johpow01f19dc622021-06-16 17:57:28 -05001/*
AlexeiFedorov20e26832024-03-13 13:59:09 +00002 * Copyright (c) 2022-2024, Arm Limited. All rights reserved.
johpow01f19dc622021-06-16 17:57:28 -05003 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7#include <assert.h>
8#include <errno.h>
Manish Pandey2461bd32021-11-09 20:49:56 +00009#include <inttypes.h>
johpow01f19dc622021-06-16 17:57:28 -050010#include <limits.h>
11#include <stdint.h>
12
13#include <arch.h>
Olivier Deprez62d64652024-01-17 15:12:04 +010014#include <arch_features.h>
johpow01f19dc622021-06-16 17:57:28 -050015#include <arch_helpers.h>
16#include <common/debug.h>
17#include "gpt_rme_private.h"
18#include <lib/gpt_rme/gpt_rme.h>
19#include <lib/smccc.h>
20#include <lib/spinlock.h>
21#include <lib/xlat_tables/xlat_tables_v2.h>
22
23#if !ENABLE_RME
AlexeiFedorovb99926e2024-03-13 15:18:02 +000024#error "ENABLE_RME must be enabled to use the GPT library"
johpow01f19dc622021-06-16 17:57:28 -050025#endif
26
27/*
28 * Lookup T from PPS
29 *
30 * PPS Size T
31 * 0b000 4GB 32
32 * 0b001 64GB 36
33 * 0b010 1TB 40
34 * 0b011 4TB 42
35 * 0b100 16TB 44
36 * 0b101 256TB 48
37 * 0b110 4PB 52
38 *
39 * See section 15.1.27 of the RME specification.
40 */
static const gpt_t_val_e gpt_t_lookup[] = {PPS_4GB_T, PPS_64GB_T,
					   PPS_1TB_T, PPS_4TB_T,
					   PPS_16TB_T, PPS_256TB_T,
					   PPS_4PB_T};

/*
 * Lookup P from PGS
 *
 * PGS	Size	P
 * 0b00	4KB	12
 * 0b10	16KB	14
 * 0b01	64KB	16
 *
 * Note that pgs=0b10 is 16KB and pgs=0b01 is 64KB, this is not a typo.
 *
 * See section 15.1.27 of the RME specification.
 */
static const gpt_p_val_e gpt_p_lookup[] = {PGS_4KB_P, PGS_64KB_P, PGS_16KB_P};

/*
 * Forward declarations for the "shatter" helpers defined below, which
 * replace a Contiguous descriptor of the named size with finer-grained
 * descriptors so a single granule within it can be transitioned.
 */
static void shatter_2mb(uintptr_t base, const gpi_info_t *gpi_info,
			uint64_t l1_desc);
static void shatter_32mb(uintptr_t base, const gpi_info_t *gpi_info,
			 uint64_t l1_desc);
static void shatter_512mb(uintptr_t base, const gpi_info_t *gpi_info,
			  uint64_t l1_desc);
66
/*
 * This structure contains GPT configuration data
 */
typedef struct {
	uintptr_t plat_gpt_l0_base;	/* Base address of the L0 table */
	gpccr_pps_e pps;		/* Protected Physical Space encoding */
	gpt_t_val_e t;			/* T value derived via gpt_t_lookup[pps] */
	gpccr_pgs_e pgs;		/* Physical Granule Size encoding */
	gpt_p_val_e p;			/* P value derived via gpt_p_lookup[pgs] */
} gpt_config_t;

static gpt_config_t gpt_config;

/*
 * Number of L1 entries in 2MB, depending on GPCCR_EL3.PGS:
 * +-------+------------+
 * |  PGS  | L1 entries |
 * +-------+------------+
 * |  4KB  |     32     |
 * +-------+------------+
 * |  16KB |      8     |
 * +-------+------------+
 * |  64KB |      2     |
 * +-------+------------+
 */
static unsigned int gpt_l1_cnt_2mb;

/*
 * Mask for the L1 index field, depending on
 * GPCCR_EL3.L0GPTSZ and GPCCR_EL3.PGS:
 * +---------+-------------------------------+
 * |         |             PGS               |
 * +---------+----------+----------+---------+
 * | L0GPTSZ |   4KB    |   16KB   |   64KB  |
 * +---------+----------+----------+---------+
 * |  1GB    |  0x3FFF  |   0xFFF  |  0x3FF  |
 * +---------+----------+----------+---------+
 * |  16GB   | 0x3FFFF  |  0xFFFF  | 0x3FFF  |
 * +---------+----------+----------+---------+
 * |  64GB   | 0xFFFFF  | 0x3FFFF  | 0xFFFF  |
 * +---------+----------+----------+---------+
 * |  512GB  | 0x7FFFFF | 0x1FFFFF | 0x7FFFF |
 * +---------+----------+----------+---------+
 */
static uint64_t gpt_l1_index_mask;

/* Number of 128-bit L1 entries in 2MB, 32MB and 512MB */
#define L1_QWORDS_2MB	(gpt_l1_cnt_2mb / 2U)
#define L1_QWORDS_32MB	(L1_QWORDS_2MB * 16U)
#define L1_QWORDS_512MB	(L1_QWORDS_32MB * 16U)

/* Size in bytes of L1 entries in 2MB, 32MB */
#define L1_BYTES_2MB	(gpt_l1_cnt_2mb * sizeof(uint64_t))
#define L1_BYTES_32MB	(L1_BYTES_2MB * 16U)

/* Get the index into the L1 table from a physical address */
#define GPT_L1_INDEX(_pa) \
	(((_pa) >> (unsigned int)GPT_L1_IDX_SHIFT(gpt_config.p)) & gpt_l1_index_mask)

/* These variables are used during initialization of the L1 tables */
static uintptr_t gpt_l1_tbl;

/* This variable is used during runtime */

/* Bitlock base address for each 512 MB block of PPS */
static bitlock_t *gpt_bitlock_base;
133
/*
 * Invalidate TLB entries for the page containing 'base' by PA, Outer
 * Shareable, then issue a DSB OSH to complete the invalidation.
 */
static void tlbi_page_dsbosh(uintptr_t base)
{
	/*
	 * Look-up table for invalidation TLBs for 4KB, 16KB and 64KB pages.
	 * Entry order follows the GPCCR_EL3.PGS encoding (0b00 = 4KB,
	 * 0b01 = 64KB, 0b10 = 16KB), matching gpt_p_lookup above, so
	 * gpt_config.pgs can index it directly.
	 */
	static const gpt_tlbi_lookup_t tlbi_page_lookup[] = {
		{ tlbirpalos_4k, ~(SZ_4K - 1UL) },
		{ tlbirpalos_64k, ~(SZ_64K - 1UL) },
		{ tlbirpalos_16k, ~(SZ_16K - 1UL) }
	};

	/* Mask 'base' down to the page boundary for the configured PGS */
	tlbi_page_lookup[gpt_config.pgs].function(
			base & tlbi_page_lookup[gpt_config.pgs].mask);
	dsbosh();
}
147
148/*
149 * Helper function to fill out GPI entries in a single L1 table
150 * with Granules or Contiguous descriptor.
151 *
152 * Parameters
153 * l1 Pointer to 2MB, 32MB or 512MB aligned L1 table entry to fill out
154 * l1_desc GPT Granules or Contiguous descriptor set this range to
155 * cnt Number of double 128-bit L1 entries to fill
156 *
157 */
158static void fill_desc(uint64_t *l1, uint64_t l1_desc, unsigned int cnt)
159{
160 uint128_t *l1_quad = (uint128_t *)l1;
161 uint128_t l1_quad_desc = (uint128_t)l1_desc | ((uint128_t)l1_desc << 64);
162
163 VERBOSE("GPT: %s(%p 0x%"PRIx64" %u)\n", __func__, l1, l1_desc, cnt);
164
165 for (unsigned int i = 0U; i < cnt; i++) {
166 *l1_quad++ = l1_quad_desc;
167 }
168}
169
/*
 * Convert a 2MB Contiguous block into individual Granules descriptors.
 *
 * Parameters
 *   base	Physical address within the 2MB block being shattered
 *   gpi_info	Lookup info for the L1 table covering 'base'
 *   l1_desc	Granules descriptor to fill the block with
 */
static void shatter_2mb(uintptr_t base, const gpi_info_t *gpi_info,
			uint64_t l1_desc)
{
	/* Index of the first L1 entry of the 2MB-aligned block */
	unsigned long idx = GPT_L1_INDEX(ALIGN_2MB(base));

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
		__func__, base, l1_desc);

	/* Convert 2MB Contiguous block to Granules */
	fill_desc(&gpi_info->gpt_l1_addr[idx], l1_desc, L1_QWORDS_2MB);
}
181
/*
 * Convert a 32MB Contiguous block into sixteen 2MB blocks: the 2MB block
 * containing 'base' is filled with Granules descriptors, while the other
 * fifteen are filled with 2MB Contiguous descriptors.
 *
 * Parameters
 *   base	Physical address within the 32MB block being shattered
 *   gpi_info	Lookup info for the L1 table covering 'base'
 *   l1_desc	Granules descriptor for the 2MB block containing 'base'
 */
static void shatter_32mb(uintptr_t base, const gpi_info_t *gpi_info,
			 uint64_t l1_desc)
{
	/* First L1 entry of the 2MB block that must become Granules */
	unsigned long idx = GPT_L1_INDEX(ALIGN_2MB(base));
	const uint64_t *l1_gran = &gpi_info->gpt_l1_addr[idx];
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);
	uint64_t *l1;

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
		__func__, base, l1_desc);

	/* Get index corresponding to 32MB aligned address */
	idx = GPT_L1_INDEX(ALIGN_32MB(base));
	l1 = &gpi_info->gpt_l1_addr[idx];

	/* 16 x 2MB blocks in 32MB */
	for (unsigned int i = 0U; i < 16U; i++) {
		/* Fill with Granules or Contiguous descriptors */
		fill_desc(l1, (l1 == l1_gran) ? l1_desc : l1_cont_desc,
			  L1_QWORDS_2MB);
		l1 = (uint64_t *)((uintptr_t)l1 + L1_BYTES_2MB);
	}
}
205
/*
 * Convert a 512MB Contiguous block into sixteen 32MB blocks: the 32MB block
 * containing 'base' is shattered further via shatter_32mb(), while the other
 * fifteen are filled with 32MB Contiguous descriptors.
 *
 * Parameters
 *   base	Physical address within the 512MB block being shattered
 *   gpi_info	Lookup info for the L1 table covering 'base'
 *   l1_desc	Granules descriptor for the 2MB block containing 'base'
 */
static void shatter_512mb(uintptr_t base, const gpi_info_t *gpi_info,
			  uint64_t l1_desc)
{
	/* First L1 entry of the 32MB block that must be shattered further */
	unsigned long idx = GPT_L1_INDEX(ALIGN_32MB(base));
	const uint64_t *l1_32mb = &gpi_info->gpt_l1_addr[idx];
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);
	uint64_t *l1;

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
		__func__, base, l1_desc);

	/* Get index corresponding to 512MB aligned address */
	idx = GPT_L1_INDEX(ALIGN_512MB(base));
	l1 = &gpi_info->gpt_l1_addr[idx];

	/* 16 x 32MB blocks in 512MB */
	for (unsigned int i = 0U; i < 16U; i++) {
		if (l1 == l1_32mb) {
			/* Shatter this 32MB block */
			shatter_32mb(base, gpi_info, l1_desc);
		} else {
			/* Fill 32MB with Contiguous descriptors */
			fill_desc(l1, l1_cont_desc, L1_QWORDS_32MB);
		}

		l1 = (uint64_t *)((uintptr_t)l1 + L1_BYTES_32MB);
	}
}
234
johpow01f19dc622021-06-16 17:57:28 -0500235/*
236 * This function checks to see if a GPI value is valid.
237 *
238 * These are valid GPI values.
239 * GPT_GPI_NO_ACCESS U(0x0)
240 * GPT_GPI_SECURE U(0x8)
241 * GPT_GPI_NS U(0x9)
242 * GPT_GPI_ROOT U(0xA)
243 * GPT_GPI_REALM U(0xB)
244 * GPT_GPI_ANY U(0xF)
245 *
246 * Parameters
247 * gpi GPI to check for validity.
248 *
249 * Return
250 * true for a valid GPI, false for an invalid one.
251 */
AlexeiFedorov20e26832024-03-13 13:59:09 +0000252static bool is_gpi_valid(unsigned int gpi)
johpow01f19dc622021-06-16 17:57:28 -0500253{
254 if ((gpi == GPT_GPI_NO_ACCESS) || (gpi == GPT_GPI_ANY) ||
255 ((gpi >= GPT_GPI_SECURE) && (gpi <= GPT_GPI_REALM))) {
256 return true;
johpow01f19dc622021-06-16 17:57:28 -0500257 }
Robert Wakim6a00e9b2021-10-21 15:39:56 +0100258 return false;
johpow01f19dc622021-06-16 17:57:28 -0500259}
260
/*
 * This function checks to see if two PAS regions overlap.
 *
 * Parameters
 *   base_1: base address of first PAS
 *   size_1: size of first PAS
 *   base_2: base address of second PAS
 *   size_2: size of second PAS
 *
 * Return
 *   True if PAS regions overlap, false if they do not.
 */
static bool check_pas_overlap(uintptr_t base_1, size_t size_1,
			      uintptr_t base_2, size_t size_2)
{
	/* Two half-open ranges intersect iff each starts before the other ends */
	uintptr_t end_1 = base_1 + size_1;
	uintptr_t end_2 = base_2 + size_2;

	return (end_1 > base_2) && (end_2 > base_1);
}
281
/*
 * This helper function checks to see if a PAS region from index 0 to
 * (pas_idx - 1) occupies the L0 region at index l0_idx in the L0 table.
 *
 * Parameters
 *   l0_idx:      Index of the L0 entry to check
 *   pas_regions: PAS region array
 *   pas_idx:     Upper bound of the PAS array index.
 *
 * Return
 *   True if a PAS region occupies the L0 region in question, false if not.
 */
static bool does_previous_pas_exist_here(unsigned int l0_idx,
					 pas_region_t *pas_regions,
					 unsigned int pas_idx)
{
	/* Iterate over PAS regions up to pas_idx */
	for (unsigned int i = 0U; i < pas_idx; i++) {
		/*
		 * Treat the L0 region as an address range of size
		 * GPT_L0GPTSZ_ACTUAL_SIZE and test it against each PAS.
		 */
		if (check_pas_overlap((GPT_L0GPTSZ_ACTUAL_SIZE * l0_idx),
		    GPT_L0GPTSZ_ACTUAL_SIZE,
		    pas_regions[i].base_pa, pas_regions[i].size)) {
			return true;
		}
	}
	return false;
}
308
/*
 * This function iterates over all of the PAS regions and checks them to ensure
 * proper alignment of base and size, that the GPI is valid, and that no regions
 * overlap. As a part of the overlap checks, this function checks existing L0
 * mappings against the new PAS regions in the event that gpt_init_pas_l1_tables
 * is called multiple times to place L1 tables in different areas of memory. It
 * also counts the number of L1 tables needed and returns it on success.
 *
 * Parameters
 *   *pas_regions	Pointer to array of PAS region structures.
 *   pas_region_cnt	Total number of PAS regions in the array.
 *
 * Return
 *   Negative Linux error code in the event of a failure, number of L1 regions
 *   required when successful.
 */
static int validate_pas_mappings(pas_region_t *pas_regions,
				 unsigned int pas_region_cnt)
{
	unsigned int idx;
	unsigned int l1_cnt = 0U;
	unsigned int pas_l1_cnt;
	uint64_t *l0_desc = (uint64_t *)gpt_config.plat_gpt_l0_base;

	assert(pas_regions != NULL);
	assert(pas_region_cnt != 0U);

	for (idx = 0U; idx < pas_region_cnt; idx++) {
		/* Check for arithmetic overflow in region */
		if ((ULONG_MAX - pas_regions[idx].base_pa) <
		    pas_regions[idx].size) {
			ERROR("GPT: Address overflow in PAS[%u]!\n", idx);
			return -EOVERFLOW;
		}

		/* Initial checks for PAS validity */
		if (((pas_regions[idx].base_pa + pas_regions[idx].size) >
		    GPT_PPS_ACTUAL_SIZE(gpt_config.t)) ||
		    !is_gpi_valid(GPT_PAS_ATTR_GPI(pas_regions[idx].attrs))) {
			ERROR("GPT: PAS[%u] is invalid!\n", idx);
			return -EFAULT;
		}

		/*
		 * Make sure this PAS does not overlap with another one. We
		 * start from idx + 1 instead of 0 since prior PAS mappings will
		 * have already checked themselves against this one.
		 */
		for (unsigned int i = idx + 1U; i < pas_region_cnt; i++) {
			if (check_pas_overlap(pas_regions[idx].base_pa,
			    pas_regions[idx].size,
			    pas_regions[i].base_pa,
			    pas_regions[i].size)) {
				ERROR("GPT: PAS[%u] overlaps with PAS[%u]\n",
					i, idx);
				return -EFAULT;
			}
		}

		/*
		 * Since this function can be called multiple times with
		 * separate L1 tables we need to check the existing L0 mapping
		 * to see if this PAS would fall into one that has already been
		 * initialized.
		 */
		for (unsigned int i =
		     (unsigned int)GPT_L0_IDX(pas_regions[idx].base_pa);
		     i <= GPT_L0_IDX(pas_regions[idx].base_pa +
				     pas_regions[idx].size - 1UL);
		     i++) {
			/*
			 * A Block descriptor with GPI_ANY is the reset state,
			 * meaning no prior call has claimed this L0 region.
			 */
			if ((GPT_L0_TYPE(l0_desc[i]) == GPT_L0_TYPE_BLK_DESC) &&
			    (GPT_L0_BLKD_GPI(l0_desc[i]) == GPT_GPI_ANY)) {
				/* This descriptor is unused so continue */
				continue;
			}

			/*
			 * This descriptor has been initialized in a previous
			 * call to this function so cannot be initialized again.
			 */
			ERROR("GPT: PAS[%u] overlaps with previous L0[%u]!\n",
			      idx, i);
			return -EFAULT;
		}

		/* Check for block mapping (L0) type */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
			/* Make sure base and size are block-aligned */
			if (!GPT_IS_L0_ALIGNED(pas_regions[idx].base_pa) ||
			    !GPT_IS_L0_ALIGNED(pas_regions[idx].size)) {
				ERROR("GPT: PAS[%u] is not block-aligned!\n",
				      idx);
				return -EFAULT;
			}

			/* Block mappings need no L1 tables */
			continue;
		}

		/* Check for granule mapping (L1) type */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_GRANULE) {
			/* Make sure base and size are granule-aligned */
			if (!GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].base_pa) ||
			    !GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].size)) {
				ERROR("GPT: PAS[%u] is not granule-aligned!\n",
				      idx);
				return -EFAULT;
			}

			/* Find how many L1 tables this PAS occupies */
			pas_l1_cnt = (GPT_L0_IDX(pas_regions[idx].base_pa +
				     pas_regions[idx].size - 1UL) -
				     GPT_L0_IDX(pas_regions[idx].base_pa) + 1U);

			/*
			 * This creates a situation where, if multiple PAS
			 * regions occupy the same table descriptor, we can get
			 * an artificially high total L1 table count. The way we
			 * handle this is by checking each PAS against those
			 * before it in the array, and if they both occupy the
			 * same PAS we subtract from pas_l1_cnt and only the
			 * first PAS in the array gets to count it.
			 */

			/*
			 * If L1 count is greater than 1 we know the start and
			 * end PAs are in different L0 regions so we must check
			 * both for overlap against other PAS.
			 */
			if (pas_l1_cnt > 1) {
				if (does_previous_pas_exist_here(
				    GPT_L0_IDX(pas_regions[idx].base_pa +
				    pas_regions[idx].size - 1UL),
				    pas_regions, idx)) {
					pas_l1_cnt--;
				}
			}

			if (does_previous_pas_exist_here(
			    GPT_L0_IDX(pas_regions[idx].base_pa),
			    pas_regions, idx)) {
				pas_l1_cnt--;
			}

			l1_cnt += pas_l1_cnt;
			continue;
		}

		/* If execution reaches this point, mapping type is invalid */
		ERROR("GPT: PAS[%u] has invalid mapping type 0x%x.\n", idx,
		      GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));
		return -EINVAL;
	}

	return l1_cnt;
}
466
467/*
468 * This function validates L0 initialization parameters.
469 *
470 * Parameters
471 * l0_mem_base Base address of memory used for L0 tables.
472 * l1_mem_size Size of memory available for L0 tables.
473 *
474 * Return
475 * Negative Linux error code in the event of a failure, 0 for success.
476 */
AlexeiFedorov20e26832024-03-13 13:59:09 +0000477static int validate_l0_params(gpccr_pps_e pps, uintptr_t l0_mem_base,
478 size_t l0_mem_size)
johpow01f19dc622021-06-16 17:57:28 -0500479{
AlexeiFedorovec0088b2024-03-13 17:07:03 +0000480 size_t l0_alignment, locks_size;
johpow01f19dc622021-06-16 17:57:28 -0500481
482 /*
483 * Make sure PPS is valid and then store it since macros need this value
484 * to work.
485 */
486 if (pps > GPT_PPS_MAX) {
AlexeiFedorovb99926e2024-03-13 15:18:02 +0000487 ERROR("GPT: Invalid PPS: 0x%x\n", pps);
johpow01f19dc622021-06-16 17:57:28 -0500488 return -EINVAL;
489 }
490 gpt_config.pps = pps;
491 gpt_config.t = gpt_t_lookup[pps];
492
AlexeiFedorovb99926e2024-03-13 15:18:02 +0000493 /* Alignment must be the greater of 4KB or l0 table size */
johpow01f19dc622021-06-16 17:57:28 -0500494 l0_alignment = PAGE_SIZE_4KB;
495 if (l0_alignment < GPT_L0_TABLE_SIZE(gpt_config.t)) {
496 l0_alignment = GPT_L0_TABLE_SIZE(gpt_config.t);
497 }
498
AlexeiFedorovb99926e2024-03-13 15:18:02 +0000499 /* Check base address */
500 if ((l0_mem_base == 0UL) ||
501 ((l0_mem_base & (l0_alignment - 1UL)) != 0UL)) {
502 ERROR("GPT: Invalid L0 base address: 0x%lx\n", l0_mem_base);
johpow01f19dc622021-06-16 17:57:28 -0500503 return -EFAULT;
504 }
505
AlexeiFedorovb99926e2024-03-13 15:18:02 +0000506 /* Check size */
johpow01f19dc622021-06-16 17:57:28 -0500507 if (l0_mem_size < GPT_L0_TABLE_SIZE(gpt_config.t)) {
AlexeiFedorovec0088b2024-03-13 17:07:03 +0000508 ERROR("%sL0%s\n", (const char *)"GPT: Inadequate ",
509 (const char *)" memory\n");
AlexeiFedorovb99926e2024-03-13 15:18:02 +0000510 ERROR(" Expected 0x%lx bytes, got 0x%lx bytes\n",
AlexeiFedorovec0088b2024-03-13 17:07:03 +0000511 GPT_L0_TABLE_SIZE(gpt_config.t), l0_mem_size);
512 return -ENOMEM;
513 }
514
515 /*
516 * Size of bitlocks in bytes for the protected address space
517 * with 512MB per bitlock.
518 */
519 locks_size = GPT_PPS_ACTUAL_SIZE(gpt_config.t) / (SZ_512M * 8U);
520
521 /* Check space for bitlocks */
522 if (locks_size > (l0_mem_size - GPT_L0_TABLE_SIZE(gpt_config.t))) {
523 ERROR("%sbitlock%s", (const char *)"GPT: Inadequate ",
524 (const char *)" memory\n");
525 ERROR(" Expected 0x%lx bytes, got 0x%lx bytes\n",
526 locks_size,
527 l0_mem_size - GPT_L0_TABLE_SIZE(gpt_config.t));
johpow01f19dc622021-06-16 17:57:28 -0500528 return -ENOMEM;
529 }
530
531 return 0;
532}
533
/*
 * In the event that L1 tables are needed, this function validates
 * the L1 table generation parameters.
 *
 * Parameters
 *   l1_mem_base	Base address of memory used for L1 table allocation.
 *   l1_mem_size	Total size of memory available for L1 tables.
 *   l1_gpt_cnt		Number of L1 tables needed.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
static int validate_l1_params(uintptr_t l1_mem_base, size_t l1_mem_size,
			      unsigned int l1_gpt_cnt)
{
	size_t l1_gpt_mem_sz;

	/* Check if the granularity is supported */
	if (!xlat_arch_is_granule_size_supported(
	    GPT_PGS_ACTUAL_SIZE(gpt_config.p))) {
		return -EPERM;
	}

	/* Make sure L1 tables are aligned to their size */
	if ((l1_mem_base & (GPT_L1_TABLE_SIZE(gpt_config.p) - 1UL)) != 0UL) {
		ERROR("GPT: Unaligned L1 GPT base address: 0x%"PRIxPTR"\n",
		      l1_mem_base);
		return -EFAULT;
	}

	/* Get total memory needed for L1 tables */
	l1_gpt_mem_sz = l1_gpt_cnt * GPT_L1_TABLE_SIZE(gpt_config.p);

	/* Check for multiplication overflow by dividing back */
	if ((l1_gpt_mem_sz / GPT_L1_TABLE_SIZE(gpt_config.p)) != l1_gpt_cnt) {
		ERROR("GPT: Overflow calculating L1 memory size\n");
		return -ENOMEM;
	}

	/* Make sure enough space was supplied */
	if (l1_mem_size < l1_gpt_mem_sz) {
		ERROR("%sL1 GPTs%s", (const char *)"GPT: Inadequate ",
		      (const char *)" memory\n");
		ERROR("      Expected 0x%lx bytes, got 0x%lx bytes\n",
		      l1_gpt_mem_sz, l1_mem_size);
		return -ENOMEM;
	}

	VERBOSE("GPT: Requested 0x%lx bytes for L1 GPTs\n", l1_gpt_mem_sz);
	return 0;
}
585
/*
 * This function initializes L0 block descriptors (regions that cannot be
 * transitioned at the granule level) according to the provided PAS.
 *
 * Parameters
 *   *pas	Pointer to the structure defining the PAS region to
 *		initialize.
 */
static void generate_l0_blk_desc(pas_region_t *pas)
{
	uint64_t gpt_desc;
	unsigned long idx, end_idx;
	uint64_t *l0_gpt_arr;

	assert(gpt_config.plat_gpt_l0_base != 0U);
	assert(pas != NULL);

	/*
	 * Checking of PAS parameters has already been done in
	 * validate_pas_mappings so no need to check the same things again.
	 */

	l0_gpt_arr = (uint64_t *)gpt_config.plat_gpt_l0_base;

	/* Create the GPT Block descriptor for this PAS region */
	gpt_desc = GPT_L0_BLK_DESC(GPT_PAS_ATTR_GPI(pas->attrs));

	/* Start index of this region in L0 GPTs */
	idx = GPT_L0_IDX(pas->base_pa);

	/*
	 * Determine number of L0 GPT descriptors covered by
	 * this PAS region and use the count to populate these
	 * descriptors.
	 */
	end_idx = GPT_L0_IDX(pas->base_pa + pas->size);

	/* Generate the needed block descriptors */
	for (; idx < end_idx; idx++) {
		l0_gpt_arr[idx] = gpt_desc;
		VERBOSE("GPT: L0 entry (BLOCK) index %lu [%p]: GPI = 0x%"PRIx64" (0x%"PRIx64")\n",
			idx, &l0_gpt_arr[idx],
			(gpt_desc >> GPT_L0_BLK_DESC_GPI_SHIFT) &
			GPT_L0_BLK_DESC_GPI_MASK, l0_gpt_arr[idx]);
	}
}
632
633/*
634 * Helper function to determine if the end physical address lies in the same L0
635 * region as the current physical address. If true, the end physical address is
636 * returned else, the start address of the next region is returned.
637 *
638 * Parameters
639 * cur_pa Physical address of the current PA in the loop through
640 * the range.
641 * end_pa Physical address of the end PA in a PAS range.
642 *
643 * Return
644 * The PA of the end of the current range.
645 */
AlexeiFedorov20e26832024-03-13 13:59:09 +0000646static uintptr_t get_l1_end_pa(uintptr_t cur_pa, uintptr_t end_pa)
johpow01f19dc622021-06-16 17:57:28 -0500647{
648 uintptr_t cur_idx;
649 uintptr_t end_idx;
650
Robert Wakim6a00e9b2021-10-21 15:39:56 +0100651 cur_idx = GPT_L0_IDX(cur_pa);
652 end_idx = GPT_L0_IDX(end_pa);
johpow01f19dc622021-06-16 17:57:28 -0500653
654 assert(cur_idx <= end_idx);
655
656 if (cur_idx == end_idx) {
657 return end_pa;
658 }
659
AlexeiFedorovec0088b2024-03-13 17:07:03 +0000660 return (cur_idx + 1UL) << GPT_L0_IDX_SHIFT;
johpow01f19dc622021-06-16 17:57:28 -0500661}
662
/*
 * Helper function to fill out GPI entries from 'first' granule address of
 * the specified 'length' in a single L1 table with 'l1_desc' Contiguous
 * descriptor.
 *
 * Parameters
 *   l1		Pointer to L1 table to fill out
 *   first	Address of first granule in range
 *   length	Length of the range in bytes
 *   gpi	GPI set this range to
 *
 * Return
 *   Address of first granule not covered by the descriptors written here.
 */
static uintptr_t fill_l1_cont_desc(uint64_t *l1, uintptr_t first,
				   size_t length, unsigned int gpi)
{
	/*
	 * Look up table for contiguous blocks and descriptors.
	 * Entries should be defined in descending block sizes:
	 * 512MB, 32MB and 2MB.
	 * Which entries exist depends on the RME_GPT_MAX_BLOCK build option.
	 */
	static const gpt_fill_lookup_t gpt_fill_lookup[] = {
#if (RME_GPT_MAX_BLOCK == 512)
		{ SZ_512M, GPT_L1_CONT_DESC_512MB },
#endif
#if (RME_GPT_MAX_BLOCK >= 32)
		{ SZ_32M, GPT_L1_CONT_DESC_32MB },
#endif
#if (RME_GPT_MAX_BLOCK != 0)
		{ SZ_2M, GPT_L1_CONT_DESC_2MB }
#endif
	};

	/*
	 * Iterate through all block sizes (512MB, 32MB and 2MB)
	 * starting with maximum supported.
	 * NOTE(review): each block size is applied at most once per call;
	 * the caller loops until the range is exhausted.
	 */
	for (unsigned long i = 0UL; i < ARRAY_SIZE(gpt_fill_lookup); i++) {
		/* Calculate index */
		unsigned long idx = GPT_L1_INDEX(first);

		/* Contiguous block size */
		size_t cont_size = gpt_fill_lookup[i].size;

		if (GPT_REGION_IS_CONT(length, first, cont_size)) {

			/* Generate Contiguous descriptor */
			uint64_t l1_desc = GPT_L1_GPI_CONT_DESC(gpi,
						gpt_fill_lookup[i].desc);

			/* Number of 128-bit L1 entries in block */
			unsigned int cnt;

			switch (cont_size) {
			case SZ_512M:
				cnt = L1_QWORDS_512MB;
				break;
			case SZ_32M:
				cnt = L1_QWORDS_32MB;
				break;
			default:			/* SZ_2MB */
				cnt = L1_QWORDS_2MB;
			}

			VERBOSE("GPT: Contiguous descriptor 0x%"PRIxPTR" %luMB\n",
				first, cont_size / SZ_1M);

			/* Fill Contiguous descriptors */
			fill_desc(&l1[idx], l1_desc, cnt);
			first += cont_size;
			length -= cont_size;

			if (length == 0UL) {
				break;
			}
		}
	}

	return first;
}
744
/* Build Granules descriptor with the same 'gpi' for every GPI entry */
static uint64_t build_l1_desc(unsigned int gpi)
{
	/* Start with the GPI replicated into the two lowest nibbles */
	uint64_t desc = (uint64_t)gpi | ((uint64_t)gpi << 4);

	/* Double the populated width each pass: 8 -> 16 -> 32 -> 64 bits */
	for (unsigned int shift = 8U; shift <= 32U; shift <<= 1) {
		desc |= (desc << shift);
	}

	return desc;
}
754
/*
 * Helper function to fill out GPI entries from 'first' to 'last' granule
 * address in a single L1 table with 'l1_desc' Granules descriptor.
 *
 * Each 64-bit L1 entry packs 16 4-bit GPI fields; gpi_mask selects which
 * fields of an entry are rewritten so partial entries at either end of the
 * range are handled correctly.
 *
 * Parameters
 *   l1		Pointer to L1 table to fill out
 *   first	Address of first granule in range
 *   last	Address of last granule in range (inclusive)
 *   gpi	GPI set this range to
 *
 * Return
 *   Address of next granule in range.
 */
static uintptr_t fill_l1_gran_desc(uint64_t *l1, uintptr_t first,
				   uintptr_t last, unsigned int gpi)
{
	uint64_t gpi_mask;
	unsigned long i;

	/* Generate Granules descriptor */
	uint64_t l1_desc = build_l1_desc(gpi);

	/* Shift the mask if we're starting in the middle of an L1 entry */
	gpi_mask = ULONG_MAX << (GPT_L1_GPI_IDX(gpt_config.p, first) << 2);

	/* Fill out each L1 entry for this region */
	for (i = GPT_L1_INDEX(first); i <= GPT_L1_INDEX(last); i++) {

		/* Account for stopping in the middle of an L1 entry */
		if (i == GPT_L1_INDEX(last)) {
			gpi_mask &= (gpi_mask >> ((15U -
				GPT_L1_GPI_IDX(gpt_config.p, last)) << 2));
		}

		/* The fields being overwritten must still hold GPI_ANY */
		assert((l1[i] & gpi_mask) == (GPT_L1_ANY_DESC & gpi_mask));

		/* Write GPI values */
		l1[i] = (l1[i] & ~gpi_mask) | (l1_desc & gpi_mask);

		/* Reset mask */
		gpi_mask = ULONG_MAX;
	}

	return last + GPT_PGS_ACTUAL_SIZE(gpt_config.p);
}
800
/*
 * Helper function to fill out GPI entries in a single L1 table.
 * This function fills out an entire L1 table with either Contiguous
 * or Granules descriptors depending on region length and alignment.
 *
 * Parameters
 *   l1		Pointer to L1 table to fill out
 *   first	Address of first granule in range
 *   last	Address of last granule in range (inclusive)
 *   gpi	GPI set this range to
 */
static void fill_l1_tbl(uint64_t *l1, uintptr_t first, uintptr_t last,
			unsigned int gpi)
{
	assert(l1 != NULL);
	assert(first <= last);
	/* Both ends must be granule-aligned and within the same L0 region */
	assert((first & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) == 0UL);
	assert((last & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) == 0UL);
	assert(GPT_L0_IDX(first) == GPT_L0_IDX(last));

	while (first < last) {
		/* Region length, inclusive of the 'last' granule */
		size_t length = last - first + GPT_PGS_ACTUAL_SIZE(gpt_config.p);

		if (length < SZ_2M) {
			/*
			 * Fill with Granule descriptor in case of
			 * region length < 2MB.
			 */
			first = fill_l1_gran_desc(l1, first, last, gpi);

		} else if ((first & (SZ_2M - UL(1))) == UL(0)) {
			/*
			 * For region length >= 2MB and at least 2MB aligned
			 * call to fill_l1_cont_desc will iterate through
			 * all block sizes (512MB, 32MB and 2MB) supported and
			 * fill corresponding Contiguous descriptors.
			 */
			first = fill_l1_cont_desc(l1, first, length, gpi);
		} else {
			/*
			 * For not aligned region >= 2MB fill with Granules
			 * descriptors up to the next 2MB aligned address.
			 */
			uintptr_t new_last = ALIGN_2MB(first + SZ_2M) -
					     GPT_PGS_ACTUAL_SIZE(gpt_config.p);

			first = fill_l1_gran_desc(l1, first, new_last, gpi);
		}
	}

	/* The entire inclusive range must now be covered */
	assert(first == (last + GPT_PGS_ACTUAL_SIZE(gpt_config.p)));
}
854
855/*
856 * This function finds the next available unused L1 table and initializes all
857 * granules descriptor entries to GPI_ANY. This ensures that there are no chunks
858 * of GPI_NO_ACCESS (0b0000) memory floating around in the system in the
859 * event that a PAS region stops midway through an L1 table, thus guaranteeing
860 * that all memory not explicitly assigned is GPI_ANY. This function does not
861 * check for overflow conditions, that should be done by the caller.
862 *
863 * Return
864 * Pointer to the next available L1 table.
865 */
AlexeiFedorov20e26832024-03-13 13:59:09 +0000866static uint64_t *get_new_l1_tbl(void)
johpow01f19dc622021-06-16 17:57:28 -0500867{
AlexeiFedorovb99926e2024-03-13 15:18:02 +0000868 /* Retrieve the next L1 table */
AlexeiFedorovec0088b2024-03-13 17:07:03 +0000869 uint64_t *l1 = (uint64_t *)gpt_l1_tbl;
johpow01f19dc622021-06-16 17:57:28 -0500870
AlexeiFedorovec0088b2024-03-13 17:07:03 +0000871 /* Increment L1 GPT address */
872 gpt_l1_tbl += GPT_L1_TABLE_SIZE(gpt_config.p);
johpow01f19dc622021-06-16 17:57:28 -0500873
874 /* Initialize all GPIs to GPT_GPI_ANY */
875 for (unsigned int i = 0U; i < GPT_L1_ENTRY_COUNT(gpt_config.p); i++) {
AlexeiFedorovec0088b2024-03-13 17:07:03 +0000876 l1[i] = GPT_L1_ANY_DESC;
johpow01f19dc622021-06-16 17:57:28 -0500877 }
878
879 return l1;
880}
881
/*
 * When L1 tables are needed, this function creates the necessary L0 table
 * descriptors and fills out the L1 table entries according to the supplied
 * PAS range.
 *
 * Existing L0 table descriptors are reused; a new L1 table is allocated only
 * for L0 entries that are still block descriptors.
 *
 * Parameters
 *   *pas		Pointer to the structure defining the PAS region.
 */
static void generate_l0_tbl_desc(pas_region_t *pas)
{
	uintptr_t end_pa;
	uintptr_t cur_pa;
	uintptr_t last_gran_pa;
	uint64_t *l0_gpt_base;
	uint64_t *l1_gpt_arr;
	unsigned int l0_idx, gpi;

	assert(gpt_config.plat_gpt_l0_base != 0U);
	assert(pas != NULL);

	/*
	 * Checking of PAS parameters has already been done in
	 * validate_pas_mappings so no need to check the same things again.
	 */
	end_pa = pas->base_pa + pas->size;
	l0_gpt_base = (uint64_t *)gpt_config.plat_gpt_l0_base;

	/* We start working from the granule at base PA */
	cur_pa = pas->base_pa;

	/* Get GPI from the PAS attributes, applied to the whole region */
	gpi = GPT_PAS_ATTR_GPI(pas->attrs);

	/*
	 * Iterate over each L0 region in this memory range. 'end_pa - 1UL'
	 * keeps the loop bound inclusive of the last granule.
	 */
	for (l0_idx = (unsigned int)GPT_L0_IDX(pas->base_pa);
	     l0_idx <= (unsigned int)GPT_L0_IDX(end_pa - 1UL);
	     l0_idx++) {
		/*
		 * See if the L0 entry is already a table descriptor or if we
		 * need to create one.
		 */
		if (GPT_L0_TYPE(l0_gpt_base[l0_idx]) == GPT_L0_TYPE_TBL_DESC) {
			/* Get the L1 array from the L0 entry */
			l1_gpt_arr = GPT_L0_TBLD_ADDR(l0_gpt_base[l0_idx]);
		} else {
			/* Get a new L1 table from the L1 memory space */
			l1_gpt_arr = get_new_l1_tbl();

			/* Fill out the L0 descriptor and flush it */
			l0_gpt_base[l0_idx] = GPT_L0_TBL_DESC(l1_gpt_arr);
		}

		VERBOSE("GPT: L0 entry (TABLE) index %u [%p] ==> L1 Addr %p (0x%"PRIx64")\n",
			l0_idx, &l0_gpt_base[l0_idx], l1_gpt_arr, l0_gpt_base[l0_idx]);

		/*
		 * Determine the PA of the last granule in this L0 descriptor.
		 * get_l1_end_pa() returns the first PA past this L1 span, so
		 * step back one granule for the inclusive end address.
		 */
		last_gran_pa = get_l1_end_pa(cur_pa, end_pa) -
			       GPT_PGS_ACTUAL_SIZE(gpt_config.p);

		/*
		 * Fill up L1 GPT entries between these two addresses. This
		 * function needs the addresses of the first granule and last
		 * granule in the range.
		 */
		fill_l1_tbl(l1_gpt_arr, cur_pa, last_gran_pa, gpi);

		/* Advance cur_pa to first granule in next L0 region */
		cur_pa = get_l1_end_pa(cur_pa, end_pa);
	}
}
954
955/*
956 * This function flushes a range of L0 descriptors used by a given PAS region
957 * array. There is a chance that some unmodified L0 descriptors would be flushed
958 * in the case that there are "holes" in an array of PAS regions but overall
959 * this should be faster than individually flushing each modified L0 descriptor
960 * as they are created.
961 *
962 * Parameters
963 * *pas Pointer to an array of PAS regions.
964 * pas_count Number of entries in the PAS array.
965 */
966static void flush_l0_for_pas_array(pas_region_t *pas, unsigned int pas_count)
967{
AlexeiFedorovec0088b2024-03-13 17:07:03 +0000968 unsigned long idx;
969 unsigned long start_idx;
970 unsigned long end_idx;
johpow01f19dc622021-06-16 17:57:28 -0500971 uint64_t *l0 = (uint64_t *)gpt_config.plat_gpt_l0_base;
972
973 assert(pas != NULL);
AlexeiFedorovb99926e2024-03-13 15:18:02 +0000974 assert(pas_count != 0U);
johpow01f19dc622021-06-16 17:57:28 -0500975
AlexeiFedorovb99926e2024-03-13 15:18:02 +0000976 /* Initial start and end values */
johpow01f19dc622021-06-16 17:57:28 -0500977 start_idx = GPT_L0_IDX(pas[0].base_pa);
AlexeiFedorovb99926e2024-03-13 15:18:02 +0000978 end_idx = GPT_L0_IDX(pas[0].base_pa + pas[0].size - 1UL);
johpow01f19dc622021-06-16 17:57:28 -0500979
AlexeiFedorovb99926e2024-03-13 15:18:02 +0000980 /* Find lowest and highest L0 indices used in this PAS array */
AlexeiFedorovec0088b2024-03-13 17:07:03 +0000981 for (idx = 1UL; idx < pas_count; idx++) {
johpow01f19dc622021-06-16 17:57:28 -0500982 if (GPT_L0_IDX(pas[idx].base_pa) < start_idx) {
983 start_idx = GPT_L0_IDX(pas[idx].base_pa);
984 }
AlexeiFedorovb99926e2024-03-13 15:18:02 +0000985 if (GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1UL) > end_idx) {
986 end_idx = GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1UL);
johpow01f19dc622021-06-16 17:57:28 -0500987 }
988 }
989
990 /*
991 * Flush all covered L0 descriptors, add 1 because we need to include
992 * the end index value.
993 */
994 flush_dcache_range((uintptr_t)&l0[start_idx],
AlexeiFedorovec0088b2024-03-13 17:07:03 +0000995 ((end_idx + 1UL) - start_idx) * sizeof(uint64_t));
johpow01f19dc622021-06-16 17:57:28 -0500996}
997
/*
 * Public API to enable granule protection checks once the tables have all been
 * initialized. This function is called at first initialization and then again
 * later during warm boots of CPU cores.
 *
 * The sequence is deliberately two-staged: GPCCR_EL3 is first programmed with
 * GPC disabled, stale TLB/GPT cached state is invalidated, and only then is
 * the GPC enable bit set.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_enable(void)
{
	u_register_t gpccr_el3;

	/*
	 * Granule tables must be initialised before enabling
	 * granule protection.
	 */
	if (gpt_config.plat_gpt_l0_base == 0UL) {
		ERROR("GPT: Tables have not been initialized!\n");
		return -EPERM;
	}

	/* Write the base address of the L0 tables into GPTBR */
	write_gptbr_el3(((gpt_config.plat_gpt_l0_base >> GPTBR_BADDR_VAL_SHIFT)
		>> GPTBR_BADDR_SHIFT) & GPTBR_BADDR_MASK);

	/* GPCCR_EL3.PPS */
	gpccr_el3 = SET_GPCCR_PPS(gpt_config.pps);

	/* GPCCR_EL3.PGS */
	gpccr_el3 |= SET_GPCCR_PGS(gpt_config.pgs);

	/*
	 * Since EL3 maps the L1 region as Inner shareable, use the same
	 * shareability attribute for GPC as well so that
	 * GPC fetches are visible to PEs
	 */
	gpccr_el3 |= SET_GPCCR_SH(GPCCR_SH_IS);

	/* Outer and Inner cacheability set to Normal memory, WB, RA, WA */
	gpccr_el3 |= SET_GPCCR_ORGN(GPCCR_ORGN_WB_RA_WA);
	gpccr_el3 |= SET_GPCCR_IRGN(GPCCR_IRGN_WB_RA_WA);

	/* Prepopulate GPCCR_EL3 but don't enable GPC yet */
	write_gpccr_el3(gpccr_el3);
	isb();

	/* Invalidate any stale TLB entries and any cached register fields */
	tlbipaallos();
	dsb();
	isb();

	/* Enable GPT */
	gpccr_el3 |= GPCCR_GPC_BIT;

	/* TODO: Configure GPCCR_EL3_GPCP for Fault control */
	write_gpccr_el3(gpccr_el3);
	isb();
	tlbipaallos();
	dsb();
	isb();

	return 0;
}
1061
1062/*
1063 * Public API to disable granule protection checks.
1064 */
1065void gpt_disable(void)
1066{
1067 u_register_t gpccr_el3 = read_gpccr_el3();
1068
1069 write_gpccr_el3(gpccr_el3 & ~GPCCR_GPC_BIT);
1070 dsbsy();
1071 isb();
1072}
1073
/*
 * Public API that initializes the entire protected space to GPT_GPI_ANY using
 * the L0 tables (block descriptors). Ideally, this function is invoked prior
 * to DDR discovery and initialization. The MMU must be initialized before
 * calling this function.
 *
 * The bitlocks used for runtime GPI changes are placed immediately after the
 * L0 table in the same memory region and are zero-initialized here.
 *
 * Parameters
 *   pps		PPS value to use for table generation
 *   l0_mem_base	Base address of L0 tables in memory.
 *   l0_mem_size	Total size of memory available for L0 tables.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_init_l0_tables(gpccr_pps_e pps, uintptr_t l0_mem_base,
		       size_t l0_mem_size)
{
	uint64_t gpt_desc;
	size_t locks_size;
	bitlock_t *bit_locks;
	int ret;

	/* Ensure that MMU and Data caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);

	/* Validate other parameters (also records PPS in gpt_config) */
	ret = validate_l0_params(pps, l0_mem_base, l0_mem_size);
	if (ret != 0) {
		return ret;
	}

	/* Create the descriptor to initialize L0 entries with */
	gpt_desc = GPT_L0_BLK_DESC(GPT_GPI_ANY);

	/* Iterate through all L0 entries */
	for (unsigned int i = 0U; i < GPT_L0_REGION_COUNT(gpt_config.t); i++) {
		((uint64_t *)l0_mem_base)[i] = gpt_desc;
	}

	/* Initialise bitlocks at the end of L0 table */
	bit_locks = (bitlock_t *)(l0_mem_base +
				  GPT_L0_TABLE_SIZE(gpt_config.t));

	/* Size of bitlocks in bytes: one lock bit per 512MB of PPS */
	locks_size = GPT_PPS_ACTUAL_SIZE(gpt_config.t) / (SZ_512M * 8U);

	/*
	 * NOTE(review): if locks_size < LOCK_SIZE (small PPS) this loop runs
	 * zero times and no bitlock storage is zeroed here — presumably
	 * validate_l0_params guarantees a minimum, but confirm.
	 */
	for (size_t i = 0UL; i < (locks_size/LOCK_SIZE); i++) {
		bit_locks[i].lock = 0U;
	}

	/* Flush updated L0 tables and bitlocks to memory */
	flush_dcache_range((uintptr_t)l0_mem_base,
			   GPT_L0_TABLE_SIZE(gpt_config.t) + locks_size);

	/* Stash the L0 base address once initial setup is complete */
	gpt_config.plat_gpt_l0_base = l0_mem_base;

	return 0;
}
1133
/*
 * Public API that carves out PAS regions from the L0 tables and builds any L1
 * tables that are needed. This function ideally is run after DDR discovery and
 * initialization. The L0 tables must have already been initialized to GPI_ANY
 * when this function is called.
 *
 * This function can be called multiple times with different L1 memory ranges
 * and PAS regions if it is desirable to place L1 tables in different locations
 * in memory. (ex: you have multiple DDR banks and want to place the L1 tables
 * in the DDR bank that they control).
 *
 * Parameters
 *   pgs		PGS value to use for table generation.
 *   l1_mem_base	Base address of memory used for L1 tables.
 *   l1_mem_size	Total size of memory available for L1 tables.
 *   *pas_regions	Pointer to PAS regions structure array.
 *   pas_count		Total number of PAS regions.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_init_pas_l1_tables(gpccr_pgs_e pgs, uintptr_t l1_mem_base,
			   size_t l1_mem_size, pas_region_t *pas_regions,
			   unsigned int pas_count)
{
	int l1_gpt_cnt, ret;

	/* Ensure that MMU and Data caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);

	/* PGS is needed for validate_pas_mappings so check it now */
	if (pgs > GPT_PGS_MAX) {
		ERROR("GPT: Invalid PGS: 0x%x\n", pgs);
		return -EINVAL;
	}
	gpt_config.pgs = pgs;
	gpt_config.p = gpt_p_lookup[pgs];

	/* Make sure L0 tables have been initialized */
	if (gpt_config.plat_gpt_l0_base == 0U) {
		ERROR("GPT: L0 tables must be initialized first!\n");
		return -EPERM;
	}

	/* Check if L1 GPTs are required and how many */
	l1_gpt_cnt = validate_pas_mappings(pas_regions, pas_count);
	if (l1_gpt_cnt < 0) {
		return l1_gpt_cnt;
	}

	VERBOSE("GPT: %i L1 GPTs requested\n", l1_gpt_cnt);

	/* If L1 tables are needed then validate the L1 parameters */
	if (l1_gpt_cnt > 0) {
		ret = validate_l1_params(l1_mem_base, l1_mem_size,
					 (unsigned int)l1_gpt_cnt);
		if (ret != 0) {
			return ret;
		}

		/* Set up parameters for L1 table generation */
		gpt_l1_tbl = l1_mem_base;
	}

	/* Number of L1 entries in 2MB depends on GPCCR_EL3.PGS value */
	gpt_l1_cnt_2mb = (unsigned int)GPT_L1_ENTRY_COUNT_2MB(gpt_config.p);

	/* Mask for the L1 index field */
	gpt_l1_index_mask = GPT_L1_IDX_MASK(gpt_config.p);

	INFO("GPT: Boot Configuration\n");
	INFO("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
	INFO("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
	INFO("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
	INFO("  PAS count: %u\n", pas_count);
	INFO("  L0 base:   0x%"PRIxPTR"\n", gpt_config.plat_gpt_l0_base);

	/* Generate the tables in memory */
	for (unsigned int idx = 0U; idx < pas_count; idx++) {
		VERBOSE("GPT: PAS[%u]: base 0x%"PRIxPTR"\tsize 0x%lx\tGPI 0x%x\ttype 0x%x\n",
			idx, pas_regions[idx].base_pa, pas_regions[idx].size,
			GPT_PAS_ATTR_GPI(pas_regions[idx].attrs),
			GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));

		/* Check if a block or table descriptor is required */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
			generate_l0_blk_desc(&pas_regions[idx]);

		} else {
			generate_l0_tbl_desc(&pas_regions[idx]);
		}
	}

	/* Flush modified L0 tables */
	flush_l0_for_pas_array(pas_regions, pas_count);

	/* Flush L1 tables if needed */
	if (l1_gpt_cnt > 0) {
		flush_dcache_range(l1_mem_base,
				   GPT_L1_TABLE_SIZE(gpt_config.p) *
				   (size_t)l1_gpt_cnt);
	}

	/* Make sure that all the entries are written to the memory */
	dsbishst();
	tlbipaallos();
	dsb();
	isb();

	return 0;
}
1246
/*
 * Public API to initialize the runtime gpt_config structure based on the values
 * present in the GPTBR_EL3 and GPCCR_EL3 registers. GPT initialization
 * typically happens in a bootloader stage prior to setting up the EL3 runtime
 * environment for the granule transition service so this function detects the
 * initialization from a previous stage. Granule protection checks must be
 * enabled already or this function will return an error.
 *
 * Also derives the runtime helpers (L1 entry count per 2MB, L1 index mask,
 * bitlock base) from the recovered configuration.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_runtime_init(void)
{
	u_register_t reg;

	/* Ensure that MMU and Data caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);

	/* Ensure GPC are already enabled */
	if ((read_gpccr_el3() & GPCCR_GPC_BIT) == 0U) {
		ERROR("GPT: Granule protection checks are not enabled!\n");
		return -EPERM;
	}

	/*
	 * Read the L0 table address from GPTBR, we don't need the L1 base
	 * address since those are included in the L0 tables as needed.
	 */
	reg = read_gptbr_el3();
	gpt_config.plat_gpt_l0_base = ((reg >> GPTBR_BADDR_SHIFT) &
				      GPTBR_BADDR_MASK) <<
				      GPTBR_BADDR_VAL_SHIFT;

	/* Read GPCCR to get PGS and PPS values */
	reg = read_gpccr_el3();
	gpt_config.pps = (reg >> GPCCR_PPS_SHIFT) & GPCCR_PPS_MASK;
	gpt_config.t = gpt_t_lookup[gpt_config.pps];
	gpt_config.pgs = (reg >> GPCCR_PGS_SHIFT) & GPCCR_PGS_MASK;
	gpt_config.p = gpt_p_lookup[gpt_config.pgs];

	/* Number of L1 entries in 2MB depends on GPCCR_EL3.PGS value */
	gpt_l1_cnt_2mb = (unsigned int)GPT_L1_ENTRY_COUNT_2MB(gpt_config.p);

	/* Mask for the L1 index field */
	gpt_l1_index_mask = GPT_L1_IDX_MASK(gpt_config.p);

	/* Bitlocks at the end of L0 table (placed there by gpt_init_l0_tables) */
	gpt_bitlock_base = (bitlock_t *)(gpt_config.plat_gpt_l0_base +
					GPT_L0_TABLE_SIZE(gpt_config.t));

	VERBOSE("GPT: Runtime Configuration\n");
	VERBOSE("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
	VERBOSE("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
	VERBOSE("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
	VERBOSE("  L0 base:   0x%"PRIxPTR"\n", gpt_config.plat_gpt_l0_base);
	VERBOSE("  Bitlocks:  0x%"PRIxPTR"\n", (uintptr_t)gpt_bitlock_base);

	return 0;
}
1306
1307/*
Robert Wakim6a00e9b2021-10-21 15:39:56 +01001308 * A helper to write the value (target_pas << gpi_shift) to the index of
AlexeiFedorovb99926e2024-03-13 15:18:02 +00001309 * the gpt_l1_addr.
Robert Wakim6a00e9b2021-10-21 15:39:56 +01001310 */
1311static inline void write_gpt(uint64_t *gpt_l1_desc, uint64_t *gpt_l1_addr,
1312 unsigned int gpi_shift, unsigned int idx,
1313 unsigned int target_pas)
1314{
1315 *gpt_l1_desc &= ~(GPT_L1_GRAN_DESC_GPI_MASK << gpi_shift);
1316 *gpt_l1_desc |= ((uint64_t)target_pas << gpi_shift);
1317 gpt_l1_addr[idx] = *gpt_l1_desc;
AlexeiFedorovec0088b2024-03-13 17:07:03 +00001318
1319 dsboshst();
Robert Wakim6a00e9b2021-10-21 15:39:56 +01001320}
1321
/*
 * Helper to retrieve the gpt_l1_* information from the base address
 * returned in gpi_info.
 *
 * Fails with -EINVAL if the covering L0 entry is not a table descriptor
 * (i.e. the address has no L1 table and cannot be modified per-granule).
 * On success also computes the bitlock word and bit mask protecting the
 * 512MB block that contains 'base'.
 */
static int get_gpi_params(uint64_t base, gpi_info_t *gpi_info)
{
	uint64_t gpt_l0_desc, *gpt_l0_base;
	unsigned int idx_512;

	gpt_l0_base = (uint64_t *)gpt_config.plat_gpt_l0_base;
	gpt_l0_desc = gpt_l0_base[GPT_L0_IDX(base)];
	if (GPT_L0_TYPE(gpt_l0_desc) != GPT_L0_TYPE_TBL_DESC) {
		VERBOSE("GPT: Granule is not covered by a table descriptor!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		return -EINVAL;
	}

	/* Get the table index and GPI shift from PA */
	gpi_info->gpt_l1_addr = GPT_L0_TBLD_ADDR(gpt_l0_desc);
	gpi_info->idx = (unsigned int)GPT_L1_INDEX(base);
	gpi_info->gpi_shift = GPT_L1_GPI_IDX(gpt_config.p, base) << 2;

	/* 512MB block index */
	idx_512 = (unsigned int)(base / SZ_512M);

	/* Bitlock address and mask: one lock bit per 512MB block */
	gpi_info->lock = &gpt_bitlock_base[idx_512 / LOCK_BITS];
	gpi_info->mask = 1U << (idx_512 & (LOCK_BITS - 1U));

	return 0;
}
1353
1354/*
AlexeiFedorovec0088b2024-03-13 17:07:03 +00001355 * Helper to retrieve the gpt_l1_desc and GPI information from gpi_info.
1356 * This function is called with bitlock acquired.
1357 */
1358static void read_gpi(gpi_info_t *gpi_info)
1359{
1360 gpi_info->gpt_l1_desc = (gpi_info->gpt_l1_addr)[gpi_info->idx];
1361
1362 if ((gpi_info->gpt_l1_desc & GPT_L1_TYPE_CONT_DESC_MASK) ==
1363 GPT_L1_TYPE_CONT_DESC) {
1364 /* Read GPI from Contiguous descriptor */
1365 gpi_info->gpi = (unsigned int)GPT_L1_CONT_GPI(gpi_info->gpt_l1_desc);
1366 } else {
1367 /* Read GPI from Granules descriptor */
1368 gpi_info->gpi = (unsigned int)((gpi_info->gpt_l1_desc >> gpi_info->gpi_shift) &
1369 GPT_L1_GRAN_DESC_GPI_MASK);
1370 }
1371}
1372
1373static void flush_page_to_popa(uintptr_t addr)
1374{
1375 size_t size = GPT_PGS_ACTUAL_SIZE(gpt_config.p);
1376
1377 if (is_feat_mte2_supported()) {
1378 flush_dcache_to_popa_range_mte2(addr, size);
1379 } else {
1380 flush_dcache_to_popa_range(addr, size);
1381 }
1382}
1383
1384/*
1385 * Helper function to check if all L1 entries in 2MB block have
1386 * the same Granules descriptor value.
1387 *
1388 * Parameters
1389 * base Base address of the region to be checked
1390 * gpi_info Pointer to 'gpt_config_t' structure
1391 * l1_desc GPT Granules descriptor with all entries
1392 * set to the same GPI.
1393 *
1394 * Return
1395 * true if L1 all entries have the same descriptor value, false otherwise.
1396 */
1397__unused static bool check_fuse_2mb(uint64_t base, const gpi_info_t *gpi_info,
1398 uint64_t l1_desc)
1399{
1400 /* Last L1 entry index in 2MB block */
1401 unsigned int long idx = GPT_L1_INDEX(ALIGN_2MB(base)) +
1402 gpt_l1_cnt_2mb - 1UL;
1403
1404 /* Number of L1 entries in 2MB block */
1405 unsigned int cnt = gpt_l1_cnt_2mb;
1406
1407 /*
1408 * Start check from the last L1 entry and continue until the first
1409 * non-matching to the passed Granules descriptor value is found.
1410 */
1411 while (cnt-- != 0U) {
1412 if (gpi_info->gpt_l1_addr[idx--] != l1_desc) {
1413 /* Non-matching L1 entry found */
1414 return false;
1415 }
1416 }
1417
1418 return true;
1419}
1420
1421__unused static void fuse_2mb(uint64_t base, const gpi_info_t *gpi_info,
1422 uint64_t l1_desc)
1423{
1424 /* L1 entry index of the start of 2MB block */
1425 unsigned long idx_2 = GPT_L1_INDEX(ALIGN_2MB(base));
1426
1427 /* 2MB Contiguous descriptor */
1428 uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);
1429
1430 VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);
1431
1432 fill_desc(&gpi_info->gpt_l1_addr[idx_2], l1_cont_desc, L1_QWORDS_2MB);
1433}
1434
/*
 * Helper function to check if all 1st L1 entries of 2MB blocks
 * in 32MB have the same 2MB Contiguous descriptor value.
 *
 * Parameters
 *   base	Base address of the region to be checked
 *   gpi_info	Pointer to 'gpt_config_t' structure
 *   l1_desc	GPT Granules descriptor.
 *
 * Return
 *   true if all L1 entries have the same descriptor value, false otherwise.
 */
__unused static bool check_fuse_32mb(uint64_t base, const gpi_info_t *gpi_info,
				     uint64_t l1_desc)
{
	/* The 1st L1 entry index of the last 2MB block in 32MB */
	unsigned long idx = GPT_L1_INDEX(ALIGN_32MB(base)) +
					(15UL * gpt_l1_cnt_2mb);

	/* 2MB Contiguous descriptor */
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);

	/* Number of 2MB blocks in 32MB */
	unsigned int cnt = 16U;

	/*
	 * Set the first L1 entry to 2MB Contiguous descriptor, so that the
	 * 2MB block containing 'base' (about to be fused) compares equal in
	 * the uniform loop below.
	 */
	gpi_info->gpt_l1_addr[GPT_L1_INDEX(ALIGN_2MB(base))] = l1_cont_desc;

	/*
	 * Start check from the 1st L1 entry of the last 2MB block and
	 * continue until the first non-matching to 2MB Contiguous descriptor
	 * value is found.
	 */
	while (cnt-- != 0U) {
		if (gpi_info->gpt_l1_addr[idx] != l1_cont_desc) {
			/* Non-matching L1 entry found */
			return false;
		}
		idx -= gpt_l1_cnt_2mb;
	}

	return true;
}
1478
1479__unused static void fuse_32mb(uint64_t base, const gpi_info_t *gpi_info,
1480 uint64_t l1_desc)
1481{
1482 /* L1 entry index of the start of 32MB block */
1483 unsigned long idx_32 = GPT_L1_INDEX(ALIGN_32MB(base));
1484
1485 /* 32MB Contiguous descriptor */
1486 uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);
1487
1488 VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);
1489
1490 fill_desc(&gpi_info->gpt_l1_addr[idx_32], l1_cont_desc, L1_QWORDS_32MB);
1491}
1492
/*
 * Helper function to check if all 1st L1 entries of 32MB blocks
 * in 512MB have the same 32MB Contiguous descriptor value.
 *
 * Parameters
 *   base	Base address of the region to be checked
 *   gpi_info	Pointer to 'gpt_config_t' structure
 *   l1_desc	GPT Granules descriptor.
 *
 * Return
 *   true if all L1 entries have the same descriptor value, false otherwise.
 */
__unused static bool check_fuse_512mb(uint64_t base, const gpi_info_t *gpi_info,
				      uint64_t l1_desc)
{
	/* The 1st L1 entry index of the last 32MB block in 512MB */
	unsigned long idx = GPT_L1_INDEX(ALIGN_512MB(base)) +
					(15UL * 16UL * gpt_l1_cnt_2mb);

	/* 32MB Contiguous descriptor */
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);

	/* Number of 32MB blocks in 512MB */
	unsigned int cnt = 16U;

	/*
	 * Set the first L1 entry to 32MB Contiguous descriptor (the original
	 * comment said "2MB" but the descriptor written here is the 32MB
	 * one), so that the 32MB block containing 'base' (just fused) compares
	 * equal in the uniform loop below.
	 */
	gpi_info->gpt_l1_addr[GPT_L1_INDEX(ALIGN_32MB(base))] = l1_cont_desc;

	/*
	 * Start check from the 1st L1 entry of the last 32MB block and
	 * continue until the first non-matching to 32MB Contiguous descriptor
	 * value is found.
	 */
	while (cnt-- != 0U) {
		if (gpi_info->gpt_l1_addr[idx] != l1_cont_desc) {
			/* Non-matching L1 entry found */
			return false;
		}
		idx -= 16UL * gpt_l1_cnt_2mb;
	}

	return true;
}
1536
1537__unused static void fuse_512mb(uint64_t base, const gpi_info_t *gpi_info,
1538 uint64_t l1_desc)
1539{
1540 /* L1 entry index of the start of 512MB block */
1541 unsigned long idx_512 = GPT_L1_INDEX(ALIGN_512MB(base));
1542
1543 /* 512MB Contiguous descriptor */
1544 uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 512MB);
1545
1546 VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);
1547
1548 fill_desc(&gpi_info->gpt_l1_addr[idx_512], l1_cont_desc, L1_QWORDS_512MB);
1549}
1550
/*
 * Helper function to convert GPI entries in a single L1 table
 * from Granules to Contiguous descriptor.
 *
 * Fusing is attempted from the smallest block size upwards: a larger block
 * is only attempted when all of its smaller sub-blocks already match. The
 * maximum block size tried is compile-time limited by RME_GPT_MAX_BLOCK
 * (2, 32 or 512 MB).
 *
 * Parameters
 *   base	Base address of the region to be written
 *   gpi_info	Pointer to 'gpt_config_t' structure
 *   l1_desc	GPT Granules descriptor with all entries
 *		set to the same GPI.
 */
__unused static void fuse_block(uint64_t base, const gpi_info_t *gpi_info,
				uint64_t l1_desc)
{
	/* Start with check for 2MB block */
	if (!check_fuse_2mb(base, gpi_info, l1_desc)) {
		/* Check for 2MB fusing failed */
		return;
	}

#if (RME_GPT_MAX_BLOCK == 2)
	fuse_2mb(base, gpi_info, l1_desc);
#else
	/* Check for 32MB block */
	if (!check_fuse_32mb(base, gpi_info, l1_desc)) {
		/* Check for 32MB fusing failed, fuse to 2MB */
		fuse_2mb(base, gpi_info, l1_desc);
		return;
	}

#if (RME_GPT_MAX_BLOCK == 32)
	fuse_32mb(base, gpi_info, l1_desc);
#else
	/* Check for 512MB block */
	if (!check_fuse_512mb(base, gpi_info, l1_desc)) {
		/* Check for 512MB fusing failed, fuse to 32MB */
		fuse_32mb(base, gpi_info, l1_desc);
		return;
	}

	/* Fuse to 512MB */
	fuse_512mb(base, gpi_info, l1_desc);

#endif /* RME_GPT_MAX_BLOCK == 32 */
#endif /* RME_GPT_MAX_BLOCK == 2 */
}
1596
/*
 * Helper function to convert GPI entries in a single L1 table
 * from Contiguous to Granules descriptor. This function updates
 * the descriptor to Granules in the passed 'gpi_info_t' structure
 * as the result of shattering.
 *
 * Parameters
 *   base		Base address of the region to be written
 *   gpi_info		Pointer to 'gpi_info_t' structure
 *   l1_desc		GPT Granules descriptor to set this range to.
 */
1608__unused static void shatter_block(uint64_t base, gpi_info_t *gpi_info,
1609 uint64_t l1_desc)
1610{
1611 /* Look-up table for 2MB, 32MB and 512MB locks shattering */
1612 static const gpt_shatter_func gpt_shatter_lookup[] = {
1613 shatter_2mb,
1614 shatter_32mb,
1615 shatter_512mb
1616 };
1617
1618 /* Look-up table for invalidation TLBs for 2MB, 32MB and 512MB blocks */
1619 static const gpt_tlbi_lookup_t tlbi_lookup[] = {
1620 { tlbirpalos_2m, ~(SZ_2M - 1UL) },
1621 { tlbirpalos_32m, ~(SZ_32M - 1UL) },
1622 { tlbirpalos_512m, ~(SZ_512M - 1UL) }
1623 };
1624
1625 /* Get shattering level from Contig field of Contiguous descriptor */
1626 unsigned long level = GPT_L1_CONT_CONTIG(gpi_info->gpt_l1_desc) - 1UL;
1627
1628 /* Shatter contiguous block */
1629 gpt_shatter_lookup[level](base, gpi_info, l1_desc);
1630
1631 tlbi_lookup[level].function(base & tlbi_lookup[level].mask);
1632 dsbosh();
1633
1634 /*
1635 * Update 'gpt_config_t' structure's descriptor to Granules to reflect
1636 * the shattered GPI back to caller.
1637 */
1638 gpi_info->gpt_l1_desc = l1_desc;
1639}
1640
1641/*
Robert Wakim6a00e9b2021-10-21 15:39:56 +01001642 * This function is the granule transition delegate service. When a granule
1643 * transition request occurs it is routed to this function to have the request,
AlexeiFedorovec0088b2024-03-13 17:07:03 +00001644 * if valid, fulfilled following A1.1.1 Delegate of RME supplement.
johpow01f19dc622021-06-16 17:57:28 -05001645 *
Robert Wakim6a00e9b2021-10-21 15:39:56 +01001646 * TODO: implement support for transitioning multiple granules at once.
johpow01f19dc622021-06-16 17:57:28 -05001647 *
1648 * Parameters
Robert Wakim6a00e9b2021-10-21 15:39:56 +01001649 * base Base address of the region to transition, must be
1650 * aligned to granule size.
1651 * size Size of region to transition, must be aligned to granule
1652 * size.
johpow01f19dc622021-06-16 17:57:28 -05001653 * src_sec_state Security state of the caller.
johpow01f19dc622021-06-16 17:57:28 -05001654 *
1655 * Return
1656 * Negative Linux error code in the event of a failure, 0 for success.
1657 */
int gpt_delegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
{
	gpi_info_t gpi_info;
	/* 'l1_desc' is only read when RME_GPT_MAX_BLOCK != 0, hence __unused */
	uint64_t nse, __unused l1_desc;
	unsigned int target_pas;
	int res;

	/* Ensure that the tables have been set up before taking requests */
	assert(gpt_config.plat_gpt_l0_base != 0UL);

	/* Ensure that caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* See if this is a single or a range of granule transition */
	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
		return -EINVAL;
	}

	/* Check that base and size are valid */
	if ((ULONG_MAX - base) < size) {
		VERBOSE("GPT: Transition request address overflow!\n");
		VERBOSE("  Base=0x%"PRIx64"\n", base);
		VERBOSE("  Size=0x%lx\n", size);
		return -EINVAL;
	}

	/* Make sure base and size are granule-aligned and inside the PPS */
	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    (size == 0UL) ||
	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
		VERBOSE("GPT: Invalid granule transition address range!\n");
		VERBOSE("  Base=0x%"PRIx64"\n", base);
		VERBOSE("  Size=0x%lx\n", size);
		return -EINVAL;
	}

	/* Delegate request can only come from REALM or SECURE */
	if ((src_sec_state != SMC_FROM_REALM) &&
	    (src_sec_state != SMC_FROM_SECURE)) {
		VERBOSE("GPT: Invalid caller security state 0x%x\n",
			src_sec_state);
		return -EINVAL;
	}

	/* Pick target GPI, PoPA NSE encoding and L1 Granules descriptor */
	if (src_sec_state == SMC_FROM_REALM) {
		target_pas = GPT_GPI_REALM;
		nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
		l1_desc = GPT_L1_REALM_DESC;
	} else {
		target_pas = GPT_GPI_SECURE;
		nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
		l1_desc = GPT_L1_SECURE_DESC;
	}

	/* Locate the L1 entry (and its bitlock) covering 'base' */
	res = get_gpi_params(base, &gpi_info);
	if (res != 0) {
		return res;
	}

	/*
	 * Access to each 512MB block in L1 tables is controlled by a bitlock
	 * to ensure that no more than one CPU is allowed to make changes at
	 * any given time.
	 */
	bit_lock(gpi_info.lock, gpi_info.mask);

	/* Read the current GPI and L1 descriptor under the lock */
	read_gpi(&gpi_info);

	/* Check that the current address is in NS state */
	if (gpi_info.gpi != GPT_GPI_NS) {
		VERBOSE("GPT: Only Granule in NS state can be delegated.\n");
		VERBOSE("  Caller: %u, Current GPI: %u\n", src_sec_state,
			gpi_info.gpi);
		bit_unlock(gpi_info.lock, gpi_info.mask);
		return -EPERM;
	}

#if (RME_GPT_MAX_BLOCK != 0)
	/*
	 * Check for Contiguous descriptor: it must be shattered back to
	 * Granules descriptors before one granule can change GPI.
	 */
	if ((gpi_info.gpt_l1_desc & GPT_L1_TYPE_CONT_DESC_MASK) ==
	    GPT_L1_TYPE_CONT_DESC) {
		shatter_block(base, &gpi_info, GPT_L1_NS_DESC);
	}
#endif
	/*
	 * In order to maintain mutual distrust between Realm and Secure
	 * states, remove any data speculatively fetched into the target
	 * physical address space.
	 * Issue DC CIPAPA or DC_CIGDPAPA on implementations with FEAT_MTE2.
	 */
	flush_page_to_popa(base | nse);

	/* Switch the granule's GPI to the target PAS */
	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
		  gpi_info.gpi_shift, gpi_info.idx, target_pas);

	/* Ensure that all agents observe the new configuration */
	tlbi_page_dsbosh(base);

	nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;

	/* Ensure that the scrubbed data have made it past the PoPA */
	flush_page_to_popa(base | nse);

#if (RME_GPT_MAX_BLOCK != 0)
	/* If every sibling entry now matches, try to re-fuse the block */
	if (gpi_info.gpt_l1_desc == l1_desc) {
		/* Try to fuse */
		fuse_block(base, &gpi_info, l1_desc);
	}
#endif

	/* Unlock access to 512MB block */
	bit_unlock(gpi_info.lock, gpi_info.mask);

	/*
	 * The isb() will be done as part of context
	 * synchronization when returning to lower EL.
	 */
	VERBOSE("GPT: Granule 0x%"PRIx64" GPI 0x%x->0x%x\n",
		base, gpi_info.gpi, target_pas);

	return 0;
}
1781
1782/*
Robert Wakim6a00e9b2021-10-21 15:39:56 +01001783 * This function is the granule transition undelegate service. When a granule
johpow01f19dc622021-06-16 17:57:28 -05001784 * transition request occurs it is routed to this function where the request is
1785 * validated then fulfilled if possible.
1786 *
1787 * TODO: implement support for transitioning multiple granules at once.
1788 *
1789 * Parameters
1790 * base Base address of the region to transition, must be
1791 * aligned to granule size.
1792 * size Size of region to transition, must be aligned to granule
1793 * size.
1794 * src_sec_state Security state of the caller.
johpow01f19dc622021-06-16 17:57:28 -05001795 *
1796 * Return
1797 * Negative Linux error code in the event of a failure, 0 for success.
1798 */
int gpt_undelegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
{
	gpi_info_t gpi_info;
	/* 'l1_desc' is only read when RME_GPT_MAX_BLOCK != 0, hence __unused */
	uint64_t nse, __unused l1_desc;
	int res;

	/* Ensure that the tables have been set up before taking requests */
	assert(gpt_config.plat_gpt_l0_base != 0UL);

	/* Ensure that MMU and caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* See if this is a single or a range of granule transition */
	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
		return -EINVAL;
	}

	/* Check that base and size are valid */
	if ((ULONG_MAX - base) < size) {
		VERBOSE("GPT: Transition request address overflow!\n");
		VERBOSE("  Base=0x%"PRIx64"\n", base);
		VERBOSE("  Size=0x%lx\n", size);
		return -EINVAL;
	}

	/* Make sure base and size are granule-aligned and inside the PPS */
	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    (size == 0UL) ||
	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
		VERBOSE("GPT: Invalid granule transition address range!\n");
		VERBOSE("  Base=0x%"PRIx64"\n", base);
		VERBOSE("  Size=0x%lx\n", size);
		return -EINVAL;
	}

	/* Locate the L1 entry (and its bitlock) covering 'base' */
	res = get_gpi_params(base, &gpi_info);
	if (res != 0) {
		return res;
	}

	/*
	 * Access to each 512MB block in L1 tables is controlled by a bitlock
	 * to ensure that no more than one CPU is allowed to make changes at
	 * any given time.
	 */
	bit_lock(gpi_info.lock, gpi_info.mask);

	/* Read the current GPI and L1 descriptor under the lock */
	read_gpi(&gpi_info);

	/*
	 * Check that the current address is in the delegated state and that
	 * the caller owns it; only the owning world may undelegate.
	 */
	if ((src_sec_state == SMC_FROM_REALM) &&
	    (gpi_info.gpi == GPT_GPI_REALM)) {
		l1_desc = GPT_L1_REALM_DESC;
		nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
	} else if ((src_sec_state == SMC_FROM_SECURE) &&
		   (gpi_info.gpi == GPT_GPI_SECURE)) {
		l1_desc = GPT_L1_SECURE_DESC;
		nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
	} else {
		VERBOSE("GPT: Only Granule in REALM or SECURE state can be undelegated\n");
		VERBOSE("  Caller: %u Current GPI: %u\n", src_sec_state,
			gpi_info.gpi);
		bit_unlock(gpi_info.lock, gpi_info.mask);
		return -EPERM;
	}

#if (RME_GPT_MAX_BLOCK != 0)
	/*
	 * Check for Contiguous descriptor: it must be shattered back to
	 * Granules descriptors before one granule can change GPI.
	 */
	if ((gpi_info.gpt_l1_desc & GPT_L1_TYPE_CONT_DESC_MASK) ==
	    GPT_L1_TYPE_CONT_DESC) {
		shatter_block(base, &gpi_info, l1_desc);
	}
#endif
	/*
	 * In order to maintain mutual distrust between Realm and Secure
	 * states, remove access now, in order to guarantee that writes
	 * to the currently-accessible physical address space will not
	 * later become observable.
	 */
	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
		  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NO_ACCESS);

	/* Ensure that all agents observe the new NO_ACCESS configuration */
	tlbi_page_dsbosh(base);

	/* Ensure that the scrubbed data have made it past the PoPA */
	flush_page_to_popa(base | nse);

	/*
	 * Remove any data loaded speculatively in NS space from before
	 * the scrubbing.
	 */
	nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;

	flush_page_to_popa(base | nse);

	/* Clear existing GPI encoding and transition granule */
	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
		  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NS);

	/* Ensure that all agents observe the new NS configuration */
	tlbi_page_dsbosh(base);

#if (RME_GPT_MAX_BLOCK != 0)
	/* If every sibling entry is now NS, try to re-fuse the block */
	if (gpi_info.gpt_l1_desc == GPT_L1_NS_DESC) {
		/* Try to fuse */
		fuse_block(base, &gpi_info, GPT_L1_NS_DESC);
	}
#endif
	/* Unlock access to 512MB block */
	bit_unlock(gpi_info.lock, gpi_info.mask);

	/*
	 * The isb() will be done as part of context
	 * synchronization when returning to lower EL.
	 */
	VERBOSE("GPT: Granule 0x%"PRIx64" GPI 0x%x->0x%x\n",
		base, gpi_info.gpi, GPT_GPI_NS);

	return 0;
}