/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <errno.h>
#include <platform_def.h>
#include <types.h>
#include <utils_def.h>
#include <xlat_tables_arch_private.h>
#include <xlat_tables_defs.h>
#include <xlat_tables_v2.h>

#include "xlat_tables_private.h"

#if LOG_LEVEL < LOG_LEVEL_VERBOSE

void xlat_mmap_print(__unused mmap_region_t *const mmap)
{
	/* Empty */
}

void xlat_tables_print(__unused xlat_ctx_t *ctx)
{
	/* Empty */
}

#else /* if LOG_LEVEL >= LOG_LEVEL_VERBOSE */

void xlat_mmap_print(mmap_region_t *const mmap)
{
	tf_printf("mmap:\n");
	const mmap_region_t *mm = mmap;

	while (mm->size != 0U) {
		tf_printf(" VA:0x%lx PA:0x%llx size:0x%zx attr:0x%x "
			  "granularity:0x%zx\n", mm->base_va, mm->base_pa,
			  mm->size, mm->attr, mm->granularity);
		++mm;
	}
	tf_printf("\n");
}

/* Print the attributes of the specified block descriptor. */
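/*
 * For example, an EL1&0 secure MEM page that is writable and non-executable
 * at both exception levels, with no unprivileged access, is printed as
 * "MEM-RW(PRIV)-NOACCESS(USER)-XN(PRIV)-XN(USER)-S".
 */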
static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
{
	int mem_type_index = ATTR_INDEX_GET(desc);
	int xlat_regime = ctx->xlat_regime;

	if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
		tf_printf("MEM");
	} else if (mem_type_index == ATTR_NON_CACHEABLE_INDEX) {
		tf_printf("NC");
	} else {
		assert(mem_type_index == ATTR_DEVICE_INDEX);
		tf_printf("DEV");
	}

	const char *priv_str = "(PRIV)";
	const char *user_str = "(USER)";

	/*
	 * Showing Privileged vs Unprivileged only makes sense for EL1&0
	 * mappings.
	 */
	const char *ro_str = "-RO";
	const char *rw_str = "-RW";
	const char *no_access_str = "-NOACCESS";

	if (xlat_regime == EL3_REGIME) {
		/* For EL3, the AP[2] bit is all that matters */
		tf_printf("%s", (desc & LOWER_ATTRS(AP_RO)) ? ro_str : rw_str);
	} else {
		const char *ap_str = (desc & LOWER_ATTRS(AP_RO)) ? ro_str : rw_str;
		tf_printf("%s", ap_str);
		tf_printf("%s", priv_str);
		/*
		 * EL0 can only have the same permissions as EL1 or no
		 * permissions at all.
		 */
		tf_printf("%s",
			  (desc & LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED))
			  ? ap_str : no_access_str);
		tf_printf("%s", user_str);
	}

	const char *xn_str = "-XN";
	const char *exec_str = "-EXEC";

	if (xlat_regime == EL3_REGIME) {
		/* For EL3, the XN bit is all that matters */
		tf_printf("%s", (UPPER_ATTRS(XN) & desc) ? xn_str : exec_str);
	} else {
		/* For EL0 and EL1, we need to know who has which rights */
		tf_printf("%s", (UPPER_ATTRS(PXN) & desc) ? xn_str : exec_str);
		tf_printf("%s", priv_str);

		tf_printf("%s", (UPPER_ATTRS(UXN) & desc) ? xn_str : exec_str);
		tf_printf("%s", user_str);
	}

	tf_printf(LOWER_ATTRS(NS) & desc ? "-NS" : "-S");
}

static const char * const level_spacers[] = {
	"[LV0] ",
	"  [LV1] ",
	"    [LV2] ",
	"      [LV3] "
};

static const char *invalid_descriptors_omitted =
	"%s(%d invalid descriptors omitted)\n";

/*
 * Recursive function that reads the translation tables passed as an argument
 * and prints their status.
 */
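/*
 * For example (hypothetical addresses), a 4 KiB page mapped at level 3 is
 * printed as a line like:
 *   "      [LV3] VA:0x40001000 PA:0x40001000 size:0x1000 MEM-RW(PRIV)..."
 */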
static void xlat_tables_print_internal(xlat_ctx_t *ctx,
		const uintptr_t table_base_va,
		uint64_t *const table_base, const int table_entries,
		const unsigned int level)
{
	assert(level <= XLAT_TABLE_LEVEL_MAX);

	uint64_t desc;
	uintptr_t table_idx_va = table_base_va;
	int table_idx = 0;

	size_t level_size = XLAT_BLOCK_SIZE(level);

	/*
	 * Keep track of how many invalid descriptors are counted in a row.
	 * Whenever multiple invalid descriptors are found, only the first one
	 * is printed, and a line is added to inform about how many descriptors
	 * have been omitted.
	 */
	int invalid_row_count = 0;

	while (table_idx < table_entries) {

		desc = table_base[table_idx];

		if ((desc & DESC_MASK) == INVALID_DESC) {

			if (invalid_row_count == 0) {
				tf_printf("%sVA:%p size:0x%zx\n",
					  level_spacers[level],
					  (void *)table_idx_va, level_size);
			}
			invalid_row_count++;

		} else {

			if (invalid_row_count > 1) {
				tf_printf(invalid_descriptors_omitted,
					  level_spacers[level],
					  invalid_row_count - 1);
			}
			invalid_row_count = 0;

			/*
			 * Check if this is a table or a block. Tables are only
			 * allowed in levels other than 3, but DESC_PAGE has the
			 * same value as DESC_TABLE, so we need to check.
			 */
			if (((desc & DESC_MASK) == TABLE_DESC) &&
					(level < XLAT_TABLE_LEVEL_MAX)) {
				/*
				 * Do not print any PA for a table descriptor,
				 * as it doesn't directly map physical memory
				 * but instead points to the next translation
				 * table in the translation table walk.
				 */
				tf_printf("%sVA:%p size:0x%zx\n",
					  level_spacers[level],
					  (void *)table_idx_va, level_size);

				uintptr_t addr_inner = desc & TABLE_ADDR_MASK;

				xlat_tables_print_internal(ctx, table_idx_va,
					(uint64_t *)addr_inner,
					XLAT_TABLE_ENTRIES, level + 1);
			} else {
				tf_printf("%sVA:%p PA:0x%llx size:0x%zx ",
					  level_spacers[level],
					  (void *)table_idx_va,
					  (unsigned long long)(desc & TABLE_ADDR_MASK),
					  level_size);
				xlat_desc_print(ctx, desc);
				tf_printf("\n");
			}
		}

		table_idx++;
		table_idx_va += level_size;
	}

	if (invalid_row_count > 1) {
		tf_printf(invalid_descriptors_omitted,
			  level_spacers[level], invalid_row_count - 1);
	}
}

void xlat_tables_print(xlat_ctx_t *ctx)
{
	const char *xlat_regime_str;
	if (ctx->xlat_regime == EL1_EL0_REGIME) {
		xlat_regime_str = "1&0";
	} else {
		assert(ctx->xlat_regime == EL3_REGIME);
		xlat_regime_str = "3";
	}
	VERBOSE("Translation tables state:\n");
	VERBOSE("  Xlat regime:     EL%s\n", xlat_regime_str);
	VERBOSE("  Max allowed PA:  0x%llx\n", ctx->pa_max_address);
	VERBOSE("  Max allowed VA:  %p\n", (void *) ctx->va_max_address);
	VERBOSE("  Max mapped PA:   0x%llx\n", ctx->max_pa);
	VERBOSE("  Max mapped VA:   %p\n", (void *) ctx->max_va);

	VERBOSE("  Initial lookup level: %i\n", ctx->base_level);
	VERBOSE("  Entries @initial lookup level: %i\n",
		ctx->base_table_entries);

	int used_page_tables;
#if PLAT_XLAT_TABLES_DYNAMIC
	used_page_tables = 0;
	for (unsigned int i = 0; i < ctx->tables_num; ++i) {
		if (ctx->tables_mapped_regions[i] != 0)
			++used_page_tables;
	}
#else
	used_page_tables = ctx->next_table;
#endif
	VERBOSE("  Used %i sub-tables out of %i (spare: %i)\n",
		used_page_tables, ctx->tables_num,
		ctx->tables_num - used_page_tables);

	xlat_tables_print_internal(ctx, 0, ctx->base_table,
				   ctx->base_table_entries, ctx->base_level);
}

#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */

/*
 * Do a translation table walk to find the block or page descriptor that maps
 * virtual_addr.
 *
 * On success, return the address of the descriptor within the translation
 * table. Its lookup level is stored in '*out_level'.
 * On error, return NULL.
 *
 * xlat_table_base
 *   Base address for the initial lookup level.
 * xlat_table_base_entries
 *   Number of entries in the translation table for the initial lookup level.
 * virt_addr_space_size
 *   Size in bytes of the virtual address space.
 */
static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
				       void *xlat_table_base,
				       int xlat_table_base_entries,
				       unsigned long long virt_addr_space_size,
				       int *out_level)
{
	unsigned int start_level;
	uint64_t *table;
	int entries;

	VERBOSE("%s(%p)\n", __func__, (void *)virtual_addr);

	start_level = GET_XLAT_TABLE_LEVEL_BASE(virt_addr_space_size);
	VERBOSE("Starting translation table walk from level %i\n",
		start_level);

	table = xlat_table_base;
	entries = xlat_table_base_entries;

	for (unsigned int level = start_level;
	     level <= XLAT_TABLE_LEVEL_MAX;
	     ++level) {
		int idx;
		uint64_t desc;
		uint64_t desc_type;

		VERBOSE("Table address: %p\n", (void *)table);

		idx = XLAT_TABLE_IDX(virtual_addr, level);
		VERBOSE("Index into level %i table: %i\n", level, idx);
		if (idx >= entries) {
			VERBOSE("Invalid address\n");
			return NULL;
		}

		desc = table[idx];
		desc_type = desc & DESC_MASK;
		VERBOSE("Descriptor at level %i: 0x%llx\n", level,
			(unsigned long long)desc);

		if (desc_type == INVALID_DESC) {
			VERBOSE("Invalid entry (memory not mapped)\n");
			return NULL;
		}

		if (level == XLAT_TABLE_LEVEL_MAX) {
			/*
			 * There can't be table entries at the final lookup
			 * level.
			 */
			assert(desc_type == PAGE_DESC);
			VERBOSE("Descriptor mapping a memory page (size: 0x%llx)\n",
				(unsigned long long)XLAT_BLOCK_SIZE(XLAT_TABLE_LEVEL_MAX));
			*out_level = level;
			return &table[idx];
		}

		if (desc_type == BLOCK_DESC) {
			VERBOSE("Descriptor mapping a memory block (size: 0x%llx)\n",
				(unsigned long long)XLAT_BLOCK_SIZE(level));
			*out_level = level;
			return &table[idx];
		}

		assert(desc_type == TABLE_DESC);
		VERBOSE("Table descriptor, continuing xlat table walk...\n");
		table = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
		entries = XLAT_TABLE_ENTRIES;
	}

	/*
	 * This shouldn't be reached, the translation table walk should end at
	 * most at level XLAT_TABLE_LEVEL_MAX and return from inside the loop.
	 */
	assert(0);

	return NULL;
}


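/*
 * Look up the descriptor that maps base_va in the given translation context
 * and return the memory attributes of the mapping through 'attributes'. The
 * descriptor address, the physical address it maps and its lookup level are
 * returned through 'table_entry', 'addr_pa' and 'table_level' when those
 * pointers are non-NULL.
 *
 * Returns 0 on success, -EINVAL if base_va is not mapped.
 */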
static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
		uint32_t *attributes, uint64_t **table_entry,
		unsigned long long *addr_pa, int *table_level)
{
	uint64_t *entry;
	uint64_t desc;
	int level;
	unsigned long long virt_addr_space_size;

	/*
	 * Sanity-check arguments.
	 */
	assert(ctx != NULL);
	assert(ctx->initialized);
	assert(ctx->xlat_regime == EL1_EL0_REGIME ||
	       ctx->xlat_regime == EL3_REGIME);

	virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1;
	assert(virt_addr_space_size > 0);

	entry = find_xlat_table_entry(base_va,
				      ctx->base_table,
				      ctx->base_table_entries,
				      virt_addr_space_size,
				      &level);
	if (entry == NULL) {
		WARN("Address %p is not mapped.\n", (void *)base_va);
		return -EINVAL;
	}

	if (addr_pa != NULL) {
		*addr_pa = *entry & TABLE_ADDR_MASK;
	}

	if (table_entry != NULL) {
		*table_entry = entry;
	}

	if (table_level != NULL) {
		*table_level = level;
	}

	desc = *entry;

#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
	VERBOSE("Attributes: ");
	xlat_desc_print(ctx, desc);
	tf_printf("\n");
#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */

	assert(attributes != NULL);
	*attributes = 0;

	int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;

	if (attr_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
		*attributes |= MT_MEMORY;
	} else if (attr_index == ATTR_NON_CACHEABLE_INDEX) {
		*attributes |= MT_NON_CACHEABLE;
	} else {
		assert(attr_index == ATTR_DEVICE_INDEX);
		*attributes |= MT_DEVICE;
	}

	int ap2_bit = (desc >> AP2_SHIFT) & 1;

	if (ap2_bit == AP2_RW)
		*attributes |= MT_RW;

	if (ctx->xlat_regime == EL1_EL0_REGIME) {
		int ap1_bit = (desc >> AP1_SHIFT) & 1;
		if (ap1_bit == AP1_ACCESS_UNPRIVILEGED)
			*attributes |= MT_USER;
	}

	int ns_bit = (desc >> NS_SHIFT) & 1;

	if (ns_bit == 1)
		*attributes |= MT_NS;

	uint64_t xn_mask = xlat_arch_regime_get_xn_desc(ctx->xlat_regime);

	if ((desc & xn_mask) == xn_mask) {
		*attributes |= MT_EXECUTE_NEVER;
	} else {
		assert((desc & xn_mask) == 0);
	}

	return 0;
}


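/*
 * Retrieve the attributes of the memory page mapped at base_va in the given
 * translation context. See get_mem_attributes_internal() for details.
 */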
int get_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va,
		       uint32_t *attributes)
{
	return get_mem_attributes_internal(ctx, base_va, attributes,
					   NULL, NULL, NULL);
}


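/*
 * Change the attributes of the 'size' bytes of memory starting at base_va,
 * which must be aligned and mapped at page granularity. From 'attr', only
 * MT_RO/MT_RW, MT_EXECUTE/MT_EXECUTE_NEVER and MT_USER/MT_PRIVILEGED are
 * honoured; each page is updated with a break-before-make sequence.
 * Returns 0 on success and -EINVAL on any validation failure.
 *
 * A minimal usage sketch ('ctx' and 'page_va' are hypothetical names): remap
 * one page as read-only and non-executable:
 *
 *	rc = change_mem_attributes(ctx, page_va, PAGE_SIZE,
 *				   MT_RO | MT_EXECUTE_NEVER);
 */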
int change_mem_attributes(xlat_ctx_t *ctx,
			  uintptr_t base_va,
			  size_t size,
			  uint32_t attr)
{
	/* Note: This implementation isn't optimized. */

	assert(ctx != NULL);
	assert(ctx->initialized);

	unsigned long long virt_addr_space_size =
		(unsigned long long)ctx->va_max_address + 1;
	assert(virt_addr_space_size > 0);

	if (!IS_PAGE_ALIGNED(base_va)) {
		WARN("%s: Address %p is not aligned on a page boundary.\n",
		     __func__, (void *)base_va);
		return -EINVAL;
	}

	if (size == 0) {
		WARN("%s: Size is 0.\n", __func__);
		return -EINVAL;
	}

	if ((size % PAGE_SIZE) != 0) {
		WARN("%s: Size 0x%zx is not a multiple of the page size.\n",
		     __func__, size);
		return -EINVAL;
	}

	if (((attr & MT_EXECUTE_NEVER) == 0) && ((attr & MT_RW) != 0)) {
		WARN("%s() doesn't allow remapping memory as both read-write and executable.\n",
		     __func__);
		return -EINVAL;
	}

	int pages_count = size / PAGE_SIZE;

	VERBOSE("Changing memory attributes of %i pages starting from address %p...\n",
		pages_count, (void *)base_va);

	uintptr_t base_va_original = base_va;

	/*
	 * Sanity-check all pages first so that this function fails before it
	 * modifies any descriptor.
	 */
	for (int i = 0; i < pages_count; ++i) {
		uint64_t *entry;
		uint64_t desc;
		int level;

		entry = find_xlat_table_entry(base_va,
					      ctx->base_table,
					      ctx->base_table_entries,
					      virt_addr_space_size,
					      &level);
		if (entry == NULL) {
			WARN("Address %p is not mapped.\n", (void *)base_va);
			return -EINVAL;
		}

		desc = *entry;

		/*
		 * Check that all the required pages are mapped at page
		 * granularity.
		 */
		if (((desc & DESC_MASK) != PAGE_DESC) ||
				(level != XLAT_TABLE_LEVEL_MAX)) {
			WARN("Address %p is not mapped at the right granularity.\n",
			     (void *)base_va);
			WARN("Granularity is 0x%llx, should be 0x%x.\n",
			     (unsigned long long)XLAT_BLOCK_SIZE(level),
			     PAGE_SIZE);
			return -EINVAL;
		}

		/*
		 * If the region type is device, it shouldn't be executable.
		 */
		int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
		if (attr_index == ATTR_DEVICE_INDEX) {
			if ((attr & MT_EXECUTE_NEVER) == 0) {
				WARN("Setting device memory as executable at address %p.\n",
				     (void *)base_va);
				return -EINVAL;
			}
		}

		base_va += PAGE_SIZE;
	}

	/* Restore original value. */
	base_va = base_va_original;

	VERBOSE("%s: All pages are mapped, now changing their attributes...\n",
		__func__);

	for (int i = 0; i < pages_count; ++i) {

		uint32_t old_attr, new_attr;
		uint64_t *entry;
		int level;
		unsigned long long addr_pa;

		get_mem_attributes_internal(ctx, base_va, &old_attr,
					    &entry, &addr_pa, &level);

		VERBOSE("Old attributes: 0x%x\n", old_attr);

		/*
		 * From attr, only MT_RO/MT_RW, MT_EXECUTE/MT_EXECUTE_NEVER and
		 * MT_USER/MT_PRIVILEGED are taken into account. Any other
		 * information is ignored.
		 */

		/* Clean the old attributes so that they can be rebuilt. */
		new_attr = old_attr & ~(MT_RW | MT_EXECUTE_NEVER | MT_USER);

		/*
		 * Update attributes, but filter out the ones this function
		 * isn't allowed to change.
		 */
		new_attr |= attr & (MT_RW | MT_EXECUTE_NEVER | MT_USER);

		VERBOSE("New attributes: 0x%x\n", new_attr);

		/*
		 * The break-before-make sequence requires writing an invalid
		 * descriptor and making sure that the system sees the change
		 * before writing the new descriptor.
		 */
		*entry = INVALID_DESC;

		/* Invalidate any cached copy of this mapping in the TLBs. */
		xlat_arch_tlbi_va_regime(base_va, ctx->xlat_regime);

		/* Ensure completion of the invalidation. */
		xlat_arch_tlbi_va_sync();

		/* Write new descriptor */
		*entry = xlat_desc(ctx, new_attr, addr_pa, level);

		base_va += PAGE_SIZE;
	}

	/* Ensure that the last descriptor written is seen by the system. */
	dsbish();

	return 0;
}