/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <debug.h>
#include <platform_def.h>
#include <xlat_tables_defs.h>
#include <xlat_tables_v2.h>

#include "xlat_tables_private.h"

/*
 * MMU configuration register values for the active translation context. Used
 * by the MMU assembly helpers.
 */
uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];

/*
 * Each platform can define the size of its physical and virtual address
 * spaces. If the platform hasn't defined one or both of them, default to
 * ADDR_SPACE_SIZE. The latter is deprecated, though.
 */
#if ERROR_DEPRECATED
# ifdef ADDR_SPACE_SIZE
#  error "ADDR_SPACE_SIZE is deprecated. Use PLAT_xxx_ADDR_SPACE_SIZE instead."
# endif
#elif defined(ADDR_SPACE_SIZE)
# ifndef PLAT_PHY_ADDR_SPACE_SIZE
#  define PLAT_PHY_ADDR_SPACE_SIZE	ADDR_SPACE_SIZE
# endif
# ifndef PLAT_VIRT_ADDR_SPACE_SIZE
#  define PLAT_VIRT_ADDR_SPACE_SIZE	ADDR_SPACE_SIZE
# endif
#endif
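
/*
 * For illustration only: a platform would normally provide these sizes in its
 * platform_def.h. The 4 GiB values below are hypothetical, not a requirement:
 *
 *	#define PLAT_PHY_ADDR_SPACE_SIZE	(1ULL << 32)
 *	#define PLAT_VIRT_ADDR_SPACE_SIZE	(1ULL << 32)
 */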

/*
 * Allocate and initialise the default translation context for the software
 * image currently executing.
 */
REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
		      PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);

void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, size_t size,
		     unsigned int attr)
{
	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);

	mmap_add_region_ctx(&tf_xlat_ctx, &mm);
}

void mmap_add(const mmap_region_t *mm)
{
	mmap_add_ctx(&tf_xlat_ctx, mm);
}

void mmap_add_region_alloc_va(unsigned long long base_pa, uintptr_t *base_va,
			      size_t size, unsigned int attr)
{
	mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);

	mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, &mm);

	*base_va = mm.base_va;
}

void mmap_add_alloc_va(mmap_region_t *mm)
{
	/* Walk the array until the zero-sized terminator entry. */
	while (mm->size != 0U) {
		/* The VA must be left unset; it is allocated here. */
		assert(mm->base_va == 0U);
		mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, mm);
		mm++;
	}
}

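/*
 * A minimal usage sketch (physical address, size and attributes below are
 * hypothetical): the input array is terminated by a zero-sized region, and
 * each entry's base_va is filled in with the allocated virtual address.
 *
 *	mmap_region_t regions[] = {
 *		MAP_REGION_ALLOC_VA(0x80000000ULL, 0x1000, MT_DEVICE | MT_RW),
 *		{0}
 *	};
 *
 *	mmap_add_alloc_va(regions);
 */
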
#if PLAT_XLAT_TABLES_DYNAMIC

int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
			    size_t size, unsigned int attr)
{
	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);

	return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
}

int mmap_add_dynamic_region_alloc_va(unsigned long long base_pa,
				     uintptr_t *base_va, size_t size,
				     unsigned int attr)
{
	mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);

	int rc = mmap_add_dynamic_region_alloc_va_ctx(&tf_xlat_ctx, &mm);

	*base_va = mm.base_va;

	return rc;
}

int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
{
	return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx,
					      base_va, size);
}

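/*
 * A minimal sketch of how the two calls pair up (the identity-mapped address
 * and size are hypothetical). Removal must use the same VA and size that were
 * mapped:
 *
 *	int rc = mmap_add_dynamic_region(0x90000000ULL, 0x90000000U,
 *					 0x2000U, MT_MEMORY | MT_RW);
 *	if (rc == 0) {
 *		... use the mapping ...
 *		rc = mmap_remove_dynamic_region(0x90000000U, 0x2000U);
 *	}
 */
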
#endif /* PLAT_XLAT_TABLES_DYNAMIC */

void init_xlat_tables(void)
{
	assert(tf_xlat_ctx.xlat_regime == EL_REGIME_INVALID);

	/* Pick the translation regime that matches the current EL. */
	unsigned int current_el = xlat_arch_current_el();

	if (current_el == 1U) {
		tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME;
	} else if (current_el == 2U) {
		tf_xlat_ctx.xlat_regime = EL2_REGIME;
	} else {
		assert(current_el == 3U);
		tf_xlat_ctx.xlat_regime = EL3_REGIME;
	}

	init_xlat_tables_ctx(&tf_xlat_ctx);
}

int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr)
{
	return xlat_get_mem_attributes_ctx(&tf_xlat_ctx, base_va, attr);
}

int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)
{
	return xlat_change_mem_attributes_ctx(&tf_xlat_ctx, base_va, size, attr);
}

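/*
 * For example (hypothetical address), a caller could revoke write access to a
 * page once it has finished initialising its contents, then read the
 * attributes back to confirm:
 *
 *	uint32_t attr;
 *
 *	xlat_change_mem_attributes(0x80000000U, PAGE_SIZE, MT_RO_DATA);
 *	xlat_get_mem_attributes(0x80000000U, &attr);
 */
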
/*
 * If dynamic allocation of new regions is disabled then, by the time we call
 * the function enabling the MMU, we'll have registered all the memory regions
 * to map for the system's lifetime. Therefore, at this point we know the
 * maximum physical address that will ever be mapped.
 *
 * If dynamic allocation is enabled then we can't make any such assumption
 * because the maximum physical address could increase when a new region is
 * added. Therefore, in this case we have to assume that the whole address
 * space size might be mapped.
 */
#if PLAT_XLAT_TABLES_DYNAMIC
#define MAX_PHYS_ADDR	tf_xlat_ctx.pa_max_address
#else
#define MAX_PHYS_ADDR	tf_xlat_ctx.max_pa
#endif

#ifdef AARCH32

void enable_mmu_svc_mon(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
	enable_mmu_direct_svc_mon(flags);
}

void enable_mmu_hyp(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL2_REGIME);
	enable_mmu_direct_hyp(flags);
}

#else

void enable_mmu_el1(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
	enable_mmu_direct_el1(flags);
}

void enable_mmu_el2(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL2_REGIME);
	enable_mmu_direct_el2(flags);
}

void enable_mmu_el3(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL3_REGIME);
	enable_mmu_direct_el3(flags);
}

#endif /* AARCH32 */
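
/*
 * A typical boot-time sequence on AArch64 at EL3 would be (region address,
 * size and attributes below are hypothetical):
 *
 *	mmap_add_region(0x80000000ULL, 0x80000000U, 0x100000U,
 *			MT_MEMORY | MT_RW | MT_SECURE);
 *	init_xlat_tables();
 *	enable_mmu_el3(0U);
 */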