/*
 * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <platform_def.h>
#include <string.h>
#include <xlat_tables.h>


#ifndef DEBUG_XLAT_TABLE
#define DEBUG_XLAT_TABLE 0
#endif

#if DEBUG_XLAT_TABLE
#define debug_print(...) printf(__VA_ARGS__)
#else
#define debug_print(...) ((void)0)
#endif


#define UNSET_DESC	~0ul

#define NUM_L1_ENTRIES (ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)

static uint64_t l1_xlation_table[NUM_L1_ENTRIES]
__aligned(NUM_L1_ENTRIES * sizeof(uint64_t));

static uint64_t xlat_tables[MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
__aligned(XLAT_TABLE_SIZE) __attribute__((section("xlat_table")));

static unsigned next_xlat;

/*
 * Array of all memory regions stored in order of ascending base address.
 * The list is terminated by the first entry with size == 0.
 */
static mmap_region_t mmap[MAX_MMAP_REGIONS + 1];


static void print_mmap(void)
{
#if DEBUG_XLAT_TABLE
        debug_print("mmap:\n");
        mmap_region_t *mm = mmap;
        while (mm->size) {
                debug_print(" %010lx %010lx %10lx %x\n", mm->base_va,
                                mm->base_pa, mm->size, mm->attr);
                ++mm;
        }
        debug_print("\n");
#endif
}

void mmap_add_region(unsigned long base_pa, unsigned long base_va,
                        unsigned long size, unsigned attr)
{
        mmap_region_t *mm = mmap;
        mmap_region_t *mm_last = mm + sizeof(mmap) / sizeof(mmap[0]) - 1;

        assert(IS_PAGE_ALIGNED(base_pa));
        assert(IS_PAGE_ALIGNED(base_va));
        assert(IS_PAGE_ALIGNED(size));

        if (!size)
                return;

        /* Find correct place in mmap to insert new region */
        while (mm->base_va < base_va && mm->size)
                ++mm;

        /* Make room for new region by moving other regions up by one place */
        memmove(mm + 1, mm, (uintptr_t)mm_last - (uintptr_t)mm);

        /* Check we haven't lost the empty sentinel from the end of the array */
        assert(mm_last->size == 0);

        mm->base_pa = base_pa;
        mm->base_va = base_va;
        mm->size = size;
        mm->attr = attr;
}

void mmap_add(const mmap_region_t *mm)
{
        while (mm->size) {
                mmap_add_region(mm->base_pa, mm->base_va, mm->size, mm->attr);
                ++mm;
        }
}
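
/*
 * Illustrative usage sketch (not part of the original file): a platform port
 * would typically describe its memory layout as a zero-terminated array of
 * mmap_region_t entries and hand it to mmap_add() before building the
 * translation tables. The array name and the addresses/sizes below are
 * hypothetical examples; only the field names, the attribute flags
 * (MT_MEMORY, MT_RW, MT_NS) and the zero-size terminator come from this
 * library. A region without MT_MEMORY is mapped as Device memory by
 * mmap_desc() below.
 *
 *	static const mmap_region_t plat_mmap[] = {
 *		{ .base_pa = 0x04000000, .base_va = 0x04000000,
 *		  .size = 0x04000000, .attr = MT_MEMORY | MT_RW },
 *		{ .base_pa = 0x1c090000, .base_va = 0x1c090000,
 *		  .size = 0x00010000, .attr = MT_RW | MT_NS },
 *		{ .size = 0 }	 <- zero size terminates the list
 *	};
 *
 *	mmap_add(plat_mmap);
 */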

static unsigned long mmap_desc(unsigned attr, unsigned long addr_pa,
                                unsigned level)
{
        unsigned long desc = addr_pa;

        desc |= level == 3 ? TABLE_DESC : BLOCK_DESC;

        desc |= attr & MT_NS ? LOWER_ATTRS(NS) : 0;

        desc |= attr & MT_RW ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);

        desc |= LOWER_ATTRS(ACCESS_FLAG);

        if (attr & MT_MEMORY) {
                desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
                if (attr & MT_RW)
                        desc |= UPPER_ATTRS(XN);
        } else {
                desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
                desc |= UPPER_ATTRS(XN);
        }

        debug_print(attr & MT_MEMORY ? "MEM" : "DEV");
        debug_print(attr & MT_RW ? "-RW" : "-RO");
        debug_print(attr & MT_NS ? "-NS" : "-S");

        return desc;
}
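
/*
 * Worked example (informational, not from the original source): with the
 * flags used in this file, mmap_desc(MT_MEMORY | MT_RW, 0x80000000, 2)
 * returns a level 2 block descriptor for PA 0x80000000 marked as Secure,
 * normal inner/outer write-back write-allocate memory, inner shareable,
 * read-write, with the access flag set and execute-never (because the
 * region is writeable). With the usual 4KB granule such a level 2 block
 * maps 2MB of address space.
 */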

static int mmap_region_attr(mmap_region_t *mm, unsigned long base_va,
                                unsigned long size)
{
        int attr = mm->attr;

        for (;;) {
                ++mm;

                if (!mm->size)
                        return attr; /* Reached end of list */

                if (mm->base_va >= base_va + size)
                        return attr; /* Next region is after area so end */

                if (mm->base_va + mm->size <= base_va)
                        continue; /* Next region has already been overtaken */

                if ((mm->attr & attr) == attr)
                        continue; /* Region doesn't override attribs so skip */

                attr &= mm->attr;

                if (mm->base_va > base_va ||
                        mm->base_va + mm->size < base_va + size)
                        return -1; /* Region doesn't fully cover our area */
        }
}
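
/*
 * Example of the rule above (informational): if a large MT_MEMORY | MT_RW
 * region contains a smaller MT_MEMORY (read-only) region, an area lying
 * entirely inside the smaller region gets the intersection of the two
 * attribute sets (i.e. read-only), while an area that only partly overlaps
 * the smaller region makes this function return -1, forcing the caller to
 * descend to a finer-grained table.
 */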

static mmap_region_t *init_xlation_table(mmap_region_t *mm,
                                        unsigned long base_va,
                                        unsigned long *table, unsigned level)
{
        unsigned level_size_shift = L1_XLAT_ADDRESS_SHIFT - (level - 1) *
                                                XLAT_TABLE_ENTRIES_SHIFT;
        unsigned level_size = 1 << level_size_shift;
        unsigned long level_index_mask = XLAT_TABLE_ENTRIES_MASK << level_size_shift;

        assert(level <= 3);

        debug_print("New xlat table:\n");

        do {
                unsigned long desc = UNSET_DESC;

                if (mm->base_va + mm->size <= base_va) {
                        /* Area now after the region so skip it */
                        ++mm;
                        continue;
                }

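                /*
                 * Indent the debug output by translation level: the
                 * (6 - 2 * level) offset into the format string skips part
                 * of its leading spaces, so deeper levels print further to
                 * the right.
                 */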
                debug_print("      %010lx %8lx " + 6 - 2 * level, base_va,
                                level_size);

                if (mm->base_va >= base_va + level_size) {
                        /* Next region is after area so nothing to map yet */
                        desc = INVALID_DESC;
                } else if (mm->base_va <= base_va && mm->base_va + mm->size >=
                                base_va + level_size) {
                        /* Next region covers all of area */
                        int attr = mmap_region_attr(mm, base_va, level_size);
                        if (attr >= 0)
                                desc = mmap_desc(attr,
                                        base_va - mm->base_va + mm->base_pa,
                                        level);
                }
                /*
                 * else: the next region only partially covers this area,
                 * so a finer-grained table is needed below
                 */

                if (desc == UNSET_DESC) {
                        /* Area not covered by a region so need finer table */
                        unsigned long *new_table = xlat_tables[next_xlat++];
                        assert(next_xlat <= MAX_XLAT_TABLES);
                        desc = TABLE_DESC | (unsigned long)new_table;

                        /* Recurse to fill in new table */
                        mm = init_xlation_table(mm, base_va,
                                                new_table, level + 1);
                }

                debug_print("\n");

                *table++ = desc;
                base_va += level_size;
        } while (mm->size && (base_va & level_index_mask));

        return mm;
}
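
/*
 * Note (informational; values taken from the usual 4KB-granule definitions
 * in xlat_tables.h rather than from this file): L1_XLAT_ADDRESS_SHIFT is 30
 * and XLAT_TABLE_ENTRIES_SHIFT is 9, so each descriptor written above covers
 * 1GB at level 1, 2MB at level 2 and 4KB at level 3. A region that does not
 * line up with those block sizes forces the recursion into the next, finer
 * level.
 */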

void init_xlat_tables(void)
{
        print_mmap();
        init_xlation_table(mmap, 0, l1_xlation_table, 1);
}

/*******************************************************************************
 * Macro generating the code for the function enabling the MMU in the given
 * exception level, assuming that the pagetables have already been created.
 *
 *   _el:		Exception level at which the function will run
 *   _tcr_extra:	Extra bits to set in the TCR register. This mask will
 *			be OR'ed with the default TCR value.
 *   _tlbi_fct:		Function to invalidate the TLBs at the current
 *			exception level
 ******************************************************************************/
#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)                \
        void enable_mmu_el##_el(void)                                   \
        {                                                               \
                uint64_t mair, tcr, ttbr;                               \
                uint32_t sctlr;                                         \
                                                                        \
                assert(IS_IN_EL(_el));                                  \
                assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0);      \
                                                                        \
                /* Set attributes in the right indices of the MAIR */   \
                mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);   \
                mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,             \
                                ATTR_IWBWA_OWBWA_NTR_INDEX);            \
                write_mair_el##_el(mair);                               \
                                                                        \
                /* Invalidate TLBs at the current exception level */    \
                _tlbi_fct();                                            \
                                                                        \
                /* Set TCR bits as well. */                             \
                /* Inner & outer WBWA & shareable + T0SZ = 32 */        \
                tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA |      \
                        TCR_RGN_INNER_WBA | TCR_T0SZ_4GB;               \
                tcr |= _tcr_extra;                                      \
                write_tcr_el##_el(tcr);                                 \
                                                                        \
                /* Set TTBR bits as well */                             \
                ttbr = (uint64_t) l1_xlation_table;                     \
                write_ttbr0_el##_el(ttbr);                              \
                                                                        \
                /* Ensure all translation table writes have drained */  \
                /* into memory, the TLB invalidation is complete, */    \
                /* and translation register writes are committed */     \
                /* before enabling the MMU */                           \
                dsb();                                                  \
                isb();                                                  \
                                                                        \
                sctlr = read_sctlr_el##_el();                           \
                sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT | SCTLR_I_BIT;     \
                sctlr |= SCTLR_A_BIT | SCTLR_C_BIT;                     \
                write_sctlr_el##_el(sctlr);                             \
                                                                        \
                /* Ensure the MMU enable takes effect immediately */    \
                isb();                                                  \
        }

/* Define EL1 and EL3 variants of the function enabling the MMU */
DEFINE_ENABLE_MMU_EL(1, 0, tlbivmalle1)
DEFINE_ENABLE_MMU_EL(3, TCR_EL3_RES1, tlbialle3)
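
/*
 * Typical call sequence (illustrative sketch; plat_mmap and
 * plat_configure_mmu() are hypothetical platform-side names): a BL image
 * registers its regions, builds the tables, then enables the MMU at its
 * exception level.
 *
 *	void plat_configure_mmu(void)
 *	{
 *		mmap_add(plat_mmap);	 describe the memory map
 *		init_xlat_tables();	 build l1_xlation_table and friends
 *		enable_mmu_el3();	 generated by DEFINE_ENABLE_MMU_EL(3, ...)
 *	}
 */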