blob: 6033522a0a47829e53ca6f5a9beaf55a606de4fb [file] [log] [blame]
/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
30
31#include <arch.h>
32#include <arch_helpers.h>
33#include <assert.h>
34#include <cassert.h>
35#include <platform_def.h>
36#include <utils.h>
37#include <xlat_tables.h>
38#include "../xlat_tables_private.h"
39
/*
 * Each platform can define the size of the virtual address space, which is
 * defined in PLAT_VIRT_ADDR_SPACE_SIZE. TTBCR.TxSZ is calculated as 32 minus
 * the width of said address space. The value of TTBCR.TxSZ must be in the
 * range 0 to 7 [1], which means that the virtual address space width must be
 * in the range 32 to 25 bits.
 *
 * Here we calculate the initial lookup level from the value of
 * PLAT_VIRT_ADDR_SPACE_SIZE. For a 4 KB page size, level 1 supports virtual
 * address spaces of widths 32 to 31 bits, and level 2 from 30 to 25. Wider or
 * narrower address spaces are not supported. As a result, level 3 cannot be
 * used as the initial lookup level with 4 KB granularity [1].
 *
 * For example, for a 31-bit address space (i.e. PLAT_VIRT_ADDR_SPACE_SIZE ==
 * 1 << 31), TTBCR.TxSZ will be programmed to (32 - 31) = 1. According to
 * Table G4-5 in the ARM ARM, the initial lookup level for an address space
 * like that is 1.
 *
 * See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
 * information:
 * [1] Section G4.6.5
 */
Soby Mathewb2bca612016-06-30 15:11:07 +010062
/*
 * Pick the initial lookup level and the number of entries in the base
 * translation table from PLAT_VIRT_ADDR_SPACE_SIZE, following the rules
 * explained in the comment block above.
 */
#if PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << (32 - TTBCR_TxSZ_MIN))

# error "PLAT_VIRT_ADDR_SPACE_SIZE is too big."

#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1 << L1_XLAT_ADDRESS_SHIFT)

/* Address space wider than one L1 entry covers: start the walk at level 1. */
# define XLAT_TABLE_LEVEL_BASE	1
# define NUM_BASE_LEVEL_ENTRIES	\
		(PLAT_VIRT_ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)

#elif PLAT_VIRT_ADDR_SPACE_SIZE >= (1 << (32 - TTBCR_TxSZ_MAX))

/* Narrower address space: the whole walk can start at level 2. */
# define XLAT_TABLE_LEVEL_BASE	2
# define NUM_BASE_LEVEL_ENTRIES	\
		(PLAT_VIRT_ADDR_SPACE_SIZE >> L2_XLAT_ADDRESS_SHIFT)

#else

# error "PLAT_VIRT_ADDR_SPACE_SIZE is too small."

#endif

/*
 * Base (initial lookup level) translation table, later programmed into TTBR0
 * by enable_mmu_secure(). It is aligned to its own size in bytes so that the
 * table base address fits the TTBR0 base-address field.
 */
static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));
Soby Mathewb2bca612016-06-30 15:11:07 +010087
Antonio Nino Diazaa613682017-03-22 15:48:51 +000088#if ENABLE_ASSERTIONS
/*
 * Return the highest physical address this library can map.
 *
 * The AArch32 long-descriptor translation format produces a 40-bit output
 * address, so the largest addressable physical byte is 2^40 - 1.
 */
static unsigned long long get_max_supported_pa(void)
{
	/* 40-bit output address space: addresses 0 .. (1 << 40) - 1. */
	const unsigned long long pa_limit = (1ULL << 40) - 1ULL;

	return pa_limit;
}
Antonio Nino Diazaa613682017-03-22 15:48:51 +000094#endif /* ENABLE_ASSERTIONS */
Antonio Nino Diaz00296242016-12-13 15:28:54 +000095
/*
 * Return the exception level this translation-table code executes at.
 *
 * When EL3 runs in AArch32 mode, all secure PL1 modes (Monitor, System,
 * SVC, Abort, UND, IRQ and FIQ) are architecturally part of EL3, so the
 * result is always 3.
 */
int xlat_arch_current_el(void)
{
	const int current_el = 3;

	return current_el;
}
104
105uint64_t xlat_arch_get_xn_desc(int el __unused)
106{
107 return UPPER_ATTRS(XN);
108}
109
Soby Mathewb2bca612016-06-30 15:11:07 +0100110void init_xlat_tables(void)
111{
112 unsigned long long max_pa;
113 uintptr_t max_va;
114 print_mmap();
Antonio Nino Diaze8719552016-08-02 09:21:41 +0100115 init_xlation_table(0, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
116 &max_va, &max_pa);
Antonio Nino Diaz00296242016-12-13 15:28:54 +0000117
118 assert(max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
119 assert(max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);
120 assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <= get_max_supported_pa());
Soby Mathewb2bca612016-06-30 15:11:07 +0100121}
122
/*******************************************************************************
 * Function for enabling the MMU in Secure PL1, assuming that the
 * page-tables have already been created.
 *
 * flags may contain:
 *  - DISABLE_DCACHE: leave the data cache disabled when the MMU is enabled.
 *  - XLAT_TABLE_NC:  perform translation table walks with non-cacheable,
 *                    non-shareable attributes instead of inner/outer WBWA.
 ******************************************************************************/
void enable_mmu_secure(unsigned int flags)
{
	unsigned int mair0, ttbcr, sctlr;
	uint64_t ttbr0;

	/* Must be called from Secure state, with the MMU still disabled. */
	assert(IS_IN_SECURE());
	assert((read_sctlr() & SCTLR_M_BIT) == 0);

	/* Set attributes in the right indices of the MAIR */
	mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
			ATTR_IWBWA_OWBWA_NTR_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
			ATTR_NON_CACHEABLE_INDEX);
	write_mair0(mair0);

	/* Invalidate TLBs at the current exception level */
	tlbiall();

	/*
	 * Set TTBCR bits as well. Set TTBR0 table properties. Disable TTBR1.
	 *
	 * The final term programs TTBCR.T0SZ as (32 - log2(VA space size));
	 * __builtin_ctzl yields that log2 on the assumption that
	 * PLAT_VIRT_ADDR_SPACE_SIZE is a power of two (T0SZ would be wrong
	 * otherwise — platforms must honour that constraint).
	 */
	if (flags & XLAT_TABLE_NC) {
		/* Inner & outer non-cacheable non-shareable. */
		ttbcr = TTBCR_EAE_BIT |
			TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
			TTBCR_RGN0_INNER_NC |
			(32 - __builtin_ctzl((uintptr_t)PLAT_VIRT_ADDR_SPACE_SIZE));
	} else {
		/* Inner & outer WBWA & shareable. */
		ttbcr = TTBCR_EAE_BIT |
			TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
			TTBCR_RGN0_INNER_WBA |
			(32 - __builtin_ctzl((uintptr_t)PLAT_VIRT_ADDR_SPACE_SIZE));
	}
	/* EPD1 disables walks via TTBR1: only TTBR0 is used below. */
	ttbcr |= TTBCR_EPD1_BIT;
	write_ttbcr(ttbcr);

	/* Set TTBR0 bits as well */
	ttbr0 = (uintptr_t) base_xlation_table;
	write64_ttbr0(ttbr0);
	write64_ttbr1(0);

	/*
	 * Ensure all translation table writes have drained
	 * into memory, the TLB invalidation is complete,
	 * and translation register writes are committed
	 * before enabling the MMU
	 */
	dsb();
	isb();

	sctlr = read_sctlr();
	/* Enable the MMU; WXN makes writable regions execute-never. */
	sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;

	if (flags & DISABLE_DCACHE)
		sctlr &= ~SCTLR_C_BIT;
	else
		sctlr |= SCTLR_C_BIT;

	write_sctlr(sctlr);

	/* Ensure the MMU enable takes effect immediately */
	isb();
}