/*
 * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <platform_def.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/sys_reg_trace.h>
#include <lib/utils.h>

/*******************************************************************************
 * Context management library initialisation routine. This library is used by
 * runtime services to share pointers to 'cpu_context' structures for the secure
 * and non-secure states. Management of the structures and their associated
 * memory is not done by the context management library, e.g. the PSCI service
 * manages the cpu context used for entry from and exit to the non-secure state.
 * The Secure payload manages the context(s) corresponding to the secure state.
 * It also uses this library to get access to the non-secure
 * state cpu context pointers.
 ******************************************************************************/
void cm_init(void)
{
	/*
	 * The context management library has only global data to initialize, but
	 * that will be done when the BSS is zeroed out
	 */
}
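
/*
 * Illustrative sketch only, not part of the library: per the ownership model
 * described above, a BL32 secure payload (or its dispatcher) does not manage
 * the non-secure context itself; it simply looks the pointer up through the
 * accessors declared in lib/el3_runtime/context_mgmt.h. The helper name below
 * is hypothetical and the block is deliberately compiled out.
 */
#if 0
static cpu_context_t *example_get_ns_context(void)
{
	cpu_context_t *ns_ctx;

	/* The PSCI service owns and initialises this context; only read it. */
	ns_ctx = cm_get_context(NON_SECURE);
	assert(ns_ctx != NULL);

	return ns_ctx;
}
#endif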

/*******************************************************************************
 * The following function initializes the cpu_context 'ctx' for
 * first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 *
 * The security state to initialize is determined by the SECURE attribute
 * of the entry_point_info.
 *
 * The EE and ST attributes are used to configure the endianness and secure
 * timer availability for the new execution context.
 *
 * To prepare the register state for entry call cm_prepare_el3_exit() and
 * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to
 * cm_el1_sysregs_context_restore().
 ******************************************************************************/
void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
{
	unsigned int security_state;
	uint32_t scr, sctlr;
	regs_t *reg_ctx;

	assert(ctx != NULL);

	security_state = GET_SECURITY_STATE(ep->h.attr);

	/* Clear any residual register values from the context */
	zeromem(ctx, sizeof(*ctx));

	reg_ctx = get_regs_ctx(ctx);

	/*
	 * Base the context SCR on the current value, adjust for entry point
	 * specific requirements
	 */
	scr = read_scr();
	scr &= ~(SCR_NS_BIT | SCR_HCE_BIT);

	if (security_state != SECURE)
		scr |= SCR_NS_BIT;

	if (security_state != SECURE) {
		/*
		 * Set up SCTLR for the Non-secure context.
		 *
		 * SCTLR.EE: Endianness is taken from the entrypoint attributes.
		 *
		 * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as
		 * required by PSCI specification)
		 *
		 * Set remaining SCTLR fields to their architecturally defined
		 * values. Some fields reset to an IMPLEMENTATION DEFINED value:
		 *
		 * SCTLR.TE: Set to zero so that exceptions to an Exception
		 * Level executing at PL1 are taken to A32 state.
		 *
		 * SCTLR.V: Set to zero to select the normal exception vectors
		 * with base address held in VBAR.
		 */
		assert(((ep->spsr >> SPSR_E_SHIFT) & SPSR_E_MASK) ==
			(EP_GET_EE(ep->h.attr) >> EP_EE_SHIFT));

		sctlr = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0U;
		sctlr |= (SCTLR_RESET_VAL & ~(SCTLR_TE_BIT | SCTLR_V_BIT));
		write_ctx_reg(reg_ctx, CTX_NS_SCTLR, sctlr);
	}

	/*
	 * The target exception level is based on the spsr mode requested. If
	 * execution is requested to hyp mode, HVC is enabled via SCR.HCE.
	 */
	if (GET_M32(ep->spsr) == MODE32_hyp)
		scr |= SCR_HCE_BIT;

	/*
	 * Store the initialised values for SCTLR and SCR in the cpu_context.
	 * The Hyp mode registers are not part of the saved context and are
	 * set-up in cm_prepare_el3_exit().
	 */
	write_ctx_reg(reg_ctx, CTX_SCR, scr);
	write_ctx_reg(reg_ctx, CTX_LR, ep->pc);
	write_ctx_reg(reg_ctx, CTX_SPSR, ep->spsr);

	/*
	 * Store the r0-r3 values from the entrypoint into the context.
	 * Use memcpy as we are in control of the layout of the structures.
	 */
	memcpy((void *)reg_ctx, (void *)&ep->args, sizeof(aapcs32_params_t));
}

/*******************************************************************************
 * Enable architecture extensions on first entry to Non-secure world.
 * `el2_unused` is true when EL2 is implemented but unused, and false
 * otherwise.
 ******************************************************************************/
static void enable_extensions_nonsecure(bool el2_unused)
{
#if IMAGE_BL32
#if ENABLE_AMU
	amu_enable(el2_unused);
#endif

#if ENABLE_SYS_REG_TRACE_FOR_NS
	sys_reg_trace_enable();
#endif /* ENABLE_SYS_REG_TRACE_FOR_NS */
#endif
}

/*******************************************************************************
 * The following function initializes the cpu_context for a CPU specified by
 * its `cpu_idx` for first use, and sets the initial entrypoint state as
 * specified by the entry_point_info structure.
 ******************************************************************************/
void cm_init_context_by_index(unsigned int cpu_idx,
			      const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}

/*******************************************************************************
 * The following function initializes the cpu_context for the current CPU
 * for first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 ******************************************************************************/
void cm_init_my_context(const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}
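
/*
 * Illustrative sketch only, not part of the library: the typical sequence a
 * runtime service follows to hand over to the normal world using the
 * functions above. The entrypoint value, the SPSR composed here and the
 * helper name are assumptions for the example; real callers (e.g. the PSCI
 * service) derive them from platform data. Deliberately compiled out.
 */
#if 0
static void example_prepare_ns_entry(uintptr_t ns_entrypoint)
{
	entry_point_info_t ep;

	zeromem(&ep, sizeof(ep));
	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1, 0);
	SET_SECURITY_STATE(ep.h.attr, NON_SECURE);

	/* Request entry to Hyp mode so that cm_setup_context() sets SCR.HCE */
	ep.pc = ns_entrypoint;
	ep.spsr = SPSR_MODE32(MODE32_hyp, SPSR_T_ARM, SPSR_E_LITTLE,
			      DISABLE_ALL_EXCEPTIONS);

	/* Populate this CPU's non-secure cpu_context... */
	cm_init_my_context(&ep);

	/* ...then programme the hardware registers before el3_exit() */
	cm_prepare_el3_exit(NON_SECURE);
}
#endif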

/*******************************************************************************
 * Prepare the CPU system registers for first entry into secure or normal world
 *
 * If execution is requested to hyp mode, HSCTLR is initialized.
 * If execution is requested to non-secure PL1 and the CPU supports HYP mode,
 * then HYP mode is disabled by configuring all necessary HYP mode registers.
 ******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
	uint32_t hsctlr, scr;
	cpu_context_t *ctx = cm_get_context(security_state);
	bool el2_unused = false;

	assert(ctx != NULL);

	if (security_state == NON_SECURE) {
		scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR);
		if ((scr & SCR_HCE_BIT) != 0U) {
			/* Use SCTLR value to initialize HSCTLR */
			hsctlr = read_ctx_reg(get_regs_ctx(ctx),
					      CTX_NS_SCTLR);
			hsctlr |= HSCTLR_RES1;
			/* Temporarily set the NS bit to access HSCTLR */
			write_scr(read_scr() | SCR_NS_BIT);
			/*
			 * Make sure the write to SCR is complete so that
			 * we can access HSCTLR
			 */
			isb();
			write_hsctlr(hsctlr);
			isb();

			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		} else if ((read_id_pfr1() &
			    (ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) != 0U) {
			el2_unused = true;

			/*
			 * Set the NS bit to access NS copies of certain banked
			 * registers
			 */
			write_scr(read_scr() | SCR_NS_BIT);
			isb();

			/*
			 * Hyp / PL2 present but unused, need to disable safely.
			 * HSCTLR can be ignored in this case.
			 *
			 * Set HCR to its architectural reset value so that
			 * Non-secure operations do not trap to Hyp mode.
			 */
			write_hcr(HCR_RESET_VAL);

			/*
			 * Set HCPTR to its architectural reset value so that
			 * Non-secure access from EL1 or EL0 to trace and to
			 * Advanced SIMD and floating point functionality does
			 * not trap to Hyp mode.
			 */
			write_hcptr(HCPTR_RESET_VAL);

			/*
			 * Initialise CNTHCTL. All fields are architecturally
			 * UNKNOWN on reset and are set to zero except for
			 * field(s) listed below.
			 *
			 * CNTHCTL.PL1PCEN: Disable traps to Hyp mode of
			 * Non-secure EL0 and EL1 accesses to the physical
			 * timer registers.
			 *
			 * CNTHCTL.PL1PCTEN: Disable traps to Hyp mode of
			 * Non-secure EL0 and EL1 accesses to the physical
			 * counter registers.
			 */
			write_cnthctl(CNTHCTL_RESET_VAL |
					PL1PCEN_BIT | PL1PCTEN_BIT);

			/*
			 * Initialise CNTVOFF to zero as it resets to an
			 * IMPLEMENTATION DEFINED value.
			 */
			write64_cntvoff(0);

			/*
			 * Set VPIDR and VMPIDR to match MIDR and MPIDR
			 * respectively.
			 */
			write_vpidr(read_midr());
			write_vmpidr(read_mpidr());

			/*
			 * Initialise VTTBR, setting all fields rather than
			 * relying on the hw. Some fields are architecturally
			 * UNKNOWN at reset.
			 *
			 * VTTBR.VMID: Set to zero which is the architecturally
			 * defined reset value. Even though EL1&0 stage 2
			 * address translation is disabled, cache maintenance
			 * operations depend on the VMID.
			 *
			 * VTTBR.BADDR: Set to zero as EL1&0 stage 2 address
			 * translation is disabled.
			 */
			write64_vttbr(VTTBR_RESET_VAL &
				~((VTTBR_VMID_MASK << VTTBR_VMID_SHIFT)
				| (VTTBR_BADDR_MASK << VTTBR_BADDR_SHIFT)));

			/*
			 * Initialise HDCR, setting all the fields rather than
			 * relying on hw.
			 *
			 * HDCR.HPMN: Set to the value of PMCR.N, which is the
			 * architecturally-defined reset value.
			 *
			 * HDCR.HLP: Set to one so that event counter
			 * overflow, which is recorded in PMOVSCLR[0-30],
			 * occurs on the increment that changes
			 * PMEVCNTR<n>[63] from 1 to 0, when ARMv8.5-PMU is
			 * implemented. This bit is RES0 in versions of the
			 * architecture earlier than ARMv8.5, so setting it
			 * to 1 has no effect on them.
			 * This bit is Reserved, UNK/SBZP in ARMv7.
			 *
			 * HDCR.HPME: Set to zero to disable EL2 Event
			 * counters.
			 */
#if (ARM_ARCH_MAJOR > 7)
			write_hdcr((HDCR_RESET_VAL | HDCR_HLP_BIT |
				   ((read_pmcr() & PMCR_N_BITS) >>
				    PMCR_N_SHIFT)) & ~HDCR_HPME_BIT);
#else
			write_hdcr((HDCR_RESET_VAL |
				   ((read_pmcr() & PMCR_N_BITS) >>
				    PMCR_N_SHIFT)) & ~HDCR_HPME_BIT);
#endif
			/*
			 * Set HSTR to its architectural reset value so that
			 * accesses to system registers in the coproc=1111
			 * encoding space do not trap to Hyp mode.
			 */
			write_hstr(HSTR_RESET_VAL);
			/*
			 * Set CNTHP_CTL to its architectural reset value to
			 * disable the EL2 physical timer and prevent timer
			 * interrupts. Some fields are architecturally UNKNOWN
			 * on reset and are set to zero.
			 */
			write_cnthp_ctl(CNTHP_CTL_RESET_VAL);
			isb();

			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		}
		enable_extensions_nonsecure(el2_unused);
	}
}