/*
 * Copyright (c) 2015-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef EL3_COMMON_MACROS_S
#define EL3_COMMON_MACROS_S

#include <arch.h>
#include <asm_macros.S>
#include <context.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

	/*
	 * Helper macro to initialise EL3 registers we care about.
	 */
	.macro el3_arch_init_common
	/* ---------------------------------------------------------------------
	 * SCTLR_EL3 has already been initialised - read current value before
	 * modifying.
	 *
	 * SCTLR_EL3.I: Enable the instruction cache.
	 *
	 * SCTLR_EL3.SA: Enable Stack Alignment check. An SP alignment fault
	 * exception is generated if a load or store instruction executed at
	 * EL3 uses the SP as the base address and the SP is not aligned to a
	 * 16-byte boundary.
	 *
	 * SCTLR_EL3.A: Enable Alignment fault checking. All instructions that
	 * load or store one or more registers have an alignment check that the
	 * address being accessed is aligned to the size of the data element(s)
	 * being accessed.
	 * ---------------------------------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el3
	orr	x0, x0, x1
	msr	sctlr_el3, x0
	isb

#ifdef IMAGE_BL31
	/* ---------------------------------------------------------------------
	 * Initialise the per-cpu data pointer for this CPU.
	 * This is done early so that crash reporting has access to the crash
	 * stack. Since crash reporting depends on cpu_data to report an
	 * unhandled exception, not doing so can lead to recursive exceptions
	 * due to a NULL TPIDR_EL3.
	 * ---------------------------------------------------------------------
	 */
	bl	init_cpu_data_ptr
#endif /* IMAGE_BL31 */

	/* ---------------------------------------------------------------------
	 * Initialise SCR_EL3, setting all fields rather than relying on hw.
	 * All fields are architecturally UNKNOWN on reset. The following fields
	 * do not change during the TF lifetime. The remaining fields are set to
	 * zero here but are updated ahead of transitioning to a lower EL in the
	 * function cm_init_context_common().
	 *
	 * SCR_EL3.TWE: Set to zero so that execution of WFE instructions at
	 * EL2, EL1 and EL0 is not trapped to EL3.
	 *
	 * SCR_EL3.TWI: Set to zero so that execution of WFI instructions at
	 * EL2, EL1 and EL0 is not trapped to EL3.
	 *
	 * SCR_EL3.SIF: Set to one to disable instruction fetches from
	 * Non-secure memory.
	 *
	 * SCR_EL3.SMD: Set to zero to enable SMC calls at EL1 and above, from
	 * both Security states and both Execution states.
	 *
	 * SCR_EL3.EA: Set to one to route External Aborts and SError Interrupts
	 * to EL3 when executing at any EL.
	 *
	 * SCR_EL3.{API,APK}: For the Armv8.3 pointer authentication feature,
	 * disable traps to EL3 when accessing key registers or using pointer
	 * authentication instructions from lower ELs.
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x0, ((SCR_RESET_VAL | SCR_EA_BIT | SCR_SIF_BIT) \
			& ~(SCR_TWE_BIT | SCR_TWI_BIT | SCR_SMD_BIT))
#if CTX_INCLUDE_PAUTH_REGS
	/*
	 * If the pointer authentication registers are saved during world
	 * switches, enable pointer authentication everywhere, as it is safe to
	 * do so.
	 */
	orr	x0, x0, #(SCR_API_BIT | SCR_APK_BIT)
#endif
	msr	scr_el3, x0

	/* ---------------------------------------------------------------------
	 * Initialise MDCR_EL3, setting all fields rather than relying on hw.
	 * Some fields are architecturally UNKNOWN on reset.
	 *
	 * MDCR_EL3.SDD: Set to one to disable AArch64 Secure self-hosted debug.
	 * Debug exceptions, other than Breakpoint Instruction exceptions, are
	 * disabled from all ELs in Secure state.
	 *
	 * MDCR_EL3.SPD32: Set to 0b10 to disable AArch32 Secure self-hosted
	 * privileged debug from S-EL1.
	 *
	 * MDCR_EL3.TDOSA: Set to zero so that EL2 and EL1 System register
	 * accesses to the powerdown debug registers do not trap to EL3.
	 *
	 * MDCR_EL3.TDA: Set to zero to allow EL0, EL1 and EL2 access to the
	 * debug registers, other than those registers that are controlled by
	 * MDCR_EL3.TDOSA.
	 *
	 * MDCR_EL3.TPM: Set to zero so that EL0, EL1, and EL2 System register
	 * accesses to all Performance Monitors registers do not trap to EL3.
	 *
	 * MDCR_EL3.SCCD: Set to one so that cycle counting by PMCCNTR_EL0 is
	 * prohibited in Secure state. This bit is RES0 in versions of the
	 * architecture where FEAT_PMUv3p5 is not implemented; setting it to 1
	 * has no effect there.
	 *
	 * MDCR_EL3.MCCD: Set to one so that cycle counting by PMCCNTR_EL0 is
	 * prohibited in EL3. This bit is RES0 in versions of the architecture
	 * where FEAT_PMUv3p7 is not implemented; setting it to 1 has no effect
	 * there.
	 *
	 * MDCR_EL3.SPME: Set to zero so that event counting by the programmable
	 * counters PMEVCNTR<n>_EL0 is prohibited in Secure state. If ARMv8.2
	 * Debug is not implemented, this bit does not affect the counters
	 * unless there is support for the implementation defined
	 * authentication interface ExternalSecureNoninvasiveDebugEnabled().
	 *
	 * MDCR_EL3.NSTB, MDCR_EL3.NSTBE: Set to zero so that the Trace Buffer
	 * owning Security state is Secure state. If FEAT_TRBE is implemented,
	 * accesses to Trace Buffer control registers at EL2 and EL1 in any
	 * Security state generate trap exceptions to EL3.
	 * If FEAT_TRBE is not implemented, these bits are RES0.
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x0, ((MDCR_EL3_RESET_VAL | MDCR_SDD_BIT | \
		      MDCR_SPD32(MDCR_SPD32_DISABLE) | MDCR_SCCD_BIT | \
		      MDCR_MCCD_BIT) & ~(MDCR_SPME_BIT | MDCR_TDOSA_BIT | \
		      MDCR_TDA_BIT | MDCR_TPM_BIT | MDCR_NSTB(MDCR_NSTB_EL1) | \
		      MDCR_NSTBE))

	msr	mdcr_el3, x0

	/* ---------------------------------------------------------------------
	 * Initialise PMCR_EL0, setting all fields rather than relying on hw.
	 * Some fields are architecturally UNKNOWN on reset.
	 *
	 * PMCR_EL0.LP: Set to one so that an event counter overflow, which is
	 * recorded in PMOVSCLR_EL0[0-30], occurs on the increment that changes
	 * PMEVCNTR<n>_EL0[63] from 1 to 0, when ARMv8.5-PMU is implemented.
	 * This bit is RES0 in versions of the architecture earlier than
	 * ARMv8.5; setting it to 1 has no effect there.
	 *
	 * PMCR_EL0.LC: Set to one so that a cycle counter overflow, which is
	 * recorded in PMOVSCLR_EL0[31], occurs on the increment that changes
	 * PMCCNTR_EL0[63] from 1 to 0.
	 *
	 * PMCR_EL0.DP: Set to one so that the cycle counter, PMCCNTR_EL0,
	 * does not count when event counting is prohibited.
	 *
	 * PMCR_EL0.X: Set to zero to disable export of events.
	 *
	 * PMCR_EL0.D: Set to zero so that, when enabled, PMCCNTR_EL0 counts
	 * on every clock cycle.
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x0, ((PMCR_EL0_RESET_VAL | PMCR_EL0_LP_BIT | \
		      PMCR_EL0_LC_BIT | PMCR_EL0_DP_BIT) & \
		    ~(PMCR_EL0_X_BIT | PMCR_EL0_D_BIT))

	msr	pmcr_el0, x0

	/* ---------------------------------------------------------------------
	 * Enable External Aborts and SError Interrupts now that the exception
	 * vectors have been set up.
	 * ---------------------------------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------------------------------
	 * Initialise CPTR_EL3, setting all fields rather than relying on hw.
	 * All fields are architecturally UNKNOWN on reset.
	 *
	 * CPTR_EL3.TCPAC: Set to zero so that any accesses to CPACR_EL1,
	 * CPTR_EL2, CPACR, or HCPTR do not trap to EL3.
	 *
	 * CPTR_EL3.TTA: Set to zero so that System register accesses to the
	 * trace registers do not trap to EL3.
	 *
	 * CPTR_EL3.TFP: Set to zero so that accesses to the V- or Z- registers
	 * by Advanced SIMD, floating-point or SVE instructions (if implemented)
	 * do not trap to EL3.
	 *
	 * CPTR_EL3.TAM: Set to one so that Activity Monitor access is
	 * trapped to EL3 by default.
	 *
	 * CPTR_EL3.EZ: Set to zero so that all SVE functionality is trapped
	 * to EL3 by default.
	 * ---------------------------------------------------------------------
	 */

	mov_imm	x0, (CPTR_EL3_RESET_VAL & ~(TCPAC_BIT | TTA_BIT | TFP_BIT))
	msr	cptr_el3, x0

	/*
	 * If Data Independent Timing (DIT) functionality is implemented,
	 * always enable DIT in EL3.
	 */
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #ID_AA64PFR0_DIT_SHIFT, #ID_AA64PFR0_DIT_LENGTH
	cmp	x0, #ID_AA64PFR0_DIT_SUPPORTED
	bne	1f
	mov	x0, #DIT_BIT
	msr	DIT, x0
1:
	.endm
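
/* -----------------------------------------------------------------------------
 * Usage sketch for el3_arch_init_common (illustrative only; in this file the
 * macro is expanded exactly once, on the cold boot path of
 * el3_entrypoint_common below, after the CPU reset handler has run):
 *
 *	bl	reset_handler
 *	el3_arch_init_common
 * -----------------------------------------------------------------------------
 */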

/* -----------------------------------------------------------------------------
 * This is the superset of actions that need to be performed during a cold boot
 * or a warm boot in EL3. This code is shared by BL1 and BL31.
 *
 * This macro will always perform reset handling, architectural initialisations
 * and stack setup. The rest of the actions are optional because they might not
 * be needed, depending on the context in which this macro is called. This is
 * why this macro is parameterised; each parameter allows an action to be
 * enabled or disabled.
 *
 * _init_sctlr:
 *	Whether the macro needs to initialise SCTLR_EL3, including configuring
 *	the endianness of data accesses.
 *
 * _warm_boot_mailbox:
 *	Whether the macro needs to detect the type of boot (cold/warm). The
 *	detection is based on the platform entrypoint address: if it is zero
 *	then it is a cold boot, otherwise it is a warm boot. In the latter case,
 *	this macro jumps to the platform entrypoint address.
 *
 * _secondary_cold_boot:
 *	Whether the macro needs to identify the CPU that is calling it: primary
 *	CPU or secondary CPU. The primary CPU will be allowed to carry on with
 *	the platform initialisations, while the secondaries will be put in a
 *	platform-specific state in the meantime.
 *
 *	If the caller knows this macro will only be called by the primary CPU
 *	then this parameter can be defined to 0 to skip this step.
 *
 * _init_memory:
 *	Whether the macro needs to initialise the memory.
 *
 * _init_c_runtime:
 *	Whether the macro needs to initialise the C runtime environment.
 *
 * _exception_vectors:
 *	Address of the exception vectors to program in the VBAR_EL3 register.
 *
 * _pie_fixup_size:
 *	Size of the memory region over which to fix up the Global Descriptor
 *	Table (GDT).
 *
 *	A non-zero value is expected when the firmware needs the GDT to be
 *	fixed up.
 *
 * -----------------------------------------------------------------------------
 */
	.macro el3_entrypoint_common					\
		_init_sctlr, _warm_boot_mailbox, _secondary_cold_boot,	\
		_init_memory, _init_c_runtime, _exception_vectors,	\
		_pie_fixup_size

	.if \_init_sctlr
		/* -------------------------------------------------------------
		 * This is the initialisation of SCTLR_EL3 and so must ensure
		 * that all fields are explicitly set rather than relying on hw.
		 * Some fields reset to an IMPLEMENTATION DEFINED value and
		 * others are architecturally UNKNOWN on reset.
		 *
		 * SCTLR.EE: Set the CPU endianness before doing anything that
		 * might involve memory reads or writes. Set to zero to select
		 * Little Endian.
		 *
		 * SCTLR_EL3.WXN: For the EL3 translation regime, this field can
		 * force all memory regions that are writeable to be treated as
		 * XN (Execute-never). Set to zero so that this control has no
		 * effect on memory access permissions.
		 *
		 * SCTLR_EL3.SA: Set to zero to disable Stack Alignment check.
		 *
		 * SCTLR_EL3.A: Set to zero to disable Alignment fault checking.
		 *
		 * SCTLR.DSSBS: Set to zero to disable speculation store bypass
		 * safe behaviour upon exception entry to EL3.
		 * -------------------------------------------------------------
		 */
		mov_imm	x0, (SCTLR_RESET_VAL & ~(SCTLR_EE_BIT | SCTLR_WXN_BIT \
				| SCTLR_SA_BIT | SCTLR_A_BIT | SCTLR_DSSBS_BIT))
		msr	sctlr_el3, x0
		isb
	.endif /* _init_sctlr */

#if DISABLE_MTPMU
	bl	mtpmu_disable
#endif

	.if \_warm_boot_mailbox
		/* -------------------------------------------------------------
		 * This code will be executed for both warm and cold resets.
		 * Now is the time to distinguish between the two.
		 * Query the platform entrypoint address and if it is not zero
		 * then it means it is a warm boot so jump to this address.
		 * -------------------------------------------------------------
		 */
		bl	plat_get_my_entrypoint
		cbz	x0, do_cold_boot
		br	x0

	do_cold_boot:
	.endif /* _warm_boot_mailbox */

	.if \_pie_fixup_size
#if ENABLE_PIE
		/*
		 * ------------------------------------------------------------
		 * If PIE is enabled, fix up the Global Descriptor Table only
		 * once, during the primary core's cold boot path.
		 *
		 * The compile-time base address required for the fixup is
		 * calculated using the "pie_fixup" label, which lies within
		 * the first page.
		 * ------------------------------------------------------------
		 */
	pie_fixup:
		ldr	x0, =pie_fixup
		and	x0, x0, #~(PAGE_SIZE_MASK)
		mov_imm	x1, \_pie_fixup_size
		add	x1, x1, x0
		bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */
	.endif /* _pie_fixup_size */

	/* ---------------------------------------------------------------------
	 * Set the exception vectors.
	 * ---------------------------------------------------------------------
	 */
	adr	x0, \_exception_vectors
	msr	vbar_el3, x0
	isb

	/* ---------------------------------------------------------------------
	 * It is a cold boot.
	 * Perform any processor-specific actions upon reset, e.g. cache and
	 * TLB invalidations.
	 * ---------------------------------------------------------------------
	 */
	bl	reset_handler

	el3_arch_init_common

	.if \_secondary_cold_boot
		/* -------------------------------------------------------------
		 * Check if this is a primary or secondary CPU cold boot.
		 * The primary CPU will set up the platform while the
		 * secondaries are placed in a platform-specific state until the
		 * primary CPU performs the necessary actions to bring them out
		 * of that state and allows entry into the OS.
		 * -------------------------------------------------------------
		 */
		bl	plat_is_my_cpu_primary
		cbnz	w0, do_primary_cold_boot

		/* This is a cold boot on a secondary CPU */
		bl	plat_secondary_cold_boot_setup
		/* plat_secondary_cold_boot_setup() is not supposed to return */
		bl	el3_panic

	do_primary_cold_boot:
	.endif /* _secondary_cold_boot */

	/* ---------------------------------------------------------------------
	 * Initialize memory now. Secondary CPU initialization won't get to
	 * this point.
	 * ---------------------------------------------------------------------
	 */

	.if \_init_memory
		bl	platform_mem_init
	.endif /* _init_memory */

	/* ---------------------------------------------------------------------
	 * Init C runtime environment:
	 *   - Zero-initialise the NOBITS sections. There are 2 of them:
	 *       - the .bss section;
	 *       - the coherent memory section (if any).
	 *   - Relocate the data section from ROM to RAM, if required.
	 * ---------------------------------------------------------------------
	 */
	.if \_init_c_runtime
#if defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3 && BL2_INV_DCACHE)
		/* -------------------------------------------------------------
		 * Invalidate the RW memory used by the BL31 image. This
		 * includes the data and NOBITS sections. This is done to
		 * safeguard against possible corruption of this memory by
		 * dirty cache lines in a system cache as a result of use by
		 * an earlier boot loader stage.
		 * -------------------------------------------------------------
		 */
		adrp	x0, __RW_START__
		add	x0, x0, :lo12:__RW_START__
		adrp	x1, __RW_END__
		add	x1, x1, :lo12:__RW_END__
		sub	x1, x1, x0
		bl	inv_dcache_range
#if defined(IMAGE_BL31) && SEPARATE_NOBITS_REGION
		adrp	x0, __NOBITS_START__
		add	x0, x0, :lo12:__NOBITS_START__
		adrp	x1, __NOBITS_END__
		add	x1, x1, :lo12:__NOBITS_END__
		sub	x1, x1, x0
		bl	inv_dcache_range
#endif
#endif
		adrp	x0, __BSS_START__
		add	x0, x0, :lo12:__BSS_START__

		adrp	x1, __BSS_END__
		add	x1, x1, :lo12:__BSS_END__
		sub	x1, x1, x0
		bl	zeromem

#if USE_COHERENT_MEM
		adrp	x0, __COHERENT_RAM_START__
		add	x0, x0, :lo12:__COHERENT_RAM_START__
		adrp	x1, __COHERENT_RAM_END_UNALIGNED__
		add	x1, x1, :lo12:__COHERENT_RAM_END_UNALIGNED__
		sub	x1, x1, x0
		bl	zeromem
#endif

#if defined(IMAGE_BL1) || (defined(IMAGE_BL2) && BL2_AT_EL3 && BL2_IN_XIP_MEM)
		adrp	x0, __DATA_RAM_START__
		add	x0, x0, :lo12:__DATA_RAM_START__
		adrp	x1, __DATA_ROM_START__
		add	x1, x1, :lo12:__DATA_ROM_START__
		adrp	x2, __DATA_RAM_END__
		add	x2, x2, :lo12:__DATA_RAM_END__
		sub	x2, x2, x0
		bl	memcpy16
#endif
	.endif /* _init_c_runtime */

	/* ---------------------------------------------------------------------
	 * Use SP_EL0 for the C runtime stack.
	 * ---------------------------------------------------------------------
	 */
	msr	spsel, #0

	/* ---------------------------------------------------------------------
	 * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
	 * the MMU is enabled. There is no risk of reading stale stack memory
	 * after enabling the MMU as only the primary CPU is running at the
	 * moment.
	 * ---------------------------------------------------------------------
	 */
	bl	plat_set_my_stack

#if STACK_PROTECTOR_ENABLED
	.if \_init_c_runtime
	bl	update_stack_protector_canary
	.endif /* _init_c_runtime */
#endif
	.endm
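
/* -----------------------------------------------------------------------------
 * A sketch of a typical el3_entrypoint_common invocation, as it might appear
 * in a BL image's entrypoint. The parameter values and the bl1_exceptions
 * vector table name are illustrative assumptions, not definitions from this
 * file; each image supplies values matching its own needs:
 *
 *	el3_entrypoint_common					\
 *		_init_sctlr=1					\
 *		_warm_boot_mailbox=1				\
 *		_secondary_cold_boot=1				\
 *		_init_memory=1					\
 *		_init_c_runtime=1				\
 *		_exception_vectors=bl1_exceptions		\
 *		_pie_fixup_size=0
 * -----------------------------------------------------------------------------
 */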

	.macro	apply_at_speculative_wa
#if ERRATA_SPECULATIVE_AT
	/*
	 * Explicitly save x30 to free up a register and to enable branching,
	 * and also save x29, which is used by the called function.
	 */
	stp	x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	bl	save_and_update_ptw_el1_sys_regs
	ldp	x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif
	.endm

	.macro	restore_ptw_el1_sys_regs
#if ERRATA_SPECULATIVE_AT
	/* -----------------------------------------------------------
	 * In case of ERRATA_SPECULATIVE_AT, the order below must be
	 * followed to ensure that the page table walk is not enabled
	 * until all EL1 system registers have been restored. The
	 * TCR_EL1 register must be updated last, as it restores the
	 * previous stage 1 page table walk setting (the TCR_EL1.EPDx
	 * bits). The ISBs ensure that the CPU performs the steps below
	 * in order:
	 *
	 * 1. Ensure all other system registers are written before
	 *    updating SCTLR_EL1, using an ISB.
	 * 2. Restore the SCTLR_EL1 register.
	 * 3. Ensure SCTLR_EL1 is written successfully, using an ISB.
	 * 4. Restore the TCR_EL1 register.
	 * -----------------------------------------------------------
	 */
	isb
	ldp	x28, x29, [sp, #CTX_EL1_SYSREGS_OFFSET + CTX_SCTLR_EL1]
	msr	sctlr_el1, x28
	isb
	msr	tcr_el1, x29
#endif
	.endm
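
/* -----------------------------------------------------------------------------
 * Sketch of how the two macros above are expected to pair up on an exception
 * handling path (illustrative only; the handler label and the surrounding
 * steps are placeholders, not definitions from this file):
 *
 *	handle_lower_el_sync:
 *		... save general purpose registers to the context ...
 *		apply_at_speculative_wa
 *		... handle the exception, restore EL1 system registers ...
 *		restore_ptw_el1_sys_regs
 *		... restore remaining context ...
 *		eret
 * -----------------------------------------------------------------------------
 */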

#endif /* EL3_COMMON_MACROS_S */