/*
 * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <common/bl_common.h>
#include <el3_common_macros.S>
#include <lib/pmf/aarch64/pmf_asm_macros.S>
#include <lib/runtime_instr.h>
#include <lib/xlat_tables/xlat_mmu_helpers.h>

	.globl	bl31_entrypoint
	.globl	bl31_warm_entrypoint

	/* -----------------------------------------------------
	 * bl31_entrypoint() is the cold boot entrypoint,
	 * executed only by the primary cpu.
	 * -----------------------------------------------------
	 */

func bl31_entrypoint
	/* ---------------------------------------------------------------
	 * Stash the previous bootloader arguments x0 - x3 for later use.
	 * ---------------------------------------------------------------
	 */
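	/*
	 * x20 - x23 are callee-saved registers in the AArch64 procedure call
	 * standard, so the stashed arguments survive the function calls made
	 * further down this path.
	 */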
	mov	x20, x0
	mov	x21, x1
	mov	x22, x2
	mov	x23, x3

#if !RESET_TO_BL31
	/* ---------------------------------------------------------------------
	 * For !RESET_TO_BL31 systems, only the primary CPU ever reaches
	 * bl31_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already initialised the
	 * SCTLR_EL3, including the endianness, and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=runtime_exceptions		\
		_pie_fixup_size=BL31_LIMIT - BL31_BASE
#else

	/* ---------------------------------------------------------------------
	 * For RESET_TO_BL31 systems which have a programmable reset address,
	 * bl31_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=runtime_exceptions		\
		_pie_fixup_size=BL31_LIMIT - BL31_BASE

	/* ---------------------------------------------------------------------
	 * For RESET_TO_BL31 systems, BL31 is the first bootloader to run so
	 * there's no argument to relay from a previous bootloader. Zero the
	 * arguments passed to the platform layer to reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	x20, 0
	mov	x21, 0
	mov	x22, 0
	mov	x23, 0
#endif /* RESET_TO_BL31 */

	/* --------------------------------------------------------------------
	 * Perform BL31 setup
	 * --------------------------------------------------------------------
	 */
	mov	x0, x20
	mov	x1, x21
	mov	x2, x22
	mov	x3, x23
	bl	bl31_setup

#if ENABLE_PAUTH
	/* --------------------------------------------------------------------
	 * Program APIAKey_EL1 and enable pointer authentication
	 * --------------------------------------------------------------------
	 */
	bl	pauth_init_enable_el3
#endif /* ENABLE_PAUTH */

	/* --------------------------------------------------------------------
	 * Jump to main function
	 * --------------------------------------------------------------------
	 */
	bl	bl31_main

	/* --------------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * --------------------------------------------------------------------
	 */
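	/*
	 * clean_dcache_range() takes the base address in x0 and the size in
	 * bytes in x1; the adrp/add :lo12: pairs build the addresses of the
	 * linker-defined section symbols PC-relatively.
	 */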
	adrp	x0, __DATA_START__
	add	x0, x0, :lo12:__DATA_START__
	adrp	x1, __DATA_END__
	add	x1, x1, :lo12:__DATA_END__
	sub	x1, x1, x0
	bl	clean_dcache_range

	adrp	x0, __BSS_START__
	add	x0, x0, :lo12:__BSS_START__
	adrp	x1, __BSS_END__
	add	x1, x1, :lo12:__BSS_END__
	sub	x1, x1, x0
	bl	clean_dcache_range

	b	el3_exit
endfunc bl31_entrypoint

	/* --------------------------------------------------------------------
	 * This CPU has been physically powered up. It is either resuming from
	 * suspend or has simply been turned on. In both cases, call the BL31
	 * warmboot entrypoint
	 * --------------------------------------------------------------------
	 */
func bl31_warm_entrypoint
#if ENABLE_RUNTIME_INSTRUMENTATION

	/*
	 * This timestamp update happens with cache off. The next
	 * timestamp collection will need to do cache maintenance prior
	 * to timestamp update.
	 */
	pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_HW_LOW_PWR
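	/* The macro above returns the timestamp's address in x0. */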
	mrs	x1, cntpct_el0
	str	x1, [x0]
#endif

	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 * - Only when the platform bypasses the BL1/BL31 entrypoint by
	 *   programming the reset address do we need to initialise SCTLR_EL3.
	 *   In other cases, we assume this has been taken care of by the
	 *   entrypoint code.
	 *
	 * - No need to determine the type of boot, we know it is a warm boot.
	 *
	 * - Do not try to distinguish between primary and secondary CPUs, this
	 *   notion only exists for a cold boot.
	 *
	 * - No need to initialise the memory or the C runtime environment,
	 *   it has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_init_sctlr=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=runtime_exceptions		\
		_pie_fixup_size=0

	/*
	 * We're about to enable the MMU and participate in PSCI state
	 * coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs to
	 * participate in coherency. On a system where CPUs are not
	 * cache-coherent without appropriate platform specific programming,
	 * having caches enabled until such time might lead to coherency issues
	 * (resulting from stale data getting speculatively fetched, among
	 * others). Therefore we keep data caches disabled even after enabling
	 * the MMU for such platforms.
	 *
	 * On systems with hardware-assisted coherency, or on single cluster
	 * platforms, such platform specific programming is not required to
	 * enter coherency (as CPUs already are); and there's no reason to have
	 * caches disabled either.
	 */
#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
	mov	x0, xzr
#else
	mov	x0, #DISABLE_DCACHE
#endif
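	/*
	 * x0 is the flags argument for bl31_plat_enable_mmu():
	 * DISABLE_DCACHE keeps the data cache off, zero enables it along
	 * with the MMU.
	 */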
	bl	bl31_plat_enable_mmu

#if ENABLE_PAUTH
	/* --------------------------------------------------------------------
	 * Program APIAKey_EL1 and enable pointer authentication
	 * --------------------------------------------------------------------
	 */
	bl	pauth_init_enable_el3
#endif /* ENABLE_PAUTH */

	bl	psci_warmboot_entrypoint

#if ENABLE_RUNTIME_INSTRUMENTATION
	pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_PSCI
	mov	x19, x0
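	/*
	 * Stash the timestamp address in the callee-saved x19 so that it
	 * survives the inv_dcache_range() call below, where x0 is reused as
	 * the base address argument.
	 */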

	/*
	 * Invalidate before updating timestamp to ensure previous timestamp
	 * updates on the same cache line with caches disabled are properly
	 * seen by the same core. Without the cache invalidate, the core might
	 * write into a stale cache line.
	 */
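	/*
	 * x1 holds the size of the region to invalidate; the 'bl' below
	 * clobbers x30, so its current value is parked in x20 around the call.
	 */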
	mov	x1, #PMF_TS_SIZE
	mov	x20, x30
	bl	inv_dcache_range
	mov	x30, x20

	mrs	x0, cntpct_el0
	str	x0, [x19]
#endif
	b	el3_exit
endfunc bl31_warm_entrypoint