blob: 002c41b81a5b2ed1cefcedda0d19ee08fc46ec7b [file] [log] [blame]
/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
30
Achin Gupta7c88f3f2014-02-18 18:09:12 +000031#include <arch.h>
Andrew Thoelke0a30cf52014-03-18 13:46:55 +000032#include <asm_macros.S>
Dan Handley97043ac2014-04-09 13:14:54 +010033#include <tsp.h>
Achin Guptab51da822014-06-26 09:58:52 +010034#include <xlat_tables.h>
Achin Gupta7c88f3f2014-02-18 18:09:12 +000035
36
37 .globl tsp_entrypoint
Andrew Thoelke399fb082014-05-20 21:43:27 +010038 .globl tsp_vector_table
Achin Gupta7c88f3f2014-02-18 18:09:12 +000039
Soby Mathew239b04f2014-05-09 20:49:17 +010040
41
Achin Gupta7c88f3f2014-02-18 18:09:12 +000042 /* ---------------------------------------------
43 * Populate the params in x0-x7 from the pointer
44 * to the smc args structure in x0.
45 * ---------------------------------------------
46 */
47 .macro restore_args_call_smc
48 ldp x6, x7, [x0, #TSP_ARG6]
49 ldp x4, x5, [x0, #TSP_ARG4]
50 ldp x2, x3, [x0, #TSP_ARG2]
51 ldp x0, x1, [x0, #TSP_ARG0]
52 smc #0
53 .endm
54
Achin Gupta6cf89022014-05-09 11:42:56 +010055 .macro save_eret_context reg1 reg2
56 mrs \reg1, elr_el1
57 mrs \reg2, spsr_el1
58 stp \reg1, \reg2, [sp, #-0x10]!
59 stp x30, x18, [sp, #-0x10]!
60 .endm
61
62 .macro restore_eret_context reg1 reg2
63 ldp x30, x18, [sp], #0x10
64 ldp \reg1, \reg2, [sp], #0x10
65 msr elr_el1, \reg1
66 msr spsr_el1, \reg2
67 .endm
68
69 .section .text, "ax"
70 .align 3
Achin Gupta7c88f3f2014-02-18 18:09:12 +000071
Andrew Thoelke0a30cf52014-03-18 13:46:55 +000072func tsp_entrypoint
Achin Gupta7c88f3f2014-02-18 18:09:12 +000073
74 /* ---------------------------------------------
Achin Gupta7c88f3f2014-02-18 18:09:12 +000075 * Set the exception vector to something sane.
76 * ---------------------------------------------
77 */
Achin Gupta57356e92014-05-09 12:17:56 +010078 adr x0, tsp_exceptions
Achin Gupta7c88f3f2014-02-18 18:09:12 +000079 msr vbar_el1, x0
Achin Gupta0c8d4fe2014-08-04 23:13:10 +010080 isb
81
82 /* ---------------------------------------------
83 * Enable the SError interrupt now that the
84 * exception vectors have been setup.
85 * ---------------------------------------------
86 */
87 msr daifclr, #DAIF_ABT_BIT
Achin Gupta7c88f3f2014-02-18 18:09:12 +000088
89 /* ---------------------------------------------
Achin Guptaec3c1002014-07-18 18:38:28 +010090 * Enable the instruction cache, stack pointer
91 * and data access alignment checks
Achin Gupta7c88f3f2014-02-18 18:09:12 +000092 * ---------------------------------------------
93 */
Achin Guptaec3c1002014-07-18 18:38:28 +010094 mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
Achin Gupta7c88f3f2014-02-18 18:09:12 +000095 mrs x0, sctlr_el1
Achin Guptaec3c1002014-07-18 18:38:28 +010096 orr x0, x0, x1
Achin Gupta7c88f3f2014-02-18 18:09:12 +000097 msr sctlr_el1, x0
98 isb
99
100 /* ---------------------------------------------
101 * Zero out NOBITS sections. There are 2 of them:
102 * - the .bss section;
103 * - the coherent memory section.
104 * ---------------------------------------------
105 */
106 ldr x0, =__BSS_START__
107 ldr x1, =__BSS_SIZE__
108 bl zeromem16
109
110 ldr x0, =__COHERENT_RAM_START__
111 ldr x1, =__COHERENT_RAM_UNALIGNED_SIZE__
112 bl zeromem16
113
114 /* --------------------------------------------
Achin Gupta754a2b72014-06-25 19:26:22 +0100115 * Allocate a stack whose memory will be marked
116 * as Normal-IS-WBWA when the MMU is enabled.
117 * There is no risk of reading stale stack
118 * memory after enabling the MMU as only the
119 * primary cpu is running at the moment.
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000120 * --------------------------------------------
121 */
122 mrs x0, mpidr_el1
Achin Gupta754a2b72014-06-25 19:26:22 +0100123 bl platform_set_stack
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000124
125 /* ---------------------------------------------
126 * Perform early platform setup & platform
127 * specific early arch. setup e.g. mmu setup
128 * ---------------------------------------------
129 */
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000130 bl bl32_early_platform_setup
131 bl bl32_plat_arch_setup
132
133 /* ---------------------------------------------
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000134 * Jump to main function.
135 * ---------------------------------------------
136 */
137 bl tsp_main
138
139 /* ---------------------------------------------
140 * Tell TSPD that we are done initialising
141 * ---------------------------------------------
142 */
143 mov x1, x0
144 mov x0, #TSP_ENTRY_DONE
145 smc #0
146
147tsp_entrypoint_panic:
148 b tsp_entrypoint_panic
149
Andrew Thoelke399fb082014-05-20 21:43:27 +0100150
151 /* -------------------------------------------
152 * Table of entrypoint vectors provided to the
153 * TSPD for the various entrypoints
154 * -------------------------------------------
155 */
156func tsp_vector_table
157 b tsp_std_smc_entry
158 b tsp_fast_smc_entry
159 b tsp_cpu_on_entry
160 b tsp_cpu_off_entry
161 b tsp_cpu_resume_entry
162 b tsp_cpu_suspend_entry
163 b tsp_fiq_entry
164
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000165 /*---------------------------------------------
166 * This entrypoint is used by the TSPD when this
167 * cpu is to be turned off through a CPU_OFF
168 * psci call to ask the TSP to perform any
169 * bookeeping necessary. In the current
170 * implementation, the TSPD expects the TSP to
171 * re-initialise its state so nothing is done
172 * here except for acknowledging the request.
173 * ---------------------------------------------
174 */
Andrew Thoelke0a30cf52014-03-18 13:46:55 +0000175func tsp_cpu_off_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000176 bl tsp_cpu_off_main
177 restore_args_call_smc
178
179 /*---------------------------------------------
180 * This entrypoint is used by the TSPD when this
181 * cpu is turned on using a CPU_ON psci call to
182 * ask the TSP to initialise itself i.e. setup
183 * the mmu, stacks etc. Minimal architectural
184 * state will be initialised by the TSPD when
185 * this function is entered i.e. Caches and MMU
186 * will be turned off, the execution state
187 * will be aarch64 and exceptions masked.
188 * ---------------------------------------------
189 */
Andrew Thoelke0a30cf52014-03-18 13:46:55 +0000190func tsp_cpu_on_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000191 /* ---------------------------------------------
192 * Set the exception vector to something sane.
193 * ---------------------------------------------
194 */
Achin Gupta57356e92014-05-09 12:17:56 +0100195 adr x0, tsp_exceptions
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000196 msr vbar_el1, x0
Achin Gupta0c8d4fe2014-08-04 23:13:10 +0100197 isb
198
199 /* Enable the SError interrupt */
200 msr daifclr, #DAIF_ABT_BIT
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000201
202 /* ---------------------------------------------
Achin Guptaec3c1002014-07-18 18:38:28 +0100203 * Enable the instruction cache, stack pointer
204 * and data access alignment checks
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000205 * ---------------------------------------------
206 */
Achin Guptaec3c1002014-07-18 18:38:28 +0100207 mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000208 mrs x0, sctlr_el1
Achin Guptaec3c1002014-07-18 18:38:28 +0100209 orr x0, x0, x1
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000210 msr sctlr_el1, x0
211 isb
212
213 /* --------------------------------------------
Achin Guptab51da822014-06-26 09:58:52 +0100214 * Give ourselves a stack whose memory will be
215 * marked as Normal-IS-WBWA when the MMU is
216 * enabled.
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000217 * --------------------------------------------
218 */
219 mrs x0, mpidr_el1
Achin Guptab51da822014-06-26 09:58:52 +0100220 bl platform_set_stack
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000221
Achin Guptab51da822014-06-26 09:58:52 +0100222 /* --------------------------------------------
223 * Enable the MMU with the DCache disabled. It
224 * is safe to use stacks allocated in normal
225 * memory as a result. All memory accesses are
226 * marked nGnRnE when the MMU is disabled. So
227 * all the stack writes will make it to memory.
228 * All memory accesses are marked Non-cacheable
229 * when the MMU is enabled but D$ is disabled.
230 * So used stack memory is guaranteed to be
231 * visible immediately after the MMU is enabled
232 * Enabling the DCache at the same time as the
233 * MMU can lead to speculatively fetched and
234 * possibly stale stack memory being read from
235 * other caches. This can lead to coherency
236 * issues.
237 * --------------------------------------------
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000238 */
Achin Guptab51da822014-06-26 09:58:52 +0100239 mov x0, #DISABLE_DCACHE
Dan Handleydff8e472014-05-16 14:08:45 +0100240 bl bl32_plat_enable_mmu
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000241
242 /* ---------------------------------------------
Achin Guptab51da822014-06-26 09:58:52 +0100243 * Enable the Data cache now that the MMU has
244 * been enabled. The stack has been unwound. It
245 * will be written first before being read. This
246 * will invalidate any stale cache lines resi-
247 * -dent in other caches. We assume that
248 * interconnect coherency has been enabled for
249 * this cluster by EL3 firmware.
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000250 * ---------------------------------------------
251 */
Achin Guptab51da822014-06-26 09:58:52 +0100252 mrs x0, sctlr_el1
253 orr x0, x0, #SCTLR_C_BIT
254 msr sctlr_el1, x0
255 isb
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000256
257 /* ---------------------------------------------
258 * Enter C runtime to perform any remaining
259 * book keeping
260 * ---------------------------------------------
261 */
262 bl tsp_cpu_on_main
263 restore_args_call_smc
264
265 /* Should never reach here */
266tsp_cpu_on_entry_panic:
267 b tsp_cpu_on_entry_panic
268
269 /*---------------------------------------------
270 * This entrypoint is used by the TSPD when this
271 * cpu is to be suspended through a CPU_SUSPEND
272 * psci call to ask the TSP to perform any
273 * bookeeping necessary. In the current
274 * implementation, the TSPD saves and restores
275 * the EL1 state.
276 * ---------------------------------------------
277 */
Andrew Thoelke0a30cf52014-03-18 13:46:55 +0000278func tsp_cpu_suspend_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000279 bl tsp_cpu_suspend_main
280 restore_args_call_smc
281
282 /*---------------------------------------------
Achin Gupta6cf89022014-05-09 11:42:56 +0100283 * This entrypoint is used by the TSPD to pass
284 * control for handling a pending S-EL1 FIQ.
285 * 'x0' contains a magic number which indicates
286 * this. TSPD expects control to be handed back
287 * at the end of FIQ processing. This is done
288 * through an SMC. The handover agreement is:
289 *
290 * 1. PSTATE.DAIF are set upon entry. 'x1' has
291 * the ELR_EL3 from the non-secure state.
292 * 2. TSP has to preserve the callee saved
293 * general purpose registers, SP_EL1/EL0 and
294 * LR.
295 * 3. TSP has to preserve the system and vfp
296 * registers (if applicable).
297 * 4. TSP can use 'x0-x18' to enable its C
298 * runtime.
299 * 5. TSP returns to TSPD using an SMC with
300 * 'x0' = TSP_HANDLED_S_EL1_FIQ
301 * ---------------------------------------------
302 */
303func tsp_fiq_entry
304#if DEBUG
305 mov x2, #(TSP_HANDLE_FIQ_AND_RETURN & ~0xffff)
306 movk x2, #(TSP_HANDLE_FIQ_AND_RETURN & 0xffff)
307 cmp x0, x2
308 b.ne tsp_fiq_entry_panic
309#endif
310 /*---------------------------------------------
311 * Save any previous context needed to perform
312 * an exception return from S-EL1 e.g. context
313 * from a previous IRQ. Update statistics and
314 * handle the FIQ before returning to the TSPD.
315 * IRQ/FIQs are not enabled since that will
316 * complicate the implementation. Execution
317 * will be transferred back to the normal world
318 * in any case. A non-zero return value from the
319 * fiq handler is an error.
320 * ---------------------------------------------
321 */
322 save_eret_context x2 x3
323 bl tsp_update_sync_fiq_stats
324 bl tsp_fiq_handler
325 cbnz x0, tsp_fiq_entry_panic
326 restore_eret_context x2 x3
327 mov x0, #(TSP_HANDLED_S_EL1_FIQ & ~0xffff)
328 movk x0, #(TSP_HANDLED_S_EL1_FIQ & 0xffff)
329 smc #0
330
331tsp_fiq_entry_panic:
332 b tsp_fiq_entry_panic
333
334 /*---------------------------------------------
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000335 * This entrypoint is used by the TSPD when this
336 * cpu resumes execution after an earlier
337 * CPU_SUSPEND psci call to ask the TSP to
338 * restore its saved context. In the current
339 * implementation, the TSPD saves and restores
340 * EL1 state so nothing is done here apart from
341 * acknowledging the request.
342 * ---------------------------------------------
343 */
Andrew Thoelke0a30cf52014-03-18 13:46:55 +0000344func tsp_cpu_resume_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000345 bl tsp_cpu_resume_main
346 restore_args_call_smc
347tsp_cpu_resume_panic:
348 b tsp_cpu_resume_panic
349
350 /*---------------------------------------------
351 * This entrypoint is used by the TSPD to ask
352 * the TSP to service a fast smc request.
353 * ---------------------------------------------
354 */
Andrew Thoelke0a30cf52014-03-18 13:46:55 +0000355func tsp_fast_smc_entry
Soby Mathew239b04f2014-05-09 20:49:17 +0100356 bl tsp_smc_handler
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000357 restore_args_call_smc
358tsp_fast_smc_entry_panic:
359 b tsp_fast_smc_entry_panic
360
Soby Mathew239b04f2014-05-09 20:49:17 +0100361 /*---------------------------------------------
362 * This entrypoint is used by the TSPD to ask
363 * the TSP to service a std smc request.
364 * We will enable preemption during execution
365 * of tsp_smc_handler.
366 * ---------------------------------------------
367 */
368func tsp_std_smc_entry
369 msr daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
370 bl tsp_smc_handler
371 msr daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
372 restore_args_call_smc
373tsp_std_smc_entry_panic:
374 b tsp_std_smc_entry_panic