blob: 1d6d0748f04728b1f8a4fecf4a8e213d4445c5a0 [file] [log] [blame]
Varun Wadekar948c0902016-11-08 15:46:48 -08001/*
Varun Wadekar8e590622017-02-16 18:14:37 -08002 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
Varun Wadekar948c0902016-11-08 15:46:48 -08003 *
dp-arm82cb2c12017-05-03 09:38:09 +01004 * SPDX-License-Identifier: BSD-3-Clause
Varun Wadekar948c0902016-11-08 15:46:48 -08005 */
6
Anthony Zhoudae374b2015-10-31 06:03:41 +08007#include <arch_helpers.h>
8#include <assert.h> /* for context_mgmt.h */
Varun Wadekar948c0902016-11-08 15:46:48 -08009#include <bl31.h>
Isla Mitchell2a4b4b72017-07-11 14:54:08 +010010#include <bl_common.h>
Varun Wadekar948c0902016-11-08 15:46:48 -080011#include <context_mgmt.h>
12#include <debug.h>
13#include <interrupt_mgmt.h>
14#include <platform.h>
15#include <runtime_svc.h>
16#include <string.h>
17
Varun Wadekar948c0902016-11-08 15:46:48 -080018#include "sm_err.h"
Isla Mitchell2a4b4b72017-07-11 14:54:08 +010019#include "smcall.h"
Varun Wadekar948c0902016-11-08 15:46:48 -080020
/*
 * Macro to check if Hypervisor is enabled in the HCR_EL2 register.
 * NOTE(review): mask value 0x286001 presumably covers the HCR_EL2 bits a
 * hypervisor sets (RW/TGE and friends) — confirm against the ARMv8 ARM
 * HCR_EL2 bit assignments.
 */
#define HYP_ENABLE_FLAG		0x286001
23
/*
 * Per-CPU secure-world stack. The address of 'end' (just past the stack
 * space) is handed to trusty_init_context_stack() in trusty_init() as the
 * initial stack pointer — presumably because stacks grow downwards from
 * the high end (TODO confirm against the assembly helper).
 */
struct trusty_stack {
	uint8_t space[PLATFORM_STACK_SIZE] __aligned(16);
	uint32_t end;
};
28
/* Per-CPU state maintained by the Trusty dispatcher. */
struct trusty_cpu_ctx {
	cpu_context_t	cpu_ctx;		/* EL1/EL3 context used for the secure world */
	void		*saved_sp;		/* Trusty-side SP saved across world switches */
	uint32_t	saved_security_state;	/* world whose context is currently saved;
						 * ~0 until trusty_init() completes */
	int		fiq_handler_active;	/* non-zero while an NS FIQ handler is running */
	uint64_t	fiq_handler_pc;		/* NS FIQ handler entry, set via SET_FIQ_HANDLER */
	uint64_t	fiq_handler_cpsr;	/* CPSR to enter the NS FIQ handler with */
	uint64_t	fiq_handler_sp;		/* stack for the NS FIQ handler */
	uint64_t	fiq_pc;			/* interrupted NS pc, restored on FIQ exit */
	uint64_t	fiq_cpsr;		/* interrupted NS cpsr, restored on FIQ exit */
	uint64_t	fiq_sp_el1;		/* interrupted NS SP_EL1, restored on FIQ exit */
	gp_regs_t	fiq_gpregs;		/* GP regs snapshot taken on FIQ entry */
	struct trusty_stack	secure_stack;	/* this CPU's secure-world stack */
};
43
/*
 * General-purpose register values (x0-x7) exchanged with Trusty across a
 * world switch; mirrors the layout trusty_context_switch_helper() returns.
 */
struct args {
	uint64_t r0;
	uint64_t r1;
	uint64_t r2;
	uint64_t r3;
	uint64_t r4;
	uint64_t r5;
	uint64_t r6;
	uint64_t r7;	/* carries the VMID when a hypervisor is present */
};
54
/* One Trusty context per CPU, indexed by plat_my_core_pos(). */
struct trusty_cpu_ctx trusty_cpu_ctx[PLATFORM_CORE_COUNT];

/* Helpers implemented in assembly (defined outside this file). */
struct args trusty_init_context_stack(void **sp, void *new_stack);
struct args trusty_context_switch_helper(void **sp, void *smc_params);

/* VMID of the guest whose yielding SMC is currently in flight; 0 = none. */
static uint32_t current_vmid;
61
Varun Wadekar948c0902016-11-08 15:46:48 -080062static struct trusty_cpu_ctx *get_trusty_ctx(void)
63{
64 return &trusty_cpu_ctx[plat_my_core_pos()];
65}
66
Anthony Zhoudae374b2015-10-31 06:03:41 +080067static uint32_t is_hypervisor_mode(void)
68{
69 uint64_t hcr = read_hcr();
70
71 return !!(hcr & HYP_ENABLE_FLAG);
72}
73
/*
 * Switch worlds: save the context of 'security_state', enter the other
 * world passing r0-r3 (plus VMID in r7 when a hypervisor is present), and
 * on return restore 'security_state' and hand back the other world's
 * r0-r7. The statement ordering here (FP save -> sysreg save -> helper ->
 * sysreg restore -> FP restore -> eret context) is deliberate; do not
 * reorder.
 */
static struct args trusty_context_switch(uint32_t security_state, uint64_t r0,
					 uint64_t r1, uint64_t r2, uint64_t r3)
{
	struct args ret;
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
	struct trusty_cpu_ctx *ctx_smc;

	/* We must actually be switching worlds, not re-entering the same one */
	assert(ctx->saved_security_state != security_state);

	ret.r7 = 0;
	if (is_hypervisor_mode()) {
		/* According to the ARM DEN0028A spec, VMID is stored in x7 */
		ctx_smc = cm_get_context(NON_SECURE);
		assert(ctx_smc);
		ret.r7 = SMC_GET_GP(ctx_smc, CTX_GPREG_X7);
	}
	/* r4, r5, r6 reserved for future use. */
	ret.r6 = 0;
	ret.r5 = 0;
	ret.r4 = 0;
	ret.r3 = r3;
	ret.r2 = r2;
	ret.r1 = r1;
	ret.r0 = r0;

	/*
	 * To avoid the additional overhead in PSCI flow, skip FP context
	 * saving/restoring in case of CPU suspend and resume, assuming that
	 * when it's needed the PSCI caller has preserved FP context before
	 * going here.
	 */
	if (r0 != SMC_FC_CPU_SUSPEND && r0 != SMC_FC_CPU_RESUME)
		fpregs_context_save(get_fpregs_ctx(cm_get_context(security_state)));
	cm_el1_sysregs_context_save(security_state);

	ctx->saved_security_state = security_state;
	/* ret doubles as the in/out parameter block for the asm helper */
	ret = trusty_context_switch_helper(&ctx->saved_sp, &ret);

	/* On return the *other* world's context must have been saved */
	assert(ctx->saved_security_state == !security_state);

	cm_el1_sysregs_context_restore(security_state);
	if (r0 != SMC_FC_CPU_SUSPEND && r0 != SMC_FC_CPU_RESUME)
		fpregs_context_restore(get_fpregs_ctx(cm_get_context(security_state)));

	cm_set_next_eret_context(security_state);

	return ret;
}
122
/*
 * EL3 handler for S-EL1 interrupts taken from the non-secure world.
 * Notifies Trusty (SMC_FC_FIQ_ENTER), then — unless Trusty declined
 * (ret.r0 != 0) or a handler is already active — snapshots the interrupted
 * NS state and redirects the NS world to the registered FIQ handler.
 * Always returns to the NS world via SMC_RET0.
 */
static uint64_t trusty_fiq_handler(uint32_t id,
				   uint32_t flags,
				   void *handle,
				   void *cookie)
{
	struct args ret;
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();

	/* This handler is only registered for NS-originated interrupts */
	assert(!is_caller_secure(flags));

	ret = trusty_context_switch(NON_SECURE, SMC_FC_FIQ_ENTER, 0, 0, 0);
	if (ret.r0) {
		/* Trusty reported an error; resume NS world unchanged */
		SMC_RET0(handle);
	}

	if (ctx->fiq_handler_active) {
		/* Nested FIQ: do not clobber the saved entry state */
		INFO("%s: fiq handler already active\n", __func__);
		SMC_RET0(handle);
	}

	ctx->fiq_handler_active = 1;
	/* Save interrupted NS state so trusty_fiq_exit() can restore it */
	memcpy(&ctx->fiq_gpregs, get_gpregs_ctx(handle), sizeof(ctx->fiq_gpregs));
	ctx->fiq_pc = SMC_GET_EL3(handle, CTX_ELR_EL3);
	ctx->fiq_cpsr = SMC_GET_EL3(handle, CTX_SPSR_EL3);
	ctx->fiq_sp_el1 = read_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1);

	/* Divert the NS return to the registered handler on its own stack */
	write_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_handler_sp);
	cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_handler_pc, ctx->fiq_handler_cpsr);

	SMC_RET0(handle);
}
154
155static uint64_t trusty_set_fiq_handler(void *handle, uint64_t cpu,
156 uint64_t handler, uint64_t stack)
157{
158 struct trusty_cpu_ctx *ctx;
159
160 if (cpu >= PLATFORM_CORE_COUNT) {
161 ERROR("%s: cpu %ld >= %d\n", __func__, cpu, PLATFORM_CORE_COUNT);
162 return SM_ERR_INVALID_PARAMETERS;
163 }
164
165 ctx = &trusty_cpu_ctx[cpu];
166 ctx->fiq_handler_pc = handler;
167 ctx->fiq_handler_cpsr = SMC_GET_EL3(handle, CTX_SPSR_EL3);
168 ctx->fiq_handler_sp = stack;
169
170 SMC_RET1(handle, 0);
171}
172
/*
 * SMC_FC64_GET_FIQ_REGS: return the NS state captured on FIQ entry —
 * pc, cpsr, sp_el0 (from the saved GP regs) and sp_el1 — in x0-x3.
 */
static uint64_t trusty_get_fiq_regs(void *handle)
{
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
	uint64_t sp_el0 = read_ctx_reg(&ctx->fiq_gpregs, CTX_GPREG_SP_EL0);

	SMC_RET4(handle, ctx->fiq_pc, ctx->fiq_cpsr, sp_el0, ctx->fiq_sp_el1);
}
180
/*
 * SMC_FC_FIQ_EXIT: the non-secure FIQ handler is done. Notify Trusty,
 * then restore the NS state snapshotted by trusty_fiq_handler() and
 * resume the interrupted NS code.
 */
static uint64_t trusty_fiq_exit(void *handle, uint64_t x1, uint64_t x2, uint64_t x3)
{
	struct args ret;
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();

	if (!ctx->fiq_handler_active) {
		/* Exit without a matching FIQ entry is a caller error */
		NOTICE("%s: fiq handler not active\n", __func__);
		SMC_RET1(handle, SM_ERR_INVALID_PARAMETERS);
	}

	ret = trusty_context_switch(NON_SECURE, SMC_FC_FIQ_EXIT, 0, 0, 0);
	if (ret.r0 != 1) {
		/* Unexpected, but non-fatal: state restore proceeds anyway */
		INFO("%s(%p) SMC_FC_FIQ_EXIT returned unexpected value, %ld\n",
		       __func__, handle, ret.r0);
	}

	/*
	 * Restore register state to state recorded on fiq entry.
	 *
	 * x0, sp_el1, pc and cpsr need to be restored because el1 cannot
	 * restore them.
	 *
	 * x1-x4 and x8-x17 need to be restored here because smc_handler64
	 * corrupts them (el1 code also restored them).
	 */
	memcpy(get_gpregs_ctx(handle), &ctx->fiq_gpregs, sizeof(ctx->fiq_gpregs));
	ctx->fiq_handler_active = 0;
	write_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_sp_el1);
	cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_pc, ctx->fiq_cpsr);

	SMC_RET0(handle);
}
213
/*
 * Top-level SMC dispatcher for the Trusty service (registered for both
 * fast and yielding calls).
 *
 * Secure callers: only SMC_YC_NS_RETURN is accepted — it yields back to
 * the NS world carrying x1 and returns the NS world's next request in
 * r0-r7. Anything else from the secure side is logged and rejected.
 *
 * Non-secure callers: FIQ management FCs are handled locally; every other
 * fid is forwarded into Trusty. When a hypervisor is active, the caller's
 * VMID (x7, per ARM DEN0028A) serializes access: a second guest SMC while
 * another guest's call is in flight gets SM_ERR_BUSY.
 */
static uint64_t trusty_smc_handler(uint32_t smc_fid,
			 uint64_t x1,
			 uint64_t x2,
			 uint64_t x3,
			 uint64_t x4,
			 void *cookie,
			 void *handle,
			 uint64_t flags)
{
	struct args ret;
	uint32_t vmid = 0;
	entry_point_info_t *ep_info = bl31_plat_get_next_image_ep_info(SECURE);

	/*
	 * Return success for SET_ROT_PARAMS if Trusty is not present, as
	 * Verified Boot is not even supported and returning success here
	 * would not compromise the boot process.
	 */
	if (!ep_info && (smc_fid == SMC_YC_SET_ROT_PARAMS)) {
		SMC_RET1(handle, 0);
	} else if (!ep_info) {
		SMC_RET1(handle, SMC_UNK);
	}

	if (is_caller_secure(flags)) {
		if (smc_fid == SMC_YC_NS_RETURN) {
			ret = trusty_context_switch(SECURE, x1, 0, 0, 0);
			SMC_RET8(handle, ret.r0, ret.r1, ret.r2, ret.r3,
				 ret.r4, ret.r5, ret.r6, ret.r7);
		}
		INFO("%s (0x%x, 0x%lx, 0x%lx, 0x%lx, 0x%lx, %p, %p, 0x%lx) \
		     cpu %d, unknown smc\n",
		     __func__, smc_fid, x1, x2, x3, x4, cookie, handle, flags,
		     plat_my_core_pos());
		SMC_RET1(handle, SMC_UNK);
	} else {
		switch (smc_fid) {
		case SMC_FC64_SET_FIQ_HANDLER:
			return trusty_set_fiq_handler(handle, x1, x2, x3);
		case SMC_FC64_GET_FIQ_REGS:
			return trusty_get_fiq_regs(handle);
		case SMC_FC_FIQ_EXIT:
			return trusty_fiq_exit(handle, x1, x2, x3);
		default:
			if (is_hypervisor_mode())
				vmid = SMC_GET_GP(handle, CTX_GPREG_X7);

			if ((current_vmid != 0) && (current_vmid != vmid)) {
				/* This message will cause SMC mechanism
				 * abnormal in multi-guest environment.
				 * Change it to WARN in case you need it.
				 */
				VERBOSE("Previous SMC not finished.\n");
				SMC_RET1(handle, SM_ERR_BUSY);
			}
			current_vmid = vmid;
			ret = trusty_context_switch(NON_SECURE, smc_fid, x1,
				x2, x3);
			current_vmid = 0;
			SMC_RET1(handle, ret.r0);
		}
	}
}
277
278static int32_t trusty_init(void)
279{
Sandrine Bailleux48c1c392016-11-23 10:53:07 +0000280 void el3_exit(void);
Varun Wadekar948c0902016-11-08 15:46:48 -0800281 entry_point_info_t *ep_info;
Anthony Zhoudae374b2015-10-31 06:03:41 +0800282 struct args zero_args = {0};
Varun Wadekar948c0902016-11-08 15:46:48 -0800283 struct trusty_cpu_ctx *ctx = get_trusty_ctx();
284 uint32_t cpu = plat_my_core_pos();
285 int reg_width = GET_RW(read_ctx_reg(get_el3state_ctx(&ctx->cpu_ctx),
286 CTX_SPSR_EL3));
287
Sandrine Bailleuxe97e4132016-11-30 11:24:01 +0000288 /*
289 * Get information about the Trusty image. Its absence is a critical
290 * failure.
291 */
Varun Wadekar948c0902016-11-08 15:46:48 -0800292 ep_info = bl31_plat_get_next_image_ep_info(SECURE);
Sandrine Bailleuxe97e4132016-11-30 11:24:01 +0000293 assert(ep_info);
Varun Wadekar948c0902016-11-08 15:46:48 -0800294
Arve Hjønnevågcb03c912015-08-04 16:19:27 -0700295 fpregs_context_save(get_fpregs_ctx(cm_get_context(NON_SECURE)));
Varun Wadekar948c0902016-11-08 15:46:48 -0800296 cm_el1_sysregs_context_save(NON_SECURE);
297
298 cm_set_context(&ctx->cpu_ctx, SECURE);
299 cm_init_my_context(ep_info);
300
301 /*
302 * Adjust secondary cpu entry point for 32 bit images to the
303 * end of exeption vectors
304 */
305 if ((cpu != 0) && (reg_width == MODE_RW_32)) {
306 INFO("trusty: cpu %d, adjust entry point to 0x%lx\n",
307 cpu, ep_info->pc + (1U << 5));
308 cm_set_elr_el3(SECURE, ep_info->pc + (1U << 5));
309 }
310
311 cm_el1_sysregs_context_restore(SECURE);
Arve Hjønnevågcb03c912015-08-04 16:19:27 -0700312 fpregs_context_restore(get_fpregs_ctx(cm_get_context(SECURE)));
Varun Wadekar948c0902016-11-08 15:46:48 -0800313 cm_set_next_eret_context(SECURE);
314
315 ctx->saved_security_state = ~0; /* initial saved state is invalid */
Varun Wadekar8e590622017-02-16 18:14:37 -0800316 trusty_init_context_stack(&ctx->saved_sp, &ctx->secure_stack.end);
Varun Wadekar948c0902016-11-08 15:46:48 -0800317
Anthony Zhoudae374b2015-10-31 06:03:41 +0800318 trusty_context_switch_helper(&ctx->saved_sp, &zero_args);
Varun Wadekar948c0902016-11-08 15:46:48 -0800319
320 cm_el1_sysregs_context_restore(NON_SECURE);
Arve Hjønnevågcb03c912015-08-04 16:19:27 -0700321 fpregs_context_restore(get_fpregs_ctx(cm_get_context(NON_SECURE)));
Varun Wadekar948c0902016-11-08 15:46:48 -0800322 cm_set_next_eret_context(NON_SECURE);
323
324 return 0;
325}
326
Arve Hjønnevågfab23192017-11-27 11:05:46 -0800327static void trusty_cpu_suspend(uint32_t off)
Varun Wadekar948c0902016-11-08 15:46:48 -0800328{
329 struct args ret;
Varun Wadekar948c0902016-11-08 15:46:48 -0800330
Arve Hjønnevågfab23192017-11-27 11:05:46 -0800331 ret = trusty_context_switch(NON_SECURE, SMC_FC_CPU_SUSPEND, off, 0, 0);
Varun Wadekar948c0902016-11-08 15:46:48 -0800332 if (ret.r0 != 0) {
333 INFO("%s: cpu %d, SMC_FC_CPU_SUSPEND returned unexpected value, %ld\n",
Sandrine Bailleux696f41e2016-11-23 09:50:53 +0000334 __func__, plat_my_core_pos(), ret.r0);
Varun Wadekar948c0902016-11-08 15:46:48 -0800335 }
336}
337
Arve Hjønnevågfab23192017-11-27 11:05:46 -0800338static void trusty_cpu_resume(uint32_t on)
Varun Wadekar948c0902016-11-08 15:46:48 -0800339{
340 struct args ret;
Varun Wadekar948c0902016-11-08 15:46:48 -0800341
Arve Hjønnevågfab23192017-11-27 11:05:46 -0800342 ret = trusty_context_switch(NON_SECURE, SMC_FC_CPU_RESUME, on, 0, 0);
Varun Wadekar948c0902016-11-08 15:46:48 -0800343 if (ret.r0 != 0) {
344 INFO("%s: cpu %d, SMC_FC_CPU_RESUME returned unexpected value, %ld\n",
Sandrine Bailleux696f41e2016-11-23 09:50:53 +0000345 __func__, plat_my_core_pos(), ret.r0);
Varun Wadekar948c0902016-11-08 15:46:48 -0800346 }
347}
348
/* PSCI CPU_OFF hook: tell Trusty this core is being turned off (x1 = 1). */
static int32_t trusty_cpu_off_handler(uint64_t unused)
{
	trusty_cpu_suspend(1);
	return 0;
}
355
356static void trusty_cpu_on_finish_handler(uint64_t unused)
357{
358 struct trusty_cpu_ctx *ctx = get_trusty_ctx();
359
360 if (!ctx->saved_sp) {
361 trusty_init();
362 } else {
Arve Hjønnevågfab23192017-11-27 11:05:46 -0800363 trusty_cpu_resume(1);
Varun Wadekar948c0902016-11-08 15:46:48 -0800364 }
365}
366
/* PSCI suspend hook: tell Trusty this core is suspending (x1 = 0). */
static void trusty_cpu_suspend_handler(uint64_t unused)
{
	trusty_cpu_suspend(0);
}
371
/* PSCI suspend-finish hook: tell Trusty this core resumed (x1 = 0). */
static void trusty_cpu_suspend_finish_handler(uint64_t unused)
{
	trusty_cpu_resume(0);
}
376
/* Power-management hooks registered with PSCI in trusty_setup(). */
static const spd_pm_ops_t trusty_pm = {
	.svc_off = trusty_cpu_off_handler,
	.svc_suspend = trusty_cpu_suspend_handler,
	.svc_on_finish = trusty_cpu_on_finish_handler,
	.svc_suspend_finish = trusty_cpu_suspend_finish_handler,
};
383
/*
 * Platform hook to fill Trusty's boot arguments (ep_info->args).
 * A weak default passing TSP_SEC_MEM_SIZE in arg0 is provided when that
 * macro is defined; platforms may override it.
 */
void plat_trusty_set_boot_args(aapcs64_params_t *args);

#ifdef TSP_SEC_MEM_SIZE
#pragma weak plat_trusty_set_boot_args
void plat_trusty_set_boot_args(aapcs64_params_t *args)
{
	args->arg0 = TSP_SEC_MEM_SIZE;
}
#endif
393
Varun Wadekar948c0902016-11-08 15:46:48 -0800394static int32_t trusty_setup(void)
395{
396 entry_point_info_t *ep_info;
Arve Hjønnevåg7c3309c2017-11-28 14:05:30 -0800397 uint32_t instr;
Varun Wadekar948c0902016-11-08 15:46:48 -0800398 uint32_t flags;
399 int ret;
Arve Hjønnevåg7c3309c2017-11-28 14:05:30 -0800400 int aarch32 = 0;
Varun Wadekar948c0902016-11-08 15:46:48 -0800401
Varun Wadekard67d0212017-02-23 10:34:06 -0800402 /* Get trusty's entry point info */
Varun Wadekar948c0902016-11-08 15:46:48 -0800403 ep_info = bl31_plat_get_next_image_ep_info(SECURE);
404 if (!ep_info) {
405 INFO("Trusty image missing.\n");
406 return -1;
407 }
408
Arve Hjønnevåg7c3309c2017-11-28 14:05:30 -0800409 instr = *(uint32_t *)ep_info->pc;
Varun Wadekar948c0902016-11-08 15:46:48 -0800410
Arve Hjønnevåg7c3309c2017-11-28 14:05:30 -0800411 if (instr >> 24 == 0xea) {
412 INFO("trusty: Found 32 bit image\n");
413 aarch32 = 1;
414 } else if (instr >> 8 == 0xd53810 || instr >> 16 == 0x9400) {
415 INFO("trusty: Found 64 bit image\n");
416 } else {
417 NOTICE("trusty: Found unknown image, 0x%x\n", instr);
418 }
419
420 SET_PARAM_HEAD(ep_info, PARAM_EP, VERSION_1, SECURE | EP_ST_ENABLE);
421 if (!aarch32)
422 ep_info->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
423 DISABLE_ALL_EXCEPTIONS);
424 else
425 ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
426 SPSR_E_LITTLE,
427 DAIF_FIQ_BIT |
428 DAIF_IRQ_BIT |
429 DAIF_ABT_BIT);
430 memset(&ep_info->args, 0, sizeof(ep_info->args));
431 plat_trusty_set_boot_args(&ep_info->args);
Wayne Linfeb5aa22016-05-24 15:28:42 -0700432
Varun Wadekard67d0212017-02-23 10:34:06 -0800433 /* register init handler */
Varun Wadekar948c0902016-11-08 15:46:48 -0800434 bl31_register_bl32_init(trusty_init);
435
Varun Wadekard67d0212017-02-23 10:34:06 -0800436 /* register power management hooks */
Varun Wadekar948c0902016-11-08 15:46:48 -0800437 psci_register_spd_pm_hook(&trusty_pm);
438
Varun Wadekard67d0212017-02-23 10:34:06 -0800439 /* register interrupt handler */
Varun Wadekar948c0902016-11-08 15:46:48 -0800440 flags = 0;
441 set_interrupt_rm_flag(flags, NON_SECURE);
442 ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
443 trusty_fiq_handler,
444 flags);
445 if (ret)
446 ERROR("trusty: failed to register fiq handler, ret = %d\n", ret);
447
Arve Hjønnevåg27d8e1e2017-09-28 14:59:10 -0700448 if (aarch32) {
449 entry_point_info_t *ns_ep_info;
450 uint32_t spsr;
451
452 ns_ep_info = bl31_plat_get_next_image_ep_info(NON_SECURE);
453 if (!ep_info) {
454 NOTICE("Trusty: non-secure image missing.\n");
455 return -1;
456 }
457 spsr = ns_ep_info->spsr;
458 if (GET_RW(spsr) == MODE_RW_64 && GET_EL(spsr) == MODE_EL2) {
459 spsr &= ~(MODE_EL_MASK << MODE_EL_SHIFT);
460 spsr |= MODE_EL1 << MODE_EL_SHIFT;
461 }
462 if (GET_RW(spsr) == MODE_RW_32 && GET_M32(spsr) == MODE32_hyp) {
463 spsr &= ~(MODE32_MASK << MODE32_SHIFT);
464 spsr |= MODE32_svc << MODE32_SHIFT;
465 }
466 if (spsr != ns_ep_info->spsr) {
467 NOTICE("Trusty: Switch bl33 from EL2 to EL1 (spsr 0x%x -> 0x%x)\n",
468 ns_ep_info->spsr, spsr);
469 ns_ep_info->spsr = spsr;
470 }
471 }
472
Varun Wadekar948c0902016-11-08 15:46:48 -0800473 return 0;
474}
475
/* Define a SPD runtime service descriptor for fast SMC calls */
DECLARE_RT_SVC(
	trusty_fast,

	OEN_TOS_START,
	SMC_ENTITY_SECURE_MONITOR,
	SMC_TYPE_FAST,
	trusty_setup,		/* setup runs once at boot */
	trusty_smc_handler	/* shared handler for both call types */
);
486
/* Define a SPD runtime service descriptor for yielding SMC calls */
DECLARE_RT_SVC(
	trusty_std,

	OEN_TAP_START,
	SMC_ENTITY_SECURE_MONITOR,
	SMC_TYPE_YIELD,
	NULL,			/* setup already done by the fast descriptor */
	trusty_smc_handler
);