blob: b602840d28865be13e63b94742cb5f71ac855d60 [file] [log] [blame]
Achin Gupta7aea9082014-02-01 07:51:28 +00001/*
2 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * Redistributions of source code must retain the above copyright notice, this
8 * list of conditions and the following disclaimer.
9 *
10 * Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
14 * Neither the name of ARM nor the names of its contributors may be used
15 * to endorse or promote products derived from this software without specific
16 * prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
Achin Guptac429b5e2014-05-04 18:38:28 +010031#include <arch.h>
Achin Gupta7aea9082014-02-01 07:51:28 +000032#include <arch_helpers.h>
Dan Handley97043ac2014-04-09 13:14:54 +010033#include <assert.h>
Achin Gupta7aea9082014-02-01 07:51:28 +000034#include <bl_common.h>
Soby Mathewa43d4312014-04-07 15:28:55 +010035#include <bl31.h>
Dan Handley97043ac2014-04-09 13:14:54 +010036#include <context.h>
Achin Gupta7aea9082014-02-01 07:51:28 +000037#include <context_mgmt.h>
Achin Guptae1333f72014-05-09 10:03:15 +010038#include <interrupt_mgmt.h>
Dan Handley97043ac2014-04-09 13:14:54 +010039#include <platform.h>
Dan Handley5f0cdb02014-05-14 17:44:19 +010040#include <platform_def.h>
Dan Handley97043ac2014-04-09 13:14:54 +010041#include <runtime_svc.h>
Achin Gupta7aea9082014-02-01 07:51:28 +000042
/*******************************************************************************
 * Data structure which holds the pointers to non-secure and secure security
 * state contexts for each cpu. It is aligned to the cache line boundary to
 * allow efficient concurrent manipulation of these pointers on different cpus
 ******************************************************************************/
typedef struct {
	/* Indexed by security state: SECURE and NON_SECURE (asserted <= NON_SECURE below) */
	void *ptr[2];
} __aligned (CACHE_WRITEBACK_GRANULE) context_info_t;

/* Per-cpu context pointer pairs, indexed by the linear id from platform_get_core_pos() */
static context_info_t cm_context_info[PLATFORM_CORE_COUNT];

/* The per_cpu_ptr_cache_t space allocation */
static per_cpu_ptr_cache_t per_cpu_ptr_cache_space[PLATFORM_CORE_COUNT];
56
Achin Gupta7aea9082014-02-01 07:51:28 +000057/*******************************************************************************
58 * Context management library initialisation routine. This library is used by
59 * runtime services to share pointers to 'cpu_context' structures for the secure
60 * and non-secure states. Management of the structures and their associated
61 * memory is not done by the context management library e.g. the PSCI service
62 * manages the cpu context used for entry from and exit to the non-secure state.
63 * The Secure payload dispatcher service manages the context(s) corresponding to
64 * the secure state. It also uses this library to get access to the non-secure
65 * state cpu context pointers.
66 * Lastly, this library provides the api to make SP_EL3 point to the cpu context
67 * which will used for programming an entry into a lower EL. The same context
68 * will used to save state upon exception entry from that EL.
69 ******************************************************************************/
/*
 * Context management library initialisation routine. No runtime work is
 * needed: the library's only state is the file-scope static data above,
 * which is initialised when the BSS is zeroed out.
 *
 * Fix: declare the parameter list as (void) — an empty () is an old-style
 * (K&R) declaration that disables argument-count checking at call sites.
 */
void cm_init(void)
{
	/*
	 * The context management library has only global data to initialize,
	 * but that will be done when the BSS is zeroed out
	 */
}
77
78/*******************************************************************************
79 * This function returns a pointer to the most recent 'cpu_context' structure
Andrew Thoelke08ab89d2014-05-14 17:09:32 +010080 * for the CPU identified by MPIDR that was set as the context for the specified
81 * security state. NULL is returned if no such structure has been specified.
Achin Gupta7aea9082014-02-01 07:51:28 +000082 ******************************************************************************/
Andrew Thoelke08ab89d2014-05-14 17:09:32 +010083void *cm_get_context_by_mpidr(uint64_t mpidr, uint32_t security_state)
Achin Gupta7aea9082014-02-01 07:51:28 +000084{
85 uint32_t linear_id = platform_get_core_pos(mpidr);
86
87 assert(security_state <= NON_SECURE);
88
89 return cm_context_info[linear_id].ptr[security_state];
90}
91
92/*******************************************************************************
Andrew Thoelke08ab89d2014-05-14 17:09:32 +010093 * This function returns a pointer to the most recent 'cpu_context' structure
94 * for the calling CPU that was set as the context for the specified security
95 * state. NULL is returned if no such structure has been specified.
Achin Gupta7aea9082014-02-01 07:51:28 +000096 ******************************************************************************/
Andrew Thoelke08ab89d2014-05-14 17:09:32 +010097void *cm_get_context(uint32_t security_state)
98{
99 uint32_t linear_id = platform_get_core_pos(read_mpidr());
100
101 assert(security_state <= NON_SECURE);
102
103 return cm_context_info[linear_id].ptr[security_state];
104}
105
106/*******************************************************************************
107 * This function sets the pointer to the current 'cpu_context' structure for the
108 * specified security state for the CPU identified by MPIDR
109 ******************************************************************************/
110void cm_set_context_by_mpidr(uint64_t mpidr, void *context, uint32_t security_state)
Achin Gupta7aea9082014-02-01 07:51:28 +0000111{
112 uint32_t linear_id = platform_get_core_pos(mpidr);
113
114 assert(security_state <= NON_SECURE);
115
116 cm_context_info[linear_id].ptr[security_state] = context;
117}
118
119/*******************************************************************************
Andrew Thoelke08ab89d2014-05-14 17:09:32 +0100120 * This function sets the pointer to the current 'cpu_context' structure for the
121 * specified security state for the calling CPU
122 ******************************************************************************/
123void cm_set_context(void *context, uint32_t security_state)
124{
125 cm_set_context_by_mpidr(read_mpidr(), context, security_state);
126}
127
128/*******************************************************************************
Achin Gupta7aea9082014-02-01 07:51:28 +0000129 * The next four functions are used by runtime services to save and restore EL3
130 * and EL1 contexts on the 'cpu_context' structure for the specified security
131 * state.
132 ******************************************************************************/
133void cm_el3_sysregs_context_save(uint32_t security_state)
134{
Dan Handleyfb037bf2014-04-10 15:37:22 +0100135 cpu_context_t *ctx;
Achin Gupta7aea9082014-02-01 07:51:28 +0000136
Andrew Thoelke08ab89d2014-05-14 17:09:32 +0100137 ctx = cm_get_context(security_state);
Achin Gupta7aea9082014-02-01 07:51:28 +0000138 assert(ctx);
139
140 el3_sysregs_context_save(get_el3state_ctx(ctx));
141}
142
143void cm_el3_sysregs_context_restore(uint32_t security_state)
144{
Dan Handleyfb037bf2014-04-10 15:37:22 +0100145 cpu_context_t *ctx;
Achin Gupta7aea9082014-02-01 07:51:28 +0000146
Andrew Thoelke08ab89d2014-05-14 17:09:32 +0100147 ctx = cm_get_context(security_state);
Achin Gupta7aea9082014-02-01 07:51:28 +0000148 assert(ctx);
149
150 el3_sysregs_context_restore(get_el3state_ctx(ctx));
151}
152
153void cm_el1_sysregs_context_save(uint32_t security_state)
154{
Dan Handleyfb037bf2014-04-10 15:37:22 +0100155 cpu_context_t *ctx;
Achin Gupta7aea9082014-02-01 07:51:28 +0000156
Andrew Thoelke08ab89d2014-05-14 17:09:32 +0100157 ctx = cm_get_context(security_state);
Achin Gupta7aea9082014-02-01 07:51:28 +0000158 assert(ctx);
159
160 el1_sysregs_context_save(get_sysregs_ctx(ctx));
161}
162
163void cm_el1_sysregs_context_restore(uint32_t security_state)
164{
Dan Handleyfb037bf2014-04-10 15:37:22 +0100165 cpu_context_t *ctx;
Achin Gupta7aea9082014-02-01 07:51:28 +0000166
Andrew Thoelke08ab89d2014-05-14 17:09:32 +0100167 ctx = cm_get_context(security_state);
Achin Gupta7aea9082014-02-01 07:51:28 +0000168 assert(ctx);
169
170 el1_sysregs_context_restore(get_sysregs_ctx(ctx));
171}
172
173/*******************************************************************************
Achin Guptac429b5e2014-05-04 18:38:28 +0100174 * This function populates 'cpu_context' pertaining to the given security state
175 * with the entrypoint, SPSR and SCR values so that an ERET from this security
176 * state correctly restores corresponding values to drop the CPU to the next
177 * exception level
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000178 ******************************************************************************/
179void cm_set_el3_eret_context(uint32_t security_state, uint64_t entrypoint,
180 uint32_t spsr, uint32_t scr)
181{
Dan Handleyfb037bf2014-04-10 15:37:22 +0100182 cpu_context_t *ctx;
183 el3_state_t *state;
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000184
Andrew Thoelke08ab89d2014-05-14 17:09:32 +0100185 ctx = cm_get_context(security_state);
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000186 assert(ctx);
187
Achin Guptae1333f72014-05-09 10:03:15 +0100188 /* Program the interrupt routing model for this security state */
189 scr &= ~SCR_FIQ_BIT;
190 scr &= ~SCR_IRQ_BIT;
191 scr |= get_scr_el3_from_routing_model(security_state);
192
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000193 /* Populate EL3 state so that we've the right context before doing ERET */
194 state = get_el3state_ctx(ctx);
195 write_ctx_reg(state, CTX_SPSR_EL3, spsr);
196 write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
197 write_ctx_reg(state, CTX_SCR_EL3, scr);
198}
199
200/*******************************************************************************
Achin Guptac429b5e2014-05-04 18:38:28 +0100201 * This function populates ELR_EL3 member of 'cpu_context' pertaining to the
202 * given security state with the given entrypoint
Achin Gupta607084e2014-02-09 18:24:19 +0000203 ******************************************************************************/
Achin Guptac429b5e2014-05-04 18:38:28 +0100204void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint)
Achin Gupta607084e2014-02-09 18:24:19 +0000205{
Dan Handleyfb037bf2014-04-10 15:37:22 +0100206 cpu_context_t *ctx;
207 el3_state_t *state;
Achin Gupta607084e2014-02-09 18:24:19 +0000208
Andrew Thoelke08ab89d2014-05-14 17:09:32 +0100209 ctx = cm_get_context(security_state);
Achin Gupta607084e2014-02-09 18:24:19 +0000210 assert(ctx);
211
212 /* Populate EL3 state so that ERET jumps to the correct entry */
213 state = get_el3state_ctx(ctx);
214 write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
215}
216
217/*******************************************************************************
Achin Guptac429b5e2014-05-04 18:38:28 +0100218 * This function updates a single bit in the SCR_EL3 member of the 'cpu_context'
219 * pertaining to the given security state using the value and bit position
220 * specified in the parameters. It preserves all other bits.
221 ******************************************************************************/
222void cm_write_scr_el3_bit(uint32_t security_state,
223 uint32_t bit_pos,
224 uint32_t value)
225{
226 cpu_context_t *ctx;
227 el3_state_t *state;
228 uint32_t scr_el3;
229
Andrew Thoelke08ab89d2014-05-14 17:09:32 +0100230 ctx = cm_get_context(security_state);
Achin Guptac429b5e2014-05-04 18:38:28 +0100231 assert(ctx);
232
233 /* Ensure that the bit position is a valid one */
234 assert((1 << bit_pos) & SCR_VALID_BIT_MASK);
235
236 /* Ensure that the 'value' is only a bit wide */
237 assert(value <= 1);
238
239 /*
240 * Get the SCR_EL3 value from the cpu context, clear the desired bit
241 * and set it to its new value.
242 */
243 state = get_el3state_ctx(ctx);
244 scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
245 scr_el3 &= ~(1 << bit_pos);
246 scr_el3 |= value << bit_pos;
247 write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
248}
249
250/*******************************************************************************
251 * This function retrieves SCR_EL3 member of 'cpu_context' pertaining to the
252 * given security state.
253 ******************************************************************************/
254uint32_t cm_get_scr_el3(uint32_t security_state)
255{
256 cpu_context_t *ctx;
257 el3_state_t *state;
258
Andrew Thoelke08ab89d2014-05-14 17:09:32 +0100259 ctx = cm_get_context(security_state);
Achin Guptac429b5e2014-05-04 18:38:28 +0100260 assert(ctx);
261
262 /* Populate EL3 state so that ERET jumps to the correct entry */
263 state = get_el3state_ctx(ctx);
264 return read_ctx_reg(state, CTX_SCR_EL3);
265}
266
267/*******************************************************************************
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000268 * This function is used to program the context that's used for exception
269 * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for
270 * the required security state
Achin Gupta7aea9082014-02-01 07:51:28 +0000271 ******************************************************************************/
void cm_set_next_eret_context(uint32_t security_state)
{
	cpu_context_t *ctx;
#if DEBUG
	uint64_t sp_mode;
#endif

	/* The context for this state must have been registered already */
	ctx = cm_get_context(security_state);
	assert(ctx);

#if DEBUG
	/*
	 * Check that this function is called with SP_EL0 as the stack
	 * pointer
	 */
	__asm__ volatile("mrs %0, SPSel\n"
			 : "=r" (sp_mode));

	assert(sp_mode == MODE_SP_EL0);
#endif

	/*
	 * Temporarily switch to SP_ELx, load SP_EL3 with the context pointer,
	 * then switch back to SP_EL0. Exception entry/exit code can then find
	 * the context via SP_EL3.
	 */
	__asm__ volatile("msr spsel, #1\n"
			 "mov sp, %0\n"
			 "msr spsel, #0\n"
			 : : "r" (ctx));
}
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000298
Soby Mathewa43d4312014-04-07 15:28:55 +0100299/************************************************************************
300 * The following function is used to populate the per cpu pointer cache.
301 * The pointer will be stored in the tpidr_el3 register.
302 *************************************************************************/
303void cm_init_pcpu_ptr_cache()
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000304{
Soby Mathewa43d4312014-04-07 15:28:55 +0100305 unsigned long mpidr = read_mpidr();
306 uint32_t linear_id = platform_get_core_pos(mpidr);
307 per_cpu_ptr_cache_t *pcpu_ptr_cache;
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000308
Soby Mathewa43d4312014-04-07 15:28:55 +0100309 pcpu_ptr_cache = &per_cpu_ptr_cache_space[linear_id];
310 assert(pcpu_ptr_cache);
311 pcpu_ptr_cache->crash_stack = get_crash_stack(mpidr);
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000312
Soby Mathewa43d4312014-04-07 15:28:55 +0100313 cm_set_pcpu_ptr_cache(pcpu_ptr_cache);
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000314}
Soby Mathewa43d4312014-04-07 15:28:55 +0100315
316
void cm_set_pcpu_ptr_cache(const void *pcpu_ptr)
{
	/* Stash the per-cpu pointer cache address in TPIDR_EL3 */
	unsigned long cache_addr = (unsigned long)pcpu_ptr;

	write_tpidr_el3(cache_addr);
}
321
void *cm_get_pcpu_ptr_cache(void)
{
	/* TPIDR_EL3 holds the address stored by cm_set_pcpu_ptr_cache() */
	unsigned long cache_addr = read_tpidr_el3();

	return (void *)cache_addr;
}
326