blob: eae608c5f906db4ba58faa9f5576fc551514ae56 [file] [log] [blame]
Achin Gupta7aea9082014-02-01 07:51:28 +00001/*
2 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * Redistributions of source code must retain the above copyright notice, this
8 * list of conditions and the following disclaimer.
9 *
10 * Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
14 * Neither the name of ARM nor the names of its contributors may be used
15 * to endorse or promote products derived from this software without specific
16 * prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
Achin Gupta7aea9082014-02-01 07:51:28 +000031#include <arch_helpers.h>
Dan Handley97043ac2014-04-09 13:14:54 +010032#include <assert.h>
Achin Gupta7aea9082014-02-01 07:51:28 +000033#include <bl_common.h>
Soby Mathewa43d4312014-04-07 15:28:55 +010034#include <bl31.h>
Dan Handley97043ac2014-04-09 13:14:54 +010035#include <context.h>
Achin Gupta7aea9082014-02-01 07:51:28 +000036#include <context_mgmt.h>
Dan Handley97043ac2014-04-09 13:14:54 +010037#include <platform.h>
38#include <runtime_svc.h>
Achin Gupta7aea9082014-02-01 07:51:28 +000039
/*******************************************************************************
 * Data structure which holds the pointers to non-secure and secure security
 * state contexts for each cpu. It is aligned to the cache line boundary to
 * allow efficient concurrent manipulation of these pointers on different cpus
 ******************************************************************************/
typedef struct {
	/* ptr[SECURE] and ptr[NON_SECURE] — one 'cpu_context' per security state */
	void *ptr[2];
} __aligned (CACHE_WRITEBACK_GRANULE) context_info_t;

/* Per-cpu context pointers, indexed by the linear cpu id */
static context_info_t cm_context_info[PLATFORM_CORE_COUNT];

/* The per_cpu_ptr_cache_t space allocation */
static per_cpu_ptr_cache_t per_cpu_ptr_cache_space[PLATFORM_CORE_COUNT];
53
Achin Gupta7aea9082014-02-01 07:51:28 +000054/*******************************************************************************
55 * Context management library initialisation routine. This library is used by
56 * runtime services to share pointers to 'cpu_context' structures for the secure
57 * and non-secure states. Management of the structures and their associated
58 * memory is not done by the context management library e.g. the PSCI service
59 * manages the cpu context used for entry from and exit to the non-secure state.
60 * The Secure payload dispatcher service manages the context(s) corresponding to
61 * the secure state. It also uses this library to get access to the non-secure
62 * state cpu context pointers.
63 * Lastly, this library provides the api to make SP_EL3 point to the cpu context
64 * which will used for programming an entry into a lower EL. The same context
65 * will used to save state upon exception entry from that EL.
66 ******************************************************************************/
67void cm_init()
68{
69 /*
70 * The context management library has only global data to intialize, but
71 * that will be done when the BSS is zeroed out
72 */
73}
74
75/*******************************************************************************
76 * This function returns a pointer to the most recent 'cpu_context' structure
77 * that was set as the context for the specified security state. NULL is
78 * returned if no such structure has been specified.
79 ******************************************************************************/
80void *cm_get_context(uint64_t mpidr, uint32_t security_state)
81{
82 uint32_t linear_id = platform_get_core_pos(mpidr);
83
84 assert(security_state <= NON_SECURE);
85
86 return cm_context_info[linear_id].ptr[security_state];
87}
88
89/*******************************************************************************
90 * This function sets the pointer to the current 'cpu_context' structure for the
91 * specified security state.
92 ******************************************************************************/
93void cm_set_context(uint64_t mpidr, void *context, uint32_t security_state)
94{
95 uint32_t linear_id = platform_get_core_pos(mpidr);
96
97 assert(security_state <= NON_SECURE);
98
99 cm_context_info[linear_id].ptr[security_state] = context;
100}
101
102/*******************************************************************************
103 * The next four functions are used by runtime services to save and restore EL3
104 * and EL1 contexts on the 'cpu_context' structure for the specified security
105 * state.
106 ******************************************************************************/
/* Save the EL3 system register state of the calling cpu into the 'cpu_context'
 * registered for the given security state. A context must have been set
 * beforehand via cm_set_context(). */
void cm_el3_sysregs_context_save(uint32_t security_state)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(read_mpidr(), security_state);
	assert(ctx);

	el3_sysregs_context_save(get_el3state_ctx(ctx));
}
116
/* Restore the EL3 system register state of the calling cpu from the
 * 'cpu_context' registered for the given security state. A context must have
 * been set beforehand via cm_set_context(). */
void cm_el3_sysregs_context_restore(uint32_t security_state)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(read_mpidr(), security_state);
	assert(ctx);

	el3_sysregs_context_restore(get_el3state_ctx(ctx));
}
126
/* Save the EL1 system register state of the calling cpu into the 'cpu_context'
 * registered for the given security state. A context must have been set
 * beforehand via cm_set_context(). */
void cm_el1_sysregs_context_save(uint32_t security_state)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(read_mpidr(), security_state);
	assert(ctx);

	el1_sysregs_context_save(get_sysregs_ctx(ctx));
}
136
/* Restore the EL1 system register state of the calling cpu from the
 * 'cpu_context' registered for the given security state. A context must have
 * been set beforehand via cm_set_context(). */
void cm_el1_sysregs_context_restore(uint32_t security_state)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(read_mpidr(), security_state);
	assert(ctx);

	el1_sysregs_context_restore(get_sysregs_ctx(ctx));
}
146
147/*******************************************************************************
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000148 * This function function populates 'cpu_context' pertaining to the given
149 * security state with the entrypoint, SPSR and SCR values so that an ERET from
150 * this securit state correctly restores corresponding values to drop the CPU to
151 * the next exception level
152 ******************************************************************************/
153void cm_set_el3_eret_context(uint32_t security_state, uint64_t entrypoint,
154 uint32_t spsr, uint32_t scr)
155{
Dan Handleyfb037bf2014-04-10 15:37:22 +0100156 cpu_context_t *ctx;
157 el3_state_t *state;
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000158
159 ctx = cm_get_context(read_mpidr(), security_state);
160 assert(ctx);
161
162 /* Populate EL3 state so that we've the right context before doing ERET */
163 state = get_el3state_ctx(ctx);
164 write_ctx_reg(state, CTX_SPSR_EL3, spsr);
165 write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
166 write_ctx_reg(state, CTX_SCR_EL3, scr);
167}
168
/*******************************************************************************
 * This function populates the ELR_EL3 member of 'cpu_context' pertaining
 * to the given security state with the given entrypoint
 ******************************************************************************/
void cm_set_el3_elr(uint32_t security_state, uint64_t entrypoint)
{
	cpu_context_t *ctx;
	el3_state_t *state;

	/* A context must already have been registered for this security state */
	ctx = cm_get_context(read_mpidr(), security_state);
	assert(ctx);

	/* Populate EL3 state so that ERET jumps to the correct entry */
	state = get_el3state_ctx(ctx);
	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
}
185
/*******************************************************************************
 * This function is used to program the context that's used for exception
 * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for
 * the required security state
 ******************************************************************************/
void cm_set_next_eret_context(uint32_t security_state)
{
	cpu_context_t *ctx;
#if DEBUG
	uint64_t sp_mode;
#endif

	/* A context must already have been registered for this security state */
	ctx = cm_get_context(read_mpidr(), security_state);
	assert(ctx);

#if DEBUG
	/*
	 * Check that this function is called with SP_EL0 as the stack
	 * pointer
	 */
	__asm__ volatile("mrs %0, SPSel\n"
			 : "=r" (sp_mode));

	assert(sp_mode == MODE_SP_EL0);
#endif

	/*
	 * Temporarily select SP_ELx (spsel #1) so that the write to 'sp'
	 * lands in SP_EL3, then switch back to SP_EL0 (spsel #0). The
	 * current stack pointer is untouched.
	 */
	__asm__ volatile("msr spsel, #1\n"
			 "mov sp, %0\n"
			 "msr spsel, #0\n"
			 : : "r" (ctx));
}
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000217
Soby Mathewa43d4312014-04-07 15:28:55 +0100218/************************************************************************
219 * The following function is used to populate the per cpu pointer cache.
220 * The pointer will be stored in the tpidr_el3 register.
221 *************************************************************************/
222void cm_init_pcpu_ptr_cache()
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000223{
Soby Mathewa43d4312014-04-07 15:28:55 +0100224 unsigned long mpidr = read_mpidr();
225 uint32_t linear_id = platform_get_core_pos(mpidr);
226 per_cpu_ptr_cache_t *pcpu_ptr_cache;
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000227
Soby Mathewa43d4312014-04-07 15:28:55 +0100228 pcpu_ptr_cache = &per_cpu_ptr_cache_space[linear_id];
229 assert(pcpu_ptr_cache);
230 pcpu_ptr_cache->crash_stack = get_crash_stack(mpidr);
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000231
Soby Mathewa43d4312014-04-07 15:28:55 +0100232 cm_set_pcpu_ptr_cache(pcpu_ptr_cache);
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000233}
Soby Mathewa43d4312014-04-07 15:28:55 +0100234

/* Store the given per-cpu pointer in the tpidr_el3 system register so it can
 * be retrieved cheaply later via cm_get_pcpu_ptr_cache() */
void cm_set_pcpu_ptr_cache(const void *pcpu_ptr)
{
	write_tpidr_el3((unsigned long)pcpu_ptr);
}
240
/* Return the per-cpu pointer previously stored in tpidr_el3 by
 * cm_set_pcpu_ptr_cache() */
void *cm_get_pcpu_ptr_cache(void)
{
	return (void *)read_tpidr_el3();
}
245