/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <bl31.h>
#include <context_mgmt.h>
#include <platform.h>
#include <runtime_svc.h>
#include <stddef.h>
#include "psci_private.h"

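/*
 * Type of the handlers invoked, one per affinity level, while servicing a
 * power on request. The arguments are, in order: the mpidr of the cpu being
 * turned on, the affinity instance node being processed, the non-secure
 * entrypoint and the context id passed by the caller (see the level 0
 * handler below).
 */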
typedef int (*afflvl_on_handler_t)(unsigned long,
                                   aff_map_node_t *,
                                   unsigned long,
                                   unsigned long);

/*******************************************************************************
 * This function checks whether a cpu which has been requested to be turned on
 * is OFF to begin with.
 ******************************************************************************/
static int cpu_on_validate_state(aff_map_node_t *node)
{
        unsigned int psci_state;

        /* Get the raw psci state */
        psci_state = psci_get_state(node);

        if (psci_state == PSCI_STATE_ON || psci_state == PSCI_STATE_SUSPEND)
                return PSCI_E_ALREADY_ON;

        if (psci_state == PSCI_STATE_ON_PENDING)
                return PSCI_E_ON_PENDING;

        assert(psci_state == PSCI_STATE_OFF);
        return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Handler routine to turn a cpu on. It takes care of any generic, architectural
 * or platform specific setup required.
 * TODO: Split this code across separate handlers for each type of setup?
 ******************************************************************************/
static int psci_afflvl0_on(unsigned long target_cpu,
                           aff_map_node_t *cpu_node,
                           unsigned long ns_entrypoint,
                           unsigned long context_id)
{
        unsigned int plat_state;
        unsigned long psci_entrypoint;
        uint32_t ns_scr_el3 = read_scr_el3();
        uint32_t ns_sctlr_el1 = read_sctlr_el1();
        int rc;

        /* Sanity check to safeguard against data corruption */
        assert(cpu_node->level == MPIDR_AFFLVL0);

        /*
         * Generic management: Ensure that the cpu is off before it can be
         * turned on.
         */
        rc = cpu_on_validate_state(cpu_node);
        if (rc != PSCI_E_SUCCESS)
                return rc;

        /*
         * Call the cpu on handler registered by the Secure Payload Dispatcher
         * to let it do any bookkeeping. If the handler encounters an error,
         * it is expected to assert within.
         */
        if (psci_spd_pm && psci_spd_pm->svc_on)
                psci_spd_pm->svc_on(target_cpu);

        /*
         * Arch. management: Derive the re-entry information for
         * the non-secure world from the non-secure state in
         * which this call originated.
         */
        rc = psci_save_ns_entry(target_cpu, ns_entrypoint, context_id,
                                ns_scr_el3, ns_sctlr_el1);
        if (rc != PSCI_E_SUCCESS)
                return rc;

        /* Set the secure world (EL3) re-entry point after BL1 */
        psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;

        /* State management: Set this cpu's state as ON PENDING */
        psci_set_state(cpu_node, PSCI_STATE_ON_PENDING);

        /*
         * Plat. management: Give the platform the current state
         * of the target cpu to allow it to perform the necessary
         * steps to power on.
         */
        if (psci_plat_pm_ops->affinst_on) {

                /* Get the current physical state of this cpu */
                plat_state = psci_get_phys_state(cpu_node);
                rc = psci_plat_pm_ops->affinst_on(target_cpu,
                                                  psci_entrypoint,
                                                  ns_entrypoint,
                                                  cpu_node->level,
                                                  plat_state);
        }

        return rc;
}

/*******************************************************************************
 * Handler routine to turn a cluster on. It takes care of any generic, arch.
 * or platform specific setup required.
 * TODO: Split this code across separate handlers for each type of setup?
 ******************************************************************************/
static int psci_afflvl1_on(unsigned long target_cpu,
                           aff_map_node_t *cluster_node,
                           unsigned long ns_entrypoint,
                           unsigned long context_id)
{
        int rc = PSCI_E_SUCCESS;
        unsigned int plat_state;
        unsigned long psci_entrypoint;

        assert(cluster_node->level == MPIDR_AFFLVL1);

        /*
         * There is no generic or arch. specific cluster
         * management required.
         */

        /* State management: Not required while turning a cluster on */

        /*
         * Plat. management: Give the platform the current state
         * of the target cpu to allow it to perform the necessary
         * steps to power on.
         */
        if (psci_plat_pm_ops->affinst_on) {
                plat_state = psci_get_phys_state(cluster_node);
                psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
                rc = psci_plat_pm_ops->affinst_on(target_cpu,
                                                  psci_entrypoint,
                                                  ns_entrypoint,
                                                  cluster_node->level,
                                                  plat_state);
        }

        return rc;
}

/*******************************************************************************
 * Handler routine to turn a cluster of clusters on. It takes care of any
 * generic, arch. or platform specific setup required.
 * TODO: Split this code across separate handlers for each type of setup?
 ******************************************************************************/
static int psci_afflvl2_on(unsigned long target_cpu,
                           aff_map_node_t *system_node,
                           unsigned long ns_entrypoint,
                           unsigned long context_id)
{
        int rc = PSCI_E_SUCCESS;
        unsigned int plat_state;
        unsigned long psci_entrypoint;

        /* Cannot go beyond affinity level 2 in this psci imp. */
        assert(system_node->level == MPIDR_AFFLVL2);

        /*
         * There is no generic or arch. specific system management
         * required.
         */

        /* State management: Not required while turning a system on */

        /*
         * Plat. management: Give the platform the current state
         * of the target cpu to allow it to perform the necessary
         * steps to power on.
         */
        if (psci_plat_pm_ops->affinst_on) {
                plat_state = psci_get_phys_state(system_node);
                psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
                rc = psci_plat_pm_ops->affinst_on(target_cpu,
                                                  psci_entrypoint,
                                                  ns_entrypoint,
                                                  system_node->level,
                                                  plat_state);
        }

        return rc;
}

/* Private array to make these handlers accessible through indexing */
static const afflvl_on_handler_t psci_afflvl_on_handlers[] = {
        psci_afflvl0_on,
        psci_afflvl1_on,
        psci_afflvl2_on,
};
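/*
 * Note: the array above is indexed by affinity level (MPIDR_AFFLVL0 to
 * MPIDR_AFFLVL2); psci_call_on_handlers() below uses the level of each
 * affinity instance node as the index to pick the handler to invoke.
 */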

/*******************************************************************************
 * This function takes an array of pointers to affinity instance nodes in the
 * topology tree and calls the on handler for the corresponding affinity
 * levels.
 ******************************************************************************/
static int psci_call_on_handlers(mpidr_aff_map_nodes_t target_cpu_nodes,
                                 int start_afflvl,
                                 int end_afflvl,
                                 unsigned long target_cpu,
                                 unsigned long entrypoint,
                                 unsigned long context_id)
{
        int rc = PSCI_E_INVALID_PARAMS, level;
        aff_map_node_t *node;

        for (level = end_afflvl; level >= start_afflvl; level--) {
                node = target_cpu_nodes[level];
                if (node == NULL)
                        continue;

                /*
                 * TODO: In case of an error, should there be a way of undoing
                 * what we might have set up at higher affinity levels?
                 */
                rc = psci_afflvl_on_handlers[level](target_cpu,
                                                    node,
                                                    entrypoint,
                                                    context_id);
                if (rc != PSCI_E_SUCCESS)
                        break;
        }

        return rc;
}

/*******************************************************************************
 * Generic handler which is called to physically power on a cpu identified by
 * its mpidr. It traverses all the affinity levels, performing generic,
 * architectural and platform setup and state management, e.g. for a cpu that
 * is to be powered on, it will ensure that enough information is stashed for
 * it to resume execution in the non-secure security state.
 *
 * The state of all the relevant affinity levels is changed after calling the
 * affinity level specific handlers as their actions would depend upon the
 * state the affinity level is currently in.
 *
 * The affinity level specific handlers are called in descending order i.e.
 * from the highest to the lowest affinity level implemented by the platform,
 * because to turn on affinity level X it is necessary to turn on affinity
 * level X + 1 first.
 ******************************************************************************/
int psci_afflvl_on(unsigned long target_cpu,
                   unsigned long entrypoint,
                   unsigned long context_id,
                   int start_afflvl,
                   int end_afflvl)
{
        int rc = PSCI_E_SUCCESS;
        mpidr_aff_map_nodes_t target_cpu_nodes;
        unsigned long mpidr = read_mpidr() & MPIDR_AFFINITY_MASK;

        /*
         * Collect the pointers to the nodes in the topology tree for
         * each affinity instance in the mpidr. If this function does
         * not return successfully then either the mpidr or the affinity
         * levels are incorrect.
         */
        rc = psci_get_aff_map_nodes(target_cpu,
                                    start_afflvl,
                                    end_afflvl,
                                    target_cpu_nodes);
        if (rc != PSCI_E_SUCCESS)
                return rc;

        /*
         * This function acquires the lock corresponding to each affinity
         * level so that by the time all locks are taken, the system topology
         * has been snapshotted and state management can be done safely.
         */
        psci_acquire_afflvl_locks(mpidr,
                                  start_afflvl,
                                  end_afflvl,
                                  target_cpu_nodes);

        /* Perform generic, architecture and platform specific handling. */
        rc = psci_call_on_handlers(target_cpu_nodes,
                                   start_afflvl,
                                   end_afflvl,
                                   target_cpu,
                                   entrypoint,
                                   context_id);

        /*
         * Release the locks corresponding to each affinity level in the
         * reverse order to which they were acquired.
         */
        psci_release_afflvl_locks(mpidr,
                                  start_afflvl,
                                  end_afflvl,
                                  target_cpu_nodes);

        return rc;
}
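
/*
 * Illustrative sketch (not part of this file): the PSCI CPU_ON SMC handler
 * is expected to drive the routine above along these lines, turning on every
 * affinity level from the target cpu up to the highest level implemented by
 * the platform. The helper get_max_afflvl() is an assumption taken from the
 * wider PSCI implementation rather than a definition in this file:
 *
 *      rc = psci_afflvl_on(target_cpu,
 *                          entrypoint,
 *                          context_id,
 *                          MPIDR_AFFLVL0,
 *                          get_max_afflvl());
 */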

/*******************************************************************************
 * The following functions finish an earlier affinity power on request. They
 * are called by the common finisher routine in psci_common.c.
 ******************************************************************************/
static unsigned int psci_afflvl0_on_finish(unsigned long mpidr,
                                           aff_map_node_t *cpu_node)
{
        unsigned int plat_state, state, rc;

        assert(cpu_node->level == MPIDR_AFFLVL0);

        /* Ensure we have been explicitly woken up by another cpu */
        state = psci_get_state(cpu_node);
        assert(state == PSCI_STATE_ON_PENDING);

        /*
         * Plat. management: Perform the platform specific actions
         * for this cpu e.g. enabling the gic or zeroing the mailbox
         * register. The actual state of this cpu has already been
         * changed.
         */
        if (psci_plat_pm_ops->affinst_on_finish) {

                /* Get the physical state of this cpu */
                plat_state = get_phys_state(state);
                rc = psci_plat_pm_ops->affinst_on_finish(mpidr,
                                                         cpu_node->level,
                                                         plat_state);
                assert(rc == PSCI_E_SUCCESS);
        }

        /*
         * Arch. management: Turn on the mmu & restore architectural state.
         */
        bl31_plat_enable_mmu();

        /*
         * All the platform specific actions for turning this cpu
         * on have completed. Perform enough arch. initialization
         * to run in the non-secure address space.
         */
        bl31_arch_setup();

        /*
         * Call the cpu on finish handler registered by the Secure Payload
         * Dispatcher to let it do any bookkeeping. If the handler encounters
         * an error, it is expected to assert within.
         */
        if (psci_spd_pm && psci_spd_pm->svc_on_finish)
                psci_spd_pm->svc_on_finish(0);

        /*
         * Generic management: Now we just need to retrieve the
         * information that we had stashed away during the cpu_on
         * call to set this cpu on its way.
         */
        cm_prepare_el3_exit(NON_SECURE);

        /* State management: mark this cpu as on */
        psci_set_state(cpu_node, PSCI_STATE_ON);

        /* Clean caches before re-entering normal world */
        dcsw_op_louis(DCCSW);

        rc = PSCI_E_SUCCESS;
        return rc;
}

static unsigned int psci_afflvl1_on_finish(unsigned long mpidr,
                                           aff_map_node_t *cluster_node)
{
        unsigned int plat_state, rc = PSCI_E_SUCCESS;

        assert(cluster_node->level == MPIDR_AFFLVL1);

        /*
         * Plat. management: Perform the platform specific actions
         * as per the old state of the cluster e.g. enabling
         * coherency at the interconnect depends upon the state with
         * which this cluster was powered up. If anything goes wrong
         * then assert as there is no way to recover from this
         * situation.
         */
        if (psci_plat_pm_ops->affinst_on_finish) {

                /* Get the physical state of this cluster */
                plat_state = psci_get_phys_state(cluster_node);
                rc = psci_plat_pm_ops->affinst_on_finish(mpidr,
                                                         cluster_node->level,
                                                         plat_state);
                assert(rc == PSCI_E_SUCCESS);
        }

        /* State management: Increment the cluster reference count */
        psci_set_state(cluster_node, PSCI_STATE_ON);

        return rc;
}

static unsigned int psci_afflvl2_on_finish(unsigned long mpidr,
                                           aff_map_node_t *system_node)
{
        unsigned int plat_state, rc = PSCI_E_SUCCESS;

        /* Cannot go beyond this affinity level */
        assert(system_node->level == MPIDR_AFFLVL2);

        /*
         * Currently, there are no architectural actions to perform
         * at the system level.
         */

        /*
         * Plat. management: Perform the platform specific actions
         * as per the old state of the system e.g. enabling
         * coherency at the interconnect depends upon the state with
         * which the system was powered up. If anything goes wrong
         * then assert as there is no way to recover from this
         * situation.
         */
        if (psci_plat_pm_ops->affinst_on_finish) {

                /* Get the physical state of the system */
                plat_state = psci_get_phys_state(system_node);
                rc = psci_plat_pm_ops->affinst_on_finish(mpidr,
                                                         system_node->level,
                                                         plat_state);
                assert(rc == PSCI_E_SUCCESS);
        }

        /* State management: Increment the system reference count */
        psci_set_state(system_node, PSCI_STATE_ON);

        return rc;
}

const afflvl_power_on_finisher_t psci_afflvl_on_finishers[] = {
        psci_afflvl0_on_finish,
        psci_afflvl1_on_finish,
        psci_afflvl2_on_finish,
};
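
/*
 * The finishers above are indexed by affinity level, mirroring
 * psci_afflvl_on_handlers. The common finisher routine in psci_common.c
 * (outside this file) is expected to select and invoke the entry matching
 * each affinity instance woken by the earlier power on request; the exact
 * traversal order is defined there, not here.
 */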