/*
 * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <bl31.h>
#include <debug.h>
#include <context_mgmt.h>
#include <platform.h>
#include <runtime_svc.h>
#include <stddef.h>
#include "psci_private.h"

/*******************************************************************************
 * This function checks whether a cpu which has been requested to be turned on
 * is OFF to begin with.
 ******************************************************************************/
static int cpu_on_validate_state(unsigned int psci_state)
{
	if (psci_state == PSCI_STATE_ON || psci_state == PSCI_STATE_SUSPEND)
		return PSCI_E_ALREADY_ON;

	if (psci_state == PSCI_STATE_ON_PENDING)
		return PSCI_E_ON_PENDING;

	assert(psci_state == PSCI_STATE_OFF);
	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Generic handler which is called to physically power on a cpu identified by
 * its mpidr. It performs the generic, architectural and platform setup and
 * state management needed to power on the target cpu, e.g. it will ensure
 * that enough information is stashed for it to resume execution in the
 * non-secure security state.
 *
 * The state of all the relevant power domains is changed after calling the
 * platform handler, as it can return an error.
 ******************************************************************************/
int psci_cpu_on_start(unsigned long target_cpu,
		      entry_point_info_t *ep,
		      int end_pwrlvl)
{
	int rc;
	/* Linear index of the target cpu, used by the context management code */
	unsigned int target_idx = plat_core_pos_by_mpidr(target_cpu);
	mpidr_pwr_map_nodes_t target_cpu_nodes;
	unsigned long psci_entrypoint;

	/*
	 * This function must only be called on platforms where the
	 * CPU_ON platform hooks have been implemented.
	 */
	assert(psci_plat_pm_ops->pwr_domain_on &&
	       psci_plat_pm_ops->pwr_domain_on_finish);

	/*
	 * Collect the pointers to the nodes in the topology tree for
	 * each power domain instance in the mpidr. If this function does
	 * not return successfully then either the mpidr or the power
	 * levels are incorrect.
	 */
	rc = psci_get_pwr_map_nodes(target_cpu,
				    MPIDR_AFFLVL0,
				    end_pwrlvl,
				    target_cpu_nodes);
	assert(rc == PSCI_E_SUCCESS);

	/*
	 * This function acquires the lock corresponding to each power
	 * level so that by the time all locks are taken, the system topology
	 * has been snapshotted and state management can be done safely.
	 */
	psci_acquire_pwr_domain_locks(MPIDR_AFFLVL0,
				      end_pwrlvl,
				      target_cpu_nodes);

	/*
	 * Generic management: Ensure that the cpu requested to be turned
	 * on is OFF to begin with.
	 */
	rc = cpu_on_validate_state(psci_get_state(
				   target_cpu_nodes[MPIDR_AFFLVL0]));
	if (rc != PSCI_E_SUCCESS)
		goto exit;

	/*
	 * Call the cpu_on handler registered by the Secure Payload Dispatcher
	 * to let it do any bookkeeping. If the handler encounters an error,
	 * it is expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_on)
		psci_spd_pm->svc_on(target_cpu);

	/*
	 * This function updates the state of each power domain instance
	 * corresponding to the mpidr in the range of power levels
	 * specified.
	 */
	psci_do_state_coordination(MPIDR_AFFLVL0,
				   end_pwrlvl,
				   target_cpu_nodes,
				   PSCI_STATE_ON_PENDING);

	/*
	 * Perform generic, architecture and platform specific handling.
	 */
	/* Set the secure world (EL3) re-entry point after BL1 */
	psci_entrypoint = (unsigned long) psci_cpu_on_finish_entry;

	/*
	 * Plat. management: Give the platform the current state
	 * of the target cpu to allow it to perform the necessary
	 * steps to power it on.
	 */
	rc = psci_plat_pm_ops->pwr_domain_on(target_cpu,
					     psci_entrypoint,
					     MPIDR_AFFLVL0);
	assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);

	if (rc == PSCI_E_SUCCESS)
		/* Store the re-entry information for the non-secure world. */
		cm_init_context_by_index(target_idx, ep);
	else
		/* Restore the state on error. */
		psci_do_state_coordination(MPIDR_AFFLVL0,
					   end_pwrlvl,
					   target_cpu_nodes,
					   PSCI_STATE_OFF);

exit:
	/*
	 * This function releases the locks corresponding to each power level
	 * in the reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(MPIDR_AFFLVL0,
				      end_pwrlvl,
				      target_cpu_nodes);

	return rc;
}

/*******************************************************************************
 * The following function finishes an earlier power on request. It is called
 * by the common finisher routine in psci_common.c.
 ******************************************************************************/
void psci_cpu_on_finish(pwr_map_node_t *node[], int pwrlvl)
{
	assert(node[pwrlvl]->level == pwrlvl);

	/* Ensure we have been explicitly woken up by another cpu */
	assert(psci_get_state(node[MPIDR_AFFLVL0]) == PSCI_STATE_ON_PENDING);

	/*
	 * Plat. management: Perform the platform specific actions
	 * for this cpu e.g. enabling the gic or zeroing the mailbox
	 * register. The actual state of this cpu has already been
	 * changed.
	 */
	psci_plat_pm_ops->pwr_domain_on_finish(pwrlvl);

	/*
	 * Arch. management: Enable the data cache and manage stack memory
	 */
	psci_do_pwrup_cache_maintenance();

	/*
	 * All the platform specific actions for turning this cpu
	 * on have completed. Perform enough arch. initialization
	 * to run in the non-secure address space.
	 */
	bl31_arch_setup();

	/*
	 * Call the cpu_on finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters
	 * an error, it is expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_on_finish)
		psci_spd_pm->svc_on_finish(0);

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the cpu_on
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);

	/* Clean caches before re-entering normal world */
	dcsw_op_louis(DCCSW);
}
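
/*
 * Similarly, pwr_domain_on_finish is a platform hook. Below is a minimal,
 * hypothetical sketch of the shape psci_cpu_on_finish() expects: clear the
 * warm boot mailbox and do the per-cpu GIC setup so the normal world can take
 * interrupts. The mailbox address is a made-up placeholder, the mmio accessor
 * assumes <mmio.h>, and the GIC calls assume the generic driver in
 * drivers/arm/gic; a real port would use its own equivalents, so the sketch
 * is kept under '#if 0'.
 */
#if 0
#define HYP_PLAT_MAILBOX_BASE	0x04000000UL	/* hypothetical mailbox */

static void hyp_plat_pwr_domain_on_finish(int pwrlvl)
{
	/* This cpu has arrived; it no longer needs the mailbox entry point. */
	mmio_write_64(HYP_PLAT_MAILBOX_BASE, 0);

	/* Per-cpu GIC initialisation: cpu interface and private interrupts. */
	arm_gic_cpuif_setup();
	arm_gic_pcpu_distif_setup();
}
#endif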