/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <bl_common.h>
#include <arch.h>
#include <arch_helpers.h>
#include <context.h>
#include <context_mgmt.h>
#include <runtime_svc.h>
#include <stddef.h>
#include "psci_private.h"

typedef int (*afflvl_suspend_handler_t)(unsigned long,
                                        aff_map_node_t *,
                                        unsigned long,
                                        unsigned long,
                                        unsigned int);

/*******************************************************************************
 * This function sets the power state of the current cpu while
 * powering down during a cpu_suspend call.
 ******************************************************************************/
void psci_set_suspend_power_state(aff_map_node_t *node, unsigned int power_state)
{
        /*
         * Check that nobody else is calling this function on our behalf and
         * that this information is only being set in the cpu node
         */
        assert(node->mpidr == (read_mpidr() & MPIDR_AFFINITY_MASK));
        assert(node->level == MPIDR_AFFLVL0);

        /* Save the PSCI power state parameter for the core in the suspend context */
        psci_suspend_context[node->data].power_state = power_state;

        /*
         * Flush the suspend data to PoC since it will be accessed while
         * returning from suspend with the caches turned off
         */
        flush_dcache_range(
                (unsigned long)&psci_suspend_context[node->data],
                sizeof(suspend_context_t));
}
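
/*
 * Illustrative sketch (not part of the original file): psci_suspend_context is
 * defined in psci_private.h. Since each entry is flushed to the PoC above and
 * read back on resume with the caches off, its type is plausibly a small,
 * cache-line aligned record like the one below. The names and the assumed
 * cache line size here are for illustration only.
 */
#if 0
#define EXAMPLE_CACHE_LINE_SIZE 64      /* assumed writeback granule */

typedef struct example_suspend_context {
        unsigned int power_state;       /* PSCI power_state saved at suspend */
} __attribute__((aligned(EXAMPLE_CACHE_LINE_SIZE))) example_suspend_context_t;
#endif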

/*******************************************************************************
 * This function gets the affinity level up to which a cpu is powered down
 * during a cpu_suspend call. Returns PSCI_INVALID_DATA if the
 * power state saved for the node is invalid.
 ******************************************************************************/
int psci_get_suspend_afflvl(unsigned long mpidr)
{
        aff_map_node_t *node;

        node = psci_get_aff_map_node(mpidr & MPIDR_AFFINITY_MASK,
                                     MPIDR_AFFLVL0);
        assert(node);

        return psci_get_aff_map_node_suspend_afflvl(node);
}


/*******************************************************************************
 * This function gets the affinity level up to which the current cpu was
 * powered down during a cpu_suspend call. Returns PSCI_INVALID_DATA if the
 * power state saved for the node is invalid.
 ******************************************************************************/
int psci_get_aff_map_node_suspend_afflvl(aff_map_node_t *node)
{
        unsigned int power_state;

        assert(node->level == MPIDR_AFFLVL0);

        power_state = psci_suspend_context[node->data].power_state;
        return ((power_state == PSCI_INVALID_DATA) ?
                power_state : psci_get_pstate_afflvl(power_state));
}

/*******************************************************************************
 * This function gets the state id stored in the suspend context of a cpu
 * while it was powering down during a cpu_suspend call. Returns 0xFFFFFFFF
 * if the power state saved for the node is invalid.
 ******************************************************************************/
int psci_get_suspend_stateid(unsigned long mpidr)
{
        aff_map_node_t *node;
        unsigned int power_state;

        node = psci_get_aff_map_node(mpidr & MPIDR_AFFINITY_MASK,
                                     MPIDR_AFFLVL0);
        assert(node);
        assert(node->level == MPIDR_AFFLVL0);

        power_state = psci_suspend_context[node->data].power_state;
        return ((power_state == PSCI_INVALID_DATA) ?
                power_state : psci_get_pstate_id(power_state));
}
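
/*
 * Illustrative sketch (not part of the original file): the two accessors above
 * rely on the psci_get_pstate_afflvl()/psci_get_pstate_id() helpers provided
 * by psci.h. Assuming the PSCI v0.2 power_state encoding (StateID in
 * bits[15:0], StateType in bit[16], target affinity level in bits[25:24]),
 * they would reduce to something like the helpers below. The exact shifts and
 * masks are an assumption for illustration only.
 */
#if 0
static inline unsigned int example_pstate_id(unsigned int power_state)
{
        return power_state & 0xffff;            /* StateID, bits[15:0] */
}

static inline unsigned int example_pstate_afflvl(unsigned int power_state)
{
        return (power_state >> 24) & 0x3;       /* Power level, bits[25:24] */
}
#endif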

/*******************************************************************************
 * The next three functions implement a handler for each supported affinity
 * level which is called when that affinity level is about to be suspended.
 ******************************************************************************/
static int psci_afflvl0_suspend(unsigned long mpidr,
                                aff_map_node_t *cpu_node,
                                unsigned long ns_entrypoint,
                                unsigned long context_id,
                                unsigned int power_state)
{
        unsigned int index, plat_state;
        unsigned long psci_entrypoint, sctlr;
        el3_state_t *saved_el3_state;
        int rc = PSCI_E_SUCCESS;

        /* Sanity check to safeguard against data corruption */
        assert(cpu_node->level == MPIDR_AFFLVL0);

        /* Save the PSCI power state parameter for the core in the suspend context */
        psci_set_suspend_power_state(cpu_node, power_state);

        /*
         * Generic management: Store the re-entry information for the non-secure
         * world and allow the secure world to suspend itself
         */

        /*
         * Call the cpu suspend handler registered by the Secure Payload
         * Dispatcher to let it do any bookkeeping. If the handler encounters an
         * error, it's expected to assert within
         */
        if (psci_spd_pm && psci_spd_pm->svc_suspend)
                psci_spd_pm->svc_suspend(power_state);

        /* State management: mark this cpu as suspended */
        psci_set_state(cpu_node, PSCI_STATE_SUSPEND);

        /*
         * Generic management: Store the re-entry information for the
         * non-secure world
         */
        index = cpu_node->data;
        rc = psci_set_ns_entry_info(index, ns_entrypoint, context_id);
        if (rc != PSCI_E_SUCCESS)
                return rc;

        /*
         * Arch. management: Save the EL3 state in the 'cpu_context'
         * structure that has been allocated for this cpu, flush the
         * L1 caches and exit intra-cluster coherency et al
         */
        cm_el3_sysregs_context_save(NON_SECURE);
        rc = PSCI_E_SUCCESS;

        /*
         * Flush the EL3 state to PoC since it will be accessed after a
         * reset with the caches turned off
         */
        saved_el3_state = get_el3state_ctx(cm_get_context(mpidr, NON_SECURE));
        flush_dcache_range((uint64_t) saved_el3_state, sizeof(*saved_el3_state));

        /* Set the secure world (EL3) re-entry point after BL1 */
        psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;

        /*
         * Arch. management. Perform the necessary steps to flush all
         * cpu caches.
         *
         * TODO: This power down sequence varies across cpus so it needs to be
         * abstracted out on the basis of the MIDR like in cpu_reset_handler().
         * Do the bare minimum for the time being. Fix this before porting to
         * Cortex models.
         */
        sctlr = read_sctlr_el3();
        sctlr &= ~SCTLR_C_BIT;
        write_sctlr_el3(sctlr);

        /*
         * CAUTION: This flush to the level of unification makes an assumption
         * about the cache hierarchy at affinity level 0 (cpu) in the platform.
         * Ideally the platform should tell psci which levels to flush to exit
         * coherency.
         */
        dcsw_op_louis(DCCISW);

        /*
         * Plat. management: Allow the platform to perform the
         * necessary actions to turn off this cpu e.g. set the
         * platform defined mailbox with the psci entrypoint,
         * program the power controller etc.
         */
        if (psci_plat_pm_ops->affinst_suspend) {
                plat_state = psci_get_phys_state(cpu_node);
                rc = psci_plat_pm_ops->affinst_suspend(mpidr,
                                                       psci_entrypoint,
                                                       ns_entrypoint,
                                                       cpu_node->level,
                                                       plat_state);
        }

        return rc;
}
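
/*
 * Illustrative sketch (not part of the original file): a minimal platform
 * 'affinst_suspend' hook of the shape invoked above. The mailbox and power
 * controller helpers are hypothetical placeholders; a real port would use its
 * own platform definitions registered through plat_pm_ops.
 */
#if 0
/* Hypothetical platform helpers, declared here only for the sketch */
extern void example_plat_set_mailbox(unsigned long mpidr, unsigned long ep);
extern void example_pwrc_suspend(unsigned long mpidr, unsigned int afflvl);

static int example_affinst_suspend(unsigned long mpidr,
                                   unsigned long sec_entrypoint,
                                   unsigned long ns_entrypoint,
                                   unsigned int afflvl,
                                   unsigned int state)
{
        /* Nothing to do if this affinity instance is staying on */
        if (state != PSCI_STATE_OFF)
                return PSCI_E_SUCCESS;

        /* Stash the warm boot (psci) entrypoint in a platform mailbox */
        example_plat_set_mailbox(mpidr, sec_entrypoint);

        /* Ask the power controller to power this instance down */
        example_pwrc_suspend(mpidr, afflvl);

        return PSCI_E_SUCCESS;
}
#endif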

static int psci_afflvl1_suspend(unsigned long mpidr,
                                aff_map_node_t *cluster_node,
                                unsigned long ns_entrypoint,
                                unsigned long context_id,
                                unsigned int power_state)
{
        int rc = PSCI_E_SUCCESS;
        unsigned int plat_state;
        unsigned long psci_entrypoint;

        /* Sanity check the cluster level */
        assert(cluster_node->level == MPIDR_AFFLVL1);

        /* State management: Decrement the cluster reference count */
        psci_set_state(cluster_node, PSCI_STATE_SUSPEND);

        /*
         * Keep the physical state of this cluster handy to decide
         * what action needs to be taken
         */
        plat_state = psci_get_phys_state(cluster_node);

        /*
         * Arch. management: Flush all levels of caches to PoC if the
         * cluster is to be shut down
         */
        if (plat_state == PSCI_STATE_OFF)
                dcsw_op_all(DCCISW);

        /*
         * Plat. Management. Allow the platform to do its cluster
         * specific bookkeeping e.g. turn off interconnect coherency,
         * program the power controller etc.
         */
        if (psci_plat_pm_ops->affinst_suspend) {

                /*
                 * Sending the psci entrypoint is currently redundant
                 * beyond affinity level 0 but one never knows what a
                 * platform might do. Also it allows us to keep the
                 * platform handler prototype the same.
                 */
                psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
                rc = psci_plat_pm_ops->affinst_suspend(mpidr,
                                                       psci_entrypoint,
                                                       ns_entrypoint,
                                                       cluster_node->level,
                                                       plat_state);
        }

        return rc;
}


static int psci_afflvl2_suspend(unsigned long mpidr,
                                aff_map_node_t *system_node,
                                unsigned long ns_entrypoint,
                                unsigned long context_id,
                                unsigned int power_state)
{
        int rc = PSCI_E_SUCCESS;
        unsigned int plat_state;
        unsigned long psci_entrypoint;

        /* Cannot go beyond this */
        assert(system_node->level == MPIDR_AFFLVL2);

        /* State management: Decrement the system reference count */
        psci_set_state(system_node, PSCI_STATE_SUSPEND);

        /*
         * Keep the physical state of the system handy to decide what
         * action needs to be taken
         */
        plat_state = psci_get_phys_state(system_node);

        /*
         * Plat. Management : Allow the platform to do its bookkeeping
         * at this affinity level
         */
        if (psci_plat_pm_ops->affinst_suspend) {

                /*
                 * Sending the psci entrypoint is currently redundant
                 * beyond affinity level 0 but one never knows what a
                 * platform might do. Also it allows us to keep the
                 * platform handler prototype the same.
                 */
                psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
                rc = psci_plat_pm_ops->affinst_suspend(mpidr,
                                                       psci_entrypoint,
                                                       ns_entrypoint,
                                                       system_node->level,
                                                       plat_state);
        }

        return rc;
}

static const afflvl_suspend_handler_t psci_afflvl_suspend_handlers[] = {
        psci_afflvl0_suspend,
        psci_afflvl1_suspend,
        psci_afflvl2_suspend,
};

/*******************************************************************************
 * This function takes an array of pointers to affinity instance nodes in the
 * topology tree and calls the suspend handler for the corresponding affinity
 * levels
 ******************************************************************************/
static int psci_call_suspend_handlers(mpidr_aff_map_nodes_t mpidr_nodes,
                                      int start_afflvl,
                                      int end_afflvl,
                                      unsigned long mpidr,
                                      unsigned long entrypoint,
                                      unsigned long context_id,
                                      unsigned int power_state)
{
        int rc = PSCI_E_INVALID_PARAMS, level;
        aff_map_node_t *node;

        for (level = start_afflvl; level <= end_afflvl; level++) {
                node = mpidr_nodes[level];
                if (node == NULL)
                        continue;

                /*
                 * TODO: In case of an error, should there be a way
                 * of restoring what we might have torn down at
                 * lower affinity levels?
                 */
                rc = psci_afflvl_suspend_handlers[level](mpidr,
                                                         node,
                                                         entrypoint,
                                                         context_id,
                                                         power_state);
                if (rc != PSCI_E_SUCCESS)
                        break;
        }

        return rc;
}

/*******************************************************************************
 * Top level handler which is called when a cpu wants to suspend its execution.
 * It is assumed that along with turning the cpu off, higher affinity levels up
 * to the target affinity level will be turned off as well. It traverses
 * through all the affinity levels performing generic, architectural and
 * platform setup and state management e.g. for a cluster that's to be
 * suspended, it will call the platform specific code which will disable
 * coherency at the interconnect level if the cpu is the last in the cluster.
 * For a cpu it could mean programming the power controller etc.
 *
 * The state of all the relevant affinity levels is changed prior to calling the
 * affinity level specific handlers as their actions would depend upon the state
 * the affinity level is about to enter.
 *
 * The affinity level specific handlers are called in ascending order i.e. from
 * the lowest to the highest affinity level implemented by the platform because
 * to turn off affinity level X it is necessary to turn off affinity level X - 1
 * first.
 *
 * CAUTION: This function is called with coherent stacks so that coherency can
 * be turned off and caches can be flushed safely.
 ******************************************************************************/
int psci_afflvl_suspend(unsigned long mpidr,
                        unsigned long entrypoint,
                        unsigned long context_id,
                        unsigned int power_state,
                        int start_afflvl,
                        int end_afflvl)
{
        int rc = PSCI_E_SUCCESS;
        mpidr_aff_map_nodes_t mpidr_nodes;

        mpidr &= MPIDR_AFFINITY_MASK;

        /*
         * Collect the pointers to the nodes in the topology tree for
         * each affinity instance in the mpidr. If this function does
         * not return successfully then either the mpidr or the affinity
         * levels are incorrect.
         */
        rc = psci_get_aff_map_nodes(mpidr,
                                    start_afflvl,
                                    end_afflvl,
                                    mpidr_nodes);
        if (rc != PSCI_E_SUCCESS)
                return rc;

        /*
         * This function acquires the lock corresponding to each affinity
         * level so that by the time all locks are taken, a snapshot of the
         * system topology has been made and state management can be done
         * safely.
         */
        psci_acquire_afflvl_locks(mpidr,
                                  start_afflvl,
                                  end_afflvl,
                                  mpidr_nodes);

        /* Perform generic, architecture and platform specific handling */
        rc = psci_call_suspend_handlers(mpidr_nodes,
                                        start_afflvl,
                                        end_afflvl,
                                        mpidr,
                                        entrypoint,
                                        context_id,
                                        power_state);

        /*
         * Release the locks corresponding to each affinity level in the
         * reverse order to which they were acquired.
         */
        psci_release_afflvl_locks(mpidr,
                                  start_afflvl,
                                  end_afflvl,
                                  mpidr_nodes);

        return rc;
}
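
/*
 * Illustrative sketch (not part of the original file): how the CPU_SUSPEND
 * entry point in psci_main.c might drive the top level handler above. The
 * parameter validation is deliberately simplified and the function name is
 * hypothetical.
 */
#if 0
int example_cpu_suspend(unsigned int power_state,
                        unsigned long entrypoint,
                        unsigned long context_id)
{
        unsigned long mpidr = read_mpidr();
        int target_afflvl = psci_get_pstate_afflvl(power_state);

        /* Reject requests beyond the deepest affinity level implemented */
        if (target_afflvl > MPIDR_MAX_AFFLVL)
                return PSCI_E_INVALID_PARAMS;

        /* Suspend this cpu and every affinity level up to the target */
        return psci_afflvl_suspend(mpidr,
                                   entrypoint,
                                   context_id,
                                   power_state,
                                   MPIDR_AFFLVL0,
                                   target_afflvl);
}
#endif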

/*******************************************************************************
 * The following functions finish an earlier affinity suspend request. They
 * are called by the common finisher routine in psci_common.c.
 ******************************************************************************/
static unsigned int psci_afflvl0_suspend_finish(unsigned long mpidr,
                                                aff_map_node_t *cpu_node)
{
        unsigned int index, plat_state, state, rc = PSCI_E_SUCCESS;
        int32_t suspend_level;

        assert(cpu_node->level == MPIDR_AFFLVL0);

        /* Ensure we have been woken up from a suspended state */
        state = psci_get_state(cpu_node);
        assert(state == PSCI_STATE_SUSPEND);

        /*
         * Plat. management: Perform the platform specific actions
         * before we change the state of the cpu e.g. enabling the
         * gic or zeroing the mailbox register. If anything goes
         * wrong then assert as there is no way to recover from this
         * situation.
         */
        if (psci_plat_pm_ops->affinst_suspend_finish) {

                /* Get the physical state of this cpu */
                plat_state = get_phys_state(state);
                rc = psci_plat_pm_ops->affinst_suspend_finish(mpidr,
                                                              cpu_node->level,
                                                              plat_state);
                assert(rc == PSCI_E_SUCCESS);
        }

        /* Get the index for restoring the re-entry information */
        index = cpu_node->data;

        /*
         * Arch. management: Restore the stashed EL3 architectural
         * context from the 'cpu_context' structure for this cpu.
         */
        cm_el3_sysregs_context_restore(NON_SECURE);
        rc = PSCI_E_SUCCESS;

        /*
         * Use the more complex exception vectors to enable SPD
         * initialisation. SP_EL3 should point to a 'cpu_context'
         * structure which has an exception stack allocated. The
         * non-secure context should have been set on this cpu
         * prior to suspension.
         */
        assert(cm_get_context(mpidr, NON_SECURE));
        cm_set_next_eret_context(NON_SECURE);
        write_vbar_el3((uint64_t) runtime_exceptions);

        /*
         * Call the cpu suspend finish handler registered by the Secure Payload
         * Dispatcher to let it do any bookkeeping. If the handler encounters an
         * error, it's expected to assert within
         */
        if (psci_spd_pm && psci_spd_pm->svc_suspend) {
                suspend_level = psci_get_aff_map_node_suspend_afflvl(cpu_node);
                assert(suspend_level != PSCI_INVALID_DATA);
                psci_spd_pm->svc_suspend_finish(suspend_level);
        }

        /* Invalidate the suspend context for the node */
        psci_set_suspend_power_state(cpu_node, PSCI_INVALID_DATA);

        /*
         * Generic management: Now we just need to retrieve the
         * information that we had stashed away during the suspend
         * call to set this cpu on its way.
         */
        psci_get_ns_entry_info(index);

        /* State management: mark this cpu as on */
        psci_set_state(cpu_node, PSCI_STATE_ON);

        /* Clean caches before re-entering normal world */
        dcsw_op_louis(DCCSW);

        return rc;
}
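
/*
 * Illustrative sketch (not part of the original file): a minimal platform
 * 'affinst_suspend_finish' hook of the shape invoked above, handling only the
 * cpu level. The GIC and mailbox helpers are hypothetical placeholders for
 * whatever the platform port actually provides.
 */
#if 0
/* Hypothetical platform helpers, declared here only for the sketch */
extern void example_gic_cpuif_setup(void);
extern void example_plat_set_mailbox(unsigned long mpidr, unsigned long ep);

static int example_affinst_suspend_finish(unsigned long mpidr,
                                          unsigned int afflvl,
                                          unsigned int state)
{
        if (afflvl != MPIDR_AFFLVL0)
                return PSCI_E_SUCCESS;

        /* Re-enable the cpu interface of the GIC on this cpu */
        example_gic_cpuif_setup();

        /* Clear the mailbox so a spurious wakeup cannot reuse the old entrypoint */
        example_plat_set_mailbox(mpidr, 0);

        return PSCI_E_SUCCESS;
}
#endif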

static unsigned int psci_afflvl1_suspend_finish(unsigned long mpidr,
                                                aff_map_node_t *cluster_node)
{
        unsigned int plat_state, rc = PSCI_E_SUCCESS;

        assert(cluster_node->level == MPIDR_AFFLVL1);

        /*
         * Plat. management: Perform the platform specific actions
         * as per the old state of the cluster e.g. enabling
         * coherency at the interconnect depends upon the state with
         * which this cluster was powered up. If anything goes wrong
         * then assert as there is no way to recover from this
         * situation.
         */
        if (psci_plat_pm_ops->affinst_suspend_finish) {

                /* Get the physical state of this cluster */
                plat_state = psci_get_phys_state(cluster_node);
                rc = psci_plat_pm_ops->affinst_suspend_finish(mpidr,
                                                              cluster_node->level,
                                                              plat_state);
                assert(rc == PSCI_E_SUCCESS);
        }

        /* State management: Increment the cluster reference count */
        psci_set_state(cluster_node, PSCI_STATE_ON);

        return rc;
}


static unsigned int psci_afflvl2_suspend_finish(unsigned long mpidr,
                                                aff_map_node_t *system_node)
{
        unsigned int plat_state, rc = PSCI_E_SUCCESS;

        /* Cannot go beyond this affinity level */
        assert(system_node->level == MPIDR_AFFLVL2);

        /*
         * Currently, there are no architectural actions to perform
         * at the system level.
         */

        /*
         * Plat. management: Perform the platform specific actions
         * as per the old state of the system e.g. enabling
         * coherency at the interconnect depends upon the state with
         * which the system was powered up. If anything goes wrong
         * then assert as there is no way to recover from this
         * situation.
         */
        if (psci_plat_pm_ops->affinst_suspend_finish) {

                /* Get the physical state of the system */
                plat_state = psci_get_phys_state(system_node);
                rc = psci_plat_pm_ops->affinst_suspend_finish(mpidr,
                                                              system_node->level,
                                                              plat_state);
                assert(rc == PSCI_E_SUCCESS);
        }

        /* State management: Increment the system reference count */
        psci_set_state(system_node, PSCI_STATE_ON);

        return rc;
}

const afflvl_power_on_finisher_t psci_afflvl_suspend_finishers[] = {
        psci_afflvl0_suspend_finish,
        psci_afflvl1_suspend_finish,
        psci_afflvl2_suspend_finish,
};