blob: c37658bb63cb7e851282c0b53679f5093a71aee1 [file] [log] [blame]
Achin Gupta4f6ad662013-10-25 09:08:21 +01001/*
Dan Handleyab2d31e2013-12-02 19:25:12 +00002 * Copyright (c) 2013, ARM Limited and Contributors. All rights reserved.
Achin Gupta4f6ad662013-10-25 09:08:21 +01003 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * Redistributions of source code must retain the above copyright notice, this
8 * list of conditions and the following disclaimer.
9 *
10 * Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
14 * Neither the name of ARM nor the names of its contributors may be used
15 * to endorse or promote products derived from this software without specific
16 * prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include <stdio.h>
32#include <string.h>
33#include <assert.h>
34#include <arch_helpers.h>
35#include <console.h>
36#include <platform.h>
37#include <psci.h>
38#include <psci_private.h>
Achin Guptac8afc782013-11-25 18:45:02 +000039#include <runtime_svc.h>
Achin Gupta4f6ad662013-10-25 09:08:21 +010040
/*******************************************************************************
 * Arrays that contain the information needed to resume a cpu's execution when
 * it is woken out of the suspend or off states. 'psci_ns_einfo_idx' keeps
 * track of the next free index in the 'psci_ns_entry_info' &
 * 'psci_secure_context' arrays. Each cpu is allocated a single entry in each
 * array during startup.
 ******************************************************************************/
secure_context psci_secure_context[PSCI_NUM_AFFS];
ns_entry_info psci_ns_entry_info[PSCI_NUM_AFFS];
unsigned int psci_ns_einfo_idx;

/*******************************************************************************
 * Grand array that holds the platform's topology information for state
 * management of affinity instances. Each node (aff_map_node) in the array
 * corresponds to an affinity instance e.g. cluster, cpu within an mpidr.
 * Placed in the 'tzfw_coherent_mem' linker section — presumably so it can be
 * accessed regardless of MMU/cache state; confirm against the linker script.
 ******************************************************************************/
aff_map_node psci_aff_map[PSCI_NUM_AFFS]
__attribute__ ((section("tzfw_coherent_mem")));

/*******************************************************************************
 * In a system, a certain number of affinity instances are present at an
 * affinity level. The cumulative number of instances across all levels are
 * stored in 'psci_aff_map'. The topology tree has been flattened into this
 * array. To retrieve nodes, information about the extents of each affinity
 * level i.e. start index and end index needs to be present. 'psci_aff_limits'
 * stores this information.
 ******************************************************************************/
aff_limits_node psci_aff_limits[MPIDR_MAX_AFFLVL + 1];

/*******************************************************************************
 * Pointer to functions exported by the platform to complete power mgmt. ops
 ******************************************************************************/
plat_pm_ops *psci_plat_pm_ops;
74/*******************************************************************************
75 * Simple routine to retrieve the maximum affinity level supported by the
76 * platform and check that it makes sense.
77 ******************************************************************************/
78int get_max_afflvl()
79{
80 int aff_lvl;
81
82 aff_lvl = plat_get_max_afflvl();
83 assert(aff_lvl <= MPIDR_MAX_AFFLVL && aff_lvl >= MPIDR_AFFLVL0);
84
85 return aff_lvl;
86}
87
88/*******************************************************************************
89 * Simple routine to set the id of an affinity instance at a given level in the
90 * mpidr.
91 ******************************************************************************/
92unsigned long mpidr_set_aff_inst(unsigned long mpidr,
93 unsigned char aff_inst,
94 int aff_lvl)
95{
96 unsigned long aff_shift;
97
98 assert(aff_lvl <= MPIDR_AFFLVL3);
99
100 /*
101 * Decide the number of bits to shift by depending upon
102 * the affinity level
103 */
104 aff_shift = get_afflvl_shift(aff_lvl);
105
106 /* Clear the existing affinity instance & set the new one*/
107 mpidr &= ~(MPIDR_AFFLVL_MASK << aff_shift);
108 mpidr |= aff_inst << aff_shift;
109
110 return mpidr;
111}
112
113/*******************************************************************************
Achin Gupta0959db52013-12-02 17:33:04 +0000114 * This function sanity checks a range of affinity levels.
115 ******************************************************************************/
116int psci_check_afflvl_range(int start_afflvl, int end_afflvl)
117{
118 /* Sanity check the parameters passed */
119 if (end_afflvl > MPIDR_MAX_AFFLVL)
120 return PSCI_E_INVALID_PARAMS;
121
122 if (start_afflvl < MPIDR_AFFLVL0)
123 return PSCI_E_INVALID_PARAMS;
124
125 if (end_afflvl < start_afflvl)
126 return PSCI_E_INVALID_PARAMS;
127
128 return PSCI_E_SUCCESS;
129}
130
131/*******************************************************************************
132 * This function is passed an array of pointers to affinity level nodes in the
133 * topology tree for an mpidr. It picks up locks for each affinity level bottom
134 * up in the range specified.
135 ******************************************************************************/
136void psci_acquire_afflvl_locks(unsigned long mpidr,
137 int start_afflvl,
138 int end_afflvl,
139 mpidr_aff_map_nodes mpidr_nodes)
140{
141 int level;
142
143 for (level = start_afflvl; level <= end_afflvl; level++) {
144 if (mpidr_nodes[level] == NULL)
145 continue;
146 bakery_lock_get(mpidr, &mpidr_nodes[level]->lock);
147 }
148}
149
150/*******************************************************************************
151 * This function is passed an array of pointers to affinity level nodes in the
152 * topology tree for an mpidr. It releases the lock for each affinity level top
153 * down in the range specified.
154 ******************************************************************************/
155void psci_release_afflvl_locks(unsigned long mpidr,
156 int start_afflvl,
157 int end_afflvl,
158 mpidr_aff_map_nodes mpidr_nodes)
159{
160 int level;
161
162 for (level = end_afflvl; level >= start_afflvl; level--) {
163 if (mpidr_nodes[level] == NULL)
164 continue;
165 bakery_lock_release(mpidr, &mpidr_nodes[level]->lock);
166 }
167}
168
169/*******************************************************************************
Achin Gupta4f6ad662013-10-25 09:08:21 +0100170 * Simple routine to determine whether an affinity instance at a given level
171 * in an mpidr exists or not.
172 ******************************************************************************/
173int psci_validate_mpidr(unsigned long mpidr, int level)
174{
175 aff_map_node *node;
176
177 node = psci_get_aff_map_node(mpidr, level);
178 if (node && (node->state & PSCI_AFF_PRESENT))
179 return PSCI_E_SUCCESS;
180 else
181 return PSCI_E_INVALID_PARAMS;
182}
183
184/*******************************************************************************
185 * Simple routine to determine the first affinity level instance that is present
186 * between the start and end affinity levels. This helps to skip handling of
187 * absent affinity levels while performing psci operations.
188 * The start level can be > or <= to the end level depending upon whether this
189 * routine is expected to search top down or bottom up.
190 ******************************************************************************/
191int psci_get_first_present_afflvl(unsigned long mpidr,
192 int start_afflvl,
193 int end_afflvl,
194 aff_map_node **node)
195{
196 int level;
197
198 /* Check whether we have to search up or down */
199 if (start_afflvl <= end_afflvl) {
200 for (level = start_afflvl; level <= end_afflvl; level++) {
201 *node = psci_get_aff_map_node(mpidr, level);
202 if (*node && ((*node)->state & PSCI_AFF_PRESENT))
203 break;
204 }
205 } else {
206 for (level = start_afflvl; level >= end_afflvl; level--) {
207 *node = psci_get_aff_map_node(mpidr, level);
208 if (*node && ((*node)->state & PSCI_AFF_PRESENT))
209 break;
210 }
211 }
212
213 return level;
214}
215
216/*******************************************************************************
Achin Gupta0959db52013-12-02 17:33:04 +0000217 * Iteratively change the affinity state between the current and target affinity
Achin Gupta4f6ad662013-10-25 09:08:21 +0100218 * levels. The target state matters only if we are starting from affinity level
219 * 0 i.e. a cpu otherwise the state depends upon the state of the lower affinity
220 * levels.
221 ******************************************************************************/
Achin Gupta0959db52013-12-02 17:33:04 +0000222int psci_change_state(mpidr_aff_map_nodes mpidr_nodes,
223 int start_afflvl,
224 int end_afflvl,
Achin Gupta4f6ad662013-10-25 09:08:21 +0100225 unsigned int tgt_state)
226{
Achin Gupta0959db52013-12-02 17:33:04 +0000227 int rc = PSCI_E_SUCCESS, level;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100228 unsigned int state;
Achin Gupta0959db52013-12-02 17:33:04 +0000229 aff_map_node *node;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100230
Achin Gupta0959db52013-12-02 17:33:04 +0000231 /*
232 * Get a temp pointer to the node. It is not possible that affinity
233 * level 0 is missing. Simply ignore higher missing levels.
234 */
235 for (level = start_afflvl; level <= end_afflvl; level++) {
Achin Gupta4f6ad662013-10-25 09:08:21 +0100236
Achin Gupta0959db52013-12-02 17:33:04 +0000237 node = mpidr_nodes[level];
238 if (level == MPIDR_AFFLVL0) {
239 assert(node);
240 psci_set_state(node->state, tgt_state);
241 } else {
242 if (node == NULL)
243 continue;
244 state = psci_calculate_affinity_state(node);
245 psci_set_state(node->state, state);
246 }
Achin Gupta4f6ad662013-10-25 09:08:21 +0100247 }
248
Achin Gupta0959db52013-12-02 17:33:04 +0000249 /* If all went well then the cpu should be in the target state */
250 if (start_afflvl == MPIDR_AFFLVL0) {
251 node = mpidr_nodes[MPIDR_AFFLVL0];
252 state = psci_get_state(node->state);
253 assert(tgt_state == state);
254 }
Achin Gupta4f6ad662013-10-25 09:08:21 +0100255
256 return rc;
257}
258
/*******************************************************************************
 * This routine does the heavy lifting for psci_change_state(). It examines the
 * state of each affinity instance at the next lower affinity level and decides
 * its final state accordingly. If a lower affinity instance is ON then the
 * higher affinity instance is ON. If all the lower affinity instances are OFF
 * then the higher affinity instance is OFF. If at least one lower affinity
 * instance is SUSPENDED then the higher affinity instance is SUSPENDED. If
 * only a single lower affinity instance is ON_PENDING then the higher affinity
 * instance is ON_PENDING as well.
 *
 * The resulting precedence is: ON > SUSPEND > ON_PENDING > OFF.
 ******************************************************************************/
unsigned int psci_calculate_affinity_state(aff_map_node *aff_node)
{
	/*
	 * NOTE(review): 'ctr' is signed while 'aff_count' is unsigned; the
	 * loop comparison promotes 'ctr' to unsigned. Harmless for the small
	 * counts involved, but worth confirming plat_get_aff_count() can
	 * never return a huge value.
	 */
	int ctr;
	unsigned int aff_count, hi_aff_state;
	unsigned long tempidr;
	aff_map_node *lo_aff_node;

	/* Cannot calculate lowest affinity state. It is simply assigned */
	assert(aff_node->level > MPIDR_AFFLVL0);

	/*
	 * Find the number of affinity instances at level X-1 e.g. number of
	 * cpus in a cluster. The level X state depends upon the state of each
	 * instance at level X-1
	 */
	hi_aff_state = PSCI_STATE_OFF;
	aff_count = plat_get_aff_count(aff_node->level - 1, aff_node->mpidr);
	for (ctr = 0; ctr < aff_count; ctr++) {

		/*
		 * Create a mpidr for each lower affinity level (X-1). Use their
		 * states to influence the higher affinity state (X).
		 */
		tempidr = mpidr_set_aff_inst(aff_node->mpidr,
					     ctr,
					     aff_node->level - 1);
		lo_aff_node = psci_get_aff_map_node(tempidr,
						    aff_node->level - 1);
		assert(lo_aff_node);

		/* Continue only if the cpu exists within the cluster */
		if (!(lo_aff_node->state & PSCI_AFF_PRESENT))
			continue;

		switch (psci_get_state(lo_aff_node->state)) {

		/*
		 * If any lower affinity is on within the cluster, then
		 * the higher affinity is on. Nothing can outrank ON, so
		 * return immediately.
		 */
		case PSCI_STATE_ON:
			return PSCI_STATE_ON;

		/*
		 * At least one X-1 needs to be suspended for X to be suspended
		 * but it is effectively on for the affinity_info call.
		 * SUSPEND > ON_PENDING > OFF.
		 */
		case PSCI_STATE_SUSPEND:
			hi_aff_state = PSCI_STATE_SUSPEND;
			continue;

		/*
		 * At least one X-1 needs to be on_pending & the rest off for X
		 * to be on_pending. ON_PENDING > OFF, but never downgrade a
		 * previously seen SUSPEND.
		 */
		case PSCI_STATE_ON_PENDING:
			if (hi_aff_state != PSCI_STATE_SUSPEND)
				hi_aff_state = PSCI_STATE_ON_PENDING;
			continue;

		/* Higher affinity is off if all lower affinities are off. */
		case PSCI_STATE_OFF:
			continue;

		default:
			/* Unknown state encodings are a programming error */
			assert(0);
		}
	}

	return hi_aff_state;
}
341
342/*******************************************************************************
343 * This function retrieves all the stashed information needed to correctly
344 * resume a cpu's execution in the non-secure state after it has been physically
345 * powered on i.e. turned ON or resumed from SUSPEND
346 ******************************************************************************/
Achin Guptac8afc782013-11-25 18:45:02 +0000347void psci_get_ns_entry_info(unsigned int index)
Achin Gupta4f6ad662013-10-25 09:08:21 +0100348{
349 unsigned long sctlr = 0, scr, el_status, id_aa64pfr0;
Achin Guptac8afc782013-11-25 18:45:02 +0000350 gp_regs *ns_gp_regs;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100351
352 scr = read_scr();
353
354 /* Switch to the non-secure view of the registers */
355 write_scr(scr | SCR_NS_BIT);
356
357 /* Find out which EL we are going to */
358 id_aa64pfr0 = read_id_aa64pfr0_el1();
359 el_status = (id_aa64pfr0 >> ID_AA64PFR0_EL2_SHIFT) &
360 ID_AA64PFR0_ELX_MASK;
361
362 /* Restore endianess */
363 if (psci_ns_entry_info[index].sctlr & SCTLR_EE_BIT)
364 sctlr |= SCTLR_EE_BIT;
365 else
366 sctlr &= ~SCTLR_EE_BIT;
367
368 /* Turn off MMU and Caching */
369 sctlr &= ~(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_M_BIT);
370
371 /* Set the register width */
372 if (psci_ns_entry_info[index].scr & SCR_RW_BIT)
373 scr |= SCR_RW_BIT;
374 else
375 scr &= ~SCR_RW_BIT;
376
377 scr |= SCR_NS_BIT;
378
379 if (el_status)
380 write_sctlr_el2(sctlr);
381 else
382 write_sctlr_el1(sctlr);
383
384 /* Fulfill the cpu_on entry reqs. as per the psci spec */
385 write_scr(scr);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100386 write_elr(psci_ns_entry_info[index].eret_info.entrypoint);
387
Achin Guptac8afc782013-11-25 18:45:02 +0000388 /*
389 * Set the general purpose registers to ~0 upon entry into the
390 * non-secure world except for x0 which should contain the
391 * context id & spsr. This is done directly on the "would be"
392 * stack pointer. Prior to entry into the non-secure world, an
393 * offset equivalent to the size of the 'gp_regs' structure is
394 * added to the sp. This general purpose register context is
395 * retrieved then.
396 */
397 ns_gp_regs = (gp_regs *) platform_get_stack(read_mpidr());
398 ns_gp_regs--;
399 memset(ns_gp_regs, ~0, sizeof(*ns_gp_regs));
400 ns_gp_regs->x0 = psci_ns_entry_info[index].context_id;
401 ns_gp_regs->spsr = psci_ns_entry_info[index].eret_info.spsr;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100402}
403
/*******************************************************************************
 * This function retrieves and stashes all the information needed to correctly
 * resume a cpu's execution in the non-secure state after it has been physically
 * powered on i.e. turned ON or resumed from SUSPEND. This is done prior to
 * turning it on or before suspending it.
 *
 * 'index' selects the cpu's slot in psci_ns_entry_info; 'entrypoint' and
 * 'context_id' are the caller-supplied non-secure entry address and opaque
 * context value. Returns PSCI_E_SUCCESS, or PSCI_E_INVALID_PARAMS if a Thumb
 * (bit 0 set) entrypoint is supplied for an aarch64 destination.
 ******************************************************************************/
int psci_set_ns_entry_info(unsigned int index,
			   unsigned long entrypoint,
			   unsigned long context_id)
{
	int rc = PSCI_E_SUCCESS;
	unsigned int rw, mode, ee, spsr = 0;
	unsigned long id_aa64pfr0 = read_id_aa64pfr0_el1(), scr = read_scr();
	unsigned long el_status;

	/* Figure out what mode do we enter the non-secure world in */
	el_status = (id_aa64pfr0 >> ID_AA64PFR0_EL2_SHIFT) &
		ID_AA64PFR0_ELX_MASK;

	/*
	 * Figure out whether the cpu enters the non-secure address space
	 * in aarch32 or aarch64
	 */
	rw = scr & SCR_RW_BIT;
	if (rw) {

		/*
		 * Check whether a Thumb entry point has been provided for an
		 * aarch64 EL
		 */
		if (entrypoint & 0x1)
			return PSCI_E_INVALID_PARAMS;

		/* Enter EL2 only if it exists and EL2 entry is enabled */
		if (el_status && (scr & SCR_HCE_BIT)) {
			mode = MODE_EL2;
			ee = read_sctlr_el2() & SCTLR_EE_BIT;
		} else {
			mode = MODE_EL1;
			ee = read_sctlr_el1() & SCTLR_EE_BIT;
		}

		/* Mask all asynchronous exceptions on entry */
		spsr = DAIF_DBG_BIT | DAIF_ABT_BIT;
		spsr |= DAIF_IRQ_BIT | DAIF_FIQ_BIT;
		spsr <<= PSR_DAIF_SHIFT;
		spsr |= make_spsr(mode, MODE_SP_ELX, !rw);

		/* Record register width & endianness for the resume path */
		psci_ns_entry_info[index].sctlr |= ee;
		psci_ns_entry_info[index].scr |= SCR_RW_BIT;
	} else {

		/* Check whether aarch32 has to be entered in Thumb mode */
		if (entrypoint & 0x1)
			spsr = SPSR32_T_BIT;

		/* Enter HYP only if it exists and HYP entry is enabled */
		if (el_status && (scr & SCR_HCE_BIT)) {
			mode = AARCH32_MODE_HYP;
			ee = read_sctlr_el2() & SCTLR_EE_BIT;
		} else {
			mode = AARCH32_MODE_SVC;
			ee = read_sctlr_el1() & SCTLR_EE_BIT;
		}

		/*
		 * TODO: Choose async. exception bits if HYP mode is not
		 * implemented according to the values of SCR.{AW, FW} bits
		 */
		spsr |= DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;
		spsr <<= PSR_DAIF_SHIFT;
		if(ee)
			spsr |= SPSR32_EE_BIT;
		spsr |= mode;

		/* Ensure that the CSPR.E and SCTLR.EE bits match */
		psci_ns_entry_info[index].sctlr |= ee;
		psci_ns_entry_info[index].scr &= ~SCR_RW_BIT;
	}

	/* Stash the eret frame and context id for the power-on finisher */
	psci_ns_entry_info[index].eret_info.entrypoint = entrypoint;
	psci_ns_entry_info[index].eret_info.spsr = spsr;
	psci_ns_entry_info[index].context_id = context_id;

	return rc;
}
487
488/*******************************************************************************
489 * An affinity level could be on, on_pending, suspended or off. These are the
Achin Gupta3140a9e2013-12-02 16:23:12 +0000490 * logical states it can be in. Physically either it is off or on. When it is in
491 * the state on_pending then it is about to be turned on. It is not possible to
Achin Gupta4f6ad662013-10-25 09:08:21 +0100492 * tell whether that's actually happenned or not. So we err on the side of
493 * caution & treat the affinity level as being turned off.
494 ******************************************************************************/
495inline unsigned int psci_get_phys_state(unsigned int aff_state)
496{
497 return (aff_state != PSCI_STATE_ON ? PSCI_STATE_OFF : PSCI_STATE_ON);
498}
499
500unsigned int psci_get_aff_phys_state(aff_map_node *aff_node)
501{
502 unsigned int aff_state;
503
504 aff_state = psci_get_state(aff_node->state);
505 return psci_get_phys_state(aff_state);
506}
507
508/*******************************************************************************
Achin Gupta0959db52013-12-02 17:33:04 +0000509 * This function takes an array of pointers to affinity instance nodes in the
510 * topology tree and calls the physical power on handler for the corresponding
511 * affinity levels
512 ******************************************************************************/
513static int psci_call_power_on_handlers(mpidr_aff_map_nodes mpidr_nodes,
514 int start_afflvl,
515 int end_afflvl,
516 afflvl_power_on_finisher *pon_handlers,
517 unsigned long mpidr)
518{
519 int rc = PSCI_E_INVALID_PARAMS, level;
520 aff_map_node *node;
521
522 for (level = end_afflvl; level >= start_afflvl; level--) {
523 node = mpidr_nodes[level];
524 if (node == NULL)
525 continue;
526
527 /*
528 * If we run into any trouble while powering up an
529 * affinity instance, then there is no recovery path
530 * so simply return an error and let the caller take
531 * care of the situation.
532 */
533 rc = pon_handlers[level](mpidr, node);
534 if (rc != PSCI_E_SUCCESS)
535 break;
536 }
537
538 return rc;
539}
540
541/*******************************************************************************
Achin Gupta4f6ad662013-10-25 09:08:21 +0100542 * Generic handler which is called when a cpu is physically powered on. It
Achin Gupta0959db52013-12-02 17:33:04 +0000543 * traverses through all the affinity levels performing generic, architectural,
Achin Gupta4f6ad662013-10-25 09:08:21 +0100544 * platform setup and state management e.g. for a cluster that's been powered
545 * on, it will call the platform specific code which will enable coherency at
546 * the interconnect level. For a cpu it could mean turning on the MMU etc.
547 *
Achin Gupta0959db52013-12-02 17:33:04 +0000548 * The state of all the relevant affinity levels is changed after calling the
549 * affinity level specific handlers as their actions would depend upon the state
550 * the affinity level is exiting from.
551 *
552 * The affinity level specific handlers are called in descending order i.e. from
553 * the highest to the lowest affinity level implemented by the platform because
554 * to turn on affinity level X it is neccesary to turn on affinity level X + 1
555 * first.
Achin Gupta4f6ad662013-10-25 09:08:21 +0100556 *
557 * CAUTION: This function is called with coherent stacks so that coherency and
558 * the mmu can be turned on safely.
559 ******************************************************************************/
Achin Gupta0959db52013-12-02 17:33:04 +0000560void psci_afflvl_power_on_finish(unsigned long mpidr,
561 int start_afflvl,
562 int end_afflvl,
563 afflvl_power_on_finisher *pon_handlers)
Achin Gupta4f6ad662013-10-25 09:08:21 +0100564{
Achin Gupta0959db52013-12-02 17:33:04 +0000565 mpidr_aff_map_nodes mpidr_nodes;
566 int rc;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100567
568 mpidr &= MPIDR_AFFINITY_MASK;;
569
570 /*
Achin Gupta0959db52013-12-02 17:33:04 +0000571 * Collect the pointers to the nodes in the topology tree for
572 * each affinity instance in the mpidr. If this function does
573 * not return successfully then either the mpidr or the affinity
574 * levels are incorrect. Either case is an irrecoverable error.
Achin Gupta4f6ad662013-10-25 09:08:21 +0100575 */
Achin Gupta0959db52013-12-02 17:33:04 +0000576 rc = psci_get_aff_map_nodes(mpidr,
577 start_afflvl,
578 end_afflvl,
579 mpidr_nodes);
580 assert (rc == PSCI_E_SUCCESS);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100581
582 /*
Achin Gupta0959db52013-12-02 17:33:04 +0000583 * This function acquires the lock corresponding to each affinity
584 * level so that by the time all locks are taken, the system topology
585 * is snapshot and state management can be done safely.
Achin Gupta4f6ad662013-10-25 09:08:21 +0100586 */
Achin Gupta0959db52013-12-02 17:33:04 +0000587 psci_acquire_afflvl_locks(mpidr,
588 start_afflvl,
589 end_afflvl,
590 mpidr_nodes);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100591
592 /* Perform generic, architecture and platform specific handling */
Achin Gupta0959db52013-12-02 17:33:04 +0000593 rc = psci_call_power_on_handlers(mpidr_nodes,
594 start_afflvl,
595 end_afflvl,
596 pon_handlers,
597 mpidr);
598 assert (rc == PSCI_E_SUCCESS);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100599
600 /*
Achin Gupta0959db52013-12-02 17:33:04 +0000601 * State management: Update the state of each affinity instance
602 * between the start and end affinity levels
Achin Gupta4f6ad662013-10-25 09:08:21 +0100603 */
Achin Gupta0959db52013-12-02 17:33:04 +0000604 psci_change_state(mpidr_nodes,
605 start_afflvl,
606 end_afflvl,
607 PSCI_STATE_ON);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100608
Achin Gupta0959db52013-12-02 17:33:04 +0000609 /*
610 * This loop releases the lock corresponding to each affinity level
611 * in the reverse order to which they were acquired.
612 */
613 psci_release_afflvl_locks(mpidr,
614 start_afflvl,
615 end_afflvl,
616 mpidr_nodes);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100617}