/*
 * Copyright (c) 2013, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <arch_helpers.h>
#include <console.h>
#include <platform.h>
#include <psci.h>
#include <psci_private.h>
#include <runtime_svc.h>

/*******************************************************************************
 * Arrays that contain the information needed to resume a cpu's execution when
 * it is woken out of the suspend or off states. 'psci_ns_einfo_idx' keeps
 * track of the next free index in the 'psci_ns_entry_info' &
 * 'psci_secure_context' arrays. Each cpu is allocated a single entry in each
 * array during startup.
 ******************************************************************************/
secure_context psci_secure_context[PSCI_NUM_AFFS];
ns_entry_info psci_ns_entry_info[PSCI_NUM_AFFS];
unsigned int psci_ns_einfo_idx;

/*******************************************************************************
 * Grand array that holds the platform's topology information for state
 * management of affinity instances. Each node (aff_map_node) in the array
 * corresponds to an affinity instance e.g. a cluster or a cpu within an mpidr.
 ******************************************************************************/
aff_map_node psci_aff_map[PSCI_NUM_AFFS]
__attribute__ ((section("tzfw_coherent_mem")));

/*******************************************************************************
 * In a system, a certain number of affinity instances are present at an
 * affinity level. The cumulative number of instances across all levels is
 * stored in 'psci_aff_map'. The topology tree has been flattened into this
 * array. To retrieve nodes, information about the extents of each affinity
 * level i.e. start index and end index needs to be present. 'psci_aff_limits'
 * stores this information.
 ******************************************************************************/
aff_limits_node psci_aff_limits[MPIDR_MAX_AFFLVL + 1];

/*******************************************************************************
 * Pointer to functions exported by the platform to complete power mgmt. ops
 ******************************************************************************/
plat_pm_ops *psci_plat_pm_ops;

/*******************************************************************************
 * Simple routine to retrieve the maximum affinity level supported by the
 * platform and check that it makes sense.
 ******************************************************************************/
int get_max_afflvl(void)
{
	int aff_lvl;

	aff_lvl = plat_get_max_afflvl();
	assert(aff_lvl <= MPIDR_MAX_AFFLVL && aff_lvl >= MPIDR_AFFLVL0);

	return aff_lvl;
}

/*******************************************************************************
 * Simple routine to set the id of an affinity instance at a given level in the
 * mpidr.
 ******************************************************************************/
unsigned long mpidr_set_aff_inst(unsigned long mpidr,
				 unsigned char aff_inst,
				 int aff_lvl)
{
	unsigned long aff_shift;

	assert(aff_lvl <= MPIDR_AFFLVL3);

	/*
	 * Decide the number of bits to shift by depending upon
	 * the affinity level
	 */
	aff_shift = get_afflvl_shift(aff_lvl);

	/* Clear the existing affinity instance & set the new one */
	mpidr &= ~(((unsigned long) MPIDR_AFFLVL_MASK) << aff_shift);
	mpidr |= ((unsigned long) aff_inst) << aff_shift;

	return mpidr;
}

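/*
 * Illustrative sketch (not part of the original sources): a worked example of
 * the bit arithmetic above, assuming the usual 8-bit MPIDR affinity fields
 * with level 1 occupying bits [15:8], i.e. get_afflvl_shift(1) == 8.
 */
#if 0
static void mpidr_set_aff_inst_example(void)
{
	/* Start from cpu 0 of cluster 0 */
	unsigned long mpidr = 0;

	/* Select affinity instance 2 at level 1 i.e. cluster 2 */
	mpidr = mpidr_set_aff_inst(mpidr, 2, MPIDR_AFFLVL1);
	assert(mpidr == 0x200);

	/* Select affinity instance 1 at level 0 i.e. cpu 1 of cluster 2 */
	mpidr = mpidr_set_aff_inst(mpidr, 1, MPIDR_AFFLVL0);
	assert(mpidr == 0x201);
}
#endif
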
/*******************************************************************************
 * Simple routine to determine whether an affinity instance at a given level
 * in an mpidr exists or not.
 ******************************************************************************/
int psci_validate_mpidr(unsigned long mpidr, int level)
{
	aff_map_node *node;

	node = psci_get_aff_map_node(mpidr, level);
	if (node && (node->state & PSCI_AFF_PRESENT))
		return PSCI_E_SUCCESS;
	else
		return PSCI_E_INVALID_PARAMS;
}

/*******************************************************************************
 * Simple routine to determine the first affinity level instance that is
 * present between the start and end affinity levels. This helps to skip
 * handling of absent affinity levels while performing psci operations.
 * The start level can be greater or less than the end level depending upon
 * whether this routine is expected to search top down or bottom up.
 ******************************************************************************/
int psci_get_first_present_afflvl(unsigned long mpidr,
				  int start_afflvl,
				  int end_afflvl,
				  aff_map_node **node)
{
	int level;

	/* Check whether we have to search up or down */
	if (start_afflvl <= end_afflvl) {
		for (level = start_afflvl; level <= end_afflvl; level++) {
			*node = psci_get_aff_map_node(mpidr, level);
			if (*node && ((*node)->state & PSCI_AFF_PRESENT))
				break;
		}
	} else {
		for (level = start_afflvl; level >= end_afflvl; level--) {
			*node = psci_get_aff_map_node(mpidr, level);
			if (*node && ((*node)->state & PSCI_AFF_PRESENT))
				break;
		}
	}

	return level;
}

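/*
 * Illustrative sketch (not part of the original sources): a bottom-up search
 * for the first present affinity instance of the calling cpu, from level 0 up
 * to the highest level the platform implements.
 */
#if 0
static void first_present_afflvl_example(void)
{
	aff_map_node *node = NULL;
	int level;

	level = psci_get_first_present_afflvl(read_mpidr(),
					      MPIDR_AFFLVL0,
					      get_max_afflvl(),
					      &node);
	if (node)
		printf("First present affinity level: %d\n", level);
}
#endif
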
/*******************************************************************************
 * Recursively change the affinity state between the current and target
 * affinity levels. The target state matters only if we are starting from
 * affinity level 0 i.e. a cpu; otherwise the state depends upon the state of
 * the lower affinity levels.
 ******************************************************************************/
int psci_change_state(unsigned long mpidr,
		      int cur_afflvl,
		      int tgt_afflvl,
		      unsigned int tgt_state)
{
	int rc = PSCI_E_SUCCESS;
	unsigned int state;
	aff_map_node *aff_node;

	/* Sanity check the affinity levels */
	assert(tgt_afflvl >= cur_afflvl);

	aff_node = psci_get_aff_map_node(mpidr, cur_afflvl);
	assert(aff_node);

	/* TODO: Check whether the affinity level is present or absent */

	if (cur_afflvl == MPIDR_AFFLVL0) {
		psci_set_state(aff_node->state, tgt_state);
	} else {
		state = psci_calculate_affinity_state(aff_node);
		psci_set_state(aff_node->state, state);
	}

	if (cur_afflvl != tgt_afflvl)
		psci_change_state(mpidr, cur_afflvl + 1, tgt_afflvl, tgt_state);

	return rc;
}

/*******************************************************************************
 * This routine does the heavy lifting for psci_change_state(). It examines the
 * state of each affinity instance at the next lower affinity level and decides
 * its final state accordingly. If a lower affinity instance is ON then the
 * higher affinity instance is ON. If all the lower affinity instances are OFF
 * then the higher affinity instance is OFF. If at least one lower affinity
 * instance is SUSPENDED then the higher affinity instance is SUSPENDED. If
 * only a single lower affinity instance is ON_PENDING then the higher affinity
 * instance is ON_PENDING as well.
 ******************************************************************************/
unsigned int psci_calculate_affinity_state(aff_map_node *aff_node)
{
	unsigned int ctr;
	unsigned int aff_count, hi_aff_state;
	unsigned long tempidr;
	aff_map_node *lo_aff_node;

	/* Cannot calculate the lowest affinity state. It is simply assigned */
	assert(aff_node->level > MPIDR_AFFLVL0);

	/*
	 * Find the number of affinity instances at level X-1 e.g. number of
	 * cpus in a cluster. The level X state depends upon the state of each
	 * instance at level X-1
	 */
	hi_aff_state = PSCI_STATE_OFF;
	aff_count = plat_get_aff_count(aff_node->level - 1, aff_node->mpidr);
	for (ctr = 0; ctr < aff_count; ctr++) {

		/*
		 * Create an mpidr for each lower affinity level (X-1). Use
		 * their states to influence the higher affinity state (X).
		 */
		tempidr = mpidr_set_aff_inst(aff_node->mpidr,
					     ctr,
					     aff_node->level - 1);
		lo_aff_node = psci_get_aff_map_node(tempidr,
						    aff_node->level - 1);
		assert(lo_aff_node);

		/* Continue only if the cpu exists within the cluster */
		if (!(lo_aff_node->state & PSCI_AFF_PRESENT))
			continue;

		switch (psci_get_state(lo_aff_node->state)) {

		/*
		 * If any lower affinity is on within the cluster, then
		 * the higher affinity is on.
		 */
		case PSCI_STATE_ON:
			return PSCI_STATE_ON;

		/*
		 * At least one X-1 needs to be suspended for X to be suspended
		 * but it is effectively on for the affinity_info call.
		 * SUSPEND > ON_PENDING > OFF.
		 */
		case PSCI_STATE_SUSPEND:
			hi_aff_state = PSCI_STATE_SUSPEND;
			continue;

		/*
		 * At least one X-1 needs to be on_pending & the rest off for X
		 * to be on_pending. ON_PENDING > OFF.
		 */
		case PSCI_STATE_ON_PENDING:
			if (hi_aff_state != PSCI_STATE_SUSPEND)
				hi_aff_state = PSCI_STATE_ON_PENDING;
			continue;

		/* Higher affinity is off if all lower affinities are off. */
		case PSCI_STATE_OFF:
			continue;

		default:
			assert(0);
		}
	}

	return hi_aff_state;
}

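/*
 * Illustrative note (not part of the original sources): the precedence rule
 * implemented above, ON > SUSPEND > ON_PENDING > OFF, shown for a hypothetical
 * two-cpu cluster:
 *
 *     {ON,         <any state>} -> ON
 *     {SUSPEND,    ON_PENDING}  -> SUSPEND
 *     {ON_PENDING, OFF}         -> ON_PENDING
 *     {OFF,        OFF}         -> OFF
 */
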
/*******************************************************************************
 * This function retrieves all the stashed information needed to correctly
 * resume a cpu's execution in the non-secure state after it has been
 * physically powered on i.e. turned ON or resumed from SUSPEND.
 ******************************************************************************/
void psci_get_ns_entry_info(unsigned int index)
{
	unsigned long sctlr = 0, scr, el_status, id_aa64pfr0;
	gp_regs *ns_gp_regs;

	scr = read_scr();

	/* Switch to the non-secure view of the registers */
	write_scr(scr | SCR_NS_BIT);

	/* Find out which EL we are going to */
	id_aa64pfr0 = read_id_aa64pfr0_el1();
	el_status = (id_aa64pfr0 >> ID_AA64PFR0_EL2_SHIFT) &
		ID_AA64PFR0_ELX_MASK;

	/* Restore endianness */
	if (psci_ns_entry_info[index].sctlr & SCTLR_EE_BIT)
		sctlr |= SCTLR_EE_BIT;
	else
		sctlr &= ~SCTLR_EE_BIT;

	/* Turn off MMU and Caching */
	sctlr &= ~(SCTLR_M_BIT | SCTLR_C_BIT);

	/* Set the register width */
	if (psci_ns_entry_info[index].scr & SCR_RW_BIT)
		scr |= SCR_RW_BIT;
	else
		scr &= ~SCR_RW_BIT;

	scr |= SCR_NS_BIT;

	if (el_status)
		write_sctlr_el2(sctlr);
	else
		write_sctlr_el1(sctlr);

	/* Fulfill the cpu_on entry reqs. as per the psci spec */
	write_scr(scr);
	write_elr(psci_ns_entry_info[index].eret_info.entrypoint);

	/*
	 * Set the general purpose registers to ~0 upon entry into the
	 * non-secure world except for x0 which should contain the
	 * context id & spsr. This is done directly on the "would be"
	 * stack pointer. Prior to entry into the non-secure world, an
	 * offset equivalent to the size of the 'gp_regs' structure is
	 * added to the sp. This general purpose register context is
	 * retrieved then.
	 */
	ns_gp_regs = (gp_regs *) platform_get_stack(read_mpidr());
	ns_gp_regs--;
	memset(ns_gp_regs, ~0, sizeof(*ns_gp_regs));
	ns_gp_regs->x0 = psci_ns_entry_info[index].context_id;
	ns_gp_regs->spsr = psci_ns_entry_info[index].eret_info.spsr;
}

/*******************************************************************************
 * This function retrieves and stashes all the information needed to correctly
 * resume a cpu's execution in the non-secure state after it has been
 * physically powered on i.e. turned ON or resumed from SUSPEND. This is done
 * prior to turning it on or before suspending it.
 ******************************************************************************/
int psci_set_ns_entry_info(unsigned int index,
			   unsigned long entrypoint,
			   unsigned long context_id)
{
	int rc = PSCI_E_SUCCESS;
	unsigned int rw, mode, ee, spsr = 0;
	unsigned long id_aa64pfr0 = read_id_aa64pfr0_el1(), scr = read_scr();
	unsigned long el_status;

	/* Figure out which mode we enter the non-secure world in */
	el_status = (id_aa64pfr0 >> ID_AA64PFR0_EL2_SHIFT) &
		ID_AA64PFR0_ELX_MASK;

	/*
	 * Figure out whether the cpu enters the non-secure address space
	 * in aarch32 or aarch64
	 */
	rw = scr & SCR_RW_BIT;
	if (rw) {

		/*
		 * Check whether a Thumb entry point has been provided for an
		 * aarch64 EL
		 */
		if (entrypoint & 0x1)
			return PSCI_E_INVALID_PARAMS;

		if (el_status && (scr & SCR_HCE_BIT)) {
			mode = MODE_EL2;
			ee = read_sctlr_el2() & SCTLR_EE_BIT;
		} else {
			mode = MODE_EL1;
			ee = read_sctlr_el1() & SCTLR_EE_BIT;
		}

		spsr = DAIF_DBG_BIT | DAIF_ABT_BIT;
		spsr |= DAIF_IRQ_BIT | DAIF_FIQ_BIT;
		spsr <<= PSR_DAIF_SHIFT;
		spsr |= make_spsr(mode, MODE_SP_ELX, !rw);

		psci_ns_entry_info[index].sctlr |= ee;
		psci_ns_entry_info[index].scr |= SCR_RW_BIT;
	} else {

		/* Check whether aarch32 has to be entered in Thumb mode */
		if (entrypoint & 0x1)
			spsr = SPSR32_T_BIT;

		if (el_status && (scr & SCR_HCE_BIT)) {
			mode = AARCH32_MODE_HYP;
			ee = read_sctlr_el2() & SCTLR_EE_BIT;
		} else {
			mode = AARCH32_MODE_SVC;
			ee = read_sctlr_el1() & SCTLR_EE_BIT;
		}

		/*
		 * TODO: Choose async. exception bits if HYP mode is not
		 * implemented according to the values of SCR.{AW, FW} bits
		 */
		spsr |= DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;
		spsr <<= PSR_DAIF_SHIFT;
		if (ee)
			spsr |= SPSR32_EE_BIT;
		spsr |= mode;

		/* Ensure that the CPSR.E and SCTLR.EE bits match */
		psci_ns_entry_info[index].sctlr |= ee;
		psci_ns_entry_info[index].scr &= ~SCR_RW_BIT;
	}

	psci_ns_entry_info[index].eret_info.entrypoint = entrypoint;
	psci_ns_entry_info[index].eret_info.spsr = spsr;
	psci_ns_entry_info[index].context_id = context_id;

	return rc;
}

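/*
 * Illustrative sketch (not part of the original sources): how a CPU_ON
 * implementation might stash the non-secure entry state before power-on and
 * restore it on the warm boot path. 'target_idx', 'ns_entrypoint' and
 * 'ns_context_id' are hypothetical names for values a real caller would
 * derive from the CPU_ON SMC arguments.
 */
#if 0
static int cpu_on_entry_info_example(unsigned int target_idx,
				     unsigned long ns_entrypoint,
				     unsigned long ns_context_id)
{
	int rc;

	/* Stash the entry information before powering the cpu on */
	rc = psci_set_ns_entry_info(target_idx, ns_entrypoint, ns_context_id);
	if (rc != PSCI_E_SUCCESS)
		return rc;

	/* ... power the cpu on; its warm boot path would then call ... */
	psci_get_ns_entry_info(target_idx);

	return rc;
}
#endif
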
/*******************************************************************************
 * An affinity level could be on, on_pending, suspended or off. These are the
 * logical states it can be in. Physically it is either off or on. When it is
 * in the state on_pending then it is about to be turned on. It is not possible
 * to tell whether that has actually happened or not. So we err on the side of
 * caution & treat the affinity level as being turned off.
 ******************************************************************************/
inline unsigned int psci_get_phys_state(unsigned int aff_state)
{
	return (aff_state != PSCI_STATE_ON ? PSCI_STATE_OFF : PSCI_STATE_ON);
}

unsigned int psci_get_aff_phys_state(aff_map_node *aff_node)
{
	unsigned int aff_state;

	aff_state = psci_get_state(aff_node->state);
	return psci_get_phys_state(aff_state);
}

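/*
 * Illustrative note (not part of the original sources): the logical to
 * physical state mapping implemented by psci_get_phys_state() above.
 *
 *     PSCI_STATE_ON         -> PSCI_STATE_ON
 *     PSCI_STATE_ON_PENDING -> PSCI_STATE_OFF (conservative, see above)
 *     PSCI_STATE_SUSPEND    -> PSCI_STATE_OFF
 *     PSCI_STATE_OFF        -> PSCI_STATE_OFF
 */
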
/*******************************************************************************
 * Generic handler which is called when a cpu is physically powered on. It
 * recurses through all the affinity levels performing generic, architectural,
 * platform setup and state management e.g. for a cluster that's been powered
 * on, it will call the platform specific code which will enable coherency at
 * the interconnect level. For a cpu it could mean turning on the MMU etc.
 *
 * This function traverses from the lowest to the highest affinity level
 * implemented by the platform. Since it's recursive, for each call the
 * 'cur_afflvl' & 'tgt_afflvl' parameters keep track of which level we are at
 * and which level we need to get to respectively. Locks are picked up along
 * the way so that when the lowest affinity level is hit, state management can
 * be safely done. Prior to this, each affinity level does its bookkeeping as
 * per the state out of reset.
 *
 * CAUTION: This function is called with coherent stacks so that coherency and
 * the mmu can be turned on safely.
 ******************************************************************************/
unsigned int psci_afflvl_power_on_finish(unsigned long mpidr,
					 int cur_afflvl,
					 int tgt_afflvl,
					 afflvl_power_on_finisher *pon_handlers)
{
	unsigned int prev_state, next_state, rc = PSCI_E_SUCCESS;
	aff_map_node *aff_node;
	int level;

	mpidr &= MPIDR_AFFINITY_MASK;

	/*
	 * Some affinity instances at levels between the current and
	 * target levels could be absent in the mpidr. Skip them and
	 * start from the first present instance.
	 */
	level = psci_get_first_present_afflvl(mpidr,
					      cur_afflvl,
					      tgt_afflvl,
					      &aff_node);
	/*
	 * Return if there are no more affinity instances beyond this
	 * level to process. Else ensure that the returned affinity
	 * node makes sense.
	 */
	if (aff_node == NULL)
		return rc;

	assert(level == aff_node->level);

	/*
	 * This function acquires the lock corresponding to each
	 * affinity level so that by the time we hit the highest
	 * affinity level, the system topology has been snapshotted and
	 * state management can be done safely.
	 */
	bakery_lock_get(mpidr, &aff_node->lock);

	/* Keep the old and new state handy */
	prev_state = psci_get_state(aff_node->state);
	next_state = PSCI_STATE_ON;

	/* Perform generic, architecture and platform specific handling */
	rc = pon_handlers[level](mpidr, aff_node, prev_state);
	if (rc != PSCI_E_SUCCESS) {
		psci_set_state(aff_node->state, prev_state);
		goto exit;
	}

	/*
	 * State management: Update the states if this is the highest
	 * affinity level requested else pass the job to the next level.
	 */
	if (aff_node->level != tgt_afflvl) {
		rc = psci_afflvl_power_on_finish(mpidr,
						 level + 1,
						 tgt_afflvl,
						 pon_handlers);
	} else {
		psci_change_state(mpidr, MPIDR_AFFLVL0, tgt_afflvl, next_state);
	}

	/* If all has gone as per plan then this cpu should be marked as ON */
	if (level == MPIDR_AFFLVL0) {
		next_state = psci_get_state(aff_node->state);
		assert(next_state == PSCI_STATE_ON);
	}

exit:
	bakery_lock_release(mpidr, &aff_node->lock);
	return rc;
}
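
/*
 * Illustrative sketch (not part of the original sources): how a warm boot
 * path might invoke the finisher above, walking from level 0 up to the
 * platform's highest level. 'psci_afflvl_on_finishers' is a hypothetical name
 * for the per-level handler table; the real table lives with the cpu_on code.
 */
#if 0
static void power_on_finish_example(void)
{
	unsigned int rc;

	rc = psci_afflvl_power_on_finish(read_mpidr(),
					 MPIDR_AFFLVL0,
					 get_max_afflvl(),
					 psci_afflvl_on_finishers);
	assert(rc == PSCI_E_SUCCESS);
}
#endif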