/*
 * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <debug.h>
#include <platform.h>
#include <string.h>
#include "psci_private.h"

/*
 * SPD power management operations, expected to be supplied by the registered
 * SPD on successful SP initialization.
 */
const spd_pm_ops_t *psci_spd_pm;
/*******************************************************************************
 * Arrays that hold the platform's power domain tree information for state
 * management of power domains.
 * Each node in the array 'psci_non_cpu_pd_nodes' corresponds to a power domain
 * which is an ancestor of a CPU power domain.
 * Each node in the array 'psci_cpu_pd_nodes' corresponds to a cpu power domain.
 ******************************************************************************/
non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS]
#if USE_COHERENT_MEM
__attribute__ ((section("tzfw_coherent_mem")))
#endif
;

cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * Pointer to functions exported by the platform to complete power mgmt. ops
 ******************************************************************************/
const plat_pm_ops_t *psci_plat_pm_ops;

/*******************************************************************************
 * Check that the maximum power level supported by the platform makes sense
 ******************************************************************************/
CASSERT(PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL && \
        PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL, \
        assert_platform_max_pwrlvl_check);

/*******************************************************************************
 * This function is passed a cpu_index and the highest level in the topology
 * tree. It iterates through the nodes to find the highest power level at which
 * a domain is physically powered off.
 ******************************************************************************/
uint32_t psci_find_max_phys_off_pwrlvl(uint32_t end_pwrlvl,
                                       unsigned int cpu_idx)
{
        int max_pwrlvl, level;
        unsigned int parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;

        if (psci_get_phys_state(cpu_idx, PSCI_CPU_PWR_LVL) != PSCI_STATE_OFF)
                return PSCI_INVALID_DATA;

        max_pwrlvl = PSCI_CPU_PWR_LVL;

        for (level = PSCI_CPU_PWR_LVL + 1; level <= end_pwrlvl; level++) {
                if (psci_get_phys_state(parent_idx, level) == PSCI_STATE_OFF)
                        max_pwrlvl = level;

                parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
        }

        return max_pwrlvl;
}

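/*
 * Illustrative walk-through (hypothetical topology, not mandated by the
 * framework): take cpu power level 0, a cluster at level 1 and the system at
 * level 2. If cpu 0 is physically OFF, its cluster is OFF but the system
 * level is still ON, then psci_find_max_phys_off_pwrlvl(2, 0) returns 1,
 * i.e. the cluster level. If cpu 0 is not physically OFF, PSCI_INVALID_DATA
 * is returned instead.
 */
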
/*******************************************************************************
 * This function verifies that all the other cores in the system have been
 * turned OFF and the current CPU is the last running CPU in the system.
 * Returns 1 (true) if the current CPU is the last ON CPU or 0 (false)
 * otherwise.
 ******************************************************************************/
unsigned int psci_is_last_on_cpu(void)
{
        unsigned long mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;
        unsigned int i;

        for (i = 0; i < PLATFORM_CORE_COUNT; i++) {
                if (psci_cpu_pd_nodes[i].mpidr == mpidr) {
                        assert(psci_get_state(i, PSCI_CPU_PWR_LVL)
                                        == PSCI_STATE_ON);
                        continue;
                }

                if (psci_get_state(i, PSCI_CPU_PWR_LVL) != PSCI_STATE_OFF)
                        return 0;
        }

        return 1;
}

/*******************************************************************************
 * Routine to return the maximum power level to traverse to after a cpu has
 * been physically powered up. It is expected to be called immediately after
 * reset from assembler code.
 ******************************************************************************/
int get_power_on_target_pwrlvl(void)
{
        int pwrlvl;

#if DEBUG
        unsigned int state;

        /*
         * Sanity check the state of the cpu. It should be either suspended or
         * "on pending".
         */
        state = psci_get_state(plat_my_core_pos(), PSCI_CPU_PWR_LVL);
        assert(state == PSCI_STATE_SUSPEND || state == PSCI_STATE_ON_PENDING);
#endif

        /*
         * Assume that this cpu was suspended and retrieve its target power
         * level. If it is invalid then it could only have been turned off
         * earlier. PLAT_MAX_PWR_LVL will be the highest power level a
         * cpu can be turned off to.
         */
        pwrlvl = psci_get_suspend_pwrlvl();
        if (pwrlvl == PSCI_INVALID_DATA)
                pwrlvl = PLAT_MAX_PWR_LVL;
        return pwrlvl;
}

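/*
 * For example (assuming the cluster sits at power level 1): a cpu that
 * suspended to the cluster level wakes up with a recorded suspend power level
 * of 1, so its power up path only needs to traverse up to the cluster. A cpu
 * that was previously turned OFF has no recorded suspend level
 * (PSCI_INVALID_DATA), so the traversal defaults to PLAT_MAX_PWR_LVL.
 */
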
/*******************************************************************************
 * PSCI helper function to get the parent nodes corresponding to a cpu_index.
 ******************************************************************************/
void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
                                      int end_lvl,
                                      unsigned int node_index[])
{
        unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node;
        int i;

        for (i = PSCI_CPU_PWR_LVL + 1; i <= end_lvl; i++) {
                *node_index++ = parent_node;
                parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node;
        }
}

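/*
 * Example of how the helper above fills 'node_index' (hypothetical two-level
 * ancestry): with end_lvl = 2, node_index[0] receives the cpu's parent (its
 * cluster node) and node_index[1] receives the cluster's parent (the system
 * node). In general node_index[lvl - 1] holds the ancestor at power level
 * 'lvl', which is how psci_release_pwr_domain_locks() indexes it below.
 */
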
/*******************************************************************************
 * This function is passed a cpu_index, the highest level in the topology
 * tree and the state which each node should transition to. It updates the
 * state of each node between the specified power levels.
 ******************************************************************************/
void psci_do_state_coordination(int end_pwrlvl,
                                unsigned int cpu_idx,
                                uint32_t state)
{
        int level;
        unsigned int parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;

        psci_set_state(cpu_idx, state, PSCI_CPU_PWR_LVL);

        for (level = PSCI_CPU_PWR_LVL + 1; level <= end_pwrlvl; level++) {
                psci_set_state(parent_idx, state, level);
                parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
        }
}

/*******************************************************************************
 * This function is passed a cpu_index and the highest level in the topology
 * tree that the operation should be applied to. It picks up locks in order of
 * increasing power domain level in the range specified.
 ******************************************************************************/
void psci_acquire_pwr_domain_locks(int end_pwrlvl, unsigned int cpu_idx)
{
        unsigned int parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
        int level;

        /* No locking required for level 0. Hence start locking from level 1 */
        for (level = PSCI_CPU_PWR_LVL + 1; level <= end_pwrlvl; level++) {
                psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]);
                parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
        }
}

/*******************************************************************************
 * This function is passed a cpu_index and the highest level in the topology
 * tree that the operation should be applied to. It releases the locks in order
 * of decreasing power domain level in the range specified.
 ******************************************************************************/
void psci_release_pwr_domain_locks(int end_pwrlvl, unsigned int cpu_idx)
{
        unsigned int parent_idx, parent_nodes[PLAT_MAX_PWR_LVL] = {0};
        int level;

        /* Get the parent nodes */
        psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);

        /* Unlock top down. No unlocking required for level 0. */
        for (level = end_pwrlvl; level >= PSCI_CPU_PWR_LVL + 1; level--) {
                parent_idx = parent_nodes[level - 1];
                psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]);
        }
}

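/*
 * Note on lock ordering: every cpu acquires its ancestor locks bottom-up
 * (cluster before system) and releases them top-down. Since all cpus follow
 * the same ascending order on acquisition, two cpus contending for a common
 * set of ancestor locks cannot deadlock.
 */
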
/*******************************************************************************
 * Simple routine to determine whether an mpidr is valid or not.
 ******************************************************************************/
int psci_validate_mpidr(unsigned long mpidr)
{
        if (plat_core_pos_by_mpidr(mpidr) < 0)
                return PSCI_E_INVALID_PARAMS;

        return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * This function determines the full entrypoint information for the requested
 * PSCI entrypoint on power on/resume and returns it.
 ******************************************************************************/
int psci_get_ns_ep_info(entry_point_info_t *ep,
                        uint64_t entrypoint, uint64_t context_id)
{
        uint32_t ep_attr, mode, sctlr, daif, ee;
        uint32_t ns_scr_el3 = read_scr_el3();
        uint32_t ns_sctlr_el1 = read_sctlr_el1();

        sctlr = ns_scr_el3 & SCR_HCE_BIT ? read_sctlr_el2() : ns_sctlr_el1;
        ee = 0;

        ep_attr = NON_SECURE | EP_ST_DISABLE;
        if (sctlr & SCTLR_EE_BIT) {
                ep_attr |= EP_EE_BIG;
                ee = 1;
        }
        SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);

        ep->pc = entrypoint;
        memset(&ep->args, 0, sizeof(ep->args));
        ep->args.arg0 = context_id;

        /*
         * Figure out whether the cpu enters the non-secure address space
         * in aarch32 or aarch64
         */
        if (ns_scr_el3 & SCR_RW_BIT) {

                /*
                 * Check whether a Thumb entry point has been provided for an
                 * aarch64 EL
                 */
                if (entrypoint & 0x1)
                        return PSCI_E_INVALID_PARAMS;

                mode = ns_scr_el3 & SCR_HCE_BIT ? MODE_EL2 : MODE_EL1;

                ep->spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
        } else {

                mode = ns_scr_el3 & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;

                /*
                 * TODO: Choose async. exception bits if HYP mode is not
                 * implemented according to the values of SCR.{AW, FW} bits
                 */
                daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;

                ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, daif);
        }

        return PSCI_E_SUCCESS;
}

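/*
 * Summary of the entry state selection above, based on the saved non-secure
 * SCR_EL3 bits:
 *
 *      SCR_EL3.RW   SCR_EL3.HCE   ->   entry state
 *      1            1                  aarch64 EL2
 *      1            0                  aarch64 EL1
 *      0            1                  aarch32 hyp
 *      0            0                  aarch32 svc
 *
 * An aarch64 entry point with bit 0 set (a Thumb address) is rejected with
 * PSCI_E_INVALID_PARAMS.
 */
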
/*******************************************************************************
 * This function takes an index and level of a power domain node in the
 * topology tree and returns its state. State of a non-leaf node needs to be
 * calculated.
 ******************************************************************************/
unsigned short psci_get_state(unsigned int idx,
                              int level)
{
        /* A cpu node just contains the state which can be directly returned */
        if (level == PSCI_CPU_PWR_LVL) {
                flush_cpu_data_by_index(idx, psci_svc_cpu_data.psci_state);
                return get_cpu_data_by_index(idx, psci_svc_cpu_data.psci_state);
        }

#if !USE_COHERENT_MEM
        flush_dcache_range((uint64_t) &psci_non_cpu_pd_nodes[idx],
                           sizeof(psci_non_cpu_pd_nodes[idx]));
#endif
        /*
         * For a power level higher than a cpu, the state has to be
         * calculated. It depends upon the value of the reference count
         * which is managed by each node at the next lower power level
         * e.g. for a cluster, each cpu increments/decrements the reference
         * count. If the reference count is 0 then the power level is
         * OFF else ON.
         */
        if (psci_non_cpu_pd_nodes[idx].ref_count)
                return PSCI_STATE_ON;
        else
                return PSCI_STATE_OFF;
}

/*******************************************************************************
 * This function takes an index and level of a power domain node in the
 * topology tree and a target state. State of a non-leaf node needs to be
 * converted to a reference count. State of a leaf node can be set directly.
 ******************************************************************************/
void psci_set_state(unsigned int idx,
                    unsigned short state,
                    int level)
{
        /*
         * For a power level higher than a cpu, the state is used
         * to decide whether the reference count is incremented or
         * decremented. Entry into the ON_PENDING state does not have any
         * effect.
         */
        if (level > PSCI_CPU_PWR_LVL) {
                switch (state) {
                case PSCI_STATE_ON:
                        psci_non_cpu_pd_nodes[idx].ref_count++;
                        break;
                case PSCI_STATE_OFF:
                case PSCI_STATE_SUSPEND:
                        psci_non_cpu_pd_nodes[idx].ref_count--;
                        break;
                case PSCI_STATE_ON_PENDING:
                        /*
                         * A power level higher than a cpu will not undergo
                         * a state change when it is about to be turned on
                         */
                        return;
                default:
                        assert(0);
                }

                /* Flush the updated reference count if it is not coherent */
#if !USE_COHERENT_MEM
                flush_dcache_range((uint64_t) &psci_non_cpu_pd_nodes[idx],
                                   sizeof(psci_non_cpu_pd_nodes[idx]));
#endif
        } else {
                set_cpu_data_by_index(idx, psci_svc_cpu_data.psci_state, state);
                flush_cpu_data_by_index(idx, psci_svc_cpu_data.psci_state);
        }
}

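/*
 * Example of the reference counting used by psci_set_state() and
 * psci_get_state() (hypothetical cluster of four cpus): each cpu entering the
 * ON state bumps the cluster node's ref_count and each cpu entering OFF or
 * SUSPEND drops it, so the cluster reads back as ON while ref_count > 0 and
 * as OFF once the last cpu has left; no explicit cluster state is stored.
 */
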
/*******************************************************************************
 * A power domain could be on, on_pending, suspended or off. These are the
 * logical states it can be in. Physically, it is either off or on. When it is
 * in the state on_pending then it is about to be turned on. It is not possible
 * to tell whether that's actually happened or not. So we err on the side of
 * caution & treat the power domain as being turned off.
 ******************************************************************************/
unsigned short psci_get_phys_state(unsigned int idx,
                                   int level)
{
        unsigned int state;

        state = psci_get_state(idx, level);
        return get_phys_state(state);
}

/*******************************************************************************
 * Generic handler which is called when a cpu is physically powered on. It
 * traverses the node information and finds the highest power level powered
 * off and performs generic, architectural, platform setup and state management
 * to power on that power level and power levels below it.
 * e.g. For a cpu that's been powered on, it will call the platform specific
 * code to enable the gic cpu interface and for a cluster it will enable
 * coherency at the interconnect level in addition to gic cpu interface.
 ******************************************************************************/
void psci_power_up_finish(int end_pwrlvl,
                          pwrlvl_power_on_finisher_t pon_handler)
{
        unsigned int cpu_idx = plat_my_core_pos();
        unsigned int max_phys_off_pwrlvl;

        /*
         * This function acquires the lock corresponding to each power
         * level so that by the time all locks are taken, the system topology
         * has been snapshotted and state management can be done safely.
         */
        psci_acquire_pwr_domain_locks(end_pwrlvl,
                                      cpu_idx);

        max_phys_off_pwrlvl = psci_find_max_phys_off_pwrlvl(end_pwrlvl,
                                                            cpu_idx);
        assert(max_phys_off_pwrlvl != PSCI_INVALID_DATA);

        /* Perform generic, architecture and platform specific handling */
        pon_handler(cpu_idx, max_phys_off_pwrlvl);

        /*
         * This function updates the state of each power domain instance
         * corresponding to the cpu index in the range of power levels
         * specified.
         */
        psci_do_state_coordination(end_pwrlvl,
                                   cpu_idx,
                                   PSCI_STATE_ON);

        /*
         * This loop releases the lock corresponding to each power level
         * in the reverse order to which they were acquired.
         */
        psci_release_pwr_domain_locks(end_pwrlvl,
                                      cpu_idx);
}

/*******************************************************************************
 * This function initializes the set of hooks that PSCI invokes as part of power
 * management operation. The power management hooks are expected to be provided
 * by the SPD, after it finishes all its initialization.
 ******************************************************************************/
void psci_register_spd_pm_hook(const spd_pm_ops_t *pm)
{
        assert(pm);
        psci_spd_pm = pm;

        if (pm->svc_migrate)
                psci_caps |= define_psci_cap(PSCI_MIG_AARCH64);

        if (pm->svc_migrate_info)
                psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64)
                        | define_psci_cap(PSCI_MIG_INFO_TYPE);
}

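/*
 * A minimal registration sketch (illustrative only; the handler names are
 * hypothetical):
 *
 *      static const spd_pm_ops_t my_spd_pm = {
 *              .svc_migrate = my_spd_migrate,
 *              .svc_migrate_info = my_spd_migrate_info,
 *              ... remaining handlers as required by the SPD ...
 *      };
 *
 *      psci_register_spd_pm_hook(&my_spd_pm);
 *
 * MIGRATE related capabilities are advertised only when the optional
 * svc_migrate/svc_migrate_info hooks are supplied.
 */
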
/*******************************************************************************
 * This function invokes the migrate info hook in the spd_pm_ops. It performs
 * the necessary return value validation. If the Secure Payload is UP and
 * migrate capable, it returns the mpidr of the CPU on which the Secure payload
 * is resident through the mpidr parameter. Else the value of the parameter on
 * return is undefined.
 ******************************************************************************/
int psci_spd_migrate_info(uint64_t *mpidr)
{
        int rc;

        if (!psci_spd_pm || !psci_spd_pm->svc_migrate_info)
                return PSCI_E_NOT_SUPPORTED;

        rc = psci_spd_pm->svc_migrate_info(mpidr);

        assert(rc == PSCI_TOS_UP_MIG_CAP || rc == PSCI_TOS_NOT_UP_MIG_CAP \
                || rc == PSCI_TOS_NOT_PRESENT_MP || rc == PSCI_E_NOT_SUPPORTED);

        return rc;
}


/*******************************************************************************
 * This function prints the state of all power domains present in the
 * system.
 ******************************************************************************/
void psci_print_power_domain_map(void)
{
#if LOG_LEVEL >= LOG_LEVEL_INFO
        unsigned int idx, state;

        /* This array maps to the PSCI_STATE_X definitions in psci.h */
        static const char *psci_state_str[] = {
                "ON",
                "OFF",
                "ON_PENDING",
                "SUSPEND"
        };

        INFO("PSCI Power Domain Map:\n");
        for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - PLATFORM_CORE_COUNT); idx++) {
                state = psci_get_state(idx, psci_non_cpu_pd_nodes[idx].level);
                INFO("  Domain Node : Level %u, parent_node %d, State %s\n",
                                psci_non_cpu_pd_nodes[idx].level,
                                psci_non_cpu_pd_nodes[idx].parent_node,
                                psci_state_str[state]);
        }

        for (idx = 0; idx < PLATFORM_CORE_COUNT; idx++) {
                state = psci_get_state(idx, PSCI_CPU_PWR_LVL);
                INFO("  CPU Node : MPID 0x%lx, parent_node %d, State %s\n",
                                psci_cpu_pd_nodes[idx].mpidr,
                                psci_cpu_pd_nodes[idx].parent_node,
                                psci_state_str[state]);
        }
#endif
}