blob: ce4da9599fd528e534a1bc19d8c2e7c63bdabb5e [file] [log] [blame]
Soby Mathewb48349e2015-06-29 16:30:12 +01001/*
Soby Mathew4067dc32015-05-05 16:33:16 +01002 * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
Soby Mathewb48349e2015-06-29 16:30:12 +01003 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * Redistributions of source code must retain the above copyright notice, this
8 * list of conditions and the following disclaimer.
9 *
10 * Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
14 * Neither the name of ARM nor the names of its contributors may be used
15 * to endorse or promote products derived from this software without specific
16 * prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include <arch.h>
32#include <arch_helpers.h>
33#include <assert.h>
34#include <bl_common.h>
35#include <context.h>
36#include <context_mgmt.h>
37#include <platform.h>
38#include <stddef.h>
39#include "psci_private.h"
40
41/*******************************************************************************
42 * Per cpu non-secure contexts used to program the architectural state prior
43 * return to the normal world.
44 * TODO: Use the memory allocator to set aside memory for the contexts instead
Soby Mathew82dcc032015-04-08 17:42:06 +010045 * of relying on platform defined constants.
Soby Mathewb48349e2015-06-29 16:30:12 +010046 ******************************************************************************/
47static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT];
48
/******************************************************************************
 * Define the psci capability variable.
 * Bitmask of the optional PSCI functions this platform supports; populated
 * in psci_setup() from the plat_psci_ops handlers via define_psci_cap().
 *****************************************************************************/
uint32_t psci_caps;
53
Soby Mathewb48349e2015-06-29 16:30:12 +010054/*******************************************************************************
Soby Mathew82dcc032015-04-08 17:42:06 +010055 * Function which initializes the 'psci_non_cpu_pd_nodes' or the
56 * 'psci_cpu_pd_nodes' corresponding to the power level.
Soby Mathewb48349e2015-06-29 16:30:12 +010057 ******************************************************************************/
Soby Mathew82dcc032015-04-08 17:42:06 +010058static void psci_init_pwr_domain_node(int node_idx, int parent_idx, int level)
Soby Mathewb48349e2015-06-29 16:30:12 +010059{
Soby Mathew82dcc032015-04-08 17:42:06 +010060 if (level > PSCI_CPU_PWR_LVL) {
61 psci_non_cpu_pd_nodes[node_idx].level = level;
62 psci_lock_init(psci_non_cpu_pd_nodes, node_idx);
63 psci_non_cpu_pd_nodes[node_idx].parent_node = parent_idx;
Soby Mathew8ee24982015-04-07 12:16:56 +010064 psci_non_cpu_pd_nodes[node_idx].local_state =
65 PLAT_MAX_OFF_STATE;
Soby Mathewb48349e2015-06-29 16:30:12 +010066 } else {
Soby Mathew8ee24982015-04-07 12:16:56 +010067 psci_cpu_data_t *svc_cpu_data;
Soby Mathewb48349e2015-06-29 16:30:12 +010068
Soby Mathew82dcc032015-04-08 17:42:06 +010069 psci_cpu_pd_nodes[node_idx].parent_node = parent_idx;
70
71 /* Initialize with an invalid mpidr */
72 psci_cpu_pd_nodes[node_idx].mpidr = PSCI_INVALID_MPIDR;
73
Soby Mathew8ee24982015-04-07 12:16:56 +010074 svc_cpu_data =
75 &(_cpu_data_by_index(node_idx)->psci_svc_cpu_data);
Soby Mathew82dcc032015-04-08 17:42:06 +010076
Soby Mathew8ee24982015-04-07 12:16:56 +010077 /* Set the Affinity Info for the cores as OFF */
78 svc_cpu_data->aff_info_state = AFF_STATE_OFF;
Soby Mathew82dcc032015-04-08 17:42:06 +010079
Soby Mathew8ee24982015-04-07 12:16:56 +010080 /* Invalidate the suspend level for the cpu */
81 svc_cpu_data->target_pwrlvl = PSCI_INVALID_DATA;
82
83 /* Set the power state to OFF state */
84 svc_cpu_data->local_state = PLAT_MAX_OFF_STATE;
85
86 flush_dcache_range((uint64_t)svc_cpu_data,
87 sizeof(*svc_cpu_data));
Soby Mathew82dcc032015-04-08 17:42:06 +010088
89 cm_set_context_by_index(node_idx,
90 (void *) &psci_ns_context[node_idx],
91 NON_SECURE);
Soby Mathewb48349e2015-06-29 16:30:12 +010092 }
Soby Mathewb48349e2015-06-29 16:30:12 +010093}
94
95/*******************************************************************************
Soby Mathew82dcc032015-04-08 17:42:06 +010096 * This functions updates cpu_start_idx and ncpus field for each of the node in
97 * psci_non_cpu_pd_nodes[]. It does so by comparing the parent nodes of each of
98 * the CPUs and check whether they match with the parent of the previous
99 * CPU. The basic assumption for this work is that children of the same parent
100 * are allocated adjacent indices. The platform should ensure this though proper
101 * mapping of the CPUs to indices via plat_core_pos_by_mpidr() and
102 * plat_my_core_pos() APIs.
103 *******************************************************************************/
104static void psci_update_pwrlvl_limits(void)
105{
106 int cpu_idx, j;
107 unsigned int nodes_idx[PLAT_MAX_PWR_LVL] = {0};
108 unsigned int temp_index[PLAT_MAX_PWR_LVL];
109
110 for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
111 psci_get_parent_pwr_domain_nodes(cpu_idx,
112 PLAT_MAX_PWR_LVL,
113 temp_index);
114 for (j = PLAT_MAX_PWR_LVL - 1; j >= 0; j--) {
115 if (temp_index[j] != nodes_idx[j]) {
116 nodes_idx[j] = temp_index[j];
117 psci_non_cpu_pd_nodes[nodes_idx[j]].cpu_start_idx
118 = cpu_idx;
119 }
120 psci_non_cpu_pd_nodes[nodes_idx[j]].ncpus++;
121 }
122 }
123}
124
125/*******************************************************************************
126 * Core routine to populate the power domain tree. The tree descriptor passed by
127 * the platform is populated breadth-first and the first entry in the map
128 * informs the number of root power domains. The parent nodes of the root nodes
129 * will point to an invalid entry(-1).
130 ******************************************************************************/
131static void populate_power_domain_tree(const unsigned char *topology)
132{
133 unsigned int i, j = 0, num_nodes_at_lvl = 1, num_nodes_at_next_lvl;
134 unsigned int node_index = 0, parent_node_index = 0, num_children;
135 int level = PLAT_MAX_PWR_LVL;
136
137 /*
138 * For each level the inputs are:
139 * - number of nodes at this level in plat_array i.e. num_nodes_at_level
140 * This is the sum of values of nodes at the parent level.
141 * - Index of first entry at this level in the plat_array i.e.
142 * parent_node_index.
143 * - Index of first free entry in psci_non_cpu_pd_nodes[] or
144 * psci_cpu_pd_nodes[] i.e. node_index depending upon the level.
145 */
146 while (level >= PSCI_CPU_PWR_LVL) {
147 num_nodes_at_next_lvl = 0;
148 /*
149 * For each entry (parent node) at this level in the plat_array:
150 * - Find the number of children
151 * - Allocate a node in a power domain array for each child
152 * - Set the parent of the child to the parent_node_index - 1
153 * - Increment parent_node_index to point to the next parent
154 * - Accumulate the number of children at next level.
155 */
156 for (i = 0; i < num_nodes_at_lvl; i++) {
157 assert(parent_node_index <=
158 PSCI_NUM_NON_CPU_PWR_DOMAINS);
159 num_children = topology[parent_node_index];
160
161 for (j = node_index;
162 j < node_index + num_children; j++)
163 psci_init_pwr_domain_node(j,
164 parent_node_index - 1,
165 level);
166
167 node_index = j;
168 num_nodes_at_next_lvl += num_children;
169 parent_node_index++;
170 }
171
172 num_nodes_at_lvl = num_nodes_at_next_lvl;
173 level--;
174
175 /* Reset the index for the cpu power domain array */
176 if (level == PSCI_CPU_PWR_LVL)
177 node_index = 0;
178 }
179
180 /* Validate the sanity of array exported by the platform */
181 assert(j == PLATFORM_CORE_COUNT);
182
183#if !USE_COHERENT_MEM
184 /* Flush the non CPU power domain data to memory */
185 flush_dcache_range((uint64_t) &psci_non_cpu_pd_nodes,
186 sizeof(psci_non_cpu_pd_nodes));
187#endif
188}
189
190/*******************************************************************************
191 * This function initializes the power domain topology tree by querying the
192 * platform. The power domain nodes higher than the CPU are populated in the
193 * array psci_non_cpu_pd_nodes[] and the CPU power domains are populated in
194 * psci_cpu_pd_nodes[]. The platform exports its static topology map through the
195 * populate_power_domain_topology_tree() API. The algorithm populates the
196 * psci_non_cpu_pd_nodes and psci_cpu_pd_nodes iteratively by using this
197 * topology map. On a platform that implements two clusters of 2 cpus each, and
198 * supporting 3 domain levels, the populated psci_non_cpu_pd_nodes would look
199 * like this:
Soby Mathewb48349e2015-06-29 16:30:12 +0100200 *
Soby Mathewb48349e2015-06-29 16:30:12 +0100201 * ---------------------------------------------------
Soby Mathew82dcc032015-04-08 17:42:06 +0100202 * | system node | cluster 0 node | cluster 1 node |
Soby Mathewb48349e2015-06-29 16:30:12 +0100203 * ---------------------------------------------------
Soby Mathewb48349e2015-06-29 16:30:12 +0100204 *
Soby Mathew82dcc032015-04-08 17:42:06 +0100205 * And populated psci_cpu_pd_nodes would look like this :
206 * <- cpus cluster0 -><- cpus cluster1 ->
207 * ------------------------------------------------
208 * | CPU 0 | CPU 1 | CPU 2 | CPU 3 |
209 * ------------------------------------------------
Soby Mathewb48349e2015-06-29 16:30:12 +0100210 ******************************************************************************/
211int32_t psci_setup(void)
212{
Soby Mathew82dcc032015-04-08 17:42:06 +0100213 const unsigned char *topology_tree;
Soby Mathewb48349e2015-06-29 16:30:12 +0100214
Soby Mathew82dcc032015-04-08 17:42:06 +0100215 /* Query the topology map from the platform */
216 topology_tree = plat_get_power_domain_tree_desc();
Soby Mathewb48349e2015-06-29 16:30:12 +0100217
Soby Mathew82dcc032015-04-08 17:42:06 +0100218 /* Populate the power domain arrays using the platform topology map */
219 populate_power_domain_tree(topology_tree);
Soby Mathewb48349e2015-06-29 16:30:12 +0100220
Soby Mathew82dcc032015-04-08 17:42:06 +0100221 /* Update the CPU limits for each node in psci_non_cpu_pd_nodes */
222 psci_update_pwrlvl_limits();
223
224 /* Populate the mpidr field of cpu node for this CPU */
225 psci_cpu_pd_nodes[plat_my_core_pos()].mpidr =
226 read_mpidr() & MPIDR_AFFINITY_MASK;
Soby Mathewb48349e2015-06-29 16:30:12 +0100227
228#if !USE_COHERENT_MEM
229 /*
Soby Mathew82dcc032015-04-08 17:42:06 +0100230 * The psci_non_cpu_pd_nodes only needs flushing when it's not allocated in
231 * coherent memory.
Soby Mathewb48349e2015-06-29 16:30:12 +0100232 */
Soby Mathew82dcc032015-04-08 17:42:06 +0100233 flush_dcache_range((uint64_t) &psci_non_cpu_pd_nodes,
234 sizeof(psci_non_cpu_pd_nodes));
Soby Mathewb48349e2015-06-29 16:30:12 +0100235#endif
236
Soby Mathew82dcc032015-04-08 17:42:06 +0100237 flush_dcache_range((uint64_t) &psci_cpu_pd_nodes,
238 sizeof(psci_cpu_pd_nodes));
Soby Mathewb48349e2015-06-29 16:30:12 +0100239
Soby Mathew8ee24982015-04-07 12:16:56 +0100240 psci_init_req_local_pwr_states();
Soby Mathewb48349e2015-06-29 16:30:12 +0100241
Soby Mathew8ee24982015-04-07 12:16:56 +0100242 /*
243 * Set the requested and target state of this CPU and all the higher
244 * power domain levels for this CPU to run.
245 */
246 psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);
247
Sandrine Bailleuxeb975f52015-06-11 10:46:48 +0100248 plat_setup_psci_ops((uintptr_t)psci_entrypoint,
249 &psci_plat_pm_ops);
Soby Mathewb48349e2015-06-29 16:30:12 +0100250 assert(psci_plat_pm_ops);
251
252 /* Initialize the psci capability */
253 psci_caps = PSCI_GENERIC_CAP;
254
Soby Mathew4067dc32015-05-05 16:33:16 +0100255 if (psci_plat_pm_ops->pwr_domain_off)
Soby Mathewb48349e2015-06-29 16:30:12 +0100256 psci_caps |= define_psci_cap(PSCI_CPU_OFF);
Soby Mathew4067dc32015-05-05 16:33:16 +0100257 if (psci_plat_pm_ops->pwr_domain_on &&
258 psci_plat_pm_ops->pwr_domain_on_finish)
Soby Mathewb48349e2015-06-29 16:30:12 +0100259 psci_caps |= define_psci_cap(PSCI_CPU_ON_AARCH64);
Soby Mathew4067dc32015-05-05 16:33:16 +0100260 if (psci_plat_pm_ops->pwr_domain_suspend &&
261 psci_plat_pm_ops->pwr_domain_suspend_finish) {
Soby Mathewb48349e2015-06-29 16:30:12 +0100262 psci_caps |= define_psci_cap(PSCI_CPU_SUSPEND_AARCH64);
263 if (psci_plat_pm_ops->get_sys_suspend_power_state)
264 psci_caps |= define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64);
265 }
266 if (psci_plat_pm_ops->system_off)
267 psci_caps |= define_psci_cap(PSCI_SYSTEM_OFF);
268 if (psci_plat_pm_ops->system_reset)
269 psci_caps |= define_psci_cap(PSCI_SYSTEM_RESET);
270
271 return 0;
272}