/*
2 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * Redistributions of source code must retain the above copyright notice, this
8 * list of conditions and the following disclaimer.
9 *
10 * Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
14 * Neither the name of ARM nor the names of its contributors may be used
15 * to endorse or promote products derived from this software without specific
16 * prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include <arch.h>
32#include <arch_helpers.h>
33#include <assert.h>
34#include <bl_common.h>
35#include <context.h>
36#include <context_mgmt.h>
37#include <platform.h>
38#include <stddef.h>
39#include "psci_private.h"
40
41/*******************************************************************************
42 * Per cpu non-secure contexts used to program the architectural state prior
43 * return to the normal world.
44 * TODO: Use the memory allocator to set aside memory for the contexts instead
45 * of relying on platform defined constants. Using PSCI_NUM_AFFS will be an
46 * overkill.
47 ******************************************************************************/
48static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT];
49
50/*******************************************************************************
51 * In a system, a certain number of affinity instances are present at an
52 * affinity level. The cumulative number of instances across all levels are
53 * stored in 'psci_aff_map'. The topology tree has been flattenned into this
54 * array. To retrieve nodes, information about the extents of each affinity
55 * level i.e. start index and end index needs to be present. 'psci_aff_limits'
56 * stores this information.
57 ******************************************************************************/
58aff_limits_node_t psci_aff_limits[MPIDR_MAX_AFFLVL + 1];
59
60/******************************************************************************
61 * Define the psci capability variable.
62 *****************************************************************************/
63uint32_t psci_caps;
64
65
66/*******************************************************************************
67 * Routines for retrieving the node corresponding to an affinity level instance
68 * in the mpidr. The first one uses binary search to find the node corresponding
69 * to the mpidr (key) at a particular affinity level. The second routine decides
70 * extents of the binary search at each affinity level.
71 ******************************************************************************/
72static int psci_aff_map_get_idx(unsigned long key,
73 int min_idx,
74 int max_idx)
75{
76 int mid;
77
78 /*
79 * Terminating condition: If the max and min indices have crossed paths
80 * during the binary search then the key has not been found.
81 */
82 if (max_idx < min_idx)
83 return PSCI_E_INVALID_PARAMS;
84
85 /*
86 * Make sure we are within array limits.
87 */
88 assert(min_idx >= 0 && max_idx < PSCI_NUM_AFFS);
89
90 /*
91 * Bisect the array around 'mid' and then recurse into the array chunk
92 * where the key is likely to be found. The mpidrs in each node in the
93 * 'psci_aff_map' for a given affinity level are stored in an ascending
94 * order which makes the binary search possible.
95 */
96 mid = min_idx + ((max_idx - min_idx) >> 1); /* Divide by 2 */
97
98 if (psci_aff_map[mid].mpidr > key)
99 return psci_aff_map_get_idx(key, min_idx, mid - 1);
100 else if (psci_aff_map[mid].mpidr < key)
101 return psci_aff_map_get_idx(key, mid + 1, max_idx);
102 else
103 return mid;
104}
105
106aff_map_node_t *psci_get_aff_map_node(unsigned long mpidr, int aff_lvl)
107{
108 int rc;
109
110 if (aff_lvl > PLATFORM_MAX_AFFLVL)
111 return NULL;
112
113 /* Right shift the mpidr to the required affinity level */
114 mpidr = mpidr_mask_lower_afflvls(mpidr, aff_lvl);
115
116 rc = psci_aff_map_get_idx(mpidr,
117 psci_aff_limits[aff_lvl].min,
118 psci_aff_limits[aff_lvl].max);
119 if (rc >= 0)
120 return &psci_aff_map[rc];
121 else
122 return NULL;
123}
124
125/*******************************************************************************
126 * This function populates an array with nodes corresponding to a given range of
127 * affinity levels in an mpidr. It returns successfully only when the affinity
128 * levels are correct, the mpidr is valid i.e. no affinity level is absent from
129 * the topology tree & the affinity instance at level 0 is not absent.
130 ******************************************************************************/
131int psci_get_aff_map_nodes(unsigned long mpidr,
132 int start_afflvl,
133 int end_afflvl,
134 aff_map_node_t *mpidr_nodes[])
135{
136 int rc = PSCI_E_INVALID_PARAMS, level;
137 aff_map_node_t *node;
138
139 rc = psci_check_afflvl_range(start_afflvl, end_afflvl);
140 if (rc != PSCI_E_SUCCESS)
141 return rc;
142
143 for (level = start_afflvl; level <= end_afflvl; level++) {
144
145 /*
146 * Grab the node for each affinity level. No affinity level
147 * can be missing as that would mean that the topology tree
148 * is corrupted.
149 */
150 node = psci_get_aff_map_node(mpidr, level);
151 if (node == NULL) {
152 rc = PSCI_E_INVALID_PARAMS;
153 break;
154 }
155
156 /*
157 * Skip absent affinity levels unless it's afffinity level 0.
158 * An absent cpu means that the mpidr is invalid. Save the
159 * pointer to the node for the present affinity level
160 */
161 if (!(node->state & PSCI_AFF_PRESENT)) {
162 if (level == MPIDR_AFFLVL0) {
163 rc = PSCI_E_INVALID_PARAMS;
164 break;
165 }
166
167 mpidr_nodes[level] = NULL;
168 } else
169 mpidr_nodes[level] = node;
170 }
171
172 return rc;
173}
174
175/*******************************************************************************
176 * Function which initializes the 'aff_map_node' corresponding to an affinity
177 * level instance. Each node has a unique mpidr, level and bakery lock. The data
178 * field is opaque and holds affinity level specific data e.g. for affinity
179 * level 0 it contains the index into arrays that hold the secure/non-secure
180 * state for a cpu that's been turned on/off
181 ******************************************************************************/
182static void psci_init_aff_map_node(unsigned long mpidr,
183 int level,
184 unsigned int idx)
185{
186 unsigned char state;
187 uint32_t linear_id;
188 psci_aff_map[idx].mpidr = mpidr;
189 psci_aff_map[idx].level = level;
190 psci_lock_init(psci_aff_map, idx);
191
192 /*
193 * If an affinity instance is present then mark it as OFF to begin with.
194 */
195 state = plat_get_aff_state(level, mpidr);
196 psci_aff_map[idx].state = state;
197
198 if (level == MPIDR_AFFLVL0) {
199
200 /*
201 * Mark the cpu as OFF. Higher affinity level reference counts
202 * have already been memset to 0
203 */
204 if (state & PSCI_AFF_PRESENT)
205 psci_set_state(&psci_aff_map[idx], PSCI_STATE_OFF);
206
207 /*
208 * Associate a non-secure context with this affinity
209 * instance through the context management library.
210 */
211 linear_id = platform_get_core_pos(mpidr);
212 assert(linear_id < PLATFORM_CORE_COUNT);
213
214 /* Invalidate the suspend context for the node */
215 set_cpu_data_by_index(linear_id,
216 psci_svc_cpu_data.power_state,
217 PSCI_INVALID_DATA);
218
Soby Mathewb48349e2015-06-29 16:30:12 +0100219 flush_cpu_data_by_index(linear_id, psci_svc_cpu_data);
220
221 cm_set_context_by_mpidr(mpidr,
222 (void *) &psci_ns_context[linear_id],
223 NON_SECURE);
224 }
225
226 return;
227}
228
229/*******************************************************************************
230 * Core routine used by the Breadth-First-Search algorithm to populate the
231 * affinity tree. Each level in the tree corresponds to an affinity level. This
232 * routine's aim is to traverse to the target affinity level and populate nodes
233 * in the 'psci_aff_map' for all the siblings at that level. It uses the current
234 * affinity level to keep track of how many levels from the root of the tree
235 * have been traversed. If the current affinity level != target affinity level,
236 * then the platform is asked to return the number of children that each
237 * affinity instance has at the current affinity level. Traversal is then done
238 * for each child at the next lower level i.e. current affinity level - 1.
239 *
240 * CAUTION: This routine assumes that affinity instance ids are allocated in a
241 * monotonically increasing manner at each affinity level in a mpidr starting
242 * from 0. If the platform breaks this assumption then this code will have to
243 * be reworked accordingly.
244 ******************************************************************************/
static unsigned int psci_init_aff_map(unsigned long mpidr,
				      unsigned int affmap_idx,
				      int cur_afflvl,
				      int tgt_afflvl)
{
	unsigned int ctr, aff_count;

	/* Traversal only ever descends towards the target level */
	assert(cur_afflvl >= tgt_afflvl);

	/*
	 * Find the number of siblings at the current affinity level &
	 * assert if there are none 'cause then we have been invoked with
	 * an invalid mpidr.
	 */
	aff_count = plat_get_aff_count(cur_afflvl, mpidr);
	assert(aff_count);

	if (tgt_afflvl < cur_afflvl) {
		/*
		 * Not yet at the target level: recurse one level down for
		 * each affinity instance at the current level, threading
		 * 'affmap_idx' through so nodes land contiguously.
		 */
		for (ctr = 0; ctr < aff_count; ctr++) {
			mpidr = mpidr_set_aff_inst(mpidr, ctr, cur_afflvl);
			affmap_idx = psci_init_aff_map(mpidr,
						       affmap_idx,
						       cur_afflvl - 1,
						       tgt_afflvl);
		}
	} else {
		/* At the target level: populate one node per sibling */
		for (ctr = 0; ctr < aff_count; ctr++, affmap_idx++) {
			mpidr = mpidr_set_aff_inst(mpidr, ctr, cur_afflvl);
			psci_init_aff_map_node(mpidr, cur_afflvl, affmap_idx);
		}

		/* affmap_idx is 1 greater than the max index of cur_afflvl */
		psci_aff_limits[cur_afflvl].max = affmap_idx - 1;
	}

	return affmap_idx;
}
282
283/*******************************************************************************
284 * This function initializes the topology tree by querying the platform. To do
285 * so, it's helper routines implement a Breadth-First-Search. At each affinity
286 * level the platform conveys the number of affinity instances that exist i.e.
287 * the affinity count. The algorithm populates the psci_aff_map recursively
288 * using this information. On a platform that implements two clusters of 4 cpus
289 * each, the populated aff_map_array would look like this:
290 *
291 * <- cpus cluster0 -><- cpus cluster1 ->
292 * ---------------------------------------------------
293 * | 0 | 1 | 0 | 1 | 2 | 3 | 0 | 1 | 2 | 3 |
294 * ---------------------------------------------------
295 * ^ ^
296 * cluster __| cpu __|
297 * limit limit
298 *
299 * The first 2 entries are of the cluster nodes. The next 4 entries are of cpus
300 * within cluster 0. The last 4 entries are of cpus within cluster 1.
301 * The 'psci_aff_limits' array contains the max & min index of each affinity
302 * level within the 'psci_aff_map' array. This allows restricting search of a
303 * node at an affinity level between the indices in the limits array.
304 ******************************************************************************/
int32_t psci_setup(void)
{
	unsigned long mpidr = read_mpidr();
	int afflvl, affmap_idx, max_afflvl;
	aff_map_node_t *node;

	psci_plat_pm_ops = NULL;

	/* Find out the maximum affinity level that the platform implements */
	max_afflvl = PLATFORM_MAX_AFFLVL;
	assert(max_afflvl <= MPIDR_MAX_AFFLVL);

	/*
	 * This call traverses the topology tree with help from the platform and
	 * populates the affinity map using a breadth-first-search recursively.
	 * We assume that the platform allocates affinity instance ids from 0
	 * onwards at each affinity level in the mpidr. FIRST_MPIDR = 0.0.0.0
	 */
	affmap_idx = 0;
	for (afflvl = max_afflvl; afflvl >= MPIDR_AFFLVL0; afflvl--) {
		affmap_idx = psci_init_aff_map(FIRST_MPIDR,
					       affmap_idx,
					       max_afflvl,
					       afflvl);
	}

#if !USE_COHERENT_MEM
	/*
	 * The psci_aff_map only needs flushing when it's not allocated in
	 * coherent memory.
	 */
	flush_dcache_range((uint64_t) &psci_aff_map, sizeof(psci_aff_map));
#endif

	/*
	 * Set the bounds for the affinity counts of each level in the map. Also
	 * flush out the entire array so that it's visible to subsequent power
	 * management operations. The 'psci_aff_limits' array is allocated in
	 * normal memory. It will be accessed when the mmu is off e.g. after
	 * reset. Hence it needs to be flushed.
	 */
	for (afflvl = MPIDR_AFFLVL0; afflvl < max_afflvl; afflvl++) {
		/* Each level starts right after the previous level's last node */
		psci_aff_limits[afflvl].min =
			psci_aff_limits[afflvl + 1].max + 1;
	}

	flush_dcache_range((unsigned long) psci_aff_limits,
			   sizeof(psci_aff_limits));

	/*
	 * Mark the affinity instances in our mpidr as ON. No need to lock as
	 * this is the primary cpu.
	 */
	mpidr &= MPIDR_AFFINITY_MASK;
	for (afflvl = MPIDR_AFFLVL0; afflvl <= max_afflvl; afflvl++) {

		node = psci_get_aff_map_node(mpidr, afflvl);
		assert(node);

		/* Mark each present node as ON. */
		if (node->state & PSCI_AFF_PRESENT)
			psci_set_state(node, PSCI_STATE_ON);
	}

	platform_setup_pm(&psci_plat_pm_ops);
	assert(psci_plat_pm_ops);

	/* Initialize the psci capability */
	psci_caps = PSCI_GENERIC_CAP;

	/* Advertise only the PSCI calls the platform hooks can back */
	if (psci_plat_pm_ops->affinst_off)
		psci_caps |= define_psci_cap(PSCI_CPU_OFF);
	if (psci_plat_pm_ops->affinst_on && psci_plat_pm_ops->affinst_on_finish)
		psci_caps |= define_psci_cap(PSCI_CPU_ON_AARCH64);
	if (psci_plat_pm_ops->affinst_suspend &&
			psci_plat_pm_ops->affinst_suspend_finish) {
		psci_caps |= define_psci_cap(PSCI_CPU_SUSPEND_AARCH64);
		if (psci_plat_pm_ops->get_sys_suspend_power_state)
			psci_caps |= define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64);
	}
	if (psci_plat_pm_ops->system_off)
		psci_caps |= define_psci_cap(PSCI_SYSTEM_OFF);
	if (psci_plat_pm_ops->system_reset)
		psci_caps |= define_psci_cap(PSCI_SYSTEM_RESET);

	return 0;
}