/*
 * Copyright (c) 2018, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <assert.h>
#include <debug.h>
#include <plat_topology.h>
#include <platform.h>
#include <stdlib.h>

#define CPU_INDEX_IS_VALID(_cpu_idx) \
        (((_cpu_idx) - tftf_pwr_domain_start_idx[0]) < PLATFORM_CORE_COUNT)

#define IS_A_CPU_NODE(_cpu_idx)         (tftf_pd_nodes[(_cpu_idx)].level == 0)

#define CPU_NODE_IS_VALID(_cpu_node) \
        (CPU_INDEX_IS_VALID(_cpu_node) && IS_A_CPU_NODE(_cpu_node))

/*
 * Global variable to check that the platform topology is not queried until it
 * has been set up.
 */
static unsigned int topology_setup_done;

/*
 * Store the start indices of power domains at various levels. This array makes
 * it easier to traverse the topology tree if the power domain level is known.
 */
unsigned int tftf_pwr_domain_start_idx[PLATFORM_MAX_AFFLVL + 1];

/* The grand array to store the platform power domain topology */
tftf_pwr_domain_node_t tftf_pd_nodes[PLATFORM_NUM_AFFS];
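
/*
 * Illustrative example (hypothetical platform, not tied to any real one):
 * with one system power domain, 2 clusters and 2 CPUs per cluster
 * (PLATFORM_MAX_AFFLVL == 2, PLATFORM_NUM_AFFS == 7), the nodes would be
 * laid out as follows:
 *
 *   tftf_pd_nodes[0]      system node    (level 2)
 *   tftf_pd_nodes[1-2]    cluster nodes  (level 1)
 *   tftf_pd_nodes[3-6]    CPU nodes      (level 0)
 *
 * and tftf_pwr_domain_start_idx[] = { 3, 1, 0 }.
 */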

#if DEBUG
/*
 * Debug function to display the platform topology.
 * Does not print absent affinity instances.
 */
static void dump_topology(void)
{
        unsigned int cluster_idx, cpu_idx, count;

        NOTICE("Platform topology:\n");

        NOTICE("  %u cluster(s)\n", tftf_get_total_clusters_count());
        NOTICE("  %u CPU(s) (total)\n\n", tftf_get_total_cpus_count());

        for (cluster_idx = PWR_DOMAIN_INIT;
             cluster_idx = tftf_get_next_peer_domain(cluster_idx, 1),
             cluster_idx != PWR_DOMAIN_INIT;) {
                count = 0;
                for (cpu_idx = tftf_pd_nodes[cluster_idx].cpu_start_node;
                     cpu_idx < (tftf_pd_nodes[cluster_idx].cpu_start_node +
                                tftf_pd_nodes[cluster_idx].ncpus);
                     cpu_idx++) {
                        if (tftf_pd_nodes[cpu_idx].is_present)
                                count++;
                }
                NOTICE("  Cluster #%u [%u CPUs]\n",
                       cluster_idx - tftf_pwr_domain_start_idx[1],
                       count);
                for (cpu_idx = PWR_DOMAIN_INIT;
                     cpu_idx = tftf_get_next_cpu_in_pwr_domain(cluster_idx, cpu_idx),
                     cpu_idx != PWR_DOMAIN_INIT;) {
                        NOTICE("    CPU #%u [MPID: 0x%x]\n",
                               cpu_idx - tftf_pwr_domain_start_idx[0],
                               tftf_get_mpidr_from_node(cpu_idx));
                }
        }
        NOTICE("\n");
}
#endif

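/*
 * Returns the number of present power domain instances at 'aff_lvl'. The
 * tftf_get_total_clusters_count() and tftf_get_total_cpus_count() helpers used
 * in dump_topology() above are assumed to be thin wrappers around this
 * function for levels 1 and 0 respectively.
 */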
unsigned int tftf_get_total_aff_count(unsigned int aff_lvl)
{
        unsigned int count = 0;
        unsigned int node_idx;

        assert(topology_setup_done == 1);

        if (aff_lvl > PLATFORM_MAX_AFFLVL)
                return count;

        node_idx = tftf_pwr_domain_start_idx[aff_lvl];

        while (tftf_pd_nodes[node_idx].level == aff_lvl) {
                if (tftf_pd_nodes[node_idx].is_present)
                        count++;
                node_idx++;
        }

        return count;
}

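/*
 * Returns the index of the next present peer power domain at 'pwr_lvl', or
 * PWR_DOMAIN_INIT when the level is exhausted. Typical usage (sketch, as in
 * dump_topology() above), iterating over all clusters:
 *
 *   for (idx = PWR_DOMAIN_INIT;
 *        idx = tftf_get_next_peer_domain(idx, 1), idx != PWR_DOMAIN_INIT;)
 *           ...;
 */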
unsigned int tftf_get_next_peer_domain(unsigned int pwr_domain_idx,
                                       unsigned int pwr_lvl)
{
        assert(topology_setup_done == 1);

        assert(pwr_lvl <= PLATFORM_MAX_AFFLVL);

        if (pwr_domain_idx == PWR_DOMAIN_INIT) {
                pwr_domain_idx = tftf_pwr_domain_start_idx[pwr_lvl];
                if (tftf_pd_nodes[pwr_domain_idx].is_present)
                        return pwr_domain_idx;
        }

        assert(pwr_domain_idx < PLATFORM_NUM_AFFS &&
               tftf_pd_nodes[pwr_domain_idx].level == pwr_lvl);

        for (++pwr_domain_idx; (pwr_domain_idx < PLATFORM_NUM_AFFS)
             && (tftf_pd_nodes[pwr_domain_idx].level == pwr_lvl);
             pwr_domain_idx++) {
                if (tftf_pd_nodes[pwr_domain_idx].is_present)
                        return pwr_domain_idx;
        }

        return PWR_DOMAIN_INIT;
}

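/*
 * Returns the index of the next present CPU node within the power domain
 * 'pwr_domain_idx', or PWR_DOMAIN_INIT when none is left. Typical usage
 * (sketch, as in dump_topology() above):
 *
 *   for (cpu = PWR_DOMAIN_INIT;
 *        cpu = tftf_get_next_cpu_in_pwr_domain(cluster, cpu),
 *        cpu != PWR_DOMAIN_INIT;)
 *           ...;
 */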
unsigned int tftf_get_next_cpu_in_pwr_domain(unsigned int pwr_domain_idx,
                                             unsigned int cpu_node)
{
        unsigned int cpu_end_node;

        assert(topology_setup_done == 1);
        assert(pwr_domain_idx != PWR_DOMAIN_INIT
               && pwr_domain_idx < PLATFORM_NUM_AFFS);

        if (cpu_node == PWR_DOMAIN_INIT) {
                cpu_node = tftf_pd_nodes[pwr_domain_idx].cpu_start_node;
                if (tftf_pd_nodes[cpu_node].is_present)
                        return cpu_node;
        }

        assert(CPU_NODE_IS_VALID(cpu_node));

        cpu_end_node = tftf_pd_nodes[pwr_domain_idx].cpu_start_node
                       + tftf_pd_nodes[pwr_domain_idx].ncpus - 1;

        assert(cpu_end_node < PLATFORM_NUM_AFFS);

        for (++cpu_node; cpu_node <= cpu_end_node; cpu_node++) {
                if (tftf_pd_nodes[cpu_node].is_present)
                        return cpu_node;
        }

        return PWR_DOMAIN_INIT;
}

/*
 * Helper function to get the parent nodes of a particular CPU power domain.
 * The parent indices are returned bottom-up: node_index[0] holds the level 1
 * ancestor and node_index[end_lvl - 1] the level 'end_lvl' ancestor of
 * 'cpu_node'.
 */
static void get_parent_pwr_domain_nodes(unsigned int cpu_node,
                                        unsigned int end_lvl,
                                        unsigned int node_index[])
{
        unsigned int parent_node = tftf_pd_nodes[cpu_node].parent_node;
        unsigned int i;

        for (i = 1; i <= end_lvl; i++) {
                node_index[i - 1] = parent_node;
                parent_node = tftf_pd_nodes[parent_node].parent_node;
        }
}

/*******************************************************************************
 * This function updates the cpu_start_node and ncpus fields for each of the
 * nodes in tftf_pd_nodes[]. It does so by comparing the parent nodes of each
 * CPU with the parent nodes of the previous CPU. The basic assumption for this
 * to work is that children of the same parent are allocated adjacent indices.
 * The platform should ensure this through a proper mapping of CPUs to indices
 * via the platform_get_core_pos() API.
 *
 * It also updates the 'is_present' field of the non-CPU power domains: a
 * parent domain is marked present if any of its child CPU nodes is present.
 ******************************************************************************/
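/*
 * For the illustrative 2-cluster/2-CPU layout sketched near the top of this
 * file (with all CPUs present), this would leave cluster node 1 with
 * cpu_start_node = 3, ncpus = 2 and cluster node 2 with cpu_start_node = 5,
 * ncpus = 2.
 */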
static void update_pwrlvl_limits(void)
{
        int cpu_id, j, is_present;
        unsigned int nodes_idx[PLATFORM_MAX_AFFLVL] = {-1};
        unsigned int temp_index[PLATFORM_MAX_AFFLVL];

        unsigned int cpu_node_offset = tftf_pwr_domain_start_idx[0];

        for (cpu_id = 0; cpu_id < PLATFORM_CORE_COUNT; cpu_id++) {
                get_parent_pwr_domain_nodes(cpu_id + cpu_node_offset,
                                            PLATFORM_MAX_AFFLVL,
                                            temp_index);
                is_present = tftf_pd_nodes[cpu_id + cpu_node_offset].is_present;

                for (j = PLATFORM_MAX_AFFLVL - 1; j >= 0; j--) {
                        if (temp_index[j] != nodes_idx[j]) {
                                nodes_idx[j] = temp_index[j];
                                tftf_pd_nodes[nodes_idx[j]].cpu_start_node
                                        = cpu_id + cpu_node_offset;
                                if (!tftf_pd_nodes[nodes_idx[j]].is_present)
                                        tftf_pd_nodes[nodes_idx[j]].is_present = is_present;
                        }
                        tftf_pd_nodes[nodes_idx[j]].ncpus++;
                }
        }
}

/******************************************************************************
 * This function populates the power domain topology array 'tftf_pd_nodes[]'
 * based on the power domain description retrieved from the platform layer.
 * It also updates the start index of each power domain level in
 * tftf_pwr_domain_start_idx[]. The uninitialized fields of 'tftf_pd_nodes[]'
 * for the non-CPU power domains are initialized later in
 * update_pwrlvl_limits().
 *****************************************************************************/
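/*
 * For the illustrative 2-cluster/2-CPU layout sketched near the top of this
 * file, the platform would export the tree description { 1, 2, 2, 2 }: one
 * system node, two clusters under it, and two CPUs under each cluster.
 */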
static void populate_power_domain_tree(void)
{
        unsigned int i, j = 0, num_nodes_at_lvl = 1, num_nodes_at_next_lvl,
                     node_index = 0, parent_idx = 0, num_children;
        int num_level = PLATFORM_MAX_AFFLVL;
        const unsigned char *plat_array;

        plat_array = tftf_plat_get_pwr_domain_tree_desc();

        /*
         * For each level the inputs are:
         * - number of nodes at this level in plat_array i.e. num_nodes_at_lvl
         *   This is the sum of values of nodes at the parent level.
         * - Index of first entry at this level in the plat_array i.e.
         *   parent_idx.
         * - Index of first free entry in tftf_pd_nodes[].
         */
        while (num_level >= 0) {
                num_nodes_at_next_lvl = 0;

                /* Store the start index for every level */
                tftf_pwr_domain_start_idx[num_level] = node_index;

                /*
                 * For each entry (parent node) at this level in the plat_array:
                 * - Find the number of children
                 * - Allocate a node in a power domain array for each child
                 * - Set the parent of the child to the parent_node_index - 1
                 * - Increment parent_node_index to point to the next parent
                 * - Accumulate the number of children at next level.
                 */
                for (i = 0; i < num_nodes_at_lvl; i++) {
                        assert(parent_idx <=
                               PLATFORM_NUM_AFFS - PLATFORM_CORE_COUNT);
                        num_children = plat_array[parent_idx];

                        for (j = node_index;
                             j < node_index + num_children; j++) {
                                /* Initialize the power domain node */
                                tftf_pd_nodes[j].parent_node = parent_idx - 1;
                                tftf_pd_nodes[j].level = num_level;

                                /* Additional initializations for CPU power domains */
                                if (num_level == 0) {
                                        /* Calculate the cpu id from node index */
                                        int cpu_id = j - tftf_pwr_domain_start_idx[0];

                                        assert(cpu_id < PLATFORM_CORE_COUNT);

                                        /* Set the mpidr of cpu node */
                                        tftf_pd_nodes[j].mpidr =
                                                tftf_plat_get_mpidr(cpu_id);
                                        if (tftf_pd_nodes[j].mpidr != INVALID_MPID)
                                                tftf_pd_nodes[j].is_present = 1;

                                        tftf_pd_nodes[j].cpu_start_node = j;
                                        tftf_pd_nodes[j].ncpus = 1;
                                }
                        }
                        node_index = j;
                        num_nodes_at_next_lvl += num_children;
                        parent_idx++;
                }

                num_nodes_at_lvl = num_nodes_at_next_lvl;
                num_level--;
        }

        /* Validate the sanity of the array exported by the platform */
        assert(j == PLATFORM_NUM_AFFS);
}


void tftf_init_topology(void)
{
        populate_power_domain_tree();
        update_pwrlvl_limits();
        topology_setup_done = 1;
#if DEBUG
        dump_topology();
#endif
}

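/*
 * Returns the index of the next present CPU node after 'cpu_node', or
 * PWR_DOMAIN_INIT once all CPUs have been visited. The for_each_cpu() helper
 * used further down in this file presumably iterates by calling this function
 * repeatedly, along the lines of:
 *
 *   for (cpu = PWR_DOMAIN_INIT;
 *        (cpu = tftf_topology_next_cpu(cpu)) != PWR_DOMAIN_INIT;)
 *           ...;
 */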
unsigned int tftf_topology_next_cpu(unsigned int cpu_node)
{
        assert(topology_setup_done == 1);

        if (cpu_node == PWR_DOMAIN_INIT) {
                cpu_node = tftf_pwr_domain_start_idx[0];
                if (tftf_pd_nodes[cpu_node].is_present)
                        return cpu_node;
        }

        assert(CPU_NODE_IS_VALID(cpu_node));

        for (++cpu_node; cpu_node < PLATFORM_NUM_AFFS; cpu_node++) {
                if (tftf_pd_nodes[cpu_node].is_present)
                        return cpu_node;
        }

        return PWR_DOMAIN_INIT;
}


unsigned int tftf_get_mpidr_from_node(unsigned int cpu_node)
{
        assert(topology_setup_done == 1);

        assert(CPU_NODE_IS_VALID(cpu_node));

        if (tftf_pd_nodes[cpu_node].is_present)
                return tftf_pd_nodes[cpu_node].mpidr;

        return INVALID_MPID;
}

unsigned int tftf_find_any_cpu_other_than(unsigned exclude_mpid)
{
        unsigned int cpu_node, mpidr;

        for_each_cpu(cpu_node) {
                mpidr = tftf_get_mpidr_from_node(cpu_node);
                if (mpidr != exclude_mpid)
                        return mpidr;
        }

        return INVALID_MPID;
}

unsigned int tftf_find_random_cpu_other_than(unsigned int exclude_mpid)
{
        unsigned int cpu_node, mpidr;
        unsigned int possible_cpus_cnt = 0;
        unsigned int possible_cpus[PLATFORM_CORE_COUNT];

        for_each_cpu(cpu_node) {
                mpidr = tftf_get_mpidr_from_node(cpu_node);
                if (mpidr != exclude_mpid)
                        possible_cpus[possible_cpus_cnt++] = mpidr;
        }

        if (possible_cpus_cnt == 0)
                return INVALID_MPID;

        return possible_cpus[rand() % possible_cpus_cnt];
}