/*
 * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <assert.h>
#include <debug.h>
#include <plat_topology.h>
#include <platform.h>
#include <stdlib.h>

#define CPU_INDEX_IS_VALID(_cpu_idx) \
	(((_cpu_idx) - tftf_pwr_domain_start_idx[0]) < PLATFORM_CORE_COUNT)

#define IS_A_CPU_NODE(_cpu_idx)		(tftf_pd_nodes[(_cpu_idx)].level == 0)

#define CPU_NODE_IS_VALID(_cpu_node) \
	(CPU_INDEX_IS_VALID(_cpu_node) && IS_A_CPU_NODE(_cpu_node))

/*
 * Global variable to check that the platform topology is not queried until it
 * has been set up.
 */
static unsigned int topology_setup_done;

/*
 * Store the start indices of power domains at various levels. This array
 * makes it easier to traverse the topology tree if the power domain level
 * is known.
 */
unsigned int tftf_pwr_domain_start_idx[PLATFORM_MAX_AFFLVL + 1];

/* The grand array to store the platform power domain topology */
tftf_pwr_domain_node_t tftf_pd_nodes[PLATFORM_NUM_AFFS];

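/*
 * Illustration (hypothetical platform, not mandated by the framework): with
 * PLATFORM_MAX_AFFLVL == 1, two clusters and four CPUs per cluster,
 * populate_power_domain_tree() below lays these arrays out as follows:
 *
 *   tftf_pd_nodes[0..1]          : cluster nodes (level 1)
 *   tftf_pd_nodes[2..9]          : CPU nodes (level 0)
 *   tftf_pwr_domain_start_idx[1] : 0
 *   tftf_pwr_domain_start_idx[0] : 2
 */
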
#if DEBUG
/*
 * Debug function to display the platform topology.
 * Does not print absent affinity instances.
 */
static void dump_topology(void)
{
	unsigned int cluster_idx, cpu_idx, count;

	NOTICE("Platform topology:\n");

	NOTICE("  %u cluster(s)\n", tftf_get_total_clusters_count());
	NOTICE("  %u CPU(s) (total)\n\n", tftf_get_total_cpus_count());

	for (cluster_idx = PWR_DOMAIN_INIT;
	     cluster_idx = tftf_get_next_peer_domain(cluster_idx, 1),
	     cluster_idx != PWR_DOMAIN_INIT;) {
		count = 0;
		for (cpu_idx = tftf_pd_nodes[cluster_idx].cpu_start_node;
		     cpu_idx < (tftf_pd_nodes[cluster_idx].cpu_start_node +
				tftf_pd_nodes[cluster_idx].ncpus);
		     cpu_idx++) {
			if (tftf_pd_nodes[cpu_idx].is_present)
				count++;
		}
		NOTICE("  Cluster #%u [%u CPUs]\n",
		       cluster_idx - tftf_pwr_domain_start_idx[1],
		       count);
		for (cpu_idx = PWR_DOMAIN_INIT;
		     cpu_idx = tftf_get_next_cpu_in_pwr_domain(cluster_idx, cpu_idx),
		     cpu_idx != PWR_DOMAIN_INIT;) {
			NOTICE("    CPU #%u [MPID: 0x%x]\n",
			       cpu_idx - tftf_pwr_domain_start_idx[0],
			       tftf_get_mpidr_from_node(cpu_idx));
		}
	}
	NOTICE("\n");
}
#endif

unsigned int tftf_get_total_aff_count(unsigned int aff_lvl)
{
	unsigned int count = 0;
	unsigned int node_idx;

	assert(topology_setup_done == 1);

	if (aff_lvl > PLATFORM_MAX_AFFLVL)
		return count;

	node_idx = tftf_pwr_domain_start_idx[aff_lvl];

	while ((node_idx < PLATFORM_NUM_AFFS) &&
	       (tftf_pd_nodes[node_idx].level == aff_lvl)) {
		if (tftf_pd_nodes[node_idx].is_present)
			count++;
		node_idx++;
	}

	return count;
}
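
/*
 * Example usage (illustrative sketch): counting the present power domains at
 * a given level. The tftf_get_total_clusters_count() and
 * tftf_get_total_cpus_count() helpers used by dump_topology() above are
 * presumably thin wrappers around calls like these:
 *
 *	unsigned int ncpus = tftf_get_total_aff_count(0);
 *	unsigned int nclusters = tftf_get_total_aff_count(1);
 */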

unsigned int tftf_get_next_peer_domain(unsigned int pwr_domain_idx,
				       unsigned int pwr_lvl)
{
	assert(topology_setup_done == 1);

	assert(pwr_lvl <= PLATFORM_MAX_AFFLVL);

	if (pwr_domain_idx == PWR_DOMAIN_INIT) {
		pwr_domain_idx = tftf_pwr_domain_start_idx[pwr_lvl];
		if (tftf_pd_nodes[pwr_domain_idx].is_present)
			return pwr_domain_idx;
	}

	assert(pwr_domain_idx < PLATFORM_NUM_AFFS &&
	       tftf_pd_nodes[pwr_domain_idx].level == pwr_lvl);

	for (++pwr_domain_idx; (pwr_domain_idx < PLATFORM_NUM_AFFS)
	     && (tftf_pd_nodes[pwr_domain_idx].level == pwr_lvl);
	     pwr_domain_idx++) {
		if (tftf_pd_nodes[pwr_domain_idx].is_present)
			return pwr_domain_idx;
	}

	return PWR_DOMAIN_INIT;
}
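
/*
 * Example usage (illustrative sketch): iterating over all cluster-level power
 * domains, mirroring the idiom used by dump_topology() above. The iteration
 * starts from PWR_DOMAIN_INIT and stops when PWR_DOMAIN_INIT is returned:
 *
 *	unsigned int cluster_node;
 *
 *	for (cluster_node = PWR_DOMAIN_INIT;
 *	     cluster_node = tftf_get_next_peer_domain(cluster_node, 1),
 *	     cluster_node != PWR_DOMAIN_INIT;) {
 *		... use cluster_node ...
 *	}
 */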

unsigned int tftf_get_next_cpu_in_pwr_domain(unsigned int pwr_domain_idx,
					     unsigned int cpu_node)
{
	unsigned int cpu_end_node;

	assert(topology_setup_done == 1);
	assert(pwr_domain_idx != PWR_DOMAIN_INIT
	       && pwr_domain_idx < PLATFORM_NUM_AFFS);

	if (cpu_node == PWR_DOMAIN_INIT) {
		cpu_node = tftf_pd_nodes[pwr_domain_idx].cpu_start_node;
		if (tftf_pd_nodes[cpu_node].is_present)
			return cpu_node;
	}

	assert(CPU_NODE_IS_VALID(cpu_node));

	cpu_end_node = tftf_pd_nodes[pwr_domain_idx].cpu_start_node
		       + tftf_pd_nodes[pwr_domain_idx].ncpus - 1;

	assert(cpu_end_node < PLATFORM_NUM_AFFS);

	for (++cpu_node; cpu_node <= cpu_end_node; cpu_node++) {
		if (tftf_pd_nodes[cpu_node].is_present)
			return cpu_node;
	}

	return PWR_DOMAIN_INIT;
}
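
/*
 * Example usage (illustrative sketch): visiting every present CPU within a
 * given power domain, again following the dump_topology() idiom. Here
 * 'cluster_node' stands for any valid non-CPU power domain index:
 *
 *	unsigned int cpu_node;
 *
 *	for (cpu_node = PWR_DOMAIN_INIT;
 *	     cpu_node = tftf_get_next_cpu_in_pwr_domain(cluster_node, cpu_node),
 *	     cpu_node != PWR_DOMAIN_INIT;) {
 *		... use cpu_node ...
 *	}
 */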

/*
 * Helper function to get the parent nodes of a particular CPU power
 * domain. On return, node_index[i - 1] holds the index of the ancestor node
 * at power level 'i', for each level from 1 up to 'end_lvl'.
 */
static void get_parent_pwr_domain_nodes(unsigned int cpu_node,
					unsigned int end_lvl,
					unsigned int node_index[])
{
	unsigned int parent_node = tftf_pd_nodes[cpu_node].parent_node;
	unsigned int i;

	for (i = 1; i <= end_lvl; i++) {
		node_index[i - 1] = parent_node;
		parent_node = tftf_pd_nodes[parent_node].parent_node;
	}
}

/*******************************************************************************
 * This function updates the cpu_start_node and ncpus fields of each node in
 * tftf_pd_nodes[]. It does so by comparing the parent nodes of each CPU with
 * those of the previous CPU. The basic assumption for this to work is that
 * children of the same parent are allocated adjacent indices. The platform
 * should ensure this through a proper mapping of CPUs to indices via the
 * platform_get_core_pos() API.
 *
 * It also updates the 'is_present' field of the non-CPU power domains, based
 * on the 'is_present' field of their child CPU nodes.
 *******************************************************************************/
static void update_pwrlvl_limits(void)
{
	int cpu_id, j, is_present;
	unsigned int nodes_idx[PLATFORM_MAX_AFFLVL] = {-1};
	unsigned int temp_index[PLATFORM_MAX_AFFLVL];

	unsigned int cpu_node_offset = tftf_pwr_domain_start_idx[0];

	for (cpu_id = 0; cpu_id < PLATFORM_CORE_COUNT; cpu_id++) {
		get_parent_pwr_domain_nodes(cpu_id + cpu_node_offset,
					    PLATFORM_MAX_AFFLVL,
					    temp_index);
		is_present = tftf_pd_nodes[cpu_id + cpu_node_offset].is_present;

		for (j = PLATFORM_MAX_AFFLVL - 1; j >= 0; j--) {
			if (temp_index[j] != nodes_idx[j]) {
				nodes_idx[j] = temp_index[j];
				tftf_pd_nodes[nodes_idx[j]].cpu_start_node
					= cpu_id + cpu_node_offset;
				if (!tftf_pd_nodes[nodes_idx[j]].is_present)
					tftf_pd_nodes[nodes_idx[j]].is_present = is_present;
			}
			tftf_pd_nodes[nodes_idx[j]].ncpus++;
		}
	}
}
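
/*
 * Worked example (the same hypothetical two-cluster platform as above,
 * assuming platform_get_core_pos() gives the CPUs of a cluster adjacent
 * indices): after this pass, cluster node 0 ends up with cpu_start_node == 2
 * and ncpus == 4, while cluster node 1 ends up with cpu_start_node == 6 and
 * ncpus == 4.
 */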

/******************************************************************************
 * This function populates the power domain topology array 'tftf_pd_nodes[]'
 * based on the power domain description retrieved from the platform layer.
 * It also updates the start index of each power domain level in
 * tftf_pwr_domain_start_idx[]. The uninitialized fields of 'tftf_pd_nodes[]'
 * for the non-CPU power domains will be initialized in
 * update_pwrlvl_limits().
 *****************************************************************************/
static void populate_power_domain_tree(void)
{
	unsigned int i, j = 0, num_nodes_at_lvl = 1, num_nodes_at_next_lvl,
		     node_index = 0, parent_idx = 0, num_children;
	int num_level = PLATFORM_MAX_AFFLVL;
	const unsigned char *plat_array;

	plat_array = tftf_plat_get_pwr_domain_tree_desc();

	/*
	 * For each level, the inputs are:
	 * - The number of nodes at this level in plat_array, i.e.
	 *   num_nodes_at_lvl. This is the sum of the values of the nodes at
	 *   the parent level.
	 * - The index of the first entry at this level in plat_array, i.e.
	 *   parent_idx.
	 * - The index of the first free entry in tftf_pd_nodes[].
	 */
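	/*
	 * Illustrative example (hypothetical platform): with
	 * PLATFORM_MAX_AFFLVL == 1, two clusters and four CPUs per cluster,
	 * the platform would be expected to export plat_array = { 2, 4, 4 }:
	 * one entry giving the number of clusters, then one entry per cluster
	 * giving its number of CPUs. This yields PLATFORM_NUM_AFFS == 10
	 * entries in tftf_pd_nodes[].
	 */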
	while (num_level >= 0) {
		num_nodes_at_next_lvl = 0;

		/* Store the start index for every level */
		tftf_pwr_domain_start_idx[num_level] = node_index;

		/*
		 * For each entry (parent node) at this level in the plat_array:
		 * - Find the number of children
		 * - Allocate a node in the power domain array for each child
		 * - Set the parent of the child to 'parent_idx - 1'
		 * - Increment parent_idx to point to the next parent
		 * - Accumulate the number of children at the next level.
		 */
		for (i = 0; i < num_nodes_at_lvl; i++) {
			assert(parent_idx <=
			       PLATFORM_NUM_AFFS - PLATFORM_CORE_COUNT);
			num_children = plat_array[parent_idx];

			for (j = node_index;
			     j < node_index + num_children; j++) {
				/* Initialize the power domain node */
				tftf_pd_nodes[j].parent_node = parent_idx - 1;
				tftf_pd_nodes[j].level = num_level;

				/* Additional initializations for CPU power domains */
				if (num_level == 0) {
					/* Calculate the cpu id from node index */
					int cpu_id = j - tftf_pwr_domain_start_idx[0];

					assert(cpu_id < PLATFORM_CORE_COUNT);

					/* Set the mpidr of cpu node */
					tftf_pd_nodes[j].mpidr =
						tftf_plat_get_mpidr(cpu_id);
					if (tftf_pd_nodes[j].mpidr != INVALID_MPID)
						tftf_pd_nodes[j].is_present = 1;

					tftf_pd_nodes[j].cpu_start_node = j;
					tftf_pd_nodes[j].ncpus = 1;
				}
			}
			node_index = j;
			num_nodes_at_next_lvl += num_children;
			parent_idx++;
		}

		num_nodes_at_lvl = num_nodes_at_next_lvl;
		num_level--;
	}

	/* Validate the sanity of the array exported by the platform */
	assert(j == PLATFORM_NUM_AFFS);
}

void tftf_init_topology(void)
{
	populate_power_domain_tree();
	update_pwrlvl_limits();
	topology_setup_done = 1;
#if DEBUG
	dump_topology();
#endif
}

unsigned int tftf_topology_next_cpu(unsigned int cpu_node)
{
	assert(topology_setup_done == 1);

	if (cpu_node == PWR_DOMAIN_INIT) {
		cpu_node = tftf_pwr_domain_start_idx[0];
		if (tftf_pd_nodes[cpu_node].is_present)
			return cpu_node;
	}

	assert(CPU_NODE_IS_VALID(cpu_node));

	for (++cpu_node; cpu_node < PLATFORM_NUM_AFFS; cpu_node++) {
		if (tftf_pd_nodes[cpu_node].is_present)
			return cpu_node;
	}

	return PWR_DOMAIN_INIT;
}
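
/*
 * Example usage (illustrative sketch): iterating over every present CPU in
 * the system. The for_each_cpu() macro used further down in this file is
 * presumably a wrapper around this same loop:
 *
 *	unsigned int cpu_node, mpid;
 *
 *	for (cpu_node = tftf_topology_next_cpu(PWR_DOMAIN_INIT);
 *	     cpu_node != PWR_DOMAIN_INIT;
 *	     cpu_node = tftf_topology_next_cpu(cpu_node)) {
 *		mpid = tftf_get_mpidr_from_node(cpu_node);
 *		... use mpid ...
 *	}
 */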

unsigned int tftf_get_parent_node_from_mpidr(unsigned int mpidr, unsigned int pwrlvl)
{
	unsigned int core_pos = platform_get_core_pos(mpidr);
	unsigned int node, i;

	if (core_pos >= PLATFORM_CORE_COUNT)
		return PWR_DOMAIN_INIT;

	if (pwrlvl > PLAT_MAX_PWR_LEVEL)
		return PWR_DOMAIN_INIT;

	node = tftf_pwr_domain_start_idx[0] + core_pos;

	for (i = 1; i <= pwrlvl; i++)
		node = tftf_pd_nodes[node].parent_node;

	return node;
}
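
/*
 * Example usage (illustrative sketch): retrieving the cluster-level (level 1)
 * node of the calling CPU. read_mpidr_el1() and MPID_MASK are assumed to be
 * provided by the framework's arch helpers:
 *
 *	unsigned int cluster_node =
 *		tftf_get_parent_node_from_mpidr(read_mpidr_el1() & MPID_MASK, 1);
 */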

unsigned int tftf_get_mpidr_from_node(unsigned int cpu_node)
{
	assert(topology_setup_done == 1);

	assert(CPU_NODE_IS_VALID(cpu_node));

	if (tftf_pd_nodes[cpu_node].is_present)
		return tftf_pd_nodes[cpu_node].mpidr;

	return INVALID_MPID;
}

unsigned int tftf_find_any_cpu_other_than(unsigned exclude_mpid)
{
	unsigned int cpu_node, mpidr;

	for_each_cpu(cpu_node) {
		mpidr = tftf_get_mpidr_from_node(cpu_node);
		if (mpidr != exclude_mpid)
			return mpidr;
	}

	return INVALID_MPID;
}

unsigned int tftf_find_random_cpu_other_than(unsigned int exclude_mpid)
{
	unsigned int cpu_node, mpidr;
	unsigned int possible_cpus_cnt = 0;
	unsigned int possible_cpus[PLATFORM_CORE_COUNT];

	for_each_cpu(cpu_node) {
		mpidr = tftf_get_mpidr_from_node(cpu_node);
		if (mpidr != exclude_mpid)
			possible_cpus[possible_cpus_cnt++] = mpidr;
	}

	if (possible_cpus_cnt == 0)
		return INVALID_MPID;

	return possible_cpus[rand() % possible_cpus_cnt];
}
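
/*
 * Example usage (illustrative sketch): powering on a randomly chosen
 * secondary CPU. tftf_cpu_on() from the framework's power management API is
 * assumed here, and 'secondary_entry_point' is a placeholder for a test's
 * entry function:
 *
 *	unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
 *	unsigned int target_mpid = tftf_find_random_cpu_other_than(lead_mpid);
 *
 *	if (target_mpid != INVALID_MPID)
 *		tftf_cpu_on(target_mpid, (uintptr_t)secondary_entry_point, 0);
 */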