/*
 * Copyright (c) 2018-2025, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <assert.h>
#include <debug.h>
#include <plat_topology.h>
#include <platform.h>
#include <stdlib.h>

/*
 * A CPU index is valid when it lies inside the contiguous block of level-0
 * (CPU) nodes that starts at tftf_pwr_domain_start_idx[0]. The subtraction
 * is unsigned, so an index below the start wraps to a huge value and fails
 * the comparison — one compare covers both bounds.
 */
#define CPU_INDEX_IS_VALID(_cpu_idx) \
	(((_cpu_idx) - tftf_pwr_domain_start_idx[0]) < PLATFORM_CORE_COUNT)

/* A node describes a CPU power domain iff it sits at power level 0. */
#define IS_A_CPU_NODE(_cpu_idx) (tftf_pd_nodes[(_cpu_idx)].level == 0)

/* A valid CPU node is both within the CPU index range and at level 0. */
#define CPU_NODE_IS_VALID(_cpu_node) \
	(CPU_INDEX_IS_VALID(_cpu_node) && IS_A_CPU_NODE(_cpu_node))
21
/*
 * Global variable to check that the platform topology is not queried until it
 * has been setup. Set to 1 at the end of tftf_init_topology().
 */
static unsigned int topology_setup_done;

/*
 * Store the start indices of power domains at various levels. This array makes it
 * easier to traverse the topology tree if the power domain level is known.
 * Index 0 is the first CPU node; higher indices are higher affinity levels.
 */
unsigned int tftf_pwr_domain_start_idx[PLATFORM_MAX_AFFLVL + 1];

/* The grand array to store the platform power domain topology */
tftf_pwr_domain_node_t tftf_pd_nodes[PLATFORM_NUM_AFFS];
36
#if DEBUG
/*
 * Debug function to display the platform topology.
 * Does not print absent affinity instances.
 */
static void dump_topology(void)
{
	unsigned int cluster_idx, cpu_idx, count;

	NOTICE("Platform topology:\n");

	NOTICE(" %u cluster(s)\n", tftf_get_total_clusters_count());
	NOTICE(" %u CPU(s) (total)\n\n", tftf_get_total_cpus_count());

	/*
	 * Iterate over the level-1 (cluster) domains. The comma expression
	 * first advances the iterator, then tests it against the
	 * PWR_DOMAIN_INIT end-of-iteration sentinel.
	 */
	for (cluster_idx = PWR_DOMAIN_INIT;
	     cluster_idx = tftf_get_next_peer_domain(cluster_idx, 1),
	     cluster_idx != PWR_DOMAIN_INIT;) {
		/* Count the present CPUs belonging to this cluster. */
		count = 0;
		for (cpu_idx = tftf_pd_nodes[cluster_idx].cpu_start_node;
		     cpu_idx < (tftf_pd_nodes[cluster_idx].cpu_start_node +
				tftf_pd_nodes[cluster_idx].ncpus);
		     cpu_idx++) {
			if (tftf_pd_nodes[cpu_idx].is_present)
				count++;
		}
		/* Cluster number is relative to the level-1 start index. */
		NOTICE(" Cluster #%u [%u CPUs]\n",
		       cluster_idx - tftf_pwr_domain_start_idx[1],
		       count);
		/* Same comma-expression iteration, over this cluster's CPUs. */
		for (cpu_idx = PWR_DOMAIN_INIT;
		     cpu_idx = tftf_get_next_cpu_in_pwr_domain(cluster_idx, cpu_idx),
		     cpu_idx != PWR_DOMAIN_INIT;) {
			NOTICE(" CPU #%u [MPID: 0x%x]\n",
			       cpu_idx - tftf_pwr_domain_start_idx[0],
			       tftf_get_mpidr_from_node(cpu_idx));
		}
	}
	NOTICE("\n");
}
#endif
76
77unsigned int tftf_get_total_aff_count(unsigned int aff_lvl)
78{
79 unsigned int count = 0;
80 unsigned int node_idx;
81
82 assert(topology_setup_done == 1);
83
84 if (aff_lvl > PLATFORM_MAX_AFFLVL)
85 return count;
86
87 node_idx = tftf_pwr_domain_start_idx[aff_lvl];
88
Alexei Fedorov8dec8452019-07-16 09:34:24 +010089 while ((node_idx < PLATFORM_NUM_AFFS) &&
90 (tftf_pd_nodes[node_idx].level == aff_lvl)) {
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +020091 if (tftf_pd_nodes[node_idx].is_present)
92 count++;
93 node_idx++;
94 }
95
96 return count;
97}
98
99unsigned int tftf_get_next_peer_domain(unsigned int pwr_domain_idx,
100 unsigned int pwr_lvl)
101{
102 assert(topology_setup_done == 1);
103
104 assert(pwr_lvl <= PLATFORM_MAX_AFFLVL);
105
106 if (pwr_domain_idx == PWR_DOMAIN_INIT) {
107 pwr_domain_idx = tftf_pwr_domain_start_idx[pwr_lvl];
108 if (tftf_pd_nodes[pwr_domain_idx].is_present)
109 return pwr_domain_idx;
110 }
111
112 assert(pwr_domain_idx < PLATFORM_NUM_AFFS &&
113 tftf_pd_nodes[pwr_domain_idx].level == pwr_lvl);
114
115 for (++pwr_domain_idx; (pwr_domain_idx < PLATFORM_NUM_AFFS)
116 && (tftf_pd_nodes[pwr_domain_idx].level == pwr_lvl);
117 pwr_domain_idx++) {
118 if (tftf_pd_nodes[pwr_domain_idx].is_present)
119 return pwr_domain_idx;
120 }
121
122 return PWR_DOMAIN_INIT;
123}
124
125unsigned int tftf_get_next_cpu_in_pwr_domain(unsigned int pwr_domain_idx,
126 unsigned int cpu_node)
127{
128 unsigned int cpu_end_node;
129
130 assert(topology_setup_done == 1);
131 assert(pwr_domain_idx != PWR_DOMAIN_INIT
132 && pwr_domain_idx < PLATFORM_NUM_AFFS);
133
134 if (cpu_node == PWR_DOMAIN_INIT) {
135 cpu_node = tftf_pd_nodes[pwr_domain_idx].cpu_start_node;
136 if (tftf_pd_nodes[cpu_node].is_present)
137 return cpu_node;
138 }
139
140 assert(CPU_NODE_IS_VALID(cpu_node));
141
142 cpu_end_node = tftf_pd_nodes[pwr_domain_idx].cpu_start_node
143 + tftf_pd_nodes[pwr_domain_idx].ncpus - 1;
144
145 assert(cpu_end_node < PLATFORM_NUM_AFFS);
146
147 for (++cpu_node; cpu_node <= cpu_end_node; cpu_node++) {
148 if (tftf_pd_nodes[cpu_node].is_present)
149 return cpu_node;
150 }
151
152 return PWR_DOMAIN_INIT;
153}
154
155/*
156 * Helper function to get the parent nodes of a particular CPU power
157 * domain.
158 */
159static void get_parent_pwr_domain_nodes(unsigned int cpu_node,
160 unsigned int end_lvl,
161 unsigned int node_index[])
162{
163 unsigned int parent_node = tftf_pd_nodes[cpu_node].parent_node;
164 unsigned int i;
165
166 for (i = 1; i <= end_lvl; i++) {
167 node_index[i - 1] = parent_node;
168 parent_node = tftf_pd_nodes[parent_node].parent_node;
169 }
170}
171
/*******************************************************************************
 * This function updates cpu_start_node and ncpus field for each of the nodes
 * in tftf_pd_nodes[]. It does so by comparing the parent nodes of each of
 * the CPUs and check whether they match with the parent of the previous
 * CPU. The basic assumption for this work is that children of the same parent
 * are allocated adjacent indices. The platform should ensure this through
 * proper mapping of the CPUs to indices via platform_get_core_pos() API.
 *
 * It also updates the 'is_present' field for non-cpu power domains. It does
 * this by checking the 'is_present' field of the child cpu nodes and updates
 * it if any of the child cpu nodes are present.
 *******************************************************************************/
static void update_pwrlvl_limits(void)
{
	int cpu_id, j, is_present;
	/*
	 * Last-seen ancestor index per level. Initialised to (unsigned)-1,
	 * an impossible node index, so the first CPU always mismatches.
	 */
	unsigned int nodes_idx[PLATFORM_MAX_AFFLVL];
	unsigned int temp_index[PLATFORM_MAX_AFFLVL];

	/* Index of the first CPU node in tftf_pd_nodes[]. */
	unsigned int cpu_node_offset = tftf_pwr_domain_start_idx[0];

	for (j = 0; j < PLATFORM_MAX_AFFLVL; j++)
		nodes_idx[j] = -1;

	for (cpu_id = 0; cpu_id < PLATFORM_CORE_COUNT; cpu_id++) {
		/* Ancestors of this CPU, one entry per level from 1 upward. */
		get_parent_pwr_domain_nodes(cpu_id + cpu_node_offset,
					    PLATFORM_MAX_AFFLVL,
					    temp_index);
		is_present = tftf_pd_nodes[cpu_id + cpu_node_offset].is_present;

		for (j = PLATFORM_MAX_AFFLVL - 1; j >= 0; j--) {
			if (temp_index[j] != nodes_idx[j]) {
				/*
				 * First CPU seen under this ancestor: record
				 * it as the ancestor's starting CPU.
				 *
				 * NOTE(review): is_present is propagated only
				 * from this first CPU; an ancestor whose first
				 * CPU is absent but a later one present would
				 * remain marked absent — confirm platforms
				 * never describe such a layout.
				 */
				nodes_idx[j] = temp_index[j];
				tftf_pd_nodes[nodes_idx[j]].cpu_start_node
					= cpu_id + cpu_node_offset;
				if (!tftf_pd_nodes[nodes_idx[j]].is_present)
					tftf_pd_nodes[nodes_idx[j]].is_present = is_present;
			}
			/* Every CPU contributes to each of its ancestors. */
			tftf_pd_nodes[nodes_idx[j]].ncpus++;
		}
	}
}
213
/******************************************************************************
 * This function populates the power domain topology array 'tftf_pd_nodes[]'
 * based on the power domain description retrieved from the platform layer.
 * It also updates the start index of each power domain level in
 * tftf_pwr_domain_start_idx[]. The uninitialized fields of 'tftf_pd_nodes[]'
 * for the non CPU power domain will be initialized in update_pwrlvl_limits().
 *****************************************************************************/
static void populate_power_domain_tree(void)
{
	unsigned int i, j = 0, num_nodes_at_lvl = 1, num_nodes_at_next_lvl,
		     node_index = 0, parent_idx = 0, num_children;
	int num_level = PLATFORM_MAX_AFFLVL;
	const unsigned char *plat_array;

	/* Breadth-first description: each entry is a node's child count. */
	plat_array = tftf_plat_get_pwr_domain_tree_desc();

	/*
	 * For each level the inputs are:
	 * - number of nodes at this level in plat_array i.e. num_nodes_at_lvl
	 *   This is the sum of values of nodes at the parent level.
	 * - Index of first entry at this level in the plat_array i.e.
	 *   parent_idx.
	 * - Index of first free entry in tftf_pd_nodes[].
	 */
	while (num_level >= 0) {
		num_nodes_at_next_lvl = 0;

		/* Store the start index for every level */
		tftf_pwr_domain_start_idx[num_level] = node_index;

		/*
		 * For each entry (parent node) at this level in the plat_array:
		 * - Find the number of children
		 * - Allocate a node in a power domain array for each child
		 * - Set the parent of the child to the parent_node_index - 1
		 * - Increment parent_node_index to point to the next parent
		 * - Accumulate the number of children at next level.
		 */
		for (i = 0; i < num_nodes_at_lvl; i++) {
			assert(parent_idx <=
			       PLATFORM_NUM_AFFS - PLATFORM_CORE_COUNT);
			num_children = plat_array[parent_idx];

			for (j = node_index;
			     j < node_index + num_children; j++) {
				/* Initialize the power domain node */
				tftf_pd_nodes[j].parent_node = parent_idx - 1;
				tftf_pd_nodes[j].level = num_level;

				/* Additional initializations for CPU power domains */
				if (num_level == 0) {
					/* Calculate the cpu id from node index */
					int cpu_id = j - tftf_pwr_domain_start_idx[0];

					assert(cpu_id < PLATFORM_CORE_COUNT);

					/* Set the mpidr of cpu node */
					tftf_pd_nodes[j].mpidr =
						tftf_plat_get_mpidr(cpu_id);
					/* INVALID_MPID marks an absent CPU. */
					if (tftf_pd_nodes[j].mpidr != INVALID_MPID)
						tftf_pd_nodes[j].is_present = 1;

					tftf_pd_nodes[j].cpu_start_node = j;
					tftf_pd_nodes[j].ncpus = 1;
				}
			}
			node_index = j;
			num_nodes_at_next_lvl += num_children;
			parent_idx++;
		}

		num_nodes_at_lvl = num_nodes_at_next_lvl;
		num_level--;
	}

	/* Validate the sanity of array exported by the platform */
	assert(j == PLATFORM_NUM_AFFS);
}
292
293
/*
 * Build the power domain topology from the platform description, fill in the
 * derived per-node limits, and mark the topology ready for the query
 * functions in this file. In DEBUG builds the resulting tree is printed.
 */
void tftf_init_topology(void)
{
	populate_power_domain_tree();
	update_pwrlvl_limits();
	topology_setup_done = 1;
#if DEBUG
	dump_topology();
#endif
}
303
304unsigned int tftf_topology_next_cpu(unsigned int cpu_node)
305{
306 assert(topology_setup_done == 1);
307
308 if (cpu_node == PWR_DOMAIN_INIT) {
309 cpu_node = tftf_pwr_domain_start_idx[0];
310 if (tftf_pd_nodes[cpu_node].is_present)
311 return cpu_node;
312 }
313
314 assert(CPU_NODE_IS_VALID(cpu_node));
315
316 for (++cpu_node; cpu_node < PLATFORM_NUM_AFFS; cpu_node++) {
317 if (tftf_pd_nodes[cpu_node].is_present)
318 return cpu_node;
319 }
320
321 return PWR_DOMAIN_INIT;
322}
323
Soby Mathew8d84b4c2018-07-09 13:07:57 +0100324unsigned int tftf_get_parent_node_from_mpidr(unsigned int mpidr, unsigned int pwrlvl)
325{
326 unsigned int core_pos = platform_get_core_pos(mpidr);
327 unsigned int node, i;
328
329 if (core_pos >= PLATFORM_CORE_COUNT)
330 return PWR_DOMAIN_INIT;
331
332 if (pwrlvl > PLAT_MAX_PWR_LEVEL)
333 return PWR_DOMAIN_INIT;
334
335 node = tftf_pwr_domain_start_idx[0] + core_pos;
336
337 for (i = 1; i <= pwrlvl; i++)
338 node = tftf_pd_nodes[node].parent_node;
339
340 return node;
341}
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +0200342
343unsigned int tftf_get_mpidr_from_node(unsigned int cpu_node)
344{
345 assert(topology_setup_done == 1);
346
347 assert(CPU_NODE_IS_VALID(cpu_node));
348
349 if (tftf_pd_nodes[cpu_node].is_present)
350 return tftf_pd_nodes[cpu_node].mpidr;
351
352 return INVALID_MPID;
353}
354
355unsigned int tftf_find_any_cpu_other_than(unsigned exclude_mpid)
356{
357 unsigned int cpu_node, mpidr;
358
359 for_each_cpu(cpu_node) {
360 mpidr = tftf_get_mpidr_from_node(cpu_node);
361 if (mpidr != exclude_mpid)
362 return mpidr;
363 }
364
365 return INVALID_MPID;
366}
367
Charlie Barehamaf8934c2024-07-26 14:51:14 +0100368unsigned int tftf_find_any_cpu_in_other_cluster(unsigned exclude_mpid)
369{
370 unsigned int cpu_node, cluster_node, mpidr, exclude_cluster_node;
371
372 exclude_cluster_node = tftf_get_parent_node_from_mpidr(exclude_mpid,
373 MPIDR_AFFLVL1);
374
375 for_each_cpu(cpu_node) {
376 mpidr = tftf_get_mpidr_from_node(cpu_node);
377 cluster_node = tftf_get_parent_node_from_mpidr(mpidr,
378 MPIDR_AFFLVL1);
379 if (cluster_node != exclude_cluster_node)
380 return mpidr;
381 }
382
383 return INVALID_MPID;
384}
385
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +0200386unsigned int tftf_find_random_cpu_other_than(unsigned int exclude_mpid)
387{
Alexei Fedorov7cc25872020-06-02 16:35:36 +0100388#if (PLATFORM_CORE_COUNT == 1)
389 return INVALID_MPID;
390#else
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +0200391 unsigned int cpu_node, mpidr;
392 unsigned int possible_cpus_cnt = 0;
393 unsigned int possible_cpus[PLATFORM_CORE_COUNT];
394
395 for_each_cpu(cpu_node) {
396 mpidr = tftf_get_mpidr_from_node(cpu_node);
397 if (mpidr != exclude_mpid)
398 possible_cpus[possible_cpus_cnt++] = mpidr;
399 }
400
401 if (possible_cpus_cnt == 0)
402 return INVALID_MPID;
403
404 return possible_cpus[rand() % possible_cpus_cnt];
Alexei Fedorov7cc25872020-06-02 16:35:36 +0100405#endif
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +0200406}