/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <stdbool.h>

#include "../amu_private.h"
#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>

#include <plat/common/platform.h>

#if ENABLE_AMU_FCONF
#	include <lib/fconf/fconf.h>
#	include <lib/fconf/fconf_amu_getter.h>
#endif

struct amu_ctx {
	uint64_t group0_cnts[AMU_GROUP0_MAX_COUNTERS];
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t group1_cnts[AMU_GROUP1_MAX_COUNTERS];
#endif

	/* Architected event counter 1 does not have an offset register */
	uint64_t group0_voffsets[AMU_GROUP0_MAX_COUNTERS - 1U];
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t group1_voffsets[AMU_GROUP1_MAX_COUNTERS];
#endif

	uint16_t group0_enable;
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint16_t group1_enable;
#endif
};

static struct amu_ctx amu_ctxs_[PLATFORM_CORE_COUNT];

CASSERT((sizeof(amu_ctxs_[0].group0_enable) * CHAR_BIT) <= AMU_GROUP0_MAX_COUNTERS,
	amu_ctx_group0_enable_cannot_represent_all_group0_counters);

#if ENABLE_AMU_AUXILIARY_COUNTERS
CASSERT((sizeof(amu_ctxs_[0].group1_enable) * CHAR_BIT) <= AMU_GROUP1_MAX_COUNTERS,
	amu_ctx_group1_enable_cannot_represent_all_group1_counters);
#endif

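/*
 * The helpers below are thin accessors for individual fields of the AMU
 * system registers: each one shifts and masks (or read-modify-writes) a
 * single field so that the rest of this file can work with plain values
 * rather than open-coded bit manipulation.
 */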
static inline __unused uint64_t read_id_aa64pfr0_el1_amu(void)
{
	return (read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
		ID_AA64PFR0_AMU_MASK;
}

static inline __unused uint64_t read_hcr_el2_amvoffen(void)
{
	return (read_hcr_el2() & HCR_AMVOFFEN_BIT) >>
		HCR_AMVOFFEN_SHIFT;
}

static inline __unused void write_cptr_el2_tam(uint64_t value)
{
	write_cptr_el2((read_cptr_el2() & ~CPTR_EL2_TAM_BIT) |
		((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
}

static inline __unused void write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
{
	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);

	value &= ~TAM_BIT;
	value |= (tam << TAM_SHIFT) & TAM_BIT;

	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, value);
}

static inline __unused void write_hcr_el2_amvoffen(uint64_t value)
{
	write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) |
		((value << HCR_AMVOFFEN_SHIFT) & HCR_AMVOFFEN_BIT));
}

static inline __unused void write_amcr_el0_cg1rz(uint64_t value)
{
	write_amcr_el0((read_amcr_el0() & ~AMCR_CG1RZ_BIT) |
		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}

static inline __unused uint64_t read_amcfgr_el0_ncg(void)
{
	return (read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT) &
		AMCFGR_EL0_NCG_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg0nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG0NC_SHIFT) &
		AMCGCR_EL0_CG0NC_MASK;
}

static inline __unused uint64_t read_amcg1idr_el0_voff(void)
{
	return (read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
		AMCG1IDR_VOFF_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg1nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
		AMCGCR_EL0_CG1NC_MASK;
}

static inline __unused uint64_t read_amcntenset0_el0_px(void)
{
	return (read_amcntenset0_el0() >> AMCNTENSET0_EL0_Pn_SHIFT) &
		AMCNTENSET0_EL0_Pn_MASK;
}

static inline __unused uint64_t read_amcntenset1_el0_px(void)
{
	return (read_amcntenset1_el0() >> AMCNTENSET1_EL0_Pn_SHIFT) &
		AMCNTENSET1_EL0_Pn_MASK;
}

static inline __unused void write_amcntenset0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset0_el0();

	value &= ~AMCNTENSET0_EL0_Pn_MASK;
	value |= (px << AMCNTENSET0_EL0_Pn_SHIFT) & AMCNTENSET0_EL0_Pn_MASK;

	write_amcntenset0_el0(value);
}

static inline __unused void write_amcntenset1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset1_el0();

	value &= ~AMCNTENSET1_EL0_Pn_MASK;
	value |= (px << AMCNTENSET1_EL0_Pn_SHIFT) & AMCNTENSET1_EL0_Pn_MASK;

	write_amcntenset1_el0(value);
}

static inline __unused void write_amcntenclr0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr0_el0();

	value &= ~AMCNTENCLR0_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR0_EL0_Pn_SHIFT) & AMCNTENCLR0_EL0_Pn_MASK;

	write_amcntenclr0_el0(value);
}

static inline __unused void write_amcntenclr1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr1_el0();

	value &= ~AMCNTENCLR1_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR1_EL0_Pn_SHIFT) & AMCNTENCLR1_EL0_Pn_MASK;

	write_amcntenclr1_el0(value);
}

static __unused bool amu_supported(void)
{
	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1;
}

static __unused bool amu_v1p1_supported(void)
{
	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1P1;
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
static __unused bool amu_group1_supported(void)
{
	return read_amcfgr_el0_ncg() > 0U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked by the context
 * management library before exiting from EL3.
 */
void amu_enable(bool el2_unused, cpu_context_t *ctx)
{
	uint64_t id_aa64pfr0_el1_amu;		/* AMU version */

	uint64_t amcfgr_el0_ncg;		/* Number of counter groups */
	uint64_t amcgcr_el0_cg0nc;		/* Number of group 0 counters */

	uint64_t amcntenset0_el0_px = 0x0;	/* Group 0 enable mask */
	uint64_t amcntenset1_el0_px = 0x0;	/* Group 1 enable mask */

	id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu();
	if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
		/*
		 * If the AMU is unsupported, nothing needs to be done.
		 */

		return;
	}

	if (el2_unused) {
		/*
		 * CPTR_EL2.TAM: Set to zero so any accesses to the Activity
		 * Monitor registers do not trap to EL2.
		 */
		write_cptr_el2_tam(0U);
	}

	/*
	 * Retrieve and update the CPTR_EL3 value from the context mentioned
	 * in 'ctx'. Set CPTR_EL3.TAM to zero so that any accesses to
	 * the Activity Monitor registers do not trap to EL3.
	 */
	write_cptr_el3_tam(ctx, 0U);

	/*
	 * Retrieve the number of architected counters. All of these counters
	 * are enabled by default.
	 */

	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
	amcntenset0_el0_px = (UINT64_C(1) << (amcgcr_el0_cg0nc)) - 1U;
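	/*
	 * For example, if AMCGCR_EL0.CG0NC reports four architected counters,
	 * the expression above evaluates to (1 << 4) - 1 = 0b1111, i.e. a
	 * mask selecting counters 0 to 3.
	 */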

	assert(amcgcr_el0_cg0nc <= AMU_AMCGCR_CG0NC_MAX);

	/*
	 * The platform may opt to enable specific auxiliary counters. This can
	 * be done via the common FCONF getter, or via the platform-implemented
	 * function.
	 */

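	/*
	 * As an illustrative sketch only (the exact topology type comes from
	 * the AMU private/FCONF headers), a platform that wants auxiliary
	 * counters 0 and 1 enabled on every core might provide something like:
	 *
	 *	const struct amu_topology *plat_amu_topology(void)
	 *	{
	 *		static const struct amu_topology topology_ = {
	 *			.cores = {
	 *				[0 ... PLATFORM_CORE_COUNT - 1U] = {
	 *					.enable = 0x3,
	 *				},
	 *			},
	 *		};
	 *
	 *		return &topology_;
	 *	}
	 *
	 * With ENABLE_AMU_FCONF=1 the equivalent information is instead taken
	 * from firmware configuration via the FCONF getter below.
	 */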
#if ENABLE_AMU_AUXILIARY_COUNTERS
	const struct amu_topology *topology;

#if ENABLE_AMU_FCONF
	topology = FCONF_GET_PROPERTY(amu, config, topology);
#else
	topology = plat_amu_topology();
#endif /* ENABLE_AMU_FCONF */

	if (topology != NULL) {
		unsigned int core_pos = plat_my_core_pos();

		amcntenset1_el0_px = topology->cores[core_pos].enable;
	} else {
		ERROR("AMU: failed to generate AMU topology\n");
	}
#endif /* ENABLE_AMU_AUXILIARY_COUNTERS */

	/*
	 * Enable the requested counters.
	 */

	write_amcntenset0_el0_px(amcntenset0_el0_px);

	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	if (amcfgr_el0_ncg > 0U) {
		write_amcntenset1_el0_px(amcntenset1_el0_px);

#if !ENABLE_AMU_AUXILIARY_COUNTERS
		VERBOSE("AMU: auxiliary counters detected but support is disabled\n");
#endif
	}

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (id_aa64pfr0_el1_amu < ID_AA64PFR0_AMU_V1P1) {
		return;
	}

	if (el2_unused) {
		/* Make sure virtual offsets are disabled if EL2 not used. */
		write_hcr_el2_amvoffen(0U);
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
	 * counters at all but the highest implemented EL. This is controlled
	 * with the AMU_RESTRICT_COUNTERS compile time flag; when set, system
	 * register reads at lower ELs return zero. Reads from the memory
	 * mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr_el0_cg1rz(1U);
#else
	write_amcr_el0_cg1rz(0U);
#endif
}

/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val` */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

/*
 * Unlike with auxiliary counters, we cannot detect at runtime whether an
 * architected counter supports a virtual offset. These are instead fixed
 * according to FEAT_AMUv1p1, but this switch will need to be updated if later
 * revisions of FEAT_AMU add additional architected counters.
 */
static bool amu_group0_voffset_supported(uint64_t idx)
{
	switch (idx) {
	case 0U:
	case 2U:
	case 3U:
		return true;

	case 1U:
		return false;

	default:
		ERROR("AMU: can't set up virtual offset for unknown "
		      "architected counter %llu!\n", idx);

		panic();
	}
}

/*
 * Read the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the register for 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group0_voffset_read(unsigned int idx)
{
	assert(amu_v1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	return amu_group0_voffset_read_internal(idx);
}

/*
 * Write the group 0 offset register for a given index. Index must be 0, 2, or
 * 3; the register for 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_v1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	amu_group0_voffset_write_internal(idx, val);
	isb();
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
/* Read the group 1 counter identified by the given `idx` */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val` */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group1_voffset_read(unsigned int idx)
{
	assert(amu_v1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	return amu_group1_voffset_read_internal(idx);
}

/*
 * Write the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group1_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_v1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	amu_group1_voffset_write_internal(idx, val);
	isb();
}
#endif

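/*
 * Context save handler, subscribed at the bottom of this file to the
 * psci_suspend_pwrdown_start PSCI event: it stops all counters, then captures
 * their values (and, with FEAT_AMUv1p1, their virtual offsets) into the
 * current core's amu_ctx entry before power is lost.
 */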
static void *amu_context_save(const void *arg)
{
	uint64_t i, j;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint64_t id_aa64pfr0_el1_amu;	/* AMU version */
	uint64_t hcr_el2_amvoffen;	/* AMU virtual offsets enabled */
	uint64_t amcgcr_el0_cg0nc;	/* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t amcg1idr_el0_voff;	/* Auxiliary counters with virtual offsets */
	uint64_t amcfgr_el0_ncg;	/* Number of counter groups */
	uint64_t amcgcr_el0_cg1nc;	/* Number of group 1 counters */
#endif

	id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu();
	if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
	hcr_el2_amvoffen = (id_aa64pfr0_el1_amu >= ID_AA64PFR0_AMU_V1P1) ?
		read_hcr_el2_amvoffen() : 0U;

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
	amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U;
#endif

	/*
	 * Disable all AMU counters.
	 */

	ctx->group0_enable = read_amcntenset0_el0_px();
	write_amcntenclr0_el0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_el0_ncg > 0U) {
		ctx->group1_enable = read_amcntenset1_el0_px();
		write_amcntenclr1_el0_px(ctx->group1_enable);
	}
#endif

	/*
	 * Save the counters to the local context.
	 */

	isb(); /* Ensure counters have been stopped */

	for (i = 0U; i < amcgcr_el0_cg0nc; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_el0_cg1nc; i++) {
		ctx->group1_cnts[i] = amu_group1_cnt_read(i);
	}
#endif

	/*
	 * Save virtual offsets for counters that offer them.
	 */

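	/*
	 * The offsets are stored packed: 'j' only advances for counters that
	 * actually implement a virtual offset, which is also why
	 * group0_voffsets[] is sized AMU_GROUP0_MAX_COUNTERS - 1U (counter 1
	 * has no offset register).
	 */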
	if (hcr_el2_amvoffen != 0U) {
		for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) {
			if (!amu_group0_voffset_supported(i)) {
				continue; /* No virtual offset */
			}

			ctx->group0_voffsets[j++] = amu_group0_voffset_read(i);
		}

#if ENABLE_AMU_AUXILIARY_COUNTERS
		for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) {
			if ((amcg1idr_el0_voff >> i) & 1U) {
				continue; /* No virtual offset */
			}

			ctx->group1_voffsets[j++] = amu_group1_voffset_read(i);
		}
#endif
	}

	return (void *)0;
}

static void *amu_context_restore(const void *arg)
{
	uint64_t i, j;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint64_t id_aa64pfr0_el1_amu;	/* AMU version */

	uint64_t hcr_el2_amvoffen;	/* AMU virtual offsets enabled */

	uint64_t amcfgr_el0_ncg;	/* Number of counter groups */
	uint64_t amcgcr_el0_cg0nc;	/* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t amcgcr_el0_cg1nc;	/* Number of group 1 counters */
	uint64_t amcg1idr_el0_voff;	/* Auxiliary counters with virtual offsets */
#endif

	id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu();
	if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();

	hcr_el2_amvoffen = (id_aa64pfr0_el1_amu >= ID_AA64PFR0_AMU_V1P1) ?
		read_hcr_el2_amvoffen() : 0U;

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
	amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U;
#endif

	/*
	 * Sanity check that all counters were disabled when the context was
	 * previously saved.
	 */

	assert(read_amcntenset0_el0_px() == 0U);

	if (amcfgr_el0_ncg > 0U) {
		assert(read_amcntenset1_el0_px() == 0U);
	}

	/*
	 * Restore the counter values from the local context.
	 */

	for (i = 0U; i < amcgcr_el0_cg0nc; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_el0_cg1nc; i++) {
		amu_group1_cnt_write(i, ctx->group1_cnts[i]);
	}
#endif

	/*
	 * Restore virtual offsets for counters that offer them.
	 */

	if (hcr_el2_amvoffen != 0U) {
		for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) {
			if (!amu_group0_voffset_supported(i)) {
				continue; /* No virtual offset */
			}

			amu_group0_voffset_write(i, ctx->group0_voffsets[j++]);
		}

#if ENABLE_AMU_AUXILIARY_COUNTERS
		for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) {
			if ((amcg1idr_el0_voff >> i) & 1U) {
				continue; /* No virtual offset */
			}

			amu_group1_voffset_write(i, ctx->group1_voffsets[j++]);
		}
#endif
	}

	/*
	 * Re-enable counters that were disabled during context save.
	 */

	write_amcntenset0_el0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_el0_ncg > 0U) {
		write_amcntenset1_el0_px(ctx->group1_enable);
	}
#endif

	return (void *)0;
}

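/*
 * Hook the save and restore handlers into the EL3 runtime's publish/subscribe
 * framework so that they run automatically around a PSCI suspend-to-powerdown
 * cycle on each core.
 */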
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);