/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <stdbool.h>

#include <arch.h>
#include <arch_helpers.h>

#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/amu_private.h>

#include <plat/common/platform.h>

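/* Per-core AMU counter context, saved and restored across CPU suspend */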
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];

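/* Return the AMU version field, ID_PFR0.AMU, of the calling core */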
static inline __unused uint32_t read_id_pfr0_amu(void)
{
	return (read_id_pfr0() >> ID_PFR0_AMU_SHIFT) &
		ID_PFR0_AMU_MASK;
}

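/*
 * Set or clear HCPTR.TAM, which controls trapping of Non-secure EL0/EL1
 * accesses to the Activity Monitor registers to EL2.
 */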
static inline __unused void write_hcptr_tam(uint32_t value)
{
	write_hcptr((read_hcptr() & ~TAM_BIT) |
		((value << TAM_SHIFT) & TAM_BIT));
}

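/*
 * Set or clear AMCR.CG1RZ; when set, group 1 counters read as zero at all
 * but the highest implemented EL.
 */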
static inline __unused void write_amcr_cg1rz(uint32_t value)
{
	write_amcr((read_amcr() & ~AMCR_CG1RZ_BIT) |
		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}

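/* Return AMCFGR.NCG, the number of implemented counter groups minus one */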
static inline __unused uint32_t read_amcfgr_ncg(void)
{
	return (read_amcfgr() >> AMCFGR_NCG_SHIFT) &
		AMCFGR_NCG_MASK;
}

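/* Return AMCGCR.CG1NC, the number of group 1 (auxiliary) counters implemented */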
static inline __unused uint32_t read_amcgcr_cg1nc(void)
{
	return (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
		AMCGCR_CG1NC_MASK;
}

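/*
 * Accessors for the per-counter enable bits of the group 0 and group 1
 * counters (AMCNTENSET0/1 to enable, AMCNTENCLR0/1 to disable).
 */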
static inline __unused uint32_t read_amcntenset0_px(void)
{
	return (read_amcntenset0() >> AMCNTENSET0_Pn_SHIFT) &
		AMCNTENSET0_Pn_MASK;
}

static inline __unused uint32_t read_amcntenset1_px(void)
{
	return (read_amcntenset1() >> AMCNTENSET1_Pn_SHIFT) &
		AMCNTENSET1_Pn_MASK;
}

static inline __unused void write_amcntenset0_px(uint32_t px)
{
	uint32_t value = read_amcntenset0();

	value &= ~AMCNTENSET0_Pn_MASK;
	value |= (px << AMCNTENSET0_Pn_SHIFT) &
		AMCNTENSET0_Pn_MASK;

	write_amcntenset0(value);
}

static inline __unused void write_amcntenset1_px(uint32_t px)
{
	uint32_t value = read_amcntenset1();

	value &= ~AMCNTENSET1_Pn_MASK;
	value |= (px << AMCNTENSET1_Pn_SHIFT) &
		AMCNTENSET1_Pn_MASK;

	write_amcntenset1(value);
}

static inline __unused void write_amcntenclr0_px(uint32_t px)
{
	uint32_t value = read_amcntenclr0();

	value &= ~AMCNTENCLR0_Pn_MASK;
	value |= (px << AMCNTENCLR0_Pn_SHIFT) & AMCNTENCLR0_Pn_MASK;

	write_amcntenclr0(value);
}

static inline __unused void write_amcntenclr1_px(uint32_t px)
{
	uint32_t value = read_amcntenclr1();

	value &= ~AMCNTENCLR1_Pn_MASK;
	value |= (px << AMCNTENCLR1_Pn_SHIFT) & AMCNTENCLR1_Pn_MASK;

	write_amcntenclr1(value);
}

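/* FEAT_AMUv1 is implemented if the AMU version is v1 or higher */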
static bool amu_supported(void)
{
	return read_id_pfr0_amu() >= ID_PFR0_AMU_V1;
}

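/* FEAT_AMUv1p1 is implemented if the AMU version is v1.1 or higher */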
static bool amu_v1p1_supported(void)
{
	return read_id_pfr0_amu() >= ID_PFR0_AMU_V1P1;
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
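/* A group 1 (auxiliary) counter group is implemented if AMCFGR.NCG > 0 */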
static bool amu_group1_supported(void)
{
	return read_amcfgr_ncg() > 0U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked
 * by the context management library before exiting from EL3.
 */
void amu_enable(bool el2_unused)
{
	if (!amu_supported()) {
		return;
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		/* Check that group 1 counters are present */
		if (!amu_group1_supported()) {
			ERROR("AMU Counter Group 1 is not implemented\n");
			panic();
		}

		/* Check number of group 1 counters */
		uint32_t cnt_num = read_amcgcr_cg1nc();

		VERBOSE("%s%u. %s%u\n",
			"Number of AMU Group 1 Counters ", cnt_num,
			"Requested number ", AMU_GROUP1_NR_COUNTERS);

		if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
			ERROR("%s%u is less than %s%u\n",
				"Number of AMU Group 1 Counters ", cnt_num,
				"Requested number ", AMU_GROUP1_NR_COUNTERS);
			panic();
		}
	}
#endif

	if (el2_unused) {
		/*
		 * Ensure Non-secure accesses from EL0 or EL1 to the Activity
		 * Monitor registers do not trap to EL2.
		 */
		write_hcptr_tam(0U);
	}

	/* Enable group 0 counters */
	write_amcntenset0_px(AMU_GROUP0_COUNTERS_MASK);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		/* Enable group 1 counters */
		write_amcntenset1_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (!amu_v1p1_supported()) {
		return;
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
	 * counters at all but the highest implemented EL. This is controlled
	 * by the AMU_RESTRICT_COUNTERS compile-time flag: when it is set,
	 * system register reads at lower ELs return zero. Reads from the
	 * memory-mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr_cg1rz(1U);
#else
	write_amcr_cg1rz(0U);
#endif
}

/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val`. */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
/* Read the group 1 counter identified by the given `idx`. */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val`. */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_cnt_write_internal(idx, val);
	isb();
}
#endif

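/*
 * Save this core's AMU counter values before it is powered down. Registered
 * to run on the `psci_suspend_pwrdown_start` pubsub event.
 */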
static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		if (!amu_group1_supported()) {
			return (void *)-1;
		}
	}
#endif

	/* Assert that group 0/1 counter configuration is what we expect */
	assert(read_amcntenset0_px() == AMU_GROUP0_COUNTERS_MASK);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		assert(read_amcntenset1_px() == AMU_GROUP1_COUNTERS_MASK);
	}
#endif
	/*
	 * Disable group 0/1 counters so that other observers, such as the
	 * SCP, cannot sample counter values from the future via the memory
	 * mapped view.
	 */
	write_amcntenclr0_px(AMU_GROUP0_COUNTERS_MASK);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		write_amcntenclr1_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	isb();

	/* Save all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		/* Save group 1 counters */
		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
			if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
				ctx->group1_cnts[i] = amu_group1_cnt_read(i);
			}
		}
	}
#endif

	return (void *)0;
}

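/*
 * Restore this core's AMU counter values after it resumes from power down.
 * Registered to run on the `psci_suspend_pwrdown_finish` pubsub event.
 */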
static void *amu_context_restore(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		if (!amu_group1_supported()) {
			return (void *)-1;
		}
	}
#endif

	/* Counters were disabled in `amu_context_save()` */
	assert(read_amcntenset0_px() == 0U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		assert(read_amcntenset1_px() == 0U);
	}
#endif

	/* Restore all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

	/* Restore group 0 counter configuration */
	write_amcntenset0_px(AMU_GROUP0_COUNTERS_MASK);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		/* Restore group 1 counters */
		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
			if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
				amu_group1_cnt_write(i, ctx->group1_cnts[i]);
			}
		}

		/* Restore group 1 counter configuration */
		write_amcntenset1_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	return (void *)0;
}

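/* Hook the save/restore handlers into the PSCI suspend power-down events */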
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);