/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <stdbool.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>

#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/amu_private.h>

#include <plat/common/platform.h>

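/*
 * Per-core AMU context, indexed by the calling core's position as returned
 * by plat_my_core_pos(), used to save and restore the counters across a
 * power-down suspend.
 */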
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];

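/*
 * Helper accessors for individual register fields. Readers shift and mask
 * the field out of the raw register value; writers perform a
 * read-modify-write so that neighbouring fields are preserved. The
 * __unused attribute keeps the compiler quiet for helpers that are only
 * referenced under certain build options.
 */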
static inline __unused uint64_t read_id_aa64pfr0_el1_amu(void)
{
	return (read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
		ID_AA64PFR0_AMU_MASK;
}

static inline __unused uint64_t read_hcr_el2_amvoffen(void)
{
	return (read_hcr_el2() & HCR_AMVOFFEN_BIT) >>
		HCR_AMVOFFEN_SHIFT;
}

static inline __unused void write_cptr_el2_tam(uint64_t value)
{
	write_cptr_el2((read_cptr_el2() & ~CPTR_EL2_TAM_BIT) |
		((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
}

static inline __unused void write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
{
	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);

	value &= ~TAM_BIT;
	value |= (tam << TAM_SHIFT) & TAM_BIT;

	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, value);
}

static inline __unused void write_hcr_el2_amvoffen(uint64_t value)
{
	write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) |
		((value << HCR_AMVOFFEN_SHIFT) & HCR_AMVOFFEN_BIT));
}

static inline __unused void write_amcr_el0_cg1rz(uint64_t value)
{
	write_amcr_el0((read_amcr_el0() & ~AMCR_CG1RZ_BIT) |
		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}

static inline __unused uint64_t read_amcfgr_el0_ncg(void)
{
	return (read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT) &
		AMCFGR_EL0_NCG_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg0nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG0NC_SHIFT) &
		AMCGCR_EL0_CG0NC_MASK;
}

static inline __unused uint64_t read_amcg1idr_el0_voff(void)
{
	return (read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
		AMCG1IDR_VOFF_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg1nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
		AMCGCR_EL0_CG1NC_MASK;
}

static inline __unused uint64_t read_amcntenset0_el0_px(void)
{
	return (read_amcntenset0_el0() >> AMCNTENSET0_EL0_Pn_SHIFT) &
		AMCNTENSET0_EL0_Pn_MASK;
}

static inline __unused uint64_t read_amcntenset1_el0_px(void)
{
	return (read_amcntenset1_el0() >> AMCNTENSET1_EL0_Pn_SHIFT) &
		AMCNTENSET1_EL0_Pn_MASK;
}

static inline __unused void write_amcntenset0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset0_el0();

	value &= ~AMCNTENSET0_EL0_Pn_MASK;
	value |= (px << AMCNTENSET0_EL0_Pn_SHIFT) & AMCNTENSET0_EL0_Pn_MASK;

	write_amcntenset0_el0(value);
}

static inline __unused void write_amcntenset1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset1_el0();

	value &= ~AMCNTENSET1_EL0_Pn_MASK;
	value |= (px << AMCNTENSET1_EL0_Pn_SHIFT) & AMCNTENSET1_EL0_Pn_MASK;

	write_amcntenset1_el0(value);
}

static inline __unused void write_amcntenclr0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr0_el0();

	value &= ~AMCNTENCLR0_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR0_EL0_Pn_SHIFT) & AMCNTENCLR0_EL0_Pn_MASK;

	write_amcntenclr0_el0(value);
}

static inline __unused void write_amcntenclr1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr1_el0();

	value &= ~AMCNTENCLR1_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR1_EL0_Pn_SHIFT) & AMCNTENCLR1_EL0_Pn_MASK;

	write_amcntenclr1_el0(value);
}

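/*
 * Feature detection: the ID_AA64PFR0_EL1.AMU field reads as 0b0001 when
 * FEAT_AMUv1 is implemented and 0b0010 when FEAT_AMUv1p1 is implemented,
 * hence the >= comparisons below.
 */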
static bool amu_supported(void)
{
	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1;
}

static bool amu_v1p1_supported(void)
{
	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1P1;
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
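/*
 * AMCFGR_EL0.NCG holds the number of implemented counter groups minus one,
 * so a non-zero value indicates that the auxiliary (group 1) counters are
 * present in addition to the architected group 0 counters.
 */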
static bool amu_group1_supported(void)
{
	return read_amcfgr_el0_ncg() > 0U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked
 * by the context management library before exiting from EL3.
 * A usage sketch follows the function body.
 */
void amu_enable(bool el2_unused, cpu_context_t *ctx)
{
	if (!amu_supported()) {
		return;
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		/* Check presence of group 1 counters */
		if (!amu_group1_supported()) {
			ERROR("AMU Counter Group 1 is not implemented\n");
			panic();
		}

		/* Check number of group 1 counters */
		uint64_t cnt_num = read_amcgcr_el0_cg1nc();

		VERBOSE("%s%llu. %s%u\n",
			"Number of AMU Group 1 Counters ", cnt_num,
			"Requested number ", AMU_GROUP1_NR_COUNTERS);

		if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
			ERROR("%s%llu is less than %s%u\n",
				"Number of AMU Group 1 Counters ", cnt_num,
				"Requested number ", AMU_GROUP1_NR_COUNTERS);
			panic();
		}
	}
#endif

	if (el2_unused) {
		/*
		 * CPTR_EL2.TAM: Set to zero so any accesses to
		 * the Activity Monitor registers do not trap to EL2.
		 */
		write_cptr_el2_tam(0U);
	}

	/*
	 * Retrieve and update the CPTR_EL3 value from the context pointed to
	 * by 'ctx'. Set CPTR_EL3.TAM to zero so that any accesses to
	 * the Activity Monitor registers do not trap to EL3.
	 */
	write_cptr_el3_tam(ctx, 0U);

	/* Enable group 0 counters */
	write_amcntenset0_el0_px((UINT64_C(1) << read_amcgcr_el0_cg0nc()) - 1U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		/* Enable group 1 counters */
		write_amcntenset1_el0_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (!amu_v1p1_supported()) {
		return;
	}

	if (el2_unused) {
		/* Make sure virtual offsets are disabled if EL2 not used. */
		write_hcr_el2_amvoffen(0U);
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
	 * counters at all but the highest implemented EL. This is controlled
	 * by the AMU_RESTRICT_COUNTERS compile time flag; when set, system
	 * register reads at lower ELs return zero. Reads from the memory
	 * mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr_el0_cg1rz(1U);
#else
	write_amcr_el0_cg1rz(0U);
#endif
}
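/*
 * Illustrative caller (a sketch, not part of this file): the context
 * management code typically determines whether EL2 is in use for the world
 * being entered and passes that world's context handle down. The exact
 * call site is an assumption here; roughly:
 *
 *	cpu_context_t *ctx = cm_get_context(NON_SECURE);
 *
 *	amu_enable(el2_unused, ctx);
 *
 * After this call, group 0 (and, if configured, group 1) counters count at
 * lower ELs without trapping to EL2 or EL3.
 */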

/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val` */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the register for index 1 does not exist, as the architecture
 * defines no virtual offset for the constant-frequency cycle counter.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group0_voffset_read(unsigned int idx)
{
	assert(amu_v1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	return amu_group0_voffset_read_internal(idx);
}

/*
 * Write the group 0 offset register for a given index. Index must be 0, 2, or
 * 3; the register for index 1 does not exist (see the note on
 * amu_group0_voffset_read() above).
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_v1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	amu_group0_voffset_write_internal(idx, val);
	isb();
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
/* Read the group 1 counter identified by the given `idx` */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val` */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group1_voffset_read(unsigned int idx)
{
	assert(amu_v1p1_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	return amu_group1_voffset_read_internal(idx);
}

/*
 * Write the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group1_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_v1p1_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	amu_group1_voffset_write_internal(idx, val);
	isb();
}
#endif

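/*
 * Save the AMU state to the per-core context on entry to a power-down
 * suspend. Counters are disabled before they are read so that observers of
 * the memory-mapped view do not see them advance past the saved values.
 * Returns (void *)-1 if the expected AMU support is missing, otherwise
 * (void *)0.
 */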
static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		if (!amu_group1_supported()) {
			return (void *)-1;
		}
	}
#endif

	/* Assert that group 0/1 counter configuration is what we expect */
	assert(read_amcntenset0_el0_px() ==
		((UINT64_C(1) << read_amcgcr_el0_cg0nc()) - 1U));

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		assert(read_amcntenset1_el0_px() == AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	/*
	 * Disable group 0/1 counters so that other observers, such as the
	 * SCP, cannot sample counter values from the future via the memory
	 * mapped view.
	 */
	write_amcntenclr0_el0_px((UINT64_C(1) << read_amcgcr_el0_cg0nc()) - 1U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		write_amcntenclr1_el0_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	isb();

	/* Save all group 0 counters */
	for (i = 0U; i < read_amcgcr_el0_cg0nc(); i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

	/* Save group 0 virtual offsets if supported and enabled. */
	if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
		/*
		 * Not using a loop because the count is fixed and the
		 * register for index 1 does not exist.
		 */
		ctx->group0_voffsets[0U] = amu_group0_voffset_read(0U);
		ctx->group0_voffsets[1U] = amu_group0_voffset_read(2U);
		ctx->group0_voffsets[2U] = amu_group0_voffset_read(3U);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		/* Save group 1 counters */
		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
			if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
				ctx->group1_cnts[i] = amu_group1_cnt_read(i);
			}
		}

		/* Save group 1 virtual offsets if supported and enabled. */
		if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
			uint64_t amcg1idr = read_amcg1idr_el0_voff() &
				AMU_GROUP1_COUNTERS_MASK;

			for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
				if (((amcg1idr >> i) & 1ULL) != 0ULL) {
					ctx->group1_voffsets[i] =
						amu_group1_voffset_read(i);
				}
			}
		}
	}
#endif

	return (void *)0;
}

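/*
 * Restore the AMU state from the per-core context on wake-up from a
 * power-down suspend, then re-enable the counters that amu_context_save()
 * disabled. Returns (void *)-1 if the expected AMU support is missing,
 * otherwise (void *)0.
 */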
static void *amu_context_restore(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		if (!amu_group1_supported()) {
			return (void *)-1;
		}
	}
#endif

	/* Counters were disabled in `amu_context_save()` */
	assert(read_amcntenset0_el0_px() == 0U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		assert(read_amcntenset1_el0_px() == 0U);
	}
#endif

	/* Restore all group 0 counters */
	for (i = 0U; i < read_amcgcr_el0_cg0nc(); i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

	/* Restore group 0 virtual offsets if supported and enabled. */
	if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
		/*
		 * Not using a loop because the count is fixed and the
		 * register for index 1 does not exist.
		 */
		amu_group0_voffset_write(0U, ctx->group0_voffsets[0U]);
		amu_group0_voffset_write(2U, ctx->group0_voffsets[1U]);
		amu_group0_voffset_write(3U, ctx->group0_voffsets[2U]);
	}

	/* Restore group 0 counter configuration */
	write_amcntenset0_el0_px((UINT64_C(1) << read_amcgcr_el0_cg0nc()) - 1U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		/* Restore group 1 counters */
		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
			if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
				amu_group1_cnt_write(i, ctx->group1_cnts[i]);
			}
		}

		/* Restore group 1 virtual offsets if supported and enabled. */
		if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
			uint64_t amcg1idr = read_amcg1idr_el0_voff() &
				AMU_GROUP1_COUNTERS_MASK;

			for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
				if (((amcg1idr >> i) & 1ULL) != 0ULL) {
					amu_group1_voffset_write(i,
						ctx->group1_voffsets[i]);
				}
			}
		}

		/* Restore group 1 counter configuration */
		write_amcntenset1_el0_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	return (void *)0;
}

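/*
 * Hook the save/restore handlers into the EL3 runtime's publish-subscribe
 * framework. The PSCI implementation publishes these events around a
 * power-down suspend, at which point every subscribed handler runs on the
 * affected core. A sketch of the publishing side (assuming the standard
 * PUBLISH_EVENT() macro from pubsub.h):
 *
 *	PUBLISH_EVENT(psci_suspend_pwrdown_start);
 *	... the core enters the low-power state, then on wake-up ...
 *	PUBLISH_EVENT(psci_suspend_pwrdown_finish);
 */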
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);