/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <stdbool.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>

#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/amu_private.h>

#include <plat/common/platform.h>

static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];

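/*
 * Helpers for reading and writing individual fields of the AMU-related
 * system registers. Each accessor shifts and masks the field it names.
 * They are marked __unused since some of them are referenced only under
 * certain build configurations, such as ENABLE_AMU_AUXILIARY_COUNTERS.
 */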
static inline __unused uint64_t read_id_aa64pfr0_el1_amu(void)
{
	return (read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
		ID_AA64PFR0_AMU_MASK;
}

static inline __unused uint64_t read_hcr_el2_amvoffen(void)
{
	return (read_hcr_el2() & HCR_AMVOFFEN_BIT) >>
		HCR_AMVOFFEN_SHIFT;
}

static inline __unused void write_cptr_el2_tam(uint64_t value)
{
	write_cptr_el2((read_cptr_el2() & ~CPTR_EL2_TAM_BIT) |
		((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
}

static inline __unused void write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
{
	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);

	value &= ~TAM_BIT;
	value |= (tam << TAM_SHIFT) & TAM_BIT;

	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, value);
}

static inline __unused void write_hcr_el2_amvoffen(uint64_t value)
{
	write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) |
		((value << HCR_AMVOFFEN_SHIFT) & HCR_AMVOFFEN_BIT));
}

static inline __unused void write_amcr_el0_cg1rz(uint64_t value)
{
	write_amcr_el0((read_amcr_el0() & ~AMCR_CG1RZ_BIT) |
		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}

static inline __unused uint64_t read_amcfgr_el0_ncg(void)
{
	return (read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT) &
		AMCFGR_EL0_NCG_MASK;
}

static inline uint64_t read_amcgcr_el0_cg0nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG0NC_SHIFT) &
		AMCGCR_EL0_CG0NC_MASK;
}

static inline __unused uint64_t read_amcg1idr_el0_voff(void)
{
	return (read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
		AMCG1IDR_VOFF_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg1nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
		AMCGCR_EL0_CG1NC_MASK;
}

static inline __unused uint64_t read_amcntenset0_el0_px(void)
{
	return (read_amcntenset0_el0() >> AMCNTENSET0_EL0_Pn_SHIFT) &
		AMCNTENSET0_EL0_Pn_MASK;
}

static inline __unused uint64_t read_amcntenset1_el0_px(void)
{
	return (read_amcntenset1_el0() >> AMCNTENSET1_EL0_Pn_SHIFT) &
		AMCNTENSET1_EL0_Pn_MASK;
}

static inline __unused void write_amcntenset0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset0_el0();

	value &= ~AMCNTENSET0_EL0_Pn_MASK;
	value |= (px << AMCNTENSET0_EL0_Pn_SHIFT) & AMCNTENSET0_EL0_Pn_MASK;

	write_amcntenset0_el0(value);
}

static inline __unused void write_amcntenset1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset1_el0();

	value &= ~AMCNTENSET1_EL0_Pn_MASK;
	value |= (px << AMCNTENSET1_EL0_Pn_SHIFT) & AMCNTENSET1_EL0_Pn_MASK;

	write_amcntenset1_el0(value);
}

static inline __unused void write_amcntenclr0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr0_el0();

	value &= ~AMCNTENCLR0_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR0_EL0_Pn_SHIFT) & AMCNTENCLR0_EL0_Pn_MASK;

	write_amcntenclr0_el0(value);
}

static inline __unused void write_amcntenclr1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr1_el0();

	value &= ~AMCNTENCLR1_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR1_EL0_Pn_SHIFT) & AMCNTENCLR1_EL0_Pn_MASK;

	write_amcntenclr1_el0(value);
}

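/*
 * Feature detection: the ID_AA64PFR0_EL1.AMU field reports the level of
 * Activity Monitors support (0 = not implemented, 1 = FEAT_AMUv1,
 * 2 = FEAT_AMUv1p1).
 */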
static bool amu_supported(void)
{
	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1;
}

static bool amu_v1p1_supported(void)
{
	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1P1;
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
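/*
 * Auxiliary (group 1) counters are optional. AMCFGR_EL0.NCG holds the
 * number of implemented counter groups minus one, so a non-zero value
 * means group 1 exists.
 */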
static bool amu_group1_supported(void)
{
	return read_amcfgr_el0_ncg() > 0U;
}
#endif

/*
 * Enable the AMU counters. This function is meant to be invoked by the
 * context management library before exiting from EL3.
 */
void amu_enable(bool el2_unused, cpu_context_t *ctx)
{
	if (!amu_supported()) {
		return;
	}

	if (el2_unused) {
		/*
		 * CPTR_EL2.TAM: Set to zero so any accesses to the Activity
		 * Monitor registers do not trap to EL2.
		 */
		write_cptr_el2_tam(0U);
	}

	/*
	 * Retrieve and update the CPTR_EL3 value from the context mentioned
	 * in 'ctx'. Set CPTR_EL3.TAM to zero so that any accesses to the
	 * Activity Monitor registers do not trap to EL3.
	 */
	write_cptr_el3_tam(ctx, 0U);

	/* Enable group 0 counters */
	write_amcntenset0_el0_px((UINT64_C(1) << read_amcgcr_el0_cg0nc()) - 1U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amu_group1_supported()) {
		/* Enable group 1 counters */
		write_amcntenset1_el0_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (!amu_v1p1_supported()) {
		return;
	}

	if (el2_unused) {
		/* Make sure virtual offsets are disabled if EL2 is not used. */
		write_hcr_el2_amvoffen(0U);
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
	 * counters at all but the highest implemented EL. This is controlled
	 * with the AMU_RESTRICT_COUNTERS compile time flag: when set, system
	 * register reads at lower ELs return zero. Reads from the memory
	 * mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr_el0_cg1rz(1U);
#else
	write_amcr_el0_cg1rz(0U);
#endif
}

/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val`. */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 0 offset register for a given index. The index must be
 * 0, 2, or 3; the offset register for counter 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group0_voffset_read(unsigned int idx)
{
	assert(amu_v1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	return amu_group0_voffset_read_internal(idx);
}

/*
 * Write the group 0 offset register for a given index. The index must be
 * 0, 2, or 3; the offset register for counter 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_v1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	amu_group0_voffset_write_internal(idx, val);
	isb();
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
/* Read the group 1 counter identified by the given `idx`. */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val`. */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group1_voffset_read(unsigned int idx)
{
	assert(amu_v1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	return amu_group1_voffset_read_internal(idx);
}

/*
 * Write the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group1_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_v1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	amu_group1_voffset_write_internal(idx, val);
	isb();
}
#endif

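/*
 * Save the AMU context (counters and, where implemented and enabled, their
 * virtual offsets) for the current core ahead of a power-down suspend.
 */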
static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

	/* Assert that group 0/1 counter configuration is what we expect. */
	assert(read_amcntenset0_el0_px() ==
		((UINT64_C(1) << read_amcgcr_el0_cg0nc()) - 1U));

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amu_group1_supported()) {
		assert(read_amcntenset1_el0_px() == AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	/*
	 * Disable group 0/1 counters to avoid other observers, such as the
	 * SCP, sampling counter values from the future via the memory-mapped
	 * view.
	 */
	write_amcntenclr0_el0_px((UINT64_C(1) << read_amcgcr_el0_cg0nc()) - 1U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amu_group1_supported()) {
		write_amcntenclr1_el0_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	isb();

	/* Save all group 0 counters. */
	for (i = 0U; i < read_amcgcr_el0_cg0nc(); i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

	/* Save group 0 virtual offsets if supported and enabled. */
	if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
		/*
		 * Not using a loop because the count is fixed and the offset
		 * register for index 1 does not exist.
		 */
		ctx->group0_voffsets[0U] = amu_group0_voffset_read(0U);
		ctx->group0_voffsets[1U] = amu_group0_voffset_read(2U);
		ctx->group0_voffsets[2U] = amu_group0_voffset_read(3U);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amu_group1_supported()) {
		/* Save group 1 counters. */
		for (i = 0U; i < read_amcgcr_el0_cg1nc(); i++) {
			if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
				ctx->group1_cnts[i] = amu_group1_cnt_read(i);
			}
		}

		/* Save group 1 virtual offsets if supported and enabled. */
		if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
			uint64_t amcg1idr = read_amcg1idr_el0_voff() &
				AMU_GROUP1_COUNTERS_MASK;

			for (i = 0U; i < read_amcgcr_el0_cg1nc(); i++) {
				if (((amcg1idr >> i) & 1ULL) != 0ULL) {
					ctx->group1_voffsets[i] =
						amu_group1_voffset_read(i);
				}
			}
		}
	}
#endif

	return (void *)0;
}

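/*
 * Restore the AMU context saved by amu_context_save() and re-enable the
 * counters once the core resumes from a power-down suspend.
 */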
static void *amu_context_restore(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

	/* Counters were disabled in `amu_context_save()`. */
	assert(read_amcntenset0_el0_px() == 0U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amu_group1_supported()) {
		assert(read_amcntenset1_el0_px() == 0U);
	}
#endif

	/* Restore all group 0 counters. */
	for (i = 0U; i < read_amcgcr_el0_cg0nc(); i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

	/* Restore group 0 virtual offsets if supported and enabled. */
	if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
		/*
		 * Not using a loop because the count is fixed and the offset
		 * register for index 1 does not exist.
		 */
		amu_group0_voffset_write(0U, ctx->group0_voffsets[0U]);
		amu_group0_voffset_write(2U, ctx->group0_voffsets[1U]);
		amu_group0_voffset_write(3U, ctx->group0_voffsets[2U]);
	}

	/* Restore group 0 counter configuration. */
	write_amcntenset0_el0_px((UINT64_C(1) << read_amcgcr_el0_cg0nc()) - 1U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amu_group1_supported()) {
		/* Restore group 1 counters. */
		for (i = 0U; i < read_amcgcr_el0_cg1nc(); i++) {
			if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
				amu_group1_cnt_write(i, ctx->group1_cnts[i]);
			}
		}

		/* Restore group 1 virtual offsets if supported and enabled. */
		if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
			uint64_t amcg1idr = read_amcg1idr_el0_voff() &
				AMU_GROUP1_COUNTERS_MASK;

			for (i = 0U; i < read_amcgcr_el0_cg1nc(); i++) {
				if (((amcg1idr >> i) & 1ULL) != 0ULL) {
					amu_group1_voffset_write(i,
						ctx->group1_voffsets[i]);
				}
			}
		}

		/* Restore group 1 counter configuration. */
		write_amcntenset1_el0_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	return (void *)0;
}

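/*
 * Register the save/restore handlers with the PSCI suspend power-down
 * events published by the EL3 runtime.
 */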
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);