/*
 * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>

#include "../amu_private.h"
#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>

#include <plat/common/platform.h>

#if ENABLE_AMU_FCONF
#	include <lib/fconf/fconf.h>
#	include <lib/fconf/fconf_amu_getter.h>
#endif

#if ENABLE_MPMM
#	include <lib/mpmm/mpmm.h>
#endif

struct amu_ctx {
	uint64_t group0_cnts[AMU_GROUP0_MAX_COUNTERS];
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t group1_cnts[AMU_GROUP1_MAX_COUNTERS];
#endif

	/* Architected event counter 1 does not have an offset register */
	uint64_t group0_voffsets[AMU_GROUP0_MAX_COUNTERS - 1U];
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t group1_voffsets[AMU_GROUP1_MAX_COUNTERS];
#endif

	uint16_t group0_enable;
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint16_t group1_enable;
#endif
};

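/* One saved context per core, indexed by plat_my_core_pos(). */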
static struct amu_ctx amu_ctxs_[PLATFORM_CORE_COUNT];

CASSERT((sizeof(amu_ctxs_[0].group0_enable) * CHAR_BIT) <= AMU_GROUP0_MAX_COUNTERS,
	amu_ctx_group0_enable_cannot_represent_all_group0_counters);

#if ENABLE_AMU_AUXILIARY_COUNTERS
CASSERT((sizeof(amu_ctxs_[0].group1_enable) * CHAR_BIT) <= AMU_GROUP1_MAX_COUNTERS,
	amu_ctx_group1_enable_cannot_represent_all_group1_counters);
#endif

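/*
 * Helpers for reading and writing individual fields of the AMU configuration,
 * enable and control registers, and of the EL2/EL3 trap controls that gate
 * access to them.
 */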
static inline __unused uint64_t read_hcr_el2_amvoffen(void)
{
	return (read_hcr_el2() & HCR_AMVOFFEN_BIT) >>
		HCR_AMVOFFEN_SHIFT;
}

static inline __unused void write_cptr_el2_tam(uint64_t value)
{
	write_cptr_el2((read_cptr_el2() & ~CPTR_EL2_TAM_BIT) |
		((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
}

static inline __unused void ctx_write_scr_el3_amvoffen(cpu_context_t *ctx, uint64_t amvoffen)
{
	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);

	value &= ~SCR_AMVOFFEN_BIT;
	value |= (amvoffen << SCR_AMVOFFEN_SHIFT) & SCR_AMVOFFEN_BIT;

	write_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3, value);
}

static inline __unused void write_hcr_el2_amvoffen(uint64_t value)
{
	write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) |
		((value << HCR_AMVOFFEN_SHIFT) & HCR_AMVOFFEN_BIT));
}

static inline __unused void write_amcr_el0_cg1rz(uint64_t value)
{
	write_amcr_el0((read_amcr_el0() & ~AMCR_CG1RZ_BIT) |
		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}

static inline __unused uint64_t read_amcfgr_el0_ncg(void)
{
	return (read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT) &
		AMCFGR_EL0_NCG_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg0nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG0NC_SHIFT) &
		AMCGCR_EL0_CG0NC_MASK;
}

static inline __unused uint64_t read_amcg1idr_el0_voff(void)
{
	return (read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
		AMCG1IDR_VOFF_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg1nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
		AMCGCR_EL0_CG1NC_MASK;
}

static inline __unused uint64_t read_amcntenset0_el0_px(void)
{
	return (read_amcntenset0_el0() >> AMCNTENSET0_EL0_Pn_SHIFT) &
		AMCNTENSET0_EL0_Pn_MASK;
}

static inline __unused uint64_t read_amcntenset1_el0_px(void)
{
	return (read_amcntenset1_el0() >> AMCNTENSET1_EL0_Pn_SHIFT) &
		AMCNTENSET1_EL0_Pn_MASK;
}

static inline __unused void write_amcntenset0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset0_el0();

	value &= ~AMCNTENSET0_EL0_Pn_MASK;
	value |= (px << AMCNTENSET0_EL0_Pn_SHIFT) & AMCNTENSET0_EL0_Pn_MASK;

	write_amcntenset0_el0(value);
}

static inline __unused void write_amcntenset1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset1_el0();

	value &= ~AMCNTENSET1_EL0_Pn_MASK;
	value |= (px << AMCNTENSET1_EL0_Pn_SHIFT) & AMCNTENSET1_EL0_Pn_MASK;

	write_amcntenset1_el0(value);
}

static inline __unused void write_amcntenclr0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr0_el0();

	value &= ~AMCNTENCLR0_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR0_EL0_Pn_SHIFT) & AMCNTENCLR0_EL0_Pn_MASK;

	write_amcntenclr0_el0(value);
}

static inline __unused void write_amcntenclr1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr1_el0();

	value &= ~AMCNTENCLR1_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR1_EL0_Pn_SHIFT) & AMCNTENCLR1_EL0_Pn_MASK;

	write_amcntenclr1_el0(value);
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
static __unused bool amu_group1_supported(void)
{
	return read_amcfgr_el0_ncg() > 0U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked by the context
 * management library before exiting from EL3.
 */
void amu_enable(cpu_context_t *ctx)
{
	/* Initialize FEAT_AMUv1p1 features if present. */
	if (is_feat_amuv1p1_supported()) {
		/*
		 * Set SCR_EL3.AMVOFFEN to one so that accesses to virtual
		 * offset registers at EL2 do not trap to EL3.
		 */
		ctx_write_scr_el3_amvoffen(ctx, 1U);
	}
}

void amu_enable_per_world(per_world_context_t *per_world_ctx)
{
	/*
	 * Set CPTR_EL3.TAM to zero so that any accesses to the Activity
	 * Monitor registers do not trap to EL3.
	 */
	uint64_t cptr_el3 = per_world_ctx->ctx_cptr_el3;

	cptr_el3 &= ~TAM_BIT;
	per_world_ctx->ctx_cptr_el3 = cptr_el3;
}

void amu_init_el3(void)
{
	uint64_t group0_impl_ctr = read_amcgcr_el0_cg0nc();
	uint64_t group0_en_mask = (1 << (group0_impl_ctr)) - 1U;
	uint64_t num_ctr_groups = read_amcfgr_el0_ncg();

	/* Enable all architected counters by default */
	write_amcntenset0_el0_px(group0_en_mask);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (num_ctr_groups > 0U) {
		uint64_t amcntenset1_el0_px = 0x0; /* Group 1 enable mask */
		const struct amu_topology *topology;

		/*
		 * The platform may opt to enable specific auxiliary counters.
		 * This can be done via the common FCONF getter, or via the
		 * platform-implemented function.
		 */
#if ENABLE_AMU_FCONF
		topology = FCONF_GET_PROPERTY(amu, config, topology);
#else
		topology = plat_amu_topology();
#endif /* ENABLE_AMU_FCONF */

		if (topology != NULL) {
			unsigned int core_pos = plat_my_core_pos();

			amcntenset1_el0_px = topology->cores[core_pos].enable;
		} else {
			ERROR("AMU: failed to generate AMU topology\n");
		}

		write_amcntenset1_el0_px(amcntenset1_el0_px);
	}
#else /* ENABLE_AMU_AUXILIARY_COUNTERS */
	if (num_ctr_groups > 0U) {
		VERBOSE("AMU: auxiliary counters detected but support is disabled\n");
	}
#endif /* ENABLE_AMU_AUXILIARY_COUNTERS */

	if (is_feat_amuv1p1_supported()) {
#if AMU_RESTRICT_COUNTERS
		/*
		 * FEAT_AMUv1p1 adds a register field to restrict access to
		 * group 1 counters at all but the highest implemented EL. This
		 * is controlled with the `AMU_RESTRICT_COUNTERS` compile time
		 * flag; when set, system register reads at lower ELs return
		 * zero. Reads from the memory-mapped view are unaffected.
		 */
		VERBOSE("AMU group 1 counter access restricted.\n");
		write_amcr_el0_cg1rz(1U);
#else
		write_amcr_el0_cg1rz(0U);
#endif
	}

#if ENABLE_MPMM
	mpmm_enable();
#endif
}
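
/*
 * The set of auxiliary counters enabled above is platform policy, supplied
 * either through FCONF or through plat_amu_topology(). As an illustrative
 * sketch only (assuming the amu_topology/cores/enable layout used above), a
 * platform that wants auxiliary counters 0 and 1 enabled on every core could
 * return a constant topology:
 *
 *	static const struct amu_topology topology_ = {
 *		.cores = {
 *			[0 ... PLATFORM_CORE_COUNT - 1] = { .enable = 0x3 },
 *		},
 *	};
 *
 *	const struct amu_topology *plat_amu_topology(void)
 *	{
 *		return &topology_;
 *	}
 *
 * Each core's `enable` field is the AMCNTENSET1_EL0 counter mask programmed
 * by amu_init_el3().
 */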

void amu_init_el2_unused(void)
{
	/*
	 * CPTR_EL2.TAM: Set to zero so any accesses to the Activity Monitor
	 * registers do not trap to EL2.
	 */
	write_cptr_el2_tam(0U);

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (is_feat_amuv1p1_supported()) {
		/* Make sure virtual offsets are disabled if EL2 not used. */
		write_hcr_el2_amvoffen(0U);
	}
}

/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(is_feat_amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val`. */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

/*
 * Unlike with auxiliary counters, we cannot detect at runtime whether an
 * architected counter supports a virtual offset. These are instead fixed
 * according to FEAT_AMUv1p1, but this switch will need to be updated if later
 * revisions of FEAT_AMU add additional architected counters.
 */
static bool amu_group0_voffset_supported(uint64_t idx)
{
	switch (idx) {
	case 0U:
	case 2U:
	case 3U:
		return true;

	case 1U:
		return false;

	default:
		ERROR("AMU: can't set up virtual offset for unknown "
		      "architected counter %" PRIu64 "!\n", idx);

		panic();
	}
}

/*
 * Read the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the register for 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group0_voffset_read(unsigned int idx)
{
	assert(is_feat_amuv1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	return amu_group0_voffset_read_internal(idx);
}

/*
 * Write the group 0 offset register for a given index. Index must be 0, 2, or
 * 3; the register for 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amuv1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	amu_group0_voffset_write_internal(idx, val);
	isb();
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
/* Read the group 1 counter identified by the given `idx` */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(is_feat_amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val` */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group1_voffset_read(unsigned int idx)
{
	assert(is_feat_amuv1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	return amu_group1_voffset_read_internal(idx);
}

/*
 * Write the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group1_voffset_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amuv1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	amu_group1_voffset_write_internal(idx, val);
	isb();
}
#endif

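/*
 * Save the AMU context on entry to a power-down state: the counters are
 * disabled, then their values (and any virtual offsets) are captured in this
 * core's amu_ctx entry.
 */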
static void *amu_context_save(const void *arg)
{
	uint64_t i, j;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint64_t hcr_el2_amvoffen = 0;	/* AMU virtual offsets enabled */
	uint64_t amcgcr_el0_cg0nc;	/* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t amcg1idr_el0_voff;	/* Auxiliary counters with virtual offsets */
	uint64_t amcfgr_el0_ncg;	/* Number of counter groups */
	uint64_t amcgcr_el0_cg1nc;	/* Number of group 1 counters */
#endif

	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
	if (is_feat_amuv1p1_supported()) {
		hcr_el2_amvoffen = read_hcr_el2_amvoffen();
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
	amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U;
#endif

	/*
	 * Disable all AMU counters.
	 */

	ctx->group0_enable = read_amcntenset0_el0_px();
	write_amcntenclr0_el0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_el0_ncg > 0U) {
		ctx->group1_enable = read_amcntenset1_el0_px();
		write_amcntenclr1_el0_px(ctx->group1_enable);
	}
#endif

	/*
	 * Save the counters to the local context.
	 */

	isb(); /* Ensure counters have been stopped */

	for (i = 0U; i < amcgcr_el0_cg0nc; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_el0_cg1nc; i++) {
		ctx->group1_cnts[i] = amu_group1_cnt_read(i);
	}
#endif

	/*
	 * Save virtual offsets for counters that offer them.
	 */

	if (hcr_el2_amvoffen != 0U) {
		for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) {
			if (!amu_group0_voffset_supported(i)) {
				continue; /* No virtual offset */
			}

			ctx->group0_voffsets[j++] = amu_group0_voffset_read(i);
		}

#if ENABLE_AMU_AUXILIARY_COUNTERS
		for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) {
490 if ((amcg1idr_el0_voff >> i) & 1U) {
				continue; /* No virtual offset */
			}

			ctx->group1_voffsets[j++] = amu_group1_voffset_read(i);
		}
#endif
	}

	return (void *)0;
}

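/*
 * Restore the AMU context on wake-up from a power-down state: counter values
 * and virtual offsets are written back from this core's amu_ctx entry, and
 * the counters that were enabled at save time are re-enabled.
 */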
static void *amu_context_restore(const void *arg)
{
	uint64_t i, j;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint64_t hcr_el2_amvoffen = 0;	/* AMU virtual offsets enabled */

	uint64_t amcgcr_el0_cg0nc;	/* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t amcfgr_el0_ncg;	/* Number of counter groups */
	uint64_t amcgcr_el0_cg1nc;	/* Number of group 1 counters */
	uint64_t amcg1idr_el0_voff;	/* Auxiliary counters with virtual offsets */
#endif

	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();

	if (is_feat_amuv1p1_supported()) {
		hcr_el2_amvoffen = read_hcr_el2_amvoffen();
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
	amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U;
#endif

	/*
	 * Restore the counter values from the local context.
	 */

	for (i = 0U; i < amcgcr_el0_cg0nc; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_el0_cg1nc; i++) {
		amu_group1_cnt_write(i, ctx->group1_cnts[i]);
	}
#endif

	/*
	 * Restore virtual offsets for counters that offer them.
	 */

	if (hcr_el2_amvoffen != 0U) {
		for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) {
			if (!amu_group0_voffset_supported(i)) {
				continue; /* No virtual offset */
			}

			amu_group0_voffset_write(i, ctx->group0_voffsets[j++]);
		}

#if ENABLE_AMU_AUXILIARY_COUNTERS
		for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) {
			if (((amcg1idr_el0_voff >> i) & 1U) == 0U) {
				continue; /* No virtual offset */
			}

			amu_group1_voffset_write(i, ctx->group1_voffsets[j++]);
		}
#endif
	}

	/*
	 * Re-enable counters that were disabled during context save.
	 */

	write_amcntenset0_el0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_el0_ncg > 0U) {
		write_amcntenset1_el0_px(ctx->group1_enable);
	}
#endif

#if ENABLE_MPMM
	mpmm_enable();
#endif

	return (void *)0;
}

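/*
 * Hook the save/restore handlers into the PSCI power-down suspend events so
 * that the AMU state survives a core power-down.
 */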
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);