/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>

#include "../amu_private.h"
#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>

#include <plat/common/platform.h>

#if ENABLE_AMU_FCONF
#	include <lib/fconf/fconf.h>
#	include <lib/fconf/fconf_amu_getter.h>
#endif

#if ENABLE_MPMM
#	include <lib/mpmm/mpmm.h>
#endif

struct amu_ctx {
	uint64_t group0_cnts[AMU_GROUP0_MAX_COUNTERS];
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t group1_cnts[AMU_GROUP1_MAX_COUNTERS];
#endif

	/* Architected event counter 1 does not have an offset register */
	uint64_t group0_voffsets[AMU_GROUP0_MAX_COUNTERS - 1U];
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t group1_voffsets[AMU_GROUP1_MAX_COUNTERS];
#endif

	uint16_t group0_enable;
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint16_t group1_enable;
#endif
};
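
/*
 * One saved AMU context per core. The 16-bit enable fields in struct amu_ctx
 * cache the AMCNTENSET{0,1}_EL0 Pn bitmaps across a power cycle; the CASSERTs
 * below check that they are wide enough for every counter the layout allows.
 */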

static struct amu_ctx amu_ctxs_[PLATFORM_CORE_COUNT];

CASSERT((sizeof(amu_ctxs_[0].group0_enable) * CHAR_BIT) <= AMU_GROUP0_MAX_COUNTERS,
	amu_ctx_group0_enable_cannot_represent_all_group0_counters);

#if ENABLE_AMU_AUXILIARY_COUNTERS
CASSERT((sizeof(amu_ctxs_[0].group1_enable) * CHAR_BIT) <= AMU_GROUP1_MAX_COUNTERS,
	amu_ctx_group1_enable_cannot_represent_all_group1_counters);
#endif

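/*
 * The field accessors below follow a common pattern: reads shift the register
 * down and mask to isolate the named field, while writes do a
 * read-modify-write, clearing the field with its mask before OR-ing in the
 * new value shifted into position.
 */
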
static inline __unused uint64_t read_id_aa64pfr0_el1_amu(void)
{
	return (read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
		ID_AA64PFR0_AMU_MASK;
}

static inline __unused uint64_t read_hcr_el2_amvoffen(void)
{
	return (read_hcr_el2() & HCR_AMVOFFEN_BIT) >>
		HCR_AMVOFFEN_SHIFT;
}

static inline __unused void write_cptr_el2_tam(uint64_t value)
{
	write_cptr_el2((read_cptr_el2() & ~CPTR_EL2_TAM_BIT) |
		((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
}

static inline __unused void write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
{
	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);

	value &= ~TAM_BIT;
	value |= (tam << TAM_SHIFT) & TAM_BIT;

	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, value);
}

static inline __unused void write_hcr_el2_amvoffen(uint64_t value)
{
	write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) |
		((value << HCR_AMVOFFEN_SHIFT) & HCR_AMVOFFEN_BIT));
}

static inline __unused void write_amcr_el0_cg1rz(uint64_t value)
{
	write_amcr_el0((read_amcr_el0() & ~AMCR_CG1RZ_BIT) |
		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}

static inline __unused uint64_t read_amcfgr_el0_ncg(void)
{
	return (read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT) &
		AMCFGR_EL0_NCG_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg0nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG0NC_SHIFT) &
		AMCGCR_EL0_CG0NC_MASK;
}

static inline __unused uint64_t read_amcg1idr_el0_voff(void)
{
	return (read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
		AMCG1IDR_VOFF_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg1nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
		AMCGCR_EL0_CG1NC_MASK;
}

static inline __unused uint64_t read_amcntenset0_el0_px(void)
{
	return (read_amcntenset0_el0() >> AMCNTENSET0_EL0_Pn_SHIFT) &
		AMCNTENSET0_EL0_Pn_MASK;
}

static inline __unused uint64_t read_amcntenset1_el0_px(void)
{
	return (read_amcntenset1_el0() >> AMCNTENSET1_EL0_Pn_SHIFT) &
		AMCNTENSET1_EL0_Pn_MASK;
}

static inline __unused void write_amcntenset0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset0_el0();

	value &= ~AMCNTENSET0_EL0_Pn_MASK;
	value |= (px << AMCNTENSET0_EL0_Pn_SHIFT) & AMCNTENSET0_EL0_Pn_MASK;

	write_amcntenset0_el0(value);
}

static inline __unused void write_amcntenset1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset1_el0();

	value &= ~AMCNTENSET1_EL0_Pn_MASK;
	value |= (px << AMCNTENSET1_EL0_Pn_SHIFT) & AMCNTENSET1_EL0_Pn_MASK;

	write_amcntenset1_el0(value);
}

static inline __unused void write_amcntenclr0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr0_el0();

	value &= ~AMCNTENCLR0_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR0_EL0_Pn_SHIFT) & AMCNTENCLR0_EL0_Pn_MASK;

	write_amcntenclr0_el0(value);
}

static inline __unused void write_amcntenclr1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr1_el0();

	value &= ~AMCNTENCLR1_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR1_EL0_Pn_SHIFT) & AMCNTENCLR1_EL0_Pn_MASK;

	write_amcntenclr1_el0(value);
}

static __unused bool amu_supported(void)
{
	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1;
}

static __unused bool amu_v1p1_supported(void)
{
	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1P1;
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
static __unused bool amu_group1_supported(void)
{
	return read_amcfgr_el0_ncg() > 0U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked by the context
 * management library before exiting from EL3.
 */
void amu_enable(bool el2_unused, cpu_context_t *ctx)
{
	uint64_t id_aa64pfr0_el1_amu;		/* AMU version */

	uint64_t amcfgr_el0_ncg;		/* Number of counter groups */
	uint64_t amcgcr_el0_cg0nc;		/* Number of group 0 counters */

	uint64_t amcntenset0_el0_px = 0x0;	/* Group 0 enable mask */
	uint64_t amcntenset1_el0_px = 0x0;	/* Group 1 enable mask */

	id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu();
	if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
		/*
		 * If the AMU is unsupported, nothing needs to be done.
		 */

		return;
	}

	if (el2_unused) {
		/*
		 * CPTR_EL2.TAM: Set to zero so any accesses to the Activity
		 * Monitor registers do not trap to EL2.
		 */
		write_cptr_el2_tam(0U);
	}

	/*
	 * Retrieve and update the CPTR_EL3 value from the context mentioned
	 * in 'ctx'. Set CPTR_EL3.TAM to zero so that any accesses to
	 * the Activity Monitor registers do not trap to EL3.
	 */
	write_cptr_el3_tam(ctx, 0U);

	/*
	 * Retrieve the number of architected counters. All of these counters
	 * are enabled by default.
	 */

	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
	amcntenset0_el0_px = (UINT64_C(1) << (amcgcr_el0_cg0nc)) - 1U;
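
	/*
	 * For example, with four architected counters (AMCGCR_EL0.CG0NC = 4)
	 * this computes (1 << 4) - 1 = 0xf, i.e. an enable mask selecting
	 * counters 0 to 3.
	 */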

	assert(amcgcr_el0_cg0nc <= AMU_AMCGCR_CG0NC_MAX);

	/*
	 * The platform may opt to enable specific auxiliary counters. This can
	 * be done via the common FCONF getter, or via the platform-implemented
	 * function.
	 */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	const struct amu_topology *topology;

#if ENABLE_AMU_FCONF
	topology = FCONF_GET_PROPERTY(amu, config, topology);
#else
	topology = plat_amu_topology();
#endif /* ENABLE_AMU_FCONF */

	if (topology != NULL) {
		unsigned int core_pos = plat_my_core_pos();

		amcntenset1_el0_px = topology->cores[core_pos].enable;
	} else {
		ERROR("AMU: failed to generate AMU topology\n");
	}
#endif /* ENABLE_AMU_AUXILIARY_COUNTERS */
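
	/*
	 * As an illustrative, hypothetical sketch (not part of this file), a
	 * platform might implement plat_amu_topology() by returning a static
	 * table, e.g. enabling only auxiliary counter 0 on core 0:
	 *
	 *	const struct amu_topology *plat_amu_topology(void)
	 *	{
	 *		static const struct amu_topology topology = {
	 *			.cores[0] = { .enable = 0x1 },
	 *		};
	 *
	 *		return &topology;
	 *	}
	 */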

	/*
	 * Enable the requested counters.
	 */

	write_amcntenset0_el0_px(amcntenset0_el0_px);

	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	if (amcfgr_el0_ncg > 0U) {
		write_amcntenset1_el0_px(amcntenset1_el0_px);

#if !ENABLE_AMU_AUXILIARY_COUNTERS
		VERBOSE("AMU: auxiliary counters detected but support is disabled\n");
#endif
	}

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (id_aa64pfr0_el1_amu >= ID_AA64PFR0_AMU_V1P1) {
		if (el2_unused) {
			/*
			 * Make sure virtual offsets are disabled if EL2 not
			 * used.
			 */
			write_hcr_el2_amvoffen(0U);
		}

#if AMU_RESTRICT_COUNTERS
		/*
		 * FEAT_AMUv1p1 adds a register field to restrict access to
		 * group 1 counters at all but the highest implemented EL. This
		 * is controlled with the `AMU_RESTRICT_COUNTERS` compile time
		 * flag; when set, system register reads at lower ELs return
		 * zero. Reads from the memory-mapped view are unaffected.
		 */
298 VERBOSE("AMU group 1 counter access restricted.\n");
299 write_amcr_el0_cg1rz(1U);
johpow01fa59c6f2020-10-02 13:41:11 -0500300#else
Chris Kay03be39d2021-05-05 13:38:30 +0100301 write_amcr_el0_cg1rz(0U);
302#endif
303 }
304
305#if ENABLE_MPMM
306 mpmm_enable();
johpow01fa59c6f2020-10-02 13:41:11 -0500307#endif
Dimitris Papastamos525c37a2017-11-13 09:49:45 +0000308}
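
/*
 * Note: in TF-A, amu_enable() is expected to be reached from the context
 * management library (typically cm_prepare_el3_exit() when ENABLE_AMU=1),
 * once per core, shortly before exiting to a lower exception level.
 */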

/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val` */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(idx < read_amcgcr_el0_cg0nc());

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

/*
 * Unlike with auxiliary counters, we cannot detect at runtime whether an
 * architected counter supports a virtual offset. These are instead fixed
 * according to FEAT_AMUv1p1, but this switch will need to be updated if later
 * revisions of FEAT_AMU add additional architected counters.
 */
static bool amu_group0_voffset_supported(uint64_t idx)
{
	switch (idx) {
	case 0U:
	case 2U:
	case 3U:
		return true;

	case 1U:
		return false;

	default:
		ERROR("AMU: can't set up virtual offset for unknown "
		      "architected counter %" PRIu64 "!\n", idx);

		panic();
	}
}

/*
 * Read the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the register for 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group0_voffset_read(unsigned int idx)
{
	assert(amu_v1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	return amu_group0_voffset_read_internal(idx);
}

/*
 * Write the group 0 offset register for a given index. Index must be 0, 2, or
 * 3; the register for 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_v1p1_supported());
	assert(idx < read_amcgcr_el0_cg0nc());
	assert(idx != 1U);

	amu_group0_voffset_write_internal(idx, val);
	isb();
}
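
/*
 * A note on offset semantics (FEAT_AMUv1p1): when virtual offsets are
 * enabled, an indirect read of an offset-capable counter from a lower EL
 * returns the physical count minus the corresponding AMEVCNTVOFF<n>_EL2
 * value. A hypervisor would therefore program each offset as the difference
 * between the physical count and the count the guest should observe.
 */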

#if ENABLE_AMU_AUXILIARY_COUNTERS
/* Read the group 1 counter identified by the given `idx` */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val` */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group1_voffset_read(unsigned int idx)
{
	assert(amu_v1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	return amu_group1_voffset_read_internal(idx);
}

/*
 * Write the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group1_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_v1p1_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_el0_cg1nc());
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	amu_group1_voffset_write_internal(idx, val);
	isb();
}
#endif

static void *amu_context_save(const void *arg)
{
	uint64_t i, j;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint64_t id_aa64pfr0_el1_amu;	/* AMU version */
	uint64_t hcr_el2_amvoffen;	/* AMU virtual offsets enabled */
	uint64_t amcgcr_el0_cg0nc;	/* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t amcg1idr_el0_voff;	/* Auxiliary counters with virtual offsets */
	uint64_t amcfgr_el0_ncg;	/* Number of counter groups */
	uint64_t amcgcr_el0_cg1nc;	/* Number of group 1 counters */
#endif

	id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu();
	if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
	hcr_el2_amvoffen = (id_aa64pfr0_el1_amu >= ID_AA64PFR0_AMU_V1P1) ?
		read_hcr_el2_amvoffen() : 0U;

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
	amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U;
#endif

	/*
	 * Disable all AMU counters.
	 */

	ctx->group0_enable = read_amcntenset0_el0_px();
	write_amcntenclr0_el0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_el0_ncg > 0U) {
		ctx->group1_enable = read_amcntenset1_el0_px();
		write_amcntenclr1_el0_px(ctx->group1_enable);
	}
#endif

	/*
	 * Save the counters to the local context.
	 */

	isb(); /* Ensure counters have been stopped */

	for (i = 0U; i < amcgcr_el0_cg0nc; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_el0_cg1nc; i++) {
		ctx->group1_cnts[i] = amu_group1_cnt_read(i);
	}
#endif

	/*
	 * Save virtual offsets for counters that offer them.
	 */

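	/*
	 * The offsets are stored packed: `j` advances only for counters that
	 * implement a virtual offset, which is why group0_voffsets[] needs no
	 * slot for architected counter 1.
	 */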
	if (hcr_el2_amvoffen != 0U) {
		for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) {
			if (!amu_group0_voffset_supported(i)) {
				continue; /* No virtual offset */
			}

			ctx->group0_voffsets[j++] = amu_group0_voffset_read(i);
		}

#if ENABLE_AMU_AUXILIARY_COUNTERS
		for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) {
			if (((amcg1idr_el0_voff >> i) & 1U) == 0U) {
				continue; /* No virtual offset */
			}

			ctx->group1_voffsets[j++] = amu_group1_voffset_read(i);
		}
#endif
	}

	return (void *)0;
}

static void *amu_context_restore(const void *arg)
{
	uint64_t i, j;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint64_t id_aa64pfr0_el1_amu;	/* AMU version */

	uint64_t hcr_el2_amvoffen;	/* AMU virtual offsets enabled */

	uint64_t amcfgr_el0_ncg;	/* Number of counter groups */
	uint64_t amcgcr_el0_cg0nc;	/* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t amcgcr_el0_cg1nc;	/* Number of group 1 counters */
	uint64_t amcg1idr_el0_voff;	/* Auxiliary counters with virtual offsets */
#endif

	id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu();
	if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();

	hcr_el2_amvoffen = (id_aa64pfr0_el1_amu >= ID_AA64PFR0_AMU_V1P1) ?
		read_hcr_el2_amvoffen() : 0U;

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
	amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U;
#endif

	/*
	 * Sanity check that all counters were disabled when the context was
	 * previously saved.
	 */

	assert(read_amcntenset0_el0_px() == 0U);

	if (amcfgr_el0_ncg > 0U) {
		assert(read_amcntenset1_el0_px() == 0U);
	}

	/*
	 * Restore the counter values from the local context.
	 */

	for (i = 0U; i < amcgcr_el0_cg0nc; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_el0_cg1nc; i++) {
		amu_group1_cnt_write(i, ctx->group1_cnts[i]);
	}
#endif

	/*
	 * Restore virtual offsets for counters that offer them.
	 */

	if (hcr_el2_amvoffen != 0U) {
		for (i = 0U, j = 0U; i < amcgcr_el0_cg0nc; i++) {
			if (!amu_group0_voffset_supported(i)) {
				continue; /* No virtual offset */
			}

			amu_group0_voffset_write(i, ctx->group0_voffsets[j++]);
		}

#if ENABLE_AMU_AUXILIARY_COUNTERS
		for (i = 0U, j = 0U; i < amcgcr_el0_cg1nc; i++) {
			if (((amcg1idr_el0_voff >> i) & 1U) == 0U) {
				continue; /* No virtual offset */
			}

			amu_group1_voffset_write(i, ctx->group1_voffsets[j++]);
		}
#endif
	}

	/*
	 * Re-enable counters that were disabled during context save.
	 */

	write_amcntenset0_el0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_el0_ncg > 0U) {
		write_amcntenset1_el0_px(ctx->group1_enable);
	}
#endif

#if ENABLE_MPMM
	mpmm_enable();
#endif

	return (void *)0;
}
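
/*
 * Hook the AMU context handlers into the PSCI power-management flow: the
 * save handler runs on psci_suspend_pwrdown_start, before the core's context
 * is lost, and the restore handler on psci_suspend_pwrdown_finish, once the
 * core has resumed, so counter values, enable bits and virtual offsets all
 * survive a powerdown suspend.
 */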

SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);