/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <limits.h>
#include <stdbool.h>

#include "../amu_private.h"
#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/cassert.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>

#include <plat/common/platform.h>

#if ENABLE_AMU_FCONF
#	include <lib/fconf/fconf.h>
#	include <lib/fconf/fconf_amu_getter.h>
#endif
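
/*
 * Per-core AMU context, saved on suspend. The counter arrays hold the saved
 * counter values, and the enable bitmaps record which counters were enabled
 * at suspend time, so that only those counters are re-enabled on resume.
 */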
struct amu_ctx {
	uint64_t group0_cnts[AMU_GROUP0_MAX_COUNTERS];
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t group1_cnts[AMU_GROUP1_MAX_COUNTERS];
#endif

	uint16_t group0_enable;
#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint16_t group1_enable;
#endif
};

static struct amu_ctx amu_ctxs_[PLATFORM_CORE_COUNT];

CASSERT((sizeof(amu_ctxs_[0].group0_enable) * CHAR_BIT) <= AMU_GROUP0_MAX_COUNTERS,
	amu_ctx_group0_enable_cannot_represent_all_group0_counters);

#if ENABLE_AMU_AUXILIARY_COUNTERS
CASSERT((sizeof(amu_ctxs_[0].group1_enable) * CHAR_BIT) <= AMU_GROUP1_MAX_COUNTERS,
	amu_ctx_group1_enable_cannot_represent_all_group1_counters);
#endif
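
/*
 * The helpers below read or write a single field of an AArch32 AMU system
 * register, and are named after the register and field they access; for
 * example, read_amcgcr_cg0nc() returns AMCGCR.CG0NC, the number of
 * architected (group 0) counters.
 */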
static inline __unused uint32_t read_id_pfr0_amu(void)
{
	return (read_id_pfr0() >> ID_PFR0_AMU_SHIFT) &
		ID_PFR0_AMU_MASK;
}

static inline __unused void write_hcptr_tam(uint32_t value)
{
	write_hcptr((read_hcptr() & ~TAM_BIT) |
		((value << TAM_SHIFT) & TAM_BIT));
}

static inline __unused void write_amcr_cg1rz(uint32_t value)
{
	write_amcr((read_amcr() & ~AMCR_CG1RZ_BIT) |
		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}

static inline __unused uint32_t read_amcfgr_ncg(void)
{
	return (read_amcfgr() >> AMCFGR_NCG_SHIFT) &
		AMCFGR_NCG_MASK;
}

static inline __unused uint32_t read_amcgcr_cg0nc(void)
{
	return (read_amcgcr() >> AMCGCR_CG0NC_SHIFT) &
		AMCGCR_CG0NC_MASK;
}

static inline __unused uint32_t read_amcgcr_cg1nc(void)
{
	return (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
		AMCGCR_CG1NC_MASK;
}

static inline __unused uint32_t read_amcntenset0_px(void)
{
	return (read_amcntenset0() >> AMCNTENSET0_Pn_SHIFT) &
		AMCNTENSET0_Pn_MASK;
}

static inline __unused uint32_t read_amcntenset1_px(void)
{
	return (read_amcntenset1() >> AMCNTENSET1_Pn_SHIFT) &
		AMCNTENSET1_Pn_MASK;
}

static inline __unused void write_amcntenset0_px(uint32_t px)
{
	uint32_t value = read_amcntenset0();

	value &= ~AMCNTENSET0_Pn_MASK;
	value |= (px << AMCNTENSET0_Pn_SHIFT) &
		AMCNTENSET0_Pn_MASK;

	write_amcntenset0(value);
}

static inline __unused void write_amcntenset1_px(uint32_t px)
{
	uint32_t value = read_amcntenset1();

	value &= ~AMCNTENSET1_Pn_MASK;
	value |= (px << AMCNTENSET1_Pn_SHIFT) &
		AMCNTENSET1_Pn_MASK;

	write_amcntenset1(value);
}

static inline __unused void write_amcntenclr0_px(uint32_t px)
{
	uint32_t value = read_amcntenclr0();

	value &= ~AMCNTENCLR0_Pn_MASK;
	value |= (px << AMCNTENCLR0_Pn_SHIFT) & AMCNTENCLR0_Pn_MASK;

	write_amcntenclr0(value);
}

static inline __unused void write_amcntenclr1_px(uint32_t px)
{
	uint32_t value = read_amcntenclr1();

	value &= ~AMCNTENCLR1_Pn_MASK;
	value |= (px << AMCNTENCLR1_Pn_SHIFT) & AMCNTENCLR1_Pn_MASK;

	write_amcntenclr1(value);
}

static __unused bool amu_supported(void)
{
	return read_id_pfr0_amu() >= ID_PFR0_AMU_V1;
}
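
/*
 * Note: ID_PFR0.AMU reads 0b0001 for FEAT_AMUv1 and 0b0010 for FEAT_AMUv1p1,
 * so the comparison above accepts v1.1 implementations as well.
 */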

#if ENABLE_AMU_AUXILIARY_COUNTERS
static __unused bool amu_group1_supported(void)
{
	return read_amcfgr_ncg() > 0U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked by the context
 * management library before exiting from EL3.
 */
void amu_enable(bool el2_unused)
{
	uint32_t id_pfr0_amu; /* AMU version */

	uint32_t amcfgr_ncg; /* Number of counter groups */
	uint32_t amcgcr_cg0nc; /* Number of group 0 counters */

	uint32_t amcntenset0_px = 0x0; /* Group 0 enable mask */
	uint32_t amcntenset1_px = 0x0; /* Group 1 enable mask */

	id_pfr0_amu = read_id_pfr0_amu();
	if (id_pfr0_amu == ID_PFR0_AMU_NOT_SUPPORTED) {
		/*
		 * If the AMU is unsupported, nothing needs to be done.
		 */

		return;
	}

	if (el2_unused) {
		/*
		 * HCPTR.TAM: Set to zero so any accesses to the Activity
		 * Monitor registers do not trap to EL2.
		 */
		write_hcptr_tam(0U);
	}

	/*
	 * Retrieve the number of architected counters. All of these counters
	 * are enabled by default.
	 */

	amcgcr_cg0nc = read_amcgcr_cg0nc();
	amcntenset0_px = (UINT32_C(1) << (amcgcr_cg0nc)) - 1U;
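
	/*
	 * For example, with the four architected AMUv1 counters implemented
	 * (AMCGCR.CG0NC = 4), the expression above evaluates to
	 * (1 << 4) - 1 = 0b1111, selecting counters 0 through 3.
	 */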

	assert(amcgcr_cg0nc <= AMU_AMCGCR_CG0NC_MAX);

	/*
	 * The platform may opt to enable specific auxiliary counters. This can
	 * be done via the common FCONF getter, or via the platform-implemented
	 * function.
	 */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	const struct amu_topology *topology;

#if ENABLE_AMU_FCONF
	topology = FCONF_GET_PROPERTY(amu, config, topology);
#else
	topology = plat_amu_topology();
#endif /* ENABLE_AMU_FCONF */

	if (topology != NULL) {
		unsigned int core_pos = plat_my_core_pos();

		amcntenset1_px = topology->cores[core_pos].enable;
	} else {
		ERROR("AMU: failed to generate AMU topology\n");
	}
#endif /* ENABLE_AMU_AUXILIARY_COUNTERS */

	/*
	 * Enable the requested counters.
	 */

	write_amcntenset0_px(amcntenset0_px);

	amcfgr_ncg = read_amcfgr_ncg();
	if (amcfgr_ncg > 0U) {
		write_amcntenset1_px(amcntenset1_px);

#if !ENABLE_AMU_AUXILIARY_COUNTERS
		VERBOSE("AMU: auxiliary counters detected but support is disabled\n");
#endif
	}

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (id_pfr0_amu < ID_PFR0_AMU_V1P1) {
		return;
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
	 * counters at all but the highest implemented EL. This is controlled
	 * with the AMU_RESTRICT_COUNTERS compile time flag; when set, system
	 * register reads at lower ELs return zero. Reads from the memory
	 * mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr_cg1rz(1U);
#else
	write_amcr_cg1rz(0U);
#endif
}

/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(idx < read_amcgcr_cg0nc());

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val` */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(idx < read_amcgcr_cg0nc());

	amu_group0_cnt_write_internal(idx, val);
	isb();
}
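
/*
 * Illustrative sketch (assumption, not a call site in this file): sampling
 * architected counter 0, which counts processor cycles under FEAT_AMUv1:
 *
 *	uint64_t cycles = amu_group0_cnt_read(0U);
 */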

#if ENABLE_AMU_AUXILIARY_COUNTERS
/* Read the group 1 counter identified by the given `idx` */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_cg1nc());

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val` */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_cg1nc());

	amu_group1_cnt_write_internal(idx, val);
	isb();
}
#endif

static void *amu_context_save(const void *arg)
{
	uint32_t i;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint32_t id_pfr0_amu; /* AMU version */
	uint32_t amcgcr_cg0nc; /* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint32_t amcfgr_ncg; /* Number of counter groups */
	uint32_t amcgcr_cg1nc; /* Number of group 1 counters */
#endif

	id_pfr0_amu = read_id_pfr0_amu();
	if (id_pfr0_amu == ID_PFR0_AMU_NOT_SUPPORTED) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcgcr_cg0nc = read_amcgcr_cg0nc();

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcfgr_ncg = read_amcfgr_ncg();
	amcgcr_cg1nc = (amcfgr_ncg > 0U) ? read_amcgcr_cg1nc() : 0U;
#endif

	/*
	 * Disable all AMU counters.
	 */

	ctx->group0_enable = read_amcntenset0_px();
	write_amcntenclr0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_ncg > 0U) {
		ctx->group1_enable = read_amcntenset1_px();
		write_amcntenclr1_px(ctx->group1_enable);
	}
#endif

	/*
	 * Save the counters to the local context.
	 */

	isb(); /* Ensure counters have been stopped */

	for (i = 0U; i < amcgcr_cg0nc; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_cg1nc; i++) {
		ctx->group1_cnts[i] = amu_group1_cnt_read(i);
	}
#endif

	return (void *)0;
}

static void *amu_context_restore(const void *arg)
{
	uint32_t i;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint32_t id_pfr0_amu; /* AMU version */

	uint32_t amcfgr_ncg; /* Number of counter groups */
	uint32_t amcgcr_cg0nc; /* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint32_t amcgcr_cg1nc; /* Number of group 1 counters */
#endif

	id_pfr0_amu = read_id_pfr0_amu();
	if (id_pfr0_amu == ID_PFR0_AMU_NOT_SUPPORTED) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcfgr_ncg = read_amcfgr_ncg();
	amcgcr_cg0nc = read_amcgcr_cg0nc();

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcgcr_cg1nc = (amcfgr_ncg > 0U) ? read_amcgcr_cg1nc() : 0U;
#endif

	/*
	 * Sanity check that all counters were disabled when the context was
	 * previously saved.
	 */

	assert(read_amcntenset0_px() == 0U);

	if (amcfgr_ncg > 0U) {
		assert(read_amcntenset1_px() == 0U);
	}

	/*
	 * Restore the counter values from the local context.
	 */

	for (i = 0U; i < amcgcr_cg0nc; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_cg1nc; i++) {
		amu_group1_cnt_write(i, ctx->group1_cnts[i]);
	}
#endif

	/*
	 * Re-enable counters that were disabled during context save.
	 */

	write_amcntenset0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_ncg > 0U) {
		write_amcntenset1_px(ctx->group1_enable);
	}
#endif

	return (void *)0;
}
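
/*
 * Hook the save/restore handlers into the EL3 pub/sub framework so that they
 * run at the start and end of a PSCI suspend-to-power-down sequence.
 */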
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);