blob: e92b9f1303d98025832aea1bfdfe0dfab00fa0c1 [file] [log] [blame]
Dimitris Papastamosdda48b02017-10-17 14:03:14 +01001/*
johpow01fa59c6f2020-10-02 13:41:11 -05002 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
Dimitris Papastamosdda48b02017-10-17 14:03:14 +01003 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
Alexei Fedorov7e6306b2020-07-14 08:17:56 +01007#include <assert.h>
Chris Kaya5fde282021-05-26 11:58:23 +01008#include <cdefs.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +00009#include <stdbool.h>
10
Chris Kay26a79612021-05-24 20:35:26 +010011#include "../amu_private.h"
Dimitris Papastamosdda48b02017-10-17 14:03:14 +010012#include <arch.h>
13#include <arch_helpers.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000014#include <lib/el3_runtime/pubsub_events.h>
15#include <lib/extensions/amu.h>
Dimitris Papastamoseaf3e6d2017-11-28 13:47:06 +000016
Alexei Fedorov7e6306b2020-07-14 08:17:56 +010017#include <plat/common/platform.h>
Dimitris Papastamoseaf3e6d2017-11-28 13:47:06 +000018
Chris Kay26a79612021-05-24 20:35:26 +010019struct amu_ctx {
20 uint64_t group0_cnts[AMU_GROUP0_MAX_COUNTERS];
21#if ENABLE_AMU_AUXILIARY_COUNTERS
22 uint64_t group1_cnts[AMU_GROUP1_MAX_COUNTERS];
23#endif
24
25 uint16_t group0_enable;
26#if ENABLE_AMU_AUXILIARY_COUNTERS
27 uint16_t group1_enable;
28#endif
29};
30
31static struct amu_ctx amu_ctxs_[PLATFORM_CORE_COUNT];
32
33CASSERT((sizeof(amu_ctxs_[0].group0_enable) * CHAR_BIT) <= AMU_GROUP0_MAX_COUNTERS,
34 amu_ctx_group0_enable_cannot_represent_all_group0_counters);
35
36#if ENABLE_AMU_AUXILIARY_COUNTERS
37CASSERT((sizeof(amu_ctxs_[0].group1_enable) * CHAR_BIT) <= AMU_GROUP1_MAX_COUNTERS,
38 amu_ctx_group1_enable_cannot_represent_all_group1_counters);
39#endif
Dimitris Papastamosdda48b02017-10-17 14:03:14 +010040
Chris Kaya5fde282021-05-26 11:58:23 +010041static inline __unused uint32_t read_id_pfr0_amu(void)
Dimitris Papastamosdda48b02017-10-17 14:03:14 +010042{
Chris Kaya5fde282021-05-26 11:58:23 +010043 return (read_id_pfr0() >> ID_PFR0_AMU_SHIFT) &
johpow01fa59c6f2020-10-02 13:41:11 -050044 ID_PFR0_AMU_MASK;
Joel Hutton0dcdd8d2017-12-21 15:21:20 +000045}
46
Chris Kaya5fde282021-05-26 11:58:23 +010047static inline __unused void write_hcptr_tam(uint32_t value)
Alexei Fedorov7e6306b2020-07-14 08:17:56 +010048{
Chris Kaya5fde282021-05-26 11:58:23 +010049 write_hcptr((read_hcptr() & ~TAM_BIT) |
50 ((value << TAM_SHIFT) & TAM_BIT));
51}
Alexei Fedorov7e6306b2020-07-14 08:17:56 +010052
Chris Kaya5fde282021-05-26 11:58:23 +010053static inline __unused void write_amcr_cg1rz(uint32_t value)
54{
55 write_amcr((read_amcr() & ~AMCR_CG1RZ_BIT) |
56 ((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
57}
58
59static inline __unused uint32_t read_amcfgr_ncg(void)
60{
61 return (read_amcfgr() >> AMCFGR_NCG_SHIFT) &
62 AMCFGR_NCG_MASK;
63}
64
Chris Kaya40141d2021-05-25 12:33:18 +010065static inline __unused uint32_t read_amcgcr_cg0nc(void)
66{
67 return (read_amcgcr() >> AMCGCR_CG0NC_SHIFT) &
68 AMCGCR_CG0NC_MASK;
69}
70
Chris Kaya5fde282021-05-26 11:58:23 +010071static inline __unused uint32_t read_amcgcr_cg1nc(void)
72{
73 return (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
74 AMCGCR_CG1NC_MASK;
75}
76
77static inline __unused uint32_t read_amcntenset0_px(void)
78{
79 return (read_amcntenset0() >> AMCNTENSET0_Pn_SHIFT) &
80 AMCNTENSET0_Pn_MASK;
81}
82
83static inline __unused uint32_t read_amcntenset1_px(void)
84{
85 return (read_amcntenset1() >> AMCNTENSET1_Pn_SHIFT) &
86 AMCNTENSET1_Pn_MASK;
87}
88
89static inline __unused void write_amcntenset0_px(uint32_t px)
90{
91 uint32_t value = read_amcntenset0();
92
93 value &= ~AMCNTENSET0_Pn_MASK;
94 value |= (px << AMCNTENSET0_Pn_SHIFT) &
95 AMCNTENSET0_Pn_MASK;
96
97 write_amcntenset0(value);
98}
99
100static inline __unused void write_amcntenset1_px(uint32_t px)
101{
102 uint32_t value = read_amcntenset1();
103
104 value &= ~AMCNTENSET1_Pn_MASK;
105 value |= (px << AMCNTENSET1_Pn_SHIFT) &
106 AMCNTENSET1_Pn_MASK;
107
108 write_amcntenset1(value);
109}
110
111static inline __unused void write_amcntenclr0_px(uint32_t px)
112{
113 uint32_t value = read_amcntenclr0();
114
115 value &= ~AMCNTENCLR0_Pn_MASK;
116 value |= (px << AMCNTENCLR0_Pn_SHIFT) & AMCNTENCLR0_Pn_MASK;
117
118 write_amcntenclr0(value);
119}
120
121static inline __unused void write_amcntenclr1_px(uint32_t px)
122{
123 uint32_t value = read_amcntenclr1();
124
125 value &= ~AMCNTENCLR1_Pn_MASK;
126 value |= (px << AMCNTENCLR1_Pn_SHIFT) & AMCNTENCLR1_Pn_MASK;
127
128 write_amcntenclr1(value);
129}
130
Chris Kay26a79612021-05-24 20:35:26 +0100131static __unused bool amu_supported(void)
Chris Kaya5fde282021-05-26 11:58:23 +0100132{
133 return read_id_pfr0_amu() >= ID_PFR0_AMU_V1;
134}
135
#if ENABLE_AMU_AUXILIARY_COUNTERS
/*
 * Auxiliary (group 1) counters are present when more than one counter
 * group is implemented, i.e. when AMCFGR.NCG is non-zero.
 */
static __unused bool amu_group1_supported(void)
{
	uint32_t ncg = read_amcfgr_ncg();

	return ncg > 0U;
}
#endif
142
143/*
Chris Kay26a79612021-05-24 20:35:26 +0100144 * Enable counters. This function is meant to be invoked by the context
145 * management library before exiting from EL3.
Alexei Fedorov7e6306b2020-07-14 08:17:56 +0100146 */
Antonio Nino Diaz033b4bb2018-10-25 16:52:26 +0100147void amu_enable(bool el2_unused)
Joel Hutton0dcdd8d2017-12-21 15:21:20 +0000148{
Chris Kay26a79612021-05-24 20:35:26 +0100149 uint32_t id_pfr0_amu; /* AMU version */
150
151 uint32_t amcfgr_ncg; /* Number of counter groups */
152 uint32_t amcgcr_cg0nc; /* Number of group 0 counters */
153
154 uint32_t amcntenset0_px = 0x0; /* Group 0 enable mask */
155 uint32_t amcntenset1_px = 0x0; /* Group 1 enable mask */
156
157 id_pfr0_amu = read_id_pfr0_amu();
158 if (id_pfr0_amu == ID_PFR0_AMU_NOT_SUPPORTED) {
159 /*
160 * If the AMU is unsupported, nothing needs to be done.
161 */
162
Dimitris Papastamos525c37a2017-11-13 09:49:45 +0000163 return;
Alexei Fedorov7e6306b2020-07-14 08:17:56 +0100164 }
165
Dimitris Papastamos525c37a2017-11-13 09:49:45 +0000166 if (el2_unused) {
Dimitris Papastamos525c37a2017-11-13 09:49:45 +0000167 /*
Chris Kay26a79612021-05-24 20:35:26 +0100168 * HCPTR.TAM: Set to zero so any accesses to the Activity
169 * Monitor registers do not trap to EL2.
Dimitris Papastamos525c37a2017-11-13 09:49:45 +0000170 */
Chris Kaya5fde282021-05-26 11:58:23 +0100171 write_hcptr_tam(0U);
Dimitris Papastamosdda48b02017-10-17 14:03:14 +0100172 }
Dimitris Papastamos525c37a2017-11-13 09:49:45 +0000173
Chris Kay26a79612021-05-24 20:35:26 +0100174 /*
175 * Retrieve the number of architected counters. All of these counters
176 * are enabled by default.
177 */
Joel Hutton0dcdd8d2017-12-21 15:21:20 +0000178
Chris Kay26a79612021-05-24 20:35:26 +0100179 amcgcr_cg0nc = read_amcgcr_cg0nc();
180 amcntenset0_px = (UINT32_C(1) << (amcgcr_cg0nc)) - 1U;
181
182 assert(amcgcr_cg0nc <= AMU_AMCGCR_CG0NC_MAX);
183
184 /*
185 * Enable the requested counters.
186 */
187
188 write_amcntenset0_px(amcntenset0_px);
189
190 amcfgr_ncg = read_amcfgr_ncg();
191 if (amcfgr_ncg > 0U) {
192 write_amcntenset1_px(amcntenset1_px);
Chris Kay925fda42021-05-25 10:42:56 +0100193 }
johpow01fa59c6f2020-10-02 13:41:11 -0500194
195 /* Initialize FEAT_AMUv1p1 features if present. */
Chris Kay26a79612021-05-24 20:35:26 +0100196 if (id_pfr0_amu < ID_PFR0_AMU_V1P1) {
johpow01fa59c6f2020-10-02 13:41:11 -0500197 return;
198 }
199
200#if AMU_RESTRICT_COUNTERS
201 /*
202 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
203 * counters at all but the highest implemented EL. This is controlled
204 * with the AMU_RESTRICT_COUNTERS compile time flag, when set, system
205 * register reads at lower ELs return zero. Reads from the memory
206 * mapped view are unaffected.
207 */
208 VERBOSE("AMU group 1 counter access restricted.\n");
Chris Kaya5fde282021-05-26 11:58:23 +0100209 write_amcr_cg1rz(1U);
johpow01fa59c6f2020-10-02 13:41:11 -0500210#else
Chris Kaya5fde282021-05-26 11:58:23 +0100211 write_amcr_cg1rz(0U);
johpow01fa59c6f2020-10-02 13:41:11 -0500212#endif
Joel Hutton0dcdd8d2017-12-21 15:21:20 +0000213}
214
/*
 * Return the current value of the architected (group 0) counter selected by
 * `counter`. The index must be below the implemented group 0 counter count.
 */
static uint64_t amu_group0_cnt_read(unsigned int counter)
{
	assert(amu_supported());
	assert(counter < read_amcgcr_cg0nc());

	return amu_group0_cnt_read_internal(counter);
}
223
/*
 * Write `val` to the architected (group 0) counter selected by `counter`.
 * An ISB ensures the write completes before subsequent instructions.
 */
static void amu_group0_cnt_write(unsigned int counter, uint64_t val)
{
	assert(amu_supported());
	assert(counter < read_amcgcr_cg0nc());

	amu_group0_cnt_write_internal(counter, val);
	isb();
}
233
#if ENABLE_AMU_AUXILIARY_COUNTERS
/*
 * Return the current value of the auxiliary (group 1) counter selected by
 * `counter`. Group 1 must be implemented and the index in range.
 */
static uint64_t amu_group1_cnt_read(unsigned int counter)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(counter < read_amcgcr_cg1nc());

	return amu_group1_cnt_read_internal(counter);
}

/*
 * Write `val` to the auxiliary (group 1) counter selected by `counter`.
 * An ISB ensures the write completes before subsequent instructions.
 */
static void amu_group1_cnt_write(unsigned int counter, uint64_t val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(counter < read_amcgcr_cg1nc());

	amu_group1_cnt_write_internal(counter, val);
	isb();
}
#endif
Dimitris Papastamoseaf3e6d2017-11-28 13:47:06 +0000256
/*
 * PSCI suspend hook: save this core's AMU state before power-down.
 *
 * Disables all enabled counters (recording which were enabled), then reads
 * each counter value into the per-core context. Always returns NULL, as
 * required by the pubsub callback convention.
 */
static void *amu_context_save(const void *arg)
{
	uint32_t i;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint32_t id_pfr0_amu; /* AMU version */
	uint32_t amcgcr_cg0nc; /* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint32_t amcfgr_ncg; /* Number of counter groups */
	uint32_t amcgcr_cg1nc; /* Number of group 1 counters */
#endif

	id_pfr0_amu = read_id_pfr0_amu();
	if (id_pfr0_amu == ID_PFR0_AMU_NOT_SUPPORTED) {
		/* Nothing to save when the AMU is not implemented. */
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcgcr_cg0nc = read_amcgcr_cg0nc();

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcfgr_ncg = read_amcfgr_ncg();
	/* Group 1 counters only exist when more than one group is implemented. */
	amcgcr_cg1nc = (amcfgr_ncg > 0U) ? read_amcgcr_cg1nc() : 0U;
#endif

	/*
	 * Disable all AMU counters.
	 */

	ctx->group0_enable = read_amcntenset0_px();
	write_amcntenclr0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_ncg > 0U) {
		ctx->group1_enable = read_amcntenset1_px();
		write_amcntenclr1_px(ctx->group1_enable);
	}
#endif

	/*
	 * Save the counters to the local context.
	 */

	isb(); /* Ensure counters have been stopped */

	for (i = 0U; i < amcgcr_cg0nc; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_cg1nc; i++) {
		ctx->group1_cnts[i] = amu_group1_cnt_read(i);
	}
#endif

	return (void *)0;
}
319
/*
 * PSCI resume hook: restore this core's AMU state after power-up.
 *
 * Verifies all counters are still disabled, writes back the saved counter
 * values, then re-enables exactly the counters that were enabled at save
 * time. Always returns NULL, as required by the pubsub callback convention.
 */
static void *amu_context_restore(const void *arg)
{
	uint32_t i;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint32_t id_pfr0_amu; /* AMU version */

	uint32_t amcfgr_ncg; /* Number of counter groups */
	uint32_t amcgcr_cg0nc; /* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint32_t amcgcr_cg1nc; /* Number of group 1 counters */
#endif

	id_pfr0_amu = read_id_pfr0_amu();
	if (id_pfr0_amu == ID_PFR0_AMU_NOT_SUPPORTED) {
		/* Nothing to restore when the AMU is not implemented. */
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcfgr_ncg = read_amcfgr_ncg();
	amcgcr_cg0nc = read_amcgcr_cg0nc();

#if ENABLE_AMU_AUXILIARY_COUNTERS
	/* Group 1 counters only exist when more than one group is implemented. */
	amcgcr_cg1nc = (amcfgr_ncg > 0U) ? read_amcgcr_cg1nc() : 0U;
#endif

	/*
	 * Sanity check that all counters were disabled when the context was
	 * previously saved.
	 */

	assert(read_amcntenset0_px() == 0U);

	if (amcfgr_ncg > 0U) {
		assert(read_amcntenset1_px() == 0U);
	}

	/*
	 * Restore the counter values from the local context.
	 */

	for (i = 0U; i < amcgcr_cg0nc; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_cg1nc; i++) {
		amu_group1_cnt_write(i, ctx->group1_cnts[i]);
	}
#endif

	/*
	 * Re-enable counters that were disabled during context save.
	 */

	write_amcntenset0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_ncg > 0U) {
		write_amcntenset1_px(ctx->group1_enable);
	}
#endif

	return (void *)0;
}
Dimitris Papastamoseaf3e6d2017-11-28 13:47:06 +0000390
/* Hook the save/restore handlers into the PSCI suspend power-down events. */
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);