/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <stdbool.h>

#include <arch.h>
#include <arch_helpers.h>

#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/amu_private.h>

#include <plat/common/platform.h>

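/*
 * AArch32 support for the Activity Monitors Extension (FEAT_AMUv1 and
 * FEAT_AMUv1p1): feature detection, counter enablement before exiting
 * EL3, and per-core counter context save/restore around power-down
 * suspend (see the PSCI event subscriptions at the end of this file).
 */
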
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];

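/*
 * Accessors for individual fields of the AMU system registers. Each read
 * helper extracts one field, and each write helper performs a
 * read-modify-write of one field. They are marked __unused because,
 * depending on the build options, some of them may not be referenced.
 */
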
static inline __unused uint32_t read_id_pfr0_amu(void)
{
	return (read_id_pfr0() >> ID_PFR0_AMU_SHIFT) &
		ID_PFR0_AMU_MASK;
}

static inline __unused void write_hcptr_tam(uint32_t value)
{
	write_hcptr((read_hcptr() & ~TAM_BIT) |
		((value << TAM_SHIFT) & TAM_BIT));
}

static inline __unused void write_amcr_cg1rz(uint32_t value)
{
	write_amcr((read_amcr() & ~AMCR_CG1RZ_BIT) |
		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}

static inline __unused uint32_t read_amcfgr_ncg(void)
{
	return (read_amcfgr() >> AMCFGR_NCG_SHIFT) &
		AMCFGR_NCG_MASK;
}

static inline __unused uint32_t read_amcgcr_cg0nc(void)
{
	return (read_amcgcr() >> AMCGCR_CG0NC_SHIFT) &
		AMCGCR_CG0NC_MASK;
}

static inline __unused uint32_t read_amcgcr_cg1nc(void)
{
	return (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
		AMCGCR_CG1NC_MASK;
}

static inline __unused uint32_t read_amcntenset0_px(void)
{
	return (read_amcntenset0() >> AMCNTENSET0_Pn_SHIFT) &
		AMCNTENSET0_Pn_MASK;
}

static inline __unused uint32_t read_amcntenset1_px(void)
{
	return (read_amcntenset1() >> AMCNTENSET1_Pn_SHIFT) &
		AMCNTENSET1_Pn_MASK;
}

static inline __unused void write_amcntenset0_px(uint32_t px)
{
	uint32_t value = read_amcntenset0();

	value &= ~AMCNTENSET0_Pn_MASK;
	value |= (px << AMCNTENSET0_Pn_SHIFT) &
		AMCNTENSET0_Pn_MASK;

	write_amcntenset0(value);
}

static inline __unused void write_amcntenset1_px(uint32_t px)
{
	uint32_t value = read_amcntenset1();

	value &= ~AMCNTENSET1_Pn_MASK;
	value |= (px << AMCNTENSET1_Pn_SHIFT) &
		AMCNTENSET1_Pn_MASK;

	write_amcntenset1(value);
}

static inline __unused void write_amcntenclr0_px(uint32_t px)
{
	uint32_t value = read_amcntenclr0();

	value &= ~AMCNTENCLR0_Pn_MASK;
	value |= (px << AMCNTENCLR0_Pn_SHIFT) & AMCNTENCLR0_Pn_MASK;

	write_amcntenclr0(value);
}

static inline __unused void write_amcntenclr1_px(uint32_t px)
{
	uint32_t value = read_amcntenclr1();

	value &= ~AMCNTENCLR1_Pn_MASK;
	value |= (px << AMCNTENCLR1_Pn_SHIFT) & AMCNTENCLR1_Pn_MASK;

	write_amcntenclr1(value);
}

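/*
 * Feature detection, based on the ID_PFR0.AMU field: values of
 * ID_PFR0_AMU_V1 and above indicate that FEAT_AMUv1 is implemented, and
 * values of ID_PFR0_AMU_V1P1 and above that FEAT_AMUv1p1 is implemented.
 */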
static bool amu_supported(void)
{
	return read_id_pfr0_amu() >= ID_PFR0_AMU_V1;
}

static bool amu_v1p1_supported(void)
{
	return read_id_pfr0_amu() >= ID_PFR0_AMU_V1P1;
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
static bool amu_group1_supported(void)
{
	return read_amcfgr_ncg() > 0U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked
 * by the context management library before exiting from EL3.
 */
void amu_enable(bool el2_unused)
{
	if (!amu_supported()) {
		return;
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		/* Check for the presence of group 1 counters */
		if (!amu_group1_supported()) {
			ERROR("AMU Counter Group 1 is not implemented\n");
			panic();
		}

		/* Check the number of group 1 counters */
		uint32_t cnt_num = read_amcgcr_cg1nc();

		VERBOSE("%s%u. %s%u\n",
			"Number of AMU Group 1 Counters ", cnt_num,
			"Requested number ", AMU_GROUP1_NR_COUNTERS);

		if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
			ERROR("%s%u is less than %s%u\n",
				"Number of AMU Group 1 Counters ", cnt_num,
				"Requested number ", AMU_GROUP1_NR_COUNTERS);
			panic();
		}
	}
#endif

	if (el2_unused) {
		/*
		 * Non-secure accesses from EL0 or EL1 to the Activity
		 * Monitor registers do not trap to EL2.
		 */
		write_hcptr_tam(0U);
	}

	/*
	 * Enable all group 0 counters: AMCGCR.CG0NC reports the number of
	 * implemented group 0 counters, so ((1 << CG0NC) - 1) forms a mask
	 * with one enable bit per counter.
	 */
	write_amcntenset0_px((UINT32_C(1) << read_amcgcr_cg0nc()) - 1U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		/* Enable group 1 counters */
		write_amcntenset1_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (!amu_v1p1_supported()) {
		return;
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
	 * counters at all but the highest implemented EL. This is controlled
	 * with the AMU_RESTRICT_COUNTERS compile time flag; when set, system
	 * register reads at lower ELs return zero. Reads from the memory
	 * mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr_cg1rz(1U);
#else
	write_amcr_cg1rz(0U);
#endif
}
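
/*
 * Usage sketch (hypothetical call site, for illustration only): the
 * context management code is expected to call
 *
 *	amu_enable(el2_unused);
 *
 * once per core before the first exit to the non-secure world, where
 * `el2_unused` indicates that no software occupies EL2 and its trap
 * controls are therefore managed from EL3.
 */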

/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(idx < read_amcgcr_cg0nc());

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val` */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(idx < read_amcgcr_cg0nc());

	amu_group0_cnt_write_internal(idx, val);
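	/* Synchronize so the counter write takes effect before returning. */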
	isb();
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
/* Read the group 1 counter identified by the given `idx` */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val` */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_cnt_write_internal(idx, val);
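	/* Synchronize so the counter write takes effect before returning. */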
	isb();
}
#endif

static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		if (!amu_group1_supported()) {
			return (void *)-1;
		}
	}
#endif

	/* Assert that group 0/1 counter configuration is what we expect */
	assert(read_amcntenset0_px() ==
		((UINT32_C(1) << read_amcgcr_cg0nc()) - 1U));

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		assert(read_amcntenset1_px() == AMU_GROUP1_COUNTERS_MASK);
	}
#endif
	/*
	 * Disable group 0/1 counters to avoid other observers, such as the
	 * SCP, sampling counter values from the future via the memory-mapped
	 * view.
	 */
	write_amcntenclr0_px((UINT32_C(1) << read_amcgcr_cg0nc()) - 1U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		write_amcntenclr1_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	isb();

	/* Save all group 0 counters */
	for (i = 0U; i < read_amcgcr_cg0nc(); i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		/* Save group 1 counters */
		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
			if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
				ctx->group1_cnts[i] = amu_group1_cnt_read(i);
			}
		}
	}
#endif

	return (void *)0;
}

static void *amu_context_restore(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		if (!amu_group1_supported()) {
			return (void *)-1;
		}
	}
#endif

	/* Counters were disabled in `amu_context_save()` */
	assert(read_amcntenset0_px() == 0U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		assert(read_amcntenset1_px() == 0U);
	}
#endif

	/* Restore all group 0 counters */
	for (i = 0U; i < read_amcgcr_cg0nc(); i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

	/* Restore group 0 counter configuration */
	write_amcntenset0_px((UINT32_C(1) << read_amcgcr_cg0nc()) - 1U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		/* Restore group 1 counters */
		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
			if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
				amu_group1_cnt_write(i, ctx->group1_cnts[i]);
			}
		}

		/* Restore group 1 counter configuration */
		write_amcntenset1_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	return (void *)0;
}

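/*
 * Save the AMU context when a core begins a power-down suspend, and
 * restore it when the suspend finishes and the core resumes.
 */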
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);