blob: 89487986d1c81ca72e55491c823c4c0382302ea4 [file] [log] [blame]
Dimitris Papastamosdda48b02017-10-17 14:03:14 +01001/*
johpow01fa59c6f2020-10-02 13:41:11 -05002 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
Dimitris Papastamosdda48b02017-10-17 14:03:14 +01003 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
Alexei Fedorov7e6306b2020-07-14 08:17:56 +01007#include <assert.h>
Chris Kaya5fde282021-05-26 11:58:23 +01008#include <cdefs.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +00009#include <stdbool.h>
10
Dimitris Papastamosdda48b02017-10-17 14:03:14 +010011#include <arch.h>
12#include <arch_helpers.h>
Alexei Fedorov7e6306b2020-07-14 08:17:56 +010013
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000014#include <lib/el3_runtime/pubsub_events.h>
15#include <lib/extensions/amu.h>
16#include <lib/extensions/amu_private.h>
Dimitris Papastamoseaf3e6d2017-11-28 13:47:06 +000017
Alexei Fedorov7e6306b2020-07-14 08:17:56 +010018#include <plat/common/platform.h>
Dimitris Papastamoseaf3e6d2017-11-28 13:47:06 +000019
20static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
Dimitris Papastamosdda48b02017-10-17 14:03:14 +010021
Chris Kaya5fde282021-05-26 11:58:23 +010022static inline __unused uint32_t read_id_pfr0_amu(void)
Dimitris Papastamosdda48b02017-10-17 14:03:14 +010023{
Chris Kaya5fde282021-05-26 11:58:23 +010024 return (read_id_pfr0() >> ID_PFR0_AMU_SHIFT) &
johpow01fa59c6f2020-10-02 13:41:11 -050025 ID_PFR0_AMU_MASK;
Joel Hutton0dcdd8d2017-12-21 15:21:20 +000026}
27
Chris Kaya5fde282021-05-26 11:58:23 +010028static inline __unused void write_hcptr_tam(uint32_t value)
Alexei Fedorov7e6306b2020-07-14 08:17:56 +010029{
Chris Kaya5fde282021-05-26 11:58:23 +010030 write_hcptr((read_hcptr() & ~TAM_BIT) |
31 ((value << TAM_SHIFT) & TAM_BIT));
32}
Alexei Fedorov7e6306b2020-07-14 08:17:56 +010033
Chris Kaya5fde282021-05-26 11:58:23 +010034static inline __unused void write_amcr_cg1rz(uint32_t value)
35{
36 write_amcr((read_amcr() & ~AMCR_CG1RZ_BIT) |
37 ((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
38}
39
40static inline __unused uint32_t read_amcfgr_ncg(void)
41{
42 return (read_amcfgr() >> AMCFGR_NCG_SHIFT) &
43 AMCFGR_NCG_MASK;
44}
45
Chris Kaya40141d2021-05-25 12:33:18 +010046static inline __unused uint32_t read_amcgcr_cg0nc(void)
47{
48 return (read_amcgcr() >> AMCGCR_CG0NC_SHIFT) &
49 AMCGCR_CG0NC_MASK;
50}
51
Chris Kaya5fde282021-05-26 11:58:23 +010052static inline __unused uint32_t read_amcgcr_cg1nc(void)
53{
54 return (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
55 AMCGCR_CG1NC_MASK;
56}
57
58static inline __unused uint32_t read_amcntenset0_px(void)
59{
60 return (read_amcntenset0() >> AMCNTENSET0_Pn_SHIFT) &
61 AMCNTENSET0_Pn_MASK;
62}
63
64static inline __unused uint32_t read_amcntenset1_px(void)
65{
66 return (read_amcntenset1() >> AMCNTENSET1_Pn_SHIFT) &
67 AMCNTENSET1_Pn_MASK;
68}
69
70static inline __unused void write_amcntenset0_px(uint32_t px)
71{
72 uint32_t value = read_amcntenset0();
73
74 value &= ~AMCNTENSET0_Pn_MASK;
75 value |= (px << AMCNTENSET0_Pn_SHIFT) &
76 AMCNTENSET0_Pn_MASK;
77
78 write_amcntenset0(value);
79}
80
81static inline __unused void write_amcntenset1_px(uint32_t px)
82{
83 uint32_t value = read_amcntenset1();
84
85 value &= ~AMCNTENSET1_Pn_MASK;
86 value |= (px << AMCNTENSET1_Pn_SHIFT) &
87 AMCNTENSET1_Pn_MASK;
88
89 write_amcntenset1(value);
90}
91
92static inline __unused void write_amcntenclr0_px(uint32_t px)
93{
94 uint32_t value = read_amcntenclr0();
95
96 value &= ~AMCNTENCLR0_Pn_MASK;
97 value |= (px << AMCNTENCLR0_Pn_SHIFT) & AMCNTENCLR0_Pn_MASK;
98
99 write_amcntenclr0(value);
100}
101
102static inline __unused void write_amcntenclr1_px(uint32_t px)
103{
104 uint32_t value = read_amcntenclr1();
105
106 value &= ~AMCNTENCLR1_Pn_MASK;
107 value |= (px << AMCNTENCLR1_Pn_SHIFT) & AMCNTENCLR1_Pn_MASK;
108
109 write_amcntenclr1(value);
110}
111
112static bool amu_supported(void)
113{
114 return read_id_pfr0_amu() >= ID_PFR0_AMU_V1;
115}
116
117static bool amu_v1p1_supported(void)
118{
119 return read_id_pfr0_amu() >= ID_PFR0_AMU_V1P1;
120}
121
#if ENABLE_AMU_AUXILIARY_COUNTERS
/* Group 1 (auxiliary) counters are present when AMCFGR.NCG is non-zero. */
static bool amu_group1_supported(void)
{
	return read_amcfgr_ncg() != 0U;
}
#endif
128
/*
 * Enable counters. This function is meant to be invoked
 * by the context management library before exiting from EL3.
 *
 * `el2_unused` indicates that no software runs at EL2, in which case the
 * EL2 trap for Activity Monitor accesses (HCPTR.TAM) is cleared here so
 * that lower ELs can reach the AMU registers directly.
 */
void amu_enable(bool el2_unused)
{
	/* Nothing to do on cores without an AMU. */
	if (!amu_supported()) {
		return;
	}

	if (el2_unused) {
		/*
		 * Non-secure access from EL0 or EL1 to the Activity Monitor
		 * registers do not trap to EL2.
		 */
		write_hcptr_tam(0U);
	}

	/* Enable group 0 counters: one enable bit per implemented counter. */
	write_amcntenset0_px((UINT32_C(1) << read_amcgcr_cg0nc()) - 1U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amu_group1_supported()) {
		/* Enable group 1 counters */
		write_amcntenset1_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (!amu_v1p1_supported()) {
		return;
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
	 * counters at all but the highest implemented EL. This is controlled
	 * with the AMU_RESTRICT_COUNTERS compile time flag, when set, system
	 * register reads at lower ELs return zero. Reads from the memory
	 * mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr_cg1rz(1U);
#else
	write_amcr_cg1rz(0U);
#endif
}
176
/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	/* Callers must ensure the AMU exists and `idx` is an implemented counter. */
	assert(amu_supported());
	assert(idx < read_amcgcr_cg0nc());

	return amu_group0_cnt_read_internal(idx);
}
185
/* Write the group 0 counter identified by the given `idx` with `val` */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	/* Callers must ensure the AMU exists and `idx` is an implemented counter. */
	assert(amu_supported());
	assert(idx < read_amcgcr_cg0nc());

	amu_group0_cnt_write_internal(idx, val);
	/* Synchronize so the new counter value is visible before returning. */
	isb();
}
195
#if ENABLE_AMU_AUXILIARY_COUNTERS
/* Read the group 1 counter identified by the given `idx` */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	/* Callers must ensure group 1 exists and `idx` is an implemented counter. */
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_cg1nc());

	return amu_group1_cnt_read_internal(idx);
}
206
/* Write the group 1 counter identified by the given `idx` with `val` */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	/* Callers must ensure group 1 exists and `idx` is an implemented counter. */
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_cg1nc());

	amu_group1_cnt_write_internal(idx, val);
	/* Synchronize so the new counter value is visible before returning. */
	isb();
}
#endif
Dimitris Papastamoseaf3e6d2017-11-28 13:47:06 +0000218
/*
 * PSCI power-down hook: save this core's AMU counters into its per-core
 * context slot. Counters are first disabled (and the disable synchronized
 * with an isb) so the saved snapshot cannot be overtaken by further counting.
 * Returns (void *)0 on success, (void *)-1 when no AMU is implemented.
 */
static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

	/* Assert that group 0/1 counter configuration is what we expect */
	assert(read_amcntenset0_px() ==
	       ((UINT32_C(1) << read_amcgcr_cg0nc()) - 1U));

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amu_group1_supported()) {
		assert(read_amcntenset1_px() == AMU_GROUP1_COUNTERS_MASK);
	}
#endif
	/*
	 * Disable group 0/1 counters to avoid other observers like SCP sampling
	 * counter values from the future via the memory mapped view.
	 */
	write_amcntenclr0_px((UINT32_C(1) << read_amcgcr_cg0nc()) - 1U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amu_group1_supported()) {
		write_amcntenclr1_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	/* Ensure the disables have taken effect before sampling the counters. */
	isb();

	/* Save all group 0 counters */
	for (i = 0U; i < read_amcgcr_cg0nc(); i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amu_group1_supported()) {
		/* Save only the group 1 counters selected by the platform mask. */
		for (i = 0U; i < read_amcgcr_cg1nc(); i++) {
			if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
				ctx->group1_cnts[i] = amu_group1_cnt_read(i);
			}
		}
	}
#endif

	return (void *)0;
}
269
/*
 * PSCI power-up hook: restore this core's AMU counters from the per-core
 * context saved by `amu_context_save()`, then re-enable them. Returns
 * (void *)0 on success, (void *)-1 when no AMU is implemented.
 */
static void *amu_context_restore(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

	/* Counters were disabled in `amu_context_save()` */
	assert(read_amcntenset0_px() == 0U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amu_group1_supported()) {
		assert(read_amcntenset1_px() == 0U);
	}
#endif

	/* Restore all group 0 counters */
	for (i = 0U; i < read_amcgcr_cg0nc(); i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

	/* Restore group 0 counter configuration */
	write_amcntenset0_px((UINT32_C(1) << read_amcgcr_cg0nc()) - 1U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amu_group1_supported()) {
		/* Restore only the group 1 counters selected by the platform mask. */
		for (i = 0U; i < read_amcgcr_cg1nc(); i++) {
			if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
				amu_group1_cnt_write(i, ctx->group1_cnts[i]);
			}
		}

		/* Restore group 1 counter configuration */
		write_amcntenset1_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	return (void *)0;
}
Dimitris Papastamoseaf3e6d2017-11-28 13:47:06 +0000312
/* Save/restore the AMU context around PSCI power-down suspend. */
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);