blob: 2fc35094f611c25905fd25bfa6660879008331bf [file] [log] [blame]
Dimitris Papastamosdda48b02017-10-17 14:03:14 +01001/*
johpow01fa59c6f2020-10-02 13:41:11 -05002 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
Dimitris Papastamosdda48b02017-10-17 14:03:14 +01003 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
Alexei Fedorov7e6306b2020-07-14 08:17:56 +01007#include <assert.h>
Chris Kaya5fde282021-05-26 11:58:23 +01008#include <cdefs.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +00009#include <stdbool.h>
10
Dimitris Papastamosdda48b02017-10-17 14:03:14 +010011#include <arch.h>
12#include <arch_helpers.h>
Alexei Fedorov7e6306b2020-07-14 08:17:56 +010013
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000014#include <lib/el3_runtime/pubsub_events.h>
15#include <lib/extensions/amu.h>
16#include <lib/extensions/amu_private.h>
Dimitris Papastamoseaf3e6d2017-11-28 13:47:06 +000017
Alexei Fedorov7e6306b2020-07-14 08:17:56 +010018#include <plat/common/platform.h>
Dimitris Papastamoseaf3e6d2017-11-28 13:47:06 +000019
/*
 * Per-core AMU counter context, saved by amu_context_save() before a
 * power-down suspend and restored by amu_context_restore() on wake-up.
 */
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
Dimitris Papastamosdda48b02017-10-17 14:03:14 +010021
Chris Kaya5fde282021-05-26 11:58:23 +010022static inline __unused uint32_t read_id_pfr0_amu(void)
Dimitris Papastamosdda48b02017-10-17 14:03:14 +010023{
Chris Kaya5fde282021-05-26 11:58:23 +010024 return (read_id_pfr0() >> ID_PFR0_AMU_SHIFT) &
johpow01fa59c6f2020-10-02 13:41:11 -050025 ID_PFR0_AMU_MASK;
Joel Hutton0dcdd8d2017-12-21 15:21:20 +000026}
27
Chris Kaya5fde282021-05-26 11:58:23 +010028static inline __unused void write_hcptr_tam(uint32_t value)
Alexei Fedorov7e6306b2020-07-14 08:17:56 +010029{
Chris Kaya5fde282021-05-26 11:58:23 +010030 write_hcptr((read_hcptr() & ~TAM_BIT) |
31 ((value << TAM_SHIFT) & TAM_BIT));
32}
Alexei Fedorov7e6306b2020-07-14 08:17:56 +010033
Chris Kaya5fde282021-05-26 11:58:23 +010034static inline __unused void write_amcr_cg1rz(uint32_t value)
35{
36 write_amcr((read_amcr() & ~AMCR_CG1RZ_BIT) |
37 ((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
38}
39
40static inline __unused uint32_t read_amcfgr_ncg(void)
41{
42 return (read_amcfgr() >> AMCFGR_NCG_SHIFT) &
43 AMCFGR_NCG_MASK;
44}
45
46static inline __unused uint32_t read_amcgcr_cg1nc(void)
47{
48 return (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
49 AMCGCR_CG1NC_MASK;
50}
51
52static inline __unused uint32_t read_amcntenset0_px(void)
53{
54 return (read_amcntenset0() >> AMCNTENSET0_Pn_SHIFT) &
55 AMCNTENSET0_Pn_MASK;
56}
57
58static inline __unused uint32_t read_amcntenset1_px(void)
59{
60 return (read_amcntenset1() >> AMCNTENSET1_Pn_SHIFT) &
61 AMCNTENSET1_Pn_MASK;
62}
63
64static inline __unused void write_amcntenset0_px(uint32_t px)
65{
66 uint32_t value = read_amcntenset0();
67
68 value &= ~AMCNTENSET0_Pn_MASK;
69 value |= (px << AMCNTENSET0_Pn_SHIFT) &
70 AMCNTENSET0_Pn_MASK;
71
72 write_amcntenset0(value);
73}
74
75static inline __unused void write_amcntenset1_px(uint32_t px)
76{
77 uint32_t value = read_amcntenset1();
78
79 value &= ~AMCNTENSET1_Pn_MASK;
80 value |= (px << AMCNTENSET1_Pn_SHIFT) &
81 AMCNTENSET1_Pn_MASK;
82
83 write_amcntenset1(value);
84}
85
86static inline __unused void write_amcntenclr0_px(uint32_t px)
87{
88 uint32_t value = read_amcntenclr0();
89
90 value &= ~AMCNTENCLR0_Pn_MASK;
91 value |= (px << AMCNTENCLR0_Pn_SHIFT) & AMCNTENCLR0_Pn_MASK;
92
93 write_amcntenclr0(value);
94}
95
96static inline __unused void write_amcntenclr1_px(uint32_t px)
97{
98 uint32_t value = read_amcntenclr1();
99
100 value &= ~AMCNTENCLR1_Pn_MASK;
101 value |= (px << AMCNTENCLR1_Pn_SHIFT) & AMCNTENCLR1_Pn_MASK;
102
103 write_amcntenclr1(value);
104}
105
106static bool amu_supported(void)
107{
108 return read_id_pfr0_amu() >= ID_PFR0_AMU_V1;
109}
110
111static bool amu_v1p1_supported(void)
112{
113 return read_id_pfr0_amu() >= ID_PFR0_AMU_V1P1;
114}
115
#if AMU_GROUP1_NR_COUNTERS
/*
 * True when the AMU implements at least one auxiliary (group 1) counter
 * group, i.e. AMCFGR.NCG is non-zero.
 *
 * NOTE(review): the guard was `#if ENABLE_AMU_AUXILIARY_COUNTERS`, but every
 * caller in this file (amu_enable, amu_context_save, amu_context_restore)
 * is compiled under `#if AMU_GROUP1_NR_COUNTERS`, so a build with
 * AMU_GROUP1_NR_COUNTERS set and ENABLE_AMU_AUXILIARY_COUNTERS unset would
 * fail to link. The guard is aligned with the call sites here.
 */
static bool amu_group1_supported(void)
{
	return read_amcfgr_ncg() > 0U;
}
#endif
122
/*
 * Enable AMU counters. This function is meant to be invoked
 * by the context management library before exiting from EL3,
 * on a per-core basis. Silently does nothing when the AMU is
 * not implemented; panics when group 1 counters were requested
 * at build time but the hardware does not provide enough of them.
 */
void amu_enable(bool el2_unused)
{
	/* No AMU on this core: nothing to configure. */
	if (!amu_supported()) {
		return;
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Check and set presence of group 1 counters */
	if (!amu_group1_supported()) {
		ERROR("AMU Counter Group 1 is not implemented\n");
		panic();
	}

	/* Check number of group 1 counters */
	uint32_t cnt_num = read_amcgcr_cg1nc();
	VERBOSE("%s%u. %s%u\n",
		"Number of AMU Group 1 Counters ", cnt_num,
		"Requested number ", AMU_GROUP1_NR_COUNTERS);

	/* The build-time request must fit within the implemented counters. */
	if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
		ERROR("%s%u is less than %s%u\n",
			"Number of AMU Group 1 Counters ", cnt_num,
			"Requested number ", AMU_GROUP1_NR_COUNTERS);
		panic();
	}
#endif

	if (el2_unused) {
		/*
		 * Non-secure access from EL0 or EL1 to the Activity Monitor
		 * registers do not trap to EL2 (clear HCPTR.TAM).
		 */
		write_hcptr_tam(0U);
	}

	/* Enable group 0 counters */
	write_amcntenset0_px(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Enable group 1 counters */
	write_amcntenset1_px(AMU_GROUP1_COUNTERS_MASK);
#endif

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (!amu_v1p1_supported()) {
		return;
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
	 * counters at all but the highest implemented EL. This is controlled
	 * with the AMU_RESTRICT_COUNTERS compile time flag, when set, system
	 * register reads at lower ELs return zero. Reads from the memory
	 * mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr_cg1rz(1U);
#else
	write_amcr_cg1rz(0U);
#endif
}
189
190/* Read the group 0 counter identified by the given `idx`. */
Chris Kayf13c6b52021-05-24 21:00:07 +0100191static uint64_t amu_group0_cnt_read(unsigned int idx)
Joel Hutton0dcdd8d2017-12-21 15:21:20 +0000192{
Chris Kaya5fde282021-05-26 11:58:23 +0100193 assert(amu_supported());
Alexei Fedorov7e6306b2020-07-14 08:17:56 +0100194 assert(idx < AMU_GROUP0_NR_COUNTERS);
Joel Hutton0dcdd8d2017-12-21 15:21:20 +0000195
196 return amu_group0_cnt_read_internal(idx);
197}
198
Alexei Fedorov7e6306b2020-07-14 08:17:56 +0100199/* Write the group 0 counter identified by the given `idx` with `val` */
Chris Kayf13c6b52021-05-24 21:00:07 +0100200static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
Joel Hutton0dcdd8d2017-12-21 15:21:20 +0000201{
Chris Kaya5fde282021-05-26 11:58:23 +0100202 assert(amu_supported());
Alexei Fedorov7e6306b2020-07-14 08:17:56 +0100203 assert(idx < AMU_GROUP0_NR_COUNTERS);
Joel Hutton0dcdd8d2017-12-21 15:21:20 +0000204
205 amu_group0_cnt_write_internal(idx, val);
206 isb();
207}
208
#if AMU_GROUP1_NR_COUNTERS
/* Read the group 1 counter identified by the given `idx`. */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	uint64_t cnt = amu_group1_cnt_read_internal(idx);

	return cnt;
}

/* Write the group 1 counter identified by the given `idx` with `val`. */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_cnt_write_internal(idx, val);

	/* Ensure the counter write has taken effect before continuing. */
	isb();
}
#endif /* AMU_GROUP1_NR_COUNTERS */
Dimitris Papastamoseaf3e6d2017-11-28 13:47:06 +0000231
/*
 * PSCI suspend hook: save this core's AMU counters into amu_ctxs[] before
 * power down. Returns 0 on success, or -1 (cast to void *) when the AMU
 * (or the expected group 1 support) is not present. `arg` is unused.
 */
static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/* Assert that group 0/1 counter configuration is what we expect */
	assert(read_amcntenset0_px() == AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_px() == AMU_GROUP1_COUNTERS_MASK);
#endif
	/*
	 * Disable group 0/1 counters to avoid other observers like SCP sampling
	 * counter values from the future via the memory mapped view.
	 */
	write_amcntenclr0_px(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	write_amcntenclr1_px(AMU_GROUP1_COUNTERS_MASK);
#endif
	/* Make sure the disables take effect before reading the counters. */
	isb();

	/* Save all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Save only the group 1 counters enabled at build time. */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
			ctx->group1_cnts[i] = amu_group1_cnt_read(i);
		}
	}
#endif
	return (void *)0;
}
278
279static void *amu_context_restore(const void *arg)
280{
Alexei Fedorov7e6306b2020-07-14 08:17:56 +0100281 struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
282 unsigned int i;
Dimitris Papastamoseaf3e6d2017-11-28 13:47:06 +0000283
Chris Kaya5fde282021-05-26 11:58:23 +0100284 if (!amu_supported()) {
Dimitris Papastamoseaf3e6d2017-11-28 13:47:06 +0000285 return (void *)-1;
Alexei Fedorov7e6306b2020-07-14 08:17:56 +0100286 }
Dimitris Papastamoseaf3e6d2017-11-28 13:47:06 +0000287
Alexei Fedorov7e6306b2020-07-14 08:17:56 +0100288#if AMU_GROUP1_NR_COUNTERS
Chris Kaya5fde282021-05-26 11:58:23 +0100289 if (amu_group1_supported()) {
Alexei Fedorov7e6306b2020-07-14 08:17:56 +0100290 return (void *)-1;
291 }
292#endif
Dimitris Papastamoseaf3e6d2017-11-28 13:47:06 +0000293 /* Counters were disabled in `amu_context_save()` */
Chris Kaya5fde282021-05-26 11:58:23 +0100294 assert(read_amcntenset0_px() == 0U);
Alexei Fedorov7e6306b2020-07-14 08:17:56 +0100295
296#if AMU_GROUP1_NR_COUNTERS
Chris Kaya5fde282021-05-26 11:58:23 +0100297 assert(read_amcntenset1_px() == 0U);
Alexei Fedorov7e6306b2020-07-14 08:17:56 +0100298#endif
Dimitris Papastamoseaf3e6d2017-11-28 13:47:06 +0000299
Alexei Fedorov7e6306b2020-07-14 08:17:56 +0100300 /* Restore all group 0 counters */
301 for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
Joel Hutton0dcdd8d2017-12-21 15:21:20 +0000302 amu_group0_cnt_write(i, ctx->group0_cnts[i]);
Alexei Fedorov7e6306b2020-07-14 08:17:56 +0100303 }
Dimitris Papastamoseaf3e6d2017-11-28 13:47:06 +0000304
Alexei Fedorov7e6306b2020-07-14 08:17:56 +0100305 /* Restore group 0 counter configuration */
Chris Kaya5fde282021-05-26 11:58:23 +0100306 write_amcntenset0_px(AMU_GROUP0_COUNTERS_MASK);
Dimitris Papastamoseaf3e6d2017-11-28 13:47:06 +0000307
Alexei Fedorov7e6306b2020-07-14 08:17:56 +0100308#if AMU_GROUP1_NR_COUNTERS
309 /* Restore group 1 counters */
310 for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
311 if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
312 amu_group1_cnt_write(i, ctx->group1_cnts[i]);
313 }
314 }
315
316 /* Restore group 1 counter configuration */
Chris Kaya5fde282021-05-26 11:58:23 +0100317 write_amcntenset1_px(AMU_GROUP1_COUNTERS_MASK);
Alexei Fedorov7e6306b2020-07-14 08:17:56 +0100318#endif
319
Antonio Nino Diaz033b4bb2018-10-25 16:52:26 +0100320 return (void *)0;
Dimitris Papastamosdda48b02017-10-17 14:03:14 +0100321}
Dimitris Papastamoseaf3e6d2017-11-28 13:47:06 +0000322
/*
 * Hook the AMU context save/restore into the PSCI power-down suspend
 * sequence via the pubsub framework.
 */
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);