/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <stdbool.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>

#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/amu_private.h>

#include <plat/common/platform.h>

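/* Per-core AMU context, saved before and restored after a power-down. */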
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];

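/*
 * Field accessor helpers. Each read helper extracts a single named field
 * from its system register, and each write helper performs a
 * read-modify-write of that field, leaving all other bits of the register
 * untouched. For example, write_amcr_el0_cg1rz(1U) sets only AMCR_EL0.CG1RZ.
 */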
static inline __unused uint64_t read_id_aa64pfr0_el1_amu(void)
{
	return (read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
		ID_AA64PFR0_AMU_MASK;
}

static inline __unused uint64_t read_hcr_el2_amvoffen(void)
{
	return (read_hcr_el2() & HCR_AMVOFFEN_BIT) >>
		HCR_AMVOFFEN_SHIFT;
}

static inline __unused void write_cptr_el2_tam(uint64_t value)
{
	write_cptr_el2((read_cptr_el2() & ~CPTR_EL2_TAM_BIT) |
		((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
}

static inline __unused void write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
{
	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);

	value &= ~TAM_BIT;
	value |= (tam << TAM_SHIFT) & TAM_BIT;

	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, value);
}

static inline __unused void write_hcr_el2_amvoffen(uint64_t value)
{
	write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) |
		((value << HCR_AMVOFFEN_SHIFT) & HCR_AMVOFFEN_BIT));
}

static inline __unused void write_amcr_el0_cg1rz(uint64_t value)
{
	write_amcr_el0((read_amcr_el0() & ~AMCR_CG1RZ_BIT) |
		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}

static inline __unused uint64_t read_amcfgr_el0_ncg(void)
{
	return (read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT) &
		AMCFGR_EL0_NCG_MASK;
}

static inline __unused uint64_t read_amcg1idr_el0_voff(void)
{
	return (read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
		AMCG1IDR_VOFF_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg1nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
		AMCGCR_EL0_CG1NC_MASK;
}

static inline __unused uint64_t read_amcntenset0_el0_px(void)
{
	return (read_amcntenset0_el0() >> AMCNTENSET0_EL0_Pn_SHIFT) &
		AMCNTENSET0_EL0_Pn_MASK;
}

static inline __unused uint64_t read_amcntenset1_el0_px(void)
{
	return (read_amcntenset1_el0() >> AMCNTENSET1_EL0_Pn_SHIFT) &
		AMCNTENSET1_EL0_Pn_MASK;
}

static inline __unused void write_amcntenset0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset0_el0();

	value &= ~AMCNTENSET0_EL0_Pn_MASK;
	value |= (px << AMCNTENSET0_EL0_Pn_SHIFT) & AMCNTENSET0_EL0_Pn_MASK;

	write_amcntenset0_el0(value);
}

static inline __unused void write_amcntenset1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset1_el0();

	value &= ~AMCNTENSET1_EL0_Pn_MASK;
	value |= (px << AMCNTENSET1_EL0_Pn_SHIFT) & AMCNTENSET1_EL0_Pn_MASK;

	write_amcntenset1_el0(value);
}

static inline __unused void write_amcntenclr0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr0_el0();

	value &= ~AMCNTENCLR0_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR0_EL0_Pn_SHIFT) & AMCNTENCLR0_EL0_Pn_MASK;

	write_amcntenclr0_el0(value);
}

static inline __unused void write_amcntenclr1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr1_el0();

	value &= ~AMCNTENCLR1_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR1_EL0_Pn_SHIFT) & AMCNTENCLR1_EL0_Pn_MASK;

	write_amcntenclr1_el0(value);
}

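/*
 * ID_AA64PFR0_EL1.AMU is a minimum-version field: each successive value
 * implies support for the previous ones, so a `>=` comparison against the
 * value for a given architecture version detects that version or newer.
 */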
static bool amu_supported(void)
{
	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1;
}

static bool amu_v1p1_supported(void)
{
	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1P1;
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
static bool amu_group1_supported(void)
{
	return read_amcfgr_el0_ncg() > 0U;
}
#endif

/*
 * Enable the AMU counters. This function is meant to be invoked by the
 * context management library before exiting from EL3.
 */
void amu_enable(bool el2_unused, cpu_context_t *ctx)
{
	if (!amu_supported()) {
		return;
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		/* Check that group 1 counters are implemented */
		if (!amu_group1_supported()) {
			ERROR("AMU Counter Group 1 is not implemented\n");
			panic();
		}

		/* Check number of group 1 counters */
		uint64_t cnt_num = read_amcgcr_el0_cg1nc();

		VERBOSE("%s%llu. %s%u\n",
			"Number of AMU Group 1 Counters ", cnt_num,
			"Requested number ", AMU_GROUP1_NR_COUNTERS);

		if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
			ERROR("%s%llu is less than %s%u\n",
				"Number of AMU Group 1 Counters ", cnt_num,
				"Requested number ", AMU_GROUP1_NR_COUNTERS);
			panic();
		}
	}
#endif

	if (el2_unused) {
		/*
		 * CPTR_EL2.TAM: Set to zero so any accesses to
		 * the Activity Monitor registers do not trap to EL2.
		 */
		write_cptr_el2_tam(0U);
	}

	/*
	 * Retrieve the CPTR_EL3 value from the context passed in 'ctx' and
	 * set CPTR_EL3.TAM to zero so that any accesses to the Activity
	 * Monitor registers do not trap to EL3.
	 */
	write_cptr_el3_tam(ctx, 0U);

	/* Enable group 0 counters */
	write_amcntenset0_el0_px(AMU_GROUP0_COUNTERS_MASK);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		/* Enable group 1 counters */
		write_amcntenset1_el0_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (!amu_v1p1_supported()) {
		return;
	}

	if (el2_unused) {
		/* Make sure virtual offsets are disabled if EL2 not used. */
		write_hcr_el2_amvoffen(0U);
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
	 * counters at all but the highest implemented EL. This is controlled
	 * with the AMU_RESTRICT_COUNTERS compile-time flag; when set, system
	 * register reads at lower ELs return zero. Reads from the
	 * memory-mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr_el0_cg1rz(1U);
#else
	write_amcr_el0_cg1rz(0U);
#endif
}
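
/*
 * Illustrative sketch (not part of this file): a caller in the context
 * management library would typically do something like the following
 * when preparing to leave EL3. The use of `cm_get_context()` and the
 * NON_SECURE constant here is an assumption for illustration only.
 *
 *	cpu_context_t *ctx = (cpu_context_t *)cm_get_context(NON_SECURE);
 *
 *	amu_enable(el2_unused, ctx);
 */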

/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val`. */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the register for counter 1 (the constant-frequency cycle counter)
 * does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group0_voffset_read(unsigned int idx)
{
	assert(amu_v1p1_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);
	assert(idx != 1U);

	return amu_group0_voffset_read_internal(idx);
}

/*
 * Write the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the register for counter 1 (the constant-frequency cycle counter)
 * does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_v1p1_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);
	assert(idx != 1U);

	amu_group0_voffset_write_internal(idx, val);
	isb();
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
/* Read the group 1 counter identified by the given `idx`. */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val`. */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group1_voffset_read(unsigned int idx)
{
	assert(amu_v1p1_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	return amu_group1_voffset_read_internal(idx);
}

/*
 * Write the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group1_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_v1p1_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	amu_group1_voffset_write_internal(idx, val);
	isb();
}
#endif

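/*
 * Save this core's AMU context on suspend. The counters are disabled
 * before being read so that observers of the memory-mapped view cannot
 * sample values from the future; the group 0/1 counters and, where
 * supported and enabled, their virtual offsets are then captured into the
 * per-core context.
 */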
static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		if (!amu_group1_supported()) {
			return (void *)-1;
		}
	}
#endif

	/* Assert that group 0/1 counter configuration is what we expect */
	assert(read_amcntenset0_el0_px() == AMU_GROUP0_COUNTERS_MASK);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		assert(read_amcntenset1_el0_px() == AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	/*
	 * Disable group 0/1 counters so that other observers, such as the
	 * SCP, cannot sample counter values from the future via the
	 * memory-mapped view.
	 */
	write_amcntenclr0_el0_px(AMU_GROUP0_COUNTERS_MASK);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		write_amcntenclr1_el0_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	isb();

	/* Save all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

	/* Save group 0 virtual offsets if supported and enabled. */
	if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
		/* No loop: the count is fixed and index 1 does not exist. */
		ctx->group0_voffsets[0U] = amu_group0_voffset_read(0U);
		ctx->group0_voffsets[1U] = amu_group0_voffset_read(2U);
		ctx->group0_voffsets[2U] = amu_group0_voffset_read(3U);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		/* Save group 1 counters */
		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
			if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
				ctx->group1_cnts[i] = amu_group1_cnt_read(i);
			}
		}

		/* Save group 1 virtual offsets if supported and enabled. */
		if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
			uint64_t amcg1idr = read_amcg1idr_el0_voff() &
				AMU_GROUP1_COUNTERS_MASK;

			for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
				if (((amcg1idr >> i) & 1ULL) != 0ULL) {
					ctx->group1_voffsets[i] =
						amu_group1_voffset_read(i);
				}
			}
		}
	}
#endif

	return (void *)0;
}

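/*
 * Mirror image of amu_context_save(): restore this core's saved counter
 * values and virtual offsets on resume, then re-enable the counters that
 * were disabled at save time.
 */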
static void *amu_context_restore(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		if (!amu_group1_supported()) {
			return (void *)-1;
		}
	}
#endif

	/* Counters were disabled in `amu_context_save()` */
	assert(read_amcntenset0_el0_px() == 0U);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		assert(read_amcntenset1_el0_px() == 0U);
	}
#endif

	/* Restore all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

	/* Restore group 0 virtual offsets if supported and enabled. */
	if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
		/* No loop: the count is fixed and index 1 does not exist. */
		amu_group0_voffset_write(0U, ctx->group0_voffsets[0U]);
		amu_group0_voffset_write(2U, ctx->group0_voffsets[1U]);
		amu_group0_voffset_write(3U, ctx->group0_voffsets[2U]);
	}

	/* Restore group 0 counter configuration */
	write_amcntenset0_el0_px(AMU_GROUP0_COUNTERS_MASK);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (AMU_GROUP1_NR_COUNTERS > 0U) {
		/* Restore group 1 counters */
		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
			if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
				amu_group1_cnt_write(i, ctx->group1_cnts[i]);
			}
		}

		/* Restore group 1 virtual offsets if supported and enabled. */
		if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
			uint64_t amcg1idr = read_amcg1idr_el0_voff() &
				AMU_GROUP1_COUNTERS_MASK;

			for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
				if (((amcg1idr >> i) & 1ULL) != 0ULL) {
					amu_group1_voffset_write(i,
						ctx->group1_voffsets[i]);
				}
			}
		}

		/* Restore group 1 counter configuration */
		write_amcntenset1_el0_px(AMU_GROUP1_COUNTERS_MASK);
	}
#endif

	return (void *)0;
}

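/*
 * Register the handlers with the EL3 runtime publish-subscribe framework,
 * so that the AMU context is saved when a core starts the PSCI suspend
 * power-down sequence and restored when it finishes.
 */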
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);