/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <stdbool.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>

#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/amu_private.h>

#include <plat/common/platform.h>

static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];

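/*
 * The helpers below wrap accesses to individual fields of the AMU-related
 * system registers, hiding the shift-and-mask arithmetic behind named
 * accessors. For example, write_amcr_el0_cg1rz(1U) performs a
 * read-modify-write of AMCR_EL0 that changes only the CG1RZ bit.
 */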
static inline __unused uint64_t read_id_aa64pfr0_el1_amu(void)
{
	return (read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
		ID_AA64PFR0_AMU_MASK;
}

static inline __unused uint64_t read_hcr_el2_amvoffen(void)
{
	return (read_hcr_el2() & HCR_AMVOFFEN_BIT) >>
		HCR_AMVOFFEN_SHIFT;
}

static inline __unused void write_cptr_el2_tam(uint64_t value)
{
	write_cptr_el2((read_cptr_el2() & ~CPTR_EL2_TAM_BIT) |
		((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
}

static inline __unused void write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
{
	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);

	value &= ~TAM_BIT;
	value |= (tam << TAM_SHIFT) & TAM_BIT;

	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, value);
}

static inline __unused void write_hcr_el2_amvoffen(uint64_t value)
{
	write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) |
		((value << HCR_AMVOFFEN_SHIFT) & HCR_AMVOFFEN_BIT));
}

static inline __unused void write_amcr_el0_cg1rz(uint64_t value)
{
	write_amcr_el0((read_amcr_el0() & ~AMCR_CG1RZ_BIT) |
		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}

static inline __unused uint64_t read_amcfgr_el0_ncg(void)
{
	return (read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT) &
		AMCFGR_EL0_NCG_MASK;
}

static inline __unused uint64_t read_amcg1idr_el0_voff(void)
{
	return (read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
		AMCG1IDR_VOFF_MASK;
}

static inline __unused uint64_t read_amcgcr_el0_cg1nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
		AMCGCR_EL0_CG1NC_MASK;
}

static inline __unused uint64_t read_amcntenset0_el0_px(void)
{
	return (read_amcntenset0_el0() >> AMCNTENSET0_EL0_Pn_SHIFT) &
		AMCNTENSET0_EL0_Pn_MASK;
}

static inline __unused uint64_t read_amcntenset1_el0_px(void)
{
	return (read_amcntenset1_el0() >> AMCNTENSET1_EL0_Pn_SHIFT) &
		AMCNTENSET1_EL0_Pn_MASK;
}

static inline __unused void write_amcntenset0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset0_el0();

	value &= ~AMCNTENSET0_EL0_Pn_MASK;
	value |= (px << AMCNTENSET0_EL0_Pn_SHIFT) & AMCNTENSET0_EL0_Pn_MASK;

	write_amcntenset0_el0(value);
}

static inline __unused void write_amcntenset1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenset1_el0();

	value &= ~AMCNTENSET1_EL0_Pn_MASK;
	value |= (px << AMCNTENSET1_EL0_Pn_SHIFT) & AMCNTENSET1_EL0_Pn_MASK;

	write_amcntenset1_el0(value);
}

static inline __unused void write_amcntenclr0_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr0_el0();

	value &= ~AMCNTENCLR0_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR0_EL0_Pn_SHIFT) & AMCNTENCLR0_EL0_Pn_MASK;

	write_amcntenclr0_el0(value);
}

static inline __unused void write_amcntenclr1_el0_px(uint64_t px)
{
	uint64_t value = read_amcntenclr1_el0();

	value &= ~AMCNTENCLR1_EL0_Pn_MASK;
	value |= (px << AMCNTENCLR1_EL0_Pn_SHIFT) & AMCNTENCLR1_EL0_Pn_MASK;

	write_amcntenclr1_el0(value);
}

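/*
 * Feature detection: ID_AA64PFR0_EL1.AMU reads 0b0001 when FEAT_AMUv1 is
 * implemented and 0b0010 when FEAT_AMUv1p1 is implemented, so these checks
 * test for a minimum architected version rather than an exact value.
 */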
static bool amu_supported(void)
{
	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1;
}

static bool amu_v1p1_supported(void)
{
	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1P1;
}

#if AMU_GROUP1_NR_COUNTERS
static bool amu_group1_supported(void)
{
	return read_amcfgr_el0_ncg() > 0U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked
 * by the context management library before exiting from EL3.
 */
void amu_enable(bool el2_unused, cpu_context_t *ctx)
{
	if (!amu_supported()) {
		return;
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Check the presence of group 1 counters */
	if (!amu_group1_supported()) {
		ERROR("AMU Counter Group 1 is not implemented\n");
		panic();
	}

	/* Check number of group 1 counters */
	uint64_t cnt_num = read_amcgcr_el0_cg1nc();
	VERBOSE("%s%llu. %s%u\n",
		"Number of AMU Group 1 Counters ", cnt_num,
		"Requested number ", AMU_GROUP1_NR_COUNTERS);

	if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
		ERROR("%s%llu is less than %s%u\n",
			"Number of AMU Group 1 Counters ", cnt_num,
			"Requested number ", AMU_GROUP1_NR_COUNTERS);
		panic();
	}
#endif

	if (el2_unused) {
		/*
		 * CPTR_EL2.TAM: Set to zero so any accesses to
		 * the Activity Monitor registers do not trap to EL2.
		 */
		write_cptr_el2_tam(0U);
	}

	/*
	 * Retrieve and update the CPTR_EL3 value from the context mentioned
	 * in 'ctx'. Set CPTR_EL3.TAM to zero so that any accesses to
	 * the Activity Monitor registers do not trap to EL3.
	 */
	write_cptr_el3_tam(ctx, 0U);

	/* Enable group 0 counters */
	write_amcntenset0_el0_px(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Enable group 1 counters */
	write_amcntenset1_el0_px(AMU_GROUP1_COUNTERS_MASK);
#endif

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (!amu_v1p1_supported()) {
		return;
	}

	if (el2_unused) {
		/* Make sure virtual offsets are disabled if EL2 not used. */
		write_hcr_el2_amvoffen(0U);
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
	 * counters at all but the highest implemented EL. This is controlled
	 * with the AMU_RESTRICT_COUNTERS compile time flag; when set, system
	 * register reads at lower ELs return zero. Reads from the memory
	 * mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr_el0_cg1rz(1U);
#else
	write_amcr_el0_cg1rz(0U);
#endif
}

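/*
 * Thin wrappers around the amu_private.h accessors. Reads and writes are
 * guarded by assertions on feature support and counter index; writes are
 * followed by an ISB so the system register update is synchronized before
 * execution continues.
 */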
/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val`. */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the register for index 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group0_voffset_read(unsigned int idx)
{
	assert(amu_v1p1_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);
	assert(idx != 1U);

	return amu_group0_voffset_read_internal(idx);
}

/*
 * Write the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the register for index 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_v1p1_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);
	assert(idx != 1U);

	amu_group0_voffset_write_internal(idx, val);
	isb();
}

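/*
 * Group 1 (auxiliary) counters are optional in the architecture, so their
 * accessors are compiled in only when the platform requests them with a
 * non-zero AMU_GROUP1_NR_COUNTERS.
 */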
#if AMU_GROUP1_NR_COUNTERS
/* Read the group 1 counter identified by the given `idx`. */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val`. */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static uint64_t amu_group1_voffset_read(unsigned int idx)
{
	assert(amu_v1p1_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	return amu_group1_voffset_read_internal(idx);
}

/*
 * Write the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
static void amu_group1_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_v1p1_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);
	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);

	amu_group1_voffset_write_internal(idx, val);
	isb();
}
#endif /* AMU_GROUP1_NR_COUNTERS */

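/*
 * Power-management hooks: save the counters (and, where supported, the
 * virtual offsets) before a core powers down, and restore them when it
 * comes back up. Both are registered with the PSCI pubsub events at the
 * bottom of this file.
 */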
static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/* Assert that group 0/1 counter configuration is what we expect */
	assert(read_amcntenset0_el0_px() == AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_el0_px() == AMU_GROUP1_COUNTERS_MASK);
#endif
	/*
	 * Disable group 0/1 counters so that other observers, such as the
	 * SCP, cannot sample counter values from the future via the memory
	 * mapped view.
	 */
	write_amcntenclr0_el0_px(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	write_amcntenclr1_el0_px(AMU_GROUP1_COUNTERS_MASK);
#endif
	isb();

	/* Save all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

	/* Save group 0 virtual offsets if supported and enabled. */
	if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
		/* Not using a loop: the count is fixed and index 1 does not exist. */
		ctx->group0_voffsets[0U] = amu_group0_voffset_read(0U);
		ctx->group0_voffsets[1U] = amu_group0_voffset_read(2U);
		ctx->group0_voffsets[2U] = amu_group0_voffset_read(3U);
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Save group 1 counters */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
			ctx->group1_cnts[i] = amu_group1_cnt_read(i);
		}
	}

	/* Save group 1 virtual offsets if supported and enabled. */
	if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
		uint64_t amcg1idr = read_amcg1idr_el0_voff() &
			AMU_GROUP1_COUNTERS_MASK;

		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
			if (((amcg1idr >> i) & 1ULL) != 0ULL) {
				ctx->group1_voffsets[i] =
					amu_group1_voffset_read(i);
			}
		}
	}
#endif
	return (void *)0;
}

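/*
 * Restore is the mirror of save: the counters are written back while still
 * disabled (as left by amu_context_save()) and only re-enabled afterwards,
 * so a memory-mapped observer should not see partially restored values.
 */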
static void *amu_context_restore(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/* Counters were disabled in `amu_context_save()` */
	assert(read_amcntenset0_el0_px() == 0U);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_el0_px() == 0U);
#endif

	/* Restore all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

	/* Restore group 0 virtual offsets if supported and enabled. */
	if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
		/* Not using a loop: the count is fixed and index 1 does not exist. */
		amu_group0_voffset_write(0U, ctx->group0_voffsets[0U]);
		amu_group0_voffset_write(2U, ctx->group0_voffsets[1U]);
		amu_group0_voffset_write(3U, ctx->group0_voffsets[2U]);
	}

	/* Restore group 0 counter configuration */
	write_amcntenset0_el0_px(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Restore group 1 counters */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
			amu_group1_cnt_write(i, ctx->group1_cnts[i]);
		}
	}

	/* Restore group 1 virtual offsets if supported and enabled. */
	if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
		uint64_t amcg1idr = read_amcg1idr_el0_voff() &
			AMU_GROUP1_COUNTERS_MASK;

		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
			if (((amcg1idr >> i) & 1ULL) != 0ULL) {
				amu_group1_voffset_write(i,
					ctx->group1_voffsets[i]);
			}
		}
	}

	/* Restore group 1 counter configuration */
	write_amcntenset1_el0_px(AMU_GROUP1_COUNTERS_MASK);
#endif

	return (void *)0;
}

SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);