/*
 * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>

#include <arch.h>
#include <arch_helpers.h>

#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/amu_private.h>

#include <plat/common/platform.h>

/*
 * Per-core AMU counter context, indexed by plat_my_core_pos(). Saved and
 * restored around PSCI suspend-to-powerdown (see the pubsub subscriptions
 * at the bottom of this file).
 */
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];

Alexei Fedorov7e6306b2020-07-14 08:17:56 +010021/* Check if AMUv1 for Armv8.4 or 8.6 is implemented */
Antonio Nino Diaz033b4bb2018-10-25 16:52:26 +010022bool amu_supported(void)
Dimitris Papastamosdda48b02017-10-17 14:03:14 +010023{
Alexei Fedorov7e6306b2020-07-14 08:17:56 +010024 uint32_t features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
Dimitris Papastamosdda48b02017-10-17 14:03:14 +010025
Alexei Fedorov7e6306b2020-07-14 08:17:56 +010026 features &= ID_PFR0_AMU_MASK;
27 return ((features == 1U) || (features == 2U));
Joel Hutton0dcdd8d2017-12-21 15:21:20 +000028}
29
#if AMU_GROUP1_NR_COUNTERS
/*
 * Check if group 1 counters are implemented: AMCFGR.NCG reports the number
 * of counter groups implemented beyond group 0.
 */
bool amu_group1_supported(void)
{
	uint32_t ncg = (read_amcfgr() >> AMCFGR_NCG_SHIFT) & AMCFGR_NCG_MASK;

	return ncg == 1U;
}
#endif
39
/*
 * Enable counters. This function is meant to be invoked
 * by the context management library before exiting from EL3.
 *
 * el2_unused: when true, the software stack does not use EL2, so HCPTR.TAM
 * is cleared here so that non-secure EL0/EL1 accesses to the Activity
 * Monitor registers are not trapped to EL2.
 *
 * Panics if group 1 counters were requested at build time
 * (AMU_GROUP1_NR_COUNTERS) but the hardware implements fewer than that,
 * or none at all.
 */
void amu_enable(bool el2_unused)
{
	/* Nothing to do if the AMU extension is not implemented */
	if (!amu_supported()) {
		INFO("AMU is not implemented\n");
		return;
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Check and set presence of group 1 counters */
	if (!amu_group1_supported()) {
		ERROR("AMU Counter Group 1 is not implemented\n");
		panic();
	}

	/* Check number of group 1 counters (AMCGCR.CG1NC) */
	uint32_t cnt_num = (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
				AMCGCR_CG1NC_MASK;
	VERBOSE("%s%u. %s%u\n",
		"Number of AMU Group 1 Counters ", cnt_num,
		"Requested number ", AMU_GROUP1_NR_COUNTERS);

	/* The build-time request must not exceed what the hardware provides */
	if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
		ERROR("%s%u is less than %s%u\n",
			"Number of AMU Group 1 Counters ", cnt_num,
			"Requested number ", AMU_GROUP1_NR_COUNTERS);
		panic();
	}
#endif

	if (el2_unused) {
		uint64_t v;
		/*
		 * Non-secure access from EL0 or EL1 to the Activity Monitor
		 * registers do not trap to EL2.
		 */
		v = read_hcptr();
		v &= ~TAM_BIT;
		write_hcptr(v);
	}

	/* Enable group 0 counters */
	write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Enable group 1 counters */
	write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
#endif
}
92
93/* Read the group 0 counter identified by the given `idx`. */
Alexei Fedorov7e6306b2020-07-14 08:17:56 +010094uint64_t amu_group0_cnt_read(unsigned int idx)
Joel Hutton0dcdd8d2017-12-21 15:21:20 +000095{
Antonio Nino Diaz033b4bb2018-10-25 16:52:26 +010096 assert(amu_supported());
Alexei Fedorov7e6306b2020-07-14 08:17:56 +010097 assert(idx < AMU_GROUP0_NR_COUNTERS);
Joel Hutton0dcdd8d2017-12-21 15:21:20 +000098
99 return amu_group0_cnt_read_internal(idx);
100}
101
Alexei Fedorov7e6306b2020-07-14 08:17:56 +0100102/* Write the group 0 counter identified by the given `idx` with `val` */
103void amu_group0_cnt_write(unsigned int idx, uint64_t val)
Joel Hutton0dcdd8d2017-12-21 15:21:20 +0000104{
Antonio Nino Diaz033b4bb2018-10-25 16:52:26 +0100105 assert(amu_supported());
Alexei Fedorov7e6306b2020-07-14 08:17:56 +0100106 assert(idx < AMU_GROUP0_NR_COUNTERS);
Joel Hutton0dcdd8d2017-12-21 15:21:20 +0000107
108 amu_group0_cnt_write_internal(idx, val);
109 isb();
110}
111
Alexei Fedorov7e6306b2020-07-14 08:17:56 +0100112#if AMU_GROUP1_NR_COUNTERS
113/* Read the group 1 counter identified by the given `idx` */
114uint64_t amu_group1_cnt_read(unsigned int idx)
Joel Hutton0dcdd8d2017-12-21 15:21:20 +0000115{
Antonio Nino Diaz033b4bb2018-10-25 16:52:26 +0100116 assert(amu_supported());
Alexei Fedorov7e6306b2020-07-14 08:17:56 +0100117 assert(amu_group1_supported());
118 assert(idx < AMU_GROUP1_NR_COUNTERS);
Joel Hutton0dcdd8d2017-12-21 15:21:20 +0000119
120 return amu_group1_cnt_read_internal(idx);
121}
122
Alexei Fedorov7e6306b2020-07-14 08:17:56 +0100123/* Write the group 1 counter identified by the given `idx` with `val` */
124void amu_group1_cnt_write(unsigned int idx, uint64_t val)
Joel Hutton0dcdd8d2017-12-21 15:21:20 +0000125{
Antonio Nino Diaz033b4bb2018-10-25 16:52:26 +0100126 assert(amu_supported());
Alexei Fedorov7e6306b2020-07-14 08:17:56 +0100127 assert(amu_group1_supported());
128 assert(idx < AMU_GROUP1_NR_COUNTERS);
Joel Hutton0dcdd8d2017-12-21 15:21:20 +0000129
130 amu_group1_cnt_write_internal(idx, val);
131 isb();
132}
133
Alexei Fedorov7e6306b2020-07-14 08:17:56 +0100134/*
135 * Program the event type register for the given `idx` with
136 * the event number `val`
137 */
138void amu_group1_set_evtype(unsigned int idx, unsigned int val)
Joel Hutton0dcdd8d2017-12-21 15:21:20 +0000139{
Antonio Nino Diaz033b4bb2018-10-25 16:52:26 +0100140 assert(amu_supported());
Alexei Fedorov7e6306b2020-07-14 08:17:56 +0100141 assert(amu_group1_supported());
142 assert(idx < AMU_GROUP1_NR_COUNTERS);
Joel Hutton0dcdd8d2017-12-21 15:21:20 +0000143
144 amu_group1_set_evtype_internal(idx, val);
145 isb();
Dimitris Papastamoseaf3e6d2017-11-28 13:47:06 +0000146}
Alexei Fedorov7e6306b2020-07-14 08:17:56 +0100147#endif /* AMU_GROUP1_NR_COUNTERS */
Dimitris Papastamoseaf3e6d2017-11-28 13:47:06 +0000148
/*
 * PSCI pubsub hook: save this core's AMU counters before a powerdown
 * suspend. Counters are disabled before being read so that no observer
 * (e.g. an SCP using the memory-mapped view) can sample values newer
 * than the ones saved here.
 *
 * `arg` is the pubsub payload and is unused. Returns (void *)0 on
 * success, (void *)-1 if the AMU (or required group 1 support) is not
 * implemented, in which case there is nothing to save.
 */
static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/* Assert that group 0/1 counter configuration is what we expect */
	/* NOTE(review): reads use the _el0-suffixed helper names while the
	 * writes below use the plain names — presumably both map to the same
	 * AArch32 registers; confirm against arch_helpers.h. */
	assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
#endif
	/*
	 * Disable group 0/1 counters to avoid other observers like SCP sampling
	 * counter values from the future via the memory mapped view.
	 */
	write_amcntenclr0(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	write_amcntenclr1(AMU_GROUP1_COUNTERS_MASK);
#endif
	/* Make sure the disables have taken effect before reading */
	isb();

	/* Save all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Save group 1 counters (only those enabled by the build-time mask) */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
			ctx->group1_cnts[i] = amu_group1_cnt_read(i);
		}
	}
#endif
	return (void *)0;
}
195
/*
 * PSCI pubsub hook: restore this core's AMU counters after waking from a
 * powerdown suspend. Counter values are written back first, then the
 * counters are re-enabled, mirroring the disable-then-read order used in
 * `amu_context_save()`.
 *
 * `arg` is the pubsub payload and is unused. Returns (void *)0 on
 * success, (void *)-1 if the AMU (or required group 1 support) is not
 * implemented, in which case there is nothing to restore.
 */
static void *amu_context_restore(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/* Counters were disabled in `amu_context_save()` */
	assert(read_amcntenset0_el0() == 0U);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_el0() == 0U);
#endif

	/* Restore all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

	/* Restore group 0 counter configuration */
	write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Restore group 1 counters (only those enabled by the build-time mask) */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
			amu_group1_cnt_write(i, ctx->group1_cnts[i]);
		}
	}

	/* Restore group 1 counter configuration */
	write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
#endif

	return (void *)0;
}
Dimitris Papastamoseaf3e6d2017-11-28 13:47:06 +0000239
/* Hook the AMU context save/restore into the PSCI powerdown-suspend path */
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);