/*
 * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
6
Alexei Fedorov7e6306b2020-07-14 08:17:56 +01007#include <assert.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +00008#include <stdbool.h>
9
Dimitris Papastamosdda48b02017-10-17 14:03:14 +010010#include <arch.h>
11#include <arch_helpers.h>
Alexei Fedorov7e6306b2020-07-14 08:17:56 +010012
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000013#include <lib/el3_runtime/pubsub_events.h>
14#include <lib/extensions/amu.h>
15#include <lib/extensions/amu_private.h>
Dimitris Papastamoseaf3e6d2017-11-28 13:47:06 +000016
Alexei Fedorov7e6306b2020-07-14 08:17:56 +010017#include <plat/common/platform.h>
Dimitris Papastamoseaf3e6d2017-11-28 13:47:06 +000018
/* Per-core saved AMU counter context, indexed by plat_my_core_pos() */
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
Dimitris Papastamosdda48b02017-10-17 14:03:14 +010020
Alexei Fedorov7e6306b2020-07-14 08:17:56 +010021/* Check if AMUv1 for Armv8.4 or 8.6 is implemented */
Antonio Nino Diaz033b4bb2018-10-25 16:52:26 +010022bool amu_supported(void)
Dimitris Papastamosdda48b02017-10-17 14:03:14 +010023{
Alexei Fedorov7e6306b2020-07-14 08:17:56 +010024 uint32_t features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
Dimitris Papastamosdda48b02017-10-17 14:03:14 +010025
Alexei Fedorov7e6306b2020-07-14 08:17:56 +010026 features &= ID_PFR0_AMU_MASK;
27 return ((features == 1U) || (features == 2U));
Joel Hutton0dcdd8d2017-12-21 15:21:20 +000028}
29
#if AMU_GROUP1_NR_COUNTERS
/* Check whether group 1 counters are implemented (AMCFGR.NCG == 1) */
bool amu_group1_supported(void)
{
	uint32_t ncg = (read_amcfgr() >> AMCFGR_NCG_SHIFT) & AMCFGR_NCG_MASK;

	return ncg == 1U;
}
#endif
39
40/*
41 * Enable counters. This function is meant to be invoked
42 * by the context management library before exiting from EL3.
43 */
Antonio Nino Diaz033b4bb2018-10-25 16:52:26 +010044void amu_enable(bool el2_unused)
Joel Hutton0dcdd8d2017-12-21 15:21:20 +000045{
Alexei Fedorov7e6306b2020-07-14 08:17:56 +010046 if (!amu_supported()) {
Dimitris Papastamos525c37a2017-11-13 09:49:45 +000047 return;
Alexei Fedorov7e6306b2020-07-14 08:17:56 +010048 }
49
50#if AMU_GROUP1_NR_COUNTERS
51 /* Check and set presence of group 1 counters */
52 if (!amu_group1_supported()) {
53 ERROR("AMU Counter Group 1 is not implemented\n");
54 panic();
55 }
56
57 /* Check number of group 1 counters */
58 uint32_t cnt_num = (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
59 AMCGCR_CG1NC_MASK;
60 VERBOSE("%s%u. %s%u\n",
61 "Number of AMU Group 1 Counters ", cnt_num,
62 "Requested number ", AMU_GROUP1_NR_COUNTERS);
63
64 if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
65 ERROR("%s%u is less than %s%u\n",
66 "Number of AMU Group 1 Counters ", cnt_num,
67 "Requested number ", AMU_GROUP1_NR_COUNTERS);
68 panic();
69 }
70#endif
Dimitris Papastamosdda48b02017-10-17 14:03:14 +010071
Dimitris Papastamos525c37a2017-11-13 09:49:45 +000072 if (el2_unused) {
73 uint64_t v;
Dimitris Papastamos525c37a2017-11-13 09:49:45 +000074 /*
75 * Non-secure access from EL0 or EL1 to the Activity Monitor
76 * registers do not trap to EL2.
77 */
78 v = read_hcptr();
79 v &= ~TAM_BIT;
80 write_hcptr(v);
Dimitris Papastamosdda48b02017-10-17 14:03:14 +010081 }
Dimitris Papastamos525c37a2017-11-13 09:49:45 +000082
83 /* Enable group 0 counters */
84 write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
Joel Hutton0dcdd8d2017-12-21 15:21:20 +000085
Alexei Fedorov7e6306b2020-07-14 08:17:56 +010086#if AMU_GROUP1_NR_COUNTERS
Joel Hutton0dcdd8d2017-12-21 15:21:20 +000087 /* Enable group 1 counters */
88 write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
Alexei Fedorov7e6306b2020-07-14 08:17:56 +010089#endif
Joel Hutton0dcdd8d2017-12-21 15:21:20 +000090}
91
/*
 * Read the group 0 counter identified by the given `idx`.
 * Preconditions (checked only in debug builds): the AMU is implemented
 * and `idx` is within the group 0 counter range.
 */
uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	return amu_group0_cnt_read_internal(idx);
}
100
/*
 * Write the group 0 counter identified by the given `idx` with `val`.
 * Preconditions (checked only in debug builds): the AMU is implemented
 * and `idx` is within the group 0 counter range.
 */
void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	amu_group0_cnt_write_internal(idx, val);
	/* Synchronize the system-register write before continuing */
	isb();
}
110
#if AMU_GROUP1_NR_COUNTERS
/*
 * Read the group 1 counter identified by the given `idx`.
 * Preconditions (checked only in debug builds): the AMU and its group 1
 * counters are implemented and `idx` is within the group 1 counter range.
 */
uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	return amu_group1_cnt_read_internal(idx);
}
121
/*
 * Write the group 1 counter identified by the given `idx` with `val`.
 * Preconditions (checked only in debug builds): the AMU and its group 1
 * counters are implemented and `idx` is within the group 1 counter range.
 */
void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_cnt_write_internal(idx, val);
	/* Synchronize the system-register write before continuing */
	isb();
}
132
/*
 * Program the event type register for the given `idx` with
 * the event number `val`, selecting which event the group 1
 * counter `idx` counts.
 * Preconditions (checked only in debug builds): the AMU and its group 1
 * counters are implemented and `idx` is within the group 1 counter range.
 */
void amu_group1_set_evtype(unsigned int idx, unsigned int val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_set_evtype_internal(idx, val);
	/* Synchronize the system-register write before continuing */
	isb();
}
#endif /* AMU_GROUP1_NR_COUNTERS */
Dimitris Papastamoseaf3e6d2017-11-28 13:47:06 +0000147
/*
 * Save the calling core's AMU counters ahead of a power-down suspend.
 *
 * Registered on the `psci_suspend_pwrdown_start` pubsub event (see the
 * SUBSCRIBE_TO_EVENT at the bottom of this file); `arg` is unused.
 * Returns (void *)-1 if the AMU (or required group 1 support) is absent,
 * (void *)0 on success.
 */
static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/*
	 * Assert that group 0/1 counter configuration is what we expect.
	 * NOTE(review): the asserts read via `_el0`-suffixed accessors while
	 * the writes below use unsuffixed ones - presumably both map to the
	 * same AArch32 registers; confirm against arch_helpers.h.
	 */
	assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
#endif
	/*
	 * Disable group 0/1 counters to avoid other observers like SCP sampling
	 * counter values from the future via the memory mapped view.
	 */
	write_amcntenclr0(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	write_amcntenclr1(AMU_GROUP1_COUNTERS_MASK);
#endif
	/* Ensure the disables take effect before the counters are read */
	isb();

	/* Save all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Save group 1 counters (only those selected by the platform mask) */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
			ctx->group1_cnts[i] = amu_group1_cnt_read(i);
		}
	}
#endif
	return (void *)0;
}
194
/*
 * Restore the calling core's AMU counters on resume from a power-down
 * suspend.
 *
 * Registered on the `psci_suspend_pwrdown_finish` pubsub event (see the
 * SUBSCRIBE_TO_EVENT at the bottom of this file); `arg` is unused.
 * Returns (void *)-1 if the AMU (or required group 1 support) is absent,
 * (void *)0 on success.
 */
static void *amu_context_restore(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/*
	 * Counters were disabled in `amu_context_save()`.
	 * NOTE(review): the asserts read via `_el0`-suffixed accessors while
	 * the writes below use unsuffixed ones - presumably both map to the
	 * same AArch32 registers; confirm against arch_helpers.h.
	 */
	assert(read_amcntenset0_el0() == 0U);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_el0() == 0U);
#endif

	/* Restore all group 0 counters while they are still disabled */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

	/* Restore group 0 counter configuration (re-enable counting) */
	write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Restore group 1 counters (only those selected by the platform mask) */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
			amu_group1_cnt_write(i, ctx->group1_cnts[i]);
		}
	}

	/* Restore group 1 counter configuration (re-enable counting) */
	write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
#endif

	return (void *)0;
}
Dimitris Papastamoseaf3e6d2017-11-28 13:47:06 +0000238
/*
 * Hook the AMU context into the PSCI power-down suspend sequence:
 * counters are saved when suspend starts and restored when it finishes.
 */
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);