/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <amu.h>
#include <amu_private.h>
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <platform.h>
#include <pubsub_events.h>

#define AMU_GROUP0_NR_COUNTERS	4

struct amu_ctx {
	uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
	uint64_t group1_cnts[AMU_GROUP1_NR_COUNTERS];
};

static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];

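/*
 * Return a non-zero value if version 1 of the Activity Monitors extension
 * is present, as reported by the AMU field of ID_PFR0.
 */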
int amu_supported(void)
{
	uint64_t features;

	features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
	return (features & ID_PFR0_AMU_MASK) == 1;
}

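/*
 * Enable the Activity Monitor counters: ensure accesses from lower ELs are
 * not trapped when EL2 is unused, then enable all group 0 and group 1
 * counters.
 */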
void amu_enable(int el2_unused)
{
	if (!amu_supported())
		return;

	if (el2_unused) {
		uint64_t v;
		/*
		 * Clear HCPTR.TAM so that Non-secure accesses from EL0 or EL1
		 * to the Activity Monitor registers do not trap to EL2.
		 */
		v = read_hcptr();
		v &= ~TAM_BIT;
		write_hcptr(v);
	}

	/* Enable group 0 counters */
	write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);

	/* Enable group 1 counters */
	write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
}

/* Read the group 0 counter identified by the given `idx`. */
uint64_t amu_group0_cnt_read(int idx)
{
	assert(amu_supported());
	assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val`. */
void amu_group0_cnt_write(int idx, uint64_t val)
{
	assert(amu_supported());
	assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

/* Read the group 1 counter identified by the given `idx`. */
uint64_t amu_group1_cnt_read(int idx)
{
	assert(amu_supported());
	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val`. */
void amu_group1_cnt_write(int idx, uint64_t val)
{
	assert(amu_supported());
	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

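/* Program the event type register for the given group 1 counter. */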
void amu_group1_set_evtype(int idx, unsigned int val)
{
	assert(amu_supported());
	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_set_evtype_internal(idx, val);
	isb();
}

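/*
 * Hook run when a core is about to be powered down for suspend: disable all
 * counters and save their values into this core's context structure.
 */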
static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx;
	int i;

	if (!amu_supported())
		return (void *)-1;

	ctx = &amu_ctxs[plat_my_core_pos()];

	/* Assert that group 0 counter configuration is what we expect */
	assert(read_amcntenset0() == AMU_GROUP0_COUNTERS_MASK);

	/*
	 * Disable group 0 and group 1 counters so that other observers,
	 * such as the SCP, cannot sample counter values from the future
	 * via the memory-mapped view.
	 */
	write_amcntenclr0(AMU_GROUP0_COUNTERS_MASK);
	write_amcntenclr1(AMU_GROUP1_COUNTERS_MASK);
	isb();

	for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);

	for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
		ctx->group1_cnts[i] = amu_group1_cnt_read(i);

	return 0;
}

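/*
 * Hook run when a core wakes from a power-down suspend: restore the saved
 * counter values and re-enable the counters.
 */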
static void *amu_context_restore(const void *arg)
{
	struct amu_ctx *ctx;
	int i;

	if (!amu_supported())
		return (void *)-1;

	ctx = &amu_ctxs[plat_my_core_pos()];

	/* Counters were disabled in `amu_context_save()` */
	assert(read_amcntenset0() == 0);

	/* Restore group 0 and group 1 counter values */
	for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
		amu_group1_cnt_write(i, ctx->group1_cnts[i]);

	/* Enable group 0 counters */
	write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);

	/* Enable group 1 counters */
	write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);

	return 0;
}

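/*
 * Save the AMU context when a core starts a power-down suspend and restore
 * it when the core finishes resuming.
 */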
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);