/*
 * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <platform_def.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/pmuv3.h>
#include <lib/extensions/sys_reg_trace.h>
#include <lib/extensions/trf.h>
#include <lib/utils.h>

/*******************************************************************************
 * Context management library initialisation routine. This library is used by
 * runtime services to share pointers to 'cpu_context' structures for the secure
 * and non-secure states. Management of the structures and their associated
 * memory is not done by the context management library, e.g. the PSCI service
 * manages the cpu context used for entry from and exit to the non-secure state.
 * The Secure payload manages the context(s) corresponding to the secure state.
 * It also uses this library to get access to the non-secure state cpu context
 * pointers.
 ******************************************************************************/
void cm_init(void)
{
	/*
	 * The context management library has only global data to initialize,
	 * but that will be done when the BSS is zeroed out.
	 */
}

/*******************************************************************************
 * The following function initializes the cpu_context 'ctx' for
 * first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 *
 * The security state to initialize is determined by the SECURE attribute
 * of the entry_point_info.
 *
 * The EE and ST attributes are used to configure the endianness and secure
 * timer availability for the new execution context.
 *
 * To prepare the register state for entry call cm_prepare_el3_exit() and
 * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to
 * cm_el1_sysregs_context_restore().
 ******************************************************************************/
void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
{
	unsigned int security_state;
	uint32_t scr, sctlr;
	regs_t *reg_ctx;

	assert(ctx != NULL);

	security_state = GET_SECURITY_STATE(ep->h.attr);

	/* Clear any residual register values from the context */
	zeromem(ctx, sizeof(*ctx));

	reg_ctx = get_regs_ctx(ctx);

	/*
	 * Base the context SCR on the current value, adjust for entry point
	 * specific requirements
	 */
	scr = read_scr();
	scr &= ~(SCR_NS_BIT | SCR_HCE_BIT);

	if (security_state != SECURE)
		scr |= SCR_NS_BIT;

	if (security_state != SECURE) {
		/*
		 * Set up SCTLR for the Non-secure context.
		 *
		 * SCTLR.EE: Endianness is taken from the entrypoint attributes.
		 *
		 * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as
		 * required by PSCI specification)
		 *
		 * Set remaining SCTLR fields to their architecturally defined
		 * values. Some fields reset to an IMPLEMENTATION DEFINED value:
		 *
		 * SCTLR.TE: Set to zero so that exceptions to an Exception
		 * Level executing at PL1 are taken to A32 state.
		 *
		 * SCTLR.V: Set to zero to select the normal exception vectors
		 * with base address held in VBAR.
		 */
		assert(((ep->spsr >> SPSR_E_SHIFT) & SPSR_E_MASK) ==
			(EP_GET_EE(ep->h.attr) >> EP_EE_SHIFT));

		sctlr = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0U;
		sctlr |= (SCTLR_RESET_VAL & ~(SCTLR_TE_BIT | SCTLR_V_BIT));
		write_ctx_reg(reg_ctx, CTX_NS_SCTLR, sctlr);
	}

	/*
	 * The target exception level is based on the spsr mode requested. If
	 * execution is requested to hyp mode, HVC is enabled via SCR.HCE.
	 */
	if (GET_M32(ep->spsr) == MODE32_hyp)
		scr |= SCR_HCE_BIT;

	/*
	 * Store the initialised values for SCTLR and SCR in the cpu_context.
	 * The Hyp mode registers are not part of the saved context and are
	 * set-up in cm_prepare_el3_exit().
	 */
	write_ctx_reg(reg_ctx, CTX_SCR, scr);
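	/*
	 * The entrypoint (held in LR) and SPSR are consumed by the exception
	 * return when EL3 hands over to the new context.
	 */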
	write_ctx_reg(reg_ctx, CTX_LR, ep->pc);
	write_ctx_reg(reg_ctx, CTX_SPSR, ep->spsr);

	/*
	 * Store the r0-r3 values from the entrypoint into the context.
	 * Use memcpy as we are in control of the layout of the structures.
	 */
	memcpy((void *)reg_ctx, (void *)&ep->args, sizeof(aapcs32_params_t));
}

/*******************************************************************************
 * Enable architecture extensions on first entry to Non-secure world.
 * When EL2 is implemented but unused, `el2_unused` is true, otherwise it is
 * false.
 ******************************************************************************/
static void enable_extensions_nonsecure(bool el2_unused)
{
#if IMAGE_BL32
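	/*
	 * Extension state only needs to be managed when this library is part
	 * of the AArch32 EL3 runtime image (SP_MIN, built as BL32); for other
	 * images this function is a no-op.
	 */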
	if (is_feat_amu_supported()) {
		amu_enable(el2_unused);
	}

	if (is_feat_sys_reg_trace_supported()) {
		sys_reg_trace_init_el3();
	}

	if (is_feat_trf_supported()) {
		trf_init_el3();
	}

	/*
	 * Also applies to PMU < v3. The PMU is only disabled for EL3 and Secure
	 * state execution. This does not affect lower NS ELs.
	 */
	pmuv3_init_el3();
#endif /* IMAGE_BL32 */
}

/*******************************************************************************
 * The following function initializes the cpu_context for a CPU specified by
 * its `cpu_idx` for first use, and sets the initial entrypoint state as
 * specified by the entry_point_info structure.
 ******************************************************************************/
void cm_init_context_by_index(unsigned int cpu_idx,
			      const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}

/*******************************************************************************
 * The following function initializes the cpu_context for the current CPU
 * for first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 ******************************************************************************/
void cm_init_my_context(const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}

/*******************************************************************************
 * Prepare the CPU system registers for first entry into secure or normal world.
 *
 * If execution is requested to hyp mode, HSCTLR is initialized.
 * If execution is requested to non-secure PL1, and the CPU supports
 * HYP mode, then HYP mode is disabled by configuring all necessary HYP mode
 * registers.
 ******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
	uint32_t hsctlr, scr;
	cpu_context_t *ctx = cm_get_context(security_state);
	bool el2_unused = false;

	assert(ctx != NULL);

	if (security_state == NON_SECURE) {
		scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR);
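		/*
		 * SCR.HCE was set by cm_setup_context() if entry to Hyp mode
		 * was requested.
		 */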
		if ((scr & SCR_HCE_BIT) != 0U) {
			/* Use SCTLR value to initialize HSCTLR */
			hsctlr = read_ctx_reg(get_regs_ctx(ctx),
					      CTX_NS_SCTLR);
			hsctlr |= HSCTLR_RES1;
			/* Temporarily set the NS bit to access HSCTLR */
			write_scr(read_scr() | SCR_NS_BIT);
			/*
			 * Make sure the write to SCR is complete so that
			 * we can access HSCTLR
			 */
			isb();
			write_hsctlr(hsctlr);
			isb();

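			/* Restore the Secure view of the banked registers */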
			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		} else if ((read_id_pfr1() &
			(ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) != 0U) {
			el2_unused = true;

			/*
			 * Set the NS bit to access NS copies of certain banked
			 * registers
			 */
			write_scr(read_scr() | SCR_NS_BIT);
			isb();

			/*
			 * Hyp / PL2 present but unused, need to disable safely.
			 * HSCTLR can be ignored in this case.
			 *
			 * Set HCR to its architectural reset value so that
			 * Non-secure operations do not trap to Hyp mode.
			 */
			write_hcr(HCR_RESET_VAL);

			/*
			 * Set HCPTR to its architectural reset value so that
			 * Non-secure access from EL1 or EL0 to trace and to
			 * Advanced SIMD and floating point functionality does
			 * not trap to Hyp mode.
			 */
			write_hcptr(HCPTR_RESET_VAL);

			/*
			 * Initialise CNTHCTL. All fields are architecturally
			 * UNKNOWN on reset and are set to zero except for
			 * field(s) listed below.
			 *
			 * CNTHCTL.PL1PCEN: Disable traps to Hyp mode of
			 * Non-secure EL0 and EL1 accesses to the physical
			 * timer registers.
			 *
			 * CNTHCTL.PL1PCTEN: Disable traps to Hyp mode of
			 * Non-secure EL0 and EL1 accesses to the physical
			 * counter registers.
			 */
			write_cnthctl(CNTHCTL_RESET_VAL |
					PL1PCEN_BIT | PL1PCTEN_BIT);

			/*
			 * Initialise CNTVOFF to zero as it resets to an
			 * IMPLEMENTATION DEFINED value.
			 */
			write64_cntvoff(0);

			/*
			 * Set VPIDR and VMPIDR to match MIDR and MPIDR
			 * respectively.
			 */
			write_vpidr(read_midr());
			write_vmpidr(read_mpidr());

			/*
			 * Initialise VTTBR, setting all fields rather than
			 * relying on the hw. Some fields are architecturally
			 * UNKNOWN at reset.
			 *
			 * VTTBR.VMID: Set to zero which is the architecturally
			 * defined reset value. Even though EL1&0 stage 2
			 * address translation is disabled, cache maintenance
			 * operations depend on the VMID.
			 *
			 * VTTBR.BADDR: Set to zero as EL1&0 stage 2 address
			 * translation is disabled.
			 */
			write64_vttbr(VTTBR_RESET_VAL &
				~((VTTBR_VMID_MASK << VTTBR_VMID_SHIFT)
				| (VTTBR_BADDR_MASK << VTTBR_BADDR_SHIFT)));

			/*
			 * Initialise HDCR, setting all the fields rather than
			 * relying on hw.
			 *
			 * HDCR.HPMN: Set to value of PMCR.N which is the
			 * architecturally-defined reset value.
			 *
			 * HDCR.HLP: Set to one so that event counter
			 * overflow, that is recorded in PMOVSCLR[0-30],
			 * occurs on the increment that changes
			 * PMEVCNTR<n>[63] from 1 to 0, when ARMv8.5-PMU is
			 * implemented. This bit is RES0 in versions of the
			 * architecture earlier than ARMv8.5, setting it to 1
			 * doesn't have any effect on them.
			 * This bit is Reserved, UNK/SBZP in ARMv7.
			 *
			 * HDCR.HPME: Set to zero to disable EL2 Event
			 * counters.
			 */
#if (ARM_ARCH_MAJOR > 7)
			write_hdcr((HDCR_RESET_VAL | HDCR_HLP_BIT |
				   ((read_pmcr() & PMCR_N_BITS) >>
				    PMCR_N_SHIFT)) & ~HDCR_HPME_BIT);
#else
			write_hdcr((HDCR_RESET_VAL |
				   ((read_pmcr() & PMCR_N_BITS) >>
				    PMCR_N_SHIFT)) & ~HDCR_HPME_BIT);
#endif
			/*
			 * Set HSTR to its architectural reset value so that
			 * accesses to system registers in the coproc=1111
			 * encoding space do not trap to Hyp mode.
			 */
			write_hstr(HSTR_RESET_VAL);
			/*
			 * Set CNTHP_CTL to its architectural reset value to
			 * disable the EL2 physical timer and prevent timer
			 * interrupts. Some fields are architecturally UNKNOWN
			 * on reset and are set to zero.
			 */
			write_cnthp_ctl(CNTHP_CTL_RESET_VAL);
			isb();

			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		}
		enable_extensions_nonsecure(el2_unused);
	}
}

/*******************************************************************************
 * This function is used to exit to Non-secure world. It simply calls the
 * cm_prepare_el3_exit function for AArch32.
 ******************************************************************************/
void cm_prepare_el3_exit_ns(void)
{
	cm_prepare_el3_exit(NON_SECURE);
}