/*
 * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2022, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <platform_def.h>

#include <arch.h>
#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/interrupt_mgmt.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <drivers/arm/gicv3.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/brbe.h>
#include <lib/extensions/mpam.h>
#include <lib/extensions/sme.h>
#include <lib/extensions/spe.h>
#include <lib/extensions/sve.h>
#include <lib/extensions/sys_reg_trace.h>
#include <lib/extensions/trbe.h>
#include <lib/extensions/trf.h>
#include <lib/utils.h>

#if ENABLE_FEAT_TWED
/* Make sure the delay value fits within the range (0-15) */
CASSERT(((TWED_DELAY & ~SCR_TWEDEL_MASK) == 0U), assert_twed_delay_value_check);
#endif /* ENABLE_FEAT_TWED */
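
/*
 * Illustrative note (not from the original source): SCR_TWEDEL_MASK covers the
 * 4-bit SCR_EL3.TWEDEL field, so with a build option of TWED_DELAY = 5 the
 * expression (5 & ~0xF) is 0 and the CASSERT above passes, whereas
 * TWED_DELAY = 16 (0x10) would overflow the field and fail the build. The
 * delay is actually programmed into SCR_EL3 in setup_context_common() below.
 */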

static void manage_extensions_secure(cpu_context_t *ctx);

static void setup_el1_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t sctlr_elx, actlr_elx;

	/*
	 * Initialise SCTLR_EL1 to the reset value corresponding to the target
	 * execution state, setting all fields rather than relying on the hw.
	 * Some fields have architecturally UNKNOWN reset values and these are
	 * set to zero.
	 *
	 * SCTLR.EE: Endianness is taken from the entrypoint attributes.
	 *
	 * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as
	 * required by the PSCI specification).
	 */
	sctlr_elx = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0UL;
	if (GET_RW(ep->spsr) == MODE_RW_64) {
		sctlr_elx |= SCTLR_EL1_RES1;
	} else {
		/*
		 * If the target execution state is AArch32 then the following
		 * fields need to be set.
		 *
		 * SCTLR_EL1.nTWE: Set to one so that EL0 execution of WFE
		 * instructions is not trapped to EL1.
		 *
		 * SCTLR_EL1.nTWI: Set to one so that EL0 execution of WFI
		 * instructions is not trapped to EL1.
		 *
		 * SCTLR_EL1.CP15BEN: Set to one to enable EL0 execution of the
		 * CP15DMB, CP15DSB, and CP15ISB instructions.
		 */
		sctlr_elx |= SCTLR_AARCH32_EL1_RES1 | SCTLR_CP15BEN_BIT
					| SCTLR_NTWI_BIT | SCTLR_NTWE_BIT;
	}

#if ERRATA_A75_764081
	/*
	 * If the workaround for Cortex-A75 erratum 764081 is used then set
	 * SCTLR_EL1.IESB to enable the Implicit Error Synchronization Barrier.
	 */
	sctlr_elx |= SCTLR_IESB_BIT;
#endif
	/* Store the initialised SCTLR_EL1 value in the cpu_context */
	write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);

	/*
	 * Base the context ACTLR_EL1 on the current value, as it is
	 * implementation defined. The context restore process will write
	 * the value from the context to the actual register and can cause
	 * problems for processor cores that don't expect certain bits to
	 * be zero.
	 */
	actlr_elx = read_actlr_el1();
	write_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx));
}
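
/*
 * Illustrative note (not from the original source): for a little-endian
 * AArch64 payload (EP_GET_EE == 0, GET_RW == MODE_RW_64) the function above
 * stores exactly SCTLR_EL1_RES1 in the context, i.e. the MMU (M), data cache
 * (C) and instruction cache (I) enable bits all start cleared, as PSCI
 * requires.
 */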

/******************************************************************************
 * This function performs initializations that are specific to SECURE state
 * and updates the cpu context specified by 'ctx'.
 *****************************************************************************/
static void setup_secure_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;

	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);

#if defined(IMAGE_BL31) && !defined(SPD_spmd)
	/*
	 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as
	 * indicated by the interrupt routing model for BL31.
	 */
	scr_el3 |= get_scr_el3_from_routing_model(SECURE);
#endif

#if !CTX_INCLUDE_MTE_REGS || ENABLE_ASSERTIONS
	/* Get Memory Tagging Extension support level */
	unsigned int mte = get_armv8_5_mte_support();
#endif
	/*
	 * Allow access to Allocation Tags when CTX_INCLUDE_MTE_REGS
	 * is set, or when MTE is only implemented at EL0.
	 */
#if CTX_INCLUDE_MTE_REGS
	assert((mte == MTE_IMPLEMENTED_ELX) || (mte == MTE_IMPLEMENTED_ASY));
	scr_el3 |= SCR_ATA_BIT;
#else
	if (mte == MTE_IMPLEMENTED_EL0) {
		scr_el3 |= SCR_ATA_BIT;
	}
#endif /* CTX_INCLUDE_MTE_REGS */
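
	/*
	 * Illustrative summary (not from the original source) of when the
	 * block above sets SCR_EL3.ATA for the secure world:
	 *   CTX_INCLUDE_MTE_REGS=1, MTE at ELx or asymmetric -> ATA set
	 *     (tag state is banked per world, so access is safe)
	 *   CTX_INCLUDE_MTE_REGS=0, MTE only at EL0          -> ATA set
	 *     (no EL1+ tag state exists that could leak)
	 *   CTX_INCLUDE_MTE_REGS=0, MTE at ELx               -> ATA clear
	 */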

	/* Enable S-EL2 if the next EL is EL2 and S-EL2 is present */
	if ((GET_EL(ep->spsr) == MODE_EL2) && is_feat_sel2_supported()) {
		if (GET_RW(ep->spsr) != MODE_RW_64) {
			ERROR("S-EL2 cannot be used in AArch32.\n");
			panic();
		}

		scr_el3 |= SCR_EEL2_BIT;
	}

	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);

	/*
	 * Initialize EL1 context registers unless SPMC is running
	 * at S-EL2.
	 */
#if !SPMD_SPM_AT_SEL2
	setup_el1_context(ctx, ep);
#endif

	manage_extensions_secure(ctx);
}

#if ENABLE_RME
/******************************************************************************
 * This function performs initializations that are specific to REALM state
 * and updates the cpu context specified by 'ctx'.
 *****************************************************************************/
static void setup_realm_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;

	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);

	scr_el3 |= SCR_NS_BIT | SCR_NSE_BIT;

	if (is_feat_csv2_2_supported()) {
		/* Enable access to the SCXTNUM_ELx registers. */
		scr_el3 |= SCR_EnSCXT_BIT;
	}

	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
}
#endif /* ENABLE_RME */

/******************************************************************************
 * This function performs initializations that are specific to NON-SECURE state
 * and updates the cpu context specified by 'ctx'.
 *****************************************************************************/
static void setup_ns_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;

	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);

	/* SCR_NS: Set the NS bit */
	scr_el3 |= SCR_NS_BIT;

#if !CTX_INCLUDE_PAUTH_REGS
	/*
	 * If the pointer authentication registers aren't saved during world
	 * switches the value of the registers can be leaked from the Secure to
	 * the Non-secure world. To prevent this, rather than enabling pointer
	 * authentication everywhere, we only enable it in the Non-secure world.
	 *
	 * If the Secure world wants to use pointer authentication,
	 * CTX_INCLUDE_PAUTH_REGS must be set to 1.
	 */
	scr_el3 |= SCR_API_BIT | SCR_APK_BIT;
#endif /* !CTX_INCLUDE_PAUTH_REGS */

	/* Allow access to Allocation Tags when MTE is implemented. */
	scr_el3 |= SCR_ATA_BIT;

#if HANDLE_EA_EL3_FIRST_NS
	/* SCR_EL3.EA: Route External Abort and SError Interrupt to EL3. */
	scr_el3 |= SCR_EA_BIT;
#endif

#if RAS_TRAP_NS_ERR_REC_ACCESS
	/*
	 * SCR_EL3.TERR: Trap Error record accesses. Accesses to the RAS ERR
	 * and RAS ERX registers from EL1 and EL2 (from any security state)
	 * are trapped to EL3. Set here to trap only for NS EL1/EL2.
	 */
	scr_el3 |= SCR_TERR_BIT;
#endif

	if (is_feat_csv2_2_supported()) {
		/* Enable access to the SCXTNUM_ELx registers. */
		scr_el3 |= SCR_EnSCXT_BIT;
	}

#ifdef IMAGE_BL31
	/*
	 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as
	 * indicated by the interrupt routing model for BL31.
	 */
	scr_el3 |= get_scr_el3_from_routing_model(NON_SECURE);
#endif
	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);

	/* Initialize EL1 context registers */
	setup_el1_context(ctx, ep);

	/* Initialize EL2 context registers */
#if CTX_INCLUDE_EL2_REGS

	/*
	 * Initialize the SCTLR_EL2 context register using the Endianness value
	 * taken from the entrypoint attribute.
	 */
	u_register_t sctlr_el2 = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0UL;
	sctlr_el2 |= SCTLR_EL2_RES1;
	write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_SCTLR_EL2,
			sctlr_el2);

	/*
	 * Program the ICC_SRE_EL2 to make sure the correct bits are set
	 * when restoring NS context.
	 */
	u_register_t icc_sre_el2 = ICC_SRE_DIB_BIT | ICC_SRE_DFB_BIT |
				   ICC_SRE_EN_BIT | ICC_SRE_SRE_BIT;
	write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_ICC_SRE_EL2,
			icc_sre_el2);

	/*
	 * Initialize MDCR_EL2.HPMN to its hardware reset value so we don't
	 * throw anyone off who expects this to be sensible.
	 * TODO: A similar thing happens in cm_prepare_el3_exit. They should be
	 * unified with the proper PMU implementation.
	 */
	u_register_t mdcr_el2 = ((read_pmcr_el0() >> PMCR_EL0_N_SHIFT) &
			PMCR_EL0_N_MASK);
	write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_MDCR_EL2, mdcr_el2);

	if (is_feat_hcx_supported()) {
		/*
		 * Initialize register HCRX_EL2 with its init value.
		 * As the value of HCRX_EL2 is UNKNOWN on reset, there is a
		 * chance that this can lead to unexpected behavior in lower
		 * ELs that have not been updated since the introduction of
		 * this feature if not properly initialized, especially when
		 * it comes to those bits that enable/disable traps.
		 */
		write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_HCRX_EL2,
			HCRX_EL2_INIT_VAL);
	}
#endif /* CTX_INCLUDE_EL2_REGS */
}

/*******************************************************************************
 * The following function performs initialization of the cpu_context 'ctx'
 * for first use that is common to all security states, and sets the
 * initial entrypoint state as specified by the entry_point_info structure.
 *
 * The EE and ST attributes are used to configure the endianness and secure
 * timer availability for the new execution context.
 ******************************************************************************/
static void setup_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;
	gp_regs_t *gp_regs;

	/* Clear any residual register values from the context */
	zeromem(ctx, sizeof(*ctx));

	/*
	 * SCR_EL3 was initialised during the reset sequence in macro
	 * el3_arch_init_common. This code modifies the SCR_EL3 fields that
	 * affect the next EL.
	 *
	 * The following fields are initially set to zero and then updated to
	 * the required value depending on the state of the SPSR_EL3 and the
	 * Security state and entrypoint attributes of the next EL.
	 */
	scr_el3 = read_scr();
	scr_el3 &= ~(SCR_NS_BIT | SCR_RW_BIT | SCR_EA_BIT | SCR_FIQ_BIT | SCR_IRQ_BIT |
			SCR_ST_BIT | SCR_HCE_BIT | SCR_NSE_BIT);

	/*
	 * SCR_EL3.RW: Set the execution state, AArch32 or AArch64, for the next
	 * Exception level as specified by SPSR.
	 */
	if (GET_RW(ep->spsr) == MODE_RW_64) {
		scr_el3 |= SCR_RW_BIT;
	}

	/*
	 * SCR_EL3.ST: Traps Secure EL1 accesses to the Counter-timer Physical
	 * Secure timer registers to EL3, from AArch64 state only, if specified
	 * by the entrypoint attributes. If SEL2 is present and enabled, the ST
	 * bit always behaves as 1 (i.e. secure physical timer register access
	 * is not trapped).
	 */
	if (EP_GET_ST(ep->h.attr) != 0U) {
		scr_el3 |= SCR_ST_BIT;
	}

	/*
	 * If FEAT_HCX is enabled, enable access to HCRX_EL2 by setting
	 * SCR_EL3.HXEn.
	 */
	if (is_feat_hcx_supported()) {
		scr_el3 |= SCR_HXEn_BIT;
	}

	/*
	 * If FEAT_RNG_TRAP is enabled, all reads of the RNDR and RNDRRS
	 * registers are trapped to EL3.
	 */
#if ENABLE_FEAT_RNG_TRAP
	scr_el3 |= SCR_TRNDR_BIT;
#endif

#if FAULT_INJECTION_SUPPORT
	/* Enable fault injection from lower ELs */
	scr_el3 |= SCR_FIEN_BIT;
#endif

	/*
	 * SCR_EL3.TCR2EN: Enable access to TCR2_ELx for AArch64 if present.
	 */
	if (is_feat_tcr2_supported() && (GET_RW(ep->spsr) == MODE_RW_64)) {
		scr_el3 |= SCR_TCR2EN_BIT;
	}

	/*
	 * SCR_EL3.PIEN: Enable permission indirection and overlay
	 * registers for AArch64 if present.
	 */
	if (is_feat_sxpie_supported() || is_feat_sxpoe_supported()) {
		scr_el3 |= SCR_PIEN_BIT;
	}

	/*
	 * SCR_EL3.GCSEn: Enable GCS registers for AArch64 if present.
	 */
	if ((is_feat_gcs_supported()) && (GET_RW(ep->spsr) == MODE_RW_64)) {
		scr_el3 |= SCR_GCSEn_BIT;
	}

	/*
	 * CPTR_EL3 was initialized out of reset, copy that value to the
	 * context register.
	 */
	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, read_cptr_el3());

	/*
	 * SCR_EL3.HCE: Enable HVC instructions if the next execution state is
	 * AArch64 and the next EL is EL2, or if the next execution state is
	 * AArch32 and the next mode is Hyp.
	 * SCR_EL3.FGTEn: Enable Fine Grained Virtualization Traps under the
	 * same conditions as HVC instructions and when the processor supports
	 * ARMv8.6-FGT.
	 * SCR_EL3.ECVEn: Enable the Enhanced Counter Virtualization (ECV)
	 * CNTPOFF_EL2 register under the same conditions as HVC instructions
	 * and when the processor supports ECV.
	 */
	if (((GET_RW(ep->spsr) == MODE_RW_64) && (GET_EL(ep->spsr) == MODE_EL2))
	    || ((GET_RW(ep->spsr) != MODE_RW_64)
		&& (GET_M32(ep->spsr) == MODE32_hyp))) {
		scr_el3 |= SCR_HCE_BIT;

		if (is_feat_fgt_supported()) {
			scr_el3 |= SCR_FGTEN_BIT;
		}

		if (is_feat_ecv_supported()) {
			scr_el3 |= SCR_ECVEN_BIT;
		}
	}

	/* Enable WFE trap delay in SCR_EL3 if supported and configured */
	if (is_feat_twed_supported()) {
		/* Set delay in SCR_EL3 */
		scr_el3 &= ~(SCR_TWEDEL_MASK << SCR_TWEDEL_SHIFT);
		scr_el3 |= ((TWED_DELAY & SCR_TWEDEL_MASK)
				<< SCR_TWEDEL_SHIFT);

		/* Enable WFE delay */
		scr_el3 |= SCR_TWEDEn_BIT;
	}

	/*
	 * Populate EL3 state so that we have the right context
	 * before doing ERET.
	 */
	state = get_el3state_ctx(ctx);
	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
	write_ctx_reg(state, CTX_ELR_EL3, ep->pc);
	write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr);

	/*
	 * Store the X0-X7 value from the entrypoint into the context.
	 * Use memcpy as we are in control of the layout of the structures.
	 */
	gp_regs = get_gpregs_ctx(ctx);
	memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t));
}
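
/*
 * Illustrative note (not from the original source): for a typical AArch64
 * hypervisor entrypoint (SPSR requesting EL2, with FEAT_HCX/FGT/ECV/TWED all
 * absent), the function above leaves SCR_EL3.RW and SCR_EL3.HCE set and the
 * NS/EA/FIQ/IRQ/ST/NSE bits clear; the security-state specific
 * setup_*_context() routines then layer their own bits on top of this
 * common baseline.
 */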

/*******************************************************************************
 * Context management library initialization routine. This library is used by
 * runtime services to share pointers to 'cpu_context' structures for secure,
 * non-secure and realm states. Management of the structures and their
 * associated memory is not done by the context management library e.g. the
 * PSCI service manages the cpu context used for entry from and exit to the
 * non-secure state. The Secure payload dispatcher service manages the
 * context(s) corresponding to the secure state. It also uses this library to
 * get access to the non-secure state cpu context pointers.
 * Lastly, this library provides the API to make SP_EL3 point to the cpu
 * context which will be used for programming an entry into a lower EL. The
 * same context will be used to save state upon exception entry from that EL.
 ******************************************************************************/
void __init cm_init(void)
{
	/*
	 * The context management library has only global data to initialize, but
	 * that will be done when the BSS is zeroed out.
	 */
}

/*******************************************************************************
 * This is the high-level function used to initialize the cpu_context 'ctx' for
 * first use. It performs initializations that are common to all security
 * states and initializations specific to the security state specified in 'ep'.
 ******************************************************************************/
void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
{
	unsigned int security_state;

	assert(ctx != NULL);

	/*
	 * Perform initializations that are common
	 * to all security states.
	 */
	setup_context_common(ctx, ep);

	security_state = GET_SECURITY_STATE(ep->h.attr);

	/* Perform security state specific initializations */
	switch (security_state) {
	case SECURE:
		setup_secure_context(ctx, ep);
		break;
#if ENABLE_RME
	case REALM:
		setup_realm_context(ctx, ep);
		break;
#endif
	case NON_SECURE:
		setup_ns_context(ctx, ep);
		break;
	default:
		ERROR("Invalid security state\n");
		panic();
		break;
	}
}
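
/*
 * Minimal usage sketch (not part of the original file, kept out of the build
 * with #if 0): how a hypothetical runtime service might prepare a context for
 * a non-secure EL2 payload. The entrypoint address and argument value are
 * made up purely for illustration.
 */
#if 0
static void example_setup_ns_el2(cpu_context_t *ctx)
{
	entry_point_info_t ep;

	zeromem(&ep, sizeof(ep));
	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1, NON_SECURE);
	ep.pc = 0x80000000UL;	/* hypothetical payload entrypoint */
	ep.spsr = SPSR_64(MODE_EL2, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
	ep.args.arg0 = 0UL;	/* e.g. a DTB address would go here */

	cm_setup_context(ctx, &ep);
}
#endif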

/*******************************************************************************
 * Enable architecture extensions on first entry to the Non-secure world.
 * When EL2 is implemented but unused, `el2_unused` is non-zero; otherwise
 * it is zero.
 ******************************************************************************/
static void manage_extensions_nonsecure(bool el2_unused, cpu_context_t *ctx)
{
#if IMAGE_BL31
	if (is_feat_spe_supported()) {
		spe_enable(el2_unused);
	}

	if (is_feat_amu_supported()) {
		amu_enable(el2_unused, ctx);
	}

	/* Enable SVE and FPU/SIMD */
	if (is_feat_sve_supported()) {
		sve_enable(ctx);
	}

	if (is_feat_sme_supported()) {
		sme_enable(ctx);
	}

	if (is_feat_mpam_supported()) {
		mpam_enable(el2_unused);
	}

	if (is_feat_trbe_supported()) {
		trbe_enable();
	}

	if (is_feat_brbe_supported()) {
		brbe_enable();
	}

	if (is_feat_sys_reg_trace_supported()) {
		sys_reg_trace_enable(ctx);
	}

	if (is_feat_trf_supported()) {
		trf_enable();
	}
#endif
}

/*******************************************************************************
 * Enable architecture extensions on first entry to the Secure world.
 ******************************************************************************/
static void manage_extensions_secure(cpu_context_t *ctx)
{
#if IMAGE_BL31
	if (is_feat_sve_supported()) {
		if (ENABLE_SVE_FOR_SWD) {
			/*
			 * Enable SVE and FPU in the secure context; the secure
			 * manager must ensure that the SVE and FPU register
			 * contexts are properly managed.
			 */
			sve_enable(ctx);
		} else {
			/*
			 * Disable SVE and FPU in the secure context so the
			 * non-secure world can safely use them.
			 */
			sve_disable(ctx);
		}
	}

	if (is_feat_sme_supported()) {
		if (ENABLE_SME_FOR_SWD) {
			/*
			 * Enable SME, SVE, FPU/SIMD in the secure context; the
			 * secure manager must ensure the SME, SVE and FPU/SIMD
			 * contexts are properly managed.
			 */
			sme_enable(ctx);
		} else {
			/*
			 * Disable SME, SVE, FPU/SIMD in the secure context so
			 * the non-secure world can safely use the associated
			 * registers.
			 */
			sme_disable(ctx);
		}
	}
#endif /* IMAGE_BL31 */
}

/*******************************************************************************
 * The following function initializes the cpu_context for a CPU specified by
 * its `cpu_idx` for first use, and sets the initial entrypoint state as
 * specified by the entry_point_info structure.
 ******************************************************************************/
void cm_init_context_by_index(unsigned int cpu_idx,
			      const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}

/*******************************************************************************
 * The following function initializes the cpu_context for the current CPU
 * for first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 ******************************************************************************/
void cm_init_my_context(const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}

/*******************************************************************************
 * Prepare the CPU system registers for first entry into the realm, secure, or
 * normal world.
 *
 * If execution is requested to EL2 or hyp mode, SCTLR_EL2 is initialized.
 * If execution is requested to non-secure EL1 or svc mode, and the CPU
 * supports EL2 then EL2 is disabled by configuring all necessary EL2
 * registers.
 * For all entries, the EL1 registers are initialized from the cpu_context.
 ******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
	u_register_t sctlr_elx, scr_el3, mdcr_el2;
	cpu_context_t *ctx = cm_get_context(security_state);
	bool el2_unused = false;
	uint64_t hcr_el2 = 0U;

	assert(ctx != NULL);

	if (security_state == NON_SECURE) {
		uint64_t el2_implemented = el_implemented(2);

		scr_el3 = read_ctx_reg(get_el3state_ctx(ctx),
				       CTX_SCR_EL3);

		if (((scr_el3 & SCR_HCE_BIT) != 0U)
		    || (el2_implemented != EL_IMPL_NONE)) {
			/*
			 * If the context is not being used for EL2, initialize
			 * HCRX_EL2 with its init value here.
			 */
			if (is_feat_hcx_supported()) {
				write_hcrx_el2(HCRX_EL2_INIT_VAL);
			}
		}

		if ((scr_el3 & SCR_HCE_BIT) != 0U) {
			/* Use SCTLR_EL1.EE value to initialise sctlr_el2 */
			sctlr_elx = read_ctx_reg(get_el1_sysregs_ctx(ctx),
						 CTX_SCTLR_EL1);
			sctlr_elx &= SCTLR_EE_BIT;
			sctlr_elx |= SCTLR_EL2_RES1;
#if ERRATA_A75_764081
			/*
			 * If the workaround for Cortex-A75 erratum 764081 is
			 * used then set SCTLR_EL2.IESB to enable the Implicit
			 * Error Synchronization Barrier.
			 */
			sctlr_elx |= SCTLR_IESB_BIT;
#endif
			write_sctlr_el2(sctlr_elx);
		} else if (el2_implemented != EL_IMPL_NONE) {
			el2_unused = true;

			/*
			 * EL2 present but unused, need to disable safely.
			 * SCTLR_EL2 can be ignored in this case.
			 *
			 * Set EL2 register width appropriately: Set HCR_EL2
			 * field to match SCR_EL3.RW.
			 */
			if ((scr_el3 & SCR_RW_BIT) != 0U)
				hcr_el2 |= HCR_RW_BIT;

			/*
			 * For the Armv8.3 pointer authentication feature,
			 * disable traps to EL2 when accessing key registers or
			 * using pointer authentication instructions from
			 * lower ELs.
			 */
			hcr_el2 |= (HCR_API_BIT | HCR_APK_BIT);

			write_hcr_el2(hcr_el2);

			/*
			 * Initialise CPTR_EL2 setting all fields rather than
			 * relying on the hw. All fields have architecturally
			 * UNKNOWN reset values.
			 *
			 * CPTR_EL2.TCPAC: Set to zero so that Non-secure EL1
			 * accesses to the CPACR_EL1 or CPACR from both
			 * Execution states do not trap to EL2.
			 *
			 * CPTR_EL2.TTA: Set to zero so that Non-secure System
			 * register accesses to the trace registers from both
			 * Execution states do not trap to EL2.
			 * If PE trace unit System registers are not implemented
			 * then this bit is reserved, and must be set to zero.
			 *
			 * CPTR_EL2.TFP: Set to zero so that Non-secure accesses
			 * to SIMD and floating-point functionality from both
			 * Execution states do not trap to EL2.
			 */
			write_cptr_el2(CPTR_EL2_RESET_VAL &
					~(CPTR_EL2_TCPAC_BIT | CPTR_EL2_TTA_BIT
					| CPTR_EL2_TFP_BIT));

			/*
			 * Initialise CNTHCTL_EL2. All fields are
			 * architecturally UNKNOWN on reset and are set to zero
			 * except for field(s) listed below.
			 *
			 * CNTHCTL_EL2.EL1PCEN: Set to one to disable traps to
			 * Hyp mode of Non-secure EL0 and EL1 accesses to the
			 * physical timer registers.
			 *
			 * CNTHCTL_EL2.EL1PCTEN: Set to one to disable traps to
			 * Hyp mode of Non-secure EL0 and EL1 accesses to the
			 * physical counter registers.
			 */
			write_cnthctl_el2(CNTHCTL_RESET_VAL |
						EL1PCEN_BIT | EL1PCTEN_BIT);

			/*
			 * Initialise CNTVOFF_EL2 to zero as it resets to an
			 * architecturally UNKNOWN value.
			 */
			write_cntvoff_el2(0);

			/*
			 * Set VPIDR_EL2 and VMPIDR_EL2 to match MIDR_EL1 and
			 * MPIDR_EL1 respectively.
			 */
			write_vpidr_el2(read_midr_el1());
			write_vmpidr_el2(read_mpidr_el1());

			/*
			 * Initialise VTTBR_EL2. All fields are architecturally
			 * UNKNOWN on reset.
			 *
			 * VTTBR_EL2.VMID: Set to zero. Even though EL1&0 stage
			 * 2 address translation is disabled, cache maintenance
			 * operations depend on the VMID.
			 *
			 * VTTBR_EL2.BADDR: Set to zero as EL1&0 stage 2 address
			 * translation is disabled.
			 */
			write_vttbr_el2(VTTBR_RESET_VAL &
				~((VTTBR_VMID_MASK << VTTBR_VMID_SHIFT)
				| (VTTBR_BADDR_MASK << VTTBR_BADDR_SHIFT)));

			/*
			 * Initialise MDCR_EL2, setting all fields rather than
			 * relying on hw. Some fields are architecturally
			 * UNKNOWN on reset.
			 *
			 * MDCR_EL2.HLP: Set to one so that event counter
			 * overflow, that is recorded in PMOVSCLR_EL0[0-30],
			 * occurs on the increment that changes
			 * PMEVCNTR<n>_EL0[63] from 1 to 0, when ARMv8.5-PMU is
			 * implemented. This bit is RES0 in versions of the
			 * architecture earlier than ARMv8.5, setting it to 1
			 * doesn't have any effect on them.
			 *
			 * MDCR_EL2.TTRF: Set to zero so that access to the
			 * Trace Filter Control register TRFCR_EL1 at EL1 is
			 * not trapped to EL2. This bit is RES0 in versions of
			 * the architecture earlier than ARMv8.4.
			 *
			 * MDCR_EL2.HPMD: Set to one so that event counting is
			 * prohibited at EL2. This bit is RES0 in versions of
			 * the architecture earlier than ARMv8.1, setting it
			 * to 1 doesn't have any effect on them.
			 *
			 * MDCR_EL2.TPMS: Set to zero so that accesses to
			 * Statistical Profiling control registers from EL1
			 * do not trap to EL2. This bit is RES0 when SPE is
			 * not implemented.
			 *
			 * MDCR_EL2.TDRA: Set to zero so that Non-secure EL0 and
			 * EL1 System register accesses to the Debug ROM
			 * registers are not trapped to EL2.
			 *
			 * MDCR_EL2.TDOSA: Set to zero so that Non-secure EL1
			 * System register accesses to the powerdown debug
			 * registers are not trapped to EL2.
			 *
			 * MDCR_EL2.TDA: Set to zero so that System register
			 * accesses to the debug registers do not trap to EL2.
			 *
			 * MDCR_EL2.TDE: Set to zero so that debug exceptions
			 * are not routed to EL2.
			 *
			 * MDCR_EL2.HPME: Set to zero to disable EL2 Performance
			 * Monitors.
			 *
			 * MDCR_EL2.TPM: Set to zero so that Non-secure EL0 and
			 * EL1 accesses to all Performance Monitors registers
			 * are not trapped to EL2.
			 *
			 * MDCR_EL2.TPMCR: Set to zero so that Non-secure EL0
			 * and EL1 accesses to the PMCR_EL0 or PMCR are not
			 * trapped to EL2.
			 *
			 * MDCR_EL2.HPMN: Set to value of PMCR_EL0.N which is
			 * the architecturally-defined reset value.
			 *
			 * MDCR_EL2.E2TB: Set to zero so that the trace Buffer
			 * owning exception level is NS-EL1 and tracing is
			 * prohibited at NS-EL2. These bits are RES0 when
			 * FEAT_TRBE is not implemented.
			 */
			mdcr_el2 = ((MDCR_EL2_RESET_VAL | MDCR_EL2_HLP |
				     MDCR_EL2_HPMD) |
				   ((read_pmcr_el0() & PMCR_EL0_N_BITS)
				   >> PMCR_EL0_N_SHIFT)) &
				   ~(MDCR_EL2_TTRF | MDCR_EL2_TPMS |
				     MDCR_EL2_TDRA_BIT | MDCR_EL2_TDOSA_BIT |
				     MDCR_EL2_TDA_BIT | MDCR_EL2_TDE_BIT |
				     MDCR_EL2_HPME_BIT | MDCR_EL2_TPM_BIT |
				     MDCR_EL2_TPMCR_BIT |
				     MDCR_EL2_E2TB(MDCR_EL2_E2TB_EL1));

			write_mdcr_el2(mdcr_el2);

			/*
			 * Initialise HSTR_EL2. All fields are architecturally
			 * UNKNOWN on reset.
			 *
			 * HSTR_EL2.T<n>: Set all these fields to zero so that
			 * Non-secure EL0 or EL1 accesses to System registers
			 * do not trap to EL2.
			 */
			write_hstr_el2(HSTR_EL2_RESET_VAL & ~(HSTR_EL2_T_MASK));
			/*
			 * Initialise CNTHP_CTL_EL2. All fields are
			 * architecturally UNKNOWN on reset.
			 *
			 * CNTHP_CTL_EL2.ENABLE: Set to zero to disable the EL2
			 * physical timer and prevent timer interrupts.
			 */
			write_cnthp_ctl_el2(CNTHP_CTL_RESET_VAL &
						~(CNTHP_CTL_ENABLE_BIT));
		}
		manage_extensions_nonsecure(el2_unused, ctx);
	}

	cm_el1_sysregs_context_restore(security_state);
	cm_set_next_eret_context(security_state);
}
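
/*
 * Illustrative note (not from the original source): on the warm-boot path a
 * PSCI CPU_ON finish handler would typically call cm_prepare_el3_exit() (or
 * cm_prepare_el3_exit_ns() below) once the per-CPU context has been set up,
 * so that the first ERET into the booted world sees fully initialised
 * EL1/EL2 system registers.
 */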

#if CTX_INCLUDE_EL2_REGS

static void el2_sysregs_context_save_fgt(el2_sysregs_t *ctx)
{
	write_ctx_reg(ctx, CTX_HDFGRTR_EL2, read_hdfgrtr_el2());
	if (is_feat_amu_supported()) {
		write_ctx_reg(ctx, CTX_HAFGRTR_EL2, read_hafgrtr_el2());
	}
	write_ctx_reg(ctx, CTX_HDFGWTR_EL2, read_hdfgwtr_el2());
	write_ctx_reg(ctx, CTX_HFGITR_EL2, read_hfgitr_el2());
	write_ctx_reg(ctx, CTX_HFGRTR_EL2, read_hfgrtr_el2());
	write_ctx_reg(ctx, CTX_HFGWTR_EL2, read_hfgwtr_el2());
}

static void el2_sysregs_context_restore_fgt(el2_sysregs_t *ctx)
{
	write_hdfgrtr_el2(read_ctx_reg(ctx, CTX_HDFGRTR_EL2));
	if (is_feat_amu_supported()) {
		write_hafgrtr_el2(read_ctx_reg(ctx, CTX_HAFGRTR_EL2));
	}
	write_hdfgwtr_el2(read_ctx_reg(ctx, CTX_HDFGWTR_EL2));
	write_hfgitr_el2(read_ctx_reg(ctx, CTX_HFGITR_EL2));
	write_hfgrtr_el2(read_ctx_reg(ctx, CTX_HFGRTR_EL2));
	write_hfgwtr_el2(read_ctx_reg(ctx, CTX_HFGWTR_EL2));
}

static void el2_sysregs_context_save_mpam(el2_sysregs_t *ctx)
{
	u_register_t mpam_idr = read_mpamidr_el1();

	write_ctx_reg(ctx, CTX_MPAM2_EL2, read_mpam2_el2());

	/*
	 * The context registers that we intend to save would be part of the
	 * PE's system register frame only if MPAMIDR_EL1.HAS_HCR == 1.
	 */
	if ((mpam_idr & MPAMIDR_HAS_HCR_BIT) == 0U) {
		return;
	}

	/*
	 * MPAMHCR_EL2, MPAMVPMV_EL2 and MPAMVPM0_EL2 are always present if
	 * MPAMIDR_HAS_HCR_BIT == 1.
	 */
	write_ctx_reg(ctx, CTX_MPAMHCR_EL2, read_mpamhcr_el2());
	write_ctx_reg(ctx, CTX_MPAMVPM0_EL2, read_mpamvpm0_el2());
	write_ctx_reg(ctx, CTX_MPAMVPMV_EL2, read_mpamvpmv_el2());

	/*
	 * The number of MPAMVPM registers is implementation defined; it is
	 * reported in the MPAMIDR_EL1 register.
	 */
	switch ((mpam_idr >> MPAMIDR_EL1_VPMR_MAX_SHIFT) & MPAMIDR_EL1_VPMR_MAX_MASK) {
	case 7:
		write_ctx_reg(ctx, CTX_MPAMVPM7_EL2, read_mpamvpm7_el2());
		__fallthrough;
	case 6:
		write_ctx_reg(ctx, CTX_MPAMVPM6_EL2, read_mpamvpm6_el2());
		__fallthrough;
	case 5:
		write_ctx_reg(ctx, CTX_MPAMVPM5_EL2, read_mpamvpm5_el2());
		__fallthrough;
	case 4:
		write_ctx_reg(ctx, CTX_MPAMVPM4_EL2, read_mpamvpm4_el2());
		__fallthrough;
	case 3:
		write_ctx_reg(ctx, CTX_MPAMVPM3_EL2, read_mpamvpm3_el2());
		__fallthrough;
	case 2:
		write_ctx_reg(ctx, CTX_MPAMVPM2_EL2, read_mpamvpm2_el2());
		__fallthrough;
	case 1:
		write_ctx_reg(ctx, CTX_MPAMVPM1_EL2, read_mpamvpm1_el2());
		break;
	}
}
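
/*
 * Illustrative note (not from the original source): MPAMIDR_EL1.VPMR_MAX
 * holds the index of the highest implemented MPAMVPM<n>_EL2 register, so for
 * VPMR_MAX == 3 the switch above enters at case 3 and falls through to save
 * MPAMVPM3..MPAMVPM1 (MPAMVPM0 was already saved unconditionally).
 */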

static void el2_sysregs_context_restore_mpam(el2_sysregs_t *ctx)
{
	u_register_t mpam_idr = read_mpamidr_el1();

	write_mpam2_el2(read_ctx_reg(ctx, CTX_MPAM2_EL2));

	if ((mpam_idr & MPAMIDR_HAS_HCR_BIT) == 0U) {
		return;
	}

	write_mpamhcr_el2(read_ctx_reg(ctx, CTX_MPAMHCR_EL2));
	write_mpamvpm0_el2(read_ctx_reg(ctx, CTX_MPAMVPM0_EL2));
	write_mpamvpmv_el2(read_ctx_reg(ctx, CTX_MPAMVPMV_EL2));

	switch ((mpam_idr >> MPAMIDR_EL1_VPMR_MAX_SHIFT) & MPAMIDR_EL1_VPMR_MAX_MASK) {
	case 7:
		write_mpamvpm7_el2(read_ctx_reg(ctx, CTX_MPAMVPM7_EL2));
		__fallthrough;
	case 6:
		write_mpamvpm6_el2(read_ctx_reg(ctx, CTX_MPAMVPM6_EL2));
		__fallthrough;
	case 5:
		write_mpamvpm5_el2(read_ctx_reg(ctx, CTX_MPAMVPM5_EL2));
		__fallthrough;
	case 4:
		write_mpamvpm4_el2(read_ctx_reg(ctx, CTX_MPAMVPM4_EL2));
		__fallthrough;
	case 3:
		write_mpamvpm3_el2(read_ctx_reg(ctx, CTX_MPAMVPM3_EL2));
		__fallthrough;
	case 2:
		write_mpamvpm2_el2(read_ctx_reg(ctx, CTX_MPAMVPM2_EL2));
		__fallthrough;
	case 1:
		write_mpamvpm1_el2(read_ctx_reg(ctx, CTX_MPAMVPM1_EL2));
		break;
	}
}

/*******************************************************************************
 * Save EL2 sysreg context
 ******************************************************************************/
void cm_el2_sysregs_context_save(uint32_t security_state)
{
	u_register_t scr_el3 = read_scr();

	/*
	 * Always save the non-secure and realm EL2 context; only save the
	 * S-EL2 context if S-EL2 is enabled.
	 */
	if ((security_state != SECURE) ||
	    ((security_state == SECURE) && ((scr_el3 & SCR_EEL2_BIT) != 0U))) {
		cpu_context_t *ctx;
		el2_sysregs_t *el2_sysregs_ctx;

		ctx = cm_get_context(security_state);
		assert(ctx != NULL);

		el2_sysregs_ctx = get_el2_sysregs_ctx(ctx);

		el2_sysregs_context_save_common(el2_sysregs_ctx);
#if CTX_INCLUDE_MTE_REGS
		el2_sysregs_context_save_mte(el2_sysregs_ctx);
#endif
		if (is_feat_mpam_supported()) {
			el2_sysregs_context_save_mpam(el2_sysregs_ctx);
		}

		if (is_feat_fgt_supported()) {
			el2_sysregs_context_save_fgt(el2_sysregs_ctx);
		}

		if (is_feat_ecv_v2_supported()) {
			write_ctx_reg(el2_sysregs_ctx, CTX_CNTPOFF_EL2,
				      read_cntpoff_el2());
		}

		if (is_feat_vhe_supported()) {
			write_ctx_reg(el2_sysregs_ctx, CTX_CONTEXTIDR_EL2,
				      read_contextidr_el2());
			write_ctx_reg(el2_sysregs_ctx, CTX_TTBR1_EL2,
				      read_ttbr1_el2());
		}

		if (is_feat_ras_supported()) {
			write_ctx_reg(el2_sysregs_ctx, CTX_VDISR_EL2,
				      read_vdisr_el2());
			write_ctx_reg(el2_sysregs_ctx, CTX_VSESR_EL2,
				      read_vsesr_el2());
		}

		if (is_feat_nv2_supported()) {
			write_ctx_reg(el2_sysregs_ctx, CTX_VNCR_EL2,
				      read_vncr_el2());
		}

		if (is_feat_trf_supported()) {
			write_ctx_reg(el2_sysregs_ctx, CTX_TRFCR_EL2, read_trfcr_el2());
		}

		if (is_feat_csv2_2_supported()) {
			write_ctx_reg(el2_sysregs_ctx, CTX_SCXTNUM_EL2,
				      read_scxtnum_el2());
		}

		if (is_feat_hcx_supported()) {
			write_ctx_reg(el2_sysregs_ctx, CTX_HCRX_EL2, read_hcrx_el2());
		}
		if (is_feat_tcr2_supported()) {
			write_ctx_reg(el2_sysregs_ctx, CTX_TCR2_EL2, read_tcr2_el2());
		}
		if (is_feat_sxpie_supported()) {
			write_ctx_reg(el2_sysregs_ctx, CTX_PIRE0_EL2, read_pire0_el2());
			write_ctx_reg(el2_sysregs_ctx, CTX_PIR_EL2, read_pir_el2());
		}
		if (is_feat_s2pie_supported()) {
			write_ctx_reg(el2_sysregs_ctx, CTX_S2PIR_EL2, read_s2pir_el2());
		}
		if (is_feat_sxpoe_supported()) {
			write_ctx_reg(el2_sysregs_ctx, CTX_POR_EL2, read_por_el2());
		}
		if (is_feat_gcs_supported()) {
			write_ctx_reg(el2_sysregs_ctx, CTX_GCSPR_EL2, read_gcspr_el2());
			write_ctx_reg(el2_sysregs_ctx, CTX_GCSCR_EL2, read_gcscr_el2());
		}
	}
}

/*******************************************************************************
 * Restore EL2 sysreg context
 ******************************************************************************/
void cm_el2_sysregs_context_restore(uint32_t security_state)
{
	u_register_t scr_el3 = read_scr();

	/*
	 * Always restore the non-secure and realm EL2 context; only restore
	 * the S-EL2 context if S-EL2 is enabled.
	 */
	if ((security_state != SECURE) ||
	    ((security_state == SECURE) && ((scr_el3 & SCR_EEL2_BIT) != 0U))) {
		cpu_context_t *ctx;
		el2_sysregs_t *el2_sysregs_ctx;

		ctx = cm_get_context(security_state);
		assert(ctx != NULL);

		el2_sysregs_ctx = get_el2_sysregs_ctx(ctx);

		el2_sysregs_context_restore_common(el2_sysregs_ctx);
#if CTX_INCLUDE_MTE_REGS
		el2_sysregs_context_restore_mte(el2_sysregs_ctx);
#endif
		if (is_feat_mpam_supported()) {
			el2_sysregs_context_restore_mpam(el2_sysregs_ctx);
		}

		if (is_feat_fgt_supported()) {
			el2_sysregs_context_restore_fgt(el2_sysregs_ctx);
		}

		if (is_feat_ecv_v2_supported()) {
			write_cntpoff_el2(read_ctx_reg(el2_sysregs_ctx,
						       CTX_CNTPOFF_EL2));
		}

		if (is_feat_vhe_supported()) {
			write_contextidr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_CONTEXTIDR_EL2));
			write_ttbr1_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TTBR1_EL2));
		}

		if (is_feat_ras_supported()) {
			write_vdisr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_VDISR_EL2));
			write_vsesr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_VSESR_EL2));
		}

		if (is_feat_nv2_supported()) {
			write_vncr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_VNCR_EL2));
		}

		if (is_feat_trf_supported()) {
			write_trfcr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TRFCR_EL2));
		}

		if (is_feat_csv2_2_supported()) {
			write_scxtnum_el2(read_ctx_reg(el2_sysregs_ctx,
						       CTX_SCXTNUM_EL2));
		}

		if (is_feat_hcx_supported()) {
			write_hcrx_el2(read_ctx_reg(el2_sysregs_ctx, CTX_HCRX_EL2));
		}
		if (is_feat_tcr2_supported()) {
			write_tcr2_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TCR2_EL2));
		}
		if (is_feat_sxpie_supported()) {
			write_pire0_el2(read_ctx_reg(el2_sysregs_ctx, CTX_PIRE0_EL2));
			write_pir_el2(read_ctx_reg(el2_sysregs_ctx, CTX_PIR_EL2));
		}
		if (is_feat_s2pie_supported()) {
			write_s2pir_el2(read_ctx_reg(el2_sysregs_ctx, CTX_S2PIR_EL2));
		}
		if (is_feat_sxpoe_supported()) {
			write_por_el2(read_ctx_reg(el2_sysregs_ctx, CTX_POR_EL2));
		}
		if (is_feat_gcs_supported()) {
			write_gcscr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_GCSCR_EL2));
			write_gcspr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_GCSPR_EL2));
		}
	}
}
#endif /* CTX_INCLUDE_EL2_REGS */
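
/*
 * Minimal usage sketch (not part of the original file): a dispatcher handling
 * a world switch would pair the functions above as, for example,
 *     cm_el1_sysregs_context_save(NON_SECURE);
 *     cm_el2_sysregs_context_save(NON_SECURE);
 *     cm_el2_sysregs_context_restore(SECURE);
 *     cm_el1_sysregs_context_restore(SECURE);
 *     cm_set_next_eret_context(SECURE);
 * so the outgoing world's EL1/EL2 state is captured before the incoming
 * world's state is installed.
 */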

/*******************************************************************************
 * This function is used to exit to the Non-secure world. If
 * CTX_INCLUDE_EL2_REGS is enabled, it restores the EL1 and EL2 sysreg contexts
 * instead of directly updating EL1 and EL2 registers. Otherwise, it calls the
 * generic cm_prepare_el3_exit function.
 ******************************************************************************/
void cm_prepare_el3_exit_ns(void)
{
#if CTX_INCLUDE_EL2_REGS
	cpu_context_t *ctx = cm_get_context(NON_SECURE);
	assert(ctx != NULL);

	/* Assert that EL2 is used. */
#if ENABLE_ASSERTIONS
	el3_state_t *state = get_el3state_ctx(ctx);
	u_register_t scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
#endif
	assert(((scr_el3 & SCR_HCE_BIT) != 0UL) &&
			(el_implemented(2U) != EL_IMPL_NONE));

	/*
	 * Currently some extensions are configured using
	 * direct register updates. Therefore, do this here
	 * instead of when setting up context.
	 */
	manage_extensions_nonsecure(0, ctx);

	/*
	 * Set the NS bit to be able to access the ICC_SRE_EL2
	 * register when restoring context.
	 */
	write_scr_el3(read_scr_el3() | SCR_NS_BIT);

	/*
	 * Ensure the NS bit change is committed before the EL2/EL1
	 * state restoration.
	 */
	isb();

	/* Restore EL2 and EL1 sysreg contexts */
	cm_el2_sysregs_context_restore(NON_SECURE);
	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);
#else
	cm_prepare_el3_exit(NON_SECURE);
#endif /* CTX_INCLUDE_EL2_REGS */
}

/*******************************************************************************
 * The following functions are used by runtime services to save and restore
 * the EL1 context on the 'cpu_context' structure for the specified security
 * state.
 ******************************************************************************/
void cm_el1_sysregs_context_save(uint32_t security_state)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	el1_sysregs_context_save(get_el1_sysregs_ctx(ctx));

#if IMAGE_BL31
	if (security_state == SECURE)
		PUBLISH_EVENT(cm_exited_secure_world);
	else
		PUBLISH_EVENT(cm_exited_normal_world);
#endif
}

void cm_el1_sysregs_context_restore(uint32_t security_state)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	el1_sysregs_context_restore(get_el1_sysregs_ctx(ctx));

#if IMAGE_BL31
	if (security_state == SECURE)
		PUBLISH_EVENT(cm_entering_secure_world);
	else
		PUBLISH_EVENT(cm_entering_normal_world);
#endif
}

/*******************************************************************************
 * This function populates the ELR_EL3 member of 'cpu_context' pertaining to
 * the given security state with the given entrypoint.
 ******************************************************************************/
void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint)
{
	cpu_context_t *ctx;
	el3_state_t *state;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	/* Populate EL3 state so that ERET jumps to the correct entry */
	state = get_el3state_ctx(ctx);
	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
}

/*******************************************************************************
 * This function populates the ELR_EL3 and SPSR_EL3 members of 'cpu_context'
 * pertaining to the given security state.
 ******************************************************************************/
void cm_set_elr_spsr_el3(uint32_t security_state,
			uintptr_t entrypoint, uint32_t spsr)
{
	cpu_context_t *ctx;
	el3_state_t *state;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	/* Populate EL3 state so that ERET jumps to the correct entry */
	state = get_el3state_ctx(ctx);
	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
	write_ctx_reg(state, CTX_SPSR_EL3, spsr);
}
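
/*
 * Minimal usage sketch (not part of the original file, kept out of the build
 * with #if 0): redirecting the next secure-world ERET to a hypothetical
 * handler running at S-EL1. The handler address is supplied by the caller
 * purely for illustration.
 */
#if 0
static void example_redirect_secure_entry(uintptr_t handler_pc)
{
	uint32_t spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);

	cm_set_elr_spsr_el3(SECURE, handler_pc, spsr);
	cm_set_next_eret_context(SECURE);
}
#endif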

/*******************************************************************************
 * This function updates a single bit in the SCR_EL3 member of the
 * 'cpu_context' pertaining to the given security state using the value and
 * bit position specified in the parameters. It preserves all other bits.
 ******************************************************************************/
void cm_write_scr_el3_bit(uint32_t security_state,
			  uint32_t bit_pos,
			  uint32_t value)
{
	cpu_context_t *ctx;
	el3_state_t *state;
	u_register_t scr_el3;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	/* Ensure that the bit position is a valid one */
	assert(((1UL << bit_pos) & SCR_VALID_BIT_MASK) != 0U);

	/* Ensure that the 'value' is only a bit wide */
	assert(value <= 1U);

	/*
	 * Get the SCR_EL3 value from the cpu context, clear the desired bit
	 * and set it to its new value.
	 */
	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
	scr_el3 &= ~(1UL << bit_pos);
	scr_el3 |= (u_register_t)value << bit_pos;
	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
}
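
/*
 * Illustrative note (not from the original source): the interrupt management
 * framework uses this helper to flip interrupt routing bits, e.g.
 * cm_write_scr_el3_bit(NON_SECURE, 1U, 1U) sets bit 1 (SCR_EL3.IRQ) so that
 * physical IRQs taken while running non-secure are routed to EL3.
 */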

/*******************************************************************************
 * This function retrieves the SCR_EL3 member of 'cpu_context' pertaining to
 * the given security state.
 ******************************************************************************/
u_register_t cm_get_scr_el3(uint32_t security_state)
{
	cpu_context_t *ctx;
	el3_state_t *state;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	/* Return the stored SCR_EL3 value from the cpu context */
	state = get_el3state_ctx(ctx);
	return read_ctx_reg(state, CTX_SCR_EL3);
}

/*******************************************************************************
 * This function is used to program the context that's used for exception
 * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for
 * the required security state.
 ******************************************************************************/
void cm_set_next_eret_context(uint32_t security_state)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	cm_set_next_context(ctx);
}