/*
 * Copyright (c) 2016-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <platform_def.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/sys_reg_trace.h>
#include <lib/extensions/trf.h>
#include <lib/utils.h>

/*******************************************************************************
 * Context management library initialisation routine. This library is used by
 * runtime services to share pointers to 'cpu_context' structures for the
 * secure and non-secure states. Management of the structures and their
 * associated memory is not done by the context management library e.g. the
 * PSCI service manages the cpu context used for entry from and exit to the
 * non-secure state. The Secure payload manages the context(s) corresponding
 * to the secure state. It also uses this library to get access to the
 * non-secure state cpu context pointers.
 ******************************************************************************/
void cm_init(void)
{
	/*
	 * The context management library has only global data to initialize,
	 * but that will be done when the BSS is zeroed out
	 */
}

/*******************************************************************************
 * The following function initializes the cpu_context 'ctx' for
 * first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 *
 * The security state to initialize is determined by the SECURE attribute
 * of the entry_point_info.
 *
 * The EE and ST attributes are used to configure the endianness and secure
 * timer availability for the new execution context.
 *
 * To prepare the register state for entry, call cm_prepare_el3_exit() and
 * el3_exit(). For Secure-EL1, cm_prepare_el3_exit() is equivalent to
 * cm_el1_sysregs_context_restore().
 ******************************************************************************/
void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
{
	unsigned int security_state;
	uint32_t scr, sctlr;
	regs_t *reg_ctx;

	assert(ctx != NULL);

	security_state = GET_SECURITY_STATE(ep->h.attr);

	/* Clear any residual register values from the context */
	zeromem(ctx, sizeof(*ctx));

	reg_ctx = get_regs_ctx(ctx);

	/*
	 * Base the context SCR on the current value, adjust for entry point
	 * specific requirements
	 */
	scr = read_scr();
	scr &= ~(SCR_NS_BIT | SCR_HCE_BIT);

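	/* SCR.NS selects the security state for the new context. */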
	if (security_state != SECURE)
		scr |= SCR_NS_BIT;

	if (security_state != SECURE) {
		/*
		 * Set up SCTLR for the Non-secure context.
		 *
		 * SCTLR.EE: Endianness is taken from the entrypoint attributes.
		 *
		 * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as
		 * required by the PSCI specification)
		 *
		 * Set remaining SCTLR fields to their architecturally defined
		 * values. Some fields reset to an IMPLEMENTATION DEFINED value:
		 *
		 * SCTLR.TE: Set to zero so that exceptions to an Exception
		 * Level executing at PL1 are taken to A32 state.
		 *
		 * SCTLR.V: Set to zero to select the normal exception vectors
		 * with base address held in VBAR.
		 */
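		/*
		 * The endianness requested via SPSR.E must agree with the
		 * entrypoint EP_EE attribute that determines SCTLR.EE below.
		 */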
		assert(((ep->spsr >> SPSR_E_SHIFT) & SPSR_E_MASK) ==
			(EP_GET_EE(ep->h.attr) >> EP_EE_SHIFT));

		sctlr = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0U;
		sctlr |= (SCTLR_RESET_VAL & ~(SCTLR_TE_BIT | SCTLR_V_BIT));
		write_ctx_reg(reg_ctx, CTX_NS_SCTLR, sctlr);
	}

	/*
	 * The target exception level is based on the spsr mode requested. If
	 * execution is requested to hyp mode, HVC is enabled via SCR.HCE.
	 */
	if (GET_M32(ep->spsr) == MODE32_hyp)
		scr |= SCR_HCE_BIT;

	/*
	 * Store the initialised values for SCTLR and SCR in the cpu_context.
	 * The Hyp mode registers are not part of the saved context and are
	 * set-up in cm_prepare_el3_exit().
	 */
	write_ctx_reg(reg_ctx, CTX_SCR, scr);
	write_ctx_reg(reg_ctx, CTX_LR, ep->pc);
	write_ctx_reg(reg_ctx, CTX_SPSR, ep->spsr);

	/*
	 * Store the r0-r3 values from the entrypoint into the context.
	 * Use memcpy as we are in control of the layout of the structures.
	 */
	memcpy((void *)reg_ctx, (void *)&ep->args, sizeof(aapcs32_params_t));
}

/*******************************************************************************
 * Enable architecture extensions on first entry to Non-secure world.
 * When EL2 is implemented but unused, `el2_unused` is true; otherwise it is
 * false.
 ******************************************************************************/
static void enable_extensions_nonsecure(bool el2_unused)
{
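	/*
	 * On AArch32, the EL3 runtime firmware is built as the BL32 (SP_MIN)
	 * image, hence the extensions are only enabled when building BL32.
	 */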
#if IMAGE_BL32
	if (is_feat_amu_supported()) {
		amu_enable(el2_unused);
	}

	if (is_feat_sys_reg_trace_supported()) {
		sys_reg_trace_enable();
	}

	if (is_feat_trf_supported()) {
		trf_enable();
	}
#endif
}

/*******************************************************************************
 * The following function initializes the cpu_context for a CPU specified by
 * its `cpu_idx` for first use, and sets the initial entrypoint state as
 * specified by the entry_point_info structure.
 ******************************************************************************/
void cm_init_context_by_index(unsigned int cpu_idx,
			      const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}

/*******************************************************************************
 * The following function initializes the cpu_context for the current CPU
 * for first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 ******************************************************************************/
void cm_init_my_context(const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}
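
/*
 * Minimal usage sketch (illustrative only, not called from this file): a
 * runtime service typically pairs the two calls below before leaving EL3 via
 * the el3_exit path, e.g.
 *
 *	cm_init_my_context(ns_ep);	  ns_ep: a hypothetical BL33 ep_info
 *	cm_prepare_el3_exit(NON_SECURE);
 */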

/*******************************************************************************
 * Prepare the CPU system registers for first entry into secure or normal world
 *
 * If execution is requested to Hyp mode, HSCTLR is initialized.
 * If execution is requested to non-secure PL1, and the CPU supports Hyp
 * mode, then Hyp mode is disabled by configuring all necessary Hyp mode
 * registers.
 ******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
	uint32_t hsctlr, scr;
	cpu_context_t *ctx = cm_get_context(security_state);
	bool el2_unused = false;

	assert(ctx != NULL);

	if (security_state == NON_SECURE) {
		scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR);
		if ((scr & SCR_HCE_BIT) != 0U) {
			/* Use SCTLR value to initialize HSCTLR */
			hsctlr = read_ctx_reg(get_regs_ctx(ctx),
					      CTX_NS_SCTLR);
			hsctlr |= HSCTLR_RES1;
			/* Temporarily set the NS bit to access HSCTLR */
			write_scr(read_scr() | SCR_NS_BIT);
			/*
			 * Make sure the write to SCR is complete so that
			 * we can access HSCTLR
			 */
			isb();
			write_hsctlr(hsctlr);
			isb();

			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		} else if ((read_id_pfr1() &
			(ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) != 0U) {
			el2_unused = true;

			/*
			 * Set the NS bit to access NS copies of certain banked
			 * registers
			 */
			write_scr(read_scr() | SCR_NS_BIT);
			isb();

			/*
			 * Hyp / PL2 present but unused, need to disable safely.
			 * HSCTLR can be ignored in this case.
			 *
			 * Set HCR to its architectural reset value so that
			 * Non-secure operations do not trap to Hyp mode.
			 */
			write_hcr(HCR_RESET_VAL);

			/*
			 * Set HCPTR to its architectural reset value so that
			 * Non-secure access from EL1 or EL0 to trace and to
			 * Advanced SIMD and floating point functionality does
			 * not trap to Hyp mode.
			 */
			write_hcptr(HCPTR_RESET_VAL);

			/*
			 * Initialise CNTHCTL. All fields are architecturally
			 * UNKNOWN on reset and are set to zero except for
			 * field(s) listed below.
			 *
			 * CNTHCTL.PL1PCEN: Disable traps to Hyp mode of
			 * Non-secure EL0 and EL1 accesses to the physical
			 * timer registers.
			 *
			 * CNTHCTL.PL1PCTEN: Disable traps to Hyp mode of
			 * Non-secure EL0 and EL1 accesses to the physical
			 * counter registers.
			 */
			write_cnthctl(CNTHCTL_RESET_VAL |
					PL1PCEN_BIT | PL1PCTEN_BIT);

			/*
			 * Initialise CNTVOFF to zero as it resets to an
			 * IMPLEMENTATION DEFINED value.
			 */
			write64_cntvoff(0);

			/*
			 * Set VPIDR and VMPIDR to match MIDR_EL1 and MPIDR
			 * respectively.
			 */
			write_vpidr(read_midr());
			write_vmpidr(read_mpidr());

			/*
			 * Initialise VTTBR, setting all fields rather than
			 * relying on the hw. Some fields are architecturally
			 * UNKNOWN at reset.
			 *
			 * VTTBR.VMID: Set to zero which is the architecturally
			 * defined reset value. Even though EL1&0 stage 2
			 * address translation is disabled, cache maintenance
			 * operations depend on the VMID.
			 *
			 * VTTBR.BADDR: Set to zero as EL1&0 stage 2 address
			 * translation is disabled.
			 */
			write64_vttbr(VTTBR_RESET_VAL &
				~((VTTBR_VMID_MASK << VTTBR_VMID_SHIFT)
				| (VTTBR_BADDR_MASK << VTTBR_BADDR_SHIFT)));

			/*
			 * Initialise HDCR, setting all the fields rather than
			 * relying on hw.
			 *
			 * HDCR.HPMN: Set to value of PMCR.N which is the
			 * architecturally-defined reset value.
			 *
			 * HDCR.HLP: Set to one so that event counter
			 * overflow, that is recorded in PMOVSCLR[0-30],
			 * occurs on the increment that changes
			 * PMEVCNTR<n>[63] from 1 to 0, when ARMv8.5-PMU is
			 * implemented. This bit is RES0 in versions of the
			 * architecture earlier than ARMv8.5, so setting it
			 * to 1 has no effect on them.
			 * This bit is Reserved, UNK/SBZP in ARMv7.
			 *
			 * HDCR.HPME: Set to zero to disable EL2 Event
			 * counters.
			 */
#if (ARM_ARCH_MAJOR > 7)
			write_hdcr((HDCR_RESET_VAL | HDCR_HLP_BIT |
				   ((read_pmcr() & PMCR_N_BITS) >>
				    PMCR_N_SHIFT)) & ~HDCR_HPME_BIT);
#else
			write_hdcr((HDCR_RESET_VAL |
				   ((read_pmcr() & PMCR_N_BITS) >>
				    PMCR_N_SHIFT)) & ~HDCR_HPME_BIT);
#endif
			/*
			 * Set HSTR to its architectural reset value so that
			 * accesses to system registers in the coproc=1111
			 * encoding space do not trap to Hyp mode.
			 */
			write_hstr(HSTR_RESET_VAL);
			/*
			 * Set CNTHP_CTL to its architectural reset value to
			 * disable the EL2 physical timer and prevent timer
			 * interrupts. Some fields are architecturally UNKNOWN
			 * on reset and are set to zero.
			 */
			write_cnthp_ctl(CNTHP_CTL_RESET_VAL);
			isb();

			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		}
		enable_extensions_nonsecure(el2_unused);
	}
}

/*******************************************************************************
 * This function is used to exit to Non-secure world. It simply calls the
 * cm_prepare_el3_exit function for AArch32.
 ******************************************************************************/
void cm_prepare_el3_exit_ns(void)
{
	cm_prepare_el3_exit(NON_SECURE);
}