/*
 * Copyright (c) 2016-2023, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <platform_def.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/pmuv3.h>
#include <lib/extensions/sys_reg_trace.h>
#include <lib/extensions/trf.h>
#include <lib/utils.h>

/*******************************************************************************
 * Context management library initialisation routine. This library is used by
 * runtime services to share pointers to 'cpu_context' structures for the secure
 * and non-secure states. Management of the structures and their associated
 * memory is not done by the context management library, e.g. the PSCI service
 * manages the cpu context used for entry from and exit to the non-secure state.
 * The Secure payload manages the context(s) corresponding to the secure state.
 * It also uses this library to get access to the non-secure state cpu context
 * pointers.
 ******************************************************************************/
void cm_init(void)
{
	/*
	 * The context management library has only global data to initialize,
	 * and that is done when the BSS is zeroed out.
	 */
}

/*******************************************************************************
 * The following function initializes the cpu_context 'ctx' for first use, and
 * sets the initial entrypoint state as specified by the entry_point_info
 * structure.
 *
 * The security state to initialize is determined by the SECURE attribute
 * of the entry_point_info.
 *
 * The EE and ST attributes are used to configure the endianness and secure
 * timer availability for the new execution context.
 *
 * To prepare the register state for entry, call cm_prepare_el3_exit() and
 * el3_exit(). For Secure-EL1, cm_prepare_el3_exit() is equivalent to
 * cm_el1_sysregs_context_restore().
 ******************************************************************************/
void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
{
	unsigned int security_state;
	uint32_t scr, sctlr;
	regs_t *reg_ctx;

	assert(ctx != NULL);

	security_state = GET_SECURITY_STATE(ep->h.attr);

	/* Clear any residual register values from the context */
	zeromem(ctx, sizeof(*ctx));

	reg_ctx = get_regs_ctx(ctx);

	/*
	 * Base the context SCR on the current value, adjust for entry point
	 * specific requirements
	 */
	scr = read_scr();
	scr &= ~(SCR_NS_BIT | SCR_HCE_BIT);

	if (security_state != SECURE) {
		scr |= SCR_NS_BIT;

		/*
		 * Set up SCTLR for the Non-secure context.
		 *
		 * SCTLR.EE: Endianness is taken from the entrypoint attributes.
		 *
		 * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as
		 * required by the PSCI specification)
		 *
		 * Set remaining SCTLR fields to their architecturally defined
		 * values. Some fields reset to an IMPLEMENTATION DEFINED value:
		 *
		 * SCTLR.TE: Set to zero so that exceptions to an Exception
		 * Level executing at PL1 are taken to A32 state.
		 *
		 * SCTLR.V: Set to zero to select the normal exception vectors
		 * with base address held in VBAR.
		 */
		assert(((ep->spsr >> SPSR_E_SHIFT) & SPSR_E_MASK) ==
			(EP_GET_EE(ep->h.attr) >> EP_EE_SHIFT));

		sctlr = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0U;
		sctlr |= (SCTLR_RESET_VAL & ~(SCTLR_TE_BIT | SCTLR_V_BIT));
		write_ctx_reg(reg_ctx, CTX_NS_SCTLR, sctlr);
	}

	/*
	 * The target exception level is based on the spsr mode requested. If
	 * execution is requested to hyp mode, HVC is enabled via SCR.HCE.
	 */
	if (GET_M32(ep->spsr) == MODE32_hyp)
		scr |= SCR_HCE_BIT;

	/*
	 * Store the initialised values for SCTLR and SCR in the cpu_context.
	 * The Hyp mode registers are not part of the saved context and are
	 * set up in cm_prepare_el3_exit().
	 */
	write_ctx_reg(reg_ctx, CTX_SCR, scr);
	write_ctx_reg(reg_ctx, CTX_LR, ep->pc);
	write_ctx_reg(reg_ctx, CTX_SPSR, ep->spsr);

	/*
	 * Store the r0-r3 values from the entrypoint into the context.
	 * Use memcpy as we are in control of the layout of the structures.
	 */
	memcpy((void *)reg_ctx, (void *)&ep->args, sizeof(aapcs32_params_t));
}
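
/*
 * Illustrative usage (a sketch, not part of this library): a caller would
 * typically populate an entry_point_info_t before handing it to
 * cm_setup_context(), either directly or through one of the
 * cm_init_*_context() wrappers below. The names ns_image_base and ns_dtb_base
 * are hypothetical values chosen for this example.
 *
 *	entry_point_info_t ep;
 *	cpu_context_t *ctx = cm_get_context(NON_SECURE);
 *
 *	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1, NON_SECURE | EP_EE_LITTLE);
 *	ep.pc = ns_image_base;
 *	ep.spsr = SPSR_MODE32(MODE32_hyp, SPSR_T_ARM, SPSR_E_LITTLE,
 *			      DISABLE_ALL_EXCEPTIONS);
 *	zeromem(&ep.args, sizeof(ep.args));
 *	ep.args.arg0 = ns_dtb_base;
 *
 *	cm_setup_context(ctx, &ep);
 */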

/*******************************************************************************
 * Enable architecture extensions on first entry to Non-secure world.
 * The `el2_unused` flag is true when EL2 is implemented but unused, and false
 * otherwise.
 ******************************************************************************/
static void enable_extensions_nonsecure(bool el2_unused)
{
#if IMAGE_BL32
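	/*
	 * Note: on AArch32 these extension controls are only managed by the
	 * EL3 runtime image (SP_MIN), which is built as BL32.
	 */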
	if (is_feat_amu_supported()) {
		amu_enable(el2_unused);
	}

	if (is_feat_sys_reg_trace_supported()) {
		sys_reg_trace_enable();
	}

	if (is_feat_trf_supported()) {
		trf_enable();
	}

	/*
	 * Also applies to PMU < v3. The PMU is only disabled for EL3 and Secure
	 * state execution. This does not affect lower NS ELs.
	 */
	pmuv3_disable_el3();
#endif
}

/*******************************************************************************
 * The following function initializes the cpu_context for a CPU specified by
 * its `cpu_idx` for first use, and sets the initial entrypoint state as
 * specified by the entry_point_info structure.
 ******************************************************************************/
void cm_init_context_by_index(unsigned int cpu_idx,
			      const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}

/*******************************************************************************
 * The following function initializes the cpu_context for the current CPU
 * for first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 ******************************************************************************/
void cm_init_my_context(const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}

/*******************************************************************************
 * Prepare the CPU system registers for first entry into secure or normal
 * world.
 *
 * If execution is requested to hyp mode, HSCTLR is initialized.
 * If execution is requested to non-secure PL1 and the CPU supports HYP mode,
 * then HYP mode is disabled by configuring all necessary HYP mode registers.
 ******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
	uint32_t hsctlr, scr;
	cpu_context_t *ctx = cm_get_context(security_state);
	bool el2_unused = false;

	assert(ctx != NULL);

	if (security_state == NON_SECURE) {
		scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR);
		if ((scr & SCR_HCE_BIT) != 0U) {
			/* Use SCTLR value to initialize HSCTLR */
			hsctlr = read_ctx_reg(get_regs_ctx(ctx),
					      CTX_NS_SCTLR);
			hsctlr |= HSCTLR_RES1;
			/* Temporarily set the NS bit to access HSCTLR */
			write_scr(read_scr() | SCR_NS_BIT);
			/*
			 * Make sure the write to SCR is complete so that
			 * we can access HSCTLR
			 */
			isb();
			write_hsctlr(hsctlr);
			isb();

			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		} else if ((read_id_pfr1() &
			    (ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) != 0U) {
			el2_unused = true;

			/*
			 * Set the NS bit to access NS copies of certain banked
			 * registers
			 */
			write_scr(read_scr() | SCR_NS_BIT);
			isb();

			/*
			 * Hyp / PL2 present but unused, need to disable safely.
			 * HSCTLR can be ignored in this case.
			 *
			 * Set HCR to its architectural reset value so that
			 * Non-secure operations do not trap to Hyp mode.
			 */
			write_hcr(HCR_RESET_VAL);

			/*
			 * Set HCPTR to its architectural reset value so that
			 * Non-secure access from EL1 or EL0 to trace and to
			 * Advanced SIMD and floating point functionality does
			 * not trap to Hyp mode.
			 */
			write_hcptr(HCPTR_RESET_VAL);

			/*
			 * Initialise CNTHCTL. All fields are architecturally
			 * UNKNOWN on reset and are set to zero except for
			 * field(s) listed below.
			 *
			 * CNTHCTL.PL1PCEN: Disable traps to Hyp mode of
			 * Non-secure EL0 and EL1 accesses to the physical
			 * timer registers.
			 *
			 * CNTHCTL.PL1PCTEN: Disable traps to Hyp mode of
			 * Non-secure EL0 and EL1 accesses to the physical
			 * counter registers.
			 */
			write_cnthctl(CNTHCTL_RESET_VAL |
					PL1PCEN_BIT | PL1PCTEN_BIT);

			/*
			 * Initialise CNTVOFF to zero as it resets to an
			 * IMPLEMENTATION DEFINED value.
			 */
			write64_cntvoff(0);

			/*
			 * Set VPIDR and VMPIDR to match MIDR and MPIDR
			 * respectively.
			 */
			write_vpidr(read_midr());
			write_vmpidr(read_mpidr());

			/*
			 * Initialise VTTBR, setting all fields rather than
			 * relying on the hw. Some fields are architecturally
			 * UNKNOWN at reset.
			 *
			 * VTTBR.VMID: Set to zero which is the architecturally
			 * defined reset value. Even though EL1&0 stage 2
			 * address translation is disabled, cache maintenance
			 * operations depend on the VMID.
			 *
			 * VTTBR.BADDR: Set to zero as EL1&0 stage 2 address
			 * translation is disabled.
			 */
			write64_vttbr(VTTBR_RESET_VAL &
				~((VTTBR_VMID_MASK << VTTBR_VMID_SHIFT)
				| (VTTBR_BADDR_MASK << VTTBR_BADDR_SHIFT)));

			/*
			 * Initialise HDCR, setting all the fields rather than
			 * relying on hw.
			 *
			 * HDCR.HPMN: Set to the value of PMCR.N, which is the
			 * architecturally-defined reset value.
			 *
			 * HDCR.HLP: Set to one so that event counter
			 * overflow, that is recorded in PMOVSCLR[0-30],
			 * occurs on the increment that changes
			 * PMEVCNTR<n>[63] from 1 to 0, when ARMv8.5-PMU is
			 * implemented. This bit is RES0 in versions of the
			 * architecture earlier than ARMv8.5, so setting it to
			 * 1 has no effect on them.
			 * This bit is Reserved, UNK/SBZP in ARMv7.
			 *
			 * HDCR.HPME: Set to zero to disable EL2 Event
			 * counters.
			 */
#if (ARM_ARCH_MAJOR > 7)
			write_hdcr((HDCR_RESET_VAL | HDCR_HLP_BIT |
				   ((read_pmcr() & PMCR_N_BITS) >>
				    PMCR_N_SHIFT)) & ~HDCR_HPME_BIT);
#else
			write_hdcr((HDCR_RESET_VAL |
				   ((read_pmcr() & PMCR_N_BITS) >>
				    PMCR_N_SHIFT)) & ~HDCR_HPME_BIT);
#endif
			/*
			 * Set HSTR to its architectural reset value so that
			 * accesses to system registers in the coproc=1111
			 * encoding space do not trap to Hyp mode.
			 */
			write_hstr(HSTR_RESET_VAL);
			/*
			 * Set CNTHP_CTL to its architectural reset value to
			 * disable the EL2 physical timer and prevent timer
			 * interrupts. Some fields are architecturally UNKNOWN
			 * on reset and are set to zero.
			 */
			write_cnthp_ctl(CNTHP_CTL_RESET_VAL);
			isb();

			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		}
		enable_extensions_nonsecure(el2_unused);
	}
}

/*******************************************************************************
 * This function is used to exit to Non-secure world. It simply calls the
 * cm_prepare_el3_exit function for AArch32.
 ******************************************************************************/
void cm_prepare_el3_exit_ns(void)
{
	cm_prepare_el3_exit(NON_SECURE);
}
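
/*
 * Illustrative call sequence (a sketch, not part of this library): a runtime
 * service that owns the non-secure context would typically chain the helpers
 * above as follows before performing its own exception return to the normal
 * world. The 'ns_ep' variable is a hypothetical, fully populated
 * entry_point_info_t, and the final exit step is left to the caller's own
 * exit path.
 *
 *	cm_init_my_context(&ns_ep);
 *	cm_prepare_el3_exit_ns();
 *	[restore the general purpose registers and perform the exception
 *	 return to the entrypoint programmed in the context]
 */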