/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <stdbool.h>
#include <string.h>

#include <arch_helpers.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/psci/psci.h>
#include <lib/utils.h>
#include <plat/arm/common/arm_sip_svc.h>
#include <plat/arm/common/plat_arm.h>
#include <smccc_helpers.h>

/*
 * Handle SMC from a lower exception level to switch its execution state
 * (either from AArch64 to AArch32, or vice versa).
 *
 * smc_fid:
 *	SMC function ID - either ARM_SIP_SVC_STATE_SWITCH_64 or
 *	ARM_SIP_SVC_STATE_SWITCH_32.
 * pc_hi, pc_lo:
 *	PC upon re-entry to the calling exception level; width dependent on
 *	the calling exception level.
 * cookie_hi, cookie_lo:
 *	Opaque pointer pair received from the caller, to be passed back to it
 *	upon re-entry.
 * handle:
 *	Handle to saved context.
 */
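/*
 * Note: this handler is reached through the Arm SiP service dispatcher;
 * under the SMC calling convention the function ID arrives in the first
 * argument register and pc_hi/pc_lo/cookie_hi/cookie_lo in the next four.
 * On failure the SMC returns STATE_SW_E_PARAM or STATE_SW_E_DENIED (see
 * below); on success it does not return to the call site at all.
 */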
int arm_execution_state_switch(unsigned int smc_fid,
		uint32_t pc_hi,
		uint32_t pc_lo,
		uint32_t cookie_hi,
		uint32_t cookie_lo,
		void *handle)
{
	bool caller_64, thumb = false, from_el2;
	unsigned int el, endianness;
	u_register_t spsr, pc, scr, sctlr;
	entry_point_info_t ep;
	cpu_context_t *ctx = (cpu_context_t *) handle;
	el3_state_t *el3_ctx = get_el3state_ctx(ctx);

	/* Validate supplied entry point */
	pc = (u_register_t) (((uint64_t) pc_hi << 32) | pc_lo);
	if (arm_validate_ns_entrypoint(pc) != 0)
		goto invalid_param;

	/* That the SMC originated from NS is already validated by the caller */

	/*
	 * Disallow state switch if any of the secondaries have been brought up.
	 */
	if (psci_secondaries_brought_up() != 0)
		goto exec_denied;
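	/*
	 * (A state switch amounts to a soft reset of the calling core only;
	 * secondaries already brought online would be left running in the
	 * old execution state.)
	 */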

	spsr = read_ctx_reg(el3_ctx, CTX_SPSR_EL3);
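	/*
	 * SPSR_EL3.M[4] is the RW bit: 0 if the SMC was taken from AArch64,
	 * 1 if it was taken from AArch32; GET_RW() extracts it.
	 */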
	caller_64 = (GET_RW(spsr) == MODE_RW_64);

	if (caller_64) {
		/*
		 * If the call originated from AArch64, expect 32-bit pointers
		 * when switching to AArch32.
		 */
		if ((pc_hi != 0U) || (cookie_hi != 0U))
			goto invalid_param;

		pc = pc_lo;

		/* Instruction state when entering AArch32 */
		thumb = (pc & 1U) != 0U;
	} else {
		/* Construct AArch64 PC */
		pc = (((u_register_t) pc_hi) << 32) | pc_lo;
	}
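	/*
	 * Note: bit[0] of an AArch32 entry point selects the T32 (Thumb)
	 * instruction set, mirroring the BX-style interworking convention,
	 * and is not part of the address itself; hence the Thumb exemption
	 * from the alignment check below.
	 */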

	/* Make sure PC is 4-byte aligned, except for Thumb */
	if (((pc & 0x3U) != 0U) && !thumb)
		goto invalid_param;

	/*
	 * EL3 controls register width of the immediate lower EL only. Expect
	 * this request from EL2/Hyp unless:
	 *
	 * - EL2 is not implemented;
	 * - EL2 is implemented, but was disabled. This can be inferred from
	 *   SCR_EL3.HCE.
	 */
	from_el2 = caller_64 ? (GET_EL(spsr) == MODE_EL2) :
		(GET_M32(spsr) == MODE32_hyp);
	scr = read_ctx_reg(el3_ctx, CTX_SCR_EL3);
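	/*
	 * SCR_EL3.HCE enables the HVC instruction at the lower ELs, so a set
	 * bit implies a hypervisor may be present above a non-Hyp caller.
	 */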
	if (!from_el2) {
		/* The call is from an NS privilege level other than Hyp */

		/*
		 * Disallow switching state if there's a Hypervisor in place;
		 * this request must be taken up with the Hypervisor instead.
		 */
		if ((scr & SCR_HCE_BIT) != 0U)
			goto exec_denied;
	}

	/*
	 * Return to the caller using the same endianness. Extract
	 * endianness bit from the respective system control register
	 * directly.
	 */
	sctlr = from_el2 ? read_sctlr_el2() : read_sctlr_el1();
	endianness = ((sctlr & SCTLR_EE_BIT) != 0U) ? 1U : 0U;
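	/*
	 * This bit feeds the SPSR E bit for an AArch32 target and the
	 * EP_EE_* flag passed to the context library below.
	 */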

	/* Construct SPSR for the exception state we're about to switch to */
	if (caller_64) {
		unsigned long long impl;

		/*
		 * Switching from AArch64 to AArch32. Ensure this CPU
		 * implements the target EL in AArch32.
		 */
		impl = from_el2 ? el_implemented(2) : el_implemented(1);
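		/*
		 * EL_IMPL_A64_A32 means the EL supports both execution
		 * states (ID_AA64PFR0_EL1.EL<x> == 0b0010).
		 */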
		if (impl != EL_IMPL_A64_A32)
			goto exec_denied;

		/* Return to the equivalent AArch32 privilege level */
		el = from_el2 ? MODE32_hyp : MODE32_svc;
		spsr = SPSR_MODE32((u_register_t) el,
				thumb ? SPSR_T_THUMB : SPSR_T_ARM,
				endianness, DISABLE_ALL_EXCEPTIONS);
	} else {
		/*
		 * Switching from AArch32 to AArch64. Since it's not possible
		 * to implement an EL as AArch32-only (from which this call
		 * was raised), it's safe to assume AArch64 is also
		 * implemented.
		 */
		el = from_el2 ? MODE_EL2 : MODE_EL1;
		spsr = SPSR_64((u_register_t) el, MODE_SP_ELX,
				DISABLE_ALL_EXCEPTIONS);
	}

	/*
	 * Use the context management library to re-initialize the existing
	 * context with the execution state flipped. Since the library takes
	 * an entry_point_info_t pointer as argument, construct a dummy one
	 * with PC, state width, endianness, security etc. appropriately set.
	 * Other entries in the entry point structure are irrelevant for this
	 * purpose.
	 */
	zeromem(&ep, sizeof(ep));
	ep.pc = pc;
	ep.spsr = (uint32_t) spsr;
	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1,
			((unsigned int) ((endianness != 0U) ? EP_EE_BIG :
				EP_EE_LITTLE)
			 | NON_SECURE | EP_ST_DISABLE));

	/*
	 * Re-initialize the system register context, and exit EL3 as if for
	 * the first time. State switch is effectively a soft reset of the
	 * calling EL.
	 */
	cm_init_my_context(&ep);
	cm_prepare_el3_exit(NON_SECURE);
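	/*
	 * cm_init_my_context() above rewrites this core's saved context from
	 * 'ep' (including the SCR_EL3.RW setting for the new register
	 * width); cm_prepare_el3_exit() then programs the lower-EL system
	 * registers as for a cold entry into the non-secure world.
	 */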

	/*
	 * State switch success. The caller of the SMC won't see it return;
	 * instead, execution starts at the supplied entry point, with the
	 * cookie values populated in registers 0 and 1.
	 */
	SMC_RET2(handle, cookie_hi, cookie_lo);

invalid_param:
	SMC_RET1(handle, STATE_SW_E_PARAM);

exec_denied:
	/* State switch denied */
	SMC_RET1(handle, STATE_SW_E_DENIED);
}
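
/*
 * Illustrative caller sketch (an assumption, not part of this file): an
 * AArch64 EL2 caller switching itself to AArch32 Hyp, assuming the standard
 * SMCCC mapping of the function ID to x0 and arguments to x1-x4. The labels
 * aarch32_entry and my_cookie are hypothetical.
 *
 *	ldr	x0, =ARM_SIP_SVC_STATE_SWITCH_64
 *	mov	x1, xzr			// pc_hi: must be zero for an AArch32 target
 *	ldr	x2, =aarch32_entry	// pc_lo: 32-bit entry point (bit[0] set for Thumb)
 *	mov	x3, xzr			// cookie_hi: must also be zero
 *	ldr	x4, =my_cookie		// cookie_lo
 *	smc	#0
 *
 * On success, execution resumes at aarch32_entry in Hyp mode with the cookie
 * words in r0/r1; on failure the SMC returns STATE_SW_E_PARAM or
 * STATE_SW_E_DENIED in x0.
 */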