/*
 * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <platform_def.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/extensions/amu.h>
#include <lib/utils.h>
#include <plat/common/platform.h>
#include <smccc_helpers.h>

/*******************************************************************************
 * Context management library initialisation routine. This library is used by
 * runtime services to share pointers to 'cpu_context' structures for the secure
 * and non-secure states. Management of the structures and their associated
 * memory is not done by the context management library e.g. the PSCI service
 * manages the cpu context used for entry from and exit to the non-secure state.
 * The Secure payload manages the context(s) corresponding to the secure state.
 * It also uses this library to get access to the non-secure
 * state cpu context pointers.
 ******************************************************************************/
void cm_init(void)
{
	/*
	 * The context management library has only global data to initialize, but
	 * that will be done when the BSS is zeroed out
	 */
}

/*******************************************************************************
 * The following function initializes the cpu_context 'ctx' for
 * first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 *
 * The security state to initialize is determined by the SECURE attribute
 * of the entry_point_info.
 *
 * The EE and ST attributes are used to configure the endianness and secure
 * timer availability for the new execution context.
 *
 * To prepare the register state for entry call cm_prepare_el3_exit() and
 * el3_exit(). For Secure-EL1, cm_prepare_el3_exit() is equivalent to
 * cm_el1_sysreg_context_restore().
 ******************************************************************************/
void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
{
	unsigned int security_state;
	uint32_t scr, sctlr;
	regs_t *reg_ctx;

	assert(ctx != NULL);

	security_state = GET_SECURITY_STATE(ep->h.attr);

	/* Clear any residual register values from the context */
	zeromem(ctx, sizeof(*ctx));

	reg_ctx = get_regs_ctx(ctx);

	/*
	 * Base the context SCR on the current value, adjust for entry point
	 * specific requirements
	 */
	scr = read_scr();
	scr &= ~(SCR_NS_BIT | SCR_HCE_BIT);

	if (security_state != SECURE)
		scr |= SCR_NS_BIT;

	if (security_state != SECURE) {
		/*
		 * Set up SCTLR for the Non-secure context.
		 *
		 * SCTLR.EE: Endianness is taken from the entrypoint attributes.
		 *
		 * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as
		 * required by PSCI specification)
		 *
		 * Set remaining SCTLR fields to their architecturally defined
		 * values. Some fields reset to an IMPLEMENTATION DEFINED value:
		 *
		 * SCTLR.TE: Set to zero so that exceptions to an Exception
		 * Level executing at PL1 are taken to A32 state.
		 *
		 * SCTLR.V: Set to zero to select the normal exception vectors
		 * with base address held in VBAR.
		 */
		assert(((ep->spsr >> SPSR_E_SHIFT) & SPSR_E_MASK) ==
			(EP_GET_EE(ep->h.attr) >> EP_EE_SHIFT));

		sctlr = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0U;
		sctlr |= (SCTLR_RESET_VAL & ~(SCTLR_TE_BIT | SCTLR_V_BIT));
		write_ctx_reg(reg_ctx, CTX_NS_SCTLR, sctlr);
	}

	/*
	 * The target exception level is based on the spsr mode requested. If
	 * execution is requested to hyp mode, HVC is enabled via SCR.HCE.
	 */
	if (GET_M32(ep->spsr) == MODE32_hyp)
		scr |= SCR_HCE_BIT;

	/*
	 * Store the initialised values for SCTLR and SCR in the cpu_context.
	 * The Hyp mode registers are not part of the saved context and are
	 * set-up in cm_prepare_el3_exit().
	 */
	write_ctx_reg(reg_ctx, CTX_SCR, scr);
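	/* Program the entrypoint and the SPSR to be restored on exception return */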
	write_ctx_reg(reg_ctx, CTX_LR, ep->pc);
	write_ctx_reg(reg_ctx, CTX_SPSR, ep->spsr);

	/*
	 * Store the r0-r3 values from the entrypoint into the context.
	 * Use memcpy as we are in control of the layout of the structures.
	 */
	memcpy((void *)reg_ctx, (void *)&ep->args, sizeof(aapcs32_params_t));
}

/*******************************************************************************
 * Enable architecture extensions on first entry to Non-secure world.
 * When EL2 is implemented but unused, `el2_unused` is true, otherwise
 * it is false.
 ******************************************************************************/
static void enable_extensions_nonsecure(bool el2_unused)
{
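	/*
	 * At present the only extension handled here is the Activity Monitors
	 * Unit (AMU), which is enabled only when this file is built into the
	 * BL32 image with ENABLE_AMU=1.
	 */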
#if IMAGE_BL32
#if ENABLE_AMU
	amu_enable(el2_unused);
#endif
#endif
}

/*******************************************************************************
 * The following function initializes the cpu_context for a CPU specified by
 * its `cpu_idx` for first use, and sets the initial entrypoint state as
 * specified by the entry_point_info structure.
 ******************************************************************************/
void cm_init_context_by_index(unsigned int cpu_idx,
			      const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}

/*******************************************************************************
 * The following function initializes the cpu_context for the current CPU
 * for first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 ******************************************************************************/
void cm_init_my_context(const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}

/*******************************************************************************
 * Prepare the CPU system registers for first entry into secure or normal world
 *
 * If execution is requested to hyp mode, HSCTLR is initialized
 * If execution is requested to non-secure PL1, and the CPU supports
 * HYP mode then HYP mode is disabled by configuring all necessary HYP mode
 * registers.
 ******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
	uint32_t hsctlr, scr;
	cpu_context_t *ctx = cm_get_context(security_state);
	bool el2_unused = false;

	assert(ctx != NULL);

	if (security_state == NON_SECURE) {
		scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR);
		if ((scr & SCR_HCE_BIT) != 0U) {
			/* Use SCTLR value to initialize HSCTLR */
			hsctlr = read_ctx_reg(get_regs_ctx(ctx),
					      CTX_NS_SCTLR);
			hsctlr |= HSCTLR_RES1;
			/* Temporarily set the NS bit to access HSCTLR */
			write_scr(read_scr() | SCR_NS_BIT);
			/*
			 * Make sure the write to SCR is complete so that
			 * we can access HSCTLR
			 */
			isb();
			write_hsctlr(hsctlr);
			isb();

			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		} else if ((read_id_pfr1() &
			(ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) != 0U) {
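			/*
			 * ID_PFR1 reports that the Virtualization Extensions
			 * are implemented, but entry to Hyp mode was not
			 * requested by the entrypoint.
			 */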
			el2_unused = true;

			/*
			 * Set the NS bit to access NS copies of certain banked
			 * registers
			 */
			write_scr(read_scr() | SCR_NS_BIT);
			isb();

			/*
			 * Hyp / PL2 present but unused, need to disable safely.
			 * HSCTLR can be ignored in this case.
			 *
			 * Set HCR to its architectural reset value so that
			 * Non-secure operations do not trap to Hyp mode.
			 */
			write_hcr(HCR_RESET_VAL);

			/*
			 * Set HCPTR to its architectural reset value so that
			 * Non-secure access from EL1 or EL0 to trace and to
			 * Advanced SIMD and floating point functionality does
			 * not trap to Hyp mode.
			 */
			write_hcptr(HCPTR_RESET_VAL);

			/*
			 * Initialise CNTHCTL. All fields are architecturally
			 * UNKNOWN on reset and are set to zero except for
			 * field(s) listed below.
			 *
			 * CNTHCTL.PL1PCEN: Disable traps to Hyp mode of
			 * Non-secure EL0 and EL1 accesses to the physical
			 * timer registers.
			 *
			 * CNTHCTL.PL1PCTEN: Disable traps to Hyp mode of
			 * Non-secure EL0 and EL1 accesses to the physical
			 * counter registers.
			 */
			write_cnthctl(CNTHCTL_RESET_VAL |
					PL1PCEN_BIT | PL1PCTEN_BIT);

			/*
			 * Initialise CNTVOFF to zero as it resets to an
			 * IMPLEMENTATION DEFINED value.
			 */
			write64_cntvoff(0);

			/*
			 * Set VPIDR and VMPIDR to match MIDR_EL1 and MPIDR
			 * respectively.
			 */
			write_vpidr(read_midr());
			write_vmpidr(read_mpidr());

			/*
			 * Initialise VTTBR, setting all fields rather than
			 * relying on the hw. Some fields are architecturally
			 * UNKNOWN at reset.
			 *
			 * VTTBR.VMID: Set to zero which is the architecturally
			 * defined reset value. Even though EL1&0 stage 2
			 * address translation is disabled, cache maintenance
			 * operations depend on the VMID.
			 *
			 * VTTBR.BADDR: Set to zero as EL1&0 stage 2 address
			 * translation is disabled.
			 */
			write64_vttbr(VTTBR_RESET_VAL &
				~((VTTBR_VMID_MASK << VTTBR_VMID_SHIFT)
				| (VTTBR_BADDR_MASK << VTTBR_BADDR_SHIFT)));

			/*
			 * Initialise HDCR, setting all the fields rather than
			 * relying on hw.
			 *
			 * HDCR.HPMN: Set to value of PMCR.N which is the
			 * architecturally-defined reset value.
			 */
			write_hdcr(HDCR_RESET_VAL |
				((read_pmcr() & PMCR_N_BITS) >> PMCR_N_SHIFT));

			/*
			 * Set HSTR to its architectural reset value so that
			 * access to system registers in the coproc=1111
			 * encoding space does not trap to Hyp mode.
			 */
			write_hstr(HSTR_RESET_VAL);
			/*
			 * Set CNTHP_CTL to its architectural reset value to
			 * disable the EL2 physical timer and prevent timer
			 * interrupts. Some fields are architecturally UNKNOWN
			 * on reset and are set to zero.
			 */
			write_cnthp_ctl(CNTHP_CTL_RESET_VAL);
			isb();

			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		}
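		/*
		 * Enable any architecture extensions required by the
		 * Non-secure world before exiting EL3.
		 */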
		enable_extensions_nonsecure(el2_unused);
	}
}