/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <platform.h>
#include <platform_def.h>
#include <smcc_helpers.h>
#include <string.h>
#include <utils.h>

/*******************************************************************************
 * Context management library initialisation routine. This library is used by
 * runtime services to share pointers to 'cpu_context' structures for the
 * secure and non-secure states. Management of the structures and their
 * associated memory is not done by the context management library, e.g. the
 * PSCI service manages the cpu context used for entry from and exit to the
 * non-secure state. The Secure payload manages the context(s) corresponding
 * to the secure state. It also uses this library to get access to the
 * non-secure state cpu context pointers.
 ******************************************************************************/
void cm_init(void)
{
        /*
         * The context management library has only global data to initialize,
         * but that will be done when the BSS is zeroed out
         */
}
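
/*
 * Illustrative sketch (not part of the library): a secure payload dispatcher
 * that needs the non-secure context pointer for the current CPU would
 * typically retrieve it with cm_get_context(), which this file also uses
 * internally, e.g.:
 *
 *	cpu_context_t *ns_ctx = cm_get_context(NON_SECURE);
 *
 * The surrounding dispatcher code is hypothetical; only cm_get_context()
 * and the NON_SECURE constant are taken from this library.
 */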

/*******************************************************************************
 * The following function initializes the cpu_context 'ctx' for
 * first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 *
 * The security state to initialize is determined by the SECURE attribute
 * of the entry_point_info. The function returns a pointer to the initialized
 * context and sets this as the next context to return to.
 *
 * The EE and ST attributes are used to configure the endianness and secure
 * timer availability for the new execution context.
 *
 * To prepare the register state for entry call cm_prepare_el3_exit() and
 * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to
 * cm_el1_sysreg_context_restore().
 ******************************************************************************/
static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
{
        unsigned int security_state;
        uint32_t scr, sctlr;
        regs_t *reg_ctx;

        assert(ctx);

        security_state = GET_SECURITY_STATE(ep->h.attr);

        /* Clear any residual register values from the context */
        zeromem(ctx, sizeof(*ctx));

        reg_ctx = get_regs_ctx(ctx);

        /*
         * Base the context SCR on the current value, adjust for entry point
         * specific requirements
         */
        scr = read_scr();
        scr &= ~(SCR_NS_BIT | SCR_HCE_BIT);

        if (security_state != SECURE)
                scr |= SCR_NS_BIT;

        /*
         * Set up SCTLR for the Non Secure context.
         * EE bit is taken from the entrypoint attributes
         * M, C and I bits must be zero (as required by the PSCI specification)
         *
         * The target exception level is based on the spsr mode requested.
         * If execution is requested to hyp mode, HVC is enabled
         * via SCR.HCE.
         *
         * Always compute the SCTLR value and save it in the cpu_context
         * - the HYP registers are set up by cm_prepare_el3_exit() as they
         * are not part of the stored cpu_context
         *
         * TODO: In debug builds the spsr should be validated and checked
         * against the CPU support, security state, endianness and pc
         */
        if (security_state != SECURE) {
                sctlr = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0;
                /*
                 * In addition to SCTLR_RES1, set the CP15_BEN, nTWI & nTWE
                 * bits that architecturally reset to 1.
                 */
                sctlr |= SCTLR_RES1 | SCTLR_CP15BEN_BIT |
                         SCTLR_NTWI_BIT | SCTLR_NTWE_BIT;
                write_ctx_reg(reg_ctx, CTX_NS_SCTLR, sctlr);
        }

        if (GET_M32(ep->spsr) == MODE32_hyp)
                scr |= SCR_HCE_BIT;

        write_ctx_reg(reg_ctx, CTX_SCR, scr);
        write_ctx_reg(reg_ctx, CTX_LR, ep->pc);
        write_ctx_reg(reg_ctx, CTX_SPSR, ep->spsr);

        /*
         * Store the r0-r3 values from the entrypoint into the context
         * Use memcpy as we are in control of the layout of the structures
         */
        memcpy((void *)reg_ctx, (void *)&ep->args, sizeof(aapcs32_params_t));
}
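
/*
 * Note: the values stored above (CTX_SCR, CTX_LR, CTX_SPSR and the r0-r3
 * arguments) are only consumed when EL3 is exited. As described in the
 * header comment, callers are expected to follow context initialisation
 * with cm_prepare_el3_exit() and el3_exit(), which restore this state
 * before the exception return into the new world.
 */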

/*******************************************************************************
 * The following function initializes the cpu_context for a CPU specified by
 * its `cpu_idx` for first use, and sets the initial entrypoint state as
 * specified by the entry_point_info structure.
 ******************************************************************************/
void cm_init_context_by_index(unsigned int cpu_idx,
                              const entry_point_info_t *ep)
{
        cpu_context_t *ctx;
        ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
        cm_init_context_common(ctx, ep);
}

/*******************************************************************************
 * The following function initializes the cpu_context for the current CPU
 * for first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 ******************************************************************************/
void cm_init_my_context(const entry_point_info_t *ep)
{
        cpu_context_t *ctx;
        ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
        cm_init_context_common(ctx, ep);
}
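
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * a runtime service such as PSCI, when bringing a core into the normal
 * world, would typically populate an entry_point_info structure, initialise
 * the non-secure context with it and then prepare the system registers
 * before exiting EL3, e.g.:
 *
 *	cm_init_my_context(ns_entry_point);
 *	cm_prepare_el3_exit(NON_SECURE);
 *
 * Only the two function calls are taken from this library; how the
 * entry_point_info (`ns_entry_point`) is populated is service-specific.
 */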

/*******************************************************************************
 * Prepare the CPU system registers for first entry into secure or normal world
 *
 * If execution is requested to hyp mode, HSCTLR is initialized.
 * If execution is requested to non-secure PL1 and the CPU supports HYP mode,
 * then HYP mode is disabled by configuring all necessary HYP mode registers.
 ******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
        uint32_t sctlr, scr, hcptr;
        cpu_context_t *ctx = cm_get_context(security_state);

        assert(ctx);

        if (security_state == NON_SECURE) {
                scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR);
                if (scr & SCR_HCE_BIT) {
                        /* Use SCTLR value to initialize HSCTLR */
                        sctlr = read_ctx_reg(get_regs_ctx(ctx),
                                             CTX_NS_SCTLR);
                        sctlr |= HSCTLR_RES1;
                        /* Temporarily set the NS bit to access HSCTLR */
                        write_scr(read_scr() | SCR_NS_BIT);
                        /*
                         * Make sure the write to SCR is complete so that
                         * we can access HSCTLR
                         */
                        isb();
                        write_hsctlr(sctlr);
                        isb();

                        write_scr(read_scr() & ~SCR_NS_BIT);
                        isb();
                } else if (read_id_pfr1() &
                        (ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) {
                        /*
                         * Set the NS bit to access NS copies of certain banked
                         * registers
                         */
                        write_scr(read_scr() | SCR_NS_BIT);
                        isb();

                        /* PL2 present but unused, need to disable safely */
                        write_hcr(0);

                        /* HSCTLR: can be ignored when bypassing */

                        /* HCPTR: disable all traps TCPAC, TTA, TCP */
                        hcptr = read_hcptr();
                        hcptr &= ~(TCPAC_BIT | TTA_BIT | TCP11_BIT | TCP10_BIT);
                        write_hcptr(hcptr);

                        /* Enable EL1 access to timer */
                        write_cnthctl(PL1PCEN_BIT | PL1PCTEN_BIT);

                        /* Reset CNTVOFF_EL2 */
                        write64_cntvoff(0);

                        /* Set VPIDR, VMPIDR to match MIDR, MPIDR */
                        write_vpidr(read_midr());
                        write_vmpidr(read_mpidr());

                        /*
                         * Reset VTTBR.
                         * Needed because cache maintenance operations depend on
                         * the VMID even when non-secure EL1&0 stage 2 address
                         * translation is disabled.
                         */
                        write64_vttbr(0);

                        /*
                         * Avoid unexpected debug traps in case where HDCR
                         * is not completely reset by the hardware - set
                         * HDCR.HPMN to PMCR.N and zero the remaining bits.
                         * The HDCR.HPMN and PMCR.N fields are the same size
                         * (5 bits) and HPMN is at offset zero within HDCR.
                         */
                        write_hdcr((read_pmcr() & PMCR_N_BITS) >> PMCR_N_SHIFT);

                        /*
                         * Reset CNTHP_CTL to disable the EL2 physical timer and
                         * therefore prevent timer interrupts.
                         */
                        write_cnthp_ctl(0);
                        isb();

                        write_scr(read_scr() & ~SCR_NS_BIT);
                        isb();
                }
        }
}
235}