/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <platform.h>
#include <platform_def.h>
#include <smcc_helpers.h>
#include <string.h>

/*******************************************************************************
 * Context management library initialisation routine. This library is used by
 * runtime services to share pointers to 'cpu_context' structures for the
 * secure and non-secure states. Management of the structures and their
 * associated memory is not done by the context management library e.g. the
 * PSCI service manages the cpu context used for entry from and exit to the
 * non-secure state. The Secure payload manages the context(s) corresponding
 * to the secure state. It also uses this library to get access to the
 * non-secure state cpu context pointers.
 ******************************************************************************/
void cm_init(void)
{
	/*
	 * The context management library has only global data to initialize,
	 * but that will be done when the BSS is zeroed out
	 */
}

/*******************************************************************************
 * The following function initializes the cpu_context 'ctx' for
 * first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 *
 * The security state to initialize is determined by the SECURE attribute
 * of the entry_point_info.
 *
 * The EE and ST attributes are used to configure the endianness and secure
 * timer availability for the new execution context.
 *
 * To prepare the register state for entry call cm_prepare_el3_exit() and
 * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to
 * cm_el1_sysregs_context_restore().
 ******************************************************************************/
static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
{
	unsigned int security_state;
	uint32_t scr, sctlr;
	regs_t *reg_ctx;

	assert(ctx);

	security_state = GET_SECURITY_STATE(ep->h.attr);

	/* Clear any residual register values from the context */
	memset(ctx, 0, sizeof(*ctx));

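	/* Get a pointer to the saved register context within 'ctx' */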
	reg_ctx = get_regs_ctx(ctx);

	/*
	 * Base the context SCR on the current value, adjust for entry point
	 * specific requirements
	 */
	scr = read_scr();
	scr &= ~(SCR_NS_BIT | SCR_HCE_BIT);

	if (security_state != SECURE)
		scr |= SCR_NS_BIT;

	/*
	 * Set up SCTLR for the Non Secure context.
	 * EE bit is taken from the entrypoint attributes
	 * M, C and I bits must be zero (as required by PSCI specification)
	 *
	 * The target exception level is based on the spsr mode requested.
	 * If execution is requested to hyp mode, HVC is enabled
	 * via SCR.HCE.
	 *
	 * Always compute the SCTLR value and save it in the cpu_context
	 * - the HYP registers are set up by cm_prepare_el3_exit() as they
	 * are not part of the stored cpu_context
	 *
	 * TODO: In debug builds the spsr should be validated and checked
	 * against the CPU support, security state, endianness and pc
	 */
	if (security_state != SECURE) {
		sctlr = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0;
		/*
		 * In addition to SCTLR_RES1, set the CP15_BEN, nTWI & nTWE
		 * bits that architecturally reset to 1.
		 */
		sctlr |= SCTLR_RES1 | SCTLR_CP15BEN_BIT |
			 SCTLR_NTWI_BIT | SCTLR_NTWE_BIT;
		write_ctx_reg(reg_ctx, CTX_NS_SCTLR, sctlr);
	}

	if (GET_M32(ep->spsr) == MODE32_hyp)
		scr |= SCR_HCE_BIT;

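	/*
	 * Populate the entrypoint state: SCR for the target security state,
	 * the entrypoint address in the LR slot and the requested mode in
	 * the SPSR.
	 */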
	write_ctx_reg(reg_ctx, CTX_SCR, scr);
	write_ctx_reg(reg_ctx, CTX_LR, ep->pc);
	write_ctx_reg(reg_ctx, CTX_SPSR, ep->spsr);

	/*
	 * Store the r0-r3 values from the entrypoint into the context.
	 * Use memcpy as we are in control of the layout of the structures.
	 */
	memcpy((void *)reg_ctx, (void *)&ep->args, sizeof(aapcs32_params_t));
}

/*******************************************************************************
 * The following function initializes the cpu_context for a CPU specified by
 * its `cpu_idx` for first use, and sets the initial entrypoint state as
 * specified by the entry_point_info structure.
 ******************************************************************************/
void cm_init_context_by_index(unsigned int cpu_idx,
			      const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
	cm_init_context_common(ctx, ep);
}

/*******************************************************************************
 * The following function initializes the cpu_context for the current CPU
 * for first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 ******************************************************************************/
void cm_init_my_context(const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
	cm_init_context_common(ctx, ep);
}
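
/*
 * Illustrative usage (a sketch, not code from this library): a runtime
 * service that wants to enter the non-secure world on the current CPU
 * would typically pair the two calls below; `ns_ep` stands for a
 * hypothetical entry_point_info_t already populated by the caller. The
 * hand-off itself happens via el3_exit(), as noted above
 * cm_init_context_common().
 *
 *	cm_init_my_context(ns_ep);
 *	cm_prepare_el3_exit(NON_SECURE);
 */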

/*******************************************************************************
 * Prepare the CPU system registers for first entry into secure or normal world
 *
 * If execution is requested to hyp mode, HSCTLR is initialized.
 * If execution is requested to non-secure PL1, and the CPU supports HYP mode,
 * then HYP mode is disabled by configuring all necessary HYP mode registers.
 ******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
	uint32_t sctlr, scr, hcptr;
	cpu_context_t *ctx = cm_get_context(security_state);

	assert(ctx);

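	/*
	 * HYP/PL2 state only needs to be set up when exiting to the
	 * non-secure world; there is nothing to do for the secure world.
	 */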
	if (security_state == NON_SECURE) {
		scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR);
		if (scr & SCR_HCE_BIT) {
			/* Use SCTLR value to initialize HSCTLR */
			sctlr = read_ctx_reg(get_regs_ctx(ctx),
					     CTX_NS_SCTLR);
			sctlr |= HSCTLR_RES1;
			/* Temporarily set the NS bit to access HSCTLR */
			write_scr(read_scr() | SCR_NS_BIT);
			/*
			 * Make sure the write to SCR is complete so that
			 * we can access HSCTLR
			 */
			isb();
			write_hsctlr(sctlr);
			isb();

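			/* Clear the NS bit again now that HSCTLR is set */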
			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		} else if (read_id_pfr1() &
			(ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) {
			/*
			 * Set the NS bit to access NS copies of certain banked
			 * registers
			 */
			write_scr(read_scr() | SCR_NS_BIT);
			isb();

			/* PL2 present but unused, need to disable safely */
			write_hcr(0);

			/* HSCTLR: can be ignored when bypassing PL2 */

			/* HCPTR: disable all traps TCPAC, TTA, TCP */
			hcptr = read_hcptr();
			hcptr &= ~(TCPAC_BIT | TTA_BIT | TCP11_BIT | TCP10_BIT);
			write_hcptr(hcptr);

			/* Enable PL1 access to the timer */
			write_cnthctl(PL1PCEN_BIT | PL1PCTEN_BIT);

			/* Reset CNTVOFF */
			write64_cntvoff(0);

			/* Set VPIDR, VMPIDR to match MIDR, MPIDR */
			write_vpidr(read_midr());
			write_vmpidr(read_mpidr());

			/*
			 * Reset VTTBR.
			 * Needed because cache maintenance operations depend
			 * on the VMID even when non-secure EL1&0 stage 2
			 * address translation is disabled.
			 */
			write64_vttbr(0);

			/*
			 * Avoid unexpected debug traps in case where HDCR
			 * is not completely reset by the hardware - set
			 * HDCR.HPMN to PMCR.N and zero the remaining bits.
			 * The HDCR.HPMN and PMCR.N fields are the same size
			 * (5 bits) and HPMN is at offset zero within HDCR.
			 */
			write_hdcr((read_pmcr() & PMCR_N_BITS) >> PMCR_N_SHIFT);

			/*
			 * Reset CNTHP_CTL to disable the hyp mode physical
			 * timer and therefore prevent timer interrupts.
			 */
			write_cnthp_ctl(0);
			isb();

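			/* Switch back to the secure copies of the banked registers */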
			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		}
	}
}