blob: 2e7e62d7bb36be6ce3a5061ef1f6c0ef59d70313 [file] [log] [blame]
Achin Gupta7aea9082014-02-01 07:51:28 +00001/*
2 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * Redistributions of source code must retain the above copyright notice, this
8 * list of conditions and the following disclaimer.
9 *
10 * Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
14 * Neither the name of ARM nor the names of its contributors may be used
15 * to endorse or promote products derived from this software without specific
16 * prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
Achin Gupta27b895e2014-05-04 18:38:28 +010031#include <arch.h>
Achin Gupta7aea9082014-02-01 07:51:28 +000032#include <arch_helpers.h>
Dan Handley2bd4ef22014-04-09 13:14:54 +010033#include <assert.h>
Achin Gupta7aea9082014-02-01 07:51:28 +000034#include <bl_common.h>
Soby Mathew5e5c2072014-04-07 15:28:55 +010035#include <bl31.h>
Dan Handley2bd4ef22014-04-09 13:14:54 +010036#include <context.h>
Achin Gupta7aea9082014-02-01 07:51:28 +000037#include <context_mgmt.h>
Achin Gupta191e86e2014-05-09 10:03:15 +010038#include <interrupt_mgmt.h>
Dan Handley2bd4ef22014-04-09 13:14:54 +010039#include <platform.h>
40#include <runtime_svc.h>
Achin Gupta7aea9082014-02-01 07:51:28 +000041
42/*******************************************************************************
43 * Data structure which holds the pointers to non-secure and secure security
44 * state contexts for each cpu. It is aligned to the cache line boundary to
45 * allow efficient concurrent manipulation of these pointers on different cpus
46 ******************************************************************************/
47typedef struct {
48 void *ptr[2];
Dan Handleye2712bc2014-04-10 15:37:22 +010049} __aligned (CACHE_WRITEBACK_GRANULE) context_info_t;
Achin Gupta7aea9082014-02-01 07:51:28 +000050
Dan Handleye2712bc2014-04-10 15:37:22 +010051static context_info_t cm_context_info[PLATFORM_CORE_COUNT];
Achin Gupta7aea9082014-02-01 07:51:28 +000052
Soby Mathew5e5c2072014-04-07 15:28:55 +010053/* The per_cpu_ptr_cache_t space allocation */
54static per_cpu_ptr_cache_t per_cpu_ptr_cache_space[PLATFORM_CORE_COUNT];
55
Achin Gupta7aea9082014-02-01 07:51:28 +000056/*******************************************************************************
57 * Context management library initialisation routine. This library is used by
58 * runtime services to share pointers to 'cpu_context' structures for the secure
59 * and non-secure states. Management of the structures and their associated
60 * memory is not done by the context management library e.g. the PSCI service
61 * manages the cpu context used for entry from and exit to the non-secure state.
62 * The Secure payload dispatcher service manages the context(s) corresponding to
63 * the secure state. It also uses this library to get access to the non-secure
64 * state cpu context pointers.
65 * Lastly, this library provides the api to make SP_EL3 point to the cpu context
66 * which will used for programming an entry into a lower EL. The same context
67 * will used to save state upon exception entry from that EL.
68 ******************************************************************************/
69void cm_init()
70{
71 /*
72 * The context management library has only global data to intialize, but
73 * that will be done when the BSS is zeroed out
74 */
75}
76
77/*******************************************************************************
78 * This function returns a pointer to the most recent 'cpu_context' structure
79 * that was set as the context for the specified security state. NULL is
80 * returned if no such structure has been specified.
81 ******************************************************************************/
82void *cm_get_context(uint64_t mpidr, uint32_t security_state)
83{
84 uint32_t linear_id = platform_get_core_pos(mpidr);
85
86 assert(security_state <= NON_SECURE);
87
88 return cm_context_info[linear_id].ptr[security_state];
89}
90
91/*******************************************************************************
92 * This function sets the pointer to the current 'cpu_context' structure for the
93 * specified security state.
94 ******************************************************************************/
95void cm_set_context(uint64_t mpidr, void *context, uint32_t security_state)
96{
97 uint32_t linear_id = platform_get_core_pos(mpidr);
98
99 assert(security_state <= NON_SECURE);
100
101 cm_context_info[linear_id].ptr[security_state] = context;
102}
103
104/*******************************************************************************
105 * The next four functions are used by runtime services to save and restore EL3
106 * and EL1 contexts on the 'cpu_context' structure for the specified security
107 * state.
108 ******************************************************************************/
109void cm_el3_sysregs_context_save(uint32_t security_state)
110{
Dan Handleye2712bc2014-04-10 15:37:22 +0100111 cpu_context_t *ctx;
Achin Gupta7aea9082014-02-01 07:51:28 +0000112
113 ctx = cm_get_context(read_mpidr(), security_state);
114 assert(ctx);
115
116 el3_sysregs_context_save(get_el3state_ctx(ctx));
117}
118
119void cm_el3_sysregs_context_restore(uint32_t security_state)
120{
Dan Handleye2712bc2014-04-10 15:37:22 +0100121 cpu_context_t *ctx;
Achin Gupta7aea9082014-02-01 07:51:28 +0000122
123 ctx = cm_get_context(read_mpidr(), security_state);
124 assert(ctx);
125
126 el3_sysregs_context_restore(get_el3state_ctx(ctx));
127}
128
129void cm_el1_sysregs_context_save(uint32_t security_state)
130{
Dan Handleye2712bc2014-04-10 15:37:22 +0100131 cpu_context_t *ctx;
Achin Gupta7aea9082014-02-01 07:51:28 +0000132
133 ctx = cm_get_context(read_mpidr(), security_state);
134 assert(ctx);
135
136 el1_sysregs_context_save(get_sysregs_ctx(ctx));
137}
138
139void cm_el1_sysregs_context_restore(uint32_t security_state)
140{
Dan Handleye2712bc2014-04-10 15:37:22 +0100141 cpu_context_t *ctx;
Achin Gupta7aea9082014-02-01 07:51:28 +0000142
143 ctx = cm_get_context(read_mpidr(), security_state);
144 assert(ctx);
145
146 el1_sysregs_context_restore(get_sysregs_ctx(ctx));
147}
148
149/*******************************************************************************
Achin Gupta27b895e2014-05-04 18:38:28 +0100150 * This function populates 'cpu_context' pertaining to the given security state
151 * with the entrypoint, SPSR and SCR values so that an ERET from this security
152 * state correctly restores corresponding values to drop the CPU to the next
153 * exception level
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000154 ******************************************************************************/
155void cm_set_el3_eret_context(uint32_t security_state, uint64_t entrypoint,
156 uint32_t spsr, uint32_t scr)
157{
Dan Handleye2712bc2014-04-10 15:37:22 +0100158 cpu_context_t *ctx;
159 el3_state_t *state;
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000160
161 ctx = cm_get_context(read_mpidr(), security_state);
162 assert(ctx);
163
Achin Gupta191e86e2014-05-09 10:03:15 +0100164 /* Program the interrupt routing model for this security state */
165 scr &= ~SCR_FIQ_BIT;
166 scr &= ~SCR_IRQ_BIT;
167 scr |= get_scr_el3_from_routing_model(security_state);
168
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000169 /* Populate EL3 state so that we've the right context before doing ERET */
170 state = get_el3state_ctx(ctx);
171 write_ctx_reg(state, CTX_SPSR_EL3, spsr);
172 write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
173 write_ctx_reg(state, CTX_SCR_EL3, scr);
174}
175
176/*******************************************************************************
Achin Gupta27b895e2014-05-04 18:38:28 +0100177 * This function populates ELR_EL3 member of 'cpu_context' pertaining to the
178 * given security state with the given entrypoint
Achin Gupta607084e2014-02-09 18:24:19 +0000179 ******************************************************************************/
Achin Gupta27b895e2014-05-04 18:38:28 +0100180void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint)
Achin Gupta607084e2014-02-09 18:24:19 +0000181{
Dan Handleye2712bc2014-04-10 15:37:22 +0100182 cpu_context_t *ctx;
183 el3_state_t *state;
Achin Gupta607084e2014-02-09 18:24:19 +0000184
185 ctx = cm_get_context(read_mpidr(), security_state);
186 assert(ctx);
187
188 /* Populate EL3 state so that ERET jumps to the correct entry */
189 state = get_el3state_ctx(ctx);
190 write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
191}
192
193/*******************************************************************************
Achin Gupta27b895e2014-05-04 18:38:28 +0100194 * This function updates a single bit in the SCR_EL3 member of the 'cpu_context'
195 * pertaining to the given security state using the value and bit position
196 * specified in the parameters. It preserves all other bits.
197 ******************************************************************************/
198void cm_write_scr_el3_bit(uint32_t security_state,
199 uint32_t bit_pos,
200 uint32_t value)
201{
202 cpu_context_t *ctx;
203 el3_state_t *state;
204 uint32_t scr_el3;
205
206 ctx = cm_get_context(read_mpidr(), security_state);
207 assert(ctx);
208
209 /* Ensure that the bit position is a valid one */
210 assert((1 << bit_pos) & SCR_VALID_BIT_MASK);
211
212 /* Ensure that the 'value' is only a bit wide */
213 assert(value <= 1);
214
215 /*
216 * Get the SCR_EL3 value from the cpu context, clear the desired bit
217 * and set it to its new value.
218 */
219 state = get_el3state_ctx(ctx);
220 scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
221 scr_el3 &= ~(1 << bit_pos);
222 scr_el3 |= value << bit_pos;
223 write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
224}
225
226/*******************************************************************************
227 * This function retrieves SCR_EL3 member of 'cpu_context' pertaining to the
228 * given security state.
229 ******************************************************************************/
230uint32_t cm_get_scr_el3(uint32_t security_state)
231{
232 cpu_context_t *ctx;
233 el3_state_t *state;
234
235 ctx = cm_get_context(read_mpidr(), security_state);
236 assert(ctx);
237
238 /* Populate EL3 state so that ERET jumps to the correct entry */
239 state = get_el3state_ctx(ctx);
240 return read_ctx_reg(state, CTX_SCR_EL3);
241}
242
243/*******************************************************************************
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000244 * This function is used to program the context that's used for exception
245 * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for
246 * the required security state
Achin Gupta7aea9082014-02-01 07:51:28 +0000247 ******************************************************************************/
248void cm_set_next_eret_context(uint32_t security_state)
249{
Dan Handleye2712bc2014-04-10 15:37:22 +0100250 cpu_context_t *ctx;
Achin Gupta7aea9082014-02-01 07:51:28 +0000251#if DEBUG
252 uint64_t sp_mode;
253#endif
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000254
Achin Gupta7aea9082014-02-01 07:51:28 +0000255 ctx = cm_get_context(read_mpidr(), security_state);
256 assert(ctx);
257
258#if DEBUG
259 /*
260 * Check that this function is called with SP_EL0 as the stack
261 * pointer
262 */
263 __asm__ volatile("mrs %0, SPSel\n"
264 : "=r" (sp_mode));
265
266 assert(sp_mode == MODE_SP_EL0);
267#endif
268
269 __asm__ volatile("msr spsel, #1\n"
270 "mov sp, %0\n"
271 "msr spsel, #0\n"
272 : : "r" (ctx));
273}
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000274
Soby Mathew5e5c2072014-04-07 15:28:55 +0100275/************************************************************************
276 * The following function is used to populate the per cpu pointer cache.
277 * The pointer will be stored in the tpidr_el3 register.
278 *************************************************************************/
279void cm_init_pcpu_ptr_cache()
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000280{
Soby Mathew5e5c2072014-04-07 15:28:55 +0100281 unsigned long mpidr = read_mpidr();
282 uint32_t linear_id = platform_get_core_pos(mpidr);
283 per_cpu_ptr_cache_t *pcpu_ptr_cache;
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000284
Soby Mathew5e5c2072014-04-07 15:28:55 +0100285 pcpu_ptr_cache = &per_cpu_ptr_cache_space[linear_id];
286 assert(pcpu_ptr_cache);
287 pcpu_ptr_cache->crash_stack = get_crash_stack(mpidr);
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000288
Soby Mathew5e5c2072014-04-07 15:28:55 +0100289 cm_set_pcpu_ptr_cache(pcpu_ptr_cache);
290}
291
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000292
Soby Mathew5e5c2072014-04-07 15:28:55 +0100293void cm_set_pcpu_ptr_cache(const void *pcpu_ptr)
294{
295 write_tpidr_el3((unsigned long)pcpu_ptr);
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000296}
Soby Mathew5e5c2072014-04-07 15:28:55 +0100297
/* Retrieve the per-cpu pointer cache address previously stored in TPIDR_EL3 */
void *cm_get_pcpu_ptr_cache(void)
{
	unsigned long ptr_val = read_tpidr_el3();

	return (void *)ptr_val;
}
302