blob: 8fadb7a5ac46e4b2993ac64d5b4d7c28f96f87fd [file] [log] [blame]
/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
6
Dan Handley2bd4ef22014-04-09 13:14:54 +01007#ifndef __TSPD_PRIVATE_H__
8#define __TSPD_PRIVATE_H__
Achin Gupta375f5382014-02-18 18:12:48 +00009
Achin Gupta375f5382014-02-18 18:12:48 +000010#include <arch.h>
Dan Handley2bd4ef22014-04-09 13:14:54 +010011#include <context.h>
Achin Guptaaeaab682014-05-09 13:21:31 +010012#include <interrupt_mgmt.h>
Dan Handleyed6ff952014-05-14 17:44:19 +010013#include <platform_def.h>
Achin Gupta375f5382014-02-18 18:12:48 +000014#include <psci.h>
Achin Gupta375f5382014-02-18 18:12:48 +000015
/*******************************************************************************
 * Secure Payload PM state information e.g. SP is suspended, uninitialised etc
 * and macros to access the state information in the per-cpu 'state' flags
 ******************************************************************************/
#define TSP_PSTATE_OFF		0
#define TSP_PSTATE_ON		1
#define TSP_PSTATE_SUSPEND	2
#define TSP_PSTATE_SHIFT	0
#define TSP_PSTATE_MASK		0x3

/* Extract the PM state field from the per-cpu 'state' flags. */
#define get_tsp_pstate(state)	(((state) >> TSP_PSTATE_SHIFT) & \
					TSP_PSTATE_MASK)

/* Clear the PM state field; 'state' must be a modifiable lvalue. */
#define clr_tsp_pstate(state)	((state) &= ~(TSP_PSTATE_MASK \
					<< TSP_PSTATE_SHIFT))

/*
 * Replace the PM state field in 'st' with 'pst'. Note: no trailing
 * semicolon after the do/while so the macro can be used safely as a
 * single statement in an if/else body.
 */
#define set_tsp_pstate(st, pst)	do {					\
					clr_tsp_pstate(st);		\
					(st) |= ((pst) & TSP_PSTATE_MASK) << \
						TSP_PSTATE_SHIFT;	\
				} while (0)
33
34
/*
 * This flag is used by the TSPD to determine if the TSP is servicing a yielding
 * SMC request prior to programming the next entry into the TSP e.g. if TSP
 * execution is preempted by a non-secure interrupt and handed control to the
 * normal world. If another request which is distinct from what the TSP was
 * previously doing arrives, then this flag will help the TSPD to either
 * reject the new request or service it while ensuring that the previous context
 * is not corrupted.
 */
#define YIELD_SMC_ACTIVE_FLAG_SHIFT	2
#define YIELD_SMC_ACTIVE_FLAG_MASK	1

/* Non-zero when a yielding SMC is in progress on this cpu. */
#define get_yield_smc_active_flag(state)				\
				(((state) >> YIELD_SMC_ACTIVE_FLAG_SHIFT) \
				& YIELD_SMC_ACTIVE_FLAG_MASK)
#define set_yield_smc_active_flag(state)	((state) |=		\
					1U << YIELD_SMC_ACTIVE_FLAG_SHIFT)
#define clr_yield_smc_active_flag(state)	((state) &=		\
					~(YIELD_SMC_ACTIVE_FLAG_MASK	\
					<< YIELD_SMC_ACTIVE_FLAG_SHIFT))
Achin Gupta375f5382014-02-18 18:12:48 +000054
/*******************************************************************************
 * Secure Payload execution state information i.e. aarch32 or aarch64
 ******************************************************************************/
/* Aliases of the MODE_RW_* register-width encodings from <arch.h>. */
#define TSP_AARCH32		MODE_RW_32
#define TSP_AARCH64		MODE_RW_64
60
/*******************************************************************************
 * The SPD should know the type of Secure Payload.
 ******************************************************************************/
/* Values alias the PSCI Trusted-OS capability encodings from <psci.h>. */
#define TSP_TYPE_UP		PSCI_TOS_NOT_UP_MIG_CAP
#define TSP_TYPE_UPM		PSCI_TOS_UP_MIG_CAP
#define TSP_TYPE_MP		PSCI_TOS_NOT_PRESENT_MP

/*******************************************************************************
 * Secure Payload migrate type information as known to the SPD. We assume that
 * the SPD is dealing with an MP Secure Payload.
 ******************************************************************************/
#define TSP_MIGRATE_INFO		TSP_TYPE_MP
73
/*******************************************************************************
 * Number of cpus that the present on this platform. TODO: Rely on a topology
 * tree to determine this in the future to avoid assumptions about mpidr
 * allocation
 ******************************************************************************/
/* Sizes the per-cpu tspd_sp_context[] array (see prototypes below). */
#define TSPD_CORE_COUNT		PLATFORM_CORE_COUNT
80
/*******************************************************************************
 * Constants that allow assembler code to preserve callee-saved registers of the
 * C runtime context while performing a security state switch.
 *
 * NOTE(review): these byte offsets are consumed by hand-written assembly
 * (x19-x30 saved as consecutive 8-byte slots); do not reorder or repack
 * without updating the corresponding save/restore code.
 ******************************************************************************/
#define TSPD_C_RT_CTX_X19		0x0
#define TSPD_C_RT_CTX_X20		0x8
#define TSPD_C_RT_CTX_X21		0x10
#define TSPD_C_RT_CTX_X22		0x18
#define TSPD_C_RT_CTX_X23		0x20
#define TSPD_C_RT_CTX_X24		0x28
#define TSPD_C_RT_CTX_X25		0x30
#define TSPD_C_RT_CTX_X26		0x38
#define TSPD_C_RT_CTX_X27		0x40
#define TSPD_C_RT_CTX_X28		0x48
#define TSPD_C_RT_CTX_X29		0x50
#define TSPD_C_RT_CTX_X30		0x58
#define TSPD_C_RT_CTX_SIZE		0x60
/* Number of doubleword (8-byte) entries; used to size c_rt_regs below. */
#define TSPD_C_RT_CTX_ENTRIES		(TSPD_C_RT_CTX_SIZE >> DWORD_SHIFT)
99
/*******************************************************************************
 * Constants that allow assembler code to preserve caller-saved registers of the
 * SP context while performing a TSP preemption.
 * Note: These offsets have to match with the offsets for the corresponding
 * registers in cpu_context as we are using memcpy to copy the values from
 * cpu_context to sp_ctx.
 ******************************************************************************/
#define TSPD_SP_CTX_X0		0x0
#define TSPD_SP_CTX_X1		0x8
#define TSPD_SP_CTX_X2		0x10
#define TSPD_SP_CTX_X3		0x18
#define TSPD_SP_CTX_X4		0x20
#define TSPD_SP_CTX_X5		0x28
#define TSPD_SP_CTX_X6		0x30
#define TSPD_SP_CTX_X7		0x38
#define TSPD_SP_CTX_X8		0x40
#define TSPD_SP_CTX_X9		0x48
#define TSPD_SP_CTX_X10		0x50
#define TSPD_SP_CTX_X11		0x58
#define TSPD_SP_CTX_X12		0x60
#define TSPD_SP_CTX_X13		0x68
#define TSPD_SP_CTX_X14		0x70
#define TSPD_SP_CTX_X15		0x78
#define TSPD_SP_CTX_X16		0x80
#define TSPD_SP_CTX_X17		0x88
#define TSPD_SP_CTX_SIZE	0x90
/* Number of doubleword (8-byte) entries; used to size sp_ctx_regs below. */
#define TSPD_SP_CTX_ENTRIES	(TSPD_SP_CTX_SIZE >> DWORD_SHIFT)
127
Achin Gupta375f5382014-02-18 18:12:48 +0000128#ifndef __ASSEMBLY__
129
Dan Handley2bd4ef22014-04-09 13:14:54 +0100130#include <cassert.h>
131#include <stdint.h>
132
/*
 * The number of arguments to save during a SMC call for TSP.
 * Currently only x1 and x2 are used by TSP.
 */
/* Sizes the saved_tsp_args[] member of tsp_context. */
#define TSP_NUM_ARGS	0x2
/* AArch64 callee saved general purpose register context structure. */
DEFINE_REG_STRUCT(c_rt_regs, TSPD_C_RT_CTX_ENTRIES);

/*
 * Compile time assertion to ensure that both the compiler and linker
 * have the same double word aligned view of the size of the C runtime
 * register context.
 */
CASSERT(TSPD_C_RT_CTX_SIZE == sizeof(c_rt_regs_t), \
	assert_spd_c_rt_regs_size_mismatch);

/* SEL1 Secure payload (SP) caller saved register context structure. */
DEFINE_REG_STRUCT(sp_ctx_regs, TSPD_SP_CTX_ENTRIES);

/*
 * Compile time assertion to ensure that both the compiler and linker
 * have the same double word aligned view of the size of the SEL1 Secure
 * Payload caller-saved register context.
 */
CASSERT(TSPD_SP_CTX_SIZE == sizeof(sp_ctx_regs_t), \
	assert_spd_sp_regs_size_mismatch);
160
/*******************************************************************************
 * Structure which helps the SPD to maintain the per-cpu state of the SP.
 * 'saved_spsr_el3' - temporary copy to allow S-EL1 interrupt handling when
 *                    the TSP has been preempted.
 * 'saved_elr_el3'  - temporary copy to allow S-EL1 interrupt handling when
 *                    the TSP has been preempted.
 * 'state'          - collection of flags to track SP state e.g. on/off
 * 'mpidr'          - mpidr to associate a context with a cpu
 * 'c_rt_ctx'       - stack address to restore C runtime context from after
 *                    returning from a synchronous entry into the SP.
 * 'cpu_ctx'        - space to maintain SP architectural state
 * 'saved_tsp_args' - space to store arguments for TSP arithmetic operations
 *                    which will queried using the TSP_GET_ARGS SMC by TSP.
 * 'sp_ctx'         - space to save the SEL1 Secure Payload(SP) caller saved
 *                    register context after it has been preempted by an EL3
 *                    routed NS interrupt and when a Secure Interrupt is taken
 *                    to SP.
 ******************************************************************************/
typedef struct tsp_context {
	uint64_t saved_elr_el3;
	uint32_t saved_spsr_el3;
	/* Holds the TSP_PSTATE_* and YIELD_SMC_ACTIVE flags defined above. */
	uint32_t state;
	uint64_t mpidr;
	uint64_t c_rt_ctx;
	cpu_context_t cpu_ctx;
	uint64_t saved_tsp_args[TSP_NUM_ARGS];
	/* Only present when asynchronous NS-interrupt preemption is built in. */
#if TSP_NS_INTR_ASYNC_PREEMPT
	sp_ctx_regs_t sp_ctx;
#endif
} tsp_context_t;
Achin Gupta375f5382014-02-18 18:12:48 +0000191
/*
 * Helper macros to store and retrieve tsp args from tsp_context.
 * Arguments are fully parenthesised so expressions can be passed safely;
 * 'x1' and 'x2' in get_tsp_args must be modifiable lvalues.
 */
#define store_tsp_args(tsp_ctx, x1, x2)	do {				\
				(tsp_ctx)->saved_tsp_args[0] = (x1);	\
				(tsp_ctx)->saved_tsp_args[1] = (x2);	\
			} while (0)

#define get_tsp_args(tsp_ctx, x1, x2)	do {				\
				(x1) = (tsp_ctx)->saved_tsp_args[0];	\
				(x2) = (tsp_ctx)->saved_tsp_args[1];	\
			} while (0)
202
/* TSPD power management handlers */
extern const spd_pm_ops_t tspd_pm;

/*******************************************************************************
 * Forward declarations
 ******************************************************************************/
struct tsp_vectors;

/*******************************************************************************
 * Function & Data prototypes
 ******************************************************************************/
/* Save the C runtime on the stack at *c_rt_ctx and enter the SP (assembly). */
uint64_t tspd_enter_sp(uint64_t *c_rt_ctx);
/* Restore the C runtime saved at c_rt_ctx and return 'ret'; never returns. */
void __dead2 tspd_exit_sp(uint64_t c_rt_ctx, uint64_t ret);
/* Synchronous entry into the SP for the cpu described by tsp_ctx. */
uint64_t tspd_synchronous_sp_entry(tsp_context_t *tsp_ctx);
/* Exit from a synchronous SP entry, handing 'ret' back; never returns. */
void __dead2 tspd_synchronous_sp_exit(tsp_context_t *tsp_ctx, uint64_t ret);
/* Populate the entry point state (pc, rw mode) for first entry into the TSP. */
void tspd_init_tsp_ep_state(struct entry_point_info *tsp_ep,
			    uint32_t rw,
			    uint64_t pc,
			    tsp_context_t *tsp_ctx);
/* Abort a preempted yielding SMC; returns 0 on failure, non-zero otherwise
 * (NOTE(review): exact return convention defined in tspd_main.c — confirm). */
int tspd_abort_preempted_smc(tsp_context_t *tsp_ctx);

/* Per-cpu SP contexts, indexed by linear cpu id. */
extern tsp_context_t tspd_sp_context[TSPD_CORE_COUNT];
/* Vector table exported by the TSP at initialisation. */
extern struct tsp_vectors *tsp_vectors;
Achin Gupta375f5382014-02-18 18:12:48 +0000226#endif /*__ASSEMBLY__*/
227
Dan Handley2bd4ef22014-04-09 13:14:54 +0100228#endif /* __TSPD_PRIVATE_H__ */