/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <arch.h>
#include <arch_helpers.h>
#include <context.h>
#include <common/debug.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <platform_def.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <services/secure_partition.h>

#include "spm_private.h"
#include "spm_shim_private.h"

/* Setup context of the Secure Partition */
void spm_sp_setup(sp_context_t *sp_ctx)
{
	cpu_context_t *ctx = &(sp_ctx->cpu_ctx);

	/*
	 * Initialize CPU context
	 * ----------------------
	 */

	entry_point_info_t ep_info = {0};

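	/*
	 * The entry point is flagged as SECURE. EP_ST_ENABLE marks the secure
	 * timer as enabled for this image, which cm_setup_context() takes into
	 * account when deriving SCR_EL3 for this context.
	 */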
	SET_PARAM_HEAD(&ep_info, PARAM_EP, VERSION_1, SECURE | EP_ST_ENABLE);

	/* Setup entrypoint and SPSR */
	ep_info.pc = BL32_BASE;
	ep_info.spsr = SPSR_64(MODE_EL0, MODE_SP_EL0, DISABLE_ALL_EXCEPTIONS);
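	/*
	 * The SPSR above makes the partition start execution at S-EL0, using
	 * SP_EL0 as its stack pointer and with all exceptions masked.
	 */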

	/*
	 * X0: Virtual address of a buffer shared between EL3 and Secure EL0.
	 *     The buffer will be mapped in the Secure EL1 translation regime
	 *     with Normal IS WBWA attributes and RO data and Execute Never
	 *     instruction access permissions.
	 *
	 * X1: Size of the buffer in bytes
	 *
	 * X2: cookie value (Implementation Defined)
	 *
	 * X3: cookie value (Implementation Defined)
	 *
	 * X4 to X7 = 0
	 */
	ep_info.args.arg0 = PLAT_SPM_BUF_BASE;
	ep_info.args.arg1 = PLAT_SPM_BUF_SIZE;
	ep_info.args.arg2 = PLAT_SPM_COOKIE_0;
	ep_info.args.arg3 = PLAT_SPM_COOKIE_1;

	cm_setup_context(ctx, &ep_info);

	/*
	 * SP_EL0: A non-zero value will indicate to the SP that the SPM has
	 * initialized the stack pointer for the current CPU through
	 * implementation defined means. The value will be 0 otherwise.
	 */
	write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_SP_EL0,
		      PLAT_SP_IMAGE_STACK_BASE + PLAT_SP_IMAGE_STACK_PCPU_SIZE);
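	/*
	 * The value programmed here is the top of the first per-CPU stack;
	 * the stack itself grows downwards from this address.
	 */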

	/*
	 * Setup translation tables
	 * ------------------------
	 */

#if ENABLE_ASSERTIONS

	/* Get max granularity supported by the platform. */
	unsigned int max_granule = xlat_arch_get_max_supported_granule_size();

	VERBOSE("Max translation granule size supported: %u KiB\n",
		max_granule / 1024U);

	unsigned int max_granule_mask = max_granule - 1U;

	/* Base must be aligned to the max granularity */
	assert((PLAT_SP_IMAGE_NS_BUF_BASE & max_granule_mask) == 0);

	/* Size must be a multiple of the max granularity */
	assert((PLAT_SP_IMAGE_NS_BUF_SIZE & max_granule_mask) == 0);

#endif /* ENABLE_ASSERTIONS */

	/* This region contains the exception vectors used at S-EL1. */
	const mmap_region_t sel1_exception_vectors =
		MAP_REGION_FLAT(SPM_SHIM_EXCEPTIONS_START,
				SPM_SHIM_EXCEPTIONS_SIZE,
				MT_CODE | MT_SECURE | MT_PRIVILEGED);
	mmap_add_region_ctx(sp_ctx->xlat_ctx_handle,
			    &sel1_exception_vectors);
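	/*
	 * MT_PRIVILEGED keeps the shim vectors out of reach of the S-EL0
	 * partition itself: only S-EL1 can execute this region.
	 */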

	mmap_add_ctx(sp_ctx->xlat_ctx_handle,
		     plat_get_secure_partition_mmap(NULL));

	init_xlat_tables_ctx(sp_ctx->xlat_ctx_handle);

	/*
	 * MMU-related registers
	 * ---------------------
	 */
	xlat_ctx_t *xlat_ctx = sp_ctx->xlat_ctx_handle;

	uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];

	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, 0, xlat_ctx->base_table,
		      xlat_ctx->pa_max_address, xlat_ctx->va_max_address,
		      EL1_EL0_REGIME);
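	/*
	 * setup_mmu_cfg() fills mmu_cfg_params with the MAIR, TCR and TTBR0
	 * values for the EL1&0 translation regime. They are stored in the
	 * partition's saved context below and only take effect when that
	 * context is restored on entry to the partition.
	 */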

	write_ctx_reg(get_sysregs_ctx(ctx), CTX_MAIR_EL1,
		      mmu_cfg_params[MMU_CFG_MAIR]);

	write_ctx_reg(get_sysregs_ctx(ctx), CTX_TCR_EL1,
		      mmu_cfg_params[MMU_CFG_TCR]);

	write_ctx_reg(get_sysregs_ctx(ctx), CTX_TTBR0_EL1,
		      mmu_cfg_params[MMU_CFG_TTBR0]);

	/* Setup SCTLR_EL1 */
	u_register_t sctlr_el1 = read_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1);

	sctlr_el1 |=
		/*SCTLR_EL1_RES1 |*/
		/* Don't trap DC CVAU, DC CIVAC, DC CVAC, DC CVAP, or IC IVAU */
		SCTLR_UCI_BIT |
		/* RW regions at xlat regime EL1&0 are forced to be XN. */
		SCTLR_WXN_BIT |
		/* Don't trap to EL1 execution of WFI or WFE at EL0. */
		SCTLR_NTWI_BIT | SCTLR_NTWE_BIT |
		/* Don't trap to EL1 accesses to CTR_EL0 from EL0. */
		SCTLR_UCT_BIT |
		/* Don't trap to EL1 execution of DC ZVA at EL0. */
		SCTLR_DZE_BIT |
		/* Enable SP Alignment check for EL0 */
		SCTLR_SA0_BIT |
		/* Allow cacheable data and instr. accesses to normal memory. */
		SCTLR_C_BIT | SCTLR_I_BIT |
		/* Enable MMU. */
		SCTLR_M_BIT
	;

	sctlr_el1 &= ~(
		/* Explicit data accesses at EL0 are little-endian. */
		SCTLR_E0E_BIT |
		/*
		 * Alignment fault checking disabled when at EL1 and EL0 as
		 * the UEFI spec permits unaligned accesses.
		 */
		SCTLR_A_BIT |
		/* Accesses to DAIF from EL0 are trapped to EL1. */
		SCTLR_UMA_BIT
	);

	write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_el1);

	/*
	 * Setup other system registers
	 * ----------------------------
	 */

	/* Shim Exception Vector Base Address */
	write_ctx_reg(get_sysregs_ctx(ctx), CTX_VBAR_EL1,
		      SPM_SHIM_EXCEPTIONS_PTR);

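	/*
	 * Allow EL0 to access the physical and virtual counter and timer
	 * registers without trapping to S-EL1.
	 */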
	write_ctx_reg(get_sysregs_ctx(ctx), CTX_CNTKCTL_EL1,
		      EL0PTEN_BIT | EL0VTEN_BIT | EL0PCTEN_BIT | EL0VCTEN_BIT);

	/*
	 * FPEN: Allow the Secure Partition to access FP/SIMD registers.
	 * Note that SPM will not do any saving/restoring of these registers on
	 * behalf of the SP. This falls under the SP's responsibility.
	 * TTA: Enable access to trace registers.
	 * ZEN (v8.2): Trap SVE instructions and access to SVE registers.
	 */
	write_ctx_reg(get_sysregs_ctx(ctx), CTX_CPACR_EL1,
		      CPACR_EL1_FPEN(CPACR_EL1_FP_TRAP_NONE));

	/*
	 * Prepare information in buffer shared between EL3 and S-EL0
	 * ----------------------------------------------------------
	 */

	void *shared_buf_ptr = (void *) PLAT_SPM_BUF_BASE;

	/* Copy the boot information into the shared buffer with the SP. */
	assert((uintptr_t)shared_buf_ptr + sizeof(secure_partition_boot_info_t)
	       <= (PLAT_SPM_BUF_BASE + PLAT_SPM_BUF_SIZE));

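	/* Check that PLAT_SPM_BUF_BASE + PLAT_SPM_BUF_SIZE does not overflow. */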
	assert(PLAT_SPM_BUF_BASE <= (UINTPTR_MAX - PLAT_SPM_BUF_SIZE + 1));

	const secure_partition_boot_info_t *sp_boot_info =
			plat_get_secure_partition_boot_info(NULL);

	assert(sp_boot_info != NULL);

	memcpy((void *) shared_buf_ptr, (const void *) sp_boot_info,
	       sizeof(secure_partition_boot_info_t));

	/* Pointer to the MP information from the platform port. */
	secure_partition_mp_info_t *sp_mp_info =
		((secure_partition_boot_info_t *) shared_buf_ptr)->mp_info;

	assert(sp_mp_info != NULL);

	/*
	 * Point the shared buffer MP information pointer to where the info will
	 * be populated, just after the boot info.
	 */
	((secure_partition_boot_info_t *) shared_buf_ptr)->mp_info =
		(secure_partition_mp_info_t *) ((uintptr_t)shared_buf_ptr
		+ sizeof(secure_partition_boot_info_t));

	/*
	 * Update the shared buffer pointer to where the MP information for the
	 * payload will be populated.
	 */
	shared_buf_ptr = ((secure_partition_boot_info_t *) shared_buf_ptr)->mp_info;

	/*
	 * Copy the cpu information into the shared buffer area after the boot
	 * information.
	 */
	assert(sp_boot_info->num_cpus <= PLATFORM_CORE_COUNT);

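	/* Ensure the per-CPU MP information array fits in the shared buffer. */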
	assert((uintptr_t)shared_buf_ptr
	       <= (PLAT_SPM_BUF_BASE + PLAT_SPM_BUF_SIZE -
		   (sp_boot_info->num_cpus * sizeof(*sp_mp_info))));

	memcpy(shared_buf_ptr, (const void *) sp_mp_info,
	       sp_boot_info->num_cpus * sizeof(*sp_mp_info));

	/*
	 * Calculate the linear indices of the cores in the boot information
	 * for the secure partition and flag the primary CPU.
	 */
	sp_mp_info = (secure_partition_mp_info_t *) shared_buf_ptr;

	for (unsigned int index = 0; index < sp_boot_info->num_cpus; index++) {
		u_register_t mpidr = sp_mp_info[index].mpidr;

		sp_mp_info[index].linear_id = plat_core_pos_by_mpidr(mpidr);
		if (plat_my_core_pos() == sp_mp_info[index].linear_id)
			sp_mp_info[index].flags |= MP_INFO_FLAG_PRIMARY_CPU;
	}
}