blob: 5d5bc519f32771d394dd87da471a28efb006eeb4 [file] [log] [blame]
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +01001/*
Antonio Nino Diazeef32382019-03-29 13:48:50 +00002 * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +01003 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7#include <arch.h>
Antonio Nino Diaz7cb362a2019-03-27 13:45:52 +00008#include <arch_features.h>
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +01009#include <arch_helpers.h>
10#include <assert.h>
11#include <errno.h>
Antonio Nino Diazbb7d1cd2018-10-30 11:34:23 +000012#include <string.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000013
14#include <platform_def.h>
15
16#include <lib/object_pool.h>
17#include <lib/utils.h>
18#include <lib/utils_def.h>
19#include <lib/xlat_tables/xlat_tables_v2.h>
20#include <plat/common/platform.h>
21#include <services/sp_res_desc.h>
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +010022
23#include "spm_private.h"
24#include "spm_shim_private.h"
25
Antonio Nino Diazbb7d1cd2018-10-30 11:34:23 +000026/*******************************************************************************
27 * Instantiation of translation table context
28 ******************************************************************************/
29
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +010030/* Place translation tables by default along with the ones used by BL31. */
31#ifndef PLAT_SP_IMAGE_XLAT_SECTION_NAME
32#define PLAT_SP_IMAGE_XLAT_SECTION_NAME "xlat_table"
33#endif
34
/*
 * Allocate elements of the translation contexts for the Secure Partitions.
 * Each array is carved into per-partition chunks through an OBJECT_POOL, so
 * every partition gets its own slice at context-allocation time.
 */

/*
 * Allocate an array of mmap_region per partition. The extra entry (+1) leaves
 * room for the terminating sentinel the xlat library expects at the end of a
 * region list.
 */
static struct mmap_region sp_mmap_regions[PLAT_SP_IMAGE_MMAP_REGIONS + 1]
	[PLAT_SPM_MAX_PARTITIONS];
static OBJECT_POOL(sp_mmap_regions_pool, sp_mmap_regions,
	sizeof(mmap_region_t) * (PLAT_SP_IMAGE_MMAP_REGIONS + 1),
	PLAT_SPM_MAX_PARTITIONS);

/*
 * Allocate individual translation tables. Per partition there are
 * PLAT_SP_IMAGE_MAX_XLAT_TABLES intermediate tables plus one base table
 * (hence the +1). Placed in PLAT_SP_IMAGE_XLAT_SECTION_NAME alongside the
 * BL31 tables by default.
 */
static uint64_t sp_xlat_tables[XLAT_TABLE_ENTRIES]
	[(PLAT_SP_IMAGE_MAX_XLAT_TABLES + 1) * PLAT_SPM_MAX_PARTITIONS]
	__aligned(XLAT_TABLE_SIZE) __section(PLAT_SP_IMAGE_XLAT_SECTION_NAME);
static OBJECT_POOL(sp_xlat_tables_pool, sp_xlat_tables,
	XLAT_TABLE_ENTRIES * sizeof(uint64_t),
	(PLAT_SP_IMAGE_MAX_XLAT_TABLES + 1) * PLAT_SPM_MAX_PARTITIONS);

/*
 * Allocate per-partition bookkeeping arrays (one int per translation table;
 * presumably a mapped-regions counter used by the xlat library — see
 * xlat_setup_dynamic_ctx()).
 */
static int sp_xlat_mapped_regions[PLAT_SP_IMAGE_MAX_XLAT_TABLES]
	[PLAT_SPM_MAX_PARTITIONS];
static OBJECT_POOL(sp_xlat_mapped_regions_pool, sp_xlat_mapped_regions,
	sizeof(int) * PLAT_SP_IMAGE_MAX_XLAT_TABLES, PLAT_SPM_MAX_PARTITIONS);

/* Allocate individual contexts, one per partition. */
static xlat_ctx_t sp_xlat_ctx[PLAT_SPM_MAX_PARTITIONS];
static OBJECT_POOL(sp_xlat_ctx_pool, sp_xlat_ctx, sizeof(xlat_ctx_t),
	PLAT_SPM_MAX_PARTITIONS);
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +010064
65/* Get handle of Secure Partition translation context */
Antonio Nino Diaz18e312d2019-03-27 13:04:46 +000066void spm_sp_xlat_context_alloc(sp_context_t *sp_ctx)
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +010067{
Antonio Nino Diaz7cb362a2019-03-27 13:45:52 +000068 /* Allocate xlat context elements */
69
Antonio Nino Diaz675d1552018-10-30 11:36:47 +000070 xlat_ctx_t *ctx = pool_alloc(&sp_xlat_ctx_pool);
71
72 struct mmap_region *mmap = pool_alloc(&sp_mmap_regions_pool);
73
Antonio Nino Diaz18e312d2019-03-27 13:04:46 +000074 uint64_t *base_table = pool_alloc(&sp_xlat_tables_pool);
Antonio Nino Diaz675d1552018-10-30 11:36:47 +000075 uint64_t **tables = pool_alloc_n(&sp_xlat_tables_pool,
76 PLAT_SP_IMAGE_MAX_XLAT_TABLES);
77
78 int *mapped_regions = pool_alloc(&sp_xlat_mapped_regions_pool);
79
Antonio Nino Diaz7cb362a2019-03-27 13:45:52 +000080 /* Calculate the size of the virtual address space needed */
81
82 uintptr_t va_size = 0U;
83 struct sp_rd_sect_mem_region *rdmem;
84
85 for (rdmem = sp_ctx->rd.mem_region; rdmem != NULL; rdmem = rdmem->next) {
86 uintptr_t end_va = (uintptr_t)rdmem->base +
87 (uintptr_t)rdmem->size;
88
89 if (end_va > va_size)
90 va_size = end_va;
91 }
92
93 if (va_size == 0U) {
94 ERROR("No regions in resource description.\n");
95 panic();
96 }
97
98 /*
99 * Get the power of two that is greater or equal to the top VA. The
100 * values of base and size in the resource description are 32-bit wide
101 * so the values will never overflow when using a uintptr_t.
102 */
103 if (!IS_POWER_OF_TWO(va_size)) {
104 va_size = 1ULL <<
105 ((sizeof(va_size) * 8) - __builtin_clzll(va_size));
106 }
107
108 if (va_size > PLAT_VIRT_ADDR_SPACE_SIZE) {
109 ERROR("Resource description requested too much virtual memory.\n");
110 panic();
111 }
112
113 uintptr_t min_va_size;
114
115 /* The following sizes are only valid for 4KB pages */
116 assert(PAGE_SIZE == (4U * 1024U));
117
118 if (is_armv8_4_ttst_present()) {
119 VERBOSE("Using ARMv8.4-TTST\n");
120 min_va_size = 1ULL << (64 - TCR_TxSZ_MAX_TTST);
121 } else {
122 min_va_size = 1ULL << (64 - TCR_TxSZ_MAX);
123 }
124
125 if (va_size < min_va_size) {
126 va_size = min_va_size;
127 }
128
129 /* Initialize xlat context */
130
131 xlat_setup_dynamic_ctx(ctx, PLAT_PHY_ADDR_SPACE_SIZE - 1ULL,
132 va_size - 1ULL, mmap,
Antonio Nino Diaz675d1552018-10-30 11:36:47 +0000133 PLAT_SP_IMAGE_MMAP_REGIONS, tables,
134 PLAT_SP_IMAGE_MAX_XLAT_TABLES, base_table,
135 EL1_EL0_REGIME, mapped_regions);
136
Antonio Nino Diaz18e312d2019-03-27 13:04:46 +0000137 sp_ctx->xlat_ctx_handle = ctx;
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +0100138};
139
/*******************************************************************************
 * Translation table context used for S-EL1 exception vectors
 ******************************************************************************/

/*
 * Statically instantiate the xlat context (and its tables) for the shim layer
 * that provides the S-EL1 exception vectors. The tables are placed in the
 * same linker section as the Secure Partition tables.
 */
REGISTER_XLAT_CONTEXT2(spm_sel1, SPM_SHIM_MMAP_REGIONS, SPM_SHIM_XLAT_TABLES,
			SPM_SHIM_XLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE,
			EL1_EL0_REGIME, PLAT_SP_IMAGE_XLAT_SECTION_NAME);
147
148void spm_exceptions_xlat_init_context(void)
149{
150 /* This region contains the exception vectors used at S-EL1. */
151 mmap_region_t sel1_exception_vectors =
152 MAP_REGION(SPM_SHIM_EXCEPTIONS_PTR,
153 0x0UL,
154 SPM_SHIM_EXCEPTIONS_SIZE,
155 MT_CODE | MT_SECURE | MT_PRIVILEGED);
156
157 mmap_add_region_ctx(&spm_sel1_xlat_ctx,
158 &sel1_exception_vectors);
159
160 init_xlat_tables_ctx(&spm_sel1_xlat_ctx);
161}
162
163uint64_t *spm_exceptions_xlat_get_base_table(void)
164{
165 return spm_sel1_xlat_ctx.base_table;
166}
167
/*******************************************************************************
 * Functions to allocate memory for regions.
 ******************************************************************************/

/*
 * The region with base PLAT_SPM_HEAP_BASE and size PLAT_SPM_HEAP_SIZE is
 * reserved for SPM to use as heap to allocate memory regions of Secure
 * Partitions. This is only done at boot.
 *
 * The element size is 1 byte, so pool_alloc_n(&spm_heap_mem, n) hands out n
 * bytes. Allocations are never freed.
 */
static OBJECT_POOL(spm_heap_mem, (void *)PLAT_SPM_HEAP_BASE, 1U,
		   PLAT_SPM_HEAP_SIZE);
179
180static uintptr_t spm_alloc_heap(size_t size)
181{
182 return (uintptr_t)pool_alloc_n(&spm_heap_mem, size);
183}
184
185/*******************************************************************************
186 * Functions to map memory regions described in the resource description.
187 ******************************************************************************/
188static unsigned int rdmem_attr_to_mmap_attr(uint32_t attr)
189{
190 unsigned int index = attr & RD_MEM_MASK;
191
192 const unsigned int mmap_attr_arr[8] = {
193 MT_DEVICE | MT_RW | MT_SECURE, /* RD_MEM_DEVICE */
194 MT_CODE | MT_SECURE, /* RD_MEM_NORMAL_CODE */
195 MT_MEMORY | MT_RW | MT_SECURE, /* RD_MEM_NORMAL_DATA */
196 MT_MEMORY | MT_RW | MT_SECURE, /* RD_MEM_NORMAL_BSS */
197 MT_RO_DATA | MT_SECURE, /* RD_MEM_NORMAL_RODATA */
198 MT_MEMORY | MT_RW | MT_SECURE, /* RD_MEM_NORMAL_SPM_SP_SHARED_MEM */
199 MT_MEMORY | MT_RW | MT_SECURE, /* RD_MEM_NORMAL_CLIENT_SHARED_MEM */
200 MT_MEMORY | MT_RW | MT_SECURE /* RD_MEM_NORMAL_MISCELLANEOUS */
201 };
202
203 if (index >= ARRAY_SIZE(mmap_attr_arr)) {
204 ERROR("Unsupported RD memory attributes 0x%x\n", attr);
205 panic();
206 }
207
208 return mmap_attr_arr[index];
209}
210
/*
 * The data provided in the resource description structure is not directly
 * compatible with a mmap_region structure. This function handles the
 * conversion and maps it.
 *
 * Depending on the region's memory type this may also:
 *  - allocate backing memory from the SPM heap (data/BSS/shared/misc),
 *  - temporarily map the destination at EL3 to copy or zero it,
 *  - record the SPM<->SP shared buffer location in sp_ctx.
 * Empty regions are skipped; invalid layouts cause a panic.
 */
static void map_rdmem(sp_context_t *sp_ctx, struct sp_rd_sect_mem_region *rdmem)
{
	int rc;
	mmap_region_t mmap;

	/* Location of the SP image */
	uintptr_t sp_size = sp_ctx->image_size;
	uintptr_t sp_base_va = sp_ctx->rd.attribute.load_address;
	unsigned long long sp_base_pa = sp_ctx->image_base;

	/* Location of the memory region to map */
	size_t rd_size = rdmem->size;
	uintptr_t rd_base_va = rdmem->base;
	unsigned long long rd_base_pa;

	unsigned int memtype = rdmem->attr & RD_MEM_MASK;

	/* Nothing to do for zero-sized regions. */
	if (rd_size == 0U) {
		VERBOSE("Memory region '%s' is empty. Ignored.\n", rdmem->name);
		return;
	}

	VERBOSE("Adding memory region '%s'\n", rdmem->name);

	mmap.granularity = REGION_DEFAULT_GRANULARITY;

	/* Check if the RD region is inside of the SP image or not */
	int is_outside = (rd_base_va + rd_size <= sp_base_va) ||
			 (sp_base_va + sp_size <= rd_base_va);

	/* Set to 1 if it is needed to zero this region */
	int zero_region = 0;

	switch (memtype) {
	case RD_MEM_DEVICE:
		/* Device regions are mapped 1:1 */
		rd_base_pa = rd_base_va;
		break;

	case RD_MEM_NORMAL_CODE:
	case RD_MEM_NORMAL_RODATA:
	{
		/* Read-only sections are served directly from the image. */
		if (is_outside == 1) {
			ERROR("Code and rodata sections must be fully contained in the image.");
			panic();
		}

		/* Get offset into the image */
		rd_base_pa = sp_base_pa + rd_base_va - sp_base_va;
		break;
	}
	case RD_MEM_NORMAL_DATA:
	{
		/*
		 * Writable data gets a private heap copy so the loaded image
		 * itself is not modified at runtime.
		 */
		if (is_outside == 1) {
			ERROR("Data sections must be fully contained in the image.");
			panic();
		}

		rd_base_pa = spm_alloc_heap(rd_size);

		/* Get offset into the image */
		void *img_pa = (void *)(sp_base_pa + rd_base_va - sp_base_va);

		VERBOSE(" Copying data from %p to 0x%llx\n", img_pa, rd_base_pa);

		/* Map destination at EL3 so the copy below can write it. */
		rc = mmap_add_dynamic_region(rd_base_pa, rd_base_pa,
				rd_size, MT_MEMORY | MT_RW | MT_SECURE);
		if (rc != 0) {
			ERROR("Unable to map data region at EL3: %d\n", rc);
			panic();
		}

		/* Copy original data to destination */
		memcpy((void *)rd_base_pa, img_pa, rd_size);

		/* Unmap destination region */
		rc = mmap_remove_dynamic_region(rd_base_pa, rd_size);
		if (rc != 0) {
			ERROR("Unable to remove data region at EL3: %d\n", rc);
			panic();
		}

		break;
	}
	case RD_MEM_NORMAL_MISCELLANEOUS:
		/* Allow SPM to change the attributes of the region. */
		mmap.granularity = PAGE_SIZE;
		rd_base_pa = spm_alloc_heap(rd_size);
		zero_region = 1;
		break;

	case RD_MEM_NORMAL_SPM_SP_SHARED_MEM:
		/* Only one SPM<->SP buffer may exist per partition. */
		if ((sp_ctx->spm_sp_buffer_base != 0) ||
		    (sp_ctx->spm_sp_buffer_size != 0)) {
			ERROR("A partition must have only one SPM<->SP buffer.\n");
			panic();
		}
		rd_base_pa = spm_alloc_heap(rd_size);
		zero_region = 1;
		/* Save location of this buffer, it is needed by SPM */
		sp_ctx->spm_sp_buffer_base = rd_base_pa;
		sp_ctx->spm_sp_buffer_size = rd_size;
		break;

	case RD_MEM_NORMAL_CLIENT_SHARED_MEM:
		/* Fallthrough */
	case RD_MEM_NORMAL_BSS:
		rd_base_pa = spm_alloc_heap(rd_size);
		zero_region = 1;
		break;

	default:
		panic();
	}

	mmap.base_pa = rd_base_pa;
	mmap.base_va = rd_base_va;
	mmap.size = rd_size;

	/* Only S-EL0 mappings supported for now */
	mmap.attr = rdmem_attr_to_mmap_attr(rdmem->attr) | MT_USER;

	VERBOSE(" VA: 0x%lx PA: 0x%llx (0x%lx, attr: 0x%x)\n",
		mmap.base_va, mmap.base_pa, mmap.size, mmap.attr);

	/* Map region in the context of the Secure Partition */
	mmap_add_region_ctx(sp_ctx->xlat_ctx_handle, &mmap);

	/*
	 * Heap-backed regions are zeroed through a temporary EL3 mapping so
	 * the partition never sees stale heap contents.
	 */
	if (zero_region == 1) {
		VERBOSE(" Zeroing region...\n");

		rc = mmap_add_dynamic_region(mmap.base_pa, mmap.base_pa,
				mmap.size, MT_MEMORY | MT_RW | MT_SECURE);
		if (rc != 0) {
			ERROR("Unable to map memory at EL3 to zero: %d\n",
			      rc);
			panic();
		}

		zeromem((void *)mmap.base_pa, mmap.size);

		/*
		 * Unmap destination region unless it is the SPM<->SP buffer,
		 * which must be used by SPM.
		 */
		if (memtype != RD_MEM_NORMAL_SPM_SP_SHARED_MEM) {
			rc = mmap_remove_dynamic_region(rd_base_pa, rd_size);
			if (rc != 0) {
				ERROR("Unable to remove region at EL3: %d\n", rc);
				panic();
			}
		}
	}
}
371
372void sp_map_memory_regions(sp_context_t *sp_ctx)
373{
Antonio Nino Diazbb7d1cd2018-10-30 11:34:23 +0000374 struct sp_rd_sect_mem_region *rdmem;
375
376 for (rdmem = sp_ctx->rd.mem_region; rdmem != NULL; rdmem = rdmem->next) {
377 map_rdmem(sp_ctx, rdmem);
378 }
379
380 init_xlat_tables_ctx(sp_ctx->xlat_ctx_handle);
381}