/*
 * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <errno.h>
#include <object_pool.h>
#include <platform_def.h>
#include <platform.h>
#include <sp_res_desc.h>
#include <string.h>
#include <utils.h>
#include <utils_def.h>
#include <xlat_tables_v2.h>

#include "spm_private.h"
#include "spm_shim_private.h"

/*******************************************************************************
 * Instantiation of translation table context
 ******************************************************************************/

/* Place translation tables by default along with the ones used by BL31. */
#ifndef PLAT_SP_IMAGE_XLAT_SECTION_NAME
#define PLAT_SP_IMAGE_XLAT_SECTION_NAME "xlat_table"
#endif

/*
 * Allocate elements of the translation contexts for the Secure Partitions.
 */

/* Allocate an array of mmap_region per partition. */
static struct mmap_region sp_mmap_regions[PLAT_SP_IMAGE_MMAP_REGIONS + 1]
					 [PLAT_SPM_MAX_PARTITIONS];
static OBJECT_POOL(sp_mmap_regions_pool, sp_mmap_regions,
		sizeof(mmap_region_t) * (PLAT_SP_IMAGE_MMAP_REGIONS + 1),
		PLAT_SPM_MAX_PARTITIONS);
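
/*
 * Note: one entry more than PLAT_SP_IMAGE_MMAP_REGIONS is allocated per
 * partition to hold the zero-size entry that terminates mmap region arrays
 * in the xlat_tables_v2 library.
 */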

/* Allocate individual translation tables. */
static uint64_t sp_xlat_tables[XLAT_TABLE_ENTRIES]
	[(PLAT_SP_IMAGE_MAX_XLAT_TABLES + 1) * PLAT_SPM_MAX_PARTITIONS]
	__aligned(XLAT_TABLE_SIZE) __section(PLAT_SP_IMAGE_XLAT_SECTION_NAME);
static OBJECT_POOL(sp_xlat_tables_pool, sp_xlat_tables,
		XLAT_TABLE_ENTRIES * sizeof(uint64_t),
		(PLAT_SP_IMAGE_MAX_XLAT_TABLES + 1) * PLAT_SPM_MAX_PARTITIONS);

/* Allocate base translation tables. */
static uint64_t sp_xlat_base_tables
	[GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)]
	[PLAT_SPM_MAX_PARTITIONS]
	__aligned(GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)
		  * sizeof(uint64_t))
	__section(PLAT_SP_IMAGE_XLAT_SECTION_NAME);
static OBJECT_POOL(sp_xlat_base_tables_pool, sp_xlat_base_tables,
		GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE) * sizeof(uint64_t),
		PLAT_SPM_MAX_PARTITIONS);
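
/*
 * Note: each base table is aligned to its own size because the architecture
 * requires the translation table at the initial lookup level to be aligned
 * to its size when its address is programmed into TTBR.
 */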

/* Allocate arrays used to keep track of the regions mapped in each table. */
static int sp_xlat_mapped_regions[PLAT_SP_IMAGE_MAX_XLAT_TABLES]
				 [PLAT_SPM_MAX_PARTITIONS];
static OBJECT_POOL(sp_xlat_mapped_regions_pool, sp_xlat_mapped_regions,
		sizeof(int) * PLAT_SP_IMAGE_MAX_XLAT_TABLES, PLAT_SPM_MAX_PARTITIONS);

/* Allocate individual contexts. */
static xlat_ctx_t sp_xlat_ctx[PLAT_SPM_MAX_PARTITIONS];
static OBJECT_POOL(sp_xlat_ctx_pool, sp_xlat_ctx, sizeof(xlat_ctx_t),
		PLAT_SPM_MAX_PARTITIONS);

/* Allocate and initialise the translation table context of a Secure Partition. */
xlat_ctx_t *spm_sp_xlat_context_alloc(void)
{
	xlat_ctx_t *ctx = pool_alloc(&sp_xlat_ctx_pool);

	struct mmap_region *mmap = pool_alloc(&sp_mmap_regions_pool);

	uint64_t *base_table = pool_alloc(&sp_xlat_base_tables_pool);
	uint64_t **tables = pool_alloc_n(&sp_xlat_tables_pool,
					 PLAT_SP_IMAGE_MAX_XLAT_TABLES);

	int *mapped_regions = pool_alloc(&sp_xlat_mapped_regions_pool);

	xlat_setup_dynamic_ctx(ctx, PLAT_PHY_ADDR_SPACE_SIZE - 1,
			       PLAT_VIRT_ADDR_SPACE_SIZE - 1, mmap,
			       PLAT_SP_IMAGE_MMAP_REGIONS, tables,
			       PLAT_SP_IMAGE_MAX_XLAT_TABLES, base_table,
			       EL1_EL0_REGIME, mapped_regions);

	return ctx;
}
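
/*
 * A minimal usage sketch (assuming a caller along the lines of the SPM setup
 * code): the returned handle is stored in the partition context so that
 * regions can later be mapped into it, e.g.:
 *
 *	sp_ctx->xlat_ctx_handle = spm_sp_xlat_context_alloc();
 */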

/*******************************************************************************
 * Functions to allocate memory for regions.
 ******************************************************************************/

/*
 * The region with base PLAT_SPM_HEAP_BASE and size PLAT_SPM_HEAP_SIZE is
 * reserved for SPM to use as heap to allocate memory regions of Secure
 * Partitions. This is only done at boot.
 */
static OBJECT_POOL(spm_heap_mem, (void *)PLAT_SPM_HEAP_BASE, 1U,
		   PLAT_SPM_HEAP_SIZE);
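
/*
 * Note: the object size of this pool is 1 byte, so pool_alloc_n() behaves as
 * a byte-granularity allocator: requesting N objects returns N bytes.
 */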

static uintptr_t spm_alloc_heap(size_t size)
{
	return (uintptr_t)pool_alloc_n(&spm_heap_mem, size);
}
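
/*
 * Note: the object pool allocator has no way to free allocations, so memory
 * handed out by spm_alloc_heap() stays allocated for the lifetime of BL31.
 */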

/*******************************************************************************
 * Functions to map memory regions described in the resource description.
 ******************************************************************************/
static unsigned int rdmem_attr_to_mmap_attr(uint32_t attr)
{
	unsigned int index = attr & RD_MEM_MASK;

	const unsigned int mmap_attr_arr[8] = {
		MT_DEVICE | MT_RW | MT_SECURE,	/* RD_MEM_DEVICE */
		MT_CODE | MT_SECURE,		/* RD_MEM_NORMAL_CODE */
		MT_MEMORY | MT_RW | MT_SECURE,	/* RD_MEM_NORMAL_DATA */
		MT_MEMORY | MT_RW | MT_SECURE,	/* RD_MEM_NORMAL_BSS */
		MT_RO_DATA | MT_SECURE,		/* RD_MEM_NORMAL_RODATA */
		MT_MEMORY | MT_RW | MT_SECURE,	/* RD_MEM_NORMAL_SPM_SP_SHARED_MEM */
		MT_MEMORY | MT_RW | MT_SECURE,	/* RD_MEM_NORMAL_CLIENT_SHARED_MEM */
		MT_MEMORY | MT_RW | MT_SECURE	/* RD_MEM_NORMAL_MISCELLANEOUS */
	};

	if (index >= ARRAY_SIZE(mmap_attr_arr)) {
		ERROR("Unsupported RD memory attributes 0x%x\n", attr);
		panic();
	}

	return mmap_attr_arr[index];
}
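
/*
 * For example, a region declared as RD_MEM_NORMAL_CODE translates to a
 * secure, read-only, executable mapping; map_rdmem() below additionally ORs
 * in MT_USER so that the mapping is accessible from S-EL0.
 */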

/*
 * The data provided in the resource description structure is not directly
 * compatible with a mmap_region structure. This function handles the
 * conversion and maps the region.
 */
static void map_rdmem(sp_context_t *sp_ctx, struct sp_rd_sect_mem_region *rdmem)
{
	int rc;
	mmap_region_t mmap;

	/* Location of the SP image */
	uintptr_t sp_size = sp_ctx->image_size;
	uintptr_t sp_base_va = sp_ctx->rd.attribute.load_address;
	unsigned long long sp_base_pa = sp_ctx->image_base;

	/* Location of the memory region to map */
	size_t rd_size = rdmem->size;
	uintptr_t rd_base_va = rdmem->base;
	unsigned long long rd_base_pa;

	unsigned int memtype = rdmem->attr & RD_MEM_MASK;

	VERBOSE("Adding memory region '%s'\n", rdmem->name);

	mmap.granularity = REGION_DEFAULT_GRANULARITY;

	/* Check whether the RD region lies entirely outside of the SP image */
	int is_outside = (rd_base_va + rd_size <= sp_base_va) ||
			 (sp_base_va + sp_size <= rd_base_va);

	/* Set to 1 if this region needs to be zeroed */
	int zero_region = 0;

	switch (memtype) {
	case RD_MEM_DEVICE:
		/* Device regions are mapped 1:1 */
		rd_base_pa = rd_base_va;
		break;

	case RD_MEM_NORMAL_CODE:
	case RD_MEM_NORMAL_RODATA:
	{
		if (is_outside == 1) {
			ERROR("Code and rodata sections must be fully contained in the image.\n");
			panic();
		}

		/* Get offset into the image */
		rd_base_pa = sp_base_pa + rd_base_va - sp_base_va;
		break;
	}
	case RD_MEM_NORMAL_DATA:
	{
		if (is_outside == 1) {
			ERROR("Data sections must be fully contained in the image.\n");
			panic();
		}

		rd_base_pa = spm_alloc_heap(rd_size);

		/* Get offset into the image */
		void *img_pa = (void *)(sp_base_pa + rd_base_va - sp_base_va);

		VERBOSE(" Copying data from %p to 0x%llx\n", img_pa, rd_base_pa);

		/* Map destination */
		rc = mmap_add_dynamic_region(rd_base_pa, rd_base_pa,
				rd_size, MT_MEMORY | MT_RW | MT_SECURE);
		if (rc != 0) {
			ERROR("Unable to map data region at EL3: %d\n", rc);
			panic();
		}

		/* Copy original data to destination */
		memcpy((void *)rd_base_pa, img_pa, rd_size);

		/* Unmap destination region */
		rc = mmap_remove_dynamic_region(rd_base_pa, rd_size);
		if (rc != 0) {
			ERROR("Unable to remove data region at EL3: %d\n", rc);
			panic();
		}

		break;
	}
	case RD_MEM_NORMAL_MISCELLANEOUS:
		/* Allow SPM to change the attributes of the region. */
		mmap.granularity = PAGE_SIZE;
		rd_base_pa = spm_alloc_heap(rd_size);
		zero_region = 1;
		break;

	case RD_MEM_NORMAL_SPM_SP_SHARED_MEM:
		if ((sp_ctx->spm_sp_buffer_base != 0) ||
		    (sp_ctx->spm_sp_buffer_size != 0)) {
			ERROR("A partition must have only one SPM<->SP buffer.\n");
			panic();
		}
		rd_base_pa = spm_alloc_heap(rd_size);
		zero_region = 1;
		/* Save location of this buffer, it is needed by SPM */
		sp_ctx->spm_sp_buffer_base = rd_base_pa;
		sp_ctx->spm_sp_buffer_size = rd_size;
		break;

	case RD_MEM_NORMAL_CLIENT_SHARED_MEM:
		/* Fallthrough */
	case RD_MEM_NORMAL_BSS:
		rd_base_pa = spm_alloc_heap(rd_size);
		zero_region = 1;
		break;

	default:
		panic();
	}

	mmap.base_pa = rd_base_pa;
	mmap.base_va = rd_base_va;
	mmap.size = rd_size;

	/* Only S-EL0 mappings supported for now */
	mmap.attr = rdmem_attr_to_mmap_attr(rdmem->attr) | MT_USER;

	VERBOSE(" VA: 0x%lx PA: 0x%llx (0x%lx, attr: 0x%x)\n",
		mmap.base_va, mmap.base_pa, mmap.size, mmap.attr);

	/* Map region in the context of the Secure Partition */
	mmap_add_region_ctx(sp_ctx->xlat_ctx_handle, &mmap);

	if (zero_region == 1) {
		VERBOSE(" Zeroing region...\n");

		rc = mmap_add_dynamic_region(mmap.base_pa, mmap.base_pa,
				mmap.size, MT_MEMORY | MT_RW | MT_SECURE);
		if (rc != 0) {
			ERROR("Unable to map memory at EL3 to zero: %d\n",
			      rc);
			panic();
		}

		zeromem((void *)mmap.base_pa, mmap.size);

		/*
		 * Unmap destination region unless it is the SPM<->SP buffer,
		 * which SPM itself needs to keep mapped.
		 */
		if (memtype != RD_MEM_NORMAL_SPM_SP_SHARED_MEM) {
			rc = mmap_remove_dynamic_region(rd_base_pa, rd_size);
			if (rc != 0) {
				ERROR("Unable to remove region at EL3: %d\n", rc);
				panic();
			}
		}
	}
}

void sp_map_memory_regions(sp_context_t *sp_ctx)
{
	/* This region contains the exception vectors used at S-EL1. */
	const mmap_region_t sel1_exception_vectors =
		MAP_REGION_FLAT(SPM_SHIM_EXCEPTIONS_START,
				SPM_SHIM_EXCEPTIONS_SIZE,
				MT_CODE | MT_SECURE | MT_PRIVILEGED);

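	/*
	 * Note: MT_PRIVILEGED restricts this mapping to S-EL1. The partition
	 * runs at S-EL0 and only has access to regions mapped with MT_USER,
	 * so it cannot read or modify the shim's exception vectors.
	 */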
	mmap_add_region_ctx(sp_ctx->xlat_ctx_handle,
			    &sel1_exception_vectors);

	struct sp_rd_sect_mem_region *rdmem;

	for (rdmem = sp_ctx->rd.mem_region; rdmem != NULL; rdmem = rdmem->next) {
		map_rdmem(sp_ctx, rdmem);
	}

	init_xlat_tables_ctx(sp_ctx->xlat_ctx_handle);
}