blob: 1619f9720aab5602101dd98f0d9d51dcfd094b91 [file] [log] [blame]
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +01001/*
Antonio Nino Diazeef32382019-03-29 13:48:50 +00002 * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +01003 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7#include <arch.h>
8#include <arch_helpers.h>
9#include <assert.h>
10#include <errno.h>
Antonio Nino Diazbb7d1cd2018-10-30 11:34:23 +000011#include <string.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000012
13#include <platform_def.h>
14
15#include <lib/object_pool.h>
16#include <lib/utils.h>
17#include <lib/utils_def.h>
18#include <lib/xlat_tables/xlat_tables_v2.h>
19#include <plat/common/platform.h>
20#include <services/sp_res_desc.h>
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +010021
22#include "spm_private.h"
23#include "spm_shim_private.h"
24
Antonio Nino Diazbb7d1cd2018-10-30 11:34:23 +000025/*******************************************************************************
26 * Instantiation of translation table context
27 ******************************************************************************/
28
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +010029/* Place translation tables by default along with the ones used by BL31. */
30#ifndef PLAT_SP_IMAGE_XLAT_SECTION_NAME
31#define PLAT_SP_IMAGE_XLAT_SECTION_NAME "xlat_table"
32#endif
33
/*
 * Allocate elements of the translation contexts for the Secure Partitions.
 * Each pool holds PLAT_SPM_MAX_PARTITIONS objects; spm_sp_xlat_context_alloc()
 * draws one object per partition from each of them.
 */

/*
 * Allocate an array of mmap_region per partition. The extra entry
 * (PLAT_SP_IMAGE_MMAP_REGIONS + 1) matches the pool's element size below —
 * presumably room for a terminating entry; confirm against the xlat library.
 */
static struct mmap_region sp_mmap_regions[PLAT_SP_IMAGE_MMAP_REGIONS + 1]
	[PLAT_SPM_MAX_PARTITIONS];
static OBJECT_POOL(sp_mmap_regions_pool, sp_mmap_regions,
	sizeof(mmap_region_t) * (PLAT_SP_IMAGE_MMAP_REGIONS + 1),
	PLAT_SPM_MAX_PARTITIONS);

/*
 * Allocate individual translation tables. Per partition there are
 * PLAT_SP_IMAGE_MAX_XLAT_TABLES tables plus 1 base table (hence the "+ 1").
 * Placed in PLAT_SP_IMAGE_XLAT_SECTION_NAME alongside BL31's own tables.
 */
static uint64_t sp_xlat_tables[XLAT_TABLE_ENTRIES]
	[(PLAT_SP_IMAGE_MAX_XLAT_TABLES + 1) * PLAT_SPM_MAX_PARTITIONS]
	__aligned(XLAT_TABLE_SIZE) __section(PLAT_SP_IMAGE_XLAT_SECTION_NAME);
static OBJECT_POOL(sp_xlat_tables_pool, sp_xlat_tables,
	XLAT_TABLE_ENTRIES * sizeof(uint64_t),
	(PLAT_SP_IMAGE_MAX_XLAT_TABLES + 1) * PLAT_SPM_MAX_PARTITIONS);

/* Allocate arrays of "mapped regions" bookkeeping ints, one set per table. */
static int sp_xlat_mapped_regions[PLAT_SP_IMAGE_MAX_XLAT_TABLES]
	[PLAT_SPM_MAX_PARTITIONS];
static OBJECT_POOL(sp_xlat_mapped_regions_pool, sp_xlat_mapped_regions,
	sizeof(int) * PLAT_SP_IMAGE_MAX_XLAT_TABLES, PLAT_SPM_MAX_PARTITIONS);

/* Allocate individual contexts, one per partition. */
static xlat_ctx_t sp_xlat_ctx[PLAT_SPM_MAX_PARTITIONS];
static OBJECT_POOL(sp_xlat_ctx_pool, sp_xlat_ctx, sizeof(xlat_ctx_t),
	PLAT_SPM_MAX_PARTITIONS);
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +010063
64/* Get handle of Secure Partition translation context */
Antonio Nino Diaz18e312d2019-03-27 13:04:46 +000065void spm_sp_xlat_context_alloc(sp_context_t *sp_ctx)
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +010066{
Antonio Nino Diaz675d1552018-10-30 11:36:47 +000067 xlat_ctx_t *ctx = pool_alloc(&sp_xlat_ctx_pool);
68
69 struct mmap_region *mmap = pool_alloc(&sp_mmap_regions_pool);
70
Antonio Nino Diaz18e312d2019-03-27 13:04:46 +000071 uint64_t *base_table = pool_alloc(&sp_xlat_tables_pool);
Antonio Nino Diaz675d1552018-10-30 11:36:47 +000072 uint64_t **tables = pool_alloc_n(&sp_xlat_tables_pool,
73 PLAT_SP_IMAGE_MAX_XLAT_TABLES);
74
75 int *mapped_regions = pool_alloc(&sp_xlat_mapped_regions_pool);
76
77 xlat_setup_dynamic_ctx(ctx, PLAT_PHY_ADDR_SPACE_SIZE - 1,
78 PLAT_VIRT_ADDR_SPACE_SIZE - 1, mmap,
79 PLAT_SP_IMAGE_MMAP_REGIONS, tables,
80 PLAT_SP_IMAGE_MAX_XLAT_TABLES, base_table,
81 EL1_EL0_REGIME, mapped_regions);
82
Antonio Nino Diaz18e312d2019-03-27 13:04:46 +000083 sp_ctx->xlat_ctx_handle = ctx;
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +010084};
85
/*******************************************************************************
 * Translation table context used for S-EL1 exception vectors
 ******************************************************************************/

/*
 * Instantiates a static context named spm_sel1_xlat_ctx (referenced by the
 * functions below), with its tables placed in the SP image xlat section.
 */
REGISTER_XLAT_CONTEXT2(spm_sel1, SPM_SHIM_MMAP_REGIONS, SPM_SHIM_XLAT_TABLES,
			SPM_SHIM_XLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE,
			EL1_EL0_REGIME, PLAT_SP_IMAGE_XLAT_SECTION_NAME);
93
94void spm_exceptions_xlat_init_context(void)
95{
96 /* This region contains the exception vectors used at S-EL1. */
97 mmap_region_t sel1_exception_vectors =
98 MAP_REGION(SPM_SHIM_EXCEPTIONS_PTR,
99 0x0UL,
100 SPM_SHIM_EXCEPTIONS_SIZE,
101 MT_CODE | MT_SECURE | MT_PRIVILEGED);
102
103 mmap_add_region_ctx(&spm_sel1_xlat_ctx,
104 &sel1_exception_vectors);
105
106 init_xlat_tables_ctx(&spm_sel1_xlat_ctx);
107}
108
109uint64_t *spm_exceptions_xlat_get_base_table(void)
110{
111 return spm_sel1_xlat_ctx.base_table;
112}
113
114/*******************************************************************************
Antonio Nino Diazbb7d1cd2018-10-30 11:34:23 +0000115 * Functions to allocate memory for regions.
116 ******************************************************************************/
117
/*
 * The region with base PLAT_SPM_HEAP_BASE and size PLAT_SPM_HEAP_SIZE is
 * reserved for SPM to use as heap to allocate memory regions of Secure
 * Partitions. This is only done at boot.
 *
 * Element size is 1 byte so that pool_alloc_n() can carve out chunks at
 * byte granularity (see spm_alloc_heap()).
 */
static OBJECT_POOL(spm_heap_mem, (void *)PLAT_SPM_HEAP_BASE, 1U,
		PLAT_SPM_HEAP_SIZE);
125
126static uintptr_t spm_alloc_heap(size_t size)
127{
128 return (uintptr_t)pool_alloc_n(&spm_heap_mem, size);
129}
130
131/*******************************************************************************
132 * Functions to map memory regions described in the resource description.
133 ******************************************************************************/
134static unsigned int rdmem_attr_to_mmap_attr(uint32_t attr)
135{
136 unsigned int index = attr & RD_MEM_MASK;
137
138 const unsigned int mmap_attr_arr[8] = {
139 MT_DEVICE | MT_RW | MT_SECURE, /* RD_MEM_DEVICE */
140 MT_CODE | MT_SECURE, /* RD_MEM_NORMAL_CODE */
141 MT_MEMORY | MT_RW | MT_SECURE, /* RD_MEM_NORMAL_DATA */
142 MT_MEMORY | MT_RW | MT_SECURE, /* RD_MEM_NORMAL_BSS */
143 MT_RO_DATA | MT_SECURE, /* RD_MEM_NORMAL_RODATA */
144 MT_MEMORY | MT_RW | MT_SECURE, /* RD_MEM_NORMAL_SPM_SP_SHARED_MEM */
145 MT_MEMORY | MT_RW | MT_SECURE, /* RD_MEM_NORMAL_CLIENT_SHARED_MEM */
146 MT_MEMORY | MT_RW | MT_SECURE /* RD_MEM_NORMAL_MISCELLANEOUS */
147 };
148
149 if (index >= ARRAY_SIZE(mmap_attr_arr)) {
150 ERROR("Unsupported RD memory attributes 0x%x\n", attr);
151 panic();
152 }
153
154 return mmap_attr_arr[index];
155}
156
/*
 * The data provided in the resource description structure is not directly
 * compatible with a mmap_region structure. This function handles the
 * conversion and adds the resulting region to the partition's translation
 * context. For region types backed by the SPM heap it also allocates the
 * physical backing and (where required) copies or zeroes its contents,
 * temporarily mapping the destination at EL3 to do so.
 */
static void map_rdmem(sp_context_t *sp_ctx, struct sp_rd_sect_mem_region *rdmem)
{
	int rc;
	mmap_region_t mmap;

	/* Location of the SP image */
	uintptr_t sp_size = sp_ctx->image_size;
	uintptr_t sp_base_va = sp_ctx->rd.attribute.load_address;
	unsigned long long sp_base_pa = sp_ctx->image_base;

	/* Location of the memory region to map */
	size_t rd_size = rdmem->size;
	uintptr_t rd_base_va = rdmem->base;
	unsigned long long rd_base_pa;

	unsigned int memtype = rdmem->attr & RD_MEM_MASK;

	/* Empty regions are legal in the RD; silently skip them. */
	if (rd_size == 0U) {
		VERBOSE("Memory region '%s' is empty. Ignored.\n", rdmem->name);
		return;
	}

	VERBOSE("Adding memory region '%s'\n", rdmem->name);

	mmap.granularity = REGION_DEFAULT_GRANULARITY;

	/*
	 * Check if the RD region is inside of the SP image or not.
	 * NOTE(review): assumes rd_base_va + rd_size does not wrap — values
	 * come from the resource description; confirm they are validated.
	 */
	int is_outside = (rd_base_va + rd_size <= sp_base_va) ||
			 (sp_base_va + sp_size <= rd_base_va);

	/* Set to 1 if it is needed to zero this region */
	int zero_region = 0;

	switch (memtype) {
	case RD_MEM_DEVICE:
		/* Device regions are mapped 1:1 */
		rd_base_pa = rd_base_va;
		break;

	case RD_MEM_NORMAL_CODE:
	case RD_MEM_NORMAL_RODATA:
	{
		/* Code/rodata are served directly from the loaded image. */
		if (is_outside == 1) {
			ERROR("Code and rodata sections must be fully contained in the image.");
			panic();
		}

		/* Get offset into the image */
		rd_base_pa = sp_base_pa + rd_base_va - sp_base_va;
		break;
	}
	case RD_MEM_NORMAL_DATA:
	{
		/*
		 * Writable data gets a fresh heap copy so the image's
		 * initial values remain pristine.
		 */
		if (is_outside == 1) {
			ERROR("Data sections must be fully contained in the image.");
			panic();
		}

		rd_base_pa = spm_alloc_heap(rd_size);

		/* Get offset into the image */
		void *img_pa = (void *)(sp_base_pa + rd_base_va - sp_base_va);

		VERBOSE("  Copying data from %p to 0x%llx\n", img_pa, rd_base_pa);

		/* Map destination at EL3 so it can be written here. */
		rc = mmap_add_dynamic_region(rd_base_pa, rd_base_pa,
				rd_size, MT_MEMORY | MT_RW | MT_SECURE);
		if (rc != 0) {
			ERROR("Unable to map data region at EL3: %d\n", rc);
			panic();
		}

		/* Copy original data to destination */
		memcpy((void *)rd_base_pa, img_pa, rd_size);

		/* Unmap destination region */
		rc = mmap_remove_dynamic_region(rd_base_pa, rd_size);
		if (rc != 0) {
			ERROR("Unable to remove data region at EL3: %d\n", rc);
			panic();
		}

		break;
	}
	case RD_MEM_NORMAL_MISCELLANEOUS:
		/* Allow SPM to change the attributes of the region. */
		mmap.granularity = PAGE_SIZE;
		rd_base_pa = spm_alloc_heap(rd_size);
		zero_region = 1;
		break;

	case RD_MEM_NORMAL_SPM_SP_SHARED_MEM:
		/* At most one SPM<->SP buffer is allowed per partition. */
		if ((sp_ctx->spm_sp_buffer_base != 0) ||
		    (sp_ctx->spm_sp_buffer_size != 0)) {
			ERROR("A partition must have only one SPM<->SP buffer.\n");
			panic();
		}
		rd_base_pa = spm_alloc_heap(rd_size);
		zero_region = 1;
		/* Save location of this buffer, it is needed by SPM */
		sp_ctx->spm_sp_buffer_base = rd_base_pa;
		sp_ctx->spm_sp_buffer_size = rd_size;
		break;

	case RD_MEM_NORMAL_CLIENT_SHARED_MEM:
		/* Fallthrough */
	case RD_MEM_NORMAL_BSS:
		rd_base_pa = spm_alloc_heap(rd_size);
		zero_region = 1;
		break;

	default:
		panic();
	}

	mmap.base_pa = rd_base_pa;
	mmap.base_va = rd_base_va;
	mmap.size = rd_size;

	/* Only S-EL0 mappings supported for now */
	mmap.attr = rdmem_attr_to_mmap_attr(rdmem->attr) | MT_USER;

	VERBOSE("  VA: 0x%lx PA: 0x%llx (0x%lx, attr: 0x%x)\n",
		mmap.base_va, mmap.base_pa, mmap.size, mmap.attr);

	/* Map region in the context of the Secure Partition */
	mmap_add_region_ctx(sp_ctx->xlat_ctx_handle, &mmap);

	if (zero_region == 1) {
		VERBOSE("  Zeroing region...\n");

		/* Temporarily map the backing memory at EL3 to zero it. */
		rc = mmap_add_dynamic_region(mmap.base_pa, mmap.base_pa,
				mmap.size, MT_MEMORY | MT_RW | MT_SECURE);
		if (rc != 0) {
			ERROR("Unable to map memory at EL3 to zero: %d\n",
			      rc);
			panic();
		}

		zeromem((void *)mmap.base_pa, mmap.size);

		/*
		 * Unmap destination region unless it is the SPM<->SP buffer,
		 * which must be used by SPM.
		 */
		if (memtype != RD_MEM_NORMAL_SPM_SP_SHARED_MEM) {
			rc = mmap_remove_dynamic_region(rd_base_pa, rd_size);
			if (rc != 0) {
				ERROR("Unable to remove region at EL3: %d\n", rc);
				panic();
			}
		}
	}
}
317
318void sp_map_memory_regions(sp_context_t *sp_ctx)
319{
Antonio Nino Diazbb7d1cd2018-10-30 11:34:23 +0000320 struct sp_rd_sect_mem_region *rdmem;
321
322 for (rdmem = sp_ctx->rd.mem_region; rdmem != NULL; rdmem = rdmem->next) {
323 map_rdmem(sp_ctx, rdmem);
324 }
325
326 init_xlat_tables_ctx(sp_ctx->xlat_ctx_handle);
327}