blob: 5f83096507c9b68ce9861000d64174acb24aadd9 [file] [log] [blame]
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +01001/*
Antonio Nino Diazeef32382019-03-29 13:48:50 +00002 * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +01003 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7#include <arch.h>
8#include <arch_helpers.h>
9#include <assert.h>
10#include <errno.h>
Antonio Nino Diazbb7d1cd2018-10-30 11:34:23 +000011#include <string.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000012
13#include <platform_def.h>
14
15#include <lib/object_pool.h>
16#include <lib/utils.h>
17#include <lib/utils_def.h>
18#include <lib/xlat_tables/xlat_tables_v2.h>
19#include <plat/common/platform.h>
20#include <services/sp_res_desc.h>
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +010021
22#include "spm_private.h"
23#include "spm_shim_private.h"
24
/*******************************************************************************
 * Instantiation of translation table context
 ******************************************************************************/

/* Place translation tables by default along with the ones used by BL31. */
#ifndef PLAT_SP_IMAGE_XLAT_SECTION_NAME
#define PLAT_SP_IMAGE_XLAT_SECTION_NAME "xlat_table"
#endif

/*
 * Allocate elements of the translation contexts for the Secure Partitions.
 * Each pool below holds PLAT_SPM_MAX_PARTITIONS objects, one per partition,
 * handed out by spm_sp_xlat_context_alloc().
 */

/*
 * Allocate an array of mmap_region per partition. The extra (+1) slot holds
 * the null terminator entry the xlat library expects at the end of the list.
 */
static struct mmap_region sp_mmap_regions[PLAT_SP_IMAGE_MMAP_REGIONS + 1]
	[PLAT_SPM_MAX_PARTITIONS];
static OBJECT_POOL(sp_mmap_regions_pool, sp_mmap_regions,
	sizeof(mmap_region_t) * (PLAT_SP_IMAGE_MMAP_REGIONS + 1),
	PLAT_SPM_MAX_PARTITIONS);

/*
 * Allocate individual translation tables. The (+1) per partition mirrors the
 * allocation request made in spm_sp_xlat_context_alloc() — NOTE(review):
 * presumably one extra table beyond PLAT_SP_IMAGE_MAX_XLAT_TABLES is kept for
 * internal xlat-library use; confirm against xlat_setup_dynamic_ctx().
 */
static uint64_t sp_xlat_tables[XLAT_TABLE_ENTRIES]
	[(PLAT_SP_IMAGE_MAX_XLAT_TABLES + 1) * PLAT_SPM_MAX_PARTITIONS]
	__aligned(XLAT_TABLE_SIZE) __section(PLAT_SP_IMAGE_XLAT_SECTION_NAME);
static OBJECT_POOL(sp_xlat_tables_pool, sp_xlat_tables,
	XLAT_TABLE_ENTRIES * sizeof(uint64_t),
	(PLAT_SP_IMAGE_MAX_XLAT_TABLES + 1) * PLAT_SPM_MAX_PARTITIONS);

/*
 * Allocate base translation tables. Their size (and required alignment)
 * depends on the virtual address space width, hence the
 * GET_NUM_BASE_LEVEL_ENTRIES() sizing.
 */
static uint64_t sp_xlat_base_tables
	[GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)]
	[PLAT_SPM_MAX_PARTITIONS]
	__aligned(GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)
		* sizeof(uint64_t))
	__section(PLAT_SP_IMAGE_XLAT_SECTION_NAME);
static OBJECT_POOL(sp_xlat_base_tables_pool, sp_xlat_base_tables,
	GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE) * sizeof(uint64_t),
	PLAT_SPM_MAX_PARTITIONS);

/* Allocate arrays of bookkeeping flags ("mapped regions") per partition. */
static int sp_xlat_mapped_regions[PLAT_SP_IMAGE_MAX_XLAT_TABLES]
	[PLAT_SPM_MAX_PARTITIONS];
static OBJECT_POOL(sp_xlat_mapped_regions_pool, sp_xlat_mapped_regions,
	sizeof(int) * PLAT_SP_IMAGE_MAX_XLAT_TABLES, PLAT_SPM_MAX_PARTITIONS);

/* Allocate individual translation contexts, one per partition. */
static xlat_ctx_t sp_xlat_ctx[PLAT_SPM_MAX_PARTITIONS];
static OBJECT_POOL(sp_xlat_ctx_pool, sp_xlat_ctx, sizeof(xlat_ctx_t),
	PLAT_SPM_MAX_PARTITIONS);
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +010074
75/* Get handle of Secure Partition translation context */
Antonio Nino Diaz8cc23f92018-10-30 11:35:30 +000076xlat_ctx_t *spm_sp_xlat_context_alloc(void)
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +010077{
Antonio Nino Diaz675d1552018-10-30 11:36:47 +000078 xlat_ctx_t *ctx = pool_alloc(&sp_xlat_ctx_pool);
79
80 struct mmap_region *mmap = pool_alloc(&sp_mmap_regions_pool);
81
82 uint64_t *base_table = pool_alloc(&sp_xlat_base_tables_pool);
83 uint64_t **tables = pool_alloc_n(&sp_xlat_tables_pool,
84 PLAT_SP_IMAGE_MAX_XLAT_TABLES);
85
86 int *mapped_regions = pool_alloc(&sp_xlat_mapped_regions_pool);
87
88 xlat_setup_dynamic_ctx(ctx, PLAT_PHY_ADDR_SPACE_SIZE - 1,
89 PLAT_VIRT_ADDR_SPACE_SIZE - 1, mmap,
90 PLAT_SP_IMAGE_MMAP_REGIONS, tables,
91 PLAT_SP_IMAGE_MAX_XLAT_TABLES, base_table,
92 EL1_EL0_REGIME, mapped_regions);
93
94 return ctx;
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +010095};
96
/*******************************************************************************
 * Functions to allocate memory for regions.
 ******************************************************************************/

/*
 * The region with base PLAT_SPM_HEAP_BASE and size PLAT_SPM_HEAP_SIZE is
 * reserved for SPM to use as heap to allocate memory regions of Secure
 * Partitions. This is only done at boot.
 *
 * The element size is 1 byte (1U) so allocations from this pool are
 * byte-granular; allocations are never freed.
 */
static OBJECT_POOL(spm_heap_mem, (void *)PLAT_SPM_HEAP_BASE, 1U,
		   PLAT_SPM_HEAP_SIZE);
108
109static uintptr_t spm_alloc_heap(size_t size)
110{
111 return (uintptr_t)pool_alloc_n(&spm_heap_mem, size);
112}
113
114/*******************************************************************************
115 * Functions to map memory regions described in the resource description.
116 ******************************************************************************/
117static unsigned int rdmem_attr_to_mmap_attr(uint32_t attr)
118{
119 unsigned int index = attr & RD_MEM_MASK;
120
121 const unsigned int mmap_attr_arr[8] = {
122 MT_DEVICE | MT_RW | MT_SECURE, /* RD_MEM_DEVICE */
123 MT_CODE | MT_SECURE, /* RD_MEM_NORMAL_CODE */
124 MT_MEMORY | MT_RW | MT_SECURE, /* RD_MEM_NORMAL_DATA */
125 MT_MEMORY | MT_RW | MT_SECURE, /* RD_MEM_NORMAL_BSS */
126 MT_RO_DATA | MT_SECURE, /* RD_MEM_NORMAL_RODATA */
127 MT_MEMORY | MT_RW | MT_SECURE, /* RD_MEM_NORMAL_SPM_SP_SHARED_MEM */
128 MT_MEMORY | MT_RW | MT_SECURE, /* RD_MEM_NORMAL_CLIENT_SHARED_MEM */
129 MT_MEMORY | MT_RW | MT_SECURE /* RD_MEM_NORMAL_MISCELLANEOUS */
130 };
131
132 if (index >= ARRAY_SIZE(mmap_attr_arr)) {
133 ERROR("Unsupported RD memory attributes 0x%x\n", attr);
134 panic();
135 }
136
137 return mmap_attr_arr[index];
138}
139
/*
 * The data provided in the resource description structure is not directly
 * compatible with a mmap_region structure. This function handles the conversion
 * and maps it.
 *
 * Depending on the region's memory type it either maps the region 1:1
 * (device), points it into the SP image (code/rodata), or backs it with
 * freshly allocated SPM heap memory (data/BSS/shared/misc). Data regions are
 * initialised by copying from the image; BSS/shared/misc regions are zeroed.
 * Any failure panics — this runs only at boot, so there is no error recovery.
 */
static void map_rdmem(sp_context_t *sp_ctx, struct sp_rd_sect_mem_region *rdmem)
{
	int rc;
	mmap_region_t mmap;

	/* Location of the SP image */
	uintptr_t sp_size = sp_ctx->image_size;
	uintptr_t sp_base_va = sp_ctx->rd.attribute.load_address;
	unsigned long long sp_base_pa = sp_ctx->image_base;

	/* Location of the memory region to map */
	size_t rd_size = rdmem->size;
	uintptr_t rd_base_va = rdmem->base;
	unsigned long long rd_base_pa;

	unsigned int memtype = rdmem->attr & RD_MEM_MASK;

	/* Empty regions carry no mapping work; skip them silently. */
	if (rd_size == 0U) {
		VERBOSE("Memory region '%s' is empty. Ignored.\n", rdmem->name);
		return;
	}

	VERBOSE("Adding memory region '%s'\n", rdmem->name);

	mmap.granularity = REGION_DEFAULT_GRANULARITY;

	/* Check if the RD region is inside of the SP image or not */
	int is_outside = (rd_base_va + rd_size <= sp_base_va) ||
			 (sp_base_va + sp_size <= rd_base_va);

	/* Set to 1 if it is needed to zero this region */
	int zero_region = 0;

	switch (memtype) {
	case RD_MEM_DEVICE:
		/* Device regions are mapped 1:1 */
		rd_base_pa = rd_base_va;
		break;

	case RD_MEM_NORMAL_CODE:
	case RD_MEM_NORMAL_RODATA:
	{
		/* Code/rodata must overlap the image entirely — they are
		 * mapped directly from the loaded image, never copied. */
		if (is_outside == 1) {
			ERROR("Code and rodata sections must be fully contained in the image.");
			panic();
		}

		/* Get offset into the image */
		rd_base_pa = sp_base_pa + rd_base_va - sp_base_va;
		break;
	}
	case RD_MEM_NORMAL_DATA:
	{
		/* Data also originates in the image, but gets a private
		 * writable copy on the SPM heap. */
		if (is_outside == 1) {
			ERROR("Data sections must be fully contained in the image.");
			panic();
		}

		rd_base_pa = spm_alloc_heap(rd_size);

		/* Get offset into the image */
		void *img_pa = (void *)(sp_base_pa + rd_base_va - sp_base_va);

		VERBOSE(" Copying data from %p to 0x%llx\n", img_pa, rd_base_pa);

		/* Map destination (temporarily, in EL3's own tables) so the
		 * copy below can write through it. */
		rc = mmap_add_dynamic_region(rd_base_pa, rd_base_pa,
				rd_size, MT_MEMORY | MT_RW | MT_SECURE);
		if (rc != 0) {
			ERROR("Unable to map data region at EL3: %d\n", rc);
			panic();
		}

		/* Copy original data to destination */
		memcpy((void *)rd_base_pa, img_pa, rd_size);

		/* Unmap destination region */
		rc = mmap_remove_dynamic_region(rd_base_pa, rd_size);
		if (rc != 0) {
			ERROR("Unable to remove data region at EL3: %d\n", rc);
			panic();
		}

		break;
	}
	case RD_MEM_NORMAL_MISCELLANEOUS:
		/* Allow SPM to change the attributes of the region. */
		mmap.granularity = PAGE_SIZE;
		rd_base_pa = spm_alloc_heap(rd_size);
		zero_region = 1;
		break;

	case RD_MEM_NORMAL_SPM_SP_SHARED_MEM:
		/* Exactly one SPM<->SP buffer is allowed per partition;
		 * non-zero base/size means one was already declared. */
		if ((sp_ctx->spm_sp_buffer_base != 0) ||
		    (sp_ctx->spm_sp_buffer_size != 0)) {
			ERROR("A partition must have only one SPM<->SP buffer.\n");
			panic();
		}
		rd_base_pa = spm_alloc_heap(rd_size);
		zero_region = 1;
		/* Save location of this buffer, it is needed by SPM */
		sp_ctx->spm_sp_buffer_base = rd_base_pa;
		sp_ctx->spm_sp_buffer_size = rd_size;
		break;

	case RD_MEM_NORMAL_CLIENT_SHARED_MEM:
		/* Fallthrough */
	case RD_MEM_NORMAL_BSS:
		rd_base_pa = spm_alloc_heap(rd_size);
		zero_region = 1;
		break;

	default:
		panic();
	}

	mmap.base_pa = rd_base_pa;
	mmap.base_va = rd_base_va;
	mmap.size = rd_size;

	/* Only S-EL0 mappings supported for now */
	mmap.attr = rdmem_attr_to_mmap_attr(rdmem->attr) | MT_USER;

	VERBOSE(" VA: 0x%lx PA: 0x%llx (0x%lx, attr: 0x%x)\n",
		mmap.base_va, mmap.base_pa, mmap.size, mmap.attr);

	/* Map region in the context of the Secure Partition */
	mmap_add_region_ctx(sp_ctx->xlat_ctx_handle, &mmap);

	if (zero_region == 1) {
		VERBOSE("  Zeroing region...\n");

		/* Temporarily map the region at EL3 so zeromem() can reach it. */
		rc = mmap_add_dynamic_region(mmap.base_pa, mmap.base_pa,
				mmap.size, MT_MEMORY | MT_RW | MT_SECURE);
		if (rc != 0) {
			ERROR("Unable to map memory at EL3 to zero: %d\n",
			      rc);
			panic();
		}

		zeromem((void *)mmap.base_pa, mmap.size);

		/*
		 * Unmap destination region unless it is the SPM<->SP buffer,
		 * which must be used by SPM.
		 */
		if (memtype != RD_MEM_NORMAL_SPM_SP_SHARED_MEM) {
			rc = mmap_remove_dynamic_region(rd_base_pa, rd_size);
			if (rc != 0) {
				ERROR("Unable to remove region at EL3: %d\n", rc);
				panic();
			}
		}
	}
}
300
301void sp_map_memory_regions(sp_context_t *sp_ctx)
302{
303 /* This region contains the exception vectors used at S-EL1. */
304 const mmap_region_t sel1_exception_vectors =
305 MAP_REGION_FLAT(SPM_SHIM_EXCEPTIONS_START,
306 SPM_SHIM_EXCEPTIONS_SIZE,
307 MT_CODE | MT_SECURE | MT_PRIVILEGED);
308
309 mmap_add_region_ctx(sp_ctx->xlat_ctx_handle,
310 &sel1_exception_vectors);
311
312 struct sp_rd_sect_mem_region *rdmem;
313
314 for (rdmem = sp_ctx->rd.mem_region; rdmem != NULL; rdmem = rdmem->next) {
315 map_rdmem(sp_ctx, rdmem);
316 }
317
318 init_xlat_tables_ctx(sp_ctx->xlat_ctx_handle);
319}