blob: 443b0b983f03fbe0a6f1e06666f23e7c43ff43a1 [file] [log] [blame]
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +01001/*
2 * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7#include <arch.h>
8#include <arch_helpers.h>
9#include <assert.h>
10#include <errno.h>
Antonio Nino Diazbb7d1cd2018-10-30 11:34:23 +000011#include <object_pool.h>
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +010012#include <platform_def.h>
13#include <platform.h>
Antonio Nino Diazbb7d1cd2018-10-30 11:34:23 +000014#include <sp_res_desc.h>
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +010015#include <spm_svc.h>
Antonio Nino Diazbb7d1cd2018-10-30 11:34:23 +000016#include <string.h>
17#include <utils.h>
Antonio Nino Diaz675d1552018-10-30 11:36:47 +000018#include <utils_def.h>
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +010019#include <xlat_tables_v2.h>
20
21#include "spm_private.h"
22#include "spm_shim_private.h"
23
/*******************************************************************************
 * Instantiation of translation table context
 ******************************************************************************/

/* Lock used for SP_MEMORY_ATTRIBUTES_GET and SP_MEMORY_ATTRIBUTES_SET */
static spinlock_t mem_attr_smc_lock;

/* Place translation tables by default along with the ones used by BL31. */
#ifndef PLAT_SP_IMAGE_XLAT_SECTION_NAME
#define PLAT_SP_IMAGE_XLAT_SECTION_NAME	"xlat_table"
#endif

/*
 * Allocate elements of the translation contexts for the Secure Partitions.
 * Each pool below hands out one per-partition chunk via pool_alloc() from
 * spm_sp_xlat_context_alloc(); nothing is ever freed back.
 */

/*
 * Allocate an array of mmap_region per partition. The extra entry
 * (PLAT_SP_IMAGE_MMAP_REGIONS + 1) is presumably the zero-sized terminator
 * the xlat library expects at the end of a region list — TODO confirm.
 */
static struct mmap_region sp_mmap_regions[PLAT_SP_IMAGE_MMAP_REGIONS + 1]
	[PLAT_SPM_MAX_PARTITIONS];
static OBJECT_POOL(sp_mmap_regions_pool, sp_mmap_regions,
	sizeof(mmap_region_t) * (PLAT_SP_IMAGE_MMAP_REGIONS + 1),
	PLAT_SPM_MAX_PARTITIONS);

/*
 * Allocate individual translation tables. NOTE(review): one table of
 * headroom per partition (MAX_XLAT_TABLES + 1) — verify against the xlat
 * library's worst-case table usage.
 */
static uint64_t sp_xlat_tables[XLAT_TABLE_ENTRIES]
	[(PLAT_SP_IMAGE_MAX_XLAT_TABLES + 1) * PLAT_SPM_MAX_PARTITIONS]
	__aligned(XLAT_TABLE_SIZE) __section(PLAT_SP_IMAGE_XLAT_SECTION_NAME);
static OBJECT_POOL(sp_xlat_tables_pool, sp_xlat_tables,
	XLAT_TABLE_ENTRIES * sizeof(uint64_t),
	(PLAT_SP_IMAGE_MAX_XLAT_TABLES + 1) * PLAT_SPM_MAX_PARTITIONS);

/*
 * Allocate base translation tables. The base table alignment follows its own
 * size in bytes (number of base-level entries * 8), as required for the
 * level chosen by PLAT_VIRT_ADDR_SPACE_SIZE.
 */
static uint64_t sp_xlat_base_tables
	[GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)]
	[PLAT_SPM_MAX_PARTITIONS]
	__aligned(GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)
		* sizeof(uint64_t))
	__section(PLAT_SP_IMAGE_XLAT_SECTION_NAME);
static OBJECT_POOL(sp_xlat_base_tables_pool, sp_xlat_base_tables,
	GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE) * sizeof(uint64_t),
	PLAT_SPM_MAX_PARTITIONS);

/* Allocate arrays. One "mapped regions" bookkeeping slot per xlat table. */
static int sp_xlat_mapped_regions[PLAT_SP_IMAGE_MAX_XLAT_TABLES]
	[PLAT_SPM_MAX_PARTITIONS];
static OBJECT_POOL(sp_xlat_mapped_regions_pool, sp_xlat_mapped_regions,
	sizeof(int) * PLAT_SP_IMAGE_MAX_XLAT_TABLES, PLAT_SPM_MAX_PARTITIONS);

/* Allocate individual contexts. */
static xlat_ctx_t sp_xlat_ctx[PLAT_SPM_MAX_PARTITIONS];
static OBJECT_POOL(sp_xlat_ctx_pool, sp_xlat_ctx, sizeof(xlat_ctx_t),
	PLAT_SPM_MAX_PARTITIONS);
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +010076
77/* Get handle of Secure Partition translation context */
Antonio Nino Diaz8cc23f92018-10-30 11:35:30 +000078xlat_ctx_t *spm_sp_xlat_context_alloc(void)
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +010079{
Antonio Nino Diaz675d1552018-10-30 11:36:47 +000080 xlat_ctx_t *ctx = pool_alloc(&sp_xlat_ctx_pool);
81
82 struct mmap_region *mmap = pool_alloc(&sp_mmap_regions_pool);
83
84 uint64_t *base_table = pool_alloc(&sp_xlat_base_tables_pool);
85 uint64_t **tables = pool_alloc_n(&sp_xlat_tables_pool,
86 PLAT_SP_IMAGE_MAX_XLAT_TABLES);
87
88 int *mapped_regions = pool_alloc(&sp_xlat_mapped_regions_pool);
89
90 xlat_setup_dynamic_ctx(ctx, PLAT_PHY_ADDR_SPACE_SIZE - 1,
91 PLAT_VIRT_ADDR_SPACE_SIZE - 1, mmap,
92 PLAT_SP_IMAGE_MMAP_REGIONS, tables,
93 PLAT_SP_IMAGE_MAX_XLAT_TABLES, base_table,
94 EL1_EL0_REGIME, mapped_regions);
95
96 return ctx;
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +010097};
98
/*******************************************************************************
 * Functions to allocate memory for regions.
 ******************************************************************************/

/*
 * The region with base PLAT_SPM_HEAP_BASE and size PLAT_SPM_HEAP_SIZE is
 * reserved for SPM to use as heap to allocate memory regions of Secure
 * Partitions. This is only done at boot.
 * Element size 1U makes pool_alloc_n() behave as a byte-granular bump
 * allocator over that region.
 */
static OBJECT_POOL(spm_heap_mem, (void *)PLAT_SPM_HEAP_BASE, 1U,
		   PLAT_SPM_HEAP_SIZE);

/*
 * Carve 'size' bytes out of the SPM heap and return their physical address.
 * There is no corresponding free; exhaustion panics inside pool_alloc_n().
 */
static uintptr_t spm_alloc_heap(size_t size)
{
	return (uintptr_t)pool_alloc_n(&spm_heap_mem, size);
}
115
116/*******************************************************************************
117 * Functions to map memory regions described in the resource description.
118 ******************************************************************************/
119static unsigned int rdmem_attr_to_mmap_attr(uint32_t attr)
120{
121 unsigned int index = attr & RD_MEM_MASK;
122
123 const unsigned int mmap_attr_arr[8] = {
124 MT_DEVICE | MT_RW | MT_SECURE, /* RD_MEM_DEVICE */
125 MT_CODE | MT_SECURE, /* RD_MEM_NORMAL_CODE */
126 MT_MEMORY | MT_RW | MT_SECURE, /* RD_MEM_NORMAL_DATA */
127 MT_MEMORY | MT_RW | MT_SECURE, /* RD_MEM_NORMAL_BSS */
128 MT_RO_DATA | MT_SECURE, /* RD_MEM_NORMAL_RODATA */
129 MT_MEMORY | MT_RW | MT_SECURE, /* RD_MEM_NORMAL_SPM_SP_SHARED_MEM */
130 MT_MEMORY | MT_RW | MT_SECURE, /* RD_MEM_NORMAL_CLIENT_SHARED_MEM */
131 MT_MEMORY | MT_RW | MT_SECURE /* RD_MEM_NORMAL_MISCELLANEOUS */
132 };
133
134 if (index >= ARRAY_SIZE(mmap_attr_arr)) {
135 ERROR("Unsupported RD memory attributes 0x%x\n", attr);
136 panic();
137 }
138
139 return mmap_attr_arr[index];
140}
141
/*
 * The data provided in the resource description structure is not directly
 * compatible with a mmap_region structure. This function handles the
 * conversion and maps it in the Secure Partition's translation context.
 *
 * sp_ctx  - context of the partition being set up; its xlat context and,
 *           for the SPM<->SP buffer, its buffer base/size are updated.
 * rdmem   - one memory region node from the resource description list.
 *
 * Regions backed by the SPM heap (data/bss/shared/misc) are allocated here;
 * code/rodata map straight into the loaded image; device regions map 1:1.
 * Any unsupported region type panics.
 */
static void map_rdmem(sp_context_t *sp_ctx, struct sp_rd_sect_mem_region *rdmem)
{
	int rc;
	mmap_region_t mmap;

	/* Location of the SP image */
	uintptr_t sp_size = sp_ctx->image_size;
	uintptr_t sp_base_va = sp_ctx->rd.attribute.load_address;
	unsigned long long sp_base_pa = sp_ctx->image_base;

	/* Location of the memory region to map */
	size_t rd_size = rdmem->size;
	uintptr_t rd_base_va = rdmem->base;
	unsigned long long rd_base_pa;

	unsigned int memtype = rdmem->attr & RD_MEM_MASK;

	VERBOSE("Adding memory region '%s'\n", rdmem->name);

	mmap.granularity = REGION_DEFAULT_GRANULARITY;

	/* Check if the RD region is inside of the SP image or not */
	int is_outside = (rd_base_va + rd_size <= sp_base_va) ||
			 (sp_base_va + sp_size <= rd_base_va);

	/* Set to 1 if it is needed to zero this region */
	int zero_region = 0;

	switch (memtype) {
	case RD_MEM_DEVICE:
		/* Device regions are mapped 1:1 */
		rd_base_pa = rd_base_va;
		break;

	case RD_MEM_NORMAL_CODE:
	case RD_MEM_NORMAL_RODATA:
	{
		if (is_outside == 1) {
			ERROR("Code and rodata sections must be fully contained in the image.");
			panic();
		}

		/* Get offset into the image */
		rd_base_pa = sp_base_pa + rd_base_va - sp_base_va;
		break;
	}
	case RD_MEM_NORMAL_DATA:
	{
		if (is_outside == 1) {
			ERROR("Data sections must be fully contained in the image.");
			panic();
		}

		/* Writable data gets a private heap copy of the image data. */
		rd_base_pa = spm_alloc_heap(rd_size);

		/* Get offset into the image */
		void *img_pa = (void *)(sp_base_pa + rd_base_va - sp_base_va);

		VERBOSE("  Copying data from %p to 0x%llx\n", img_pa, rd_base_pa);

		/*
		 * Map destination at EL3 temporarily so this (EL3) code can
		 * write to it; it is unmapped again right after the copy.
		 */
		rc = mmap_add_dynamic_region(rd_base_pa, rd_base_pa,
				rd_size, MT_MEMORY | MT_RW | MT_SECURE);
		if (rc != 0) {
			ERROR("Unable to map data region at EL3: %d\n", rc);
			panic();
		}

		/* Copy original data to destination */
		memcpy((void *)rd_base_pa, img_pa, rd_size);

		/* Unmap destination region */
		rc = mmap_remove_dynamic_region(rd_base_pa, rd_size);
		if (rc != 0) {
			ERROR("Unable to remove data region at EL3: %d\n", rc);
			panic();
		}

		break;
	}
	case RD_MEM_NORMAL_MISCELLANEOUS:
		/* Allow SPM to change the attributes of the region. */
		mmap.granularity = PAGE_SIZE;
		rd_base_pa = spm_alloc_heap(rd_size);
		zero_region = 1;
		break;

	case RD_MEM_NORMAL_SPM_SP_SHARED_MEM:
		/* Reject a second SPM<->SP buffer for the same partition. */
		if ((sp_ctx->spm_sp_buffer_base != 0) ||
		    (sp_ctx->spm_sp_buffer_size != 0)) {
			ERROR("A partition must have only one SPM<->SP buffer.\n");
			panic();
		}
		rd_base_pa = spm_alloc_heap(rd_size);
		zero_region = 1;
		/* Save location of this buffer, it is needed by SPM */
		sp_ctx->spm_sp_buffer_base = rd_base_pa;
		sp_ctx->spm_sp_buffer_size = rd_size;
		break;

	case RD_MEM_NORMAL_CLIENT_SHARED_MEM:
		/* Fallthrough */
	case RD_MEM_NORMAL_BSS:
		rd_base_pa = spm_alloc_heap(rd_size);
		zero_region = 1;
		break;

	default:
		panic();
	}

	mmap.base_pa = rd_base_pa;
	mmap.base_va = rd_base_va;
	mmap.size = rd_size;

	/* Only S-EL0 mappings supported for now */
	mmap.attr = rdmem_attr_to_mmap_attr(rdmem->attr) | MT_USER;

	VERBOSE("  VA: 0x%lx PA: 0x%llx (0x%lx, attr: 0x%x)\n",
		mmap.base_va, mmap.base_pa, mmap.size, mmap.attr);

	/* Map region in the context of the Secure Partition */
	mmap_add_region_ctx(sp_ctx->xlat_ctx_handle, &mmap);

	if (zero_region == 1) {
		VERBOSE("  Zeroing region...\n");

		/* Temporary EL3 mapping, same pattern as the data copy above. */
		rc = mmap_add_dynamic_region(mmap.base_pa, mmap.base_pa,
				mmap.size, MT_MEMORY | MT_RW | MT_SECURE);
		if (rc != 0) {
			ERROR("Unable to map memory at EL3 to zero: %d\n",
			      rc);
			panic();
		}

		zeromem((void *)mmap.base_pa, mmap.size);

		/*
		 * Unmap destination region unless it is the SPM<->SP buffer,
		 * which must be used by SPM.
		 */
		if (memtype != RD_MEM_NORMAL_SPM_SP_SHARED_MEM) {
			rc = mmap_remove_dynamic_region(rd_base_pa, rd_size);
			if (rc != 0) {
				ERROR("Unable to remove region at EL3: %d\n", rc);
				panic();
			}
		}
	}
}
297
298void sp_map_memory_regions(sp_context_t *sp_ctx)
299{
300 /* This region contains the exception vectors used at S-EL1. */
301 const mmap_region_t sel1_exception_vectors =
302 MAP_REGION_FLAT(SPM_SHIM_EXCEPTIONS_START,
303 SPM_SHIM_EXCEPTIONS_SIZE,
304 MT_CODE | MT_SECURE | MT_PRIVILEGED);
305
306 mmap_add_region_ctx(sp_ctx->xlat_ctx_handle,
307 &sel1_exception_vectors);
308
309 struct sp_rd_sect_mem_region *rdmem;
310
311 for (rdmem = sp_ctx->rd.mem_region; rdmem != NULL; rdmem = rdmem->next) {
312 map_rdmem(sp_ctx, rdmem);
313 }
314
315 init_xlat_tables_ctx(sp_ctx->xlat_ctx_handle);
316}
317
318/*******************************************************************************
319 * Functions to manipulate memory regions
320 ******************************************************************************/
321
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +0100322/*
323 * Attributes are encoded using a different format in the SMC interface than in
324 * the Trusted Firmware, where the mmap_attr_t enum type is used. This function
325 * converts an attributes value from the SMC format to the mmap_attr_t format by
326 * setting MT_RW/MT_RO, MT_USER/MT_PRIVILEGED and MT_EXECUTE/MT_EXECUTE_NEVER.
327 * The other fields are left as 0 because they are ignored by the function
Antonio Nino Diaz6c4c9ee2018-08-05 15:34:10 +0100328 * xlat_change_mem_attributes_ctx().
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +0100329 */
330static unsigned int smc_attr_to_mmap_attr(unsigned int attributes)
331{
332 unsigned int tf_attr = 0U;
333
334 unsigned int access = (attributes & SP_MEMORY_ATTRIBUTES_ACCESS_MASK)
335 >> SP_MEMORY_ATTRIBUTES_ACCESS_SHIFT;
336
337 if (access == SP_MEMORY_ATTRIBUTES_ACCESS_RW) {
338 tf_attr |= MT_RW | MT_USER;
339 } else if (access == SP_MEMORY_ATTRIBUTES_ACCESS_RO) {
340 tf_attr |= MT_RO | MT_USER;
341 } else {
342 /* Other values are reserved. */
343 assert(access == SP_MEMORY_ATTRIBUTES_ACCESS_NOACCESS);
344 /* The only requirement is that there's no access from EL0 */
345 tf_attr |= MT_RO | MT_PRIVILEGED;
346 }
347
348 if ((attributes & SP_MEMORY_ATTRIBUTES_NON_EXEC) == 0) {
349 tf_attr |= MT_EXECUTE;
350 } else {
351 tf_attr |= MT_EXECUTE_NEVER;
352 }
353
354 return tf_attr;
355}
356
357/*
358 * This function converts attributes from the Trusted Firmware format into the
359 * SMC interface format.
360 */
361static unsigned int smc_mmap_to_smc_attr(unsigned int attr)
362{
363 unsigned int smc_attr = 0U;
364
365 unsigned int data_access;
366
367 if ((attr & MT_USER) == 0) {
368 /* No access from EL0. */
369 data_access = SP_MEMORY_ATTRIBUTES_ACCESS_NOACCESS;
370 } else {
371 if ((attr & MT_RW) != 0) {
372 assert(MT_TYPE(attr) != MT_DEVICE);
373 data_access = SP_MEMORY_ATTRIBUTES_ACCESS_RW;
374 } else {
375 data_access = SP_MEMORY_ATTRIBUTES_ACCESS_RO;
376 }
377 }
378
379 smc_attr |= (data_access & SP_MEMORY_ATTRIBUTES_ACCESS_MASK)
380 << SP_MEMORY_ATTRIBUTES_ACCESS_SHIFT;
381
382 if ((attr & MT_EXECUTE_NEVER) != 0U) {
383 smc_attr |= SP_MEMORY_ATTRIBUTES_NON_EXEC;
384 }
385
386 return smc_attr;
387}
388
Antonio Nino Diaz28759312018-05-22 16:26:48 +0100389int32_t spm_memory_attributes_get_smc_handler(sp_context_t *sp_ctx,
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +0100390 uintptr_t base_va)
391{
392 uint32_t attributes;
393
394 spin_lock(&mem_attr_smc_lock);
395
Antonio Nino Diaz6c4c9ee2018-08-05 15:34:10 +0100396 int rc = xlat_get_mem_attributes_ctx(sp_ctx->xlat_ctx_handle,
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +0100397 base_va, &attributes);
398
399 spin_unlock(&mem_attr_smc_lock);
400
Antonio Nino Diaz6c4c9ee2018-08-05 15:34:10 +0100401 /* Convert error codes of xlat_get_mem_attributes_ctx() into SPM. */
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +0100402 assert((rc == 0) || (rc == -EINVAL));
403
404 if (rc == 0) {
405 return (int32_t) smc_mmap_to_smc_attr(attributes);
406 } else {
407 return SPM_INVALID_PARAMETER;
408 }
409}
410
Antonio Nino Diaz28759312018-05-22 16:26:48 +0100411int spm_memory_attributes_set_smc_handler(sp_context_t *sp_ctx,
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +0100412 u_register_t page_address,
413 u_register_t pages_count,
414 u_register_t smc_attributes)
415{
416 uintptr_t base_va = (uintptr_t) page_address;
417 size_t size = (size_t) (pages_count * PAGE_SIZE);
418 uint32_t attributes = (uint32_t) smc_attributes;
419
420 INFO(" Start address : 0x%lx\n", base_va);
421 INFO(" Number of pages: %i (%zi bytes)\n", (int) pages_count, size);
422 INFO(" Attributes : 0x%x\n", attributes);
423
424 spin_lock(&mem_attr_smc_lock);
425
Antonio Nino Diaz6c4c9ee2018-08-05 15:34:10 +0100426 int ret = xlat_change_mem_attributes_ctx(sp_ctx->xlat_ctx_handle,
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +0100427 base_va, size,
428 smc_attr_to_mmap_attr(attributes));
429
430 spin_unlock(&mem_attr_smc_lock);
431
Antonio Nino Diaz6c4c9ee2018-08-05 15:34:10 +0100432 /* Convert error codes of xlat_change_mem_attributes_ctx() into SPM. */
Antonio Nino Diaz7b28b542018-05-22 16:45:35 +0100433 assert((ret == 0) || (ret == -EINVAL));
434
435 return (ret == 0) ? SPM_SUCCESS : SPM_INVALID_PARAMETER;
436}