Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001/*
Masahiro Yamadab03d4cd2019-12-26 17:58:52 +09002 * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00003 *
dp-armfa3cf0b2017-05-03 09:38:09 +01004 * SPDX-License-Identifier: BSD-3-Clause
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00005 */
6
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00007#include <assert.h>
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00008#include <errno.h>
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +01009#include <stdbool.h>
Antonio Nino Diaz4b32e622018-08-16 16:52:57 +010010#include <stdint.h>
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +000011#include <string.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000012
13#include <platform_def.h>
14
Alexei Fedorov90f2e882019-05-24 12:17:09 +010015#include <arch_features.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000016#include <arch_helpers.h>
17#include <common/debug.h>
18#include <lib/utils_def.h>
19#include <lib/xlat_tables/xlat_tables_defs.h>
20#include <lib/xlat_tables/xlat_tables_v2.h>
Sandrine Bailleux090c8492017-05-19 09:59:37 +010021
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +000022#include "xlat_tables_private.h"
23
Antonio Nino Diaz37a5efa2018-08-07 12:47:12 +010024/* Helper function that cleans the data cache only if it is enabled. */
Varun Wadekar6bd85492019-01-30 08:31:07 -080025static inline __attribute__((unused)) void xlat_clean_dcache_range(uintptr_t addr, size_t size)
Antonio Nino Diaz37a5efa2018-08-07 12:47:12 +010026{
27 if (is_dcache_enabled())
28 clean_dcache_range(addr, size);
29}
30
Antonio Nino Diazac998032017-02-27 17:23:54 +000031#if PLAT_XLAT_TABLES_DYNAMIC
32
33/*
34 * The following functions assume that they will only be called on subtables.
35 * The base table can't be unmapped, so no special handling is needed
36 * for it.
37 */
38
39/*
40 * Returns the index of the array corresponding to the specified translation
41 * table.
42 */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010043static int xlat_table_get_index(const xlat_ctx_t *ctx, const uint64_t *table)
Antonio Nino Diazac998032017-02-27 17:23:54 +000044{
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010045 for (int i = 0; i < ctx->tables_num; i++)
Antonio Nino Diazac998032017-02-27 17:23:54 +000046 if (ctx->tables[i] == table)
47 return i;
48
49 /*
50 * Maybe we were asked to get the index of the base level table, which
51 * should never happen.
52 */
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +010053 assert(false);
Antonio Nino Diazac998032017-02-27 17:23:54 +000054
55 return -1;
56}
57
58/* Returns a pointer to an empty translation table. */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010059static uint64_t *xlat_table_get_empty(const xlat_ctx_t *ctx)
Antonio Nino Diazac998032017-02-27 17:23:54 +000060{
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010061 for (int i = 0; i < ctx->tables_num; i++)
Antonio Nino Diazac998032017-02-27 17:23:54 +000062 if (ctx->tables_mapped_regions[i] == 0)
63 return ctx->tables[i];
64
65 return NULL;
66}
67
68/* Increments region count for a given table. */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010069static void xlat_table_inc_regions_count(const xlat_ctx_t *ctx,
70 const uint64_t *table)
Antonio Nino Diazac998032017-02-27 17:23:54 +000071{
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010072 int idx = xlat_table_get_index(ctx, table);
73
74 ctx->tables_mapped_regions[idx]++;
Antonio Nino Diazac998032017-02-27 17:23:54 +000075}
76
77/* Decrements region count for a given table. */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010078static void xlat_table_dec_regions_count(const xlat_ctx_t *ctx,
79 const uint64_t *table)
Antonio Nino Diazac998032017-02-27 17:23:54 +000080{
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010081 int idx = xlat_table_get_index(ctx, table);
82
83 ctx->tables_mapped_regions[idx]--;
Antonio Nino Diazac998032017-02-27 17:23:54 +000084}
85
Antonio Nino Diazf1b84f62018-07-03 11:58:49 +010086/* Returns true if the specified table is empty, false otherwise. */
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +010087static bool xlat_table_is_empty(const xlat_ctx_t *ctx, const uint64_t *table)
Antonio Nino Diazac998032017-02-27 17:23:54 +000088{
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +010089 return ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)] == 0;
Antonio Nino Diazac998032017-02-27 17:23:54 +000090}
91
92#else /* PLAT_XLAT_TABLES_DYNAMIC */
93
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +000094/* Returns a pointer to the first empty translation table. */
95static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
96{
97 assert(ctx->next_table < ctx->tables_num);
98
99 return ctx->tables[ctx->next_table++];
100}
101
Antonio Nino Diazac998032017-02-27 17:23:54 +0000102#endif /* PLAT_XLAT_TABLES_DYNAMIC */
103
Antonio Nino Diazdcf9d922017-10-04 16:52:15 +0100104/*
105 * Returns a block or page descriptor for the given level and attributes.
106 */
Antonio Nino Diazf1b84f62018-07-03 11:58:49 +0100107uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100108 unsigned long long addr_pa, unsigned int level)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000109{
110 uint64_t desc;
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100111 uint32_t mem_type;
Pramod Kumar316b73b2020-02-19 10:39:10 +0530112 uint32_t shareability_type;
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000113
114 /* Make sure that the granularity is fine enough to map this address. */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100115 assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0U);
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000116
117 desc = addr_pa;
118 /*
119 * Level 3 entries hold page descriptors, while entries at any other level
120 * hold block descriptors.
121 */
122 desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
123 /*
Antonio Nino Diaz0842bd62018-07-12 15:54:10 +0100124 * Always set the access flag, as this library assumes access flag
125 * faults aren't managed.
126 */
127 desc |= LOWER_ATTRS(ACCESS_FLAG);
128 /*
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000129 * Deduce other fields of the descriptor based on the MT_NS and MT_RW
130 * memory region attributes.
131 */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100132 desc |= ((attr & MT_NS) != 0U) ? LOWER_ATTRS(NS) : 0U;
133 desc |= ((attr & MT_RW) != 0U) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000134
135 /*
Antonio Nino Diazdcf9d922017-10-04 16:52:15 +0100136 * Do not allow unprivileged access when the mapping is for a privileged
137 * EL. For translation regimes that do not have mappings for access for
138 * lower exception levels, set AP[2] to AP_NO_ACCESS_UNPRIVILEGED.
139 */
140 if (ctx->xlat_regime == EL1_EL0_REGIME) {
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100141 if ((attr & MT_USER) != 0U) {
Antonio Nino Diazdcf9d922017-10-04 16:52:15 +0100142 /* EL0 mapping requested, so we give User access */
143 desc |= LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED);
144 } else {
145 /* EL1 mapping requested, no User access granted */
146 desc |= LOWER_ATTRS(AP_NO_ACCESS_UNPRIVILEGED);
147 }
148 } else {
Antonio Nino Diaz128de8d2018-08-07 19:59:49 +0100149 assert((ctx->xlat_regime == EL2_REGIME) ||
150 (ctx->xlat_regime == EL3_REGIME));
Antonio Nino Diaz49074492018-04-26 12:59:08 +0100151 desc |= LOWER_ATTRS(AP_ONE_VA_RANGE_RES1);
Antonio Nino Diazdcf9d922017-10-04 16:52:15 +0100152 }
153
154 /*
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000155 * Deduce shareability domain and executability of the memory region
156 * from the memory type of the attributes (MT_TYPE).
157 *
158 * Data accesses to device memory and non-cacheable normal memory are
159 * coherent for all observers in the system, and correspondingly are
160 * always treated as being Outer Shareable. Therefore, for these 2 types
161 * of memory, it is not strictly needed to set the shareability field
162 * in the translation tables.
163 */
164 mem_type = MT_TYPE(attr);
165 if (mem_type == MT_DEVICE) {
166 desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
167 /*
168 * Always map device memory as execute-never.
169 * This is to avoid the possibility of a speculative instruction
170 * fetch, which could be an issue if this memory region
171 * corresponds to a read-sensitive peripheral.
172 */
Antonio Nino Diazdcf9d922017-10-04 16:52:15 +0100173 desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
Antonio Nino Diazefabaa92017-04-27 13:30:22 +0100174
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000175 } else { /* Normal memory */
176 /*
177 * Always map read-write normal memory as execute-never.
Antonio Nino Diaz0842bd62018-07-12 15:54:10 +0100178 * This library assumes that it is used by software that does
179 * not self-modify its code, therefore R/W memory is reserved
180 * for data storage, which must not be executable.
181 *
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000182 * Note that setting the XN bit here is for consistency only.
Antonio Nino Diazefabaa92017-04-27 13:30:22 +0100183 * The function that enables the MMU sets the SCTLR_ELx.WXN bit,
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000184 * which causes any writable memory region to be treated as
185 * execute-never, regardless of the value of the XN bit in the
186 * translation table.
187 *
188 * For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
Antonio Nino Diazdcf9d922017-10-04 16:52:15 +0100189 * attribute to figure out the value of the XN bit. The actual
190 * XN bit(s) to set in the descriptor depends on the context's
191 * translation regime and the policy applied in
192 * xlat_arch_regime_get_xn_desc().
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000193 */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100194 if (((attr & MT_RW) != 0U) || ((attr & MT_EXECUTE_NEVER) != 0U)) {
Antonio Nino Diazdcf9d922017-10-04 16:52:15 +0100195 desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
Antonio Nino Diazefabaa92017-04-27 13:30:22 +0100196 }
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000197
Pramod Kumar316b73b2020-02-19 10:39:10 +0530198 shareability_type = MT_SHAREABILITY(attr);
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000199 if (mem_type == MT_MEMORY) {
Pramod Kumar316b73b2020-02-19 10:39:10 +0530200 desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX);
201 if (shareability_type == MT_SHAREABILITY_NSH) {
202 desc |= LOWER_ATTRS(NSH);
203 } else if (shareability_type == MT_SHAREABILITY_OSH) {
204 desc |= LOWER_ATTRS(OSH);
205 } else {
206 desc |= LOWER_ATTRS(ISH);
207 }
Alexei Fedorov90f2e882019-05-24 12:17:09 +0100208
209 /* Check if Branch Target Identification is enabled */
210#if ENABLE_BTI
211 /* Set GP bit for block and page code entries
212 * if BTI mechanism is implemented.
213 */
214 if (is_armv8_5_bti_present() &&
215 ((attr & (MT_TYPE_MASK | MT_RW |
216 MT_EXECUTE_NEVER)) == MT_CODE)) {
217 desc |= GP;
218 }
219#endif
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000220 } else {
221 assert(mem_type == MT_NON_CACHEABLE);
222 desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
223 }
224 }
225
226 return desc;
227}
228
229/*
230 * Enumeration of actions that can be made when mapping table entries depending
231 * on the previous value in that entry and information about the region being
232 * mapped.
233 */
234typedef enum {
235
236 /* Do nothing */
237 ACTION_NONE,
238
239 /* Write a block (or page, if in level 3) entry. */
240 ACTION_WRITE_BLOCK_ENTRY,
241
242 /*
243 * Create a new table and write a table entry pointing to it. Recurse
244 * into it for further processing.
245 */
246 ACTION_CREATE_NEW_TABLE,
247
248 /*
249 * There is a table descriptor in this entry, read it and recurse into
250 * that table for further processing.
251 */
252 ACTION_RECURSE_INTO_TABLE,
253
254} action_t;
255
David Pu5619c802019-02-22 02:23:57 -0800256/*
257 * Function that returns the first VA of the table affected by the specified
258 * mmap region.
259 */
260static uintptr_t xlat_tables_find_start_va(mmap_region_t *mm,
261 const uintptr_t table_base_va,
262 const unsigned int level)
263{
264 uintptr_t table_idx_va;
265
266 if (mm->base_va > table_base_va) {
267 /* Find the first index of the table affected by the region. */
268 table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);
269 } else {
270 /* Start from the beginning of the table. */
271 table_idx_va = table_base_va;
272 }
273
274 return table_idx_va;
275}
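
/*
 * Worked example (illustrative, assuming a 4 KiB granule so that
 * XLAT_BLOCK_SIZE(2) is 2 MiB): for a region whose base_va is 0x40210000 and
 * a level 2 table whose table_base_va is 0x40000000, the first affected entry
 * starts at 0x40200000, i.e. base_va rounded down to the 2 MiB block boundary.
 */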
276
277/*
278 * Function that returns table index for the given VA and level arguments.
279 */
280static inline unsigned int xlat_tables_va_to_index(const uintptr_t table_base_va,
281 const uintptr_t va,
282 const unsigned int level)
283{
284 return (unsigned int)((va - table_base_va) >> XLAT_ADDR_SHIFT(level));
285}
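
/*
 * Worked example (illustrative, 4 KiB granule assumed): with table_base_va
 * 0x40000000, va 0x40600000 and level 2, XLAT_ADDR_SHIFT(2) is 21, so the
 * returned index is 0x600000 >> 21 = 3.
 */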
286
Antonio Nino Diazac998032017-02-27 17:23:54 +0000287#if PLAT_XLAT_TABLES_DYNAMIC
288
289/*
David Pu1507d412019-02-22 02:15:57 -0800290 * From the given arguments, it decides which action to take when unmapping the
291 * specified region.
292 */
293static action_t xlat_tables_unmap_region_action(const mmap_region_t *mm,
294 const uintptr_t table_idx_va, const uintptr_t table_idx_end_va,
295 const unsigned int level, const uint64_t desc_type)
296{
297 action_t action;
298 uintptr_t region_end_va = mm->base_va + mm->size - 1U;
299
300 if ((mm->base_va <= table_idx_va) &&
301 (region_end_va >= table_idx_end_va)) {
302 /* Region covers the whole block */
303
304 if (level == 3U) {
305 /*
306 * Last level, only page descriptors allowed,
307 * erase it.
308 */
309 assert(desc_type == PAGE_DESC);
310
311 action = ACTION_WRITE_BLOCK_ENTRY;
312 } else {
313 /*
314 * Other levels can have table descriptors. If
315 * so, recurse into it and erase descriptors
316 * inside it as needed. If there is a block
317 * descriptor, just erase it. If an invalid
318 * descriptor is found, this table isn't
319 * actually mapped, which shouldn't happen.
320 */
321 if (desc_type == TABLE_DESC) {
322 action = ACTION_RECURSE_INTO_TABLE;
323 } else {
324 assert(desc_type == BLOCK_DESC);
325 action = ACTION_WRITE_BLOCK_ENTRY;
326 }
327 }
328
329 } else if ((mm->base_va <= table_idx_end_va) ||
330 (region_end_va >= table_idx_va)) {
331 /*
332 * Region partially covers block.
333 *
334 * It can't happen in level 3.
335 *
336 * There must be a table descriptor here, if not there
337 * was a problem when mapping the region.
338 */
339 assert(level < 3U);
340 assert(desc_type == TABLE_DESC);
341
342 action = ACTION_RECURSE_INTO_TABLE;
343 } else {
344 /* The region doesn't cover the block at all */
345 action = ACTION_NONE;
346 }
347
348 return action;
349}
350/*
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000351 * Recursive function that writes to the translation tables and unmaps the
Antonio Nino Diazac998032017-02-27 17:23:54 +0000352 * specified region.
353 */
354static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
355 const uintptr_t table_base_va,
356 uint64_t *const table_base,
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100357 const unsigned int table_entries,
Varun Wadekar66231d12017-06-07 09:57:42 -0700358 const unsigned int level)
Antonio Nino Diazac998032017-02-27 17:23:54 +0000359{
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100360 assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));
Antonio Nino Diazac998032017-02-27 17:23:54 +0000361
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000362 uint64_t *subtable;
363 uint64_t desc;
Antonio Nino Diazac998032017-02-27 17:23:54 +0000364
365 uintptr_t table_idx_va;
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000366 uintptr_t table_idx_end_va; /* End VA of this entry */
Antonio Nino Diazac998032017-02-27 17:23:54 +0000367
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100368 uintptr_t region_end_va = mm->base_va + mm->size - 1U;
Antonio Nino Diazac998032017-02-27 17:23:54 +0000369
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000370 unsigned int table_idx;
371
David Pu5619c802019-02-22 02:23:57 -0800372 table_idx_va = xlat_tables_find_start_va(mm, table_base_va, level);
373 table_idx = xlat_tables_va_to_index(table_base_va, table_idx_va, level);
Antonio Nino Diazac998032017-02-27 17:23:54 +0000374
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000375 while (table_idx < table_entries) {
David Puce81ba12019-02-22 02:36:20 -0800376
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000377 table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1U;
David Puce81ba12019-02-22 02:36:20 -0800378
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000379 desc = table_base[table_idx];
380 uint64_t desc_type = desc & DESC_MASK;
David Puce81ba12019-02-22 02:36:20 -0800381
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000382 action_t action = xlat_tables_unmap_region_action(mm,
383 table_idx_va, table_idx_end_va, level,
384 desc_type);
David Puce81ba12019-02-22 02:36:20 -0800385
386 if (action == ACTION_WRITE_BLOCK_ENTRY) {
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000387
388 table_base[table_idx] = INVALID_DESC;
Antonio Nino Diazad5dc7f2018-07-11 09:46:45 +0100389 xlat_arch_tlbi_va(table_idx_va, ctx->xlat_regime);
Antonio Nino Diazac998032017-02-27 17:23:54 +0000390
391 } else if (action == ACTION_RECURSE_INTO_TABLE) {
392
393 subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
394
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000395 /* Recurse to write into subtable */
396 xlat_tables_unmap_region(ctx, mm, table_idx_va,
397 subtable, XLAT_TABLE_ENTRIES,
398 level + 1U);
399#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
400 xlat_clean_dcache_range((uintptr_t)subtable,
401 XLAT_TABLE_ENTRIES * sizeof(uint64_t));
402#endif
403 /*
404 * If the subtable is now empty, remove its reference.
405 */
406 if (xlat_table_is_empty(ctx, subtable)) {
407 table_base[table_idx] = INVALID_DESC;
408 xlat_arch_tlbi_va(table_idx_va,
409 ctx->xlat_regime);
410 }
David Puce81ba12019-02-22 02:36:20 -0800411
Antonio Nino Diazac998032017-02-27 17:23:54 +0000412 } else {
413 assert(action == ACTION_NONE);
David Puce81ba12019-02-22 02:36:20 -0800414 }
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000415
416 table_idx++;
417 table_idx_va += XLAT_BLOCK_SIZE(level);
418
419 /* Exit if the end of the region has been reached */
420 if (region_end_va <= table_idx_va)
421 break;
Antonio Nino Diazac998032017-02-27 17:23:54 +0000422 }
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000423
424 if (level > ctx->base_level)
425 xlat_table_dec_regions_count(ctx, table_base);
Antonio Nino Diazac998032017-02-27 17:23:54 +0000426}
427
428#endif /* PLAT_XLAT_TABLES_DYNAMIC */
429
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000430/*
431 * From the given arguments, it decides which action to take when mapping the
432 * specified region.
433 */
434static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100435 unsigned int desc_type, unsigned long long dest_pa,
436 uintptr_t table_entry_base_va, unsigned int level)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000437{
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100438 uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000439 uintptr_t table_entry_end_va =
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100440 table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1U;
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000441
442 /*
443 * The descriptor types allowed depend on the current table level.
444 */
445
446 if ((mm->base_va <= table_entry_base_va) &&
447 (mm_end_va >= table_entry_end_va)) {
448
449 /*
450 * Table entry is covered by region
451 * --------------------------------
452 *
453 * This means that this table entry can describe the whole
454 * translation with this granularity in principle.
455 */
456
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100457 if (level == 3U) {
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000458 /*
459 * Last level, only page descriptors are allowed.
460 */
461 if (desc_type == PAGE_DESC) {
462 /*
463 * There's another region mapped here, don't
464 * overwrite.
465 */
466 return ACTION_NONE;
467 } else {
468 assert(desc_type == INVALID_DESC);
469 return ACTION_WRITE_BLOCK_ENTRY;
470 }
471
472 } else {
473
474 /*
475 * Other levels. Table descriptors are allowed. Block
476 * descriptors too, but they have some limitations.
477 */
478
479 if (desc_type == TABLE_DESC) {
480 /* There's already a table, recurse into it. */
481 return ACTION_RECURSE_INTO_TABLE;
482
483 } else if (desc_type == INVALID_DESC) {
484 /*
485 * There's nothing mapped here, create a new
486 * entry.
487 *
488 * Check if the destination granularity allows
489 * us to use a block descriptor or we need a
490 * finer table for it.
491 *
492 * Also, check if the current level allows block
493 * descriptors. If not, create a table instead.
494 */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100495 if (((dest_pa & XLAT_BLOCK_MASK(level)) != 0U)
496 || (level < MIN_LVL_BLOCK_DESC) ||
Sandrine Bailleux8f23fa82017-09-28 21:58:12 +0100497 (mm->granularity < XLAT_BLOCK_SIZE(level)))
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000498 return ACTION_CREATE_NEW_TABLE;
499 else
500 return ACTION_WRITE_BLOCK_ENTRY;
501
502 } else {
503 /*
504 * There's another region mapped here, don't
505 * overwrite.
506 */
507 assert(desc_type == BLOCK_DESC);
508
509 return ACTION_NONE;
510 }
511 }
512
513 } else if ((mm->base_va <= table_entry_end_va) ||
514 (mm_end_va >= table_entry_base_va)) {
515
516 /*
517 * Region partially covers table entry
518 * -----------------------------------
519 *
520 * This means that this table entry can't describe the whole
521 * translation, a finer table is needed.
522 *
523 * There cannot be partial block overlaps in level 3. If that
524 * happens, some of the preliminary checks when adding the
525 * mmap region failed to detect that PA and VA must at least be
526 * aligned to PAGE_SIZE.
527 */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100528 assert(level < 3U);
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000529
530 if (desc_type == INVALID_DESC) {
531 /*
532 * The block is not fully covered by the region. Create
533 * a new table, recurse into it and try to map the
534 * region with finer granularity.
535 */
536 return ACTION_CREATE_NEW_TABLE;
537
538 } else {
539 assert(desc_type == TABLE_DESC);
540 /*
541 * The block is not fully covered by the region, but
542 * there is already a table here. Recurse into it and
543 * try to map with finer granularity.
544 *
545 * PAGE_DESC for level 3 has the same value as
546 * TABLE_DESC, but this code can't run on a level 3
547 * table because there can't be overlaps in level 3.
548 */
549 return ACTION_RECURSE_INTO_TABLE;
550 }
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100551 } else {
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000552
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100553 /*
554 * This table entry is outside of the region specified in the
555 * arguments, don't write anything to it.
556 */
557 return ACTION_NONE;
558 }
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000559}
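
/*
 * Example of the decision above (illustrative, 4 KiB granule assumed): a
 * 2 MiB region whose PA and VA are both 2 MiB aligned, and whose granularity
 * allows level 2 blocks, gets ACTION_WRITE_BLOCK_ENTRY when it reaches an
 * INVALID_DESC level 2 entry, so a single block descriptor covers it. If its
 * PA were only 4 KiB aligned, ACTION_CREATE_NEW_TABLE would be returned and
 * the mapping would be described with a level 3 table instead.
 */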
560
561/*
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000562 * Recursive function that writes to the translation tables and maps the
Antonio Nino Diazac998032017-02-27 17:23:54 +0000563 * specified region. On success, it returns the VA of the last byte that was
Antonio Nino Diazf1b84f62018-07-03 11:58:49 +0100564 * successfully mapped. On error, it returns the VA of the next entry that
Antonio Nino Diazac998032017-02-27 17:23:54 +0000565 * should have been mapped.
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000566 */
Antonio Nino Diazac998032017-02-27 17:23:54 +0000567static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000568 uintptr_t table_base_va,
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000569 uint64_t *const table_base,
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100570 unsigned int table_entries,
571 unsigned int level)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000572{
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100573 assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000574
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100575 uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000576
577 uintptr_t table_idx_va;
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000578 unsigned long long table_idx_pa;
579
580 uint64_t *subtable;
581 uint64_t desc;
582
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100583 unsigned int table_idx;
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000584
David Pu5619c802019-02-22 02:23:57 -0800585 table_idx_va = xlat_tables_find_start_va(mm, table_base_va, level);
586 table_idx = xlat_tables_va_to_index(table_base_va, table_idx_va, level);
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000587
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000588#if PLAT_XLAT_TABLES_DYNAMIC
589 if (level > ctx->base_level)
590 xlat_table_inc_regions_count(ctx, table_base);
Antonio Nino Diazac998032017-02-27 17:23:54 +0000591#endif
592
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000593 while (table_idx < table_entries) {
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000594
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000595 desc = table_base[table_idx];
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000596
597 table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;
598
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000599 action_t action = xlat_tables_map_region_action(mm,
600 (uint32_t)(desc & DESC_MASK), table_idx_pa,
601 table_idx_va, level);
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000602
David Pud1b7aa12019-02-22 02:31:40 -0800603 if (action == ACTION_WRITE_BLOCK_ENTRY) {
David Pud1b7aa12019-02-22 02:31:40 -0800604
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000605 table_base[table_idx] =
606 xlat_desc(ctx, (uint32_t)mm->attr, table_idx_pa,
607 level);
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000608
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000609 } else if (action == ACTION_CREATE_NEW_TABLE) {
610 uintptr_t end_va;
611
612 subtable = xlat_table_get_empty(ctx);
Antonio Nino Diazac998032017-02-27 17:23:54 +0000613 if (subtable == NULL) {
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000614 /* Not enough free tables to map this region */
Antonio Nino Diazac998032017-02-27 17:23:54 +0000615 return table_idx_va;
616 }
617
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000618 /* Point to new subtable from this one. */
Deepika Bhavnanieb2b2b62019-09-03 21:06:17 +0300619 table_base[table_idx] =
620 TABLE_DESC | (uintptr_t)subtable;
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000621
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000622 /* Recurse to write into subtable */
623 end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
624 subtable, XLAT_TABLE_ENTRIES,
625 level + 1U);
626#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
627 xlat_clean_dcache_range((uintptr_t)subtable,
628 XLAT_TABLE_ENTRIES * sizeof(uint64_t));
Antonio Nino Diaz37a5efa2018-08-07 12:47:12 +0100629#endif
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000630 if (end_va !=
631 (table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
632 return end_va;
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000633
David Pud1b7aa12019-02-22 02:31:40 -0800634 } else if (action == ACTION_RECURSE_INTO_TABLE) {
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000635 uintptr_t end_va;
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000636
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000637 subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
638 /* Recurse to write into subtable */
639 end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
640 subtable, XLAT_TABLE_ENTRIES,
641 level + 1U);
642#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
643 xlat_clean_dcache_range((uintptr_t)subtable,
644 XLAT_TABLE_ENTRIES * sizeof(uint64_t));
David Pud1b7aa12019-02-22 02:31:40 -0800645#endif
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000646 if (end_va !=
647 (table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
648 return end_va;
649
David Pud1b7aa12019-02-22 02:31:40 -0800650 } else {
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000651
David Pud1b7aa12019-02-22 02:31:40 -0800652 assert(action == ACTION_NONE);
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000653
David Pud1b7aa12019-02-22 02:31:40 -0800654 }
Antonio Nino Diazff93d442019-03-19 14:12:09 +0000655
656 table_idx++;
657 table_idx_va += XLAT_BLOCK_SIZE(level);
658
659 /* Exit if the end of the region has been reached */
660 if (mm_end_va <= table_idx_va)
661 break;
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000662 }
Antonio Nino Diazac998032017-02-27 17:23:54 +0000663
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100664 return table_idx_va - 1U;
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000665}
666
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000667/*
668 * Function that verifies that a region can be mapped.
669 * Returns:
670 * 0: Success, the mapping is allowed.
671 * EINVAL: Invalid values were used as arguments.
672 * ERANGE: The memory limits were surpassed.
673 * ENOMEM: There is not enough memory in the mmap array.
674 * EPERM: Region overlaps another one in an invalid way.
675 */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100676static int mmap_add_region_check(const xlat_ctx_t *ctx, const mmap_region_t *mm)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000677{
Douglas Raillard6a5f8f12017-09-21 08:42:21 +0100678 unsigned long long base_pa = mm->base_pa;
679 uintptr_t base_va = mm->base_va;
680 size_t size = mm->size;
Sandrine Bailleux8f23fa82017-09-28 21:58:12 +0100681 size_t granularity = mm->granularity;
Douglas Raillard6a5f8f12017-09-21 08:42:21 +0100682
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100683 unsigned long long end_pa = base_pa + size - 1U;
684 uintptr_t end_va = base_va + size - 1U;
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000685
686 if (!IS_PAGE_ALIGNED(base_pa) || !IS_PAGE_ALIGNED(base_va) ||
687 !IS_PAGE_ALIGNED(size))
688 return -EINVAL;
689
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100690 if ((granularity != XLAT_BLOCK_SIZE(1U)) &&
691 (granularity != XLAT_BLOCK_SIZE(2U)) &&
692 (granularity != XLAT_BLOCK_SIZE(3U))) {
Sandrine Bailleux8f23fa82017-09-28 21:58:12 +0100693 return -EINVAL;
694 }
695
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000696 /* Check for overflows */
697 if ((base_pa > end_pa) || (base_va > end_va))
698 return -ERANGE;
699
Masahiro Yamadab03d4cd2019-12-26 17:58:52 +0900700 if (end_va > ctx->va_max_address)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000701 return -ERANGE;
702
Masahiro Yamadab03d4cd2019-12-26 17:58:52 +0900703 if (end_pa > ctx->pa_max_address)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000704 return -ERANGE;
705
Douglas Raillard6a5f8f12017-09-21 08:42:21 +0100706 /* Check that there is space in the ctx->mmap array */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100707 if (ctx->mmap[ctx->mmap_num - 1].size != 0U)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000708 return -ENOMEM;
709
710 /* Check for PAs and VAs overlaps with all other regions */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100711 for (const mmap_region_t *mm_cursor = ctx->mmap;
712 mm_cursor->size != 0U; ++mm_cursor) {
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000713
Douglas Raillard6a5f8f12017-09-21 08:42:21 +0100714 uintptr_t mm_cursor_end_va = mm_cursor->base_va
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100715 + mm_cursor->size - 1U;
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000716
717 /*
718 * Check if one of the regions is completely inside the other
719 * one.
720 */
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +0100721 bool fully_overlapped_va =
722 ((base_va >= mm_cursor->base_va) &&
Douglas Raillard6a5f8f12017-09-21 08:42:21 +0100723 (end_va <= mm_cursor_end_va)) ||
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +0100724 ((mm_cursor->base_va >= base_va) &&
725 (mm_cursor_end_va <= end_va));
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000726
727 /*
728 * Full VA overlaps are only allowed if both regions are
729 * identity mapped (zero offset) or have the same VA to PA
730 * offset. Also, make sure that it's not the exact same area.
Antonio Nino Diazac998032017-02-27 17:23:54 +0000731 * This can only be done with static regions.
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000732 */
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +0100733 if (fully_overlapped_va) {
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000734
Antonio Nino Diazac998032017-02-27 17:23:54 +0000735#if PLAT_XLAT_TABLES_DYNAMIC
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100736 if (((mm->attr & MT_DYNAMIC) != 0U) ||
737 ((mm_cursor->attr & MT_DYNAMIC) != 0U))
Antonio Nino Diazac998032017-02-27 17:23:54 +0000738 return -EPERM;
739#endif /* PLAT_XLAT_TABLES_DYNAMIC */
Douglas Raillard6a5f8f12017-09-21 08:42:21 +0100740 if ((mm_cursor->base_va - mm_cursor->base_pa) !=
741 (base_va - base_pa))
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000742 return -EPERM;
743
Douglas Raillard6a5f8f12017-09-21 08:42:21 +0100744 if ((base_va == mm_cursor->base_va) &&
745 (size == mm_cursor->size))
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000746 return -EPERM;
747
748 } else {
749 /*
750 * If the regions do not have fully overlapping VAs,
751 * then they must have fully separated VAs and PAs.
752 * Partial overlaps are not allowed
753 */
754
Douglas Raillard6a5f8f12017-09-21 08:42:21 +0100755 unsigned long long mm_cursor_end_pa =
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100756 mm_cursor->base_pa + mm_cursor->size - 1U;
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000757
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +0100758 bool separated_pa = (end_pa < mm_cursor->base_pa) ||
759 (base_pa > mm_cursor_end_pa);
760 bool separated_va = (end_va < mm_cursor->base_va) ||
761 (base_va > mm_cursor_end_va);
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000762
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +0100763 if (!separated_va || !separated_pa)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000764 return -EPERM;
765 }
766 }
767
768 return 0;
769}
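
/*
 * For instance (hypothetical values): a region whose size is 0x1800 is
 * rejected with -EINVAL because it is not page-aligned, and two regions whose
 * VA ranges only partially overlap are rejected with -EPERM, since overlaps
 * are only allowed when the VA ranges fully overlap with the same VA-to-PA
 * offset, and only for static regions.
 */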
770
Sandrine Bailleux66342932017-07-18 13:26:36 +0100771void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000772{
John Tsichritzisfdd92482018-05-25 09:12:48 +0100773 mmap_region_t *mm_cursor = ctx->mmap, *mm_destination;
Varun Wadekarccbd2e32018-04-03 10:44:41 -0700774 const mmap_region_t *mm_end = ctx->mmap + ctx->mmap_num;
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100775 const mmap_region_t *mm_last;
776 unsigned long long end_pa = mm->base_pa + mm->size - 1U;
777 uintptr_t end_va = mm->base_va + mm->size - 1U;
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000778 int ret;
779
780 /* Ignore empty regions */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100781 if (mm->size == 0U)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000782 return;
783
Antonio Nino Diazac998032017-02-27 17:23:54 +0000784 /* Static regions must be added before initializing the xlat tables. */
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +0100785 assert(!ctx->initialized);
Antonio Nino Diazac998032017-02-27 17:23:54 +0000786
Douglas Raillard6a5f8f12017-09-21 08:42:21 +0100787 ret = mmap_add_region_check(ctx, mm);
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000788 if (ret != 0) {
789 ERROR("mmap_add_region_check() failed. error %d\n", ret);
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +0100790 assert(false);
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000791 return;
792 }
793
794 /*
795 * Find correct place in mmap to insert new region.
796 *
797 * 1 - Lower region VA end first.
798 * 2 - Smaller region size first.
799 *
800 * VA 0 0xFF
801 *
802 * 1st |------|
803 * 2nd |------------|
804 * 3rd |------|
805 * 4th |---|
806 * 5th |---|
807 * 6th |----------|
808 * 7th |-------------------------------------|
809 *
810 * This is required for overlapping regions only. It simplifies adding
811 * regions with the loop in init_xlat_tables_ctx() because the outer
812 * ones won't overwrite block or page descriptors of regions added
813 * previously.
Antonio Nino Diazac998032017-02-27 17:23:54 +0000814 *
815 * Overlapping is only allowed for static regions.
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000816 */
817
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100818 while (((mm_cursor->base_va + mm_cursor->size - 1U) < end_va)
819 && (mm_cursor->size != 0U)) {
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000820 ++mm_cursor;
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100821 }
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000822
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100823 while (((mm_cursor->base_va + mm_cursor->size - 1U) == end_va) &&
824 (mm_cursor->size != 0U) && (mm_cursor->size < mm->size)) {
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000825 ++mm_cursor;
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100826 }
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000827
Varun Wadekarccbd2e32018-04-03 10:44:41 -0700828 /*
829 * Find the last entry marker in the mmap
830 */
831 mm_last = ctx->mmap;
832 while ((mm_last->size != 0U) && (mm_last < mm_end)) {
833 ++mm_last;
834 }
835
836 /*
837 * Check if we have enough space in the memory mapping table.
838 * This shouldn't happen as we have checked in mmap_add_region_check
839 * that there is free space.
840 */
841 assert(mm_last->size == 0U);
Jeenu Viswambharan58e81482018-04-27 15:06:57 +0100842
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000843 /* Make room for new region by moving other regions up by one place */
John Tsichritzisfdd92482018-05-25 09:12:48 +0100844 mm_destination = mm_cursor + 1;
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100845 (void)memmove(mm_destination, mm_cursor,
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000846 (uintptr_t)mm_last - (uintptr_t)mm_cursor);
847
848 /*
849 * Check we haven't lost the empty sentinel from the end of the array.
850 * This shouldn't happen as we have checked in mmap_add_region_check
851 * that there is free space.
852 */
Varun Wadekarccbd2e32018-04-03 10:44:41 -0700853 assert(mm_end->size == 0U);
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000854
Douglas Raillardf68d2ed2017-09-12 10:31:49 +0100855 *mm_cursor = *mm;
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +0000856
857 if (end_pa > ctx->max_pa)
858 ctx->max_pa = end_pa;
859 if (end_va > ctx->max_va)
860 ctx->max_va = end_va;
861}
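
/*
 * Illustrative usage sketch (addresses and the context name are made up):
 *
 *	mmap_region_t mm = MAP_REGION_FLAT(0x80000000ULL, 0x200000U,
 *					   MT_MEMORY | MT_RW | MT_SECURE);
 *
 *	mmap_add_region_ctx(&my_xlat_ctx, &mm);
 *
 * MAP_REGION_FLAT() builds an identity (VA == PA) mapping with the default
 * granularity; my_xlat_ctx is assumed to have been defined elsewhere, e.g.
 * with REGISTER_XLAT_CONTEXT().
 */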
862
Antonio Nino Diazc0033282018-11-20 16:03:11 +0000863/*
864 * Determine the table level closest to the initial lookup level that
865 * can describe this translation. Then, align base VA to the next block
866 * at the determined level.
867 */
868static void mmap_alloc_va_align_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
869{
870 /*
871 * By OR'ing the size and the base PA, the resulting alignment is the one
872 * corresponding to the smaller of the two boundaries.
873 *
874 * There are three different cases. For example (for 4 KiB page size):
875 *
876 * +--------------+------------------++--------------+
877 * | PA alignment | Size multiple of || VA alignment |
878 * +--------------+------------------++--------------+
879 * | 2 MiB | 2 MiB || 2 MiB | (1)
880 * | 2 MiB | 4 KiB || 4 KiB | (2)
881 * | 4 KiB | 2 MiB || 4 KiB | (3)
882 * +--------------+------------------++--------------+
883 *
884 * - In (1), it is possible to take advantage of the alignment of the PA
885 * and the size of the region to use a level 2 translation table
886 * instead of a level 3 one.
887 *
888 * - In (2), the size is smaller than a block entry of level 2, so it is
889 * needed to use a level 3 table to describe the region or the library
890 * will map more memory than the desired one.
891 *
892 * - In (3), even though the region has the size of one level 2 block
893 * entry, it isn't possible to describe the translation with a level 2
894 * block entry because of the alignment of the base PA.
895 *
896 * Only bits 47:21 of a level 2 block descriptor are used by the MMU,
897 * bits 20:0 of the resulting address are 0 in this case. Because of
898 * this, the PA generated as result of this translation is aligned to
899 * 2 MiB. The PA that was requested to be mapped is aligned to 4 KiB,
900 * though, which means that the resulting translation is incorrect.
901 * The only way to prevent this is by using a finer granularity.
902 */
903 unsigned long long align_check;
904
905 align_check = mm->base_pa | (unsigned long long)mm->size;
906
907 /*
908 * Assume it is always aligned to level 3. There's no need to check that
909 * level because its block size is PAGE_SIZE. The checks to verify that
910 * the addresses and size are aligned to PAGE_SIZE are inside
911 * mmap_add_region.
912 */
913 for (unsigned int level = ctx->base_level; level <= 2U; ++level) {
914
915 if ((align_check & XLAT_BLOCK_MASK(level)) != 0U)
916 continue;
917
918 mm->base_va = round_up(mm->base_va, XLAT_BLOCK_SIZE(level));
919 return;
920 }
921}
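
/*
 * Worked example for the table above (illustrative, 4 KiB granule assumed): a
 * region with base_pa 0x40200000 and size 0x200000 can have its VA rounded up
 * to a 2 MiB boundary (case 1), whereas the same size at base_pa 0x40201000
 * only allows a 4 KiB aligned VA (case 3), because the low bits of the PA rule
 * out a level 2 block descriptor.
 */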
922
923void mmap_add_region_alloc_va_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
924{
925 mm->base_va = ctx->max_va + 1UL;
926
927 assert(mm->size > 0U);
928
929 mmap_alloc_va_align_ctx(ctx, mm);
930
931 /* Detect overflows. More checks are done in mmap_add_region_check(). */
932 assert(mm->base_va > ctx->max_va);
933
934 mmap_add_region_ctx(ctx, mm);
935}
936
Sandrine Bailleux66342932017-07-18 13:26:36 +0100937void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
938{
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100939 const mmap_region_t *mm_cursor = mm;
940
Antonio Nino Diaz2cb864c2018-10-08 16:11:11 +0100941 while (mm_cursor->granularity != 0U) {
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100942 mmap_add_region_ctx(ctx, mm_cursor);
943 mm_cursor++;
Sandrine Bailleux66342932017-07-18 13:26:36 +0100944 }
945}
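
/*
 * Typical usage sketch (illustrative values): mmap_add_ctx() expects an array
 * terminated by an all-zero entry, whose zero granularity stops the loop
 * above:
 *
 *	const mmap_region_t plat_regions[] = {
 *		MAP_REGION_FLAT(0x00000000ULL, 0x100000U, MT_CODE | MT_SECURE),
 *		MAP_REGION_FLAT(0x80000000ULL, 0x200000U,
 *				MT_RW_DATA | MT_SECURE),
 *		{0}
 *	};
 *
 *	mmap_add_ctx(&my_xlat_ctx, plat_regions);
 */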
946
Antonio Nino Diazac998032017-02-27 17:23:54 +0000947#if PLAT_XLAT_TABLES_DYNAMIC
948
949int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
950{
951 mmap_region_t *mm_cursor = ctx->mmap;
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100952 const mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
953 unsigned long long end_pa = mm->base_pa + mm->size - 1U;
954 uintptr_t end_va = mm->base_va + mm->size - 1U;
Antonio Nino Diazac998032017-02-27 17:23:54 +0000955 int ret;
956
957 /* Nothing to do */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100958 if (mm->size == 0U)
Antonio Nino Diazac998032017-02-27 17:23:54 +0000959 return 0;
960
Douglas Raillard6a5f8f12017-09-21 08:42:21 +0100961 /* Now this region is a dynamic one */
962 mm->attr |= MT_DYNAMIC;
963
964 ret = mmap_add_region_check(ctx, mm);
Antonio Nino Diazac998032017-02-27 17:23:54 +0000965 if (ret != 0)
966 return ret;
967
968 /*
969 * Find the adequate entry in the mmap array in the same way done for
970 * static regions in mmap_add_region_ctx().
971 */
972
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100973 while (((mm_cursor->base_va + mm_cursor->size - 1U) < end_va)
974 && (mm_cursor->size != 0U)) {
Antonio Nino Diazac998032017-02-27 17:23:54 +0000975 ++mm_cursor;
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100976 }
Antonio Nino Diazac998032017-02-27 17:23:54 +0000977
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100978 while (((mm_cursor->base_va + mm_cursor->size - 1U) == end_va) &&
979 (mm_cursor->size != 0U) && (mm_cursor->size < mm->size)) {
Antonio Nino Diazac998032017-02-27 17:23:54 +0000980 ++mm_cursor;
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100981 }
Antonio Nino Diazac998032017-02-27 17:23:54 +0000982
983 /* Make room for new region by moving other regions up by one place */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100984 (void)memmove(mm_cursor + 1U, mm_cursor,
Douglas Raillard6a5f8f12017-09-21 08:42:21 +0100985 (uintptr_t)mm_last - (uintptr_t)mm_cursor);
Antonio Nino Diazac998032017-02-27 17:23:54 +0000986
987 /*
988 * Check we haven't lost the empty sentinel from the end of the array.
989 * This shouldn't happen as we have checked in mmap_add_region_check
990 * that there is free space.
991 */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100992 assert(mm_last->size == 0U);
Antonio Nino Diazac998032017-02-27 17:23:54 +0000993
Douglas Raillardf68d2ed2017-09-12 10:31:49 +0100994 *mm_cursor = *mm;
Antonio Nino Diazac998032017-02-27 17:23:54 +0000995
996 /*
997 * Update the translation tables if the xlat tables are initialized. If
998 * not, this region will be mapped when they are initialized.
999 */
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +01001000 if (ctx->initialized) {
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001001 end_va = xlat_tables_map_region(ctx, mm_cursor,
1002 0U, ctx->base_table, ctx->base_table_entries,
Douglas Raillard6a5f8f12017-09-21 08:42:21 +01001003 ctx->base_level);
Antonio Nino Diaz37a5efa2018-08-07 12:47:12 +01001004#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
1005 xlat_clean_dcache_range((uintptr_t)ctx->base_table,
1006 ctx->base_table_entries * sizeof(uint64_t));
1007#endif
Antonio Nino Diazac998032017-02-27 17:23:54 +00001008 /* Failed to map, remove mmap entry, unmap and return error. */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001009 if (end_va != (mm_cursor->base_va + mm_cursor->size - 1U)) {
1010 (void)memmove(mm_cursor, mm_cursor + 1U,
Douglas Raillard6a5f8f12017-09-21 08:42:21 +01001011 (uintptr_t)mm_last - (uintptr_t)mm_cursor);
Antonio Nino Diazac998032017-02-27 17:23:54 +00001012
1013 /*
1014 * Check if the mapping function actually managed to map
1015 * anything. If not, just return now.
1016 */
Antonio Nino Diaz3f518922018-01-05 11:30:36 +00001017 if (mm->base_va >= end_va)
Antonio Nino Diazac998032017-02-27 17:23:54 +00001018 return -ENOMEM;
1019
1020 /*
Douglas Raillard6a5f8f12017-09-21 08:42:21 +01001021 * Something went wrong after mapping some table
1022 * entries, undo every change done up to this point.
Antonio Nino Diazac998032017-02-27 17:23:54 +00001023 */
1024 mmap_region_t unmap_mm = {
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001025 .base_pa = 0U,
Antonio Nino Diazac998032017-02-27 17:23:54 +00001026 .base_va = mm->base_va,
1027 .size = end_va - mm->base_va,
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001028 .attr = 0U
Antonio Nino Diazac998032017-02-27 17:23:54 +00001029 };
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001030 xlat_tables_unmap_region(ctx, &unmap_mm, 0U,
1031 ctx->base_table, ctx->base_table_entries,
1032 ctx->base_level);
Antonio Nino Diaz37a5efa2018-08-07 12:47:12 +01001033#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
1034 xlat_clean_dcache_range((uintptr_t)ctx->base_table,
1035 ctx->base_table_entries * sizeof(uint64_t));
1036#endif
Antonio Nino Diazac998032017-02-27 17:23:54 +00001037 return -ENOMEM;
1038 }
1039
1040 /*
1041 * Make sure that all entries are written to the memory. There
1042 * is no need to invalidate entries when mapping dynamic regions
1043 * because new table/block/page descriptors only replace old
1044 * invalid descriptors, that aren't TLB cached.
1045 */
1046 dsbishst();
1047 }
1048
1049 if (end_pa > ctx->max_pa)
1050 ctx->max_pa = end_pa;
1051 if (end_va > ctx->max_va)
1052 ctx->max_va = end_va;
1053
1054 return 0;
1055}
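
/*
 * Illustrative sketch (dev_pa and the context name are made up): dynamic
 * regions may be added after the tables are in use and the call can fail, so
 * the return value must be checked:
 *
 *	mmap_region_t dyn = MAP_REGION_FLAT(dev_pa, PAGE_SIZE,
 *					    MT_DEVICE | MT_RW | MT_SECURE);
 *	int ret = mmap_add_dynamic_region_ctx(&my_xlat_ctx, &dyn);
 *
 *	if (ret != 0)
 *		ERROR("Failed to map device region (%d)\n", ret);
 */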
1056
Antonio Nino Diazc0033282018-11-20 16:03:11 +00001057int mmap_add_dynamic_region_alloc_va_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
1058{
1059 mm->base_va = ctx->max_va + 1UL;
1060
1061 if (mm->size == 0U)
1062 return 0;
1063
1064 mmap_alloc_va_align_ctx(ctx, mm);
1065
1066 /* Detect overflows. More checks are done in mmap_add_region_check(). */
1067 if (mm->base_va < ctx->max_va) {
1068 return -ENOMEM;
1069 }
1070
1071 return mmap_add_dynamic_region_ctx(ctx, mm);
1072}
1073
Antonio Nino Diazac998032017-02-27 17:23:54 +00001074/*
1075 * Removes the region with given base Virtual Address and size from the given
1076 * context.
1077 *
1078 * Returns:
1079 * 0: Success.
1080 * EINVAL: Invalid values were used as arguments (region not found).
1081 * EPERM: Tried to remove a static region.
1082 */
1083int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
1084 size_t size)
1085{
1086 mmap_region_t *mm = ctx->mmap;
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001087 const mmap_region_t *mm_last = mm + ctx->mmap_num;
Antonio Nino Diazac998032017-02-27 17:23:54 +00001088 int update_max_va_needed = 0;
1089 int update_max_pa_needed = 0;
1090
1091 /* Check sanity of mmap array. */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001092 assert(mm[ctx->mmap_num].size == 0U);
Antonio Nino Diazac998032017-02-27 17:23:54 +00001093
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001094 while (mm->size != 0U) {
Antonio Nino Diazac998032017-02-27 17:23:54 +00001095 if ((mm->base_va == base_va) && (mm->size == size))
1096 break;
1097 ++mm;
1098 }
1099
1100 /* Check that the region was found */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001101 if (mm->size == 0U)
Antonio Nino Diazac998032017-02-27 17:23:54 +00001102 return -EINVAL;
1103
1104 /* If the region is static it can't be removed */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001105 if ((mm->attr & MT_DYNAMIC) == 0U)
Antonio Nino Diazac998032017-02-27 17:23:54 +00001106 return -EPERM;
1107
1108 /* Check if this region is using the top VAs or PAs. */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001109 if ((mm->base_va + mm->size - 1U) == ctx->max_va)
Antonio Nino Diazac998032017-02-27 17:23:54 +00001110 update_max_va_needed = 1;
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001111 if ((mm->base_pa + mm->size - 1U) == ctx->max_pa)
Antonio Nino Diazac998032017-02-27 17:23:54 +00001112 update_max_pa_needed = 1;
1113
1114 /* Update the translation tables if needed */
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +01001115 if (ctx->initialized) {
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001116 xlat_tables_unmap_region(ctx, mm, 0U, ctx->base_table,
Antonio Nino Diazac998032017-02-27 17:23:54 +00001117 ctx->base_table_entries,
1118 ctx->base_level);
Antonio Nino Diaz37a5efa2018-08-07 12:47:12 +01001119#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
1120 xlat_clean_dcache_range((uintptr_t)ctx->base_table,
1121 ctx->base_table_entries * sizeof(uint64_t));
1122#endif
Antonio Nino Diazac998032017-02-27 17:23:54 +00001123 xlat_arch_tlbi_va_sync();
1124 }
1125
1126 /* Remove this region by moving the rest down by one place. */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001127 (void)memmove(mm, mm + 1U, (uintptr_t)mm_last - (uintptr_t)mm);
Antonio Nino Diazac998032017-02-27 17:23:54 +00001128
1129 /* Check if we need to update the max VAs and PAs */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001130 if (update_max_va_needed == 1) {
1131 ctx->max_va = 0U;
Antonio Nino Diazac998032017-02-27 17:23:54 +00001132 mm = ctx->mmap;
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001133 while (mm->size != 0U) {
1134 if ((mm->base_va + mm->size - 1U) > ctx->max_va)
1135 ctx->max_va = mm->base_va + mm->size - 1U;
Antonio Nino Diazac998032017-02-27 17:23:54 +00001136 ++mm;
1137 }
1138 }
1139
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001140 if (update_max_pa_needed == 1) {
1141 ctx->max_pa = 0U;
Antonio Nino Diazac998032017-02-27 17:23:54 +00001142 mm = ctx->mmap;
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001143 while (mm->size != 0U) {
1144 if ((mm->base_pa + mm->size - 1U) > ctx->max_pa)
1145 ctx->max_pa = mm->base_pa + mm->size - 1U;
Antonio Nino Diazac998032017-02-27 17:23:54 +00001146 ++mm;
1147 }
1148 }
1149
1150 return 0;
1151}
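
/*
 * Removal sketch (illustrative): the base VA and size must match the values
 * used when the dynamic region was added, otherwise -EINVAL is returned:
 *
 *	(void)mmap_remove_dynamic_region_ctx(&my_xlat_ctx, dyn.base_va,
 *					     dyn.size);
 */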
1152
Antonio Nino Diaz675d1552018-10-30 11:36:47 +00001153void xlat_setup_dynamic_ctx(xlat_ctx_t *ctx, unsigned long long pa_max,
1154 uintptr_t va_max, struct mmap_region *mmap,
1155 unsigned int mmap_num, uint64_t **tables,
1156 unsigned int tables_num, uint64_t *base_table,
1157 int xlat_regime, int *mapped_regions)
1158{
1159 ctx->xlat_regime = xlat_regime;
1160
1161 ctx->pa_max_address = pa_max;
1162 ctx->va_max_address = va_max;
1163
1164 ctx->mmap = mmap;
1165 ctx->mmap_num = mmap_num;
1166 memset(ctx->mmap, 0, sizeof(struct mmap_region) * mmap_num);
1167
1168 ctx->tables = (void *) tables;
1169 ctx->tables_num = tables_num;
1170
1171 uintptr_t va_space_size = va_max + 1;
1172 ctx->base_level = GET_XLAT_TABLE_LEVEL_BASE(va_space_size);
1173 ctx->base_table = base_table;
1174 ctx->base_table_entries = GET_NUM_BASE_LEVEL_ENTRIES(va_space_size);
1175
1176 ctx->tables_mapped_regions = mapped_regions;
1177
1178 ctx->max_pa = 0U;
1179 ctx->max_va = 0U;
1180 ctx->initialized = false;
1181}
1182
Antonio Nino Diazac998032017-02-27 17:23:54 +00001183#endif /* PLAT_XLAT_TABLES_DYNAMIC */
1184
Daniel Boulby5a03a252018-08-30 16:48:56 +01001185void __init init_xlat_tables_ctx(xlat_ctx_t *ctx)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001186{
Antonio Nino Diazdcf9d922017-10-04 16:52:15 +01001187 assert(ctx != NULL);
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +01001188 assert(!ctx->initialized);
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001189 assert((ctx->xlat_regime == EL3_REGIME) ||
Antonio Nino Diaz128de8d2018-08-07 19:59:49 +01001190 (ctx->xlat_regime == EL2_REGIME) ||
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001191 (ctx->xlat_regime == EL1_EL0_REGIME));
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +01001192 assert(!is_mmu_enabled_ctx(ctx));
Sandrine Bailleux66342932017-07-18 13:26:36 +01001193
Antonio Nino Diazdcf9d922017-10-04 16:52:15 +01001194 mmap_region_t *mm = ctx->mmap;
Sandrine Bailleux66342932017-07-18 13:26:36 +01001195
Sathees Balya74155972019-01-25 11:36:01 +00001196 assert(ctx->va_max_address >=
1197 (xlat_get_min_virt_addr_space_size() - 1U));
1198 assert(ctx->va_max_address <= (MAX_VIRT_ADDR_SPACE_SIZE - 1U));
1199 assert(IS_POWER_OF_TWO(ctx->va_max_address + 1U));
1200
Antonio Nino Diazf1b84f62018-07-03 11:58:49 +01001201 xlat_mmap_print(mm);
Sandrine Bailleux66342932017-07-18 13:26:36 +01001202
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001203 /* All tables must be zeroed before mapping any region. */
1204
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001205 for (unsigned int i = 0U; i < ctx->base_table_entries; i++)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001206 ctx->base_table[i] = INVALID_DESC;
1207
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001208 for (int j = 0; j < ctx->tables_num; j++) {
Antonio Nino Diazac998032017-02-27 17:23:54 +00001209#if PLAT_XLAT_TABLES_DYNAMIC
1210 ctx->tables_mapped_regions[j] = 0;
1211#endif
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001212 for (unsigned int i = 0U; i < XLAT_TABLE_ENTRIES; i++)
Antonio Nino Diaz233c7c12017-03-08 14:40:23 +00001213 ctx->tables[j][i] = INVALID_DESC;
1214 }
1215
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001216 while (mm->size != 0U) {
1217 uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0U,
1218 ctx->base_table, ctx->base_table_entries,
1219 ctx->base_level);
Antonio Nino Diaz37a5efa2018-08-07 12:47:12 +01001220#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
1221 xlat_clean_dcache_range((uintptr_t)ctx->base_table,
1222 ctx->base_table_entries * sizeof(uint64_t));
1223#endif
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001224 if (end_va != (mm->base_va + mm->size - 1U)) {
Antonio Nino Diazac998032017-02-27 17:23:54 +00001225 ERROR("Not enough memory to map region:\n"
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +01001226 " VA:0x%lx PA:0x%llx size:0x%zx attr:0x%x\n",
1227 mm->base_va, mm->base_pa, mm->size, mm->attr);
Antonio Nino Diazac998032017-02-27 17:23:54 +00001228 panic();
1229 }
1230
1231 mm++;
1232 }
1233
Sandrine Bailleux46c53a22017-07-11 15:11:10 +01001234 assert(ctx->pa_max_address <= xlat_arch_get_max_supported_pa());
Sandrine Bailleux66342932017-07-18 13:26:36 +01001235 assert(ctx->max_va <= ctx->va_max_address);
1236 assert(ctx->max_pa <= ctx->pa_max_address);
1237
Antonio Nino Diaz5c97bd12018-08-02 09:57:29 +01001238 ctx->initialized = true;
Sandrine Bailleuxc5b63772017-05-31 13:31:48 +01001239
1240 xlat_tables_print(ctx);
Sandrine Bailleux66342932017-07-18 13:26:36 +01001241}
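
/*
 * End-to-end sketch (illustrative, EL3 regime assumed): once all static
 * regions have been added, the tables are populated and the MMU can be
 * turned on, e.g. with the enable_mmu_el3() helper from this library's MMU
 * enablement code:
 *
 *	mmap_add_ctx(&my_xlat_ctx, plat_regions);
 *	init_xlat_tables_ctx(&my_xlat_ctx);
 *	enable_mmu_el3(0U);
 */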