/*
 * Copyright (c) 2016-2017, Linaro Limited. All rights reserved.
 * Copyright (c) 2014-2020, Arm Limited. All rights reserved.
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdio.h>
#include <string.h>

#include <platform_def.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/cassert.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables.h>

#include "../xlat_tables_private.h"

#ifdef ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING
#error "ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING flag is set. \
This module is to be used when LPAE is not supported"
#endif

CASSERT(PLAT_VIRT_ADDR_SPACE_SIZE == (1ULL << 32), invalid_vaddr_space_size);
CASSERT(PLAT_PHY_ADDR_SPACE_SIZE == (1ULL << 32), invalid_paddr_space_size);

#define MMU32B_UNSET_DESC	~0UL
#define MMU32B_INVALID_DESC	0UL

#define MT_UNKNOWN	~0U

/*
 * MMU related values
 */

/* Shareable */
#define MMU32B_TTB_S		(1U << 1)

/* Not Outer Shareable */
#define MMU32B_TTB_NOS		(1U << 5)

/* Normal memory, Inner Non-cacheable */
#define MMU32B_TTB_IRGN_NC	0U

/* Normal memory, Inner Write-Back Write-Allocate Cacheable */
#define MMU32B_TTB_IRGN_WBWA	(1U << 6)

/* Normal memory, Inner Write-Through Cacheable */
#define MMU32B_TTB_IRGN_WT	1U

/* Normal memory, Inner Write-Back no Write-Allocate Cacheable */
#define MMU32B_TTB_IRGN_WB	(1U | (1U << 6))

/* Normal memory, Outer Write-Back Write-Allocate Cacheable */
#define MMU32B_TTB_RNG_WBWA	(1U << 3)

#define MMU32B_DEFAULT_ATTRS \
		(MMU32B_TTB_S | MMU32B_TTB_NOS | \
		 MMU32B_TTB_IRGN_WBWA | MMU32B_TTB_RNG_WBWA)
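
/*
 * For illustration: with the definitions above, MMU32B_DEFAULT_ATTRS
 * evaluates to (1U << 1) | (1U << 5) | (1U << 6) | (1U << 3) == 0x6AU,
 * i.e. translation table walks are Inner Shareable and Inner/Outer
 * Write-Back Write-Allocate cacheable. This is the value OR'ed into
 * TTBR0 by enable_mmu_svc_mon() below.
 */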

/* armv7 memory mapping attributes: section mapping */
#define SECTION_SECURE			(0U << 19)
#define SECTION_NOTSECURE		(1U << 19)
#define SECTION_SHARED			(1U << 16)
#define SECTION_NOTGLOBAL		(1U << 17)
#define SECTION_ACCESS_FLAG		(1U << 10)
#define SECTION_UNPRIV			(1U << 11)
#define SECTION_RO			(1U << 15)
#define SECTION_TEX(tex)		((((tex) >> 2) << 12) | \
					((((tex) >> 1) & 0x1) << 3) | \
					(((tex) & 0x1) << 2))
#define SECTION_DEVICE			SECTION_TEX(MMU32B_ATTR_DEVICE_INDEX)
#define SECTION_NORMAL			SECTION_TEX(MMU32B_ATTR_DEVICE_INDEX)
#define SECTION_NORMAL_CACHED		\
				SECTION_TEX(MMU32B_ATTR_IWBWA_OWBWA_INDEX)

#define SECTION_XN			(1U << 4)
#define SECTION_PXN			(1U << 0)
#define SECTION_SECTION			(2U << 0)

#define SECTION_PT_NOTSECURE		(1U << 3)
#define SECTION_PT_PT			(1U << 0)

#define SMALL_PAGE_SMALL_PAGE		(1U << 1)
#define SMALL_PAGE_SHARED		(1U << 10)
#define SMALL_PAGE_NOTGLOBAL		(1U << 11)
#define SMALL_PAGE_TEX(tex)		((((tex) >> 2) << 6) | \
					((((tex) >> 1) & 0x1) << 3) | \
					(((tex) & 0x1) << 2))
#define SMALL_PAGE_DEVICE		\
				SMALL_PAGE_TEX(MMU32B_ATTR_DEVICE_INDEX)
#define SMALL_PAGE_NORMAL		\
				SMALL_PAGE_TEX(MMU32B_ATTR_DEVICE_INDEX)
#define SMALL_PAGE_NORMAL_CACHED	\
				SMALL_PAGE_TEX(MMU32B_ATTR_IWBWA_OWBWA_INDEX)
#define SMALL_PAGE_ACCESS_FLAG		(1U << 4)
#define SMALL_PAGE_UNPRIV		(1U << 5)
#define SMALL_PAGE_RO			(1U << 9)
#define SMALL_PAGE_XN			(1U << 0)

/* The TEX, C and B bits concatenated */
#define MMU32B_ATTR_DEVICE_INDEX	0U
#define MMU32B_ATTR_IWBWA_OWBWA_INDEX	1U

#define MMU32B_PRRR_IDX(idx, tr, nos)	(((tr) << (2 * (idx))) | \
					((uint32_t)(nos) << ((idx) + 24)))
#define MMU32B_NMRR_IDX(idx, ir, or)	(((ir) << (2 * (idx))) | \
					((uint32_t)(or) << (2 * (idx) + 16)))
#define MMU32B_PRRR_DS0			(1U << 16)
#define MMU32B_PRRR_DS1			(1U << 17)
#define MMU32B_PRRR_NS0			(1U << 18)
#define MMU32B_PRRR_NS1			(1U << 19)
Etienne Carriereb4502772017-10-24 22:47:59 +0200119
120#define DACR_DOMAIN(num, perm) ((perm) << ((num) * 2))
Alexei Fedorovdc88b582020-07-28 12:26:36 +0100121#define DACR_DOMAIN_PERM_NO_ACCESS 0U
122#define DACR_DOMAIN_PERM_CLIENT 1U
123#define DACR_DOMAIN_PERM_MANAGER 3U

#define NUM_1MB_IN_4GB		(1UL << 12)
#define NUM_4K_IN_1MB		(1UL << 8)

#define ONE_MB_SHIFT		20

/* mmu 32b integration */
#define MMU32B_L1_TABLE_SIZE	(NUM_1MB_IN_4GB * 4)
#define MMU32B_L2_TABLE_SIZE	(NUM_4K_IN_1MB * 4)
#define MMU32B_L1_TABLE_ALIGN	(1U << 14)
#define MMU32B_L2_TABLE_ALIGN	(1U << 10)

static unsigned int next_xlat;
static unsigned long long xlat_max_pa;
static uintptr_t xlat_max_va;

static uint32_t mmu_l1_base[NUM_1MB_IN_4GB]
	__aligned(MMU32B_L1_TABLE_ALIGN) __attribute__((section(".xlat_table")));

static uint32_t mmu_l2_base[MAX_XLAT_TABLES][NUM_4K_IN_1MB]
	__aligned(MMU32B_L2_TABLE_ALIGN) __attribute__((section(".xlat_table")));

/*
 * Array of all memory regions stored in order of ascending base address.
 * The list is terminated by the first entry with size == 0.
 */
static mmap_region_t mmap[MAX_MMAP_REGIONS + 1];

void print_mmap(void)
{
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
	mmap_region_t *mm = mmap;

	printf("init xlat - l1:%p l2:%p (%d)\n",
	       (void *)mmu_l1_base, (void *)mmu_l2_base, MAX_XLAT_TABLES);
	printf("mmap:\n");
	while (mm->size) {
		printf(" VA:%p PA:0x%llx size:0x%zx attr:0x%x\n",
		       (void *)mm->base_va, mm->base_pa,
		       mm->size, mm->attr);
		++mm;
	}
	printf("\n");
#endif
}

void mmap_add(const mmap_region_t *mm)
{
	const mmap_region_t *mm_cursor = mm;

	while ((mm_cursor->size != 0U) || (mm_cursor->attr != 0U)) {
		mmap_add_region(mm_cursor->base_pa, mm_cursor->base_va,
				mm_cursor->size, mm_cursor->attr);
		mm_cursor++;
	}
}

void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
		     size_t size, unsigned int attr)
{
	mmap_region_t *mm = mmap;
	const mmap_region_t *mm_last = mm + ARRAY_SIZE(mmap) - 1U;
	unsigned long long end_pa = base_pa + size - 1U;
	uintptr_t end_va = base_va + size - 1U;

	assert(IS_PAGE_ALIGNED(base_pa));
	assert(IS_PAGE_ALIGNED(base_va));
	assert(IS_PAGE_ALIGNED(size));

	if (size == 0U) {
		return;
	}

	assert(base_pa < end_pa); /* Check for overflows */
	assert(base_va < end_va);

	assert((base_va + (uintptr_t)size - (uintptr_t)1) <=
	       (PLAT_VIRT_ADDR_SPACE_SIZE - 1U));
	assert((base_pa + (unsigned long long)size - 1ULL) <=
	       (PLAT_PHY_ADDR_SPACE_SIZE - 1U));

#if ENABLE_ASSERTIONS

	/* Check for PAs and VAs overlaps with all other regions */
	for (mm = mmap; mm->size; ++mm) {

		uintptr_t mm_end_va = mm->base_va + mm->size - 1U;

		/*
		 * Check if one of the regions is completely inside the other
		 * one.
		 */
		bool fully_overlapped_va =
			((base_va >= mm->base_va) && (end_va <= mm_end_va)) ||
			((mm->base_va >= base_va) && (mm_end_va <= end_va));

		/*
		 * Full VA overlaps are only allowed if both regions are
		 * identity mapped (zero offset) or have the same VA to PA
		 * offset. Also, make sure that it's not the exact same area.
		 */
		if (fully_overlapped_va) {
			assert((mm->base_va - mm->base_pa) ==
			       (base_va - base_pa));
			assert((base_va != mm->base_va) || (size != mm->size));
		} else {
			/*
			 * If the regions do not have fully overlapping VAs,
			 * then they must have fully separated VAs and PAs.
			 * Partial overlaps are not allowed
			 */

			unsigned long long mm_end_pa =
				mm->base_pa + mm->size - 1;

			bool separated_pa = (end_pa < mm->base_pa) ||
				(base_pa > mm_end_pa);
			bool separated_va = (end_va < mm->base_va) ||
				(base_va > mm_end_va);

			assert(separated_va && separated_pa);
		}
	}

	mm = mmap; /* Restore pointer to the start of the array */

#endif /* ENABLE_ASSERTIONS */

	/* Find correct place in mmap to insert new region */
	while ((mm->base_va < base_va) && (mm->size != 0U)) {
		++mm;
	}

	/*
	 * If a section is contained inside another one with the same base
	 * address, it must be placed after the one it is contained in:
	 *
	 * 1st |-----------------------|
	 * 2nd |------------|
	 * 3rd |------|
	 *
	 * This is required for mmap_region_attr() to get the attributes of the
	 * small region correctly.
	 */
	while ((mm->base_va == base_va) && (mm->size > size)) {
		++mm;
	}

	/* Make room for new region by moving other regions up by one place */
	(void)memmove(mm + 1, mm, (uintptr_t)mm_last - (uintptr_t)mm);

	/* Check we haven't lost the empty sentinel from the end of the array */
	assert(mm_last->size == 0U);

	mm->base_pa = base_pa;
	mm->base_va = base_va;
	mm->size = size;
	mm->attr = attr;

	if (end_pa > xlat_max_pa) {
		xlat_max_pa = end_pa;
	}
	if (end_va > xlat_max_va) {
		xlat_max_va = end_va;
	}
}
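
/*
 * Typical usage from platform code (an illustrative sketch only; the
 * addresses and sizes are hypothetical, not taken from this file):
 *
 *	mmap_add_region(0x80000000ULL, 0x80000000U, 0x00100000U,
 *			MT_MEMORY | MT_RW | MT_SECURE);
 *	mmap_add_region(0x1c090000ULL, 0x1c090000U, 0x00001000U,
 *			MT_DEVICE | MT_RW | MT_SECURE);
 *	init_xlat_tables();
 *	enable_mmu_svc_mon(0);
 */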

/* Map all memory as shared/global/domain0/no-usr access */
static uint32_t mmap_desc(unsigned int attr, unsigned int addr_pa,
			  unsigned int level)
{
	uint32_t desc;

	switch (level) {
	case 1U:
		assert((addr_pa & (MMU32B_L1_TABLE_ALIGN - 1)) == 0U);

		desc = SECTION_SECTION | SECTION_SHARED;

		desc |= (attr & MT_NS) != 0U ? SECTION_NOTSECURE : 0U;

		desc |= SECTION_ACCESS_FLAG;
		desc |= (attr & MT_RW) != 0U ? 0U : SECTION_RO;

		desc |= (attr & MT_MEMORY) != 0U ?
			SECTION_NORMAL_CACHED : SECTION_DEVICE;

		if (((attr & MT_RW) != 0U) || ((attr & MT_MEMORY) == 0U)) {
			desc |= SECTION_XN;
		}
		break;
	case 2U:
		assert((addr_pa & (MMU32B_L2_TABLE_ALIGN - 1)) == 0U);

		desc = SMALL_PAGE_SMALL_PAGE | SMALL_PAGE_SHARED;

		desc |= SMALL_PAGE_ACCESS_FLAG;
		desc |= (attr & MT_RW) != 0U ? 0U : SMALL_PAGE_RO;

		desc |= (attr & MT_MEMORY) != 0U ?
			SMALL_PAGE_NORMAL_CACHED : SMALL_PAGE_DEVICE;

		if (((attr & MT_RW) != 0U) || ((attr & MT_MEMORY) == 0U)) {
			desc |= SMALL_PAGE_XN;
		}
		break;
	default:
		panic();
	}
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
	/* Dump only the non-LPAE level 2 tables */
	if (level == 2U) {
		printf(attr & MT_MEMORY ? "MEM" : "dev");
		printf(attr & MT_RW ? "-rw" : "-RO");
		printf(attr & MT_NS ? "-NS" : "-S");
	}
#endif
	return desc | addr_pa;
}
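
/*
 * Worked example: for a secure, read-only, cacheable region
 * (attr == MT_MEMORY | MT_RO | MT_SECURE) mapped at level 1, the bits
 * above combine to SECTION_SECTION | SECTION_SHARED | SECTION_ACCESS_FLAG
 * | SECTION_RO | SECTION_NORMAL_CACHED == 0x18406U, so a section at
 * PA 0x80100000 yields the descriptor value 0x80118406.
 */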

static unsigned int mmap_region_attr(const mmap_region_t *mm, uintptr_t base_va,
				     size_t size, unsigned int *attr)
{
	/* Don't assume that the area is contained in the first region */
	unsigned int ret = MT_UNKNOWN;

	/*
	 * Get attributes from last (innermost) region that contains the
	 * requested area. Don't stop as soon as one region doesn't contain it
	 * because there may be other internal regions that contain this area:
	 *
	 * |-----------------------------1-----------------------------|
	 * |----2----|     |-------3-------|    |----5----|
	 *                   |--4--|
	 *
	 *                   |---| <- Area we want the attributes of.
	 *
	 * In this example, the area is contained in regions 1, 3 and 4 but not
	 * in region 2. The loop shouldn't stop at region 2 as inner regions
	 * have priority over outer regions; it should stop at region 5, so the
	 * area ends up with the attributes of region 4, the innermost region
	 * that fully contains it.
	 */
	for ( ; ; ++mm) {

		if (mm->size == 0U) {
			return ret; /* Reached end of list */
		}

		if (mm->base_va > (base_va + size - 1U)) {
			return ret; /* Next region is after area so end */
		}

		if ((mm->base_va + mm->size - 1U) < base_va) {
			continue; /* Next region has already been overtaken */
		}

		if ((ret == 0U) && (mm->attr == *attr)) {
			continue; /* Region doesn't override attribs so skip */
		}

		if ((mm->base_va > base_va) ||
		    ((mm->base_va + mm->size - 1U) <
		     (base_va + size - 1U))) {
			return MT_UNKNOWN; /* Region doesn't fully cover area */
		}

		*attr = mm->attr;
		ret = 0U;
	}
}

static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
					       unsigned int base_va,
					       uint32_t *table,
					       unsigned int level)
{
	unsigned int level_size_shift = (level == 1U) ?
					ONE_MB_SHIFT : FOUR_KB_SHIFT;
	unsigned int level_size = 1U << level_size_shift;
	unsigned int level_index_mask = (level == 1U) ?
					(NUM_1MB_IN_4GB - 1) << ONE_MB_SHIFT :
					(NUM_4K_IN_1MB - 1) << FOUR_KB_SHIFT;

	assert((level == 1U) || (level == 2U));

	VERBOSE("init xlat table at %p (level%1u)\n", (void *)table, level);

	do {
		uint32_t desc = MMU32B_UNSET_DESC;

		if (mm->base_va + mm->size <= base_va) {
			/* Area now after the region so skip it */
			++mm;
			continue;
		}
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
		/* Dump only non-LPAE level 2 tables content */
		if (level == 2U) {
			printf("      0x%lx %x " + 6 - 2 * level,
			       base_va, level_size);
		}
#endif
		if (mm->base_va >= base_va + level_size) {
			/* Next region is after area so nothing to map yet */
			desc = MMU32B_INVALID_DESC;
		} else if ((mm->base_va <= base_va) &&
			   (mm->base_va + mm->size) >=
			   (base_va + level_size)) {
			/* Next region covers all of area */
			unsigned int attr = mm->attr;
			unsigned int r = mmap_region_attr(mm, base_va,
							  level_size, &attr);

			if (r == 0U) {
				desc = mmap_desc(attr,
					base_va - mm->base_va + mm->base_pa,
					level);
			}
		}

		if (desc == MMU32B_UNSET_DESC) {
			uintptr_t xlat_table;

			/*
			 * Area not covered by a region so need a finer table.
			 * Reuse the next-level table if any (and assert that
			 * its attributes match); otherwise allocate a new
			 * xlat table.
			 */
			if (*table) {
				assert((*table & 3) == SECTION_PT_PT);
				assert(((*table & SECTION_PT_NOTSECURE) == 0U)
				       == ((mm->attr & MT_NS) == 0U));

				xlat_table = (*table) &
					~(MMU32B_L1_TABLE_ALIGN - 1);
				desc = *table;
			} else {
				xlat_table = (uintptr_t)mmu_l2_base +
					next_xlat * MMU32B_L2_TABLE_SIZE;
				next_xlat++;
				assert(next_xlat <= MAX_XLAT_TABLES);
				(void)memset((char *)xlat_table, 0,
					     MMU32B_L2_TABLE_SIZE);

				desc = xlat_table | SECTION_PT_PT;
				desc |= (mm->attr & MT_NS) != 0U ?
					SECTION_PT_NOTSECURE : 0;
			}
			/* Recurse to fill in new table */
			mm = init_xlation_table_inner(mm, base_va,
						      (uint32_t *)xlat_table,
						      level + 1);
		}
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
		/* Dump only non-LPAE level 2 tables content */
		if (level == 2U) {
			printf("\n");
		}
#endif
		*table++ = desc;
		base_va += level_size;
	} while ((mm->size != 0U) && ((base_va & level_index_mask) != 0U));

	return mm;
}
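
/*
 * For example (hypothetical addresses): mapping a single 4 KB device page
 * at 0x1c090000 makes the level 1 walk hit the 1 MB entry at index 0x1c0,
 * allocate one level 2 table from mmu_l2_base and recurse; the recursion
 * then emits 256 small-page descriptors for that 1 MB span, one valid page
 * plus invalid entries for the unmapped remainder.
 */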

void init_xlat_tables(void)
{
	print_mmap();

	assert(((unsigned int)mmu_l1_base & (MMU32B_L1_TABLE_ALIGN - 1)) == 0U);
	assert(((unsigned int)mmu_l2_base & (MMU32B_L2_TABLE_ALIGN - 1)) == 0U);

	(void)memset(mmu_l1_base, 0, MMU32B_L1_TABLE_SIZE);

	init_xlation_table_inner(mmap, 0, (uint32_t *)mmu_l1_base, 1);

	VERBOSE("init xlat - max_va=%p, max_pa=%llx\n",
		(void *)xlat_max_va, xlat_max_pa);
	assert(xlat_max_pa <= (PLAT_PHY_ADDR_SPACE_SIZE - 1U));
}

/*******************************************************************************
 * Function for enabling the MMU in Secure PL1, assuming that the
 * page-tables have already been created.
 ******************************************************************************/
void enable_mmu_svc_mon(unsigned int flags)
{
	unsigned int prrr;
	unsigned int nmrr;
	unsigned int sctlr;

	assert(IS_IN_SECURE());
	assert((read_sctlr() & SCTLR_M_BIT) == 0U);

	/* Enable Access flag (simplified access permissions) and TEX remap */
	write_sctlr(read_sctlr() | SCTLR_AFE_BIT | SCTLR_TRE_BIT);

	prrr = MMU32B_PRRR_IDX(MMU32B_ATTR_DEVICE_INDEX, 1, 0)
		| MMU32B_PRRR_IDX(MMU32B_ATTR_IWBWA_OWBWA_INDEX, 2, 1);
	nmrr = MMU32B_NMRR_IDX(MMU32B_ATTR_DEVICE_INDEX, 0, 0)
		| MMU32B_NMRR_IDX(MMU32B_ATTR_IWBWA_OWBWA_INDEX, 1, 1);

	prrr |= MMU32B_PRRR_NS1 | MMU32B_PRRR_DS1;
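	/*
	 * With the attribute indices defined above, these work out to
	 * PRRR == 0x020A0009 and NMRR == 0x00040004: index 0 is Device
	 * memory, index 1 is Normal Inner/Outer Write-Back Write-Allocate
	 * memory.
	 */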
527
528 write_prrr(prrr);
529 write_nmrr(nmrr);
530
531 /* Program Domain access control register: domain 0 only */
532 write_dacr(DACR_DOMAIN(0, DACR_DOMAIN_PERM_CLIENT));
533
534 /* Invalidate TLBs at the current exception level */
535 tlbiall();
536
537 /* set MMU base xlat table entry (use only TTBR0) */
538 write_ttbr0((uint32_t)mmu_l1_base | MMU32B_DEFAULT_ATTRS);
Alexei Fedorovdc88b582020-07-28 12:26:36 +0100539 write_ttbr1(0U);
Etienne Carriereb4502772017-10-24 22:47:59 +0200540
541 /*
542 * Ensure all translation table writes have drained
543 * into memory, the TLB invalidation is complete,
544 * and translation register writes are committed
545 * before enabling the MMU
546 */
547 dsb();
548 isb();
549
550 sctlr = read_sctlr();
551 sctlr |= SCTLR_M_BIT;
Alexei Fedorovdc88b582020-07-28 12:26:36 +0100552#ifdef ARMV7_SUPPORTS_VIRTUALIZATION
Etienne Carriereb4502772017-10-24 22:47:59 +0200553 sctlr |= SCTLR_WXN_BIT;
554#endif
555
Alexei Fedorovdc88b582020-07-28 12:26:36 +0100556 if ((flags & DISABLE_DCACHE) != 0U) {
Etienne Carriereb4502772017-10-24 22:47:59 +0200557 sctlr &= ~SCTLR_C_BIT;
Alexei Fedorovdc88b582020-07-28 12:26:36 +0100558 } else {
Etienne Carriereb4502772017-10-24 22:47:59 +0200559 sctlr |= SCTLR_C_BIT;
Alexei Fedorovdc88b582020-07-28 12:26:36 +0100560 }
Etienne Carriereb4502772017-10-24 22:47:59 +0200561
562 write_sctlr(sctlr);
563
564 /* Ensure the MMU enable takes effect immediately */
565 isb();
566}