/*
 * Copyright (c) 2017-2020, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>

#include <platform_def.h>

#include <common/debug.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#include "xlat_tables_private.h"

/*
 * MMU configuration register values for the active translation context. Used
 * from the MMU assembly helpers.
 */
uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];

/*
 * Allocate and initialise the default translation context for the BL image
 * currently executing.
 */
REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
		      PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);

void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, size_t size,
		     unsigned int attr)
{
	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);

	mmap_add_region_ctx(&tf_xlat_ctx, &mm);
}

void mmap_add(const mmap_region_t *mm)
{
	mmap_add_ctx(&tf_xlat_ctx, mm);
}
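
/*
 * Usage sketch (illustrative, not compiled): a platform port typically
 * registers its static regions with these wrappers before calling
 * init_xlat_tables(). BL_CODE_BASE and BL_CODE_END are the usual TF-A
 * linker symbols; the region layout here is an assumption for the example.
 *
 *	mmap_add_region(BL_CODE_BASE, BL_CODE_BASE,
 *			BL_CODE_END - BL_CODE_BASE,
 *			MT_CODE | MT_SECURE);
 */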

void mmap_add_region_alloc_va(unsigned long long base_pa, uintptr_t *base_va,
			      size_t size, unsigned int attr)
{
	mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);

	mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, &mm);

	*base_va = mm.base_va;
}

void mmap_add_alloc_va(mmap_region_t *mm)
{
	while (mm->granularity != 0U) {
		assert(mm->base_va == 0U);
		mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, mm);
		mm++;
	}
}
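
/*
 * Sketch (illustrative): mapping a hypothetical device when the caller does
 * not care which virtual address is used. PLAT_FOO_UART_BASE is a made-up
 * constant for the example.
 *
 *	uintptr_t uart_va;
 *
 *	mmap_add_region_alloc_va(PLAT_FOO_UART_BASE, &uart_va, PAGE_SIZE,
 *				 MT_DEVICE | MT_RW | MT_SECURE);
 *
 * On return, uart_va holds the virtual address chosen by the library.
 */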

#if PLAT_XLAT_TABLES_DYNAMIC

int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
			    size_t size, unsigned int attr)
{
	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);

	return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
}

int mmap_add_dynamic_region_alloc_va(unsigned long long base_pa,
				     uintptr_t *base_va, size_t size,
				     unsigned int attr)
{
	mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);

	int rc = mmap_add_dynamic_region_alloc_va_ctx(&tf_xlat_ctx, &mm);

	*base_va = mm.base_va;

	return rc;
}

int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
{
	return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx,
					base_va, size);
}

#endif /* PLAT_XLAT_TABLES_DYNAMIC */
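
/*
 * Sketch (illustrative): dynamic regions may be added and removed after the
 * MMU has been enabled, so failures must be handled at run time (e.g. no
 * free translation tables left). pa, va and size are placeholders.
 *
 *	int ret = mmap_add_dynamic_region(pa, va, size,
 *					  MT_MEMORY | MT_RW | MT_SECURE);
 *	if (ret != 0) {
 *		return ret;
 *	}
 *	... access the region through va ...
 *	ret = mmap_remove_dynamic_region(va, size);
 */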

void __init init_xlat_tables(void)
{
	assert(tf_xlat_ctx.xlat_regime == EL_REGIME_INVALID);

	unsigned int current_el = xlat_arch_current_el();

	if (current_el == 1U) {
		tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME;
	} else if (current_el == 2U) {
		tf_xlat_ctx.xlat_regime = EL2_REGIME;
	} else {
		assert(current_el == 3U);
		tf_xlat_ctx.xlat_regime = EL3_REGIME;
	}

	init_xlat_tables_ctx(&tf_xlat_ctx);
}
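
/*
 * Typical call order from a BL image's setup code (sketch, assuming all
 * static regions have already been registered). On AArch64:
 *
 *	init_xlat_tables();
 *	enable_mmu(0U);
 *
 * AArch32 images call enable_mmu_svc_mon() or enable_mmu_hyp() instead.
 */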

int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr)
{
	return xlat_get_mem_attributes_ctx(&tf_xlat_ctx, base_va, attr);
}

int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)
{
	return xlat_change_mem_attributes_ctx(&tf_xlat_ctx, base_va, size, attr);
}
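
/*
 * Sketch (illustrative): revoking write permission on a page once its
 * contents are final. page_va is a placeholder for a page-aligned, already
 * mapped address.
 *
 *	xlat_change_mem_attributes(page_va, PAGE_SIZE,
 *				   MT_RO_DATA | MT_SECURE);
 */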

#if PLAT_RO_XLAT_TABLES
/* Change the memory attributes of the descriptors which resolve the address
 * range that belongs to the translation tables themselves, which are by default
 * mapped as part of read-write data in the BL image's memory.
 *
 * Since the translation tables map themselves via these level 3 (page)
 * descriptors, any change applied to them with the MMU on would introduce a
 * chicken and egg problem because of the break-before-make sequence.
 * Eventually, it would reach the descriptor that resolves the very table it
 * belongs to and the invalidation (break step) would cause the subsequent write
 * (make step) to it to generate an MMU fault. Therefore, the MMU is disabled
 * before making the change.
 *
 * No assumption is made about what data this function needs, therefore all the
 * caches are flushed in order to ensure coherency. A future optimization would
 * be to only flush the required data to main memory.
 */
int xlat_make_tables_readonly(void)
{
	assert(tf_xlat_ctx.initialized == true);
#ifdef __aarch64__
	if (tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME) {
		disable_mmu_el1();
	} else if (tf_xlat_ctx.xlat_regime == EL3_REGIME) {
		disable_mmu_el3();
	} else {
		assert(tf_xlat_ctx.xlat_regime == EL2_REGIME);
		return -1;
	}

	/* Flush all caches. */
	dcsw_op_all(DCCISW);
#else /* !__aarch64__ */
	assert(tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME);
	/* On AArch32, we flush the caches before disabling the MMU. The reason
	 * for this is that the dcsw_op_all AArch32 function pushes some
	 * registers onto the stack under the assumption that it is writing to
	 * cache, which is not true with the MMU off. This would result in the
	 * stack becoming corrupted and a wrong/junk value for the LR being
	 * restored at the end of the routine.
	 */
	dcsw_op_all(DC_OP_CISW);
	disable_mmu_secure();
#endif

	int rc = xlat_change_mem_attributes_ctx(&tf_xlat_ctx,
				(uintptr_t)tf_xlat_ctx.tables,
				tf_xlat_ctx.tables_num * XLAT_TABLE_SIZE,
				MT_RO_DATA | MT_SECURE);

#ifdef __aarch64__
	if (tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME) {
		enable_mmu_el1(0U);
	} else {
		assert(tf_xlat_ctx.xlat_regime == EL3_REGIME);
		enable_mmu_el3(0U);
	}
#else /* !__aarch64__ */
	enable_mmu_svc_mon(0U);
#endif

	if (rc == 0) {
		tf_xlat_ctx.readonly_tables = true;
	}

	return rc;
}
#endif /* PLAT_RO_XLAT_TABLES */
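
/*
 * Sketch (illustrative) of where a BL image might lock its tables down,
 * once all mappings and permission changes are final:
 *
 *	init_xlat_tables();
 *	enable_mmu_el3(0U);
 *	...
 *	if (xlat_make_tables_readonly() != 0) {
 *		ERROR("Could not make translation tables read-only.\n");
 *		panic();
 *	}
 */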

/*
 * If dynamic allocation of new regions is disabled then by the time we call the
 * function enabling the MMU, we'll have registered all the memory regions to
 * map for the system's lifetime. Therefore, at this point we know the maximum
 * physical address that will ever be mapped.
 *
 * If dynamic allocation is enabled then we can't make any such assumption
 * because the maximum physical address could get pushed while adding a new
 * region. Therefore, in this case we have to assume that the whole address
 * space size might be mapped.
 */
#if PLAT_XLAT_TABLES_DYNAMIC
#define MAX_PHYS_ADDR	tf_xlat_ctx.pa_max_address
#else
#define MAX_PHYS_ADDR	tf_xlat_ctx.max_pa
#endif

#ifdef __aarch64__

void enable_mmu_el1(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
	enable_mmu_direct_el1(flags);
}

void enable_mmu_el2(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL2_REGIME);
	enable_mmu_direct_el2(flags);
}

void enable_mmu_el3(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL3_REGIME);
	enable_mmu_direct_el3(flags);
}

void enable_mmu(unsigned int flags)
{
	switch (get_current_el_maybe_constant()) {
	case 1:
		enable_mmu_el1(flags);
		break;
	case 2:
		enable_mmu_el2(flags);
		break;
	case 3:
		enable_mmu_el3(flags);
		break;
	default:
		panic();
	}
}

#else /* !__aarch64__ */

void enable_mmu_svc_mon(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
	enable_mmu_direct_svc_mon(flags);
}

void enable_mmu_hyp(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL2_REGIME);
	enable_mmu_direct_hyp(flags);
}

#endif /* __aarch64__ */