/*
 * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>

#include <platform_def.h>

#include <common/debug.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#include "xlat_tables_private.h"

/*
 * MMU configuration register values for the active translation context. They
 * are used by the MMU assembly helpers.
 */
uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];

/*
 * Allocate and initialise the default translation context for the BL image
 * currently executing.
 */
#if PLAT_RO_XLAT_TABLES
REGISTER_XLAT_CONTEXT_RO_BASE_TABLE(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
                        PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE,
                        EL_REGIME_INVALID, "xlat_table");
#else
REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
                      PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);
#endif

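/*
 * Helpers that add a static region to the translation context of the
 * currently executing image. They wrap the _ctx variants with &tf_xlat_ctx.
 * Static regions can only be added before init_xlat_tables() is called.
 */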
void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, size_t size,
                     unsigned int attr)
{
        mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);

        mmap_add_region_ctx(&tf_xlat_ctx, &mm);
}

void mmap_add(const mmap_region_t *mm)
{
        mmap_add_ctx(&tf_xlat_ctx, mm);
}
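
/*
 * Example (illustrative sketch, not compiled here): a platform could map its
 * UART at boot time. PLAT_UART_BASE is a hypothetical platform constant, not
 * defined by this library; MT_DEVICE, MT_RW, MT_SECURE and PAGE_SIZE come
 * from the xlat tables headers.
 *
 *      mmap_add_region(PLAT_UART_BASE, PLAT_UART_BASE, PAGE_SIZE,
 *                      MT_DEVICE | MT_RW | MT_SECURE);
 */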
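
/*
 * Variants that let the library pick the virtual address: the chosen VA is
 * returned through *base_va (or mm->base_va). The array passed to
 * mmap_add_alloc_va() must be terminated by a zeroed entry, as the loop
 * below stops at the first region whose granularity is zero.
 */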
void mmap_add_region_alloc_va(unsigned long long base_pa, uintptr_t *base_va,
                              size_t size, unsigned int attr)
{
        mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);

        mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, &mm);

        *base_va = mm.base_va;
}

void mmap_add_alloc_va(mmap_region_t *mm)
{
        while (mm->granularity != 0U) {
                assert(mm->base_va == 0U);
                mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, mm);
                mm++;
        }
}
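
/*
 * Example (illustrative sketch): SRAM_PA and SRAM_SIZE are hypothetical
 * platform values.
 *
 *      uintptr_t va;
 *
 *      mmap_add_region_alloc_va(SRAM_PA, &va, SRAM_SIZE,
 *                               MT_MEMORY | MT_RW | MT_SECURE);
 *
 * On return, va holds the virtual address the library assigned to SRAM_PA.
 */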
68
Antonio Nino Diazf1b84f62018-07-03 11:58:49 +010069#if PLAT_XLAT_TABLES_DYNAMIC
70
71int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
72 size_t size, unsigned int attr)
73{
74 mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
75
76 return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
77}
78
Antonio Nino Diazc0033282018-11-20 16:03:11 +000079int mmap_add_dynamic_region_alloc_va(unsigned long long base_pa,
80 uintptr_t *base_va, size_t size,
81 unsigned int attr)
82{
83 mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);
84
85 int rc = mmap_add_dynamic_region_alloc_va_ctx(&tf_xlat_ctx, &mm);
86
87 *base_va = mm.base_va;
88
89 return rc;
90}
91
92
Antonio Nino Diazf1b84f62018-07-03 11:58:49 +010093int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
94{
95 return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx,
96 base_va, size);
97}
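
/*
 * Example (illustrative sketch): map a buffer at runtime, use it, then unmap
 * it. NS_BUF_PA, NS_BUF_VA and NS_BUF_SIZE are hypothetical values. Both
 * calls return 0 on success and a negative error code otherwise.
 *
 *      int ret = mmap_add_dynamic_region(NS_BUF_PA, NS_BUF_VA, NS_BUF_SIZE,
 *                                        MT_MEMORY | MT_RW | MT_NS);
 *      if (ret == 0) {
 *              (access the buffer through NS_BUF_VA)
 *              ret = mmap_remove_dynamic_region(NS_BUF_VA, NS_BUF_SIZE);
 *      }
 */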
#endif /* PLAT_XLAT_TABLES_DYNAMIC */

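/*
 * Initialise the translation tables of the default context for the exception
 * level this image runs at. Must be called exactly once, with the MMU still
 * disabled, after all static regions have been added.
 */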
void __init init_xlat_tables(void)
{
        assert(tf_xlat_ctx.xlat_regime == EL_REGIME_INVALID);

        unsigned int current_el = xlat_arch_current_el();

        if (current_el == 1U) {
                tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME;
        } else if (current_el == 2U) {
                tf_xlat_ctx.xlat_regime = EL2_REGIME;
        } else {
                assert(current_el == 3U);
                tf_xlat_ctx.xlat_regime = EL3_REGIME;
        }

        init_xlat_tables_ctx(&tf_xlat_ctx);
}
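
/*
 * Typical boot-time sequence (illustrative sketch for an image running at
 * EL3; the region values and the platform mmap array are hypothetical):
 *
 *      mmap_add_region(BL_CODE_BASE, BL_CODE_BASE,
 *                      BL_CODE_END - BL_CODE_BASE,
 *                      MT_CODE | MT_SECURE);
 *      mmap_add(plat_mmap);    (zero-terminated array of mmap_region_t)
 *      init_xlat_tables();
 *      enable_mmu_el3(0U);
 */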

int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr)
{
        return xlat_get_mem_attributes_ctx(&tf_xlat_ctx, base_va, attr);
}

int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)
{
        return xlat_change_mem_attributes_ctx(&tf_xlat_ctx, base_va, size, attr);
}
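
/*
 * Example (illustrative sketch): remap one hypothetical page at va as
 * read-only data, then read the resulting attributes back.
 *
 *      uint32_t attr;
 *      int ret;
 *
 *      ret = xlat_change_mem_attributes(va, PAGE_SIZE,
 *                                       MT_RO_DATA | MT_SECURE);
 *      if (ret == 0) {
 *              ret = xlat_get_mem_attributes(va, &attr);
 *      }
 */
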
#if PLAT_RO_XLAT_TABLES
/* Change the memory attributes of the descriptors which resolve the address
 * range that belongs to the translation tables themselves, which are by default
 * mapped as part of read-write data in the BL image's memory.
 *
 * Since the translation tables map themselves via these level 3 (page)
 * descriptors, any change applied to them with the MMU on would introduce a
 * chicken and egg problem because of the break-before-make sequence.
 * Eventually, it would reach the descriptor that resolves the very table it
 * belongs to and the invalidation (break step) would cause the subsequent write
 * (make step) to it to generate an MMU fault. Therefore, the MMU is disabled
 * before making the change.
 *
 * No assumption is made about what data this function needs, therefore all the
 * caches are flushed in order to ensure coherency. A future optimization would
 * be to only flush the required data to main memory.
 */
int xlat_make_tables_readonly(void)
{
        assert(tf_xlat_ctx.initialized == true);
#ifdef __aarch64__
        if (tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME) {
                disable_mmu_el1();
        } else if (tf_xlat_ctx.xlat_regime == EL3_REGIME) {
                disable_mmu_el3();
        } else {
                assert(tf_xlat_ctx.xlat_regime == EL2_REGIME);
                return -1;
        }

        /* Flush all caches. */
        dcsw_op_all(DCCISW);
#else /* !__aarch64__ */
        assert(tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME);
        /* On AArch32, we flush the caches before disabling the MMU. The reason
         * for this is that the dcsw_op_all AArch32 function pushes some
         * registers onto the stack under the assumption that it is writing to
         * cache, which is not true with the MMU off. This would result in the
         * stack becoming corrupted and a wrong/junk value for the LR being
         * restored at the end of the routine.
         */
        dcsw_op_all(DC_OP_CISW);
        disable_mmu_secure();
#endif

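        /* With the MMU now off, remap the translation tables themselves as
         * read-only data. */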
        int rc = xlat_change_mem_attributes_ctx(&tf_xlat_ctx,
                                        (uintptr_t)tf_xlat_ctx.tables,
                                        tf_xlat_ctx.tables_num * XLAT_TABLE_SIZE,
                                        MT_RO_DATA | MT_SECURE);

#ifdef __aarch64__
        if (tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME) {
                enable_mmu_el1(0U);
        } else {
                assert(tf_xlat_ctx.xlat_regime == EL3_REGIME);
                enable_mmu_el3(0U);
        }
#else /* !__aarch64__ */
        enable_mmu_svc_mon(0U);
#endif

        if (rc == 0) {
                tf_xlat_ctx.readonly_tables = true;
        }

        return rc;
}
#endif /* PLAT_RO_XLAT_TABLES */

/*
 * If dynamic allocation of new regions is disabled then by the time we call the
 * function enabling the MMU, we'll have registered all the memory regions to
 * map for the system's lifetime. Therefore, at this point we know the maximum
 * physical address that will ever be mapped.
 *
 * If dynamic allocation is enabled then we can't make any such assumption
 * because the maximum physical address could get pushed higher while adding a
 * new region. Therefore, in this case we have to assume that the whole address
 * space size might be mapped.
 */
#ifdef PLAT_XLAT_TABLES_DYNAMIC
#define MAX_PHYS_ADDR   tf_xlat_ctx.pa_max_address
#else
#define MAX_PHYS_ADDR   tf_xlat_ctx.max_pa
#endif

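/*
 * Program the MMU configuration registers for the default context, then
 * enable the MMU at the appropriate privilege level. 'flags' takes the
 * MMU-enable options from the xlat tables headers, e.g. DISABLE_DCACHE.
 */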
#ifdef __aarch64__

void enable_mmu_el1(unsigned int flags)
{
        setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
                      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
                      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
        enable_mmu_direct_el1(flags);
}

void enable_mmu_el2(unsigned int flags)
{
        setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
                      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
                      tf_xlat_ctx.va_max_address, EL2_REGIME);
        enable_mmu_direct_el2(flags);
}

void enable_mmu_el3(unsigned int flags)
{
        setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
                      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
                      tf_xlat_ctx.va_max_address, EL3_REGIME);
        enable_mmu_direct_el3(flags);
}

#else /* !__aarch64__ */

void enable_mmu_svc_mon(unsigned int flags)
{
        setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
                      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
                      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
        enable_mmu_direct_svc_mon(flags);
}

void enable_mmu_hyp(unsigned int flags)
{
        setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
                      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
                      tf_xlat_ctx.va_max_address, EL2_REGIME);
        enable_mmu_direct_hyp(flags);
}

#endif /* __aarch64__ */