/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <cassert.h>
#include <common_def.h>
#include <platform_def.h>
#include <sys/types.h>
#include <utils.h>
#include <xlat_tables_v2.h>
#include "../xlat_tables_private.h"

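/*
 * BL1 and BL31 run at EL3; every other image that uses this library (e.g.
 * BL2) runs at EL1.
 */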
#if defined(IMAGE_BL1) || defined(IMAGE_BL31)
# define IMAGE_EL	3
#else
# define IMAGE_EL	1
#endif

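/*
 * Return the TCR PS/IPS field encoding that covers the given maximum
 * physical address. E.g. a platform whose highest physical address is
 * 0xF_FFFF_FFFF (36 address bits) only sets bits in ADDR_MASK_32_TO_35,
 * so TCR_PS_BITS_64GB is returned.
 */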
static unsigned long long calc_physical_addr_size_bits(
					unsigned long long max_addr)
{
	/* Physical address can't exceed 48 bits */
	assert((max_addr & ADDR_MASK_48_TO_63) == 0);

	/* 48 bits address */
	if (max_addr & ADDR_MASK_44_TO_47)
		return TCR_PS_BITS_256TB;

	/* 44 bits address */
	if (max_addr & ADDR_MASK_42_TO_43)
		return TCR_PS_BITS_16TB;

	/* 42 bits address */
	if (max_addr & ADDR_MASK_40_TO_41)
		return TCR_PS_BITS_4TB;

	/* 40 bits address */
	if (max_addr & ADDR_MASK_36_TO_39)
		return TCR_PS_BITS_1TB;

	/* 36 bits address */
	if (max_addr & ADDR_MASK_32_TO_35)
		return TCR_PS_BITS_64GB;

	return TCR_PS_BITS_4GB;
}

#if ENABLE_ASSERTIONS
/* Physical Address ranges supported in the AArch64 Memory Model */
static const unsigned int pa_range_bits_arr[] = {
	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
	PARANGE_0101
};

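/*
 * Return the maximum physical address the CPU supports, as an all-ones
 * address mask. ID_AA64MMFR0_EL1.PARange encodes the implemented PA size;
 * e.g. a value of 0b0101 means a 48-bit physical address space, so this
 * returns 2^48 - 1.
 */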
unsigned long long xlat_arch_get_max_supported_pa(void)
{
	u_register_t pa_range = read_id_aa64mmfr0_el1() &
			ID_AA64MMFR0_EL1_PARANGE_MASK;

	/* All other values are reserved */
	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));

	return (1ull << pa_range_bits_arr[pa_range]) - 1ull;
}
#endif /* ENABLE_ASSERTIONS */

int is_mmu_enabled(void)
{
#if IMAGE_EL == 1
	assert(IS_IN_EL(1));
	return (read_sctlr_el1() & SCTLR_M_BIT) != 0;
#elif IMAGE_EL == 3
	assert(IS_IN_EL(3));
	return (read_sctlr_el3() & SCTLR_M_BIT) != 0;
#endif
}

#if PLAT_XLAT_TABLES_DYNAMIC

void xlat_arch_tlbi_va(uintptr_t va)
{
	/*
	 * Ensure the translation table write has drained into memory before
	 * invalidating the TLB entry.
	 */
	dsbishst();

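	/*
	 * Invalidate by VA in the Inner Shareable domain: the EL1 variant
	 * targets all ASIDs in the EL1&0 translation regime, the EL3 variant
	 * targets the EL3 regime.
	 */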
#if IMAGE_EL == 1
	assert(IS_IN_EL(1));
	tlbivaae1is(TLBI_ADDR(va));
#elif IMAGE_EL == 3
	assert(IS_IN_EL(3));
	tlbivae3is(TLBI_ADDR(va));
#endif
}

void xlat_arch_tlbi_va_sync(void)
{
	/*
	 * A TLB maintenance instruction can complete at any time after
	 * it is issued, but is only guaranteed to be complete after the
	 * execution of DSB by the PE that executed the TLB maintenance
	 * instruction. After the TLB invalidate instruction is
	 * complete, no new memory accesses using the invalidated TLB
	 * entries will be observed by any observer of the system
	 * domain. See section D4.8.2 of the ARMv8 ARM (issue k), paragraph
	 * "Ordering and completion of TLB maintenance instructions".
	 */
	dsbish();

	/*
	 * The effects of a completed TLB maintenance instruction are
	 * only guaranteed to be visible on the PE that executed the
	 * instruction after the execution of an ISB instruction by the
	 * PE that executed the TLB maintenance instruction.
	 */
	isb();
}

#endif /* PLAT_XLAT_TABLES_DYNAMIC */

int xlat_arch_current_el(void)
{
	int el = GET_EL(read_CurrentEl());

	assert(el > 0);

	return el;
}

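/*
 * Return the execute-never descriptor attribute for the given EL: the EL3
 * translation regime has a single XN bit, while in the EL1&0 regime PXN
 * forbids execution at EL1, the level this library runs at.
 */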
uint64_t xlat_arch_get_xn_desc(int el)
{
	if (el == 3) {
		return UPPER_ATTRS(XN);
	} else {
		assert(el == 1);
		return UPPER_ATTRS(PXN);
	}
}

/*******************************************************************************
 * Macro generating the code for the function enabling the MMU in the given
 * exception level, assuming that the page tables have already been created.
 *
 * _el:		Exception level at which the function will run
 * _tlbi_fct:	Function to invalidate the TLBs at the current
 *		exception level
 ******************************************************************************/
#define DEFINE_ENABLE_MMU_EL(_el, _tlbi_fct)				\
	static void enable_mmu_internal_el##_el(int flags,		\
						uint64_t mair,		\
						uint64_t tcr,		\
						uint64_t ttbr)		\
	{								\
		uint32_t sctlr = read_sctlr_el##_el();			\
		assert((sctlr & SCTLR_M_BIT) == 0);			\
									\
		/* Invalidate TLBs at the current exception level */	\
		_tlbi_fct();						\
									\
		write_mair_el##_el(mair);				\
		write_tcr_el##_el(tcr);					\
		write_ttbr0_el##_el(ttbr);				\
									\
		/* Ensure all translation table writes have drained */	\
		/* into memory, the TLB invalidation is complete, */	\
		/* and translation register writes are committed */	\
		/* before enabling the MMU */				\
		dsbish();						\
		isb();							\
									\
		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;			\
		if (flags & DISABLE_DCACHE)				\
			sctlr &= ~SCTLR_C_BIT;				\
		else							\
			sctlr |= SCTLR_C_BIT;				\
									\
		write_sctlr_el##_el(sctlr);				\
									\
		/* Ensure the MMU enable takes effect immediately */	\
		isb();							\
	}

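/*
 * E.g. DEFINE_ENABLE_MMU_EL(3, tlbialle3) expands to a function
 * enable_mmu_internal_el3() that programs mair_el3, tcr_el3 and ttbr0_el3
 * before setting the M bit in sctlr_el3.
 */
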
/* Define EL1 and EL3 variants of the function enabling the MMU */
#if IMAGE_EL == 1
DEFINE_ENABLE_MMU_EL(1, tlbivmalle1)
#elif IMAGE_EL == 3
DEFINE_ENABLE_MMU_EL(3, tlbialle3)
#endif

void enable_mmu_arch(unsigned int flags,
		uint64_t *base_table,
		unsigned long long max_pa,
		uintptr_t max_va)
{
	uint64_t mair, ttbr, tcr;

	/* Set attributes in the right indices of the MAIR. */
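	/*
	 * Each MAIR field is one byte wide: MAIR_ATTR_SET() shifts the 8-bit
	 * attribute encoding into byte <index> of the register.
	 */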
	mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
	mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX);

	ttbr = (uint64_t) base_table;

	/*
	 * Set TCR bits as well.
	 */

	/*
	 * Limit the input address ranges and memory region sizes translated
	 * using TTBR0 to the given virtual address space size.
	 */
	assert(max_va < UINTPTR_MAX);
	uintptr_t virtual_addr_space_size = max_va + 1;
	assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
	/*
	 * __builtin_ctzl(0) is undefined but here we are guaranteed that
	 * virtual_addr_space_size is in the range [1,UINTPTR_MAX].
	 */
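	/* E.g. a 1 GiB (2^30 byte) address space yields T0SZ = 64 - 30 = 34. */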
	tcr = 64 - __builtin_ctzl(virtual_addr_space_size);

	/*
	 * Set the cacheability and shareability attributes for memory
	 * associated with translation table walks.
	 */
	if (flags & XLAT_TABLE_NC) {
		/* Inner & outer non-cacheable non-shareable. */
		tcr |= TCR_SH_NON_SHAREABLE |
			TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC;
	} else {
		/* Inner & outer WBWA & shareable. */
		tcr |= TCR_SH_INNER_SHAREABLE |
			TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA;
	}

	/*
	 * It is safer to restrict the max physical address accessible by the
	 * hardware as much as possible.
	 */
	unsigned long long tcr_ps_bits = calc_physical_addr_size_bits(max_pa);

#if IMAGE_EL == 1
	assert(IS_IN_EL(1));
	tcr |= tcr_ps_bits << TCR_EL1_IPS_SHIFT;
	enable_mmu_internal_el1(flags, mair, tcr, ttbr);
#elif IMAGE_EL == 3
	assert(IS_IN_EL(3));
	tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT);
	enable_mmu_internal_el3(flags, mair, tcr, ttbr);
#endif
}