/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <cassert.h>
#include <common_def.h>
#include <platform_def.h>
#include <sys/types.h>
#include <utils.h>
#include <xlat_tables_v2.h>
#include "../xlat_tables_private.h"

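/*
 * BL1 and BL31 run at EL3; the other images that can use this library run at
 * EL1, so the translation regime to manage is chosen at build time.
 */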
#if defined(IMAGE_BL1) || defined(IMAGE_BL31)
# define IMAGE_EL	3
#else
# define IMAGE_EL	1
#endif

static unsigned long long tcr_ps_bits;

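/*
 * Return the encoding of the physical address size to program into the
 * TCR_ELx.{I}PS field for the given maximum physical address.
 */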
static unsigned long long calc_physical_addr_size_bits(
					unsigned long long max_addr)
{
	/* Physical address can't exceed 48 bits */
	assert((max_addr & ADDR_MASK_48_TO_63) == 0);

	/* 48 bits address */
	if (max_addr & ADDR_MASK_44_TO_47)
		return TCR_PS_BITS_256TB;

	/* 44 bits address */
	if (max_addr & ADDR_MASK_42_TO_43)
		return TCR_PS_BITS_16TB;

	/* 42 bits address */
	if (max_addr & ADDR_MASK_40_TO_41)
		return TCR_PS_BITS_4TB;

	/* 40 bits address */
	if (max_addr & ADDR_MASK_36_TO_39)
		return TCR_PS_BITS_1TB;

	/* 36 bits address */
	if (max_addr & ADDR_MASK_32_TO_35)
		return TCR_PS_BITS_64GB;

	return TCR_PS_BITS_4GB;
}

#if ENABLE_ASSERTIONS
/* Physical Address ranges supported in the AArch64 Memory Model */
static const unsigned int pa_range_bits_arr[] = {
	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
	PARANGE_0101
};

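/*
 * Return the highest physical address that the hardware supports, as reported
 * by the ID_AA64MMFR0_EL1.PARange field.
 */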
static unsigned long long xlat_arch_get_max_supported_pa(void)
{
	u_register_t pa_range = read_id_aa64mmfr0_el1() &
						ID_AA64MMFR0_EL1_PARANGE_MASK;

	/* All other values are reserved */
	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));

	return (1ull << pa_range_bits_arr[pa_range]) - 1ull;
}
#endif /* ENABLE_ASSERTIONS */

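/* Return 1 if the MMU of the current translation regime is enabled. */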
int is_mmu_enabled(void)
{
#if IMAGE_EL == 1
	assert(IS_IN_EL(1));
	return (read_sctlr_el1() & SCTLR_M_BIT) != 0;
#elif IMAGE_EL == 3
	assert(IS_IN_EL(3));
	return (read_sctlr_el3() & SCTLR_M_BIT) != 0;
#endif
}

#if PLAT_XLAT_TABLES_DYNAMIC

void xlat_arch_tlbi_va(uintptr_t va)
{
	/*
	 * Ensure the translation table write has drained into memory before
	 * invalidating the TLB entry.
	 */
	dsbishst();

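	/*
	 * In the EL1&0 translation regime, invalidate the entry for all ASIDs
	 * (TLBI VAAE1IS), as this library does not manage ASIDs.
	 */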
#if IMAGE_EL == 1
	assert(IS_IN_EL(1));
	tlbivaae1is(TLBI_ADDR(va));
#elif IMAGE_EL == 3
	assert(IS_IN_EL(3));
	tlbivae3is(TLBI_ADDR(va));
#endif
}

void xlat_arch_tlbi_va_sync(void)
{
	/*
	 * A TLB maintenance instruction can complete at any time after
	 * it is issued, but is only guaranteed to be complete after the
	 * execution of DSB by the PE that executed the TLB maintenance
	 * instruction. After the TLB invalidate instruction is
	 * complete, no new memory accesses using the invalidated TLB
	 * entries will be observed by any observer of the system
	 * domain. See section D4.8.2 of the ARMv8 ARM (issue k), paragraph
	 * "Ordering and completion of TLB maintenance instructions".
	 */
	dsbish();

	/*
	 * The effects of a completed TLB maintenance instruction are
	 * only guaranteed to be visible on the PE that executed the
	 * instruction after the execution of an ISB instruction by the
	 * PE that executed the TLB maintenance instruction.
	 */
	isb();
}

#endif /* PLAT_XLAT_TABLES_DYNAMIC */

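/* Return the current exception level; this library supports EL1 and EL3. */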
int xlat_arch_current_el(void)
{
	int el = GET_EL(read_CurrentEl());

	assert(el > 0);

	return el;
}

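/*
 * Return the execute-never mask that applies to the given translation regime:
 * the EL3 regime has a single XN bit, while the EL1&0 regime uses PXN so that
 * the mapping is non-executable at EL1 in particular.
 */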
uint64_t xlat_arch_get_xn_desc(int el)
{
	if (el == 3) {
		return UPPER_ATTRS(XN);
	} else {
		assert(el == 1);
		return UPPER_ATTRS(PXN);
	}
}

void init_xlat_tables_arch(unsigned long long max_pa)
{
	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <=
	       xlat_arch_get_max_supported_pa());

	/*
	 * If dynamic allocation of new regions is enabled the code can't make
	 * assumptions about the max physical address because it could change
	 * after adding new regions. If this functionality is disabled it is
	 * safer to restrict the max physical address as much as possible.
	 */
#if PLAT_XLAT_TABLES_DYNAMIC
	tcr_ps_bits = calc_physical_addr_size_bits(PLAT_PHY_ADDR_SPACE_SIZE);
#else
	tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
#endif
}

/*******************************************************************************
 * Macro generating the code for the function enabling the MMU in the given
 * exception level, assuming that the translation tables have already been
 * created.
 *
 * _el:		Exception level at which the function will run
 * _tcr_extra:	Extra bits to set in the TCR register. This mask will
 *		be OR'ed with the default TCR value.
 * _tlbi_fct:	Function to invalidate the TLBs at the current
 *		exception level
 ******************************************************************************/
#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)		\
	void enable_mmu_internal_el##_el(unsigned int flags,		\
					 uint64_t *base_table)		\
	{								\
		uint64_t mair, tcr, ttbr;				\
		uint32_t sctlr;						\
									\
		assert(IS_IN_EL(_el));					\
		assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0);	\
									\
		/* Invalidate TLBs at the current exception level */	\
		_tlbi_fct();						\
									\
		/* Set attributes in the right indices of the MAIR */	\
		mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);	\
		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,		\
				ATTR_IWBWA_OWBWA_NTR_INDEX);		\
		mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE,		\
				ATTR_NON_CACHEABLE_INDEX);		\
		write_mair_el##_el(mair);				\
									\
		/* Set TCR bits as well. */				\
		/* Set T0SZ to (64 - width of virtual address space) */\
		if (flags & XLAT_TABLE_NC) {				\
			/* Inner & outer non-cacheable non-shareable. */\
			tcr = TCR_SH_NON_SHAREABLE |			\
				TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC |	\
				(64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE));\
		} else {						\
			/* Inner & outer WBWA & shareable. */		\
			tcr = TCR_SH_INNER_SHAREABLE |			\
				TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA |	\
				(64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE));\
		}							\
		tcr |= _tcr_extra;					\
		write_tcr_el##_el(tcr);					\
									\
		/* Set TTBR bits as well */				\
		ttbr = (uint64_t) base_table;				\
		write_ttbr0_el##_el(ttbr);				\
									\
		/* Ensure all translation table writes have drained */	\
		/* into memory, the TLB invalidation is complete, */	\
		/* and translation register writes are committed */	\
		/* before enabling the MMU */				\
		dsbish();						\
		isb();							\
									\
		sctlr = read_sctlr_el##_el();				\
		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;			\
									\
		if (flags & DISABLE_DCACHE)				\
			sctlr &= ~SCTLR_C_BIT;				\
		else							\
			sctlr |= SCTLR_C_BIT;				\
									\
		write_sctlr_el##_el(sctlr);				\
									\
		/* Ensure the MMU enable takes effect immediately */	\
		isb();							\
	}
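/*
 * Note that the T0SZ value programmed above relies on
 * PLAT_VIRT_ADDR_SPACE_SIZE being a power of two, so that __builtin_ctzl()
 * yields the width of the virtual address space.
 */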

/* Define EL1 and EL3 variants of the function enabling the MMU */
#if IMAGE_EL == 1
DEFINE_ENABLE_MMU_EL(1,
		(tcr_ps_bits << TCR_EL1_IPS_SHIFT),
		tlbivmalle1)
#elif IMAGE_EL == 3
DEFINE_ENABLE_MMU_EL(3,
		TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
		tlbialle3)
#endif

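/* Enable the MMU in the exception level the calling image runs at. */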
void enable_mmu_arch(unsigned int flags, uint64_t *base_table)
{
#if IMAGE_EL == 1
	assert(IS_IN_EL(1));
	enable_mmu_internal_el1(flags, base_table);
#elif IMAGE_EL == 3
	assert(IS_IN_EL(3));
	enable_mmu_internal_el3(flags, base_table);
#endif
}