/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <cassert.h>
#include <common_def.h>
#include <platform_def.h>
#include <sys/types.h>
#include <utils.h>
#include <xlat_tables_v2.h>
#include "../xlat_tables_private.h"

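/*
 * BL1 and BL31 run at EL3; any other image handled by this library (e.g.
 * BL2) is assumed to run at EL1.
 */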
#if defined(IMAGE_BL1) || defined(IMAGE_BL31)
# define IMAGE_EL	3
#else
# define IMAGE_EL	1
#endif

/* Physical address size field programmed into TCR_ELx.{I}PS, computed by
 * init_xlat_tables_arch(). */
static unsigned long long tcr_ps_bits;

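/*
 * Worked example: a maximum physical address of 0x3FFFFFFFF (34 bits) has
 * bits set in the [32:35] range, so the function below returns
 * TCR_PS_BITS_64GB (a 36-bit physical address size).
 */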
static unsigned long long calc_physical_addr_size_bits(
		unsigned long long max_addr)
{
	/* Physical address can't exceed 48 bits */
	assert((max_addr & ADDR_MASK_48_TO_63) == 0);

	/* 48 bits address */
	if (max_addr & ADDR_MASK_44_TO_47)
		return TCR_PS_BITS_256TB;

	/* 44 bits address */
	if (max_addr & ADDR_MASK_42_TO_43)
		return TCR_PS_BITS_16TB;

	/* 42 bits address */
	if (max_addr & ADDR_MASK_40_TO_41)
		return TCR_PS_BITS_4TB;

	/* 40 bits address */
	if (max_addr & ADDR_MASK_36_TO_39)
		return TCR_PS_BITS_1TB;

	/* 36 bits address */
	if (max_addr & ADDR_MASK_32_TO_35)
		return TCR_PS_BITS_64GB;

	return TCR_PS_BITS_4GB;
}

#if ENABLE_ASSERTIONS
/* Physical Address ranges supported in the AArch64 Memory Model */
static const unsigned int pa_range_bits_arr[] = {
	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
	PARANGE_0101
};

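/*
 * For example, a PARANGE field of 0b0010 encodes a 40-bit physical address
 * range, so the function below returns 2^40 - 1.
 */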
unsigned long long xlat_arch_get_max_supported_pa(void)
{
	u_register_t pa_range = read_id_aa64mmfr0_el1() &
			ID_AA64MMFR0_EL1_PARANGE_MASK;

	/* All other values are reserved */
	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));

	return (1ull << pa_range_bits_arr[pa_range]) - 1ull;
}
#endif /* ENABLE_ASSERTIONS */

int is_mmu_enabled(void)
{
#if IMAGE_EL == 1
	assert(IS_IN_EL(1));
	return (read_sctlr_el1() & SCTLR_M_BIT) != 0;
#elif IMAGE_EL == 3
	assert(IS_IN_EL(3));
	return (read_sctlr_el3() & SCTLR_M_BIT) != 0;
#endif
}

#if PLAT_XLAT_TABLES_DYNAMIC

void xlat_arch_tlbi_va(uintptr_t va)
{
	/*
	 * Ensure the translation table write has drained into memory before
	 * invalidating the TLB entry.
	 */
	dsbishst();

#if IMAGE_EL == 1
	assert(IS_IN_EL(1));
	tlbivaae1is(TLBI_ADDR(va));
#elif IMAGE_EL == 3
	assert(IS_IN_EL(3));
	tlbivae3is(TLBI_ADDR(va));
#endif
}

void xlat_arch_tlbi_va_sync(void)
{
	/*
	 * A TLB maintenance instruction can complete at any time after
	 * it is issued, but is only guaranteed to be complete after the
	 * execution of DSB by the PE that executed the TLB maintenance
	 * instruction. After the TLB invalidate instruction is
	 * complete, no new memory accesses using the invalidated TLB
	 * entries will be observed by any observer of the system
	 * domain. See section D4.8.2 of the ARMv8 ARM (issue k),
	 * paragraph "Ordering and completion of TLB maintenance
	 * instructions".
	 */
	dsbish();

	/*
	 * The effects of a completed TLB maintenance instruction are
	 * only guaranteed to be visible on the PE that executed the
	 * instruction after the execution of an ISB instruction by the
	 * PE that executed the TLB maintenance instruction.
	 */
	isb();
}
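
/*
 * Expected usage (a sketch of the caller's side, not code in this file):
 * issue one TLB invalidation per modified translation, then synchronise
 * once at the end:
 *
 *	xlat_arch_tlbi_va(va);
 *	xlat_arch_tlbi_va(va + PAGE_SIZE);
 *	...
 *	xlat_arch_tlbi_va_sync();
 */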

#endif /* PLAT_XLAT_TABLES_DYNAMIC */

void init_xlat_tables_arch(unsigned long long max_pa)
{
	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <=
	       xlat_arch_get_max_supported_pa());

	/*
	 * If dynamic allocation of new regions is enabled the code can't make
	 * assumptions about the max physical address because it could change
	 * after adding new regions. If this functionality is disabled it is
	 * safer to restrict the max physical address as much as possible.
	 */
#if PLAT_XLAT_TABLES_DYNAMIC
	tcr_ps_bits = calc_physical_addr_size_bits(PLAT_PHY_ADDR_SPACE_SIZE);
#else
	tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
#endif
}

/*******************************************************************************
 * Macro generating the code for the function enabling the MMU in the given
 * exception level, assuming that the pagetables have already been created.
 *
 * _el:		Exception level at which the function will run
 * _tcr_extra:	Extra bits to set in the TCR register. This mask will
 *		be OR'ed with the default TCR value.
 * _tlbi_fct:	Function to invalidate the TLBs at the current
 *		exception level
 ******************************************************************************/
#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)		\
	void enable_mmu_internal_el##_el(unsigned int flags,		\
					 uint64_t *base_table)		\
	{								\
		uint64_t mair, tcr, ttbr;				\
		uint32_t sctlr;						\
									\
		assert(IS_IN_EL(_el));					\
		assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0);	\
									\
		/* Invalidate TLBs at the current exception level */	\
		_tlbi_fct();						\
									\
		/* Set attributes in the right indices of the MAIR */	\
		mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);	\
		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,		\
				ATTR_IWBWA_OWBWA_NTR_INDEX);		\
		mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE,		\
				ATTR_NON_CACHEABLE_INDEX);		\
		write_mair_el##_el(mair);				\
									\
		/* Set TCR bits as well. */				\
		/* Set T0SZ to (64 - width of virtual address space) */	\
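		/* (Assumes PLAT_VIRT_ADDR_SPACE_SIZE is a power of two.) */	\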
		if (flags & XLAT_TABLE_NC) {				\
			/* Inner & outer non-cacheable non-shareable. */\
			tcr = TCR_SH_NON_SHAREABLE |			\
				TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC |	\
				(64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE));\
		} else {						\
			/* Inner & outer WBWA & shareable. */		\
			tcr = TCR_SH_INNER_SHAREABLE |			\
				TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA |	\
				(64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE));\
		}							\
		tcr |= _tcr_extra;					\
		write_tcr_el##_el(tcr);					\
									\
		/* Set TTBR bits as well */				\
		ttbr = (uint64_t) base_table;				\
		write_ttbr0_el##_el(ttbr);				\
									\
		/* Ensure all translation table writes have drained */	\
		/* into memory, the TLB invalidation is complete, */	\
		/* and translation register writes are committed */	\
		/* before enabling the MMU */				\
		dsbish();						\
		isb();							\
									\
		sctlr = read_sctlr_el##_el();				\
		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;			\
									\
		if (flags & DISABLE_DCACHE)				\
			sctlr &= ~SCTLR_C_BIT;				\
		else							\
			sctlr |= SCTLR_C_BIT;				\
									\
		write_sctlr_el##_el(sctlr);				\
									\
		/* Ensure the MMU enable takes effect immediately */	\
		isb();							\
	}

/* Define EL1 and EL3 variants of the function enabling the MMU */
#if IMAGE_EL == 1
DEFINE_ENABLE_MMU_EL(1,
		(tcr_ps_bits << TCR_EL1_IPS_SHIFT),
		tlbivmalle1)
#elif IMAGE_EL == 3
DEFINE_ENABLE_MMU_EL(3,
		TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
		tlbialle3)
#endif

void enable_mmu_arch(unsigned int flags, uint64_t *base_table)
{
#if IMAGE_EL == 1
	assert(IS_IN_EL(1));
	enable_mmu_internal_el1(flags, base_table);
#elif IMAGE_EL == 3
	assert(IS_IN_EL(3));
	enable_mmu_internal_el3(flags, base_table);
#endif
}
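
/*
 * Illustrative call sequence (a sketch; enable_mmu_el1()/enable_mmu_el3() are
 * assumed to be the xlat_tables_v2 wrappers that end up calling
 * enable_mmu_arch() with the library's base translation table):
 *
 *	init_xlat_tables();
 *	enable_mmu_el3(0);
 */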