/*
 * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <cassert.h>
#include <common_def.h>
#include <platform_def.h>
#include <sys/types.h>
#include <utils.h>
#include <xlat_tables.h>
#include "../xlat_tables_private.h"

/*
 * Each platform can define the size of its virtual address space through
 * PLAT_VIRT_ADDR_SPACE_SIZE. TCR.TxSZ is calculated as 64 minus the width of
 * said address space. The value of TCR.TxSZ must be in the range 16 to 39 [1],
 * which means that the virtual address space width must be in the range 48 to
 * 25 bits.
 *
 * Here we calculate the initial lookup level from the value of
 * PLAT_VIRT_ADDR_SPACE_SIZE. For a 4 KB page size, level 0 supports virtual
 * address spaces of widths 48 to 40 bits, level 1 from 39 to 31, and level 2
 * from 30 to 25. Wider or narrower address spaces are not supported. As a
 * result, level 3 cannot be used as the initial lookup level with 4 KB
 * granularity [2].
 *
 * For example, for a 35-bit address space (i.e. PLAT_VIRT_ADDR_SPACE_SIZE ==
 * 1 << 35), TCR.TxSZ will be programmed to (64 - 35) = 29. According to Table
 * D4-11 in the ARM ARM, the initial lookup level for such an address space
 * is 1.
 *
 * See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
 * information:
 * [1] Page 1730: 'Input address size', 'For all translation stages'.
 * [2] Section D4.2.5
 */

#if PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << (64 - TCR_TxSZ_MIN))

# error "PLAT_VIRT_ADDR_SPACE_SIZE is too big."

#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << L0_XLAT_ADDRESS_SHIFT)

# define XLAT_TABLE_LEVEL_BASE	0
# define NUM_BASE_LEVEL_ENTRIES	\
		(PLAT_VIRT_ADDR_SPACE_SIZE >> L0_XLAT_ADDRESS_SHIFT)

#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << L1_XLAT_ADDRESS_SHIFT)

# define XLAT_TABLE_LEVEL_BASE	1
# define NUM_BASE_LEVEL_ENTRIES	\
		(PLAT_VIRT_ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)

#elif PLAT_VIRT_ADDR_SPACE_SIZE >= (1ULL << (64 - TCR_TxSZ_MAX))

# define XLAT_TABLE_LEVEL_BASE	2
# define NUM_BASE_LEVEL_ENTRIES	\
		(PLAT_VIRT_ADDR_SPACE_SIZE >> L2_XLAT_ADDRESS_SHIFT)

#else

# error "PLAT_VIRT_ADDR_SPACE_SIZE is too small."

#endif

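/*
 * Worked example (for illustration, assuming a 4 KB granule, where
 * L1_XLAT_ADDRESS_SHIFT == 30): a platform with a 35-bit virtual address
 * space (PLAT_VIRT_ADDR_SPACE_SIZE == 1ULL << 35) selects
 * XLAT_TABLE_LEVEL_BASE == 1 and NUM_BASE_LEVEL_ENTRIES ==
 * (1ULL << 35) >> 30 == 32. The base table below then holds 32 64-bit
 * descriptors, and its 32 * 8 == 256-byte alignment satisfies the
 * architectural requirement that the table pointed to by TTBR be aligned
 * to its own size.
 */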
static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));

static unsigned long long tcr_ps_bits;

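/*
 * Map the highest physical address in use to the TCR.{I}PS encoding of the
 * smallest supported PA range that contains it. Worked example (for
 * illustration): max_addr == 0xF_FFFF_FFFF (a 36-bit address) has bits set
 * in ADDR_MASK_32_TO_35 and in no higher mask, so the function below
 * returns TCR_PS_BITS_64GB, i.e. a 36-bit / 64 GB physical address range.
 */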
static unsigned long long calc_physical_addr_size_bits(
					unsigned long long max_addr)
{
	/* Physical address can't exceed 48 bits */
	assert((max_addr & ADDR_MASK_48_TO_63) == 0);

	/* 48 bits address */
	if (max_addr & ADDR_MASK_44_TO_47)
		return TCR_PS_BITS_256TB;

	/* 44 bits address */
	if (max_addr & ADDR_MASK_42_TO_43)
		return TCR_PS_BITS_16TB;

	/* 42 bits address */
	if (max_addr & ADDR_MASK_40_TO_41)
		return TCR_PS_BITS_4TB;

	/* 40 bits address */
	if (max_addr & ADDR_MASK_36_TO_39)
		return TCR_PS_BITS_1TB;

	/* 36 bits address */
	if (max_addr & ADDR_MASK_32_TO_35)
		return TCR_PS_BITS_64GB;

	return TCR_PS_BITS_4GB;
}

#if ENABLE_ASSERTIONS
/* Physical Address ranges supported in the AArch64 Memory Model */
static const unsigned int pa_range_bits_arr[] = {
	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
	PARANGE_0101
};

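/*
 * Read ID_AA64MMFR0_EL1.PARange and translate it into the largest physical
 * address this CPU supports. For example (illustrative): a PARange field of
 * 0b0010 selects PARANGE_0010 (40 bits), so the largest supported physical
 * address is (1ULL << 40) - 1.
 */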
static unsigned long long get_max_supported_pa(void)
{
	u_register_t pa_range = read_id_aa64mmfr0_el1() &
						ID_AA64MMFR0_EL1_PARANGE_MASK;

	/* All other values are reserved */
	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));

	return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
}
#endif /* ENABLE_ASSERTIONS */

int xlat_arch_current_el(void)
{
	int el = GET_EL(read_CurrentEl());

	assert(el > 0);

	return el;
}

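/*
 * Return the execute-never descriptor attribute for the given translation
 * regime. At EL3 there is a single XN bit; in the EL1&0 regime, PXN is used
 * so that a mapping can be made non-executable at EL1 without also making
 * it non-executable at EL0 (which is what UXN controls).
 */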
uint64_t xlat_arch_get_xn_desc(int el)
{
	if (el == 3) {
		return UPPER_ATTRS(XN);
	} else {
		assert(el == 1);
		return UPPER_ATTRS(PXN);
	}
}

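/*
 * Typical call sequence for this library (a sketch; the exact hooks are
 * platform-specific): the platform first registers its memory regions with
 * mmap_add()/mmap_add_region(), then calls init_xlat_tables() below to
 * populate base_xlation_table, and finally calls the enable_mmu_elx()
 * variant matching its exception level.
 */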
void init_xlat_tables(void)
{
	unsigned long long max_pa;
	uintptr_t max_va;
	print_mmap();
	init_xlation_table(0, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
			   &max_va, &max_pa);

	assert(max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
	assert(max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);
	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <= get_max_supported_pa());

	tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
}

/*******************************************************************************
 * Macro generating the code for the function enabling the MMU at the given
 * exception level, assuming that the translation tables have already been
 * created. A macro is used rather than a function taking the EL as a runtime
 * parameter because the system registers to program (e.g. sctlr_el3 vs
 * sctlr_el1) are selected by name at compile time via token pasting.
 *
 *	_el:		Exception level at which the function will run
 *	_tcr_extra:	Extra bits to set in the TCR register. This mask will
 *			be OR'ed with the default TCR value.
 *	_tlbi_fct:	Function to invalidate the TLBs at the current
 *			exception level
 ******************************************************************************/
#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)		\
	void enable_mmu_el##_el(unsigned int flags)			\
	{								\
		uint64_t mair, tcr, ttbr;				\
		uint32_t sctlr;						\
									\
		assert(IS_IN_EL(_el));					\
		assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0);	\
									\
		/* Set attributes in the right indices of the MAIR */	\
		mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);	\
		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,		\
				ATTR_IWBWA_OWBWA_NTR_INDEX);		\
		mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE,		\
				ATTR_NON_CACHEABLE_INDEX);		\
		write_mair_el##_el(mair);				\
									\
		/* Invalidate TLBs at the current exception level */	\
		_tlbi_fct();						\
									\
		/* Set TCR bits as well. */				\
		/* Set T0SZ to (64 - width of virtual address space). */\
		/* PLAT_VIRT_ADDR_SPACE_SIZE must be a power of two */	\
		/* for __builtin_ctzl() to yield that width. */		\
		if (flags & XLAT_TABLE_NC) {				\
			/* Inner & outer non-cacheable non-shareable. */\
			tcr = TCR_SH_NON_SHAREABLE |			\
				TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC |	\
				(64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE));\
		} else {						\
			/* Inner & outer WBWA & shareable. */		\
			tcr = TCR_SH_INNER_SHAREABLE |			\
				TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA |	\
				(64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE));\
		}							\
		tcr |= _tcr_extra;					\
		write_tcr_el##_el(tcr);					\
									\
		/* Set TTBR bits as well */				\
		ttbr = (uint64_t) base_xlation_table;			\
		write_ttbr0_el##_el(ttbr);				\
									\
		/* Ensure all translation table writes have drained */	\
		/* into memory, the TLB invalidation is complete, */	\
		/* and translation register writes are committed */	\
		/* before enabling the MMU */				\
		dsbish();						\
		isb();							\
									\
		sctlr = read_sctlr_el##_el();				\
		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;			\
									\
		if (flags & DISABLE_DCACHE)				\
			sctlr &= ~SCTLR_C_BIT;				\
		else							\
			sctlr |= SCTLR_C_BIT;				\
									\
		write_sctlr_el##_el(sctlr);				\
									\
		/* Ensure the MMU enable takes effect immediately */	\
		isb();							\
	}

/* Define EL1 and EL3 variants of the function enabling the MMU */
DEFINE_ENABLE_MMU_EL(1,
		(tcr_ps_bits << TCR_EL1_IPS_SHIFT),
		tlbivmalle1)
DEFINE_ENABLE_MMU_EL(3,
		TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
		tlbialle3)
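
/*
 * Example usage (illustrative only; the region base, size and attributes
 * below are hypothetical and platform-specific):
 *
 *	mmap_add_region(0x00000000, 0x00000000, 0x40000000,
 *			MT_MEMORY | MT_RW | MT_SECURE);
 *	init_xlat_tables();
 *	enable_mmu_el3(0);	(or enable_mmu_el1(0) in an image run at EL1)
 */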