/*
 * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <cassert.h>
#include <common_def.h>
#include <platform_def.h>
#include <sys/types.h>
#include <utils.h>
#include <xlat_tables.h>
#include "../xlat_tables_private.h"

/*
 * Each platform can define the size of the virtual address space, which is
 * defined in PLAT_VIRT_ADDR_SPACE_SIZE. TCR.TxSZ is calculated as 64 minus the
 * width of said address space. The value of TCR.TxSZ must be in the range 16
 * to 39 [1], which means that the virtual address space width must be in the
 * range 48 to 25 bits.
 *
 * Here we calculate the initial lookup level from the value of
 * PLAT_VIRT_ADDR_SPACE_SIZE. For a 4 KB page size, level 0 supports virtual
 * address spaces of widths 48 to 40 bits, level 1 from 39 to 31, and level 2
 * from 30 to 25. Wider or narrower address spaces are not supported. As a
 * result, level 3 cannot be used as the initial lookup level with 4 KB
 * granularity [2].
 *
 * For example, for a 35-bit address space (i.e. PLAT_VIRT_ADDR_SPACE_SIZE ==
 * 1 << 35), TCR.TxSZ will be programmed to (64 - 35) = 29. According to Table
 * D4-11 in the ARM ARM, the initial lookup level for such an address space
 * is 1.
 *
 * See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
 * information:
 * [1] Page 1730: 'Input address size', 'For all translation stages'.
 * [2] Section D4.2.5
 */

#if PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << (64 - TCR_TxSZ_MIN))

# error "PLAT_VIRT_ADDR_SPACE_SIZE is too big."

#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << L0_XLAT_ADDRESS_SHIFT)

# define XLAT_TABLE_LEVEL_BASE	0
# define NUM_BASE_LEVEL_ENTRIES	\
		(PLAT_VIRT_ADDR_SPACE_SIZE >> L0_XLAT_ADDRESS_SHIFT)

#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << L1_XLAT_ADDRESS_SHIFT)

# define XLAT_TABLE_LEVEL_BASE	1
# define NUM_BASE_LEVEL_ENTRIES	\
		(PLAT_VIRT_ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)

#elif PLAT_VIRT_ADDR_SPACE_SIZE >= (1ULL << (64 - TCR_TxSZ_MAX))

# define XLAT_TABLE_LEVEL_BASE	2
# define NUM_BASE_LEVEL_ENTRIES	\
		(PLAT_VIRT_ADDR_SPACE_SIZE >> L2_XLAT_ADDRESS_SHIFT)

#else

# error "PLAT_VIRT_ADDR_SPACE_SIZE is too small."

#endif

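/*
 * Worked example (with an assumed PLAT_VIRT_ADDR_SPACE_SIZE == 1ULL << 35,
 * not a requirement): the initial lookup level is 1 and, with the 4 KB
 * granule used here, NUM_BASE_LEVEL_ENTRIES evaluates to
 * (1ULL << 35) >> L1_XLAT_ADDRESS_SHIFT, i.e. 32 entries of 8 bytes each.
 * The base table below is then 256 bytes in size and, per the VMSAv8-64
 * TTBR alignment rules, must be aligned to its own size, which is what the
 * __aligned attribute enforces.
 */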
static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));

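/*
 * Physical address size bits to program into TCR_ELx.{I}PS, derived from the
 * highest physical address actually mapped (see init_xlat_tables() below).
 */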
static unsigned long long tcr_ps_bits;

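/*
 * Given the highest physical address that needs to be mapped, return the
 * TCR.PS/IPS encoding of the smallest physical address range that covers it.
 * For example (illustrative value only), a platform whose highest mapped
 * physical address is 0x8FFFFFFFF has bit 35 set, matches ADDR_MASK_32_TO_35
 * and gets TCR_PS_BITS_64GB, i.e. a 36-bit physical address size.
 */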
static unsigned long long calc_physical_addr_size_bits(
					unsigned long long max_addr)
{
	/* Physical address can't exceed 48 bits */
	assert((max_addr & ADDR_MASK_48_TO_63) == 0);

	/* 48-bit address */
	if (max_addr & ADDR_MASK_44_TO_47)
		return TCR_PS_BITS_256TB;

	/* 44-bit address */
	if (max_addr & ADDR_MASK_42_TO_43)
		return TCR_PS_BITS_16TB;

	/* 42-bit address */
	if (max_addr & ADDR_MASK_40_TO_41)
		return TCR_PS_BITS_4TB;

	/* 40-bit address */
	if (max_addr & ADDR_MASK_36_TO_39)
		return TCR_PS_BITS_1TB;

	/* 36-bit address */
	if (max_addr & ADDR_MASK_32_TO_35)
		return TCR_PS_BITS_64GB;

	return TCR_PS_BITS_4GB;
}

#if DEBUG
/* Physical Address ranges supported in the AArch64 Memory Model */
static const unsigned int pa_range_bits_arr[] = {
	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
	PARANGE_0101
};

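/*
 * Read the PARange field of ID_AA64MMFR0_EL1 and return the highest physical
 * address the CPU supports, as an inclusive limit.
 */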
static unsigned long long get_max_supported_pa(void)
{
	u_register_t pa_range = read_id_aa64mmfr0_el1() &
						ID_AA64MMFR0_EL1_PARANGE_MASK;

	/* All other values are reserved */
	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));

	return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
}
#endif

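/*
 * A minimal usage sketch for platform code, assuming memory regions were
 * registered beforehand via the mmap_add()/mmap_add_region() API declared in
 * xlat_tables.h (DEVICE_BASE and DEVICE_SIZE are hypothetical platform
 * constants):
 *
 *	mmap_add_region(DEVICE_BASE, DEVICE_BASE, DEVICE_SIZE,
 *			MT_DEVICE | MT_RW | MT_SECURE);
 *	init_xlat_tables();
 *	enable_mmu_el3(0);
 */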
void init_xlat_tables(void)
{
	unsigned long long max_pa;
	uintptr_t max_va;
	print_mmap();
	init_xlation_table(0, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
			   &max_va, &max_pa);

	assert(max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
	assert(max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);
	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <= get_max_supported_pa());

	tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
}

/*******************************************************************************
 * Macro generating the code for the function enabling the MMU in the given
 * exception level, assuming that the page tables have already been created.
 * The generated function takes a 'flags' argument: DISABLE_DCACHE leaves the
 * data cache disabled and XLAT_TABLE_NC makes translation table walks
 * non-cacheable (see their uses below).
 *
 *   _el:		Exception level at which the function will run
 *   _tcr_extra:	Extra bits to set in the TCR register. This mask will
 *			be OR'ed with the default TCR value.
 *   _tlbi_fct:		Function to invalidate the TLBs at the current
 *			exception level
 ******************************************************************************/
#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)		\
	void enable_mmu_el##_el(unsigned int flags)			\
	{								\
		uint64_t mair, tcr, ttbr;				\
		uint32_t sctlr;						\
									\
		assert(IS_IN_EL(_el));					\
		assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0);	\
									\
		/* Set attributes in the right indices of the MAIR */	\
		mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);	\
		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,		\
				ATTR_IWBWA_OWBWA_NTR_INDEX);		\
		mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE,		\
				ATTR_NON_CACHEABLE_INDEX);		\
		write_mair_el##_el(mair);				\
									\
		/* Invalidate TLBs at the current exception level */	\
		_tlbi_fct();						\
									\
		/* Set TCR bits as well. */				\
		/* Set T0SZ to (64 - width of virtual address space) */ \
		if (flags & XLAT_TABLE_NC) {				\
			/* Inner & outer non-cacheable non-shareable. */\
			tcr = TCR_SH_NON_SHAREABLE |			\
				TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC |	\
				(64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE));\
		} else {						\
			/* Inner & outer WBWA & shareable. */		\
			tcr = TCR_SH_INNER_SHAREABLE |			\
				TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA |	\
				(64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE));\
		}							\
		tcr |= _tcr_extra;					\
		write_tcr_el##_el(tcr);					\
									\
		/* Set TTBR bits as well */				\
		ttbr = (uint64_t) base_xlation_table;			\
		write_ttbr0_el##_el(ttbr);				\
									\
		/* Ensure all translation table writes have drained */	\
		/* into memory, the TLB invalidation is complete, */	\
		/* and translation register writes are committed */	\
		/* before enabling the MMU */				\
		dsbish();						\
		isb();							\
									\
		sctlr = read_sctlr_el##_el();				\
		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;			\
									\
		if (flags & DISABLE_DCACHE)				\
			sctlr &= ~SCTLR_C_BIT;				\
		else							\
			sctlr |= SCTLR_C_BIT;				\
									\
		write_sctlr_el##_el(sctlr);				\
									\
		/* Ensure the MMU enable takes effect immediately */	\
		isb();							\
	}

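/*
 * Note the TCR difference between the two instantiations below: TCR_EL1
 * programs the physical address size in its IPS field, while TCR_EL3 uses
 * the PS field and additionally has RES1 bits that must be set to 1.
 */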
/* Define EL1 and EL3 variants of the function enabling the MMU */
DEFINE_ENABLE_MMU_EL(1,
		(tcr_ps_bits << TCR_EL1_IPS_SHIFT),
		tlbivmalle1)
DEFINE_ENABLE_MMU_EL(3,
		TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
		tlbialle3)