/*
 * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdint.h>

#include <platform_def.h>

#include <arch.h>
#include <arch_features.h>
#include <common/bl_common.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables.h>
#include <lib/xlat_tables/xlat_tables_arch.h>
#include <plat/common/common_def.h>

#include "../xlat_tables_private.h"

#define XLAT_TABLE_LEVEL_BASE	\
	GET_XLAT_TABLE_LEVEL_BASE(PLAT_VIRT_ADDR_SPACE_SIZE)

#define NUM_BASE_LEVEL_ENTRIES	\
	GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)

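/*
 * The base translation table is aligned to its own size so that the address
 * programmed into TTBR0_ELx meets the alignment the architecture requires for
 * the initial lookup level.
 */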
static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));

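/*
 * Physical address size (PS/IPS) field value computed by init_xlat_tables()
 * from the highest mapped physical address; it is OR'ed into TCR_ELx by the
 * enable_mmu_elx() functions defined below.
 */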
static unsigned long long tcr_ps_bits;

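/*
 * Return the TCR PS/IPS encoding of the smallest physical address range that
 * covers max_addr.
 */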
static unsigned long long calc_physical_addr_size_bits(
					unsigned long long max_addr)
{
	/* Physical address can't exceed 48 bits */
	assert((max_addr & ADDR_MASK_48_TO_63) == 0U);

	/* 48 bits address */
	if ((max_addr & ADDR_MASK_44_TO_47) != 0U)
		return TCR_PS_BITS_256TB;

	/* 44 bits address */
	if ((max_addr & ADDR_MASK_42_TO_43) != 0U)
		return TCR_PS_BITS_16TB;

	/* 42 bits address */
	if ((max_addr & ADDR_MASK_40_TO_41) != 0U)
		return TCR_PS_BITS_4TB;

	/* 40 bits address */
	if ((max_addr & ADDR_MASK_36_TO_39) != 0U)
		return TCR_PS_BITS_1TB;

	/* 36 bits address */
	if ((max_addr & ADDR_MASK_32_TO_35) != 0U)
		return TCR_PS_BITS_64GB;

	return TCR_PS_BITS_4GB;
}

#if ENABLE_ASSERTIONS
/*
 * Physical address ranges supported in the AArch64 memory model. The value
 * 0b110 is supported from ARMv8.2 onwards.
 */
static const unsigned int pa_range_bits_arr[] = {
	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
	PARANGE_0101, PARANGE_0110
};

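/*
 * Return the highest physical address supported by this PE, derived from
 * ID_AA64MMFR0_EL1.PARange.
 */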
static unsigned long long get_max_supported_pa(void)
{
	u_register_t pa_range = read_id_aa64mmfr0_el1() &
						ID_AA64MMFR0_EL1_PARANGE_MASK;

	/* All other values are reserved */
	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));

	return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
}

/*
 * Return the minimum virtual address space size supported by the architecture
 */
static uintptr_t xlat_get_min_virt_addr_space_size(void)
{
	uintptr_t ret;

	if (is_armv8_4_ttst_present())
		ret = MIN_VIRT_ADDR_SPACE_SIZE_TTST;
	else
		ret = MIN_VIRT_ADDR_SPACE_SIZE;

	return ret;
}
#endif /* ENABLE_ASSERTIONS */

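/*
 * Return the exception level the CPU is currently executing at. This library
 * is not expected to run at EL0.
 */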
unsigned int xlat_arch_current_el(void)
{
	unsigned int el = (unsigned int)GET_EL(read_CurrentEl());

	assert(el > 0U);

	return el;
}

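/*
 * Return the execute-never descriptor attribute for the translation regime of
 * the given exception level: XN for the EL3 regime, PXN for the EL1&0 regime
 * so that only execution at EL1 is forbidden.
 */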
uint64_t xlat_arch_get_xn_desc(unsigned int el)
{
	if (el == 3U) {
		return UPPER_ATTRS(XN);
	} else {
		assert(el == 1U);
		return UPPER_ATTRS(PXN);
	}
}

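/*
 * Populate the translation tables from the platform's memory map and compute
 * the physical address size bits to be programmed into TCR_ELx when the MMU
 * is enabled.
 */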
void init_xlat_tables(void)
{
	unsigned long long max_pa;
	uintptr_t max_va;

	assert(PLAT_VIRT_ADDR_SPACE_SIZE >=
		(xlat_get_min_virt_addr_space_size() - 1U));
	assert(PLAT_VIRT_ADDR_SPACE_SIZE <= MAX_VIRT_ADDR_SPACE_SIZE);
	assert(IS_POWER_OF_TWO(PLAT_VIRT_ADDR_SPACE_SIZE));

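	/*
	 * Build the translation tables from the mmap regions registered so
	 * far and record the highest virtual and physical addresses that
	 * were actually mapped.
	 */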
	print_mmap();
	init_xlation_table(0U, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
			   &max_va, &max_pa);

	assert(max_va <= (PLAT_VIRT_ADDR_SPACE_SIZE - 1U));
	assert(max_pa <= (PLAT_PHY_ADDR_SPACE_SIZE - 1U));
	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1U) <= get_max_supported_pa());

	tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
}

/*******************************************************************************
 * Macro generating the code for the function enabling the MMU at the given
 * exception level, assuming that the page tables have already been created.
 *
 *   _el:		Exception level at which the function will run
 *   _tcr_extra:	Extra bits to set in the TCR register. This mask will
 *			be OR'ed with the default TCR value.
 *   _tlbi_fct:		Function to invalidate the TLBs at the current
 *			exception level
 ******************************************************************************/
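/*
 * The generated enable_mmu_elN() functions take a 'flags' argument:
 * XLAT_TABLE_NC requests non-cacheable translation table walks and
 * DISABLE_DCACHE leaves the data cache disabled when the MMU is enabled.
 */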
#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)		\
	void enable_mmu_el##_el(unsigned int flags)			\
	{								\
		uint64_t mair, tcr, ttbr;				\
		uint32_t sctlr;						\
									\
		assert(IS_IN_EL(_el));					\
		assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0U);	\
									\
		/* Set attributes in the right indices of the MAIR */	\
		mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);	\
		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,		\
				ATTR_IWBWA_OWBWA_NTR_INDEX);		\
		mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE,		\
				ATTR_NON_CACHEABLE_INDEX);		\
		write_mair_el##_el(mair);				\
									\
		/* Invalidate TLBs at the current exception level */	\
		_tlbi_fct();						\
									\
		/* Set TCR bits as well. */				\
		/* Set T0SZ to (64 - width of virtual address space) */\
		int t0sz = 64 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE);\
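		/* E.g. a 4GB (1ULL << 32) address space gives T0SZ = 32 */\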
									\
		if ((flags & XLAT_TABLE_NC) != 0U) {			\
			/* Inner & outer non-cacheable non-shareable. */\
			tcr = TCR_SH_NON_SHAREABLE |			\
				TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC |	\
				(uint64_t) t0sz;			\
		} else {						\
			/* Inner & outer WBWA & shareable. */		\
			tcr = TCR_SH_INNER_SHAREABLE |			\
				TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA |	\
				(uint64_t) t0sz;			\
		}							\
		tcr |= _tcr_extra;					\
		write_tcr_el##_el(tcr);					\
									\
		/* Set TTBR bits as well */				\
		ttbr = (uint64_t) base_xlation_table;			\
		write_ttbr0_el##_el(ttbr);				\
									\
		/* Ensure all translation table writes have drained */	\
		/* into memory, the TLB invalidation is complete, */	\
		/* and translation register writes are committed */	\
		/* before enabling the MMU */				\
		dsbish();						\
		isb();							\
									\
		sctlr = read_sctlr_el##_el();				\
		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;			\
									\
		if ((flags & DISABLE_DCACHE) != 0U)			\
			sctlr &= ~SCTLR_C_BIT;				\
		else							\
			sctlr |= SCTLR_C_BIT;				\
									\
		write_sctlr_el##_el(sctlr);				\
									\
		/* Ensure the MMU enable takes effect immediately */	\
		isb();							\
	}								\
									\
	void enable_mmu_direct_el##_el(unsigned int flags)		\
	{								\
		enable_mmu_el##_el(flags);				\
	}

/* Define EL1 and EL3 variants of the function enabling the MMU */
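/*
 * Each invocation below defines enable_mmu_elN() and enable_mmu_direct_elN()
 * for the given exception level.
 */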
DEFINE_ENABLE_MMU_EL(1,
		/*
		 * TCR_EL1.EPD1: Disable translation table walk for addresses
		 * that are translated using TTBR1_EL1.
		 */
		TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT),
		tlbivmalle1)
DEFINE_ENABLE_MMU_EL(3,
		TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
		tlbialle3)