/*
 * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <common_def.h>
#include <platform_def.h>
#include <sys/types.h>
#include <utils.h>
#include <xlat_tables.h>
#include <xlat_tables_arch.h>
#include "../xlat_tables_private.h"

#define XLAT_TABLE_LEVEL_BASE	\
	GET_XLAT_TABLE_LEVEL_BASE(PLAT_VIRT_ADDR_SPACE_SIZE)

#define NUM_BASE_LEVEL_ENTRIES	\
	GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)

static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));
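
/*
 * Note: the architecture requires the table used for the initial lookup level
 * to be aligned to its own size, hence the __aligned() attribute above; this
 * also lets the table's address be written directly into TTBR0.
 */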

static unsigned long long tcr_ps_bits;

static unsigned long long calc_physical_addr_size_bits(
					unsigned long long max_addr)
{
	/* Physical address can't exceed 48 bits */
	assert((max_addr & ADDR_MASK_48_TO_63) == 0);

	/* 48-bit address */
	if (max_addr & ADDR_MASK_44_TO_47)
		return TCR_PS_BITS_256TB;

	/* 44-bit address */
	if (max_addr & ADDR_MASK_42_TO_43)
		return TCR_PS_BITS_16TB;

	/* 42-bit address */
	if (max_addr & ADDR_MASK_40_TO_41)
		return TCR_PS_BITS_4TB;

	/* 40-bit address */
	if (max_addr & ADDR_MASK_36_TO_39)
		return TCR_PS_BITS_1TB;

	/* 36-bit address */
	if (max_addr & ADDR_MASK_32_TO_35)
		return TCR_PS_BITS_64GB;

	return TCR_PS_BITS_4GB;
}
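
/*
 * The value returned above is the 3-bit physical/intermediate address size
 * encoding programmed into TCR_EL1.IPS or TCR_EL3.PS further down; it follows
 * the same encoding as ID_AA64MMFR0_EL1.PARange (0b000 = 4GB ... 0b101 = 256TB).
 */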

#if ENABLE_ASSERTIONS
/*
 * Physical Address ranges supported in the AArch64 Memory Model. Value 0b110
 * is supported from ARMv8.2 onwards.
 */
static const unsigned int pa_range_bits_arr[] = {
	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
	PARANGE_0101, PARANGE_0110
};

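/*
 * Return the largest physical address this PE can generate, as advertised by
 * ID_AA64MMFR0_EL1.PARange. Only used to sanity-check the platform address
 * space sizes when assertions are enabled.
 */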
static unsigned long long get_max_supported_pa(void)
{
	u_register_t pa_range = read_id_aa64mmfr0_el1() &
						ID_AA64MMFR0_EL1_PARANGE_MASK;

	/* All other values are reserved */
	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));

	return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
}
#endif /* ENABLE_ASSERTIONS */

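/*
 * Return the exception level the image is currently executing at. The
 * translation library is never used at EL0, hence the assertion below.
 */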
int xlat_arch_current_el(void)
{
	int el = GET_EL(read_CurrentEl());

	assert(el > 0);

	return el;
}

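/*
 * Return the execute-never upper attribute for the given exception level. The
 * EL3 translation regime only defines a single XN bit, whereas the EL1&0
 * regime uses PXN to make a region non-executable at EL1 without affecting
 * unprivileged execution.
 */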
uint64_t xlat_arch_get_xn_desc(int el)
{
	if (el == 3) {
		return UPPER_ATTRS(XN);
	} else {
		assert(el == 1);
		return UPPER_ATTRS(PXN);
	}
}

void init_xlat_tables(void)
{
	unsigned long long max_pa;
	uintptr_t max_va;
	print_mmap();
	init_xlation_table(0, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
			   &max_va, &max_pa);

	assert(max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
	assert(max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);
	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <= get_max_supported_pa());

	tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
}
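
/*
 * Illustrative usage (a minimal sketch, not part of this file): an EL3 image
 * would typically describe its memory regions, build the tables and then turn
 * the MMU on, along the lines of:
 *
 *	mmap_add_region(BL31_BASE, BL31_BASE, BL31_SIZE,
 *			MT_MEMORY | MT_RW | MT_SECURE);
 *	init_xlat_tables();
 *	enable_mmu_el3(0);
 *
 * BL31_BASE/BL31_SIZE stand in for whatever regions the platform actually
 * needs to map.
 */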

/*******************************************************************************
 * Macro generating the code for the function enabling the MMU at the given
 * exception level, assuming that the pagetables have already been created.
 *
 *   _el:		Exception level at which the function will run
 *   _tcr_extra:	Extra bits to set in the TCR register. This mask will
 *			be OR'ed with the default TCR value.
 *   _tlbi_fct:		Function to invalidate the TLBs at the current
 *			exception level
 ******************************************************************************/
#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)		\
	void enable_mmu_el##_el(unsigned int flags)			\
	{								\
		uint64_t mair, tcr, ttbr;				\
		uint32_t sctlr;						\
									\
		assert(IS_IN_EL(_el));					\
		assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0);	\
									\
		/* Set attributes in the right indices of the MAIR */	\
		mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);	\
		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,		\
				ATTR_IWBWA_OWBWA_NTR_INDEX);		\
		mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE,		\
				ATTR_NON_CACHEABLE_INDEX);		\
		write_mair_el##_el(mair);				\
									\
		/* Invalidate TLBs at the current exception level */	\
		_tlbi_fct();						\
									\
		/* Set TCR bits as well. */				\
		/* Set T0SZ to (64 - width of virtual address space) */\
		if (flags & XLAT_TABLE_NC) {				\
			/* Inner & outer non-cacheable non-shareable. */\
			tcr = TCR_SH_NON_SHAREABLE |			\
				TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC |	\
				(64 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));\
		} else {						\
			/* Inner & outer WBWA & shareable. */		\
			tcr = TCR_SH_INNER_SHAREABLE |			\
				TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA |	\
				(64 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));\
		}							\
		tcr |= _tcr_extra;					\
		write_tcr_el##_el(tcr);					\
									\
		/* Set TTBR bits as well */				\
		ttbr = (uint64_t) base_xlation_table;			\
		write_ttbr0_el##_el(ttbr);				\
									\
		/* Ensure all translation table writes have drained */	\
		/* into memory, the TLB invalidation is complete, */	\
		/* and translation register writes are committed */	\
		/* before enabling the MMU */				\
		dsbish();						\
		isb();							\
									\
		sctlr = read_sctlr_el##_el();				\
		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;			\
									\
		if (flags & DISABLE_DCACHE)				\
			sctlr &= ~SCTLR_C_BIT;				\
		else							\
			sctlr |= SCTLR_C_BIT;				\
									\
		write_sctlr_el##_el(sctlr);				\
									\
		/* Ensure the MMU enable takes effect immediately */	\
		isb();							\
	}

/* Define EL1 and EL3 variants of the function enabling the MMU */
DEFINE_ENABLE_MMU_EL(1,
		/*
		 * TCR_EL1.EPD1: Disable translation table walk for addresses
		 * that are translated using TTBR1_EL1.
		 */
		TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT),
		tlbivmalle1)
DEFINE_ENABLE_MMU_EL(3,
		TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
		tlbialle3)
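
/*
 * Note on the TLB maintenance helpers used above: tlbivmalle1 (TLBI VMALLE1)
 * invalidates the stage 1 EL1&0 entries for the current VMID, while
 * tlbialle3 (TLBI ALLE3) invalidates all entries of the EL3 translation
 * regime, matching the regime each variant programs.
 */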