/*
 * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <common_def.h>
#include <platform_def.h>
#include <sys/types.h>
#include <utils.h>
#include <xlat_tables.h>
#include <xlat_tables_arch.h>
#include "../xlat_tables_private.h"

#define XLAT_TABLE_LEVEL_BASE	\
	GET_XLAT_TABLE_LEVEL_BASE(PLAT_VIRT_ADDR_SPACE_SIZE)

#define NUM_BASE_LEVEL_ENTRIES	\
	GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)

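/*
 * The base translation table is aligned to its own size so that its address
 * can be programmed into TTBR0, which the architecture requires to be aligned
 * to the size of the initial lookup level table.
 */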
static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));

static unsigned long long tcr_ps_bits;

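/*
 * Return the TCR.{I}PS field encoding for the smallest physical address range
 * that covers max_addr (e.g. a 34-bit max_addr falls into the 36-bit/64GB
 * encoding).
 */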
static unsigned long long calc_physical_addr_size_bits(
					unsigned long long max_addr)
{
	/* Physical address can't exceed 48 bits */
	assert((max_addr & ADDR_MASK_48_TO_63) == 0);

	/* 48-bit address */
	if (max_addr & ADDR_MASK_44_TO_47)
		return TCR_PS_BITS_256TB;

	/* 44-bit address */
	if (max_addr & ADDR_MASK_42_TO_43)
		return TCR_PS_BITS_16TB;

	/* 42-bit address */
	if (max_addr & ADDR_MASK_40_TO_41)
		return TCR_PS_BITS_4TB;

	/* 40-bit address */
	if (max_addr & ADDR_MASK_36_TO_39)
		return TCR_PS_BITS_1TB;

	/* 36-bit address */
	if (max_addr & ADDR_MASK_32_TO_35)
		return TCR_PS_BITS_64GB;

	return TCR_PS_BITS_4GB;
}

#if ENABLE_ASSERTIONS
/* Physical Address ranges supported in the AArch64 Memory Model */
static const unsigned int pa_range_bits_arr[] = {
	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
	PARANGE_0101
};

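/*
 * Return the highest physical address that this core can access, derived from
 * the ID_AA64MMFR0_EL1.PARange field (for instance, a PARange of 0b0101
 * reports 48 bits of physical address, i.e. a maximum PA of 2^48 - 1).
 */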
static unsigned long long get_max_supported_pa(void)
{
	u_register_t pa_range = read_id_aa64mmfr0_el1() &
						ID_AA64MMFR0_EL1_PARANGE_MASK;

	/* All other values are reserved */
	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));

	return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
}
#endif /* ENABLE_ASSERTIONS */

int xlat_arch_current_el(void)
{
	int el = GET_EL(read_CurrentEl());

	assert(el > 0);

	return el;
}

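/*
 * Return the execute-never descriptor bit for the translation regime of the
 * given exception level: the EL3 regime has a single XN bit, whereas for the
 * EL1&0 regime this library sets PXN, since the tables it builds describe the
 * privileged image's own mappings.
 */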
uint64_t xlat_arch_get_xn_desc(int el)
{
	if (el == 3) {
		return UPPER_ATTRS(XN);
	} else {
		assert(el == 1);
		return UPPER_ATTRS(PXN);
	}
}

void init_xlat_tables(void)
{
	unsigned long long max_pa;
	uintptr_t max_va;

	print_mmap();
	init_xlation_table(0, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
			&max_va, &max_pa);

	assert(max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
	assert(max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);
	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <= get_max_supported_pa());

	tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
}
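/*
 * Illustrative usage sketch (not part of this file): a BL image that links
 * this library typically registers its memory regions, builds the tables and
 * then enables the MMU at its own exception level. The region names and sizes
 * below are hypothetical placeholders.
 *
 *	mmap_add_region(DEVICE0_BASE, DEVICE0_BASE, DEVICE0_SIZE,
 *			MT_DEVICE | MT_RW | MT_SECURE);
 *	mmap_add_region(BL_RAM_BASE, BL_RAM_BASE, BL_RAM_SIZE,
 *			MT_MEMORY | MT_RW | MT_SECURE);
 *	init_xlat_tables();
 *	enable_mmu_el3(0);
 */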

/*******************************************************************************
 * Macro generating the code for the function enabling the MMU in the given
 * exception level, assuming that the pagetables have already been created.
 *
 * _el:		Exception level at which the function will run
 * _tcr_extra:	Extra bits to set in the TCR register. This mask will
 *		be OR'ed with the default TCR value.
 * _tlbi_fct:	Function to invalidate the TLBs at the current
 *		exception level
 ******************************************************************************/
#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)		\
	void enable_mmu_el##_el(unsigned int flags)			\
	{								\
		uint64_t mair, tcr, ttbr;				\
		uint32_t sctlr;						\
									\
		assert(IS_IN_EL(_el));					\
		assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0);	\
									\
		/* Set attributes in the right indices of the MAIR */	\
		mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);	\
		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,		\
				ATTR_IWBWA_OWBWA_NTR_INDEX);		\
		mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE,		\
				ATTR_NON_CACHEABLE_INDEX);		\
		write_mair_el##_el(mair);				\
									\
		/* Invalidate TLBs at the current exception level */	\
		_tlbi_fct();						\
									\
		/* Set TCR bits as well. */				\
		/* Set T0SZ to (64 - width of virtual address space) */ \
		if (flags & XLAT_TABLE_NC) {				\
			/* Inner & outer non-cacheable non-shareable. */\
			tcr = TCR_SH_NON_SHAREABLE |			\
				TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC |	\
				(64 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));\
		} else {						\
			/* Inner & outer WBWA & shareable. */		\
			tcr = TCR_SH_INNER_SHAREABLE |			\
				TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA |	\
				(64 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));\
		}							\
		tcr |= _tcr_extra;					\
		write_tcr_el##_el(tcr);					\
									\
		/* Set TTBR bits as well */				\
		ttbr = (uint64_t) base_xlation_table;			\
		write_ttbr0_el##_el(ttbr);				\
									\
		/* Ensure all translation table writes have drained */	\
		/* into memory, the TLB invalidation is complete, */	\
		/* and translation register writes are committed */	\
		/* before enabling the MMU */				\
		dsbish();						\
		isb();							\
									\
		sctlr = read_sctlr_el##_el();				\
		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;			\
									\
		if (flags & DISABLE_DCACHE)				\
			sctlr &= ~SCTLR_C_BIT;				\
		else							\
			sctlr |= SCTLR_C_BIT;				\
									\
		write_sctlr_el##_el(sctlr);				\
									\
		/* Ensure the MMU enable takes effect immediately */	\
		isb();							\
	}

/* Define EL1 and EL3 variants of the function enabling the MMU */
DEFINE_ENABLE_MMU_EL(1,
		/*
		 * TCR_EL1.EPD1: Disable translation table walk for addresses
		 * that are translated using TTBR1_EL1.
		 */
		TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT),
		tlbivmalle1)
DEFINE_ENABLE_MMU_EL(3,
		TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
		tlbialle3)
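
/*
 * The two instantiations above expand to enable_mmu_el1(unsigned int flags)
 * and enable_mmu_el3(unsigned int flags); no EL2 variant is generated by this
 * file.
 */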