/*
 * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <cassert.h>
#include <platform_def.h>
#include <utils.h>
#include <xlat_tables.h>
#include "../xlat_tables_private.h"

/*
 * Each platform can define the size of the virtual address space, which is
 * defined in ADDR_SPACE_SIZE. TCR.TxSZ is calculated as 64 minus the width of
 * said address space. The value of TCR.TxSZ must be in the range 16 to 39 [1],
 * which means that the virtual address space width must be in the range 48 to
 * 25 bits.
 *
 * Here we calculate the initial lookup level from the value of ADDR_SPACE_SIZE.
 * For a 4 KB page size, level 0 supports virtual address spaces of widths 48 to
 * 40 bits, level 1 from 39 to 31, and level 2 from 30 to 25. Wider or narrower
 * address spaces are not supported. As a result, level 3 cannot be used as
 * initial lookup level with 4 KB granularity. [2]
 *
 * For example, for a 35-bit address space (i.e. ADDR_SPACE_SIZE == 1 << 35),
 * TCR.TxSZ will be programmed to (64 - 35) = 29. According to Table D4-11 in
 * the ARM ARM, the initial lookup level for such an address space is 1.
 *
 * See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
 * information:
 * [1] Page 1730: 'Input address size', 'For all translation stages'.
 * [2] Section D4.2.5
 */

#if ADDR_SPACE_SIZE > (1ULL << (64 - TCR_TxSZ_MIN))

# error "ADDR_SPACE_SIZE is too big."

#elif ADDR_SPACE_SIZE > (1ULL << L0_XLAT_ADDRESS_SHIFT)

# define XLAT_TABLE_LEVEL_BASE	0
# define NUM_BASE_LEVEL_ENTRIES	(ADDR_SPACE_SIZE >> L0_XLAT_ADDRESS_SHIFT)

#elif ADDR_SPACE_SIZE > (1 << L1_XLAT_ADDRESS_SHIFT)

# define XLAT_TABLE_LEVEL_BASE	1
# define NUM_BASE_LEVEL_ENTRIES	(ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)

#elif ADDR_SPACE_SIZE >= (1 << (64 - TCR_TxSZ_MAX))

# define XLAT_TABLE_LEVEL_BASE	2
# define NUM_BASE_LEVEL_ENTRIES	(ADDR_SPACE_SIZE >> L2_XLAT_ADDRESS_SHIFT)

#else

# error "ADDR_SPACE_SIZE is too small."

#endif
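
/*
 * Worked illustration (added here for clarity, not from the original sources;
 * it assumes the usual 4 KB-granule address shifts of 39/30/21 for levels
 * 0/1/2): with ADDR_SPACE_SIZE == (1ULL << 32), the chain above selects
 * XLAT_TABLE_LEVEL_BASE == 1 and NUM_BASE_LEVEL_ENTRIES ==
 * ((1ULL << 32) >> 30) == 4, so the base table below holds four 8-byte
 * descriptors and is aligned to 32 bytes, matching the __aligned() constraint
 * on base_xlation_table.
 */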

static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));

static unsigned long long tcr_ps_bits;

static unsigned long long calc_physical_addr_size_bits(
					unsigned long long max_addr)
{
	/* Physical address can't exceed 48 bits */
	assert((max_addr & ADDR_MASK_48_TO_63) == 0);

	/* 48 bits address */
	if (max_addr & ADDR_MASK_44_TO_47)
		return TCR_PS_BITS_256TB;

	/* 44 bits address */
	if (max_addr & ADDR_MASK_42_TO_43)
		return TCR_PS_BITS_16TB;

	/* 42 bits address */
	if (max_addr & ADDR_MASK_40_TO_41)
		return TCR_PS_BITS_4TB;

	/* 40 bits address */
	if (max_addr & ADDR_MASK_36_TO_39)
		return TCR_PS_BITS_1TB;

	/* 36 bits address */
	if (max_addr & ADDR_MASK_32_TO_35)
		return TCR_PS_BITS_64GB;

	return TCR_PS_BITS_4GB;
}
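
/*
 * Example (added for illustration, not part of the original code): if the
 * highest physical address mapped by the platform is 0x800000000 (bit 35
 * set), none of the higher masks match, ADDR_MASK_32_TO_35 does, and the
 * function returns TCR_PS_BITS_64GB, i.e. a 36-bit physical address range is
 * later programmed into TCR_ELx.{I}PS by the enable_mmu_elx() functions
 * defined below.
 */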

void init_xlat_tables(void)
{
	unsigned long long max_pa;
	uintptr_t max_va;
	print_mmap();
	init_xlation_table(0, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
			   &max_va, &max_pa);
	tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
	assert(max_va < ADDR_SPACE_SIZE);
}

/*******************************************************************************
 * Macro generating the code for the function enabling the MMU in the given
 * exception level, assuming that the pagetables have already been created.
 *
 *   _el:		Exception level at which the function will run
 *   _tcr_extra:	Extra bits to set in the TCR register. This mask will
 *			be OR'ed with the default TCR value.
 *   _tlbi_fct:		Function to invalidate the TLBs at the current
 *			exception level
 ******************************************************************************/
#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)		\
	void enable_mmu_el##_el(unsigned int flags)			\
	{								\
		uint64_t mair, tcr, ttbr;				\
		uint32_t sctlr;						\
									\
		assert(IS_IN_EL(_el));					\
		assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0);	\
									\
		/* Set attributes in the right indices of the MAIR */	\
		mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);	\
		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,		\
				ATTR_IWBWA_OWBWA_NTR_INDEX);		\
		mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE,		\
				ATTR_NON_CACHEABLE_INDEX);		\
		write_mair_el##_el(mair);				\
									\
		/* Invalidate TLBs at the current exception level */	\
		_tlbi_fct();						\
									\
		/* Set TCR bits as well. */				\
		/* Inner & outer WBWA & shareable. */			\
		/* Set T0SZ to (64 - width of virtual address space) */ \
		tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA |	\
			TCR_RGN_INNER_WBA |				\
			(64 - __builtin_ctzl(ADDR_SPACE_SIZE));		\
		tcr |= _tcr_extra;					\
		write_tcr_el##_el(tcr);					\
									\
		/* Set TTBR bits as well */				\
		ttbr = (uint64_t) base_xlation_table;			\
		write_ttbr0_el##_el(ttbr);				\
									\
		/* Ensure all translation table writes have drained */	\
		/* into memory, the TLB invalidation is complete, */	\
		/* and translation register writes are committed */	\
		/* before enabling the MMU */				\
		dsb();							\
		isb();							\
									\
		sctlr = read_sctlr_el##_el();				\
		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;			\
									\
		if (flags & DISABLE_DCACHE)				\
			sctlr &= ~SCTLR_C_BIT;				\
		else							\
			sctlr |= SCTLR_C_BIT;				\
									\
		write_sctlr_el##_el(sctlr);				\
									\
		/* Ensure the MMU enable takes effect immediately */	\
		isb();							\
	}

/* Define EL1 and EL3 variants of the function enabling the MMU */
DEFINE_ENABLE_MMU_EL(1,
		(tcr_ps_bits << TCR_EL1_IPS_SHIFT),
		tlbivmalle1)
DEFINE_ENABLE_MMU_EL(3,
		TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
		tlbialle3)
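
/*
 * Typical usage from platform setup code (a sketch under assumptions, not
 * part of this file; DEVICE_BASE and DEVICE_SIZE stand in for real platform
 * constants):
 *
 *	mmap_add_region(DEVICE_BASE, DEVICE_BASE, DEVICE_SIZE,
 *			MT_DEVICE | MT_RW | MT_SECURE);
 *	init_xlat_tables();
 *	enable_mmu_el3(0);
 *
 * Passing DISABLE_DCACHE instead of 0 enables the MMU while leaving the data
 * cache disabled; a BL image running at EL1 would call enable_mmu_el1()
 * instead.
 */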