/*
 * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <cassert.h>
#include <platform_def.h>
#include <string.h>
#include <xlat_tables.h>


#ifndef DEBUG_XLAT_TABLE
#define DEBUG_XLAT_TABLE 0
#endif

#if DEBUG_XLAT_TABLE
#define debug_print(...) printf(__VA_ARGS__)
#else
#define debug_print(...) ((void)0)
#endif

CASSERT(ADDR_SPACE_SIZE > 0, assert_valid_addr_space_size);

#define UNSET_DESC	~0ul

#define NUM_L1_ENTRIES (ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)

static uint64_t l1_xlation_table[NUM_L1_ENTRIES]
__aligned(NUM_L1_ENTRIES * sizeof(uint64_t));

static uint64_t xlat_tables[MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
__aligned(XLAT_TABLE_SIZE) __attribute__((section("xlat_table")));

static unsigned next_xlat;
static unsigned long max_pa;
static unsigned long max_va;
static unsigned long tcr_ps_bits;

/*
 * Array of all memory regions stored in order of ascending base address.
 * The list is terminated by the first entry with size == 0.
 */
static mmap_region_t mmap[MAX_MMAP_REGIONS + 1];


static void print_mmap(void)
{
#if DEBUG_XLAT_TABLE
	debug_print("mmap:\n");
	mmap_region_t *mm = mmap;
	while (mm->size) {
		debug_print(" %010lx %010lx %10lx %x\n", mm->base_va,
					mm->base_pa, mm->size, mm->attr);
		++mm;
	}
	debug_print("\n");
#endif
}

void mmap_add_region(unsigned long base_pa, unsigned long base_va,
			unsigned long size, unsigned attr)
{
	mmap_region_t *mm = mmap;
	mmap_region_t *mm_last = mm + sizeof(mmap) / sizeof(mmap[0]) - 1;
	unsigned long pa_end = base_pa + size - 1;
	unsigned long va_end = base_va + size - 1;

	assert(IS_PAGE_ALIGNED(base_pa));
	assert(IS_PAGE_ALIGNED(base_va));
	assert(IS_PAGE_ALIGNED(size));

	if (!size)
		return;

	/* Find correct place in mmap to insert new region */
	while (mm->base_va < base_va && mm->size)
		++mm;

	/* Make room for new region by moving other regions up by one place */
	memmove(mm + 1, mm, (uintptr_t)mm_last - (uintptr_t)mm);

	/* Check we haven't lost the empty sentinel from the end of the array */
	assert(mm_last->size == 0);

	mm->base_pa = base_pa;
	mm->base_va = base_va;
	mm->size = size;
	mm->attr = attr;

	if (pa_end > max_pa)
		max_pa = pa_end;
	if (va_end > max_va)
		max_va = va_end;
}

void mmap_add(const mmap_region_t *mm)
{
	while (mm->size) {
		mmap_add_region(mm->base_pa, mm->base_va, mm->size, mm->attr);
		++mm;
	}
}
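
/*
 * Illustrative sketch only: a platform port would typically describe its
 * memory map as a sentinel-terminated mmap_region_t array and pass it to
 * mmap_add(). The name, addresses and sizes below are hypothetical and not
 * part of this file; real layouts live in the platform code.
 */
#if 0
static const mmap_region_t plat_mmap[] = {
	/* 256MB of non-secure device registers, read-write, identity-mapped */
	{ .base_pa = 0x00000000, .base_va = 0x00000000,
	  .size = 0x10000000, .attr = MT_RW | MT_NS },
	/* 1GB of secure DRAM (normal memory), read-write, identity-mapped */
	{ .base_pa = 0x80000000, .base_va = 0x80000000,
	  .size = 0x40000000, .attr = MT_MEMORY | MT_RW },
	{ .size = 0 }	/* terminator: first entry with size == 0 */
};
#endif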

static unsigned long mmap_desc(unsigned attr, unsigned long addr_pa,
					unsigned level)
{
	unsigned long desc = addr_pa;

	/*
	 * A level 3 "page" descriptor uses the same 0b11 encoding as a
	 * table descriptor at levels 0-2; lower levels use block descriptors.
	 */
	desc |= level == 3 ? TABLE_DESC : BLOCK_DESC;

	desc |= attr & MT_NS ? LOWER_ATTRS(NS) : 0;

	desc |= attr & MT_RW ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);

	desc |= LOWER_ATTRS(ACCESS_FLAG);

	if (attr & MT_MEMORY) {
		desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
		/* Writeable normal memory is never executable */
		if (attr & MT_RW)
			desc |= UPPER_ATTRS(XN);
	} else {
		desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
		desc |= UPPER_ATTRS(XN);
	}

	debug_print(attr & MT_MEMORY ? "MEM" : "DEV");
	debug_print(attr & MT_RW ? "-RW" : "-RO");
	debug_print(attr & MT_NS ? "-NS" : "-S");

	return desc;
}

static int mmap_region_attr(mmap_region_t *mm, unsigned long base_va,
					unsigned long size)
{
	int attr = mm->attr;

	for (;;) {
		++mm;

		if (!mm->size)
			return attr; /* Reached end of list */

		if (mm->base_va >= base_va + size)
			return attr; /* Next region is after area so end */

		if (mm->base_va + mm->size <= base_va)
			continue; /* Next region has already been overtaken */

		if ((mm->attr & attr) == attr)
			continue; /* Region doesn't override attribs so skip */

		attr &= mm->attr;

		if (mm->base_va > base_va ||
			mm->base_va + mm->size < base_va + size)
			return -1; /* Region doesn't fully cover our area */
	}
}
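
/*
 * Worked example for mmap_region_attr() (hypothetical regions): for an area
 * wholly inside an MT_MEMORY | MT_RW | MT_NS region, a later secure region
 * (MT_MEMORY | MT_RW) that overlaps only part of the area clears the MT_NS
 * bit but does not cover the whole area, so the function returns -1 and the
 * caller must descend to a finer-grained table. Had the secure region
 * covered the entire area, the intersection MT_MEMORY | MT_RW would be
 * returned and the area mapped with a single descriptor.
 */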

static mmap_region_t *init_xlation_table(mmap_region_t *mm,
					unsigned long base_va,
					unsigned long *table, unsigned level)
{
	unsigned level_size_shift = L1_XLAT_ADDRESS_SHIFT - (level - 1) *
						XLAT_TABLE_ENTRIES_SHIFT;
	unsigned level_size = 1 << level_size_shift;
	unsigned long level_index_mask = XLAT_TABLE_ENTRIES_MASK << level_size_shift;

	assert(level <= 3);

	debug_print("New xlat table:\n");

	do {
		unsigned long desc = UNSET_DESC;

		if (mm->base_va + mm->size <= base_va) {
			/* Area now after the region so skip it */
			++mm;
			continue;
		}

		/* Skip leading spaces to indent output by table level */
		debug_print("      %010lx %8lx " + 6 - 2 * level, base_va,
				level_size);

		if (mm->base_va >= base_va + level_size) {
			/* Next region is after area so nothing to map yet */
			desc = INVALID_DESC;
		} else if (mm->base_va <= base_va && mm->base_va + mm->size >=
				base_va + level_size) {
			/* Next region covers all of area */
			int attr = mmap_region_attr(mm, base_va, level_size);
			if (attr >= 0)
				desc = mmap_desc(attr,
					base_va - mm->base_va + mm->base_pa,
					level);
		}
		/* else: next region only partially covers the area, so a
		 * finer table is needed */

		if (desc == UNSET_DESC) {
			/* Area not covered by a region so need finer table */
			unsigned long *new_table = xlat_tables[next_xlat++];
			assert(next_xlat <= MAX_XLAT_TABLES);
			desc = TABLE_DESC | (unsigned long)new_table;

			/* Recurse to fill in new table */
			mm = init_xlation_table(mm, base_va,
						new_table, level + 1);
		}

		debug_print("\n");

		*table++ = desc;
		base_va += level_size;
	} while (mm->size && (base_va & level_index_mask));

	return mm;
}

static unsigned int calc_physical_addr_size_bits(unsigned long max_addr)
{
	/* Physical address can't exceed 48 bits */
	assert((max_addr & ADDR_MASK_48_TO_63) == 0);

	/* 48 bits address */
	if (max_addr & ADDR_MASK_44_TO_47)
		return TCR_PS_BITS_256TB;

	/* 44 bits address */
	if (max_addr & ADDR_MASK_42_TO_43)
		return TCR_PS_BITS_16TB;

	/* 42 bits address */
	if (max_addr & ADDR_MASK_40_TO_41)
		return TCR_PS_BITS_4TB;

	/* 40 bits address */
	if (max_addr & ADDR_MASK_36_TO_39)
		return TCR_PS_BITS_1TB;

	/* 36 bits address */
	if (max_addr & ADDR_MASK_32_TO_35)
		return TCR_PS_BITS_64GB;

	return TCR_PS_BITS_4GB;
}
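
/*
 * For example: a platform whose highest mapped physical address is
 * 0xBFFFFFFF (everything below 4GB) sets none of the masks above and gets
 * TCR_PS_BITS_4GB (a 32-bit PA space), while one mapping up to 0x8FFFFFFFF
 * (a 36-bit address) matches ADDR_MASK_32_TO_35 and gets TCR_PS_BITS_64GB.
 */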

void init_xlat_tables(void)
{
	print_mmap();
	init_xlation_table(mmap, 0, l1_xlation_table, 1);
	tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
	assert(max_va < ADDR_SPACE_SIZE);
}
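
/*
 * Sketch of the expected call order (hypothetical BL-stage code; the real
 * call sites live in the BL images and platform ports): all regions must be
 * registered before init_xlat_tables() runs, and the MMU must still be off;
 * one of the enable_mmu_elX() functions generated below then installs
 * l1_xlation_table and turns translation on.
 */
#if 0
void bl_stage_setup_mmu(void)
{
	mmap_add(plat_mmap);	/* e.g. the table sketched after mmap_add() */
	init_xlat_tables();
	enable_mmu_el3(0);	/* or DISABLE_DCACHE to keep the D-cache off */
}
#endif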

/*******************************************************************************
 * Macro generating the code for the function enabling the MMU in the given
 * exception level, assuming that the pagetables have already been created.
 *
 *   _el:		Exception level at which the function will run
 *   _tcr_extra:	Extra bits to set in the TCR register. This mask will
 *			be OR'ed with the default TCR value.
 *   _tlbi_fct:		Function to invalidate the TLBs at the current
 *			exception level
 ******************************************************************************/
#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)		\
	void enable_mmu_el##_el(uint32_t flags)				\
	{								\
		uint64_t mair, tcr, ttbr;				\
		uint32_t sctlr;						\
									\
		assert(IS_IN_EL(_el));					\
		assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0);	\
									\
		/* Set attributes in the right indices of the MAIR */	\
		mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);	\
		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,		\
				ATTR_IWBWA_OWBWA_NTR_INDEX);		\
		write_mair_el##_el(mair);				\
									\
		/* Invalidate TLBs at the current exception level */	\
		_tlbi_fct();						\
									\
		/* Set TCR bits as well. */				\
		/* Inner & outer WBWA & shareable, and */		\
		/* T0SZ = 64 - log2(ADDR_SPACE_SIZE) */		\
		tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA |	\
			TCR_RGN_INNER_WBA |				\
			(64 - __builtin_ctzl(ADDR_SPACE_SIZE));		\
		tcr |= _tcr_extra;					\
		write_tcr_el##_el(tcr);					\
									\
		/* Set TTBR bits as well */				\
		ttbr = (uint64_t) l1_xlation_table;			\
		write_ttbr0_el##_el(ttbr);				\
									\
		/* Ensure all translation table writes have drained */	\
		/* into memory, the TLB invalidation is complete, */	\
		/* and translation register writes are committed */	\
		/* before enabling the MMU */				\
		dsb();							\
		isb();							\
									\
		sctlr = read_sctlr_el##_el();				\
		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT | SCTLR_I_BIT;	\
		sctlr |= SCTLR_A_BIT;					\
									\
		if (flags & DISABLE_DCACHE)				\
			sctlr &= ~SCTLR_C_BIT;				\
		else							\
			sctlr |= SCTLR_C_BIT;				\
									\
		write_sctlr_el##_el(sctlr);				\
									\
		/* Ensure the MMU enable takes effect immediately */	\
		isb();							\
	}

/* Define EL1 and EL3 variants of the function enabling the MMU */
DEFINE_ENABLE_MMU_EL(1,
		(tcr_ps_bits << TCR_EL1_IPS_SHIFT),
		tlbivmalle1)
DEFINE_ENABLE_MMU_EL(3,
		TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
		tlbialle3)
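
/*
 * The two invocations above expand to enable_mmu_el1(uint32_t flags) and
 * enable_mmu_el3(uint32_t flags). Callers pass 0 for the common case, or
 * DISABLE_DCACHE to run with the MMU on but the data cache disabled, as
 * handled in the macro body above.
 */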