/*
 * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <cassert.h>
#include <debug.h>
#include <platform_def.h>
#include <string.h>
#include <xlat_tables.h>

#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
#define LVL0_SPACER ""
#define LVL1_SPACER "  "
#define LVL2_SPACER "    "
#define LVL3_SPACER "      "
#define get_level_spacer(level)                                 \
                (((level) == 0) ? LVL0_SPACER :                 \
                (((level) == 1) ? LVL1_SPACER :                 \
                (((level) == 2) ? LVL2_SPACER : LVL3_SPACER)))
#define debug_print(...) tf_printf(__VA_ARGS__)
#else
#define debug_print(...) ((void)0)
#endif

CASSERT(ADDR_SPACE_SIZE > 0, assert_valid_addr_space_size);

#define UNSET_DESC      ~0ul

#define NUM_L1_ENTRIES (ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
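
/*
 * Worked example (assuming a typical platform_def.h): with
 * ADDR_SPACE_SIZE = (1ull << 32), i.e. a 4GB address space, and the 4KB
 * granule used by this library, L1_XLAT_ADDRESS_SHIFT is 30 (each level 1
 * entry maps 1GB), so NUM_L1_ENTRIES = 4.
 */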

static uint64_t l1_xlation_table[NUM_L1_ENTRIES]
__aligned(NUM_L1_ENTRIES * sizeof(uint64_t));
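
/*
 * Note on the alignment above: the architecture requires the translation
 * table base address programmed into TTBR0_ELx to be aligned to the size of
 * the initial lookup table, here NUM_L1_ENTRIES descriptors of 8 bytes each.
 */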

static uint64_t xlat_tables[MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
__aligned(XLAT_TABLE_SIZE) __section("xlat_table");

static unsigned next_xlat;
static unsigned long max_pa;
static unsigned long max_va;
static unsigned long tcr_ps_bits;

/*
 * Array of all memory regions stored in order of ascending base address.
 * The list is terminated by the first entry with size == 0.
 */
static mmap_region_t mmap[MAX_MMAP_REGIONS + 1];


static void print_mmap(void)
{
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
        debug_print("mmap:\n");
        mmap_region_t *mm = mmap;
        while (mm->size) {
                debug_print(" VA:0x%lx  PA:0x%lx  size:0x%lx  attr:0x%x\n",
                                mm->base_va, mm->base_pa, mm->size, mm->attr);
                ++mm;
        }
        debug_print("\n");
#endif
}

void mmap_add_region(unsigned long base_pa, unsigned long base_va,
                        unsigned long size, unsigned attr)
{
        mmap_region_t *mm = mmap;
        mmap_region_t *mm_last = mm + ARRAY_SIZE(mmap) - 1;
        unsigned long pa_end = base_pa + size - 1;
        unsigned long va_end = base_va + size - 1;

        assert(IS_PAGE_ALIGNED(base_pa));
        assert(IS_PAGE_ALIGNED(base_va));
        assert(IS_PAGE_ALIGNED(size));

        if (!size)
                return;

        /* Find correct place in mmap to insert new region */
        while (mm->base_va < base_va && mm->size)
                ++mm;

        /* Make room for new region by moving other regions up by one place */
        memmove(mm + 1, mm, (uintptr_t)mm_last - (uintptr_t)mm);

        /* Check we haven't lost the empty sentinel from the end of the array */
        assert(mm_last->size == 0);

        mm->base_pa = base_pa;
        mm->base_va = base_va;
        mm->size = size;
        mm->attr = attr;

        if (pa_end > max_pa)
                max_pa = pa_end;
        if (va_end > max_va)
                max_va = va_end;
}

void mmap_add(const mmap_region_t *mm)
{
        while (mm->size) {
                mmap_add_region(mm->base_pa, mm->base_va, mm->size, mm->attr);
                ++mm;
        }
}
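
/*
 * Usage sketch (the array name, addresses and sizes below are hypothetical,
 * platform-specific choices): a platform describes its memory map as a
 * zero-terminated region array and registers it in one call, e.g.
 *
 *      static const mmap_region_t plat_mmap[] = {
 *              { .base_pa = 0x04000000, .base_va = 0x04000000,
 *                .size = 0x00100000, .attr = MT_MEMORY | MT_RW },
 *              { .base_pa = 0x1c000000, .base_va = 0x1c000000,
 *                .size = 0x00010000, .attr = MT_DEVICE | MT_RW | MT_NS },
 *              {0}
 *      };
 *
 *      mmap_add(plat_mmap);
 */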

static unsigned long mmap_desc(unsigned attr, unsigned long addr_pa,
                                        unsigned level)
{
        unsigned long desc = addr_pa;
        int mem_type;

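        /*
         * Level 3 maps pages, not blocks, and a page descriptor shares the
         * table descriptor's encoding in bits[1:0] (0b11), so TABLE_DESC is
         * reused here; levels 1 and 2 use block descriptors.
         */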
        desc |= level == 3 ? TABLE_DESC : BLOCK_DESC;

        desc |= attr & MT_NS ? LOWER_ATTRS(NS) : 0;

        desc |= attr & MT_RW ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);

        desc |= LOWER_ATTRS(ACCESS_FLAG);

        mem_type = MT_TYPE(attr);
        if (mem_type == MT_MEMORY) {
                desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
                if (attr & MT_RW)
                        desc |= UPPER_ATTRS(XN);
        } else if (mem_type == MT_NON_CACHEABLE) {
                desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
                if (attr & MT_RW)
                        desc |= UPPER_ATTRS(XN);
        } else {
                assert(mem_type == MT_DEVICE);
                desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
                desc |= UPPER_ATTRS(XN);
        }

        debug_print((mem_type == MT_MEMORY) ? "MEM" :
                ((mem_type == MT_NON_CACHEABLE) ? "NC" : "DEV"));
        debug_print(attr & MT_RW ? "-RW" : "-RO");
        debug_print(attr & MT_NS ? "-NS" : "-S");

        return desc;
}
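
/*
 * Worked example: for attr = MT_DEVICE | MT_RW at level 2, the descriptor
 * built above is the output address ORed with BLOCK_DESC,
 * LOWER_ATTRS(AP_RW), LOWER_ATTRS(ACCESS_FLAG),
 * LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH) and UPPER_ATTRS(XN): writable device
 * memory is always mapped execute-never.
 */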

static int mmap_region_attr(mmap_region_t *mm, unsigned long base_va,
                                        unsigned long size)
{
        int attr = mm->attr;
        int old_mem_type, new_mem_type;

        for (;;) {
                ++mm;

                if (!mm->size)
                        return attr; /* Reached end of list */

                if (mm->base_va >= base_va + size)
                        return attr; /* Next region is after area so end */

                if (mm->base_va + mm->size <= base_va)
                        continue; /* Next region has already been overtaken */

                if ((mm->attr & attr) == attr)
                        continue; /* Region doesn't override attribs so skip */

                /*
                 * Update memory mapping attributes in 2 steps:
                 * 1) Update access permissions and security state flags
                 * 2) Update memory type.
                 *
                 * See xlat_tables.h for details about the attributes priority
                 * system and the rules dictating whether attributes should be
                 * updated.
                 */
                old_mem_type = MT_TYPE(attr);
                new_mem_type = MT_TYPE(mm->attr);
                attr &= mm->attr;
                if (new_mem_type < old_mem_type)
                        attr = (attr & ~MT_TYPE_MASK) | new_mem_type;

                if (mm->base_va > base_va ||
                        mm->base_va + mm->size < base_va + size)
                        return -1; /* Region doesn't fully cover our area */
        }
}
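
/*
 * Example: if a MT_MEMORY | MT_RW region contains a smaller
 * MT_MEMORY | MT_RO region that fully covers the area being queried, the
 * merged attributes drop MT_RW and the area is mapped read-only. If an
 * overriding region covers the area only partially, -1 is returned and
 * init_xlation_table() descends into a finer-grained table instead.
 */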

static mmap_region_t *init_xlation_table(mmap_region_t *mm,
                                        unsigned long base_va,
                                        unsigned long *table, unsigned level)
{
        unsigned level_size_shift = L1_XLAT_ADDRESS_SHIFT - (level - 1) *
                                                XLAT_TABLE_ENTRIES_SHIFT;
        unsigned level_size = 1 << level_size_shift;
        unsigned long level_index_mask = XLAT_TABLE_ENTRIES_MASK <<
                                                level_size_shift;

        assert(level <= 3);

        debug_print("New xlat table:\n");

        do {
                unsigned long desc = UNSET_DESC;

                if (mm->base_va + mm->size <= base_va) {
                        /* Area now after the region so skip it */
                        ++mm;
                        continue;
                }

                debug_print("%s VA:0x%lx size:0x%x ", get_level_spacer(level),
                                base_va, level_size);

                if (mm->base_va >= base_va + level_size) {
                        /* Next region is after area so nothing to map yet */
                        desc = INVALID_DESC;
                } else if (mm->base_va <= base_va && mm->base_va + mm->size >=
                                base_va + level_size) {
                        /* Next region covers all of area */
                        int attr = mmap_region_attr(mm, base_va, level_size);
                        if (attr >= 0)
                                desc = mmap_desc(attr,
                                        base_va - mm->base_va + mm->base_pa,
                                        level);
                }
                /* else the next region only partially covers the area, so a
                 * finer-grained table is needed */

                if (desc == UNSET_DESC) {
                        /* Area not covered by a region so need finer table */
                        unsigned long *new_table = xlat_tables[next_xlat++];
                        assert(next_xlat <= MAX_XLAT_TABLES);
                        desc = TABLE_DESC | (unsigned long)new_table;

                        /* Recurse to fill in new table */
                        mm = init_xlation_table(mm, base_va,
                                                new_table, level + 1);
                }

                debug_print("\n");

                *table++ = desc;
                base_va += level_size;
        } while (mm->size && (base_va & level_index_mask));

        return mm;
}
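
/*
 * Illustration of the recursion: with a 4KB granule a level 1 entry maps
 * 1GB, a level 2 entry 2MB and a level 3 entry 4KB. A single 4KB region
 * therefore consumes one level 2 table and one level 3 table: the level 1
 * entry points at the level 2 table, whose entry for the region's 2MB
 * window points at the level 3 table holding the one valid page descriptor.
 */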

static unsigned int calc_physical_addr_size_bits(unsigned long max_addr)
{
        /* Physical address can't exceed 48 bits */
        assert((max_addr & ADDR_MASK_48_TO_63) == 0);

        /* 48 bits address */
        if (max_addr & ADDR_MASK_44_TO_47)
                return TCR_PS_BITS_256TB;

        /* 44 bits address */
        if (max_addr & ADDR_MASK_42_TO_43)
                return TCR_PS_BITS_16TB;

        /* 42 bits address */
        if (max_addr & ADDR_MASK_40_TO_41)
                return TCR_PS_BITS_4TB;

        /* 40 bits address */
        if (max_addr & ADDR_MASK_36_TO_39)
                return TCR_PS_BITS_1TB;

        /* 36 bits address */
        if (max_addr & ADDR_MASK_32_TO_35)
                return TCR_PS_BITS_64GB;

        return TCR_PS_BITS_4GB;
}
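
/*
 * Example: a platform whose highest physical address is 0x9ffffffff has
 * bit 35 set, which matches ADDR_MASK_32_TO_35, so the function returns
 * TCR_PS_BITS_64GB and TCR is programmed for a 36-bit physical address
 * space.
 */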

void init_xlat_tables(void)
{
        print_mmap();
        init_xlation_table(mmap, 0, l1_xlation_table, 1);
        tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
        assert(max_va < ADDR_SPACE_SIZE);
}

/*******************************************************************************
 * Macro generating the code for the function enabling the MMU in the given
 * exception level, assuming that the translation tables have already been
 * created.
 *
 * _el:		Exception level at which the function will run
 * _tcr_extra:	Extra bits to set in the TCR register. This mask will
 *		be OR'ed with the default TCR value.
 * _tlbi_fct:	Function to invalidate the TLBs at the current
 *		exception level
 ******************************************************************************/
#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)                \
        void enable_mmu_el##_el(uint32_t flags)                         \
        {                                                               \
                uint64_t mair, tcr, ttbr;                               \
                uint32_t sctlr;                                         \
                                                                        \
                assert(IS_IN_EL(_el));                                  \
                assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0);      \
                                                                        \
                /* Set attributes in the right indices of the MAIR */   \
                mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);   \
                mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,             \
                                ATTR_IWBWA_OWBWA_NTR_INDEX);            \
                mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE,               \
                                ATTR_NON_CACHEABLE_INDEX);              \
                write_mair_el##_el(mair);                               \
                                                                        \
                /* Invalidate TLBs at the current exception level */    \
                _tlbi_fct();                                            \
                                                                        \
                /* Set TCR bits as well. */                             \
                /* Inner & outer WBWA & shareable; T0SZ is derived */   \
                /* from ADDR_SPACE_SIZE below */                        \
                tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA |      \
                        TCR_RGN_INNER_WBA |                             \
                        (64 - __builtin_ctzl(ADDR_SPACE_SIZE));         \
                tcr |= _tcr_extra;                                      \
                write_tcr_el##_el(tcr);                                 \
                                                                        \
                /* Set TTBR bits as well */                             \
                ttbr = (uint64_t) l1_xlation_table;                     \
                write_ttbr0_el##_el(ttbr);                              \
                                                                        \
                /* Ensure all translation table writes have drained */  \
                /* into memory, the TLB invalidation is complete, */    \
                /* and translation register writes are committed */     \
                /* before enabling the MMU */                           \
                dsb();                                                  \
                isb();                                                  \
                                                                        \
                sctlr = read_sctlr_el##_el();                           \
                sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;                   \
                                                                        \
                if (flags & DISABLE_DCACHE)                             \
                        sctlr &= ~SCTLR_C_BIT;                          \
                else                                                    \
                        sctlr |= SCTLR_C_BIT;                           \
                                                                        \
                write_sctlr_el##_el(sctlr);                             \
                                                                        \
                /* Ensure the MMU enable takes effect immediately */    \
                isb();                                                  \
        }

/* Define EL1 and EL3 variants of the function enabling the MMU */
DEFINE_ENABLE_MMU_EL(1,
                (tcr_ps_bits << TCR_EL1_IPS_SHIFT),
                tlbivmalle1)
DEFINE_ENABLE_MMU_EL(3,
                TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
                tlbialle3)
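
/*
 * Typical call sequence from a BL image (a sketch: plat_mmap and the zero
 * flags value are hypothetical, platform-specific choices):
 *
 *      mmap_add(plat_mmap);    - describe the memory map
 *      init_xlat_tables();     - build the translation tables
 *      enable_mmu_el3(0);      - or enable_mmu_el1(); pass DISABLE_DCACHE
 *                                to run with the data cache disabled
 */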