blob: 9c1562407be3ead109b94d1f2d55e227f3ab4505 [file] [log] [blame]
Soby Mathew935c2e72016-06-30 15:11:07 +01001/*
Antonio Nino Diaz3759e3f2017-03-22 15:48:51 +00002 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
Soby Mathew935c2e72016-06-30 15:11:07 +01003 *
dp-armfa3cf0b2017-05-03 09:38:09 +01004 * SPDX-License-Identifier: BSD-3-Clause
Soby Mathew935c2e72016-06-30 15:11:07 +01005 */
6
7#include <arch.h>
8#include <arch_helpers.h>
9#include <assert.h>
10#include <cassert.h>
11#include <platform_def.h>
12#include <utils.h>
13#include <xlat_tables.h>
14#include "../xlat_tables_private.h"
15
/*
 * Each platform can define the size of its virtual address space, which is
 * defined in PLAT_VIRT_ADDR_SPACE_SIZE. TTBCR.TxSZ is calculated as 32 minus
 * the width of said address space. The value of TTBCR.TxSZ must be in the
 * range 0 to 7 [1], which means that the virtual address space width must be
 * in the range 32 to 25 bits.
 *
 * Here we calculate the initial lookup level from the value of
 * PLAT_VIRT_ADDR_SPACE_SIZE. For a 4 KB page size, level 1 supports virtual
 * address spaces of widths 32 to 31 bits, and level 2 from 30 to 25. Wider or
 * narrower address spaces are not supported. As a result, level 3 cannot be
 * used as the initial lookup level with 4 KB granularity [1].
 *
 * For example, for a 31-bit address space (i.e. PLAT_VIRT_ADDR_SPACE_SIZE ==
 * 1 << 31), TTBCR.TxSZ will be programmed to (32 - 31) = 1. According to
 * Table G4-5 in the ARM ARM, the initial lookup level for an address space
 * like that is 1.
 *
 * See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
 * information:
 * [1] Section G4.6.5
 */
Soby Mathew935c2e72016-06-30 15:11:07 +010038
/*
 * Pick the initial (base) translation level and the number of entries in the
 * base-level table from PLAT_VIRT_ADDR_SPACE_SIZE, per the rules described in
 * the comment above: level 1 covers 32- to 31-bit spaces, level 2 covers
 * 30- to 25-bit spaces; anything outside that range is rejected at compile
 * time.
 */
#if PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << (32 - TTBCR_TxSZ_MIN))

# error "PLAT_VIRT_ADDR_SPACE_SIZE is too big."

#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1 << L1_XLAT_ADDRESS_SHIFT)

/* Wider than one level-1 entry span: start the walk at level 1. */
# define XLAT_TABLE_LEVEL_BASE	1
# define NUM_BASE_LEVEL_ENTRIES	\
		(PLAT_VIRT_ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)

#elif PLAT_VIRT_ADDR_SPACE_SIZE >= (1 << (32 - TTBCR_TxSZ_MAX))

/* Fits within level-2 coverage: start the walk at level 2. */
# define XLAT_TABLE_LEVEL_BASE	2
# define NUM_BASE_LEVEL_ENTRIES	\
		(PLAT_VIRT_ADDR_SPACE_SIZE >> L2_XLAT_ADDRESS_SHIFT)

#else

# error "PLAT_VIRT_ADDR_SPACE_SIZE is too small."

#endif

/*
 * Statically allocated base-level translation table, later programmed into
 * TTBR0 by enable_mmu_secure(). It is aligned to its own size — presumably
 * to satisfy the TTBR0 base-address alignment requirement for the chosen
 * table size; confirm against the ARM ARM if the sizing rules change.
 */
static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));
Soby Mathew935c2e72016-06-30 15:11:07 +010063
Antonio Nino Diaz3759e3f2017-03-22 15:48:51 +000064#if ENABLE_ASSERTIONS
/*
 * Return the highest physical address supported by this translation regime.
 * The long-descriptor (LPAE) format used here provides a 40-bit physical
 * address space, so the limit is 2^40 - 1.
 */
static unsigned long long get_max_supported_pa(void)
{
	const unsigned int pa_width = 40U;

	return (1ULL << pa_width) - 1ULL;
}
Antonio Nino Diaz3759e3f2017-03-22 15:48:51 +000070#endif /* ENABLE_ASSERTIONS */
Antonio Nino Diazd1beee22016-12-13 15:28:54 +000071
/*
 * Return the exception level this translation code executes at.
 *
 * When EL3 is in AArch32 mode, every secure PL1 mode (Monitor, System, SVC,
 * Abort, UND, IRQ and FIQ) executes at EL3, so the answer is always 3.
 */
int xlat_arch_current_el(void)
{
	const int current_el = 3;

	return current_el;
}
80
81uint64_t xlat_arch_get_xn_desc(int el __unused)
82{
83 return UPPER_ATTRS(XN);
84}
85
Soby Mathew935c2e72016-06-30 15:11:07 +010086void init_xlat_tables(void)
87{
88 unsigned long long max_pa;
89 uintptr_t max_va;
90 print_mmap();
Antonio Nino Diazd48ae612016-08-02 09:21:41 +010091 init_xlation_table(0, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
92 &max_va, &max_pa);
Antonio Nino Diazd1beee22016-12-13 15:28:54 +000093
94 assert(max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
95 assert(max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);
96 assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <= get_max_supported_pa());
Soby Mathew935c2e72016-06-30 15:11:07 +010097}
98
99/*******************************************************************************
100 * Function for enabling the MMU in Secure PL1, assuming that the
101 * page-tables have already been created.
102 ******************************************************************************/
/*******************************************************************************
 * Function for enabling the MMU in Secure PL1, assuming that the
 * page-tables have already been created.
 *
 * flags may contain XLAT_TABLE_NC (map the translation tables themselves as
 * non-cacheable in TTBCR walk attributes) and/or DISABLE_DCACHE (leave the
 * data cache disabled in SCTLR).
 *
 * Must be called with the MMU off (asserted below); the register write
 * sequence (MAIR -> TLBI -> TTBCR -> TTBR -> barriers -> SCTLR) is order
 * sensitive, finishing with an ISB so the new translation regime takes
 * effect immediately.
 ******************************************************************************/
void enable_mmu_secure(unsigned int flags)
{
	unsigned int mair0, ttbcr, sctlr;
	uint64_t ttbr0;

	/* Secure world only, and the MMU must still be disabled. */
	assert(IS_IN_SECURE());
	assert((read_sctlr() & SCTLR_M_BIT) == 0);

	/* Set attributes in the right indices of the MAIR */
	mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
			ATTR_IWBWA_OWBWA_NTR_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
			ATTR_NON_CACHEABLE_INDEX);
	write_mair0(mair0);

	/* Invalidate TLBs at the current exception level */
	tlbiall();

	/*
	 * Set TTBCR bits as well. Set TTBR0 table properties. Disable TTBR1.
	 *
	 * T0SZ is computed as 32 - log2(PLAT_VIRT_ADDR_SPACE_SIZE); the ctz
	 * form is equivalent because the size is assumed to be a power of
	 * two (enforced elsewhere — TODO confirm the platform-level check).
	 */
	if (flags & XLAT_TABLE_NC) {
		/* Inner & outer non-cacheable non-shareable. */
		ttbcr = TTBCR_EAE_BIT |
			TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
			TTBCR_RGN0_INNER_NC |
			(32 - __builtin_ctzl((uintptr_t)PLAT_VIRT_ADDR_SPACE_SIZE));
	} else {
		/* Inner & outer WBWA & shareable. */
		ttbcr = TTBCR_EAE_BIT |
			TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
			TTBCR_RGN0_INNER_WBA |
			(32 - __builtin_ctzl((uintptr_t)PLAT_VIRT_ADDR_SPACE_SIZE));
	}
	/* EPD1 disables TTBR1-based walks; only TTBR0 is used. */
	ttbcr |= TTBCR_EPD1_BIT;
	write_ttbcr(ttbcr);

	/* Set TTBR0 bits as well */
	ttbr0 = (uintptr_t) base_xlation_table;
	write64_ttbr0(ttbr0);
	write64_ttbr1(0);

	/*
	 * Ensure all translation table writes have drained
	 * into memory, the TLB invalidation is complete,
	 * and translation register writes are committed
	 * before enabling the MMU
	 */
	dsbish();
	isb();

	/* Enable the MMU and write-implies-XN in one SCTLR update. */
	sctlr = read_sctlr();
	sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;

	if (flags & DISABLE_DCACHE)
		sctlr &= ~SCTLR_C_BIT;
	else
		sctlr |= SCTLR_C_BIT;

	write_sctlr(sctlr);

	/* Ensure the MMU enable takes effect immediately */
	isb();
}