/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <platform_def.h>
#include <utils.h>
#include <xlat_tables_arch.h>
#include <xlat_tables.h>
#include "../xlat_tables_private.h"

#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
#error ARMv7 target does not support LPAE MMU descriptors
#endif

#define XLAT_TABLE_LEVEL_BASE	\
	GET_XLAT_TABLE_LEVEL_BASE(PLAT_VIRT_ADDR_SPACE_SIZE)

#define NUM_BASE_LEVEL_ENTRIES	\
	GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)

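/*
 * Base translation table. The long-descriptor format requires the table
 * pointed to by TTBR0 to be aligned to its own size, hence the __aligned()
 * attribute on the definition below.
 */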
static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));

#if ENABLE_ASSERTIONS
static unsigned long long get_max_supported_pa(void)
{
	/* Physical address space size for long descriptor format. */
	return (1ULL << 40) - 1ULL;
}
#endif /* ENABLE_ASSERTIONS */

int xlat_arch_current_el(void)
{
	/*
	 * If EL3 is in AArch32 mode, all secure PL1 modes (Monitor, System,
	 * SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
	 */
	return 3;
}

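/*
 * Return the execute-never bits to set in the translation table descriptors.
 * Only the Secure PL1&0 translation regime is handled here, so the exception
 * level argument is unused and the XN upper-attribute bit is always returned.
 */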
uint64_t xlat_arch_get_xn_desc(int el __unused)
{
	return UPPER_ATTRS(XN);
}

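/*
 * Build the base translation table from the platform memory map and check
 * that the resulting virtual and physical address ranges fit within the
 * configured address spaces.
 */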
void init_xlat_tables(void)
{
	unsigned long long max_pa;
	uintptr_t max_va;
	print_mmap();
	init_xlation_table(0, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
			   &max_va, &max_pa);

	assert(max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
	assert(max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);
	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <= get_max_supported_pa());
}

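/*
 * Typical call sequence from a platform port (illustrative only; the exact
 * regions, attributes and order depend on the platform):
 *
 *	mmap_add_region(DEVICE_BASE, DEVICE_BASE, DEVICE_SIZE,
 *			MT_DEVICE | MT_RW | MT_SECURE);
 *	init_xlat_tables();
 *	enable_mmu_secure(0);
 *
 * DEVICE_BASE and DEVICE_SIZE are placeholder names, not symbols defined by
 * this library.
 */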
/*******************************************************************************
 * Function for enabling the MMU in Secure PL1, assuming that the
 * page-tables have already been created.
 ******************************************************************************/
void enable_mmu_secure(unsigned int flags)
{
	unsigned int mair0, ttbcr, sctlr;
	uint64_t ttbr0;

	assert(IS_IN_SECURE());
	assert((read_sctlr() & SCTLR_M_BIT) == 0);

	/* Set attributes in the right indices of the MAIR */
	mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
			ATTR_IWBWA_OWBWA_NTR_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
			ATTR_NON_CACHEABLE_INDEX);
	write_mair0(mair0);

	/* Invalidate TLBs at the current exception level */
	tlbiall();

	/*
	 * Set TTBCR bits as well. Set TTBR0 table properties. Disable TTBR1.
	 */
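	/*
	 * The low-order bits of TTBCR hold T0SZ. PLAT_VIRT_ADDR_SPACE_SIZE is
	 * a power of two, so __builtin_ctzll() yields log2 of the size and
	 * 32 - log2(size) is the number of upper address bits not covered by
	 * the TTBR0 region.
	 */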
	if (flags & XLAT_TABLE_NC) {
		/* Inner & outer non-cacheable non-shareable. */
		ttbcr = TTBCR_EAE_BIT |
			TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
			TTBCR_RGN0_INNER_NC |
			(32 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));
	} else {
		/* Inner & outer WBWA & shareable. */
		ttbcr = TTBCR_EAE_BIT |
			TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
			TTBCR_RGN0_INNER_WBA |
			(32 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));
	}
	ttbcr |= TTBCR_EPD1_BIT;
	write_ttbcr(ttbcr);

	/* Set TTBR0 bits as well */
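	/*
	 * With the long-descriptor format (TTBCR.EAE set above), TTBR0 and
	 * TTBR1 are 64-bit registers, hence the write64_ accessors. TTBR1 is
	 * cleared since walks through it are disabled via TTBCR.EPD1.
	 */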
	ttbr0 = (uintptr_t) base_xlation_table;
	write64_ttbr0(ttbr0);
	write64_ttbr1(0);

	/*
	 * Ensure all translation table writes have drained
	 * into memory, the TLB invalidation is complete,
	 * and translation register writes are committed
	 * before enabling the MMU
	 */
	dsbish();
	isb();

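	/*
	 * Enable the MMU and write-implies-XN (SCTLR.WXN), so that writable
	 * memory is also treated as execute-never.
	 */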
	sctlr = read_sctlr();
	sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;

	if (flags & DISABLE_DCACHE)
		sctlr &= ~SCTLR_C_BIT;
	else
		sctlr |= SCTLR_C_BIT;

	write_sctlr(sctlr);

	/* Ensure the MMU enable takes effect immediately */
	isb();
}