blob: 033e2375f0b47ab52c4ca0e5495a1802c9c10810 [file] [log] [blame]
Soby Mathew935c2e72016-06-30 15:11:07 +01001/*
Jeenu Viswambharan9f142612018-04-27 15:06:57 +01002 * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
Soby Mathew935c2e72016-06-30 15:11:07 +01003 *
dp-armfa3cf0b2017-05-03 09:38:09 +01004 * SPDX-License-Identifier: BSD-3-Clause
Soby Mathew935c2e72016-06-30 15:11:07 +01005 */
6
7#include <arch.h>
8#include <arch_helpers.h>
9#include <assert.h>
Soby Mathew935c2e72016-06-30 15:11:07 +010010#include <platform_def.h>
11#include <utils.h>
Sandrine Bailleux090c8492017-05-19 09:59:37 +010012#include <xlat_tables_arch.h>
Soby Mathew935c2e72016-06-30 15:11:07 +010013#include <xlat_tables.h>
14#include "../xlat_tables_private.h"
15
/*
 * The long-descriptor (LPAE) translation table format used by this library
 * is only available on ARMv7 when the CPU implements the Large Page
 * Addressing extension.
 */
#if (ARM_ARCH_MAJOR == 7) && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
#error ARMv7 target does not support LPAE MMU descriptors
#endif

/* Initial lookup level implied by the configured virtual address space size. */
#define XLAT_TABLE_LEVEL_BASE	\
	GET_XLAT_TABLE_LEVEL_BASE(PLAT_VIRT_ADDR_SPACE_SIZE)

/* Number of entries in the base-level translation table. */
#define NUM_BASE_LEVEL_ENTRIES	\
	GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)

/*
 * Base translation table. The alignment to its total size (entries * 8
 * bytes) is an architectural requirement for the table pointed to by TTBR0.
 */
static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));
Soby Mathew935c2e72016-06-30 15:11:07 +010028
Antonio Nino Diaz3759e3f2017-03-22 15:48:51 +000029#if ENABLE_ASSERTIONS
/*
 * Return the highest physical address reachable through the long-descriptor
 * translation format on AArch32, i.e. the top of the 40-bit PA space.
 */
static unsigned long long get_max_supported_pa(void)
{
	/* 2^40 - 1 */
	return 0xFFFFFFFFFFULL;
}
Antonio Nino Diaz3759e3f2017-03-22 15:48:51 +000035#endif /* ENABLE_ASSERTIONS */
Antonio Nino Diazd1beee22016-12-13 15:28:54 +000036
/*
 * Return the exception level this translation-table code runs at.
 */
unsigned int xlat_arch_current_el(void)
{
	/*
	 * When EL3 is in AArch32 mode, every secure PL1 mode (Monitor,
	 * System, SVC, Abort, UND, IRQ and FIQ) is treated as executing
	 * at EL3.
	 */
	const unsigned int current_el = 3U;

	return current_el;
}
45
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010046uint64_t xlat_arch_get_xn_desc(unsigned int el __unused)
Antonio Nino Diazefabaa92017-04-27 13:30:22 +010047{
48 return UPPER_ATTRS(XN);
49}
50
Soby Mathew935c2e72016-06-30 15:11:07 +010051void init_xlat_tables(void)
52{
53 unsigned long long max_pa;
54 uintptr_t max_va;
55 print_mmap();
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010056 init_xlation_table(0U, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
Antonio Nino Diazd48ae612016-08-02 09:21:41 +010057 &max_va, &max_pa);
Antonio Nino Diazd1beee22016-12-13 15:28:54 +000058
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010059 assert(max_va <= (PLAT_VIRT_ADDR_SPACE_SIZE - 1U));
60 assert(max_pa <= (PLAT_PHY_ADDR_SPACE_SIZE - 1U));
61 assert((PLAT_PHY_ADDR_SPACE_SIZE - 1U) <= get_max_supported_pa());
Soby Mathew935c2e72016-06-30 15:11:07 +010062}
63
/*******************************************************************************
 * Functions for enabling the MMU in Secure PL1, assuming that the
 * page-tables have already been created.
 ******************************************************************************/
#if !ERROR_DEPRECATED
/* Deprecated name kept for backward compatibility; thin wrapper around
 * enable_mmu_svc_mon(). */
void enable_mmu_secure(unsigned int flags)
{
	enable_mmu_svc_mon(flags);
}

/* Deprecated name kept for backward compatibility; thin wrapper around
 * enable_mmu_direct_svc_mon(). */
void enable_mmu_direct(unsigned int flags)
{
	enable_mmu_direct_svc_mon(flags);
}
#endif
79
/*
 * Program the Secure PL1&0 translation regime and turn the MMU on.
 *
 * 'flags' may contain XLAT_TABLE_NC (map the tables themselves as
 * non-cacheable) and DISABLE_DCACHE (leave the data cache disabled).
 *
 * Must be called from a secure state with the MMU still off; the
 * translation tables must already have been built by init_xlat_tables().
 * The ordering below (MAIR -> TLBI -> TTBCR/TTBR -> DSB/ISB -> SCTLR)
 * is deliberate: all translation controls must be committed before the
 * MMU enable bit is set.
 */
void enable_mmu_svc_mon(unsigned int flags)
{
	unsigned int mair0, ttbcr, sctlr;
	uint64_t ttbr0;

	assert(IS_IN_SECURE());
	/* The MMU must not already be enabled. */
	assert((read_sctlr() & SCTLR_M_BIT) == 0U);

	/* Set attributes in the right indices of the MAIR */
	mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
			ATTR_IWBWA_OWBWA_NTR_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
			ATTR_NON_CACHEABLE_INDEX);
	write_mair0(mair0);

	/* Invalidate TLBs at the current exception level */
	tlbiall();

	/*
	 * Set TTBCR bits as well. Set TTBR0 table properties. Disable TTBR1.
	 * T0SZ encodes the size of the TTBR0 region as 32 minus the number
	 * of address bits (PLAT_VIRT_ADDR_SPACE_SIZE is a power of two, so
	 * ctzll gives its bit width).
	 */
	int t0sz = 32 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE);

	if ((flags & XLAT_TABLE_NC) != 0U) {
		/* Inner & outer non-cacheable non-shareable. */
		ttbcr = TTBCR_EAE_BIT |
			TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
			TTBCR_RGN0_INNER_NC | (uint32_t) t0sz;
	} else {
		/* Inner & outer WBWA & shareable. */
		ttbcr = TTBCR_EAE_BIT |
			TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
			TTBCR_RGN0_INNER_WBA | (uint32_t) t0sz;
	}
	/* Disable TTBR1 walks; this regime only uses TTBR0. */
	ttbcr |= TTBCR_EPD1_BIT;
	write_ttbcr(ttbcr);

	/* Set TTBR0 bits as well */
	ttbr0 = (uintptr_t) base_xlation_table;
	write64_ttbr0(ttbr0);
	write64_ttbr1(0U);

	/*
	 * Ensure all translation table writes have drained
	 * into memory, the TLB invalidation is complete,
	 * and translation register writes are committed
	 * before enabling the MMU
	 */
	dsbish();
	isb();

	/* Enable the MMU and write-implies-XN; honour the cache flag. */
	sctlr = read_sctlr();
	sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;

	if ((flags & DISABLE_DCACHE) != 0U)
		sctlr &= ~SCTLR_C_BIT;
	else
		sctlr |= SCTLR_C_BIT;

	write_sctlr(sctlr);

	/* Ensure the MMU enable takes effect immediately */
	isb();
}
Jeenu Viswambharan9f142612018-04-27 15:06:57 +0100145
Antonio Nino Diaz128de8d2018-08-07 19:59:49 +0100146void enable_mmu_direct_svc_mon(unsigned int flags)
Jeenu Viswambharan9f142612018-04-27 15:06:57 +0100147{
Antonio Nino Diaz128de8d2018-08-07 19:59:49 +0100148 enable_mmu_svc_mon(flags);
Jeenu Viswambharan9f142612018-04-27 15:06:57 +0100149}