blob: 87b15b8cf971b4b0be86aa51e920807dcddd1378 [file] [log] [blame]
Soby Mathew935c2e72016-06-30 15:11:07 +01001/*
Jeenu Viswambharan9f142612018-04-27 15:06:57 +01002 * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
Soby Mathew935c2e72016-06-30 15:11:07 +01003 *
dp-armfa3cf0b2017-05-03 09:38:09 +01004 * SPDX-License-Identifier: BSD-3-Clause
Soby Mathew935c2e72016-06-30 15:11:07 +01005 */
6
7#include <arch.h>
8#include <arch_helpers.h>
9#include <assert.h>
Soby Mathew935c2e72016-06-30 15:11:07 +010010#include <platform_def.h>
11#include <utils.h>
Sandrine Bailleux090c8492017-05-19 09:59:37 +010012#include <xlat_tables_arch.h>
Soby Mathew935c2e72016-06-30 15:11:07 +010013#include <xlat_tables.h>
14#include "../xlat_tables_private.h"
15
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010016#if (ARM_ARCH_MAJOR == 7) && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
Etienne Carriere0af78b62017-11-08 13:53:47 +010017#error ARMv7 target does not support LPAE MMU descriptors
18#endif
19
Sandrine Bailleux090c8492017-05-19 09:59:37 +010020#define XLAT_TABLE_LEVEL_BASE \
21 GET_XLAT_TABLE_LEVEL_BASE(PLAT_VIRT_ADDR_SPACE_SIZE)
Antonio Nino Diazd48ae612016-08-02 09:21:41 +010022
Sandrine Bailleux090c8492017-05-19 09:59:37 +010023#define NUM_BASE_LEVEL_ENTRIES \
24 GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)
Antonio Nino Diazd48ae612016-08-02 09:21:41 +010025
/*
 * Storage for the base-level translation table. It is aligned to its own
 * total size because its address is later programmed into TTBR0 (see
 * enable_mmu_secure()), which imposes alignment requirements on the base
 * table -- NOTE(review): exact alignment rule per Arm ARM LPAE, confirm.
 */
static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
	__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));
Soby Mathew935c2e72016-06-30 15:11:07 +010028
Antonio Nino Diaz3759e3f2017-03-22 15:48:51 +000029#if ENABLE_ASSERTIONS
static unsigned long long get_max_supported_pa(void)
{
	/*
	 * The long descriptor translation format gives a 40-bit physical
	 * address space; return the highest addressable physical byte.
	 */
	const unsigned int pa_width = 40U;

	return (1ULL << pa_width) - 1ULL;
}
Antonio Nino Diaz3759e3f2017-03-22 15:48:51 +000035#endif /* ENABLE_ASSERTIONS */
Antonio Nino Diazd1beee22016-12-13 15:28:54 +000036
unsigned int xlat_arch_current_el(void)
{
	/*
	 * With EL3 in AArch32 state, all the secure PL1 modes (Monitor,
	 * System, SVC, Abort, UND, IRQ and FIQ) execute at EL3, so the
	 * current exception level is always reported as 3.
	 */
	return 3U;
}
45
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010046uint64_t xlat_arch_get_xn_desc(unsigned int el __unused)
Antonio Nino Diazefabaa92017-04-27 13:30:22 +010047{
48 return UPPER_ATTRS(XN);
49}
50
Soby Mathew935c2e72016-06-30 15:11:07 +010051void init_xlat_tables(void)
52{
53 unsigned long long max_pa;
54 uintptr_t max_va;
55 print_mmap();
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010056 init_xlation_table(0U, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
Antonio Nino Diazd48ae612016-08-02 09:21:41 +010057 &max_va, &max_pa);
Antonio Nino Diazd1beee22016-12-13 15:28:54 +000058
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010059 assert(max_va <= (PLAT_VIRT_ADDR_SPACE_SIZE - 1U));
60 assert(max_pa <= (PLAT_PHY_ADDR_SPACE_SIZE - 1U));
61 assert((PLAT_PHY_ADDR_SPACE_SIZE - 1U) <= get_max_supported_pa());
Soby Mathew935c2e72016-06-30 15:11:07 +010062}
63
/*******************************************************************************
 * Function for enabling the MMU in Secure PL1, assuming that the
 * page-tables have already been created.
 *
 * 'flags' may contain XLAT_TABLE_NC (program TTBR0 walks as non-cacheable,
 * non-shareable) and DISABLE_DCACHE (leave the data cache disabled when the
 * MMU is turned on).
 ******************************************************************************/
void enable_mmu_secure(unsigned int flags)
{
	unsigned int mair0, ttbcr, sctlr;
	uint64_t ttbr0;

	/* Must be called from Secure state, with the MMU still disabled. */
	assert(IS_IN_SECURE());
	assert((read_sctlr() & SCTLR_M_BIT) == 0U);

	/* Set attributes in the right indices of the MAIR */
	mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
			ATTR_IWBWA_OWBWA_NTR_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
			ATTR_NON_CACHEABLE_INDEX);
	write_mair0(mair0);

	/* Invalidate TLBs at the current exception level */
	tlbiall();

	/*
	 * Set TTBCR bits as well. Set TTBR0 table properties. Disable TTBR1.
	 */
	/*
	 * T0SZ = 32 - log2(VA space size). PLAT_VIRT_ADDR_SPACE_SIZE is
	 * assumed to be a power of two, so ctzll() yields its log2 --
	 * NOTE(review): confirm the platform guarantees a power-of-two size.
	 */
	int t0sz = 32 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE);

	if ((flags & XLAT_TABLE_NC) != 0U) {
		/* Inner & outer non-cacheable non-shareable. */
		ttbcr = TTBCR_EAE_BIT |
			TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
			TTBCR_RGN0_INNER_NC | (uint32_t) t0sz;
	} else {
		/* Inner & outer WBWA & shareable. */
		ttbcr = TTBCR_EAE_BIT |
			TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
			TTBCR_RGN0_INNER_WBA | (uint32_t) t0sz;
	}
	/* EPD1 disables translation table walks via TTBR1. */
	ttbcr |= TTBCR_EPD1_BIT;
	write_ttbcr(ttbcr);

	/* Set TTBR0 bits as well */
	ttbr0 = (uintptr_t) base_xlation_table;
	write64_ttbr0(ttbr0);
	write64_ttbr1(0U);

	/*
	 * Ensure all translation table writes have drained
	 * into memory, the TLB invalidation is complete,
	 * and translation register writes are committed
	 * before enabling the MMU
	 */
	dsbish();
	isb();

	/* Enable the MMU and write-implies-XN; then pick the D-cache state. */
	sctlr = read_sctlr();
	sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;

	if ((flags & DISABLE_DCACHE) != 0U)
		sctlr &= ~SCTLR_C_BIT;
	else
		sctlr |= SCTLR_C_BIT;

	write_sctlr(sctlr);

	/* Ensure the MMU enable takes effect immediately */
	isb();
}
Jeenu Viswambharan9f142612018-04-27 15:06:57 +0100133
/*
 * Thin wrapper kept for interface parity with other architectures: on this
 * AArch32 build, enabling the MMU "directly" simply delegates to
 * enable_mmu_secure() with the same flags.
 */
void enable_mmu_direct(unsigned int flags)
{
	enable_mmu_secure(flags);
}