blob: 4b01b9b7ab171f5ebdecfeabe1442ee56c19e85c [file] [log] [blame]
Soby Mathew935c2e72016-06-30 15:11:07 +01001/*
Jeenu Viswambharan9f142612018-04-27 15:06:57 +01002 * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
Soby Mathew935c2e72016-06-30 15:11:07 +01003 *
dp-armfa3cf0b2017-05-03 09:38:09 +01004 * SPDX-License-Identifier: BSD-3-Clause
Soby Mathew935c2e72016-06-30 15:11:07 +01005 */
6
Soby Mathew935c2e72016-06-30 15:11:07 +01007#include <assert.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +00008
Soby Mathew935c2e72016-06-30 15:11:07 +01009#include <platform_def.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000010
11#include <arch.h>
12#include <arch_helpers.h>
13#include <lib/utils.h>
14#include <lib/xlat_tables/xlat_tables_arch.h>
15#include <lib/xlat_tables/xlat_tables.h>
16
Soby Mathew935c2e72016-06-30 15:11:07 +010017#include "../xlat_tables_private.h"
18
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010019#if (ARM_ARCH_MAJOR == 7) && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
Etienne Carriere0af78b62017-11-08 13:53:47 +010020#error ARMv7 target does not support LPAE MMU descriptors
21#endif
22
/*
 * Initial (base) lookup level of the translation regime, derived from the
 * platform's configured virtual address space size.
 */
#define XLAT_TABLE_LEVEL_BASE	\
	GET_XLAT_TABLE_LEVEL_BASE(PLAT_VIRT_ADDR_SPACE_SIZE)

/* Number of 64-bit descriptors in the base-level translation table. */
#define NUM_BASE_LEVEL_ENTRIES	\
	GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)

/*
 * Base translation table, programmed into TTBR0 by enable_mmu_svc_mon().
 * It is aligned to its own size in bytes (entries * 8 bytes each).
 * NOTE(review): presumably this satisfies the LPAE requirement that the
 * table base address be aligned to the table size — confirm against the
 * Arm ARM (long-descriptor TTBR0 alignment rules).
 */
static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));
Soby Mathew935c2e72016-06-30 15:11:07 +010031
Antonio Nino Diaz3759e3f2017-03-22 15:48:51 +000032#if ENABLE_ASSERTIONS
/*
 * Return the highest physical address supported by this translation
 * regime: the long descriptor format gives a 40-bit physical address
 * space on AArch32.
 */
static unsigned long long get_max_supported_pa(void)
{
	const unsigned int pa_width_bits = 40U;

	return (1ULL << pa_width_bits) - 1ULL;
}
Antonio Nino Diaz3759e3f2017-03-22 15:48:51 +000038#endif /* ENABLE_ASSERTIONS */
Antonio Nino Diazd1beee22016-12-13 15:28:54 +000039
/*
 * Return the exception level this library executes at.
 *
 * When EL3 is in AArch32 mode, every secure PL1 mode (Monitor, System,
 * SVC, Abort, UND, IRQ and FIQ) executes at EL3, so the answer here is
 * always 3.
 */
unsigned int xlat_arch_current_el(void)
{
	return 3U;
}
48
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010049uint64_t xlat_arch_get_xn_desc(unsigned int el __unused)
Antonio Nino Diazefabaa92017-04-27 13:30:22 +010050{
51 return UPPER_ATTRS(XN);
52}
53
Soby Mathew935c2e72016-06-30 15:11:07 +010054void init_xlat_tables(void)
55{
56 unsigned long long max_pa;
57 uintptr_t max_va;
Sathees Balya74155972019-01-25 11:36:01 +000058
59 assert(PLAT_VIRT_ADDR_SPACE_SIZE >= MIN_VIRT_ADDR_SPACE_SIZE);
60 assert(PLAT_VIRT_ADDR_SPACE_SIZE <= MAX_VIRT_ADDR_SPACE_SIZE);
61 assert(IS_POWER_OF_TWO(PLAT_VIRT_ADDR_SPACE_SIZE));
62
Soby Mathew935c2e72016-06-30 15:11:07 +010063 print_mmap();
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010064 init_xlation_table(0U, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
Antonio Nino Diazd48ae612016-08-02 09:21:41 +010065 &max_va, &max_pa);
Antonio Nino Diazd1beee22016-12-13 15:28:54 +000066
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010067 assert(max_va <= (PLAT_VIRT_ADDR_SPACE_SIZE - 1U));
68 assert(max_pa <= (PLAT_PHY_ADDR_SPACE_SIZE - 1U));
69 assert((PLAT_PHY_ADDR_SPACE_SIZE - 1U) <= get_max_supported_pa());
Soby Mathew935c2e72016-06-30 15:11:07 +010070}
71
Antonio Nino Diaz128de8d2018-08-07 19:59:49 +010072void enable_mmu_svc_mon(unsigned int flags)
73{
Soby Mathew935c2e72016-06-30 15:11:07 +010074 unsigned int mair0, ttbcr, sctlr;
75 uint64_t ttbr0;
76
77 assert(IS_IN_SECURE());
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010078 assert((read_sctlr() & SCTLR_M_BIT) == 0U);
Soby Mathew935c2e72016-06-30 15:11:07 +010079
80 /* Set attributes in the right indices of the MAIR */
81 mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
82 mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
83 ATTR_IWBWA_OWBWA_NTR_INDEX);
84 mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
85 ATTR_NON_CACHEABLE_INDEX);
86 write_mair0(mair0);
87
88 /* Invalidate TLBs at the current exception level */
89 tlbiall();
90
91 /*
Summer Qindaf5dbb2017-03-16 17:16:34 +000092 * Set TTBCR bits as well. Set TTBR0 table properties. Disable TTBR1.
Soby Mathew935c2e72016-06-30 15:11:07 +010093 */
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +010094 int t0sz = 32 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE);
95
96 if ((flags & XLAT_TABLE_NC) != 0U) {
Summer Qindaf5dbb2017-03-16 17:16:34 +000097 /* Inner & outer non-cacheable non-shareable. */
98 ttbcr = TTBCR_EAE_BIT |
99 TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100100 TTBCR_RGN0_INNER_NC | (uint32_t) t0sz;
Summer Qindaf5dbb2017-03-16 17:16:34 +0000101 } else {
102 /* Inner & outer WBWA & shareable. */
103 ttbcr = TTBCR_EAE_BIT |
104 TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100105 TTBCR_RGN0_INNER_WBA | (uint32_t) t0sz;
Summer Qindaf5dbb2017-03-16 17:16:34 +0000106 }
Soby Mathew935c2e72016-06-30 15:11:07 +0100107 ttbcr |= TTBCR_EPD1_BIT;
108 write_ttbcr(ttbcr);
109
110 /* Set TTBR0 bits as well */
Antonio Nino Diazd48ae612016-08-02 09:21:41 +0100111 ttbr0 = (uintptr_t) base_xlation_table;
Soby Mathew935c2e72016-06-30 15:11:07 +0100112 write64_ttbr0(ttbr0);
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100113 write64_ttbr1(0U);
Soby Mathew935c2e72016-06-30 15:11:07 +0100114
115 /*
116 * Ensure all translation table writes have drained
117 * into memory, the TLB invalidation is complete,
118 * and translation register writes are committed
119 * before enabling the MMU
120 */
Dimitris Papastamos12f8be52017-06-20 09:25:10 +0100121 dsbish();
Soby Mathew935c2e72016-06-30 15:11:07 +0100122 isb();
123
124 sctlr = read_sctlr();
125 sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;
126
Antonio Nino Diaz50eb3742018-07-24 10:20:53 +0100127 if ((flags & DISABLE_DCACHE) != 0U)
Soby Mathew935c2e72016-06-30 15:11:07 +0100128 sctlr &= ~SCTLR_C_BIT;
129 else
130 sctlr |= SCTLR_C_BIT;
131
132 write_sctlr(sctlr);
133
134 /* Ensure the MMU enable takes effect immediately */
135 isb();
136}
Jeenu Viswambharan9f142612018-04-27 15:06:57 +0100137
/*
 * Enable the MMU without any additional context handling. On AArch32 this
 * is simply an alias of enable_mmu_svc_mon(); the signature exists for
 * interface parity with other enable_mmu_direct_* entry points.
 */
void enable_mmu_direct_svc_mon(unsigned int flags)
{
	enable_mmu_svc_mon(flags);
}
Jeenu Viswambharan9f142612018-04-27 15:06:57 +0100141}