/*
 * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <cdefs.h>
#include <common/debug.h>
#include <drivers/arm/smmu_v3.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>

#include <arch_features.h>

/* Timeout for polling SMMU registers, in microseconds */
#define SMMU_POLL_TIMEOUT_US	U(1000)

static int __init smmuv3_poll(uintptr_t smmu_reg, uint32_t mask,
			      uint32_t value)
{
	uint32_t reg_val;
	uint64_t timeout;

	/* Set 1ms timeout value */
	timeout = timeout_init_us(SMMU_POLL_TIMEOUT_US);
	do {
		reg_val = mmio_read_32(smmu_reg);
		if ((reg_val & mask) == value)
			return 0;
	} while (!timeout_elapsed(timeout));

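	/*
	 * Report the value that was being waited for: with a zero expected
	 * value, show the read value with the masked bits cleared, otherwise
	 * with the masked bits set (callers poll for all-clear or all-set
	 * masked bits).
	 */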
	ERROR("Timeout polling SMMUv3 register @%p\n", (void *)smmu_reg);
	ERROR("Read value 0x%x, expected 0x%x\n", reg_val,
	      value == 0U ? reg_val & ~mask : reg_val | mask);
	return -1;
}

/*
 * Abort all incoming transactions in order to implement a default
 * deny policy on reset.
 */
int __init smmuv3_security_init(uintptr_t smmu_base)
{
	/* Attribute update has completed when SMMU_(S)_GBPA.Update bit is 0 */
	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0)
		return -1;

	/*
	 * SMMU_(S)_CR0 resets to zero with all streams bypassing the SMMU,
	 * so just abort all incoming transactions.
	 */
	mmio_setbits_32(smmu_base + SMMU_GBPA,
			SMMU_GBPA_UPDATE | SMMU_GBPA_ABORT);

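	/* The SMMU clears the UPDATE bit once the new attributes take effect */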
	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0)
		return -1;

	/* Check if the SMMU supports secure state */
	if ((mmio_read_32(smmu_base + SMMU_S_IDR1) &
	     SMMU_S_IDR1_SECURE_IMPL) == 0U)
		return 0;

	/* Abort all incoming secure transactions */
	if (smmuv3_poll(smmu_base + SMMU_S_GBPA, SMMU_S_GBPA_UPDATE, 0U) != 0)
		return -1;

	mmio_setbits_32(smmu_base + SMMU_S_GBPA,
			SMMU_S_GBPA_UPDATE | SMMU_S_GBPA_ABORT);

	return smmuv3_poll(smmu_base + SMMU_S_GBPA, SMMU_S_GBPA_UPDATE, 0U);
}

/*
 * Initialize the SMMU by invalidating all secure caches and TLBs.
 * Abort all incoming transactions in order to implement a default
 * deny policy on reset.
 */
int __init smmuv3_init(uintptr_t smmu_base)
{
	/* Abort all incoming transactions */
	if (smmuv3_security_init(smmu_base) != 0)
		return -1;

#if ENABLE_RME

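	/*
	 * With RME, mirror the CPU's granule protection configuration into
	 * the SMMU root registers, so that client transactions are subject
	 * to the same granule protection checks as CPU accesses.
	 */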
	if (get_armv9_2_feat_rme_support() != 0U) {
		if ((mmio_read_32(smmu_base + SMMU_ROOT_IDR0) &
		     SMMU_ROOT_IDR0_ROOT_IMPL) == 0U) {
			WARN("Skipping SMMU GPC configuration.\n");
		} else {
			uint64_t gpccr_el3 = read_gpccr_el3();
			uint64_t gptbr_el3 = read_gptbr_el3();

			/* SMMU_ROOT_GPT_BASE_CFG[16] is RES0. */
			gpccr_el3 &= ~(1UL << 16);

			/*
			 * TODO: SMMU_ROOT_GPT_BASE_CFG is 64b in the spec,
			 * but the SMMU model only accepts 32b accesses.
			 */
			mmio_write_32(smmu_base + SMMU_ROOT_GPT_BASE_CFG,
				      gpccr_el3);

			/*
			 * pa_gpt_table_base[51:12] maps to GPTBR_EL3[39:0],
			 * whereas it maps to SMMU_ROOT_GPT_BASE[51:12],
			 * hence it needs a 12-bit left shift.
			 */
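			/*
			 * Worked example (illustrative values only): a GPT
			 * base PA of 0x80000000 is held in GPTBR_EL3 as
			 * PA[51:12] = 0x80000, and 0x80000 << 12 restores
			 * the 0x80000000 written to SMMU_ROOT_GPT_BASE.
			 */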
			mmio_write_64(smmu_base + SMMU_ROOT_GPT_BASE,
				      gptbr_el3 << 12);

			/*
			 * ACCESSEN=1: SMMU- and client-originated accesses are
			 * not terminated by this mechanism.
			 * GPCEN=1: All clients and SMMU-originated accesses,
			 * except GPT-walks, are subject to GPC.
			 */
			mmio_setbits_32(smmu_base + SMMU_ROOT_CR0,
					SMMU_ROOT_CR0_GPCEN |
					SMMU_ROOT_CR0_ACCESSEN);

			/* Poll for ACCESSEN and GPCEN ack bits. */
			if (smmuv3_poll(smmu_base + SMMU_ROOT_CR0ACK,
					SMMU_ROOT_CR0_GPCEN |
					SMMU_ROOT_CR0_ACCESSEN,
					SMMU_ROOT_CR0_GPCEN |
					SMMU_ROOT_CR0_ACCESSEN) != 0) {
				WARN("Failed enabling SMMU GPC.\n");

				/*
				 * Do not return an error here; fall back to
				 * invalidating all entries through the secure
				 * register file instead.
				 */
			}
		}
	}

#endif /* ENABLE_RME */

	/*
	 * Initiate invalidation of secure caches and TLBs if the SMMU
	 * supports secure state. If not, it is implementation defined
	 * how the SMMU_S_INIT register is accessed.
	 * Arm SMMU Arch RME supplement, section 3.4: all SMMU registers
	 * specified to be accessible only in the secure physical address
	 * space are additionally accessible in the root physical address
	 * space in an SMMU with RME.
	 * Section 3.3: as GPT information is permitted to be cached in a
	 * TLB, the SMMU_S_INIT.INV_ALL mechanism also invalidates GPT
	 * information cached in TLBs.
	 */
	mmio_write_32(smmu_base + SMMU_S_INIT, SMMU_S_INIT_INV_ALL);

	/* Wait for global invalidation operation to finish */
	return smmuv3_poll(smmu_base + SMMU_S_INIT,
			   SMMU_S_INIT_INV_ALL, 0U);
}
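
/*
 * Usage sketch (hypothetical platform code, not part of this driver): a
 * platform port would typically call smmuv3_init() once during early boot,
 * passing the SMMU base address from its memory map. PLAT_SMMUV3_BASE below
 * is an assumed, platform-defined constant:
 *
 *	if (smmuv3_init(PLAT_SMMUV3_BASE) != 0) {
 *		panic();
 *	}
 */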