/*
 * Copyright (c) 2017-2024, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/debug.h>
#include <cdefs.h>
#include <drivers/arm/smmu_v3.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <arch_features.h>

/* SMMU poll timeout in microseconds */
#define SMMU_POLL_TIMEOUT_US	U(1000)

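/*
 * Poll the 32-bit register at 'smmu_reg' until the bits selected by 'mask'
 * read back as 'value', or until the timeout expires. Returns 0 on success
 * and -1 on timeout.
 */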
static int smmuv3_poll(uintptr_t smmu_reg, uint32_t mask,
		       uint32_t value)
{
	uint32_t reg_val;
	uint64_t timeout;

	/* Set 1ms timeout value */
	timeout = timeout_init_us(SMMU_POLL_TIMEOUT_US);
	do {
		reg_val = mmio_read_32(smmu_reg);
		if ((reg_val & mask) == value)
			return 0;
	} while (!timeout_elapsed(timeout));

	ERROR("Timeout polling SMMUv3 register @%p\n", (void *)smmu_reg);
	ERROR("Read value 0x%x, expected 0x%x\n", reg_val,
	      value == 0U ? reg_val & ~mask : reg_val | mask);
	return -1;
}

/*
 * Abort all incoming transactions in order to implement a default
 * deny policy on reset.
 */
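/*
 * Note (from the SMMUv3 architecture, for context): SMMU_(S)_GBPA is
 * updated by writing the new field values together with the UPDATE bit
 * set; the SMMU clears UPDATE once the new values have taken effect,
 * which is why each write below is bracketed by polls of that bit.
 */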
int __init smmuv3_security_init(uintptr_t smmu_base)
{
	/* Attribute update has completed when SMMU_(S)_GBPA.Update bit is 0 */
	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U)
		return -1;

	/*
	 * SMMU_(S)_CR0 resets to zero with all streams bypassing the SMMU,
	 * so just abort all incoming transactions.
	 */
	mmio_setbits_32(smmu_base + SMMU_GBPA,
			SMMU_GBPA_UPDATE | SMMU_GBPA_ABORT);

	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U)
		return -1;

	/* Check if the SMMU supports secure state */
	if ((mmio_read_32(smmu_base + SMMU_S_IDR1) &
	     SMMU_S_IDR1_SECURE_IMPL) == 0U)
		return 0;

	/* Abort all incoming secure transactions */
	if (smmuv3_poll(smmu_base + SMMU_S_GBPA, SMMU_S_GBPA_UPDATE, 0U) != 0U)
		return -1;

	mmio_setbits_32(smmu_base + SMMU_S_GBPA,
			SMMU_S_GBPA_UPDATE | SMMU_S_GBPA_ABORT);

	return smmuv3_poll(smmu_base + SMMU_S_GBPA, SMMU_S_GBPA_UPDATE, 0U);
}

/*
 * Initialize the SMMU by invalidating all secure caches and TLBs.
 * Abort all incoming transactions in order to implement a default
 * deny policy on reset.
 */
int __init smmuv3_init(uintptr_t smmu_base)
{
	/* Abort all incoming transactions */
	if (smmuv3_security_init(smmu_base) != 0)
		return -1;

	/*
	 * Initiate invalidation of secure caches and TLBs if the SMMU
	 * supports secure state. If not, it is implementation defined
	 * how accesses to the SMMU_S_INIT register behave.
	 * As per the Arm SMMUv3 specification, the SMMU_S_INIT register
	 * in an SMMU that implements RME has the following properties:
	 * a) all SMMU registers that are specified to be accessible only
	 * in the Secure physical address space are additionally accessible
	 * in the Root physical address space.
	 * b) as GPT information is permitted to be cached in a TLB, the
	 * SMMU_S_INIT.INV_ALL operation also invalidates all GPT
	 * information cached in TLBs.
	 * Additionally, it is Root firmware's responsibility to write to
	 * INV_ALL before enabling SMMU_ROOT_CR0.{ACCESSEN,GPCEN}.
	 */
	mmio_write_32(smmu_base + SMMU_S_INIT, SMMU_S_INIT_INV_ALL);

	/* Wait for global invalidation operation to finish */
	if (smmuv3_poll(smmu_base + SMMU_S_INIT,
			SMMU_S_INIT_INV_ALL, 0U) != 0) {
		return -1;
	}

#if ENABLE_RME

	if (get_armv9_2_feat_rme_support() != 0U) {
		if ((mmio_read_32(smmu_base + SMMU_ROOT_IDR0) &
		     SMMU_ROOT_IDR0_ROOT_IMPL) == 0U) {
			WARN("Skipping SMMU GPC configuration.\n");
		} else {
			uint64_t gpccr_el3 = read_gpccr_el3();
			uint64_t gptbr_el3 = read_gptbr_el3();

			/*
			 * GPCCR_EL3 shares its field layout with
			 * SMMU_ROOT_GPT_BASE_CFG, except that bit 16
			 * (GPCCR_EL3.GPC) is RES0 there, so clear it.
			 */
			gpccr_el3 &= ~(1UL << 16);

			/*
			 * TODO: SMMU_ROOT_GPT_BASE_CFG is 64-bit in the
			 * spec, but the SMMU model only accepts 32-bit
			 * accesses.
			 */
			mmio_write_32(smmu_base + SMMU_ROOT_GPT_BASE_CFG,
				      gpccr_el3);

			/*
			 * The GPT base address, pa_gpt_table_base[51:12],
			 * sits in GPTBR_EL3[39:0] but in
			 * SMMU_ROOT_GPT_BASE[51:12], hence the 12-bit left
			 * shift.
			 */
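			/*
			 * Worked example (illustrative values only):
			 * GPTBR_EL3 == 0x8000001 denotes a GPT base PA of
			 * 0x8000001000, so SMMU_ROOT_GPT_BASE must receive
			 * 0x8000001UL << 12 == 0x8000001000.
			 */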
			mmio_write_64(smmu_base + SMMU_ROOT_GPT_BASE,
				      gptbr_el3 << 12);

			/*
			 * ACCESSEN=1: SMMU- and client-originated accesses
			 * are not terminated by this mechanism.
			 * GPCEN=1: All clients and SMMU-originated accesses,
			 * except GPT-walks, are subject to GPC.
			 */
			mmio_setbits_32(smmu_base + SMMU_ROOT_CR0,
					SMMU_ROOT_CR0_GPCEN |
					SMMU_ROOT_CR0_ACCESSEN);

			/* Poll for the ACCESSEN and GPCEN ack bits. */
			if (smmuv3_poll(smmu_base + SMMU_ROOT_CR0ACK,
					SMMU_ROOT_CR0_GPCEN |
					SMMU_ROOT_CR0_ACCESSEN,
					SMMU_ROOT_CR0_GPCEN |
					SMMU_ROOT_CR0_ACCESSEN) != 0) {
				WARN("Failed to enable SMMU GPC.\n");

				/*
				 * Do not return an error; instead fall back
				 * to invalidating all entries through the
				 * secure register file.
				 */
			}
		}
	}

#endif /* ENABLE_RME */

	return 0;
}

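/*
 * Block all incoming non-secure transactions: set SMMU_GBPA.ABORT and
 * disable the SMMU so that the GBPA attributes apply to every non-secure
 * stream. Returns 0 on success, -1 if any register poll times out.
 */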
int smmuv3_ns_set_abort_all(uintptr_t smmu_base)
{
	/* Attribute update has completed when SMMU_GBPA.Update bit is 0 */
	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U) {
		return -1;
	}

	/*
	 * Set GBPA's ABORT bit. The other GBPA fields are ignored when
	 * ABORT is set, so simply preserve their values.
	 */
	mmio_setbits_32(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE | SMMU_GBPA_ABORT);
	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U) {
		return -1;
	}

	/* Disable the SMMU so that the GBPA fields configured above take effect. */
	mmio_clrbits_32(smmu_base + SMMU_CR0, SMMU_CR0_SMMUEN);
	if (smmuv3_poll(smmu_base + SMMU_CR0ACK, SMMU_CR0_SMMUEN, 0U) != 0U) {
		return -1;
	}

	return 0;
}
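
/*
 * Usage sketch (illustrative, not part of this driver): platform init code
 * would typically invalidate and lock down the SMMU early in BL31 setup,
 * e.g.
 *
 *	if (smmuv3_init(PLAT_ARM_SMMUV3_BASE) != 0) {
 *		panic();
 *	}
 *
 * where PLAT_ARM_SMMUV3_BASE stands in for the platform-defined SMMU base
 * address (the macro name varies by platform). smmuv3_ns_set_abort_all()
 * can then be called whenever firmware needs to block incoming non-secure
 * DMA behind this SMMU.
 */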