Jeenu Viswambharan | 5c50304 | 2017-05-26 14:15:40 +0100 | [diff] [blame] | 1 | /* |
Vivek Gautam | c2d4635 | 2023-12-14 13:58:21 +0530 | [diff] [blame] | 2 | * Copyright (c) 2017-2024, ARM Limited and Contributors. All rights reserved. |
Jeenu Viswambharan | 5c50304 | 2017-05-26 14:15:40 +0100 | [diff] [blame] | 3 | * |
| 4 | * SPDX-License-Identifier: BSD-3-Clause |
| 5 | */ |
| 6 | |
Alexei Fedorov | 6b4a5f0 | 2019-04-26 12:07:07 +0100 | [diff] [blame] | 7 | #include <common/debug.h> |
Daniel Boulby | 844b487 | 2018-09-18 13:36:39 +0100 | [diff] [blame] | 8 | #include <cdefs.h> |
Antonio Nino Diaz | e0f9063 | 2018-12-14 00:18:21 +0000 | [diff] [blame] | 9 | #include <drivers/arm/smmu_v3.h> |
Deepika Bhavnani | bda60d3 | 2019-10-31 14:09:52 -0600 | [diff] [blame] | 10 | #include <drivers/delay_timer.h> |
Antonio Nino Diaz | e0f9063 | 2018-12-14 00:18:21 +0000 | [diff] [blame] | 11 | #include <lib/mmio.h> |
Olivier Deprez | 73ad731 | 2022-02-04 12:30:11 +0100 | [diff] [blame] | 12 | #include <arch_features.h> |
Antonio Nino Diaz | e0f9063 | 2018-12-14 00:18:21 +0000 | [diff] [blame] | 13 | |
Alexei Fedorov | 6b4a5f0 | 2019-04-26 12:07:07 +0100 | [diff] [blame] | 14 | /* SMMU poll number of retries */ |
Deepika Bhavnani | bda60d3 | 2019-10-31 14:09:52 -0600 | [diff] [blame] | 15 | #define SMMU_POLL_TIMEOUT_US U(1000) |
Jeenu Viswambharan | 5c50304 | 2017-05-26 14:15:40 +0100 | [diff] [blame] | 16 | |
Lucian Paul-Trifu | b666e71 | 2022-03-25 14:30:20 +0000 | [diff] [blame] | 17 | static int smmuv3_poll(uintptr_t smmu_reg, uint32_t mask, |
Alexei Fedorov | 6b4a5f0 | 2019-04-26 12:07:07 +0100 | [diff] [blame] | 18 | uint32_t value) |
Jeenu Viswambharan | 5c50304 | 2017-05-26 14:15:40 +0100 | [diff] [blame] | 19 | { |
Deepika Bhavnani | bda60d3 | 2019-10-31 14:09:52 -0600 | [diff] [blame] | 20 | uint32_t reg_val; |
| 21 | uint64_t timeout; |
Jeenu Viswambharan | 5c50304 | 2017-05-26 14:15:40 +0100 | [diff] [blame] | 22 | |
Deepika Bhavnani | bda60d3 | 2019-10-31 14:09:52 -0600 | [diff] [blame] | 23 | /* Set 1ms timeout value */ |
| 24 | timeout = timeout_init_us(SMMU_POLL_TIMEOUT_US); |
Alexei Fedorov | 6b4a5f0 | 2019-04-26 12:07:07 +0100 | [diff] [blame] | 25 | do { |
| 26 | reg_val = mmio_read_32(smmu_reg); |
| 27 | if ((reg_val & mask) == value) |
| 28 | return 0; |
Deepika Bhavnani | bda60d3 | 2019-10-31 14:09:52 -0600 | [diff] [blame] | 29 | } while (!timeout_elapsed(timeout)); |
Jeenu Viswambharan | 5c50304 | 2017-05-26 14:15:40 +0100 | [diff] [blame] | 30 | |
Deepika Bhavnani | bda60d3 | 2019-10-31 14:09:52 -0600 | [diff] [blame] | 31 | ERROR("Timeout polling SMMUv3 register @%p\n", (void *)smmu_reg); |
Alexei Fedorov | 6b4a5f0 | 2019-04-26 12:07:07 +0100 | [diff] [blame] | 32 | ERROR("Read value 0x%x, expected 0x%x\n", reg_val, |
| 33 | value == 0U ? reg_val & ~mask : reg_val | mask); |
| 34 | return -1; |
Antonio Nino Diaz | feacba3 | 2018-08-21 16:12:29 +0100 | [diff] [blame] | 35 | } |
| 36 | |
Jeenu Viswambharan | 5c50304 | 2017-05-26 14:15:40 +0100 | [diff] [blame] | 37 | /* |
Alexei Fedorov | 896799a | 2019-05-09 12:14:40 +0100 | [diff] [blame] | 38 | * Abort all incoming transactions in order to implement a default |
| 39 | * deny policy on reset. |
| 40 | */ |
| 41 | int __init smmuv3_security_init(uintptr_t smmu_base) |
| 42 | { |
| 43 | /* Attribute update has completed when SMMU_(S)_GBPA.Update bit is 0 */ |
| 44 | if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U) |
| 45 | return -1; |
| 46 | |
| 47 | /* |
| 48 | * SMMU_(S)_CR0 resets to zero with all streams bypassing the SMMU, |
| 49 | * so just abort all incoming transactions. |
| 50 | */ |
| 51 | mmio_setbits_32(smmu_base + SMMU_GBPA, |
| 52 | SMMU_GBPA_UPDATE | SMMU_GBPA_ABORT); |
| 53 | |
| 54 | if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U) |
| 55 | return -1; |
| 56 | |
| 57 | /* Check if the SMMU supports secure state */ |
| 58 | if ((mmio_read_32(smmu_base + SMMU_S_IDR1) & |
| 59 | SMMU_S_IDR1_SECURE_IMPL) == 0U) |
| 60 | return 0; |
| 61 | |
| 62 | /* Abort all incoming secure transactions */ |
| 63 | if (smmuv3_poll(smmu_base + SMMU_S_GBPA, SMMU_S_GBPA_UPDATE, 0U) != 0U) |
| 64 | return -1; |
| 65 | |
| 66 | mmio_setbits_32(smmu_base + SMMU_S_GBPA, |
| 67 | SMMU_S_GBPA_UPDATE | SMMU_S_GBPA_ABORT); |
| 68 | |
| 69 | return smmuv3_poll(smmu_base + SMMU_S_GBPA, SMMU_S_GBPA_UPDATE, 0U); |
| 70 | } |
| 71 | |
/*
 * Initialize the SMMU by invalidating all secure caches and TLBs.
 * Returns 0 on success, -1 if the invalidation does not complete
 * within the polling timeout.
 */
int __init smmuv3_init(uintptr_t smmu_base)
{
	/*
	 * Initiate invalidation of secure caches and TLBs if the SMMU
	 * supports secure state. If not, it's implementation defined
	 * as to how SMMU_S_INIT register is accessed.
	 * As per Arm SMMUv3 specification the SMMU_S_INIT register in a SMMU
	 * with RME implementation has following properties:
	 * a) all SMMU registers that are specified to be accessible only in
	 *    the Secure physical address space are additionally accessible in
	 *    Root physical address space.
	 * b) as GPT information is permitted to be cached in a TLB, the
	 *    SMMU_S_INIT.INV_ALL operation also invalidates all GPT information
	 *    cached in TLBs.
	 * Additionally, it is Root firmware's responsibility to write to
	 * INV_ALL before enabling SMMU_ROOT_CR0.{ACCESSEN,GPCEN}.
	 */
	mmio_write_32(smmu_base + SMMU_S_INIT, SMMU_S_INIT_INV_ALL);

	/* Wait for global invalidation operation to finish */
	if (smmuv3_poll(smmu_base + SMMU_S_INIT,
			SMMU_S_INIT_INV_ALL, 0U) != 0) {
		return -1;
	}

#if ENABLE_RME

	if (get_armv9_2_feat_rme_support() != 0U) {
		/*
		 * Only configure GPC if this SMMU implements the Root
		 * register page; otherwise skip with a warning.
		 */
		if ((mmio_read_32(smmu_base + SMMU_ROOT_IDR0) &
		     SMMU_ROOT_IDR0_ROOT_IMPL) == 0U) {
			WARN("Skip SMMU GPC configuration.\n");
		} else {
			/* Mirror the CPU's GPT configuration into the SMMU. */
			uint64_t gpccr_el3 = read_gpccr_el3();
			uint64_t gptbr_el3 = read_gptbr_el3();

			/* SMMU_ROOT_GPT_BASE_CFG[16] is RES0. */
			gpccr_el3 &= ~(1UL << 16);

			/*
			 * TODO: SMMU_ROOT_GPT_BASE_CFG is 64b in the spec,
			 * but SMMU model only accepts 32b access.
			 */
			mmio_write_32(smmu_base + SMMU_ROOT_GPT_BASE_CFG,
				      gpccr_el3);

			/*
			 * pa_gpt_table_base[51:12] maps to GPTBR_EL3[39:0]
			 * whereas it maps to SMMU_ROOT_GPT_BASE[51:12]
			 * hence needs a 12 bit left shift.
			 */
			mmio_write_64(smmu_base + SMMU_ROOT_GPT_BASE,
				      gptbr_el3 << 12);

			/*
			 * ACCESSEN=1: SMMU- and client-originated accesses are
			 * not terminated by this mechanism.
			 * GPCEN=1: All clients and SMMU-originated accesses,
			 * except GPT-walks, are subject to GPC.
			 */
			mmio_setbits_32(smmu_base + SMMU_ROOT_CR0,
					SMMU_ROOT_CR0_GPCEN |
					SMMU_ROOT_CR0_ACCESSEN);

			/* Poll for ACCESSEN and GPCEN ack bits. */
			if (smmuv3_poll(smmu_base + SMMU_ROOT_CR0ACK,
					SMMU_ROOT_CR0_GPCEN |
					SMMU_ROOT_CR0_ACCESSEN,
					SMMU_ROOT_CR0_GPCEN |
					SMMU_ROOT_CR0_ACCESSEN) != 0) {
				WARN("Failed enabling SMMU GPC.\n");

				/*
				 * Do not return in error, but fall back to
				 * invalidating all entries through the secure
				 * register file.
				 */
			}
		}
	}

#endif /* ENABLE_RME */

	return 0;
}
Lucian Paul-Trifu | b666e71 | 2022-03-25 14:30:20 +0000 | [diff] [blame] | 157 | |
| 158 | int smmuv3_ns_set_abort_all(uintptr_t smmu_base) |
| 159 | { |
| 160 | /* Attribute update has completed when SMMU_GBPA.Update bit is 0 */ |
| 161 | if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U) { |
| 162 | return -1; |
| 163 | } |
| 164 | |
| 165 | /* |
| 166 | * Set GBPA's ABORT bit. Other GBPA fields are presumably ignored then, |
| 167 | * so simply preserve their value. |
| 168 | */ |
| 169 | mmio_setbits_32(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE | SMMU_GBPA_ABORT); |
| 170 | if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U) { |
| 171 | return -1; |
| 172 | } |
| 173 | |
| 174 | /* Disable the SMMU to engage the GBPA fields previously configured. */ |
| 175 | mmio_clrbits_32(smmu_base + SMMU_CR0, SMMU_CR0_SMMUEN); |
| 176 | if (smmuv3_poll(smmu_base + SMMU_CR0ACK, SMMU_CR0_SMMUEN, 0U) != 0U) { |
| 177 | return -1; |
| 178 | } |
| 179 | |
| 180 | return 0; |
| 181 | } |