/*
 * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_features.h>
#include <cdefs.h>
#include <common/debug.h>
#include <drivers/arm/smmu_v3.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>

/* SMMU poll timeout in microseconds */
#define SMMU_POLL_TIMEOUT_US	U(1000)

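/*
 * Poll the SMMU register at 'smmu_reg' until the bits selected by 'mask'
 * read back as 'value'. Returns 0 on success and -1 if the expected value
 * is not observed within the poll timeout.
 */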
static int __init smmuv3_poll(uintptr_t smmu_reg, uint32_t mask,
				uint32_t value)
{
	uint32_t reg_val;
	uint64_t timeout;

	/* Set 1ms timeout value */
	timeout = timeout_init_us(SMMU_POLL_TIMEOUT_US);
	do {
		reg_val = mmio_read_32(smmu_reg);
		if ((reg_val & mask) == value)
			return 0;
	} while (!timeout_elapsed(timeout));

	ERROR("Timeout polling SMMUv3 register @%p\n", (void *)smmu_reg);
	ERROR("Read value 0x%x, expected 0x%x\n", reg_val,
		value == 0U ? reg_val & ~mask : reg_val | mask);
	return -1;
}

/*
 * Abort all incoming transactions in order to implement a default
 * deny policy on reset.
 */
int __init smmuv3_security_init(uintptr_t smmu_base)
{
	/* Attribute update has completed when SMMU_(S)_GBPA.Update bit is 0 */
	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U)
		return -1;

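	/*
	 * GBPA field updates take effect when the register is written with
	 * the UPDATE bit set; the SMMU clears UPDATE once the new attributes
	 * are in effect, hence the polls before and after the write below.
	 */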
	/*
	 * SMMU_(S)_CR0 resets to zero with all streams bypassing the SMMU,
	 * so just abort all incoming transactions.
	 */
	mmio_setbits_32(smmu_base + SMMU_GBPA,
			SMMU_GBPA_UPDATE | SMMU_GBPA_ABORT);

	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U)
		return -1;

	/* Check if the SMMU supports secure state */
	if ((mmio_read_32(smmu_base + SMMU_S_IDR1) &
				SMMU_S_IDR1_SECURE_IMPL) == 0U)
		return 0;

	/* Abort all incoming secure transactions */
	if (smmuv3_poll(smmu_base + SMMU_S_GBPA, SMMU_S_GBPA_UPDATE, 0U) != 0U)
		return -1;

	mmio_setbits_32(smmu_base + SMMU_S_GBPA,
			SMMU_S_GBPA_UPDATE | SMMU_S_GBPA_ABORT);

	return smmuv3_poll(smmu_base + SMMU_S_GBPA, SMMU_S_GBPA_UPDATE, 0U);
}

/*
 * Initialize the SMMU by invalidating all secure caches and TLBs.
 * Abort all incoming transactions in order to implement a default
 * deny policy on reset.
 */
int __init smmuv3_init(uintptr_t smmu_base)
{
	/* Abort all incoming transactions */
	if (smmuv3_security_init(smmu_base) != 0)
		return -1;

#if ENABLE_RME

	if (get_armv9_2_feat_rme_support() != 0U) {
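		/*
		 * RME support in the PE does not guarantee that the SMMU
		 * implements the root register page, so check ROOT_IDR0
		 * before attempting any GPC configuration.
		 */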
		if ((mmio_read_32(smmu_base + SMMU_ROOT_IDR0) &
				SMMU_ROOT_IDR0_ROOT_IMPL) == 0U) {
			WARN("Skip SMMU GPC configuration.\n");
		} else {
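			/*
			 * Mirror the PE's GPC configuration into the SMMU
			 * root registers, so that SMMU- and client-originated
			 * accesses are checked against the same GPTs as the
			 * PEs use.
			 */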
			uint64_t gpccr_el3 = read_gpccr_el3();
			uint64_t gptbr_el3 = read_gptbr_el3();

			/* SMMU_ROOT_GPT_BASE_CFG[16] is RES0. */
			gpccr_el3 &= ~(1UL << 16);

			/*
			 * TODO: SMMU_ROOT_GPT_BASE_CFG is 64-bit in the spec,
			 * but the SMMU model only accepts 32-bit accesses.
			 */
			mmio_write_32(smmu_base + SMMU_ROOT_GPT_BASE_CFG,
				      gpccr_el3);

			/*
			 * pa_gpt_table_base[51:12] occupies GPTBR_EL3[39:0],
			 * whereas SMMU_ROOT_GPT_BASE expects it in bits
			 * [51:12], hence the 12-bit left shift.
			 */
			mmio_write_64(smmu_base + SMMU_ROOT_GPT_BASE,
				      gptbr_el3 << 12);

			/*
			 * ACCESSEN=1: SMMU- and client-originated accesses are
			 * not terminated by this mechanism.
			 * GPCEN=1: All clients and SMMU-originated accesses,
			 * except GPT-walks, are subject to GPC.
			 */
			mmio_setbits_32(smmu_base + SMMU_ROOT_CR0,
					SMMU_ROOT_CR0_GPCEN |
					SMMU_ROOT_CR0_ACCESSEN);

			/* Poll for ACCESSEN and GPCEN ack bits. */
			if (smmuv3_poll(smmu_base + SMMU_ROOT_CR0ACK,
					SMMU_ROOT_CR0_GPCEN |
					SMMU_ROOT_CR0_ACCESSEN,
					SMMU_ROOT_CR0_GPCEN |
					SMMU_ROOT_CR0_ACCESSEN) != 0) {
				WARN("Failed enabling SMMU GPC.\n");

				/*
				 * Do not return on error; fall back to
				 * invalidating all entries through the secure
				 * register file instead.
				 */
			}
		}
	}

#endif /* ENABLE_RME */

	/*
	 * Initiate invalidation of secure caches and TLBs if the SMMU
	 * supports secure state. If not, it is IMPLEMENTATION DEFINED
	 * how the SMMU_S_INIT register is accessed.
	 * Arm SMMU Arch RME supplement, section 3.4: all SMMU registers
	 * specified to be accessible only in the secure physical address
	 * space are additionally accessible in the root physical address
	 * space in an SMMU with RME.
	 * Section 3.3: as GPT information is permitted to be cached in a
	 * TLB, the SMMU_S_INIT.INV_ALL mechanism also invalidates GPT
	 * information cached in TLBs.
	 */
	mmio_write_32(smmu_base + SMMU_S_INIT, SMMU_S_INIT_INV_ALL);

	/* Wait for global invalidation operation to finish */
	return smmuv3_poll(smmu_base + SMMU_S_INIT,
				SMMU_S_INIT_INV_ALL, 0U);
}
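
/*
 * Usage sketch (hypothetical): a platform port would typically call
 * smmuv3_init() once during boot, passing the base address of the SMMUv3
 * programming interface. PLAT_SMMUV3_BASE and plat_smmu_setup() are
 * assumed names for illustration only, not part of this driver.
 *
 *	void plat_smmu_setup(void)
 *	{
 *		if (smmuv3_init(PLAT_SMMUV3_BASE) != 0) {
 *			ERROR("SMMUv3 initialization failed\n");
 *			panic();
 *		}
 *	}
 */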