/*
 * Copyright (c) 2022-2023, MediaTek Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <stddef.h>
#include <mtk_iommu_priv.h>

/* definitions */
/* smi larb */
#define SMI_LARB_NON_SEC_CON(port)	(0x380 + ((port) << 2))
#define PATH_SEL_MASK			(0xf0000) /* to sram (INT) */
#define SMI_LARB_SEC_CON_INT(port)	(0xf00 + ((port) << 2))
#define SMI_LARB_SEC_CON(port)		(0xf80 + ((port) << 2))
#define MMU_MASK			BIT(0)
#define MMU_EN(en)			((!!(en)) << 0)
#define SEC_MASK			BIT(1)
#define SEC_EN(en)			((!!(en)) << 1)
#define DOMAIN_MASK			(0x1f << 4)
#define SMI_MMU_EN(port)		(0x1 << (port))

/* infra master */
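/*
 * Note: the mask below covers two adjacent bits starting at r_mmu_en_bit;
 * this is assumed to be the read/write MMU-enable pair of an infra master.
 */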
#define IFR_CFG_MMU_EN_MSK(r_bit)	(0x3 << (r_bit))

/* secure iommu */
#define MMU_INT_CONTROL0		(0x120)
#define INT_CLR				BIT(12)
#define MMU_FAULT_ST1			(0x134)
#define MMU_AXI_0_ERR_MASK		GENMASK(6, 0)
#define MMU_AXI_FAULT_STATUS(bus)	(0x13c + (bus) * 8)
#define MMU_AXI_INVLD_PA(bus)		(0x140 + (bus) * 8)
#define MMU_AXI_INT_ID(bus)		(0x150 + (bus) * 4)

/* smi larb configure */
/*
 * If the multimedia security configuration is enabled, the SMI config
 * registers must be programmed from the secure world. The SRAM path is
 * also configured here to enhance security.
 */
#ifdef ATF_MTK_SMI_LARB_CFG_SUPPORT

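/*
 * Route a LARB port to the SRAM (INT) path: clear the MMU/SEC/domain bits in
 * the port's internal secure control register and select the SRAM path in its
 * non-secure control register.
 */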
static void mtk_smi_larb_port_config_to_sram(
				const struct mtk_smi_larb_config *larb,
				uint32_t port_id)
{
	mmio_clrbits_32(larb->base + SMI_LARB_SEC_CON_INT(port_id),
			MMU_MASK | SEC_MASK | DOMAIN_MASK);

	mmio_setbits_32(larb->base + SMI_LARB_NON_SEC_CON(port_id),
			PATH_SEL_MASK);
}

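/*
 * Program the secure control register of a LARB port with the requested
 * MMU-enable and secure-enable bits (the domain field is cleared).
 */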
static void mtk_smi_port_config(const struct mtk_smi_larb_config *larb,
				uint32_t port_id, uint8_t mmu_en, uint8_t sec_en)
{
	mmio_clrsetbits_32(larb->base + SMI_LARB_SEC_CON(port_id),
			   MMU_MASK | SEC_MASK | DOMAIN_MASK,
			   MMU_EN(mmu_en) | SEC_EN(sec_en));
}

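/*
 * Configure all ports of one SMI LARB: ports flagged in larb->to_sram are
 * routed to the SRAM path, while the remaining ports take their MMU-enable
 * bit from mmu_en_msk and are configured as non-secure.
 */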
static int mtk_smi_larb_port_config_sec(uint32_t larb_id, uint32_t mmu_en_msk)
{
	uint32_t port_id, port_nr;
	const struct mtk_smi_larb_config *larb;
	uint32_t to_sram;
	uint8_t mmu_en;

	if (larb_id >= g_larb_num) {
		return MTK_SIP_E_INVALID_PARAM;
	}

	larb = &g_larb_cfg[larb_id];
	port_nr = larb->port_nr;
	to_sram = larb->to_sram;

	for (port_id = 0; port_id < port_nr; port_id++) {
		if ((to_sram & BIT(port_id)) > 0U) {
			mtk_smi_larb_port_config_to_sram(larb, port_id);
			continue;
		}
		mmu_en = !!(mmu_en_msk & SMI_MMU_EN(port_id));
		mtk_smi_port_config(larb, port_id, mmu_en, 0);
	}

	return MTK_SIP_E_SUCCESS;
}

#endif /* ATF_MTK_SMI_LARB_CFG_SUPPORT */

/* infra iommu configure */
#ifdef ATF_MTK_INFRA_MASTER_CFG_SUPPORT

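/*
 * Enable or disable IOMMU translation for the infra masters selected by
 * dev_id_msk: each selected master's MMU-enable bits are set or cleared in
 * its config register. mtk_infra_iommu_enable_protect() is invoked first,
 * before the mask is validated.
 */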
static int mtk_infra_master_config_sec(uint32_t dev_id_msk, uint32_t enable)
{
	const struct mtk_ifr_mst_config *ifr_cfg;
	uint32_t dev_id, reg_addr, reg_mask;

	mtk_infra_iommu_enable_protect();

	if (dev_id_msk >= BIT(g_ifr_mst_num)) {
		return MTK_SIP_E_INVALID_PARAM;
	}

	for (dev_id = 0U; dev_id < g_ifr_mst_num; dev_id++) {
		if ((dev_id_msk & BIT(dev_id)) == 0U) {
			continue;
		}

		ifr_cfg = &g_ifr_mst_cfg[dev_id];
		reg_addr = g_ifr_mst_cfg_base[(ifr_cfg->cfg_addr_idx)] +
			   g_ifr_mst_cfg_offs[(ifr_cfg->cfg_addr_idx)];
		reg_mask = IFR_CFG_MMU_EN_MSK(ifr_cfg->r_mmu_en_bit);

		if (enable > 0U) {
			mmio_setbits_32(reg_addr, reg_mask);
		} else {
			mmio_clrbits_32(reg_addr, reg_mask);
		}
	}

	return MTK_SIP_E_SUCCESS;
}
#endif /* ATF_MTK_INFRA_MASTER_CFG_SUPPORT */

/* secure iommu */
#ifdef ATF_MTK_IOMMU_CFG_SUPPORT
/* Report secure IOMMU fault status to the normal world in debug builds */
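/*
 * sec_mmu_base selects the secure IOMMU instance (matched against
 * g_sec_iommu_cfg[].base); f_sta, f_pa and f_id, when non-NULL, receive the
 * fault status, faulting address and fault ID. The pending fault interrupt
 * is cleared in all builds.
 */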
static int mtk_secure_iommu_fault_report(uint32_t sec_mmu_base,
					 uint32_t *f_sta, uint32_t *f_pa,
					 uint32_t *f_id)
{
	const struct mtk_secure_iommu_config *mmu_cfg = NULL;
	uint32_t __maybe_unused bus_id, fault_type;
	uint32_t i;
	int ret = MTK_SIP_E_NOT_SUPPORTED;

	for (i = 0; i < g_sec_iommu_num; i++) {
		if (g_sec_iommu_cfg[i].base == sec_mmu_base) {
			mmu_cfg = &g_sec_iommu_cfg[i];
			break;
		}
	}

	if (mmu_cfg == NULL) {
		return MTK_SIP_E_INVALID_PARAM;
	}

#if DEBUG
	fault_type = mmio_read_32(mmu_cfg->base + MMU_FAULT_ST1);
	bus_id = (fault_type & MMU_AXI_0_ERR_MASK) ? 0 : 1;

	if (f_sta != NULL) {
		*f_sta = mmio_read_32(mmu_cfg->base + MMU_AXI_FAULT_STATUS(bus_id));
	}
	if (f_pa != NULL) {
		*f_pa = mmio_read_32(mmu_cfg->base + MMU_AXI_INVLD_PA(bus_id));
	}
	if (f_id != NULL) {
		*f_id = mmio_read_32(mmu_cfg->base + MMU_AXI_INT_ID(bus_id));
	}
	ret = MTK_SIP_E_SUCCESS;
#endif
	mmio_setbits_32(mmu_cfg->base + MMU_INT_CONTROL0, INT_CLR);

	return ret;
}
#endif /* ATF_MTK_IOMMU_CFG_SUPPORT */

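/*
 * SMC handler for MTK_SIP_IOMMU_CONTROL. x1 carries the command ID, x2 the
 * module ID (LARB index, infra-master bitmask or secure IOMMU base,
 * depending on the command) and x3 the command value. For
 * IOMMU_ATF_CMD_GET_SECURE_IOMMU_STATUS the fault information is returned in
 * smccc_ret->a1..a3. Example call from the normal world (illustrative only;
 * the exact SMC plumbing depends on the caller):
 *	smc(MTK_SIP_IOMMU_CONTROL, IOMMU_ATF_CMD_CONFIG_SMI_LARB,
 *	    larb_id, mmu_en_msk, 0, 0, 0, 0, &res);
 */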
u_register_t mtk_iommu_handler(u_register_t x1, u_register_t x2,
			       u_register_t x3, u_register_t x4,
			       void *handle, struct smccc_res *smccc_ret)
{
	uint32_t cmd_id = x1, mdl_id = x2, val = x3;
	int ret = MTK_SIP_E_NOT_SUPPORTED;

	(void)x4;
	(void)handle;

	switch (cmd_id) {
#ifdef ATF_MTK_SMI_LARB_CFG_SUPPORT
	case IOMMU_ATF_CMD_CONFIG_SMI_LARB:
		ret = mtk_smi_larb_port_config_sec(mdl_id, val);
		break;
#endif
#ifdef ATF_MTK_INFRA_MASTER_CFG_SUPPORT
	case IOMMU_ATF_CMD_CONFIG_INFRA_IOMMU:
		ret = mtk_infra_master_config_sec(mdl_id, val);
		break;
#endif
#ifdef ATF_MTK_IOMMU_CFG_SUPPORT
	case IOMMU_ATF_CMD_GET_SECURE_IOMMU_STATUS:
		(void)val;
		ret = mtk_secure_iommu_fault_report(mdl_id,
						    (uint32_t *)&smccc_ret->a1,
						    (uint32_t *)&smccc_ret->a2,
						    (uint32_t *)&smccc_ret->a3);
		break;
#endif
	default:
		break;
	}

	return ret;
}
DECLARE_SMC_HANDLER(MTK_SIP_IOMMU_CONTROL, mtk_iommu_handler);