/*
 * Copyright (c) 2022-2023, MediaTek Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <stddef.h>
#include <mtk_iommu_priv.h>

/* definitions */
/* smi larb */
#define SMI_LARB_NON_SEC_CON(port)	(0x380 + ((port) << 2))
#define PATH_SEL_MASK			(0xf0000) /* to sram (INT) */
#define SMI_LARB_SEC_CON_INT(port)	(0xf00 + ((port) << 2))
#define SMI_LARB_SEC_CON(port)		(0xf80 + ((port) << 2))
#define MMU_MASK			BIT(0)
#define MMU_EN(en)			((!!(en)) << 0)
#define SEC_MASK			BIT(1)
#define SEC_EN(en)			((!!(en)) << 1)
#define DOMAIN_MASK			(0x1f << 4)
#define SMI_MMU_EN(port)		(0x1 << (port))
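
/*
 * Worked example (illustrative only): for port 3 of a larb,
 * SMI_LARB_SEC_CON(3) resolves to offset 0xf80 + (3 << 2) = 0xf8c, and
 * MMU_EN(1) | SEC_EN(0) = 0x1, i.e. only the IOMMU-enable bit of that
 * port's security control register is set.
 */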

/* infra master */
#define IFR_CFG_MMU_EN_MSK(r_bit)	(0x3 << (r_bit))

/* secure iommu */
#define MMU_INT_CONTROL0		(0x120)
#define INT_CLR				BIT(12)
#define MMU_FAULT_ST1			(0x134)
#define MMU_AXI_0_ERR_MASK		GENMASK(6, 0)
#define MMU_AXI_FAULT_STATUS(bus)	(0x13c + (bus) * 8)
#define MMU_AXI_INVLD_PA(bus)		(0x140 + (bus) * 8)
#define MMU_AXI_INT_ID(bus)		(0x150 + (bus) * 4)
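
/*
 * Layout note (illustrative): the fault registers are banked per AXI bus.
 * For bus 1, MMU_AXI_FAULT_STATUS(1) = 0x13c + 8 = 0x144 and
 * MMU_AXI_INVLD_PA(1) = 0x140 + 8 = 0x148, so the status/PA pairs sit
 * 8 bytes apart, while the INT ID registers (0x150 + (bus) * 4) are
 * packed 4 bytes apart.
 */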

/* smi larb configure */
/*
 * If multimedia security config is enabled, the SMI config register must be
 * configured in the secure world, and the SRAM path is also configured here
 * to enhance security.
 */
#ifdef ATF_MTK_SMI_LARB_CFG_SUPPORT

/* Route one larb port to SRAM: clear its security fields, then select the SRAM path. */
static void mtk_smi_larb_port_config_to_sram(
				const struct mtk_smi_larb_config *larb,
				uint32_t port_id)
{
	mmio_clrbits_32(larb->base + SMI_LARB_SEC_CON_INT(port_id),
			MMU_MASK | SEC_MASK | DOMAIN_MASK);

	mmio_setbits_32(larb->base + SMI_LARB_NON_SEC_CON(port_id),
			PATH_SEL_MASK);
}

/* Set a port's IOMMU-enable and secure bits; the domain field is cleared. */
static void mtk_smi_port_config(const struct mtk_smi_larb_config *larb,
				uint32_t port_id, uint8_t mmu_en, uint8_t sec_en)
{
	mmio_clrsetbits_32(larb->base + SMI_LARB_SEC_CON(port_id),
			   MMU_MASK | SEC_MASK | DOMAIN_MASK,
			   MMU_EN(mmu_en) | SEC_EN(sec_en));
}

static int mtk_smi_larb_port_config_sec(uint32_t larb_id, uint32_t mmu_en_msk)
{
	uint32_t port_id, port_nr;
	const struct mtk_smi_larb_config *larb;
	uint32_t to_sram;
	uint8_t mmu_en;

	if (larb_id >= g_larb_num) {
		return MTK_SIP_E_INVALID_PARAM;
	}

	larb = &g_larb_cfg[larb_id];
	port_nr = larb->port_nr;
	to_sram = larb->to_sram;

	for (port_id = 0; port_id < port_nr; port_id++) {
		if ((to_sram & BIT(port_id)) > 0U) {
			mtk_smi_larb_port_config_to_sram(larb, port_id);
			continue;
		}
		mmu_en = !!(mmu_en_msk & SMI_MMU_EN(port_id));
		mtk_smi_port_config(larb, port_id, mmu_en, 0);
	}

	return MTK_SIP_E_SUCCESS;
}

#endif /* ATF_MTK_SMI_LARB_CFG_SUPPORT */
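
/*
 * A minimal usage sketch (hypothetical values; real callers reach this
 * through the SMC handler below): enable the IOMMU for ports 0 and 2 of
 * larb 1, leave its other ports with the IOMMU disabled, and let any port
 * flagged in to_sram be routed to SRAM instead:
 *
 *	ret = mtk_smi_larb_port_config_sec(1U, BIT(0) | BIT(2));
 */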

/* infra iommu configure */
#ifdef ATF_MTK_INFRA_MASTER_CFG_SUPPORT

/* Enable/disable the infra IOMMU path for each master selected in dev_id_msk. */
static int mtk_infra_master_config_sec(uint32_t dev_id_msk, uint32_t enable)
{
	const struct mtk_ifr_mst_config *ifr_cfg;
	uint32_t dev_id, reg_addr, reg_mask;

	mtk_infra_iommu_enable_protect();

	if (dev_id_msk >= BIT(g_ifr_mst_num)) {
		return MTK_SIP_E_INVALID_PARAM;
	}

	for (dev_id = 0U; dev_id < g_ifr_mst_num; dev_id++) {
		if ((dev_id_msk & BIT(dev_id)) == 0U) {
			continue;
		}

		ifr_cfg = &g_ifr_mst_cfg[dev_id];
		reg_addr = g_ifr_mst_cfg_base[(ifr_cfg->cfg_addr_idx)] +
			   g_ifr_mst_cfg_offs[(ifr_cfg->cfg_addr_idx)];
		reg_mask = IFR_CFG_MMU_EN_MSK(ifr_cfg->r_mmu_en_bit);

		if (enable > 0U) {
			mmio_setbits_32(reg_addr, reg_mask);
		} else {
			mmio_clrbits_32(reg_addr, reg_mask);
		}
	}

	return MTK_SIP_E_SUCCESS;
}
#endif /* ATF_MTK_INFRA_MASTER_CFG_SUPPORT */
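
/*
 * Example (illustrative): dev_id_msk is a bitmask over infra masters, so
 * passing BIT(0) | BIT(3) with enable == 1 sets the two-bit
 * IFR_CFG_MMU_EN_MSK field for masters 0 and 3 and leaves the others
 * untouched.
 */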

/* secure iommu */
#ifdef ATF_MTK_IOMMU_CFG_SUPPORT
/* Report the secure IOMMU fault status to the normal world in debug builds. */
static int mtk_secure_iommu_fault_report(uint32_t sec_mmu_base,
					 uint32_t *f_sta, uint32_t *f_pa,
					 uint32_t *f_id)
{
	const struct mtk_secure_iommu_config *mmu_cfg = NULL;
	uint32_t __maybe_unused bus_id, fault_type;
	uint32_t i;
	int ret = MTK_SIP_E_NOT_SUPPORTED;

	/* Look up the secure IOMMU instance by its base address. */
	for (i = 0; i < g_sec_iommu_num; i++) {
		if (g_sec_iommu_cfg[i].base == sec_mmu_base) {
			mmu_cfg = &g_sec_iommu_cfg[i];
			break;
		}
	}

	if (mmu_cfg == NULL) {
		return MTK_SIP_E_INVALID_PARAM;
	}
#if DEBUG
	/* Bits 6:0 of MMU_FAULT_ST1 flag a fault on AXI bus 0; otherwise bus 1. */
	fault_type = mmio_read_32(mmu_cfg->base + MMU_FAULT_ST1);
	bus_id = ((fault_type & MMU_AXI_0_ERR_MASK) != 0U) ? 0U : 1U;

	if (f_sta != NULL) {
		*f_sta = mmio_read_32(mmu_cfg->base + MMU_AXI_FAULT_STATUS(bus_id));
	}
	if (f_pa != NULL) {
		*f_pa = mmio_read_32(mmu_cfg->base + MMU_AXI_INVLD_PA(bus_id));
	}
	if (f_id != NULL) {
		*f_id = mmio_read_32(mmu_cfg->base + MMU_AXI_INT_ID(bus_id));
	}
	ret = MTK_SIP_E_SUCCESS;
#endif
	/* Always clear the interrupt so the fault status does not stay latched. */
	mmio_setbits_32(mmu_cfg->base + MMU_INT_CONTROL0, INT_CLR);

	return ret;
}
#endif /* ATF_MTK_IOMMU_CFG_SUPPORT */

/*
 * SiP call dispatcher: x1 carries the command ID, x2 the module ID (larb
 * ID, infra master mask, or secure IOMMU base), and x3 the command value.
 */
u_register_t mtk_iommu_handler(u_register_t x1, u_register_t x2,
			       u_register_t x3, u_register_t x4,
			       void *handle, struct smccc_res *smccc_ret)
{
	uint32_t cmd_id = x1, mdl_id = x2, val = x3;
	int ret = MTK_SIP_E_NOT_SUPPORTED;

	(void)x4;
	(void)handle;

	switch (cmd_id) {
#ifdef ATF_MTK_SMI_LARB_CFG_SUPPORT
	case IOMMU_ATF_CMD_CONFIG_SMI_LARB:
		ret = mtk_smi_larb_port_config_sec(mdl_id, val);
		break;
#endif
#ifdef ATF_MTK_INFRA_MASTER_CFG_SUPPORT
	case IOMMU_ATF_CMD_CONFIG_INFRA_IOMMU:
		ret = mtk_infra_master_config_sec(mdl_id, val);
		break;
#endif
#ifdef ATF_MTK_IOMMU_CFG_SUPPORT
	case IOMMU_ATF_CMD_GET_SECURE_IOMMU_STATUS:
		(void)val;
		ret = mtk_secure_iommu_fault_report(mdl_id,
						    (uint32_t *)&smccc_ret->a1,
						    (uint32_t *)&smccc_ret->a2,
						    (uint32_t *)&smccc_ret->a3);
		break;
#endif
	default:
		break;
	}

	return ret;
}
DECLARE_SMC_HANDLER(MTK_SIP_IOMMU_CONTROL, mtk_iommu_handler);
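
/*
 * A minimal sketch of the call flow from the normal world (illustrative;
 * MTK_SIP_IOMMU_CONTROL_FID is a hypothetical name for the SiP function ID
 * bound by DECLARE_SMC_HANDLER above). A Linux-style caller might issue:
 *
 *	struct arm_smccc_res res;
 *
 *	arm_smccc_smc(MTK_SIP_IOMMU_CONTROL_FID,
 *		      IOMMU_ATF_CMD_CONFIG_SMI_LARB,	// x1: command ID
 *		      larb_id,				// x2: module ID
 *		      mmu_en_msk,			// x3: per-port enable mask
 *		      0, 0, 0, 0, &res);
 *	// res.a0 holds MTK_SIP_E_SUCCESS or an MTK_SIP_E_* error code.
 */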