From f70e83ccdca85840c3bf9e7a31fb871a12724dc2 Mon Sep 17 00:00:00 2001
From: Sujuan Chen <sujuan.chen@mediatek.com>
Date: Thu, 28 Jul 2022 14:49:16 +0800
Subject: [PATCH 3/3] add WED SER support

Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c  |   9 +-
 drivers/net/ethernet/mediatek/mtk_wed.c      | 347 ++++++++++++++-----
 drivers/net/ethernet/mediatek/mtk_wed.h      |   2 +
 drivers/net/ethernet/mediatek/mtk_wed_regs.h |  12 +
 include/linux/soc/mediatek/mtk_wed.h         |  28 +-
 5 files changed, 297 insertions(+), 101 deletions(-)

diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index c582bb9..5259141 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -3220,10 +3220,14 @@ static void mtk_pending_work(struct work_struct *work)
 	mtk_prepare_reset_fe(eth);

 	/* Trigger Wifi SER reset */
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+	mtk_wed_fe_reset(MTK_FE_START_RESET);
+#else
 	call_netdevice_notifiers(MTK_FE_START_RESET, eth->netdev[0]);
 	rtnl_unlock();
 	wait_for_completion_timeout(&wait_ser_done, 5000);
 	rtnl_lock();
+#endif

 	while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
 		cpu_relax();
@@ -3284,8 +3288,11 @@

 	call_netdevice_notifiers(MTK_FE_RESET_NAT_DONE, eth->netdev[0]);
 	pr_info("[%s] HNAT reset done !\n", __func__);
-
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+	mtk_wed_fe_reset(MTK_FE_RESET_DONE);
+#else
 	call_netdevice_notifiers(MTK_FE_RESET_DONE, eth->netdev[0]);
+#endif
 	pr_info("[%s] WiFi SER reset done !\n", __func__);

 	atomic_dec(&reset_lock);
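For reference, the SER handshake these two hunks hook into works roughly as sketched below. The sketch is illustrative commentary, not part of the diff; the WED-side calls come from the mtk_wed.c hunks further down, while the WLAN-side arrows are assumptions, since the driver hooks are not shown in this patch.

/*
 * mtk_pending_work() [FE]        mtk_wed_fe_reset() [WED]            WLAN driver
 * -----------------------        ------------------------            -----------
 * MTK_FE_START_RESET  -------->  dev->wlan.ser_trigger(dev)  ----->  start SER handling
 *                                wait_for_completion(
 *                                    &dev->wlan_reset_done)  <-----  signalled once the WLAN
 *                                                                    DMA has been quiesced
 * ... FE/PPE/HNAT reset runs ...
 * MTK_FE_RESET_DONE   -------->  complete(&dev->fe_reset_done) --->  WLAN side can resume
 *                                                                    and re-enable WED
 */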
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 7552795..c98d749 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -13,8 +13,10 @@
 #include <linux/debugfs.h>
 #include <linux/iopoll.h>
 #include <linux/soc/mediatek/mtk_wed.h>
+#include <net/rtnetlink.h>

 #include "mtk_eth_soc.h"
+#include "mtk_eth_reset.h"
 #include "mtk_wed_regs.h"
 #include "mtk_wed.h"
 #include "mtk_ppe.h"
@@ -71,23 +73,27 @@ mtk_wdma_read_reset(struct mtk_wed_device *dev)
 	return wdma_r32(dev, MTK_WDMA_GLO_CFG);
 }

-static void
+static int
 mtk_wdma_rx_reset(struct mtk_wed_device *dev)
 {
 	u32 status;
 	u32 mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY;
-	int i;
+	int busy, i;

 	wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN);
-	if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
-			       !(status & mask), 0, 1000))
-		WARN_ON_ONCE(1);
+	busy = readx_poll_timeout(mtk_wdma_read_reset, dev, status,
+				  !(status & mask), 0, 10000);
+
+	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
+	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);

 	for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
 		if (!dev->rx_wdma[i].desc) {
 			wdma_w32(dev, MTK_WDMA_RING_RX(i) +
 				 MTK_WED_RING_OFS_CPU_IDX, 0);
 		}
+
+	return busy;
 }

 static void
@@ -99,14 +105,14 @@ mtk_wdma_tx_reset(struct mtk_wed_device *dev)

 	wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
 	if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
-			       !(status & mask), 0, 1000))
+			       !(status & mask), 0, 10000))
 		WARN_ON_ONCE(1);

+	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
+	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
 	for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
-		if (!dev->tx_wdma[i].desc) {
-			wdma_w32(dev, MTK_WDMA_RING_TX(i) +
-				 MTK_WED_RING_OFS_CPU_IDX, 0);
-		}
+		wdma_w32(dev, MTK_WDMA_RING_TX(i) +
+			 MTK_WED_RING_OFS_CPU_IDX, 0);
 }

 static u32
@@ -505,8 +511,8 @@ mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
 			wifi_w32(dev, dev->wlan.wpdma_rx_glo -
 				 dev->wlan.phy_base, val);
 		} else {
-			dev_err(dev->hw->dev, "mtk_wed%d: rx dma enable failed!\n",
-				dev->hw->index);
+			dev_err(dev->hw->dev, "mtk_wed%d: rx(%d) dma enable failed!\n",
+				dev->hw->index, idx);
 		}
 	}

@@ -557,7 +563,7 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev)
 			  FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
 				     0x2));

-		for (idx = 0; idx < MTK_WED_RX_QUEUES; idx++)
+		for (idx = 0; idx < dev->hw->ring_num; idx++)
 			mtk_wed_check_wfdma_rx_fill(dev, idx);
 	}
 }
@@ -597,26 +603,31 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
 }

 static void
-mtk_wed_stop(struct mtk_wed_device *dev)
+mtk_wed_stop(struct mtk_wed_device *dev, bool reset)
 {
-	mtk_wed_dma_disable(dev);
-	mtk_wed_set_512_support(dev, false);
-
 	if (dev->ver > MTK_WED_V1) {
 		wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
 		wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0);
 	}
 	mtk_wed_set_ext_int(dev, false);

-	wed_clr(dev, MTK_WED_CTRL,
-		MTK_WED_CTRL_WDMA_INT_AGENT_EN |
-		MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
-		MTK_WED_CTRL_WED_TX_BM_EN |
-		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
-
-	if (dev->ver > MTK_WED_V1) {
+	if (!reset) {
+		mtk_wed_dma_disable(dev);
+		mtk_wed_set_512_support(dev, false);
+		if (dev->ver > MTK_WED_V1) {
+			wed_clr(dev, MTK_WED_CTRL,
+				MTK_WED_CTRL_RX_RRO_QM_EN |
+				MTK_WED_CTRL_RX_ROUTE_QM_EN |
+				MTK_WED_CTRL_WED_RX_BM_EN);
+		} else {
+			regmap_write(dev->hw->mirror,
+				     dev->hw->index * 4, 0);
+		}
 		wed_clr(dev, MTK_WED_CTRL,
-			MTK_WED_CTRL_WED_RX_BM_EN);
+			MTK_WED_CTRL_WDMA_INT_AGENT_EN |
+			MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
+			MTK_WED_CTRL_WED_TX_BM_EN |
+			MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
 	}

 	wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
@@ -634,16 +645,13 @@ mtk_wed_detach(struct mtk_wed_device *dev)

 	mutex_lock(&hw_lock);

-	mtk_wed_stop(dev);
+	mtk_wed_stop(dev, false);

-	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
-	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+	mtk_wdma_rx_reset(dev);

 	mtk_wed_reset(dev, MTK_WED_RESET_WED);

-	wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
-	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
-	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+	mtk_wdma_tx_reset(dev);

 	mtk_wed_free_buffer(dev);
 	mtk_wed_free_tx_rings(dev);
@@ -653,8 +661,6 @@ mtk_wed_detach(struct mtk_wed_device *dev)
 		mtk_wed_wo_exit(hw);
 	}

-	mtk_wdma_rx_reset(dev);
-
 	if (dev->wlan.bus_type == MTK_BUS_TYPE_PCIE) {
 		wlan_node = dev->wlan.pci_dev->dev.of_node;
 		if (of_dma_is_coherent(wlan_node))
@@ -748,7 +754,7 @@ mtk_wed_hw_init_early(struct mtk_wed_device *dev)
 {
 	u32 mask, set;

-	mtk_wed_stop(dev);
+	mtk_wed_stop(dev, false);
 	mtk_wed_reset(dev, MTK_WED_RESET_WED);

 	if (dev->ver > MTK_WED_V1)
@@ -961,44 +967,127 @@ mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size, int scale, bool tx)
 }

 static u32
-mtk_wed_check_busy(struct mtk_wed_device *dev)
+mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
 {
-	if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY)
-		return true;
-
-	if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) &
-	    MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY)
-		return true;
-
-	if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY)
-		return true;
-
-	if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) &
-	    MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
-		return true;
-
-	if (wdma_r32(dev, MTK_WDMA_GLO_CFG) &
-	    MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
-		return true;
-
-	if (wed_r32(dev, MTK_WED_CTRL) &
-	    (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY))
+	if (wed_r32(dev, reg) & mask)
 		return true;

 	return false;
 }

 static int
-mtk_wed_poll_busy(struct mtk_wed_device *dev)
+mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
 {
-	int sleep = 15000;
+	int sleep = 1000;
 	int timeout = 100 * sleep;
 	u32 val;

 	return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
-				 timeout, false, dev);
+				 timeout, false, dev, reg, mask);
+}
+
+static void
+mtk_wed_rx_reset(struct mtk_wed_device *dev)
+{
+	struct mtk_wed_wo *wo = dev->hw->wed_wo;
+	u8 state = WO_STATE_SER_RESET;
+	bool busy = false;
+	int i;
+
+	mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, WO_CMD_CHANGE_STATE,
+			     &state, sizeof(state), true);
+
+	wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN);
+	busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+				 MTK_WED_WPDMA_RX_D_RX_DRV_BUSY);
+	if (busy) {
+		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
+		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV);
+	} else {
+		wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
+			MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
+			MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
+
+		wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+			MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
+			MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
+		wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+			MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
+			MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
+
+		wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
+	}
+
+	/* reset rro qm */
+	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
+	busy = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+				 MTK_WED_CTRL_RX_RRO_QM_BUSY);
+	if (busy) {
+		mtk_wed_reset(dev, MTK_WED_RESET_RX_RRO_QM);
+	} else {
+		wed_set(dev, MTK_WED_RROQM_RST_IDX,
+			MTK_WED_RROQM_RST_IDX_MIOD |
+			MTK_WED_RROQM_RST_IDX_FDBK);
+		wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
+	}
+
+	/* reset route qm */
+	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
+	busy = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+				 MTK_WED_CTRL_RX_ROUTE_QM_BUSY);
+	if (busy) {
+		mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
+	} else {
+		wed_set(dev, MTK_WED_RTQM_GLO_CFG,
+			MTK_WED_RTQM_Q_RST);
+	}
+
+	/* reset tx wdma */
+	mtk_wdma_tx_reset(dev);
+
+	/* reset tx wdma drv */
+	wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN);
+	mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+			  MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
+	mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV);
+
+	/* reset wed rx dma */
+	busy = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
+				 MTK_WED_GLO_CFG_RX_DMA_BUSY);
+	wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_RX_DMA_EN);
+	if (busy) {
+		mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA);
+	} else {
+		wed_set(dev, MTK_WED_RESET_IDX,
+			MTK_WED_RESET_IDX_RX);
+		wed_w32(dev, MTK_WED_RESET_IDX, 0);
+	}
+
+	/* reset rx bm */
+	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
+	mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+			  MTK_WED_CTRL_WED_RX_BM_BUSY);
+	mtk_wed_reset(dev, MTK_WED_RESET_RX_BM);
+
+	/* wo change to enable state */
+	state = WO_STATE_ENABLE;
+	mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, WO_CMD_CHANGE_STATE,
+			     &state, sizeof(state), true);
+
+	/* wed_rx_ring_reset */
+	for (i = 0; i < ARRAY_SIZE(dev->rx_ring); i++) {
+		struct mtk_wdma_desc *desc = dev->rx_ring[i].desc;
+
+		if (!desc)
+			continue;
+
+		mtk_wed_ring_reset(desc, MTK_WED_RX_RING_SIZE, 1, false);
+	}
+
+	mtk_wed_free_rx_bm(dev);
+}

+
 static void
 mtk_wed_reset_dma(struct mtk_wed_device *dev)
 {
@@ -1012,25 +1101,28 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
 		if (!desc)
 			continue;

-		mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE, dev->ver, true);
+		mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE, 1, true);
 	}

-	if (mtk_wed_poll_busy(dev))
-		busy = mtk_wed_check_busy(dev);
+	/* 1.Reset WED Tx DMA */
+	wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_EN);
+	busy = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_BUSY);

 	if (busy) {
 		mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
 	} else {
 		wed_w32(dev, MTK_WED_RESET_IDX,
-			MTK_WED_RESET_IDX_TX |
-			MTK_WED_RESET_IDX_RX);
+			MTK_WED_RESET_IDX_TX);
 		wed_w32(dev, MTK_WED_RESET_IDX, 0);
 	}

-	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
-	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+	/* 2. Reset WDMA Rx DMA/Driver_Engine */
+	busy = !!mtk_wdma_rx_reset(dev);

-	mtk_wdma_rx_reset(dev);
+	wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+	busy = !!(busy ||
+		  mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG,
+				    MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY));

 	if (busy) {
 		mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
@@ -1047,15 +1139,30 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
 			MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
 	}

+	/* 3. Reset WED WPDMA Tx Driver Engine */
+	wed_clr(dev, MTK_WED_CTRL,
+		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
 	for (i = 0; i < 100; i++) {
 		val = wed_r32(dev, MTK_WED_TX_BM_INTF);
 		if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
 			break;
 	}
-
 	mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
+
+	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_BM_EN);
 	mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);

+	/* 4. Reset WED WPDMA Tx Driver Engine */
+	busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
+				 MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY);
+	wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
+
+	busy = !!(busy ||
+		  mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
+				    MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY));
 	if (busy) {
 		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
 		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
@@ -1065,6 +1172,16 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
 			MTK_WED_WPDMA_RESET_IDX_TX |
 			MTK_WED_WPDMA_RESET_IDX_RX);
 		wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
+		if (dev->ver > MTK_WED_V1) {
+			wed_w32(dev, MTK_WED_RESET_IDX,
+				MTK_WED_RESET_WPDMA_IDX_RX);
+			wed_w32(dev, MTK_WED_RESET_IDX, 0);
+		}
+	}
+
+	if (dev->ver > MTK_WED_V1) {
+		dev->init_done = false;
+		mtk_wed_rx_reset(dev);
 	}

 }
@@ -1101,13 +1218,15 @@ mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
 }

 static int
-mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
+mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev,
+			   int idx, int size, bool reset)
 {
 	struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];

-	if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
-			       dev->ver, true))
-		return -ENOMEM;
+	if(!reset)
+		if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
+				       dev->ver, true))
+			return -ENOMEM;

 	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
 		 wdma->desc_phys);
@@ -1124,13 +1243,15 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
 }

 static int
-mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
+mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev,
+			   int idx, int size, bool reset)
 {
 	struct mtk_wed_ring *wdma = &dev->rx_wdma[idx];

-	if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
-			       dev->ver, true))
-		return -ENOMEM;
+	if (!reset)
+		if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
+				       dev->ver, true))
+			return -ENOMEM;

 	wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
 		 wdma->desc_phys);
@@ -1140,7 +1261,9 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
 		 MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
 	wdma_w32(dev,
 		 MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);
-
+	if (reset)
+		mtk_wed_ring_reset(wdma->desc, MTK_WED_WDMA_RING_SIZE,
+				   dev->ver, true);
 	if (idx == 0) {
 		wed_w32(dev, MTK_WED_WDMA_RING_TX
 			+ MTK_WED_RING_OFS_BASE, wdma->desc_phys);
@@ -1253,9 +1376,12 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
 {
 	int i, ret;

+	if (dev->ver > MTK_WED_V1)
+		ret = mtk_wed_rx_bm_alloc(dev);
+
 	for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
 		if (!dev->tx_wdma[i].desc)
-			mtk_wed_wdma_rx_ring_setup(dev, i, 16);
+			mtk_wed_wdma_rx_ring_setup(dev, i, 16, false);

 	mtk_wed_hw_init(dev);

@@ -1340,10 +1466,6 @@ mtk_wed_attach(struct mtk_wed_device *dev)
 		goto error;

 	if (dev->ver > MTK_WED_V1) {
-		ret = mtk_wed_rx_bm_alloc(dev);
-		if (ret)
-			goto error;
-
 		ret = mtk_wed_rro_alloc(dev);
 		if (ret)
 			goto error;
@@ -1351,6 +1473,10 @@ mtk_wed_attach(struct mtk_wed_device *dev)

 	mtk_wed_hw_init_early(dev);

+	init_completion(&dev->fe_reset_done);
+	init_completion(&dev->wlan_reset_done);
+	atomic_set(&dev->fe_reset, 0);
+
 	if (dev->ver == MTK_WED_V1)
 		regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
 				   BIT(hw->index), 0);
@@ -1367,7 +1493,8 @@ out:
 }

 static int
-mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
+mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx,
+		      void __iomem *regs, bool reset)
 {
 	struct mtk_wed_ring *ring = &dev->tx_ring[idx];

@@ -1385,10 +1512,12 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)

 	BUG_ON(idx > ARRAY_SIZE(dev->tx_ring));

-	if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, 1, true))
-		return -ENOMEM;
+	if (!reset)
+		if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE,
+				       1, true))
+			return -ENOMEM;

-	if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
+	if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE, reset))
 		return -ENOMEM;

 	ring->reg_base = MTK_WED_RING_TX(idx);
@@ -1436,21 +1565,24 @@ mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
 }

 static int
-mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
+mtk_wed_rx_ring_setup(struct mtk_wed_device *dev,
+		      int idx, void __iomem *regs, bool reset)
 {
 	struct mtk_wed_ring *ring = &dev->rx_ring[idx];

 	BUG_ON(idx > ARRAY_SIZE(dev->rx_ring));

+	if (!reset)
+		if (mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE,
+				       1, false))
+			return -ENOMEM;

-	if (mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE, 1, false))
-		return -ENOMEM;
-
-	if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
+	if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE, reset))
 		return -ENOMEM;

 	ring->reg_base = MTK_WED_RING_RX_DATA(idx);
 	ring->wpdma = regs;
+	dev->hw->ring_num = idx + 1;

 	/* WPDMA ->  WED */
 	wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
@@ -1492,6 +1624,41 @@ mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
 	wed_w32(dev, MTK_WED_INT_MASK, mask);
 }

+void mtk_wed_fe_reset(int cmd)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+		struct mtk_wed_hw *hw = hw_list[i];
+		struct mtk_wed_device *dev;
+
+		dev = hw->wed_dev ;
+		if (!dev)
+			continue;
+
+		switch (cmd) {
+		case MTK_FE_START_RESET:
+			pr_info("%s: receive fe reset start event, trigger SER\n", __func__);
+			atomic_set(&dev->fe_reset, 1);
+			dev->wlan.ser_trigger(dev);
+			rtnl_unlock();
+			wait_for_completion(&dev->wlan_reset_done);
+			rtnl_lock();
+
+			break;
+		case MTK_FE_RESET_DONE:
+			pr_info("%s: receive fe reset done event, continue SER\n", __func__);
+			complete(&dev->fe_reset_done);
+			break;
+		default:
+			break;
+		}
+
+	}
+
+	return;
+}
+
 int mtk_wed_flow_add(int index)
 {
 	struct mtk_wed_hw *hw = hw_list[index];
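mtk_wed_fe_reset() above assumes the attached WLAN driver provides the new wlan.ser_trigger callback and eventually completes dev->wlan_reset_done; neither side lives in this patch. A minimal sketch of such a callback follows, where every WLAN-side name (example_wlan_dev, ser_work, example_wlan_wed_init) is hypothetical and only the wed fields come from the hunks above.

#include <linux/workqueue.h>
#include <linux/soc/mediatek/mtk_wed.h>

struct example_wlan_dev {
	struct mtk_wed_device wed;
	struct work_struct ser_work;	/* runs the driver's own SER handling */
};

static void example_wlan_ser_trigger(struct mtk_wed_device *wed)
{
	struct example_wlan_dev *dev = container_of(wed, struct example_wlan_dev, wed);

	/* Defer the heavy lifting to process context. mtk_wed_fe_reset() is
	 * blocked in wait_for_completion(&wed->wlan_reset_done) until the
	 * driver signals it back; the exact point where the driver calls
	 * complete(&wed->wlan_reset_done) is not defined by this patch.
	 */
	schedule_work(&dev->ser_work);
}

/* registered once when the WLAN driver attaches to WED */
static int example_wlan_wed_init(struct example_wlan_dev *dev)
{
	dev->wed.wlan.ser_trigger = example_wlan_ser_trigger;

	return mtk_wed_device_attach(&dev->wed);
}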
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
index 8ef5253..f757eac 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
@@ -47,6 +47,7 @@ struct mtk_wed_hw {
 	u32 num_flows;
 	u32 wdma_phy;
 	char dirname[5];
+	int ring_num;
 	int irq;
 	int index;
 	u32 ver;
@@ -196,5 +197,6 @@ void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo, struct sk_buff *skb);
 int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo,int to_id, int cmd,
 			 const void *data, int len, bool wait_resp);
 int mtk_wed_wo_rx_poll(struct napi_struct *napi, int budget);
+void mtk_wed_fe_reset(int cmd);

 #endif
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
index 9d021e2..cfcd94f 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
@@ -38,11 +38,15 @@ struct mtk_wdma_desc {

 #define MTK_WED_RESET	0x008
 #define MTK_WED_RESET_TX_BM	BIT(0)
+#define MTK_WED_RESET_RX_BM	BIT(1)
 #define MTK_WED_RESET_TX_FREE_AGENT	BIT(4)
 #define MTK_WED_RESET_WPDMA_TX_DRV	BIT(8)
 #define MTK_WED_RESET_WPDMA_RX_DRV	BIT(9)
+#define MTK_WED_RESET_WPDMA_RX_D_DRV	BIT(10)
 #define MTK_WED_RESET_WPDMA_INT_AGENT	BIT(11)
 #define MTK_WED_RESET_WED_TX_DMA	BIT(12)
+#define MTK_WED_RESET_WED_RX_DMA	BIT(13)
+#define MTK_WED_RESET_WDMA_TX_DRV	BIT(16)
 #define MTK_WED_RESET_WDMA_RX_DRV	BIT(17)
 #define MTK_WED_RESET_WDMA_INT_AGENT	BIT(19)
 #define MTK_WED_RESET_RX_RRO_QM	BIT(20)
@@ -186,7 +190,12 @@ struct mtk_wdma_desc {

 #define MTK_WED_RESET_IDX	0x20c
 #define MTK_WED_RESET_IDX_TX	GENMASK(3, 0)
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+#define MTK_WED_RESET_IDX_RX	GENMASK(7, 6)
+#else
 #define MTK_WED_RESET_IDX_RX	GENMASK(17, 16)
+#endif
+#define MTK_WED_RESET_WPDMA_IDX_RX	GENMASK(31, 30)

 #define MTK_WED_TX_MIB(_n)	(0x2a0 + (_n) * 4)
 #define MTK_WED_RX_MIB(_n)	(0x2e0 + (_n) * 4)
@@ -300,6 +309,9 @@ struct mtk_wdma_desc {

 #define MTK_WED_WPDMA_RX_D_GLO_CFG	0x75c
 #define MTK_WED_WPDMA_RX_D_RX_DRV_EN	BIT(0)
+#define MTK_WED_WPDMA_RX_D_RX_DRV_BUSY	BIT(1)
+#define MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE	BIT(3)
+#define MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE	BIT(4)
 #define MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL	GENMASK(11, 7)
 #define MTK_WED_WPDMA_RX_D_RXD_READ_LEN	GENMASK(31, 24)

diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
index 9a9cc1b..31f4a26 100644
--- a/include/linux/soc/mediatek/mtk_wed.h
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -114,23 +114,27 @@ struct mtk_wed_device {
 		u32 (*init_rx_buf)(struct mtk_wed_device *wed,
 				   int pkt_num);
 		void (*release_rx_buf)(struct mtk_wed_device *wed);
+		void (*ser_trigger)(struct mtk_wed_device *wed);
 	} wlan;
+	struct completion fe_reset_done;
+	struct completion wlan_reset_done;
+	atomic_t fe_reset;
 #endif
 };

 struct mtk_wed_ops {
 	int (*attach)(struct mtk_wed_device *dev);
 	int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring,
-			     void __iomem *regs);
+			     void __iomem *regs, bool reset);
 	int (*txfree_ring_setup)(struct mtk_wed_device *dev,
 				 void __iomem *regs);
 	int (*rx_ring_setup)(struct mtk_wed_device *dev, int ring,
-			     void __iomem *regs);
+			     void __iomem *regs, bool reset);
 	int (*msg_update)(struct mtk_wed_device *dev, int cmd_id,
 			  void *data, int len);
 	void (*detach)(struct mtk_wed_device *dev);

-	void (*stop)(struct mtk_wed_device *dev);
+	void (*stop)(struct mtk_wed_device *dev, bool reset);
 	void (*start)(struct mtk_wed_device *dev, u32 irq_mask);
 	void (*reset_dma)(struct mtk_wed_device *dev);

@@ -169,12 +173,13 @@ mtk_wed_device_attach(struct mtk_wed_device *dev)
 #define mtk_wed_device_active(_dev) !!(_dev)->ops
 #define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
 #define mtk_wed_device_start(_dev, _mask) (_dev)->ops->start(_dev, _mask)
-#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) \
-	(_dev)->ops->tx_ring_setup(_dev, _ring, _regs)
+#define mtk_wed_device_stop(_dev, _reset) (_dev)->ops->stop(_dev, _reset)
+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs, _reset) \
+	(_dev)->ops->tx_ring_setup(_dev, _ring, _regs, _reset)
 #define mtk_wed_device_txfree_ring_setup(_dev, _regs) \
 	(_dev)->ops->txfree_ring_setup(_dev, _regs)
-#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) \
-	(_dev)->ops->rx_ring_setup(_dev, _ring, _regs)
+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs, _reset) \
+	(_dev)->ops->rx_ring_setup(_dev, _ring, _regs, _reset)
 #define mtk_wed_device_update_msg(_dev, _id, _msg, _len) \
 	(_dev)->ops->msg_update(_dev, _id, _msg, _len)
 #define mtk_wed_device_reg_read(_dev, _reg) \
@@ -185,6 +190,8 @@ mtk_wed_device_attach(struct mtk_wed_device *dev)
 	(_dev)->ops->irq_get(_dev, _mask)
 #define mtk_wed_device_irq_set_mask(_dev, _mask) \
 	(_dev)->ops->irq_set_mask(_dev, _mask)
+#define mtk_wed_device_dma_reset(_dev) \
+	(_dev)->ops->reset_dma(_dev)
 #define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) \
 	(_dev)->ops->ppe_check(_dev, _skb, _reason, _hash)
 #else
@@ -194,14 +201,15 @@ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
 }
 #define mtk_wed_device_detach(_dev) do {} while (0)
 #define mtk_wed_device_start(_dev, _mask) do {} while (0)
-#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV
+#define mtk_wed_device_stop(_dev, _reset) do {} while (0)
+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs, _reset) -ENODEV
 #define mtk_wed_device_txfree_ring_setup(_dev, _ring, _regs) -ENODEV
-#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) -ENODEV
-#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) -ENODEV
+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs, _reset) -ENODEV
 #define mtk_wed_device_reg_read(_dev, _reg) 0
 #define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
 #define mtk_wed_device_irq_get(_dev, _mask) 0
 #define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0)
+#define mtk_wed_device_dma_reset(_dev) do {} while (0)
 #define mtk_wed_device_ppe_check(_dev, _hash) do {} while (0)
 #endif

-- 
2.18.0
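An illustrative view of how a WLAN driver could drive the reworked ops once the handshake above has fired. The ordering and the helper name are assumptions; only the macro names and signatures come from the include/linux/soc/mediatek/mtk_wed.h hunk.

#include <linux/soc/mediatek/mtk_wed.h>

/* Hypothetical SER recovery path on the WLAN side (sketch only). */
static void example_wlan_wed_recover(struct mtk_wed_device *wed,
				     void __iomem *tx_regs,
				     void __iomem *rx_regs, u32 irq_mask)
{
	/* quiesce WED but keep buffers and rings allocated (reset == true) */
	mtk_wed_device_stop(wed, true);

	/* reset the WED/WDMA/WPDMA engines through the exported reset_dma op */
	mtk_wed_device_dma_reset(wed);

	/* re-program the existing rings in place instead of reallocating them */
	mtk_wed_device_tx_ring_setup(wed, 0, tx_regs, true);
	mtk_wed_device_rx_ring_setup(wed, 0, rx_regs, true);

	mtk_wed_device_start(wed, irq_mask);
}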