From de9cc10d4e2d5aad4801dc92fb37c42478a4ab68 Mon Sep 17 00:00:00 2001
From: Sujuan Chen <sujuan.chen@mediatek.com>
Date: Mon, 18 Sep 2023 11:05:45 +0800
Subject: [PATCH 10/22] add-wed-ser-support
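
Hook the frame engine's SER (system error recovery) flow into WED. With
WED enabled, mtk_pending_work() now calls mtk_wed_fe_reset() before the
FE reset and mtk_wed_fe_reset_complete() afterwards; these invoke the
new wlan.reset()/wlan.reset_complete() callbacks so the WLAN driver can
run its own recovery in step with the FE. On the WED side, split
mtk_wed_stop() into stop and deinit, add mtk_wed_rx_reset() to reset the
RX data path (WPDMA RX, RRO/route QM, WDMA TX, WED RX DMA, RX BM), and
pass a reset flag through the ring setup ops so rings can be
re-programmed during SER without being reallocated.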
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c  |   8 +
 drivers/net/ethernet/mediatek/mtk_wed.c      | 361 ++++++++++++++-----
 drivers/net/ethernet/mediatek/mtk_wed.h      |  11 +
 drivers/net/ethernet/mediatek/mtk_wed_regs.h |  12 +
 include/linux/soc/mediatek/mtk_wed.h         |  27 +-
 5 files changed, 320 insertions(+), 99 deletions(-)

diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 2cab49a..c1399c5 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -4158,6 +4158,9 @@ static void mtk_pending_work(struct work_struct *work)
 for (i = 0; i < MTK_MAC_COUNT; i++) {
 if (!eth->netdev[i])
 continue;
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ mtk_wed_fe_reset();
+#else
 if (mtk_reset_flag == MTK_FE_STOP_TRAFFIC) {
 pr_info("send MTK_FE_STOP_TRAFFIC event\n");
 call_netdevice_notifiers(MTK_FE_STOP_TRAFFIC,
@@ -4183,6 +4186,7 @@ static void mtk_pending_work(struct work_struct *work)
 pr_warn("wait for MTK_FE_START_RESET\n");
 }
 rtnl_lock();
+#endif
 break;
 }

@@ -4221,6 +4225,9 @@ static void mtk_pending_work(struct work_struct *work)
 for (i = 0; i < MTK_MAC_COUNT; i++) {
 if (!eth->netdev[i])
 continue;
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ mtk_wed_fe_reset_complete();
+#else
 if (mtk_reset_flag == MTK_FE_STOP_TRAFFIC) {
 pr_info("send MTK_FE_START_TRAFFIC event\n");
 call_netdevice_notifiers(MTK_FE_START_TRAFFIC,
@@ -4230,6 +4237,7 @@ static void mtk_pending_work(struct work_struct *work)
 call_netdevice_notifiers(MTK_FE_RESET_DONE,
 eth->netdev[i]);
 }
+#endif
 call_netdevice_notifiers(MTK_FE_RESET_NAT_DONE,
 eth->netdev[i]);
 break;
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 4b2f1a2..ae31412 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -13,8 +13,10 @@
 #include <linux/debugfs.h>
 #include <linux/iopoll.h>
 #include <linux/soc/mediatek/mtk_wed.h>
+#include <net/rtnetlink.h>

 #include "mtk_eth_soc.h"
+#include "mtk_eth_reset.h"
 #include "mtk_wed_regs.h"
 #include "mtk_wed.h"
 #include "mtk_ppe.h"
@@ -71,23 +73,27 @@ mtk_wdma_read_reset(struct mtk_wed_device *dev)
 return wdma_r32(dev, MTK_WDMA_GLO_CFG);
 }

-static void
+static int
 mtk_wdma_rx_reset(struct mtk_wed_device *dev)
 {
 u32 status;
 u32 mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY;
- int i;
+ int busy, i;

 wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN);
- if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
- !(status & mask), 0, 1000))
- WARN_ON_ONCE(1);
+ busy = readx_poll_timeout(mtk_wdma_read_reset, dev, status,
+ !(status & mask), 0, 10000);
+
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);

 for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
 if (!dev->rx_wdma[i].desc) {
 wdma_w32(dev, MTK_WDMA_RING_RX(i) +
 MTK_WED_RING_OFS_CPU_IDX, 0);
 }
+
+ return busy;
 }

 static void
@@ -99,14 +105,14 @@ mtk_wdma_tx_reset(struct mtk_wed_device *dev)

 wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
 if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
- !(status & mask), 0, 1000))
+ !(status & mask), 0, 10000))
 WARN_ON_ONCE(1);

+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
 for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
- if (!dev->tx_wdma[i].desc) {
- wdma_w32(dev, MTK_WDMA_RING_TX(i) +
- MTK_WED_RING_OFS_CPU_IDX, 0);
- }
+ wdma_w32(dev, MTK_WDMA_RING_TX(i) +
+ MTK_WED_RING_OFS_CPU_IDX, 0);
 }

 static u32
@@ -172,6 +178,51 @@ mtk_wed_wo_reset(struct mtk_wed_device *dev)
 iounmap((void *)reg);
 }

+void mtk_wed_fe_reset(void)
+{
+ int i;
+
+ mutex_lock(&hw_lock);
+
+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+ struct mtk_wed_hw *hw = hw_list[i];
+ struct mtk_wed_device *dev = hw->wed_dev;
+ int err;
+
+ if (!dev || !dev->wlan.reset)
+ continue;
+
+ pr_info("%s: receive fe reset start event, trigger SER\n", __func__);
+
+ /* reset callback blocks until WLAN reset is completed */
+ err = dev->wlan.reset(dev);
+ if (err)
+ dev_err(dev->dev, "wlan reset failed: %d\n", err);
+ }
+
+ mutex_unlock(&hw_lock);
+}
+
+void mtk_wed_fe_reset_complete(void)
+{
+ int i;
+
+ mutex_lock(&hw_lock);
+
+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+ struct mtk_wed_hw *hw = hw_list[i];
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+ if (!dev || !dev->wlan.reset_complete)
+ continue;
+
+ pr_info("%s: receive fe reset done event, continue SER\n", __func__);
+ dev->wlan.reset_complete(dev);
+ }
+
+ mutex_unlock(&hw_lock);
+}
+
 static struct mtk_wed_hw *
 mtk_wed_assign(struct mtk_wed_device *dev)
 {
@@ -505,8 +556,8 @@ mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
 wifi_w32(dev, dev->wlan.wpdma_rx_glo -
 dev->wlan.phy_base, val);
 } else {
- dev_err(dev->hw->dev, "mtk_wed%d: rx dma enable failed!\n",
- dev->hw->index);
+ dev_err(dev->hw->dev, "mtk_wed%d: rx(%d) dma enable failed!\n",
+ dev->hw->index, idx);
 }
 }

@@ -557,7 +608,7 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev)
 FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
 0x2));

- for (idx = 0; idx < MTK_WED_RX_QUEUES; idx++)
+ for (idx = 0; idx < dev->hw->ring_num; idx++)
 mtk_wed_check_wfdma_rx_fill(dev, idx);
 }
 }
@@ -594,36 +645,45 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
 wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
 MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
 }
+
+ mtk_wed_set_512_support(dev, false);
 }

 static void
 mtk_wed_stop(struct mtk_wed_device *dev)
 {
- mtk_wed_dma_disable(dev);
- mtk_wed_set_512_support(dev, false);
-
 if (dev->ver > MTK_WED_V1) {
 wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
 wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0);
 }
 mtk_wed_set_ext_int(dev, false);

+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
+ wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
+ wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
+}
+
+static void
+mtk_wed_deinit(struct mtk_wed_device *dev)
+{
+ mtk_wed_stop(dev);
+ mtk_wed_dma_disable(dev);
+
 wed_clr(dev, MTK_WED_CTRL,
 MTK_WED_CTRL_WDMA_INT_AGENT_EN |
 MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
 MTK_WED_CTRL_WED_TX_BM_EN |
 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);

- if (dev->ver > MTK_WED_V1) {
- wed_clr(dev, MTK_WED_CTRL,
- MTK_WED_CTRL_WED_RX_BM_EN);
- }
+ if (dev->hw->ver == 1)
+ return;

- wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
- wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
- wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
- wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
- wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
+ wed_clr(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_RX_ROUTE_QM_EN |
+ MTK_WED_CTRL_WED_RX_BM_EN |
+ MTK_WED_CTRL_RX_RRO_QM_EN);
 }

 static void
@@ -634,16 +694,13 @@ mtk_wed_detach(struct mtk_wed_device *dev)

 mutex_lock(&hw_lock);

- mtk_wed_stop(dev);
+ mtk_wed_deinit(dev);

- wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
- wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+ mtk_wdma_rx_reset(dev);

 mtk_wed_reset(dev, MTK_WED_RESET_WED);

- wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
- wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
- wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+ mtk_wdma_tx_reset(dev);

 mtk_wed_free_buffer(dev);
 mtk_wed_free_tx_rings(dev);
@@ -653,8 +710,6 @@ mtk_wed_detach(struct mtk_wed_device *dev)
 mtk_wed_wo_exit(hw);
 }

- mtk_wdma_rx_reset(dev);
-
 if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
 wlan_node = dev->wlan.pci_dev->dev.of_node;
 if (of_dma_is_coherent(wlan_node))
@@ -748,7 +803,7 @@ mtk_wed_hw_init_early(struct mtk_wed_device *dev)
 {
 u32 mask, set;

- mtk_wed_stop(dev);
+ mtk_wed_deinit(dev);
 mtk_wed_reset(dev, MTK_WED_RESET_WED);

 if (dev->ver > MTK_WED_V1)
@@ -961,44 +1016,127 @@ mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size, int scale, bool tx)
 }

 static u32
-mtk_wed_check_busy(struct mtk_wed_device *dev)
+mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
 {
- if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY)
- return true;
-
- if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) &
- MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY)
- return true;
-
- if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY)
- return true;
-
- if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) &
- MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
- return true;
-
- if (wdma_r32(dev, MTK_WDMA_GLO_CFG) &
- MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
- return true;
-
- if (wed_r32(dev, MTK_WED_CTRL) &
- (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY))
+ if (wed_r32(dev, reg) & mask)
 return true;

 return false;
 }

 static int
-mtk_wed_poll_busy(struct mtk_wed_device *dev)
+mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
 {
- int sleep = 15000;
+ int sleep = 1000;
 int timeout = 100 * sleep;
 u32 val;

 return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
- timeout, false, dev);
+ timeout, false, dev, reg, mask);
 }

+static void
+mtk_wed_rx_reset(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
+ u8 state = WO_STATE_SER_RESET;
+ bool busy = false;
+ int i;
+
+ mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, MTK_WED_WO_CMD_CHANGE_STATE,
+ &state, sizeof(state), true);
+
+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN);
+ busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ MTK_WED_WPDMA_RX_D_RX_DRV_BUSY);
+ if (busy) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV);
+ } else {
+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
+ MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
+ MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
+
+ wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
+ MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
+ MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
+
+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
+ }
+
+ /* reset rro qm */
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
+ busy = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_RX_RRO_QM_BUSY);
+ if (busy) {
+ mtk_wed_reset(dev, MTK_WED_RESET_RX_RRO_QM);
+ } else {
+ wed_set(dev, MTK_WED_RROQM_RST_IDX,
+ MTK_WED_RROQM_RST_IDX_MIOD |
+ MTK_WED_RROQM_RST_IDX_FDBK);
+ wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
+ }
+
+ /* reset route qm */
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
+ busy = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_RX_ROUTE_QM_BUSY);
+ if (busy) {
+ mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
+ } else {
+ wed_set(dev, MTK_WED_RTQM_GLO_CFG,
+ MTK_WED_RTQM_Q_RST);
+ }
+
+ /* reset tx wdma */
+ mtk_wdma_tx_reset(dev);
+
+ /* reset tx wdma drv */
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN);
+ mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV);
+
+ /* reset wed rx dma */
+ busy = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
+ MTK_WED_GLO_CFG_RX_DMA_BUSY);
+ wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_RX_DMA_EN);
+ if (busy) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA);
+ } else {
+ wed_set(dev, MTK_WED_RESET_IDX,
+ MTK_WED_RESET_IDX_RX);
+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
+ }
+
+ /* reset rx bm */
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
+ mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WED_RX_BM_BUSY);
+ mtk_wed_reset(dev, MTK_WED_RESET_RX_BM);
+
+ /* wo change to enable state */
+ state = WO_STATE_ENABLE;
+ mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, MTK_WED_WO_CMD_CHANGE_STATE,
+ &state, sizeof(state), true);
+
+ /* wed_rx_ring_reset */
+ for (i = 0; i < ARRAY_SIZE(dev->rx_ring); i++) {
+ struct mtk_wdma_desc *desc = dev->rx_ring[i].desc;
+
+ if (!desc)
+ continue;
+
+ mtk_wed_ring_reset(desc, MTK_WED_RX_RING_SIZE, 1, false);
+ }
+
+ mtk_wed_free_rx_bm(dev);
+}
+
+
 static void
 mtk_wed_reset_dma(struct mtk_wed_device *dev)
 {
@@ -1012,25 +1150,28 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
 if (!desc)
 continue;

- mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE, dev->ver, true);
+ mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE, 1, true);
 }

- if (mtk_wed_poll_busy(dev))
- busy = mtk_wed_check_busy(dev);
+ /* 1.Reset WED Tx DMA */
+ wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_EN);
+ busy = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_BUSY);

 if (busy) {
 mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
 } else {
 wed_w32(dev, MTK_WED_RESET_IDX,
- MTK_WED_RESET_IDX_TX |
- MTK_WED_RESET_IDX_RX);
+ MTK_WED_RESET_IDX_TX);
 wed_w32(dev, MTK_WED_RESET_IDX, 0);
 }

- wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
- wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+ /* 2. Reset WDMA Rx DMA/Driver_Engine */
+ busy = !!mtk_wdma_rx_reset(dev);

- mtk_wdma_rx_reset(dev);
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+ busy = !!(busy ||
+ mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY));

 if (busy) {
 mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
@@ -1047,15 +1188,30 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
 MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
 }

+ /* 3. Reset WED WPDMA Tx Driver Engine */
+ wed_clr(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
 for (i = 0; i < 100; i++) {
 val = wed_r32(dev, MTK_WED_TX_BM_INTF);
 if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
 break;
 }
-
 mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
+
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_BM_EN);
 mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);

+ /* 4. Reset WED WPDMA Tx Driver Engine */
+ busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY);
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
+
+ busy = !!(busy ||
+ mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY));
 if (busy) {
 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
@@ -1065,6 +1221,16 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
 MTK_WED_WPDMA_RESET_IDX_TX |
 MTK_WED_WPDMA_RESET_IDX_RX);
 wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
+ if (dev->ver > MTK_WED_V1) {
+ wed_w32(dev, MTK_WED_RESET_IDX,
+ MTK_WED_RESET_WPDMA_IDX_RX);
+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
+ }
+ }
+
+ if (dev->ver > MTK_WED_V1) {
+ dev->init_done = false;
+ mtk_wed_rx_reset(dev);
 }

 }
@@ -1101,13 +1267,15 @@ mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
 }

 static int
-mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
+mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev,
+ int idx, int size, bool reset)
 {
 struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];

- if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
- dev->ver, true))
- return -ENOMEM;
+ if(!reset)
+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
+ dev->ver, true))
+ return -ENOMEM;

 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
 wdma->desc_phys);
@@ -1124,13 +1292,15 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
 }

 static int
-mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
+mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev,
+ int idx, int size, bool reset)
 {
 struct mtk_wed_ring *wdma = &dev->rx_wdma[idx];

- if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
- dev->ver, true))
- return -ENOMEM;
+ if (!reset)
+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
+ dev->ver, true))
+ return -ENOMEM;

 wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
 wdma->desc_phys);
@@ -1140,7 +1310,9 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
 MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
 wdma_w32(dev,
 MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);
-
+ if (reset)
+ mtk_wed_ring_reset(wdma->desc, MTK_WED_WDMA_RING_SIZE,
+ dev->ver, true);
 if (idx == 0) {
 wed_w32(dev, MTK_WED_WDMA_RING_TX
 + MTK_WED_RING_OFS_BASE, wdma->desc_phys);
@@ -1253,9 +1425,12 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
 {
 int i, ret;

+ if (dev->ver > MTK_WED_V1)
+ ret = mtk_wed_rx_bm_alloc(dev);
+
 for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
 if (!dev->tx_wdma[i].desc)
- mtk_wed_wdma_rx_ring_setup(dev, i, 16);
+ mtk_wed_wdma_rx_ring_setup(dev, i, 16, false);

 mtk_wed_hw_init(dev);

@@ -1347,10 +1522,6 @@ mtk_wed_attach(struct mtk_wed_device *dev)
 goto error;

 if (dev->ver > MTK_WED_V1) {
- ret = mtk_wed_rx_bm_alloc(dev);
- if (ret)
- goto error;
-
 ret = mtk_wed_rro_alloc(dev);
 if (ret)
 goto error;
@@ -1358,6 +1529,10 @@ mtk_wed_attach(struct mtk_wed_device *dev)

 mtk_wed_hw_init_early(dev);

+ init_completion(&dev->fe_reset_done);
+ init_completion(&dev->wlan_reset_done);
+ atomic_set(&dev->fe_reset, 0);
+
 if (dev->ver == MTK_WED_V1)
 regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
 BIT(hw->index), 0);
@@ -1374,7 +1549,8 @@ mtk_wed_attach(struct mtk_wed_device *dev)
 }

 static int
-mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
+mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx,
+ void __iomem *regs, bool reset)
 {
 struct mtk_wed_ring *ring = &dev->tx_ring[idx];

@@ -1392,10 +1568,12 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)

 BUG_ON(idx > ARRAY_SIZE(dev->tx_ring));

- if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, 1, true))
- return -ENOMEM;
+ if (!reset)
+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE,
+ 1, true))
+ return -ENOMEM;

- if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
+ if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE, reset))
 return -ENOMEM;

 ring->reg_base = MTK_WED_RING_TX(idx);
@@ -1443,21 +1621,24 @@ mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
 }

 static int
-mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
+mtk_wed_rx_ring_setup(struct mtk_wed_device *dev,
+ int idx, void __iomem *regs, bool reset)
 {
 struct mtk_wed_ring *ring = &dev->rx_ring[idx];

 BUG_ON(idx > ARRAY_SIZE(dev->rx_ring));

+ if (!reset)
+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE,
+ 1, false))
+ return -ENOMEM;

- if (mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE, 1, false))
- return -ENOMEM;
-
- if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
+ if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE, reset))
 return -ENOMEM;

 ring->reg_base = MTK_WED_RING_RX_DATA(idx);
 ring->wpdma = regs;
+ dev->hw->ring_num = idx + 1;

 /* WPDMA -> WED */
 wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
index 8ef5253..490873c 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
@@ -47,6 +47,7 @@ struct mtk_wed_hw {
 u32 num_flows;
 u32 wdma_phy;
 char dirname[5];
+ int ring_num;
 int irq;
 int index;
 u32 ver;
@@ -158,6 +159,9 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
 void mtk_wed_exit(void);
 int mtk_wed_flow_add(int index);
 void mtk_wed_flow_remove(int index);
+void mtk_wed_fe_reset(void);
+void mtk_wed_fe_reset_complete(void);
+
 #else
 static inline void
 mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
@@ -175,6 +179,13 @@ static inline int mtk_wed_flow_add(int index)
 static inline void mtk_wed_flow_remove(int index)
 {
 }
+static inline void mtk_wed_fe_reset(void)
+{
+}
+
+static inline void mtk_wed_fe_reset_complete(void)
+{
+}
 #endif

 #ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
index 31871f7..403a36b 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
@@ -38,11 +38,15 @@ struct mtk_wdma_desc {

 #define MTK_WED_RESET 0x008
 #define MTK_WED_RESET_TX_BM BIT(0)
+#define MTK_WED_RESET_RX_BM BIT(1)
 #define MTK_WED_RESET_TX_FREE_AGENT BIT(4)
 #define MTK_WED_RESET_WPDMA_TX_DRV BIT(8)
 #define MTK_WED_RESET_WPDMA_RX_DRV BIT(9)
+#define MTK_WED_RESET_WPDMA_RX_D_DRV BIT(10)
 #define MTK_WED_RESET_WPDMA_INT_AGENT BIT(11)
 #define MTK_WED_RESET_WED_TX_DMA BIT(12)
+#define MTK_WED_RESET_WED_RX_DMA BIT(13)
+#define MTK_WED_RESET_WDMA_TX_DRV BIT(16)
 #define MTK_WED_RESET_WDMA_RX_DRV BIT(17)
 #define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
 #define MTK_WED_RESET_RX_RRO_QM BIT(20)
@@ -185,7 +189,12 @@ struct mtk_wdma_desc {

 #define MTK_WED_RESET_IDX 0x20c
 #define MTK_WED_RESET_IDX_TX GENMASK(3, 0)
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+#define MTK_WED_RESET_IDX_RX GENMASK(7, 6)
+#else
 #define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
+#endif
+#define MTK_WED_RESET_WPDMA_IDX_RX GENMASK(31, 30)

 #define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
 #define MTK_WED_RX_MIB(_n) (0x2e0 + (_n) * 4)
@@ -299,6 +308,9 @@ struct mtk_wdma_desc {

 #define MTK_WED_WPDMA_RX_D_GLO_CFG 0x75c
 #define MTK_WED_WPDMA_RX_D_RX_DRV_EN BIT(0)
+#define MTK_WED_WPDMA_RX_D_RX_DRV_BUSY BIT(1)
+#define MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE BIT(3)
+#define MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE BIT(4)
 #define MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL GENMASK(11, 7)
 #define MTK_WED_WPDMA_RX_D_RXD_READ_LEN GENMASK(31, 24)

diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
index e8fca31..98ed390 100644
--- a/include/linux/soc/mediatek/mtk_wed.h
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -163,18 +163,23 @@ struct mtk_wed_device {
 void (*release_rx_buf)(struct mtk_wed_device *wed);
 void (*update_wo_rx_stats)(struct mtk_wed_device *wed,
 struct mtk_wed_wo_rx_stats *stats);
+ int (*reset)(struct mtk_wed_device *wed);
+ void (*reset_complete)(struct mtk_wed_device *wed);
 } wlan;
+ struct completion fe_reset_done;
+ struct completion wlan_reset_done;
+ atomic_t fe_reset;
 #endif
 };

 struct mtk_wed_ops {
 int (*attach)(struct mtk_wed_device *dev);
 int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring,
- void __iomem *regs);
+ void __iomem *regs, bool reset);
 int (*txfree_ring_setup)(struct mtk_wed_device *dev,
 void __iomem *regs);
 int (*rx_ring_setup)(struct mtk_wed_device *dev, int ring,
- void __iomem *regs);
+ void __iomem *regs, bool reset);
 int (*msg_update)(struct mtk_wed_device *dev, int cmd_id,
 void *data, int len);
 void (*detach)(struct mtk_wed_device *dev);
@@ -228,12 +233,13 @@ mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
 #define mtk_wed_device_active(_dev) !!(_dev)->ops
 #define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
 #define mtk_wed_device_start(_dev, _mask) (_dev)->ops->start(_dev, _mask)
-#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) \
- (_dev)->ops->tx_ring_setup(_dev, _ring, _regs)
+#define mtk_wed_device_stop(_dev) (_dev)->ops->stop(_dev)
+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs, _reset) \
+ (_dev)->ops->tx_ring_setup(_dev, _ring, _regs, _reset)
 #define mtk_wed_device_txfree_ring_setup(_dev, _regs) \
 (_dev)->ops->txfree_ring_setup(_dev, _regs)
-#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) \
- (_dev)->ops->rx_ring_setup(_dev, _ring, _regs)
+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs, _reset) \
+ (_dev)->ops->rx_ring_setup(_dev, _ring, _regs, _reset)
 #define mtk_wed_device_update_msg(_dev, _id, _msg, _len) \
 (_dev)->ops->msg_update(_dev, _id, _msg, _len)
 #define mtk_wed_device_reg_read(_dev, _reg) \
@@ -244,6 +250,8 @@ mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
 (_dev)->ops->irq_get(_dev, _mask)
 #define mtk_wed_device_irq_set_mask(_dev, _mask) \
 (_dev)->ops->irq_set_mask(_dev, _mask)
+#define mtk_wed_device_dma_reset(_dev) \
+ (_dev)->ops->reset_dma(_dev)
 #define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) \
 (_dev)->ops->ppe_check(_dev, _skb, _reason, _hash)
 #else
@@ -253,14 +261,15 @@ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
 }
 #define mtk_wed_device_detach(_dev) do {} while (0)
 #define mtk_wed_device_start(_dev, _mask) do {} while (0)
-#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV
+#define mtk_wed_device_stop(_dev) do {} while (0)
+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs, _reset) -ENODEV
 #define mtk_wed_device_txfree_ring_setup(_dev, _ring, _regs) -ENODEV
-#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) -ENODEV
-#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) -ENODEV
+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs, _reset) -ENODEV
 #define mtk_wed_device_reg_read(_dev, _reg) 0
 #define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
 #define mtk_wed_device_irq_get(_dev, _mask) 0
 #define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0)
+#define mtk_wed_device_dma_reset(_dev) do {} while (0)
 #define mtk_wed_device_ppe_check(_dev, _hash) do {} while (0)
 #endif

--
2.18.0
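
Note on usage (not part of the patch above): the diff only adds the WED-side
hooks. A WLAN driver is expected to supply the wlan.reset() and
wlan.reset_complete() callbacks, and the fe_reset flag plus the
fe_reset_done/wlan_reset_done completions added to struct mtk_wed_device are
left for that driver to use. The sketch below is a hypothetical illustration
of such glue code, assuming a driver-private struct wifi_dev and restart
worker that do not exist in this series.

	/* Hypothetical WLAN-side glue for the new WED SER callbacks. Only
	 * wlan.reset/wlan.reset_complete, fe_reset, fe_reset_done and
	 * wlan_reset_done come from this patch; everything else here is a
	 * placeholder.
	 */
	#include <linux/atomic.h>
	#include <linux/completion.h>
	#include <linux/jiffies.h>
	#include <linux/kernel.h>
	#include <linux/workqueue.h>
	#include <linux/soc/mediatek/mtk_wed.h>

	struct wifi_dev {
		struct mtk_wed_device wed;
		struct work_struct restart_work;	/* driver's own recovery worker */
	};

	static int wifi_wed_reset(struct mtk_wed_device *wed)
	{
		struct wifi_dev *priv = container_of(wed, struct wifi_dev, wed);

		/* remember that this recovery was requested by the frame engine */
		atomic_set(&wed->fe_reset, 1);

		/* kick the driver's restart path; it is expected to stop WED
		 * (e.g. via mtk_wed_device_stop()) and complete wlan_reset_done
		 * once its own teardown is finished
		 */
		schedule_work(&priv->restart_work);

		/* mtk_wed_fe_reset() relies on this callback being synchronous */
		if (!wait_for_completion_timeout(&wed->wlan_reset_done, 30 * HZ))
			return -ETIMEDOUT;

		return 0;
	}

	static void wifi_wed_reset_complete(struct mtk_wed_device *wed)
	{
		/* FE reset finished: let the waiting restart worker continue */
		complete(&wed->fe_reset_done);
	}

The exact pairing of the two completions (driver completes wlan_reset_done when
its teardown finishes, reset_complete() releases fe_reset_done) is an assumption
based on the field names; the WLAN-driver patches elsewhere in this series define
the actual protocol.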