From f70e83ccdca85840c3bf9e7a31fb871a12724dc2 Mon Sep 17 00:00:00 2001
From: Sujuan Chen <sujuan.chen@mediatek.com>
Date: Thu, 28 Jul 2022 14:49:16 +0800
Subject: [PATCH 3/3] add wed ser support

Add WED SER (system error recovery) support. When the ethernet frontend
(FE) resets, mtk_eth_soc now calls the new mtk_wed_fe_reset() helper
instead of going through the netdevice notifier chain: WED asks the WLAN
driver to run SER via the new wlan.ser_trigger() callback, waits for it
on wlan_reset_done, and signals fe_reset_done once the FE reset has
finished. The stop/tx_ring_setup/rx_ring_setup ops gain a "reset" flag
so rings can be reprogrammed in place instead of reallocated, and the
WED rx datapath (WPDMA RX_D, RRO/route QMs, WDMA, rx BM) gets a full
reset sequence in the new mtk_wed_rx_reset().

Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c  |   9 +-
 drivers/net/ethernet/mediatek/mtk_wed.c      | 343 ++++++++++++++-----
 drivers/net/ethernet/mediatek/mtk_wed.h      |   2 +
 drivers/net/ethernet/mediatek/mtk_wed_regs.h |  12 +
 include/linux/soc/mediatek/mtk_wed.h         |  28 +-
 5 files changed, 295 insertions(+), 99 deletions(-)

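Note (kept in the notes area above the first diff, so it does not affect
applying the patch): the FE/WiFi SER handshake this patch introduces, in
sketch form. All identifiers below are the ones added by this series.

/*
 * FE reset / WiFi SER handshake (sketch):
 *
 *   mtk_pending_work()                              [mtk_eth_soc.c]
 *     mtk_wed_fe_reset(MTK_FE_START_RESET)          [mtk_wed.c]
 *       atomic_set(&dev->fe_reset, 1);
 *       dev->wlan.ser_trigger(dev);          -> WLAN driver starts SER
 *       wait_for_completion(&dev->wlan_reset_done);
 *     ... FE hardware reset runs ...
 *     mtk_wed_fe_reset(MTK_FE_RESET_DONE)
 *       complete(&dev->fe_reset_done);       -> WLAN driver may resume
 */
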
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index c582bb9..5259141 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -3220,10 +3220,14 @@ static void mtk_pending_work(struct work_struct *work)
 	mtk_prepare_reset_fe(eth);
 
 	/* Trigger Wifi SER reset */
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+	mtk_wed_fe_reset(MTK_FE_START_RESET);
+#else
 	call_netdevice_notifiers(MTK_FE_START_RESET, eth->netdev[0]);
 	rtnl_unlock();
 	wait_for_completion_timeout(&wait_ser_done, 5000);
 	rtnl_lock();
+#endif
 
 	while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
 		cpu_relax();
@@ -3284,8 +3288,11 @@ static void mtk_pending_work(struct work_struct *work)
 
 	call_netdevice_notifiers(MTK_FE_RESET_NAT_DONE, eth->netdev[0]);
 	pr_info("[%s] HNAT reset done !\n", __func__);
-
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+	mtk_wed_fe_reset(MTK_FE_RESET_DONE);
+#else
 	call_netdevice_notifiers(MTK_FE_RESET_DONE, eth->netdev[0]);
+#endif
 	pr_info("[%s] WiFi SER reset done !\n", __func__);
 
 	atomic_dec(&reset_lock);
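
The diff defines only the WED side of the handshake; the WLAN driver has
to provide ser_trigger() and complete wlan_reset_done. A minimal sketch
of such glue follows — struct wlan_dev, the work item, and both function
names are hypothetical, not part of this patch:

/* Hypothetical WLAN-side glue. Only the mtk_wed_device fields
 * (ser_trigger, fe_reset, wlan_reset_done, fe_reset_done) come from
 * this patch; everything else is illustrative.
 */
struct wlan_dev {
	struct mtk_wed_device wed;
	struct work_struct ser_work;
};

static void wlan_wed_ser_trigger(struct mtk_wed_device *wed)
{
	struct wlan_dev *wd = container_of(wed, struct wlan_dev, wed);

	/* Called from mtk_wed_fe_reset(MTK_FE_START_RESET) with RTNL
	 * dropped; defer the heavy lifting to the driver's SER work.
	 */
	schedule_work(&wd->ser_work);
}

static void wlan_ser_work(struct work_struct *work)
{
	struct wlan_dev *wd = container_of(work, struct wlan_dev, ser_work);
	struct mtk_wed_device *wed = &wd->wed;

	/* Quiesce the WLAN datapath, then unblock mtk_wed_fe_reset(). */
	complete(&wed->wlan_reset_done);

	/* If the FE initiated this SER, wait until its reset has
	 * finished before touching the rings again.
	 */
	if (atomic_read(&wed->fe_reset)) {
		wait_for_completion(&wed->fe_reset_done);
		atomic_set(&wed->fe_reset, 0);
	}
}
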
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 7552795..bb6ec97 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -13,8 +13,10 @@
 #include <linux/debugfs.h>
 #include <linux/iopoll.h>
 #include <linux/soc/mediatek/mtk_wed.h>
+#include <net/rtnetlink.h>
 
 #include "mtk_eth_soc.h"
+#include "mtk_eth_reset.h"
 #include "mtk_wed_regs.h"
 #include "mtk_wed.h"
 #include "mtk_ppe.h"
@@ -71,23 +73,27 @@ mtk_wdma_read_reset(struct mtk_wed_device *dev)
 	return wdma_r32(dev, MTK_WDMA_GLO_CFG);
 }
 
-static void
+static int
 mtk_wdma_rx_reset(struct mtk_wed_device *dev)
 {
 	u32 status;
 	u32 mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY;
-	int i;
+	int busy, i;
 
 	wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN);
-	if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
-			       !(status & mask), 0, 1000))
-		WARN_ON_ONCE(1);
+	busy = readx_poll_timeout(mtk_wdma_read_reset, dev, status,
+				  !(status & mask), 0, 10000);
+
+	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
+	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
 
 	for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
 		if (!dev->rx_wdma[i].desc) {
 			wdma_w32(dev, MTK_WDMA_RING_RX(i) +
 				 MTK_WED_RING_OFS_CPU_IDX, 0);
 		}
+
+	return busy;
 }
 
 static void
@@ -99,14 +105,14 @@ mtk_wdma_tx_reset(struct mtk_wed_device *dev)
 
 	wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
 	if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
-			       !(status & mask), 0, 1000))
+			       !(status & mask), 0, 10000))
 		WARN_ON_ONCE(1);
 
+	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
+	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
 	for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
-		if (!dev->tx_wdma[i].desc) {
 			wdma_w32(dev, MTK_WDMA_RING_TX(i) +
 				 MTK_WED_RING_OFS_CPU_IDX, 0);
-		}
 }
 
 static u32
@@ -505,8 +511,8 @@ mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
 			wifi_w32(dev, dev->wlan.wpdma_rx_glo -
 				 dev->wlan.phy_base, val);
 		} else {
-			dev_err(dev->hw->dev, "mtk_wed%d: rx dma enable failed!\n",
-				dev->hw->index);
+			dev_err(dev->hw->dev, "mtk_wed%d: rx(%d) dma enable failed!\n",
+				dev->hw->index, idx);
 		}
 	}
 
@@ -557,7 +563,7 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev)
 			   FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
 				      0x2));
 
-		for (idx = 0; idx < MTK_WED_RX_QUEUES; idx++)
+		for (idx = 0; idx < dev->hw->ring_num; idx++)
 			mtk_wed_check_wfdma_rx_fill(dev, idx);
 	}
 }
@@ -597,26 +603,31 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
 }
 
 static void
-mtk_wed_stop(struct mtk_wed_device *dev)
+mtk_wed_stop(struct mtk_wed_device *dev, bool reset)
 {
-	mtk_wed_dma_disable(dev);
-	mtk_wed_set_512_support(dev, false);
-
 	if (dev->ver > MTK_WED_V1) {
 		wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
 		wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0);
 	}
 	mtk_wed_set_ext_int(dev, false);
 
-	wed_clr(dev, MTK_WED_CTRL,
-		MTK_WED_CTRL_WDMA_INT_AGENT_EN |
-		MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
-		MTK_WED_CTRL_WED_TX_BM_EN |
-		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
-
-	if (dev->ver > MTK_WED_V1) {
+	if (!reset) {
+		mtk_wed_dma_disable(dev);
+		mtk_wed_set_512_support(dev, false);
+		if (dev->ver > MTK_WED_V1) {
+			wed_clr(dev, MTK_WED_CTRL,
+				MTK_WED_CTRL_RX_RRO_QM_EN |
+				MTK_WED_CTRL_RX_ROUTE_QM_EN |
+				MTK_WED_CTRL_WED_RX_BM_EN);
+		} else {
+			regmap_write(dev->hw->mirror,
+				     dev->hw->index * 4, 0);
+		}
 		wed_clr(dev, MTK_WED_CTRL,
-			MTK_WED_CTRL_WED_RX_BM_EN);
+			MTK_WED_CTRL_WDMA_INT_AGENT_EN |
+			MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
+			MTK_WED_CTRL_WED_TX_BM_EN |
+			MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
 	}
 
 	wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
@@ -634,16 +645,13 @@ mtk_wed_detach(struct mtk_wed_device *dev)
 
 	mutex_lock(&hw_lock);
 
-	mtk_wed_stop(dev);
+	mtk_wed_stop(dev, false);
 
-	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
-	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+	mtk_wdma_rx_reset(dev);
 
 	mtk_wed_reset(dev, MTK_WED_RESET_WED);
 
-	wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
-	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
-	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+	mtk_wdma_tx_reset(dev);
 
 	mtk_wed_free_buffer(dev);
 	mtk_wed_free_tx_rings(dev);
@@ -653,8 +661,6 @@ mtk_wed_detach(struct mtk_wed_device *dev)
 		mtk_wed_wo_exit(hw);
 	}
 
-	mtk_wdma_rx_reset(dev);
-
 	if (dev->wlan.bus_type == MTK_BUS_TYPE_PCIE) {
 		wlan_node = dev->wlan.pci_dev->dev.of_node;
 		if (of_dma_is_coherent(wlan_node))
@@ -748,7 +754,7 @@ mtk_wed_hw_init_early(struct mtk_wed_device *dev)
 {
 	u32 mask, set;
 
-	mtk_wed_stop(dev);
+	mtk_wed_stop(dev, false);
 	mtk_wed_reset(dev, MTK_WED_RESET_WED);
 
 	if (dev->ver > MTK_WED_V1)
@@ -961,44 +967,127 @@ mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size, int scale, bool tx)
 }
 
 static u32
-mtk_wed_check_busy(struct mtk_wed_device *dev)
+mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
 {
-	if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY)
-		return true;
-
-	if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) &
-	    MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY)
-		return true;
-
-	if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY)
-		return true;
-
-	if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) &
-	    MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
-		return true;
-
-	if (wdma_r32(dev, MTK_WDMA_GLO_CFG) &
-	    MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
-		return true;
-
-	if (wed_r32(dev, MTK_WED_CTRL) &
-	    (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY))
+	if (wed_r32(dev, reg) & mask)
 		return true;
 
 	return false;
 }
 
 static int
-mtk_wed_poll_busy(struct mtk_wed_device *dev)
+mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
 {
-	int sleep = 15000;
+	int sleep = 1000;
 	int timeout = 100 * sleep;
 	u32 val;
 
 	return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
-				 timeout, false, dev);
+				 timeout, false, dev, reg, mask);
 }
 
+static void
+mtk_wed_rx_reset(struct mtk_wed_device *dev)
+{
+	struct mtk_wed_wo *wo = dev->hw->wed_wo;
+	u8 state = WO_STATE_SER_RESET;
+	bool busy = false;
+	int i;
+
+	mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, WO_CMD_CHANGE_STATE,
+			     &state, sizeof(state), true);
+
+	wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN);
+	busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+				 MTK_WED_WPDMA_RX_D_RX_DRV_BUSY);
+	if (busy) {
+		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
+		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV);
+	} else {
+		wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
+			MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
+			MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
+
+		wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+			MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
+			MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
+		wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+			MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
+			MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
+
+		wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
+	}
+
+	/* reset rro qm */
+	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
+	busy = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+				 MTK_WED_CTRL_RX_RRO_QM_BUSY);
+	if (busy) {
+		mtk_wed_reset(dev, MTK_WED_RESET_RX_RRO_QM);
+	} else {
+		wed_set(dev, MTK_WED_RROQM_RST_IDX,
+			MTK_WED_RROQM_RST_IDX_MIOD |
+			MTK_WED_RROQM_RST_IDX_FDBK);
+		wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
+	}
+
+	/* reset route qm */
+	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
+	busy = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+				 MTK_WED_CTRL_RX_ROUTE_QM_BUSY);
+	if (busy) {
+		mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
+	} else {
+		wed_set(dev, MTK_WED_RTQM_GLO_CFG,
+			MTK_WED_RTQM_Q_RST);
+	}
+
+	/* reset tx wdma */
+	mtk_wdma_tx_reset(dev);
+
+	/* reset tx wdma drv */
+	wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN);
+	mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+			  MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
+	mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV);
+
+	/* reset wed rx dma */
+	busy = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
+				 MTK_WED_GLO_CFG_RX_DMA_BUSY);
+	wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_RX_DMA_EN);
+	if (busy) {
+		mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA);
+	} else {
+		wed_set(dev, MTK_WED_RESET_IDX,
+			MTK_WED_RESET_IDX_RX);
+		wed_w32(dev, MTK_WED_RESET_IDX, 0);
+	}
+
+	/* reset rx bm */
+	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
+	mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+			  MTK_WED_CTRL_WED_RX_BM_BUSY);
+	mtk_wed_reset(dev, MTK_WED_RESET_RX_BM);
+
+	/* wo change to enable state */
+	state = WO_STATE_ENABLE;
+	mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, WO_CMD_CHANGE_STATE,
+			     &state, sizeof(state), true);
+
+	/* wed_rx_ring_reset */
+	for (i = 0; i < ARRAY_SIZE(dev->rx_ring); i++) {
+		struct mtk_wdma_desc *desc = dev->rx_ring[i].desc;
+
+		if (!desc)
+			continue;
+
+		mtk_wed_ring_reset(desc, MTK_WED_RX_RING_SIZE, 1, false);
+	}
+
+	mtk_wed_free_rx_bm(dev);
+}
+
+
 static void
 mtk_wed_reset_dma(struct mtk_wed_device *dev)
 {
@@ -1012,25 +1101,28 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
 		if (!desc)
 			continue;
 
-		mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE, dev->ver, true);
+		mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE, 1, true);
 	}
 
-	if (mtk_wed_poll_busy(dev))
-		busy = mtk_wed_check_busy(dev);
+	/* 1. Reset WED Tx DMA */
+	wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_EN);
+	busy = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_BUSY);
 
 	if (busy) {
 		mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
 	} else {
 		wed_w32(dev, MTK_WED_RESET_IDX,
-			MTK_WED_RESET_IDX_TX |
-			MTK_WED_RESET_IDX_RX);
+			MTK_WED_RESET_IDX_TX);
 		wed_w32(dev, MTK_WED_RESET_IDX, 0);
 	}
 
-	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
-	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+	/* 2. Reset WDMA Rx DMA/Driver_Engine */
+	busy = !!mtk_wdma_rx_reset(dev);
 
-	mtk_wdma_rx_reset(dev);
+	wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+	busy = !!(busy ||
+		  mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG,
+				    MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY));
 
 	if (busy) {
 		mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
@@ -1047,15 +1139,30 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
 			MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
 	}
 
+	/* 3. Reset WED Tx free agent and Tx BM */
+	wed_clr(dev, MTK_WED_CTRL,
+		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
 	for (i = 0; i < 100; i++) {
 		val = wed_r32(dev, MTK_WED_TX_BM_INTF);
 		if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
 			break;
 	}
-
 	mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
+
+	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_BM_EN);
 	mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
 
+	/* 4. Reset WED WPDMA Tx Driver Engine */
+	busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
+				 MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY);
+	wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
+
+	busy = !!(busy ||
+		  mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
+				    MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY));
 	if (busy) {
 		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
 		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
@@ -1065,6 +1172,16 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
 			MTK_WED_WPDMA_RESET_IDX_TX |
 			MTK_WED_WPDMA_RESET_IDX_RX);
 		wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
+		if (dev->ver > MTK_WED_V1) {
+			wed_w32(dev, MTK_WED_RESET_IDX,
+				MTK_WED_RESET_WPDMA_IDX_RX);
+			wed_w32(dev, MTK_WED_RESET_IDX, 0);
+		}
+	}
+
+	if (dev->ver > MTK_WED_V1) {
+		dev->init_done = false;
+		mtk_wed_rx_reset(dev);
 	}
 
 }
@@ -1101,13 +1218,15 @@ mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
 }
 
 static int
-mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
+mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev,
+			   int idx, int size, bool reset)
 {
 	struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
 
-	if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
-			       dev->ver, true))
-		return -ENOMEM;
+	if (!reset)
+		if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
+				       dev->ver, true))
+			return -ENOMEM;
 
 	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
 		 wdma->desc_phys);
@@ -1124,13 +1243,15 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
 }
 
 static int
-mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
+mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev,
+			   int idx, int size, bool reset)
 {
 	struct mtk_wed_ring *wdma = &dev->rx_wdma[idx];
 
-	if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
-			       dev->ver, true))
-		return -ENOMEM;
+	if (!reset)
+		if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
+				       dev->ver, true))
+			return -ENOMEM;
 
 	wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
 		 wdma->desc_phys);
@@ -1140,7 +1261,9 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
 		 MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
 	wdma_w32(dev,
 		 MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);
-
+	if (reset)
+		mtk_wed_ring_reset(wdma->desc, MTK_WED_WDMA_RING_SIZE,
+				   dev->ver, true);
 	if (idx == 0) {
 		wed_w32(dev, MTK_WED_WDMA_RING_TX
 			+ MTK_WED_RING_OFS_BASE, wdma->desc_phys);
@@ -1253,9 +1376,12 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
 {
 	int i, ret;
 
+	if (dev->ver > MTK_WED_V1)
+		ret = mtk_wed_rx_bm_alloc(dev);
+
 	for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
 		if (!dev->tx_wdma[i].desc)
-			mtk_wed_wdma_rx_ring_setup(dev, i, 16);
+			mtk_wed_wdma_rx_ring_setup(dev, i, 16, false);
 
 	mtk_wed_hw_init(dev);
 
@@ -1340,10 +1466,6 @@ mtk_wed_attach(struct mtk_wed_device *dev)
 		goto error;
 
 	if (dev->ver > MTK_WED_V1) {
-		ret = mtk_wed_rx_bm_alloc(dev);
-		if (ret)
-			goto error;
-
 		ret = mtk_wed_rro_alloc(dev);
 		if (ret)
 			goto error;
@@ -1351,6 +1473,10 @@ mtk_wed_attach(struct mtk_wed_device *dev)
 
 	mtk_wed_hw_init_early(dev);
 
+	init_completion(&dev->fe_reset_done);
+	init_completion(&dev->wlan_reset_done);
+	atomic_set(&dev->fe_reset, 0);
+
 	if (dev->ver == MTK_WED_V1)
 		regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
 				   BIT(hw->index), 0);
@@ -1367,7 +1493,8 @@ out:
 }
 
 static int
-mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
+mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx,
+		      void __iomem *regs, bool reset)
 {
 	struct mtk_wed_ring *ring = &dev->tx_ring[idx];
 
@@ -1385,10 +1512,12 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
 
 	BUG_ON(idx > ARRAY_SIZE(dev->tx_ring));
 
-	if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, 1, true))
-		return -ENOMEM;
+	if (!reset)
+		if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE,
+				       1, true))
+			return -ENOMEM;
 
-	if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
+	if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE, reset))
 		return -ENOMEM;
 
 	ring->reg_base = MTK_WED_RING_TX(idx);
@@ -1436,21 +1565,24 @@ mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
 }
 
 static int
-mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
+mtk_wed_rx_ring_setup(struct mtk_wed_device *dev,
+		      int idx, void __iomem *regs, bool reset)
 {
 	struct mtk_wed_ring *ring = &dev->rx_ring[idx];
 
 	BUG_ON(idx > ARRAY_SIZE(dev->rx_ring));
 
+	if (!reset)
+		if (mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE,
+				       1, false))
+			return -ENOMEM;
 
-	if (mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE, 1, false))
-		return -ENOMEM;
-
-	if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
+	if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE, reset))
 		return -ENOMEM;
 
 	ring->reg_base = MTK_WED_RING_RX_DATA(idx);
 	ring->wpdma = regs;
+	dev->hw->ring_num = idx + 1;
 
 	/* WPDMA -> WED */
 	wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
@@ -1492,6 +1624,41 @@ mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
 	wed_w32(dev, MTK_WED_INT_MASK, mask);
 }
 
+void mtk_wed_fe_reset(int cmd)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+		struct mtk_wed_hw *hw = hw_list[i];
+		struct mtk_wed_device *dev;
+
+		dev = hw->wed_dev;
+		if (!dev)
+			continue;
+
+		switch (cmd) {
+		case MTK_FE_START_RESET:
+			pr_info("%s: receive fe reset start event, trigger SER\n", __func__);
+			atomic_set(&dev->fe_reset, 1);
+			dev->wlan.ser_trigger(dev);
+			rtnl_unlock();
+			wait_for_completion(&dev->wlan_reset_done);
+			rtnl_lock();
+
+			break;
+		case MTK_FE_RESET_DONE:
+			pr_info("%s: receive fe reset done event, continue SER\n", __func__);
+			complete(&dev->fe_reset_done);
+			break;
+		default:
+			break;
+		}
+
+	}
+
+	return;
+}
+
 int mtk_wed_flow_add(int index)
 {
 	struct mtk_wed_hw *hw = hw_list[index];
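
As a reading aid, the recovery sequence implemented by the new
mtk_wed_rx_reset() above, as I read the code:

/*
 * mtk_wed_rx_reset() sequence (digest):
 *  1. move the WO MCU to WO_STATE_SER_RESET
 *  2. stop the WPDMA RX_D driver engine; full block reset if it stays
 *     busy, otherwise just reset its ring indices
 *  3. drain/reset the RRO and ROUTE QMs
 *  4. reset WDMA tx and the WED-side WDMA tx driver engine
 *  5. reset WED rx DMA and the rx buffer manager
 *  6. move the WO MCU back to WO_STATE_ENABLE
 *  7. clear every rx ring descriptor and free the rx buffers
 *     (mtk_wed_free_rx_bm())
 */
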
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
index 8ef5253..f757eac 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
@@ -47,6 +47,7 @@ struct mtk_wed_hw {
 	u32 num_flows;
 	u32 wdma_phy;
 	char dirname[5];
+	int ring_num;
 	int irq;
 	int index;
 	u32 ver;
@@ -196,5 +197,6 @@ void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo, struct sk_buff *skb);
 int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo,int to_id, int cmd,
 			 const void *data, int len, bool wait_resp);
 int mtk_wed_wo_rx_poll(struct napi_struct *napi, int budget);
+void mtk_wed_fe_reset(int cmd);
 
 #endif
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
index 9d021e2..cfcd94f 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
@@ -38,11 +38,15 @@ struct mtk_wdma_desc {
 
 #define MTK_WED_RESET					0x008
 #define MTK_WED_RESET_TX_BM				BIT(0)
+#define MTK_WED_RESET_RX_BM				BIT(1)
 #define MTK_WED_RESET_TX_FREE_AGENT			BIT(4)
 #define MTK_WED_RESET_WPDMA_TX_DRV			BIT(8)
 #define MTK_WED_RESET_WPDMA_RX_DRV			BIT(9)
+#define MTK_WED_RESET_WPDMA_RX_D_DRV			BIT(10)
 #define MTK_WED_RESET_WPDMA_INT_AGENT			BIT(11)
 #define MTK_WED_RESET_WED_TX_DMA			BIT(12)
+#define MTK_WED_RESET_WED_RX_DMA			BIT(13)
+#define MTK_WED_RESET_WDMA_TX_DRV			BIT(16)
 #define MTK_WED_RESET_WDMA_RX_DRV			BIT(17)
 #define MTK_WED_RESET_WDMA_INT_AGENT			BIT(19)
 #define MTK_WED_RESET_RX_RRO_QM				BIT(20)
@@ -186,7 +190,12 @@ struct mtk_wdma_desc {
 
 #define MTK_WED_RESET_IDX				0x20c
 #define MTK_WED_RESET_IDX_TX				GENMASK(3, 0)
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+#define MTK_WED_RESET_IDX_RX				GENMASK(7, 6)
+#else
 #define MTK_WED_RESET_IDX_RX				GENMASK(17, 16)
+#endif
+#define MTK_WED_RESET_WPDMA_IDX_RX			GENMASK(31, 30)
 
 #define MTK_WED_TX_MIB(_n)				(0x2a0 + (_n) * 4)
 #define MTK_WED_RX_MIB(_n)				(0x2e0 + (_n) * 4)
@@ -300,6 +309,9 @@ struct mtk_wdma_desc {
 
 #define MTK_WED_WPDMA_RX_D_GLO_CFG			0x75c
 #define MTK_WED_WPDMA_RX_D_RX_DRV_EN			BIT(0)
+#define MTK_WED_WPDMA_RX_D_RX_DRV_BUSY			BIT(1)
+#define MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE		BIT(3)
+#define MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE		BIT(4)
 #define MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL		GENMASK(11, 7)
 #define MTK_WED_WPDMA_RX_D_RXD_READ_LEN			GENMASK(31, 24)
 
699
700diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
developer2ed23d42022-08-09 16:20:46 +0800701index 9a9cc1b..31f4a26 100644
developerc1b2cd12022-07-28 18:35:24 +0800702--- a/include/linux/soc/mediatek/mtk_wed.h
703+++ b/include/linux/soc/mediatek/mtk_wed.h
developer2ed23d42022-08-09 16:20:46 +0800704@@ -114,23 +114,27 @@ struct mtk_wed_device {
705 u32 (*init_rx_buf)(struct mtk_wed_device *wed,
706 int pkt_num);
707 void (*release_rx_buf)(struct mtk_wed_device *wed);
708+ void (*ser_trigger)(struct mtk_wed_device *wed);
709 } wlan;
710+ struct completion fe_reset_done;
711+ struct completion wlan_reset_done;
712+ atomic_t fe_reset;
713 #endif
714 };
715
developerc1b2cd12022-07-28 18:35:24 +0800716 struct mtk_wed_ops {
717 int (*attach)(struct mtk_wed_device *dev);
718 int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring,
719- void __iomem *regs);
720+ void __iomem *regs, bool reset);
721 int (*txfree_ring_setup)(struct mtk_wed_device *dev,
722 void __iomem *regs);
723 int (*rx_ring_setup)(struct mtk_wed_device *dev, int ring,
724- void __iomem *regs);
725+ void __iomem *regs, bool reset);
726 int (*msg_update)(struct mtk_wed_device *dev, int cmd_id,
727 void *data, int len);
728 void (*detach)(struct mtk_wed_device *dev);
729
730- void (*stop)(struct mtk_wed_device *dev);
731+ void (*stop)(struct mtk_wed_device *dev, bool reset);
732 void (*start)(struct mtk_wed_device *dev, u32 irq_mask);
733 void (*reset_dma)(struct mtk_wed_device *dev);
734
developer2ed23d42022-08-09 16:20:46 +0800735@@ -169,12 +173,13 @@ mtk_wed_device_attach(struct mtk_wed_device *dev)
developerc1b2cd12022-07-28 18:35:24 +0800736 #define mtk_wed_device_active(_dev) !!(_dev)->ops
737 #define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
738 #define mtk_wed_device_start(_dev, _mask) (_dev)->ops->start(_dev, _mask)
739-#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) \
740- (_dev)->ops->tx_ring_setup(_dev, _ring, _regs)
741+#define mtk_wed_device_stop(_dev, _reset) (_dev)->ops->stop(_dev, _reset)
742+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs, _reset) \
743+ (_dev)->ops->tx_ring_setup(_dev, _ring, _regs, _reset)
744 #define mtk_wed_device_txfree_ring_setup(_dev, _regs) \
745 (_dev)->ops->txfree_ring_setup(_dev, _regs)
746-#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) \
747- (_dev)->ops->rx_ring_setup(_dev, _ring, _regs)
748+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs, _reset) \
749+ (_dev)->ops->rx_ring_setup(_dev, _ring, _regs, _reset)
750 #define mtk_wed_device_update_msg(_dev, _id, _msg, _len) \
751 (_dev)->ops->msg_update(_dev, _id, _msg, _len)
752 #define mtk_wed_device_reg_read(_dev, _reg) \
developer2ed23d42022-08-09 16:20:46 +0800753@@ -185,6 +190,8 @@ mtk_wed_device_attach(struct mtk_wed_device *dev)
developerc1b2cd12022-07-28 18:35:24 +0800754 (_dev)->ops->irq_get(_dev, _mask)
755 #define mtk_wed_device_irq_set_mask(_dev, _mask) \
756 (_dev)->ops->irq_set_mask(_dev, _mask)
757+#define mtk_wed_device_dma_reset(_dev) \
758+ (_dev)->ops->reset_dma(_dev)
759 #define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) \
760 (_dev)->ops->ppe_check(_dev, _skb, _reason, _hash)
761 #else
developer2ed23d42022-08-09 16:20:46 +0800762@@ -194,14 +201,15 @@ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
developerc1b2cd12022-07-28 18:35:24 +0800763 }
764 #define mtk_wed_device_detach(_dev) do {} while (0)
765 #define mtk_wed_device_start(_dev, _mask) do {} while (0)
766-#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV
767+#define mtk_wed_device_stop(_dev, _reset) do {} while (0)
768+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs, _reset) -ENODEV
769 #define mtk_wed_device_txfree_ring_setup(_dev, _ring, _regs) -ENODEV
770-#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) -ENODEV
771-#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) -ENODEV
772+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs, _reset) -ENODEV
773 #define mtk_wed_device_reg_read(_dev, _reg) 0
774 #define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
775 #define mtk_wed_device_irq_get(_dev, _mask) 0
776 #define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0)
777+#define mtk_wed_device_dma_reset(_dev) do {} while (0)
778 #define mtk_wed_device_ppe_check(_dev, _hash) do {} while (0)
779 #endif
780
-- 
2.18.0

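For completeness, how a WED consumer might use the reworked API during
its own SER path. The helper below is hypothetical: the ring index 0,
the saved register bases, and the function name are assumptions, while
the macros and their reset flag are the ones this patch defines.

/* Hypothetical consumer-side reset path built on the changed macros. */
static void wlan_wed_reinit_rings(struct mtk_wed_device *wed,
				  void __iomem *tx_regs,
				  void __iomem *rx_regs, u32 irq_mask)
{
	mtk_wed_device_stop(wed, true);	/* reset=true: keep buffers */
	mtk_wed_device_dma_reset(wed);	/* new wrapper for ->reset_dma() */

	/* reset=true reprograms the existing rings instead of
	 * reallocating them.
	 */
	mtk_wed_device_tx_ring_setup(wed, 0, tx_regs, true);
	mtk_wed_device_rx_ring_setup(wed, 0, rx_regs, true);

	mtk_wed_device_start(wed, irq_mask);
}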