blob: fcc5e3a1160323ab7519cfea851e1d564499d163 [file] [log] [blame]
developerc1b2cd12022-07-28 18:35:24 +08001From f70e83ccdca85840c3bf9e7a31fb871a12724dc2 Mon Sep 17 00:00:00 2001
2From: Sujuan Chen <sujuan.chen@mediatek.com>
3Date: Thu, 28 Jul 2022 14:49:16 +0800
4Subject: [PATCH 3/3] add WED SER support
5
6Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
7---
8 drivers/net/ethernet/mediatek/mtk_wed.c | 298 ++++++++++++++-----
9 drivers/net/ethernet/mediatek/mtk_wed.h | 1 +
10 drivers/net/ethernet/mediatek/mtk_wed_regs.h | 12 +
11 include/linux/soc/mediatek/mtk_wed.h | 24 +-
12 4 files changed, 245 insertions(+), 90 deletions(-)
13
14diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
15index 0750def..7b6b777 100644
16--- a/drivers/net/ethernet/mediatek/mtk_wed.c
17+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
18@@ -71,23 +71,27 @@ mtk_wdma_read_reset(struct mtk_wed_device *dev)
19 return wdma_r32(dev, MTK_WDMA_GLO_CFG);
20 }
21
22-static void
23+static int
24 mtk_wdma_rx_reset(struct mtk_wed_device *dev)
25 {
26 u32 status;
27 u32 mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY;
28- int i;
29+ int busy, i;
30
31 wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN);
32- if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
33- !(status & mask), 0, 1000))
34- WARN_ON_ONCE(1);
35+ busy = readx_poll_timeout(mtk_wdma_read_reset, dev, status,
36+ !(status & mask), 0, 10000);
37+
38+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
39+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
40
41 for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
42 if (!dev->rx_wdma[i].desc) {
43 wdma_w32(dev, MTK_WDMA_RING_RX(i) +
44 MTK_WED_RING_OFS_CPU_IDX, 0);
45 }
46+
47+ return busy;
48 }
49
50 static void
51@@ -99,9 +103,11 @@ mtk_wdma_tx_reset(struct mtk_wed_device *dev)
52
53 wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
54 if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
55- !(status & mask), 0, 1000))
56+ !(status & mask), 0, 10000))
57 WARN_ON_ONCE(1);
58
59+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
60+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
61 for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
62 if (!dev->tx_wdma[i].desc) {
63 wdma_w32(dev, MTK_WDMA_RING_TX(i) +
64@@ -505,8 +511,8 @@ mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
65 wifi_w32(dev, dev->wlan.wpdma_rx_glo -
66 dev->wlan.phy_base, val);
67 } else {
68- dev_err(dev->hw->dev, "mtk_wed%d: rx dma enable failed!\n",
69- dev->hw->index);
70+ dev_err(dev->hw->dev, "mtk_wed%d: rx(%d) dma enable failed!\n",
71+ dev->hw->index, idx);
72 }
73 }
74
75@@ -557,7 +563,7 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev)
76 FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
77 0x2));
78
79- for (idx = 0; idx < MTK_WED_RX_QUEUES; idx++)
80+ for (idx = 0; idx < dev->hw->ring_num; idx++)
81 mtk_wed_check_wfdma_rx_fill(dev, idx);
82 }
83 }
84@@ -597,26 +603,31 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
85 }
86
87 static void
88-mtk_wed_stop(struct mtk_wed_device *dev)
89+mtk_wed_stop(struct mtk_wed_device *dev, bool reset)
90 {
91- mtk_wed_dma_disable(dev);
92- mtk_wed_set_512_support(dev, false);
93-
94 if (dev->ver > MTK_WED_V1) {
95 wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
96 wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0);
97 }
98 mtk_wed_set_ext_int(dev, false);
99
100- wed_clr(dev, MTK_WED_CTRL,
101- MTK_WED_CTRL_WDMA_INT_AGENT_EN |
102- MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
103- MTK_WED_CTRL_WED_TX_BM_EN |
104- MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
105-
106- if (dev->ver > MTK_WED_V1) {
107+ if (!reset) {
108+ mtk_wed_dma_disable(dev);
109+ mtk_wed_set_512_support(dev, false);
110+ if (dev->ver > MTK_WED_V1) {
111+ wed_clr(dev, MTK_WED_CTRL,
112+ MTK_WED_CTRL_RX_RRO_QM_EN |
113+ MTK_WED_CTRL_RX_ROUTE_QM_EN |
114+ MTK_WED_CTRL_WED_RX_BM_EN);
115+ } else {
116+ regmap_write(dev->hw->mirror,
117+ dev->hw->index * 4, 0);
118+ }
119 wed_clr(dev, MTK_WED_CTRL,
120- MTK_WED_CTRL_WED_RX_BM_EN);
121+ MTK_WED_CTRL_WDMA_INT_AGENT_EN |
122+ MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
123+ MTK_WED_CTRL_WED_TX_BM_EN |
124+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
125 }
126
127 wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
128@@ -634,16 +645,13 @@ mtk_wed_detach(struct mtk_wed_device *dev)
129
130 mutex_lock(&hw_lock);
131
132- mtk_wed_stop(dev);
133+ mtk_wed_stop(dev, false);
134
135- wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
136- wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
137+ mtk_wdma_rx_reset(dev);
138
139 mtk_wed_reset(dev, MTK_WED_RESET_WED);
140
141- wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
142- wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
143- wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
144+ mtk_wdma_tx_reset(dev);
145
146 mtk_wed_free_buffer(dev);
147 mtk_wed_free_tx_rings(dev);
148@@ -653,8 +661,6 @@ mtk_wed_detach(struct mtk_wed_device *dev)
149 mtk_wed_wo_exit(hw);
150 }
151
152- mtk_wdma_rx_reset(dev);
153-
154 if (dev->wlan.bus_type == MTK_BUS_TYPE_PCIE) {
155 wlan_node = dev->wlan.pci_dev->dev.of_node;
156 if (of_dma_is_coherent(wlan_node))
157@@ -748,7 +754,7 @@ mtk_wed_hw_init_early(struct mtk_wed_device *dev)
158 {
159 u32 mask, set;
160
161- mtk_wed_stop(dev);
162+ mtk_wed_stop(dev, false);
163 mtk_wed_reset(dev, MTK_WED_RESET_WED);
164
165 if (dev->ver > MTK_WED_V1)
166@@ -961,44 +967,127 @@ mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size, int scale, bool tx)
167 }
168
169 static u32
170-mtk_wed_check_busy(struct mtk_wed_device *dev)
171+mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
172 {
173- if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY)
174- return true;
175-
176- if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) &
177- MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY)
178- return true;
179-
180- if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY)
181- return true;
182-
183- if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) &
184- MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
185- return true;
186-
187- if (wdma_r32(dev, MTK_WDMA_GLO_CFG) &
188- MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
189- return true;
190-
191- if (wed_r32(dev, MTK_WED_CTRL) &
192- (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY))
193+ if (wed_r32(dev, reg) & mask)
194 return true;
195
196 return false;
197 }
198
199 static int
200-mtk_wed_poll_busy(struct mtk_wed_device *dev)
201+mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
202 {
203- int sleep = 15000;
204+ int sleep = 1000;
205 int timeout = 100 * sleep;
206 u32 val;
207
208 return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
209- timeout, false, dev);
210+ timeout, false, dev, reg, mask);
211 }
212
213+static void
214+mtk_wed_rx_reset(struct mtk_wed_device *dev)
215+{
216+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
217+ u8 state = WO_STATE_SER_RESET;
218+ bool busy = false;
219+ int i;
220+
221+ mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, WO_CMD_CHANGE_STATE,
222+ &state, sizeof(state), true);
223+
224+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN);
225+ busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
226+ MTK_WED_WPDMA_RX_D_RX_DRV_BUSY);
227+ if (busy) {
228+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
229+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV);
230+ } else {
231+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
232+ MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
233+ MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
234+
235+ wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
236+ MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
237+ MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
238+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
239+ MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
240+ MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
241+
242+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
243+ }
244+
245+ /* reset rro qm */
246+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
247+ busy = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
248+ MTK_WED_CTRL_RX_RRO_QM_BUSY);
249+ if (busy) {
250+ mtk_wed_reset(dev, MTK_WED_RESET_RX_RRO_QM);
251+ } else {
252+ wed_set(dev, MTK_WED_RROQM_RST_IDX,
253+ MTK_WED_RROQM_RST_IDX_MIOD |
254+ MTK_WED_RROQM_RST_IDX_FDBK);
255+ wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
256+ }
257+
258+ /* reset route qm */
259+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
260+ busy = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
261+ MTK_WED_CTRL_RX_ROUTE_QM_BUSY);
262+ if (busy) {
263+ mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
264+ } else {
265+ wed_set(dev, MTK_WED_RTQM_GLO_CFG,
266+ MTK_WED_RTQM_Q_RST);
267+ }
268+
269+ /* reset tx wdma */
270+ mtk_wdma_tx_reset(dev);
271+
272+ /* reset tx wdma drv */
273+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN);
274+ mtk_wed_poll_busy(dev, MTK_WED_CTRL,
275+ MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
276+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV);
277+
278+ /* reset wed rx dma */
279+ busy = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
280+ MTK_WED_GLO_CFG_RX_DMA_BUSY);
281+ wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_RX_DMA_EN);
282+ if (busy) {
283+ mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA);
284+ } else {
285+ wed_set(dev, MTK_WED_RESET_IDX,
286+ MTK_WED_RESET_IDX_RX);
287+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
288+ }
289+
290+ /* reset rx bm */
291+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
292+ mtk_wed_poll_busy(dev, MTK_WED_CTRL,
293+ MTK_WED_CTRL_WED_RX_BM_BUSY);
294+ mtk_wed_reset(dev, MTK_WED_RESET_RX_BM);
295+
296+ /* wo change to enable state */
297+ state = WO_STATE_ENABLE;
298+ mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, WO_CMD_CHANGE_STATE,
299+ &state, sizeof(state), true);
300+
301+ /* reset wed rx data rings */
302+ for (i = 0; i < ARRAY_SIZE(dev->rx_ring); i++) {
303+ struct mtk_wdma_desc *desc = dev->rx_ring[i].desc;
304+
305+ if (!desc)
306+ continue;
307+
308+ mtk_wed_ring_reset(desc, MTK_WED_RX_RING_SIZE, 1, false);
309+ }
310+
311+ mtk_wed_free_rx_bm(dev);
312+}
313+
314+
315 static void
316 mtk_wed_reset_dma(struct mtk_wed_device *dev)
317 {
318@@ -1012,25 +1101,28 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
319 if (!desc)
320 continue;
321
322- mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE, dev->ver, true);
323+ mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE, 1, true);
324 }
325
326- if (mtk_wed_poll_busy(dev))
327- busy = mtk_wed_check_busy(dev);
328+ /* 1. Reset WED Tx DMA */
329+ wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_EN);
330+ busy = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_BUSY);
331
332 if (busy) {
333 mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
334 } else {
335 wed_w32(dev, MTK_WED_RESET_IDX,
336- MTK_WED_RESET_IDX_TX |
337- MTK_WED_RESET_IDX_RX);
338+ MTK_WED_RESET_IDX_TX);
339 wed_w32(dev, MTK_WED_RESET_IDX, 0);
340 }
341
342- wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
343- wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
344+ /* 2. Reset WDMA Rx DMA / Driver Engine */
345+ busy = !!mtk_wdma_rx_reset(dev);
346
347- mtk_wdma_rx_reset(dev);
348+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
349+ busy = !!(busy ||
350+ mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG,
351+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY));
352
353 if (busy) {
354 mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
355@@ -1047,15 +1139,30 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
356 MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
357 }
358
359+ /* 3. Reset WED Tx free agent and Tx buffer manager */
360+ wed_clr(dev, MTK_WED_CTRL,
361+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
362+
363 for (i = 0; i < 100; i++) {
364 val = wed_r32(dev, MTK_WED_TX_BM_INTF);
365 if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
366 break;
367 }
368-
369 mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
370+
371+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_BM_EN);
372 mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
373
374+ /* 4. Reset WED WPDMA Tx Driver Engine */
375+ busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
376+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY);
377+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
378+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
379+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
380+
381+ busy = !!(busy ||
382+ mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
383+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY));
384 if (busy) {
385 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
386 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
387@@ -1065,6 +1172,16 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
388 MTK_WED_WPDMA_RESET_IDX_TX |
389 MTK_WED_WPDMA_RESET_IDX_RX);
390 wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
391+ if (dev->ver > MTK_WED_V1) {
392+ wed_w32(dev, MTK_WED_RESET_IDX,
393+ MTK_WED_RESET_WPDMA_IDX_RX);
394+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
395+ }
396+ }
397+
398+ if (dev->ver > MTK_WED_V1) {
399+ dev->init_done = false;
400+ mtk_wed_rx_reset(dev);
401 }
402
403 }
404@@ -1253,6 +1370,9 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
405 {
406 int i, ret;
407
408+ if (dev->ver > MTK_WED_V1)
409+ ret = mtk_wed_rx_bm_alloc(dev);
410+
411 for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
412 if (!dev->tx_wdma[i].desc)
413 mtk_wed_wdma_rx_ring_setup(dev, i, 16);
414@@ -1340,10 +1460,6 @@ mtk_wed_attach(struct mtk_wed_device *dev)
415 goto error;
416
417 if (dev->ver > MTK_WED_V1) {
418- ret = mtk_wed_rx_bm_alloc(dev);
419- if (ret)
420- goto error;
421-
422 ret = mtk_wed_rro_alloc(dev);
423 if (ret)
424 goto error;
425@@ -1367,7 +1483,8 @@ out:
426 }
427
428 static int
429-mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
430+mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx,
431+ void __iomem *regs, bool reset)
432 {
433 struct mtk_wed_ring *ring = &dev->tx_ring[idx];
434
435@@ -1385,12 +1502,15 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
436
437 BUG_ON(idx > ARRAY_SIZE(dev->tx_ring));
438
439- if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, 1, true))
440- return -ENOMEM;
441-
442- if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
443- return -ENOMEM;
444+ if (!reset) {
445+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE,
446+ 1, true))
447+ return -ENOMEM;
448
449+ if (mtk_wed_wdma_rx_ring_setup(dev, idx,
450+ MTK_WED_WDMA_RING_SIZE))
451+ return -ENOMEM;
452+ }
453 ring->reg_base = MTK_WED_RING_TX(idx);
454 ring->wpdma = regs;
455
456@@ -1436,21 +1556,39 @@ mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
457 }
458
459 static int
460-mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
461+mtk_wed_rx_ring_setup(struct mtk_wed_device *dev,
462+ int idx, void __iomem *regs, bool reset)
463 {
464 struct mtk_wed_ring *ring = &dev->rx_ring[idx];
465
466 BUG_ON(idx > ARRAY_SIZE(dev->rx_ring));
467
468+ if (!reset) {
469+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE,
470+ 1, false))
471+ return -ENOMEM;
472
473- if (mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE, 1, false))
474- return -ENOMEM;
475-
476- if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
477- return -ENOMEM;
478-
479+ if (mtk_wed_wdma_tx_ring_setup(dev, idx,
480+ MTK_WED_WDMA_RING_SIZE))
481+ return -ENOMEM;
482+ } else {
483+ struct mtk_wed_ring *wdma = &dev->rx_wdma[idx];
484+
485+ mtk_wed_ring_reset(wdma->desc, MTK_WED_WDMA_RING_SIZE, dev->ver, true);
486+ if (idx == 0) {
487+ wed_w32(dev, MTK_WED_WDMA_RING_TX
488+ + MTK_WED_RING_OFS_BASE, wdma->desc_phys);
489+ wed_w32(dev, MTK_WED_WDMA_RING_TX
490+ + MTK_WED_RING_OFS_COUNT, MTK_WED_WDMA_RING_SIZE);
491+ wed_w32(dev, MTK_WED_WDMA_RING_TX
492+ + MTK_WED_RING_OFS_CPU_IDX, 0);
493+ wed_w32(dev, MTK_WED_WDMA_RING_TX
494+ + MTK_WED_RING_OFS_DMA_IDX, 0);
495+ }
496+ }
497 ring->reg_base = MTK_WED_RING_RX_DATA(idx);
498 ring->wpdma = regs;
499+ dev->hw->ring_num = idx + 1;
500
501 /* WPDMA -> WED */
502 wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
503diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
504index 8ef5253..27aba72 100644
505--- a/drivers/net/ethernet/mediatek/mtk_wed.h
506+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
507@@ -47,6 +47,7 @@ struct mtk_wed_hw {
508 u32 num_flows;
509 u32 wdma_phy;
510 char dirname[5];
511+ int ring_num;
512 int irq;
513 int index;
514 u32 ver;
515diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
516index 9d021e2..cfcd94f 100644
517--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
518+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
519@@ -38,11 +38,15 @@ struct mtk_wdma_desc {
520
521 #define MTK_WED_RESET 0x008
522 #define MTK_WED_RESET_TX_BM BIT(0)
523+#define MTK_WED_RESET_RX_BM BIT(1)
524 #define MTK_WED_RESET_TX_FREE_AGENT BIT(4)
525 #define MTK_WED_RESET_WPDMA_TX_DRV BIT(8)
526 #define MTK_WED_RESET_WPDMA_RX_DRV BIT(9)
527+#define MTK_WED_RESET_WPDMA_RX_D_DRV BIT(10)
528 #define MTK_WED_RESET_WPDMA_INT_AGENT BIT(11)
529 #define MTK_WED_RESET_WED_TX_DMA BIT(12)
530+#define MTK_WED_RESET_WED_RX_DMA BIT(13)
531+#define MTK_WED_RESET_WDMA_TX_DRV BIT(16)
532 #define MTK_WED_RESET_WDMA_RX_DRV BIT(17)
533 #define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
534 #define MTK_WED_RESET_RX_RRO_QM BIT(20)
535@@ -186,7 +190,12 @@ struct mtk_wdma_desc {
536
537 #define MTK_WED_RESET_IDX 0x20c
538 #define MTK_WED_RESET_IDX_TX GENMASK(3, 0)
539+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
540+#define MTK_WED_RESET_IDX_RX GENMASK(7, 6)
541+#else
542 #define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
543+#endif
544+#define MTK_WED_RESET_WPDMA_IDX_RX GENMASK(31, 30)
545
546 #define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
547 #define MTK_WED_RX_MIB(_n) (0x2e0 + (_n) * 4)
548@@ -300,6 +309,9 @@ struct mtk_wdma_desc {
549
550 #define MTK_WED_WPDMA_RX_D_GLO_CFG 0x75c
551 #define MTK_WED_WPDMA_RX_D_RX_DRV_EN BIT(0)
552+#define MTK_WED_WPDMA_RX_D_RX_DRV_BUSY BIT(1)
553+#define MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE BIT(3)
554+#define MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE BIT(4)
555 #define MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL GENMASK(11, 7)
556 #define MTK_WED_WPDMA_RX_D_RXD_READ_LEN GENMASK(31, 24)
557
558diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
559index 9a9cc1b..bb23a7a 100644
560--- a/include/linux/soc/mediatek/mtk_wed.h
561+++ b/include/linux/soc/mediatek/mtk_wed.h
562@@ -121,16 +121,16 @@ struct mtk_wed_device {
563 struct mtk_wed_ops {
564 int (*attach)(struct mtk_wed_device *dev);
565 int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring,
566- void __iomem *regs);
567+ void __iomem *regs, bool reset);
568 int (*txfree_ring_setup)(struct mtk_wed_device *dev,
569 void __iomem *regs);
570 int (*rx_ring_setup)(struct mtk_wed_device *dev, int ring,
571- void __iomem *regs);
572+ void __iomem *regs, bool reset);
573 int (*msg_update)(struct mtk_wed_device *dev, int cmd_id,
574 void *data, int len);
575 void (*detach)(struct mtk_wed_device *dev);
576
577- void (*stop)(struct mtk_wed_device *dev);
578+ void (*stop)(struct mtk_wed_device *dev, bool reset);
579 void (*start)(struct mtk_wed_device *dev, u32 irq_mask);
580 void (*reset_dma)(struct mtk_wed_device *dev);
581
582@@ -169,12 +169,13 @@ mtk_wed_device_attach(struct mtk_wed_device *dev)
583 #define mtk_wed_device_active(_dev) !!(_dev)->ops
584 #define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
585 #define mtk_wed_device_start(_dev, _mask) (_dev)->ops->start(_dev, _mask)
586-#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) \
587- (_dev)->ops->tx_ring_setup(_dev, _ring, _regs)
588+#define mtk_wed_device_stop(_dev, _reset) (_dev)->ops->stop(_dev, _reset)
589+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs, _reset) \
590+ (_dev)->ops->tx_ring_setup(_dev, _ring, _regs, _reset)
591 #define mtk_wed_device_txfree_ring_setup(_dev, _regs) \
592 (_dev)->ops->txfree_ring_setup(_dev, _regs)
593-#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) \
594- (_dev)->ops->rx_ring_setup(_dev, _ring, _regs)
595+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs, _reset) \
596+ (_dev)->ops->rx_ring_setup(_dev, _ring, _regs, _reset)
597 #define mtk_wed_device_update_msg(_dev, _id, _msg, _len) \
598 (_dev)->ops->msg_update(_dev, _id, _msg, _len)
599 #define mtk_wed_device_reg_read(_dev, _reg) \
600@@ -185,6 +186,8 @@ mtk_wed_device_attach(struct mtk_wed_device *dev)
601 (_dev)->ops->irq_get(_dev, _mask)
602 #define mtk_wed_device_irq_set_mask(_dev, _mask) \
603 (_dev)->ops->irq_set_mask(_dev, _mask)
604+#define mtk_wed_device_dma_reset(_dev) \
605+ (_dev)->ops->reset_dma(_dev)
606 #define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) \
607 (_dev)->ops->ppe_check(_dev, _skb, _reason, _hash)
608 #else
609@@ -194,14 +197,15 @@ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
610 }
611 #define mtk_wed_device_detach(_dev) do {} while (0)
612 #define mtk_wed_device_start(_dev, _mask) do {} while (0)
613-#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV
614+#define mtk_wed_device_stop(_dev, _reset) do {} while (0)
615+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs, _reset) -ENODEV
616 #define mtk_wed_device_txfree_ring_setup(_dev, _ring, _regs) -ENODEV
617-#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) -ENODEV
618-#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) -ENODEV
619+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs, _reset) -ENODEV
620 #define mtk_wed_device_reg_read(_dev, _reg) 0
621 #define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
622 #define mtk_wed_device_irq_get(_dev, _mask) 0
623 #define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0)
624+#define mtk_wed_device_dma_reset(_dev) do {} while (0)
625 #define mtk_wed_device_ppe_check(_dev, _hash) do {} while (0)
626 #endif
627
628--
6292.18.0
630