blob: bc87d6787abeaf9bce8e5599b4832370aa5a4390 [file] [log] [blame]
developer8cb3ac72022-07-04 10:55:14 +08001From bc8244ada5c668374813f7f9b73d990bf2695aaf Mon Sep 17 00:00:00 2001
2From: Sujuan Chen <sujuan.chen@mediatek.com>
3Date: Wed, 15 Jun 2022 14:38:54 +0800
 4Subject: [PATCH 8/8] 9997-add-wed-rx-support-for-mt7986
5
6Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
7---
8 arch/arm64/boot/dts/mediatek/mt7986a.dtsi | 42 +-
9 arch/arm64/boot/dts/mediatek/mt7986b.dtsi | 42 +-
10 drivers/net/ethernet/mediatek/Makefile | 2 +-
11 drivers/net/ethernet/mediatek/mtk_wed.c | 544 +++++++++++++++--
12 drivers/net/ethernet/mediatek/mtk_wed.h | 50 ++
13 drivers/net/ethernet/mediatek/mtk_wed_ccif.c | 121 ++++
14 drivers/net/ethernet/mediatek/mtk_wed_ccif.h | 45 ++
15 .../net/ethernet/mediatek/mtk_wed_debugfs.c | 90 +++
16 drivers/net/ethernet/mediatek/mtk_wed_mcu.c | 561 ++++++++++++++++++
17 drivers/net/ethernet/mediatek/mtk_wed_mcu.h | 125 ++++
18 drivers/net/ethernet/mediatek/mtk_wed_regs.h | 145 ++++-
19 drivers/net/ethernet/mediatek/mtk_wed_wo.c | 548 +++++++++++++++++
20 drivers/net/ethernet/mediatek/mtk_wed_wo.h | 334 +++++++++++
21 include/linux/soc/mediatek/mtk_wed.h | 63 +-
22 14 files changed, 2643 insertions(+), 69 deletions(-)
23 mode change 100644 => 100755 drivers/net/ethernet/mediatek/mtk_wed.c
24 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_ccif.c
25 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_ccif.h
26 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_mcu.c
27 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_mcu.h
28 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_wo.c
29 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_wo.h
30
31diff --git a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
32index 644255b35..ddcc0b809 100644
33--- a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
34+++ b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
35@@ -65,6 +65,12 @@
36 interrupt-parent = <&gic>;
37 interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
38 mediatek,wed_pcie = <&wed_pcie>;
39+ mediatek,ap2woccif = <&ap2woccif0>;
40+ mediatek,wocpu_ilm = <&wocpu0_ilm>;
41+ mediatek,wocpu_dlm = <&wocpu0_dlm>;
42+ mediatek,wocpu_boot = <&cpu_boot>;
43+ mediatek,wocpu_emi = <&wocpu0_emi>;
44+ mediatek,wocpu_data = <&wocpu_data>;
45 };
46
47 wed1: wed@15011000 {
48@@ -74,15 +80,26 @@
49 interrupt-parent = <&gic>;
50 interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
51 mediatek,wed_pcie = <&wed_pcie>;
52+ mediatek,ap2woccif = <&ap2woccif1>;
53+ mediatek,wocpu_ilm = <&wocpu1_ilm>;
54+ mediatek,wocpu_dlm = <&wocpu1_dlm>;
55+ mediatek,wocpu_boot = <&cpu_boot>;
56+ mediatek,wocpu_emi = <&wocpu1_emi>;
57+ mediatek,wocpu_data = <&wocpu_data>;
58 };
59
60- ap2woccif: ap2woccif@151A5000 {
61- compatible = "mediatek,ap2woccif";
62- reg = <0 0x151A5000 0 0x1000>,
63- <0 0x151AD000 0 0x1000>;
64+ ap2woccif0: ap2woccif@151A5000 {
65+ compatible = "mediatek,ap2woccif", "syscon";
66+ reg = <0 0x151A5000 0 0x1000>;
67 interrupt-parent = <&gic>;
68- interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
69- <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
70+ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
71+ };
72+
73+ ap2woccif1: ap2woccif@0x151AD000 {
74+ compatible = "mediatek,ap2woccif", "syscon";
75+ reg = <0 0x151AD000 0 0x1000>;
76+ interrupt-parent = <&gic>;
77+ interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
78 };
79
80 wocpu0_ilm: wocpu0_ilm@151E0000 {
81@@ -95,10 +112,17 @@
82 reg = <0 0x151F0000 0 0x8000>;
83 };
84
85- wocpu_dlm: wocpu_dlm@151E8000 {
86+ wocpu0_dlm: wocpu_dlm@151E8000 {
87+ compatible = "mediatek,wocpu_dlm";
88+ reg = <0 0x151E8000 0 0x2000>;
89+
90+ resets = <&ethsysrst 0>;
91+ reset-names = "wocpu_rst";
92+ };
93+
94+ wocpu1_dlm: wocpu_dlm@0x151F8000 {
95 compatible = "mediatek,wocpu_dlm";
96- reg = <0 0x151E8000 0 0x2000>,
97- <0 0x151F8000 0 0x2000>;
98+ reg = <0 0x151F8000 0 0x2000>;
99
100 resets = <&ethsysrst 0>;
101 reset-names = "wocpu_rst";
102diff --git a/arch/arm64/boot/dts/mediatek/mt7986b.dtsi b/arch/arm64/boot/dts/mediatek/mt7986b.dtsi
103index 67bf86f6a..6710b388b 100644
104--- a/arch/arm64/boot/dts/mediatek/mt7986b.dtsi
105+++ b/arch/arm64/boot/dts/mediatek/mt7986b.dtsi
106@@ -65,6 +65,12 @@
107 interrupt-parent = <&gic>;
108 interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
109 mediatek,wed_pcie = <&wed_pcie>;
110+ mediatek,ap2woccif = <&ap2woccif0>;
111+ mediatek,wocpu_ilm = <&wocpu0_ilm>;
112+ mediatek,wocpu_dlm = <&wocpu0_dlm>;
113+ mediatek,wocpu_boot = <&cpu_boot>;
114+ mediatek,wocpu_emi = <&wocpu0_emi>;
115+ mediatek,wocpu_data = <&wocpu_data>;
116 };
117
118 wed1: wed@15011000 {
119@@ -74,15 +80,26 @@
120 interrupt-parent = <&gic>;
121 interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
122 mediatek,wed_pcie = <&wed_pcie>;
123+ mediatek,ap2woccif = <&ap2woccif1>;
124+ mediatek,wocpu_ilm = <&wocpu1_ilm>;
125+ mediatek,wocpu_dlm = <&wocpu1_dlm>;
126+ mediatek,wocpu_boot = <&cpu_boot>;
127+ mediatek,wocpu_emi = <&wocpu1_emi>;
128+ mediatek,wocpu_data = <&wocpu_data>;
129 };
130
131- ap2woccif: ap2woccif@151A5000 {
132- compatible = "mediatek,ap2woccif";
133- reg = <0 0x151A5000 0 0x1000>,
134- <0 0x151AD000 0 0x1000>;
135+ ap2woccif0: ap2woccif@151A5000 {
136+ compatible = "mediatek,ap2woccif", "syscon";
137+ reg = <0 0x151A5000 0 0x1000>;
138 interrupt-parent = <&gic>;
139- interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
140- <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
141+ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
142+ };
143+
144+ ap2woccif1: ap2woccif@0x151AD000 {
145+ compatible = "mediatek,ap2woccif", "syscon";
146+ reg = <0 0x151AD000 0 0x1000>;
147+ interrupt-parent = <&gic>;
148+ interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
149 };
150
151 wocpu0_ilm: wocpu0_ilm@151E0000 {
152@@ -95,10 +112,17 @@
153 reg = <0 0x151F0000 0 0x8000>;
154 };
155
156- wocpu_dlm: wocpu_dlm@151E8000 {
157+ wocpu0_dlm: wocpu_dlm@151E8000 {
158+ compatible = "mediatek,wocpu_dlm";
159+ reg = <0 0x151E8000 0 0x2000>;
160+
161+ resets = <&ethsysrst 0>;
162+ reset-names = "wocpu_rst";
163+ };
164+
165+ wocpu1_dlm: wocpu_dlm@0x151F8000 {
166 compatible = "mediatek,wocpu_dlm";
167- reg = <0 0x151E8000 0 0x2000>,
168- <0 0x151F8000 0 0x2000>;
169+ reg = <0 0x151F8000 0 0x2000>;
170
171 resets = <&ethsysrst 0>;
172 reset-names = "wocpu_rst";
173diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
174index 3528f1b3c..0c724a55c 100644
175--- a/drivers/net/ethernet/mediatek/Makefile
176+++ b/drivers/net/ethernet/mediatek/Makefile
177@@ -10,5 +10,5 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o
178 ifdef CONFIG_DEBUG_FS
179 mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
180 endif
181-obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
182+obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o mtk_wed_wo.o mtk_wed_mcu.o mtk_wed_ccif.o
183 obj-$(CONFIG_NET_MEDIATEK_HNAT) += mtk_hnat/
184diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
185old mode 100644
186new mode 100755
187index 48b0353bb..c4aab12b0
188--- a/drivers/net/ethernet/mediatek/mtk_wed.c
189+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
190@@ -13,11 +13,19 @@
191 #include <linux/debugfs.h>
192 #include <linux/iopoll.h>
193 #include <linux/soc/mediatek/mtk_wed.h>
194+
195 #include "mtk_eth_soc.h"
196 #include "mtk_wed_regs.h"
197 #include "mtk_wed.h"
198 #include "mtk_ppe.h"
199-
200+#include "mtk_wed_mcu.h"
201+#include "mtk_wed_wo.h"
202+
203+struct wo_cmd_ring {
204+ u32 q_base;
205+ u32 cnt;
206+ u32 unit;
207+};
208 static struct mtk_wed_hw *hw_list[2];
209 static DEFINE_MUTEX(hw_lock);
210
211@@ -51,6 +59,12 @@ wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
212 wdma_m32(dev, reg, 0, mask);
213 }
214
215+static void
216+wdma_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
217+{
218+ wdma_m32(dev, reg, mask, 0);
219+}
220+
221 static u32
222 mtk_wed_read_reset(struct mtk_wed_device *dev)
223 {
224@@ -68,6 +82,48 @@ mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
225 WARN_ON_ONCE(1);
226 }
227
228+static void
229+mtk_wed_wo_reset(struct mtk_wed_device *dev)
230+{
231+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
232+ u8 state = WO_STATE_DISABLE;
233+ u8 state_done = WOIF_DISABLE_DONE;
234+ void __iomem *reg;
235+ u32 value;
236+ unsigned long timeout = jiffies + WOCPU_TIMEOUT;
237+
238+ mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, WO_CMD_CHANGE_STATE,
239+ &state, sizeof(state), false);
240+
241+ do {
242+ value = wed_r32(dev, MTK_WED_SCR0 + 4 * WED_DUMMY_CR_WO_STATUS);
243+ } while (value != state_done && !time_after(jiffies, timeout));
244+
245+ reg = ioremap(WOCPU_MCUSYS_RESET_ADDR, 4);
246+ value = readl((void *)reg);
247+ switch(dev->hw->index) {
248+ case 0:
249+ value |= WOCPU_WO0_MCUSYS_RESET_MASK;
250+ writel(value, (void *)reg);
251+ value &= ~WOCPU_WO0_MCUSYS_RESET_MASK;
252+ writel(value, (void *)reg);
253+ break;
254+ case 1:
255+ value |= WOCPU_WO1_MCUSYS_RESET_MASK;
256+ writel(value, (void *)reg);
257+ value &= ~WOCPU_WO1_MCUSYS_RESET_MASK;
258+ writel(value, (void *)reg);
259+ break;
260+ default:
261+ dev_err(dev->hw->dev, "wrong mtk_wed%d\n",
262+ dev->hw->index);
263+
264+ break;
265+ }
266+
267+ iounmap((void *)reg);
268+}
269+
270 static struct mtk_wed_hw *
271 mtk_wed_assign(struct mtk_wed_device *dev)
272 {
273@@ -205,6 +261,42 @@ free_pagelist:
274 kfree(page_list);
275 }
276
277+static int
278+mtk_wed_rx_bm_alloc(struct mtk_wed_device *dev)
279+{
280+ struct mtk_rxbm_desc *desc;
281+ dma_addr_t desc_phys;
282+ int ring_size;
283+
284+ ring_size = dev->wlan.rx_nbuf;
285+ dev->rx_buf_ring.size = ring_size;
286+ desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
287+ &desc_phys, GFP_KERNEL);
288+ if (!desc)
289+ return -ENOMEM;
290+
291+ dev->rx_buf_ring.desc = desc;
292+ dev->rx_buf_ring.desc_phys = desc_phys;
293+
294+ dev->wlan.init_rx_buf(dev, dev->wlan.rx_pkt);
295+ return 0;
296+}
297+
298+static void
299+mtk_wed_free_rx_bm(struct mtk_wed_device *dev)
300+{
301+ struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc;
302+ int ring_size =dev->rx_buf_ring.size;
303+
304+ if (!desc)
305+ return;
306+
307+ dev->wlan.release_rx_buf(dev);
308+
309+ dma_free_coherent(dev->hw->dev, ring_size * sizeof(*desc),
310+ desc, dev->buf_ring.desc_phys);
311+}
312+
313 static void
314 mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, int scale)
315 {
316@@ -226,13 +318,22 @@ mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
317 mtk_wed_free_ring(dev, &dev->tx_wdma[i], dev->ver);
318 }
319
320+static void
321+mtk_wed_free_rx_rings(struct mtk_wed_device *dev)
322+{
323+ mtk_wed_free_rx_bm(dev);
324+ mtk_wed_free_ring(dev, &dev->rro.rro_ring, 1);
325+}
326+
327 static void
328 mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
329 {
330 u32 wdma_mask;
331
332 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
333-
334+ if (dev->ver > MTK_WED_V1)
335+ wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
336+ GENMASK(1, 0));
337 /* wed control cr set */
338 wed_set(dev, MTK_WED_CTRL,
339 MTK_WED_CTRL_WDMA_INT_AGENT_EN |
340@@ -251,7 +352,7 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
341 wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
342 MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
343 } else {
344- /* initail tx interrupt trigger */
345+
346 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
347 MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
348 MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
349@@ -262,22 +363,30 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
350 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
351 dev->wlan.tx_tbit[1]));
352
353- /* initail txfree interrupt trigger */
354 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
355 MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
356 MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
357 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
358 dev->wlan.txfree_tbit));
359+
360+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
361+ MTK_WED_WPDMA_INT_CTRL_RX0_EN |
362+ MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
363+ MTK_WED_WPDMA_INT_CTRL_RX1_EN |
364+ MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
365+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
366+ dev->wlan.rx_tbit[0]) |
367+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
368+ dev->wlan.rx_tbit[1]));
369 }
370- /* initail wdma interrupt agent */
371 wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
372 if (dev->ver == MTK_WED_V1) {
373 wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
374 } else {
375 wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
376 wed_set(dev, MTK_WED_WDMA_INT_CTRL,
377- FIELD_PREP(MTK_WED_WDMA_INT_POLL_SRC_SEL,dev->wdma_idx));
378-
379+ FIELD_PREP(MTK_WED_WDMA_INT_POLL_SRC_SEL,
380+ dev->wdma_idx));
381 }
382
383 wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
384@@ -312,6 +421,39 @@ mtk_wed_set_512_support(struct mtk_wed_device *dev, bool en)
385 }
386 }
387
388+static void
389+mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
390+{
391+#define MTK_WFMDA_RX_DMA_EN BIT(2)
392+
393+ int timeout = 3;
394+ u32 cur_idx, regs;
395+
396+ do {
397+ regs = MTK_WED_WPDMA_RING_RX_DATA(idx) +
398+ MTK_WED_RING_OFS_COUNT;
399+ cur_idx = wed_r32(dev, regs);
400+ if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
401+ break;
402+
403+ usleep_range(100000, 200000);
404+ } while (timeout-- > 0);
405+
406+ if (timeout) {
407+ unsigned int val;
408+
409+ val = wifi_r32(dev, dev->wlan.wpdma_rx_glo -
410+ dev->wlan.phy_base);
411+ val |= MTK_WFMDA_RX_DMA_EN;
412+
413+ wifi_w32(dev, dev->wlan.wpdma_rx_glo -
414+ dev->wlan.phy_base, val);
415+ } else {
416+ dev_err(dev->hw->dev, "mtk_wed%d: rx dma enable failed!\n",
417+ dev->hw->index);
418+ }
419+}
420+
421 static void
422 mtk_wed_dma_enable(struct mtk_wed_device *dev)
423 {
424@@ -336,9 +478,14 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev)
425 wdma_set(dev, MTK_WDMA_GLO_CFG,
426 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
427 } else {
428+ int idx = 0;
429+
430 wed_set(dev, MTK_WED_WPDMA_CTRL,
431 MTK_WED_WPDMA_CTRL_SDL1_FIXED);
432
433+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
434+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
435+
436 wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
437 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
438 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
439@@ -346,6 +493,15 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev)
440 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
441 MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
442 MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
443+
444+ wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
445+ MTK_WED_WPDMA_RX_D_RX_DRV_EN |
446+ FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
447+ FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
448+ 0x2));
449+
450+ for (idx = 0; idx < MTK_WED_RX_QUEUES; idx++)
451+ mtk_wed_check_wfdma_rx_fill(dev, idx);
452 }
453 }
454
455@@ -363,19 +519,23 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
456 MTK_WED_GLO_CFG_TX_DMA_EN |
457 MTK_WED_GLO_CFG_RX_DMA_EN);
458
459- wdma_m32(dev, MTK_WDMA_GLO_CFG,
460+ wdma_clr(dev, MTK_WDMA_GLO_CFG,
461 MTK_WDMA_GLO_CFG_TX_DMA_EN |
462 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
463- MTK_WDMA_GLO_CFG_RX_INFO2_PRERES, 0);
464+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
465
466 if (dev->ver == MTK_WED_V1) {
467 regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
468- wdma_m32(dev, MTK_WDMA_GLO_CFG,
469- MTK_WDMA_GLO_CFG_RX_INFO3_PRERES, 0);
470+ wdma_clr(dev, MTK_WDMA_GLO_CFG,
471+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
472 } else {
473 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
474 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
475 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
476+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
477+ MTK_WED_WPDMA_RX_D_RX_DRV_EN);
478+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
479+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
480 }
481 }
482
483@@ -395,6 +555,11 @@ mtk_wed_stop(struct mtk_wed_device *dev)
484 MTK_WED_CTRL_WED_TX_BM_EN |
485 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
486
487+ if (dev->ver > MTK_WED_V1) {
488+ wed_clr(dev, MTK_WED_CTRL,
489+ MTK_WED_CTRL_WED_RX_BM_EN);
490+ }
491+
492 wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
493 wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
494 wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
developerf50c1802022-07-05 20:35:53 +0800495@@ -416,9 +581,17 @@ mtk_wed_detach(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800496 wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
497
498 mtk_wed_reset(dev, MTK_WED_RESET_WED);
developerf50c1802022-07-05 20:35:53 +0800499+ if (dev->ver > MTK_WED_V1)
500+ mtk_wed_wo_reset(dev);
developer8cb3ac72022-07-04 10:55:14 +0800501+
502+ wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
503+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
504+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
505
506 mtk_wed_free_buffer(dev);
507 mtk_wed_free_tx_rings(dev);
developerf50c1802022-07-05 20:35:53 +0800508+ if (dev->ver > MTK_WED_V1)
509+ mtk_wed_free_rx_rings(dev);
developer8cb3ac72022-07-04 10:55:14 +0800510
511 if (dev->wlan.bus_type == MTK_BUS_TYPE_PCIE) {
512 wlan_node = dev->wlan.pci_dev->dev.of_node;
developerf50c1802022-07-05 20:35:53 +0800513@@ -477,7 +650,6 @@ mtk_wed_bus_init(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800514 value = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
515 value = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
516
517- /* pcie interrupt status trigger register */
518 wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
519 wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
520
developerf50c1802022-07-05 20:35:53 +0800521@@ -501,6 +673,9 @@ mtk_wed_set_wpdma(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800522 wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
523 wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
524 wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
525+
526+ wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
527+ wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
528 } else {
529 wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
530 }
531@@ -549,24 +722,92 @@ mtk_wed_hw_init_early(struct mtk_wed_device *dev)
532 FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL,
533 MTK_WDMA_RING_RX(0)));
534 }
535+}
536+
537+static void
538+mtk_wed_rx_bm_hw_init(struct mtk_wed_device *dev)
539+{
540+ wed_w32(dev, MTK_WED_RX_BM_RX_DMAD,
541+ FIELD_PREP(MTK_WED_RX_BM_RX_DMAD_SDL0, dev->wlan.rx_pkt_size));
542+
543+ wed_w32(dev, MTK_WED_RX_BM_BASE, dev->rx_buf_ring.desc_phys);
544
545+ wed_w32(dev, MTK_WED_RX_BM_INIT_PTR, MTK_WED_RX_BM_INIT_SW_TAIL |
546+ FIELD_PREP(MTK_WED_RX_BM_SW_TAIL, dev->wlan.rx_pkt));
547+
548+ wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH,
549+ FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff));
550+
551+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
552 }
553
554 static void
555-mtk_wed_hw_init(struct mtk_wed_device *dev)
556+mtk_wed_rro_hw_init(struct mtk_wed_device *dev)
557+{
558+ wed_w32(dev, MTK_WED_RROQM_MIOD_CFG,
559+ FIELD_PREP(MTK_WED_RROQM_MIOD_MID_DW, 0x70 >> 2) |
560+ FIELD_PREP(MTK_WED_RROQM_MIOD_MOD_DW, 0x10 >> 2) |
561+ FIELD_PREP(MTK_WED_RROQM_MIOD_ENTRY_DW,
562+ MTK_WED_MIOD_ENTRY_CNT >> 2));
563+
564+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL0, dev->rro.miod_desc_phys);
565+
566+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL1,
567+ FIELD_PREP(MTK_WED_RROQM_MIOD_CNT, MTK_WED_MIOD_CNT));
568+
569+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL0, dev->rro.fdbk_desc_phys);
570+
571+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL1,
572+ FIELD_PREP(MTK_WED_RROQM_FDBK_CNT, MTK_WED_FB_CMD_CNT));
573+
574+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL2, 0);
575+
576+ wed_w32(dev, MTK_WED_RROQ_BASE_L, dev->rro.rro_ring.desc_phys);
577+
578+ wed_set(dev, MTK_WED_RROQM_RST_IDX,
579+ MTK_WED_RROQM_RST_IDX_MIOD |
580+ MTK_WED_RROQM_RST_IDX_FDBK);
581+
582+ wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
583+
584+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL2, MTK_WED_MIOD_CNT -1);
585+
586+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
587+}
588+
589+static void
590+mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
591+{
592+ wed_w32(dev, MTK_WED_RESET, MTK_WED_RESET_RX_ROUTE_QM);
593+
594+ do {
595+ udelay(100);
596+
597+ if (!(wed_r32(dev, MTK_WED_RESET) & MTK_WED_RESET_RX_ROUTE_QM))
598+ break;
599+ } while (1);
600+
601+ /* configure RX_ROUTE_QM */
602+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
603+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
604+ wed_set(dev, MTK_WED_RTQM_GLO_CFG,
605+ FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
606+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
607+
608+ /* enable RX_ROUTE_QM */
609+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
610+}
611+
612+static void
613+mtk_wed_tx_hw_init(struct mtk_wed_device *dev)
614 {
615 int size = dev->buf_ring.size;
616 int rev_size = MTK_WED_TX_RING_SIZE / 2;
617 int thr = 1;
618
619- if (dev->init_done)
620- return;
621-
622- dev->init_done = true;
623- mtk_wed_set_ext_int(dev, false);
624-
625 if (dev->ver > MTK_WED_V1) {
626- size = MTK_WED_WDMA_RING_SIZE * 2 + dev->buf_ring.size;
627+ size = MTK_WED_WDMA_RING_SIZE * ARRAY_SIZE(dev->tx_wdma) +
628+ dev->buf_ring.size;
629 rev_size = size;
630 thr = 0;
631 }
developerf50c1802022-07-05 20:35:53 +0800632@@ -609,13 +852,48 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800633 }
634
635 static void
636-mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size, int scale)
637+mtk_wed_rx_hw_init(struct mtk_wed_device *dev)
638 {
639+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
640+ MTK_WED_WPDMA_RX_D_RST_CRX_IDX0 |
641+ MTK_WED_WPDMA_RX_D_RST_CRX_IDX1 |
642+ MTK_WED_WPDMA_RX_D_RST_DRV_IDX0 |
643+ MTK_WED_WPDMA_RX_D_RST_DRV_IDX1);
644+
645+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
646+
647+ mtk_wed_rx_bm_hw_init(dev);
648+ mtk_wed_rro_hw_init(dev);
649+ mtk_wed_route_qm_hw_init(dev);
650+}
651+
652+static void
653+mtk_wed_hw_init(struct mtk_wed_device *dev)
654+{
655+ if (dev->init_done)
656+ return;
657+
658+ dev->init_done = true;
659+ mtk_wed_set_ext_int(dev, false);
660+ mtk_wed_tx_hw_init(dev);
661+ if (dev->ver > MTK_WED_V1)
662+ mtk_wed_rx_hw_init(dev);
663+}
664+
665+static void
666+mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size, int scale, bool tx)
667+{
668+ __le32 ctrl;
669 int i;
670
671+ if (tx)
672+ ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
673+ else
674+ ctrl = cpu_to_le32(MTK_WFDMA_DESC_CTRL_TO_HOST);
675+
676 for (i = 0; i < size; i++) {
677 desc->buf0 = 0;
678- desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
679+ desc->ctrl = ctrl;
680 desc->buf1 = 0;
681 desc->info = 0;
682 desc += scale;
developerf50c1802022-07-05 20:35:53 +0800683@@ -674,7 +952,7 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800684 if (!desc)
685 continue;
686
687- mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE, dev->ver);
688+ mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE, dev->ver, true);
689 }
690
691 if (mtk_wed_poll_busy(dev))
developerf50c1802022-07-05 20:35:53 +0800692@@ -729,9 +1007,24 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800693
694 }
695
696+static int
697+mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
698+ int size)
699+{
700+ ring->desc = dma_alloc_coherent(dev->hw->dev,
701+ size * sizeof(*ring->desc),
702+ &ring->desc_phys, GFP_KERNEL);
703+ if (!ring->desc)
704+ return -ENOMEM;
705+
706+ ring->size = size;
707+ memset(ring->desc, 0, size);
708+ return 0;
709+}
710+
711 static int
712 mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
713- int size, int scale)
714+ int size, int scale, bool tx)
715 {
716 ring->desc = dma_alloc_coherent(dev->hw->dev,
717 size * sizeof(*ring->desc) * scale,
developerf50c1802022-07-05 20:35:53 +0800718@@ -740,17 +1033,18 @@ mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
developer8cb3ac72022-07-04 10:55:14 +0800719 return -ENOMEM;
720
721 ring->size = size;
722- mtk_wed_ring_reset(ring->desc, size, scale);
723+ mtk_wed_ring_reset(ring->desc, size, scale, tx);
724
725 return 0;
726 }
727
728 static int
729-mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
730+mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
731 {
732 struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
733
734- if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, dev->ver))
735+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
736+ dev->ver, true))
737 return -ENOMEM;
738
739 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
developerf50c1802022-07-05 20:35:53 +0800740@@ -767,22 +1061,143 @@ mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
developer8cb3ac72022-07-04 10:55:14 +0800741 return 0;
742 }
743
744+static int
745+mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
746+{
747+ struct mtk_wed_ring *wdma = &dev->rx_wdma[idx];
748+
749+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
750+ dev->ver, true))
751+ return -ENOMEM;
752+
753+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
754+ wdma->desc_phys);
755+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
756+ size);
757+ wdma_w32(dev,
758+ MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
759+ wdma_w32(dev,
760+ MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);
761+
762+ if (idx == 0) {
763+ wed_w32(dev, MTK_WED_WDMA_RING_TX
764+ + MTK_WED_RING_OFS_BASE, wdma->desc_phys);
765+ wed_w32(dev, MTK_WED_WDMA_RING_TX
766+ + MTK_WED_RING_OFS_COUNT, size);
767+ wed_w32(dev, MTK_WED_WDMA_RING_TX
768+ + MTK_WED_RING_OFS_CPU_IDX, 0);
769+ wed_w32(dev, MTK_WED_WDMA_RING_TX
770+ + MTK_WED_RING_OFS_DMA_IDX, 0);
771+ }
772+
773+ return 0;
774+}
775+
776+static int
777+mtk_wed_rro_alloc(struct mtk_wed_device *dev)
778+{
779+ struct device_node *np, *node = dev->hw->node;
780+ struct mtk_wed_ring *ring;
781+ struct resource res;
782+ int ret;
783+
784+ np = of_parse_phandle(node, "mediatek,wocpu_dlm", 0);
785+ if (!np)
786+ return -ENODEV;
787+
788+ ret = of_address_to_resource(np, 0, &res);
789+ if (ret)
790+ return ret;
791+
792+ dev->rro.rro_desc = ioremap(res.start, resource_size(&res));
793+
794+ ring = &dev->rro.rro_ring;
795+
796+ dev->rro.miod_desc_phys = res.start;
797+
798+ dev->rro.mcu_view_miod = MTK_WED_WOCPU_VIEW_MIOD_BASE;
799+ dev->rro.fdbk_desc_phys = MTK_WED_MIOD_ENTRY_CNT * MTK_WED_MIOD_CNT
800+ + dev->rro.miod_desc_phys;
801+
802+ if (mtk_wed_rro_ring_alloc(dev, ring, MTK_WED_RRO_QUE_CNT))
803+ return -ENOMEM;
804+
805+ return 0;
806+}
807+
808+static int
809+mtk_wed_rro_cfg(struct mtk_wed_device *dev)
810+{
811+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
812+ struct {
813+ struct wo_cmd_ring ring[2];
814+
815+ u32 wed;
816+ u8 ver;
817+ } req = {
818+ .ring = {
819+ [0] = {
820+ .q_base = dev->rro.mcu_view_miod,
821+ .cnt = MTK_WED_MIOD_CNT,
822+ .unit = MTK_WED_MIOD_ENTRY_CNT,
823+ },
824+ [1] = {
825+ .q_base = dev->rro.mcu_view_miod +
826+ MTK_WED_MIOD_ENTRY_CNT *
827+ MTK_WED_MIOD_CNT,
828+ .cnt = MTK_WED_FB_CMD_CNT,
829+ .unit = 4,
830+ },
831+ },
832+ .wed = 0,
833+ };
834+
835+ return mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, WO_CMD_WED_CFG,
836+ &req, sizeof(req), true);
837+}
838+
839+static int
840+mtk_wed_send_msg(struct mtk_wed_device *dev, int cmd_id, void *data, int len)
841+{
842+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
843+
developerf50c1802022-07-05 20:35:53 +0800844+ if (dev->ver == MTK_WED_V1)
845+ return 0;
846+
developer8cb3ac72022-07-04 10:55:14 +0800847+ return mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, cmd_id, data, len, true);
848+}
849+
850+static void
851+mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb,
852+ u32 reason, u32 hash)
853+{
854+ int idx = dev->hw->index;
855+ struct mtk_eth *eth = dev->hw->eth;
856+ struct ethhdr *eh;
857+
858+ if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) {
859+ if (!skb)
860+ return;
861+
862+ skb_set_mac_header(skb, 0);
863+ eh = eth_hdr(skb);
864+ skb->protocol = eh->h_proto;
865+ mtk_ppe_check_skb(eth->ppe[idx], skb, hash);
866+ }
867+}
868+
869 static void
870 mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
871 {
872- u32 wdma_mask;
873- int i;
874+ int i, ret;
875
876 for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
877 if (!dev->tx_wdma[i].desc)
878- mtk_wed_wdma_ring_setup(dev, i, 16);
879-
880+ mtk_wed_wdma_rx_ring_setup(dev, i, 16);
881
882 mtk_wed_hw_init(dev);
883
884 mtk_wed_set_int(dev, irq_mask);
885-
886-
887 mtk_wed_set_ext_int(dev, true);
888
889 if (dev->ver == MTK_WED_V1) {
developerf50c1802022-07-05 20:35:53 +0800890@@ -797,6 +1212,19 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
developer8cb3ac72022-07-04 10:55:14 +0800891 val |= BIT(0);
892 regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
893 } else {
894+ /* driver set mid ready and only once */
895+ wed_w32(dev, MTK_WED_EXT_INT_MASK1,
896+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
897+ wed_w32(dev, MTK_WED_EXT_INT_MASK2,
898+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
899+
900+ wed_r32(dev, MTK_WED_EXT_INT_MASK1);
901+ wed_r32(dev, MTK_WED_EXT_INT_MASK2);
902+
903+ ret = mtk_wed_rro_cfg(dev);
904+ if (ret)
905+ return;
906+
907 mtk_wed_set_512_support(dev, true);
908 }
909
developerf50c1802022-07-05 20:35:53 +0800910@@ -841,9 +1269,17 @@ mtk_wed_attach(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800911 wed_r32(dev, MTK_WED_REV_ID));
912
913 ret = mtk_wed_buffer_alloc(dev);
914- if (ret) {
915- mtk_wed_detach(dev);
916- goto out;
917+ if (ret)
918+ goto error;
919+
920+ if (dev->ver > MTK_WED_V1) {
921+ ret = mtk_wed_rx_bm_alloc(dev);
922+ if (ret)
923+ goto error;
924+
925+ ret = mtk_wed_rro_alloc(dev);
926+ if (ret)
927+ goto error;
928 }
929
930 mtk_wed_hw_init_early(dev);
developerf50c1802022-07-05 20:35:53 +0800931@@ -851,7 +1287,12 @@ mtk_wed_attach(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800932 if (dev->ver == MTK_WED_V1)
933 regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
934 BIT(hw->index), 0);
935+ else
936+ ret = mtk_wed_wo_init(hw);
937
938+error:
939+ if (ret)
940+ mtk_wed_detach(dev);
941 out:
942 mutex_unlock(&hw_lock);
943
developerf50c1802022-07-05 20:35:53 +0800944@@ -877,10 +1318,10 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
developer8cb3ac72022-07-04 10:55:14 +0800945
946 BUG_ON(idx > ARRAY_SIZE(dev->tx_ring));
947
948- if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, 1))
949+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, 1, true))
950 return -ENOMEM;
951
952- if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
953+ if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
954 return -ENOMEM;
955
956 ring->reg_base = MTK_WED_RING_TX(idx);
developerf50c1802022-07-05 20:35:53 +0800957@@ -927,6 +1368,35 @@ mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
developer8cb3ac72022-07-04 10:55:14 +0800958 return 0;
959 }
960
961+static int
962+mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
963+{
964+ struct mtk_wed_ring *ring = &dev->rx_ring[idx];
965+
966+ BUG_ON(idx > ARRAY_SIZE(dev->rx_ring));
967+
968+
969+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE, 1, false))
970+ return -ENOMEM;
971+
972+ if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
973+ return -ENOMEM;
974+
975+ ring->reg_base = MTK_WED_RING_RX_DATA(idx);
976+ ring->wpdma = regs;
977+
978+ /* WPDMA -> WED */
979+ wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
980+ wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_RX_RING_SIZE);
981+
982+ wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_BASE,
983+ ring->desc_phys);
984+ wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_COUNT,
985+ MTK_WED_RX_RING_SIZE);
986+
987+ return 0;
988+}
989+
990 static u32
991 mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
992 {
developerf50c1802022-07-05 20:35:53 +0800993@@ -1014,6 +1484,8 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
developer8cb3ac72022-07-04 10:55:14 +0800994 .attach = mtk_wed_attach,
995 .tx_ring_setup = mtk_wed_tx_ring_setup,
996 .txfree_ring_setup = mtk_wed_txfree_ring_setup,
997+ .rx_ring_setup = mtk_wed_rx_ring_setup,
998+ .msg_update = mtk_wed_send_msg,
999 .start = mtk_wed_start,
1000 .stop = mtk_wed_stop,
1001 .reset_dma = mtk_wed_reset_dma,
developerf50c1802022-07-05 20:35:53 +08001002@@ -1022,6 +1494,7 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
developer8cb3ac72022-07-04 10:55:14 +08001003 .irq_get = mtk_wed_irq_get,
1004 .irq_set_mask = mtk_wed_irq_set_mask,
1005 .detach = mtk_wed_detach,
1006+ .ppe_check = mtk_wed_ppe_check,
1007 };
1008 struct device_node *eth_np = eth->dev->of_node;
1009 struct platform_device *pdev;
1010diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
1011index 9b17b7405..ec79b0d42 100644
1012--- a/drivers/net/ethernet/mediatek/mtk_wed.h
1013+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
1014@@ -13,6 +13,7 @@
1015 #define MTK_WED_PKT_SIZE 1900
1016 #define MTK_WED_BUF_SIZE 2048
1017 #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
1018+#define MTK_WED_RX_RING_SIZE 1536
1019
1020 #define MTK_WED_TX_RING_SIZE 2048
1021 #define MTK_WED_WDMA_RING_SIZE 512
1022@@ -21,8 +22,15 @@
1023 #define MTK_WED_PER_GROUP_PKT 128
1024
1025 #define MTK_WED_FBUF_SIZE 128
1026+#define MTK_WED_MIOD_CNT 16
1027+#define MTK_WED_FB_CMD_CNT 1024
1028+#define MTK_WED_RRO_QUE_CNT 8192
1029+#define MTK_WED_MIOD_ENTRY_CNT 128
1030+
1031+#define MODULE_ID_WO 1
1032
1033 struct mtk_eth;
1034+struct mtk_wed_wo;
1035
1036 struct mtk_wed_hw {
1037 struct device_node *node;
1038@@ -34,12 +42,14 @@ struct mtk_wed_hw {
1039 struct regmap *mirror;
1040 struct dentry *debugfs_dir;
1041 struct mtk_wed_device *wed_dev;
1042+ struct mtk_wed_wo *wed_wo;
1043 u32 debugfs_reg;
1044 u32 num_flows;
1045 u32 wdma_phy;
1046 char dirname[5];
1047 int irq;
1048 int index;
1049+ u32 ver;
1050 };
1051
1052 struct mtk_wdma_info {
1053@@ -66,6 +76,18 @@ wed_r32(struct mtk_wed_device *dev, u32 reg)
1054 return val;
1055 }
1056
1057+static inline u32
1058+wifi_r32(struct mtk_wed_device *dev, u32 reg)
1059+{
1060+ return readl(dev->wlan.base + reg);
1061+}
1062+
1063+static inline void
1064+wifi_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
1065+{
1066+ writel(val, dev->wlan.base + reg);
1067+}
1068+
1069 static inline void
1070 wdma_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
1071 {
1072@@ -114,6 +136,23 @@ wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
1073 writel(val, dev->txfree_ring.wpdma + reg);
1074 }
1075
1076+static inline u32
1077+wpdma_rx_r32(struct mtk_wed_device *dev, int ring, u32 reg)
1078+{
1079+ if (!dev->rx_ring[ring].wpdma)
1080+ return 0;
1081+
1082+ return readl(dev->rx_ring[ring].wpdma + reg);
1083+}
1084+
1085+static inline void
1086+wpdma_rx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val)
1087+{
1088+ if (!dev->rx_ring[ring].wpdma)
1089+ return;
1090+
1091+ writel(val, dev->rx_ring[ring].wpdma + reg);
1092+}
1093 void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
1094 void __iomem *wdma, u32 wdma_phy, int index);
1095 void mtk_wed_exit(void);
1096@@ -146,4 +185,15 @@ static inline void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
1097 }
1098 #endif
1099
1100+int wed_wo_hardware_init(struct mtk_wed_wo *wo, irq_handler_t isr);
1101+int wed_wo_mcu_init(struct mtk_wed_wo *wo);
1102+int mtk_wed_exception_init(struct mtk_wed_wo *wo);
1103+void mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo, struct sk_buff *skb);
1104+int mtk_wed_mcu_cmd_sanity_check(struct mtk_wed_wo *wo, struct sk_buff *skb);
1105+void wed_wo_mcu_debugfs(struct mtk_wed_hw *hw, struct dentry *dir);
1106+void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo, struct sk_buff *skb);
1107+int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo,int to_id, int cmd,
1108+ const void *data, int len, bool wait_resp);
1109+int mtk_wed_wo_rx_poll(struct napi_struct *napi, int budget);
1110+
1111 #endif
1112diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ccif.c b/drivers/net/ethernet/mediatek/mtk_wed_ccif.c
1113new file mode 100644
1114index 000000000..732ffc8cf
1115--- /dev/null
1116+++ b/drivers/net/ethernet/mediatek/mtk_wed_ccif.c
1117@@ -0,0 +1,121 @@
1118+// SPDX-License-Identifier: GPL-2.0-only
1119+
1120+#include <linux/soc/mediatek/mtk_wed.h>
1121+#include <linux/of_address.h>
1122+#include <linux/mfd/syscon.h>
1123+#include <linux/of_irq.h>
1124+#include "mtk_wed_ccif.h"
1125+#include "mtk_wed_regs.h"
1126+#include "mtk_wed_wo.h"
1127+
1128+static inline void woif_set_isr(struct mtk_wed_wo *wo, u32 mask)
1129+{
1130+ woccif_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
1131+}
1132+
1133+static inline u32 woif_get_csr(struct mtk_wed_wo *wo)
1134+{
1135+ u32 val;
1136+
1137+ val = woccif_r32(wo, MTK_WED_WO_CCIF_RCHNUM);
1138+
1139+ return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
1140+}
1141+
1142+static inline void woif_set_ack(struct mtk_wed_wo *wo, u32 mask)
1143+{
1144+ woccif_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
1145+}
1146+
1147+static inline void woif_kickout(struct mtk_wed_wo *wo)
1148+{
1149+ woccif_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
1150+ woccif_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
1151+}
1152+
1153+static inline void woif_clear_int(struct mtk_wed_wo *wo, u32 mask)
1154+{
1155+ woccif_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
1156+ woccif_r32(wo, MTK_WED_WO_CCIF_RCHNUM);
1157+}
1158+
1159+int wed_wo_hardware_init(struct mtk_wed_wo *wo, irq_handler_t isr)
1160+{
1161+ static const struct wed_wo_drv_ops wo_drv_ops = {
1162+ .kickout = woif_kickout,
1163+ .set_ack = woif_set_ack,
1164+ .set_isr = woif_set_isr,
1165+ .get_csr = woif_get_csr,
1166+ .clear_int = woif_clear_int,
1167+ };
1168+ struct device_node *np, *node = wo->hw->node;
1169+ struct wed_wo_queue_regs queues;
1170+ struct regmap *regs;
1171+ int ret;
1172+
1173+ np = of_parse_phandle(node, "mediatek,ap2woccif", 0);
1174+ if (!np)
1175+ return -ENODEV;
1176+
1177+ regs = syscon_regmap_lookup_by_phandle(np, NULL);
1178+ if (!regs)
1179+ return -ENODEV;
1180+
1181+ wo->drv_ops = &wo_drv_ops;
1182+
1183+ wo->ccif.regs = regs;
1184+ wo->ccif.irq = irq_of_parse_and_map(np, 0);
1185+
1186+ spin_lock_init(&wo->ccif.irq_lock);
1187+
1188+ ret = request_irq(wo->ccif.irq, isr, IRQF_TRIGGER_HIGH,
1189+ "wo_ccif_isr", wo);
1190+ if (ret)
1191+ goto free_irq;
1192+
1193+ queues.desc_base = MTK_WED_WO_CCIF_DUMMY1;
1194+ queues.ring_size = MTK_WED_WO_CCIF_DUMMY2;
1195+ queues.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;
1196+ queues.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
1197+
1198+ ret = mtk_wed_wo_q_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
1199+ MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
1200+ &queues);
1201+
1202+ if (ret)
1203+ goto free_irq;
1204+
1205+ queues.desc_base = MTK_WED_WO_CCIF_DUMMY5;
1206+ queues.ring_size = MTK_WED_WO_CCIF_DUMMY6;
1207+ queues.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;
1208+ queues.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
1209+
1210+ ret = mtk_wed_wo_q_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
1211+ MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
1212+ &queues);
1213+ if (ret)
1214+ goto free_irq;
1215+
1216+ wo->ccif.q_int_mask = MTK_WED_WO_RXCH_INT_MASK;
1217+
1218+ ret = mtk_wed_wo_q_init(wo, mtk_wed_wo_rx_poll);
1219+ if (ret)
1220+ goto free_irq;
1221+
1222+ wo->ccif.q_exep_mask = MTK_WED_WO_EXCEPTION_INT_MASK;
1223+ wo->ccif.irqmask = MTK_WED_WO_ALL_INT_MASK;
1224+
1225+ /* rx queue irqmask */
1226+ wo->drv_ops->set_isr(wo, wo->ccif.irqmask);
1227+
1228+ return 0;
1229+
1230+free_irq:
1231+ devm_free_irq(wo->hw->dev, wo->ccif.irq, wo);
1232+
1233+ return ret;
1234+}
1235+
1236+static void wed_wo_hardware_exit(struct mtk_wed_wo *wo)
1237+{
1238+}
1239diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ccif.h b/drivers/net/ethernet/mediatek/mtk_wed_ccif.h
1240new file mode 100644
1241index 000000000..68ade449c
1242--- /dev/null
1243+++ b/drivers/net/ethernet/mediatek/mtk_wed_ccif.h
1244@@ -0,0 +1,45 @@
1245+// SPDX-License-Identifier: GPL-2.0-only
1246+
1247+#ifndef __MTK_WED_CCIF_H
1248+#define __MTK_WED_CCIF_H
1249+
1250+#define MTK_WED_WO_RING_SIZE 256
1251+#define MTK_WED_WO_CMD_LEN 1504
1252+
1253+#define MTK_WED_WO_TXCH_NUM 0
1254+#define MTK_WED_WO_RXCH_NUM 1
1255+#define MTK_WED_WO_RXCH_WO_EXCEPTION 7
1256+
1257+#define MTK_WED_WO_TXCH_INT_MASK BIT(0)
1258+#define MTK_WED_WO_RXCH_INT_MASK BIT(1)
1259+#define MTK_WED_WO_EXCEPTION_INT_MASK BIT(7)
1260+#define MTK_WED_WO_ALL_INT_MASK MTK_WED_WO_RXCH_INT_MASK | \
1261+ MTK_WED_WO_EXCEPTION_INT_MASK
1262+
1263+#define MTK_WED_WO_CCIF_BUSY 0x004
1264+#define MTK_WED_WO_CCIF_START 0x008
1265+#define MTK_WED_WO_CCIF_TCHNUM 0x00c
1266+#define MTK_WED_WO_CCIF_RCHNUM 0x010
1267+#define MTK_WED_WO_CCIF_RCHNUM_MASK GENMASK(7, 0)
1268+
1269+#define MTK_WED_WO_CCIF_ACK 0x014
1270+#define MTK_WED_WO_CCIF_IRQ0_MASK 0x018
1271+#define MTK_WED_WO_CCIF_IRQ1_MASK 0x01c
1272+#define MTK_WED_WO_CCIF_DUMMY1 0x020
1273+#define MTK_WED_WO_CCIF_DUMMY2 0x024
1274+#define MTK_WED_WO_CCIF_DUMMY3 0x028
1275+#define MTK_WED_WO_CCIF_DUMMY4 0x02c
1276+#define MTK_WED_WO_CCIF_SHADOW1 0x030
1277+#define MTK_WED_WO_CCIF_SHADOW2 0x034
1278+#define MTK_WED_WO_CCIF_SHADOW3 0x038
1279+#define MTK_WED_WO_CCIF_SHADOW4 0x03c
1280+#define MTK_WED_WO_CCIF_DUMMY5 0x050
1281+#define MTK_WED_WO_CCIF_DUMMY6 0x054
1282+#define MTK_WED_WO_CCIF_DUMMY7 0x058
1283+#define MTK_WED_WO_CCIF_DUMMY8 0x05c
1284+#define MTK_WED_WO_CCIF_SHADOW5 0x060
1285+#define MTK_WED_WO_CCIF_SHADOW6 0x064
1286+#define MTK_WED_WO_CCIF_SHADOW7 0x068
1287+#define MTK_WED_WO_CCIF_SHADOW8 0x06c
1288+
1289+#endif
1290diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
1291index f420f187e..fea7ae2fc 100644
1292--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
1293+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
1294@@ -2,6 +2,7 @@
1295 /* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
1296
1297 #include <linux/seq_file.h>
1298+#include <linux/soc/mediatek/mtk_wed.h>
1299 #include "mtk_wed.h"
1300 #include "mtk_wed_regs.h"
1301
1302@@ -18,6 +19,8 @@ enum {
1303 DUMP_TYPE_WDMA,
1304 DUMP_TYPE_WPDMA_TX,
1305 DUMP_TYPE_WPDMA_TXFREE,
1306+ DUMP_TYPE_WPDMA_RX,
1307+ DUMP_TYPE_WED_RRO,
1308 };
1309
1310 #define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
1311@@ -36,6 +39,10 @@ enum {
1312
1313 #define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n)
1314 #define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE)
1315+#define DUMP_WPDMA_RX_RING(_n) DUMP_RING("WPDMA_RX" #_n, 0, DUMP_TYPE_WPDMA_RX, _n)
1316+#define DUMP_WED_RRO_RING(_base)DUMP_RING("WED_RRO_MIOD", MTK_##_base, DUMP_TYPE_WED_RRO)
1317+#define DUMP_WED_RRO_FDBK(_base)DUMP_RING("WED_RRO_FDBK", MTK_##_base, DUMP_TYPE_WED_RRO)
1318+
1319
1320 static void
1321 print_reg_val(struct seq_file *s, const char *name, u32 val)
1322@@ -58,6 +65,7 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
1323 cur->name);
1324 continue;
1325 case DUMP_TYPE_WED:
1326+ case DUMP_TYPE_WED_RRO:
1327 val = wed_r32(dev, cur->offset);
1328 break;
1329 case DUMP_TYPE_WDMA:
1330@@ -69,6 +77,9 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
1331 case DUMP_TYPE_WPDMA_TXFREE:
1332 val = wpdma_txfree_r32(dev, cur->offset);
1333 break;
1334+ case DUMP_TYPE_WPDMA_RX:
1335+ val = wpdma_rx_r32(dev, cur->base, cur->offset);
1336+ break;
1337 }
1338 print_reg_val(s, cur->name, val);
1339 }
1340@@ -132,6 +143,81 @@ wed_txinfo_show(struct seq_file *s, void *data)
1341 }
1342 DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
1343
1344+static int
1345+wed_rxinfo_show(struct seq_file *s, void *data)
1346+{
1347+ static const struct reg_dump regs[] = {
1348+ DUMP_STR("WPDMA RX"),
1349+ DUMP_WPDMA_RX_RING(0),
1350+ DUMP_WPDMA_RX_RING(1),
1351+
1352+ DUMP_STR("WPDMA RX"),
1353+ DUMP_WED(WED_WPDMA_RX_D_MIB(0)),
1354+ DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(0)),
1355+ DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(0)),
1356+ DUMP_WED(WED_WPDMA_RX_D_MIB(1)),
1357+ DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(1)),
1358+ DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(1)),
1359+ DUMP_WED(WED_WPDMA_RX_D_COHERENT_MIB),
1360+
1361+ DUMP_STR("WED RX"),
1362+ DUMP_WED_RING(WED_RING_RX_DATA(0)),
1363+ DUMP_WED_RING(WED_RING_RX_DATA(1)),
1364+
1365+ DUMP_STR("WED RRO"),
1366+ DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0),
1367+ DUMP_WED(WED_RROQM_MID_MIB),
1368+ DUMP_WED(WED_RROQM_MOD_MIB),
1369+ DUMP_WED(WED_RROQM_MOD_COHERENT_MIB),
1370+ DUMP_WED_RRO_FDBK(WED_RROQM_FDBK_CTRL0),
1371+ DUMP_WED(WED_RROQM_FDBK_IND_MIB),
1372+ DUMP_WED(WED_RROQM_FDBK_ENQ_MIB),
1373+ DUMP_WED(WED_RROQM_FDBK_ANC_MIB),
1374+ DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB),
1375+
1376+ DUMP_STR("WED Route QM"),
1377+ DUMP_WED(WED_RTQM_R2H_MIB(0)),
1378+ DUMP_WED(WED_RTQM_R2Q_MIB(0)),
1379+ DUMP_WED(WED_RTQM_Q2H_MIB(0)),
1380+ DUMP_WED(WED_RTQM_R2H_MIB(1)),
1381+ DUMP_WED(WED_RTQM_R2Q_MIB(1)),
1382+ DUMP_WED(WED_RTQM_Q2H_MIB(1)),
1383+ DUMP_WED(WED_RTQM_Q2N_MIB),
1384+ DUMP_WED(WED_RTQM_Q2B_MIB),
1385+ DUMP_WED(WED_RTQM_PFDBK_MIB),
1386+
1387+ DUMP_STR("WED WDMA TX"),
1388+ DUMP_WED(WED_WDMA_TX_MIB),
1389+ DUMP_WED_RING(WED_WDMA_RING_TX),
1390+
1391+ DUMP_STR("WDMA TX"),
1392+ DUMP_WDMA(WDMA_GLO_CFG),
1393+ DUMP_WDMA_RING(WDMA_RING_TX(0)),
1394+ DUMP_WDMA_RING(WDMA_RING_TX(1)),
1395+
1396+ DUMP_STR("WED RX BM"),
1397+ DUMP_WED(WED_RX_BM_BASE),
1398+ DUMP_WED(WED_RX_BM_RX_DMAD),
1399+ DUMP_WED(WED_RX_BM_PTR),
1400+ DUMP_WED(WED_RX_BM_TKID_MIB),
1401+ DUMP_WED(WED_RX_BM_BLEN),
1402+ DUMP_WED(WED_RX_BM_STS),
1403+ DUMP_WED(WED_RX_BM_INTF2),
1404+ DUMP_WED(WED_RX_BM_INTF),
1405+ DUMP_WED(WED_RX_BM_ERR_STS),
1406+ };
1407+
1408+ struct mtk_wed_hw *hw = s->private;
1409+ struct mtk_wed_device *dev = hw->wed_dev;
1410+
1411+ if (!dev)
1412+ return 0;
1413+
1414+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
1415+
1416+ return 0;
1417+}
1418+DEFINE_SHOW_ATTRIBUTE(wed_rxinfo);
1419
1420 static int
1421 mtk_wed_reg_set(void *data, u64 val)
1422@@ -175,4 +261,8 @@ void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
1423 debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
1424 debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
1425 debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
1426+ debugfs_create_file_unsafe("rxinfo", 0400, dir, hw, &wed_rxinfo_fops);
1427+ if (hw->ver > MTK_WED_V1) {
1428+ wed_wo_mcu_debugfs(hw, dir);
1429+ }
1430 }
1431diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
1432new file mode 100644
1433index 000000000..bd1ab9500
1434--- /dev/null
1435+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
1436@@ -0,0 +1,561 @@
1437+// SPDX-License-Identifier: GPL-2.0-only
1438+
1439+#include <linux/skbuff.h>
1440+#include <linux/debugfs.h>
1441+#include <linux/firmware.h>
1442+#include <linux/of_address.h>
1443+#include <linux/soc/mediatek/mtk_wed.h>
1444+#include "mtk_wed_regs.h"
1445+#include "mtk_wed_mcu.h"
1446+#include "mtk_wed_wo.h"
1447+
1448+struct sk_buff *
1449+mtk_wed_mcu_msg_alloc(struct mtk_wed_wo *wo,
1450+ const void *data, int data_len)
1451+{
1452+ const struct wed_wo_mcu_ops *ops = wo->mcu_ops;
1453+ int length = ops->headroom + data_len;
1454+ struct sk_buff *skb;
1455+
1456+ skb = alloc_skb(length, GFP_KERNEL);
1457+ if (!skb)
1458+ return NULL;
1459+
1460+ memset(skb->head, 0, length);
1461+ skb_reserve(skb, ops->headroom);
1462+
1463+ if (data && data_len)
1464+ skb_put_data(skb, data, data_len);
1465+
1466+ return skb;
1467+}
1468+
1469+struct sk_buff *
1470+mtk_wed_mcu_get_response(struct mtk_wed_wo *wo, unsigned long expires)
1471+{
1472+ unsigned long timeout;
1473+
1474+ if (!time_is_after_jiffies(expires))
1475+ return NULL;
1476+
1477+ timeout = expires - jiffies;
1478+ wait_event_timeout(wo->mcu.wait,
1479+ (!skb_queue_empty(&wo->mcu.res_q)),
1480+ timeout);
1481+
1482+ return skb_dequeue(&wo->mcu.res_q);
1483+}
1484+
1485+int
1486+mtk_wed_mcu_skb_send_and_get_msg(struct mtk_wed_wo *wo,
1487+ int to_id, int cmd, struct sk_buff *skb,
1488+ bool wait_resp, struct sk_buff **ret_skb)
1489+{
1490+ unsigned long expires;
1491+ int ret, seq;
1492+
1493+ if (ret_skb)
1494+ *ret_skb = NULL;
1495+
1496+ mutex_lock(&wo->mcu.mutex);
1497+
1498+ ret = wo->mcu_ops->mcu_skb_send_msg(wo, to_id, cmd, skb, &seq, wait_resp);
1499+ if (ret < 0)
1500+ goto out;
1501+
1502+ if (!wait_resp) {
1503+ ret = 0;
1504+ goto out;
1505+ }
1506+
1507+ expires = jiffies + wo->mcu.timeout;
1508+
1509+ do {
1510+ skb = mtk_wed_mcu_get_response(wo, expires);
1511+ ret = wo->mcu_ops->mcu_parse_response(wo, cmd, skb, seq);
1512+
1513+ if (!ret && ret_skb)
1514+ *ret_skb = skb;
1515+ else
1516+ dev_kfree_skb(skb);
1517+ } while (ret == -EAGAIN);
1518+
1519+out:
1520+ mutex_unlock(&wo->mcu.mutex);
1521+
1522+ return ret;
1523+}
1524+
1525+void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo,
1526+ struct sk_buff *skb)
1527+{
1528+ skb_queue_tail(&wo->mcu.res_q, skb);
1529+ wake_up(&wo->mcu.wait);
1530+}
1531+
1532+static int mtk_wed_mcu_send_and_get_msg(struct mtk_wed_wo *wo,
1533+ int to_id, int cmd, const void *data, int len,
1534+ bool wait_resp, struct sk_buff **ret_skb)
1535+{
1536+ struct sk_buff *skb;
1537+
1538+ skb = mtk_wed_mcu_msg_alloc(wo, data, len);
1539+ if (!skb)
1540+ return -ENOMEM;
1541+
1542+ return mtk_wed_mcu_skb_send_and_get_msg(wo, to_id, cmd, skb, wait_resp, ret_skb);
1543+}
1544+
1545+int
1546+mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo,
1547+ int to_id, int cmd,
1548+ const void *data, int len, bool wait_resp)
1549+{
1550+ struct sk_buff *skb = NULL;
1551+ int ret = 0;
1552+
1553+ ret = mtk_wed_mcu_send_and_get_msg(wo, to_id, cmd, data,
1554+ len, wait_resp, &skb);
1555+ if (skb)
1556+ dev_kfree_skb(skb);
1557+
1558+ return ret;
1559+}
1560+
1561+int mtk_wed_exception_init(struct mtk_wed_wo *wo)
1562+{
1563+ struct wed_wo_exception *exp = &wo->exp;
1564+ struct {
1565+ u32 arg0;
1566+ u32 arg1;
1567+ }req;
1568+
1569+ exp->log_size = EXCEPTION_LOG_SIZE;
1570+ exp->log = kmalloc(exp->log_size, GFP_ATOMIC);
1571+ if (!exp->log)
1572+ return -ENOMEM;
1573+
1574+ memset(exp->log, 0, exp->log_size);
1575+ exp->phys = dma_map_single(wo->hw->dev, exp->log, exp->log_size,
1576+ DMA_FROM_DEVICE);
1577+
1578+ if (unlikely(dma_mapping_error(wo->hw->dev, exp->phys))) {
1579+ dev_info(wo->hw->dev, "dma map error\n");
1580+ goto free;
1581+ }
1582+
1583+ req.arg0 = (u32)exp->phys;
1584+ req.arg1 = (u32)exp->log_size;
1585+
1586+ return mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, WO_CMD_EXCEPTION_INIT,
1587+ &req, sizeof(req), false);
1588+
1589+free:
1590+ kfree(exp->log);
1591+ return -ENOMEM;
1592+}
1593+
1594+int
1595+mtk_wed_mcu_cmd_sanity_check(struct mtk_wed_wo *wo, struct sk_buff *skb)
1596+{
1597+ struct wed_cmd_hdr *hdr = (struct wed_cmd_hdr *)skb->data;
1598+
1599+ if (hdr->ver != 0)
1600+ return WARP_INVALID_PARA_STATUS;
1601+
1602+ if (skb->len < sizeof(struct wed_cmd_hdr))
1603+ return WARP_INVALID_PARA_STATUS;
1604+
1605+ if (skb->len != hdr->length)
1606+ return WARP_INVALID_PARA_STATUS;
1607+
1608+ return WARP_OK_STATUS;
1609+}
1610+
1611+void
1612+mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo, struct sk_buff *skb)
1613+{
1614+ struct wed_cmd_hdr *hdr = (struct wed_cmd_hdr *)skb->data;
1615+ struct wed_wo_log *record;
1616+ char *msg = (char *)(skb->data + sizeof(struct wed_cmd_hdr));
1617+ u16 msg_len = skb->len - sizeof(struct wed_cmd_hdr);
1618+ u32 i, cnt = 0;
1619+
1620+ switch (hdr->cmd_id) {
1621+ case WO_EVT_LOG_DUMP:
1622+ pr_info("[WO LOG]: %s\n", msg);
1623+ break;
1624+ case WO_EVT_PROFILING:
1625+ cnt = msg_len / (sizeof(struct wed_wo_log));
1626+ record = (struct wed_wo_log *) msg;
1627+ dev_info(wo->hw->dev, "[WO Profiling]: %d report arrived!\n", cnt);
1628+
1629+ for (i = 0 ; i < cnt ; i++) {
1630+ //PROFILE_STAT(wo->total, record[i].total);
1631+ //PROFILE_STAT(wo->mod, record[i].mod);
1632+ //PROFILE_STAT(wo->rro, record[i].rro);
1633+
1634+ dev_info(wo->hw->dev, "[WO Profiling]: SN:%u with latency: total=%u, rro:%u, mod:%u\n",
1635+ record[i].sn,
1636+ record[i].total,
1637+ record[i].rro,
1638+ record[i].mod);
1639+ }
1640+ break;
1641+
1642+ default:
1643+ break;
1644+ }
1645+
1646+ dev_kfree_skb(skb);
1647+
1648+}
1649+
1650+static int
1651+mtk_wed_load_firmware(struct mtk_wed_wo *wo)
1652+{
1653+ struct fw_info {
1654+ __le32 decomp_crc;
1655+ __le32 decomp_len;
1656+ __le32 decomp_blk_sz;
1657+ u8 reserved[4];
1658+ __le32 addr;
1659+ __le32 len;
1660+ u8 feature_set;
1661+ u8 reserved1[15];
1662+ } __packed *region;
1663+
1664+ char *mcu;
1665+ const struct mtk_wed_fw_trailer *hdr;
1666+ static u8 shared[MAX_REGION_SIZE] = {0};
1667+ const struct firmware *fw;
1668+ int ret, i;
1669+ u32 ofs = 0;
1670+ u32 boot_cr, val;
1671+
1672+ mcu = wo->hw->index ? MT7986_FIRMWARE_WO_2 : MT7986_FIRMWARE_WO_1;
1673+
1674+ ret = request_firmware(&fw, mcu, wo->hw->dev);
1675+ if (ret)
1676+ return ret;
1677+
1678+ hdr = (const struct mtk_wed_fw_trailer *)(fw->data + fw->size -
1679+ sizeof(*hdr));
1680+
1681+ dev_info(wo->hw->dev, "WO Firmware Version: %.10s, Build Time: %.15s\n",
1682+ hdr->fw_ver, hdr->build_date);
1683+
1684+ for (i = 0; i < hdr->n_region; i++) {
1685+ int j = 0;
1686+ region = (struct fw_info *)(fw->data + fw->size -
1687+ sizeof(*hdr) -
1688+ sizeof(*region) *
1689+ (hdr->n_region - i));
1690+
1691+ while (j < MAX_REGION_SIZE) {
1692+ struct mtk_wed_fw_region *wo_region;
1693+
1694+ wo_region = &wo->region[j];
1695+ if (!wo_region->addr)
1696+ break;
1697+
1698+ if (wo_region->addr_pa == region->addr) {
1699+ if (!wo_region->shared) {
1700+ memcpy(wo_region->addr,
1701+ fw->data + ofs, region->len);
1702+ } else if (!shared[j]) {
1703+ memcpy(wo_region->addr,
1704+ fw->data + ofs, region->len);
1705+ shared[j] = true;
1706+ }
1707+ }
1708+ j++;
1709+ }
1710+
1711+ if (j == __WO_REGION_MAX) {
1712+ ret = -ENOENT;
1713+ goto done;
1714+ }
1715+ ofs += region->len;
1716+ }
1717+
1718+ /* write the start address */
1719+ boot_cr = wo->hw->index ?
1720+ WOX_MCU_CFG_LS_WA_BOOT_ADDR_ADDR : WOX_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
1721+ wo_w32(wo, boot_cr, (wo->region[WO_REGION_EMI].addr_pa >> 16));
1722+
1723+ /* wo firmware reset */
1724+ wo_w32(wo, WOX_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00);
1725+
1726+ val = wo_r32(wo, WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR);
1727+
1728+ val |= wo->hw->index ? WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WA_CPU_RSTB_MASK :
1729+ WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WM_CPU_RSTB_MASK;
1730+
1731+ wo_w32(wo, WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val);
1732+
1733+done:
1734+ release_firmware(fw);
1735+
1736+ return ret;
1737+}
1738+
1739+static int
1740+mtk_wed_get_firmware_region(struct mtk_wed_wo *wo)
1741+{
1742+ struct device_node *node, *np = wo->hw->node;
1743+ struct mtk_wed_fw_region *region;
1744+ struct resource res;
1745+ const char *compat;
1746+ int i, ret;
1747+
1748+ static const char *const wo_region_compat[__WO_REGION_MAX] = {
1749+ [WO_REGION_EMI] = WOCPU_EMI_DEV_NODE,
1750+ [WO_REGION_ILM] = WOCPU_ILM_DEV_NODE,
1751+ [WO_REGION_DATA] = WOCPU_DATA_DEV_NODE,
1752+ [WO_REGION_BOOT] = WOCPU_BOOT_DEV_NODE,
1753+ };
1754+
1755+ for (i = 0; i < __WO_REGION_MAX; i++) {
1756+ region = &wo->region[i];
1757+ compat = wo_region_compat[i];
1758+
1759+ node = of_parse_phandle(np, compat, 0);
1760+ if (!node)
1761+ return -ENODEV;
1762+
1763+ ret = of_address_to_resource(node, 0, &res);
1764+ if (ret)
1765+ return ret;
1766+
1767+ region->addr_pa = res.start;
1768+ region->size = resource_size(&res);
1769+ region->addr = ioremap(region->addr_pa, region->size);
1770+
1771+ of_property_read_u32_index(node, "shared", 0, &region->shared);
1772+ }
1773+
1774+ return 0;
1775+}
1776+
1777+static int
1778+wo_mcu_send_message(struct mtk_wed_wo *wo,
1779+ int to_id, int cmd, struct sk_buff *skb,
1780+ int *wait_seq, bool wait_resp)
1781+{
1782+ struct wed_cmd_hdr *hdr;
1783+ u8 seq = 0;
1784+
1785+ /* TDO: make dynamic based on msg type */
1786+ wo->mcu.timeout = 20 * HZ;
1787+
1788+ if (wait_resp && wait_seq) {
1789+ seq = wo->mcu.msg_seq++ ;
1790+ *wait_seq = seq;
1791+ }
1792+
1793+ hdr = (struct wed_cmd_hdr *)skb_push(skb, sizeof(*hdr));
1794+
1795+ hdr->cmd_id = cmd;
1796+ hdr->length = cpu_to_le16(skb->len);
1797+ hdr->uni_id = seq;
1798+
1799+ if (to_id == MODULE_ID_WO)
1800+ hdr->flag |= WARP_CMD_FLAG_FROM_TO_WO;
1801+
1802+ if (wait_resp && wait_seq)
1803+ hdr->flag |= WARP_CMD_FLAG_NEED_RSP;
1804+
1805+ return mtk_wed_wo_q_tx_skb(wo, &wo->q_tx, skb);
1806+}
1807+
1808+static int
1809+wo_mcu_parse_response(struct mtk_wed_wo *wo, int cmd,
1810+ struct sk_buff *skb, int seq)
1811+{
1812+ struct wed_cmd_hdr *hdr;
1813+
1814+ if (!skb) {
1815+ dev_err(wo->hw->dev, "Message %08x (seq %d) timeout\n",
1816+ cmd, seq);
1817+ return -ETIMEDOUT;
1818+ }
1819+
1820+ hdr = (struct wed_cmd_hdr *)skb->data;
1821+ if (seq != hdr->uni_id) {
1822+ dev_err(wo->hw->dev, "Message %08x (seq %d) with not match uid(%d)\n",
1823+ cmd, seq, hdr->uni_id);
1824+ return -EAGAIN;
1825+ }
1826+
1827+ //skb_pull(skb, sizeof(struct wed_cmd_hdr));
1828+
1829+ return 0;
1830+}
1831+
1832+int wed_wo_mcu_init(struct mtk_wed_wo *wo)
1833+{
1834+ static const struct wed_wo_mcu_ops wo_mcu_ops = {
1835+ .headroom = sizeof(struct wed_cmd_hdr),
1836+ .mcu_skb_send_msg = wo_mcu_send_message,
1837+ .mcu_parse_response = wo_mcu_parse_response,
1838+ /*TDO .mcu_restart = wo_mcu_restart,*/
1839+ };
1840+ unsigned long timeout = jiffies + FW_DL_TIMEOUT;
1841+ int ret;
1842+ u32 val;
1843+
1844+ wo->mcu_ops = &wo_mcu_ops;
1845+
1846+ ret = mtk_wed_get_firmware_region(wo);
1847+ if (ret)
1848+ return ret;
1849+
1850+ /* set dummy cr */
1851+ wed_w32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * WED_DUMMY_CR_FWDL,
1852+ wo->hw->index + 1);
1853+
1854+ ret = mtk_wed_load_firmware(wo);
1855+ if (ret)
1856+ return ret;
1857+
1858+ do {
1859+ /* get dummy cr */
1860+ val = wed_r32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * WED_DUMMY_CR_FWDL);
1861+ } while (val != 0 && !time_after(jiffies, timeout));
1862+
1863+ if (val)
1864+ return -EBUSY;
1865+
1866+ return 0;
1867+}
1868+
1869+static ssize_t
1870+mtk_wed_wo_ctrl(struct file *file,
1871+ const char __user *user_buf,
1872+ size_t count,
1873+ loff_t *ppos)
1874+{
1875+ struct mtk_wed_hw *hw = file->private_data;
1876+ struct mtk_wed_wo *wo = hw->wed_wo;
1877+ char buf[100], *cmd = NULL, *input[11] = {0};
1878+ char msgbuf[128] = {0};
1879+ struct wo_cmd_query *query = (struct wo_cmd_query *)msgbuf;
1880+ u32 cmd_id;
1881+ bool wait = false;
1882+ char *sub_str = NULL;
1883+ int input_idx = 0, input_total = 0, scan_num = 0;
1884+ char *p;
1885+
1886+ if (count > sizeof(buf))
1887+ return -EINVAL;
1888+
1889+ if (copy_from_user(buf, user_buf, count))
1890+ return -EFAULT;
1891+
1892+ if (count && buf[count - 1] == '\n')
1893+ buf[count - 1] = '\0';
1894+ else
1895+ buf[count] = '\0';
1896+
1897+ p = buf;
1898+
1899+ while ((sub_str = strsep(&p, " ")) != NULL) {
1900+ input[input_idx] = sub_str;
1901+ input_idx++;
1902+ input_total++;
1903+ }
1904+ cmd = input[0];
1905+ if (input_total == 1 && cmd) {
1906+ if (strncmp(cmd, "bainfo", strlen(cmd)) == 0) {
1907+ cmd_id = WO_CMD_BA_INFO_DUMP;
1908+ } else if (strncmp(cmd, "bactrl", strlen(cmd)) == 0) {
1909+ cmd_id = WO_CMD_BA_CTRL_DUMP;
1910+ } else if (strncmp(cmd, "fbcmdq", strlen(cmd)) == 0) {
1911+ cmd_id = WO_CMD_FBCMD_Q_DUMP;
1912+ } else if (strncmp(cmd, "logflush", strlen(cmd)) == 0) {
1913+ cmd_id = WO_CMD_LOG_FLUSH;
1914+ } else if (strncmp(cmd, "cpustat.dump", strlen(cmd)) == 0) {
1915+ cmd_id = WO_CMD_CPU_STATS_DUMP;
1916+ } else if (strncmp(cmd, "state", strlen(cmd)) == 0) {
1917+ cmd_id = WO_CMD_WED_RX_STAT;
1918+ } else if (strncmp(cmd, "prof_hit_dump", strlen(cmd)) == 0) {
1919+ //wo_profiling_report();
1920+ return count;
1921+ } else if (strncmp(cmd, "rxcnt_info", strlen(cmd)) == 0) {
1922+ cmd_id = WO_CMD_RXCNT_INFO;
1923+ wait = true;
1924+ } else {
1925+ pr_info("(%s) unknown comand string(%s)!\n", __func__, cmd);
1926+ return count;
1927+ }
1928+ } else if (input_total > 1) {
1929+ for (input_idx = 1 ; input_idx < input_total ; input_idx++) {
1930+ scan_num = sscanf(input[input_idx], "%u", &query->query0+(input_idx - 1));
1931+
1932+ if (scan_num < 1) {
1933+ pr_info("(%s) require more input!\n", __func__);
1934+ return count;
1935+ }
1936+ }
1937+ if(strncmp(cmd, "devinfo", strlen(cmd)) == 0) {
1938+ cmd_id = WO_CMD_DEV_INFO_DUMP;
1939+ } else if (strncmp(cmd, "bssinfo", strlen(cmd)) == 0) {
1940+ cmd_id = WO_CMD_BSS_INFO_DUMP;
1941+ } else if (strncmp(cmd, "starec", strlen(cmd)) == 0) {
1942+ cmd_id = WO_CMD_STA_REC_DUMP;
1943+ } else if (strncmp(cmd, "starec_ba", strlen(cmd)) == 0) {
1944+ cmd_id = WO_CMD_STA_BA_DUMP;
1945+ } else if (strncmp(cmd, "logctrl", strlen(cmd)) == 0) {
1946+ cmd_id = WO_CMD_FW_LOG_CTRL;
1947+ } else if (strncmp(cmd, "cpustat.en", strlen(cmd)) == 0) {
1948+ cmd_id = WO_CMD_CPU_STATS_ENABLE;
1949+ } else if (strncmp(cmd, "prof_conf", strlen(cmd)) == 0) {
1950+ cmd_id = WO_CMD_PROF_CTRL;
1951+ } else if (strncmp(cmd, "rxcnt_ctrl", strlen(cmd)) == 0) {
1952+ cmd_id = WO_CMD_RXCNT_CTRL;
1953+ } else if (strncmp(cmd, "dbg_set", strlen(cmd)) == 0) {
1954+ cmd_id = WO_CMD_DBG_INFO;
1955+ }
1956+ } else {
1957+ dev_info(hw->dev, "usage: echo cmd='cmd_str' > wo_write\n");
1958+ dev_info(hw->dev, "cmd_str value range:\n");
1959+ dev_info(hw->dev, "\tbainfo:\n");
1960+ dev_info(hw->dev, "\tbactrl:\n");
1961+ dev_info(hw->dev, "\tfbcmdq:\n");
1962+ dev_info(hw->dev, "\tlogflush:\n");
1963+ dev_info(hw->dev, "\tcpustat.dump:\n");
1964+ dev_info(hw->dev, "\tprof_hit_dump:\n");
1965+ dev_info(hw->dev, "\trxcnt_info:\n");
1966+ dev_info(hw->dev, "\tdevinfo:\n");
1967+ dev_info(hw->dev, "\tbssinfo:\n");
1968+ dev_info(hw->dev, "\tstarec:\n");
1969+ dev_info(hw->dev, "\tstarec_ba:\n");
1970+ dev_info(hw->dev, "\tlogctrl:\n");
1971+ dev_info(hw->dev, "\tcpustat.en:\n");
1972+ dev_info(hw->dev, "\tprof_conf:\n");
1973+ dev_info(hw->dev, "\trxcnt_ctrl:\n");
1974+ dev_info(hw->dev, "\tdbg_set [level] [category]:\n");
1975+ return count;
1976+ }
1977+
1978+ mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, cmd_id, (void *)msgbuf, sizeof(struct wo_cmd_query), wait);
1979+
1980+ return count;
1981+
1982+}
1983+
1984+static const struct file_operations fops_wo_ctrl = {
1985+ .write = mtk_wed_wo_ctrl,
1986+ .open = simple_open,
1987+ .llseek = default_llseek,
1988+};
1989+
1990+void wed_wo_mcu_debugfs(struct mtk_wed_hw *hw, struct dentry *dir)
1991+{
1992+ if (!dir)
1993+ return;
1994+
1995+ debugfs_create_file("wo_write", 0600, dir, hw, &fops_wo_ctrl);
1996+}
1997+
1998diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.h b/drivers/net/ethernet/mediatek/mtk_wed_mcu.h
1999new file mode 100644
2000index 000000000..6a5ac7672
2001--- /dev/null
2002+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.h
2003@@ -0,0 +1,125 @@
2004+// SPDX-License-Identifier: GPL-2.0-only
2005+
2006+#ifndef __MTK_WED_MCU_H
2007+#define __MTK_WED_MCU_H
2008+
2009+#define EXCEPTION_LOG_SIZE 32768
2010+#define WOCPU_MCUSYS_RESET_ADDR 0x15194050
2011+#define WOCPU_WO0_MCUSYS_RESET_MASK 0x20
2012+#define WOCPU_WO1_MCUSYS_RESET_MASK 0x1
2013+
2014+#define WARP_INVALID_LENGTH_STATUS (-2)
2015+#define WARP_NULL_POINTER_STATUS (-3)
2016+#define WARP_INVALID_PARA_STATUS (-4)
2017+#define WARP_NOT_HANDLE_STATUS (-5)
2018+#define WARP_FAIL_STATUS (-1)
2019+#define WARP_OK_STATUS (0)
2020+#define WARP_ALREADY_DONE_STATUS (1)
2021+
2022+#define MT7986_FIRMWARE_WO_1 "mediatek/mt7986_wo_0.bin"
2023+#define MT7986_FIRMWARE_WO_2 "mediatek/mt7986_wo_1.bin"
2024+
2025+#define WOCPU_EMI_DEV_NODE "mediatek,wocpu_emi"
2026+#define WOCPU_ILM_DEV_NODE "mediatek,wocpu_ilm"
2027+#define WOCPU_DLM_DEV_NODE "mediatek,wocpu_dlm"
2028+#define WOCPU_DATA_DEV_NODE "mediatek,wocpu_data"
2029+#define WOCPU_BOOT_DEV_NODE "mediatek,wocpu_boot"
2030+
2031+#define FW_DL_TIMEOUT ((3000 * HZ) / 1000)
2032+#define WOCPU_TIMEOUT ((1000 * HZ) / 1000)
2033+
2034+#define MAX_REGION_SIZE 3
2035+
2036+#define WOX_MCU_CFG_LS_BASE 0 /*0x15194000*/
2037+
2038+#define WOX_MCU_CFG_LS_HW_VER_ADDR (WOX_MCU_CFG_LS_BASE + 0x000) // 4000
2039+#define WOX_MCU_CFG_LS_FW_VER_ADDR (WOX_MCU_CFG_LS_BASE + 0x004) // 4004
2040+#define WOX_MCU_CFG_LS_CFG_DBG1_ADDR (WOX_MCU_CFG_LS_BASE + 0x00C) // 400C
2041+#define WOX_MCU_CFG_LS_CFG_DBG2_ADDR (WOX_MCU_CFG_LS_BASE + 0x010) // 4010
2042+#define WOX_MCU_CFG_LS_WF_MCCR_ADDR (WOX_MCU_CFG_LS_BASE + 0x014) // 4014
2043+#define WOX_MCU_CFG_LS_WF_MCCR_SET_ADDR (WOX_MCU_CFG_LS_BASE + 0x018) // 4018
2044+#define WOX_MCU_CFG_LS_WF_MCCR_CLR_ADDR (WOX_MCU_CFG_LS_BASE + 0x01C) // 401C
2045+#define WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR (WOX_MCU_CFG_LS_BASE + 0x050) // 4050
2046+#define WOX_MCU_CFG_LS_WM_BOOT_ADDR_ADDR (WOX_MCU_CFG_LS_BASE + 0x060) // 4060
2047+#define WOX_MCU_CFG_LS_WA_BOOT_ADDR_ADDR (WOX_MCU_CFG_LS_BASE + 0x064) // 4064
2048+
2049+#define WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WM_CPU_RSTB_MASK BIT(5)
2050+#define WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WA_CPU_RSTB_MASK BIT(0)
2051+
2052+
2053+enum wo_event_id {
2054+ WO_EVT_LOG_DUMP = 0x1,
2055+ WO_EVT_PROFILING = 0x2,
2056+ WO_EVT_RXCNT_INFO = 0x3
2057+};
2058+
2059+enum wo_cmd_id {
2060+ WO_CMD_WED_CFG = 0,
2061+ WO_CMD_WED_RX_STAT,
2062+ WO_CMD_RRO_SER,
2063+ WO_CMD_DBG_INFO,
2064+ WO_CMD_DEV_INFO,
2065+ WO_CMD_BSS_INFO,
2066+ WO_CMD_STA_REC,
2067+ WO_CMD_DEV_INFO_DUMP,
2068+ WO_CMD_BSS_INFO_DUMP,
2069+ WO_CMD_STA_REC_DUMP,
2070+ WO_CMD_BA_INFO_DUMP,
2071+ WO_CMD_FBCMD_Q_DUMP,
2072+ WO_CMD_FW_LOG_CTRL,
2073+ WO_CMD_LOG_FLUSH,
2074+ WO_CMD_CHANGE_STATE,
2075+ WO_CMD_CPU_STATS_ENABLE,
2076+ WO_CMD_CPU_STATS_DUMP,
2077+ WO_CMD_EXCEPTION_INIT,
2078+ WO_CMD_PROF_CTRL,
2079+ WO_CMD_STA_BA_DUMP,
2080+ WO_CMD_BA_CTRL_DUMP,
2081+ WO_CMD_RXCNT_CTRL,
2082+ WO_CMD_RXCNT_INFO,
2083+ WO_CMD_SET_CAP,
2084+ WO_CMD_CCIF_RING_DUMP,
2085+ WO_CMD_WED_END
2086+};
2087+
2088+enum wo_state {
2089+ WO_STATE_UNDEFINED = 0x0,
2090+ WO_STATE_INIT = 0x1,
2091+ WO_STATE_ENABLE = 0x2,
2092+ WO_STATE_DISABLE = 0x3,
2093+ WO_STATE_HALT = 0x4,
2094+ WO_STATE_GATING = 0x5,
2095+ WO_STATE_SER_RESET = 0x6,
2096+ WO_STATE_WF_RESET = 0x7,
2097+ WO_STATE_END
2098+};
2099+
2100+enum wo_done_state {
2101+ WOIF_UNDEFINED = 0,
2102+ WOIF_DISABLE_DONE = 1,
2103+ WOIF_TRIGGER_ENABLE = 2,
2104+ WOIF_ENABLE_DONE = 3,
2105+ WOIF_TRIGGER_GATING = 4,
2106+ WOIF_GATING_DONE = 5,
2107+ WOIF_TRIGGER_HALT = 6,
2108+ WOIF_HALT_DONE = 7,
2109+};
2110+
2111+enum wed_dummy_cr_idx {
2112+ WED_DUMMY_CR_FWDL = 0,
2113+ WED_DUMMY_CR_WO_STATUS = 1
2114+};
2115+
2116+struct mtk_wed_fw_trailer {
2117+ u8 chip_id;
2118+ u8 eco_code;
2119+ u8 n_region;
2120+ u8 format_ver;
2121+ u8 format_flag;
2122+ u8 reserved[2];
2123+ char fw_ver[10];
2124+ char build_date[15];
2125+ u32 crc;
2126+};
2127+
2128+#endif
2129diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2130index 69f136ed4..e911b5315 100644
2131--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2132+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2133@@ -4,6 +4,8 @@
2134 #ifndef __MTK_WED_REGS_H
2135 #define __MTK_WED_REGS_H
2136
2137+#define MTK_WFDMA_DESC_CTRL_TO_HOST BIT(8)
2138+
2139 #if defined(CONFIG_MEDIATEK_NETSYS_V2)
2140 #define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(13, 0)
2141 #define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(14)
2142@@ -16,6 +18,7 @@
2143 #define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16)
2144 #define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30)
2145 #define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31)
2146+#define MTK_WED_RX_BM_TOKEN GENMASK(31, 16)
2147
2148 struct mtk_wdma_desc {
2149 __le32 buf0;
2150@@ -37,6 +40,8 @@ struct mtk_wdma_desc {
2151 #define MTK_WED_RESET_WED_TX_DMA BIT(12)
2152 #define MTK_WED_RESET_WDMA_RX_DRV BIT(17)
2153 #define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
2154+#define MTK_WED_RESET_RX_RRO_QM BIT(20)
2155+#define MTK_WED_RESET_RX_ROUTE_QM BIT(21)
2156 #define MTK_WED_RESET_WED BIT(31)
2157
2158 #define MTK_WED_CTRL 0x00c
2159@@ -48,8 +53,12 @@ struct mtk_wdma_desc {
2160 #define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
2161 #define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
2162 #define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11)
2163-#define MTK_WED_CTRL_RESERVE_EN BIT(12)
2164-#define MTK_WED_CTRL_RESERVE_BUSY BIT(13)
2165+#define MTK_WED_CTRL_WED_RX_BM_EN BIT(12)
2166+#define MTK_WED_CTRL_WED_RX_BM_BUSY BIT(13)
2167+#define MTK_WED_CTRL_RX_RRO_QM_EN BIT(14)
2168+#define MTK_WED_CTRL_RX_RRO_QM_BUSY BIT(15)
2169+#define MTK_WED_CTRL_RX_ROUTE_QM_EN BIT(16)
2170+#define MTK_WED_CTRL_RX_ROUTE_QM_BUSY BIT(17)
2171 #define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
2172 #define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25)
2173 #define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
2174@@ -64,8 +73,8 @@ struct mtk_wdma_desc {
2175 #define MTK_WED_EXT_INT_STATUS_TX_TKID_LO_TH BIT(10)
2176 #define MTK_WED_EXT_INT_STATUS_TX_TKID_HI_TH BIT(11)
2177 #endif
2178-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
2179-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
2180+#define MTK_WED_EXT_INT_STATUS_RX_FREE_AT_EMPTY BIT(12)
2181+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_DMAD_ER BIT(13)
2182 #define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
2183 #define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
2184 #define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
2185@@ -82,8 +91,8 @@ struct mtk_wdma_desc {
2186 #define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
2187 MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
2188 MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \
2189- MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH | \
2190- MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH | \
2191+ MTK_WED_EXT_INT_STATUS_RX_FREE_AT_EMPTY | \
2192+ MTK_WED_EXT_INT_STATUS_RX_FBUF_DMAD_ER | \
2193 MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
2194 MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
2195 MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | \
2196@@ -92,6 +101,8 @@ struct mtk_wdma_desc {
2197 MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR)
2198
2199 #define MTK_WED_EXT_INT_MASK 0x028
2200+#define MTK_WED_EXT_INT_MASK1 0x02c
2201+#define MTK_WED_EXT_INT_MASK2 0x030
2202
2203 #define MTK_WED_STATUS 0x060
2204 #define MTK_WED_STATUS_TX GENMASK(15, 8)
2205@@ -179,6 +190,9 @@ struct mtk_wdma_desc {
2206
2207 #define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10)
2208
2209+#define MTK_WED_RING_RX_DATA(_n) (0x420 + (_n) * 0x10)
2210+
2211+#define MTK_WED_SCR0 0x3c0
2212 #define MTK_WED_WPDMA_INT_TRIGGER 0x504
2213 #define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1)
2214 #define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4)
2215@@ -235,13 +249,19 @@ struct mtk_wdma_desc {
2216
2217 #define MTK_WED_WPDMA_INT_CTRL_TX 0x530
2218 #define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN BIT(0)
2219-#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR BIT(1)
2220+#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR BIT(1)
2221 #define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG GENMASK(6, 2)
2222 #define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN BIT(8)
2223 #define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR BIT(9)
2224 #define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG GENMASK(14, 10)
2225
2226 #define MTK_WED_WPDMA_INT_CTRL_RX 0x534
2227+#define MTK_WED_WPDMA_INT_CTRL_RX0_EN BIT(0)
2228+#define MTK_WED_WPDMA_INT_CTRL_RX0_CLR BIT(1)
2229+#define MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG GENMASK(6, 2)
2230+#define MTK_WED_WPDMA_INT_CTRL_RX1_EN BIT(8)
2231+#define MTK_WED_WPDMA_INT_CTRL_RX1_CLR BIT(9)
2232+#define MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG GENMASK(14, 10)
2233
2234 #define MTK_WED_WPDMA_INT_CTRL_TX_FREE 0x538
2235 #define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN BIT(0)
2236@@ -266,13 +286,43 @@ struct mtk_wdma_desc {
2237 #define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
2238 #define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
2239
2240+#define MTK_WED_WPDMA_RX_MIB(_n) (0x5e0 + (_n) * 4)
2241+#define MTK_WED_WPDMA_RX_COHERENT_MIB(_n) (0x5f0 + (_n) * 4)
2242+
2243 #define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10)
2244 #define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10)
2245+#define MTK_WED_WPDMA_RING_RX_DATA(_n) (0x730 + (_n) * 0x10)
2246+
2247+
2248+#define MTK_WED_WPDMA_RX_D_GLO_CFG 0x75c
2249+#define MTK_WED_WPDMA_RX_D_RX_DRV_EN BIT(0)
2250+#define MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL GENMASK(11, 7)
2251+#define MTK_WED_WPDMA_RX_D_RXD_READ_LEN GENMASK(31, 24)
2252+
2253+#define MTK_WED_WPDMA_RX_D_RST_IDX 0x760
2254+#define MTK_WED_WPDMA_RX_D_RST_CRX_IDX0 BIT(16)
2255+#define MTK_WED_WPDMA_RX_D_RST_CRX_IDX1 BIT(17)
2256+#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX0 BIT(24)
2257+#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX1 BIT(25)
2258+
2259+#define MTK_WED_WPDMA_RX_GLO_CFG 0x76c
2260+#define MTK_WED_WPDMA_RX_RING 0x770
2261+
2262+#define MTK_WED_WPDMA_RX_D_MIB(_n) (0x774 + (_n) * 4)
2263+#define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n) (0x784 + (_n) * 4)
2264+#define MTK_WED_WPDMA_RX_D_COHERENT_MIB 0x78c
2265+
2266+#define MTK_WED_WDMA_RING_TX 0x800
2267+
2268+#define MTK_WED_WDMA_TX_MIB 0x810
2269+
2270+
2271 #define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
2272 #define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
2273
2274 #define MTK_WED_WDMA_GLO_CFG 0xa04
2275 #define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
2276+#define MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK BIT(1)
2277 #define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2)
2278 #define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
2279 #define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4)
2280@@ -316,6 +366,20 @@ struct mtk_wdma_desc {
2281 #define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4)
2282 #define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4)
2283
2284+#define MTK_WED_RX_BM_RX_DMAD 0xd80
2285+#define MTK_WED_RX_BM_RX_DMAD_SDL0 GENMASK(13, 0)
2286+
2287+#define MTK_WED_RX_BM_BASE 0xd84
2288+#define MTK_WED_RX_BM_INIT_PTR 0xd88
2289+#define MTK_WED_RX_BM_SW_TAIL GENMASK(15, 0)
2290+#define MTK_WED_RX_BM_INIT_SW_TAIL BIT(16)
2291+
2292+#define MTK_WED_RX_PTR 0xd8c
2293+
2294+#define MTK_WED_RX_BM_DYN_ALLOC_TH 0xdb4
2295+#define MTK_WED_RX_BM_DYN_ALLOC_TH_H GENMASK(31, 16)
2296+#define MTK_WED_RX_BM_DYN_ALLOC_TH_L GENMASK(15, 0)
2297+
2298 #define MTK_WED_RING_OFS_BASE 0x00
2299 #define MTK_WED_RING_OFS_COUNT 0x04
2300 #define MTK_WED_RING_OFS_CPU_IDX 0x08
2301@@ -355,4 +419,71 @@ struct mtk_wdma_desc {
2302 /* DMA channel mapping */
2303 #define HIFSYS_DMA_AG_MAP 0x008
2304
2305+#define MTK_WED_RTQM_GLO_CFG 0xb00
2306+#define MTK_WED_RTQM_BUSY BIT(1)
2307+#define MTK_WED_RTQM_Q_RST BIT(2)
2308+#define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5)
2309+#define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20)
2310+
2311+#define MTK_WED_RTQM_R2H_MIB(_n) (0xb70 + (_n) * 0x4)
2312+#define MTK_WED_RTQM_R2Q_MIB(_n) (0xb78 + (_n) * 0x4)
2313+#define MTK_WED_RTQM_Q2N_MIB 0xb80
2314+#define MTK_WED_RTQM_Q2H_MIB(_n) (0xb84 + (_n) * 0x4)
2315+
2316+#define MTK_WED_RTQM_Q2B_MIB 0xb8c
2317+#define MTK_WED_RTQM_PFDBK_MIB 0xb90
2318+
2319+#define MTK_WED_RROQM_GLO_CFG 0xc04
2320+#define MTK_WED_RROQM_RST_IDX 0xc08
2321+#define MTK_WED_RROQM_RST_IDX_MIOD BIT(0)
2322+#define MTK_WED_RROQM_RST_IDX_FDBK BIT(4)
2323+
2324+#define MTK_WED_RROQM_MIOD_CTRL0 0xc40
2325+#define MTK_WED_RROQM_MIOD_CTRL1 0xc44
2326+#define MTK_WED_RROQM_MIOD_CNT GENMASK(11, 0)
2327+
2328+#define MTK_WED_RROQM_MIOD_CTRL2 0xc48
2329+#define MTK_WED_RROQM_MIOD_CTRL3 0xc4c
2330+
2331+#define MTK_WED_RROQM_FDBK_CTRL0 0xc50
2332+#define MTK_WED_RROQM_FDBK_CTRL1 0xc54
2333+#define MTK_WED_RROQM_FDBK_CNT GENMASK(11, 0)
2334+
2335+#define MTK_WED_RROQM_FDBK_CTRL2 0xc58
2336+
2337+#define MTK_WED_RROQ_BASE_L 0xc80
2338+#define MTK_WED_RROQ_BASE_H 0xc84
2339+
2340+
2341+#define MTK_WED_RROQM_MIOD_CFG 0xc8c
2342+#define MTK_WED_RROQM_MIOD_MID_DW GENMASK(5, 0)
2343+#define MTK_WED_RROQM_MIOD_MOD_DW GENMASK(13, 8)
2344+#define MTK_WED_RROQM_MIOD_ENTRY_DW GENMASK(22, 16)
2345+
2346+#define MTK_WED_RROQM_MID_MIB 0xcc0
2347+#define MTK_WED_RROQM_MOD_MIB 0xcc4
2348+#define MTK_WED_RROQM_MOD_COHERENT_MIB 0xcc8
2349+#define MTK_WED_RROQM_FDBK_MIB 0xcd0
2350+#define MTK_WED_RROQM_FDBK_COHERENT_MIB 0xcd4
2351+#define MTK_WED_RROQM_FDBK_IND_MIB 0xce0
2352+#define MTK_WED_RROQM_FDBK_ENQ_MIB 0xce4
2353+#define MTK_WED_RROQM_FDBK_ANC_MIB 0xce8
2354+#define MTK_WED_RROQM_FDBK_ANC2H_MIB 0xcec
2355+
2356+#define MTK_WED_RX_BM_RX_DMAD 0xd80
2357+#define MTK_WED_RX_BM_BASE 0xd84
2358+#define MTK_WED_RX_BM_INIT_PTR 0xd88
2359+#define MTK_WED_RX_BM_PTR 0xd8c
2360+#define MTK_WED_RX_BM_PTR_HEAD		GENMASK(31, 16)
2361+#define MTK_WED_RX_BM_PTR_TAIL GENMASK(15, 0)
2362+
2363+#define MTK_WED_RX_BM_BLEN 0xd90
2364+#define MTK_WED_RX_BM_STS 0xd94
2365+#define MTK_WED_RX_BM_INTF2 0xd98
2366+#define MTK_WED_RX_BM_INTF 0xd9c
2367+#define MTK_WED_RX_BM_ERR_STS 0xda8
2368+
2369+#define MTK_WED_WOCPU_VIEW_MIOD_BASE 0x8000
2370+#define MTK_WED_PCIE_INT_MASK 0x0
2371+
2372 #endif
2373diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
2374new file mode 100644
2375index 000000000..10618fc1a
2376--- /dev/null
2377+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
2378@@ -0,0 +1,548 @@
2379+// SPDX-License-Identifier: GPL-2.0-only
2380+
2381+#include <linux/kernel.h>
2382+#include <linux/bitfield.h>
2383+#include <linux/dma-mapping.h>
2384+#include <linux/skbuff.h>
2385+#include <linux/of_platform.h>
2386+#include <linux/interrupt.h>
2387+#include <linux/of_address.h>
2388+#include <linux/iopoll.h>
2389+#include <linux/soc/mediatek/mtk_wed.h>
2390+#include "mtk_wed.h"
2391+#include "mtk_wed_regs.h"
2392+#include "mtk_wed_ccif.h"
2393+#include "mtk_wed_wo.h"
2394+
2395+struct wed_wo_profile_stat profile_total[6] = {
2396+ {1001, 0},
2397+ {1501, 0},
2398+ {3001, 0},
2399+ {5001, 0},
2400+ {10001, 0},
2401+ {0xffffffff, 0}
2402+};
2403+
2404+struct wed_wo_profile_stat profiling_mod[6] = {
2405+ {1001, 0},
2406+ {1501, 0},
2407+ {3001, 0},
2408+ {5001, 0},
2409+ {10001, 0},
2410+ {0xffffffff, 0}
2411+};
2412+
2413+struct wed_wo_profile_stat profiling_rro[6] = {
2414+ {1001, 0},
2415+ {1501, 0},
2416+ {3001, 0},
2417+ {5001, 0},
2418+ {10001, 0},
2419+ {0xffffffff, 0}
2420+};
2421+
2422+static void
2423+woif_q_sync_idx(struct mtk_wed_wo *wo, struct wed_wo_queue *q)
2424+{
2425+ woccif_w32(wo, q->regs->desc_base, q->desc_dma);
2426+ woccif_w32(wo, q->regs->ring_size, q->ndesc);
2427+
2428+ /* wo fw start from 1 */
2429+ q->head = woccif_r32(wo, q->regs->dma_idx) + 1;
2430+ q->tail = q->head;
2431+}
2432+
2433+static void
2434+woif_q_reset(struct mtk_wed_wo *dev, struct wed_wo_queue *q)
2435+{
2436+
2437+ if (!q || !q->ndesc)
2438+ return;
2439+
2440+ woccif_w32(dev, q->regs->cpu_idx, 0);
2441+
2442+ woif_q_sync_idx(dev, q);
2443+}
2444+
2445+static void
2446+woif_q_kick(struct mtk_wed_wo *wo, struct wed_wo_queue *q, int offset)
2447+{
2448+ wmb();
2449+ woccif_w32(wo, q->regs->cpu_idx, q->head + offset);
2450+}
2451+
2452+static int
2453+woif_q_rx_fill(struct mtk_wed_wo *wo, struct wed_wo_queue *q)
2454+{
2455+ int len = q->buf_size, frames = 0;
2456+ struct wed_wo_queue_entry *entry;
2457+ struct wed_wo_desc *desc;
2458+ dma_addr_t addr;
2459+ u32 ctrl = 0;
2460+ void *buf;
2461+
2462+ if (!q->ndesc)
2463+ return 0;
2464+
2465+ spin_lock_bh(&q->lock);
2466+
2467+ while (q->queued < q->ndesc - 1) {
2468+
2469+ buf = page_frag_alloc(&q->rx_page, len, GFP_ATOMIC);
2470+ if (!buf)
2471+ break;
2472+
2473+ addr = dma_map_single(wo->hw->dev, buf, len, DMA_FROM_DEVICE);
2474+ if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
2475+ skb_free_frag(buf);
2476+ break;
2477+ }
2478+ dma_sync_single_for_cpu(wo->hw->dev, addr, len,
2479+				DMA_FROM_DEVICE);
2480+ desc = &q->desc[q->head];
2481+ entry = &q->entry[q->head];
2482+
2483+ entry->dma_addr = addr;
2484+ entry->dma_len = len;
2485+
2486+ ctrl = FIELD_PREP(WED_CTL_SD_LEN0, entry->dma_len);
2487+ ctrl |= WED_CTL_LAST_SEC0;
2488+
2489+ WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
2490+ WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
2491+ dma_sync_single_for_device(wo->hw->dev, addr, len,
2492+					   DMA_FROM_DEVICE);
2493+ q->queued++;
2494+ q->entry[q->head].buf = buf;
2495+
2496+ q->head = (q->head + 1) % q->ndesc;
2497+ frames++;
2498+ }
2499+
2500+ spin_unlock_bh(&q->lock);
2501+
2502+ return frames;
2503+}
2504+
2505+static void
2506+woif_q_rx_fill_process(struct mtk_wed_wo *wo, struct wed_wo_queue *q)
2507+{
2508+	if (woif_q_rx_fill(wo, q))
2509+ woif_q_kick(wo, q, -1);
2510+}
2511+
2512+static int
2513+woif_q_alloc(struct mtk_wed_wo *dev, struct wed_wo_queue *q,
2514+ int n_desc, int bufsize, int idx,
2515+ struct wed_wo_queue_regs *regs)
2516+{
2517+ struct wed_wo_queue_regs *q_regs;
2518+ int size;
2519+
2520+ spin_lock_init(&q->lock);
2521+ spin_lock_init(&q->cleanup_lock);
2522+
2523+ q_regs = devm_kzalloc(dev->hw->dev, sizeof(*q_regs), GFP_KERNEL);
2524+	if (!q_regs) return -ENOMEM;
2525+ q_regs->desc_base = regs->desc_base;
2526+ q_regs->ring_size = regs->ring_size;
2527+ q_regs->cpu_idx = regs->cpu_idx;
2528+ q_regs->dma_idx = regs->dma_idx;
2529+
2530+ q->regs = q_regs;
2531+ q->ndesc = n_desc;
2532+ q->buf_size = bufsize;
2533+
2534+ size = q->ndesc * sizeof(struct wed_wo_desc);
2535+
2536+ q->desc = dmam_alloc_coherent(dev->hw->dev, size,
2537+ &q->desc_dma, GFP_KERNEL);
2538+ if (!q->desc)
2539+ return -ENOMEM;
2540+
2541+ size = q->ndesc * sizeof(*q->entry);
2542+ q->entry = devm_kzalloc(dev->hw->dev, size, GFP_KERNEL);
2543+ if (!q->entry)
2544+ return -ENOMEM;
2545+
2546+ if (idx == 0)
2547+ woif_q_reset(dev, &dev->q_tx);
2548+
2549+ return 0;
2550+}
2551+
2552+static void
2553+woif_q_tx_clean(struct mtk_wed_wo *wo, struct wed_wo_queue *q, bool flush)
2554+{
2555+ int last;
2556+
2557+ if (!q || !q->ndesc)
2558+ return;
2559+
2560+ spin_lock_bh(&q->cleanup_lock);
2561+ if (flush)
2562+ last = -1;
2563+ else
2564+ last = readl(&q->regs->dma_idx);
2565+
2566+ while (q->queued > 0 && q->tail != last) {
2567+ struct wed_wo_queue_entry *e;
2568+
2569+ e = &q->entry[q->tail];
2570+
2571+ dma_unmap_single(wo->hw->dev, e->dma_addr, e->dma_len,
2572+ DMA_TO_DEVICE);
2573+
2574+ if (e->skb)
2575+ dev_kfree_skb(e->skb);
2576+
2577+ memset(e, 0, sizeof(*e));
2578+
2579+ spin_lock_bh(&q->lock);
2580+ q->tail = (q->tail + 1) % q->ndesc;
2581+ q->queued--;
2582+ spin_unlock_bh(&q->lock);
2583+
2584+ if (!flush && q->tail == last)
2585+ last = readl(&q->regs->dma_idx);
2586+ }
2587+ spin_unlock_bh(&q->cleanup_lock);
2588+
2589+ if (flush) {
2590+ spin_lock_bh(&q->lock);
2591+ woif_q_sync_idx(wo, q);
2592+ woif_q_kick(wo, q, 0);
2593+ spin_unlock_bh(&q->lock);
2594+ }
2595+}
2596+
2597+static void
2598+woif_q_rx_clean(struct mtk_wed_wo *wo, struct wed_wo_queue *q)
2599+{
2600+}
2601+
2602+static void *
2603+woif_q_deq(struct mtk_wed_wo *wo, struct wed_wo_queue *q, bool flush,
2604+ int *len, u32 *info, bool *more)
2605+{
2606+ int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
2607+ struct wed_wo_queue_entry *e;
2608+ struct wed_wo_desc *desc;
2609+ int idx = q->tail;
2610+ void *buf;
2611+
2612+ *more = false;
2613+ if (!q->queued)
2614+ return NULL;
2615+
2616+ if (flush)
2617+ q->desc[idx].ctrl |= cpu_to_le32(WED_CTL_DMA_DONE);
2618+ else if (!(q->desc[idx].ctrl & cpu_to_le32(WED_CTL_DMA_DONE)))
2619+ return NULL;
2620+
2621+ q->tail = (q->tail + 1) % q->ndesc;
2622+ q->queued--;
2623+
2624+ desc = &q->desc[idx];
2625+ e = &q->entry[idx];
2626+
2627+ buf = e->buf;
2628+ if (len) {
2629+ u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
2630+ *len = FIELD_GET(WED_CTL_SD_LEN0, ctl);
2631+ *more = !(ctl & WED_CTL_LAST_SEC0);
2632+ }
2633+
2634+ if (info)
2635+ *info = le32_to_cpu(desc->info);
2636+	if (buf)
2637+ dma_unmap_single(wo->hw->dev, e->dma_addr, buf_len,
2638+ DMA_FROM_DEVICE);
2639+ e->skb = NULL;
2640+
2641+ return buf;
2642+}
2643+
2644+static int
2645+woif_q_init(struct mtk_wed_wo *dev,
2646+ int (*poll)(struct napi_struct *napi, int budget))
2647+{
2648+ init_dummy_netdev(&dev->napi_dev);
2649+ snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
2650+ "woif_q");
2651+
2652+ if (dev->q_rx.ndesc) {
2653+ netif_napi_add(&dev->napi_dev, &dev->napi, poll, 64);
2654+ woif_q_rx_fill(dev, &dev->q_rx);
2655+ woif_q_reset(dev, &dev->q_rx);
2656+ napi_enable(&dev->napi);
2657+ }
2658+
2659+ return 0;
2660+}
2661+
2662+void woif_q_rx_skb(struct mtk_wed_wo *wo, struct sk_buff *skb)
2663+{
2664+ struct wed_cmd_hdr *hdr = (struct wed_cmd_hdr *)skb->data;
2665+ int ret;
2666+
2667+ ret = mtk_wed_mcu_cmd_sanity_check(wo, skb);
2668+ if (ret)
2669+ goto free_skb;
2670+
2671+ if (WED_WO_CMD_FLAG_IS_RSP(hdr))
2672+ mtk_wed_mcu_rx_event(wo, skb);
2673+ else
2674+ mtk_wed_mcu_rx_unsolicited_event(wo, skb);
2675+
2676+ return;
2677+free_skb:
2678+ dev_kfree_skb(skb);
2679+}
2680+
2681+static int
2682+woif_q_tx_skb(struct mtk_wed_wo *wo, struct wed_wo_queue *q,
2683+ struct sk_buff *skb)
2684+{
2685+ struct wed_wo_queue_entry *entry;
2686+ struct wed_wo_desc *desc;
2687+ int len, ret, idx = -1;
2688+ dma_addr_t addr;
2689+ u32 ctrl = 0;
2690+
2691+ len = skb->len;
2692+ addr = dma_map_single(wo->hw->dev, skb->data, len, DMA_TO_DEVICE);
2693+ if (unlikely(dma_mapping_error(wo->hw->dev, addr)))
2694+ goto error;
2695+
2696+ /* packet tx, force trigger tx clean. */
2697+ if (q->queued + MTK_WED_WO_TXQ_FREE_THR >= q->ndesc - 1)
2698+ woif_q_tx_clean(wo, q, false);
2699+
2700+ if (q->queued + 1 >= q->ndesc - 1) {
2701+ ret = -ENOMEM;
2702+ goto error;
2703+ }
2704+
2705+ spin_lock_bh(&q->lock);
2706+
2707+ dma_sync_single_for_device(wo->hw->dev, addr, len,
2708+ DMA_TO_DEVICE);
2709+
2710+ idx = q->head;
2711+
2712+ desc = &q->desc[idx];
2713+ entry = &q->entry[idx];
2714+
2715+ entry->dma_addr = addr;
2716+ entry->dma_len = len;
2717+
2718+ ctrl = FIELD_PREP(WED_CTL_SD_LEN0, len);
2719+ ctrl |= WED_CTL_LAST_SEC0;
2720+ ctrl |= WED_CTL_DMA_DONE;
2721+
2722+ WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
2723+ WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
2724+
2725+ q->queued++;
2726+ q->entry[idx].skb = skb;
2727+
2728+ woif_q_kick(wo, q, 0);
2729+ wo->drv_ops->kickout(wo);
2730+
2731+ q->head = (q->head + 1) % q->ndesc;
2732+ spin_unlock_bh(&q->lock);
2733+ return 0;
2734+
2735+error:
2736+ dev_kfree_skb(skb);
2737+ return -ENOMEM;
2738+}
2739+
2740+static const struct wed_wo_queue_ops wo_queue_ops = {
2741+ .init = woif_q_init,
2742+ .alloc = woif_q_alloc,
2743+ .reset = woif_q_reset,
2744+ .tx_skb = woif_q_tx_skb,
2745+ .tx_clean = woif_q_tx_clean,
2746+ .rx_clean = woif_q_rx_clean,
2747+ .kick = woif_q_kick,
2748+};
2749+
2750+static int
2751+mtk_wed_wo_rx_process(struct mtk_wed_wo *wo, struct wed_wo_queue *q, int budget)
2752+{
2753+ int len, data_len, done = 0;
2754+ struct sk_buff *skb;
2755+ unsigned char *data;
2756+ bool more;
2757+
2758+ while (done < budget) {
2759+ u32 info;
2760+
2761+ data = woif_q_deq(wo, q, false, &len, &info, &more);
2762+ if (!data)
2763+ break;
2764+
2765+ data_len = SKB_WITH_OVERHEAD(q->buf_size);
2766+
2767+ if (data_len < len) {
2768+ skb_free_frag(data);
2769+ continue;
2770+ }
2771+
2772+ skb = build_skb(data, q->buf_size);
2773+ if (!skb) {
2774+ skb_free_frag(data);
2775+ continue;
2776+ }
2777+
2778+ __skb_put(skb, len);
2779+ done++;
2780+
2781+ woif_q_rx_skb(wo, skb);
2782+ }
2783+
2784+ woif_q_rx_fill_process(wo, q);
2785+
2786+ return done;
2787+}
2788+
2789+void mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, bool set,
2790+ u32 clear, u32 val)
2791+{
2792+ unsigned long flags;
2793+
2794+ spin_lock_irqsave(&wo->ccif.irq_lock, flags);
2795+ wo->ccif.irqmask &= ~clear;
2796+ wo->ccif.irqmask |= val;
2797+ if (set)
2798+ wo->drv_ops->set_isr(wo, wo->ccif.irqmask);
2799+
2800+ spin_unlock_irqrestore(&wo->ccif.irq_lock, flags);
2801+}
2802+
2803+static inline void mtk_wed_wo_set_ack_mask(struct mtk_wed_wo *wo, u32 mask)
2804+{
2805+ wo->drv_ops->set_ack(wo, mask);
2806+}
2807+
2808+static void mtk_wed_wo_poll_complete(struct mtk_wed_wo *wo)
2809+{
2810+ mtk_wed_wo_set_ack_mask(wo, wo->ccif.q_int_mask);
2811+ mtk_wed_wo_isr_enable(wo, wo->ccif.q_int_mask);
2812+}
2813+
2814+int mtk_wed_wo_rx_poll(struct napi_struct *napi, int budget)
2815+{
2816+ struct mtk_wed_wo *wo;
2817+ int done = 0, cur;
2818+
2819+ wo = container_of(napi->dev, struct mtk_wed_wo, napi_dev);
2820+
2821+ rcu_read_lock();
2822+
2823+ do {
2824+ cur = mtk_wed_wo_rx_process(wo, &wo->q_rx, budget - done);
2825+ /* rx packet handle */
2826+ done += cur;
2827+ } while (cur && done < budget);
2828+
2829+ rcu_read_unlock();
2830+
2831+ if (done < budget && napi_complete(napi))
2832+ mtk_wed_wo_poll_complete(wo);
2833+
2834+ return done;
2835+}
2836+
2837+static void mtk_wed_wo_isr_tasklet(unsigned long data)
2838+{
2839+ struct mtk_wed_wo *wo = (struct mtk_wed_wo *)data;
2840+ u32 intr, mask;
2841+
2842+ /* disable isr */
2843+ wo->drv_ops->set_isr(wo, 0);
2844+
2845+ intr = wo->drv_ops->get_csr(wo);
2846+ intr &= wo->ccif.irqmask;
2847+
2848+ mask = intr & (wo->ccif.q_int_mask | wo->ccif.q_exep_mask);
2849+ mtk_wed_wo_isr_disable(wo, mask);
2850+
2851+ if (intr & wo->ccif.q_int_mask)
2852+ napi_schedule(&wo->napi);
2853+
2854+ if (intr & wo->ccif.q_exep_mask) {
2855+ /* todo */
2856+ }
2857+}
2858+
2859+static irqreturn_t mtk_wed_wo_isr_handler(int irq, void *wo_instance)
2860+{
2861+ struct mtk_wed_wo *wo = wo_instance;
2862+
2863+ wo->drv_ops->set_isr(wo, 0);
2864+
2865+ tasklet_schedule(&wo->irq_tasklet);
2866+
2867+ return IRQ_HANDLED;
2868+}
2869+
2870+int mtk_wed_wo_init(struct mtk_wed_hw *hw)
2871+{
2872+ struct mtk_wed_wo *wo;
2873+ int ret = 0;
2874+
2875+ wo = kzalloc(sizeof(struct mtk_wed_wo), GFP_KERNEL);
2876+ if (!wo)
2877+ return -ENOMEM;
2878+
2879+ wo->hw = hw;
2880+ wo->queue_ops = &wo_queue_ops;
2881+ hw->wed_wo = wo;
2882+
2883+ tasklet_init(&wo->irq_tasklet, mtk_wed_wo_isr_tasklet,
2884+ (unsigned long)wo);
2885+
2886+ skb_queue_head_init(&wo->mcu.res_q);
2887+ init_waitqueue_head(&wo->mcu.wait);
2888+ mutex_init(&wo->mcu.mutex);
2889+
2890+ ret = wed_wo_hardware_init(wo, mtk_wed_wo_isr_handler);
2891+ if (ret)
2892+ goto error;
2893+
2894+ /* fw download */
2895+ ret = wed_wo_mcu_init(wo);
2896+ if (ret)
2897+ goto error;
2898+
2899+ ret = mtk_wed_exception_init(wo);
2900+ if (ret)
2901+ goto error;
2902+
2903+ return ret;
2904+
2905+error:
2906+ kfree(wo);
2907+
2908+ return ret;
2909+}
2910+
2911+void mtk_wed_wo_exit(struct mtk_wed_hw *hw)
2912+{
2913+/*
2914+#ifdef CONFIG_WED_HW_RRO_SUPPORT
2915+ woif_bus_exit(woif);
2916+ wo_exception_exit(woif);
2917+#endif
2918+*/
2919+ struct mtk_wed_wo *wo = hw->wed_wo;
2920+
2921+ if (wo->exp.log) {
2922+ dma_unmap_single(wo->hw->dev, wo->exp.phys, wo->exp.log_size, DMA_FROM_DEVICE);
2923+ kfree(wo->exp.log);
2924+ }
2925+
2926+	kfree(wo);
2927diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.h b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
2928new file mode 100644
2929index 000000000..00b39e779
2930--- /dev/null
2931+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
2932@@ -0,0 +1,334 @@
2933+/* SPDX-License-Identifier: GPL-2.0-only */
2934+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
2935+
2936+#ifndef __MTK_WED_WO_H
2937+#define __MTK_WED_WO_H
2938+
2939+#include <linux/netdevice.h>
2940+#include <linux/skbuff.h>
2941+#include "mtk_wed.h"
2942+
2943+#define WED_CTL_SD_LEN1 GENMASK(13, 0)
2944+#define WED_CTL_LAST_SEC1 BIT(14)
2945+#define WED_CTL_BURST BIT(15)
2946+#define WED_CTL_SD_LEN0_SHIFT 16
2947+#define WED_CTL_SD_LEN0 GENMASK(29, 16)
2948+#define WED_CTL_LAST_SEC0 BIT(30)
2949+#define WED_CTL_DMA_DONE BIT(31)
2950+#define WED_INFO_WINFO GENMASK(15, 0)
2951+
2952+#define MTK_WED_WO_TXQ_FREE_THR 10
2953+
2954+#define WED_WO_PROFILE_MAX_LVL 6
2955+
2956+
2957+enum mtk_wed_fw_region_id {
2958+ WO_REGION_EMI = 0,
2959+ WO_REGION_ILM,
2960+ WO_REGION_DATA,
2961+ WO_REGION_BOOT,
2962+ __WO_REGION_MAX
2963+};
2964+
2965+struct wed_wo_profile_stat {
2966+ u32 bound;
2967+ u32 record;
2968+};
2969+
2970+#define PROFILE_STAT(record, val) do { \
2971+ u8 lvl = 0; \
2972+ while (lvl < WED_WO_PROFILE_MAX_LVL) { \
2973+ if (val < record[lvl].bound) { \
2974+ record[lvl].record++; \
2975+ break; \
2976+ } \
2977+ lvl++; \
2978+ } \
2979+ } while (0)
2980+
2981+/* align with wo report structure */
2982+struct wed_wo_log {
2983+ u32 sn;
2984+ u32 total;
2985+ u32 rro;
2986+ u32 mod;
2987+};
2988+
2989+struct wed_wo_rxcnt {
2990+ u16 wlan_idx;
2991+ u16 tid;
2992+ u32 rx_pkt_cnt;
2993+ u32 rx_byte_cnt;
2994+ u32 rx_err_cnt;
2995+ u32 rx_drop_cnt;
2996+};
2997+
2998+struct wed_wo_queue {
2999+ struct wed_wo_queue_regs *regs;
3000+
3001+ spinlock_t lock;
3002+ spinlock_t cleanup_lock;
3003+ struct wed_wo_queue_entry *entry;
3004+ struct wed_wo_desc *desc;
3005+
3006+ u16 first;
3007+ u16 head;
3008+ u16 tail;
3009+ int ndesc;
3010+ int queued;
3011+ int buf_size;
3012+
3013+ u8 hw_idx;
3014+ u8 qid;
3015+ u8 flags;
3016+
3017+ dma_addr_t desc_dma;
3018+ struct page_frag_cache rx_page;
3019+};
3020+
3021+
3022+struct wed_wo_mmio {
3023+ struct regmap *regs;
3024+
3025+ spinlock_t irq_lock;
3026+ u8 irq;
3027+ u32 irqmask;
3028+
3029+ u32 q_int_mask;
3030+ u32 q_exep_mask;
3031+};
3032+
3033+struct wed_wo_mcu {
3034+ struct mutex mutex;
3035+ u32 msg_seq;
3036+ int timeout;
3037+
3038+ struct sk_buff_head res_q;
3039+ wait_queue_head_t wait;
3040+};
3041+
3042+struct wed_wo_exception {
3043+ void* log;
3044+ int log_size;
3045+ dma_addr_t phys;
3046+};
3047+
3048+struct wed_wo_queue_regs {
3049+ u32 desc_base;
3050+ u32 ring_size;
3051+ u32 cpu_idx;
3052+ u32 dma_idx;
3053+};
3054+
3055+struct wed_wo_desc {
3056+ __le32 buf0;
3057+ __le32 ctrl;
3058+ __le32 buf1;
3059+ __le32 info;
3060+ __le32 reserved[4];
3061+} __packed __aligned(32);
3062+
3063+struct wed_wo_queue_entry {
3064+ union {
3065+ void *buf;
3066+ struct sk_buff *skb;
3067+ };
3068+
3069+	dma_addr_t dma_addr;
3070+ u16 dma_len;
3071+ u16 wcid;
3072+ bool skip_buf0:1;
3073+ bool skip_buf1:1;
3074+ bool done:1;
3075+};
3076+
3077+struct wo_cmd_rxcnt_t {
3078+ u16 wlan_idx;
3079+ u16 tid;
3080+ u32 rx_pkt_cnt;
3081+ u32 rx_byte_cnt;
3082+ u32 rx_err_cnt;
3083+ u32 rx_drop_cnt;
3084+};
3085+
3086+struct wo_cmd_query {
3087+ u32 query0;
3088+ u32 query1;
3089+};
3090+
3091+struct wed_cmd_hdr {
3092+ /*DW0*/
3093+ u8 ver;
3094+ u8 cmd_id;
3095+ u16 length;
3096+
3097+ /*DW1*/
3098+ u16 uni_id;
3099+ u16 flag;
3100+
3101+ /*DW2*/
3102+ int status;
3103+
3104+ /*DW3*/
3105+ u8 reserved[20];
3106+};
3107+
3108+struct mtk_wed_fw_region {
3109+ void *addr;
3110+ u32 addr_pa;
3111+ u32 size;
3112+ u32 shared;
3113+};
3114+
3115+struct wed_wo_queue_ops;
3116+struct wed_wo_drv_ops;
3117+struct wed_wo_mcu_ops;
3118+
3119+struct wo_rx_total_cnt {
3120+ u64 rx_pkt_cnt;
3121+ u64 rx_byte_cnt;
3122+ u64 rx_err_cnt;
3123+ u64 rx_drop_cnt;
3124+};
3125+
3126+struct mtk_wed_wo {
3127+ struct mtk_wed_hw *hw;
3128+
3129+ struct wed_wo_mmio ccif;
3130+ struct wed_wo_mcu mcu;
3131+ struct wed_wo_exception exp;
3132+
3133+ const struct wed_wo_drv_ops *drv_ops;
3134+ const struct wed_wo_mcu_ops *mcu_ops;
3135+ const struct wed_wo_queue_ops *queue_ops;
3136+
3137+ struct net_device napi_dev;
3138+ spinlock_t rx_lock;
3139+ struct napi_struct napi;
3140+ struct sk_buff_head rx_skb;
3141+ struct wed_wo_queue q_rx;
3142+ struct tasklet_struct irq_tasklet;
3143+
3144+ struct wed_wo_queue q_tx;
3145+
3146+ struct mtk_wed_fw_region region[__WO_REGION_MAX];
3147+
3148+ struct wed_wo_profile_stat total[WED_WO_PROFILE_MAX_LVL];
3149+ struct wed_wo_profile_stat mod[WED_WO_PROFILE_MAX_LVL];
3150+ struct wed_wo_profile_stat rro[WED_WO_PROFILE_MAX_LVL];
3151+ char dirname[4];
3152+ struct wo_rx_total_cnt wo_rxcnt[8][544];
3153+};
3154+
3155+struct wed_wo_queue_ops {
3156+ int (*init)(struct mtk_wed_wo *wo,
3157+ int (*poll)(struct napi_struct *napi, int budget));
3158+
3159+ int (*alloc)(struct mtk_wed_wo *wo, struct wed_wo_queue *q,
3160+		     int n_desc, int bufsize, int idx,
3161+ struct wed_wo_queue_regs *regs);
3162+
3163+ void (*reset)(struct mtk_wed_wo *wo, struct wed_wo_queue *q);
3164+
3165+ int (*tx_skb)(struct mtk_wed_wo *wo, struct wed_wo_queue *q,
3166+ struct sk_buff *skb);
3167+ int (*tx_skb1)(struct mtk_wed_wo *wo, struct wed_wo_queue *q,
3168+ u8 *msg, u32 msg_len);
3169+ void (*tx_clean)(struct mtk_wed_wo *wo, struct wed_wo_queue *q,
3170+ bool flush);
3171+
3172+ void (*rx_clean)(struct mtk_wed_wo *wo, struct wed_wo_queue *q);
3173+
3174+ void (*kick)(struct mtk_wed_wo *wo, struct wed_wo_queue *q, int offset);
3175+};
3176+
3177+struct wed_wo_drv_ops {
3178+ void (*kickout)(struct mtk_wed_wo *wo);
3179+ void (*set_ack)(struct mtk_wed_wo *wo, u32 mask);
3180+ void (*set_isr)(struct mtk_wed_wo *wo, u32 mask);
3181+ u32 (*get_csr)(struct mtk_wed_wo *wo);
3182+ int (*tx_prepare_skb)(struct mtk_wed_wo *wo);
3183+ bool (*check_excpetion)(struct mtk_wed_wo *wo);
3184+ void (*clear_int)(struct mtk_wed_wo *wo, u32 mask);
3185+};
3186+
3187+struct wed_wo_mcu_ops {
3188+ u32 headroom;
3189+
3190+ int (*mcu_skb_send_msg)(struct mtk_wed_wo *wo, int to_id,
3191+ int cmd, struct sk_buff *skb,
3192+ int *seq, bool wait_resp);
3193+
3194+ int (*mcu_parse_response)(struct mtk_wed_wo *wo, int cmd,
3195+ struct sk_buff *skb, int seq);
3196+
3197+ int (*mcu_restart)(struct mtk_wed_wo *wo);
3198+};
3199+
3200+#define mtk_wed_wo_q_init(wo, ...) (wo)->queue_ops->init((wo), __VA_ARGS__)
3201+#define mtk_wed_wo_q_alloc(wo, ...) (wo)->queue_ops->alloc((wo), __VA_ARGS__)
3202+#define mtk_wed_wo_q_reset(wo, ...) (wo)->queue_ops->reset((wo), __VA_ARGS__)
3203+#define mtk_wed_wo_q_tx_skb(wo, ...) (wo)->queue_ops->tx_skb((wo), __VA_ARGS__)
3204+#define mtk_wed_wo_q_tx_skb1(wo, ...) (wo)->queue_ops->tx_skb1((wo), __VA_ARGS__)
3205+#define mtk_wed_wo_q_tx_clean(wo, ...) (wo)->queue_ops->tx_clean((wo), __VA_ARGS__)
3206+#define mtk_wed_wo_q_rx_clean(wo, ...) (wo)->queue_ops->rx_clean((wo), __VA_ARGS__)
3207+#define mtk_wed_wo_q_kick(wo, ...) (wo)->queue_ops->kick((wo), __VA_ARGS__)
3208+
3209+enum {
3210+	WARP_CMD_FLAG_RSP = 1 << 0, /* is response */
3211+	WARP_CMD_FLAG_NEED_RSP = 1 << 1, /* need response */
3212+ WARP_CMD_FLAG_FROM_TO_WO = 1 << 2, /* send between host and wo */
3213+};
3214+
3215+#define WED_WO_CMD_FLAG_IS_RSP(_hdr) ((_hdr)->flag & (WARP_CMD_FLAG_RSP))
3216+#define WED_WO_CMD_FLAG_SET_RSP(_hdr) ((_hdr)->flag |= (WARP_CMD_FLAG_RSP))
3217+#define WED_WO_CMD_FLAG_IS_NEED_RSP(_hdr) ((_hdr)->flag & (WARP_CMD_FLAG_NEED_RSP))
3218+#define WED_WO_CMD_FLAG_SET_NEED_RSP(_hdr) ((_hdr)->flag |= (WARP_CMD_FLAG_NEED_RSP))
3219+#define WED_WO_CMD_FLAG_IS_FROM_TO_WO(_hdr) ((_hdr)->flag & (WARP_CMD_FLAG_FROM_TO_WO))
3220+#define WED_WO_CMD_FLAG_SET_FROM_TO_WO(_hdr) ((_hdr)->flag |= (WARP_CMD_FLAG_FROM_TO_WO))
3221+
3222+void mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, bool set,
3223+ u32 clear, u32 val);
3224+
3225+static inline void mtk_wed_wo_isr_enable(struct mtk_wed_wo *wo, u32 mask)
3226+{
3227+ mtk_wed_wo_set_isr_mask(wo, false, 0, mask);
3228+
3229+ tasklet_schedule(&wo->irq_tasklet);
3230+}
3231+
3232+static inline void mtk_wed_wo_isr_disable(struct mtk_wed_wo *wo, u32 mask)
3233+{
3234+ mtk_wed_wo_set_isr_mask(wo, true, mask, 0);
3235+}
3236+
3237+static inline void
3238+wo_w32(struct mtk_wed_wo *dev, u32 reg, u32 val)
3239+{
3240+ writel(val, dev->region[WO_REGION_BOOT].addr + reg);
3241+}
3242+
3243+static inline u32
3244+wo_r32(struct mtk_wed_wo *dev, u32 reg)
3245+{
3246+ return readl(dev->region[WO_REGION_BOOT].addr + reg);
3247+}
3248+static inline void
3249+woccif_w32(struct mtk_wed_wo *dev, u32 reg, u32 val)
3250+{
3251+ regmap_write(dev->ccif.regs, reg, val);
3252+}
3253+
3254+static inline u32
3255+woccif_r32(struct mtk_wed_wo *dev, u32 reg)
3256+{
3257+ unsigned int val;
3258+
3259+ regmap_read(dev->ccif.regs, reg, &val);
3260+
3261+ return val;
3262+}
3263+
3264+int mtk_wed_wo_init(struct mtk_wed_hw *hw);
3265+#endif
3266+
3267diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
3268index 24742604b..b6b6823ae 100644
3269--- a/include/linux/soc/mediatek/mtk_wed.h
3270+++ b/include/linux/soc/mediatek/mtk_wed.h
3271@@ -7,6 +7,9 @@
3272 #include <linux/pci.h>
3273
3274 #define MTK_WED_TX_QUEUES 2
3275+#define MTK_WED_RX_QUEUES 2
3276+
3277+#define WED_WO_STA_REC 0x6
3278
3279 enum {
3280 MTK_NO_WED,
3281@@ -33,6 +36,24 @@ struct mtk_wed_ring {
3282 void __iomem *wpdma;
3283 };
3284
3285+struct mtk_rxbm_desc {
3286+ __le32 buf0;
3287+ __le32 token;
3288+} __packed __aligned(4);
3289+
3290+struct dma_buf {
3291+ int size;
3292+ void **pages;
3293+ struct mtk_wdma_desc *desc;
3294+ dma_addr_t desc_phys;
3295+};
3296+
3297+struct dma_entry {
3298+ int size;
3299+ struct mtk_rxbm_desc *desc;
3300+ dma_addr_t desc_phys;
3301+};
3302+
3303 struct mtk_wed_device {
3304 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
3305 const struct mtk_wed_ops *ops;
3306@@ -46,19 +67,27 @@ struct mtk_wed_device {
3307 struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES];
3308 struct mtk_wed_ring txfree_ring;
3309 struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
3310+ struct mtk_wed_ring rx_ring[MTK_WED_RX_QUEUES];
3311+ struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES];
3312+
3313+ struct dma_buf buf_ring;
3314+ struct dma_entry rx_buf_ring;
3315+ struct page_frag_cache rx_page;
3316
3317 struct {
3318- int size;
3319- void **pages;
3320- struct mtk_wdma_desc *desc;
3321- dma_addr_t desc_phys;
3322- } buf_ring;
3323+ struct mtk_wed_ring rro_ring;
3324+ void __iomem *rro_desc;
3325+ dma_addr_t miod_desc_phys;
3326+ dma_addr_t fdbk_desc_phys;
3327+ u32 mcu_view_miod;
3328+ } rro;
3329
3330 /* filled by driver: */
3331 struct {
3332 struct pci_dev *pci_dev;
3333 void __iomem *base;
3334 u32 bus_type;
3335+ u32 phy_base;
3336
3337 union {
3338 u32 wpdma_phys;
3339@@ -67,16 +96,25 @@ struct mtk_wed_device {
3340 u32 wpdma_mask;
3341 u32 wpdma_tx;
3342 u32 wpdma_txfree;
3343+ u32 wpdma_rx_glo;
3344+ u32 wpdma_rx;
3345
3346 u8 tx_tbit[MTK_WED_TX_QUEUES];
3347+ u8 rx_tbit[MTK_WED_RX_QUEUES];
3348 u8 txfree_tbit;
3349
3350 u16 token_start;
3351 unsigned int nbuf;
3352+ unsigned int rx_nbuf;
3353+ unsigned int rx_pkt;
3354+ unsigned int rx_pkt_size;
3355
3356 u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
3357 int (*offload_enable)(struct mtk_wed_device *wed);
3358 void (*offload_disable)(struct mtk_wed_device *wed);
3359+ u32 (*init_rx_buf)(struct mtk_wed_device *wed,
3360+ int pkt_num);
3361+ void (*release_rx_buf)(struct mtk_wed_device *wed);
3362 } wlan;
3363 #endif
3364 };
3365@@ -87,6 +125,10 @@ struct mtk_wed_ops {
3366 void __iomem *regs);
3367 int (*txfree_ring_setup)(struct mtk_wed_device *dev,
3368 void __iomem *regs);
3369+ int (*rx_ring_setup)(struct mtk_wed_device *dev, int ring,
3370+ void __iomem *regs);
3371+ int (*msg_update)(struct mtk_wed_device *dev, int cmd_id,
3372+ void *data, int len);
3373 void (*detach)(struct mtk_wed_device *dev);
3374
3375 void (*stop)(struct mtk_wed_device *dev);
3376@@ -98,6 +140,8 @@ struct mtk_wed_ops {
3377
3378 u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask);
3379 void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
3380+ void (*ppe_check)(struct mtk_wed_device *dev, struct sk_buff *skb,
3381+ u32 reason, u32 hash);
3382 };
3383
3384 extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
3385@@ -130,6 +174,10 @@ mtk_wed_device_attach(struct mtk_wed_device *dev)
3386 (_dev)->ops->tx_ring_setup(_dev, _ring, _regs)
3387 #define mtk_wed_device_txfree_ring_setup(_dev, _regs) \
3388 (_dev)->ops->txfree_ring_setup(_dev, _regs)
3389+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) \
3390+ (_dev)->ops->rx_ring_setup(_dev, _ring, _regs)
3391+#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) \
3392+ (_dev)->ops->msg_update(_dev, _id, _msg, _len)
3393 #define mtk_wed_device_reg_read(_dev, _reg) \
3394 (_dev)->ops->reg_read(_dev, _reg)
3395 #define mtk_wed_device_reg_write(_dev, _reg, _val) \
3396@@ -138,6 +186,8 @@ mtk_wed_device_attach(struct mtk_wed_device *dev)
3397 (_dev)->ops->irq_get(_dev, _mask)
3398 #define mtk_wed_device_irq_set_mask(_dev, _mask) \
3399 (_dev)->ops->irq_set_mask(_dev, _mask)
3400+#define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) \
3401+ (_dev)->ops->ppe_check(_dev, _skb, _reason, _hash)
3402 #else
3403 static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
3404 {
3405@@ -147,10 +197,13 @@ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
3406 #define mtk_wed_device_start(_dev, _mask) do {} while (0)
3407 #define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV
3408 #define mtk_wed_device_txfree_ring_setup(_dev, _ring, _regs) -ENODEV
3409+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) -ENODEV
3410+#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) -ENODEV
3411 #define mtk_wed_device_reg_read(_dev, _reg) 0
3412 #define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
3413 #define mtk_wed_device_irq_get(_dev, _mask) 0
3414 #define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0)
3415+#define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash)  do {} while (0)
3416 #endif
3417
3418 #endif
3419--
34202.18.0
3421