blob: c435e05aaf1fb584bd95d8ee93934202d9e7d344 [file] [log] [blame]
developer8cb3ac72022-07-04 10:55:14 +08001From bc8244ada5c668374813f7f9b73d990bf2695aaf Mon Sep 17 00:00:00 2001
2From: Sujuan Chen <sujuan.chen@mediatek.com>
3Date: Wed, 15 Jun 2022 14:38:54 +0800
4Subject: [PATCH 8/8] 9997-add-wed-rx-support-for-mt7986
5
6Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
7---
8 arch/arm64/boot/dts/mediatek/mt7986a.dtsi | 42 +-
9 arch/arm64/boot/dts/mediatek/mt7986b.dtsi | 42 +-
10 drivers/net/ethernet/mediatek/Makefile | 2 +-
11 drivers/net/ethernet/mediatek/mtk_wed.c | 544 +++++++++++++++--
12 drivers/net/ethernet/mediatek/mtk_wed.h | 50 ++
13 drivers/net/ethernet/mediatek/mtk_wed_ccif.c | 121 ++++
14 drivers/net/ethernet/mediatek/mtk_wed_ccif.h | 45 ++
15 .../net/ethernet/mediatek/mtk_wed_debugfs.c | 90 +++
16 drivers/net/ethernet/mediatek/mtk_wed_mcu.c | 561 ++++++++++++++++++
17 drivers/net/ethernet/mediatek/mtk_wed_mcu.h | 125 ++++
18 drivers/net/ethernet/mediatek/mtk_wed_regs.h | 145 ++++-
19 drivers/net/ethernet/mediatek/mtk_wed_wo.c | 548 +++++++++++++++++
20 drivers/net/ethernet/mediatek/mtk_wed_wo.h | 334 +++++++++++
21 include/linux/soc/mediatek/mtk_wed.h | 63 +-
22 14 files changed, 2643 insertions(+), 69 deletions(-)
23 mode change 100644 => 100755 drivers/net/ethernet/mediatek/mtk_wed.c
24 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_ccif.c
25 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_ccif.h
26 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_mcu.c
27 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_mcu.h
28 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_wo.c
29 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_wo.h
30
31diff --git a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
32index 644255b35..ddcc0b809 100644
33--- a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
34+++ b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
35@@ -65,6 +65,12 @@
36 interrupt-parent = <&gic>;
37 interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
38 mediatek,wed_pcie = <&wed_pcie>;
39+ mediatek,ap2woccif = <&ap2woccif0>;
40+ mediatek,wocpu_ilm = <&wocpu0_ilm>;
41+ mediatek,wocpu_dlm = <&wocpu0_dlm>;
42+ mediatek,wocpu_boot = <&cpu_boot>;
43+ mediatek,wocpu_emi = <&wocpu0_emi>;
44+ mediatek,wocpu_data = <&wocpu_data>;
45 };
46
47 wed1: wed@15011000 {
48@@ -74,15 +80,26 @@
49 interrupt-parent = <&gic>;
50 interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
51 mediatek,wed_pcie = <&wed_pcie>;
52+ mediatek,ap2woccif = <&ap2woccif1>;
53+ mediatek,wocpu_ilm = <&wocpu1_ilm>;
54+ mediatek,wocpu_dlm = <&wocpu1_dlm>;
55+ mediatek,wocpu_boot = <&cpu_boot>;
56+ mediatek,wocpu_emi = <&wocpu1_emi>;
57+ mediatek,wocpu_data = <&wocpu_data>;
58 };
59
60- ap2woccif: ap2woccif@151A5000 {
61- compatible = "mediatek,ap2woccif";
62- reg = <0 0x151A5000 0 0x1000>,
63- <0 0x151AD000 0 0x1000>;
64+ ap2woccif0: ap2woccif@151A5000 {
65+ compatible = "mediatek,ap2woccif", "syscon";
66+ reg = <0 0x151A5000 0 0x1000>;
67 interrupt-parent = <&gic>;
68- interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
69- <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
70+ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
71+ };
72+
73+ ap2woccif1: ap2woccif@151ad000 {
74+ compatible = "mediatek,ap2woccif", "syscon";
75+ reg = <0 0x151AD000 0 0x1000>;
76+ interrupt-parent = <&gic>;
77+ interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
78 };
79
80 wocpu0_ilm: wocpu0_ilm@151E0000 {
81@@ -95,10 +112,17 @@
82 reg = <0 0x151F0000 0 0x8000>;
83 };
84
85- wocpu_dlm: wocpu_dlm@151E8000 {
86+ wocpu0_dlm: wocpu_dlm@151E8000 {
87+ compatible = "mediatek,wocpu_dlm";
88+ reg = <0 0x151E8000 0 0x2000>;
89+
90+ resets = <&ethsysrst 0>;
91+ reset-names = "wocpu_rst";
92+ };
93+
94+ wocpu1_dlm: wocpu_dlm@151f8000 {
95 compatible = "mediatek,wocpu_dlm";
96- reg = <0 0x151E8000 0 0x2000>,
97- <0 0x151F8000 0 0x2000>;
98+ reg = <0 0x151F8000 0 0x2000>;
99
100 resets = <&ethsysrst 0>;
101 reset-names = "wocpu_rst";
102diff --git a/arch/arm64/boot/dts/mediatek/mt7986b.dtsi b/arch/arm64/boot/dts/mediatek/mt7986b.dtsi
103index 67bf86f6a..6710b388b 100644
104--- a/arch/arm64/boot/dts/mediatek/mt7986b.dtsi
105+++ b/arch/arm64/boot/dts/mediatek/mt7986b.dtsi
106@@ -65,6 +65,12 @@
107 interrupt-parent = <&gic>;
108 interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
109 mediatek,wed_pcie = <&wed_pcie>;
110+ mediatek,ap2woccif = <&ap2woccif0>;
111+ mediatek,wocpu_ilm = <&wocpu0_ilm>;
112+ mediatek,wocpu_dlm = <&wocpu0_dlm>;
113+ mediatek,wocpu_boot = <&cpu_boot>;
114+ mediatek,wocpu_emi = <&wocpu0_emi>;
115+ mediatek,wocpu_data = <&wocpu_data>;
116 };
117
118 wed1: wed@15011000 {
119@@ -74,15 +80,26 @@
120 interrupt-parent = <&gic>;
121 interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
122 mediatek,wed_pcie = <&wed_pcie>;
123+ mediatek,ap2woccif = <&ap2woccif1>;
124+ mediatek,wocpu_ilm = <&wocpu1_ilm>;
125+ mediatek,wocpu_dlm = <&wocpu1_dlm>;
126+ mediatek,wocpu_boot = <&cpu_boot>;
127+ mediatek,wocpu_emi = <&wocpu1_emi>;
128+ mediatek,wocpu_data = <&wocpu_data>;
129 };
130
131- ap2woccif: ap2woccif@151A5000 {
132- compatible = "mediatek,ap2woccif";
133- reg = <0 0x151A5000 0 0x1000>,
134- <0 0x151AD000 0 0x1000>;
135+ ap2woccif0: ap2woccif@151A5000 {
136+ compatible = "mediatek,ap2woccif", "syscon";
137+ reg = <0 0x151A5000 0 0x1000>;
138 interrupt-parent = <&gic>;
139- interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
140- <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
141+ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
142+ };
143+
144+ ap2woccif1: ap2woccif@151ad000 {
145+ compatible = "mediatek,ap2woccif", "syscon";
146+ reg = <0 0x151AD000 0 0x1000>;
147+ interrupt-parent = <&gic>;
148+ interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
149 };
150
151 wocpu0_ilm: wocpu0_ilm@151E0000 {
152@@ -95,10 +112,17 @@
153 reg = <0 0x151F0000 0 0x8000>;
154 };
155
156- wocpu_dlm: wocpu_dlm@151E8000 {
157+ wocpu0_dlm: wocpu_dlm@151E8000 {
158+ compatible = "mediatek,wocpu_dlm";
159+ reg = <0 0x151E8000 0 0x2000>;
160+
161+ resets = <&ethsysrst 0>;
162+ reset-names = "wocpu_rst";
163+ };
164+
165+ wocpu1_dlm: wocpu_dlm@151f8000 {
166 compatible = "mediatek,wocpu_dlm";
167- reg = <0 0x151E8000 0 0x2000>,
168- <0 0x151F8000 0 0x2000>;
169+ reg = <0 0x151F8000 0 0x2000>;
170
171 resets = <&ethsysrst 0>;
172 reset-names = "wocpu_rst";
173diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
174index 3528f1b3c..0c724a55c 100644
175--- a/drivers/net/ethernet/mediatek/Makefile
176+++ b/drivers/net/ethernet/mediatek/Makefile
177@@ -10,5 +10,5 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o
178 ifdef CONFIG_DEBUG_FS
179 mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
180 endif
181-obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
182+obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o mtk_wed_wo.o mtk_wed_mcu.o mtk_wed_ccif.o
183 obj-$(CONFIG_NET_MEDIATEK_HNAT) += mtk_hnat/
184diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
185old mode 100644
186new mode 100755
187index 48b0353bb..c4aab12b0
188--- a/drivers/net/ethernet/mediatek/mtk_wed.c
189+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
190@@ -13,11 +13,19 @@
191 #include <linux/debugfs.h>
192 #include <linux/iopoll.h>
193 #include <linux/soc/mediatek/mtk_wed.h>
194+
195 #include "mtk_eth_soc.h"
196 #include "mtk_wed_regs.h"
197 #include "mtk_wed.h"
198 #include "mtk_ppe.h"
199-
200+#include "mtk_wed_mcu.h"
201+#include "mtk_wed_wo.h"
202+
203+struct wo_cmd_ring {
204+ u32 q_base;
205+ u32 cnt;
206+ u32 unit;
207+};
208 static struct mtk_wed_hw *hw_list[2];
209 static DEFINE_MUTEX(hw_lock);
210
211@@ -51,6 +59,12 @@ wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
212 wdma_m32(dev, reg, 0, mask);
213 }
214
215+static void
216+wdma_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
217+{
218+ wdma_m32(dev, reg, mask, 0);
219+}
220+
221 static u32
222 mtk_wed_read_reset(struct mtk_wed_device *dev)
223 {
224@@ -68,6 +82,48 @@ mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
225 WARN_ON_ONCE(1);
226 }
227
228+static void
229+mtk_wed_wo_reset(struct mtk_wed_device *dev)
230+{
231+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
232+ u8 state = WO_STATE_DISABLE;
233+ u8 state_done = WOIF_DISABLE_DONE;
234+ void __iomem *reg;
235+ u32 value;
236+ unsigned long timeout = jiffies + WOCPU_TIMEOUT;
237+
238+ mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, WO_CMD_CHANGE_STATE,
239+ &state, sizeof(state), false);
240+
241+ do {
242+ value = wed_r32(dev, MTK_WED_SCR0 + 4 * WED_DUMMY_CR_WO_STATUS);
243+ } while (value != state_done && !time_after(jiffies, timeout));
244+
245+ reg = ioremap(WOCPU_MCUSYS_RESET_ADDR, 4); /* NOTE(review): ioremap() can return NULL - confirm a check is not needed here */
246+ value = readl(reg);
247+ switch (dev->hw->index) {
248+ case 0:
249+ value |= WOCPU_WO0_MCUSYS_RESET_MASK;
250+ writel(value, reg);
251+ value &= ~WOCPU_WO0_MCUSYS_RESET_MASK;
252+ writel(value, reg);
253+ break;
254+ case 1:
255+ value |= WOCPU_WO1_MCUSYS_RESET_MASK;
256+ writel(value, reg);
257+ value &= ~WOCPU_WO1_MCUSYS_RESET_MASK;
258+ writel(value, reg);
259+ break;
260+ default:
261+ dev_err(dev->hw->dev, "wrong mtk_wed%d\n",
262+ dev->hw->index);
263+
264+ break;
265+ }
266+
267+ iounmap(reg);
268+}
269+
270 static struct mtk_wed_hw *
271 mtk_wed_assign(struct mtk_wed_device *dev)
272 {
273@@ -205,6 +261,42 @@ free_pagelist:
274 kfree(page_list);
275 }
276
277+static int
278+mtk_wed_rx_bm_alloc(struct mtk_wed_device *dev)
279+{
280+ struct mtk_rxbm_desc *desc;
281+ dma_addr_t desc_phys;
282+ int ring_size;
283+
284+ ring_size = dev->wlan.rx_nbuf;
285+ dev->rx_buf_ring.size = ring_size;
286+ desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
287+ &desc_phys, GFP_KERNEL);
288+ if (!desc)
289+ return -ENOMEM;
290+
291+ dev->rx_buf_ring.desc = desc;
292+ dev->rx_buf_ring.desc_phys = desc_phys;
293+
294+ dev->wlan.init_rx_buf(dev, dev->wlan.rx_pkt);
295+ return 0;
296+}
297+
298+static void
299+mtk_wed_free_rx_bm(struct mtk_wed_device *dev)
300+{
301+ struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc;
302+ int ring_size = dev->rx_buf_ring.size;
303+
304+ if (!desc)
305+ return;
306+
307+ dev->wlan.release_rx_buf(dev);
308+
309+ dma_free_coherent(dev->hw->dev, ring_size * sizeof(*desc),
310+ desc, dev->rx_buf_ring.desc_phys); /* was buf_ring.desc_phys: freed the TX ring's phys addr */
311+}
312+
313 static void
314 mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, int scale)
315 {
316@@ -226,13 +318,22 @@ mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
317 mtk_wed_free_ring(dev, &dev->tx_wdma[i], dev->ver);
318 }
319
320+static void
321+mtk_wed_free_rx_rings(struct mtk_wed_device *dev)
322+{
323+ mtk_wed_free_rx_bm(dev);
324+ mtk_wed_free_ring(dev, &dev->rro.rro_ring, 1);
325+}
326+
327 static void
328 mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
329 {
330 u32 wdma_mask;
331
332 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
333-
334+ if (dev->ver > MTK_WED_V1)
335+ wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
336+ GENMASK(1, 0));
337 /* wed control cr set */
338 wed_set(dev, MTK_WED_CTRL,
339 MTK_WED_CTRL_WDMA_INT_AGENT_EN |
340@@ -251,7 +352,7 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
341 wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
342 MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
343 } else {
344- /* initail tx interrupt trigger */
345+
346 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
347 MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
348 MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
349@@ -262,22 +363,30 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
350 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
351 dev->wlan.tx_tbit[1]));
352
353- /* initail txfree interrupt trigger */
354 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
355 MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
356 MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
357 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
358 dev->wlan.txfree_tbit));
359+
360+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
361+ MTK_WED_WPDMA_INT_CTRL_RX0_EN |
362+ MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
363+ MTK_WED_WPDMA_INT_CTRL_RX1_EN |
364+ MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
365+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
366+ dev->wlan.rx_tbit[0]) |
367+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
368+ dev->wlan.rx_tbit[1]));
369 }
370- /* initail wdma interrupt agent */
371 wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
372 if (dev->ver == MTK_WED_V1) {
373 wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
374 } else {
375 wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
376 wed_set(dev, MTK_WED_WDMA_INT_CTRL,
377- FIELD_PREP(MTK_WED_WDMA_INT_POLL_SRC_SEL,dev->wdma_idx));
378-
379+ FIELD_PREP(MTK_WED_WDMA_INT_POLL_SRC_SEL,
380+ dev->wdma_idx));
381 }
382
383 wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
384@@ -312,6 +421,39 @@ mtk_wed_set_512_support(struct mtk_wed_device *dev, bool en)
385 }
386 }
387
388+static void
389+mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
390+{
391+#define MTK_WFMDA_RX_DMA_EN BIT(2)
392+
393+ int timeout = 3;
394+ u32 cur_idx, regs;
395+
396+ do {
397+ regs = MTK_WED_WPDMA_RING_RX_DATA(idx) +
398+ MTK_WED_RING_OFS_COUNT;
399+ cur_idx = wed_r32(dev, regs);
400+ if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
401+ break;
402+
403+ usleep_range(100000, 200000);
404+ } while (timeout-- > 0);
405+
406+ if (cur_idx == MTK_WED_RX_RING_SIZE - 1) { /* was "if (timeout)": true on exhaustion (-1), false on last-try success (0) */
407+ unsigned int val;
408+
409+ val = wifi_r32(dev, dev->wlan.wpdma_rx_glo -
410+ dev->wlan.phy_base);
411+ val |= MTK_WFMDA_RX_DMA_EN;
412+
413+ wifi_w32(dev, dev->wlan.wpdma_rx_glo -
414+ dev->wlan.phy_base, val);
415+ } else {
416+ dev_err(dev->hw->dev, "mtk_wed%d: rx dma enable failed!\n",
417+ dev->hw->index);
418+ }
419+}
420+
421 static void
422 mtk_wed_dma_enable(struct mtk_wed_device *dev)
423 {
424@@ -336,9 +478,14 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev)
425 wdma_set(dev, MTK_WDMA_GLO_CFG,
426 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
427 } else {
428+ int idx = 0;
429+
430 wed_set(dev, MTK_WED_WPDMA_CTRL,
431 MTK_WED_WPDMA_CTRL_SDL1_FIXED);
432
433+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
434+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
435+
436 wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
437 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
438 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
439@@ -346,6 +493,15 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev)
440 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
441 MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
442 MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
443+
444+ wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
445+ MTK_WED_WPDMA_RX_D_RX_DRV_EN |
446+ FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
447+ FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
448+ 0x2));
449+
450+ for (idx = 0; idx < MTK_WED_RX_QUEUES; idx++)
451+ mtk_wed_check_wfdma_rx_fill(dev, idx);
452 }
453 }
454
455@@ -363,19 +519,23 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
456 MTK_WED_GLO_CFG_TX_DMA_EN |
457 MTK_WED_GLO_CFG_RX_DMA_EN);
458
459- wdma_m32(dev, MTK_WDMA_GLO_CFG,
460+ wdma_clr(dev, MTK_WDMA_GLO_CFG,
461 MTK_WDMA_GLO_CFG_TX_DMA_EN |
462 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
463- MTK_WDMA_GLO_CFG_RX_INFO2_PRERES, 0);
464+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
465
466 if (dev->ver == MTK_WED_V1) {
467 regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
468- wdma_m32(dev, MTK_WDMA_GLO_CFG,
469- MTK_WDMA_GLO_CFG_RX_INFO3_PRERES, 0);
470+ wdma_clr(dev, MTK_WDMA_GLO_CFG,
471+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
472 } else {
473 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
474 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
475 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
476+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
477+ MTK_WED_WPDMA_RX_D_RX_DRV_EN);
478+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
479+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
480 }
481 }
482
483@@ -395,6 +555,11 @@ mtk_wed_stop(struct mtk_wed_device *dev)
484 MTK_WED_CTRL_WED_TX_BM_EN |
485 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
486
487+ if (dev->ver > MTK_WED_V1) {
488+ wed_clr(dev, MTK_WED_CTRL,
489+ MTK_WED_CTRL_WED_RX_BM_EN);
490+ }
491+
492 wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
493 wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
494 wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
495@@ -416,9 +581,15 @@ mtk_wed_detach(struct mtk_wed_device *dev)
496 wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
497
498 mtk_wed_reset(dev, MTK_WED_RESET_WED);
499+ mtk_wed_wo_reset(dev);
500+
501+ wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
502+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
503+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
504
505 mtk_wed_free_buffer(dev);
506 mtk_wed_free_tx_rings(dev);
507+ mtk_wed_free_rx_rings(dev);
508
509 if (dev->wlan.bus_type == MTK_BUS_TYPE_PCIE) {
510 wlan_node = dev->wlan.pci_dev->dev.of_node;
511@@ -477,7 +648,6 @@ mtk_wed_bus_init(struct mtk_wed_device *dev)
512 value = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
513 value = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
514
515- /* pcie interrupt status trigger register */
516 wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
517 wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
518
519@@ -501,6 +671,9 @@ mtk_wed_set_wpdma(struct mtk_wed_device *dev)
520 wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
521 wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
522 wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
523+
524+ wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
525+ wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
526 } else {
527 wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
528 }
529@@ -549,24 +722,92 @@ mtk_wed_hw_init_early(struct mtk_wed_device *dev)
530 FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL,
531 MTK_WDMA_RING_RX(0)));
532 }
533+}
534+
535+static void
536+mtk_wed_rx_bm_hw_init(struct mtk_wed_device *dev)
537+{
538+ wed_w32(dev, MTK_WED_RX_BM_RX_DMAD,
539+ FIELD_PREP(MTK_WED_RX_BM_RX_DMAD_SDL0, dev->wlan.rx_pkt_size));
540+
541+ wed_w32(dev, MTK_WED_RX_BM_BASE, dev->rx_buf_ring.desc_phys);
542
543+ wed_w32(dev, MTK_WED_RX_BM_INIT_PTR, MTK_WED_RX_BM_INIT_SW_TAIL |
544+ FIELD_PREP(MTK_WED_RX_BM_SW_TAIL, dev->wlan.rx_pkt));
545+
546+ wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH,
547+ FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff));
548+
549+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
550 }
551
552 static void
553-mtk_wed_hw_init(struct mtk_wed_device *dev)
554+mtk_wed_rro_hw_init(struct mtk_wed_device *dev)
555+{
556+ wed_w32(dev, MTK_WED_RROQM_MIOD_CFG,
557+ FIELD_PREP(MTK_WED_RROQM_MIOD_MID_DW, 0x70 >> 2) |
558+ FIELD_PREP(MTK_WED_RROQM_MIOD_MOD_DW, 0x10 >> 2) |
559+ FIELD_PREP(MTK_WED_RROQM_MIOD_ENTRY_DW,
560+ MTK_WED_MIOD_ENTRY_CNT >> 2));
561+
562+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL0, dev->rro.miod_desc_phys);
563+
564+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL1,
565+ FIELD_PREP(MTK_WED_RROQM_MIOD_CNT, MTK_WED_MIOD_CNT));
566+
567+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL0, dev->rro.fdbk_desc_phys);
568+
569+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL1,
570+ FIELD_PREP(MTK_WED_RROQM_FDBK_CNT, MTK_WED_FB_CMD_CNT));
571+
572+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL2, 0);
573+
574+ wed_w32(dev, MTK_WED_RROQ_BASE_L, dev->rro.rro_ring.desc_phys);
575+
576+ wed_set(dev, MTK_WED_RROQM_RST_IDX,
577+ MTK_WED_RROQM_RST_IDX_MIOD |
578+ MTK_WED_RROQM_RST_IDX_FDBK);
579+
580+ wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
581+
582+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL2, MTK_WED_MIOD_CNT -1);
583+
584+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
585+}
586+
587+static void
588+mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
589+{
590+ wed_w32(dev, MTK_WED_RESET, MTK_WED_RESET_RX_ROUTE_QM);
591+
592+ do {
593+ udelay(100);
594+
595+ if (!(wed_r32(dev, MTK_WED_RESET) & MTK_WED_RESET_RX_ROUTE_QM))
596+ break;
597+ } while (1);
598+
599+ /* configure RX_ROUTE_QM */
600+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
601+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
602+ wed_set(dev, MTK_WED_RTQM_GLO_CFG,
603+ FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
604+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
605+
606+ /* enable RX_ROUTE_QM */
607+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
608+}
609+
610+static void
611+mtk_wed_tx_hw_init(struct mtk_wed_device *dev)
612 {
613 int size = dev->buf_ring.size;
614 int rev_size = MTK_WED_TX_RING_SIZE / 2;
615 int thr = 1;
616
617- if (dev->init_done)
618- return;
619-
620- dev->init_done = true;
621- mtk_wed_set_ext_int(dev, false);
622-
623 if (dev->ver > MTK_WED_V1) {
624- size = MTK_WED_WDMA_RING_SIZE * 2 + dev->buf_ring.size;
625+ size = MTK_WED_WDMA_RING_SIZE * ARRAY_SIZE(dev->tx_wdma) +
626+ dev->buf_ring.size;
627 rev_size = size;
628 thr = 0;
629 }
630@@ -609,13 +850,48 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
631 }
632
633 static void
634-mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size, int scale)
635+mtk_wed_rx_hw_init(struct mtk_wed_device *dev)
636 {
637+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
638+ MTK_WED_WPDMA_RX_D_RST_CRX_IDX0 |
639+ MTK_WED_WPDMA_RX_D_RST_CRX_IDX1 |
640+ MTK_WED_WPDMA_RX_D_RST_DRV_IDX0 |
641+ MTK_WED_WPDMA_RX_D_RST_DRV_IDX1);
642+
643+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
644+
645+ mtk_wed_rx_bm_hw_init(dev);
646+ mtk_wed_rro_hw_init(dev);
647+ mtk_wed_route_qm_hw_init(dev);
648+}
649+
650+static void
651+mtk_wed_hw_init(struct mtk_wed_device *dev)
652+{
653+ if (dev->init_done)
654+ return;
655+
656+ dev->init_done = true;
657+ mtk_wed_set_ext_int(dev, false);
658+ mtk_wed_tx_hw_init(dev);
659+ if (dev->ver > MTK_WED_V1)
660+ mtk_wed_rx_hw_init(dev);
661+}
662+
663+static void
664+mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size, int scale, bool tx)
665+{
666+ __le32 ctrl;
667 int i;
668
669+ if (tx)
670+ ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
671+ else
672+ ctrl = cpu_to_le32(MTK_WFDMA_DESC_CTRL_TO_HOST);
673+
674 for (i = 0; i < size; i++) {
675 desc->buf0 = 0;
676- desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
677+ desc->ctrl = ctrl;
678 desc->buf1 = 0;
679 desc->info = 0;
680 desc += scale;
681@@ -674,7 +950,7 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
682 if (!desc)
683 continue;
684
685- mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE, dev->ver);
686+ mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE, dev->ver, true);
687 }
688
689 if (mtk_wed_poll_busy(dev))
690@@ -729,9 +1005,24 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
691
692 }
693
694+static int
695+mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
696+ int size)
697+{
698+ ring->desc = dma_alloc_coherent(dev->hw->dev,
699+ size * sizeof(*ring->desc),
700+ &ring->desc_phys, GFP_KERNEL);
701+ if (!ring->desc)
702+ return -ENOMEM;
703+
704+ ring->size = size;
705+ memset(ring->desc, 0, size * sizeof(*ring->desc)); /* was "size": zeroed only size bytes of the ring */
706+ return 0;
707+}
708+
709 static int
710 mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
711- int size, int scale)
712+ int size, int scale, bool tx)
713 {
714 ring->desc = dma_alloc_coherent(dev->hw->dev,
715 size * sizeof(*ring->desc) * scale,
716@@ -740,17 +1031,18 @@ mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
717 return -ENOMEM;
718
719 ring->size = size;
720- mtk_wed_ring_reset(ring->desc, size, scale);
721+ mtk_wed_ring_reset(ring->desc, size, scale, tx);
722
723 return 0;
724 }
725
726 static int
727-mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
728+mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
729 {
730 struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
731
732- if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, dev->ver))
733+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
734+ dev->ver, true))
735 return -ENOMEM;
736
737 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
738@@ -767,22 +1059,140 @@ mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
739 return 0;
740 }
741
742+static int
743+mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
744+{
745+ struct mtk_wed_ring *wdma = &dev->rx_wdma[idx];
746+
747+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
748+ dev->ver, true))
749+ return -ENOMEM;
750+
751+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
752+ wdma->desc_phys);
753+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
754+ size);
755+ wdma_w32(dev,
756+ MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
757+ wdma_w32(dev,
758+ MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);
759+
760+ if (idx == 0) {
761+ wed_w32(dev, MTK_WED_WDMA_RING_TX
762+ + MTK_WED_RING_OFS_BASE, wdma->desc_phys);
763+ wed_w32(dev, MTK_WED_WDMA_RING_TX
764+ + MTK_WED_RING_OFS_COUNT, size);
765+ wed_w32(dev, MTK_WED_WDMA_RING_TX
766+ + MTK_WED_RING_OFS_CPU_IDX, 0);
767+ wed_w32(dev, MTK_WED_WDMA_RING_TX
768+ + MTK_WED_RING_OFS_DMA_IDX, 0);
769+ }
770+
771+ return 0;
772+}
773+
774+static int
775+mtk_wed_rro_alloc(struct mtk_wed_device *dev)
776+{
777+ struct device_node *np, *node = dev->hw->node;
778+ struct mtk_wed_ring *ring;
779+ struct resource res;
780+ int ret;
781+
782+ np = of_parse_phandle(node, "mediatek,wocpu_dlm", 0);
783+ if (!np)
784+ return -ENODEV;
785+
786+ ret = of_address_to_resource(np, 0, &res);
787+ if (ret)
788+ return ret;
789+
790+ dev->rro.rro_desc = ioremap(res.start, resource_size(&res));
791+
792+ ring = &dev->rro.rro_ring;
793+
794+ dev->rro.miod_desc_phys = res.start;
795+
796+ dev->rro.mcu_view_miod = MTK_WED_WOCPU_VIEW_MIOD_BASE;
797+ dev->rro.fdbk_desc_phys = MTK_WED_MIOD_ENTRY_CNT * MTK_WED_MIOD_CNT
798+ + dev->rro.miod_desc_phys;
799+
800+ if (mtk_wed_rro_ring_alloc(dev, ring, MTK_WED_RRO_QUE_CNT))
801+ return -ENOMEM;
802+
803+ return 0;
804+}
805+
806+static int
807+mtk_wed_rro_cfg(struct mtk_wed_device *dev)
808+{
809+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
810+ struct {
811+ struct wo_cmd_ring ring[2];
812+
813+ u32 wed;
814+ u8 ver;
815+ } req = {
816+ .ring = {
817+ [0] = {
818+ .q_base = dev->rro.mcu_view_miod,
819+ .cnt = MTK_WED_MIOD_CNT,
820+ .unit = MTK_WED_MIOD_ENTRY_CNT,
821+ },
822+ [1] = {
823+ .q_base = dev->rro.mcu_view_miod +
824+ MTK_WED_MIOD_ENTRY_CNT *
825+ MTK_WED_MIOD_CNT,
826+ .cnt = MTK_WED_FB_CMD_CNT,
827+ .unit = 4,
828+ },
829+ },
830+ .wed = 0,
831+ };
832+
833+ return mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, WO_CMD_WED_CFG,
834+ &req, sizeof(req), true);
835+}
836+
837+static int
838+mtk_wed_send_msg(struct mtk_wed_device *dev, int cmd_id, void *data, int len)
839+{
840+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
841+
842+ return mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, cmd_id, data, len, true);
843+}
844+
845+static void
846+mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb,
847+ u32 reason, u32 hash)
848+{
849+ int idx = dev->hw->index;
850+ struct mtk_eth *eth = dev->hw->eth;
851+ struct ethhdr *eh;
852+
853+ if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) {
854+ if (!skb)
855+ return;
856+
857+ skb_set_mac_header(skb, 0);
858+ eh = eth_hdr(skb);
859+ skb->protocol = eh->h_proto;
860+ mtk_ppe_check_skb(eth->ppe[idx], skb, hash);
861+ }
862+}
863+
864 static void
865 mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
866 {
867- u32 wdma_mask;
868- int i;
869+ int i, ret;
870
871 for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
872 if (!dev->tx_wdma[i].desc)
873- mtk_wed_wdma_ring_setup(dev, i, 16);
874-
875+ mtk_wed_wdma_rx_ring_setup(dev, i, 16);
876
877 mtk_wed_hw_init(dev);
878
879 mtk_wed_set_int(dev, irq_mask);
880-
881-
882 mtk_wed_set_ext_int(dev, true);
883
884 if (dev->ver == MTK_WED_V1) {
885@@ -797,6 +1207,19 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
886 val |= BIT(0);
887 regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
888 } else {
889+ /* driver set mid ready and only once */
890+ wed_w32(dev, MTK_WED_EXT_INT_MASK1,
891+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
892+ wed_w32(dev, MTK_WED_EXT_INT_MASK2,
893+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
894+
895+ wed_r32(dev, MTK_WED_EXT_INT_MASK1);
896+ wed_r32(dev, MTK_WED_EXT_INT_MASK2);
897+
898+ ret = mtk_wed_rro_cfg(dev);
899+ if (ret)
900+ return;
901+
902 mtk_wed_set_512_support(dev, true);
903 }
904
905@@ -841,9 +1264,17 @@ mtk_wed_attach(struct mtk_wed_device *dev)
906 wed_r32(dev, MTK_WED_REV_ID));
907
908 ret = mtk_wed_buffer_alloc(dev);
909- if (ret) {
910- mtk_wed_detach(dev);
911- goto out;
912+ if (ret)
913+ goto error;
914+
915+ if (dev->ver > MTK_WED_V1) {
916+ ret = mtk_wed_rx_bm_alloc(dev);
917+ if (ret)
918+ goto error;
919+
920+ ret = mtk_wed_rro_alloc(dev);
921+ if (ret)
922+ goto error;
923 }
924
925 mtk_wed_hw_init_early(dev);
926@@ -851,7 +1282,12 @@ mtk_wed_attach(struct mtk_wed_device *dev)
927 if (dev->ver == MTK_WED_V1)
928 regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
929 BIT(hw->index), 0);
930+ else
931+ ret = mtk_wed_wo_init(hw);
932
933+error:
934+ if (ret)
935+ mtk_wed_detach(dev);
936 out:
937 mutex_unlock(&hw_lock);
938
939@@ -877,10 +1313,10 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
940
941 BUG_ON(idx > ARRAY_SIZE(dev->tx_ring));
942
943- if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, 1))
944+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, 1, true))
945 return -ENOMEM;
946
947- if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
948+ if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
949 return -ENOMEM;
950
951 ring->reg_base = MTK_WED_RING_TX(idx);
952@@ -927,6 +1363,35 @@ mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
953 return 0;
954 }
955
956+static int
957+mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
958+{
959+ struct mtk_wed_ring *ring = &dev->rx_ring[idx];
960+
961+ BUG_ON(idx >= ARRAY_SIZE(dev->rx_ring)); /* was ">": allowed idx == ARRAY_SIZE, out of bounds */
962+
963+
964+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE, 1, false))
965+ return -ENOMEM;
966+
967+ if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
968+ return -ENOMEM;
969+
970+ ring->reg_base = MTK_WED_RING_RX_DATA(idx);
971+ ring->wpdma = regs;
972+
973+ /* WPDMA -> WED */
974+ wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
975+ wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_RX_RING_SIZE);
976+
977+ wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_BASE,
978+ ring->desc_phys);
979+ wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_COUNT,
980+ MTK_WED_RX_RING_SIZE);
981+
982+ return 0;
983+}
984+
985 static u32
986 mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
987 {
988@@ -1014,6 +1479,8 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
989 .attach = mtk_wed_attach,
990 .tx_ring_setup = mtk_wed_tx_ring_setup,
991 .txfree_ring_setup = mtk_wed_txfree_ring_setup,
992+ .rx_ring_setup = mtk_wed_rx_ring_setup,
993+ .msg_update = mtk_wed_send_msg,
994 .start = mtk_wed_start,
995 .stop = mtk_wed_stop,
996 .reset_dma = mtk_wed_reset_dma,
997@@ -1022,6 +1489,7 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
998 .irq_get = mtk_wed_irq_get,
999 .irq_set_mask = mtk_wed_irq_set_mask,
1000 .detach = mtk_wed_detach,
1001+ .ppe_check = mtk_wed_ppe_check,
1002 };
1003 struct device_node *eth_np = eth->dev->of_node;
1004 struct platform_device *pdev;
1005diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
1006index 9b17b7405..ec79b0d42 100644
1007--- a/drivers/net/ethernet/mediatek/mtk_wed.h
1008+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
1009@@ -13,6 +13,7 @@
1010 #define MTK_WED_PKT_SIZE 1900
1011 #define MTK_WED_BUF_SIZE 2048
1012 #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
1013+#define MTK_WED_RX_RING_SIZE 1536
1014
1015 #define MTK_WED_TX_RING_SIZE 2048
1016 #define MTK_WED_WDMA_RING_SIZE 512
1017@@ -21,8 +22,15 @@
1018 #define MTK_WED_PER_GROUP_PKT 128
1019
1020 #define MTK_WED_FBUF_SIZE 128
1021+#define MTK_WED_MIOD_CNT 16
1022+#define MTK_WED_FB_CMD_CNT 1024
1023+#define MTK_WED_RRO_QUE_CNT 8192
1024+#define MTK_WED_MIOD_ENTRY_CNT 128
1025+
1026+#define MODULE_ID_WO 1
1027
1028 struct mtk_eth;
1029+struct mtk_wed_wo;
1030
1031 struct mtk_wed_hw {
1032 struct device_node *node;
1033@@ -34,12 +42,14 @@ struct mtk_wed_hw {
1034 struct regmap *mirror;
1035 struct dentry *debugfs_dir;
1036 struct mtk_wed_device *wed_dev;
1037+ struct mtk_wed_wo *wed_wo;
1038 u32 debugfs_reg;
1039 u32 num_flows;
1040 u32 wdma_phy;
1041 char dirname[5];
1042 int irq;
1043 int index;
1044+ u32 ver;
1045 };
1046
1047 struct mtk_wdma_info {
1048@@ -66,6 +76,18 @@ wed_r32(struct mtk_wed_device *dev, u32 reg)
1049 return val;
1050 }
1051
1052+static inline u32
1053+wifi_r32(struct mtk_wed_device *dev, u32 reg)
1054+{
1055+ return readl(dev->wlan.base + reg);
1056+}
1057+
1058+static inline void
1059+wifi_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
1060+{
1061+ writel(val, dev->wlan.base + reg);
1062+}
1063+
1064 static inline void
1065 wdma_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
1066 {
1067@@ -114,6 +136,23 @@ wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
1068 writel(val, dev->txfree_ring.wpdma + reg);
1069 }
1070
1071+static inline u32
1072+wpdma_rx_r32(struct mtk_wed_device *dev, int ring, u32 reg)
1073+{
1074+ if (!dev->rx_ring[ring].wpdma)
1075+ return 0;
1076+
1077+ return readl(dev->rx_ring[ring].wpdma + reg);
1078+}
1079+
1080+static inline void
1081+wpdma_rx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val)
1082+{
1083+ if (!dev->rx_ring[ring].wpdma)
1084+ return;
1085+
1086+ writel(val, dev->rx_ring[ring].wpdma + reg);
1087+}
1088 void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
1089 void __iomem *wdma, u32 wdma_phy, int index);
1090 void mtk_wed_exit(void);
1091@@ -146,4 +185,15 @@ static inline void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
1092 }
1093 #endif
1094
1095+int wed_wo_hardware_init(struct mtk_wed_wo *wo, irq_handler_t isr);
1096+int wed_wo_mcu_init(struct mtk_wed_wo *wo);
1097+int mtk_wed_exception_init(struct mtk_wed_wo *wo);
1098+void mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo, struct sk_buff *skb);
1099+int mtk_wed_mcu_cmd_sanity_check(struct mtk_wed_wo *wo, struct sk_buff *skb);
1100+void wed_wo_mcu_debugfs(struct mtk_wed_hw *hw, struct dentry *dir);
1101+void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo, struct sk_buff *skb);
1102+int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int to_id, int cmd,
1103+ const void *data, int len, bool wait_resp);
1104+int mtk_wed_wo_rx_poll(struct napi_struct *napi, int budget);
1105+
1106 #endif
1107diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ccif.c b/drivers/net/ethernet/mediatek/mtk_wed_ccif.c
1108new file mode 100644
1109index 000000000..732ffc8cf
1110--- /dev/null
1111+++ b/drivers/net/ethernet/mediatek/mtk_wed_ccif.c
1112@@ -0,0 +1,121 @@
1113+// SPDX-License-Identifier: GPL-2.0-only
1114+
1115+#include <linux/soc/mediatek/mtk_wed.h>
1116+#include <linux/of_address.h>
1117+#include <linux/mfd/syscon.h>
1118+#include <linux/of_irq.h>
1119+#include "mtk_wed_ccif.h"
1120+#include "mtk_wed_regs.h"
1121+#include "mtk_wed_wo.h"
1122+
1123+static inline void woif_set_isr(struct mtk_wed_wo *wo, u32 mask)
1124+{
1125+ woccif_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
1126+}
1127+
1128+static inline u32 woif_get_csr(struct mtk_wed_wo *wo)
1129+{
1130+ u32 val;
1131+
1132+ val = woccif_r32(wo, MTK_WED_WO_CCIF_RCHNUM);
1133+
1134+ return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
1135+}
1136+
1137+static inline void woif_set_ack(struct mtk_wed_wo *wo, u32 mask)
1138+{
1139+ woccif_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
1140+}
1141+
1142+static inline void woif_kickout(struct mtk_wed_wo *wo)
1143+{
1144+ woccif_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
1145+ woccif_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
1146+}
1147+
1148+static inline void woif_clear_int(struct mtk_wed_wo *wo, u32 mask)
1149+{
1150+ woccif_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
1151+ woccif_r32(wo, MTK_WED_WO_CCIF_RCHNUM);
1152+}
1153+
1154+int wed_wo_hardware_init(struct mtk_wed_wo *wo, irq_handler_t isr)
1155+{
1156+ static const struct wed_wo_drv_ops wo_drv_ops = {
1157+ .kickout = woif_kickout,
1158+ .set_ack = woif_set_ack,
1159+ .set_isr = woif_set_isr,
1160+ .get_csr = woif_get_csr,
1161+ .clear_int = woif_clear_int,
1162+ };
1163+ struct device_node *np, *node = wo->hw->node;
1164+ struct wed_wo_queue_regs queues;
1165+ struct regmap *regs;
1166+ int ret;
1167+
1168+ np = of_parse_phandle(node, "mediatek,ap2woccif", 0);
1169+ if (!np)
1170+ return -ENODEV;
1171+
1172+ regs = syscon_regmap_lookup_by_phandle(np, NULL);
1173+ if (!regs)
1174+ return -ENODEV;
1175+
1176+ wo->drv_ops = &wo_drv_ops;
1177+
1178+ wo->ccif.regs = regs;
1179+ wo->ccif.irq = irq_of_parse_and_map(np, 0);
1180+
1181+ spin_lock_init(&wo->ccif.irq_lock);
1182+
1183+ ret = request_irq(wo->ccif.irq, isr, IRQF_TRIGGER_HIGH,
1184+ "wo_ccif_isr", wo);
1185+ if (ret)
1186+ return ret;
1187+
1188+ queues.desc_base = MTK_WED_WO_CCIF_DUMMY1;
1189+ queues.ring_size = MTK_WED_WO_CCIF_DUMMY2;
1190+ queues.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;
1191+ queues.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
1192+
1193+ ret = mtk_wed_wo_q_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
1194+ MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
1195+ &queues);
1196+
1197+ if (ret)
1198+ goto free_irq;
1199+
1200+ queues.desc_base = MTK_WED_WO_CCIF_DUMMY5;
1201+ queues.ring_size = MTK_WED_WO_CCIF_DUMMY6;
1202+ queues.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;
1203+ queues.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
1204+
1205+ ret = mtk_wed_wo_q_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
1206+ MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
1207+ &queues);
1208+ if (ret)
1209+ goto free_irq;
1210+
1211+ wo->ccif.q_int_mask = MTK_WED_WO_RXCH_INT_MASK;
1212+
1213+ ret = mtk_wed_wo_q_init(wo, mtk_wed_wo_rx_poll);
1214+ if (ret)
1215+ goto free_irq;
1216+
1217+ wo->ccif.q_exep_mask = MTK_WED_WO_EXCEPTION_INT_MASK;
1218+ wo->ccif.irqmask = MTK_WED_WO_ALL_INT_MASK;
1219+
1220+ /* rx queue irqmask */
1221+ wo->drv_ops->set_isr(wo, wo->ccif.irqmask);
1222+
1223+ return 0;
1224+
1225+free_irq:
1226+ free_irq(wo->ccif.irq, wo);
1227+
1228+ return ret;
1229+}
1230+
1231+static void wed_wo_hardware_exit(struct mtk_wed_wo *wo)
1232+{
1233+}
1234diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ccif.h b/drivers/net/ethernet/mediatek/mtk_wed_ccif.h
1235new file mode 100644
1236index 000000000..68ade449c
1237--- /dev/null
1238+++ b/drivers/net/ethernet/mediatek/mtk_wed_ccif.h
1239@@ -0,0 +1,45 @@
1240+/* SPDX-License-Identifier: GPL-2.0-only */
1241+
1242+#ifndef __MTK_WED_CCIF_H
1243+#define __MTK_WED_CCIF_H
1244+
1245+#define MTK_WED_WO_RING_SIZE 256
1246+#define MTK_WED_WO_CMD_LEN 1504
1247+
1248+#define MTK_WED_WO_TXCH_NUM 0
1249+#define MTK_WED_WO_RXCH_NUM 1
1250+#define MTK_WED_WO_RXCH_WO_EXCEPTION 7
1251+
1252+#define MTK_WED_WO_TXCH_INT_MASK BIT(0)
1253+#define MTK_WED_WO_RXCH_INT_MASK BIT(1)
1254+#define MTK_WED_WO_EXCEPTION_INT_MASK BIT(7)
1255+#define MTK_WED_WO_ALL_INT_MASK (MTK_WED_WO_RXCH_INT_MASK | \
1256+ MTK_WED_WO_EXCEPTION_INT_MASK)
1257+
1258+#define MTK_WED_WO_CCIF_BUSY 0x004
1259+#define MTK_WED_WO_CCIF_START 0x008
1260+#define MTK_WED_WO_CCIF_TCHNUM 0x00c
1261+#define MTK_WED_WO_CCIF_RCHNUM 0x010
1262+#define MTK_WED_WO_CCIF_RCHNUM_MASK GENMASK(7, 0)
1263+
1264+#define MTK_WED_WO_CCIF_ACK 0x014
1265+#define MTK_WED_WO_CCIF_IRQ0_MASK 0x018
1266+#define MTK_WED_WO_CCIF_IRQ1_MASK 0x01c
1267+#define MTK_WED_WO_CCIF_DUMMY1 0x020
1268+#define MTK_WED_WO_CCIF_DUMMY2 0x024
1269+#define MTK_WED_WO_CCIF_DUMMY3 0x028
1270+#define MTK_WED_WO_CCIF_DUMMY4 0x02c
1271+#define MTK_WED_WO_CCIF_SHADOW1 0x030
1272+#define MTK_WED_WO_CCIF_SHADOW2 0x034
1273+#define MTK_WED_WO_CCIF_SHADOW3 0x038
1274+#define MTK_WED_WO_CCIF_SHADOW4 0x03c
1275+#define MTK_WED_WO_CCIF_DUMMY5 0x050
1276+#define MTK_WED_WO_CCIF_DUMMY6 0x054
1277+#define MTK_WED_WO_CCIF_DUMMY7 0x058
1278+#define MTK_WED_WO_CCIF_DUMMY8 0x05c
1279+#define MTK_WED_WO_CCIF_SHADOW5 0x060
1280+#define MTK_WED_WO_CCIF_SHADOW6 0x064
1281+#define MTK_WED_WO_CCIF_SHADOW7 0x068
1282+#define MTK_WED_WO_CCIF_SHADOW8 0x06c
1283+
1284+#endif
1285diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
1286index f420f187e..fea7ae2fc 100644
1287--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
1288+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
1289@@ -2,6 +2,7 @@
1290 /* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
1291
1292 #include <linux/seq_file.h>
1293+#include <linux/soc/mediatek/mtk_wed.h>
1294 #include "mtk_wed.h"
1295 #include "mtk_wed_regs.h"
1296
1297@@ -18,6 +19,8 @@ enum {
1298 DUMP_TYPE_WDMA,
1299 DUMP_TYPE_WPDMA_TX,
1300 DUMP_TYPE_WPDMA_TXFREE,
1301+ DUMP_TYPE_WPDMA_RX,
1302+ DUMP_TYPE_WED_RRO,
1303 };
1304
1305 #define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
1306@@ -36,6 +39,10 @@ enum {
1307
1308 #define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n)
1309 #define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE)
1310+#define DUMP_WPDMA_RX_RING(_n) DUMP_RING("WPDMA_RX" #_n, 0, DUMP_TYPE_WPDMA_RX, _n)
1311+#define DUMP_WED_RRO_RING(_base)DUMP_RING("WED_RRO_MIOD", MTK_##_base, DUMP_TYPE_WED_RRO)
1312+#define DUMP_WED_RRO_FDBK(_base)DUMP_RING("WED_RRO_FDBK", MTK_##_base, DUMP_TYPE_WED_RRO)
1313+
1314
1315 static void
1316 print_reg_val(struct seq_file *s, const char *name, u32 val)
1317@@ -58,6 +65,7 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
1318 cur->name);
1319 continue;
1320 case DUMP_TYPE_WED:
1321+ case DUMP_TYPE_WED_RRO:
1322 val = wed_r32(dev, cur->offset);
1323 break;
1324 case DUMP_TYPE_WDMA:
1325@@ -69,6 +77,9 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
1326 case DUMP_TYPE_WPDMA_TXFREE:
1327 val = wpdma_txfree_r32(dev, cur->offset);
1328 break;
1329+ case DUMP_TYPE_WPDMA_RX:
1330+ val = wpdma_rx_r32(dev, cur->base, cur->offset);
1331+ break;
1332 }
1333 print_reg_val(s, cur->name, val);
1334 }
1335@@ -132,6 +143,81 @@ wed_txinfo_show(struct seq_file *s, void *data)
1336 }
1337 DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
1338
1339+static int
1340+wed_rxinfo_show(struct seq_file *s, void *data)
1341+{
1342+ static const struct reg_dump regs[] = {
1343+ DUMP_STR("WPDMA RX"),
1344+ DUMP_WPDMA_RX_RING(0),
1345+ DUMP_WPDMA_RX_RING(1),
1346+
1347+ DUMP_STR("WPDMA RX"),
1348+ DUMP_WED(WED_WPDMA_RX_D_MIB(0)),
1349+ DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(0)),
1350+ DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(0)),
1351+ DUMP_WED(WED_WPDMA_RX_D_MIB(1)),
1352+ DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(1)),
1353+ DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(1)),
1354+ DUMP_WED(WED_WPDMA_RX_D_COHERENT_MIB),
1355+
1356+ DUMP_STR("WED RX"),
1357+ DUMP_WED_RING(WED_RING_RX_DATA(0)),
1358+ DUMP_WED_RING(WED_RING_RX_DATA(1)),
1359+
1360+ DUMP_STR("WED RRO"),
1361+ DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0),
1362+ DUMP_WED(WED_RROQM_MID_MIB),
1363+ DUMP_WED(WED_RROQM_MOD_MIB),
1364+ DUMP_WED(WED_RROQM_MOD_COHERENT_MIB),
1365+ DUMP_WED_RRO_FDBK(WED_RROQM_FDBK_CTRL0),
1366+ DUMP_WED(WED_RROQM_FDBK_IND_MIB),
1367+ DUMP_WED(WED_RROQM_FDBK_ENQ_MIB),
1368+ DUMP_WED(WED_RROQM_FDBK_ANC_MIB),
1369+ DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB),
1370+
1371+ DUMP_STR("WED Route QM"),
1372+ DUMP_WED(WED_RTQM_R2H_MIB(0)),
1373+ DUMP_WED(WED_RTQM_R2Q_MIB(0)),
1374+ DUMP_WED(WED_RTQM_Q2H_MIB(0)),
1375+ DUMP_WED(WED_RTQM_R2H_MIB(1)),
1376+ DUMP_WED(WED_RTQM_R2Q_MIB(1)),
1377+ DUMP_WED(WED_RTQM_Q2H_MIB(1)),
1378+ DUMP_WED(WED_RTQM_Q2N_MIB),
1379+ DUMP_WED(WED_RTQM_Q2B_MIB),
1380+ DUMP_WED(WED_RTQM_PFDBK_MIB),
1381+
1382+ DUMP_STR("WED WDMA TX"),
1383+ DUMP_WED(WED_WDMA_TX_MIB),
1384+ DUMP_WED_RING(WED_WDMA_RING_TX),
1385+
1386+ DUMP_STR("WDMA TX"),
1387+ DUMP_WDMA(WDMA_GLO_CFG),
1388+ DUMP_WDMA_RING(WDMA_RING_TX(0)),
1389+ DUMP_WDMA_RING(WDMA_RING_TX(1)),
1390+
1391+ DUMP_STR("WED RX BM"),
1392+ DUMP_WED(WED_RX_BM_BASE),
1393+ DUMP_WED(WED_RX_BM_RX_DMAD),
1394+ DUMP_WED(WED_RX_BM_PTR),
1395+ DUMP_WED(WED_RX_BM_TKID_MIB),
1396+ DUMP_WED(WED_RX_BM_BLEN),
1397+ DUMP_WED(WED_RX_BM_STS),
1398+ DUMP_WED(WED_RX_BM_INTF2),
1399+ DUMP_WED(WED_RX_BM_INTF),
1400+ DUMP_WED(WED_RX_BM_ERR_STS),
1401+ };
1402+
1403+ struct mtk_wed_hw *hw = s->private;
1404+ struct mtk_wed_device *dev = hw->wed_dev;
1405+
1406+ if (!dev)
1407+ return 0;
1408+
1409+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
1410+
1411+ return 0;
1412+}
1413+DEFINE_SHOW_ATTRIBUTE(wed_rxinfo);
1414
1415 static int
1416 mtk_wed_reg_set(void *data, u64 val)
1417@@ -175,4 +261,8 @@ void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
1418 debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
1419 debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
1420 debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
1421+ debugfs_create_file_unsafe("rxinfo", 0400, dir, hw, &wed_rxinfo_fops);
1422+ if (hw->ver > MTK_WED_V1) {
1423+ wed_wo_mcu_debugfs(hw, dir);
1424+ }
1425 }
1426diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
1427new file mode 100644
1428index 000000000..bd1ab9500
1429--- /dev/null
1430+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
1431@@ -0,0 +1,561 @@
1432+// SPDX-License-Identifier: GPL-2.0-only
1433+
1434+#include <linux/skbuff.h>
1435+#include <linux/debugfs.h>
1436+#include <linux/firmware.h>
1437+#include <linux/of_address.h>
1438+#include <linux/soc/mediatek/mtk_wed.h>
1439+#include "mtk_wed_regs.h"
1440+#include "mtk_wed_mcu.h"
1441+#include "mtk_wed_wo.h"
1442+
1443+struct sk_buff *
1444+mtk_wed_mcu_msg_alloc(struct mtk_wed_wo *wo,
1445+ const void *data, int data_len)
1446+{
1447+ const struct wed_wo_mcu_ops *ops = wo->mcu_ops;
1448+ int length = ops->headroom + data_len;
1449+ struct sk_buff *skb;
1450+
1451+ skb = alloc_skb(length, GFP_KERNEL);
1452+ if (!skb)
1453+ return NULL;
1454+
1455+ memset(skb->head, 0, length);
1456+ skb_reserve(skb, ops->headroom);
1457+
1458+ if (data && data_len)
1459+ skb_put_data(skb, data, data_len);
1460+
1461+ return skb;
1462+}
1463+
1464+struct sk_buff *
1465+mtk_wed_mcu_get_response(struct mtk_wed_wo *wo, unsigned long expires)
1466+{
1467+ unsigned long timeout;
1468+
1469+ if (!time_is_after_jiffies(expires))
1470+ return NULL;
1471+
1472+ timeout = expires - jiffies;
1473+ wait_event_timeout(wo->mcu.wait,
1474+ (!skb_queue_empty(&wo->mcu.res_q)),
1475+ timeout);
1476+
1477+ return skb_dequeue(&wo->mcu.res_q);
1478+}
1479+
1480+int
1481+mtk_wed_mcu_skb_send_and_get_msg(struct mtk_wed_wo *wo,
1482+ int to_id, int cmd, struct sk_buff *skb,
1483+ bool wait_resp, struct sk_buff **ret_skb)
1484+{
1485+ unsigned long expires;
1486+ int ret, seq;
1487+
1488+ if (ret_skb)
1489+ *ret_skb = NULL;
1490+
1491+ mutex_lock(&wo->mcu.mutex);
1492+
1493+ ret = wo->mcu_ops->mcu_skb_send_msg(wo, to_id, cmd, skb, &seq, wait_resp);
1494+ if (ret < 0)
1495+ goto out;
1496+
1497+ if (!wait_resp) {
1498+ ret = 0;
1499+ goto out;
1500+ }
1501+
1502+ expires = jiffies + wo->mcu.timeout;
1503+
1504+ do {
1505+ skb = mtk_wed_mcu_get_response(wo, expires);
1506+ ret = wo->mcu_ops->mcu_parse_response(wo, cmd, skb, seq);
1507+
1508+ if (!ret && ret_skb)
1509+ *ret_skb = skb;
1510+ else
1511+ dev_kfree_skb(skb);
1512+ } while (ret == -EAGAIN);
1513+
1514+out:
1515+ mutex_unlock(&wo->mcu.mutex);
1516+
1517+ return ret;
1518+}
1519+
1520+void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo,
1521+ struct sk_buff *skb)
1522+{
1523+ skb_queue_tail(&wo->mcu.res_q, skb);
1524+ wake_up(&wo->mcu.wait);
1525+}
1526+
1527+static int mtk_wed_mcu_send_and_get_msg(struct mtk_wed_wo *wo,
1528+ int to_id, int cmd, const void *data, int len,
1529+ bool wait_resp, struct sk_buff **ret_skb)
1530+{
1531+ struct sk_buff *skb;
1532+
1533+ skb = mtk_wed_mcu_msg_alloc(wo, data, len);
1534+ if (!skb)
1535+ return -ENOMEM;
1536+
1537+ return mtk_wed_mcu_skb_send_and_get_msg(wo, to_id, cmd, skb, wait_resp, ret_skb);
1538+}
1539+
1540+int
1541+mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo,
1542+ int to_id, int cmd,
1543+ const void *data, int len, bool wait_resp)
1544+{
1545+ struct sk_buff *skb = NULL;
1546+ int ret = 0;
1547+
1548+ ret = mtk_wed_mcu_send_and_get_msg(wo, to_id, cmd, data,
1549+ len, wait_resp, &skb);
1550+ if (skb)
1551+ dev_kfree_skb(skb);
1552+
1553+ return ret;
1554+}
1555+
1556+int mtk_wed_exception_init(struct mtk_wed_wo *wo)
1557+{
1558+ struct wed_wo_exception *exp = &wo->exp;
1559+ struct {
1560+ u32 arg0;
1561+ u32 arg1;
1562+ }req;
1563+
1564+ exp->log_size = EXCEPTION_LOG_SIZE;
1565+ exp->log = kmalloc(exp->log_size, GFP_ATOMIC);
1566+ if (!exp->log)
1567+ return -ENOMEM;
1568+
1569+ memset(exp->log, 0, exp->log_size);
1570+ exp->phys = dma_map_single(wo->hw->dev, exp->log, exp->log_size,
1571+ DMA_FROM_DEVICE);
1572+
1573+ if (unlikely(dma_mapping_error(wo->hw->dev, exp->phys))) {
1574+ dev_info(wo->hw->dev, "dma map error\n");
1575+ goto free;
1576+ }
1577+
1578+ req.arg0 = (u32)exp->phys;
1579+ req.arg1 = (u32)exp->log_size;
1580+
1581+ return mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, WO_CMD_EXCEPTION_INIT,
1582+ &req, sizeof(req), false);
1583+
1584+free:
1585+ kfree(exp->log);
1586+ return -ENOMEM;
1587+}
1588+
1589+int
1590+mtk_wed_mcu_cmd_sanity_check(struct mtk_wed_wo *wo, struct sk_buff *skb)
1591+{
1592+ struct wed_cmd_hdr *hdr = (struct wed_cmd_hdr *)skb->data;
1593+
1594+ if (skb->len < sizeof(struct wed_cmd_hdr))
1595+ return WARP_INVALID_PARA_STATUS;
1596+
1597+ if (hdr->ver != 0)
1598+ return WARP_INVALID_PARA_STATUS;
1599+
1600+ if (skb->len != le16_to_cpu(hdr->length))
1601+ return WARP_INVALID_PARA_STATUS;
1602+
1603+ return WARP_OK_STATUS;
1604+}
1605+
1606+void
1607+mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo, struct sk_buff *skb)
1608+{
1609+ struct wed_cmd_hdr *hdr = (struct wed_cmd_hdr *)skb->data;
1610+ struct wed_wo_log *record;
1611+ char *msg = (char *)(skb->data + sizeof(struct wed_cmd_hdr));
1612+ u16 msg_len = skb->len - sizeof(struct wed_cmd_hdr);
1613+ u32 i, cnt = 0;
1614+
1615+ switch (hdr->cmd_id) {
1616+ case WO_EVT_LOG_DUMP:
1617+ pr_info("[WO LOG]: %s\n", msg);
1618+ break;
1619+ case WO_EVT_PROFILING:
1620+ cnt = msg_len / (sizeof(struct wed_wo_log));
1621+ record = (struct wed_wo_log *) msg;
1622+ dev_info(wo->hw->dev, "[WO Profiling]: %d report arrived!\n", cnt);
1623+
1624+ for (i = 0 ; i < cnt ; i++) {
1625+ //PROFILE_STAT(wo->total, record[i].total);
1626+ //PROFILE_STAT(wo->mod, record[i].mod);
1627+ //PROFILE_STAT(wo->rro, record[i].rro);
1628+
1629+ dev_info(wo->hw->dev, "[WO Profiling]: SN:%u with latency: total=%u, rro:%u, mod:%u\n",
1630+ record[i].sn,
1631+ record[i].total,
1632+ record[i].rro,
1633+ record[i].mod);
1634+ }
1635+ break;
1636+
1637+ default:
1638+ break;
1639+ }
1640+
1641+ dev_kfree_skb(skb);
1642+
1643+}
1644+
1645+static int
1646+mtk_wed_load_firmware(struct mtk_wed_wo *wo)
1647+{
1648+ struct fw_info {
1649+ __le32 decomp_crc;
1650+ __le32 decomp_len;
1651+ __le32 decomp_blk_sz;
1652+ u8 reserved[4];
1653+ __le32 addr;
1654+ __le32 len;
1655+ u8 feature_set;
1656+ u8 reserved1[15];
1657+ } __packed *region;
1658+
1659+ const char *mcu;
1660+ const struct mtk_wed_fw_trailer *hdr;
1661+ static u8 shared[MAX_REGION_SIZE] = {0};
1662+ const struct firmware *fw;
1663+ int ret, i;
1664+ u32 ofs = 0;
1665+ u32 boot_cr, val;
1666+
1667+ mcu = wo->hw->index ? MT7986_FIRMWARE_WO_2 : MT7986_FIRMWARE_WO_1;
1668+
1669+ ret = request_firmware(&fw, mcu, wo->hw->dev);
1670+ if (ret)
1671+ return ret;
1672+
1673+ hdr = (const struct mtk_wed_fw_trailer *)(fw->data + fw->size -
1674+ sizeof(*hdr));
1675+
1676+ dev_info(wo->hw->dev, "WO Firmware Version: %.10s, Build Time: %.15s\n",
1677+ hdr->fw_ver, hdr->build_date);
1678+
1679+ for (i = 0; i < hdr->n_region; i++) {
1680+ int j = 0;
1681+ region = (struct fw_info *)(fw->data + fw->size -
1682+ sizeof(*hdr) -
1683+ sizeof(*region) *
1684+ (hdr->n_region - i));
1685+
1686+ while (j < MAX_REGION_SIZE) {
1687+ struct mtk_wed_fw_region *wo_region;
1688+
1689+ wo_region = &wo->region[j];
1690+ if (!wo_region->addr)
1691+ break;
1692+
1693+ if (wo_region->addr_pa == region->addr) {
1694+ if (!wo_region->shared) {
1695+ memcpy(wo_region->addr,
1696+ fw->data + ofs, region->len);
1697+ } else if (!shared[j]) {
1698+ memcpy(wo_region->addr,
1699+ fw->data + ofs, region->len);
1700+ shared[j] = true;
1701+ }
1702+ }
1703+ j++;
1704+ }
1705+
1706+ if (j == __WO_REGION_MAX) {
1707+ ret = -ENOENT;
1708+ goto done;
1709+ }
1710+ ofs += region->len;
1711+ }
1712+
1713+ /* write the start address */
1714+ boot_cr = wo->hw->index ?
1715+ WOX_MCU_CFG_LS_WA_BOOT_ADDR_ADDR : WOX_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
1716+ wo_w32(wo, boot_cr, (wo->region[WO_REGION_EMI].addr_pa >> 16));
1717+
1718+ /* wo firmware reset */
1719+ wo_w32(wo, WOX_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00);
1720+
1721+ val = wo_r32(wo, WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR);
1722+
1723+ val |= wo->hw->index ? WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WA_CPU_RSTB_MASK :
1724+ WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WM_CPU_RSTB_MASK;
1725+
1726+ wo_w32(wo, WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val);
1727+
1728+done:
1729+ release_firmware(fw);
1730+
1731+ return ret;
1732+}
1733+
1734+static int
1735+mtk_wed_get_firmware_region(struct mtk_wed_wo *wo)
1736+{
1737+ struct device_node *node, *np = wo->hw->node;
1738+ struct mtk_wed_fw_region *region;
1739+ struct resource res;
1740+ const char *compat;
1741+ int i, ret;
1742+
1743+ static const char *const wo_region_compat[__WO_REGION_MAX] = {
1744+ [WO_REGION_EMI] = WOCPU_EMI_DEV_NODE,
1745+ [WO_REGION_ILM] = WOCPU_ILM_DEV_NODE,
1746+ [WO_REGION_DATA] = WOCPU_DATA_DEV_NODE,
1747+ [WO_REGION_BOOT] = WOCPU_BOOT_DEV_NODE,
1748+ };
1749+
1750+ for (i = 0; i < __WO_REGION_MAX; i++) {
1751+ region = &wo->region[i];
1752+ compat = wo_region_compat[i];
1753+
1754+ node = of_parse_phandle(np, compat, 0);
1755+ if (!node)
1756+ return -ENODEV;
1757+
1758+ ret = of_address_to_resource(node, 0, &res);
1759+ if (ret)
1760+ return ret;
1761+
1762+ region->addr_pa = res.start;
1763+ region->size = resource_size(&res);
1764+ region->addr = ioremap(region->addr_pa, region->size);
1765+
1766+ of_property_read_u32_index(node, "shared", 0, &region->shared);
1767+ }
1768+
1769+ return 0;
1770+}
1771+
1772+static int
1773+wo_mcu_send_message(struct mtk_wed_wo *wo,
1774+ int to_id, int cmd, struct sk_buff *skb,
1775+ int *wait_seq, bool wait_resp)
1776+{
1777+ struct wed_cmd_hdr *hdr;
1778+ u8 seq = 0;
1779+
1780+ /* TDO: make dynamic based on msg type */
1781+ wo->mcu.timeout = 20 * HZ;
1782+
1783+ if (wait_resp && wait_seq) {
1784+ seq = wo->mcu.msg_seq++;
1785+ *wait_seq = seq;
1786+ }
1787+
1788+ hdr = (struct wed_cmd_hdr *)skb_push(skb, sizeof(*hdr));
1789+
1790+ hdr->cmd_id = cmd;
1791+ hdr->length = cpu_to_le16(skb->len);
1792+ hdr->uni_id = seq;
1793+
1794+ if (to_id == MODULE_ID_WO)
1795+ hdr->flag |= WARP_CMD_FLAG_FROM_TO_WO;
1796+
1797+ if (wait_resp && wait_seq)
1798+ hdr->flag |= WARP_CMD_FLAG_NEED_RSP;
1799+
1800+ return mtk_wed_wo_q_tx_skb(wo, &wo->q_tx, skb);
1801+}
1802+
1803+static int
1804+wo_mcu_parse_response(struct mtk_wed_wo *wo, int cmd,
1805+ struct sk_buff *skb, int seq)
1806+{
1807+ struct wed_cmd_hdr *hdr;
1808+
1809+ if (!skb) {
1810+ dev_err(wo->hw->dev, "Message %08x (seq %d) timeout\n",
1811+ cmd, seq);
1812+ return -ETIMEDOUT;
1813+ }
1814+
1815+ hdr = (struct wed_cmd_hdr *)skb->data;
1816+ if (seq != hdr->uni_id) {
1817+ dev_err(wo->hw->dev, "Message %08x (seq %d) with not match uid(%d)\n",
1818+ cmd, seq, hdr->uni_id);
1819+ return -EAGAIN;
1820+ }
1821+
1822+ //skb_pull(skb, sizeof(struct wed_cmd_hdr));
1823+
1824+ return 0;
1825+}
1826+
1827+int wed_wo_mcu_init(struct mtk_wed_wo *wo)
1828+{
1829+ static const struct wed_wo_mcu_ops wo_mcu_ops = {
1830+ .headroom = sizeof(struct wed_cmd_hdr),
1831+ .mcu_skb_send_msg = wo_mcu_send_message,
1832+ .mcu_parse_response = wo_mcu_parse_response,
1833+ /*TDO .mcu_restart = wo_mcu_restart,*/
1834+ };
1835+ unsigned long timeout = jiffies + FW_DL_TIMEOUT;
1836+ int ret;
1837+ u32 val;
1838+
1839+ wo->mcu_ops = &wo_mcu_ops;
1840+
1841+ ret = mtk_wed_get_firmware_region(wo);
1842+ if (ret)
1843+ return ret;
1844+
1845+ /* set dummy cr */
1846+ wed_w32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * WED_DUMMY_CR_FWDL,
1847+ wo->hw->index + 1);
1848+
1849+ ret = mtk_wed_load_firmware(wo);
1850+ if (ret)
1851+ return ret;
1852+
1853+ do {
1854+ /* get dummy cr */
1855+ val = wed_r32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * WED_DUMMY_CR_FWDL);
1856+ } while (val != 0 && !time_after(jiffies, timeout));
1857+
1858+ if (val)
1859+ return -EBUSY;
1860+
1861+ return 0;
1862+}
1863+
1864+static ssize_t
1865+mtk_wed_wo_ctrl(struct file *file,
1866+ const char __user *user_buf,
1867+ size_t count,
1868+ loff_t *ppos)
1869+{
1870+ struct mtk_wed_hw *hw = file->private_data;
1871+ struct mtk_wed_wo *wo = hw->wed_wo;
1872+ char buf[100], *cmd = NULL, *input[11] = {0};
1873+ char msgbuf[128] = {0};
1874+ struct wo_cmd_query *query = (struct wo_cmd_query *)msgbuf;
1875+ u32 cmd_id;
1876+ bool wait = false;
1877+ char *sub_str = NULL;
1878+ int input_idx = 0, input_total = 0, scan_num = 0;
1879+ char *p;
1880+
1881+ if (count >= sizeof(buf))
1882+ return -EINVAL;
1883+
1884+ if (copy_from_user(buf, user_buf, count))
1885+ return -EFAULT;
1886+
1887+ if (count && buf[count - 1] == '\n')
1888+ buf[count - 1] = '\0';
1889+ else
1890+ buf[count] = '\0';
1891+
1892+ p = buf;
1893+
1894+ while (input_idx < ARRAY_SIZE(input) && (sub_str = strsep(&p, " ")) != NULL) {
1895+ input[input_idx] = sub_str;
1896+ input_idx++;
1897+ input_total++;
1898+ }
1899+ cmd = input[0];
1900+ if (input_total == 1 && cmd) {
1901+ if (strncmp(cmd, "bainfo", strlen(cmd)) == 0) {
1902+ cmd_id = WO_CMD_BA_INFO_DUMP;
1903+ } else if (strncmp(cmd, "bactrl", strlen(cmd)) == 0) {
1904+ cmd_id = WO_CMD_BA_CTRL_DUMP;
1905+ } else if (strncmp(cmd, "fbcmdq", strlen(cmd)) == 0) {
1906+ cmd_id = WO_CMD_FBCMD_Q_DUMP;
1907+ } else if (strncmp(cmd, "logflush", strlen(cmd)) == 0) {
1908+ cmd_id = WO_CMD_LOG_FLUSH;
1909+ } else if (strncmp(cmd, "cpustat.dump", strlen(cmd)) == 0) {
1910+ cmd_id = WO_CMD_CPU_STATS_DUMP;
1911+ } else if (strncmp(cmd, "state", strlen(cmd)) == 0) {
1912+ cmd_id = WO_CMD_WED_RX_STAT;
1913+ } else if (strncmp(cmd, "prof_hit_dump", strlen(cmd)) == 0) {
1914+ //wo_profiling_report();
1915+ return count;
1916+ } else if (strncmp(cmd, "rxcnt_info", strlen(cmd)) == 0) {
1917+ cmd_id = WO_CMD_RXCNT_INFO;
1918+ wait = true;
1919+ } else {
1920+ pr_info("(%s) unknown command string(%s)!\n", __func__, cmd);
1921+ return count;
1922+ }
1923+ } else if (input_total > 1) {
1924+ for (input_idx = 1 ; input_idx < input_total ; input_idx++) {
1925+ scan_num = sscanf(input[input_idx], "%u", &query->query0+(input_idx - 1));
1926+
1927+ if (scan_num < 1) {
1928+ pr_info("(%s) require more input!\n", __func__);
1929+ return count;
1930+ }
1931+ }
1932+ if (strncmp(cmd, "devinfo", strlen(cmd)) == 0) {
1933+ cmd_id = WO_CMD_DEV_INFO_DUMP;
1934+ } else if (strncmp(cmd, "bssinfo", strlen(cmd)) == 0) {
1935+ cmd_id = WO_CMD_BSS_INFO_DUMP;
1936+ } else if (strncmp(cmd, "starec", strlen(cmd)) == 0) {
1937+ cmd_id = WO_CMD_STA_REC_DUMP;
1938+ } else if (strncmp(cmd, "starec_ba", strlen(cmd)) == 0) {
1939+ cmd_id = WO_CMD_STA_BA_DUMP;
1940+ } else if (strncmp(cmd, "logctrl", strlen(cmd)) == 0) {
1941+ cmd_id = WO_CMD_FW_LOG_CTRL;
1942+ } else if (strncmp(cmd, "cpustat.en", strlen(cmd)) == 0) {
1943+ cmd_id = WO_CMD_CPU_STATS_ENABLE;
1944+ } else if (strncmp(cmd, "prof_conf", strlen(cmd)) == 0) {
1945+ cmd_id = WO_CMD_PROF_CTRL;
1946+ } else if (strncmp(cmd, "rxcnt_ctrl", strlen(cmd)) == 0) {
1947+ cmd_id = WO_CMD_RXCNT_CTRL;
1948+ } else if (strncmp(cmd, "dbg_set", strlen(cmd)) == 0) {
1949+ cmd_id = WO_CMD_DBG_INFO;
1950+ } else { return count; }
1951+ } else {
1952+ dev_info(hw->dev, "usage: echo cmd='cmd_str' > wo_write\n");
1953+ dev_info(hw->dev, "cmd_str value range:\n");
1954+ dev_info(hw->dev, "\tbainfo:\n");
1955+ dev_info(hw->dev, "\tbactrl:\n");
1956+ dev_info(hw->dev, "\tfbcmdq:\n");
1957+ dev_info(hw->dev, "\tlogflush:\n");
1958+ dev_info(hw->dev, "\tcpustat.dump:\n");
1959+ dev_info(hw->dev, "\tprof_hit_dump:\n");
1960+ dev_info(hw->dev, "\trxcnt_info:\n");
1961+ dev_info(hw->dev, "\tdevinfo:\n");
1962+ dev_info(hw->dev, "\tbssinfo:\n");
1963+ dev_info(hw->dev, "\tstarec:\n");
1964+ dev_info(hw->dev, "\tstarec_ba:\n");
1965+ dev_info(hw->dev, "\tlogctrl:\n");
1966+ dev_info(hw->dev, "\tcpustat.en:\n");
1967+ dev_info(hw->dev, "\tprof_conf:\n");
1968+ dev_info(hw->dev, "\trxcnt_ctrl:\n");
1969+ dev_info(hw->dev, "\tdbg_set [level] [category]:\n");
1970+ return count;
1971+ }
1972+
1973+ mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, cmd_id, (void *)msgbuf, sizeof(struct wo_cmd_query), wait);
1974+
1975+ return count;
1976+
1977+}
1978+
1979+static const struct file_operations fops_wo_ctrl = {
1980+ .write = mtk_wed_wo_ctrl,
1981+ .open = simple_open,
1982+ .llseek = default_llseek,
1983+};
1984+
1985+void wed_wo_mcu_debugfs(struct mtk_wed_hw *hw, struct dentry *dir)
1986+{
1987+ if (!dir)
1988+ return;
1989+
1990+ debugfs_create_file("wo_write", 0600, dir, hw, &fops_wo_ctrl);
1991+}
1992+
1993diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.h b/drivers/net/ethernet/mediatek/mtk_wed_mcu.h
1994new file mode 100644
1995index 000000000..6a5ac7672
1996--- /dev/null
1997+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.h
1998@@ -0,0 +1,125 @@
1999+/* SPDX-License-Identifier: GPL-2.0-only */
2000+
2001+#ifndef __MTK_WED_MCU_H
2002+#define __MTK_WED_MCU_H
2003+
2004+#define EXCEPTION_LOG_SIZE 32768
2005+#define WOCPU_MCUSYS_RESET_ADDR 0x15194050
2006+#define WOCPU_WO0_MCUSYS_RESET_MASK 0x20
2007+#define WOCPU_WO1_MCUSYS_RESET_MASK 0x1
2008+
2009+#define WARP_INVALID_LENGTH_STATUS (-2)
2010+#define WARP_NULL_POINTER_STATUS (-3)
2011+#define WARP_INVALID_PARA_STATUS (-4)
2012+#define WARP_NOT_HANDLE_STATUS (-5)
2013+#define WARP_FAIL_STATUS (-1)
2014+#define WARP_OK_STATUS (0)
2015+#define WARP_ALREADY_DONE_STATUS (1)
2016+
2017+#define MT7986_FIRMWARE_WO_1 "mediatek/mt7986_wo_0.bin"
2018+#define MT7986_FIRMWARE_WO_2 "mediatek/mt7986_wo_1.bin"
2019+
2020+#define WOCPU_EMI_DEV_NODE "mediatek,wocpu_emi"
2021+#define WOCPU_ILM_DEV_NODE "mediatek,wocpu_ilm"
2022+#define WOCPU_DLM_DEV_NODE "mediatek,wocpu_dlm"
2023+#define WOCPU_DATA_DEV_NODE "mediatek,wocpu_data"
2024+#define WOCPU_BOOT_DEV_NODE "mediatek,wocpu_boot"
2025+
2026+#define FW_DL_TIMEOUT ((3000 * HZ) / 1000)
2027+#define WOCPU_TIMEOUT ((1000 * HZ) / 1000)
2028+
2029+#define MAX_REGION_SIZE 3
2030+
2031+#define WOX_MCU_CFG_LS_BASE 0 /*0x15194000*/
2032+
2033+#define WOX_MCU_CFG_LS_HW_VER_ADDR (WOX_MCU_CFG_LS_BASE + 0x000) // 4000
2034+#define WOX_MCU_CFG_LS_FW_VER_ADDR (WOX_MCU_CFG_LS_BASE + 0x004) // 4004
2035+#define WOX_MCU_CFG_LS_CFG_DBG1_ADDR (WOX_MCU_CFG_LS_BASE + 0x00C) // 400C
2036+#define WOX_MCU_CFG_LS_CFG_DBG2_ADDR (WOX_MCU_CFG_LS_BASE + 0x010) // 4010
2037+#define WOX_MCU_CFG_LS_WF_MCCR_ADDR (WOX_MCU_CFG_LS_BASE + 0x014) // 4014
2038+#define WOX_MCU_CFG_LS_WF_MCCR_SET_ADDR (WOX_MCU_CFG_LS_BASE + 0x018) // 4018
2039+#define WOX_MCU_CFG_LS_WF_MCCR_CLR_ADDR (WOX_MCU_CFG_LS_BASE + 0x01C) // 401C
2040+#define WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR (WOX_MCU_CFG_LS_BASE + 0x050) // 4050
2041+#define WOX_MCU_CFG_LS_WM_BOOT_ADDR_ADDR (WOX_MCU_CFG_LS_BASE + 0x060) // 4060
2042+#define WOX_MCU_CFG_LS_WA_BOOT_ADDR_ADDR (WOX_MCU_CFG_LS_BASE + 0x064) // 4064
2043+
2044+#define WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WM_CPU_RSTB_MASK BIT(5)
2045+#define WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WA_CPU_RSTB_MASK BIT(0)
2046+
2047+
2048+enum wo_event_id {
2049+ WO_EVT_LOG_DUMP = 0x1,
2050+ WO_EVT_PROFILING = 0x2,
2051+ WO_EVT_RXCNT_INFO = 0x3
2052+};
2053+
2054+enum wo_cmd_id {
2055+ WO_CMD_WED_CFG = 0,
2056+ WO_CMD_WED_RX_STAT,
2057+ WO_CMD_RRO_SER,
2058+ WO_CMD_DBG_INFO,
2059+ WO_CMD_DEV_INFO,
2060+ WO_CMD_BSS_INFO,
2061+ WO_CMD_STA_REC,
2062+ WO_CMD_DEV_INFO_DUMP,
2063+ WO_CMD_BSS_INFO_DUMP,
2064+ WO_CMD_STA_REC_DUMP,
2065+ WO_CMD_BA_INFO_DUMP,
2066+ WO_CMD_FBCMD_Q_DUMP,
2067+ WO_CMD_FW_LOG_CTRL,
2068+ WO_CMD_LOG_FLUSH,
2069+ WO_CMD_CHANGE_STATE,
2070+ WO_CMD_CPU_STATS_ENABLE,
2071+ WO_CMD_CPU_STATS_DUMP,
2072+ WO_CMD_EXCEPTION_INIT,
2073+ WO_CMD_PROF_CTRL,
2074+ WO_CMD_STA_BA_DUMP,
2075+ WO_CMD_BA_CTRL_DUMP,
2076+ WO_CMD_RXCNT_CTRL,
2077+ WO_CMD_RXCNT_INFO,
2078+ WO_CMD_SET_CAP,
2079+ WO_CMD_CCIF_RING_DUMP,
2080+ WO_CMD_WED_END
2081+};
2082+
2083+enum wo_state {
2084+ WO_STATE_UNDEFINED = 0x0,
2085+ WO_STATE_INIT = 0x1,
2086+ WO_STATE_ENABLE = 0x2,
2087+ WO_STATE_DISABLE = 0x3,
2088+ WO_STATE_HALT = 0x4,
2089+ WO_STATE_GATING = 0x5,
2090+ WO_STATE_SER_RESET = 0x6,
2091+ WO_STATE_WF_RESET = 0x7,
2092+ WO_STATE_END
2093+};
2094+
2095+enum wo_done_state {
2096+ WOIF_UNDEFINED = 0,
2097+ WOIF_DISABLE_DONE = 1,
2098+ WOIF_TRIGGER_ENABLE = 2,
2099+ WOIF_ENABLE_DONE = 3,
2100+ WOIF_TRIGGER_GATING = 4,
2101+ WOIF_GATING_DONE = 5,
2102+ WOIF_TRIGGER_HALT = 6,
2103+ WOIF_HALT_DONE = 7,
2104+};
2105+
2106+enum wed_dummy_cr_idx {
2107+ WED_DUMMY_CR_FWDL = 0,
2108+ WED_DUMMY_CR_WO_STATUS = 1
2109+};
2110+
2111+struct mtk_wed_fw_trailer {
2112+ u8 chip_id;
2113+ u8 eco_code;
2114+ u8 n_region;
2115+ u8 format_ver;
2116+ u8 format_flag;
2117+ u8 reserved[2];
2118+ char fw_ver[10];
2119+ char build_date[15];
2120+ u32 crc;
2121+};
2122+
2123+#endif
2124diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2125index 69f136ed4..e911b5315 100644
2126--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2127+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2128@@ -4,6 +4,8 @@
2129 #ifndef __MTK_WED_REGS_H
2130 #define __MTK_WED_REGS_H
2131
2132+#define MTK_WFDMA_DESC_CTRL_TO_HOST BIT(8)
2133+
2134 #if defined(CONFIG_MEDIATEK_NETSYS_V2)
2135 #define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(13, 0)
2136 #define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(14)
2137@@ -16,6 +18,7 @@
2138 #define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16)
2139 #define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30)
2140 #define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31)
2141+#define MTK_WED_RX_BM_TOKEN GENMASK(31, 16)
2142
2143 struct mtk_wdma_desc {
2144 __le32 buf0;
2145@@ -37,6 +40,8 @@ struct mtk_wdma_desc {
2146 #define MTK_WED_RESET_WED_TX_DMA BIT(12)
2147 #define MTK_WED_RESET_WDMA_RX_DRV BIT(17)
2148 #define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
2149+#define MTK_WED_RESET_RX_RRO_QM BIT(20)
2150+#define MTK_WED_RESET_RX_ROUTE_QM BIT(21)
2151 #define MTK_WED_RESET_WED BIT(31)
2152
2153 #define MTK_WED_CTRL 0x00c
2154@@ -48,8 +53,12 @@ struct mtk_wdma_desc {
2155 #define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
2156 #define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
2157 #define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11)
2158-#define MTK_WED_CTRL_RESERVE_EN BIT(12)
2159-#define MTK_WED_CTRL_RESERVE_BUSY BIT(13)
2160+#define MTK_WED_CTRL_WED_RX_BM_EN BIT(12)
2161+#define MTK_WED_CTRL_WED_RX_BM_BUSY BIT(13)
2162+#define MTK_WED_CTRL_RX_RRO_QM_EN BIT(14)
2163+#define MTK_WED_CTRL_RX_RRO_QM_BUSY BIT(15)
2164+#define MTK_WED_CTRL_RX_ROUTE_QM_EN BIT(16)
2165+#define MTK_WED_CTRL_RX_ROUTE_QM_BUSY BIT(17)
2166 #define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
2167 #define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25)
2168 #define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
2169@@ -64,8 +73,8 @@ struct mtk_wdma_desc {
2170 #define MTK_WED_EXT_INT_STATUS_TX_TKID_LO_TH BIT(10)
2171 #define MTK_WED_EXT_INT_STATUS_TX_TKID_HI_TH BIT(11)
2172 #endif
2173-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
2174-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
2175+#define MTK_WED_EXT_INT_STATUS_RX_FREE_AT_EMPTY BIT(12)
2176+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_DMAD_ER BIT(13)
2177 #define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
2178 #define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
2179 #define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
2180@@ -82,8 +91,8 @@ struct mtk_wdma_desc {
2181 #define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
2182 MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
2183 MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \
2184- MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH | \
2185- MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH | \
2186+ MTK_WED_EXT_INT_STATUS_RX_FREE_AT_EMPTY | \
2187+ MTK_WED_EXT_INT_STATUS_RX_FBUF_DMAD_ER | \
2188 MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
2189 MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
2190 MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | \
2191@@ -92,6 +101,8 @@ struct mtk_wdma_desc {
2192 MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR)
2193
2194 #define MTK_WED_EXT_INT_MASK 0x028
2195+#define MTK_WED_EXT_INT_MASK1 0x02c
2196+#define MTK_WED_EXT_INT_MASK2 0x030
2197
2198 #define MTK_WED_STATUS 0x060
2199 #define MTK_WED_STATUS_TX GENMASK(15, 8)
2200@@ -179,6 +190,9 @@ struct mtk_wdma_desc {
2201
2202 #define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10)
2203
2204+#define MTK_WED_RING_RX_DATA(_n) (0x420 + (_n) * 0x10)
2205+
2206+#define MTK_WED_SCR0 0x3c0
2207 #define MTK_WED_WPDMA_INT_TRIGGER 0x504
2208 #define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1)
2209 #define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4)
2210@@ -235,13 +249,19 @@ struct mtk_wdma_desc {
2211
2212 #define MTK_WED_WPDMA_INT_CTRL_TX 0x530
2213 #define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN BIT(0)
2214-#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR BIT(1)
2215+#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR BIT(1)
2216 #define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG GENMASK(6, 2)
2217 #define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN BIT(8)
2218 #define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR BIT(9)
2219 #define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG GENMASK(14, 10)
2220
2221 #define MTK_WED_WPDMA_INT_CTRL_RX 0x534
2222+#define MTK_WED_WPDMA_INT_CTRL_RX0_EN BIT(0)
2223+#define MTK_WED_WPDMA_INT_CTRL_RX0_CLR BIT(1)
2224+#define MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG GENMASK(6, 2)
2225+#define MTK_WED_WPDMA_INT_CTRL_RX1_EN BIT(8)
2226+#define MTK_WED_WPDMA_INT_CTRL_RX1_CLR BIT(9)
2227+#define MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG GENMASK(14, 10)
2228
2229 #define MTK_WED_WPDMA_INT_CTRL_TX_FREE 0x538
2230 #define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN BIT(0)
2231@@ -266,13 +286,43 @@ struct mtk_wdma_desc {
2232 #define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
2233 #define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
2234
2235+#define MTK_WED_WPDMA_RX_MIB(_n) (0x5e0 + (_n) * 4)
2236+#define MTK_WED_WPDMA_RX_COHERENT_MIB(_n) (0x5f0 + (_n) * 4)
2237+
2238 #define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10)
2239 #define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10)
2240+#define MTK_WED_WPDMA_RING_RX_DATA(_n) (0x730 + (_n) * 0x10)
2241+
2242+
2243+#define MTK_WED_WPDMA_RX_D_GLO_CFG 0x75c
2244+#define MTK_WED_WPDMA_RX_D_RX_DRV_EN BIT(0)
2245+#define MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL GENMASK(11, 7)
2246+#define MTK_WED_WPDMA_RX_D_RXD_READ_LEN GENMASK(31, 24)
2247+
2248+#define MTK_WED_WPDMA_RX_D_RST_IDX 0x760
2249+#define MTK_WED_WPDMA_RX_D_RST_CRX_IDX0 BIT(16)
2250+#define MTK_WED_WPDMA_RX_D_RST_CRX_IDX1 BIT(17)
2251+#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX0 BIT(24)
2252+#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX1 BIT(25)
2253+
2254+#define MTK_WED_WPDMA_RX_GLO_CFG 0x76c
2255+#define MTK_WED_WPDMA_RX_RING 0x770
2256+
2257+#define MTK_WED_WPDMA_RX_D_MIB(_n) (0x774 + (_n) * 4)
2258+#define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n) (0x784 + (_n) * 4)
2259+#define MTK_WED_WPDMA_RX_D_COHERENT_MIB 0x78c
2260+
2261+#define MTK_WED_WDMA_RING_TX 0x800
2262+
2263+#define MTK_WED_WDMA_TX_MIB 0x810
2264+
2265+
2266 #define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
2267 #define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
2268
2269 #define MTK_WED_WDMA_GLO_CFG 0xa04
2270 #define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
2271+#define MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK BIT(1)
2272 #define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2)
2273 #define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
2274 #define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4)
2275@@ -316,6 +366,20 @@ struct mtk_wdma_desc {
2276 #define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4)
2277 #define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4)
2278
2279+#define MTK_WED_RX_BM_RX_DMAD 0xd80
2280+#define MTK_WED_RX_BM_RX_DMAD_SDL0 GENMASK(13, 0)
2281+
2282+#define MTK_WED_RX_BM_BASE 0xd84
2283+#define MTK_WED_RX_BM_INIT_PTR 0xd88
2284+#define MTK_WED_RX_BM_SW_TAIL GENMASK(15, 0)
2285+#define MTK_WED_RX_BM_INIT_SW_TAIL BIT(16)
2286+
2287+#define MTK_WED_RX_PTR 0xd8c
2288+
2289+#define MTK_WED_RX_BM_DYN_ALLOC_TH 0xdb4
2290+#define MTK_WED_RX_BM_DYN_ALLOC_TH_H GENMASK(31, 16)
2291+#define MTK_WED_RX_BM_DYN_ALLOC_TH_L GENMASK(15, 0)
2292+
2293 #define MTK_WED_RING_OFS_BASE 0x00
2294 #define MTK_WED_RING_OFS_COUNT 0x04
2295 #define MTK_WED_RING_OFS_CPU_IDX 0x08
2296@@ -355,4 +419,71 @@ struct mtk_wdma_desc {
2297 /* DMA channel mapping */
2298 #define HIFSYS_DMA_AG_MAP 0x008
2299
2300+#define MTK_WED_RTQM_GLO_CFG 0xb00
2301+#define MTK_WED_RTQM_BUSY BIT(1)
2302+#define MTK_WED_RTQM_Q_RST BIT(2)
2303+#define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5)
2304+#define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20)
2305+
2306+#define MTK_WED_RTQM_R2H_MIB(_n) (0xb70 + (_n) * 0x4)
2307+#define MTK_WED_RTQM_R2Q_MIB(_n) (0xb78 + (_n) * 0x4)
2308+#define MTK_WED_RTQM_Q2N_MIB 0xb80
2309+#define MTK_WED_RTQM_Q2H_MIB(_n) (0xb84 + (_n) * 0x4)
2310+
2311+#define MTK_WED_RTQM_Q2B_MIB 0xb8c
2312+#define MTK_WED_RTQM_PFDBK_MIB 0xb90
2313+
2314+#define MTK_WED_RROQM_GLO_CFG 0xc04
2315+#define MTK_WED_RROQM_RST_IDX 0xc08
2316+#define MTK_WED_RROQM_RST_IDX_MIOD BIT(0)
2317+#define MTK_WED_RROQM_RST_IDX_FDBK BIT(4)
2318+
2319+#define MTK_WED_RROQM_MIOD_CTRL0 0xc40
2320+#define MTK_WED_RROQM_MIOD_CTRL1 0xc44
2321+#define MTK_WED_RROQM_MIOD_CNT GENMASK(11, 0)
2322+
2323+#define MTK_WED_RROQM_MIOD_CTRL2 0xc48
2324+#define MTK_WED_RROQM_MIOD_CTRL3 0xc4c
2325+
2326+#define MTK_WED_RROQM_FDBK_CTRL0 0xc50
2327+#define MTK_WED_RROQM_FDBK_CTRL1 0xc54
2328+#define MTK_WED_RROQM_FDBK_CNT GENMASK(11, 0)
2329+
2330+#define MTK_WED_RROQM_FDBK_CTRL2 0xc58
2331+
2332+#define MTK_WED_RROQ_BASE_L 0xc80
2333+#define MTK_WED_RROQ_BASE_H 0xc84
2334+
2335+
2336+#define MTK_WED_RROQM_MIOD_CFG 0xc8c
2337+#define MTK_WED_RROQM_MIOD_MID_DW GENMASK(5, 0)
2338+#define MTK_WED_RROQM_MIOD_MOD_DW GENMASK(13, 8)
2339+#define MTK_WED_RROQM_MIOD_ENTRY_DW GENMASK(22, 16)
2340+
2341+#define MTK_WED_RROQM_MID_MIB 0xcc0
2342+#define MTK_WED_RROQM_MOD_MIB 0xcc4
2343+#define MTK_WED_RROQM_MOD_COHERENT_MIB 0xcc8
2344+#define MTK_WED_RROQM_FDBK_MIB 0xcd0
2345+#define MTK_WED_RROQM_FDBK_COHERENT_MIB 0xcd4
2346+#define MTK_WED_RROQM_FDBK_IND_MIB 0xce0
2347+#define MTK_WED_RROQM_FDBK_ENQ_MIB 0xce4
2348+#define MTK_WED_RROQM_FDBK_ANC_MIB 0xce8
2349+#define MTK_WED_RROQM_FDBK_ANC2H_MIB 0xcec
2350+
2351+#define MTK_WED_RX_BM_RX_DMAD 0xd80
2352+#define MTK_WED_RX_BM_BASE 0xd84
2353+#define MTK_WED_RX_BM_INIT_PTR 0xd88
2354+#define MTK_WED_RX_BM_PTR 0xd8c
2355+#define MTK_WED_RX_BM_PTR_HEAD GENMASK(32, 16)
2356+#define MTK_WED_RX_BM_PTR_TAIL GENMASK(15, 0)
2357+
2358+#define MTK_WED_RX_BM_BLEN 0xd90
2359+#define MTK_WED_RX_BM_STS 0xd94
2360+#define MTK_WED_RX_BM_INTF2 0xd98
2361+#define MTK_WED_RX_BM_INTF 0xd9c
2362+#define MTK_WED_RX_BM_ERR_STS 0xda8
2363+
2364+#define MTK_WED_WOCPU_VIEW_MIOD_BASE 0x8000
2365+#define MTK_WED_PCIE_INT_MASK 0x0
2366+
2367 #endif
2368diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
2369new file mode 100644
2370index 000000000..10618fc1a
2371--- /dev/null
2372+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
2373@@ -0,0 +1,548 @@
2374+// SPDX-License-Identifier: GPL-2.0-only
2375+
2376+#include <linux/kernel.h>
2377+#include <linux/bitfield.h>
2378+#include <linux/dma-mapping.h>
2379+#include <linux/skbuff.h>
2380+#include <linux/of_platform.h>
2381+#include <linux/interrupt.h>
2382+#include <linux/of_address.h>
2383+#include <linux/iopoll.h>
2384+#include <linux/soc/mediatek/mtk_wed.h>
2385+#include "mtk_wed.h"
2386+#include "mtk_wed_regs.h"
2387+#include "mtk_wed_ccif.h"
2388+#include "mtk_wed_wo.h"
2389+
2390+struct wed_wo_profile_stat profile_total[6] = {
2391+ {1001, 0},
2392+ {1501, 0},
2393+ {3001, 0},
2394+ {5001, 0},
2395+ {10001, 0},
2396+ {0xffffffff, 0}
2397+};
2398+
2399+struct wed_wo_profile_stat profiling_mod[6] = {
2400+ {1001, 0},
2401+ {1501, 0},
2402+ {3001, 0},
2403+ {5001, 0},
2404+ {10001, 0},
2405+ {0xffffffff, 0}
2406+};
2407+
2408+struct wed_wo_profile_stat profiling_rro[6] = {
2409+ {1001, 0},
2410+ {1501, 0},
2411+ {3001, 0},
2412+ {5001, 0},
2413+ {10001, 0},
2414+ {0xffffffff, 0}
2415+};
2416+
2417+static void
2418+woif_q_sync_idx(struct mtk_wed_wo *wo, struct wed_wo_queue *q)
2419+{
2420+ woccif_w32(wo, q->regs->desc_base, q->desc_dma);
2421+ woccif_w32(wo, q->regs->ring_size, q->ndesc);
2422+
2423+ /* wo fw start from 1 */
2424+ q->head = woccif_r32(wo, q->regs->dma_idx) + 1;
2425+ q->tail = q->head;
2426+}
2427+
2428+static void
2429+woif_q_reset(struct mtk_wed_wo *dev, struct wed_wo_queue *q)
2430+{
2431+
2432+ if (!q || !q->ndesc)
2433+ return;
2434+
2435+ woccif_w32(dev, q->regs->cpu_idx, 0);
2436+
2437+ woif_q_sync_idx(dev, q);
2438+}
2439+
2440+static void
2441+woif_q_kick(struct mtk_wed_wo *wo, struct wed_wo_queue *q, int offset)
2442+{
2443+ wmb();
2444+ woccif_w32(wo, q->regs->cpu_idx, q->head + offset);
2445+}
2446+
2447+static int
2448+woif_q_rx_fill(struct mtk_wed_wo *wo, struct wed_wo_queue *q)
2449+{
2450+ int len = q->buf_size, frames = 0;
2451+ struct wed_wo_queue_entry *entry;
2452+ struct wed_wo_desc *desc;
2453+ dma_addr_t addr;
2454+ u32 ctrl = 0;
2455+ void *buf;
2456+
2457+ if (!q->ndesc)
2458+ return 0;
2459+
2460+ spin_lock_bh(&q->lock);
2461+
2462+ while (q->queued < q->ndesc - 1) {
2463+
2464+ buf = page_frag_alloc(&q->rx_page, len, GFP_ATOMIC);
2465+ if (!buf)
2466+ break;
2467+
2468+ addr = dma_map_single(wo->hw->dev, buf, len, DMA_FROM_DEVICE);
2469+ if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
2470+ skb_free_frag(buf);
2471+ break;
2472+ }
2473+ dma_sync_single_for_cpu(wo->hw->dev, addr, len,
2474+ DMA_TO_DEVICE);
2475+ desc = &q->desc[q->head];
2476+ entry = &q->entry[q->head];
2477+
2478+ entry->dma_addr = addr;
2479+ entry->dma_len = len;
2480+
2481+ ctrl = FIELD_PREP(WED_CTL_SD_LEN0, entry->dma_len);
2482+ ctrl |= WED_CTL_LAST_SEC0;
2483+
2484+ WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
2485+ WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
2486+ dma_sync_single_for_device(wo->hw->dev, addr, len,
2487+ DMA_TO_DEVICE);
2488+ q->queued++;
2489+ q->entry[q->head].buf = buf;
2490+
2491+ q->head = (q->head + 1) % q->ndesc;
2492+ frames++;
2493+ }
2494+
2495+ spin_unlock_bh(&q->lock);
2496+
2497+ return frames;
2498+}
2499+
2500+static void
2501+woif_q_rx_fill_process(struct mtk_wed_wo *wo, struct wed_wo_queue *q)
2502+{
2503+ if(woif_q_rx_fill(wo, q))
2504+ woif_q_kick(wo, q, -1);
2505+}
2506+
2507+static int
2508+woif_q_alloc(struct mtk_wed_wo *dev, struct wed_wo_queue *q,
2509+ int n_desc, int bufsize, int idx,
2510+ struct wed_wo_queue_regs *regs)
2511+{
2512+ struct wed_wo_queue_regs *q_regs;
2513+ int size;
2514+
2515+ spin_lock_init(&q->lock);
2516+ spin_lock_init(&q->cleanup_lock);
2517+
2518+ q_regs = devm_kzalloc(dev->hw->dev, sizeof(*q_regs), GFP_KERNEL);
2519+
2520+ q_regs->desc_base = regs->desc_base;
2521+ q_regs->ring_size = regs->ring_size;
2522+ q_regs->cpu_idx = regs->cpu_idx;
2523+ q_regs->dma_idx = regs->dma_idx;
2524+
2525+ q->regs = q_regs;
2526+ q->ndesc = n_desc;
2527+ q->buf_size = bufsize;
2528+
2529+ size = q->ndesc * sizeof(struct wed_wo_desc);
2530+
2531+ q->desc = dmam_alloc_coherent(dev->hw->dev, size,
2532+ &q->desc_dma, GFP_KERNEL);
2533+ if (!q->desc)
2534+ return -ENOMEM;
2535+
2536+ size = q->ndesc * sizeof(*q->entry);
2537+ q->entry = devm_kzalloc(dev->hw->dev, size, GFP_KERNEL);
2538+ if (!q->entry)
2539+ return -ENOMEM;
2540+
2541+ if (idx == 0)
2542+ woif_q_reset(dev, &dev->q_tx);
2543+
2544+ return 0;
2545+}
2546+
2547+static void
2548+woif_q_tx_clean(struct mtk_wed_wo *wo, struct wed_wo_queue *q, bool flush)
2549+{
2550+ int last;
2551+
2552+ if (!q || !q->ndesc)
2553+ return;
2554+
2555+ spin_lock_bh(&q->cleanup_lock);
2556+ if (flush)
2557+ last = -1;
2558+ else
2559+ last = readl(&q->regs->dma_idx);
2560+
2561+ while (q->queued > 0 && q->tail != last) {
2562+ struct wed_wo_queue_entry *e;
2563+
2564+ e = &q->entry[q->tail];
2565+
2566+ dma_unmap_single(wo->hw->dev, e->dma_addr, e->dma_len,
2567+ DMA_TO_DEVICE);
2568+
2569+ if (e->skb)
2570+ dev_kfree_skb(e->skb);
2571+
2572+ memset(e, 0, sizeof(*e));
2573+
2574+ spin_lock_bh(&q->lock);
2575+ q->tail = (q->tail + 1) % q->ndesc;
2576+ q->queued--;
2577+ spin_unlock_bh(&q->lock);
2578+
2579+ if (!flush && q->tail == last)
2580+ last = readl(&q->regs->dma_idx);
2581+ }
2582+ spin_unlock_bh(&q->cleanup_lock);
2583+
2584+ if (flush) {
2585+ spin_lock_bh(&q->lock);
2586+ woif_q_sync_idx(wo, q);
2587+ woif_q_kick(wo, q, 0);
2588+ spin_unlock_bh(&q->lock);
2589+ }
2590+}
2591+
2592+static void
2593+woif_q_rx_clean(struct mtk_wed_wo *wo, struct wed_wo_queue *q)
2594+{
2595+}
2596+
2597+static void *
2598+woif_q_deq(struct mtk_wed_wo *wo, struct wed_wo_queue *q, bool flush,
2599+ int *len, u32 *info, bool *more)
2600+{
2601+ int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
2602+ struct wed_wo_queue_entry *e;
2603+ struct wed_wo_desc *desc;
2604+ int idx = q->tail;
2605+ void *buf;
2606+
2607+ *more = false;
2608+ if (!q->queued)
2609+ return NULL;
2610+
2611+ if (flush)
2612+ q->desc[idx].ctrl |= cpu_to_le32(WED_CTL_DMA_DONE);
2613+ else if (!(q->desc[idx].ctrl & cpu_to_le32(WED_CTL_DMA_DONE)))
2614+ return NULL;
2615+
2616+ q->tail = (q->tail + 1) % q->ndesc;
2617+ q->queued--;
2618+
2619+ desc = &q->desc[idx];
2620+ e = &q->entry[idx];
2621+
2622+ buf = e->buf;
2623+ if (len) {
2624+ u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
2625+ *len = FIELD_GET(WED_CTL_SD_LEN0, ctl);
2626+ *more = !(ctl & WED_CTL_LAST_SEC0);
2627+ }
2628+
2629+ if (info)
2630+ *info = le32_to_cpu(desc->info);
2631+ if(buf)
2632+ dma_unmap_single(wo->hw->dev, e->dma_addr, buf_len,
2633+ DMA_FROM_DEVICE);
2634+ e->skb = NULL;
2635+
2636+ return buf;
2637+}
2638+
2639+static int
2640+woif_q_init(struct mtk_wed_wo *dev,
2641+ int (*poll)(struct napi_struct *napi, int budget))
2642+{
2643+ init_dummy_netdev(&dev->napi_dev);
2644+ snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
2645+ "woif_q");
2646+
2647+ if (dev->q_rx.ndesc) {
2648+ netif_napi_add(&dev->napi_dev, &dev->napi, poll, 64);
2649+ woif_q_rx_fill(dev, &dev->q_rx);
2650+ woif_q_reset(dev, &dev->q_rx);
2651+ napi_enable(&dev->napi);
2652+ }
2653+
2654+ return 0;
2655+}
2656+
2657+void woif_q_rx_skb(struct mtk_wed_wo *wo, struct sk_buff *skb)
2658+{
2659+ struct wed_cmd_hdr *hdr = (struct wed_cmd_hdr *)skb->data;
2660+ int ret;
2661+
2662+ ret = mtk_wed_mcu_cmd_sanity_check(wo, skb);
2663+ if (ret)
2664+ goto free_skb;
2665+
2666+ if (WED_WO_CMD_FLAG_IS_RSP(hdr))
2667+ mtk_wed_mcu_rx_event(wo, skb);
2668+ else
2669+ mtk_wed_mcu_rx_unsolicited_event(wo, skb);
2670+
2671+ return;
2672+free_skb:
2673+ dev_kfree_skb(skb);
2674+}
2675+
2676+static int
2677+woif_q_tx_skb(struct mtk_wed_wo *wo, struct wed_wo_queue *q,
2678+ struct sk_buff *skb)
2679+{
2680+ struct wed_wo_queue_entry *entry;
2681+ struct wed_wo_desc *desc;
2682+ int len, ret, idx = -1;
2683+ dma_addr_t addr;
2684+ u32 ctrl = 0;
2685+
2686+ len = skb->len;
2687+ addr = dma_map_single(wo->hw->dev, skb->data, len, DMA_TO_DEVICE);
2688+ if (unlikely(dma_mapping_error(wo->hw->dev, addr)))
2689+ goto error;
2690+
2691+ /* packet tx, force trigger tx clean. */
2692+ if (q->queued + MTK_WED_WO_TXQ_FREE_THR >= q->ndesc - 1)
2693+ woif_q_tx_clean(wo, q, false);
2694+
2695+ if (q->queued + 1 >= q->ndesc - 1) {
2696+ ret = -ENOMEM;
2697+ goto error;
2698+ }
2699+
2700+ spin_lock_bh(&q->lock);
2701+
2702+ dma_sync_single_for_device(wo->hw->dev, addr, len,
2703+ DMA_TO_DEVICE);
2704+
2705+ idx = q->head;
2706+
2707+ desc = &q->desc[idx];
2708+ entry = &q->entry[idx];
2709+
2710+ entry->dma_addr = addr;
2711+ entry->dma_len = len;
2712+
2713+ ctrl = FIELD_PREP(WED_CTL_SD_LEN0, len);
2714+ ctrl |= WED_CTL_LAST_SEC0;
2715+ ctrl |= WED_CTL_DMA_DONE;
2716+
2717+ WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
2718+ WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
2719+
2720+ q->queued++;
2721+ q->entry[idx].skb = skb;
2722+
2723+ woif_q_kick(wo, q, 0);
2724+ wo->drv_ops->kickout(wo);
2725+
2726+ q->head = (q->head + 1) % q->ndesc;
2727+ spin_unlock_bh(&q->lock);
2728+ return 0;
2729+
2730+error:
2731+ dev_kfree_skb(skb);
2732+ return -ENOMEM;
2733+}
2734+
2735+static const struct wed_wo_queue_ops wo_queue_ops = {
2736+ .init = woif_q_init,
2737+ .alloc = woif_q_alloc,
2738+ .reset = woif_q_reset,
2739+ .tx_skb = woif_q_tx_skb,
2740+ .tx_clean = woif_q_tx_clean,
2741+ .rx_clean = woif_q_rx_clean,
2742+ .kick = woif_q_kick,
2743+};
2744+
2745+static int
2746+mtk_wed_wo_rx_process(struct mtk_wed_wo *wo, struct wed_wo_queue *q, int budget)
2747+{
2748+ int len, data_len, done = 0;
2749+ struct sk_buff *skb;
2750+ unsigned char *data;
2751+ bool more;
2752+
2753+ while (done < budget) {
2754+ u32 info;
2755+
2756+ data = woif_q_deq(wo, q, false, &len, &info, &more);
2757+ if (!data)
2758+ break;
2759+
2760+ data_len = SKB_WITH_OVERHEAD(q->buf_size);
2761+
2762+ if (data_len < len) {
2763+ skb_free_frag(data);
2764+ continue;
2765+ }
2766+
2767+ skb = build_skb(data, q->buf_size);
2768+ if (!skb) {
2769+ skb_free_frag(data);
2770+ continue;
2771+ }
2772+
2773+ __skb_put(skb, len);
2774+ done++;
2775+
2776+ woif_q_rx_skb(wo, skb);
2777+ }
2778+
2779+ woif_q_rx_fill_process(wo, q);
2780+
2781+ return done;
2782+}
2783+
2784+void mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, bool set,
2785+ u32 clear, u32 val)
2786+{
2787+ unsigned long flags;
2788+
2789+ spin_lock_irqsave(&wo->ccif.irq_lock, flags);
2790+ wo->ccif.irqmask &= ~clear;
2791+ wo->ccif.irqmask |= val;
2792+ if (set)
2793+ wo->drv_ops->set_isr(wo, wo->ccif.irqmask);
2794+
2795+ spin_unlock_irqrestore(&wo->ccif.irq_lock, flags);
2796+}
2797+
2798+static inline void mtk_wed_wo_set_ack_mask(struct mtk_wed_wo *wo, u32 mask)
2799+{
2800+ wo->drv_ops->set_ack(wo, mask);
2801+}
2802+
2803+static void mtk_wed_wo_poll_complete(struct mtk_wed_wo *wo)
2804+{
2805+ mtk_wed_wo_set_ack_mask(wo, wo->ccif.q_int_mask);
2806+ mtk_wed_wo_isr_enable(wo, wo->ccif.q_int_mask);
2807+}
2808+
2809+int mtk_wed_wo_rx_poll(struct napi_struct *napi, int budget)
2810+{
2811+ struct mtk_wed_wo *wo;
2812+ int done = 0, cur;
2813+
2814+ wo = container_of(napi->dev, struct mtk_wed_wo, napi_dev);
2815+
2816+ rcu_read_lock();
2817+
2818+ do {
2819+ cur = mtk_wed_wo_rx_process(wo, &wo->q_rx, budget - done);
2820+ /* rx packet handle */
2821+ done += cur;
2822+ } while (cur && done < budget);
2823+
2824+ rcu_read_unlock();
2825+
2826+ if (done < budget && napi_complete(napi))
2827+ mtk_wed_wo_poll_complete(wo);
2828+
2829+ return done;
2830+}
2831+
2832+static void mtk_wed_wo_isr_tasklet(unsigned long data)
2833+{
2834+ struct mtk_wed_wo *wo = (struct mtk_wed_wo *)data;
2835+ u32 intr, mask;
2836+
2837+ /* disable isr */
2838+ wo->drv_ops->set_isr(wo, 0);
2839+
2840+ intr = wo->drv_ops->get_csr(wo);
2841+ intr &= wo->ccif.irqmask;
2842+
2843+ mask = intr & (wo->ccif.q_int_mask | wo->ccif.q_exep_mask);
2844+ mtk_wed_wo_isr_disable(wo, mask);
2845+
2846+ if (intr & wo->ccif.q_int_mask)
2847+ napi_schedule(&wo->napi);
2848+
2849+ if (intr & wo->ccif.q_exep_mask) {
2850+ /* todo */
2851+ }
2852+}
2853+
2854+static irqreturn_t mtk_wed_wo_isr_handler(int irq, void *wo_instance)
2855+{
2856+ struct mtk_wed_wo *wo = wo_instance;
2857+
2858+ wo->drv_ops->set_isr(wo, 0);
2859+
2860+ tasklet_schedule(&wo->irq_tasklet);
2861+
2862+ return IRQ_HANDLED;
2863+}
2864+
2865+int mtk_wed_wo_init(struct mtk_wed_hw *hw)
2866+{
2867+ struct mtk_wed_wo *wo;
2868+ int ret = 0;
2869+
2870+ wo = kzalloc(sizeof(struct mtk_wed_wo), GFP_KERNEL);
2871+ if (!wo)
2872+ return -ENOMEM;
2873+
2874+ wo->hw = hw;
2875+ wo->queue_ops = &wo_queue_ops;
2876+ hw->wed_wo = wo;
2877+
2878+ tasklet_init(&wo->irq_tasklet, mtk_wed_wo_isr_tasklet,
2879+ (unsigned long)wo);
2880+
2881+ skb_queue_head_init(&wo->mcu.res_q);
2882+ init_waitqueue_head(&wo->mcu.wait);
2883+ mutex_init(&wo->mcu.mutex);
2884+
2885+ ret = wed_wo_hardware_init(wo, mtk_wed_wo_isr_handler);
2886+ if (ret)
2887+ goto error;
2888+
2889+ /* fw download */
2890+ ret = wed_wo_mcu_init(wo);
2891+ if (ret)
2892+ goto error;
2893+
2894+ ret = mtk_wed_exception_init(wo);
2895+ if (ret)
2896+ goto error;
2897+
2898+ return ret;
2899+
2900+error:
2901+ kfree(wo);
2902+
2903+ return ret;
2904+}
2905+
2906+void mtk_wed_wo_exit(struct mtk_wed_hw *hw)
2907+{
2908+/*
2909+#ifdef CONFIG_WED_HW_RRO_SUPPORT
2910+ woif_bus_exit(woif);
2911+ wo_exception_exit(woif);
2912+#endif
2913+*/
2914+ struct mtk_wed_wo *wo = hw->wed_wo;
2915+
2916+ if (wo->exp.log) {
2917+ dma_unmap_single(wo->hw->dev, wo->exp.phys, wo->exp.log_size, DMA_FROM_DEVICE);
2918+ kfree(wo->exp.log);
2919+ }
2920+
2921+}
2922diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.h b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
2923new file mode 100644
2924index 000000000..00b39e779
2925--- /dev/null
2926+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
2927@@ -0,0 +1,334 @@
2928+// SPDX-License-Identifier: GPL-2.0-only
2929+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
2930+
2931+#ifndef __MTK_WED_WO_H
2932+#define __MTK_WED_WO_H
2933+
2934+#include <linux/netdevice.h>
2935+#include <linux/skbuff.h>
2936+#include "mtk_wed.h"
2937+
2938+#define WED_CTL_SD_LEN1 GENMASK(13, 0)
2939+#define WED_CTL_LAST_SEC1 BIT(14)
2940+#define WED_CTL_BURST BIT(15)
2941+#define WED_CTL_SD_LEN0_SHIFT 16
2942+#define WED_CTL_SD_LEN0 GENMASK(29, 16)
2943+#define WED_CTL_LAST_SEC0 BIT(30)
2944+#define WED_CTL_DMA_DONE BIT(31)
2945+#define WED_INFO_WINFO GENMASK(15, 0)
2946+
2947+#define MTK_WED_WO_TXQ_FREE_THR 10
2948+
2949+#define WED_WO_PROFILE_MAX_LVL 6
2950+
2951+
2952+enum mtk_wed_fw_region_id {
2953+ WO_REGION_EMI = 0,
2954+ WO_REGION_ILM,
2955+ WO_REGION_DATA,
2956+ WO_REGION_BOOT,
2957+ __WO_REGION_MAX
2958+};
2959+
2960+struct wed_wo_profile_stat {
2961+ u32 bound;
2962+ u32 record;
2963+};
2964+
2965+#define PROFILE_STAT(record, val) do { \
2966+ u8 lvl = 0; \
2967+ while (lvl < WED_WO_PROFILE_MAX_LVL) { \
2968+ if (val < record[lvl].bound) { \
2969+ record[lvl].record++; \
2970+ break; \
2971+ } \
2972+ lvl++; \
2973+ } \
2974+ } while (0)
2975+
2976+/* align with wo report structure */
2977+struct wed_wo_log {
2978+ u32 sn;
2979+ u32 total;
2980+ u32 rro;
2981+ u32 mod;
2982+};
2983+
2984+struct wed_wo_rxcnt {
2985+ u16 wlan_idx;
2986+ u16 tid;
2987+ u32 rx_pkt_cnt;
2988+ u32 rx_byte_cnt;
2989+ u32 rx_err_cnt;
2990+ u32 rx_drop_cnt;
2991+};
2992+
2993+struct wed_wo_queue {
2994+ struct wed_wo_queue_regs *regs;
2995+
2996+ spinlock_t lock;
2997+ spinlock_t cleanup_lock;
2998+ struct wed_wo_queue_entry *entry;
2999+ struct wed_wo_desc *desc;
3000+
3001+ u16 first;
3002+ u16 head;
3003+ u16 tail;
3004+ int ndesc;
3005+ int queued;
3006+ int buf_size;
3007+
3008+ u8 hw_idx;
3009+ u8 qid;
3010+ u8 flags;
3011+
3012+ dma_addr_t desc_dma;
3013+ struct page_frag_cache rx_page;
3014+};
3015+
3016+
3017+struct wed_wo_mmio {
3018+ struct regmap *regs;
3019+
3020+ spinlock_t irq_lock;
3021+ u8 irq;
3022+ u32 irqmask;
3023+
3024+ u32 q_int_mask;
3025+ u32 q_exep_mask;
3026+};
3027+
3028+struct wed_wo_mcu {
3029+ struct mutex mutex;
3030+ u32 msg_seq;
3031+ int timeout;
3032+
3033+ struct sk_buff_head res_q;
3034+ wait_queue_head_t wait;
3035+};
3036+
3037+struct wed_wo_exception {
3038+ void* log;
3039+ int log_size;
3040+ dma_addr_t phys;
3041+};
3042+
3043+struct wed_wo_queue_regs {
3044+ u32 desc_base;
3045+ u32 ring_size;
3046+ u32 cpu_idx;
3047+ u32 dma_idx;
3048+};
3049+
3050+struct wed_wo_desc {
3051+ __le32 buf0;
3052+ __le32 ctrl;
3053+ __le32 buf1;
3054+ __le32 info;
3055+ __le32 reserved[4];
3056+} __packed __aligned(32);
3057+
3058+struct wed_wo_queue_entry {
3059+ union {
3060+ void *buf;
3061+ struct sk_buff *skb;
3062+ };
3063+
3064+ u32 dma_addr;
3065+ u16 dma_len;
3066+ u16 wcid;
3067+ bool skip_buf0:1;
3068+ bool skip_buf1:1;
3069+ bool done:1;
3070+};
3071+
3072+struct wo_cmd_rxcnt_t {
3073+ u16 wlan_idx;
3074+ u16 tid;
3075+ u32 rx_pkt_cnt;
3076+ u32 rx_byte_cnt;
3077+ u32 rx_err_cnt;
3078+ u32 rx_drop_cnt;
3079+};
3080+
3081+struct wo_cmd_query {
3082+ u32 query0;
3083+ u32 query1;
3084+};
3085+
3086+struct wed_cmd_hdr {
3087+ /*DW0*/
3088+ u8 ver;
3089+ u8 cmd_id;
3090+ u16 length;
3091+
3092+ /*DW1*/
3093+ u16 uni_id;
3094+ u16 flag;
3095+
3096+ /*DW2*/
3097+ int status;
3098+
3099+ /*DW3*/
3100+ u8 reserved[20];
3101+};
3102+
3103+struct mtk_wed_fw_region {
3104+ void *addr;
3105+ u32 addr_pa;
3106+ u32 size;
3107+ u32 shared;
3108+};
3109+
3110+struct wed_wo_queue_ops;
3111+struct wed_wo_drv_ops;
3112+struct wed_wo_mcu_ops;
3113+
3114+struct wo_rx_total_cnt {
3115+ u64 rx_pkt_cnt;
3116+ u64 rx_byte_cnt;
3117+ u64 rx_err_cnt;
3118+ u64 rx_drop_cnt;
3119+};
3120+
3121+struct mtk_wed_wo {
3122+ struct mtk_wed_hw *hw;
3123+
3124+ struct wed_wo_mmio ccif;
3125+ struct wed_wo_mcu mcu;
3126+ struct wed_wo_exception exp;
3127+
3128+ const struct wed_wo_drv_ops *drv_ops;
3129+ const struct wed_wo_mcu_ops *mcu_ops;
3130+ const struct wed_wo_queue_ops *queue_ops;
3131+
3132+ struct net_device napi_dev;
3133+ spinlock_t rx_lock;
3134+ struct napi_struct napi;
3135+ struct sk_buff_head rx_skb;
3136+ struct wed_wo_queue q_rx;
3137+ struct tasklet_struct irq_tasklet;
3138+
3139+ struct wed_wo_queue q_tx;
3140+
3141+ struct mtk_wed_fw_region region[__WO_REGION_MAX];
3142+
3143+ struct wed_wo_profile_stat total[WED_WO_PROFILE_MAX_LVL];
3144+ struct wed_wo_profile_stat mod[WED_WO_PROFILE_MAX_LVL];
3145+ struct wed_wo_profile_stat rro[WED_WO_PROFILE_MAX_LVL];
3146+ char dirname[4];
3147+ struct wo_rx_total_cnt wo_rxcnt[8][544];
3148+};
3149+
3150+struct wed_wo_queue_ops {
3151+ int (*init)(struct mtk_wed_wo *wo,
3152+ int (*poll)(struct napi_struct *napi, int budget));
3153+
3154+ int (*alloc)(struct mtk_wed_wo *wo, struct wed_wo_queue *q,
3155+ int idx, int n_desc, int bufsize,
3156+ struct wed_wo_queue_regs *regs);
3157+
3158+ void (*reset)(struct mtk_wed_wo *wo, struct wed_wo_queue *q);
3159+
3160+ int (*tx_skb)(struct mtk_wed_wo *wo, struct wed_wo_queue *q,
3161+ struct sk_buff *skb);
3162+ int (*tx_skb1)(struct mtk_wed_wo *wo, struct wed_wo_queue *q,
3163+ u8 *msg, u32 msg_len);
3164+ void (*tx_clean)(struct mtk_wed_wo *wo, struct wed_wo_queue *q,
3165+ bool flush);
3166+
3167+ void (*rx_clean)(struct mtk_wed_wo *wo, struct wed_wo_queue *q);
3168+
3169+ void (*kick)(struct mtk_wed_wo *wo, struct wed_wo_queue *q, int offset);
3170+};
3171+
3172+struct wed_wo_drv_ops {
3173+ void (*kickout)(struct mtk_wed_wo *wo);
3174+ void (*set_ack)(struct mtk_wed_wo *wo, u32 mask);
3175+ void (*set_isr)(struct mtk_wed_wo *wo, u32 mask);
3176+ u32 (*get_csr)(struct mtk_wed_wo *wo);
3177+ int (*tx_prepare_skb)(struct mtk_wed_wo *wo);
3178+ bool (*check_excpetion)(struct mtk_wed_wo *wo);
3179+ void (*clear_int)(struct mtk_wed_wo *wo, u32 mask);
3180+};
3181+
3182+struct wed_wo_mcu_ops {
3183+ u32 headroom;
3184+
3185+ int (*mcu_skb_send_msg)(struct mtk_wed_wo *wo, int to_id,
3186+ int cmd, struct sk_buff *skb,
3187+ int *seq, bool wait_resp);
3188+
3189+ int (*mcu_parse_response)(struct mtk_wed_wo *wo, int cmd,
3190+ struct sk_buff *skb, int seq);
3191+
3192+ int (*mcu_restart)(struct mtk_wed_wo *wo);
3193+};
3194+
3195+#define mtk_wed_wo_q_init(wo, ...) (wo)->queue_ops->init((wo), __VA_ARGS__)
3196+#define mtk_wed_wo_q_alloc(wo, ...) (wo)->queue_ops->alloc((wo), __VA_ARGS__)
3197+#define mtk_wed_wo_q_reset(wo, ...) (wo)->queue_ops->init((wo), __VA_ARGS__)
3198+#define mtk_wed_wo_q_tx_skb(wo, ...) (wo)->queue_ops->tx_skb((wo), __VA_ARGS__)
3199+#define mtk_wed_wo_q_tx_skb1(wo, ...) (wo)->queue_ops->tx_skb1((wo), __VA_ARGS__)
3200+#define mtk_wed_wo_q_tx_clean(wo, ...) (wo)->queue_ops->tx_clean((wo), __VA_ARGS__)
3201+#define mtk_wed_wo_q_rx_clean(wo, ...) (wo)->queue_ops->rx_clean((wo), __VA_ARGS__)
3202+#define mtk_wed_wo_q_kick(wo, ...) (wo)->queue_ops->kick((wo), __VA_ARGS__)
3203+
3204+enum {
3205+	WARP_CMD_FLAG_RSP = 1 << 0, /* is response */
3206+	WARP_CMD_FLAG_NEED_RSP = 1 << 1, /* need response */
3207+ WARP_CMD_FLAG_FROM_TO_WO = 1 << 2, /* send between host and wo */
3208+};
3209+
3210+#define WED_WO_CMD_FLAG_IS_RSP(_hdr) ((_hdr)->flag & (WARP_CMD_FLAG_RSP))
3211+#define WED_WO_CMD_FLAG_SET_RSP(_hdr) ((_hdr)->flag |= (WARP_CMD_FLAG_RSP))
3212+#define WED_WO_CMD_FLAG_IS_NEED_RSP(_hdr) ((_hdr)->flag & (WARP_CMD_FLAG_NEED_RSP))
3213+#define WED_WO_CMD_FLAG_SET_NEED_RSP(_hdr) ((_hdr)->flag |= (WARP_CMD_FLAG_NEED_RSP))
3214+#define WED_WO_CMD_FLAG_IS_FROM_TO_WO(_hdr) ((_hdr)->flag & (WARP_CMD_FLAG_FROM_TO_WO))
3215+#define WED_WO_CMD_FLAG_SET_FROM_TO_WO(_hdr) ((_hdr)->flag |= (WARP_CMD_FLAG_FROM_TO_WO))
3216+
3217+void mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, bool set,
3218+ u32 clear, u32 val);
3219+
3220+static inline void mtk_wed_wo_isr_enable(struct mtk_wed_wo *wo, u32 mask)
3221+{
3222+ mtk_wed_wo_set_isr_mask(wo, false, 0, mask);
3223+
3224+ tasklet_schedule(&wo->irq_tasklet);
3225+}
3226+
3227+static inline void mtk_wed_wo_isr_disable(struct mtk_wed_wo *wo, u32 mask)
3228+{
3229+ mtk_wed_wo_set_isr_mask(wo, true, mask, 0);
3230+}
3231+
3232+static inline void
3233+wo_w32(struct mtk_wed_wo *dev, u32 reg, u32 val)
3234+{
3235+ writel(val, dev->region[WO_REGION_BOOT].addr + reg);
3236+}
3237+
3238+static inline u32
3239+wo_r32(struct mtk_wed_wo *dev, u32 reg)
3240+{
3241+ return readl(dev->region[WO_REGION_BOOT].addr + reg);
3242+}
3243+static inline void
3244+woccif_w32(struct mtk_wed_wo *dev, u32 reg, u32 val)
3245+{
3246+ regmap_write(dev->ccif.regs, reg, val);
3247+}
3248+
3249+static inline u32
3250+woccif_r32(struct mtk_wed_wo *dev, u32 reg)
3251+{
3252+ unsigned int val;
3253+
3254+ regmap_read(dev->ccif.regs, reg, &val);
3255+
3256+ return val;
3257+}
3258+
3259+int mtk_wed_wo_init(struct mtk_wed_hw *hw);
3260+#endif
3261+
3262diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
3263index 24742604b..b6b6823ae 100644
3264--- a/include/linux/soc/mediatek/mtk_wed.h
3265+++ b/include/linux/soc/mediatek/mtk_wed.h
3266@@ -7,6 +7,9 @@
3267 #include <linux/pci.h>
3268
3269 #define MTK_WED_TX_QUEUES 2
3270+#define MTK_WED_RX_QUEUES 2
3271+
3272+#define WED_WO_STA_REC 0x6
3273
3274 enum {
3275 MTK_NO_WED,
3276@@ -33,6 +36,24 @@ struct mtk_wed_ring {
3277 void __iomem *wpdma;
3278 };
3279
3280+struct mtk_rxbm_desc {
3281+ __le32 buf0;
3282+ __le32 token;
3283+} __packed __aligned(4);
3284+
3285+struct dma_buf {
3286+ int size;
3287+ void **pages;
3288+ struct mtk_wdma_desc *desc;
3289+ dma_addr_t desc_phys;
3290+};
3291+
3292+struct dma_entry {
3293+ int size;
3294+ struct mtk_rxbm_desc *desc;
3295+ dma_addr_t desc_phys;
3296+};
3297+
3298 struct mtk_wed_device {
3299 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
3300 const struct mtk_wed_ops *ops;
3301@@ -46,19 +67,27 @@ struct mtk_wed_device {
3302 struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES];
3303 struct mtk_wed_ring txfree_ring;
3304 struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
3305+ struct mtk_wed_ring rx_ring[MTK_WED_RX_QUEUES];
3306+ struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES];
3307+
3308+ struct dma_buf buf_ring;
3309+ struct dma_entry rx_buf_ring;
3310+ struct page_frag_cache rx_page;
3311
3312 struct {
3313- int size;
3314- void **pages;
3315- struct mtk_wdma_desc *desc;
3316- dma_addr_t desc_phys;
3317- } buf_ring;
3318+ struct mtk_wed_ring rro_ring;
3319+ void __iomem *rro_desc;
3320+ dma_addr_t miod_desc_phys;
3321+ dma_addr_t fdbk_desc_phys;
3322+ u32 mcu_view_miod;
3323+ } rro;
3324
3325 /* filled by driver: */
3326 struct {
3327 struct pci_dev *pci_dev;
3328 void __iomem *base;
3329 u32 bus_type;
3330+ u32 phy_base;
3331
3332 union {
3333 u32 wpdma_phys;
3334@@ -67,16 +96,25 @@ struct mtk_wed_device {
3335 u32 wpdma_mask;
3336 u32 wpdma_tx;
3337 u32 wpdma_txfree;
3338+ u32 wpdma_rx_glo;
3339+ u32 wpdma_rx;
3340
3341 u8 tx_tbit[MTK_WED_TX_QUEUES];
3342+ u8 rx_tbit[MTK_WED_RX_QUEUES];
3343 u8 txfree_tbit;
3344
3345 u16 token_start;
3346 unsigned int nbuf;
3347+ unsigned int rx_nbuf;
3348+ unsigned int rx_pkt;
3349+ unsigned int rx_pkt_size;
3350
3351 u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
3352 int (*offload_enable)(struct mtk_wed_device *wed);
3353 void (*offload_disable)(struct mtk_wed_device *wed);
3354+ u32 (*init_rx_buf)(struct mtk_wed_device *wed,
3355+ int pkt_num);
3356+ void (*release_rx_buf)(struct mtk_wed_device *wed);
3357 } wlan;
3358 #endif
3359 };
3360@@ -87,6 +125,10 @@ struct mtk_wed_ops {
3361 void __iomem *regs);
3362 int (*txfree_ring_setup)(struct mtk_wed_device *dev,
3363 void __iomem *regs);
3364+ int (*rx_ring_setup)(struct mtk_wed_device *dev, int ring,
3365+ void __iomem *regs);
3366+ int (*msg_update)(struct mtk_wed_device *dev, int cmd_id,
3367+ void *data, int len);
3368 void (*detach)(struct mtk_wed_device *dev);
3369
3370 void (*stop)(struct mtk_wed_device *dev);
3371@@ -98,6 +140,8 @@ struct mtk_wed_ops {
3372
3373 u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask);
3374 void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
3375+ void (*ppe_check)(struct mtk_wed_device *dev, struct sk_buff *skb,
3376+ u32 reason, u32 hash);
3377 };
3378
3379 extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
3380@@ -130,6 +174,10 @@ mtk_wed_device_attach(struct mtk_wed_device *dev)
3381 (_dev)->ops->tx_ring_setup(_dev, _ring, _regs)
3382 #define mtk_wed_device_txfree_ring_setup(_dev, _regs) \
3383 (_dev)->ops->txfree_ring_setup(_dev, _regs)
3384+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) \
3385+ (_dev)->ops->rx_ring_setup(_dev, _ring, _regs)
3386+#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) \
3387+ (_dev)->ops->msg_update(_dev, _id, _msg, _len)
3388 #define mtk_wed_device_reg_read(_dev, _reg) \
3389 (_dev)->ops->reg_read(_dev, _reg)
3390 #define mtk_wed_device_reg_write(_dev, _reg, _val) \
3391@@ -138,6 +186,8 @@ mtk_wed_device_attach(struct mtk_wed_device *dev)
3392 (_dev)->ops->irq_get(_dev, _mask)
3393 #define mtk_wed_device_irq_set_mask(_dev, _mask) \
3394 (_dev)->ops->irq_set_mask(_dev, _mask)
3395+#define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) \
3396+ (_dev)->ops->ppe_check(_dev, _skb, _reason, _hash)
3397 #else
3398 static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
3399 {
3400@@ -147,10 +197,13 @@ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
3401 #define mtk_wed_device_start(_dev, _mask) do {} while (0)
3402 #define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV
3403 #define mtk_wed_device_txfree_ring_setup(_dev, _ring, _regs) -ENODEV
3404+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) -ENODEV
3405+#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) -ENODEV
3406 #define mtk_wed_device_reg_read(_dev, _reg) 0
3407 #define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
3408 #define mtk_wed_device_irq_get(_dev, _mask) 0
3409 #define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0)
3410+#define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) do {} while (0)
3411 #endif
3412
3413 #endif
3414--
34152.18.0
3416