blob: 3df0ab7833edb6ee47e26476e685029dbf7ae0a5 [file] [log] [blame]
developer8cb3ac72022-07-04 10:55:14 +08001From bc8244ada5c668374813f7f9b73d990bf2695aaf Mon Sep 17 00:00:00 2001
2From: Sujuan Chen <sujuan.chen@mediatek.com>
3Date: Wed, 15 Jun 2022 14:38:54 +0800
4Subject: [PATCH 8/8] 9997-add-wed-rx-support-for-mt7896
5
6Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
7---
8 arch/arm64/boot/dts/mediatek/mt7986a.dtsi | 42 +-
9 arch/arm64/boot/dts/mediatek/mt7986b.dtsi | 42 +-
10 drivers/net/ethernet/mediatek/Makefile | 2 +-
developera3f86ed2022-07-08 14:15:13 +080011 drivers/net/ethernet/mediatek/mtk_wed.c | 613 ++++++++++++++++--
12 drivers/net/ethernet/mediatek/mtk_wed.h | 51 ++
13 drivers/net/ethernet/mediatek/mtk_wed_ccif.c | 133 ++++
developer8cb3ac72022-07-04 10:55:14 +080014 drivers/net/ethernet/mediatek/mtk_wed_ccif.h | 45 ++
15 .../net/ethernet/mediatek/mtk_wed_debugfs.c | 90 +++
developera3f86ed2022-07-08 14:15:13 +080016 drivers/net/ethernet/mediatek/mtk_wed_mcu.c | 561 ++++++++++++++++
developer8cb3ac72022-07-04 10:55:14 +080017 drivers/net/ethernet/mediatek/mtk_wed_mcu.h | 125 ++++
developera3f86ed2022-07-08 14:15:13 +080018 drivers/net/ethernet/mediatek/mtk_wed_regs.h | 147 ++++-
19 drivers/net/ethernet/mediatek/mtk_wed_wo.c | 588 +++++++++++++++++
20 drivers/net/ethernet/mediatek/mtk_wed_wo.h | 336 ++++++++++
developer8cb3ac72022-07-04 10:55:14 +080021 include/linux/soc/mediatek/mtk_wed.h | 63 +-
22 14 files changed, 2643 insertions(+), 69 deletions(-)
23 mode change 100644 => 100755 drivers/net/ethernet/mediatek/mtk_wed.c
24 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_ccif.c
25 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_ccif.h
26 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_mcu.c
27 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_mcu.h
28 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_wo.c
29 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_wo.h
30
31diff --git a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
32index 644255b35..ddcc0b809 100644
33--- a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
34+++ b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
35@@ -65,6 +65,12 @@
36 interrupt-parent = <&gic>;
37 interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
38 mediatek,wed_pcie = <&wed_pcie>;
39+ mediatek,ap2woccif = <&ap2woccif0>;
40+ mediatek,wocpu_ilm = <&wocpu0_ilm>;
41+ mediatek,wocpu_dlm = <&wocpu0_dlm>;
42+ mediatek,wocpu_boot = <&cpu_boot>;
43+ mediatek,wocpu_emi = <&wocpu0_emi>;
44+ mediatek,wocpu_data = <&wocpu_data>;
45 };
46
47 wed1: wed@15011000 {
48@@ -74,15 +80,26 @@
49 interrupt-parent = <&gic>;
50 interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
51 mediatek,wed_pcie = <&wed_pcie>;
52+ mediatek,ap2woccif = <&ap2woccif1>;
53+ mediatek,wocpu_ilm = <&wocpu1_ilm>;
54+ mediatek,wocpu_dlm = <&wocpu1_dlm>;
55+ mediatek,wocpu_boot = <&cpu_boot>;
56+ mediatek,wocpu_emi = <&wocpu1_emi>;
57+ mediatek,wocpu_data = <&wocpu_data>;
58 };
59
60- ap2woccif: ap2woccif@151A5000 {
61- compatible = "mediatek,ap2woccif";
62- reg = <0 0x151A5000 0 0x1000>,
63- <0 0x151AD000 0 0x1000>;
64+ ap2woccif0: ap2woccif@151A5000 {
65+ compatible = "mediatek,ap2woccif", "syscon";
66+ reg = <0 0x151A5000 0 0x1000>;
67 interrupt-parent = <&gic>;
68- interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
69- <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
70+ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
71+ };
72+
73+ ap2woccif1: ap2woccif@0x151AD000 {
74+ compatible = "mediatek,ap2woccif", "syscon";
75+ reg = <0 0x151AD000 0 0x1000>;
76+ interrupt-parent = <&gic>;
77+ interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
78 };
79
80 wocpu0_ilm: wocpu0_ilm@151E0000 {
81@@ -95,10 +112,17 @@
82 reg = <0 0x151F0000 0 0x8000>;
83 };
84
85- wocpu_dlm: wocpu_dlm@151E8000 {
86+ wocpu0_dlm: wocpu_dlm@151E8000 {
87+ compatible = "mediatek,wocpu_dlm";
88+ reg = <0 0x151E8000 0 0x2000>;
89+
90+ resets = <&ethsysrst 0>;
91+ reset-names = "wocpu_rst";
92+ };
93+
94+ wocpu1_dlm: wocpu_dlm@0x151F8000 {
95 compatible = "mediatek,wocpu_dlm";
96- reg = <0 0x151E8000 0 0x2000>,
97- <0 0x151F8000 0 0x2000>;
98+ reg = <0 0x151F8000 0 0x2000>;
99
100 resets = <&ethsysrst 0>;
101 reset-names = "wocpu_rst";
102diff --git a/arch/arm64/boot/dts/mediatek/mt7986b.dtsi b/arch/arm64/boot/dts/mediatek/mt7986b.dtsi
103index 67bf86f6a..6710b388b 100644
104--- a/arch/arm64/boot/dts/mediatek/mt7986b.dtsi
105+++ b/arch/arm64/boot/dts/mediatek/mt7986b.dtsi
106@@ -65,6 +65,12 @@
107 interrupt-parent = <&gic>;
108 interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
109 mediatek,wed_pcie = <&wed_pcie>;
110+ mediatek,ap2woccif = <&ap2woccif0>;
111+ mediatek,wocpu_ilm = <&wocpu0_ilm>;
112+ mediatek,wocpu_dlm = <&wocpu0_dlm>;
113+ mediatek,wocpu_boot = <&cpu_boot>;
114+ mediatek,wocpu_emi = <&wocpu0_emi>;
115+ mediatek,wocpu_data = <&wocpu_data>;
116 };
117
118 wed1: wed@15011000 {
119@@ -74,15 +80,26 @@
120 interrupt-parent = <&gic>;
121 interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
122 mediatek,wed_pcie = <&wed_pcie>;
123+ mediatek,ap2woccif = <&ap2woccif1>;
124+ mediatek,wocpu_ilm = <&wocpu1_ilm>;
125+ mediatek,wocpu_dlm = <&wocpu1_dlm>;
126+ mediatek,wocpu_boot = <&cpu_boot>;
127+ mediatek,wocpu_emi = <&wocpu1_emi>;
128+ mediatek,wocpu_data = <&wocpu_data>;
129 };
130
131- ap2woccif: ap2woccif@151A5000 {
132- compatible = "mediatek,ap2woccif";
133- reg = <0 0x151A5000 0 0x1000>,
134- <0 0x151AD000 0 0x1000>;
135+ ap2woccif0: ap2woccif@151A5000 {
136+ compatible = "mediatek,ap2woccif", "syscon";
137+ reg = <0 0x151A5000 0 0x1000>;
138 interrupt-parent = <&gic>;
139- interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
140- <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
141+ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
142+ };
143+
144+ ap2woccif1: ap2woccif@0x151AD000 {
145+ compatible = "mediatek,ap2woccif", "syscon";
146+ reg = <0 0x151AD000 0 0x1000>;
147+ interrupt-parent = <&gic>;
148+ interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
149 };
150
151 wocpu0_ilm: wocpu0_ilm@151E0000 {
152@@ -95,10 +112,17 @@
153 reg = <0 0x151F0000 0 0x8000>;
154 };
155
156- wocpu_dlm: wocpu_dlm@151E8000 {
157+ wocpu0_dlm: wocpu_dlm@151E8000 {
158+ compatible = "mediatek,wocpu_dlm";
159+ reg = <0 0x151E8000 0 0x2000>;
160+
161+ resets = <&ethsysrst 0>;
162+ reset-names = "wocpu_rst";
163+ };
164+
165+ wocpu1_dlm: wocpu_dlm@0x151F8000 {
166 compatible = "mediatek,wocpu_dlm";
167- reg = <0 0x151E8000 0 0x2000>,
168- <0 0x151F8000 0 0x2000>;
169+ reg = <0 0x151F8000 0 0x2000>;
170
171 resets = <&ethsysrst 0>;
172 reset-names = "wocpu_rst";
173diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
developera3f86ed2022-07-08 14:15:13 +0800174index 3528f1b..0c724a5 100644
developer8cb3ac72022-07-04 10:55:14 +0800175--- a/drivers/net/ethernet/mediatek/Makefile
176+++ b/drivers/net/ethernet/mediatek/Makefile
177@@ -10,5 +10,5 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o
178 ifdef CONFIG_DEBUG_FS
179 mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
180 endif
181-obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
182+obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o mtk_wed_wo.o mtk_wed_mcu.o mtk_wed_ccif.o
183 obj-$(CONFIG_NET_MEDIATEK_HNAT) += mtk_hnat/
184diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
developera3f86ed2022-07-08 14:15:13 +0800185index 48b0353..4d47b3a 100644
developer8cb3ac72022-07-04 10:55:14 +0800186--- a/drivers/net/ethernet/mediatek/mtk_wed.c
187+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
188@@ -13,11 +13,19 @@
189 #include <linux/debugfs.h>
190 #include <linux/iopoll.h>
191 #include <linux/soc/mediatek/mtk_wed.h>
192+
193 #include "mtk_eth_soc.h"
194 #include "mtk_wed_regs.h"
195 #include "mtk_wed.h"
196 #include "mtk_ppe.h"
197-
198+#include "mtk_wed_mcu.h"
199+#include "mtk_wed_wo.h"
200+
201+struct wo_cmd_ring {
202+ u32 q_base;
203+ u32 cnt;
204+ u32 unit;
205+};
206 static struct mtk_wed_hw *hw_list[2];
207 static DEFINE_MUTEX(hw_lock);
208
developera3f86ed2022-07-08 14:15:13 +0800209@@ -51,6 +59,56 @@ wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
developer8cb3ac72022-07-04 10:55:14 +0800210 wdma_m32(dev, reg, 0, mask);
211 }
212
213+static void
214+wdma_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
215+{
216+ wdma_m32(dev, reg, mask, 0);
217+}
218+
developera3f86ed2022-07-08 14:15:13 +0800219+static u32
220+mtk_wdma_read_reset(struct mtk_wed_device *dev)
221+{
222+ return wdma_r32(dev, MTK_WDMA_GLO_CFG);
223+}
224+
225+static void
226+mtk_wdma_rx_reset(struct mtk_wed_device *dev)
227+{
228+ u32 status;
229+ u32 mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY;
230+ int i;
231+
232+ wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN);
233+ if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
234+ !(status & mask), 0, 1000))
235+ WARN_ON_ONCE(1);
236+
237+ for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
238+ if (!dev->rx_wdma[i].desc) {
239+ wdma_w32(dev, MTK_WDMA_RING_RX(i) +
240+ MTK_WED_RING_OFS_CPU_IDX, 0);
241+ }
242+}
243+
244+static void
245+mtk_wdma_tx_reset(struct mtk_wed_device *dev)
246+{
247+ u32 status;
248+ u32 mask = MTK_WDMA_GLO_CFG_TX_DMA_BUSY;
249+ int i;
250+
251+ wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
252+ if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
253+ !(status & mask), 0, 1000))
254+ WARN_ON_ONCE(1);
255+
256+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
257+ if (!dev->tx_wdma[i].desc) {
258+ wdma_w32(dev, MTK_WDMA_RING_TX(i) +
259+ MTK_WED_RING_OFS_CPU_IDX, 0);
260+ }
261+}
262+
developer8cb3ac72022-07-04 10:55:14 +0800263 static u32
264 mtk_wed_read_reset(struct mtk_wed_device *dev)
265 {
developera3f86ed2022-07-08 14:15:13 +0800266@@ -68,6 +126,52 @@ mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
developer8cb3ac72022-07-04 10:55:14 +0800267 WARN_ON_ONCE(1);
268 }
269
270+static void
271+mtk_wed_wo_reset(struct mtk_wed_device *dev)
272+{
273+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
274+ u8 state = WO_STATE_DISABLE;
275+ u8 state_done = WOIF_DISABLE_DONE;
276+ void __iomem *reg;
277+ u32 value;
278+ unsigned long timeout = jiffies + WOCPU_TIMEOUT;
279+
developera3f86ed2022-07-08 14:15:13 +0800280+ mtk_wdma_rx_reset(dev);
281+
282+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
283+
developer8cb3ac72022-07-04 10:55:14 +0800284+ mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, WO_CMD_CHANGE_STATE,
285+ &state, sizeof(state), false);
286+
287+ do {
288+ value = wed_r32(dev, MTK_WED_SCR0 + 4 * WED_DUMMY_CR_WO_STATUS);
289+ } while (value != state_done && !time_after(jiffies, timeout));
290+
291+ reg = ioremap(WOCPU_MCUSYS_RESET_ADDR, 4);
292+ value = readl((void *)reg);
293+ switch(dev->hw->index) {
294+ case 0:
295+ value |= WOCPU_WO0_MCUSYS_RESET_MASK;
296+ writel(value, (void *)reg);
297+ value &= ~WOCPU_WO0_MCUSYS_RESET_MASK;
298+ writel(value, (void *)reg);
299+ break;
300+ case 1:
301+ value |= WOCPU_WO1_MCUSYS_RESET_MASK;
302+ writel(value, (void *)reg);
303+ value &= ~WOCPU_WO1_MCUSYS_RESET_MASK;
304+ writel(value, (void *)reg);
305+ break;
306+ default:
307+ dev_err(dev->hw->dev, "wrong mtk_wed%d\n",
308+ dev->hw->index);
309+
310+ break;
311+ }
312+
313+ iounmap((void *)reg);
314+}
315+
316 static struct mtk_wed_hw *
317 mtk_wed_assign(struct mtk_wed_device *dev)
318 {
developera3f86ed2022-07-08 14:15:13 +0800319@@ -178,7 +282,7 @@ mtk_wed_free_buffer(struct mtk_wed_device *dev)
320 {
321 struct mtk_wdma_desc *desc = dev->buf_ring.desc;
322 void **page_list = dev->buf_ring.pages;
323- int page_idx;
324+ int ring_size, page_idx;
325 int i;
326
327 if (!page_list)
328@@ -187,6 +291,13 @@ mtk_wed_free_buffer(struct mtk_wed_device *dev)
329 if (!desc)
330 goto free_pagelist;
331
332+ if (dev->ver == MTK_WED_V1) {
333+ ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
334+ } else {
335+ ring_size = MTK_WED_VLD_GROUP_SIZE * MTK_WED_PER_GROUP_PKT +
336+ MTK_WED_WDMA_RING_SIZE * 2;
337+ }
338+
339 for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
340 void *page = page_list[page_idx++];
341
342@@ -205,6 +316,42 @@ free_pagelist:
developer8cb3ac72022-07-04 10:55:14 +0800343 kfree(page_list);
344 }
345
346+static int
347+mtk_wed_rx_bm_alloc(struct mtk_wed_device *dev)
348+{
349+ struct mtk_rxbm_desc *desc;
350+ dma_addr_t desc_phys;
351+ int ring_size;
352+
353+ ring_size = dev->wlan.rx_nbuf;
354+ dev->rx_buf_ring.size = ring_size;
355+ desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
356+ &desc_phys, GFP_KERNEL);
357+ if (!desc)
358+ return -ENOMEM;
359+
360+ dev->rx_buf_ring.desc = desc;
361+ dev->rx_buf_ring.desc_phys = desc_phys;
362+
363+ dev->wlan.init_rx_buf(dev, dev->wlan.rx_pkt);
364+ return 0;
365+}
366+
367+static void
368+mtk_wed_free_rx_bm(struct mtk_wed_device *dev)
369+{
370+ struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc;
developera3f86ed2022-07-08 14:15:13 +0800371+ int ring_size = dev->rx_buf_ring.size;
developer8cb3ac72022-07-04 10:55:14 +0800372+
373+ if (!desc)
374+ return;
375+
376+ dev->wlan.release_rx_buf(dev);
377+
developera3f86ed2022-07-08 14:15:13 +0800378+ /* dma_free_coherent(dev->hw->dev, ring_size * sizeof(*desc),
379+ desc, dev->buf_ring.desc_phys); */
developer8cb3ac72022-07-04 10:55:14 +0800380+}
381+
382 static void
383 mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, int scale)
384 {
developera3f86ed2022-07-08 14:15:13 +0800385@@ -226,13 +373,22 @@ mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800386 mtk_wed_free_ring(dev, &dev->tx_wdma[i], dev->ver);
387 }
388
389+static void
390+mtk_wed_free_rx_rings(struct mtk_wed_device *dev)
391+{
392+ mtk_wed_free_rx_bm(dev);
393+ mtk_wed_free_ring(dev, &dev->rro.rro_ring, 1);
394+}
395+
396 static void
397 mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
398 {
399 u32 wdma_mask;
400
401 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
402-
403+ if (dev->ver > MTK_WED_V1)
404+ wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
405+ GENMASK(1, 0));
406 /* wed control cr set */
407 wed_set(dev, MTK_WED_CTRL,
408 MTK_WED_CTRL_WDMA_INT_AGENT_EN |
developera3f86ed2022-07-08 14:15:13 +0800409@@ -251,7 +407,7 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
developer8cb3ac72022-07-04 10:55:14 +0800410 wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
411 MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
412 } else {
413- /* initail tx interrupt trigger */
414+
415 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
416 MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
417 MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
developera3f86ed2022-07-08 14:15:13 +0800418@@ -262,22 +418,30 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
developer8cb3ac72022-07-04 10:55:14 +0800419 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
420 dev->wlan.tx_tbit[1]));
421
422- /* initail txfree interrupt trigger */
423 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
424 MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
425 MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
426 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
427 dev->wlan.txfree_tbit));
428+
429+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
430+ MTK_WED_WPDMA_INT_CTRL_RX0_EN |
431+ MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
432+ MTK_WED_WPDMA_INT_CTRL_RX1_EN |
433+ MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
434+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
435+ dev->wlan.rx_tbit[0]) |
436+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
437+ dev->wlan.rx_tbit[1]));
438 }
439- /* initail wdma interrupt agent */
440 wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
441 if (dev->ver == MTK_WED_V1) {
442 wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
443 } else {
444 wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
445 wed_set(dev, MTK_WED_WDMA_INT_CTRL,
446- FIELD_PREP(MTK_WED_WDMA_INT_POLL_SRC_SEL,dev->wdma_idx));
447-
448+ FIELD_PREP(MTK_WED_WDMA_INT_POLL_SRC_SEL,
449+ dev->wdma_idx));
450 }
451
452 wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
developera3f86ed2022-07-08 14:15:13 +0800453@@ -312,6 +476,39 @@ mtk_wed_set_512_support(struct mtk_wed_device *dev, bool en)
developer8cb3ac72022-07-04 10:55:14 +0800454 }
455 }
456
457+static void
458+mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
459+{
460+#define MTK_WFMDA_RX_DMA_EN BIT(2)
461+
462+ int timeout = 3;
463+ u32 cur_idx, regs;
464+
465+ do {
466+ regs = MTK_WED_WPDMA_RING_RX_DATA(idx) +
467+ MTK_WED_RING_OFS_COUNT;
468+ cur_idx = wed_r32(dev, regs);
469+ if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
470+ break;
471+
472+ usleep_range(100000, 200000);
473+ } while (timeout-- > 0);
474+
475+ if (timeout) {
476+ unsigned int val;
477+
478+ val = wifi_r32(dev, dev->wlan.wpdma_rx_glo -
479+ dev->wlan.phy_base);
480+ val |= MTK_WFMDA_RX_DMA_EN;
481+
482+ wifi_w32(dev, dev->wlan.wpdma_rx_glo -
483+ dev->wlan.phy_base, val);
484+ } else {
485+ dev_err(dev->hw->dev, "mtk_wed%d: rx dma enable failed!\n",
486+ dev->hw->index);
487+ }
488+}
489+
490 static void
491 mtk_wed_dma_enable(struct mtk_wed_device *dev)
492 {
developera3f86ed2022-07-08 14:15:13 +0800493@@ -336,9 +533,14 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800494 wdma_set(dev, MTK_WDMA_GLO_CFG,
495 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
496 } else {
497+ int idx = 0;
498+
499 wed_set(dev, MTK_WED_WPDMA_CTRL,
500 MTK_WED_WPDMA_CTRL_SDL1_FIXED);
501
502+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
503+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
504+
505 wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
506 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
507 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
developera3f86ed2022-07-08 14:15:13 +0800508@@ -346,6 +548,15 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800509 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
510 MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
511 MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
512+
513+ wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
514+ MTK_WED_WPDMA_RX_D_RX_DRV_EN |
515+ FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
516+ FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
517+ 0x2));
518+
519+ for (idx = 0; idx < MTK_WED_RX_QUEUES; idx++)
520+ mtk_wed_check_wfdma_rx_fill(dev, idx);
521 }
522 }
523
developera3f86ed2022-07-08 14:15:13 +0800524@@ -363,19 +574,23 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800525 MTK_WED_GLO_CFG_TX_DMA_EN |
526 MTK_WED_GLO_CFG_RX_DMA_EN);
527
528- wdma_m32(dev, MTK_WDMA_GLO_CFG,
529+ wdma_clr(dev, MTK_WDMA_GLO_CFG,
530 MTK_WDMA_GLO_CFG_TX_DMA_EN |
531 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
532- MTK_WDMA_GLO_CFG_RX_INFO2_PRERES, 0);
533+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
534
535 if (dev->ver == MTK_WED_V1) {
536 regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
537- wdma_m32(dev, MTK_WDMA_GLO_CFG,
538- MTK_WDMA_GLO_CFG_RX_INFO3_PRERES, 0);
539+ wdma_clr(dev, MTK_WDMA_GLO_CFG,
540+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
541 } else {
542 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
543 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
544 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
545+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
546+ MTK_WED_WPDMA_RX_D_RX_DRV_EN);
547+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
548+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
549 }
550 }
551
developera3f86ed2022-07-08 14:15:13 +0800552@@ -384,8 +599,11 @@ mtk_wed_stop(struct mtk_wed_device *dev)
553 {
554 mtk_wed_dma_disable(dev);
555
556- if (dev->ver > MTK_WED_V1)
557+ if (dev->ver > MTK_WED_V1) {
558 mtk_wed_set_512_support(dev, false);
559+ wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
560+ wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0);
561+ }
562
563 mtk_wed_set_ext_int(dev, false);
564
565@@ -395,6 +613,11 @@ mtk_wed_stop(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800566 MTK_WED_CTRL_WED_TX_BM_EN |
567 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
568
569+ if (dev->ver > MTK_WED_V1) {
570+ wed_clr(dev, MTK_WED_CTRL,
571+ MTK_WED_CTRL_WED_RX_BM_EN);
572+ }
573+
574 wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
575 wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
576 wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
developera3f86ed2022-07-08 14:15:13 +0800577@@ -417,8 +640,19 @@ mtk_wed_detach(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800578
579 mtk_wed_reset(dev, MTK_WED_RESET_WED);
developera3f86ed2022-07-08 14:15:13 +0800580
developer8cb3ac72022-07-04 10:55:14 +0800581+ wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
582+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
583+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
developera3f86ed2022-07-08 14:15:13 +0800584+
developer8cb3ac72022-07-04 10:55:14 +0800585 mtk_wed_free_buffer(dev);
586 mtk_wed_free_tx_rings(dev);
developera3f86ed2022-07-08 14:15:13 +0800587+ if (dev->ver > MTK_WED_V1) {
588+ mtk_wed_wo_reset(dev);
developerf50c1802022-07-05 20:35:53 +0800589+ mtk_wed_free_rx_rings(dev);
developera3f86ed2022-07-08 14:15:13 +0800590+ mtk_wed_wo_exit(hw);
591+ }
592+
593+ mtk_wdma_tx_reset(dev);
developer8cb3ac72022-07-04 10:55:14 +0800594
595 if (dev->wlan.bus_type == MTK_BUS_TYPE_PCIE) {
596 wlan_node = dev->wlan.pci_dev->dev.of_node;
developera3f86ed2022-07-08 14:15:13 +0800597@@ -477,7 +711,6 @@ mtk_wed_bus_init(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800598 value = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
599 value = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
600
601- /* pcie interrupt status trigger register */
602 wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
603 wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
604
developera3f86ed2022-07-08 14:15:13 +0800605@@ -501,6 +734,9 @@ mtk_wed_set_wpdma(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800606 wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
607 wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
608 wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
609+
610+ wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
611+ wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
612 } else {
613 wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
614 }
developera3f86ed2022-07-08 14:15:13 +0800615@@ -549,24 +785,92 @@ mtk_wed_hw_init_early(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800616 FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL,
617 MTK_WDMA_RING_RX(0)));
618 }
619+}
developera3f86ed2022-07-08 14:15:13 +0800620
developer8cb3ac72022-07-04 10:55:14 +0800621+static void
622+mtk_wed_rx_bm_hw_init(struct mtk_wed_device *dev)
623+{
624+ wed_w32(dev, MTK_WED_RX_BM_RX_DMAD,
625+ FIELD_PREP(MTK_WED_RX_BM_RX_DMAD_SDL0, dev->wlan.rx_pkt_size));
626+
627+ wed_w32(dev, MTK_WED_RX_BM_BASE, dev->rx_buf_ring.desc_phys);
developera3f86ed2022-07-08 14:15:13 +0800628+
developer8cb3ac72022-07-04 10:55:14 +0800629+ wed_w32(dev, MTK_WED_RX_BM_INIT_PTR, MTK_WED_RX_BM_INIT_SW_TAIL |
630+ FIELD_PREP(MTK_WED_RX_BM_SW_TAIL, dev->wlan.rx_pkt));
631+
632+ wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH,
633+ FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff));
634+
635+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
636 }
637
638 static void
639-mtk_wed_hw_init(struct mtk_wed_device *dev)
640+mtk_wed_rro_hw_init(struct mtk_wed_device *dev)
641+{
642+ wed_w32(dev, MTK_WED_RROQM_MIOD_CFG,
643+ FIELD_PREP(MTK_WED_RROQM_MIOD_MID_DW, 0x70 >> 2) |
644+ FIELD_PREP(MTK_WED_RROQM_MIOD_MOD_DW, 0x10 >> 2) |
645+ FIELD_PREP(MTK_WED_RROQM_MIOD_ENTRY_DW,
646+ MTK_WED_MIOD_ENTRY_CNT >> 2));
647+
648+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL0, dev->rro.miod_desc_phys);
649+
650+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL1,
651+ FIELD_PREP(MTK_WED_RROQM_MIOD_CNT, MTK_WED_MIOD_CNT));
652+
653+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL0, dev->rro.fdbk_desc_phys);
654+
655+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL1,
656+ FIELD_PREP(MTK_WED_RROQM_FDBK_CNT, MTK_WED_FB_CMD_CNT));
657+
658+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL2, 0);
659+
660+ wed_w32(dev, MTK_WED_RROQ_BASE_L, dev->rro.rro_ring.desc_phys);
661+
662+ wed_set(dev, MTK_WED_RROQM_RST_IDX,
663+ MTK_WED_RROQM_RST_IDX_MIOD |
664+ MTK_WED_RROQM_RST_IDX_FDBK);
665+
666+ wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
667+
668+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL2, MTK_WED_MIOD_CNT -1);
669+
670+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
671+}
672+
673+static void
674+mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
675+{
676+ wed_w32(dev, MTK_WED_RESET, MTK_WED_RESET_RX_ROUTE_QM);
677+
678+ do {
679+ udelay(100);
680+
681+ if (!(wed_r32(dev, MTK_WED_RESET) & MTK_WED_RESET_RX_ROUTE_QM))
682+ break;
683+ } while (1);
684+
685+ /* configure RX_ROUTE_QM */
686+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
687+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
688+ wed_set(dev, MTK_WED_RTQM_GLO_CFG,
689+ FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
690+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
691+
692+ /* enable RX_ROUTE_QM */
693+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
694+}
695+
696+static void
697+mtk_wed_tx_hw_init(struct mtk_wed_device *dev)
698 {
699 int size = dev->buf_ring.size;
700 int rev_size = MTK_WED_TX_RING_SIZE / 2;
701 int thr = 1;
702
703- if (dev->init_done)
704- return;
705-
706- dev->init_done = true;
707- mtk_wed_set_ext_int(dev, false);
708-
709 if (dev->ver > MTK_WED_V1) {
710- size = MTK_WED_WDMA_RING_SIZE * 2 + dev->buf_ring.size;
711+ size = MTK_WED_WDMA_RING_SIZE * ARRAY_SIZE(dev->tx_wdma) +
712+ dev->buf_ring.size;
713 rev_size = size;
714 thr = 0;
715 }
developera3f86ed2022-07-08 14:15:13 +0800716@@ -609,13 +913,48 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800717 }
718
719 static void
720-mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size, int scale)
721+mtk_wed_rx_hw_init(struct mtk_wed_device *dev)
developera3f86ed2022-07-08 14:15:13 +0800722+{
developer8cb3ac72022-07-04 10:55:14 +0800723+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
724+ MTK_WED_WPDMA_RX_D_RST_CRX_IDX0 |
725+ MTK_WED_WPDMA_RX_D_RST_CRX_IDX1 |
726+ MTK_WED_WPDMA_RX_D_RST_DRV_IDX0 |
727+ MTK_WED_WPDMA_RX_D_RST_DRV_IDX1);
728+
729+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
730+
731+ mtk_wed_rx_bm_hw_init(dev);
732+ mtk_wed_rro_hw_init(dev);
733+ mtk_wed_route_qm_hw_init(dev);
734+}
735+
736+static void
737+mtk_wed_hw_init(struct mtk_wed_device *dev)
738+{
739+ if (dev->init_done)
740+ return;
741+
742+ dev->init_done = true;
743+ mtk_wed_set_ext_int(dev, false);
744+ mtk_wed_tx_hw_init(dev);
745+ if (dev->ver > MTK_WED_V1)
746+ mtk_wed_rx_hw_init(dev);
747+}
748+
749+static void
750+mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size, int scale, bool tx)
developera3f86ed2022-07-08 14:15:13 +0800751 {
developer8cb3ac72022-07-04 10:55:14 +0800752+ __le32 ctrl;
753 int i;
754
755+ if (tx)
756+ ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
757+ else
758+ ctrl = cpu_to_le32(MTK_WFDMA_DESC_CTRL_TO_HOST);
759+
760 for (i = 0; i < size; i++) {
761 desc->buf0 = 0;
762- desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
763+ desc->ctrl = ctrl;
764 desc->buf1 = 0;
765 desc->info = 0;
766 desc += scale;
developera3f86ed2022-07-08 14:15:13 +0800767@@ -674,7 +1013,7 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800768 if (!desc)
769 continue;
770
771- mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE, dev->ver);
772+ mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE, dev->ver, true);
773 }
774
775 if (mtk_wed_poll_busy(dev))
developera3f86ed2022-07-08 14:15:13 +0800776@@ -729,9 +1068,24 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800777
778 }
779
780+static int
781+mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
782+ int size)
783+{
784+ ring->desc = dma_alloc_coherent(dev->hw->dev,
785+ size * sizeof(*ring->desc),
786+ &ring->desc_phys, GFP_KERNEL);
787+ if (!ring->desc)
788+ return -ENOMEM;
789+
790+ ring->size = size;
791+ memset(ring->desc, 0, size);
792+ return 0;
793+}
794+
795 static int
796 mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
797- int size, int scale)
798+ int size, int scale, bool tx)
799 {
800 ring->desc = dma_alloc_coherent(dev->hw->dev,
801 size * sizeof(*ring->desc) * scale,
developera3f86ed2022-07-08 14:15:13 +0800802@@ -740,17 +1094,18 @@ mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
developer8cb3ac72022-07-04 10:55:14 +0800803 return -ENOMEM;
804
805 ring->size = size;
806- mtk_wed_ring_reset(ring->desc, size, scale);
807+ mtk_wed_ring_reset(ring->desc, size, scale, tx);
808
809 return 0;
810 }
811
812 static int
813-mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
814+mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
815 {
816 struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
817
818- if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, dev->ver))
819+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
820+ dev->ver, true))
821 return -ENOMEM;
822
823 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
developera3f86ed2022-07-08 14:15:13 +0800824@@ -767,22 +1122,143 @@ mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
developer8cb3ac72022-07-04 10:55:14 +0800825 return 0;
826 }
827
828+static int
829+mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
830+{
831+ struct mtk_wed_ring *wdma = &dev->rx_wdma[idx];
832+
833+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
834+ dev->ver, true))
835+ return -ENOMEM;
836+
837+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
838+ wdma->desc_phys);
839+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
840+ size);
841+ wdma_w32(dev,
842+ MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
843+ wdma_w32(dev,
844+ MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);
845+
846+ if (idx == 0) {
847+ wed_w32(dev, MTK_WED_WDMA_RING_TX
848+ + MTK_WED_RING_OFS_BASE, wdma->desc_phys);
849+ wed_w32(dev, MTK_WED_WDMA_RING_TX
850+ + MTK_WED_RING_OFS_COUNT, size);
851+ wed_w32(dev, MTK_WED_WDMA_RING_TX
852+ + MTK_WED_RING_OFS_CPU_IDX, 0);
853+ wed_w32(dev, MTK_WED_WDMA_RING_TX
854+ + MTK_WED_RING_OFS_DMA_IDX, 0);
855+ }
856+
857+ return 0;
858+}
859+
860+static int
861+mtk_wed_rro_alloc(struct mtk_wed_device *dev)
862+{
863+ struct device_node *np, *node = dev->hw->node;
864+ struct mtk_wed_ring *ring;
865+ struct resource res;
866+ int ret;
867+
868+ np = of_parse_phandle(node, "mediatek,wocpu_dlm", 0);
869+ if (!np)
870+ return -ENODEV;
871+
872+ ret = of_address_to_resource(np, 0, &res);
873+ if (ret)
874+ return ret;
875+
876+ dev->rro.rro_desc = ioremap(res.start, resource_size(&res));
877+
878+ ring = &dev->rro.rro_ring;
879+
880+ dev->rro.miod_desc_phys = res.start;
881+
882+ dev->rro.mcu_view_miod = MTK_WED_WOCPU_VIEW_MIOD_BASE;
883+ dev->rro.fdbk_desc_phys = MTK_WED_MIOD_ENTRY_CNT * MTK_WED_MIOD_CNT
884+ + dev->rro.miod_desc_phys;
885+
886+ if (mtk_wed_rro_ring_alloc(dev, ring, MTK_WED_RRO_QUE_CNT))
887+ return -ENOMEM;
888+
889+ return 0;
890+}
891+
892+static int
893+mtk_wed_rro_cfg(struct mtk_wed_device *dev)
894+{
895+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
896+ struct {
897+ struct wo_cmd_ring ring[2];
898+
899+ u32 wed;
900+ u8 ver;
901+ } req = {
902+ .ring = {
903+ [0] = {
904+ .q_base = dev->rro.mcu_view_miod,
905+ .cnt = MTK_WED_MIOD_CNT,
906+ .unit = MTK_WED_MIOD_ENTRY_CNT,
907+ },
908+ [1] = {
909+ .q_base = dev->rro.mcu_view_miod +
910+ MTK_WED_MIOD_ENTRY_CNT *
911+ MTK_WED_MIOD_CNT,
912+ .cnt = MTK_WED_FB_CMD_CNT,
913+ .unit = 4,
914+ },
915+ },
916+ .wed = 0,
917+ };
918+
919+ return mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, WO_CMD_WED_CFG,
920+ &req, sizeof(req), true);
921+}
922+
923+static int
924+mtk_wed_send_msg(struct mtk_wed_device *dev, int cmd_id, void *data, int len)
925+{
926+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
927+
developerf50c1802022-07-05 20:35:53 +0800928+ if (dev->ver == MTK_WED_V1)
929+ return 0;
930+
developer8cb3ac72022-07-04 10:55:14 +0800931+ return mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, cmd_id, data, len, true);
932+}
933+
934+static void
935+mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb,
936+ u32 reason, u32 hash)
937+{
938+ int idx = dev->hw->index;
939+ struct mtk_eth *eth = dev->hw->eth;
940+ struct ethhdr *eh;
941+
942+ if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) {
943+ if (!skb)
944+ return;
945+
946+ skb_set_mac_header(skb, 0);
947+ eh = eth_hdr(skb);
948+ skb->protocol = eh->h_proto;
949+ mtk_ppe_check_skb(eth->ppe[idx], skb, hash);
950+ }
951+}
952+
953 static void
954 mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
955 {
956- u32 wdma_mask;
957- int i;
958+ int i, ret;
959
960 for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
961 if (!dev->tx_wdma[i].desc)
962- mtk_wed_wdma_ring_setup(dev, i, 16);
963-
964+ mtk_wed_wdma_rx_ring_setup(dev, i, 16);
965
966 mtk_wed_hw_init(dev);
967
968 mtk_wed_set_int(dev, irq_mask);
969-
970-
971 mtk_wed_set_ext_int(dev, true);
972
973 if (dev->ver == MTK_WED_V1) {
developera3f86ed2022-07-08 14:15:13 +0800974@@ -797,6 +1273,19 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
developer8cb3ac72022-07-04 10:55:14 +0800975 val |= BIT(0);
976 regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
977 } else {
978+ /* driver set mid ready and only once */
979+ wed_w32(dev, MTK_WED_EXT_INT_MASK1,
980+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
981+ wed_w32(dev, MTK_WED_EXT_INT_MASK2,
982+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
983+
984+ wed_r32(dev, MTK_WED_EXT_INT_MASK1);
985+ wed_r32(dev, MTK_WED_EXT_INT_MASK2);
986+
987+ ret = mtk_wed_rro_cfg(dev);
988+ if (ret)
989+ return;
990+
991 mtk_wed_set_512_support(dev, true);
992 }
993
developera3f86ed2022-07-08 14:15:13 +0800994@@ -841,9 +1330,17 @@ mtk_wed_attach(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800995 wed_r32(dev, MTK_WED_REV_ID));
996
997 ret = mtk_wed_buffer_alloc(dev);
998- if (ret) {
999- mtk_wed_detach(dev);
1000- goto out;
1001+ if (ret)
1002+ goto error;
1003+
1004+ if (dev->ver > MTK_WED_V1) {
1005+ ret = mtk_wed_rx_bm_alloc(dev);
1006+ if (ret)
1007+ goto error;
1008+
1009+ ret = mtk_wed_rro_alloc(dev);
1010+ if (ret)
1011+ goto error;
1012 }
1013
1014 mtk_wed_hw_init_early(dev);
developera3f86ed2022-07-08 14:15:13 +08001015@@ -851,7 +1348,12 @@ mtk_wed_attach(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +08001016 if (dev->ver == MTK_WED_V1)
1017 regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
1018 BIT(hw->index), 0);
1019+ else
1020+ ret = mtk_wed_wo_init(hw);
1021
1022+error:
1023+ if (ret)
1024+ mtk_wed_detach(dev);
1025 out:
1026 mutex_unlock(&hw_lock);
1027
developera3f86ed2022-07-08 14:15:13 +08001028@@ -877,10 +1379,10 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
developer8cb3ac72022-07-04 10:55:14 +08001029
1030 BUG_ON(idx > ARRAY_SIZE(dev->tx_ring));
1031
1032- if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, 1))
1033+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, 1, true))
1034 return -ENOMEM;
1035
1036- if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
1037+ if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
1038 return -ENOMEM;
1039
1040 ring->reg_base = MTK_WED_RING_TX(idx);
developera3f86ed2022-07-08 14:15:13 +08001041@@ -927,6 +1429,35 @@ mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
developer8cb3ac72022-07-04 10:55:14 +08001042 return 0;
1043 }
1044
1045+static int
1046+mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
1047+{
1048+ struct mtk_wed_ring *ring = &dev->rx_ring[idx];
1049+
1050+ BUG_ON(idx > ARRAY_SIZE(dev->rx_ring));
1051+
1052+
1053+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE, 1, false))
1054+ return -ENOMEM;
1055+
1056+ if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
1057+ return -ENOMEM;
1058+
1059+ ring->reg_base = MTK_WED_RING_RX_DATA(idx);
1060+ ring->wpdma = regs;
1061+
1062+ /* WPDMA -> WED */
1063+ wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
1064+ wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_RX_RING_SIZE);
1065+
1066+ wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_BASE,
1067+ ring->desc_phys);
1068+ wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_COUNT,
1069+ MTK_WED_RX_RING_SIZE);
1070+
1071+ return 0;
1072+}
1073+
1074 static u32
1075 mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
1076 {
developera3f86ed2022-07-08 14:15:13 +08001077@@ -1014,6 +1545,8 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
developer8cb3ac72022-07-04 10:55:14 +08001078 .attach = mtk_wed_attach,
1079 .tx_ring_setup = mtk_wed_tx_ring_setup,
1080 .txfree_ring_setup = mtk_wed_txfree_ring_setup,
1081+ .rx_ring_setup = mtk_wed_rx_ring_setup,
1082+ .msg_update = mtk_wed_send_msg,
1083 .start = mtk_wed_start,
1084 .stop = mtk_wed_stop,
1085 .reset_dma = mtk_wed_reset_dma,
developera3f86ed2022-07-08 14:15:13 +08001086@@ -1022,6 +1555,7 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
developer8cb3ac72022-07-04 10:55:14 +08001087 .irq_get = mtk_wed_irq_get,
1088 .irq_set_mask = mtk_wed_irq_set_mask,
1089 .detach = mtk_wed_detach,
1090+ .ppe_check = mtk_wed_ppe_check,
1091 };
1092 struct device_node *eth_np = eth->dev->of_node;
1093 struct platform_device *pdev;
1094diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
developera3f86ed2022-07-08 14:15:13 +08001095index 9b17b74..8ef5253 100644
developer8cb3ac72022-07-04 10:55:14 +08001096--- a/drivers/net/ethernet/mediatek/mtk_wed.h
1097+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
1098@@ -13,6 +13,7 @@
1099 #define MTK_WED_PKT_SIZE 1900
1100 #define MTK_WED_BUF_SIZE 2048
1101 #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
1102+#define MTK_WED_RX_RING_SIZE 1536
1103
1104 #define MTK_WED_TX_RING_SIZE 2048
1105 #define MTK_WED_WDMA_RING_SIZE 512
1106@@ -21,8 +22,15 @@
1107 #define MTK_WED_PER_GROUP_PKT 128
1108
1109 #define MTK_WED_FBUF_SIZE 128
1110+#define MTK_WED_MIOD_CNT 16
1111+#define MTK_WED_FB_CMD_CNT 1024
1112+#define MTK_WED_RRO_QUE_CNT 8192
1113+#define MTK_WED_MIOD_ENTRY_CNT 128
1114+
1115+#define MODULE_ID_WO 1
1116
1117 struct mtk_eth;
1118+struct mtk_wed_wo;
1119
1120 struct mtk_wed_hw {
1121 struct device_node *node;
1122@@ -34,12 +42,14 @@ struct mtk_wed_hw {
1123 struct regmap *mirror;
1124 struct dentry *debugfs_dir;
1125 struct mtk_wed_device *wed_dev;
1126+ struct mtk_wed_wo *wed_wo;
1127 u32 debugfs_reg;
1128 u32 num_flows;
1129 u32 wdma_phy;
1130 char dirname[5];
1131 int irq;
1132 int index;
1133+ u32 ver;
1134 };
1135
1136 struct mtk_wdma_info {
1137@@ -66,6 +76,18 @@ wed_r32(struct mtk_wed_device *dev, u32 reg)
1138 return val;
1139 }
1140
1141+static inline u32
1142+wifi_r32(struct mtk_wed_device *dev, u32 reg)
1143+{
1144+ return readl(dev->wlan.base + reg);
1145+}
1146+
1147+static inline void
1148+wifi_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
1149+{
1150+ writel(val, dev->wlan.base + reg);
1151+}
1152+
1153 static inline void
1154 wdma_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
1155 {
1156@@ -114,6 +136,23 @@ wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
1157 writel(val, dev->txfree_ring.wpdma + reg);
1158 }
1159
1160+static inline u32
1161+wpdma_rx_r32(struct mtk_wed_device *dev, int ring, u32 reg)
1162+{
1163+ if (!dev->rx_ring[ring].wpdma)
1164+ return 0;
1165+
1166+ return readl(dev->rx_ring[ring].wpdma + reg);
1167+}
1168+
1169+static inline void
1170+wpdma_rx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val)
1171+{
1172+ if (!dev->rx_ring[ring].wpdma)
1173+ return;
1174+
1175+ writel(val, dev->rx_ring[ring].wpdma + reg);
1176+}
1177 void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
1178 void __iomem *wdma, u32 wdma_phy, int index);
1179 void mtk_wed_exit(void);
developera3f86ed2022-07-08 14:15:13 +08001180@@ -146,4 +185,16 @@ static inline void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
developer8cb3ac72022-07-04 10:55:14 +08001181 }
1182 #endif
1183
1184+int wed_wo_hardware_init(struct mtk_wed_wo *wo, irq_handler_t isr);
developera3f86ed2022-07-08 14:15:13 +08001185+void wed_wo_hardware_exit(struct mtk_wed_wo *wo);
developer8cb3ac72022-07-04 10:55:14 +08001186+int wed_wo_mcu_init(struct mtk_wed_wo *wo);
1187+int mtk_wed_exception_init(struct mtk_wed_wo *wo);
1188+void mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo, struct sk_buff *skb);
1189+int mtk_wed_mcu_cmd_sanity_check(struct mtk_wed_wo *wo, struct sk_buff *skb);
1190+void wed_wo_mcu_debugfs(struct mtk_wed_hw *hw, struct dentry *dir);
1191+void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo, struct sk_buff *skb);
1192+int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo,int to_id, int cmd,
1193+ const void *data, int len, bool wait_resp);
1194+int mtk_wed_wo_rx_poll(struct napi_struct *napi, int budget);
1195+
1196 #endif
1197diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ccif.c b/drivers/net/ethernet/mediatek/mtk_wed_ccif.c
1198new file mode 100644
developera3f86ed2022-07-08 14:15:13 +08001199index 0000000..22ef337
developer8cb3ac72022-07-04 10:55:14 +08001200--- /dev/null
1201+++ b/drivers/net/ethernet/mediatek/mtk_wed_ccif.c
developera3f86ed2022-07-08 14:15:13 +08001202@@ -0,0 +1,133 @@
developer8cb3ac72022-07-04 10:55:14 +08001203+// SPDX-License-Identifier: GPL-2.0-only
1204+
1205+#include <linux/soc/mediatek/mtk_wed.h>
1206+#include <linux/of_address.h>
1207+#include <linux/mfd/syscon.h>
1208+#include <linux/of_irq.h>
1209+#include "mtk_wed_ccif.h"
1210+#include "mtk_wed_regs.h"
1211+#include "mtk_wed_wo.h"
1212+
1213+static inline void woif_set_isr(struct mtk_wed_wo *wo, u32 mask)
1214+{
1215+ woccif_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
1216+}
1217+
1218+static inline u32 woif_get_csr(struct mtk_wed_wo *wo)
1219+{
1220+ u32 val;
1221+
1222+ val = woccif_r32(wo, MTK_WED_WO_CCIF_RCHNUM);
1223+
1224+ return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
1225+}
1226+
1227+static inline void woif_set_ack(struct mtk_wed_wo *wo, u32 mask)
1228+{
1229+ woccif_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
1230+}
1231+
1232+static inline void woif_kickout(struct mtk_wed_wo *wo)
1233+{
1234+ woccif_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
1235+ woccif_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
1236+}
1237+
1238+static inline void woif_clear_int(struct mtk_wed_wo *wo, u32 mask)
1239+{
1240+ woccif_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
1241+ woccif_r32(wo, MTK_WED_WO_CCIF_RCHNUM);
1242+}
1243+
1244+int wed_wo_hardware_init(struct mtk_wed_wo *wo, irq_handler_t isr)
1245+{
1246+ static const struct wed_wo_drv_ops wo_drv_ops = {
1247+ .kickout = woif_kickout,
1248+ .set_ack = woif_set_ack,
1249+ .set_isr = woif_set_isr,
1250+ .get_csr = woif_get_csr,
1251+ .clear_int = woif_clear_int,
1252+ };
1253+ struct device_node *np, *node = wo->hw->node;
1254+ struct wed_wo_queue_regs queues;
1255+ struct regmap *regs;
1256+ int ret;
1257+
1258+ np = of_parse_phandle(node, "mediatek,ap2woccif", 0);
1259+ if (!np)
1260+ return -ENODEV;
1261+
1262+ regs = syscon_regmap_lookup_by_phandle(np, NULL);
1263+ if (!regs)
1264+ return -ENODEV;
1265+
1266+ wo->drv_ops = &wo_drv_ops;
1267+
1268+ wo->ccif.regs = regs;
1269+ wo->ccif.irq = irq_of_parse_and_map(np, 0);
1270+
1271+ spin_lock_init(&wo->ccif.irq_lock);
1272+
1273+ ret = request_irq(wo->ccif.irq, isr, IRQF_TRIGGER_HIGH,
1274+ "wo_ccif_isr", wo);
1275+ if (ret)
1276+ goto free_irq;
1277+
1278+ queues.desc_base = MTK_WED_WO_CCIF_DUMMY1;
1279+ queues.ring_size = MTK_WED_WO_CCIF_DUMMY2;
1280+ queues.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;
1281+ queues.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
1282+
1283+ ret = mtk_wed_wo_q_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
1284+ MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
1285+ &queues);
1286+
1287+ if (ret)
1288+ goto free_irq;
1289+
1290+ queues.desc_base = MTK_WED_WO_CCIF_DUMMY5;
1291+ queues.ring_size = MTK_WED_WO_CCIF_DUMMY6;
1292+ queues.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;
1293+ queues.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
1294+
1295+ ret = mtk_wed_wo_q_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
1296+ MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
1297+ &queues);
1298+ if (ret)
1299+ goto free_irq;
1300+
1301+ wo->ccif.q_int_mask = MTK_WED_WO_RXCH_INT_MASK;
1302+
1303+ ret = mtk_wed_wo_q_init(wo, mtk_wed_wo_rx_poll);
1304+ if (ret)
1305+ goto free_irq;
1306+
1307+ wo->ccif.q_exep_mask = MTK_WED_WO_EXCEPTION_INT_MASK;
1308+ wo->ccif.irqmask = MTK_WED_WO_ALL_INT_MASK;
1309+
1310+ /* rx queue irqmask */
1311+ wo->drv_ops->set_isr(wo, wo->ccif.irqmask);
1312+
1313+ return 0;
1314+
1315+free_irq:
developera3f86ed2022-07-08 14:15:13 +08001316+ free_irq(wo->ccif.irq, wo);
developer8cb3ac72022-07-04 10:55:14 +08001317+
1318+ return ret;
1319+}
1320+
developera3f86ed2022-07-08 14:15:13 +08001321+void wed_wo_hardware_exit(struct mtk_wed_wo *wo)
developer8cb3ac72022-07-04 10:55:14 +08001322+{
developera3f86ed2022-07-08 14:15:13 +08001323+ wo->drv_ops->set_isr(wo, 0);
1324+
1325+ disable_irq(wo->ccif.irq);
1326+ free_irq(wo->ccif.irq, wo);
1327+
1328+ tasklet_disable(&wo->irq_tasklet);
1329+ netif_napi_del(&wo->napi);
1330+
1331+ mtk_wed_wo_q_tx_clean(wo, &wo->q_tx, true);
1332+ mtk_wed_wo_q_rx_clean(wo, &wo->q_rx);
1333+ mtk_wed_wo_q_free(wo, &wo->q_tx);
1334+ mtk_wed_wo_q_free(wo, &wo->q_rx);
developer8cb3ac72022-07-04 10:55:14 +08001335+}
1336diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ccif.h b/drivers/net/ethernet/mediatek/mtk_wed_ccif.h
1337new file mode 100644
developera3f86ed2022-07-08 14:15:13 +08001338index 0000000..68ade44
developer8cb3ac72022-07-04 10:55:14 +08001339--- /dev/null
1340+++ b/drivers/net/ethernet/mediatek/mtk_wed_ccif.h
1341@@ -0,0 +1,45 @@
1342+// SPDX-License-Identifier: GPL-2.0-only
1343+
1344+#ifndef __MTK_WED_CCIF_H
1345+#define __MTK_WED_CCIF_H
1346+
1347+#define MTK_WED_WO_RING_SIZE 256
1348+#define MTK_WED_WO_CMD_LEN 1504
1349+
1350+#define MTK_WED_WO_TXCH_NUM 0
1351+#define MTK_WED_WO_RXCH_NUM 1
1352+#define MTK_WED_WO_RXCH_WO_EXCEPTION 7
1353+
1354+#define MTK_WED_WO_TXCH_INT_MASK BIT(0)
1355+#define MTK_WED_WO_RXCH_INT_MASK BIT(1)
1356+#define MTK_WED_WO_EXCEPTION_INT_MASK BIT(7)
1357+#define MTK_WED_WO_ALL_INT_MASK MTK_WED_WO_RXCH_INT_MASK | \
1358+ MTK_WED_WO_EXCEPTION_INT_MASK
1359+
1360+#define MTK_WED_WO_CCIF_BUSY 0x004
1361+#define MTK_WED_WO_CCIF_START 0x008
1362+#define MTK_WED_WO_CCIF_TCHNUM 0x00c
1363+#define MTK_WED_WO_CCIF_RCHNUM 0x010
1364+#define MTK_WED_WO_CCIF_RCHNUM_MASK GENMASK(7, 0)
1365+
1366+#define MTK_WED_WO_CCIF_ACK 0x014
1367+#define MTK_WED_WO_CCIF_IRQ0_MASK 0x018
1368+#define MTK_WED_WO_CCIF_IRQ1_MASK 0x01c
1369+#define MTK_WED_WO_CCIF_DUMMY1 0x020
1370+#define MTK_WED_WO_CCIF_DUMMY2 0x024
1371+#define MTK_WED_WO_CCIF_DUMMY3 0x028
1372+#define MTK_WED_WO_CCIF_DUMMY4 0x02c
1373+#define MTK_WED_WO_CCIF_SHADOW1 0x030
1374+#define MTK_WED_WO_CCIF_SHADOW2 0x034
1375+#define MTK_WED_WO_CCIF_SHADOW3 0x038
1376+#define MTK_WED_WO_CCIF_SHADOW4 0x03c
1377+#define MTK_WED_WO_CCIF_DUMMY5 0x050
1378+#define MTK_WED_WO_CCIF_DUMMY6 0x054
1379+#define MTK_WED_WO_CCIF_DUMMY7 0x058
1380+#define MTK_WED_WO_CCIF_DUMMY8 0x05c
1381+#define MTK_WED_WO_CCIF_SHADOW5 0x060
1382+#define MTK_WED_WO_CCIF_SHADOW6 0x064
1383+#define MTK_WED_WO_CCIF_SHADOW7 0x068
1384+#define MTK_WED_WO_CCIF_SHADOW8 0x06c
1385+
1386+#endif
1387diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
developera3f86ed2022-07-08 14:15:13 +08001388index f420f18..fea7ae2 100644
developer8cb3ac72022-07-04 10:55:14 +08001389--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
1390+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
1391@@ -2,6 +2,7 @@
1392 /* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
1393
1394 #include <linux/seq_file.h>
1395+#include <linux/soc/mediatek/mtk_wed.h>
1396 #include "mtk_wed.h"
1397 #include "mtk_wed_regs.h"
1398
1399@@ -18,6 +19,8 @@ enum {
1400 DUMP_TYPE_WDMA,
1401 DUMP_TYPE_WPDMA_TX,
1402 DUMP_TYPE_WPDMA_TXFREE,
1403+ DUMP_TYPE_WPDMA_RX,
1404+ DUMP_TYPE_WED_RRO,
1405 };
1406
1407 #define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
1408@@ -36,6 +39,10 @@ enum {
1409
1410 #define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n)
1411 #define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE)
1412+#define DUMP_WPDMA_RX_RING(_n) DUMP_RING("WPDMA_RX" #_n, 0, DUMP_TYPE_WPDMA_RX, _n)
1413+#define DUMP_WED_RRO_RING(_base)DUMP_RING("WED_RRO_MIOD", MTK_##_base, DUMP_TYPE_WED_RRO)
1414+#define DUMP_WED_RRO_FDBK(_base)DUMP_RING("WED_RRO_FDBK", MTK_##_base, DUMP_TYPE_WED_RRO)
1415+
1416
1417 static void
1418 print_reg_val(struct seq_file *s, const char *name, u32 val)
1419@@ -58,6 +65,7 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
1420 cur->name);
1421 continue;
1422 case DUMP_TYPE_WED:
1423+ case DUMP_TYPE_WED_RRO:
1424 val = wed_r32(dev, cur->offset);
1425 break;
1426 case DUMP_TYPE_WDMA:
1427@@ -69,6 +77,9 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
1428 case DUMP_TYPE_WPDMA_TXFREE:
1429 val = wpdma_txfree_r32(dev, cur->offset);
1430 break;
1431+ case DUMP_TYPE_WPDMA_RX:
1432+ val = wpdma_rx_r32(dev, cur->base, cur->offset);
1433+ break;
1434 }
1435 print_reg_val(s, cur->name, val);
1436 }
1437@@ -132,6 +143,81 @@ wed_txinfo_show(struct seq_file *s, void *data)
1438 }
1439 DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
1440
1441+static int
1442+wed_rxinfo_show(struct seq_file *s, void *data)
1443+{
1444+ static const struct reg_dump regs[] = {
1445+ DUMP_STR("WPDMA RX"),
1446+ DUMP_WPDMA_RX_RING(0),
1447+ DUMP_WPDMA_RX_RING(1),
1448+
1449+ DUMP_STR("WPDMA RX"),
1450+ DUMP_WED(WED_WPDMA_RX_D_MIB(0)),
1451+ DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(0)),
1452+ DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(0)),
1453+ DUMP_WED(WED_WPDMA_RX_D_MIB(1)),
1454+ DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(1)),
1455+ DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(1)),
1456+ DUMP_WED(WED_WPDMA_RX_D_COHERENT_MIB),
1457+
1458+ DUMP_STR("WED RX"),
1459+ DUMP_WED_RING(WED_RING_RX_DATA(0)),
1460+ DUMP_WED_RING(WED_RING_RX_DATA(1)),
1461+
1462+ DUMP_STR("WED RRO"),
1463+ DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0),
1464+ DUMP_WED(WED_RROQM_MID_MIB),
1465+ DUMP_WED(WED_RROQM_MOD_MIB),
1466+ DUMP_WED(WED_RROQM_MOD_COHERENT_MIB),
1467+ DUMP_WED_RRO_FDBK(WED_RROQM_FDBK_CTRL0),
1468+ DUMP_WED(WED_RROQM_FDBK_IND_MIB),
1469+ DUMP_WED(WED_RROQM_FDBK_ENQ_MIB),
1470+ DUMP_WED(WED_RROQM_FDBK_ANC_MIB),
1471+ DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB),
1472+
1473+ DUMP_STR("WED Route QM"),
1474+ DUMP_WED(WED_RTQM_R2H_MIB(0)),
1475+ DUMP_WED(WED_RTQM_R2Q_MIB(0)),
1476+ DUMP_WED(WED_RTQM_Q2H_MIB(0)),
1477+ DUMP_WED(WED_RTQM_R2H_MIB(1)),
1478+ DUMP_WED(WED_RTQM_R2Q_MIB(1)),
1479+ DUMP_WED(WED_RTQM_Q2H_MIB(1)),
1480+ DUMP_WED(WED_RTQM_Q2N_MIB),
1481+ DUMP_WED(WED_RTQM_Q2B_MIB),
1482+ DUMP_WED(WED_RTQM_PFDBK_MIB),
1483+
1484+ DUMP_STR("WED WDMA TX"),
1485+ DUMP_WED(WED_WDMA_TX_MIB),
1486+ DUMP_WED_RING(WED_WDMA_RING_TX),
1487+
1488+ DUMP_STR("WDMA TX"),
1489+ DUMP_WDMA(WDMA_GLO_CFG),
1490+ DUMP_WDMA_RING(WDMA_RING_TX(0)),
1491+ DUMP_WDMA_RING(WDMA_RING_TX(1)),
1492+
1493+ DUMP_STR("WED RX BM"),
1494+ DUMP_WED(WED_RX_BM_BASE),
1495+ DUMP_WED(WED_RX_BM_RX_DMAD),
1496+ DUMP_WED(WED_RX_BM_PTR),
1497+ DUMP_WED(WED_RX_BM_TKID_MIB),
1498+ DUMP_WED(WED_RX_BM_BLEN),
1499+ DUMP_WED(WED_RX_BM_STS),
1500+ DUMP_WED(WED_RX_BM_INTF2),
1501+ DUMP_WED(WED_RX_BM_INTF),
1502+ DUMP_WED(WED_RX_BM_ERR_STS),
1503+ };
1504+
1505+ struct mtk_wed_hw *hw = s->private;
1506+ struct mtk_wed_device *dev = hw->wed_dev;
1507+
1508+ if (!dev)
1509+ return 0;
1510+
1511+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
1512+
1513+ return 0;
1514+}
1515+DEFINE_SHOW_ATTRIBUTE(wed_rxinfo);
1516
1517 static int
1518 mtk_wed_reg_set(void *data, u64 val)
1519@@ -175,4 +261,8 @@ void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
1520 debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
1521 debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
1522 debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
1523+ debugfs_create_file_unsafe("rxinfo", 0400, dir, hw, &wed_rxinfo_fops);
1524+ if (hw->ver > MTK_WED_V1) {
1525+ wed_wo_mcu_debugfs(hw, dir);
1526+ }
1527 }
1528diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
1529new file mode 100644
developera3f86ed2022-07-08 14:15:13 +08001530index 0000000..bd1ab95
developer8cb3ac72022-07-04 10:55:14 +08001531--- /dev/null
1532+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
1533@@ -0,0 +1,561 @@
1534+// SPDX-License-Identifier: GPL-2.0-only
1535+
1536+#include <linux/skbuff.h>
1537+#include <linux/debugfs.h>
1538+#include <linux/firmware.h>
1539+#include <linux/of_address.h>
1540+#include <linux/soc/mediatek/mtk_wed.h>
1541+#include "mtk_wed_regs.h"
1542+#include "mtk_wed_mcu.h"
1543+#include "mtk_wed_wo.h"
1544+
1545+struct sk_buff *
1546+mtk_wed_mcu_msg_alloc(struct mtk_wed_wo *wo,
1547+ const void *data, int data_len)
1548+{
1549+ const struct wed_wo_mcu_ops *ops = wo->mcu_ops;
1550+ int length = ops->headroom + data_len;
1551+ struct sk_buff *skb;
1552+
1553+ skb = alloc_skb(length, GFP_KERNEL);
1554+ if (!skb)
1555+ return NULL;
1556+
1557+ memset(skb->head, 0, length);
1558+ skb_reserve(skb, ops->headroom);
1559+
1560+ if (data && data_len)
1561+ skb_put_data(skb, data, data_len);
1562+
1563+ return skb;
1564+}
1565+
1566+struct sk_buff *
1567+mtk_wed_mcu_get_response(struct mtk_wed_wo *wo, unsigned long expires)
1568+{
1569+ unsigned long timeout;
1570+
1571+ if (!time_is_after_jiffies(expires))
1572+ return NULL;
1573+
1574+ timeout = expires - jiffies;
1575+ wait_event_timeout(wo->mcu.wait,
1576+ (!skb_queue_empty(&wo->mcu.res_q)),
1577+ timeout);
1578+
1579+ return skb_dequeue(&wo->mcu.res_q);
1580+}
1581+
1582+int
1583+mtk_wed_mcu_skb_send_and_get_msg(struct mtk_wed_wo *wo,
1584+ int to_id, int cmd, struct sk_buff *skb,
1585+ bool wait_resp, struct sk_buff **ret_skb)
1586+{
1587+ unsigned long expires;
1588+ int ret, seq;
1589+
1590+ if (ret_skb)
1591+ *ret_skb = NULL;
1592+
1593+ mutex_lock(&wo->mcu.mutex);
1594+
1595+ ret = wo->mcu_ops->mcu_skb_send_msg(wo, to_id, cmd, skb, &seq, wait_resp);
1596+ if (ret < 0)
1597+ goto out;
1598+
1599+ if (!wait_resp) {
1600+ ret = 0;
1601+ goto out;
1602+ }
1603+
1604+ expires = jiffies + wo->mcu.timeout;
1605+
1606+ do {
1607+ skb = mtk_wed_mcu_get_response(wo, expires);
1608+ ret = wo->mcu_ops->mcu_parse_response(wo, cmd, skb, seq);
1609+
1610+ if (!ret && ret_skb)
1611+ *ret_skb = skb;
1612+ else
1613+ dev_kfree_skb(skb);
1614+ } while (ret == -EAGAIN);
1615+
1616+out:
1617+ mutex_unlock(&wo->mcu.mutex);
1618+
1619+ return ret;
1620+}
1621+
1622+void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo,
1623+ struct sk_buff *skb)
1624+{
1625+ skb_queue_tail(&wo->mcu.res_q, skb);
1626+ wake_up(&wo->mcu.wait);
1627+}
1628+
1629+static int mtk_wed_mcu_send_and_get_msg(struct mtk_wed_wo *wo,
1630+ int to_id, int cmd, const void *data, int len,
1631+ bool wait_resp, struct sk_buff **ret_skb)
1632+{
1633+ struct sk_buff *skb;
1634+
1635+ skb = mtk_wed_mcu_msg_alloc(wo, data, len);
1636+ if (!skb)
1637+ return -ENOMEM;
1638+
1639+ return mtk_wed_mcu_skb_send_and_get_msg(wo, to_id, cmd, skb, wait_resp, ret_skb);
1640+}
1641+
1642+int
1643+mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo,
1644+ int to_id, int cmd,
1645+ const void *data, int len, bool wait_resp)
1646+{
1647+ struct sk_buff *skb = NULL;
1648+ int ret = 0;
1649+
1650+ ret = mtk_wed_mcu_send_and_get_msg(wo, to_id, cmd, data,
1651+ len, wait_resp, &skb);
1652+ if (skb)
1653+ dev_kfree_skb(skb);
1654+
1655+ return ret;
1656+}
1657+
1658+int mtk_wed_exception_init(struct mtk_wed_wo *wo)
1659+{
1660+ struct wed_wo_exception *exp = &wo->exp;
1661+ struct {
1662+ u32 arg0;
1663+ u32 arg1;
1664+ }req;
1665+
1666+ exp->log_size = EXCEPTION_LOG_SIZE;
1667+ exp->log = kmalloc(exp->log_size, GFP_ATOMIC);
1668+ if (!exp->log)
1669+ return -ENOMEM;
1670+
1671+ memset(exp->log, 0, exp->log_size);
1672+ exp->phys = dma_map_single(wo->hw->dev, exp->log, exp->log_size,
1673+ DMA_FROM_DEVICE);
1674+
1675+ if (unlikely(dma_mapping_error(wo->hw->dev, exp->phys))) {
1676+ dev_info(wo->hw->dev, "dma map error\n");
1677+ goto free;
1678+ }
1679+
1680+ req.arg0 = (u32)exp->phys;
1681+ req.arg1 = (u32)exp->log_size;
1682+
1683+ return mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, WO_CMD_EXCEPTION_INIT,
1684+ &req, sizeof(req), false);
1685+
1686+free:
1687+ kfree(exp->log);
1688+ return -ENOMEM;
1689+}
1690+
1691+int
1692+mtk_wed_mcu_cmd_sanity_check(struct mtk_wed_wo *wo, struct sk_buff *skb)
1693+{
1694+ struct wed_cmd_hdr *hdr = (struct wed_cmd_hdr *)skb->data;
1695+
1696+ if (hdr->ver != 0)
1697+ return WARP_INVALID_PARA_STATUS;
1698+
1699+ if (skb->len < sizeof(struct wed_cmd_hdr))
1700+ return WARP_INVALID_PARA_STATUS;
1701+
1702+ if (skb->len != hdr->length)
1703+ return WARP_INVALID_PARA_STATUS;
1704+
1705+ return WARP_OK_STATUS;
1706+}
1707+
1708+void
1709+mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo, struct sk_buff *skb)
1710+{
1711+ struct wed_cmd_hdr *hdr = (struct wed_cmd_hdr *)skb->data;
1712+ struct wed_wo_log *record;
1713+ char *msg = (char *)(skb->data + sizeof(struct wed_cmd_hdr));
1714+ u16 msg_len = skb->len - sizeof(struct wed_cmd_hdr);
1715+ u32 i, cnt = 0;
1716+
1717+ switch (hdr->cmd_id) {
1718+ case WO_EVT_LOG_DUMP:
1719+ pr_info("[WO LOG]: %s\n", msg);
1720+ break;
1721+ case WO_EVT_PROFILING:
1722+ cnt = msg_len / (sizeof(struct wed_wo_log));
1723+ record = (struct wed_wo_log *) msg;
1724+ dev_info(wo->hw->dev, "[WO Profiling]: %d report arrived!\n", cnt);
1725+
1726+ for (i = 0 ; i < cnt ; i++) {
1727+ //PROFILE_STAT(wo->total, record[i].total);
1728+ //PROFILE_STAT(wo->mod, record[i].mod);
1729+ //PROFILE_STAT(wo->rro, record[i].rro);
1730+
1731+ dev_info(wo->hw->dev, "[WO Profiling]: SN:%u with latency: total=%u, rro:%u, mod:%u\n",
1732+ record[i].sn,
1733+ record[i].total,
1734+ record[i].rro,
1735+ record[i].mod);
1736+ }
1737+ break;
1738+
1739+ default:
1740+ break;
1741+ }
1742+
1743+ dev_kfree_skb(skb);
1744+
1745+}
1746+
1747+static int
1748+mtk_wed_load_firmware(struct mtk_wed_wo *wo)
1749+{
1750+ struct fw_info {
1751+ __le32 decomp_crc;
1752+ __le32 decomp_len;
1753+ __le32 decomp_blk_sz;
1754+ u8 reserved[4];
1755+ __le32 addr;
1756+ __le32 len;
1757+ u8 feature_set;
1758+ u8 reserved1[15];
1759+ } __packed *region;
1760+
1761+ char *mcu;
1762+ const struct mtk_wed_fw_trailer *hdr;
1763+ static u8 shared[MAX_REGION_SIZE] = {0};
1764+ const struct firmware *fw;
1765+ int ret, i;
1766+ u32 ofs = 0;
1767+ u32 boot_cr, val;
1768+
1769+ mcu = wo->hw->index ? MT7986_FIRMWARE_WO_2 : MT7986_FIRMWARE_WO_1;
1770+
1771+ ret = request_firmware(&fw, mcu, wo->hw->dev);
1772+ if (ret)
1773+ return ret;
1774+
1775+ hdr = (const struct mtk_wed_fw_trailer *)(fw->data + fw->size -
1776+ sizeof(*hdr));
1777+
1778+ dev_info(wo->hw->dev, "WO Firmware Version: %.10s, Build Time: %.15s\n",
1779+ hdr->fw_ver, hdr->build_date);
1780+
1781+ for (i = 0; i < hdr->n_region; i++) {
1782+ int j = 0;
1783+ region = (struct fw_info *)(fw->data + fw->size -
1784+ sizeof(*hdr) -
1785+ sizeof(*region) *
1786+ (hdr->n_region - i));
1787+
1788+ while (j < MAX_REGION_SIZE) {
1789+ struct mtk_wed_fw_region *wo_region;
1790+
1791+ wo_region = &wo->region[j];
1792+ if (!wo_region->addr)
1793+ break;
1794+
1795+ if (wo_region->addr_pa == region->addr) {
1796+ if (!wo_region->shared) {
1797+ memcpy(wo_region->addr,
1798+ fw->data + ofs, region->len);
1799+ } else if (!shared[j]) {
1800+ memcpy(wo_region->addr,
1801+ fw->data + ofs, region->len);
1802+ shared[j] = true;
1803+ }
1804+ }
1805+ j++;
1806+ }
1807+
1808+ if (j == __WO_REGION_MAX) {
1809+ ret = -ENOENT;
1810+ goto done;
1811+ }
1812+ ofs += region->len;
1813+ }
1814+
1815+ /* write the start address */
1816+ boot_cr = wo->hw->index ?
1817+ WOX_MCU_CFG_LS_WA_BOOT_ADDR_ADDR : WOX_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
1818+ wo_w32(wo, boot_cr, (wo->region[WO_REGION_EMI].addr_pa >> 16));
1819+
1820+ /* wo firmware reset */
1821+ wo_w32(wo, WOX_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00);
1822+
1823+ val = wo_r32(wo, WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR);
1824+
1825+ val |= wo->hw->index ? WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WA_CPU_RSTB_MASK :
1826+ WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WM_CPU_RSTB_MASK;
1827+
1828+ wo_w32(wo, WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val);
1829+
1830+done:
1831+ release_firmware(fw);
1832+
1833+ return ret;
1834+}
1835+
1836+static int
1837+mtk_wed_get_firmware_region(struct mtk_wed_wo *wo)
1838+{
1839+ struct device_node *node, *np = wo->hw->node;
1840+ struct mtk_wed_fw_region *region;
1841+ struct resource res;
1842+ const char *compat;
1843+ int i, ret;
1844+
1845+ static const char *const wo_region_compat[__WO_REGION_MAX] = {
1846+ [WO_REGION_EMI] = WOCPU_EMI_DEV_NODE,
1847+ [WO_REGION_ILM] = WOCPU_ILM_DEV_NODE,
1848+ [WO_REGION_DATA] = WOCPU_DATA_DEV_NODE,
1849+ [WO_REGION_BOOT] = WOCPU_BOOT_DEV_NODE,
1850+ };
1851+
1852+ for (i = 0; i < __WO_REGION_MAX; i++) {
1853+ region = &wo->region[i];
1854+ compat = wo_region_compat[i];
1855+
1856+ node = of_parse_phandle(np, compat, 0);
1857+ if (!node)
1858+ return -ENODEV;
1859+
1860+ ret = of_address_to_resource(node, 0, &res);
1861+ if (ret)
1862+ return ret;
1863+
1864+ region->addr_pa = res.start;
1865+ region->size = resource_size(&res);
1866+ region->addr = ioremap(region->addr_pa, region->size);
1867+
1868+ of_property_read_u32_index(node, "shared", 0, &region->shared);
1869+ }
1870+
1871+ return 0;
1872+}
1873+
1874+static int
1875+wo_mcu_send_message(struct mtk_wed_wo *wo,
1876+ int to_id, int cmd, struct sk_buff *skb,
1877+ int *wait_seq, bool wait_resp)
1878+{
1879+ struct wed_cmd_hdr *hdr;
1880+ u8 seq = 0;
1881+
1882+	/* TODO: make dynamic based on msg type */
1883+ wo->mcu.timeout = 20 * HZ;
1884+
1885+ if (wait_resp && wait_seq) {
1886+ seq = wo->mcu.msg_seq++ ;
1887+ *wait_seq = seq;
1888+ }
1889+
1890+ hdr = (struct wed_cmd_hdr *)skb_push(skb, sizeof(*hdr));
1891+
1892+ hdr->cmd_id = cmd;
1893+ hdr->length = cpu_to_le16(skb->len);
1894+ hdr->uni_id = seq;
1895+
1896+ if (to_id == MODULE_ID_WO)
1897+ hdr->flag |= WARP_CMD_FLAG_FROM_TO_WO;
1898+
1899+ if (wait_resp && wait_seq)
1900+ hdr->flag |= WARP_CMD_FLAG_NEED_RSP;
1901+
1902+ return mtk_wed_wo_q_tx_skb(wo, &wo->q_tx, skb);
1903+}
1904+
1905+static int
1906+wo_mcu_parse_response(struct mtk_wed_wo *wo, int cmd,
1907+ struct sk_buff *skb, int seq)
1908+{
1909+ struct wed_cmd_hdr *hdr;
1910+
1911+ if (!skb) {
1912+ dev_err(wo->hw->dev, "Message %08x (seq %d) timeout\n",
1913+ cmd, seq);
1914+ return -ETIMEDOUT;
1915+ }
1916+
1917+ hdr = (struct wed_cmd_hdr *)skb->data;
1918+ if (seq != hdr->uni_id) {
1919+ dev_err(wo->hw->dev, "Message %08x (seq %d) with not match uid(%d)\n",
1920+ cmd, seq, hdr->uni_id);
1921+ return -EAGAIN;
1922+ }
1923+
1924+ //skb_pull(skb, sizeof(struct wed_cmd_hdr));
1925+
1926+ return 0;
1927+}
1928+
1929+int wed_wo_mcu_init(struct mtk_wed_wo *wo)
1930+{
1931+ static const struct wed_wo_mcu_ops wo_mcu_ops = {
1932+ .headroom = sizeof(struct wed_cmd_hdr),
1933+ .mcu_skb_send_msg = wo_mcu_send_message,
1934+ .mcu_parse_response = wo_mcu_parse_response,
1935+		/* TODO: .mcu_restart = wo_mcu_restart, */
1936+ };
1937+ unsigned long timeout = jiffies + FW_DL_TIMEOUT;
1938+ int ret;
1939+ u32 val;
1940+
1941+ wo->mcu_ops = &wo_mcu_ops;
1942+
1943+ ret = mtk_wed_get_firmware_region(wo);
1944+ if (ret)
1945+ return ret;
1946+
1947+ /* set dummy cr */
1948+ wed_w32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * WED_DUMMY_CR_FWDL,
1949+ wo->hw->index + 1);
1950+
1951+ ret = mtk_wed_load_firmware(wo);
1952+ if (ret)
1953+ return ret;
1954+
1955+ do {
1956+ /* get dummy cr */
1957+ val = wed_r32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * WED_DUMMY_CR_FWDL);
1958+ } while (val != 0 && !time_after(jiffies, timeout));
1959+
1960+ if (val)
1961+ return -EBUSY;
1962+
1963+ return 0;
1964+}
1965+
1966+static ssize_t
1967+mtk_wed_wo_ctrl(struct file *file,
1968+ const char __user *user_buf,
1969+ size_t count,
1970+ loff_t *ppos)
1971+{
1972+ struct mtk_wed_hw *hw = file->private_data;
1973+ struct mtk_wed_wo *wo = hw->wed_wo;
1974+ char buf[100], *cmd = NULL, *input[11] = {0};
1975+ char msgbuf[128] = {0};
1976+ struct wo_cmd_query *query = (struct wo_cmd_query *)msgbuf;
1977+ u32 cmd_id;
1978+ bool wait = false;
1979+ char *sub_str = NULL;
1980+ int input_idx = 0, input_total = 0, scan_num = 0;
1981+ char *p;
1982+
1983+ if (count > sizeof(buf))
1984+ return -EINVAL;
1985+
1986+ if (copy_from_user(buf, user_buf, count))
1987+ return -EFAULT;
1988+
1989+ if (count && buf[count - 1] == '\n')
1990+ buf[count - 1] = '\0';
1991+ else
1992+ buf[count] = '\0';
1993+
1994+ p = buf;
1995+
1996+ while ((sub_str = strsep(&p, " ")) != NULL) {
1997+ input[input_idx] = sub_str;
1998+ input_idx++;
1999+ input_total++;
2000+ }
2001+ cmd = input[0];
2002+ if (input_total == 1 && cmd) {
2003+ if (strncmp(cmd, "bainfo", strlen(cmd)) == 0) {
2004+ cmd_id = WO_CMD_BA_INFO_DUMP;
2005+ } else if (strncmp(cmd, "bactrl", strlen(cmd)) == 0) {
2006+ cmd_id = WO_CMD_BA_CTRL_DUMP;
2007+ } else if (strncmp(cmd, "fbcmdq", strlen(cmd)) == 0) {
2008+ cmd_id = WO_CMD_FBCMD_Q_DUMP;
2009+ } else if (strncmp(cmd, "logflush", strlen(cmd)) == 0) {
2010+ cmd_id = WO_CMD_LOG_FLUSH;
2011+ } else if (strncmp(cmd, "cpustat.dump", strlen(cmd)) == 0) {
2012+ cmd_id = WO_CMD_CPU_STATS_DUMP;
2013+ } else if (strncmp(cmd, "state", strlen(cmd)) == 0) {
2014+ cmd_id = WO_CMD_WED_RX_STAT;
2015+ } else if (strncmp(cmd, "prof_hit_dump", strlen(cmd)) == 0) {
2016+ //wo_profiling_report();
2017+ return count;
2018+ } else if (strncmp(cmd, "rxcnt_info", strlen(cmd)) == 0) {
2019+ cmd_id = WO_CMD_RXCNT_INFO;
2020+ wait = true;
2021+ } else {
2022+ pr_info("(%s) unknown comand string(%s)!\n", __func__, cmd);
2023+ return count;
2024+ }
2025+ } else if (input_total > 1) {
2026+ for (input_idx = 1 ; input_idx < input_total ; input_idx++) {
2027+ scan_num = sscanf(input[input_idx], "%u", &query->query0+(input_idx - 1));
2028+
2029+ if (scan_num < 1) {
2030+ pr_info("(%s) require more input!\n", __func__);
2031+ return count;
2032+ }
2033+ }
2034+ if(strncmp(cmd, "devinfo", strlen(cmd)) == 0) {
2035+ cmd_id = WO_CMD_DEV_INFO_DUMP;
2036+ } else if (strncmp(cmd, "bssinfo", strlen(cmd)) == 0) {
2037+ cmd_id = WO_CMD_BSS_INFO_DUMP;
2038+ } else if (strncmp(cmd, "starec", strlen(cmd)) == 0) {
2039+ cmd_id = WO_CMD_STA_REC_DUMP;
2040+ } else if (strncmp(cmd, "starec_ba", strlen(cmd)) == 0) {
2041+ cmd_id = WO_CMD_STA_BA_DUMP;
2042+ } else if (strncmp(cmd, "logctrl", strlen(cmd)) == 0) {
2043+ cmd_id = WO_CMD_FW_LOG_CTRL;
2044+ } else if (strncmp(cmd, "cpustat.en", strlen(cmd)) == 0) {
2045+ cmd_id = WO_CMD_CPU_STATS_ENABLE;
2046+ } else if (strncmp(cmd, "prof_conf", strlen(cmd)) == 0) {
2047+ cmd_id = WO_CMD_PROF_CTRL;
2048+ } else if (strncmp(cmd, "rxcnt_ctrl", strlen(cmd)) == 0) {
2049+ cmd_id = WO_CMD_RXCNT_CTRL;
2050+ } else if (strncmp(cmd, "dbg_set", strlen(cmd)) == 0) {
2051+ cmd_id = WO_CMD_DBG_INFO;
2052+ }
2053+ } else {
2054+ dev_info(hw->dev, "usage: echo cmd='cmd_str' > wo_write\n");
2055+ dev_info(hw->dev, "cmd_str value range:\n");
2056+ dev_info(hw->dev, "\tbainfo:\n");
2057+ dev_info(hw->dev, "\tbactrl:\n");
2058+ dev_info(hw->dev, "\tfbcmdq:\n");
2059+ dev_info(hw->dev, "\tlogflush:\n");
2060+ dev_info(hw->dev, "\tcpustat.dump:\n");
2061+ dev_info(hw->dev, "\tprof_hit_dump:\n");
2062+ dev_info(hw->dev, "\trxcnt_info:\n");
2063+ dev_info(hw->dev, "\tdevinfo:\n");
2064+ dev_info(hw->dev, "\tbssinfo:\n");
2065+ dev_info(hw->dev, "\tstarec:\n");
2066+ dev_info(hw->dev, "\tstarec_ba:\n");
2067+ dev_info(hw->dev, "\tlogctrl:\n");
2068+ dev_info(hw->dev, "\tcpustat.en:\n");
2069+ dev_info(hw->dev, "\tprof_conf:\n");
2070+ dev_info(hw->dev, "\trxcnt_ctrl:\n");
2071+ dev_info(hw->dev, "\tdbg_set [level] [category]:\n");
2072+ return count;
2073+ }
2074+
2075+ mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, cmd_id, (void *)msgbuf, sizeof(struct wo_cmd_query), wait);
2076+
2077+ return count;
2078+
2079+}
2080+
2081+static const struct file_operations fops_wo_ctrl = {
2082+ .write = mtk_wed_wo_ctrl,
2083+ .open = simple_open,
2084+ .llseek = default_llseek,
2085+};
2086+
2087+void wed_wo_mcu_debugfs(struct mtk_wed_hw *hw, struct dentry *dir)
2088+{
2089+ if (!dir)
2090+ return;
2091+
2092+ debugfs_create_file("wo_write", 0600, dir, hw, &fops_wo_ctrl);
2093+}
2094+
2095diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.h b/drivers/net/ethernet/mediatek/mtk_wed_mcu.h
2096new file mode 100644
developera3f86ed2022-07-08 14:15:13 +08002097index 0000000..6a5ac76
developer8cb3ac72022-07-04 10:55:14 +08002098--- /dev/null
2099+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.h
2100@@ -0,0 +1,125 @@
2101+// SPDX-License-Identifier: GPL-2.0-only
2102+
2103+#ifndef __MTK_WED_MCU_H
2104+#define __MTK_WED_MCU_H
2105+
2106+#define EXCEPTION_LOG_SIZE 32768
2107+#define WOCPU_MCUSYS_RESET_ADDR 0x15194050
2108+#define WOCPU_WO0_MCUSYS_RESET_MASK 0x20
2109+#define WOCPU_WO1_MCUSYS_RESET_MASK 0x1
2110+
2111+#define WARP_INVALID_LENGTH_STATUS (-2)
2112+#define WARP_NULL_POINTER_STATUS (-3)
2113+#define WARP_INVALID_PARA_STATUS (-4)
2114+#define WARP_NOT_HANDLE_STATUS (-5)
2115+#define WARP_FAIL_STATUS (-1)
2116+#define WARP_OK_STATUS (0)
2117+#define WARP_ALREADY_DONE_STATUS (1)
2118+
2119+#define MT7986_FIRMWARE_WO_1 "mediatek/mt7986_wo_0.bin"
2120+#define MT7986_FIRMWARE_WO_2 "mediatek/mt7986_wo_1.bin"
2121+
2122+#define WOCPU_EMI_DEV_NODE "mediatek,wocpu_emi"
2123+#define WOCPU_ILM_DEV_NODE "mediatek,wocpu_ilm"
2124+#define WOCPU_DLM_DEV_NODE "mediatek,wocpu_dlm"
2125+#define WOCPU_DATA_DEV_NODE "mediatek,wocpu_data"
2126+#define WOCPU_BOOT_DEV_NODE "mediatek,wocpu_boot"
2127+
2128+#define FW_DL_TIMEOUT ((3000 * HZ) / 1000)
2129+#define WOCPU_TIMEOUT ((1000 * HZ) / 1000)
2130+
2131+#define MAX_REGION_SIZE 3
2132+
2133+#define WOX_MCU_CFG_LS_BASE 0 /*0x15194000*/
2134+
2135+#define WOX_MCU_CFG_LS_HW_VER_ADDR (WOX_MCU_CFG_LS_BASE + 0x000) // 4000
2136+#define WOX_MCU_CFG_LS_FW_VER_ADDR (WOX_MCU_CFG_LS_BASE + 0x004) // 4004
2137+#define WOX_MCU_CFG_LS_CFG_DBG1_ADDR (WOX_MCU_CFG_LS_BASE + 0x00C) // 400C
2138+#define WOX_MCU_CFG_LS_CFG_DBG2_ADDR (WOX_MCU_CFG_LS_BASE + 0x010) // 4010
2139+#define WOX_MCU_CFG_LS_WF_MCCR_ADDR (WOX_MCU_CFG_LS_BASE + 0x014) // 4014
2140+#define WOX_MCU_CFG_LS_WF_MCCR_SET_ADDR (WOX_MCU_CFG_LS_BASE + 0x018) // 4018
2141+#define WOX_MCU_CFG_LS_WF_MCCR_CLR_ADDR (WOX_MCU_CFG_LS_BASE + 0x01C) // 401C
2142+#define WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR (WOX_MCU_CFG_LS_BASE + 0x050) // 4050
2143+#define WOX_MCU_CFG_LS_WM_BOOT_ADDR_ADDR (WOX_MCU_CFG_LS_BASE + 0x060) // 4060
2144+#define WOX_MCU_CFG_LS_WA_BOOT_ADDR_ADDR (WOX_MCU_CFG_LS_BASE + 0x064) // 4064
2145+
2146+#define WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WM_CPU_RSTB_MASK BIT(5)
2147+#define WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WA_CPU_RSTB_MASK BIT(0)
2148+
2149+
2150+enum wo_event_id {
2151+ WO_EVT_LOG_DUMP = 0x1,
2152+ WO_EVT_PROFILING = 0x2,
2153+ WO_EVT_RXCNT_INFO = 0x3
2154+};
2155+
2156+enum wo_cmd_id {
2157+ WO_CMD_WED_CFG = 0,
2158+ WO_CMD_WED_RX_STAT,
2159+ WO_CMD_RRO_SER,
2160+ WO_CMD_DBG_INFO,
2161+ WO_CMD_DEV_INFO,
2162+ WO_CMD_BSS_INFO,
2163+ WO_CMD_STA_REC,
2164+ WO_CMD_DEV_INFO_DUMP,
2165+ WO_CMD_BSS_INFO_DUMP,
2166+ WO_CMD_STA_REC_DUMP,
2167+ WO_CMD_BA_INFO_DUMP,
2168+ WO_CMD_FBCMD_Q_DUMP,
2169+ WO_CMD_FW_LOG_CTRL,
2170+ WO_CMD_LOG_FLUSH,
2171+ WO_CMD_CHANGE_STATE,
2172+ WO_CMD_CPU_STATS_ENABLE,
2173+ WO_CMD_CPU_STATS_DUMP,
2174+ WO_CMD_EXCEPTION_INIT,
2175+ WO_CMD_PROF_CTRL,
2176+ WO_CMD_STA_BA_DUMP,
2177+ WO_CMD_BA_CTRL_DUMP,
2178+ WO_CMD_RXCNT_CTRL,
2179+ WO_CMD_RXCNT_INFO,
2180+ WO_CMD_SET_CAP,
2181+ WO_CMD_CCIF_RING_DUMP,
2182+ WO_CMD_WED_END
2183+};
2184+
2185+enum wo_state {
2186+ WO_STATE_UNDEFINED = 0x0,
2187+ WO_STATE_INIT = 0x1,
2188+ WO_STATE_ENABLE = 0x2,
2189+ WO_STATE_DISABLE = 0x3,
2190+ WO_STATE_HALT = 0x4,
2191+ WO_STATE_GATING = 0x5,
2192+ WO_STATE_SER_RESET = 0x6,
2193+ WO_STATE_WF_RESET = 0x7,
2194+ WO_STATE_END
2195+};
2196+
2197+enum wo_done_state {
2198+ WOIF_UNDEFINED = 0,
2199+ WOIF_DISABLE_DONE = 1,
2200+ WOIF_TRIGGER_ENABLE = 2,
2201+ WOIF_ENABLE_DONE = 3,
2202+ WOIF_TRIGGER_GATING = 4,
2203+ WOIF_GATING_DONE = 5,
2204+ WOIF_TRIGGER_HALT = 6,
2205+ WOIF_HALT_DONE = 7,
2206+};
2207+
2208+enum wed_dummy_cr_idx {
2209+ WED_DUMMY_CR_FWDL = 0,
2210+ WED_DUMMY_CR_WO_STATUS = 1
2211+};
2212+
2213+struct mtk_wed_fw_trailer {
2214+ u8 chip_id;
2215+ u8 eco_code;
2216+ u8 n_region;
2217+ u8 format_ver;
2218+ u8 format_flag;
2219+ u8 reserved[2];
2220+ char fw_ver[10];
2221+ char build_date[15];
2222+ u32 crc;
2223+};
2224+
2225+#endif
2226diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
developera3f86ed2022-07-08 14:15:13 +08002227index e107de7..64a2483 100644
developer8cb3ac72022-07-04 10:55:14 +08002228--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2229+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2230@@ -4,6 +4,8 @@
2231 #ifndef __MTK_WED_REGS_H
2232 #define __MTK_WED_REGS_H
2233
2234+#define MTK_WFDMA_DESC_CTRL_TO_HOST BIT(8)
2235+
2236 #if defined(CONFIG_MEDIATEK_NETSYS_V2)
2237 #define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(13, 0)
2238 #define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(14)
2239@@ -16,6 +18,7 @@
2240 #define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16)
2241 #define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30)
2242 #define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31)
2243+#define MTK_WED_RX_BM_TOKEN GENMASK(31, 16)
2244
2245 struct mtk_wdma_desc {
2246 __le32 buf0;
developera3f86ed2022-07-08 14:15:13 +08002247@@ -41,6 +44,8 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002248 #define MTK_WED_RESET_WED_TX_DMA BIT(12)
2249 #define MTK_WED_RESET_WDMA_RX_DRV BIT(17)
2250 #define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
2251+#define MTK_WED_RESET_RX_RRO_QM BIT(20)
2252+#define MTK_WED_RESET_RX_ROUTE_QM BIT(21)
2253 #define MTK_WED_RESET_WED BIT(31)
2254
2255 #define MTK_WED_CTRL 0x00c
developera3f86ed2022-07-08 14:15:13 +08002256@@ -52,8 +57,12 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002257 #define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
2258 #define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
2259 #define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11)
2260-#define MTK_WED_CTRL_RESERVE_EN BIT(12)
2261-#define MTK_WED_CTRL_RESERVE_BUSY BIT(13)
2262+#define MTK_WED_CTRL_WED_RX_BM_EN BIT(12)
2263+#define MTK_WED_CTRL_WED_RX_BM_BUSY BIT(13)
2264+#define MTK_WED_CTRL_RX_RRO_QM_EN BIT(14)
2265+#define MTK_WED_CTRL_RX_RRO_QM_BUSY BIT(15)
2266+#define MTK_WED_CTRL_RX_ROUTE_QM_EN BIT(16)
2267+#define MTK_WED_CTRL_RX_ROUTE_QM_BUSY BIT(17)
2268 #define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
2269 #define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25)
2270 #define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
developera3f86ed2022-07-08 14:15:13 +08002271@@ -68,8 +77,8 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002272 #define MTK_WED_EXT_INT_STATUS_TX_TKID_LO_TH BIT(10)
2273 #define MTK_WED_EXT_INT_STATUS_TX_TKID_HI_TH BIT(11)
2274 #endif
2275-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
2276-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
2277+#define MTK_WED_EXT_INT_STATUS_RX_FREE_AT_EMPTY BIT(12)
2278+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_DMAD_ER BIT(13)
2279 #define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
2280 #define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
2281 #define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
developera3f86ed2022-07-08 14:15:13 +08002282@@ -86,8 +95,8 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002283 #define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
2284 MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
2285 MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \
2286- MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH | \
2287- MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH | \
2288+ MTK_WED_EXT_INT_STATUS_RX_FREE_AT_EMPTY | \
2289+ MTK_WED_EXT_INT_STATUS_RX_FBUF_DMAD_ER | \
2290 MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
2291 MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
2292 MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | \
developera3f86ed2022-07-08 14:15:13 +08002293@@ -96,6 +105,8 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002294 MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR)
2295
2296 #define MTK_WED_EXT_INT_MASK 0x028
2297+#define MTK_WED_EXT_INT_MASK1 0x02c
2298+#define MTK_WED_EXT_INT_MASK2 0x030
2299
2300 #define MTK_WED_STATUS 0x060
2301 #define MTK_WED_STATUS_TX GENMASK(15, 8)
developera3f86ed2022-07-08 14:15:13 +08002302@@ -183,6 +194,9 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002303
2304 #define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10)
2305
2306+#define MTK_WED_RING_RX_DATA(_n) (0x420 + (_n) * 0x10)
2307+
2308+#define MTK_WED_SCR0 0x3c0
2309 #define MTK_WED_WPDMA_INT_TRIGGER 0x504
2310 #define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1)
2311 #define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4)
developera3f86ed2022-07-08 14:15:13 +08002312@@ -239,13 +253,19 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002313
2314 #define MTK_WED_WPDMA_INT_CTRL_TX 0x530
2315 #define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN BIT(0)
2316-#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR BIT(1)
2317+#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR BIT(1)
2318 #define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG GENMASK(6, 2)
2319 #define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN BIT(8)
2320 #define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR BIT(9)
2321 #define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG GENMASK(14, 10)
2322
2323 #define MTK_WED_WPDMA_INT_CTRL_RX 0x534
2324+#define MTK_WED_WPDMA_INT_CTRL_RX0_EN BIT(0)
2325+#define MTK_WED_WPDMA_INT_CTRL_RX0_CLR BIT(1)
2326+#define MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG GENMASK(6, 2)
2327+#define MTK_WED_WPDMA_INT_CTRL_RX1_EN BIT(8)
2328+#define MTK_WED_WPDMA_INT_CTRL_RX1_CLR BIT(9)
2329+#define MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG GENMASK(14, 10)
2330
2331 #define MTK_WED_WPDMA_INT_CTRL_TX_FREE 0x538
2332 #define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN BIT(0)
developera3f86ed2022-07-08 14:15:13 +08002333@@ -270,13 +290,43 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002334 #define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
2335 #define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
2336
2337+#define MTK_WED_WPDMA_RX_MIB(_n) (0x5e0 + (_n) * 4)
2338+#define MTK_WED_WPDMA_RX_COHERENT_MIB(_n) (0x5f0 + (_n) * 4)
2339+
2340 #define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10)
2341 #define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10)
2342+#define MTK_WED_WPDMA_RING_RX_DATA(_n) (0x730 + (_n) * 0x10)
2343+
2344+
2345+#define MTK_WED_WPDMA_RX_D_GLO_CFG 0x75c
2346+#define MTK_WED_WPDMA_RX_D_RX_DRV_EN BIT(0)
2347+#define MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL GENMASK(11, 7)
2348+#define MTK_WED_WPDMA_RX_D_RXD_READ_LEN GENMASK(31, 24)
2349+
2350+#define MTK_WED_WPDMA_RX_D_RST_IDX 0x760
2351+#define MTK_WED_WPDMA_RX_D_RST_CRX_IDX0 BIT(16)
2352+#define MTK_WED_WPDMA_RX_D_RST_CRX_IDX1 BIT(17)
2353+#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX0 BIT(24)
2354+#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX1 BIT(25)
2355+
2356+#define MTK_WED_WPDMA_RX_GLO_CFG 0x76c
2357+#define MTK_WED_WPDMA_RX_RING 0x770
2358+
2359+#define MTK_WED_WPDMA_RX_D_MIB(_n) (0x774 + (_n) * 4)
2360+#define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n) (0x784 + (_n) * 4)
2361+#define MTK_WED_WPDMA_RX_D_COHERENT_MIB 0x78c
2362+
2363+#define MTK_WED_WDMA_RING_TX 0x800
2364+
2365+#define MTK_WED_WDMA_TX_MIB 0x810
2366+
2367+
2368 #define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
2369 #define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
2370
2371 #define MTK_WED_WDMA_GLO_CFG 0xa04
2372 #define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
2373+#define MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK BIT(1)
2374 #define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2)
2375 #define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
2376 #define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4)
developera3f86ed2022-07-08 14:15:13 +08002377@@ -320,6 +370,20 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002378 #define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4)
2379 #define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4)
2380
2381+#define MTK_WED_RX_BM_RX_DMAD 0xd80
2382+#define MTK_WED_RX_BM_RX_DMAD_SDL0 GENMASK(13, 0)
2383+
2384+#define MTK_WED_RX_BM_BASE 0xd84
2385+#define MTK_WED_RX_BM_INIT_PTR 0xd88
2386+#define MTK_WED_RX_BM_SW_TAIL GENMASK(15, 0)
2387+#define MTK_WED_RX_BM_INIT_SW_TAIL BIT(16)
2388+
2389+#define MTK_WED_RX_PTR 0xd8c
2390+
2391+#define MTK_WED_RX_BM_DYN_ALLOC_TH 0xdb4
2392+#define MTK_WED_RX_BM_DYN_ALLOC_TH_H GENMASK(31, 16)
2393+#define MTK_WED_RX_BM_DYN_ALLOC_TH_L GENMASK(15, 0)
2394+
2395 #define MTK_WED_RING_OFS_BASE 0x00
2396 #define MTK_WED_RING_OFS_COUNT 0x04
2397 #define MTK_WED_RING_OFS_CPU_IDX 0x08
developera3f86ed2022-07-08 14:15:13 +08002398@@ -330,7 +394,9 @@ struct mtk_wdma_desc {
2399
2400 #define MTK_WDMA_GLO_CFG 0x204
2401 #define MTK_WDMA_GLO_CFG_TX_DMA_EN BIT(0)
2402+#define MTK_WDMA_GLO_CFG_TX_DMA_BUSY BIT(1)
2403 #define MTK_WDMA_GLO_CFG_RX_DMA_EN BIT(2)
2404+#define MTK_WDMA_GLO_CFG_RX_DMA_BUSY BIT(3)
2405 #define MTK_WDMA_GLO_CFG_RX_INFO3_PRERES BIT(26)
2406 #define MTK_WDMA_GLO_CFG_RX_INFO2_PRERES BIT(27)
2407 #define MTK_WDMA_GLO_CFG_RX_INFO1_PRERES BIT(28)
2408@@ -359,4 +425,71 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002409 /* DMA channel mapping */
2410 #define HIFSYS_DMA_AG_MAP 0x008
2411
2412+#define MTK_WED_RTQM_GLO_CFG 0xb00
2413+#define MTK_WED_RTQM_BUSY BIT(1)
2414+#define MTK_WED_RTQM_Q_RST BIT(2)
2415+#define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5)
2416+#define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20)
2417+
2418+#define MTK_WED_RTQM_R2H_MIB(_n) (0xb70 + (_n) * 0x4)
2419+#define MTK_WED_RTQM_R2Q_MIB(_n) (0xb78 + (_n) * 0x4)
2420+#define MTK_WED_RTQM_Q2N_MIB 0xb80
2421+#define MTK_WED_RTQM_Q2H_MIB(_n) (0xb84 + (_n) * 0x4)
2422+
2423+#define MTK_WED_RTQM_Q2B_MIB 0xb8c
2424+#define MTK_WED_RTQM_PFDBK_MIB 0xb90
2425+
2426+#define MTK_WED_RROQM_GLO_CFG 0xc04
2427+#define MTK_WED_RROQM_RST_IDX 0xc08
2428+#define MTK_WED_RROQM_RST_IDX_MIOD BIT(0)
2429+#define MTK_WED_RROQM_RST_IDX_FDBK BIT(4)
2430+
2431+#define MTK_WED_RROQM_MIOD_CTRL0 0xc40
2432+#define MTK_WED_RROQM_MIOD_CTRL1 0xc44
2433+#define MTK_WED_RROQM_MIOD_CNT GENMASK(11, 0)
2434+
2435+#define MTK_WED_RROQM_MIOD_CTRL2 0xc48
2436+#define MTK_WED_RROQM_MIOD_CTRL3 0xc4c
2437+
2438+#define MTK_WED_RROQM_FDBK_CTRL0 0xc50
2439+#define MTK_WED_RROQM_FDBK_CTRL1 0xc54
2440+#define MTK_WED_RROQM_FDBK_CNT GENMASK(11, 0)
2441+
2442+#define MTK_WED_RROQM_FDBK_CTRL2 0xc58
2443+
2444+#define MTK_WED_RROQ_BASE_L 0xc80
2445+#define MTK_WED_RROQ_BASE_H 0xc84
2446+
2447+
2448+#define MTK_WED_RROQM_MIOD_CFG 0xc8c
2449+#define MTK_WED_RROQM_MIOD_MID_DW GENMASK(5, 0)
2450+#define MTK_WED_RROQM_MIOD_MOD_DW GENMASK(13, 8)
2451+#define MTK_WED_RROQM_MIOD_ENTRY_DW GENMASK(22, 16)
2452+
2453+#define MTK_WED_RROQM_MID_MIB 0xcc0
2454+#define MTK_WED_RROQM_MOD_MIB 0xcc4
2455+#define MTK_WED_RROQM_MOD_COHERENT_MIB 0xcc8
2456+#define MTK_WED_RROQM_FDBK_MIB 0xcd0
2457+#define MTK_WED_RROQM_FDBK_COHERENT_MIB 0xcd4
2458+#define MTK_WED_RROQM_FDBK_IND_MIB 0xce0
2459+#define MTK_WED_RROQM_FDBK_ENQ_MIB 0xce4
2460+#define MTK_WED_RROQM_FDBK_ANC_MIB 0xce8
2461+#define MTK_WED_RROQM_FDBK_ANC2H_MIB 0xcec
2462+
2463+#define MTK_WED_RX_BM_RX_DMAD 0xd80
2464+#define MTK_WED_RX_BM_BASE 0xd84
2465+#define MTK_WED_RX_BM_INIT_PTR 0xd88
2466+#define MTK_WED_RX_BM_PTR 0xd8c
2467+#define MTK_WED_RX_BM_PTR_HEAD GENMASK(32, 16)
2468+#define MTK_WED_RX_BM_PTR_TAIL GENMASK(15, 0)
2469+
2470+#define MTK_WED_RX_BM_BLEN 0xd90
2471+#define MTK_WED_RX_BM_STS 0xd94
2472+#define MTK_WED_RX_BM_INTF2 0xd98
2473+#define MTK_WED_RX_BM_INTF 0xd9c
2474+#define MTK_WED_RX_BM_ERR_STS 0xda8
2475+
2476+#define MTK_WED_WOCPU_VIEW_MIOD_BASE 0x8000
2477+#define MTK_WED_PCIE_INT_MASK 0x0
2478+
2479 #endif
2480diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
2481new file mode 100644
developera3f86ed2022-07-08 14:15:13 +08002482index 0000000..e101f17
developer8cb3ac72022-07-04 10:55:14 +08002483--- /dev/null
2484+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
developera3f86ed2022-07-08 14:15:13 +08002485@@ -0,0 +1,588 @@
developer8cb3ac72022-07-04 10:55:14 +08002486+// SPDX-License-Identifier: GPL-2.0-only
2487+
2488+#include <linux/kernel.h>
2489+#include <linux/bitfield.h>
2490+#include <linux/dma-mapping.h>
2491+#include <linux/skbuff.h>
2492+#include <linux/of_platform.h>
2493+#include <linux/interrupt.h>
2494+#include <linux/of_address.h>
2495+#include <linux/iopoll.h>
2496+#include <linux/soc/mediatek/mtk_wed.h>
2497+#include "mtk_wed.h"
2498+#include "mtk_wed_regs.h"
2499+#include "mtk_wed_ccif.h"
2500+#include "mtk_wed_wo.h"
2501+
2502+struct wed_wo_profile_stat profile_total[6] = {
2503+ {1001, 0},
2504+ {1501, 0},
2505+ {3001, 0},
2506+ {5001, 0},
2507+ {10001, 0},
2508+ {0xffffffff, 0}
2509+};
2510+
2511+struct wed_wo_profile_stat profiling_mod[6] = {
2512+ {1001, 0},
2513+ {1501, 0},
2514+ {3001, 0},
2515+ {5001, 0},
2516+ {10001, 0},
2517+ {0xffffffff, 0}
2518+};
2519+
2520+struct wed_wo_profile_stat profiling_rro[6] = {
2521+ {1001, 0},
2522+ {1501, 0},
2523+ {3001, 0},
2524+ {5001, 0},
2525+ {10001, 0},
2526+ {0xffffffff, 0}
2527+};
2528+
2529+static void
2530+woif_q_sync_idx(struct mtk_wed_wo *wo, struct wed_wo_queue *q)
2531+{
2532+ woccif_w32(wo, q->regs->desc_base, q->desc_dma);
2533+ woccif_w32(wo, q->regs->ring_size, q->ndesc);
2534+
2535+ /* wo fw start from 1 */
developera3f86ed2022-07-08 14:15:13 +08002536+ q->tail = q->head = 1;
developer8cb3ac72022-07-04 10:55:14 +08002537+}
2538+
2539+static void
2540+woif_q_reset(struct mtk_wed_wo *dev, struct wed_wo_queue *q)
2541+{
2542+
2543+ if (!q || !q->ndesc)
2544+ return;
2545+
2546+ woccif_w32(dev, q->regs->cpu_idx, 0);
2547+
2548+ woif_q_sync_idx(dev, q);
2549+}
2550+
2551+static void
2552+woif_q_kick(struct mtk_wed_wo *wo, struct wed_wo_queue *q, int offset)
2553+{
2554+ wmb();
2555+ woccif_w32(wo, q->regs->cpu_idx, q->head + offset);
2556+}
2557+
2558+static int
2559+woif_q_rx_fill(struct mtk_wed_wo *wo, struct wed_wo_queue *q)
2560+{
2561+ int len = q->buf_size, frames = 0;
2562+ struct wed_wo_queue_entry *entry;
2563+ struct wed_wo_desc *desc;
2564+ dma_addr_t addr;
2565+ u32 ctrl = 0;
2566+ void *buf;
2567+
2568+ if (!q->ndesc)
2569+ return 0;
2570+
2571+ spin_lock_bh(&q->lock);
2572+
2573+ while (q->queued < q->ndesc - 1) {
2574+
2575+ buf = page_frag_alloc(&q->rx_page, len, GFP_ATOMIC);
2576+ if (!buf)
2577+ break;
2578+
2579+ addr = dma_map_single(wo->hw->dev, buf, len, DMA_FROM_DEVICE);
2580+ if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
2581+ skb_free_frag(buf);
2582+ break;
2583+ }
2584+ dma_sync_single_for_cpu(wo->hw->dev, addr, len,
2585+ DMA_TO_DEVICE);
2586+ desc = &q->desc[q->head];
2587+ entry = &q->entry[q->head];
2588+
2589+ entry->dma_addr = addr;
2590+ entry->dma_len = len;
2591+
2592+ ctrl = FIELD_PREP(WED_CTL_SD_LEN0, entry->dma_len);
2593+ ctrl |= WED_CTL_LAST_SEC0;
2594+
2595+ WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
2596+ WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
2597+ dma_sync_single_for_device(wo->hw->dev, addr, len,
2598+ DMA_TO_DEVICE);
2599+ q->queued++;
2600+ q->entry[q->head].buf = buf;
2601+
2602+ q->head = (q->head + 1) % q->ndesc;
2603+ frames++;
2604+ }
2605+
2606+ spin_unlock_bh(&q->lock);
2607+
2608+ return frames;
2609+}
2610+
2611+static void
2612+woif_q_rx_fill_process(struct mtk_wed_wo *wo, struct wed_wo_queue *q)
2613+{
2614+ if(woif_q_rx_fill(wo, q))
2615+ woif_q_kick(wo, q, -1);
2616+}
2617+
2618+static int
2619+woif_q_alloc(struct mtk_wed_wo *dev, struct wed_wo_queue *q,
2620+ int n_desc, int bufsize, int idx,
2621+ struct wed_wo_queue_regs *regs)
2622+{
2623+ struct wed_wo_queue_regs *q_regs;
2624+ int size;
2625+
2626+ spin_lock_init(&q->lock);
2627+ spin_lock_init(&q->cleanup_lock);
2628+
2629+ q_regs = devm_kzalloc(dev->hw->dev, sizeof(*q_regs), GFP_KERNEL);
2630+
2631+ q_regs->desc_base = regs->desc_base;
2632+ q_regs->ring_size = regs->ring_size;
2633+ q_regs->cpu_idx = regs->cpu_idx;
2634+ q_regs->dma_idx = regs->dma_idx;
2635+
2636+ q->regs = q_regs;
2637+ q->ndesc = n_desc;
2638+ q->buf_size = bufsize;
2639+
2640+ size = q->ndesc * sizeof(struct wed_wo_desc);
2641+
2642+ q->desc = dmam_alloc_coherent(dev->hw->dev, size,
2643+ &q->desc_dma, GFP_KERNEL);
2644+ if (!q->desc)
2645+ return -ENOMEM;
2646+
2647+ size = q->ndesc * sizeof(*q->entry);
2648+ q->entry = devm_kzalloc(dev->hw->dev, size, GFP_KERNEL);
2649+ if (!q->entry)
2650+ return -ENOMEM;
2651+
2652+ if (idx == 0)
2653+ woif_q_reset(dev, &dev->q_tx);
2654+
2655+ return 0;
2656+}
2657+
2658+static void
developera3f86ed2022-07-08 14:15:13 +08002659+woif_q_free(struct mtk_wed_wo *dev, struct wed_wo_queue *q)
2660+{
2661+ int size;
2662+
2663+ if (!q)
2664+ return;
2665+
2666+ if (!q->desc)
2667+ return;
2668+
2669+ woccif_w32(dev, q->regs->cpu_idx, 0);
2670+
2671+ size = q->ndesc * sizeof(struct wed_wo_desc);
2672+ dma_free_coherent(dev->hw->dev, size, q->desc, q->desc_dma);
2673+}
2674+
2675+static void
developer8cb3ac72022-07-04 10:55:14 +08002676+woif_q_tx_clean(struct mtk_wed_wo *wo, struct wed_wo_queue *q, bool flush)
2677+{
2678+ int last;
2679+
2680+ if (!q || !q->ndesc)
2681+ return;
2682+
2683+ spin_lock_bh(&q->cleanup_lock);
2684+ if (flush)
2685+ last = -1;
2686+ else
2687+ last = readl(&q->regs->dma_idx);
2688+
2689+ while (q->queued > 0 && q->tail != last) {
2690+ struct wed_wo_queue_entry *e;
2691+
2692+ e = &q->entry[q->tail];
2693+
2694+ dma_unmap_single(wo->hw->dev, e->dma_addr, e->dma_len,
2695+ DMA_TO_DEVICE);
2696+
2697+ if (e->skb)
2698+ dev_kfree_skb(e->skb);
2699+
2700+ memset(e, 0, sizeof(*e));
2701+
2702+ spin_lock_bh(&q->lock);
2703+ q->tail = (q->tail + 1) % q->ndesc;
2704+ q->queued--;
2705+ spin_unlock_bh(&q->lock);
2706+
2707+ if (!flush && q->tail == last)
2708+ last = readl(&q->regs->dma_idx);
2709+ }
2710+ spin_unlock_bh(&q->cleanup_lock);
2711+
2712+ if (flush) {
2713+ spin_lock_bh(&q->lock);
2714+ woif_q_sync_idx(wo, q);
2715+ woif_q_kick(wo, q, 0);
2716+ spin_unlock_bh(&q->lock);
2717+ }
2718+}
2719+
developer8cb3ac72022-07-04 10:55:14 +08002720+static void *
2721+woif_q_deq(struct mtk_wed_wo *wo, struct wed_wo_queue *q, bool flush,
2722+ int *len, u32 *info, bool *more)
2723+{
2724+ int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
2725+ struct wed_wo_queue_entry *e;
2726+ struct wed_wo_desc *desc;
2727+ int idx = q->tail;
2728+ void *buf;
2729+
2730+ *more = false;
2731+ if (!q->queued)
2732+ return NULL;
2733+
2734+ if (flush)
2735+ q->desc[idx].ctrl |= cpu_to_le32(WED_CTL_DMA_DONE);
2736+ else if (!(q->desc[idx].ctrl & cpu_to_le32(WED_CTL_DMA_DONE)))
2737+ return NULL;
2738+
2739+ q->tail = (q->tail + 1) % q->ndesc;
2740+ q->queued--;
2741+
2742+ desc = &q->desc[idx];
2743+ e = &q->entry[idx];
2744+
2745+ buf = e->buf;
2746+ if (len) {
2747+ u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
2748+ *len = FIELD_GET(WED_CTL_SD_LEN0, ctl);
2749+ *more = !(ctl & WED_CTL_LAST_SEC0);
2750+ }
2751+
2752+ if (info)
2753+ *info = le32_to_cpu(desc->info);
2754+ if(buf)
2755+ dma_unmap_single(wo->hw->dev, e->dma_addr, buf_len,
2756+ DMA_FROM_DEVICE);
2757+ e->skb = NULL;
2758+
2759+ return buf;
2760+}
2761+
developera3f86ed2022-07-08 14:15:13 +08002762+static void
2763+woif_q_rx_clean(struct mtk_wed_wo *wo, struct wed_wo_queue *q)
2764+{
2765+ struct page *page;
2766+ void *buf;
2767+ bool more;
2768+
2769+ if (!q->ndesc)
2770+ return;
2771+
2772+ spin_lock_bh(&q->lock);
2773+ do {
2774+ buf = woif_q_deq(wo, q, true, NULL, NULL, &more);
2775+ if (!buf)
2776+ break;
2777+
2778+ skb_free_frag(buf);
2779+ } while (1);
2780+ spin_unlock_bh(&q->lock);
2781+
2782+ if (!q->rx_page.va)
2783+ return;
2784+
2785+ page = virt_to_page(q->rx_page.va);
2786+ __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
2787+ memset(&q->rx_page, 0, sizeof(q->rx_page));
2788+
2789+}
2790+
developer8cb3ac72022-07-04 10:55:14 +08002791+static int
2792+woif_q_init(struct mtk_wed_wo *dev,
2793+ int (*poll)(struct napi_struct *napi, int budget))
2794+{
2795+ init_dummy_netdev(&dev->napi_dev);
2796+ snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
2797+ "woif_q");
2798+
2799+ if (dev->q_rx.ndesc) {
2800+ netif_napi_add(&dev->napi_dev, &dev->napi, poll, 64);
2801+ woif_q_rx_fill(dev, &dev->q_rx);
2802+ woif_q_reset(dev, &dev->q_rx);
2803+ napi_enable(&dev->napi);
2804+ }
2805+
2806+ return 0;
2807+}
2808+
2809+void woif_q_rx_skb(struct mtk_wed_wo *wo, struct sk_buff *skb)
2810+{
2811+ struct wed_cmd_hdr *hdr = (struct wed_cmd_hdr *)skb->data;
2812+ int ret;
2813+
2814+ ret = mtk_wed_mcu_cmd_sanity_check(wo, skb);
2815+ if (ret)
2816+ goto free_skb;
2817+
2818+ if (WED_WO_CMD_FLAG_IS_RSP(hdr))
2819+ mtk_wed_mcu_rx_event(wo, skb);
2820+ else
2821+ mtk_wed_mcu_rx_unsolicited_event(wo, skb);
2822+
2823+ return;
2824+free_skb:
2825+ dev_kfree_skb(skb);
2826+}
2827+
2828+static int
2829+woif_q_tx_skb(struct mtk_wed_wo *wo, struct wed_wo_queue *q,
2830+ struct sk_buff *skb)
2831+{
2832+ struct wed_wo_queue_entry *entry;
2833+ struct wed_wo_desc *desc;
2834+ int len, ret, idx = -1;
2835+ dma_addr_t addr;
2836+ u32 ctrl = 0;
2837+
2838+ len = skb->len;
2839+ addr = dma_map_single(wo->hw->dev, skb->data, len, DMA_TO_DEVICE);
2840+ if (unlikely(dma_mapping_error(wo->hw->dev, addr)))
2841+ goto error;
2842+
2843+ /* packet tx, force trigger tx clean. */
2844+ if (q->queued + MTK_WED_WO_TXQ_FREE_THR >= q->ndesc - 1)
2845+ woif_q_tx_clean(wo, q, false);
2846+
2847+ if (q->queued + 1 >= q->ndesc - 1) {
2848+ ret = -ENOMEM;
2849+ goto error;
2850+ }
2851+
2852+ spin_lock_bh(&q->lock);
2853+
2854+ dma_sync_single_for_device(wo->hw->dev, addr, len,
2855+ DMA_TO_DEVICE);
2856+
2857+ idx = q->head;
2858+
2859+ desc = &q->desc[idx];
2860+ entry = &q->entry[idx];
2861+
2862+ entry->dma_addr = addr;
2863+ entry->dma_len = len;
2864+
2865+ ctrl = FIELD_PREP(WED_CTL_SD_LEN0, len);
2866+ ctrl |= WED_CTL_LAST_SEC0;
2867+ ctrl |= WED_CTL_DMA_DONE;
2868+
2869+ WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
2870+ WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
2871+
2872+ q->queued++;
2873+ q->entry[idx].skb = skb;
2874+
2875+ woif_q_kick(wo, q, 0);
2876+ wo->drv_ops->kickout(wo);
2877+
2878+ q->head = (q->head + 1) % q->ndesc;
2879+ spin_unlock_bh(&q->lock);
2880+ return 0;
2881+
2882+error:
2883+ dev_kfree_skb(skb);
2884+ return -ENOMEM;
2885+}
2886+
2887+static const struct wed_wo_queue_ops wo_queue_ops = {
2888+ .init = woif_q_init,
2889+ .alloc = woif_q_alloc,
developera3f86ed2022-07-08 14:15:13 +08002890+ .free = woif_q_free,
developer8cb3ac72022-07-04 10:55:14 +08002891+ .reset = woif_q_reset,
2892+ .tx_skb = woif_q_tx_skb,
2893+ .tx_clean = woif_q_tx_clean,
2894+ .rx_clean = woif_q_rx_clean,
2895+ .kick = woif_q_kick,
2896+};
2897+
2898+static int
2899+mtk_wed_wo_rx_process(struct mtk_wed_wo *wo, struct wed_wo_queue *q, int budget)
2900+{
2901+ int len, data_len, done = 0;
2902+ struct sk_buff *skb;
2903+ unsigned char *data;
2904+ bool more;
2905+
2906+ while (done < budget) {
2907+ u32 info;
2908+
2909+ data = woif_q_deq(wo, q, false, &len, &info, &more);
2910+ if (!data)
2911+ break;
2912+
2913+ data_len = SKB_WITH_OVERHEAD(q->buf_size);
2914+
2915+ if (data_len < len) {
2916+ skb_free_frag(data);
2917+ continue;
2918+ }
2919+
2920+ skb = build_skb(data, q->buf_size);
2921+ if (!skb) {
2922+ skb_free_frag(data);
2923+ continue;
2924+ }
2925+
2926+ __skb_put(skb, len);
2927+ done++;
2928+
2929+ woif_q_rx_skb(wo, skb);
2930+ }
2931+
2932+ woif_q_rx_fill_process(wo, q);
2933+
2934+ return done;
2935+}
2936+
2937+void mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, bool set,
2938+ u32 clear, u32 val)
2939+{
2940+ unsigned long flags;
2941+
2942+ spin_lock_irqsave(&wo->ccif.irq_lock, flags);
2943+ wo->ccif.irqmask &= ~clear;
2944+ wo->ccif.irqmask |= val;
2945+ if (set)
2946+ wo->drv_ops->set_isr(wo, wo->ccif.irqmask);
2947+
2948+ spin_unlock_irqrestore(&wo->ccif.irq_lock, flags);
2949+}
2950+
2951+static inline void mtk_wed_wo_set_ack_mask(struct mtk_wed_wo *wo, u32 mask)
2952+{
2953+ wo->drv_ops->set_ack(wo, mask);
2954+}
2955+
2956+static void mtk_wed_wo_poll_complete(struct mtk_wed_wo *wo)
2957+{
2958+ mtk_wed_wo_set_ack_mask(wo, wo->ccif.q_int_mask);
2959+ mtk_wed_wo_isr_enable(wo, wo->ccif.q_int_mask);
2960+}
2961+
2962+int mtk_wed_wo_rx_poll(struct napi_struct *napi, int budget)
2963+{
2964+ struct mtk_wed_wo *wo;
2965+ int done = 0, cur;
2966+
2967+ wo = container_of(napi->dev, struct mtk_wed_wo, napi_dev);
2968+
2969+ rcu_read_lock();
2970+
2971+ do {
2972+ cur = mtk_wed_wo_rx_process(wo, &wo->q_rx, budget - done);
2973+ /* rx packet handle */
2974+ done += cur;
2975+ } while (cur && done < budget);
2976+
2977+ rcu_read_unlock();
2978+
2979+ if (done < budget && napi_complete(napi))
2980+ mtk_wed_wo_poll_complete(wo);
2981+
2982+ return done;
2983+}
2984+
2985+static void mtk_wed_wo_isr_tasklet(unsigned long data)
2986+{
2987+ struct mtk_wed_wo *wo = (struct mtk_wed_wo *)data;
2988+ u32 intr, mask;
2989+
2990+ /* disable isr */
2991+ wo->drv_ops->set_isr(wo, 0);
2992+
2993+ intr = wo->drv_ops->get_csr(wo);
2994+ intr &= wo->ccif.irqmask;
2995+
2996+ mask = intr & (wo->ccif.q_int_mask | wo->ccif.q_exep_mask);
2997+ mtk_wed_wo_isr_disable(wo, mask);
2998+
2999+ if (intr & wo->ccif.q_int_mask)
3000+ napi_schedule(&wo->napi);
3001+
3002+ if (intr & wo->ccif.q_exep_mask) {
3003+ /* todo */
3004+ }
3005+}
3006+
3007+static irqreturn_t mtk_wed_wo_isr_handler(int irq, void *wo_instance)
3008+{
3009+ struct mtk_wed_wo *wo = wo_instance;
3010+
3011+ wo->drv_ops->set_isr(wo, 0);
3012+
3013+ tasklet_schedule(&wo->irq_tasklet);
3014+
3015+ return IRQ_HANDLED;
3016+}
3017+
3018+int mtk_wed_wo_init(struct mtk_wed_hw *hw)
3019+{
3020+ struct mtk_wed_wo *wo;
3021+ int ret = 0;
3022+
3023+ wo = kzalloc(sizeof(struct mtk_wed_wo), GFP_KERNEL);
3024+ if (!wo)
3025+ return -ENOMEM;
3026+
3027+ wo->hw = hw;
3028+ wo->queue_ops = &wo_queue_ops;
3029+ hw->wed_wo = wo;
3030+
3031+ tasklet_init(&wo->irq_tasklet, mtk_wed_wo_isr_tasklet,
3032+ (unsigned long)wo);
3033+
3034+ skb_queue_head_init(&wo->mcu.res_q);
3035+ init_waitqueue_head(&wo->mcu.wait);
3036+ mutex_init(&wo->mcu.mutex);
3037+
3038+ ret = wed_wo_hardware_init(wo, mtk_wed_wo_isr_handler);
3039+ if (ret)
3040+ goto error;
3041+
3042+ /* fw download */
3043+ ret = wed_wo_mcu_init(wo);
3044+ if (ret)
3045+ goto error;
3046+
3047+ ret = mtk_wed_exception_init(wo);
3048+ if (ret)
3049+ goto error;
3050+
3051+ return ret;
3052+
3053+error:
3054+ kfree(wo);
3055+
3056+ return ret;
3057+}
3058+
3059+void mtk_wed_wo_exit(struct mtk_wed_hw *hw)
3060+{
developer8cb3ac72022-07-04 10:55:14 +08003061+ struct mtk_wed_wo *wo = hw->wed_wo;
3062+
developera3f86ed2022-07-08 14:15:13 +08003063+ wed_wo_hardware_exit(wo);
3064+
developer8cb3ac72022-07-04 10:55:14 +08003065+ if (wo->exp.log) {
3066+ dma_unmap_single(wo->hw->dev, wo->exp.phys, wo->exp.log_size, DMA_FROM_DEVICE);
3067+ kfree(wo->exp.log);
3068+ }
3069+
developera3f86ed2022-07-08 14:15:13 +08003070+ wo->hw = NULL;
3071+ memset(wo, 0, sizeof(*wo));
3072+ kfree(wo);
developer8cb3ac72022-07-04 10:55:14 +08003073+}
3074diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.h b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
3075new file mode 100644
developera3f86ed2022-07-08 14:15:13 +08003076index 0000000..d962e3a
developer8cb3ac72022-07-04 10:55:14 +08003077--- /dev/null
3078+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
developera3f86ed2022-07-08 14:15:13 +08003079@@ -0,0 +1,336 @@
developer8cb3ac72022-07-04 10:55:14 +08003080+// SPDX-License-Identifier: GPL-2.0-only
3081+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
3082+
3083+#ifndef __MTK_WED_WO_H
3084+#define __MTK_WED_WO_H
3085+
3086+#include <linux/netdevice.h>
3087+#include <linux/skbuff.h>
3088+#include "mtk_wed.h"
3089+
3090+#define WED_CTL_SD_LEN1 GENMASK(13, 0)
3091+#define WED_CTL_LAST_SEC1 BIT(14)
3092+#define WED_CTL_BURST BIT(15)
3093+#define WED_CTL_SD_LEN0_SHIFT 16
3094+#define WED_CTL_SD_LEN0 GENMASK(29, 16)
3095+#define WED_CTL_LAST_SEC0 BIT(30)
3096+#define WED_CTL_DMA_DONE BIT(31)
3097+#define WED_INFO_WINFO GENMASK(15, 0)
3098+
3099+#define MTK_WED_WO_TXQ_FREE_THR 10
3100+
3101+#define WED_WO_PROFILE_MAX_LVL 6
3102+
3103+
3104+enum mtk_wed_fw_region_id {
3105+ WO_REGION_EMI = 0,
3106+ WO_REGION_ILM,
3107+ WO_REGION_DATA,
3108+ WO_REGION_BOOT,
3109+ __WO_REGION_MAX
3110+};
3111+
3112+struct wed_wo_profile_stat {
3113+ u32 bound;
3114+ u32 record;
3115+};
3116+
3117+#define PROFILE_STAT(record, val) do { \
3118+ u8 lvl = 0; \
3119+ while (lvl < WED_WO_PROFILE_MAX_LVL) { \
3120+ if (val < record[lvl].bound) { \
3121+ record[lvl].record++; \
3122+ break; \
3123+ } \
3124+ lvl++; \
3125+ } \
3126+ } while (0)
3127+
3128+/* align with wo report structure */
3129+struct wed_wo_log {
3130+ u32 sn;
3131+ u32 total;
3132+ u32 rro;
3133+ u32 mod;
3134+};
3135+
3136+struct wed_wo_rxcnt {
3137+ u16 wlan_idx;
3138+ u16 tid;
3139+ u32 rx_pkt_cnt;
3140+ u32 rx_byte_cnt;
3141+ u32 rx_err_cnt;
3142+ u32 rx_drop_cnt;
3143+};
3144+
3145+struct wed_wo_queue {
3146+ struct wed_wo_queue_regs *regs;
3147+
3148+ spinlock_t lock;
3149+ spinlock_t cleanup_lock;
3150+ struct wed_wo_queue_entry *entry;
3151+ struct wed_wo_desc *desc;
3152+
3153+ u16 first;
3154+ u16 head;
3155+ u16 tail;
3156+ int ndesc;
3157+ int queued;
3158+ int buf_size;
3159+
3160+ u8 hw_idx;
3161+ u8 qid;
3162+ u8 flags;
3163+
3164+ dma_addr_t desc_dma;
3165+ struct page_frag_cache rx_page;
3166+};
3167+
3168+
3169+struct wed_wo_mmio {
3170+ struct regmap *regs;
3171+
3172+ spinlock_t irq_lock;
3173+ u8 irq;
3174+ u32 irqmask;
3175+
3176+ u32 q_int_mask;
3177+ u32 q_exep_mask;
3178+};
3179+
3180+struct wed_wo_mcu {
3181+ struct mutex mutex;
3182+ u32 msg_seq;
3183+ int timeout;
3184+
3185+ struct sk_buff_head res_q;
3186+ wait_queue_head_t wait;
3187+};
3188+
3189+struct wed_wo_exception {
3190+ void* log;
3191+ int log_size;
3192+ dma_addr_t phys;
3193+};
3194+
3195+struct wed_wo_queue_regs {
3196+ u32 desc_base;
3197+ u32 ring_size;
3198+ u32 cpu_idx;
3199+ u32 dma_idx;
3200+};
3201+
3202+struct wed_wo_desc {
3203+ __le32 buf0;
3204+ __le32 ctrl;
3205+ __le32 buf1;
3206+ __le32 info;
3207+ __le32 reserved[4];
3208+} __packed __aligned(32);
3209+
3210+struct wed_wo_queue_entry {
3211+ union {
3212+ void *buf;
3213+ struct sk_buff *skb;
3214+ };
3215+
3216+ u32 dma_addr;
3217+ u16 dma_len;
3218+ u16 wcid;
3219+ bool skip_buf0:1;
3220+ bool skip_buf1:1;
3221+ bool done:1;
3222+};
3223+
3224+struct wo_cmd_rxcnt_t {
3225+ u16 wlan_idx;
3226+ u16 tid;
3227+ u32 rx_pkt_cnt;
3228+ u32 rx_byte_cnt;
3229+ u32 rx_err_cnt;
3230+ u32 rx_drop_cnt;
3231+};
3232+
3233+struct wo_cmd_query {
3234+ u32 query0;
3235+ u32 query1;
3236+};
3237+
3238+struct wed_cmd_hdr {
3239+ /*DW0*/
3240+ u8 ver;
3241+ u8 cmd_id;
3242+ u16 length;
3243+
3244+ /*DW1*/
3245+ u16 uni_id;
3246+ u16 flag;
3247+
3248+ /*DW2*/
3249+ int status;
3250+
3251+ /*DW3*/
3252+ u8 reserved[20];
3253+};
3254+
3255+struct mtk_wed_fw_region {
3256+ void *addr;
3257+ u32 addr_pa;
3258+ u32 size;
3259+ u32 shared;
3260+};
3261+
3262+struct wed_wo_queue_ops;
3263+struct wed_wo_drv_ops;
3264+struct wed_wo_mcu_ops;
3265+
3266+struct wo_rx_total_cnt {
3267+ u64 rx_pkt_cnt;
3268+ u64 rx_byte_cnt;
3269+ u64 rx_err_cnt;
3270+ u64 rx_drop_cnt;
3271+};
3272+
3273+struct mtk_wed_wo {
3274+ struct mtk_wed_hw *hw;
3275+
3276+ struct wed_wo_mmio ccif;
3277+ struct wed_wo_mcu mcu;
3278+ struct wed_wo_exception exp;
3279+
3280+ const struct wed_wo_drv_ops *drv_ops;
3281+ const struct wed_wo_mcu_ops *mcu_ops;
3282+ const struct wed_wo_queue_ops *queue_ops;
3283+
3284+ struct net_device napi_dev;
3285+ spinlock_t rx_lock;
3286+ struct napi_struct napi;
3287+ struct sk_buff_head rx_skb;
3288+ struct wed_wo_queue q_rx;
3289+ struct tasklet_struct irq_tasklet;
3290+
3291+ struct wed_wo_queue q_tx;
3292+
3293+ struct mtk_wed_fw_region region[__WO_REGION_MAX];
3294+
3295+ struct wed_wo_profile_stat total[WED_WO_PROFILE_MAX_LVL];
3296+ struct wed_wo_profile_stat mod[WED_WO_PROFILE_MAX_LVL];
3297+ struct wed_wo_profile_stat rro[WED_WO_PROFILE_MAX_LVL];
3298+ char dirname[4];
3299+ struct wo_rx_total_cnt wo_rxcnt[8][544];
3300+};
3301+
3302+struct wed_wo_queue_ops {
3303+ int (*init)(struct mtk_wed_wo *wo,
3304+ int (*poll)(struct napi_struct *napi, int budget));
3305+
3306+ int (*alloc)(struct mtk_wed_wo *wo, struct wed_wo_queue *q,
3307+ int idx, int n_desc, int bufsize,
3308+ struct wed_wo_queue_regs *regs);
developera3f86ed2022-07-08 14:15:13 +08003309+ void (*free)(struct mtk_wed_wo *wo, struct wed_wo_queue *q);
developer8cb3ac72022-07-04 10:55:14 +08003310+ void (*reset)(struct mtk_wed_wo *wo, struct wed_wo_queue *q);
3311+
3312+ int (*tx_skb)(struct mtk_wed_wo *wo, struct wed_wo_queue *q,
3313+ struct sk_buff *skb);
3314+ int (*tx_skb1)(struct mtk_wed_wo *wo, struct wed_wo_queue *q,
3315+ u8 *msg, u32 msg_len);
3316+ void (*tx_clean)(struct mtk_wed_wo *wo, struct wed_wo_queue *q,
3317+ bool flush);
3318+
3319+ void (*rx_clean)(struct mtk_wed_wo *wo, struct wed_wo_queue *q);
3320+
3321+ void (*kick)(struct mtk_wed_wo *wo, struct wed_wo_queue *q, int offset);
3322+};
3323+
3324+struct wed_wo_drv_ops {
3325+ void (*kickout)(struct mtk_wed_wo *wo);
3326+ void (*set_ack)(struct mtk_wed_wo *wo, u32 mask);
3327+ void (*set_isr)(struct mtk_wed_wo *wo, u32 mask);
3328+ u32 (*get_csr)(struct mtk_wed_wo *wo);
3329+ int (*tx_prepare_skb)(struct mtk_wed_wo *wo);
3330+ bool (*check_excpetion)(struct mtk_wed_wo *wo);
3331+ void (*clear_int)(struct mtk_wed_wo *wo, u32 mask);
3332+};
3333+
3334+struct wed_wo_mcu_ops {
3335+ u32 headroom;
3336+
3337+ int (*mcu_skb_send_msg)(struct mtk_wed_wo *wo, int to_id,
3338+ int cmd, struct sk_buff *skb,
3339+ int *seq, bool wait_resp);
3340+
3341+ int (*mcu_parse_response)(struct mtk_wed_wo *wo, int cmd,
3342+ struct sk_buff *skb, int seq);
3343+
3344+ int (*mcu_restart)(struct mtk_wed_wo *wo);
3345+};
3346+
3347+#define mtk_wed_wo_q_init(wo, ...) (wo)->queue_ops->init((wo), __VA_ARGS__)
3348+#define mtk_wed_wo_q_alloc(wo, ...) (wo)->queue_ops->alloc((wo), __VA_ARGS__)
developera3f86ed2022-07-08 14:15:13 +08003349+#define mtk_wed_wo_q_free(wo, ...) (wo)->queue_ops->free((wo), __VA_ARGS__)
3350+#define mtk_wed_wo_q_reset(wo, ...) (wo)->queue_ops->reset((wo), __VA_ARGS__)
developer8cb3ac72022-07-04 10:55:14 +08003351+#define mtk_wed_wo_q_tx_skb(wo, ...) (wo)->queue_ops->tx_skb((wo), __VA_ARGS__)
3352+#define mtk_wed_wo_q_tx_skb1(wo, ...) (wo)->queue_ops->tx_skb1((wo), __VA_ARGS__)
3353+#define mtk_wed_wo_q_tx_clean(wo, ...) (wo)->queue_ops->tx_clean((wo), __VA_ARGS__)
3354+#define mtk_wed_wo_q_rx_clean(wo, ...) (wo)->queue_ops->rx_clean((wo), __VA_ARGS__)
3355+#define mtk_wed_wo_q_kick(wo, ...) (wo)->queue_ops->kick((wo), __VA_ARGS__)
3356+
3357+enum {
3358+ WARP_CMD_FLAG_RSP = 1 << 0, /* is responce*/
3359+ WARP_CMD_FLAG_NEED_RSP = 1 << 1, /* need responce */
3360+ WARP_CMD_FLAG_FROM_TO_WO = 1 << 2, /* send between host and wo */
3361+};
3362+
3363+#define WED_WO_CMD_FLAG_IS_RSP(_hdr) ((_hdr)->flag & (WARP_CMD_FLAG_RSP))
3364+#define WED_WO_CMD_FLAG_SET_RSP(_hdr) ((_hdr)->flag |= (WARP_CMD_FLAG_RSP))
3365+#define WED_WO_CMD_FLAG_IS_NEED_RSP(_hdr) ((_hdr)->flag & (WARP_CMD_FLAG_NEED_RSP))
3366+#define WED_WO_CMD_FLAG_SET_NEED_RSP(_hdr) ((_hdr)->flag |= (WARP_CMD_FLAG_NEED_RSP))
3367+#define WED_WO_CMD_FLAG_IS_FROM_TO_WO(_hdr) ((_hdr)->flag & (WARP_CMD_FLAG_FROM_TO_WO))
3368+#define WED_WO_CMD_FLAG_SET_FROM_TO_WO(_hdr) ((_hdr)->flag |= (WARP_CMD_FLAG_FROM_TO_WO))
3369+
3370+void mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, bool set,
3371+ u32 clear, u32 val);
3372+
3373+static inline void mtk_wed_wo_isr_enable(struct mtk_wed_wo *wo, u32 mask)
3374+{
3375+ mtk_wed_wo_set_isr_mask(wo, false, 0, mask);
3376+
3377+ tasklet_schedule(&wo->irq_tasklet);
3378+}
3379+
3380+static inline void mtk_wed_wo_isr_disable(struct mtk_wed_wo *wo, u32 mask)
3381+{
3382+ mtk_wed_wo_set_isr_mask(wo, true, mask, 0);
3383+}
3384+
3385+static inline void
3386+wo_w32(struct mtk_wed_wo *dev, u32 reg, u32 val)
3387+{
3388+ writel(val, dev->region[WO_REGION_BOOT].addr + reg);
3389+}
3390+
3391+static inline u32
3392+wo_r32(struct mtk_wed_wo *dev, u32 reg)
3393+{
3394+ return readl(dev->region[WO_REGION_BOOT].addr + reg);
3395+}
3396+static inline void
3397+woccif_w32(struct mtk_wed_wo *dev, u32 reg, u32 val)
3398+{
3399+ regmap_write(dev->ccif.regs, reg, val);
3400+}
3401+
3402+static inline u32
3403+woccif_r32(struct mtk_wed_wo *dev, u32 reg)
3404+{
3405+ unsigned int val;
3406+
3407+ regmap_read(dev->ccif.regs, reg, &val);
3408+
3409+ return val;
3410+}
3411+
3412+int mtk_wed_wo_init(struct mtk_wed_hw *hw);
developera3f86ed2022-07-08 14:15:13 +08003413+void mtk_wed_wo_exit(struct mtk_wed_hw *hw);
developer8cb3ac72022-07-04 10:55:14 +08003414+#endif
3415+
3416diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
3417index 24742604b..b6b6823ae 100644
3418--- a/include/linux/soc/mediatek/mtk_wed.h
3419+++ b/include/linux/soc/mediatek/mtk_wed.h
3420@@ -7,6 +7,9 @@
3421 #include <linux/pci.h>
3422
3423 #define MTK_WED_TX_QUEUES 2
3424+#define MTK_WED_RX_QUEUES 2
3425+
3426+#define WED_WO_STA_REC 0x6
3427
3428 enum {
3429 MTK_NO_WED,
3430@@ -33,6 +36,24 @@ struct mtk_wed_ring {
3431 void __iomem *wpdma;
3432 };
3433
3434+struct mtk_rxbm_desc {
3435+ __le32 buf0;
3436+ __le32 token;
3437+} __packed __aligned(4);
3438+
3439+struct dma_buf {
3440+ int size;
3441+ void **pages;
3442+ struct mtk_wdma_desc *desc;
3443+ dma_addr_t desc_phys;
3444+};
3445+
3446+struct dma_entry {
3447+ int size;
3448+ struct mtk_rxbm_desc *desc;
3449+ dma_addr_t desc_phys;
3450+};
3451+
3452 struct mtk_wed_device {
3453 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
3454 const struct mtk_wed_ops *ops;
3455@@ -46,19 +67,27 @@ struct mtk_wed_device {
3456 struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES];
3457 struct mtk_wed_ring txfree_ring;
3458 struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
3459+ struct mtk_wed_ring rx_ring[MTK_WED_RX_QUEUES];
3460+ struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES];
3461+
3462+ struct dma_buf buf_ring;
3463+ struct dma_entry rx_buf_ring;
3464+ struct page_frag_cache rx_page;
3465
3466 struct {
3467- int size;
3468- void **pages;
3469- struct mtk_wdma_desc *desc;
3470- dma_addr_t desc_phys;
3471- } buf_ring;
3472+ struct mtk_wed_ring rro_ring;
3473+ void __iomem *rro_desc;
3474+ dma_addr_t miod_desc_phys;
3475+ dma_addr_t fdbk_desc_phys;
3476+ u32 mcu_view_miod;
3477+ } rro;
3478
3479 /* filled by driver: */
3480 struct {
3481 struct pci_dev *pci_dev;
3482 void __iomem *base;
3483 u32 bus_type;
3484+ u32 phy_base;
3485
3486 union {
3487 u32 wpdma_phys;
3488@@ -67,16 +96,25 @@ struct mtk_wed_device {
3489 u32 wpdma_mask;
3490 u32 wpdma_tx;
3491 u32 wpdma_txfree;
3492+ u32 wpdma_rx_glo;
3493+ u32 wpdma_rx;
3494
3495 u8 tx_tbit[MTK_WED_TX_QUEUES];
3496+ u8 rx_tbit[MTK_WED_RX_QUEUES];
3497 u8 txfree_tbit;
3498
3499 u16 token_start;
3500 unsigned int nbuf;
3501+ unsigned int rx_nbuf;
3502+ unsigned int rx_pkt;
3503+ unsigned int rx_pkt_size;
3504
3505 u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
3506 int (*offload_enable)(struct mtk_wed_device *wed);
3507 void (*offload_disable)(struct mtk_wed_device *wed);
3508+ u32 (*init_rx_buf)(struct mtk_wed_device *wed,
3509+ int pkt_num);
3510+ void (*release_rx_buf)(struct mtk_wed_device *wed);
3511 } wlan;
3512 #endif
3513 };
3514@@ -87,6 +125,10 @@ struct mtk_wed_ops {
3515 void __iomem *regs);
3516 int (*txfree_ring_setup)(struct mtk_wed_device *dev,
3517 void __iomem *regs);
3518+ int (*rx_ring_setup)(struct mtk_wed_device *dev, int ring,
3519+ void __iomem *regs);
3520+ int (*msg_update)(struct mtk_wed_device *dev, int cmd_id,
3521+ void *data, int len);
3522 void (*detach)(struct mtk_wed_device *dev);
3523
3524 void (*stop)(struct mtk_wed_device *dev);
3525@@ -98,6 +140,8 @@ struct mtk_wed_ops {
3526
3527 u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask);
3528 void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
3529+ void (*ppe_check)(struct mtk_wed_device *dev, struct sk_buff *skb,
3530+ u32 reason, u32 hash);
3531 };
3532
3533 extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
3534@@ -130,6 +174,10 @@ mtk_wed_device_attach(struct mtk_wed_device *dev)
3535 (_dev)->ops->tx_ring_setup(_dev, _ring, _regs)
3536 #define mtk_wed_device_txfree_ring_setup(_dev, _regs) \
3537 (_dev)->ops->txfree_ring_setup(_dev, _regs)
3538+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) \
3539+ (_dev)->ops->rx_ring_setup(_dev, _ring, _regs)
3540+#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) \
3541+ (_dev)->ops->msg_update(_dev, _id, _msg, _len)
3542 #define mtk_wed_device_reg_read(_dev, _reg) \
3543 (_dev)->ops->reg_read(_dev, _reg)
3544 #define mtk_wed_device_reg_write(_dev, _reg, _val) \
3545@@ -138,6 +186,8 @@ mtk_wed_device_attach(struct mtk_wed_device *dev)
3546 (_dev)->ops->irq_get(_dev, _mask)
3547 #define mtk_wed_device_irq_set_mask(_dev, _mask) \
3548 (_dev)->ops->irq_set_mask(_dev, _mask)
3549+#define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) \
3550+ (_dev)->ops->ppe_check(_dev, _skb, _reason, _hash)
3551 #else
3552 static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
3553 {
3554@@ -147,10 +197,13 @@ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
3555 #define mtk_wed_device_start(_dev, _mask) do {} while (0)
3556 #define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV
3557 #define mtk_wed_device_txfree_ring_setup(_dev, _ring, _regs) -ENODEV
3558+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) -ENODEV
3559+#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) -ENODEV
3560 #define mtk_wed_device_reg_read(_dev, _reg) 0
3561 #define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
3562 #define mtk_wed_device_irq_get(_dev, _mask) 0
3563 #define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0)
3564+#define mtk_wed_device_ppe_check(_dev, _hash) do {} while (0)
3565 #endif
3566
3567 #endif
3568--
35692.18.0
3570