blob: fd64dcfe10943178aaedee4e9e95d7432ef35acf [file] [log] [blame]
developer69bcd592024-03-25 14:26:39 +08001From fad15885ddf2f138ed3ec652b4fcd0cbdf54bf4e Mon Sep 17 00:00:00 2001
developer3262bf82022-07-12 11:37:54 +08002From: Sujuan Chen <sujuan.chen@mediatek.com>
developer740bee82023-10-16 10:58:43 +08003Date: Mon, 18 Sep 2023 11:04:53 +0800
developer69bcd592024-03-25 14:26:39 +08004Subject: [PATCH 08/24] add-wed-rx-support-for-netsys2
developer3262bf82022-07-12 11:37:54 +08005
developer3262bf82022-07-12 11:37:54 +08006---
developer69bcd592024-03-25 14:26:39 +08007 arch/arm64/boot/dts/mediatek/mt7981.dtsi | 23 +-
developer3262bf82022-07-12 11:37:54 +08008 arch/arm64/boot/dts/mediatek/mt7986a.dtsi | 42 +-
9 arch/arm64/boot/dts/mediatek/mt7986b.dtsi | 42 +-
10 drivers/net/ethernet/mediatek/Makefile | 2 +-
developer69bcd592024-03-25 14:26:39 +080011 drivers/net/ethernet/mediatek/mtk_wed.c | 645 ++++++++++++++++--
developer29f66b32022-07-12 15:23:20 +080012 drivers/net/ethernet/mediatek/mtk_wed.h | 51 ++
13 drivers/net/ethernet/mediatek/mtk_wed_ccif.c | 133 ++++
developer3262bf82022-07-12 11:37:54 +080014 drivers/net/ethernet/mediatek/mtk_wed_ccif.h | 45 ++
15 .../net/ethernet/mediatek/mtk_wed_debugfs.c | 90 +++
developer69bcd592024-03-25 14:26:39 +080016 drivers/net/ethernet/mediatek/mtk_wed_mcu.c | 604 ++++++++++++++++
17 drivers/net/ethernet/mediatek/mtk_wed_mcu.h | 97 +++
18 drivers/net/ethernet/mediatek/mtk_wed_regs.h | 143 +++-
19 drivers/net/ethernet/mediatek/mtk_wed_wo.c | 564 +++++++++++++++
20 drivers/net/ethernet/mediatek/mtk_wed_wo.h | 325 +++++++++
21 include/linux/soc/mediatek/mtk_wed.h | 114 +++-
22 15 files changed, 2822 insertions(+), 98 deletions(-)
developer3262bf82022-07-12 11:37:54 +080023 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_ccif.c
24 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_ccif.h
25 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_mcu.c
26 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_mcu.h
27 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_wo.c
28 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_wo.h
29
developer69bcd592024-03-25 14:26:39 +080030diff --git a/arch/arm64/boot/dts/mediatek/mt7981.dtsi b/arch/arm64/boot/dts/mediatek/mt7981.dtsi
31index cb8f4e1..39b99d8 100644
32--- a/arch/arm64/boot/dts/mediatek/mt7981.dtsi
33+++ b/arch/arm64/boot/dts/mediatek/mt7981.dtsi
34@@ -97,26 +97,29 @@
35 interrupt-parent = <&gic>;
36 interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
37 mediatek,wed_pcie = <&wed_pcie>;
38+ mediatek,ap2woccif = <&ap2woccif0>;
39+ mediatek,wocpu_ilm = <&wocpu0_ilm>;
40+ mediatek,wocpu_dlm = <&wocpu0_dlm>;
41+ mediatek,wocpu_boot = <&cpu_boot>;
42+ mediatek,wocpu_emi = <&wocpu0_emi>;
43+ mediatek,wocpu_data = <&wocpu_data>;
44 };
45
46- ap2woccif: ap2woccif@151A5000 {
47- compatible = "mediatek,ap2woccif";
48- reg = <0 0x151A5000 0 0x1000>,
49- <0 0x151AD000 0 0x1000>;
50+ ap2woccif0: ap2woccif@151A5000 {
51+ compatible = "mediatek,ap2woccif", "syscon";
52+ reg = <0 0x151A5000 0 0x1000>;
53 interrupt-parent = <&gic>;
54- interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
55- <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
56- };
57+ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
58+ };
59
60 wocpu0_ilm: wocpu0_ilm@151E0000 {
61 compatible = "mediatek,wocpu0_ilm";
62 reg = <0 0x151E0000 0 0x8000>;
63 };
64
65- wocpu_dlm: wocpu_dlm@151E8000 {
66+ wocpu0_dlm: wocpu_dlm@151E8000 {
67 compatible = "mediatek,wocpu_dlm";
68- reg = <0 0x151E8000 0 0x2000>,
69- <0 0x151F8000 0 0x2000>;
70+ reg = <0 0x151E8000 0 0x2000>;
71
72 resets = <&ethsysrst 0>;
73 reset-names = "wocpu_rst";
developer3262bf82022-07-12 11:37:54 +080074diff --git a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
developer69bcd592024-03-25 14:26:39 +080075index 9c288fc..e6f50d5 100644
developer3262bf82022-07-12 11:37:54 +080076--- a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
77+++ b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
78@@ -65,6 +65,12 @@
79 interrupt-parent = <&gic>;
80 interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
81 mediatek,wed_pcie = <&wed_pcie>;
82+ mediatek,ap2woccif = <&ap2woccif0>;
83+ mediatek,wocpu_ilm = <&wocpu0_ilm>;
84+ mediatek,wocpu_dlm = <&wocpu0_dlm>;
85+ mediatek,wocpu_boot = <&cpu_boot>;
86+ mediatek,wocpu_emi = <&wocpu0_emi>;
87+ mediatek,wocpu_data = <&wocpu_data>;
88 };
89
90 wed1: wed@15011000 {
91@@ -74,15 +80,26 @@
92 interrupt-parent = <&gic>;
93 interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
94 mediatek,wed_pcie = <&wed_pcie>;
95+ mediatek,ap2woccif = <&ap2woccif1>;
96+ mediatek,wocpu_ilm = <&wocpu1_ilm>;
97+ mediatek,wocpu_dlm = <&wocpu1_dlm>;
98+ mediatek,wocpu_boot = <&cpu_boot>;
99+ mediatek,wocpu_emi = <&wocpu1_emi>;
100+ mediatek,wocpu_data = <&wocpu_data>;
101 };
102
103- ap2woccif: ap2woccif@151A5000 {
104- compatible = "mediatek,ap2woccif";
105- reg = <0 0x151A5000 0 0x1000>,
106- <0 0x151AD000 0 0x1000>;
107+ ap2woccif0: ap2woccif@151A5000 {
108+ compatible = "mediatek,ap2woccif", "syscon";
109+ reg = <0 0x151A5000 0 0x1000>;
110 interrupt-parent = <&gic>;
111- interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
112- <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
113+ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
114+ };
115+
116+        ap2woccif1: ap2woccif@151AD000 {
117+ compatible = "mediatek,ap2woccif", "syscon";
118+ reg = <0 0x151AD000 0 0x1000>;
119+ interrupt-parent = <&gic>;
120+ interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
121 };
122
123 wocpu0_ilm: wocpu0_ilm@151E0000 {
124@@ -95,10 +112,17 @@
125 reg = <0 0x151F0000 0 0x8000>;
126 };
127
128- wocpu_dlm: wocpu_dlm@151E8000 {
129+ wocpu0_dlm: wocpu_dlm@151E8000 {
130+ compatible = "mediatek,wocpu_dlm";
131+ reg = <0 0x151E8000 0 0x2000>;
132+
133+ resets = <&ethsysrst 0>;
134+ reset-names = "wocpu_rst";
135+ };
136+
137+	wocpu1_dlm: wocpu_dlm@151F8000 {
138 compatible = "mediatek,wocpu_dlm";
139- reg = <0 0x151E8000 0 0x2000>,
140- <0 0x151F8000 0 0x2000>;
141+ reg = <0 0x151F8000 0 0x2000>;
142
143 resets = <&ethsysrst 0>;
144 reset-names = "wocpu_rst";
145diff --git a/arch/arm64/boot/dts/mediatek/mt7986b.dtsi b/arch/arm64/boot/dts/mediatek/mt7986b.dtsi
developer69bcd592024-03-25 14:26:39 +0800146index 02feaa9..3bacadc 100644
developer3262bf82022-07-12 11:37:54 +0800147--- a/arch/arm64/boot/dts/mediatek/mt7986b.dtsi
148+++ b/arch/arm64/boot/dts/mediatek/mt7986b.dtsi
149@@ -65,6 +65,12 @@
150 interrupt-parent = <&gic>;
151 interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
152 mediatek,wed_pcie = <&wed_pcie>;
153+ mediatek,ap2woccif = <&ap2woccif0>;
154+ mediatek,wocpu_ilm = <&wocpu0_ilm>;
155+ mediatek,wocpu_dlm = <&wocpu0_dlm>;
156+ mediatek,wocpu_boot = <&cpu_boot>;
157+ mediatek,wocpu_emi = <&wocpu0_emi>;
158+ mediatek,wocpu_data = <&wocpu_data>;
159 };
160
161 wed1: wed@15011000 {
162@@ -74,15 +80,26 @@
163 interrupt-parent = <&gic>;
164 interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
165 mediatek,wed_pcie = <&wed_pcie>;
166+ mediatek,ap2woccif = <&ap2woccif1>;
167+ mediatek,wocpu_ilm = <&wocpu1_ilm>;
168+ mediatek,wocpu_dlm = <&wocpu1_dlm>;
169+ mediatek,wocpu_boot = <&cpu_boot>;
170+ mediatek,wocpu_emi = <&wocpu1_emi>;
171+ mediatek,wocpu_data = <&wocpu_data>;
172 };
173
174- ap2woccif: ap2woccif@151A5000 {
175- compatible = "mediatek,ap2woccif";
176- reg = <0 0x151A5000 0 0x1000>,
177- <0 0x151AD000 0 0x1000>;
178+ ap2woccif0: ap2woccif@151A5000 {
179+ compatible = "mediatek,ap2woccif", "syscon";
180+ reg = <0 0x151A5000 0 0x1000>;
181 interrupt-parent = <&gic>;
182- interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
183- <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
184+ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
185+ };
186+
187+        ap2woccif1: ap2woccif@151AD000 {
188+ compatible = "mediatek,ap2woccif", "syscon";
189+ reg = <0 0x151AD000 0 0x1000>;
190+ interrupt-parent = <&gic>;
191+ interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
192 };
193
194 wocpu0_ilm: wocpu0_ilm@151E0000 {
195@@ -95,10 +112,17 @@
196 reg = <0 0x151F0000 0 0x8000>;
197 };
198
199- wocpu_dlm: wocpu_dlm@151E8000 {
200+ wocpu0_dlm: wocpu_dlm@151E8000 {
201+ compatible = "mediatek,wocpu_dlm";
202+ reg = <0 0x151E8000 0 0x2000>;
203+
204+ resets = <&ethsysrst 0>;
205+ reset-names = "wocpu_rst";
206+ };
207+
208+	wocpu1_dlm: wocpu_dlm@151F8000 {
209 compatible = "mediatek,wocpu_dlm";
210- reg = <0 0x151E8000 0 0x2000>,
211- <0 0x151F8000 0 0x2000>;
212+ reg = <0 0x151F8000 0 0x2000>;
213
214 resets = <&ethsysrst 0>;
215 reset-names = "wocpu_rst";
216diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
developer740bee82023-10-16 10:58:43 +0800217index 4090132..fdbb90f 100644
developer3262bf82022-07-12 11:37:54 +0800218--- a/drivers/net/ethernet/mediatek/Makefile
219+++ b/drivers/net/ethernet/mediatek/Makefile
220@@ -10,5 +10,5 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o
221 ifdef CONFIG_DEBUG_FS
222 mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
223 endif
224-obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
225+obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o mtk_wed_wo.o mtk_wed_mcu.o mtk_wed_ccif.o
226 obj-$(CONFIG_NET_MEDIATEK_HNAT) += mtk_hnat/
227diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
developer69bcd592024-03-25 14:26:39 +0800228index ea8b2db..ad9f3d5 100644
developer3262bf82022-07-12 11:37:54 +0800229--- a/drivers/net/ethernet/mediatek/mtk_wed.c
230+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
developer69bcd592024-03-25 14:26:39 +0800231@@ -13,10 +13,13 @@
developer3262bf82022-07-12 11:37:54 +0800232 #include <linux/debugfs.h>
233 #include <linux/iopoll.h>
234 #include <linux/soc/mediatek/mtk_wed.h>
235+
236 #include "mtk_eth_soc.h"
237 #include "mtk_wed_regs.h"
238 #include "mtk_wed.h"
239 #include "mtk_ppe.h"
developer3262bf82022-07-12 11:37:54 +0800240+#include "mtk_wed_mcu.h"
241+#include "mtk_wed_wo.h"
developer69bcd592024-03-25 14:26:39 +0800242
developer3262bf82022-07-12 11:37:54 +0800243 static struct mtk_wed_hw *hw_list[2];
244 static DEFINE_MUTEX(hw_lock);
developer69bcd592024-03-25 14:26:39 +0800245@@ -51,12 +54,65 @@ wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
developer3262bf82022-07-12 11:37:54 +0800246 wdma_m32(dev, reg, 0, mask);
247 }
248
249+static void
250+wdma_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
251+{
252+ wdma_m32(dev, reg, mask, 0);
253+}
254+
developer69bcd592024-03-25 14:26:39 +0800255 static u32
256 mtk_wed_read_reset(struct mtk_wed_device *dev)
257 {
258 return wed_r32(dev, MTK_WED_RESET);
259 }
260
developer29f66b32022-07-12 15:23:20 +0800261+static u32
262+mtk_wdma_read_reset(struct mtk_wed_device *dev)
263+{
264+ return wdma_r32(dev, MTK_WDMA_GLO_CFG);
265+}
266+
developer69bcd592024-03-25 14:26:39 +0800267+static void
developer29f66b32022-07-12 15:23:20 +0800268+mtk_wdma_rx_reset(struct mtk_wed_device *dev)
269+{
developer69bcd592024-03-25 14:26:39 +0800270+ u32 status, mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY;
271+ int i, ret;
developer29f66b32022-07-12 15:23:20 +0800272+
273+ wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN);
developer69bcd592024-03-25 14:26:39 +0800274+ ret = readx_poll_timeout(mtk_wdma_read_reset, dev, status,
275+				 !(status & mask), 0, 1000);
276+ if (ret)
277+		dev_err(dev->hw->dev, "rx reset failed\n");
developer29f66b32022-07-12 15:23:20 +0800278+
developer69bcd592024-03-25 14:26:39 +0800279+ for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) {
280+ if (!dev->rx_wdma[i].desc)
281+ continue;
282+
283+ wdma_w32(dev,
284+ MTK_WDMA_RING_RX(i) + MTK_WED_RING_OFS_CPU_IDX, 0);
developer29f66b32022-07-12 15:23:20 +0800285+ }
286+}
287+
288+static void
289+mtk_wdma_tx_reset(struct mtk_wed_device *dev)
290+{
developer69bcd592024-03-25 14:26:39 +0800291+ u32 status, mask = MTK_WDMA_GLO_CFG_TX_DMA_BUSY;
developer29f66b32022-07-12 15:23:20 +0800292+ int i;
293+
294+ wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
295+ if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
296+ !(status & mask), 0, 1000))
developer69bcd592024-03-25 14:26:39 +0800297+		dev_err(dev->hw->dev, "tx reset failed\n");
developer29f66b32022-07-12 15:23:20 +0800298+
developer69bcd592024-03-25 14:26:39 +0800299+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) {
300+ if (!dev->tx_wdma[i].desc)
301+ continue;
302+
303+ wdma_w32(dev,
304+ MTK_WDMA_RING_TX(i) + MTK_WED_RING_OFS_CPU_IDX, 0);
developer29f66b32022-07-12 15:23:20 +0800305+ }
306+}
307+
developer69bcd592024-03-25 14:26:39 +0800308 static void
309 mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
developer3262bf82022-07-12 11:37:54 +0800310 {
developer69bcd592024-03-25 14:26:39 +0800311@@ -68,6 +124,58 @@ mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
developer3262bf82022-07-12 11:37:54 +0800312 WARN_ON_ONCE(1);
313 }
314
developer69bcd592024-03-25 14:26:39 +0800315+static u32
316+mtk_wed_wo_read_status(struct mtk_wed_device *dev)
317+{
318+ return wed_r32(dev, MTK_WED_SCR0 + 4 * WED_DUMMY_CR_WO_STATUS);
319+}
320+
developer3262bf82022-07-12 11:37:54 +0800321+static void
322+mtk_wed_wo_reset(struct mtk_wed_device *dev)
323+{
324+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
325+ u8 state = WO_STATE_DISABLE;
developer3262bf82022-07-12 11:37:54 +0800326+ void __iomem *reg;
developer69bcd592024-03-25 14:26:39 +0800327+ u32 val;
developer3262bf82022-07-12 11:37:54 +0800328+
developerc89c5472022-08-02 13:00:04 +0800329+ mtk_wdma_tx_reset(dev);
developer29f66b32022-07-12 15:23:20 +0800330+
331+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
332+
developer69bcd592024-03-25 14:26:39 +0800333+ mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
334+ MTK_WED_WO_CMD_CHANGE_STATE, &state,
335+ sizeof(state), false);
developer3262bf82022-07-12 11:37:54 +0800336+
developer69bcd592024-03-25 14:26:39 +0800337+ if (readx_poll_timeout(mtk_wed_wo_read_status, dev, val,
338+ val == WOIF_DISABLE_DONE,
339+ 100, WOCPU_TIMEOUT))
340+ dev_err(dev->hw->dev, "failed to disable wed-wo\n");
developer3262bf82022-07-12 11:37:54 +0800341+
342+ reg = ioremap(WOCPU_MCUSYS_RESET_ADDR, 4);
developer69bcd592024-03-25 14:26:39 +0800343+ val = readl((void *)reg);
developer3262bf82022-07-12 11:37:54 +0800344+ switch(dev->hw->index) {
345+ case 0:
developer69bcd592024-03-25 14:26:39 +0800346+ val |= WOCPU_WO0_MCUSYS_RESET_MASK;
347+ writel(val, (void *)reg);
348+ val &= ~WOCPU_WO0_MCUSYS_RESET_MASK;
349+ writel(val, (void *)reg);
developer3262bf82022-07-12 11:37:54 +0800350+ break;
351+ case 1:
developer69bcd592024-03-25 14:26:39 +0800352+ val |= WOCPU_WO1_MCUSYS_RESET_MASK;
353+ writel(val, (void *)reg);
354+ val &= ~WOCPU_WO1_MCUSYS_RESET_MASK;
355+ writel(val, (void *)reg);
developer3262bf82022-07-12 11:37:54 +0800356+ break;
357+ default:
358+ dev_err(dev->hw->dev, "wrong mtk_wed%d\n",
359+ dev->hw->index);
360+
361+ break;
362+ }
363+
364+ iounmap((void *)reg);
365+}
366+
367 static struct mtk_wed_hw *
368 mtk_wed_assign(struct mtk_wed_device *dev)
369 {
developer69bcd592024-03-25 14:26:39 +0800370@@ -102,7 +210,7 @@ mtk_wed_assign(struct mtk_wed_device *dev)
371 }
372
373 static int
374-mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
375+mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
376 {
377 struct mtk_wdma_desc *desc;
378 dma_addr_t desc_phys;
379@@ -124,16 +232,16 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
380 if (!page_list)
381 return -ENOMEM;
382
383- dev->buf_ring.size = ring_size;
384- dev->buf_ring.pages = page_list;
385+ dev->tx_buf_ring.size = ring_size;
386+ dev->tx_buf_ring.pages = page_list;
387
388 desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
389 &desc_phys, GFP_KERNEL);
390 if (!desc)
391 return -ENOMEM;
392
393- dev->buf_ring.desc = desc;
394- dev->buf_ring.desc_phys = desc_phys;
395+ dev->tx_buf_ring.desc = desc;
396+ dev->tx_buf_ring.desc_phys = desc_phys;
397
398 for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
399 dma_addr_t page_phys, buf_phys;
400@@ -194,11 +302,11 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
401 }
402
403 static void
404-mtk_wed_free_buffer(struct mtk_wed_device *dev)
405+mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
developer29f66b32022-07-12 15:23:20 +0800406 {
developer69bcd592024-03-25 14:26:39 +0800407- struct mtk_wdma_desc *desc = dev->buf_ring.desc;
408- void **page_list = dev->buf_ring.pages;
developer29f66b32022-07-12 15:23:20 +0800409- int page_idx;
developer69bcd592024-03-25 14:26:39 +0800410+ struct mtk_wdma_desc *desc = dev->tx_buf_ring.desc;
411+ void **page_list = dev->tx_buf_ring.pages;
developer29f66b32022-07-12 15:23:20 +0800412+ int ring_size, page_idx;
413 int i;
414
415 if (!page_list)
developer69bcd592024-03-25 14:26:39 +0800416@@ -207,7 +315,14 @@ mtk_wed_free_buffer(struct mtk_wed_device *dev)
developer29f66b32022-07-12 15:23:20 +0800417 if (!desc)
418 goto free_pagelist;
419
developer4df64ba2022-09-01 14:44:55 +0800420- for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
developer69bcd592024-03-25 14:26:39 +0800421+ if (dev->hw->version == 1) {
developer29f66b32022-07-12 15:23:20 +0800422+ ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
423+ } else {
424+ ring_size = MTK_WED_VLD_GROUP_SIZE * MTK_WED_PER_GROUP_PKT +
425+ MTK_WED_WDMA_RING_SIZE * 2;
426+ }
427+
developer4df64ba2022-09-01 14:44:55 +0800428+ for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
developer29f66b32022-07-12 15:23:20 +0800429 void *page = page_list[page_idx++];
developer69bcd592024-03-25 14:26:39 +0800430 dma_addr_t buf_addr;
developer29f66b32022-07-12 15:23:20 +0800431
developer69bcd592024-03-25 14:26:39 +0800432@@ -220,13 +335,64 @@ mtk_wed_free_buffer(struct mtk_wed_device *dev)
developer4df64ba2022-09-01 14:44:55 +0800433 __free_page(page);
434 }
435
436- dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc),
developer69bcd592024-03-25 14:26:39 +0800437- desc, dev->buf_ring.desc_phys);
developer4df64ba2022-09-01 14:44:55 +0800438+ dma_free_coherent(dev->hw->dev, ring_size * sizeof(*desc),
developer69bcd592024-03-25 14:26:39 +0800439+ desc, dev->tx_buf_ring.desc_phys);
developer4df64ba2022-09-01 14:44:55 +0800440
441 free_pagelist:
developer3262bf82022-07-12 11:37:54 +0800442 kfree(page_list);
443 }
444
445+static int
developer69bcd592024-03-25 14:26:39 +0800446+mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
developer3262bf82022-07-12 11:37:54 +0800447+{
448+ struct mtk_rxbm_desc *desc;
449+ dma_addr_t desc_phys;
developer3262bf82022-07-12 11:37:54 +0800450+
developer69bcd592024-03-25 14:26:39 +0800451+ dev->rx_buf_ring.size = dev->wlan.rx_nbuf;
452+ desc = dma_alloc_coherent(dev->hw->dev,
453+ dev->wlan.rx_nbuf * sizeof(*desc),
developer3262bf82022-07-12 11:37:54 +0800454+ &desc_phys, GFP_KERNEL);
455+ if (!desc)
456+ return -ENOMEM;
457+
458+ dev->rx_buf_ring.desc = desc;
459+ dev->rx_buf_ring.desc_phys = desc_phys;
developer3d5faf22022-11-29 18:07:22 +0800460+ dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt);
developer69bcd592024-03-25 14:26:39 +0800461+
developer3262bf82022-07-12 11:37:54 +0800462+ return 0;
463+}
464+
465+static void
developer69bcd592024-03-25 14:26:39 +0800466+mtk_wed_free_rx_buffer(struct mtk_wed_device *dev)
developer3262bf82022-07-12 11:37:54 +0800467+{
468+ struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc;
developer3262bf82022-07-12 11:37:54 +0800469+
470+ if (!desc)
471+ return;
472+
473+ dev->wlan.release_rx_buf(dev);
474+
developer69bcd592024-03-25 14:26:39 +0800475+ dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc),
developere8c82b22022-08-09 14:58:55 +0800476+ desc, dev->rx_buf_ring.desc_phys);
developer3262bf82022-07-12 11:37:54 +0800477+}
478+
developer69bcd592024-03-25 14:26:39 +0800479+static void
480+mtk_wed_rx_buffer_hw_init(struct mtk_wed_device *dev)
481+{
482+ wed_w32(dev, MTK_WED_RX_BM_RX_DMAD,
483+ FIELD_PREP(MTK_WED_RX_BM_RX_DMAD_SDL0, dev->wlan.rx_size));
484+
485+ wed_w32(dev, MTK_WED_RX_BM_BASE, dev->rx_buf_ring.desc_phys);
486+
487+ wed_w32(dev, MTK_WED_RX_BM_INIT_PTR, MTK_WED_RX_BM_INIT_SW_TAIL |
488+ FIELD_PREP(MTK_WED_RX_BM_SW_TAIL, dev->wlan.rx_npkt));
489+
490+ wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH,
491+ FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff));
492+
493+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
494+}
495+
developer3262bf82022-07-12 11:37:54 +0800496 static void
developer69bcd592024-03-25 14:26:39 +0800497 mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
developer3262bf82022-07-12 11:37:54 +0800498 {
developer69bcd592024-03-25 14:26:39 +0800499@@ -237,6 +403,13 @@ mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
500 ring->desc, ring->desc_phys);
developer3262bf82022-07-12 11:37:54 +0800501 }
502
503+static void
504+mtk_wed_free_rx_rings(struct mtk_wed_device *dev)
505+{
developer69bcd592024-03-25 14:26:39 +0800506+ mtk_wed_free_rx_buffer(dev);
507+ mtk_wed_free_ring(dev, &dev->rro.ring);
developer3262bf82022-07-12 11:37:54 +0800508+}
509+
510 static void
developer69bcd592024-03-25 14:26:39 +0800511 mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
developer3262bf82022-07-12 11:37:54 +0800512 {
developer69bcd592024-03-25 14:26:39 +0800513@@ -244,8 +417,8 @@ mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
developer3262bf82022-07-12 11:37:54 +0800514
developer69bcd592024-03-25 14:26:39 +0800515 for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
516 mtk_wed_free_ring(dev, &dev->tx_ring[i]);
517- for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
518- mtk_wed_free_ring(dev, &dev->tx_wdma[i]);
519+ for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
520+ mtk_wed_free_ring(dev, &dev->rx_wdma[i]);
521 }
developer3262bf82022-07-12 11:37:54 +0800522
developer69bcd592024-03-25 14:26:39 +0800523 static void
524@@ -277,6 +450,39 @@ mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
developer3262bf82022-07-12 11:37:54 +0800525 }
526 }
527
developer69bcd592024-03-25 14:26:39 +0800528+#define MTK_WFMDA_RX_DMA_EN BIT(2)
developer3262bf82022-07-12 11:37:54 +0800529+static void
530+mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
531+{
developer69bcd592024-03-25 14:26:39 +0800532+ u32 val;
533+ int i;
developer3262bf82022-07-12 11:37:54 +0800534+
developer69bcd592024-03-25 14:26:39 +0800535+ if(!(dev->rx_ring[idx].flags & MTK_WED_RING_CONFIGURED))
536+ return;
developer3262bf82022-07-12 11:37:54 +0800537+
developer69bcd592024-03-25 14:26:39 +0800538+ for (i = 0; i < 3; i++) {
539+ u32 cur_idx;
540+
541+ cur_idx = wed_r32(dev,
542+ MTK_WED_WPDMA_RING_RX_DATA(idx) +
543+ MTK_WED_RING_OFS_CPU_IDX);
developer3262bf82022-07-12 11:37:54 +0800544+ if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
545+ break;
546+
547+ usleep_range(100000, 200000);
developer69bcd592024-03-25 14:26:39 +0800548+ }
developer3262bf82022-07-12 11:37:54 +0800549+
developer69bcd592024-03-25 14:26:39 +0800550+ if (i == 3) {
developer3262bf82022-07-12 11:37:54 +0800551+ dev_err(dev->hw->dev, "mtk_wed%d: rx dma enable failed!\n",
552+ dev->hw->index);
developer69bcd592024-03-25 14:26:39 +0800553+ return;
developer3262bf82022-07-12 11:37:54 +0800554+ }
developer69bcd592024-03-25 14:26:39 +0800555+
556+ val = wifi_r32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base) |
557+ MTK_WFMDA_RX_DMA_EN;
558+ wifi_w32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, val);
developer3262bf82022-07-12 11:37:54 +0800559+}
560+
561 static void
developer69bcd592024-03-25 14:26:39 +0800562 mtk_wed_dma_disable(struct mtk_wed_device *dev)
developer3262bf82022-07-12 11:37:54 +0800563 {
developer69bcd592024-03-25 14:26:39 +0800564@@ -291,22 +497,26 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
developer3262bf82022-07-12 11:37:54 +0800565 MTK_WED_GLO_CFG_TX_DMA_EN |
566 MTK_WED_GLO_CFG_RX_DMA_EN);
567
568- wdma_m32(dev, MTK_WDMA_GLO_CFG,
569+ wdma_clr(dev, MTK_WDMA_GLO_CFG,
570 MTK_WDMA_GLO_CFG_TX_DMA_EN |
571 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
572- MTK_WDMA_GLO_CFG_RX_INFO2_PRERES, 0);
573+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
574
developer69bcd592024-03-25 14:26:39 +0800575 if (dev->hw->version == 1) {
developer3262bf82022-07-12 11:37:54 +0800576 regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
577- wdma_m32(dev, MTK_WDMA_GLO_CFG,
578- MTK_WDMA_GLO_CFG_RX_INFO3_PRERES, 0);
579+ wdma_clr(dev, MTK_WDMA_GLO_CFG,
580+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
581 } else {
582 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
583 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
584 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
developer69bcd592024-03-25 14:26:39 +0800585-
586- mtk_wed_set_512_support (dev, false)
developer3262bf82022-07-12 11:37:54 +0800587+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
588+ MTK_WED_WPDMA_RX_D_RX_DRV_EN);
589+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
590+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
591 }
developer69bcd592024-03-25 14:26:39 +0800592+
593+ mtk_wed_set_512_support (dev, false);
developer3262bf82022-07-12 11:37:54 +0800594 }
595
developer69bcd592024-03-25 14:26:39 +0800596 static void
597@@ -327,6 +537,14 @@ mtk_wed_stop(struct mtk_wed_device *dev)
developer3262bf82022-07-12 11:37:54 +0800598 wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
developer69bcd592024-03-25 14:26:39 +0800599 wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
600 wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
601+
602+ if (dev->hw->version == 1)
603+ return;
604+
605+ wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
606+ wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0);
607+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
608+
609 }
610
611 static void
612@@ -344,10 +562,23 @@ mtk_wed_detach(struct mtk_wed_device *dev)
developer3262bf82022-07-12 11:37:54 +0800613
614 mtk_wed_reset(dev, MTK_WED_RESET_WED);
developer29f66b32022-07-12 15:23:20 +0800615
developer69bcd592024-03-25 14:26:39 +0800616- mtk_wed_free_buffer(dev);
617+ if (mtk_wed_get_rx_capa(dev)) {
618+ wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
619+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
620+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
621+ }
developer29f66b32022-07-12 15:23:20 +0800622+
developer69bcd592024-03-25 14:26:39 +0800623+ mtk_wed_free_tx_buffer(dev);
developer3262bf82022-07-12 11:37:54 +0800624 mtk_wed_free_tx_rings(dev);
developer69bcd592024-03-25 14:26:39 +0800625
626- if (dev->wlan.bus_type == MTK_BUS_TYPE_PCIE) {
627+ if (mtk_wed_get_rx_capa(dev)) {
developerdc3f9fc2022-12-26 19:10:09 +0800628+ mtk_wed_wo_reset(dev);
developer3262bf82022-07-12 11:37:54 +0800629+ mtk_wed_free_rx_rings(dev);
developerdc3f9fc2022-12-26 19:10:09 +0800630+ mtk_wed_wo_exit(hw);
developer69bcd592024-03-25 14:26:39 +0800631+ mtk_wdma_rx_reset(dev);
developer29f66b32022-07-12 15:23:20 +0800632+ }
developerdc3f9fc2022-12-26 19:10:09 +0800633+
developer3d5faf22022-11-29 18:07:22 +0800634+ if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
developer3262bf82022-07-12 11:37:54 +0800635 wlan_node = dev->wlan.pci_dev->dev.of_node;
developer69bcd592024-03-25 14:26:39 +0800636 if (of_dma_is_coherent(wlan_node) && hw->hifsys)
developer3d5faf22022-11-29 18:07:22 +0800637 regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
developer69bcd592024-03-25 14:26:39 +0800638@@ -370,7 +601,7 @@ static void
639 mtk_wed_bus_init(struct mtk_wed_device *dev)
developer3d5faf22022-11-29 18:07:22 +0800640 {
developer69bcd592024-03-25 14:26:39 +0800641 switch (dev->wlan.bus_type) {
642- case MTK_BUS_TYPE_PCIE: {
643+ case MTK_WED_BUS_PCIE: {
644 struct device_node *np = dev->hw->eth->dev->of_node;
645 struct regmap *regs;
developer3262bf82022-07-12 11:37:54 +0800646
developer69bcd592024-03-25 14:26:39 +0800647@@ -402,7 +633,7 @@ mtk_wed_bus_init(struct mtk_wed_device *dev)
developer3d5faf22022-11-29 18:07:22 +0800648 MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
developer69bcd592024-03-25 14:26:39 +0800649 break;
650 }
651- case MTK_BUS_TYPE_AXI:
652+ case MTK_WED_BUS_AXI:
developer3d5faf22022-11-29 18:07:22 +0800653 wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
654 MTK_WED_WPDMA_INT_CTRL_SIG_SRC |
655 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0));
developer69bcd592024-03-25 14:26:39 +0800656@@ -424,6 +655,8 @@ mtk_wed_set_wpdma(struct mtk_wed_device *dev)
developer3262bf82022-07-12 11:37:54 +0800657 wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
658 wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
659 wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
developer3262bf82022-07-12 11:37:54 +0800660+ wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
661+ wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
developer3262bf82022-07-12 11:37:54 +0800662 }
developer69bcd592024-03-25 14:26:39 +0800663 }
developerdc3f9fc2022-12-26 19:10:09 +0800664
developer69bcd592024-03-25 14:26:39 +0800665@@ -470,6 +703,141 @@ mtk_wed_hw_init_early(struct mtk_wed_device *dev)
666
667 }
668
669+static int
670+mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
671+ int size)
developer3262bf82022-07-12 11:37:54 +0800672+{
developer69bcd592024-03-25 14:26:39 +0800673+ ring->desc = dma_alloc_coherent(dev->hw->dev,
674+ size * sizeof(*ring->desc),
675+ &ring->desc_phys, GFP_KERNEL);
676+ if (!ring->desc)
677+ return -ENOMEM;
developer3262bf82022-07-12 11:37:54 +0800678+
developer69bcd592024-03-25 14:26:39 +0800679+ ring->desc_size = sizeof(*ring->desc);
680+ ring->size = size;
681+	memset(ring->desc, 0, size * sizeof(*ring->desc));
developerdc3f9fc2022-12-26 19:10:09 +0800682+
developer69bcd592024-03-25 14:26:39 +0800683+ return 0;
684+}
developer3262bf82022-07-12 11:37:54 +0800685+
developer69bcd592024-03-25 14:26:39 +0800686+#define MTK_WED_MIOD_COUNT (MTK_WED_MIOD_ENTRY_CNT * MTK_WED_MIOD_CNT)
687+static int
688+mtk_wed_rro_alloc(struct mtk_wed_device *dev)
689+{
690+ struct device_node *np, *node = dev->hw->node;
691+ struct mtk_wed_ring *ring;
692+ struct resource res;
693+ int ret;
developer3262bf82022-07-12 11:37:54 +0800694+
developer69bcd592024-03-25 14:26:39 +0800695+ np = of_parse_phandle(node, "mediatek,wocpu_dlm", 0);
696+ if (!np)
697+ return -ENODEV;
698+
699+ ret = of_address_to_resource(np, 0, &res);
700+ if (ret)
701+ return ret;
702+
703+ dev->rro.rro_desc = ioremap(res.start, resource_size(&res));
704+
705+ ring = &dev->rro.ring;
706+
707+ dev->rro.miod_phys = res.start;
708+ dev->rro.fdbk_phys = MTK_WED_MIOD_COUNT + dev->rro.miod_phys;
709+
710+ if (mtk_wed_rro_ring_alloc(dev, ring, MTK_WED_RRO_QUE_CNT))
711+ return -ENOMEM;
712+
713+ return 0;
714+}
715+
716+static int
717+mtk_wed_rro_cfg(struct mtk_wed_device *dev)
718+{
719+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
720+ struct {
721+ struct {
722+ __le32 base;
723+ __le32 cnt;
724+ __le32 unit;
725+ } ring[2];
726+
727+ __le32 wed;
728+ u8 version;
729+ } req = {
730+ .ring[0] = {
731+ .base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE),
732+ .cnt = cpu_to_le32(MTK_WED_MIOD_CNT),
733+ .unit = cpu_to_le32(MTK_WED_MIOD_ENTRY_CNT),
734+ },
735+ .ring[1] = {
736+ .base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE +
737+ MTK_WED_MIOD_COUNT),
738+ .cnt = cpu_to_le32(MTK_WED_FB_CMD_CNT),
739+ .unit = cpu_to_le32(4),
740+ },
741+ };
742+
743+ return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, MTK_WED_WO_CMD_WED_CFG,
744+ &req, sizeof(req), true);
745+}
746+
747+static void
developer3262bf82022-07-12 11:37:54 +0800748+mtk_wed_rro_hw_init(struct mtk_wed_device *dev)
749+{
750+ wed_w32(dev, MTK_WED_RROQM_MIOD_CFG,
751+ FIELD_PREP(MTK_WED_RROQM_MIOD_MID_DW, 0x70 >> 2) |
752+ FIELD_PREP(MTK_WED_RROQM_MIOD_MOD_DW, 0x10 >> 2) |
753+ FIELD_PREP(MTK_WED_RROQM_MIOD_ENTRY_DW,
754+ MTK_WED_MIOD_ENTRY_CNT >> 2));
755+
developer69bcd592024-03-25 14:26:39 +0800756+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL0, dev->rro.miod_phys);
developer3262bf82022-07-12 11:37:54 +0800757+
758+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL1,
759+ FIELD_PREP(MTK_WED_RROQM_MIOD_CNT, MTK_WED_MIOD_CNT));
760+
developer69bcd592024-03-25 14:26:39 +0800761+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL0, dev->rro.fdbk_phys);
developer3262bf82022-07-12 11:37:54 +0800762+
763+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL1,
764+ FIELD_PREP(MTK_WED_RROQM_FDBK_CNT, MTK_WED_FB_CMD_CNT));
765+
766+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL2, 0);
767+
developer69bcd592024-03-25 14:26:39 +0800768+ wed_w32(dev, MTK_WED_RROQ_BASE_L, dev->rro.ring.desc_phys);
developer3262bf82022-07-12 11:37:54 +0800769+
770+ wed_set(dev, MTK_WED_RROQM_RST_IDX,
771+ MTK_WED_RROQM_RST_IDX_MIOD |
772+ MTK_WED_RROQM_RST_IDX_FDBK);
773+
774+ wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
775+
776+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL2, MTK_WED_MIOD_CNT -1);
777+
778+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
779+}
780+
781+static void
782+mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
783+{
784+ wed_w32(dev, MTK_WED_RESET, MTK_WED_RESET_RX_ROUTE_QM);
785+
developer69bcd592024-03-25 14:26:39 +0800786+ for (;;) {
787+ usleep_range(100, 200);
developer3262bf82022-07-12 11:37:54 +0800788+
789+ if (!(wed_r32(dev, MTK_WED_RESET) & MTK_WED_RESET_RX_ROUTE_QM))
790+ break;
developer69bcd592024-03-25 14:26:39 +0800791+ }
developer3262bf82022-07-12 11:37:54 +0800792+
793+ /* configure RX_ROUTE_QM */
794+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
795+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
796+ wed_set(dev, MTK_WED_RTQM_GLO_CFG,
797+ FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
798+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
799+
800+ /* enable RX_ROUTE_QM */
801+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
802+}
803+
developer69bcd592024-03-25 14:26:39 +0800804 static void
805 mtk_wed_hw_init(struct mtk_wed_device *dev)
developer3262bf82022-07-12 11:37:54 +0800806 {
developer69bcd592024-03-25 14:26:39 +0800807@@ -479,7 +847,7 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
808 dev->init_done = true;
809 mtk_wed_set_ext_int(dev, false);
developer3262bf82022-07-12 11:37:54 +0800810
developer69bcd592024-03-25 14:26:39 +0800811- wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);
812+ wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys);
813
814 wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
815
816@@ -487,7 +855,7 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
817 wed_w32(dev, MTK_WED_TX_BM_CTRL,
818 MTK_WED_TX_BM_CTRL_PAUSE |
819 FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
820- dev->buf_ring.size / 128) |
821+ dev->tx_buf_ring.size / 128) |
822 FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
823 MTK_WED_TX_RING_SIZE / 256));
824 wed_w32(dev, MTK_WED_TX_BM_TKID,
825@@ -503,9 +871,9 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
826 wed_w32(dev, MTK_WED_TX_BM_CTRL,
827 MTK_WED_TX_BM_CTRL_PAUSE |
828 FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
829- dev->buf_ring.size / 128) |
830+ dev->tx_buf_ring.size / 128) |
831 FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
832- dev->buf_ring.size / 128));
833+ dev->tx_buf_ring.size / 128));
834 wed_w32(dev, MTK_WED_TX_BM_TKID_V2,
835 FIELD_PREP(MTK_WED_TX_BM_TKID_START,
836 dev->wlan.token_start) |
837@@ -518,9 +886,9 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
838 wed_w32(dev, MTK_WED_TX_TKID_CTRL,
839 MTK_WED_TX_TKID_CTRL_PAUSE |
840 FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
841- dev->buf_ring.size / 128) |
842+ dev->tx_buf_ring.size / 128) |
843 FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
844- dev->buf_ring.size / 128));
845+ dev->tx_buf_ring.size / 128));
846 wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
847 FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
848 MTK_WED_TX_TKID_DYN_THR_HI);
849@@ -528,27 +896,42 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
850
851 mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
852
853- if (dev->hw->version == 1)
854+ if (dev->hw->version == 1) {
855 wed_set(dev, MTK_WED_CTRL,
856 MTK_WED_CTRL_WED_TX_BM_EN |
857 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
858- else
859+ } else {
860 wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
861+ /* rx hw init*/
862+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
863+ MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
864+ MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
865+
866+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
867+
868+ mtk_wed_rx_buffer_hw_init(dev);
869+ mtk_wed_rro_hw_init(dev);
870+ mtk_wed_route_qm_hw_init(dev);
871+ }
872
873 wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
developer3262bf82022-07-12 11:37:54 +0800874 }
875
876 static void
developer69bcd592024-03-25 14:26:39 +0800877-mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size)
878+mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size, bool tx)
developerdc3f9fc2022-12-26 19:10:09 +0800879 {
developer69bcd592024-03-25 14:26:39 +0800880 void *head = (void *)ring->desc;
developer3262bf82022-07-12 11:37:54 +0800881 int i;
882
developer3262bf82022-07-12 11:37:54 +0800883 for (i = 0; i < size; i++) {
developer69bcd592024-03-25 14:26:39 +0800884 struct mtk_wdma_desc *desc;
885+
886 desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size);
developer3262bf82022-07-12 11:37:54 +0800887 desc->buf0 = 0;
888- desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
developer69bcd592024-03-25 14:26:39 +0800889+ if (tx)
890+ desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
891+ else
892+ desc->ctrl = cpu_to_le32(MTK_WFDMA_DESC_CTRL_TO_HOST);
developer3262bf82022-07-12 11:37:54 +0800893 desc->buf1 = 0;
894 desc->info = 0;
developer69bcd592024-03-25 14:26:39 +0800895 }
896@@ -604,7 +987,8 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
897 if (!dev->tx_ring[i].desc)
developer3262bf82022-07-12 11:37:54 +0800898 continue;
899
developer69bcd592024-03-25 14:26:39 +0800900- mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE);
901+ mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE,
902+ true);
developer3262bf82022-07-12 11:37:54 +0800903 }
904
905 if (mtk_wed_poll_busy(dev))
developer69bcd592024-03-25 14:26:39 +0800906@@ -622,6 +1006,8 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
developerc89c5472022-08-02 13:00:04 +0800907 wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
908 wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
909
910+ mtk_wdma_rx_reset(dev);
911+
912 if (busy) {
913 mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
914 mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
developer69bcd592024-03-25 14:26:39 +0800915@@ -661,7 +1047,7 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
developer3262bf82022-07-12 11:37:54 +0800916
developer3262bf82022-07-12 11:37:54 +0800917 static int
918 mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
developer69bcd592024-03-25 14:26:39 +0800919- int size, u32 desc_size)
920+ int size, u32 desc_size, bool tx)
developer3262bf82022-07-12 11:37:54 +0800921 {
developer69bcd592024-03-25 14:26:39 +0800922 ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size,
923 &ring->desc_phys, GFP_KERNEL);
924@@ -670,18 +1056,23 @@ mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
developer3262bf82022-07-12 11:37:54 +0800925
developer69bcd592024-03-25 14:26:39 +0800926 ring->desc_size = desc_size;
developer3262bf82022-07-12 11:37:54 +0800927 ring->size = size;
developer69bcd592024-03-25 14:26:39 +0800928- mtk_wed_ring_reset(ring, size);
929+ mtk_wed_ring_reset(ring, size, tx);
developer3262bf82022-07-12 11:37:54 +0800930
931 return 0;
932 }
933
934 static int
935-mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
936+mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
937 {
developer69bcd592024-03-25 14:26:39 +0800938 u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
939- struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
940+ struct mtk_wed_ring *wdma;
developer3262bf82022-07-12 11:37:54 +0800941
developer69bcd592024-03-25 14:26:39 +0800942- if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size))
943+ if (idx >= ARRAY_SIZE(dev->rx_wdma))
944+ return -EINVAL;
945+
946+ wdma = &dev->rx_wdma[idx];
947+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size,
948+ true))
developer3262bf82022-07-12 11:37:54 +0800949 return -ENOMEM;
950
951 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
developer69bcd592024-03-25 14:26:39 +0800952@@ -698,6 +1089,62 @@ mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
developer3262bf82022-07-12 11:37:54 +0800953 return 0;
954 }
955
956+static int
957+mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
958+{
developer69bcd592024-03-25 14:26:39 +0800959+ u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
960+ struct mtk_wed_ring *wdma;
developer3262bf82022-07-12 11:37:54 +0800961+
developer69bcd592024-03-25 14:26:39 +0800962+ if (idx >= ARRAY_SIZE(dev->tx_wdma))
963+ return -EINVAL;
964+
965+ wdma = &dev->tx_wdma[idx];
developer3262bf82022-07-12 11:37:54 +0800966+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
developer69bcd592024-03-25 14:26:39 +0800967+ desc_size, true))
developer3262bf82022-07-12 11:37:54 +0800968+ return -ENOMEM;
969+
970+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
971+ wdma->desc_phys);
972+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
973+ size);
974+ wdma_w32(dev,
975+ MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
976+ wdma_w32(dev,
977+ MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);
978+
developer69bcd592024-03-25 14:26:39 +0800979+ if (!idx) {
980+ wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_BASE,
981+ wdma->desc_phys);
982+ wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_COUNT,
983+ size);
984+ wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_CPU_IDX,
985+ 0);
986+ wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_DMA_IDX,
987+ 0);
developer3262bf82022-07-12 11:37:54 +0800988+ }
989+
990+ return 0;
991+}
992+
developer69bcd592024-03-25 14:26:39 +0800993+static void
994+mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb,
995+ u32 reason, u32 hash)
developer3262bf82022-07-12 11:37:54 +0800996+{
developer69bcd592024-03-25 14:26:39 +0800997+ struct mtk_eth *eth = dev->hw->eth;
998+ struct ethhdr *eh;
developer3262bf82022-07-12 11:37:54 +0800999+
developer69bcd592024-03-25 14:26:39 +08001000+ if (!skb)
1001+ return;
developer3262bf82022-07-12 11:37:54 +08001002+
developer69bcd592024-03-25 14:26:39 +08001003+ if (reason != MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
1004+ return;
developer3262bf82022-07-12 11:37:54 +08001005+
developer69bcd592024-03-25 14:26:39 +08001006+ skb_set_mac_header(skb, 0);
1007+ eh = eth_hdr(skb);
1008+ skb->protocol = eh->h_proto;
1009+ mtk_ppe_check_skb(eth->ppe[dev->hw->index], skb, hash);
developer3262bf82022-07-12 11:37:54 +08001010+}
1011+
developer69bcd592024-03-25 14:26:39 +08001012 static void
1013 mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
1014 {
1015@@ -720,6 +1167,8 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
1016
1017 wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
1018 } else {
1019+ wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
1020+ GENMASK(1, 0));
1021 /* initail tx interrupt trigger */
1022 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
1023 MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
1024@@ -738,6 +1187,16 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
1025 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
1026 dev->wlan.txfree_tbit));
1027
1028+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
1029+ MTK_WED_WPDMA_INT_CTRL_RX0_EN |
1030+ MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
1031+ MTK_WED_WPDMA_INT_CTRL_RX1_EN |
1032+ MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
1033+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
1034+ dev->wlan.rx_tbit[0]) |
1035+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
1036+ dev->wlan.rx_tbit[1]));
developer3262bf82022-07-12 11:37:54 +08001037+
developer69bcd592024-03-25 14:26:39 +08001038 wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
1039 wed_set(dev, MTK_WED_WDMA_INT_CTRL,
1040 FIELD_PREP(MTK_WED_WDMA_INT_POLL_SRC_SEL,dev->wdma_idx));
1041@@ -775,9 +1234,15 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev)
1042 wdma_set(dev, MTK_WDMA_GLO_CFG,
1043 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
1044 } else {
1045+ int i;
developer3262bf82022-07-12 11:37:54 +08001046+
developer69bcd592024-03-25 14:26:39 +08001047 wed_set(dev, MTK_WED_WPDMA_CTRL,
1048 MTK_WED_WPDMA_CTRL_SDL1_FIXED);
1049
1050+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
1051+ MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
1052+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
developer3262bf82022-07-12 11:37:54 +08001053+
developer69bcd592024-03-25 14:26:39 +08001054 wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
1055 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
1056 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
1057@@ -785,18 +1250,26 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev)
1058 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
1059 MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
1060 MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
developer3262bf82022-07-12 11:37:54 +08001061+
developer69bcd592024-03-25 14:26:39 +08001062+ wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
1063+ MTK_WED_WPDMA_RX_D_RX_DRV_EN |
1064+ FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
1065+ FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
1066+ 0x2));
developer3262bf82022-07-12 11:37:54 +08001067+
developer69bcd592024-03-25 14:26:39 +08001068+ for (i = 0; i < MTK_WED_RX_QUEUES; i++)
1069+ mtk_wed_check_wfdma_rx_fill(dev, i);
1070 }
1071 }
1072
developer3262bf82022-07-12 11:37:54 +08001073 static void
1074 mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
1075 {
1076- u32 wdma_mask;
developer69bcd592024-03-25 14:26:39 +08001077 int i;
developer3262bf82022-07-12 11:37:54 +08001078
developer69bcd592024-03-25 14:26:39 +08001079- for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
1080- if (!dev->tx_wdma[i].desc)
developer3262bf82022-07-12 11:37:54 +08001081- mtk_wed_wdma_ring_setup(dev, i, 16);
developer69bcd592024-03-25 14:26:39 +08001082+ for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
1083+ if (!dev->rx_wdma[i].desc)
developer3262bf82022-07-12 11:37:54 +08001084+ mtk_wed_wdma_rx_ring_setup(dev, i, 16);
1085
developer3262bf82022-07-12 11:37:54 +08001086
developer69bcd592024-03-25 14:26:39 +08001087 mtk_wed_hw_init(dev);
1088@@ -813,9 +1286,22 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
1089 val |= BIT(0) | (BIT(1) * !!dev->hw->index);
developer3262bf82022-07-12 11:37:54 +08001090 regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
1091 } else {
developer33907d42022-09-19 14:33:58 +08001092- mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
developer3262bf82022-07-12 11:37:54 +08001093+ /* driver set mid ready and only once */
1094+ wed_w32(dev, MTK_WED_EXT_INT_MASK1,
1095+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
1096+ wed_w32(dev, MTK_WED_EXT_INT_MASK2,
1097+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
1098+
1099+ wed_r32(dev, MTK_WED_EXT_INT_MASK1);
1100+ wed_r32(dev, MTK_WED_EXT_INT_MASK2);
1101+
developer69bcd592024-03-25 14:26:39 +08001102+ if (mtk_wed_rro_cfg(dev))
developer3262bf82022-07-12 11:37:54 +08001103+ return;
developer69bcd592024-03-25 14:26:39 +08001104+
developer3262bf82022-07-12 11:37:54 +08001105 }
developerc89c5472022-08-02 13:00:04 +08001106
developer69bcd592024-03-25 14:26:39 +08001107+ mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
1108+
developerc89c5472022-08-02 13:00:04 +08001109 mtk_wed_dma_enable(dev);
1110 dev->running = true;
developer69bcd592024-03-25 14:26:39 +08001111 }
1112@@ -844,7 +1330,7 @@ mtk_wed_attach(struct mtk_wed_device *dev)
1113 if (!hw) {
1114 module_put(THIS_MODULE);
1115 ret = -ENODEV;
1116- goto out;
1117+ goto unlock;
developer3d5faf22022-11-29 18:07:22 +08001118 }
1119
developer69bcd592024-03-25 14:26:39 +08001120 device = dev->wlan.bus_type == MTK_WED_BUS_PCIE ?
1121@@ -856,26 +1342,40 @@ mtk_wed_attach(struct mtk_wed_device *dev)
developer3d5faf22022-11-29 18:07:22 +08001122 dev->dev = hw->dev;
1123 dev->irq = hw->irq;
developer69bcd592024-03-25 14:26:39 +08001124 dev->wdma_idx = hw->index;
1125+ dev->version = hw->version;
1126
1127 if (hw->eth->dma_dev == hw->eth->dev &&
1128 of_dma_is_coherent(hw->eth->dev->of_node))
1129 mtk_eth_set_dma_device(hw->eth, hw->dev);
developer3262bf82022-07-12 11:37:54 +08001130
developer69bcd592024-03-25 14:26:39 +08001131- ret = mtk_wed_buffer_alloc(dev);
developer3262bf82022-07-12 11:37:54 +08001132- if (ret) {
1133- mtk_wed_detach(dev);
developer69bcd592024-03-25 14:26:39 +08001134+ ret = mtk_wed_tx_buffer_alloc(dev);
developer3262bf82022-07-12 11:37:54 +08001135+ if (ret)
developer69bcd592024-03-25 14:26:39 +08001136 goto out;
1137- }
developer3262bf82022-07-12 11:37:54 +08001138+
developer69bcd592024-03-25 14:26:39 +08001139+ if (mtk_wed_get_rx_capa(dev)) {
1140+ ret = mtk_wed_rx_buffer_alloc(dev);
developer3262bf82022-07-12 11:37:54 +08001141+ if (ret)
developer69bcd592024-03-25 14:26:39 +08001142+ goto out;
developer3262bf82022-07-12 11:37:54 +08001143+
1144+ ret = mtk_wed_rro_alloc(dev);
1145+ if (ret)
developer69bcd592024-03-25 14:26:39 +08001146+ goto out;
1147+ }
developer3262bf82022-07-12 11:37:54 +08001148
1149 mtk_wed_hw_init_early(dev);
developer69bcd592024-03-25 14:26:39 +08001150
1151- if (hw->version == 1)
1152+ if (hw->version == 1) {
developer3262bf82022-07-12 11:37:54 +08001153 regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
1154 BIT(hw->index), 0);
developer69bcd592024-03-25 14:26:39 +08001155- else
1156+ } else {
1157 dev->rev_id = wed_r32(dev, MTK_WED_REV_ID);
developer3262bf82022-07-12 11:37:54 +08001158+ ret = mtk_wed_wo_init(hw);
developer69bcd592024-03-25 14:26:39 +08001159+ }
developer3262bf82022-07-12 11:37:54 +08001160
developer69bcd592024-03-25 14:26:39 +08001161 out:
developerdc3f9fc2022-12-26 19:10:09 +08001162+ if (ret)
1163+ mtk_wed_detach(dev);
developer69bcd592024-03-25 14:26:39 +08001164+unlock:
developer3262bf82022-07-12 11:37:54 +08001165 mutex_unlock(&hw_lock);
1166
developer69bcd592024-03-25 14:26:39 +08001167 return ret;
1168@@ -898,13 +1398,14 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
1169 * WDMA RX.
1170 */
developer3262bf82022-07-12 11:37:54 +08001171
developer69bcd592024-03-25 14:26:39 +08001172- BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring));
1173+ if (WARN_ON(idx >= ARRAY_SIZE(dev->tx_ring)))
1174+ return -EINVAL;
developer3262bf82022-07-12 11:37:54 +08001175
developer69bcd592024-03-25 14:26:39 +08001176 if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE,
1177- sizeof(*ring->desc)))
1178+ sizeof(*ring->desc), true))
developer3262bf82022-07-12 11:37:54 +08001179 return -ENOMEM;
1180
1181- if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
1182+ if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
1183 return -ENOMEM;
1184
1185 ring->reg_base = MTK_WED_RING_TX(idx);
developer69bcd592024-03-25 14:26:39 +08001186@@ -948,6 +1449,37 @@ mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
developer3262bf82022-07-12 11:37:54 +08001187 return 0;
1188 }
1189
1190+static int
1191+mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
1192+{
1193+ struct mtk_wed_ring *ring = &dev->rx_ring[idx];
1194+
developer69bcd592024-03-25 14:26:39 +08001195+ if (WARN_ON(idx >= ARRAY_SIZE(dev->rx_ring)))
1196+ return -EINVAL;
developer3262bf82022-07-12 11:37:54 +08001197+
developer69bcd592024-03-25 14:26:39 +08001198+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE,
1199+ sizeof(*ring->desc), false))
developer3262bf82022-07-12 11:37:54 +08001200+ return -ENOMEM;
1201+
1202+ if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
1203+ return -ENOMEM;
1204+
1205+ ring->reg_base = MTK_WED_RING_RX_DATA(idx);
1206+ ring->wpdma = regs;
developer69bcd592024-03-25 14:26:39 +08001207+ ring->flags |= MTK_WED_RING_CONFIGURED;
developer3262bf82022-07-12 11:37:54 +08001208+
1209+ /* WPDMA -> WED */
1210+ wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
1211+ wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_RX_RING_SIZE);
1212+
1213+ wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_BASE,
1214+ ring->desc_phys);
1215+ wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_COUNT,
1216+ MTK_WED_RX_RING_SIZE);
1217+
1218+ return 0;
1219+}
1220+
1221 static u32
1222 mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
1223 {
developer69bcd592024-03-25 14:26:39 +08001224@@ -1037,7 +1569,9 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
1225 static const struct mtk_wed_ops wed_ops = {
developer3262bf82022-07-12 11:37:54 +08001226 .attach = mtk_wed_attach,
1227 .tx_ring_setup = mtk_wed_tx_ring_setup,
developer3262bf82022-07-12 11:37:54 +08001228+ .rx_ring_setup = mtk_wed_rx_ring_setup,
developer69bcd592024-03-25 14:26:39 +08001229 .txfree_ring_setup = mtk_wed_txfree_ring_setup,
1230+ .msg_update = mtk_wed_mcu_msg_update,
developer3262bf82022-07-12 11:37:54 +08001231 .start = mtk_wed_start,
1232 .stop = mtk_wed_stop,
1233 .reset_dma = mtk_wed_reset_dma,
developer69bcd592024-03-25 14:26:39 +08001234@@ -1046,6 +1580,7 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
developer3262bf82022-07-12 11:37:54 +08001235 .irq_get = mtk_wed_irq_get,
1236 .irq_set_mask = mtk_wed_irq_set_mask,
1237 .detach = mtk_wed_detach,
1238+ .ppe_check = mtk_wed_ppe_check,
1239 };
1240 struct device_node *eth_np = eth->dev->of_node;
1241 struct platform_device *pdev;
1242diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
developer69bcd592024-03-25 14:26:39 +08001243index c9a20e4..1bfd96f 100644
developer3262bf82022-07-12 11:37:54 +08001244--- a/drivers/net/ethernet/mediatek/mtk_wed.h
1245+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
1246@@ -13,6 +13,7 @@
1247 #define MTK_WED_PKT_SIZE 1900
1248 #define MTK_WED_BUF_SIZE 2048
1249 #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
1250+#define MTK_WED_RX_RING_SIZE 1536
1251
1252 #define MTK_WED_TX_RING_SIZE 2048
1253 #define MTK_WED_WDMA_RING_SIZE 512
1254@@ -21,8 +22,15 @@
1255 #define MTK_WED_PER_GROUP_PKT 128
1256
1257 #define MTK_WED_FBUF_SIZE 128
1258+#define MTK_WED_MIOD_CNT 16
1259+#define MTK_WED_FB_CMD_CNT 1024
1260+#define MTK_WED_RRO_QUE_CNT 8192
1261+#define MTK_WED_MIOD_ENTRY_CNT 128
1262+
developer69bcd592024-03-25 14:26:39 +08001263+#define MTK_WED_MODULE_ID_WO 1
developer3262bf82022-07-12 11:37:54 +08001264
1265 struct mtk_eth;
1266+struct mtk_wed_wo;
1267
1268 struct mtk_wed_hw {
1269 struct device_node *node;
developer69bcd592024-03-25 14:26:39 +08001270@@ -35,6 +43,7 @@ struct mtk_wed_hw {
developer3262bf82022-07-12 11:37:54 +08001271 struct regmap *mirror;
1272 struct dentry *debugfs_dir;
1273 struct mtk_wed_device *wed_dev;
1274+ struct mtk_wed_wo *wed_wo;
1275 u32 debugfs_reg;
1276 u32 num_flows;
developer69bcd592024-03-25 14:26:39 +08001277 u8 version;
1278@@ -67,6 +76,18 @@ wed_r32(struct mtk_wed_device *dev, u32 reg)
developer3262bf82022-07-12 11:37:54 +08001279 return val;
1280 }
1281
1282+static inline u32
1283+wifi_r32(struct mtk_wed_device *dev, u32 reg)
1284+{
1285+ return readl(dev->wlan.base + reg);
1286+}
1287+
1288+static inline void
1289+wifi_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
1290+{
1291+ writel(val, dev->wlan.base + reg);
1292+}
1293+
1294 static inline void
1295 wdma_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
1296 {
developer69bcd592024-03-25 14:26:39 +08001297@@ -97,6 +118,24 @@ wpdma_tx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val)
1298 writel(val, dev->tx_ring[ring].wpdma + reg);
developer3262bf82022-07-12 11:37:54 +08001299 }
1300
1301+static inline u32
1302+wpdma_rx_r32(struct mtk_wed_device *dev, int ring, u32 reg)
1303+{
1304+ if (!dev->rx_ring[ring].wpdma)
1305+ return 0;
1306+
1307+ return readl(dev->rx_ring[ring].wpdma + reg);
1308+}
1309+
1310+static inline void
1311+wpdma_rx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val)
1312+{
1313+ if (!dev->rx_ring[ring].wpdma)
1314+ return;
1315+
1316+ writel(val, dev->rx_ring[ring].wpdma + reg);
1317+}
developer69bcd592024-03-25 14:26:39 +08001318+
1319 static inline u32
1320 wpdma_txfree_r32(struct mtk_wed_device *dev, u32 reg)
1321 {
1322@@ -149,4 +188,16 @@ static inline void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
developer3262bf82022-07-12 11:37:54 +08001323 }
1324 #endif
1325
1326+int wed_wo_hardware_init(struct mtk_wed_wo *wo, irq_handler_t isr);
developer29f66b32022-07-12 15:23:20 +08001327+void wed_wo_hardware_exit(struct mtk_wed_wo *wo);
developer3262bf82022-07-12 11:37:54 +08001328+int wed_wo_mcu_init(struct mtk_wed_wo *wo);
1329+int mtk_wed_exception_init(struct mtk_wed_wo *wo);
1330+void mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo, struct sk_buff *skb);
1331+int mtk_wed_mcu_cmd_sanity_check(struct mtk_wed_wo *wo, struct sk_buff *skb);
1332+void wed_wo_mcu_debugfs(struct mtk_wed_hw *hw, struct dentry *dir);
1333+void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo, struct sk_buff *skb);
1334+int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo,int to_id, int cmd,
1335+ const void *data, int len, bool wait_resp);
1336+int mtk_wed_wo_rx_poll(struct napi_struct *napi, int budget);
1337+
1338 #endif
1339diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ccif.c b/drivers/net/ethernet/mediatek/mtk_wed_ccif.c
1340new file mode 100644
developerdc3f9fc2022-12-26 19:10:09 +08001341index 0000000..951278b
developer3262bf82022-07-12 11:37:54 +08001342--- /dev/null
1343+++ b/drivers/net/ethernet/mediatek/mtk_wed_ccif.c
developerdc3f9fc2022-12-26 19:10:09 +08001344@@ -0,0 +1,133 @@
developer3262bf82022-07-12 11:37:54 +08001345+// SPDX-License-Identifier: GPL-2.0-only
1346+
1347+#include <linux/soc/mediatek/mtk_wed.h>
1348+#include <linux/of_address.h>
1349+#include <linux/mfd/syscon.h>
1350+#include <linux/of_irq.h>
1351+#include "mtk_wed_ccif.h"
1352+#include "mtk_wed_regs.h"
1353+#include "mtk_wed_wo.h"
1354+
1355+static inline void woif_set_isr(struct mtk_wed_wo *wo, u32 mask)
1356+{
1357+ woccif_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
1358+}
1359+
1360+static inline u32 woif_get_csr(struct mtk_wed_wo *wo)
1361+{
1362+ u32 val;
1363+
1364+ val = woccif_r32(wo, MTK_WED_WO_CCIF_RCHNUM);
1365+
1366+ return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
1367+}
1368+
1369+static inline void woif_set_ack(struct mtk_wed_wo *wo, u32 mask)
1370+{
1371+ woccif_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
1372+}
1373+
1374+static inline void woif_kickout(struct mtk_wed_wo *wo)
1375+{
1376+ woccif_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
1377+ woccif_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
1378+}
1379+
1380+static inline void woif_clear_int(struct mtk_wed_wo *wo, u32 mask)
1381+{
1382+ woccif_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
1383+ woccif_r32(wo, MTK_WED_WO_CCIF_RCHNUM);
1384+}
1385+
1386+int wed_wo_hardware_init(struct mtk_wed_wo *wo, irq_handler_t isr)
1387+{
1388+ static const struct wed_wo_drv_ops wo_drv_ops = {
1389+ .kickout = woif_kickout,
1390+ .set_ack = woif_set_ack,
1391+ .set_isr = woif_set_isr,
1392+ .get_csr = woif_get_csr,
1393+ .clear_int = woif_clear_int,
1394+ };
1395+ struct device_node *np, *node = wo->hw->node;
1396+ struct wed_wo_queue_regs queues;
1397+ struct regmap *regs;
1398+ int ret;
1399+
1400+ np = of_parse_phandle(node, "mediatek,ap2woccif", 0);
1401+ if (!np)
1402+ return -ENODEV;
1403+
developerdc3f9fc2022-12-26 19:10:09 +08001404+ regs = syscon_regmap_lookup_by_phandle(np, NULL);
1405+ if (!regs)
1406+ return -ENODEV;
developer3262bf82022-07-12 11:37:54 +08001407+
1408+ wo->drv_ops = &wo_drv_ops;
developerdc3f9fc2022-12-26 19:10:09 +08001409+
1410+ wo->ccif.regs = regs;
developer3262bf82022-07-12 11:37:54 +08001411+ wo->ccif.irq = irq_of_parse_and_map(np, 0);
1412+
1413+ spin_lock_init(&wo->ccif.irq_lock);
1414+
1415+ ret = request_irq(wo->ccif.irq, isr, IRQF_TRIGGER_HIGH,
1416+ "wo_ccif_isr", wo);
1417+ if (ret)
1418+ goto free_irq;
1419+
1420+ queues.desc_base = MTK_WED_WO_CCIF_DUMMY1;
1421+ queues.ring_size = MTK_WED_WO_CCIF_DUMMY2;
1422+ queues.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;
1423+ queues.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
1424+
1425+ ret = mtk_wed_wo_q_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
1426+ MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
1427+ &queues);
1428+
1429+ if (ret)
1430+ goto free_irq;
1431+
1432+ queues.desc_base = MTK_WED_WO_CCIF_DUMMY5;
1433+ queues.ring_size = MTK_WED_WO_CCIF_DUMMY6;
1434+ queues.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;
1435+ queues.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
1436+
1437+ ret = mtk_wed_wo_q_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
1438+ MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
1439+ &queues);
1440+ if (ret)
1441+ goto free_irq;
1442+
1443+ wo->ccif.q_int_mask = MTK_WED_WO_RXCH_INT_MASK;
1444+
1445+ ret = mtk_wed_wo_q_init(wo, mtk_wed_wo_rx_poll);
1446+ if (ret)
1447+ goto free_irq;
1448+
1449+ wo->ccif.q_exep_mask = MTK_WED_WO_EXCEPTION_INT_MASK;
1450+ wo->ccif.irqmask = MTK_WED_WO_ALL_INT_MASK;
1451+
1452+ /* rx queue irqmask */
1453+ wo->drv_ops->set_isr(wo, wo->ccif.irqmask);
1454+
1455+ return 0;
1456+
1457+free_irq:
developer29f66b32022-07-12 15:23:20 +08001458+ free_irq(wo->ccif.irq, wo);
developer3262bf82022-07-12 11:37:54 +08001459+
1460+ return ret;
1461+}
1462+
developer29f66b32022-07-12 15:23:20 +08001463+void wed_wo_hardware_exit(struct mtk_wed_wo *wo)
developer3262bf82022-07-12 11:37:54 +08001464+{
developer29f66b32022-07-12 15:23:20 +08001465+ wo->drv_ops->set_isr(wo, 0);
1466+
1467+ disable_irq(wo->ccif.irq);
1468+ free_irq(wo->ccif.irq, wo);
1469+
1470+ tasklet_disable(&wo->irq_tasklet);
1471+ netif_napi_del(&wo->napi);
1472+
developer36936c32022-09-30 12:55:06 +08001473+ mtk_wed_wo_q_tx_clean(wo, &wo->q_tx);
developer29f66b32022-07-12 15:23:20 +08001474+ mtk_wed_wo_q_rx_clean(wo, &wo->q_rx);
1475+ mtk_wed_wo_q_free(wo, &wo->q_tx);
1476+ mtk_wed_wo_q_free(wo, &wo->q_rx);
developer3262bf82022-07-12 11:37:54 +08001477+}
1478diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ccif.h b/drivers/net/ethernet/mediatek/mtk_wed_ccif.h
1479new file mode 100644
developer20126ad2022-09-12 14:42:56 +08001480index 0000000..68ade44
developer3262bf82022-07-12 11:37:54 +08001481--- /dev/null
1482+++ b/drivers/net/ethernet/mediatek/mtk_wed_ccif.h
1483@@ -0,0 +1,45 @@
1484+// SPDX-License-Identifier: GPL-2.0-only
1485+
1486+#ifndef __MTK_WED_CCIF_H
1487+#define __MTK_WED_CCIF_H
1488+
1489+#define MTK_WED_WO_RING_SIZE 256
1490+#define MTK_WED_WO_CMD_LEN 1504
1491+
1492+#define MTK_WED_WO_TXCH_NUM 0
1493+#define MTK_WED_WO_RXCH_NUM 1
1494+#define MTK_WED_WO_RXCH_WO_EXCEPTION 7
1495+
1496+#define MTK_WED_WO_TXCH_INT_MASK BIT(0)
1497+#define MTK_WED_WO_RXCH_INT_MASK BIT(1)
1498+#define MTK_WED_WO_EXCEPTION_INT_MASK BIT(7)
1499+#define MTK_WED_WO_ALL_INT_MASK MTK_WED_WO_RXCH_INT_MASK | \
1500+ MTK_WED_WO_EXCEPTION_INT_MASK
1501+
1502+#define MTK_WED_WO_CCIF_BUSY 0x004
1503+#define MTK_WED_WO_CCIF_START 0x008
1504+#define MTK_WED_WO_CCIF_TCHNUM 0x00c
1505+#define MTK_WED_WO_CCIF_RCHNUM 0x010
1506+#define MTK_WED_WO_CCIF_RCHNUM_MASK GENMASK(7, 0)
1507+
1508+#define MTK_WED_WO_CCIF_ACK 0x014
1509+#define MTK_WED_WO_CCIF_IRQ0_MASK 0x018
1510+#define MTK_WED_WO_CCIF_IRQ1_MASK 0x01c
1511+#define MTK_WED_WO_CCIF_DUMMY1 0x020
1512+#define MTK_WED_WO_CCIF_DUMMY2 0x024
1513+#define MTK_WED_WO_CCIF_DUMMY3 0x028
1514+#define MTK_WED_WO_CCIF_DUMMY4 0x02c
1515+#define MTK_WED_WO_CCIF_SHADOW1 0x030
1516+#define MTK_WED_WO_CCIF_SHADOW2 0x034
1517+#define MTK_WED_WO_CCIF_SHADOW3 0x038
1518+#define MTK_WED_WO_CCIF_SHADOW4 0x03c
1519+#define MTK_WED_WO_CCIF_DUMMY5 0x050
1520+#define MTK_WED_WO_CCIF_DUMMY6 0x054
1521+#define MTK_WED_WO_CCIF_DUMMY7 0x058
1522+#define MTK_WED_WO_CCIF_DUMMY8 0x05c
1523+#define MTK_WED_WO_CCIF_SHADOW5 0x060
1524+#define MTK_WED_WO_CCIF_SHADOW6 0x064
1525+#define MTK_WED_WO_CCIF_SHADOW7 0x068
1526+#define MTK_WED_WO_CCIF_SHADOW8 0x06c
1527+
1528+#endif
1529diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
developer69bcd592024-03-25 14:26:39 +08001530index f420f18..7d8be99 100644
developer3262bf82022-07-12 11:37:54 +08001531--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
1532+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
1533@@ -2,6 +2,7 @@
1534 /* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
1535
1536 #include <linux/seq_file.h>
1537+#include <linux/soc/mediatek/mtk_wed.h>
1538 #include "mtk_wed.h"
1539 #include "mtk_wed_regs.h"
1540
1541@@ -18,6 +19,8 @@ enum {
1542 DUMP_TYPE_WDMA,
1543 DUMP_TYPE_WPDMA_TX,
1544 DUMP_TYPE_WPDMA_TXFREE,
1545+ DUMP_TYPE_WPDMA_RX,
1546+ DUMP_TYPE_WED_RRO,
1547 };
1548
1549 #define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
1550@@ -36,6 +39,10 @@ enum {
1551
1552 #define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n)
1553 #define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE)
1554+#define DUMP_WPDMA_RX_RING(_n) DUMP_RING("WPDMA_RX" #_n, 0, DUMP_TYPE_WPDMA_RX, _n)
1555+#define DUMP_WED_RRO_RING(_base)DUMP_RING("WED_RRO_MIOD", MTK_##_base, DUMP_TYPE_WED_RRO)
1556+#define DUMP_WED_RRO_FDBK(_base)DUMP_RING("WED_RRO_FDBK", MTK_##_base, DUMP_TYPE_WED_RRO)
1557+
1558
1559 static void
1560 print_reg_val(struct seq_file *s, const char *name, u32 val)
1561@@ -58,6 +65,7 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
1562 cur->name);
1563 continue;
1564 case DUMP_TYPE_WED:
1565+ case DUMP_TYPE_WED_RRO:
1566 val = wed_r32(dev, cur->offset);
1567 break;
1568 case DUMP_TYPE_WDMA:
1569@@ -69,6 +77,9 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
1570 case DUMP_TYPE_WPDMA_TXFREE:
1571 val = wpdma_txfree_r32(dev, cur->offset);
1572 break;
1573+ case DUMP_TYPE_WPDMA_RX:
1574+ val = wpdma_rx_r32(dev, cur->base, cur->offset);
1575+ break;
1576 }
1577 print_reg_val(s, cur->name, val);
1578 }
1579@@ -132,6 +143,81 @@ wed_txinfo_show(struct seq_file *s, void *data)
1580 }
1581 DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
1582
1583+static int
1584+wed_rxinfo_show(struct seq_file *s, void *data)
1585+{
1586+ static const struct reg_dump regs[] = {
1587+ DUMP_STR("WPDMA RX"),
1588+ DUMP_WPDMA_RX_RING(0),
1589+ DUMP_WPDMA_RX_RING(1),
1590+
1591+ DUMP_STR("WPDMA RX"),
1592+ DUMP_WED(WED_WPDMA_RX_D_MIB(0)),
1593+ DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(0)),
1594+ DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(0)),
1595+ DUMP_WED(WED_WPDMA_RX_D_MIB(1)),
1596+ DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(1)),
1597+ DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(1)),
1598+ DUMP_WED(WED_WPDMA_RX_D_COHERENT_MIB),
1599+
1600+ DUMP_STR("WED RX"),
1601+ DUMP_WED_RING(WED_RING_RX_DATA(0)),
1602+ DUMP_WED_RING(WED_RING_RX_DATA(1)),
1603+
1604+ DUMP_STR("WED RRO"),
1605+ DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0),
1606+ DUMP_WED(WED_RROQM_MID_MIB),
1607+ DUMP_WED(WED_RROQM_MOD_MIB),
1608+ DUMP_WED(WED_RROQM_MOD_COHERENT_MIB),
1609+ DUMP_WED_RRO_FDBK(WED_RROQM_FDBK_CTRL0),
1610+ DUMP_WED(WED_RROQM_FDBK_IND_MIB),
1611+ DUMP_WED(WED_RROQM_FDBK_ENQ_MIB),
1612+ DUMP_WED(WED_RROQM_FDBK_ANC_MIB),
1613+ DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB),
1614+
1615+ DUMP_STR("WED Route QM"),
1616+ DUMP_WED(WED_RTQM_R2H_MIB(0)),
1617+ DUMP_WED(WED_RTQM_R2Q_MIB(0)),
1618+ DUMP_WED(WED_RTQM_Q2H_MIB(0)),
1619+ DUMP_WED(WED_RTQM_R2H_MIB(1)),
1620+ DUMP_WED(WED_RTQM_R2Q_MIB(1)),
1621+ DUMP_WED(WED_RTQM_Q2H_MIB(1)),
1622+ DUMP_WED(WED_RTQM_Q2N_MIB),
1623+ DUMP_WED(WED_RTQM_Q2B_MIB),
1624+ DUMP_WED(WED_RTQM_PFDBK_MIB),
1625+
1626+ DUMP_STR("WED WDMA TX"),
1627+ DUMP_WED(WED_WDMA_TX_MIB),
1628+ DUMP_WED_RING(WED_WDMA_RING_TX),
1629+
1630+ DUMP_STR("WDMA TX"),
1631+ DUMP_WDMA(WDMA_GLO_CFG),
1632+ DUMP_WDMA_RING(WDMA_RING_TX(0)),
1633+ DUMP_WDMA_RING(WDMA_RING_TX(1)),
1634+
1635+ DUMP_STR("WED RX BM"),
1636+ DUMP_WED(WED_RX_BM_BASE),
1637+ DUMP_WED(WED_RX_BM_RX_DMAD),
1638+ DUMP_WED(WED_RX_BM_PTR),
1639+ DUMP_WED(WED_RX_BM_TKID_MIB),
1640+ DUMP_WED(WED_RX_BM_BLEN),
1641+ DUMP_WED(WED_RX_BM_STS),
1642+ DUMP_WED(WED_RX_BM_INTF2),
1643+ DUMP_WED(WED_RX_BM_INTF),
1644+ DUMP_WED(WED_RX_BM_ERR_STS),
1645+ };
1646+
1647+ struct mtk_wed_hw *hw = s->private;
1648+ struct mtk_wed_device *dev = hw->wed_dev;
1649+
1650+ if (!dev)
1651+ return 0;
1652+
1653+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
1654+
1655+ return 0;
1656+}
1657+DEFINE_SHOW_ATTRIBUTE(wed_rxinfo);
1658
1659 static int
1660 mtk_wed_reg_set(void *data, u64 val)
1661@@ -175,4 +261,8 @@ void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
1662 debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
1663 debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
1664 debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
1665+ debugfs_create_file_unsafe("rxinfo", 0400, dir, hw, &wed_rxinfo_fops);
developer69bcd592024-03-25 14:26:39 +08001666+ if (hw->version != 1) {
developer3262bf82022-07-12 11:37:54 +08001667+ wed_wo_mcu_debugfs(hw, dir);
1668+ }
1669 }
1670diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
1671new file mode 100644
developer69bcd592024-03-25 14:26:39 +08001672index 0000000..be63406
developer3262bf82022-07-12 11:37:54 +08001673--- /dev/null
1674+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
developer69bcd592024-03-25 14:26:39 +08001675@@ -0,0 +1,604 @@
developer3262bf82022-07-12 11:37:54 +08001676+// SPDX-License-Identifier: GPL-2.0-only
1677+
1678+#include <linux/skbuff.h>
1679+#include <linux/debugfs.h>
1680+#include <linux/firmware.h>
1681+#include <linux/of_address.h>
1682+#include <linux/soc/mediatek/mtk_wed.h>
1683+#include "mtk_wed_regs.h"
1684+#include "mtk_wed_mcu.h"
1685+#include "mtk_wed_wo.h"
1686+
1687+struct sk_buff *
1688+mtk_wed_mcu_msg_alloc(struct mtk_wed_wo *wo,
1689+ const void *data, int data_len)
1690+{
1691+ const struct wed_wo_mcu_ops *ops = wo->mcu_ops;
1692+ int length = ops->headroom + data_len;
1693+ struct sk_buff *skb;
1694+
1695+ skb = alloc_skb(length, GFP_KERNEL);
1696+ if (!skb)
1697+ return NULL;
1698+
1699+ memset(skb->head, 0, length);
1700+ skb_reserve(skb, ops->headroom);
1701+
1702+ if (data && data_len)
1703+ skb_put_data(skb, data, data_len);
1704+
1705+ return skb;
1706+}
1707+
1708+struct sk_buff *
1709+mtk_wed_mcu_get_response(struct mtk_wed_wo *wo, unsigned long expires)
1710+{
1711+ unsigned long timeout;
1712+
1713+ if (!time_is_after_jiffies(expires))
1714+ return NULL;
1715+
1716+ timeout = expires - jiffies;
1717+ wait_event_timeout(wo->mcu.wait,
1718+ (!skb_queue_empty(&wo->mcu.res_q)),
1719+ timeout);
1720+
1721+ return skb_dequeue(&wo->mcu.res_q);
1722+}
1723+
1724+int
1725+mtk_wed_mcu_skb_send_and_get_msg(struct mtk_wed_wo *wo,
1726+ int to_id, int cmd, struct sk_buff *skb,
1727+ bool wait_resp, struct sk_buff **ret_skb)
1728+{
1729+ unsigned long expires;
1730+ int ret, seq;
1731+
1732+ if (ret_skb)
1733+ *ret_skb = NULL;
1734+
1735+ mutex_lock(&wo->mcu.mutex);
1736+
1737+ ret = wo->mcu_ops->mcu_skb_send_msg(wo, to_id, cmd, skb, &seq, wait_resp);
1738+ if (ret < 0)
1739+ goto out;
1740+
1741+ if (!wait_resp) {
1742+ ret = 0;
1743+ goto out;
1744+ }
1745+
1746+ expires = jiffies + wo->mcu.timeout;
1747+
1748+ do {
1749+ skb = mtk_wed_mcu_get_response(wo, expires);
1750+ ret = wo->mcu_ops->mcu_parse_response(wo, cmd, skb, seq);
1751+
1752+ if (!ret && ret_skb)
1753+ *ret_skb = skb;
1754+ else
1755+ dev_kfree_skb(skb);
1756+ } while (ret == -EAGAIN);
1757+
1758+out:
1759+ mutex_unlock(&wo->mcu.mutex);
1760+
1761+ return ret;
1762+}
1763+
developer69bcd592024-03-25 14:26:39 +08001764+int
1765+mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data, int len)
1766+{
1767+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
1768+
1769+ if (dev->hw->version == 1)
1770+ return 0;
1771+
1772+ if (WARN_ON(!wo))
1773+ return -ENODEV;
1774+
1775+ return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, id, data, len, true);
1776+}
1777+
developer3262bf82022-07-12 11:37:54 +08001778+void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo,
1779+ struct sk_buff *skb)
1780+{
1781+ skb_queue_tail(&wo->mcu.res_q, skb);
1782+ wake_up(&wo->mcu.wait);
1783+}
1784+
1785+static int mtk_wed_mcu_send_and_get_msg(struct mtk_wed_wo *wo,
1786+ int to_id, int cmd, const void *data, int len,
1787+ bool wait_resp, struct sk_buff **ret_skb)
1788+{
1789+ struct sk_buff *skb;
1790+
1791+ skb = mtk_wed_mcu_msg_alloc(wo, data, len);
1792+ if (!skb)
1793+ return -ENOMEM;
1794+
1795+ return mtk_wed_mcu_skb_send_and_get_msg(wo, to_id, cmd, skb, wait_resp, ret_skb);
1796+}
1797+
1798+int
1799+mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo,
1800+ int to_id, int cmd,
1801+ const void *data, int len, bool wait_resp)
1802+{
1803+ struct sk_buff *skb = NULL;
1804+ int ret = 0;
1805+
1806+ ret = mtk_wed_mcu_send_and_get_msg(wo, to_id, cmd, data,
1807+ len, wait_resp, &skb);
1808+ if (skb)
1809+ dev_kfree_skb(skb);
1810+
1811+ return ret;
1812+}
1813+
1814+int mtk_wed_exception_init(struct mtk_wed_wo *wo)
1815+{
1816+ struct wed_wo_exception *exp = &wo->exp;
1817+ struct {
1818+ u32 arg0;
1819+ u32 arg1;
1820+ }req;
1821+
1822+ exp->log_size = EXCEPTION_LOG_SIZE;
1823+ exp->log = kmalloc(exp->log_size, GFP_ATOMIC);
1824+ if (!exp->log)
1825+ return -ENOMEM;
1826+
1827+ memset(exp->log, 0, exp->log_size);
1828+ exp->phys = dma_map_single(wo->hw->dev, exp->log, exp->log_size,
1829+ DMA_FROM_DEVICE);
1830+
1831+ if (unlikely(dma_mapping_error(wo->hw->dev, exp->phys))) {
1832+ dev_info(wo->hw->dev, "dma map error\n");
1833+ goto free;
1834+ }
1835+
1836+ req.arg0 = (u32)exp->phys;
1837+ req.arg1 = (u32)exp->log_size;
1838+
developer69bcd592024-03-25 14:26:39 +08001839+ return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, MTK_WED_WO_CMD_EXCEPTION_INIT,
developer3262bf82022-07-12 11:37:54 +08001840+ &req, sizeof(req), false);
1841+
1842+free:
1843+ kfree(exp->log);
1844+ return -ENOMEM;
1845+}
1846+
1847+int
1848+mtk_wed_mcu_cmd_sanity_check(struct mtk_wed_wo *wo, struct sk_buff *skb)
1849+{
1850+ struct wed_cmd_hdr *hdr = (struct wed_cmd_hdr *)skb->data;
1851+
1852+ if (hdr->ver != 0)
1853+ return WARP_INVALID_PARA_STATUS;
1854+
1855+ if (skb->len < sizeof(struct wed_cmd_hdr))
1856+ return WARP_INVALID_PARA_STATUS;
1857+
1858+ if (skb->len != hdr->length)
1859+ return WARP_INVALID_PARA_STATUS;
1860+
1861+ return WARP_OK_STATUS;
1862+}
1863+
1864+void
1865+mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo, struct sk_buff *skb)
1866+{
developer8ec491f2022-08-22 19:48:44 +08001867+ struct mtk_wed_device *wed = wo->hw->wed_dev;
developer3262bf82022-07-12 11:37:54 +08001868+ struct wed_cmd_hdr *hdr = (struct wed_cmd_hdr *)skb->data;
1869+ struct wed_wo_log *record;
developer3d5faf22022-11-29 18:07:22 +08001870+ struct mtk_wed_wo_rx_stats *rxcnt;
developer3262bf82022-07-12 11:37:54 +08001871+ char *msg = (char *)(skb->data + sizeof(struct wed_cmd_hdr));
1872+ u16 msg_len = skb->len - sizeof(struct wed_cmd_hdr);
1873+ u32 i, cnt = 0;
1874+
1875+ switch (hdr->cmd_id) {
1876+ case WO_EVT_LOG_DUMP:
1877+ pr_info("[WO LOG]: %s\n", msg);
1878+ break;
1879+ case WO_EVT_PROFILING:
1880+ cnt = msg_len / (sizeof(struct wed_wo_log));
1881+ record = (struct wed_wo_log *) msg;
1882+ dev_info(wo->hw->dev, "[WO Profiling]: %d report arrived!\n", cnt);
1883+
1884+ for (i = 0 ; i < cnt ; i++) {
1885+ //PROFILE_STAT(wo->total, record[i].total);
1886+ //PROFILE_STAT(wo->mod, record[i].mod);
1887+ //PROFILE_STAT(wo->rro, record[i].rro);
1888+
1889+ dev_info(wo->hw->dev, "[WO Profiling]: SN:%u with latency: total=%u, rro:%u, mod:%u\n",
1890+ record[i].sn,
1891+ record[i].total,
1892+ record[i].rro,
1893+ record[i].mod);
1894+ }
1895+ break;
developer8ec491f2022-08-22 19:48:44 +08001896+ case WO_EVT_RXCNT_INFO:
1897+ cnt = *(u32 *)msg;
developer3d5faf22022-11-29 18:07:22 +08001898+ rxcnt = (struct mtk_wed_wo_rx_stats *)((u32 *)msg+1);
developer3262bf82022-07-12 11:37:54 +08001899+
developer8ec491f2022-08-22 19:48:44 +08001900+ for (i = 0; i < cnt; i++)
developer3d5faf22022-11-29 18:07:22 +08001901+ if (wed->wlan.update_wo_rx_stats)
1902+ wed->wlan.update_wo_rx_stats(wed, &rxcnt[i]);
developer8ec491f2022-08-22 19:48:44 +08001903+ break;
developer3262bf82022-07-12 11:37:54 +08001904+ default:
1905+ break;
1906+ }
1907+
1908+ dev_kfree_skb(skb);
1909+
1910+}
1911+
1912+static int
1913+mtk_wed_load_firmware(struct mtk_wed_wo *wo)
1914+{
1915+ struct fw_info {
1916+ __le32 decomp_crc;
1917+ __le32 decomp_len;
1918+ __le32 decomp_blk_sz;
1919+ u8 reserved[4];
1920+ __le32 addr;
1921+ __le32 len;
1922+ u8 feature_set;
1923+ u8 reserved1[15];
1924+ } __packed *region;
1925+
1926+ char *mcu;
1927+ const struct mtk_wed_fw_trailer *hdr;
1928+ static u8 shared[MAX_REGION_SIZE] = {0};
1929+ const struct firmware *fw;
1930+ int ret, i;
1931+ u32 ofs = 0;
1932+ u32 boot_cr, val;
1933+
developer740bee82023-10-16 10:58:43 +08001934+ if (of_device_is_compatible(wo->hw->node, "mediatek,mt7981-wed"))
1935+ mcu = MT7981_FIRMWARE_WO;
1936+ else
1937+ mcu = wo->hw->index ? MT7986_FIRMWARE_WO_2 :
1938+ MT7986_FIRMWARE_WO_1;
developer3262bf82022-07-12 11:37:54 +08001939+
1940+ ret = request_firmware(&fw, mcu, wo->hw->dev);
1941+ if (ret)
1942+ return ret;
1943+
1944+ hdr = (const struct mtk_wed_fw_trailer *)(fw->data + fw->size -
1945+ sizeof(*hdr));
1946+
1947+ dev_info(wo->hw->dev, "WO Firmware Version: %.10s, Build Time: %.15s\n",
1948+ hdr->fw_ver, hdr->build_date);
1949+
1950+ for (i = 0; i < hdr->n_region; i++) {
1951+ int j = 0;
1952+ region = (struct fw_info *)(fw->data + fw->size -
1953+ sizeof(*hdr) -
1954+ sizeof(*region) *
1955+ (hdr->n_region - i));
1956+
1957+ while (j < MAX_REGION_SIZE) {
1958+ struct mtk_wed_fw_region *wo_region;
1959+
1960+ wo_region = &wo->region[j];
1961+ if (!wo_region->addr)
1962+ break;
1963+
1964+ if (wo_region->addr_pa == region->addr) {
1965+ if (!wo_region->shared) {
1966+ memcpy(wo_region->addr,
1967+ fw->data + ofs, region->len);
1968+ } else if (!shared[j]) {
1969+ memcpy(wo_region->addr,
1970+ fw->data + ofs, region->len);
1971+ shared[j] = true;
1972+ }
1973+ }
1974+ j++;
1975+ }
1976+
1977+ if (j == __WO_REGION_MAX) {
1978+ ret = -ENOENT;
1979+ goto done;
1980+ }
1981+ ofs += region->len;
1982+ }
1983+
1984+ /* write the start address */
1985+ boot_cr = wo->hw->index ?
1986+ WOX_MCU_CFG_LS_WA_BOOT_ADDR_ADDR : WOX_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
1987+ wo_w32(wo, boot_cr, (wo->region[WO_REGION_EMI].addr_pa >> 16));
1988+
1989+ /* wo firmware reset */
1990+ wo_w32(wo, WOX_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00);
1991+
1992+ val = wo_r32(wo, WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR);
1993+
1994+ val |= wo->hw->index ? WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WA_CPU_RSTB_MASK :
1995+ WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WM_CPU_RSTB_MASK;
1996+
1997+ wo_w32(wo, WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val);
1998+
1999+done:
2000+ release_firmware(fw);
2001+
2002+ return ret;
2003+}
2004+
2005+static int
2006+mtk_wed_get_firmware_region(struct mtk_wed_wo *wo)
2007+{
2008+ struct device_node *node, *np = wo->hw->node;
2009+ struct mtk_wed_fw_region *region;
2010+ struct resource res;
2011+ const char *compat;
2012+ int i, ret;
2013+
2014+ static const char *const wo_region_compat[__WO_REGION_MAX] = {
2015+ [WO_REGION_EMI] = WOCPU_EMI_DEV_NODE,
2016+ [WO_REGION_ILM] = WOCPU_ILM_DEV_NODE,
2017+ [WO_REGION_DATA] = WOCPU_DATA_DEV_NODE,
2018+ [WO_REGION_BOOT] = WOCPU_BOOT_DEV_NODE,
2019+ };
2020+
2021+ for (i = 0; i < __WO_REGION_MAX; i++) {
2022+ region = &wo->region[i];
2023+ compat = wo_region_compat[i];
2024+
2025+ node = of_parse_phandle(np, compat, 0);
2026+ if (!node)
2027+ return -ENODEV;
2028+
2029+ ret = of_address_to_resource(node, 0, &res);
2030+ if (ret)
2031+ return ret;
2032+
2033+ region->addr_pa = res.start;
2034+ region->size = resource_size(&res);
2035+ region->addr = ioremap(region->addr_pa, region->size);
2036+
2037+ of_property_read_u32_index(node, "shared", 0, &region->shared);
2038+ }
2039+
2040+ return 0;
2041+}
2042+
2043+static int
2044+wo_mcu_send_message(struct mtk_wed_wo *wo,
2045+ int to_id, int cmd, struct sk_buff *skb,
2046+ int *wait_seq, bool wait_resp)
2047+{
2048+ struct wed_cmd_hdr *hdr;
2049+ u8 seq = 0;
2050+
2051+ /* TDO: make dynamic based on msg type */
2052+ wo->mcu.timeout = 20 * HZ;
2053+
2054+ if (wait_resp && wait_seq) {
2055+ seq = wo->mcu.msg_seq++ ;
2056+ *wait_seq = seq;
2057+ }
2058+
2059+ hdr = (struct wed_cmd_hdr *)skb_push(skb, sizeof(*hdr));
2060+
2061+ hdr->cmd_id = cmd;
2062+ hdr->length = cpu_to_le16(skb->len);
2063+ hdr->uni_id = seq;
2064+
developer69bcd592024-03-25 14:26:39 +08002065+ if (to_id == MTK_WED_MODULE_ID_WO)
developer3262bf82022-07-12 11:37:54 +08002066+ hdr->flag |= WARP_CMD_FLAG_FROM_TO_WO;
2067+
2068+ if (wait_resp && wait_seq)
2069+ hdr->flag |= WARP_CMD_FLAG_NEED_RSP;
2070+
2071+ return mtk_wed_wo_q_tx_skb(wo, &wo->q_tx, skb);
2072+}
2073+
2074+static int
2075+wo_mcu_parse_response(struct mtk_wed_wo *wo, int cmd,
2076+ struct sk_buff *skb, int seq)
2077+{
developer8ec491f2022-08-22 19:48:44 +08002078+ struct mtk_wed_device *wed = wo->hw->wed_dev;
developer3262bf82022-07-12 11:37:54 +08002079+ struct wed_cmd_hdr *hdr;
developer3d5faf22022-11-29 18:07:22 +08002080+ struct mtk_wed_wo_rx_stats *rxcnt = NULL;
developer8ec491f2022-08-22 19:48:44 +08002081+ u32 i, cnt = 0;
developer3262bf82022-07-12 11:37:54 +08002082+
2083+ if (!skb) {
2084+ dev_err(wo->hw->dev, "Message %08x (seq %d) timeout\n",
2085+ cmd, seq);
2086+ return -ETIMEDOUT;
2087+ }
2088+
2089+ hdr = (struct wed_cmd_hdr *)skb->data;
2090+ if (seq != hdr->uni_id) {
2091+ dev_err(wo->hw->dev, "Message %08x (seq %d) with not match uid(%d)\n",
2092+ cmd, seq, hdr->uni_id);
2093+ return -EAGAIN;
2094+ }
2095+
developer8ec491f2022-08-22 19:48:44 +08002096+ skb_pull(skb, sizeof(struct wed_cmd_hdr));
2097+
2098+ switch (cmd) {
developer3d5faf22022-11-29 18:07:22 +08002099+ case MTK_WED_WO_CMD_RXCNT_INFO:
developer8ec491f2022-08-22 19:48:44 +08002100+ cnt = *(u32 *)skb->data;
developer3d5faf22022-11-29 18:07:22 +08002101+ rxcnt = (struct mtk_wed_wo_rx_stats *)((u32 *)skb->data+1);
developer8ec491f2022-08-22 19:48:44 +08002102+
2103+ for (i = 0; i < cnt; i++)
developer3d5faf22022-11-29 18:07:22 +08002104+ if (wed->wlan.update_wo_rx_stats)
2105+ wed->wlan.update_wo_rx_stats(wed, &rxcnt[i]);
developer8ec491f2022-08-22 19:48:44 +08002106+ break;
2107+ default:
2108+ break;
2109+ }
developer3262bf82022-07-12 11:37:54 +08002110+
2111+ return 0;
2112+}
2113+
2114+int wed_wo_mcu_init(struct mtk_wed_wo *wo)
2115+{
2116+ static const struct wed_wo_mcu_ops wo_mcu_ops = {
2117+ .headroom = sizeof(struct wed_cmd_hdr),
2118+ .mcu_skb_send_msg = wo_mcu_send_message,
2119+ .mcu_parse_response = wo_mcu_parse_response,
2120+ /*TDO .mcu_restart = wo_mcu_restart,*/
2121+ };
2122+ unsigned long timeout = jiffies + FW_DL_TIMEOUT;
2123+ int ret;
2124+ u32 val;
2125+
2126+ wo->mcu_ops = &wo_mcu_ops;
2127+
2128+ ret = mtk_wed_get_firmware_region(wo);
2129+ if (ret)
2130+ return ret;
2131+
2132+ /* set dummy cr */
2133+ wed_w32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * WED_DUMMY_CR_FWDL,
2134+ wo->hw->index + 1);
2135+
2136+ ret = mtk_wed_load_firmware(wo);
2137+ if (ret)
2138+ return ret;
2139+
2140+ do {
2141+ /* get dummy cr */
2142+ val = wed_r32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * WED_DUMMY_CR_FWDL);
2143+ } while (val != 0 && !time_after(jiffies, timeout));
2144+
2145+ if (val)
2146+ return -EBUSY;
2147+
2148+ return 0;
2149+}
2150+
2151+static ssize_t
2152+mtk_wed_wo_ctrl(struct file *file,
2153+ const char __user *user_buf,
2154+ size_t count,
2155+ loff_t *ppos)
2156+{
2157+ struct mtk_wed_hw *hw = file->private_data;
2158+ struct mtk_wed_wo *wo = hw->wed_wo;
2159+ char buf[100], *cmd = NULL, *input[11] = {0};
2160+ char msgbuf[128] = {0};
2161+ struct wo_cmd_query *query = (struct wo_cmd_query *)msgbuf;
2162+ u32 cmd_id;
2163+ bool wait = false;
2164+ char *sub_str = NULL;
2165+ int input_idx = 0, input_total = 0, scan_num = 0;
2166+ char *p;
2167+
2168+ if (count > sizeof(buf))
2169+ return -EINVAL;
2170+
2171+ if (copy_from_user(buf, user_buf, count))
2172+ return -EFAULT;
2173+
2174+ if (count && buf[count - 1] == '\n')
2175+ buf[count - 1] = '\0';
2176+ else
2177+ buf[count] = '\0';
2178+
2179+ p = buf;
2180+
2181+ while ((sub_str = strsep(&p, " ")) != NULL) {
2182+ input[input_idx] = sub_str;
2183+ input_idx++;
2184+ input_total++;
2185+ }
2186+ cmd = input[0];
2187+ if (input_total == 1 && cmd) {
2188+ if (strncmp(cmd, "bainfo", strlen(cmd)) == 0) {
developer3d5faf22022-11-29 18:07:22 +08002189+ cmd_id = MTK_WED_WO_CMD_BA_INFO_DUMP;
developer3262bf82022-07-12 11:37:54 +08002190+ } else if (strncmp(cmd, "bactrl", strlen(cmd)) == 0) {
developer3d5faf22022-11-29 18:07:22 +08002191+ cmd_id = MTK_WED_WO_CMD_BA_CTRL_DUMP;
developer3262bf82022-07-12 11:37:54 +08002192+ } else if (strncmp(cmd, "fbcmdq", strlen(cmd)) == 0) {
developer3d5faf22022-11-29 18:07:22 +08002193+ cmd_id = MTK_WED_WO_CMD_FBCMD_Q_DUMP;
developer3262bf82022-07-12 11:37:54 +08002194+ } else if (strncmp(cmd, "logflush", strlen(cmd)) == 0) {
developer3d5faf22022-11-29 18:07:22 +08002195+ cmd_id = MTK_WED_WO_CMD_LOG_FLUSH;
developer3262bf82022-07-12 11:37:54 +08002196+ } else if (strncmp(cmd, "cpustat.dump", strlen(cmd)) == 0) {
developer3d5faf22022-11-29 18:07:22 +08002197+ cmd_id = MTK_WED_WO_CMD_CPU_STATS_DUMP;
developer3262bf82022-07-12 11:37:54 +08002198+ } else if (strncmp(cmd, "state", strlen(cmd)) == 0) {
developer3d5faf22022-11-29 18:07:22 +08002199+ cmd_id = MTK_WED_WO_CMD_WED_RX_STAT;
developer3262bf82022-07-12 11:37:54 +08002200+ } else if (strncmp(cmd, "prof_hit_dump", strlen(cmd)) == 0) {
2201+ //wo_profiling_report();
2202+ return count;
2203+ } else if (strncmp(cmd, "rxcnt_info", strlen(cmd)) == 0) {
developer3d5faf22022-11-29 18:07:22 +08002204+ cmd_id = MTK_WED_WO_CMD_RXCNT_INFO;
developer3262bf82022-07-12 11:37:54 +08002205+ wait = true;
2206+ } else {
2207+ pr_info("(%s) unknown comand string(%s)!\n", __func__, cmd);
2208+ return count;
2209+ }
2210+ } else if (input_total > 1) {
2211+ for (input_idx = 1 ; input_idx < input_total ; input_idx++) {
2212+ scan_num = sscanf(input[input_idx], "%u", &query->query0+(input_idx - 1));
2213+
2214+ if (scan_num < 1) {
2215+ pr_info("(%s) require more input!\n", __func__);
2216+ return count;
2217+ }
2218+ }
2219+ if(strncmp(cmd, "devinfo", strlen(cmd)) == 0) {
developer3d5faf22022-11-29 18:07:22 +08002220+ cmd_id = MTK_WED_WO_CMD_DEV_INFO_DUMP;
developer3262bf82022-07-12 11:37:54 +08002221+ } else if (strncmp(cmd, "bssinfo", strlen(cmd)) == 0) {
developer3d5faf22022-11-29 18:07:22 +08002222+ cmd_id = MTK_WED_WO_CMD_BSS_INFO_DUMP;
developer3262bf82022-07-12 11:37:54 +08002223+ } else if (strncmp(cmd, "starec", strlen(cmd)) == 0) {
developer3d5faf22022-11-29 18:07:22 +08002224+ cmd_id = MTK_WED_WO_CMD_STA_REC_DUMP;
developer3262bf82022-07-12 11:37:54 +08002225+ } else if (strncmp(cmd, "starec_ba", strlen(cmd)) == 0) {
developer3d5faf22022-11-29 18:07:22 +08002226+ cmd_id = MTK_WED_WO_CMD_STA_BA_DUMP;
developer3262bf82022-07-12 11:37:54 +08002227+ } else if (strncmp(cmd, "logctrl", strlen(cmd)) == 0) {
developer3d5faf22022-11-29 18:07:22 +08002228+ cmd_id = MTK_WED_WO_CMD_FW_LOG_CTRL;
developer3262bf82022-07-12 11:37:54 +08002229+ } else if (strncmp(cmd, "cpustat.en", strlen(cmd)) == 0) {
developer3d5faf22022-11-29 18:07:22 +08002230+ cmd_id = MTK_WED_WO_CMD_CPU_STATS_ENABLE;
developer3262bf82022-07-12 11:37:54 +08002231+ } else if (strncmp(cmd, "prof_conf", strlen(cmd)) == 0) {
developer3d5faf22022-11-29 18:07:22 +08002232+ cmd_id = MTK_WED_WO_CMD_PROF_CTRL;
developer3262bf82022-07-12 11:37:54 +08002233+ } else if (strncmp(cmd, "rxcnt_ctrl", strlen(cmd)) == 0) {
developer3d5faf22022-11-29 18:07:22 +08002234+ cmd_id = MTK_WED_WO_CMD_RXCNT_CTRL;
developer3262bf82022-07-12 11:37:54 +08002235+ } else if (strncmp(cmd, "dbg_set", strlen(cmd)) == 0) {
developer3d5faf22022-11-29 18:07:22 +08002236+ cmd_id = MTK_WED_WO_CMD_DBG_INFO;
developer3262bf82022-07-12 11:37:54 +08002237+ }
2238+ } else {
2239+ dev_info(hw->dev, "usage: echo cmd='cmd_str' > wo_write\n");
2240+ dev_info(hw->dev, "cmd_str value range:\n");
2241+ dev_info(hw->dev, "\tbainfo:\n");
2242+ dev_info(hw->dev, "\tbactrl:\n");
2243+ dev_info(hw->dev, "\tfbcmdq:\n");
2244+ dev_info(hw->dev, "\tlogflush:\n");
2245+ dev_info(hw->dev, "\tcpustat.dump:\n");
2246+ dev_info(hw->dev, "\tprof_hit_dump:\n");
2247+ dev_info(hw->dev, "\trxcnt_info:\n");
2248+ dev_info(hw->dev, "\tdevinfo:\n");
2249+ dev_info(hw->dev, "\tbssinfo:\n");
2250+ dev_info(hw->dev, "\tstarec:\n");
2251+ dev_info(hw->dev, "\tstarec_ba:\n");
2252+ dev_info(hw->dev, "\tlogctrl:\n");
2253+ dev_info(hw->dev, "\tcpustat.en:\n");
2254+ dev_info(hw->dev, "\tprof_conf:\n");
2255+ dev_info(hw->dev, "\trxcnt_ctrl:\n");
2256+ dev_info(hw->dev, "\tdbg_set [level] [category]:\n");
2257+ return count;
2258+ }
2259+
developer69bcd592024-03-25 14:26:39 +08002260+ mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, cmd_id, (void *)msgbuf, sizeof(struct wo_cmd_query), wait);
developer3262bf82022-07-12 11:37:54 +08002261+
2262+ return count;
2263+
2264+}
2265+
2266+static const struct file_operations fops_wo_ctrl = {
2267+ .write = mtk_wed_wo_ctrl,
2268+ .open = simple_open,
2269+ .llseek = default_llseek,
2270+};
2271+
2272+void wed_wo_mcu_debugfs(struct mtk_wed_hw *hw, struct dentry *dir)
2273+{
2274+ if (!dir)
2275+ return;
2276+
2277+ debugfs_create_file("wo_write", 0600, dir, hw, &fops_wo_ctrl);
2278+}
2279+
2280diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.h b/drivers/net/ethernet/mediatek/mtk_wed_mcu.h
2281new file mode 100644
developer69bcd592024-03-25 14:26:39 +08002282index 0000000..dbb17ae
developer3262bf82022-07-12 11:37:54 +08002283--- /dev/null
2284+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.h
developer740bee82023-10-16 10:58:43 +08002285@@ -0,0 +1,97 @@
developer3262bf82022-07-12 11:37:54 +08002286+// SPDX-License-Identifier: GPL-2.0-only
2287+
2288+#ifndef __MTK_WED_MCU_H
2289+#define __MTK_WED_MCU_H
2290+
2291+#define EXCEPTION_LOG_SIZE 32768
2292+#define WOCPU_MCUSYS_RESET_ADDR 0x15194050
2293+#define WOCPU_WO0_MCUSYS_RESET_MASK 0x20
2294+#define WOCPU_WO1_MCUSYS_RESET_MASK 0x1
2295+
2296+#define WARP_INVALID_LENGTH_STATUS (-2)
2297+#define WARP_NULL_POINTER_STATUS (-3)
2298+#define WARP_INVALID_PARA_STATUS (-4)
2299+#define WARP_NOT_HANDLE_STATUS (-5)
2300+#define WARP_FAIL_STATUS (-1)
2301+#define WARP_OK_STATUS (0)
2302+#define WARP_ALREADY_DONE_STATUS (1)
2303+
developer740bee82023-10-16 10:58:43 +08002304+#define MT7981_FIRMWARE_WO "mediatek/mt7981_wo.bin"
developer3262bf82022-07-12 11:37:54 +08002305+#define MT7986_FIRMWARE_WO_1 "mediatek/mt7986_wo_0.bin"
2306+#define MT7986_FIRMWARE_WO_2 "mediatek/mt7986_wo_1.bin"
2307+
2308+#define WOCPU_EMI_DEV_NODE "mediatek,wocpu_emi"
2309+#define WOCPU_ILM_DEV_NODE "mediatek,wocpu_ilm"
2310+#define WOCPU_DLM_DEV_NODE "mediatek,wocpu_dlm"
2311+#define WOCPU_DATA_DEV_NODE "mediatek,wocpu_data"
2312+#define WOCPU_BOOT_DEV_NODE "mediatek,wocpu_boot"
2313+
2314+#define FW_DL_TIMEOUT ((3000 * HZ) / 1000)
2315+#define WOCPU_TIMEOUT ((1000 * HZ) / 1000)
2316+
2317+#define MAX_REGION_SIZE 3
2318+
2319+#define WOX_MCU_CFG_LS_BASE 0 /*0x15194000*/
2320+
2321+#define WOX_MCU_CFG_LS_HW_VER_ADDR (WOX_MCU_CFG_LS_BASE + 0x000) // 4000
2322+#define WOX_MCU_CFG_LS_FW_VER_ADDR (WOX_MCU_CFG_LS_BASE + 0x004) // 4004
2323+#define WOX_MCU_CFG_LS_CFG_DBG1_ADDR (WOX_MCU_CFG_LS_BASE + 0x00C) // 400C
2324+#define WOX_MCU_CFG_LS_CFG_DBG2_ADDR (WOX_MCU_CFG_LS_BASE + 0x010) // 4010
2325+#define WOX_MCU_CFG_LS_WF_MCCR_ADDR (WOX_MCU_CFG_LS_BASE + 0x014) // 4014
2326+#define WOX_MCU_CFG_LS_WF_MCCR_SET_ADDR (WOX_MCU_CFG_LS_BASE + 0x018) // 4018
2327+#define WOX_MCU_CFG_LS_WF_MCCR_CLR_ADDR (WOX_MCU_CFG_LS_BASE + 0x01C) // 401C
2328+#define WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR (WOX_MCU_CFG_LS_BASE + 0x050) // 4050
2329+#define WOX_MCU_CFG_LS_WM_BOOT_ADDR_ADDR (WOX_MCU_CFG_LS_BASE + 0x060) // 4060
2330+#define WOX_MCU_CFG_LS_WA_BOOT_ADDR_ADDR (WOX_MCU_CFG_LS_BASE + 0x064) // 4064
2331+
2332+#define WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WM_CPU_RSTB_MASK BIT(5)
2333+#define WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WA_CPU_RSTB_MASK BIT(0)
2334+
2335+
2336+enum wo_event_id {
2337+ WO_EVT_LOG_DUMP = 0x1,
2338+ WO_EVT_PROFILING = 0x2,
2339+ WO_EVT_RXCNT_INFO = 0x3
2340+};
2341+
developer3262bf82022-07-12 11:37:54 +08002342+enum wo_state {
2343+ WO_STATE_UNDEFINED = 0x0,
2344+ WO_STATE_INIT = 0x1,
2345+ WO_STATE_ENABLE = 0x2,
2346+ WO_STATE_DISABLE = 0x3,
2347+ WO_STATE_HALT = 0x4,
2348+ WO_STATE_GATING = 0x5,
2349+ WO_STATE_SER_RESET = 0x6,
2350+ WO_STATE_WF_RESET = 0x7,
2351+ WO_STATE_END
2352+};
2353+
2354+enum wo_done_state {
2355+ WOIF_UNDEFINED = 0,
2356+ WOIF_DISABLE_DONE = 1,
2357+ WOIF_TRIGGER_ENABLE = 2,
2358+ WOIF_ENABLE_DONE = 3,
2359+ WOIF_TRIGGER_GATING = 4,
2360+ WOIF_GATING_DONE = 5,
2361+ WOIF_TRIGGER_HALT = 6,
2362+ WOIF_HALT_DONE = 7,
2363+};
2364+
2365+enum wed_dummy_cr_idx {
2366+ WED_DUMMY_CR_FWDL = 0,
2367+ WED_DUMMY_CR_WO_STATUS = 1
2368+};
2369+
2370+struct mtk_wed_fw_trailer {
2371+ u8 chip_id;
2372+ u8 eco_code;
2373+ u8 n_region;
2374+ u8 format_ver;
2375+ u8 format_flag;
2376+ u8 reserved[2];
2377+ char fw_ver[10];
2378+ char build_date[15];
2379+ u32 crc;
2380+};
2381+
2382+#endif
2383diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
developer69bcd592024-03-25 14:26:39 +08002384index e797e9d..a79305f 100644
developer3262bf82022-07-12 11:37:54 +08002385--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2386+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
developer69bcd592024-03-25 14:26:39 +08002387@@ -4,6 +4,7 @@
developer3262bf82022-07-12 11:37:54 +08002388 #ifndef __MTK_WED_REGS_H
2389 #define __MTK_WED_REGS_H
2390
2391+#define MTK_WFDMA_DESC_CTRL_TO_HOST BIT(8)
developer69bcd592024-03-25 14:26:39 +08002392 #define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0)
2393 #define MTK_WDMA_DESC_CTRL_LEN1_V2 GENMASK(13, 0)
2394 #define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15)
2395@@ -11,6 +12,7 @@
developer3262bf82022-07-12 11:37:54 +08002396 #define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16)
2397 #define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30)
2398 #define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31)
2399+#define MTK_WED_RX_BM_TOKEN GENMASK(31, 16)
2400
2401 struct mtk_wdma_desc {
2402 __le32 buf0;
developer69bcd592024-03-25 14:26:39 +08002403@@ -37,6 +39,8 @@ struct mtk_wdma_desc {
developer3262bf82022-07-12 11:37:54 +08002404 #define MTK_WED_RESET_WED_TX_DMA BIT(12)
2405 #define MTK_WED_RESET_WDMA_RX_DRV BIT(17)
2406 #define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
2407+#define MTK_WED_RESET_RX_RRO_QM BIT(20)
2408+#define MTK_WED_RESET_RX_ROUTE_QM BIT(21)
2409 #define MTK_WED_RESET_WED BIT(31)
2410
2411 #define MTK_WED_CTRL 0x00c
developer69bcd592024-03-25 14:26:39 +08002412@@ -48,8 +52,12 @@ struct mtk_wdma_desc {
developer3262bf82022-07-12 11:37:54 +08002413 #define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
2414 #define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
2415 #define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11)
2416-#define MTK_WED_CTRL_RESERVE_EN BIT(12)
2417-#define MTK_WED_CTRL_RESERVE_BUSY BIT(13)
2418+#define MTK_WED_CTRL_WED_RX_BM_EN BIT(12)
2419+#define MTK_WED_CTRL_WED_RX_BM_BUSY BIT(13)
2420+#define MTK_WED_CTRL_RX_RRO_QM_EN BIT(14)
2421+#define MTK_WED_CTRL_RX_RRO_QM_BUSY BIT(15)
2422+#define MTK_WED_CTRL_RX_ROUTE_QM_EN BIT(16)
2423+#define MTK_WED_CTRL_RX_ROUTE_QM_BUSY BIT(17)
2424 #define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
2425 #define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25)
2426 #define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
developer69bcd592024-03-25 14:26:39 +08002427@@ -64,8 +72,8 @@ struct mtk_wdma_desc {
developer3262bf82022-07-12 11:37:54 +08002428 #define MTK_WED_EXT_INT_STATUS_TX_TKID_LO_TH BIT(10)
2429 #define MTK_WED_EXT_INT_STATUS_TX_TKID_HI_TH BIT(11)
2430 #endif
2431-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
2432-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
2433+#define MTK_WED_EXT_INT_STATUS_RX_FREE_AT_EMPTY BIT(12)
2434+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_DMAD_ER BIT(13)
2435 #define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
2436 #define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
2437 #define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
developer69bcd592024-03-25 14:26:39 +08002438@@ -82,8 +90,8 @@ struct mtk_wdma_desc {
developer3262bf82022-07-12 11:37:54 +08002439 #define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
2440 MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
2441 MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \
2442- MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH | \
2443- MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH | \
2444+ MTK_WED_EXT_INT_STATUS_RX_FREE_AT_EMPTY | \
2445+ MTK_WED_EXT_INT_STATUS_RX_FBUF_DMAD_ER | \
2446 MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
2447 MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
developer740bee82023-10-16 10:58:43 +08002448 MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \
developer69bcd592024-03-25 14:26:39 +08002449@@ -91,6 +99,8 @@ struct mtk_wdma_desc {
developer3262bf82022-07-12 11:37:54 +08002450 MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR)
2451
2452 #define MTK_WED_EXT_INT_MASK 0x028
2453+#define MTK_WED_EXT_INT_MASK1 0x02c
2454+#define MTK_WED_EXT_INT_MASK2 0x030
2455
2456 #define MTK_WED_STATUS 0x060
2457 #define MTK_WED_STATUS_TX GENMASK(15, 8)
developer69bcd592024-03-25 14:26:39 +08002458@@ -172,6 +182,9 @@ struct mtk_wdma_desc {
developer3262bf82022-07-12 11:37:54 +08002459
2460 #define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10)
2461
2462+#define MTK_WED_RING_RX_DATA(_n) (0x420 + (_n) * 0x10)
2463+
2464+#define MTK_WED_SCR0 0x3c0
2465 #define MTK_WED_WPDMA_INT_TRIGGER 0x504
2466 #define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1)
2467 #define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4)
developer69bcd592024-03-25 14:26:39 +08002468@@ -228,13 +241,19 @@ struct mtk_wdma_desc {
developer3262bf82022-07-12 11:37:54 +08002469
2470 #define MTK_WED_WPDMA_INT_CTRL_TX 0x530
2471 #define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN BIT(0)
2472-#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR BIT(1)
2473+#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR BIT(1)
2474 #define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG GENMASK(6, 2)
2475 #define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN BIT(8)
2476 #define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR BIT(9)
2477 #define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG GENMASK(14, 10)
2478
2479 #define MTK_WED_WPDMA_INT_CTRL_RX 0x534
2480+#define MTK_WED_WPDMA_INT_CTRL_RX0_EN BIT(0)
2481+#define MTK_WED_WPDMA_INT_CTRL_RX0_CLR BIT(1)
2482+#define MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG GENMASK(6, 2)
2483+#define MTK_WED_WPDMA_INT_CTRL_RX1_EN BIT(8)
2484+#define MTK_WED_WPDMA_INT_CTRL_RX1_CLR BIT(9)
2485+#define MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG GENMASK(14, 10)
2486
2487 #define MTK_WED_WPDMA_INT_CTRL_TX_FREE 0x538
2488 #define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN BIT(0)
developer69bcd592024-03-25 14:26:39 +08002489@@ -259,13 +278,40 @@ struct mtk_wdma_desc {
developer3262bf82022-07-12 11:37:54 +08002490 #define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
2491 #define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
2492
2493+#define MTK_WED_WPDMA_RX_MIB(_n) (0x5e0 + (_n) * 4)
2494+#define MTK_WED_WPDMA_RX_COHERENT_MIB(_n) (0x5f0 + (_n) * 4)
2495+
2496 #define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10)
2497 #define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10)
developerc89c5472022-08-02 13:00:04 +08002498+#define MTK_WED_WPDMA_RING_RX_DATA(_n) (0x730 + (_n) * 0x10)
developer3262bf82022-07-12 11:37:54 +08002499+
2500+#define MTK_WED_WPDMA_RX_D_GLO_CFG 0x75c
2501+#define MTK_WED_WPDMA_RX_D_RX_DRV_EN BIT(0)
2502+#define MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL GENMASK(11, 7)
2503+#define MTK_WED_WPDMA_RX_D_RXD_READ_LEN GENMASK(31, 24)
2504+
2505+#define MTK_WED_WPDMA_RX_D_RST_IDX 0x760
developerc89c5472022-08-02 13:00:04 +08002506+#define MTK_WED_WPDMA_RX_D_RST_CRX_IDX GENMASK(17, 16)
2507+#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX GENMASK(25, 24)
developer3262bf82022-07-12 11:37:54 +08002508+
2509+#define MTK_WED_WPDMA_RX_GLO_CFG 0x76c
2510+#define MTK_WED_WPDMA_RX_RING 0x770
2511+
2512+#define MTK_WED_WPDMA_RX_D_MIB(_n) (0x774 + (_n) * 4)
2513+#define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n) (0x784 + (_n) * 4)
2514+#define MTK_WED_WPDMA_RX_D_COHERENT_MIB 0x78c
2515+
2516+#define MTK_WED_WDMA_RING_TX 0x800
2517+
2518+#define MTK_WED_WDMA_TX_MIB 0x810
2519+
2520+
2521 #define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
2522 #define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
2523
2524 #define MTK_WED_WDMA_GLO_CFG 0xa04
2525 #define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
2526+#define MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK BIT(1)
2527 #define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2)
2528 #define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
2529 #define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4)
developer69bcd592024-03-25 14:26:39 +08002530@@ -309,6 +355,20 @@ struct mtk_wdma_desc {
developer3262bf82022-07-12 11:37:54 +08002531 #define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4)
2532 #define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4)
2533
2534+#define MTK_WED_RX_BM_RX_DMAD 0xd80
2535+#define MTK_WED_RX_BM_RX_DMAD_SDL0 GENMASK(13, 0)
2536+
2537+#define MTK_WED_RX_BM_BASE 0xd84
2538+#define MTK_WED_RX_BM_INIT_PTR 0xd88
2539+#define MTK_WED_RX_BM_SW_TAIL GENMASK(15, 0)
2540+#define MTK_WED_RX_BM_INIT_SW_TAIL BIT(16)
2541+
2542+#define MTK_WED_RX_PTR 0xd8c
2543+
2544+#define MTK_WED_RX_BM_DYN_ALLOC_TH 0xdb4
2545+#define MTK_WED_RX_BM_DYN_ALLOC_TH_H GENMASK(31, 16)
2546+#define MTK_WED_RX_BM_DYN_ALLOC_TH_L GENMASK(15, 0)
2547+
2548 #define MTK_WED_RING_OFS_BASE 0x00
2549 #define MTK_WED_RING_OFS_COUNT 0x04
2550 #define MTK_WED_RING_OFS_CPU_IDX 0x08
developer69bcd592024-03-25 14:26:39 +08002551@@ -319,12 +379,13 @@ struct mtk_wdma_desc {
developer29f66b32022-07-12 15:23:20 +08002552
2553 #define MTK_WDMA_GLO_CFG 0x204
2554 #define MTK_WDMA_GLO_CFG_TX_DMA_EN BIT(0)
2555+#define MTK_WDMA_GLO_CFG_TX_DMA_BUSY BIT(1)
2556 #define MTK_WDMA_GLO_CFG_RX_DMA_EN BIT(2)
2557+#define MTK_WDMA_GLO_CFG_RX_DMA_BUSY BIT(3)
2558 #define MTK_WDMA_GLO_CFG_RX_INFO3_PRERES BIT(26)
2559 #define MTK_WDMA_GLO_CFG_RX_INFO2_PRERES BIT(27)
2560 #define MTK_WDMA_GLO_CFG_RX_INFO1_PRERES BIT(28)
developerc89c5472022-08-02 13:00:04 +08002561
2562-
2563 #define MTK_WDMA_RESET_IDX 0x208
2564 #define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0)
2565 #define MTK_WDMA_RESET_IDX_RX GENMASK(17, 16)
developer69bcd592024-03-25 14:26:39 +08002566@@ -348,4 +409,70 @@ struct mtk_wdma_desc {
developer3262bf82022-07-12 11:37:54 +08002567 /* DMA channel mapping */
2568 #define HIFSYS_DMA_AG_MAP 0x008
2569
2570+#define MTK_WED_RTQM_GLO_CFG 0xb00
2571+#define MTK_WED_RTQM_BUSY BIT(1)
2572+#define MTK_WED_RTQM_Q_RST BIT(2)
2573+#define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5)
2574+#define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20)
2575+
2576+#define MTK_WED_RTQM_R2H_MIB(_n) (0xb70 + (_n) * 0x4)
2577+#define MTK_WED_RTQM_R2Q_MIB(_n) (0xb78 + (_n) * 0x4)
2578+#define MTK_WED_RTQM_Q2N_MIB 0xb80
2579+#define MTK_WED_RTQM_Q2H_MIB(_n) (0xb84 + (_n) * 0x4)
2580+
2581+#define MTK_WED_RTQM_Q2B_MIB 0xb8c
2582+#define MTK_WED_RTQM_PFDBK_MIB 0xb90
2583+
2584+#define MTK_WED_RROQM_GLO_CFG 0xc04
2585+#define MTK_WED_RROQM_RST_IDX 0xc08
2586+#define MTK_WED_RROQM_RST_IDX_MIOD BIT(0)
2587+#define MTK_WED_RROQM_RST_IDX_FDBK BIT(4)
2588+
2589+#define MTK_WED_RROQM_MIOD_CTRL0 0xc40
2590+#define MTK_WED_RROQM_MIOD_CTRL1 0xc44
2591+#define MTK_WED_RROQM_MIOD_CNT GENMASK(11, 0)
2592+
2593+#define MTK_WED_RROQM_MIOD_CTRL2 0xc48
2594+#define MTK_WED_RROQM_MIOD_CTRL3 0xc4c
2595+
2596+#define MTK_WED_RROQM_FDBK_CTRL0 0xc50
2597+#define MTK_WED_RROQM_FDBK_CTRL1 0xc54
2598+#define MTK_WED_RROQM_FDBK_CNT GENMASK(11, 0)
2599+
2600+#define MTK_WED_RROQM_FDBK_CTRL2 0xc58
2601+
2602+#define MTK_WED_RROQ_BASE_L 0xc80
2603+#define MTK_WED_RROQ_BASE_H 0xc84
2604+
developer3262bf82022-07-12 11:37:54 +08002605+#define MTK_WED_RROQM_MIOD_CFG 0xc8c
2606+#define MTK_WED_RROQM_MIOD_MID_DW GENMASK(5, 0)
2607+#define MTK_WED_RROQM_MIOD_MOD_DW GENMASK(13, 8)
2608+#define MTK_WED_RROQM_MIOD_ENTRY_DW GENMASK(22, 16)
2609+
2610+#define MTK_WED_RROQM_MID_MIB 0xcc0
2611+#define MTK_WED_RROQM_MOD_MIB 0xcc4
2612+#define MTK_WED_RROQM_MOD_COHERENT_MIB 0xcc8
2613+#define MTK_WED_RROQM_FDBK_MIB 0xcd0
2614+#define MTK_WED_RROQM_FDBK_COHERENT_MIB 0xcd4
2615+#define MTK_WED_RROQM_FDBK_IND_MIB 0xce0
2616+#define MTK_WED_RROQM_FDBK_ENQ_MIB 0xce4
2617+#define MTK_WED_RROQM_FDBK_ANC_MIB 0xce8
2618+#define MTK_WED_RROQM_FDBK_ANC2H_MIB 0xcec
2619+
2620+#define MTK_WED_RX_BM_RX_DMAD 0xd80
2621+#define MTK_WED_RX_BM_BASE 0xd84
2622+#define MTK_WED_RX_BM_INIT_PTR 0xd88
2623+#define MTK_WED_RX_BM_PTR 0xd8c
2624+#define MTK_WED_RX_BM_PTR_HEAD GENMASK(32, 16)
2625+#define MTK_WED_RX_BM_PTR_TAIL GENMASK(15, 0)
2626+
2627+#define MTK_WED_RX_BM_BLEN 0xd90
2628+#define MTK_WED_RX_BM_STS 0xd94
2629+#define MTK_WED_RX_BM_INTF2 0xd98
2630+#define MTK_WED_RX_BM_INTF 0xd9c
2631+#define MTK_WED_RX_BM_ERR_STS 0xda8
2632+
2633+#define MTK_WED_WOCPU_VIEW_MIOD_BASE 0x8000
2634+#define MTK_WED_PCIE_INT_MASK 0x0
2635+
2636 #endif
2637diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
2638new file mode 100644
developer3d5faf22022-11-29 18:07:22 +08002639index 0000000..54b7787
developer3262bf82022-07-12 11:37:54 +08002640--- /dev/null
2641+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
developer36936c32022-09-30 12:55:06 +08002642@@ -0,0 +1,564 @@
developer3262bf82022-07-12 11:37:54 +08002643+// SPDX-License-Identifier: GPL-2.0-only
2644+
2645+#include <linux/kernel.h>
2646+#include <linux/bitfield.h>
2647+#include <linux/dma-mapping.h>
2648+#include <linux/skbuff.h>
2649+#include <linux/of_platform.h>
2650+#include <linux/interrupt.h>
2651+#include <linux/of_address.h>
2652+#include <linux/iopoll.h>
2653+#include <linux/soc/mediatek/mtk_wed.h>
2654+#include "mtk_wed.h"
2655+#include "mtk_wed_regs.h"
2656+#include "mtk_wed_ccif.h"
2657+#include "mtk_wed_wo.h"
2658+
2659+struct wed_wo_profile_stat profile_total[6] = {
2660+ {1001, 0},
2661+ {1501, 0},
2662+ {3001, 0},
2663+ {5001, 0},
2664+ {10001, 0},
2665+ {0xffffffff, 0}
2666+};
2667+
2668+struct wed_wo_profile_stat profiling_mod[6] = {
2669+ {1001, 0},
2670+ {1501, 0},
2671+ {3001, 0},
2672+ {5001, 0},
2673+ {10001, 0},
2674+ {0xffffffff, 0}
2675+};
2676+
2677+struct wed_wo_profile_stat profiling_rro[6] = {
2678+ {1001, 0},
2679+ {1501, 0},
2680+ {3001, 0},
2681+ {5001, 0},
2682+ {10001, 0},
2683+ {0xffffffff, 0}
2684+};
2685+
2686+static void
2687+woif_q_sync_idx(struct mtk_wed_wo *wo, struct wed_wo_queue *q)
2688+{
2689+ woccif_w32(wo, q->regs->desc_base, q->desc_dma);
2690+ woccif_w32(wo, q->regs->ring_size, q->ndesc);
2691+
developer3262bf82022-07-12 11:37:54 +08002692+}
2693+
2694+static void
2695+woif_q_reset(struct mtk_wed_wo *dev, struct wed_wo_queue *q)
2696+{
2697+
2698+ if (!q || !q->ndesc)
2699+ return;
2700+
2701+ woccif_w32(dev, q->regs->cpu_idx, 0);
2702+
2703+ woif_q_sync_idx(dev, q);
2704+}
2705+
2706+static void
2707+woif_q_kick(struct mtk_wed_wo *wo, struct wed_wo_queue *q, int offset)
2708+{
2709+ wmb();
2710+ woccif_w32(wo, q->regs->cpu_idx, q->head + offset);
2711+}
2712+
2713+static int
developer36936c32022-09-30 12:55:06 +08002714+woif_q_rx_fill(struct mtk_wed_wo *wo, struct wed_wo_queue *q, bool rx)
developer3262bf82022-07-12 11:37:54 +08002715+{
2716+ int len = q->buf_size, frames = 0;
2717+ struct wed_wo_queue_entry *entry;
developer36936c32022-09-30 12:55:06 +08002718+ struct page_frag_cache *page = &q->tx_page;
developer3262bf82022-07-12 11:37:54 +08002719+ struct wed_wo_desc *desc;
2720+ dma_addr_t addr;
2721+ u32 ctrl = 0;
2722+ void *buf;
2723+
2724+ if (!q->ndesc)
2725+ return 0;
2726+
2727+ spin_lock_bh(&q->lock);
2728+
developer36936c32022-09-30 12:55:06 +08002729+ if(rx)
2730+ page = &q->rx_page;
developer3262bf82022-07-12 11:37:54 +08002731+
developer36936c32022-09-30 12:55:06 +08002732+ while (q->queued < q->ndesc) {
2733+ buf = page_frag_alloc(page, len, GFP_ATOMIC);
developer3262bf82022-07-12 11:37:54 +08002734+ if (!buf)
2735+ break;
2736+
2737+ addr = dma_map_single(wo->hw->dev, buf, len, DMA_FROM_DEVICE);
2738+ if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
2739+ skb_free_frag(buf);
2740+ break;
2741+ }
developer4df64ba2022-09-01 14:44:55 +08002742+
2743+ q->head = (q->head + 1) % q->ndesc;
2744+
developer3262bf82022-07-12 11:37:54 +08002745+ desc = &q->desc[q->head];
2746+ entry = &q->entry[q->head];
2747+
2748+ entry->dma_addr = addr;
2749+ entry->dma_len = len;
2750+
developer36936c32022-09-30 12:55:06 +08002751+ if (rx) {
2752+ ctrl = FIELD_PREP(WED_CTL_SD_LEN0, entry->dma_len);
2753+ ctrl |= WED_CTL_LAST_SEC0;
developer3262bf82022-07-12 11:37:54 +08002754+
developer36936c32022-09-30 12:55:06 +08002755+ WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
2756+ WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
2757+ }
developer3262bf82022-07-12 11:37:54 +08002758+ q->queued++;
2759+ q->entry[q->head].buf = buf;
2760+
developer3262bf82022-07-12 11:37:54 +08002761+ frames++;
2762+ }
2763+
2764+ spin_unlock_bh(&q->lock);
2765+
2766+ return frames;
2767+}
2768+
2769+static void
2770+woif_q_rx_fill_process(struct mtk_wed_wo *wo, struct wed_wo_queue *q)
2771+{
developer36936c32022-09-30 12:55:06 +08002772+ if(woif_q_rx_fill(wo, q, true))
developer3262bf82022-07-12 11:37:54 +08002773+ woif_q_kick(wo, q, -1);
2774+}
2775+
2776+static int
2777+woif_q_alloc(struct mtk_wed_wo *dev, struct wed_wo_queue *q,
2778+ int n_desc, int bufsize, int idx,
2779+ struct wed_wo_queue_regs *regs)
2780+{
2781+ struct wed_wo_queue_regs *q_regs;
2782+ int size;
2783+
2784+ spin_lock_init(&q->lock);
2785+ spin_lock_init(&q->cleanup_lock);
2786+
2787+ q_regs = devm_kzalloc(dev->hw->dev, sizeof(*q_regs), GFP_KERNEL);
2788+
2789+ q_regs->desc_base = regs->desc_base;
2790+ q_regs->ring_size = regs->ring_size;
2791+ q_regs->cpu_idx = regs->cpu_idx;
2792+ q_regs->dma_idx = regs->dma_idx;
2793+
2794+ q->regs = q_regs;
2795+ q->ndesc = n_desc;
2796+ q->buf_size = bufsize;
2797+
2798+ size = q->ndesc * sizeof(struct wed_wo_desc);
2799+
2800+ q->desc = dmam_alloc_coherent(dev->hw->dev, size,
2801+ &q->desc_dma, GFP_KERNEL);
2802+ if (!q->desc)
2803+ return -ENOMEM;
2804+
2805+ size = q->ndesc * sizeof(*q->entry);
2806+ q->entry = devm_kzalloc(dev->hw->dev, size, GFP_KERNEL);
2807+ if (!q->entry)
2808+ return -ENOMEM;
2809+
developer36936c32022-09-30 12:55:06 +08002810+ if (idx == 0) {
2811+ /* alloc tx buf */
2812+ woif_q_rx_fill(dev, &dev->q_tx, false);
developer3262bf82022-07-12 11:37:54 +08002813+ woif_q_reset(dev, &dev->q_tx);
developer36936c32022-09-30 12:55:06 +08002814+ }
developer3262bf82022-07-12 11:37:54 +08002815+
2816+ return 0;
2817+}
2818+
2819+static void
developer29f66b32022-07-12 15:23:20 +08002820+woif_q_free(struct mtk_wed_wo *dev, struct wed_wo_queue *q)
2821+{
2822+ int size;
2823+
2824+ if (!q)
2825+ return;
2826+
2827+ if (!q->desc)
2828+ return;
2829+
2830+ woccif_w32(dev, q->regs->cpu_idx, 0);
2831+
2832+ size = q->ndesc * sizeof(struct wed_wo_desc);
2833+ dma_free_coherent(dev->hw->dev, size, q->desc, q->desc_dma);
2834+}
2835+
2836+static void
developer36936c32022-09-30 12:55:06 +08002837+woif_q_tx_clean(struct mtk_wed_wo *wo, struct wed_wo_queue *q)
developer3262bf82022-07-12 11:37:54 +08002838+{
developer36936c32022-09-30 12:55:06 +08002839+ struct page *page;
2840+ int i = 0;
developer3262bf82022-07-12 11:37:54 +08002841+
2842+ if (!q || !q->ndesc)
2843+ return;
2844+
developer36936c32022-09-30 12:55:06 +08002845+ spin_lock_bh(&q->lock);
2846+ while (i < q->ndesc) {
developer3262bf82022-07-12 11:37:54 +08002847+ struct wed_wo_queue_entry *e;
2848+
developer36936c32022-09-30 12:55:06 +08002849+ e = &q->entry[i];
2850+ i++;
developer3262bf82022-07-12 11:37:54 +08002851+
developer36936c32022-09-30 12:55:06 +08002852+ if (!e)
2853+ continue;
developer3262bf82022-07-12 11:37:54 +08002854+ dma_unmap_single(wo->hw->dev, e->dma_addr, e->dma_len,
2855+ DMA_TO_DEVICE);
2856+
developer36936c32022-09-30 12:55:06 +08002857+ skb_free_frag(e->buf);
developer3262bf82022-07-12 11:37:54 +08002858+ }
developer36936c32022-09-30 12:55:06 +08002859+ spin_unlock_bh(&q->lock);
developer3262bf82022-07-12 11:37:54 +08002860+
developer36936c32022-09-30 12:55:06 +08002861+ if (!q->tx_page.va)
2862+ return;
2863+
2864+ page = virt_to_page(q->tx_page.va);
2865+ __page_frag_cache_drain(page, q->tx_page.pagecnt_bias);
2866+ memset(&q->tx_page, 0, sizeof(q->tx_page));
developer3262bf82022-07-12 11:37:54 +08002867+}
2868+
developer3262bf82022-07-12 11:37:54 +08002869+static void *
2870+woif_q_deq(struct mtk_wed_wo *wo, struct wed_wo_queue *q, bool flush,
2871+ int *len, u32 *info, bool *more)
2872+{
2873+ int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
2874+ struct wed_wo_queue_entry *e;
2875+ struct wed_wo_desc *desc;
developer4df64ba2022-09-01 14:44:55 +08002876+ int idx = (q->tail + 1) % q->ndesc;;
developer3262bf82022-07-12 11:37:54 +08002877+ void *buf;
2878+
2879+ *more = false;
2880+ if (!q->queued)
2881+ return NULL;
2882+
2883+ if (flush)
2884+ q->desc[idx].ctrl |= cpu_to_le32(WED_CTL_DMA_DONE);
2885+ else if (!(q->desc[idx].ctrl & cpu_to_le32(WED_CTL_DMA_DONE)))
2886+ return NULL;
2887+
developer4df64ba2022-09-01 14:44:55 +08002888+ q->tail = idx;
developer3262bf82022-07-12 11:37:54 +08002889+ q->queued--;
2890+
2891+ desc = &q->desc[idx];
2892+ e = &q->entry[idx];
2893+
2894+ buf = e->buf;
2895+ if (len) {
2896+ u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
2897+ *len = FIELD_GET(WED_CTL_SD_LEN0, ctl);
2898+ *more = !(ctl & WED_CTL_LAST_SEC0);
2899+ }
2900+
2901+ if (info)
2902+ *info = le32_to_cpu(desc->info);
2903+ if(buf)
2904+ dma_unmap_single(wo->hw->dev, e->dma_addr, buf_len,
2905+ DMA_FROM_DEVICE);
2906+ e->skb = NULL;
2907+
2908+ return buf;
2909+}
2910+
developer29f66b32022-07-12 15:23:20 +08002911+static void
2912+woif_q_rx_clean(struct mtk_wed_wo *wo, struct wed_wo_queue *q)
2913+{
2914+ struct page *page;
2915+ void *buf;
2916+ bool more;
2917+
2918+ if (!q->ndesc)
2919+ return;
2920+
2921+ spin_lock_bh(&q->lock);
2922+ do {
2923+ buf = woif_q_deq(wo, q, true, NULL, NULL, &more);
2924+ if (!buf)
2925+ break;
2926+
2927+ skb_free_frag(buf);
2928+ } while (1);
2929+ spin_unlock_bh(&q->lock);
2930+
2931+ if (!q->rx_page.va)
2932+ return;
2933+
2934+ page = virt_to_page(q->rx_page.va);
2935+ __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
2936+ memset(&q->rx_page, 0, sizeof(q->rx_page));
developer29f66b32022-07-12 15:23:20 +08002937+}
2938+
developer3262bf82022-07-12 11:37:54 +08002939+static int
2940+woif_q_init(struct mtk_wed_wo *dev,
2941+ int (*poll)(struct napi_struct *napi, int budget))
2942+{
2943+ init_dummy_netdev(&dev->napi_dev);
2944+ snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
2945+ "woif_q");
2946+
2947+ if (dev->q_rx.ndesc) {
2948+ netif_napi_add(&dev->napi_dev, &dev->napi, poll, 64);
developer36936c32022-09-30 12:55:06 +08002949+ woif_q_rx_fill(dev, &dev->q_rx, true);
developer3262bf82022-07-12 11:37:54 +08002950+ woif_q_reset(dev, &dev->q_rx);
2951+ napi_enable(&dev->napi);
2952+ }
2953+
2954+ return 0;
2955+}
2956+
2957+void woif_q_rx_skb(struct mtk_wed_wo *wo, struct sk_buff *skb)
2958+{
2959+ struct wed_cmd_hdr *hdr = (struct wed_cmd_hdr *)skb->data;
2960+ int ret;
2961+
2962+ ret = mtk_wed_mcu_cmd_sanity_check(wo, skb);
2963+ if (ret)
2964+ goto free_skb;
2965+
2966+ if (WED_WO_CMD_FLAG_IS_RSP(hdr))
2967+ mtk_wed_mcu_rx_event(wo, skb);
2968+ else
2969+ mtk_wed_mcu_rx_unsolicited_event(wo, skb);
2970+
2971+ return;
2972+free_skb:
2973+ dev_kfree_skb(skb);
2974+}
2975+
2976+static int
2977+woif_q_tx_skb(struct mtk_wed_wo *wo, struct wed_wo_queue *q,
2978+ struct sk_buff *skb)
2979+{
2980+ struct wed_wo_queue_entry *entry;
2981+ struct wed_wo_desc *desc;
developer36936c32022-09-30 12:55:06 +08002982+ int len, ret = 0, idx = -1;
developer3262bf82022-07-12 11:37:54 +08002983+ dma_addr_t addr;
2984+ u32 ctrl = 0;
2985+
2986+ len = skb->len;
developer36936c32022-09-30 12:55:06 +08002987+ spin_lock_bh(&q->lock);
developer3262bf82022-07-12 11:37:54 +08002988+
developer36936c32022-09-30 12:55:06 +08002989+ q->tail = woccif_r32(wo, q->regs->dma_idx);
2990+ q->head = (q->head + 1) % q->ndesc;
2991+ if (q->tail == q->head) {
developer3262bf82022-07-12 11:37:54 +08002992+ ret = -ENOMEM;
2993+ goto error;
2994+ }
2995+
developer3262bf82022-07-12 11:37:54 +08002996+ idx = q->head;
developer3262bf82022-07-12 11:37:54 +08002997+ desc = &q->desc[idx];
2998+ entry = &q->entry[idx];
2999+
developer36936c32022-09-30 12:55:06 +08003000+ if (len > entry->dma_len) {
3001+ ret = -ENOMEM;
3002+ goto error;
3003+ }
3004+ addr = entry->dma_addr;
3005+
3006+ dma_sync_single_for_cpu(wo->hw->dev, addr, len, DMA_TO_DEVICE);
3007+ memcpy(entry->buf, skb->data, len);
3008+ dma_sync_single_for_device(wo->hw->dev, addr, len, DMA_TO_DEVICE);
developer3262bf82022-07-12 11:37:54 +08003009+
3010+ ctrl = FIELD_PREP(WED_CTL_SD_LEN0, len);
3011+ ctrl |= WED_CTL_LAST_SEC0;
3012+ ctrl |= WED_CTL_DMA_DONE;
3013+
3014+ WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
3015+ WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
3016+
developer3262bf82022-07-12 11:37:54 +08003017+ woif_q_kick(wo, q, 0);
3018+ wo->drv_ops->kickout(wo);
3019+
developer3262bf82022-07-12 11:37:54 +08003020+ spin_unlock_bh(&q->lock);
developer3262bf82022-07-12 11:37:54 +08003021+
3022+error:
3023+ dev_kfree_skb(skb);
developer36936c32022-09-30 12:55:06 +08003024+ return ret;
developer3262bf82022-07-12 11:37:54 +08003025+}
3026+
3027+static const struct wed_wo_queue_ops wo_queue_ops = {
3028+ .init = woif_q_init,
3029+ .alloc = woif_q_alloc,
developer29f66b32022-07-12 15:23:20 +08003030+ .free = woif_q_free,
developer3262bf82022-07-12 11:37:54 +08003031+ .reset = woif_q_reset,
3032+ .tx_skb = woif_q_tx_skb,
3033+ .tx_clean = woif_q_tx_clean,
3034+ .rx_clean = woif_q_rx_clean,
3035+ .kick = woif_q_kick,
3036+};
3037+
3038+static int
3039+mtk_wed_wo_rx_process(struct mtk_wed_wo *wo, struct wed_wo_queue *q, int budget)
3040+{
developer36936c32022-09-30 12:55:06 +08003041+ int len, done = 0;
developer3262bf82022-07-12 11:37:54 +08003042+ struct sk_buff *skb;
3043+ unsigned char *data;
3044+ bool more;
3045+
3046+ while (done < budget) {
3047+ u32 info;
3048+
3049+ data = woif_q_deq(wo, q, false, &len, &info, &more);
3050+ if (!data)
3051+ break;
3052+
developer3262bf82022-07-12 11:37:54 +08003053+ skb = build_skb(data, q->buf_size);
3054+ if (!skb) {
3055+ skb_free_frag(data);
3056+ continue;
3057+ }
3058+
3059+ __skb_put(skb, len);
3060+ done++;
3061+
3062+ woif_q_rx_skb(wo, skb);
3063+ }
3064+
3065+ woif_q_rx_fill_process(wo, q);
3066+
3067+ return done;
3068+}
3069+
3070+void mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, bool set,
3071+ u32 clear, u32 val)
3072+{
3073+ unsigned long flags;
3074+
3075+ spin_lock_irqsave(&wo->ccif.irq_lock, flags);
3076+ wo->ccif.irqmask &= ~clear;
3077+ wo->ccif.irqmask |= val;
3078+ if (set)
3079+ wo->drv_ops->set_isr(wo, wo->ccif.irqmask);
3080+
3081+ spin_unlock_irqrestore(&wo->ccif.irq_lock, flags);
3082+}
3083+
3084+static inline void mtk_wed_wo_set_ack_mask(struct mtk_wed_wo *wo, u32 mask)
3085+{
3086+ wo->drv_ops->set_ack(wo, mask);
3087+}
3088+
3089+static void mtk_wed_wo_poll_complete(struct mtk_wed_wo *wo)
3090+{
3091+ mtk_wed_wo_set_ack_mask(wo, wo->ccif.q_int_mask);
3092+ mtk_wed_wo_isr_enable(wo, wo->ccif.q_int_mask);
3093+}
3094+
3095+int mtk_wed_wo_rx_poll(struct napi_struct *napi, int budget)
3096+{
3097+ struct mtk_wed_wo *wo;
3098+ int done = 0, cur;
3099+
3100+ wo = container_of(napi->dev, struct mtk_wed_wo, napi_dev);
3101+
3102+ rcu_read_lock();
3103+
3104+ do {
3105+ cur = mtk_wed_wo_rx_process(wo, &wo->q_rx, budget - done);
3106+ /* rx packet handle */
3107+ done += cur;
3108+ } while (cur && done < budget);
3109+
3110+ rcu_read_unlock();
3111+
3112+ if (done < budget && napi_complete(napi))
3113+ mtk_wed_wo_poll_complete(wo);
3114+
3115+ return done;
3116+}
3117+
3118+static void mtk_wed_wo_isr_tasklet(unsigned long data)
3119+{
3120+ struct mtk_wed_wo *wo = (struct mtk_wed_wo *)data;
3121+ u32 intr, mask;
3122+
3123+ /* disable isr */
3124+ wo->drv_ops->set_isr(wo, 0);
3125+
3126+ intr = wo->drv_ops->get_csr(wo);
3127+ intr &= wo->ccif.irqmask;
3128+
3129+ mask = intr & (wo->ccif.q_int_mask | wo->ccif.q_exep_mask);
3130+ mtk_wed_wo_isr_disable(wo, mask);
3131+
3132+ if (intr & wo->ccif.q_int_mask)
3133+ napi_schedule(&wo->napi);
3134+
3135+ if (intr & wo->ccif.q_exep_mask) {
3136+ /* todo */
3137+ }
3138+}
3139+
3140+static irqreturn_t mtk_wed_wo_isr_handler(int irq, void *wo_instance)
3141+{
3142+ struct mtk_wed_wo *wo = wo_instance;
3143+
3144+ wo->drv_ops->set_isr(wo, 0);
3145+
3146+ tasklet_schedule(&wo->irq_tasklet);
3147+
3148+ return IRQ_HANDLED;
3149+}
3150+
3151+int mtk_wed_wo_init(struct mtk_wed_hw *hw)
3152+{
3153+ struct mtk_wed_wo *wo;
3154+ int ret = 0;
3155+
3156+ wo = kzalloc(sizeof(struct mtk_wed_wo), GFP_KERNEL);
3157+ if (!wo)
3158+ return -ENOMEM;
3159+
3160+ wo->hw = hw;
3161+ wo->queue_ops = &wo_queue_ops;
3162+ hw->wed_wo = wo;
3163+
3164+ tasklet_init(&wo->irq_tasklet, mtk_wed_wo_isr_tasklet,
3165+ (unsigned long)wo);
3166+
3167+ skb_queue_head_init(&wo->mcu.res_q);
3168+ init_waitqueue_head(&wo->mcu.wait);
3169+ mutex_init(&wo->mcu.mutex);
3170+
3171+ ret = wed_wo_hardware_init(wo, mtk_wed_wo_isr_handler);
3172+ if (ret)
3173+ goto error;
3174+
3175+ /* fw download */
3176+ ret = wed_wo_mcu_init(wo);
3177+ if (ret)
3178+ goto error;
3179+
3180+ ret = mtk_wed_exception_init(wo);
3181+ if (ret)
3182+ goto error;
3183+
3184+ return ret;
3185+
3186+error:
3187+ kfree(wo);
3188+
3189+ return ret;
3190+}
3191+
3192+void mtk_wed_wo_exit(struct mtk_wed_hw *hw)
3193+{
developer3262bf82022-07-12 11:37:54 +08003194+ struct mtk_wed_wo *wo = hw->wed_wo;
3195+
developer29f66b32022-07-12 15:23:20 +08003196+ wed_wo_hardware_exit(wo);
3197+
developer3262bf82022-07-12 11:37:54 +08003198+ if (wo->exp.log) {
3199+ dma_unmap_single(wo->hw->dev, wo->exp.phys, wo->exp.log_size, DMA_FROM_DEVICE);
3200+ kfree(wo->exp.log);
3201+ }
3202+
developer29f66b32022-07-12 15:23:20 +08003203+ wo->hw = NULL;
3204+ memset(wo, 0, sizeof(*wo));
3205+ kfree(wo);
developer3262bf82022-07-12 11:37:54 +08003206+}
3207diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.h b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
3208new file mode 100644
developer69bcd592024-03-25 14:26:39 +08003209index 0000000..b24fef3
developer3262bf82022-07-12 11:37:54 +08003210--- /dev/null
3211+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
developer69bcd592024-03-25 14:26:39 +08003212@@ -0,0 +1,325 @@
developer3262bf82022-07-12 11:37:54 +08003213+// SPDX-License-Identifier: GPL-2.0-only
3214+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
3215+
3216+#ifndef __MTK_WED_WO_H
3217+#define __MTK_WED_WO_H
3218+
3219+#include <linux/netdevice.h>
3220+#include <linux/skbuff.h>
3221+#include "mtk_wed.h"
3222+
3223+#define WED_CTL_SD_LEN1 GENMASK(13, 0)
3224+#define WED_CTL_LAST_SEC1 BIT(14)
3225+#define WED_CTL_BURST BIT(15)
3226+#define WED_CTL_SD_LEN0_SHIFT 16
3227+#define WED_CTL_SD_LEN0 GENMASK(29, 16)
3228+#define WED_CTL_LAST_SEC0 BIT(30)
3229+#define WED_CTL_DMA_DONE BIT(31)
3230+#define WED_INFO_WINFO GENMASK(15, 0)
3231+
3232+#define MTK_WED_WO_TXQ_FREE_THR 10
3233+
3234+#define WED_WO_PROFILE_MAX_LVL 6
3235+
3236+
3237+enum mtk_wed_fw_region_id {
3238+ WO_REGION_EMI = 0,
3239+ WO_REGION_ILM,
3240+ WO_REGION_DATA,
3241+ WO_REGION_BOOT,
3242+ __WO_REGION_MAX
3243+};
3244+
3245+struct wed_wo_profile_stat {
3246+ u32 bound;
3247+ u32 record;
3248+};
3249+
3250+#define PROFILE_STAT(record, val) do { \
3251+ u8 lvl = 0; \
3252+ while (lvl < WED_WO_PROFILE_MAX_LVL) { \
3253+ if (val < record[lvl].bound) { \
3254+ record[lvl].record++; \
3255+ break; \
3256+ } \
3257+ lvl++; \
3258+ } \
3259+ } while (0)
3260+
3261+/* align with wo report structure */
3262+struct wed_wo_log {
3263+ u32 sn;
3264+ u32 total;
3265+ u32 rro;
3266+ u32 mod;
3267+};
3268+
3269+struct wed_wo_rxcnt {
3270+ u16 wlan_idx;
3271+ u16 tid;
3272+ u32 rx_pkt_cnt;
3273+ u32 rx_byte_cnt;
3274+ u32 rx_err_cnt;
3275+ u32 rx_drop_cnt;
3276+};
3277+
3278+struct wed_wo_queue {
3279+ struct wed_wo_queue_regs *regs;
3280+
3281+ spinlock_t lock;
3282+ spinlock_t cleanup_lock;
3283+ struct wed_wo_queue_entry *entry;
3284+ struct wed_wo_desc *desc;
3285+
3286+ u16 first;
3287+ u16 head;
3288+ u16 tail;
3289+ int ndesc;
3290+ int queued;
3291+ int buf_size;
3292+
3293+ u8 hw_idx;
3294+ u8 qid;
3295+ u8 flags;
3296+
3297+ dma_addr_t desc_dma;
3298+ struct page_frag_cache rx_page;
developer36936c32022-09-30 12:55:06 +08003299+ struct page_frag_cache tx_page;
developer3262bf82022-07-12 11:37:54 +08003300+};
3301+
3302+
3303+struct wed_wo_mmio {
3304+ struct regmap *regs;
3305+
3306+ spinlock_t irq_lock;
3307+ u8 irq;
3308+ u32 irqmask;
3309+
3310+ u32 q_int_mask;
3311+ u32 q_exep_mask;
3312+};
3313+
3314+struct wed_wo_mcu {
3315+ struct mutex mutex;
3316+ u32 msg_seq;
3317+ int timeout;
3318+
3319+ struct sk_buff_head res_q;
3320+ wait_queue_head_t wait;
3321+};
3322+
3323+struct wed_wo_exception {
3324+ void* log;
3325+ int log_size;
3326+ dma_addr_t phys;
3327+};
3328+
3329+struct wed_wo_queue_regs {
3330+ u32 desc_base;
3331+ u32 ring_size;
3332+ u32 cpu_idx;
3333+ u32 dma_idx;
3334+};
3335+
3336+struct wed_wo_desc {
3337+ __le32 buf0;
3338+ __le32 ctrl;
3339+ __le32 buf1;
3340+ __le32 info;
3341+ __le32 reserved[4];
3342+} __packed __aligned(32);
3343+
3344+struct wed_wo_queue_entry {
3345+ union {
3346+ void *buf;
3347+ struct sk_buff *skb;
3348+ };
3349+
3350+ u32 dma_addr;
3351+ u16 dma_len;
3352+ u16 wcid;
3353+ bool skip_buf0:1;
3354+ bool skip_buf1:1;
3355+ bool done:1;
3356+};
3357+
developer3262bf82022-07-12 11:37:54 +08003358+struct wo_cmd_query {
3359+ u32 query0;
3360+ u32 query1;
3361+};
3362+
3363+struct wed_cmd_hdr {
3364+ /*DW0*/
3365+ u8 ver;
3366+ u8 cmd_id;
3367+ u16 length;
3368+
3369+ /*DW1*/
3370+ u16 uni_id;
3371+ u16 flag;
3372+
3373+ /*DW2*/
3374+ int status;
3375+
3376+ /*DW3*/
3377+ u8 reserved[20];
3378+};
3379+
3380+struct mtk_wed_fw_region {
3381+ void *addr;
3382+ u32 addr_pa;
3383+ u32 size;
3384+ u32 shared;
3385+};
3386+
3387+struct wed_wo_queue_ops;
3388+struct wed_wo_drv_ops;
3389+struct wed_wo_mcu_ops;
3390+
3391+struct wo_rx_total_cnt {
3392+ u64 rx_pkt_cnt;
3393+ u64 rx_byte_cnt;
3394+ u64 rx_err_cnt;
3395+ u64 rx_drop_cnt;
3396+};
3397+
3398+struct mtk_wed_wo {
3399+ struct mtk_wed_hw *hw;
3400+
3401+ struct wed_wo_mmio ccif;
3402+ struct wed_wo_mcu mcu;
3403+ struct wed_wo_exception exp;
3404+
3405+ const struct wed_wo_drv_ops *drv_ops;
3406+ const struct wed_wo_mcu_ops *mcu_ops;
3407+ const struct wed_wo_queue_ops *queue_ops;
3408+
3409+ struct net_device napi_dev;
3410+ spinlock_t rx_lock;
3411+ struct napi_struct napi;
3412+ struct sk_buff_head rx_skb;
3413+ struct wed_wo_queue q_rx;
3414+ struct tasklet_struct irq_tasklet;
3415+
3416+ struct wed_wo_queue q_tx;
3417+
3418+ struct mtk_wed_fw_region region[__WO_REGION_MAX];
3419+
3420+ struct wed_wo_profile_stat total[WED_WO_PROFILE_MAX_LVL];
3421+ struct wed_wo_profile_stat mod[WED_WO_PROFILE_MAX_LVL];
3422+ struct wed_wo_profile_stat rro[WED_WO_PROFILE_MAX_LVL];
3423+ char dirname[4];
3424+ struct wo_rx_total_cnt wo_rxcnt[8][544];
3425+};
3426+
3427+struct wed_wo_queue_ops {
3428+ int (*init)(struct mtk_wed_wo *wo,
3429+ int (*poll)(struct napi_struct *napi, int budget));
3430+
3431+ int (*alloc)(struct mtk_wed_wo *wo, struct wed_wo_queue *q,
3432+ int idx, int n_desc, int bufsize,
3433+ struct wed_wo_queue_regs *regs);
developer29f66b32022-07-12 15:23:20 +08003434+ void (*free)(struct mtk_wed_wo *wo, struct wed_wo_queue *q);
developer3262bf82022-07-12 11:37:54 +08003435+ void (*reset)(struct mtk_wed_wo *wo, struct wed_wo_queue *q);
3436+
3437+ int (*tx_skb)(struct mtk_wed_wo *wo, struct wed_wo_queue *q,
3438+ struct sk_buff *skb);
developer36936c32022-09-30 12:55:06 +08003439+ void (*tx_clean)(struct mtk_wed_wo *wo, struct wed_wo_queue *q);
developer3262bf82022-07-12 11:37:54 +08003440+
3441+ void (*rx_clean)(struct mtk_wed_wo *wo, struct wed_wo_queue *q);
3442+
3443+ void (*kick)(struct mtk_wed_wo *wo, struct wed_wo_queue *q, int offset);
3444+};
3445+
3446+struct wed_wo_drv_ops {
3447+ void (*kickout)(struct mtk_wed_wo *wo);
3448+ void (*set_ack)(struct mtk_wed_wo *wo, u32 mask);
3449+ void (*set_isr)(struct mtk_wed_wo *wo, u32 mask);
3450+ u32 (*get_csr)(struct mtk_wed_wo *wo);
3451+ int (*tx_prepare_skb)(struct mtk_wed_wo *wo);
3452+ bool (*check_excpetion)(struct mtk_wed_wo *wo);
3453+ void (*clear_int)(struct mtk_wed_wo *wo, u32 mask);
3454+};
3455+
3456+struct wed_wo_mcu_ops {
3457+ u32 headroom;
3458+
3459+ int (*mcu_skb_send_msg)(struct mtk_wed_wo *wo, int to_id,
3460+ int cmd, struct sk_buff *skb,
3461+ int *seq, bool wait_resp);
3462+
3463+ int (*mcu_parse_response)(struct mtk_wed_wo *wo, int cmd,
3464+ struct sk_buff *skb, int seq);
3465+
3466+ int (*mcu_restart)(struct mtk_wed_wo *wo);
3467+};
3468+
3469+#define mtk_wed_wo_q_init(wo, ...) (wo)->queue_ops->init((wo), __VA_ARGS__)
3470+#define mtk_wed_wo_q_alloc(wo, ...) (wo)->queue_ops->alloc((wo), __VA_ARGS__)
developer29f66b32022-07-12 15:23:20 +08003471+#define mtk_wed_wo_q_free(wo, ...) (wo)->queue_ops->free((wo), __VA_ARGS__)
3472+#define mtk_wed_wo_q_reset(wo, ...) (wo)->queue_ops->reset((wo), __VA_ARGS__)
developer3262bf82022-07-12 11:37:54 +08003473+#define mtk_wed_wo_q_tx_skb(wo, ...) (wo)->queue_ops->tx_skb((wo), __VA_ARGS__)
developer3262bf82022-07-12 11:37:54 +08003474+#define mtk_wed_wo_q_tx_clean(wo, ...) (wo)->queue_ops->tx_clean((wo), __VA_ARGS__)
3475+#define mtk_wed_wo_q_rx_clean(wo, ...) (wo)->queue_ops->rx_clean((wo), __VA_ARGS__)
3476+#define mtk_wed_wo_q_kick(wo, ...) (wo)->queue_ops->kick((wo), __VA_ARGS__)
3477+
3478+enum {
3479+	WARP_CMD_FLAG_RSP = 1 << 0, /* is response */
3480+	WARP_CMD_FLAG_NEED_RSP = 1 << 1, /* need response */
3481+ WARP_CMD_FLAG_FROM_TO_WO = 1 << 2, /* send between host and wo */
3482+};
3483+
3484+#define WED_WO_CMD_FLAG_IS_RSP(_hdr) ((_hdr)->flag & (WARP_CMD_FLAG_RSP))
3485+#define WED_WO_CMD_FLAG_SET_RSP(_hdr) ((_hdr)->flag |= (WARP_CMD_FLAG_RSP))
3486+#define WED_WO_CMD_FLAG_IS_NEED_RSP(_hdr) ((_hdr)->flag & (WARP_CMD_FLAG_NEED_RSP))
3487+#define WED_WO_CMD_FLAG_SET_NEED_RSP(_hdr) ((_hdr)->flag |= (WARP_CMD_FLAG_NEED_RSP))
3488+#define WED_WO_CMD_FLAG_IS_FROM_TO_WO(_hdr) ((_hdr)->flag & (WARP_CMD_FLAG_FROM_TO_WO))
3489+#define WED_WO_CMD_FLAG_SET_FROM_TO_WO(_hdr) ((_hdr)->flag |= (WARP_CMD_FLAG_FROM_TO_WO))
3490+
3491+void mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, bool set,
3492+ u32 clear, u32 val);
3493+
3494+static inline void mtk_wed_wo_isr_enable(struct mtk_wed_wo *wo, u32 mask)
3495+{
3496+ mtk_wed_wo_set_isr_mask(wo, false, 0, mask);
3497+
3498+ tasklet_schedule(&wo->irq_tasklet);
3499+}
3500+
3501+static inline void mtk_wed_wo_isr_disable(struct mtk_wed_wo *wo, u32 mask)
3502+{
3503+ mtk_wed_wo_set_isr_mask(wo, true, mask, 0);
3504+}
3505+
3506+static inline void
3507+wo_w32(struct mtk_wed_wo *dev, u32 reg, u32 val)
3508+{
3509+ writel(val, dev->region[WO_REGION_BOOT].addr + reg);
3510+}
3511+
3512+static inline u32
3513+wo_r32(struct mtk_wed_wo *dev, u32 reg)
3514+{
3515+ return readl(dev->region[WO_REGION_BOOT].addr + reg);
3516+}
3517+static inline void
3518+woccif_w32(struct mtk_wed_wo *dev, u32 reg, u32 val)
3519+{
3520+ regmap_write(dev->ccif.regs, reg, val);
3521+}
3522+
3523+static inline u32
3524+woccif_r32(struct mtk_wed_wo *dev, u32 reg)
3525+{
3526+ unsigned int val;
3527+
3528+ regmap_read(dev->ccif.regs, reg, &val);
3529+
3530+ return val;
3531+}
3532+
developer69bcd592024-03-25 14:26:39 +08003533+int mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data, int len);
developer3262bf82022-07-12 11:37:54 +08003534+int mtk_wed_wo_init(struct mtk_wed_hw *hw);
developer29f66b32022-07-12 15:23:20 +08003535+void mtk_wed_wo_exit(struct mtk_wed_hw *hw);
developer3262bf82022-07-12 11:37:54 +08003536+#endif
3537+
3538diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
developer69bcd592024-03-25 14:26:39 +08003539index 00036f9..658f392 100644
developer3262bf82022-07-12 11:37:54 +08003540--- a/include/linux/soc/mediatek/mtk_wed.h
3541+++ b/include/linux/soc/mediatek/mtk_wed.h
developer69bcd592024-03-25 14:26:39 +08003542@@ -7,26 +7,73 @@
developer3262bf82022-07-12 11:37:54 +08003543 #include <linux/pci.h>
3544
3545 #define MTK_WED_TX_QUEUES 2
3546+#define MTK_WED_RX_QUEUES 2
developer69bcd592024-03-25 14:26:39 +08003547
3548-enum {
3549- MTK_BUS_TYPE_PCIE,
3550- MTK_BUS_TYPE_AXI,
3551- MTK_BUS_TYPE_MAX
3552-};
3553+#define WED_WO_STA_REC 0x6
3554
3555 struct mtk_wed_hw;
3556 struct mtk_wdma_desc;
3557
developer3d5faf22022-11-29 18:07:22 +08003558+enum mtk_wed_wo_cmd {
3559+ MTK_WED_WO_CMD_WED_CFG,
3560+ MTK_WED_WO_CMD_WED_RX_STAT,
3561+ MTK_WED_WO_CMD_RRO_SER,
3562+ MTK_WED_WO_CMD_DBG_INFO,
3563+ MTK_WED_WO_CMD_DEV_INFO,
3564+ MTK_WED_WO_CMD_BSS_INFO,
3565+ MTK_WED_WO_CMD_STA_REC,
3566+ MTK_WED_WO_CMD_DEV_INFO_DUMP,
3567+ MTK_WED_WO_CMD_BSS_INFO_DUMP,
3568+ MTK_WED_WO_CMD_STA_REC_DUMP,
3569+ MTK_WED_WO_CMD_BA_INFO_DUMP,
3570+ MTK_WED_WO_CMD_FBCMD_Q_DUMP,
3571+ MTK_WED_WO_CMD_FW_LOG_CTRL,
3572+ MTK_WED_WO_CMD_LOG_FLUSH,
3573+ MTK_WED_WO_CMD_CHANGE_STATE,
3574+ MTK_WED_WO_CMD_CPU_STATS_ENABLE,
3575+ MTK_WED_WO_CMD_CPU_STATS_DUMP,
3576+ MTK_WED_WO_CMD_EXCEPTION_INIT,
3577+ MTK_WED_WO_CMD_PROF_CTRL,
3578+ MTK_WED_WO_CMD_STA_BA_DUMP,
3579+ MTK_WED_WO_CMD_BA_CTRL_DUMP,
3580+ MTK_WED_WO_CMD_RXCNT_CTRL,
3581+ MTK_WED_WO_CMD_RXCNT_INFO,
3582+ MTK_WED_WO_CMD_SET_CAP,
3583+ MTK_WED_WO_CMD_CCIF_RING_DUMP,
3584+ MTK_WED_WO_CMD_WED_END
developerc8592942022-10-31 14:07:50 +08003585+};
developer69bcd592024-03-25 14:26:39 +08003586+
developer3262bf82022-07-12 11:37:54 +08003587+struct mtk_rxbm_desc {
3588+ __le32 buf0;
3589+ __le32 token;
3590+} __packed __aligned(4);
3591+
developer69bcd592024-03-25 14:26:39 +08003592+enum mtk_wed_bus_tye{
3593+ MTK_WED_BUS_PCIE,
3594+ MTK_WED_BUS_AXI,
developer3262bf82022-07-12 11:37:54 +08003595+};
3596+
developer69bcd592024-03-25 14:26:39 +08003597+#define MTK_WED_RING_CONFIGURED BIT(0)
3598 struct mtk_wed_ring {
3599 struct mtk_wdma_desc *desc;
3600 dma_addr_t desc_phys;
3601 u32 desc_size;
3602 int size;
3603+ u32 flags;
3604
3605 u32 reg_base;
3606 void __iomem *wpdma;
3607 };
3608
developer3d5faf22022-11-29 18:07:22 +08003609+struct mtk_wed_wo_rx_stats {
3610+ __le16 wlan_idx;
3611+ __le16 tid;
3612+ __le32 rx_pkt_cnt;
3613+ __le32 rx_byte_cnt;
3614+ __le32 rx_err_cnt;
3615+ __le32 rx_drop_cnt;
developer8ec491f2022-08-22 19:48:44 +08003616+};
3617+
developer3262bf82022-07-12 11:37:54 +08003618 struct mtk_wed_device {
3619 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
3620 const struct mtk_wed_ops *ops;
developer69bcd592024-03-25 14:26:39 +08003621@@ -35,18 +82,37 @@ struct mtk_wed_device {
3622 bool init_done, running;
3623 int wdma_idx;
3624 int irq;
3625+ u8 version;
3626+
3627+ /* used by wlan driver */
3628 u32 rev_id;
3629
developer3262bf82022-07-12 11:37:54 +08003630 struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES];
developer69bcd592024-03-25 14:26:39 +08003631+ struct mtk_wed_ring rx_ring[MTK_WED_RX_QUEUES];
developer3262bf82022-07-12 11:37:54 +08003632 struct mtk_wed_ring txfree_ring;
3633 struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
developer3262bf82022-07-12 11:37:54 +08003634+ struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES];
developer3262bf82022-07-12 11:37:54 +08003635
3636 struct {
developer3d5faf22022-11-29 18:07:22 +08003637 int size;
developer69bcd592024-03-25 14:26:39 +08003638 void **pages;
3639 struct mtk_wdma_desc *desc;
developer3d5faf22022-11-29 18:07:22 +08003640 dma_addr_t desc_phys;
developer3262bf82022-07-12 11:37:54 +08003641- } buf_ring;
developer69bcd592024-03-25 14:26:39 +08003642+ } tx_buf_ring;
3643+
3644+ struct {
3645+ int size;
3646+ struct page_frag_cache rx_page;
3647+ struct mtk_rxbm_desc *desc;
3648+ dma_addr_t desc_phys;
3649+ } rx_buf_ring;
developer3d5faf22022-11-29 18:07:22 +08003650+
3651+ struct {
developer69bcd592024-03-25 14:26:39 +08003652+ struct mtk_wed_ring ring;
developer3262bf82022-07-12 11:37:54 +08003653+ void __iomem *rro_desc;
developer69bcd592024-03-25 14:26:39 +08003654+ dma_addr_t miod_phys;
3655+ dma_addr_t fdbk_phys;
developer3262bf82022-07-12 11:37:54 +08003656+ } rro;
3657
3658 /* filled by driver: */
3659 struct {
developer69bcd592024-03-25 14:26:39 +08003660@@ -56,24 +122,35 @@ struct mtk_wed_device {
3661 };
3662 enum mtk_wed_bus_tye bus_type;
developer3262bf82022-07-12 11:37:54 +08003663 void __iomem *base;
developer3262bf82022-07-12 11:37:54 +08003664+ u32 phy_base;
3665
developerc89c5472022-08-02 13:00:04 +08003666 u32 wpdma_phys;
3667 u32 wpdma_int;
developer3262bf82022-07-12 11:37:54 +08003668 u32 wpdma_mask;
3669 u32 wpdma_tx;
3670 u32 wpdma_txfree;
3671+ u32 wpdma_rx_glo;
3672+ u32 wpdma_rx;
3673
3674 u8 tx_tbit[MTK_WED_TX_QUEUES];
3675+ u8 rx_tbit[MTK_WED_RX_QUEUES];
3676 u8 txfree_tbit;
3677
3678 u16 token_start;
3679 unsigned int nbuf;
3680+ unsigned int rx_nbuf;
developer3d5faf22022-11-29 18:07:22 +08003681+ unsigned int rx_npkt;
3682+ unsigned int rx_size;
developer3262bf82022-07-12 11:37:54 +08003683
developer33907d42022-09-19 14:33:58 +08003684 bool wcid_512;
3685
developer3262bf82022-07-12 11:37:54 +08003686 u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
3687 int (*offload_enable)(struct mtk_wed_device *wed);
3688 void (*offload_disable)(struct mtk_wed_device *wed);
developer69bcd592024-03-25 14:26:39 +08003689+ u32 (*init_rx_buf)(struct mtk_wed_device *wed, int size);
developer3262bf82022-07-12 11:37:54 +08003690+ void (*release_rx_buf)(struct mtk_wed_device *wed);
developer3d5faf22022-11-29 18:07:22 +08003691+ void (*update_wo_rx_stats)(struct mtk_wed_device *wed,
3692+ struct mtk_wed_wo_rx_stats *stats);
developer3262bf82022-07-12 11:37:54 +08003693 } wlan;
3694 #endif
3695 };
developer69bcd592024-03-25 14:26:39 +08003696@@ -82,9 +159,15 @@ struct mtk_wed_ops {
3697 int (*attach)(struct mtk_wed_device *dev);
3698 int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring,
developer3262bf82022-07-12 11:37:54 +08003699 void __iomem *regs);
developer69bcd592024-03-25 14:26:39 +08003700+ int (*rx_ring_setup)(struct mtk_wed_device *dev, int ring,
3701+ void __iomem *regs);
developer3262bf82022-07-12 11:37:54 +08003702 int (*txfree_ring_setup)(struct mtk_wed_device *dev,
3703 void __iomem *regs);
developer3262bf82022-07-12 11:37:54 +08003704+ int (*msg_update)(struct mtk_wed_device *dev, int cmd_id,
3705+ void *data, int len);
3706 void (*detach)(struct mtk_wed_device *dev);
developerc89c5472022-08-02 13:00:04 +08003707+ void (*ppe_check)(struct mtk_wed_device *dev, struct sk_buff *skb,
developer3262bf82022-07-12 11:37:54 +08003708+ u32 reason, u32 hash);
developer3262bf82022-07-12 11:37:54 +08003709
developer69bcd592024-03-25 14:26:39 +08003710 void (*stop)(struct mtk_wed_device *dev);
3711 void (*start)(struct mtk_wed_device *dev, u32 irq_mask);
3712@@ -119,6 +202,16 @@ mtk_wed_device_attach(struct mtk_wed_device *dev)
developer3d5faf22022-11-29 18:07:22 +08003713 return ret;
3714 }
3715
3716+static inline bool
3717+mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
3718+{
3719+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
developer69bcd592024-03-25 14:26:39 +08003720+ return dev->version != 1;
developer3d5faf22022-11-29 18:07:22 +08003721+#else
3722+ return false;
3723+#endif
3724+}
3725+
3726 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
3727 #define mtk_wed_device_active(_dev) !!(_dev)->ops
3728 #define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
developer69bcd592024-03-25 14:26:39 +08003729@@ -135,6 +228,12 @@ mtk_wed_device_attach(struct mtk_wed_device *dev)
developer3262bf82022-07-12 11:37:54 +08003730 (_dev)->ops->irq_get(_dev, _mask)
3731 #define mtk_wed_device_irq_set_mask(_dev, _mask) \
3732 (_dev)->ops->irq_set_mask(_dev, _mask)
developer69bcd592024-03-25 14:26:39 +08003733+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) \
3734+ (_dev)->ops->rx_ring_setup(_dev, _ring, _regs)
developer3262bf82022-07-12 11:37:54 +08003735+#define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) \
3736+ (_dev)->ops->ppe_check(_dev, _skb, _reason, _hash)
developer69bcd592024-03-25 14:26:39 +08003737+#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) \
3738+ (_dev)->ops->msg_update(_dev, _id, _msg, _len)
developer3262bf82022-07-12 11:37:54 +08003739 #else
3740 static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
3741 {
developer69bcd592024-03-25 14:26:39 +08003742@@ -148,6 +247,9 @@ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
developer3262bf82022-07-12 11:37:54 +08003743 #define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
3744 #define mtk_wed_device_irq_get(_dev, _mask) 0
3745 #define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0)
developer69bcd592024-03-25 14:26:39 +08003746+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) -ENODEV
developer3262bf82022-07-12 11:37:54 +08003747+#define mtk_wed_device_ppe_check(_dev, _hash) do {} while (0)
developer69bcd592024-03-25 14:26:39 +08003748+#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) -ENODEV
developer3262bf82022-07-12 11:37:54 +08003749 #endif
3750
3751 #endif
3752--
developer20126ad2022-09-12 14:42:56 +080037532.18.0
developer3262bf82022-07-12 11:37:54 +08003754