blob: 7efa9df847c57ae32d4b97b6c55eef5ac26b86e2 [file] [log] [blame]
developer58aa0682023-09-18 14:02:26 +08001From d35f304a7d0ec9612064a41b98338d9f712fbb48 Mon Sep 17 00:00:00 2001
developer8cb3ac72022-07-04 10:55:14 +08002From: Sujuan Chen <sujuan.chen@mediatek.com>
developer58aa0682023-09-18 14:02:26 +08003Date: Mon, 18 Sep 2023 11:04:53 +0800
4Subject: [PATCH 09/22] add-wed-rx-support-for-mt7896
developer8cb3ac72022-07-04 10:55:14 +08005
developer8cb3ac72022-07-04 10:55:14 +08006---
7 arch/arm64/boot/dts/mediatek/mt7986a.dtsi | 42 +-
8 arch/arm64/boot/dts/mediatek/mt7986b.dtsi | 42 +-
9 drivers/net/ethernet/mediatek/Makefile | 2 +-
developer144824b2022-11-25 21:27:43 +080010 drivers/net/ethernet/mediatek/mtk_wed.c | 639 ++++++++++++++++--
developera3f86ed2022-07-08 14:15:13 +080011 drivers/net/ethernet/mediatek/mtk_wed.h | 51 ++
12 drivers/net/ethernet/mediatek/mtk_wed_ccif.c | 133 ++++
developer8cb3ac72022-07-04 10:55:14 +080013 drivers/net/ethernet/mediatek/mtk_wed_ccif.h | 45 ++
14 .../net/ethernet/mediatek/mtk_wed_debugfs.c | 90 +++
developer8fec8ae2022-08-15 15:01:09 -070015 drivers/net/ethernet/mediatek/mtk_wed_mcu.c | 586 ++++++++++++++++
developerfaaa5162022-10-24 14:12:16 +080016 drivers/net/ethernet/mediatek/mtk_wed_mcu.h | 96 +++
developere0cbe332022-09-10 17:36:02 +080017 drivers/net/ethernet/mediatek/mtk_wed_regs.h | 144 +++-
developer53bfd362022-09-29 12:02:18 +080018 drivers/net/ethernet/mediatek/mtk_wed_wo.c | 564 ++++++++++++++++
19 drivers/net/ethernet/mediatek/mtk_wed_wo.h | 324 +++++++++
developer144824b2022-11-25 21:27:43 +080020 include/linux/soc/mediatek/mtk_wed.h | 126 +++-
developer58aa0682023-09-18 14:02:26 +080021 14 files changed, 2801 insertions(+), 83 deletions(-)
developer8cb3ac72022-07-04 10:55:14 +080022 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_ccif.c
23 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_ccif.h
24 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_mcu.c
25 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_mcu.h
26 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_wo.c
27 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_wo.h
28
29diff --git a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
developer58aa0682023-09-18 14:02:26 +080030index 3ff8994..c5dc5e8 100644
developer8cb3ac72022-07-04 10:55:14 +080031--- a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
32+++ b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
33@@ -65,6 +65,12 @@
34 interrupt-parent = <&gic>;
35 interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
36 mediatek,wed_pcie = <&wed_pcie>;
37+ mediatek,ap2woccif = <&ap2woccif0>;
38+ mediatek,wocpu_ilm = <&wocpu0_ilm>;
39+ mediatek,wocpu_dlm = <&wocpu0_dlm>;
40+ mediatek,wocpu_boot = <&cpu_boot>;
41+ mediatek,wocpu_emi = <&wocpu0_emi>;
42+ mediatek,wocpu_data = <&wocpu_data>;
43 };
44
45 wed1: wed@15011000 {
46@@ -74,15 +80,26 @@
47 interrupt-parent = <&gic>;
48 interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
49 mediatek,wed_pcie = <&wed_pcie>;
50+ mediatek,ap2woccif = <&ap2woccif1>;
51+ mediatek,wocpu_ilm = <&wocpu1_ilm>;
52+ mediatek,wocpu_dlm = <&wocpu1_dlm>;
53+ mediatek,wocpu_boot = <&cpu_boot>;
54+ mediatek,wocpu_emi = <&wocpu1_emi>;
55+ mediatek,wocpu_data = <&wocpu_data>;
56 };
57
58- ap2woccif: ap2woccif@151A5000 {
59- compatible = "mediatek,ap2woccif";
60- reg = <0 0x151A5000 0 0x1000>,
61- <0 0x151AD000 0 0x1000>;
62+ ap2woccif0: ap2woccif@151A5000 {
63+ compatible = "mediatek,ap2woccif", "syscon";
64+ reg = <0 0x151A5000 0 0x1000>;
65 interrupt-parent = <&gic>;
66- interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
67- <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
68+ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
69+ };
70+
71+ ap2woccif1: ap2woccif@0x151AD000 {
72+ compatible = "mediatek,ap2woccif", "syscon";
73+ reg = <0 0x151AD000 0 0x1000>;
74+ interrupt-parent = <&gic>;
75+ interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
76 };
77
78 wocpu0_ilm: wocpu0_ilm@151E0000 {
79@@ -95,10 +112,17 @@
80 reg = <0 0x151F0000 0 0x8000>;
81 };
82
83- wocpu_dlm: wocpu_dlm@151E8000 {
84+ wocpu0_dlm: wocpu_dlm@151E8000 {
85+ compatible = "mediatek,wocpu_dlm";
86+ reg = <0 0x151E8000 0 0x2000>;
87+
88+ resets = <&ethsysrst 0>;
89+ reset-names = "wocpu_rst";
90+ };
91+
92+ wocpu1_dlm: wocpu_dlm@0x151F8000 {
93 compatible = "mediatek,wocpu_dlm";
94- reg = <0 0x151E8000 0 0x2000>,
95- <0 0x151F8000 0 0x2000>;
96+ reg = <0 0x151F8000 0 0x2000>;
97
98 resets = <&ethsysrst 0>;
99 reset-names = "wocpu_rst";
100diff --git a/arch/arm64/boot/dts/mediatek/mt7986b.dtsi b/arch/arm64/boot/dts/mediatek/mt7986b.dtsi
developer58aa0682023-09-18 14:02:26 +0800101index 043e509..bfd2a02 100644
developer8cb3ac72022-07-04 10:55:14 +0800102--- a/arch/arm64/boot/dts/mediatek/mt7986b.dtsi
103+++ b/arch/arm64/boot/dts/mediatek/mt7986b.dtsi
104@@ -65,6 +65,12 @@
105 interrupt-parent = <&gic>;
106 interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
107 mediatek,wed_pcie = <&wed_pcie>;
108+ mediatek,ap2woccif = <&ap2woccif0>;
109+ mediatek,wocpu_ilm = <&wocpu0_ilm>;
110+ mediatek,wocpu_dlm = <&wocpu0_dlm>;
111+ mediatek,wocpu_boot = <&cpu_boot>;
112+ mediatek,wocpu_emi = <&wocpu0_emi>;
113+ mediatek,wocpu_data = <&wocpu_data>;
114 };
115
116 wed1: wed@15011000 {
117@@ -74,15 +80,26 @@
118 interrupt-parent = <&gic>;
119 interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
120 mediatek,wed_pcie = <&wed_pcie>;
121+ mediatek,ap2woccif = <&ap2woccif1>;
122+ mediatek,wocpu_ilm = <&wocpu1_ilm>;
123+ mediatek,wocpu_dlm = <&wocpu1_dlm>;
124+ mediatek,wocpu_boot = <&cpu_boot>;
125+ mediatek,wocpu_emi = <&wocpu1_emi>;
126+ mediatek,wocpu_data = <&wocpu_data>;
127 };
128
129- ap2woccif: ap2woccif@151A5000 {
130- compatible = "mediatek,ap2woccif";
131- reg = <0 0x151A5000 0 0x1000>,
132- <0 0x151AD000 0 0x1000>;
133+ ap2woccif0: ap2woccif@151A5000 {
134+ compatible = "mediatek,ap2woccif", "syscon";
135+ reg = <0 0x151A5000 0 0x1000>;
136 interrupt-parent = <&gic>;
137- interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
138- <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
139+ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
140+ };
141+
142+ ap2woccif1: ap2woccif@0x151AD000 {
143+ compatible = "mediatek,ap2woccif", "syscon";
144+ reg = <0 0x151AD000 0 0x1000>;
145+ interrupt-parent = <&gic>;
146+ interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
147 };
148
149 wocpu0_ilm: wocpu0_ilm@151E0000 {
150@@ -95,10 +112,17 @@
151 reg = <0 0x151F0000 0 0x8000>;
152 };
153
154- wocpu_dlm: wocpu_dlm@151E8000 {
155+ wocpu0_dlm: wocpu_dlm@151E8000 {
156+ compatible = "mediatek,wocpu_dlm";
157+ reg = <0 0x151E8000 0 0x2000>;
158+
159+ resets = <&ethsysrst 0>;
160+ reset-names = "wocpu_rst";
161+ };
162+
163+ wocpu1_dlm: wocpu_dlm@0x151F8000 {
164 compatible = "mediatek,wocpu_dlm";
165- reg = <0 0x151E8000 0 0x2000>,
166- <0 0x151F8000 0 0x2000>;
167+ reg = <0 0x151F8000 0 0x2000>;
168
169 resets = <&ethsysrst 0>;
170 reset-names = "wocpu_rst";
developer428eaaa2023-10-06 15:48:21 +0800171diff --git a/arch/arm64/boot/dts/mediatek/mt7981.dtsi b/arch/arm64/boot/dts/mediatek/mt7981.dtsi
172index d34943e7..512fb5d6 100644
173--- a/arch/arm64/boot/dts/mediatek/mt7981.dtsi
174+++ b/arch/arm64/boot/dts/mediatek/mt7981.dtsi
175@@ -97,26 +97,29 @@
176 interrupt-parent = <&gic>;
177 interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
178 mediatek,wed_pcie = <&wed_pcie>;
179+ mediatek,ap2woccif = <&ap2woccif0>;
180+ mediatek,wocpu_ilm = <&wocpu0_ilm>;
181+ mediatek,wocpu_dlm = <&wocpu0_dlm>;
182+ mediatek,wocpu_boot = <&cpu_boot>;
183+ mediatek,wocpu_emi = <&wocpu0_emi>;
184+ mediatek,wocpu_data = <&wocpu_data>;
185 };
186
187- ap2woccif: ap2woccif@151A5000 {
188- compatible = "mediatek,ap2woccif";
189- reg = <0 0x151A5000 0 0x1000>,
190- <0 0x151AD000 0 0x1000>;
191+ ap2woccif0: ap2woccif@151A5000 {
192+ compatible = "mediatek,ap2woccif", "syscon";
193+ reg = <0 0x151A5000 0 0x1000>;
194 interrupt-parent = <&gic>;
195- interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
196- <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
197- };
198+ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
199+ };
200
201 wocpu0_ilm: wocpu0_ilm@151E0000 {
202 compatible = "mediatek,wocpu0_ilm";
203 reg = <0 0x151E0000 0 0x8000>;
204 };
205
206- wocpu_dlm: wocpu_dlm@151E8000 {
207+ wocpu0_dlm: wocpu_dlm@151E8000 {
208 compatible = "mediatek,wocpu_dlm";
209- reg = <0 0x151E8000 0 0x2000>,
210- <0 0x151F8000 0 0x2000>;
211+ reg = <0 0x151E8000 0 0x2000>;
212
213 resets = <&ethsysrst 0>;
214 reset-names = "wocpu_rst";
developer8cb3ac72022-07-04 10:55:14 +0800215diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
developer58aa0682023-09-18 14:02:26 +0800216index 4090132..fdbb90f 100644
developer8cb3ac72022-07-04 10:55:14 +0800217--- a/drivers/net/ethernet/mediatek/Makefile
218+++ b/drivers/net/ethernet/mediatek/Makefile
developeree39bcf2023-06-16 08:03:30 +0800219@@ -10,5 +10,5 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o
developer8cb3ac72022-07-04 10:55:14 +0800220 ifdef CONFIG_DEBUG_FS
221 mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
222 endif
223-obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
224+obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o mtk_wed_wo.o mtk_wed_mcu.o mtk_wed_ccif.o
225 obj-$(CONFIG_NET_MEDIATEK_HNAT) += mtk_hnat/
226diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
developer58aa0682023-09-18 14:02:26 +0800227index 23e3dc5..4b2f1a2 100644
developer8cb3ac72022-07-04 10:55:14 +0800228--- a/drivers/net/ethernet/mediatek/mtk_wed.c
229+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
230@@ -13,11 +13,19 @@
231 #include <linux/debugfs.h>
232 #include <linux/iopoll.h>
233 #include <linux/soc/mediatek/mtk_wed.h>
234+
235 #include "mtk_eth_soc.h"
236 #include "mtk_wed_regs.h"
237 #include "mtk_wed.h"
238 #include "mtk_ppe.h"
239-
240+#include "mtk_wed_mcu.h"
241+#include "mtk_wed_wo.h"
242+
243+struct wo_cmd_ring {
244+ u32 q_base;
245+ u32 cnt;
246+ u32 unit;
247+};
248 static struct mtk_wed_hw *hw_list[2];
249 static DEFINE_MUTEX(hw_lock);
250
developera3f86ed2022-07-08 14:15:13 +0800251@@ -51,6 +59,56 @@ wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
developer8cb3ac72022-07-04 10:55:14 +0800252 wdma_m32(dev, reg, 0, mask);
253 }
254
255+static void
256+wdma_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
257+{
258+ wdma_m32(dev, reg, mask, 0);
259+}
260+
developera3f86ed2022-07-08 14:15:13 +0800261+static u32
262+mtk_wdma_read_reset(struct mtk_wed_device *dev)
263+{
264+ return wdma_r32(dev, MTK_WDMA_GLO_CFG);
265+}
266+
267+static void
268+mtk_wdma_rx_reset(struct mtk_wed_device *dev)
269+{
270+ u32 status;
271+ u32 mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY;
272+ int i;
273+
274+ wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN);
275+ if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
276+ !(status & mask), 0, 1000))
277+ WARN_ON_ONCE(1);
278+
279+ for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
280+ if (!dev->rx_wdma[i].desc) {
281+ wdma_w32(dev, MTK_WDMA_RING_RX(i) +
282+ MTK_WED_RING_OFS_CPU_IDX, 0);
283+ }
284+}
285+
286+static void
287+mtk_wdma_tx_reset(struct mtk_wed_device *dev)
288+{
289+ u32 status;
290+ u32 mask = MTK_WDMA_GLO_CFG_TX_DMA_BUSY;
291+ int i;
292+
293+ wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
294+ if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
295+ !(status & mask), 0, 1000))
296+ WARN_ON_ONCE(1);
297+
298+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
299+ if (!dev->tx_wdma[i].desc) {
300+ wdma_w32(dev, MTK_WDMA_RING_TX(i) +
301+ MTK_WED_RING_OFS_CPU_IDX, 0);
302+ }
303+}
304+
developer8cb3ac72022-07-04 10:55:14 +0800305 static u32
306 mtk_wed_read_reset(struct mtk_wed_device *dev)
307 {
developerd7d9aa42022-12-23 16:09:53 +0800308@@ -68,6 +126,52 @@ mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
developer8cb3ac72022-07-04 10:55:14 +0800309 WARN_ON_ONCE(1);
310 }
311
312+static void
313+mtk_wed_wo_reset(struct mtk_wed_device *dev)
314+{
315+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
316+ u8 state = WO_STATE_DISABLE;
317+ u8 state_done = WOIF_DISABLE_DONE;
318+ void __iomem *reg;
319+ u32 value;
320+ unsigned long timeout = jiffies + WOCPU_TIMEOUT;
321+
developerc1b2cd12022-07-28 18:35:24 +0800322+ mtk_wdma_tx_reset(dev);
developera3f86ed2022-07-08 14:15:13 +0800323+
324+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
325+
developerd7d9aa42022-12-23 16:09:53 +0800326+ mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, MTK_WED_WO_CMD_CHANGE_STATE,
327+ &state, sizeof(state), false);
developer8cb3ac72022-07-04 10:55:14 +0800328+
329+ do {
330+ value = wed_r32(dev, MTK_WED_SCR0 + 4 * WED_DUMMY_CR_WO_STATUS);
331+ } while (value != state_done && !time_after(jiffies, timeout));
332+
333+ reg = ioremap(WOCPU_MCUSYS_RESET_ADDR, 4);
334+ value = readl((void *)reg);
335+ switch(dev->hw->index) {
336+ case 0:
337+ value |= WOCPU_WO0_MCUSYS_RESET_MASK;
338+ writel(value, (void *)reg);
339+ value &= ~WOCPU_WO0_MCUSYS_RESET_MASK;
340+ writel(value, (void *)reg);
341+ break;
342+ case 1:
343+ value |= WOCPU_WO1_MCUSYS_RESET_MASK;
344+ writel(value, (void *)reg);
345+ value &= ~WOCPU_WO1_MCUSYS_RESET_MASK;
346+ writel(value, (void *)reg);
347+ break;
348+ default:
349+ dev_err(dev->hw->dev, "wrong mtk_wed%d\n",
350+ dev->hw->index);
351+
352+ break;
353+ }
354+
355+ iounmap((void *)reg);
356+}
357+
358 static struct mtk_wed_hw *
359 mtk_wed_assign(struct mtk_wed_device *dev)
360 {
developerd7d9aa42022-12-23 16:09:53 +0800361@@ -178,7 +282,7 @@ mtk_wed_free_buffer(struct mtk_wed_device *dev)
developera3f86ed2022-07-08 14:15:13 +0800362 {
363 struct mtk_wdma_desc *desc = dev->buf_ring.desc;
364 void **page_list = dev->buf_ring.pages;
365- int page_idx;
366+ int ring_size, page_idx;
367 int i;
368
369 if (!page_list)
developerd7d9aa42022-12-23 16:09:53 +0800370@@ -187,7 +291,14 @@ mtk_wed_free_buffer(struct mtk_wed_device *dev)
developera3f86ed2022-07-08 14:15:13 +0800371 if (!desc)
372 goto free_pagelist;
373
developerf11dcd72022-08-27 18:29:27 +0800374- for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
developera3f86ed2022-07-08 14:15:13 +0800375+ if (dev->ver == MTK_WED_V1) {
376+ ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
377+ } else {
378+ ring_size = MTK_WED_VLD_GROUP_SIZE * MTK_WED_PER_GROUP_PKT +
379+ MTK_WED_WDMA_RING_SIZE * 2;
380+ }
381+
developerf11dcd72022-08-27 18:29:27 +0800382+ for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
developera3f86ed2022-07-08 14:15:13 +0800383 void *page = page_list[page_idx++];
384
developerf11dcd72022-08-27 18:29:27 +0800385 if (!page)
developerd7d9aa42022-12-23 16:09:53 +0800386@@ -198,13 +309,49 @@ mtk_wed_free_buffer(struct mtk_wed_device *dev)
developerf11dcd72022-08-27 18:29:27 +0800387 __free_page(page);
388 }
389
390- dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc),
391+ dma_free_coherent(dev->hw->dev, ring_size * sizeof(*desc),
392 desc, dev->buf_ring.desc_phys);
393
394 free_pagelist:
developer8cb3ac72022-07-04 10:55:14 +0800395 kfree(page_list);
396 }
397
398+static int
399+mtk_wed_rx_bm_alloc(struct mtk_wed_device *dev)
400+{
401+ struct mtk_rxbm_desc *desc;
402+ dma_addr_t desc_phys;
403+ int ring_size;
404+
405+ ring_size = dev->wlan.rx_nbuf;
406+ dev->rx_buf_ring.size = ring_size;
407+ desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
408+ &desc_phys, GFP_KERNEL);
409+ if (!desc)
410+ return -ENOMEM;
411+
412+ dev->rx_buf_ring.desc = desc;
413+ dev->rx_buf_ring.desc_phys = desc_phys;
414+
developer144824b2022-11-25 21:27:43 +0800415+ dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt);
developer8cb3ac72022-07-04 10:55:14 +0800416+ return 0;
417+}
418+
419+static void
420+mtk_wed_free_rx_bm(struct mtk_wed_device *dev)
421+{
422+ struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc;
developera3f86ed2022-07-08 14:15:13 +0800423+ int ring_size = dev->rx_buf_ring.size;
developer8cb3ac72022-07-04 10:55:14 +0800424+
425+ if (!desc)
426+ return;
427+
428+ dev->wlan.release_rx_buf(dev);
429+
developer9dbe57a2022-08-05 18:23:53 +0800430+ dma_free_coherent(dev->hw->dev, ring_size * sizeof(*desc),
431+ desc, dev->rx_buf_ring.desc_phys);
developer8cb3ac72022-07-04 10:55:14 +0800432+}
433+
434 static void
435 mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, int scale)
436 {
developerd7d9aa42022-12-23 16:09:53 +0800437@@ -226,13 +373,22 @@ mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800438 mtk_wed_free_ring(dev, &dev->tx_wdma[i], dev->ver);
439 }
440
441+static void
442+mtk_wed_free_rx_rings(struct mtk_wed_device *dev)
443+{
444+ mtk_wed_free_rx_bm(dev);
445+ mtk_wed_free_ring(dev, &dev->rro.rro_ring, 1);
446+}
447+
448 static void
449 mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
450 {
451 u32 wdma_mask;
452
453 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
454-
455+ if (dev->ver > MTK_WED_V1)
456+ wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
457+ GENMASK(1, 0));
458 /* wed control cr set */
459 wed_set(dev, MTK_WED_CTRL,
460 MTK_WED_CTRL_WDMA_INT_AGENT_EN |
developerd7d9aa42022-12-23 16:09:53 +0800461@@ -251,7 +407,7 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
developer8cb3ac72022-07-04 10:55:14 +0800462 wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
463 MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
464 } else {
465- /* initail tx interrupt trigger */
466+
467 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
468 MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
469 MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
developerd7d9aa42022-12-23 16:09:53 +0800470@@ -262,22 +418,30 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
developer8cb3ac72022-07-04 10:55:14 +0800471 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
472 dev->wlan.tx_tbit[1]));
473
474- /* initail txfree interrupt trigger */
475 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
476 MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
477 MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
478 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
479 dev->wlan.txfree_tbit));
480+
481+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
482+ MTK_WED_WPDMA_INT_CTRL_RX0_EN |
483+ MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
484+ MTK_WED_WPDMA_INT_CTRL_RX1_EN |
485+ MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
486+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
487+ dev->wlan.rx_tbit[0]) |
488+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
489+ dev->wlan.rx_tbit[1]));
490 }
491- /* initail wdma interrupt agent */
492 wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
493 if (dev->ver == MTK_WED_V1) {
494 wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
495 } else {
496 wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
497 wed_set(dev, MTK_WED_WDMA_INT_CTRL,
498- FIELD_PREP(MTK_WED_WDMA_INT_POLL_SRC_SEL,dev->wdma_idx));
499-
500+ FIELD_PREP(MTK_WED_WDMA_INT_POLL_SRC_SEL,
501+ dev->wdma_idx));
502 }
503
504 wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
developerd7d9aa42022-12-23 16:09:53 +0800505@@ -312,6 +476,40 @@ mtk_wed_set_512_support(struct mtk_wed_device *dev, bool en)
developer8cb3ac72022-07-04 10:55:14 +0800506 }
507 }
508
509+static void
510+mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
511+{
512+#define MTK_WFMDA_RX_DMA_EN BIT(2)
513+
514+ int timeout = 3;
515+ u32 cur_idx, regs;
516+
517+ do {
518+ regs = MTK_WED_WPDMA_RING_RX_DATA(idx) +
developerc1b2cd12022-07-28 18:35:24 +0800519+ MTK_WED_RING_OFS_CPU_IDX;
developer8cb3ac72022-07-04 10:55:14 +0800520+ cur_idx = wed_r32(dev, regs);
521+ if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
522+ break;
523+
524+ usleep_range(100000, 200000);
developerc1b2cd12022-07-28 18:35:24 +0800525+ timeout--;
526+ } while (timeout > 0);
developer8cb3ac72022-07-04 10:55:14 +0800527+
528+ if (timeout) {
529+ unsigned int val;
530+
531+ val = wifi_r32(dev, dev->wlan.wpdma_rx_glo -
532+ dev->wlan.phy_base);
533+ val |= MTK_WFMDA_RX_DMA_EN;
534+
535+ wifi_w32(dev, dev->wlan.wpdma_rx_glo -
536+ dev->wlan.phy_base, val);
537+ } else {
538+ dev_err(dev->hw->dev, "mtk_wed%d: rx dma enable failed!\n",
539+ dev->hw->index);
540+ }
541+}
542+
543 static void
544 mtk_wed_dma_enable(struct mtk_wed_device *dev)
545 {
developerd7d9aa42022-12-23 16:09:53 +0800546@@ -336,9 +534,15 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800547 wdma_set(dev, MTK_WDMA_GLO_CFG,
548 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
549 } else {
550+ int idx = 0;
551+
552 wed_set(dev, MTK_WED_WPDMA_CTRL,
553 MTK_WED_WPDMA_CTRL_SDL1_FIXED);
554
555+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
developerc1b2cd12022-07-28 18:35:24 +0800556+ MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
developer8cb3ac72022-07-04 10:55:14 +0800557+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
558+
559 wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
560 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
561 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
developerd7d9aa42022-12-23 16:09:53 +0800562@@ -346,6 +550,15 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800563 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
564 MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
565 MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
566+
567+ wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
568+ MTK_WED_WPDMA_RX_D_RX_DRV_EN |
569+ FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
570+ FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
571+ 0x2));
572+
573+ for (idx = 0; idx < MTK_WED_RX_QUEUES; idx++)
574+ mtk_wed_check_wfdma_rx_fill(dev, idx);
575 }
576 }
577
developerd7d9aa42022-12-23 16:09:53 +0800578@@ -363,19 +576,23 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800579 MTK_WED_GLO_CFG_TX_DMA_EN |
580 MTK_WED_GLO_CFG_RX_DMA_EN);
581
582- wdma_m32(dev, MTK_WDMA_GLO_CFG,
583+ wdma_clr(dev, MTK_WDMA_GLO_CFG,
584 MTK_WDMA_GLO_CFG_TX_DMA_EN |
585 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
586- MTK_WDMA_GLO_CFG_RX_INFO2_PRERES, 0);
587+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
588
589 if (dev->ver == MTK_WED_V1) {
590 regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
591- wdma_m32(dev, MTK_WDMA_GLO_CFG,
592- MTK_WDMA_GLO_CFG_RX_INFO3_PRERES, 0);
593+ wdma_clr(dev, MTK_WDMA_GLO_CFG,
594+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
595 } else {
596 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
597 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
598 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
599+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
600+ MTK_WED_WPDMA_RX_D_RX_DRV_EN);
601+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
602+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
603 }
604 }
605
developerd7d9aa42022-12-23 16:09:53 +0800606@@ -383,10 +600,12 @@ static void
developerc1b2cd12022-07-28 18:35:24 +0800607 mtk_wed_stop(struct mtk_wed_device *dev)
developera3f86ed2022-07-08 14:15:13 +0800608 {
609 mtk_wed_dma_disable(dev);
developerc1b2cd12022-07-28 18:35:24 +0800610+ mtk_wed_set_512_support(dev, false);
developera3f86ed2022-07-08 14:15:13 +0800611
612- if (dev->ver > MTK_WED_V1)
developerc1b2cd12022-07-28 18:35:24 +0800613- mtk_wed_set_512_support(dev, false);
614-
developera3f86ed2022-07-08 14:15:13 +0800615+ if (dev->ver > MTK_WED_V1) {
developera3f86ed2022-07-08 14:15:13 +0800616+ wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
617+ wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0);
618+ }
developera3f86ed2022-07-08 14:15:13 +0800619 mtk_wed_set_ext_int(dev, false);
620
developerc1b2cd12022-07-28 18:35:24 +0800621 wed_clr(dev, MTK_WED_CTRL,
developerd7d9aa42022-12-23 16:09:53 +0800622@@ -395,6 +614,11 @@ mtk_wed_stop(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800623 MTK_WED_CTRL_WED_TX_BM_EN |
624 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
625
626+ if (dev->ver > MTK_WED_V1) {
627+ wed_clr(dev, MTK_WED_CTRL,
628+ MTK_WED_CTRL_WED_RX_BM_EN);
629+ }
630+
631 wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
632 wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
633 wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
developerd7d9aa42022-12-23 16:09:53 +0800634@@ -417,10 +641,21 @@ mtk_wed_detach(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800635
636 mtk_wed_reset(dev, MTK_WED_RESET_WED);
developera3f86ed2022-07-08 14:15:13 +0800637
developer8cb3ac72022-07-04 10:55:14 +0800638+ wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
639+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
640+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
developera3f86ed2022-07-08 14:15:13 +0800641+
developer8cb3ac72022-07-04 10:55:14 +0800642 mtk_wed_free_buffer(dev);
643 mtk_wed_free_tx_rings(dev);
developera3f86ed2022-07-08 14:15:13 +0800644+ if (dev->ver > MTK_WED_V1) {
developerd7d9aa42022-12-23 16:09:53 +0800645+ mtk_wed_wo_reset(dev);
developerf50c1802022-07-05 20:35:53 +0800646+ mtk_wed_free_rx_rings(dev);
developerd7d9aa42022-12-23 16:09:53 +0800647+ mtk_wed_wo_exit(hw);
developera3f86ed2022-07-08 14:15:13 +0800648+ }
developerd7d9aa42022-12-23 16:09:53 +0800649+
650+ mtk_wdma_rx_reset(dev);
developer8cb3ac72022-07-04 10:55:14 +0800651
developer144824b2022-11-25 21:27:43 +0800652- if (dev->wlan.bus_type == MTK_BUS_TYPE_PCIE) {
653+ if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
developer8cb3ac72022-07-04 10:55:14 +0800654 wlan_node = dev->wlan.pci_dev->dev.of_node;
developer144824b2022-11-25 21:27:43 +0800655 if (of_dma_is_coherent(wlan_node))
656 regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
developerd7d9aa42022-12-23 16:09:53 +0800657@@ -443,7 +678,7 @@ mtk_wed_bus_init(struct mtk_wed_device *dev)
developer144824b2022-11-25 21:27:43 +0800658 {
659 #define PCIE_BASE_ADDR0 0x11280000
660
661- if (dev->wlan.bus_type == MTK_BUS_TYPE_PCIE) {
662+ if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
663 struct device_node *node;
664 void __iomem * base_addr;
665 u32 value = 0;
developerd7d9aa42022-12-23 16:09:53 +0800666@@ -477,7 +712,6 @@ mtk_wed_bus_init(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800667 value = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
668 value = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
669
670- /* pcie interrupt status trigger register */
671 wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
672 wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
673
developerd7d9aa42022-12-23 16:09:53 +0800674@@ -485,7 +719,7 @@ mtk_wed_bus_init(struct mtk_wed_device *dev)
developer144824b2022-11-25 21:27:43 +0800675 value = wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
676 wed_set(dev, MTK_WED_PCIE_INT_CTRL,
677 MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
678- } else if (dev->wlan.bus_type == MTK_BUS_TYPE_AXI) {
679+ } else if (dev->wlan.bus_type == MTK_WED_BUS_AXI) {
680 wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
681 MTK_WED_WPDMA_INT_CTRL_SIG_SRC |
682 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0));
developerd7d9aa42022-12-23 16:09:53 +0800683@@ -501,6 +735,9 @@ mtk_wed_set_wpdma(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800684 wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
685 wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
686 wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
687+
688+ wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
689+ wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
690 } else {
691 wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
692 }
developerd7d9aa42022-12-23 16:09:53 +0800693@@ -549,24 +786,92 @@ mtk_wed_hw_init_early(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800694 FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL,
695 MTK_WDMA_RING_RX(0)));
696 }
697+}
developerd7d9aa42022-12-23 16:09:53 +0800698
developer8cb3ac72022-07-04 10:55:14 +0800699+static void
700+mtk_wed_rx_bm_hw_init(struct mtk_wed_device *dev)
701+{
702+ wed_w32(dev, MTK_WED_RX_BM_RX_DMAD,
developer144824b2022-11-25 21:27:43 +0800703+ FIELD_PREP(MTK_WED_RX_BM_RX_DMAD_SDL0, dev->wlan.rx_size));
developer8cb3ac72022-07-04 10:55:14 +0800704+
705+ wed_w32(dev, MTK_WED_RX_BM_BASE, dev->rx_buf_ring.desc_phys);
developerd7d9aa42022-12-23 16:09:53 +0800706+
developer8cb3ac72022-07-04 10:55:14 +0800707+ wed_w32(dev, MTK_WED_RX_BM_INIT_PTR, MTK_WED_RX_BM_INIT_SW_TAIL |
developer144824b2022-11-25 21:27:43 +0800708+ FIELD_PREP(MTK_WED_RX_BM_SW_TAIL, dev->wlan.rx_npkt));
developer8cb3ac72022-07-04 10:55:14 +0800709+
710+ wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH,
711+ FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff));
712+
713+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
714 }
715
716 static void
717-mtk_wed_hw_init(struct mtk_wed_device *dev)
718+mtk_wed_rro_hw_init(struct mtk_wed_device *dev)
719+{
720+ wed_w32(dev, MTK_WED_RROQM_MIOD_CFG,
721+ FIELD_PREP(MTK_WED_RROQM_MIOD_MID_DW, 0x70 >> 2) |
722+ FIELD_PREP(MTK_WED_RROQM_MIOD_MOD_DW, 0x10 >> 2) |
723+ FIELD_PREP(MTK_WED_RROQM_MIOD_ENTRY_DW,
724+ MTK_WED_MIOD_ENTRY_CNT >> 2));
725+
726+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL0, dev->rro.miod_desc_phys);
727+
728+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL1,
729+ FIELD_PREP(MTK_WED_RROQM_MIOD_CNT, MTK_WED_MIOD_CNT));
730+
731+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL0, dev->rro.fdbk_desc_phys);
732+
733+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL1,
734+ FIELD_PREP(MTK_WED_RROQM_FDBK_CNT, MTK_WED_FB_CMD_CNT));
735+
736+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL2, 0);
737+
738+ wed_w32(dev, MTK_WED_RROQ_BASE_L, dev->rro.rro_ring.desc_phys);
739+
740+ wed_set(dev, MTK_WED_RROQM_RST_IDX,
741+ MTK_WED_RROQM_RST_IDX_MIOD |
742+ MTK_WED_RROQM_RST_IDX_FDBK);
743+
744+ wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
745+
746+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL2, MTK_WED_MIOD_CNT -1);
747+
748+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
749+}
750+
751+static void
752+mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
753+{
754+ wed_w32(dev, MTK_WED_RESET, MTK_WED_RESET_RX_ROUTE_QM);
755+
756+ do {
757+ udelay(100);
758+
759+ if (!(wed_r32(dev, MTK_WED_RESET) & MTK_WED_RESET_RX_ROUTE_QM))
760+ break;
761+ } while (1);
762+
763+ /* configure RX_ROUTE_QM */
764+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
765+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
766+ wed_set(dev, MTK_WED_RTQM_GLO_CFG,
767+ FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
768+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
769+
770+ /* enable RX_ROUTE_QM */
771+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
772+}
773+
774+static void
775+mtk_wed_tx_hw_init(struct mtk_wed_device *dev)
776 {
777 int size = dev->buf_ring.size;
778 int rev_size = MTK_WED_TX_RING_SIZE / 2;
779 int thr = 1;
780
781- if (dev->init_done)
782- return;
783-
784- dev->init_done = true;
785- mtk_wed_set_ext_int(dev, false);
786-
787 if (dev->ver > MTK_WED_V1) {
788- size = MTK_WED_WDMA_RING_SIZE * 2 + dev->buf_ring.size;
789+ size = MTK_WED_WDMA_RING_SIZE * ARRAY_SIZE(dev->tx_wdma) +
790+ dev->buf_ring.size;
791 rev_size = size;
792 thr = 0;
793 }
developerd7d9aa42022-12-23 16:09:53 +0800794@@ -609,13 +914,46 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800795 }
796
797 static void
798-mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size, int scale)
799+mtk_wed_rx_hw_init(struct mtk_wed_device *dev)
developerd7d9aa42022-12-23 16:09:53 +0800800 {
developer8cb3ac72022-07-04 10:55:14 +0800801+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
developerc1b2cd12022-07-28 18:35:24 +0800802+ MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
803+ MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
developer8cb3ac72022-07-04 10:55:14 +0800804+
805+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
806+
807+ mtk_wed_rx_bm_hw_init(dev);
808+ mtk_wed_rro_hw_init(dev);
809+ mtk_wed_route_qm_hw_init(dev);
810+}
811+
812+static void
813+mtk_wed_hw_init(struct mtk_wed_device *dev)
814+{
815+ if (dev->init_done)
816+ return;
817+
818+ dev->init_done = true;
819+ mtk_wed_set_ext_int(dev, false);
820+ mtk_wed_tx_hw_init(dev);
821+ if (dev->ver > MTK_WED_V1)
822+ mtk_wed_rx_hw_init(dev);
823+}
824+
825+static void
826+mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size, int scale, bool tx)
developerd7d9aa42022-12-23 16:09:53 +0800827+{
developer8cb3ac72022-07-04 10:55:14 +0800828+ __le32 ctrl;
829 int i;
830
831+ if (tx)
832+ ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
833+ else
834+ ctrl = cpu_to_le32(MTK_WFDMA_DESC_CTRL_TO_HOST);
835+
836 for (i = 0; i < size; i++) {
837 desc->buf0 = 0;
838- desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
839+ desc->ctrl = ctrl;
840 desc->buf1 = 0;
841 desc->info = 0;
842 desc += scale;
developerd7d9aa42022-12-23 16:09:53 +0800843@@ -674,7 +1012,7 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800844 if (!desc)
845 continue;
846
847- mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE, dev->ver);
848+ mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE, dev->ver, true);
849 }
850
851 if (mtk_wed_poll_busy(dev))
developerd7d9aa42022-12-23 16:09:53 +0800852@@ -692,6 +1030,8 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
developerc1b2cd12022-07-28 18:35:24 +0800853 wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
854 wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
855
856+ mtk_wdma_rx_reset(dev);
857+
858 if (busy) {
859 mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
860 mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
developerd7d9aa42022-12-23 16:09:53 +0800861@@ -729,9 +1069,24 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800862
863 }
864
865+static int
866+mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
867+ int size)
868+{
869+ ring->desc = dma_alloc_coherent(dev->hw->dev,
870+ size * sizeof(*ring->desc),
871+ &ring->desc_phys, GFP_KERNEL);
872+ if (!ring->desc)
873+ return -ENOMEM;
874+
875+ ring->size = size;
876+ memset(ring->desc, 0, size);
877+ return 0;
878+}
879+
880 static int
881 mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
882- int size, int scale)
883+ int size, int scale, bool tx)
884 {
885 ring->desc = dma_alloc_coherent(dev->hw->dev,
886 size * sizeof(*ring->desc) * scale,
developerd7d9aa42022-12-23 16:09:53 +0800887@@ -740,17 +1095,18 @@ mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
developer8cb3ac72022-07-04 10:55:14 +0800888 return -ENOMEM;
889
890 ring->size = size;
891- mtk_wed_ring_reset(ring->desc, size, scale);
892+ mtk_wed_ring_reset(ring->desc, size, scale, tx);
893
894 return 0;
895 }
896
897 static int
898-mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
899+mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
900 {
901 struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
902
903- if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, dev->ver))
904+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
905+ dev->ver, true))
906 return -ENOMEM;
907
908 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
developerd7d9aa42022-12-23 16:09:53 +0800909@@ -767,22 +1123,143 @@ mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
developer8cb3ac72022-07-04 10:55:14 +0800910 return 0;
911 }
912
913+static int
914+mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
915+{
916+ struct mtk_wed_ring *wdma = &dev->rx_wdma[idx];
917+
918+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
919+ dev->ver, true))
920+ return -ENOMEM;
921+
922+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
923+ wdma->desc_phys);
924+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
925+ size);
926+ wdma_w32(dev,
927+ MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
928+ wdma_w32(dev,
929+ MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);
930+
931+ if (idx == 0) {
932+ wed_w32(dev, MTK_WED_WDMA_RING_TX
933+ + MTK_WED_RING_OFS_BASE, wdma->desc_phys);
934+ wed_w32(dev, MTK_WED_WDMA_RING_TX
935+ + MTK_WED_RING_OFS_COUNT, size);
936+ wed_w32(dev, MTK_WED_WDMA_RING_TX
937+ + MTK_WED_RING_OFS_CPU_IDX, 0);
938+ wed_w32(dev, MTK_WED_WDMA_RING_TX
939+ + MTK_WED_RING_OFS_DMA_IDX, 0);
940+ }
941+
942+ return 0;
943+}
944+
945+static int
946+mtk_wed_rro_alloc(struct mtk_wed_device *dev)
947+{
948+ struct device_node *np, *node = dev->hw->node;
949+ struct mtk_wed_ring *ring;
950+ struct resource res;
951+ int ret;
952+
953+ np = of_parse_phandle(node, "mediatek,wocpu_dlm", 0);
954+ if (!np)
955+ return -ENODEV;
956+
957+ ret = of_address_to_resource(np, 0, &res);
958+ if (ret)
959+ return ret;
960+
961+ dev->rro.rro_desc = ioremap(res.start, resource_size(&res));
962+
963+ ring = &dev->rro.rro_ring;
964+
965+ dev->rro.miod_desc_phys = res.start;
966+
967+ dev->rro.mcu_view_miod = MTK_WED_WOCPU_VIEW_MIOD_BASE;
968+ dev->rro.fdbk_desc_phys = MTK_WED_MIOD_ENTRY_CNT * MTK_WED_MIOD_CNT
969+ + dev->rro.miod_desc_phys;
970+
971+ if (mtk_wed_rro_ring_alloc(dev, ring, MTK_WED_RRO_QUE_CNT))
972+ return -ENOMEM;
973+
974+ return 0;
975+}
976+
977+static int
978+mtk_wed_rro_cfg(struct mtk_wed_device *dev)
979+{
980+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
981+ struct {
982+ struct wo_cmd_ring ring[2];
983+
984+ u32 wed;
985+ u8 ver;
986+ } req = {
987+ .ring = {
988+ [0] = {
989+ .q_base = dev->rro.mcu_view_miod,
990+ .cnt = MTK_WED_MIOD_CNT,
991+ .unit = MTK_WED_MIOD_ENTRY_CNT,
992+ },
993+ [1] = {
994+ .q_base = dev->rro.mcu_view_miod +
995+ MTK_WED_MIOD_ENTRY_CNT *
996+ MTK_WED_MIOD_CNT,
997+ .cnt = MTK_WED_FB_CMD_CNT,
998+ .unit = 4,
999+ },
1000+ },
1001+ .wed = 0,
1002+ };
1003+
developer144824b2022-11-25 21:27:43 +08001004+ return mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, MTK_WED_WO_CMD_WED_CFG,
developer8cb3ac72022-07-04 10:55:14 +08001005+ &req, sizeof(req), true);
1006+}
1007+
1008+static int
1009+mtk_wed_send_msg(struct mtk_wed_device *dev, int cmd_id, void *data, int len)
1010+{
1011+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
1012+
developerf50c1802022-07-05 20:35:53 +08001013+ if (dev->ver == MTK_WED_V1)
1014+ return 0;
1015+
developer8cb3ac72022-07-04 10:55:14 +08001016+ return mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, cmd_id, data, len, true);
1017+}
1018+
1019+static void
1020+mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb,
1021+ u32 reason, u32 hash)
1022+{
1023+ int idx = dev->hw->index;
1024+ struct mtk_eth *eth = dev->hw->eth;
1025+ struct ethhdr *eh;
1026+
1027+ if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) {
1028+ if (!skb)
1029+ return;
1030+
1031+ skb_set_mac_header(skb, 0);
1032+ eh = eth_hdr(skb);
1033+ skb->protocol = eh->h_proto;
1034+ mtk_ppe_check_skb(eth->ppe[idx], skb, hash);
1035+ }
1036+}
1037+
1038 static void
1039 mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
1040 {
1041- u32 wdma_mask;
1042- int i;
1043+ int i, ret;
1044
1045 for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
1046 if (!dev->tx_wdma[i].desc)
1047- mtk_wed_wdma_ring_setup(dev, i, 16);
1048-
1049+ mtk_wed_wdma_rx_ring_setup(dev, i, 16);
1050
1051 mtk_wed_hw_init(dev);
1052
1053 mtk_wed_set_int(dev, irq_mask);
1054-
1055-
1056 mtk_wed_set_ext_int(dev, true);
1057
1058 if (dev->ver == MTK_WED_V1) {
developerd7d9aa42022-12-23 16:09:53 +08001059@@ -797,8 +1274,20 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
developer8cb3ac72022-07-04 10:55:14 +08001060 val |= BIT(0);
1061 regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
1062 } else {
developer203096a2022-09-13 21:07:19 +08001063- mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
developer8cb3ac72022-07-04 10:55:14 +08001064+ /* driver set mid ready and only once */
1065+ wed_w32(dev, MTK_WED_EXT_INT_MASK1,
1066+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
1067+ wed_w32(dev, MTK_WED_EXT_INT_MASK2,
1068+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
1069+
1070+ wed_r32(dev, MTK_WED_EXT_INT_MASK1);
1071+ wed_r32(dev, MTK_WED_EXT_INT_MASK2);
1072+
1073+ ret = mtk_wed_rro_cfg(dev);
1074+ if (ret)
1075+ return;
developer8cb3ac72022-07-04 10:55:14 +08001076 }
developer203096a2022-09-13 21:07:19 +08001077+ mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
developer8cb3ac72022-07-04 10:55:14 +08001078
developerc1b2cd12022-07-28 18:35:24 +08001079 mtk_wed_dma_enable(dev);
1080 dev->running = true;
developerd7d9aa42022-12-23 16:09:53 +08001081@@ -809,6 +1298,7 @@ mtk_wed_attach(struct mtk_wed_device *dev)
developer144824b2022-11-25 21:27:43 +08001082 __releases(RCU)
1083 {
1084 struct mtk_wed_hw *hw;
1085+ struct device *device;
1086 u16 ver;
1087 int ret = 0;
1088
developerd7d9aa42022-12-23 16:09:53 +08001089@@ -829,6 +1319,12 @@ mtk_wed_attach(struct mtk_wed_device *dev)
developer144824b2022-11-25 21:27:43 +08001090 goto out;
1091 }
1092
1093+ device = dev->wlan.bus_type == MTK_WED_BUS_PCIE
1094+ ? &dev->wlan.pci_dev->dev
1095+ : &dev->wlan.platform_dev->dev;
1096+ dev_info(device, "attaching wed device %d version %d\n",
1097+ hw->index, hw->ver);
1098+
1099 dev->hw = hw;
1100 dev->dev = hw->dev;
1101 dev->irq = hw->irq;
developerd7d9aa42022-12-23 16:09:53 +08001102@@ -847,9 +1343,17 @@ mtk_wed_attach(struct mtk_wed_device *dev)
developere0cbe332022-09-10 17:36:02 +08001103 dev->rev_id = ((dev->ver << 28) | ver << 16);
developer8cb3ac72022-07-04 10:55:14 +08001104
1105 ret = mtk_wed_buffer_alloc(dev);
1106- if (ret) {
1107- mtk_wed_detach(dev);
1108- goto out;
1109+ if (ret)
1110+ goto error;
1111+
1112+ if (dev->ver > MTK_WED_V1) {
1113+ ret = mtk_wed_rx_bm_alloc(dev);
1114+ if (ret)
1115+ goto error;
1116+
1117+ ret = mtk_wed_rro_alloc(dev);
1118+ if (ret)
1119+ goto error;
1120 }
1121
1122 mtk_wed_hw_init_early(dev);
developerd7d9aa42022-12-23 16:09:53 +08001123@@ -857,7 +1361,12 @@ mtk_wed_attach(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +08001124 if (dev->ver == MTK_WED_V1)
1125 regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
1126 BIT(hw->index), 0);
1127+ else
1128+ ret = mtk_wed_wo_init(hw);
1129
1130+error:
developerd7d9aa42022-12-23 16:09:53 +08001131+ if (ret)
1132+ mtk_wed_detach(dev);
developer8cb3ac72022-07-04 10:55:14 +08001133 out:
1134 mutex_unlock(&hw_lock);
1135
developerd7d9aa42022-12-23 16:09:53 +08001136@@ -883,10 +1392,10 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
developer8cb3ac72022-07-04 10:55:14 +08001137
1138 BUG_ON(idx > ARRAY_SIZE(dev->tx_ring));
1139
1140- if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, 1))
1141+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, 1, true))
1142 return -ENOMEM;
1143
1144- if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
1145+ if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
1146 return -ENOMEM;
1147
1148 ring->reg_base = MTK_WED_RING_TX(idx);
developerd7d9aa42022-12-23 16:09:53 +08001149@@ -933,6 +1442,35 @@ mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
developer8cb3ac72022-07-04 10:55:14 +08001150 return 0;
1151 }
1152
1153+static int
1154+mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
1155+{
1156+ struct mtk_wed_ring *ring = &dev->rx_ring[idx];
1157+
1158+ BUG_ON(idx > ARRAY_SIZE(dev->rx_ring));
1159+
1160+
1161+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE, 1, false))
1162+ return -ENOMEM;
1163+
1164+ if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
1165+ return -ENOMEM;
1166+
1167+ ring->reg_base = MTK_WED_RING_RX_DATA(idx);
1168+ ring->wpdma = regs;
1169+
1170+ /* WPDMA -> WED */
1171+ wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
1172+ wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_RX_RING_SIZE);
1173+
1174+ wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_BASE,
1175+ ring->desc_phys);
1176+ wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_COUNT,
1177+ MTK_WED_RX_RING_SIZE);
1178+
1179+ return 0;
1180+}
1181+
1182 static u32
1183 mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
1184 {
developer58aa0682023-09-18 14:02:26 +08001185@@ -1022,6 +1560,8 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
developer8cb3ac72022-07-04 10:55:14 +08001186 .attach = mtk_wed_attach,
1187 .tx_ring_setup = mtk_wed_tx_ring_setup,
1188 .txfree_ring_setup = mtk_wed_txfree_ring_setup,
1189+ .rx_ring_setup = mtk_wed_rx_ring_setup,
1190+ .msg_update = mtk_wed_send_msg,
1191 .start = mtk_wed_start,
1192 .stop = mtk_wed_stop,
1193 .reset_dma = mtk_wed_reset_dma,
developer58aa0682023-09-18 14:02:26 +08001194@@ -1030,6 +1570,7 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
developer8cb3ac72022-07-04 10:55:14 +08001195 .irq_get = mtk_wed_irq_get,
1196 .irq_set_mask = mtk_wed_irq_set_mask,
1197 .detach = mtk_wed_detach,
1198+ .ppe_check = mtk_wed_ppe_check,
1199 };
1200 struct device_node *eth_np = eth->dev->of_node;
1201 struct platform_device *pdev;
developer58aa0682023-09-18 14:02:26 +08001202@@ -1069,6 +1610,7 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
developer144824b2022-11-25 21:27:43 +08001203 hw->wdma_phy = wdma_phy;
1204 hw->index = index;
1205 hw->irq = irq;
1206+ hw->ver = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
1207
1208 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
1209 hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
developer58aa0682023-09-18 14:02:26 +08001210@@ -1085,6 +1627,7 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
developerc1b2cd12022-07-28 18:35:24 +08001211 regmap_write(hw->mirror, 0, 0);
1212 regmap_write(hw->mirror, 4, 0);
1213 }
1214+ hw->ver = MTK_WED_V1;
1215 }
1216
1217 mtk_wed_hw_add_debugfs(hw);
developer8cb3ac72022-07-04 10:55:14 +08001218diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
developere0cbe332022-09-10 17:36:02 +08001219index 9b17b74..8ef5253 100644
developer8cb3ac72022-07-04 10:55:14 +08001220--- a/drivers/net/ethernet/mediatek/mtk_wed.h
1221+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
1222@@ -13,6 +13,7 @@
1223 #define MTK_WED_PKT_SIZE 1900
1224 #define MTK_WED_BUF_SIZE 2048
1225 #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
1226+#define MTK_WED_RX_RING_SIZE 1536
1227
1228 #define MTK_WED_TX_RING_SIZE 2048
1229 #define MTK_WED_WDMA_RING_SIZE 512
1230@@ -21,8 +22,15 @@
1231 #define MTK_WED_PER_GROUP_PKT 128
1232
1233 #define MTK_WED_FBUF_SIZE 128
1234+#define MTK_WED_MIOD_CNT 16
1235+#define MTK_WED_FB_CMD_CNT 1024
1236+#define MTK_WED_RRO_QUE_CNT 8192
1237+#define MTK_WED_MIOD_ENTRY_CNT 128
1238+
1239+#define MODULE_ID_WO 1
1240
1241 struct mtk_eth;
1242+struct mtk_wed_wo;
1243
1244 struct mtk_wed_hw {
1245 struct device_node *node;
1246@@ -34,12 +42,14 @@ struct mtk_wed_hw {
1247 struct regmap *mirror;
1248 struct dentry *debugfs_dir;
1249 struct mtk_wed_device *wed_dev;
1250+ struct mtk_wed_wo *wed_wo;
1251 u32 debugfs_reg;
1252 u32 num_flows;
1253 u32 wdma_phy;
1254 char dirname[5];
1255 int irq;
1256 int index;
1257+ u32 ver;
1258 };
1259
1260 struct mtk_wdma_info {
1261@@ -66,6 +76,18 @@ wed_r32(struct mtk_wed_device *dev, u32 reg)
1262 return val;
1263 }
1264
1265+static inline u32
1266+wifi_r32(struct mtk_wed_device *dev, u32 reg)
1267+{
1268+ return readl(dev->wlan.base + reg);
1269+}
1270+
1271+static inline void
1272+wifi_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
1273+{
1274+ writel(val, dev->wlan.base + reg);
1275+}
1276+
1277 static inline void
1278 wdma_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
1279 {
1280@@ -114,6 +136,23 @@ wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
1281 writel(val, dev->txfree_ring.wpdma + reg);
1282 }
1283
1284+static inline u32
1285+wpdma_rx_r32(struct mtk_wed_device *dev, int ring, u32 reg)
1286+{
1287+ if (!dev->rx_ring[ring].wpdma)
1288+ return 0;
1289+
1290+ return readl(dev->rx_ring[ring].wpdma + reg);
1291+}
1292+
1293+static inline void
1294+wpdma_rx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val)
1295+{
1296+ if (!dev->rx_ring[ring].wpdma)
1297+ return;
1298+
1299+ writel(val, dev->rx_ring[ring].wpdma + reg);
1300+}
1301 void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
1302 void __iomem *wdma, u32 wdma_phy, int index);
1303 void mtk_wed_exit(void);
developera3f86ed2022-07-08 14:15:13 +08001304@@ -146,4 +185,16 @@ static inline void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
developer8cb3ac72022-07-04 10:55:14 +08001305 }
1306 #endif
1307
1308+int wed_wo_hardware_init(struct mtk_wed_wo *wo, irq_handler_t isr);
developera3f86ed2022-07-08 14:15:13 +08001309+void wed_wo_hardware_exit(struct mtk_wed_wo *wo);
developer8cb3ac72022-07-04 10:55:14 +08001310+int wed_wo_mcu_init(struct mtk_wed_wo *wo);
1311+int mtk_wed_exception_init(struct mtk_wed_wo *wo);
1312+void mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo, struct sk_buff *skb);
1313+int mtk_wed_mcu_cmd_sanity_check(struct mtk_wed_wo *wo, struct sk_buff *skb);
1314+void wed_wo_mcu_debugfs(struct mtk_wed_hw *hw, struct dentry *dir);
1315+void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo, struct sk_buff *skb);
1316+int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo,int to_id, int cmd,
1317+ const void *data, int len, bool wait_resp);
1318+int mtk_wed_wo_rx_poll(struct napi_struct *napi, int budget);
1319+
1320 #endif
1321diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ccif.c b/drivers/net/ethernet/mediatek/mtk_wed_ccif.c
1322new file mode 100644
developerd7d9aa42022-12-23 16:09:53 +08001323index 0000000..951278b
developer8cb3ac72022-07-04 10:55:14 +08001324--- /dev/null
1325+++ b/drivers/net/ethernet/mediatek/mtk_wed_ccif.c
developerd7d9aa42022-12-23 16:09:53 +08001326@@ -0,0 +1,133 @@
developer8cb3ac72022-07-04 10:55:14 +08001327+// SPDX-License-Identifier: GPL-2.0-only
1328+
1329+#include <linux/soc/mediatek/mtk_wed.h>
1330+#include <linux/of_address.h>
1331+#include <linux/mfd/syscon.h>
1332+#include <linux/of_irq.h>
1333+#include "mtk_wed_ccif.h"
1334+#include "mtk_wed_regs.h"
1335+#include "mtk_wed_wo.h"
1336+
1337+static inline void woif_set_isr(struct mtk_wed_wo *wo, u32 mask)
1338+{
1339+ woccif_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
1340+}
1341+
1342+static inline u32 woif_get_csr(struct mtk_wed_wo *wo)
1343+{
1344+ u32 val;
1345+
1346+ val = woccif_r32(wo, MTK_WED_WO_CCIF_RCHNUM);
1347+
1348+ return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
1349+}
1350+
1351+static inline void woif_set_ack(struct mtk_wed_wo *wo, u32 mask)
1352+{
1353+ woccif_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
1354+}
1355+
1356+static inline void woif_kickout(struct mtk_wed_wo *wo)
1357+{
1358+ woccif_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
1359+ woccif_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
1360+}
1361+
1362+static inline void woif_clear_int(struct mtk_wed_wo *wo, u32 mask)
1363+{
1364+ woccif_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
1365+ woccif_r32(wo, MTK_WED_WO_CCIF_RCHNUM);
1366+}
1367+
1368+int wed_wo_hardware_init(struct mtk_wed_wo *wo, irq_handler_t isr)
1369+{
1370+ static const struct wed_wo_drv_ops wo_drv_ops = {
1371+ .kickout = woif_kickout,
1372+ .set_ack = woif_set_ack,
1373+ .set_isr = woif_set_isr,
1374+ .get_csr = woif_get_csr,
1375+ .clear_int = woif_clear_int,
1376+ };
1377+ struct device_node *np, *node = wo->hw->node;
1378+ struct wed_wo_queue_regs queues;
1379+ struct regmap *regs;
1380+ int ret;
1381+
1382+ np = of_parse_phandle(node, "mediatek,ap2woccif", 0);
1383+ if (!np)
1384+ return -ENODEV;
1385+
developerd7d9aa42022-12-23 16:09:53 +08001386+ regs = syscon_regmap_lookup_by_phandle(np, NULL);
1387+ if (!regs)
1388+ return -ENODEV;
developer8cb3ac72022-07-04 10:55:14 +08001389+
1390+ wo->drv_ops = &wo_drv_ops;
developerd7d9aa42022-12-23 16:09:53 +08001391+
1392+ wo->ccif.regs = regs;
developer8cb3ac72022-07-04 10:55:14 +08001393+ wo->ccif.irq = irq_of_parse_and_map(np, 0);
1394+
1395+ spin_lock_init(&wo->ccif.irq_lock);
1396+
1397+ ret = request_irq(wo->ccif.irq, isr, IRQF_TRIGGER_HIGH,
1398+ "wo_ccif_isr", wo);
1399+ if (ret)
1400+ goto free_irq;
1401+
1402+ queues.desc_base = MTK_WED_WO_CCIF_DUMMY1;
1403+ queues.ring_size = MTK_WED_WO_CCIF_DUMMY2;
1404+ queues.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;
1405+ queues.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
1406+
1407+ ret = mtk_wed_wo_q_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
1408+ MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
1409+ &queues);
1410+
1411+ if (ret)
1412+ goto free_irq;
1413+
1414+ queues.desc_base = MTK_WED_WO_CCIF_DUMMY5;
1415+ queues.ring_size = MTK_WED_WO_CCIF_DUMMY6;
1416+ queues.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;
1417+ queues.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
1418+
1419+ ret = mtk_wed_wo_q_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
1420+ MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
1421+ &queues);
1422+ if (ret)
1423+ goto free_irq;
1424+
1425+ wo->ccif.q_int_mask = MTK_WED_WO_RXCH_INT_MASK;
1426+
1427+ ret = mtk_wed_wo_q_init(wo, mtk_wed_wo_rx_poll);
1428+ if (ret)
1429+ goto free_irq;
1430+
1431+ wo->ccif.q_exep_mask = MTK_WED_WO_EXCEPTION_INT_MASK;
1432+ wo->ccif.irqmask = MTK_WED_WO_ALL_INT_MASK;
1433+
1434+ /* rx queue irqmask */
1435+ wo->drv_ops->set_isr(wo, wo->ccif.irqmask);
1436+
1437+ return 0;
1438+
1439+free_irq:
developera3f86ed2022-07-08 14:15:13 +08001440+ free_irq(wo->ccif.irq, wo);
developer8cb3ac72022-07-04 10:55:14 +08001441+
1442+ return ret;
1443+}
1444+
developera3f86ed2022-07-08 14:15:13 +08001445+void wed_wo_hardware_exit(struct mtk_wed_wo *wo)
developer8cb3ac72022-07-04 10:55:14 +08001446+{
developera3f86ed2022-07-08 14:15:13 +08001447+ wo->drv_ops->set_isr(wo, 0);
1448+
1449+ disable_irq(wo->ccif.irq);
1450+ free_irq(wo->ccif.irq, wo);
1451+
1452+ tasklet_disable(&wo->irq_tasklet);
1453+ netif_napi_del(&wo->napi);
1454+
developer53bfd362022-09-29 12:02:18 +08001455+ mtk_wed_wo_q_tx_clean(wo, &wo->q_tx);
developera3f86ed2022-07-08 14:15:13 +08001456+ mtk_wed_wo_q_rx_clean(wo, &wo->q_rx);
1457+ mtk_wed_wo_q_free(wo, &wo->q_tx);
1458+ mtk_wed_wo_q_free(wo, &wo->q_rx);
developer8cb3ac72022-07-04 10:55:14 +08001459+}
1460diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ccif.h b/drivers/net/ethernet/mediatek/mtk_wed_ccif.h
1461new file mode 100644
developere0cbe332022-09-10 17:36:02 +08001462index 0000000..68ade44
developer8cb3ac72022-07-04 10:55:14 +08001463--- /dev/null
1464+++ b/drivers/net/ethernet/mediatek/mtk_wed_ccif.h
1465@@ -0,0 +1,45 @@
1466+// SPDX-License-Identifier: GPL-2.0-only
1467+
1468+#ifndef __MTK_WED_CCIF_H
1469+#define __MTK_WED_CCIF_H
1470+
1471+#define MTK_WED_WO_RING_SIZE 256
1472+#define MTK_WED_WO_CMD_LEN 1504
1473+
1474+#define MTK_WED_WO_TXCH_NUM 0
1475+#define MTK_WED_WO_RXCH_NUM 1
1476+#define MTK_WED_WO_RXCH_WO_EXCEPTION 7
1477+
1478+#define MTK_WED_WO_TXCH_INT_MASK BIT(0)
1479+#define MTK_WED_WO_RXCH_INT_MASK BIT(1)
1480+#define MTK_WED_WO_EXCEPTION_INT_MASK BIT(7)
1481+#define MTK_WED_WO_ALL_INT_MASK MTK_WED_WO_RXCH_INT_MASK | \
1482+ MTK_WED_WO_EXCEPTION_INT_MASK
1483+
1484+#define MTK_WED_WO_CCIF_BUSY 0x004
1485+#define MTK_WED_WO_CCIF_START 0x008
1486+#define MTK_WED_WO_CCIF_TCHNUM 0x00c
1487+#define MTK_WED_WO_CCIF_RCHNUM 0x010
1488+#define MTK_WED_WO_CCIF_RCHNUM_MASK GENMASK(7, 0)
1489+
1490+#define MTK_WED_WO_CCIF_ACK 0x014
1491+#define MTK_WED_WO_CCIF_IRQ0_MASK 0x018
1492+#define MTK_WED_WO_CCIF_IRQ1_MASK 0x01c
1493+#define MTK_WED_WO_CCIF_DUMMY1 0x020
1494+#define MTK_WED_WO_CCIF_DUMMY2 0x024
1495+#define MTK_WED_WO_CCIF_DUMMY3 0x028
1496+#define MTK_WED_WO_CCIF_DUMMY4 0x02c
1497+#define MTK_WED_WO_CCIF_SHADOW1 0x030
1498+#define MTK_WED_WO_CCIF_SHADOW2 0x034
1499+#define MTK_WED_WO_CCIF_SHADOW3 0x038
1500+#define MTK_WED_WO_CCIF_SHADOW4 0x03c
1501+#define MTK_WED_WO_CCIF_DUMMY5 0x050
1502+#define MTK_WED_WO_CCIF_DUMMY6 0x054
1503+#define MTK_WED_WO_CCIF_DUMMY7 0x058
1504+#define MTK_WED_WO_CCIF_DUMMY8 0x05c
1505+#define MTK_WED_WO_CCIF_SHADOW5 0x060
1506+#define MTK_WED_WO_CCIF_SHADOW6 0x064
1507+#define MTK_WED_WO_CCIF_SHADOW7 0x068
1508+#define MTK_WED_WO_CCIF_SHADOW8 0x06c
1509+
1510+#endif
1511diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
developere0cbe332022-09-10 17:36:02 +08001512index f420f18..4a9e684 100644
developer8cb3ac72022-07-04 10:55:14 +08001513--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
1514+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
1515@@ -2,6 +2,7 @@
1516 /* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
1517
1518 #include <linux/seq_file.h>
1519+#include <linux/soc/mediatek/mtk_wed.h>
1520 #include "mtk_wed.h"
1521 #include "mtk_wed_regs.h"
1522
1523@@ -18,6 +19,8 @@ enum {
1524 DUMP_TYPE_WDMA,
1525 DUMP_TYPE_WPDMA_TX,
1526 DUMP_TYPE_WPDMA_TXFREE,
1527+ DUMP_TYPE_WPDMA_RX,
1528+ DUMP_TYPE_WED_RRO,
1529 };
1530
1531 #define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
1532@@ -36,6 +39,10 @@ enum {
1533
1534 #define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n)
1535 #define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE)
1536+#define DUMP_WPDMA_RX_RING(_n) DUMP_RING("WPDMA_RX" #_n, 0, DUMP_TYPE_WPDMA_RX, _n)
1537+#define DUMP_WED_RRO_RING(_base)DUMP_RING("WED_RRO_MIOD", MTK_##_base, DUMP_TYPE_WED_RRO)
1538+#define DUMP_WED_RRO_FDBK(_base)DUMP_RING("WED_RRO_FDBK", MTK_##_base, DUMP_TYPE_WED_RRO)
1539+
1540
1541 static void
1542 print_reg_val(struct seq_file *s, const char *name, u32 val)
1543@@ -58,6 +65,7 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
1544 cur->name);
1545 continue;
1546 case DUMP_TYPE_WED:
1547+ case DUMP_TYPE_WED_RRO:
1548 val = wed_r32(dev, cur->offset);
1549 break;
1550 case DUMP_TYPE_WDMA:
1551@@ -69,6 +77,9 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
1552 case DUMP_TYPE_WPDMA_TXFREE:
1553 val = wpdma_txfree_r32(dev, cur->offset);
1554 break;
1555+ case DUMP_TYPE_WPDMA_RX:
1556+ val = wpdma_rx_r32(dev, cur->base, cur->offset);
1557+ break;
1558 }
1559 print_reg_val(s, cur->name, val);
1560 }
1561@@ -132,6 +143,81 @@ wed_txinfo_show(struct seq_file *s, void *data)
1562 }
1563 DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
1564
1565+static int
1566+wed_rxinfo_show(struct seq_file *s, void *data)
1567+{
1568+ static const struct reg_dump regs[] = {
1569+ DUMP_STR("WPDMA RX"),
1570+ DUMP_WPDMA_RX_RING(0),
1571+ DUMP_WPDMA_RX_RING(1),
1572+
1573+ DUMP_STR("WPDMA RX"),
1574+ DUMP_WED(WED_WPDMA_RX_D_MIB(0)),
1575+ DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(0)),
1576+ DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(0)),
1577+ DUMP_WED(WED_WPDMA_RX_D_MIB(1)),
1578+ DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(1)),
1579+ DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(1)),
1580+ DUMP_WED(WED_WPDMA_RX_D_COHERENT_MIB),
1581+
1582+ DUMP_STR("WED RX"),
1583+ DUMP_WED_RING(WED_RING_RX_DATA(0)),
1584+ DUMP_WED_RING(WED_RING_RX_DATA(1)),
1585+
1586+ DUMP_STR("WED RRO"),
1587+ DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0),
1588+ DUMP_WED(WED_RROQM_MID_MIB),
1589+ DUMP_WED(WED_RROQM_MOD_MIB),
1590+ DUMP_WED(WED_RROQM_MOD_COHERENT_MIB),
1591+ DUMP_WED_RRO_FDBK(WED_RROQM_FDBK_CTRL0),
1592+ DUMP_WED(WED_RROQM_FDBK_IND_MIB),
1593+ DUMP_WED(WED_RROQM_FDBK_ENQ_MIB),
1594+ DUMP_WED(WED_RROQM_FDBK_ANC_MIB),
1595+ DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB),
1596+
1597+ DUMP_STR("WED Route QM"),
1598+ DUMP_WED(WED_RTQM_R2H_MIB(0)),
1599+ DUMP_WED(WED_RTQM_R2Q_MIB(0)),
1600+ DUMP_WED(WED_RTQM_Q2H_MIB(0)),
1601+ DUMP_WED(WED_RTQM_R2H_MIB(1)),
1602+ DUMP_WED(WED_RTQM_R2Q_MIB(1)),
1603+ DUMP_WED(WED_RTQM_Q2H_MIB(1)),
1604+ DUMP_WED(WED_RTQM_Q2N_MIB),
1605+ DUMP_WED(WED_RTQM_Q2B_MIB),
1606+ DUMP_WED(WED_RTQM_PFDBK_MIB),
1607+
1608+ DUMP_STR("WED WDMA TX"),
1609+ DUMP_WED(WED_WDMA_TX_MIB),
1610+ DUMP_WED_RING(WED_WDMA_RING_TX),
1611+
1612+ DUMP_STR("WDMA TX"),
1613+ DUMP_WDMA(WDMA_GLO_CFG),
1614+ DUMP_WDMA_RING(WDMA_RING_TX(0)),
1615+ DUMP_WDMA_RING(WDMA_RING_TX(1)),
1616+
1617+ DUMP_STR("WED RX BM"),
1618+ DUMP_WED(WED_RX_BM_BASE),
1619+ DUMP_WED(WED_RX_BM_RX_DMAD),
1620+ DUMP_WED(WED_RX_BM_PTR),
1621+ DUMP_WED(WED_RX_BM_TKID_MIB),
1622+ DUMP_WED(WED_RX_BM_BLEN),
1623+ DUMP_WED(WED_RX_BM_STS),
1624+ DUMP_WED(WED_RX_BM_INTF2),
1625+ DUMP_WED(WED_RX_BM_INTF),
1626+ DUMP_WED(WED_RX_BM_ERR_STS),
1627+ };
1628+
1629+ struct mtk_wed_hw *hw = s->private;
1630+ struct mtk_wed_device *dev = hw->wed_dev;
1631+
1632+ if (!dev)
1633+ return 0;
1634+
1635+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
1636+
1637+ return 0;
1638+}
1639+DEFINE_SHOW_ATTRIBUTE(wed_rxinfo);
1640
1641 static int
1642 mtk_wed_reg_set(void *data, u64 val)
1643@@ -175,4 +261,8 @@ void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
1644 debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
1645 debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
1646 debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
1647+ debugfs_create_file_unsafe("rxinfo", 0400, dir, hw, &wed_rxinfo_fops);
developerc1b2cd12022-07-28 18:35:24 +08001648+ if (hw->ver != MTK_WED_V1) {
developer8cb3ac72022-07-04 10:55:14 +08001649+ wed_wo_mcu_debugfs(hw, dir);
1650+ }
1651 }
1652diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
1653new file mode 100644
developer144824b2022-11-25 21:27:43 +08001654index 0000000..96e30a3
developer8cb3ac72022-07-04 10:55:14 +08001655--- /dev/null
1656+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
developer428eaaa2023-10-06 15:48:21 +08001657@@ -0,0 +1,590 @@
developer8cb3ac72022-07-04 10:55:14 +08001658+// SPDX-License-Identifier: GPL-2.0-only
1659+
1660+#include <linux/skbuff.h>
1661+#include <linux/debugfs.h>
1662+#include <linux/firmware.h>
1663+#include <linux/of_address.h>
1664+#include <linux/soc/mediatek/mtk_wed.h>
1665+#include "mtk_wed_regs.h"
1666+#include "mtk_wed_mcu.h"
1667+#include "mtk_wed_wo.h"
1668+
1669+struct sk_buff *
1670+mtk_wed_mcu_msg_alloc(struct mtk_wed_wo *wo,
1671+ const void *data, int data_len)
1672+{
1673+ const struct wed_wo_mcu_ops *ops = wo->mcu_ops;
1674+ int length = ops->headroom + data_len;
1675+ struct sk_buff *skb;
1676+
1677+ skb = alloc_skb(length, GFP_KERNEL);
1678+ if (!skb)
1679+ return NULL;
1680+
1681+ memset(skb->head, 0, length);
1682+ skb_reserve(skb, ops->headroom);
1683+
1684+ if (data && data_len)
1685+ skb_put_data(skb, data, data_len);
1686+
1687+ return skb;
1688+}
1689+
1690+struct sk_buff *
1691+mtk_wed_mcu_get_response(struct mtk_wed_wo *wo, unsigned long expires)
1692+{
1693+ unsigned long timeout;
1694+
1695+ if (!time_is_after_jiffies(expires))
1696+ return NULL;
1697+
1698+ timeout = expires - jiffies;
1699+ wait_event_timeout(wo->mcu.wait,
1700+ (!skb_queue_empty(&wo->mcu.res_q)),
1701+ timeout);
1702+
1703+ return skb_dequeue(&wo->mcu.res_q);
1704+}
1705+
1706+int
1707+mtk_wed_mcu_skb_send_and_get_msg(struct mtk_wed_wo *wo,
1708+ int to_id, int cmd, struct sk_buff *skb,
1709+ bool wait_resp, struct sk_buff **ret_skb)
1710+{
1711+ unsigned long expires;
1712+ int ret, seq;
1713+
1714+ if (ret_skb)
1715+ *ret_skb = NULL;
1716+
1717+ mutex_lock(&wo->mcu.mutex);
1718+
1719+ ret = wo->mcu_ops->mcu_skb_send_msg(wo, to_id, cmd, skb, &seq, wait_resp);
1720+ if (ret < 0)
1721+ goto out;
1722+
1723+ if (!wait_resp) {
1724+ ret = 0;
1725+ goto out;
1726+ }
1727+
1728+ expires = jiffies + wo->mcu.timeout;
1729+
1730+ do {
1731+ skb = mtk_wed_mcu_get_response(wo, expires);
1732+ ret = wo->mcu_ops->mcu_parse_response(wo, cmd, skb, seq);
1733+
1734+ if (!ret && ret_skb)
1735+ *ret_skb = skb;
1736+ else
1737+ dev_kfree_skb(skb);
1738+ } while (ret == -EAGAIN);
1739+
1740+out:
1741+ mutex_unlock(&wo->mcu.mutex);
1742+
1743+ return ret;
1744+}
1745+
1746+void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo,
1747+ struct sk_buff *skb)
1748+{
1749+ skb_queue_tail(&wo->mcu.res_q, skb);
1750+ wake_up(&wo->mcu.wait);
1751+}
1752+
1753+static int mtk_wed_mcu_send_and_get_msg(struct mtk_wed_wo *wo,
1754+ int to_id, int cmd, const void *data, int len,
1755+ bool wait_resp, struct sk_buff **ret_skb)
1756+{
1757+ struct sk_buff *skb;
1758+
1759+ skb = mtk_wed_mcu_msg_alloc(wo, data, len);
1760+ if (!skb)
1761+ return -ENOMEM;
1762+
1763+ return mtk_wed_mcu_skb_send_and_get_msg(wo, to_id, cmd, skb, wait_resp, ret_skb);
1764+}
1765+
1766+int
1767+mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo,
1768+ int to_id, int cmd,
1769+ const void *data, int len, bool wait_resp)
1770+{
1771+ struct sk_buff *skb = NULL;
1772+ int ret = 0;
1773+
1774+ ret = mtk_wed_mcu_send_and_get_msg(wo, to_id, cmd, data,
1775+ len, wait_resp, &skb);
1776+ if (skb)
1777+ dev_kfree_skb(skb);
1778+
1779+ return ret;
1780+}
1781+
1782+int mtk_wed_exception_init(struct mtk_wed_wo *wo)
1783+{
1784+ struct wed_wo_exception *exp = &wo->exp;
1785+ struct {
1786+ u32 arg0;
1787+ u32 arg1;
1788+ }req;
1789+
1790+ exp->log_size = EXCEPTION_LOG_SIZE;
1791+ exp->log = kmalloc(exp->log_size, GFP_ATOMIC);
1792+ if (!exp->log)
1793+ return -ENOMEM;
1794+
1795+ memset(exp->log, 0, exp->log_size);
1796+ exp->phys = dma_map_single(wo->hw->dev, exp->log, exp->log_size,
1797+ DMA_FROM_DEVICE);
1798+
1799+ if (unlikely(dma_mapping_error(wo->hw->dev, exp->phys))) {
1800+ dev_info(wo->hw->dev, "dma map error\n");
1801+ goto free;
1802+ }
1803+
1804+ req.arg0 = (u32)exp->phys;
1805+ req.arg1 = (u32)exp->log_size;
1806+
developer144824b2022-11-25 21:27:43 +08001807+ return mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, MTK_WED_WO_CMD_EXCEPTION_INIT,
developer8cb3ac72022-07-04 10:55:14 +08001808+ &req, sizeof(req), false);
1809+
1810+free:
1811+ kfree(exp->log);
1812+ return -ENOMEM;
1813+}
1814+
1815+int
1816+mtk_wed_mcu_cmd_sanity_check(struct mtk_wed_wo *wo, struct sk_buff *skb)
1817+{
1818+ struct wed_cmd_hdr *hdr = (struct wed_cmd_hdr *)skb->data;
1819+
1820+ if (hdr->ver != 0)
1821+ return WARP_INVALID_PARA_STATUS;
1822+
1823+ if (skb->len < sizeof(struct wed_cmd_hdr))
1824+ return WARP_INVALID_PARA_STATUS;
1825+
1826+ if (skb->len != hdr->length)
1827+ return WARP_INVALID_PARA_STATUS;
1828+
1829+ return WARP_OK_STATUS;
1830+}
1831+
1832+void
1833+mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo, struct sk_buff *skb)
1834+{
developer8fec8ae2022-08-15 15:01:09 -07001835+ struct mtk_wed_device *wed = wo->hw->wed_dev;
developer8cb3ac72022-07-04 10:55:14 +08001836+ struct wed_cmd_hdr *hdr = (struct wed_cmd_hdr *)skb->data;
1837+ struct wed_wo_log *record;
developer144824b2022-11-25 21:27:43 +08001838+ struct mtk_wed_wo_rx_stats *rxcnt;
developer8cb3ac72022-07-04 10:55:14 +08001839+ char *msg = (char *)(skb->data + sizeof(struct wed_cmd_hdr));
1840+ u16 msg_len = skb->len - sizeof(struct wed_cmd_hdr);
1841+ u32 i, cnt = 0;
1842+
1843+ switch (hdr->cmd_id) {
1844+ case WO_EVT_LOG_DUMP:
1845+ pr_info("[WO LOG]: %s\n", msg);
1846+ break;
1847+ case WO_EVT_PROFILING:
1848+ cnt = msg_len / (sizeof(struct wed_wo_log));
1849+ record = (struct wed_wo_log *) msg;
1850+ dev_info(wo->hw->dev, "[WO Profiling]: %d report arrived!\n", cnt);
1851+
1852+ for (i = 0 ; i < cnt ; i++) {
1853+ //PROFILE_STAT(wo->total, record[i].total);
1854+ //PROFILE_STAT(wo->mod, record[i].mod);
1855+ //PROFILE_STAT(wo->rro, record[i].rro);
1856+
1857+ dev_info(wo->hw->dev, "[WO Profiling]: SN:%u with latency: total=%u, rro:%u, mod:%u\n",
1858+ record[i].sn,
1859+ record[i].total,
1860+ record[i].rro,
1861+ record[i].mod);
1862+ }
1863+ break;
developer8fec8ae2022-08-15 15:01:09 -07001864+ case WO_EVT_RXCNT_INFO:
1865+ cnt = *(u32 *)msg;
developer144824b2022-11-25 21:27:43 +08001866+ rxcnt = (struct mtk_wed_wo_rx_stats *)((u32 *)msg+1);
developer8cb3ac72022-07-04 10:55:14 +08001867+
developer8fec8ae2022-08-15 15:01:09 -07001868+ for (i = 0; i < cnt; i++)
developer144824b2022-11-25 21:27:43 +08001869+ if (wed->wlan.update_wo_rx_stats)
1870+ wed->wlan.update_wo_rx_stats(wed, &rxcnt[i]);
developer8fec8ae2022-08-15 15:01:09 -07001871+ break;
developer8cb3ac72022-07-04 10:55:14 +08001872+ default:
1873+ break;
1874+ }
1875+
1876+ dev_kfree_skb(skb);
1877+
1878+}
1879+
1880+static int
1881+mtk_wed_load_firmware(struct mtk_wed_wo *wo)
1882+{
1883+ struct fw_info {
1884+ __le32 decomp_crc;
1885+ __le32 decomp_len;
1886+ __le32 decomp_blk_sz;
1887+ u8 reserved[4];
1888+ __le32 addr;
1889+ __le32 len;
1890+ u8 feature_set;
1891+ u8 reserved1[15];
1892+ } __packed *region;
1893+
1894+ char *mcu;
1895+ const struct mtk_wed_fw_trailer *hdr;
1896+ static u8 shared[MAX_REGION_SIZE] = {0};
1897+ const struct firmware *fw;
1898+ int ret, i;
1899+ u32 ofs = 0;
1900+ u32 boot_cr, val;
1901+
developer428eaaa2023-10-06 15:48:21 +08001902+ if (of_device_is_compatible(wo->hw->node, "mediatek,mt7981-wed"))
1903+ mcu = MT7981_FIRMWARE_WO;
1904+ else
1905+ mcu = wo->hw->index ? MT7986_FIRMWARE_WO_2 :
1906+ MT7986_FIRMWARE_WO_1;
developer8cb3ac72022-07-04 10:55:14 +08001907+
1908+ ret = request_firmware(&fw, mcu, wo->hw->dev);
1909+ if (ret)
1910+ return ret;
1911+
1912+ hdr = (const struct mtk_wed_fw_trailer *)(fw->data + fw->size -
1913+ sizeof(*hdr));
1914+
1915+ dev_info(wo->hw->dev, "WO Firmware Version: %.10s, Build Time: %.15s\n",
1916+ hdr->fw_ver, hdr->build_date);
1917+
1918+ for (i = 0; i < hdr->n_region; i++) {
1919+ int j = 0;
1920+ region = (struct fw_info *)(fw->data + fw->size -
1921+ sizeof(*hdr) -
1922+ sizeof(*region) *
1923+ (hdr->n_region - i));
1924+
1925+ while (j < MAX_REGION_SIZE) {
1926+ struct mtk_wed_fw_region *wo_region;
1927+
1928+ wo_region = &wo->region[j];
1929+ if (!wo_region->addr)
1930+ break;
1931+
1932+ if (wo_region->addr_pa == region->addr) {
1933+ if (!wo_region->shared) {
1934+ memcpy(wo_region->addr,
1935+ fw->data + ofs, region->len);
1936+ } else if (!shared[j]) {
1937+ memcpy(wo_region->addr,
1938+ fw->data + ofs, region->len);
1939+ shared[j] = true;
1940+ }
1941+ }
1942+ j++;
1943+ }
1944+
1945+ if (j == __WO_REGION_MAX) {
1946+ ret = -ENOENT;
1947+ goto done;
1948+ }
1949+ ofs += region->len;
1950+ }
1951+
1952+ /* write the start address */
1953+ boot_cr = wo->hw->index ?
1954+ WOX_MCU_CFG_LS_WA_BOOT_ADDR_ADDR : WOX_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
1955+ wo_w32(wo, boot_cr, (wo->region[WO_REGION_EMI].addr_pa >> 16));
1956+
1957+ /* wo firmware reset */
1958+ wo_w32(wo, WOX_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00);
1959+
1960+ val = wo_r32(wo, WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR);
1961+
1962+ val |= wo->hw->index ? WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WA_CPU_RSTB_MASK :
1963+ WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WM_CPU_RSTB_MASK;
1964+
1965+ wo_w32(wo, WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val);
1966+
1967+done:
1968+ release_firmware(fw);
1969+
1970+ return ret;
1971+}
1972+
1973+static int
1974+mtk_wed_get_firmware_region(struct mtk_wed_wo *wo)
1975+{
1976+ struct device_node *node, *np = wo->hw->node;
1977+ struct mtk_wed_fw_region *region;
1978+ struct resource res;
1979+ const char *compat;
1980+ int i, ret;
1981+
1982+ static const char *const wo_region_compat[__WO_REGION_MAX] = {
1983+ [WO_REGION_EMI] = WOCPU_EMI_DEV_NODE,
1984+ [WO_REGION_ILM] = WOCPU_ILM_DEV_NODE,
1985+ [WO_REGION_DATA] = WOCPU_DATA_DEV_NODE,
1986+ [WO_REGION_BOOT] = WOCPU_BOOT_DEV_NODE,
1987+ };
1988+
1989+ for (i = 0; i < __WO_REGION_MAX; i++) {
1990+ region = &wo->region[i];
1991+ compat = wo_region_compat[i];
1992+
1993+ node = of_parse_phandle(np, compat, 0);
1994+ if (!node)
1995+ return -ENODEV;
1996+
1997+ ret = of_address_to_resource(node, 0, &res);
1998+ if (ret)
1999+ return ret;
2000+
2001+ region->addr_pa = res.start;
2002+ region->size = resource_size(&res);
2003+ region->addr = ioremap(region->addr_pa, region->size);
2004+
2005+ of_property_read_u32_index(node, "shared", 0, &region->shared);
2006+ }
2007+
2008+ return 0;
2009+}
2010+
2011+static int
2012+wo_mcu_send_message(struct mtk_wed_wo *wo,
2013+ int to_id, int cmd, struct sk_buff *skb,
2014+ int *wait_seq, bool wait_resp)
2015+{
2016+ struct wed_cmd_hdr *hdr;
2017+ u8 seq = 0;
2018+
2019+ /* TDO: make dynamic based on msg type */
2020+ wo->mcu.timeout = 20 * HZ;
2021+
2022+ if (wait_resp && wait_seq) {
2023+ seq = wo->mcu.msg_seq++ ;
2024+ *wait_seq = seq;
2025+ }
2026+
2027+ hdr = (struct wed_cmd_hdr *)skb_push(skb, sizeof(*hdr));
2028+
2029+ hdr->cmd_id = cmd;
2030+ hdr->length = cpu_to_le16(skb->len);
2031+ hdr->uni_id = seq;
2032+
2033+ if (to_id == MODULE_ID_WO)
2034+ hdr->flag |= WARP_CMD_FLAG_FROM_TO_WO;
2035+
2036+ if (wait_resp && wait_seq)
2037+ hdr->flag |= WARP_CMD_FLAG_NEED_RSP;
2038+
2039+ return mtk_wed_wo_q_tx_skb(wo, &wo->q_tx, skb);
2040+}
2041+
2042+static int
2043+wo_mcu_parse_response(struct mtk_wed_wo *wo, int cmd,
2044+ struct sk_buff *skb, int seq)
2045+{
developer8fec8ae2022-08-15 15:01:09 -07002046+ struct mtk_wed_device *wed = wo->hw->wed_dev;
developer8cb3ac72022-07-04 10:55:14 +08002047+ struct wed_cmd_hdr *hdr;
developer144824b2022-11-25 21:27:43 +08002048+ struct mtk_wed_wo_rx_stats *rxcnt = NULL;
developer8fec8ae2022-08-15 15:01:09 -07002049+ u32 i, cnt = 0;
developer8cb3ac72022-07-04 10:55:14 +08002050+
2051+ if (!skb) {
2052+ dev_err(wo->hw->dev, "Message %08x (seq %d) timeout\n",
2053+ cmd, seq);
2054+ return -ETIMEDOUT;
2055+ }
2056+
2057+ hdr = (struct wed_cmd_hdr *)skb->data;
2058+ if (seq != hdr->uni_id) {
2059+ dev_err(wo->hw->dev, "Message %08x (seq %d) with not match uid(%d)\n",
2060+ cmd, seq, hdr->uni_id);
2061+ return -EAGAIN;
2062+ }
2063+
developer8fec8ae2022-08-15 15:01:09 -07002064+ skb_pull(skb, sizeof(struct wed_cmd_hdr));
2065+
2066+ switch (cmd) {
developer144824b2022-11-25 21:27:43 +08002067+ case MTK_WED_WO_CMD_RXCNT_INFO:
developer8fec8ae2022-08-15 15:01:09 -07002068+ cnt = *(u32 *)skb->data;
developer144824b2022-11-25 21:27:43 +08002069+ rxcnt = (struct mtk_wed_wo_rx_stats *)((u32 *)skb->data+1);
developer8fec8ae2022-08-15 15:01:09 -07002070+
2071+ for (i = 0; i < cnt; i++)
developer144824b2022-11-25 21:27:43 +08002072+ if (wed->wlan.update_wo_rx_stats)
2073+ wed->wlan.update_wo_rx_stats(wed, &rxcnt[i]);
developer8fec8ae2022-08-15 15:01:09 -07002074+ break;
2075+ default:
2076+ break;
2077+ }
developer8cb3ac72022-07-04 10:55:14 +08002078+
2079+ return 0;
2080+}
2081+
2082+int wed_wo_mcu_init(struct mtk_wed_wo *wo)
2083+{
2084+ static const struct wed_wo_mcu_ops wo_mcu_ops = {
2085+ .headroom = sizeof(struct wed_cmd_hdr),
2086+ .mcu_skb_send_msg = wo_mcu_send_message,
2087+ .mcu_parse_response = wo_mcu_parse_response,
2088+ /*TDO .mcu_restart = wo_mcu_restart,*/
2089+ };
2090+ unsigned long timeout = jiffies + FW_DL_TIMEOUT;
2091+ int ret;
2092+ u32 val;
2093+
2094+ wo->mcu_ops = &wo_mcu_ops;
2095+
2096+ ret = mtk_wed_get_firmware_region(wo);
2097+ if (ret)
2098+ return ret;
2099+
2100+ /* set dummy cr */
2101+ wed_w32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * WED_DUMMY_CR_FWDL,
2102+ wo->hw->index + 1);
2103+
2104+ ret = mtk_wed_load_firmware(wo);
2105+ if (ret)
2106+ return ret;
2107+
2108+ do {
2109+ /* get dummy cr */
2110+ val = wed_r32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * WED_DUMMY_CR_FWDL);
2111+ } while (val != 0 && !time_after(jiffies, timeout));
2112+
2113+ if (val)
2114+ return -EBUSY;
2115+
2116+ return 0;
2117+}
2118+
2119+static ssize_t
2120+mtk_wed_wo_ctrl(struct file *file,
2121+ const char __user *user_buf,
2122+ size_t count,
2123+ loff_t *ppos)
2124+{
2125+ struct mtk_wed_hw *hw = file->private_data;
2126+ struct mtk_wed_wo *wo = hw->wed_wo;
2127+ char buf[100], *cmd = NULL, *input[11] = {0};
2128+ char msgbuf[128] = {0};
2129+ struct wo_cmd_query *query = (struct wo_cmd_query *)msgbuf;
2130+ u32 cmd_id;
2131+ bool wait = false;
2132+ char *sub_str = NULL;
2133+ int input_idx = 0, input_total = 0, scan_num = 0;
2134+ char *p;
2135+
2136+ if (count > sizeof(buf))
2137+ return -EINVAL;
2138+
2139+ if (copy_from_user(buf, user_buf, count))
2140+ return -EFAULT;
2141+
2142+ if (count && buf[count - 1] == '\n')
2143+ buf[count - 1] = '\0';
2144+ else
2145+ buf[count] = '\0';
2146+
2147+ p = buf;
2148+
2149+ while ((sub_str = strsep(&p, " ")) != NULL) {
2150+ input[input_idx] = sub_str;
2151+ input_idx++;
2152+ input_total++;
2153+ }
2154+ cmd = input[0];
2155+ if (input_total == 1 && cmd) {
2156+ if (strncmp(cmd, "bainfo", strlen(cmd)) == 0) {
developer144824b2022-11-25 21:27:43 +08002157+ cmd_id = MTK_WED_WO_CMD_BA_INFO_DUMP;
developer8cb3ac72022-07-04 10:55:14 +08002158+ } else if (strncmp(cmd, "bactrl", strlen(cmd)) == 0) {
developer144824b2022-11-25 21:27:43 +08002159+ cmd_id = MTK_WED_WO_CMD_BA_CTRL_DUMP;
developer8cb3ac72022-07-04 10:55:14 +08002160+ } else if (strncmp(cmd, "fbcmdq", strlen(cmd)) == 0) {
developer144824b2022-11-25 21:27:43 +08002161+ cmd_id = MTK_WED_WO_CMD_FBCMD_Q_DUMP;
developer8cb3ac72022-07-04 10:55:14 +08002162+ } else if (strncmp(cmd, "logflush", strlen(cmd)) == 0) {
developer144824b2022-11-25 21:27:43 +08002163+ cmd_id = MTK_WED_WO_CMD_LOG_FLUSH;
developer8cb3ac72022-07-04 10:55:14 +08002164+ } else if (strncmp(cmd, "cpustat.dump", strlen(cmd)) == 0) {
developer144824b2022-11-25 21:27:43 +08002165+ cmd_id = MTK_WED_WO_CMD_CPU_STATS_DUMP;
developer8cb3ac72022-07-04 10:55:14 +08002166+ } else if (strncmp(cmd, "state", strlen(cmd)) == 0) {
developer144824b2022-11-25 21:27:43 +08002167+ cmd_id = MTK_WED_WO_CMD_WED_RX_STAT;
developer8cb3ac72022-07-04 10:55:14 +08002168+ } else if (strncmp(cmd, "prof_hit_dump", strlen(cmd)) == 0) {
2169+ //wo_profiling_report();
2170+ return count;
2171+ } else if (strncmp(cmd, "rxcnt_info", strlen(cmd)) == 0) {
developer144824b2022-11-25 21:27:43 +08002172+ cmd_id = MTK_WED_WO_CMD_RXCNT_INFO;
developer8cb3ac72022-07-04 10:55:14 +08002173+ wait = true;
2174+ } else {
2175+ pr_info("(%s) unknown comand string(%s)!\n", __func__, cmd);
2176+ return count;
2177+ }
2178+ } else if (input_total > 1) {
2179+ for (input_idx = 1 ; input_idx < input_total ; input_idx++) {
2180+ scan_num = sscanf(input[input_idx], "%u", &query->query0+(input_idx - 1));
2181+
2182+ if (scan_num < 1) {
2183+ pr_info("(%s) require more input!\n", __func__);
2184+ return count;
2185+ }
2186+ }
2187+ if(strncmp(cmd, "devinfo", strlen(cmd)) == 0) {
developer144824b2022-11-25 21:27:43 +08002188+ cmd_id = MTK_WED_WO_CMD_DEV_INFO_DUMP;
developer8cb3ac72022-07-04 10:55:14 +08002189+ } else if (strncmp(cmd, "bssinfo", strlen(cmd)) == 0) {
developer144824b2022-11-25 21:27:43 +08002190+ cmd_id = MTK_WED_WO_CMD_BSS_INFO_DUMP;
developer8cb3ac72022-07-04 10:55:14 +08002191+ } else if (strncmp(cmd, "starec", strlen(cmd)) == 0) {
developer144824b2022-11-25 21:27:43 +08002192+ cmd_id = MTK_WED_WO_CMD_STA_REC_DUMP;
developer8cb3ac72022-07-04 10:55:14 +08002193+ } else if (strncmp(cmd, "starec_ba", strlen(cmd)) == 0) {
developer144824b2022-11-25 21:27:43 +08002194+ cmd_id = MTK_WED_WO_CMD_STA_BA_DUMP;
developer8cb3ac72022-07-04 10:55:14 +08002195+ } else if (strncmp(cmd, "logctrl", strlen(cmd)) == 0) {
developer144824b2022-11-25 21:27:43 +08002196+ cmd_id = MTK_WED_WO_CMD_FW_LOG_CTRL;
developer8cb3ac72022-07-04 10:55:14 +08002197+ } else if (strncmp(cmd, "cpustat.en", strlen(cmd)) == 0) {
developer144824b2022-11-25 21:27:43 +08002198+ cmd_id = MTK_WED_WO_CMD_CPU_STATS_ENABLE;
developer8cb3ac72022-07-04 10:55:14 +08002199+ } else if (strncmp(cmd, "prof_conf", strlen(cmd)) == 0) {
developer144824b2022-11-25 21:27:43 +08002200+ cmd_id = MTK_WED_WO_CMD_PROF_CTRL;
developer8cb3ac72022-07-04 10:55:14 +08002201+ } else if (strncmp(cmd, "rxcnt_ctrl", strlen(cmd)) == 0) {
developer144824b2022-11-25 21:27:43 +08002202+ cmd_id = MTK_WED_WO_CMD_RXCNT_CTRL;
developer8cb3ac72022-07-04 10:55:14 +08002203+ } else if (strncmp(cmd, "dbg_set", strlen(cmd)) == 0) {
developer144824b2022-11-25 21:27:43 +08002204+ cmd_id = MTK_WED_WO_CMD_DBG_INFO;
developer8cb3ac72022-07-04 10:55:14 +08002205+ }
2206+ } else {
2207+ dev_info(hw->dev, "usage: echo cmd='cmd_str' > wo_write\n");
2208+ dev_info(hw->dev, "cmd_str value range:\n");
2209+ dev_info(hw->dev, "\tbainfo:\n");
2210+ dev_info(hw->dev, "\tbactrl:\n");
2211+ dev_info(hw->dev, "\tfbcmdq:\n");
2212+ dev_info(hw->dev, "\tlogflush:\n");
2213+ dev_info(hw->dev, "\tcpustat.dump:\n");
2214+ dev_info(hw->dev, "\tprof_hit_dump:\n");
2215+ dev_info(hw->dev, "\trxcnt_info:\n");
2216+ dev_info(hw->dev, "\tdevinfo:\n");
2217+ dev_info(hw->dev, "\tbssinfo:\n");
2218+ dev_info(hw->dev, "\tstarec:\n");
2219+ dev_info(hw->dev, "\tstarec_ba:\n");
2220+ dev_info(hw->dev, "\tlogctrl:\n");
2221+ dev_info(hw->dev, "\tcpustat.en:\n");
2222+ dev_info(hw->dev, "\tprof_conf:\n");
2223+ dev_info(hw->dev, "\trxcnt_ctrl:\n");
2224+ dev_info(hw->dev, "\tdbg_set [level] [category]:\n");
2225+ return count;
2226+ }
2227+
2228+ mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, cmd_id, (void *)msgbuf, sizeof(struct wo_cmd_query), wait);
2229+
2230+ return count;
2231+
2232+}
2233+
2234+static const struct file_operations fops_wo_ctrl = {
2235+ .write = mtk_wed_wo_ctrl,
2236+ .open = simple_open,
2237+ .llseek = default_llseek,
2238+};
2239+
2240+void wed_wo_mcu_debugfs(struct mtk_wed_hw *hw, struct dentry *dir)
2241+{
2242+ if (!dir)
2243+ return;
2244+
2245+ debugfs_create_file("wo_write", 0600, dir, hw, &fops_wo_ctrl);
2246+}
2247+
2248diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.h b/drivers/net/ethernet/mediatek/mtk_wed_mcu.h
2249new file mode 100644
developer144824b2022-11-25 21:27:43 +08002250index 0000000..19e1199
developer8cb3ac72022-07-04 10:55:14 +08002251--- /dev/null
2252+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.h
developer428eaaa2023-10-06 15:48:21 +08002253@@ -0,0 +1,97 @@
developer8cb3ac72022-07-04 10:55:14 +08002254+// SPDX-License-Identifier: GPL-2.0-only
2255+
2256+#ifndef __MTK_WED_MCU_H
2257+#define __MTK_WED_MCU_H
2258+
2259+#define EXCEPTION_LOG_SIZE 32768
2260+#define WOCPU_MCUSYS_RESET_ADDR 0x15194050
2261+#define WOCPU_WO0_MCUSYS_RESET_MASK 0x20
2262+#define WOCPU_WO1_MCUSYS_RESET_MASK 0x1
2263+
2264+#define WARP_INVALID_LENGTH_STATUS (-2)
2265+#define WARP_NULL_POINTER_STATUS (-3)
2266+#define WARP_INVALID_PARA_STATUS (-4)
2267+#define WARP_NOT_HANDLE_STATUS (-5)
2268+#define WARP_FAIL_STATUS (-1)
2269+#define WARP_OK_STATUS (0)
2270+#define WARP_ALREADY_DONE_STATUS (1)
2271+
developer428eaaa2023-10-06 15:48:21 +08002272+#define MT7981_FIRMWARE_WO "mediatek/mt7981_wo.bin"
developer8cb3ac72022-07-04 10:55:14 +08002273+#define MT7986_FIRMWARE_WO_1 "mediatek/mt7986_wo_0.bin"
2274+#define MT7986_FIRMWARE_WO_2 "mediatek/mt7986_wo_1.bin"
2275+
2276+#define WOCPU_EMI_DEV_NODE "mediatek,wocpu_emi"
2277+#define WOCPU_ILM_DEV_NODE "mediatek,wocpu_ilm"
2278+#define WOCPU_DLM_DEV_NODE "mediatek,wocpu_dlm"
2279+#define WOCPU_DATA_DEV_NODE "mediatek,wocpu_data"
2280+#define WOCPU_BOOT_DEV_NODE "mediatek,wocpu_boot"
2281+
2282+#define FW_DL_TIMEOUT ((3000 * HZ) / 1000)
2283+#define WOCPU_TIMEOUT ((1000 * HZ) / 1000)
2284+
2285+#define MAX_REGION_SIZE 3
2286+
2287+#define WOX_MCU_CFG_LS_BASE 0 /*0x15194000*/
2288+
2289+#define WOX_MCU_CFG_LS_HW_VER_ADDR (WOX_MCU_CFG_LS_BASE + 0x000) // 4000
2290+#define WOX_MCU_CFG_LS_FW_VER_ADDR (WOX_MCU_CFG_LS_BASE + 0x004) // 4004
2291+#define WOX_MCU_CFG_LS_CFG_DBG1_ADDR (WOX_MCU_CFG_LS_BASE + 0x00C) // 400C
2292+#define WOX_MCU_CFG_LS_CFG_DBG2_ADDR (WOX_MCU_CFG_LS_BASE + 0x010) // 4010
2293+#define WOX_MCU_CFG_LS_WF_MCCR_ADDR (WOX_MCU_CFG_LS_BASE + 0x014) // 4014
2294+#define WOX_MCU_CFG_LS_WF_MCCR_SET_ADDR (WOX_MCU_CFG_LS_BASE + 0x018) // 4018
2295+#define WOX_MCU_CFG_LS_WF_MCCR_CLR_ADDR (WOX_MCU_CFG_LS_BASE + 0x01C) // 401C
2296+#define WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR (WOX_MCU_CFG_LS_BASE + 0x050) // 4050
2297+#define WOX_MCU_CFG_LS_WM_BOOT_ADDR_ADDR (WOX_MCU_CFG_LS_BASE + 0x060) // 4060
2298+#define WOX_MCU_CFG_LS_WA_BOOT_ADDR_ADDR (WOX_MCU_CFG_LS_BASE + 0x064) // 4064
2299+
2300+#define WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WM_CPU_RSTB_MASK BIT(5)
2301+#define WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WA_CPU_RSTB_MASK BIT(0)
2302+
2303+
2304+enum wo_event_id {
2305+ WO_EVT_LOG_DUMP = 0x1,
2306+ WO_EVT_PROFILING = 0x2,
2307+ WO_EVT_RXCNT_INFO = 0x3
2308+};
2309+
developer8cb3ac72022-07-04 10:55:14 +08002310+enum wo_state {
2311+ WO_STATE_UNDEFINED = 0x0,
2312+ WO_STATE_INIT = 0x1,
2313+ WO_STATE_ENABLE = 0x2,
2314+ WO_STATE_DISABLE = 0x3,
2315+ WO_STATE_HALT = 0x4,
2316+ WO_STATE_GATING = 0x5,
2317+ WO_STATE_SER_RESET = 0x6,
2318+ WO_STATE_WF_RESET = 0x7,
2319+ WO_STATE_END
2320+};
2321+
2322+enum wo_done_state {
2323+ WOIF_UNDEFINED = 0,
2324+ WOIF_DISABLE_DONE = 1,
2325+ WOIF_TRIGGER_ENABLE = 2,
2326+ WOIF_ENABLE_DONE = 3,
2327+ WOIF_TRIGGER_GATING = 4,
2328+ WOIF_GATING_DONE = 5,
2329+ WOIF_TRIGGER_HALT = 6,
2330+ WOIF_HALT_DONE = 7,
2331+};
2332+
2333+enum wed_dummy_cr_idx {
2334+ WED_DUMMY_CR_FWDL = 0,
2335+ WED_DUMMY_CR_WO_STATUS = 1
2336+};
2337+
2338+struct mtk_wed_fw_trailer {
2339+ u8 chip_id;
2340+ u8 eco_code;
2341+ u8 n_region;
2342+ u8 format_ver;
2343+ u8 format_flag;
2344+ u8 reserved[2];
2345+ char fw_ver[10];
2346+ char build_date[15];
2347+ u32 crc;
2348+};
2349+
2350+#endif
2351diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
developer58aa0682023-09-18 14:02:26 +08002352index 14e0e21..31871f7 100644
developer8cb3ac72022-07-04 10:55:14 +08002353--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2354+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2355@@ -4,6 +4,8 @@
2356 #ifndef __MTK_WED_REGS_H
2357 #define __MTK_WED_REGS_H
2358
2359+#define MTK_WFDMA_DESC_CTRL_TO_HOST BIT(8)
2360+
2361 #if defined(CONFIG_MEDIATEK_NETSYS_V2)
2362 #define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(13, 0)
2363 #define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(14)
2364@@ -16,6 +18,7 @@
2365 #define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16)
2366 #define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30)
2367 #define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31)
2368+#define MTK_WED_RX_BM_TOKEN GENMASK(31, 16)
2369
2370 struct mtk_wdma_desc {
2371 __le32 buf0;
developere0cbe332022-09-10 17:36:02 +08002372@@ -42,6 +45,8 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002373 #define MTK_WED_RESET_WED_TX_DMA BIT(12)
2374 #define MTK_WED_RESET_WDMA_RX_DRV BIT(17)
2375 #define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
2376+#define MTK_WED_RESET_RX_RRO_QM BIT(20)
2377+#define MTK_WED_RESET_RX_ROUTE_QM BIT(21)
2378 #define MTK_WED_RESET_WED BIT(31)
2379
2380 #define MTK_WED_CTRL 0x00c
developere0cbe332022-09-10 17:36:02 +08002381@@ -53,8 +58,12 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002382 #define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
2383 #define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
2384 #define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11)
2385-#define MTK_WED_CTRL_RESERVE_EN BIT(12)
2386-#define MTK_WED_CTRL_RESERVE_BUSY BIT(13)
2387+#define MTK_WED_CTRL_WED_RX_BM_EN BIT(12)
2388+#define MTK_WED_CTRL_WED_RX_BM_BUSY BIT(13)
2389+#define MTK_WED_CTRL_RX_RRO_QM_EN BIT(14)
2390+#define MTK_WED_CTRL_RX_RRO_QM_BUSY BIT(15)
2391+#define MTK_WED_CTRL_RX_ROUTE_QM_EN BIT(16)
2392+#define MTK_WED_CTRL_RX_ROUTE_QM_BUSY BIT(17)
2393 #define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
2394 #define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25)
2395 #define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
developere0cbe332022-09-10 17:36:02 +08002396@@ -69,8 +78,8 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002397 #define MTK_WED_EXT_INT_STATUS_TX_TKID_LO_TH BIT(10)
2398 #define MTK_WED_EXT_INT_STATUS_TX_TKID_HI_TH BIT(11)
2399 #endif
2400-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
2401-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
2402+#define MTK_WED_EXT_INT_STATUS_RX_FREE_AT_EMPTY BIT(12)
2403+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_DMAD_ER BIT(13)
2404 #define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
2405 #define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
2406 #define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
developere0cbe332022-09-10 17:36:02 +08002407@@ -87,8 +96,8 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002408 #define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
2409 MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
2410 MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \
2411- MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH | \
2412- MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH | \
2413+ MTK_WED_EXT_INT_STATUS_RX_FREE_AT_EMPTY | \
2414+ MTK_WED_EXT_INT_STATUS_RX_FBUF_DMAD_ER | \
2415 MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
2416 MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
developer58aa0682023-09-18 14:02:26 +08002417 MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \
2418@@ -96,6 +105,8 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002419 MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR)
2420
2421 #define MTK_WED_EXT_INT_MASK 0x028
2422+#define MTK_WED_EXT_INT_MASK1 0x02c
2423+#define MTK_WED_EXT_INT_MASK2 0x030
2424
2425 #define MTK_WED_STATUS 0x060
2426 #define MTK_WED_STATUS_TX GENMASK(15, 8)
developer58aa0682023-09-18 14:02:26 +08002427@@ -183,6 +194,9 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002428
2429 #define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10)
2430
2431+#define MTK_WED_RING_RX_DATA(_n) (0x420 + (_n) * 0x10)
2432+
2433+#define MTK_WED_SCR0 0x3c0
2434 #define MTK_WED_WPDMA_INT_TRIGGER 0x504
2435 #define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1)
2436 #define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4)
developer58aa0682023-09-18 14:02:26 +08002437@@ -239,13 +253,19 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002438
2439 #define MTK_WED_WPDMA_INT_CTRL_TX 0x530
2440 #define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN BIT(0)
2441-#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR BIT(1)
2442+#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR BIT(1)
2443 #define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG GENMASK(6, 2)
2444 #define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN BIT(8)
2445 #define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR BIT(9)
2446 #define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG GENMASK(14, 10)
2447
2448 #define MTK_WED_WPDMA_INT_CTRL_RX 0x534
2449+#define MTK_WED_WPDMA_INT_CTRL_RX0_EN BIT(0)
2450+#define MTK_WED_WPDMA_INT_CTRL_RX0_CLR BIT(1)
2451+#define MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG GENMASK(6, 2)
2452+#define MTK_WED_WPDMA_INT_CTRL_RX1_EN BIT(8)
2453+#define MTK_WED_WPDMA_INT_CTRL_RX1_CLR BIT(9)
2454+#define MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG GENMASK(14, 10)
2455
2456 #define MTK_WED_WPDMA_INT_CTRL_TX_FREE 0x538
2457 #define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN BIT(0)
developer58aa0682023-09-18 14:02:26 +08002458@@ -270,13 +290,40 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002459 #define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
2460 #define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
2461
2462+#define MTK_WED_WPDMA_RX_MIB(_n) (0x5e0 + (_n) * 4)
2463+#define MTK_WED_WPDMA_RX_COHERENT_MIB(_n) (0x5f0 + (_n) * 4)
2464+
2465 #define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10)
2466 #define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10)
developerc1b2cd12022-07-28 18:35:24 +08002467+#define MTK_WED_WPDMA_RING_RX_DATA(_n) (0x730 + (_n) * 0x10)
developer8cb3ac72022-07-04 10:55:14 +08002468+
2469+#define MTK_WED_WPDMA_RX_D_GLO_CFG 0x75c
2470+#define MTK_WED_WPDMA_RX_D_RX_DRV_EN BIT(0)
2471+#define MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL GENMASK(11, 7)
2472+#define MTK_WED_WPDMA_RX_D_RXD_READ_LEN GENMASK(31, 24)
2473+
2474+#define MTK_WED_WPDMA_RX_D_RST_IDX 0x760
developerc1b2cd12022-07-28 18:35:24 +08002475+#define MTK_WED_WPDMA_RX_D_RST_CRX_IDX GENMASK(17, 16)
2476+#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX GENMASK(25, 24)
developer8cb3ac72022-07-04 10:55:14 +08002477+
2478+#define MTK_WED_WPDMA_RX_GLO_CFG 0x76c
2479+#define MTK_WED_WPDMA_RX_RING 0x770
2480+
2481+#define MTK_WED_WPDMA_RX_D_MIB(_n) (0x774 + (_n) * 4)
2482+#define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n) (0x784 + (_n) * 4)
2483+#define MTK_WED_WPDMA_RX_D_COHERENT_MIB 0x78c
2484+
2485+#define MTK_WED_WDMA_RING_TX 0x800
2486+
2487+#define MTK_WED_WDMA_TX_MIB 0x810
2488+
2489+
2490 #define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
2491 #define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
2492
2493 #define MTK_WED_WDMA_GLO_CFG 0xa04
2494 #define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
2495+#define MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK BIT(1)
2496 #define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2)
2497 #define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
2498 #define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4)
developer58aa0682023-09-18 14:02:26 +08002499@@ -320,6 +367,20 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002500 #define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4)
2501 #define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4)
2502
2503+#define MTK_WED_RX_BM_RX_DMAD 0xd80
2504+#define MTK_WED_RX_BM_RX_DMAD_SDL0 GENMASK(13, 0)
2505+
2506+#define MTK_WED_RX_BM_BASE 0xd84
2507+#define MTK_WED_RX_BM_INIT_PTR 0xd88
2508+#define MTK_WED_RX_BM_SW_TAIL GENMASK(15, 0)
2509+#define MTK_WED_RX_BM_INIT_SW_TAIL BIT(16)
2510+
2511+#define MTK_WED_RX_PTR 0xd8c
2512+
2513+#define MTK_WED_RX_BM_DYN_ALLOC_TH 0xdb4
2514+#define MTK_WED_RX_BM_DYN_ALLOC_TH_H GENMASK(31, 16)
2515+#define MTK_WED_RX_BM_DYN_ALLOC_TH_L GENMASK(15, 0)
2516+
2517 #define MTK_WED_RING_OFS_BASE 0x00
2518 #define MTK_WED_RING_OFS_COUNT 0x04
2519 #define MTK_WED_RING_OFS_CPU_IDX 0x08
developer58aa0682023-09-18 14:02:26 +08002520@@ -330,12 +391,13 @@ struct mtk_wdma_desc {
developera3f86ed2022-07-08 14:15:13 +08002521
2522 #define MTK_WDMA_GLO_CFG 0x204
2523 #define MTK_WDMA_GLO_CFG_TX_DMA_EN BIT(0)
2524+#define MTK_WDMA_GLO_CFG_TX_DMA_BUSY BIT(1)
2525 #define MTK_WDMA_GLO_CFG_RX_DMA_EN BIT(2)
2526+#define MTK_WDMA_GLO_CFG_RX_DMA_BUSY BIT(3)
2527 #define MTK_WDMA_GLO_CFG_RX_INFO3_PRERES BIT(26)
2528 #define MTK_WDMA_GLO_CFG_RX_INFO2_PRERES BIT(27)
2529 #define MTK_WDMA_GLO_CFG_RX_INFO1_PRERES BIT(28)
developerc1b2cd12022-07-28 18:35:24 +08002530
2531-
2532 #define MTK_WDMA_RESET_IDX 0x208
2533 #define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0)
2534 #define MTK_WDMA_RESET_IDX_RX GENMASK(17, 16)
developer58aa0682023-09-18 14:02:26 +08002535@@ -359,4 +421,70 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002536 /* DMA channel mapping */
2537 #define HIFSYS_DMA_AG_MAP 0x008
2538
2539+#define MTK_WED_RTQM_GLO_CFG 0xb00
2540+#define MTK_WED_RTQM_BUSY BIT(1)
2541+#define MTK_WED_RTQM_Q_RST BIT(2)
2542+#define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5)
2543+#define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20)
2544+
2545+#define MTK_WED_RTQM_R2H_MIB(_n) (0xb70 + (_n) * 0x4)
2546+#define MTK_WED_RTQM_R2Q_MIB(_n) (0xb78 + (_n) * 0x4)
2547+#define MTK_WED_RTQM_Q2N_MIB 0xb80
2548+#define MTK_WED_RTQM_Q2H_MIB(_n) (0xb84 + (_n) * 0x4)
2549+
2550+#define MTK_WED_RTQM_Q2B_MIB 0xb8c
2551+#define MTK_WED_RTQM_PFDBK_MIB 0xb90
2552+
2553+#define MTK_WED_RROQM_GLO_CFG 0xc04
2554+#define MTK_WED_RROQM_RST_IDX 0xc08
2555+#define MTK_WED_RROQM_RST_IDX_MIOD BIT(0)
2556+#define MTK_WED_RROQM_RST_IDX_FDBK BIT(4)
2557+
2558+#define MTK_WED_RROQM_MIOD_CTRL0 0xc40
2559+#define MTK_WED_RROQM_MIOD_CTRL1 0xc44
2560+#define MTK_WED_RROQM_MIOD_CNT GENMASK(11, 0)
2561+
2562+#define MTK_WED_RROQM_MIOD_CTRL2 0xc48
2563+#define MTK_WED_RROQM_MIOD_CTRL3 0xc4c
2564+
2565+#define MTK_WED_RROQM_FDBK_CTRL0 0xc50
2566+#define MTK_WED_RROQM_FDBK_CTRL1 0xc54
2567+#define MTK_WED_RROQM_FDBK_CNT GENMASK(11, 0)
2568+
2569+#define MTK_WED_RROQM_FDBK_CTRL2 0xc58
2570+
2571+#define MTK_WED_RROQ_BASE_L 0xc80
2572+#define MTK_WED_RROQ_BASE_H 0xc84
2573+
developer8cb3ac72022-07-04 10:55:14 +08002574+#define MTK_WED_RROQM_MIOD_CFG 0xc8c
2575+#define MTK_WED_RROQM_MIOD_MID_DW GENMASK(5, 0)
2576+#define MTK_WED_RROQM_MIOD_MOD_DW GENMASK(13, 8)
2577+#define MTK_WED_RROQM_MIOD_ENTRY_DW GENMASK(22, 16)
2578+
2579+#define MTK_WED_RROQM_MID_MIB 0xcc0
2580+#define MTK_WED_RROQM_MOD_MIB 0xcc4
2581+#define MTK_WED_RROQM_MOD_COHERENT_MIB 0xcc8
2582+#define MTK_WED_RROQM_FDBK_MIB 0xcd0
2583+#define MTK_WED_RROQM_FDBK_COHERENT_MIB 0xcd4
2584+#define MTK_WED_RROQM_FDBK_IND_MIB 0xce0
2585+#define MTK_WED_RROQM_FDBK_ENQ_MIB 0xce4
2586+#define MTK_WED_RROQM_FDBK_ANC_MIB 0xce8
2587+#define MTK_WED_RROQM_FDBK_ANC2H_MIB 0xcec
2588+
2589+#define MTK_WED_RX_BM_RX_DMAD 0xd80
2590+#define MTK_WED_RX_BM_BASE 0xd84
2591+#define MTK_WED_RX_BM_INIT_PTR 0xd88
2592+#define MTK_WED_RX_BM_PTR 0xd8c
2593+#define MTK_WED_RX_BM_PTR_HEAD GENMASK(32, 16)
2594+#define MTK_WED_RX_BM_PTR_TAIL GENMASK(15, 0)
2595+
2596+#define MTK_WED_RX_BM_BLEN 0xd90
2597+#define MTK_WED_RX_BM_STS 0xd94
2598+#define MTK_WED_RX_BM_INTF2 0xd98
2599+#define MTK_WED_RX_BM_INTF 0xd9c
2600+#define MTK_WED_RX_BM_ERR_STS 0xda8
2601+
2602+#define MTK_WED_WOCPU_VIEW_MIOD_BASE 0x8000
2603+#define MTK_WED_PCIE_INT_MASK 0x0
2604+
2605 #endif
2606diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
2607new file mode 100644
developer144824b2022-11-25 21:27:43 +08002608index 0000000..54b7787
developer8cb3ac72022-07-04 10:55:14 +08002609--- /dev/null
2610+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
developer53bfd362022-09-29 12:02:18 +08002611@@ -0,0 +1,564 @@
developer8cb3ac72022-07-04 10:55:14 +08002612+// SPDX-License-Identifier: GPL-2.0-only
2613+
2614+#include <linux/kernel.h>
2615+#include <linux/bitfield.h>
2616+#include <linux/dma-mapping.h>
2617+#include <linux/skbuff.h>
2618+#include <linux/of_platform.h>
2619+#include <linux/interrupt.h>
2620+#include <linux/of_address.h>
2621+#include <linux/iopoll.h>
2622+#include <linux/soc/mediatek/mtk_wed.h>
2623+#include "mtk_wed.h"
2624+#include "mtk_wed_regs.h"
2625+#include "mtk_wed_ccif.h"
2626+#include "mtk_wed_wo.h"
2627+
2628+struct wed_wo_profile_stat profile_total[6] = {
2629+ {1001, 0},
2630+ {1501, 0},
2631+ {3001, 0},
2632+ {5001, 0},
2633+ {10001, 0},
2634+ {0xffffffff, 0}
2635+};
2636+
2637+struct wed_wo_profile_stat profiling_mod[6] = {
2638+ {1001, 0},
2639+ {1501, 0},
2640+ {3001, 0},
2641+ {5001, 0},
2642+ {10001, 0},
2643+ {0xffffffff, 0}
2644+};
2645+
2646+struct wed_wo_profile_stat profiling_rro[6] = {
2647+ {1001, 0},
2648+ {1501, 0},
2649+ {3001, 0},
2650+ {5001, 0},
2651+ {10001, 0},
2652+ {0xffffffff, 0}
2653+};
2654+
2655+static void
2656+woif_q_sync_idx(struct mtk_wed_wo *wo, struct wed_wo_queue *q)
2657+{
2658+ woccif_w32(wo, q->regs->desc_base, q->desc_dma);
2659+ woccif_w32(wo, q->regs->ring_size, q->ndesc);
2660+
developer8cb3ac72022-07-04 10:55:14 +08002661+}
2662+
2663+static void
2664+woif_q_reset(struct mtk_wed_wo *dev, struct wed_wo_queue *q)
2665+{
2666+
2667+ if (!q || !q->ndesc)
2668+ return;
2669+
2670+ woccif_w32(dev, q->regs->cpu_idx, 0);
2671+
2672+ woif_q_sync_idx(dev, q);
2673+}
2674+
2675+static void
2676+woif_q_kick(struct mtk_wed_wo *wo, struct wed_wo_queue *q, int offset)
2677+{
2678+ wmb();
2679+ woccif_w32(wo, q->regs->cpu_idx, q->head + offset);
2680+}
2681+
2682+static int
developer53bfd362022-09-29 12:02:18 +08002683+woif_q_rx_fill(struct mtk_wed_wo *wo, struct wed_wo_queue *q, bool rx)
developer8cb3ac72022-07-04 10:55:14 +08002684+{
2685+ int len = q->buf_size, frames = 0;
2686+ struct wed_wo_queue_entry *entry;
developer53bfd362022-09-29 12:02:18 +08002687+ struct page_frag_cache *page = &q->tx_page;
developer8cb3ac72022-07-04 10:55:14 +08002688+ struct wed_wo_desc *desc;
2689+ dma_addr_t addr;
2690+ u32 ctrl = 0;
2691+ void *buf;
2692+
2693+ if (!q->ndesc)
2694+ return 0;
2695+
2696+ spin_lock_bh(&q->lock);
2697+
developer53bfd362022-09-29 12:02:18 +08002698+ if(rx)
2699+ page = &q->rx_page;
developer8cb3ac72022-07-04 10:55:14 +08002700+
developer53bfd362022-09-29 12:02:18 +08002701+ while (q->queued < q->ndesc) {
2702+ buf = page_frag_alloc(page, len, GFP_ATOMIC);
developer8cb3ac72022-07-04 10:55:14 +08002703+ if (!buf)
2704+ break;
2705+
2706+ addr = dma_map_single(wo->hw->dev, buf, len, DMA_FROM_DEVICE);
2707+ if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
2708+ skb_free_frag(buf);
2709+ break;
2710+ }
developerf11dcd72022-08-27 18:29:27 +08002711+
2712+ q->head = (q->head + 1) % q->ndesc;
2713+
developer8cb3ac72022-07-04 10:55:14 +08002714+ desc = &q->desc[q->head];
2715+ entry = &q->entry[q->head];
2716+
2717+ entry->dma_addr = addr;
2718+ entry->dma_len = len;
2719+
developer53bfd362022-09-29 12:02:18 +08002720+ if (rx) {
2721+ ctrl = FIELD_PREP(WED_CTL_SD_LEN0, entry->dma_len);
2722+ ctrl |= WED_CTL_LAST_SEC0;
developer8cb3ac72022-07-04 10:55:14 +08002723+
developer53bfd362022-09-29 12:02:18 +08002724+ WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
2725+ WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
2726+ }
developer8cb3ac72022-07-04 10:55:14 +08002727+ q->queued++;
2728+ q->entry[q->head].buf = buf;
2729+
developer8cb3ac72022-07-04 10:55:14 +08002730+ frames++;
2731+ }
2732+
2733+ spin_unlock_bh(&q->lock);
2734+
2735+ return frames;
2736+}
2737+
2738+static void
2739+woif_q_rx_fill_process(struct mtk_wed_wo *wo, struct wed_wo_queue *q)
2740+{
developer53bfd362022-09-29 12:02:18 +08002741+ if(woif_q_rx_fill(wo, q, true))
developer8cb3ac72022-07-04 10:55:14 +08002742+ woif_q_kick(wo, q, -1);
2743+}
2744+
2745+static int
2746+woif_q_alloc(struct mtk_wed_wo *dev, struct wed_wo_queue *q,
2747+ int n_desc, int bufsize, int idx,
2748+ struct wed_wo_queue_regs *regs)
2749+{
2750+ struct wed_wo_queue_regs *q_regs;
2751+ int size;
2752+
2753+ spin_lock_init(&q->lock);
2754+ spin_lock_init(&q->cleanup_lock);
2755+
2756+ q_regs = devm_kzalloc(dev->hw->dev, sizeof(*q_regs), GFP_KERNEL);
2757+	if (!q_regs) return -ENOMEM;
2758+ q_regs->desc_base = regs->desc_base;
2759+ q_regs->ring_size = regs->ring_size;
2760+ q_regs->cpu_idx = regs->cpu_idx;
2761+ q_regs->dma_idx = regs->dma_idx;
2762+
2763+ q->regs = q_regs;
2764+ q->ndesc = n_desc;
2765+ q->buf_size = bufsize;
2766+
2767+ size = q->ndesc * sizeof(struct wed_wo_desc);
2768+
2769+ q->desc = dmam_alloc_coherent(dev->hw->dev, size,
2770+ &q->desc_dma, GFP_KERNEL);
2771+ if (!q->desc)
2772+ return -ENOMEM;
2773+
2774+ size = q->ndesc * sizeof(*q->entry);
2775+ q->entry = devm_kzalloc(dev->hw->dev, size, GFP_KERNEL);
2776+ if (!q->entry)
2777+ return -ENOMEM;
2778+
developer53bfd362022-09-29 12:02:18 +08002779+ if (idx == 0) {
2780+ /* alloc tx buf */
2781+ woif_q_rx_fill(dev, &dev->q_tx, false);
developer8cb3ac72022-07-04 10:55:14 +08002782+ woif_q_reset(dev, &dev->q_tx);
developer53bfd362022-09-29 12:02:18 +08002783+ }
developer8cb3ac72022-07-04 10:55:14 +08002784+
2785+ return 0;
2786+}
2787+
2788+static void
developera3f86ed2022-07-08 14:15:13 +08002789+woif_q_free(struct mtk_wed_wo *dev, struct wed_wo_queue *q)
2790+{
2791+ int size;
2792+
2793+ if (!q)
2794+ return;
2795+
2796+ if (!q->desc)
2797+ return;
2798+
2799+ woccif_w32(dev, q->regs->cpu_idx, 0);
2800+
2801+ size = q->ndesc * sizeof(struct wed_wo_desc);
2802+ dma_free_coherent(dev->hw->dev, size, q->desc, q->desc_dma);
2803+}
2804+
2805+static void
developer53bfd362022-09-29 12:02:18 +08002806+woif_q_tx_clean(struct mtk_wed_wo *wo, struct wed_wo_queue *q)
developer8cb3ac72022-07-04 10:55:14 +08002807+{
developer53bfd362022-09-29 12:02:18 +08002808+ struct page *page;
2809+ int i = 0;
developer8cb3ac72022-07-04 10:55:14 +08002810+
2811+ if (!q || !q->ndesc)
2812+ return;
2813+
developer53bfd362022-09-29 12:02:18 +08002814+ spin_lock_bh(&q->lock);
2815+ while (i < q->ndesc) {
developer8cb3ac72022-07-04 10:55:14 +08002816+ struct wed_wo_queue_entry *e;
2817+
developer53bfd362022-09-29 12:02:18 +08002818+ e = &q->entry[i];
2819+ i++;
developer8cb3ac72022-07-04 10:55:14 +08002820+
developer53bfd362022-09-29 12:02:18 +08002821+ if (!e)
2822+ continue;
developer8cb3ac72022-07-04 10:55:14 +08002823+ dma_unmap_single(wo->hw->dev, e->dma_addr, e->dma_len,
2824+ DMA_TO_DEVICE);
2825+
developer53bfd362022-09-29 12:02:18 +08002826+ skb_free_frag(e->buf);
developer8cb3ac72022-07-04 10:55:14 +08002827+ }
developer53bfd362022-09-29 12:02:18 +08002828+ spin_unlock_bh(&q->lock);
developer8cb3ac72022-07-04 10:55:14 +08002829+
developer53bfd362022-09-29 12:02:18 +08002830+ if (!q->tx_page.va)
2831+ return;
2832+
2833+ page = virt_to_page(q->tx_page.va);
2834+ __page_frag_cache_drain(page, q->tx_page.pagecnt_bias);
2835+ memset(&q->tx_page, 0, sizeof(q->tx_page));
developer8cb3ac72022-07-04 10:55:14 +08002836+}
2837+
developer8cb3ac72022-07-04 10:55:14 +08002838+static void *
2839+woif_q_deq(struct mtk_wed_wo *wo, struct wed_wo_queue *q, bool flush,
2840+ int *len, u32 *info, bool *more)
2841+{
2842+ int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
2843+ struct wed_wo_queue_entry *e;
2844+ struct wed_wo_desc *desc;
developerf11dcd72022-08-27 18:29:27 +08002845+	int idx = (q->tail + 1) % q->ndesc;
developer8cb3ac72022-07-04 10:55:14 +08002846+ void *buf;
2847+
2848+ *more = false;
2849+ if (!q->queued)
2850+ return NULL;
2851+
2852+ if (flush)
2853+ q->desc[idx].ctrl |= cpu_to_le32(WED_CTL_DMA_DONE);
2854+ else if (!(q->desc[idx].ctrl & cpu_to_le32(WED_CTL_DMA_DONE)))
2855+ return NULL;
2856+
developerf11dcd72022-08-27 18:29:27 +08002857+ q->tail = idx;
developer8cb3ac72022-07-04 10:55:14 +08002858+ q->queued--;
2859+
2860+ desc = &q->desc[idx];
2861+ e = &q->entry[idx];
2862+
2863+ buf = e->buf;
2864+ if (len) {
2865+ u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
2866+ *len = FIELD_GET(WED_CTL_SD_LEN0, ctl);
2867+ *more = !(ctl & WED_CTL_LAST_SEC0);
2868+ }
2869+
2870+ if (info)
2871+ *info = le32_to_cpu(desc->info);
2872+ if(buf)
2873+ dma_unmap_single(wo->hw->dev, e->dma_addr, buf_len,
2874+ DMA_FROM_DEVICE);
2875+ e->skb = NULL;
2876+
2877+ return buf;
2878+}
2879+
developera3f86ed2022-07-08 14:15:13 +08002880+static void
2881+woif_q_rx_clean(struct mtk_wed_wo *wo, struct wed_wo_queue *q)
2882+{
2883+ struct page *page;
2884+ void *buf;
2885+ bool more;
2886+
2887+ if (!q->ndesc)
2888+ return;
2889+
2890+ spin_lock_bh(&q->lock);
2891+ do {
2892+ buf = woif_q_deq(wo, q, true, NULL, NULL, &more);
2893+ if (!buf)
2894+ break;
2895+
2896+ skb_free_frag(buf);
2897+ } while (1);
2898+ spin_unlock_bh(&q->lock);
2899+
2900+ if (!q->rx_page.va)
2901+ return;
2902+
2903+ page = virt_to_page(q->rx_page.va);
2904+ __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
2905+ memset(&q->rx_page, 0, sizeof(q->rx_page));
developera3f86ed2022-07-08 14:15:13 +08002906+}
2907+
developer8cb3ac72022-07-04 10:55:14 +08002908+static int
2909+woif_q_init(struct mtk_wed_wo *dev,
2910+ int (*poll)(struct napi_struct *napi, int budget))
2911+{
2912+ init_dummy_netdev(&dev->napi_dev);
2913+ snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
2914+ "woif_q");
2915+
2916+ if (dev->q_rx.ndesc) {
2917+ netif_napi_add(&dev->napi_dev, &dev->napi, poll, 64);
developer53bfd362022-09-29 12:02:18 +08002918+ woif_q_rx_fill(dev, &dev->q_rx, true);
developer8cb3ac72022-07-04 10:55:14 +08002919+ woif_q_reset(dev, &dev->q_rx);
2920+ napi_enable(&dev->napi);
2921+ }
2922+
2923+ return 0;
2924+}
2925+
2926+void woif_q_rx_skb(struct mtk_wed_wo *wo, struct sk_buff *skb)
2927+{
2928+ struct wed_cmd_hdr *hdr = (struct wed_cmd_hdr *)skb->data;
2929+ int ret;
2930+
2931+ ret = mtk_wed_mcu_cmd_sanity_check(wo, skb);
2932+ if (ret)
2933+ goto free_skb;
2934+
2935+ if (WED_WO_CMD_FLAG_IS_RSP(hdr))
2936+ mtk_wed_mcu_rx_event(wo, skb);
2937+ else
2938+ mtk_wed_mcu_rx_unsolicited_event(wo, skb);
2939+
2940+ return;
2941+free_skb:
2942+ dev_kfree_skb(skb);
2943+}
2944+
2945+static int
2946+woif_q_tx_skb(struct mtk_wed_wo *wo, struct wed_wo_queue *q,
2947+ struct sk_buff *skb)
2948+{
2949+ struct wed_wo_queue_entry *entry;
2950+ struct wed_wo_desc *desc;
developer53bfd362022-09-29 12:02:18 +08002951+ int len, ret = 0, idx = -1;
developer8cb3ac72022-07-04 10:55:14 +08002952+ dma_addr_t addr;
2953+ u32 ctrl = 0;
2954+
2955+ len = skb->len;
developer53bfd362022-09-29 12:02:18 +08002956+ spin_lock_bh(&q->lock);
developer8cb3ac72022-07-04 10:55:14 +08002957+
developer53bfd362022-09-29 12:02:18 +08002958+	q->tail = woccif_r32(wo, q->regs->dma_idx);
2959+	idx = (q->head + 1) % q->ndesc;
2960+	if (q->tail == idx) {
developer8cb3ac72022-07-04 10:55:14 +08002961+		ret = -ENOMEM;
2962+		goto error;
2963+	}
developer8cb3ac72022-07-04 10:55:14 +08002964+	desc = &q->desc[idx];
developer8cb3ac72022-07-04 10:55:14 +08002965+	entry = &q->entry[idx];
2966+
developer53bfd362022-09-29 12:02:18 +08002967+	if (len > entry->dma_len) {
2968+		ret = -ENOMEM;
2969+		goto error;
2970+	}
2971+	/* NOTE(review): commit head only after all checks pass, so error paths leave the ring intact */
2972+	q->head = idx;
2973+	addr = entry->dma_addr;
2974+
2975+ dma_sync_single_for_cpu(wo->hw->dev, addr, len, DMA_TO_DEVICE);
2976+ memcpy(entry->buf, skb->data, len);
2977+ dma_sync_single_for_device(wo->hw->dev, addr, len, DMA_TO_DEVICE);
developer8cb3ac72022-07-04 10:55:14 +08002978+
2979+ ctrl = FIELD_PREP(WED_CTL_SD_LEN0, len);
2980+ ctrl |= WED_CTL_LAST_SEC0;
2981+ ctrl |= WED_CTL_DMA_DONE;
2982+
2983+ WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
2984+ WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
2985+
developer8cb3ac72022-07-04 10:55:14 +08002986+ woif_q_kick(wo, q, 0);
2987+ wo->drv_ops->kickout(wo);
2988+
developer8cb3ac72022-07-04 10:55:14 +08002989+error:
developer8cb3ac72022-07-04 10:55:14 +08002990+	spin_unlock_bh(&q->lock);
2991+
2992+ dev_kfree_skb(skb);
developer53bfd362022-09-29 12:02:18 +08002993+ return ret;
developer8cb3ac72022-07-04 10:55:14 +08002994+}
2995+
2996+static const struct wed_wo_queue_ops wo_queue_ops = {
2997+ .init = woif_q_init,
2998+ .alloc = woif_q_alloc,
developera3f86ed2022-07-08 14:15:13 +08002999+ .free = woif_q_free,
developer8cb3ac72022-07-04 10:55:14 +08003000+ .reset = woif_q_reset,
3001+ .tx_skb = woif_q_tx_skb,
3002+ .tx_clean = woif_q_tx_clean,
3003+ .rx_clean = woif_q_rx_clean,
3004+ .kick = woif_q_kick,
3005+};
3006+
3007+static int
3008+mtk_wed_wo_rx_process(struct mtk_wed_wo *wo, struct wed_wo_queue *q, int budget)
3009+{
developer53bfd362022-09-29 12:02:18 +08003010+ int len, done = 0;
developer8cb3ac72022-07-04 10:55:14 +08003011+ struct sk_buff *skb;
3012+ unsigned char *data;
3013+ bool more;
3014+
3015+ while (done < budget) {
3016+ u32 info;
3017+
3018+ data = woif_q_deq(wo, q, false, &len, &info, &more);
3019+ if (!data)
3020+ break;
3021+
developer8cb3ac72022-07-04 10:55:14 +08003022+ skb = build_skb(data, q->buf_size);
3023+ if (!skb) {
3024+ skb_free_frag(data);
3025+ continue;
3026+ }
3027+
3028+ __skb_put(skb, len);
3029+ done++;
3030+
3031+ woif_q_rx_skb(wo, skb);
3032+ }
3033+
3034+ woif_q_rx_fill_process(wo, q);
3035+
3036+ return done;
3037+}
3038+
3039+void mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, bool set,
3040+ u32 clear, u32 val)
3041+{
3042+ unsigned long flags;
3043+
3044+ spin_lock_irqsave(&wo->ccif.irq_lock, flags);
3045+ wo->ccif.irqmask &= ~clear;
3046+ wo->ccif.irqmask |= val;
3047+ if (set)
3048+ wo->drv_ops->set_isr(wo, wo->ccif.irqmask);
3049+
3050+ spin_unlock_irqrestore(&wo->ccif.irq_lock, flags);
3051+}
3052+
3053+static inline void mtk_wed_wo_set_ack_mask(struct mtk_wed_wo *wo, u32 mask)
3054+{
3055+ wo->drv_ops->set_ack(wo, mask);
3056+}
3057+
3058+static void mtk_wed_wo_poll_complete(struct mtk_wed_wo *wo)
3059+{
3060+ mtk_wed_wo_set_ack_mask(wo, wo->ccif.q_int_mask);
3061+ mtk_wed_wo_isr_enable(wo, wo->ccif.q_int_mask);
3062+}
3063+
3064+int mtk_wed_wo_rx_poll(struct napi_struct *napi, int budget)
3065+{
3066+ struct mtk_wed_wo *wo;
3067+ int done = 0, cur;
3068+
3069+ wo = container_of(napi->dev, struct mtk_wed_wo, napi_dev);
3070+
3071+ rcu_read_lock();
3072+
3073+ do {
3074+ cur = mtk_wed_wo_rx_process(wo, &wo->q_rx, budget - done);
3075+ /* rx packet handle */
3076+ done += cur;
3077+ } while (cur && done < budget);
3078+
3079+ rcu_read_unlock();
3080+
3081+ if (done < budget && napi_complete(napi))
3082+ mtk_wed_wo_poll_complete(wo);
3083+
3084+ return done;
3085+}
3086+
3087+static void mtk_wed_wo_isr_tasklet(unsigned long data)
3088+{
3089+ struct mtk_wed_wo *wo = (struct mtk_wed_wo *)data;
3090+ u32 intr, mask;
3091+
3092+ /* disable isr */
3093+ wo->drv_ops->set_isr(wo, 0);
3094+
3095+ intr = wo->drv_ops->get_csr(wo);
3096+ intr &= wo->ccif.irqmask;
3097+
3098+ mask = intr & (wo->ccif.q_int_mask | wo->ccif.q_exep_mask);
3099+ mtk_wed_wo_isr_disable(wo, mask);
3100+
3101+ if (intr & wo->ccif.q_int_mask)
3102+ napi_schedule(&wo->napi);
3103+
3104+ if (intr & wo->ccif.q_exep_mask) {
3105+ /* todo */
3106+ }
3107+}
3108+
3109+static irqreturn_t mtk_wed_wo_isr_handler(int irq, void *wo_instance)
3110+{
3111+ struct mtk_wed_wo *wo = wo_instance;
3112+
3113+ wo->drv_ops->set_isr(wo, 0);
3114+
3115+ tasklet_schedule(&wo->irq_tasklet);
3116+
3117+ return IRQ_HANDLED;
3118+}
3119+
3120+int mtk_wed_wo_init(struct mtk_wed_hw *hw)
3121+{
3122+ struct mtk_wed_wo *wo;
3123+ int ret = 0;
3124+
3125+ wo = kzalloc(sizeof(struct mtk_wed_wo), GFP_KERNEL);
3126+ if (!wo)
3127+ return -ENOMEM;
3128+
3129+ wo->hw = hw;
3130+ wo->queue_ops = &wo_queue_ops;
3131+ hw->wed_wo = wo;
3132+
3133+ tasklet_init(&wo->irq_tasklet, mtk_wed_wo_isr_tasklet,
3134+ (unsigned long)wo);
3135+
3136+ skb_queue_head_init(&wo->mcu.res_q);
3137+ init_waitqueue_head(&wo->mcu.wait);
3138+ mutex_init(&wo->mcu.mutex);
3139+
3140+ ret = wed_wo_hardware_init(wo, mtk_wed_wo_isr_handler);
3141+ if (ret)
3142+ goto error;
3143+
3144+ /* fw download */
3145+ ret = wed_wo_mcu_init(wo);
3146+ if (ret)
3147+ goto error;
3148+
3149+ ret = mtk_wed_exception_init(wo);
3150+ if (ret)
3151+ goto error;
3152+
3153+ return ret;
3154+
3155+error:
3156+ kfree(wo);
3157+
3158+ return ret;
3159+}
3160+
3161+void mtk_wed_wo_exit(struct mtk_wed_hw *hw)
3162+{
developer8cb3ac72022-07-04 10:55:14 +08003163+ struct mtk_wed_wo *wo = hw->wed_wo;
3164+
developera3f86ed2022-07-08 14:15:13 +08003165+ wed_wo_hardware_exit(wo);
3166+
developer8cb3ac72022-07-04 10:55:14 +08003167+ if (wo->exp.log) {
3168+ dma_unmap_single(wo->hw->dev, wo->exp.phys, wo->exp.log_size, DMA_FROM_DEVICE);
3169+ kfree(wo->exp.log);
3170+ }
3171+
developera3f86ed2022-07-08 14:15:13 +08003172+ wo->hw = NULL;
3173+ memset(wo, 0, sizeof(*wo));
3174+ kfree(wo);
developer8cb3ac72022-07-04 10:55:14 +08003175+}
3176diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.h b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
3177new file mode 100644
developer144824b2022-11-25 21:27:43 +08003178index 0000000..548b38e
developer8cb3ac72022-07-04 10:55:14 +08003179--- /dev/null
3180+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
developer53bfd362022-09-29 12:02:18 +08003181@@ -0,0 +1,324 @@
developer8cb3ac72022-07-04 10:55:14 +08003182+// SPDX-License-Identifier: GPL-2.0-only
3183+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
3184+
3185+#ifndef __MTK_WED_WO_H
3186+#define __MTK_WED_WO_H
3187+
3188+#include <linux/netdevice.h>
3189+#include <linux/skbuff.h>
3190+#include "mtk_wed.h"
3191+
3192+#define WED_CTL_SD_LEN1 GENMASK(13, 0)
3193+#define WED_CTL_LAST_SEC1 BIT(14)
3194+#define WED_CTL_BURST BIT(15)
3195+#define WED_CTL_SD_LEN0_SHIFT 16
3196+#define WED_CTL_SD_LEN0 GENMASK(29, 16)
3197+#define WED_CTL_LAST_SEC0 BIT(30)
3198+#define WED_CTL_DMA_DONE BIT(31)
3199+#define WED_INFO_WINFO GENMASK(15, 0)
3200+
3201+#define MTK_WED_WO_TXQ_FREE_THR 10
3202+
3203+#define WED_WO_PROFILE_MAX_LVL 6
3204+
3205+
3206+enum mtk_wed_fw_region_id {
3207+ WO_REGION_EMI = 0,
3208+ WO_REGION_ILM,
3209+ WO_REGION_DATA,
3210+ WO_REGION_BOOT,
3211+ __WO_REGION_MAX
3212+};
3213+
3214+struct wed_wo_profile_stat {
3215+ u32 bound;
3216+ u32 record;
3217+};
3218+
3219+#define PROFILE_STAT(record, val) do { \
3220+ u8 lvl = 0; \
3221+ while (lvl < WED_WO_PROFILE_MAX_LVL) { \
3222+ if (val < record[lvl].bound) { \
3223+ record[lvl].record++; \
3224+ break; \
3225+ } \
3226+ lvl++; \
3227+ } \
3228+ } while (0)
3229+
3230+/* align with wo report structure */
3231+struct wed_wo_log {
3232+ u32 sn;
3233+ u32 total;
3234+ u32 rro;
3235+ u32 mod;
3236+};
3237+
3238+struct wed_wo_rxcnt {
3239+ u16 wlan_idx;
3240+ u16 tid;
3241+ u32 rx_pkt_cnt;
3242+ u32 rx_byte_cnt;
3243+ u32 rx_err_cnt;
3244+ u32 rx_drop_cnt;
3245+};
3246+
3247+struct wed_wo_queue {
3248+ struct wed_wo_queue_regs *regs;
3249+
3250+ spinlock_t lock;
3251+ spinlock_t cleanup_lock;
3252+ struct wed_wo_queue_entry *entry;
3253+ struct wed_wo_desc *desc;
3254+
3255+ u16 first;
3256+ u16 head;
3257+ u16 tail;
3258+ int ndesc;
3259+ int queued;
3260+ int buf_size;
3261+
3262+ u8 hw_idx;
3263+ u8 qid;
3264+ u8 flags;
3265+
3266+ dma_addr_t desc_dma;
3267+ struct page_frag_cache rx_page;
developer53bfd362022-09-29 12:02:18 +08003268+ struct page_frag_cache tx_page;
developer8cb3ac72022-07-04 10:55:14 +08003269+};
3270+
3271+
3272+struct wed_wo_mmio {
3273+ struct regmap *regs;
3274+
3275+ spinlock_t irq_lock;
3276+ u8 irq;
3277+ u32 irqmask;
3278+
3279+ u32 q_int_mask;
3280+ u32 q_exep_mask;
3281+};
3282+
3283+struct wed_wo_mcu {
3284+ struct mutex mutex;
3285+ u32 msg_seq;
3286+ int timeout;
3287+
3288+ struct sk_buff_head res_q;
3289+ wait_queue_head_t wait;
3290+};
3291+
3292+struct wed_wo_exception {
3293+ void* log;
3294+ int log_size;
3295+ dma_addr_t phys;
3296+};
3297+
3298+struct wed_wo_queue_regs {
3299+ u32 desc_base;
3300+ u32 ring_size;
3301+ u32 cpu_idx;
3302+ u32 dma_idx;
3303+};
3304+
3305+struct wed_wo_desc {
3306+ __le32 buf0;
3307+ __le32 ctrl;
3308+ __le32 buf1;
3309+ __le32 info;
3310+ __le32 reserved[4];
3311+} __packed __aligned(32);
3312+
3313+struct wed_wo_queue_entry {
3314+ union {
3315+ void *buf;
3316+ struct sk_buff *skb;
3317+ };
3318+
3319+ u32 dma_addr;
3320+ u16 dma_len;
3321+ u16 wcid;
3322+ bool skip_buf0:1;
3323+ bool skip_buf1:1;
3324+ bool done:1;
3325+};
3326+
developer8cb3ac72022-07-04 10:55:14 +08003327+struct wo_cmd_query {
3328+ u32 query0;
3329+ u32 query1;
3330+};
3331+
3332+struct wed_cmd_hdr {
3333+ /*DW0*/
3334+ u8 ver;
3335+ u8 cmd_id;
3336+ u16 length;
3337+
3338+ /*DW1*/
3339+ u16 uni_id;
3340+ u16 flag;
3341+
3342+ /*DW2*/
3343+ int status;
3344+
3345+ /*DW3*/
3346+ u8 reserved[20];
3347+};
3348+
3349+struct mtk_wed_fw_region {
3350+ void *addr;
3351+ u32 addr_pa;
3352+ u32 size;
3353+ u32 shared;
3354+};
3355+
3356+struct wed_wo_queue_ops;
3357+struct wed_wo_drv_ops;
3358+struct wed_wo_mcu_ops;
3359+
3360+struct wo_rx_total_cnt {
3361+ u64 rx_pkt_cnt;
3362+ u64 rx_byte_cnt;
3363+ u64 rx_err_cnt;
3364+ u64 rx_drop_cnt;
3365+};
3366+
3367+struct mtk_wed_wo {
3368+ struct mtk_wed_hw *hw;
3369+
3370+ struct wed_wo_mmio ccif;
3371+ struct wed_wo_mcu mcu;
3372+ struct wed_wo_exception exp;
3373+
3374+ const struct wed_wo_drv_ops *drv_ops;
3375+ const struct wed_wo_mcu_ops *mcu_ops;
3376+ const struct wed_wo_queue_ops *queue_ops;
3377+
3378+ struct net_device napi_dev;
3379+ spinlock_t rx_lock;
3380+ struct napi_struct napi;
3381+ struct sk_buff_head rx_skb;
3382+ struct wed_wo_queue q_rx;
3383+ struct tasklet_struct irq_tasklet;
3384+
3385+ struct wed_wo_queue q_tx;
3386+
3387+ struct mtk_wed_fw_region region[__WO_REGION_MAX];
3388+
3389+ struct wed_wo_profile_stat total[WED_WO_PROFILE_MAX_LVL];
3390+ struct wed_wo_profile_stat mod[WED_WO_PROFILE_MAX_LVL];
3391+ struct wed_wo_profile_stat rro[WED_WO_PROFILE_MAX_LVL];
3392+ char dirname[4];
3393+ struct wo_rx_total_cnt wo_rxcnt[8][544];
3394+};
3395+
3396+struct wed_wo_queue_ops {
3397+ int (*init)(struct mtk_wed_wo *wo,
3398+ int (*poll)(struct napi_struct *napi, int budget));
3399+
3400+ int (*alloc)(struct mtk_wed_wo *wo, struct wed_wo_queue *q,
3401+	int n_desc, int bufsize, int idx,
3402+ struct wed_wo_queue_regs *regs);
developera3f86ed2022-07-08 14:15:13 +08003403+ void (*free)(struct mtk_wed_wo *wo, struct wed_wo_queue *q);
developer8cb3ac72022-07-04 10:55:14 +08003404+ void (*reset)(struct mtk_wed_wo *wo, struct wed_wo_queue *q);
3405+
3406+ int (*tx_skb)(struct mtk_wed_wo *wo, struct wed_wo_queue *q,
3407+ struct sk_buff *skb);
developer53bfd362022-09-29 12:02:18 +08003408+ void (*tx_clean)(struct mtk_wed_wo *wo, struct wed_wo_queue *q);
developer8cb3ac72022-07-04 10:55:14 +08003409+
3410+ void (*rx_clean)(struct mtk_wed_wo *wo, struct wed_wo_queue *q);
3411+
3412+ void (*kick)(struct mtk_wed_wo *wo, struct wed_wo_queue *q, int offset);
3413+};
3414+
3415+struct wed_wo_drv_ops {
3416+ void (*kickout)(struct mtk_wed_wo *wo);
3417+ void (*set_ack)(struct mtk_wed_wo *wo, u32 mask);
3418+ void (*set_isr)(struct mtk_wed_wo *wo, u32 mask);
3419+ u32 (*get_csr)(struct mtk_wed_wo *wo);
3420+ int (*tx_prepare_skb)(struct mtk_wed_wo *wo);
3421+	bool (*check_excpetion)(struct mtk_wed_wo *wo); /* sic: typo for "exception"; renaming requires updating implementers */
3422+ void (*clear_int)(struct mtk_wed_wo *wo, u32 mask);
3423+};
3424+
3425+struct wed_wo_mcu_ops {
3426+ u32 headroom;
3427+
3428+ int (*mcu_skb_send_msg)(struct mtk_wed_wo *wo, int to_id,
3429+ int cmd, struct sk_buff *skb,
3430+ int *seq, bool wait_resp);
3431+
3432+ int (*mcu_parse_response)(struct mtk_wed_wo *wo, int cmd,
3433+ struct sk_buff *skb, int seq);
3434+
3435+ int (*mcu_restart)(struct mtk_wed_wo *wo);
3436+};
3437+
3438+#define mtk_wed_wo_q_init(wo, ...) (wo)->queue_ops->init((wo), __VA_ARGS__)
3439+#define mtk_wed_wo_q_alloc(wo, ...) (wo)->queue_ops->alloc((wo), __VA_ARGS__)
developera3f86ed2022-07-08 14:15:13 +08003440+#define mtk_wed_wo_q_free(wo, ...) (wo)->queue_ops->free((wo), __VA_ARGS__)
3441+#define mtk_wed_wo_q_reset(wo, ...) (wo)->queue_ops->reset((wo), __VA_ARGS__)
developer8cb3ac72022-07-04 10:55:14 +08003442+#define mtk_wed_wo_q_tx_skb(wo, ...) (wo)->queue_ops->tx_skb((wo), __VA_ARGS__)
developer8cb3ac72022-07-04 10:55:14 +08003443+#define mtk_wed_wo_q_tx_clean(wo, ...) (wo)->queue_ops->tx_clean((wo), __VA_ARGS__)
3444+#define mtk_wed_wo_q_rx_clean(wo, ...) (wo)->queue_ops->rx_clean((wo), __VA_ARGS__)
3445+#define mtk_wed_wo_q_kick(wo, ...) (wo)->queue_ops->kick((wo), __VA_ARGS__)
3446+
3447+enum {
3448+	WARP_CMD_FLAG_RSP = 1 << 0, /* is response */
3449+	WARP_CMD_FLAG_NEED_RSP = 1 << 1, /* need response */
3450+ WARP_CMD_FLAG_FROM_TO_WO = 1 << 2, /* send between host and wo */
3451+};
3452+
3453+#define WED_WO_CMD_FLAG_IS_RSP(_hdr) ((_hdr)->flag & (WARP_CMD_FLAG_RSP))
3454+#define WED_WO_CMD_FLAG_SET_RSP(_hdr) ((_hdr)->flag |= (WARP_CMD_FLAG_RSP))
3455+#define WED_WO_CMD_FLAG_IS_NEED_RSP(_hdr) ((_hdr)->flag & (WARP_CMD_FLAG_NEED_RSP))
3456+#define WED_WO_CMD_FLAG_SET_NEED_RSP(_hdr) ((_hdr)->flag |= (WARP_CMD_FLAG_NEED_RSP))
3457+#define WED_WO_CMD_FLAG_IS_FROM_TO_WO(_hdr) ((_hdr)->flag & (WARP_CMD_FLAG_FROM_TO_WO))
3458+#define WED_WO_CMD_FLAG_SET_FROM_TO_WO(_hdr) ((_hdr)->flag |= (WARP_CMD_FLAG_FROM_TO_WO))
3459+
3460+void mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, bool set,
3461+ u32 clear, u32 val);
3462+
3463+static inline void mtk_wed_wo_isr_enable(struct mtk_wed_wo *wo, u32 mask)
3464+{
3465+ mtk_wed_wo_set_isr_mask(wo, false, 0, mask);
3466+
3467+ tasklet_schedule(&wo->irq_tasklet);
3468+}
3469+
3470+static inline void mtk_wed_wo_isr_disable(struct mtk_wed_wo *wo, u32 mask)
3471+{
3472+ mtk_wed_wo_set_isr_mask(wo, true, mask, 0);
3473+}
3474+
3475+static inline void
3476+wo_w32(struct mtk_wed_wo *dev, u32 reg, u32 val)
3477+{
3478+ writel(val, dev->region[WO_REGION_BOOT].addr + reg);
3479+}
3480+
3481+static inline u32
3482+wo_r32(struct mtk_wed_wo *dev, u32 reg)
3483+{
3484+ return readl(dev->region[WO_REGION_BOOT].addr + reg);
3485+}
3486+static inline void
3487+woccif_w32(struct mtk_wed_wo *dev, u32 reg, u32 val)
3488+{
3489+ regmap_write(dev->ccif.regs, reg, val);
3490+}
3491+
3492+static inline u32
3493+woccif_r32(struct mtk_wed_wo *dev, u32 reg)
3494+{
3495+ unsigned int val;
3496+
3497+ regmap_read(dev->ccif.regs, reg, &val);
3498+
3499+ return val;
3500+}
3501+
3502+int mtk_wed_wo_init(struct mtk_wed_hw *hw);
developera3f86ed2022-07-08 14:15:13 +08003503+void mtk_wed_wo_exit(struct mtk_wed_hw *hw);
developer8cb3ac72022-07-04 10:55:14 +08003504+#endif
3505+
3506diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
developer144824b2022-11-25 21:27:43 +08003507index e914cb4..e8fca31 100644
developer8cb3ac72022-07-04 10:55:14 +08003508--- a/include/linux/soc/mediatek/mtk_wed.h
3509+++ b/include/linux/soc/mediatek/mtk_wed.h
developer144824b2022-11-25 21:27:43 +08003510@@ -6,7 +6,39 @@
3511 #include <linux/regmap.h>
developer8cb3ac72022-07-04 10:55:14 +08003512 #include <linux/pci.h>
3513
developer144824b2022-11-25 21:27:43 +08003514+#define WED_WO_STA_REC 0x6
3515+
developer8cb3ac72022-07-04 10:55:14 +08003516 #define MTK_WED_TX_QUEUES 2
3517+#define MTK_WED_RX_QUEUES 2
3518+
developer144824b2022-11-25 21:27:43 +08003519+enum mtk_wed_wo_cmd {
3520+ MTK_WED_WO_CMD_WED_CFG,
3521+ MTK_WED_WO_CMD_WED_RX_STAT,
3522+ MTK_WED_WO_CMD_RRO_SER,
3523+ MTK_WED_WO_CMD_DBG_INFO,
3524+ MTK_WED_WO_CMD_DEV_INFO,
3525+ MTK_WED_WO_CMD_BSS_INFO,
3526+ MTK_WED_WO_CMD_STA_REC,
3527+ MTK_WED_WO_CMD_DEV_INFO_DUMP,
3528+ MTK_WED_WO_CMD_BSS_INFO_DUMP,
3529+ MTK_WED_WO_CMD_STA_REC_DUMP,
3530+ MTK_WED_WO_CMD_BA_INFO_DUMP,
3531+ MTK_WED_WO_CMD_FBCMD_Q_DUMP,
3532+ MTK_WED_WO_CMD_FW_LOG_CTRL,
3533+ MTK_WED_WO_CMD_LOG_FLUSH,
3534+ MTK_WED_WO_CMD_CHANGE_STATE,
3535+ MTK_WED_WO_CMD_CPU_STATS_ENABLE,
3536+ MTK_WED_WO_CMD_CPU_STATS_DUMP,
3537+ MTK_WED_WO_CMD_EXCEPTION_INIT,
3538+ MTK_WED_WO_CMD_PROF_CTRL,
3539+ MTK_WED_WO_CMD_STA_BA_DUMP,
3540+ MTK_WED_WO_CMD_BA_CTRL_DUMP,
3541+ MTK_WED_WO_CMD_RXCNT_CTRL,
3542+ MTK_WED_WO_CMD_RXCNT_INFO,
3543+ MTK_WED_WO_CMD_SET_CAP,
3544+ MTK_WED_WO_CMD_CCIF_RING_DUMP,
3545+ MTK_WED_WO_CMD_WED_END
developerfaaa5162022-10-24 14:12:16 +08003546+};
developer8cb3ac72022-07-04 10:55:14 +08003547
3548 enum {
3549 MTK_NO_WED,
developer144824b2022-11-25 21:27:43 +08003550@@ -15,10 +47,9 @@ enum {
3551 MTK_WED_VMAX
3552 };
3553
3554-enum {
3555- MTK_BUS_TYPE_PCIE,
3556- MTK_BUS_TYPE_AXI,
3557- MTK_BUS_TYPE_MAX
3558+enum mtk_wed_bus_tye {
3559+ MTK_WED_BUS_PCIE,
3560+ MTK_WED_BUS_AXI,
3561 };
3562
3563 struct mtk_wed_hw;
3564@@ -33,6 +64,33 @@ struct mtk_wed_ring {
developer8cb3ac72022-07-04 10:55:14 +08003565 void __iomem *wpdma;
3566 };
3567
3568+struct mtk_rxbm_desc {
3569+ __le32 buf0;
3570+ __le32 token;
3571+} __packed __aligned(4);
3572+/* NOTE(review): "dma_buf" collides with the kernel-wide struct dma_buf in <linux/dma-buf.h>; consider renaming (e.g. mtk_wed_dma_buf) */
3573+struct dma_buf {
3574+ int size;
3575+ void **pages;
3576+ struct mtk_wdma_desc *desc;
3577+ dma_addr_t desc_phys;
3578+};
3579+
3580+struct dma_entry {
3581+ int size;
3582+ struct mtk_rxbm_desc *desc;
3583+ dma_addr_t desc_phys;
3584+};
3585+
developer144824b2022-11-25 21:27:43 +08003586+struct mtk_wed_wo_rx_stats {
3587+ __le16 wlan_idx;
3588+ __le16 tid;
3589+ __le32 rx_pkt_cnt;
3590+ __le32 rx_byte_cnt;
3591+ __le32 rx_err_cnt;
3592+ __le32 rx_drop_cnt;
developer8fec8ae2022-08-15 15:01:09 -07003593+};
3594+
developer8cb3ac72022-07-04 10:55:14 +08003595 struct mtk_wed_device {
3596 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
3597 const struct mtk_wed_ops *ops;
developer144824b2022-11-25 21:27:43 +08003598@@ -47,37 +105,64 @@ struct mtk_wed_device {
developer8cb3ac72022-07-04 10:55:14 +08003599 struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES];
3600 struct mtk_wed_ring txfree_ring;
3601 struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
3602+ struct mtk_wed_ring rx_ring[MTK_WED_RX_QUEUES];
3603+ struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES];
3604+
3605+ struct dma_buf buf_ring;
developer8cb3ac72022-07-04 10:55:14 +08003606
3607 struct {
developer144824b2022-11-25 21:27:43 +08003608 int size;
developer8cb3ac72022-07-04 10:55:14 +08003609- void **pages;
3610- struct mtk_wdma_desc *desc;
developer144824b2022-11-25 21:27:43 +08003611+ struct page_frag_cache rx_page;
3612+ struct mtk_rxbm_desc *desc;
3613 dma_addr_t desc_phys;
developer8cb3ac72022-07-04 10:55:14 +08003614- } buf_ring;
developer144824b2022-11-25 21:27:43 +08003615+ } rx_buf_ring;
3616+
3617+ struct {
developer8cb3ac72022-07-04 10:55:14 +08003618+ struct mtk_wed_ring rro_ring;
3619+ void __iomem *rro_desc;
3620+ dma_addr_t miod_desc_phys;
3621+ dma_addr_t fdbk_desc_phys;
3622+ u32 mcu_view_miod;
3623+ } rro;
3624
3625 /* filled by driver: */
3626 struct {
developer144824b2022-11-25 21:27:43 +08003627- struct pci_dev *pci_dev;
3628+ union {
3629+ struct platform_device *platform_dev;
3630+ struct pci_dev *pci_dev;
3631+ };
developer8cb3ac72022-07-04 10:55:14 +08003632 void __iomem *base;
3633 u32 bus_type;
3634+ u32 phy_base;
3635
developerbbca0f92022-07-26 17:26:12 +08003636 u32 wpdma_phys;
3637 u32 wpdma_int;
developer8cb3ac72022-07-04 10:55:14 +08003638 u32 wpdma_mask;
3639 u32 wpdma_tx;
3640 u32 wpdma_txfree;
3641+ u32 wpdma_rx_glo;
3642+ u32 wpdma_rx;
3643
3644 u8 tx_tbit[MTK_WED_TX_QUEUES];
3645+ u8 rx_tbit[MTK_WED_RX_QUEUES];
3646 u8 txfree_tbit;
3647
3648 u16 token_start;
3649 unsigned int nbuf;
3650+ unsigned int rx_nbuf;
developer144824b2022-11-25 21:27:43 +08003651+ unsigned int rx_npkt;
3652+ unsigned int rx_size;
developer8cb3ac72022-07-04 10:55:14 +08003653
developer203096a2022-09-13 21:07:19 +08003654 bool wcid_512;
3655
developer8cb3ac72022-07-04 10:55:14 +08003656 u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
3657 int (*offload_enable)(struct mtk_wed_device *wed);
3658 void (*offload_disable)(struct mtk_wed_device *wed);
3659+ u32 (*init_rx_buf)(struct mtk_wed_device *wed,
3660+ int pkt_num);
3661+ void (*release_rx_buf)(struct mtk_wed_device *wed);
developer144824b2022-11-25 21:27:43 +08003662+ void (*update_wo_rx_stats)(struct mtk_wed_device *wed,
3663+ struct mtk_wed_wo_rx_stats *stats);
developer8cb3ac72022-07-04 10:55:14 +08003664 } wlan;
3665 #endif
3666 };
developer144824b2022-11-25 21:27:43 +08003667@@ -88,6 +173,10 @@ struct mtk_wed_ops {
developer8cb3ac72022-07-04 10:55:14 +08003668 void __iomem *regs);
3669 int (*txfree_ring_setup)(struct mtk_wed_device *dev,
3670 void __iomem *regs);
3671+ int (*rx_ring_setup)(struct mtk_wed_device *dev, int ring,
3672+ void __iomem *regs);
3673+ int (*msg_update)(struct mtk_wed_device *dev, int cmd_id,
3674+ void *data, int len);
3675 void (*detach)(struct mtk_wed_device *dev);
3676
3677 void (*stop)(struct mtk_wed_device *dev);
developer144824b2022-11-25 21:27:43 +08003678@@ -99,6 +188,8 @@ struct mtk_wed_ops {
developer8cb3ac72022-07-04 10:55:14 +08003679
3680 u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask);
3681 void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
developerbbca0f92022-07-26 17:26:12 +08003682+ void (*ppe_check)(struct mtk_wed_device *dev, struct sk_buff *skb,
developer8cb3ac72022-07-04 10:55:14 +08003683+ u32 reason, u32 hash);
3684 };
3685
3686 extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
developer144824b2022-11-25 21:27:43 +08003687@@ -123,6 +214,16 @@ mtk_wed_device_attach(struct mtk_wed_device *dev)
3688 return ret;
3689 }
3690
3691+static inline bool
3692+mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
3693+{
3694+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
3695+ return dev->ver != 1;
3696+#else
3697+ return false;
3698+#endif
3699+}
3700+
3701 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
3702 #define mtk_wed_device_active(_dev) !!(_dev)->ops
3703 #define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
3704@@ -131,6 +232,10 @@ mtk_wed_device_attach(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +08003705 (_dev)->ops->tx_ring_setup(_dev, _ring, _regs)
3706 #define mtk_wed_device_txfree_ring_setup(_dev, _regs) \
3707 (_dev)->ops->txfree_ring_setup(_dev, _regs)
3708+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) \
3709+ (_dev)->ops->rx_ring_setup(_dev, _ring, _regs)
3710+#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) \
3711+ (_dev)->ops->msg_update(_dev, _id, _msg, _len)
3712 #define mtk_wed_device_reg_read(_dev, _reg) \
3713 (_dev)->ops->reg_read(_dev, _reg)
3714 #define mtk_wed_device_reg_write(_dev, _reg, _val) \
developer144824b2022-11-25 21:27:43 +08003715@@ -139,6 +244,8 @@ mtk_wed_device_attach(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +08003716 (_dev)->ops->irq_get(_dev, _mask)
3717 #define mtk_wed_device_irq_set_mask(_dev, _mask) \
3718 (_dev)->ops->irq_set_mask(_dev, _mask)
3719+#define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) \
3720+ (_dev)->ops->ppe_check(_dev, _skb, _reason, _hash)
3721 #else
3722 static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
3723 {
developer144824b2022-11-25 21:27:43 +08003724@@ -148,10 +255,13 @@ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +08003725 #define mtk_wed_device_start(_dev, _mask) do {} while (0)
3726 #define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV
3727 #define mtk_wed_device_txfree_ring_setup(_dev, _ring, _regs) -ENODEV
3728+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) -ENODEV
3729+#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) -ENODEV
3730 #define mtk_wed_device_reg_read(_dev, _reg) 0
3731 #define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
3732 #define mtk_wed_device_irq_get(_dev, _mask) 0
3733 #define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0)
3734+#define mtk_wed_device_ppe_check(_dev, _hash) do {} while (0)
3735 #endif
3736
3737 #endif
3738--
developere0cbe332022-09-10 17:36:02 +080037392.18.0
developer8cb3ac72022-07-04 10:55:14 +08003740