blob: 00909d55e4940bccc168d287cd6d119145444edb [file] [log] [blame]
developer8fec8ae2022-08-15 15:01:09 -07001From 7c81104d65728fb1c0f156c46e3cfc5dec24b119 Mon Sep 17 00:00:00 2001
developer8cb3ac72022-07-04 10:55:14 +08002From: Sujuan Chen <sujuan.chen@mediatek.com>
3Date: Wed, 15 Jun 2022 14:38:54 +0800
4Subject: [PATCH 8/8] 9997-add-wed-rx-support-for-mt7896
5
6Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
7---
8 arch/arm64/boot/dts/mediatek/mt7986a.dtsi | 42 +-
9 arch/arm64/boot/dts/mediatek/mt7986b.dtsi | 42 +-
10 drivers/net/ethernet/mediatek/Makefile | 2 +-
developere0cbe332022-09-10 17:36:02 +080011 drivers/net/ethernet/mediatek/mtk_wed.c | 625 ++++++++++++++++--
developera3f86ed2022-07-08 14:15:13 +080012 drivers/net/ethernet/mediatek/mtk_wed.h | 51 ++
13 drivers/net/ethernet/mediatek/mtk_wed_ccif.c | 133 ++++
developer8cb3ac72022-07-04 10:55:14 +080014 drivers/net/ethernet/mediatek/mtk_wed_ccif.h | 45 ++
15 .../net/ethernet/mediatek/mtk_wed_debugfs.c | 90 +++
developer8fec8ae2022-08-15 15:01:09 -070016 drivers/net/ethernet/mediatek/mtk_wed_mcu.c | 586 ++++++++++++++++
developer8cb3ac72022-07-04 10:55:14 +080017 drivers/net/ethernet/mediatek/mtk_wed_mcu.h | 125 ++++
developere0cbe332022-09-10 17:36:02 +080018 drivers/net/ethernet/mediatek/mtk_wed_regs.h | 144 +++-
developer53bfd362022-09-29 12:02:18 +080019 drivers/net/ethernet/mediatek/mtk_wed_wo.c | 564 ++++++++++++++++
20 drivers/net/ethernet/mediatek/mtk_wed_wo.h | 324 +++++++++
developere0cbe332022-09-10 17:36:02 +080021 include/linux/soc/mediatek/mtk_wed.h | 74 ++-
developer8fec8ae2022-08-15 15:01:09 -070022 14 files changed, 2796 insertions(+), 75 deletions(-)
developer8cb3ac72022-07-04 10:55:14 +080023 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_ccif.c
24 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_ccif.h
25 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_mcu.c
26 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_mcu.h
27 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_wo.c
28 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_wo.h
29
30diff --git a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
developer8fec8ae2022-08-15 15:01:09 -070031index 87d2b11a9..6abc06db8 100644
developer8cb3ac72022-07-04 10:55:14 +080032--- a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
33+++ b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
34@@ -65,6 +65,12 @@
35 interrupt-parent = <&gic>;
36 interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
37 mediatek,wed_pcie = <&wed_pcie>;
38+ mediatek,ap2woccif = <&ap2woccif0>;
39+ mediatek,wocpu_ilm = <&wocpu0_ilm>;
40+ mediatek,wocpu_dlm = <&wocpu0_dlm>;
41+ mediatek,wocpu_boot = <&cpu_boot>;
42+ mediatek,wocpu_emi = <&wocpu0_emi>;
43+ mediatek,wocpu_data = <&wocpu_data>;
44 };
45
46 wed1: wed@15011000 {
47@@ -74,15 +80,26 @@
48 interrupt-parent = <&gic>;
49 interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
50 mediatek,wed_pcie = <&wed_pcie>;
51+ mediatek,ap2woccif = <&ap2woccif1>;
52+ mediatek,wocpu_ilm = <&wocpu1_ilm>;
53+ mediatek,wocpu_dlm = <&wocpu1_dlm>;
54+ mediatek,wocpu_boot = <&cpu_boot>;
55+ mediatek,wocpu_emi = <&wocpu1_emi>;
56+ mediatek,wocpu_data = <&wocpu_data>;
57 };
58
59- ap2woccif: ap2woccif@151A5000 {
60- compatible = "mediatek,ap2woccif";
61- reg = <0 0x151A5000 0 0x1000>,
62- <0 0x151AD000 0 0x1000>;
63+ ap2woccif0: ap2woccif@151A5000 {
64+ compatible = "mediatek,ap2woccif", "syscon";
65+ reg = <0 0x151A5000 0 0x1000>;
66 interrupt-parent = <&gic>;
67- interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
68- <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
69+ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
70+ };
71+
72+ ap2woccif1: ap2woccif@0x151AD000 {
73+ compatible = "mediatek,ap2woccif", "syscon";
74+ reg = <0 0x151AD000 0 0x1000>;
75+ interrupt-parent = <&gic>;
76+ interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
77 };
78
79 wocpu0_ilm: wocpu0_ilm@151E0000 {
80@@ -95,10 +112,17 @@
81 reg = <0 0x151F0000 0 0x8000>;
82 };
83
84- wocpu_dlm: wocpu_dlm@151E8000 {
85+ wocpu0_dlm: wocpu_dlm@151E8000 {
86+ compatible = "mediatek,wocpu_dlm";
87+ reg = <0 0x151E8000 0 0x2000>;
88+
89+ resets = <&ethsysrst 0>;
90+ reset-names = "wocpu_rst";
91+ };
92+
93+ wocpu1_dlm: wocpu_dlm@0x151F8000 {
94 compatible = "mediatek,wocpu_dlm";
95- reg = <0 0x151E8000 0 0x2000>,
96- <0 0x151F8000 0 0x2000>;
97+ reg = <0 0x151F8000 0 0x2000>;
98
99 resets = <&ethsysrst 0>;
100 reset-names = "wocpu_rst";
101diff --git a/arch/arm64/boot/dts/mediatek/mt7986b.dtsi b/arch/arm64/boot/dts/mediatek/mt7986b.dtsi
102index 67bf86f6a..6710b388b 100644
103--- a/arch/arm64/boot/dts/mediatek/mt7986b.dtsi
104+++ b/arch/arm64/boot/dts/mediatek/mt7986b.dtsi
105@@ -65,6 +65,12 @@
106 interrupt-parent = <&gic>;
107 interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
108 mediatek,wed_pcie = <&wed_pcie>;
109+ mediatek,ap2woccif = <&ap2woccif0>;
110+ mediatek,wocpu_ilm = <&wocpu0_ilm>;
111+ mediatek,wocpu_dlm = <&wocpu0_dlm>;
112+ mediatek,wocpu_boot = <&cpu_boot>;
113+ mediatek,wocpu_emi = <&wocpu0_emi>;
114+ mediatek,wocpu_data = <&wocpu_data>;
115 };
116
117 wed1: wed@15011000 {
118@@ -74,15 +80,26 @@
119 interrupt-parent = <&gic>;
120 interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
121 mediatek,wed_pcie = <&wed_pcie>;
122+ mediatek,ap2woccif = <&ap2woccif1>;
123+ mediatek,wocpu_ilm = <&wocpu1_ilm>;
124+ mediatek,wocpu_dlm = <&wocpu1_dlm>;
125+ mediatek,wocpu_boot = <&cpu_boot>;
126+ mediatek,wocpu_emi = <&wocpu1_emi>;
127+ mediatek,wocpu_data = <&wocpu_data>;
128 };
129
130- ap2woccif: ap2woccif@151A5000 {
131- compatible = "mediatek,ap2woccif";
132- reg = <0 0x151A5000 0 0x1000>,
133- <0 0x151AD000 0 0x1000>;
134+ ap2woccif0: ap2woccif@151A5000 {
135+ compatible = "mediatek,ap2woccif", "syscon";
136+ reg = <0 0x151A5000 0 0x1000>;
137 interrupt-parent = <&gic>;
138- interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
139- <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
140+ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
141+ };
142+
143+ ap2woccif1: ap2woccif@0x151AD000 {
144+ compatible = "mediatek,ap2woccif", "syscon";
145+ reg = <0 0x151AD000 0 0x1000>;
146+ interrupt-parent = <&gic>;
147+ interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
148 };
149
150 wocpu0_ilm: wocpu0_ilm@151E0000 {
151@@ -95,10 +112,17 @@
152 reg = <0 0x151F0000 0 0x8000>;
153 };
154
155- wocpu_dlm: wocpu_dlm@151E8000 {
156+ wocpu0_dlm: wocpu_dlm@151E8000 {
157+ compatible = "mediatek,wocpu_dlm";
158+ reg = <0 0x151E8000 0 0x2000>;
159+
160+ resets = <&ethsysrst 0>;
161+ reset-names = "wocpu_rst";
162+ };
163+
164+ wocpu1_dlm: wocpu_dlm@0x151F8000 {
165 compatible = "mediatek,wocpu_dlm";
166- reg = <0 0x151E8000 0 0x2000>,
167- <0 0x151F8000 0 0x2000>;
168+ reg = <0 0x151F8000 0 0x2000>;
169
170 resets = <&ethsysrst 0>;
171 reset-names = "wocpu_rst";
172diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
developere0cbe332022-09-10 17:36:02 +0800173index 3528f1b..0c724a5 100644
developer8cb3ac72022-07-04 10:55:14 +0800174--- a/drivers/net/ethernet/mediatek/Makefile
175+++ b/drivers/net/ethernet/mediatek/Makefile
176@@ -10,5 +10,5 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o
177 ifdef CONFIG_DEBUG_FS
178 mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
179 endif
180-obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
181+obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o mtk_wed_wo.o mtk_wed_mcu.o mtk_wed_ccif.o
182 obj-$(CONFIG_NET_MEDIATEK_HNAT) += mtk_hnat/
183diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
developere0cbe332022-09-10 17:36:02 +0800184index 2700176..b037d00 100644
developer8cb3ac72022-07-04 10:55:14 +0800185--- a/drivers/net/ethernet/mediatek/mtk_wed.c
186+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
187@@ -13,11 +13,19 @@
188 #include <linux/debugfs.h>
189 #include <linux/iopoll.h>
190 #include <linux/soc/mediatek/mtk_wed.h>
191+
192 #include "mtk_eth_soc.h"
193 #include "mtk_wed_regs.h"
194 #include "mtk_wed.h"
195 #include "mtk_ppe.h"
196-
197+#include "mtk_wed_mcu.h"
198+#include "mtk_wed_wo.h"
199+
200+struct wo_cmd_ring {
201+ u32 q_base;
202+ u32 cnt;
203+ u32 unit;
204+};
205 static struct mtk_wed_hw *hw_list[2];
206 static DEFINE_MUTEX(hw_lock);
207
developera3f86ed2022-07-08 14:15:13 +0800208@@ -51,6 +59,56 @@ wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
developer8cb3ac72022-07-04 10:55:14 +0800209 wdma_m32(dev, reg, 0, mask);
210 }
211
212+static void
213+wdma_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
214+{
215+ wdma_m32(dev, reg, mask, 0);
216+}
217+
developera3f86ed2022-07-08 14:15:13 +0800218+static u32
219+mtk_wdma_read_reset(struct mtk_wed_device *dev)
220+{
221+ return wdma_r32(dev, MTK_WDMA_GLO_CFG);
222+}
223+
224+static void
225+mtk_wdma_rx_reset(struct mtk_wed_device *dev)
226+{
227+ u32 status;
228+ u32 mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY;
229+ int i;
230+
231+ wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN);
232+ if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
233+ !(status & mask), 0, 1000))
234+ WARN_ON_ONCE(1);
235+
236+ for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
237+ if (!dev->rx_wdma[i].desc) {
238+ wdma_w32(dev, MTK_WDMA_RING_RX(i) +
239+ MTK_WED_RING_OFS_CPU_IDX, 0);
240+ }
241+}
242+
243+static void
244+mtk_wdma_tx_reset(struct mtk_wed_device *dev)
245+{
246+ u32 status;
247+ u32 mask = MTK_WDMA_GLO_CFG_TX_DMA_BUSY;
248+ int i;
249+
250+ wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
251+ if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
252+ !(status & mask), 0, 1000))
253+ WARN_ON_ONCE(1);
254+
255+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
256+ if (!dev->tx_wdma[i].desc) {
257+ wdma_w32(dev, MTK_WDMA_RING_TX(i) +
258+ MTK_WED_RING_OFS_CPU_IDX, 0);
259+ }
260+}
261+
developer8cb3ac72022-07-04 10:55:14 +0800262 static u32
263 mtk_wed_read_reset(struct mtk_wed_device *dev)
264 {
developera3f86ed2022-07-08 14:15:13 +0800265@@ -68,6 +126,52 @@ mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
developer8cb3ac72022-07-04 10:55:14 +0800266 WARN_ON_ONCE(1);
267 }
268
269+static void
270+mtk_wed_wo_reset(struct mtk_wed_device *dev)
271+{
272+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
273+ u8 state = WO_STATE_DISABLE;
274+ u8 state_done = WOIF_DISABLE_DONE;
275+ void __iomem *reg;
276+ u32 value;
277+ unsigned long timeout = jiffies + WOCPU_TIMEOUT;
278+
developerc1b2cd12022-07-28 18:35:24 +0800279+ mtk_wdma_tx_reset(dev);
developera3f86ed2022-07-08 14:15:13 +0800280+
281+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
282+
developer8cb3ac72022-07-04 10:55:14 +0800283+ mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, WO_CMD_CHANGE_STATE,
284+ &state, sizeof(state), false);
285+
286+ do {
287+ value = wed_r32(dev, MTK_WED_SCR0 + 4 * WED_DUMMY_CR_WO_STATUS);
288+ } while (value != state_done && !time_after(jiffies, timeout));
289+
290+ reg = ioremap(WOCPU_MCUSYS_RESET_ADDR, 4);
291+ value = readl((void *)reg);
292+ switch(dev->hw->index) {
293+ case 0:
294+ value |= WOCPU_WO0_MCUSYS_RESET_MASK;
295+ writel(value, (void *)reg);
296+ value &= ~WOCPU_WO0_MCUSYS_RESET_MASK;
297+ writel(value, (void *)reg);
298+ break;
299+ case 1:
300+ value |= WOCPU_WO1_MCUSYS_RESET_MASK;
301+ writel(value, (void *)reg);
302+ value &= ~WOCPU_WO1_MCUSYS_RESET_MASK;
303+ writel(value, (void *)reg);
304+ break;
305+ default:
306+ dev_err(dev->hw->dev, "wrong mtk_wed%d\n",
307+ dev->hw->index);
308+
309+ break;
310+ }
311+
312+ iounmap((void *)reg);
313+}
314+
315 static struct mtk_wed_hw *
316 mtk_wed_assign(struct mtk_wed_device *dev)
317 {
developera3f86ed2022-07-08 14:15:13 +0800318@@ -178,7 +282,7 @@ mtk_wed_free_buffer(struct mtk_wed_device *dev)
319 {
320 struct mtk_wdma_desc *desc = dev->buf_ring.desc;
321 void **page_list = dev->buf_ring.pages;
322- int page_idx;
323+ int ring_size, page_idx;
324 int i;
325
326 if (!page_list)
developerf11dcd72022-08-27 18:29:27 +0800327@@ -187,7 +291,14 @@ mtk_wed_free_buffer(struct mtk_wed_device *dev)
developera3f86ed2022-07-08 14:15:13 +0800328 if (!desc)
329 goto free_pagelist;
330
developerf11dcd72022-08-27 18:29:27 +0800331- for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
developera3f86ed2022-07-08 14:15:13 +0800332+ if (dev->ver == MTK_WED_V1) {
333+ ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
334+ } else {
335+ ring_size = MTK_WED_VLD_GROUP_SIZE * MTK_WED_PER_GROUP_PKT +
336+ MTK_WED_WDMA_RING_SIZE * 2;
337+ }
338+
developerf11dcd72022-08-27 18:29:27 +0800339+ for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
developera3f86ed2022-07-08 14:15:13 +0800340 void *page = page_list[page_idx++];
341
developerf11dcd72022-08-27 18:29:27 +0800342 if (!page)
343@@ -198,13 +309,49 @@ mtk_wed_free_buffer(struct mtk_wed_device *dev)
344 __free_page(page);
345 }
346
347- dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc),
348+ dma_free_coherent(dev->hw->dev, ring_size * sizeof(*desc),
349 desc, dev->buf_ring.desc_phys);
350
351 free_pagelist:
developer8cb3ac72022-07-04 10:55:14 +0800352 kfree(page_list);
353 }
354
355+static int
356+mtk_wed_rx_bm_alloc(struct mtk_wed_device *dev)
357+{
358+ struct mtk_rxbm_desc *desc;
359+ dma_addr_t desc_phys;
360+ int ring_size;
361+
362+ ring_size = dev->wlan.rx_nbuf;
363+ dev->rx_buf_ring.size = ring_size;
364+ desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
365+ &desc_phys, GFP_KERNEL);
366+ if (!desc)
367+ return -ENOMEM;
368+
369+ dev->rx_buf_ring.desc = desc;
370+ dev->rx_buf_ring.desc_phys = desc_phys;
371+
372+ dev->wlan.init_rx_buf(dev, dev->wlan.rx_pkt);
373+ return 0;
374+}
375+
376+static void
377+mtk_wed_free_rx_bm(struct mtk_wed_device *dev)
378+{
379+ struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc;
developera3f86ed2022-07-08 14:15:13 +0800380+ int ring_size = dev->rx_buf_ring.size;
developer8cb3ac72022-07-04 10:55:14 +0800381+
382+ if (!desc)
383+ return;
384+
385+ dev->wlan.release_rx_buf(dev);
386+
developer9dbe57a2022-08-05 18:23:53 +0800387+ dma_free_coherent(dev->hw->dev, ring_size * sizeof(*desc),
388+ desc, dev->rx_buf_ring.desc_phys);
developer8cb3ac72022-07-04 10:55:14 +0800389+}
390+
391 static void
392 mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, int scale)
393 {
developera3f86ed2022-07-08 14:15:13 +0800394@@ -226,13 +373,22 @@ mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800395 mtk_wed_free_ring(dev, &dev->tx_wdma[i], dev->ver);
396 }
397
398+static void
399+mtk_wed_free_rx_rings(struct mtk_wed_device *dev)
400+{
401+ mtk_wed_free_rx_bm(dev);
402+ mtk_wed_free_ring(dev, &dev->rro.rro_ring, 1);
403+}
404+
405 static void
406 mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
407 {
408 u32 wdma_mask;
409
410 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
411-
412+ if (dev->ver > MTK_WED_V1)
413+ wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
414+ GENMASK(1, 0));
415 /* wed control cr set */
416 wed_set(dev, MTK_WED_CTRL,
417 MTK_WED_CTRL_WDMA_INT_AGENT_EN |
developera3f86ed2022-07-08 14:15:13 +0800418@@ -251,7 +407,7 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
developer8cb3ac72022-07-04 10:55:14 +0800419 wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
420 MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
421 } else {
422- /* initail tx interrupt trigger */
423+
424 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
425 MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
426 MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
developera3f86ed2022-07-08 14:15:13 +0800427@@ -262,22 +418,30 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
developer8cb3ac72022-07-04 10:55:14 +0800428 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
429 dev->wlan.tx_tbit[1]));
430
431- /* initail txfree interrupt trigger */
432 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
433 MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
434 MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
435 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
436 dev->wlan.txfree_tbit));
437+
438+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
439+ MTK_WED_WPDMA_INT_CTRL_RX0_EN |
440+ MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
441+ MTK_WED_WPDMA_INT_CTRL_RX1_EN |
442+ MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
443+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
444+ dev->wlan.rx_tbit[0]) |
445+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
446+ dev->wlan.rx_tbit[1]));
447 }
448- /* initail wdma interrupt agent */
449 wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
450 if (dev->ver == MTK_WED_V1) {
451 wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
452 } else {
453 wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
454 wed_set(dev, MTK_WED_WDMA_INT_CTRL,
455- FIELD_PREP(MTK_WED_WDMA_INT_POLL_SRC_SEL,dev->wdma_idx));
456-
457+ FIELD_PREP(MTK_WED_WDMA_INT_POLL_SRC_SEL,
458+ dev->wdma_idx));
459 }
460
461 wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
developerc1b2cd12022-07-28 18:35:24 +0800462@@ -312,6 +476,40 @@ mtk_wed_set_512_support(struct mtk_wed_device *dev, bool en)
developer8cb3ac72022-07-04 10:55:14 +0800463 }
464 }
465
466+static void
467+mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
468+{
469+#define MTK_WFMDA_RX_DMA_EN BIT(2)
470+
471+ int timeout = 3;
472+ u32 cur_idx, regs;
473+
474+ do {
475+ regs = MTK_WED_WPDMA_RING_RX_DATA(idx) +
developerc1b2cd12022-07-28 18:35:24 +0800476+ MTK_WED_RING_OFS_CPU_IDX;
developer8cb3ac72022-07-04 10:55:14 +0800477+ cur_idx = wed_r32(dev, regs);
478+ if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
479+ break;
480+
481+ usleep_range(100000, 200000);
developerc1b2cd12022-07-28 18:35:24 +0800482+ timeout--;
483+ } while (timeout > 0);
developer8cb3ac72022-07-04 10:55:14 +0800484+
485+ if (timeout) {
486+ unsigned int val;
487+
488+ val = wifi_r32(dev, dev->wlan.wpdma_rx_glo -
489+ dev->wlan.phy_base);
490+ val |= MTK_WFMDA_RX_DMA_EN;
491+
492+ wifi_w32(dev, dev->wlan.wpdma_rx_glo -
493+ dev->wlan.phy_base, val);
494+ } else {
495+ dev_err(dev->hw->dev, "mtk_wed%d: rx dma enable failed!\n",
496+ dev->hw->index);
497+ }
498+}
499+
500 static void
501 mtk_wed_dma_enable(struct mtk_wed_device *dev)
502 {
developerc1b2cd12022-07-28 18:35:24 +0800503@@ -336,9 +534,15 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800504 wdma_set(dev, MTK_WDMA_GLO_CFG,
505 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
506 } else {
507+ int idx = 0;
508+
509 wed_set(dev, MTK_WED_WPDMA_CTRL,
510 MTK_WED_WPDMA_CTRL_SDL1_FIXED);
511
512+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
developerc1b2cd12022-07-28 18:35:24 +0800513+ MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
developer8cb3ac72022-07-04 10:55:14 +0800514+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
515+
516 wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
517 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
518 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
developerc1b2cd12022-07-28 18:35:24 +0800519@@ -346,6 +550,15 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800520 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
521 MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
522 MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
523+
524+ wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
525+ MTK_WED_WPDMA_RX_D_RX_DRV_EN |
526+ FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
527+ FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
528+ 0x2));
529+
530+ for (idx = 0; idx < MTK_WED_RX_QUEUES; idx++)
531+ mtk_wed_check_wfdma_rx_fill(dev, idx);
532 }
533 }
534
developerc1b2cd12022-07-28 18:35:24 +0800535@@ -363,19 +576,23 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800536 MTK_WED_GLO_CFG_TX_DMA_EN |
537 MTK_WED_GLO_CFG_RX_DMA_EN);
538
539- wdma_m32(dev, MTK_WDMA_GLO_CFG,
540+ wdma_clr(dev, MTK_WDMA_GLO_CFG,
541 MTK_WDMA_GLO_CFG_TX_DMA_EN |
542 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
543- MTK_WDMA_GLO_CFG_RX_INFO2_PRERES, 0);
544+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
545
546 if (dev->ver == MTK_WED_V1) {
547 regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
548- wdma_m32(dev, MTK_WDMA_GLO_CFG,
549- MTK_WDMA_GLO_CFG_RX_INFO3_PRERES, 0);
550+ wdma_clr(dev, MTK_WDMA_GLO_CFG,
551+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
552 } else {
553 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
554 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
555 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
556+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
557+ MTK_WED_WPDMA_RX_D_RX_DRV_EN);
558+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
559+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
560 }
561 }
562
developerc1b2cd12022-07-28 18:35:24 +0800563@@ -383,10 +600,12 @@ static void
564 mtk_wed_stop(struct mtk_wed_device *dev)
developera3f86ed2022-07-08 14:15:13 +0800565 {
566 mtk_wed_dma_disable(dev);
developerc1b2cd12022-07-28 18:35:24 +0800567+ mtk_wed_set_512_support(dev, false);
developera3f86ed2022-07-08 14:15:13 +0800568
569- if (dev->ver > MTK_WED_V1)
developerc1b2cd12022-07-28 18:35:24 +0800570- mtk_wed_set_512_support(dev, false);
571-
developera3f86ed2022-07-08 14:15:13 +0800572+ if (dev->ver > MTK_WED_V1) {
developera3f86ed2022-07-08 14:15:13 +0800573+ wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
574+ wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0);
575+ }
developera3f86ed2022-07-08 14:15:13 +0800576 mtk_wed_set_ext_int(dev, false);
577
developerc1b2cd12022-07-28 18:35:24 +0800578 wed_clr(dev, MTK_WED_CTRL,
579@@ -395,6 +614,11 @@ mtk_wed_stop(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800580 MTK_WED_CTRL_WED_TX_BM_EN |
581 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
582
583+ if (dev->ver > MTK_WED_V1) {
584+ wed_clr(dev, MTK_WED_CTRL,
585+ MTK_WED_CTRL_WED_RX_BM_EN);
586+ }
587+
588 wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
589 wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
590 wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
developerc1b2cd12022-07-28 18:35:24 +0800591@@ -417,8 +641,19 @@ mtk_wed_detach(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800592
593 mtk_wed_reset(dev, MTK_WED_RESET_WED);
developera3f86ed2022-07-08 14:15:13 +0800594
developer8cb3ac72022-07-04 10:55:14 +0800595+ wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
596+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
597+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
developera3f86ed2022-07-08 14:15:13 +0800598+
developer8cb3ac72022-07-04 10:55:14 +0800599 mtk_wed_free_buffer(dev);
600 mtk_wed_free_tx_rings(dev);
developera3f86ed2022-07-08 14:15:13 +0800601+ if (dev->ver > MTK_WED_V1) {
602+ mtk_wed_wo_reset(dev);
developerf50c1802022-07-05 20:35:53 +0800603+ mtk_wed_free_rx_rings(dev);
developera3f86ed2022-07-08 14:15:13 +0800604+ mtk_wed_wo_exit(hw);
605+ }
606+
developerc1b2cd12022-07-28 18:35:24 +0800607+ mtk_wdma_rx_reset(dev);
developer8cb3ac72022-07-04 10:55:14 +0800608
609 if (dev->wlan.bus_type == MTK_BUS_TYPE_PCIE) {
610 wlan_node = dev->wlan.pci_dev->dev.of_node;
developerc1b2cd12022-07-28 18:35:24 +0800611@@ -477,7 +712,6 @@ mtk_wed_bus_init(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800612 value = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
613 value = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
614
615- /* pcie interrupt status trigger register */
616 wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
617 wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
618
developerc1b2cd12022-07-28 18:35:24 +0800619@@ -501,6 +735,9 @@ mtk_wed_set_wpdma(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800620 wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
621 wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
622 wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
623+
624+ wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
625+ wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
626 } else {
627 wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
628 }
developerc1b2cd12022-07-28 18:35:24 +0800629@@ -549,24 +786,92 @@ mtk_wed_hw_init_early(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800630 FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL,
631 MTK_WDMA_RING_RX(0)));
632 }
633+}
developera3f86ed2022-07-08 14:15:13 +0800634
developer8cb3ac72022-07-04 10:55:14 +0800635+static void
636+mtk_wed_rx_bm_hw_init(struct mtk_wed_device *dev)
637+{
638+ wed_w32(dev, MTK_WED_RX_BM_RX_DMAD,
639+ FIELD_PREP(MTK_WED_RX_BM_RX_DMAD_SDL0, dev->wlan.rx_pkt_size));
640+
641+ wed_w32(dev, MTK_WED_RX_BM_BASE, dev->rx_buf_ring.desc_phys);
developera3f86ed2022-07-08 14:15:13 +0800642+
developer8cb3ac72022-07-04 10:55:14 +0800643+ wed_w32(dev, MTK_WED_RX_BM_INIT_PTR, MTK_WED_RX_BM_INIT_SW_TAIL |
644+ FIELD_PREP(MTK_WED_RX_BM_SW_TAIL, dev->wlan.rx_pkt));
645+
646+ wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH,
647+ FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff));
648+
649+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
650 }
651
652 static void
653-mtk_wed_hw_init(struct mtk_wed_device *dev)
654+mtk_wed_rro_hw_init(struct mtk_wed_device *dev)
655+{
656+ wed_w32(dev, MTK_WED_RROQM_MIOD_CFG,
657+ FIELD_PREP(MTK_WED_RROQM_MIOD_MID_DW, 0x70 >> 2) |
658+ FIELD_PREP(MTK_WED_RROQM_MIOD_MOD_DW, 0x10 >> 2) |
659+ FIELD_PREP(MTK_WED_RROQM_MIOD_ENTRY_DW,
660+ MTK_WED_MIOD_ENTRY_CNT >> 2));
661+
662+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL0, dev->rro.miod_desc_phys);
663+
664+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL1,
665+ FIELD_PREP(MTK_WED_RROQM_MIOD_CNT, MTK_WED_MIOD_CNT));
666+
667+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL0, dev->rro.fdbk_desc_phys);
668+
669+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL1,
670+ FIELD_PREP(MTK_WED_RROQM_FDBK_CNT, MTK_WED_FB_CMD_CNT));
671+
672+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL2, 0);
673+
674+ wed_w32(dev, MTK_WED_RROQ_BASE_L, dev->rro.rro_ring.desc_phys);
675+
676+ wed_set(dev, MTK_WED_RROQM_RST_IDX,
677+ MTK_WED_RROQM_RST_IDX_MIOD |
678+ MTK_WED_RROQM_RST_IDX_FDBK);
679+
680+ wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
681+
682+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL2, MTK_WED_MIOD_CNT -1);
683+
684+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
685+}
686+
687+static void
688+mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
689+{
690+ wed_w32(dev, MTK_WED_RESET, MTK_WED_RESET_RX_ROUTE_QM);
691+
692+ do {
693+ udelay(100);
694+
695+ if (!(wed_r32(dev, MTK_WED_RESET) & MTK_WED_RESET_RX_ROUTE_QM))
696+ break;
697+ } while (1);
698+
699+ /* configure RX_ROUTE_QM */
700+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
701+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
702+ wed_set(dev, MTK_WED_RTQM_GLO_CFG,
703+ FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
704+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
705+
706+ /* enable RX_ROUTE_QM */
707+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
708+}
709+
710+static void
711+mtk_wed_tx_hw_init(struct mtk_wed_device *dev)
712 {
713 int size = dev->buf_ring.size;
714 int rev_size = MTK_WED_TX_RING_SIZE / 2;
715 int thr = 1;
716
717- if (dev->init_done)
718- return;
719-
720- dev->init_done = true;
721- mtk_wed_set_ext_int(dev, false);
722-
723 if (dev->ver > MTK_WED_V1) {
724- size = MTK_WED_WDMA_RING_SIZE * 2 + dev->buf_ring.size;
725+ size = MTK_WED_WDMA_RING_SIZE * ARRAY_SIZE(dev->tx_wdma) +
726+ dev->buf_ring.size;
727 rev_size = size;
728 thr = 0;
729 }
developerc1b2cd12022-07-28 18:35:24 +0800730@@ -609,13 +914,46 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800731 }
732
733 static void
734-mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size, int scale)
735+mtk_wed_rx_hw_init(struct mtk_wed_device *dev)
developere0cbe332022-09-10 17:36:02 +0800736+{
developer8cb3ac72022-07-04 10:55:14 +0800737+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
developerc1b2cd12022-07-28 18:35:24 +0800738+ MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
739+ MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
developer8cb3ac72022-07-04 10:55:14 +0800740+
741+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
742+
743+ mtk_wed_rx_bm_hw_init(dev);
744+ mtk_wed_rro_hw_init(dev);
745+ mtk_wed_route_qm_hw_init(dev);
746+}
747+
748+static void
749+mtk_wed_hw_init(struct mtk_wed_device *dev)
750+{
751+ if (dev->init_done)
752+ return;
753+
754+ dev->init_done = true;
755+ mtk_wed_set_ext_int(dev, false);
756+ mtk_wed_tx_hw_init(dev);
757+ if (dev->ver > MTK_WED_V1)
758+ mtk_wed_rx_hw_init(dev);
759+}
760+
761+static void
762+mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size, int scale, bool tx)
developere0cbe332022-09-10 17:36:02 +0800763 {
developer8cb3ac72022-07-04 10:55:14 +0800764+ __le32 ctrl;
765 int i;
766
767+ if (tx)
768+ ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
769+ else
770+ ctrl = cpu_to_le32(MTK_WFDMA_DESC_CTRL_TO_HOST);
771+
772 for (i = 0; i < size; i++) {
773 desc->buf0 = 0;
774- desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
775+ desc->ctrl = ctrl;
776 desc->buf1 = 0;
777 desc->info = 0;
778 desc += scale;
developerc1b2cd12022-07-28 18:35:24 +0800779@@ -674,7 +1012,7 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800780 if (!desc)
781 continue;
782
783- mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE, dev->ver);
784+ mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE, dev->ver, true);
785 }
786
787 if (mtk_wed_poll_busy(dev))
developerc1b2cd12022-07-28 18:35:24 +0800788@@ -692,6 +1030,8 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
789 wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
790 wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
791
792+ mtk_wdma_rx_reset(dev);
793+
794 if (busy) {
795 mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
796 mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
797@@ -729,9 +1069,24 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800798
799 }
800
801+static int
802+mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
803+ int size)
804+{
805+ ring->desc = dma_alloc_coherent(dev->hw->dev,
806+ size * sizeof(*ring->desc),
807+ &ring->desc_phys, GFP_KERNEL);
808+ if (!ring->desc)
809+ return -ENOMEM;
810+
811+ ring->size = size;
812+ memset(ring->desc, 0, size);
813+ return 0;
814+}
815+
816 static int
817 mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
818- int size, int scale)
819+ int size, int scale, bool tx)
820 {
821 ring->desc = dma_alloc_coherent(dev->hw->dev,
822 size * sizeof(*ring->desc) * scale,
developerc1b2cd12022-07-28 18:35:24 +0800823@@ -740,17 +1095,18 @@ mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
developer8cb3ac72022-07-04 10:55:14 +0800824 return -ENOMEM;
825
826 ring->size = size;
827- mtk_wed_ring_reset(ring->desc, size, scale);
828+ mtk_wed_ring_reset(ring->desc, size, scale, tx);
829
830 return 0;
831 }
832
833 static int
834-mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
835+mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
836 {
837 struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
838
839- if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, dev->ver))
840+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
841+ dev->ver, true))
842 return -ENOMEM;
843
844 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
developerc1b2cd12022-07-28 18:35:24 +0800845@@ -767,22 +1123,143 @@ mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
developer8cb3ac72022-07-04 10:55:14 +0800846 return 0;
847 }
848
849+static int
850+mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
851+{
852+ struct mtk_wed_ring *wdma = &dev->rx_wdma[idx];
853+
854+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
855+ dev->ver, true))
856+ return -ENOMEM;
857+
858+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
859+ wdma->desc_phys);
860+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
861+ size);
862+ wdma_w32(dev,
863+ MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
864+ wdma_w32(dev,
865+ MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);
866+
867+ if (idx == 0) {
868+ wed_w32(dev, MTK_WED_WDMA_RING_TX
869+ + MTK_WED_RING_OFS_BASE, wdma->desc_phys);
870+ wed_w32(dev, MTK_WED_WDMA_RING_TX
871+ + MTK_WED_RING_OFS_COUNT, size);
872+ wed_w32(dev, MTK_WED_WDMA_RING_TX
873+ + MTK_WED_RING_OFS_CPU_IDX, 0);
874+ wed_w32(dev, MTK_WED_WDMA_RING_TX
875+ + MTK_WED_RING_OFS_DMA_IDX, 0);
876+ }
877+
878+ return 0;
879+}
880+
881+static int
882+mtk_wed_rro_alloc(struct mtk_wed_device *dev)
883+{
884+ struct device_node *np, *node = dev->hw->node;
885+ struct mtk_wed_ring *ring;
886+ struct resource res;
887+ int ret;
888+
889+ np = of_parse_phandle(node, "mediatek,wocpu_dlm", 0);
890+ if (!np)
891+ return -ENODEV;
892+
893+ ret = of_address_to_resource(np, 0, &res);
894+ if (ret)
895+ return ret;
896+
897+ dev->rro.rro_desc = ioremap(res.start, resource_size(&res));
898+
899+ ring = &dev->rro.rro_ring;
900+
901+ dev->rro.miod_desc_phys = res.start;
902+
903+ dev->rro.mcu_view_miod = MTK_WED_WOCPU_VIEW_MIOD_BASE;
904+ dev->rro.fdbk_desc_phys = MTK_WED_MIOD_ENTRY_CNT * MTK_WED_MIOD_CNT
905+ + dev->rro.miod_desc_phys;
906+
907+ if (mtk_wed_rro_ring_alloc(dev, ring, MTK_WED_RRO_QUE_CNT))
908+ return -ENOMEM;
909+
910+ return 0;
911+}
912+
913+static int
914+mtk_wed_rro_cfg(struct mtk_wed_device *dev)
915+{
916+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
917+ struct {
918+ struct wo_cmd_ring ring[2];
919+
920+ u32 wed;
921+ u8 ver;
922+ } req = {
923+ .ring = {
924+ [0] = {
925+ .q_base = dev->rro.mcu_view_miod,
926+ .cnt = MTK_WED_MIOD_CNT,
927+ .unit = MTK_WED_MIOD_ENTRY_CNT,
928+ },
929+ [1] = {
930+ .q_base = dev->rro.mcu_view_miod +
931+ MTK_WED_MIOD_ENTRY_CNT *
932+ MTK_WED_MIOD_CNT,
933+ .cnt = MTK_WED_FB_CMD_CNT,
934+ .unit = 4,
935+ },
936+ },
937+ .wed = 0,
938+ };
939+
940+ return mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, WO_CMD_WED_CFG,
941+ &req, sizeof(req), true);
942+}
943+
944+static int
945+mtk_wed_send_msg(struct mtk_wed_device *dev, int cmd_id, void *data, int len)
946+{
947+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
948+
developerf50c1802022-07-05 20:35:53 +0800949+ if (dev->ver == MTK_WED_V1)
950+ return 0;
951+
developer8cb3ac72022-07-04 10:55:14 +0800952+ return mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, cmd_id, data, len, true);
953+}
954+
955+static void
956+mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb,
957+ u32 reason, u32 hash)
958+{
959+ int idx = dev->hw->index;
960+ struct mtk_eth *eth = dev->hw->eth;
961+ struct ethhdr *eh;
962+
963+ if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) {
964+ if (!skb)
965+ return;
966+
967+ skb_set_mac_header(skb, 0);
968+ eh = eth_hdr(skb);
969+ skb->protocol = eh->h_proto;
970+ mtk_ppe_check_skb(eth->ppe[idx], skb, hash);
971+ }
972+}
973+
974 static void
975 mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
976 {
977- u32 wdma_mask;
978- int i;
979+ int i, ret;
980
981 for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
982 if (!dev->tx_wdma[i].desc)
983- mtk_wed_wdma_ring_setup(dev, i, 16);
984-
985+ mtk_wed_wdma_rx_ring_setup(dev, i, 16);
986
987 mtk_wed_hw_init(dev);
988
989 mtk_wed_set_int(dev, irq_mask);
990-
991-
992 mtk_wed_set_ext_int(dev, true);
993
994 if (dev->ver == MTK_WED_V1) {
developerc1b2cd12022-07-28 18:35:24 +0800995@@ -797,8 +1274,20 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
developer8cb3ac72022-07-04 10:55:14 +0800996 val |= BIT(0);
997 regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
998 } else {
developer203096a2022-09-13 21:07:19 +0800999- mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
developer8cb3ac72022-07-04 10:55:14 +08001000+ /* driver set mid ready and only once */
1001+ wed_w32(dev, MTK_WED_EXT_INT_MASK1,
1002+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
1003+ wed_w32(dev, MTK_WED_EXT_INT_MASK2,
1004+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
1005+
1006+ wed_r32(dev, MTK_WED_EXT_INT_MASK1);
1007+ wed_r32(dev, MTK_WED_EXT_INT_MASK2);
1008+
1009+ ret = mtk_wed_rro_cfg(dev);
1010+ if (ret)
1011+ return;
developer8cb3ac72022-07-04 10:55:14 +08001012 }
developer203096a2022-09-13 21:07:19 +08001013+ mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
developer8cb3ac72022-07-04 10:55:14 +08001014
developerc1b2cd12022-07-28 18:35:24 +08001015 mtk_wed_dma_enable(dev);
1016 dev->running = true;
developere0cbe332022-09-10 17:36:02 +08001017@@ -847,9 +1336,17 @@ mtk_wed_attach(struct mtk_wed_device *dev)
1018 dev->rev_id = ((dev->ver << 28) | ver << 16);
developer8cb3ac72022-07-04 10:55:14 +08001019
1020 ret = mtk_wed_buffer_alloc(dev);
1021- if (ret) {
1022- mtk_wed_detach(dev);
1023- goto out;
1024+ if (ret)
1025+ goto error;
1026+
1027+ if (dev->ver > MTK_WED_V1) {
1028+ ret = mtk_wed_rx_bm_alloc(dev);
1029+ if (ret)
1030+ goto error;
1031+
1032+ ret = mtk_wed_rro_alloc(dev);
1033+ if (ret)
1034+ goto error;
1035 }
1036
1037 mtk_wed_hw_init_early(dev);
developere0cbe332022-09-10 17:36:02 +08001038@@ -857,7 +1354,12 @@ mtk_wed_attach(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +08001039 if (dev->ver == MTK_WED_V1)
1040 regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
1041 BIT(hw->index), 0);
1042+ else
1043+ ret = mtk_wed_wo_init(hw);
1044
1045+error:
1046+ if (ret)
1047+ mtk_wed_detach(dev);
1048 out:
1049 mutex_unlock(&hw_lock);
1050
developere0cbe332022-09-10 17:36:02 +08001051@@ -883,10 +1385,10 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
developer8cb3ac72022-07-04 10:55:14 +08001052
1053 BUG_ON(idx > ARRAY_SIZE(dev->tx_ring));
1054
1055- if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, 1))
1056+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, 1, true))
1057 return -ENOMEM;
1058
1059- if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
1060+ if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
1061 return -ENOMEM;
1062
1063 ring->reg_base = MTK_WED_RING_TX(idx);
developere0cbe332022-09-10 17:36:02 +08001064@@ -933,6 +1435,35 @@ mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
developer8cb3ac72022-07-04 10:55:14 +08001065 return 0;
1066 }
1067
1068+static int
1069+mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
1070+{
1071+ struct mtk_wed_ring *ring = &dev->rx_ring[idx];
1072+
1073+ BUG_ON(idx > ARRAY_SIZE(dev->rx_ring));
1074+
1075+
1076+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE, 1, false))
1077+ return -ENOMEM;
1078+
1079+ if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
1080+ return -ENOMEM;
1081+
1082+ ring->reg_base = MTK_WED_RING_RX_DATA(idx);
1083+ ring->wpdma = regs;
1084+
1085+ /* WPDMA -> WED */
1086+ wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
1087+ wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_RX_RING_SIZE);
1088+
1089+ wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_BASE,
1090+ ring->desc_phys);
1091+ wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_COUNT,
1092+ MTK_WED_RX_RING_SIZE);
1093+
1094+ return 0;
1095+}
1096+
1097 static u32
1098 mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
1099 {
developere0cbe332022-09-10 17:36:02 +08001100@@ -1020,6 +1551,8 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
developer8cb3ac72022-07-04 10:55:14 +08001101 .attach = mtk_wed_attach,
1102 .tx_ring_setup = mtk_wed_tx_ring_setup,
1103 .txfree_ring_setup = mtk_wed_txfree_ring_setup,
1104+ .rx_ring_setup = mtk_wed_rx_ring_setup,
1105+ .msg_update = mtk_wed_send_msg,
1106 .start = mtk_wed_start,
1107 .stop = mtk_wed_stop,
1108 .reset_dma = mtk_wed_reset_dma,
developere0cbe332022-09-10 17:36:02 +08001109@@ -1028,6 +1561,7 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
developer8cb3ac72022-07-04 10:55:14 +08001110 .irq_get = mtk_wed_irq_get,
1111 .irq_set_mask = mtk_wed_irq_set_mask,
1112 .detach = mtk_wed_detach,
1113+ .ppe_check = mtk_wed_ppe_check,
1114 };
1115 struct device_node *eth_np = eth->dev->of_node;
1116 struct platform_device *pdev;
developere0cbe332022-09-10 17:36:02 +08001117@@ -1083,6 +1617,7 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
developerc1b2cd12022-07-28 18:35:24 +08001118 regmap_write(hw->mirror, 0, 0);
1119 regmap_write(hw->mirror, 4, 0);
1120 }
1121+ hw->ver = MTK_WED_V1;
1122 }
1123
1124 mtk_wed_hw_add_debugfs(hw);
developer8cb3ac72022-07-04 10:55:14 +08001125diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
developere0cbe332022-09-10 17:36:02 +08001126index 9b17b74..8ef5253 100644
developer8cb3ac72022-07-04 10:55:14 +08001127--- a/drivers/net/ethernet/mediatek/mtk_wed.h
1128+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
1129@@ -13,6 +13,7 @@
1130 #define MTK_WED_PKT_SIZE 1900
1131 #define MTK_WED_BUF_SIZE 2048
1132 #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
1133+#define MTK_WED_RX_RING_SIZE 1536
1134
1135 #define MTK_WED_TX_RING_SIZE 2048
1136 #define MTK_WED_WDMA_RING_SIZE 512
1137@@ -21,8 +22,15 @@
1138 #define MTK_WED_PER_GROUP_PKT 128
1139
1140 #define MTK_WED_FBUF_SIZE 128
1141+#define MTK_WED_MIOD_CNT 16
1142+#define MTK_WED_FB_CMD_CNT 1024
1143+#define MTK_WED_RRO_QUE_CNT 8192
1144+#define MTK_WED_MIOD_ENTRY_CNT 128
1145+
1146+#define MODULE_ID_WO 1
1147
1148 struct mtk_eth;
1149+struct mtk_wed_wo;
1150
1151 struct mtk_wed_hw {
1152 struct device_node *node;
1153@@ -34,12 +42,14 @@ struct mtk_wed_hw {
1154 struct regmap *mirror;
1155 struct dentry *debugfs_dir;
1156 struct mtk_wed_device *wed_dev;
1157+ struct mtk_wed_wo *wed_wo;
1158 u32 debugfs_reg;
1159 u32 num_flows;
1160 u32 wdma_phy;
1161 char dirname[5];
1162 int irq;
1163 int index;
1164+ u32 ver;
1165 };
1166
1167 struct mtk_wdma_info {
1168@@ -66,6 +76,18 @@ wed_r32(struct mtk_wed_device *dev, u32 reg)
1169 return val;
1170 }
1171
1172+static inline u32
1173+wifi_r32(struct mtk_wed_device *dev, u32 reg)
1174+{
1175+ return readl(dev->wlan.base + reg);
1176+}
1177+
1178+static inline void
1179+wifi_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
1180+{
1181+ writel(val, dev->wlan.base + reg);
1182+}
1183+
1184 static inline void
1185 wdma_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
1186 {
1187@@ -114,6 +136,23 @@ wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
1188 writel(val, dev->txfree_ring.wpdma + reg);
1189 }
1190
1191+static inline u32
1192+wpdma_rx_r32(struct mtk_wed_device *dev, int ring, u32 reg)
1193+{
1194+ if (!dev->rx_ring[ring].wpdma)
1195+ return 0;
1196+
1197+ return readl(dev->rx_ring[ring].wpdma + reg);
1198+}
1199+
1200+static inline void
1201+wpdma_rx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val)
1202+{
1203+ if (!dev->rx_ring[ring].wpdma)
1204+ return;
1205+
1206+ writel(val, dev->rx_ring[ring].wpdma + reg);
1207+}
1208 void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
1209 void __iomem *wdma, u32 wdma_phy, int index);
1210 void mtk_wed_exit(void);
developera3f86ed2022-07-08 14:15:13 +08001211@@ -146,4 +185,16 @@ static inline void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
developer8cb3ac72022-07-04 10:55:14 +08001212 }
1213 #endif
1214
1215+int wed_wo_hardware_init(struct mtk_wed_wo *wo, irq_handler_t isr);
developera3f86ed2022-07-08 14:15:13 +08001216+void wed_wo_hardware_exit(struct mtk_wed_wo *wo);
developer8cb3ac72022-07-04 10:55:14 +08001217+int wed_wo_mcu_init(struct mtk_wed_wo *wo);
1218+int mtk_wed_exception_init(struct mtk_wed_wo *wo);
1219+void mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo, struct sk_buff *skb);
1220+int mtk_wed_mcu_cmd_sanity_check(struct mtk_wed_wo *wo, struct sk_buff *skb);
1221+void wed_wo_mcu_debugfs(struct mtk_wed_hw *hw, struct dentry *dir);
1222+void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo, struct sk_buff *skb);
1223+int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo,int to_id, int cmd,
1224+ const void *data, int len, bool wait_resp);
1225+int mtk_wed_wo_rx_poll(struct napi_struct *napi, int budget);
1226+
1227 #endif
1228diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ccif.c b/drivers/net/ethernet/mediatek/mtk_wed_ccif.c
1229new file mode 100644
developere0cbe332022-09-10 17:36:02 +08001230index 0000000..22ef337
developer8cb3ac72022-07-04 10:55:14 +08001231--- /dev/null
1232+++ b/drivers/net/ethernet/mediatek/mtk_wed_ccif.c
developera3f86ed2022-07-08 14:15:13 +08001233@@ -0,0 +1,133 @@
developer8cb3ac72022-07-04 10:55:14 +08001234+// SPDX-License-Identifier: GPL-2.0-only
1235+
1236+#include <linux/soc/mediatek/mtk_wed.h>
1237+#include <linux/of_address.h>
1238+#include <linux/mfd/syscon.h>
1239+#include <linux/of_irq.h>
1240+#include "mtk_wed_ccif.h"
1241+#include "mtk_wed_regs.h"
1242+#include "mtk_wed_wo.h"
1243+
1244+static inline void woif_set_isr(struct mtk_wed_wo *wo, u32 mask)
1245+{
1246+ woccif_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
1247+}
1248+
1249+static inline u32 woif_get_csr(struct mtk_wed_wo *wo)
1250+{
1251+ u32 val;
1252+
1253+ val = woccif_r32(wo, MTK_WED_WO_CCIF_RCHNUM);
1254+
1255+ return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
1256+}
1257+
1258+static inline void woif_set_ack(struct mtk_wed_wo *wo, u32 mask)
1259+{
1260+ woccif_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
1261+}
1262+
1263+static inline void woif_kickout(struct mtk_wed_wo *wo)
1264+{
1265+ woccif_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
1266+ woccif_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
1267+}
1268+
1269+static inline void woif_clear_int(struct mtk_wed_wo *wo, u32 mask)
1270+{
1271+ woccif_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
1272+ woccif_r32(wo, MTK_WED_WO_CCIF_RCHNUM);
1273+}
1274+
1275+int wed_wo_hardware_init(struct mtk_wed_wo *wo, irq_handler_t isr)
1276+{
1277+ static const struct wed_wo_drv_ops wo_drv_ops = {
1278+ .kickout = woif_kickout,
1279+ .set_ack = woif_set_ack,
1280+ .set_isr = woif_set_isr,
1281+ .get_csr = woif_get_csr,
1282+ .clear_int = woif_clear_int,
1283+ };
1284+ struct device_node *np, *node = wo->hw->node;
1285+ struct wed_wo_queue_regs queues;
1286+ struct regmap *regs;
1287+ int ret;
1288+
1289+ np = of_parse_phandle(node, "mediatek,ap2woccif", 0);
1290+ if (!np)
1291+ return -ENODEV;
1292+
1293+ regs = syscon_regmap_lookup_by_phandle(np, NULL);
1294+ if (!regs)
1295+ return -ENODEV;
1296+
1297+ wo->drv_ops = &wo_drv_ops;
1298+
1299+ wo->ccif.regs = regs;
1300+ wo->ccif.irq = irq_of_parse_and_map(np, 0);
1301+
1302+ spin_lock_init(&wo->ccif.irq_lock);
1303+
1304+ ret = request_irq(wo->ccif.irq, isr, IRQF_TRIGGER_HIGH,
1305+ "wo_ccif_isr", wo);
1306+ if (ret)
1307+ goto free_irq;
1308+
1309+ queues.desc_base = MTK_WED_WO_CCIF_DUMMY1;
1310+ queues.ring_size = MTK_WED_WO_CCIF_DUMMY2;
1311+ queues.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;
1312+ queues.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
1313+
1314+ ret = mtk_wed_wo_q_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
1315+ MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
1316+ &queues);
1317+
1318+ if (ret)
1319+ goto free_irq;
1320+
1321+ queues.desc_base = MTK_WED_WO_CCIF_DUMMY5;
1322+ queues.ring_size = MTK_WED_WO_CCIF_DUMMY6;
1323+ queues.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;
1324+ queues.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
1325+
1326+ ret = mtk_wed_wo_q_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
1327+ MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
1328+ &queues);
1329+ if (ret)
1330+ goto free_irq;
1331+
1332+ wo->ccif.q_int_mask = MTK_WED_WO_RXCH_INT_MASK;
1333+
1334+ ret = mtk_wed_wo_q_init(wo, mtk_wed_wo_rx_poll);
1335+ if (ret)
1336+ goto free_irq;
1337+
1338+ wo->ccif.q_exep_mask = MTK_WED_WO_EXCEPTION_INT_MASK;
1339+ wo->ccif.irqmask = MTK_WED_WO_ALL_INT_MASK;
1340+
1341+ /* rx queue irqmask */
1342+ wo->drv_ops->set_isr(wo, wo->ccif.irqmask);
1343+
1344+ return 0;
1345+
1346+free_irq:
developera3f86ed2022-07-08 14:15:13 +08001347+ free_irq(wo->ccif.irq, wo);
developer8cb3ac72022-07-04 10:55:14 +08001348+
1349+ return ret;
1350+}
1351+
developera3f86ed2022-07-08 14:15:13 +08001352+void wed_wo_hardware_exit(struct mtk_wed_wo *wo)
developer8cb3ac72022-07-04 10:55:14 +08001353+{
developera3f86ed2022-07-08 14:15:13 +08001354+ wo->drv_ops->set_isr(wo, 0);
1355+
1356+ disable_irq(wo->ccif.irq);
1357+ free_irq(wo->ccif.irq, wo);
1358+
1359+ tasklet_disable(&wo->irq_tasklet);
1360+ netif_napi_del(&wo->napi);
1361+
developer53bfd362022-09-29 12:02:18 +08001362+ mtk_wed_wo_q_tx_clean(wo, &wo->q_tx);
developera3f86ed2022-07-08 14:15:13 +08001363+ mtk_wed_wo_q_rx_clean(wo, &wo->q_rx);
1364+ mtk_wed_wo_q_free(wo, &wo->q_tx);
1365+ mtk_wed_wo_q_free(wo, &wo->q_rx);
developer8cb3ac72022-07-04 10:55:14 +08001366+}
1367diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ccif.h b/drivers/net/ethernet/mediatek/mtk_wed_ccif.h
1368new file mode 100644
developere0cbe332022-09-10 17:36:02 +08001369index 0000000..68ade44
developer8cb3ac72022-07-04 10:55:14 +08001370--- /dev/null
1371+++ b/drivers/net/ethernet/mediatek/mtk_wed_ccif.h
1372@@ -0,0 +1,45 @@
1373+// SPDX-License-Identifier: GPL-2.0-only
1374+
1375+#ifndef __MTK_WED_CCIF_H
1376+#define __MTK_WED_CCIF_H
1377+
1378+#define MTK_WED_WO_RING_SIZE 256
1379+#define MTK_WED_WO_CMD_LEN 1504
1380+
1381+#define MTK_WED_WO_TXCH_NUM 0
1382+#define MTK_WED_WO_RXCH_NUM 1
1383+#define MTK_WED_WO_RXCH_WO_EXCEPTION 7
1384+
1385+#define MTK_WED_WO_TXCH_INT_MASK BIT(0)
1386+#define MTK_WED_WO_RXCH_INT_MASK BIT(1)
1387+#define MTK_WED_WO_EXCEPTION_INT_MASK BIT(7)
1388+#define MTK_WED_WO_ALL_INT_MASK MTK_WED_WO_RXCH_INT_MASK | \
1389+ MTK_WED_WO_EXCEPTION_INT_MASK
1390+
1391+#define MTK_WED_WO_CCIF_BUSY 0x004
1392+#define MTK_WED_WO_CCIF_START 0x008
1393+#define MTK_WED_WO_CCIF_TCHNUM 0x00c
1394+#define MTK_WED_WO_CCIF_RCHNUM 0x010
1395+#define MTK_WED_WO_CCIF_RCHNUM_MASK GENMASK(7, 0)
1396+
1397+#define MTK_WED_WO_CCIF_ACK 0x014
1398+#define MTK_WED_WO_CCIF_IRQ0_MASK 0x018
1399+#define MTK_WED_WO_CCIF_IRQ1_MASK 0x01c
1400+#define MTK_WED_WO_CCIF_DUMMY1 0x020
1401+#define MTK_WED_WO_CCIF_DUMMY2 0x024
1402+#define MTK_WED_WO_CCIF_DUMMY3 0x028
1403+#define MTK_WED_WO_CCIF_DUMMY4 0x02c
1404+#define MTK_WED_WO_CCIF_SHADOW1 0x030
1405+#define MTK_WED_WO_CCIF_SHADOW2 0x034
1406+#define MTK_WED_WO_CCIF_SHADOW3 0x038
1407+#define MTK_WED_WO_CCIF_SHADOW4 0x03c
1408+#define MTK_WED_WO_CCIF_DUMMY5 0x050
1409+#define MTK_WED_WO_CCIF_DUMMY6 0x054
1410+#define MTK_WED_WO_CCIF_DUMMY7 0x058
1411+#define MTK_WED_WO_CCIF_DUMMY8 0x05c
1412+#define MTK_WED_WO_CCIF_SHADOW5 0x060
1413+#define MTK_WED_WO_CCIF_SHADOW6 0x064
1414+#define MTK_WED_WO_CCIF_SHADOW7 0x068
1415+#define MTK_WED_WO_CCIF_SHADOW8 0x06c
1416+
1417+#endif
1418diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
developere0cbe332022-09-10 17:36:02 +08001419index f420f18..4a9e684 100644
developer8cb3ac72022-07-04 10:55:14 +08001420--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
1421+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
1422@@ -2,6 +2,7 @@
1423 /* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
1424
1425 #include <linux/seq_file.h>
1426+#include <linux/soc/mediatek/mtk_wed.h>
1427 #include "mtk_wed.h"
1428 #include "mtk_wed_regs.h"
1429
1430@@ -18,6 +19,8 @@ enum {
1431 DUMP_TYPE_WDMA,
1432 DUMP_TYPE_WPDMA_TX,
1433 DUMP_TYPE_WPDMA_TXFREE,
1434+ DUMP_TYPE_WPDMA_RX,
1435+ DUMP_TYPE_WED_RRO,
1436 };
1437
1438 #define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
1439@@ -36,6 +39,10 @@ enum {
1440
1441 #define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n)
1442 #define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE)
1443+#define DUMP_WPDMA_RX_RING(_n) DUMP_RING("WPDMA_RX" #_n, 0, DUMP_TYPE_WPDMA_RX, _n)
1444+#define DUMP_WED_RRO_RING(_base)DUMP_RING("WED_RRO_MIOD", MTK_##_base, DUMP_TYPE_WED_RRO)
1445+#define DUMP_WED_RRO_FDBK(_base)DUMP_RING("WED_RRO_FDBK", MTK_##_base, DUMP_TYPE_WED_RRO)
1446+
1447
1448 static void
1449 print_reg_val(struct seq_file *s, const char *name, u32 val)
1450@@ -58,6 +65,7 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
1451 cur->name);
1452 continue;
1453 case DUMP_TYPE_WED:
1454+ case DUMP_TYPE_WED_RRO:
1455 val = wed_r32(dev, cur->offset);
1456 break;
1457 case DUMP_TYPE_WDMA:
1458@@ -69,6 +77,9 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
1459 case DUMP_TYPE_WPDMA_TXFREE:
1460 val = wpdma_txfree_r32(dev, cur->offset);
1461 break;
1462+ case DUMP_TYPE_WPDMA_RX:
1463+ val = wpdma_rx_r32(dev, cur->base, cur->offset);
1464+ break;
1465 }
1466 print_reg_val(s, cur->name, val);
1467 }
1468@@ -132,6 +143,81 @@ wed_txinfo_show(struct seq_file *s, void *data)
1469 }
1470 DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
1471
1472+static int
1473+wed_rxinfo_show(struct seq_file *s, void *data)
1474+{
1475+ static const struct reg_dump regs[] = {
1476+ DUMP_STR("WPDMA RX"),
1477+ DUMP_WPDMA_RX_RING(0),
1478+ DUMP_WPDMA_RX_RING(1),
1479+
1480+ DUMP_STR("WPDMA RX"),
1481+ DUMP_WED(WED_WPDMA_RX_D_MIB(0)),
1482+ DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(0)),
1483+ DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(0)),
1484+ DUMP_WED(WED_WPDMA_RX_D_MIB(1)),
1485+ DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(1)),
1486+ DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(1)),
1487+ DUMP_WED(WED_WPDMA_RX_D_COHERENT_MIB),
1488+
1489+ DUMP_STR("WED RX"),
1490+ DUMP_WED_RING(WED_RING_RX_DATA(0)),
1491+ DUMP_WED_RING(WED_RING_RX_DATA(1)),
1492+
1493+ DUMP_STR("WED RRO"),
1494+ DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0),
1495+ DUMP_WED(WED_RROQM_MID_MIB),
1496+ DUMP_WED(WED_RROQM_MOD_MIB),
1497+ DUMP_WED(WED_RROQM_MOD_COHERENT_MIB),
1498+ DUMP_WED_RRO_FDBK(WED_RROQM_FDBK_CTRL0),
1499+ DUMP_WED(WED_RROQM_FDBK_IND_MIB),
1500+ DUMP_WED(WED_RROQM_FDBK_ENQ_MIB),
1501+ DUMP_WED(WED_RROQM_FDBK_ANC_MIB),
1502+ DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB),
1503+
1504+ DUMP_STR("WED Route QM"),
1505+ DUMP_WED(WED_RTQM_R2H_MIB(0)),
1506+ DUMP_WED(WED_RTQM_R2Q_MIB(0)),
1507+ DUMP_WED(WED_RTQM_Q2H_MIB(0)),
1508+ DUMP_WED(WED_RTQM_R2H_MIB(1)),
1509+ DUMP_WED(WED_RTQM_R2Q_MIB(1)),
1510+ DUMP_WED(WED_RTQM_Q2H_MIB(1)),
1511+ DUMP_WED(WED_RTQM_Q2N_MIB),
1512+ DUMP_WED(WED_RTQM_Q2B_MIB),
1513+ DUMP_WED(WED_RTQM_PFDBK_MIB),
1514+
1515+ DUMP_STR("WED WDMA TX"),
1516+ DUMP_WED(WED_WDMA_TX_MIB),
1517+ DUMP_WED_RING(WED_WDMA_RING_TX),
1518+
1519+ DUMP_STR("WDMA TX"),
1520+ DUMP_WDMA(WDMA_GLO_CFG),
1521+ DUMP_WDMA_RING(WDMA_RING_TX(0)),
1522+ DUMP_WDMA_RING(WDMA_RING_TX(1)),
1523+
1524+ DUMP_STR("WED RX BM"),
1525+ DUMP_WED(WED_RX_BM_BASE),
1526+ DUMP_WED(WED_RX_BM_RX_DMAD),
1527+ DUMP_WED(WED_RX_BM_PTR),
1528+ DUMP_WED(WED_RX_BM_TKID_MIB),
1529+ DUMP_WED(WED_RX_BM_BLEN),
1530+ DUMP_WED(WED_RX_BM_STS),
1531+ DUMP_WED(WED_RX_BM_INTF2),
1532+ DUMP_WED(WED_RX_BM_INTF),
1533+ DUMP_WED(WED_RX_BM_ERR_STS),
1534+ };
1535+
1536+ struct mtk_wed_hw *hw = s->private;
1537+ struct mtk_wed_device *dev = hw->wed_dev;
1538+
1539+ if (!dev)
1540+ return 0;
1541+
1542+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
1543+
1544+ return 0;
1545+}
1546+DEFINE_SHOW_ATTRIBUTE(wed_rxinfo);
1547
1548 static int
1549 mtk_wed_reg_set(void *data, u64 val)
1550@@ -175,4 +261,8 @@ void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
1551 debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
1552 debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
1553 debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
1554+ debugfs_create_file_unsafe("rxinfo", 0400, dir, hw, &wed_rxinfo_fops);
developerc1b2cd12022-07-28 18:35:24 +08001555+ if (hw->ver != MTK_WED_V1) {
developer8cb3ac72022-07-04 10:55:14 +08001556+ wed_wo_mcu_debugfs(hw, dir);
1557+ }
1558 }
1559diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
1560new file mode 100644
developere0cbe332022-09-10 17:36:02 +08001561index 0000000..723bdfd
developer8cb3ac72022-07-04 10:55:14 +08001562--- /dev/null
1563+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
developer8fec8ae2022-08-15 15:01:09 -07001564@@ -0,0 +1,586 @@
developer8cb3ac72022-07-04 10:55:14 +08001565+// SPDX-License-Identifier: GPL-2.0-only
1566+
1567+#include <linux/skbuff.h>
1568+#include <linux/debugfs.h>
1569+#include <linux/firmware.h>
1570+#include <linux/of_address.h>
1571+#include <linux/soc/mediatek/mtk_wed.h>
1572+#include "mtk_wed_regs.h"
1573+#include "mtk_wed_mcu.h"
1574+#include "mtk_wed_wo.h"
1575+
1576+struct sk_buff *
1577+mtk_wed_mcu_msg_alloc(struct mtk_wed_wo *wo,
1578+ const void *data, int data_len)
1579+{
1580+ const struct wed_wo_mcu_ops *ops = wo->mcu_ops;
1581+ int length = ops->headroom + data_len;
1582+ struct sk_buff *skb;
1583+
1584+ skb = alloc_skb(length, GFP_KERNEL);
1585+ if (!skb)
1586+ return NULL;
1587+
1588+ memset(skb->head, 0, length);
1589+ skb_reserve(skb, ops->headroom);
1590+
1591+ if (data && data_len)
1592+ skb_put_data(skb, data, data_len);
1593+
1594+ return skb;
1595+}
1596+
1597+struct sk_buff *
1598+mtk_wed_mcu_get_response(struct mtk_wed_wo *wo, unsigned long expires)
1599+{
1600+ unsigned long timeout;
1601+
1602+ if (!time_is_after_jiffies(expires))
1603+ return NULL;
1604+
1605+ timeout = expires - jiffies;
1606+ wait_event_timeout(wo->mcu.wait,
1607+ (!skb_queue_empty(&wo->mcu.res_q)),
1608+ timeout);
1609+
1610+ return skb_dequeue(&wo->mcu.res_q);
1611+}
1612+
1613+int
1614+mtk_wed_mcu_skb_send_and_get_msg(struct mtk_wed_wo *wo,
1615+ int to_id, int cmd, struct sk_buff *skb,
1616+ bool wait_resp, struct sk_buff **ret_skb)
1617+{
1618+ unsigned long expires;
1619+ int ret, seq;
1620+
1621+ if (ret_skb)
1622+ *ret_skb = NULL;
1623+
1624+ mutex_lock(&wo->mcu.mutex);
1625+
1626+ ret = wo->mcu_ops->mcu_skb_send_msg(wo, to_id, cmd, skb, &seq, wait_resp);
1627+ if (ret < 0)
1628+ goto out;
1629+
1630+ if (!wait_resp) {
1631+ ret = 0;
1632+ goto out;
1633+ }
1634+
1635+ expires = jiffies + wo->mcu.timeout;
1636+
1637+ do {
1638+ skb = mtk_wed_mcu_get_response(wo, expires);
1639+ ret = wo->mcu_ops->mcu_parse_response(wo, cmd, skb, seq);
1640+
1641+ if (!ret && ret_skb)
1642+ *ret_skb = skb;
1643+ else
1644+ dev_kfree_skb(skb);
1645+ } while (ret == -EAGAIN);
1646+
1647+out:
1648+ mutex_unlock(&wo->mcu.mutex);
1649+
1650+ return ret;
1651+}
1652+
1653+void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo,
1654+ struct sk_buff *skb)
1655+{
1656+ skb_queue_tail(&wo->mcu.res_q, skb);
1657+ wake_up(&wo->mcu.wait);
1658+}
1659+
1660+static int mtk_wed_mcu_send_and_get_msg(struct mtk_wed_wo *wo,
1661+ int to_id, int cmd, const void *data, int len,
1662+ bool wait_resp, struct sk_buff **ret_skb)
1663+{
1664+ struct sk_buff *skb;
1665+
1666+ skb = mtk_wed_mcu_msg_alloc(wo, data, len);
1667+ if (!skb)
1668+ return -ENOMEM;
1669+
1670+ return mtk_wed_mcu_skb_send_and_get_msg(wo, to_id, cmd, skb, wait_resp, ret_skb);
1671+}
1672+
1673+int
1674+mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo,
1675+ int to_id, int cmd,
1676+ const void *data, int len, bool wait_resp)
1677+{
1678+ struct sk_buff *skb = NULL;
1679+ int ret = 0;
1680+
1681+ ret = mtk_wed_mcu_send_and_get_msg(wo, to_id, cmd, data,
1682+ len, wait_resp, &skb);
1683+ if (skb)
1684+ dev_kfree_skb(skb);
1685+
1686+ return ret;
1687+}
1688+
1689+int mtk_wed_exception_init(struct mtk_wed_wo *wo)
1690+{
1691+ struct wed_wo_exception *exp = &wo->exp;
1692+ struct {
1693+ u32 arg0;
1694+ u32 arg1;
1695+ }req;
1696+
1697+ exp->log_size = EXCEPTION_LOG_SIZE;
1698+ exp->log = kmalloc(exp->log_size, GFP_ATOMIC);
1699+ if (!exp->log)
1700+ return -ENOMEM;
1701+
1702+ memset(exp->log, 0, exp->log_size);
1703+ exp->phys = dma_map_single(wo->hw->dev, exp->log, exp->log_size,
1704+ DMA_FROM_DEVICE);
1705+
1706+ if (unlikely(dma_mapping_error(wo->hw->dev, exp->phys))) {
1707+ dev_info(wo->hw->dev, "dma map error\n");
1708+ goto free;
1709+ }
1710+
1711+ req.arg0 = (u32)exp->phys;
1712+ req.arg1 = (u32)exp->log_size;
1713+
1714+ return mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, WO_CMD_EXCEPTION_INIT,
1715+ &req, sizeof(req), false);
1716+
1717+free:
1718+ kfree(exp->log);
1719+ return -ENOMEM;
1720+}
1721+
1722+int
1723+mtk_wed_mcu_cmd_sanity_check(struct mtk_wed_wo *wo, struct sk_buff *skb)
1724+{
1725+ struct wed_cmd_hdr *hdr = (struct wed_cmd_hdr *)skb->data;
1726+
1727+ if (hdr->ver != 0)
1728+ return WARP_INVALID_PARA_STATUS;
1729+
1730+ if (skb->len < sizeof(struct wed_cmd_hdr))
1731+ return WARP_INVALID_PARA_STATUS;
1732+
1733+ if (skb->len != hdr->length)
1734+ return WARP_INVALID_PARA_STATUS;
1735+
1736+ return WARP_OK_STATUS;
1737+}
1738+
1739+void
1740+mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo, struct sk_buff *skb)
1741+{
developer8fec8ae2022-08-15 15:01:09 -07001742+ struct mtk_wed_device *wed = wo->hw->wed_dev;
developer8cb3ac72022-07-04 10:55:14 +08001743+ struct wed_cmd_hdr *hdr = (struct wed_cmd_hdr *)skb->data;
1744+ struct wed_wo_log *record;
developer8fec8ae2022-08-15 15:01:09 -07001745+ struct wo_cmd_rxcnt_t *rxcnt;
developer8cb3ac72022-07-04 10:55:14 +08001746+ char *msg = (char *)(skb->data + sizeof(struct wed_cmd_hdr));
1747+ u16 msg_len = skb->len - sizeof(struct wed_cmd_hdr);
1748+ u32 i, cnt = 0;
1749+
1750+ switch (hdr->cmd_id) {
1751+ case WO_EVT_LOG_DUMP:
1752+ pr_info("[WO LOG]: %s\n", msg);
1753+ break;
1754+ case WO_EVT_PROFILING:
1755+ cnt = msg_len / (sizeof(struct wed_wo_log));
1756+ record = (struct wed_wo_log *) msg;
1757+ dev_info(wo->hw->dev, "[WO Profiling]: %d report arrived!\n", cnt);
1758+
1759+ for (i = 0 ; i < cnt ; i++) {
1760+ //PROFILE_STAT(wo->total, record[i].total);
1761+ //PROFILE_STAT(wo->mod, record[i].mod);
1762+ //PROFILE_STAT(wo->rro, record[i].rro);
1763+
1764+ dev_info(wo->hw->dev, "[WO Profiling]: SN:%u with latency: total=%u, rro:%u, mod:%u\n",
1765+ record[i].sn,
1766+ record[i].total,
1767+ record[i].rro,
1768+ record[i].mod);
1769+ }
1770+ break;
developer8fec8ae2022-08-15 15:01:09 -07001771+ case WO_EVT_RXCNT_INFO:
1772+ cnt = *(u32 *)msg;
1773+ rxcnt = (struct wo_cmd_rxcnt_t *)((u32 *)msg+1);
developer8cb3ac72022-07-04 10:55:14 +08001774+
developer8fec8ae2022-08-15 15:01:09 -07001775+ for (i = 0; i < cnt; i++)
1776+ if (wed->wlan.update_wo_rxcnt)
1777+ wed->wlan.update_wo_rxcnt(wed, rxcnt);
1778+ break;
developer8cb3ac72022-07-04 10:55:14 +08001779+ default:
1780+ break;
1781+ }
1782+
1783+ dev_kfree_skb(skb);
1784+
1785+}
1786+
1787+static int
1788+mtk_wed_load_firmware(struct mtk_wed_wo *wo)
1789+{
1790+ struct fw_info {
1791+ __le32 decomp_crc;
1792+ __le32 decomp_len;
1793+ __le32 decomp_blk_sz;
1794+ u8 reserved[4];
1795+ __le32 addr;
1796+ __le32 len;
1797+ u8 feature_set;
1798+ u8 reserved1[15];
1799+ } __packed *region;
1800+
1801+ char *mcu;
1802+ const struct mtk_wed_fw_trailer *hdr;
1803+ static u8 shared[MAX_REGION_SIZE] = {0};
1804+ const struct firmware *fw;
1805+ int ret, i;
1806+ u32 ofs = 0;
1807+ u32 boot_cr, val;
1808+
1809+ mcu = wo->hw->index ? MT7986_FIRMWARE_WO_2 : MT7986_FIRMWARE_WO_1;
1810+
1811+ ret = request_firmware(&fw, mcu, wo->hw->dev);
1812+ if (ret)
1813+ return ret;
1814+
1815+ hdr = (const struct mtk_wed_fw_trailer *)(fw->data + fw->size -
1816+ sizeof(*hdr));
1817+
1818+ dev_info(wo->hw->dev, "WO Firmware Version: %.10s, Build Time: %.15s\n",
1819+ hdr->fw_ver, hdr->build_date);
1820+
1821+ for (i = 0; i < hdr->n_region; i++) {
1822+ int j = 0;
1823+ region = (struct fw_info *)(fw->data + fw->size -
1824+ sizeof(*hdr) -
1825+ sizeof(*region) *
1826+ (hdr->n_region - i));
1827+
1828+ while (j < MAX_REGION_SIZE) {
1829+ struct mtk_wed_fw_region *wo_region;
1830+
1831+ wo_region = &wo->region[j];
1832+ if (!wo_region->addr)
1833+ break;
1834+
1835+ if (wo_region->addr_pa == region->addr) {
1836+ if (!wo_region->shared) {
1837+ memcpy(wo_region->addr,
1838+ fw->data + ofs, region->len);
1839+ } else if (!shared[j]) {
1840+ memcpy(wo_region->addr,
1841+ fw->data + ofs, region->len);
1842+ shared[j] = true;
1843+ }
1844+ }
1845+ j++;
1846+ }
1847+
1848+ if (j == __WO_REGION_MAX) {
1849+ ret = -ENOENT;
1850+ goto done;
1851+ }
1852+ ofs += region->len;
1853+ }
1854+
1855+ /* write the start address */
1856+ boot_cr = wo->hw->index ?
1857+ WOX_MCU_CFG_LS_WA_BOOT_ADDR_ADDR : WOX_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
1858+ wo_w32(wo, boot_cr, (wo->region[WO_REGION_EMI].addr_pa >> 16));
1859+
1860+ /* wo firmware reset */
1861+ wo_w32(wo, WOX_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00);
1862+
1863+ val = wo_r32(wo, WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR);
1864+
1865+ val |= wo->hw->index ? WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WA_CPU_RSTB_MASK :
1866+ WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WM_CPU_RSTB_MASK;
1867+
1868+ wo_w32(wo, WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val);
1869+
1870+done:
1871+ release_firmware(fw);
1872+
1873+ return ret;
1874+}
1875+
1876+static int
1877+mtk_wed_get_firmware_region(struct mtk_wed_wo *wo)
1878+{
1879+ struct device_node *node, *np = wo->hw->node;
1880+ struct mtk_wed_fw_region *region;
1881+ struct resource res;
1882+ const char *compat;
1883+ int i, ret;
1884+
1885+ static const char *const wo_region_compat[__WO_REGION_MAX] = {
1886+ [WO_REGION_EMI] = WOCPU_EMI_DEV_NODE,
1887+ [WO_REGION_ILM] = WOCPU_ILM_DEV_NODE,
1888+ [WO_REGION_DATA] = WOCPU_DATA_DEV_NODE,
1889+ [WO_REGION_BOOT] = WOCPU_BOOT_DEV_NODE,
1890+ };
1891+
1892+ for (i = 0; i < __WO_REGION_MAX; i++) {
1893+ region = &wo->region[i];
1894+ compat = wo_region_compat[i];
1895+
1896+ node = of_parse_phandle(np, compat, 0);
1897+ if (!node)
1898+ return -ENODEV;
1899+
1900+ ret = of_address_to_resource(node, 0, &res);
1901+ if (ret)
1902+ return ret;
1903+
1904+ region->addr_pa = res.start;
1905+ region->size = resource_size(&res);
1906+ region->addr = ioremap(region->addr_pa, region->size);
1907+
1908+ of_property_read_u32_index(node, "shared", 0, &region->shared);
1909+ }
1910+
1911+ return 0;
1912+}
1913+
1914+static int
1915+wo_mcu_send_message(struct mtk_wed_wo *wo,
1916+ int to_id, int cmd, struct sk_buff *skb,
1917+ int *wait_seq, bool wait_resp)
1918+{
1919+ struct wed_cmd_hdr *hdr;
1920+ u8 seq = 0;
1921+
1922+ /* TDO: make dynamic based on msg type */
1923+ wo->mcu.timeout = 20 * HZ;
1924+
1925+ if (wait_resp && wait_seq) {
1926+ seq = wo->mcu.msg_seq++ ;
1927+ *wait_seq = seq;
1928+ }
1929+
1930+ hdr = (struct wed_cmd_hdr *)skb_push(skb, sizeof(*hdr));
1931+
1932+ hdr->cmd_id = cmd;
1933+ hdr->length = cpu_to_le16(skb->len);
1934+ hdr->uni_id = seq;
1935+
1936+ if (to_id == MODULE_ID_WO)
1937+ hdr->flag |= WARP_CMD_FLAG_FROM_TO_WO;
1938+
1939+ if (wait_resp && wait_seq)
1940+ hdr->flag |= WARP_CMD_FLAG_NEED_RSP;
1941+
1942+ return mtk_wed_wo_q_tx_skb(wo, &wo->q_tx, skb);
1943+}
1944+
1945+static int
1946+wo_mcu_parse_response(struct mtk_wed_wo *wo, int cmd,
1947+ struct sk_buff *skb, int seq)
1948+{
developer8fec8ae2022-08-15 15:01:09 -07001949+ struct mtk_wed_device *wed = wo->hw->wed_dev;
developer8cb3ac72022-07-04 10:55:14 +08001950+ struct wed_cmd_hdr *hdr;
developer8fec8ae2022-08-15 15:01:09 -07001951+ struct wo_cmd_rxcnt_t *rxcnt = NULL;
1952+ u32 i, cnt = 0;
developer8cb3ac72022-07-04 10:55:14 +08001953+
1954+ if (!skb) {
1955+ dev_err(wo->hw->dev, "Message %08x (seq %d) timeout\n",
1956+ cmd, seq);
1957+ return -ETIMEDOUT;
1958+ }
1959+
1960+ hdr = (struct wed_cmd_hdr *)skb->data;
1961+ if (seq != hdr->uni_id) {
1962+ dev_err(wo->hw->dev, "Message %08x (seq %d) with not match uid(%d)\n",
1963+ cmd, seq, hdr->uni_id);
1964+ return -EAGAIN;
1965+ }
1966+
developer8fec8ae2022-08-15 15:01:09 -07001967+ skb_pull(skb, sizeof(struct wed_cmd_hdr));
1968+
1969+ switch (cmd) {
1970+ case WO_CMD_RXCNT_INFO:
1971+ cnt = *(u32 *)skb->data;
1972+ rxcnt = (struct wo_cmd_rxcnt_t *)((u32 *)skb->data+1);
1973+
1974+ for (i = 0; i < cnt; i++)
1975+ if (wed->wlan.update_wo_rxcnt)
1976+ wed->wlan.update_wo_rxcnt(wed, rxcnt);
1977+ break;
1978+ default:
1979+ break;
1980+ }
developer8cb3ac72022-07-04 10:55:14 +08001981+
1982+ return 0;
1983+}
1984+
1985+int wed_wo_mcu_init(struct mtk_wed_wo *wo)
1986+{
1987+ static const struct wed_wo_mcu_ops wo_mcu_ops = {
1988+ .headroom = sizeof(struct wed_cmd_hdr),
1989+ .mcu_skb_send_msg = wo_mcu_send_message,
1990+ .mcu_parse_response = wo_mcu_parse_response,
1991+ /*TDO .mcu_restart = wo_mcu_restart,*/
1992+ };
1993+ unsigned long timeout = jiffies + FW_DL_TIMEOUT;
1994+ int ret;
1995+ u32 val;
1996+
1997+ wo->mcu_ops = &wo_mcu_ops;
1998+
1999+ ret = mtk_wed_get_firmware_region(wo);
2000+ if (ret)
2001+ return ret;
2002+
2003+ /* set dummy cr */
2004+ wed_w32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * WED_DUMMY_CR_FWDL,
2005+ wo->hw->index + 1);
2006+
2007+ ret = mtk_wed_load_firmware(wo);
2008+ if (ret)
2009+ return ret;
2010+
2011+ do {
2012+ /* get dummy cr */
2013+ val = wed_r32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * WED_DUMMY_CR_FWDL);
2014+ } while (val != 0 && !time_after(jiffies, timeout));
2015+
2016+ if (val)
2017+ return -EBUSY;
2018+
2019+ return 0;
2020+}
2021+
2022+static ssize_t
2023+mtk_wed_wo_ctrl(struct file *file,
2024+ const char __user *user_buf,
2025+ size_t count,
2026+ loff_t *ppos)
2027+{
2028+ struct mtk_wed_hw *hw = file->private_data;
2029+ struct mtk_wed_wo *wo = hw->wed_wo;
2030+ char buf[100], *cmd = NULL, *input[11] = {0};
2031+ char msgbuf[128] = {0};
2032+ struct wo_cmd_query *query = (struct wo_cmd_query *)msgbuf;
2033+ u32 cmd_id;
2034+ bool wait = false;
2035+ char *sub_str = NULL;
2036+ int input_idx = 0, input_total = 0, scan_num = 0;
2037+ char *p;
2038+
2039+ if (count > sizeof(buf))
2040+ return -EINVAL;
2041+
2042+ if (copy_from_user(buf, user_buf, count))
2043+ return -EFAULT;
2044+
2045+ if (count && buf[count - 1] == '\n')
2046+ buf[count - 1] = '\0';
2047+ else
2048+ buf[count] = '\0';
2049+
2050+ p = buf;
2051+
2052+ while ((sub_str = strsep(&p, " ")) != NULL) {
2053+ input[input_idx] = sub_str;
2054+ input_idx++;
2055+ input_total++;
2056+ }
2057+ cmd = input[0];
2058+ if (input_total == 1 && cmd) {
2059+ if (strncmp(cmd, "bainfo", strlen(cmd)) == 0) {
2060+ cmd_id = WO_CMD_BA_INFO_DUMP;
2061+ } else if (strncmp(cmd, "bactrl", strlen(cmd)) == 0) {
2062+ cmd_id = WO_CMD_BA_CTRL_DUMP;
2063+ } else if (strncmp(cmd, "fbcmdq", strlen(cmd)) == 0) {
2064+ cmd_id = WO_CMD_FBCMD_Q_DUMP;
2065+ } else if (strncmp(cmd, "logflush", strlen(cmd)) == 0) {
2066+ cmd_id = WO_CMD_LOG_FLUSH;
2067+ } else if (strncmp(cmd, "cpustat.dump", strlen(cmd)) == 0) {
2068+ cmd_id = WO_CMD_CPU_STATS_DUMP;
2069+ } else if (strncmp(cmd, "state", strlen(cmd)) == 0) {
2070+ cmd_id = WO_CMD_WED_RX_STAT;
2071+ } else if (strncmp(cmd, "prof_hit_dump", strlen(cmd)) == 0) {
2072+ //wo_profiling_report();
2073+ return count;
2074+ } else if (strncmp(cmd, "rxcnt_info", strlen(cmd)) == 0) {
2075+ cmd_id = WO_CMD_RXCNT_INFO;
2076+ wait = true;
2077+ } else {
2078+ pr_info("(%s) unknown comand string(%s)!\n", __func__, cmd);
2079+ return count;
2080+ }
2081+ } else if (input_total > 1) {
2082+ for (input_idx = 1 ; input_idx < input_total ; input_idx++) {
2083+ scan_num = sscanf(input[input_idx], "%u", &query->query0+(input_idx - 1));
2084+
2085+ if (scan_num < 1) {
2086+ pr_info("(%s) require more input!\n", __func__);
2087+ return count;
2088+ }
2089+ }
2090+ if(strncmp(cmd, "devinfo", strlen(cmd)) == 0) {
2091+ cmd_id = WO_CMD_DEV_INFO_DUMP;
2092+ } else if (strncmp(cmd, "bssinfo", strlen(cmd)) == 0) {
2093+ cmd_id = WO_CMD_BSS_INFO_DUMP;
2094+ } else if (strncmp(cmd, "starec", strlen(cmd)) == 0) {
2095+ cmd_id = WO_CMD_STA_REC_DUMP;
2096+ } else if (strncmp(cmd, "starec_ba", strlen(cmd)) == 0) {
2097+ cmd_id = WO_CMD_STA_BA_DUMP;
2098+ } else if (strncmp(cmd, "logctrl", strlen(cmd)) == 0) {
2099+ cmd_id = WO_CMD_FW_LOG_CTRL;
2100+ } else if (strncmp(cmd, "cpustat.en", strlen(cmd)) == 0) {
2101+ cmd_id = WO_CMD_CPU_STATS_ENABLE;
2102+ } else if (strncmp(cmd, "prof_conf", strlen(cmd)) == 0) {
2103+ cmd_id = WO_CMD_PROF_CTRL;
2104+ } else if (strncmp(cmd, "rxcnt_ctrl", strlen(cmd)) == 0) {
2105+ cmd_id = WO_CMD_RXCNT_CTRL;
2106+ } else if (strncmp(cmd, "dbg_set", strlen(cmd)) == 0) {
2107+ cmd_id = WO_CMD_DBG_INFO;
2108+ }
2109+ } else {
2110+ dev_info(hw->dev, "usage: echo cmd='cmd_str' > wo_write\n");
2111+ dev_info(hw->dev, "cmd_str value range:\n");
2112+ dev_info(hw->dev, "\tbainfo:\n");
2113+ dev_info(hw->dev, "\tbactrl:\n");
2114+ dev_info(hw->dev, "\tfbcmdq:\n");
2115+ dev_info(hw->dev, "\tlogflush:\n");
2116+ dev_info(hw->dev, "\tcpustat.dump:\n");
2117+ dev_info(hw->dev, "\tprof_hit_dump:\n");
2118+ dev_info(hw->dev, "\trxcnt_info:\n");
2119+ dev_info(hw->dev, "\tdevinfo:\n");
2120+ dev_info(hw->dev, "\tbssinfo:\n");
2121+ dev_info(hw->dev, "\tstarec:\n");
2122+ dev_info(hw->dev, "\tstarec_ba:\n");
2123+ dev_info(hw->dev, "\tlogctrl:\n");
2124+ dev_info(hw->dev, "\tcpustat.en:\n");
2125+ dev_info(hw->dev, "\tprof_conf:\n");
2126+ dev_info(hw->dev, "\trxcnt_ctrl:\n");
2127+ dev_info(hw->dev, "\tdbg_set [level] [category]:\n");
2128+ return count;
2129+ }
2130+
2131+ mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, cmd_id, (void *)msgbuf, sizeof(struct wo_cmd_query), wait);
2132+
2133+ return count;
2134+
2135+}
2136+
2137+static const struct file_operations fops_wo_ctrl = {
2138+ .write = mtk_wed_wo_ctrl,
2139+ .open = simple_open,
2140+ .llseek = default_llseek,
2141+};
2142+
2143+void wed_wo_mcu_debugfs(struct mtk_wed_hw *hw, struct dentry *dir)
2144+{
2145+ if (!dir)
2146+ return;
2147+
2148+ debugfs_create_file("wo_write", 0600, dir, hw, &fops_wo_ctrl);
2149+}
2150+
2151diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.h b/drivers/net/ethernet/mediatek/mtk_wed_mcu.h
2152new file mode 100644
developere0cbe332022-09-10 17:36:02 +08002153index 0000000..6a5ac76
developer8cb3ac72022-07-04 10:55:14 +08002154--- /dev/null
2155+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.h
2156@@ -0,0 +1,125 @@
2157+// SPDX-License-Identifier: GPL-2.0-only
2158+
2159+#ifndef __MTK_WED_MCU_H
2160+#define __MTK_WED_MCU_H
2161+
2162+#define EXCEPTION_LOG_SIZE 32768
2163+#define WOCPU_MCUSYS_RESET_ADDR 0x15194050
2164+#define WOCPU_WO0_MCUSYS_RESET_MASK 0x20
2165+#define WOCPU_WO1_MCUSYS_RESET_MASK 0x1
2166+
2167+#define WARP_INVALID_LENGTH_STATUS (-2)
2168+#define WARP_NULL_POINTER_STATUS (-3)
2169+#define WARP_INVALID_PARA_STATUS (-4)
2170+#define WARP_NOT_HANDLE_STATUS (-5)
2171+#define WARP_FAIL_STATUS (-1)
2172+#define WARP_OK_STATUS (0)
2173+#define WARP_ALREADY_DONE_STATUS (1)
2174+
2175+#define MT7986_FIRMWARE_WO_1 "mediatek/mt7986_wo_0.bin"
2176+#define MT7986_FIRMWARE_WO_2 "mediatek/mt7986_wo_1.bin"
2177+
2178+#define WOCPU_EMI_DEV_NODE "mediatek,wocpu_emi"
2179+#define WOCPU_ILM_DEV_NODE "mediatek,wocpu_ilm"
2180+#define WOCPU_DLM_DEV_NODE "mediatek,wocpu_dlm"
2181+#define WOCPU_DATA_DEV_NODE "mediatek,wocpu_data"
2182+#define WOCPU_BOOT_DEV_NODE "mediatek,wocpu_boot"
2183+
2184+#define FW_DL_TIMEOUT ((3000 * HZ) / 1000)
2185+#define WOCPU_TIMEOUT ((1000 * HZ) / 1000)
2186+
2187+#define MAX_REGION_SIZE 3
2188+
2189+#define WOX_MCU_CFG_LS_BASE 0 /*0x15194000*/
2190+
2191+#define WOX_MCU_CFG_LS_HW_VER_ADDR (WOX_MCU_CFG_LS_BASE + 0x000) // 4000
2192+#define WOX_MCU_CFG_LS_FW_VER_ADDR (WOX_MCU_CFG_LS_BASE + 0x004) // 4004
2193+#define WOX_MCU_CFG_LS_CFG_DBG1_ADDR (WOX_MCU_CFG_LS_BASE + 0x00C) // 400C
2194+#define WOX_MCU_CFG_LS_CFG_DBG2_ADDR (WOX_MCU_CFG_LS_BASE + 0x010) // 4010
2195+#define WOX_MCU_CFG_LS_WF_MCCR_ADDR (WOX_MCU_CFG_LS_BASE + 0x014) // 4014
2196+#define WOX_MCU_CFG_LS_WF_MCCR_SET_ADDR (WOX_MCU_CFG_LS_BASE + 0x018) // 4018
2197+#define WOX_MCU_CFG_LS_WF_MCCR_CLR_ADDR (WOX_MCU_CFG_LS_BASE + 0x01C) // 401C
2198+#define WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR (WOX_MCU_CFG_LS_BASE + 0x050) // 4050
2199+#define WOX_MCU_CFG_LS_WM_BOOT_ADDR_ADDR (WOX_MCU_CFG_LS_BASE + 0x060) // 4060
2200+#define WOX_MCU_CFG_LS_WA_BOOT_ADDR_ADDR (WOX_MCU_CFG_LS_BASE + 0x064) // 4064
2201+
2202+#define WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WM_CPU_RSTB_MASK BIT(5)
2203+#define WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WA_CPU_RSTB_MASK BIT(0)
2204+
2205+
2206+enum wo_event_id {
2207+ WO_EVT_LOG_DUMP = 0x1,
2208+ WO_EVT_PROFILING = 0x2,
2209+ WO_EVT_RXCNT_INFO = 0x3
2210+};
2211+
2212+enum wo_cmd_id {
2213+ WO_CMD_WED_CFG = 0,
2214+ WO_CMD_WED_RX_STAT,
2215+ WO_CMD_RRO_SER,
2216+ WO_CMD_DBG_INFO,
2217+ WO_CMD_DEV_INFO,
2218+ WO_CMD_BSS_INFO,
2219+ WO_CMD_STA_REC,
2220+ WO_CMD_DEV_INFO_DUMP,
2221+ WO_CMD_BSS_INFO_DUMP,
2222+ WO_CMD_STA_REC_DUMP,
2223+ WO_CMD_BA_INFO_DUMP,
2224+ WO_CMD_FBCMD_Q_DUMP,
2225+ WO_CMD_FW_LOG_CTRL,
2226+ WO_CMD_LOG_FLUSH,
2227+ WO_CMD_CHANGE_STATE,
2228+ WO_CMD_CPU_STATS_ENABLE,
2229+ WO_CMD_CPU_STATS_DUMP,
2230+ WO_CMD_EXCEPTION_INIT,
2231+ WO_CMD_PROF_CTRL,
2232+ WO_CMD_STA_BA_DUMP,
2233+ WO_CMD_BA_CTRL_DUMP,
2234+ WO_CMD_RXCNT_CTRL,
2235+ WO_CMD_RXCNT_INFO,
2236+ WO_CMD_SET_CAP,
2237+ WO_CMD_CCIF_RING_DUMP,
2238+ WO_CMD_WED_END
2239+};
2240+
2241+enum wo_state {
2242+ WO_STATE_UNDEFINED = 0x0,
2243+ WO_STATE_INIT = 0x1,
2244+ WO_STATE_ENABLE = 0x2,
2245+ WO_STATE_DISABLE = 0x3,
2246+ WO_STATE_HALT = 0x4,
2247+ WO_STATE_GATING = 0x5,
2248+ WO_STATE_SER_RESET = 0x6,
2249+ WO_STATE_WF_RESET = 0x7,
2250+ WO_STATE_END
2251+};
2252+
2253+enum wo_done_state {
2254+ WOIF_UNDEFINED = 0,
2255+ WOIF_DISABLE_DONE = 1,
2256+ WOIF_TRIGGER_ENABLE = 2,
2257+ WOIF_ENABLE_DONE = 3,
2258+ WOIF_TRIGGER_GATING = 4,
2259+ WOIF_GATING_DONE = 5,
2260+ WOIF_TRIGGER_HALT = 6,
2261+ WOIF_HALT_DONE = 7,
2262+};
2263+
2264+enum wed_dummy_cr_idx {
2265+ WED_DUMMY_CR_FWDL = 0,
2266+ WED_DUMMY_CR_WO_STATUS = 1
2267+};
2268+
2269+struct mtk_wed_fw_trailer {
2270+ u8 chip_id;
2271+ u8 eco_code;
2272+ u8 n_region;
2273+ u8 format_ver;
2274+ u8 format_flag;
2275+ u8 reserved[2];
2276+ char fw_ver[10];
2277+ char build_date[15];
2278+ u32 crc;
2279+};
2280+
2281+#endif
2282diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
developere0cbe332022-09-10 17:36:02 +08002283index b189761..9d021e2 100644
developer8cb3ac72022-07-04 10:55:14 +08002284--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2285+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2286@@ -4,6 +4,8 @@
2287 #ifndef __MTK_WED_REGS_H
2288 #define __MTK_WED_REGS_H
2289
2290+#define MTK_WFDMA_DESC_CTRL_TO_HOST BIT(8)
2291+
2292 #if defined(CONFIG_MEDIATEK_NETSYS_V2)
2293 #define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(13, 0)
2294 #define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(14)
2295@@ -16,6 +18,7 @@
2296 #define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16)
2297 #define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30)
2298 #define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31)
2299+#define MTK_WED_RX_BM_TOKEN GENMASK(31, 16)
2300
2301 struct mtk_wdma_desc {
2302 __le32 buf0;
developere0cbe332022-09-10 17:36:02 +08002303@@ -42,6 +45,8 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002304 #define MTK_WED_RESET_WED_TX_DMA BIT(12)
2305 #define MTK_WED_RESET_WDMA_RX_DRV BIT(17)
2306 #define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
2307+#define MTK_WED_RESET_RX_RRO_QM BIT(20)
2308+#define MTK_WED_RESET_RX_ROUTE_QM BIT(21)
2309 #define MTK_WED_RESET_WED BIT(31)
2310
2311 #define MTK_WED_CTRL 0x00c
developere0cbe332022-09-10 17:36:02 +08002312@@ -53,8 +58,12 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002313 #define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
2314 #define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
2315 #define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11)
2316-#define MTK_WED_CTRL_RESERVE_EN BIT(12)
2317-#define MTK_WED_CTRL_RESERVE_BUSY BIT(13)
2318+#define MTK_WED_CTRL_WED_RX_BM_EN BIT(12)
2319+#define MTK_WED_CTRL_WED_RX_BM_BUSY BIT(13)
2320+#define MTK_WED_CTRL_RX_RRO_QM_EN BIT(14)
2321+#define MTK_WED_CTRL_RX_RRO_QM_BUSY BIT(15)
2322+#define MTK_WED_CTRL_RX_ROUTE_QM_EN BIT(16)
2323+#define MTK_WED_CTRL_RX_ROUTE_QM_BUSY BIT(17)
2324 #define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
2325 #define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25)
2326 #define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
developere0cbe332022-09-10 17:36:02 +08002327@@ -69,8 +78,8 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002328 #define MTK_WED_EXT_INT_STATUS_TX_TKID_LO_TH BIT(10)
2329 #define MTK_WED_EXT_INT_STATUS_TX_TKID_HI_TH BIT(11)
2330 #endif
2331-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
2332-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
2333+#define MTK_WED_EXT_INT_STATUS_RX_FREE_AT_EMPTY BIT(12)
2334+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_DMAD_ER BIT(13)
2335 #define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
2336 #define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
2337 #define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
developere0cbe332022-09-10 17:36:02 +08002338@@ -87,8 +96,8 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002339 #define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
2340 MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
2341 MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \
2342- MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH | \
2343- MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH | \
2344+ MTK_WED_EXT_INT_STATUS_RX_FREE_AT_EMPTY | \
2345+ MTK_WED_EXT_INT_STATUS_RX_FBUF_DMAD_ER | \
2346 MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
2347 MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
2348 MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | \
developere0cbe332022-09-10 17:36:02 +08002349@@ -97,6 +106,8 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002350 MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR)
2351
2352 #define MTK_WED_EXT_INT_MASK 0x028
2353+#define MTK_WED_EXT_INT_MASK1 0x02c
2354+#define MTK_WED_EXT_INT_MASK2 0x030
2355
2356 #define MTK_WED_STATUS 0x060
2357 #define MTK_WED_STATUS_TX GENMASK(15, 8)
developere0cbe332022-09-10 17:36:02 +08002358@@ -184,6 +195,9 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002359
2360 #define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10)
2361
2362+#define MTK_WED_RING_RX_DATA(_n) (0x420 + (_n) * 0x10)
2363+
2364+#define MTK_WED_SCR0 0x3c0
2365 #define MTK_WED_WPDMA_INT_TRIGGER 0x504
2366 #define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1)
2367 #define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4)
developere0cbe332022-09-10 17:36:02 +08002368@@ -240,13 +254,19 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002369
2370 #define MTK_WED_WPDMA_INT_CTRL_TX 0x530
2371 #define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN BIT(0)
2372-#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR BIT(1)
2373+#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR BIT(1)
2374 #define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG GENMASK(6, 2)
2375 #define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN BIT(8)
2376 #define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR BIT(9)
2377 #define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG GENMASK(14, 10)
2378
2379 #define MTK_WED_WPDMA_INT_CTRL_RX 0x534
2380+#define MTK_WED_WPDMA_INT_CTRL_RX0_EN BIT(0)
2381+#define MTK_WED_WPDMA_INT_CTRL_RX0_CLR BIT(1)
2382+#define MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG GENMASK(6, 2)
2383+#define MTK_WED_WPDMA_INT_CTRL_RX1_EN BIT(8)
2384+#define MTK_WED_WPDMA_INT_CTRL_RX1_CLR BIT(9)
2385+#define MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG GENMASK(14, 10)
2386
2387 #define MTK_WED_WPDMA_INT_CTRL_TX_FREE 0x538
2388 #define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN BIT(0)
developere0cbe332022-09-10 17:36:02 +08002389@@ -271,13 +291,40 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002390 #define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
2391 #define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
2392
2393+#define MTK_WED_WPDMA_RX_MIB(_n) (0x5e0 + (_n) * 4)
2394+#define MTK_WED_WPDMA_RX_COHERENT_MIB(_n) (0x5f0 + (_n) * 4)
2395+
2396 #define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10)
2397 #define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10)
developerc1b2cd12022-07-28 18:35:24 +08002398+#define MTK_WED_WPDMA_RING_RX_DATA(_n) (0x730 + (_n) * 0x10)
developer8cb3ac72022-07-04 10:55:14 +08002399+
2400+#define MTK_WED_WPDMA_RX_D_GLO_CFG 0x75c
2401+#define MTK_WED_WPDMA_RX_D_RX_DRV_EN BIT(0)
2402+#define MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL GENMASK(11, 7)
2403+#define MTK_WED_WPDMA_RX_D_RXD_READ_LEN GENMASK(31, 24)
2404+
2405+#define MTK_WED_WPDMA_RX_D_RST_IDX 0x760
developerc1b2cd12022-07-28 18:35:24 +08002406+#define MTK_WED_WPDMA_RX_D_RST_CRX_IDX GENMASK(17, 16)
2407+#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX GENMASK(25, 24)
developer8cb3ac72022-07-04 10:55:14 +08002408+
2409+#define MTK_WED_WPDMA_RX_GLO_CFG 0x76c
2410+#define MTK_WED_WPDMA_RX_RING 0x770
2411+
2412+#define MTK_WED_WPDMA_RX_D_MIB(_n) (0x774 + (_n) * 4)
2413+#define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n) (0x784 + (_n) * 4)
2414+#define MTK_WED_WPDMA_RX_D_COHERENT_MIB 0x78c
2415+
2416+#define MTK_WED_WDMA_RING_TX 0x800
2417+
2418+#define MTK_WED_WDMA_TX_MIB 0x810
2419+
2420+
2421 #define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
2422 #define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
2423
2424 #define MTK_WED_WDMA_GLO_CFG 0xa04
2425 #define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
2426+#define MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK BIT(1)
2427 #define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2)
2428 #define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
2429 #define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4)
developere0cbe332022-09-10 17:36:02 +08002430@@ -321,6 +368,20 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002431 #define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4)
2432 #define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4)
2433
2434+#define MTK_WED_RX_BM_RX_DMAD 0xd80
2435+#define MTK_WED_RX_BM_RX_DMAD_SDL0 GENMASK(13, 0)
2436+
2437+#define MTK_WED_RX_BM_BASE 0xd84
2438+#define MTK_WED_RX_BM_INIT_PTR 0xd88
2439+#define MTK_WED_RX_BM_SW_TAIL GENMASK(15, 0)
2440+#define MTK_WED_RX_BM_INIT_SW_TAIL BIT(16)
2441+
2442+#define MTK_WED_RX_PTR 0xd8c
2443+
2444+#define MTK_WED_RX_BM_DYN_ALLOC_TH 0xdb4
2445+#define MTK_WED_RX_BM_DYN_ALLOC_TH_H GENMASK(31, 16)
2446+#define MTK_WED_RX_BM_DYN_ALLOC_TH_L GENMASK(15, 0)
2447+
2448 #define MTK_WED_RING_OFS_BASE 0x00
2449 #define MTK_WED_RING_OFS_COUNT 0x04
2450 #define MTK_WED_RING_OFS_CPU_IDX 0x08
developere0cbe332022-09-10 17:36:02 +08002451@@ -331,12 +392,13 @@ struct mtk_wdma_desc {
developera3f86ed2022-07-08 14:15:13 +08002452
2453 #define MTK_WDMA_GLO_CFG 0x204
2454 #define MTK_WDMA_GLO_CFG_TX_DMA_EN BIT(0)
2455+#define MTK_WDMA_GLO_CFG_TX_DMA_BUSY BIT(1)
2456 #define MTK_WDMA_GLO_CFG_RX_DMA_EN BIT(2)
2457+#define MTK_WDMA_GLO_CFG_RX_DMA_BUSY BIT(3)
2458 #define MTK_WDMA_GLO_CFG_RX_INFO3_PRERES BIT(26)
2459 #define MTK_WDMA_GLO_CFG_RX_INFO2_PRERES BIT(27)
2460 #define MTK_WDMA_GLO_CFG_RX_INFO1_PRERES BIT(28)
developerc1b2cd12022-07-28 18:35:24 +08002461
2462-
2463 #define MTK_WDMA_RESET_IDX 0x208
2464 #define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0)
2465 #define MTK_WDMA_RESET_IDX_RX GENMASK(17, 16)
developere0cbe332022-09-10 17:36:02 +08002466@@ -360,4 +422,70 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002467 /* DMA channel mapping */
2468 #define HIFSYS_DMA_AG_MAP 0x008
2469
2470+#define MTK_WED_RTQM_GLO_CFG 0xb00
2471+#define MTK_WED_RTQM_BUSY BIT(1)
2472+#define MTK_WED_RTQM_Q_RST BIT(2)
2473+#define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5)
2474+#define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20)
2475+
2476+#define MTK_WED_RTQM_R2H_MIB(_n) (0xb70 + (_n) * 0x4)
2477+#define MTK_WED_RTQM_R2Q_MIB(_n) (0xb78 + (_n) * 0x4)
2478+#define MTK_WED_RTQM_Q2N_MIB 0xb80
2479+#define MTK_WED_RTQM_Q2H_MIB(_n) (0xb84 + (_n) * 0x4)
2480+
2481+#define MTK_WED_RTQM_Q2B_MIB 0xb8c
2482+#define MTK_WED_RTQM_PFDBK_MIB 0xb90
2483+
2484+#define MTK_WED_RROQM_GLO_CFG 0xc04
2485+#define MTK_WED_RROQM_RST_IDX 0xc08
2486+#define MTK_WED_RROQM_RST_IDX_MIOD BIT(0)
2487+#define MTK_WED_RROQM_RST_IDX_FDBK BIT(4)
2488+
2489+#define MTK_WED_RROQM_MIOD_CTRL0 0xc40
2490+#define MTK_WED_RROQM_MIOD_CTRL1 0xc44
2491+#define MTK_WED_RROQM_MIOD_CNT GENMASK(11, 0)
2492+
2493+#define MTK_WED_RROQM_MIOD_CTRL2 0xc48
2494+#define MTK_WED_RROQM_MIOD_CTRL3 0xc4c
2495+
2496+#define MTK_WED_RROQM_FDBK_CTRL0 0xc50
2497+#define MTK_WED_RROQM_FDBK_CTRL1 0xc54
2498+#define MTK_WED_RROQM_FDBK_CNT GENMASK(11, 0)
2499+
2500+#define MTK_WED_RROQM_FDBK_CTRL2 0xc58
2501+
2502+#define MTK_WED_RROQ_BASE_L 0xc80
2503+#define MTK_WED_RROQ_BASE_H 0xc84
2504+
developer8cb3ac72022-07-04 10:55:14 +08002505+#define MTK_WED_RROQM_MIOD_CFG 0xc8c
2506+#define MTK_WED_RROQM_MIOD_MID_DW GENMASK(5, 0)
2507+#define MTK_WED_RROQM_MIOD_MOD_DW GENMASK(13, 8)
2508+#define MTK_WED_RROQM_MIOD_ENTRY_DW GENMASK(22, 16)
2509+
2510+#define MTK_WED_RROQM_MID_MIB 0xcc0
2511+#define MTK_WED_RROQM_MOD_MIB 0xcc4
2512+#define MTK_WED_RROQM_MOD_COHERENT_MIB 0xcc8
2513+#define MTK_WED_RROQM_FDBK_MIB 0xcd0
2514+#define MTK_WED_RROQM_FDBK_COHERENT_MIB 0xcd4
2515+#define MTK_WED_RROQM_FDBK_IND_MIB 0xce0
2516+#define MTK_WED_RROQM_FDBK_ENQ_MIB 0xce4
2517+#define MTK_WED_RROQM_FDBK_ANC_MIB 0xce8
2518+#define MTK_WED_RROQM_FDBK_ANC2H_MIB 0xcec
2519+
2520+#define MTK_WED_RX_BM_RX_DMAD 0xd80
2521+#define MTK_WED_RX_BM_BASE 0xd84
2522+#define MTK_WED_RX_BM_INIT_PTR 0xd88
2523+#define MTK_WED_RX_BM_PTR 0xd8c
2524+#define MTK_WED_RX_BM_PTR_HEAD GENMASK(32, 16)
2525+#define MTK_WED_RX_BM_PTR_TAIL GENMASK(15, 0)
2526+
2527+#define MTK_WED_RX_BM_BLEN 0xd90
2528+#define MTK_WED_RX_BM_STS 0xd94
2529+#define MTK_WED_RX_BM_INTF2 0xd98
2530+#define MTK_WED_RX_BM_INTF 0xd9c
2531+#define MTK_WED_RX_BM_ERR_STS 0xda8
2532+
2533+#define MTK_WED_WOCPU_VIEW_MIOD_BASE 0x8000
2534+#define MTK_WED_PCIE_INT_MASK 0x0
2535+
2536 #endif
2537diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
2538new file mode 100644
developere0cbe332022-09-10 17:36:02 +08002539index 0000000..8434272
developer8cb3ac72022-07-04 10:55:14 +08002540--- /dev/null
2541+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
developer53bfd362022-09-29 12:02:18 +08002542@@ -0,0 +1,564 @@
developer8cb3ac72022-07-04 10:55:14 +08002543+// SPDX-License-Identifier: GPL-2.0-only
2544+
2545+#include <linux/kernel.h>
2546+#include <linux/bitfield.h>
2547+#include <linux/dma-mapping.h>
2548+#include <linux/skbuff.h>
2549+#include <linux/of_platform.h>
2550+#include <linux/interrupt.h>
2551+#include <linux/of_address.h>
2552+#include <linux/iopoll.h>
2553+#include <linux/soc/mediatek/mtk_wed.h>
2554+#include "mtk_wed.h"
2555+#include "mtk_wed_regs.h"
2556+#include "mtk_wed_ccif.h"
2557+#include "mtk_wed_wo.h"
2558+
2559+struct wed_wo_profile_stat profile_total[6] = {
2560+ {1001, 0},
2561+ {1501, 0},
2562+ {3001, 0},
2563+ {5001, 0},
2564+ {10001, 0},
2565+ {0xffffffff, 0}
2566+};
2567+
2568+struct wed_wo_profile_stat profiling_mod[6] = {
2569+ {1001, 0},
2570+ {1501, 0},
2571+ {3001, 0},
2572+ {5001, 0},
2573+ {10001, 0},
2574+ {0xffffffff, 0}
2575+};
2576+
2577+struct wed_wo_profile_stat profiling_rro[6] = {
2578+ {1001, 0},
2579+ {1501, 0},
2580+ {3001, 0},
2581+ {5001, 0},
2582+ {10001, 0},
2583+ {0xffffffff, 0}
2584+};
2585+
2586+static void
2587+woif_q_sync_idx(struct mtk_wed_wo *wo, struct wed_wo_queue *q)
2588+{
2589+ woccif_w32(wo, q->regs->desc_base, q->desc_dma);
2590+ woccif_w32(wo, q->regs->ring_size, q->ndesc);
2591+
developer8cb3ac72022-07-04 10:55:14 +08002592+}
2593+
2594+static void
2595+woif_q_reset(struct mtk_wed_wo *dev, struct wed_wo_queue *q)
2596+{
2597+
2598+ if (!q || !q->ndesc)
2599+ return;
2600+
2601+ woccif_w32(dev, q->regs->cpu_idx, 0);
2602+
2603+ woif_q_sync_idx(dev, q);
2604+}
2605+
2606+static void
2607+woif_q_kick(struct mtk_wed_wo *wo, struct wed_wo_queue *q, int offset)
2608+{
2609+ wmb();
2610+ woccif_w32(wo, q->regs->cpu_idx, q->head + offset);
2611+}
2612+
2613+static int
developer53bfd362022-09-29 12:02:18 +08002614+woif_q_rx_fill(struct mtk_wed_wo *wo, struct wed_wo_queue *q, bool rx)
developer8cb3ac72022-07-04 10:55:14 +08002615+{
2616+ int len = q->buf_size, frames = 0;
2617+ struct wed_wo_queue_entry *entry;
developer53bfd362022-09-29 12:02:18 +08002618+ struct page_frag_cache *page = &q->tx_page;
developer8cb3ac72022-07-04 10:55:14 +08002619+ struct wed_wo_desc *desc;
2620+ dma_addr_t addr;
2621+ u32 ctrl = 0;
2622+ void *buf;
2623+
2624+ if (!q->ndesc)
2625+ return 0;
2626+
2627+ spin_lock_bh(&q->lock);
2628+
developer53bfd362022-09-29 12:02:18 +08002629+ if(rx)
2630+ page = &q->rx_page;
developer8cb3ac72022-07-04 10:55:14 +08002631+
developer53bfd362022-09-29 12:02:18 +08002632+ while (q->queued < q->ndesc) {
2633+ buf = page_frag_alloc(page, len, GFP_ATOMIC);
developer8cb3ac72022-07-04 10:55:14 +08002634+ if (!buf)
2635+ break;
2636+
2637+ addr = dma_map_single(wo->hw->dev, buf, len, DMA_FROM_DEVICE);
2638+ if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
2639+ skb_free_frag(buf);
2640+ break;
2641+ }
developerf11dcd72022-08-27 18:29:27 +08002642+
2643+ q->head = (q->head + 1) % q->ndesc;
2644+
developer8cb3ac72022-07-04 10:55:14 +08002645+ desc = &q->desc[q->head];
2646+ entry = &q->entry[q->head];
2647+
2648+ entry->dma_addr = addr;
2649+ entry->dma_len = len;
2650+
developer53bfd362022-09-29 12:02:18 +08002651+ if (rx) {
2652+ ctrl = FIELD_PREP(WED_CTL_SD_LEN0, entry->dma_len);
2653+ ctrl |= WED_CTL_LAST_SEC0;
developer8cb3ac72022-07-04 10:55:14 +08002654+
developer53bfd362022-09-29 12:02:18 +08002655+ WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
2656+ WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
2657+ }
developer8cb3ac72022-07-04 10:55:14 +08002658+ q->queued++;
2659+ q->entry[q->head].buf = buf;
2660+
developer8cb3ac72022-07-04 10:55:14 +08002661+ frames++;
2662+ }
2663+
2664+ spin_unlock_bh(&q->lock);
2665+
2666+ return frames;
2667+}
2668+
2669+static void
2670+woif_q_rx_fill_process(struct mtk_wed_wo *wo, struct wed_wo_queue *q)
2671+{
developer53bfd362022-09-29 12:02:18 +08002672+ if(woif_q_rx_fill(wo, q, true))
developer8cb3ac72022-07-04 10:55:14 +08002673+ woif_q_kick(wo, q, -1);
2674+}
2675+
2676+static int
2677+woif_q_alloc(struct mtk_wed_wo *dev, struct wed_wo_queue *q,
2678+ int n_desc, int bufsize, int idx,
2679+ struct wed_wo_queue_regs *regs)
2680+{
2681+ struct wed_wo_queue_regs *q_regs;
2682+ int size;
2683+
2684+ spin_lock_init(&q->lock);
2685+ spin_lock_init(&q->cleanup_lock);
2686+
2687+ q_regs = devm_kzalloc(dev->hw->dev, sizeof(*q_regs), GFP_KERNEL);
2688+
2689+ q_regs->desc_base = regs->desc_base;
2690+ q_regs->ring_size = regs->ring_size;
2691+ q_regs->cpu_idx = regs->cpu_idx;
2692+ q_regs->dma_idx = regs->dma_idx;
2693+
2694+ q->regs = q_regs;
2695+ q->ndesc = n_desc;
2696+ q->buf_size = bufsize;
2697+
2698+ size = q->ndesc * sizeof(struct wed_wo_desc);
2699+
2700+ q->desc = dmam_alloc_coherent(dev->hw->dev, size,
2701+ &q->desc_dma, GFP_KERNEL);
2702+ if (!q->desc)
2703+ return -ENOMEM;
2704+
2705+ size = q->ndesc * sizeof(*q->entry);
2706+ q->entry = devm_kzalloc(dev->hw->dev, size, GFP_KERNEL);
2707+ if (!q->entry)
2708+ return -ENOMEM;
2709+
developer53bfd362022-09-29 12:02:18 +08002710+ if (idx == 0) {
2711+ /* alloc tx buf */
2712+ woif_q_rx_fill(dev, &dev->q_tx, false);
developer8cb3ac72022-07-04 10:55:14 +08002713+ woif_q_reset(dev, &dev->q_tx);
developer53bfd362022-09-29 12:02:18 +08002714+ }
developer8cb3ac72022-07-04 10:55:14 +08002715+
2716+ return 0;
2717+}
2718+
2719+static void
developera3f86ed2022-07-08 14:15:13 +08002720+woif_q_free(struct mtk_wed_wo *dev, struct wed_wo_queue *q)
2721+{
2722+ int size;
2723+
2724+ if (!q)
2725+ return;
2726+
2727+ if (!q->desc)
2728+ return;
2729+
2730+ woccif_w32(dev, q->regs->cpu_idx, 0);
2731+
2732+ size = q->ndesc * sizeof(struct wed_wo_desc);
2733+ dma_free_coherent(dev->hw->dev, size, q->desc, q->desc_dma);
2734+}
2735+
2736+static void
developer53bfd362022-09-29 12:02:18 +08002737+woif_q_tx_clean(struct mtk_wed_wo *wo, struct wed_wo_queue *q)
developer8cb3ac72022-07-04 10:55:14 +08002738+{
developer53bfd362022-09-29 12:02:18 +08002739+ struct page *page;
2740+ int i = 0;
developer8cb3ac72022-07-04 10:55:14 +08002741+
2742+ if (!q || !q->ndesc)
2743+ return;
2744+
developer53bfd362022-09-29 12:02:18 +08002745+ spin_lock_bh(&q->lock);
2746+ while (i < q->ndesc) {
developer8cb3ac72022-07-04 10:55:14 +08002747+ struct wed_wo_queue_entry *e;
2748+
developer53bfd362022-09-29 12:02:18 +08002749+ e = &q->entry[i];
2750+ i++;
developer8cb3ac72022-07-04 10:55:14 +08002751+
developer53bfd362022-09-29 12:02:18 +08002752+ if (!e)
2753+ continue;
developer8cb3ac72022-07-04 10:55:14 +08002754+ dma_unmap_single(wo->hw->dev, e->dma_addr, e->dma_len,
2755+ DMA_TO_DEVICE);
2756+
developer53bfd362022-09-29 12:02:18 +08002757+ skb_free_frag(e->buf);
developer8cb3ac72022-07-04 10:55:14 +08002758+ }
developer53bfd362022-09-29 12:02:18 +08002759+ spin_unlock_bh(&q->lock);
developer8cb3ac72022-07-04 10:55:14 +08002760+
developer53bfd362022-09-29 12:02:18 +08002761+ if (!q->tx_page.va)
2762+ return;
2763+
2764+ page = virt_to_page(q->tx_page.va);
2765+ __page_frag_cache_drain(page, q->tx_page.pagecnt_bias);
2766+ memset(&q->tx_page, 0, sizeof(q->tx_page));
developer8cb3ac72022-07-04 10:55:14 +08002767+}
2768+
developer8cb3ac72022-07-04 10:55:14 +08002769+static void *
2770+woif_q_deq(struct mtk_wed_wo *wo, struct wed_wo_queue *q, bool flush,
2771+ int *len, u32 *info, bool *more)
2772+{
2773+ int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
2774+ struct wed_wo_queue_entry *e;
2775+ struct wed_wo_desc *desc;
developerf11dcd72022-08-27 18:29:27 +08002776+	int idx = (q->tail + 1) % q->ndesc;
developer8cb3ac72022-07-04 10:55:14 +08002777+ void *buf;
2778+
2779+ *more = false;
2780+ if (!q->queued)
2781+ return NULL;
2782+
2783+ if (flush)
2784+ q->desc[idx].ctrl |= cpu_to_le32(WED_CTL_DMA_DONE);
2785+ else if (!(q->desc[idx].ctrl & cpu_to_le32(WED_CTL_DMA_DONE)))
2786+ return NULL;
2787+
developerf11dcd72022-08-27 18:29:27 +08002788+ q->tail = idx;
developer8cb3ac72022-07-04 10:55:14 +08002789+ q->queued--;
2790+
2791+ desc = &q->desc[idx];
2792+ e = &q->entry[idx];
2793+
2794+ buf = e->buf;
2795+ if (len) {
2796+ u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
2797+ *len = FIELD_GET(WED_CTL_SD_LEN0, ctl);
2798+ *more = !(ctl & WED_CTL_LAST_SEC0);
2799+ }
2800+
2801+ if (info)
2802+ *info = le32_to_cpu(desc->info);
2803+ if(buf)
2804+ dma_unmap_single(wo->hw->dev, e->dma_addr, buf_len,
2805+ DMA_FROM_DEVICE);
2806+ e->skb = NULL;
2807+
2808+ return buf;
2809+}
2810+
developera3f86ed2022-07-08 14:15:13 +08002811+static void
2812+woif_q_rx_clean(struct mtk_wed_wo *wo, struct wed_wo_queue *q)
2813+{
2814+ struct page *page;
2815+ void *buf;
2816+ bool more;
2817+
2818+ if (!q->ndesc)
2819+ return;
2820+
2821+ spin_lock_bh(&q->lock);
2822+ do {
2823+ buf = woif_q_deq(wo, q, true, NULL, NULL, &more);
2824+ if (!buf)
2825+ break;
2826+
2827+ skb_free_frag(buf);
2828+ } while (1);
2829+ spin_unlock_bh(&q->lock);
2830+
2831+ if (!q->rx_page.va)
2832+ return;
2833+
2834+ page = virt_to_page(q->rx_page.va);
2835+ __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
2836+ memset(&q->rx_page, 0, sizeof(q->rx_page));
developera3f86ed2022-07-08 14:15:13 +08002837+}
2838+
developer8cb3ac72022-07-04 10:55:14 +08002839+static int
2840+woif_q_init(struct mtk_wed_wo *dev,
2841+ int (*poll)(struct napi_struct *napi, int budget))
2842+{
2843+ init_dummy_netdev(&dev->napi_dev);
2844+ snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
2845+ "woif_q");
2846+
2847+ if (dev->q_rx.ndesc) {
2848+ netif_napi_add(&dev->napi_dev, &dev->napi, poll, 64);
developer53bfd362022-09-29 12:02:18 +08002849+ woif_q_rx_fill(dev, &dev->q_rx, true);
developer8cb3ac72022-07-04 10:55:14 +08002850+ woif_q_reset(dev, &dev->q_rx);
2851+ napi_enable(&dev->napi);
2852+ }
2853+
2854+ return 0;
2855+}
2856+
2857+void woif_q_rx_skb(struct mtk_wed_wo *wo, struct sk_buff *skb)
2858+{
2859+ struct wed_cmd_hdr *hdr = (struct wed_cmd_hdr *)skb->data;
2860+ int ret;
2861+
2862+ ret = mtk_wed_mcu_cmd_sanity_check(wo, skb);
2863+ if (ret)
2864+ goto free_skb;
2865+
2866+ if (WED_WO_CMD_FLAG_IS_RSP(hdr))
2867+ mtk_wed_mcu_rx_event(wo, skb);
2868+ else
2869+ mtk_wed_mcu_rx_unsolicited_event(wo, skb);
2870+
2871+ return;
2872+free_skb:
2873+ dev_kfree_skb(skb);
2874+}
2875+
2876+static int
2877+woif_q_tx_skb(struct mtk_wed_wo *wo, struct wed_wo_queue *q,
2878+ struct sk_buff *skb)
2879+{
2880+ struct wed_wo_queue_entry *entry;
2881+ struct wed_wo_desc *desc;
developer53bfd362022-09-29 12:02:18 +08002882+ int len, ret = 0, idx = -1;
developer8cb3ac72022-07-04 10:55:14 +08002883+ dma_addr_t addr;
2884+ u32 ctrl = 0;
2885+
2886+ len = skb->len;
developer53bfd362022-09-29 12:02:18 +08002887+ spin_lock_bh(&q->lock);
developer8cb3ac72022-07-04 10:55:14 +08002888+
developer53bfd362022-09-29 12:02:18 +08002889+ q->tail = woccif_r32(wo, q->regs->dma_idx);
2890+ q->head = (q->head + 1) % q->ndesc;
2891+ if (q->tail == q->head) {
developer8cb3ac72022-07-04 10:55:14 +08002892+ ret = -ENOMEM;
2893+ goto error;
2894+ }
2895+
developer8cb3ac72022-07-04 10:55:14 +08002896+ idx = q->head;
developer8cb3ac72022-07-04 10:55:14 +08002897+ desc = &q->desc[idx];
2898+ entry = &q->entry[idx];
2899+
developer53bfd362022-09-29 12:02:18 +08002900+ if (len > entry->dma_len) {
2901+ ret = -ENOMEM;
2902+ goto error;
2903+ }
2904+ addr = entry->dma_addr;
2905+
2906+ dma_sync_single_for_cpu(wo->hw->dev, addr, len, DMA_TO_DEVICE);
2907+ memcpy(entry->buf, skb->data, len);
2908+ dma_sync_single_for_device(wo->hw->dev, addr, len, DMA_TO_DEVICE);
developer8cb3ac72022-07-04 10:55:14 +08002909+
2910+ ctrl = FIELD_PREP(WED_CTL_SD_LEN0, len);
2911+ ctrl |= WED_CTL_LAST_SEC0;
2912+ ctrl |= WED_CTL_DMA_DONE;
2913+
2914+ WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
2915+ WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
2916+
developer8cb3ac72022-07-04 10:55:14 +08002917+	woif_q_kick(wo, q, 0);
 2918+	wo->drv_ops->kickout(wo);
 2919+
developer8cb3ac72022-07-04 10:55:14 +08002920+error:
developer8cb3ac72022-07-04 10:55:14 +08002921+	spin_unlock_bh(&q->lock);
 2922+
 2923+	dev_kfree_skb(skb);
developer53bfd362022-09-29 12:02:18 +08002924+	return ret;
developer8cb3ac72022-07-04 10:55:14 +08002925+}
2926+
2927+static const struct wed_wo_queue_ops wo_queue_ops = {
2928+ .init = woif_q_init,
2929+ .alloc = woif_q_alloc,
developera3f86ed2022-07-08 14:15:13 +08002930+ .free = woif_q_free,
developer8cb3ac72022-07-04 10:55:14 +08002931+ .reset = woif_q_reset,
2932+ .tx_skb = woif_q_tx_skb,
2933+ .tx_clean = woif_q_tx_clean,
2934+ .rx_clean = woif_q_rx_clean,
2935+ .kick = woif_q_kick,
2936+};
2937+
2938+static int
2939+mtk_wed_wo_rx_process(struct mtk_wed_wo *wo, struct wed_wo_queue *q, int budget)
2940+{
developer53bfd362022-09-29 12:02:18 +08002941+ int len, done = 0;
developer8cb3ac72022-07-04 10:55:14 +08002942+ struct sk_buff *skb;
2943+ unsigned char *data;
2944+ bool more;
2945+
2946+ while (done < budget) {
2947+ u32 info;
2948+
2949+ data = woif_q_deq(wo, q, false, &len, &info, &more);
2950+ if (!data)
2951+ break;
2952+
developer8cb3ac72022-07-04 10:55:14 +08002953+ skb = build_skb(data, q->buf_size);
2954+ if (!skb) {
2955+ skb_free_frag(data);
2956+ continue;
2957+ }
2958+
2959+ __skb_put(skb, len);
2960+ done++;
2961+
2962+ woif_q_rx_skb(wo, skb);
2963+ }
2964+
2965+ woif_q_rx_fill_process(wo, q);
2966+
2967+ return done;
2968+}
2969+
2970+void mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, bool set,
2971+ u32 clear, u32 val)
2972+{
2973+ unsigned long flags;
2974+
2975+ spin_lock_irqsave(&wo->ccif.irq_lock, flags);
2976+ wo->ccif.irqmask &= ~clear;
2977+ wo->ccif.irqmask |= val;
2978+ if (set)
2979+ wo->drv_ops->set_isr(wo, wo->ccif.irqmask);
2980+
2981+ spin_unlock_irqrestore(&wo->ccif.irq_lock, flags);
2982+}
2983+
2984+static inline void mtk_wed_wo_set_ack_mask(struct mtk_wed_wo *wo, u32 mask)
2985+{
2986+ wo->drv_ops->set_ack(wo, mask);
2987+}
2988+
2989+static void mtk_wed_wo_poll_complete(struct mtk_wed_wo *wo)
2990+{
2991+ mtk_wed_wo_set_ack_mask(wo, wo->ccif.q_int_mask);
2992+ mtk_wed_wo_isr_enable(wo, wo->ccif.q_int_mask);
2993+}
2994+
2995+int mtk_wed_wo_rx_poll(struct napi_struct *napi, int budget)
2996+{
2997+ struct mtk_wed_wo *wo;
2998+ int done = 0, cur;
2999+
3000+ wo = container_of(napi->dev, struct mtk_wed_wo, napi_dev);
3001+
3002+ rcu_read_lock();
3003+
3004+ do {
3005+ cur = mtk_wed_wo_rx_process(wo, &wo->q_rx, budget - done);
3006+ /* rx packet handle */
3007+ done += cur;
3008+ } while (cur && done < budget);
3009+
3010+ rcu_read_unlock();
3011+
3012+ if (done < budget && napi_complete(napi))
3013+ mtk_wed_wo_poll_complete(wo);
3014+
3015+ return done;
3016+}
3017+
3018+static void mtk_wed_wo_isr_tasklet(unsigned long data)
3019+{
3020+ struct mtk_wed_wo *wo = (struct mtk_wed_wo *)data;
3021+ u32 intr, mask;
3022+
3023+ /* disable isr */
3024+ wo->drv_ops->set_isr(wo, 0);
3025+
3026+ intr = wo->drv_ops->get_csr(wo);
3027+ intr &= wo->ccif.irqmask;
3028+
3029+ mask = intr & (wo->ccif.q_int_mask | wo->ccif.q_exep_mask);
3030+ mtk_wed_wo_isr_disable(wo, mask);
3031+
3032+ if (intr & wo->ccif.q_int_mask)
3033+ napi_schedule(&wo->napi);
3034+
3035+ if (intr & wo->ccif.q_exep_mask) {
3036+ /* todo */
3037+ }
3038+}
3039+
3040+static irqreturn_t mtk_wed_wo_isr_handler(int irq, void *wo_instance)
3041+{
3042+ struct mtk_wed_wo *wo = wo_instance;
3043+
3044+ wo->drv_ops->set_isr(wo, 0);
3045+
3046+ tasklet_schedule(&wo->irq_tasklet);
3047+
3048+ return IRQ_HANDLED;
3049+}
3050+
3051+int mtk_wed_wo_init(struct mtk_wed_hw *hw)
3052+{
3053+ struct mtk_wed_wo *wo;
3054+ int ret = 0;
3055+
3056+ wo = kzalloc(sizeof(struct mtk_wed_wo), GFP_KERNEL);
3057+ if (!wo)
3058+ return -ENOMEM;
3059+
3060+ wo->hw = hw;
3061+ wo->queue_ops = &wo_queue_ops;
3062+ hw->wed_wo = wo;
3063+
3064+ tasklet_init(&wo->irq_tasklet, mtk_wed_wo_isr_tasklet,
3065+ (unsigned long)wo);
3066+
3067+ skb_queue_head_init(&wo->mcu.res_q);
3068+ init_waitqueue_head(&wo->mcu.wait);
3069+ mutex_init(&wo->mcu.mutex);
3070+
3071+ ret = wed_wo_hardware_init(wo, mtk_wed_wo_isr_handler);
3072+ if (ret)
3073+ goto error;
3074+
3075+ /* fw download */
3076+ ret = wed_wo_mcu_init(wo);
3077+ if (ret)
3078+ goto error;
3079+
3080+ ret = mtk_wed_exception_init(wo);
3081+ if (ret)
3082+ goto error;
3083+
3084+ return ret;
3085+
3086+error:
3087+ kfree(wo);
3088+
3089+ return ret;
3090+}
3091+
3092+void mtk_wed_wo_exit(struct mtk_wed_hw *hw)
3093+{
developer8cb3ac72022-07-04 10:55:14 +08003094+ struct mtk_wed_wo *wo = hw->wed_wo;
3095+
developera3f86ed2022-07-08 14:15:13 +08003096+ wed_wo_hardware_exit(wo);
3097+
developer8cb3ac72022-07-04 10:55:14 +08003098+ if (wo->exp.log) {
3099+ dma_unmap_single(wo->hw->dev, wo->exp.phys, wo->exp.log_size, DMA_FROM_DEVICE);
3100+ kfree(wo->exp.log);
3101+ }
3102+
developera3f86ed2022-07-08 14:15:13 +08003103+ wo->hw = NULL;
3104+ memset(wo, 0, sizeof(*wo));
3105+ kfree(wo);
developer8cb3ac72022-07-04 10:55:14 +08003106+}
3107diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.h b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
3108new file mode 100644
developere0cbe332022-09-10 17:36:02 +08003109index 0000000..5824f39
developer8cb3ac72022-07-04 10:55:14 +08003110--- /dev/null
3111+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
developer53bfd362022-09-29 12:02:18 +08003112@@ -0,0 +1,324 @@
developer8cb3ac72022-07-04 10:55:14 +08003113+// SPDX-License-Identifier: GPL-2.0-only
3114+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
3115+
3116+#ifndef __MTK_WED_WO_H
3117+#define __MTK_WED_WO_H
3118+
3119+#include <linux/netdevice.h>
3120+#include <linux/skbuff.h>
3121+#include "mtk_wed.h"
3122+
3123+#define WED_CTL_SD_LEN1 GENMASK(13, 0)
3124+#define WED_CTL_LAST_SEC1 BIT(14)
3125+#define WED_CTL_BURST BIT(15)
3126+#define WED_CTL_SD_LEN0_SHIFT 16
3127+#define WED_CTL_SD_LEN0 GENMASK(29, 16)
3128+#define WED_CTL_LAST_SEC0 BIT(30)
3129+#define WED_CTL_DMA_DONE BIT(31)
3130+#define WED_INFO_WINFO GENMASK(15, 0)
3131+
3132+#define MTK_WED_WO_TXQ_FREE_THR 10
3133+
3134+#define WED_WO_PROFILE_MAX_LVL 6
3135+
3136+
3137+enum mtk_wed_fw_region_id {
3138+ WO_REGION_EMI = 0,
3139+ WO_REGION_ILM,
3140+ WO_REGION_DATA,
3141+ WO_REGION_BOOT,
3142+ __WO_REGION_MAX
3143+};
3144+
3145+struct wed_wo_profile_stat {
3146+ u32 bound;
3147+ u32 record;
3148+};
3149+
3150+#define PROFILE_STAT(record, val) do { \
3151+ u8 lvl = 0; \
3152+ while (lvl < WED_WO_PROFILE_MAX_LVL) { \
3153+ if (val < record[lvl].bound) { \
3154+ record[lvl].record++; \
3155+ break; \
3156+ } \
3157+ lvl++; \
3158+ } \
3159+ } while (0)
3160+
3161+/* align with wo report structure */
3162+struct wed_wo_log {
3163+ u32 sn;
3164+ u32 total;
3165+ u32 rro;
3166+ u32 mod;
3167+};
3168+
3169+struct wed_wo_rxcnt {
3170+ u16 wlan_idx;
3171+ u16 tid;
3172+ u32 rx_pkt_cnt;
3173+ u32 rx_byte_cnt;
3174+ u32 rx_err_cnt;
3175+ u32 rx_drop_cnt;
3176+};
3177+
3178+struct wed_wo_queue {
3179+ struct wed_wo_queue_regs *regs;
3180+
3181+ spinlock_t lock;
3182+ spinlock_t cleanup_lock;
3183+ struct wed_wo_queue_entry *entry;
3184+ struct wed_wo_desc *desc;
3185+
3186+ u16 first;
3187+ u16 head;
3188+ u16 tail;
3189+ int ndesc;
3190+ int queued;
3191+ int buf_size;
3192+
3193+ u8 hw_idx;
3194+ u8 qid;
3195+ u8 flags;
3196+
3197+ dma_addr_t desc_dma;
3198+ struct page_frag_cache rx_page;
developer53bfd362022-09-29 12:02:18 +08003199+ struct page_frag_cache tx_page;
developer8cb3ac72022-07-04 10:55:14 +08003200+};
3201+
3202+
3203+struct wed_wo_mmio {
3204+ struct regmap *regs;
3205+
3206+ spinlock_t irq_lock;
3207+ u8 irq;
3208+ u32 irqmask;
3209+
3210+ u32 q_int_mask;
3211+ u32 q_exep_mask;
3212+};
3213+
3214+struct wed_wo_mcu {
3215+ struct mutex mutex;
3216+ u32 msg_seq;
3217+ int timeout;
3218+
3219+ struct sk_buff_head res_q;
3220+ wait_queue_head_t wait;
3221+};
3222+
3223+struct wed_wo_exception {
3224+ void* log;
3225+ int log_size;
3226+ dma_addr_t phys;
3227+};
3228+
3229+struct wed_wo_queue_regs {
3230+ u32 desc_base;
3231+ u32 ring_size;
3232+ u32 cpu_idx;
3233+ u32 dma_idx;
3234+};
3235+
3236+struct wed_wo_desc {
3237+ __le32 buf0;
3238+ __le32 ctrl;
3239+ __le32 buf1;
3240+ __le32 info;
3241+ __le32 reserved[4];
3242+} __packed __aligned(32);
3243+
3244+struct wed_wo_queue_entry {
3245+ union {
3246+ void *buf;
3247+ struct sk_buff *skb;
3248+ };
3249+
3250+ u32 dma_addr;
3251+ u16 dma_len;
3252+ u16 wcid;
3253+ bool skip_buf0:1;
3254+ bool skip_buf1:1;
3255+ bool done:1;
3256+};
3257+
developer8cb3ac72022-07-04 10:55:14 +08003258+struct wo_cmd_query {
3259+ u32 query0;
3260+ u32 query1;
3261+};
3262+
3263+struct wed_cmd_hdr {
3264+ /*DW0*/
3265+ u8 ver;
3266+ u8 cmd_id;
3267+ u16 length;
3268+
3269+ /*DW1*/
3270+ u16 uni_id;
3271+ u16 flag;
3272+
3273+ /*DW2*/
3274+ int status;
3275+
3276+ /*DW3*/
3277+ u8 reserved[20];
3278+};
3279+
3280+struct mtk_wed_fw_region {
3281+ void *addr;
3282+ u32 addr_pa;
3283+ u32 size;
3284+ u32 shared;
3285+};
3286+
3287+struct wed_wo_queue_ops;
3288+struct wed_wo_drv_ops;
3289+struct wed_wo_mcu_ops;
3290+
3291+struct wo_rx_total_cnt {
3292+ u64 rx_pkt_cnt;
3293+ u64 rx_byte_cnt;
3294+ u64 rx_err_cnt;
3295+ u64 rx_drop_cnt;
3296+};
3297+
3298+struct mtk_wed_wo {
3299+ struct mtk_wed_hw *hw;
3300+
3301+ struct wed_wo_mmio ccif;
3302+ struct wed_wo_mcu mcu;
3303+ struct wed_wo_exception exp;
3304+
3305+ const struct wed_wo_drv_ops *drv_ops;
3306+ const struct wed_wo_mcu_ops *mcu_ops;
3307+ const struct wed_wo_queue_ops *queue_ops;
3308+
3309+ struct net_device napi_dev;
3310+ spinlock_t rx_lock;
3311+ struct napi_struct napi;
3312+ struct sk_buff_head rx_skb;
3313+ struct wed_wo_queue q_rx;
3314+ struct tasklet_struct irq_tasklet;
3315+
3316+ struct wed_wo_queue q_tx;
3317+
3318+ struct mtk_wed_fw_region region[__WO_REGION_MAX];
3319+
3320+ struct wed_wo_profile_stat total[WED_WO_PROFILE_MAX_LVL];
3321+ struct wed_wo_profile_stat mod[WED_WO_PROFILE_MAX_LVL];
3322+ struct wed_wo_profile_stat rro[WED_WO_PROFILE_MAX_LVL];
3323+ char dirname[4];
3324+ struct wo_rx_total_cnt wo_rxcnt[8][544];
3325+};
3326+
3327+struct wed_wo_queue_ops {
3328+ int (*init)(struct mtk_wed_wo *wo,
3329+ int (*poll)(struct napi_struct *napi, int budget));
3330+
3331+ int (*alloc)(struct mtk_wed_wo *wo, struct wed_wo_queue *q,
3332+ int idx, int n_desc, int bufsize,
3333+ struct wed_wo_queue_regs *regs);
developera3f86ed2022-07-08 14:15:13 +08003334+ void (*free)(struct mtk_wed_wo *wo, struct wed_wo_queue *q);
developer8cb3ac72022-07-04 10:55:14 +08003335+ void (*reset)(struct mtk_wed_wo *wo, struct wed_wo_queue *q);
3336+
3337+ int (*tx_skb)(struct mtk_wed_wo *wo, struct wed_wo_queue *q,
3338+ struct sk_buff *skb);
developer53bfd362022-09-29 12:02:18 +08003339+ void (*tx_clean)(struct mtk_wed_wo *wo, struct wed_wo_queue *q);
developer8cb3ac72022-07-04 10:55:14 +08003340+
3341+ void (*rx_clean)(struct mtk_wed_wo *wo, struct wed_wo_queue *q);
3342+
3343+ void (*kick)(struct mtk_wed_wo *wo, struct wed_wo_queue *q, int offset);
3344+};
3345+
3346+struct wed_wo_drv_ops {
3347+ void (*kickout)(struct mtk_wed_wo *wo);
3348+ void (*set_ack)(struct mtk_wed_wo *wo, u32 mask);
3349+ void (*set_isr)(struct mtk_wed_wo *wo, u32 mask);
3350+ u32 (*get_csr)(struct mtk_wed_wo *wo);
3351+ int (*tx_prepare_skb)(struct mtk_wed_wo *wo);
3352+ bool (*check_excpetion)(struct mtk_wed_wo *wo);
3353+ void (*clear_int)(struct mtk_wed_wo *wo, u32 mask);
3354+};
3355+
3356+struct wed_wo_mcu_ops {
3357+ u32 headroom;
3358+
3359+ int (*mcu_skb_send_msg)(struct mtk_wed_wo *wo, int to_id,
3360+ int cmd, struct sk_buff *skb,
3361+ int *seq, bool wait_resp);
3362+
3363+ int (*mcu_parse_response)(struct mtk_wed_wo *wo, int cmd,
3364+ struct sk_buff *skb, int seq);
3365+
3366+ int (*mcu_restart)(struct mtk_wed_wo *wo);
3367+};
3368+
3369+#define mtk_wed_wo_q_init(wo, ...) (wo)->queue_ops->init((wo), __VA_ARGS__)
3370+#define mtk_wed_wo_q_alloc(wo, ...) (wo)->queue_ops->alloc((wo), __VA_ARGS__)
developera3f86ed2022-07-08 14:15:13 +08003371+#define mtk_wed_wo_q_free(wo, ...) (wo)->queue_ops->free((wo), __VA_ARGS__)
3372+#define mtk_wed_wo_q_reset(wo, ...) (wo)->queue_ops->reset((wo), __VA_ARGS__)
developer8cb3ac72022-07-04 10:55:14 +08003373+#define mtk_wed_wo_q_tx_skb(wo, ...) (wo)->queue_ops->tx_skb((wo), __VA_ARGS__)
developer8cb3ac72022-07-04 10:55:14 +08003374+#define mtk_wed_wo_q_tx_clean(wo, ...) (wo)->queue_ops->tx_clean((wo), __VA_ARGS__)
3375+#define mtk_wed_wo_q_rx_clean(wo, ...) (wo)->queue_ops->rx_clean((wo), __VA_ARGS__)
3376+#define mtk_wed_wo_q_kick(wo, ...) (wo)->queue_ops->kick((wo), __VA_ARGS__)
3377+
3378+enum {
 3379+	WARP_CMD_FLAG_RSP = 1 << 0, /* is response */
 3380+	WARP_CMD_FLAG_NEED_RSP = 1 << 1, /* need response */
3381+ WARP_CMD_FLAG_FROM_TO_WO = 1 << 2, /* send between host and wo */
3382+};
3383+
3384+#define WED_WO_CMD_FLAG_IS_RSP(_hdr) ((_hdr)->flag & (WARP_CMD_FLAG_RSP))
3385+#define WED_WO_CMD_FLAG_SET_RSP(_hdr) ((_hdr)->flag |= (WARP_CMD_FLAG_RSP))
3386+#define WED_WO_CMD_FLAG_IS_NEED_RSP(_hdr) ((_hdr)->flag & (WARP_CMD_FLAG_NEED_RSP))
3387+#define WED_WO_CMD_FLAG_SET_NEED_RSP(_hdr) ((_hdr)->flag |= (WARP_CMD_FLAG_NEED_RSP))
3388+#define WED_WO_CMD_FLAG_IS_FROM_TO_WO(_hdr) ((_hdr)->flag & (WARP_CMD_FLAG_FROM_TO_WO))
3389+#define WED_WO_CMD_FLAG_SET_FROM_TO_WO(_hdr) ((_hdr)->flag |= (WARP_CMD_FLAG_FROM_TO_WO))
3390+
3391+void mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, bool set,
3392+ u32 clear, u32 val);
3393+
3394+static inline void mtk_wed_wo_isr_enable(struct mtk_wed_wo *wo, u32 mask)
3395+{
3396+ mtk_wed_wo_set_isr_mask(wo, false, 0, mask);
3397+
3398+ tasklet_schedule(&wo->irq_tasklet);
3399+}
3400+
3401+static inline void mtk_wed_wo_isr_disable(struct mtk_wed_wo *wo, u32 mask)
3402+{
3403+ mtk_wed_wo_set_isr_mask(wo, true, mask, 0);
3404+}
3405+
3406+static inline void
3407+wo_w32(struct mtk_wed_wo *dev, u32 reg, u32 val)
3408+{
3409+ writel(val, dev->region[WO_REGION_BOOT].addr + reg);
3410+}
3411+
3412+static inline u32
3413+wo_r32(struct mtk_wed_wo *dev, u32 reg)
3414+{
3415+ return readl(dev->region[WO_REGION_BOOT].addr + reg);
3416+}
3417+static inline void
3418+woccif_w32(struct mtk_wed_wo *dev, u32 reg, u32 val)
3419+{
3420+ regmap_write(dev->ccif.regs, reg, val);
3421+}
3422+
3423+static inline u32
3424+woccif_r32(struct mtk_wed_wo *dev, u32 reg)
3425+{
3426+ unsigned int val;
3427+
3428+ regmap_read(dev->ccif.regs, reg, &val);
3429+
3430+ return val;
3431+}
3432+
3433+int mtk_wed_wo_init(struct mtk_wed_hw *hw);
developera3f86ed2022-07-08 14:15:13 +08003434+void mtk_wed_wo_exit(struct mtk_wed_hw *hw);
developer8cb3ac72022-07-04 10:55:14 +08003435+#endif
3436+
3437diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
developer203096a2022-09-13 21:07:19 +08003438index e914cb4..cfa1120 100644
developer8cb3ac72022-07-04 10:55:14 +08003439--- a/include/linux/soc/mediatek/mtk_wed.h
3440+++ b/include/linux/soc/mediatek/mtk_wed.h
3441@@ -7,6 +7,9 @@
3442 #include <linux/pci.h>
3443
3444 #define MTK_WED_TX_QUEUES 2
3445+#define MTK_WED_RX_QUEUES 2
3446+
3447+#define WED_WO_STA_REC 0x6
3448
3449 enum {
3450 MTK_NO_WED,
developer8fec8ae2022-08-15 15:01:09 -07003451@@ -33,6 +36,33 @@ struct mtk_wed_ring {
developer8cb3ac72022-07-04 10:55:14 +08003452 void __iomem *wpdma;
3453 };
3454
3455+struct mtk_rxbm_desc {
3456+ __le32 buf0;
3457+ __le32 token;
3458+} __packed __aligned(4);
3459+
3460+struct dma_buf {
3461+ int size;
3462+ void **pages;
3463+ struct mtk_wdma_desc *desc;
3464+ dma_addr_t desc_phys;
3465+};
3466+
3467+struct dma_entry {
3468+ int size;
3469+ struct mtk_rxbm_desc *desc;
3470+ dma_addr_t desc_phys;
3471+};
3472+
developer8fec8ae2022-08-15 15:01:09 -07003473+struct wo_cmd_rxcnt_t {
3474+ u16 wlan_idx;
3475+ u16 tid;
3476+ u32 rx_pkt_cnt;
3477+ u32 rx_byte_cnt;
3478+ u32 rx_err_cnt;
3479+ u32 rx_drop_cnt;
3480+};
3481+
developer8cb3ac72022-07-04 10:55:14 +08003482 struct mtk_wed_device {
3483 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
3484 const struct mtk_wed_ops *ops;
developer203096a2022-09-13 21:07:19 +08003485@@ -47,37 +77,56 @@ struct mtk_wed_device {
developer8cb3ac72022-07-04 10:55:14 +08003486 struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES];
3487 struct mtk_wed_ring txfree_ring;
3488 struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
3489+ struct mtk_wed_ring rx_ring[MTK_WED_RX_QUEUES];
3490+ struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES];
3491+
3492+ struct dma_buf buf_ring;
3493+ struct dma_entry rx_buf_ring;
3494+ struct page_frag_cache rx_page;
3495
3496 struct {
3497- int size;
3498- void **pages;
3499- struct mtk_wdma_desc *desc;
3500- dma_addr_t desc_phys;
3501- } buf_ring;
3502+ struct mtk_wed_ring rro_ring;
3503+ void __iomem *rro_desc;
3504+ dma_addr_t miod_desc_phys;
3505+ dma_addr_t fdbk_desc_phys;
3506+ u32 mcu_view_miod;
3507+ } rro;
3508
3509 /* filled by driver: */
3510 struct {
3511 struct pci_dev *pci_dev;
3512 void __iomem *base;
3513 u32 bus_type;
3514+ u32 phy_base;
3515
developerbbca0f92022-07-26 17:26:12 +08003516 u32 wpdma_phys;
3517 u32 wpdma_int;
developer8cb3ac72022-07-04 10:55:14 +08003518 u32 wpdma_mask;
3519 u32 wpdma_tx;
3520 u32 wpdma_txfree;
3521+ u32 wpdma_rx_glo;
3522+ u32 wpdma_rx;
3523
3524 u8 tx_tbit[MTK_WED_TX_QUEUES];
3525+ u8 rx_tbit[MTK_WED_RX_QUEUES];
3526 u8 txfree_tbit;
3527
3528 u16 token_start;
3529 unsigned int nbuf;
3530+ unsigned int rx_nbuf;
3531+ unsigned int rx_pkt;
3532+ unsigned int rx_pkt_size;
3533
developer203096a2022-09-13 21:07:19 +08003534 bool wcid_512;
3535
developer8cb3ac72022-07-04 10:55:14 +08003536 u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
3537 int (*offload_enable)(struct mtk_wed_device *wed);
3538 void (*offload_disable)(struct mtk_wed_device *wed);
3539+ u32 (*init_rx_buf)(struct mtk_wed_device *wed,
3540+ int pkt_num);
3541+ void (*release_rx_buf)(struct mtk_wed_device *wed);
developer8fec8ae2022-08-15 15:01:09 -07003542+ void (*update_wo_rxcnt)(struct mtk_wed_device *wed,
3543+ struct wo_cmd_rxcnt_t *rxcnt);
developer8cb3ac72022-07-04 10:55:14 +08003544 } wlan;
3545 #endif
3546 };
developer203096a2022-09-13 21:07:19 +08003547@@ -88,6 +137,10 @@ struct mtk_wed_ops {
developer8cb3ac72022-07-04 10:55:14 +08003548 void __iomem *regs);
3549 int (*txfree_ring_setup)(struct mtk_wed_device *dev,
3550 void __iomem *regs);
3551+ int (*rx_ring_setup)(struct mtk_wed_device *dev, int ring,
3552+ void __iomem *regs);
3553+ int (*msg_update)(struct mtk_wed_device *dev, int cmd_id,
3554+ void *data, int len);
3555 void (*detach)(struct mtk_wed_device *dev);
3556
3557 void (*stop)(struct mtk_wed_device *dev);
developer203096a2022-09-13 21:07:19 +08003558@@ -99,6 +152,8 @@ struct mtk_wed_ops {
developer8cb3ac72022-07-04 10:55:14 +08003559
3560 u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask);
3561 void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
developerbbca0f92022-07-26 17:26:12 +08003562+ void (*ppe_check)(struct mtk_wed_device *dev, struct sk_buff *skb,
developer8cb3ac72022-07-04 10:55:14 +08003563+ u32 reason, u32 hash);
3564 };
3565
3566 extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
developer203096a2022-09-13 21:07:19 +08003567@@ -131,6 +186,10 @@ mtk_wed_device_attach(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +08003568 (_dev)->ops->tx_ring_setup(_dev, _ring, _regs)
3569 #define mtk_wed_device_txfree_ring_setup(_dev, _regs) \
3570 (_dev)->ops->txfree_ring_setup(_dev, _regs)
3571+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) \
3572+ (_dev)->ops->rx_ring_setup(_dev, _ring, _regs)
3573+#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) \
3574+ (_dev)->ops->msg_update(_dev, _id, _msg, _len)
3575 #define mtk_wed_device_reg_read(_dev, _reg) \
3576 (_dev)->ops->reg_read(_dev, _reg)
3577 #define mtk_wed_device_reg_write(_dev, _reg, _val) \
developer203096a2022-09-13 21:07:19 +08003578@@ -139,6 +198,8 @@ mtk_wed_device_attach(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +08003579 (_dev)->ops->irq_get(_dev, _mask)
3580 #define mtk_wed_device_irq_set_mask(_dev, _mask) \
3581 (_dev)->ops->irq_set_mask(_dev, _mask)
3582+#define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) \
3583+ (_dev)->ops->ppe_check(_dev, _skb, _reason, _hash)
3584 #else
3585 static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
3586 {
developer203096a2022-09-13 21:07:19 +08003587@@ -148,10 +209,13 @@ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +08003588 #define mtk_wed_device_start(_dev, _mask) do {} while (0)
3589 #define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV
3590 #define mtk_wed_device_txfree_ring_setup(_dev, _ring, _regs) -ENODEV
3591+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) -ENODEV
3592+#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) -ENODEV
3593 #define mtk_wed_device_reg_read(_dev, _reg) 0
3594 #define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
3595 #define mtk_wed_device_irq_get(_dev, _mask) 0
3596 #define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0)
3597+#define mtk_wed_device_ppe_check(_dev, _hash) do {} while (0)
3598 #endif
3599
3600 #endif
3601--
developere0cbe332022-09-10 17:36:02 +080036022.18.0
developer8cb3ac72022-07-04 10:55:14 +08003603