blob: 8af31426fe000c46a25f48fe31e0b16d507e3286 [file] [log] [blame]
developer8fec8ae2022-08-15 15:01:09 -07001From 7c81104d65728fb1c0f156c46e3cfc5dec24b119 Mon Sep 17 00:00:00 2001
developer8cb3ac72022-07-04 10:55:14 +08002From: Sujuan Chen <sujuan.chen@mediatek.com>
3Date: Wed, 15 Jun 2022 14:38:54 +0800
4Subject: [PATCH 8/8] 9997-add-wed-rx-support-for-mt7896
5
6Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
7---
8 arch/arm64/boot/dts/mediatek/mt7986a.dtsi | 42 +-
9 arch/arm64/boot/dts/mediatek/mt7986b.dtsi | 42 +-
10 drivers/net/ethernet/mediatek/Makefile | 2 +-
developerf11dcd72022-08-27 18:29:27 +080011 drivers/net/ethernet/mediatek/mtk_wed.c | 631 ++++++++++++++++--
developera3f86ed2022-07-08 14:15:13 +080012 drivers/net/ethernet/mediatek/mtk_wed.h | 51 ++
13 drivers/net/ethernet/mediatek/mtk_wed_ccif.c | 133 ++++
developer8cb3ac72022-07-04 10:55:14 +080014 drivers/net/ethernet/mediatek/mtk_wed_ccif.h | 45 ++
15 .../net/ethernet/mediatek/mtk_wed_debugfs.c | 90 +++
developer8fec8ae2022-08-15 15:01:09 -070016 drivers/net/ethernet/mediatek/mtk_wed_mcu.c | 586 ++++++++++++++++
developer8cb3ac72022-07-04 10:55:14 +080017 drivers/net/ethernet/mediatek/mtk_wed_mcu.h | 125 ++++
developerc1b2cd12022-07-28 18:35:24 +080018 drivers/net/ethernet/mediatek/mtk_wed_regs.h | 145 +++-
developerf11dcd72022-08-27 18:29:27 +080019 drivers/net/ethernet/mediatek/mtk_wed_wo.c | 573 ++++++++++++++++
20 drivers/net/ethernet/mediatek/mtk_wed_wo.h | 336 ++++++++++
developer8fec8ae2022-08-15 15:01:09 -070021 include/linux/soc/mediatek/mtk_wed.h | 75 ++-
22 14 files changed, 2796 insertions(+), 75 deletions(-)
developer8cb3ac72022-07-04 10:55:14 +080023 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_ccif.c
24 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_ccif.h
25 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_mcu.c
26 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_mcu.h
27 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_wo.c
28 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_wo.h
29
30diff --git a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
developer8fec8ae2022-08-15 15:01:09 -070031index 87d2b11a9..6abc06db8 100644
developer8cb3ac72022-07-04 10:55:14 +080032--- a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
33+++ b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
34@@ -65,6 +65,12 @@
35 interrupt-parent = <&gic>;
36 interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
37 mediatek,wed_pcie = <&wed_pcie>;
38+ mediatek,ap2woccif = <&ap2woccif0>;
39+ mediatek,wocpu_ilm = <&wocpu0_ilm>;
40+ mediatek,wocpu_dlm = <&wocpu0_dlm>;
41+ mediatek,wocpu_boot = <&cpu_boot>;
42+ mediatek,wocpu_emi = <&wocpu0_emi>;
43+ mediatek,wocpu_data = <&wocpu_data>;
44 };
45
46 wed1: wed@15011000 {
47@@ -74,15 +80,26 @@
48 interrupt-parent = <&gic>;
49 interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
50 mediatek,wed_pcie = <&wed_pcie>;
51+ mediatek,ap2woccif = <&ap2woccif1>;
52+ mediatek,wocpu_ilm = <&wocpu1_ilm>;
53+ mediatek,wocpu_dlm = <&wocpu1_dlm>;
54+ mediatek,wocpu_boot = <&cpu_boot>;
55+ mediatek,wocpu_emi = <&wocpu1_emi>;
56+ mediatek,wocpu_data = <&wocpu_data>;
57 };
58
59- ap2woccif: ap2woccif@151A5000 {
60- compatible = "mediatek,ap2woccif";
61- reg = <0 0x151A5000 0 0x1000>,
62- <0 0x151AD000 0 0x1000>;
63+ ap2woccif0: ap2woccif@151A5000 {
64+ compatible = "mediatek,ap2woccif", "syscon";
65+ reg = <0 0x151A5000 0 0x1000>;
66 interrupt-parent = <&gic>;
67- interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
68- <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
69+ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
70+ };
71+
72+ ap2woccif1: ap2woccif@0x151AD000 {
73+ compatible = "mediatek,ap2woccif", "syscon";
74+ reg = <0 0x151AD000 0 0x1000>;
75+ interrupt-parent = <&gic>;
76+ interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
77 };
78
79 wocpu0_ilm: wocpu0_ilm@151E0000 {
80@@ -95,10 +112,17 @@
81 reg = <0 0x151F0000 0 0x8000>;
82 };
83
84- wocpu_dlm: wocpu_dlm@151E8000 {
85+ wocpu0_dlm: wocpu_dlm@151E8000 {
86+ compatible = "mediatek,wocpu_dlm";
87+ reg = <0 0x151E8000 0 0x2000>;
88+
89+ resets = <&ethsysrst 0>;
90+ reset-names = "wocpu_rst";
91+ };
92+
93+ wocpu1_dlm: wocpu_dlm@0x151F8000 {
94 compatible = "mediatek,wocpu_dlm";
95- reg = <0 0x151E8000 0 0x2000>,
96- <0 0x151F8000 0 0x2000>;
97+ reg = <0 0x151F8000 0 0x2000>;
98
99 resets = <&ethsysrst 0>;
100 reset-names = "wocpu_rst";
101diff --git a/arch/arm64/boot/dts/mediatek/mt7986b.dtsi b/arch/arm64/boot/dts/mediatek/mt7986b.dtsi
102index 67bf86f6a..6710b388b 100644
103--- a/arch/arm64/boot/dts/mediatek/mt7986b.dtsi
104+++ b/arch/arm64/boot/dts/mediatek/mt7986b.dtsi
105@@ -65,6 +65,12 @@
106 interrupt-parent = <&gic>;
107 interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
108 mediatek,wed_pcie = <&wed_pcie>;
109+ mediatek,ap2woccif = <&ap2woccif0>;
110+ mediatek,wocpu_ilm = <&wocpu0_ilm>;
111+ mediatek,wocpu_dlm = <&wocpu0_dlm>;
112+ mediatek,wocpu_boot = <&cpu_boot>;
113+ mediatek,wocpu_emi = <&wocpu0_emi>;
114+ mediatek,wocpu_data = <&wocpu_data>;
115 };
116
117 wed1: wed@15011000 {
118@@ -74,15 +80,26 @@
119 interrupt-parent = <&gic>;
120 interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
121 mediatek,wed_pcie = <&wed_pcie>;
122+ mediatek,ap2woccif = <&ap2woccif1>;
123+ mediatek,wocpu_ilm = <&wocpu1_ilm>;
124+ mediatek,wocpu_dlm = <&wocpu1_dlm>;
125+ mediatek,wocpu_boot = <&cpu_boot>;
126+ mediatek,wocpu_emi = <&wocpu1_emi>;
127+ mediatek,wocpu_data = <&wocpu_data>;
128 };
129
130- ap2woccif: ap2woccif@151A5000 {
131- compatible = "mediatek,ap2woccif";
132- reg = <0 0x151A5000 0 0x1000>,
133- <0 0x151AD000 0 0x1000>;
134+ ap2woccif0: ap2woccif@151A5000 {
135+ compatible = "mediatek,ap2woccif", "syscon";
136+ reg = <0 0x151A5000 0 0x1000>;
137 interrupt-parent = <&gic>;
138- interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
139- <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
140+ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
141+ };
142+
143+ ap2woccif1: ap2woccif@0x151AD000 {
144+ compatible = "mediatek,ap2woccif", "syscon";
145+ reg = <0 0x151AD000 0 0x1000>;
146+ interrupt-parent = <&gic>;
147+ interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
148 };
149
150 wocpu0_ilm: wocpu0_ilm@151E0000 {
151@@ -95,10 +112,17 @@
152 reg = <0 0x151F0000 0 0x8000>;
153 };
154
155- wocpu_dlm: wocpu_dlm@151E8000 {
156+ wocpu0_dlm: wocpu_dlm@151E8000 {
157+ compatible = "mediatek,wocpu_dlm";
158+ reg = <0 0x151E8000 0 0x2000>;
159+
160+ resets = <&ethsysrst 0>;
161+ reset-names = "wocpu_rst";
162+ };
163+
164+ wocpu1_dlm: wocpu_dlm@0x151F8000 {
165 compatible = "mediatek,wocpu_dlm";
166- reg = <0 0x151E8000 0 0x2000>,
167- <0 0x151F8000 0 0x2000>;
168+ reg = <0 0x151F8000 0 0x2000>;
169
170 resets = <&ethsysrst 0>;
171 reset-names = "wocpu_rst";
172diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
developer8fec8ae2022-08-15 15:01:09 -0700173index 3528f1b3c..0c724a55c 100644
developer8cb3ac72022-07-04 10:55:14 +0800174--- a/drivers/net/ethernet/mediatek/Makefile
175+++ b/drivers/net/ethernet/mediatek/Makefile
176@@ -10,5 +10,5 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o
177 ifdef CONFIG_DEBUG_FS
178 mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
179 endif
180-obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
181+obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o mtk_wed_wo.o mtk_wed_mcu.o mtk_wed_ccif.o
182 obj-$(CONFIG_NET_MEDIATEK_HNAT) += mtk_hnat/
183diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
developer8fec8ae2022-08-15 15:01:09 -0700184index 48b0353bb..75527956b 100644
developer8cb3ac72022-07-04 10:55:14 +0800185--- a/drivers/net/ethernet/mediatek/mtk_wed.c
186+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
187@@ -13,11 +13,19 @@
188 #include <linux/debugfs.h>
189 #include <linux/iopoll.h>
190 #include <linux/soc/mediatek/mtk_wed.h>
191+
192 #include "mtk_eth_soc.h"
193 #include "mtk_wed_regs.h"
194 #include "mtk_wed.h"
195 #include "mtk_ppe.h"
196-
197+#include "mtk_wed_mcu.h"
198+#include "mtk_wed_wo.h"
199+
200+struct wo_cmd_ring {
201+ u32 q_base;
202+ u32 cnt;
203+ u32 unit;
204+};
205 static struct mtk_wed_hw *hw_list[2];
206 static DEFINE_MUTEX(hw_lock);
207
developera3f86ed2022-07-08 14:15:13 +0800208@@ -51,6 +59,56 @@ wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
developer8cb3ac72022-07-04 10:55:14 +0800209 wdma_m32(dev, reg, 0, mask);
210 }
211
212+static void
213+wdma_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
214+{
215+ wdma_m32(dev, reg, mask, 0);
216+}
217+
developera3f86ed2022-07-08 14:15:13 +0800218+static u32
219+mtk_wdma_read_reset(struct mtk_wed_device *dev)
220+{
221+ return wdma_r32(dev, MTK_WDMA_GLO_CFG);
222+}
223+
224+static void
225+mtk_wdma_rx_reset(struct mtk_wed_device *dev)
226+{
227+ u32 status;
228+ u32 mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY;
229+ int i;
230+
231+ wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN);
232+ if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
233+ !(status & mask), 0, 1000))
234+ WARN_ON_ONCE(1);
235+
236+ for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
237+ if (!dev->rx_wdma[i].desc) {
238+ wdma_w32(dev, MTK_WDMA_RING_RX(i) +
239+ MTK_WED_RING_OFS_CPU_IDX, 0);
240+ }
241+}
242+
243+static void
244+mtk_wdma_tx_reset(struct mtk_wed_device *dev)
245+{
246+ u32 status;
247+ u32 mask = MTK_WDMA_GLO_CFG_TX_DMA_BUSY;
248+ int i;
249+
250+ wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
251+ if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
252+ !(status & mask), 0, 1000))
253+ WARN_ON_ONCE(1);
254+
255+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
256+ if (!dev->tx_wdma[i].desc) {
257+ wdma_w32(dev, MTK_WDMA_RING_TX(i) +
258+ MTK_WED_RING_OFS_CPU_IDX, 0);
259+ }
260+}
261+
developer8cb3ac72022-07-04 10:55:14 +0800262 static u32
263 mtk_wed_read_reset(struct mtk_wed_device *dev)
264 {
developera3f86ed2022-07-08 14:15:13 +0800265@@ -68,6 +126,52 @@ mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
developer8cb3ac72022-07-04 10:55:14 +0800266 WARN_ON_ONCE(1);
267 }
268
269+static void
270+mtk_wed_wo_reset(struct mtk_wed_device *dev)
271+{
272+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
273+ u8 state = WO_STATE_DISABLE;
274+ u8 state_done = WOIF_DISABLE_DONE;
275+ void __iomem *reg;
276+ u32 value;
277+ unsigned long timeout = jiffies + WOCPU_TIMEOUT;
278+
developerc1b2cd12022-07-28 18:35:24 +0800279+ mtk_wdma_tx_reset(dev);
developera3f86ed2022-07-08 14:15:13 +0800280+
281+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
282+
developer8cb3ac72022-07-04 10:55:14 +0800283+ mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, WO_CMD_CHANGE_STATE,
284+ &state, sizeof(state), false);
285+
286+ do {
287+ value = wed_r32(dev, MTK_WED_SCR0 + 4 * WED_DUMMY_CR_WO_STATUS);
288+ } while (value != state_done && !time_after(jiffies, timeout));
289+
290+ reg = ioremap(WOCPU_MCUSYS_RESET_ADDR, 4);
291+ value = readl((void *)reg);
292+ switch(dev->hw->index) {
293+ case 0:
294+ value |= WOCPU_WO0_MCUSYS_RESET_MASK;
295+ writel(value, (void *)reg);
296+ value &= ~WOCPU_WO0_MCUSYS_RESET_MASK;
297+ writel(value, (void *)reg);
298+ break;
299+ case 1:
300+ value |= WOCPU_WO1_MCUSYS_RESET_MASK;
301+ writel(value, (void *)reg);
302+ value &= ~WOCPU_WO1_MCUSYS_RESET_MASK;
303+ writel(value, (void *)reg);
304+ break;
305+ default:
306+ dev_err(dev->hw->dev, "wrong mtk_wed%d\n",
307+ dev->hw->index);
308+
309+ break;
310+ }
311+
312+ iounmap((void *)reg);
313+}
314+
315 static struct mtk_wed_hw *
316 mtk_wed_assign(struct mtk_wed_device *dev)
317 {
developera3f86ed2022-07-08 14:15:13 +0800318@@ -178,7 +282,7 @@ mtk_wed_free_buffer(struct mtk_wed_device *dev)
319 {
320 struct mtk_wdma_desc *desc = dev->buf_ring.desc;
321 void **page_list = dev->buf_ring.pages;
322- int page_idx;
323+ int ring_size, page_idx;
324 int i;
325
326 if (!page_list)
developerf11dcd72022-08-27 18:29:27 +0800327@@ -187,7 +291,14 @@ mtk_wed_free_buffer(struct mtk_wed_device *dev)
developera3f86ed2022-07-08 14:15:13 +0800328 if (!desc)
329 goto free_pagelist;
330
developerf11dcd72022-08-27 18:29:27 +0800331- for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
developera3f86ed2022-07-08 14:15:13 +0800332+ if (dev->ver == MTK_WED_V1) {
333+ ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
334+ } else {
335+ ring_size = MTK_WED_VLD_GROUP_SIZE * MTK_WED_PER_GROUP_PKT +
336+ MTK_WED_WDMA_RING_SIZE * 2;
337+ }
338+
developerf11dcd72022-08-27 18:29:27 +0800339+ for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
developera3f86ed2022-07-08 14:15:13 +0800340 void *page = page_list[page_idx++];
341
developerf11dcd72022-08-27 18:29:27 +0800342 if (!page)
343@@ -198,13 +309,49 @@ mtk_wed_free_buffer(struct mtk_wed_device *dev)
344 __free_page(page);
345 }
346
347- dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc),
348+ dma_free_coherent(dev->hw->dev, ring_size * sizeof(*desc),
349 desc, dev->buf_ring.desc_phys);
350
351 free_pagelist:
developer8cb3ac72022-07-04 10:55:14 +0800352 kfree(page_list);
353 }
354
355+static int
356+mtk_wed_rx_bm_alloc(struct mtk_wed_device *dev)
357+{
358+ struct mtk_rxbm_desc *desc;
359+ dma_addr_t desc_phys;
360+ int ring_size;
361+
362+ ring_size = dev->wlan.rx_nbuf;
363+ dev->rx_buf_ring.size = ring_size;
364+ desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
365+ &desc_phys, GFP_KERNEL);
366+ if (!desc)
367+ return -ENOMEM;
368+
369+ dev->rx_buf_ring.desc = desc;
370+ dev->rx_buf_ring.desc_phys = desc_phys;
371+
372+ dev->wlan.init_rx_buf(dev, dev->wlan.rx_pkt);
373+ return 0;
374+}
375+
376+static void
377+mtk_wed_free_rx_bm(struct mtk_wed_device *dev)
378+{
379+ struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc;
developera3f86ed2022-07-08 14:15:13 +0800380+ int ring_size = dev->rx_buf_ring.size;
developer8cb3ac72022-07-04 10:55:14 +0800381+
382+ if (!desc)
383+ return;
384+
385+ dev->wlan.release_rx_buf(dev);
386+
developer9dbe57a2022-08-05 18:23:53 +0800387+ dma_free_coherent(dev->hw->dev, ring_size * sizeof(*desc),
388+ desc, dev->rx_buf_ring.desc_phys);
developer8cb3ac72022-07-04 10:55:14 +0800389+}
390+
391 static void
392 mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, int scale)
393 {
developera3f86ed2022-07-08 14:15:13 +0800394@@ -226,13 +373,22 @@ mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800395 mtk_wed_free_ring(dev, &dev->tx_wdma[i], dev->ver);
396 }
397
398+static void
399+mtk_wed_free_rx_rings(struct mtk_wed_device *dev)
400+{
401+ mtk_wed_free_rx_bm(dev);
402+ mtk_wed_free_ring(dev, &dev->rro.rro_ring, 1);
403+}
404+
405 static void
406 mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
407 {
408 u32 wdma_mask;
409
410 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
411-
412+ if (dev->ver > MTK_WED_V1)
413+ wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
414+ GENMASK(1, 0));
415 /* wed control cr set */
416 wed_set(dev, MTK_WED_CTRL,
417 MTK_WED_CTRL_WDMA_INT_AGENT_EN |
developera3f86ed2022-07-08 14:15:13 +0800418@@ -251,7 +407,7 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
developer8cb3ac72022-07-04 10:55:14 +0800419 wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
420 MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
421 } else {
422- /* initail tx interrupt trigger */
423+
424 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
425 MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
426 MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
developera3f86ed2022-07-08 14:15:13 +0800427@@ -262,22 +418,30 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
developer8cb3ac72022-07-04 10:55:14 +0800428 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
429 dev->wlan.tx_tbit[1]));
430
431- /* initail txfree interrupt trigger */
432 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
433 MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
434 MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
435 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
436 dev->wlan.txfree_tbit));
437+
438+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
439+ MTK_WED_WPDMA_INT_CTRL_RX0_EN |
440+ MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
441+ MTK_WED_WPDMA_INT_CTRL_RX1_EN |
442+ MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
443+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
444+ dev->wlan.rx_tbit[0]) |
445+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
446+ dev->wlan.rx_tbit[1]));
447 }
448- /* initail wdma interrupt agent */
449 wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
450 if (dev->ver == MTK_WED_V1) {
451 wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
452 } else {
453 wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
454 wed_set(dev, MTK_WED_WDMA_INT_CTRL,
455- FIELD_PREP(MTK_WED_WDMA_INT_POLL_SRC_SEL,dev->wdma_idx));
456-
457+ FIELD_PREP(MTK_WED_WDMA_INT_POLL_SRC_SEL,
458+ dev->wdma_idx));
459 }
460
461 wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
developerc1b2cd12022-07-28 18:35:24 +0800462@@ -312,6 +476,40 @@ mtk_wed_set_512_support(struct mtk_wed_device *dev, bool en)
developer8cb3ac72022-07-04 10:55:14 +0800463 }
464 }
465
466+static void
467+mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
468+{
469+#define MTK_WFMDA_RX_DMA_EN BIT(2)
470+
471+ int timeout = 3;
472+ u32 cur_idx, regs;
473+
474+ do {
475+ regs = MTK_WED_WPDMA_RING_RX_DATA(idx) +
developerc1b2cd12022-07-28 18:35:24 +0800476+ MTK_WED_RING_OFS_CPU_IDX;
developer8cb3ac72022-07-04 10:55:14 +0800477+ cur_idx = wed_r32(dev, regs);
478+ if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
479+ break;
480+
481+ usleep_range(100000, 200000);
developerc1b2cd12022-07-28 18:35:24 +0800482+ timeout--;
483+ } while (timeout > 0);
developer8cb3ac72022-07-04 10:55:14 +0800484+
485+ if (timeout) {
486+ unsigned int val;
487+
488+ val = wifi_r32(dev, dev->wlan.wpdma_rx_glo -
489+ dev->wlan.phy_base);
490+ val |= MTK_WFMDA_RX_DMA_EN;
491+
492+ wifi_w32(dev, dev->wlan.wpdma_rx_glo -
493+ dev->wlan.phy_base, val);
494+ } else {
495+ dev_err(dev->hw->dev, "mtk_wed%d: rx dma enable failed!\n",
496+ dev->hw->index);
497+ }
498+}
499+
500 static void
501 mtk_wed_dma_enable(struct mtk_wed_device *dev)
502 {
developerc1b2cd12022-07-28 18:35:24 +0800503@@ -336,9 +534,15 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800504 wdma_set(dev, MTK_WDMA_GLO_CFG,
505 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
506 } else {
507+ int idx = 0;
508+
509 wed_set(dev, MTK_WED_WPDMA_CTRL,
510 MTK_WED_WPDMA_CTRL_SDL1_FIXED);
511
512+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
developerc1b2cd12022-07-28 18:35:24 +0800513+ MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
developer8cb3ac72022-07-04 10:55:14 +0800514+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
515+
516 wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
517 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
518 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
developerc1b2cd12022-07-28 18:35:24 +0800519@@ -346,6 +550,15 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800520 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
521 MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
522 MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
523+
524+ wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
525+ MTK_WED_WPDMA_RX_D_RX_DRV_EN |
526+ FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
527+ FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
528+ 0x2));
529+
530+ for (idx = 0; idx < MTK_WED_RX_QUEUES; idx++)
531+ mtk_wed_check_wfdma_rx_fill(dev, idx);
532 }
533 }
534
developerc1b2cd12022-07-28 18:35:24 +0800535@@ -363,19 +576,23 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800536 MTK_WED_GLO_CFG_TX_DMA_EN |
537 MTK_WED_GLO_CFG_RX_DMA_EN);
538
539- wdma_m32(dev, MTK_WDMA_GLO_CFG,
540+ wdma_clr(dev, MTK_WDMA_GLO_CFG,
541 MTK_WDMA_GLO_CFG_TX_DMA_EN |
542 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
543- MTK_WDMA_GLO_CFG_RX_INFO2_PRERES, 0);
544+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
545
546 if (dev->ver == MTK_WED_V1) {
547 regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
548- wdma_m32(dev, MTK_WDMA_GLO_CFG,
549- MTK_WDMA_GLO_CFG_RX_INFO3_PRERES, 0);
550+ wdma_clr(dev, MTK_WDMA_GLO_CFG,
551+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
552 } else {
553 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
554 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
555 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
556+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
557+ MTK_WED_WPDMA_RX_D_RX_DRV_EN);
558+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
559+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
560 }
561 }
562
developerc1b2cd12022-07-28 18:35:24 +0800563@@ -383,10 +600,12 @@ static void
564 mtk_wed_stop(struct mtk_wed_device *dev)
developera3f86ed2022-07-08 14:15:13 +0800565 {
566 mtk_wed_dma_disable(dev);
developerc1b2cd12022-07-28 18:35:24 +0800567+ mtk_wed_set_512_support(dev, false);
developera3f86ed2022-07-08 14:15:13 +0800568
569- if (dev->ver > MTK_WED_V1)
developerc1b2cd12022-07-28 18:35:24 +0800570- mtk_wed_set_512_support(dev, false);
571-
developera3f86ed2022-07-08 14:15:13 +0800572+ if (dev->ver > MTK_WED_V1) {
developera3f86ed2022-07-08 14:15:13 +0800573+ wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
574+ wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0);
575+ }
developera3f86ed2022-07-08 14:15:13 +0800576 mtk_wed_set_ext_int(dev, false);
577
developerc1b2cd12022-07-28 18:35:24 +0800578 wed_clr(dev, MTK_WED_CTRL,
579@@ -395,6 +614,11 @@ mtk_wed_stop(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800580 MTK_WED_CTRL_WED_TX_BM_EN |
581 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
582
583+ if (dev->ver > MTK_WED_V1) {
584+ wed_clr(dev, MTK_WED_CTRL,
585+ MTK_WED_CTRL_WED_RX_BM_EN);
586+ }
587+
588 wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
589 wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
590 wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
developerc1b2cd12022-07-28 18:35:24 +0800591@@ -417,8 +641,19 @@ mtk_wed_detach(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800592
593 mtk_wed_reset(dev, MTK_WED_RESET_WED);
developera3f86ed2022-07-08 14:15:13 +0800594
developer8cb3ac72022-07-04 10:55:14 +0800595+ wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
596+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
597+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
developera3f86ed2022-07-08 14:15:13 +0800598+
developer8cb3ac72022-07-04 10:55:14 +0800599 mtk_wed_free_buffer(dev);
600 mtk_wed_free_tx_rings(dev);
developera3f86ed2022-07-08 14:15:13 +0800601+ if (dev->ver > MTK_WED_V1) {
602+ mtk_wed_wo_reset(dev);
developerf50c1802022-07-05 20:35:53 +0800603+ mtk_wed_free_rx_rings(dev);
developera3f86ed2022-07-08 14:15:13 +0800604+ mtk_wed_wo_exit(hw);
605+ }
606+
developerc1b2cd12022-07-28 18:35:24 +0800607+ mtk_wdma_rx_reset(dev);
developer8cb3ac72022-07-04 10:55:14 +0800608
609 if (dev->wlan.bus_type == MTK_BUS_TYPE_PCIE) {
610 wlan_node = dev->wlan.pci_dev->dev.of_node;
developerc1b2cd12022-07-28 18:35:24 +0800611@@ -477,7 +712,6 @@ mtk_wed_bus_init(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800612 value = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
613 value = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
614
615- /* pcie interrupt status trigger register */
616 wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
617 wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
618
developerc1b2cd12022-07-28 18:35:24 +0800619@@ -501,6 +735,9 @@ mtk_wed_set_wpdma(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800620 wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
621 wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
622 wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
623+
624+ wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
625+ wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
626 } else {
627 wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
628 }
developerc1b2cd12022-07-28 18:35:24 +0800629@@ -549,24 +786,92 @@ mtk_wed_hw_init_early(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800630 FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL,
631 MTK_WDMA_RING_RX(0)));
632 }
633+}
developera3f86ed2022-07-08 14:15:13 +0800634
developer8cb3ac72022-07-04 10:55:14 +0800635+static void
636+mtk_wed_rx_bm_hw_init(struct mtk_wed_device *dev)
637+{
638+ wed_w32(dev, MTK_WED_RX_BM_RX_DMAD,
639+ FIELD_PREP(MTK_WED_RX_BM_RX_DMAD_SDL0, dev->wlan.rx_pkt_size));
640+
641+ wed_w32(dev, MTK_WED_RX_BM_BASE, dev->rx_buf_ring.desc_phys);
developera3f86ed2022-07-08 14:15:13 +0800642+
developer8cb3ac72022-07-04 10:55:14 +0800643+ wed_w32(dev, MTK_WED_RX_BM_INIT_PTR, MTK_WED_RX_BM_INIT_SW_TAIL |
644+ FIELD_PREP(MTK_WED_RX_BM_SW_TAIL, dev->wlan.rx_pkt));
645+
646+ wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH,
647+ FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff));
648+
649+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
650 }
651
652 static void
653-mtk_wed_hw_init(struct mtk_wed_device *dev)
654+mtk_wed_rro_hw_init(struct mtk_wed_device *dev)
655+{
656+ wed_w32(dev, MTK_WED_RROQM_MIOD_CFG,
657+ FIELD_PREP(MTK_WED_RROQM_MIOD_MID_DW, 0x70 >> 2) |
658+ FIELD_PREP(MTK_WED_RROQM_MIOD_MOD_DW, 0x10 >> 2) |
659+ FIELD_PREP(MTK_WED_RROQM_MIOD_ENTRY_DW,
660+ MTK_WED_MIOD_ENTRY_CNT >> 2));
661+
662+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL0, dev->rro.miod_desc_phys);
663+
664+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL1,
665+ FIELD_PREP(MTK_WED_RROQM_MIOD_CNT, MTK_WED_MIOD_CNT));
666+
667+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL0, dev->rro.fdbk_desc_phys);
668+
669+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL1,
670+ FIELD_PREP(MTK_WED_RROQM_FDBK_CNT, MTK_WED_FB_CMD_CNT));
671+
672+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL2, 0);
673+
674+ wed_w32(dev, MTK_WED_RROQ_BASE_L, dev->rro.rro_ring.desc_phys);
675+
676+ wed_set(dev, MTK_WED_RROQM_RST_IDX,
677+ MTK_WED_RROQM_RST_IDX_MIOD |
678+ MTK_WED_RROQM_RST_IDX_FDBK);
679+
680+ wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
681+
682+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL2, MTK_WED_MIOD_CNT -1);
683+
684+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
685+}
686+
687+static void
688+mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
689+{
690+ wed_w32(dev, MTK_WED_RESET, MTK_WED_RESET_RX_ROUTE_QM);
691+
692+ do {
693+ udelay(100);
694+
695+ if (!(wed_r32(dev, MTK_WED_RESET) & MTK_WED_RESET_RX_ROUTE_QM))
696+ break;
697+ } while (1);
698+
699+ /* configure RX_ROUTE_QM */
700+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
701+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
702+ wed_set(dev, MTK_WED_RTQM_GLO_CFG,
703+ FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
704+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
705+
706+ /* enable RX_ROUTE_QM */
707+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
708+}
709+
710+static void
711+mtk_wed_tx_hw_init(struct mtk_wed_device *dev)
712 {
713 int size = dev->buf_ring.size;
714 int rev_size = MTK_WED_TX_RING_SIZE / 2;
715 int thr = 1;
716
717- if (dev->init_done)
718- return;
719-
720- dev->init_done = true;
721- mtk_wed_set_ext_int(dev, false);
722-
723 if (dev->ver > MTK_WED_V1) {
724- size = MTK_WED_WDMA_RING_SIZE * 2 + dev->buf_ring.size;
725+ size = MTK_WED_WDMA_RING_SIZE * ARRAY_SIZE(dev->tx_wdma) +
726+ dev->buf_ring.size;
727 rev_size = size;
728 thr = 0;
729 }
developerc1b2cd12022-07-28 18:35:24 +0800730@@ -609,13 +914,46 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800731 }
732
733 static void
734-mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size, int scale)
735+mtk_wed_rx_hw_init(struct mtk_wed_device *dev)
developerbbca0f92022-07-26 17:26:12 +0800736 {
developer8cb3ac72022-07-04 10:55:14 +0800737+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
developerc1b2cd12022-07-28 18:35:24 +0800738+ MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
739+ MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
developer8cb3ac72022-07-04 10:55:14 +0800740+
741+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
742+
743+ mtk_wed_rx_bm_hw_init(dev);
744+ mtk_wed_rro_hw_init(dev);
745+ mtk_wed_route_qm_hw_init(dev);
746+}
747+
748+static void
749+mtk_wed_hw_init(struct mtk_wed_device *dev)
750+{
751+ if (dev->init_done)
752+ return;
753+
754+ dev->init_done = true;
755+ mtk_wed_set_ext_int(dev, false);
756+ mtk_wed_tx_hw_init(dev);
757+ if (dev->ver > MTK_WED_V1)
758+ mtk_wed_rx_hw_init(dev);
759+}
760+
761+static void
762+mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size, int scale, bool tx)
developerbbca0f92022-07-26 17:26:12 +0800763+{
developer8cb3ac72022-07-04 10:55:14 +0800764+ __le32 ctrl;
765 int i;
766
767+ if (tx)
768+ ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
769+ else
770+ ctrl = cpu_to_le32(MTK_WFDMA_DESC_CTRL_TO_HOST);
771+
772 for (i = 0; i < size; i++) {
773 desc->buf0 = 0;
774- desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
775+ desc->ctrl = ctrl;
776 desc->buf1 = 0;
777 desc->info = 0;
778 desc += scale;
developerc1b2cd12022-07-28 18:35:24 +0800779@@ -674,7 +1012,7 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800780 if (!desc)
781 continue;
782
783- mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE, dev->ver);
784+ mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE, dev->ver, true);
785 }
786
787 if (mtk_wed_poll_busy(dev))
developerc1b2cd12022-07-28 18:35:24 +0800788@@ -692,6 +1030,8 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
789 wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
790 wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
791
792+ mtk_wdma_rx_reset(dev);
793+
794 if (busy) {
795 mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
796 mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
797@@ -729,9 +1069,24 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +0800798
799 }
800
801+static int
802+mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
803+ int size)
804+{
805+ ring->desc = dma_alloc_coherent(dev->hw->dev,
806+ size * sizeof(*ring->desc),
807+ &ring->desc_phys, GFP_KERNEL);
808+ if (!ring->desc)
809+ return -ENOMEM;
810+
811+ ring->size = size;
812+ memset(ring->desc, 0, size);
813+ return 0;
814+}
815+
816 static int
817 mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
818- int size, int scale)
819+ int size, int scale, bool tx)
820 {
821 ring->desc = dma_alloc_coherent(dev->hw->dev,
822 size * sizeof(*ring->desc) * scale,
developerc1b2cd12022-07-28 18:35:24 +0800823@@ -740,17 +1095,18 @@ mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
developer8cb3ac72022-07-04 10:55:14 +0800824 return -ENOMEM;
825
826 ring->size = size;
827- mtk_wed_ring_reset(ring->desc, size, scale);
828+ mtk_wed_ring_reset(ring->desc, size, scale, tx);
829
830 return 0;
831 }
832
833 static int
834-mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
835+mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
836 {
837 struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
838
839- if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, dev->ver))
840+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
841+ dev->ver, true))
842 return -ENOMEM;
843
844 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
developerc1b2cd12022-07-28 18:35:24 +0800845@@ -767,22 +1123,143 @@ mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
developer8cb3ac72022-07-04 10:55:14 +0800846 return 0;
847 }
848
849+static int
850+mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
851+{
852+ struct mtk_wed_ring *wdma = &dev->rx_wdma[idx];
853+
854+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
855+ dev->ver, true))
856+ return -ENOMEM;
857+
858+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
859+ wdma->desc_phys);
860+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
861+ size);
862+ wdma_w32(dev,
863+ MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
864+ wdma_w32(dev,
865+ MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);
866+
867+ if (idx == 0) {
868+ wed_w32(dev, MTK_WED_WDMA_RING_TX
869+ + MTK_WED_RING_OFS_BASE, wdma->desc_phys);
870+ wed_w32(dev, MTK_WED_WDMA_RING_TX
871+ + MTK_WED_RING_OFS_COUNT, size);
872+ wed_w32(dev, MTK_WED_WDMA_RING_TX
873+ + MTK_WED_RING_OFS_CPU_IDX, 0);
874+ wed_w32(dev, MTK_WED_WDMA_RING_TX
875+ + MTK_WED_RING_OFS_DMA_IDX, 0);
876+ }
877+
878+ return 0;
879+}
880+
881+static int
882+mtk_wed_rro_alloc(struct mtk_wed_device *dev)
883+{
884+ struct device_node *np, *node = dev->hw->node;
885+ struct mtk_wed_ring *ring;
886+ struct resource res;
887+ int ret;
888+
889+ np = of_parse_phandle(node, "mediatek,wocpu_dlm", 0);
890+ if (!np)
891+ return -ENODEV;
892+
893+ ret = of_address_to_resource(np, 0, &res);
894+ if (ret)
895+ return ret;
896+
897+ dev->rro.rro_desc = ioremap(res.start, resource_size(&res));
898+
899+ ring = &dev->rro.rro_ring;
900+
901+ dev->rro.miod_desc_phys = res.start;
902+
903+ dev->rro.mcu_view_miod = MTK_WED_WOCPU_VIEW_MIOD_BASE;
904+ dev->rro.fdbk_desc_phys = MTK_WED_MIOD_ENTRY_CNT * MTK_WED_MIOD_CNT
905+ + dev->rro.miod_desc_phys;
906+
907+ if (mtk_wed_rro_ring_alloc(dev, ring, MTK_WED_RRO_QUE_CNT))
908+ return -ENOMEM;
909+
910+ return 0;
911+}
912+
913+static int
914+mtk_wed_rro_cfg(struct mtk_wed_device *dev)
915+{
916+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
917+ struct {
918+ struct wo_cmd_ring ring[2];
919+
920+ u32 wed;
921+ u8 ver;
922+ } req = {
923+ .ring = {
924+ [0] = {
925+ .q_base = dev->rro.mcu_view_miod,
926+ .cnt = MTK_WED_MIOD_CNT,
927+ .unit = MTK_WED_MIOD_ENTRY_CNT,
928+ },
929+ [1] = {
930+ .q_base = dev->rro.mcu_view_miod +
931+ MTK_WED_MIOD_ENTRY_CNT *
932+ MTK_WED_MIOD_CNT,
933+ .cnt = MTK_WED_FB_CMD_CNT,
934+ .unit = 4,
935+ },
936+ },
937+ .wed = 0,
938+ };
939+
940+ return mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, WO_CMD_WED_CFG,
941+ &req, sizeof(req), true);
942+}
943+
944+static int
945+mtk_wed_send_msg(struct mtk_wed_device *dev, int cmd_id, void *data, int len)
946+{
947+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
948+
developerf50c1802022-07-05 20:35:53 +0800949+ if (dev->ver == MTK_WED_V1)
950+ return 0;
951+
developer8cb3ac72022-07-04 10:55:14 +0800952+ return mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, cmd_id, data, len, true);
953+}
954+
955+static void
956+mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb,
957+ u32 reason, u32 hash)
958+{
959+ int idx = dev->hw->index;
960+ struct mtk_eth *eth = dev->hw->eth;
961+ struct ethhdr *eh;
962+
963+ if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) {
964+ if (!skb)
965+ return;
966+
967+ skb_set_mac_header(skb, 0);
968+ eh = eth_hdr(skb);
969+ skb->protocol = eh->h_proto;
970+ mtk_ppe_check_skb(eth->ppe[idx], skb, hash);
971+ }
972+}
973+
974 static void
975 mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
976 {
977- u32 wdma_mask;
978- int i;
979+ int i, ret;
980
981 for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
982 if (!dev->tx_wdma[i].desc)
983- mtk_wed_wdma_ring_setup(dev, i, 16);
984-
985+ mtk_wed_wdma_rx_ring_setup(dev, i, 16);
986
987 mtk_wed_hw_init(dev);
988
989 mtk_wed_set_int(dev, irq_mask);
990-
991-
992 mtk_wed_set_ext_int(dev, true);
993
994 if (dev->ver == MTK_WED_V1) {
developerc1b2cd12022-07-28 18:35:24 +0800995@@ -797,8 +1274,20 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
developer8cb3ac72022-07-04 10:55:14 +0800996 val |= BIT(0);
997 regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
998 } else {
developerc1b2cd12022-07-28 18:35:24 +0800999- mtk_wed_set_512_support(dev, true);
developer8cb3ac72022-07-04 10:55:14 +08001000+ /* driver set mid ready and only once */
1001+ wed_w32(dev, MTK_WED_EXT_INT_MASK1,
1002+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
1003+ wed_w32(dev, MTK_WED_EXT_INT_MASK2,
1004+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
1005+
1006+ wed_r32(dev, MTK_WED_EXT_INT_MASK1);
1007+ wed_r32(dev, MTK_WED_EXT_INT_MASK2);
1008+
1009+ ret = mtk_wed_rro_cfg(dev);
1010+ if (ret)
1011+ return;
developer8cb3ac72022-07-04 10:55:14 +08001012 }
developerc1b2cd12022-07-28 18:35:24 +08001013+ mtk_wed_set_512_support(dev, true);
developer8cb3ac72022-07-04 10:55:14 +08001014
developerc1b2cd12022-07-28 18:35:24 +08001015 mtk_wed_dma_enable(dev);
1016 dev->running = true;
developerbbca0f92022-07-26 17:26:12 +08001017@@ -809,6 +1298,7 @@ mtk_wed_attach(struct mtk_wed_device *dev)
1018 __releases(RCU)
1019 {
1020 struct mtk_wed_hw *hw;
1021+ u16 ver;
1022 int ret = 0;
1023
1024 RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
1025@@ -839,11 +1329,24 @@ mtk_wed_attach(struct mtk_wed_device *dev)
1026
1027 dev->ver = FIELD_GET(MTK_WED_REV_ID_MAJOR,
developer8cb3ac72022-07-04 10:55:14 +08001028 wed_r32(dev, MTK_WED_REV_ID));
developerbbca0f92022-07-26 17:26:12 +08001029+ if (dev->ver > MTK_WED_V1)
1030+ ver = FIELD_GET(MTK_WED_REV_ID_MINOR,
1031+ wed_r32(dev, MTK_WED_REV_ID));
1032+
1033+ dev->rev_id = ((dev->ver << 28) | ver << 16);
developer8cb3ac72022-07-04 10:55:14 +08001034
1035 ret = mtk_wed_buffer_alloc(dev);
1036- if (ret) {
1037- mtk_wed_detach(dev);
1038- goto out;
1039+ if (ret)
1040+ goto error;
1041+
1042+ if (dev->ver > MTK_WED_V1) {
1043+ ret = mtk_wed_rx_bm_alloc(dev);
1044+ if (ret)
1045+ goto error;
1046+
1047+ ret = mtk_wed_rro_alloc(dev);
1048+ if (ret)
1049+ goto error;
1050 }
1051
1052 mtk_wed_hw_init_early(dev);
developerbbca0f92022-07-26 17:26:12 +08001053@@ -851,7 +1354,12 @@ mtk_wed_attach(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +08001054 if (dev->ver == MTK_WED_V1)
1055 regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
1056 BIT(hw->index), 0);
1057+ else
1058+ ret = mtk_wed_wo_init(hw);
1059
1060+error:
1061+ if (ret)
1062+ mtk_wed_detach(dev);
1063 out:
1064 mutex_unlock(&hw_lock);
1065
developerbbca0f92022-07-26 17:26:12 +08001066@@ -877,10 +1385,10 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
developer8cb3ac72022-07-04 10:55:14 +08001067
1068 BUG_ON(idx > ARRAY_SIZE(dev->tx_ring));
1069
1070- if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, 1))
1071+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, 1, true))
1072 return -ENOMEM;
1073
1074- if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
1075+ if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
1076 return -ENOMEM;
1077
1078 ring->reg_base = MTK_WED_RING_TX(idx);
developerbbca0f92022-07-26 17:26:12 +08001079@@ -927,6 +1435,35 @@ mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
developer8cb3ac72022-07-04 10:55:14 +08001080 return 0;
1081 }
1082
1083+static int
1084+mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
1085+{
1086+ struct mtk_wed_ring *ring = &dev->rx_ring[idx];
1087+
1088+ BUG_ON(idx > ARRAY_SIZE(dev->rx_ring));
1089+
1090+
1091+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE, 1, false))
1092+ return -ENOMEM;
1093+
1094+ if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
1095+ return -ENOMEM;
1096+
1097+ ring->reg_base = MTK_WED_RING_RX_DATA(idx);
1098+ ring->wpdma = regs;
1099+
1100+ /* WPDMA -> WED */
1101+ wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
1102+ wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_RX_RING_SIZE);
1103+
1104+ wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_BASE,
1105+ ring->desc_phys);
1106+ wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_COUNT,
1107+ MTK_WED_RX_RING_SIZE);
1108+
1109+ return 0;
1110+}
1111+
1112 static u32
1113 mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
1114 {
developerbbca0f92022-07-26 17:26:12 +08001115@@ -1014,6 +1551,8 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
developer8cb3ac72022-07-04 10:55:14 +08001116 .attach = mtk_wed_attach,
1117 .tx_ring_setup = mtk_wed_tx_ring_setup,
1118 .txfree_ring_setup = mtk_wed_txfree_ring_setup,
1119+ .rx_ring_setup = mtk_wed_rx_ring_setup,
1120+ .msg_update = mtk_wed_send_msg,
1121 .start = mtk_wed_start,
1122 .stop = mtk_wed_stop,
1123 .reset_dma = mtk_wed_reset_dma,
developerbbca0f92022-07-26 17:26:12 +08001124@@ -1022,6 +1561,7 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
developer8cb3ac72022-07-04 10:55:14 +08001125 .irq_get = mtk_wed_irq_get,
1126 .irq_set_mask = mtk_wed_irq_set_mask,
1127 .detach = mtk_wed_detach,
1128+ .ppe_check = mtk_wed_ppe_check,
1129 };
1130 struct device_node *eth_np = eth->dev->of_node;
1131 struct platform_device *pdev;
developerc1b2cd12022-07-28 18:35:24 +08001132@@ -1077,6 +1617,7 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
1133 regmap_write(hw->mirror, 0, 0);
1134 regmap_write(hw->mirror, 4, 0);
1135 }
1136+ hw->ver = MTK_WED_V1;
1137 }
1138
1139 mtk_wed_hw_add_debugfs(hw);
developer8cb3ac72022-07-04 10:55:14 +08001140diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
developer8fec8ae2022-08-15 15:01:09 -07001141index 9b17b7405..8ef5253ca 100644
developer8cb3ac72022-07-04 10:55:14 +08001142--- a/drivers/net/ethernet/mediatek/mtk_wed.h
1143+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
1144@@ -13,6 +13,7 @@
1145 #define MTK_WED_PKT_SIZE 1900
1146 #define MTK_WED_BUF_SIZE 2048
1147 #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
1148+#define MTK_WED_RX_RING_SIZE 1536
1149
1150 #define MTK_WED_TX_RING_SIZE 2048
1151 #define MTK_WED_WDMA_RING_SIZE 512
1152@@ -21,8 +22,15 @@
1153 #define MTK_WED_PER_GROUP_PKT 128
1154
1155 #define MTK_WED_FBUF_SIZE 128
1156+#define MTK_WED_MIOD_CNT 16
1157+#define MTK_WED_FB_CMD_CNT 1024
1158+#define MTK_WED_RRO_QUE_CNT 8192
1159+#define MTK_WED_MIOD_ENTRY_CNT 128
1160+
1161+#define MODULE_ID_WO 1
1162
1163 struct mtk_eth;
1164+struct mtk_wed_wo;
1165
1166 struct mtk_wed_hw {
1167 struct device_node *node;
1168@@ -34,12 +42,14 @@ struct mtk_wed_hw {
1169 struct regmap *mirror;
1170 struct dentry *debugfs_dir;
1171 struct mtk_wed_device *wed_dev;
1172+ struct mtk_wed_wo *wed_wo;
1173 u32 debugfs_reg;
1174 u32 num_flows;
1175 u32 wdma_phy;
1176 char dirname[5];
1177 int irq;
1178 int index;
1179+ u32 ver;
1180 };
1181
1182 struct mtk_wdma_info {
1183@@ -66,6 +76,18 @@ wed_r32(struct mtk_wed_device *dev, u32 reg)
1184 return val;
1185 }
1186
1187+static inline u32
1188+wifi_r32(struct mtk_wed_device *dev, u32 reg)
1189+{
1190+ return readl(dev->wlan.base + reg);
1191+}
1192+
1193+static inline void
1194+wifi_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
1195+{
1196+ writel(val, dev->wlan.base + reg);
1197+}
1198+
1199 static inline void
1200 wdma_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
1201 {
1202@@ -114,6 +136,23 @@ wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
1203 writel(val, dev->txfree_ring.wpdma + reg);
1204 }
1205
1206+static inline u32
1207+wpdma_rx_r32(struct mtk_wed_device *dev, int ring, u32 reg)
1208+{
1209+ if (!dev->rx_ring[ring].wpdma)
1210+ return 0;
1211+
1212+ return readl(dev->rx_ring[ring].wpdma + reg);
1213+}
1214+
1215+static inline void
1216+wpdma_rx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val)
1217+{
1218+ if (!dev->rx_ring[ring].wpdma)
1219+ return;
1220+
1221+ writel(val, dev->rx_ring[ring].wpdma + reg);
1222+}
1223 void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
1224 void __iomem *wdma, u32 wdma_phy, int index);
1225 void mtk_wed_exit(void);
developera3f86ed2022-07-08 14:15:13 +08001226@@ -146,4 +185,16 @@ static inline void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
developer8cb3ac72022-07-04 10:55:14 +08001227 }
1228 #endif
1229
1230+int wed_wo_hardware_init(struct mtk_wed_wo *wo, irq_handler_t isr);
developera3f86ed2022-07-08 14:15:13 +08001231+void wed_wo_hardware_exit(struct mtk_wed_wo *wo);
developer8cb3ac72022-07-04 10:55:14 +08001232+int wed_wo_mcu_init(struct mtk_wed_wo *wo);
1233+int mtk_wed_exception_init(struct mtk_wed_wo *wo);
1234+void mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo, struct sk_buff *skb);
1235+int mtk_wed_mcu_cmd_sanity_check(struct mtk_wed_wo *wo, struct sk_buff *skb);
1236+void wed_wo_mcu_debugfs(struct mtk_wed_hw *hw, struct dentry *dir);
1237+void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo, struct sk_buff *skb);
1238+int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo,int to_id, int cmd,
1239+ const void *data, int len, bool wait_resp);
1240+int mtk_wed_wo_rx_poll(struct napi_struct *napi, int budget);
1241+
1242 #endif
1243diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ccif.c b/drivers/net/ethernet/mediatek/mtk_wed_ccif.c
1244new file mode 100644
developer8fec8ae2022-08-15 15:01:09 -07001245index 000000000..22ef337d0
developer8cb3ac72022-07-04 10:55:14 +08001246--- /dev/null
1247+++ b/drivers/net/ethernet/mediatek/mtk_wed_ccif.c
developera3f86ed2022-07-08 14:15:13 +08001248@@ -0,0 +1,133 @@
developer8cb3ac72022-07-04 10:55:14 +08001249+// SPDX-License-Identifier: GPL-2.0-only
1250+
1251+#include <linux/soc/mediatek/mtk_wed.h>
1252+#include <linux/of_address.h>
1253+#include <linux/mfd/syscon.h>
1254+#include <linux/of_irq.h>
1255+#include "mtk_wed_ccif.h"
1256+#include "mtk_wed_regs.h"
1257+#include "mtk_wed_wo.h"
1258+
1259+static inline void woif_set_isr(struct mtk_wed_wo *wo, u32 mask)
1260+{
1261+ woccif_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
1262+}
1263+
1264+static inline u32 woif_get_csr(struct mtk_wed_wo *wo)
1265+{
1266+ u32 val;
1267+
1268+ val = woccif_r32(wo, MTK_WED_WO_CCIF_RCHNUM);
1269+
1270+ return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
1271+}
1272+
1273+static inline void woif_set_ack(struct mtk_wed_wo *wo, u32 mask)
1274+{
1275+ woccif_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
1276+}
1277+
1278+static inline void woif_kickout(struct mtk_wed_wo *wo)
1279+{
1280+ woccif_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
1281+ woccif_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
1282+}
1283+
1284+static inline void woif_clear_int(struct mtk_wed_wo *wo, u32 mask)
1285+{
1286+ woccif_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
1287+ woccif_r32(wo, MTK_WED_WO_CCIF_RCHNUM);
1288+}
1289+
1290+int wed_wo_hardware_init(struct mtk_wed_wo *wo, irq_handler_t isr)
1291+{
1292+ static const struct wed_wo_drv_ops wo_drv_ops = {
1293+ .kickout = woif_kickout,
1294+ .set_ack = woif_set_ack,
1295+ .set_isr = woif_set_isr,
1296+ .get_csr = woif_get_csr,
1297+ .clear_int = woif_clear_int,
1298+ };
1299+ struct device_node *np, *node = wo->hw->node;
1300+ struct wed_wo_queue_regs queues;
1301+ struct regmap *regs;
1302+ int ret;
1303+
1304+ np = of_parse_phandle(node, "mediatek,ap2woccif", 0);
1305+ if (!np)
1306+ return -ENODEV;
1307+
1308+ regs = syscon_regmap_lookup_by_phandle(np, NULL);
1309+ if (!regs)
1310+ return -ENODEV;
1311+
1312+ wo->drv_ops = &wo_drv_ops;
1313+
1314+ wo->ccif.regs = regs;
1315+ wo->ccif.irq = irq_of_parse_and_map(np, 0);
1316+
1317+ spin_lock_init(&wo->ccif.irq_lock);
1318+
1319+ ret = request_irq(wo->ccif.irq, isr, IRQF_TRIGGER_HIGH,
1320+ "wo_ccif_isr", wo);
1321+ if (ret)
1322+ goto free_irq;
1323+
1324+ queues.desc_base = MTK_WED_WO_CCIF_DUMMY1;
1325+ queues.ring_size = MTK_WED_WO_CCIF_DUMMY2;
1326+ queues.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;
1327+ queues.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
1328+
1329+ ret = mtk_wed_wo_q_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
1330+ MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
1331+ &queues);
1332+
1333+ if (ret)
1334+ goto free_irq;
1335+
1336+ queues.desc_base = MTK_WED_WO_CCIF_DUMMY5;
1337+ queues.ring_size = MTK_WED_WO_CCIF_DUMMY6;
1338+ queues.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;
1339+ queues.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
1340+
1341+ ret = mtk_wed_wo_q_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
1342+ MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
1343+ &queues);
1344+ if (ret)
1345+ goto free_irq;
1346+
1347+ wo->ccif.q_int_mask = MTK_WED_WO_RXCH_INT_MASK;
1348+
1349+ ret = mtk_wed_wo_q_init(wo, mtk_wed_wo_rx_poll);
1350+ if (ret)
1351+ goto free_irq;
1352+
1353+ wo->ccif.q_exep_mask = MTK_WED_WO_EXCEPTION_INT_MASK;
1354+ wo->ccif.irqmask = MTK_WED_WO_ALL_INT_MASK;
1355+
1356+ /* rx queue irqmask */
1357+ wo->drv_ops->set_isr(wo, wo->ccif.irqmask);
1358+
1359+ return 0;
1360+
1361+free_irq:
developera3f86ed2022-07-08 14:15:13 +08001362+ free_irq(wo->ccif.irq, wo);
developer8cb3ac72022-07-04 10:55:14 +08001363+
1364+ return ret;
1365+}
1366+
developera3f86ed2022-07-08 14:15:13 +08001367+void wed_wo_hardware_exit(struct mtk_wed_wo *wo)
developer8cb3ac72022-07-04 10:55:14 +08001368+{
developera3f86ed2022-07-08 14:15:13 +08001369+ wo->drv_ops->set_isr(wo, 0);
1370+
1371+ disable_irq(wo->ccif.irq);
1372+ free_irq(wo->ccif.irq, wo);
1373+
1374+ tasklet_disable(&wo->irq_tasklet);
1375+ netif_napi_del(&wo->napi);
1376+
1377+ mtk_wed_wo_q_tx_clean(wo, &wo->q_tx, true);
1378+ mtk_wed_wo_q_rx_clean(wo, &wo->q_rx);
1379+ mtk_wed_wo_q_free(wo, &wo->q_tx);
1380+ mtk_wed_wo_q_free(wo, &wo->q_rx);
developer8cb3ac72022-07-04 10:55:14 +08001381+}
1382diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ccif.h b/drivers/net/ethernet/mediatek/mtk_wed_ccif.h
1383new file mode 100644
developer8fec8ae2022-08-15 15:01:09 -07001384index 000000000..68ade449c
developer8cb3ac72022-07-04 10:55:14 +08001385--- /dev/null
1386+++ b/drivers/net/ethernet/mediatek/mtk_wed_ccif.h
1387@@ -0,0 +1,45 @@
1388+// SPDX-License-Identifier: GPL-2.0-only
1389+
1390+#ifndef __MTK_WED_CCIF_H
1391+#define __MTK_WED_CCIF_H
1392+
1393+#define MTK_WED_WO_RING_SIZE 256
1394+#define MTK_WED_WO_CMD_LEN 1504
1395+
1396+#define MTK_WED_WO_TXCH_NUM 0
1397+#define MTK_WED_WO_RXCH_NUM 1
1398+#define MTK_WED_WO_RXCH_WO_EXCEPTION 7
1399+
1400+#define MTK_WED_WO_TXCH_INT_MASK BIT(0)
1401+#define MTK_WED_WO_RXCH_INT_MASK BIT(1)
1402+#define MTK_WED_WO_EXCEPTION_INT_MASK BIT(7)
1403+#define MTK_WED_WO_ALL_INT_MASK MTK_WED_WO_RXCH_INT_MASK | \
1404+ MTK_WED_WO_EXCEPTION_INT_MASK
1405+
1406+#define MTK_WED_WO_CCIF_BUSY 0x004
1407+#define MTK_WED_WO_CCIF_START 0x008
1408+#define MTK_WED_WO_CCIF_TCHNUM 0x00c
1409+#define MTK_WED_WO_CCIF_RCHNUM 0x010
1410+#define MTK_WED_WO_CCIF_RCHNUM_MASK GENMASK(7, 0)
1411+
1412+#define MTK_WED_WO_CCIF_ACK 0x014
1413+#define MTK_WED_WO_CCIF_IRQ0_MASK 0x018
1414+#define MTK_WED_WO_CCIF_IRQ1_MASK 0x01c
1415+#define MTK_WED_WO_CCIF_DUMMY1 0x020
1416+#define MTK_WED_WO_CCIF_DUMMY2 0x024
1417+#define MTK_WED_WO_CCIF_DUMMY3 0x028
1418+#define MTK_WED_WO_CCIF_DUMMY4 0x02c
1419+#define MTK_WED_WO_CCIF_SHADOW1 0x030
1420+#define MTK_WED_WO_CCIF_SHADOW2 0x034
1421+#define MTK_WED_WO_CCIF_SHADOW3 0x038
1422+#define MTK_WED_WO_CCIF_SHADOW4 0x03c
1423+#define MTK_WED_WO_CCIF_DUMMY5 0x050
1424+#define MTK_WED_WO_CCIF_DUMMY6 0x054
1425+#define MTK_WED_WO_CCIF_DUMMY7 0x058
1426+#define MTK_WED_WO_CCIF_DUMMY8 0x05c
1427+#define MTK_WED_WO_CCIF_SHADOW5 0x060
1428+#define MTK_WED_WO_CCIF_SHADOW6 0x064
1429+#define MTK_WED_WO_CCIF_SHADOW7 0x068
1430+#define MTK_WED_WO_CCIF_SHADOW8 0x06c
1431+
1432+#endif
1433diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
developer8fec8ae2022-08-15 15:01:09 -07001434index f420f187e..4a9e684ed 100644
developer8cb3ac72022-07-04 10:55:14 +08001435--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
1436+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
1437@@ -2,6 +2,7 @@
1438 /* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
1439
1440 #include <linux/seq_file.h>
1441+#include <linux/soc/mediatek/mtk_wed.h>
1442 #include "mtk_wed.h"
1443 #include "mtk_wed_regs.h"
1444
1445@@ -18,6 +19,8 @@ enum {
1446 DUMP_TYPE_WDMA,
1447 DUMP_TYPE_WPDMA_TX,
1448 DUMP_TYPE_WPDMA_TXFREE,
1449+ DUMP_TYPE_WPDMA_RX,
1450+ DUMP_TYPE_WED_RRO,
1451 };
1452
1453 #define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
1454@@ -36,6 +39,10 @@ enum {
1455
1456 #define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n)
1457 #define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE)
1458+#define DUMP_WPDMA_RX_RING(_n) DUMP_RING("WPDMA_RX" #_n, 0, DUMP_TYPE_WPDMA_RX, _n)
1459+#define DUMP_WED_RRO_RING(_base)DUMP_RING("WED_RRO_MIOD", MTK_##_base, DUMP_TYPE_WED_RRO)
1460+#define DUMP_WED_RRO_FDBK(_base)DUMP_RING("WED_RRO_FDBK", MTK_##_base, DUMP_TYPE_WED_RRO)
1461+
1462
1463 static void
1464 print_reg_val(struct seq_file *s, const char *name, u32 val)
1465@@ -58,6 +65,7 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
1466 cur->name);
1467 continue;
1468 case DUMP_TYPE_WED:
1469+ case DUMP_TYPE_WED_RRO:
1470 val = wed_r32(dev, cur->offset);
1471 break;
1472 case DUMP_TYPE_WDMA:
1473@@ -69,6 +77,9 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
1474 case DUMP_TYPE_WPDMA_TXFREE:
1475 val = wpdma_txfree_r32(dev, cur->offset);
1476 break;
1477+ case DUMP_TYPE_WPDMA_RX:
1478+ val = wpdma_rx_r32(dev, cur->base, cur->offset);
1479+ break;
1480 }
1481 print_reg_val(s, cur->name, val);
1482 }
1483@@ -132,6 +143,81 @@ wed_txinfo_show(struct seq_file *s, void *data)
1484 }
1485 DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
1486
1487+static int
1488+wed_rxinfo_show(struct seq_file *s, void *data)
1489+{
1490+ static const struct reg_dump regs[] = {
1491+ DUMP_STR("WPDMA RX"),
1492+ DUMP_WPDMA_RX_RING(0),
1493+ DUMP_WPDMA_RX_RING(1),
1494+
1495+ DUMP_STR("WPDMA RX"),
1496+ DUMP_WED(WED_WPDMA_RX_D_MIB(0)),
1497+ DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(0)),
1498+ DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(0)),
1499+ DUMP_WED(WED_WPDMA_RX_D_MIB(1)),
1500+ DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(1)),
1501+ DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(1)),
1502+ DUMP_WED(WED_WPDMA_RX_D_COHERENT_MIB),
1503+
1504+ DUMP_STR("WED RX"),
1505+ DUMP_WED_RING(WED_RING_RX_DATA(0)),
1506+ DUMP_WED_RING(WED_RING_RX_DATA(1)),
1507+
1508+ DUMP_STR("WED RRO"),
1509+ DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0),
1510+ DUMP_WED(WED_RROQM_MID_MIB),
1511+ DUMP_WED(WED_RROQM_MOD_MIB),
1512+ DUMP_WED(WED_RROQM_MOD_COHERENT_MIB),
1513+ DUMP_WED_RRO_FDBK(WED_RROQM_FDBK_CTRL0),
1514+ DUMP_WED(WED_RROQM_FDBK_IND_MIB),
1515+ DUMP_WED(WED_RROQM_FDBK_ENQ_MIB),
1516+ DUMP_WED(WED_RROQM_FDBK_ANC_MIB),
1517+ DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB),
1518+
1519+ DUMP_STR("WED Route QM"),
1520+ DUMP_WED(WED_RTQM_R2H_MIB(0)),
1521+ DUMP_WED(WED_RTQM_R2Q_MIB(0)),
1522+ DUMP_WED(WED_RTQM_Q2H_MIB(0)),
1523+ DUMP_WED(WED_RTQM_R2H_MIB(1)),
1524+ DUMP_WED(WED_RTQM_R2Q_MIB(1)),
1525+ DUMP_WED(WED_RTQM_Q2H_MIB(1)),
1526+ DUMP_WED(WED_RTQM_Q2N_MIB),
1527+ DUMP_WED(WED_RTQM_Q2B_MIB),
1528+ DUMP_WED(WED_RTQM_PFDBK_MIB),
1529+
1530+ DUMP_STR("WED WDMA TX"),
1531+ DUMP_WED(WED_WDMA_TX_MIB),
1532+ DUMP_WED_RING(WED_WDMA_RING_TX),
1533+
1534+ DUMP_STR("WDMA TX"),
1535+ DUMP_WDMA(WDMA_GLO_CFG),
1536+ DUMP_WDMA_RING(WDMA_RING_TX(0)),
1537+ DUMP_WDMA_RING(WDMA_RING_TX(1)),
1538+
1539+ DUMP_STR("WED RX BM"),
1540+ DUMP_WED(WED_RX_BM_BASE),
1541+ DUMP_WED(WED_RX_BM_RX_DMAD),
1542+ DUMP_WED(WED_RX_BM_PTR),
1543+ DUMP_WED(WED_RX_BM_TKID_MIB),
1544+ DUMP_WED(WED_RX_BM_BLEN),
1545+ DUMP_WED(WED_RX_BM_STS),
1546+ DUMP_WED(WED_RX_BM_INTF2),
1547+ DUMP_WED(WED_RX_BM_INTF),
1548+ DUMP_WED(WED_RX_BM_ERR_STS),
1549+ };
1550+
1551+ struct mtk_wed_hw *hw = s->private;
1552+ struct mtk_wed_device *dev = hw->wed_dev;
1553+
1554+ if (!dev)
1555+ return 0;
1556+
1557+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
1558+
1559+ return 0;
1560+}
1561+DEFINE_SHOW_ATTRIBUTE(wed_rxinfo);
1562
1563 static int
1564 mtk_wed_reg_set(void *data, u64 val)
1565@@ -175,4 +261,8 @@ void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
1566 debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
1567 debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
1568 debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
1569+ debugfs_create_file_unsafe("rxinfo", 0400, dir, hw, &wed_rxinfo_fops);
developerc1b2cd12022-07-28 18:35:24 +08001570+ if (hw->ver != MTK_WED_V1) {
developer8cb3ac72022-07-04 10:55:14 +08001571+ wed_wo_mcu_debugfs(hw, dir);
1572+ }
1573 }
1574diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
1575new file mode 100644
developer8fec8ae2022-08-15 15:01:09 -07001576index 000000000..723bdfd55
developer8cb3ac72022-07-04 10:55:14 +08001577--- /dev/null
1578+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
developer8fec8ae2022-08-15 15:01:09 -07001579@@ -0,0 +1,586 @@
developer8cb3ac72022-07-04 10:55:14 +08001580+// SPDX-License-Identifier: GPL-2.0-only
1581+
1582+#include <linux/skbuff.h>
1583+#include <linux/debugfs.h>
1584+#include <linux/firmware.h>
1585+#include <linux/of_address.h>
1586+#include <linux/soc/mediatek/mtk_wed.h>
1587+#include "mtk_wed_regs.h"
1588+#include "mtk_wed_mcu.h"
1589+#include "mtk_wed_wo.h"
1590+
1591+struct sk_buff *
1592+mtk_wed_mcu_msg_alloc(struct mtk_wed_wo *wo,
1593+ const void *data, int data_len)
1594+{
1595+ const struct wed_wo_mcu_ops *ops = wo->mcu_ops;
1596+ int length = ops->headroom + data_len;
1597+ struct sk_buff *skb;
1598+
1599+ skb = alloc_skb(length, GFP_KERNEL);
1600+ if (!skb)
1601+ return NULL;
1602+
1603+ memset(skb->head, 0, length);
1604+ skb_reserve(skb, ops->headroom);
1605+
1606+ if (data && data_len)
1607+ skb_put_data(skb, data, data_len);
1608+
1609+ return skb;
1610+}
1611+
1612+struct sk_buff *
1613+mtk_wed_mcu_get_response(struct mtk_wed_wo *wo, unsigned long expires)
1614+{
1615+ unsigned long timeout;
1616+
1617+ if (!time_is_after_jiffies(expires))
1618+ return NULL;
1619+
1620+ timeout = expires - jiffies;
1621+ wait_event_timeout(wo->mcu.wait,
1622+ (!skb_queue_empty(&wo->mcu.res_q)),
1623+ timeout);
1624+
1625+ return skb_dequeue(&wo->mcu.res_q);
1626+}
1627+
1628+int
1629+mtk_wed_mcu_skb_send_and_get_msg(struct mtk_wed_wo *wo,
1630+ int to_id, int cmd, struct sk_buff *skb,
1631+ bool wait_resp, struct sk_buff **ret_skb)
1632+{
1633+ unsigned long expires;
1634+ int ret, seq;
1635+
1636+ if (ret_skb)
1637+ *ret_skb = NULL;
1638+
1639+ mutex_lock(&wo->mcu.mutex);
1640+
1641+ ret = wo->mcu_ops->mcu_skb_send_msg(wo, to_id, cmd, skb, &seq, wait_resp);
1642+ if (ret < 0)
1643+ goto out;
1644+
1645+ if (!wait_resp) {
1646+ ret = 0;
1647+ goto out;
1648+ }
1649+
1650+ expires = jiffies + wo->mcu.timeout;
1651+
1652+ do {
1653+ skb = mtk_wed_mcu_get_response(wo, expires);
1654+ ret = wo->mcu_ops->mcu_parse_response(wo, cmd, skb, seq);
1655+
1656+ if (!ret && ret_skb)
1657+ *ret_skb = skb;
1658+ else
1659+ dev_kfree_skb(skb);
1660+ } while (ret == -EAGAIN);
1661+
1662+out:
1663+ mutex_unlock(&wo->mcu.mutex);
1664+
1665+ return ret;
1666+}
1667+
1668+void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo,
1669+ struct sk_buff *skb)
1670+{
1671+ skb_queue_tail(&wo->mcu.res_q, skb);
1672+ wake_up(&wo->mcu.wait);
1673+}
1674+
1675+static int mtk_wed_mcu_send_and_get_msg(struct mtk_wed_wo *wo,
1676+ int to_id, int cmd, const void *data, int len,
1677+ bool wait_resp, struct sk_buff **ret_skb)
1678+{
1679+ struct sk_buff *skb;
1680+
1681+ skb = mtk_wed_mcu_msg_alloc(wo, data, len);
1682+ if (!skb)
1683+ return -ENOMEM;
1684+
1685+ return mtk_wed_mcu_skb_send_and_get_msg(wo, to_id, cmd, skb, wait_resp, ret_skb);
1686+}
1687+
1688+int
1689+mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo,
1690+ int to_id, int cmd,
1691+ const void *data, int len, bool wait_resp)
1692+{
1693+ struct sk_buff *skb = NULL;
1694+ int ret = 0;
1695+
1696+ ret = mtk_wed_mcu_send_and_get_msg(wo, to_id, cmd, data,
1697+ len, wait_resp, &skb);
1698+ if (skb)
1699+ dev_kfree_skb(skb);
1700+
1701+ return ret;
1702+}
1703+
1704+int mtk_wed_exception_init(struct mtk_wed_wo *wo)
1705+{
1706+ struct wed_wo_exception *exp = &wo->exp;
1707+ struct {
1708+ u32 arg0;
1709+ u32 arg1;
1710+ }req;
1711+
1712+ exp->log_size = EXCEPTION_LOG_SIZE;
1713+ exp->log = kmalloc(exp->log_size, GFP_ATOMIC);
1714+ if (!exp->log)
1715+ return -ENOMEM;
1716+
1717+ memset(exp->log, 0, exp->log_size);
1718+ exp->phys = dma_map_single(wo->hw->dev, exp->log, exp->log_size,
1719+ DMA_FROM_DEVICE);
1720+
1721+ if (unlikely(dma_mapping_error(wo->hw->dev, exp->phys))) {
1722+ dev_info(wo->hw->dev, "dma map error\n");
1723+ goto free;
1724+ }
1725+
1726+ req.arg0 = (u32)exp->phys;
1727+ req.arg1 = (u32)exp->log_size;
1728+
1729+ return mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, WO_CMD_EXCEPTION_INIT,
1730+ &req, sizeof(req), false);
1731+
1732+free:
1733+ kfree(exp->log);
1734+ return -ENOMEM;
1735+}
1736+
1737+int
1738+mtk_wed_mcu_cmd_sanity_check(struct mtk_wed_wo *wo, struct sk_buff *skb)
1739+{
1740+ struct wed_cmd_hdr *hdr = (struct wed_cmd_hdr *)skb->data;
1741+
1742+ if (hdr->ver != 0)
1743+ return WARP_INVALID_PARA_STATUS;
1744+
1745+ if (skb->len < sizeof(struct wed_cmd_hdr))
1746+ return WARP_INVALID_PARA_STATUS;
1747+
1748+ if (skb->len != hdr->length)
1749+ return WARP_INVALID_PARA_STATUS;
1750+
1751+ return WARP_OK_STATUS;
1752+}
1753+
1754+void
1755+mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo, struct sk_buff *skb)
1756+{
developer8fec8ae2022-08-15 15:01:09 -07001757+ struct mtk_wed_device *wed = wo->hw->wed_dev;
developer8cb3ac72022-07-04 10:55:14 +08001758+ struct wed_cmd_hdr *hdr = (struct wed_cmd_hdr *)skb->data;
1759+ struct wed_wo_log *record;
developer8fec8ae2022-08-15 15:01:09 -07001760+ struct wo_cmd_rxcnt_t *rxcnt;
developer8cb3ac72022-07-04 10:55:14 +08001761+ char *msg = (char *)(skb->data + sizeof(struct wed_cmd_hdr));
1762+ u16 msg_len = skb->len - sizeof(struct wed_cmd_hdr);
1763+ u32 i, cnt = 0;
1764+
1765+ switch (hdr->cmd_id) {
1766+ case WO_EVT_LOG_DUMP:
1767+ pr_info("[WO LOG]: %s\n", msg);
1768+ break;
1769+ case WO_EVT_PROFILING:
1770+ cnt = msg_len / (sizeof(struct wed_wo_log));
1771+ record = (struct wed_wo_log *) msg;
1772+ dev_info(wo->hw->dev, "[WO Profiling]: %d report arrived!\n", cnt);
1773+
1774+ for (i = 0 ; i < cnt ; i++) {
1775+ //PROFILE_STAT(wo->total, record[i].total);
1776+ //PROFILE_STAT(wo->mod, record[i].mod);
1777+ //PROFILE_STAT(wo->rro, record[i].rro);
1778+
1779+ dev_info(wo->hw->dev, "[WO Profiling]: SN:%u with latency: total=%u, rro:%u, mod:%u\n",
1780+ record[i].sn,
1781+ record[i].total,
1782+ record[i].rro,
1783+ record[i].mod);
1784+ }
1785+ break;
developer8fec8ae2022-08-15 15:01:09 -07001786+ case WO_EVT_RXCNT_INFO:
1787+ cnt = *(u32 *)msg;
1788+ rxcnt = (struct wo_cmd_rxcnt_t *)((u32 *)msg+1);
developer8cb3ac72022-07-04 10:55:14 +08001789+
developer8fec8ae2022-08-15 15:01:09 -07001790+ for (i = 0; i < cnt; i++)
1791+ if (wed->wlan.update_wo_rxcnt)
1792+ wed->wlan.update_wo_rxcnt(wed, rxcnt);
1793+ break;
developer8cb3ac72022-07-04 10:55:14 +08001794+ default:
1795+ break;
1796+ }
1797+
1798+ dev_kfree_skb(skb);
1799+
1800+}
1801+
static int
mtk_wed_load_firmware(struct mtk_wed_wo *wo)
{
	/* Firmware image layout: one fw_info descriptor per region, stored
	 * back-to-back immediately before the mtk_wed_fw_trailer at the end
	 * of the image; region payloads are packed from the image start.
	 */
	struct fw_info {
		__le32 decomp_crc;
		__le32 decomp_len;
		__le32 decomp_blk_sz;
		u8 reserved[4];
		__le32 addr;		/* target physical address of the region */
		__le32 len;		/* payload length in bytes */
		u8 feature_set;
		u8 reserved1[15];
	} __packed *region;

	char *mcu;
	const struct mtk_wed_fw_trailer *hdr;
	/* Tracks which shared regions were already written; static so a
	 * region shared between both WED instances is only copied once.
	 * NOTE(review): sized MAX_REGION_SIZE (3) while wo->region[] holds
	 * __WO_REGION_MAX entries — confirm these should not be the same.
	 */
	static u8 shared[MAX_REGION_SIZE] = {0};
	const struct firmware *fw;
	int ret, i;
	u32 ofs = 0;	/* running offset of the current region's payload */
	u32 boot_cr, val;

	/* per-instance firmware image: index selects WO_1 vs WO_2 */
	mcu = wo->hw->index ? MT7986_FIRMWARE_WO_2 : MT7986_FIRMWARE_WO_1;

	ret = request_firmware(&fw, mcu, wo->hw->dev);
	if (ret)
		return ret;

	/* trailer sits at the very end of the image */
	hdr = (const struct mtk_wed_fw_trailer *)(fw->data + fw->size -
						  sizeof(*hdr));

	dev_info(wo->hw->dev, "WO Firmware Version: %.10s, Build Time: %.15s\n",
		 hdr->fw_ver, hdr->build_date);

	for (i = 0; i < hdr->n_region; i++) {
		int j = 0;
		/* descriptor for region i is (n_region - i) entries back
		 * from the trailer
		 */
		region = (struct fw_info *)(fw->data + fw->size -
					    sizeof(*hdr) -
					    sizeof(*region) *
					    (hdr->n_region - i));

		/* copy the payload into every mapped region whose physical
		 * address matches this descriptor
		 */
		while (j < MAX_REGION_SIZE) {
			struct mtk_wed_fw_region *wo_region;

			wo_region = &wo->region[j];
			if (!wo_region->addr)
				break;

			if (wo_region->addr_pa == region->addr) {
				if (!wo_region->shared) {
					memcpy(wo_region->addr,
					       fw->data + ofs, region->len);
				} else if (!shared[j]) {
					/* shared regions are written once */
					memcpy(wo_region->addr,
					       fw->data + ofs, region->len);
					shared[j] = true;
				}
			}
			j++;
		}

		/* NOTE(review): the loop above is bounded by MAX_REGION_SIZE
		 * (3), so j can never equal __WO_REGION_MAX (4 region compat
		 * strings exist) and this error path looks unreachable —
		 * confirm the intended bound for both the loop and the check.
		 */
		if (j == __WO_REGION_MAX) {
			ret = -ENOENT;
			goto done;
		}
		ofs += region->len;
	}

	/* write the start address */
	boot_cr = wo->hw->index ?
		WOX_MCU_CFG_LS_WA_BOOT_ADDR_ADDR : WOX_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
	wo_w32(wo, boot_cr, (wo->region[WO_REGION_EMI].addr_pa >> 16));

	/* wo firmware reset */
	wo_w32(wo, WOX_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00);

	val = wo_r32(wo, WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR);

	/* release the per-instance CPU from reset (WA for index 1, WM for 0) */
	val |= wo->hw->index ? WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WA_CPU_RSTB_MASK :
			       WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WM_CPU_RSTB_MASK;

	wo_w32(wo, WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val);

done:
	release_firmware(fw);

	return ret;
}
1890+
1891+static int
1892+mtk_wed_get_firmware_region(struct mtk_wed_wo *wo)
1893+{
1894+ struct device_node *node, *np = wo->hw->node;
1895+ struct mtk_wed_fw_region *region;
1896+ struct resource res;
1897+ const char *compat;
1898+ int i, ret;
1899+
1900+ static const char *const wo_region_compat[__WO_REGION_MAX] = {
1901+ [WO_REGION_EMI] = WOCPU_EMI_DEV_NODE,
1902+ [WO_REGION_ILM] = WOCPU_ILM_DEV_NODE,
1903+ [WO_REGION_DATA] = WOCPU_DATA_DEV_NODE,
1904+ [WO_REGION_BOOT] = WOCPU_BOOT_DEV_NODE,
1905+ };
1906+
1907+ for (i = 0; i < __WO_REGION_MAX; i++) {
1908+ region = &wo->region[i];
1909+ compat = wo_region_compat[i];
1910+
1911+ node = of_parse_phandle(np, compat, 0);
1912+ if (!node)
1913+ return -ENODEV;
1914+
1915+ ret = of_address_to_resource(node, 0, &res);
1916+ if (ret)
1917+ return ret;
1918+
1919+ region->addr_pa = res.start;
1920+ region->size = resource_size(&res);
1921+ region->addr = ioremap(region->addr_pa, region->size);
1922+
1923+ of_property_read_u32_index(node, "shared", 0, &region->shared);
1924+ }
1925+
1926+ return 0;
1927+}
1928+
1929+static int
1930+wo_mcu_send_message(struct mtk_wed_wo *wo,
1931+ int to_id, int cmd, struct sk_buff *skb,
1932+ int *wait_seq, bool wait_resp)
1933+{
1934+ struct wed_cmd_hdr *hdr;
1935+ u8 seq = 0;
1936+
1937+ /* TDO: make dynamic based on msg type */
1938+ wo->mcu.timeout = 20 * HZ;
1939+
1940+ if (wait_resp && wait_seq) {
1941+ seq = wo->mcu.msg_seq++ ;
1942+ *wait_seq = seq;
1943+ }
1944+
1945+ hdr = (struct wed_cmd_hdr *)skb_push(skb, sizeof(*hdr));
1946+
1947+ hdr->cmd_id = cmd;
1948+ hdr->length = cpu_to_le16(skb->len);
1949+ hdr->uni_id = seq;
1950+
1951+ if (to_id == MODULE_ID_WO)
1952+ hdr->flag |= WARP_CMD_FLAG_FROM_TO_WO;
1953+
1954+ if (wait_resp && wait_seq)
1955+ hdr->flag |= WARP_CMD_FLAG_NEED_RSP;
1956+
1957+ return mtk_wed_wo_q_tx_skb(wo, &wo->q_tx, skb);
1958+}
1959+
1960+static int
1961+wo_mcu_parse_response(struct mtk_wed_wo *wo, int cmd,
1962+ struct sk_buff *skb, int seq)
1963+{
developer8fec8ae2022-08-15 15:01:09 -07001964+ struct mtk_wed_device *wed = wo->hw->wed_dev;
developer8cb3ac72022-07-04 10:55:14 +08001965+ struct wed_cmd_hdr *hdr;
developer8fec8ae2022-08-15 15:01:09 -07001966+ struct wo_cmd_rxcnt_t *rxcnt = NULL;
1967+ u32 i, cnt = 0;
developer8cb3ac72022-07-04 10:55:14 +08001968+
1969+ if (!skb) {
1970+ dev_err(wo->hw->dev, "Message %08x (seq %d) timeout\n",
1971+ cmd, seq);
1972+ return -ETIMEDOUT;
1973+ }
1974+
1975+ hdr = (struct wed_cmd_hdr *)skb->data;
1976+ if (seq != hdr->uni_id) {
1977+ dev_err(wo->hw->dev, "Message %08x (seq %d) with not match uid(%d)\n",
1978+ cmd, seq, hdr->uni_id);
1979+ return -EAGAIN;
1980+ }
1981+
developer8fec8ae2022-08-15 15:01:09 -07001982+ skb_pull(skb, sizeof(struct wed_cmd_hdr));
1983+
1984+ switch (cmd) {
1985+ case WO_CMD_RXCNT_INFO:
1986+ cnt = *(u32 *)skb->data;
1987+ rxcnt = (struct wo_cmd_rxcnt_t *)((u32 *)skb->data+1);
1988+
1989+ for (i = 0; i < cnt; i++)
1990+ if (wed->wlan.update_wo_rxcnt)
1991+ wed->wlan.update_wo_rxcnt(wed, rxcnt);
1992+ break;
1993+ default:
1994+ break;
1995+ }
developer8cb3ac72022-07-04 10:55:14 +08001996+
1997+ return 0;
1998+}
1999+
2000+int wed_wo_mcu_init(struct mtk_wed_wo *wo)
2001+{
2002+ static const struct wed_wo_mcu_ops wo_mcu_ops = {
2003+ .headroom = sizeof(struct wed_cmd_hdr),
2004+ .mcu_skb_send_msg = wo_mcu_send_message,
2005+ .mcu_parse_response = wo_mcu_parse_response,
2006+ /*TDO .mcu_restart = wo_mcu_restart,*/
2007+ };
2008+ unsigned long timeout = jiffies + FW_DL_TIMEOUT;
2009+ int ret;
2010+ u32 val;
2011+
2012+ wo->mcu_ops = &wo_mcu_ops;
2013+
2014+ ret = mtk_wed_get_firmware_region(wo);
2015+ if (ret)
2016+ return ret;
2017+
2018+ /* set dummy cr */
2019+ wed_w32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * WED_DUMMY_CR_FWDL,
2020+ wo->hw->index + 1);
2021+
2022+ ret = mtk_wed_load_firmware(wo);
2023+ if (ret)
2024+ return ret;
2025+
2026+ do {
2027+ /* get dummy cr */
2028+ val = wed_r32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * WED_DUMMY_CR_FWDL);
2029+ } while (val != 0 && !time_after(jiffies, timeout));
2030+
2031+ if (val)
2032+ return -EBUSY;
2033+
2034+ return 0;
2035+}
2036+
2037+static ssize_t
2038+mtk_wed_wo_ctrl(struct file *file,
2039+ const char __user *user_buf,
2040+ size_t count,
2041+ loff_t *ppos)
2042+{
2043+ struct mtk_wed_hw *hw = file->private_data;
2044+ struct mtk_wed_wo *wo = hw->wed_wo;
2045+ char buf[100], *cmd = NULL, *input[11] = {0};
2046+ char msgbuf[128] = {0};
2047+ struct wo_cmd_query *query = (struct wo_cmd_query *)msgbuf;
2048+ u32 cmd_id;
2049+ bool wait = false;
2050+ char *sub_str = NULL;
2051+ int input_idx = 0, input_total = 0, scan_num = 0;
2052+ char *p;
2053+
2054+ if (count > sizeof(buf))
2055+ return -EINVAL;
2056+
2057+ if (copy_from_user(buf, user_buf, count))
2058+ return -EFAULT;
2059+
2060+ if (count && buf[count - 1] == '\n')
2061+ buf[count - 1] = '\0';
2062+ else
2063+ buf[count] = '\0';
2064+
2065+ p = buf;
2066+
2067+ while ((sub_str = strsep(&p, " ")) != NULL) {
2068+ input[input_idx] = sub_str;
2069+ input_idx++;
2070+ input_total++;
2071+ }
2072+ cmd = input[0];
2073+ if (input_total == 1 && cmd) {
2074+ if (strncmp(cmd, "bainfo", strlen(cmd)) == 0) {
2075+ cmd_id = WO_CMD_BA_INFO_DUMP;
2076+ } else if (strncmp(cmd, "bactrl", strlen(cmd)) == 0) {
2077+ cmd_id = WO_CMD_BA_CTRL_DUMP;
2078+ } else if (strncmp(cmd, "fbcmdq", strlen(cmd)) == 0) {
2079+ cmd_id = WO_CMD_FBCMD_Q_DUMP;
2080+ } else if (strncmp(cmd, "logflush", strlen(cmd)) == 0) {
2081+ cmd_id = WO_CMD_LOG_FLUSH;
2082+ } else if (strncmp(cmd, "cpustat.dump", strlen(cmd)) == 0) {
2083+ cmd_id = WO_CMD_CPU_STATS_DUMP;
2084+ } else if (strncmp(cmd, "state", strlen(cmd)) == 0) {
2085+ cmd_id = WO_CMD_WED_RX_STAT;
2086+ } else if (strncmp(cmd, "prof_hit_dump", strlen(cmd)) == 0) {
2087+ //wo_profiling_report();
2088+ return count;
2089+ } else if (strncmp(cmd, "rxcnt_info", strlen(cmd)) == 0) {
2090+ cmd_id = WO_CMD_RXCNT_INFO;
2091+ wait = true;
2092+ } else {
2093+ pr_info("(%s) unknown comand string(%s)!\n", __func__, cmd);
2094+ return count;
2095+ }
2096+ } else if (input_total > 1) {
2097+ for (input_idx = 1 ; input_idx < input_total ; input_idx++) {
2098+ scan_num = sscanf(input[input_idx], "%u", &query->query0+(input_idx - 1));
2099+
2100+ if (scan_num < 1) {
2101+ pr_info("(%s) require more input!\n", __func__);
2102+ return count;
2103+ }
2104+ }
2105+ if(strncmp(cmd, "devinfo", strlen(cmd)) == 0) {
2106+ cmd_id = WO_CMD_DEV_INFO_DUMP;
2107+ } else if (strncmp(cmd, "bssinfo", strlen(cmd)) == 0) {
2108+ cmd_id = WO_CMD_BSS_INFO_DUMP;
2109+ } else if (strncmp(cmd, "starec", strlen(cmd)) == 0) {
2110+ cmd_id = WO_CMD_STA_REC_DUMP;
2111+ } else if (strncmp(cmd, "starec_ba", strlen(cmd)) == 0) {
2112+ cmd_id = WO_CMD_STA_BA_DUMP;
2113+ } else if (strncmp(cmd, "logctrl", strlen(cmd)) == 0) {
2114+ cmd_id = WO_CMD_FW_LOG_CTRL;
2115+ } else if (strncmp(cmd, "cpustat.en", strlen(cmd)) == 0) {
2116+ cmd_id = WO_CMD_CPU_STATS_ENABLE;
2117+ } else if (strncmp(cmd, "prof_conf", strlen(cmd)) == 0) {
2118+ cmd_id = WO_CMD_PROF_CTRL;
2119+ } else if (strncmp(cmd, "rxcnt_ctrl", strlen(cmd)) == 0) {
2120+ cmd_id = WO_CMD_RXCNT_CTRL;
2121+ } else if (strncmp(cmd, "dbg_set", strlen(cmd)) == 0) {
2122+ cmd_id = WO_CMD_DBG_INFO;
2123+ }
2124+ } else {
2125+ dev_info(hw->dev, "usage: echo cmd='cmd_str' > wo_write\n");
2126+ dev_info(hw->dev, "cmd_str value range:\n");
2127+ dev_info(hw->dev, "\tbainfo:\n");
2128+ dev_info(hw->dev, "\tbactrl:\n");
2129+ dev_info(hw->dev, "\tfbcmdq:\n");
2130+ dev_info(hw->dev, "\tlogflush:\n");
2131+ dev_info(hw->dev, "\tcpustat.dump:\n");
2132+ dev_info(hw->dev, "\tprof_hit_dump:\n");
2133+ dev_info(hw->dev, "\trxcnt_info:\n");
2134+ dev_info(hw->dev, "\tdevinfo:\n");
2135+ dev_info(hw->dev, "\tbssinfo:\n");
2136+ dev_info(hw->dev, "\tstarec:\n");
2137+ dev_info(hw->dev, "\tstarec_ba:\n");
2138+ dev_info(hw->dev, "\tlogctrl:\n");
2139+ dev_info(hw->dev, "\tcpustat.en:\n");
2140+ dev_info(hw->dev, "\tprof_conf:\n");
2141+ dev_info(hw->dev, "\trxcnt_ctrl:\n");
2142+ dev_info(hw->dev, "\tdbg_set [level] [category]:\n");
2143+ return count;
2144+ }
2145+
2146+ mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, cmd_id, (void *)msgbuf, sizeof(struct wo_cmd_query), wait);
2147+
2148+ return count;
2149+
2150+}
2151+
/* write-only debugfs interface for issuing WO firmware commands */
static const struct file_operations fops_wo_ctrl = {
	.write = mtk_wed_wo_ctrl,
	.open = simple_open,
	.llseek = default_llseek,
};
2157+
2158+void wed_wo_mcu_debugfs(struct mtk_wed_hw *hw, struct dentry *dir)
2159+{
2160+ if (!dir)
2161+ return;
2162+
2163+ debugfs_create_file("wo_write", 0600, dir, hw, &fops_wo_ctrl);
2164+}
2165+
2166diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.h b/drivers/net/ethernet/mediatek/mtk_wed_mcu.h
2167new file mode 100644
developer8fec8ae2022-08-15 15:01:09 -07002168index 000000000..6a5ac7672
developer8cb3ac72022-07-04 10:55:14 +08002169--- /dev/null
2170+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.h
2171@@ -0,0 +1,125 @@
/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef __MTK_WED_MCU_H
#define __MTK_WED_MCU_H

#define EXCEPTION_LOG_SIZE		32768
#define WOCPU_MCUSYS_RESET_ADDR		0x15194050
#define WOCPU_WO0_MCUSYS_RESET_MASK	0x20
#define WOCPU_WO1_MCUSYS_RESET_MASK	0x1

/* status codes used by the WARP host<->WO firmware protocol */
#define WARP_INVALID_LENGTH_STATUS (-2)
#define WARP_NULL_POINTER_STATUS (-3)
#define WARP_INVALID_PARA_STATUS (-4)
#define WARP_NOT_HANDLE_STATUS (-5)
#define WARP_FAIL_STATUS (-1)
#define WARP_OK_STATUS (0)
#define WARP_ALREADY_DONE_STATUS (1)

/* per-WED-instance firmware images (hw->index 0 -> WO_1, 1 -> WO_2) */
#define MT7986_FIRMWARE_WO_1 "mediatek/mt7986_wo_0.bin"
#define MT7986_FIRMWARE_WO_2 "mediatek/mt7986_wo_1.bin"

/* device-tree phandle property names for the WO CPU memory regions */
#define WOCPU_EMI_DEV_NODE "mediatek,wocpu_emi"
#define WOCPU_ILM_DEV_NODE "mediatek,wocpu_ilm"
#define WOCPU_DLM_DEV_NODE "mediatek,wocpu_dlm"
#define WOCPU_DATA_DEV_NODE "mediatek,wocpu_data"
#define WOCPU_BOOT_DEV_NODE "mediatek,wocpu_boot"

/* firmware-download / WO CPU poll timeouts in jiffies (3 s and 1 s) */
#define FW_DL_TIMEOUT ((3000 * HZ) / 1000)
#define WOCPU_TIMEOUT ((1000 * HZ) / 1000)

/* NOTE(review): used as the bound when scanning wo->region[] in
 * mtk_wed_load_firmware(), yet four region compat strings exist above —
 * confirm whether this should equal the region count (__WO_REGION_MAX).
 */
#define MAX_REGION_SIZE 3

/* WO MCU configuration registers, offsets relative to the MCU cfg base */
#define WOX_MCU_CFG_LS_BASE 0 /*0x15194000*/

#define WOX_MCU_CFG_LS_HW_VER_ADDR (WOX_MCU_CFG_LS_BASE + 0x000) // 4000
#define WOX_MCU_CFG_LS_FW_VER_ADDR (WOX_MCU_CFG_LS_BASE + 0x004) // 4004
#define WOX_MCU_CFG_LS_CFG_DBG1_ADDR (WOX_MCU_CFG_LS_BASE + 0x00C) // 400C
#define WOX_MCU_CFG_LS_CFG_DBG2_ADDR (WOX_MCU_CFG_LS_BASE + 0x010) // 4010
#define WOX_MCU_CFG_LS_WF_MCCR_ADDR (WOX_MCU_CFG_LS_BASE + 0x014) // 4014
#define WOX_MCU_CFG_LS_WF_MCCR_SET_ADDR (WOX_MCU_CFG_LS_BASE + 0x018) // 4018
#define WOX_MCU_CFG_LS_WF_MCCR_CLR_ADDR (WOX_MCU_CFG_LS_BASE + 0x01C) // 401C
#define WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR (WOX_MCU_CFG_LS_BASE + 0x050) // 4050
#define WOX_MCU_CFG_LS_WM_BOOT_ADDR_ADDR (WOX_MCU_CFG_LS_BASE + 0x060) // 4060
#define WOX_MCU_CFG_LS_WA_BOOT_ADDR_ADDR (WOX_MCU_CFG_LS_BASE + 0x064) // 4064

/* CPU reset-release bits in WF_MCU_CFG_WM_WA */
#define WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WM_CPU_RSTB_MASK BIT(5)
#define WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WA_CPU_RSTB_MASK BIT(0)


/* event ids reported by the WO firmware to the host */
enum wo_event_id {
	WO_EVT_LOG_DUMP = 0x1,
	WO_EVT_PROFILING = 0x2,
	WO_EVT_RXCNT_INFO = 0x3
};

/* command ids sent to the WO firmware; values are part of the host<->WO
 * interface, so keep them in sync with the firmware (do not reorder)
 */
enum wo_cmd_id {
	WO_CMD_WED_CFG = 0,
	WO_CMD_WED_RX_STAT,
	WO_CMD_RRO_SER,
	WO_CMD_DBG_INFO,
	WO_CMD_DEV_INFO,
	WO_CMD_BSS_INFO,
	WO_CMD_STA_REC,
	WO_CMD_DEV_INFO_DUMP,
	WO_CMD_BSS_INFO_DUMP,
	WO_CMD_STA_REC_DUMP,
	WO_CMD_BA_INFO_DUMP,
	WO_CMD_FBCMD_Q_DUMP,
	WO_CMD_FW_LOG_CTRL,
	WO_CMD_LOG_FLUSH,
	WO_CMD_CHANGE_STATE,
	WO_CMD_CPU_STATS_ENABLE,
	WO_CMD_CPU_STATS_DUMP,
	WO_CMD_EXCEPTION_INIT,
	WO_CMD_PROF_CTRL,
	WO_CMD_STA_BA_DUMP,
	WO_CMD_BA_CTRL_DUMP,
	WO_CMD_RXCNT_CTRL,
	WO_CMD_RXCNT_INFO,
	WO_CMD_SET_CAP,
	WO_CMD_CCIF_RING_DUMP,
	WO_CMD_WED_END
};

/* WO firmware run states */
enum wo_state {
	WO_STATE_UNDEFINED = 0x0,
	WO_STATE_INIT = 0x1,
	WO_STATE_ENABLE = 0x2,
	WO_STATE_DISABLE = 0x3,
	WO_STATE_HALT = 0x4,
	WO_STATE_GATING = 0x5,
	WO_STATE_SER_RESET = 0x6,
	WO_STATE_WF_RESET = 0x7,
	WO_STATE_END
};

/* state-transition handshake values exchanged through the dummy CRs */
enum wo_done_state {
	WOIF_UNDEFINED = 0,
	WOIF_DISABLE_DONE = 1,
	WOIF_TRIGGER_ENABLE = 2,
	WOIF_ENABLE_DONE = 3,
	WOIF_TRIGGER_GATING = 4,
	WOIF_GATING_DONE = 5,
	WOIF_TRIGGER_HALT = 6,
	WOIF_HALT_DONE = 7,
};

/* indices into the MTK_WED_SCR0 scratch ("dummy") registers */
enum wed_dummy_cr_idx {
	WED_DUMMY_CR_FWDL = 0,
	WED_DUMMY_CR_WO_STATUS = 1
};

/* trailer appended at the end of the WO firmware image */
struct mtk_wed_fw_trailer {
	u8 chip_id;
	u8 eco_code;
	u8 n_region;		/* number of fw_info region descriptors */
	u8 format_ver;
	u8 format_flag;
	u8 reserved[2];
	char fw_ver[10];	/* not NUL-terminated; print with %.10s */
	char build_date[15];	/* not NUL-terminated; print with %.15s */
	u32 crc;
};

#endif
2297diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
developer8fec8ae2022-08-15 15:01:09 -07002298index e107de7ba..9d021e2da 100644
developer8cb3ac72022-07-04 10:55:14 +08002299--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2300+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2301@@ -4,6 +4,8 @@
2302 #ifndef __MTK_WED_REGS_H
2303 #define __MTK_WED_REGS_H
2304
2305+#define MTK_WFDMA_DESC_CTRL_TO_HOST BIT(8)
2306+
2307 #if defined(CONFIG_MEDIATEK_NETSYS_V2)
2308 #define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(13, 0)
2309 #define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(14)
2310@@ -16,6 +18,7 @@
2311 #define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16)
2312 #define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30)
2313 #define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31)
2314+#define MTK_WED_RX_BM_TOKEN GENMASK(31, 16)
2315
2316 struct mtk_wdma_desc {
2317 __le32 buf0;
developerbbca0f92022-07-26 17:26:12 +08002318@@ -31,6 +34,7 @@ struct mtk_wdma_desc {
2319 #define MTK_WED_REV_ID 0x000
2320 #define MTK_WED_REV_ID_MAJOR GENMASK(7, 0)
2321 #endif
2322+#define MTK_WED_REV_ID_MINOR GENMASK(27, 16)
2323
2324 #define MTK_WED_RESET 0x008
2325 #define MTK_WED_RESET_TX_BM BIT(0)
2326@@ -41,6 +45,8 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002327 #define MTK_WED_RESET_WED_TX_DMA BIT(12)
2328 #define MTK_WED_RESET_WDMA_RX_DRV BIT(17)
2329 #define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
2330+#define MTK_WED_RESET_RX_RRO_QM BIT(20)
2331+#define MTK_WED_RESET_RX_ROUTE_QM BIT(21)
2332 #define MTK_WED_RESET_WED BIT(31)
2333
2334 #define MTK_WED_CTRL 0x00c
developerbbca0f92022-07-26 17:26:12 +08002335@@ -52,8 +58,12 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002336 #define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
2337 #define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
2338 #define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11)
2339-#define MTK_WED_CTRL_RESERVE_EN BIT(12)
2340-#define MTK_WED_CTRL_RESERVE_BUSY BIT(13)
2341+#define MTK_WED_CTRL_WED_RX_BM_EN BIT(12)
2342+#define MTK_WED_CTRL_WED_RX_BM_BUSY BIT(13)
2343+#define MTK_WED_CTRL_RX_RRO_QM_EN BIT(14)
2344+#define MTK_WED_CTRL_RX_RRO_QM_BUSY BIT(15)
2345+#define MTK_WED_CTRL_RX_ROUTE_QM_EN BIT(16)
2346+#define MTK_WED_CTRL_RX_ROUTE_QM_BUSY BIT(17)
2347 #define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
2348 #define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25)
2349 #define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
developerbbca0f92022-07-26 17:26:12 +08002350@@ -68,8 +78,8 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002351 #define MTK_WED_EXT_INT_STATUS_TX_TKID_LO_TH BIT(10)
2352 #define MTK_WED_EXT_INT_STATUS_TX_TKID_HI_TH BIT(11)
2353 #endif
2354-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
2355-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
2356+#define MTK_WED_EXT_INT_STATUS_RX_FREE_AT_EMPTY BIT(12)
2357+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_DMAD_ER BIT(13)
2358 #define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
2359 #define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
2360 #define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
developerbbca0f92022-07-26 17:26:12 +08002361@@ -86,8 +96,8 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002362 #define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
2363 MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
2364 MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \
2365- MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH | \
2366- MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH | \
2367+ MTK_WED_EXT_INT_STATUS_RX_FREE_AT_EMPTY | \
2368+ MTK_WED_EXT_INT_STATUS_RX_FBUF_DMAD_ER | \
2369 MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
2370 MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
2371 MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | \
developerbbca0f92022-07-26 17:26:12 +08002372@@ -96,6 +106,8 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002373 MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR)
2374
2375 #define MTK_WED_EXT_INT_MASK 0x028
2376+#define MTK_WED_EXT_INT_MASK1 0x02c
2377+#define MTK_WED_EXT_INT_MASK2 0x030
2378
2379 #define MTK_WED_STATUS 0x060
2380 #define MTK_WED_STATUS_TX GENMASK(15, 8)
developerbbca0f92022-07-26 17:26:12 +08002381@@ -183,6 +195,9 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002382
2383 #define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10)
2384
2385+#define MTK_WED_RING_RX_DATA(_n) (0x420 + (_n) * 0x10)
2386+
2387+#define MTK_WED_SCR0 0x3c0
2388 #define MTK_WED_WPDMA_INT_TRIGGER 0x504
2389 #define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1)
2390 #define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4)
developerbbca0f92022-07-26 17:26:12 +08002391@@ -239,13 +254,19 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002392
2393 #define MTK_WED_WPDMA_INT_CTRL_TX 0x530
2394 #define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN BIT(0)
2395-#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR BIT(1)
2396+#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR BIT(1)
2397 #define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG GENMASK(6, 2)
2398 #define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN BIT(8)
2399 #define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR BIT(9)
2400 #define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG GENMASK(14, 10)
2401
2402 #define MTK_WED_WPDMA_INT_CTRL_RX 0x534
2403+#define MTK_WED_WPDMA_INT_CTRL_RX0_EN BIT(0)
2404+#define MTK_WED_WPDMA_INT_CTRL_RX0_CLR BIT(1)
2405+#define MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG GENMASK(6, 2)
2406+#define MTK_WED_WPDMA_INT_CTRL_RX1_EN BIT(8)
2407+#define MTK_WED_WPDMA_INT_CTRL_RX1_CLR BIT(9)
2408+#define MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG GENMASK(14, 10)
2409
2410 #define MTK_WED_WPDMA_INT_CTRL_TX_FREE 0x538
2411 #define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN BIT(0)
developerc1b2cd12022-07-28 18:35:24 +08002412@@ -270,13 +291,40 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002413 #define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
2414 #define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
2415
2416+#define MTK_WED_WPDMA_RX_MIB(_n) (0x5e0 + (_n) * 4)
2417+#define MTK_WED_WPDMA_RX_COHERENT_MIB(_n) (0x5f0 + (_n) * 4)
2418+
2419 #define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10)
2420 #define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10)
developerc1b2cd12022-07-28 18:35:24 +08002421+#define MTK_WED_WPDMA_RING_RX_DATA(_n) (0x730 + (_n) * 0x10)
developer8cb3ac72022-07-04 10:55:14 +08002422+
2423+#define MTK_WED_WPDMA_RX_D_GLO_CFG 0x75c
2424+#define MTK_WED_WPDMA_RX_D_RX_DRV_EN BIT(0)
2425+#define MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL GENMASK(11, 7)
2426+#define MTK_WED_WPDMA_RX_D_RXD_READ_LEN GENMASK(31, 24)
2427+
2428+#define MTK_WED_WPDMA_RX_D_RST_IDX 0x760
developerc1b2cd12022-07-28 18:35:24 +08002429+#define MTK_WED_WPDMA_RX_D_RST_CRX_IDX GENMASK(17, 16)
2430+#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX GENMASK(25, 24)
developer8cb3ac72022-07-04 10:55:14 +08002431+
2432+#define MTK_WED_WPDMA_RX_GLO_CFG 0x76c
2433+#define MTK_WED_WPDMA_RX_RING 0x770
2434+
2435+#define MTK_WED_WPDMA_RX_D_MIB(_n) (0x774 + (_n) * 4)
2436+#define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n) (0x784 + (_n) * 4)
2437+#define MTK_WED_WPDMA_RX_D_COHERENT_MIB 0x78c
2438+
2439+#define MTK_WED_WDMA_RING_TX 0x800
2440+
2441+#define MTK_WED_WDMA_TX_MIB 0x810
2442+
2443+
2444 #define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
2445 #define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
2446
2447 #define MTK_WED_WDMA_GLO_CFG 0xa04
2448 #define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
2449+#define MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK BIT(1)
2450 #define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2)
2451 #define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
2452 #define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4)
developerc1b2cd12022-07-28 18:35:24 +08002453@@ -320,6 +368,20 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002454 #define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4)
2455 #define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4)
2456
2457+#define MTK_WED_RX_BM_RX_DMAD 0xd80
2458+#define MTK_WED_RX_BM_RX_DMAD_SDL0 GENMASK(13, 0)
2459+
2460+#define MTK_WED_RX_BM_BASE 0xd84
2461+#define MTK_WED_RX_BM_INIT_PTR 0xd88
2462+#define MTK_WED_RX_BM_SW_TAIL GENMASK(15, 0)
2463+#define MTK_WED_RX_BM_INIT_SW_TAIL BIT(16)
2464+
2465+#define MTK_WED_RX_PTR 0xd8c
2466+
2467+#define MTK_WED_RX_BM_DYN_ALLOC_TH 0xdb4
2468+#define MTK_WED_RX_BM_DYN_ALLOC_TH_H GENMASK(31, 16)
2469+#define MTK_WED_RX_BM_DYN_ALLOC_TH_L GENMASK(15, 0)
2470+
2471 #define MTK_WED_RING_OFS_BASE 0x00
2472 #define MTK_WED_RING_OFS_COUNT 0x04
2473 #define MTK_WED_RING_OFS_CPU_IDX 0x08
developerc1b2cd12022-07-28 18:35:24 +08002474@@ -330,12 +392,13 @@ struct mtk_wdma_desc {
developera3f86ed2022-07-08 14:15:13 +08002475
2476 #define MTK_WDMA_GLO_CFG 0x204
2477 #define MTK_WDMA_GLO_CFG_TX_DMA_EN BIT(0)
2478+#define MTK_WDMA_GLO_CFG_TX_DMA_BUSY BIT(1)
2479 #define MTK_WDMA_GLO_CFG_RX_DMA_EN BIT(2)
2480+#define MTK_WDMA_GLO_CFG_RX_DMA_BUSY BIT(3)
2481 #define MTK_WDMA_GLO_CFG_RX_INFO3_PRERES BIT(26)
2482 #define MTK_WDMA_GLO_CFG_RX_INFO2_PRERES BIT(27)
2483 #define MTK_WDMA_GLO_CFG_RX_INFO1_PRERES BIT(28)
developerc1b2cd12022-07-28 18:35:24 +08002484
2485-
2486 #define MTK_WDMA_RESET_IDX 0x208
2487 #define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0)
2488 #define MTK_WDMA_RESET_IDX_RX GENMASK(17, 16)
2489@@ -359,4 +422,70 @@ struct mtk_wdma_desc {
developer8cb3ac72022-07-04 10:55:14 +08002490 /* DMA channel mapping */
2491 #define HIFSYS_DMA_AG_MAP 0x008
2492
/* RX route qualifier module (RTQM) */
#define MTK_WED_RTQM_GLO_CFG 0xb00
#define MTK_WED_RTQM_BUSY BIT(1)
#define MTK_WED_RTQM_Q_RST BIT(2)
#define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5)
#define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20)

/* RTQM MIB counters */
#define MTK_WED_RTQM_R2H_MIB(_n) (0xb70 + (_n) * 0x4)
#define MTK_WED_RTQM_R2Q_MIB(_n) (0xb78 + (_n) * 0x4)
#define MTK_WED_RTQM_Q2N_MIB 0xb80
#define MTK_WED_RTQM_Q2H_MIB(_n) (0xb84 + (_n) * 0x4)

#define MTK_WED_RTQM_Q2B_MIB 0xb8c
#define MTK_WED_RTQM_PFDBK_MIB 0xb90

/* RX RRO (receive reordering) queue manager */
#define MTK_WED_RROQM_GLO_CFG 0xc04
#define MTK_WED_RROQM_RST_IDX 0xc08
#define MTK_WED_RROQM_RST_IDX_MIOD BIT(0)
#define MTK_WED_RROQM_RST_IDX_FDBK BIT(4)

#define MTK_WED_RROQM_MIOD_CTRL0 0xc40
#define MTK_WED_RROQM_MIOD_CTRL1 0xc44
#define MTK_WED_RROQM_MIOD_CNT GENMASK(11, 0)

#define MTK_WED_RROQM_MIOD_CTRL2 0xc48
#define MTK_WED_RROQM_MIOD_CTRL3 0xc4c

#define MTK_WED_RROQM_FDBK_CTRL0 0xc50
#define MTK_WED_RROQM_FDBK_CTRL1 0xc54
#define MTK_WED_RROQM_FDBK_CNT GENMASK(11, 0)

#define MTK_WED_RROQM_FDBK_CTRL2 0xc58

#define MTK_WED_RROQ_BASE_L 0xc80
#define MTK_WED_RROQ_BASE_H 0xc84

#define MTK_WED_RROQM_MIOD_CFG 0xc8c
#define MTK_WED_RROQM_MIOD_MID_DW GENMASK(5, 0)
#define MTK_WED_RROQM_MIOD_MOD_DW GENMASK(13, 8)
#define MTK_WED_RROQM_MIOD_ENTRY_DW GENMASK(22, 16)

/* RROQM MIB counters */
#define MTK_WED_RROQM_MID_MIB 0xcc0
#define MTK_WED_RROQM_MOD_MIB 0xcc4
#define MTK_WED_RROQM_MOD_COHERENT_MIB 0xcc8
#define MTK_WED_RROQM_FDBK_MIB 0xcd0
#define MTK_WED_RROQM_FDBK_COHERENT_MIB 0xcd4
#define MTK_WED_RROQM_FDBK_IND_MIB 0xce0
#define MTK_WED_RROQM_FDBK_ENQ_MIB 0xce4
#define MTK_WED_RROQM_FDBK_ANC_MIB 0xce8
#define MTK_WED_RROQM_FDBK_ANC2H_MIB 0xcec

/* RX buffer manager.
 * NOTE(review): MTK_WED_RX_BM_RX_DMAD / _BASE / _INIT_PTR repeat identical
 * definitions added earlier in this header by this same patch — one copy
 * should probably be dropped.
 */
#define MTK_WED_RX_BM_RX_DMAD 0xd80
#define MTK_WED_RX_BM_BASE 0xd84
#define MTK_WED_RX_BM_INIT_PTR 0xd88
#define MTK_WED_RX_BM_PTR 0xd8c
/* NOTE(review): GENMASK(32, 16) spans bit 32 of a 32-bit register —
 * presumably GENMASK(31, 16) was intended; confirm against the datasheet.
 */
#define MTK_WED_RX_BM_PTR_HEAD GENMASK(32, 16)
#define MTK_WED_RX_BM_PTR_TAIL GENMASK(15, 0)

#define MTK_WED_RX_BM_BLEN 0xd90
#define MTK_WED_RX_BM_STS 0xd94
#define MTK_WED_RX_BM_INTF2 0xd98
#define MTK_WED_RX_BM_INTF 0xd9c
#define MTK_WED_RX_BM_ERR_STS 0xda8

#define MTK_WED_WOCPU_VIEW_MIOD_BASE 0x8000
#define MTK_WED_PCIE_INT_MASK 0x0
2558+
2559 #endif
2560diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
2561new file mode 100644
developer8fec8ae2022-08-15 15:01:09 -07002562index 000000000..67dcffb26
developer8cb3ac72022-07-04 10:55:14 +08002563--- /dev/null
2564+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
developerf11dcd72022-08-27 18:29:27 +08002565@@ -0,0 +1,573 @@
developer8cb3ac72022-07-04 10:55:14 +08002566+// SPDX-License-Identifier: GPL-2.0-only
2567+
2568+#include <linux/kernel.h>
2569+#include <linux/bitfield.h>
2570+#include <linux/dma-mapping.h>
2571+#include <linux/skbuff.h>
2572+#include <linux/of_platform.h>
2573+#include <linux/interrupt.h>
2574+#include <linux/of_address.h>
2575+#include <linux/iopoll.h>
2576+#include <linux/soc/mediatek/mtk_wed.h>
2577+#include "mtk_wed.h"
2578+#include "mtk_wed_regs.h"
2579+#include "mtk_wed_ccif.h"
2580+#include "mtk_wed_wo.h"
2581+
2582+struct wed_wo_profile_stat profile_total[6] = {
2583+ {1001, 0},
2584+ {1501, 0},
2585+ {3001, 0},
2586+ {5001, 0},
2587+ {10001, 0},
2588+ {0xffffffff, 0}
2589+};
2590+
2591+struct wed_wo_profile_stat profiling_mod[6] = {
2592+ {1001, 0},
2593+ {1501, 0},
2594+ {3001, 0},
2595+ {5001, 0},
2596+ {10001, 0},
2597+ {0xffffffff, 0}
2598+};
2599+
2600+struct wed_wo_profile_stat profiling_rro[6] = {
2601+ {1001, 0},
2602+ {1501, 0},
2603+ {3001, 0},
2604+ {5001, 0},
2605+ {10001, 0},
2606+ {0xffffffff, 0}
2607+};
2608+
2609+static void
2610+woif_q_sync_idx(struct mtk_wed_wo *wo, struct wed_wo_queue *q)
2611+{
2612+ woccif_w32(wo, q->regs->desc_base, q->desc_dma);
2613+ woccif_w32(wo, q->regs->ring_size, q->ndesc);
2614+
developer8cb3ac72022-07-04 10:55:14 +08002615+}
2616+
2617+static void
2618+woif_q_reset(struct mtk_wed_wo *dev, struct wed_wo_queue *q)
2619+{
2620+
2621+ if (!q || !q->ndesc)
2622+ return;
2623+
2624+ woccif_w32(dev, q->regs->cpu_idx, 0);
2625+
2626+ woif_q_sync_idx(dev, q);
2627+}
2628+
static void
woif_q_kick(struct mtk_wed_wo *wo, struct wed_wo_queue *q, int offset)
{
	/* make all descriptor updates visible before the doorbell write
	 * below publishes them to the WO CPU
	 */
	wmb();
	/* NOTE(review): callers pass offset -1 to compensate for the
	 * pre-incremented head; when head is 0 this writes head - 1 as a
	 * wrapped value — confirm the hardware index semantics.
	 */
	woccif_w32(wo, q->regs->cpu_idx, q->head + offset);
}
2635+
2636+static int
2637+woif_q_rx_fill(struct mtk_wed_wo *wo, struct wed_wo_queue *q)
2638+{
2639+ int len = q->buf_size, frames = 0;
2640+ struct wed_wo_queue_entry *entry;
2641+ struct wed_wo_desc *desc;
2642+ dma_addr_t addr;
2643+ u32 ctrl = 0;
2644+ void *buf;
2645+
2646+ if (!q->ndesc)
2647+ return 0;
2648+
2649+ spin_lock_bh(&q->lock);
2650+
developer9dbe57a2022-08-05 18:23:53 +08002651+ while (q->queued < q->ndesc) {
developer8cb3ac72022-07-04 10:55:14 +08002652+
2653+ buf = page_frag_alloc(&q->rx_page, len, GFP_ATOMIC);
2654+ if (!buf)
2655+ break;
2656+
2657+ addr = dma_map_single(wo->hw->dev, buf, len, DMA_FROM_DEVICE);
2658+ if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
2659+ skb_free_frag(buf);
2660+ break;
2661+ }
2662+ dma_sync_single_for_cpu(wo->hw->dev, addr, len,
2663+ DMA_TO_DEVICE);
developerf11dcd72022-08-27 18:29:27 +08002664+
2665+ q->head = (q->head + 1) % q->ndesc;
2666+
developer8cb3ac72022-07-04 10:55:14 +08002667+ desc = &q->desc[q->head];
2668+ entry = &q->entry[q->head];
2669+
2670+ entry->dma_addr = addr;
2671+ entry->dma_len = len;
2672+
2673+ ctrl = FIELD_PREP(WED_CTL_SD_LEN0, entry->dma_len);
2674+ ctrl |= WED_CTL_LAST_SEC0;
2675+
2676+ WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
2677+ WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
2678+ dma_sync_single_for_device(wo->hw->dev, addr, len,
2679+ DMA_TO_DEVICE);
2680+ q->queued++;
2681+ q->entry[q->head].buf = buf;
2682+
developer8cb3ac72022-07-04 10:55:14 +08002683+ frames++;
2684+ }
2685+
2686+ spin_unlock_bh(&q->lock);
2687+
2688+ return frames;
2689+}
2690+
/* Refill the RX ring and ring the doorbell only when at least one buffer
 * was actually added.
 */
static void
woif_q_rx_fill_process(struct mtk_wed_wo *wo, struct wed_wo_queue *q)
{
	int filled = woif_q_rx_fill(wo, q);

	if (filled)
		woif_q_kick(wo, q, -1);
}
2697+
2698+static int
2699+woif_q_alloc(struct mtk_wed_wo *dev, struct wed_wo_queue *q,
2700+ int n_desc, int bufsize, int idx,
2701+ struct wed_wo_queue_regs *regs)
2702+{
2703+ struct wed_wo_queue_regs *q_regs;
2704+ int size;
2705+
2706+ spin_lock_init(&q->lock);
2707+ spin_lock_init(&q->cleanup_lock);
2708+
2709+ q_regs = devm_kzalloc(dev->hw->dev, sizeof(*q_regs), GFP_KERNEL);
2710+
2711+ q_regs->desc_base = regs->desc_base;
2712+ q_regs->ring_size = regs->ring_size;
2713+ q_regs->cpu_idx = regs->cpu_idx;
2714+ q_regs->dma_idx = regs->dma_idx;
2715+
2716+ q->regs = q_regs;
2717+ q->ndesc = n_desc;
2718+ q->buf_size = bufsize;
2719+
2720+ size = q->ndesc * sizeof(struct wed_wo_desc);
2721+
2722+ q->desc = dmam_alloc_coherent(dev->hw->dev, size,
2723+ &q->desc_dma, GFP_KERNEL);
2724+ if (!q->desc)
2725+ return -ENOMEM;
2726+
2727+ size = q->ndesc * sizeof(*q->entry);
2728+ q->entry = devm_kzalloc(dev->hw->dev, size, GFP_KERNEL);
2729+ if (!q->entry)
2730+ return -ENOMEM;
2731+
2732+ if (idx == 0)
2733+ woif_q_reset(dev, &dev->q_tx);
2734+
2735+ return 0;
2736+}
2737+
2738+static void
developera3f86ed2022-07-08 14:15:13 +08002739+woif_q_free(struct mtk_wed_wo *dev, struct wed_wo_queue *q)
2740+{
2741+ int size;
2742+
2743+ if (!q)
2744+ return;
2745+
2746+ if (!q->desc)
2747+ return;
2748+
2749+ woccif_w32(dev, q->regs->cpu_idx, 0);
2750+
2751+ size = q->ndesc * sizeof(struct wed_wo_desc);
2752+ dma_free_coherent(dev->hw->dev, size, q->desc, q->desc_dma);
2753+}
2754+
2755+static void
developer8cb3ac72022-07-04 10:55:14 +08002756+woif_q_tx_clean(struct mtk_wed_wo *wo, struct wed_wo_queue *q, bool flush)
2757+{
2758+ int last;
2759+
2760+ if (!q || !q->ndesc)
2761+ return;
2762+
2763+ spin_lock_bh(&q->cleanup_lock);
2764+ if (flush)
2765+ last = -1;
2766+ else
developerf11dcd72022-08-27 18:29:27 +08002767+ last = woccif_r32(wo, q->regs->dma_idx);
developer8cb3ac72022-07-04 10:55:14 +08002768+
2769+ while (q->queued > 0 && q->tail != last) {
2770+ struct wed_wo_queue_entry *e;
2771+
developerf11dcd72022-08-27 18:29:27 +08002772+ e = &q->entry[q->tail + 1];
developer8cb3ac72022-07-04 10:55:14 +08002773+
2774+ dma_unmap_single(wo->hw->dev, e->dma_addr, e->dma_len,
2775+ DMA_TO_DEVICE);
2776+
developer8cb3ac72022-07-04 10:55:14 +08002777+
2778+ memset(e, 0, sizeof(*e));
2779+
2780+ spin_lock_bh(&q->lock);
2781+ q->tail = (q->tail + 1) % q->ndesc;
2782+ q->queued--;
2783+ spin_unlock_bh(&q->lock);
2784+
developer8cb3ac72022-07-04 10:55:14 +08002785+ }
2786+ spin_unlock_bh(&q->cleanup_lock);
2787+
2788+ if (flush) {
2789+ spin_lock_bh(&q->lock);
2790+ woif_q_sync_idx(wo, q);
2791+ woif_q_kick(wo, q, 0);
2792+ spin_unlock_bh(&q->lock);
2793+ }
2794+}
2795+
developer8cb3ac72022-07-04 10:55:14 +08002796+static void *
2797+woif_q_deq(struct mtk_wed_wo *wo, struct wed_wo_queue *q, bool flush,
2798+ int *len, u32 *info, bool *more)
2799+{
2800+ int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
2801+ struct wed_wo_queue_entry *e;
2802+ struct wed_wo_desc *desc;
developerf11dcd72022-08-27 18:29:27 +08002803+ int idx = (q->tail + 1) % q->ndesc;;
developer8cb3ac72022-07-04 10:55:14 +08002804+ void *buf;
2805+
2806+ *more = false;
2807+ if (!q->queued)
2808+ return NULL;
2809+
2810+ if (flush)
2811+ q->desc[idx].ctrl |= cpu_to_le32(WED_CTL_DMA_DONE);
2812+ else if (!(q->desc[idx].ctrl & cpu_to_le32(WED_CTL_DMA_DONE)))
2813+ return NULL;
2814+
developerf11dcd72022-08-27 18:29:27 +08002815+ q->tail = idx;
developer8cb3ac72022-07-04 10:55:14 +08002816+ q->queued--;
2817+
2818+ desc = &q->desc[idx];
2819+ e = &q->entry[idx];
2820+
2821+ buf = e->buf;
2822+ if (len) {
2823+ u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
2824+ *len = FIELD_GET(WED_CTL_SD_LEN0, ctl);
2825+ *more = !(ctl & WED_CTL_LAST_SEC0);
2826+ }
2827+
2828+ if (info)
2829+ *info = le32_to_cpu(desc->info);
2830+ if(buf)
2831+ dma_unmap_single(wo->hw->dev, e->dma_addr, buf_len,
2832+ DMA_FROM_DEVICE);
2833+ e->skb = NULL;
2834+
2835+ return buf;
2836+}
2837+
developera3f86ed2022-07-08 14:15:13 +08002838+static void
2839+woif_q_rx_clean(struct mtk_wed_wo *wo, struct wed_wo_queue *q)
2840+{
2841+ struct page *page;
2842+ void *buf;
2843+ bool more;
2844+
2845+ if (!q->ndesc)
2846+ return;
2847+
2848+ spin_lock_bh(&q->lock);
2849+ do {
2850+ buf = woif_q_deq(wo, q, true, NULL, NULL, &more);
2851+ if (!buf)
2852+ break;
2853+
2854+ skb_free_frag(buf);
2855+ } while (1);
2856+ spin_unlock_bh(&q->lock);
2857+
2858+ if (!q->rx_page.va)
2859+ return;
2860+
2861+ page = virt_to_page(q->rx_page.va);
2862+ __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
2863+ memset(&q->rx_page, 0, sizeof(q->rx_page));
2864+
2865+}
2866+
developer8cb3ac72022-07-04 10:55:14 +08002867+static int
2868+woif_q_init(struct mtk_wed_wo *dev,
2869+ int (*poll)(struct napi_struct *napi, int budget))
2870+{
2871+ init_dummy_netdev(&dev->napi_dev);
2872+ snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
2873+ "woif_q");
2874+
2875+ if (dev->q_rx.ndesc) {
2876+ netif_napi_add(&dev->napi_dev, &dev->napi, poll, 64);
2877+ woif_q_rx_fill(dev, &dev->q_rx);
2878+ woif_q_reset(dev, &dev->q_rx);
2879+ napi_enable(&dev->napi);
2880+ }
2881+
2882+ return 0;
2883+}
2884+
2885+void woif_q_rx_skb(struct mtk_wed_wo *wo, struct sk_buff *skb)
2886+{
2887+ struct wed_cmd_hdr *hdr = (struct wed_cmd_hdr *)skb->data;
2888+ int ret;
2889+
2890+ ret = mtk_wed_mcu_cmd_sanity_check(wo, skb);
2891+ if (ret)
2892+ goto free_skb;
2893+
2894+ if (WED_WO_CMD_FLAG_IS_RSP(hdr))
2895+ mtk_wed_mcu_rx_event(wo, skb);
2896+ else
2897+ mtk_wed_mcu_rx_unsolicited_event(wo, skb);
2898+
2899+ return;
2900+free_skb:
2901+ dev_kfree_skb(skb);
2902+}
2903+
2904+static int
2905+woif_q_tx_skb(struct mtk_wed_wo *wo, struct wed_wo_queue *q,
2906+ struct sk_buff *skb)
2907+{
2908+ struct wed_wo_queue_entry *entry;
2909+ struct wed_wo_desc *desc;
2910+ int len, ret, idx = -1;
2911+ dma_addr_t addr;
2912+ u32 ctrl = 0;
2913+
2914+ len = skb->len;
2915+ addr = dma_map_single(wo->hw->dev, skb->data, len, DMA_TO_DEVICE);
2916+ if (unlikely(dma_mapping_error(wo->hw->dev, addr)))
2917+ goto error;
2918+
2919+ /* packet tx, force trigger tx clean. */
developer8cb3ac72022-07-04 10:55:14 +08002920+ woif_q_tx_clean(wo, q, false);
2921+
developerf11dcd72022-08-27 18:29:27 +08002922+ if (q->queued >= q->ndesc) {
developer8cb3ac72022-07-04 10:55:14 +08002923+ ret = -ENOMEM;
2924+ goto error;
2925+ }
2926+
2927+ spin_lock_bh(&q->lock);
2928+
developerf11dcd72022-08-27 18:29:27 +08002929+ q->head = (q->head + 1) % q->ndesc;
developer8cb3ac72022-07-04 10:55:14 +08002930+ idx = q->head;
2931+
2932+ desc = &q->desc[idx];
2933+ entry = &q->entry[idx];
2934+
2935+ entry->dma_addr = addr;
2936+ entry->dma_len = len;
2937+
2938+ ctrl = FIELD_PREP(WED_CTL_SD_LEN0, len);
2939+ ctrl |= WED_CTL_LAST_SEC0;
2940+ ctrl |= WED_CTL_DMA_DONE;
2941+
2942+ WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
2943+ WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
2944+
2945+ q->queued++;
2946+ q->entry[idx].skb = skb;
2947+
2948+ woif_q_kick(wo, q, 0);
2949+ wo->drv_ops->kickout(wo);
2950+
developer8cb3ac72022-07-04 10:55:14 +08002951+ spin_unlock_bh(&q->lock);
2952+ return 0;
2953+
2954+error:
2955+ dev_kfree_skb(skb);
2956+ return -ENOMEM;
2957+}
2958+
2959+static const struct wed_wo_queue_ops wo_queue_ops = {
2960+ .init = woif_q_init,
2961+ .alloc = woif_q_alloc,
developera3f86ed2022-07-08 14:15:13 +08002962+ .free = woif_q_free,
developer8cb3ac72022-07-04 10:55:14 +08002963+ .reset = woif_q_reset,
2964+ .tx_skb = woif_q_tx_skb,
2965+ .tx_clean = woif_q_tx_clean,
2966+ .rx_clean = woif_q_rx_clean,
2967+ .kick = woif_q_kick,
2968+};
2969+
2970+static int
2971+mtk_wed_wo_rx_process(struct mtk_wed_wo *wo, struct wed_wo_queue *q, int budget)
2972+{
2973+ int len, data_len, done = 0;
2974+ struct sk_buff *skb;
2975+ unsigned char *data;
2976+ bool more;
2977+
2978+ while (done < budget) {
2979+ u32 info;
2980+
2981+ data = woif_q_deq(wo, q, false, &len, &info, &more);
2982+ if (!data)
2983+ break;
2984+
developer8cb3ac72022-07-04 10:55:14 +08002985+ skb = build_skb(data, q->buf_size);
2986+ if (!skb) {
2987+ skb_free_frag(data);
2988+ continue;
2989+ }
2990+
2991+ __skb_put(skb, len);
2992+ done++;
2993+
2994+ woif_q_rx_skb(wo, skb);
2995+ }
2996+
2997+ woif_q_rx_fill_process(wo, q);
2998+
2999+ return done;
3000+}
3001+
3002+void mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, bool set,
3003+ u32 clear, u32 val)
3004+{
3005+ unsigned long flags;
3006+
3007+ spin_lock_irqsave(&wo->ccif.irq_lock, flags);
3008+ wo->ccif.irqmask &= ~clear;
3009+ wo->ccif.irqmask |= val;
3010+ if (set)
3011+ wo->drv_ops->set_isr(wo, wo->ccif.irqmask);
3012+
3013+ spin_unlock_irqrestore(&wo->ccif.irq_lock, flags);
3014+}
3015+
3016+static inline void mtk_wed_wo_set_ack_mask(struct mtk_wed_wo *wo, u32 mask)
3017+{
3018+ wo->drv_ops->set_ack(wo, mask);
3019+}
3020+
3021+static void mtk_wed_wo_poll_complete(struct mtk_wed_wo *wo)
3022+{
3023+ mtk_wed_wo_set_ack_mask(wo, wo->ccif.q_int_mask);
3024+ mtk_wed_wo_isr_enable(wo, wo->ccif.q_int_mask);
3025+}
3026+
3027+int mtk_wed_wo_rx_poll(struct napi_struct *napi, int budget)
3028+{
3029+ struct mtk_wed_wo *wo;
3030+ int done = 0, cur;
3031+
3032+ wo = container_of(napi->dev, struct mtk_wed_wo, napi_dev);
3033+
3034+ rcu_read_lock();
3035+
3036+ do {
3037+ cur = mtk_wed_wo_rx_process(wo, &wo->q_rx, budget - done);
3038+ /* rx packet handle */
3039+ done += cur;
3040+ } while (cur && done < budget);
3041+
3042+ rcu_read_unlock();
3043+
3044+ if (done < budget && napi_complete(napi))
3045+ mtk_wed_wo_poll_complete(wo);
3046+
3047+ return done;
3048+}
3049+
3050+static void mtk_wed_wo_isr_tasklet(unsigned long data)
3051+{
3052+ struct mtk_wed_wo *wo = (struct mtk_wed_wo *)data;
3053+ u32 intr, mask;
3054+
3055+ /* disable isr */
3056+ wo->drv_ops->set_isr(wo, 0);
3057+
3058+ intr = wo->drv_ops->get_csr(wo);
3059+ intr &= wo->ccif.irqmask;
3060+
3061+ mask = intr & (wo->ccif.q_int_mask | wo->ccif.q_exep_mask);
3062+ mtk_wed_wo_isr_disable(wo, mask);
3063+
3064+ if (intr & wo->ccif.q_int_mask)
3065+ napi_schedule(&wo->napi);
3066+
3067+ if (intr & wo->ccif.q_exep_mask) {
3068+ /* todo */
3069+ }
3070+}
3071+
3072+static irqreturn_t mtk_wed_wo_isr_handler(int irq, void *wo_instance)
3073+{
3074+ struct mtk_wed_wo *wo = wo_instance;
3075+
3076+ wo->drv_ops->set_isr(wo, 0);
3077+
3078+ tasklet_schedule(&wo->irq_tasklet);
3079+
3080+ return IRQ_HANDLED;
3081+}
3082+
3083+int mtk_wed_wo_init(struct mtk_wed_hw *hw)
3084+{
3085+ struct mtk_wed_wo *wo;
3086+ int ret = 0;
3087+
3088+ wo = kzalloc(sizeof(struct mtk_wed_wo), GFP_KERNEL);
3089+ if (!wo)
3090+ return -ENOMEM;
3091+
3092+ wo->hw = hw;
3093+ wo->queue_ops = &wo_queue_ops;
3094+ hw->wed_wo = wo;
3095+
3096+ tasklet_init(&wo->irq_tasklet, mtk_wed_wo_isr_tasklet,
3097+ (unsigned long)wo);
3098+
3099+ skb_queue_head_init(&wo->mcu.res_q);
3100+ init_waitqueue_head(&wo->mcu.wait);
3101+ mutex_init(&wo->mcu.mutex);
3102+
3103+ ret = wed_wo_hardware_init(wo, mtk_wed_wo_isr_handler);
3104+ if (ret)
3105+ goto error;
3106+
3107+ /* fw download */
3108+ ret = wed_wo_mcu_init(wo);
3109+ if (ret)
3110+ goto error;
3111+
3112+ ret = mtk_wed_exception_init(wo);
3113+ if (ret)
3114+ goto error;
3115+
3116+ return ret;
3117+
3118+error:
3119+ kfree(wo);
3120+
3121+ return ret;
3122+}
3123+
3124+void mtk_wed_wo_exit(struct mtk_wed_hw *hw)
3125+{
developer8cb3ac72022-07-04 10:55:14 +08003126+ struct mtk_wed_wo *wo = hw->wed_wo;
3127+
developera3f86ed2022-07-08 14:15:13 +08003128+ wed_wo_hardware_exit(wo);
3129+
developer8cb3ac72022-07-04 10:55:14 +08003130+ if (wo->exp.log) {
3131+ dma_unmap_single(wo->hw->dev, wo->exp.phys, wo->exp.log_size, DMA_FROM_DEVICE);
3132+ kfree(wo->exp.log);
3133+ }
3134+
developera3f86ed2022-07-08 14:15:13 +08003135+ wo->hw = NULL;
3136+ memset(wo, 0, sizeof(*wo));
3137+ kfree(wo);
developer8cb3ac72022-07-04 10:55:14 +08003138+}
3139diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.h b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
3140new file mode 100644
developer8fec8ae2022-08-15 15:01:09 -07003141index 000000000..d962e3a33
developer8cb3ac72022-07-04 10:55:14 +08003142--- /dev/null
3143+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
developer8fec8ae2022-08-15 15:01:09 -07003144@@ -0,0 +1,327 @@
developer8cb3ac72022-07-04 10:55:14 +08003145+// SPDX-License-Identifier: GPL-2.0-only
3146+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
3147+
3148+#ifndef __MTK_WED_WO_H
3149+#define __MTK_WED_WO_H
3150+
3151+#include <linux/netdevice.h>
3152+#include <linux/skbuff.h>
3153+#include "mtk_wed.h"
3154+
3155+#define WED_CTL_SD_LEN1 GENMASK(13, 0)
3156+#define WED_CTL_LAST_SEC1 BIT(14)
3157+#define WED_CTL_BURST BIT(15)
3158+#define WED_CTL_SD_LEN0_SHIFT 16
3159+#define WED_CTL_SD_LEN0 GENMASK(29, 16)
3160+#define WED_CTL_LAST_SEC0 BIT(30)
3161+#define WED_CTL_DMA_DONE BIT(31)
3162+#define WED_INFO_WINFO GENMASK(15, 0)
3163+
3164+#define MTK_WED_WO_TXQ_FREE_THR 10
3165+
3166+#define WED_WO_PROFILE_MAX_LVL 6
3167+
3168+
3169+enum mtk_wed_fw_region_id {
3170+ WO_REGION_EMI = 0,
3171+ WO_REGION_ILM,
3172+ WO_REGION_DATA,
3173+ WO_REGION_BOOT,
3174+ __WO_REGION_MAX
3175+};
3176+
3177+struct wed_wo_profile_stat {
3178+ u32 bound;
3179+ u32 record;
3180+};
3181+
3182+#define PROFILE_STAT(record, val) do { \
3183+ u8 lvl = 0; \
3184+ while (lvl < WED_WO_PROFILE_MAX_LVL) { \
3185+ if (val < record[lvl].bound) { \
3186+ record[lvl].record++; \
3187+ break; \
3188+ } \
3189+ lvl++; \
3190+ } \
3191+ } while (0)
3192+
3193+/* align with wo report structure */
3194+struct wed_wo_log {
3195+ u32 sn;
3196+ u32 total;
3197+ u32 rro;
3198+ u32 mod;
3199+};
3200+
3201+struct wed_wo_rxcnt {
3202+ u16 wlan_idx;
3203+ u16 tid;
3204+ u32 rx_pkt_cnt;
3205+ u32 rx_byte_cnt;
3206+ u32 rx_err_cnt;
3207+ u32 rx_drop_cnt;
3208+};
3209+
3210+struct wed_wo_queue {
3211+ struct wed_wo_queue_regs *regs;
3212+
3213+ spinlock_t lock;
3214+ spinlock_t cleanup_lock;
3215+ struct wed_wo_queue_entry *entry;
3216+ struct wed_wo_desc *desc;
3217+
3218+ u16 first;
3219+ u16 head;
3220+ u16 tail;
3221+ int ndesc;
3222+ int queued;
3223+ int buf_size;
3224+
3225+ u8 hw_idx;
3226+ u8 qid;
3227+ u8 flags;
3228+
3229+ dma_addr_t desc_dma;
3230+ struct page_frag_cache rx_page;
3231+};
3232+
3233+
3234+struct wed_wo_mmio {
3235+ struct regmap *regs;
3236+
3237+ spinlock_t irq_lock;
3238+ u8 irq;
3239+ u32 irqmask;
3240+
3241+ u32 q_int_mask;
3242+ u32 q_exep_mask;
3243+};
3244+
3245+struct wed_wo_mcu {
3246+ struct mutex mutex;
3247+ u32 msg_seq;
3248+ int timeout;
3249+
3250+ struct sk_buff_head res_q;
3251+ wait_queue_head_t wait;
3252+};
3253+
3254+struct wed_wo_exception {
3255+ void* log;
3256+ int log_size;
3257+ dma_addr_t phys;
3258+};
3259+
3260+struct wed_wo_queue_regs {
3261+ u32 desc_base;
3262+ u32 ring_size;
3263+ u32 cpu_idx;
3264+ u32 dma_idx;
3265+};
3266+
3267+struct wed_wo_desc {
3268+ __le32 buf0;
3269+ __le32 ctrl;
3270+ __le32 buf1;
3271+ __le32 info;
3272+ __le32 reserved[4];
3273+} __packed __aligned(32);
3274+
3275+struct wed_wo_queue_entry {
3276+ union {
3277+ void *buf;
3278+ struct sk_buff *skb;
3279+ };
3280+
3281+ u32 dma_addr;
3282+ u16 dma_len;
3283+ u16 wcid;
3284+ bool skip_buf0:1;
3285+ bool skip_buf1:1;
3286+ bool done:1;
3287+};
3288+
developer8cb3ac72022-07-04 10:55:14 +08003289+struct wo_cmd_query {
3290+ u32 query0;
3291+ u32 query1;
3292+};
3293+
3294+struct wed_cmd_hdr {
3295+ /*DW0*/
3296+ u8 ver;
3297+ u8 cmd_id;
3298+ u16 length;
3299+
3300+ /*DW1*/
3301+ u16 uni_id;
3302+ u16 flag;
3303+
3304+ /*DW2*/
3305+ int status;
3306+
3307+ /*DW3*/
3308+ u8 reserved[20];
3309+};
3310+
3311+struct mtk_wed_fw_region {
3312+ void *addr;
3313+ u32 addr_pa;
3314+ u32 size;
3315+ u32 shared;
3316+};
3317+
3318+struct wed_wo_queue_ops;
3319+struct wed_wo_drv_ops;
3320+struct wed_wo_mcu_ops;
3321+
3322+struct wo_rx_total_cnt {
3323+ u64 rx_pkt_cnt;
3324+ u64 rx_byte_cnt;
3325+ u64 rx_err_cnt;
3326+ u64 rx_drop_cnt;
3327+};
3328+
3329+struct mtk_wed_wo {
3330+ struct mtk_wed_hw *hw;
3331+
3332+ struct wed_wo_mmio ccif;
3333+ struct wed_wo_mcu mcu;
3334+ struct wed_wo_exception exp;
3335+
3336+ const struct wed_wo_drv_ops *drv_ops;
3337+ const struct wed_wo_mcu_ops *mcu_ops;
3338+ const struct wed_wo_queue_ops *queue_ops;
3339+
3340+ struct net_device napi_dev;
3341+ spinlock_t rx_lock;
3342+ struct napi_struct napi;
3343+ struct sk_buff_head rx_skb;
3344+ struct wed_wo_queue q_rx;
3345+ struct tasklet_struct irq_tasklet;
3346+
3347+ struct wed_wo_queue q_tx;
3348+
3349+ struct mtk_wed_fw_region region[__WO_REGION_MAX];
3350+
3351+ struct wed_wo_profile_stat total[WED_WO_PROFILE_MAX_LVL];
3352+ struct wed_wo_profile_stat mod[WED_WO_PROFILE_MAX_LVL];
3353+ struct wed_wo_profile_stat rro[WED_WO_PROFILE_MAX_LVL];
3354+ char dirname[4];
3355+ struct wo_rx_total_cnt wo_rxcnt[8][544];
3356+};
3357+
3358+struct wed_wo_queue_ops {
3359+ int (*init)(struct mtk_wed_wo *wo,
3360+ int (*poll)(struct napi_struct *napi, int budget));
3361+
3362+ int (*alloc)(struct mtk_wed_wo *wo, struct wed_wo_queue *q,
3363+ int idx, int n_desc, int bufsize,
3364+ struct wed_wo_queue_regs *regs);
developera3f86ed2022-07-08 14:15:13 +08003365+ void (*free)(struct mtk_wed_wo *wo, struct wed_wo_queue *q);
developer8cb3ac72022-07-04 10:55:14 +08003366+ void (*reset)(struct mtk_wed_wo *wo, struct wed_wo_queue *q);
3367+
3368+ int (*tx_skb)(struct mtk_wed_wo *wo, struct wed_wo_queue *q,
3369+ struct sk_buff *skb);
3370+ int (*tx_skb1)(struct mtk_wed_wo *wo, struct wed_wo_queue *q,
3371+ u8 *msg, u32 msg_len);
3372+ void (*tx_clean)(struct mtk_wed_wo *wo, struct wed_wo_queue *q,
3373+ bool flush);
3374+
3375+ void (*rx_clean)(struct mtk_wed_wo *wo, struct wed_wo_queue *q);
3376+
3377+ void (*kick)(struct mtk_wed_wo *wo, struct wed_wo_queue *q, int offset);
3378+};
3379+
3380+struct wed_wo_drv_ops {
3381+ void (*kickout)(struct mtk_wed_wo *wo);
3382+ void (*set_ack)(struct mtk_wed_wo *wo, u32 mask);
3383+ void (*set_isr)(struct mtk_wed_wo *wo, u32 mask);
3384+ u32 (*get_csr)(struct mtk_wed_wo *wo);
3385+ int (*tx_prepare_skb)(struct mtk_wed_wo *wo);
3386+ bool (*check_excpetion)(struct mtk_wed_wo *wo);
3387+ void (*clear_int)(struct mtk_wed_wo *wo, u32 mask);
3388+};
3389+
3390+struct wed_wo_mcu_ops {
3391+ u32 headroom;
3392+
3393+ int (*mcu_skb_send_msg)(struct mtk_wed_wo *wo, int to_id,
3394+ int cmd, struct sk_buff *skb,
3395+ int *seq, bool wait_resp);
3396+
3397+ int (*mcu_parse_response)(struct mtk_wed_wo *wo, int cmd,
3398+ struct sk_buff *skb, int seq);
3399+
3400+ int (*mcu_restart)(struct mtk_wed_wo *wo);
3401+};
3402+
3403+#define mtk_wed_wo_q_init(wo, ...) (wo)->queue_ops->init((wo), __VA_ARGS__)
3404+#define mtk_wed_wo_q_alloc(wo, ...) (wo)->queue_ops->alloc((wo), __VA_ARGS__)
developera3f86ed2022-07-08 14:15:13 +08003405+#define mtk_wed_wo_q_free(wo, ...) (wo)->queue_ops->free((wo), __VA_ARGS__)
3406+#define mtk_wed_wo_q_reset(wo, ...) (wo)->queue_ops->reset((wo), __VA_ARGS__)
developer8cb3ac72022-07-04 10:55:14 +08003407+#define mtk_wed_wo_q_tx_skb(wo, ...) (wo)->queue_ops->tx_skb((wo), __VA_ARGS__)
3408+#define mtk_wed_wo_q_tx_skb1(wo, ...) (wo)->queue_ops->tx_skb1((wo), __VA_ARGS__)
3409+#define mtk_wed_wo_q_tx_clean(wo, ...) (wo)->queue_ops->tx_clean((wo), __VA_ARGS__)
3410+#define mtk_wed_wo_q_rx_clean(wo, ...) (wo)->queue_ops->rx_clean((wo), __VA_ARGS__)
3411+#define mtk_wed_wo_q_kick(wo, ...) (wo)->queue_ops->kick((wo), __VA_ARGS__)
3412+
3413+enum {
3414+ WARP_CMD_FLAG_RSP = 1 << 0, /* is response */
3415+ WARP_CMD_FLAG_NEED_RSP = 1 << 1, /* need response */
3416+ WARP_CMD_FLAG_FROM_TO_WO = 1 << 2, /* send between host and wo */
3417+};
3418+
3419+#define WED_WO_CMD_FLAG_IS_RSP(_hdr) ((_hdr)->flag & (WARP_CMD_FLAG_RSP))
3420+#define WED_WO_CMD_FLAG_SET_RSP(_hdr) ((_hdr)->flag |= (WARP_CMD_FLAG_RSP))
3421+#define WED_WO_CMD_FLAG_IS_NEED_RSP(_hdr) ((_hdr)->flag & (WARP_CMD_FLAG_NEED_RSP))
3422+#define WED_WO_CMD_FLAG_SET_NEED_RSP(_hdr) ((_hdr)->flag |= (WARP_CMD_FLAG_NEED_RSP))
3423+#define WED_WO_CMD_FLAG_IS_FROM_TO_WO(_hdr) ((_hdr)->flag & (WARP_CMD_FLAG_FROM_TO_WO))
3424+#define WED_WO_CMD_FLAG_SET_FROM_TO_WO(_hdr) ((_hdr)->flag |= (WARP_CMD_FLAG_FROM_TO_WO))
3425+
3426+void mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, bool set,
3427+ u32 clear, u32 val);
3428+
3429+static inline void mtk_wed_wo_isr_enable(struct mtk_wed_wo *wo, u32 mask)
3430+{
3431+ mtk_wed_wo_set_isr_mask(wo, false, 0, mask);
3432+
3433+ tasklet_schedule(&wo->irq_tasklet);
3434+}
3435+
3436+static inline void mtk_wed_wo_isr_disable(struct mtk_wed_wo *wo, u32 mask)
3437+{
3438+ mtk_wed_wo_set_isr_mask(wo, true, mask, 0);
3439+}
3440+
3441+static inline void
3442+wo_w32(struct mtk_wed_wo *dev, u32 reg, u32 val)
3443+{
3444+ writel(val, dev->region[WO_REGION_BOOT].addr + reg);
3445+}
3446+
3447+static inline u32
3448+wo_r32(struct mtk_wed_wo *dev, u32 reg)
3449+{
3450+ return readl(dev->region[WO_REGION_BOOT].addr + reg);
3451+}
3452+static inline void
3453+woccif_w32(struct mtk_wed_wo *dev, u32 reg, u32 val)
3454+{
3455+ regmap_write(dev->ccif.regs, reg, val);
3456+}
3457+
3458+static inline u32
3459+woccif_r32(struct mtk_wed_wo *dev, u32 reg)
3460+{
3461+ unsigned int val;
3462+
3463+ regmap_read(dev->ccif.regs, reg, &val);
3464+
3465+ return val;
3466+}
3467+
3468+int mtk_wed_wo_init(struct mtk_wed_hw *hw);
developera3f86ed2022-07-08 14:15:13 +08003469+void mtk_wed_wo_exit(struct mtk_wed_hw *hw);
developer8cb3ac72022-07-04 10:55:14 +08003470+#endif
3471+
3472diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
developer8fec8ae2022-08-15 15:01:09 -07003473index ffd547a4c..c74dd4aad 100644
developer8cb3ac72022-07-04 10:55:14 +08003474--- a/include/linux/soc/mediatek/mtk_wed.h
3475+++ b/include/linux/soc/mediatek/mtk_wed.h
3476@@ -7,6 +7,9 @@
3477 #include <linux/pci.h>
3478
3479 #define MTK_WED_TX_QUEUES 2
3480+#define MTK_WED_RX_QUEUES 2
3481+
3482+#define WED_WO_STA_REC 0x6
3483
3484 enum {
3485 MTK_NO_WED,
developer8fec8ae2022-08-15 15:01:09 -07003486@@ -33,6 +36,33 @@ struct mtk_wed_ring {
developer8cb3ac72022-07-04 10:55:14 +08003487 void __iomem *wpdma;
3488 };
3489
3490+struct mtk_rxbm_desc {
3491+ __le32 buf0;
3492+ __le32 token;
3493+} __packed __aligned(4);
3494+
3495+struct dma_buf {
3496+ int size;
3497+ void **pages;
3498+ struct mtk_wdma_desc *desc;
3499+ dma_addr_t desc_phys;
3500+};
3501+
3502+struct dma_entry {
3503+ int size;
3504+ struct mtk_rxbm_desc *desc;
3505+ dma_addr_t desc_phys;
3506+};
3507+
developer8fec8ae2022-08-15 15:01:09 -07003508+struct wo_cmd_rxcnt_t {
3509+ u16 wlan_idx;
3510+ u16 tid;
3511+ u32 rx_pkt_cnt;
3512+ u32 rx_byte_cnt;
3513+ u32 rx_err_cnt;
3514+ u32 rx_drop_cnt;
3515+};
3516+
developer8cb3ac72022-07-04 10:55:14 +08003517 struct mtk_wed_device {
3518 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
3519 const struct mtk_wed_ops *ops;
developer8fec8ae2022-08-15 15:01:09 -07003520@@ -42,39 +63,59 @@ struct mtk_wed_device {
developerbbca0f92022-07-26 17:26:12 +08003521 int wdma_idx;
3522 int irq;
3523 u8 ver;
3524+ u32 rev_id;
3525
developer8cb3ac72022-07-04 10:55:14 +08003526 struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES];
3527 struct mtk_wed_ring txfree_ring;
3528 struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
3529+ struct mtk_wed_ring rx_ring[MTK_WED_RX_QUEUES];
3530+ struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES];
3531+
3532+ struct dma_buf buf_ring;
3533+ struct dma_entry rx_buf_ring;
3534+ struct page_frag_cache rx_page;
3535
3536 struct {
3537- int size;
3538- void **pages;
3539- struct mtk_wdma_desc *desc;
3540- dma_addr_t desc_phys;
3541- } buf_ring;
3542+ struct mtk_wed_ring rro_ring;
3543+ void __iomem *rro_desc;
3544+ dma_addr_t miod_desc_phys;
3545+ dma_addr_t fdbk_desc_phys;
3546+ u32 mcu_view_miod;
3547+ } rro;
3548
3549 /* filled by driver: */
3550 struct {
3551 struct pci_dev *pci_dev;
3552 void __iomem *base;
3553 u32 bus_type;
3554+ u32 phy_base;
3555
developerbbca0f92022-07-26 17:26:12 +08003556 u32 wpdma_phys;
3557 u32 wpdma_int;
developer8cb3ac72022-07-04 10:55:14 +08003558 u32 wpdma_mask;
3559 u32 wpdma_tx;
3560 u32 wpdma_txfree;
3561+ u32 wpdma_rx_glo;
3562+ u32 wpdma_rx;
3563
3564 u8 tx_tbit[MTK_WED_TX_QUEUES];
3565+ u8 rx_tbit[MTK_WED_RX_QUEUES];
3566 u8 txfree_tbit;
3567
3568 u16 token_start;
3569 unsigned int nbuf;
3570+ unsigned int rx_nbuf;
3571+ unsigned int rx_pkt;
3572+ unsigned int rx_pkt_size;
3573
3574 u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
3575 int (*offload_enable)(struct mtk_wed_device *wed);
3576 void (*offload_disable)(struct mtk_wed_device *wed);
3577+ u32 (*init_rx_buf)(struct mtk_wed_device *wed,
3578+ int pkt_num);
3579+ void (*release_rx_buf)(struct mtk_wed_device *wed);
developer8fec8ae2022-08-15 15:01:09 -07003580+ void (*update_wo_rxcnt)(struct mtk_wed_device *wed,
3581+ struct wo_cmd_rxcnt_t *rxcnt);
developer8cb3ac72022-07-04 10:55:14 +08003582 } wlan;
3583 #endif
3584 };
developer8fec8ae2022-08-15 15:01:09 -07003585@@ -85,6 +126,10 @@ struct mtk_wed_ops {
developer8cb3ac72022-07-04 10:55:14 +08003586 void __iomem *regs);
3587 int (*txfree_ring_setup)(struct mtk_wed_device *dev,
3588 void __iomem *regs);
3589+ int (*rx_ring_setup)(struct mtk_wed_device *dev, int ring,
3590+ void __iomem *regs);
3591+ int (*msg_update)(struct mtk_wed_device *dev, int cmd_id,
3592+ void *data, int len);
3593 void (*detach)(struct mtk_wed_device *dev);
3594
3595 void (*stop)(struct mtk_wed_device *dev);
developer8fec8ae2022-08-15 15:01:09 -07003596@@ -96,6 +141,8 @@ struct mtk_wed_ops {
developer8cb3ac72022-07-04 10:55:14 +08003597
3598 u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask);
3599 void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
developerbbca0f92022-07-26 17:26:12 +08003600+ void (*ppe_check)(struct mtk_wed_device *dev, struct sk_buff *skb,
developer8cb3ac72022-07-04 10:55:14 +08003601+ u32 reason, u32 hash);
3602 };
3603
3604 extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
developer8fec8ae2022-08-15 15:01:09 -07003605@@ -128,6 +175,10 @@ mtk_wed_device_attach(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +08003606 (_dev)->ops->tx_ring_setup(_dev, _ring, _regs)
3607 #define mtk_wed_device_txfree_ring_setup(_dev, _regs) \
3608 (_dev)->ops->txfree_ring_setup(_dev, _regs)
3609+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) \
3610+ (_dev)->ops->rx_ring_setup(_dev, _ring, _regs)
3611+#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) \
3612+ (_dev)->ops->msg_update(_dev, _id, _msg, _len)
3613 #define mtk_wed_device_reg_read(_dev, _reg) \
3614 (_dev)->ops->reg_read(_dev, _reg)
3615 #define mtk_wed_device_reg_write(_dev, _reg, _val) \
developer8fec8ae2022-08-15 15:01:09 -07003616@@ -136,6 +187,8 @@ mtk_wed_device_attach(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +08003617 (_dev)->ops->irq_get(_dev, _mask)
3618 #define mtk_wed_device_irq_set_mask(_dev, _mask) \
3619 (_dev)->ops->irq_set_mask(_dev, _mask)
3620+#define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) \
3621+ (_dev)->ops->ppe_check(_dev, _skb, _reason, _hash)
3622 #else
3623 static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
3624 {
developer8fec8ae2022-08-15 15:01:09 -07003625@@ -145,10 +198,13 @@ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
developer8cb3ac72022-07-04 10:55:14 +08003626 #define mtk_wed_device_start(_dev, _mask) do {} while (0)
3627 #define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV
3628 #define mtk_wed_device_txfree_ring_setup(_dev, _ring, _regs) -ENODEV
3629+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) -ENODEV
3630+#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) -ENODEV
3631 #define mtk_wed_device_reg_read(_dev, _reg) 0
3632 #define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
3633 #define mtk_wed_device_irq_get(_dev, _mask) 0
3634 #define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0)
3635+#define mtk_wed_device_ppe_check(_dev, _hash) do {} while (0)
3636 #endif
3637
3638 #endif
3639--
developer8fec8ae2022-08-15 15:01:09 -070036402.32.0
developer8cb3ac72022-07-04 10:55:14 +08003641