1From 400f8349a31ffc48538aa7df64a88111de9a738b Mon Sep 17 00:00:00 2001
2From: Sujuan Chen <sujuan.chen@mediatek.com>
3Date: Thu, 13 Apr 2023 15:51:08 +0800
4Subject: [PATCH] mtk: wed: add wed3 support
5
6Signed-off-by: sujuan.chen <sujuan.chen@mediatek.com>
7---
8 arch/arm64/boot/dts/mediatek/mt7988.dtsi | 152 ++-
9 .../dts/mediatek/mt7988a-dsa-10g-spim-nor.dts | 16 +-
10 .../dts/mediatek/mt7988d-dsa-10g-spim-nor.dts | 16 +-
11 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 3 +-
12 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 5 +-
13 drivers/net/ethernet/mediatek/mtk_ppe.c | 17 +-
14 drivers/net/ethernet/mediatek/mtk_ppe.h | 2 +-
15 .../net/ethernet/mediatek/mtk_ppe_offload.c | 13 +-
16 drivers/net/ethernet/mediatek/mtk_wed.c | 1174 +++++++++++++----
17 drivers/net/ethernet/mediatek/mtk_wed.h | 25 +-
18 .../net/ethernet/mediatek/mtk_wed_debugfs.c | 584 ++++++++-
19 drivers/net/ethernet/mediatek/mtk_wed_mcu.c | 13 +-
20 drivers/net/ethernet/mediatek/mtk_wed_mcu.h | 5 +-
21 drivers/net/ethernet/mediatek/mtk_wed_regs.h | 338 ++++-
22 include/linux/netdevice.h | 7 +
23 include/linux/soc/mediatek/mtk_wed.h | 81 +-
24 16 files changed, 1446 insertions(+), 333 deletions(-)
25 mode change 100755 => 100644 drivers/net/ethernet/mediatek/mtk_ppe.c
26
27diff --git a/arch/arm64/boot/dts/mediatek/mt7988.dtsi b/arch/arm64/boot/dts/mediatek/mt7988.dtsi
28index 364deef..f9a0120 100644
29--- a/arch/arm64/boot/dts/mediatek/mt7988.dtsi
30+++ b/arch/arm64/boot/dts/mediatek/mt7988.dtsi
31@@ -191,44 +191,49 @@
32 status = "disabled";
33 };
34
35- wed: wed@15010000 {
36- compatible = "mediatek,wed";
37- wed_num = <3>;
38- /* add this property for wed get the pci slot number. */
39- pci_slot_map = <0>, <1>, <2>;
40- reg = <0 0x15010000 0 0x2000>,
41- <0 0x15012000 0 0x2000>,
42- <0 0x15014000 0 0x2000>;
43+ wed0: wed@15010000 {
44+ compatible = "mediatek,mt7988-wed",
45+ "syscon";
46+ reg = <0 0x15010000 0 0x2000>;
47 interrupt-parent = <&gic>;
48- interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
49- <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
50- <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>;
51- };
52-
53- wed2: wed2@15012000 {
54- compatible = "mediatek,wed2";
55- wed_num = <3>;
56- /* add this property for wed get the pci slot number. */
57- reg = <0 0x15010000 0 0x2000>,
58- <0 0x15012000 0 0x2000>,
59- <0 0x15014000 0 0x2000>;
60+ interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
61+ mediatek,wed_pcie = <&wed_pcie>;
62+ mediatek,ap2woccif = <&ap2woccif0>;
63+ mediatek,wocpu_ilm = <&wocpu0_ilm>;
64+ mediatek,wocpu_dlm = <&wocpu0_dlm>;
65+ mediatek,wocpu_boot = <&cpu0_boot>;
66+ mediatek,wocpu_emi = <&wocpu0_emi>;
67+ mediatek,wocpu_data = <&wocpu_data>;
68+ };
69+
70+ wed1: wed@15012000 {
71+ compatible = "mediatek,mt7988-wed",
72+ "syscon";
73+ reg = <0 0x15012000 0 0x2000>;
74 interrupt-parent = <&gic>;
75- interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
76- <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
77- <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>;
78- };
79-
80- wed3: wed3@15014000 {
81- compatible = "mediatek,wed3";
82- wed_num = <3>;
83- /* add this property for wed get the pci slot number. */
84- reg = <0 0x15010000 0 0x2000>,
85- <0 0x15012000 0 0x2000>,
86- <0 0x15014000 0 0x2000>;
87+ interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
88+ mediatek,wed_pcie = <&wed_pcie>;
89+ mediatek,ap2woccif = <&ap2woccif1>;
90+ mediatek,wocpu_ilm = <&wocpu1_ilm>;
91+ mediatek,wocpu_dlm = <&wocpu1_dlm>;
92+ mediatek,wocpu_boot = <&cpu1_boot>;
93+ mediatek,wocpu_emi = <&wocpu1_emi>;
94+ mediatek,wocpu_data = <&wocpu_data>;
95+ };
96+
97+ wed2: wed@15014000 {
98+ compatible = "mediatek,mt7988-wed",
99+ "syscon";
100+ reg = <0 0x15014000 0 0x2000>;
101 interrupt-parent = <&gic>;
102- interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
103- <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
104- <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>;
105+ interrupts = <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>;
106+ mediatek,wed_pcie = <&wed_pcie>;
107+ mediatek,ap2woccif = <&ap2woccif2>;
108+ mediatek,wocpu_ilm = <&wocpu2_ilm>;
109+ mediatek,wocpu_dlm = <&wocpu2_dlm>;
110+ mediatek,wocpu_boot = <&cpu2_boot>;
111+ mediatek,wocpu_emi = <&wocpu2_emi>;
112+ mediatek,wocpu_data = <&wocpu_data>;
113 };
114
115 wdma: wdma@15104800 {
116@@ -238,15 +243,25 @@
117 <0 0x15105000 0 0x400>;
118 };
119
120- ap2woccif: ap2woccif@151A5000 {
121- compatible = "mediatek,ap2woccif";
122- reg = <0 0x151A5000 0 0x1000>,
123- <0 0x152A5000 0 0x1000>,
124- <0 0x153A5000 0 0x1000>;
125+ ap2woccif0: ap2woccif@151A5000 {
126+ compatible = "mediatek,ap2woccif", "syscon";
127+ reg = <0 0x151A5000 0 0x1000>;
128+ interrupt-parent = <&gic>;
129+ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
130+ };
131+
132+ ap2woccif1: ap2woccif@152A5000 {
133+ compatible = "mediatek,ap2woccif", "syscon";
134+ reg = <0 0x152A5000 0 0x1000>;
135 interrupt-parent = <&gic>;
136- interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
137- <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
138- <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
139+ interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
140+ };
141+
142+ ap2woccif2: ap2woccif@153A5000 {
143+ compatible = "mediatek,ap2woccif", "syscon";
144+ reg = <0 0x153A5000 0 0x1000>;
145+ interrupt-parent = <&gic>;
146+ interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
147 };
148
149 wocpu0_ilm: wocpu0_ilm@151E0000 {
150@@ -254,31 +269,53 @@
151 reg = <0 0x151E0000 0 0x8000>;
152 };
153
154- wocpu1_ilm: wocpu1_ilm@152E0000 {
155- compatible = "mediatek,wocpu1_ilm";
156+ wocpu1_ilm: wocpu_ilm@152E0000 {
157+ compatible = "mediatek,wocpu_ilm";
158 reg = <0 0x152E0000 0 0x8000>;
159 };
160
161- wocpu2_ilm: wocpu2_ilm@153E0000 {
162- compatible = "mediatek,wocpu2_ilm";
163- reg = <0 0x153E0000 0 0x8000>;
164+ wocpu2_ilm: wocpu_ilm@153E0000 {
165+ compatible = "mediatek,wocpu_ilm";
166+ reg = <0 0x153E0000 0 0x8000>;
167+ };
168+
169+ wocpu0_dlm: wocpu_dlm@151E8000 {
170+ compatible = "mediatek,wocpu_dlm";
171+ reg = <0 0x151E8000 0 0x2000>;
172+
173+ resets = <&ethsysrst 0>;
174+ reset-names = "wocpu_rst";
175+ };
176+
177+ wocpu1_dlm: wocpu_dlm@0x152E8000 {
178+ compatible = "mediatek,wocpu_dlm";
179+ reg = <0 0x152E8000 0 0x2000>;
180+
181+ resets = <&ethsysrst 0>;
182+ reset-names = "wocpu_rst";
183 };
184
185- wocpu_dlm: wocpu_dlm@151E8000 {
186+ wocpu2_dlm: wocpu_dlm@0x153E8000 {
187 compatible = "mediatek,wocpu_dlm";
188- reg = <0 0x151E8000 0 0x2000>,
189- <0 0x152E8000 0 0x2000>,
190- <0 0x153E8000 0 0x2000>;
191+ reg = <0 0x153E8000 0 0x2000>;
192
193 resets = <&ethsysrst 0>;
194 reset-names = "wocpu_rst";
195 };
196
197- cpu_boot: wocpu_boot@15194000 {
198- compatible = "mediatek,wocpu_boot";
199- reg = <0 0x15194000 0 0x1000>,
200- <0 0x15294000 0 0x1000>,
201- <0 0x15394000 0 0x1000>;
202+ cpu0_boot: wocpu_boot@15194000 {
203+ compatible = "mediatek,wocpu0_boot";
204+ reg = <0 0x15194000 0 0x1000>;
205+ };
206+
207+ cpu1_boot: wocpu_boot@15294000 {
208+ compatible = "mediatek,wocpu1_boot";
209+ reg = <0 0x15294000 0 0x1000>;
210+ };
211+
212+ cpu2_boot: wocpu_boot@15394000 {
213+ compatible = "mediatek,wocpu2_boot";
214+ reg = <0 0x15394000 0 0x1000>;
215 };
216
217 reserved-memory {
218@@ -827,6 +864,7 @@
219 <&topckgen CK_TOP_CB_SGM_325M>;
220 mediatek,ethsys = <&ethsys>;
221 mediatek,sgmiisys = <&sgmiisys0>, <&sgmiisys1>;
222+ mediatek,wed = <&wed0>, <&wed1>, <&wed2>;
223 mediatek,usxgmiisys = <&usxgmiisys0>, <&usxgmiisys1>;
224 mediatek,xfi_pextp = <&xfi_pextp0>, <&xfi_pextp1>;
225 mediatek,xfi_pll = <&xfi_pll>;
226diff --git a/arch/arm64/boot/dts/mediatek/mt7988a-dsa-10g-spim-nor.dts b/arch/arm64/boot/dts/mediatek/mt7988a-dsa-10g-spim-nor.dts
227index 7db5164..0a6db8b 100644
228--- a/arch/arm64/boot/dts/mediatek/mt7988a-dsa-10g-spim-nor.dts
229+++ b/arch/arm64/boot/dts/mediatek/mt7988a-dsa-10g-spim-nor.dts
230@@ -341,9 +341,23 @@
231 status = "okay";
232 };
233
234-&wed {
235+&wed0 {
236 dy_txbm_enable = "true";
237 dy_txbm_budge = <8>;
238 txbm_init_sz = <10>;
239 status = "okay";
240 };
241+
242+&wed1 {
243+ dy_txbm_enable = "true";
244+ dy_txbm_budge = <8>;
245+ txbm_init_sz = <10>;
246+ status = "okay";
247+};
248+
249+&wed2 {
250+ dy_txbm_enable = "true";
251+ dy_txbm_budge = <8>;
252+ txbm_init_sz = <10>;
253+ status = "okay";
254+};
255\ No newline at end of file
256diff --git a/arch/arm64/boot/dts/mediatek/mt7988d-dsa-10g-spim-nor.dts b/arch/arm64/boot/dts/mediatek/mt7988d-dsa-10g-spim-nor.dts
257index 67c6508..c407b33 100644
258--- a/arch/arm64/boot/dts/mediatek/mt7988d-dsa-10g-spim-nor.dts
259+++ b/arch/arm64/boot/dts/mediatek/mt7988d-dsa-10g-spim-nor.dts
260@@ -325,9 +325,23 @@
261 status = "okay";
262 };
263
264-&wed {
265+&wed0 {
266 dy_txbm_enable = "true";
267 dy_txbm_budge = <8>;
268 txbm_init_sz = <10>;
269 status = "okay";
270 };
271+
272+&wed1 {
273+ dy_txbm_enable = "true";
274+ dy_txbm_budge = <8>;
275+ txbm_init_sz = <10>;
276+ status = "okay";
277+};
278+
279+&wed2 {
280+ dy_txbm_enable = "true";
281+ dy_txbm_budge = <8>;
282+ txbm_init_sz = <10>;
283+ status = "okay";
284+};
285\ No newline at end of file
286diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
287index 388982c..d59c29f 100644
288--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
289+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
290@@ -4709,7 +4709,8 @@ static int mtk_probe(struct platform_device *pdev)
291 "mediatek,wed", i);
292 static const u32 wdma_regs[] = {
293 MTK_WDMA0_BASE,
294- MTK_WDMA1_BASE
295+ MTK_WDMA1_BASE,
296+ MTK_WDMA2_BASE
297 };
298 void __iomem *wdma;
299 u32 wdma_phy;
300diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
301index a9feaed..70e8377 100644
302--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
303+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
304@@ -600,9 +600,12 @@
305 #define RX_DMA_SPORT_MASK 0x7
306 #define RX_DMA_SPORT_MASK_V2 0xf
307
308-#if defined(CONFIG_MEDIATEK_NETSYS_V2)
309+#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
310 #define MTK_WDMA0_BASE 0x4800
311 #define MTK_WDMA1_BASE 0x4c00
312+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
313+#define MTK_WDMA2_BASE 0x5000
314+#endif
315 #else
316 #define MTK_WDMA0_BASE 0x2800
317 #define MTK_WDMA1_BASE 0x2c00
318diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
319old mode 100755
320new mode 100644
321index bc13a9b..3910163
322--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
323+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
324@@ -9,6 +9,7 @@
325 #include <linux/if_ether.h>
326 #include <linux/if_vlan.h>
327 #include <net/dsa.h>
328+#include <net/route.h>
329 #include "mtk_eth_soc.h"
330 #include "mtk_ppe.h"
331 #include "mtk_ppe_regs.h"
332@@ -396,7 +397,7 @@ int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
333 }
334
335 int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
336- int bss, int wcid)
337+ int bss, int wcid, bool amsdu_en)
338 {
339 struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
340 u32 *ib2 = mtk_foe_entry_ib2(entry);
341@@ -408,6 +409,9 @@ int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
342
343 l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
344 FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
345+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
346+ l2->winfo_pao = FIELD_PREP(MTK_FOE_WINFO_PAO_AMSDU_EN, amsdu_en);
347+#endif
348 #else
349 if (wdma_idx)
350 *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
351@@ -443,6 +447,17 @@ int mtk_foe_entry_set_dscp(struct mtk_foe_entry *entry, int dscp)
352 *ib2 &= ~MTK_FOE_IB2_DSCP;
353 *ib2 |= FIELD_PREP(MTK_FOE_IB2_DSCP, dscp);
354
355+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
356+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
357+
358+ if (*ib2 & MTK_FOE_IB2_WDMA_WINFO &&
359+ l2->winfo_pao & MTK_FOE_WINFO_PAO_AMSDU_EN) {
360+ u8 tid = rt_tos2priority(dscp) & 0xf;
361+
362+ l2->winfo_pao |= FIELD_PREP(MTK_FOE_WINFO_PAO_TID, tid);
363+ }
364+#endif
365+
366 return 0;
367 }
368
369diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
370index df10040..9e7d5aa 100644
371--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
372+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
373@@ -428,7 +428,7 @@ int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port);
374 int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
375 int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
376 int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
377- int bss, int wcid);
378+ int bss, int wcid, bool amsdu_en);
379 int mtk_foe_entry_set_qid(struct mtk_foe_entry *entry, int qid);
380 int mtk_foe_entry_set_dscp(struct mtk_foe_entry *entry, int dscp);
381 int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
382diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
383index 9bc0857..86fc9a1 100644
384--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
385+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
386@@ -112,6 +112,7 @@ mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_i
387 info->queue = path.mtk_wdma.queue;
388 info->bss = path.mtk_wdma.bss;
389 info->wcid = path.mtk_wdma.wcid;
390+ info->amsdu_en = path.mtk_wdma.amsdu_en;
391
392 return 0;
393 }
394@@ -193,13 +194,15 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
395
396 if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
397 mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss,
398- info.wcid);
399+ info.wcid, info.amsdu_en);
400 pse_port = PSE_PPE0_PORT;
401 #if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
402 if (info.wdma_idx == 0)
403 pse_port = PSE_WDMA0_PORT;
404 else if (info.wdma_idx == 1)
405 pse_port = PSE_WDMA1_PORT;
406+ else if (info.wdma_idx == 2)
407+ pse_port = PSE_WDMA2_PORT;
408 else
409 return -EOPNOTSUPP;
410 #endif
411@@ -458,8 +461,8 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
412 if (err)
413 return err;
414
415- if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
416- return err;
417+ /*if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
418+ return err;*/
419
420 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
421 if (!entry)
422@@ -499,8 +502,8 @@ clear:
423 mtk_foe_entry_clear(eth->ppe[i], entry);
424 free:
425 kfree(entry);
426- if (wed_index >= 0)
427- mtk_wed_flow_remove(wed_index);
428+ /*if (wed_index >= 0)
429+ mtk_wed_flow_remove(wed_index);*/
430 return err;
431 }
432
433diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
434index 37a86c3..e3809db 100644
435--- a/drivers/net/ethernet/mediatek/mtk_wed.c
436+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
437@@ -28,7 +28,7 @@ struct wo_cmd_ring {
438 u32 cnt;
439 u32 unit;
440 };
441-static struct mtk_wed_hw *hw_list[2];
442+static struct mtk_wed_hw *hw_list[3];
443 static DEFINE_MUTEX(hw_lock);
444
445 static void
446@@ -73,6 +73,26 @@ mtk_wdma_read_reset(struct mtk_wed_device *dev)
447 return wdma_r32(dev, MTK_WDMA_GLO_CFG);
448 }
449
450+static u32
451+mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
452+{
453+ if (wed_r32(dev, reg) & mask)
454+ return true;
455+
456+ return false;
457+}
458+
459+static int
460+mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
461+{
462+ int sleep = 1000;
463+ int timeout = 100 * sleep;
464+ u32 val;
465+
466+ return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
467+ timeout, false, dev, reg, mask);
468+}
469+
470 static int
471 mtk_wdma_rx_reset(struct mtk_wed_device *dev)
472 {
473@@ -235,6 +255,8 @@ mtk_wed_assign(struct mtk_wed_device *dev)
474 continue;
475
476 hw->wed_dev = dev;
477+ hw->pci_base = MTK_WED_PCIE_BASE;
478+
479 return hw;
480 }
481
482@@ -242,23 +264,84 @@ mtk_wed_assign(struct mtk_wed_device *dev)
483 }
484
485 static int
486-mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
487+mtk_wed_pao_buffer_alloc(struct mtk_wed_device *dev)
488+{
489+ struct mtk_wed_pao *pao;
490+ int i, j;
491+
492+ pao = kzalloc(sizeof(struct mtk_wed_pao), GFP_KERNEL);
493+ if (!pao)
494+ return -ENOMEM;
495+
496+ dev->hw->wed_pao = pao;
497+
498+ for (i = 0; i < 32; i++) {
499+ /* each segment is 64K */
500+ pao->hif_txd[i] = (char *)__get_free_pages(GFP_ATOMIC |
501+ GFP_DMA32 |
502+ __GFP_ZERO, 4);
503+ if (!pao->hif_txd[i])
504+ goto err;
505+
506+ pao->hif_txd_phys[i] = dma_map_single(dev->hw->dev,
507+ pao->hif_txd[i],
508+ 16 * PAGE_SIZE,
509+ DMA_TO_DEVICE);
510+ if (unlikely(dma_mapping_error(dev->hw->dev,
511+ pao->hif_txd_phys[i])))
512+ goto err;
513+ }
514+
515+ return 0;
516+
517+err:
518+ for (j = 0; j < i; j++)
519+ dma_unmap_single(dev->hw->dev, pao->hif_txd_phys[j],
520+ 16 * PAGE_SIZE, DMA_TO_DEVICE);
521+
522+ return -ENOMEM;
523+}
524+
525+static int
526+mtk_wed_pao_free_buffer(struct mtk_wed_device *dev)
527+{
528+ struct mtk_wed_pao *pao = dev->hw->wed_pao;
529+ int i;
530+
531+ for (i = 0; i < 32; i++) {
532+ dma_unmap_single(dev->hw->dev, pao->hif_txd_phys[i],
533+ 16 * PAGE_SIZE, DMA_TO_DEVICE);
534+ free_pages((unsigned long)pao->hif_txd[i], 4);
535+ }
536+
537+ return 0;
538+}
539+
540+static int
541+mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
542 {
543 struct mtk_wdma_desc *desc;
544+ void *desc_ptr;
545 dma_addr_t desc_phys;
546- void **page_list;
547+ struct dma_page_info *page_list;
548 u32 last_seg = MTK_WDMA_DESC_CTRL_LAST_SEG1;
549 int token = dev->wlan.token_start;
550- int ring_size, n_pages, page_idx;
551- int i;
552-
553+ int ring_size, pkt_nums, n_pages, page_idx;
554+ int i, ret = 0;
555
556 if (dev->ver == MTK_WED_V1) {
557 ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
558- } else {
559+ pkt_nums = ring_size;
560+ dev->tx_buf_ring.desc_size = sizeof(struct mtk_wdma_desc);
561+ } else if (dev->hw->version == 2) {
562 ring_size = MTK_WED_VLD_GROUP_SIZE * MTK_WED_PER_GROUP_PKT +
563 MTK_WED_WDMA_RING_SIZE * 2;
564 last_seg = MTK_WDMA_DESC_CTRL_LAST_SEG0;
565+ dev->tx_buf_ring.desc_size = sizeof(struct mtk_wdma_desc);
566+ } else if (dev->hw->version == 3) {
567+ ring_size = MTK_WED_TX_BM_DMA_SIZE;
568+ pkt_nums = MTK_WED_TX_BM_PKT_CNT;
569+ dev->tx_buf_ring.desc_size = sizeof(struct mtk_rxbm_desc);
570 }
571
572 n_pages = ring_size / MTK_WED_BUF_PER_PAGE;
573@@ -267,18 +350,20 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
574 if (!page_list)
575 return -ENOMEM;
576
577- dev->buf_ring.size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
578- dev->buf_ring.pages = page_list;
579+ dev->tx_buf_ring.size = ring_size;
580+ dev->tx_buf_ring.pages = page_list;
581+ dev->tx_buf_ring.pkt_nums = pkt_nums;
582
583- desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
584- &desc_phys, GFP_KERNEL);
585- if (!desc)
586+ desc_ptr = dma_alloc_coherent(dev->hw->dev,
587+ ring_size * dev->tx_buf_ring.desc_size,
588+ &desc_phys, GFP_KERNEL);
589+ if (!desc_ptr)
590 return -ENOMEM;
591
592- dev->buf_ring.desc = desc;
593- dev->buf_ring.desc_phys = desc_phys;
594+ dev->tx_buf_ring.desc = desc_ptr;
595+ dev->tx_buf_ring.desc_phys = desc_phys;
596
597- for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
598+ for (i = 0, page_idx = 0; i < pkt_nums; i += MTK_WED_BUF_PER_PAGE) {
599 dma_addr_t page_phys, buf_phys;
600 struct page *page;
601 void *buf;
602@@ -295,7 +380,10 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
603 return -ENOMEM;
604 }
605
606- page_list[page_idx++] = page;
607+ page_list[page_idx].addr = page;
608+ page_list[page_idx].addr_phys = page_phys;
609+ page_idx++;
610+
611 dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
612 DMA_BIDIRECTIONAL);
613
614@@ -303,19 +391,23 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
615 buf_phys = page_phys;
616
617 for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
618- u32 txd_size;
619-
620- txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
621-
622+ desc = desc_ptr;
623 desc->buf0 = buf_phys;
624- desc->buf1 = buf_phys + txd_size;
625- desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0,
626- txd_size) |
627- FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
628- MTK_WED_BUF_SIZE - txd_size) |
629- last_seg;
630- desc->info = 0;
631- desc++;
632+ if (dev->hw->version < 3) {
633+ u32 txd_size;
634+
635+ txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
636+ desc->buf1 = buf_phys + txd_size;
637+ desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0,
638+ txd_size) |
639+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
640+ MTK_WED_BUF_SIZE - txd_size) |
641+ last_seg;
642+ desc->info = 0;
643+ } else {
644+ desc->ctrl = token << 16;
645+ }
646+ desc_ptr += dev->tx_buf_ring.desc_size;
647
648 buf += MTK_WED_BUF_SIZE;
649 buf_phys += MTK_WED_BUF_SIZE;
650@@ -325,15 +417,18 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
651 DMA_BIDIRECTIONAL);
652 }
653
654- return 0;
655+ if (dev->hw->version == 3)
656+ ret = mtk_wed_pao_buffer_alloc(dev);
657+
658+ return ret;
659 }
660
661 static void
662-mtk_wed_free_buffer(struct mtk_wed_device *dev)
663+mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
664 {
665- struct mtk_wdma_desc *desc = dev->buf_ring.desc;
666- void **page_list = dev->buf_ring.pages;
667- int ring_size, page_idx;
668+ struct mtk_rxbm_desc *desc = dev->tx_buf_ring.desc;
669+ struct dma_page_info *page_list = dev->tx_buf_ring.pages;
670+ int ring_size, page_idx, pkt_nums;
671 int i;
672
673 if (!page_list)
674@@ -342,33 +437,33 @@ mtk_wed_free_buffer(struct mtk_wed_device *dev)
675 if (!desc)
676 goto free_pagelist;
677
678- if (dev->ver == MTK_WED_V1) {
679- ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
680- } else {
681- ring_size = MTK_WED_VLD_GROUP_SIZE * MTK_WED_PER_GROUP_PKT +
682- MTK_WED_WDMA_RING_SIZE * 2;
683+ pkt_nums = ring_size = dev->tx_buf_ring.size;
684+ if (dev->hw->version == 3) {
685+ mtk_wed_pao_free_buffer(dev);
686+ pkt_nums = dev->tx_buf_ring.pkt_nums;
687 }
688
689- for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
690- void *page = page_list[page_idx++];
691+ for (i = 0, page_idx = 0; i < pkt_nums; i += MTK_WED_BUF_PER_PAGE) {
692+ void *page = page_list[page_idx].addr;
693
694 if (!page)
695 break;
696
697- dma_unmap_page(dev->hw->dev, desc[i].buf0,
698+ dma_unmap_page(dev->hw->dev, page_list[page_idx].addr_phys,
699 PAGE_SIZE, DMA_BIDIRECTIONAL);
700 __free_page(page);
701+ page_idx++;
702 }
703
704- dma_free_coherent(dev->hw->dev, ring_size * sizeof(*desc),
705- desc, dev->buf_ring.desc_phys);
706+ dma_free_coherent(dev->hw->dev, ring_size * dev->tx_buf_ring.desc_size,
707+ dev->tx_buf_ring.desc, dev->tx_buf_ring.desc_phys);
708
709 free_pagelist:
710 kfree(page_list);
711 }
712
713 static int
714-mtk_wed_rx_bm_alloc(struct mtk_wed_device *dev)
715+mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
716 {
717 struct mtk_rxbm_desc *desc;
718 dma_addr_t desc_phys;
719@@ -389,7 +484,7 @@ mtk_wed_rx_bm_alloc(struct mtk_wed_device *dev)
720 }
721
722 static void
723-mtk_wed_free_rx_bm(struct mtk_wed_device *dev)
724+mtk_wed_free_rx_buffer(struct mtk_wed_device *dev)
725 {
726 struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc;
727 int ring_size = dev->rx_buf_ring.size;
728@@ -403,6 +498,113 @@ mtk_wed_free_rx_bm(struct mtk_wed_device *dev)
729 desc, dev->rx_buf_ring.desc_phys);
730 }
731
732+/* TODO */
733+static int
734+mtk_wed_rx_page_buffer_alloc(struct mtk_wed_device *dev)
735+{
736+ int ring_size = dev->wlan.rx_nbuf, buf_num = MTK_WED_RX_PG_BM_CNT;
737+ struct mtk_rxbm_desc *desc;
738+ dma_addr_t desc_phys;
739+ struct dma_page_info *page_list;
740+ int n_pages, page_idx;
741+ int i;
742+
743+ n_pages = buf_num / MTK_WED_RX_PAGE_BUF_PER_PAGE;
744+
745+ page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
746+ if (!page_list)
747+ return -ENOMEM;
748+
749+ dev->rx_page_buf_ring.size = ring_size & ~(MTK_WED_BUF_PER_PAGE - 1);
750+ dev->rx_page_buf_ring.pages = page_list;
751+ dev->rx_page_buf_ring.pkt_nums = buf_num;
752+
753+ desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
754+ &desc_phys, GFP_KERNEL);
755+ if (!desc)
756+ return -ENOMEM;
757+
758+ dev->rx_page_buf_ring.desc = desc;
759+ dev->rx_page_buf_ring.desc_phys = desc_phys;
760+
761+ for (i = 0, page_idx = 0; i < buf_num; i += MTK_WED_RX_PAGE_BUF_PER_PAGE) {
762+ dma_addr_t page_phys, buf_phys;
763+ struct page *page;
764+ void *buf;
765+ int s;
766+
767+ page = __dev_alloc_pages(GFP_KERNEL, 0);
768+ if (!page)
769+ return -ENOMEM;
770+
771+ page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
772+ DMA_BIDIRECTIONAL);
773+ if (dma_mapping_error(dev->hw->dev, page_phys)) {
774+ __free_page(page);
775+ return -ENOMEM;
776+ }
777+
778+ page_list[page_idx].addr = page;
779+ page_list[page_idx].addr_phys = page_phys;
780+ page_idx++;
781+
782+ dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
783+ DMA_BIDIRECTIONAL);
784+
785+ buf = page_to_virt(page);
786+ buf_phys = page_phys;
787+
788+ for (s = 0; s < MTK_WED_RX_PAGE_BUF_PER_PAGE; s++) {
789+
790+ desc->buf0 = cpu_to_le32(buf_phys);
791+ desc++;
792+
793+ buf += MTK_WED_PAGE_BUF_SIZE;
794+ buf_phys += MTK_WED_PAGE_BUF_SIZE;
795+ }
796+
797+ dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
798+ DMA_BIDIRECTIONAL);
799+ }
800+
801+ return 0;
802+}
803+
804+static void
805+mtk_wed_rx_page_free_buffer(struct mtk_wed_device *dev)
806+{
807+ struct mtk_rxbm_desc *desc = dev->rx_page_buf_ring.desc;
808+ struct dma_page_info *page_list = dev->rx_page_buf_ring.pages;
809+ int ring_size, page_idx;
810+ int i;
811+
812+ if (!page_list)
813+ return;
814+
815+ if (!desc)
816+ goto free_pagelist;
817+
818+ ring_size = dev->rx_page_buf_ring.pkt_nums;
819+
820+ for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_RX_PAGE_BUF_PER_PAGE) {
821+ void *page = page_list[page_idx].addr;
822+
823+ if (!page)
824+ break;
825+
826+ dma_unmap_page(dev->hw->dev, page_list[page_idx].addr_phys,
827+ PAGE_SIZE, DMA_BIDIRECTIONAL);
828+ __free_page(page);
829+ page_idx++;
830+ }
831+
832+ dma_free_coherent(dev->hw->dev, dev->rx_page_buf_ring.size * sizeof(*desc),
833+ desc, dev->rx_page_buf_ring.desc_phys);
834+
835+free_pagelist:
836+ kfree(page_list);
837+}
838+
839 static void
840 mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, int scale)
841 {
842@@ -416,19 +618,25 @@ mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, int sca
843 static void
844 mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
845 {
846- int i;
847+ int i, scale = dev->hw->version > 1 ? 2 : 1;
848
849 for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
850- mtk_wed_free_ring(dev, &dev->tx_ring[i], 1);
851+ if (!(dev->rx_ring[i].flags & MTK_WED_RING_CONFIGURED))
852+ mtk_wed_free_ring(dev, &dev->tx_ring[i], 1);
853+
854 for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
855- mtk_wed_free_ring(dev, &dev->tx_wdma[i], dev->ver);
856+ if ((dev->rx_ring[i].flags & MTK_WED_RING_CONFIGURED))
857+ mtk_wed_free_ring(dev, &dev->tx_wdma[i], scale);
858 }
859
860 static void
861 mtk_wed_free_rx_rings(struct mtk_wed_device *dev)
862 {
863- mtk_wed_free_rx_bm(dev);
864+ mtk_wed_free_rx_buffer(dev);
865 mtk_wed_free_ring(dev, &dev->rro.rro_ring, 1);
866+
867+ if (dev->wlan.hwrro)
868+ mtk_wed_rx_page_free_buffer(dev);
869 }
870
871 static void
872@@ -437,7 +645,7 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
873 u32 wdma_mask;
874
875 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
876- if (dev->ver > MTK_WED_V1)
877+ if (mtk_wed_get_rx_capa(dev))
878 wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
879 GENMASK(1, 0));
880 /* wed control cr set */
881@@ -447,7 +655,7 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
882 MTK_WED_CTRL_WED_TX_BM_EN |
883 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
884
885- if (dev->ver == MTK_WED_V1) {
886+ if (dev->hw->version == 1) {
887 wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
888 MTK_WED_PCIE_INT_TRIGGER_STATUS);
889
890@@ -458,6 +666,8 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
891 wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
892 MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
893 } else {
894+ if (dev->hw->version == 3)
895+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_TKID_ALI_EN);
896
897 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
898 MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
899@@ -475,18 +685,20 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
900 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
901 dev->wlan.txfree_tbit));
902
903- wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
904- MTK_WED_WPDMA_INT_CTRL_RX0_EN |
905- MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
906- MTK_WED_WPDMA_INT_CTRL_RX1_EN |
907- MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
908- FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
909- dev->wlan.rx_tbit[0]) |
910- FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
911- dev->wlan.rx_tbit[1]));
912+ if (mtk_wed_get_rx_capa(dev))
913+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
914+ MTK_WED_WPDMA_INT_CTRL_RX0_EN |
915+ MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
916+ MTK_WED_WPDMA_INT_CTRL_RX1_EN |
917+ MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
918+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
919+ dev->wlan.rx_tbit[0]) |
920+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
921+ dev->wlan.rx_tbit[1]));
922 }
923+
924 wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
925- if (dev->ver == MTK_WED_V1) {
926+ if (dev->hw->version == 1) {
927 wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
928 } else {
929 wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
930@@ -506,6 +718,21 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
931 {
932 u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
933
934+ switch (dev->hw->version) {
935+ case 1:
936+ mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
937+ break;
938+ case 2 :
939+ mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH2 |
940+ MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH2 |
941+ MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
942+ MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
943+ break;
944+ case 3:
945+ mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT;
946+ break;
947+ }
948+
949 if (!dev->hw->num_flows)
950 mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
951
952@@ -514,31 +741,86 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
953 }
954
955 static void
956-mtk_wed_set_512_support(struct mtk_wed_device *dev, bool en)
957+mtk_wed_pao_init(struct mtk_wed_device *dev)
958 {
959- if (en) {
960- wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
961- wed_w32(dev, MTK_WED_TXP_DW1,
962- FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
963- } else {
964- wed_w32(dev, MTK_WED_TXP_DW1,
965- FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
966- wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
967+ struct mtk_wed_pao *pao = dev->hw->wed_pao;
968+ int i;
969+
970+ for (i = 0; i < 32; i++)
971+ wed_w32(dev, MTK_WED_PAO_HIFTXD_BASE_L(i),
972+ pao->hif_txd_phys[i]);
973+
974+ /* init all sta parameter */
975+ wed_w32(dev, MTK_WED_PAO_STA_INFO_INIT, MTK_WED_PAO_STA_RMVL |
976+ MTK_WED_PAO_STA_WTBL_HDRT_MODE |
977+ FIELD_PREP(MTK_WED_PAO_STA_MAX_AMSDU_LEN,
978+ dev->wlan.max_amsdu_len >> 8) |
979+ FIELD_PREP(MTK_WED_PAO_STA_MAX_AMSDU_NUM,
980+ dev->wlan.max_amsdu_nums));
981+
982+ wed_w32(dev, MTK_WED_PAO_STA_INFO, MTK_WED_PAO_STA_INFO_DO_INIT);
983+
984+ if (mtk_wed_poll_busy(dev, MTK_WED_PAO_STA_INFO,
985+ MTK_WED_PAO_STA_INFO_DO_INIT)) {
986+ dev_err(dev->hw->dev, "mtk_wed%d: pao init failed!\n",
987+ dev->hw->index);
988+ return;
989 }
990+
991+ /* init pao txd src */
992+ wed_set(dev, MTK_WED_PAO_HIFTXD_CFG,
993+ FIELD_PREP(MTK_WED_PAO_HIFTXD_SRC, dev->hw->index));
994+
995+ /* init qmem */
996+ wed_set(dev, MTK_WED_PAO_PSE, MTK_WED_PAO_PSE_RESET);
997+ if (mtk_wed_poll_busy(dev, MTK_WED_PAO_MON_QMEM_STS1, BIT(29))) {
998+ pr_info("%s: init pao qmem fail\n", __func__);
999+ return;
1000+ }
1001+
1002+ /* eagle E1 PCIE1 tx ring 22 flow control issue */
1003+ if (dev->wlan.chip_id == 0x7991) {
1004+ wed_clr(dev, MTK_WED_PAO_AMSDU_FIFO,
1005+ MTK_WED_PAO_AMSDU_IS_PRIOR0_RING);
1006+ }
1007+
1008+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_PAO_EN);
1009+
1010+ return;
1011 }
1012
1013-static void
1014-mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
1015+static int
1016+mtk_wed_hwrro_init(struct mtk_wed_device *dev)
1017 {
1018-#define MTK_WFMDA_RX_DMA_EN BIT(2)
1019+ if (!mtk_wed_get_rx_capa(dev))
1020+ return 0;
1021
1022+ wed_set(dev, MTK_WED_RRO_PG_BM_RX_DMAM,
1023+ FIELD_PREP(MTK_WED_RRO_PG_BM_RX_SDL0, 128));
1024+
1025+ wed_w32(dev, MTK_WED_RRO_PG_BM_BASE,
1026+ dev->rx_page_buf_ring.desc_phys);
1027+
1028+ wed_w32(dev, MTK_WED_RRO_PG_BM_INIT_PTR,
1029+ MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX |
1030+ FIELD_PREP(MTK_WED_RRO_PG_BM_SW_TAIL_IDX,
1031+ MTK_WED_RX_PG_BM_CNT));
1032+
1033+ /* enable rx_page_bm to fetch dmad */
1034+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN);
1035+
1036+ return 0;
1037+}
1038+
1039+static int
1040+mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev,
1041+ struct mtk_wed_ring *ring)
1042+{
1043 int timeout = 3;
1044- u32 cur_idx, regs;
1045+ u32 cur_idx;
1046
1047 do {
1048- regs = MTK_WED_WPDMA_RING_RX_DATA(idx) +
1049- MTK_WED_RING_OFS_CPU_IDX;
1050- cur_idx = wed_r32(dev, regs);
1051+ cur_idx = readl(ring->wpdma + MTK_WED_RING_OFS_CPU_IDX);
1052 if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
1053 break;
1054
1055@@ -546,70 +828,133 @@ mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
1056 timeout--;
1057 } while (timeout > 0);
1058
1059- if (timeout) {
1060- unsigned int val;
1061+ return timeout;
1062+}
1063
1064- val = wifi_r32(dev, dev->wlan.wpdma_rx_glo -
1065- dev->wlan.phy_base);
1066- val |= MTK_WFMDA_RX_DMA_EN;
1067
1068- wifi_w32(dev, dev->wlan.wpdma_rx_glo -
1069- dev->wlan.phy_base, val);
1070+static void
1071+mtk_wed_set_512_support(struct mtk_wed_device *dev, bool en)
1072+{
1073+ if (en) {
1074+ wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
1075+ wed_w32(dev, MTK_WED_TXP_DW1,
1076+ FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
1077 } else {
1078- dev_err(dev->hw->dev, "mtk_wed%d: rx(%d) dma enable failed!\n",
1079- dev->hw->index, idx);
1080+ wed_w32(dev, MTK_WED_TXP_DW1,
1081+ FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
1082+ wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
1083 }
1084 }
1085
1086 static void
1087 mtk_wed_dma_enable(struct mtk_wed_device *dev)
1088 {
1089- wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
1090- MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
1091+#define MTK_WFMDA_RX_DMA_EN BIT(2)
1092+
1093+ if (dev->hw->version == 1)
1094+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
1095+ MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
1096
1097 wed_set(dev, MTK_WED_GLO_CFG,
1098 MTK_WED_GLO_CFG_TX_DMA_EN |
1099 MTK_WED_GLO_CFG_RX_DMA_EN);
1100+
1101+ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG,
1102+ FIELD_PREP(MTK_WED_WDMA_RX_PREF_BURST_SIZE, 0x10) |
1103+ FIELD_PREP(MTK_WED_WDMA_RX_PREF_LOW_THRES, 0x8));
1104+ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
1105+ MTK_WED_WDMA_RX_PREF_DDONE2_EN);
1106+
1107+ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, MTK_WED_WDMA_RX_PREF_EN);
1108+
1109 wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
1110 MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
1111- MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
1112+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN |
1113+ MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR);
1114 wed_set(dev, MTK_WED_WDMA_GLO_CFG,
1115 MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
1116
1117 wdma_set(dev, MTK_WDMA_GLO_CFG,
1118- MTK_WDMA_GLO_CFG_TX_DMA_EN |
1119+ MTK_WDMA_GLO_CFG_TX_DMA_EN /*|
1120 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
1121- MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
1122+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES*/);
1123
1124- if (dev->ver == MTK_WED_V1) {
1125+ if (dev->hw->version == 1) {
1126 wdma_set(dev, MTK_WDMA_GLO_CFG,
1127 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
1128 } else {
1129 int idx = 0;
1130
1131- wed_set(dev, MTK_WED_WPDMA_CTRL,
1132- MTK_WED_WPDMA_CTRL_SDL1_FIXED);
1133-
1134- wed_set(dev, MTK_WED_WDMA_GLO_CFG,
1135- MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
1136- MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
1137+ if (mtk_wed_get_rx_capa(dev))
1138+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
1139+ MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
1140+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
1141
1142 wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
1143 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
1144 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
1145
1146+ if (dev->hw->version == 3) {
1147+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
1148+ MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST);
1149+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
1150+ MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK |
1151+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK |
1152+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4);
1153+
1154+ wdma_set(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
1155+ //wdma_w32(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
1156+ if (mtk_wed_get_rx_capa(dev)) {
1157+ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
1158+ MTK_WED_WPDMA_RX_D_PREF_EN |
1159+ FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE, 0x10) |
1160+ FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_LOW_THRES, 0x8));
1161+
1162+ wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN);
1163+
1164+ wdma_set(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
1165+
1166+ wdma_set(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
1167+ }
1168+ }
1169+
1170 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
1171 MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
1172 MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
1173
1174+ if (!mtk_wed_get_rx_capa(dev))
1175+ return;
1176+
1177+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RXD_READ_LEN);
1178 wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
1179 MTK_WED_WPDMA_RX_D_RX_DRV_EN |
1180 FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
1181 FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
1182 0x2));
1183
1184- for (idx = 0; idx < dev->hw->ring_num; idx++)
1185- mtk_wed_check_wfdma_rx_fill(dev, idx);
1186+ for (idx = 0; idx < dev->hw->ring_num; idx++) {
1187+ struct mtk_wed_ring *ring = &dev->rx_ring[idx];
1188+
1189+ if(!(ring->flags & MTK_WED_RING_CONFIGURED))
1190+ continue;
1191+
1192+ if(mtk_wed_check_wfdma_rx_fill(dev, ring)) {
1193+ unsigned int val;
1194+
1195+ val = wifi_r32(dev, dev->wlan.wpdma_rx_glo -
1196+ dev->wlan.phy_base);
1197+ val |= MTK_WFMDA_RX_DMA_EN;
1198+
1199+ wifi_w32(dev, dev->wlan.wpdma_rx_glo -
1200+ dev->wlan.phy_base, val);
1201+
1202+ dev_err(dev->hw->dev, "mtk_wed%d: rx(%d) dma enable successful!\n",
1203+ dev->hw->index, idx);
1204+ } else {
1205+ dev_err(dev->hw->dev, "mtk_wed%d: rx(%d) dma enable failed!\n",
1206+ dev->hw->index, idx);
1207+ }
1208+ }
1209 }
1210 }
1211
1212@@ -644,15 +989,20 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
1213 MTK_WED_WPDMA_RX_D_RX_DRV_EN);
1214 wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
1215 MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
1216- }
1217
1218- mtk_wed_set_512_support(dev, false);
1219+ if (dev->hw->version == 3 && mtk_wed_get_rx_capa(dev)) {
1220+ wdma_clr(dev, MTK_WDMA_PREF_TX_CFG,
1221+ MTK_WDMA_PREF_TX_CFG_PREF_EN);
1222+ wdma_clr(dev, MTK_WDMA_PREF_RX_CFG,
1223+ MTK_WDMA_PREF_RX_CFG_PREF_EN);
1224+ }
1225+ }
1226 }
1227
1228 static void
1229 mtk_wed_stop(struct mtk_wed_device *dev)
1230 {
1231- if (dev->ver > MTK_WED_V1) {
1232+ if (mtk_wed_get_rx_capa(dev)) {
1233 wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
1234 wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0);
1235 }
1236@@ -665,6 +1015,7 @@ mtk_wed_stop(struct mtk_wed_device *dev)
1237 wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
1238 }
1239
1240+#define IRQ_MASK_APMCU 0x1000301c
1241 static void
1242 mtk_wed_deinit(struct mtk_wed_device *dev)
1243 {
1244@@ -677,13 +1028,31 @@ mtk_wed_deinit(struct mtk_wed_device *dev)
1245 MTK_WED_CTRL_WED_TX_BM_EN |
1246 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
1247
1248- if (dev->hw->ver == 1)
1249+ if (dev->hw->version == 1)
1250 return;
1251
1252 wed_clr(dev, MTK_WED_CTRL,
1253 MTK_WED_CTRL_RX_ROUTE_QM_EN |
1254 MTK_WED_CTRL_WED_RX_BM_EN |
1255 MTK_WED_CTRL_RX_RRO_QM_EN);
1256+
1257+ if (dev->hw->version == 3) {
1258+ unsigned long addr;
1259+ u32 value;
1260+
1261+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_PAO_EN);
1262+ wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_TX_PAO);
1263+ wed_clr(dev, MTK_WED_PCIE_INT_CTRL,
1264+ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
1265+ MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER);
1266+
1267+ /* clear mask apmcu */
1268+ addr = (unsigned long)ioremap(IRQ_MASK_APMCU, 4);
1269+ value = readl((void *)addr);
1270+ value &= ~0x7;
1271+ writel(value, (void *)addr);
1272+ iounmap((void *)addr);
1273+ }
1274 }
1275
1276 static void
1277@@ -702,9 +1071,9 @@ mtk_wed_detach(struct mtk_wed_device *dev)
1278
1279 mtk_wdma_tx_reset(dev);
1280
1281- mtk_wed_free_buffer(dev);
1282+ mtk_wed_free_tx_buffer(dev);
1283 mtk_wed_free_tx_rings(dev);
1284- if (dev->ver > MTK_WED_V1) {
1285+ if (mtk_wed_get_rx_capa(dev)) {
1286 mtk_wed_wo_reset(dev);
1287 mtk_wed_free_rx_rings(dev);
1288 mtk_wed_wo_exit(hw);
1289@@ -731,70 +1100,93 @@ mtk_wed_detach(struct mtk_wed_device *dev)
1290 static void
1291 mtk_wed_bus_init(struct mtk_wed_device *dev)
1292 {
1293-#define PCIE_BASE_ADDR0 0x11280000
1294+ switch (dev->wlan.bus_type) {
1295+ case MTK_WED_BUS_PCIE: {
1296+ struct device_node *np = dev->hw->eth->dev->of_node;
1297+ struct regmap *regs;
1298+ unsigned long addr;
1299+ u32 value;
1300
1301- if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
1302- struct device_node *node;
1303- void __iomem * base_addr;
1304- u32 value = 0;
1305+ if (dev->hw->version == 2) {
1306+ regs = syscon_regmap_lookup_by_phandle(np,
1307+ "mediatek,wed-pcie");
1308+ if (IS_ERR(regs))
1309+ break;
1310
1311- node = of_parse_phandle(dev->hw->node, "mediatek,wed_pcie", 0);
1312- if (!node) {
1313- pr_err("%s: no wed_pcie node\n", __func__);
1314- return;
1315+ regmap_update_bits(regs, 0, BIT(0), BIT(0));
1316 }
1317
1318- base_addr = of_iomap(node, 0);
1319-
1320- value = readl(base_addr);
1321- value |= BIT(0);
1322- writel(value, base_addr);
1323+ if (dev->wlan.msi) {
1324+ wed_w32(dev, MTK_WED_PCIE_CFG_INTM, dev->hw->pci_base| 0xc08);
1325+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, dev->hw->pci_base | 0xc04);
1326+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(8));
1327+ } else {
1328+ wed_w32(dev, MTK_WED_PCIE_CFG_INTM, dev->hw->pci_base | 0x180);
1329+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, dev->hw->pci_base | 0x184);
1330+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
1331+ }
1332
1333- wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
1334- FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));
1335+ if (dev->hw->version < 3 || dev->hw->index) {
1336+ wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
1337+ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));
1338+ } else {
1339+ /* set mask apmcu */
1340+ addr = (unsigned long)ioremap(IRQ_MASK_APMCU, 4);
1341+ value = readl((void *)addr);
1342+ value |= 0x7;
1343+ writel(value, (void *)addr);
1344+ iounmap((void *)addr);
1345+ }
1346
1347 /* pcie interrupt control: pola/source selection */
1348 wed_set(dev, MTK_WED_PCIE_INT_CTRL,
1349 MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
1350- FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
1351- wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
1352+ MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER |
1353+ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, dev->hw->index));
1354
1355- value = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
1356- value = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
1357- wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
1358- wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);
1359-
1360- value = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
1361- value = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
1362-
1363- wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
1364- wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
1365-
1366- /* pola setting */
1367- value = wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
1368- wed_set(dev, MTK_WED_PCIE_INT_CTRL,
1369- MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
1370- } else if (dev->wlan.bus_type == MTK_WED_BUS_AXI) {
1371+ break;
1372+ }
1373+ case MTK_WED_BUS_AXI:
1374 wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
1375 MTK_WED_WPDMA_INT_CTRL_SIG_SRC |
1376 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0));
1377+ break;
1378+ default:
1379+ break;
1380 }
1381+
1382 return;
1383 }
1384
1385 static void
1386 mtk_wed_set_wpdma(struct mtk_wed_device *dev)
1387 {
1388- if (dev->ver > MTK_WED_V1) {
1389+ if (dev->hw->version == 1) {
1390+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
1391+ } else {
1392+ mtk_wed_bus_init(dev);
1393+
1394 wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
1395 wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
1396- wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
1397+ wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
1398 wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
1399
1400- wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
1401- wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
1402- } else {
1403- wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
1404+ if (mtk_wed_get_rx_capa(dev)) {
1405+ int i;
1406+
1407+ wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
1408+ wed_w32(dev, MTK_WED_WPDMA_RX_RING0, dev->wlan.wpdma_rx);
1409+ wed_w32(dev, MTK_WED_WPDMA_RX_RING1, dev->wlan.wpdma_rx + 0x10);
1410+
1411+ if (dev->wlan.hwrro) {
1412+ wed_w32(dev, MTK_WED_RRO_RX_D_CFG(0), dev->wlan.wpdma_rx_rro[0]);
1413+ wed_w32(dev, MTK_WED_RRO_RX_D_CFG(1), dev->wlan.wpdma_rx_rro[1]);
1414+ for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) {
1415+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING_CFG(i),
1416+ dev->wlan.wpdma_rx_pg + i * 0x10);
1417+ }
1418+ }
1419+ }
1420 }
1421 }
1422
1423@@ -806,21 +1198,25 @@ mtk_wed_hw_init_early(struct mtk_wed_device *dev)
1424 mtk_wed_deinit(dev);
1425 mtk_wed_reset(dev, MTK_WED_RESET_WED);
1426
1427- if (dev->ver > MTK_WED_V1)
1428- mtk_wed_bus_init(dev);
1429-
1430 mtk_wed_set_wpdma(dev);
1431
1432- mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
1433- MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
1434- MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
1435- set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
1436- MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
1437- MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
1438+ if (dev->hw->version == 3) {
1439+ mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE;
1440+ set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2);
1441+ } else {
1442+ mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
1443+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
1444+ MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
1445+ set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
1446+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
1447+ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
1448+ }
1449+
1450 wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
1451
1452- if (dev->ver == MTK_WED_V1) {
1453+ if (dev->hw->version == 1) {
1454 u32 offset;
1455+
1456 offset = dev->hw->index ? 0x04000400 : 0;
1457 wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
1458 wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
1459@@ -907,11 +1303,16 @@ mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
1460 } while (1);
1461
1462 /* configure RX_ROUTE_QM */
1463- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
1464- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
1465- wed_set(dev, MTK_WED_RTQM_GLO_CFG,
1466- FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
1467- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
1468+ if (dev->hw->version == 2) {
1469+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
1470+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
1471+ wed_set(dev, MTK_WED_RTQM_GLO_CFG,
1472+ FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
1473+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
1474+ } else {
1475+ wed_set(dev, MTK_WED_RTQM_ENQ_CFG0,
1476+ FIELD_PREP(MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT, 0x3 + dev->hw->index));
1477+ }
1478
1479 /* enable RX_ROUTE_QM */
1480 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
1481@@ -920,23 +1321,45 @@ mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
1482 static void
1483 mtk_wed_tx_hw_init(struct mtk_wed_device *dev)
1484 {
1485- int size = dev->buf_ring.size;
1486+ int size = dev->wlan.nbuf;
1487 int rev_size = MTK_WED_TX_RING_SIZE / 2;
1488- int thr = 1;
1489+ int thr_lo = 1, thr_hi = 1;
1490
1491- if (dev->ver > MTK_WED_V1) {
1492+ if (dev->hw->version == 1) {
1493+ wed_w32(dev, MTK_WED_TX_BM_CTRL,
1494+ MTK_WED_TX_BM_CTRL_PAUSE |
1495+ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, size / 128) |
1496+ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, rev_size / 128));
1497+ } else {
1498 size = MTK_WED_WDMA_RING_SIZE * ARRAY_SIZE(dev->tx_wdma) +
1499- dev->buf_ring.size;
1500+ dev->tx_buf_ring.size;
1501 rev_size = size;
1502- thr = 0;
1503+ thr_lo = 0;
1504+ thr_hi = MTK_WED_TX_BM_DYN_THR_HI;
1505+
1506+ wed_w32(dev, MTK_WED_TX_TKID_CTRL,
1507+ MTK_WED_TX_TKID_CTRL_PAUSE |
1508+ FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
1509+ size / 128) |
1510+ FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
1511+ size / 128));
1512+
1513+ /* return SKBID + SDP back to bm */
1514+ if (dev->ver == 3) {
1515+ wed_set(dev, MTK_WED_TX_TKID_CTRL,
1516+ MTK_WED_TX_TKID_CTRL_FREE_FORMAT);
1517+ size = dev->wlan.nbuf;
1518+ rev_size = size;
1519+ } else {
1520+ wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
1521+ FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
1522+ MTK_WED_TX_TKID_DYN_THR_HI);
1523+ }
1524 }
1525
1526- wed_w32(dev, MTK_WED_TX_BM_CTRL,
1527- MTK_WED_TX_BM_CTRL_PAUSE |
1528- FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, size / 128) |
1529- FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, rev_size / 128));
1530+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
1531
1532- wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);
1533+ wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys);
1534
1535 wed_w32(dev, MTK_WED_TX_BM_TKID,
1536 FIELD_PREP(MTK_WED_TX_BM_TKID_START,
1537@@ -946,25 +1369,44 @@ mtk_wed_tx_hw_init(struct mtk_wed_device *dev)
1538
1539 wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
1540
1541- wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
1542- FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, thr) |
1543- MTK_WED_TX_BM_DYN_THR_HI);
1544+ if (dev->hw->version < 3)
1545+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
1546+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, thr_lo) |
1547+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, thr_hi));
1548+ else {
1549+ /* change to new bm */
1550+ wed_w32(dev, MTK_WED_TX_BM_INIT_PTR, dev->tx_buf_ring.pkt_nums |
1551+ MTK_WED_TX_BM_INIT_SW_TAIL_IDX);
1552+ wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_LEGACY_EN);
1553+ }
1554
1555- if (dev->ver > MTK_WED_V1) {
1556+ if (dev->hw->version != 1) {
1557 wed_w32(dev, MTK_WED_TX_TKID_CTRL,
1558 MTK_WED_TX_TKID_CTRL_PAUSE |
1559 FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
1560- dev->buf_ring.size / 128) |
1561+ size / 128) |
1562 FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
1563- dev->buf_ring.size / 128));
1564- wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
1565- FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
1566- MTK_WED_TX_TKID_DYN_THR_HI);
1567+ size / 128));
1568+
1569+ /* return SKBID + SDP back to bm */
1570+ if (dev->ver == 3)
1571+ wed_set(dev, MTK_WED_TX_TKID_CTRL,
1572+ MTK_WED_TX_TKID_CTRL_FREE_FORMAT);
1573+ else
1574+ wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
1575+ FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
1576+ MTK_WED_TX_TKID_DYN_THR_HI);
1577 }
1578- mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
1579+ wed_w32(dev, MTK_WED_TX_BM_TKID,
1580+ FIELD_PREP(MTK_WED_TX_BM_TKID_START,
1581+ dev->wlan.token_start) |
1582+ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
1583+ dev->wlan.token_start + dev->wlan.nbuf - 1));
1584
1585+ wed_w32(dev, MTK_WED_TX_BM_INIT_PTR, dev->tx_buf_ring.pkt_nums |
1586+ MTK_WED_TX_BM_INIT_SW_TAIL_IDX);
1587 wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
1588- if (dev->ver > MTK_WED_V1)
1589+ if (dev->hw->version != 1)
1590 wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
1591 }
1592
1593@@ -977,7 +1419,26 @@ mtk_wed_rx_hw_init(struct mtk_wed_device *dev)
1594
1595 wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
1596
1597+ /* reset prefetch index of ring */
1598+ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX,
1599+ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
1600+ wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX,
1601+ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
1602+
1603+ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX,
1604+ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
1605+ wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX,
1606+ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
1607+
1608+ /* reset prefetch FIFO of ring */
1609+ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG,
1610+ MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR |
1611+ MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR);
1612+ wed_w32(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG, 0);
1613+
1614 mtk_wed_rx_bm_hw_init(dev);
1615+ if (dev->wlan.hwrro)
1616+ mtk_wed_hwrro_init(dev);
1617 mtk_wed_rro_hw_init(dev);
1618 mtk_wed_route_qm_hw_init(dev);
1619 }
1620@@ -991,7 +1452,7 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
1621 dev->init_done = true;
1622 mtk_wed_set_ext_int(dev, false);
1623 mtk_wed_tx_hw_init(dev);
1624- if (dev->ver > MTK_WED_V1)
1625+ if (mtk_wed_get_rx_capa(dev))
1626 mtk_wed_rx_hw_init(dev);
1627 }
1628
1629@@ -1015,26 +1476,6 @@ mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size, int scale, bool tx)
1630 }
1631 }
1632
1633-static u32
1634-mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
1635-{
1636- if (wed_r32(dev, reg) & mask)
1637- return true;
1638-
1639- return false;
1640-}
1641-
1642-static int
1643-mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
1644-{
1645- int sleep = 1000;
1646- int timeout = 100 * sleep;
1647- u32 val;
1648-
1649- return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
1650- timeout, false, dev, reg, mask);
1651-}
1652-
1653 static void
1654 mtk_wed_rx_reset(struct mtk_wed_device *dev)
1655 {
1656@@ -1133,7 +1574,7 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
1657 mtk_wed_ring_reset(desc, MTK_WED_RX_RING_SIZE, 1, false);
1658 }
1659
1660- mtk_wed_free_rx_bm(dev);
1661+ mtk_wed_free_rx_buffer(dev);
1662 }
1663
1664
1665@@ -1271,12 +1712,15 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev,
1666 int idx, int size, bool reset)
1667 {
1668 struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
1669+ int scale = dev->hw->version > 1 ? 2 : 1;
1670
1671 if(!reset)
1672 if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
1673- dev->ver, true))
1674+ scale, true))
1675 return -ENOMEM;
1676
1677+ wdma->flags |= MTK_WED_RING_CONFIGURED;
1678+
1679 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
1680 wdma->desc_phys);
1681 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
1682@@ -1296,12 +1740,31 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev,
1683 int idx, int size, bool reset)
1684 {
1685 struct mtk_wed_ring *wdma = &dev->rx_wdma[idx];
1686+ int scale = dev->hw->version > 1 ? 2 : 1;
1687
1688 if (!reset)
1689 if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
1690- dev->ver, true))
1691+ scale, true))
1692 return -ENOMEM;
1693
1694+ if (dev->hw->version == 3) {
1695+ struct mtk_wdma_desc *desc = wdma->desc;
1696+ int i;
1697+
1698+ for (i = 0; i < MTK_WED_WDMA_RING_SIZE; i++) {
1699+ desc->buf0 = 0;
1700+ desc->ctrl = MTK_WDMA_DESC_CTRL_DMA_DONE;
1701+ desc->buf1 = 0;
1702+ desc->info = MTK_WDMA_TXD0_DESC_INFO_DMA_DONE;
1703+ desc++;
1704+ desc->buf0 = 0;
1705+ desc->ctrl = MTK_WDMA_DESC_CTRL_DMA_DONE;
1706+ desc->buf1 = 0;
1707+ desc->info = MTK_WDMA_TXD1_DESC_INFO_DMA_DONE;
1708+ desc++;
1709+ }
1710+ }
1711+
1712 wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
1713 wdma->desc_phys);
1714 wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
developer8c109ee2023-06-29 16:44:45 +08001715@@ -1312,7 +1775,7 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev,
developer23f9f0f2023-06-15 13:06:25 +08001716 MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);
1717 if (reset)
1718 mtk_wed_ring_reset(wdma->desc, MTK_WED_WDMA_RING_SIZE,
1719- dev->ver, true);
1720+ scale, true);
1721 if (idx == 0) {
1722 wed_w32(dev, MTK_WED_WDMA_RING_TX
1723 + MTK_WED_RING_OFS_BASE, wdma->desc_phys);
developer8c109ee2023-06-29 16:44:45 +08001724@@ -1395,7 +1858,7 @@ mtk_wed_send_msg(struct mtk_wed_device *dev, int cmd_id, void *data, int len)
developer23f9f0f2023-06-15 13:06:25 +08001725 {
1726 struct mtk_wed_wo *wo = dev->hw->wed_wo;
1727
1728- if (dev->ver == MTK_WED_V1)
1729+ if (!mtk_wed_get_rx_capa(dev))
1730 return 0;
1731
1732 return mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, cmd_id, data, len, true);
developer8c109ee2023-06-29 16:44:45 +08001733@@ -1420,13 +1883,87 @@ mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb,
developer23f9f0f2023-06-15 13:06:25 +08001734 }
1735 }
1736
1737+static void
1738+mtk_wed_start_hwrro(struct mtk_wed_device *dev, u32 irq_mask)
1739+{
1740+ int idx, ret;
1741+
1742+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
1743+ wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
1744+
1745+ if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hwrro)
1746+ return;
1747+
1748+ wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR);
1749+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, MTK_WED_RRO_MSDU_PG_DRV_CLR);
1750+
1751+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_RX,
1752+ MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN |
1753+ MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR |
1754+ MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN |
1755+ MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR |
1756+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG,
1757+ dev->wlan.rro_rx_tbit[0]) |
1758+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG,
1759+ dev->wlan.rro_rx_tbit[1]));
1760+
1761+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG,
1762+ MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN |
1763+ MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR |
1764+ MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN |
1765+ MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR |
1766+ MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN |
1767+ MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR |
1768+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG,
1769+ dev->wlan.rx_pg_tbit[0]) |
1770+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG,
1771+ dev->wlan.rx_pg_tbit[1])|
1772+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG,
1773+ dev->wlan.rx_pg_tbit[2]));
1774+
1775+ /*
1776+ * RRO_MSDU_PG_RING2_CFG1_FLD_DRV_EN should be enabled after
1777+	 * WM FWDL has completed, otherwise the RRO_MSDU_PG ring may be broken
1778+ */
1779+ wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, MTK_WED_RRO_MSDU_PG_DRV_EN);
1780+
1781+ for (idx = 0; idx < MTK_WED_RX_QUEUES; idx++) {
1782+ struct mtk_wed_ring *ring = &dev->rx_rro_ring[idx];
1783+
1784+ if(!(ring->flags & MTK_WED_RING_CONFIGURED))
1785+ continue;
1786+
1787+ ret = mtk_wed_check_wfdma_rx_fill(dev, ring);
1788+ if (!ret)
1789+ dev_err(dev->hw->dev, "mtk_wed%d: rx_rro_ring(%d) init failed!\n",
1790+ dev->hw->index, idx);
1791+ }
1792+
1793+ for (idx = 0; idx < MTK_WED_RX_PAGE_QUEUES; idx++){
1794+ struct mtk_wed_ring *ring = &dev->rx_page_ring[idx];
1795+ if(!(ring->flags & MTK_WED_RING_CONFIGURED))
1796+ continue;
1797+
1798+ ret = mtk_wed_check_wfdma_rx_fill(dev, ring);
1799+ if (!ret)
1800+ dev_err(dev->hw->dev, "mtk_wed%d: rx_page_ring(%d) init failed!\n",
1801+ dev->hw->index, idx);
1802+ }
1803+}
1804+
1805 static void
1806 mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
1807 {
1808 int i, ret;
1809
1810- if (dev->ver > MTK_WED_V1)
1811- ret = mtk_wed_rx_bm_alloc(dev);
1812+ if (mtk_wed_get_rx_capa(dev)) {
1813+ ret = mtk_wed_rx_buffer_alloc(dev);
1814+ if (ret)
1815+ return;
1816+
1817+ if (dev->wlan.hwrro)
1818+ mtk_wed_rx_page_buffer_alloc(dev);
1819+ }
1820
1821 for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
1822 if (!dev->tx_wdma[i].desc)
developer8c109ee2023-06-29 16:44:45 +08001823@@ -1437,7 +1974,7 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
developer23f9f0f2023-06-15 13:06:25 +08001824 mtk_wed_set_int(dev, irq_mask);
1825 mtk_wed_set_ext_int(dev, true);
1826
1827- if (dev->ver == MTK_WED_V1) {
1828+ if (dev->hw->version == 1) {
1829 u32 val;
1830
1831 val = dev->wlan.wpdma_phys |
developer8c109ee2023-06-29 16:44:45 +08001832@@ -1448,33 +1985,52 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
developer23f9f0f2023-06-15 13:06:25 +08001833 val |= BIT(1);
1834 val |= BIT(0);
1835 regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
1836- } else {
1837+ } else if (mtk_wed_get_rx_capa(dev)) {
1838 /* driver set mid ready and only once */
1839 wed_w32(dev, MTK_WED_EXT_INT_MASK1,
1840 MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
1841 wed_w32(dev, MTK_WED_EXT_INT_MASK2,
1842 MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
1843+ if (dev->hw->version == 3)
1844+ wed_w32(dev, MTK_WED_EXT_INT_MASK3,
1845+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
1846
1847 wed_r32(dev, MTK_WED_EXT_INT_MASK1);
1848 wed_r32(dev, MTK_WED_EXT_INT_MASK2);
1849+ if (dev->hw->version == 3)
1850+ wed_r32(dev, MTK_WED_EXT_INT_MASK3);
1851
1852 ret = mtk_wed_rro_cfg(dev);
1853 if (ret)
1854 return;
1855 }
1856- mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
1857+
1858+ if (dev->hw->version == 2)
1859+ mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
1860+ else if (dev->hw->version == 3)
1861+ mtk_wed_pao_init(dev);
1862
1863 mtk_wed_dma_enable(dev);
1864 dev->running = true;
1865 }
1866
1867+static int
1868+mtk_wed_get_pci_base(struct mtk_wed_device *dev)
1869+{
1870+ if (dev->hw->index == 0)
1871+ return MTK_WED_PCIE_BASE0;
1872+ else if (dev->hw->index == 1)
1873+ return MTK_WED_PCIE_BASE1;
1874+ else
1875+ return MTK_WED_PCIE_BASE2;
1876+}
1877+
1878 static int
1879 mtk_wed_attach(struct mtk_wed_device *dev)
1880 __releases(RCU)
1881 {
1882 struct mtk_wed_hw *hw;
1883 struct device *device;
1884- u16 ver;
1885 int ret = 0;
1886
1887 RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
developer8c109ee2023-06-29 16:44:45 +08001888@@ -1494,34 +2050,30 @@ mtk_wed_attach(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001889 goto out;
1890 }
1891
1892- device = dev->wlan.bus_type == MTK_WED_BUS_PCIE
1893- ? &dev->wlan.pci_dev->dev
1894- : &dev->wlan.platform_dev->dev;
1895+ device = dev->wlan.bus_type == MTK_WED_BUS_PCIE ?
1896+ &dev->wlan.pci_dev->dev
1897+ : &dev->wlan.platform_dev->dev;
1898 dev_info(device, "attaching wed device %d version %d\n",
1899- hw->index, hw->ver);
1900+ hw->index, hw->version);
1901
1902 dev->hw = hw;
1903 dev->dev = hw->dev;
1904 dev->irq = hw->irq;
1905 dev->wdma_idx = hw->index;
1906+ dev->ver = hw->version;
1907+
1908+ if (dev->hw->version == 3)
1909+ dev->hw->pci_base = mtk_wed_get_pci_base(dev);
1910
1911 if (hw->eth->dma_dev == hw->eth->dev &&
1912 of_dma_is_coherent(hw->eth->dev->of_node))
1913 mtk_eth_set_dma_device(hw->eth, hw->dev);
1914
1915- dev->ver = FIELD_GET(MTK_WED_REV_ID_MAJOR,
1916- wed_r32(dev, MTK_WED_REV_ID));
1917- if (dev->ver > MTK_WED_V1)
1918- ver = FIELD_GET(MTK_WED_REV_ID_MINOR,
1919- wed_r32(dev, MTK_WED_REV_ID));
1920-
1921- dev->rev_id = ((dev->ver << 28) | ver << 16);
1922-
1923- ret = mtk_wed_buffer_alloc(dev);
1924+ ret = mtk_wed_tx_buffer_alloc(dev);
1925 if (ret)
1926 goto error;
1927
1928- if (dev->ver > MTK_WED_V1) {
1929+ if (mtk_wed_get_rx_capa(dev)) {
1930 ret = mtk_wed_rro_alloc(dev);
1931 if (ret)
1932 goto error;
developer8c109ee2023-06-29 16:44:45 +08001933@@ -1533,15 +2085,20 @@ mtk_wed_attach(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001934 init_completion(&dev->wlan_reset_done);
1935 atomic_set(&dev->fe_reset, 0);
1936
1937- if (dev->ver == MTK_WED_V1)
1938+ if (dev->hw->version != 1)
1939+ dev->rev_id = wed_r32(dev, MTK_WED_REV_ID);
1940+ else
1941 regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
1942 BIT(hw->index), 0);
1943- else
1944+
1945+ if (mtk_wed_get_rx_capa(dev))
1946 ret = mtk_wed_wo_init(hw);
1947
1948 error:
1949- if (ret)
1950+ if (ret) {
1951+ pr_info("%s: detach wed\n", __func__);
1952 mtk_wed_detach(dev);
1953+ }
1954 out:
1955 mutex_unlock(&hw_lock);
1956
developer8c109ee2023-06-29 16:44:45 +08001957@@ -1576,8 +2133,26 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx,
developer23f9f0f2023-06-15 13:06:25 +08001958 if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE, reset))
1959 return -ENOMEM;
1960
1961+ if (dev->hw->version == 3 && idx == 1) {
1962+ /* reset prefetch index */
1963+ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG,
1964+ MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR |
1965+ MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR);
1966+
1967+ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
1968+ MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR |
1969+ MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR);
1970+
1971+ /* reset prefetch FIFO */
1972+ wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG,
1973+ MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR |
1974+ MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR);
1975+ wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG, 0);
1976+ }
1977+
1978 ring->reg_base = MTK_WED_RING_TX(idx);
1979 ring->wpdma = regs;
1980+ ring->flags |= MTK_WED_RING_CONFIGURED;
1981
1982 /* WED -> WPDMA */
1983 wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
developer8c109ee2023-06-29 16:44:45 +08001984@@ -1599,7 +2174,7 @@ mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
developer23f9f0f2023-06-15 13:06:25 +08001985 struct mtk_wed_ring *ring = &dev->txfree_ring;
1986 int i, idx = 1;
1987
1988- if(dev->ver > MTK_WED_V1)
1989+ if(dev->hw->version > 1)
1990 idx = 0;
1991
1992 /*
developer8c109ee2023-06-29 16:44:45 +08001993@@ -1652,6 +2227,129 @@ mtk_wed_rx_ring_setup(struct mtk_wed_device *dev,
developer23f9f0f2023-06-15 13:06:25 +08001994 return 0;
1995 }
1996
1997+static int
1998+mtk_wed_rro_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
1999+{
2000+ struct mtk_wed_ring *ring = &dev->rx_rro_ring[idx];
2001+
2002+ ring->wpdma = regs;
2003+
2004+ wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_BASE,
2005+ readl(regs));
2006+ wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_COUNT,
2007+ readl(regs + MTK_WED_RING_OFS_COUNT));
2008+
2009+ ring->flags |= MTK_WED_RING_CONFIGURED;
2010+
2011+ return 0;
2012+}
2013+
2014+static int
2015+mtk_wed_msdu_pg_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
2016+{
2017+ struct mtk_wed_ring *ring = &dev->rx_page_ring[idx];
2018+
2019+ ring->wpdma = regs;
2020+
2021+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_BASE,
2022+ readl(regs));
2023+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_COUNT,
2024+ readl(regs + MTK_WED_RING_OFS_COUNT));
2025+
2026+ ring->flags |= MTK_WED_RING_CONFIGURED;
2027+
2028+ return 0;
2029+}
2030+
2031+static int
2032+mtk_wed_ind_rx_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
2033+{
2034+ struct mtk_wed_ring *ring = &dev->ind_cmd_ring;
2035+ u32 val = readl(regs + MTK_WED_RING_OFS_COUNT);
2036+ int i = 0, cnt = 0;
2037+
2038+ ring->wpdma = regs;
2039+
2040+ if (readl(regs) & 0xf)
2041+		pr_info("%s(): address is not 16-byte aligned\n", __func__);
2042+
2043+ wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_BASE,
2044+ readl(regs) & 0xfffffff0);
2045+
2046+ wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_COUNT,
2047+ readl(regs + MTK_WED_RING_OFS_COUNT));
2048+
2049+ /* ack sn cr */
2050+ wed_w32(dev, MTK_WED_RRO_CFG0, dev->wlan.phy_base +
2051+ dev->wlan.ind_cmd.ack_sn_addr);
2052+ wed_w32(dev, MTK_WED_RRO_CFG1,
2053+ FIELD_PREP(MTK_WED_RRO_CFG1_MAX_WIN_SZ,
2054+ dev->wlan.ind_cmd.win_size) |
2055+ FIELD_PREP(MTK_WED_RRO_CFG1_PARTICL_SE_ID,
2056+ dev->wlan.ind_cmd.particular_sid));
2057+
2058+ /* particular session addr element */
2059+ wed_w32(dev, MTK_WED_ADDR_ELEM_CFG0, dev->wlan.ind_cmd.particular_se_phys);
2060+
2061+ for (i = 0; i < dev->wlan.ind_cmd.se_group_nums; i++) {
2062+ wed_w32(dev, MTK_WED_RADDR_ELEM_TBL_WDATA,
2063+ dev->wlan.ind_cmd.addr_elem_phys[i] >> 4);
2064+ wed_w32(dev, MTK_WED_ADDR_ELEM_TBL_CFG,
2065+ MTK_WED_ADDR_ELEM_TBL_WR | (i & 0x7f));
2066+
2067+ val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
2068+ while (!(val & MTK_WED_ADDR_ELEM_TBL_WR_RDY) &&
2069+ cnt < 100) {
2070+ val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
2071+ cnt++;
2072+ }
2073+ if (cnt >= 100) {
2074+ dev_err(dev->hw->dev, "mtk_wed%d: write ba session base failed!\n",
2075+ dev->hw->index);
2076+ }
2077+ /*if (mtk_wed_poll_busy(dev, MTK_WED_ADDR_ELEM_TBL_CFG,
2078+ MTK_WED_ADDR_ELEM_TBL_WR_RDY)) {
2079+ dev_err(dev->hw->dev, "mtk_wed%d: write ba session base failed!\n",
2080+ dev->hw->index);
2081+ return -1;
2082+ }*/
2083+ }
2084+
2085+ /* pn check init */
2086+ for (i = 0; i < dev->wlan.ind_cmd.particular_sid; i++) {
2087+ wed_w32(dev, MTK_WED_PN_CHECK_WDATA_M,
2088+ MTK_WED_PN_CHECK_IS_FIRST);
2089+
2090+ wed_w32(dev, MTK_WED_PN_CHECK_CFG, MTK_WED_PN_CHECK_WR |
2091+ FIELD_PREP(MTK_WED_PN_CHECK_SE_ID, i));
2092+
2093+ cnt = 0;
2094+ val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
2095+ while (!(val & MTK_WED_PN_CHECK_WR_RDY) &&
2096+ cnt < 100) {
2097+ val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
2098+ cnt++;
2099+ }
2100+ if (cnt >= 100) {
2101+ dev_err(dev->hw->dev, "mtk_wed%d: session(%d) init failed!\n",
2102+ dev->hw->index, i);
2103+ }
2104+ /*if (mtk_wed_poll_busy(dev, MTK_WED_PN_CHECK_CFG,
2105+ MTK_WED_PN_CHECK_WR_RDY)) {
2106+ dev_err(dev->hw->dev, "mtk_wed%d: session(%d) init failed!\n",
2107+ dev->hw->index, i);
2108+ //return -1;
2109+ }*/
2110+ }
2111+
2112+ wed_w32(dev, MTK_WED_RX_IND_CMD_CNT0, MTK_WED_RX_IND_CMD_DBG_CNT_EN);
2113+
2114+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN);
2115+
2116+ return 0;
2117+}
2118+
2119+
2120 static u32
2121 mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
2122 {
developer8c109ee2023-06-29 16:44:45 +08002123@@ -1660,6 +2358,8 @@ mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
developer23f9f0f2023-06-15 13:06:25 +08002124 val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
2125 wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
2126 val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
2127+ if (dev->hw->version == 3)
2128+ val &= MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT;
2129 WARN_RATELIMIT(val, "mtk_wed%d: error status=%08x\n",
2130 dev->hw->index, val);
2131
developer8c109ee2023-06-29 16:44:45 +08002132@@ -1752,6 +2452,9 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
developer23f9f0f2023-06-15 13:06:25 +08002133 .tx_ring_setup = mtk_wed_tx_ring_setup,
2134 .txfree_ring_setup = mtk_wed_txfree_ring_setup,
2135 .rx_ring_setup = mtk_wed_rx_ring_setup,
2136+ .rro_rx_ring_setup = mtk_wed_rro_rx_ring_setup,
2137+ .msdu_pg_rx_ring_setup = mtk_wed_msdu_pg_rx_ring_setup,
2138+ .ind_rx_ring_setup = mtk_wed_ind_rx_ring_setup,
2139 .msg_update = mtk_wed_send_msg,
2140 .start = mtk_wed_start,
2141 .stop = mtk_wed_stop,
developer8c109ee2023-06-29 16:44:45 +08002142@@ -1763,6 +2466,7 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
developer23f9f0f2023-06-15 13:06:25 +08002143 .detach = mtk_wed_detach,
2144 .setup_tc = mtk_wed_eth_setup_tc,
2145 .ppe_check = mtk_wed_ppe_check,
2146+ .start_hwrro = mtk_wed_start_hwrro,
2147 };
2148 struct device_node *eth_np = eth->dev->of_node;
2149 struct platform_device *pdev;
developer8c109ee2023-06-29 16:44:45 +08002150@@ -1802,9 +2506,10 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
developer23f9f0f2023-06-15 13:06:25 +08002151 hw->wdma_phy = wdma_phy;
2152 hw->index = index;
2153 hw->irq = irq;
2154- hw->ver = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
2155+ hw->version = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3) ?
2156+ 3 : MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
2157
2158- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2159+ if (hw->version == 1) {
2160 hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
2161 "mediatek,pcie-mirror");
2162 hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
developer8c109ee2023-06-29 16:44:45 +08002163@@ -1819,7 +2524,6 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
developer23f9f0f2023-06-15 13:06:25 +08002164 regmap_write(hw->mirror, 0, 0);
2165 regmap_write(hw->mirror, 4, 0);
2166 }
2167- hw->ver = MTK_WED_V1;
2168 }
2169
2170 mtk_wed_hw_add_debugfs(hw);
2171diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
2172index 490873c..fcf7bd0 100644
2173--- a/drivers/net/ethernet/mediatek/mtk_wed.h
2174+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
2175@@ -10,10 +10,13 @@
2176 #include <linux/netdevice.h>
2177 #define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000)
2178
2179-#define MTK_WED_PKT_SIZE 1900
2180+#define MTK_WED_PKT_SIZE 1920//1900
2181 #define MTK_WED_BUF_SIZE 2048
2182+#define MTK_WED_PAGE_BUF_SIZE 128
2183 #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
2184+#define MTK_WED_RX_PAGE_BUF_PER_PAGE (PAGE_SIZE / 128)
2185 #define MTK_WED_RX_RING_SIZE 1536
2186+#define MTK_WED_RX_PG_BM_CNT 8192
2187
2188 #define MTK_WED_TX_RING_SIZE 2048
2189 #define MTK_WED_WDMA_RING_SIZE 512
2190@@ -27,6 +30,9 @@
2191 #define MTK_WED_RRO_QUE_CNT 8192
2192 #define MTK_WED_MIOD_ENTRY_CNT 128
2193
2194+#define MTK_WED_TX_BM_DMA_SIZE 65536
2195+#define MTK_WED_TX_BM_PKT_CNT 32768
2196+
2197 #define MODULE_ID_WO 1
2198
2199 struct mtk_eth;
2200@@ -43,6 +49,8 @@ struct mtk_wed_hw {
2201 struct dentry *debugfs_dir;
2202 struct mtk_wed_device *wed_dev;
2203 struct mtk_wed_wo *wed_wo;
2204+ struct mtk_wed_pao *wed_pao;
2205+ u32 pci_base;
2206 u32 debugfs_reg;
2207 u32 num_flows;
2208 u32 wdma_phy;
2209@@ -50,7 +58,8 @@ struct mtk_wed_hw {
2210 int ring_num;
2211 int irq;
2212 int index;
2213- u32 ver;
2214+ int token_id;
2215+ u32 version;
2216 };
2217
2218 struct mtk_wdma_info {
2219@@ -58,6 +67,18 @@ struct mtk_wdma_info {
2220 u8 queue;
2221 u16 wcid;
2222 u8 bss;
2223+ u32 usr_info;
2224+ u8 tid;
2225+ u8 is_fixedrate;
2226+ u8 is_prior;
2227+ u8 is_sp;
2228+ u8 hf;
2229+ u8 amsdu_en;
2230+};
2231+
2232+struct mtk_wed_pao {
2233+ char *hif_txd[32];
2234+ dma_addr_t hif_txd_phys[32];
2235 };
2236
2237 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
2238diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
2239index 4a9e684..51e3d7c 100644
2240--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
2241+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
2242@@ -11,9 +11,11 @@ struct reg_dump {
2243 u16 offset;
2244 u8 type;
2245 u8 base;
2246+ u32 mask;
2247 };
2248
2249 enum {
2250+ DUMP_TYPE_END,
2251 DUMP_TYPE_STRING,
2252 DUMP_TYPE_WED,
2253 DUMP_TYPE_WDMA,
2254@@ -23,8 +25,11 @@ enum {
2255 DUMP_TYPE_WED_RRO,
2256 };
2257
2258+#define DUMP_END() { .type = DUMP_TYPE_END }
2259 #define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
2260 #define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ }
2261+#define DUMP_REG_MASK(_reg, _mask) { #_mask, MTK_##_reg, DUMP_TYPE_WED, 0, MTK_##_mask }
2262+
2263 #define DUMP_RING(_prefix, _base, ...) \
2264 { _prefix " BASE", _base, __VA_ARGS__ }, \
2265 { _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \
2266@@ -32,6 +37,7 @@ enum {
2267 { _prefix " DIDX", _base + 0xc, __VA_ARGS__ }
2268
2269 #define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED)
2270+#define DUMP_WED_MASK(_reg, _mask) DUMP_REG_MASK(_reg, _mask)
2271 #define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED)
2272
2273 #define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA)
2274@@ -52,36 +58,49 @@ print_reg_val(struct seq_file *s, const char *name, u32 val)
2275
2276 static void
2277 dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
2278- const struct reg_dump *regs, int n_regs)
2279+ const struct reg_dump **regs)
2280 {
2281- const struct reg_dump *cur;
2282+ const struct reg_dump **cur_o = regs, *cur;
2283+ bool newline = false;
2284 u32 val;
2285
2286- for (cur = regs; cur < &regs[n_regs]; cur++) {
2287- switch (cur->type) {
2288- case DUMP_TYPE_STRING:
2289- seq_printf(s, "%s======== %s:\n",
2290- cur > regs ? "\n" : "",
2291- cur->name);
2292- continue;
2293- case DUMP_TYPE_WED:
2294- case DUMP_TYPE_WED_RRO:
2295- val = wed_r32(dev, cur->offset);
2296- break;
2297- case DUMP_TYPE_WDMA:
2298- val = wdma_r32(dev, cur->offset);
2299- break;
2300- case DUMP_TYPE_WPDMA_TX:
2301- val = wpdma_tx_r32(dev, cur->base, cur->offset);
2302- break;
2303- case DUMP_TYPE_WPDMA_TXFREE:
2304- val = wpdma_txfree_r32(dev, cur->offset);
2305- break;
2306- case DUMP_TYPE_WPDMA_RX:
2307- val = wpdma_rx_r32(dev, cur->base, cur->offset);
2308- break;
2309+ while (*cur_o) {
2310+ cur = *cur_o;
2311+
2312+ while (cur->type != DUMP_TYPE_END) {
2313+ switch (cur->type) {
2314+ case DUMP_TYPE_STRING:
2315+ seq_printf(s, "%s======== %s:\n",
2316+ newline ? "\n" : "",
2317+ cur->name);
2318+ newline = true;
2319+ cur++;
2320+ continue;
2321+ case DUMP_TYPE_WED:
2322+ case DUMP_TYPE_WED_RRO:
2323+ val = wed_r32(dev, cur->offset);
2324+ break;
2325+ case DUMP_TYPE_WDMA:
2326+ val = wdma_r32(dev, cur->offset);
2327+ break;
2328+ case DUMP_TYPE_WPDMA_TX:
2329+ val = wpdma_tx_r32(dev, cur->base, cur->offset);
2330+ break;
2331+ case DUMP_TYPE_WPDMA_TXFREE:
2332+ val = wpdma_txfree_r32(dev, cur->offset);
2333+ break;
2334+ case DUMP_TYPE_WPDMA_RX:
2335+ val = wpdma_rx_r32(dev, cur->base, cur->offset);
2336+ break;
2337+ }
2338+
2339+ if (cur->mask)
2340+ val = (cur->mask & val) >> (ffs(cur->mask) - 1);
2341+
2342+ print_reg_val(s, cur->name, val);
2343+ cur++;
2344 }
2345- print_reg_val(s, cur->name, val);
2346+ cur_o++;
2347 }
2348 }
2349
2350@@ -89,7 +108,7 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
2351 static int
2352 wed_txinfo_show(struct seq_file *s, void *data)
2353 {
2354- static const struct reg_dump regs[] = {
2355+ static const struct reg_dump regs_common[] = {
2356 DUMP_STR("WED TX"),
2357 DUMP_WED(WED_TX_MIB(0)),
2358 DUMP_WED_RING(WED_RING_TX(0)),
2359@@ -128,16 +147,32 @@ wed_txinfo_show(struct seq_file *s, void *data)
2360 DUMP_WDMA_RING(WDMA_RING_RX(0)),
2361 DUMP_WDMA_RING(WDMA_RING_RX(1)),
2362
2363- DUMP_STR("TX FREE"),
2364+ DUMP_STR("WED TX FREE"),
2365 DUMP_WED(WED_RX_MIB(0)),
2366+ DUMP_WED_RING(WED_RING_RX(0)),
2367+ DUMP_WED(WED_WPDMA_RX_COHERENT_MIB(0)),
2368+
2369+ DUMP_WED(WED_RX_MIB(1)),
2370+ DUMP_WED_RING(WED_RING_RX(1)),
2371+ DUMP_WED(WED_WPDMA_RX_COHERENT_MIB(1)),
2372+ DUMP_STR("WED_WPDMA TX FREE"),
2373+ DUMP_WED_RING(WED_WPDMA_RING_RX(0)),
2374+ DUMP_WED_RING(WED_WPDMA_RING_RX(1)),
2375+ DUMP_END(),
2376+ };
2377+
2378+ static const struct reg_dump *regs[] = {
2379+ &regs_common[0],
2380+ NULL,
2381 };
2382+
2383 struct mtk_wed_hw *hw = s->private;
2384 struct mtk_wed_device *dev = hw->wed_dev;
2385
2386 if (!dev)
2387 return 0;
2388
2389- dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
2390+ dump_wed_regs(s, dev, regs);
2391
2392 return 0;
2393 }
2394@@ -146,7 +181,7 @@ DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
2395 static int
2396 wed_rxinfo_show(struct seq_file *s, void *data)
2397 {
2398- static const struct reg_dump regs[] = {
2399+ static const struct reg_dump regs_common[] = {
2400 DUMP_STR("WPDMA RX"),
2401 DUMP_WPDMA_RX_RING(0),
2402 DUMP_WPDMA_RX_RING(1),
2403@@ -164,7 +199,7 @@ wed_rxinfo_show(struct seq_file *s, void *data)
2404 DUMP_WED_RING(WED_RING_RX_DATA(0)),
2405 DUMP_WED_RING(WED_RING_RX_DATA(1)),
2406
2407- DUMP_STR("WED RRO"),
2408+ DUMP_STR("WED WO RRO"),
2409 DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0),
2410 DUMP_WED(WED_RROQM_MID_MIB),
2411 DUMP_WED(WED_RROQM_MOD_MIB),
2412@@ -175,16 +210,6 @@ wed_rxinfo_show(struct seq_file *s, void *data)
2413 DUMP_WED(WED_RROQM_FDBK_ANC_MIB),
2414 DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB),
2415
2416- DUMP_STR("WED Route QM"),
2417- DUMP_WED(WED_RTQM_R2H_MIB(0)),
2418- DUMP_WED(WED_RTQM_R2Q_MIB(0)),
2419- DUMP_WED(WED_RTQM_Q2H_MIB(0)),
2420- DUMP_WED(WED_RTQM_R2H_MIB(1)),
2421- DUMP_WED(WED_RTQM_R2Q_MIB(1)),
2422- DUMP_WED(WED_RTQM_Q2H_MIB(1)),
2423- DUMP_WED(WED_RTQM_Q2N_MIB),
2424- DUMP_WED(WED_RTQM_Q2B_MIB),
2425- DUMP_WED(WED_RTQM_PFDBK_MIB),
2426
2427 DUMP_STR("WED WDMA TX"),
2428 DUMP_WED(WED_WDMA_TX_MIB),
2429@@ -205,15 +230,99 @@ wed_rxinfo_show(struct seq_file *s, void *data)
2430 DUMP_WED(WED_RX_BM_INTF2),
2431 DUMP_WED(WED_RX_BM_INTF),
2432 DUMP_WED(WED_RX_BM_ERR_STS),
2433+ DUMP_END()
2434+ };
2435+
2436+ static const struct reg_dump regs_v2[] = {
2437+ DUMP_STR("WED Route QM"),
2438+ DUMP_WED(WED_RTQM_R2H_MIB(0)),
2439+ DUMP_WED(WED_RTQM_R2Q_MIB(0)),
2440+ DUMP_WED(WED_RTQM_Q2H_MIB(0)),
2441+ DUMP_WED(WED_RTQM_R2H_MIB(1)),
2442+ DUMP_WED(WED_RTQM_R2Q_MIB(1)),
2443+ DUMP_WED(WED_RTQM_Q2H_MIB(1)),
2444+ DUMP_WED(WED_RTQM_Q2N_MIB),
2445+ DUMP_WED(WED_RTQM_Q2B_MIB),
2446+ DUMP_WED(WED_RTQM_PFDBK_MIB),
2447+
2448+ DUMP_END()
2449+ };
2450+
2451+ static const struct reg_dump regs_v3[] = {
2452+ DUMP_STR("WED RX RRO DATA"),
2453+ DUMP_WED_RING(WED_RRO_RX_D_RX(0)),
2454+ DUMP_WED_RING(WED_RRO_RX_D_RX(1)),
2455+
2456+ DUMP_STR("WED RX MSDU PAGE"),
2457+ DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(0)),
2458+ DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(1)),
2459+ DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(2)),
2460+
2461+ DUMP_STR("WED RX IND CMD"),
2462+ DUMP_WED(WED_IND_CMD_RX_CTRL1),
2463+ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL2, WED_IND_CMD_MAX_CNT),
2464+ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0, WED_IND_CMD_PROC_IDX),
2465+ DUMP_WED_MASK(RRO_IND_CMD_SIGNATURE, RRO_IND_CMD_DMA_IDX),
2466+ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0, WED_IND_CMD_MAGIC_CNT),
2467+ DUMP_WED_MASK(RRO_IND_CMD_SIGNATURE, RRO_IND_CMD_MAGIC_CNT),
2468+ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0,
2469+ WED_IND_CMD_PREFETCH_FREE_CNT),
2470+ DUMP_WED_MASK(WED_RRO_CFG1, WED_RRO_CFG1_PARTICL_SE_ID),
2471+
2472+ DUMP_STR("WED ADDR ELEM"),
2473+ DUMP_WED(WED_ADDR_ELEM_CFG0),
2474+ DUMP_WED_MASK(WED_ADDR_ELEM_CFG1,
2475+ WED_ADDR_ELEM_PREFETCH_FREE_CNT),
2476+
2477+ DUMP_STR("WED Route QM"),
2478+ DUMP_WED(WED_RTQM_ENQ_I2Q_DMAD_CNT),
2479+ DUMP_WED(WED_RTQM_ENQ_I2N_DMAD_CNT),
2480+ DUMP_WED(WED_RTQM_ENQ_I2Q_PKT_CNT),
2481+ DUMP_WED(WED_RTQM_ENQ_I2N_PKT_CNT),
2482+ DUMP_WED(WED_RTQM_ENQ_USED_ENTRY_CNT),
2483+ DUMP_WED(WED_RTQM_ENQ_ERR_CNT),
2484+
2485+ DUMP_WED(WED_RTQM_DEQ_DMAD_CNT),
2486+ DUMP_WED(WED_RTQM_DEQ_Q2I_DMAD_CNT),
2487+ DUMP_WED(WED_RTQM_DEQ_PKT_CNT),
2488+ DUMP_WED(WED_RTQM_DEQ_Q2I_PKT_CNT),
2489+ DUMP_WED(WED_RTQM_DEQ_USED_PFDBK_CNT),
2490+ DUMP_WED(WED_RTQM_DEQ_ERR_CNT),
2491+
2492+ DUMP_END()
2493+ };
2494+
2495+ static const struct reg_dump *regs_new_v2[] = {
2496+ &regs_common[0],
2497+ &regs_v2[0],
2498+ NULL,
2499+ };
2500+
2501+ static const struct reg_dump *regs_new_v3[] = {
2502+ &regs_common[0],
2503+ &regs_v3[0],
2504+ NULL,
2505 };
2506
2507 struct mtk_wed_hw *hw = s->private;
2508 struct mtk_wed_device *dev = hw->wed_dev;
2509+ const struct reg_dump **regs;
2510
2511 if (!dev)
2512 return 0;
2513
2514- dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
2515+ switch(dev->hw->version) {
2516+ case 2:
2517+ regs = regs_new_v2;
2518+ break;
2519+ case 3:
2520+ regs = regs_new_v3;
2521+ break;
2522+ default:
2523+ return 0;
2524+ }
2525+
2526+ dump_wed_regs(s, dev, regs);
2527
2528 return 0;
2529 }
2530@@ -248,6 +357,383 @@ mtk_wed_reg_get(void *data, u64 *val)
2531 DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mtk_wed_reg_get, mtk_wed_reg_set,
2532 "0x%08llx\n");
2533
2534+static int
2535+wed_token_txd_show(struct seq_file *s, void *data)
2536+{
2537+ struct mtk_wed_hw *hw = s->private;
2538+ struct mtk_wed_device *dev = hw->wed_dev;
2539+ struct dma_page_info *page_list = dev->tx_buf_ring.pages;
2540+ int token = dev->wlan.token_start;
2541+ u32 val = hw->token_id, size = 1;
2542+ int page_idx = (val - token) / 2;
2543+ int i;
2544+
2545+ if (val < token) {
2546+ size = val;
2547+ page_idx = 0;
2548+ }
2549+
2550+ for (i = 0; i < size; i += MTK_WED_BUF_PER_PAGE) {
2551+ void *page = page_list[page_idx++].addr;
2552+ void *buf;
2553+ int j;
2554+
2555+ if (!page)
2556+ break;
2557+
2558+ buf = page_to_virt(page);
2559+
2560+ for (j = 0; j < MTK_WED_BUF_PER_PAGE; j++) {
2561+ printk("[TXD]:token id = %d\n", token + 2 * (page_idx - 1) + j);
2562+ print_hex_dump(KERN_ERR , "", DUMP_PREFIX_OFFSET, 16, 1, (u8 *)buf, 128, false);
2563+ seq_printf(s, "\n");
2564+
2565+ buf += MTK_WED_BUF_SIZE;
2566+ }
2567+ }
2568+
2569+ return 0;
2570+}
2571+
2572+DEFINE_SHOW_ATTRIBUTE(wed_token_txd);
2573+
2574+static int
2575+wed_pao_show(struct seq_file *s, void *data)
2576+{
2577+ static const struct reg_dump regs_common[] = {
2578+		DUMP_STR("PAO AMSDU INFO"),
2579+ DUMP_WED(WED_PAO_MON_AMSDU_FIFO_DMAD),
2580+
2581+		DUMP_STR("PAO AMSDU ENG0 INFO"),
2582+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(0)),
2583+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(0)),
2584+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(0)),
2585+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(0)),
2586+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(0)),
2587+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(0),
2588+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
2589+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(0),
2590+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
2591+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(0),
2592+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
2593+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(0),
2594+ WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
2595+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(0),
2596+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
2597+
2598+		DUMP_STR("PAO AMSDU ENG1 INFO"),
2599+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(1)),
2600+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(1)),
2601+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(1)),
2602+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(1)),
2603+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(1)),
2604+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(1),
2605+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
2606+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(1),
2607+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
2608+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(1),
2609+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
2610+		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(1),
2611+ WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
2612+		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(1),
2613+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
2614+
2615+		DUMP_STR("PAO AMSDU ENG2 INFO"),
2616+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(2)),
2617+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(2)),
2618+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(2)),
2619+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(2)),
2620+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(2)),
2621+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(2),
2622+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
2623+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(2),
2624+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
2625+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(2),
2626+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
2627+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(2),
2628+ WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
2629+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(2),
2630+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
2631+
2632+		DUMP_STR("PAO AMSDU ENG3 INFO"),
2633+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(3)),
2634+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(3)),
2635+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(3)),
2636+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(3)),
2637+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(3)),
2638+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(3),
2639+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
2640+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(3),
2641+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
2642+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(3),
2643+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
2644+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(3),
2645+ WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
2646+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(3),
2647+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
2648+
2649+		DUMP_STR("PAO AMSDU ENG4 INFO"),
2650+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(4)),
2651+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(4)),
2652+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(4)),
2653+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(4)),
2654+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(4)),
2655+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(4),
2656+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
2657+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(4),
2658+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
2659+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(4),
2660+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
2661+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(4),
2662+ WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
2663+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(4),
2664+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
2665+
2666+		DUMP_STR("PAO AMSDU ENG5 INFO"),
2667+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(5)),
2668+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(5)),
2669+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(5)),
2670+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(5)),
2671+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(5)),
2672+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(5),
2673+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
2674+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(5),
2675+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
2676+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(5),
2677+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
2678+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(5),
2679+ WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
2680+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(5),
2681+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
2682+
2683+		DUMP_STR("PAO AMSDU ENG6 INFO"),
2684+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(6)),
2685+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(6)),
2686+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(6)),
2687+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(6)),
2688+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(6)),
2689+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(6),
2690+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
2691+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(6),
2692+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
2693+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(6),
2694+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
2695+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(6),
2696+ WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
2697+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(6),
2698+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
2699+
2700+		DUMP_STR("PAO AMSDU ENG7 INFO"),
2701+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(7)),
2702+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(7)),
2703+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(7)),
2704+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(7)),
2705+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(7)),
2706+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(7),
2707+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
2708+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(7),
2709+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
2710+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(7),
2711+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
2712+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(7),
2713+ WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
2714+		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(7),
2715+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
2716+
2717+		DUMP_STR("PAO AMSDU ENG8 INFO"),
2718+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(8)),
2719+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(8)),
2720+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(8)),
2721+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(8)),
2722+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(8)),
2723+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(8),
2724+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
2725+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(8),
2726+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
2727+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(8),
2728+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
2729+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(8),
2730+ WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
2731+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(8),
2732+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
2733+
2734+ DUMP_STR("PAO QMEM INFO"),
2735+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(0), WED_PAO_QMEM_FQ_CNT),
2736+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(0), WED_PAO_QMEM_SP_QCNT),
2737+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(1), WED_PAO_QMEM_TID0_QCNT),
2738+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(1), WED_PAO_QMEM_TID1_QCNT),
2739+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(2), WED_PAO_QMEM_TID2_QCNT),
2740+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(2), WED_PAO_QMEM_TID3_QCNT),
2741+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(3), WED_PAO_QMEM_TID4_QCNT),
2742+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(3), WED_PAO_QMEM_TID5_QCNT),
2743+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(4), WED_PAO_QMEM_TID6_QCNT),
2744+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(4), WED_PAO_QMEM_TID7_QCNT),
2745+
2746+
2747+ DUMP_STR("PAO QMEM HEAD INFO"),
2748+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(0), WED_PAO_QMEM_FQ_HEAD),
2749+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(0), WED_PAO_QMEM_SP_QHEAD),
2750+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(1), WED_PAO_QMEM_TID0_QHEAD),
2751+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(1), WED_PAO_QMEM_TID1_QHEAD),
2752+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(2), WED_PAO_QMEM_TID2_QHEAD),
2753+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(2), WED_PAO_QMEM_TID3_QHEAD),
2754+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(3), WED_PAO_QMEM_TID4_QHEAD),
2755+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(3), WED_PAO_QMEM_TID5_QHEAD),
2756+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(4), WED_PAO_QMEM_TID6_QHEAD),
2757+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(4), WED_PAO_QMEM_TID7_QHEAD),
2758+
2759+ DUMP_STR("PAO QMEM TAIL INFO"),
2760+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(5), WED_PAO_QMEM_FQ_TAIL),
2761+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(5), WED_PAO_QMEM_SP_QTAIL),
2762+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(6), WED_PAO_QMEM_TID0_QTAIL),
2763+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(6), WED_PAO_QMEM_TID1_QTAIL),
2764+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(7), WED_PAO_QMEM_TID2_QTAIL),
2765+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(7), WED_PAO_QMEM_TID3_QTAIL),
2766+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(8), WED_PAO_QMEM_TID4_QTAIL),
2767+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(8), WED_PAO_QMEM_TID5_QTAIL),
2768+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(9), WED_PAO_QMEM_TID6_QTAIL),
2769+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(9), WED_PAO_QMEM_TID7_QTAIL),
2770+
2771+ DUMP_STR("PAO HIFTXD MSDU INFO"),
2772+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(1)),
2773+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(2)),
2774+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(3)),
2775+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(4)),
2776+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(5)),
2777+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(6)),
2778+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(7)),
2779+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(8)),
2780+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(9)),
2781+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(10)),
2782+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(11)),
2783+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(12)),
2784+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(13)),
2785+ DUMP_END()
2786+ };
2787+
2788+ static const struct reg_dump *regs[] = {
2789+ &regs_common[0],
2790+ NULL,
2791+ };
2792+ struct mtk_wed_hw *hw = s->private;
2793+ struct mtk_wed_device *dev = hw->wed_dev;
2794+
2795+ if (!dev)
2796+ return 0;
2797+
2798+ dump_wed_regs(s, dev, regs);
2799+
2800+ return 0;
2801+}
2802+DEFINE_SHOW_ATTRIBUTE(wed_pao);
2803+
2804+static int
2805+wed_rtqm_show(struct seq_file *s, void *data)
2806+{
2807+ static const struct reg_dump regs_common[] = {
2808+ DUMP_STR("WED Route QM IGRS0(N2H + Recycle)"),
2809+ DUMP_WED(WED_RTQM_IGRS0_I2HW_DMAD_CNT),
2810+ DUMP_WED(WED_RTQM_IGRS0_I2H_DMAD_CNT(0)),
2811+ DUMP_WED(WED_RTQM_IGRS0_I2H_DMAD_CNT(1)),
2812+ DUMP_WED(WED_RTQM_IGRS0_I2HW_PKT_CNT),
2813+ DUMP_WED(WED_RTQM_IGRS0_I2H_PKT_CNT(0)),
2814+		DUMP_WED(WED_RTQM_IGRS0_I2H_PKT_CNT(1)),
2815+ DUMP_WED(WED_RTQM_IGRS0_FDROP_CNT),
2816+
2817+
2818+ DUMP_STR("WED Route QM IGRS1(Legacy)"),
2819+ DUMP_WED(WED_RTQM_IGRS1_I2HW_DMAD_CNT),
2820+ DUMP_WED(WED_RTQM_IGRS1_I2H_DMAD_CNT(0)),
2821+ DUMP_WED(WED_RTQM_IGRS1_I2H_DMAD_CNT(1)),
2822+ DUMP_WED(WED_RTQM_IGRS1_I2HW_PKT_CNT),
2823+ DUMP_WED(WED_RTQM_IGRS1_I2H_PKT_CNT(0)),
2824+ DUMP_WED(WED_RTQM_IGRS1_I2H_PKT_CNT(1)),
2825+ DUMP_WED(WED_RTQM_IGRS1_FDROP_CNT),
2826+
2827+ DUMP_STR("WED Route QM IGRS2(RRO3.0)"),
2828+ DUMP_WED(WED_RTQM_IGRS2_I2HW_DMAD_CNT),
2829+ DUMP_WED(WED_RTQM_IGRS2_I2H_DMAD_CNT(0)),
2830+ DUMP_WED(WED_RTQM_IGRS2_I2H_DMAD_CNT(1)),
2831+ DUMP_WED(WED_RTQM_IGRS2_I2HW_PKT_CNT),
2832+ DUMP_WED(WED_RTQM_IGRS2_I2H_PKT_CNT(0)),
2833+ DUMP_WED(WED_RTQM_IGRS2_I2H_PKT_CNT(1)),
2834+ DUMP_WED(WED_RTQM_IGRS2_FDROP_CNT),
2835+
2836+ DUMP_STR("WED Route QM IGRS3(DEBUG)"),
2837+		DUMP_WED(WED_RTQM_IGRS3_I2HW_DMAD_CNT),
2838+ DUMP_WED(WED_RTQM_IGRS3_I2H_DMAD_CNT(0)),
2839+ DUMP_WED(WED_RTQM_IGRS3_I2H_DMAD_CNT(1)),
2840+ DUMP_WED(WED_RTQM_IGRS3_I2HW_PKT_CNT),
2841+ DUMP_WED(WED_RTQM_IGRS3_I2H_PKT_CNT(0)),
2842+ DUMP_WED(WED_RTQM_IGRS3_I2H_PKT_CNT(1)),
2843+ DUMP_WED(WED_RTQM_IGRS3_FDROP_CNT),
2844+
2845+ DUMP_END()
2846+ };
2847+
2848+ static const struct reg_dump *regs[] = {
2849+ &regs_common[0],
2850+ NULL,
2851+ };
2852+ struct mtk_wed_hw *hw = s->private;
2853+ struct mtk_wed_device *dev = hw->wed_dev;
2854+
2855+ if (!dev)
2856+ return 0;
2857+
2858+ dump_wed_regs(s, dev, regs);
2859+
2860+ return 0;
2861+}
2862+DEFINE_SHOW_ATTRIBUTE(wed_rtqm);
2863+
2864+
2865+static int
2866+wed_rro_show(struct seq_file *s, void *data)
2867+{
2868+ static const struct reg_dump regs_common[] = {
2869+ DUMP_STR("RRO/IND CMD CNT"),
2870+ DUMP_WED(WED_RX_IND_CMD_CNT(1)),
2871+ DUMP_WED(WED_RX_IND_CMD_CNT(2)),
2872+ DUMP_WED(WED_RX_IND_CMD_CNT(3)),
2873+ DUMP_WED(WED_RX_IND_CMD_CNT(4)),
2874+ DUMP_WED(WED_RX_IND_CMD_CNT(5)),
2875+ DUMP_WED(WED_RX_IND_CMD_CNT(6)),
2876+ DUMP_WED(WED_RX_IND_CMD_CNT(7)),
2877+ DUMP_WED(WED_RX_IND_CMD_CNT(8)),
2878+ DUMP_WED_MASK(WED_RX_IND_CMD_CNT(9),
2879+ WED_IND_CMD_MAGIC_CNT_FAIL_CNT),
2880+
2881+ DUMP_WED(WED_RX_ADDR_ELEM_CNT(0)),
2882+ DUMP_WED_MASK(WED_RX_ADDR_ELEM_CNT(1),
2883+ WED_ADDR_ELEM_SIG_FAIL_CNT),
2884+ DUMP_WED(WED_RX_MSDU_PG_CNT(1)),
2885+ DUMP_WED(WED_RX_MSDU_PG_CNT(2)),
2886+ DUMP_WED(WED_RX_MSDU_PG_CNT(3)),
2887+ DUMP_WED(WED_RX_MSDU_PG_CNT(4)),
2888+ DUMP_WED(WED_RX_MSDU_PG_CNT(5)),
2889+ DUMP_WED_MASK(WED_RX_PN_CHK_CNT,
2890+ WED_PN_CHK_FAIL_CNT),
2891+
2892+ DUMP_END()
2893+ };
2894+
2895+ static const struct reg_dump *regs[] = {
2896+ &regs_common[0],
2897+ NULL,
2898+ };
2899+ struct mtk_wed_hw *hw = s->private;
2900+ struct mtk_wed_device *dev = hw->wed_dev;
2901+
2902+ if (!dev)
2903+ return 0;
2904+
2905+ dump_wed_regs(s, dev, regs);
2906+
2907+ return 0;
2908+}
2909+DEFINE_SHOW_ATTRIBUTE(wed_rro);
2910+
2911 void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
2912 {
2913 struct dentry *dir;
2914@@ -261,8 +747,18 @@ void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
2915 debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
2916 debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
2917 debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
2918- debugfs_create_file_unsafe("rxinfo", 0400, dir, hw, &wed_rxinfo_fops);
2919- if (hw->ver != MTK_WED_V1) {
2920+ debugfs_create_u32("token_id", 0600, dir, &hw->token_id);
2921+ debugfs_create_file_unsafe("token_txd", 0600, dir, hw, &wed_token_txd_fops);
2922+
2923+ if (hw->version == 3)
2924+ debugfs_create_file_unsafe("pao", 0400, dir, hw, &wed_pao_fops);
2925+
2926+ if (hw->version != 1) {
2927+ debugfs_create_file_unsafe("rxinfo", 0400, dir, hw, &wed_rxinfo_fops);
2928+ if (hw->version == 3) {
2929+ debugfs_create_file_unsafe("rtqm", 0400, dir, hw, &wed_rtqm_fops);
2930+ debugfs_create_file_unsafe("rro", 0400, dir, hw, &wed_rro_fops);
2931+ }
2932 wed_wo_mcu_debugfs(hw, dir);
2933 }
2934 }
2935diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
2936index 96e30a3..055594d 100644
2937--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
2938+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
2939@@ -242,7 +242,7 @@ mtk_wed_load_firmware(struct mtk_wed_wo *wo)
2940 u32 ofs = 0;
2941 u32 boot_cr, val;
2942
2943- mcu = wo->hw->index ? MT7986_FIRMWARE_WO_2 : MT7986_FIRMWARE_WO_1;
2944+ mcu = wo->hw->index ? MTK_FIRMWARE_WO_1 : MTK_FIRMWARE_WO_0;
2945
2946 ret = request_firmware(&fw, mcu, wo->hw->dev);
2947 if (ret)
2948@@ -289,8 +289,12 @@ mtk_wed_load_firmware(struct mtk_wed_wo *wo)
2949 }
2950
2951 /* write the start address */
2952- boot_cr = wo->hw->index ?
2953- WOX_MCU_CFG_LS_WA_BOOT_ADDR_ADDR : WOX_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
2954+ if (wo->hw->version == 3)
2955+ boot_cr = WOX_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
2956+ else
2957+ boot_cr = wo->hw->index ?
2958+ WOX_MCU_CFG_LS_WA_BOOT_ADDR_ADDR : WOX_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
2959+
2960 wo_w32(wo, boot_cr, (wo->region[WO_REGION_EMI].addr_pa >> 16));
2961
2962 /* wo firmware reset */
2963@@ -298,8 +302,7 @@ mtk_wed_load_firmware(struct mtk_wed_wo *wo)
2964
2965 val = wo_r32(wo, WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR);
2966
2967- val |= wo->hw->index ? WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WA_CPU_RSTB_MASK :
2968- WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WM_CPU_RSTB_MASK;
2969+ val |= WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WM_CPU_RSTB_MASK;
2970
2971 wo_w32(wo, WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val);
2972
2973diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.h b/drivers/net/ethernet/mediatek/mtk_wed_mcu.h
2974index 19e1199..c07bdb6 100644
2975--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.h
2976+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.h
2977@@ -16,8 +16,9 @@
2978 #define WARP_OK_STATUS (0)
2979 #define WARP_ALREADY_DONE_STATUS (1)
2980
2981-#define MT7986_FIRMWARE_WO_1 "mediatek/mt7986_wo_0.bin"
2982-#define MT7986_FIRMWARE_WO_2 "mediatek/mt7986_wo_1.bin"
2983+#define MTK_FIRMWARE_WO_0 "mediatek/mtk_wo_0.bin"
2984+#define MTK_FIRMWARE_WO_1 "mediatek/mtk_wo_1.bin"
2985+#define MTK_FIRMWARE_WO_2 "mediatek/mtk_wo_2.bin"
2986
2987 #define WOCPU_EMI_DEV_NODE "mediatek,wocpu_emi"
2988 #define WOCPU_ILM_DEV_NODE "mediatek,wocpu_ilm"
2989diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2990index 403a36b..4e619ff 100644
2991--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2992+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2993@@ -20,6 +20,9 @@
2994 #define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31)
2995 #define MTK_WED_RX_BM_TOKEN GENMASK(31, 16)
2996
2997+#define MTK_WDMA_TXD0_DESC_INFO_DMA_DONE BIT(29)
2998+#define MTK_WDMA_TXD1_DESC_INFO_DMA_DONE BIT(31)
2999+
3000 struct mtk_wdma_desc {
3001 __le32 buf0;
3002 __le32 ctrl;
3003@@ -51,6 +54,7 @@ struct mtk_wdma_desc {
3004 #define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
3005 #define MTK_WED_RESET_RX_RRO_QM BIT(20)
3006 #define MTK_WED_RESET_RX_ROUTE_QM BIT(21)
3007+#define MTK_WED_RESET_TX_PAO BIT(22)
3008 #define MTK_WED_RESET_WED BIT(31)
3009
3010 #define MTK_WED_CTRL 0x00c
3011@@ -58,6 +62,9 @@ struct mtk_wdma_desc {
3012 #define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1)
3013 #define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2)
3014 #define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3)
3015+#define MTK_WED_CTRL_WED_RX_IND_CMD_EN BIT(5)
3016+#define MTK_WED_CTRL_WED_RX_PG_BM_EN BIT(6)
3017+#define MTK_WED_CTRL_WED_RX_PG_BM_BUSU BIT(7)
3018 #define MTK_WED_CTRL_WED_TX_BM_EN BIT(8)
3019 #define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
3020 #define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
3021@@ -68,9 +75,14 @@ struct mtk_wdma_desc {
3022 #define MTK_WED_CTRL_RX_RRO_QM_BUSY BIT(15)
3023 #define MTK_WED_CTRL_RX_ROUTE_QM_EN BIT(16)
3024 #define MTK_WED_CTRL_RX_ROUTE_QM_BUSY BIT(17)
3025+#define MTK_WED_CTRL_TX_TKID_ALI_EN BIT(20)
3026+#define MTK_WED_CTRL_TX_TKID_ALI_BUSY BIT(21)
3027+#define MTK_WED_CTRL_TX_PAO_EN BIT(22)
3028+#define MTK_WED_CTRL_TX_PAO_BUSY BIT(23)
3029 #define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
3030 #define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25)
3031 #define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
3032+#define MTK_WED_CTRL_FLD_MIB_RD_CLR BIT(28)
3033
3034 #define MTK_WED_EXT_INT_STATUS 0x020
3035 #define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0)
3036@@ -78,12 +90,10 @@ struct mtk_wdma_desc {
3037 #define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4)
3038 #define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8)
3039 #define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9)
3040-#if defined(CONFIG_MEDIATEK_NETSYS_V2)
3041-#define MTK_WED_EXT_INT_STATUS_TX_TKID_LO_TH BIT(10)
3042-#define MTK_WED_EXT_INT_STATUS_TX_TKID_HI_TH BIT(11)
3043-#endif
3044-#define MTK_WED_EXT_INT_STATUS_RX_FREE_AT_EMPTY BIT(12)
3045-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_DMAD_ER BIT(13)
3046+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH2 BIT(10)
3047+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH2 BIT(11)
3048+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
3049+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
3050 #define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
3051 #define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
3052 #define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
3053@@ -100,17 +110,15 @@ struct mtk_wdma_desc {
3054 #define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
3055 MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
3056 MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \
3057- MTK_WED_EXT_INT_STATUS_RX_FREE_AT_EMPTY | \
3058- MTK_WED_EXT_INT_STATUS_RX_FBUF_DMAD_ER | \
3059 MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
3060 MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
3061 MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \
3062- MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR | \
3063- MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR)
3064+ MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR)
3065
3066 #define MTK_WED_EXT_INT_MASK 0x028
3067 #define MTK_WED_EXT_INT_MASK1 0x02c
3068 #define MTK_WED_EXT_INT_MASK2 0x030
3069+#define MTK_WED_EXT_INT_MASK3 0x034
3070
3071 #define MTK_WED_STATUS 0x060
3072 #define MTK_WED_STATUS_TX GENMASK(15, 8)
3073@@ -118,9 +126,14 @@ struct mtk_wdma_desc {
3074 #define MTK_WED_TX_BM_CTRL 0x080
3075 #define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0)
3076 #define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16)
3077+#define MTK_WED_TX_BM_CTRL_LEGACY_EN BIT(26)
3078+#define MTK_WED_TX_TKID_CTRL_FREE_FORMAT BIT(27)
3079 #define MTK_WED_TX_BM_CTRL_PAUSE BIT(28)
3080
3081 #define MTK_WED_TX_BM_BASE 0x084
3082+#define MTK_WED_TX_BM_INIT_PTR 0x088
3083+#define MTK_WED_TX_BM_SW_TAIL_IDX GENMASK(16, 0)
3084+#define MTK_WED_TX_BM_INIT_SW_TAIL_IDX BIT(16)
3085
3086 #define MTK_WED_TX_BM_BUF_LEN 0x08c
3087
3088@@ -134,22 +147,24 @@ struct mtk_wdma_desc {
3089 #if defined(CONFIG_MEDIATEK_NETSYS_V2)
3090 #define MTK_WED_TX_BM_DYN_THR_LO GENMASK(8, 0)
3091 #define MTK_WED_TX_BM_DYN_THR_HI GENMASK(24, 16)
3092-
3093-#define MTK_WED_TX_BM_TKID 0x0c8
3094-#define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
3095-#define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
3096 #else
3097 #define MTK_WED_TX_BM_DYN_THR_LO GENMASK(6, 0)
3098 #define MTK_WED_TX_BM_DYN_THR_HI GENMASK(22, 16)
3099+#endif
3100
3101-#define MTK_WED_TX_BM_TKID 0x088
3102+#define MTK_WED_TX_BM_TKID 0x0c8
3103 #define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
3104 #define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
3105-#endif
3106
3107 #define MTK_WED_TX_TKID_CTRL 0x0c0
3108+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
3109+#define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM GENMASK(7, 0)
3110+#define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM GENMASK(23, 16)
3111+#else
3112 #define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM GENMASK(6, 0)
3113 #define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM GENMASK(22, 16)
3114+#endif
3115+
3116 #define MTK_WED_TX_TKID_CTRL_PAUSE BIT(28)
3117
3118 #define MTK_WED_TX_TKID_DYN_THR 0x0e0
3119@@ -220,12 +235,15 @@ struct mtk_wdma_desc {
3120 #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_PKT_PROC BIT(5)
3121 #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC BIT(6)
3122 #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_CRX_SYNC BIT(7)
3123-#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(18, 16)
3124+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(15, 12)
3125+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4 BIT(18)
3126 #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNSUPPORT_FMT BIT(19)
3127-#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UEVENT_PKT_FMT_CHK BIT(20)
3128+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK BIT(20)
3129 #define MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR BIT(21)
3130 #define MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP BIT(24)
3131+#define MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST BIT(25)
3132 #define MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV BIT(28)
3133+#define MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK BIT(30)
3134
3135 /* CONFIG_MEDIATEK_NETSYS_V1 */
3136 #define MTK_WED_WPDMA_GLO_CFG_RX_BT_SIZE GENMASK(5, 4)
3137@@ -288,9 +306,11 @@ struct mtk_wdma_desc {
3138 #define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16)
3139
3140 #define MTK_WED_PCIE_INT_CTRL 0x57c
3141-#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20)
3142-#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16)
3143 #define MTK_WED_PCIE_INT_CTRL_POLL_EN GENMASK(13, 12)
3144+#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16)
3145+#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20)
3146+#define MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER BIT(21)
3147+
3148 #define MTK_WED_WPDMA_CFG_BASE 0x580
3149 #define MTK_WED_WPDMA_CFG_INT_MASK 0x584
3150 #define MTK_WED_WPDMA_CFG_TX 0x588
3151@@ -319,20 +339,50 @@ struct mtk_wdma_desc {
3152 #define MTK_WED_WPDMA_RX_D_RST_DRV_IDX GENMASK(25, 24)
3153
3154 #define MTK_WED_WPDMA_RX_GLO_CFG 0x76c
3155-#define MTK_WED_WPDMA_RX_RING 0x770
3156+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
3157+#define MTK_WED_WPDMA_RX_RING0 0x770
3158+#else
3159+#define MTK_WED_WPDMA_RX_RING0 0x7d0
3160+#endif
3161+#define MTK_WED_WPDMA_RX_RING1 0x7d8
3162
3163 #define MTK_WED_WPDMA_RX_D_MIB(_n) (0x774 + (_n) * 4)
3164 #define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n) (0x784 + (_n) * 4)
3165 #define MTK_WED_WPDMA_RX_D_COHERENT_MIB 0x78c
3166
3167+#define MTK_WED_WPDMA_RX_D_PREF_CFG 0x7b4
3168+#define MTK_WED_WPDMA_RX_D_PREF_EN BIT(0)
3169+#define MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE GENMASK(12, 8)
3170+#define MTK_WED_WPDMA_RX_D_PREF_LOW_THRES GENMASK(21, 16)
3171+
3172+#define MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX 0x7b8
3173+#define MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR BIT(15)
3174+
3175+#define MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX 0x7bc
3176+
3177+#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG 0x7c0
3178+#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR BIT(0)
3179+#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR BIT(16)
3180+
3181 #define MTK_WED_WDMA_RING_TX 0x800
3182
3183 #define MTK_WED_WDMA_TX_MIB 0x810
3184
3185-
3186 #define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
3187 #define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
3188
3189+#define MTK_WED_WDMA_RX_PREF_CFG 0x950
3190+#define MTK_WED_WDMA_RX_PREF_EN BIT(0)
3191+#define MTK_WED_WDMA_RX_PREF_BURST_SIZE GENMASK(12, 8)
3192+#define MTK_WED_WDMA_RX_PREF_LOW_THRES GENMASK(21, 16)
3193+#define MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR BIT(24)
3194+#define MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR BIT(25)
3195+#define MTK_WED_WDMA_RX_PREF_DDONE2_EN BIT(26)
3196+
3197+#define MTK_WED_WDMA_RX_PREF_FIFO_CFG 0x95C
3198+#define MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR BIT(0)
3199+#define MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR BIT(16)
3200+
3201 #define MTK_WED_WDMA_GLO_CFG 0xa04
3202 #define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
3203 #define MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK BIT(1)
3204@@ -365,6 +415,7 @@ struct mtk_wdma_desc {
3205 #define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16)
3206
3207 #define MTK_WED_WDMA_INT_CTRL 0xa2c
3208+#define MTK_WED_WDMA_INT_POLL_PRD GENMASK(7, 0)
3209 #define MTK_WED_WDMA_INT_POLL_SRC_SEL GENMASK(17, 16)
3210
3211 #define MTK_WED_WDMA_CFG_BASE 0xaa0
3212@@ -426,6 +477,18 @@ struct mtk_wdma_desc {
3213 #define MTK_WDMA_INT_GRP1 0x250
3214 #define MTK_WDMA_INT_GRP2 0x254
3215
3216+#define MTK_WDMA_PREF_TX_CFG 0x2d0
3217+#define MTK_WDMA_PREF_TX_CFG_PREF_EN BIT(0)
3218+
3219+#define MTK_WDMA_PREF_RX_CFG 0x2dc
3220+#define MTK_WDMA_PREF_RX_CFG_PREF_EN BIT(0)
3221+
3222+#define MTK_WDMA_WRBK_TX_CFG 0x300
3223+#define MTK_WDMA_WRBK_TX_CFG_WRBK_EN BIT(30)
3224+
3225+#define MTK_WDMA_WRBK_RX_CFG 0x344
3226+#define MTK_WDMA_WRBK_RX_CFG_WRBK_EN BIT(30)
3227+
3228 #define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0)
3229 #define MTK_PCIE_MIRROR_MAP_EN BIT(0)
3230 #define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1)
3231@@ -439,6 +502,31 @@ struct mtk_wdma_desc {
3232 #define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5)
3233 #define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20)
3234
3235+#define MTK_WED_RTQM_IGRS0_I2HW_DMAD_CNT 0xb1c
3236+#define MTK_WED_RTQM_IGRS0_I2H_DMAD_CNT(_n) (0xb20 + (_n) * 0x4)
3237+#define MTK_WED_RTQM_IGRS0_I2HW_PKT_CNT 0xb28
3238+#define MTK_WED_RTQM_IGRS0_I2H_PKT_CNT(_n) (0xb2c + (_n) * 0x4)
3239+#define MTK_WED_RTQM_IGRS0_FDROP_CNT 0xb34
3240+
3241+
3242+#define MTK_WED_RTQM_IGRS1_I2HW_DMAD_CNT 0xb44
3243+#define MTK_WED_RTQM_IGRS1_I2H_DMAD_CNT(_n) (0xb48 + (_n) * 0x4)
3244+#define MTK_WED_RTQM_IGRS1_I2HW_PKT_CNT 0xb50
3245+#define MTK_WED_RTQM_IGRS1_I2H_PKT_CNT(_n) (0xb54+ (_n) * 0x4)
3246+#define MTK_WED_RTQM_IGRS1_FDROP_CNT 0xb5c
3247+
3248+#define MTK_WED_RTQM_IGRS2_I2HW_DMAD_CNT 0xb6c
3249+#define MTK_WED_RTQM_IGRS2_I2H_DMAD_CNT(_n) (0xb70 + (_n) * 0x4)
3250+#define MTK_WED_RTQM_IGRS2_I2HW_PKT_CNT 0xb78
3251+#define MTK_WED_RTQM_IGRS2_I2H_PKT_CNT(_n) (0xb7c+ (_n) * 0x4)
3252+#define MTK_WED_RTQM_IGRS2_FDROP_CNT 0xb84
3253+
3254+#define MTK_WED_RTQM_IGRS3_I2HW_DMAD_CNT 0xb94
3255+#define MTK_WED_RTQM_IGRS3_I2H_DMAD_CNT(_n) (0xb98 + (_n) * 0x4)
3256+#define MTK_WED_RTQM_IGRS3_I2HW_PKT_CNT 0xba0
3257+#define MTK_WED_RTQM_IGRS3_I2H_PKT_CNT(_n) (0xba4+ (_n) * 0x4)
3258+#define MTK_WED_RTQM_IGRS3_FDROP_CNT 0xbac
3259+
3260 #define MTK_WED_RTQM_R2H_MIB(_n) (0xb70 + (_n) * 0x4)
3261 #define MTK_WED_RTQM_R2Q_MIB(_n) (0xb78 + (_n) * 0x4)
3262 #define MTK_WED_RTQM_Q2N_MIB 0xb80
3263@@ -447,6 +535,24 @@ struct mtk_wdma_desc {
3264 #define MTK_WED_RTQM_Q2B_MIB 0xb8c
3265 #define MTK_WED_RTQM_PFDBK_MIB 0xb90
3266
3267+#define MTK_WED_RTQM_ENQ_CFG0 0xbb8
3268+#define MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT GENMASK(15, 12)
3269+
3270+#define MTK_WED_RTQM_FDROP_MIB 0xb84
3271+#define MTK_WED_RTQM_ENQ_I2Q_DMAD_CNT 0xbbc
3272+#define MTK_WED_RTQM_ENQ_I2N_DMAD_CNT 0xbc0
3273+#define MTK_WED_RTQM_ENQ_I2Q_PKT_CNT 0xbc4
3274+#define MTK_WED_RTQM_ENQ_I2N_PKT_CNT 0xbc8
3275+#define MTK_WED_RTQM_ENQ_USED_ENTRY_CNT 0xbcc
3276+#define MTK_WED_RTQM_ENQ_ERR_CNT 0xbd0
3277+
3278+#define MTK_WED_RTQM_DEQ_DMAD_CNT 0xbd8
3279+#define MTK_WED_RTQM_DEQ_Q2I_DMAD_CNT 0xbdc
3280+#define MTK_WED_RTQM_DEQ_PKT_CNT 0xbe0
3281+#define MTK_WED_RTQM_DEQ_Q2I_PKT_CNT 0xbe4
3282+#define MTK_WED_RTQM_DEQ_USED_PFDBK_CNT 0xbe8
3283+#define MTK_WED_RTQM_DEQ_ERR_CNT 0xbec
3284+
3285 #define MTK_WED_RROQM_GLO_CFG 0xc04
3286 #define MTK_WED_RROQM_RST_IDX 0xc08
3287 #define MTK_WED_RROQM_RST_IDX_MIOD BIT(0)
3288@@ -487,8 +593,8 @@ struct mtk_wdma_desc {
3289 #define MTK_WED_RX_BM_BASE 0xd84
3290 #define MTK_WED_RX_BM_INIT_PTR 0xd88
3291 #define MTK_WED_RX_BM_PTR 0xd8c
3292-#define MTK_WED_RX_BM_PTR_HEAD GENMASK(32, 16)
3293 #define MTK_WED_RX_BM_PTR_TAIL GENMASK(15, 0)
3294+#define MTK_WED_RX_BM_PTR_HEAD GENMASK(32, 16)
3295
3296 #define MTK_WED_RX_BM_BLEN 0xd90
3297 #define MTK_WED_RX_BM_STS 0xd94
3298@@ -496,7 +602,193 @@ struct mtk_wdma_desc {
3299 #define MTK_WED_RX_BM_INTF 0xd9c
3300 #define MTK_WED_RX_BM_ERR_STS 0xda8
3301
3302+#define MTK_RRO_IND_CMD_SIGNATURE 0xe00
3303+#define MTK_RRO_IND_CMD_DMA_IDX GENMASK(11, 0)
3304+#define MTK_RRO_IND_CMD_MAGIC_CNT GENMASK(30, 28)
3305+
3306+#define MTK_WED_IND_CMD_RX_CTRL0 0xe04
3307+#define MTK_WED_IND_CMD_PROC_IDX GENMASK(11, 0)
3308+#define MTK_WED_IND_CMD_PREFETCH_FREE_CNT GENMASK(19, 16)
3309+#define MTK_WED_IND_CMD_MAGIC_CNT GENMASK(30, 28)
3310+
3311+#define MTK_WED_IND_CMD_RX_CTRL1 0xe08
3312+#define MTK_WED_IND_CMD_RX_CTRL2 0xe0c
3313+#define MTK_WED_IND_CMD_MAX_CNT GENMASK(11, 0)
3314+#define MTK_WED_IND_CMD_BASE_M GENMASK(19, 16)
3315+
3316+#define MTK_WED_RRO_CFG0 0xe10
3317+#define MTK_WED_RRO_CFG1 0xe14
3318+#define MTK_WED_RRO_CFG1_MAX_WIN_SZ GENMASK(31, 29)
3319+#define MTK_WED_RRO_CFG1_ACK_SN_BASE_M GENMASK(19, 16)
3320+#define MTK_WED_RRO_CFG1_PARTICL_SE_ID GENMASK(11, 0)
3321+
3322+#define MTK_WED_ADDR_ELEM_CFG0 0xe18
3323+#define MTK_WED_ADDR_ELEM_CFG1 0xe1c
3324+#define MTK_WED_ADDR_ELEM_PREFETCH_FREE_CNT GENMASK(19, 16)
3325+
3326+#define MTK_WED_ADDR_ELEM_TBL_CFG 0xe20
3327+#define MTK_WED_ADDR_ELEM_TBL_OFFSET GENMASK(6, 0)
3328+#define MTK_WED_ADDR_ELEM_TBL_RD_RDY BIT(28)
3329+#define MTK_WED_ADDR_ELEM_TBL_WR_RDY BIT(29)
3330+#define MTK_WED_ADDR_ELEM_TBL_RD BIT(30)
3331+#define MTK_WED_ADDR_ELEM_TBL_WR BIT(31)
3332+
3333+#define MTK_WED_RADDR_ELEM_TBL_WDATA 0xe24
3334+#define MTK_WED_RADDR_ELEM_TBL_RDATA 0xe28
3335+
3336+#define MTK_WED_PN_CHECK_CFG 0xe30
3337+#define MTK_WED_PN_CHECK_SE_ID GENMASK(11, 0)
3338+#define MTK_WED_PN_CHECK_RD_RDY BIT(28)
3339+#define MTK_WED_PN_CHECK_WR_RDY BIT(29)
3340+#define MTK_WED_PN_CHECK_RD BIT(30)
3341+#define MTK_WED_PN_CHECK_WR BIT(31)
3342+
3343+#define MTK_WED_PN_CHECK_WDATA_M 0xe38
3344+#define MTK_WED_PN_CHECK_IS_FIRST BIT(17)
3345+
3346+#define MTK_WED_RRO_MSDU_PG_RING_CFG(_n) (0xe44 + (_n) * 0x8)
3347+
3348+#define MTK_WED_RRO_MSDU_PG_RING2_CFG 0xe58
3349+#define MTK_WED_RRO_MSDU_PG_DRV_CLR BIT(26)
3350+#define MTK_WED_RRO_MSDU_PG_DRV_EN BIT(31)
3351+
3352+#define MTK_WED_RRO_MSDU_PG_CTRL0(_n) (0xe5c + (_n) * 0xc)
3353+#define MTK_WED_RRO_MSDU_PG_CTRL1(_n) (0xe60 + (_n) * 0xc)
3354+#define MTK_WED_RRO_MSDU_PG_CTRL2(_n) (0xe64 + (_n) * 0xc)
3355+
3356+#define MTK_WED_RRO_RX_D_RX(_n) (0xe80 + (_n) * 0x10)
3357+
3358+#define MTK_WED_RRO_RX_MAGIC_CNT BIT(13)
3359+
3360+#define MTK_WED_RRO_RX_D_CFG(_n) (0xea0 + (_n) * 0x4)
3361+#define MTK_WED_RRO_RX_D_DRV_CLR BIT(26)
3362+#define MTK_WED_RRO_RX_D_DRV_EN BIT(31)
3363+
3364+#define MTK_WED_RRO_PG_BM_RX_DMAM 0xeb0
3365+#define MTK_WED_RRO_PG_BM_RX_SDL0 GENMASK(13, 0)
3366+
3367+#define MTK_WED_RRO_PG_BM_BASE 0xeb4
3368+#define MTK_WED_RRO_PG_BM_INIT_PTR 0xeb8
3369+#define MTK_WED_RRO_PG_BM_SW_TAIL_IDX GENMASK(15, 0)
3370+#define MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX BIT(16)
3371+
3372+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX 0xeec
3373+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN BIT(0)
3374+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR BIT(1)
3375+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG GENMASK(6, 2)
3376+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN BIT(8)
3377+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR BIT(9)
3378+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG GENMASK(14, 10)
3379+
3380+#define MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG 0xef4
3381+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN BIT(0)
3382+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR BIT(1)
3383+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG GENMASK(6, 2)
3384+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN BIT(8)
3385+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR BIT(9)
3386+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG GENMASK(14, 10)
3387+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN BIT(16)
3388+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR BIT(17)
3389+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG GENMASK(22, 18)
3390+
3391+#define MTK_WED_RX_IND_CMD_CNT0 0xf20
3392+#define MTK_WED_RX_IND_CMD_DBG_CNT_EN BIT(31)
3393+
3394+#define MTK_WED_RX_IND_CMD_CNT(_n) (0xf20 + (_n) * 0x4)
3395+#define MTK_WED_IND_CMD_MAGIC_CNT_FAIL_CNT GENMASK(15, 0)
3396+
3397+#define MTK_WED_RX_ADDR_ELEM_CNT(_n) (0xf48 + (_n) * 0x4)
3398+#define MTK_WED_ADDR_ELEM_SIG_FAIL_CNT GENMASK(15, 0)
3399+#define MTK_WED_ADDR_ELEM_FIRST_SIG_FAIL_CNT GENMASK(31, 16)
3400+#define MTK_WED_ADDR_ELEM_ACKSN_CNT GENMASK(27, 0)
3401+
3402+#define MTK_WED_RX_MSDU_PG_CNT(_n) (0xf5c + (_n) * 0x4)
3403+
3404+#define MTK_WED_RX_PN_CHK_CNT 0xf70
3405+#define MTK_WED_PN_CHK_FAIL_CNT GENMASK(15, 0)
3406+
3407 #define MTK_WED_WOCPU_VIEW_MIOD_BASE 0x8000
3408 #define MTK_WED_PCIE_INT_MASK 0x0
3409
3410+#define MTK_WED_PAO_AMSDU_FIFO 0x1800
3411+#define MTK_WED_PAO_AMSDU_IS_PRIOR0_RING BIT(10)
3412+
3413+#define MTK_WED_PAO_STA_INFO		0x1810
3414+#define MTK_WED_PAO_STA_INFO_DO_INIT BIT(0)
3415+#define MTK_WED_PAO_STA_INFO_SET_INIT BIT(1)
3416+
3417+#define MTK_WED_PAO_STA_INFO_INIT	0x1814
3418+#define MTK_WED_PAO_STA_WTBL_HDRT_MODE BIT(0)
3419+#define MTK_WED_PAO_STA_RMVL BIT(1)
3420+#define MTK_WED_PAO_STA_MAX_AMSDU_LEN GENMASK(7, 2)
3421+#define MTK_WED_PAO_STA_MAX_AMSDU_NUM GENMASK(11, 8)
3422+
3423+#define MTK_WED_PAO_HIFTXD_BASE_L(_n) (0x1980 + (_n) * 0x4)
3424+
3425+#define MTK_WED_PAO_PSE 0x1910
3426+#define MTK_WED_PAO_PSE_RESET BIT(16)
3427+
3428+#define MTK_WED_PAO_HIFTXD_CFG 0x1968
3429+#define MTK_WED_PAO_HIFTXD_SRC GENMASK(16, 15)
3430+
3431+#define MTK_WED_PAO_MON_AMSDU_FIFO_DMAD 0x1a34
3432+
3433+#define MTK_WED_PAO_MON_AMSDU_ENG_DMAD(_n) (0x1a80 + (_n) * 0x50)
3434+#define MTK_WED_PAO_MON_AMSDU_ENG_QFPL(_n) (0x1a84 + (_n) * 0x50)
3435+#define MTK_WED_PAO_MON_AMSDU_ENG_QENI(_n) (0x1a88 + (_n) * 0x50)
3436+#define MTK_WED_PAO_MON_AMSDU_ENG_QENO(_n) (0x1a8c + (_n) * 0x50)
3437+#define MTK_WED_PAO_MON_AMSDU_ENG_MERG(_n) (0x1a90 + (_n) * 0x50)
3438+
3439+#define MTK_WED_PAO_MON_AMSDU_ENG_CNT8(_n) (0x1a94 + (_n) * 0x50)
3440+#define MTK_WED_PAO_AMSDU_ENG_MAX_QGPP_CNT GENMASK(10, 0)
3441+#define MTK_WED_PAO_AMSDU_ENG_MAX_PL_CNT GENMASK(27, 16)
3442+
3443+#define MTK_WED_PAO_MON_AMSDU_ENG_CNT9(_n) (0x1a98 + (_n) * 0x50)
3444+#define MTK_WED_PAO_AMSDU_ENG_CUR_ENTRY GENMASK(10, 0)
3445+#define MTK_WED_PAO_AMSDU_ENG_MAX_BUF_MERGED GENMASK(20, 16)
3446+#define MTK_WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED GENMASK(28, 24)
3447+
3448+#define MTK_WED_PAO_MON_QMEM_STS1 0x1e04
3449+
3450+#define MTK_WED_PAO_MON_QMEM_CNT(_n) (0x1e0c + (_n) * 0x4)
3451+#define MTK_WED_PAO_QMEM_FQ_CNT GENMASK(27, 16)
3452+#define MTK_WED_PAO_QMEM_SP_QCNT GENMASK(11, 0)
3453+#define MTK_WED_PAO_QMEM_TID0_QCNT GENMASK(27, 16)
3454+#define MTK_WED_PAO_QMEM_TID1_QCNT GENMASK(11, 0)
3455+#define MTK_WED_PAO_QMEM_TID2_QCNT GENMASK(27, 16)
3456+#define MTK_WED_PAO_QMEM_TID3_QCNT GENMASK(11, 0)
3457+#define MTK_WED_PAO_QMEM_TID4_QCNT GENMASK(27, 16)
3458+#define MTK_WED_PAO_QMEM_TID5_QCNT GENMASK(11, 0)
3459+#define MTK_WED_PAO_QMEM_TID6_QCNT GENMASK(27, 16)
3460+#define MTK_WED_PAO_QMEM_TID7_QCNT GENMASK(11, 0)
3461+
3462+#define MTK_WED_PAO_MON_QMEM_PTR(_n) (0x1e20 + (_n) * 0x4)
3463+#define MTK_WED_PAO_QMEM_FQ_HEAD GENMASK(27, 16)
3464+#define MTK_WED_PAO_QMEM_SP_QHEAD GENMASK(11, 0)
3465+#define MTK_WED_PAO_QMEM_TID0_QHEAD GENMASK(27, 16)
3466+#define MTK_WED_PAO_QMEM_TID1_QHEAD GENMASK(11, 0)
3467+#define MTK_WED_PAO_QMEM_TID2_QHEAD GENMASK(27, 16)
3468+#define MTK_WED_PAO_QMEM_TID3_QHEAD GENMASK(11, 0)
3469+#define MTK_WED_PAO_QMEM_TID4_QHEAD GENMASK(27, 16)
3470+#define MTK_WED_PAO_QMEM_TID5_QHEAD GENMASK(11, 0)
3471+#define MTK_WED_PAO_QMEM_TID6_QHEAD GENMASK(27, 16)
3472+#define MTK_WED_PAO_QMEM_TID7_QHEAD GENMASK(11, 0)
3473+#define MTK_WED_PAO_QMEM_FQ_TAIL GENMASK(27, 16)
3474+#define MTK_WED_PAO_QMEM_SP_QTAIL GENMASK(11, 0)
3475+#define MTK_WED_PAO_QMEM_TID0_QTAIL GENMASK(27, 16)
3476+#define MTK_WED_PAO_QMEM_TID1_QTAIL GENMASK(11, 0)
3477+#define MTK_WED_PAO_QMEM_TID2_QTAIL GENMASK(27, 16)
3478+#define MTK_WED_PAO_QMEM_TID3_QTAIL GENMASK(11, 0)
3479+#define MTK_WED_PAO_QMEM_TID4_QTAIL GENMASK(27, 16)
3480+#define MTK_WED_PAO_QMEM_TID5_QTAIL GENMASK(11, 0)
3481+#define MTK_WED_PAO_QMEM_TID6_QTAIL GENMASK(27, 16)
3482+#define MTK_WED_PAO_QMEM_TID7_QTAIL GENMASK(11, 0)
3483+
3484+#define MTK_WED_PAO_MON_HIFTXD_FETCH_MSDU(_n) (0x1ec4 + (_n) * 0x4)
3485+
3486+#define MTK_WED_PCIE_BASE 0x11280000
3487+
3488+#define MTK_WED_PCIE_BASE0 0x11300000
3489+#define MTK_WED_PCIE_BASE1 0x11310000
3490+#define MTK_WED_PCIE_BASE2 0x11290000
3491 #endif
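
Editor's note: the new prefetch, RTQM and RRO registers above are all declared as GENMASK() field
masks, so they are consumed with the usual FIELD_PREP()/FIELD_GET() helpers from <linux/bitfield.h>.
The sketch below is illustrative only and is not part of the patch: the accessor (plain readl/writel
on a mapped WED base), the burst size of 8 and the low threshold of 4 are placeholder choices, and the
MTK_WED_* macros are assumed to come from the mtk_wed_regs.h hunk above.

/* Minimal sketch: program WDMA RX prefetch and read back the BM tail pointer. */
#include <linux/bitfield.h>
#include <linux/io.h>

static u32 wed_prefetch_example(void __iomem *base)
{
	u32 val;

	/* Enable WDMA RX prefetch with an example burst size of 8 and low threshold of 4. */
	val = readl(base + MTK_WED_WDMA_RX_PREF_CFG);
	val &= ~(MTK_WED_WDMA_RX_PREF_BURST_SIZE | MTK_WED_WDMA_RX_PREF_LOW_THRES);
	val |= MTK_WED_WDMA_RX_PREF_EN |
	       FIELD_PREP(MTK_WED_WDMA_RX_PREF_BURST_SIZE, 8) |
	       FIELD_PREP(MTK_WED_WDMA_RX_PREF_LOW_THRES, 4);
	writel(val, base + MTK_WED_WDMA_RX_PREF_CFG);

	/* Extract the buffer-manager tail index field from the RX BM pointer register. */
	return FIELD_GET(MTK_WED_RX_BM_PTR_TAIL,
			 readl(base + MTK_WED_RX_BM_PTR));
}
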
3492diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
3493index 58b5ce6..5e51790 100644
3494--- a/include/linux/netdevice.h
3495+++ b/include/linux/netdevice.h
3496@@ -873,6 +873,13 @@ struct net_device_path {
3497 u8 queue;
3498 u16 wcid;
3499 u8 bss;
3500+ u32 usr_info;
3501+ u8 tid;
3502+ u8 is_fixedrate;
3503+ u8 is_prior;
3504+ u8 is_sp;
3505+ u8 hf;
3506+ u8 amsdu_en;
3507 } mtk_wdma;
3508 };
3509 };
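
Editor's note: the netdevice.h hunk above only extends the mtk_wdma member of struct net_device_path
with extra forwarding hints (TID, A-MSDU enable, priority/fixed-rate flags). The sketch below is a
hypothetical example, not code from this patch, of how a WLAN driver's .ndo_fill_forward_path()
implementation might fill those hints; all of the constant values are placeholders for driver state.

/* Hypothetical .ndo_fill_forward_path() body populating the new mtk_wdma hints. */
#include <linux/netdevice.h>

static int wlan_fill_forward_path(struct net_device_path_ctx *ctx,
				  struct net_device_path *path)
{
	path->type = DEV_PATH_MTK_WDMA;
	path->dev = ctx->dev;
	path->mtk_wdma.wdma_idx = 0;	/* WDMA port bound to this radio */
	path->mtk_wdma.bss = 0;		/* BSS index of the interface */
	path->mtk_wdma.wcid = 1;	/* station connection id */
	path->mtk_wdma.queue = 0;

	/* hints introduced by this patch */
	path->mtk_wdma.amsdu_en = 1;	/* allow WED/PAO to build A-MSDUs */
	path->mtk_wdma.tid = 0;		/* default TID for this flow */
	path->mtk_wdma.is_fixedrate = 0;
	path->mtk_wdma.is_prior = 0;
	path->mtk_wdma.is_sp = 0;

	return 0;
}
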
3510diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
3511index 27cf284..60336e0 100644
3512--- a/include/linux/soc/mediatek/mtk_wed.h
3513+++ b/include/linux/soc/mediatek/mtk_wed.h
3514@@ -5,11 +5,14 @@
3515 #include <linux/rcupdate.h>
3516 #include <linux/regmap.h>
3517 #include <linux/pci.h>
3518+#include <linux/skbuff.h>
3519+#include <linux/iopoll.h>
3520
3521 #define WED_WO_STA_REC 0x6
3522
3523 #define MTK_WED_TX_QUEUES 2
3524 #define MTK_WED_RX_QUEUES 2
3525+#define MTK_WED_RX_PAGE_QUEUES 3
3526
3527 enum mtk_wed_wo_cmd {
3528 MTK_WED_WO_CMD_WED_CFG,
3529@@ -55,10 +58,13 @@ enum mtk_wed_bus_tye {
3530 struct mtk_wed_hw;
3531 struct mtk_wdma_desc;
3532
3533+#define MTK_WED_RING_CONFIGURED BIT(0)
3534+
3535 struct mtk_wed_ring {
3536 struct mtk_wdma_desc *desc;
3537 dma_addr_t desc_phys;
3538 int size;
3539+ u32 flags;
3540
3541 u32 reg_base;
3542 void __iomem *wpdma;
3543@@ -69,11 +75,18 @@ struct mtk_rxbm_desc {
3544 __le32 token;
3545 } __packed __aligned(4);
3546
3547+struct dma_page_info {
3548+ void *addr;
3549+ dma_addr_t addr_phys;
3550+};
3551+
3552 struct dma_buf {
3553 int size;
3554- void **pages;
3555- struct mtk_wdma_desc *desc;
3556+ int pkt_nums;
3557+ void *desc;
3558+ int desc_size;
3559 dma_addr_t desc_phys;
3560+ struct dma_page_info *pages;
3561 };
3562
3563 struct dma_entry {
3564@@ -97,6 +110,7 @@ struct mtk_wed_device {
3565 struct device *dev;
3566 struct mtk_wed_hw *hw;
3567 bool init_done, running;
3568+ bool wdma_init_done;
3569 int wdma_idx;
3570 int irq;
3571 u8 ver;
3572@@ -108,7 +122,11 @@ struct mtk_wed_device {
3573 struct mtk_wed_ring rx_ring[MTK_WED_RX_QUEUES];
3574 struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES];
3575
3576- struct dma_buf buf_ring;
3577+ struct mtk_wed_ring rx_rro_ring[MTK_WED_RX_QUEUES];
3578+ struct mtk_wed_ring rx_page_ring[MTK_WED_RX_PAGE_QUEUES];
3579+ struct mtk_wed_ring ind_cmd_ring;
3580+
3581+ struct dma_buf tx_buf_ring;
3582
3583 struct {
3584 int size;
3585@@ -117,6 +135,8 @@ struct mtk_wed_device {
3586 dma_addr_t desc_phys;
3587 } rx_buf_ring;
3588
3589+ struct dma_buf rx_page_buf_ring;
3590+
3591 struct {
3592 struct mtk_wed_ring rro_ring;
3593 void __iomem *rro_desc;
3594@@ -131,8 +151,9 @@ struct mtk_wed_device {
3595 struct platform_device *platform_dev;
3596 struct pci_dev *pci_dev;
3597 };
3598+ enum mtk_wed_bus_tye bus_type;
3599 void __iomem *base;
3600- u32 bus_type;
3601+ void __iomem *regs;
3602 u32 phy_base;
3603
3604 u32 wpdma_phys;
3605@@ -142,9 +163,13 @@ struct mtk_wed_device {
3606 u32 wpdma_txfree;
3607 u32 wpdma_rx_glo;
3608 u32 wpdma_rx;
3609+ u32 wpdma_rx_rro[MTK_WED_RX_QUEUES];
3610+ u32 wpdma_rx_pg;
3611
3612 u8 tx_tbit[MTK_WED_TX_QUEUES];
3613 u8 rx_tbit[MTK_WED_RX_QUEUES];
3614+ u8 rro_rx_tbit[MTK_WED_RX_QUEUES];
3615+ u8 rx_pg_tbit[MTK_WED_RX_PAGE_QUEUES];
3616 u8 txfree_tbit;
3617
3618 u16 token_start;
3619@@ -154,12 +179,26 @@ struct mtk_wed_device {
3620 unsigned int rx_size;
3621
3622 bool wcid_512;
3623-
3624+ bool hwrro;
3625+ bool msi;
3626+
3627+ u8 max_amsdu_nums;
3628+ u32 max_amsdu_len;
3629+
3630+ struct {
3631+ u8 se_group_nums;
3632+ u16 win_size;
3633+ u16 particular_sid;
3634+ u32 ack_sn_addr;
3635+ dma_addr_t particular_se_phys;
3636+ dma_addr_t addr_elem_phys[1024];
3637+ } ind_cmd;
3638+
3639+ u32 chip_id;
3640 u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
3641 int (*offload_enable)(struct mtk_wed_device *wed);
3642 void (*offload_disable)(struct mtk_wed_device *wed);
3643- u32 (*init_rx_buf)(struct mtk_wed_device *wed,
3644- int pkt_num);
3645+ u32 (*init_rx_buf)(struct mtk_wed_device *wed, int size);
3646 void (*release_rx_buf)(struct mtk_wed_device *wed);
3647 void (*update_wo_rx_stats)(struct mtk_wed_device *wed,
3648 struct mtk_wed_wo_rx_stats *stats);
3649@@ -180,6 +219,11 @@ struct mtk_wed_ops {
3650 void __iomem *regs);
3651 int (*rx_ring_setup)(struct mtk_wed_device *dev, int ring,
3652 void __iomem *regs, bool reset);
3653+ int (*rro_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
3654+ void __iomem *regs);
3655+ int (*msdu_pg_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
3656+ void __iomem *regs);
3657+ int (*ind_rx_ring_setup)(struct mtk_wed_device *dev, void __iomem *regs);
3658 int (*msg_update)(struct mtk_wed_device *dev, int cmd_id,
3659 void *data, int len);
3660 void (*detach)(struct mtk_wed_device *dev);
3661@@ -196,6 +240,7 @@ struct mtk_wed_ops {
3662 void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
3663 void (*ppe_check)(struct mtk_wed_device *dev, struct sk_buff *skb,
3664 u32 reason, u32 hash);
3665+ void (*start_hwrro)(struct mtk_wed_device *dev, u32 irq_mask);
3666 };
3667
3668 extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
3669@@ -224,12 +269,21 @@ static inline bool
3670 mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
3671 {
3672 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
3673+ if (dev->ver == 3 && !dev->wlan.hwrro)
3674+ return false;
3675+
3676 return dev->ver != 1;
3677 #else
3678 return false;
3679 #endif
3680 }
3681
3682+static inline bool
3683+mtk_wed_device_support_pao(struct mtk_wed_device *dev)
3684+{
3685+ return dev->ver == 3;
3686+}
3687+
3688 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
3689 #define mtk_wed_device_active(_dev) !!(_dev)->ops
3690 #define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
3691@@ -243,6 +297,12 @@ mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
3692 (_dev)->ops->txfree_ring_setup(_dev, _regs)
3693 #define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs, _reset) \
3694 (_dev)->ops->rx_ring_setup(_dev, _ring, _regs, _reset)
3695+#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) \
3696+ (_dev)->ops->rro_rx_ring_setup(_dev, _ring, _regs)
3697+#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) \
3698+ (_dev)->ops->msdu_pg_rx_ring_setup(_dev, _ring, _regs)
3699+#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) \
3700+ (_dev)->ops->ind_rx_ring_setup(_dev, _regs)
3701 #define mtk_wed_device_update_msg(_dev, _id, _msg, _len) \
3702 (_dev)->ops->msg_update(_dev, _id, _msg, _len)
3703 #define mtk_wed_device_reg_read(_dev, _reg) \
3704@@ -257,6 +317,9 @@ mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
3705 (_dev)->ops->reset_dma(_dev)
3706 #define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) \
3707 (_dev)->ops->ppe_check(_dev, _skb, _reason, _hash)
3708+#define mtk_wed_device_start_hwrro(_dev, _mask) \
3709+ (_dev)->ops->start_hwrro(_dev, _mask)
3710+
3711 #else
3712 static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
3713 {
3714@@ -268,6 +331,9 @@ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
3715 #define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs, _reset) -ENODEV
3716 #define mtk_wed_device_txfree_ring_setup(_dev, _ring, _regs) -ENODEV
3717 #define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs, _reset) -ENODEV
3718+#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) -ENODEV
3719+#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) -ENODEV
3720+#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) -ENODEV
3721 #define mtk_wed_device_reg_read(_dev, _reg) 0
3722 #define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
3723 #define mtk_wed_device_irq_get(_dev, _mask) 0
3724@@ -275,6 +341,7 @@ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
3725 #define mtk_wed_device_dma_reset(_dev) do {} while (0)
3726 #define mtk_wed_device_setup_tc(_dev, _ndev, _type, _data) do {} while (0)
3727 #define mtk_wed_device_ppe_check(_dev, _hash) do {} while (0)
3728+#define mtk_wed_device_start_hwrro(_dev, _mask) do {} while (0)
3729 #endif
3730
3731 #endif
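
Editor's note: the mtk_wed.h hunk above exports new HW RRO hooks (rro_rx_ring_setup,
msdu_pg_rx_ring_setup, ind_rx_ring_setup and start_hwrro) through the usual wrapper macros. The code
below is a hypothetical usage sketch, not part of the patch: the register pointers and the interrupt
mask are placeholders whose layout depends entirely on the WLAN driver and firmware, and it assumes
linux/soc/mediatek/mtk_wed.h as modified above is included.

/* Hypothetical HW RRO bring-up sequence in a WLAN driver using the new wrappers. */
static int wlan_wed_hwrro_init(struct mtk_wed_device *wed,
			       void __iomem *rro_regs[MTK_WED_RX_QUEUES],
			       void __iomem *pg_regs[MTK_WED_RX_PAGE_QUEUES],
			       void __iomem *ind_regs, u32 irq_mask)
{
	int i, err;

	if (!mtk_wed_device_active(wed) || !wed->wlan.hwrro)
		return 0;

	/* Hand the RRO data rings over to WED. */
	for (i = 0; i < MTK_WED_RX_QUEUES; i++) {
		err = mtk_wed_device_rro_rx_ring_setup(wed, i, rro_regs[i]);
		if (err)
			return err;
	}

	/* Hand the MSDU page rings over to WED. */
	for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) {
		err = mtk_wed_device_msdu_pg_rx_ring_setup(wed, i, pg_regs[i]);
		if (err)
			return err;
	}

	/* Register the indication command ring, then start HW RRO. */
	err = mtk_wed_device_ind_rx_ring_setup(wed, ind_regs);
	if (err)
		return err;

	mtk_wed_device_start_hwrro(wed, irq_mask);
	return 0;
}
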
3732--
37332.18.0
3734