blob: 2f2b204cf81765579836003bd7790cf7c89018a1 [file] [log] [blame]
developer23f9f0f2023-06-15 13:06:25 +08001From 400f8349a31ffc48538aa7df64a88111de9a738b Mon Sep 17 00:00:00 2001
2From: Sujuan Chen <sujuan.chen@mediatek.com>
3Date: Thu, 13 Apr 2023 15:51:08 +0800
4Subject: [PATCH] mtk: wed: add wed3 support
5
6Signed-off-by: sujuan.chen <sujuan.chen@mediatek.com>
7---
8 arch/arm64/boot/dts/mediatek/mt7988.dtsi | 152 ++-
9 .../dts/mediatek/mt7988a-dsa-10g-spim-nor.dts | 16 +-
10 .../dts/mediatek/mt7988d-dsa-10g-spim-nor.dts | 16 +-
11 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 3 +-
12 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 5 +-
13 drivers/net/ethernet/mediatek/mtk_ppe.c | 17 +-
14 drivers/net/ethernet/mediatek/mtk_ppe.h | 2 +-
15 .../net/ethernet/mediatek/mtk_ppe_offload.c | 13 +-
developerb74821e2023-09-08 14:19:59 +080016 drivers/net/ethernet/mediatek/mtk_wed.c | 1165 +++++++++++++----
developer23f9f0f2023-06-15 13:06:25 +080017 drivers/net/ethernet/mediatek/mtk_wed.h | 25 +-
18 .../net/ethernet/mediatek/mtk_wed_debugfs.c | 584 ++++++++-
19 drivers/net/ethernet/mediatek/mtk_wed_mcu.c | 13 +-
20 drivers/net/ethernet/mediatek/mtk_wed_mcu.h | 5 +-
21 drivers/net/ethernet/mediatek/mtk_wed_regs.h | 338 ++++-
22 include/linux/netdevice.h | 7 +
23 include/linux/soc/mediatek/mtk_wed.h | 81 +-
24 16 files changed, 1446 insertions(+), 333 deletions(-)
25 mode change 100755 => 100644 drivers/net/ethernet/mediatek/mtk_ppe.c
26
27diff --git a/arch/arm64/boot/dts/mediatek/mt7988.dtsi b/arch/arm64/boot/dts/mediatek/mt7988.dtsi
28index 364deef..f9a0120 100644
29--- a/arch/arm64/boot/dts/mediatek/mt7988.dtsi
30+++ b/arch/arm64/boot/dts/mediatek/mt7988.dtsi
31@@ -191,44 +191,49 @@
32 status = "disabled";
33 };
34
35- wed: wed@15010000 {
36- compatible = "mediatek,wed";
37- wed_num = <3>;
38- /* add this property for wed get the pci slot number. */
39- pci_slot_map = <0>, <1>, <2>;
40- reg = <0 0x15010000 0 0x2000>,
41- <0 0x15012000 0 0x2000>,
42- <0 0x15014000 0 0x2000>;
43+ wed0: wed@15010000 {
44+ compatible = "mediatek,mt7988-wed",
45+ "syscon";
46+ reg = <0 0x15010000 0 0x2000>;
47 interrupt-parent = <&gic>;
48- interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
49- <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
50- <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>;
51- };
52-
53- wed2: wed2@15012000 {
54- compatible = "mediatek,wed2";
55- wed_num = <3>;
56- /* add this property for wed get the pci slot number. */
57- reg = <0 0x15010000 0 0x2000>,
58- <0 0x15012000 0 0x2000>,
59- <0 0x15014000 0 0x2000>;
60+ interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
61+ mediatek,wed_pcie = <&wed_pcie>;
62+ mediatek,ap2woccif = <&ap2woccif0>;
63+ mediatek,wocpu_ilm = <&wocpu0_ilm>;
64+ mediatek,wocpu_dlm = <&wocpu0_dlm>;
65+ mediatek,wocpu_boot = <&cpu0_boot>;
66+ mediatek,wocpu_emi = <&wocpu0_emi>;
67+ mediatek,wocpu_data = <&wocpu_data>;
68+ };
69+
70+ wed1: wed@15012000 {
71+ compatible = "mediatek,mt7988-wed",
72+ "syscon";
73+ reg = <0 0x15012000 0 0x2000>;
74 interrupt-parent = <&gic>;
75- interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
76- <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
77- <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>;
78- };
79-
80- wed3: wed3@15014000 {
81- compatible = "mediatek,wed3";
82- wed_num = <3>;
83- /* add this property for wed get the pci slot number. */
84- reg = <0 0x15010000 0 0x2000>,
85- <0 0x15012000 0 0x2000>,
86- <0 0x15014000 0 0x2000>;
87+ interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
88+ mediatek,wed_pcie = <&wed_pcie>;
89+ mediatek,ap2woccif = <&ap2woccif1>;
90+ mediatek,wocpu_ilm = <&wocpu1_ilm>;
91+ mediatek,wocpu_dlm = <&wocpu1_dlm>;
92+ mediatek,wocpu_boot = <&cpu1_boot>;
93+ mediatek,wocpu_emi = <&wocpu1_emi>;
94+ mediatek,wocpu_data = <&wocpu_data>;
95+ };
96+
97+ wed2: wed@15014000 {
98+ compatible = "mediatek,mt7988-wed",
99+ "syscon";
100+ reg = <0 0x15014000 0 0x2000>;
101 interrupt-parent = <&gic>;
102- interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
103- <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
104- <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>;
105+ interrupts = <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>;
106+ mediatek,wed_pcie = <&wed_pcie>;
107+ mediatek,ap2woccif = <&ap2woccif2>;
108+ mediatek,wocpu_ilm = <&wocpu2_ilm>;
109+ mediatek,wocpu_dlm = <&wocpu2_dlm>;
110+ mediatek,wocpu_boot = <&cpu2_boot>;
111+ mediatek,wocpu_emi = <&wocpu2_emi>;
112+ mediatek,wocpu_data = <&wocpu_data>;
113 };
114
115 wdma: wdma@15104800 {
116@@ -238,15 +243,25 @@
117 <0 0x15105000 0 0x400>;
118 };
119
120- ap2woccif: ap2woccif@151A5000 {
121- compatible = "mediatek,ap2woccif";
122- reg = <0 0x151A5000 0 0x1000>,
123- <0 0x152A5000 0 0x1000>,
124- <0 0x153A5000 0 0x1000>;
125+ ap2woccif0: ap2woccif@151A5000 {
126+ compatible = "mediatek,ap2woccif", "syscon";
127+ reg = <0 0x151A5000 0 0x1000>;
128+ interrupt-parent = <&gic>;
129+ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
130+ };
131+
132+ ap2woccif1: ap2woccif@152A5000 {
133+ compatible = "mediatek,ap2woccif", "syscon";
134+ reg = <0 0x152A5000 0 0x1000>;
135 interrupt-parent = <&gic>;
136- interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
137- <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
138- <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
139+ interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
140+ };
141+
142+ ap2woccif2: ap2woccif@153A5000 {
143+ compatible = "mediatek,ap2woccif", "syscon";
144+ reg = <0 0x153A5000 0 0x1000>;
145+ interrupt-parent = <&gic>;
146+ interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
147 };
148
149 wocpu0_ilm: wocpu0_ilm@151E0000 {
150@@ -254,31 +269,53 @@
151 reg = <0 0x151E0000 0 0x8000>;
152 };
153
154- wocpu1_ilm: wocpu1_ilm@152E0000 {
155- compatible = "mediatek,wocpu1_ilm";
156+ wocpu1_ilm: wocpu_ilm@152E0000 {
157+ compatible = "mediatek,wocpu_ilm";
158 reg = <0 0x152E0000 0 0x8000>;
159 };
160
161- wocpu2_ilm: wocpu2_ilm@153E0000 {
162- compatible = "mediatek,wocpu2_ilm";
163- reg = <0 0x153E0000 0 0x8000>;
164+ wocpu2_ilm: wocpu_ilm@153E0000 {
165+ compatible = "mediatek,wocpu_ilm";
166+ reg = <0 0x153E0000 0 0x8000>;
167+ };
168+
169+ wocpu0_dlm: wocpu_dlm@151E8000 {
170+ compatible = "mediatek,wocpu_dlm";
171+ reg = <0 0x151E8000 0 0x2000>;
172+
173+ resets = <&ethsysrst 0>;
174+ reset-names = "wocpu_rst";
175+ };
176+
177+	wocpu1_dlm: wocpu_dlm@152E8000 {
178+ compatible = "mediatek,wocpu_dlm";
179+ reg = <0 0x152E8000 0 0x2000>;
180+
181+ resets = <&ethsysrst 0>;
182+ reset-names = "wocpu_rst";
183 };
184
185- wocpu_dlm: wocpu_dlm@151E8000 {
186+	wocpu2_dlm: wocpu_dlm@153E8000 {
187 compatible = "mediatek,wocpu_dlm";
188- reg = <0 0x151E8000 0 0x2000>,
189- <0 0x152E8000 0 0x2000>,
190- <0 0x153E8000 0 0x2000>;
191+ reg = <0 0x153E8000 0 0x2000>;
192
193 resets = <&ethsysrst 0>;
194 reset-names = "wocpu_rst";
195 };
196
197- cpu_boot: wocpu_boot@15194000 {
198- compatible = "mediatek,wocpu_boot";
199- reg = <0 0x15194000 0 0x1000>,
200- <0 0x15294000 0 0x1000>,
201- <0 0x15394000 0 0x1000>;
202+ cpu0_boot: wocpu_boot@15194000 {
203+ compatible = "mediatek,wocpu0_boot";
204+ reg = <0 0x15194000 0 0x1000>;
205+ };
206+
207+ cpu1_boot: wocpu_boot@15294000 {
208+ compatible = "mediatek,wocpu1_boot";
209+ reg = <0 0x15294000 0 0x1000>;
210+ };
211+
212+ cpu2_boot: wocpu_boot@15394000 {
213+ compatible = "mediatek,wocpu2_boot";
214+ reg = <0 0x15394000 0 0x1000>;
215 };
216
217 reserved-memory {
218@@ -827,6 +864,7 @@
219 <&topckgen CK_TOP_CB_SGM_325M>;
220 mediatek,ethsys = <&ethsys>;
221 mediatek,sgmiisys = <&sgmiisys0>, <&sgmiisys1>;
222+ mediatek,wed = <&wed0>, <&wed1>, <&wed2>;
223 mediatek,usxgmiisys = <&usxgmiisys0>, <&usxgmiisys1>;
224 mediatek,xfi_pextp = <&xfi_pextp0>, <&xfi_pextp1>;
225 mediatek,xfi_pll = <&xfi_pll>;
226diff --git a/arch/arm64/boot/dts/mediatek/mt7988a-dsa-10g-spim-nor.dts b/arch/arm64/boot/dts/mediatek/mt7988a-dsa-10g-spim-nor.dts
227index 7db5164..0a6db8b 100644
228--- a/arch/arm64/boot/dts/mediatek/mt7988a-dsa-10g-spim-nor.dts
229+++ b/arch/arm64/boot/dts/mediatek/mt7988a-dsa-10g-spim-nor.dts
230@@ -341,9 +341,23 @@
231 status = "okay";
232 };
233
234-&wed {
235+&wed0 {
236 dy_txbm_enable = "true";
237 dy_txbm_budge = <8>;
238 txbm_init_sz = <10>;
239 status = "okay";
240 };
241+
242+&wed1 {
243+ dy_txbm_enable = "true";
244+ dy_txbm_budge = <8>;
245+ txbm_init_sz = <10>;
246+ status = "okay";
247+};
248+
249+&wed2 {
250+ dy_txbm_enable = "true";
251+ dy_txbm_budge = <8>;
252+ txbm_init_sz = <10>;
253+ status = "okay";
254+};
255\ No newline at end of file
256diff --git a/arch/arm64/boot/dts/mediatek/mt7988d-dsa-10g-spim-nor.dts b/arch/arm64/boot/dts/mediatek/mt7988d-dsa-10g-spim-nor.dts
257index 67c6508..c407b33 100644
258--- a/arch/arm64/boot/dts/mediatek/mt7988d-dsa-10g-spim-nor.dts
259+++ b/arch/arm64/boot/dts/mediatek/mt7988d-dsa-10g-spim-nor.dts
260@@ -325,9 +325,23 @@
261 status = "okay";
262 };
263
264-&wed {
265+&wed0 {
266 dy_txbm_enable = "true";
267 dy_txbm_budge = <8>;
268 txbm_init_sz = <10>;
269 status = "okay";
270 };
271+
272+&wed1 {
273+ dy_txbm_enable = "true";
274+ dy_txbm_budge = <8>;
275+ txbm_init_sz = <10>;
276+ status = "okay";
277+};
278+
279+&wed2 {
280+ dy_txbm_enable = "true";
281+ dy_txbm_budge = <8>;
282+ txbm_init_sz = <10>;
283+ status = "okay";
284+};
285\ No newline at end of file
286diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
287index 388982c..d59c29f 100644
288--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
289+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
developer7ccd1942023-07-07 16:15:05 +0800290@@ -4865,7 +4865,8 @@ static int mtk_probe(struct platform_device *pdev)
developer23f9f0f2023-06-15 13:06:25 +0800291 "mediatek,wed", i);
292 static const u32 wdma_regs[] = {
293 MTK_WDMA0_BASE,
294- MTK_WDMA1_BASE
295+ MTK_WDMA1_BASE,
296+ MTK_WDMA2_BASE
297 };
298 void __iomem *wdma;
299 u32 wdma_phy;
300diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
301index a9feaed..70e8377 100644
302--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
303+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
developer7ccd1942023-07-07 16:15:05 +0800304@@ -605,9 +605,12 @@
developer23f9f0f2023-06-15 13:06:25 +0800305 #define RX_DMA_SPORT_MASK 0x7
306 #define RX_DMA_SPORT_MASK_V2 0xf
307
308-#if defined(CONFIG_MEDIATEK_NETSYS_V2)
309+#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
310 #define MTK_WDMA0_BASE 0x4800
311 #define MTK_WDMA1_BASE 0x4c00
312+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
313+#define MTK_WDMA2_BASE 0x5000
314+#endif
315 #else
316 #define MTK_WDMA0_BASE 0x2800
317 #define MTK_WDMA1_BASE 0x2c00
318diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
319old mode 100755
320new mode 100644
321index bc13a9b..3910163
322--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
323+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
324@@ -9,6 +9,7 @@
325 #include <linux/if_ether.h>
326 #include <linux/if_vlan.h>
327 #include <net/dsa.h>
328+#include <net/route.h>
329 #include "mtk_eth_soc.h"
330 #include "mtk_ppe.h"
331 #include "mtk_ppe_regs.h"
332@@ -396,7 +397,7 @@ int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
333 }
334
335 int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
336- int bss, int wcid)
337+ int bss, int wcid, bool amsdu_en)
338 {
339 struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
340 u32 *ib2 = mtk_foe_entry_ib2(entry);
341@@ -408,6 +409,9 @@ int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
342
343 l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
344 FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
345+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
346+ l2->winfo_pao = FIELD_PREP(MTK_FOE_WINFO_PAO_AMSDU_EN, amsdu_en);
347+#endif
348 #else
349 if (wdma_idx)
350 *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
351@@ -443,6 +447,17 @@ int mtk_foe_entry_set_dscp(struct mtk_foe_entry *entry, int dscp)
352 *ib2 &= ~MTK_FOE_IB2_DSCP;
353 *ib2 |= FIELD_PREP(MTK_FOE_IB2_DSCP, dscp);
354
355+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
356+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
357+
358+ if (*ib2 & MTK_FOE_IB2_WDMA_WINFO &&
359+ l2->winfo_pao & MTK_FOE_WINFO_PAO_AMSDU_EN) {
360+ u8 tid = rt_tos2priority(dscp) & 0xf;
361+
362+ l2->winfo_pao |= FIELD_PREP(MTK_FOE_WINFO_PAO_TID, tid);
363+ }
364+#endif
365+
366 return 0;
367 }
368
369diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
370index df10040..9e7d5aa 100644
371--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
372+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
373@@ -428,7 +428,7 @@ int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port);
374 int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
375 int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
376 int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
377- int bss, int wcid);
378+ int bss, int wcid, bool amsdu_en);
379 int mtk_foe_entry_set_qid(struct mtk_foe_entry *entry, int qid);
380 int mtk_foe_entry_set_dscp(struct mtk_foe_entry *entry, int dscp);
381 int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
382diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
383index 9bc0857..86fc9a1 100644
384--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
385+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
386@@ -112,6 +112,7 @@ mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_i
387 info->queue = path.mtk_wdma.queue;
388 info->bss = path.mtk_wdma.bss;
389 info->wcid = path.mtk_wdma.wcid;
390+ info->amsdu_en = path.mtk_wdma.amsdu_en;
391
392 return 0;
393 }
394@@ -193,13 +194,15 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
395
396 if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
397 mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss,
398- info.wcid);
399+ info.wcid, info.amsdu_en);
400 pse_port = PSE_PPE0_PORT;
401 #if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
402 if (info.wdma_idx == 0)
403 pse_port = PSE_WDMA0_PORT;
404 else if (info.wdma_idx == 1)
405 pse_port = PSE_WDMA1_PORT;
406+ else if (info.wdma_idx == 2)
407+ pse_port = PSE_WDMA2_PORT;
408 else
409 return -EOPNOTSUPP;
410 #endif
411@@ -458,8 +461,8 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
412 if (err)
413 return err;
414
415- if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
416- return err;
417+ /*if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
418+ return err;*/
419
420 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
421 if (!entry)
422@@ -499,8 +502,8 @@ clear:
423 mtk_foe_entry_clear(eth->ppe[i], entry);
424 free:
425 kfree(entry);
426- if (wed_index >= 0)
427- mtk_wed_flow_remove(wed_index);
428+ /*if (wed_index >= 0)
429+ mtk_wed_flow_remove(wed_index);*/
430 return err;
431 }
432
433diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
434index 37a86c3..e3809db 100644
435--- a/drivers/net/ethernet/mediatek/mtk_wed.c
436+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
437@@ -28,7 +28,7 @@ struct wo_cmd_ring {
438 u32 cnt;
439 u32 unit;
440 };
441-static struct mtk_wed_hw *hw_list[2];
442+static struct mtk_wed_hw *hw_list[3];
443 static DEFINE_MUTEX(hw_lock);
444
445 static void
446@@ -73,6 +73,26 @@ mtk_wdma_read_reset(struct mtk_wed_device *dev)
447 return wdma_r32(dev, MTK_WDMA_GLO_CFG);
448 }
449
450+static u32
451+mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
452+{
453+ if (wed_r32(dev, reg) & mask)
454+ return true;
455+
456+ return false;
457+}
458+
459+static int
460+mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
461+{
462+ int sleep = 1000;
463+ int timeout = 100 * sleep;
464+ u32 val;
465+
466+ return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
467+ timeout, false, dev, reg, mask);
468+}
469+
470 static int
471 mtk_wdma_rx_reset(struct mtk_wed_device *dev)
472 {
473@@ -235,6 +255,8 @@ mtk_wed_assign(struct mtk_wed_device *dev)
474 continue;
475
476 hw->wed_dev = dev;
477+ hw->pci_base = MTK_WED_PCIE_BASE;
478+
479 return hw;
480 }
481
482@@ -242,23 +264,84 @@ mtk_wed_assign(struct mtk_wed_device *dev)
483 }
484
485 static int
486-mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
487+mtk_wed_pao_buffer_alloc(struct mtk_wed_device *dev)
488+{
489+ struct mtk_wed_pao *pao;
490+ int i, j;
491+
492+ pao = kzalloc(sizeof(struct mtk_wed_pao), GFP_KERNEL);
493+ if (!pao)
494+ return -ENOMEM;
495+
496+ dev->hw->wed_pao = pao;
497+
498+ for (i = 0; i < 32; i++) {
499+	/* each segment is 64K */
500+ pao->hif_txd[i] = (char *)__get_free_pages(GFP_ATOMIC |
501+ GFP_DMA32 |
502+ __GFP_ZERO, 4);
503+ if (!pao->hif_txd[i])
504+ goto err;
505+
506+ pao->hif_txd_phys[i] = dma_map_single(dev->hw->dev,
507+ pao->hif_txd[i],
508+ 16 * PAGE_SIZE,
509+ DMA_TO_DEVICE);
510+ if (unlikely(dma_mapping_error(dev->hw->dev,
511+ pao->hif_txd_phys[i])))
512+ goto err;
513+ }
514+
515+ return 0;
516+
517+err:
518+ for (j = 0; j < i; j++)
519+ dma_unmap_single(dev->hw->dev, pao->hif_txd_phys[j],
520+ 16 * PAGE_SIZE, DMA_TO_DEVICE);
521+
522+ return -ENOMEM;
523+}
524+
525+static int
526+mtk_wed_pao_free_buffer(struct mtk_wed_device *dev)
527+{
528+ struct mtk_wed_pao *pao = dev->hw->wed_pao;
529+ int i;
530+
531+ for (i = 0; i < 32; i++) {
532+ dma_unmap_single(dev->hw->dev, pao->hif_txd_phys[i],
533+ 16 * PAGE_SIZE, DMA_TO_DEVICE);
534+ free_pages((unsigned long)pao->hif_txd[i], 4);
535+ }
536+
537+ return 0;
538+}
539+
540+static int
541+mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
542 {
543 struct mtk_wdma_desc *desc;
544+ void *desc_ptr;
545 dma_addr_t desc_phys;
546- void **page_list;
547+ struct dma_page_info *page_list;
548 u32 last_seg = MTK_WDMA_DESC_CTRL_LAST_SEG1;
549 int token = dev->wlan.token_start;
550- int ring_size, n_pages, page_idx;
551- int i;
552-
553+ int ring_size, pkt_nums, n_pages, page_idx;
554+ int i, ret = 0;
555
556 if (dev->ver == MTK_WED_V1) {
557 ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
558- } else {
559+ pkt_nums = ring_size;
560+ dev->tx_buf_ring.desc_size = sizeof(struct mtk_wdma_desc);
561+ } else if (dev->hw->version == 2) {
562 ring_size = MTK_WED_VLD_GROUP_SIZE * MTK_WED_PER_GROUP_PKT +
563 MTK_WED_WDMA_RING_SIZE * 2;
564 last_seg = MTK_WDMA_DESC_CTRL_LAST_SEG0;
565+ dev->tx_buf_ring.desc_size = sizeof(struct mtk_wdma_desc);
566+ } else if (dev->hw->version == 3) {
567+ ring_size = MTK_WED_TX_BM_DMA_SIZE;
568+ pkt_nums = MTK_WED_TX_BM_PKT_CNT;
569+ dev->tx_buf_ring.desc_size = sizeof(struct mtk_rxbm_desc);
570 }
571
572 n_pages = ring_size / MTK_WED_BUF_PER_PAGE;
573@@ -267,18 +350,20 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
574 if (!page_list)
575 return -ENOMEM;
576
577- dev->buf_ring.size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
578- dev->buf_ring.pages = page_list;
579+ dev->tx_buf_ring.size = ring_size;
580+ dev->tx_buf_ring.pages = page_list;
581+ dev->tx_buf_ring.pkt_nums = pkt_nums;
582
583- desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
584- &desc_phys, GFP_KERNEL);
585- if (!desc)
586+ desc_ptr = dma_alloc_coherent(dev->hw->dev,
587+ ring_size * dev->tx_buf_ring.desc_size,
588+ &desc_phys, GFP_KERNEL);
589+ if (!desc_ptr)
590 return -ENOMEM;
591
592- dev->buf_ring.desc = desc;
593- dev->buf_ring.desc_phys = desc_phys;
594+ dev->tx_buf_ring.desc = desc_ptr;
595+ dev->tx_buf_ring.desc_phys = desc_phys;
596
597- for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
598+ for (i = 0, page_idx = 0; i < pkt_nums; i += MTK_WED_BUF_PER_PAGE) {
599 dma_addr_t page_phys, buf_phys;
600 struct page *page;
601 void *buf;
602@@ -295,7 +380,10 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
603 return -ENOMEM;
604 }
605
606- page_list[page_idx++] = page;
607+ page_list[page_idx].addr = page;
608+ page_list[page_idx].addr_phys = page_phys;
609+ page_idx++;
610+
611 dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
612 DMA_BIDIRECTIONAL);
613
614@@ -303,19 +391,23 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
615 buf_phys = page_phys;
616
617 for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
618- u32 txd_size;
619-
620- txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
621-
622+ desc = desc_ptr;
623 desc->buf0 = buf_phys;
624- desc->buf1 = buf_phys + txd_size;
625- desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0,
626- txd_size) |
627- FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
628- MTK_WED_BUF_SIZE - txd_size) |
629- last_seg;
630- desc->info = 0;
631- desc++;
632+ if (dev->hw->version < 3) {
633+ u32 txd_size;
634+
635+ txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
636+ desc->buf1 = buf_phys + txd_size;
637+ desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0,
638+ txd_size) |
639+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
640+ MTK_WED_BUF_SIZE - txd_size) |
641+ last_seg;
642+ desc->info = 0;
643+ } else {
644+ desc->ctrl = token << 16;
645+ }
646+ desc_ptr += dev->tx_buf_ring.desc_size;
647
648 buf += MTK_WED_BUF_SIZE;
649 buf_phys += MTK_WED_BUF_SIZE;
650@@ -325,15 +417,18 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
651 DMA_BIDIRECTIONAL);
652 }
653
654- return 0;
655+ if (dev->hw->version == 3)
656+ ret = mtk_wed_pao_buffer_alloc(dev);
657+
658+ return ret;
659 }
660
661 static void
662-mtk_wed_free_buffer(struct mtk_wed_device *dev)
663+mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
664 {
665- struct mtk_wdma_desc *desc = dev->buf_ring.desc;
666- void **page_list = dev->buf_ring.pages;
667- int ring_size, page_idx;
668+ struct mtk_rxbm_desc *desc = dev->tx_buf_ring.desc;
669+ struct dma_page_info *page_list = dev->tx_buf_ring.pages;
670+ int ring_size, page_idx, pkt_nums;
671 int i;
672
673 if (!page_list)
674@@ -342,33 +437,33 @@ mtk_wed_free_buffer(struct mtk_wed_device *dev)
675 if (!desc)
676 goto free_pagelist;
677
678- if (dev->ver == MTK_WED_V1) {
679- ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
680- } else {
681- ring_size = MTK_WED_VLD_GROUP_SIZE * MTK_WED_PER_GROUP_PKT +
682- MTK_WED_WDMA_RING_SIZE * 2;
683+ pkt_nums = ring_size = dev->tx_buf_ring.size;
684+ if (dev->hw->version == 3) {
685+ mtk_wed_pao_free_buffer(dev);
686+ pkt_nums = dev->tx_buf_ring.pkt_nums;
687 }
688
689- for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
690- void *page = page_list[page_idx++];
691+ for (i = 0, page_idx = 0; i < pkt_nums; i += MTK_WED_BUF_PER_PAGE) {
692+ void *page = page_list[page_idx].addr;
693
694 if (!page)
695 break;
696
697- dma_unmap_page(dev->hw->dev, desc[i].buf0,
698+ dma_unmap_page(dev->hw->dev, page_list[page_idx].addr_phys,
699 PAGE_SIZE, DMA_BIDIRECTIONAL);
700 __free_page(page);
701+ page_idx++;
702 }
703
704- dma_free_coherent(dev->hw->dev, ring_size * sizeof(*desc),
705- desc, dev->buf_ring.desc_phys);
706+ dma_free_coherent(dev->hw->dev, ring_size * dev->tx_buf_ring.desc_size,
707+ dev->tx_buf_ring.desc, dev->tx_buf_ring.desc_phys);
708
709 free_pagelist:
710 kfree(page_list);
711 }
712
713 static int
714-mtk_wed_rx_bm_alloc(struct mtk_wed_device *dev)
715+mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
716 {
717 struct mtk_rxbm_desc *desc;
718 dma_addr_t desc_phys;
719@@ -389,7 +484,7 @@ mtk_wed_rx_bm_alloc(struct mtk_wed_device *dev)
720 }
721
722 static void
723-mtk_wed_free_rx_bm(struct mtk_wed_device *dev)
724+mtk_wed_free_rx_buffer(struct mtk_wed_device *dev)
725 {
726 struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc;
727 int ring_size = dev->rx_buf_ring.size;
728@@ -403,6 +498,113 @@ mtk_wed_free_rx_bm(struct mtk_wed_device *dev)
729 desc, dev->rx_buf_ring.desc_phys);
730 }
731
732+/* TODO */
733+static int
734+mtk_wed_rx_page_buffer_alloc(struct mtk_wed_device *dev)
735+{
736+ int ring_size = dev->wlan.rx_nbuf, buf_num = MTK_WED_RX_PG_BM_CNT;
737+ struct mtk_rxbm_desc *desc;
738+ dma_addr_t desc_phys;
739+ struct dma_page_info *page_list;
740+ int n_pages, page_idx;
741+ int i;
742+
743+ n_pages = buf_num / MTK_WED_RX_PAGE_BUF_PER_PAGE;
744+
745+ page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
746+ if (!page_list)
747+ return -ENOMEM;
748+
749+ dev->rx_page_buf_ring.size = ring_size & ~(MTK_WED_BUF_PER_PAGE - 1);
750+ dev->rx_page_buf_ring.pages = page_list;
751+ dev->rx_page_buf_ring.pkt_nums = buf_num;
752+
753+ desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
754+ &desc_phys, GFP_KERNEL);
755+ if (!desc)
756+ return -ENOMEM;
757+
758+ dev->rx_page_buf_ring.desc = desc;
759+ dev->rx_page_buf_ring.desc_phys = desc_phys;
760+
761+ for (i = 0, page_idx = 0; i < buf_num; i += MTK_WED_RX_PAGE_BUF_PER_PAGE) {
762+ dma_addr_t page_phys, buf_phys;
763+ struct page *page;
764+ void *buf;
765+ int s;
766+
767+ page = __dev_alloc_pages(GFP_KERNEL, 0);
768+ if (!page)
769+ return -ENOMEM;
770+
771+ page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
772+ DMA_BIDIRECTIONAL);
773+ if (dma_mapping_error(dev->hw->dev, page_phys)) {
774+ __free_page(page);
775+ return -ENOMEM;
776+ }
777+
778+ page_list[page_idx].addr= page;
779+ page_list[page_idx].addr_phys= page_phys;
780+ page_idx++;
781+
782+ dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
783+ DMA_BIDIRECTIONAL);
784+
785+ buf = page_to_virt(page);
786+ buf_phys = page_phys;
787+
788+ for (s = 0; s < MTK_WED_RX_PAGE_BUF_PER_PAGE; s++) {
789+
790+ desc->buf0 = cpu_to_le32(buf_phys);
791+ desc++;
792+
793+ buf += MTK_WED_PAGE_BUF_SIZE;
794+ buf_phys += MTK_WED_PAGE_BUF_SIZE;
795+ }
796+
797+ dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
798+ DMA_BIDIRECTIONAL);
799+ }
800+
801+ return 0;
802+}
803+
804+static void
805+mtk_wed_rx_page_free_buffer(struct mtk_wed_device *dev)
806+{
807+ struct mtk_rxbm_desc *desc = dev->rx_page_buf_ring.desc;
808+ struct dma_page_info *page_list = dev->rx_page_buf_ring.pages;
809+ int ring_size, page_idx;
810+ int i;
811+
812+ if (!page_list)
813+ return;
814+
815+ if (!desc)
816+ goto free_pagelist;
817+
818+ ring_size = dev->rx_page_buf_ring.pkt_nums;
819+
820+ for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_RX_PAGE_BUF_PER_PAGE) {
821+ void *page = page_list[page_idx].addr;
822+
823+ if (!page)
824+ break;
825+
826+ dma_unmap_page(dev->hw->dev, page_list[page_idx].addr_phys,
827+ PAGE_SIZE, DMA_BIDIRECTIONAL);
828+ __free_page(page);
829+ page_idx++;
830+ }
831+
developera60ce2b2023-06-16 13:07:18 +0800832+ dma_free_coherent(dev->hw->dev, dev->rx_page_buf_ring.size * sizeof(*desc),
developer23f9f0f2023-06-15 13:06:25 +0800833+ desc, dev->rx_page_buf_ring.desc_phys);
834+
835+free_pagelist:
836+ kfree(page_list);
837+}
838+
839 static void
840 mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, int scale)
841 {
842@@ -416,19 +618,25 @@ mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, int sca
843 static void
844 mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
845 {
846- int i;
847+ int i, scale = dev->hw->version > 1 ? 2 : 1;
848
849 for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
850- mtk_wed_free_ring(dev, &dev->tx_ring[i], 1);
851+ if (!(dev->rx_ring[i].flags & MTK_WED_RING_CONFIGURED))
852+ mtk_wed_free_ring(dev, &dev->tx_ring[i], 1);
853+
854 for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
855- mtk_wed_free_ring(dev, &dev->tx_wdma[i], dev->ver);
856+ if ((dev->rx_ring[i].flags & MTK_WED_RING_CONFIGURED))
857+ mtk_wed_free_ring(dev, &dev->tx_wdma[i], scale);
858 }
859
860 static void
861 mtk_wed_free_rx_rings(struct mtk_wed_device *dev)
862 {
863- mtk_wed_free_rx_bm(dev);
864+ mtk_wed_free_rx_buffer(dev);
865 mtk_wed_free_ring(dev, &dev->rro.rro_ring, 1);
866+
867+ if (dev->wlan.hwrro)
868+ mtk_wed_rx_page_free_buffer(dev);
869 }
870
871 static void
872@@ -437,7 +645,7 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
873 u32 wdma_mask;
874
875 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
876- if (dev->ver > MTK_WED_V1)
877+ if (mtk_wed_get_rx_capa(dev))
878 wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
879 GENMASK(1, 0));
880 /* wed control cr set */
881@@ -447,7 +655,7 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
882 MTK_WED_CTRL_WED_TX_BM_EN |
883 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
884
885- if (dev->ver == MTK_WED_V1) {
886+ if (dev->hw->version == 1) {
887 wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
888 MTK_WED_PCIE_INT_TRIGGER_STATUS);
889
890@@ -458,6 +666,8 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
891 wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
892 MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
893 } else {
894+ if (dev->hw->version == 3)
895+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_TKID_ALI_EN);
896
897 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
898 MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
899@@ -475,18 +685,20 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
900 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
901 dev->wlan.txfree_tbit));
902
903- wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
904- MTK_WED_WPDMA_INT_CTRL_RX0_EN |
905- MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
906- MTK_WED_WPDMA_INT_CTRL_RX1_EN |
907- MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
908- FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
909- dev->wlan.rx_tbit[0]) |
910- FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
911- dev->wlan.rx_tbit[1]));
912+ if (mtk_wed_get_rx_capa(dev))
913+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
914+ MTK_WED_WPDMA_INT_CTRL_RX0_EN |
915+ MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
916+ MTK_WED_WPDMA_INT_CTRL_RX1_EN |
917+ MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
918+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
919+ dev->wlan.rx_tbit[0]) |
920+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
921+ dev->wlan.rx_tbit[1]));
922 }
923+
924 wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
925- if (dev->ver == MTK_WED_V1) {
926+ if (dev->hw->version == 1) {
927 wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
928 } else {
929 wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
930@@ -506,6 +718,21 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
931 {
932 u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
933
934+ switch (dev->hw->version) {
935+ case 1:
936+ mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
937+ break;
938+ case 2 :
939+ mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH2 |
940+ MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH2 |
941+ MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
942+ MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
943+ break;
944+ case 3:
945+ mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT;
946+ break;
947+ }
948+
949 if (!dev->hw->num_flows)
950 mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
951
952@@ -514,31 +741,86 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
953 }
954
955 static void
956-mtk_wed_set_512_support(struct mtk_wed_device *dev, bool en)
957+mtk_wed_pao_init(struct mtk_wed_device *dev)
958 {
959- if (en) {
960- wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
961- wed_w32(dev, MTK_WED_TXP_DW1,
962- FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
963- } else {
964- wed_w32(dev, MTK_WED_TXP_DW1,
965- FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
966- wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
967+ struct mtk_wed_pao *pao = dev->hw->wed_pao;
968+ int i;
969+
970+ for (i = 0; i < 32; i++)
971+ wed_w32(dev, MTK_WED_PAO_HIFTXD_BASE_L(i),
972+ pao->hif_txd_phys[i]);
973+
974+ /* init all sta parameter */
975+ wed_w32(dev, MTK_WED_PAO_STA_INFO_INIT, MTK_WED_PAO_STA_RMVL |
976+ MTK_WED_PAO_STA_WTBL_HDRT_MODE |
977+ FIELD_PREP(MTK_WED_PAO_STA_MAX_AMSDU_LEN,
978+ dev->wlan.max_amsdu_len >> 8) |
979+ FIELD_PREP(MTK_WED_PAO_STA_MAX_AMSDU_NUM,
980+ dev->wlan.max_amsdu_nums));
981+
982+ wed_w32(dev, MTK_WED_PAO_STA_INFO, MTK_WED_PAO_STA_INFO_DO_INIT);
983+
984+ if (mtk_wed_poll_busy(dev, MTK_WED_PAO_STA_INFO,
985+ MTK_WED_PAO_STA_INFO_DO_INIT)) {
986+ dev_err(dev->hw->dev, "mtk_wed%d: pao init failed!\n",
987+ dev->hw->index);
988+ return;
989 }
990+
991+ /* init pao txd src */
992+ wed_set(dev, MTK_WED_PAO_HIFTXD_CFG,
993+ FIELD_PREP(MTK_WED_PAO_HIFTXD_SRC, dev->hw->index));
994+
995+ /* init qmem */
996+ wed_set(dev, MTK_WED_PAO_PSE, MTK_WED_PAO_PSE_RESET);
997+ if (mtk_wed_poll_busy(dev, MTK_WED_PAO_MON_QMEM_STS1, BIT(29))) {
998+ pr_info("%s: init pao qmem fail\n", __func__);
999+ return;
1000+ }
1001+
1002+ /* eagle E1 PCIE1 tx ring 22 flow control issue */
1003+ if (dev->wlan.chip_id == 0x7991) {
1004+ wed_clr(dev, MTK_WED_PAO_AMSDU_FIFO,
1005+ MTK_WED_PAO_AMSDU_IS_PRIOR0_RING);
1006+ }
1007+
1008+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_PAO_EN);
1009+
1010+ return;
1011 }
1012
1013-static void
1014-mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
1015+static int
1016+mtk_wed_hwrro_init(struct mtk_wed_device *dev)
1017 {
1018-#define MTK_WFMDA_RX_DMA_EN BIT(2)
1019+ if (!mtk_wed_get_rx_capa(dev))
1020+ return 0;
developer18d0d712023-08-23 11:50:09 +08001021+
developer23f9f0f2023-06-15 13:06:25 +08001022+ wed_set(dev, MTK_WED_RRO_PG_BM_RX_DMAM,
1023+ FIELD_PREP(MTK_WED_RRO_PG_BM_RX_SDL0, 128));
developer18d0d712023-08-23 11:50:09 +08001024
developer23f9f0f2023-06-15 13:06:25 +08001025+ wed_w32(dev, MTK_WED_RRO_PG_BM_BASE,
1026+ dev->rx_page_buf_ring.desc_phys);
1027+
1028+ wed_w32(dev, MTK_WED_RRO_PG_BM_INIT_PTR,
1029+ MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX |
1030+ FIELD_PREP(MTK_WED_RRO_PG_BM_SW_TAIL_IDX,
1031+ MTK_WED_RX_PG_BM_CNT));
1032+
1033+ /* enable rx_page_bm to fetch dmad */
1034+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN);
1035+
1036+ return 0;
1037+}
developer7ccd1942023-07-07 16:15:05 +08001038+
developer23f9f0f2023-06-15 13:06:25 +08001039+static int
1040+mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev,
1041+ struct mtk_wed_ring *ring)
1042+{
1043 int timeout = 3;
1044- u32 cur_idx, regs;
1045+ u32 cur_idx;
1046
1047 do {
1048- regs = MTK_WED_WPDMA_RING_RX_DATA(idx) +
1049- MTK_WED_RING_OFS_CPU_IDX;
1050- cur_idx = wed_r32(dev, regs);
1051+ cur_idx = readl(ring->wpdma + MTK_WED_RING_OFS_CPU_IDX);
1052 if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
1053 break;
1054
1055@@ -546,70 +828,133 @@ mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
1056 timeout--;
1057 } while (timeout > 0);
1058
1059- if (timeout) {
1060- unsigned int val;
1061+ return timeout;
1062+}
1063
1064- val = wifi_r32(dev, dev->wlan.wpdma_rx_glo -
1065- dev->wlan.phy_base);
1066- val |= MTK_WFMDA_RX_DMA_EN;
1067
1068- wifi_w32(dev, dev->wlan.wpdma_rx_glo -
1069- dev->wlan.phy_base, val);
1070+static void
1071+mtk_wed_set_512_support(struct mtk_wed_device *dev, bool en)
1072+{
1073+ if (en) {
1074+ wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
1075+ wed_w32(dev, MTK_WED_TXP_DW1,
1076+ FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
1077 } else {
1078- dev_err(dev->hw->dev, "mtk_wed%d: rx(%d) dma enable failed!\n",
1079- dev->hw->index, idx);
1080+ wed_w32(dev, MTK_WED_TXP_DW1,
1081+ FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
1082+ wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
1083 }
1084 }
1085
1086 static void
1087 mtk_wed_dma_enable(struct mtk_wed_device *dev)
1088 {
1089- wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
1090- MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
1091+#define MTK_WFMDA_RX_DMA_EN BIT(2)
1092+
1093+ if (dev->hw->version == 1)
1094+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
1095+ MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
1096
1097 wed_set(dev, MTK_WED_GLO_CFG,
1098 MTK_WED_GLO_CFG_TX_DMA_EN |
1099 MTK_WED_GLO_CFG_RX_DMA_EN);
1100+
1101+ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG,
1102+ FIELD_PREP(MTK_WED_WDMA_RX_PREF_BURST_SIZE, 0x10) |
1103+ FIELD_PREP(MTK_WED_WDMA_RX_PREF_LOW_THRES, 0x8));
1104+ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
1105+ MTK_WED_WDMA_RX_PREF_DDONE2_EN);
1106+
1107+ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, MTK_WED_WDMA_RX_PREF_EN);
1108+
1109 wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
1110 MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
1111- MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
1112+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN |
1113+ MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR);
1114 wed_set(dev, MTK_WED_WDMA_GLO_CFG,
1115 MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
1116
1117 wdma_set(dev, MTK_WDMA_GLO_CFG,
1118- MTK_WDMA_GLO_CFG_TX_DMA_EN |
1119+ MTK_WDMA_GLO_CFG_TX_DMA_EN /*|
1120 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
1121- MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
1122+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES*/);
1123
1124- if (dev->ver == MTK_WED_V1) {
1125+ if (dev->hw->version == 1) {
1126 wdma_set(dev, MTK_WDMA_GLO_CFG,
1127 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
1128 } else {
1129 int idx = 0;
1130
1131- wed_set(dev, MTK_WED_WPDMA_CTRL,
1132- MTK_WED_WPDMA_CTRL_SDL1_FIXED);
1133-
1134- wed_set(dev, MTK_WED_WDMA_GLO_CFG,
1135- MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
1136- MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
1137+ if (mtk_wed_get_rx_capa(dev))
1138+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
1139+ MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
1140+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
1141
1142 wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
1143 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
1144 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
1145
1146+ if (dev->hw->version == 3) {
1147+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
1148+ MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST);
1149+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
1150+ MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK |
1151+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK |
1152+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4);
1153+
1154+ wdma_set(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
1155+ //wdma_w32(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
1156+ if (mtk_wed_get_rx_capa(dev)) {
1157+ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
1158+ MTK_WED_WPDMA_RX_D_PREF_EN |
1159+ FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE, 0x10) |
1160+ FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_LOW_THRES, 0x8));
1161+
1162+ wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN);
1163+
1164+ wdma_set(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
1165+
1166+ wdma_set(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
1167+ }
1168+ }
1169+
1170 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
1171 MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
1172 MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
1173
1174+ if (!mtk_wed_get_rx_capa(dev))
1175+ return;
1176+
1177+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RXD_READ_LEN);
1178 wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
1179 MTK_WED_WPDMA_RX_D_RX_DRV_EN |
1180 FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
1181 FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
1182 0x2));
1183
1184- for (idx = 0; idx < dev->hw->ring_num; idx++)
1185- mtk_wed_check_wfdma_rx_fill(dev, idx);
1186+ for (idx = 0; idx < dev->hw->ring_num; idx++) {
1187+ struct mtk_wed_ring *ring = &dev->rx_ring[idx];
1188+
1189+ if(!(ring->flags & MTK_WED_RING_CONFIGURED))
1190+ continue;
1191+
1192+ if(mtk_wed_check_wfdma_rx_fill(dev, ring)) {
1193+ unsigned int val;
1194+
1195+ val = wifi_r32(dev, dev->wlan.wpdma_rx_glo -
1196+ dev->wlan.phy_base);
1197+ val |= MTK_WFMDA_RX_DMA_EN;
1198+
1199+ wifi_w32(dev, dev->wlan.wpdma_rx_glo -
1200+ dev->wlan.phy_base, val);
1201+
1202+ dev_err(dev->hw->dev, "mtk_wed%d: rx(%d) dma enable successful!\n",
1203+ dev->hw->index, idx);
1204+ } else {
1205+ dev_err(dev->hw->dev, "mtk_wed%d: rx(%d) dma enable failed!\n",
1206+ dev->hw->index, idx);
1207+ }
1208+ }
1209 }
1210 }
1211
1212@@ -644,15 +989,20 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
1213 MTK_WED_WPDMA_RX_D_RX_DRV_EN);
1214 wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
1215 MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
1216- }
1217
1218- mtk_wed_set_512_support(dev, false);
1219+ if (dev->hw->version == 3 && mtk_wed_get_rx_capa(dev)) {
1220+ wdma_clr(dev, MTK_WDMA_PREF_TX_CFG,
1221+ MTK_WDMA_PREF_TX_CFG_PREF_EN);
1222+ wdma_clr(dev, MTK_WDMA_PREF_RX_CFG,
1223+ MTK_WDMA_PREF_RX_CFG_PREF_EN);
1224+ }
1225+ }
1226 }
1227
1228 static void
1229 mtk_wed_stop(struct mtk_wed_device *dev)
1230 {
1231- if (dev->ver > MTK_WED_V1) {
1232+ if (mtk_wed_get_rx_capa(dev)) {
1233 wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
1234 wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0);
1235 }
developera8336302023-07-07 11:29:01 +08001236@@ -677,13 +1027,21 @@ mtk_wed_deinit(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001237 MTK_WED_CTRL_WED_TX_BM_EN |
1238 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
1239
1240- if (dev->hw->ver == 1)
1241+ if (dev->hw->version == 1)
1242 return;
1243
1244 wed_clr(dev, MTK_WED_CTRL,
1245 MTK_WED_CTRL_RX_ROUTE_QM_EN |
1246 MTK_WED_CTRL_WED_RX_BM_EN |
1247 MTK_WED_CTRL_RX_RRO_QM_EN);
1248+
1249+ if (dev->hw->version == 3) {
1250+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_PAO_EN);
1251+ wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_TX_PAO);
1252+ wed_clr(dev, MTK_WED_PCIE_INT_CTRL,
1253+ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
1254+ MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER);
1255+ }
1256 }
1257
1258 static void
developera8336302023-07-07 11:29:01 +08001259@@ -702,9 +1060,9 @@ mtk_wed_detach(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001260
1261 mtk_wdma_tx_reset(dev);
1262
1263- mtk_wed_free_buffer(dev);
1264+ mtk_wed_free_tx_buffer(dev);
1265 mtk_wed_free_tx_rings(dev);
1266- if (dev->ver > MTK_WED_V1) {
1267+ if (mtk_wed_get_rx_capa(dev)) {
1268 mtk_wed_wo_reset(dev);
1269 mtk_wed_free_rx_rings(dev);
1270 mtk_wed_wo_exit(hw);
developera8336302023-07-07 11:29:01 +08001271@@ -731,24 +1089,29 @@ mtk_wed_detach(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001272 static void
1273 mtk_wed_bus_init(struct mtk_wed_device *dev)
1274 {
1275-#define PCIE_BASE_ADDR0 0x11280000
1276+ switch (dev->wlan.bus_type) {
1277+ case MTK_WED_BUS_PCIE: {
1278+ struct device_node *np = dev->hw->eth->dev->of_node;
1279+ struct regmap *regs;
developer23f9f0f2023-06-15 13:06:25 +08001280
1281- if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
1282- struct device_node *node;
1283- void __iomem * base_addr;
1284- u32 value = 0;
1285+ if (dev->hw->version == 2) {
1286+ regs = syscon_regmap_lookup_by_phandle(np,
1287+ "mediatek,wed-pcie");
1288+ if (IS_ERR(regs))
1289+ break;
1290
1291- node = of_parse_phandle(dev->hw->node, "mediatek,wed_pcie", 0);
1292- if (!node) {
1293- pr_err("%s: no wed_pcie node\n", __func__);
1294- return;
1295+ regmap_update_bits(regs, 0, BIT(0), BIT(0));
1296 }
1297
1298- base_addr = of_iomap(node, 0);
1299-
1300- value = readl(base_addr);
1301- value |= BIT(0);
1302- writel(value, base_addr);
1303+ if (dev->wlan.msi) {
1304+ wed_w32(dev, MTK_WED_PCIE_CFG_INTM, dev->hw->pci_base| 0xc08);
1305+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, dev->hw->pci_base | 0xc04);
1306+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(8));
1307+ } else {
1308+ wed_w32(dev, MTK_WED_PCIE_CFG_INTM, dev->hw->pci_base | 0x180);
1309+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, dev->hw->pci_base | 0x184);
1310+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
1311+ }
1312
developera8336302023-07-07 11:29:01 +08001313 wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
1314 FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));
developerb74821e2023-09-08 14:19:59 +08001315@@ -756,45 +1119,53 @@ mtk_wed_bus_init(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001316 /* pcie interrupt control: pola/source selection */
1317 wed_set(dev, MTK_WED_PCIE_INT_CTRL,
1318 MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
1319- FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
1320- wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
developera8336302023-07-07 11:29:01 +08001321-
developer23f9f0f2023-06-15 13:06:25 +08001322- value = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
1323- value = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
1324- wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
1325- wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);
developera8336302023-07-07 11:29:01 +08001326+ MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER |
1327+ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, dev->hw->index));
1328
developer18d0d712023-08-23 11:50:09 +08001329- value = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
1330- value = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
1331-
developer23f9f0f2023-06-15 13:06:25 +08001332- wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
1333- wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
1334-
1335- /* pola setting */
1336- value = wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
1337- wed_set(dev, MTK_WED_PCIE_INT_CTRL,
1338- MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
1339- } else if (dev->wlan.bus_type == MTK_WED_BUS_AXI) {
1340+ break;
1341+ }
1342+ case MTK_WED_BUS_AXI:
1343 wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
1344 MTK_WED_WPDMA_INT_CTRL_SIG_SRC |
1345 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0));
1346+ break;
1347+ default:
1348+ break;
1349 }
1350+
1351 return;
1352 }
1353
1354 static void
1355 mtk_wed_set_wpdma(struct mtk_wed_device *dev)
1356 {
1357- if (dev->ver > MTK_WED_V1) {
1358+ if (dev->hw->version == 1) {
1359+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
1360+ } else {
1361+ mtk_wed_bus_init(dev);
1362+
1363 wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
1364 wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
1365- wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
1366+ wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
1367 wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
1368
1369- wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
1370- wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
1371- } else {
1372- wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
1373+ if (mtk_wed_get_rx_capa(dev)) {
1374+ int i;
1375+
1376+ wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
developerb74821e2023-09-08 14:19:59 +08001377+ wed_w32(dev, MTK_WED_WPDMA_RX_RING0, dev->wlan.wpdma_rx[0]);
1378+ if (dev->wlan.wpdma_rx[1])
1379+ wed_w32(dev, MTK_WED_WPDMA_RX_RING1, dev->wlan.wpdma_rx[1]);
developer23f9f0f2023-06-15 13:06:25 +08001380+
1381+ if (dev->wlan.hwrro) {
developerb74821e2023-09-08 14:19:59 +08001382+ wed_w32(dev, MTK_WED_RRO_RX_D_CFG(0), dev->wlan.wpdma_rx_rro[0]);
1383+ wed_w32(dev, MTK_WED_RRO_RX_D_CFG(1), dev->wlan.wpdma_rx_rro[1]);
1384+ for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) {
1385+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING_CFG(i),
1386+ dev->wlan.wpdma_rx_pg + i * 0x10);
developer23f9f0f2023-06-15 13:06:25 +08001387+ }
1388+ }
1389+ }
1390 }
1391 }
1392
developerb74821e2023-09-08 14:19:59 +08001393@@ -806,21 +1177,25 @@ mtk_wed_hw_init_early(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001394 mtk_wed_deinit(dev);
1395 mtk_wed_reset(dev, MTK_WED_RESET_WED);
1396
1397- if (dev->ver > MTK_WED_V1)
1398- mtk_wed_bus_init(dev);
1399-
1400 mtk_wed_set_wpdma(dev);
1401
1402- mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
1403- MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
1404- MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
1405- set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
1406- MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
1407- MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
1408+ if (dev->hw->version == 3) {
1409+ mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE;
1410+ set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2);
1411+ } else {
1412+ mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
1413+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
1414+ MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
1415+ set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
1416+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
1417+ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
1418+ }
1419+
1420 wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
1421
1422- if (dev->ver == MTK_WED_V1) {
1423+ if (dev->hw->version == 1) {
1424 u32 offset;
1425+
1426 offset = dev->hw->index ? 0x04000400 : 0;
1427 wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
1428 wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
developerb74821e2023-09-08 14:19:59 +08001429@@ -907,11 +1282,16 @@ mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001430 } while (1);
1431
1432 /* configure RX_ROUTE_QM */
1433- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
1434- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
1435- wed_set(dev, MTK_WED_RTQM_GLO_CFG,
1436- FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
1437- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
1438+ if (dev->hw->version == 2) {
1439+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
1440+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
1441+ wed_set(dev, MTK_WED_RTQM_GLO_CFG,
1442+ FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
1443+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
1444+ } else {
1445+ wed_set(dev, MTK_WED_RTQM_ENQ_CFG0,
1446+ FIELD_PREP(MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT, 0x3 + dev->hw->index));
1447+ }
1448
1449 /* enable RX_ROUTE_QM */
1450 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
developerb74821e2023-09-08 14:19:59 +08001451@@ -920,23 +1300,45 @@ mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001452 static void
1453 mtk_wed_tx_hw_init(struct mtk_wed_device *dev)
1454 {
1455- int size = dev->buf_ring.size;
1456+ int size = dev->wlan.nbuf;
1457 int rev_size = MTK_WED_TX_RING_SIZE / 2;
1458- int thr = 1;
1459+ int thr_lo = 1, thr_hi = 1;
1460
1461- if (dev->ver > MTK_WED_V1) {
1462+ if (dev->hw->version == 1) {
1463+ wed_w32(dev, MTK_WED_TX_BM_CTRL,
1464+ MTK_WED_TX_BM_CTRL_PAUSE |
1465+ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, size / 128) |
1466+ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, rev_size / 128));
1467+ } else {
1468 size = MTK_WED_WDMA_RING_SIZE * ARRAY_SIZE(dev->tx_wdma) +
1469- dev->buf_ring.size;
1470+ dev->tx_buf_ring.size;
1471 rev_size = size;
1472- thr = 0;
1473+ thr_lo = 0;
1474+ thr_hi = MTK_WED_TX_BM_DYN_THR_HI;
1475+
1476+ wed_w32(dev, MTK_WED_TX_TKID_CTRL,
1477+ MTK_WED_TX_TKID_CTRL_PAUSE |
1478+ FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
1479+ size / 128) |
1480+ FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
1481+ size / 128));
1482+
1483+ /* return SKBID + SDP back to bm */
1484+ if (dev->ver == 3) {
1485+ wed_set(dev, MTK_WED_TX_TKID_CTRL,
1486+ MTK_WED_TX_TKID_CTRL_FREE_FORMAT);
1487+ size = dev->wlan.nbuf;
1488+ rev_size = size;
1489+ } else {
1490+ wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
1491+ FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
1492+ MTK_WED_TX_TKID_DYN_THR_HI);
1493+ }
1494 }
1495
1496- wed_w32(dev, MTK_WED_TX_BM_CTRL,
1497- MTK_WED_TX_BM_CTRL_PAUSE |
1498- FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, size / 128) |
1499- FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, rev_size / 128));
1500+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
1501
1502- wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);
1503+ wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys);
1504
1505 wed_w32(dev, MTK_WED_TX_BM_TKID,
1506 FIELD_PREP(MTK_WED_TX_BM_TKID_START,
developerb74821e2023-09-08 14:19:59 +08001507@@ -946,25 +1348,44 @@ mtk_wed_tx_hw_init(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001508
1509 wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
1510
1511- wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
1512- FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, thr) |
1513- MTK_WED_TX_BM_DYN_THR_HI);
1514+ if (dev->hw->version < 3)
1515+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
1516+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, thr_lo) |
1517+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, thr_hi));
1518+ else {
1519+ /* change to new bm */
1520+ wed_w32(dev, MTK_WED_TX_BM_INIT_PTR, dev->tx_buf_ring.pkt_nums |
developerb74821e2023-09-08 14:19:59 +08001521+ MTK_WED_TX_BM_INIT_SW_TAIL_IDX);
developer23f9f0f2023-06-15 13:06:25 +08001522+ wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_LEGACY_EN);
1523+ }
1524
1525- if (dev->ver > MTK_WED_V1) {
1526+ if (dev->hw->version != 1) {
1527 wed_w32(dev, MTK_WED_TX_TKID_CTRL,
1528 MTK_WED_TX_TKID_CTRL_PAUSE |
1529 FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
1530- dev->buf_ring.size / 128) |
1531+ size / 128) |
1532 FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
1533- dev->buf_ring.size / 128));
1534- wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
1535- FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
1536- MTK_WED_TX_TKID_DYN_THR_HI);
1537+ size / 128));
1538+
1539+ /* return SKBID + SDP back to bm */
1540+ if (dev->ver == 3)
1541+ wed_set(dev, MTK_WED_TX_TKID_CTRL,
1542+ MTK_WED_TX_TKID_CTRL_FREE_FORMAT);
1543+ else
1544+ wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
1545+ FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
1546+ MTK_WED_TX_TKID_DYN_THR_HI);
1547 }
1548- mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
1549+ wed_w32(dev, MTK_WED_TX_BM_TKID,
1550+ FIELD_PREP(MTK_WED_TX_BM_TKID_START,
1551+ dev->wlan.token_start) |
1552+ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
1553+ dev->wlan.token_start + dev->wlan.nbuf - 1));
1554
1555+ wed_w32(dev, MTK_WED_TX_BM_INIT_PTR, dev->tx_buf_ring.pkt_nums |
1556+ MTK_WED_TX_BM_INIT_SW_TAIL_IDX);
1557 wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
1558- if (dev->ver > MTK_WED_V1)
1559+ if (dev->hw->version != 1)
1560 wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
1561 }
1562
developerb74821e2023-09-08 14:19:59 +08001563@@ -977,7 +1398,26 @@ mtk_wed_rx_hw_init(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001564
1565 wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
1566
1567+ /* reset prefetch index of ring */
1568+ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX,
1569+ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
1570+ wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX,
1571+ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
1572+
1573+ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX,
1574+ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
1575+ wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX,
1576+ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
1577+
1578+ /* reset prefetch FIFO of ring */
1579+ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG,
1580+ MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR |
1581+ MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR);
1582+ wed_w32(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG, 0);
1583+
1584 mtk_wed_rx_bm_hw_init(dev);
1585+ if (dev->wlan.hwrro)
1586+ mtk_wed_hwrro_init(dev);
1587 mtk_wed_rro_hw_init(dev);
1588 mtk_wed_route_qm_hw_init(dev);
1589 }
developerb74821e2023-09-08 14:19:59 +08001590@@ -991,7 +1431,7 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001591 dev->init_done = true;
1592 mtk_wed_set_ext_int(dev, false);
1593 mtk_wed_tx_hw_init(dev);
1594- if (dev->ver > MTK_WED_V1)
1595+ if (mtk_wed_get_rx_capa(dev))
1596 mtk_wed_rx_hw_init(dev);
1597 }
1598
developerb74821e2023-09-08 14:19:59 +08001599@@ -1015,26 +1455,6 @@ mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size, int scale, bool tx)
developer23f9f0f2023-06-15 13:06:25 +08001600 }
1601 }
1602
1603-static u32
1604-mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
1605-{
1606- if (wed_r32(dev, reg) & mask)
1607- return true;
1608-
1609- return false;
1610-}
1611-
1612-static int
1613-mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
1614-{
1615- int sleep = 1000;
1616- int timeout = 100 * sleep;
1617- u32 val;
1618-
1619- return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
1620- timeout, false, dev, reg, mask);
1621-}
1622-
1623 static void
1624 mtk_wed_rx_reset(struct mtk_wed_device *dev)
1625 {
developerb74821e2023-09-08 14:19:59 +08001626@@ -1133,7 +1553,7 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001627 mtk_wed_ring_reset(desc, MTK_WED_RX_RING_SIZE, 1, false);
1628 }
1629
1630- mtk_wed_free_rx_bm(dev);
1631+ mtk_wed_free_rx_buffer(dev);
1632 }
1633
1634
developerb74821e2023-09-08 14:19:59 +08001635@@ -1271,12 +1691,15 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev,
developer23f9f0f2023-06-15 13:06:25 +08001636 int idx, int size, bool reset)
1637 {
1638 struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
1639+ int scale = dev->hw->version > 1 ? 2 : 1;
1640
1641 if(!reset)
1642 if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
1643- dev->ver, true))
1644+ scale, true))
1645 return -ENOMEM;
1646
1647+ wdma->flags |= MTK_WED_RING_CONFIGURED;
1648+
1649 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
1650 wdma->desc_phys);
1651 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
developerb74821e2023-09-08 14:19:59 +08001652@@ -1296,12 +1719,31 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev,
developer23f9f0f2023-06-15 13:06:25 +08001653 int idx, int size, bool reset)
1654 {
1655 struct mtk_wed_ring *wdma = &dev->rx_wdma[idx];
1656+ int scale = dev->hw->version > 1 ? 2 : 1;
1657
1658 if (!reset)
1659 if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
1660- dev->ver, true))
1661+ scale, true))
1662 return -ENOMEM;
1663
1664+ if (dev->hw->version == 3) {
1665+ struct mtk_wdma_desc *desc = wdma->desc;
1666+ int i;
1667+
1668+ for (i = 0; i < MTK_WED_WDMA_RING_SIZE; i++) {
1669+ desc->buf0 = 0;
1670+ desc->ctrl = MTK_WDMA_DESC_CTRL_DMA_DONE;
1671+ desc->buf1 = 0;
1672+ desc->info = MTK_WDMA_TXD0_DESC_INFO_DMA_DONE;
1673+ desc++;
1674+ desc->buf0 = 0;
1675+ desc->ctrl = MTK_WDMA_DESC_CTRL_DMA_DONE;
1676+ desc->buf1 = 0;
1677+ desc->info = MTK_WDMA_TXD1_DESC_INFO_DMA_DONE;
1678+ desc++;
1679+ }
1680+ }
1681+
1682 wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
1683 wdma->desc_phys);
1684 wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
developerb74821e2023-09-08 14:19:59 +08001685@@ -1312,7 +1754,7 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev,
developer23f9f0f2023-06-15 13:06:25 +08001686 MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);
1687 if (reset)
1688 mtk_wed_ring_reset(wdma->desc, MTK_WED_WDMA_RING_SIZE,
1689- dev->ver, true);
1690+ scale, true);
1691 if (idx == 0) {
1692 wed_w32(dev, MTK_WED_WDMA_RING_TX
1693 + MTK_WED_RING_OFS_BASE, wdma->desc_phys);
developerb74821e2023-09-08 14:19:59 +08001694@@ -1395,7 +1837,7 @@ mtk_wed_send_msg(struct mtk_wed_device *dev, int cmd_id, void *data, int len)
developer23f9f0f2023-06-15 13:06:25 +08001695 {
1696 struct mtk_wed_wo *wo = dev->hw->wed_wo;
1697
1698- if (dev->ver == MTK_WED_V1)
1699+ if (!mtk_wed_get_rx_capa(dev))
1700 return 0;
1701
1702 return mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, cmd_id, data, len, true);
developerb74821e2023-09-08 14:19:59 +08001703@@ -1420,24 +1862,106 @@ mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb,
developer23f9f0f2023-06-15 13:06:25 +08001704 }
1705 }
1706
1707+static void
1708+mtk_wed_start_hwrro(struct mtk_wed_device *dev, u32 irq_mask)
1709+{
1710+ int idx, ret;
1711+
1712+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
1713+ wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
1714+
1715+ if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hwrro)
1716+ return;
1717+
1718+ wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR);
1719+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, MTK_WED_RRO_MSDU_PG_DRV_CLR);
1720+
1721+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_RX,
1722+ MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN |
1723+ MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR |
1724+ MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN |
1725+ MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR |
1726+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG,
1727+ dev->wlan.rro_rx_tbit[0]) |
1728+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG,
1729+ dev->wlan.rro_rx_tbit[1]));
1730+
1731+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG,
1732+ MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN |
1733+ MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR |
1734+ MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN |
1735+ MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR |
1736+ MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN |
1737+ MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR |
1738+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG,
1739+ dev->wlan.rx_pg_tbit[0]) |
1740+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG,
1741+ dev->wlan.rx_pg_tbit[1])|
1742+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG,
1743+ dev->wlan.rx_pg_tbit[2]));
1744+
1745+ /*
1746+ * RRO_MSDU_PG_RING2_CFG1_FLD_DRV_EN should be enabled after
1747+ * WM FWDL completed, otherwise RRO_MSDU_PG ring may broken
1748+ */
1749+ wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, MTK_WED_RRO_MSDU_PG_DRV_EN);
1750+
1751+ for (idx = 0; idx < MTK_WED_RX_QUEUES; idx++) {
1752+ struct mtk_wed_ring *ring = &dev->rx_rro_ring[idx];
1753+
1754+ if(!(ring->flags & MTK_WED_RING_CONFIGURED))
1755+ continue;
1756+
1757+ ret = mtk_wed_check_wfdma_rx_fill(dev, ring);
1758+ if (!ret)
1759+ dev_err(dev->hw->dev, "mtk_wed%d: rx_rro_ring(%d) init failed!\n",
1760+ dev->hw->index, idx);
1761+ }
1762+
1763+ for (idx = 0; idx < MTK_WED_RX_PAGE_QUEUES; idx++){
1764+ struct mtk_wed_ring *ring = &dev->rx_page_ring[idx];
1765+ if(!(ring->flags & MTK_WED_RING_CONFIGURED))
1766+ continue;
1767+
1768+ ret = mtk_wed_check_wfdma_rx_fill(dev, ring);
1769+ if (!ret)
1770+ dev_err(dev->hw->dev, "mtk_wed%d: rx_page_ring(%d) init failed!\n",
1771+ dev->hw->index, idx);
1772+ }
1773+}
1774+
1775 static void
1776 mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
1777 {
1778 int i, ret;
1779
1780- if (dev->ver > MTK_WED_V1)
1781- ret = mtk_wed_rx_bm_alloc(dev);
1782+ if (mtk_wed_get_rx_capa(dev)) {
1783+ ret = mtk_wed_rx_buffer_alloc(dev);
1784+ if (ret)
1785+ return;
1786+
1787+ if (dev->wlan.hwrro)
1788+ mtk_wed_rx_page_buffer_alloc(dev);
1789+ }
1790
1791 for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
1792 if (!dev->tx_wdma[i].desc)
developerb74821e2023-09-08 14:19:59 +08001793 mtk_wed_wdma_rx_ring_setup(dev, i, 16, false);
1794
1795+ for (i = 0; i < ARRAY_SIZE(dev->rx_page_ring); i++) {
1796+ u32 count = MTK_WED_RRO_MSDU_PG_CTRL0(i) +
1797+ MTK_WED_RING_OFS_COUNT;
1798+
1799+ if (!wed_r32(dev, count))
1800+ wed_w32(dev, count, 1);
1801+ }
1802+
1803 mtk_wed_hw_init(dev);
1804
developer23f9f0f2023-06-15 13:06:25 +08001805 mtk_wed_set_int(dev, irq_mask);
1806 mtk_wed_set_ext_int(dev, true);
1807
1808- if (dev->ver == MTK_WED_V1) {
1809+ if (dev->hw->version == 1) {
1810 u32 val;
1811
1812 val = dev->wlan.wpdma_phys |
developerb74821e2023-09-08 14:19:59 +08001813@@ -1448,33 +1972,52 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
developer23f9f0f2023-06-15 13:06:25 +08001814 val |= BIT(1);
1815 val |= BIT(0);
1816 regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
1817- } else {
1818+ } else if (mtk_wed_get_rx_capa(dev)) {
1819 /* driver set mid ready and only once */
1820 wed_w32(dev, MTK_WED_EXT_INT_MASK1,
1821 MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
1822 wed_w32(dev, MTK_WED_EXT_INT_MASK2,
1823 MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
1824+ if (dev->hw->version == 3)
1825+ wed_w32(dev, MTK_WED_EXT_INT_MASK3,
1826+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
1827
1828 wed_r32(dev, MTK_WED_EXT_INT_MASK1);
1829 wed_r32(dev, MTK_WED_EXT_INT_MASK2);
1830+ if (dev->hw->version == 3)
1831+ wed_r32(dev, MTK_WED_EXT_INT_MASK3);
1832
1833 ret = mtk_wed_rro_cfg(dev);
1834 if (ret)
1835 return;
1836 }
1837- mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
1838+
1839+ if (dev->hw->version == 2)
1840+ mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
1841+ else if (dev->hw->version == 3)
1842+ mtk_wed_pao_init(dev);
1843
1844 mtk_wed_dma_enable(dev);
1845 dev->running = true;
1846 }
1847
1848+static int
1849+mtk_wed_get_pci_base(struct mtk_wed_device *dev)
1850+{
1851+ if (dev->hw->index == 0)
1852+ return MTK_WED_PCIE_BASE0;
1853+ else if (dev->hw->index == 1)
1854+ return MTK_WED_PCIE_BASE1;
1855+ else
1856+ return MTK_WED_PCIE_BASE2;
1857+}
1858+
1859 static int
1860 mtk_wed_attach(struct mtk_wed_device *dev)
1861 __releases(RCU)
1862 {
1863 struct mtk_wed_hw *hw;
1864 struct device *device;
1865- u16 ver;
1866 int ret = 0;
1867
1868 RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
developerb74821e2023-09-08 14:19:59 +08001869@@ -1494,34 +2037,30 @@ mtk_wed_attach(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001870 goto out;
1871 }
1872
1873- device = dev->wlan.bus_type == MTK_WED_BUS_PCIE
1874- ? &dev->wlan.pci_dev->dev
1875- : &dev->wlan.platform_dev->dev;
1876+ device = dev->wlan.bus_type == MTK_WED_BUS_PCIE ?
1877+ &dev->wlan.pci_dev->dev
1878+ : &dev->wlan.platform_dev->dev;
1879 dev_info(device, "attaching wed device %d version %d\n",
1880- hw->index, hw->ver);
1881+ hw->index, hw->version);
1882
1883 dev->hw = hw;
1884 dev->dev = hw->dev;
1885 dev->irq = hw->irq;
1886 dev->wdma_idx = hw->index;
1887+ dev->ver = hw->version;
1888+
1889+ if (dev->hw->version == 3)
1890+ dev->hw->pci_base = mtk_wed_get_pci_base(dev);
1891
1892 if (hw->eth->dma_dev == hw->eth->dev &&
1893 of_dma_is_coherent(hw->eth->dev->of_node))
1894 mtk_eth_set_dma_device(hw->eth, hw->dev);
1895
1896- dev->ver = FIELD_GET(MTK_WED_REV_ID_MAJOR,
1897- wed_r32(dev, MTK_WED_REV_ID));
1898- if (dev->ver > MTK_WED_V1)
1899- ver = FIELD_GET(MTK_WED_REV_ID_MINOR,
1900- wed_r32(dev, MTK_WED_REV_ID));
1901-
1902- dev->rev_id = ((dev->ver << 28) | ver << 16);
1903-
1904- ret = mtk_wed_buffer_alloc(dev);
1905+ ret = mtk_wed_tx_buffer_alloc(dev);
1906 if (ret)
1907 goto error;
1908
1909- if (dev->ver > MTK_WED_V1) {
1910+ if (mtk_wed_get_rx_capa(dev)) {
1911 ret = mtk_wed_rro_alloc(dev);
1912 if (ret)
1913 goto error;
developerb74821e2023-09-08 14:19:59 +08001914@@ -1533,15 +2072,20 @@ mtk_wed_attach(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001915 init_completion(&dev->wlan_reset_done);
1916 atomic_set(&dev->fe_reset, 0);
1917
1918- if (dev->ver == MTK_WED_V1)
1919+ if (dev->hw->version != 1)
1920+ dev->rev_id = wed_r32(dev, MTK_WED_REV_ID);
1921+ else
1922 regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
1923 BIT(hw->index), 0);
1924- else
1925+
1926+ if (mtk_wed_get_rx_capa(dev))
1927 ret = mtk_wed_wo_init(hw);
1928
1929 error:
1930- if (ret)
1931+ if (ret) {
1932+ pr_info("%s: detach wed\n", __func__);
1933 mtk_wed_detach(dev);
1934+ }
1935 out:
1936 mutex_unlock(&hw_lock);
1937
developerb74821e2023-09-08 14:19:59 +08001938@@ -1576,8 +2120,26 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx,
developer23f9f0f2023-06-15 13:06:25 +08001939 if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE, reset))
1940 return -ENOMEM;
1941
1942+ if (dev->hw->version == 3 && idx == 1) {
1943+ /* reset prefetch index */
1944+ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG,
1945+ MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR |
1946+ MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR);
1947+
1948+ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
1949+ MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR |
1950+ MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR);
1951+
1952+ /* reset prefetch FIFO */
1953+ wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG,
1954+ MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR |
1955+ MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR);
1956+ wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG, 0);
1957+ }
1958+
1959 ring->reg_base = MTK_WED_RING_TX(idx);
1960 ring->wpdma = regs;
1961+ ring->flags |= MTK_WED_RING_CONFIGURED;
1962
1963 /* WED -> WPDMA */
1964 wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
developerb74821e2023-09-08 14:19:59 +08001965@@ -1599,7 +2161,7 @@ mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
developer23f9f0f2023-06-15 13:06:25 +08001966 struct mtk_wed_ring *ring = &dev->txfree_ring;
1967 int i, idx = 1;
1968
1969- if(dev->ver > MTK_WED_V1)
1970+ if(dev->hw->version > 1)
1971 idx = 0;
1972
1973 /*
developerb74821e2023-09-08 14:19:59 +08001974@@ -1652,6 +2214,129 @@ mtk_wed_rx_ring_setup(struct mtk_wed_device *dev,
developer23f9f0f2023-06-15 13:06:25 +08001975 return 0;
1976 }
1977
1978+static int
1979+mtk_wed_rro_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
1980+{
1981+ struct mtk_wed_ring *ring = &dev->rx_rro_ring[idx];
1982+
1983+ ring->wpdma = regs;
1984+
1985+ wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_BASE,
1986+ readl(regs));
1987+ wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_COUNT,
1988+ readl(regs + MTK_WED_RING_OFS_COUNT));
1989+
1990+ ring->flags |= MTK_WED_RING_CONFIGURED;
1991+
1992+ return 0;
1993+}
1994+
1995+static int
1996+mtk_wed_msdu_pg_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
1997+{
1998+ struct mtk_wed_ring *ring = &dev->rx_page_ring[idx];
1999+
2000+ ring->wpdma = regs;
2001+
2002+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_BASE,
2003+ readl(regs));
2004+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_COUNT,
2005+ readl(regs + MTK_WED_RING_OFS_COUNT));
2006+
2007+ ring->flags |= MTK_WED_RING_CONFIGURED;
2008+
2009+ return 0;
2010+}
2011+
2012+static int
2013+mtk_wed_ind_rx_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
2014+{
2015+ struct mtk_wed_ring *ring = &dev->ind_cmd_ring;
2016+ u32 val = readl(regs + MTK_WED_RING_OFS_COUNT);
2017+ int i = 0, cnt = 0;
2018+
2019+ ring->wpdma = regs;
2020+
2021+ if (readl(regs) & 0xf)
2022+ pr_info("%s(): address is not 16-byte alignment\n", __func__);
2023+
2024+ wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_BASE,
2025+ readl(regs) & 0xfffffff0);
2026+
2027+ wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_COUNT,
2028+ readl(regs + MTK_WED_RING_OFS_COUNT));
2029+
2030+ /* ack sn cr */
2031+ wed_w32(dev, MTK_WED_RRO_CFG0, dev->wlan.phy_base +
2032+ dev->wlan.ind_cmd.ack_sn_addr);
2033+ wed_w32(dev, MTK_WED_RRO_CFG1,
2034+ FIELD_PREP(MTK_WED_RRO_CFG1_MAX_WIN_SZ,
2035+ dev->wlan.ind_cmd.win_size) |
2036+ FIELD_PREP(MTK_WED_RRO_CFG1_PARTICL_SE_ID,
2037+ dev->wlan.ind_cmd.particular_sid));
2038+
2039+ /* particular session addr element */
2040+ wed_w32(dev, MTK_WED_ADDR_ELEM_CFG0, dev->wlan.ind_cmd.particular_se_phys);
2041+
2042+ for (i = 0; i < dev->wlan.ind_cmd.se_group_nums; i++) {
2043+ wed_w32(dev, MTK_WED_RADDR_ELEM_TBL_WDATA,
2044+ dev->wlan.ind_cmd.addr_elem_phys[i] >> 4);
2045+ wed_w32(dev, MTK_WED_ADDR_ELEM_TBL_CFG,
2046+ MTK_WED_ADDR_ELEM_TBL_WR | (i & 0x7f));
2047+
2048+ val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
2049+ while (!(val & MTK_WED_ADDR_ELEM_TBL_WR_RDY) &&
2050+ cnt < 100) {
2051+ val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
2052+ cnt++;
2053+ }
2054+ if (cnt >= 100) {
2055+ dev_err(dev->hw->dev, "mtk_wed%d: write ba session base failed!\n",
2056+ dev->hw->index);
2057+ }
2058+ /*if (mtk_wed_poll_busy(dev, MTK_WED_ADDR_ELEM_TBL_CFG,
2059+ MTK_WED_ADDR_ELEM_TBL_WR_RDY)) {
2060+ dev_err(dev->hw->dev, "mtk_wed%d: write ba session base failed!\n",
2061+ dev->hw->index);
2062+ return -1;
2063+ }*/
2064+ }
2065+
2066+ /* pn check init */
2067+ for (i = 0; i < dev->wlan.ind_cmd.particular_sid; i++) {
2068+ wed_w32(dev, MTK_WED_PN_CHECK_WDATA_M,
2069+ MTK_WED_PN_CHECK_IS_FIRST);
2070+
2071+ wed_w32(dev, MTK_WED_PN_CHECK_CFG, MTK_WED_PN_CHECK_WR |
2072+ FIELD_PREP(MTK_WED_PN_CHECK_SE_ID, i));
2073+
2074+ cnt = 0;
2075+ val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
2076+ while (!(val & MTK_WED_PN_CHECK_WR_RDY) &&
2077+ cnt < 100) {
2078+ val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
2079+ cnt++;
2080+ }
2081+ if (cnt >= 100) {
2082+ dev_err(dev->hw->dev, "mtk_wed%d: session(%d) init failed!\n",
2083+ dev->hw->index, i);
2084+ }
2085+ /*if (mtk_wed_poll_busy(dev, MTK_WED_PN_CHECK_CFG,
2086+ MTK_WED_PN_CHECK_WR_RDY)) {
2087+ dev_err(dev->hw->dev, "mtk_wed%d: session(%d) init failed!\n",
2088+ dev->hw->index, i);
2089+ //return -1;
2090+ }*/
2091+ }
2092+
2093+ wed_w32(dev, MTK_WED_RX_IND_CMD_CNT0, MTK_WED_RX_IND_CMD_DBG_CNT_EN);
2094+
2095+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN);
2096+
2097+ return 0;
2098+}
2099+
2100+
2101 static u32
2102 mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
2103 {
developerb74821e2023-09-08 14:19:59 +08002104@@ -1659,9 +2344,13 @@ mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
developer18d0d712023-08-23 11:50:09 +08002105
developer23f9f0f2023-06-15 13:06:25 +08002106 val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
2107 wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
developer18d0d712023-08-23 11:50:09 +08002108- val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
2109- if (!dev->hw->num_flows)
2110- val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
2111+ if (dev->hw->version == 3) {
developer23f9f0f2023-06-15 13:06:25 +08002112+ val &= MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT;
developer18d0d712023-08-23 11:50:09 +08002113+ } else {
2114+ val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
2115+ if (!dev->hw->num_flows)
2116+ val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
2117+ }
2118 if (val && net_ratelimit())
2119 pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val);
developer23f9f0f2023-06-15 13:06:25 +08002120
developerb74821e2023-09-08 14:19:59 +08002121@@ -1754,6 +2443,9 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
developer23f9f0f2023-06-15 13:06:25 +08002122 .tx_ring_setup = mtk_wed_tx_ring_setup,
2123 .txfree_ring_setup = mtk_wed_txfree_ring_setup,
2124 .rx_ring_setup = mtk_wed_rx_ring_setup,
2125+ .rro_rx_ring_setup = mtk_wed_rro_rx_ring_setup,
2126+ .msdu_pg_rx_ring_setup = mtk_wed_msdu_pg_rx_ring_setup,
2127+ .ind_rx_ring_setup = mtk_wed_ind_rx_ring_setup,
2128 .msg_update = mtk_wed_send_msg,
2129 .start = mtk_wed_start,
2130 .stop = mtk_wed_stop,
developerb74821e2023-09-08 14:19:59 +08002131@@ -1765,6 +2457,7 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
developer23f9f0f2023-06-15 13:06:25 +08002132 .detach = mtk_wed_detach,
2133 .setup_tc = mtk_wed_eth_setup_tc,
2134 .ppe_check = mtk_wed_ppe_check,
2135+ .start_hwrro = mtk_wed_start_hwrro,
2136 };
2137 struct device_node *eth_np = eth->dev->of_node;
2138 struct platform_device *pdev;
developerb74821e2023-09-08 14:19:59 +08002139@@ -1804,9 +2497,10 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
developer23f9f0f2023-06-15 13:06:25 +08002140 hw->wdma_phy = wdma_phy;
2141 hw->index = index;
2142 hw->irq = irq;
2143- hw->ver = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
2144+ hw->version = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3) ?
2145+ 3 : MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
2146
2147- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2148+ if (hw->version == 1) {
2149 hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
2150 "mediatek,pcie-mirror");
2151 hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
developerb74821e2023-09-08 14:19:59 +08002152@@ -1821,7 +2515,6 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
developer23f9f0f2023-06-15 13:06:25 +08002153 regmap_write(hw->mirror, 0, 0);
2154 regmap_write(hw->mirror, 4, 0);
2155 }
2156- hw->ver = MTK_WED_V1;
2157 }
2158
2159 mtk_wed_hw_add_debugfs(hw);
2160diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
2161index 490873c..fcf7bd0 100644
2162--- a/drivers/net/ethernet/mediatek/mtk_wed.h
2163+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
2164@@ -10,10 +10,13 @@
2165 #include <linux/netdevice.h>
2166 #define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000)
2167
2168-#define MTK_WED_PKT_SIZE 1900
2169+#define MTK_WED_PKT_SIZE 1920//1900
2170 #define MTK_WED_BUF_SIZE 2048
2171+#define MTK_WED_PAGE_BUF_SIZE 128
2172 #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
2173+#define MTK_WED_RX_PAGE_BUF_PER_PAGE (PAGE_SIZE / 128)
2174 #define MTK_WED_RX_RING_SIZE 1536
2175+#define MTK_WED_RX_PG_BM_CNT 8192
2176
2177 #define MTK_WED_TX_RING_SIZE 2048
2178 #define MTK_WED_WDMA_RING_SIZE 512
2179@@ -27,6 +30,9 @@
2180 #define MTK_WED_RRO_QUE_CNT 8192
2181 #define MTK_WED_MIOD_ENTRY_CNT 128
2182
2183+#define MTK_WED_TX_BM_DMA_SIZE 65536
2184+#define MTK_WED_TX_BM_PKT_CNT 32768
2185+
2186 #define MODULE_ID_WO 1
2187
2188 struct mtk_eth;
2189@@ -43,6 +49,8 @@ struct mtk_wed_hw {
2190 struct dentry *debugfs_dir;
2191 struct mtk_wed_device *wed_dev;
2192 struct mtk_wed_wo *wed_wo;
2193+ struct mtk_wed_pao *wed_pao;
2194+ u32 pci_base;
2195 u32 debugfs_reg;
2196 u32 num_flows;
2197 u32 wdma_phy;
2198@@ -50,7 +58,8 @@ struct mtk_wed_hw {
2199 int ring_num;
2200 int irq;
2201 int index;
2202- u32 ver;
2203+ int token_id;
2204+ u32 version;
2205 };
2206
2207 struct mtk_wdma_info {
2208@@ -58,6 +67,18 @@ struct mtk_wdma_info {
2209 u8 queue;
2210 u16 wcid;
2211 u8 bss;
2212+ u32 usr_info;
2213+ u8 tid;
2214+ u8 is_fixedrate;
2215+ u8 is_prior;
2216+ u8 is_sp;
2217+ u8 hf;
2218+ u8 amsdu_en;
2219+};
2220+
2221+struct mtk_wed_pao {
2222+ char *hif_txd[32];
2223+ dma_addr_t hif_txd_phys[32];
2224 };
2225
2226 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
2227diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
2228index 4a9e684..51e3d7c 100644
2229--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
2230+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
2231@@ -11,9 +11,11 @@ struct reg_dump {
2232 u16 offset;
2233 u8 type;
2234 u8 base;
2235+ u32 mask;
2236 };
2237
2238 enum {
2239+ DUMP_TYPE_END,
2240 DUMP_TYPE_STRING,
2241 DUMP_TYPE_WED,
2242 DUMP_TYPE_WDMA,
2243@@ -23,8 +25,11 @@ enum {
2244 DUMP_TYPE_WED_RRO,
2245 };
2246
2247+#define DUMP_END() { .type = DUMP_TYPE_END }
2248 #define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
2249 #define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ }
2250+#define DUMP_REG_MASK(_reg, _mask) { #_mask, MTK_##_reg, DUMP_TYPE_WED, 0, MTK_##_mask }
2251+
2252 #define DUMP_RING(_prefix, _base, ...) \
2253 { _prefix " BASE", _base, __VA_ARGS__ }, \
2254 { _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \
2255@@ -32,6 +37,7 @@ enum {
2256 { _prefix " DIDX", _base + 0xc, __VA_ARGS__ }
2257
2258 #define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED)
2259+#define DUMP_WED_MASK(_reg, _mask) DUMP_REG_MASK(_reg, _mask)
2260 #define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED)
2261
2262 #define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA)
2263@@ -52,36 +58,49 @@ print_reg_val(struct seq_file *s, const char *name, u32 val)
2264
2265 static void
2266 dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
2267- const struct reg_dump *regs, int n_regs)
2268+ const struct reg_dump **regs)
2269 {
2270- const struct reg_dump *cur;
2271+ const struct reg_dump **cur_o = regs, *cur;
2272+ bool newline = false;
2273 u32 val;
2274
2275- for (cur = regs; cur < &regs[n_regs]; cur++) {
2276- switch (cur->type) {
2277- case DUMP_TYPE_STRING:
2278- seq_printf(s, "%s======== %s:\n",
2279- cur > regs ? "\n" : "",
2280- cur->name);
2281- continue;
2282- case DUMP_TYPE_WED:
2283- case DUMP_TYPE_WED_RRO:
2284- val = wed_r32(dev, cur->offset);
2285- break;
2286- case DUMP_TYPE_WDMA:
2287- val = wdma_r32(dev, cur->offset);
2288- break;
2289- case DUMP_TYPE_WPDMA_TX:
2290- val = wpdma_tx_r32(dev, cur->base, cur->offset);
2291- break;
2292- case DUMP_TYPE_WPDMA_TXFREE:
2293- val = wpdma_txfree_r32(dev, cur->offset);
2294- break;
2295- case DUMP_TYPE_WPDMA_RX:
2296- val = wpdma_rx_r32(dev, cur->base, cur->offset);
2297- break;
2298+ while (*cur_o) {
2299+ cur = *cur_o;
2300+
2301+ while (cur->type != DUMP_TYPE_END) {
2302+ switch (cur->type) {
2303+ case DUMP_TYPE_STRING:
2304+ seq_printf(s, "%s======== %s:\n",
2305+ newline ? "\n" : "",
2306+ cur->name);
2307+ newline = true;
2308+ cur++;
2309+ continue;
2310+ case DUMP_TYPE_WED:
2311+ case DUMP_TYPE_WED_RRO:
2312+ val = wed_r32(dev, cur->offset);
2313+ break;
2314+ case DUMP_TYPE_WDMA:
2315+ val = wdma_r32(dev, cur->offset);
2316+ break;
2317+ case DUMP_TYPE_WPDMA_TX: