blob: ae2aa14cffc03c9b878b65cca7b83ad656f61401
developer58aa0682023-09-18 14:02:26 +08001From d7b7f03e5ab30b0420d0c97d6112dc3fa6e64d71 Mon Sep 17 00:00:00 2001
developer23f9f0f2023-06-15 13:06:25 +08002From: Sujuan Chen <sujuan.chen@mediatek.com>
developer58aa0682023-09-18 14:02:26 +08003Date: Mon, 18 Sep 2023 13:21:15 +0800
4Subject: [PATCH 20/22] mtk: wed: add wed3 support
developer23f9f0f2023-06-15 13:06:25 +08005
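Add WED v3 support for MT7988 (NETSYS v3):

- split the single wed device tree node into per-instance wed0/wed1/wed2
  nodes with their own register range, interrupt and WO CPU resources,
  and reference them from the ethernet node via "mediatek,wed"
- add the third WDMA base (MTK_WDMA2_BASE) and PSE_WDMA2_PORT handling
- carry the AMSDU enable flag in the FOE WDMA info and program the PAO
  TID from the DSCP value on NETSYS v3
- rework TX buffer management for the v3 buffer manager and allocate the
  PAO HIF TXD segments and HW RRO RX page buffers
- adapt interrupt, prefetch, route QM and PCIe/MSI register programming
  to the v3 WED block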
developer23f9f0f2023-06-15 13:06:25 +08006---
7 arch/arm64/boot/dts/mediatek/mt7988.dtsi | 152 ++-
8 .../dts/mediatek/mt7988a-dsa-10g-spim-nor.dts | 16 +-
9 .../dts/mediatek/mt7988d-dsa-10g-spim-nor.dts | 16 +-
10 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 3 +-
11 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 5 +-
12 drivers/net/ethernet/mediatek/mtk_ppe.c | 17 +-
13 drivers/net/ethernet/mediatek/mtk_ppe.h | 2 +-
14 .../net/ethernet/mediatek/mtk_ppe_offload.c | 13 +-
developerb74821e2023-09-08 14:19:59 +080015 drivers/net/ethernet/mediatek/mtk_wed.c | 1165 +++++++++++++----
developer23f9f0f2023-06-15 13:06:25 +080016 drivers/net/ethernet/mediatek/mtk_wed.h | 25 +-
17 .../net/ethernet/mediatek/mtk_wed_debugfs.c | 584 ++++++++-
18 drivers/net/ethernet/mediatek/mtk_wed_mcu.c | 13 +-
19 drivers/net/ethernet/mediatek/mtk_wed_mcu.h | 5 +-
20 drivers/net/ethernet/mediatek/mtk_wed_regs.h | 338 ++++-
21 include/linux/netdevice.h | 7 +
developer58aa0682023-09-18 14:02:26 +080022 include/linux/soc/mediatek/mtk_wed.h | 83 +-
23 16 files changed, 2056 insertions(+), 388 deletions(-)
developer23f9f0f2023-06-15 13:06:25 +080024 mode change 100755 => 100644 drivers/net/ethernet/mediatek/mtk_ppe.c
25
26diff --git a/arch/arm64/boot/dts/mediatek/mt7988.dtsi b/arch/arm64/boot/dts/mediatek/mt7988.dtsi
developer58aa0682023-09-18 14:02:26 +080027index 561450e..8995ea3 100644
developer23f9f0f2023-06-15 13:06:25 +080028--- a/arch/arm64/boot/dts/mediatek/mt7988.dtsi
29+++ b/arch/arm64/boot/dts/mediatek/mt7988.dtsi
developer58aa0682023-09-18 14:02:26 +080030@@ -205,44 +205,49 @@
developer23f9f0f2023-06-15 13:06:25 +080031 status = "disabled";
32 };
33
34- wed: wed@15010000 {
35- compatible = "mediatek,wed";
36- wed_num = <3>;
37- /* add this property for wed get the pci slot number. */
38- pci_slot_map = <0>, <1>, <2>;
39- reg = <0 0x15010000 0 0x2000>,
40- <0 0x15012000 0 0x2000>,
41- <0 0x15014000 0 0x2000>;
42+ wed0: wed@15010000 {
43+ compatible = "mediatek,mt7988-wed",
44+ "syscon";
45+ reg = <0 0x15010000 0 0x2000>;
46 interrupt-parent = <&gic>;
47- interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
48- <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
49- <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>;
50- };
51-
52- wed2: wed2@15012000 {
53- compatible = "mediatek,wed2";
54- wed_num = <3>;
55- /* add this property for wed get the pci slot number. */
56- reg = <0 0x15010000 0 0x2000>,
57- <0 0x15012000 0 0x2000>,
58- <0 0x15014000 0 0x2000>;
59+ interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
60+ mediatek,wed_pcie = <&wed_pcie>;
61+ mediatek,ap2woccif = <&ap2woccif0>;
62+ mediatek,wocpu_ilm = <&wocpu0_ilm>;
63+ mediatek,wocpu_dlm = <&wocpu0_dlm>;
64+ mediatek,wocpu_boot = <&cpu0_boot>;
65+ mediatek,wocpu_emi = <&wocpu0_emi>;
66+ mediatek,wocpu_data = <&wocpu_data>;
67+ };
68+
69+ wed1: wed@15012000 {
70+ compatible = "mediatek,mt7988-wed",
71+ "syscon";
72+ reg = <0 0x15012000 0 0x2000>;
73 interrupt-parent = <&gic>;
74- interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
75- <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
76- <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>;
77- };
78-
79- wed3: wed3@15014000 {
80- compatible = "mediatek,wed3";
81- wed_num = <3>;
82- /* add this property for wed get the pci slot number. */
83- reg = <0 0x15010000 0 0x2000>,
84- <0 0x15012000 0 0x2000>,
85- <0 0x15014000 0 0x2000>;
86+ interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
87+ mediatek,wed_pcie = <&wed_pcie>;
88+ mediatek,ap2woccif = <&ap2woccif1>;
89+ mediatek,wocpu_ilm = <&wocpu1_ilm>;
90+ mediatek,wocpu_dlm = <&wocpu1_dlm>;
91+ mediatek,wocpu_boot = <&cpu1_boot>;
92+ mediatek,wocpu_emi = <&wocpu1_emi>;
93+ mediatek,wocpu_data = <&wocpu_data>;
94+ };
95+
96+ wed2: wed@15014000 {
97+ compatible = "mediatek,mt7988-wed",
98+ "syscon";
99+ reg = <0 0x15014000 0 0x2000>;
100 interrupt-parent = <&gic>;
101- interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
102- <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
103- <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>;
104+ interrupts = <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>;
105+ mediatek,wed_pcie = <&wed_pcie>;
106+ mediatek,ap2woccif = <&ap2woccif2>;
107+ mediatek,wocpu_ilm = <&wocpu2_ilm>;
108+ mediatek,wocpu_dlm = <&wocpu2_dlm>;
109+ mediatek,wocpu_boot = <&cpu2_boot>;
110+ mediatek,wocpu_emi = <&wocpu2_emi>;
111+ mediatek,wocpu_data = <&wocpu_data>;
112 };
113
114 wdma: wdma@15104800 {
developer58aa0682023-09-18 14:02:26 +0800115@@ -252,15 +257,25 @@
developer23f9f0f2023-06-15 13:06:25 +0800116 <0 0x15105000 0 0x400>;
117 };
118
119- ap2woccif: ap2woccif@151A5000 {
120- compatible = "mediatek,ap2woccif";
121- reg = <0 0x151A5000 0 0x1000>,
122- <0 0x152A5000 0 0x1000>,
123- <0 0x153A5000 0 0x1000>;
124+ ap2woccif0: ap2woccif@151A5000 {
125+ compatible = "mediatek,ap2woccif", "syscon";
126+ reg = <0 0x151A5000 0 0x1000>;
127+ interrupt-parent = <&gic>;
128+ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
129+ };
130+
131+ ap2woccif1: ap2woccif@152A5000 {
132+ compatible = "mediatek,ap2woccif", "syscon";
133+ reg = <0 0x152A5000 0 0x1000>;
134 interrupt-parent = <&gic>;
135- interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
136- <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
137- <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
138+ interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
139+ };
140+
141+ ap2woccif2: ap2woccif@153A5000 {
142+ compatible = "mediatek,ap2woccif", "syscon";
143+ reg = <0 0x153A5000 0 0x1000>;
144+ interrupt-parent = <&gic>;
145+ interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
146 };
147
148 wocpu0_ilm: wocpu0_ilm@151E0000 {
developer58aa0682023-09-18 14:02:26 +0800149@@ -268,31 +283,53 @@
developer23f9f0f2023-06-15 13:06:25 +0800150 reg = <0 0x151E0000 0 0x8000>;
151 };
152
153- wocpu1_ilm: wocpu1_ilm@152E0000 {
154- compatible = "mediatek,wocpu1_ilm";
155+ wocpu1_ilm: wocpu_ilm@152E0000 {
156+ compatible = "mediatek,wocpu_ilm";
157 reg = <0 0x152E0000 0 0x8000>;
158 };
159
160- wocpu2_ilm: wocpu2_ilm@153E0000 {
161- compatible = "mediatek,wocpu2_ilm";
162- reg = <0 0x153E0000 0 0x8000>;
163+ wocpu2_ilm: wocpu_ilm@153E0000 {
164+ compatible = "mediatek,wocpu_ilm";
165+ reg = <0 0x153E0000 0 0x8000>;
166+ };
167+
168+ wocpu0_dlm: wocpu_dlm@151E8000 {
169+ compatible = "mediatek,wocpu_dlm";
170+ reg = <0 0x151E8000 0 0x2000>;
171+
172+ resets = <&ethsysrst 0>;
173+ reset-names = "wocpu_rst";
174+ };
175+
176+	wocpu1_dlm: wocpu_dlm@152E8000 {
177+ compatible = "mediatek,wocpu_dlm";
178+ reg = <0 0x152E8000 0 0x2000>;
179+
180+ resets = <&ethsysrst 0>;
181+ reset-names = "wocpu_rst";
182 };
183
184- wocpu_dlm: wocpu_dlm@151E8000 {
185+	wocpu2_dlm: wocpu_dlm@153E8000 {
186 compatible = "mediatek,wocpu_dlm";
187- reg = <0 0x151E8000 0 0x2000>,
188- <0 0x152E8000 0 0x2000>,
189- <0 0x153E8000 0 0x2000>;
190+ reg = <0 0x153E8000 0 0x2000>;
191
192 resets = <&ethsysrst 0>;
193 reset-names = "wocpu_rst";
194 };
195
196- cpu_boot: wocpu_boot@15194000 {
197- compatible = "mediatek,wocpu_boot";
198- reg = <0 0x15194000 0 0x1000>,
199- <0 0x15294000 0 0x1000>,
200- <0 0x15394000 0 0x1000>;
201+ cpu0_boot: wocpu_boot@15194000 {
202+ compatible = "mediatek,wocpu0_boot";
203+ reg = <0 0x15194000 0 0x1000>;
204+ };
205+
206+ cpu1_boot: wocpu_boot@15294000 {
207+ compatible = "mediatek,wocpu1_boot";
208+ reg = <0 0x15294000 0 0x1000>;
209+ };
210+
211+ cpu2_boot: wocpu_boot@15394000 {
212+ compatible = "mediatek,wocpu2_boot";
213+ reg = <0 0x15394000 0 0x1000>;
214 };
215
216 reserved-memory {
developer58aa0682023-09-18 14:02:26 +0800217@@ -902,6 +939,7 @@
developer23f9f0f2023-06-15 13:06:25 +0800218 <&topckgen CK_TOP_CB_SGM_325M>;
219 mediatek,ethsys = <&ethsys>;
220 mediatek,sgmiisys = <&sgmiisys0>, <&sgmiisys1>;
221+ mediatek,wed = <&wed0>, <&wed1>, <&wed2>;
222 mediatek,usxgmiisys = <&usxgmiisys0>, <&usxgmiisys1>;
223 mediatek,xfi_pextp = <&xfi_pextp0>, <&xfi_pextp1>;
224 mediatek,xfi_pll = <&xfi_pll>;
225diff --git a/arch/arm64/boot/dts/mediatek/mt7988a-dsa-10g-spim-nor.dts b/arch/arm64/boot/dts/mediatek/mt7988a-dsa-10g-spim-nor.dts
developer58aa0682023-09-18 14:02:26 +0800226index 70a7554..bed27b4 100644
developer23f9f0f2023-06-15 13:06:25 +0800227--- a/arch/arm64/boot/dts/mediatek/mt7988a-dsa-10g-spim-nor.dts
228+++ b/arch/arm64/boot/dts/mediatek/mt7988a-dsa-10g-spim-nor.dts
developer58aa0682023-09-18 14:02:26 +0800229@@ -369,9 +369,23 @@
developer23f9f0f2023-06-15 13:06:25 +0800230 status = "okay";
231 };
232
233-&wed {
234+&wed0 {
235 dy_txbm_enable = "true";
236 dy_txbm_budge = <8>;
237 txbm_init_sz = <10>;
238 status = "okay";
239 };
240+
241+&wed1 {
242+ dy_txbm_enable = "true";
243+ dy_txbm_budge = <8>;
244+ txbm_init_sz = <10>;
245+ status = "okay";
246+};
247+
248+&wed2 {
249+ dy_txbm_enable = "true";
250+ dy_txbm_budge = <8>;
251+ txbm_init_sz = <10>;
252+ status = "okay";
253+};
254\ No newline at end of file
255diff --git a/arch/arm64/boot/dts/mediatek/mt7988d-dsa-10g-spim-nor.dts b/arch/arm64/boot/dts/mediatek/mt7988d-dsa-10g-spim-nor.dts
developer58aa0682023-09-18 14:02:26 +0800256index e8e3a69..5dd481b 100644
developer23f9f0f2023-06-15 13:06:25 +0800257--- a/arch/arm64/boot/dts/mediatek/mt7988d-dsa-10g-spim-nor.dts
258+++ b/arch/arm64/boot/dts/mediatek/mt7988d-dsa-10g-spim-nor.dts
developer58aa0682023-09-18 14:02:26 +0800259@@ -379,9 +379,23 @@
developer23f9f0f2023-06-15 13:06:25 +0800260 status = "okay";
261 };
262
263-&wed {
264+&wed0 {
265 dy_txbm_enable = "true";
266 dy_txbm_budge = <8>;
267 txbm_init_sz = <10>;
268 status = "okay";
269 };
270+
271+&wed1 {
272+ dy_txbm_enable = "true";
273+ dy_txbm_budge = <8>;
274+ txbm_init_sz = <10>;
275+ status = "okay";
276+};
277+
278+&wed2 {
279+ dy_txbm_enable = "true";
280+ dy_txbm_budge = <8>;
281+ txbm_init_sz = <10>;
282+ status = "okay";
283+};
284\ No newline at end of file
285diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
developer58aa0682023-09-18 14:02:26 +0800286index 8bd526a..dea66d7 100644
developer23f9f0f2023-06-15 13:06:25 +0800287--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
288+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
developer58aa0682023-09-18 14:02:26 +0800289@@ -5095,7 +5095,8 @@ static int mtk_probe(struct platform_device *pdev)
developer23f9f0f2023-06-15 13:06:25 +0800290 "mediatek,wed", i);
291 static const u32 wdma_regs[] = {
292 MTK_WDMA0_BASE,
293- MTK_WDMA1_BASE
294+ MTK_WDMA1_BASE,
295+ MTK_WDMA2_BASE
296 };
297 void __iomem *wdma;
298 u32 wdma_phy;
299diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
developer58aa0682023-09-18 14:02:26 +0800300index ee89b4c..8656b5f 100644
developer23f9f0f2023-06-15 13:06:25 +0800301--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
302+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
developer58aa0682023-09-18 14:02:26 +0800303@@ -613,9 +613,12 @@
developer23f9f0f2023-06-15 13:06:25 +0800304 #define RX_DMA_SPORT_MASK 0x7
305 #define RX_DMA_SPORT_MASK_V2 0xf
306
307-#if defined(CONFIG_MEDIATEK_NETSYS_V2)
308+#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
309 #define MTK_WDMA0_BASE 0x4800
310 #define MTK_WDMA1_BASE 0x4c00
311+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
312+#define MTK_WDMA2_BASE 0x5000
313+#endif
314 #else
315 #define MTK_WDMA0_BASE 0x2800
316 #define MTK_WDMA1_BASE 0x2c00
317diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
318old mode 100755
319new mode 100644
developer58aa0682023-09-18 14:02:26 +0800320index 384e811..eda23c2
developer23f9f0f2023-06-15 13:06:25 +0800321--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
322+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
323@@ -9,6 +9,7 @@
324 #include <linux/if_ether.h>
325 #include <linux/if_vlan.h>
326 #include <net/dsa.h>
327+#include <net/route.h>
328 #include "mtk_eth_soc.h"
329 #include "mtk_ppe.h"
330 #include "mtk_ppe_regs.h"
331@@ -396,7 +397,7 @@ int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
332 }
333
334 int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
335- int bss, int wcid)
336+ int bss, int wcid, bool amsdu_en)
337 {
338 struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
339 u32 *ib2 = mtk_foe_entry_ib2(entry);
340@@ -408,6 +409,9 @@ int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
341
342 l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
343 FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
344+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
345+ l2->winfo_pao = FIELD_PREP(MTK_FOE_WINFO_PAO_AMSDU_EN, amsdu_en);
346+#endif
347 #else
348 if (wdma_idx)
349 *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
350@@ -443,6 +447,17 @@ int mtk_foe_entry_set_dscp(struct mtk_foe_entry *entry, int dscp)
351 *ib2 &= ~MTK_FOE_IB2_DSCP;
352 *ib2 |= FIELD_PREP(MTK_FOE_IB2_DSCP, dscp);
353
354+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
355+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
356+
357+ if (*ib2 & MTK_FOE_IB2_WDMA_WINFO &&
358+ l2->winfo_pao & MTK_FOE_WINFO_PAO_AMSDU_EN) {
359+ u8 tid = rt_tos2priority(dscp) & 0xf;
360+
361+ l2->winfo_pao |= FIELD_PREP(MTK_FOE_WINFO_PAO_TID, tid);
362+ }
363+#endif
364+
365 return 0;
366 }
367
368diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
developer58aa0682023-09-18 14:02:26 +0800369index 2a8b6ef..66c7f10 100644
developer23f9f0f2023-06-15 13:06:25 +0800370--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
371+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
372@@ -428,7 +428,7 @@ int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port);
373 int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
374 int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
375 int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
376- int bss, int wcid);
377+ int bss, int wcid, bool amsdu_en);
378 int mtk_foe_entry_set_qid(struct mtk_foe_entry *entry, int qid);
379 int mtk_foe_entry_set_dscp(struct mtk_foe_entry *entry, int dscp);
developer58aa0682023-09-18 14:02:26 +0800380 bool mtk_foe_entry_match(struct mtk_foe_entry *entry, struct mtk_foe_entry *data);
developer23f9f0f2023-06-15 13:06:25 +0800381diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
developer58aa0682023-09-18 14:02:26 +0800382index 95174b7..eab9e9d 100644
developer23f9f0f2023-06-15 13:06:25 +0800383--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
384+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
385@@ -112,6 +112,7 @@ mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_i
386 info->queue = path.mtk_wdma.queue;
387 info->bss = path.mtk_wdma.bss;
388 info->wcid = path.mtk_wdma.wcid;
389+ info->amsdu_en = path.mtk_wdma.amsdu_en;
390
391 return 0;
392 }
393@@ -193,13 +194,15 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
394
395 if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
396 mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss,
397- info.wcid);
398+ info.wcid, info.amsdu_en);
399 pse_port = PSE_PPE0_PORT;
400 #if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
401 if (info.wdma_idx == 0)
402 pse_port = PSE_WDMA0_PORT;
403 else if (info.wdma_idx == 1)
404 pse_port = PSE_WDMA1_PORT;
405+ else if (info.wdma_idx == 2)
406+ pse_port = PSE_WDMA2_PORT;
407 else
408 return -EOPNOTSUPP;
409 #endif
developer58aa0682023-09-18 14:02:26 +0800410@@ -490,8 +493,8 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
developer23f9f0f2023-06-15 13:06:25 +0800411 if (err)
412 return err;
413
414- if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
415- return err;
416+ /*if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
417+ return err;*/
418
419 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
420 if (!entry)
developer58aa0682023-09-18 14:02:26 +0800421@@ -516,8 +519,8 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
422 mtk_foe_entry_clear(eth->ppe[ppe_index], entry);
developer23f9f0f2023-06-15 13:06:25 +0800423 free:
424 kfree(entry);
425- if (wed_index >= 0)
426- mtk_wed_flow_remove(wed_index);
427+ /*if (wed_index >= 0)
428+ mtk_wed_flow_remove(wed_index);*/
429 return err;
430 }
431
432diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
developer58aa0682023-09-18 14:02:26 +0800433index 3e760f7..9047cb0 100644
developer23f9f0f2023-06-15 13:06:25 +0800434--- a/drivers/net/ethernet/mediatek/mtk_wed.c
435+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
436@@ -28,7 +28,7 @@ struct wo_cmd_ring {
437 u32 cnt;
438 u32 unit;
439 };
440-static struct mtk_wed_hw *hw_list[2];
441+static struct mtk_wed_hw *hw_list[3];
442 static DEFINE_MUTEX(hw_lock);
443
444 static void
445@@ -73,6 +73,26 @@ mtk_wdma_read_reset(struct mtk_wed_device *dev)
446 return wdma_r32(dev, MTK_WDMA_GLO_CFG);
447 }
448
449+static u32
450+mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
451+{
452+ if (wed_r32(dev, reg) & mask)
453+ return true;
454+
455+ return false;
456+}
457+
458+static int
459+mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
460+{
461+ int sleep = 1000;
462+ int timeout = 100 * sleep;
463+ u32 val;
464+
465+ return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
466+ timeout, false, dev, reg, mask);
467+}
468+
469 static int
470 mtk_wdma_rx_reset(struct mtk_wed_device *dev)
471 {
472@@ -235,6 +255,8 @@ mtk_wed_assign(struct mtk_wed_device *dev)
473 continue;
474
475 hw->wed_dev = dev;
476+ hw->pci_base = MTK_WED_PCIE_BASE;
477+
478 return hw;
479 }
480
481@@ -242,23 +264,84 @@ mtk_wed_assign(struct mtk_wed_device *dev)
482 }
483
484 static int
485-mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
486+mtk_wed_pao_buffer_alloc(struct mtk_wed_device *dev)
487+{
488+ struct mtk_wed_pao *pao;
489+ int i, j;
490+
491+ pao = kzalloc(sizeof(struct mtk_wed_pao), GFP_KERNEL);
492+ if (!pao)
493+ return -ENOMEM;
494+
495+ dev->hw->wed_pao = pao;
496+
497+ for (i = 0; i < 32; i++) {
498+		/* each segment is 64KB (16 pages) */
499+ pao->hif_txd[i] = (char *)__get_free_pages(GFP_ATOMIC |
500+ GFP_DMA32 |
501+ __GFP_ZERO, 4);
502+ if (!pao->hif_txd[i])
503+ goto err;
504+
505+ pao->hif_txd_phys[i] = dma_map_single(dev->hw->dev,
506+ pao->hif_txd[i],
507+ 16 * PAGE_SIZE,
508+ DMA_TO_DEVICE);
509+ if (unlikely(dma_mapping_error(dev->hw->dev,
510+ pao->hif_txd_phys[i])))
511+ goto err;
512+ }
513+
514+ return 0;
515+
516+err:
517+ for (j = 0; j < i; j++)
518+ dma_unmap_single(dev->hw->dev, pao->hif_txd_phys[j],
519+ 16 * PAGE_SIZE, DMA_TO_DEVICE);
520+
521+ return -ENOMEM;
522+}
523+
524+static int
525+mtk_wed_pao_free_buffer(struct mtk_wed_device *dev)
526+{
527+ struct mtk_wed_pao *pao = dev->hw->wed_pao;
528+ int i;
529+
530+ for (i = 0; i < 32; i++) {
531+ dma_unmap_single(dev->hw->dev, pao->hif_txd_phys[i],
532+ 16 * PAGE_SIZE, DMA_TO_DEVICE);
533+ free_pages((unsigned long)pao->hif_txd[i], 4);
534+ }
535+
536+ return 0;
537+}
538+
539+static int
540+mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
541 {
542 struct mtk_wdma_desc *desc;
543+ void *desc_ptr;
544 dma_addr_t desc_phys;
545- void **page_list;
546+ struct dma_page_info *page_list;
547 u32 last_seg = MTK_WDMA_DESC_CTRL_LAST_SEG1;
548 int token = dev->wlan.token_start;
549- int ring_size, n_pages, page_idx;
550- int i;
551-
552+ int ring_size, pkt_nums, n_pages, page_idx;
553+ int i, ret = 0;
554
555 if (dev->ver == MTK_WED_V1) {
556 ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
557- } else {
558+ pkt_nums = ring_size;
559+ dev->tx_buf_ring.desc_size = sizeof(struct mtk_wdma_desc);
560+ } else if (dev->hw->version == 2) {
561 ring_size = MTK_WED_VLD_GROUP_SIZE * MTK_WED_PER_GROUP_PKT +
562 MTK_WED_WDMA_RING_SIZE * 2;
563 last_seg = MTK_WDMA_DESC_CTRL_LAST_SEG0;
564+ dev->tx_buf_ring.desc_size = sizeof(struct mtk_wdma_desc);
565+ } else if (dev->hw->version == 3) {
566+ ring_size = MTK_WED_TX_BM_DMA_SIZE;
567+ pkt_nums = MTK_WED_TX_BM_PKT_CNT;
568+ dev->tx_buf_ring.desc_size = sizeof(struct mtk_rxbm_desc);
569 }
570
571 n_pages = ring_size / MTK_WED_BUF_PER_PAGE;
572@@ -267,18 +350,20 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
573 if (!page_list)
574 return -ENOMEM;
575
576- dev->buf_ring.size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
577- dev->buf_ring.pages = page_list;
578+ dev->tx_buf_ring.size = ring_size;
579+ dev->tx_buf_ring.pages = page_list;
580+ dev->tx_buf_ring.pkt_nums = pkt_nums;
581
582- desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
583- &desc_phys, GFP_KERNEL);
584- if (!desc)
585+ desc_ptr = dma_alloc_coherent(dev->hw->dev,
586+ ring_size * dev->tx_buf_ring.desc_size,
587+ &desc_phys, GFP_KERNEL);
588+ if (!desc_ptr)
589 return -ENOMEM;
590
591- dev->buf_ring.desc = desc;
592- dev->buf_ring.desc_phys = desc_phys;
593+ dev->tx_buf_ring.desc = desc_ptr;
594+ dev->tx_buf_ring.desc_phys = desc_phys;
595
596- for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
597+ for (i = 0, page_idx = 0; i < pkt_nums; i += MTK_WED_BUF_PER_PAGE) {
598 dma_addr_t page_phys, buf_phys;
599 struct page *page;
600 void *buf;
601@@ -295,7 +380,10 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
602 return -ENOMEM;
603 }
604
605- page_list[page_idx++] = page;
606+ page_list[page_idx].addr = page;
607+ page_list[page_idx].addr_phys = page_phys;
608+ page_idx++;
609+
610 dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
611 DMA_BIDIRECTIONAL);
612
613@@ -303,19 +391,23 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
614 buf_phys = page_phys;
615
616 for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
617- u32 txd_size;
618-
619- txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
620-
621+ desc = desc_ptr;
622 desc->buf0 = buf_phys;
623- desc->buf1 = buf_phys + txd_size;
624- desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0,
625- txd_size) |
626- FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
627- MTK_WED_BUF_SIZE - txd_size) |
628- last_seg;
629- desc->info = 0;
630- desc++;
631+ if (dev->hw->version < 3) {
632+ u32 txd_size;
633+
634+ txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
635+ desc->buf1 = buf_phys + txd_size;
636+ desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0,
637+ txd_size) |
638+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
639+ MTK_WED_BUF_SIZE - txd_size) |
640+ last_seg;
641+ desc->info = 0;
642+ } else {
643+ desc->ctrl = token << 16;
644+ }
645+ desc_ptr += dev->tx_buf_ring.desc_size;
646
647 buf += MTK_WED_BUF_SIZE;
648 buf_phys += MTK_WED_BUF_SIZE;
649@@ -325,15 +417,18 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
650 DMA_BIDIRECTIONAL);
651 }
652
653- return 0;
654+ if (dev->hw->version == 3)
655+ ret = mtk_wed_pao_buffer_alloc(dev);
656+
657+ return ret;
658 }
659
660 static void
661-mtk_wed_free_buffer(struct mtk_wed_device *dev)
662+mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
663 {
664- struct mtk_wdma_desc *desc = dev->buf_ring.desc;
665- void **page_list = dev->buf_ring.pages;
666- int ring_size, page_idx;
667+ struct mtk_rxbm_desc *desc = dev->tx_buf_ring.desc;
668+ struct dma_page_info *page_list = dev->tx_buf_ring.pages;
669+ int ring_size, page_idx, pkt_nums;
670 int i;
671
672 if (!page_list)
673@@ -342,33 +437,33 @@ mtk_wed_free_buffer(struct mtk_wed_device *dev)
674 if (!desc)
675 goto free_pagelist;
676
677- if (dev->ver == MTK_WED_V1) {
678- ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
679- } else {
680- ring_size = MTK_WED_VLD_GROUP_SIZE * MTK_WED_PER_GROUP_PKT +
681- MTK_WED_WDMA_RING_SIZE * 2;
682+ pkt_nums = ring_size = dev->tx_buf_ring.size;
683+ if (dev->hw->version == 3) {
684+ mtk_wed_pao_free_buffer(dev);
685+ pkt_nums = dev->tx_buf_ring.pkt_nums;
686 }
687
688- for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
689- void *page = page_list[page_idx++];
690+ for (i = 0, page_idx = 0; i < pkt_nums; i += MTK_WED_BUF_PER_PAGE) {
691+ void *page = page_list[page_idx].addr;
692
693 if (!page)
694 break;
695
696- dma_unmap_page(dev->hw->dev, desc[i].buf0,
697+ dma_unmap_page(dev->hw->dev, page_list[page_idx].addr_phys,
698 PAGE_SIZE, DMA_BIDIRECTIONAL);
699 __free_page(page);
700+ page_idx++;
701 }
702
703- dma_free_coherent(dev->hw->dev, ring_size * sizeof(*desc),
704- desc, dev->buf_ring.desc_phys);
705+ dma_free_coherent(dev->hw->dev, ring_size * dev->tx_buf_ring.desc_size,
706+ dev->tx_buf_ring.desc, dev->tx_buf_ring.desc_phys);
707
708 free_pagelist:
709 kfree(page_list);
710 }
711
712 static int
713-mtk_wed_rx_bm_alloc(struct mtk_wed_device *dev)
714+mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
715 {
716 struct mtk_rxbm_desc *desc;
717 dma_addr_t desc_phys;
718@@ -389,7 +484,7 @@ mtk_wed_rx_bm_alloc(struct mtk_wed_device *dev)
719 }
720
721 static void
722-mtk_wed_free_rx_bm(struct mtk_wed_device *dev)
723+mtk_wed_free_rx_buffer(struct mtk_wed_device *dev)
724 {
725 struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc;
726 int ring_size = dev->rx_buf_ring.size;
727@@ -403,6 +498,113 @@ mtk_wed_free_rx_bm(struct mtk_wed_device *dev)
728 desc, dev->rx_buf_ring.desc_phys);
729 }
730
731+/* TODO */
732+static int
733+mtk_wed_rx_page_buffer_alloc(struct mtk_wed_device *dev)
734+{
735+ int ring_size = dev->wlan.rx_nbuf, buf_num = MTK_WED_RX_PG_BM_CNT;
736+ struct mtk_rxbm_desc *desc;
737+ dma_addr_t desc_phys;
738+ struct dma_page_info *page_list;
739+ int n_pages, page_idx;
740+ int i;
741+
742+ n_pages = buf_num / MTK_WED_RX_PAGE_BUF_PER_PAGE;
743+
744+ page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
745+ if (!page_list)
746+ return -ENOMEM;
747+
748+ dev->rx_page_buf_ring.size = ring_size & ~(MTK_WED_BUF_PER_PAGE - 1);
749+ dev->rx_page_buf_ring.pages = page_list;
750+ dev->rx_page_buf_ring.pkt_nums = buf_num;
751+
752+ desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
753+ &desc_phys, GFP_KERNEL);
754+ if (!desc)
755+ return -ENOMEM;
756+
757+ dev->rx_page_buf_ring.desc = desc;
758+ dev->rx_page_buf_ring.desc_phys = desc_phys;
759+
760+ for (i = 0, page_idx = 0; i < buf_num; i += MTK_WED_RX_PAGE_BUF_PER_PAGE) {
761+ dma_addr_t page_phys, buf_phys;
762+ struct page *page;
763+ void *buf;
764+ int s;
765+
766+ page = __dev_alloc_pages(GFP_KERNEL, 0);
767+ if (!page)
768+ return -ENOMEM;
769+
770+ page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
771+ DMA_BIDIRECTIONAL);
772+ if (dma_mapping_error(dev->hw->dev, page_phys)) {
773+ __free_page(page);
774+ return -ENOMEM;
775+ }
776+
777+		page_list[page_idx].addr = page;
778+		page_list[page_idx].addr_phys = page_phys;
779+ page_idx++;
780+
781+ dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
782+ DMA_BIDIRECTIONAL);
783+
784+ buf = page_to_virt(page);
785+ buf_phys = page_phys;
786+
787+ for (s = 0; s < MTK_WED_RX_PAGE_BUF_PER_PAGE; s++) {
788+
789+ desc->buf0 = cpu_to_le32(buf_phys);
790+ desc++;
791+
792+ buf += MTK_WED_PAGE_BUF_SIZE;
793+ buf_phys += MTK_WED_PAGE_BUF_SIZE;
794+ }
795+
796+ dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
797+ DMA_BIDIRECTIONAL);
798+ }
799+
800+ return 0;
801+}
802+
803+static void
804+mtk_wed_rx_page_free_buffer(struct mtk_wed_device *dev)
805+{
806+ struct mtk_rxbm_desc *desc = dev->rx_page_buf_ring.desc;
807+ struct dma_page_info *page_list = dev->rx_page_buf_ring.pages;
808+ int ring_size, page_idx;
809+ int i;
810+
811+ if (!page_list)
812+ return;
813+
814+ if (!desc)
815+ goto free_pagelist;
816+
817+ ring_size = dev->rx_page_buf_ring.pkt_nums;
818+
819+ for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_RX_PAGE_BUF_PER_PAGE) {
820+ void *page = page_list[page_idx].addr;
821+
822+ if (!page)
823+ break;
824+
825+ dma_unmap_page(dev->hw->dev, page_list[page_idx].addr_phys,
826+ PAGE_SIZE, DMA_BIDIRECTIONAL);
827+ __free_page(page);
828+ page_idx++;
829+ }
830+
developera60ce2b2023-06-16 13:07:18 +0800831+ dma_free_coherent(dev->hw->dev, dev->rx_page_buf_ring.size * sizeof(*desc),
developer23f9f0f2023-06-15 13:06:25 +0800832+ desc, dev->rx_page_buf_ring.desc_phys);
833+
834+free_pagelist:
835+ kfree(page_list);
836+}
837+
838 static void
839 mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, int scale)
840 {
841@@ -416,19 +618,25 @@ mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, int sca
842 static void
843 mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
844 {
845- int i;
846+ int i, scale = dev->hw->version > 1 ? 2 : 1;
847
848 for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
849- mtk_wed_free_ring(dev, &dev->tx_ring[i], 1);
850+ if (!(dev->rx_ring[i].flags & MTK_WED_RING_CONFIGURED))
851+ mtk_wed_free_ring(dev, &dev->tx_ring[i], 1);
852+
853 for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
854- mtk_wed_free_ring(dev, &dev->tx_wdma[i], dev->ver);
855+ if ((dev->rx_ring[i].flags & MTK_WED_RING_CONFIGURED))
856+ mtk_wed_free_ring(dev, &dev->tx_wdma[i], scale);
857 }
858
859 static void
860 mtk_wed_free_rx_rings(struct mtk_wed_device *dev)
861 {
862- mtk_wed_free_rx_bm(dev);
863+ mtk_wed_free_rx_buffer(dev);
864 mtk_wed_free_ring(dev, &dev->rro.rro_ring, 1);
865+
866+ if (dev->wlan.hwrro)
867+ mtk_wed_rx_page_free_buffer(dev);
868 }
869
870 static void
871@@ -437,7 +645,7 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
872 u32 wdma_mask;
873
874 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
875- if (dev->ver > MTK_WED_V1)
876+ if (mtk_wed_get_rx_capa(dev))
877 wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
878 GENMASK(1, 0));
879 /* wed control cr set */
880@@ -447,7 +655,7 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
881 MTK_WED_CTRL_WED_TX_BM_EN |
882 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
883
884- if (dev->ver == MTK_WED_V1) {
885+ if (dev->hw->version == 1) {
886 wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
887 MTK_WED_PCIE_INT_TRIGGER_STATUS);
888
889@@ -458,6 +666,8 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
890 wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
891 MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
892 } else {
893+ if (dev->hw->version == 3)
894+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_TKID_ALI_EN);
895
896 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
897 MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
898@@ -475,18 +685,20 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
899 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
900 dev->wlan.txfree_tbit));
901
902- wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
903- MTK_WED_WPDMA_INT_CTRL_RX0_EN |
904- MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
905- MTK_WED_WPDMA_INT_CTRL_RX1_EN |
906- MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
907- FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
908- dev->wlan.rx_tbit[0]) |
909- FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
910- dev->wlan.rx_tbit[1]));
911+ if (mtk_wed_get_rx_capa(dev))
912+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
913+ MTK_WED_WPDMA_INT_CTRL_RX0_EN |
914+ MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
915+ MTK_WED_WPDMA_INT_CTRL_RX1_EN |
916+ MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
917+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
918+ dev->wlan.rx_tbit[0]) |
919+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
920+ dev->wlan.rx_tbit[1]));
921 }
922+
923 wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
924- if (dev->ver == MTK_WED_V1) {
925+ if (dev->hw->version == 1) {
926 wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
927 } else {
928 wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
929@@ -506,6 +718,21 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
930 {
931 u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
932
933+ switch (dev->hw->version) {
934+ case 1:
935+ mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
936+ break;
937+	case 2:
938+ mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH2 |
939+ MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH2 |
940+ MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
941+ MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
942+ break;
943+ case 3:
944+ mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT;
945+ break;
946+ }
947+
948 if (!dev->hw->num_flows)
949 mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
950
951@@ -514,31 +741,86 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
952 }
953
954 static void
955-mtk_wed_set_512_support(struct mtk_wed_device *dev, bool en)
956+mtk_wed_pao_init(struct mtk_wed_device *dev)
957 {
958- if (en) {
959- wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
960- wed_w32(dev, MTK_WED_TXP_DW1,
961- FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
962- } else {
963- wed_w32(dev, MTK_WED_TXP_DW1,
964- FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
965- wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
966+ struct mtk_wed_pao *pao = dev->hw->wed_pao;
967+ int i;
968+
969+ for (i = 0; i < 32; i++)
970+ wed_w32(dev, MTK_WED_PAO_HIFTXD_BASE_L(i),
971+ pao->hif_txd_phys[i]);
972+
973+	/* init all sta parameters */
974+ wed_w32(dev, MTK_WED_PAO_STA_INFO_INIT, MTK_WED_PAO_STA_RMVL |
975+ MTK_WED_PAO_STA_WTBL_HDRT_MODE |
976+ FIELD_PREP(MTK_WED_PAO_STA_MAX_AMSDU_LEN,
977+ dev->wlan.max_amsdu_len >> 8) |
978+ FIELD_PREP(MTK_WED_PAO_STA_MAX_AMSDU_NUM,
979+ dev->wlan.max_amsdu_nums));
980+
981+ wed_w32(dev, MTK_WED_PAO_STA_INFO, MTK_WED_PAO_STA_INFO_DO_INIT);
982+
983+ if (mtk_wed_poll_busy(dev, MTK_WED_PAO_STA_INFO,
984+ MTK_WED_PAO_STA_INFO_DO_INIT)) {
985+ dev_err(dev->hw->dev, "mtk_wed%d: pao init failed!\n",
986+ dev->hw->index);
987+ return;
988 }
989+
990+ /* init pao txd src */
991+ wed_set(dev, MTK_WED_PAO_HIFTXD_CFG,
992+ FIELD_PREP(MTK_WED_PAO_HIFTXD_SRC, dev->hw->index));
993+
994+ /* init qmem */
995+ wed_set(dev, MTK_WED_PAO_PSE, MTK_WED_PAO_PSE_RESET);
996+ if (mtk_wed_poll_busy(dev, MTK_WED_PAO_MON_QMEM_STS1, BIT(29))) {
997+		pr_info("%s: pao qmem init failed\n", __func__);
998+ return;
999+ }
1000+
1001+ /* eagle E1 PCIE1 tx ring 22 flow control issue */
1002+ if (dev->wlan.chip_id == 0x7991) {
1003+ wed_clr(dev, MTK_WED_PAO_AMSDU_FIFO,
1004+ MTK_WED_PAO_AMSDU_IS_PRIOR0_RING);
1005+ }
1006+
1007+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_PAO_EN);
1008+
1009+ return;
1010 }
1011
1012-static void
1013-mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
1014+static int
1015+mtk_wed_hwrro_init(struct mtk_wed_device *dev)
1016 {
1017-#define MTK_WFMDA_RX_DMA_EN BIT(2)
1018+ if (!mtk_wed_get_rx_capa(dev))
1019+ return 0;
developer18d0d712023-08-23 11:50:09 +08001020+
developer23f9f0f2023-06-15 13:06:25 +08001021+ wed_set(dev, MTK_WED_RRO_PG_BM_RX_DMAM,
1022+ FIELD_PREP(MTK_WED_RRO_PG_BM_RX_SDL0, 128));
developer18d0d712023-08-23 11:50:09 +08001023
developer23f9f0f2023-06-15 13:06:25 +08001024+ wed_w32(dev, MTK_WED_RRO_PG_BM_BASE,
1025+ dev->rx_page_buf_ring.desc_phys);
1026+
1027+ wed_w32(dev, MTK_WED_RRO_PG_BM_INIT_PTR,
1028+ MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX |
1029+ FIELD_PREP(MTK_WED_RRO_PG_BM_SW_TAIL_IDX,
1030+ MTK_WED_RX_PG_BM_CNT));
1031+
1032+ /* enable rx_page_bm to fetch dmad */
1033+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN);
1034+
1035+ return 0;
1036+}
developer7ccd1942023-07-07 16:15:05 +08001037+
developer23f9f0f2023-06-15 13:06:25 +08001038+static int
1039+mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev,
1040+ struct mtk_wed_ring *ring)
1041+{
1042 int timeout = 3;
1043- u32 cur_idx, regs;
1044+ u32 cur_idx;
1045
1046 do {
1047- regs = MTK_WED_WPDMA_RING_RX_DATA(idx) +
1048- MTK_WED_RING_OFS_CPU_IDX;
1049- cur_idx = wed_r32(dev, regs);
1050+ cur_idx = readl(ring->wpdma + MTK_WED_RING_OFS_CPU_IDX);
1051 if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
1052 break;
1053
1054@@ -546,70 +828,133 @@ mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
1055 timeout--;
1056 } while (timeout > 0);
1057
1058- if (timeout) {
1059- unsigned int val;
1060+ return timeout;
1061+}
1062
1063- val = wifi_r32(dev, dev->wlan.wpdma_rx_glo -
1064- dev->wlan.phy_base);
1065- val |= MTK_WFMDA_RX_DMA_EN;
1066
1067- wifi_w32(dev, dev->wlan.wpdma_rx_glo -
1068- dev->wlan.phy_base, val);
1069+static void
1070+mtk_wed_set_512_support(struct mtk_wed_device *dev, bool en)
1071+{
1072+ if (en) {
1073+ wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
1074+ wed_w32(dev, MTK_WED_TXP_DW1,
1075+ FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
1076 } else {
1077- dev_err(dev->hw->dev, "mtk_wed%d: rx(%d) dma enable failed!\n",
1078- dev->hw->index, idx);
1079+ wed_w32(dev, MTK_WED_TXP_DW1,
1080+ FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
1081+ wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
1082 }
1083 }
1084
1085 static void
1086 mtk_wed_dma_enable(struct mtk_wed_device *dev)
1087 {
1088- wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
1089- MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
1090+#define MTK_WFMDA_RX_DMA_EN BIT(2)
1091+
1092+ if (dev->hw->version == 1)
1093+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
1094+ MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
1095
1096 wed_set(dev, MTK_WED_GLO_CFG,
1097 MTK_WED_GLO_CFG_TX_DMA_EN |
1098 MTK_WED_GLO_CFG_RX_DMA_EN);
1099+
1100+ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG,
1101+ FIELD_PREP(MTK_WED_WDMA_RX_PREF_BURST_SIZE, 0x10) |
1102+ FIELD_PREP(MTK_WED_WDMA_RX_PREF_LOW_THRES, 0x8));
1103+ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
1104+ MTK_WED_WDMA_RX_PREF_DDONE2_EN);
1105+
1106+ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, MTK_WED_WDMA_RX_PREF_EN);
1107+
1108 wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
1109 MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
1110- MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
1111+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN |
1112+ MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR);
1113 wed_set(dev, MTK_WED_WDMA_GLO_CFG,
1114 MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
1115
1116 wdma_set(dev, MTK_WDMA_GLO_CFG,
1117- MTK_WDMA_GLO_CFG_TX_DMA_EN |
1118+ MTK_WDMA_GLO_CFG_TX_DMA_EN /*|
1119 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
1120- MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
1121+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES*/);
1122
1123- if (dev->ver == MTK_WED_V1) {
1124+ if (dev->hw->version == 1) {
1125 wdma_set(dev, MTK_WDMA_GLO_CFG,
1126 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
1127 } else {
1128 int idx = 0;
1129
1130- wed_set(dev, MTK_WED_WPDMA_CTRL,
1131- MTK_WED_WPDMA_CTRL_SDL1_FIXED);
1132-
1133- wed_set(dev, MTK_WED_WDMA_GLO_CFG,
1134- MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
1135- MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
1136+ if (mtk_wed_get_rx_capa(dev))
1137+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
1138+ MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
1139+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
1140
1141 wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
1142 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
1143 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
1144
1145+ if (dev->hw->version == 3) {
1146+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
1147+ MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST);
1148+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
1149+ MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK |
1150+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK |
1151+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4);
1152+
1153+ wdma_set(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
1154+ //wdma_w32(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
1155+ if (mtk_wed_get_rx_capa(dev)) {
1156+ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
1157+ MTK_WED_WPDMA_RX_D_PREF_EN |
1158+ FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE, 0x10) |
1159+ FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_LOW_THRES, 0x8));
1160+
1161+ wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN);
1162+
1163+ wdma_set(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
1164+
1165+ wdma_set(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
1166+ }
1167+ }
1168+
1169 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
1170 MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
1171 MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
1172
1173+ if (!mtk_wed_get_rx_capa(dev))
1174+ return;
1175+
1176+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RXD_READ_LEN);
1177 wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
1178 MTK_WED_WPDMA_RX_D_RX_DRV_EN |
1179 FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
1180 FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
1181 0x2));
1182
1183- for (idx = 0; idx < dev->hw->ring_num; idx++)
1184- mtk_wed_check_wfdma_rx_fill(dev, idx);
1185+ for (idx = 0; idx < dev->hw->ring_num; idx++) {
1186+ struct mtk_wed_ring *ring = &dev->rx_ring[idx];
1187+
1188+			if (!(ring->flags & MTK_WED_RING_CONFIGURED))
1189+ continue;
1190+
1191+			if (mtk_wed_check_wfdma_rx_fill(dev, ring)) {
1192+ unsigned int val;
1193+
1194+ val = wifi_r32(dev, dev->wlan.wpdma_rx_glo -
1195+ dev->wlan.phy_base);
1196+ val |= MTK_WFMDA_RX_DMA_EN;
1197+
1198+ wifi_w32(dev, dev->wlan.wpdma_rx_glo -
1199+ dev->wlan.phy_base, val);
1200+
1201+				dev_info(dev->hw->dev, "mtk_wed%d: rx(%d) dma enabled successfully\n",
1202+ dev->hw->index, idx);
1203+ } else {
1204+ dev_err(dev->hw->dev, "mtk_wed%d: rx(%d) dma enable failed!\n",
1205+ dev->hw->index, idx);
1206+ }
1207+ }
1208 }
1209 }
1210
1211@@ -644,15 +989,20 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
1212 MTK_WED_WPDMA_RX_D_RX_DRV_EN);
1213 wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
1214 MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
1215- }
1216
1217- mtk_wed_set_512_support(dev, false);
1218+ if (dev->hw->version == 3 && mtk_wed_get_rx_capa(dev)) {
1219+ wdma_clr(dev, MTK_WDMA_PREF_TX_CFG,
1220+ MTK_WDMA_PREF_TX_CFG_PREF_EN);
1221+ wdma_clr(dev, MTK_WDMA_PREF_RX_CFG,
1222+ MTK_WDMA_PREF_RX_CFG_PREF_EN);
1223+ }
1224+ }
1225 }
1226
1227 static void
1228 mtk_wed_stop(struct mtk_wed_device *dev)
1229 {
1230- if (dev->ver > MTK_WED_V1) {
1231+ if (mtk_wed_get_rx_capa(dev)) {
1232 wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
1233 wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0);
1234 }
developera8336302023-07-07 11:29:01 +08001235@@ -677,13 +1027,21 @@ mtk_wed_deinit(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001236 MTK_WED_CTRL_WED_TX_BM_EN |
1237 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
1238
1239- if (dev->hw->ver == 1)
1240+ if (dev->hw->version == 1)
1241 return;
1242
1243 wed_clr(dev, MTK_WED_CTRL,
1244 MTK_WED_CTRL_RX_ROUTE_QM_EN |
1245 MTK_WED_CTRL_WED_RX_BM_EN |
1246 MTK_WED_CTRL_RX_RRO_QM_EN);
1247+
1248+ if (dev->hw->version == 3) {
1249+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_PAO_EN);
1250+ wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_TX_PAO);
1251+ wed_clr(dev, MTK_WED_PCIE_INT_CTRL,
1252+ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
1253+ MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER);
1254+ }
1255 }
1256
1257 static void
developera8336302023-07-07 11:29:01 +08001258@@ -702,9 +1060,9 @@ mtk_wed_detach(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001259
1260 mtk_wdma_tx_reset(dev);
1261
1262- mtk_wed_free_buffer(dev);
1263+ mtk_wed_free_tx_buffer(dev);
1264 mtk_wed_free_tx_rings(dev);
1265- if (dev->ver > MTK_WED_V1) {
1266+ if (mtk_wed_get_rx_capa(dev)) {
1267 mtk_wed_wo_reset(dev);
1268 mtk_wed_free_rx_rings(dev);
1269 mtk_wed_wo_exit(hw);
developera8336302023-07-07 11:29:01 +08001270@@ -731,24 +1089,29 @@ mtk_wed_detach(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001271 static void
1272 mtk_wed_bus_init(struct mtk_wed_device *dev)
1273 {
1274-#define PCIE_BASE_ADDR0 0x11280000
1275+ switch (dev->wlan.bus_type) {
1276+ case MTK_WED_BUS_PCIE: {
1277+ struct device_node *np = dev->hw->eth->dev->of_node;
1278+ struct regmap *regs;
developer23f9f0f2023-06-15 13:06:25 +08001279
1280- if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
1281- struct device_node *node;
1282- void __iomem * base_addr;
1283- u32 value = 0;
1284+ if (dev->hw->version == 2) {
1285+ regs = syscon_regmap_lookup_by_phandle(np,
1286+ "mediatek,wed-pcie");
1287+ if (IS_ERR(regs))
1288+ break;
1289
1290- node = of_parse_phandle(dev->hw->node, "mediatek,wed_pcie", 0);
1291- if (!node) {
1292- pr_err("%s: no wed_pcie node\n", __func__);
1293- return;
1294+ regmap_update_bits(regs, 0, BIT(0), BIT(0));
1295 }
1296
1297- base_addr = of_iomap(node, 0);
1298-
1299- value = readl(base_addr);
1300- value |= BIT(0);
1301- writel(value, base_addr);
1302+ if (dev->wlan.msi) {
1303+			wed_w32(dev, MTK_WED_PCIE_CFG_INTM, dev->hw->pci_base | 0xc08);
1304+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, dev->hw->pci_base | 0xc04);
1305+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(8));
1306+ } else {
1307+ wed_w32(dev, MTK_WED_PCIE_CFG_INTM, dev->hw->pci_base | 0x180);
1308+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, dev->hw->pci_base | 0x184);
1309+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
1310+ }
1311
developera8336302023-07-07 11:29:01 +08001312 wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
1313 FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));
developerb74821e2023-09-08 14:19:59 +08001314@@ -756,45 +1119,53 @@ mtk_wed_bus_init(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001315 /* pcie interrupt control: pola/source selection */
1316 wed_set(dev, MTK_WED_PCIE_INT_CTRL,
1317 MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
1318- FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
1319- wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
developera8336302023-07-07 11:29:01 +08001320-
developer23f9f0f2023-06-15 13:06:25 +08001321- value = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
1322- value = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
1323- wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
1324- wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);
developera8336302023-07-07 11:29:01 +08001325+ MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER |
1326+ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, dev->hw->index));
1327
developer18d0d712023-08-23 11:50:09 +08001328- value = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
1329- value = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
1330-
developer23f9f0f2023-06-15 13:06:25 +08001331- wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
1332- wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
1333-
1334- /* pola setting */
1335- value = wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
1336- wed_set(dev, MTK_WED_PCIE_INT_CTRL,
1337- MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
1338- } else if (dev->wlan.bus_type == MTK_WED_BUS_AXI) {
1339+ break;
1340+ }
1341+ case MTK_WED_BUS_AXI:
1342 wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
1343 MTK_WED_WPDMA_INT_CTRL_SIG_SRC |
1344 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0));
1345+ break;
1346+ default:
1347+ break;
1348 }
1349+
1350 return;
1351 }
1352
1353 static void
1354 mtk_wed_set_wpdma(struct mtk_wed_device *dev)
1355 {
1356- if (dev->ver > MTK_WED_V1) {
1357+ if (dev->hw->version == 1) {
1358+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
1359+ } else {
1360+ mtk_wed_bus_init(dev);
1361+
1362 wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
1363 wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
1364- wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
1365+ wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
1366 wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
1367
1368- wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
1369- wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
1370- } else {
1371- wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
1372+ if (mtk_wed_get_rx_capa(dev)) {
1373+ int i;
1374+
1375+ wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
developerb74821e2023-09-08 14:19:59 +08001376+ wed_w32(dev, MTK_WED_WPDMA_RX_RING0, dev->wlan.wpdma_rx[0]);
1377+ if (dev->wlan.wpdma_rx[1])
1378+ wed_w32(dev, MTK_WED_WPDMA_RX_RING1, dev->wlan.wpdma_rx[1]);
developer23f9f0f2023-06-15 13:06:25 +08001379+
1380+ if (dev->wlan.hwrro) {
developerb74821e2023-09-08 14:19:59 +08001381+ wed_w32(dev, MTK_WED_RRO_RX_D_CFG(0), dev->wlan.wpdma_rx_rro[0]);
1382+ wed_w32(dev, MTK_WED_RRO_RX_D_CFG(1), dev->wlan.wpdma_rx_rro[1]);
1383+ for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) {
1384+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING_CFG(i),
1385+ dev->wlan.wpdma_rx_pg + i * 0x10);
developer23f9f0f2023-06-15 13:06:25 +08001386+ }
1387+ }
1388+ }
1389 }
1390 }
1391
developerb74821e2023-09-08 14:19:59 +08001392@@ -806,21 +1177,25 @@ mtk_wed_hw_init_early(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001393 mtk_wed_deinit(dev);
1394 mtk_wed_reset(dev, MTK_WED_RESET_WED);
1395
1396- if (dev->ver > MTK_WED_V1)
1397- mtk_wed_bus_init(dev);
1398-
1399 mtk_wed_set_wpdma(dev);
1400
1401- mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
1402- MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
1403- MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
1404- set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
1405- MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
1406- MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
1407+ if (dev->hw->version == 3) {
1408+ mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE;
1409+ set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2);
1410+ } else {
1411+ mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
1412+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
1413+ MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
1414+ set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
1415+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
1416+ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
1417+ }
1418+
1419 wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
1420
1421- if (dev->ver == MTK_WED_V1) {
1422+ if (dev->hw->version == 1) {
1423 u32 offset;
1424+
1425 offset = dev->hw->index ? 0x04000400 : 0;
1426 wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
1427 wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
developerb74821e2023-09-08 14:19:59 +08001428@@ -907,11 +1282,16 @@ mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001429 } while (1);
1430
1431 /* configure RX_ROUTE_QM */
1432- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
1433- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
1434- wed_set(dev, MTK_WED_RTQM_GLO_CFG,
1435- FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
1436- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
1437+ if (dev->hw->version == 2) {
1438+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
1439+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
1440+ wed_set(dev, MTK_WED_RTQM_GLO_CFG,
1441+ FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
1442+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
1443+ } else {
1444+ wed_set(dev, MTK_WED_RTQM_ENQ_CFG0,
1445+ FIELD_PREP(MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT, 0x3 + dev->hw->index));
1446+ }
1447
1448 /* enable RX_ROUTE_QM */
1449 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
developerb74821e2023-09-08 14:19:59 +08001450@@ -920,23 +1300,45 @@ mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001451 static void
1452 mtk_wed_tx_hw_init(struct mtk_wed_device *dev)
1453 {
1454- int size = dev->buf_ring.size;
1455+ int size = dev->wlan.nbuf;
1456 int rev_size = MTK_WED_TX_RING_SIZE / 2;
1457- int thr = 1;
1458+ int thr_lo = 1, thr_hi = 1;
1459
1460- if (dev->ver > MTK_WED_V1) {
1461+ if (dev->hw->version == 1) {
1462+ wed_w32(dev, MTK_WED_TX_BM_CTRL,
1463+ MTK_WED_TX_BM_CTRL_PAUSE |
1464+ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, size / 128) |
1465+ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, rev_size / 128));
1466+ } else {
1467 size = MTK_WED_WDMA_RING_SIZE * ARRAY_SIZE(dev->tx_wdma) +
1468- dev->buf_ring.size;
1469+ dev->tx_buf_ring.size;
1470 rev_size = size;
1471- thr = 0;
1472+ thr_lo = 0;
1473+ thr_hi = MTK_WED_TX_BM_DYN_THR_HI;
1474+
1475+ wed_w32(dev, MTK_WED_TX_TKID_CTRL,
1476+ MTK_WED_TX_TKID_CTRL_PAUSE |
1477+ FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
1478+ size / 128) |
1479+ FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
1480+ size / 128));
1481+
1482+ /* return SKBID + SDP back to bm */
1483+ if (dev->ver == 3) {
1484+ wed_set(dev, MTK_WED_TX_TKID_CTRL,
1485+ MTK_WED_TX_TKID_CTRL_FREE_FORMAT);
1486+ size = dev->wlan.nbuf;
1487+ rev_size = size;
1488+ } else {
1489+ wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
1490+ FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
1491+ MTK_WED_TX_TKID_DYN_THR_HI);
1492+ }
1493 }
1494
1495- wed_w32(dev, MTK_WED_TX_BM_CTRL,
1496- MTK_WED_TX_BM_CTRL_PAUSE |
1497- FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, size / 128) |
1498- FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, rev_size / 128));
1499+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
1500
1501- wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);
1502+ wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys);
1503
1504 wed_w32(dev, MTK_WED_TX_BM_TKID,
1505 FIELD_PREP(MTK_WED_TX_BM_TKID_START,
developerb74821e2023-09-08 14:19:59 +08001506@@ -946,25 +1348,44 @@ mtk_wed_tx_hw_init(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001507
1508 wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
1509
1510- wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
1511- FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, thr) |
1512- MTK_WED_TX_BM_DYN_THR_HI);
1513+ if (dev->hw->version < 3)
1514+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
1515+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, thr_lo) |
1516+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, thr_hi));
1517+ else {
1518+ /* change to new bm */
1519+ wed_w32(dev, MTK_WED_TX_BM_INIT_PTR, dev->tx_buf_ring.pkt_nums |
developerb74821e2023-09-08 14:19:59 +08001520+ MTK_WED_TX_BM_INIT_SW_TAIL_IDX);
developer23f9f0f2023-06-15 13:06:25 +08001521+ wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_LEGACY_EN);
1522+ }
1523
1524- if (dev->ver > MTK_WED_V1) {
1525+ if (dev->hw->version != 1) {
1526 wed_w32(dev, MTK_WED_TX_TKID_CTRL,
1527 MTK_WED_TX_TKID_CTRL_PAUSE |
1528 FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
1529- dev->buf_ring.size / 128) |
1530+ size / 128) |
1531 FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
1532- dev->buf_ring.size / 128));
1533- wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
1534- FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
1535- MTK_WED_TX_TKID_DYN_THR_HI);
1536+ size / 128));
1537+
1538+ /* return SKBID + SDP back to bm */
1539+ if (dev->ver == 3)
1540+ wed_set(dev, MTK_WED_TX_TKID_CTRL,
1541+ MTK_WED_TX_TKID_CTRL_FREE_FORMAT);
1542+ else
1543+ wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
1544+ FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
1545+ MTK_WED_TX_TKID_DYN_THR_HI);
1546 }
1547- mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
1548+ wed_w32(dev, MTK_WED_TX_BM_TKID,
1549+ FIELD_PREP(MTK_WED_TX_BM_TKID_START,
1550+ dev->wlan.token_start) |
1551+ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
1552+ dev->wlan.token_start + dev->wlan.nbuf - 1));
1553
1554+ wed_w32(dev, MTK_WED_TX_BM_INIT_PTR, dev->tx_buf_ring.pkt_nums |
1555+ MTK_WED_TX_BM_INIT_SW_TAIL_IDX);
1556 wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
1557- if (dev->ver > MTK_WED_V1)
1558+ if (dev->hw->version != 1)
1559 wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
1560 }
1561
developerb74821e2023-09-08 14:19:59 +08001562@@ -977,7 +1398,26 @@ mtk_wed_rx_hw_init(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001563
1564 wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
1565
1566+ /* reset prefetch index of ring */
1567+ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX,
1568+ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
1569+ wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX,
1570+ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
1571+
1572+ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX,
1573+ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
1574+ wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX,
1575+ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
1576+
1577+ /* reset prefetch FIFO of ring */
1578+ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG,
1579+ MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR |
1580+ MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR);
1581+ wed_w32(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG, 0);
1582+
1583 mtk_wed_rx_bm_hw_init(dev);
1584+ if (dev->wlan.hwrro)
1585+ mtk_wed_hwrro_init(dev);
1586 mtk_wed_rro_hw_init(dev);
1587 mtk_wed_route_qm_hw_init(dev);
1588 }
developerb74821e2023-09-08 14:19:59 +08001589@@ -991,7 +1431,7 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001590 dev->init_done = true;
1591 mtk_wed_set_ext_int(dev, false);
1592 mtk_wed_tx_hw_init(dev);
1593- if (dev->ver > MTK_WED_V1)
1594+ if (mtk_wed_get_rx_capa(dev))
1595 mtk_wed_rx_hw_init(dev);
1596 }
1597
developerb74821e2023-09-08 14:19:59 +08001598@@ -1015,26 +1455,6 @@ mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size, int scale, bool tx)
developer23f9f0f2023-06-15 13:06:25 +08001599 }
1600 }
1601
1602-static u32
1603-mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
1604-{
1605- if (wed_r32(dev, reg) & mask)
1606- return true;
1607-
1608- return false;
1609-}
1610-
1611-static int
1612-mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
1613-{
1614- int sleep = 1000;
1615- int timeout = 100 * sleep;
1616- u32 val;
1617-
1618- return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
1619- timeout, false, dev, reg, mask);
1620-}
1621-
1622 static void
1623 mtk_wed_rx_reset(struct mtk_wed_device *dev)
1624 {
developerb74821e2023-09-08 14:19:59 +08001625@@ -1133,7 +1553,7 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001626 mtk_wed_ring_reset(desc, MTK_WED_RX_RING_SIZE, 1, false);
1627 }
1628
1629- mtk_wed_free_rx_bm(dev);
1630+ mtk_wed_free_rx_buffer(dev);
1631 }
1632
1633
1634@@ -1271,12 +1691,15 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev,
1635 			   int idx, int size, bool reset)
1636 {
1637 struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
1638+ int scale = dev->hw->version > 1 ? 2 : 1;
1639
1640 if(!reset)
1641 if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
1642- dev->ver, true))
1643+ scale, true))
1644 return -ENOMEM;
1645
1646+ wdma->flags |= MTK_WED_RING_CONFIGURED;
1647+
1648 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
1649 wdma->desc_phys);
1650 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
1651@@ -1296,12 +1719,31 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev,
1652 			   int idx, int size, bool reset)
1653 {
1654 struct mtk_wed_ring *wdma = &dev->rx_wdma[idx];
1655+ int scale = dev->hw->version > 1 ? 2 : 1;
1656
1657 if (!reset)
1658 if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
1659- dev->ver, true))
1660+ scale, true))
1661 return -ENOMEM;
1662
1663+ if (dev->hw->version == 3) {
1664+ struct mtk_wdma_desc *desc = wdma->desc;
1665+ int i;
1666+
1667+ for (i = 0; i < MTK_WED_WDMA_RING_SIZE; i++) {
1668+ desc->buf0 = 0;
1669+ desc->ctrl = MTK_WDMA_DESC_CTRL_DMA_DONE;
1670+ desc->buf1 = 0;
1671+ desc->info = MTK_WDMA_TXD0_DESC_INFO_DMA_DONE;
1672+ desc++;
1673+ desc->buf0 = 0;
1674+ desc->ctrl = MTK_WDMA_DESC_CTRL_DMA_DONE;
1675+ desc->buf1 = 0;
1676+ desc->info = MTK_WDMA_TXD1_DESC_INFO_DMA_DONE;
1677+ desc++;
1678+ }
1679+ }
1680+
1681 wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
1682 wdma->desc_phys);
1683 wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
1684@@ -1312,7 +1754,7 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev,
1685 		 MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);
1686 if (reset)
1687 mtk_wed_ring_reset(wdma->desc, MTK_WED_WDMA_RING_SIZE,
1688- dev->ver, true);
1689+ scale, true);
1690 if (idx == 0) {
1691 wed_w32(dev, MTK_WED_WDMA_RING_TX
1692 + MTK_WED_RING_OFS_BASE, wdma->desc_phys);
1693@@ -1395,7 +1837,7 @@ mtk_wed_send_msg(struct mtk_wed_device *dev, int cmd_id, void *data, int len)
1694 {
1695 struct mtk_wed_wo *wo = dev->hw->wed_wo;
1696
1697- if (dev->ver == MTK_WED_V1)
1698+ if (!mtk_wed_get_rx_capa(dev))
1699 return 0;
1700
1701 return mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, cmd_id, data, len, true);
1702@@ -1420,24 +1862,106 @@ mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb,
1703 	}
1704 }
1705
1706+static void
1707+mtk_wed_start_hwrro(struct mtk_wed_device *dev, u32 irq_mask)
1708+{
1709+ int idx, ret;
1710+
1711+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
1712+ wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
1713+
1714+ if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hwrro)
1715+ return;
1716+
1717+ wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR);
1718+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, MTK_WED_RRO_MSDU_PG_DRV_CLR);
1719+
1720+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_RX,
1721+ MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN |
1722+ MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR |
1723+ MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN |
1724+ MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR |
1725+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG,
1726+ dev->wlan.rro_rx_tbit[0]) |
1727+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG,
1728+ dev->wlan.rro_rx_tbit[1]));
1729+
1730+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG,
1731+ MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN |
1732+ MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR |
1733+ MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN |
1734+ MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR |
1735+ MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN |
1736+ MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR |
1737+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG,
1738+ dev->wlan.rx_pg_tbit[0]) |
1739+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG,
1740+ dev->wlan.rx_pg_tbit[1])|
1741+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG,
1742+ dev->wlan.rx_pg_tbit[2]));
1743+
1744+ /*
1745+ * RRO_MSDU_PG_RING2_CFG1_FLD_DRV_EN should be enabled after
1746+	 * WM FWDL has completed, otherwise the RRO_MSDU_PG ring may be broken
1747+ */
1748+ wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, MTK_WED_RRO_MSDU_PG_DRV_EN);
1749+
1750+ for (idx = 0; idx < MTK_WED_RX_QUEUES; idx++) {
1751+ struct mtk_wed_ring *ring = &dev->rx_rro_ring[idx];
1752+
1753+ if(!(ring->flags & MTK_WED_RING_CONFIGURED))
1754+ continue;
1755+
1756+ ret = mtk_wed_check_wfdma_rx_fill(dev, ring);
1757+ if (!ret)
1758+ dev_err(dev->hw->dev, "mtk_wed%d: rx_rro_ring(%d) init failed!\n",
1759+ dev->hw->index, idx);
1760+ }
1761+
1762+ for (idx = 0; idx < MTK_WED_RX_PAGE_QUEUES; idx++){
1763+ struct mtk_wed_ring *ring = &dev->rx_page_ring[idx];
1764+ if(!(ring->flags & MTK_WED_RING_CONFIGURED))
1765+ continue;
1766+
1767+ ret = mtk_wed_check_wfdma_rx_fill(dev, ring);
1768+ if (!ret)
1769+ dev_err(dev->hw->dev, "mtk_wed%d: rx_page_ring(%d) init failed!\n",
1770+ dev->hw->index, idx);
1771+ }
1772+}
1773+
1774 static void
1775 mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
1776 {
1777 int i, ret;
1778
1779- if (dev->ver > MTK_WED_V1)
1780- ret = mtk_wed_rx_bm_alloc(dev);
1781+ if (mtk_wed_get_rx_capa(dev)) {
1782+ ret = mtk_wed_rx_buffer_alloc(dev);
1783+ if (ret)
1784+ return;
1785+
1786+ if (dev->wlan.hwrro)
1787+ mtk_wed_rx_page_buffer_alloc(dev);
1788+ }
1789
1790 for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
1791 if (!dev->tx_wdma[i].desc)
1792 			mtk_wed_wdma_rx_ring_setup(dev, i, 16, false);
1793
1794+ for (i = 0; i < ARRAY_SIZE(dev->rx_page_ring); i++) {
1795+ u32 count = MTK_WED_RRO_MSDU_PG_CTRL0(i) +
1796+ MTK_WED_RING_OFS_COUNT;
1797+
1798+ if (!wed_r32(dev, count))
1799+ wed_w32(dev, count, 1);
1800+ }
1801+
1802 mtk_wed_hw_init(dev);
1803
1804 	mtk_wed_set_int(dev, irq_mask);
1805 mtk_wed_set_ext_int(dev, true);
1806
1807- if (dev->ver == MTK_WED_V1) {
1808+ if (dev->hw->version == 1) {
1809 u32 val;
1810
1811 val = dev->wlan.wpdma_phys |
1812@@ -1448,33 +1972,52 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
1813 		val |= BIT(1);
1814 val |= BIT(0);
1815 regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
1816- } else {
1817+ } else if (mtk_wed_get_rx_capa(dev)) {
1818 /* driver set mid ready and only once */
1819 wed_w32(dev, MTK_WED_EXT_INT_MASK1,
1820 MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
1821 wed_w32(dev, MTK_WED_EXT_INT_MASK2,
1822 MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
1823+ if (dev->hw->version == 3)
1824+ wed_w32(dev, MTK_WED_EXT_INT_MASK3,
1825+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
1826
1827 wed_r32(dev, MTK_WED_EXT_INT_MASK1);
1828 wed_r32(dev, MTK_WED_EXT_INT_MASK2);
1829+ if (dev->hw->version == 3)
1830+ wed_r32(dev, MTK_WED_EXT_INT_MASK3);
1831
1832 ret = mtk_wed_rro_cfg(dev);
1833 if (ret)
1834 return;
1835 }
1836- mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
1837+
1838+ if (dev->hw->version == 2)
1839+ mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
1840+ else if (dev->hw->version == 3)
1841+ mtk_wed_pao_init(dev);
1842
1843 mtk_wed_dma_enable(dev);
1844 dev->running = true;
1845 }
1846
1847+static int
1848+mtk_wed_get_pci_base(struct mtk_wed_device *dev)
1849+{
1850+ if (dev->hw->index == 0)
1851+ return MTK_WED_PCIE_BASE0;
1852+ else if (dev->hw->index == 1)
1853+ return MTK_WED_PCIE_BASE1;
1854+ else
1855+ return MTK_WED_PCIE_BASE2;
1856+}
1857+
1858 static int
1859 mtk_wed_attach(struct mtk_wed_device *dev)
1860 __releases(RCU)
1861 {
1862 struct mtk_wed_hw *hw;
1863 struct device *device;
1864- u16 ver;
1865 int ret = 0;
1866
1867 RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
1868@@ -1494,34 +2037,30 @@ mtk_wed_attach(struct mtk_wed_device *dev)
1869 		goto out;
1870 }
1871
1872- device = dev->wlan.bus_type == MTK_WED_BUS_PCIE
1873- ? &dev->wlan.pci_dev->dev
1874- : &dev->wlan.platform_dev->dev;
1875+ device = dev->wlan.bus_type == MTK_WED_BUS_PCIE ?
1876+ &dev->wlan.pci_dev->dev
1877+ : &dev->wlan.platform_dev->dev;
1878 dev_info(device, "attaching wed device %d version %d\n",
1879- hw->index, hw->ver);
1880+ hw->index, hw->version);
1881
1882 dev->hw = hw;
1883 dev->dev = hw->dev;
1884 dev->irq = hw->irq;
1885 dev->wdma_idx = hw->index;
1886+ dev->ver = hw->version;
1887+
1888+ if (dev->hw->version == 3)
1889+ dev->hw->pci_base = mtk_wed_get_pci_base(dev);
1890
1891 if (hw->eth->dma_dev == hw->eth->dev &&
1892 of_dma_is_coherent(hw->eth->dev->of_node))
1893 mtk_eth_set_dma_device(hw->eth, hw->dev);
1894
1895- dev->ver = FIELD_GET(MTK_WED_REV_ID_MAJOR,
1896- wed_r32(dev, MTK_WED_REV_ID));
1897- if (dev->ver > MTK_WED_V1)
1898- ver = FIELD_GET(MTK_WED_REV_ID_MINOR,
1899- wed_r32(dev, MTK_WED_REV_ID));
1900-
1901- dev->rev_id = ((dev->ver << 28) | ver << 16);
1902-
1903- ret = mtk_wed_buffer_alloc(dev);
1904+ ret = mtk_wed_tx_buffer_alloc(dev);
1905 if (ret)
1906 goto error;
1907
1908- if (dev->ver > MTK_WED_V1) {
1909+ if (mtk_wed_get_rx_capa(dev)) {
1910 ret = mtk_wed_rro_alloc(dev);
1911 if (ret)
1912 goto error;
1913@@ -1533,15 +2072,20 @@ mtk_wed_attach(struct mtk_wed_device *dev)
1914 	init_completion(&dev->wlan_reset_done);
1915 atomic_set(&dev->fe_reset, 0);
1916
1917- if (dev->ver == MTK_WED_V1)
1918+ if (dev->hw->version != 1)
1919+ dev->rev_id = wed_r32(dev, MTK_WED_REV_ID);
1920+ else
1921 regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
1922 BIT(hw->index), 0);
1923- else
1924+
1925+ if (mtk_wed_get_rx_capa(dev))
1926 ret = mtk_wed_wo_init(hw);
1927
1928 error:
1929- if (ret)
1930+ if (ret) {
1931+ pr_info("%s: detach wed\n", __func__);
1932 mtk_wed_detach(dev);
1933+ }
1934 out:
1935 mutex_unlock(&hw_lock);
1936
1937@@ -1576,8 +2120,26 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx,
1938 	if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE, reset))
1939 return -ENOMEM;
1940
1941+ if (dev->hw->version == 3 && idx == 1) {
1942+ /* reset prefetch index */
1943+ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG,
1944+ MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR |
1945+ MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR);
1946+
1947+ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
1948+ MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR |
1949+ MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR);
1950+
1951+ /* reset prefetch FIFO */
1952+ wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG,
1953+ MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR |
1954+ MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR);
1955+ wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG, 0);
1956+ }
1957+
1958 ring->reg_base = MTK_WED_RING_TX(idx);
1959 ring->wpdma = regs;
1960+ ring->flags |= MTK_WED_RING_CONFIGURED;
1961
1962 /* WED -> WPDMA */
1963 wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
1964@@ -1599,7 +2161,7 @@ mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
1965 	struct mtk_wed_ring *ring = &dev->txfree_ring;
1966 int i, idx = 1;
1967
1968- if(dev->ver > MTK_WED_V1)
1969+ if(dev->hw->version > 1)
1970 idx = 0;
1971
1972 /*
1973@@ -1652,6 +2214,129 @@ mtk_wed_rx_ring_setup(struct mtk_wed_device *dev,
1974 	return 0;
1975 }
1976
1977+static int
1978+mtk_wed_rro_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
1979+{
1980+ struct mtk_wed_ring *ring = &dev->rx_rro_ring[idx];
1981+
1982+ ring->wpdma = regs;
1983+
1984+ wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_BASE,
1985+ readl(regs));
1986+ wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_COUNT,
1987+ readl(regs + MTK_WED_RING_OFS_COUNT));
1988+
1989+ ring->flags |= MTK_WED_RING_CONFIGURED;
1990+
1991+ return 0;
1992+}
1993+
1994+static int
1995+mtk_wed_msdu_pg_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
1996+{
1997+ struct mtk_wed_ring *ring = &dev->rx_page_ring[idx];
1998+
1999+ ring->wpdma = regs;
2000+
2001+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_BASE,
2002+ readl(regs));
2003+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_COUNT,
2004+ readl(regs + MTK_WED_RING_OFS_COUNT));
2005+
2006+ ring->flags |= MTK_WED_RING_CONFIGURED;
2007+
2008+ return 0;
2009+}
2010+
2011+static int
2012+mtk_wed_ind_rx_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
2013+{
2014+ struct mtk_wed_ring *ring = &dev->ind_cmd_ring;
2015+ u32 val = readl(regs + MTK_WED_RING_OFS_COUNT);
2016+ int i = 0, cnt = 0;
2017+
2018+ ring->wpdma = regs;
2019+
2020+ if (readl(regs) & 0xf)
2021+		pr_info("%s(): address is not 16-byte aligned\n", __func__);
2022+
2023+ wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_BASE,
2024+ readl(regs) & 0xfffffff0);
2025+
2026+ wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_COUNT,
2027+ readl(regs + MTK_WED_RING_OFS_COUNT));
2028+
2029+ /* ack sn cr */
2030+ wed_w32(dev, MTK_WED_RRO_CFG0, dev->wlan.phy_base +
2031+ dev->wlan.ind_cmd.ack_sn_addr);
2032+ wed_w32(dev, MTK_WED_RRO_CFG1,
2033+ FIELD_PREP(MTK_WED_RRO_CFG1_MAX_WIN_SZ,
2034+ dev->wlan.ind_cmd.win_size) |
2035+ FIELD_PREP(MTK_WED_RRO_CFG1_PARTICL_SE_ID,
2036+ dev->wlan.ind_cmd.particular_sid));
2037+
2038+ /* particular session addr element */
2039+ wed_w32(dev, MTK_WED_ADDR_ELEM_CFG0, dev->wlan.ind_cmd.particular_se_phys);
2040+
2041+ for (i = 0; i < dev->wlan.ind_cmd.se_group_nums; i++) {
2042+ wed_w32(dev, MTK_WED_RADDR_ELEM_TBL_WDATA,
2043+ dev->wlan.ind_cmd.addr_elem_phys[i] >> 4);
2044+ wed_w32(dev, MTK_WED_ADDR_ELEM_TBL_CFG,
2045+ MTK_WED_ADDR_ELEM_TBL_WR | (i & 0x7f));
2046+
2047+ val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
2048+ while (!(val & MTK_WED_ADDR_ELEM_TBL_WR_RDY) &&
2049+ cnt < 100) {
2050+ val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
2051+ cnt++;
2052+ }
2053+ if (cnt >= 100) {
2054+ dev_err(dev->hw->dev, "mtk_wed%d: write ba session base failed!\n",
2055+ dev->hw->index);
2056+ }
2057+ /*if (mtk_wed_poll_busy(dev, MTK_WED_ADDR_ELEM_TBL_CFG,
2058+ MTK_WED_ADDR_ELEM_TBL_WR_RDY)) {
2059+ dev_err(dev->hw->dev, "mtk_wed%d: write ba session base failed!\n",
2060+ dev->hw->index);
2061+ return -1;
2062+ }*/
2063+ }
2064+
2065+ /* pn check init */
2066+ for (i = 0; i < dev->wlan.ind_cmd.particular_sid; i++) {
2067+ wed_w32(dev, MTK_WED_PN_CHECK_WDATA_M,
2068+ MTK_WED_PN_CHECK_IS_FIRST);
2069+
2070+ wed_w32(dev, MTK_WED_PN_CHECK_CFG, MTK_WED_PN_CHECK_WR |
2071+ FIELD_PREP(MTK_WED_PN_CHECK_SE_ID, i));
2072+
2073+ cnt = 0;
2074+ val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
2075+ while (!(val & MTK_WED_PN_CHECK_WR_RDY) &&
2076+ cnt < 100) {
2077+ val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
2078+ cnt++;
2079+ }
2080+ if (cnt >= 100) {
2081+ dev_err(dev->hw->dev, "mtk_wed%d: session(%d) init failed!\n",
2082+ dev->hw->index, i);
2083+ }
2084+ /*if (mtk_wed_poll_busy(dev, MTK_WED_PN_CHECK_CFG,
2085+ MTK_WED_PN_CHECK_WR_RDY)) {
2086+ dev_err(dev->hw->dev, "mtk_wed%d: session(%d) init failed!\n",
2087+ dev->hw->index, i);
2088+ //return -1;
2089+ }*/
2090+ }
2091+
2092+ wed_w32(dev, MTK_WED_RX_IND_CMD_CNT0, MTK_WED_RX_IND_CMD_DBG_CNT_EN);
2093+
2094+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN);
2095+
2096+ return 0;
2097+}
2098+
2099+
2100 static u32
2101 mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
2102 {
2103@@ -1659,9 +2344,13 @@ mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
2104
2105 	val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
2106 wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
2107-	val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
2108- if (!dev->hw->num_flows)
2109- val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
2110+ if (dev->hw->version == 3) {
developer23f9f0f2023-06-15 13:06:25 +08002111+ val &= MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT;
developer18d0d712023-08-23 11:50:09 +08002112+ } else {
2113+ val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
2114+ if (!dev->hw->num_flows)
2115+ val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
2116+ }
2117 if (val && net_ratelimit())
2118 pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val);
2119
2120@@ -1754,6 +2443,9 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
2121 	.tx_ring_setup = mtk_wed_tx_ring_setup,
2122 .txfree_ring_setup = mtk_wed_txfree_ring_setup,
2123 .rx_ring_setup = mtk_wed_rx_ring_setup,
2124+ .rro_rx_ring_setup = mtk_wed_rro_rx_ring_setup,
2125+ .msdu_pg_rx_ring_setup = mtk_wed_msdu_pg_rx_ring_setup,
2126+ .ind_rx_ring_setup = mtk_wed_ind_rx_ring_setup,
2127 .msg_update = mtk_wed_send_msg,
2128 .start = mtk_wed_start,
2129 .stop = mtk_wed_stop,
2130@@ -1765,6 +2457,7 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
2131 	.detach = mtk_wed_detach,
2132 .setup_tc = mtk_wed_eth_setup_tc,
2133 .ppe_check = mtk_wed_ppe_check,
2134+ .start_hwrro = mtk_wed_start_hwrro,
2135 };
2136 struct device_node *eth_np = eth->dev->of_node;
2137 struct platform_device *pdev;
2138@@ -1804,9 +2497,10 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
2139 	hw->wdma_phy = wdma_phy;
2140 hw->index = index;
2141 hw->irq = irq;
2142- hw->ver = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
2143+ hw->version = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3) ?
2144+ 3 : MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
2145
2146- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2147+ if (hw->version == 1) {
2148 hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
2149 "mediatek,pcie-mirror");
2150 hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
2151@@ -1821,7 +2515,6 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
2152 			regmap_write(hw->mirror, 0, 0);
2153 regmap_write(hw->mirror, 4, 0);
2154 }
2155- hw->ver = MTK_WED_V1;
2156 }
2157
2158 mtk_wed_hw_add_debugfs(hw);
2159diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
2160index 490873c..fcf7bd0 100644
2161--- a/drivers/net/ethernet/mediatek/mtk_wed.h
2162+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
2163@@ -10,10 +10,13 @@
2164 #include <linux/netdevice.h>
2165 #define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000)
2166
2167-#define MTK_WED_PKT_SIZE 1900
2168+#define MTK_WED_PKT_SIZE				1920
2169 #define MTK_WED_BUF_SIZE 2048
2170+#define MTK_WED_PAGE_BUF_SIZE 128
2171 #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
2172+#define MTK_WED_RX_PAGE_BUF_PER_PAGE (PAGE_SIZE / 128)
2173 #define MTK_WED_RX_RING_SIZE 1536
2174+#define MTK_WED_RX_PG_BM_CNT 8192
2175
2176 #define MTK_WED_TX_RING_SIZE 2048
2177 #define MTK_WED_WDMA_RING_SIZE 512
2178@@ -27,6 +30,9 @@
2179 #define MTK_WED_RRO_QUE_CNT 8192
2180 #define MTK_WED_MIOD_ENTRY_CNT 128
2181
2182+#define MTK_WED_TX_BM_DMA_SIZE 65536
2183+#define MTK_WED_TX_BM_PKT_CNT 32768
2184+
2185 #define MODULE_ID_WO 1
2186
2187 struct mtk_eth;
2188@@ -43,6 +49,8 @@ struct mtk_wed_hw {
2189 struct dentry *debugfs_dir;
2190 struct mtk_wed_device *wed_dev;
2191 struct mtk_wed_wo *wed_wo;
2192+ struct mtk_wed_pao *wed_pao;
2193+ u32 pci_base;
2194 u32 debugfs_reg;
2195 u32 num_flows;
2196 u32 wdma_phy;
2197@@ -50,7 +58,8 @@ struct mtk_wed_hw {
2198 int ring_num;
2199 int irq;
2200 int index;
2201- u32 ver;
2202+ int token_id;
2203+ u32 version;
2204 };
2205
2206 struct mtk_wdma_info {
2207@@ -58,6 +67,18 @@ struct mtk_wdma_info {
2208 u8 queue;
2209 u16 wcid;
2210 u8 bss;
2211+ u32 usr_info;
2212+ u8 tid;
2213+ u8 is_fixedrate;
2214+ u8 is_prior;
2215+ u8 is_sp;
2216+ u8 hf;
2217+ u8 amsdu_en;
2218+};
2219+
2220+struct mtk_wed_pao {
2221+ char *hif_txd[32];
2222+ dma_addr_t hif_txd_phys[32];
2223 };
2224
2225 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
2226diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
2227index 4a9e684..51e3d7c 100644
2228--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
2229+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
2230@@ -11,9 +11,11 @@ struct reg_dump {
2231 u16 offset;
2232 u8 type;
2233 u8 base;
2234+ u32 mask;
2235 };
2236
2237 enum {
2238+ DUMP_TYPE_END,
2239 DUMP_TYPE_STRING,
2240 DUMP_TYPE_WED,
2241 DUMP_TYPE_WDMA,
2242@@ -23,8 +25,11 @@ enum {
2243 DUMP_TYPE_WED_RRO,
2244 };
2245
2246+#define DUMP_END() { .type = DUMP_TYPE_END }
2247 #define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
2248 #define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ }
2249+#define DUMP_REG_MASK(_reg, _mask) { #_mask, MTK_##_reg, DUMP_TYPE_WED, 0, MTK_##_mask }
2250+
2251 #define DUMP_RING(_prefix, _base, ...) \
2252 { _prefix " BASE", _base, __VA_ARGS__ }, \
2253 { _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \
2254@@ -32,6 +37,7 @@ enum {
2255 { _prefix " DIDX", _base + 0xc, __VA_ARGS__ }
2256
2257 #define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED)
2258+#define DUMP_WED_MASK(_reg, _mask) DUMP_REG_MASK(_reg, _mask)
2259 #define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED)
2260
2261 #define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA)
2262@@ -52,36 +58,49 @@ print_reg_val(struct seq_file *s, const char *name, u32 val)
2263
2264 static void
2265 dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
2266- const struct reg_dump *regs, int n_regs)
2267+ const struct reg_dump **regs)
2268 {
2269- const struct reg_dump *cur;
2270+ const struct reg_dump **cur_o = regs, *cur;
2271+ bool newline = false;
2272 u32 val;
2273
2274- for (cur = regs; cur < &regs[n_regs]; cur++) {
2275- switch (cur->type) {
2276- case DUMP_TYPE_STRING:
2277- seq_printf(s, "%s======== %s:\n",
2278- cur > regs ? "\n" : "",
2279- cur->name);
2280- continue;
2281- case DUMP_TYPE_WED:
2282- case DUMP_TYPE_WED_RRO:
2283- val = wed_r32(dev, cur->offset);
2284- break;
2285- case DUMP_TYPE_WDMA:
2286- val = wdma_r32(dev, cur->offset);
2287- break;
2288- case DUMP_TYPE_WPDMA_TX:
2289- val = wpdma_tx_r32(dev, cur->base, cur->offset);
2290- break;
2291- case DUMP_TYPE_WPDMA_TXFREE:
2292- val = wpdma_txfree_r32(dev, cur->offset);
2293- break;
2294- case DUMP_TYPE_WPDMA_RX:
2295- val = wpdma_rx_r32(dev, cur->base, cur->offset);
2296- break;
2297+ while (*cur_o) {
2298+ cur = *cur_o;
2299+
2300+ while (cur->type != DUMP_TYPE_END) {
2301+ switch (cur->type) {
2302+ case DUMP_TYPE_STRING:
2303+ seq_printf(s, "%s======== %s:\n",
2304+ newline ? "\n" : "",
2305+ cur->name);
2306+ newline = true;
2307+ cur++;
2308+ continue;
2309+ case DUMP_TYPE_WED:
2310+ case DUMP_TYPE_WED_RRO:
2311+ val = wed_r32(dev, cur->offset);
2312+ break;
2313+ case DUMP_TYPE_WDMA:
2314+ val = wdma_r32(dev, cur->offset);
2315+ break;
2316+ case DUMP_TYPE_WPDMA_TX:
2317+ val = wpdma_tx_r32(dev, cur->base, cur->offset);
2318+ break;
2319+ case DUMP_TYPE_WPDMA_TXFREE:
2320+ val = wpdma_txfree_r32(dev, cur->offset);
2321+ break;
2322+ case DUMP_TYPE_WPDMA_RX:
2323+ val = wpdma_rx_r32(dev, cur->base, cur->offset);
2324+ break;
2325+ }
2326+
2327+ if (cur->mask)
2328+ val = (cur->mask & val) >> (ffs(cur->mask) - 1);
2329+
2330+ print_reg_val(s, cur->name, val);
2331+ cur++;
2332 }
2333- print_reg_val(s, cur->name, val);
2334+ cur_o++;
2335 }
2336 }
2337
2338@@ -89,7 +108,7 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
2339 static int
2340 wed_txinfo_show(struct seq_file *s, void *data)
2341 {
2342- static const struct reg_dump regs[] = {
2343+ static const struct reg_dump regs_common[] = {
2344 DUMP_STR("WED TX"),
2345 DUMP_WED(WED_TX_MIB(0)),
2346 DUMP_WED_RING(WED_RING_TX(0)),
2347@@ -128,16 +147,32 @@ wed_txinfo_show(struct seq_file *s, void *data)
2348 DUMP_WDMA_RING(WDMA_RING_RX(0)),
2349 DUMP_WDMA_RING(WDMA_RING_RX(1)),
2350
2351- DUMP_STR("TX FREE"),
2352+ DUMP_STR("WED TX FREE"),
2353 DUMP_WED(WED_RX_MIB(0)),
2354+ DUMP_WED_RING(WED_RING_RX(0)),
2355+ DUMP_WED(WED_WPDMA_RX_COHERENT_MIB(0)),
2356+
2357+ DUMP_WED(WED_RX_MIB(1)),
2358+ DUMP_WED_RING(WED_RING_RX(1)),
2359+ DUMP_WED(WED_WPDMA_RX_COHERENT_MIB(1)),
2360+ DUMP_STR("WED_WPDMA TX FREE"),
2361+ DUMP_WED_RING(WED_WPDMA_RING_RX(0)),
2362+ DUMP_WED_RING(WED_WPDMA_RING_RX(1)),
2363+ DUMP_END(),
2364+ };
2365+
2366+ static const struct reg_dump *regs[] = {
2367+ &regs_common[0],
2368+ NULL,
2369 };
2370+
2371 struct mtk_wed_hw *hw = s->private;
2372 struct mtk_wed_device *dev = hw->wed_dev;
2373
2374 if (!dev)
2375 return 0;
2376
2377- dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
2378+ dump_wed_regs(s, dev, regs);
2379
2380 return 0;
2381 }
2382@@ -146,7 +181,7 @@ DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
2383 static int
2384 wed_rxinfo_show(struct seq_file *s, void *data)
2385 {
2386- static const struct reg_dump regs[] = {
2387+ static const struct reg_dump regs_common[] = {
2388 DUMP_STR("WPDMA RX"),
2389 DUMP_WPDMA_RX_RING(0),
2390 DUMP_WPDMA_RX_RING(1),
2391@@ -164,7 +199,7 @@ wed_rxinfo_show(struct seq_file *s, void *data)
2392 DUMP_WED_RING(WED_RING_RX_DATA(0)),
2393 DUMP_WED_RING(WED_RING_RX_DATA(1)),
2394
2395- DUMP_STR("WED RRO"),
2396+ DUMP_STR("WED WO RRO"),
2397 DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0),
2398 DUMP_WED(WED_RROQM_MID_MIB),
2399 DUMP_WED(WED_RROQM_MOD_MIB),
2400@@ -175,16 +210,6 @@ wed_rxinfo_show(struct seq_file *s, void *data)
2401 DUMP_WED(WED_RROQM_FDBK_ANC_MIB),
2402 DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB),
2403
2404- DUMP_STR("WED Route QM"),
2405- DUMP_WED(WED_RTQM_R2H_MIB(0)),
2406- DUMP_WED(WED_RTQM_R2Q_MIB(0)),
2407- DUMP_WED(WED_RTQM_Q2H_MIB(0)),
2408- DUMP_WED(WED_RTQM_R2H_MIB(1)),
2409- DUMP_WED(WED_RTQM_R2Q_MIB(1)),
2410- DUMP_WED(WED_RTQM_Q2H_MIB(1)),
2411- DUMP_WED(WED_RTQM_Q2N_MIB),
2412- DUMP_WED(WED_RTQM_Q2B_MIB),
2413- DUMP_WED(WED_RTQM_PFDBK_MIB),
2414
2415 DUMP_STR("WED WDMA TX"),
2416 DUMP_WED(WED_WDMA_TX_MIB),
2417@@ -205,15 +230,99 @@ wed_rxinfo_show(struct seq_file *s, void *data)
2418 DUMP_WED(WED_RX_BM_INTF2),
2419 DUMP_WED(WED_RX_BM_INTF),
2420 DUMP_WED(WED_RX_BM_ERR_STS),
2421+ DUMP_END()
2422+ };
2423+
2424+ static const struct reg_dump regs_v2[] = {
2425+ DUMP_STR("WED Route QM"),
2426+ DUMP_WED(WED_RTQM_R2H_MIB(0)),
2427+ DUMP_WED(WED_RTQM_R2Q_MIB(0)),
2428+ DUMP_WED(WED_RTQM_Q2H_MIB(0)),
2429+ DUMP_WED(WED_RTQM_R2H_MIB(1)),
2430+ DUMP_WED(WED_RTQM_R2Q_MIB(1)),
2431+ DUMP_WED(WED_RTQM_Q2H_MIB(1)),
2432+ DUMP_WED(WED_RTQM_Q2N_MIB),
2433+ DUMP_WED(WED_RTQM_Q2B_MIB),
2434+ DUMP_WED(WED_RTQM_PFDBK_MIB),
2435+
2436+ DUMP_END()
2437+ };
2438+
2439+ static const struct reg_dump regs_v3[] = {
2440+ DUMP_STR("WED RX RRO DATA"),
2441+ DUMP_WED_RING(WED_RRO_RX_D_RX(0)),
2442+ DUMP_WED_RING(WED_RRO_RX_D_RX(1)),
2443+
2444+ DUMP_STR("WED RX MSDU PAGE"),
2445+ DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(0)),
2446+ DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(1)),
2447+ DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(2)),
2448+
2449+ DUMP_STR("WED RX IND CMD"),
2450+ DUMP_WED(WED_IND_CMD_RX_CTRL1),
2451+ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL2, WED_IND_CMD_MAX_CNT),
2452+ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0, WED_IND_CMD_PROC_IDX),
2453+ DUMP_WED_MASK(RRO_IND_CMD_SIGNATURE, RRO_IND_CMD_DMA_IDX),
2454+ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0, WED_IND_CMD_MAGIC_CNT),
2455+ DUMP_WED_MASK(RRO_IND_CMD_SIGNATURE, RRO_IND_CMD_MAGIC_CNT),
2456+ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0,
2457+ WED_IND_CMD_PREFETCH_FREE_CNT),
2458+ DUMP_WED_MASK(WED_RRO_CFG1, WED_RRO_CFG1_PARTICL_SE_ID),
2459+
2460+ DUMP_STR("WED ADDR ELEM"),
2461+ DUMP_WED(WED_ADDR_ELEM_CFG0),
2462+ DUMP_WED_MASK(WED_ADDR_ELEM_CFG1,
2463+ WED_ADDR_ELEM_PREFETCH_FREE_CNT),
2464+
2465+ DUMP_STR("WED Route QM"),
2466+ DUMP_WED(WED_RTQM_ENQ_I2Q_DMAD_CNT),
2467+ DUMP_WED(WED_RTQM_ENQ_I2N_DMAD_CNT),
2468+ DUMP_WED(WED_RTQM_ENQ_I2Q_PKT_CNT),
2469+ DUMP_WED(WED_RTQM_ENQ_I2N_PKT_CNT),
2470+ DUMP_WED(WED_RTQM_ENQ_USED_ENTRY_CNT),
2471+ DUMP_WED(WED_RTQM_ENQ_ERR_CNT),
2472+
2473+ DUMP_WED(WED_RTQM_DEQ_DMAD_CNT),
2474+ DUMP_WED(WED_RTQM_DEQ_Q2I_DMAD_CNT),
2475+ DUMP_WED(WED_RTQM_DEQ_PKT_CNT),
2476+ DUMP_WED(WED_RTQM_DEQ_Q2I_PKT_CNT),
2477+ DUMP_WED(WED_RTQM_DEQ_USED_PFDBK_CNT),
2478+ DUMP_WED(WED_RTQM_DEQ_ERR_CNT),
2479+
2480+ DUMP_END()
2481+ };
2482+
2483+ static const struct reg_dump *regs_new_v2[] = {
2484+ &regs_common[0],
2485+ &regs_v2[0],
2486+ NULL,
2487+ };
2488+
2489+ static const struct reg_dump *regs_new_v3[] = {
2490+ &regs_common[0],
2491+ &regs_v3[0],
2492+ NULL,
2493 };
2494
2495 struct mtk_wed_hw *hw = s->private;
2496 struct mtk_wed_device *dev = hw->wed_dev;
2497+ const struct reg_dump **regs;
2498
2499 if (!dev)
2500 return 0;
2501
2502- dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
2503+ switch(dev->hw->version) {
2504+ case 2:
2505+ regs = regs_new_v2;
2506+ break;
2507+ case 3:
2508+ regs = regs_new_v3;
2509+ break;
2510+ default:
2511+ return 0;
2512+ }
2513+
2514+ dump_wed_regs(s, dev, regs);
2515
2516 return 0;
2517 }
2518@@ -248,6 +357,383 @@ mtk_wed_reg_get(void *data, u64 *val)
2519 DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mtk_wed_reg_get, mtk_wed_reg_set,
2520 "0x%08llx\n");
2521
2522+static int
2523+wed_token_txd_show(struct seq_file *s, void *data)
2524+{
2525+ struct mtk_wed_hw *hw = s->private;
2526+ struct mtk_wed_device *dev = hw->wed_dev;
2527+ struct dma_page_info *page_list = dev->tx_buf_ring.pages;
2528+ int token = dev->wlan.token_start;
2529+ u32 val = hw->token_id, size = 1;
2530+ int page_idx = (val - token) / 2;
2531+ int i;
2532+
2533+ if (val < token) {
2534+ size = val;
2535+ page_idx = 0;
2536+ }
2537+
2538+ for (i = 0; i < size; i += MTK_WED_BUF_PER_PAGE) {
2539+ void *page = page_list[page_idx++].addr;
2540+ void *buf;
2541+ int j;
2542+
2543+ if (!page)
2544+ break;
2545+
2546+ buf = page_to_virt(page);
2547+
2548+ for (j = 0; j < MTK_WED_BUF_PER_PAGE; j++) {
2549+ printk("[TXD]:token id = %d\n", token + 2 * (page_idx - 1) + j);
2550+ print_hex_dump(KERN_ERR , "", DUMP_PREFIX_OFFSET, 16, 1, (u8 *)buf, 128, false);
2551+ seq_printf(s, "\n");
2552+
2553+ buf += MTK_WED_BUF_SIZE;
2554+ }
2555+ }
2556+
2557+ return 0;
2558+}
2559+
2560+DEFINE_SHOW_ATTRIBUTE(wed_token_txd);
2561+
2562+static int
2563+wed_pao_show(struct seq_file *s, void *data)
2564+{
2565+ static const struct reg_dump regs_common[] = {
2566+		DUMP_STR("PAO AMSDU INFO"),
2567+ DUMP_WED(WED_PAO_MON_AMSDU_FIFO_DMAD),
2568+
2569+		DUMP_STR("PAO AMSDU ENG0 INFO"),
2570+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(0)),
2571+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(0)),
2572+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(0)),
2573+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(0)),
2574+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(0)),
2575+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(0),
2576+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
2577+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(0),
2578+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
2579+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(0),
2580+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
2581+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(0),
2582+ WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
2583+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(0),
2584+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
2585+
2586+		DUMP_STR("PAO AMSDU ENG1 INFO"),
2587+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(1)),
2588+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(1)),
2589+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(1)),
2590+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(1)),
2591+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(1)),
2592+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(1),
2593+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
2594+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(1),
2595+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
2596+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(1),
2597+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
2598+		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(1),
2599+			      WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
2600+		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(1),
2601+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
2602+
2603+		DUMP_STR("PAO AMSDU ENG2 INFO"),
2604+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(2)),
2605+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(2)),
2606+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(2)),
2607+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(2)),
2608+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(2)),
2609+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(2),
2610+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
2611+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(2),
2612+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
2613+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(2),
2614+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
2615+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(2),
2616+ WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
2617+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(2),
2618+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
2619+
2620+		DUMP_STR("PAO AMSDU ENG3 INFO"),
2621+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(3)),
2622+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(3)),
2623+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(3)),
2624+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(3)),
2625+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(3)),
2626+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(3),
2627+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
2628+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(3),
2629+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
2630+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(3),
2631+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
2632+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(3),
2633+ WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
2634+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(3),
2635+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
2636+
2637+		DUMP_STR("PAO AMSDU ENG4 INFO"),
2638+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(4)),
2639+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(4)),
2640+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(4)),
2641+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(4)),
2642+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(4)),
2643+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(4),
2644+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
2645+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(4),
2646+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
2647+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(4),
2648+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
2649+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(4),
2650+ WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
2651+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(4),
2652+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
2653+
2654+		DUMP_STR("PAO AMSDU ENG5 INFO"),
2655+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(5)),
2656+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(5)),
2657+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(5)),
2658+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(5)),
2659+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(5)),
2660+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(5),
2661+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
2662+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(5),
2663+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
2664+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(5),
2665+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
2666+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(5),
2667+ WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
2668+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(5),
2669+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
2670+
2671+		DUMP_STR("PAO AMSDU ENG6 INFO"),
2672+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(6)),
2673+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(6)),
2674+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(6)),
2675+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(6)),
2676+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(6)),
2677+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(6),
2678+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
2679+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(6),
2680+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
2681+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(6),
2682+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
2683+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(6),
2684+ WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
2685+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(6),
2686+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
2687+
2688+		DUMP_STR("PAO AMSDU ENG7 INFO"),
2689+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(7)),
2690+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(7)),
2691+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(7)),
2692+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(7)),
2693+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(7)),
2694+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(7),
2695+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
2696+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(7),
2697+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
2698+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(7),
2699+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
2700+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(7),
2701+ WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
2702+		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(7),
2703+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
2704+
2705+		DUMP_STR("PAO AMSDU ENG8 INFO"),
2706+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(8)),
2707+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(8)),
2708+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(8)),
2709+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(8)),
2710+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(8)),
2711+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(8),
2712+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
2713+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(8),
2714+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
2715+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(8),
2716+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
2717+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(8),
2718+ WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
2719+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(8),
2720+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
2721+
2722+ DUMP_STR("PAO QMEM INFO"),
2723+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(0), WED_PAO_QMEM_FQ_CNT),
2724+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(0), WED_PAO_QMEM_SP_QCNT),
2725+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(1), WED_PAO_QMEM_TID0_QCNT),
2726+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(1), WED_PAO_QMEM_TID1_QCNT),
2727+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(2), WED_PAO_QMEM_TID2_QCNT),
2728+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(2), WED_PAO_QMEM_TID3_QCNT),
2729+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(3), WED_PAO_QMEM_TID4_QCNT),
2730+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(3), WED_PAO_QMEM_TID5_QCNT),
2731+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(4), WED_PAO_QMEM_TID6_QCNT),
2732+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(4), WED_PAO_QMEM_TID7_QCNT),
2733+
2734+
2735+ DUMP_STR("PAO QMEM HEAD INFO"),
2736+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(0), WED_PAO_QMEM_FQ_HEAD),
2737+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(0), WED_PAO_QMEM_SP_QHEAD),
2738+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(1), WED_PAO_QMEM_TID0_QHEAD),
2739+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(1), WED_PAO_QMEM_TID1_QHEAD),
2740+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(2), WED_PAO_QMEM_TID2_QHEAD),
2741+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(2), WED_PAO_QMEM_TID3_QHEAD),
2742+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(3), WED_PAO_QMEM_TID4_QHEAD),
2743+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(3), WED_PAO_QMEM_TID5_QHEAD),
2744+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(4), WED_PAO_QMEM_TID6_QHEAD),
2745+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(4), WED_PAO_QMEM_TID7_QHEAD),
2746+
2747+ DUMP_STR("PAO QMEM TAIL INFO"),
2748+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(5), WED_PAO_QMEM_FQ_TAIL),
2749+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(5), WED_PAO_QMEM_SP_QTAIL),
2750+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(6), WED_PAO_QMEM_TID0_QTAIL),
2751+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(6), WED_PAO_QMEM_TID1_QTAIL),
2752+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(7), WED_PAO_QMEM_TID2_QTAIL),
2753+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(7), WED_PAO_QMEM_TID3_QTAIL),
2754+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(8), WED_PAO_QMEM_TID4_QTAIL),
2755+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(8), WED_PAO_QMEM_TID5_QTAIL),
2756+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(9), WED_PAO_QMEM_TID6_QTAIL),
2757+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(9), WED_PAO_QMEM_TID7_QTAIL),
2758+
2759+ DUMP_STR("PAO HIFTXD MSDU INFO"),
2760+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(1)),
2761+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(2)),
2762+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(3)),
2763+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(4)),
2764+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(5)),
2765+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(6)),
2766+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(7)),
2767+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(8)),
2768+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(9)),
2769+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(10)),
2770+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(11)),
2771+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(12)),
2772+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(13)),
2773+ DUMP_END()
2774+ };
2775+
2776+ static const struct reg_dump *regs[] = {
2777+ &regs_common[0],
2778+ NULL,
2779+ };
2780+ struct mtk_wed_hw *hw = s->private;
2781+ struct mtk_wed_device *dev = hw->wed_dev;
2782+
2783+ if (!dev)
2784+ return 0;
2785+
2786+ dump_wed_regs(s, dev, regs);
2787+
2788+ return 0;
2789+}
2790+DEFINE_SHOW_ATTRIBUTE(wed_pao);
2791+
2792+static int
2793+wed_rtqm_show(struct seq_file *s, void *data)
2794+{
2795+ static const struct reg_dump regs_common[] = {
2796+ DUMP_STR("WED Route QM IGRS0(N2H + Recycle)"),
2797+ DUMP_WED(WED_RTQM_IGRS0_I2HW_DMAD_CNT),
2798+ DUMP_WED(WED_RTQM_IGRS0_I2H_DMAD_CNT(0)),
2799+ DUMP_WED(WED_RTQM_IGRS0_I2H_DMAD_CNT(1)),
2800+ DUMP_WED(WED_RTQM_IGRS0_I2HW_PKT_CNT),
2801+ DUMP_WED(WED_RTQM_IGRS0_I2H_PKT_CNT(0)),
2802+		DUMP_WED(WED_RTQM_IGRS0_I2H_PKT_CNT(1)),
2803+ DUMP_WED(WED_RTQM_IGRS0_FDROP_CNT),
2804+
2805+
2806+ DUMP_STR("WED Route QM IGRS1(Legacy)"),
2807+ DUMP_WED(WED_RTQM_IGRS1_I2HW_DMAD_CNT),
2808+ DUMP_WED(WED_RTQM_IGRS1_I2H_DMAD_CNT(0)),
2809+ DUMP_WED(WED_RTQM_IGRS1_I2H_DMAD_CNT(1)),
2810+ DUMP_WED(WED_RTQM_IGRS1_I2HW_PKT_CNT),
2811+ DUMP_WED(WED_RTQM_IGRS1_I2H_PKT_CNT(0)),
2812+ DUMP_WED(WED_RTQM_IGRS1_I2H_PKT_CNT(1)),
2813+ DUMP_WED(WED_RTQM_IGRS1_FDROP_CNT),
2814+
2815+ DUMP_STR("WED Route QM IGRS2(RRO3.0)"),
2816+ DUMP_WED(WED_RTQM_IGRS2_I2HW_DMAD_CNT),
2817+ DUMP_WED(WED_RTQM_IGRS2_I2H_DMAD_CNT(0)),
2818+ DUMP_WED(WED_RTQM_IGRS2_I2H_DMAD_CNT(1)),
2819+ DUMP_WED(WED_RTQM_IGRS2_I2HW_PKT_CNT),
2820+ DUMP_WED(WED_RTQM_IGRS2_I2H_PKT_CNT(0)),
2821+ DUMP_WED(WED_RTQM_IGRS2_I2H_PKT_CNT(1)),
2822+ DUMP_WED(WED_RTQM_IGRS2_FDROP_CNT),
2823+
2824+ DUMP_STR("WED Route QM IGRS3(DEBUG)"),
2825+		DUMP_WED(WED_RTQM_IGRS3_I2HW_DMAD_CNT),
2826+ DUMP_WED(WED_RTQM_IGRS3_I2H_DMAD_CNT(0)),
2827+ DUMP_WED(WED_RTQM_IGRS3_I2H_DMAD_CNT(1)),
2828+ DUMP_WED(WED_RTQM_IGRS3_I2HW_PKT_CNT),
2829+ DUMP_WED(WED_RTQM_IGRS3_I2H_PKT_CNT(0)),
2830+ DUMP_WED(WED_RTQM_IGRS3_I2H_PKT_CNT(1)),
2831+ DUMP_WED(WED_RTQM_IGRS3_FDROP_CNT),
2832+
2833+ DUMP_END()
2834+ };
2835+
2836+ static const struct reg_dump *regs[] = {
2837+ &regs_common[0],
2838+ NULL,
2839+ };
2840+ struct mtk_wed_hw *hw = s->private;
2841+ struct mtk_wed_device *dev = hw->wed_dev;
2842+
2843+ if (!dev)
2844+ return 0;
2845+
2846+ dump_wed_regs(s, dev, regs);
2847+
2848+ return 0;
2849+}
2850+DEFINE_SHOW_ATTRIBUTE(wed_rtqm);
2851+
2852+
2853+static int
2854+wed_rro_show(struct seq_file *s, void *data)
2855+{
2856+ static const struct reg_dump regs_common[] = {
2857+ DUMP_STR("RRO/IND CMD CNT"),
2858+ DUMP_WED(WED_RX_IND_CMD_CNT(1)),
2859+ DUMP_WED(WED_RX_IND_CMD_CNT(2)),
2860+ DUMP_WED(WED_RX_IND_CMD_CNT(3)),
2861+ DUMP_WED(WED_RX_IND_CMD_CNT(4)),
2862+ DUMP_WED(WED_RX_IND_CMD_CNT(5)),
2863+ DUMP_WED(WED_RX_IND_CMD_CNT(6)),
2864+ DUMP_WED(WED_RX_IND_CMD_CNT(7)),
2865+ DUMP_WED(WED_RX_IND_CMD_CNT(8)),
2866+ DUMP_WED_MASK(WED_RX_IND_CMD_CNT(9),
2867+ WED_IND_CMD_MAGIC_CNT_FAIL_CNT),
2868+
2869+ DUMP_WED(WED_RX_ADDR_ELEM_CNT(0)),
2870+ DUMP_WED_MASK(WED_RX_ADDR_ELEM_CNT(1),
2871+ WED_ADDR_ELEM_SIG_FAIL_CNT),
2872+ DUMP_WED(WED_RX_MSDU_PG_CNT(1)),
2873+ DUMP_WED(WED_RX_MSDU_PG_CNT(2)),
2874+ DUMP_WED(WED_RX_MSDU_PG_CNT(3)),
2875+ DUMP_WED(WED_RX_MSDU_PG_CNT(4)),
2876+ DUMP_WED(WED_RX_MSDU_PG_CNT(5)),
2877+ DUMP_WED_MASK(WED_RX_PN_CHK_CNT,
2878+ WED_PN_CHK_FAIL_CNT),
2879+
2880+ DUMP_END()
2881+ };
2882+
2883+ static const struct reg_dump *regs[] = {
2884+ &regs_common[0],
2885+ NULL,
2886+ };
2887+ struct mtk_wed_hw *hw = s->private;
2888+ struct mtk_wed_device *dev = hw->wed_dev;
2889+
2890+ if (!dev)
2891+ return 0;
2892+
2893+ dump_wed_regs(s, dev, regs);
2894+
2895+ return 0;
2896+}
2897+DEFINE_SHOW_ATTRIBUTE(wed_rro);
2898+
2899 void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
2900 {
2901 struct dentry *dir;
2902@@ -261,8 +747,18 @@ void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
2903 debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
2904 debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
2905 debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
2906- debugfs_create_file_unsafe("rxinfo", 0400, dir, hw, &wed_rxinfo_fops);
2907- if (hw->ver != MTK_WED_V1) {
2908+ debugfs_create_u32("token_id", 0600, dir, &hw->token_id);
2909+ debugfs_create_file_unsafe("token_txd", 0600, dir, hw, &wed_token_txd_fops);
2910+
2911+ if (hw->version == 3)
2912+ debugfs_create_file_unsafe("pao", 0400, dir, hw, &wed_pao_fops);
2913+
2914+ if (hw->version != 1) {
2915+ debugfs_create_file_unsafe("rxinfo", 0400, dir, hw, &wed_rxinfo_fops);
2916+ if (hw->version == 3) {
2917+ debugfs_create_file_unsafe("rtqm", 0400, dir, hw, &wed_rtqm_fops);
2918+ debugfs_create_file_unsafe("rro", 0400, dir, hw, &wed_rro_fops);
2919+ }
2920 wed_wo_mcu_debugfs(hw, dir);
2921 }
2922 }
2923diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
2924index 96e30a3..055594d 100644
2925--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
2926+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
2927@@ -242,7 +242,7 @@ mtk_wed_load_firmware(struct mtk_wed_wo *wo)
2928 u32 ofs = 0;
2929 u32 boot_cr, val;
2930
2931- mcu = wo->hw->index ? MT7986_FIRMWARE_WO_2 : MT7986_FIRMWARE_WO_1;
2932+ mcu = wo->hw->index ? MTK_FIRMWARE_WO_1 : MTK_FIRMWARE_WO_0;
2933
2934 ret = request_firmware(&fw, mcu, wo->hw->dev);
2935 if (ret)
2936@@ -289,8 +289,12 @@ mtk_wed_load_firmware(struct mtk_wed_wo *wo)
2937 }
2938
2939 /* write the start address */
2940- boot_cr = wo->hw->index ?
2941- WOX_MCU_CFG_LS_WA_BOOT_ADDR_ADDR : WOX_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
2942+ if (wo->hw->version == 3)
2943+ boot_cr = WOX_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
2944+ else
2945+ boot_cr = wo->hw->index ?
2946+ WOX_MCU_CFG_LS_WA_BOOT_ADDR_ADDR : WOX_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
2947+
2948 wo_w32(wo, boot_cr, (wo->region[WO_REGION_EMI].addr_pa >> 16));
2949
2950 /* wo firmware reset */
2951@@ -298,8 +302,7 @@ mtk_wed_load_firmware(struct mtk_wed_wo *wo)
2952
2953 val = wo_r32(wo, WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR);
2954
2955- val |= wo->hw->index ? WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WA_CPU_RSTB_MASK :
2956- WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WM_CPU_RSTB_MASK;
2957+ val |= WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WM_CPU_RSTB_MASK;
2958
2959 wo_w32(wo, WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val);
2960
2961diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.h b/drivers/net/ethernet/mediatek/mtk_wed_mcu.h
2962index 19e1199..c07bdb6 100644
2963--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.h
2964+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.h
2965@@ -16,8 +16,9 @@
2966 #define WARP_OK_STATUS (0)
2967 #define WARP_ALREADY_DONE_STATUS (1)
2968
2969-#define MT7986_FIRMWARE_WO_1 "mediatek/mt7986_wo_0.bin"
2970-#define MT7986_FIRMWARE_WO_2 "mediatek/mt7986_wo_1.bin"
2971+#define MTK_FIRMWARE_WO_0 "mediatek/mtk_wo_0.bin"
2972+#define MTK_FIRMWARE_WO_1 "mediatek/mtk_wo_1.bin"
2973+#define MTK_FIRMWARE_WO_2 "mediatek/mtk_wo_2.bin"
2974
2975 #define WOCPU_EMI_DEV_NODE "mediatek,wocpu_emi"
2976 #define WOCPU_ILM_DEV_NODE "mediatek,wocpu_ilm"
2977diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2978index 403a36b..25be547 100644
2979--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2980+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2981@@ -20,6 +20,9 @@
2982 #define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31)
2983 #define MTK_WED_RX_BM_TOKEN GENMASK(31, 16)
2984
2985+#define MTK_WDMA_TXD0_DESC_INFO_DMA_DONE BIT(29)
2986+#define MTK_WDMA_TXD1_DESC_INFO_DMA_DONE BIT(31)
2987+
2988 struct mtk_wdma_desc {
2989 __le32 buf0;
2990 __le32 ctrl;
2991@@ -51,6 +54,7 @@ struct mtk_wdma_desc {
2992 #define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
2993 #define MTK_WED_RESET_RX_RRO_QM BIT(20)
2994 #define MTK_WED_RESET_RX_ROUTE_QM BIT(21)
2995+#define MTK_WED_RESET_TX_PAO BIT(22)
2996 #define MTK_WED_RESET_WED BIT(31)
2997
2998 #define MTK_WED_CTRL 0x00c
2999@@ -58,6 +62,9 @@ struct mtk_wdma_desc {
3000 #define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1)
3001 #define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2)
3002 #define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3)
3003+#define MTK_WED_CTRL_WED_RX_IND_CMD_EN BIT(5)
3004+#define MTK_WED_CTRL_WED_RX_PG_BM_EN BIT(6)
3005+#define MTK_WED_CTRL_WED_RX_PG_BM_BUSU BIT(7)
3006 #define MTK_WED_CTRL_WED_TX_BM_EN BIT(8)
3007 #define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
3008 #define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
3009@@ -68,9 +75,14 @@ struct mtk_wdma_desc {
3010 #define MTK_WED_CTRL_RX_RRO_QM_BUSY BIT(15)
3011 #define MTK_WED_CTRL_RX_ROUTE_QM_EN BIT(16)
3012 #define MTK_WED_CTRL_RX_ROUTE_QM_BUSY BIT(17)
3013+#define MTK_WED_CTRL_TX_TKID_ALI_EN BIT(20)
3014+#define MTK_WED_CTRL_TX_TKID_ALI_BUSY BIT(21)
3015+#define MTK_WED_CTRL_TX_PAO_EN BIT(22)
3016+#define MTK_WED_CTRL_TX_PAO_BUSY BIT(23)
3017 #define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
3018 #define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25)
3019 #define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
3020+#define MTK_WED_CTRL_FLD_MIB_RD_CLR BIT(28)
3021
3022 #define MTK_WED_EXT_INT_STATUS 0x020
3023 #define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0)
3024@@ -78,12 +90,10 @@ struct mtk_wdma_desc {
3025 #define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4)
3026 #define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8)
3027 #define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9)
3028-#if defined(CONFIG_MEDIATEK_NETSYS_V2)
3029-#define MTK_WED_EXT_INT_STATUS_TX_TKID_LO_TH BIT(10)
3030-#define MTK_WED_EXT_INT_STATUS_TX_TKID_HI_TH BIT(11)
3031-#endif
3032-#define MTK_WED_EXT_INT_STATUS_RX_FREE_AT_EMPTY BIT(12)
3033-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_DMAD_ER BIT(13)
3034+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH2 BIT(10)
3035+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH2 BIT(11)
3036+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
3037+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
3038 #define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
3039 #define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
3040 #define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
3041@@ -100,17 +110,15 @@ struct mtk_wdma_desc {
3042 #define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
3043 MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
3044 MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \
3045- MTK_WED_EXT_INT_STATUS_RX_FREE_AT_EMPTY | \
3046- MTK_WED_EXT_INT_STATUS_RX_FBUF_DMAD_ER | \
3047 MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
3048 MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
3049 MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \
3050- MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR | \
3051- MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR)
3052+ MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR)
3053
3054 #define MTK_WED_EXT_INT_MASK 0x028
3055 #define MTK_WED_EXT_INT_MASK1 0x02c
3056 #define MTK_WED_EXT_INT_MASK2 0x030
3057+#define MTK_WED_EXT_INT_MASK3 0x034
3058
3059 #define MTK_WED_STATUS 0x060
3060 #define MTK_WED_STATUS_TX GENMASK(15, 8)
3061@@ -118,9 +126,14 @@ struct mtk_wdma_desc {
3062 #define MTK_WED_TX_BM_CTRL 0x080
3063 #define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0)
3064 #define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16)
3065+#define MTK_WED_TX_BM_CTRL_LEGACY_EN BIT(26)
3066+#define MTK_WED_TX_TKID_CTRL_FREE_FORMAT BIT(27)
3067 #define MTK_WED_TX_BM_CTRL_PAUSE BIT(28)
3068
3069 #define MTK_WED_TX_BM_BASE 0x084
3070+#define MTK_WED_TX_BM_INIT_PTR 0x088
3071+#define MTK_WED_TX_BM_SW_TAIL_IDX GENMASK(16, 0)
3072+#define MTK_WED_TX_BM_INIT_SW_TAIL_IDX BIT(16)
3073
3074 #define MTK_WED_TX_BM_BUF_LEN 0x08c
3075
3076@@ -134,22 +147,24 @@ struct mtk_wdma_desc {
3077 #if defined(CONFIG_MEDIATEK_NETSYS_V2)
3078 #define MTK_WED_TX_BM_DYN_THR_LO GENMASK(8, 0)
3079 #define MTK_WED_TX_BM_DYN_THR_HI GENMASK(24, 16)
3080-
3081-#define MTK_WED_TX_BM_TKID 0x0c8
3082-#define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
3083-#define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
3084 #else
3085 #define MTK_WED_TX_BM_DYN_THR_LO GENMASK(6, 0)
3086 #define MTK_WED_TX_BM_DYN_THR_HI GENMASK(22, 16)
3087+#endif
3088
3089-#define MTK_WED_TX_BM_TKID 0x088
3090+#define MTK_WED_TX_BM_TKID 0x0c8
3091 #define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
3092 #define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
3093-#endif
3094
3095 #define MTK_WED_TX_TKID_CTRL 0x0c0
3096+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
3097+#define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM GENMASK(7, 0)
3098+#define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM GENMASK(23, 16)
3099+#else
3100 #define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM GENMASK(6, 0)
3101 #define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM GENMASK(22, 16)
3102+#endif
3103+
3104 #define MTK_WED_TX_TKID_CTRL_PAUSE BIT(28)
3105
3106 #define MTK_WED_TX_TKID_DYN_THR 0x0e0
3107@@ -220,12 +235,15 @@ struct mtk_wdma_desc {
3108 #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_PKT_PROC BIT(5)
3109 #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC BIT(6)
3110 #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_CRX_SYNC BIT(7)
3111-#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(18, 16)
3112+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(15, 12)
3113+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4 BIT(18)
3114 #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNSUPPORT_FMT BIT(19)
3115-#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UEVENT_PKT_FMT_CHK BIT(20)
3116+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK BIT(20)
3117 #define MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR BIT(21)
3118 #define MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP BIT(24)
3119+#define MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST BIT(25)
3120 #define MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV BIT(28)
3121+#define MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK BIT(30)
3122
3123 /* CONFIG_MEDIATEK_NETSYS_V1 */
3124 #define MTK_WED_WPDMA_GLO_CFG_RX_BT_SIZE GENMASK(5, 4)
3125@@ -288,9 +306,11 @@ struct mtk_wdma_desc {
3126 #define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16)
3127
3128 #define MTK_WED_PCIE_INT_CTRL 0x57c
3129-#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20)
3130-#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16)
3131 #define MTK_WED_PCIE_INT_CTRL_POLL_EN GENMASK(13, 12)
3132+#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16)
3133+#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20)
3134+#define MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER BIT(21)
3135+
3136 #define MTK_WED_WPDMA_CFG_BASE 0x580
3137 #define MTK_WED_WPDMA_CFG_INT_MASK 0x584
3138 #define MTK_WED_WPDMA_CFG_TX 0x588
3139@@ -319,20 +339,50 @@ struct mtk_wdma_desc {
3140 #define MTK_WED_WPDMA_RX_D_RST_DRV_IDX GENMASK(25, 24)
3141
3142 #define MTK_WED_WPDMA_RX_GLO_CFG 0x76c
3143-#define MTK_WED_WPDMA_RX_RING 0x770
3144+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
3145+#define MTK_WED_WPDMA_RX_RING0 0x770
3146+#else
3147+#define MTK_WED_WPDMA_RX_RING0 0x7d0
3148+#endif
3149+#define MTK_WED_WPDMA_RX_RING1 0x7d8
3150
3151 #define MTK_WED_WPDMA_RX_D_MIB(_n) (0x774 + (_n) * 4)
3152 #define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n) (0x784 + (_n) * 4)
3153 #define MTK_WED_WPDMA_RX_D_COHERENT_MIB 0x78c
3154
3155+#define MTK_WED_WPDMA_RX_D_PREF_CFG 0x7b4
3156+#define MTK_WED_WPDMA_RX_D_PREF_EN BIT(0)
3157+#define MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE GENMASK(12, 8)
3158+#define MTK_WED_WPDMA_RX_D_PREF_LOW_THRES GENMASK(21, 16)
3159+
3160+#define MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX 0x7b8
3161+#define MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR BIT(15)
3162+
3163+#define MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX 0x7bc
3164+
3165+#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG 0x7c0
3166+#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR BIT(0)
3167+#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR BIT(16)
3168+
3169 #define MTK_WED_WDMA_RING_TX 0x800
3170
3171 #define MTK_WED_WDMA_TX_MIB 0x810
3172
3173-
3174 #define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
3175 #define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
3176
3177+#define MTK_WED_WDMA_RX_PREF_CFG 0x950
3178+#define MTK_WED_WDMA_RX_PREF_EN BIT(0)
3179+#define MTK_WED_WDMA_RX_PREF_BURST_SIZE GENMASK(12, 8)
3180+#define MTK_WED_WDMA_RX_PREF_LOW_THRES GENMASK(21, 16)
3181+#define MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR BIT(24)
3182+#define MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR BIT(25)
3183+#define MTK_WED_WDMA_RX_PREF_DDONE2_EN BIT(26)
3184+
3185+#define MTK_WED_WDMA_RX_PREF_FIFO_CFG 0x95C
3186+#define MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR BIT(0)
3187+#define MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR BIT(16)
3188+
3189 #define MTK_WED_WDMA_GLO_CFG 0xa04
3190 #define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
3191 #define MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK BIT(1)
3192@@ -365,6 +415,7 @@ struct mtk_wdma_desc {
3193 #define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16)
3194
3195 #define MTK_WED_WDMA_INT_CTRL 0xa2c
3196+#define MTK_WED_WDMA_INT_POLL_PRD GENMASK(7, 0)
3197 #define MTK_WED_WDMA_INT_POLL_SRC_SEL GENMASK(17, 16)
3198
3199 #define MTK_WED_WDMA_CFG_BASE 0xaa0
3200@@ -426,6 +477,18 @@ struct mtk_wdma_desc {
3201 #define MTK_WDMA_INT_GRP1 0x250
3202 #define MTK_WDMA_INT_GRP2 0x254
3203
3204+#define MTK_WDMA_PREF_TX_CFG 0x2d0
3205+#define MTK_WDMA_PREF_TX_CFG_PREF_EN BIT(0)
3206+
3207+#define MTK_WDMA_PREF_RX_CFG 0x2dc
3208+#define MTK_WDMA_PREF_RX_CFG_PREF_EN BIT(0)
3209+
3210+#define MTK_WDMA_WRBK_TX_CFG 0x300
3211+#define MTK_WDMA_WRBK_TX_CFG_WRBK_EN BIT(30)
3212+
3213+#define MTK_WDMA_WRBK_RX_CFG 0x344
3214+#define MTK_WDMA_WRBK_RX_CFG_WRBK_EN BIT(30)
3215+
3216 #define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0)
3217 #define MTK_PCIE_MIRROR_MAP_EN BIT(0)
3218 #define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1)
3219@@ -439,6 +502,31 @@ struct mtk_wdma_desc {
3220 #define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5)
3221 #define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20)
3222
3223+#define MTK_WED_RTQM_IGRS0_I2HW_DMAD_CNT 0xb1c
3224+#define MTK_WED_RTQM_IGRS0_I2H_DMAD_CNT(_n) (0xb20 + (_n) * 0x4)
3225+#define MTK_WED_RTQM_IGRS0_I2HW_PKT_CNT 0xb28
3226+#define MTK_WED_RTQM_IGRS0_I2H_PKT_CNT(_n) (0xb2c + (_n) * 0x4)
3227+#define MTK_WED_RTQM_IGRS0_FDROP_CNT 0xb34
3228+
3229+
3230+#define MTK_WED_RTQM_IGRS1_I2HW_DMAD_CNT 0xb44
3231+#define MTK_WED_RTQM_IGRS1_I2H_DMAD_CNT(_n) (0xb48 + (_n) * 0x4)
3232+#define MTK_WED_RTQM_IGRS1_I2HW_PKT_CNT 0xb50
3233+#define MTK_WED_RTQM_IGRS1_I2H_PKT_CNT(_n)	(0xb54 + (_n) * 0x4)
3234+#define MTK_WED_RTQM_IGRS1_FDROP_CNT 0xb5c
3235+
3236+#define MTK_WED_RTQM_IGRS2_I2HW_DMAD_CNT 0xb6c
3237+#define MTK_WED_RTQM_IGRS2_I2H_DMAD_CNT(_n) (0xb70 + (_n) * 0x4)
3238+#define MTK_WED_RTQM_IGRS2_I2HW_PKT_CNT 0xb78
3239+#define MTK_WED_RTQM_IGRS2_I2H_PKT_CNT(_n)	(0xb7c + (_n) * 0x4)
3240+#define MTK_WED_RTQM_IGRS2_FDROP_CNT 0xb84
3241+
3242+#define MTK_WED_RTQM_IGRS3_I2HW_DMAD_CNT 0xb94
3243+#define MTK_WED_RTQM_IGRS3_I2H_DMAD_CNT(_n) (0xb98 + (_n) * 0x4)
3244+#define MTK_WED_RTQM_IGRS3_I2HW_PKT_CNT 0xba0
3245+#define MTK_WED_RTQM_IGRS3_I2H_PKT_CNT(_n)	(0xba4 + (_n) * 0x4)
3246+#define MTK_WED_RTQM_IGRS3_FDROP_CNT 0xbac
3247+
3248 #define MTK_WED_RTQM_R2H_MIB(_n) (0xb70 + (_n) * 0x4)
3249 #define MTK_WED_RTQM_R2Q_MIB(_n) (0xb78 + (_n) * 0x4)
3250 #define MTK_WED_RTQM_Q2N_MIB 0xb80
3251@@ -447,6 +535,24 @@ struct mtk_wdma_desc {
3252 #define MTK_WED_RTQM_Q2B_MIB 0xb8c
3253 #define MTK_WED_RTQM_PFDBK_MIB 0xb90
3254
3255+#define MTK_WED_RTQM_ENQ_CFG0 0xbb8
3256+#define MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT GENMASK(15, 12)
3257+
3258+#define MTK_WED_RTQM_FDROP_MIB 0xb84
3259+#define MTK_WED_RTQM_ENQ_I2Q_DMAD_CNT 0xbbc
3260+#define MTK_WED_RTQM_ENQ_I2N_DMAD_CNT 0xbc0
3261+#define MTK_WED_RTQM_ENQ_I2Q_PKT_CNT 0xbc4
3262+#define MTK_WED_RTQM_ENQ_I2N_PKT_CNT 0xbc8
3263+#define MTK_WED_RTQM_ENQ_USED_ENTRY_CNT 0xbcc
3264+#define MTK_WED_RTQM_ENQ_ERR_CNT 0xbd0
3265+
3266+#define MTK_WED_RTQM_DEQ_DMAD_CNT 0xbd8
3267+#define MTK_WED_RTQM_DEQ_Q2I_DMAD_CNT 0xbdc
3268+#define MTK_WED_RTQM_DEQ_PKT_CNT 0xbe0
3269+#define MTK_WED_RTQM_DEQ_Q2I_PKT_CNT 0xbe4
3270+#define MTK_WED_RTQM_DEQ_USED_PFDBK_CNT 0xbe8
3271+#define MTK_WED_RTQM_DEQ_ERR_CNT 0xbec
3272+
3273 #define MTK_WED_RROQM_GLO_CFG 0xc04
3274 #define MTK_WED_RROQM_RST_IDX 0xc08
3275 #define MTK_WED_RROQM_RST_IDX_MIOD BIT(0)
3276@@ -487,8 +593,8 @@ struct mtk_wdma_desc {
3277 #define MTK_WED_RX_BM_BASE 0xd84
3278 #define MTK_WED_RX_BM_INIT_PTR 0xd88
3279 #define MTK_WED_RX_BM_PTR 0xd8c
3280-#define MTK_WED_RX_BM_PTR_HEAD GENMASK(32, 16)
3281 #define MTK_WED_RX_BM_PTR_TAIL GENMASK(15, 0)
3282+#define MTK_WED_RX_BM_PTR_HEAD GENMASK(32, 16)
3283
3284 #define MTK_WED_RX_BM_BLEN 0xd90
3285 #define MTK_WED_RX_BM_STS 0xd94
3286@@ -496,7 +602,193 @@ struct mtk_wdma_desc {
3287 #define MTK_WED_RX_BM_INTF 0xd9c
3288 #define MTK_WED_RX_BM_ERR_STS 0xda8
3289
3290+#define MTK_RRO_IND_CMD_SIGNATURE 0xe00
3291+#define MTK_RRO_IND_CMD_DMA_IDX GENMASK(11, 0)
3292+#define MTK_RRO_IND_CMD_MAGIC_CNT GENMASK(30, 28)
3293+
3294+#define MTK_WED_IND_CMD_RX_CTRL0 0xe04
3295+#define MTK_WED_IND_CMD_PROC_IDX GENMASK(11, 0)
3296+#define MTK_WED_IND_CMD_PREFETCH_FREE_CNT GENMASK(19, 16)
3297+#define MTK_WED_IND_CMD_MAGIC_CNT GENMASK(30, 28)
3298+
3299+#define MTK_WED_IND_CMD_RX_CTRL1 0xe08
3300+#define MTK_WED_IND_CMD_RX_CTRL2 0xe0c
3301+#define MTK_WED_IND_CMD_MAX_CNT GENMASK(11, 0)
3302+#define MTK_WED_IND_CMD_BASE_M GENMASK(19, 16)
3303+
3304+#define MTK_WED_RRO_CFG0 0xe10
3305+#define MTK_WED_RRO_CFG1 0xe14
3306+#define MTK_WED_RRO_CFG1_MAX_WIN_SZ GENMASK(31, 29)
3307+#define MTK_WED_RRO_CFG1_ACK_SN_BASE_M GENMASK(19, 16)
3308+#define MTK_WED_RRO_CFG1_PARTICL_SE_ID GENMASK(11, 0)
3309+
3310+#define MTK_WED_ADDR_ELEM_CFG0 0xe18
3311+#define MTK_WED_ADDR_ELEM_CFG1 0xe1c
3312+#define MTK_WED_ADDR_ELEM_PREFETCH_FREE_CNT GENMASK(19, 16)
3313+
3314+#define MTK_WED_ADDR_ELEM_TBL_CFG 0xe20
3315+#define MTK_WED_ADDR_ELEM_TBL_OFFSET GENMASK(6, 0)
3316+#define MTK_WED_ADDR_ELEM_TBL_RD_RDY BIT(28)
3317+#define MTK_WED_ADDR_ELEM_TBL_WR_RDY BIT(29)
3318+#define MTK_WED_ADDR_ELEM_TBL_RD BIT(30)
3319+#define MTK_WED_ADDR_ELEM_TBL_WR BIT(31)
3320+
3321+#define MTK_WED_RADDR_ELEM_TBL_WDATA 0xe24
3322+#define MTK_WED_RADDR_ELEM_TBL_RDATA 0xe28
3323+
3324+#define MTK_WED_PN_CHECK_CFG 0xe30
3325+#define MTK_WED_PN_CHECK_SE_ID GENMASK(11, 0)
3326+#define MTK_WED_PN_CHECK_RD_RDY BIT(28)
3327+#define MTK_WED_PN_CHECK_WR_RDY BIT(29)
3328+#define MTK_WED_PN_CHECK_RD BIT(30)
3329+#define MTK_WED_PN_CHECK_WR BIT(31)
3330+
3331+#define MTK_WED_PN_CHECK_WDATA_M 0xe38
3332+#define MTK_WED_PN_CHECK_IS_FIRST BIT(17)
3333+
3334+#define MTK_WED_RRO_MSDU_PG_RING_CFG(_n) (0xe44 + (_n) * 0x8)
3335+
3336+#define MTK_WED_RRO_MSDU_PG_RING2_CFG 0xe58
3337+#define MTK_WED_RRO_MSDU_PG_DRV_CLR BIT(26)
3338+#define MTK_WED_RRO_MSDU_PG_DRV_EN BIT(31)
3339+
3340+#define MTK_WED_RRO_MSDU_PG_CTRL0(_n) (0xe5c + (_n) * 0xc)
3341+#define MTK_WED_RRO_MSDU_PG_CTRL1(_n) (0xe60 + (_n) * 0xc)
3342+#define MTK_WED_RRO_MSDU_PG_CTRL2(_n) (0xe64 + (_n) * 0xc)
3343+
3344+#define MTK_WED_RRO_RX_D_RX(_n) (0xe80 + (_n) * 0x10)
3345+
3346+#define MTK_WED_RRO_RX_MAGIC_CNT BIT(13)
3347+
3348+#define MTK_WED_RRO_RX_D_CFG(_n) (0xea0 + (_n) * 0x4)
3349+#define MTK_WED_RRO_RX_D_DRV_CLR BIT(26)
3350+#define MTK_WED_RRO_RX_D_DRV_EN BIT(31)
3351+
3352+#define MTK_WED_RRO_PG_BM_RX_DMAM 0xeb0
3353+#define MTK_WED_RRO_PG_BM_RX_SDL0 GENMASK(13, 0)
3354+
3355+#define MTK_WED_RRO_PG_BM_BASE 0xeb4
3356+#define MTK_WED_RRO_PG_BM_INIT_PTR 0xeb8
3357+#define MTK_WED_RRO_PG_BM_SW_TAIL_IDX GENMASK(15, 0)
3358+#define MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX BIT(16)
3359+
3360+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX 0xeec
3361+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN BIT(0)
3362+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR BIT(1)
3363+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG GENMASK(6, 2)
3364+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN BIT(8)
3365+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR BIT(9)
3366+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG GENMASK(14, 10)
3367+
3368+#define MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG 0xef4
3369+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN BIT(0)
3370+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR BIT(1)
3371+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG GENMASK(6, 2)
3372+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN BIT(8)
3373+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR BIT(9)
3374+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG GENMASK(14, 10)
3375+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN BIT(16)
3376+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR BIT(17)
3377+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG GENMASK(22, 18)
3378+
3379+#define MTK_WED_RX_IND_CMD_CNT0 0xf20
3380+#define MTK_WED_RX_IND_CMD_DBG_CNT_EN BIT(31)
3381+
3382+#define MTK_WED_RX_IND_CMD_CNT(_n) (0xf20 + (_n) * 0x4)
3383+#define MTK_WED_IND_CMD_MAGIC_CNT_FAIL_CNT GENMASK(15, 0)
3384+
3385+#define MTK_WED_RX_ADDR_ELEM_CNT(_n) (0xf48 + (_n) * 0x4)
3386+#define MTK_WED_ADDR_ELEM_SIG_FAIL_CNT GENMASK(15, 0)
3387+#define MTK_WED_ADDR_ELEM_FIRST_SIG_FAIL_CNT GENMASK(31, 16)
3388+#define MTK_WED_ADDR_ELEM_ACKSN_CNT GENMASK(27, 0)
3389+
3390+#define MTK_WED_RX_MSDU_PG_CNT(_n) (0xf5c + (_n) * 0x4)
3391+
3392+#define MTK_WED_RX_PN_CHK_CNT 0xf70
3393+#define MTK_WED_PN_CHK_FAIL_CNT GENMASK(15, 0)
3394+
3395 #define MTK_WED_WOCPU_VIEW_MIOD_BASE 0x8000
3396 #define MTK_WED_PCIE_INT_MASK 0x0
3397
3398+#define MTK_WED_PAO_AMSDU_FIFO 0x1800
3399+#define MTK_WED_PAO_AMSDU_IS_PRIOR0_RING BIT(10)
3400+
3401+#define MTK_WED_PAO_STA_INFO 0x01810
3402+#define MTK_WED_PAO_STA_INFO_DO_INIT BIT(0)
3403+#define MTK_WED_PAO_STA_INFO_SET_INIT BIT(1)
3404+
3405+#define MTK_WED_PAO_STA_INFO_INIT 0x01814
3406+#define MTK_WED_PAO_STA_WTBL_HDRT_MODE BIT(0)
3407+#define MTK_WED_PAO_STA_RMVL BIT(1)
3408+#define MTK_WED_PAO_STA_MAX_AMSDU_LEN GENMASK(7, 2)
3409+#define MTK_WED_PAO_STA_MAX_AMSDU_NUM GENMASK(11, 8)
3410+
3411+#define MTK_WED_PAO_HIFTXD_BASE_L(_n) (0x1980 + (_n) * 0x4)
3412+
3413+#define MTK_WED_PAO_PSE 0x1910
3414+#define MTK_WED_PAO_PSE_RESET BIT(16)
3415+
3416+#define MTK_WED_PAO_HIFTXD_CFG 0x1968
3417+#define MTK_WED_PAO_HIFTXD_SRC GENMASK(16, 15)
3418+
3419+#define MTK_WED_PAO_MON_AMSDU_FIFO_DMAD 0x1a34
3420+
3421+#define MTK_WED_PAO_MON_AMSDU_ENG_DMAD(_n) (0x1a80 + (_n) * 0x50)
3422+#define MTK_WED_PAO_MON_AMSDU_ENG_QFPL(_n) (0x1a84 + (_n) * 0x50)
3423+#define MTK_WED_PAO_MON_AMSDU_ENG_QENI(_n) (0x1a88 + (_n) * 0x50)
3424+#define MTK_WED_PAO_MON_AMSDU_ENG_QENO(_n) (0x1a8c + (_n) * 0x50)
3425+#define MTK_WED_PAO_MON_AMSDU_ENG_MERG(_n) (0x1a90 + (_n) * 0x50)
3426+
3427+#define MTK_WED_PAO_MON_AMSDU_ENG_CNT8(_n) (0x1a94 + (_n) * 0x50)
3428+#define MTK_WED_PAO_AMSDU_ENG_MAX_QGPP_CNT GENMASK(10, 0)
3429+#define MTK_WED_PAO_AMSDU_ENG_MAX_PL_CNT GENMASK(27, 16)
3430+
3431+#define MTK_WED_PAO_MON_AMSDU_ENG_CNT9(_n) (0x1a98 + (_n) * 0x50)
3432+#define MTK_WED_PAO_AMSDU_ENG_CUR_ENTRY GENMASK(10, 0)
3433+#define MTK_WED_PAO_AMSDU_ENG_MAX_BUF_MERGED GENMASK(20, 16)
3434+#define MTK_WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED GENMASK(28, 24)
3435+
3436+#define MTK_WED_PAO_MON_QMEM_STS1 0x1e04
3437+
3438+#define MTK_WED_PAO_MON_QMEM_CNT(_n) (0x1e0c + (_n) * 0x4)
3439+#define MTK_WED_PAO_QMEM_FQ_CNT GENMASK(27, 16)
3440+#define MTK_WED_PAO_QMEM_SP_QCNT GENMASK(11, 0)
3441+#define MTK_WED_PAO_QMEM_TID0_QCNT GENMASK(27, 16)
3442+#define MTK_WED_PAO_QMEM_TID1_QCNT GENMASK(11, 0)
3443+#define MTK_WED_PAO_QMEM_TID2_QCNT GENMASK(27, 16)
3444+#define MTK_WED_PAO_QMEM_TID3_QCNT GENMASK(11, 0)
3445+#define MTK_WED_PAO_QMEM_TID4_QCNT GENMASK(27, 16)
3446+#define MTK_WED_PAO_QMEM_TID5_QCNT GENMASK(11, 0)
3447+#define MTK_WED_PAO_QMEM_TID6_QCNT GENMASK(27, 16)
3448+#define MTK_WED_PAO_QMEM_TID7_QCNT GENMASK(11, 0)
3449+
3450+#define MTK_WED_PAO_MON_QMEM_PTR(_n) (0x1e20 + (_n) * 0x4)
3451+#define MTK_WED_PAO_QMEM_FQ_HEAD GENMASK(27, 16)
3452+#define MTK_WED_PAO_QMEM_SP_QHEAD GENMASK(11, 0)
3453+#define MTK_WED_PAO_QMEM_TID0_QHEAD GENMASK(27, 16)
3454+#define MTK_WED_PAO_QMEM_TID1_QHEAD GENMASK(11, 0)
3455+#define MTK_WED_PAO_QMEM_TID2_QHEAD GENMASK(27, 16)
3456+#define MTK_WED_PAO_QMEM_TID3_QHEAD GENMASK(11, 0)
3457+#define MTK_WED_PAO_QMEM_TID4_QHEAD GENMASK(27, 16)
3458+#define MTK_WED_PAO_QMEM_TID5_QHEAD GENMASK(11, 0)
3459+#define MTK_WED_PAO_QMEM_TID6_QHEAD GENMASK(27, 16)
3460+#define MTK_WED_PAO_QMEM_TID7_QHEAD GENMASK(11, 0)
3461+#define MTK_WED_PAO_QMEM_FQ_TAIL GENMASK(27, 16)
3462+#define MTK_WED_PAO_QMEM_SP_QTAIL GENMASK(11, 0)
3463+#define MTK_WED_PAO_QMEM_TID0_QTAIL GENMASK(27, 16)
3464+#define MTK_WED_PAO_QMEM_TID1_QTAIL GENMASK(11, 0)
3465+#define MTK_WED_PAO_QMEM_TID2_QTAIL GENMASK(27, 16)
3466+#define MTK_WED_PAO_QMEM_TID3_QTAIL GENMASK(11, 0)
3467+#define MTK_WED_PAO_QMEM_TID4_QTAIL GENMASK(27, 16)
3468+#define MTK_WED_PAO_QMEM_TID5_QTAIL GENMASK(11, 0)
3469+#define MTK_WED_PAO_QMEM_TID6_QTAIL GENMASK(27, 16)
3470+#define MTK_WED_PAO_QMEM_TID7_QTAIL GENMASK(11, 0)
3471+
3472+#define MTK_WED_PAO_MON_HIFTXD_FETCH_MSDU(_n) (0x1ec4 + (_n) * 0x4)
3473+
3474+#define MTK_WED_PCIE_BASE 0x11280000
3475+
3476+#define MTK_WED_PCIE_BASE0 0x11300000
3477+#define MTK_WED_PCIE_BASE1 0x11310000
3478+#define MTK_WED_PCIE_BASE2 0x11290000
3479 #endif
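
The WED3 additions above are raw register offsets plus BIT()/GENMASK() field masks, consumed with the standard <linux/bitfield.h> helpers rather than open-coded shifts. Below is a minimal sketch of how the new WPDMA RX prefetch block might be programmed; the wed_base mapping, the accessor helpers and the burst/threshold values are illustrative assumptions, not taken from this patch (the driver itself uses similar private wed_w32()/wed_rmw() wrappers):

#include <linux/io.h>
#include <linux/printk.h>
#include <linux/bitfield.h>

/* hypothetical accessors for illustration only */
static inline u32 wed_r32(void __iomem *wed_base, u32 reg)
{
	return readl(wed_base + reg);
}

static inline void wed_w32(void __iomem *wed_base, u32 reg, u32 val)
{
	writel(val, wed_base + reg);
}

static void wed_rx_d_prefetch_enable(void __iomem *wed_base)
{
	u32 val;

	/* burst size and low threshold below are placeholder values */
	val = MTK_WED_WPDMA_RX_D_PREF_EN |
	      FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE, 0x10) |
	      FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_LOW_THRES, 0x8);
	wed_w32(wed_base, MTK_WED_WPDMA_RX_D_PREF_CFG, val);

	/* fields are decoded on read-back with FIELD_GET() */
	val = wed_r32(wed_base, MTK_WED_WPDMA_RX_D_PREF_CFG);
	pr_debug("rx prefetch burst=%lu low_thres=%lu\n",
		 FIELD_GET(MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE, val),
		 FIELD_GET(MTK_WED_WPDMA_RX_D_PREF_LOW_THRES, val));
}
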
3480diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
developer58aa0682023-09-18 14:02:26 +08003481index 0967dc2..3211f34 100644
developer23f9f0f2023-06-15 13:06:25 +08003482--- a/include/linux/netdevice.h
3483+++ b/include/linux/netdevice.h
developer58aa0682023-09-18 14:02:26 +08003484@@ -875,6 +875,13 @@ struct net_device_path {
developer23f9f0f2023-06-15 13:06:25 +08003485 u8 queue;
3486 u16 wcid;
3487 u8 bss;
3488+ u32 usr_info;
3489+ u8 tid;
3490+ u8 is_fixedrate;
3491+ u8 is_prior;
3492+ u8 is_sp;
3493+ u8 hf;
3494+ u8 amsdu_en;
3495 } mtk_wdma;
3496 };
3497 };
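
The extra mtk_wdma members above extend the forwarding-path descriptor that a Wi-Fi driver returns from its .ndo_fill_forward_path handler, so WED learns per-flow TID/priority hints and whether hardware A-MSDU (PAO) may be used for that station. A rough sketch of such a handler follows, assuming the mainline ndo_fill_forward_path() signature and placeholder station values; only the field names come from this patch:

static int wlan_ndo_fill_forward_path(struct net_device_path_ctx *ctx,
				      struct net_device_path *path)
{
	/* the wcid/bss/queue/tid values would normally come from the
	 * driver's station entry for ctx->daddr; constants here are
	 * placeholders
	 */
	path->type = DEV_PATH_MTK_WDMA;
	path->dev = ctx->dev;
	path->mtk_wdma.wcid = 1;
	path->mtk_wdma.bss = 0;
	path->mtk_wdma.queue = 0;
	path->mtk_wdma.tid = 0;		/* best-effort AC */
	path->mtk_wdma.amsdu_en = 1;	/* allow HW A-MSDU offload */
	path->mtk_wdma.is_fixedrate = 0;
	path->mtk_wdma.is_prior = 0;

	return 0;
}
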
3498diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
developer58aa0682023-09-18 14:02:26 +08003499index 27cf284..92df4ba 100644
developer23f9f0f2023-06-15 13:06:25 +08003500--- a/include/linux/soc/mediatek/mtk_wed.h
3501+++ b/include/linux/soc/mediatek/mtk_wed.h
3502@@ -5,11 +5,14 @@
3503 #include <linux/rcupdate.h>
3504 #include <linux/regmap.h>
3505 #include <linux/pci.h>
3506+#include <linux/skbuff.h>
3507+#include <linux/iopoll.h>
3508
3509 #define WED_WO_STA_REC 0x6
3510
3511 #define MTK_WED_TX_QUEUES 2
3512 #define MTK_WED_RX_QUEUES 2
3513+#define MTK_WED_RX_PAGE_QUEUES 3
3514
3515 enum mtk_wed_wo_cmd {
3516 MTK_WED_WO_CMD_WED_CFG,
3517@@ -55,10 +58,13 @@ enum mtk_wed_bus_tye {
3518 struct mtk_wed_hw;
3519 struct mtk_wdma_desc;
3520
3521+#define MTK_WED_RING_CONFIGURED BIT(0)
3522+
3523 struct mtk_wed_ring {
3524 struct mtk_wdma_desc *desc;
3525 dma_addr_t desc_phys;
3526 int size;
3527+ u32 flags;
3528
3529 u32 reg_base;
3530 void __iomem *wpdma;
3531@@ -69,11 +75,18 @@ struct mtk_rxbm_desc {
3532 __le32 token;
3533 } __packed __aligned(4);
3534
3535+struct dma_page_info {
3536+ void *addr;
3537+ dma_addr_t addr_phys;
3538+};
3539+
3540 struct dma_buf {
3541 int size;
3542- void **pages;
3543- struct mtk_wdma_desc *desc;
3544+ int pkt_nums;
3545+ void *desc;
3546+ int desc_size;
3547 dma_addr_t desc_phys;
3548+ struct dma_page_info *pages;
3549 };
3550
3551 struct dma_entry {
3552@@ -97,6 +110,7 @@ struct mtk_wed_device {
3553 struct device *dev;
3554 struct mtk_wed_hw *hw;
3555 bool init_done, running;
3556+ bool wdma_init_done;
3557 int wdma_idx;
3558 int irq;
3559 u8 ver;
3560@@ -108,7 +122,11 @@ struct mtk_wed_device {
3561 struct mtk_wed_ring rx_ring[MTK_WED_RX_QUEUES];
3562 struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES];
3563
3564- struct dma_buf buf_ring;
3565+ struct mtk_wed_ring rx_rro_ring[MTK_WED_RX_QUEUES];
3566+ struct mtk_wed_ring rx_page_ring[MTK_WED_RX_PAGE_QUEUES];
3567+ struct mtk_wed_ring ind_cmd_ring;
3568+
3569+ struct dma_buf tx_buf_ring;
3570
3571 struct {
3572 int size;
3573@@ -117,6 +135,8 @@ struct mtk_wed_device {
3574 dma_addr_t desc_phys;
3575 } rx_buf_ring;
3576
3577+ struct dma_buf rx_page_buf_ring;
3578+
3579 struct {
3580 struct mtk_wed_ring rro_ring;
3581 void __iomem *rro_desc;
3582@@ -131,8 +151,9 @@ struct mtk_wed_device {
3583 struct platform_device *platform_dev;
3584 struct pci_dev *pci_dev;
3585 };
3586+ enum mtk_wed_bus_tye bus_type;
3587 void __iomem *base;
3588- u32 bus_type;
3589+ void __iomem *regs;
3590 u32 phy_base;
3591
3592 u32 wpdma_phys;
developer58aa0682023-09-18 14:02:26 +08003593@@ -141,10 +162,14 @@ struct mtk_wed_device {
3594 u32 wpdma_tx;
developer23f9f0f2023-06-15 13:06:25 +08003595 u32 wpdma_txfree;
3596 u32 wpdma_rx_glo;
developerb74821e2023-09-08 14:19:59 +08003597- u32 wpdma_rx;
3598+ u32 wpdma_rx[MTK_WED_RX_QUEUES];
developer23f9f0f2023-06-15 13:06:25 +08003599+ u32 wpdma_rx_rro[MTK_WED_RX_QUEUES];
3600+ u32 wpdma_rx_pg;
3601
3602 u8 tx_tbit[MTK_WED_TX_QUEUES];
3603 u8 rx_tbit[MTK_WED_RX_QUEUES];
3604+ u8 rro_rx_tbit[MTK_WED_RX_QUEUES];
3605+ u8 rx_pg_tbit[MTK_WED_RX_PAGE_QUEUES];
3606 u8 txfree_tbit;
3607
3608 u16 token_start;
3609@@ -154,12 +179,26 @@ struct mtk_wed_device {
3610 unsigned int rx_size;
3611
3612 bool wcid_512;
3613-
3614+ bool hwrro;
3615+ bool msi;
3616+
3617+ u8 max_amsdu_nums;
3618+ u32 max_amsdu_len;
3619+
3620+ struct {
3621+ u8 se_group_nums;
3622+ u16 win_size;
3623+ u16 particular_sid;
3624+ u32 ack_sn_addr;
3625+ dma_addr_t particular_se_phys;
3626+ dma_addr_t addr_elem_phys[1024];
3627+ } ind_cmd;
3628+
3629+ u32 chip_id;
3630 u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
3631 int (*offload_enable)(struct mtk_wed_device *wed);
3632 void (*offload_disable)(struct mtk_wed_device *wed);
3633- u32 (*init_rx_buf)(struct mtk_wed_device *wed,
3634- int pkt_num);
3635+ u32 (*init_rx_buf)(struct mtk_wed_device *wed, int size);
3636 void (*release_rx_buf)(struct mtk_wed_device *wed);
3637 void (*update_wo_rx_stats)(struct mtk_wed_device *wed,
3638 struct mtk_wed_wo_rx_stats *stats);
3639@@ -180,6 +219,11 @@ struct mtk_wed_ops {
3640 void __iomem *regs);
3641 int (*rx_ring_setup)(struct mtk_wed_device *dev, int ring,
3642 void __iomem *regs, bool reset);
3643+ int (*rro_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
3644+ void __iomem *regs);
3645+ int (*msdu_pg_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
3646+ void __iomem *regs);
3647+ int (*ind_rx_ring_setup)(struct mtk_wed_device *dev, void __iomem *regs);
3648 int (*msg_update)(struct mtk_wed_device *dev, int cmd_id,
3649 void *data, int len);
3650 void (*detach)(struct mtk_wed_device *dev);
3651@@ -196,6 +240,7 @@ struct mtk_wed_ops {
3652 void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
3653 void (*ppe_check)(struct mtk_wed_device *dev, struct sk_buff *skb,
3654 u32 reason, u32 hash);
3655+ void (*start_hwrro)(struct mtk_wed_device *dev, u32 irq_mask);
3656 };
3657
3658 extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
3659@@ -224,12 +269,21 @@ static inline bool
3660 mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
3661 {
3662 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
3663+ if (dev->ver == 3 && !dev->wlan.hwrro)
3664+ return false;
3665+
3666 return dev->ver != 1;
3667 #else
3668 return false;
3669 #endif
3670 }
3671
3672+static inline bool
3673+mtk_wed_device_support_pao(struct mtk_wed_device *dev)
3674+{
3675+ return dev->ver == 3;
3676+}
3677+
3678 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
3679 #define mtk_wed_device_active(_dev) !!(_dev)->ops
3680 #define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
3681@@ -243,6 +297,12 @@ mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
3682 (_dev)->ops->txfree_ring_setup(_dev, _regs)
3683 #define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs, _reset) \
3684 (_dev)->ops->rx_ring_setup(_dev, _ring, _regs, _reset)
3685+#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) \
3686+ (_dev)->ops->rro_rx_ring_setup(_dev, _ring, _regs)
3687+#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) \
3688+ (_dev)->ops->msdu_pg_rx_ring_setup(_dev, _ring, _regs)
3689+#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) \
3690+ (_dev)->ops->ind_rx_ring_setup(_dev, _regs)
3691 #define mtk_wed_device_update_msg(_dev, _id, _msg, _len) \
3692 (_dev)->ops->msg_update(_dev, _id, _msg, _len)
3693 #define mtk_wed_device_reg_read(_dev, _reg) \
3694@@ -257,6 +317,9 @@ mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
3695 (_dev)->ops->reset_dma(_dev)
3696 #define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) \
3697 (_dev)->ops->ppe_check(_dev, _skb, _reason, _hash)
3698+#define mtk_wed_device_start_hwrro(_dev, _mask) \
3699+ (_dev)->ops->start_hwrro(_dev, _mask)
3700+
3701 #else
3702 static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
3703 {
3704@@ -268,6 +331,9 @@ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
3705 #define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs, _reset) -ENODEV
3706 #define mtk_wed_device_txfree_ring_setup(_dev, _ring, _regs) -ENODEV
3707 #define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs, _reset) -ENODEV
3708+#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) -ENODEV
3709+#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) -ENODEV
3710+#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) -ENODEV
3711 #define mtk_wed_device_reg_read(_dev, _reg) 0
3712 #define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
3713 #define mtk_wed_device_irq_get(_dev, _mask) 0
3714@@ -275,6 +341,7 @@ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
3715 #define mtk_wed_device_dma_reset(_dev) do {} while (0)
3716 #define mtk_wed_device_setup_tc(_dev, _ndev, _type, _data) do {} while (0)
3717 #define mtk_wed_device_ppe_check(_dev, _hash) do {} while (0)
3718+#define mtk_wed_device_start_hwrro(_dev, _mask) do {} while (0)
3719 #endif
3720
3721 #endif
3722--
37232.18.0
3724
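
For context, the new ops exported at the bottom of mtk_wed.h are meant to be driven from the WLAN driver once it has allocated its HW RRO data rings, MSDU page rings and indication-command ring; the real call sequence lives in the companion WLAN (mt76) patches, not here. A rough driver-side sketch, with all ring/register parameters hypothetical:

/* all parameters below are illustrative; the companion WLAN patches
 * provide the real ring register bases and interrupt mask
 */
static void wlan_wed_enable_hwrro(struct mtk_wed_device *wed,
				  void __iomem **rro_regs,
				  void __iomem **pg_regs,
				  void __iomem *ind_cmd_regs,
				  u32 irq_mask)
{
	int i;

	for (i = 0; i < MTK_WED_RX_QUEUES; i++)
		mtk_wed_device_rro_rx_ring_setup(wed, i, rro_regs[i]);

	for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++)
		mtk_wed_device_msdu_pg_rx_ring_setup(wed, i, pg_regs[i]);

	mtk_wed_device_ind_rx_ring_setup(wed, ind_cmd_regs);

	/* unmask the RRO/MSDU-page interrupts and let WED take over HW RRO */
	mtk_wed_device_start_hwrro(wed, irq_mask);
}
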