blob: 1edbab95a02c036b2e32c61c6a6f477e00113717 [file] [log] [blame]
developer2b762412023-09-21 19:13:58 +08001From b54ca484993804cec5941bd12c6cafc9ce51e4dc Mon Sep 17 00:00:00 2001
developer23f9f0f2023-06-15 13:06:25 +08002From: Sujuan Chen <sujuan.chen@mediatek.com>
developer58aa0682023-09-18 14:02:26 +08003Date: Mon, 18 Sep 2023 13:21:15 +0800
developer2b762412023-09-21 19:13:58 +08004Subject: [PATCH] mtk:wed:add wed3 support
developer23f9f0f2023-06-15 13:06:25 +08005
developer23f9f0f2023-06-15 13:06:25 +08006---
7 arch/arm64/boot/dts/mediatek/mt7988.dtsi | 152 ++-
8 .../dts/mediatek/mt7988a-dsa-10g-spim-nor.dts | 16 +-
9 .../dts/mediatek/mt7988d-dsa-10g-spim-nor.dts | 16 +-
10 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 3 +-
11 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 5 +-
12 drivers/net/ethernet/mediatek/mtk_ppe.c | 17 +-
13 drivers/net/ethernet/mediatek/mtk_ppe.h | 2 +-
14 .../net/ethernet/mediatek/mtk_ppe_offload.c | 13 +-
developer2b762412023-09-21 19:13:58 +080015 drivers/net/ethernet/mediatek/mtk_wed.c | 1178 +++++++++++++----
developer23f9f0f2023-06-15 13:06:25 +080016 drivers/net/ethernet/mediatek/mtk_wed.h | 25 +-
developer2b762412023-09-21 19:13:58 +080017 .../net/ethernet/mediatek/mtk_wed_debugfs.c | 584 +++++++-
developer23f9f0f2023-06-15 13:06:25 +080018 drivers/net/ethernet/mediatek/mtk_wed_mcu.c | 13 +-
19 drivers/net/ethernet/mediatek/mtk_wed_mcu.h | 5 +-
20 drivers/net/ethernet/mediatek/mtk_wed_regs.h | 338 ++++-
21 include/linux/netdevice.h | 7 +
developer58aa0682023-09-18 14:02:26 +080022 include/linux/soc/mediatek/mtk_wed.h | 83 +-
developer2b762412023-09-21 19:13:58 +080023 16 files changed, 2069 insertions(+), 388 deletions(-)
developer23f9f0f2023-06-15 13:06:25 +080024 mode change 100755 => 100644 drivers/net/ethernet/mediatek/mtk_ppe.c
25
26diff --git a/arch/arm64/boot/dts/mediatek/mt7988.dtsi b/arch/arm64/boot/dts/mediatek/mt7988.dtsi
developer58aa0682023-09-18 14:02:26 +080027index 561450e..8995ea3 100644
developer23f9f0f2023-06-15 13:06:25 +080028--- a/arch/arm64/boot/dts/mediatek/mt7988.dtsi
29+++ b/arch/arm64/boot/dts/mediatek/mt7988.dtsi
developer58aa0682023-09-18 14:02:26 +080030@@ -205,44 +205,49 @@
developer23f9f0f2023-06-15 13:06:25 +080031 status = "disabled";
32 };
33
34- wed: wed@15010000 {
35- compatible = "mediatek,wed";
36- wed_num = <3>;
37- /* add this property for wed get the pci slot number. */
38- pci_slot_map = <0>, <1>, <2>;
39- reg = <0 0x15010000 0 0x2000>,
40- <0 0x15012000 0 0x2000>,
41- <0 0x15014000 0 0x2000>;
42+ wed0: wed@15010000 {
43+ compatible = "mediatek,mt7988-wed",
44+ "syscon";
45+ reg = <0 0x15010000 0 0x2000>;
46 interrupt-parent = <&gic>;
47- interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
48- <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
49- <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>;
50- };
51-
52- wed2: wed2@15012000 {
53- compatible = "mediatek,wed2";
54- wed_num = <3>;
55- /* add this property for wed get the pci slot number. */
56- reg = <0 0x15010000 0 0x2000>,
57- <0 0x15012000 0 0x2000>,
58- <0 0x15014000 0 0x2000>;
59+ interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
60+ mediatek,wed_pcie = <&wed_pcie>;
61+ mediatek,ap2woccif = <&ap2woccif0>;
62+ mediatek,wocpu_ilm = <&wocpu0_ilm>;
63+ mediatek,wocpu_dlm = <&wocpu0_dlm>;
64+ mediatek,wocpu_boot = <&cpu0_boot>;
65+ mediatek,wocpu_emi = <&wocpu0_emi>;
66+ mediatek,wocpu_data = <&wocpu_data>;
67+ };
68+
69+ wed1: wed@15012000 {
70+ compatible = "mediatek,mt7988-wed",
71+ "syscon";
72+ reg = <0 0x15012000 0 0x2000>;
73 interrupt-parent = <&gic>;
74- interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
75- <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
76- <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>;
77- };
78-
79- wed3: wed3@15014000 {
80- compatible = "mediatek,wed3";
81- wed_num = <3>;
82- /* add this property for wed get the pci slot number. */
83- reg = <0 0x15010000 0 0x2000>,
84- <0 0x15012000 0 0x2000>,
85- <0 0x15014000 0 0x2000>;
86+ interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
87+ mediatek,wed_pcie = <&wed_pcie>;
88+ mediatek,ap2woccif = <&ap2woccif1>;
89+ mediatek,wocpu_ilm = <&wocpu1_ilm>;
90+ mediatek,wocpu_dlm = <&wocpu1_dlm>;
91+ mediatek,wocpu_boot = <&cpu1_boot>;
92+ mediatek,wocpu_emi = <&wocpu1_emi>;
93+ mediatek,wocpu_data = <&wocpu_data>;
94+ };
95+
96+ wed2: wed@15014000 {
97+ compatible = "mediatek,mt7988-wed",
98+ "syscon";
99+ reg = <0 0x15014000 0 0x2000>;
100 interrupt-parent = <&gic>;
101- interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
102- <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
103- <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>;
104+ interrupts = <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>;
105+ mediatek,wed_pcie = <&wed_pcie>;
106+ mediatek,ap2woccif = <&ap2woccif2>;
107+ mediatek,wocpu_ilm = <&wocpu2_ilm>;
108+ mediatek,wocpu_dlm = <&wocpu2_dlm>;
109+ mediatek,wocpu_boot = <&cpu2_boot>;
110+ mediatek,wocpu_emi = <&wocpu2_emi>;
111+ mediatek,wocpu_data = <&wocpu_data>;
112 };
113
114 wdma: wdma@15104800 {
developer58aa0682023-09-18 14:02:26 +0800115@@ -252,15 +257,25 @@
developer23f9f0f2023-06-15 13:06:25 +0800116 <0 0x15105000 0 0x400>;
117 };
118
119- ap2woccif: ap2woccif@151A5000 {
120- compatible = "mediatek,ap2woccif";
121- reg = <0 0x151A5000 0 0x1000>,
122- <0 0x152A5000 0 0x1000>,
123- <0 0x153A5000 0 0x1000>;
124+ ap2woccif0: ap2woccif@151A5000 {
125+ compatible = "mediatek,ap2woccif", "syscon";
126+ reg = <0 0x151A5000 0 0x1000>;
127+ interrupt-parent = <&gic>;
128+ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
129+ };
130+
131+ ap2woccif1: ap2woccif@152A5000 {
132+ compatible = "mediatek,ap2woccif", "syscon";
133+ reg = <0 0x152A5000 0 0x1000>;
134 interrupt-parent = <&gic>;
135- interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
136- <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
137- <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
138+ interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
139+ };
140+
141+ ap2woccif2: ap2woccif@153A5000 {
142+ compatible = "mediatek,ap2woccif", "syscon";
143+ reg = <0 0x153A5000 0 0x1000>;
144+ interrupt-parent = <&gic>;
145+ interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
146 };
147
148 wocpu0_ilm: wocpu0_ilm@151E0000 {
developer58aa0682023-09-18 14:02:26 +0800149@@ -268,31 +283,53 @@
developer23f9f0f2023-06-15 13:06:25 +0800150 reg = <0 0x151E0000 0 0x8000>;
151 };
152
153- wocpu1_ilm: wocpu1_ilm@152E0000 {
154- compatible = "mediatek,wocpu1_ilm";
155+ wocpu1_ilm: wocpu_ilm@152E0000 {
156+ compatible = "mediatek,wocpu_ilm";
157 reg = <0 0x152E0000 0 0x8000>;
158 };
159
160- wocpu2_ilm: wocpu2_ilm@153E0000 {
161- compatible = "mediatek,wocpu2_ilm";
162- reg = <0 0x153E0000 0 0x8000>;
163+ wocpu2_ilm: wocpu_ilm@153E0000 {
164+ compatible = "mediatek,wocpu_ilm";
165+ reg = <0 0x153E0000 0 0x8000>;
166+ };
167+
168+ wocpu0_dlm: wocpu_dlm@151E8000 {
169+ compatible = "mediatek,wocpu_dlm";
170+ reg = <0 0x151E8000 0 0x2000>;
171+
172+ resets = <&ethsysrst 0>;
173+ reset-names = "wocpu_rst";
174+ };
175+
176+ wocpu1_dlm: wocpu_dlm@0x152E8000 {
177+ compatible = "mediatek,wocpu_dlm";
178+ reg = <0 0x152E8000 0 0x2000>;
179+
180+ resets = <&ethsysrst 0>;
181+ reset-names = "wocpu_rst";
182 };
183
184- wocpu_dlm: wocpu_dlm@151E8000 {
185+ wocpu2_dlm: wocpu_dlm@0x153E8000 {
186 compatible = "mediatek,wocpu_dlm";
187- reg = <0 0x151E8000 0 0x2000>,
188- <0 0x152E8000 0 0x2000>,
189- <0 0x153E8000 0 0x2000>;
190+ reg = <0 0x153E8000 0 0x2000>;
191
192 resets = <&ethsysrst 0>;
193 reset-names = "wocpu_rst";
194 };
195
196- cpu_boot: wocpu_boot@15194000 {
197- compatible = "mediatek,wocpu_boot";
198- reg = <0 0x15194000 0 0x1000>,
199- <0 0x15294000 0 0x1000>,
200- <0 0x15394000 0 0x1000>;
201+ cpu0_boot: wocpu_boot@15194000 {
202+ compatible = "mediatek,wocpu0_boot";
203+ reg = <0 0x15194000 0 0x1000>;
204+ };
205+
206+ cpu1_boot: wocpu_boot@15294000 {
207+ compatible = "mediatek,wocpu1_boot";
208+ reg = <0 0x15294000 0 0x1000>;
209+ };
210+
211+ cpu2_boot: wocpu_boot@15394000 {
212+ compatible = "mediatek,wocpu2_boot";
213+ reg = <0 0x15394000 0 0x1000>;
214 };
215
216 reserved-memory {
developer58aa0682023-09-18 14:02:26 +0800217@@ -902,6 +939,7 @@
developer23f9f0f2023-06-15 13:06:25 +0800218 <&topckgen CK_TOP_CB_SGM_325M>;
219 mediatek,ethsys = <&ethsys>;
220 mediatek,sgmiisys = <&sgmiisys0>, <&sgmiisys1>;
221+ mediatek,wed = <&wed0>, <&wed1>, <&wed2>;
222 mediatek,usxgmiisys = <&usxgmiisys0>, <&usxgmiisys1>;
223 mediatek,xfi_pextp = <&xfi_pextp0>, <&xfi_pextp1>;
224 mediatek,xfi_pll = <&xfi_pll>;
225diff --git a/arch/arm64/boot/dts/mediatek/mt7988a-dsa-10g-spim-nor.dts b/arch/arm64/boot/dts/mediatek/mt7988a-dsa-10g-spim-nor.dts
developer58aa0682023-09-18 14:02:26 +0800226index 70a7554..bed27b4 100644
developer23f9f0f2023-06-15 13:06:25 +0800227--- a/arch/arm64/boot/dts/mediatek/mt7988a-dsa-10g-spim-nor.dts
228+++ b/arch/arm64/boot/dts/mediatek/mt7988a-dsa-10g-spim-nor.dts
developer58aa0682023-09-18 14:02:26 +0800229@@ -369,9 +369,23 @@
developer23f9f0f2023-06-15 13:06:25 +0800230 status = "okay";
231 };
232
233-&wed {
234+&wed0 {
235 dy_txbm_enable = "true";
236 dy_txbm_budge = <8>;
237 txbm_init_sz = <10>;
238 status = "okay";
239 };
240+
241+&wed1 {
242+ dy_txbm_enable = "true";
243+ dy_txbm_budge = <8>;
244+ txbm_init_sz = <10>;
245+ status = "okay";
246+};
247+
248+&wed2 {
249+ dy_txbm_enable = "true";
250+ dy_txbm_budge = <8>;
251+ txbm_init_sz = <10>;
252+ status = "okay";
253+};
254\ No newline at end of file
255diff --git a/arch/arm64/boot/dts/mediatek/mt7988d-dsa-10g-spim-nor.dts b/arch/arm64/boot/dts/mediatek/mt7988d-dsa-10g-spim-nor.dts
developer58aa0682023-09-18 14:02:26 +0800256index e8e3a69..5dd481b 100644
developer23f9f0f2023-06-15 13:06:25 +0800257--- a/arch/arm64/boot/dts/mediatek/mt7988d-dsa-10g-spim-nor.dts
258+++ b/arch/arm64/boot/dts/mediatek/mt7988d-dsa-10g-spim-nor.dts
developer58aa0682023-09-18 14:02:26 +0800259@@ -379,9 +379,23 @@
developer23f9f0f2023-06-15 13:06:25 +0800260 status = "okay";
261 };
262
263-&wed {
264+&wed0 {
265 dy_txbm_enable = "true";
266 dy_txbm_budge = <8>;
267 txbm_init_sz = <10>;
268 status = "okay";
269 };
270+
271+&wed1 {
272+ dy_txbm_enable = "true";
273+ dy_txbm_budge = <8>;
274+ txbm_init_sz = <10>;
275+ status = "okay";
276+};
277+
278+&wed2 {
279+ dy_txbm_enable = "true";
280+ dy_txbm_budge = <8>;
281+ txbm_init_sz = <10>;
282+ status = "okay";
283+};
284\ No newline at end of file
285diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
developer58aa0682023-09-18 14:02:26 +0800286index 8bd526a..dea66d7 100644
developer23f9f0f2023-06-15 13:06:25 +0800287--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
288+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
developer58aa0682023-09-18 14:02:26 +0800289@@ -5095,7 +5095,8 @@ static int mtk_probe(struct platform_device *pdev)
developer23f9f0f2023-06-15 13:06:25 +0800290 "mediatek,wed", i);
291 static const u32 wdma_regs[] = {
292 MTK_WDMA0_BASE,
293- MTK_WDMA1_BASE
294+ MTK_WDMA1_BASE,
295+ MTK_WDMA2_BASE
296 };
297 void __iomem *wdma;
298 u32 wdma_phy;
299diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
developer58aa0682023-09-18 14:02:26 +0800300index ee89b4c..8656b5f 100644
developer23f9f0f2023-06-15 13:06:25 +0800301--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
302+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
developer58aa0682023-09-18 14:02:26 +0800303@@ -613,9 +613,12 @@
developer23f9f0f2023-06-15 13:06:25 +0800304 #define RX_DMA_SPORT_MASK 0x7
305 #define RX_DMA_SPORT_MASK_V2 0xf
306
307-#if defined(CONFIG_MEDIATEK_NETSYS_V2)
308+#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
309 #define MTK_WDMA0_BASE 0x4800
310 #define MTK_WDMA1_BASE 0x4c00
311+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
312+#define MTK_WDMA2_BASE 0x5000
313+#endif
314 #else
315 #define MTK_WDMA0_BASE 0x2800
316 #define MTK_WDMA1_BASE 0x2c00
317diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
318old mode 100755
319new mode 100644
developer58aa0682023-09-18 14:02:26 +0800320index 384e811..eda23c2
developer23f9f0f2023-06-15 13:06:25 +0800321--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
322+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
323@@ -9,6 +9,7 @@
324 #include <linux/if_ether.h>
325 #include <linux/if_vlan.h>
326 #include <net/dsa.h>
327+#include <net/route.h>
328 #include "mtk_eth_soc.h"
329 #include "mtk_ppe.h"
330 #include "mtk_ppe_regs.h"
331@@ -396,7 +397,7 @@ int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
332 }
333
334 int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
335- int bss, int wcid)
336+ int bss, int wcid, bool amsdu_en)
337 {
338 struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
339 u32 *ib2 = mtk_foe_entry_ib2(entry);
340@@ -408,6 +409,9 @@ int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
341
342 l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
343 FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
344+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
345+ l2->winfo_pao = FIELD_PREP(MTK_FOE_WINFO_PAO_AMSDU_EN, amsdu_en);
346+#endif
347 #else
348 if (wdma_idx)
349 *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
350@@ -443,6 +447,17 @@ int mtk_foe_entry_set_dscp(struct mtk_foe_entry *entry, int dscp)
351 *ib2 &= ~MTK_FOE_IB2_DSCP;
352 *ib2 |= FIELD_PREP(MTK_FOE_IB2_DSCP, dscp);
353
354+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
355+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
356+
357+ if (*ib2 & MTK_FOE_IB2_WDMA_WINFO &&
358+ l2->winfo_pao & MTK_FOE_WINFO_PAO_AMSDU_EN) {
359+ u8 tid = rt_tos2priority(dscp) & 0xf;
360+
361+ l2->winfo_pao |= FIELD_PREP(MTK_FOE_WINFO_PAO_TID, tid);
362+ }
363+#endif
364+
365 return 0;
366 }
367
368diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
developer58aa0682023-09-18 14:02:26 +0800369index 2a8b6ef..66c7f10 100644
developer23f9f0f2023-06-15 13:06:25 +0800370--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
371+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
372@@ -428,7 +428,7 @@ int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port);
373 int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
374 int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
375 int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
376- int bss, int wcid);
377+ int bss, int wcid, bool amsdu_en);
378 int mtk_foe_entry_set_qid(struct mtk_foe_entry *entry, int qid);
379 int mtk_foe_entry_set_dscp(struct mtk_foe_entry *entry, int dscp);
developer58aa0682023-09-18 14:02:26 +0800380 bool mtk_foe_entry_match(struct mtk_foe_entry *entry, struct mtk_foe_entry *data);
developer23f9f0f2023-06-15 13:06:25 +0800381diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
developer58aa0682023-09-18 14:02:26 +0800382index 95174b7..eab9e9d 100644
developer23f9f0f2023-06-15 13:06:25 +0800383--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
384+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
385@@ -112,6 +112,7 @@ mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_i
386 info->queue = path.mtk_wdma.queue;
387 info->bss = path.mtk_wdma.bss;
388 info->wcid = path.mtk_wdma.wcid;
389+ info->amsdu_en = path.mtk_wdma.amsdu_en;
390
391 return 0;
392 }
393@@ -193,13 +194,15 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
394
395 if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
396 mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss,
397- info.wcid);
398+ info.wcid, info.amsdu_en);
399 pse_port = PSE_PPE0_PORT;
400 #if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
401 if (info.wdma_idx == 0)
402 pse_port = PSE_WDMA0_PORT;
403 else if (info.wdma_idx == 1)
404 pse_port = PSE_WDMA1_PORT;
405+ else if (info.wdma_idx == 2)
406+ pse_port = PSE_WDMA2_PORT;
407 else
408 return -EOPNOTSUPP;
409 #endif
developer58aa0682023-09-18 14:02:26 +0800410@@ -490,8 +493,8 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
developer23f9f0f2023-06-15 13:06:25 +0800411 if (err)
412 return err;
413
414- if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
415- return err;
416+ /*if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
417+ return err;*/
418
419 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
420 if (!entry)
developer58aa0682023-09-18 14:02:26 +0800421@@ -516,8 +519,8 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
422 mtk_foe_entry_clear(eth->ppe[ppe_index], entry);
developer23f9f0f2023-06-15 13:06:25 +0800423 free:
424 kfree(entry);
425- if (wed_index >= 0)
426- mtk_wed_flow_remove(wed_index);
427+ /*if (wed_index >= 0)
428+ mtk_wed_flow_remove(wed_index);*/
429 return err;
430 }
431
432diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
developer2b762412023-09-21 19:13:58 +0800433index 3e760f7..7b2e199 100644
developer23f9f0f2023-06-15 13:06:25 +0800434--- a/drivers/net/ethernet/mediatek/mtk_wed.c
435+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
436@@ -28,7 +28,7 @@ struct wo_cmd_ring {
437 u32 cnt;
438 u32 unit;
439 };
440-static struct mtk_wed_hw *hw_list[2];
441+static struct mtk_wed_hw *hw_list[3];
442 static DEFINE_MUTEX(hw_lock);
443
444 static void
445@@ -73,6 +73,26 @@ mtk_wdma_read_reset(struct mtk_wed_device *dev)
446 return wdma_r32(dev, MTK_WDMA_GLO_CFG);
447 }
448
449+static u32
450+mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
451+{
452+ if (wed_r32(dev, reg) & mask)
453+ return true;
454+
455+ return false;
456+}
457+
458+static int
459+mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
460+{
461+ int sleep = 1000;
462+ int timeout = 100 * sleep;
463+ u32 val;
464+
465+ return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
466+ timeout, false, dev, reg, mask);
467+}
468+
469 static int
470 mtk_wdma_rx_reset(struct mtk_wed_device *dev)
471 {
472@@ -235,6 +255,8 @@ mtk_wed_assign(struct mtk_wed_device *dev)
473 continue;
474
475 hw->wed_dev = dev;
476+ hw->pci_base = MTK_WED_PCIE_BASE;
477+
478 return hw;
479 }
480
481@@ -242,23 +264,84 @@ mtk_wed_assign(struct mtk_wed_device *dev)
482 }
483
484 static int
485-mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
486+mtk_wed_pao_buffer_alloc(struct mtk_wed_device *dev)
487+{
488+ struct mtk_wed_pao *pao;
489+ int i, j;
490+
491+ pao = kzalloc(sizeof(struct mtk_wed_pao), GFP_KERNEL);
492+ if (!pao)
493+ return -ENOMEM;
494+
495+ dev->hw->wed_pao = pao;
496+
497+ for (i = 0; i < 32; i++) {
498+ /* each segment is 64K*/
499+ pao->hif_txd[i] = (char *)__get_free_pages(GFP_ATOMIC |
500+ GFP_DMA32 |
501+ __GFP_ZERO, 4);
502+ if (!pao->hif_txd[i])
503+ goto err;
504+
505+ pao->hif_txd_phys[i] = dma_map_single(dev->hw->dev,
506+ pao->hif_txd[i],
507+ 16 * PAGE_SIZE,
508+ DMA_TO_DEVICE);
509+ if (unlikely(dma_mapping_error(dev->hw->dev,
510+ pao->hif_txd_phys[i])))
511+ goto err;
512+ }
513+
514+ return 0;
515+
516+err:
517+ for (j = 0; j < i; j++)
518+ dma_unmap_single(dev->hw->dev, pao->hif_txd_phys[j],
519+ 16 * PAGE_SIZE, DMA_TO_DEVICE);
520+
521+ return -ENOMEM;
522+}
523+
524+static int
525+mtk_wed_pao_free_buffer(struct mtk_wed_device *dev)
526+{
527+ struct mtk_wed_pao *pao = dev->hw->wed_pao;
528+ int i;
529+
530+ for (i = 0; i < 32; i++) {
531+ dma_unmap_single(dev->hw->dev, pao->hif_txd_phys[i],
532+ 16 * PAGE_SIZE, DMA_TO_DEVICE);
533+ free_pages((unsigned long)pao->hif_txd[i], 4);
534+ }
535+
536+ return 0;
537+}
538+
539+static int
540+mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
541 {
542 struct mtk_wdma_desc *desc;
543+ void *desc_ptr;
544 dma_addr_t desc_phys;
545- void **page_list;
546+ struct dma_page_info *page_list;
547 u32 last_seg = MTK_WDMA_DESC_CTRL_LAST_SEG1;
548 int token = dev->wlan.token_start;
549- int ring_size, n_pages, page_idx;
550- int i;
551-
552+ int ring_size, pkt_nums, n_pages, page_idx;
553+ int i, ret = 0;
554
555 if (dev->ver == MTK_WED_V1) {
556 ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
557- } else {
558+ pkt_nums = ring_size;
559+ dev->tx_buf_ring.desc_size = sizeof(struct mtk_wdma_desc);
560+ } else if (dev->hw->version == 2) {
561 ring_size = MTK_WED_VLD_GROUP_SIZE * MTK_WED_PER_GROUP_PKT +
562 MTK_WED_WDMA_RING_SIZE * 2;
563 last_seg = MTK_WDMA_DESC_CTRL_LAST_SEG0;
564+ dev->tx_buf_ring.desc_size = sizeof(struct mtk_wdma_desc);
565+ } else if (dev->hw->version == 3) {
566+ ring_size = MTK_WED_TX_BM_DMA_SIZE;
567+ pkt_nums = MTK_WED_TX_BM_PKT_CNT;
568+ dev->tx_buf_ring.desc_size = sizeof(struct mtk_rxbm_desc);
569 }
570
571 n_pages = ring_size / MTK_WED_BUF_PER_PAGE;
572@@ -267,18 +350,20 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
573 if (!page_list)
574 return -ENOMEM;
575
576- dev->buf_ring.size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
577- dev->buf_ring.pages = page_list;
578+ dev->tx_buf_ring.size = ring_size;
579+ dev->tx_buf_ring.pages = page_list;
580+ dev->tx_buf_ring.pkt_nums = pkt_nums;
581
582- desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
583- &desc_phys, GFP_KERNEL);
584- if (!desc)
585+ desc_ptr = dma_alloc_coherent(dev->hw->dev,
586+ ring_size * dev->tx_buf_ring.desc_size,
587+ &desc_phys, GFP_KERNEL);
588+ if (!desc_ptr)
589 return -ENOMEM;
590
591- dev->buf_ring.desc = desc;
592- dev->buf_ring.desc_phys = desc_phys;
593+ dev->tx_buf_ring.desc = desc_ptr;
594+ dev->tx_buf_ring.desc_phys = desc_phys;
595
596- for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
597+ for (i = 0, page_idx = 0; i < pkt_nums; i += MTK_WED_BUF_PER_PAGE) {
598 dma_addr_t page_phys, buf_phys;
599 struct page *page;
600 void *buf;
601@@ -295,7 +380,10 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
602 return -ENOMEM;
603 }
604
605- page_list[page_idx++] = page;
606+ page_list[page_idx].addr = page;
607+ page_list[page_idx].addr_phys = page_phys;
608+ page_idx++;
609+
610 dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
611 DMA_BIDIRECTIONAL);
612
613@@ -303,19 +391,23 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
614 buf_phys = page_phys;
615
616 for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
617- u32 txd_size;
618-
619- txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
620-
621+ desc = desc_ptr;
622 desc->buf0 = buf_phys;
623- desc->buf1 = buf_phys + txd_size;
624- desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0,
625- txd_size) |
626- FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
627- MTK_WED_BUF_SIZE - txd_size) |
628- last_seg;
629- desc->info = 0;
630- desc++;
631+ if (dev->hw->version < 3) {
632+ u32 txd_size;
633+
634+ txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
635+ desc->buf1 = buf_phys + txd_size;
636+ desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0,
637+ txd_size) |
638+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
639+ MTK_WED_BUF_SIZE - txd_size) |
640+ last_seg;
641+ desc->info = 0;
642+ } else {
643+ desc->ctrl = token << 16;
644+ }
645+ desc_ptr += dev->tx_buf_ring.desc_size;
646
647 buf += MTK_WED_BUF_SIZE;
648 buf_phys += MTK_WED_BUF_SIZE;
649@@ -325,15 +417,18 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
650 DMA_BIDIRECTIONAL);
651 }
652
653- return 0;
654+ if (dev->hw->version == 3)
655+ ret = mtk_wed_pao_buffer_alloc(dev);
656+
657+ return ret;
658 }
659
660 static void
661-mtk_wed_free_buffer(struct mtk_wed_device *dev)
662+mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
663 {
664- struct mtk_wdma_desc *desc = dev->buf_ring.desc;
665- void **page_list = dev->buf_ring.pages;
666- int ring_size, page_idx;
667+ struct mtk_rxbm_desc *desc = dev->tx_buf_ring.desc;
668+ struct dma_page_info *page_list = dev->tx_buf_ring.pages;
669+ int ring_size, page_idx, pkt_nums;
670 int i;
671
672 if (!page_list)
673@@ -342,33 +437,33 @@ mtk_wed_free_buffer(struct mtk_wed_device *dev)
674 if (!desc)
675 goto free_pagelist;
676
677- if (dev->ver == MTK_WED_V1) {
678- ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
679- } else {
680- ring_size = MTK_WED_VLD_GROUP_SIZE * MTK_WED_PER_GROUP_PKT +
681- MTK_WED_WDMA_RING_SIZE * 2;
682+ pkt_nums = ring_size = dev->tx_buf_ring.size;
683+ if (dev->hw->version == 3) {
684+ mtk_wed_pao_free_buffer(dev);
685+ pkt_nums = dev->tx_buf_ring.pkt_nums;
686 }
687
688- for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
689- void *page = page_list[page_idx++];
690+ for (i = 0, page_idx = 0; i < pkt_nums; i += MTK_WED_BUF_PER_PAGE) {
691+ void *page = page_list[page_idx].addr;
692
693 if (!page)
694 break;
695
696- dma_unmap_page(dev->hw->dev, desc[i].buf0,
697+ dma_unmap_page(dev->hw->dev, page_list[page_idx].addr_phys,
698 PAGE_SIZE, DMA_BIDIRECTIONAL);
699 __free_page(page);
700+ page_idx++;
701 }
702
703- dma_free_coherent(dev->hw->dev, ring_size * sizeof(*desc),
704- desc, dev->buf_ring.desc_phys);
705+ dma_free_coherent(dev->hw->dev, ring_size * dev->tx_buf_ring.desc_size,
706+ dev->tx_buf_ring.desc, dev->tx_buf_ring.desc_phys);
707
708 free_pagelist:
709 kfree(page_list);
710 }
711
712 static int
713-mtk_wed_rx_bm_alloc(struct mtk_wed_device *dev)
714+mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
715 {
716 struct mtk_rxbm_desc *desc;
717 dma_addr_t desc_phys;
718@@ -389,7 +484,7 @@ mtk_wed_rx_bm_alloc(struct mtk_wed_device *dev)
719 }
720
721 static void
722-mtk_wed_free_rx_bm(struct mtk_wed_device *dev)
723+mtk_wed_free_rx_buffer(struct mtk_wed_device *dev)
724 {
725 struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc;
726 int ring_size = dev->rx_buf_ring.size;
727@@ -403,6 +498,113 @@ mtk_wed_free_rx_bm(struct mtk_wed_device *dev)
728 desc, dev->rx_buf_ring.desc_phys);
729 }
730
731+/* TODO */
732+static int
733+mtk_wed_rx_page_buffer_alloc(struct mtk_wed_device *dev)
734+{
735+ int ring_size = dev->wlan.rx_nbuf, buf_num = MTK_WED_RX_PG_BM_CNT;
736+ struct mtk_rxbm_desc *desc;
737+ dma_addr_t desc_phys;
738+ struct dma_page_info *page_list;
739+ int n_pages, page_idx;
740+ int i;
741+
742+ n_pages = buf_num / MTK_WED_RX_PAGE_BUF_PER_PAGE;
743+
744+ page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
745+ if (!page_list)
746+ return -ENOMEM;
747+
748+ dev->rx_page_buf_ring.size = ring_size & ~(MTK_WED_BUF_PER_PAGE - 1);
749+ dev->rx_page_buf_ring.pages = page_list;
750+ dev->rx_page_buf_ring.pkt_nums = buf_num;
751+
752+ desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
753+ &desc_phys, GFP_KERNEL);
754+ if (!desc)
755+ return -ENOMEM;
756+
757+ dev->rx_page_buf_ring.desc = desc;
758+ dev->rx_page_buf_ring.desc_phys = desc_phys;
759+
760+ for (i = 0, page_idx = 0; i < buf_num; i += MTK_WED_RX_PAGE_BUF_PER_PAGE) {
761+ dma_addr_t page_phys, buf_phys;
762+ struct page *page;
763+ void *buf;
764+ int s;
765+
766+ page = __dev_alloc_pages(GFP_KERNEL, 0);
767+ if (!page)
768+ return -ENOMEM;
769+
770+ page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
771+ DMA_BIDIRECTIONAL);
772+ if (dma_mapping_error(dev->hw->dev, page_phys)) {
773+ __free_page(page);
774+ return -ENOMEM;
775+ }
776+
777+ page_list[page_idx].addr= page;
778+ page_list[page_idx].addr_phys= page_phys;
779+ page_idx++;
780+
781+ dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
782+ DMA_BIDIRECTIONAL);
783+
784+ buf = page_to_virt(page);
785+ buf_phys = page_phys;
786+
787+ for (s = 0; s < MTK_WED_RX_PAGE_BUF_PER_PAGE; s++) {
788+
789+ desc->buf0 = cpu_to_le32(buf_phys);
790+ desc++;
791+
792+ buf += MTK_WED_PAGE_BUF_SIZE;
793+ buf_phys += MTK_WED_PAGE_BUF_SIZE;
794+ }
795+
796+ dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
797+ DMA_BIDIRECTIONAL);
798+ }
799+
800+ return 0;
801+}
802+
803+static void
804+mtk_wed_rx_page_free_buffer(struct mtk_wed_device *dev)
805+{
806+ struct mtk_rxbm_desc *desc = dev->rx_page_buf_ring.desc;
807+ struct dma_page_info *page_list = dev->rx_page_buf_ring.pages;
808+ int ring_size, page_idx;
809+ int i;
810+
811+ if (!page_list)
812+ return;
813+
814+ if (!desc)
815+ goto free_pagelist;
816+
817+ ring_size = dev->rx_page_buf_ring.pkt_nums;
818+
819+ for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_RX_PAGE_BUF_PER_PAGE) {
820+ void *page = page_list[page_idx].addr;
821+
822+ if (!page)
823+ break;
824+
825+ dma_unmap_page(dev->hw->dev, page_list[page_idx].addr_phys,
826+ PAGE_SIZE, DMA_BIDIRECTIONAL);
827+ __free_page(page);
828+ page_idx++;
829+ }
830+
developera60ce2b2023-06-16 13:07:18 +0800831+ dma_free_coherent(dev->hw->dev, dev->rx_page_buf_ring.size * sizeof(*desc),
developer23f9f0f2023-06-15 13:06:25 +0800832+ desc, dev->rx_page_buf_ring.desc_phys);
833+
834+free_pagelist:
835+ kfree(page_list);
836+}
837+
838 static void
839 mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, int scale)
840 {
developer2b762412023-09-21 19:13:58 +0800841@@ -416,19 +618,35 @@ mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, int sca
developer23f9f0f2023-06-15 13:06:25 +0800842 static void
843 mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
844 {
845- int i;
846+ int i, scale = dev->hw->version > 1 ? 2 : 1;
847
848 for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
849- mtk_wed_free_ring(dev, &dev->tx_ring[i], 1);
developer2b762412023-09-21 19:13:58 +0800850+ if ((dev->tx_ring[i].flags & MTK_WED_RING_CONFIGURED))
developer23f9f0f2023-06-15 13:06:25 +0800851+ mtk_wed_free_ring(dev, &dev->tx_ring[i], 1);
852+
853 for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
854- mtk_wed_free_ring(dev, &dev->tx_wdma[i], dev->ver);
developer2b762412023-09-21 19:13:58 +0800855+ if ((dev->tx_wdma[i].flags & MTK_WED_RING_CONFIGURED))
developer23f9f0f2023-06-15 13:06:25 +0800856+ mtk_wed_free_ring(dev, &dev->tx_wdma[i], scale);
857 }
858
859 static void
860 mtk_wed_free_rx_rings(struct mtk_wed_device *dev)
861 {
862- mtk_wed_free_rx_bm(dev);
developer2b762412023-09-21 19:13:58 +0800863+ int i, scale = dev->hw->version > 1 ? 2 : 1;
864+
865+ for (i = 0; i < ARRAY_SIZE(dev->rx_ring); i++)
866+ if ((dev->rx_ring[i].flags & MTK_WED_RING_CONFIGURED))
867+ mtk_wed_free_ring(dev, &dev->rx_ring[i], 1);
868+
869+ for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
870+ if ((dev->rx_wdma[i].flags & MTK_WED_RING_CONFIGURED))
871+ mtk_wed_free_ring(dev, &dev->rx_wdma[i], scale);
872+
developer23f9f0f2023-06-15 13:06:25 +0800873+ mtk_wed_free_rx_buffer(dev);
874 mtk_wed_free_ring(dev, &dev->rro.rro_ring, 1);
875+
876+ if (dev->wlan.hwrro)
877+ mtk_wed_rx_page_free_buffer(dev);
878 }
879
880 static void
developer2b762412023-09-21 19:13:58 +0800881@@ -437,7 +655,7 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
developer23f9f0f2023-06-15 13:06:25 +0800882 u32 wdma_mask;
883
884 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
885- if (dev->ver > MTK_WED_V1)
886+ if (mtk_wed_get_rx_capa(dev))
887 wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
888 GENMASK(1, 0));
889 /* wed control cr set */
developer2b762412023-09-21 19:13:58 +0800890@@ -447,7 +665,7 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
developer23f9f0f2023-06-15 13:06:25 +0800891 MTK_WED_CTRL_WED_TX_BM_EN |
892 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
893
894- if (dev->ver == MTK_WED_V1) {
895+ if (dev->hw->version == 1) {
896 wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
897 MTK_WED_PCIE_INT_TRIGGER_STATUS);
898
developer2b762412023-09-21 19:13:58 +0800899@@ -458,6 +676,8 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
developer23f9f0f2023-06-15 13:06:25 +0800900 wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
901 MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
902 } else {
903+ if (dev->hw->version == 3)
904+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_TKID_ALI_EN);
905
906 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
907 MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
developer2b762412023-09-21 19:13:58 +0800908@@ -475,18 +695,20 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
developer23f9f0f2023-06-15 13:06:25 +0800909 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
910 dev->wlan.txfree_tbit));
911
912- wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
913- MTK_WED_WPDMA_INT_CTRL_RX0_EN |
914- MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
915- MTK_WED_WPDMA_INT_CTRL_RX1_EN |
916- MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
917- FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
918- dev->wlan.rx_tbit[0]) |
919- FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
920- dev->wlan.rx_tbit[1]));
921+ if (mtk_wed_get_rx_capa(dev))
922+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
923+ MTK_WED_WPDMA_INT_CTRL_RX0_EN |
924+ MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
925+ MTK_WED_WPDMA_INT_CTRL_RX1_EN |
926+ MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
927+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
928+ dev->wlan.rx_tbit[0]) |
929+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
930+ dev->wlan.rx_tbit[1]));
931 }
932+
933 wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
934- if (dev->ver == MTK_WED_V1) {
935+ if (dev->hw->version == 1) {
936 wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
937 } else {
938 wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
developer2b762412023-09-21 19:13:58 +0800939@@ -506,6 +728,21 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
developer23f9f0f2023-06-15 13:06:25 +0800940 {
941 u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
942
943+ switch (dev->hw->version) {
944+ case 1:
945+ mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
946+ break;
947+ case 2 :
948+ mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH2 |
949+ MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH2 |
950+ MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
951+ MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
952+ break;
953+ case 3:
954+ mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT;
955+ break;
956+ }
957+
958 if (!dev->hw->num_flows)
959 mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
960
developer2b762412023-09-21 19:13:58 +0800961@@ -514,31 +751,86 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
developer23f9f0f2023-06-15 13:06:25 +0800962 }
963
964 static void
965-mtk_wed_set_512_support(struct mtk_wed_device *dev, bool en)
966+mtk_wed_pao_init(struct mtk_wed_device *dev)
967 {
968- if (en) {
969- wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
970- wed_w32(dev, MTK_WED_TXP_DW1,
971- FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
972- } else {
973- wed_w32(dev, MTK_WED_TXP_DW1,
974- FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
975- wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
976+ struct mtk_wed_pao *pao = dev->hw->wed_pao;
977+ int i;
978+
979+ for (i = 0; i < 32; i++)
980+ wed_w32(dev, MTK_WED_PAO_HIFTXD_BASE_L(i),
981+ pao->hif_txd_phys[i]);
982+
983+ /* init all sta parameter */
984+ wed_w32(dev, MTK_WED_PAO_STA_INFO_INIT, MTK_WED_PAO_STA_RMVL |
985+ MTK_WED_PAO_STA_WTBL_HDRT_MODE |
986+ FIELD_PREP(MTK_WED_PAO_STA_MAX_AMSDU_LEN,
987+ dev->wlan.max_amsdu_len >> 8) |
988+ FIELD_PREP(MTK_WED_PAO_STA_MAX_AMSDU_NUM,
989+ dev->wlan.max_amsdu_nums));
990+
991+ wed_w32(dev, MTK_WED_PAO_STA_INFO, MTK_WED_PAO_STA_INFO_DO_INIT);
992+
993+ if (mtk_wed_poll_busy(dev, MTK_WED_PAO_STA_INFO,
994+ MTK_WED_PAO_STA_INFO_DO_INIT)) {
995+ dev_err(dev->hw->dev, "mtk_wed%d: pao init failed!\n",
996+ dev->hw->index);
997+ return;
998 }
999+
1000+ /* init pao txd src */
1001+ wed_set(dev, MTK_WED_PAO_HIFTXD_CFG,
1002+ FIELD_PREP(MTK_WED_PAO_HIFTXD_SRC, dev->hw->index));
1003+
1004+ /* init qmem */
1005+ wed_set(dev, MTK_WED_PAO_PSE, MTK_WED_PAO_PSE_RESET);
1006+ if (mtk_wed_poll_busy(dev, MTK_WED_PAO_MON_QMEM_STS1, BIT(29))) {
1007+ pr_info("%s: init pao qmem fail\n", __func__);
1008+ return;
1009+ }
1010+
1011+ /* eagle E1 PCIE1 tx ring 22 flow control issue */
1012+ if (dev->wlan.chip_id == 0x7991) {
1013+ wed_clr(dev, MTK_WED_PAO_AMSDU_FIFO,
1014+ MTK_WED_PAO_AMSDU_IS_PRIOR0_RING);
1015+ }
1016+
1017+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_PAO_EN);
1018+
1019+ return;
1020 }
1021
1022-static void
1023-mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
1024+static int
1025+mtk_wed_hwrro_init(struct mtk_wed_device *dev)
1026 {
1027-#define MTK_WFMDA_RX_DMA_EN BIT(2)
1028+ if (!mtk_wed_get_rx_capa(dev))
1029+ return 0;
developer2b762412023-09-21 19:13:58 +08001030
developer23f9f0f2023-06-15 13:06:25 +08001031+ wed_set(dev, MTK_WED_RRO_PG_BM_RX_DMAM,
1032+ FIELD_PREP(MTK_WED_RRO_PG_BM_RX_SDL0, 128));
developer2b762412023-09-21 19:13:58 +08001033+
developer23f9f0f2023-06-15 13:06:25 +08001034+ wed_w32(dev, MTK_WED_RRO_PG_BM_BASE,
1035+ dev->rx_page_buf_ring.desc_phys);
1036+
1037+ wed_w32(dev, MTK_WED_RRO_PG_BM_INIT_PTR,
1038+ MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX |
1039+ FIELD_PREP(MTK_WED_RRO_PG_BM_SW_TAIL_IDX,
1040+ MTK_WED_RX_PG_BM_CNT));
1041+
1042+ /* enable rx_page_bm to fetch dmad */
1043+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN);
1044+
1045+ return 0;
1046+}
developer7ccd1942023-07-07 16:15:05 +08001047+
developer23f9f0f2023-06-15 13:06:25 +08001048+static int
1049+mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev,
1050+ struct mtk_wed_ring *ring)
1051+{
1052 int timeout = 3;
1053- u32 cur_idx, regs;
1054+ u32 cur_idx;
1055
1056 do {
1057- regs = MTK_WED_WPDMA_RING_RX_DATA(idx) +
1058- MTK_WED_RING_OFS_CPU_IDX;
1059- cur_idx = wed_r32(dev, regs);
1060+ cur_idx = readl(ring->wpdma + MTK_WED_RING_OFS_CPU_IDX);
1061 if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
1062 break;
1063
developer2b762412023-09-21 19:13:58 +08001064@@ -546,70 +838,133 @@ mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
developer23f9f0f2023-06-15 13:06:25 +08001065 timeout--;
1066 } while (timeout > 0);
1067
1068- if (timeout) {
1069- unsigned int val;
1070+ return timeout;
1071+}
1072
1073- val = wifi_r32(dev, dev->wlan.wpdma_rx_glo -
1074- dev->wlan.phy_base);
1075- val |= MTK_WFMDA_RX_DMA_EN;
1076
1077- wifi_w32(dev, dev->wlan.wpdma_rx_glo -
1078- dev->wlan.phy_base, val);
1079+static void
1080+mtk_wed_set_512_support(struct mtk_wed_device *dev, bool en)
1081+{
1082+ if (en) {
1083+ wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
1084+ wed_w32(dev, MTK_WED_TXP_DW1,
1085+ FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
1086 } else {
1087- dev_err(dev->hw->dev, "mtk_wed%d: rx(%d) dma enable failed!\n",
1088- dev->hw->index, idx);
1089+ wed_w32(dev, MTK_WED_TXP_DW1,
1090+ FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
1091+ wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
1092 }
1093 }
1094
1095 static void
1096 mtk_wed_dma_enable(struct mtk_wed_device *dev)
1097 {
1098- wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
1099- MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
1100+#define MTK_WFMDA_RX_DMA_EN BIT(2)
1101+
1102+ if (dev->hw->version == 1)
1103+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
1104+ MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
1105
1106 wed_set(dev, MTK_WED_GLO_CFG,
1107 MTK_WED_GLO_CFG_TX_DMA_EN |
1108 MTK_WED_GLO_CFG_RX_DMA_EN);
1109+
1110+ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG,
1111+ FIELD_PREP(MTK_WED_WDMA_RX_PREF_BURST_SIZE, 0x10) |
1112+ FIELD_PREP(MTK_WED_WDMA_RX_PREF_LOW_THRES, 0x8));
1113+ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
1114+ MTK_WED_WDMA_RX_PREF_DDONE2_EN);
1115+
1116+ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, MTK_WED_WDMA_RX_PREF_EN);
1117+
1118 wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
1119 MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
1120- MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
1121+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN |
1122+ MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR);
1123 wed_set(dev, MTK_WED_WDMA_GLO_CFG,
1124 MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
1125
1126 wdma_set(dev, MTK_WDMA_GLO_CFG,
1127- MTK_WDMA_GLO_CFG_TX_DMA_EN |
1128+ MTK_WDMA_GLO_CFG_TX_DMA_EN /*|
1129 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
1130- MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
1131+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES*/);
1132
1133- if (dev->ver == MTK_WED_V1) {
1134+ if (dev->hw->version == 1) {
1135 wdma_set(dev, MTK_WDMA_GLO_CFG,
1136 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
1137 } else {
1138 int idx = 0;
1139
1140- wed_set(dev, MTK_WED_WPDMA_CTRL,
1141- MTK_WED_WPDMA_CTRL_SDL1_FIXED);
1142-
1143- wed_set(dev, MTK_WED_WDMA_GLO_CFG,
1144- MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
1145- MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
1146+ if (mtk_wed_get_rx_capa(dev))
1147+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
1148+ MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
1149+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
1150
1151 wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
1152 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
1153 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
1154
1155+ if (dev->hw->version == 3) {
1156+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
1157+ MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST);
1158+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
1159+ MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK |
1160+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK |
1161+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4);
1162+
1163+ wdma_set(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
1164+ //wdma_w32(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
1165+ if (mtk_wed_get_rx_capa(dev)) {
1166+ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
1167+ MTK_WED_WPDMA_RX_D_PREF_EN |
1168+ FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE, 0x10) |
1169+ FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_LOW_THRES, 0x8));
1170+
1171+ wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN);
1172+
1173+ wdma_set(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
1174+
1175+ wdma_set(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
1176+ }
1177+ }
1178+
1179 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
1180 MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
1181 MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
1182
1183+ if (!mtk_wed_get_rx_capa(dev))
1184+ return;
1185+
1186+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RXD_READ_LEN);
1187 wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
1188 MTK_WED_WPDMA_RX_D_RX_DRV_EN |
1189 FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
1190 FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
1191 0x2));
1192
1193- for (idx = 0; idx < dev->hw->ring_num; idx++)
1194- mtk_wed_check_wfdma_rx_fill(dev, idx);
1195+ for (idx = 0; idx < dev->hw->ring_num; idx++) {
1196+ struct mtk_wed_ring *ring = &dev->rx_ring[idx];
1197+
1198+ if(!(ring->flags & MTK_WED_RING_CONFIGURED))
1199+ continue;
1200+
1201+ if(mtk_wed_check_wfdma_rx_fill(dev, ring)) {
1202+ unsigned int val;
1203+
1204+ val = wifi_r32(dev, dev->wlan.wpdma_rx_glo -
1205+ dev->wlan.phy_base);
1206+ val |= MTK_WFMDA_RX_DMA_EN;
1207+
1208+ wifi_w32(dev, dev->wlan.wpdma_rx_glo -
1209+ dev->wlan.phy_base, val);
1210+
1211+ dev_err(dev->hw->dev, "mtk_wed%d: rx(%d) dma enable successful!\n",
1212+ dev->hw->index, idx);
1213+ } else {
1214+ dev_err(dev->hw->dev, "mtk_wed%d: rx(%d) dma enable failed!\n",
1215+ dev->hw->index, idx);
1216+ }
1217+ }
1218 }
1219 }
1220
developer2b762412023-09-21 19:13:58 +08001221@@ -644,15 +999,20 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001222 MTK_WED_WPDMA_RX_D_RX_DRV_EN);
1223 wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
1224 MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
1225- }
1226
1227- mtk_wed_set_512_support(dev, false);
1228+ if (dev->hw->version == 3 && mtk_wed_get_rx_capa(dev)) {
1229+ wdma_clr(dev, MTK_WDMA_PREF_TX_CFG,
1230+ MTK_WDMA_PREF_TX_CFG_PREF_EN);
1231+ wdma_clr(dev, MTK_WDMA_PREF_RX_CFG,
1232+ MTK_WDMA_PREF_RX_CFG_PREF_EN);
1233+ }
1234+ }
1235 }
1236
1237 static void
1238 mtk_wed_stop(struct mtk_wed_device *dev)
1239 {
1240- if (dev->ver > MTK_WED_V1) {
1241+ if (mtk_wed_get_rx_capa(dev)) {
1242 wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
1243 wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0);
1244 }
developer2b762412023-09-21 19:13:58 +08001245@@ -677,13 +1037,21 @@ mtk_wed_deinit(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001246 MTK_WED_CTRL_WED_TX_BM_EN |
1247 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
1248
1249- if (dev->hw->ver == 1)
1250+ if (dev->hw->version == 1)
1251 return;
1252
1253 wed_clr(dev, MTK_WED_CTRL,
1254 MTK_WED_CTRL_RX_ROUTE_QM_EN |
1255 MTK_WED_CTRL_WED_RX_BM_EN |
1256 MTK_WED_CTRL_RX_RRO_QM_EN);
1257+
1258+ if (dev->hw->version == 3) {
1259+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_PAO_EN);
1260+ wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_TX_PAO);
1261+ wed_clr(dev, MTK_WED_PCIE_INT_CTRL,
1262+ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
1263+ MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER);
1264+ }
1265 }
1266
1267 static void
developer2b762412023-09-21 19:13:58 +08001268@@ -702,9 +1070,9 @@ mtk_wed_detach(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001269
1270 mtk_wdma_tx_reset(dev);
1271
1272- mtk_wed_free_buffer(dev);
1273+ mtk_wed_free_tx_buffer(dev);
1274 mtk_wed_free_tx_rings(dev);
1275- if (dev->ver > MTK_WED_V1) {
1276+ if (mtk_wed_get_rx_capa(dev)) {
1277 mtk_wed_wo_reset(dev);
1278 mtk_wed_free_rx_rings(dev);
1279 mtk_wed_wo_exit(hw);
developer2b762412023-09-21 19:13:58 +08001280@@ -731,24 +1099,29 @@ mtk_wed_detach(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001281 static void
1282 mtk_wed_bus_init(struct mtk_wed_device *dev)
1283 {
1284-#define PCIE_BASE_ADDR0 0x11280000
1285+ switch (dev->wlan.bus_type) {
1286+ case MTK_WED_BUS_PCIE: {
1287+ struct device_node *np = dev->hw->eth->dev->of_node;
1288+ struct regmap *regs;
developer23f9f0f2023-06-15 13:06:25 +08001289
1290- if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
1291- struct device_node *node;
1292- void __iomem * base_addr;
1293- u32 value = 0;
1294+ if (dev->hw->version == 2) {
1295+ regs = syscon_regmap_lookup_by_phandle(np,
1296+ "mediatek,wed-pcie");
1297+ if (IS_ERR(regs))
1298+ break;
1299
1300- node = of_parse_phandle(dev->hw->node, "mediatek,wed_pcie", 0);
1301- if (!node) {
1302- pr_err("%s: no wed_pcie node\n", __func__);
1303- return;
1304+ regmap_update_bits(regs, 0, BIT(0), BIT(0));
1305 }
1306
1307- base_addr = of_iomap(node, 0);
1308-
1309- value = readl(base_addr);
1310- value |= BIT(0);
1311- writel(value, base_addr);
1312+ if (dev->wlan.msi) {
1313+ wed_w32(dev, MTK_WED_PCIE_CFG_INTM, dev->hw->pci_base| 0xc08);
1314+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, dev->hw->pci_base | 0xc04);
1315+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(8));
1316+ } else {
1317+ wed_w32(dev, MTK_WED_PCIE_CFG_INTM, dev->hw->pci_base | 0x180);
1318+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, dev->hw->pci_base | 0x184);
1319+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
1320+ }
1321
developera8336302023-07-07 11:29:01 +08001322 wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
1323 FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));
developer2b762412023-09-21 19:13:58 +08001324@@ -756,45 +1129,53 @@ mtk_wed_bus_init(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001325 /* pcie interrupt control: pola/source selection */
1326 wed_set(dev, MTK_WED_PCIE_INT_CTRL,
1327 MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
1328- FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
1329- wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
developera8336302023-07-07 11:29:01 +08001330-
developer23f9f0f2023-06-15 13:06:25 +08001331- value = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
1332- value = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
1333- wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
1334- wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);
developera8336302023-07-07 11:29:01 +08001335+ MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER |
1336+ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, dev->hw->index));
1337
developer18d0d712023-08-23 11:50:09 +08001338- value = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
1339- value = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
1340-
developer23f9f0f2023-06-15 13:06:25 +08001341- wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
1342- wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
1343-
1344- /* pola setting */
1345- value = wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
1346- wed_set(dev, MTK_WED_PCIE_INT_CTRL,
1347- MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
1348- } else if (dev->wlan.bus_type == MTK_WED_BUS_AXI) {
1349+ break;
1350+ }
1351+ case MTK_WED_BUS_AXI:
1352 wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
1353 MTK_WED_WPDMA_INT_CTRL_SIG_SRC |
1354 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0));
1355+ break;
1356+ default:
1357+ break;
1358 }
1359+
1360 return;
1361 }
1362
1363 static void
1364 mtk_wed_set_wpdma(struct mtk_wed_device *dev)
1365 {
1366- if (dev->ver > MTK_WED_V1) {
1367+ if (dev->hw->version == 1) {
1368+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
1369+ } else {
1370+ mtk_wed_bus_init(dev);
1371+
1372 wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
1373 wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
1374- wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
1375+ wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
1376 wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
1377
1378- wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
1379- wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
1380- } else {
1381- wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
1382+ if (mtk_wed_get_rx_capa(dev)) {
1383+ int i;
1384+
1385+ wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
developerb74821e2023-09-08 14:19:59 +08001386+ wed_w32(dev, MTK_WED_WPDMA_RX_RING0, dev->wlan.wpdma_rx[0]);
1387+ if (dev->wlan.wpdma_rx[1])
1388+ wed_w32(dev, MTK_WED_WPDMA_RX_RING1, dev->wlan.wpdma_rx[1]);
developer23f9f0f2023-06-15 13:06:25 +08001389+
1390+ if (dev->wlan.hwrro) {
developerb74821e2023-09-08 14:19:59 +08001391+ wed_w32(dev, MTK_WED_RRO_RX_D_CFG(0), dev->wlan.wpdma_rx_rro[0]);
1392+ wed_w32(dev, MTK_WED_RRO_RX_D_CFG(1), dev->wlan.wpdma_rx_rro[1]);
1393+ for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) {
1394+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING_CFG(i),
1395+ dev->wlan.wpdma_rx_pg + i * 0x10);
developer23f9f0f2023-06-15 13:06:25 +08001396+ }
1397+ }
1398+ }
1399 }
1400 }
1401
developer2b762412023-09-21 19:13:58 +08001402@@ -806,21 +1187,25 @@ mtk_wed_hw_init_early(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001403 mtk_wed_deinit(dev);
1404 mtk_wed_reset(dev, MTK_WED_RESET_WED);
1405
1406- if (dev->ver > MTK_WED_V1)
1407- mtk_wed_bus_init(dev);
1408-
1409 mtk_wed_set_wpdma(dev);
1410
1411- mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
1412- MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
1413- MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
1414- set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
1415- MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
1416- MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
1417+ if (dev->hw->version == 3) {
1418+ mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE;
1419+ set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2);
1420+ } else {
1421+ mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
1422+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
1423+ MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
1424+ set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
1425+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
1426+ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
1427+ }
1428+
1429 wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
1430
1431- if (dev->ver == MTK_WED_V1) {
1432+ if (dev->hw->version == 1) {
1433 u32 offset;
1434+
1435 offset = dev->hw->index ? 0x04000400 : 0;
1436 wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
1437 wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
developer2b762412023-09-21 19:13:58 +08001438@@ -907,11 +1292,16 @@ mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001439 } while (1);
1440
1441 /* configure RX_ROUTE_QM */
1442- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
1443- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
1444- wed_set(dev, MTK_WED_RTQM_GLO_CFG,
1445- FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
1446- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
1447+ if (dev->hw->version == 2) {
1448+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
1449+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
1450+ wed_set(dev, MTK_WED_RTQM_GLO_CFG,
1451+ FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
1452+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
1453+ } else {
1454+ wed_set(dev, MTK_WED_RTQM_ENQ_CFG0,
1455+ FIELD_PREP(MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT, 0x3 + dev->hw->index));
1456+ }
1457
1458 /* enable RX_ROUTE_QM */
1459 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
developer2b762412023-09-21 19:13:58 +08001460@@ -920,23 +1310,45 @@ mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001461 static void
1462 mtk_wed_tx_hw_init(struct mtk_wed_device *dev)
1463 {
1464- int size = dev->buf_ring.size;
1465+ int size = dev->wlan.nbuf;
1466 int rev_size = MTK_WED_TX_RING_SIZE / 2;
1467- int thr = 1;
1468+ int thr_lo = 1, thr_hi = 1;
1469
1470- if (dev->ver > MTK_WED_V1) {
1471+ if (dev->hw->version == 1) {
1472+ wed_w32(dev, MTK_WED_TX_BM_CTRL,
1473+ MTK_WED_TX_BM_CTRL_PAUSE |
1474+ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, size / 128) |
1475+ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, rev_size / 128));
1476+ } else {
1477 size = MTK_WED_WDMA_RING_SIZE * ARRAY_SIZE(dev->tx_wdma) +
1478- dev->buf_ring.size;
1479+ dev->tx_buf_ring.size;
1480 rev_size = size;
1481- thr = 0;
1482+ thr_lo = 0;
1483+ thr_hi = MTK_WED_TX_BM_DYN_THR_HI;
1484+
1485+ wed_w32(dev, MTK_WED_TX_TKID_CTRL,
1486+ MTK_WED_TX_TKID_CTRL_PAUSE |
1487+ FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
1488+ size / 128) |
1489+ FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
1490+ size / 128));
1491+
1492+ /* return SKBID + SDP back to bm */
1493+ if (dev->ver == 3) {
1494+ wed_set(dev, MTK_WED_TX_TKID_CTRL,
1495+ MTK_WED_TX_TKID_CTRL_FREE_FORMAT);
1496+ size = dev->wlan.nbuf;
1497+ rev_size = size;
1498+ } else {
1499+ wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
1500+ FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
1501+ MTK_WED_TX_TKID_DYN_THR_HI);
1502+ }
1503 }
1504
1505- wed_w32(dev, MTK_WED_TX_BM_CTRL,
1506- MTK_WED_TX_BM_CTRL_PAUSE |
1507- FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, size / 128) |
1508- FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, rev_size / 128));
1509+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
1510
1511- wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);
1512+ wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys);
1513
1514 wed_w32(dev, MTK_WED_TX_BM_TKID,
1515 FIELD_PREP(MTK_WED_TX_BM_TKID_START,
developer2b762412023-09-21 19:13:58 +08001516@@ -946,25 +1358,44 @@ mtk_wed_tx_hw_init(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001517
1518 wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
1519
1520- wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
1521- FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, thr) |
1522- MTK_WED_TX_BM_DYN_THR_HI);
1523+ if (dev->hw->version < 3)
1524+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
1525+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, thr_lo) |
1526+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, thr_hi));
1527+ else {
1528+ /* change to new bm */
1529+ wed_w32(dev, MTK_WED_TX_BM_INIT_PTR, dev->tx_buf_ring.pkt_nums |
developerb74821e2023-09-08 14:19:59 +08001530+ MTK_WED_TX_BM_INIT_SW_TAIL_IDX);
developer23f9f0f2023-06-15 13:06:25 +08001531+ wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_LEGACY_EN);
1532+ }
1533
1534- if (dev->ver > MTK_WED_V1) {
1535+ if (dev->hw->version != 1) {
1536 wed_w32(dev, MTK_WED_TX_TKID_CTRL,
1537 MTK_WED_TX_TKID_CTRL_PAUSE |
1538 FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
1539- dev->buf_ring.size / 128) |
1540+ size / 128) |
1541 FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
1542- dev->buf_ring.size / 128));
1543- wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
1544- FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
1545- MTK_WED_TX_TKID_DYN_THR_HI);
1546+ size / 128));
1547+
1548+ /* return SKBID + SDP back to bm */
1549+ if (dev->ver == 3)
1550+ wed_set(dev, MTK_WED_TX_TKID_CTRL,
1551+ MTK_WED_TX_TKID_CTRL_FREE_FORMAT);
1552+ else
1553+ wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
1554+ FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
1555+ MTK_WED_TX_TKID_DYN_THR_HI);
1556 }
1557- mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
1558+ wed_w32(dev, MTK_WED_TX_BM_TKID,
1559+ FIELD_PREP(MTK_WED_TX_BM_TKID_START,
1560+ dev->wlan.token_start) |
1561+ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
1562+ dev->wlan.token_start + dev->wlan.nbuf - 1));
1563
1564+ wed_w32(dev, MTK_WED_TX_BM_INIT_PTR, dev->tx_buf_ring.pkt_nums |
1565+ MTK_WED_TX_BM_INIT_SW_TAIL_IDX);
1566 wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
1567- if (dev->ver > MTK_WED_V1)
1568+ if (dev->hw->version != 1)
1569 wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
1570 }
1571
developer2b762412023-09-21 19:13:58 +08001572@@ -977,7 +1408,26 @@ mtk_wed_rx_hw_init(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001573
1574 wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
1575
1576+ /* reset prefetch index of ring */
1577+ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX,
1578+ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
1579+ wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX,
1580+ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
1581+
1582+ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX,
1583+ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
1584+ wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX,
1585+ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
1586+
1587+ /* reset prefetch FIFO of ring */
1588+ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG,
1589+ MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR |
1590+ MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR);
1591+ wed_w32(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG, 0);
1592+
1593 mtk_wed_rx_bm_hw_init(dev);
1594+ if (dev->wlan.hwrro)
1595+ mtk_wed_hwrro_init(dev);
1596 mtk_wed_rro_hw_init(dev);
1597 mtk_wed_route_qm_hw_init(dev);
1598 }
developer2b762412023-09-21 19:13:58 +08001599@@ -991,7 +1441,7 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001600 dev->init_done = true;
1601 mtk_wed_set_ext_int(dev, false);
1602 mtk_wed_tx_hw_init(dev);
1603- if (dev->ver > MTK_WED_V1)
1604+ if (mtk_wed_get_rx_capa(dev))
1605 mtk_wed_rx_hw_init(dev);
1606 }
1607
developer2b762412023-09-21 19:13:58 +08001608@@ -1015,26 +1465,6 @@ mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size, int scale, bool tx)
developer23f9f0f2023-06-15 13:06:25 +08001609 }
1610 }
1611
1612-static u32
1613-mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
1614-{
1615- if (wed_r32(dev, reg) & mask)
1616- return true;
1617-
1618- return false;
1619-}
1620-
1621-static int
1622-mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
1623-{
1624- int sleep = 1000;
1625- int timeout = 100 * sleep;
1626- u32 val;
1627-
1628- return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
1629- timeout, false, dev, reg, mask);
1630-}
1631-
1632 static void
1633 mtk_wed_rx_reset(struct mtk_wed_device *dev)
1634 {
developer2b762412023-09-21 19:13:58 +08001635@@ -1133,7 +1563,7 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001636 mtk_wed_ring_reset(desc, MTK_WED_RX_RING_SIZE, 1, false);
1637 }
1638
1639- mtk_wed_free_rx_bm(dev);
1640+ mtk_wed_free_rx_buffer(dev);
1641 }
1642
1643
developer2b762412023-09-21 19:13:58 +08001644@@ -1271,12 +1701,15 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev,
developer23f9f0f2023-06-15 13:06:25 +08001645 int idx, int size, bool reset)
1646 {
1647 struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
1648+ int scale = dev->hw->version > 1 ? 2 : 1;
1649
1650 if(!reset)
1651 if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
1652- dev->ver, true))
1653+ scale, true))
1654 return -ENOMEM;
1655
1656+ wdma->flags |= MTK_WED_RING_CONFIGURED;
1657+
1658 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
1659 wdma->desc_phys);
1660 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
developer2b762412023-09-21 19:13:58 +08001661@@ -1296,12 +1729,33 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev,
developer23f9f0f2023-06-15 13:06:25 +08001662 int idx, int size, bool reset)
1663 {
1664 struct mtk_wed_ring *wdma = &dev->rx_wdma[idx];
1665+ int scale = dev->hw->version > 1 ? 2 : 1;
1666
1667 if (!reset)
1668 if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
1669- dev->ver, true))
1670+ scale, true))
1671 return -ENOMEM;
1672
1673+ if (dev->hw->version == 3) {
1674+ struct mtk_wdma_desc *desc = wdma->desc;
1675+ int i;
1676+
1677+ for (i = 0; i < MTK_WED_WDMA_RING_SIZE; i++) {
1678+ desc->buf0 = 0;
1679+ desc->ctrl = MTK_WDMA_DESC_CTRL_DMA_DONE;
1680+ desc->buf1 = 0;
1681+ desc->info = MTK_WDMA_TXD0_DESC_INFO_DMA_DONE;
1682+ desc++;
1683+ desc->buf0 = 0;
1684+ desc->ctrl = MTK_WDMA_DESC_CTRL_DMA_DONE;
1685+ desc->buf1 = 0;
1686+ desc->info = MTK_WDMA_TXD1_DESC_INFO_DMA_DONE;
1687+ desc++;
1688+ }
1689+ }
1690+
developer2b762412023-09-21 19:13:58 +08001691+ wdma->flags |= MTK_WED_RING_CONFIGURED;
1692+
developer23f9f0f2023-06-15 13:06:25 +08001693 wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
1694 wdma->desc_phys);
1695 wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
developer2b762412023-09-21 19:13:58 +08001696@@ -1312,7 +1766,7 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev,
developer23f9f0f2023-06-15 13:06:25 +08001697 MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);
1698 if (reset)
1699 mtk_wed_ring_reset(wdma->desc, MTK_WED_WDMA_RING_SIZE,
1700- dev->ver, true);
1701+ scale, true);
1702 if (idx == 0) {
1703 wed_w32(dev, MTK_WED_WDMA_RING_TX
1704 + MTK_WED_RING_OFS_BASE, wdma->desc_phys);
developer2b762412023-09-21 19:13:58 +08001705@@ -1395,7 +1849,7 @@ mtk_wed_send_msg(struct mtk_wed_device *dev, int cmd_id, void *data, int len)
developer23f9f0f2023-06-15 13:06:25 +08001706 {
1707 struct mtk_wed_wo *wo = dev->hw->wed_wo;
1708
1709- if (dev->ver == MTK_WED_V1)
1710+ if (!mtk_wed_get_rx_capa(dev))
1711 return 0;
1712
1713 return mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, cmd_id, data, len, true);
developer2b762412023-09-21 19:13:58 +08001714@@ -1420,24 +1874,106 @@ mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb,
developer23f9f0f2023-06-15 13:06:25 +08001715 }
1716 }
1717
1718+static void
1719+mtk_wed_start_hwrro(struct mtk_wed_device *dev, u32 irq_mask)
1720+{
1721+ int idx, ret;
1722+
1723+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
1724+ wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
1725+
1726+ if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hwrro)
1727+ return;
1728+
1729+ wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR);
1730+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, MTK_WED_RRO_MSDU_PG_DRV_CLR);
1731+
1732+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_RX,
1733+ MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN |
1734+ MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR |
1735+ MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN |
1736+ MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR |
1737+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG,
1738+ dev->wlan.rro_rx_tbit[0]) |
1739+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG,
1740+ dev->wlan.rro_rx_tbit[1]));
1741+
1742+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG,
1743+ MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN |
1744+ MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR |
1745+ MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN |
1746+ MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR |
1747+ MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN |
1748+ MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR |
1749+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG,
1750+ dev->wlan.rx_pg_tbit[0]) |
1751+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG,
1752+ dev->wlan.rx_pg_tbit[1])|
1753+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG,
1754+ dev->wlan.rx_pg_tbit[2]));
1755+
1756+ /*
1757+ * RRO_MSDU_PG_RING2_CFG1_FLD_DRV_EN should be enabled after
1758+ * WM FWDL completed, otherwise RRO_MSDU_PG ring may broken
1759+ */
1760+ wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, MTK_WED_RRO_MSDU_PG_DRV_EN);
1761+
1762+ for (idx = 0; idx < MTK_WED_RX_QUEUES; idx++) {
1763+ struct mtk_wed_ring *ring = &dev->rx_rro_ring[idx];
1764+
1765+ if(!(ring->flags & MTK_WED_RING_CONFIGURED))
1766+ continue;
1767+
1768+ ret = mtk_wed_check_wfdma_rx_fill(dev, ring);
1769+ if (!ret)
1770+ dev_err(dev->hw->dev, "mtk_wed%d: rx_rro_ring(%d) init failed!\n",
1771+ dev->hw->index, idx);
1772+ }
1773+
1774+ for (idx = 0; idx < MTK_WED_RX_PAGE_QUEUES; idx++){
1775+ struct mtk_wed_ring *ring = &dev->rx_page_ring[idx];
1776+ if(!(ring->flags & MTK_WED_RING_CONFIGURED))
1777+ continue;
1778+
1779+ ret = mtk_wed_check_wfdma_rx_fill(dev, ring);
1780+ if (!ret)
1781+ dev_err(dev->hw->dev, "mtk_wed%d: rx_page_ring(%d) init failed!\n",
1782+ dev->hw->index, idx);
1783+ }
1784+}
1785+
1786 static void
1787 mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
1788 {
1789 int i, ret;
1790
1791- if (dev->ver > MTK_WED_V1)
1792- ret = mtk_wed_rx_bm_alloc(dev);
1793+ if (mtk_wed_get_rx_capa(dev)) {
1794+ ret = mtk_wed_rx_buffer_alloc(dev);
1795+ if (ret)
1796+ return;
1797+
1798+ if (dev->wlan.hwrro)
1799+ mtk_wed_rx_page_buffer_alloc(dev);
1800+ }
1801
1802 for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
1803 if (!dev->tx_wdma[i].desc)
developerb74821e2023-09-08 14:19:59 +08001804 mtk_wed_wdma_rx_ring_setup(dev, i, 16, false);
1805
1806+ for (i = 0; i < ARRAY_SIZE(dev->rx_page_ring); i++) {
1807+ u32 count = MTK_WED_RRO_MSDU_PG_CTRL0(i) +
1808+ MTK_WED_RING_OFS_COUNT;
1809+
1810+ if (!wed_r32(dev, count))
1811+ wed_w32(dev, count, 1);
1812+ }
1813+
1814 mtk_wed_hw_init(dev);
1815
developer23f9f0f2023-06-15 13:06:25 +08001816 mtk_wed_set_int(dev, irq_mask);
1817 mtk_wed_set_ext_int(dev, true);
1818
1819- if (dev->ver == MTK_WED_V1) {
1820+ if (dev->hw->version == 1) {
1821 u32 val;
1822
1823 val = dev->wlan.wpdma_phys |
developer2b762412023-09-21 19:13:58 +08001824@@ -1448,33 +1984,52 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
developer23f9f0f2023-06-15 13:06:25 +08001825 val |= BIT(1);
1826 val |= BIT(0);
1827 regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
1828- } else {
1829+ } else if (mtk_wed_get_rx_capa(dev)) {
1830 /* driver set mid ready and only once */
1831 wed_w32(dev, MTK_WED_EXT_INT_MASK1,
1832 MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
1833 wed_w32(dev, MTK_WED_EXT_INT_MASK2,
1834 MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
1835+ if (dev->hw->version == 3)
1836+ wed_w32(dev, MTK_WED_EXT_INT_MASK3,
1837+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
1838
1839 wed_r32(dev, MTK_WED_EXT_INT_MASK1);
1840 wed_r32(dev, MTK_WED_EXT_INT_MASK2);
1841+ if (dev->hw->version == 3)
1842+ wed_r32(dev, MTK_WED_EXT_INT_MASK3);
1843
1844 ret = mtk_wed_rro_cfg(dev);
1845 if (ret)
1846 return;
1847 }
1848- mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
1849+
1850+ if (dev->hw->version == 2)
1851+ mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
1852+ else if (dev->hw->version == 3)
1853+ mtk_wed_pao_init(dev);
1854
1855 mtk_wed_dma_enable(dev);
1856 dev->running = true;
1857 }
1858
1859+static int
1860+mtk_wed_get_pci_base(struct mtk_wed_device *dev)
1861+{
1862+ if (dev->hw->index == 0)
1863+ return MTK_WED_PCIE_BASE0;
1864+ else if (dev->hw->index == 1)
1865+ return MTK_WED_PCIE_BASE1;
1866+ else
1867+ return MTK_WED_PCIE_BASE2;
1868+}
1869+
1870 static int
1871 mtk_wed_attach(struct mtk_wed_device *dev)
1872 __releases(RCU)
1873 {
1874 struct mtk_wed_hw *hw;
1875 struct device *device;
1876- u16 ver;
1877 int ret = 0;
1878
1879 RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
developer2b762412023-09-21 19:13:58 +08001880@@ -1494,34 +2049,30 @@ mtk_wed_attach(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001881 goto out;
1882 }
1883
1884- device = dev->wlan.bus_type == MTK_WED_BUS_PCIE
1885- ? &dev->wlan.pci_dev->dev
1886- : &dev->wlan.platform_dev->dev;
1887+ device = dev->wlan.bus_type == MTK_WED_BUS_PCIE ?
1888+ &dev->wlan.pci_dev->dev
1889+ : &dev->wlan.platform_dev->dev;
1890 dev_info(device, "attaching wed device %d version %d\n",
1891- hw->index, hw->ver);
1892+ hw->index, hw->version);
1893
1894 dev->hw = hw;
1895 dev->dev = hw->dev;
1896 dev->irq = hw->irq;
1897 dev->wdma_idx = hw->index;
1898+ dev->ver = hw->version;
1899+
1900+ if (dev->hw->version == 3)
1901+ dev->hw->pci_base = mtk_wed_get_pci_base(dev);
1902
1903 if (hw->eth->dma_dev == hw->eth->dev &&
1904 of_dma_is_coherent(hw->eth->dev->of_node))
1905 mtk_eth_set_dma_device(hw->eth, hw->dev);
1906
1907- dev->ver = FIELD_GET(MTK_WED_REV_ID_MAJOR,
1908- wed_r32(dev, MTK_WED_REV_ID));
1909- if (dev->ver > MTK_WED_V1)
1910- ver = FIELD_GET(MTK_WED_REV_ID_MINOR,
1911- wed_r32(dev, MTK_WED_REV_ID));
1912-
1913- dev->rev_id = ((dev->ver << 28) | ver << 16);
1914-
1915- ret = mtk_wed_buffer_alloc(dev);
1916+ ret = mtk_wed_tx_buffer_alloc(dev);
1917 if (ret)
1918 goto error;
1919
1920- if (dev->ver > MTK_WED_V1) {
1921+ if (mtk_wed_get_rx_capa(dev)) {
1922 ret = mtk_wed_rro_alloc(dev);
1923 if (ret)
1924 goto error;
developer2b762412023-09-21 19:13:58 +08001925@@ -1533,15 +2084,20 @@ mtk_wed_attach(struct mtk_wed_device *dev)
developer23f9f0f2023-06-15 13:06:25 +08001926 init_completion(&dev->wlan_reset_done);
1927 atomic_set(&dev->fe_reset, 0);
1928
1929- if (dev->ver == MTK_WED_V1)
1930+ if (dev->hw->version != 1)
1931+ dev->rev_id = wed_r32(dev, MTK_WED_REV_ID);
1932+ else
1933 regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
1934 BIT(hw->index), 0);
1935- else
1936+
1937+ if (mtk_wed_get_rx_capa(dev))
1938 ret = mtk_wed_wo_init(hw);
1939
1940 error:
1941- if (ret)
1942+ if (ret) {
1943+ pr_info("%s: detach wed\n", __func__);
1944 mtk_wed_detach(dev);
1945+ }
1946 out:
1947 mutex_unlock(&hw_lock);
1948
developer2b762412023-09-21 19:13:58 +08001949@@ -1576,8 +2132,26 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx,
developer23f9f0f2023-06-15 13:06:25 +08001950 if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE, reset))
1951 return -ENOMEM;
1952
1953+ if (dev->hw->version == 3 && idx == 1) {
1954+ /* reset prefetch index */
1955+ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG,
1956+ MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR |
1957+ MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR);
1958+
1959+ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
1960+ MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR |
1961+ MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR);
1962+
1963+ /* reset prefetch FIFO */
1964+ wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG,
1965+ MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR |
1966+ MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR);
1967+ wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG, 0);
1968+ }
1969+
1970 ring->reg_base = MTK_WED_RING_TX(idx);
1971 ring->wpdma = regs;
1972+ ring->flags |= MTK_WED_RING_CONFIGURED;
1973
1974 /* WED -> WPDMA */
1975 wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
developer2b762412023-09-21 19:13:58 +08001976@@ -1599,7 +2173,7 @@ mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
developer23f9f0f2023-06-15 13:06:25 +08001977 struct mtk_wed_ring *ring = &dev->txfree_ring;
1978 int i, idx = 1;
1979
1980- if(dev->ver > MTK_WED_V1)
1981+ if(dev->hw->version > 1)
1982 idx = 0;
1983
1984 /*
developer2b762412023-09-21 19:13:58 +08001985@@ -1638,6 +2212,7 @@ mtk_wed_rx_ring_setup(struct mtk_wed_device *dev,
1986
1987 ring->reg_base = MTK_WED_RING_RX_DATA(idx);
1988 ring->wpdma = regs;
1989+ ring->flags |= MTK_WED_RING_CONFIGURED;
1990 dev->hw->ring_num = idx + 1;
1991
1992 /* WPDMA -> WED */
1993@@ -1652,6 +2227,129 @@ mtk_wed_rx_ring_setup(struct mtk_wed_device *dev,
developer23f9f0f2023-06-15 13:06:25 +08001994 return 0;
1995 }
1996
1997+static int
1998+mtk_wed_rro_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
1999+{
2000+ struct mtk_wed_ring *ring = &dev->rx_rro_ring[idx];
2001+
2002+ ring->wpdma = regs;
2003+
2004+ wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_BASE,
2005+ readl(regs));
2006+ wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_COUNT,
2007+ readl(regs + MTK_WED_RING_OFS_COUNT));
2008+
2009+ ring->flags |= MTK_WED_RING_CONFIGURED;
2010+
2011+ return 0;
2012+}
2013+
2014+static int
2015+mtk_wed_msdu_pg_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
2016+{
2017+ struct mtk_wed_ring *ring = &dev->rx_page_ring[idx];
2018+
2019+ ring->wpdma = regs;
2020+
2021+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_BASE,
2022+ readl(regs));
2023+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_COUNT,
2024+ readl(regs + MTK_WED_RING_OFS_COUNT));
2025+
2026+ ring->flags |= MTK_WED_RING_CONFIGURED;
2027+
2028+ return 0;
2029+}
2030+
2031+static int
2032+mtk_wed_ind_rx_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
2033+{
2034+ struct mtk_wed_ring *ring = &dev->ind_cmd_ring;
2035+ u32 val = readl(regs + MTK_WED_RING_OFS_COUNT);
2036+ int i = 0, cnt = 0;
2037+
2038+ ring->wpdma = regs;
2039+
2040+ if (readl(regs) & 0xf)
2041+ pr_info("%s(): address is not 16-byte alignment\n", __func__);
2042+
2043+ wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_BASE,
2044+ readl(regs) & 0xfffffff0);
2045+
2046+ wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_COUNT,
2047+ readl(regs + MTK_WED_RING_OFS_COUNT));
2048+
2049+ /* ack sn cr */
2050+ wed_w32(dev, MTK_WED_RRO_CFG0, dev->wlan.phy_base +
2051+ dev->wlan.ind_cmd.ack_sn_addr);
2052+ wed_w32(dev, MTK_WED_RRO_CFG1,
2053+ FIELD_PREP(MTK_WED_RRO_CFG1_MAX_WIN_SZ,
2054+ dev->wlan.ind_cmd.win_size) |
2055+ FIELD_PREP(MTK_WED_RRO_CFG1_PARTICL_SE_ID,
2056+ dev->wlan.ind_cmd.particular_sid));
2057+
2058+ /* particular session addr element */
2059+ wed_w32(dev, MTK_WED_ADDR_ELEM_CFG0, dev->wlan.ind_cmd.particular_se_phys);
2060+
2061+ for (i = 0; i < dev->wlan.ind_cmd.se_group_nums; i++) {
2062+ wed_w32(dev, MTK_WED_RADDR_ELEM_TBL_WDATA,
2063+ dev->wlan.ind_cmd.addr_elem_phys[i] >> 4);
2064+ wed_w32(dev, MTK_WED_ADDR_ELEM_TBL_CFG,
2065+ MTK_WED_ADDR_ELEM_TBL_WR | (i & 0x7f));
2066+
2067+ val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
2068+ while (!(val & MTK_WED_ADDR_ELEM_TBL_WR_RDY) &&
2069+ cnt < 100) {
2070+ val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
2071+ cnt++;
2072+ }
2073+ if (cnt >= 100) {
2074+ dev_err(dev->hw->dev, "mtk_wed%d: write ba session base failed!\n",
2075+ dev->hw->index);
2076+ }
2077+ /*if (mtk_wed_poll_busy(dev, MTK_WED_ADDR_ELEM_TBL_CFG,
2078+ MTK_WED_ADDR_ELEM_TBL_WR_RDY)) {
2079+ dev_err(dev->hw->dev, "mtk_wed%d: write ba session base failed!\n",
2080+ dev->hw->index);
2081+ return -1;
2082+ }*/
2083+ }
2084+
2085+ /* pn check init */
2086+ for (i = 0; i < dev->wlan.ind_cmd.particular_sid; i++) {
2087+ wed_w32(dev, MTK_WED_PN_CHECK_WDATA_M,
2088+ MTK_WED_PN_CHECK_IS_FIRST);
2089+
2090+ wed_w32(dev, MTK_WED_PN_CHECK_CFG, MTK_WED_PN_CHECK_WR |
2091+ FIELD_PREP(MTK_WED_PN_CHECK_SE_ID, i));
2092+
2093+ cnt = 0;
2094+ val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
2095+ while (!(val & MTK_WED_PN_CHECK_WR_RDY) &&
2096+ cnt < 100) {
2097+ val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
2098+ cnt++;
2099+ }
2100+ if (cnt >= 100) {
2101+ dev_err(dev->hw->dev, "mtk_wed%d: session(%d) init failed!\n",
2102+ dev->hw->index, i);
2103+ }
2104+ /*if (mtk_wed_poll_busy(dev, MTK_WED_PN_CHECK_CFG,
2105+ MTK_WED_PN_CHECK_WR_RDY)) {
2106+ dev_err(dev->hw->dev, "mtk_wed%d: session(%d) init failed!\n",
2107+ dev->hw->index, i);
2108+ //return -1;
2109+ }*/
2110+ }
2111+
2112+ wed_w32(dev, MTK_WED_RX_IND_CMD_CNT0, MTK_WED_RX_IND_CMD_DBG_CNT_EN);
2113+
2114+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN);
2115+
2116+ return 0;
2117+}
2118+
2119+
2120 static u32
2121 mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
2122 {
developer2b762412023-09-21 19:13:58 +08002123@@ -1659,9 +2357,13 @@ mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
developer18d0d712023-08-23 11:50:09 +08002124
developer23f9f0f2023-06-15 13:06:25 +08002125 val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
2126 wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
developer18d0d712023-08-23 11:50:09 +08002127- val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
2128- if (!dev->hw->num_flows)
2129- val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
2130+ if (dev->hw->version == 3) {
developer23f9f0f2023-06-15 13:06:25 +08002131+ val &= MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT;
developer18d0d712023-08-23 11:50:09 +08002132+ } else {
2133+ val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
2134+ if (!dev->hw->num_flows)
2135+ val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
2136+ }
2137 if (val && net_ratelimit())
2138 pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val);
developer23f9f0f2023-06-15 13:06:25 +08002139
developer2b762412023-09-21 19:13:58 +08002140@@ -1754,6 +2456,9 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
developer23f9f0f2023-06-15 13:06:25 +08002141 .tx_ring_setup = mtk_wed_tx_ring_setup,
2142 .txfree_ring_setup = mtk_wed_txfree_ring_setup,
2143 .rx_ring_setup = mtk_wed_rx_ring_setup,
2144+ .rro_rx_ring_setup = mtk_wed_rro_rx_ring_setup,
2145+ .msdu_pg_rx_ring_setup = mtk_wed_msdu_pg_rx_ring_setup,
2146+ .ind_rx_ring_setup = mtk_wed_ind_rx_ring_setup,
2147 .msg_update = mtk_wed_send_msg,
2148 .start = mtk_wed_start,
2149 .stop = mtk_wed_stop,
developer2b762412023-09-21 19:13:58 +08002150@@ -1765,6 +2470,7 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
developer23f9f0f2023-06-15 13:06:25 +08002151 .detach = mtk_wed_detach,
2152 .setup_tc = mtk_wed_eth_setup_tc,
2153 .ppe_check = mtk_wed_ppe_check,
2154+ .start_hwrro = mtk_wed_start_hwrro,
2155 };
2156 struct device_node *eth_np = eth->dev->of_node;
2157 struct platform_device *pdev;
developer2b762412023-09-21 19:13:58 +08002158@@ -1804,9 +2510,10 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
developer23f9f0f2023-06-15 13:06:25 +08002159 hw->wdma_phy = wdma_phy;
2160 hw->index = index;
2161 hw->irq = irq;
2162- hw->ver = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
2163+ hw->version = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3) ?
2164+ 3 : MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
2165
2166- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2167+ if (hw->version == 1) {
2168 hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
2169 "mediatek,pcie-mirror");
2170 hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
developer2b762412023-09-21 19:13:58 +08002171@@ -1821,7 +2528,6 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
developer23f9f0f2023-06-15 13:06:25 +08002172 regmap_write(hw->mirror, 0, 0);
2173 regmap_write(hw->mirror, 4, 0);
2174 }
2175- hw->ver = MTK_WED_V1;
2176 }
2177
2178 mtk_wed_hw_add_debugfs(hw);
2179diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
2180index 490873c..fcf7bd0 100644
2181--- a/drivers/net/ethernet/mediatek/mtk_wed.h
2182+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
2183@@ -10,10 +10,13 @@
2184 #include <linux/netdevice.h>
2185 #define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000)
2186
2187-#define MTK_WED_PKT_SIZE 1900
2188+#define MTK_WED_PKT_SIZE 1920//1900
2189 #define MTK_WED_BUF_SIZE 2048
2190+#define MTK_WED_PAGE_BUF_SIZE 128
2191 #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
2192+#define MTK_WED_RX_PAGE_BUF_PER_PAGE (PAGE_SIZE / 128)
2193 #define MTK_WED_RX_RING_SIZE 1536
2194+#define MTK_WED_RX_PG_BM_CNT 8192
2195
2196 #define MTK_WED_TX_RING_SIZE 2048
2197 #define MTK_WED_WDMA_RING_SIZE 512
2198@@ -27,6 +30,9 @@
2199 #define MTK_WED_RRO_QUE_CNT 8192
2200 #define MTK_WED_MIOD_ENTRY_CNT 128
2201
2202+#define MTK_WED_TX_BM_DMA_SIZE 65536
2203+#define MTK_WED_TX_BM_PKT_CNT 32768
2204+
2205 #define MODULE_ID_WO 1
2206
2207 struct mtk_eth;
2208@@ -43,6 +49,8 @@ struct mtk_wed_hw {
2209 struct dentry *debugfs_dir;
2210 struct mtk_wed_device *wed_dev;
2211 struct mtk_wed_wo *wed_wo;
2212+ struct mtk_wed_pao *wed_pao;
2213+ u32 pci_base;
2214 u32 debugfs_reg;
2215 u32 num_flows;
2216 u32 wdma_phy;
2217@@ -50,7 +58,8 @@ struct mtk_wed_hw {
2218 int ring_num;
2219 int irq;
2220 int index;
2221- u32 ver;
2222+ int token_id;
2223+ u32 version;
2224 };
2225
2226 struct mtk_wdma_info {
2227@@ -58,6 +67,18 @@ struct mtk_wdma_info {
2228 u8 queue;
2229 u16 wcid;
2230 u8 bss;
2231+ u32 usr_info;
2232+ u8 tid;
2233+ u8 is_fixedrate;
2234+ u8 is_prior;
2235+ u8 is_sp;
2236+ u8 hf;
2237+ u8 amsdu_en;
2238+};
2239+
2240+struct mtk_wed_pao {
2241+ char *hif_txd[32];
2242+ dma_addr_t hif_txd_phys[32];
2243 };
2244
2245 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
2246diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
2247index 4a9e684..51e3d7c 100644
2248--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
2249+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
2250@@ -11,9 +11,11 @@ struct reg_dump {
2251 u16 offset;
2252 u8 type;
2253 u8 base;
2254+ u32 mask;
2255 };
2256
2257 enum {
2258+ DUMP_TYPE_END,
2259 DUMP_TYPE_STRING,
2260 DUMP_TYPE_WED,
2261 DUMP_TYPE_WDMA,
2262@@ -23,8 +25,11 @@ enum {
2263 DUMP_TYPE_WED_RRO,
2264 };
2265
2266+#define DUMP_END() { .type = DUMP_TYPE_END }
2267 #define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
2268 #define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ }
2269+#define DUMP_REG_MASK(_reg, _mask) { #_mask, MTK_##_reg, DUMP_TYPE_WED, 0, MTK_##_mask }
2270+
2271 #define DUMP_RING(_prefix, _base, ...) \
2272 { _prefix " BASE", _base, __VA_ARGS__ }, \
2273 { _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \
2274@@ -32,6 +37,7 @@ enum {
2275 { _prefix " DIDX", _base + 0xc, __VA_ARGS__ }
2276
2277 #define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED)
2278+#define DUMP_WED_MASK(_reg, _mask) DUMP_REG_MASK(_reg, _mask)
2279 #define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED)
2280
2281 #define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA)
2282@@ -52,36 +58,49 @@ print_reg_val(struct seq_file *s, const char *name, u32 val)
2283
2284 static void
2285 dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
2286- const struct reg_dump *regs, int n_regs)
2287+ const struct reg_dump **regs)
2288 {
2289- const struct reg_dump *cur;
2290+ const struct reg_dump **cur_o = regs, *cur;
2291+ bool newline = false;
2292 u32 val;
2293
2294- for (cur = regs; cur < &regs[n_regs]; cur++) {
2295- switch (cur->type) {
2296- case DUMP_TYPE_STRING:
2297- seq_printf(s, "%s======== %s:\n",
2298- cur > regs ? "\n" : "",
2299- cur->name);
2300- continue;
2301- case DUMP_TYPE_WED:
2302- case DUMP_TYPE_WED_RRO:
2303- val = wed_r32(dev, cur->offset);
2304- break;
2305- case DUMP_TYPE_WDMA:
2306- val = wdma_r32(dev, cur->offset);
2307- break;
2308- case DUMP_TYPE_WPDMA_TX:
2309- val = wpdma_tx_r32(dev, cur->base, cur->offset);
2310- break;
2311- case DUMP_TYPE_WPDMA_TXFREE:
2312- val = wpdma_txfree_r32(dev, cur->offset);
2313- break;
2314- case DUMP_TYPE_WPDMA_RX:
2315- val = wpdma_rx_r32(dev, cur->base, cur->offset);
2316- break;
2317+ while (*cur_o) {
2318+ cur = *cur_o;
2319+
2320+ while (cur->type != DUMP_TYPE_END) {
2321+ switch (cur->type) {
2322+ case DUMP_TYPE_STRING:
2323+ seq_printf(s, "%s======== %s:\n",
2324+ newline ? "\n" : "",
2325+ cur->name);
2326+ newline = true;
2327+ cur++;
2328+ continue;
2329+ case DUMP_TYPE_WED:
2330+ case DUMP_TYPE_WED_RRO:
2331+ val = wed_r32(dev, cur->offset);
2332+ break;
2333+ case DUMP_TYPE_WDMA:
2334+ val = wdma_r32(dev, cur->offset);
2335+ break;
2336+ case DUMP_TYPE_WPDMA_TX:
2337+ val = wpdma_tx_r32(dev, cur->base, cur->offset);
2338+ break;
2339+ case DUMP_TYPE_WPDMA_TXFREE:
2340+ val = wpdma_txfree_r32(dev, cur->offset);
2341+ break;
2342+ case DUMP_TYPE_WPDMA_RX:
2343+ val = wpdma_rx_r32(dev, cur->base, cur->offset);
2344+ break;
2345+ }
2346+
2347+ if (cur->mask)
2348+ val = (cur->mask & val) >> (ffs(cur->mask) - 1);
2349+
2350+ print_reg_val(s, cur->name, val);
2351+ cur++;
2352 }
2353- print_reg_val(s, cur->name, val);
2354+ cur_o++;
2355 }
2356 }
2357
2358@@ -89,7 +108,7 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
2359 static int
2360 wed_txinfo_show(struct seq_file *s, void *data)
2361 {
2362- static const struct reg_dump regs[] = {
2363+ static const struct reg_dump regs_common[] = {
2364 DUMP_STR("WED TX"),
2365 DUMP_WED(WED_TX_MIB(0)),
2366 DUMP_WED_RING(WED_RING_TX(0)),
2367@@ -128,16 +147,32 @@ wed_txinfo_show(struct seq_file *s, void *data)
2368 DUMP_WDMA_RING(WDMA_RING_RX(0)),
2369 DUMP_WDMA_RING(WDMA_RING_RX(1)),
2370
2371- DUMP_STR("TX FREE"),
2372+ DUMP_STR("WED TX FREE"),
2373 DUMP_WED(WED_RX_MIB(0)),
2374+ DUMP_WED_RING(WED_RING_RX(0)),
2375+ DUMP_WED(WED_WPDMA_RX_COHERENT_MIB(0)),
2376+
2377+ DUMP_WED(WED_RX_MIB(1)),
2378+ DUMP_WED_RING(WED_RING_RX(1)),
2379+ DUMP_WED(WED_WPDMA_RX_COHERENT_MIB(1)),
2380+ DUMP_STR("WED_WPDMA TX FREE"),
2381+ DUMP_WED_RING(WED_WPDMA_RING_RX(0)),
2382+ DUMP_WED_RING(WED_WPDMA_RING_RX(1)),
2383+ DUMP_END(),
2384+ };
2385+
2386+ static const struct reg_dump *regs[] = {
2387+ &regs_common[0],
2388+ NULL,
2389 };
2390+
2391 struct mtk_wed_hw *hw = s->private;
2392 struct mtk_wed_device *dev = hw->wed_dev;
2393
2394 if (!dev)
2395 return 0;
2396
2397- dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
2398+ dump_wed_regs(s, dev, regs);
2399
2400 return 0;
2401 }
2402@@ -146,7 +181,7 @@ DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
2403 static int
2404 wed_rxinfo_show(struct seq_file *s, void *data)
2405 {
2406- static const struct reg_dump regs[] = {
2407+ static const struct reg_dump regs_common[] = {
2408 DUMP_STR("WPDMA RX"),
2409 DUMP_WPDMA_RX_RING(0),
2410 DUMP_WPDMA_RX_RING(1),
2411@@ -164,7 +199,7 @@ wed_rxinfo_show(struct seq_file *s, void *data)
2412 DUMP_WED_RING(WED_RING_RX_DATA(0)),
2413 DUMP_WED_RING(WED_RING_RX_DATA(1)),
2414
2415- DUMP_STR("WED RRO"),
2416+ DUMP_STR("WED WO RRO"),
2417 DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0),
2418 DUMP_WED(WED_RROQM_MID_MIB),
2419 DUMP_WED(WED_RROQM_MOD_MIB),
2420@@ -175,16 +210,6 @@ wed_rxinfo_show(struct seq_file *s, void *data)
2421 DUMP_WED(WED_RROQM_FDBK_ANC_MIB),
2422 DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB),
2423
2424- DUMP_STR("WED Route QM"),
2425- DUMP_WED(WED_RTQM_R2H_MIB(0)),
2426- DUMP_WED(WED_RTQM_R2Q_MIB(0)),
2427- DUMP_WED(WED_RTQM_Q2H_MIB(0)),
2428- DUMP_WED(WED_RTQM_R2H_MIB(1)),
2429- DUMP_WED(WED_RTQM_R2Q_MIB(1)),
2430- DUMP_WED(WED_RTQM_Q2H_MIB(1)),
2431- DUMP_WED(WED_RTQM_Q2N_MIB),
2432- DUMP_WED(WED_RTQM_Q2B_MIB),
2433- DUMP_WED(WED_RTQM_PFDBK_MIB),
2434
2435 DUMP_STR("WED WDMA TX"),
2436 DUMP_WED(WED_WDMA_TX_MIB),
2437@@ -205,15 +230,99 @@ wed_rxinfo_show(struct seq_file *s, void *data)
2438 DUMP_WED(WED_RX_BM_INTF2),
2439 DUMP_WED(WED_RX_BM_INTF),
2440 DUMP_WED(WED_RX_BM_ERR_STS),
2441+ DUMP_END()
2442+ };
2443+
2444+ static const struct reg_dump regs_v2[] = {
2445+ DUMP_STR("WED Route QM"),
2446+ DUMP_WED(WED_RTQM_R2H_MIB(0)),
2447+ DUMP_WED(WED_RTQM_R2Q_MIB(0)),
2448+ DUMP_WED(WED_RTQM_Q2H_MIB(0)),
2449+ DUMP_WED(WED_RTQM_R2H_MIB(1)),
2450+ DUMP_WED(WED_RTQM_R2Q_MIB(1)),
2451+ DUMP_WED(WED_RTQM_Q2H_MIB(1)),
2452+ DUMP_WED(WED_RTQM_Q2N_MIB),
2453+ DUMP_WED(WED_RTQM_Q2B_MIB),
2454+ DUMP_WED(WED_RTQM_PFDBK_MIB),
2455+
2456+ DUMP_END()
2457+ };
2458+
2459+ static const struct reg_dump regs_v3[] = {
2460+ DUMP_STR("WED RX RRO DATA"),
2461+ DUMP_WED_RING(WED_RRO_RX_D_RX(0)),
2462+ DUMP_WED_RING(WED_RRO_RX_D_RX(1)),
2463+
2464+ DUMP_STR("WED RX MSDU PAGE"),
2465+ DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(0)),
2466+ DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(1)),
2467+ DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(2)),
2468+
2469+ DUMP_STR("WED RX IND CMD"),
2470+ DUMP_WED(WED_IND_CMD_RX_CTRL1),
2471+ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL2, WED_IND_CMD_MAX_CNT),
2472+ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0, WED_IND_CMD_PROC_IDX),
2473+ DUMP_WED_MASK(RRO_IND_CMD_SIGNATURE, RRO_IND_CMD_DMA_IDX),
2474+ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0, WED_IND_CMD_MAGIC_CNT),
2475+ DUMP_WED_MASK(RRO_IND_CMD_SIGNATURE, RRO_IND_CMD_MAGIC_CNT),
2476+ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0,
2477+ WED_IND_CMD_PREFETCH_FREE_CNT),
2478+ DUMP_WED_MASK(WED_RRO_CFG1, WED_RRO_CFG1_PARTICL_SE_ID),
2479+
2480+ DUMP_STR("WED ADDR ELEM"),
2481+ DUMP_WED(WED_ADDR_ELEM_CFG0),
2482+ DUMP_WED_MASK(WED_ADDR_ELEM_CFG1,
2483+ WED_ADDR_ELEM_PREFETCH_FREE_CNT),
2484+
2485+ DUMP_STR("WED Route QM"),
2486+ DUMP_WED(WED_RTQM_ENQ_I2Q_DMAD_CNT),
2487+ DUMP_WED(WED_RTQM_ENQ_I2N_DMAD_CNT),
2488+ DUMP_WED(WED_RTQM_ENQ_I2Q_PKT_CNT),
2489+ DUMP_WED(WED_RTQM_ENQ_I2N_PKT_CNT),
2490+ DUMP_WED(WED_RTQM_ENQ_USED_ENTRY_CNT),
2491+ DUMP_WED(WED_RTQM_ENQ_ERR_CNT),
2492+
2493+ DUMP_WED(WED_RTQM_DEQ_DMAD_CNT),
2494+ DUMP_WED(WED_RTQM_DEQ_Q2I_DMAD_CNT),
2495+ DUMP_WED(WED_RTQM_DEQ_PKT_CNT),
2496+ DUMP_WED(WED_RTQM_DEQ_Q2I_PKT_CNT),
2497+ DUMP_WED(WED_RTQM_DEQ_USED_PFDBK_CNT),
2498+ DUMP_WED(WED_RTQM_DEQ_ERR_CNT),
2499+
2500+ DUMP_END()
2501+ };
2502+
2503+ static const struct reg_dump *regs_new_v2[] = {
2504+ &regs_common[0],
2505+ &regs_v2[0],
2506+ NULL,
2507+ };
2508+
2509+ static const struct reg_dump *regs_new_v3[] = {
2510+ &regs_common[0],
2511+ &regs_v3[0],
2512+ NULL,
2513 };
2514
2515 struct mtk_wed_hw *hw = s->private;
2516 struct mtk_wed_device *dev = hw->wed_dev;
2517+ const struct reg_dump **regs;
2518
2519 if (!dev)
2520 return 0;
2521
2522- dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
2523+ switch(dev->hw->version) {
2524+ case 2:
2525+ regs = regs_new_v2;
2526+ break;
2527+ case 3:
2528+ regs = regs_new_v3;
2529+ break;
2530+ default:
2531+ return 0;
2532+ }
2533+
2534+ dump_wed_regs(s, dev, regs);
2535
2536 return 0;
2537 }
2538@@ -248,6 +357,383 @@ mtk_wed_reg_get(void *data, u64 *val)
2539 DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mtk_wed_reg_get, mtk_wed_reg_set,
2540 "0x%08llx\n");
2541
2542+static int
2543+wed_token_txd_show(struct seq_file *s, void *data)
2544+{
2545+ struct mtk_wed_hw *hw = s->private;
2546+ struct mtk_wed_device *dev = hw->wed_dev;
2547+ struct dma_page_info *page_list = dev->tx_buf_ring.pages;
2548+ int token = dev->wlan.token_start;
2549+ u32 val = hw->token_id, size = 1;
2550+ int page_idx = (val - token) / 2;
2551+ int i;
2552+
2553+ if (val < token) {
2554+ size = val;
2555+ page_idx = 0;
2556+ }
2557+
2558+ for (i = 0; i < size; i += MTK_WED_BUF_PER_PAGE) {
2559+ void *page = page_list[page_idx++].addr;
2560+ void *buf;
2561+ int j;
2562+
2563+ if (!page)
2564+ break;
2565+
2566+ buf = page_to_virt(page);
2567+
2568+ for (j = 0; j < MTK_WED_BUF_PER_PAGE; j++) {
2569+ printk("[TXD]:token id = %d\n", token + 2 * (page_idx - 1) + j);
2570+ print_hex_dump(KERN_ERR , "", DUMP_PREFIX_OFFSET, 16, 1, (u8 *)buf, 128, false);
2571+ seq_printf(s, "\n");
2572+
2573+ buf += MTK_WED_BUF_SIZE;
2574+ }
2575+ }
2576+
2577+ return 0;
2578+}
2579+
2580+DEFINE_SHOW_ATTRIBUTE(wed_token_txd);
2581+
2582+static int
2583+wed_pao_show(struct seq_file *s, void *data)
2584+{
2585+ static const struct reg_dump regs_common[] = {
2586+ DUMP_STR("PAO AMDSU INFO"),
2587+ DUMP_WED(WED_PAO_MON_AMSDU_FIFO_DMAD),
2588+
2589+ DUMP_STR("PAO AMDSU ENG0 INFO"),
2590+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(0)),
2591+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(0)),
2592+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(0)),
2593+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(0)),
2594+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(0)),
2595+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(0),
2596+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
2597+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(0),
2598+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
2599+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(0),
2600+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
2601+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(0),
2602+ WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
2603+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(0),
2604+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
2605+
2606+ DUMP_STR("PAO AMDSU ENG1 INFO"),
2607+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(1)),
2608+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(1)),
2609+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(1)),
2610+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(1)),
2611+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(1)),
2612+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(1),
2613+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
2614+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(1),
2615+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
2616+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(1),
2617+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
2618+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(2),
2619+ WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
2620+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(2),
2621+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
2622+
2623+ DUMP_STR("PAO AMDSU ENG2 INFO"),
2624+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(2)),
2625+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(2)),
2626+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(2)),
2627+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(2)),
2628+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(2)),
2629+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(2),
2630+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
2631+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(2),
2632+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
2633+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(2),
2634+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
2635+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(2),
2636+ WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
2637+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(2),
2638+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
2639+
2640+ DUMP_STR("PAO AMDSU ENG3 INFO"),
2641+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(3)),
2642+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(3)),
2643+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(3)),
2644+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(3)),
2645+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(3)),
2646+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(3),
2647+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
2648+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(3),
2649+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
2650+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(3),
2651+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
2652+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(3),
2653+ WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
2654+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(3),
2655+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
2656+
2657+ DUMP_STR("PAO AMDSU ENG4 INFO"),
2658+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(4)),
2659+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(4)),
2660+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(4)),
2661+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(4)),
2662+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(4)),
2663+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(4),
2664+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
2665+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(4),
2666+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
2667+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(4),
2668+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
2669+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(4),
2670+ WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
2671+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(4),
2672+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
2673+
2674+ DUMP_STR("PAO AMDSU ENG5 INFO"),
2675+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(5)),
2676+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(5)),
2677+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(5)),
2678+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(5)),
2679+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(5)),
2680+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(5),
2681+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
2682+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(5),
2683+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
2684+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(5),
2685+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
2686+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(5),
2687+ WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
2688+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(5),
2689+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
2690+
2691+ DUMP_STR("PAO AMDSU ENG6 INFO"),
2692+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(6)),
2693+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(6)),
2694+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(6)),
2695+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(6)),
2696+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(6)),
2697+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(6),
2698+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
2699+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(6),
2700+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
2701+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(6),
2702+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
2703+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(6),
2704+ WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
2705+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(6),
2706+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
2707+
2708+ DUMP_STR("PAO AMDSU ENG7 INFO"),
2709+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(7)),
2710+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(7)),
2711+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(7)),
2712+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(7)),
2713+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(7)),
2714+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(7),
2715+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
2716+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(7),
2717+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
2718+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(7),
2719+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
2720+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(7),
2721+ WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
2722+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(4),
2723+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
2724+
2725+ DUMP_STR("PAO AMDSU ENG8 INFO"),
2726+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(8)),
2727+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(8)),
2728+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(8)),
2729+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(8)),
2730+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(8)),
2731+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(8),
2732+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
2733+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(8),
2734+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
2735+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(8),
2736+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
2737+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(8),
2738+ WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
2739+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(8),
2740+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
2741+
2742+ DUMP_STR("PAO QMEM INFO"),
2743+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(0), WED_PAO_QMEM_FQ_CNT),
2744+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(0), WED_PAO_QMEM_SP_QCNT),
2745+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(1), WED_PAO_QMEM_TID0_QCNT),
2746+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(1), WED_PAO_QMEM_TID1_QCNT),
2747+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(2), WED_PAO_QMEM_TID2_QCNT),
2748+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(2), WED_PAO_QMEM_TID3_QCNT),
2749+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(3), WED_PAO_QMEM_TID4_QCNT),
2750+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(3), WED_PAO_QMEM_TID5_QCNT),
2751+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(4), WED_PAO_QMEM_TID6_QCNT),
2752+ DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(4), WED_PAO_QMEM_TID7_QCNT),
2753+
2754+
2755+ DUMP_STR("PAO QMEM HEAD INFO"),
2756+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(0), WED_PAO_QMEM_FQ_HEAD),
2757+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(0), WED_PAO_QMEM_SP_QHEAD),
2758+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(1), WED_PAO_QMEM_TID0_QHEAD),
2759+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(1), WED_PAO_QMEM_TID1_QHEAD),
2760+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(2), WED_PAO_QMEM_TID2_QHEAD),
2761+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(2), WED_PAO_QMEM_TID3_QHEAD),
2762+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(3), WED_PAO_QMEM_TID4_QHEAD),
2763+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(3), WED_PAO_QMEM_TID5_QHEAD),
2764+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(4), WED_PAO_QMEM_TID6_QHEAD),
2765+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(4), WED_PAO_QMEM_TID7_QHEAD),
2766+
2767+ DUMP_STR("PAO QMEM TAIL INFO"),
2768+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(5), WED_PAO_QMEM_FQ_TAIL),
2769+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(5), WED_PAO_QMEM_SP_QTAIL),
2770+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(6), WED_PAO_QMEM_TID0_QTAIL),
2771+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(6), WED_PAO_QMEM_TID1_QTAIL),
2772+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(7), WED_PAO_QMEM_TID2_QTAIL),
2773+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(7), WED_PAO_QMEM_TID3_QTAIL),
2774+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(8), WED_PAO_QMEM_TID4_QTAIL),
2775+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(8), WED_PAO_QMEM_TID5_QTAIL),
2776+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(9), WED_PAO_QMEM_TID6_QTAIL),
2777+ DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(9), WED_PAO_QMEM_TID7_QTAIL),
2778+
2779+ DUMP_STR("PAO HIFTXD MSDU INFO"),
2780+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(1)),
2781+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(2)),
2782+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(3)),
2783+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(4)),
2784+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(5)),
2785+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(6)),
2786+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(7)),
2787+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(8)),
2788+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(9)),
2789+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(10)),
2790+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(11)),
2791+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(12)),
2792+ DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(13)),
2793+ DUMP_END()
2794+ };
2795+
2796+ static const struct reg_dump *regs[] = {
2797+ &regs_common[0],
2798+ NULL,
2799+ };
2800+ struct mtk_wed_hw *hw = s->private;
2801+ struct mtk_wed_device *dev = hw->wed_dev;
2802+
2803+ if (!dev)
2804+ return 0;
2805+
2806+ dump_wed_regs(s, dev, regs);
2807+
2808+ return 0;
2809+}
2810+DEFINE_SHOW_ATTRIBUTE(wed_pao);
2811+
2812+static int
2813+wed_rtqm_show(struct seq_file *s, void *data)
2814+{
2815+ static const struct reg_dump regs_common[] = {
2816+ DUMP_STR("WED Route QM IGRS0(N2H + Recycle)"),
2817+ DUMP_WED(WED_RTQM_IGRS0_I2HW_DMAD_CNT),
2818+ DUMP_WED(WED_RTQM_IGRS0_I2H_DMAD_CNT(0)),
2819+ DUMP_WED(WED_RTQM_IGRS0_I2H_DMAD_CNT(1)),
2820+ DUMP_WED(WED_RTQM_IGRS0_I2HW_PKT_CNT),
2821+ DUMP_WED(WED_RTQM_IGRS0_I2H_PKT_CNT(0)),
2822+	DUMP_WED(WED_RTQM_IGRS0_I2H_PKT_CNT(1)),
2823+ DUMP_WED(WED_RTQM_IGRS0_FDROP_CNT),
2824+
2825+
2826+ DUMP_STR("WED Route QM IGRS1(Legacy)"),
2827+ DUMP_WED(WED_RTQM_IGRS1_I2HW_DMAD_CNT),
2828+ DUMP_WED(WED_RTQM_IGRS1_I2H_DMAD_CNT(0)),
2829+ DUMP_WED(WED_RTQM_IGRS1_I2H_DMAD_CNT(1)),
2830+ DUMP_WED(WED_RTQM_IGRS1_I2HW_PKT_CNT),
2831+ DUMP_WED(WED_RTQM_IGRS1_I2H_PKT_CNT(0)),
2832+ DUMP_WED(WED_RTQM_IGRS1_I2H_PKT_CNT(1)),
2833+ DUMP_WED(WED_RTQM_IGRS1_FDROP_CNT),
2834+
2835+ DUMP_STR("WED Route QM IGRS2(RRO3.0)"),
2836+ DUMP_WED(WED_RTQM_IGRS2_I2HW_DMAD_CNT),
2837+ DUMP_WED(WED_RTQM_IGRS2_I2H_DMAD_CNT(0)),
2838+ DUMP_WED(WED_RTQM_IGRS2_I2H_DMAD_CNT(1)),
2839+ DUMP_WED(WED_RTQM_IGRS2_I2HW_PKT_CNT),
2840+ DUMP_WED(WED_RTQM_IGRS2_I2H_PKT_CNT(0)),
2841+ DUMP_WED(WED_RTQM_IGRS2_I2H_PKT_CNT(1)),
2842+ DUMP_WED(WED_RTQM_IGRS2_FDROP_CNT),
2843+
2844+ DUMP_STR("WED Route QM IGRS3(DEBUG)"),
2845+	DUMP_WED(WED_RTQM_IGRS3_I2HW_DMAD_CNT),
2846+ DUMP_WED(WED_RTQM_IGRS3_I2H_DMAD_CNT(0)),
2847+ DUMP_WED(WED_RTQM_IGRS3_I2H_DMAD_CNT(1)),
2848+ DUMP_WED(WED_RTQM_IGRS3_I2HW_PKT_CNT),
2849+ DUMP_WED(WED_RTQM_IGRS3_I2H_PKT_CNT(0)),
2850+ DUMP_WED(WED_RTQM_IGRS3_I2H_PKT_CNT(1)),
2851+ DUMP_WED(WED_RTQM_IGRS3_FDROP_CNT),
2852+
2853+ DUMP_END()
2854+ };
2855+
2856+ static const struct reg_dump *regs[] = {
2857+ &regs_common[0],
2858+ NULL,
2859+ };
2860+ struct mtk_wed_hw *hw = s->private;
2861+ struct mtk_wed_device *dev = hw->wed_dev;
2862+
2863+ if (!dev)
2864+ return 0;
2865+
2866+ dump_wed_regs(s, dev, regs);
2867+
2868+ return 0;
2869+}
2870+DEFINE_SHOW_ATTRIBUTE(wed_rtqm);
2871+
2872+
2873+static int
2874+wed_rro_show(struct seq_file *s, void *data)
2875+{
2876+ static const struct reg_dump regs_common[] = {
2877+ DUMP_STR("RRO/IND CMD CNT"),
2878+ DUMP_WED(WED_RX_IND_CMD_CNT(1)),
2879+ DUMP_WED(WED_RX_IND_CMD_CNT(2)),
2880+ DUMP_WED(WED_RX_IND_CMD_CNT(3)),
2881+ DUMP_WED(WED_RX_IND_CMD_CNT(4)),
2882+ DUMP_WED(WED_RX_IND_CMD_CNT(5)),
2883+ DUMP_WED(WED_RX_IND_CMD_CNT(6)),
2884+ DUMP_WED(WED_RX_IND_CMD_CNT(7)),
2885+ DUMP_WED(WED_RX_IND_CMD_CNT(8)),
2886+ DUMP_WED_MASK(WED_RX_IND_CMD_CNT(9),
2887+ WED_IND_CMD_MAGIC_CNT_FAIL_CNT),
2888+
2889+ DUMP_WED(WED_RX_ADDR_ELEM_CNT(0)),
2890+ DUMP_WED_MASK(WED_RX_ADDR_ELEM_CNT(1),
2891+ WED_ADDR_ELEM_SIG_FAIL_CNT),
2892+ DUMP_WED(WED_RX_MSDU_PG_CNT(1)),
2893+ DUMP_WED(WED_RX_MSDU_PG_CNT(2)),
2894+ DUMP_WED(WED_RX_MSDU_PG_CNT(3)),
2895+ DUMP_WED(WED_RX_MSDU_PG_CNT(4)),
2896+ DUMP_WED(WED_RX_MSDU_PG_CNT(5)),
2897+ DUMP_WED_MASK(WED_RX_PN_CHK_CNT,
2898+ WED_PN_CHK_FAIL_CNT),
2899+
2900+ DUMP_END()
2901+ };
2902+
2903+ static const struct reg_dump *regs[] = {
2904+ &regs_common[0],
2905+ NULL,
2906+ };
2907+ struct mtk_wed_hw *hw = s->private;
2908+ struct mtk_wed_device *dev = hw->wed_dev;
2909+
2910+ if (!dev)
2911+ return 0;
2912+
2913+ dump_wed_regs(s, dev, regs);
2914+
2915+ return 0;
2916+}
2917+DEFINE_SHOW_ATTRIBUTE(wed_rro);
2918+
2919 void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
2920 {
2921 struct dentry *dir;
2922@@ -261,8 +747,18 @@ void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
2923 debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
2924 debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
2925 debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
2926- debugfs_create_file_unsafe("rxinfo", 0400, dir, hw, &wed_rxinfo_fops);
2927- if (hw->ver != MTK_WED_V1) {
2928+ debugfs_create_u32("token_id", 0600, dir, &hw->token_id);
2929+ debugfs_create_file_unsafe("token_txd", 0600, dir, hw, &wed_token_txd_fops);
2930+
2931+ if (hw->version == 3)
2932+ debugfs_create_file_unsafe("pao", 0400, dir, hw, &wed_pao_fops);
2933+
2934+ if (hw->version != 1) {
2935+ debugfs_create_file_unsafe("rxinfo", 0400, dir, hw, &wed_rxinfo_fops);
2936+ if (hw->version == 3) {
2937+ debugfs_create_file_unsafe("rtqm", 0400, dir, hw, &wed_rtqm_fops);
2938+ debugfs_create_file_unsafe("rro", 0400, dir, hw, &wed_rro_fops);
2939+ }
2940 wed_wo_mcu_debugfs(hw, dir);
2941 }
2942 }
2943diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
2944index 96e30a3..055594d 100644
2945--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
2946+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
developer3e2d75a2023-10-12 18:29:47 +08002947@@ -245,8 +245,7 @@ mtk_wed_load_firmware(struct mtk_wed_wo *wo)
2948 if (of_device_is_compatible(wo->hw->node, "mediatek,mt7981-wed"))
2949 mcu = MT7981_FIRMWARE_WO;
2950 else
2951- mcu = wo->hw->index ? MT7986_FIRMWARE_WO_2 :
2952- MT7986_FIRMWARE_WO_1;
2953+ mcu = wo->hw->index ? MTK_FIRMWARE_WO_1 : MTK_FIRMWARE_WO_0;
developer23f9f0f2023-06-15 13:06:25 +08002954
2955 ret = request_firmware(&fw, mcu, wo->hw->dev);
2956 if (ret)
2957@@ -289,8 +289,12 @@ mtk_wed_load_firmware(struct mtk_wed_wo *wo)
2958 }
2959
2960 /* write the start address */
2961- boot_cr = wo->hw->index ?
2962- WOX_MCU_CFG_LS_WA_BOOT_ADDR_ADDR : WOX_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
2963+ if (wo->hw->version == 3)
2964+ boot_cr = WOX_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
2965+ else
2966+ boot_cr = wo->hw->index ?
2967+ WOX_MCU_CFG_LS_WA_BOOT_ADDR_ADDR : WOX_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
2968+
2969 wo_w32(wo, boot_cr, (wo->region[WO_REGION_EMI].addr_pa >> 16));
2970
2971 /* wo firmware reset */
2972@@ -298,8 +302,7 @@ mtk_wed_load_firmware(struct mtk_wed_wo *wo)
2973
2974 val = wo_r32(wo, WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR);
2975
2976- val |= wo->hw->index ? WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WA_CPU_RSTB_MASK :
2977- WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WM_CPU_RSTB_MASK;
2978+ val |= WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WM_CPU_RSTB_MASK;
2979
2980 wo_w32(wo, WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val);
2981
2982diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.h b/drivers/net/ethernet/mediatek/mtk_wed_mcu.h
2983index 19e1199..c07bdb6 100644
2984--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.h
2985+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.h
developer3e2d75a2023-10-12 18:29:47 +08002986@@ -17,8 +17,9 @@
developer23f9f0f2023-06-15 13:06:25 +08002987 #define WARP_ALREADY_DONE_STATUS (1)
2988
developer3e2d75a2023-10-12 18:29:47 +08002989 #define MT7981_FIRMWARE_WO "mediatek/mt7981_wo.bin"
developer23f9f0f2023-06-15 13:06:25 +08002990-#define MT7986_FIRMWARE_WO_1 "mediatek/mt7986_wo_0.bin"
2991-#define MT7986_FIRMWARE_WO_2 "mediatek/mt7986_wo_1.bin"
2992+#define MTK_FIRMWARE_WO_0 "mediatek/mtk_wo_0.bin"
2993+#define MTK_FIRMWARE_WO_1 "mediatek/mtk_wo_1.bin"
2994+#define MTK_FIRMWARE_WO_2 "mediatek/mtk_wo_2.bin"
2995
2996 #define WOCPU_EMI_DEV_NODE "mediatek,wocpu_emi"
2997 #define WOCPU_ILM_DEV_NODE "mediatek,wocpu_ilm"
2998diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
developer58aa0682023-09-18 14:02:26 +08002999index 403a36b..25be547 100644
developer23f9f0f2023-06-15 13:06:25 +08003000--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
3001+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
3002@@ -20,6 +20,9 @@
3003 #define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31)
3004 #define MTK_WED_RX_BM_TOKEN GENMASK(31, 16)
3005
3006+#define MTK_WDMA_TXD0_DESC_INFO_DMA_DONE BIT(29)
3007+#define MTK_WDMA_TXD1_DESC_INFO_DMA_DONE BIT(31)
3008+
3009 struct mtk_wdma_desc {
3010 __le32 buf0;
3011 __le32 ctrl;
3012@@ -51,6 +54,7 @@ struct mtk_wdma_desc {
3013 #define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
3014 #define MTK_WED_RESET_RX_RRO_QM BIT(20)
3015 #define MTK_WED_RESET_RX_ROUTE_QM BIT(21)
3016+#define MTK_WED_RESET_TX_PAO BIT(22)
3017 #define MTK_WED_RESET_WED BIT(31)
3018
3019 #define MTK_WED_CTRL 0x00c
3020@@ -58,6 +62,9 @@ struct mtk_wdma_desc {
3021 #define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1)
3022 #define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2)
3023 #define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3)
3024+#define MTK_WED_CTRL_WED_RX_IND_CMD_EN BIT(5)
3025+#define MTK_WED_CTRL_WED_RX_PG_BM_EN BIT(6)
3026+#define MTK_WED_CTRL_WED_RX_PG_BM_BUSU BIT(7)
3027 #define MTK_WED_CTRL_WED_TX_BM_EN BIT(8)
3028 #define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
3029 #define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
3030@@ -68,9 +75,14 @@ struct mtk_wdma_desc {
3031 #define MTK_WED_CTRL_RX_RRO_QM_BUSY BIT(15)
3032 #define MTK_WED_CTRL_RX_ROUTE_QM_EN BIT(16)
3033 #define MTK_WED_CTRL_RX_ROUTE_QM_BUSY BIT(17)
3034+#define MTK_WED_CTRL_TX_TKID_ALI_EN BIT(20)
3035+#define MTK_WED_CTRL_TX_TKID_ALI_BUSY BIT(21)
3036+#define MTK_WED_CTRL_TX_PAO_EN BIT(22)
3037+#define MTK_WED_CTRL_TX_PAO_BUSY BIT(23)
3038 #define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
3039 #define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25)
3040 #define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
3041+#define MTK_WED_CTRL_FLD_MIB_RD_CLR BIT(28)
3042
3043 #define MTK_WED_EXT_INT_STATUS 0x020
3044 #define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0)
3045@@ -78,12 +90,10 @@ struct mtk_wdma_desc {
3046 #define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4)
3047 #define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8)
3048 #define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9)
3049-#if defined(CONFIG_MEDIATEK_NETSYS_V2)
3050-#define MTK_WED_EXT_INT_STATUS_TX_TKID_LO_TH BIT(10)
3051-#define MTK_WED_EXT_INT_STATUS_TX_TKID_HI_TH BIT(11)
3052-#endif
3053-#define MTK_WED_EXT_INT_STATUS_RX_FREE_AT_EMPTY BIT(12)
3054-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_DMAD_ER BIT(13)
3055+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH2 BIT(10)
3056+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH2 BIT(11)
3057+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
3058+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
3059 #define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
3060 #define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
3061 #define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
3062@@ -100,17 +110,15 @@ struct mtk_wdma_desc {
3063 #define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
3064 MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
3065 MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \
3066- MTK_WED_EXT_INT_STATUS_RX_FREE_AT_EMPTY | \
3067- MTK_WED_EXT_INT_STATUS_RX_FBUF_DMAD_ER | \
3068 MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
3069 MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
3070 MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \
3071- MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR | \
3072- MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR)
3073+ MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR)
3074
3075 #define MTK_WED_EXT_INT_MASK 0x028
3076 #define MTK_WED_EXT_INT_MASK1 0x02c
3077 #define MTK_WED_EXT_INT_MASK2 0x030
3078+#define MTK_WED_EXT_INT_MASK3 0x034
3079
3080 #define MTK_WED_STATUS 0x060
3081 #define MTK_WED_STATUS_TX GENMASK(15, 8)
3082@@ -118,9 +126,14 @@ struct mtk_wdma_desc {
3083 #define MTK_WED_TX_BM_CTRL 0x080
3084 #define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0)
3085 #define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16)
3086+#define MTK_WED_TX_BM_CTRL_LEGACY_EN BIT(26)
3087+#define MTK_WED_TX_TKID_CTRL_FREE_FORMAT BIT(27)
3088 #define MTK_WED_TX_BM_CTRL_PAUSE BIT(28)
3089
3090 #define MTK_WED_TX_BM_BASE 0x084
3091+#define MTK_WED_TX_BM_INIT_PTR 0x088
3092+#define MTK_WED_TX_BM_SW_TAIL_IDX GENMASK(16, 0)
3093+#define MTK_WED_TX_BM_INIT_SW_TAIL_IDX BIT(16)
3094
3095 #define MTK_WED_TX_BM_BUF_LEN 0x08c
3096
3097@@ -134,22 +147,24 @@ struct mtk_wdma_desc {
3098 #if defined(CONFIG_MEDIATEK_NETSYS_V2)
3099 #define MTK_WED_TX_BM_DYN_THR_LO GENMASK(8, 0)
3100 #define MTK_WED_TX_BM_DYN_THR_HI GENMASK(24, 16)
3101-
3102-#define MTK_WED_TX_BM_TKID 0x0c8
3103-#define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
3104-#define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
3105 #else
3106 #define MTK_WED_TX_BM_DYN_THR_LO GENMASK(6, 0)
3107 #define MTK_WED_TX_BM_DYN_THR_HI GENMASK(22, 16)
3108+#endif
3109
3110-#define MTK_WED_TX_BM_TKID 0x088
3111+#define MTK_WED_TX_BM_TKID 0x0c8
3112 #define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
3113 #define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
3114-#endif
3115
3116 #define MTK_WED_TX_TKID_CTRL 0x0c0
3117+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
3118+#define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM GENMASK(7, 0)
3119+#define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM GENMASK(23, 16)
3120+#else
3121 #define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM GENMASK(6, 0)
3122 #define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM GENMASK(22, 16)
3123+#endif
3124+
3125 #define MTK_WED_TX_TKID_CTRL_PAUSE BIT(28)
3126
3127 #define MTK_WED_TX_TKID_DYN_THR 0x0e0
3128@@ -220,12 +235,15 @@ struct mtk_wdma_desc {
3129 #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_PKT_PROC BIT(5)
3130 #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC BIT(6)
3131 #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_CRX_SYNC BIT(7)
3132-#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(18, 16)
3133+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(15, 12)
3134+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4 BIT(18)
3135 #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNSUPPORT_FMT BIT(19)
3136-#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UEVENT_PKT_FMT_CHK BIT(20)
3137+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK BIT(20)
3138 #define MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR BIT(21)
3139 #define MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP BIT(24)
3140+#define MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST BIT(25)
3141 #define MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV BIT(28)
3142+#define MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK BIT(30)
3143
3144 /* CONFIG_MEDIATEK_NETSYS_V1 */
3145 #define MTK_WED_WPDMA_GLO_CFG_RX_BT_SIZE GENMASK(5, 4)
3146@@ -288,9 +306,11 @@ struct mtk_wdma_desc {
3147 #define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16)
3148
3149 #define MTK_WED_PCIE_INT_CTRL 0x57c
3150-#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20)
3151-#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16)
3152 #define MTK_WED_PCIE_INT_CTRL_POLL_EN GENMASK(13, 12)
3153+#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16)
3154+#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20)
3155+#define MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER BIT(21)
3156+
3157 #define MTK_WED_WPDMA_CFG_BASE 0x580
3158 #define MTK_WED_WPDMA_CFG_INT_MASK 0x584
3159 #define MTK_WED_WPDMA_CFG_TX 0x588
3160@@ -319,20 +339,50 @@ struct mtk_wdma_desc {
3161 #define MTK_WED_WPDMA_RX_D_RST_DRV_IDX GENMASK(25, 24)
3162
3163 #define MTK_WED_WPDMA_RX_GLO_CFG 0x76c
3164-#define MTK_WED_WPDMA_RX_RING 0x770
3165+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
3166+#define MTK_WED_WPDMA_RX_RING0 0x770
3167+#else
3168+#define MTK_WED_WPDMA_RX_RING0 0x7d0
3169+#endif
3170+#define MTK_WED_WPDMA_RX_RING1 0x7d8
3171
3172 #define MTK_WED_WPDMA_RX_D_MIB(_n) (0x774 + (_n) * 4)
3173 #define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n) (0x784 + (_n) * 4)
3174 #define MTK_WED_WPDMA_RX_D_COHERENT_MIB 0x78c
3175
3176+#define MTK_WED_WPDMA_RX_D_PREF_CFG 0x7b4
3177+#define MTK_WED_WPDMA_RX_D_PREF_EN BIT(0)
3178+#define MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE GENMASK(12, 8)
3179+#define MTK_WED_WPDMA_RX_D_PREF_LOW_THRES GENMASK(21, 16)
3180+
3181+#define MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX 0x7b8
3182+#define MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR BIT(15)
3183+
3184+#define MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX 0x7bc
3185+
3186+#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG 0x7c0
3187+#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR BIT(0)
3188+#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR BIT(16)
3189+
3190 #define MTK_WED_WDMA_RING_TX 0x800
3191
3192 #define MTK_WED_WDMA_TX_MIB 0x810
3193
3194-
3195 #define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
3196 #define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
3197
3198+#define MTK_WED_WDMA_RX_PREF_CFG 0x950
3199+#define MTK_WED_WDMA_RX_PREF_EN BIT(0)
3200+#define MTK_WED_WDMA_RX_PREF_BURST_SIZE GENMASK(12, 8)
3201+#define MTK_WED_WDMA_RX_PREF_LOW_THRES GENMASK(21, 16)
3202+#define MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR BIT(24)
3203+#define MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR BIT(25)
3204+#define MTK_WED_WDMA_RX_PREF_DDONE2_EN BIT(26)
3205+
3206+#define MTK_WED_WDMA_RX_PREF_FIFO_CFG 0x95C
3207+#define MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR BIT(0)
3208+#define MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR BIT(16)
3209+
3210 #define MTK_WED_WDMA_GLO_CFG 0xa04
3211 #define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
3212 #define MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK BIT(1)
3213@@ -365,6 +415,7 @@ struct mtk_wdma_desc {
3214 #define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16)
3215
3216 #define MTK_WED_WDMA_INT_CTRL 0xa2c
3217+#define MTK_WED_WDMA_INT_POLL_PRD GENMASK(7, 0)
3218 #define MTK_WED_WDMA_INT_POLL_SRC_SEL GENMASK(17, 16)
3219
3220 #define MTK_WED_WDMA_CFG_BASE 0xaa0
3221@@ -426,6 +477,18 @@ struct mtk_wdma_desc {
3222 #define MTK_WDMA_INT_GRP1 0x250
3223 #define MTK_WDMA_INT_GRP2 0x254
3224
3225+#define MTK_WDMA_PREF_TX_CFG 0x2d0
3226+#define MTK_WDMA_PREF_TX_CFG_PREF_EN BIT(0)
3227+
3228+#define MTK_WDMA_PREF_RX_CFG 0x2dc
3229+#define MTK_WDMA_PREF_RX_CFG_PREF_EN BIT(0)
3230+
3231+#define MTK_WDMA_WRBK_TX_CFG 0x300
3232+#define MTK_WDMA_WRBK_TX_CFG_WRBK_EN BIT(30)
3233+
3234+#define MTK_WDMA_WRBK_RX_CFG 0x344
3235+#define MTK_WDMA_WRBK_RX_CFG_WRBK_EN BIT(30)
3236+
3237 #define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0)
3238 #define MTK_PCIE_MIRROR_MAP_EN BIT(0)
3239 #define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1)
3240@@ -439,6 +502,31 @@ struct mtk_wdma_desc {
3241 #define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5)
3242 #define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20)
3243
3244+#define MTK_WED_RTQM_IGRS0_I2HW_DMAD_CNT 0xb1c
3245+#define MTK_WED_RTQM_IGRS0_I2H_DMAD_CNT(_n) (0xb20 + (_n) * 0x4)
3246+#define MTK_WED_RTQM_IGRS0_I2HW_PKT_CNT 0xb28
3247+#define MTK_WED_RTQM_IGRS0_I2H_PKT_CNT(_n) (0xb2c + (_n) * 0x4)
3248+#define MTK_WED_RTQM_IGRS0_FDROP_CNT 0xb34
3249+
3250+
3251+#define MTK_WED_RTQM_IGRS1_I2HW_DMAD_CNT 0xb44
3252+#define MTK_WED_RTQM_IGRS1_I2H_DMAD_CNT(_n) (0xb48 + (_n) * 0x4)
3253+#define MTK_WED_RTQM_IGRS1_I2HW_PKT_CNT 0xb50
3254+#define MTK_WED_RTQM_IGRS1_I2H_PKT_CNT(_n) (0xb54+ (_n) * 0x4)
3255+#define MTK_WED_RTQM_IGRS1_FDROP_CNT 0xb5c
3256+
3257+#define MTK_WED_RTQM_IGRS2_I2HW_DMAD_CNT 0xb6c
3258+#define MTK_WED_RTQM_IGRS2_I2H_DMAD_CNT(_n) (0xb70 + (_n) * 0x4)
3259+#define MTK_WED_RTQM_IGRS2_I2HW_PKT_CNT 0xb78
3260+#define MTK_WED_RTQM_IGRS2_I2H_PKT_CNT(_n) (0xb7c+ (_n) * 0x4)
3261+#define MTK_WED_RTQM_IGRS2_FDROP_CNT 0xb84
3262+
3263+#define MTK_WED_RTQM_IGRS3_I2HW_DMAD_CNT 0xb94
3264+#define MTK_WED_RTQM_IGRS3_I2H_DMAD_CNT(_n) (0xb98 + (_n) * 0x4)
3265+#define MTK_WED_RTQM_IGRS3_I2HW_PKT_CNT 0xba0
3266+#define MTK_WED_RTQM_IGRS3_I2H_PKT_CNT(_n) (0xba4+ (_n) * 0x4)
3267+#define MTK_WED_RTQM_IGRS3_FDROP_CNT 0xbac
3268+
3269 #define MTK_WED_RTQM_R2H_MIB(_n) (0xb70 + (_n) * 0x4)
3270 #define MTK_WED_RTQM_R2Q_MIB(_n) (0xb78 + (_n) * 0x4)
3271 #define MTK_WED_RTQM_Q2N_MIB 0xb80
3272@@ -447,6 +535,24 @@ struct mtk_wdma_desc {
3273 #define MTK_WED_RTQM_Q2B_MIB 0xb8c
3274 #define MTK_WED_RTQM_PFDBK_MIB 0xb90
3275
3276+#define MTK_WED_RTQM_ENQ_CFG0 0xbb8
3277+#define MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT GENMASK(15, 12)
3278+
3279+#define MTK_WED_RTQM_FDROP_MIB 0xb84
3280+#define MTK_WED_RTQM_ENQ_I2Q_DMAD_CNT 0xbbc
3281+#define MTK_WED_RTQM_ENQ_I2N_DMAD_CNT 0xbc0
3282+#define MTK_WED_RTQM_ENQ_I2Q_PKT_CNT 0xbc4
3283+#define MTK_WED_RTQM_ENQ_I2N_PKT_CNT 0xbc8
3284+#define MTK_WED_RTQM_ENQ_USED_ENTRY_CNT 0xbcc
3285+#define MTK_WED_RTQM_ENQ_ERR_CNT 0xbd0
3286+
3287+#define MTK_WED_RTQM_DEQ_DMAD_CNT 0xbd8
3288+#define MTK_WED_RTQM_DEQ_Q2I_DMAD_CNT 0xbdc
3289+#define MTK_WED_RTQM_DEQ_PKT_CNT 0xbe0
3290+#define MTK_WED_RTQM_DEQ_Q2I_PKT_CNT 0xbe4
3291+#define MTK_WED_RTQM_DEQ_USED_PFDBK_CNT 0xbe8
3292+#define MTK_WED_RTQM_DEQ_ERR_CNT 0xbec
3293+
3294 #define MTK_WED_RROQM_GLO_CFG 0xc04
3295 #define MTK_WED_RROQM_RST_IDX 0xc08
3296 #define MTK_WED_RROQM_RST_IDX_MIOD BIT(0)
3297@@ -487,8 +593,8 @@ struct mtk_wdma_desc {
3298 #define MTK_WED_RX_BM_BASE 0xd84
3299 #define MTK_WED_RX_BM_INIT_PTR 0xd88
3300 #define MTK_WED_RX_BM_PTR 0xd8c
3301-#define MTK_WED_RX_BM_PTR_HEAD GENMASK(32, 16)
3302 #define MTK_WED_RX_BM_PTR_TAIL GENMASK(15, 0)
3303+#define MTK_WED_RX_BM_PTR_HEAD GENMASK(32, 16)
3304
3305 #define MTK_WED_RX_BM_BLEN 0xd90
3306 #define MTK_WED_RX_BM_STS 0xd94
3307@@ -496,7 +602,193 @@ struct mtk_wdma_desc {
3308 #define MTK_WED_RX_BM_INTF 0xd9c
3309 #define MTK_WED_RX_BM_ERR_STS 0xda8
3310
3311+#define MTK_RRO_IND_CMD_SIGNATURE 0xe00
3312+#define MTK_RRO_IND_CMD_DMA_IDX GENMASK(11, 0)
3313+#define MTK_RRO_IND_CMD_MAGIC_CNT GENMASK(30, 28)
3314+
3315+#define MTK_WED_IND_CMD_RX_CTRL0 0xe04
3316+#define MTK_WED_IND_CMD_PROC_IDX GENMASK(11, 0)
3317+#define MTK_WED_IND_CMD_PREFETCH_FREE_CNT GENMASK(19, 16)
3318+#define MTK_WED_IND_CMD_MAGIC_CNT GENMASK(30, 28)
3319+
3320+#define MTK_WED_IND_CMD_RX_CTRL1 0xe08
3321+#define MTK_WED_IND_CMD_RX_CTRL2 0xe0c
3322+#define MTK_WED_IND_CMD_MAX_CNT GENMASK(11, 0)
3323+#define MTK_WED_IND_CMD_BASE_M GENMASK(19, 16)
3324+
3325+#define MTK_WED_RRO_CFG0 0xe10
3326+#define MTK_WED_RRO_CFG1 0xe14
3327+#define MTK_WED_RRO_CFG1_MAX_WIN_SZ GENMASK(31, 29)
3328+#define MTK_WED_RRO_CFG1_ACK_SN_BASE_M GENMASK(19, 16)
3329+#define MTK_WED_RRO_CFG1_PARTICL_SE_ID GENMASK(11, 0)
3330+
3331+#define MTK_WED_ADDR_ELEM_CFG0 0xe18
3332+#define MTK_WED_ADDR_ELEM_CFG1 0xe1c
3333+#define MTK_WED_ADDR_ELEM_PREFETCH_FREE_CNT GENMASK(19, 16)
3334+
3335+#define MTK_WED_ADDR_ELEM_TBL_CFG 0xe20
3336+#define MTK_WED_ADDR_ELEM_TBL_OFFSET GENMASK(6, 0)
3337+#define MTK_WED_ADDR_ELEM_TBL_RD_RDY BIT(28)
3338+#define MTK_WED_ADDR_ELEM_TBL_WR_RDY BIT(29)
3339+#define MTK_WED_ADDR_ELEM_TBL_RD BIT(30)
3340+#define MTK_WED_ADDR_ELEM_TBL_WR BIT(31)
3341+
3342+#define MTK_WED_RADDR_ELEM_TBL_WDATA 0xe24
3343+#define MTK_WED_RADDR_ELEM_TBL_RDATA 0xe28
3344+
3345+#define MTK_WED_PN_CHECK_CFG 0xe30
3346+#define MTK_WED_PN_CHECK_SE_ID GENMASK(11, 0)
3347+#define MTK_WED_PN_CHECK_RD_RDY BIT(28)
3348+#define MTK_WED_PN_CHECK_WR_RDY BIT(29)
3349+#define MTK_WED_PN_CHECK_RD BIT(30)
3350+#define MTK_WED_PN_CHECK_WR BIT(31)
3351+
3352+#define MTK_WED_PN_CHECK_WDATA_M 0xe38
3353+#define MTK_WED_PN_CHECK_IS_FIRST BIT(17)
3354+
3355+#define MTK_WED_RRO_MSDU_PG_RING_CFG(_n) (0xe44 + (_n) * 0x8)
3356+
3357+#define MTK_WED_RRO_MSDU_PG_RING2_CFG 0xe58
3358+#define MTK_WED_RRO_MSDU_PG_DRV_CLR BIT(26)
3359+#define MTK_WED_RRO_MSDU_PG_DRV_EN BIT(31)
3360+
3361+#define MTK_WED_RRO_MSDU_PG_CTRL0(_n) (0xe5c + (_n) * 0xc)
3362+#define MTK_WED_RRO_MSDU_PG_CTRL1(_n) (0xe60 + (_n) * 0xc)
3363+#define MTK_WED_RRO_MSDU_PG_CTRL2(_n) (0xe64 + (_n) * 0xc)
3364+
3365+#define MTK_WED_RRO_RX_D_RX(_n) (0xe80 + (_n) * 0x10)
3366+
3367+#define MTK_WED_RRO_RX_MAGIC_CNT BIT(13)
3368+
3369+#define MTK_WED_RRO_RX_D_CFG(_n) (0xea0 + (_n) * 0x4)
3370+#define MTK_WED_RRO_RX_D_DRV_CLR BIT(26)
3371+#define MTK_WED_RRO_RX_D_DRV_EN BIT(31)
3372+
3373+#define MTK_WED_RRO_PG_BM_RX_DMAM 0xeb0
3374+#define MTK_WED_RRO_PG_BM_RX_SDL0 GENMASK(13, 0)
3375+
3376+#define MTK_WED_RRO_PG_BM_BASE 0xeb4
3377+#define MTK_WED_RRO_PG_BM_INIT_PTR 0xeb8
3378+#define MTK_WED_RRO_PG_BM_SW_TAIL_IDX GENMASK(15, 0)
3379+#define MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX BIT(16)
3380+
3381+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX 0xeec
3382+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN BIT(0)
3383+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR BIT(1)
3384+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG GENMASK(6, 2)
3385+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN BIT(8)
3386+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR BIT(9)
3387+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG GENMASK(14, 10)
3388+
3389+#define MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG 0xef4
3390+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN BIT(0)
3391+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR BIT(1)
3392+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG GENMASK(6, 2)
3393+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN BIT(8)
3394+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR BIT(9)
3395+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG GENMASK(14, 10)
3396+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN BIT(16)
3397+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR BIT(17)
3398+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG GENMASK(22, 18)
3399+
3400+#define MTK_WED_RX_IND_CMD_CNT0 0xf20
3401+#define MTK_WED_RX_IND_CMD_DBG_CNT_EN BIT(31)
3402+
3403+#define MTK_WED_RX_IND_CMD_CNT(_n) (0xf20 + (_n) * 0x4)
3404+#define MTK_WED_IND_CMD_MAGIC_CNT_FAIL_CNT GENMASK(15, 0)
3405+
3406+#define MTK_WED_RX_ADDR_ELEM_CNT(_n) (0xf48 + (_n) * 0x4)
3407+#define MTK_WED_ADDR_ELEM_SIG_FAIL_CNT GENMASK(15, 0)
3408+#define MTK_WED_ADDR_ELEM_FIRST_SIG_FAIL_CNT GENMASK(31, 16)
3409+#define MTK_WED_ADDR_ELEM_ACKSN_CNT GENMASK(27, 0)
3410+
3411+#define MTK_WED_RX_MSDU_PG_CNT(_n) (0xf5c + (_n) * 0x4)
3412+
3413+#define MTK_WED_RX_PN_CHK_CNT 0xf70
3414+#define MTK_WED_PN_CHK_FAIL_CNT GENMASK(15, 0)
3415+
3416 #define MTK_WED_WOCPU_VIEW_MIOD_BASE 0x8000
3417 #define MTK_WED_PCIE_INT_MASK 0x0
3418
3419+#define MTK_WED_PAO_AMSDU_FIFO 0x1800
3420+#define MTK_WED_PAO_AMSDU_IS_PRIOR0_RING BIT(10)
3421+
3422+#define MTK_WED_PAO_STA_INFO 0x01810
3423+#define MTK_WED_PAO_STA_INFO_DO_INIT BIT(0)
3424+#define MTK_WED_PAO_STA_INFO_SET_INIT BIT(1)
3425+
3426+#define MTK_WED_PAO_STA_INFO_INIT 0x01814
3427+#define MTK_WED_PAO_STA_WTBL_HDRT_MODE BIT(0)
3428+#define MTK_WED_PAO_STA_RMVL BIT(1)
3429+#define MTK_WED_PAO_STA_MAX_AMSDU_LEN GENMASK(7, 2)
3430+#define MTK_WED_PAO_STA_MAX_AMSDU_NUM GENMASK(11, 8)
3431+
3432+#define MTK_WED_PAO_HIFTXD_BASE_L(_n) (0x1980 + (_n) * 0x4)
3433+
3434+#define MTK_WED_PAO_PSE 0x1910
3435+#define MTK_WED_PAO_PSE_RESET BIT(16)
3436+
3437+#define MTK_WED_PAO_HIFTXD_CFG 0x1968
3438+#define MTK_WED_PAO_HIFTXD_SRC GENMASK(16, 15)
3439+
3440+#define MTK_WED_PAO_MON_AMSDU_FIFO_DMAD 0x1a34
3441+
3442+#define MTK_WED_PAO_MON_AMSDU_ENG_DMAD(_n) (0x1a80 + (_n) * 0x50)
3443+#define MTK_WED_PAO_MON_AMSDU_ENG_QFPL(_n) (0x1a84 + (_n) * 0x50)
3444+#define MTK_WED_PAO_MON_AMSDU_ENG_QENI(_n) (0x1a88 + (_n) * 0x50)
3445+#define MTK_WED_PAO_MON_AMSDU_ENG_QENO(_n) (0x1a8c + (_n) * 0x50)
3446+#define MTK_WED_PAO_MON_AMSDU_ENG_MERG(_n) (0x1a90 + (_n) * 0x50)
3447+
3448+#define MTK_WED_PAO_MON_AMSDU_ENG_CNT8(_n) (0x1a94 + (_n) * 0x50)
3449+#define MTK_WED_PAO_AMSDU_ENG_MAX_QGPP_CNT GENMASK(10, 0)
3450+#define MTK_WED_PAO_AMSDU_ENG_MAX_PL_CNT GENMASK(27, 16)
3451+
3452+#define MTK_WED_PAO_MON_AMSDU_ENG_CNT9(_n) (0x1a98 + (_n) * 0x50)
3453+#define MTK_WED_PAO_AMSDU_ENG_CUR_ENTRY GENMASK(10, 0)
3454+#define MTK_WED_PAO_AMSDU_ENG_MAX_BUF_MERGED GENMASK(20, 16)
3455+#define MTK_WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED GENMASK(28, 24)
3456+
3457+#define MTK_WED_PAO_MON_QMEM_STS1 0x1e04
3458+
3459+#define MTK_WED_PAO_MON_QMEM_CNT(_n) (0x1e0c + (_n) * 0x4)
3460+#define MTK_WED_PAO_QMEM_FQ_CNT GENMASK(27, 16)
3461+#define MTK_WED_PAO_QMEM_SP_QCNT GENMASK(11, 0)
3462+#define MTK_WED_PAO_QMEM_TID0_QCNT GENMASK(27, 16)
3463+#define MTK_WED_PAO_QMEM_TID1_QCNT GENMASK(11, 0)
3464+#define MTK_WED_PAO_QMEM_TID2_QCNT GENMASK(27, 16)
3465+#define MTK_WED_PAO_QMEM_TID3_QCNT GENMASK(11, 0)
3466+#define MTK_WED_PAO_QMEM_TID4_QCNT GENMASK(27, 16)
3467+#define MTK_WED_PAO_QMEM_TID5_QCNT GENMASK(11, 0)
3468+#define MTK_WED_PAO_QMEM_TID6_QCNT GENMASK(27, 16)
3469+#define MTK_WED_PAO_QMEM_TID7_QCNT GENMASK(11, 0)
3470+
3471+#define MTK_WED_PAO_MON_QMEM_PTR(_n) (0x1e20 + (_n) * 0x4)
3472+#define MTK_WED_PAO_QMEM_FQ_HEAD GENMASK(27, 16)
3473+#define MTK_WED_PAO_QMEM_SP_QHEAD GENMASK(11, 0)
3474+#define MTK_WED_PAO_QMEM_TID0_QHEAD GENMASK(27, 16)
3475+#define MTK_WED_PAO_QMEM_TID1_QHEAD GENMASK(11, 0)
3476+#define MTK_WED_PAO_QMEM_TID2_QHEAD GENMASK(27, 16)
3477+#define MTK_WED_PAO_QMEM_TID3_QHEAD GENMASK(11, 0)
3478+#define MTK_WED_PAO_QMEM_TID4_QHEAD GENMASK(27, 16)
3479+#define MTK_WED_PAO_QMEM_TID5_QHEAD GENMASK(11, 0)
3480+#define MTK_WED_PAO_QMEM_TID6_QHEAD GENMASK(27, 16)
3481+#define MTK_WED_PAO_QMEM_TID7_QHEAD GENMASK(11, 0)
3482+#define MTK_WED_PAO_QMEM_FQ_TAIL GENMASK(27, 16)
3483+#define MTK_WED_PAO_QMEM_SP_QTAIL GENMASK(11, 0)
3484+#define MTK_WED_PAO_QMEM_TID0_QTAIL GENMASK(27, 16)
3485+#define MTK_WED_PAO_QMEM_TID1_QTAIL GENMASK(11, 0)
3486+#define MTK_WED_PAO_QMEM_TID2_QTAIL GENMASK(27, 16)
3487+#define MTK_WED_PAO_QMEM_TID3_QTAIL GENMASK(11, 0)
3488+#define MTK_WED_PAO_QMEM_TID4_QTAIL GENMASK(27, 16)
3489+#define MTK_WED_PAO_QMEM_TID5_QTAIL GENMASK(11, 0)
3490+#define MTK_WED_PAO_QMEM_TID6_QTAIL GENMASK(27, 16)
3491+#define MTK_WED_PAO_QMEM_TID7_QTAIL GENMASK(11, 0)
3492+
3493+#define MTK_WED_PAO_MON_HIFTXD_FETCH_MSDU(_n) (0x1ec4 + (_n) * 0x4)
3494+
3495+#define MTK_WED_PCIE_BASE 0x11280000
3496+
3497+#define MTK_WED_PCIE_BASE0 0x11300000
3498+#define MTK_WED_PCIE_BASE1 0x11310000
3499+#define MTK_WED_PCIE_BASE2 0x11290000
3500 #endif
3501diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
developer58aa0682023-09-18 14:02:26 +08003502index 0967dc2..3211f34 100644
developer23f9f0f2023-06-15 13:06:25 +08003503--- a/include/linux/netdevice.h
3504+++ b/include/linux/netdevice.h
developer58aa0682023-09-18 14:02:26 +08003505@@ -875,6 +875,13 @@ struct net_device_path {
developer23f9f0f2023-06-15 13:06:25 +08003506 u8 queue;
3507 u16 wcid;
3508 u8 bss;
3509+ u32 usr_info;
3510+ u8 tid;
3511+ u8 is_fixedrate;
3512+ u8 is_prior;
3513+ u8 is_sp;
3514+ u8 hf;
3515+ u8 amsdu_en;
3516 } mtk_wdma;
3517 };
3518 };
3519diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
developer58aa0682023-09-18 14:02:26 +08003520index 27cf284..92df4ba 100644
developer23f9f0f2023-06-15 13:06:25 +08003521--- a/include/linux/soc/mediatek/mtk_wed.h
3522+++ b/include/linux/soc/mediatek/mtk_wed.h
3523@@ -5,11 +5,14 @@
3524 #include <linux/rcupdate.h>
3525 #include <linux/regmap.h>
3526 #include <linux/pci.h>
3527+#include <linux/skbuff.h>
3528+#include <linux/iopoll.h>
3529
3530 #define WED_WO_STA_REC 0x6
3531
3532 #define MTK_WED_TX_QUEUES 2
3533 #define MTK_WED_RX_QUEUES 2
3534+#define MTK_WED_RX_PAGE_QUEUES 3
3535
3536 enum mtk_wed_wo_cmd {
3537 MTK_WED_WO_CMD_WED_CFG,
3538@@ -55,10 +58,13 @@ enum mtk_wed_bus_tye {
3539 struct mtk_wed_hw;
3540 struct mtk_wdma_desc;
3541
3542+#define MTK_WED_RING_CONFIGURED BIT(0)
3543+
3544 struct mtk_wed_ring {
3545 struct mtk_wdma_desc *desc;
3546 dma_addr_t desc_phys;
3547 int size;
3548+ u32 flags;
3549
3550 u32 reg_base;
3551 void __iomem *wpdma;
3552@@ -69,11 +75,18 @@ struct mtk_rxbm_desc {
3553 __le32 token;
3554 } __packed __aligned(4);
3555
3556+struct dma_page_info {
3557+ void *addr;
3558+ dma_addr_t addr_phys;
3559+};
3560+
3561 struct dma_buf {
3562 int size;
3563- void **pages;
3564- struct mtk_wdma_desc *desc;
3565+ int pkt_nums;
3566+ void *desc;
3567+ int desc_size;
3568 dma_addr_t desc_phys;
3569+ struct dma_page_info *pages;
3570 };
3571
3572 struct dma_entry {
3573@@ -97,6 +110,7 @@ struct mtk_wed_device {
3574 struct device *dev;
3575 struct mtk_wed_hw *hw;
3576 bool init_done, running;
3577+ bool wdma_init_done;
3578 int wdma_idx;
3579 int irq;
3580 u8 ver;
3581@@ -108,7 +122,11 @@ struct mtk_wed_device {
3582 struct mtk_wed_ring rx_ring[MTK_WED_RX_QUEUES];
3583 struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES];
3584
3585- struct dma_buf buf_ring;
3586+ struct mtk_wed_ring rx_rro_ring[MTK_WED_RX_QUEUES];
3587+ struct mtk_wed_ring rx_page_ring[MTK_WED_RX_PAGE_QUEUES];
3588+ struct mtk_wed_ring ind_cmd_ring;
3589+
3590+ struct dma_buf tx_buf_ring;
3591
3592 struct {
3593 int size;
3594@@ -117,6 +135,8 @@ struct mtk_wed_device {
3595 dma_addr_t desc_phys;
3596 } rx_buf_ring;
3597
3598+ struct dma_buf rx_page_buf_ring;
3599+
3600 struct {
3601 struct mtk_wed_ring rro_ring;
3602 void __iomem *rro_desc;
3603@@ -131,8 +151,9 @@ struct mtk_wed_device {
3604 struct platform_device *platform_dev;
3605 struct pci_dev *pci_dev;
3606 };
3607+ enum mtk_wed_bus_tye bus_type;
3608 void __iomem *base;
3609- u32 bus_type;
3610+ void __iomem *regs;
3611 u32 phy_base;
3612
3613 u32 wpdma_phys;
developer58aa0682023-09-18 14:02:26 +08003614@@ -141,10 +162,14 @@ struct mtk_wed_device {
3615 u32 wpdma_tx;
developer23f9f0f2023-06-15 13:06:25 +08003616 u32 wpdma_txfree;
3617 u32 wpdma_rx_glo;
developerb74821e2023-09-08 14:19:59 +08003618- u32 wpdma_rx;
3619+ u32 wpdma_rx[MTK_WED_RX_QUEUES];
developer23f9f0f2023-06-15 13:06:25 +08003620+ u32 wpdma_rx_rro[MTK_WED_RX_QUEUES];
3621+ u32 wpdma_rx_pg;
3622
3623 u8 tx_tbit[MTK_WED_TX_QUEUES];
3624 u8 rx_tbit[MTK_WED_RX_QUEUES];
3625+ u8 rro_rx_tbit[MTK_WED_RX_QUEUES];
3626+ u8 rx_pg_tbit[MTK_WED_RX_PAGE_QUEUES];
3627 u8 txfree_tbit;
3628
3629 u16 token_start;
3630@@ -154,12 +179,26 @@ struct mtk_wed_device {
3631 unsigned int rx_size;
3632
3633 bool wcid_512;
3634-
3635+ bool hwrro;
3636+ bool msi;
3637+
3638+ u8 max_amsdu_nums;
3639+ u32 max_amsdu_len;
3640+
3641+ struct {
3642+ u8 se_group_nums;
3643+ u16 win_size;
3644+ u16 particular_sid;
3645+ u32 ack_sn_addr;
3646+ dma_addr_t particular_se_phys;
3647+ dma_addr_t addr_elem_phys[1024];
3648+ } ind_cmd;
3649+
3650+ u32 chip_id;
3651 u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
3652 int (*offload_enable)(struct mtk_wed_device *wed);
3653 void (*offload_disable)(struct mtk_wed_device *wed);
3654- u32 (*init_rx_buf)(struct mtk_wed_device *wed,
3655- int pkt_num);
3656+ u32 (*init_rx_buf)(struct mtk_wed_device *wed, int size);
3657 void (*release_rx_buf)(struct mtk_wed_device *wed);
3658 void (*update_wo_rx_stats)(struct mtk_wed_device *wed,
3659 struct mtk_wed_wo_rx_stats *stats);
3660@@ -180,6 +219,11 @@ struct mtk_wed_ops {
3661 void __iomem *regs);
3662 int (*rx_ring_setup)(struct mtk_wed_device *dev, int ring,
3663 void __iomem *regs, bool reset);
3664+ int (*rro_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
3665+ void __iomem *regs);
3666+ int (*msdu_pg_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
3667+ void __iomem *regs);
3668+ int (*ind_rx_ring_setup)(struct mtk_wed_device *dev, void __iomem *regs);
3669 int (*msg_update)(struct mtk_wed_device *dev, int cmd_id,
3670 void *data, int len);
3671 void (*detach)(struct mtk_wed_device *dev);
3672@@ -196,6 +240,7 @@ struct mtk_wed_ops {
3673 void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
3674 void (*ppe_check)(struct mtk_wed_device *dev, struct sk_buff *skb,
3675 u32 reason, u32 hash);
3676+ void (*start_hwrro)(struct mtk_wed_device *dev, u32 irq_mask);
3677 };
3678
3679 extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
3680@@ -224,12 +269,21 @@ static inline bool
3681 mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
3682 {
3683 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
3684+ if (dev->ver == 3 && !dev->wlan.hwrro)
3685+ return false;
3686+
3687 return dev->ver != 1;
3688 #else
3689 return false;
3690 #endif
3691 }
3692
3693+static inline bool
3694+mtk_wed_device_support_pao(struct mtk_wed_device *dev)
3695+{
3696+ return dev->ver == 3;
3697+}
3698+
3699 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
3700 #define mtk_wed_device_active(_dev) !!(_dev)->ops
3701 #define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
3702@@ -243,6 +297,12 @@ mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
3703 (_dev)->ops->txfree_ring_setup(_dev, _regs)
3704 #define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs, _reset) \
3705 (_dev)->ops->rx_ring_setup(_dev, _ring, _regs, _reset)
3706+#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) \
3707+ (_dev)->ops->rro_rx_ring_setup(_dev, _ring, _regs)
3708+#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) \
3709+ (_dev)->ops->msdu_pg_rx_ring_setup(_dev, _ring, _regs)
3710+#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) \
3711+ (_dev)->ops->ind_rx_ring_setup(_dev, _regs)
3712 #define mtk_wed_device_update_msg(_dev, _id, _msg, _len) \
3713 (_dev)->ops->msg_update(_dev, _id, _msg, _len)
3714 #define mtk_wed_device_reg_read(_dev, _reg) \
3715@@ -257,6 +317,9 @@ mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
3716 (_dev)->ops->reset_dma(_dev)
3717 #define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) \
3718 (_dev)->ops->ppe_check(_dev, _skb, _reason, _hash)
3719+#define mtk_wed_device_start_hwrro(_dev, _mask) \
3720+ (_dev)->ops->start_hwrro(_dev, _mask)
3721+
3722 #else
3723 static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
3724 {
3725@@ -268,6 +331,9 @@ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
3726 #define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs, _reset) -ENODEV
3727 #define mtk_wed_device_txfree_ring_setup(_dev, _ring, _regs) -ENODEV
3728 #define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs, _reset) -ENODEV
3729+#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) -ENODEV
3730+#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) -ENODEV
3731+#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) -ENODEV
3732 #define mtk_wed_device_reg_read(_dev, _reg) 0
3733 #define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
3734 #define mtk_wed_device_irq_get(_dev, _mask) 0
3735@@ -275,6 +341,7 @@ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
3736 #define mtk_wed_device_dma_reset(_dev) do {} while (0)
3737 #define mtk_wed_device_setup_tc(_dev, _ndev, _type, _data) do {} while (0)
3738 #define mtk_wed_device_ppe_check(_dev, _hash) do {} while (0)
3739+#define mtk_wed_device_start_hwrro(_dev, _mask) do {} while (0)
3740 #endif
3741
3742 #endif
3743--
37442.18.0
3745