[rdkb][common][bsp][Refactor and sync kernel from openwrt]

[Description]
f1638c4b [MAC80211][WED][Fix memory leak caused by SER]
91ea3956 [kernel][mt7988][eth][Fix Jaguar ethernet 8GB addressing]
766c82c3 [kernel][common][eth][Fix compile warning messages]
702d8b82 [kernel][mt7988][pinctrl][Fix pin number assignment]
11e693b5 [mt7988][Add append-opteenode to kernel command line]

[Release-log]

Change-Id: Ie3d2a7bad9517f0c56f0919e249af9471d09a8b7
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/generic/files/drivers/mtd/mtdsplit/mtdsplit_bcm_wfi.c b/recipes-kernel/linux/linux-mediatek-5.4/generic/files/drivers/mtd/mtdsplit/mtdsplit_bcm_wfi.c
index 1ddcf67..1cafc91 100644
--- a/recipes-kernel/linux/linux-mediatek-5.4/generic/files/drivers/mtd/mtdsplit/mtdsplit_bcm_wfi.c
+++ b/recipes-kernel/linux/linux-mediatek-5.4/generic/files/drivers/mtd/mtdsplit/mtdsplit_bcm_wfi.c
@@ -31,6 +31,7 @@
 
 #define CFERAM_NAME		"cferam"
 #define CFERAM_NAME_LEN		(sizeof(CFERAM_NAME) - 1)
+#define CFERAM_NAME_MAX_LEN	32
 #define KERNEL_NAME		"vmlinux.lz"
 #define KERNEL_NAME_LEN		(sizeof(KERNEL_NAME) - 1)
 #define OPENWRT_NAME		"1-openwrt"
@@ -157,17 +158,28 @@
 			 const struct mtd_partition **pparts,
 			 uint8_t *buf, loff_t off, loff_t size, bool cfe_part)
 {
+	struct device_node *mtd_node;
 	struct mtd_partition *parts;
 	loff_t cfe_off, kernel_off, rootfs_off;
 	unsigned int num_parts = BCM_WFI_PARTS, cur_part = 0;
+	const char *cferam_name = CFERAM_NAME;
+	size_t cferam_name_len;
 	int ret;
 
+	mtd_node = mtd_get_of_node(master);
+	if (mtd_node)
+		of_property_read_string(mtd_node, "brcm,cferam", &cferam_name);
+
+	cferam_name_len = strnlen(cferam_name, CFERAM_NAME_MAX_LEN);
+	if (cferam_name_len > 0)
+		cferam_name_len--;
+
 	if (cfe_part) {
 		num_parts++;
 		cfe_off = off;
 
-		ret = jffs2_find_file(master, buf, CFERAM_NAME,
-				      CFERAM_NAME_LEN, &cfe_off,
+		ret = jffs2_find_file(master, buf, cferam_name,
+				      cferam_name_len, &cfe_off,
 				      size - (cfe_off - off), NULL, NULL);
 		if (ret)
 			return ret;
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7988.dtsi b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7988.dtsi
index 98dc4df..9ef502b 100644
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7988.dtsi
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7988.dtsi
@@ -732,7 +732,7 @@
 			    "eint";
 		gpio-controller;
 		#gpio-cells = <2>;
-		gpio-ranges = <&pio 0 0 83>;
+		gpio-ranges = <&pio 0 0 84>;
 		interrupt-controller;
 		interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-parent = <&gic>;
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.c b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.c
index 189d409..1b5d356 100755
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.c
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.c
@@ -304,10 +304,10 @@
 	if (kstrtoul(token, 16, (unsigned long *)&value))
 		return -EINVAL;
 
-	pr_info("%s:phy=%d, reg=0x%x, val=0x%x\n", __func__,
+	pr_info("%s:phy=%d, reg=0x%lx, val=0x%lx\n", __func__,
 		0x1f, reg, value);
 	mt7530_mdio_w32(eth, reg, value);
-	pr_info("%s:phy=%d, reg=0x%x, val=0x%x confirm..\n", __func__,
+	pr_info("%s:phy=%d, reg=0x%lx, val=0x%x confirm..\n", __func__,
 		0x1f, reg, mt7530_mdio_r32(eth, reg));
 
 	return len;
@@ -352,12 +352,12 @@
 	if (kstrtoul(token, 16, (unsigned long *)&value))
 		return -EINVAL;
 
-	pr_info("%s:phy=%d, reg=0x%x, val=0x%x\n", __func__,
+	pr_info("%s:phy=%ld, reg=0x%lx, val=0x%lx\n", __func__,
 		phy, reg, value);
 
 	_mtk_mdio_write(eth, phy,  reg, value);
 
-	pr_info("%s:phy=%d, reg=0x%x, val=0x%x confirm..\n", __func__,
+	pr_info("%s:phy=%ld, reg=0x%lx, val=0x%x confirm..\n", __func__,
 		phy, reg, _mtk_mdio_read(eth, phy, reg));
 
 	return len;
@@ -371,7 +371,7 @@
 	int count = len;
 	unsigned long dbg_level = 0;
 
-	len = min(count, sizeof(buf) - 1);
+	len = min((size_t)count, sizeof(buf) - 1);
 	if (copy_from_user(buf, ptr, len))
 		return -EFAULT;
 
@@ -697,8 +697,6 @@
 
 int esw_cnt_read(struct seq_file *seq, void *v)
 {
-	unsigned int pkt_cnt = 0;
-	int i = 0;
 	struct mtk_eth *eth = g_eth;
 
 	gdm_cnt_read(eth);
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_reset.c b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_reset.c
index 5ab74ad..66b4646 100644
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_reset.c
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_reset.c
@@ -326,7 +326,7 @@
 {
 	static u32 err_cnt_qtx;
 	u32 err_flag = 0;
-	u32 i = 0, is_rx_fc = 0;
+	u32 is_rx_fc = 0;
 
 	u32 is_qfsm_hang = (mtk_r32(eth, MTK_QDMA_FSM) & 0xF00) != 0;
 	u32 is_qfwd_hang = mtk_r32(eth, MTK_QDMA_FWD_CNT) == 0;
@@ -624,7 +624,7 @@
 	switch (event) {
 	case MTK_WIFI_RESET_DONE:
 	case MTK_FE_STOP_TRAFFIC_DONE:
-		pr_info("%s rcv done event:%x\n", __func__, event);
+		pr_info("%s rcv done event:%lx\n", __func__, event);
 		mtk_rest_cnt--;
 		if(!mtk_rest_cnt) {
 			complete(&wait_ser_done);
@@ -642,7 +642,7 @@
 	case MTK_FE_STOP_TRAFFIC_DONE_FAIL:
 		mtk_stop_fail = true;
 		mtk_reset_flag = MTK_FE_START_RESET;
-		pr_info("%s rcv done event:%x\n", __func__, event);
+		pr_info("%s rcv done event:%lx\n", __func__, event);
 		complete(&wait_ser_done);
 		mtk_rest_cnt = mtk_wifi_num;
 		break;
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 8f94fc5..78ac67e 100755
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1053,11 +1053,11 @@
 		/* fall through */
 	case PHY_INTERFACE_MODE_1000BASEX:
 		phylink_set(mask, 1000baseX_Full);
-		/* fall through; */
+		/* fall through */
 	case PHY_INTERFACE_MODE_2500BASEX:
 		phylink_set(mask, 2500baseX_Full);
 		phylink_set(mask, 2500baseT_Full);
-		/* fall through; */
+		/* fall through */
 	case PHY_INTERFACE_MODE_GMII:
 	case PHY_INTERFACE_MODE_RGMII:
 	case PHY_INTERFACE_MODE_RGMII_ID:
@@ -3983,8 +3983,6 @@
 static void mtk_pending_work(struct work_struct *work)
 {
 	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
-	struct device_node *phy_node = NULL;
-	struct mtk_mac *mac = NULL;
 	int err, i = 0;
 	unsigned long restart = 0;
 	u32 val = 0;
@@ -4357,7 +4355,6 @@
 static int mtk_set_eee(struct net_device *dev, struct ethtool_eee *eee)
 {
 	struct mtk_mac *mac = netdev_priv(dev);
-	struct mtk_eth *eth = mac->hw;
 
 	if (mac->type == MTK_GDM_TYPE) {
 		if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 06c2b0a..01e15fe 100755
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -1528,7 +1528,7 @@
 		       MTK_GMAC1_USXGMII | MTK_GMAC2_USXGMII | \
 		       MTK_GMAC3_USXGMII | MTK_MUX_GMAC123_TO_USXGMII | \
 		       MTK_GMAC2_XGMII | MTK_MUX_GMAC2_TO_XGMII | MTK_RSS | \
-		       MTK_NETSYS_RX_V2)
+		       MTK_NETSYS_RX_V2 | MTK_8GB_ADDRESSING)
 
 struct mtk_tx_dma_desc_info {
 	dma_addr_t	addr;
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_usxgmii.c b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_usxgmii.c
index e6007f6..a181fc2 100644
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_usxgmii.c
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_usxgmii.c
@@ -792,7 +792,8 @@
 	unsigned int cur = offset;
 	unsigned int val1 = 0, val2 = 0, val3 = 0, val4 = 0;
 
-	pr_info("\n============ %s ============ pmap:%x\n", name, pmap);
+	pr_info("\n============ %s ============ pmap:%lx\n",
+		name, (unsigned long)pmap);
 	while (cur < offset + range) {
 		regmap_read(pmap, cur, &val1);
 		regmap_read(pmap, cur + 0x4, &val2);
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/pinctrl/mediatek/pinctrl-mt7988.c b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/pinctrl/mediatek/pinctrl-mt7988.c
index d685328..49c0be1 100644
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/pinctrl/mediatek/pinctrl-mt7988.c
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/pinctrl/mediatek/pinctrl-mt7988.c
@@ -817,7 +817,7 @@
 static int mt7988_i2c2_0_pins[] = { 69, 70 };
 static int mt7988_i2c2_0_funcs[] = { 4, 4 };
 
-static int mt7988_i2c2_1_pins[] = { 70, 71 };
+static int mt7988_i2c2_1_pins[] = { 71, 72 };
 static int mt7988_i2c2_1_funcs[] = { 1, 1 };
 
 /* eth */
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/wed3/999-3021-mtk-wed-add-wed3-support.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/wed3/999-3021-mtk-wed-add-wed3-support.patch
new file mode 100644
index 0000000..bccc43e
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/wed3/999-3021-mtk-wed-add-wed3-support.patch
@@ -0,0 +1,3720 @@
+From 400f8349a31ffc48538aa7df64a88111de9a738b Mon Sep 17 00:00:00 2001
+From: Sujuan Chen <sujuan.chen@mediatek.com>
+Date: Thu, 13 Apr 2023 15:51:08 +0800
+Subject: [PATCH] mtk:wed:add wed3 support
+
+Signed-off-by: sujuan.chen <sujuan.chen@mediatek.com>
+---
+ arch/arm64/boot/dts/mediatek/mt7988.dtsi      |  152 ++-
+ .../dts/mediatek/mt7988a-dsa-10g-spim-nor.dts |   16 +-
+ .../dts/mediatek/mt7988d-dsa-10g-spim-nor.dts |   16 +-
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c   |    3 +-
+ drivers/net/ethernet/mediatek/mtk_eth_soc.h   |    5 +-
+ drivers/net/ethernet/mediatek/mtk_ppe.c       |   17 +-
+ drivers/net/ethernet/mediatek/mtk_ppe.h       |    2 +-
+ .../net/ethernet/mediatek/mtk_ppe_offload.c   |   13 +-
+ drivers/net/ethernet/mediatek/mtk_wed.c       | 1164 +++++++++++++----
+ drivers/net/ethernet/mediatek/mtk_wed.h       |   25 +-
+ .../net/ethernet/mediatek/mtk_wed_debugfs.c   |  584 ++++++++-
+ drivers/net/ethernet/mediatek/mtk_wed_mcu.c   |   13 +-
+ drivers/net/ethernet/mediatek/mtk_wed_mcu.h   |    5 +-
+ drivers/net/ethernet/mediatek/mtk_wed_regs.h  |  338 ++++-
+ include/linux/netdevice.h                     |    7 +
+ include/linux/soc/mediatek/mtk_wed.h          |   81 +-
+ 16 files changed, 1446 insertions(+), 333 deletions(-)
+ mode change 100755 => 100644 drivers/net/ethernet/mediatek/mtk_ppe.c
+
+diff --git a/arch/arm64/boot/dts/mediatek/mt7988.dtsi b/arch/arm64/boot/dts/mediatek/mt7988.dtsi
+index 364deef..f9a0120 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7988.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt7988.dtsi
+@@ -191,44 +191,49 @@
+ 		status = "disabled";
+ 	};
+ 
+-	wed: wed@15010000 {
+-		compatible = "mediatek,wed";
+-		wed_num = <3>;
+-		/* add this property for wed get the pci slot number. */
+-		pci_slot_map = <0>, <1>, <2>;
+-		reg = <0 0x15010000 0 0x2000>,
+-		      <0 0x15012000 0 0x2000>,
+-		      <0 0x15014000 0 0x2000>;
++	wed0: wed@15010000 {
++		compatible = "mediatek,mt7988-wed",
++			     "syscon";
++		reg = <0 0x15010000 0 0x2000>;
+ 		interrupt-parent = <&gic>;
+-		interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>;
+-	};
+-
+-	wed2: wed2@15012000 {
+-		compatible = "mediatek,wed2";
+-		wed_num = <3>;
+-		/* add this property for wed get the pci slot number. */
+-		reg = <0 0x15010000 0 0x2000>,
+-		      <0 0x15012000 0 0x2000>,
+-		      <0 0x15014000 0 0x2000>;
++		interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
++		mediatek,wed_pcie = <&wed_pcie>;
++		mediatek,ap2woccif = <&ap2woccif0>;
++		mediatek,wocpu_ilm = <&wocpu0_ilm>;
++		mediatek,wocpu_dlm = <&wocpu0_dlm>;
++		mediatek,wocpu_boot = <&cpu0_boot>;
++		mediatek,wocpu_emi = <&wocpu0_emi>;
++		mediatek,wocpu_data = <&wocpu_data>;
++	};
++
++	wed1: wed@15012000 {
++		compatible = "mediatek,mt7988-wed",
++                             "syscon";
++		reg = <0 0x15012000 0 0x2000>;
+ 		interrupt-parent = <&gic>;
+-		interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>;
+-	};
+-
+-	wed3: wed3@15014000 {
+-		compatible = "mediatek,wed3";
+-		wed_num = <3>;
+-		/* add this property for wed get the pci slot number. */
+-		reg = <0 0x15010000 0 0x2000>,
+-		      <0 0x15012000 0 0x2000>,
+-		      <0 0x15014000 0 0x2000>;
++		interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
++		mediatek,wed_pcie = <&wed_pcie>;
++		mediatek,ap2woccif = <&ap2woccif1>;
++		mediatek,wocpu_ilm = <&wocpu1_ilm>;
++		mediatek,wocpu_dlm = <&wocpu1_dlm>;
++		mediatek,wocpu_boot = <&cpu1_boot>;
++		mediatek,wocpu_emi = <&wocpu1_emi>;
++		mediatek,wocpu_data = <&wocpu_data>;
++	};
++
++	wed2: wed@15014000 {
++		compatible = "mediatek,mt7988-wed",
++                             "syscon";
++		reg = <0 0x15014000 0 0x2000>;
+ 		interrupt-parent = <&gic>;
+-		interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>;
++		interrupts = <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>;
++		mediatek,wed_pcie = <&wed_pcie>;
++		mediatek,ap2woccif = <&ap2woccif2>;
++		mediatek,wocpu_ilm = <&wocpu2_ilm>;
++		mediatek,wocpu_dlm = <&wocpu2_dlm>;
++		mediatek,wocpu_boot = <&cpu2_boot>;
++		mediatek,wocpu_emi = <&wocpu2_emi>;
++		mediatek,wocpu_data = <&wocpu_data>;
+ 	};
+ 
+ 	wdma: wdma@15104800 {
+@@ -238,15 +243,25 @@
+ 		      <0 0x15105000 0 0x400>;
+ 	};
+ 
+-	ap2woccif: ap2woccif@151A5000 {
+-		compatible = "mediatek,ap2woccif";
+-		reg = <0 0x151A5000 0 0x1000>,
+-		      <0 0x152A5000 0 0x1000>,
+-		      <0 0x153A5000 0 0x1000>;
++	ap2woccif0: ap2woccif@151A5000 {
++		compatible = "mediatek,ap2woccif", "syscon";
++		reg = <0 0x151A5000 0 0x1000>;
++		interrupt-parent = <&gic>;
++		interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
++	};
++
++	ap2woccif1: ap2woccif@152A5000 {
++		compatible = "mediatek,ap2woccif", "syscon";
++		reg = <0 0x152A5000 0 0x1000>;
+ 		interrupt-parent = <&gic>;
+-		interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+-			     <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
++		interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
++	};
++
++	ap2woccif2: ap2woccif@153A5000 {
++		compatible = "mediatek,ap2woccif", "syscon";
++		reg = <0 0x153A5000 0 0x1000>;
++		interrupt-parent = <&gic>;
++		interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
+ 	};
+ 
+ 	wocpu0_ilm: wocpu0_ilm@151E0000 {
+@@ -254,31 +269,53 @@
+ 		reg = <0 0x151E0000 0 0x8000>;
+ 	};
+ 
+-	wocpu1_ilm: wocpu1_ilm@152E0000 {
+-		compatible = "mediatek,wocpu1_ilm";
++	wocpu1_ilm: wocpu_ilm@152E0000 {
++		compatible = "mediatek,wocpu_ilm";
+ 		reg = <0 0x152E0000 0 0x8000>;
+ 	};
+ 
+-	wocpu2_ilm: wocpu2_ilm@153E0000 {
+-		compatible = "mediatek,wocpu2_ilm";
+-		reg = <0 0x153E0000 0 0x8000>;
++	wocpu2_ilm: wocpu_ilm@153E0000 {
++                compatible = "mediatek,wocpu_ilm";
++                reg = <0 0x153E0000 0 0x8000>;
++    };
++
++	wocpu0_dlm: wocpu_dlm@151E8000 {
++		compatible = "mediatek,wocpu_dlm";
++		reg = <0 0x151E8000 0 0x2000>;
++
++		resets = <&ethsysrst 0>;
++		reset-names = "wocpu_rst";
++	};
++
++	wocpu1_dlm: wocpu_dlm@0x152E8000 {
++		compatible = "mediatek,wocpu_dlm";
++		reg = <0 0x152E8000 0 0x2000>;
++
++		resets = <&ethsysrst 0>;
++		reset-names = "wocpu_rst";
+ 	};
+ 
+-	wocpu_dlm: wocpu_dlm@151E8000 {
++	wocpu2_dlm: wocpu_dlm@0x153E8000 {
+ 		compatible = "mediatek,wocpu_dlm";
+-		reg = <0 0x151E8000 0 0x2000>,
+-		      <0 0x152E8000 0 0x2000>,
+-		      <0 0x153E8000 0 0x2000>;
++		reg = <0 0x153E8000 0 0x2000>;
+ 
+ 		resets = <&ethsysrst 0>;
+ 		reset-names = "wocpu_rst";
+ 	};
+ 
+-	cpu_boot: wocpu_boot@15194000 {
+-		compatible = "mediatek,wocpu_boot";
+-		reg = <0 0x15194000 0 0x1000>,
+-		      <0 0x15294000 0 0x1000>,
+-		      <0 0x15394000 0 0x1000>;
++	cpu0_boot: wocpu_boot@15194000 {
++		compatible = "mediatek,wocpu0_boot";
++		reg = <0 0x15194000 0 0x1000>;
++	};
++
++	cpu1_boot: wocpu_boot@15294000 {
++		compatible = "mediatek,wocpu1_boot";
++		reg = <0 0x15294000 0 0x1000>;
++	};
++
++	cpu2_boot: wocpu_boot@15394000 {
++		compatible = "mediatek,wocpu2_boot";
++		reg = <0 0x15394000 0 0x1000>;
+ 	};
+ 
+ 	reserved-memory {
+@@ -827,6 +864,7 @@
+ 					 <&topckgen CK_TOP_CB_SGM_325M>;
+ 		mediatek,ethsys = <&ethsys>;
+ 		mediatek,sgmiisys = <&sgmiisys0>, <&sgmiisys1>;
++		mediatek,wed = <&wed0>, <&wed1>, <&wed2>;
+ 		mediatek,usxgmiisys = <&usxgmiisys0>, <&usxgmiisys1>;
+ 		mediatek,xfi_pextp = <&xfi_pextp0>, <&xfi_pextp1>;
+ 		mediatek,xfi_pll = <&xfi_pll>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt7988a-dsa-10g-spim-nor.dts b/arch/arm64/boot/dts/mediatek/mt7988a-dsa-10g-spim-nor.dts
+index 7db5164..0a6db8b 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7988a-dsa-10g-spim-nor.dts
++++ b/arch/arm64/boot/dts/mediatek/mt7988a-dsa-10g-spim-nor.dts
+@@ -341,9 +341,23 @@
+ 	status = "okay";
+ };
+ 
+-&wed {
++&wed0 {
+ 	dy_txbm_enable = "true";
+ 	dy_txbm_budge = <8>;
+ 	txbm_init_sz = <10>;
+ 	status = "okay";
+ };
++
++&wed1 {
++	dy_txbm_enable = "true";
++	dy_txbm_budge = <8>;
++	txbm_init_sz = <10>;
++	status = "okay";
++};
++
++&wed2 {
++	dy_txbm_enable = "true";
++	dy_txbm_budge = <8>;
++	txbm_init_sz = <10>;
++	status = "okay";
++};
+\ No newline at end of file
+diff --git a/arch/arm64/boot/dts/mediatek/mt7988d-dsa-10g-spim-nor.dts b/arch/arm64/boot/dts/mediatek/mt7988d-dsa-10g-spim-nor.dts
+index 67c6508..c407b33 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7988d-dsa-10g-spim-nor.dts
++++ b/arch/arm64/boot/dts/mediatek/mt7988d-dsa-10g-spim-nor.dts
+@@ -325,9 +325,23 @@
+ 	status = "okay";
+ };
+ 
+-&wed {
++&wed0 {
+ 	dy_txbm_enable = "true";
+ 	dy_txbm_budge = <8>;
+ 	txbm_init_sz = <10>;
+ 	status = "okay";
+ };
++
++&wed1 {
++	dy_txbm_enable = "true";
++	dy_txbm_budge = <8>;
++	txbm_init_sz = <10>;
++	status = "okay";
++};
++
++&wed2 {
++	dy_txbm_enable = "true";
++	dy_txbm_budge = <8>;
++	txbm_init_sz = <10>;
++	status = "okay";
++};
+\ No newline at end of file
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 388982c..d59c29f 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -4709,7 +4709,8 @@ static int mtk_probe(struct platform_device *pdev)
+ 							  "mediatek,wed", i);
+ 		static const u32 wdma_regs[] = {
+ 			MTK_WDMA0_BASE,
+-			MTK_WDMA1_BASE
++			MTK_WDMA1_BASE,
++			MTK_WDMA2_BASE
+ 		};
+ 		void __iomem *wdma;
+ 		u32 wdma_phy;
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+index a9feaed..70e8377 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -605,9 +605,12 @@
+ #define RX_DMA_SPORT_MASK       0x7
+ #define RX_DMA_SPORT_MASK_V2    0xf
+ 
+-#if defined(CONFIG_MEDIATEK_NETSYS_V2)
++#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
+ #define MTK_WDMA0_BASE		0x4800
+ #define MTK_WDMA1_BASE		0x4c00
++#if defined(CONFIG_MEDIATEK_NETSYS_V3)
++#define MTK_WDMA2_BASE		0x5000
++#endif
+ #else
+ #define MTK_WDMA0_BASE		0x2800
+ #define MTK_WDMA1_BASE		0x2c00
+diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
+old mode 100755
+new mode 100644
+index bc13a9b..3910163
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
+@@ -9,6 +9,7 @@
+ #include <linux/if_ether.h>
+ #include <linux/if_vlan.h>
+ #include <net/dsa.h>
++#include <net/route.h>
+ #include "mtk_eth_soc.h"
+ #include "mtk_ppe.h"
+ #include "mtk_ppe_regs.h"
+@@ -396,7 +397,7 @@ int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
+ }
+ 
+ int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
+-			   int bss, int wcid)
++			   int bss, int wcid, bool amsdu_en)
+ {
+ 	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+ 	u32 *ib2 = mtk_foe_entry_ib2(entry);
+@@ -408,6 +409,9 @@ int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
+ 
+ 	l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
+ 		    FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
++#if defined(CONFIG_MEDIATEK_NETSYS_V3)
++	l2->winfo_pao = FIELD_PREP(MTK_FOE_WINFO_PAO_AMSDU_EN, amsdu_en);
++#endif
+ #else
+ 	if (wdma_idx)
+ 		*ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
+@@ -443,6 +447,17 @@ int mtk_foe_entry_set_dscp(struct mtk_foe_entry *entry, int dscp)
+ 	*ib2 &= ~MTK_FOE_IB2_DSCP;
+ 	*ib2 |= FIELD_PREP(MTK_FOE_IB2_DSCP, dscp);
+ 
++#if defined(CONFIG_MEDIATEK_NETSYS_V3)
++	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
++
++	if (*ib2 & MTK_FOE_IB2_WDMA_WINFO &&
++	    l2->winfo_pao & MTK_FOE_WINFO_PAO_AMSDU_EN) {
++		u8 tid = rt_tos2priority(dscp) & 0xf;
++
++		l2->winfo_pao |= FIELD_PREP(MTK_FOE_WINFO_PAO_TID, tid);
++	}
++#endif
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
+index df10040..9e7d5aa 100644
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
+@@ -428,7 +428,7 @@ int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port);
+ int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
+ int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
+ int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
+-			   int bss, int wcid);
++			   int bss, int wcid, bool amsdu_en);
+ int mtk_foe_entry_set_qid(struct mtk_foe_entry *entry, int qid);
+ int mtk_foe_entry_set_dscp(struct mtk_foe_entry *entry, int dscp);
+ int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+index 9bc0857..86fc9a1 100644
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+@@ -112,6 +112,7 @@ mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_i
+ 	info->queue = path.mtk_wdma.queue;
+ 	info->bss = path.mtk_wdma.bss;
+ 	info->wcid = path.mtk_wdma.wcid;
++	info->amsdu_en = path.mtk_wdma.amsdu_en;
+ 
+ 	return 0;
+ }
+@@ -193,13 +194,15 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
+ 
+ 	if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
+ 		mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss,
+-				       info.wcid);
++				       info.wcid, info.amsdu_en);
+ 		pse_port = PSE_PPE0_PORT;
+ #if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
+ 		if (info.wdma_idx == 0)
+ 			pse_port = PSE_WDMA0_PORT;
+ 		else if (info.wdma_idx == 1)
+ 			pse_port = PSE_WDMA1_PORT;
++		else if (info.wdma_idx == 2)
++			pse_port = PSE_WDMA2_PORT;
+ 		else
+ 			return -EOPNOTSUPP;
+ #endif
+@@ -458,8 +461,8 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
+ 	if (err)
+ 		return err;
+ 
+-	if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
+-		return err;
++	/*if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
++		return err;*/
+ 
+ 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ 	if (!entry)
+@@ -499,8 +502,8 @@ clear:
+ 	mtk_foe_entry_clear(eth->ppe[i], entry);
+ free:
+ 	kfree(entry);
+-	if (wed_index >= 0)
+-	    mtk_wed_flow_remove(wed_index);
++	/*if (wed_index >= 0)
++	    mtk_wed_flow_remove(wed_index);*/
+ 	return err;
+ }
+ 
+diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
+index 37a86c3..e3809db 100644
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -28,7 +28,7 @@ struct wo_cmd_ring {
+ 	u32 cnt;
+ 	u32 unit;
+ };
+-static struct mtk_wed_hw *hw_list[2];
++static struct mtk_wed_hw *hw_list[3];
+ static DEFINE_MUTEX(hw_lock);
+ 
+ static void
+@@ -73,6 +73,26 @@ mtk_wdma_read_reset(struct mtk_wed_device *dev)
+ 	return wdma_r32(dev, MTK_WDMA_GLO_CFG);
+ }
+ 
++static u32
++mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
++{
++	if (wed_r32(dev, reg) & mask)
++		return true;
++
++	return false;
++}
++
++static int
++mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
++{
++	int sleep = 1000;
++	int timeout = 100 * sleep;
++	u32 val;
++
++	return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
++				 timeout, false, dev, reg, mask);
++}
++
+ static int
+ mtk_wdma_rx_reset(struct mtk_wed_device *dev)
+ {
+@@ -235,6 +255,8 @@ mtk_wed_assign(struct mtk_wed_device *dev)
+ 			continue;
+ 
+ 		hw->wed_dev = dev;
++		hw->pci_base = MTK_WED_PCIE_BASE;
++
+ 		return hw;
+ 	}
+ 
+@@ -242,23 +264,84 @@ mtk_wed_assign(struct mtk_wed_device *dev)
+ }
+ 
+ static int
+-mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
++mtk_wed_pao_buffer_alloc(struct mtk_wed_device *dev)
++{
++	struct mtk_wed_pao *pao;
++	int i, j;
++
++	pao = kzalloc(sizeof(struct mtk_wed_pao), GFP_KERNEL);
++	if (!pao)
++		return -ENOMEM;
++
++	dev->hw->wed_pao = pao;
++
++	for (i = 0; i < 32; i++) {
++		/* each segment is 64K*/
++		pao->hif_txd[i] = (char *)__get_free_pages(GFP_ATOMIC |
++							   GFP_DMA32 |
++							   __GFP_ZERO, 4);
++		if (!pao->hif_txd[i])
++			goto err;
++
++		pao->hif_txd_phys[i] = dma_map_single(dev->hw->dev,
++						      pao->hif_txd[i],
++						      16 * PAGE_SIZE,
++						      DMA_TO_DEVICE);
++		if (unlikely(dma_mapping_error(dev->hw->dev,
++					       pao->hif_txd_phys[i])))
++			goto err;
++	}
++
++	return 0;
++
++err:
++	for (j = 0; j < i; j++)
++		dma_unmap_single(dev->hw->dev, pao->hif_txd_phys[j],
++			     16 * PAGE_SIZE, DMA_TO_DEVICE);
++
++	return -ENOMEM;
++}
++
++static int
++mtk_wed_pao_free_buffer(struct mtk_wed_device *dev)
++{
++	struct mtk_wed_pao *pao = dev->hw->wed_pao;
++	int i;
++
++	for (i = 0; i < 32; i++) {
++		dma_unmap_single(dev->hw->dev, pao->hif_txd_phys[i],
++				 16 * PAGE_SIZE, DMA_TO_DEVICE);
++		free_pages((unsigned long)pao->hif_txd[i], 4);
++	}
++
++	return 0;
++}
++
++static int
++mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
+ {
+ 	struct mtk_wdma_desc *desc;
++	void *desc_ptr;
+ 	dma_addr_t desc_phys;
+-	void **page_list;
++	struct dma_page_info *page_list;
+ 	u32 last_seg = MTK_WDMA_DESC_CTRL_LAST_SEG1;
+ 	int token = dev->wlan.token_start;
+-	int ring_size, n_pages, page_idx;
+-	int i;
+-
++	int ring_size, pkt_nums, n_pages, page_idx;
++	int i, ret = 0;
+ 
+ 	if (dev->ver == MTK_WED_V1) {
+ 		ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
+-	} else {
++		pkt_nums = ring_size;
++		dev->tx_buf_ring.desc_size = sizeof(struct mtk_wdma_desc);
++	} else if (dev->hw->version == 2) {
+ 		ring_size = MTK_WED_VLD_GROUP_SIZE * MTK_WED_PER_GROUP_PKT +
+ 			    MTK_WED_WDMA_RING_SIZE * 2;
+ 		last_seg = MTK_WDMA_DESC_CTRL_LAST_SEG0;
++		dev->tx_buf_ring.desc_size = sizeof(struct mtk_wdma_desc);
++	} else if (dev->hw->version == 3) {
++		ring_size = MTK_WED_TX_BM_DMA_SIZE;
++		pkt_nums = MTK_WED_TX_BM_PKT_CNT;
++		dev->tx_buf_ring.desc_size = sizeof(struct mtk_rxbm_desc);
+ 	}
+ 
+ 	n_pages = ring_size / MTK_WED_BUF_PER_PAGE;
+@@ -267,18 +350,20 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
+ 	if (!page_list)
+ 		return -ENOMEM;
+ 
+-	dev->buf_ring.size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
+-	dev->buf_ring.pages = page_list;
++	dev->tx_buf_ring.size = ring_size;
++	dev->tx_buf_ring.pages = page_list;
++	dev->tx_buf_ring.pkt_nums = pkt_nums;
+ 
+-	desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
+-				  &desc_phys, GFP_KERNEL);
+-	if (!desc)
++	desc_ptr = dma_alloc_coherent(dev->hw->dev,
++				      ring_size * dev->tx_buf_ring.desc_size,
++				      &desc_phys, GFP_KERNEL);
++	if (!desc_ptr)
+ 		return -ENOMEM;
+ 
+-	dev->buf_ring.desc = desc;
+-	dev->buf_ring.desc_phys = desc_phys;
++	dev->tx_buf_ring.desc = desc_ptr;
++	dev->tx_buf_ring.desc_phys = desc_phys;
+ 
+-	for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
++	for (i = 0, page_idx = 0; i < pkt_nums; i += MTK_WED_BUF_PER_PAGE) {
+ 		dma_addr_t page_phys, buf_phys;
+ 		struct page *page;
+ 		void *buf;
+@@ -295,7 +380,10 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
+ 			return -ENOMEM;
+ 		}
+ 
+-		page_list[page_idx++] = page;
++		page_list[page_idx].addr = page;
++		page_list[page_idx].addr_phys = page_phys;
++		page_idx++;
++
+ 		dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
+ 					DMA_BIDIRECTIONAL);
+ 
+@@ -303,19 +391,23 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
+ 		buf_phys = page_phys;
+ 
+ 		for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
+-			u32 txd_size;
+-
+-			txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
+-
++			desc = desc_ptr;
+ 			desc->buf0 = buf_phys;
+-			desc->buf1 = buf_phys + txd_size;
+-			desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0,
+-						txd_size) |
+-				     FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
+-						MTK_WED_BUF_SIZE - txd_size) |
+-						last_seg;
+-			desc->info = 0;
+-			desc++;
++			if (dev->hw->version < 3) {
++				u32 txd_size;
++
++				txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
++				desc->buf1 = buf_phys + txd_size;
++				desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0,
++							txd_size) |
++					     FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
++							MTK_WED_BUF_SIZE - txd_size) |
++							last_seg;
++				desc->info = 0;
++			} else {
++				desc->ctrl = token << 16;
++			}
++			desc_ptr += dev->tx_buf_ring.desc_size;
+ 
+ 			buf += MTK_WED_BUF_SIZE;
+ 			buf_phys += MTK_WED_BUF_SIZE;
+@@ -325,15 +417,18 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
+ 					   DMA_BIDIRECTIONAL);
+ 	}
+ 
+-	return 0;
++	if (dev->hw->version == 3)
++		ret = mtk_wed_pao_buffer_alloc(dev);
++
++	return ret;
+ }
+ 
+ static void
+-mtk_wed_free_buffer(struct mtk_wed_device *dev)
++mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
+ {
+-	struct mtk_wdma_desc *desc = dev->buf_ring.desc;
+-	void **page_list = dev->buf_ring.pages;
+-	int ring_size, page_idx;
++	struct mtk_rxbm_desc *desc = dev->tx_buf_ring.desc;
++	struct dma_page_info *page_list = dev->tx_buf_ring.pages;
++	int ring_size, page_idx, pkt_nums;
+ 	int i;
+ 
+ 	if (!page_list)
+@@ -342,33 +437,33 @@ mtk_wed_free_buffer(struct mtk_wed_device *dev)
+ 	if (!desc)
+ 		goto free_pagelist;
+ 
+-	if (dev->ver == MTK_WED_V1) {
+-		ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
+-	} else {
+-		ring_size = MTK_WED_VLD_GROUP_SIZE * MTK_WED_PER_GROUP_PKT +
+-			    MTK_WED_WDMA_RING_SIZE * 2;
++	pkt_nums = ring_size = dev->tx_buf_ring.size;
++	if (dev->hw->version == 3) {
++		mtk_wed_pao_free_buffer(dev);
++		pkt_nums = dev->tx_buf_ring.pkt_nums;
+ 	}
+ 
+-	for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
+-		void *page = page_list[page_idx++];
++	for (i = 0, page_idx = 0; i < pkt_nums; i += MTK_WED_BUF_PER_PAGE) {
++		void *page = page_list[page_idx].addr;
+ 
+ 		if (!page)
+ 			break;
+ 
+-		dma_unmap_page(dev->hw->dev, desc[i].buf0,
++		dma_unmap_page(dev->hw->dev, page_list[page_idx].addr_phys,
+ 			       PAGE_SIZE, DMA_BIDIRECTIONAL);
+ 		__free_page(page);
++		page_idx++;
+ 	}
+ 
+-	dma_free_coherent(dev->hw->dev, ring_size * sizeof(*desc),
+-			  desc, dev->buf_ring.desc_phys);
++	dma_free_coherent(dev->hw->dev, ring_size * dev->tx_buf_ring.desc_size,
++			  dev->tx_buf_ring.desc, dev->tx_buf_ring.desc_phys);
+ 
+ free_pagelist:
+ 	kfree(page_list);
+ }
+ 
+ static int
+-mtk_wed_rx_bm_alloc(struct mtk_wed_device *dev)
++mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
+ {
+ 	struct mtk_rxbm_desc *desc;
+ 	dma_addr_t desc_phys;
+@@ -389,7 +484,7 @@ mtk_wed_rx_bm_alloc(struct mtk_wed_device *dev)
+ }
+ 
+ static void
+-mtk_wed_free_rx_bm(struct mtk_wed_device *dev)
++mtk_wed_free_rx_buffer(struct mtk_wed_device *dev)
+ {
+ 	struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc;
+ 	int ring_size = dev->rx_buf_ring.size;
+@@ -403,6 +498,113 @@ mtk_wed_free_rx_bm(struct mtk_wed_device *dev)
+ 			  desc, dev->rx_buf_ring.desc_phys);
+ }
+ 
++/* TODO */
++static int
++mtk_wed_rx_page_buffer_alloc(struct mtk_wed_device *dev)
++{
++	int ring_size = dev->wlan.rx_nbuf, buf_num = MTK_WED_RX_PG_BM_CNT;
++	struct mtk_rxbm_desc *desc;
++	dma_addr_t desc_phys;
++	struct dma_page_info *page_list;
++	int n_pages, page_idx;
++	int i;
++
++	n_pages = buf_num / MTK_WED_RX_PAGE_BUF_PER_PAGE;
++
++	page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
++	if (!page_list)
++		return -ENOMEM;
++
++	dev->rx_page_buf_ring.size = ring_size & ~(MTK_WED_BUF_PER_PAGE - 1);
++	dev->rx_page_buf_ring.pages = page_list;
++	dev->rx_page_buf_ring.pkt_nums = buf_num;
++
++	desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
++	                         &desc_phys, GFP_KERNEL);
++	if (!desc)
++		return -ENOMEM;
++
++	dev->rx_page_buf_ring.desc = desc;
++	dev->rx_page_buf_ring.desc_phys = desc_phys;
++
++	for (i = 0, page_idx = 0; i < buf_num; i += MTK_WED_RX_PAGE_BUF_PER_PAGE) {
++		dma_addr_t page_phys, buf_phys;
++		struct page *page;
++		void *buf;
++		int s;
++
++		page = __dev_alloc_pages(GFP_KERNEL, 0);
++		if (!page)
++			return -ENOMEM;
++
++		page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
++		                        DMA_BIDIRECTIONAL);
++		if (dma_mapping_error(dev->hw->dev, page_phys)) {
++			__free_page(page);
++			return -ENOMEM;
++		}
++
++		page_list[page_idx].addr= page;
++		page_list[page_idx].addr_phys= page_phys;
++		page_idx++;
++
++		dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
++		                       DMA_BIDIRECTIONAL);
++
++		buf = page_to_virt(page);
++		buf_phys = page_phys;
++
++		for (s = 0; s < MTK_WED_RX_PAGE_BUF_PER_PAGE; s++) {
++
++			desc->buf0 = cpu_to_le32(buf_phys);
++			desc++;
++
++			buf += MTK_WED_PAGE_BUF_SIZE;
++			buf_phys += MTK_WED_PAGE_BUF_SIZE;
++		}
++
++		dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
++					   DMA_BIDIRECTIONAL);
++	}
++
++	return 0;
++}
++
++static void
++mtk_wed_rx_page_free_buffer(struct mtk_wed_device *dev)
++{
++	struct mtk_rxbm_desc *desc = dev->rx_page_buf_ring.desc;
++	struct dma_page_info *page_list = dev->rx_page_buf_ring.pages;
++	int ring_size, page_idx;
++	int i;
++
++	if (!page_list)
++		return;
++
++	if (!desc)
++		goto free_pagelist;
++
++	ring_size = dev->rx_page_buf_ring.pkt_nums;
++
++	for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_RX_PAGE_BUF_PER_PAGE) {
++		void *page = page_list[page_idx].addr;
++
++		if (!page)
++			break;
++
++		dma_unmap_page(dev->hw->dev, page_list[page_idx].addr_phys,
++                              PAGE_SIZE, DMA_BIDIRECTIONAL);
++		__free_page(page);
++		page_idx++;
++       }
++
++	dma_free_coherent(dev->hw->dev, dev->rx_page_buf_ring.size * sizeof(*desc),
++                         desc, dev->rx_page_buf_ring.desc_phys);
++
++free_pagelist:
++       kfree(page_list);
++}
++
+ static void
+ mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, int scale)
+ {
+@@ -416,19 +618,25 @@ mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, int sca
+ static void
+ mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
+ {
+-	int i;
++	int i, scale = dev->hw->version > 1 ? 2 : 1;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
+-		mtk_wed_free_ring(dev, &dev->tx_ring[i], 1);
++		if (!(dev->rx_ring[i].flags & MTK_WED_RING_CONFIGURED))
++			mtk_wed_free_ring(dev, &dev->tx_ring[i], 1);
++
+ 	for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
+-		mtk_wed_free_ring(dev, &dev->tx_wdma[i], dev->ver);
++		if ((dev->rx_ring[i].flags & MTK_WED_RING_CONFIGURED))
++			mtk_wed_free_ring(dev, &dev->tx_wdma[i], scale);
+ }
+ 
+ static void
+ mtk_wed_free_rx_rings(struct mtk_wed_device *dev)
+ {
+-	mtk_wed_free_rx_bm(dev);
++	mtk_wed_free_rx_buffer(dev);
+ 	mtk_wed_free_ring(dev, &dev->rro.rro_ring, 1);
++
++	if (dev->wlan.hwrro)
++		mtk_wed_rx_page_free_buffer(dev);
+ }
+ 
+ static void
+@@ -437,7 +645,7 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
+ 	u32 wdma_mask;
+ 
+ 	wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
+-	if (dev->ver > MTK_WED_V1)
++	if (mtk_wed_get_rx_capa(dev))
+ 		wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
+ 					GENMASK(1, 0));
+ 	/* wed control cr set */
+@@ -447,7 +655,7 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
+ 		MTK_WED_CTRL_WED_TX_BM_EN |
+ 		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+ 
+-	if (dev->ver == MTK_WED_V1) {
++	if (dev->hw->version == 1) {
+ 		wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
+ 			MTK_WED_PCIE_INT_TRIGGER_STATUS);
+ 
+@@ -458,6 +666,8 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
+ 		wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
+ 			MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
+ 	} else {
++		if (dev->hw->version == 3)
++			wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_TKID_ALI_EN);
+ 
+ 		wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
+ 			MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
+@@ -475,18 +685,20 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
+ 			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
+ 				    dev->wlan.txfree_tbit));
+ 
+-		wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
+-			MTK_WED_WPDMA_INT_CTRL_RX0_EN |
+-			MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
+-			MTK_WED_WPDMA_INT_CTRL_RX1_EN |
+-			MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
+-			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
+-				   dev->wlan.rx_tbit[0]) |
+-			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
+-				   dev->wlan.rx_tbit[1]));
++		if (mtk_wed_get_rx_capa(dev))
++			wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
++				MTK_WED_WPDMA_INT_CTRL_RX0_EN |
++				MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
++				MTK_WED_WPDMA_INT_CTRL_RX1_EN |
++				MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
++				FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
++					   dev->wlan.rx_tbit[0]) |
++				FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
++					   dev->wlan.rx_tbit[1]));
+ 	}
++
+ 	wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
+-	if (dev->ver == MTK_WED_V1) {
++	if (dev->hw->version == 1) {
+ 		wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
+ 	} else {
+ 		wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
+@@ -506,6 +718,21 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
+ {
+ 	u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+ 
++	switch (dev->hw->version) {
++	case 1:
++		mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
++		break;
++	case 2 :
++		mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH2 |
++			MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH2 |
++			MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
++			MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
++		break;
++	case 3:
++		mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT;
++		break;
++	}
++
+ 	if (!dev->hw->num_flows)
+ 		mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
+ 
+@@ -514,31 +741,86 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
+ }
+ 
+ static void
+-mtk_wed_set_512_support(struct mtk_wed_device *dev, bool en)
++mtk_wed_pao_init(struct mtk_wed_device *dev)
+ {
+-	if (en) {
+-		wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
+-		wed_w32(dev, MTK_WED_TXP_DW1,
+-			FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
+-	} else {
+-		wed_w32(dev, MTK_WED_TXP_DW1,
+-			FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
+-		wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
++	struct mtk_wed_pao *pao = dev->hw->wed_pao;
++	int i;
++
++	for (i = 0; i < 32; i++)
++		wed_w32(dev, MTK_WED_PAO_HIFTXD_BASE_L(i),
++			pao->hif_txd_phys[i]);
++
++	/* init all sta parameter */
++	wed_w32(dev, MTK_WED_PAO_STA_INFO_INIT, MTK_WED_PAO_STA_RMVL |
++		MTK_WED_PAO_STA_WTBL_HDRT_MODE |
++		FIELD_PREP(MTK_WED_PAO_STA_MAX_AMSDU_LEN,
++			   dev->wlan.max_amsdu_len >> 8) |
++		FIELD_PREP(MTK_WED_PAO_STA_MAX_AMSDU_NUM,
++			   dev->wlan.max_amsdu_nums));
++
++	wed_w32(dev, MTK_WED_PAO_STA_INFO, MTK_WED_PAO_STA_INFO_DO_INIT);
++
++	if (mtk_wed_poll_busy(dev, MTK_WED_PAO_STA_INFO,
++			      MTK_WED_PAO_STA_INFO_DO_INIT)) {
++		dev_err(dev->hw->dev, "mtk_wed%d: pao init failed!\n",
++			dev->hw->index);
++		return;
+ 	}
++
++	/* init pao txd src */
++	wed_set(dev, MTK_WED_PAO_HIFTXD_CFG,
++		FIELD_PREP(MTK_WED_PAO_HIFTXD_SRC, dev->hw->index));
++
++	/* init qmem */
++	wed_set(dev, MTK_WED_PAO_PSE, MTK_WED_PAO_PSE_RESET);
++	if (mtk_wed_poll_busy(dev, MTK_WED_PAO_MON_QMEM_STS1, BIT(29))) {
++		pr_info("%s: init pao qmem fail\n", __func__);
++		return;
++	}
++
++	/* eagle E1 PCIE1 tx ring 22 flow control issue */
++	if (dev->wlan.chip_id == 0x7991) {
++		wed_clr(dev, MTK_WED_PAO_AMSDU_FIFO,
++			MTK_WED_PAO_AMSDU_IS_PRIOR0_RING);
++	}
++
++	wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_PAO_EN);
++
++	return;
+ }
+ 
+-static void
+-mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
++static int
++mtk_wed_hwrro_init(struct mtk_wed_device *dev)
+ {
+-#define MTK_WFMDA_RX_DMA_EN 	BIT(2)
++	if (!mtk_wed_get_rx_capa(dev))
++		return 0;
++
++	wed_set(dev, MTK_WED_RRO_PG_BM_RX_DMAM,
++		FIELD_PREP(MTK_WED_RRO_PG_BM_RX_SDL0, 128));
++
++	wed_w32(dev, MTK_WED_RRO_PG_BM_BASE,
++		dev->rx_page_buf_ring.desc_phys);
++
++	wed_w32(dev, MTK_WED_RRO_PG_BM_INIT_PTR,
++		MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX |
++		FIELD_PREP(MTK_WED_RRO_PG_BM_SW_TAIL_IDX,
++			   MTK_WED_RX_PG_BM_CNT));
++
++	/* enable rx_page_bm to fetch dmad */
++	wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN);
++
++	return 0;
++}
+ 
++static int
++mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev,
++			   struct mtk_wed_ring *ring)
++{
+ 	int timeout = 3;
+-	u32 cur_idx, regs;
++	u32 cur_idx;
+ 
+ 	do {
+-		regs = MTK_WED_WPDMA_RING_RX_DATA(idx) +
+-		       MTK_WED_RING_OFS_CPU_IDX;
+-		cur_idx = wed_r32(dev, regs);
++		cur_idx = readl(ring->wpdma + MTK_WED_RING_OFS_CPU_IDX);
+ 		if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
+ 			break;
+ 
+@@ -546,70 +828,133 @@ mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
+ 		timeout--;
+ 	} while (timeout > 0);
+ 
+-	if (timeout) {
+-		unsigned int val;
++	return timeout;
++}
+ 
+-		val = wifi_r32(dev, dev->wlan.wpdma_rx_glo -
+-			       dev->wlan.phy_base);
+-		val |= MTK_WFMDA_RX_DMA_EN;
+ 
+-		wifi_w32(dev, dev->wlan.wpdma_rx_glo -
+-			 dev->wlan.phy_base, val);
++static void
++mtk_wed_set_512_support(struct mtk_wed_device *dev, bool en)
++{
++	if (en) {
++		wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
++		wed_w32(dev, MTK_WED_TXP_DW1,
++			FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
+ 	} else {
+-		dev_err(dev->hw->dev, "mtk_wed%d: rx(%d) dma enable failed!\n",
+-			       dev->hw->index, idx);
++		wed_w32(dev, MTK_WED_TXP_DW1,
++			FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
++		wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
+ 	}
+ }
+ 
+ static void
+ mtk_wed_dma_enable(struct mtk_wed_device *dev)
+ {
+-	wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
+-		MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
++#define MTK_WFMDA_RX_DMA_EN 	BIT(2)
++
++	if (dev->hw->version == 1)
++		wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
++			MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
+ 
+ 	wed_set(dev, MTK_WED_GLO_CFG,
+ 		MTK_WED_GLO_CFG_TX_DMA_EN |
+ 		MTK_WED_GLO_CFG_RX_DMA_EN);
++
++	wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG,
++		FIELD_PREP(MTK_WED_WDMA_RX_PREF_BURST_SIZE, 0x10) |
++		FIELD_PREP(MTK_WED_WDMA_RX_PREF_LOW_THRES, 0x8));
++	wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
++		MTK_WED_WDMA_RX_PREF_DDONE2_EN);
++
++	wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, MTK_WED_WDMA_RX_PREF_EN);
++
+ 	wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+ 		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+-		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
++		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN |
++		MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR);
+ 	wed_set(dev, MTK_WED_WDMA_GLO_CFG,
+ 		MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+ 
+ 	wdma_set(dev, MTK_WDMA_GLO_CFG,
+-		 MTK_WDMA_GLO_CFG_TX_DMA_EN |
++		 MTK_WDMA_GLO_CFG_TX_DMA_EN /*|
+ 		 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+-		 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
++		 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES*/);
+ 
+-	if (dev->ver == MTK_WED_V1) {
++	if (dev->hw->version == 1) {
+ 		wdma_set(dev, MTK_WDMA_GLO_CFG,
+ 			 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
+ 	} else {
+ 		int idx = 0;
+ 
+-		wed_set(dev, MTK_WED_WPDMA_CTRL,
+-			MTK_WED_WPDMA_CTRL_SDL1_FIXED);
+-
+-		wed_set(dev, MTK_WED_WDMA_GLO_CFG,
+-			MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
+-			MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
++		if (mtk_wed_get_rx_capa(dev))
++			wed_set(dev, MTK_WED_WDMA_GLO_CFG,
++				MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
++				MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
+ 
+ 		wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+ 			MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
+ 			MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
+ 
++		if (dev->hw->version == 3) {
++			wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
++				MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST);
++			wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
++				MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK |
++				MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK |
++				MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4);
++
++			wdma_set(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
++			//wdma_w32(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
++			if (mtk_wed_get_rx_capa(dev)) {
++				wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
++					MTK_WED_WPDMA_RX_D_PREF_EN |
++					FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE, 0x10) |
++					FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_LOW_THRES, 0x8));
++
++				wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN);
++
++				wdma_set(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
++
++				wdma_set(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
++			}
++		}
++
+ 		wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ 			MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
+ 			MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
+ 
++		if (!mtk_wed_get_rx_capa(dev))
++			return;
++
++		wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RXD_READ_LEN);
+ 		wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ 			MTK_WED_WPDMA_RX_D_RX_DRV_EN |
+ 			FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
+ 			FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
+ 				   0x2));
+ 
+-		for (idx = 0; idx < dev->hw->ring_num; idx++)
+-			mtk_wed_check_wfdma_rx_fill(dev, idx);
++		for (idx = 0; idx < dev->hw->ring_num; idx++) {
++			struct mtk_wed_ring *ring = &dev->rx_ring[idx];
++
++			if(!(ring->flags & MTK_WED_RING_CONFIGURED))
++				continue;
++
++			if(mtk_wed_check_wfdma_rx_fill(dev, ring)) {
++				unsigned int val;
++
++				val = wifi_r32(dev, dev->wlan.wpdma_rx_glo -
++					       dev->wlan.phy_base);
++				val |= MTK_WFMDA_RX_DMA_EN;
++
++				wifi_w32(dev, dev->wlan.wpdma_rx_glo -
++					 dev->wlan.phy_base, val);
++
++				dev_err(dev->hw->dev, "mtk_wed%d: rx(%d) dma enable successful!\n",
++						dev->hw->index, idx);
++			} else {
++				dev_err(dev->hw->dev, "mtk_wed%d: rx(%d) dma enable failed!\n",
++					dev->hw->index, idx);
++			}
++		}
+ 	}
+ }
+ 
+@@ -644,15 +989,20 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
+ 			MTK_WED_WPDMA_RX_D_RX_DRV_EN);
+ 		wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
+ 			MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
+-	}
+ 
+-	mtk_wed_set_512_support(dev, false);
++		if (dev->hw->version == 3 && mtk_wed_get_rx_capa(dev)) {
++			wdma_clr(dev, MTK_WDMA_PREF_TX_CFG,
++				 MTK_WDMA_PREF_TX_CFG_PREF_EN);
++			wdma_clr(dev, MTK_WDMA_PREF_RX_CFG,
++				 MTK_WDMA_PREF_RX_CFG_PREF_EN);
++		}
++	}
+ }
+ 
+ static void
+ mtk_wed_stop(struct mtk_wed_device *dev)
+ {
+-	if (dev->ver > MTK_WED_V1) {
++	if (mtk_wed_get_rx_capa(dev)) {
+ 		wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
+ 		wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0);
+ 	}
+@@ -677,13 +1027,21 @@ mtk_wed_deinit(struct mtk_wed_device *dev)
+ 		MTK_WED_CTRL_WED_TX_BM_EN |
+ 		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+ 
+-	if (dev->hw->ver == 1)
++	if (dev->hw->version == 1)
+ 		return;
+ 
+ 	wed_clr(dev, MTK_WED_CTRL,
+ 		MTK_WED_CTRL_RX_ROUTE_QM_EN |
+ 		MTK_WED_CTRL_WED_RX_BM_EN |
+ 		MTK_WED_CTRL_RX_RRO_QM_EN);
++
++	if (dev->hw->version == 3) {
++		wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_PAO_EN);
++		wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_TX_PAO);
++		wed_clr(dev, MTK_WED_PCIE_INT_CTRL,
++			MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
++			MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER);
++	}
+ }
+ 
+ static void
+@@ -702,9 +1060,9 @@ mtk_wed_detach(struct mtk_wed_device *dev)
+ 
+ 	mtk_wdma_tx_reset(dev);
+ 
+-	mtk_wed_free_buffer(dev);
++	mtk_wed_free_tx_buffer(dev);
+ 	mtk_wed_free_tx_rings(dev);
+-	if (dev->ver > MTK_WED_V1) {
++	if (mtk_wed_get_rx_capa(dev)) {
+ 		mtk_wed_wo_reset(dev);
+ 		mtk_wed_free_rx_rings(dev);
+ 		mtk_wed_wo_exit(hw);
+@@ -728,73 +1086,97 @@ mtk_wed_detach(struct mtk_wed_device *dev)
+ 	mutex_unlock(&hw_lock);
+ }
+ 
++#define IRQ_MASK_APMCU		0x1000301c
+ static void
+ mtk_wed_bus_init(struct mtk_wed_device *dev)
+ {
+-#define PCIE_BASE_ADDR0 0x11280000
++	switch (dev->wlan.bus_type) {
++	case MTK_WED_BUS_PCIE: {
++		struct device_node *np = dev->hw->eth->dev->of_node;
++		struct regmap *regs;
++		unsigned long addr;
++		u32 value;
+ 
+-	if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
+-		struct device_node *node;
+-		void __iomem * base_addr;
+-		u32 value = 0;
++		if (dev->hw->version == 2) {
++			regs = syscon_regmap_lookup_by_phandle(np,
++							       "mediatek,wed-pcie");
++			if (IS_ERR(regs))
++				break;
+ 
+-		node = of_parse_phandle(dev->hw->node, "mediatek,wed_pcie", 0);
+-		if (!node) {
+-			pr_err("%s: no wed_pcie node\n", __func__);
+-			return;
++			regmap_update_bits(regs, 0, BIT(0), BIT(0));
+ 		}
+ 
+-		base_addr = of_iomap(node, 0);
+-
+-		value = readl(base_addr);
+-		value |= BIT(0);
+-		writel(value, base_addr);
++		if (dev->wlan.msi) {
++		     wed_w32(dev, MTK_WED_PCIE_CFG_INTM, dev->hw->pci_base| 0xc08);
++		     wed_w32(dev, MTK_WED_PCIE_CFG_BASE, dev->hw->pci_base | 0xc04);
++		     wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(8));
++		} else {
++		     wed_w32(dev, MTK_WED_PCIE_CFG_INTM, dev->hw->pci_base | 0x180);
++		     wed_w32(dev, MTK_WED_PCIE_CFG_BASE, dev->hw->pci_base | 0x184);
++		     wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
++		}
+ 
+-		wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
+-			FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));
++		if (dev->hw->version < 3 || dev->hw->index) {
++			wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
++				FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));
++		} else {
++			/* set mask apmcu */
++			addr = (unsigned long)ioremap(IRQ_MASK_APMCU, 4);
++			value = readl((void *)addr);
++			value |= 0x7;
++			writel(value, (void *)addr);
++			iounmap((void *)addr);
++		}
+ 
+ 		/* pcie interrupt control: pola/source selection */
+ 		wed_set(dev, MTK_WED_PCIE_INT_CTRL,
+ 			MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
+-			FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
+-		wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
++			MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER |
++			FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, dev->hw->index));
+ 
+-		value = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
+-		value = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
+-		wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
+-		wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);
+-
+-		value = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
+-		value = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
+-
+-		wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
+-		wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
+-
+-		/* pola setting */
+-		value = wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
+-		wed_set(dev, MTK_WED_PCIE_INT_CTRL,
+-			MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
+-	} else if (dev->wlan.bus_type == MTK_WED_BUS_AXI) {
++		break;
++	}
++	case MTK_WED_BUS_AXI:
+ 		wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
+ 			MTK_WED_WPDMA_INT_CTRL_SIG_SRC |
+ 			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0));
++		break;
++	default:
++		break;
+ 	}
++
+ 	return;
+ }
+ 
+ static void
+ mtk_wed_set_wpdma(struct mtk_wed_device *dev)
+ {
+-	if (dev->ver > MTK_WED_V1) {
++	if (dev->hw->version == 1) {
++		wed_w32(dev, MTK_WED_WPDMA_CFG_BASE,  dev->wlan.wpdma_phys);
++	} else {
++		mtk_wed_bus_init(dev);
++
+ 		wed_w32(dev, MTK_WED_WPDMA_CFG_BASE,  dev->wlan.wpdma_int);
+ 		wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK,  dev->wlan.wpdma_mask);
+-		wed_w32(dev, MTK_WED_WPDMA_CFG_TX,  dev->wlan.wpdma_tx);
++		wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
+ 		wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE,  dev->wlan.wpdma_txfree);
+ 
+-		wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG,  dev->wlan.wpdma_rx_glo);
+-		wed_w32(dev, MTK_WED_WPDMA_RX_RING,  dev->wlan.wpdma_rx);
+-	} else {
+-		wed_w32(dev, MTK_WED_WPDMA_CFG_BASE,  dev->wlan.wpdma_phys);
++		if (mtk_wed_get_rx_capa(dev)) {
++			int i;
++
++			wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG,  dev->wlan.wpdma_rx_glo);
++			wed_w32(dev, MTK_WED_WPDMA_RX_RING0,  dev->wlan.wpdma_rx);
++			wed_w32(dev, MTK_WED_WPDMA_RX_RING1,  dev->wlan.wpdma_rx + 0x10);
++
++			if (dev->wlan.hwrro) {
++	                       wed_w32(dev, MTK_WED_RRO_RX_D_CFG(0), dev->wlan.wpdma_rx_rro[0]);
++	                       wed_w32(dev, MTK_WED_RRO_RX_D_CFG(1), dev->wlan.wpdma_rx_rro[1]);
++	                       for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) {
++	                               wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING_CFG(i),
++	                                       dev->wlan.wpdma_rx_pg + i * 0x10);
++			       }
++			}
++		}
+ 	}
+ }
+ 
+@@ -806,21 +1188,25 @@ mtk_wed_hw_init_early(struct mtk_wed_device *dev)
+ 	mtk_wed_deinit(dev);
+ 	mtk_wed_reset(dev, MTK_WED_RESET_WED);
+ 
+-	if (dev->ver > MTK_WED_V1)
+-		mtk_wed_bus_init(dev);
+-
+ 	mtk_wed_set_wpdma(dev);
+ 
+-	mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
+-	       MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
+-	       MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
+-	set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
+-	      MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
+-	      MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
++	if (dev->hw->version == 3) {
++		mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE;
++		set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2);
++	} else {
++		mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
++		       MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
++		       MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
++		set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
++		      MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
++		      MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
++	}
++
+ 	wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
+ 
+-	if (dev->ver == MTK_WED_V1) {
++	if (dev->hw->version == 1) {
+ 		u32 offset;
++
+ 		offset = dev->hw->index ? 0x04000400 : 0;
+ 		wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
+ 		wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
+@@ -907,11 +1293,16 @@ mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
+ 	} while (1);
+ 
+ 	/* configure RX_ROUTE_QM */
+-	wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
+-	wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
+-	wed_set(dev, MTK_WED_RTQM_GLO_CFG,
+-		FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
+-	wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
++	if (dev->hw->version == 2) {
++		wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
++		wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
++		wed_set(dev, MTK_WED_RTQM_GLO_CFG,
++			FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
++		wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
++	} else {
++		wed_set(dev, MTK_WED_RTQM_ENQ_CFG0,
++			FIELD_PREP(MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT, 0x3 + dev->hw->index));
++	}
+ 
+ 	/* enable RX_ROUTE_QM */
+ 	wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
+@@ -920,23 +1311,45 @@ mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
+ static void
+ mtk_wed_tx_hw_init(struct mtk_wed_device *dev)
+ {
+-	int size = dev->buf_ring.size;
++	int size = dev->wlan.nbuf;
+ 	int rev_size = MTK_WED_TX_RING_SIZE / 2;
+-	int thr = 1;
++	int thr_lo = 1, thr_hi = 1;
+ 
+-	if (dev->ver > MTK_WED_V1) {
++	if (dev->hw->version == 1) {
++		wed_w32(dev, MTK_WED_TX_BM_CTRL,
++			MTK_WED_TX_BM_CTRL_PAUSE |
++			FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, size / 128) |
++			FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, rev_size / 128));
++	} else {
+ 		size = MTK_WED_WDMA_RING_SIZE * ARRAY_SIZE(dev->tx_wdma) +
+-		       dev->buf_ring.size;
++		       dev->tx_buf_ring.size;
+ 		rev_size = size;
+-		thr = 0;
++		thr_lo = 0;
++		thr_hi = MTK_WED_TX_BM_DYN_THR_HI;
++
++		wed_w32(dev, MTK_WED_TX_TKID_CTRL,
++			MTK_WED_TX_TKID_CTRL_PAUSE |
++			FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
++				   size / 128) |
++			FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
++				   size / 128));
++
++		/* return SKBID + SDP back to bm */
++		if (dev->ver == 3) {
++			wed_set(dev, MTK_WED_TX_TKID_CTRL,
++				MTK_WED_TX_TKID_CTRL_FREE_FORMAT);
++			size = dev->wlan.nbuf;
++			rev_size = size;
++		} else {
++			wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
++				FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
++				MTK_WED_TX_TKID_DYN_THR_HI);
++		}
+ 	}
+ 
+-	wed_w32(dev, MTK_WED_TX_BM_CTRL,
+-		MTK_WED_TX_BM_CTRL_PAUSE |
+-		FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, size / 128) |
+-		FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, rev_size / 128));
++	mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
+ 
+-	wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);
++	wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys);
+ 
+ 	wed_w32(dev, MTK_WED_TX_BM_TKID,
+ 		FIELD_PREP(MTK_WED_TX_BM_TKID_START,
+@@ -946,25 +1359,44 @@ mtk_wed_tx_hw_init(struct mtk_wed_device *dev)
+ 
+ 	wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
+ 
+-	wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+-		FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, thr) |
+-		MTK_WED_TX_BM_DYN_THR_HI);
++	if (dev->hw->version < 3)
++		wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
++			FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, thr_lo) |
++			FIELD_PREP(MTK_WED_TX_BM_DYN_THR_HI, thr_hi));
++	else {
++		/* change to new bm */
++		wed_w32(dev, MTK_WED_TX_BM_INIT_PTR, dev->tx_buf_ring.pkt_nums |
++				MTK_WED_TX_BM_INIT_SW_TAIL_IDX);
++		wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_LEGACY_EN);
++	}
+ 
+-	if (dev->ver > MTK_WED_V1) {
++	if (dev->hw->version != 1) {
+ 		wed_w32(dev, MTK_WED_TX_TKID_CTRL,
+ 			MTK_WED_TX_TKID_CTRL_PAUSE |
+ 			FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
+-				   dev->buf_ring.size / 128) |
++				   size / 128) |
+ 			FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
+-				   dev->buf_ring.size / 128));
+-		wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
+-			FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
+-			MTK_WED_TX_TKID_DYN_THR_HI);
++				   size / 128));
++
++		/* return SKBID + SDP back to bm */
++		if (dev->ver == 3)
++			wed_set(dev, MTK_WED_TX_TKID_CTRL,
++				MTK_WED_TX_TKID_CTRL_FREE_FORMAT);
++		else
++			wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
++				FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
++				MTK_WED_TX_TKID_DYN_THR_HI);
+ 	}
+-	mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
++	wed_w32(dev, MTK_WED_TX_BM_TKID,
++		FIELD_PREP(MTK_WED_TX_BM_TKID_START,
++			   dev->wlan.token_start) |
++		FIELD_PREP(MTK_WED_TX_BM_TKID_END,
++			   dev->wlan.token_start + dev->wlan.nbuf - 1));
+ 
++	wed_w32(dev, MTK_WED_TX_BM_INIT_PTR, dev->tx_buf_ring.pkt_nums |
++		MTK_WED_TX_BM_INIT_SW_TAIL_IDX);
+ 	wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
+-	if (dev->ver > MTK_WED_V1)
++	if (dev->hw->version != 1)
+ 		wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
+ }
+ 
+@@ -977,7 +1409,26 @@ mtk_wed_rx_hw_init(struct mtk_wed_device *dev)
+ 
+ 	wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
+ 
++	/* reset prefetch index of ring */
++	wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX,
++		MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
++	wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX,
++		MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
++
++	wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX,
++		MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
++	wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX,
++		MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
++
++	/* reset prefetch FIFO of ring */
++	wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG,
++		MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR |
++		MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR);
++	wed_w32(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG, 0);
++
+ 	mtk_wed_rx_bm_hw_init(dev);
++	if (dev->wlan.hwrro)
++		mtk_wed_hwrro_init(dev);
+ 	mtk_wed_rro_hw_init(dev);
+ 	mtk_wed_route_qm_hw_init(dev);
+ }
+@@ -991,7 +1442,7 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
+ 	dev->init_done = true;
+ 	mtk_wed_set_ext_int(dev, false);
+ 	mtk_wed_tx_hw_init(dev);
+-	if (dev->ver > MTK_WED_V1)
++	if (mtk_wed_get_rx_capa(dev))
+ 		mtk_wed_rx_hw_init(dev);
+ }
+ 
+@@ -1015,26 +1466,6 @@ mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size, int scale, bool tx)
+ 	}
+ }
+ 
+-static u32
+-mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
+-{
+- 	if (wed_r32(dev, reg) & mask)
+-		return true;
+-
+-	return false;
+-}
+-
+-static int
+-mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
+-{
+-	int sleep = 1000;
+-	int timeout = 100 * sleep;
+-	u32 val;
+-
+-	return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
+-				 timeout, false, dev, reg, mask);
+-}
+-
+ static void
+ mtk_wed_rx_reset(struct mtk_wed_device *dev)
+ {
+@@ -1133,7 +1564,7 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
+ 		mtk_wed_ring_reset(desc, MTK_WED_RX_RING_SIZE, 1, false);
+ 	}
+ 
+-	mtk_wed_free_rx_bm(dev);
++	mtk_wed_free_rx_buffer(dev);
+ }
+ 
+ 
+@@ -1271,12 +1702,15 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev,
+ 		int idx, int size, bool reset)
+ {
+ 	struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
++	int scale = dev->hw->version > 1 ? 2 : 1;
+ 
+ 	if(!reset)
+ 		if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
+-				       dev->ver, true))
++				       scale, true))
+ 			return -ENOMEM;
+ 
++	wdma->flags |= MTK_WED_RING_CONFIGURED;
++
+ 	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
+ 		 wdma->desc_phys);
+ 	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
+@@ -1296,12 +1730,31 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev,
+ 	int idx, int size, bool reset)
+ {
+ 	struct mtk_wed_ring *wdma = &dev->rx_wdma[idx];
++	int scale = dev->hw->version > 1 ? 2 : 1;
+ 
+ 	if (!reset)
+ 		if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
+-				       dev->ver, true))
++				       scale, true))
+ 			return -ENOMEM;
+ 
++	if (dev->hw->version == 3) {
++		struct mtk_wdma_desc *desc = wdma->desc;
++		int i;
++
++		for (i = 0; i < MTK_WED_WDMA_RING_SIZE; i++) {
++			desc->buf0 = 0;
++			desc->ctrl = MTK_WDMA_DESC_CTRL_DMA_DONE;
++			desc->buf1 = 0;
++			desc->info = MTK_WDMA_TXD0_DESC_INFO_DMA_DONE;
++			desc++;
++			desc->buf0 = 0;
++			desc->ctrl = MTK_WDMA_DESC_CTRL_DMA_DONE;
++			desc->buf1 = 0;
++			desc->info = MTK_WDMA_TXD1_DESC_INFO_DMA_DONE;
++			desc++;
++		}
++	}
++
+ 	wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
+ 		 wdma->desc_phys);
+ 	wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
+@@ -1312,7 +1765,7 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev,
+ 		 MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);
+ 	if (reset)
+ 		mtk_wed_ring_reset(wdma->desc, MTK_WED_WDMA_RING_SIZE,
+-				   dev->ver, true);
++				   scale, true);
+ 	if (idx == 0)  {
+ 		wed_w32(dev, MTK_WED_WDMA_RING_TX
+ 			+ MTK_WED_RING_OFS_BASE, wdma->desc_phys);
+@@ -1395,7 +1848,7 @@ mtk_wed_send_msg(struct mtk_wed_device *dev, int cmd_id, void *data, int len)
+ {
+ 	struct mtk_wed_wo *wo = dev->hw->wed_wo;
+ 
+-	if (dev->ver == MTK_WED_V1)
++	if (!mtk_wed_get_rx_capa(dev))
+ 		return 0;
+ 
+ 	return mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, cmd_id, data, len, true);
+@@ -1420,13 +1873,87 @@ mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb,
+ 	}
+ }
+ 
++static void
++mtk_wed_start_hwrro(struct mtk_wed_device *dev, u32 irq_mask)
++{
++	int idx, ret;
++
++	wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
++	wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
++
++	if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hwrro)
++		return;
++
++	wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR);
++	wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, MTK_WED_RRO_MSDU_PG_DRV_CLR);
++
++	wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_RX,
++		MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN |
++		MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR |
++		MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN |
++		MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR |
++		FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG,
++			   dev->wlan.rro_rx_tbit[0]) |
++		FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG,
++			   dev->wlan.rro_rx_tbit[1]));
++
++	wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG,
++		MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN |
++		MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR |
++		MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN |
++		MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR |
++		MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN |
++		MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR |
++		FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG,
++			   dev->wlan.rx_pg_tbit[0]) |
++		FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG,
++			   dev->wlan.rx_pg_tbit[1])|
++		FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG,
++			   dev->wlan.rx_pg_tbit[2]));
++
++	/*
++	 * RRO_MSDU_PG_RING2_CFG1_FLD_DRV_EN should be enabled after
++	 * WM FWDL has completed, otherwise the RRO_MSDU_PG ring may be broken
++	 */
++	wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, MTK_WED_RRO_MSDU_PG_DRV_EN);
++
++	for (idx = 0; idx < MTK_WED_RX_QUEUES; idx++) {
++		struct mtk_wed_ring *ring = &dev->rx_rro_ring[idx];
++
++		if (!(ring->flags & MTK_WED_RING_CONFIGURED))
++			continue;
++
++		ret = mtk_wed_check_wfdma_rx_fill(dev, ring);
++		if (!ret)
++			dev_err(dev->hw->dev, "mtk_wed%d: rx_rro_ring(%d) init failed!\n",
++				dev->hw->index, idx);
++	}
++
++	for (idx = 0; idx < MTK_WED_RX_PAGE_QUEUES; idx++) {
++		struct mtk_wed_ring *ring = &dev->rx_page_ring[idx];
++		if (!(ring->flags & MTK_WED_RING_CONFIGURED))
++			continue;
++
++		ret = mtk_wed_check_wfdma_rx_fill(dev, ring);
++		if (!ret)
++			dev_err(dev->hw->dev, "mtk_wed%d: rx_page_ring(%d) init failed!\n",
++				dev->hw->index, idx);
++	}
++}
++
+ static void
+ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
+ {
+ 	int i, ret;
+ 
+-	if (dev->ver > MTK_WED_V1)
+-		ret = mtk_wed_rx_bm_alloc(dev);
++	if (mtk_wed_get_rx_capa(dev)) {
++		ret = mtk_wed_rx_buffer_alloc(dev);
++		if (ret)
++			return;
++
++		if (dev->wlan.hwrro)
++			mtk_wed_rx_page_buffer_alloc(dev);
++	}
+ 
+ 	for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
+ 		if (!dev->tx_wdma[i].desc)
+@@ -1437,7 +1964,7 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
+ 	mtk_wed_set_int(dev, irq_mask);
+ 	mtk_wed_set_ext_int(dev, true);
+ 
+-	if (dev->ver == MTK_WED_V1) {
++	if (dev->hw->version == 1) {
+ 		u32 val;
+ 
+ 		val = dev->wlan.wpdma_phys |
+@@ -1448,33 +1975,52 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
+ 			val |= BIT(1);
+ 		val |= BIT(0);
+ 		regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
+-	} else {
++	} else if (mtk_wed_get_rx_capa(dev)) {
+ 		/* driver set mid ready and only once */
+ 		wed_w32(dev, MTK_WED_EXT_INT_MASK1,
+ 			MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
+ 		wed_w32(dev, MTK_WED_EXT_INT_MASK2,
+ 			MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
++		if (dev->hw->version == 3)
++			wed_w32(dev, MTK_WED_EXT_INT_MASK3,
++				MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
+ 
+ 		wed_r32(dev, MTK_WED_EXT_INT_MASK1);
+ 		wed_r32(dev, MTK_WED_EXT_INT_MASK2);
++		if (dev->hw->version == 3)
++			wed_r32(dev, MTK_WED_EXT_INT_MASK3);
+ 
+ 		ret = mtk_wed_rro_cfg(dev);
+ 		if (ret)
+ 			return;
+ 	}
+-	mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
++
++	if (dev->hw->version == 2)
++		mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
++	else if (dev->hw->version == 3)
++		mtk_wed_pao_init(dev);
+ 
+ 	mtk_wed_dma_enable(dev);
+ 	dev->running = true;
+ }
+ 
++static int
++mtk_wed_get_pci_base(struct mtk_wed_device *dev)
++{
++	if (dev->hw->index == 0)
++		return MTK_WED_PCIE_BASE0;
++	else if (dev->hw->index == 1)
++		return MTK_WED_PCIE_BASE1;
++	else
++		return MTK_WED_PCIE_BASE2;
++}
++
+ static int
+ mtk_wed_attach(struct mtk_wed_device *dev)
+ 	__releases(RCU)
+ {
+ 	struct mtk_wed_hw *hw;
+ 	struct device *device;
+-	u16 ver;
+ 	int ret = 0;
+ 
+ 	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+@@ -1494,34 +2040,30 @@ mtk_wed_attach(struct mtk_wed_device *dev)
+ 		goto out;
+ 	}
+ 
+-	device = dev->wlan.bus_type == MTK_WED_BUS_PCIE
+-	? &dev->wlan.pci_dev->dev
+-	: &dev->wlan.platform_dev->dev;
++	device = dev->wlan.bus_type == MTK_WED_BUS_PCIE ?
++				       &dev->wlan.pci_dev->dev
++				       : &dev->wlan.platform_dev->dev;
+ 	dev_info(device, "attaching wed device %d version %d\n",
+-		 hw->index, hw->ver);
++		 hw->index, hw->version);
+ 
+ 	dev->hw = hw;
+ 	dev->dev = hw->dev;
+ 	dev->irq = hw->irq;
+ 	dev->wdma_idx = hw->index;
++	dev->ver = hw->version;
++
++	if (dev->hw->version == 3)
++		dev->hw->pci_base = mtk_wed_get_pci_base(dev);
+ 
+ 	if (hw->eth->dma_dev == hw->eth->dev &&
+ 	    of_dma_is_coherent(hw->eth->dev->of_node))
+ 		mtk_eth_set_dma_device(hw->eth, hw->dev);
+ 
+-	dev->ver = FIELD_GET(MTK_WED_REV_ID_MAJOR,
+-			    wed_r32(dev, MTK_WED_REV_ID));
+-	if (dev->ver > MTK_WED_V1)
+-		ver = FIELD_GET(MTK_WED_REV_ID_MINOR,
+-			    wed_r32(dev, MTK_WED_REV_ID));
+-
+-	dev->rev_id = ((dev->ver << 28) | ver << 16);
+-
+-	ret = mtk_wed_buffer_alloc(dev);
++	ret = mtk_wed_tx_buffer_alloc(dev);
+ 	if (ret)
+ 		goto error;
+ 
+-	if (dev->ver > MTK_WED_V1) {
++	if (mtk_wed_get_rx_capa(dev)) {
+ 		ret = mtk_wed_rro_alloc(dev);
+ 		if (ret)
+ 			goto error;
+@@ -1533,15 +2075,20 @@ mtk_wed_attach(struct mtk_wed_device *dev)
+ 	init_completion(&dev->wlan_reset_done);
+ 	atomic_set(&dev->fe_reset, 0);
+ 
+-	if (dev->ver == MTK_WED_V1)
++	if (dev->hw->version != 1)
++		dev->rev_id = wed_r32(dev, MTK_WED_REV_ID);
++	else
+ 		regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
+ 				   BIT(hw->index), 0);
+-	else
++
++	if (mtk_wed_get_rx_capa(dev))
+ 		ret = mtk_wed_wo_init(hw);
+ 
+ error:
+-	if (ret)
++	if (ret) {
++		pr_info("%s: detach wed\n", __func__);
+ 		mtk_wed_detach(dev);
++	}
+ out:
+ 	mutex_unlock(&hw_lock);
+ 
+@@ -1576,8 +2123,26 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx,
+ 	if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE, reset))
+ 		return -ENOMEM;
+ 
++	if (dev->hw->version == 3 && idx == 1) {
++		/* reset prefetch index */
++		wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG,
++		       MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR |
++		       MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR);
++
++		wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
++		       MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR |
++		       MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR);
++
++		/* reset prefetch FIFO */
++		wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG,
++		       MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR |
++		       MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR);
++		wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG, 0);
++	}
++
+ 	ring->reg_base = MTK_WED_RING_TX(idx);
+ 	ring->wpdma = regs;
++	ring->flags |= MTK_WED_RING_CONFIGURED;
+ 
+ 	/* WED -> WPDMA */
+ 	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
+@@ -1599,7 +2164,7 @@ mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
+ 	struct mtk_wed_ring *ring = &dev->txfree_ring;
+ 	int i, idx = 1;
+ 
+-	if(dev->ver > MTK_WED_V1)
++	if (dev->hw->version > 1)
+ 		idx = 0;
+ 
+ 	/*
+@@ -1652,6 +2217,129 @@ mtk_wed_rx_ring_setup(struct mtk_wed_device *dev,
+ 	return 0;
+ }
+ 
++static int
++mtk_wed_rro_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
++{
++	struct mtk_wed_ring *ring = &dev->rx_rro_ring[idx];
++
++	ring->wpdma = regs;
++
++	wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_BASE,
++		readl(regs));
++	wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_COUNT,
++		readl(regs + MTK_WED_RING_OFS_COUNT));
++
++	ring->flags |= MTK_WED_RING_CONFIGURED;
++
++	return 0;
++}
++
++static int
++mtk_wed_msdu_pg_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
++{
++	struct mtk_wed_ring *ring = &dev->rx_page_ring[idx];
++
++	ring->wpdma = regs;
++
++	wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_BASE,
++		readl(regs));
++	wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_COUNT,
++		readl(regs + MTK_WED_RING_OFS_COUNT));
++
++	ring->flags |= MTK_WED_RING_CONFIGURED;
++
++	return 0;
++}
++
++static int
++mtk_wed_ind_rx_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
++{
++	struct mtk_wed_ring *ring = &dev->ind_cmd_ring;
++	u32 val = readl(regs + MTK_WED_RING_OFS_COUNT);
++	int i = 0, cnt = 0;
++
++	ring->wpdma = regs;
++
++	if (readl(regs) & 0xf)
++		pr_info("%s(): address is not 16-byte aligned\n", __func__);
++
++	wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_BASE,
++		readl(regs) & 0xfffffff0);
++
++	wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_COUNT,
++		readl(regs + MTK_WED_RING_OFS_COUNT));
++
++	/* ack sn cr */
++	wed_w32(dev, MTK_WED_RRO_CFG0, dev->wlan.phy_base +
++		dev->wlan.ind_cmd.ack_sn_addr);
++	wed_w32(dev, MTK_WED_RRO_CFG1,
++		FIELD_PREP(MTK_WED_RRO_CFG1_MAX_WIN_SZ,
++			   dev->wlan.ind_cmd.win_size) |
++		FIELD_PREP(MTK_WED_RRO_CFG1_PARTICL_SE_ID,
++			   dev->wlan.ind_cmd.particular_sid));
++
++	/* particular session addr element */
++	wed_w32(dev, MTK_WED_ADDR_ELEM_CFG0, dev->wlan.ind_cmd.particular_se_phys);
++
++	for (i = 0; i < dev->wlan.ind_cmd.se_group_nums; i++) {
++		wed_w32(dev, MTK_WED_RADDR_ELEM_TBL_WDATA,
++			dev->wlan.ind_cmd.addr_elem_phys[i] >> 4);
++		wed_w32(dev, MTK_WED_ADDR_ELEM_TBL_CFG,
++			MTK_WED_ADDR_ELEM_TBL_WR | (i & 0x7f));
++		cnt = 0;
++		val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
++		while (!(val & MTK_WED_ADDR_ELEM_TBL_WR_RDY) &&
++			 cnt < 100) {
++			val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
++			cnt++;
++		}
++		if (cnt >= 100) {
++			dev_err(dev->hw->dev, "mtk_wed%d: write ba session base failed!\n",
++				dev->hw->index);
++		}
++		/*if (mtk_wed_poll_busy(dev, MTK_WED_ADDR_ELEM_TBL_CFG,
++				      MTK_WED_ADDR_ELEM_TBL_WR_RDY)) {
++			dev_err(dev->hw->dev, "mtk_wed%d: write ba session base failed!\n",
++				dev->hw->index);
++			return -1;
++		}*/
++	}
++
++	/* pn check init */
++	for (i = 0; i < dev->wlan.ind_cmd.particular_sid; i++) {
++		wed_w32(dev, MTK_WED_PN_CHECK_WDATA_M,
++			MTK_WED_PN_CHECK_IS_FIRST);
++
++		wed_w32(dev, MTK_WED_PN_CHECK_CFG, MTK_WED_PN_CHECK_WR |
++			FIELD_PREP(MTK_WED_PN_CHECK_SE_ID, i));
++
++		cnt = 0;
++		val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
++		while (!(val & MTK_WED_PN_CHECK_WR_RDY) &&
++			 cnt < 100) {
++			val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
++			cnt++;
++		}
++		if (cnt >= 100) {
++			dev_err(dev->hw->dev, "mtk_wed%d: session(%d) init failed!\n",
++				dev->hw->index, i);
++		}
++		/*if (mtk_wed_poll_busy(dev, MTK_WED_PN_CHECK_CFG,
++				      MTK_WED_PN_CHECK_WR_RDY)) {
++			dev_err(dev->hw->dev, "mtk_wed%d: session(%d) init failed!\n",
++				dev->hw->index, i);
++			//return -1;
++		}*/
++	}
++
++	wed_w32(dev, MTK_WED_RX_IND_CMD_CNT0, MTK_WED_RX_IND_CMD_DBG_CNT_EN);
++
++	wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN);
++
++	return 0;
++}
++
++
+ static u32
+ mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
+ {
+@@ -1660,6 +2348,8 @@ mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
+ 	val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
+ 	wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
+ 	val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
++	if (dev->hw->version == 3)
++		val &= MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT;
+ 	WARN_RATELIMIT(val, "mtk_wed%d: error status=%08x\n",
+ 		       dev->hw->index, val);
+ 
+@@ -1752,6 +2442,9 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+ 		.tx_ring_setup = mtk_wed_tx_ring_setup,
+ 		.txfree_ring_setup = mtk_wed_txfree_ring_setup,
+ 		.rx_ring_setup = mtk_wed_rx_ring_setup,
++		.rro_rx_ring_setup = mtk_wed_rro_rx_ring_setup,
++		.msdu_pg_rx_ring_setup = mtk_wed_msdu_pg_rx_ring_setup,
++		.ind_rx_ring_setup = mtk_wed_ind_rx_ring_setup,
+ 		.msg_update = mtk_wed_send_msg,
+ 		.start = mtk_wed_start,
+ 		.stop = mtk_wed_stop,
+@@ -1763,6 +2456,7 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+ 		.detach = mtk_wed_detach,
+ 		.setup_tc = mtk_wed_eth_setup_tc,
+ 		.ppe_check = mtk_wed_ppe_check,
++		.start_hwrro = mtk_wed_start_hwrro,
+ 	};
+ 	struct device_node *eth_np = eth->dev->of_node;
+ 	struct platform_device *pdev;
+@@ -1802,9 +2496,10 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+ 	hw->wdma_phy = wdma_phy;
+ 	hw->index = index;
+ 	hw->irq = irq;
+-	hw->ver = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
++	hw->version = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3) ?
++		      3 : MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
+ 
+-	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
++	if (hw->version == 1) {
+ 		hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
+ 							     "mediatek,pcie-mirror");
+ 		hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
+@@ -1819,7 +2514,6 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+ 			regmap_write(hw->mirror, 0, 0);
+ 			regmap_write(hw->mirror, 4, 0);
+ 		}
+-		hw->ver = MTK_WED_V1;
+ 	}
+ 
+ 	mtk_wed_hw_add_debugfs(hw);
+diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
+index 490873c..fcf7bd0 100644
+--- a/drivers/net/ethernet/mediatek/mtk_wed.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed.h
+@@ -10,10 +10,13 @@
+ #include <linux/netdevice.h>
+ #define MTK_PCIE_BASE(n)		(0x1a143000 + (n) * 0x2000)
+ 
+-#define MTK_WED_PKT_SIZE		1900
++#define MTK_WED_PKT_SIZE		1920
+ #define MTK_WED_BUF_SIZE		2048
++#define MTK_WED_PAGE_BUF_SIZE		128
+ #define MTK_WED_BUF_PER_PAGE		(PAGE_SIZE / 2048)
++#define MTK_WED_RX_PAGE_BUF_PER_PAGE	(PAGE_SIZE / 128)
+ #define MTK_WED_RX_RING_SIZE		1536
++#define MTK_WED_RX_PG_BM_CNT		8192
+ 
+ #define MTK_WED_TX_RING_SIZE		2048
+ #define MTK_WED_WDMA_RING_SIZE		512
+@@ -27,6 +30,9 @@
+ #define MTK_WED_RRO_QUE_CNT		8192
+ #define MTK_WED_MIOD_ENTRY_CNT		128
+ 
++#define MTK_WED_TX_BM_DMA_SIZE		65536
++#define MTK_WED_TX_BM_PKT_CNT		32768
++
+ #define MODULE_ID_WO		1
+ 
+ struct mtk_eth;
+@@ -43,6 +49,8 @@ struct mtk_wed_hw {
+ 	struct dentry *debugfs_dir;
+ 	struct mtk_wed_device *wed_dev;
+ 	struct mtk_wed_wo *wed_wo;
++	struct mtk_wed_pao *wed_pao;
++	u32 pci_base;
+ 	u32 debugfs_reg;
+ 	u32 num_flows;
+ 	u32 wdma_phy;
+@@ -50,7 +58,8 @@ struct mtk_wed_hw {
+ 	int ring_num;
+ 	int irq;
+ 	int index;
+-	u32 ver;
++	int token_id;
++	u32 version;
+ };
+ 
+ struct mtk_wdma_info {
+@@ -58,6 +67,18 @@ struct mtk_wdma_info {
+ 	u8 queue;
+ 	u16 wcid;
+ 	u8 bss;
++	u32 usr_info;
++	u8 tid;
++	u8 is_fixedrate;
++	u8 is_prior;
++	u8 is_sp;
++	u8 hf;
++	u8 amsdu_en;
++};
++
++struct mtk_wed_pao {
++	char *hif_txd[32];
++	dma_addr_t hif_txd_phys[32];
+ };
+ 
+ #ifdef CONFIG_NET_MEDIATEK_SOC_WED
+diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
+index 4a9e684..51e3d7c 100644
+--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
+@@ -11,9 +11,11 @@ struct reg_dump {
+ 	u16 offset;
+ 	u8 type;
+ 	u8 base;
++	u32 mask;
+ };
+ 
+ enum {
++	DUMP_TYPE_END,
+ 	DUMP_TYPE_STRING,
+ 	DUMP_TYPE_WED,
+ 	DUMP_TYPE_WDMA,
+@@ -23,8 +25,11 @@ enum {
+ 	DUMP_TYPE_WED_RRO,
+ };
+ 
++#define DUMP_END() { .type = DUMP_TYPE_END }
+ #define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
+ #define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ }
++#define DUMP_REG_MASK(_reg, _mask) { #_mask, MTK_##_reg, DUMP_TYPE_WED, 0, MTK_##_mask }
++
+ #define DUMP_RING(_prefix, _base, ...)				\
+ 	{ _prefix " BASE", _base, __VA_ARGS__ },		\
+ 	{ _prefix " CNT",  _base + 0x4, __VA_ARGS__ },	\
+@@ -32,6 +37,7 @@ enum {
+ 	{ _prefix " DIDX", _base + 0xc, __VA_ARGS__ }
+ 
+ #define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED)
++#define DUMP_WED_MASK(_reg, _mask) DUMP_REG_MASK(_reg, _mask)
+ #define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED)
+ 
+ #define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA)
+@@ -52,36 +58,49 @@ print_reg_val(struct seq_file *s, const char *name, u32 val)
+ 
+ static void
+ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
+-	      const struct reg_dump *regs, int n_regs)
++	      const struct reg_dump **regs)
+ {
+-	const struct reg_dump *cur;
++	const struct reg_dump **cur_o = regs, *cur;
++	bool newline = false;
+ 	u32 val;
+ 
+-	for (cur = regs; cur < &regs[n_regs]; cur++) {
+-		switch (cur->type) {
+-		case DUMP_TYPE_STRING:
+-			seq_printf(s, "%s======== %s:\n",
+-				   cur > regs ? "\n" : "",
+-				   cur->name);
+-			continue;
+-		case DUMP_TYPE_WED:
+-		case DUMP_TYPE_WED_RRO:
+-			val = wed_r32(dev, cur->offset);
+-			break;
+-		case DUMP_TYPE_WDMA:
+-			val = wdma_r32(dev, cur->offset);
+-			break;
+-		case DUMP_TYPE_WPDMA_TX:
+-			val = wpdma_tx_r32(dev, cur->base, cur->offset);
+-			break;
+-		case DUMP_TYPE_WPDMA_TXFREE:
+-			val = wpdma_txfree_r32(dev, cur->offset);
+-			break;
+-		case DUMP_TYPE_WPDMA_RX:
+-			val = wpdma_rx_r32(dev, cur->base, cur->offset);
+-			break;
++	while (*cur_o) {
++		cur = *cur_o;
++
++		while (cur->type != DUMP_TYPE_END) {
++			switch (cur->type) {
++			case DUMP_TYPE_STRING:
++				seq_printf(s, "%s======== %s:\n",
++					   newline ? "\n" : "",
++					   cur->name);
++				newline = true;
++				cur++;
++				continue;
++			case DUMP_TYPE_WED:
++			case DUMP_TYPE_WED_RRO:
++				val = wed_r32(dev, cur->offset);
++				break;
++			case DUMP_TYPE_WDMA:
++				val = wdma_r32(dev, cur->offset);
++				break;
++			case DUMP_TYPE_WPDMA_TX:
++				val = wpdma_tx_r32(dev, cur->base, cur->offset);
++				break;
++			case DUMP_TYPE_WPDMA_TXFREE:
++				val = wpdma_txfree_r32(dev, cur->offset);
++				break;
++			case DUMP_TYPE_WPDMA_RX:
++				val = wpdma_rx_r32(dev, cur->base, cur->offset);
++				break;
++			}
++
++			if (cur->mask)
++				val = (cur->mask & val) >> (ffs(cur->mask) - 1);
++
++			print_reg_val(s, cur->name, val);
++			cur++;
+ 		}
+-		print_reg_val(s, cur->name, val);
++		cur_o++;
+ 	}
+ }
+ 
+@@ -89,7 +108,7 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
+ static int
+ wed_txinfo_show(struct seq_file *s, void *data)
+ {
+-	static const struct reg_dump regs[] = {
++	static const struct reg_dump regs_common[] = {
+ 		DUMP_STR("WED TX"),
+ 		DUMP_WED(WED_TX_MIB(0)),
+ 		DUMP_WED_RING(WED_RING_TX(0)),
+@@ -128,16 +147,32 @@ wed_txinfo_show(struct seq_file *s, void *data)
+ 		DUMP_WDMA_RING(WDMA_RING_RX(0)),
+ 		DUMP_WDMA_RING(WDMA_RING_RX(1)),
+ 
+-		DUMP_STR("TX FREE"),
++		DUMP_STR("WED TX FREE"),
+ 		DUMP_WED(WED_RX_MIB(0)),
++		DUMP_WED_RING(WED_RING_RX(0)),
++		DUMP_WED(WED_WPDMA_RX_COHERENT_MIB(0)),
++
++		DUMP_WED(WED_RX_MIB(1)),
++		DUMP_WED_RING(WED_RING_RX(1)),
++		DUMP_WED(WED_WPDMA_RX_COHERENT_MIB(1)),
++		DUMP_STR("WED_WPDMA TX FREE"),
++		DUMP_WED_RING(WED_WPDMA_RING_RX(0)),
++		DUMP_WED_RING(WED_WPDMA_RING_RX(1)),
++		DUMP_END(),
++	};
++
++	static const struct reg_dump *regs[] = {
++		&regs_common[0],
++		NULL,
+ 	};
++
+ 	struct mtk_wed_hw *hw = s->private;
+ 	struct mtk_wed_device *dev = hw->wed_dev;
+ 
+ 	if (!dev)
+ 		return 0;
+ 
+-	dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
++	dump_wed_regs(s, dev, regs);
+ 
+ 	return 0;
+ }
+@@ -146,7 +181,7 @@ DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
+ static int
+ wed_rxinfo_show(struct seq_file *s, void *data)
+ {
+-	static const struct reg_dump regs[] = {
++	static const struct reg_dump regs_common[] = {
+ 		DUMP_STR("WPDMA RX"),
+ 		DUMP_WPDMA_RX_RING(0),
+ 		DUMP_WPDMA_RX_RING(1),
+@@ -164,7 +199,7 @@ wed_rxinfo_show(struct seq_file *s, void *data)
+ 		DUMP_WED_RING(WED_RING_RX_DATA(0)),
+ 		DUMP_WED_RING(WED_RING_RX_DATA(1)),
+ 
+-		DUMP_STR("WED RRO"),
++		DUMP_STR("WED WO RRO"),
+ 		DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0),
+ 		DUMP_WED(WED_RROQM_MID_MIB),
+ 		DUMP_WED(WED_RROQM_MOD_MIB),
+@@ -175,16 +210,6 @@ wed_rxinfo_show(struct seq_file *s, void *data)
+ 		DUMP_WED(WED_RROQM_FDBK_ANC_MIB),
+ 		DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB),
+ 
+-		DUMP_STR("WED Route QM"),
+-		DUMP_WED(WED_RTQM_R2H_MIB(0)),
+-		DUMP_WED(WED_RTQM_R2Q_MIB(0)),
+-		DUMP_WED(WED_RTQM_Q2H_MIB(0)),
+-		DUMP_WED(WED_RTQM_R2H_MIB(1)),
+-		DUMP_WED(WED_RTQM_R2Q_MIB(1)),
+-		DUMP_WED(WED_RTQM_Q2H_MIB(1)),
+-		DUMP_WED(WED_RTQM_Q2N_MIB),
+-		DUMP_WED(WED_RTQM_Q2B_MIB),
+-		DUMP_WED(WED_RTQM_PFDBK_MIB),
+ 
+ 		DUMP_STR("WED WDMA TX"),
+ 		DUMP_WED(WED_WDMA_TX_MIB),
+@@ -205,15 +230,99 @@ wed_rxinfo_show(struct seq_file *s, void *data)
+ 		DUMP_WED(WED_RX_BM_INTF2),
+ 		DUMP_WED(WED_RX_BM_INTF),
+ 		DUMP_WED(WED_RX_BM_ERR_STS),
++		DUMP_END()
++	};
++
++	static const struct reg_dump regs_v2[] = {
++		DUMP_STR("WED Route QM"),
++		DUMP_WED(WED_RTQM_R2H_MIB(0)),
++		DUMP_WED(WED_RTQM_R2Q_MIB(0)),
++		DUMP_WED(WED_RTQM_Q2H_MIB(0)),
++		DUMP_WED(WED_RTQM_R2H_MIB(1)),
++		DUMP_WED(WED_RTQM_R2Q_MIB(1)),
++		DUMP_WED(WED_RTQM_Q2H_MIB(1)),
++		DUMP_WED(WED_RTQM_Q2N_MIB),
++		DUMP_WED(WED_RTQM_Q2B_MIB),
++		DUMP_WED(WED_RTQM_PFDBK_MIB),
++
++		DUMP_END()
++	};
++
++	static const struct reg_dump regs_v3[] = {
++		DUMP_STR("WED RX RRO DATA"),
++		DUMP_WED_RING(WED_RRO_RX_D_RX(0)),
++		DUMP_WED_RING(WED_RRO_RX_D_RX(1)),
++
++		DUMP_STR("WED RX MSDU PAGE"),
++		DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(0)),
++		DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(1)),
++		DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(2)),
++
++		DUMP_STR("WED RX IND CMD"),
++		DUMP_WED(WED_IND_CMD_RX_CTRL1),
++		DUMP_WED_MASK(WED_IND_CMD_RX_CTRL2, WED_IND_CMD_MAX_CNT),
++		DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0, WED_IND_CMD_PROC_IDX),
++		DUMP_WED_MASK(RRO_IND_CMD_SIGNATURE, RRO_IND_CMD_DMA_IDX),
++		DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0, WED_IND_CMD_MAGIC_CNT),
++		DUMP_WED_MASK(RRO_IND_CMD_SIGNATURE, RRO_IND_CMD_MAGIC_CNT),
++		DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0,
++			      WED_IND_CMD_PREFETCH_FREE_CNT),
++		DUMP_WED_MASK(WED_RRO_CFG1, WED_RRO_CFG1_PARTICL_SE_ID),
++
++		DUMP_STR("WED ADDR ELEM"),
++		DUMP_WED(WED_ADDR_ELEM_CFG0),
++		DUMP_WED_MASK(WED_ADDR_ELEM_CFG1,
++			      WED_ADDR_ELEM_PREFETCH_FREE_CNT),
++
++		DUMP_STR("WED Route QM"),
++		DUMP_WED(WED_RTQM_ENQ_I2Q_DMAD_CNT),
++		DUMP_WED(WED_RTQM_ENQ_I2N_DMAD_CNT),
++		DUMP_WED(WED_RTQM_ENQ_I2Q_PKT_CNT),
++		DUMP_WED(WED_RTQM_ENQ_I2N_PKT_CNT),
++		DUMP_WED(WED_RTQM_ENQ_USED_ENTRY_CNT),
++		DUMP_WED(WED_RTQM_ENQ_ERR_CNT),
++
++		DUMP_WED(WED_RTQM_DEQ_DMAD_CNT),
++		DUMP_WED(WED_RTQM_DEQ_Q2I_DMAD_CNT),
++		DUMP_WED(WED_RTQM_DEQ_PKT_CNT),
++		DUMP_WED(WED_RTQM_DEQ_Q2I_PKT_CNT),
++		DUMP_WED(WED_RTQM_DEQ_USED_PFDBK_CNT),
++		DUMP_WED(WED_RTQM_DEQ_ERR_CNT),
++
++		DUMP_END()
++	};
++
++	static const struct reg_dump *regs_new_v2[] = {
++		&regs_common[0],
++		&regs_v2[0],
++		NULL,
++	};
++
++	static const struct reg_dump *regs_new_v3[] = {
++		&regs_common[0],
++		&regs_v3[0],
++		NULL,
+ 	};
+ 
+ 	struct mtk_wed_hw *hw = s->private;
+ 	struct mtk_wed_device *dev = hw->wed_dev;
++	const struct reg_dump **regs;
+ 
+ 	if (!dev)
+ 		return 0;
+ 
+-	dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
++	switch(dev->hw->version) {
++	case 2:
++		regs = regs_new_v2;
++		break;
++	case 3:
++		regs = regs_new_v3;
++		break;
++	default:
++		return 0;
++	}
++
++	dump_wed_regs(s, dev, regs);
+ 
+ 	return 0;
+ }
+@@ -248,6 +357,383 @@ mtk_wed_reg_get(void *data, u64 *val)
+ DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mtk_wed_reg_get, mtk_wed_reg_set,
+              "0x%08llx\n");
+ 
++static int
++wed_token_txd_show(struct seq_file *s, void *data)
++{
++	struct mtk_wed_hw *hw = s->private;
++	struct mtk_wed_device *dev = hw->wed_dev;
++	struct dma_page_info *page_list = dev->tx_buf_ring.pages;
++	int token = dev->wlan.token_start;
++	u32 val = hw->token_id, size = 1;
++	int page_idx = (val - token) / 2;
++	int i;
++
++	if (val < token) {
++		size = val;
++		page_idx = 0;
++	}
++
++	for (i = 0; i < size; i += MTK_WED_BUF_PER_PAGE) {
++		void *page = page_list[page_idx++].addr;
++		void *buf;
++		int j;
++
++		if (!page)
++			break;
++
++		buf = page_to_virt(page);
++
++		for (j = 0; j < MTK_WED_BUF_PER_PAGE; j++) {
++			printk(KERN_ERR "[TXD]: token id = %d\n", token + 2 * (page_idx - 1) + j);
++			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, (u8 *)buf, 128, false);
++			seq_printf(s, "\n");
++
++			buf += MTK_WED_BUF_SIZE;
++		}
++	}
++
++	return 0;
++}
++
++DEFINE_SHOW_ATTRIBUTE(wed_token_txd);
++
++static int
++wed_pao_show(struct seq_file *s, void *data)
++{
++	static const struct reg_dump regs_common[] = {
++		DUMP_STR("PAO AMSDU INFO"),
++		DUMP_WED(WED_PAO_MON_AMSDU_FIFO_DMAD),
++
++		DUMP_STR("PAO AMSDU ENG0 INFO"),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(0)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(0)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(0)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(0)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(0)),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(0),
++			      WED_PAO_AMSDU_ENG_MAX_PL_CNT),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(0),
++			      WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(0),
++			      WED_PAO_AMSDU_ENG_CUR_ENTRY),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(0),
++			      WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(0),
++			      WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
++
++		DUMP_STR("PAO AMSDU ENG1 INFO"),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(1)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(1)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(1)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(1)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(1)),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(1),
++			      WED_PAO_AMSDU_ENG_MAX_PL_CNT),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(1),
++			      WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(1),
++			      WED_PAO_AMSDU_ENG_CUR_ENTRY),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(1),
++			      WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(1),
++			      WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
++
++		DUMP_STR("PAO AMSDU ENG2 INFO"),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(2)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(2)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(2)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(2)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(2)),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(2),
++			      WED_PAO_AMSDU_ENG_MAX_PL_CNT),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(2),
++			      WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(2),
++			      WED_PAO_AMSDU_ENG_CUR_ENTRY),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(2),
++			      WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(2),
++			      WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
++
++		DUMP_STR("PAO AMSDU ENG3 INFO"),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(3)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(3)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(3)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(3)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(3)),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(3),
++			      WED_PAO_AMSDU_ENG_MAX_PL_CNT),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(3),
++			      WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(3),
++			      WED_PAO_AMSDU_ENG_CUR_ENTRY),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(3),
++			      WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(3),
++			      WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
++
++		DUMP_STR("PAO AMSDU ENG4 INFO"),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(4)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(4)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(4)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(4)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(4)),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(4),
++			      WED_PAO_AMSDU_ENG_MAX_PL_CNT),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(4),
++			      WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(4),
++			      WED_PAO_AMSDU_ENG_CUR_ENTRY),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(4),
++			      WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(4),
++			      WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
++
++		DUMP_STR("PAO AMSDU ENG5 INFO"),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(5)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(5)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(5)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(5)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(5)),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(5),
++			      WED_PAO_AMSDU_ENG_MAX_PL_CNT),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(5),
++			      WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(5),
++			      WED_PAO_AMSDU_ENG_CUR_ENTRY),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(5),
++			      WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(5),
++			      WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
++
++		DUMP_STR("PAO AMSDU ENG6 INFO"),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(6)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(6)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(6)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(6)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(6)),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(6),
++			      WED_PAO_AMSDU_ENG_MAX_PL_CNT),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(6),
++			      WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(6),
++			      WED_PAO_AMSDU_ENG_CUR_ENTRY),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(6),
++			      WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(6),
++			      WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
++
++		DUMP_STR("PAO AMSDU ENG7 INFO"),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(7)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(7)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(7)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(7)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(7)),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(7),
++			      WED_PAO_AMSDU_ENG_MAX_PL_CNT),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(7),
++			      WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(7),
++			      WED_PAO_AMSDU_ENG_CUR_ENTRY),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(7),
++			      WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(7),
++			      WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
++
++		DUMP_STR("PAO AMSDU ENG8 INFO"),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(8)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(8)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(8)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(8)),
++		DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(8)),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(8),
++			      WED_PAO_AMSDU_ENG_MAX_PL_CNT),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(8),
++			      WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(8),
++			      WED_PAO_AMSDU_ENG_CUR_ENTRY),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(8),
++			      WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
++		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(8),
++			      WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
++
++		DUMP_STR("PAO QMEM INFO"),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(0), WED_PAO_QMEM_FQ_CNT),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(0), WED_PAO_QMEM_SP_QCNT),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(1), WED_PAO_QMEM_TID0_QCNT),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(1), WED_PAO_QMEM_TID1_QCNT),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(2), WED_PAO_QMEM_TID2_QCNT),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(2), WED_PAO_QMEM_TID3_QCNT),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(3), WED_PAO_QMEM_TID4_QCNT),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(3), WED_PAO_QMEM_TID5_QCNT),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(4), WED_PAO_QMEM_TID6_QCNT),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_CNT(4), WED_PAO_QMEM_TID7_QCNT),
++
++
++		DUMP_STR("PAO QMEM HEAD INFO"),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(0), WED_PAO_QMEM_FQ_HEAD),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(0), WED_PAO_QMEM_SP_QHEAD),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(1), WED_PAO_QMEM_TID0_QHEAD),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(1), WED_PAO_QMEM_TID1_QHEAD),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(2), WED_PAO_QMEM_TID2_QHEAD),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(2), WED_PAO_QMEM_TID3_QHEAD),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(3), WED_PAO_QMEM_TID4_QHEAD),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(3), WED_PAO_QMEM_TID5_QHEAD),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(4), WED_PAO_QMEM_TID6_QHEAD),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(4), WED_PAO_QMEM_TID7_QHEAD),
++
++		DUMP_STR("PAO QMEM TAIL INFO"),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(5), WED_PAO_QMEM_FQ_TAIL),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(5), WED_PAO_QMEM_SP_QTAIL),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(6), WED_PAO_QMEM_TID0_QTAIL),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(6), WED_PAO_QMEM_TID1_QTAIL),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(7), WED_PAO_QMEM_TID2_QTAIL),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(7), WED_PAO_QMEM_TID3_QTAIL),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(8), WED_PAO_QMEM_TID4_QTAIL),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(8), WED_PAO_QMEM_TID5_QTAIL),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(9), WED_PAO_QMEM_TID6_QTAIL),
++		DUMP_WED_MASK(WED_PAO_MON_QMEM_PTR(9), WED_PAO_QMEM_TID7_QTAIL),
++
++		DUMP_STR("PAO HIFTXD MSDU INFO"),
++		DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(1)),
++		DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(2)),
++		DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(3)),
++		DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(4)),
++		DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(5)),
++		DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(6)),
++		DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(7)),
++		DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(8)),
++		DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(9)),
++		DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(10)),
++		DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(11)),
++		DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(12)),
++		DUMP_WED(WED_PAO_MON_HIFTXD_FETCH_MSDU(13)),
++		DUMP_END()
++	};
++
++	static const struct reg_dump *regs[] = {
++		&regs_common[0],
++		NULL,
++	};
++	struct mtk_wed_hw *hw = s->private;
++	struct mtk_wed_device *dev = hw->wed_dev;
++
++	if (!dev)
++		return 0;
++
++	dump_wed_regs(s, dev, regs);
++
++	return 0;
++}
++DEFINE_SHOW_ATTRIBUTE(wed_pao);
++
++static int
++wed_rtqm_show(struct seq_file *s, void *data)
++{
++	static const struct reg_dump regs_common[] = {
++		DUMP_STR("WED Route QM IGRS0(N2H + Recycle)"),
++		DUMP_WED(WED_RTQM_IGRS0_I2HW_DMAD_CNT),
++		DUMP_WED(WED_RTQM_IGRS0_I2H_DMAD_CNT(0)),
++		DUMP_WED(WED_RTQM_IGRS0_I2H_DMAD_CNT(1)),
++		DUMP_WED(WED_RTQM_IGRS0_I2HW_PKT_CNT),
++		DUMP_WED(WED_RTQM_IGRS0_I2H_PKT_CNT(0)),
++		DUMP_WED(WED_RTQM_IGRS0_I2H_PKT_CNT(1)),
++		DUMP_WED(WED_RTQM_IGRS0_FDROP_CNT),
++
++
++		DUMP_STR("WED Route QM IGRS1(Legacy)"),
++		DUMP_WED(WED_RTQM_IGRS1_I2HW_DMAD_CNT),
++		DUMP_WED(WED_RTQM_IGRS1_I2H_DMAD_CNT(0)),
++		DUMP_WED(WED_RTQM_IGRS1_I2H_DMAD_CNT(1)),
++		DUMP_WED(WED_RTQM_IGRS1_I2HW_PKT_CNT),
++		DUMP_WED(WED_RTQM_IGRS1_I2H_PKT_CNT(0)),
++		DUMP_WED(WED_RTQM_IGRS1_I2H_PKT_CNT(1)),
++		DUMP_WED(WED_RTQM_IGRS1_FDROP_CNT),
++
++		DUMP_STR("WED Route QM IGRS2(RRO3.0)"),
++		DUMP_WED(WED_RTQM_IGRS2_I2HW_DMAD_CNT),
++		DUMP_WED(WED_RTQM_IGRS2_I2H_DMAD_CNT(0)),
++		DUMP_WED(WED_RTQM_IGRS2_I2H_DMAD_CNT(1)),
++		DUMP_WED(WED_RTQM_IGRS2_I2HW_PKT_CNT),
++		DUMP_WED(WED_RTQM_IGRS2_I2H_PKT_CNT(0)),
++		DUMP_WED(WED_RTQM_IGRS2_I2H_PKT_CNT(1)),
++		DUMP_WED(WED_RTQM_IGRS2_FDROP_CNT),
++
++		DUMP_STR("WED Route QM IGRS3(DEBUG)"),
++		DUMP_WED(WED_RTQM_IGRS3_I2HW_DMAD_CNT),
++		DUMP_WED(WED_RTQM_IGRS3_I2H_DMAD_CNT(0)),
++		DUMP_WED(WED_RTQM_IGRS3_I2H_DMAD_CNT(1)),
++		DUMP_WED(WED_RTQM_IGRS3_I2HW_PKT_CNT),
++		DUMP_WED(WED_RTQM_IGRS3_I2H_PKT_CNT(0)),
++		DUMP_WED(WED_RTQM_IGRS3_I2H_PKT_CNT(1)),
++		DUMP_WED(WED_RTQM_IGRS3_FDROP_CNT),
++
++		DUMP_END()
++	};
++
++	static const struct reg_dump *regs[] = {
++		&regs_common[0],
++		NULL,
++	};
++	struct mtk_wed_hw *hw = s->private;
++	struct mtk_wed_device *dev = hw->wed_dev;
++
++	if (!dev)
++		return 0;
++
++	dump_wed_regs(s, dev, regs);
++
++	return 0;
++}
++DEFINE_SHOW_ATTRIBUTE(wed_rtqm);
++
++
++static int
++wed_rro_show(struct seq_file *s, void *data)
++{
++	static const struct reg_dump regs_common[] = {
++		DUMP_STR("RRO/IND CMD CNT"),
++		DUMP_WED(WED_RX_IND_CMD_CNT(1)),
++		DUMP_WED(WED_RX_IND_CMD_CNT(2)),
++		DUMP_WED(WED_RX_IND_CMD_CNT(3)),
++		DUMP_WED(WED_RX_IND_CMD_CNT(4)),
++		DUMP_WED(WED_RX_IND_CMD_CNT(5)),
++		DUMP_WED(WED_RX_IND_CMD_CNT(6)),
++		DUMP_WED(WED_RX_IND_CMD_CNT(7)),
++		DUMP_WED(WED_RX_IND_CMD_CNT(8)),
++		DUMP_WED_MASK(WED_RX_IND_CMD_CNT(9),
++			      WED_IND_CMD_MAGIC_CNT_FAIL_CNT),
++
++		DUMP_WED(WED_RX_ADDR_ELEM_CNT(0)),
++		DUMP_WED_MASK(WED_RX_ADDR_ELEM_CNT(1),
++			      WED_ADDR_ELEM_SIG_FAIL_CNT),
++		DUMP_WED(WED_RX_MSDU_PG_CNT(1)),
++		DUMP_WED(WED_RX_MSDU_PG_CNT(2)),
++		DUMP_WED(WED_RX_MSDU_PG_CNT(3)),
++		DUMP_WED(WED_RX_MSDU_PG_CNT(4)),
++		DUMP_WED(WED_RX_MSDU_PG_CNT(5)),
++		DUMP_WED_MASK(WED_RX_PN_CHK_CNT,
++			      WED_PN_CHK_FAIL_CNT),
++
++		DUMP_END()
++	};
++
++	static const struct reg_dump *regs[] = {
++		&regs_common[0],
++		NULL,
++	};
++	struct mtk_wed_hw *hw = s->private;
++	struct mtk_wed_device *dev = hw->wed_dev;
++
++	if (!dev)
++		return 0;
++
++	dump_wed_regs(s, dev, regs);
++
++	return 0;
++}
++DEFINE_SHOW_ATTRIBUTE(wed_rro);
++
+ void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
+ {
+ 	struct dentry *dir;
+@@ -261,8 +747,18 @@ void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
+ 	debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
+ 	debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
+ 	debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
+-	debugfs_create_file_unsafe("rxinfo", 0400, dir, hw, &wed_rxinfo_fops);
+-	if (hw->ver != MTK_WED_V1) {
++	debugfs_create_u32("token_id", 0600, dir, &hw->token_id);
++	debugfs_create_file_unsafe("token_txd", 0600, dir, hw, &wed_token_txd_fops);
++
++	if (hw->version == 3)
++		debugfs_create_file_unsafe("pao", 0400, dir, hw, &wed_pao_fops);
++
++	if (hw->version != 1) {
++		debugfs_create_file_unsafe("rxinfo", 0400, dir, hw, &wed_rxinfo_fops);
++		if (hw->version == 3) {
++			debugfs_create_file_unsafe("rtqm", 0400, dir, hw, &wed_rtqm_fops);
++			debugfs_create_file_unsafe("rro", 0400, dir, hw, &wed_rro_fops);
++		}
+ 		wed_wo_mcu_debugfs(hw, dir);
+ 	}
+ }
+diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+index 96e30a3..055594d 100644
+--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+@@ -242,7 +242,7 @@ mtk_wed_load_firmware(struct mtk_wed_wo *wo)
+ 	u32 ofs = 0;
+ 	u32 boot_cr, val;
+ 
+-	mcu = wo->hw->index ? MT7986_FIRMWARE_WO_2 : MT7986_FIRMWARE_WO_1;
++	mcu = wo->hw->index ? MTK_FIRMWARE_WO_1 : MTK_FIRMWARE_WO_0;
+ 
+ 	ret = request_firmware(&fw, mcu, wo->hw->dev);
+ 	if (ret)
+@@ -289,8 +289,12 @@ mtk_wed_load_firmware(struct mtk_wed_wo *wo)
+ 	}
+ 
+ 	/* write the start address */
+-	boot_cr = wo->hw->index ?
+-		WOX_MCU_CFG_LS_WA_BOOT_ADDR_ADDR : WOX_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
++	if (wo->hw->version == 3)
++		boot_cr = WOX_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
++	else
++		boot_cr = wo->hw->index ?
++			WOX_MCU_CFG_LS_WA_BOOT_ADDR_ADDR : WOX_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
++
+ 	wo_w32(wo, boot_cr, (wo->region[WO_REGION_EMI].addr_pa >> 16));
+ 
+ 	/* wo firmware reset */
+@@ -298,8 +302,7 @@ mtk_wed_load_firmware(struct mtk_wed_wo *wo)
+ 
+ 	val = wo_r32(wo, WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR);
+ 
+-	val |= wo->hw->index ? WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WA_CPU_RSTB_MASK :
+-		WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WM_CPU_RSTB_MASK;
++	val |= WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WM_CPU_RSTB_MASK;
+ 
+ 	wo_w32(wo, WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val);
+ 
+diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.h b/drivers/net/ethernet/mediatek/mtk_wed_mcu.h
+index 19e1199..c07bdb6 100644
+--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.h
+@@ -16,8 +16,9 @@
+ #define WARP_OK_STATUS (0)
+ #define WARP_ALREADY_DONE_STATUS (1)
+ 
+-#define MT7986_FIRMWARE_WO_1		"mediatek/mt7986_wo_0.bin"
+-#define MT7986_FIRMWARE_WO_2		"mediatek/mt7986_wo_1.bin"
++#define MTK_FIRMWARE_WO_0		"mediatek/mtk_wo_0.bin"
++#define MTK_FIRMWARE_WO_1		"mediatek/mtk_wo_1.bin"
++#define MTK_FIRMWARE_WO_2		"mediatek/mtk_wo_2.bin"
+ 
+ #define WOCPU_EMI_DEV_NODE		"mediatek,wocpu_emi"
+ #define WOCPU_ILM_DEV_NODE		"mediatek,wocpu_ilm"
+diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+index 403a36b..4e619ff 100644
+--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+@@ -20,6 +20,9 @@
+ #define MTK_WDMA_DESC_CTRL_DMA_DONE		BIT(31)
+ #define MTK_WED_RX_BM_TOKEN			GENMASK(31, 16)
+ 
++#define MTK_WDMA_TXD0_DESC_INFO_DMA_DONE	BIT(29)
++#define MTK_WDMA_TXD1_DESC_INFO_DMA_DONE	BIT(31)
++
+ struct mtk_wdma_desc {
+ 	__le32 buf0;
+ 	__le32 ctrl;
+@@ -51,6 +54,7 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RESET_WDMA_INT_AGENT			BIT(19)
+ #define MTK_WED_RESET_RX_RRO_QM				BIT(20)
+ #define MTK_WED_RESET_RX_ROUTE_QM			BIT(21)
++#define MTK_WED_RESET_TX_PAO				BIT(22)
+ #define MTK_WED_RESET_WED				BIT(31)
+ 
+ #define MTK_WED_CTRL					0x00c
+@@ -58,6 +62,9 @@ struct mtk_wdma_desc {
+ #define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY		BIT(1)
+ #define MTK_WED_CTRL_WDMA_INT_AGENT_EN			BIT(2)
+ #define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY		BIT(3)
++#define MTK_WED_CTRL_WED_RX_IND_CMD_EN			BIT(5)
++#define MTK_WED_CTRL_WED_RX_PG_BM_EN			BIT(6)
++#define MTK_WED_CTRL_WED_RX_PG_BM_BUSU			BIT(7)
+ #define MTK_WED_CTRL_WED_TX_BM_EN			BIT(8)
+ #define MTK_WED_CTRL_WED_TX_BM_BUSY			BIT(9)
+ #define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN		BIT(10)
+@@ -68,9 +75,14 @@ struct mtk_wdma_desc {
+ #define MTK_WED_CTRL_RX_RRO_QM_BUSY			BIT(15)
+ #define MTK_WED_CTRL_RX_ROUTE_QM_EN			BIT(16)
+ #define MTK_WED_CTRL_RX_ROUTE_QM_BUSY			BIT(17)
++#define MTK_WED_CTRL_TX_TKID_ALI_EN			BIT(20)
++#define MTK_WED_CTRL_TX_TKID_ALI_BUSY			BIT(21)
++#define MTK_WED_CTRL_TX_PAO_EN				BIT(22)
++#define MTK_WED_CTRL_TX_PAO_BUSY			BIT(23)
+ #define MTK_WED_CTRL_FINAL_DIDX_READ			BIT(24)
+ #define MTK_WED_CTRL_ETH_DMAD_FMT			BIT(25)
+ #define MTK_WED_CTRL_MIB_READ_CLEAR			BIT(28)
++#define MTK_WED_CTRL_FLD_MIB_RD_CLR			BIT(28)
+ 
+ #define MTK_WED_EXT_INT_STATUS				0x020
+ #define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR		BIT(0)
+@@ -78,12 +90,10 @@ struct mtk_wdma_desc {
+ #define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID	BIT(4)
+ #define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH		BIT(8)
+ #define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH		BIT(9)
+-#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+-#define MTK_WED_EXT_INT_STATUS_TX_TKID_LO_TH		BIT(10)
+-#define MTK_WED_EXT_INT_STATUS_TX_TKID_HI_TH		BIT(11)
+-#endif
+-#define MTK_WED_EXT_INT_STATUS_RX_FREE_AT_EMPTY		BIT(12)
+-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_DMAD_ER		BIT(13)
++#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH2		BIT(10)
++#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH2		BIT(11)
++#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH		BIT(12)
++#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH		BIT(13)
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR	BIT(16)
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR	BIT(17)
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT		BIT(18)
+@@ -100,17 +110,15 @@ struct mtk_wdma_desc {
+ #define MTK_WED_EXT_INT_STATUS_ERROR_MASK		(MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
+ 							 MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
+ 							 MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \
+-							 MTK_WED_EXT_INT_STATUS_RX_FREE_AT_EMPTY | \
+-							 MTK_WED_EXT_INT_STATUS_RX_FBUF_DMAD_ER | \
+ 							 MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
+ 							 MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
+ 							 MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \
+-							 MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR | \
+-							 MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR)
++							 MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR)
+ 
+ #define MTK_WED_EXT_INT_MASK				0x028
+ #define MTK_WED_EXT_INT_MASK1				0x02c
+ #define MTK_WED_EXT_INT_MASK2				0x030
++#define MTK_WED_EXT_INT_MASK3				0x034
+ 
+ #define MTK_WED_STATUS					0x060
+ #define MTK_WED_STATUS_TX				GENMASK(15, 8)
+@@ -118,9 +126,14 @@ struct mtk_wdma_desc {
+ #define MTK_WED_TX_BM_CTRL				0x080
+ #define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM			GENMASK(6, 0)
+ #define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM			GENMASK(22, 16)
++#define MTK_WED_TX_BM_CTRL_LEGACY_EN			BIT(26)
++#define MTK_WED_TX_TKID_CTRL_FREE_FORMAT		BIT(27)
+ #define MTK_WED_TX_BM_CTRL_PAUSE			BIT(28)
+ 
+ #define MTK_WED_TX_BM_BASE				0x084
++#define MTK_WED_TX_BM_INIT_PTR				0x088
++#define MTK_WED_TX_BM_SW_TAIL_IDX			GENMASK(16, 0)
++#define MTK_WED_TX_BM_INIT_SW_TAIL_IDX			BIT(16)
+ 
+ #define MTK_WED_TX_BM_BUF_LEN				0x08c
+ 
+@@ -134,22 +147,24 @@ struct mtk_wdma_desc {
+ #if defined(CONFIG_MEDIATEK_NETSYS_V2)
+ #define MTK_WED_TX_BM_DYN_THR_LO			GENMASK(8, 0)
+ #define MTK_WED_TX_BM_DYN_THR_HI			GENMASK(24, 16)
+-
+-#define MTK_WED_TX_BM_TKID				0x0c8
+-#define MTK_WED_TX_BM_TKID_START			GENMASK(15, 0)
+-#define MTK_WED_TX_BM_TKID_END				GENMASK(31, 16)
+ #else
+ #define MTK_WED_TX_BM_DYN_THR_LO			GENMASK(6, 0)
+ #define MTK_WED_TX_BM_DYN_THR_HI			GENMASK(22, 16)
++#endif
+ 
+-#define MTK_WED_TX_BM_TKID				0x088
++#define MTK_WED_TX_BM_TKID				0x0c8
+ #define MTK_WED_TX_BM_TKID_START			GENMASK(15, 0)
+ #define MTK_WED_TX_BM_TKID_END				GENMASK(31, 16)
+-#endif
+ 
+ #define MTK_WED_TX_TKID_CTRL				0x0c0
++#if defined(CONFIG_MEDIATEK_NETSYS_V3)
++#define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM		GENMASK(7, 0)
++#define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM		GENMASK(23, 16)
++#else
+ #define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM		GENMASK(6, 0)
+ #define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM		GENMASK(22, 16)
++#endif
++
+ #define MTK_WED_TX_TKID_CTRL_PAUSE			BIT(28)
+ 
+ #define MTK_WED_TX_TKID_DYN_THR				0x0e0
+@@ -220,12 +235,15 @@ struct mtk_wdma_desc {
+ #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_PKT_PROC	BIT(5)
+ #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC	BIT(6)
+ #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_CRX_SYNC	BIT(7)
+-#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER	GENMASK(18, 16)
++#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER	GENMASK(15, 12)
++#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4	BIT(18)
+ #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNSUPPORT_FMT	BIT(19)
+-#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UEVENT_PKT_FMT_CHK BIT(20)
++#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK	BIT(20)
+ #define MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR		BIT(21)
+ #define MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP		BIT(24)
++#define MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST		BIT(25)
+ #define MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV		BIT(28)
++#define MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK		BIT(30)
+ 
+ /* CONFIG_MEDIATEK_NETSYS_V1 */
+ #define MTK_WED_WPDMA_GLO_CFG_RX_BT_SIZE		GENMASK(5, 4)
+@@ -288,9 +306,11 @@ struct mtk_wdma_desc {
+ #define MTK_WED_PCIE_INT_TRIGGER_STATUS			BIT(16)
+ 
+ #define MTK_WED_PCIE_INT_CTRL				0x57c
+-#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA		BIT(20)
+-#define MTK_WED_PCIE_INT_CTRL_SRC_SEL			GENMASK(17, 16)
+ #define MTK_WED_PCIE_INT_CTRL_POLL_EN 			GENMASK(13, 12)
++#define MTK_WED_PCIE_INT_CTRL_SRC_SEL			GENMASK(17, 16)
++#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA		BIT(20)
++#define MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER		BIT(21)
++
+ #define MTK_WED_WPDMA_CFG_BASE				0x580
+ #define MTK_WED_WPDMA_CFG_INT_MASK			0x584
+ #define MTK_WED_WPDMA_CFG_TX				0x588
+@@ -319,20 +339,50 @@ struct mtk_wdma_desc {
+ #define MTK_WED_WPDMA_RX_D_RST_DRV_IDX			GENMASK(25, 24)
+ 
+ #define MTK_WED_WPDMA_RX_GLO_CFG			0x76c
+-#define MTK_WED_WPDMA_RX_RING				0x770
++#if defined(CONFIG_MEDIATEK_NETSYS_V2)
++#define MTK_WED_WPDMA_RX_RING0				0x770
++#else
++#define MTK_WED_WPDMA_RX_RING0				0x7d0
++#endif
++#define MTK_WED_WPDMA_RX_RING1				0x7d8
+ 
+ #define MTK_WED_WPDMA_RX_D_MIB(_n)			(0x774 + (_n) * 4)
+ #define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n)		(0x784 + (_n) * 4)
+ #define MTK_WED_WPDMA_RX_D_COHERENT_MIB			0x78c
+ 
++#define MTK_WED_WPDMA_RX_D_PREF_CFG			0x7b4
++#define MTK_WED_WPDMA_RX_D_PREF_EN			BIT(0)
++#define MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE		GENMASK(12, 8)
++#define MTK_WED_WPDMA_RX_D_PREF_LOW_THRES		GENMASK(21, 16)
++
++#define MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX		0x7b8
++#define MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR		BIT(15)
++
++#define MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX		0x7bc
++
++#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG		0x7c0
++#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR		BIT(0)
++#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR		BIT(16)
++
+ #define MTK_WED_WDMA_RING_TX				0x800
+ 
+ #define MTK_WED_WDMA_TX_MIB				0x810
+ 
+-
+ #define MTK_WED_WDMA_RING_RX(_n)			(0x900 + (_n) * 0x10)
+ #define MTK_WED_WDMA_RX_THRES(_n)			(0x940 + (_n) * 0x4)
+ 
++#define MTK_WED_WDMA_RX_PREF_CFG			0x950
++#define MTK_WED_WDMA_RX_PREF_EN				BIT(0)
++#define MTK_WED_WDMA_RX_PREF_BURST_SIZE			GENMASK(12, 8)
++#define MTK_WED_WDMA_RX_PREF_LOW_THRES			GENMASK(21, 16)
++#define MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR		BIT(24)
++#define MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR		BIT(25)
++#define MTK_WED_WDMA_RX_PREF_DDONE2_EN			BIT(26)
++
++#define MTK_WED_WDMA_RX_PREF_FIFO_CFG			0x95C
++#define MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR		BIT(0)
++#define MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR		BIT(16)
++
+ #define MTK_WED_WDMA_GLO_CFG				0xa04
+ #define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN			BIT(0)
+ #define MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK		BIT(1)
+@@ -365,6 +415,7 @@ struct mtk_wdma_desc {
+ #define MTK_WED_WDMA_INT_TRIGGER_RX_DONE		GENMASK(17, 16)
+ 
+ #define MTK_WED_WDMA_INT_CTRL				0xa2c
++#define MTK_WED_WDMA_INT_POLL_PRD			GENMASK(7, 0)
+ #define MTK_WED_WDMA_INT_POLL_SRC_SEL			GENMASK(17, 16)
+ 
+ #define MTK_WED_WDMA_CFG_BASE				0xaa0
+@@ -426,6 +477,18 @@ struct mtk_wdma_desc {
+ #define MTK_WDMA_INT_GRP1				0x250
+ #define MTK_WDMA_INT_GRP2				0x254
+ 
++#define MTK_WDMA_PREF_TX_CFG				0x2d0
++#define MTK_WDMA_PREF_TX_CFG_PREF_EN			BIT(0)
++
++#define MTK_WDMA_PREF_RX_CFG				0x2dc
++#define MTK_WDMA_PREF_RX_CFG_PREF_EN			BIT(0)
++
++#define MTK_WDMA_WRBK_TX_CFG				0x300
++#define MTK_WDMA_WRBK_TX_CFG_WRBK_EN			BIT(30)
++
++#define MTK_WDMA_WRBK_RX_CFG				0x344
++#define MTK_WDMA_WRBK_RX_CFG_WRBK_EN			BIT(30)
++
+ #define MTK_PCIE_MIRROR_MAP(n)				((n) ? 0x4 : 0x0)
+ #define MTK_PCIE_MIRROR_MAP_EN				BIT(0)
+ #define MTK_PCIE_MIRROR_MAP_WED_ID			BIT(1)
+@@ -439,6 +502,31 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RTQM_Q_DBG_BYPASS			BIT(5)
+ #define MTK_WED_RTQM_TXDMAD_FPORT			GENMASK(23, 20)
+ 
++#define MTK_WED_RTQM_IGRS0_I2HW_DMAD_CNT		0xb1c
++#define MTK_WED_RTQM_IGRS0_I2H_DMAD_CNT(_n)		(0xb20 + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS0_I2HW_PKT_CNT			0xb28
++#define MTK_WED_RTQM_IGRS0_I2H_PKT_CNT(_n)		(0xb2c + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS0_FDROP_CNT			0xb34
++
++
++#define MTK_WED_RTQM_IGRS1_I2HW_DMAD_CNT		0xb44
++#define MTK_WED_RTQM_IGRS1_I2H_DMAD_CNT(_n)		(0xb48 + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS1_I2HW_PKT_CNT			0xb50
++#define MTK_WED_RTQM_IGRS1_I2H_PKT_CNT(_n)		(0xb54 + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS1_FDROP_CNT			0xb5c
++
++#define MTK_WED_RTQM_IGRS2_I2HW_DMAD_CNT		0xb6c
++#define MTK_WED_RTQM_IGRS2_I2H_DMAD_CNT(_n)		(0xb70 + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS2_I2HW_PKT_CNT			0xb78
++#define MTK_WED_RTQM_IGRS2_I2H_PKT_CNT(_n)		(0xb7c + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS2_FDROP_CNT			0xb84
++
++#define MTK_WED_RTQM_IGRS3_I2HW_DMAD_CNT		0xb94
++#define MTK_WED_RTQM_IGRS3_I2H_DMAD_CNT(_n)		(0xb98 + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS3_I2HW_PKT_CNT			0xba0
++#define MTK_WED_RTQM_IGRS3_I2H_PKT_CNT(_n)		(0xba4 + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS3_FDROP_CNT			0xbac
++
+ #define MTK_WED_RTQM_R2H_MIB(_n)			(0xb70 + (_n) * 0x4)
+ #define MTK_WED_RTQM_R2Q_MIB(_n)			(0xb78 + (_n) * 0x4)
+ #define MTK_WED_RTQM_Q2N_MIB				0xb80
+@@ -447,6 +535,24 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RTQM_Q2B_MIB				0xb8c
+ #define MTK_WED_RTQM_PFDBK_MIB				0xb90
+ 
++#define MTK_WED_RTQM_ENQ_CFG0				0xbb8
++#define MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT		GENMASK(15, 12)
++
++#define MTK_WED_RTQM_FDROP_MIB				0xb84
++#define MTK_WED_RTQM_ENQ_I2Q_DMAD_CNT			0xbbc
++#define MTK_WED_RTQM_ENQ_I2N_DMAD_CNT			0xbc0
++#define MTK_WED_RTQM_ENQ_I2Q_PKT_CNT			0xbc4
++#define MTK_WED_RTQM_ENQ_I2N_PKT_CNT			0xbc8
++#define MTK_WED_RTQM_ENQ_USED_ENTRY_CNT			0xbcc
++#define MTK_WED_RTQM_ENQ_ERR_CNT			0xbd0
++
++#define MTK_WED_RTQM_DEQ_DMAD_CNT			0xbd8
++#define MTK_WED_RTQM_DEQ_Q2I_DMAD_CNT			0xbdc
++#define MTK_WED_RTQM_DEQ_PKT_CNT			0xbe0
++#define MTK_WED_RTQM_DEQ_Q2I_PKT_CNT			0xbe4
++#define MTK_WED_RTQM_DEQ_USED_PFDBK_CNT			0xbe8
++#define MTK_WED_RTQM_DEQ_ERR_CNT			0xbec
++
+ #define MTK_WED_RROQM_GLO_CFG				0xc04
+ #define MTK_WED_RROQM_RST_IDX				0xc08
+ #define MTK_WED_RROQM_RST_IDX_MIOD 			BIT(0)
+@@ -487,8 +593,8 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RX_BM_BASE				0xd84
+ #define MTK_WED_RX_BM_INIT_PTR				0xd88
+ #define MTK_WED_RX_BM_PTR	      			0xd8c
+-#define MTK_WED_RX_BM_PTR_HEAD				GENMASK(32, 16)
+ #define MTK_WED_RX_BM_PTR_TAIL				GENMASK(15, 0)
++#define MTK_WED_RX_BM_PTR_HEAD				GENMASK(32, 16)
+ 
+ #define MTK_WED_RX_BM_BLEN	      			0xd90
+ #define MTK_WED_RX_BM_STS				0xd94
+@@ -496,7 +602,193 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RX_BM_INTF				0xd9c
+ #define MTK_WED_RX_BM_ERR_STS				0xda8
+ 
++#define MTK_RRO_IND_CMD_SIGNATURE			0xe00
++#define MTK_RRO_IND_CMD_DMA_IDX				GENMASK(11, 0)
++#define MTK_RRO_IND_CMD_MAGIC_CNT			GENMASK(30, 28)
++
++#define MTK_WED_IND_CMD_RX_CTRL0			0xe04
++#define MTK_WED_IND_CMD_PROC_IDX			GENMASK(11, 0)
++#define MTK_WED_IND_CMD_PREFETCH_FREE_CNT		GENMASK(19, 16)
++#define MTK_WED_IND_CMD_MAGIC_CNT			GENMASK(30, 28)
++
++#define MTK_WED_IND_CMD_RX_CTRL1			0xe08
++#define MTK_WED_IND_CMD_RX_CTRL2			0xe0c
++#define MTK_WED_IND_CMD_MAX_CNT				GENMASK(11, 0)
++#define MTK_WED_IND_CMD_BASE_M				GENMASK(19, 16)
++
++#define MTK_WED_RRO_CFG0				0xe10
++#define MTK_WED_RRO_CFG1				0xe14
++#define MTK_WED_RRO_CFG1_MAX_WIN_SZ			GENMASK(31, 29)
++#define MTK_WED_RRO_CFG1_ACK_SN_BASE_M			GENMASK(19, 16)
++#define MTK_WED_RRO_CFG1_PARTICL_SE_ID			GENMASK(11, 0)
++
++#define MTK_WED_ADDR_ELEM_CFG0				0xe18
++#define MTK_WED_ADDR_ELEM_CFG1				0xe1c
++#define MTK_WED_ADDR_ELEM_PREFETCH_FREE_CNT		GENMASK(19, 16)
++
++#define MTK_WED_ADDR_ELEM_TBL_CFG 			0xe20
++#define MTK_WED_ADDR_ELEM_TBL_OFFSET			GENMASK(6, 0)
++#define MTK_WED_ADDR_ELEM_TBL_RD_RDY			BIT(28)
++#define MTK_WED_ADDR_ELEM_TBL_WR_RDY			BIT(29)
++#define MTK_WED_ADDR_ELEM_TBL_RD			BIT(30)
++#define MTK_WED_ADDR_ELEM_TBL_WR			BIT(31)
++
++#define MTK_WED_RADDR_ELEM_TBL_WDATA 			0xe24
++#define MTK_WED_RADDR_ELEM_TBL_RDATA 			0xe28
++
++#define MTK_WED_PN_CHECK_CFG 				0xe30
++#define MTK_WED_PN_CHECK_SE_ID				GENMASK(11, 0)
++#define MTK_WED_PN_CHECK_RD_RDY				BIT(28)
++#define MTK_WED_PN_CHECK_WR_RDY				BIT(29)
++#define MTK_WED_PN_CHECK_RD				BIT(30)
++#define MTK_WED_PN_CHECK_WR				BIT(31)
++
++#define MTK_WED_PN_CHECK_WDATA_M 			0xe38
++#define MTK_WED_PN_CHECK_IS_FIRST			BIT(17)
++
++#define MTK_WED_RRO_MSDU_PG_RING_CFG(_n)		(0xe44 + (_n) * 0x8)
++
++#define MTK_WED_RRO_MSDU_PG_RING2_CFG			0xe58
++#define MTK_WED_RRO_MSDU_PG_DRV_CLR			BIT(26)
++#define MTK_WED_RRO_MSDU_PG_DRV_EN			BIT(31)
++
++#define MTK_WED_RRO_MSDU_PG_CTRL0(_n)			(0xe5c + (_n) * 0xc)
++#define MTK_WED_RRO_MSDU_PG_CTRL1(_n)			(0xe60 + (_n) * 0xc)
++#define MTK_WED_RRO_MSDU_PG_CTRL2(_n)			(0xe64 + (_n) * 0xc)
++
++#define MTK_WED_RRO_RX_D_RX(_n)				(0xe80 + (_n) * 0x10)
++
++#define MTK_WED_RRO_RX_MAGIC_CNT			BIT(13)
++
++#define MTK_WED_RRO_RX_D_CFG(_n)			(0xea0 + (_n) * 0x4)
++#define MTK_WED_RRO_RX_D_DRV_CLR			BIT(26)
++#define MTK_WED_RRO_RX_D_DRV_EN				BIT(31)
++
++#define MTK_WED_RRO_PG_BM_RX_DMAM			0xeb0
++#define MTK_WED_RRO_PG_BM_RX_SDL0			GENMASK(13, 0)
++
++#define MTK_WED_RRO_PG_BM_BASE				0xeb4
++#define MTK_WED_RRO_PG_BM_INIT_PTR			0xeb8
++#define MTK_WED_RRO_PG_BM_SW_TAIL_IDX			GENMASK(15, 0)
++#define MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX		BIT(16)
++
++#define MTK_WED_WPDMA_INT_CTRL_RRO_RX			0xeec
++#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN		BIT(0)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR		BIT(1)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG	GENMASK(6, 2)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN		BIT(8)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR		BIT(9)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG	GENMASK(14, 10)
++
++#define MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG		0xef4
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN		BIT(0)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR		BIT(1)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG	GENMASK(6, 2)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN		BIT(8)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR		BIT(9)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG	GENMASK(14, 10)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN		BIT(16)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR		BIT(17)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG	GENMASK(22, 18)
++
++#define MTK_WED_RX_IND_CMD_CNT0				0xf20
++#define MTK_WED_RX_IND_CMD_DBG_CNT_EN			BIT(31)
++
++#define MTK_WED_RX_IND_CMD_CNT(_n)			(0xf20 + (_n) * 0x4)
++#define MTK_WED_IND_CMD_MAGIC_CNT_FAIL_CNT		GENMASK(15, 0)
++
++#define MTK_WED_RX_ADDR_ELEM_CNT(_n)			(0xf48 + (_n) * 0x4)
++#define MTK_WED_ADDR_ELEM_SIG_FAIL_CNT			GENMASK(15, 0)
++#define MTK_WED_ADDR_ELEM_FIRST_SIG_FAIL_CNT		GENMASK(31, 16)
++#define MTK_WED_ADDR_ELEM_ACKSN_CNT			GENMASK(27, 0)
++
++#define MTK_WED_RX_MSDU_PG_CNT(_n)			(0xf5c + (_n) * 0x4)
++
++#define MTK_WED_RX_PN_CHK_CNT 				0xf70
++#define MTK_WED_PN_CHK_FAIL_CNT				GENMASK(15, 0)
++
+ #define MTK_WED_WOCPU_VIEW_MIOD_BASE		 	0x8000
+ #define MTK_WED_PCIE_INT_MASK				0x0
+ 
++#define MTK_WED_PAO_AMSDU_FIFO				0x1800
++#define MTK_WED_PAO_AMSDU_IS_PRIOR0_RING		BIT(10)
++
++#define MTK_WED_PAO_STA_INFO				0x01810
++#define MTK_WED_PAO_STA_INFO_DO_INIT			BIT(0)
++#define MTK_WED_PAO_STA_INFO_SET_INIT			BIT(1)
++
++#define MTK_WED_PAO_STA_INFO_INIT			0x01814
++#define MTK_WED_PAO_STA_WTBL_HDRT_MODE			BIT(0)
++#define MTK_WED_PAO_STA_RMVL				BIT(1)
++#define MTK_WED_PAO_STA_MAX_AMSDU_LEN			GENMASK(7, 2)
++#define MTK_WED_PAO_STA_MAX_AMSDU_NUM			GENMASK(11, 8)
++
++#define MTK_WED_PAO_HIFTXD_BASE_L(_n)			(0x1980 + (_n) * 0x4)
++
++#define MTK_WED_PAO_PSE					0x1910
++#define MTK_WED_PAO_PSE_RESET				BIT(16)
++
++#define MTK_WED_PAO_HIFTXD_CFG				0x1968
++#define MTK_WED_PAO_HIFTXD_SRC				GENMASK(16, 15)
++
++#define MTK_WED_PAO_MON_AMSDU_FIFO_DMAD			0x1a34
++
++#define MTK_WED_PAO_MON_AMSDU_ENG_DMAD(_n)		(0x1a80 + (_n) * 0x50)
++#define MTK_WED_PAO_MON_AMSDU_ENG_QFPL(_n)		(0x1a84 + (_n) * 0x50)
++#define MTK_WED_PAO_MON_AMSDU_ENG_QENI(_n)		(0x1a88 + (_n) * 0x50)
++#define MTK_WED_PAO_MON_AMSDU_ENG_QENO(_n)		(0x1a8c + (_n) * 0x50)
++#define MTK_WED_PAO_MON_AMSDU_ENG_MERG(_n)		(0x1a90 + (_n) * 0x50)
++
++#define MTK_WED_PAO_MON_AMSDU_ENG_CNT8(_n)		(0x1a94 + (_n) * 0x50)
++#define MTK_WED_PAO_AMSDU_ENG_MAX_QGPP_CNT		GENMASK(10, 0)
++#define MTK_WED_PAO_AMSDU_ENG_MAX_PL_CNT		GENMASK(27, 16)
++
++#define MTK_WED_PAO_MON_AMSDU_ENG_CNT9(_n)		(0x1a98 + (_n) * 0x50)
++#define MTK_WED_PAO_AMSDU_ENG_CUR_ENTRY			GENMASK(10, 0)
++#define MTK_WED_PAO_AMSDU_ENG_MAX_BUF_MERGED		GENMASK(20, 16)
++#define MTK_WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED		GENMASK(28, 24)
++
++#define MTK_WED_PAO_MON_QMEM_STS1			0x1e04
++
++#define MTK_WED_PAO_MON_QMEM_CNT(_n)			(0x1e0c + (_n) * 0x4)
++#define MTK_WED_PAO_QMEM_FQ_CNT				GENMASK(27, 16)
++#define MTK_WED_PAO_QMEM_SP_QCNT			GENMASK(11, 0)
++#define MTK_WED_PAO_QMEM_TID0_QCNT			GENMASK(27, 16)
++#define MTK_WED_PAO_QMEM_TID1_QCNT			GENMASK(11, 0)
++#define MTK_WED_PAO_QMEM_TID2_QCNT			GENMASK(27, 16)
++#define MTK_WED_PAO_QMEM_TID3_QCNT			GENMASK(11, 0)
++#define MTK_WED_PAO_QMEM_TID4_QCNT			GENMASK(27, 16)
++#define MTK_WED_PAO_QMEM_TID5_QCNT			GENMASK(11, 0)
++#define MTK_WED_PAO_QMEM_TID6_QCNT			GENMASK(27, 16)
++#define MTK_WED_PAO_QMEM_TID7_QCNT			GENMASK(11, 0)
++
++#define MTK_WED_PAO_MON_QMEM_PTR(_n)			(0x1e20 + (_n) * 0x4)
++#define MTK_WED_PAO_QMEM_FQ_HEAD				GENMASK(27, 16)
++#define MTK_WED_PAO_QMEM_SP_QHEAD			GENMASK(11, 0)
++#define MTK_WED_PAO_QMEM_TID0_QHEAD			GENMASK(27, 16)
++#define MTK_WED_PAO_QMEM_TID1_QHEAD			GENMASK(11, 0)
++#define MTK_WED_PAO_QMEM_TID2_QHEAD			GENMASK(27, 16)
++#define MTK_WED_PAO_QMEM_TID3_QHEAD			GENMASK(11, 0)
++#define MTK_WED_PAO_QMEM_TID4_QHEAD			GENMASK(27, 16)
++#define MTK_WED_PAO_QMEM_TID5_QHEAD			GENMASK(11, 0)
++#define MTK_WED_PAO_QMEM_TID6_QHEAD			GENMASK(27, 16)
++#define MTK_WED_PAO_QMEM_TID7_QHEAD			GENMASK(11, 0)
++#define MTK_WED_PAO_QMEM_FQ_TAIL			GENMASK(27, 16)
++#define MTK_WED_PAO_QMEM_SP_QTAIL			GENMASK(11, 0)
++#define MTK_WED_PAO_QMEM_TID0_QTAIL			GENMASK(27, 16)
++#define MTK_WED_PAO_QMEM_TID1_QTAIL			GENMASK(11, 0)
++#define MTK_WED_PAO_QMEM_TID2_QTAIL			GENMASK(27, 16)
++#define MTK_WED_PAO_QMEM_TID3_QTAIL			GENMASK(11, 0)
++#define MTK_WED_PAO_QMEM_TID4_QTAIL			GENMASK(27, 16)
++#define MTK_WED_PAO_QMEM_TID5_QTAIL			GENMASK(11, 0)
++#define MTK_WED_PAO_QMEM_TID6_QTAIL			GENMASK(27, 16)
++#define MTK_WED_PAO_QMEM_TID7_QTAIL			GENMASK(11, 0)
++
++#define MTK_WED_PAO_MON_HIFTXD_FETCH_MSDU(_n)		(0x1ec4 + (_n) * 0x4)
++
++#define MTK_WED_PCIE_BASE			0x11280000
++
++#define MTK_WED_PCIE_BASE0			0x11300000
++#define MTK_WED_PCIE_BASE1			0x11310000
++#define MTK_WED_PCIE_BASE2			0x11290000
+ #endif
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 58b5ce6..5e51790 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -873,6 +873,13 @@ struct net_device_path {
+ 			u8 queue;
+ 			u16 wcid;
+ 			u8 bss;
++			u32 usr_info;
++			u8 tid;
++			u8 is_fixedrate;
++			u8 is_prior;
++			u8 is_sp;
++			u8 hf;
++			u8 amsdu_en;
+ 		} mtk_wdma;
+ 	};
+ };
+diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
+index 27cf284..60336e0 100644
+--- a/include/linux/soc/mediatek/mtk_wed.h
++++ b/include/linux/soc/mediatek/mtk_wed.h
+@@ -5,11 +5,14 @@
+ #include <linux/rcupdate.h>
+ #include <linux/regmap.h>
+ #include <linux/pci.h>
++#include <linux/skbuff.h>
++#include <linux/iopoll.h>
+ 
+ #define WED_WO_STA_REC			0x6
+ 
+ #define MTK_WED_TX_QUEUES		2
+ #define MTK_WED_RX_QUEUES		2
++#define MTK_WED_RX_PAGE_QUEUES         3
+ 
+ enum mtk_wed_wo_cmd {
+ 	MTK_WED_WO_CMD_WED_CFG,
+@@ -55,10 +58,13 @@ enum mtk_wed_bus_tye {
+ struct mtk_wed_hw;
+ struct mtk_wdma_desc;
+ 
++#define MTK_WED_RING_CONFIGURED		BIT(0)
++
+ struct mtk_wed_ring {
+ 	struct mtk_wdma_desc *desc;
+ 	dma_addr_t desc_phys;
+ 	int size;
++	u32 flags;
+ 
+ 	u32 reg_base;
+ 	void __iomem *wpdma;
+@@ -69,11 +75,18 @@ struct mtk_rxbm_desc {
+ 	__le32 token;
+ } __packed __aligned(4);
+ 
++struct dma_page_info {
++	void *addr;
++	dma_addr_t addr_phys;
++};
++
+ struct dma_buf {
+ 	int size;
+-	void **pages;
+-	struct mtk_wdma_desc *desc;
++	int pkt_nums;
++	void *desc;
++	int desc_size;
+ 	dma_addr_t desc_phys;
++	struct dma_page_info *pages;
+ };
+ 
+ struct dma_entry {
+@@ -97,6 +110,7 @@ struct mtk_wed_device {
+ 	struct device *dev;
+ 	struct mtk_wed_hw *hw;
+ 	bool init_done, running;
++	bool wdma_init_done;
+ 	int wdma_idx;
+ 	int irq;
+ 	u8 ver;
+@@ -108,7 +122,11 @@ struct mtk_wed_device {
+ 	struct mtk_wed_ring rx_ring[MTK_WED_RX_QUEUES];
+ 	struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES];
+ 
+-	struct dma_buf buf_ring;
++	struct mtk_wed_ring rx_rro_ring[MTK_WED_RX_QUEUES];
++	struct mtk_wed_ring rx_page_ring[MTK_WED_RX_PAGE_QUEUES];
++	struct mtk_wed_ring ind_cmd_ring;
++
++	struct dma_buf tx_buf_ring;
+ 
+ 	struct {
+ 		int size;
+@@ -117,6 +135,8 @@ struct mtk_wed_device {
+ 		dma_addr_t desc_phys;
+ 	} rx_buf_ring;
+ 
++	struct dma_buf rx_page_buf_ring;
++
+ 	struct {
+ 		struct mtk_wed_ring rro_ring;
+ 		void __iomem *rro_desc;
+@@ -131,8 +151,9 @@ struct mtk_wed_device {
+ 			struct platform_device *platform_dev;
+ 			struct pci_dev *pci_dev;
+ 		};
++		enum mtk_wed_bus_tye bus_type;
+ 		void __iomem *base;
+-		u32 bus_type;
++		void __iomem *regs;
+ 		u32 phy_base;
+ 
+ 		u32 wpdma_phys;
+@@ -142,9 +163,13 @@ struct mtk_wed_device {
+ 		u32 wpdma_txfree;
+ 		u32 wpdma_rx_glo;
+ 		u32 wpdma_rx;
++		u32 wpdma_rx_rro[MTK_WED_RX_QUEUES];
++		u32 wpdma_rx_pg;
+ 
+ 		u8 tx_tbit[MTK_WED_TX_QUEUES];
+ 		u8 rx_tbit[MTK_WED_RX_QUEUES];
++		u8 rro_rx_tbit[MTK_WED_RX_QUEUES];
++		u8 rx_pg_tbit[MTK_WED_RX_PAGE_QUEUES];
+ 		u8 txfree_tbit;
+ 
+ 		u16 token_start;
+@@ -154,12 +179,26 @@ struct mtk_wed_device {
+ 		unsigned int rx_size;
+ 
+ 		bool wcid_512;
+-
++		bool hwrro;
++		bool msi;
++
++		u8 max_amsdu_nums;
++		u32 max_amsdu_len;
++
++		struct {
++			u8 se_group_nums;
++			u16 win_size;
++			u16 particular_sid;
++			u32 ack_sn_addr;
++			dma_addr_t particular_se_phys;
++			dma_addr_t addr_elem_phys[1024];
++		} ind_cmd;
++
++		u32 chip_id;
+ 		u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
+ 		int (*offload_enable)(struct mtk_wed_device *wed);
+ 		void (*offload_disable)(struct mtk_wed_device *wed);
+-		u32 (*init_rx_buf)(struct mtk_wed_device *wed,
+-				   int pkt_num);
++		u32 (*init_rx_buf)(struct mtk_wed_device *wed, int size);
+ 		void (*release_rx_buf)(struct mtk_wed_device *wed);
+ 		void (*update_wo_rx_stats)(struct mtk_wed_device *wed,
+ 					   struct mtk_wed_wo_rx_stats *stats);
+@@ -180,6 +219,11 @@ struct mtk_wed_ops {
+ 				 void __iomem *regs);
+ 	int (*rx_ring_setup)(struct mtk_wed_device *dev, int ring,
+ 			     void __iomem *regs, bool reset);
++	int (*rro_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
++			     void __iomem *regs);
++	int (*msdu_pg_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
++			     void __iomem *regs);
++	int (*ind_rx_ring_setup)(struct mtk_wed_device *dev, void __iomem *regs);
+ 	int (*msg_update)(struct mtk_wed_device *dev, int cmd_id,
+ 			  void *data, int len);
+ 	void (*detach)(struct mtk_wed_device *dev);
+@@ -196,6 +240,7 @@ struct mtk_wed_ops {
+ 	void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
+ 	void (*ppe_check)(struct mtk_wed_device *dev, struct sk_buff *skb,
+ 			  u32 reason, u32 hash);
++	void (*start_hwrro)(struct mtk_wed_device *dev, u32 irq_mask);
+ };
+ 
+ extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
+@@ -224,12 +269,21 @@ static inline bool
+ mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
+ {
+ #ifdef CONFIG_NET_MEDIATEK_SOC_WED
++	if (dev->ver == 3 && !dev->wlan.hwrro)
++		return false;
++
+ 	return dev->ver != 1;
+ #else
+ 	return false;
+ #endif
+ }
+ 
++static inline bool
++mtk_wed_device_support_pao(struct mtk_wed_device *dev)
++{
++	return dev->ver == 3;
++}
++
+ #ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ #define mtk_wed_device_active(_dev) !!(_dev)->ops
+ #define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
+@@ -243,6 +297,12 @@ mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
+ 	(_dev)->ops->txfree_ring_setup(_dev, _regs)
+ #define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs, _reset) \
+ 	(_dev)->ops->rx_ring_setup(_dev, _ring, _regs, _reset)
++#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) \
++	(_dev)->ops->rro_rx_ring_setup(_dev, _ring, _regs)
++#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) \
++	(_dev)->ops->msdu_pg_rx_ring_setup(_dev, _ring, _regs)
++#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) \
++	(_dev)->ops->ind_rx_ring_setup(_dev, _regs)
+ #define mtk_wed_device_update_msg(_dev, _id, _msg, _len) \
+ 	(_dev)->ops->msg_update(_dev, _id, _msg, _len)
+ #define mtk_wed_device_reg_read(_dev, _reg) \
+@@ -257,6 +317,9 @@ mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
+ 	(_dev)->ops->reset_dma(_dev)
+ #define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) \
+ 	(_dev)->ops->ppe_check(_dev, _skb, _reason, _hash)
++#define mtk_wed_device_start_hwrro(_dev, _mask) \
++	(_dev)->ops->start_hwrro(_dev, _mask)
++
+ #else
+ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
+ {
+@@ -268,6 +331,9 @@ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
+ #define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs, _reset) -ENODEV
+ #define mtk_wed_device_txfree_ring_setup(_dev, _ring, _regs) -ENODEV
+ #define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs, _reset) -ENODEV
++#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) -ENODEV
++#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs)  -ENODEV
++#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs)  -ENODEV
+ #define mtk_wed_device_reg_read(_dev, _reg) 0
+ #define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
+ #define mtk_wed_device_irq_get(_dev, _mask) 0
+@@ -275,6 +341,7 @@ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
+ #define mtk_wed_device_dma_reset(_dev) do {} while (0)
+ #define mtk_wed_device_setup_tc(_dev, _ndev, _type, _data) do {} while (0)
+ #define mtk_wed_device_ppe_check(_dev, _hash)  do {} while (0)
++#define mtk_wed_device_start_hwrro(_dev, _mask) do {} while (0)
+ #endif
+ 
+ #endif
+-- 
+2.18.0
+
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/wed3/999-3022-mtk-wed-add-wed3-ser-support.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/wed3/999-3022-mtk-wed-add-wed3-ser-support.patch
new file mode 100644
index 0000000..3837d8d
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/wed3/999-3022-mtk-wed-add-wed3-ser-support.patch
@@ -0,0 +1,611 @@
+From 7304ce8edabcbc34433307b02de429c2d118abaa Mon Sep 17 00:00:00 2001
+From: mtk27745 <rex.lu@mediatek.com>
+Date: Tue, 23 May 2023 11:19:30 +0800
+Subject: [PATCH] mtk-wed-add-wed3-ser-support
+
+---
+ drivers/net/ethernet/mediatek/mtk_wed.c      | 236 +++++++++++++++++--
+ drivers/net/ethernet/mediatek/mtk_wed_regs.h |  73 +++++-
+ include/linux/soc/mediatek/mtk_wed.h         |   6 +-
+ 3 files changed, 291 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
+index 6ed1c83..990888d 100644
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -99,11 +99,65 @@ mtk_wdma_rx_reset(struct mtk_wed_device *dev)
+ 	u32 status;
+ 	u32 mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY;
+ 	int busy, i;
++	u32 value;
+ 
+ 	wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN);
+ 	busy = readx_poll_timeout(mtk_wdma_read_reset, dev, status,
+-			       !(status & mask), 0, 10000);
++				  !(status & mask), 0, 10000);
+ 
++	if (dev->hw->version == 3) {
++		wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
++		wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
++		busy = read_poll_timeout(wdma_r32, status,
++					 !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY), 0, 10000,
++					 false, dev, MTK_WDMA_PREF_TX_CFG);
++		busy = read_poll_timeout(wdma_r32, status,
++					 !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY), 0, 10000,
++					 false, dev, MTK_WDMA_PREF_RX_CFG);
++
++		wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
++		wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
++		busy = read_poll_timeout(wdma_r32, status,
++					 !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY), 0, 10000,
++					 false, dev, MTK_WDMA_WRBK_TX_CFG);
++		busy = read_poll_timeout(wdma_r32, status,
++					 !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY), 0, 10000,
++					 false, dev, MTK_WDMA_WRBK_RX_CFG);
++
++		/* Prefetch FIFO */
++		wdma_w32(dev, MTK_WDMA_PREF_RX_FIFO_CFG,
++			 MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR |
++			 MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR);
++		wdma_clr(dev, MTK_WDMA_PREF_RX_FIFO_CFG,
++			 MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR |
++			 MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR);
++
++		/* Core FIFO */
++		value = (MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR |
++			 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR |
++			 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR |
++			 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR |
++			 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR |
++			 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR |
++			 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR);
++
++		wdma_w32(dev, MTK_WDMA_XDMA_RX_FIFO_CFG, value);
++		wdma_clr(dev, MTK_WDMA_XDMA_RX_FIFO_CFG, value);
++
++		/* Writeback FIFO */
++		wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0), MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
++		wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1), MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
++
++		wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0), MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
++		wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1), MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
++
++		/* Prefetch ring status */
++		wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG, MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR);
++		wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG, MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR);
++		/* Writeback ring status */
++		wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG, MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR);
++		wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG, MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR);
++	}
+ 	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
+ 	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+ 
+@@ -121,13 +175,62 @@ mtk_wdma_tx_reset(struct mtk_wed_device *dev)
+ {
+ 	u32 status;
+ 	u32 mask = MTK_WDMA_GLO_CFG_TX_DMA_BUSY;
+-	int i;
++	int busy, i;
++	u32 value;
+ 
+ 	wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
+ 	if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
+ 			       !(status & mask), 0, 10000))
+ 		WARN_ON_ONCE(1);
+ 
++	if (dev->hw->version == 3) {
++		wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
++		wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
++		busy = read_poll_timeout(wdma_r32, status,
++					 !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY), 0, 10000,
++					 false, dev, MTK_WDMA_PREF_TX_CFG);
++		busy = read_poll_timeout(wdma_r32, status,
++					 !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY), 0, 10000,
++					 false, dev, MTK_WDMA_PREF_RX_CFG);
++
++		wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
++		wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
++		busy = read_poll_timeout(wdma_r32, status,
++					 !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY), 0, 10000,
++					 false, dev, MTK_WDMA_WRBK_TX_CFG);
++		busy = read_poll_timeout(wdma_r32, status,
++					 !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY), 0, 10000,
++					 false, dev, MTK_WDMA_WRBK_RX_CFG);
++
++		/* Prefetch FIFO */
++		wdma_w32(dev, MTK_WDMA_PREF_TX_FIFO_CFG,
++			 MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR |
++			 MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR);
++		wdma_clr(dev, MTK_WDMA_PREF_TX_FIFO_CFG,
++			 MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR |
++			 MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR);
++		/* Core FIFO */
++		value = (MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR |
++			 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR |
++			 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR |
++			 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR);
++
++		wdma_w32(dev, MTK_WDMA_XDMA_TX_FIFO_CFG, value);
++		wdma_clr(dev, MTK_WDMA_XDMA_TX_FIFO_CFG, value);
++		/* Writeback FIFO */
++		wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0), MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
++		wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1), MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
++
++		wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0), MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
++		wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1), MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
++
++		/* Prefetch ring status */
++		wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG, MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR);
++		wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG, MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR);
++		/* Writeback ring status */
++		wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG, MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR);
++		wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG, MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR);
++	}
+ 	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
+ 	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+ 	for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
+@@ -903,7 +1006,7 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev)
+ 				MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4);
+ 
+ 			wdma_set(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
+-			//wdma_w32(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
++			wdma_set(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
+ 			if (mtk_wed_get_rx_capa(dev)) {
+ 				wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
+ 					MTK_WED_WPDMA_RX_D_PREF_EN |
+@@ -1477,13 +1580,30 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
+ 	mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, MTK_WED_WO_CMD_CHANGE_STATE,
+ 			     &state, sizeof(state), true);
+ 
++	if (dev->wlan.hwrro) {
++		wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN);
++		mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_HW_STS,
++				  MTK_WED_RX_IND_CMD_BUSY);
++		mtk_wed_reset(dev, MTK_WED_RESET_RRO_RX_TO_PG);
++	}
+ 	wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN);
+ 	busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ 				 MTK_WED_WPDMA_RX_D_RX_DRV_BUSY);
++	if (dev->hw->version == 3)
++		busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
++					 MTK_WED_WPDMA_RX_D_PREF_BUSY);
+ 	if (busy) {
+ 		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
+ 		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV);
+ 	} else {
++		if (dev->hw->version == 3) {
++			/* 1.a. Disable Prefetch HW */
++			wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_CFG, MTK_WED_WPDMA_RX_D_PREF_EN);
++			mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
++					  MTK_WED_WPDMA_RX_D_PREF_BUSY);
++			wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
++				MTK_WED_WPDMA_RX_D_RST_DRV_IDX_ALL);
++		}
+ 		wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
+ 			MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
+ 			MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
+@@ -1511,6 +1631,24 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
+ 		wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
+ 	}
+ 
++	if (dev->wlan.hwrro) {
++		/* Disable RRO MSDU Page Drv */
++		wed_clr(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, MTK_WED_RRO_MSDU_PG_DRV_EN);
++
++		/* Disable RRO Data Drv */
++		wed_clr(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN);
++
++		/* RRO MSDU Page Drv Reset */
++		wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, MTK_WED_RRO_MSDU_PG_DRV_CLR);
++		mtk_wed_poll_busy(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
++				  MTK_WED_RRO_MSDU_PG_DRV_CLR);
++
++		/* RRO Data Drv Reset */
++		wed_w32(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_CLR);
++		mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_D_CFG(2),
++				  MTK_WED_RRO_RX_D_DRV_CLR);
++	}
++
+ 	/* reset route qm */
+ 	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
+ 	busy = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+@@ -1518,8 +1656,13 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
+ 	if (busy) {
+ 		mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
+ 	} else {
+-		wed_set(dev, MTK_WED_RTQM_GLO_CFG,
+-			MTK_WED_RTQM_Q_RST);
++		if (dev->hw->version == 3) {
++			wed_set(dev, MTK_WED_RTQM_RST, BIT(0));
++			wed_clr(dev, MTK_WED_RTQM_RST, BIT(0));
++			mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
++		} else
++			wed_set(dev, MTK_WED_RTQM_GLO_CFG,
++				MTK_WED_RTQM_Q_RST);
+ 	}
+ 
+ 	/* reset tx wdma */
+@@ -1527,8 +1670,13 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
+ 
+ 	/* reset tx wdma drv */
+ 	wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN);
+-	mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+-			  MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
++	if (dev->hw->version == 3)
++		mtk_wed_poll_busy(dev, MTK_WED_WPDMA_STATUS,
++				  MTK_WED_WPDMA_STATUS_TX_DRV);
++	else
++		mtk_wed_poll_busy(dev, MTK_WED_CTRL,
++				  MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
++
+ 	mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV);
+ 
+ 	/* reset wed rx dma */
+@@ -1546,9 +1694,17 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
+ 	/* reset rx bm */
+ 	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
+ 	mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+-			   MTK_WED_CTRL_WED_RX_BM_BUSY);
++			  MTK_WED_CTRL_WED_RX_BM_BUSY);
+ 	mtk_wed_reset(dev, MTK_WED_RESET_RX_BM);
+ 
++	if (dev->wlan.hwrro) {
++		wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN);
++		mtk_wed_poll_busy(dev, MTK_WED_CTRL,
++				  MTK_WED_CTRL_WED_RX_PG_BM_BUSY);
++		wed_set(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM);
++		wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM);
++	}
++
+ 	/* wo change to enable state */
+ 	state = WO_STATE_ENABLE;
+ 	mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, MTK_WED_WO_CMD_CHANGE_STATE,
+@@ -1565,6 +1721,9 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
+ 	}
+ 
+ 	mtk_wed_free_rx_buffer(dev);
++
++	if (dev->wlan.hwrro)
++		mtk_wed_rx_page_free_buffer(dev);
+ }
+ 
+ 
+@@ -1598,18 +1757,40 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
+ 
+ 	/* 2. Reset WDMA Rx DMA/Driver_Engine */
+ 	busy = !!mtk_wdma_rx_reset(dev);
++	if (dev->hw->version == 3) {
++		val = wed_r32(dev, MTK_WED_WDMA_GLO_CFG);
++		val |= MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
++		val &= ~MTK_WED_WDMA_GLO_CFG_RX_DRV_EN;
++		wed_w32(dev, MTK_WED_WDMA_GLO_CFG, val);
++	} else
++		wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+ 
+-	wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+ 	busy = !!(busy ||
+ 		  mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG,
+-					 MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY));
++				    MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY));
++	if (dev->hw->version == 3)
++		busy = !!(busy ||
++			  mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG,
++					    MTK_WED_WDMA_RX_PREF_BUSY));
+ 
+ 	if (busy) {
+ 		mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
+ 		mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
+ 	} else {
++		if (dev->hw->version == 3) {
++			/* 1.a. Disable Prefetch HW */
++			wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, MTK_WED_WDMA_RX_PREF_EN);
++			mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG,
++					  MTK_WED_WDMA_RX_PREF_BUSY);
++			wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, MTK_WED_WDMA_RX_PREF_DDONE2_EN);
++
++			/* 2. Reset dma index */
++			wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
++				MTK_WED_WDMA_RESET_IDX_RX_ALL);
++		}
+ 		wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
+-			MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
++			MTK_WED_WDMA_RESET_IDX_RX |
++			MTK_WED_WDMA_RESET_IDX_DRV);
+ 		wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);
+ 
+ 		wed_set(dev, MTK_WED_WDMA_GLO_CFG,
+@@ -1624,9 +1805,15 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
+ 		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+ 
+ 	for (i = 0; i < 100; i++) {
+-		val = wed_r32(dev, MTK_WED_TX_BM_INTF);
+-		if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
+-			break;
++		if (dev->ver > MTK_WED_V1) {
++			val = wed_r32(dev, MTK_WED_TX_TKID_INTF);
++			if (FIELD_GET(MTK_WED_TX_TKID_INTF_TKFIFO_FDEP, val) == 0x40)
++				break;
++		} else {
++			val = wed_r32(dev, MTK_WED_TX_BM_INTF);
++			if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
++				break;
++		}
+ 	}
+ 	mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
+ 
+@@ -1635,18 +1822,20 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
+ 
+ 	/* 4. Reset WED WPDMA Tx Driver Engine */
+ 	busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
+-				      MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY);
++				 MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY);
+ 	wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ 		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+ 		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
+ 
+ 	busy = !!(busy ||
+ 		  mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
+-					 MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY));
++				    MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY));
+ 	if (busy) {
+ 		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
+ 		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
+ 		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
++		if (dev->hw->version == 3)
++			wed_w32(dev, MTK_WED_RX1_CTRL2, 0);
+ 	} else {
+ 		wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
+ 			MTK_WED_WPDMA_RESET_IDX_TX |
+@@ -1659,7 +1848,13 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
+ 		}
+ 	}
+ 
+-	if (dev->ver > MTK_WED_V1) {
++	if (dev->hw->version == 3) {
++		/* reset wed pao */
++		wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_PAO_EN);
++		mtk_wed_reset(dev, MTK_WED_RESET_TX_PAO);
++	}
++
++	if (mtk_wed_get_rx_capa(dev)) {
+ 		dev->init_done = false;
+ 		mtk_wed_rx_reset(dev);
+ 	}
+@@ -1874,7 +2069,7 @@ mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb,
+ }
+ 
+ static void
+-mtk_wed_start_hwrro(struct mtk_wed_device *dev, u32 irq_mask)
++mtk_wed_start_hwrro(struct mtk_wed_device *dev, u32 irq_mask, bool reset)
+ {
+ 	int idx, ret;
+ 
+@@ -1884,6 +2079,11 @@ mtk_wed_start_hwrro(struct mtk_wed_device *dev, u32 irq_mask)
+ 	if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hwrro)
+ 		return;
+ 
++	if (reset) {
++		wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, MTK_WED_RRO_MSDU_PG_DRV_EN);
++		return;
++	}
++
+ 	wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR);
+ 	wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, MTK_WED_RRO_MSDU_PG_DRV_CLR);
+ 
+diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+index 25be547..4379dc4 100644
+--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+@@ -42,6 +42,8 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RESET					0x008
+ #define MTK_WED_RESET_TX_BM				BIT(0)
+ #define MTK_WED_RESET_RX_BM				BIT(1)
++#define MTK_WED_RESET_RX_PG_BM				BIT(2)
++#define MTK_WED_RESET_RRO_RX_TO_PG			BIT(3)
+ #define MTK_WED_RESET_TX_FREE_AGENT			BIT(4)
+ #define MTK_WED_RESET_WPDMA_TX_DRV			BIT(8)
+ #define MTK_WED_RESET_WPDMA_RX_DRV			BIT(9)
+@@ -64,7 +66,7 @@ struct mtk_wdma_desc {
+ #define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY		BIT(3)
+ #define MTK_WED_CTRL_WED_RX_IND_CMD_EN			BIT(5)
+ #define MTK_WED_CTRL_WED_RX_PG_BM_EN			BIT(6)
+-#define MTK_WED_CTRL_WED_RX_PG_BM_BUSU			BIT(7)
++#define MTK_WED_CTRL_WED_RX_PG_BM_BUSY			BIT(7)
+ #define MTK_WED_CTRL_WED_TX_BM_EN			BIT(8)
+ #define MTK_WED_CTRL_WED_TX_BM_BUSY			BIT(9)
+ #define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN		BIT(10)
+@@ -123,6 +125,10 @@ struct mtk_wdma_desc {
+ #define MTK_WED_STATUS					0x060
+ #define MTK_WED_STATUS_TX				GENMASK(15, 8)
+ 
++#define MTK_WED_WPDMA_STATUS				0x068
++#define MTK_WED_WPDMA_STATUS_TX_DRV			GENMASK(15, 8)
++
++
+ #define MTK_WED_TX_BM_CTRL				0x080
+ #define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM			GENMASK(6, 0)
+ #define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM			GENMASK(22, 16)
+@@ -167,6 +173,9 @@ struct mtk_wdma_desc {
+ 
+ #define MTK_WED_TX_TKID_CTRL_PAUSE			BIT(28)
+ 
++#define MTK_WED_TX_TKID_INTF				0x0dc
++#define MTK_WED_TX_TKID_INTF_TKFIFO_FDEP		GENMASK(25, 16)
++
+ #define MTK_WED_TX_TKID_DYN_THR				0x0e0
+ #define MTK_WED_TX_TKID_DYN_THR_LO			GENMASK(6, 0)
+ #define MTK_WED_TX_TKID_DYN_THR_HI			GENMASK(22, 16)
+@@ -203,10 +212,11 @@ struct mtk_wdma_desc {
+ #define MTK_WED_GLO_CFG_RX_2B_OFFSET			BIT(31)
+ 
+ #define MTK_WED_RESET_IDX				0x20c
+-#define MTK_WED_RESET_IDX_TX				GENMASK(3, 0)
+-#if defined(CONFIG_MEDIATEK_NETSYS_V2)
++#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
++#define MTK_WED_RESET_IDX_TX				GENMASK(1, 0)
+ #define MTK_WED_RESET_IDX_RX				GENMASK(7, 6)
+ #else
++#define MTK_WED_RESET_IDX_TX				GENMASK(3, 0)
+ #define MTK_WED_RESET_IDX_RX				GENMASK(17, 16)
+ #endif
+ #define MTK_WED_RESET_WPDMA_IDX_RX			GENMASK(31, 30)
+@@ -221,6 +231,7 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RING_RX_DATA(_n)			(0x420 + (_n) * 0x10)
+ 
+ #define MTK_WED_SCR0					0x3c0
++#define MTK_WED_RX1_CTRL2				0x418
+ #define MTK_WED_WPDMA_INT_TRIGGER			0x504
+ #define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE		BIT(1)
+ #define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE		GENMASK(5, 4)
+@@ -336,6 +347,7 @@ struct mtk_wdma_desc {
+ 
+ #define MTK_WED_WPDMA_RX_D_RST_IDX			0x760
+ #define MTK_WED_WPDMA_RX_D_RST_CRX_IDX			GENMASK(17, 16)
++#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX_ALL		BIT(20)
+ #define MTK_WED_WPDMA_RX_D_RST_DRV_IDX			GENMASK(25, 24)
+ 
+ #define MTK_WED_WPDMA_RX_GLO_CFG			0x76c
+@@ -352,6 +364,7 @@ struct mtk_wdma_desc {
+ 
+ #define MTK_WED_WPDMA_RX_D_PREF_CFG			0x7b4
+ #define MTK_WED_WPDMA_RX_D_PREF_EN			BIT(0)
++#define MTK_WED_WPDMA_RX_D_PREF_BUSY			BIT(1)
+ #define MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE		GENMASK(12, 8)
+ #define MTK_WED_WPDMA_RX_D_PREF_LOW_THRES		GENMASK(21, 16)
+ 
+@@ -373,11 +386,13 @@ struct mtk_wdma_desc {
+ 
+ #define MTK_WED_WDMA_RX_PREF_CFG			0x950
+ #define MTK_WED_WDMA_RX_PREF_EN				BIT(0)
++#define MTK_WED_WDMA_RX_PREF_BUSY			BIT(1)
+ #define MTK_WED_WDMA_RX_PREF_BURST_SIZE			GENMASK(12, 8)
+ #define MTK_WED_WDMA_RX_PREF_LOW_THRES			GENMASK(21, 16)
+ #define MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR		BIT(24)
+ #define MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR		BIT(25)
+ #define MTK_WED_WDMA_RX_PREF_DDONE2_EN			BIT(26)
++#define MTK_WED_WDMA_RX_PREF_DDONE2_BUSY		BIT(27)
+ 
+ #define MTK_WED_WDMA_RX_PREF_FIFO_CFG			0x95C
+ #define MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR		BIT(0)
+@@ -406,6 +421,7 @@ struct mtk_wdma_desc {
+ 
+ #define MTK_WED_WDMA_RESET_IDX				0xa08
+ #define MTK_WED_WDMA_RESET_IDX_RX			GENMASK(17, 16)
++#define MTK_WED_WDMA_RESET_IDX_RX_ALL			BIT(20)
+ #define MTK_WED_WDMA_RESET_IDX_DRV			GENMASK(25, 24)
+ 
+ #define MTK_WED_WDMA_INT_CLR				0xa24
+@@ -474,21 +490,66 @@ struct mtk_wdma_desc {
+ #define MTK_WDMA_INT_MASK_RX_DELAY			BIT(30)
+ #define MTK_WDMA_INT_MASK_RX_COHERENT			BIT(31)
+ 
++#define MTK_WDMA_XDMA_TX_FIFO_CFG			0x238
++#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR		BIT(0)
++#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR		BIT(4)
++#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR		BIT(8)
++#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR		BIT(12)
++
++#define MTK_WDMA_XDMA_RX_FIFO_CFG			0x23c
++#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR		BIT(0)
++#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR		BIT(4)
++#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR		BIT(8)
++#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR		BIT(12)
++#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR		BIT(15)
++#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR		BIT(18)
++#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR		BIT(21)
++
++
++
+ #define MTK_WDMA_INT_GRP1				0x250
+ #define MTK_WDMA_INT_GRP2				0x254
+ 
+ #define MTK_WDMA_PREF_TX_CFG				0x2d0
+ #define MTK_WDMA_PREF_TX_CFG_PREF_EN			BIT(0)
++#define MTK_WDMA_PREF_TX_CFG_PREF_BUSY			BIT(1)
+ 
+ #define MTK_WDMA_PREF_RX_CFG				0x2dc
+ #define MTK_WDMA_PREF_RX_CFG_PREF_EN			BIT(0)
++#define MTK_WDMA_PREF_RX_CFG_PREF_BUSY			BIT(1)
++
++#define MTK_WDMA_PREF_RX_FIFO_CFG			0x2e0
++#define MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR		BIT(0)
++#define MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR		BIT(16)
++
++#define MTK_WDMA_PREF_TX_FIFO_CFG			0x2d4
++#define MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR		BIT(0)
++#define MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR		BIT(16)
++
++#define MTK_WDMA_PREF_SIDX_CFG				0x2e4
++#define MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR		GENMASK(3, 0)
++#define MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR		GENMASK(5, 4)
+ 
+ #define MTK_WDMA_WRBK_TX_CFG				0x300
++#define MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY			BIT(0)
+ #define MTK_WDMA_WRBK_TX_CFG_WRBK_EN			BIT(30)
+ 
++#define MTK_WDMA_WRBK_TX_FIFO_CFG(_n)			(0x304 + (_n) * 0x4)
++#define MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR		BIT(0)
++
++
+ #define MTK_WDMA_WRBK_RX_CFG				0x344
++#define MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY			BIT(0)
+ #define MTK_WDMA_WRBK_RX_CFG_WRBK_EN			BIT(30)
+ 
++#define MTK_WDMA_WRBK_RX_FIFO_CFG(_n)			(0x348 + (_n) * 0x4)
++#define MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR		BIT(0)
++
++
++#define MTK_WDMA_WRBK_SIDX_CFG				0x388
++#define MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR		GENMASK(3, 0)
++#define MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR		GENMASK(5, 4)
++
+ #define MTK_PCIE_MIRROR_MAP(n)				((n) ? 0x4 : 0x0)
+ #define MTK_PCIE_MIRROR_MAP_EN				BIT(0)
+ #define MTK_PCIE_MIRROR_MAP_WED_ID			BIT(1)
+@@ -502,6 +563,9 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RTQM_Q_DBG_BYPASS			BIT(5)
+ #define MTK_WED_RTQM_TXDMAD_FPORT			GENMASK(23, 20)
+ 
++#define MTK_WED_RTQM_RST				0xb04
++
++
+ #define MTK_WED_RTQM_IGRS0_I2HW_DMAD_CNT		0xb1c
+ #define MTK_WED_RTQM_IGRS0_I2H_DMAD_CNT(_n)		(0xb20 + (_n) * 0x4)
+ #define	MTK_WED_RTQM_IGRS0_I2HW_PKT_CNT			0xb28
+@@ -691,6 +755,9 @@ struct mtk_wdma_desc {
+ #define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR		BIT(17)
+ #define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG	GENMASK(22, 18)
+ 
++#define MTK_WED_RRO_RX_HW_STS				0xf00
++#define MTK_WED_RX_IND_CMD_BUSY				GENMASK(31, 0)
++
+ #define MTK_WED_RX_IND_CMD_CNT0				0xf20
+ #define MTK_WED_RX_IND_CMD_DBG_CNT_EN			BIT(31)
+ 
+diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
+index 2b389e8..bb02ba5 100644
+--- a/include/linux/soc/mediatek/mtk_wed.h
++++ b/include/linux/soc/mediatek/mtk_wed.h
+@@ -240,7 +240,7 @@ struct mtk_wed_ops {
+ 	void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
+ 	void (*ppe_check)(struct mtk_wed_device *dev, struct sk_buff *skb,
+ 			  u32 reason, u32 hash);
+-	void (*start_hwrro)(struct mtk_wed_device *dev, u32 irq_mask);
++	void (*start_hwrro)(struct mtk_wed_device *dev, u32 irq_mask, bool reset);
+ };
+ 
+ extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
+@@ -317,8 +317,8 @@ mtk_wed_device_support_pao(struct mtk_wed_device *dev)
+ 	(_dev)->ops->reset_dma(_dev)
+ #define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) \
+ 	(_dev)->ops->ppe_check(_dev, _skb, _reason, _hash)
+-#define mtk_wed_device_start_hwrro(_dev, _mask) \
+-	(_dev)->ops->start_hwrro(_dev, _mask)
++#define mtk_wed_device_start_hwrro(_dev, _mask, _reset) \
++	(_dev)->ops->start_hwrro(_dev, _mask, _reset)
+ 
+ #else
+ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
+-- 
+2.18.0
+
diff --git a/recipes-kernel/linux/linux-mediatek_5.4.bb b/recipes-kernel/linux/linux-mediatek_5.4.bb
index aa61a32..f6e8bc0 100644
--- a/recipes-kernel/linux/linux-mediatek_5.4.bb
+++ b/recipes-kernel/linux/linux-mediatek_5.4.bb
@@ -5,6 +5,7 @@
 FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}-${PV}/mediatek/patches-5.4:"
 FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}-${PV}/mediatek/flow_patch:"
 FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}-${PV}/mediatek/nf_hnat:"
+FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}-${PV}/mediatek/wed3:"
 
 KBRANCH ?= "linux-5.4.y"
 
@@ -46,7 +47,7 @@
     ${@bb.utils.contains('DISTRO_FEATURES','emmc','file://rdkb_cfg/emmc.cfg','',d)} \
 "
 
-SRC_URI_append_mt7986 += " \
+SRC_URI_append += " \
     ${@bb.utils.contains('DISTRO_FEATURES','flow_offload','file://rdkb_cfg/bridge_netfilter.cfg','',d)} \
 "
 
@@ -113,6 +114,14 @@
         fi
 }
 
+do_filogic_patches_append_mt7988() {
+    if [ ! -e wed3_patch_applied ]; then
+        if [ "$DISTRO_FlowBlock_ENABLED" = 'true' ]; then
+            for i in ${WORKDIR}/mediatek/wed3/*.patch; do patch -p1 < "$i"; done
+        fi
+        touch wed3_patch_applied
+    fi
+}
 addtask filogic_patches after do_patch before do_compile
 
 KERNEL_MODULE_AUTOLOAD += "${@bb.utils.contains('DISTRO_FEATURES','logan','mtkhnat nf_flow_table_hw','',d)}"
\ No newline at end of file