[rdkb][common][bsp][Refactor and sync kernel from openwrt]

[Description]
5e36163 [MAC80211][hnat][Fix patch fail issue]
933cc16 [Update panther/cheetah to use ADMA v1]
af4b0a9 [kernel][mt7988][eth][Update 10G interface dynamically change support for ESP-T5-R SFP+ modules]
3d29ccb [kernel][common][eth][Add support for coherent DMA]
216985a [MAC80211][hnat][Move Netfilter Netlink Ftnl package to feed]
48f7943 [kernel][common][eth][Introduce device register map]
364fa65 [kernel][mt7981/mt7988][eth][phy: mediatek-ge: Add EEE fine-tuning]
7a6e89a [kernel][mt7981][eth][phy: mediatek-ge: Add hardware v7 enhancement]
5264b63 [kernel][mt7988][eth][Add 10G interface dynamically change support for ESP-T5-R SFP+ modules]
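
Note: the core of 48f7943 is replacing hard-coded MTK_* register constants with a
per-SoC lookup through eth->soc->reg_map (see struct mtk_reg_map in the diff below).
The following is a minimal, stand-alone sketch of that indirection pattern; the field
names and the MT7986 offsets are taken from the diff, while everything else (the
reduced struct, main(), printf) is illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    /* Reduced sketch of the per-SoC register map; fields mirror a subset
     * of struct mtk_reg_map from the patch, offsets match mt7986_reg_map. */
    struct reg_map {
            uint32_t tx_irq_mask;
            uint32_t tx_irq_status;
            struct {
                    uint32_t glo_cfg;
                    uint32_t irq_status;
                    uint32_t irq_mask;
            } pdma;
    };

    static const struct reg_map mt7986_map = {
            .tx_irq_mask    = 0x461c,
            .tx_irq_status  = 0x4618,
            .pdma = {
                    .glo_cfg    = 0x4204,
                    .irq_status = 0x4220,
                    .irq_mask   = 0x4228,
            },
    };

    /* Stand-in for mtk_soc_data: the map is selected per SoC at probe time. */
    struct soc_data {
            const struct reg_map *reg_map;
    };

    int main(void)
    {
            struct soc_data soc = { .reg_map = &mt7986_map };

            /* Callers read offsets through the map instead of fixed macros. */
            printf("pdma irq_mask offset: 0x%04x\n",
                   (unsigned int)soc.reg_map->pdma.irq_mask);
            return 0;
    }

Call sites that previously used fixed macros such as MTK_PDMA_INT_MASK or
MTK_QTX_CTX_PTR now fetch the offset from the selected map, which is what allows a
single driver to cover the MT7628/MT7986/MT7988 register layouts in this sync.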

[Release-log]

Change-Id: I7f63ac3d762247bbfa5041b6f74f87ca306f4138
diff --git a/recipes-devtools/flowtable/files/src/api.c b/recipes-devtools/flowtable/files/src/api.c
index 17b88b4..64bbbfe 100644
--- a/recipes-devtools/flowtable/files/src/api.c
+++ b/recipes-devtools/flowtable/files/src/api.c
@@ -17,7 +17,7 @@
 	printf("\n");
 }
 
-struct ftnl_handle* ftnl_open(void)
+struct ftnl_handle *ftnl_open(void)
 {
 	struct ftnl_handle *h = NULL;
 
@@ -50,24 +50,30 @@
 	free(h);
 }
 
-static void build_tuple(struct nlmsghdr *nlh,size_t size, struct flow_tuple *tuple)
+static void build_tuple(struct nlmsghdr *nlh, size_t size,
+			struct flow_tuple *tuple)
 {
 	struct nfattr *nest_tuple, *nest_ip, *nest_proto;
 
 	nest_tuple = nfnl_nest(nlh, size, FTA_TUPLE);
 
 	nest_ip = nfnl_nest(nlh, size, FTA_TUPLE_IP);
-	nfnl_addattr_l(nlh, size, FTA_IP_V4_SRC, &tuple->sip4, sizeof(uint32_t));
-	nfnl_addattr_l(nlh, size, FTA_IP_V4_DST, &tuple->dip4, sizeof(uint32_t));
+	nfnl_addattr_l(nlh, size, FTA_IP_V4_SRC,
+		       &tuple->sip4, sizeof(uint32_t));
+	nfnl_addattr_l(nlh, size, FTA_IP_V4_DST,
+		       &tuple->dip4, sizeof(uint32_t));
 	nfnl_nest_end(nlh, nest_ip);
 
 	nest_proto = nfnl_nest(nlh, size, FTA_TUPLE_PROTO);
-	nfnl_addattr_l(nlh, size, FTA_PROTO_NUM, &tuple->proto, sizeof(uint8_t));
-	nfnl_addattr_l(nlh, size, FTA_PROTO_SPORT, &tuple->sport, sizeof(uint16_t));
-	nfnl_addattr_l(nlh, size, FTA_PROTO_DPORT, &tuple->dport, sizeof(uint16_t));
+	nfnl_addattr_l(nlh, size, FTA_PROTO_NUM,
+		       &tuple->proto, sizeof(uint8_t));
+	nfnl_addattr_l(nlh, size, FTA_PROTO_SPORT,
+		       &tuple->sport, sizeof(uint16_t));
+	nfnl_addattr_l(nlh, size, FTA_PROTO_DPORT,
+		       &tuple->dport, sizeof(uint16_t));
 	nfnl_nest_end(nlh, nest_proto);
 
-	nfnl_nest_end(nlh,nest_tuple);
+	nfnl_nest_end(nlh, nest_tuple);
 //	attr_dump(nest_tuple);
 }
 
diff --git a/recipes-devtools/flowtable/files/src/ftnl.c b/recipes-devtools/flowtable/files/src/ftnl.c
index 30352b2..d021bb3 100644
--- a/recipes-devtools/flowtable/files/src/ftnl.c
+++ b/recipes-devtools/flowtable/files/src/ftnl.c
@@ -15,14 +15,14 @@
 	printf("ftnl -D [sip] [dip] [proto] [sport] [dport]\n");
 }
 
-int main (int argc, char *argv[])
+int main(int argc, char *argv[])
 {
 	struct ftnl_handle *h;
 	struct flow_tuple tuple = {0};
 	int msg = -1;
 	int c;
 	int ret = -1;
-	const char* optstring = "FD";
+	const char *optstring = "FD";
 	struct option opts[] = {
 		{"sip", required_argument, NULL, 's'},
 		{"dip", required_argument, NULL, 'd'},
@@ -39,51 +39,51 @@
 	/* parse arg */
 	while ((c = getopt_long(argc, argv, optstring, opts, NULL)) != -1) {
 		switch (c) {
-			case 'F':
-				msg = FT_MSG_FLUSH;
-				break;
-			case 'D':
-				msg = FT_MSG_DEL;
-				break;
-			case 's':
-				inet_aton(optarg, &tuple.sip4);
-				break;
-			case 'd':
-				inet_aton(optarg, &tuple.dip4);
-				break;
-			case 'p':
-				if (!strcmp(optarg, "tcp"))
-					tuple.proto = IPPROTO_TCP;
-				else if (!strcmp(optarg, "udp"))
-					tuple.proto = IPPROTO_UDP;
-				else {
-					printf("proto bad value "
-					       "pls set proto to udp or tcp "
-					       "arg : %s\n", optarg);
-					goto out;
-				}
-				break;
-			case 'm':
-				tuple.sport = htons(atoi(optarg));
-				break;
-			case 'n':
-				tuple.dport = htons(atoi(optarg));
-				break;
-			default:
-				usage();
+		case 'F':
+			msg = FT_MSG_FLUSH;
+			break;
+		case 'D':
+			msg = FT_MSG_DEL;
+			break;
+		case 's':
+			inet_aton(optarg, &tuple.sip4);
+			break;
+		case 'd':
+			inet_aton(optarg, &tuple.dip4);
+			break;
+		case 'p':
+			if (!strcmp(optarg, "tcp"))
+				tuple.proto = IPPROTO_TCP;
+			else if (!strcmp(optarg, "udp"))
+				tuple.proto = IPPROTO_UDP;
+			else {
+				printf("proto bad value...\n");
+				printf("pls set proto to udp or tcp arg : %s\n",
+				       optarg);
 				goto out;
+			}
+			break;
+		case 'm':
+			tuple.sport = htons(atoi(optarg));
+			break;
+		case 'n':
+			tuple.dport = htons(atoi(optarg));
+			break;
+		default:
+			usage();
+			goto out;
 		}
 	}
 
 	switch (msg) {
-		case FT_MSG_FLUSH:
-			ftnl_flush_table(h);
-			break;
-		case FT_MSG_DEL:
-			ftnl_del_flow(h, &tuple);
-			break;
-		default:
-			break;
+	case FT_MSG_FLUSH:
+		ftnl_flush_table(h);
+		break;
+	case FT_MSG_DEL:
+		ftnl_del_flow(h, &tuple);
+		break;
+	default:
+		break;
 	}
 
 out:
diff --git a/recipes-devtools/flowtable/files/src/netfilter_flowtable.h b/recipes-devtools/flowtable/files/src/netfilter_flowtable.h
index 3ea8916..6a50300 100644
--- a/recipes-devtools/flowtable/files/src/netfilter_flowtable.h
+++ b/recipes-devtools/flowtable/files/src/netfilter_flowtable.h
@@ -15,7 +15,7 @@
 	unsigned short int dport;
 };
 
-enum ft_msg_types{
+enum ft_msg_types {
 	FT_MSG_DEL,
 	FT_MSG_ADD,	//not support now
 	FT_MSG_FLUSH,
@@ -57,7 +57,7 @@
 };
 #define FTA_PROTO_MAX (__FTA_PROTO_MAX - 1)
 
-struct ftnl_handle* ftnl_open(void);
+struct ftnl_handle *ftnl_open(void);
 void ftnl_close(struct ftnl_handle *h);
 int ftnl_flush_table(struct ftnl_handle *h);
 int ftnl_del_flow(struct ftnl_handle *h, struct flow_tuple *tuple);
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.c b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.c
index 3e93200..c23f868 100755
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.c
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.c
@@ -924,8 +924,7 @@
 			   rx_ring->rxd1, rx_ring->rxd2,
 			   rx_ring->rxd3, rx_ring->rxd4);
 
-		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
-		    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
+		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
 			seq_printf(seq, " %08x %08x %08x %08x",
 				   rx_ring->rxd5, rx_ring->rxd6,
 				   rx_ring->rxd7, rx_ring->rxd8);
@@ -1120,8 +1119,7 @@
 	struct mtk_eth *eth = g_eth;
 	u32 idx, agg_cnt, agg_size;
 
-	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
-	    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
 		idx = ring_no - 4;
 		agg_cnt = RX_DMA_GET_AGG_CNT_V2(rxd->rxd6);
 	} else {
@@ -1145,8 +1143,7 @@
 	struct mtk_eth *eth = g_eth;
 	u32 idx, flush_reason;
 
-	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
-	    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
 		idx = ring_no - 4;
 		flush_reason = RX_DMA_GET_FLUSH_RSN_V2(rxd->rxd6);
 	} else {
@@ -1396,8 +1393,7 @@
 {
 	struct mtk_eth *eth = g_eth;
 
-	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
-	    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
 		hw_lro_stats_read_v2(seq, v);
 	else
 		hw_lro_stats_read_v1(seq, v);
@@ -1668,8 +1664,7 @@
 	seq_puts(seq, "[4] = hwlro_ring_enable_ctrl\n");
 	seq_puts(seq, "[5] = hwlro_stats_enable_ctrl\n\n");
 
-	if (MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_V2) ||
-	    MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_V3)) {
+	if (MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_RX_V2)) {
 		for (i = 1; i <= 8; i++)
 			hw_lro_auto_tlb_dump_v2(seq, i);
 	} else {
@@ -1705,7 +1700,8 @@
 		    ((reg_op1 >> MTK_LRO_RING_AGE_TIME_L_OFFSET) & 0x3ff);
 		seq_printf(seq,
 			   "Ring[%d]: MAX_AGG_CNT=%d, AGG_TIME=%d, AGE_TIME=%d, Threshold=%d\n",
-			   (MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_V1)) ? i : i+3,
+			   (MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_RX_V2)) ?
+			   i : i+3,
 			   agg_cnt, agg_time, age_time, reg_op4);
 	}
 
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_path.c b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_path.c
index 3144511..cc4b1d5 100755
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_path.c
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_path.c
@@ -162,21 +162,6 @@
 		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
 				   SYSCFG0_SGMII_MASK, val);
 
-	/* Enable GDM/XGDM Path */
-	if (eth->mac[mac_id]->type == MTK_GDM_TYPE) {
-		val = mtk_r32(eth, MTK_GDMA_EG_CTRL(mac_id));
-		mtk_w32(eth, val & ~MTK_GDMA_XGDM_SEL,
-			MTK_GDMA_EG_CTRL(mac_id));
-	} else if (eth->mac[mac_id]->type == MTK_XGDM_TYPE) {
-		val = mtk_r32(eth, MTK_GDMA_EG_CTRL(mac_id));
-		mtk_w32(eth, val | MTK_GDMA_XGDM_SEL,
-			MTK_GDMA_EG_CTRL(mac_id));
-
-		val = mtk_r32(eth, MTK_XGMAC_STS(mac_id));
-		mtk_w32(eth, val | (MTK_XGMAC_FORCE_LINK << 16),
-			MTK_XGMAC_STS(mac_id));
-	}
-
 	spin_unlock(&eth->syscfg0_lock);
 
 	dev_dbg(eth->dev, "path %s in %s updated = %d\n",
@@ -271,11 +256,6 @@
 		}
 	}
 
-	/* Enable XGDM Path */
-	val = mtk_r32(eth, MTK_GDMA_EG_CTRL(mac_id));
-	val |= MTK_GDMA_XGDM_SEL;
-	mtk_w32(eth, val, MTK_GDMA_EG_CTRL(mac_id));
-
 	dev_dbg(eth->dev, "path %s in %s updated = %d\n",
 		mtk_eth_path_name(path), __func__, updated);
 
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 6f3b918..782ff76 100755
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -9,6 +9,7 @@
 #include <linux/of_device.h>
 #include <linux/of_mdio.h>
 #include <linux/of_net.h>
+#include <linux/of_address.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
 #include <linux/clk.h>
@@ -39,6 +40,7 @@
 static int mtk_msg_level = -1;
 atomic_t reset_lock = ATOMIC_INIT(0);
 atomic_t force = ATOMIC_INIT(0);
+atomic_t reset_pending = ATOMIC_INIT(0);
 
 module_param_named(msg_level, mtk_msg_level, int, 0);
 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
@@ -47,6 +49,174 @@
 #define MTK_ETHTOOL_STAT(x) { #x, \
 			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
 
+static const struct mtk_reg_map mtk_reg_map = {
+	.tx_irq_mask		= 0x1a1c,
+	.tx_irq_status		= 0x1a18,
+	.pdma = {
+		.rx_ptr		= 0x0900,
+		.rx_cnt_cfg	= 0x0904,
+		.pcrx_ptr	= 0x0908,
+		.glo_cfg	= 0x0a04,
+		.rst_idx	= 0x0a08,
+		.delay_irq	= 0x0a0c,
+		.irq_status	= 0x0a20,
+		.irq_mask	= 0x0a28,
+		.int_grp	= 0x0a50,
+		.int_grp2	= 0x0a54,
+	},
+	.qdma = {
+		.qtx_cfg	= 0x1800,
+		.qtx_sch	= 0x1804,
+		.rx_ptr		= 0x1900,
+		.rx_cnt_cfg	= 0x1904,
+		.qcrx_ptr	= 0x1908,
+		.glo_cfg	= 0x1a04,
+		.rst_idx	= 0x1a08,
+		.delay_irq	= 0x1a0c,
+		.fc_th		= 0x1a10,
+		.tx_sch_rate	= 0x1a14,
+		.int_grp	= 0x1a20,
+		.int_grp2	= 0x1a24,
+		.hred2		= 0x1a44,
+		.ctx_ptr	= 0x1b00,
+		.dtx_ptr	= 0x1b04,
+		.crx_ptr	= 0x1b10,
+		.drx_ptr	= 0x1b14,
+		.fq_head	= 0x1b20,
+		.fq_tail	= 0x1b24,
+		.fq_count	= 0x1b28,
+		.fq_blen	= 0x1b2c,
+	},
+	.gdm1_cnt		= 0x2400,
+	.gdma_to_ppe0		= 0x4444,
+	.ppe_base = {
+		[0]		= 0x0c00,
+	},
+	.wdma_base = {
+		[0]		= 0x2800,
+		[1]		= 0x2c00,
+	},
+};
+
+static const struct mtk_reg_map mt7628_reg_map = {
+	.tx_irq_mask		= 0x0a28,
+	.tx_irq_status		= 0x0a20,
+	.pdma = {
+		.rx_ptr		= 0x0900,
+		.rx_cnt_cfg	= 0x0904,
+		.pcrx_ptr	= 0x0908,
+		.glo_cfg	= 0x0a04,
+		.rst_idx	= 0x0a08,
+		.delay_irq	= 0x0a0c,
+		.irq_status	= 0x0a20,
+		.irq_mask	= 0x0a28,
+		.int_grp	= 0x0a50,
+		.int_grp2	= 0x0a54,
+	},
+};
+
+static const struct mtk_reg_map mt7986_reg_map = {
+	.tx_irq_mask		= 0x461c,
+	.tx_irq_status		= 0x4618,
+	.pdma = {
+		.rx_ptr		= 0x4100,
+		.rx_cnt_cfg	= 0x4104,
+		.pcrx_ptr	= 0x4108,
+		.glo_cfg	= 0x4204,
+		.rst_idx	= 0x4208,
+		.delay_irq	= 0x420c,
+		.irq_status	= 0x4220,
+		.irq_mask	= 0x4228,
+		.int_grp	= 0x4250,
+		.int_grp2	= 0x4254,
+	},
+	.qdma = {
+		.qtx_cfg	= 0x4400,
+		.qtx_sch	= 0x4404,
+		.rx_ptr		= 0x4500,
+		.rx_cnt_cfg	= 0x4504,
+		.qcrx_ptr	= 0x4508,
+		.glo_cfg	= 0x4604,
+		.rst_idx	= 0x4608,
+		.delay_irq	= 0x460c,
+		.fc_th		= 0x4610,
+		.int_grp	= 0x4620,
+		.int_grp2	= 0x4624,
+		.hred2		= 0x4644,
+		.ctx_ptr	= 0x4700,
+		.dtx_ptr	= 0x4704,
+		.crx_ptr	= 0x4710,
+		.drx_ptr	= 0x4714,
+		.fq_head	= 0x4720,
+		.fq_tail	= 0x4724,
+		.fq_count	= 0x4728,
+		.fq_blen	= 0x472c,
+		.tx_sch_rate	= 0x4798,
+	},
+	.gdm1_cnt		= 0x1c00,
+	.gdma_to_ppe0		= 0x3333,
+	.ppe_base = {
+		[0]		= 0x2000,
+		[1]		= 0x2400,
+	},
+	.wdma_base = {
+		[0]		= 0x4800,
+		[1]		= 0x4c00,
+	},
+};
+
+static const struct mtk_reg_map mt7988_reg_map = {
+	.tx_irq_mask		= 0x461c,
+	.tx_irq_status		= 0x4618,
+	.pdma = {
+		.rx_ptr		= 0x6900,
+		.rx_cnt_cfg	= 0x6904,
+		.pcrx_ptr	= 0x6908,
+		.glo_cfg	= 0x6a04,
+		.rst_idx	= 0x6a08,
+		.delay_irq	= 0x6a0c,
+		.irq_status	= 0x6a20,
+		.irq_mask	= 0x6a28,
+		.int_grp	= 0x6a50,
+		.int_grp2	= 0x6a54,
+	},
+	.qdma = {
+		.qtx_cfg	= 0x4400,
+		.qtx_sch	= 0x4404,
+		.rx_ptr		= 0x4500,
+		.rx_cnt_cfg	= 0x4504,
+		.qcrx_ptr	= 0x4508,
+		.glo_cfg	= 0x4604,
+		.rst_idx	= 0x4608,
+		.delay_irq	= 0x460c,
+		.fc_th		= 0x4610,
+		.int_grp	= 0x4620,
+		.int_grp2	= 0x4624,
+		.hred2		= 0x4644,
+		.ctx_ptr	= 0x4700,
+		.dtx_ptr	= 0x4704,
+		.crx_ptr	= 0x4710,
+		.drx_ptr	= 0x4714,
+		.fq_head	= 0x4720,
+		.fq_tail	= 0x4724,
+		.fq_count	= 0x4728,
+		.fq_blen	= 0x472c,
+		.tx_sch_rate	= 0x4798,
+	},
+	.gdm1_cnt		= 0x1c00,
+	.gdma_to_ppe0		= 0x3333,
+	.ppe_base = {
+		[0]		= 0x2000,
+		[1]		= 0x2400,
+		[2]		= 0x2c00,
+	},
+	.wdma_base = {
+		[0]		= 0x4800,
+		[1]		= 0x4c00,
+		[2]		= 0x5000,
+	},
+};
+
 /* strings used by ethtool */
 static const struct mtk_ethtool_stats {
 	char str[ETH_GSTRING_LEN];
@@ -281,7 +451,7 @@
 
 	/* Force Port1 XGMAC Link Up */
 	val = mtk_r32(eth, MTK_XGMAC_STS(MTK_GMAC1_ID));
-	mtk_w32(eth, val | MTK_XGMAC_FORCE_LINK,
+	mtk_w32(eth, val | MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID),
 		MTK_XGMAC_STS(MTK_GMAC1_ID));
 
 	/* Adjust GSW bridge IPG to 11*/
@@ -337,7 +507,8 @@
 					   phylink_config);
 	struct mtk_eth *eth = mac->hw;
 	u32 sid, i;
-	int val = 0, ge_mode, err = 0;
+	int val = 0, ge_mode, force_link, err = 0;
+	unsigned int mac_type = mac->type;
 
 	/* MT76x8 has no hardware settings between for the MAC */
 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
@@ -358,6 +529,7 @@
 		case PHY_INTERFACE_MODE_MII:
 		case PHY_INTERFACE_MODE_REVMII:
 		case PHY_INTERFACE_MODE_RMII:
+			mac->type = MTK_GDM_TYPE;
 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
 				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
 				if (err)
@@ -367,6 +539,7 @@
 		case PHY_INTERFACE_MODE_1000BASEX:
 		case PHY_INTERFACE_MODE_2500BASEX:
 		case PHY_INTERFACE_MODE_SGMII:
+			mac->type = MTK_GDM_TYPE;
 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
 				err = mtk_gmac_sgmii_path_setup(eth, mac->id);
 				if (err)
@@ -374,6 +547,7 @@
 			}
 			break;
 		case PHY_INTERFACE_MODE_GMII:
+			mac->type = MTK_GDM_TYPE;
 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
 				err = mtk_gmac_gephy_path_setup(eth, mac->id);
 				if (err)
@@ -381,6 +555,7 @@
 			}
 			break;
 		case PHY_INTERFACE_MODE_XGMII:
+			mac->type = MTK_XGDM_TYPE;
 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_XGMII)) {
 				err = mtk_gmac_xgmii_path_setup(eth, mac->id);
 				if (err)
@@ -390,6 +565,7 @@
 		case PHY_INTERFACE_MODE_USXGMII:
 		case PHY_INTERFACE_MODE_10GKR:
 		case PHY_INTERFACE_MODE_5GBASER:
+			mac->type = MTK_XGDM_TYPE;
 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_USXGMII)) {
 				err = mtk_gmac_usxgmii_path_setup(eth, mac->id);
 				if (err)
@@ -521,13 +697,47 @@
 			case MTK_GMAC1_ID:
 				mtk_setup_bridge_switch(eth);
 				break;
+			case MTK_GMAC2_ID:
+				force_link = (mac->interface ==
+					      PHY_INTERFACE_MODE_XGMII) ?
+					      MTK_XGMAC_FORCE_LINK(mac->id) : 0;
+				val = mtk_r32(eth, MTK_XGMAC_STS(mac->id));
+				mtk_w32(eth, val | force_link,
+					MTK_XGMAC_STS(mac->id));
+				break;
+			case MTK_GMAC3_ID:
+				val = mtk_r32(eth, MTK_XGMAC_STS(mac->id));
+				mtk_w32(eth,
+					val | MTK_XGMAC_FORCE_LINK(mac->id),
+					MTK_XGMAC_STS(mac->id));
+				break;
+			}
+		}
+	} else if (mac->type == MTK_GDM_TYPE) {
+		val = mtk_r32(eth, MTK_GDMA_EG_CTRL(mac->id));
+		mtk_w32(eth, val & ~MTK_GDMA_XGDM_SEL,
+			MTK_GDMA_EG_CTRL(mac->id));
+
+		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
+			switch (mac->id) {
+			case MTK_GMAC2_ID:
 			case MTK_GMAC3_ID:
 				val = mtk_r32(eth, MTK_XGMAC_STS(mac->id));
-				mtk_w32(eth, val | MTK_XGMAC_FORCE_LINK,
+				mtk_w32(eth,
+					val & ~MTK_XGMAC_FORCE_LINK(mac->id),
 					MTK_XGMAC_STS(mac->id));
 				break;
 			}
 		}
+
+		if (mac->type != mac_type) {
+			if (atomic_read(&reset_pending) == 0) {
+				atomic_inc(&force);
+				schedule_work(&eth->pending_work);
+				atomic_inc(&reset_pending);
+			} else
+				atomic_dec(&reset_pending);
+		}
 	}
 
 	return;
@@ -571,6 +781,7 @@
 			break;
 		}
 
+		state->interface = mac->interface;
 		state->link = FIELD_GET(MTK_USXGMII_PCS_LINK, sts);
 	} else if (mac->type == MTK_GDM_TYPE) {
 		struct mtk_eth *eth = mac->hw;
@@ -581,6 +792,7 @@
 
 		regmap_read(ss->regmap_sgmii[id], SGMSYS_PCS_CONTROL_1, &val);
 
+		state->interface = mac->interface;
 		state->link = FIELD_GET(SGMII_LINK_STATYS, val);
 
 		if (FIELD_GET(SGMII_AN_ENABLE, val)) {
@@ -955,8 +1167,8 @@
 	u32 val;
 
 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
-	val = mtk_r32(eth, eth->tx_int_mask_reg);
-	mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
+	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
+	mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
 }
 
@@ -966,8 +1178,8 @@
 	u32 val;
 
 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
-	val = mtk_r32(eth, eth->tx_int_mask_reg);
-	mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
+	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
+	mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
 }
 
@@ -977,8 +1189,8 @@
 	u32 val;
 
 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
-	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
-	mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
+	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
+	mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
 }
 
@@ -988,8 +1200,8 @@
 	u32 val;
 
 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
-	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
-	mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
+	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
+	mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
 }
 
@@ -1028,46 +1240,59 @@
 void mtk_stats_update_mac(struct mtk_mac *mac)
 {
 	struct mtk_eth *eth = mac->hw;
+	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
-	unsigned int base = MTK_GDM1_TX_GBCNT;
+	unsigned int offs = hw_stats->reg_offset;
 	u64 stats;
 
-	base += hw_stats->reg_offset;
-
 	u64_stats_update_begin(&hw_stats->syncp);
 
-	hw_stats->rx_bytes += mtk_r32(mac->hw, base);
-	stats =  mtk_r32(mac->hw, base + 0x04);
+	hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
+	stats =  mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
 	if (stats)
 		hw_stats->rx_bytes += (stats << 32);
-	hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
-	hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
-	hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
-	hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
-	hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
-	hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
+	hw_stats->rx_packets +=
+		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x08 + offs);
+	hw_stats->rx_overflow +=
+		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
+	hw_stats->rx_fcs_errors +=
+		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
+	hw_stats->rx_short_errors +=
+		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
+	hw_stats->rx_long_errors +=
+		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
+	hw_stats->rx_checksum_errors +=
+		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
 	hw_stats->rx_flow_control_packets +=
-					mtk_r32(mac->hw, base + 0x24);
+		mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
 
 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
-		hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x50);
-		hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x54);
-		hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x40);
-		stats =  mtk_r32(mac->hw, base + 0x44);
+		hw_stats->tx_skip +=
+			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x50 + offs);
+		hw_stats->tx_collisions +=
+			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x54 + offs);
+		hw_stats->tx_bytes +=
+			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x40 + offs);
+		stats =  mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x44 + offs);
 		if (stats)
 			hw_stats->tx_bytes += (stats << 32);
-		hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x48);
-		u64_stats_update_end(&hw_stats->syncp);
+		hw_stats->tx_packets +=
+			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x48 + offs);
 	} else {
-		hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
-		hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
-		hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
-		stats =  mtk_r32(mac->hw, base + 0x34);
+		hw_stats->tx_skip +=
+			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
+		hw_stats->tx_collisions +=
+			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
+		hw_stats->tx_bytes +=
+			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
+		stats =  mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
 		if (stats)
 			hw_stats->tx_bytes += (stats << 32);
-		hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
-		u64_stats_update_end(&hw_stats->syncp);
+		hw_stats->tx_packets +=
+			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
 	}
+
+	u64_stats_update_end(&hw_stats->syncp);
 }
 
 static void mtk_stats_update(struct mtk_eth *eth)
@@ -1149,8 +1374,7 @@
 	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
 	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
 
-	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
-	    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
 		rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
 		rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
 		rxd->rxd7 = READ_ONCE(dma_rxd->rxd7);
@@ -1169,7 +1393,7 @@
 	int i;
 
 	if (!eth->soc->has_sram) {
-		eth->scratch_ring = dma_alloc_coherent(eth->dev,
+		eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
 					       cnt * soc->txrx.txd_size,
 					       &eth->phy_scratch_ring,
 					       GFP_KERNEL);
@@ -1187,10 +1411,10 @@
 	if (unlikely(!eth->scratch_head))
 		return -ENOMEM;
 
-	dma_addr = dma_map_single(eth->dev,
+	dma_addr = dma_map_single(eth->dma_dev,
 				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
 				  DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+	if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
 		return -ENOMEM;
 
 	phy_ring_tail = eth->phy_scratch_ring +
@@ -1217,10 +1441,10 @@
 		}
 	}
 
-	mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
-	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
-	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
-	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
+	mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
+	mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
+	mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
+	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
 
 	return 0;
 }
@@ -1254,26 +1478,26 @@
 {
 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
 		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
-			dma_unmap_single(eth->dev,
+			dma_unmap_single(eth->dma_dev,
 					 dma_unmap_addr(tx_buf, dma_addr0),
 					 dma_unmap_len(tx_buf, dma_len0),
 					 DMA_TO_DEVICE);
 		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
-			dma_unmap_page(eth->dev,
+			dma_unmap_page(eth->dma_dev,
 				       dma_unmap_addr(tx_buf, dma_addr0),
 				       dma_unmap_len(tx_buf, dma_len0),
 				       DMA_TO_DEVICE);
 		}
 	} else {
 		if (dma_unmap_len(tx_buf, dma_len0)) {
-			dma_unmap_page(eth->dev,
+			dma_unmap_page(eth->dma_dev,
 				       dma_unmap_addr(tx_buf, dma_addr0),
 				       dma_unmap_len(tx_buf, dma_len0),
 				       DMA_TO_DEVICE);
 		}
 
 		if (dma_unmap_len(tx_buf, dma_len1)) {
-			dma_unmap_page(eth->dev,
+			dma_unmap_page(eth->dma_dev,
 				       dma_unmap_addr(tx_buf, dma_addr1),
 				       dma_unmap_len(tx_buf, dma_len1),
 				       DMA_TO_DEVICE);
@@ -1514,9 +1738,9 @@
 	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
 	memset(itx_buf, 0, sizeof(*itx_buf));
 
-	txd_info.addr = dma_map_single(eth->dev, skb->data, txd_info.size,
+	txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
 				       DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(eth->dev, txd_info.addr)))
+	if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
 		return -ENOMEM;
 
 	mtk_tx_set_dma_desc(skb, dev, itxd, &txd_info);
@@ -1557,10 +1781,11 @@
 			txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
 			txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
 					!(frag_size - txd_info.size);
-			txd_info.addr = skb_frag_dma_map(eth->dev, frag,
+			txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
 							 offset, txd_info.size,
 							 DMA_TO_DEVICE);
-			if (unlikely(dma_mapping_error(eth->dev, txd_info.addr)))
+			if (unlikely(dma_mapping_error(eth->dma_dev,
+						       txd_info.addr)))
  				goto err_dma;
 
 			mtk_tx_set_dma_desc(skb, dev, txd, &txd_info);
@@ -1607,7 +1832,7 @@
 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
 		if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
 		    !netdev_xmit_more())
-			mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
+			mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
 	} else {
 		int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
 					     ring->dma_size);
@@ -1794,8 +2019,8 @@
 		goto rx_done;
 
 	while (done < budget) {
+		unsigned int pktlen, *rxdcsum;
 		struct net_device *netdev = NULL;
-		unsigned int pktlen;
 		dma_addr_t dma_addr = 0;
 		int mac = 0;
 
@@ -1816,8 +2041,7 @@
 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
 			mac = 0;
 		} else {
-			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
-			    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
+			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
 				switch (RX_DMA_GET_SPORT_V2(trxd.rxd5)) {
 				case PSE_GDM1_PORT:
 				case PSE_GDM2_PORT:
@@ -1847,12 +2071,12 @@
 			netdev->stats.rx_dropped++;
 			goto release_desc;
 		}
-		dma_addr = dma_map_single(eth->dev,
+		dma_addr = dma_map_single(eth->dma_dev,
 					  new_data + NET_SKB_PAD +
 					  eth->ip_align,
 					  ring->buf_size,
 					  DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
+		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
 			skb_free_frag(new_data);
 			netdev->stats.rx_dropped++;
 			goto release_desc;
@@ -1861,7 +2085,7 @@
 		addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
 			  ((u64)(trxd.rxd2 & 0xf)) << 32 : 0;
 
-		dma_unmap_single(eth->dev,
+		dma_unmap_single(eth->dma_dev,
 				 (u64)(trxd.rxd1 | addr64),
 				 ring->buf_size, DMA_FROM_DEVICE);
 
@@ -1878,18 +2102,19 @@
 		skb->dev = netdev;
 		skb_put(skb, pktlen);
 
-		if ((MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V1) &&
-				  (trxd.rxd4 & eth->rx_dma_l4_valid)) ||
-		    (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V1) &&
-				  (trxd.rxd3 & eth->rx_dma_l4_valid)))
+		if ((MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)))
+			rxdcsum = &trxd.rxd3;
+		else
+			rxdcsum = &trxd.rxd4;
+
+		if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 		else
 			skb_checksum_none_assert(skb);
 		skb->protocol = eth_type_trans(skb, netdev);
 
 		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
-			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
-			    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
+			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
 				if (trxd.rxd3 & RX_DMA_VTAG_V2)
 					__vlan_hwaccel_put_tag(skb,
 					htons(RX_DMA_VPID_V2(trxd.rxd4)),
@@ -1912,8 +2137,7 @@
 		}
 
 #if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
-		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
-		    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
+		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
 			*(u32 *)(skb->head) = trxd.rxd5;
 		else
 			*(u32 *)(skb->head) = trxd.rxd4;
@@ -1974,6 +2198,7 @@
 static void mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
 			    unsigned int *done, unsigned int *bytes)
 {
+	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
 	const struct mtk_soc_data *soc = eth->soc;
 	struct mtk_tx_ring *ring = &eth->tx_ring;
 	struct mtk_tx_dma *desc;
@@ -1982,7 +2207,7 @@
 	u32 cpu, dma;
 
 	cpu = ring->last_free_ptr;
-	dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
+	dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
 
 	desc = mtk_qdma_phys_to_virt(ring, cpu);
 
@@ -2019,7 +2244,7 @@
 	}
 
 	ring->last_free_ptr = cpu;
-	mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
+	mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
 }
 
 static void mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
@@ -2101,17 +2326,18 @@
 static int mtk_napi_tx(struct napi_struct *napi, int budget)
 {
 	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
+	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
 	u32 status, mask;
 	int tx_done = 0;
 
 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
 		mtk_handle_status_irq(eth);
-	mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
+	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
 	tx_done = mtk_poll_tx(eth, budget);
 
 	if (unlikely(netif_msg_intr(eth))) {
-		status = mtk_r32(eth, eth->tx_int_status_reg);
-		mask = mtk_r32(eth, eth->tx_int_mask_reg);
+		status = mtk_r32(eth, reg_map->tx_irq_status);
+		mask = mtk_r32(eth, reg_map->tx_irq_mask);
 		dev_info(eth->dev,
 			 "done tx %d, intr 0x%08x/0x%x\n",
 			 tx_done, status, mask);
@@ -2120,7 +2346,7 @@
 	if (tx_done == budget)
 		return budget;
 
-	status = mtk_r32(eth, eth->tx_int_status_reg);
+	status = mtk_r32(eth, reg_map->tx_irq_status);
 	if (status & MTK_TX_DONE_INT)
 		return budget;
 
@@ -2134,6 +2360,7 @@
 {
 	struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi);
 	struct mtk_eth *eth = rx_napi->eth;
+	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
 	struct mtk_rx_ring *ring = rx_napi->rx_ring;
 	u32 status, mask;
 	int rx_done = 0;
@@ -2142,12 +2369,12 @@
 	mtk_handle_status_irq(eth);
 
 poll_again:
-	mtk_w32(eth, MTK_RX_DONE_INT(ring->ring_no), MTK_PDMA_INT_STATUS);
+	mtk_w32(eth, MTK_RX_DONE_INT(ring->ring_no), reg_map->pdma.irq_status);
 	rx_done = mtk_poll_rx(napi, remain_budget, eth);
 
 	if (unlikely(netif_msg_intr(eth))) {
-		status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
-		mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
+		status = mtk_r32(eth, reg_map->pdma.irq_status);
+		mask = mtk_r32(eth, reg_map->pdma.irq_mask);
 		dev_info(eth->dev,
 			 "done rx %d, intr 0x%08x/0x%x\n",
 			 rx_done, status, mask);
@@ -2155,7 +2382,7 @@
 	if (rx_done == remain_budget)
 		return budget;
 
-	status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
+	status = mtk_r32(eth, reg_map->pdma.irq_status);
 	if (status & MTK_RX_DONE_INT(ring->ring_no)) {
 		remain_budget -= rx_done;
 		goto poll_again;
@@ -2180,7 +2407,7 @@
 		goto no_tx_mem;
 
 	if (!eth->soc->has_sram)
-		ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+		ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
 					       &ring->phys, GFP_KERNEL);
 	else {
 		ring->dma =  eth->scratch_ring + MTK_DMA_SIZE * sz;
@@ -2214,7 +2441,8 @@
 	 * descriptors in ring->dma_pdma.
 	 */
 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
-		ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+		ring->dma_pdma = dma_alloc_coherent(eth->dma_dev,
+						    MTK_DMA_SIZE * sz,
 						    &ring->phys_pdma, GFP_KERNEL);
 		if (!ring->dma_pdma)
 			goto no_tx_mem;
@@ -2240,19 +2468,19 @@
 	wmb();
 
 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
-		mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
-		mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
+		mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
+		mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
 		mtk_w32(eth,
 			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
-			MTK_QTX_CRX_PTR);
-		mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR);
+			soc->reg_map->qdma.crx_ptr);
+		mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
 		mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
-			MTK_QTX_CFG(0));
+			soc->reg_map->qdma.qtx_cfg);
 	} else {
 		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
 		mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
 		mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
-		mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
+		mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
 	}
 
 	return 0;
@@ -2275,14 +2503,14 @@
 	}
 
 	if (!eth->soc->has_sram && ring->dma) {
-		dma_free_coherent(eth->dev,
+		dma_free_coherent(eth->dma_dev,
 				  MTK_DMA_SIZE * soc->txrx.txd_size,
 				  ring->dma, ring->phys);
 		ring->dma = NULL;
 	}
 
 	if (ring->dma_pdma) {
-		dma_free_coherent(eth->dev,
+		dma_free_coherent(eth->dma_dev,
 				  MTK_DMA_SIZE * soc->txrx.txd_size,
 				  ring->dma_pdma, ring->phys_pdma);
 		ring->dma_pdma = NULL;
@@ -2291,6 +2519,7 @@
 
 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
 {
+	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
 	struct mtk_rx_ring *ring;
 	int rx_data_len, rx_dma_size;
 	int i;
@@ -2327,15 +2556,15 @@
 
 	if ((!eth->soc->has_sram) || (eth->soc->has_sram
 				&& (rx_flag != MTK_RX_FLAGS_NORMAL)))
-		ring->dma = dma_alloc_coherent(eth->dev,
+		ring->dma = dma_alloc_coherent(eth->dma_dev,
 					       rx_dma_size * eth->soc->txrx.rxd_size,
 					       &ring->phys, GFP_KERNEL);
 	else {
 		struct mtk_tx_ring *tx_ring = &eth->tx_ring;
 		ring->dma = tx_ring->dma + MTK_DMA_SIZE *
-			    eth->soc->txrx.rxd_size * (ring_no + 1);
+			    eth->soc->txrx.txd_size * (ring_no + 1);
 		ring->phys = tx_ring->phys + MTK_DMA_SIZE *
-			     eth->soc->txrx.rxd_size * (ring_no + 1);
+			     eth->soc->txrx.txd_size * (ring_no + 1);
 	}
 
 	if (!ring->dma)
@@ -2344,11 +2573,11 @@
 	for (i = 0; i < rx_dma_size; i++) {
 		struct mtk_rx_dma_v2 *rxd;
 
-		dma_addr_t dma_addr = dma_map_single(eth->dev,
+		dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
 				ring->data[i] + NET_SKB_PAD + eth->ip_align,
 				ring->buf_size,
 				DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
 			return -ENOMEM;
 
 		rxd = ring->dma + i * eth->soc->txrx.rxd_size;
@@ -2365,8 +2594,7 @@
 		rxd->rxd3 = 0;
 		rxd->rxd4 = 0;
 
-		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
-		    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
+		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
 			rxd->rxd5 = 0;
 			rxd->rxd6 = 0;
 			rxd->rxd7 = 0;
@@ -2386,15 +2614,23 @@
 	wmb();
 
 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
-		mtk_w32(eth, ring->phys, MTK_QRX_BASE_PTR_CFG(ring_no));
-		mtk_w32(eth, rx_dma_size, MTK_QRX_MAX_CNT_CFG(ring_no));
-		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
-		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_QDMA_RST_IDX);
+		mtk_w32(eth, ring->phys,
+			reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
+		mtk_w32(eth, rx_dma_size,
+			reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
+		mtk_w32(eth, ring->calc_idx,
+			ring->crx_idx_reg);
+		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
+			reg_map->qdma.rst_idx);
 	} else {
-		mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no));
-		mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no));
-		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
-		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX);
+		mtk_w32(eth, ring->phys,
+			reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
+		mtk_w32(eth, rx_dma_size,
+			reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
+		mtk_w32(eth, ring->calc_idx,
+			ring->crx_idx_reg);
+		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
+			reg_map->pdma.rst_idx);
 	}
 
 	return 0;
@@ -2420,7 +2656,7 @@
 					       MTK_8GB_ADDRESSING)) ?
 				  ((u64)(rxd->rxd2 & 0xf)) << 32 : 0;
 
-			dma_unmap_single(eth->dev,
+			dma_unmap_single(eth->dma_dev,
 					 (u64)(rxd->rxd1 | addr64),
 					 ring->buf_size,
 					 DMA_FROM_DEVICE);
@@ -2434,7 +2670,7 @@
 		return;
 
 	if (ring->dma) {
-		dma_free_coherent(eth->dev,
+		dma_free_coherent(eth->dma_dev,
 				  ring->dma_size * eth->soc->txrx.rxd_size,
 				  ring->dma,
 				  ring->phys);
@@ -2491,8 +2727,7 @@
 	/* the minimal remaining room of SDL0 in RXD for lro aggregation */
 	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
 
-	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
-	    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
 		val = mtk_r32(eth, MTK_PDMA_RX_CFG);
 		mtk_w32(eth, val | (MTK_PDMA_LRO_SDL << MTK_RX_CFG_SDL_OFFSET),
 			MTK_PDMA_RX_CFG);
@@ -2548,8 +2783,7 @@
 {
 	u32 reg_val;
 
-	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
-	    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
 		idx += 1;
 
 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
@@ -2567,8 +2801,7 @@
 {
 	u32 reg_val;
 
-	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
-	    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
 		idx += 1;
 
 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
@@ -2702,7 +2935,7 @@
 {
 	u32 val;
 
-	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V1)) {
+	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
 		/* Set RSS rings to PSE modes */
 		val =  mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(1));
 		val |= MTK_RING_PSE_MODE;
@@ -2878,7 +3111,7 @@
 		return err;
 
 	if (eth->hwlro) {
-		i = (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V1)) ? 1 : 4;
+		i = (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) ? 1 : 4;
 		for (; i < MTK_MAX_RX_RING_NUM; i++) {
 			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
 			if (err)
@@ -2905,8 +3138,8 @@
 		 * automatically
 		 */
 		mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
-			FC_THRES_MIN, MTK_QDMA_FC_THRES);
-		mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
+			FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
+		mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred2);
 	}
 
 	return 0;
@@ -2921,7 +3154,7 @@
 		if (eth->netdev[i])
 			netdev_reset_queue(eth->netdev[i]);
 	if ( !eth->soc->has_sram && eth->scratch_ring) {
-		dma_free_coherent(eth->dev,
+		dma_free_coherent(eth->dma_dev,
 				  MTK_DMA_SIZE * soc->txrx.txd_size,
 				  eth->scratch_ring, eth->phy_scratch_ring);
 		eth->scratch_ring = NULL;
@@ -2994,13 +3227,14 @@
 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
 {
 	struct mtk_eth *eth = _eth;
+	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
 
-	if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT(0)) {
-		if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT(0))
+	if (mtk_r32(eth, reg_map->pdma.irq_mask) & MTK_RX_DONE_INT(0)) {
+		if (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT(0))
 			mtk_handle_irq_rx(irq, &eth->rx_napi[0]);
 	}
-	if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
-		if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
+	if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
+		if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
 			mtk_handle_irq_tx(irq, _eth);
 	}
 
@@ -3054,6 +3288,7 @@
 static int mtk_start_dma(struct mtk_eth *eth)
 {
 	u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
+	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
 	int val, err;
 
 	err = mtk_dma_init(eth);
@@ -3063,7 +3298,7 @@
 	}
 
 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
-		val = mtk_r32(eth, MTK_QDMA_GLO_CFG);
+		val = mtk_r32(eth, reg_map->qdma.glo_cfg);
 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
 		    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
 			val &= ~MTK_RESV_BUF_MASK;
@@ -3073,7 +3308,7 @@
 				MTK_NDP_CO_PRO | MTK_MUTLI_CNT |
 				MTK_RESV_BUF | MTK_WCOMP_EN |
 				MTK_DMAD_WR_WDONE | MTK_CHK_DDONE_EN |
-				MTK_RX_2B_OFFSET, MTK_QDMA_GLO_CFG);
+				MTK_RX_2B_OFFSET, reg_map->qdma.glo_cfg);
 		}
 		else
 			mtk_w32(eth,
@@ -3081,20 +3316,20 @@
 				MTK_DMA_SIZE_32DWORDS | MTK_NDP_CO_PRO |
 				MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
 				MTK_RX_BT_32DWORDS,
-				MTK_QDMA_GLO_CFG);
+				reg_map->qdma.glo_cfg);
 
-		val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
+		val = mtk_r32(eth, reg_map->pdma.glo_cfg);
 		mtk_w32(eth,
 			val | MTK_RX_DMA_EN | rx_2b_offset |
 			MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
-			MTK_PDMA_GLO_CFG);
+			reg_map->pdma.glo_cfg);
 	} else {
 		mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
 			MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
-			MTK_PDMA_GLO_CFG);
+			reg_map->pdma.glo_cfg);
 	}
 
-	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V1) && eth->hwlro) {
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2) && eth->hwlro) {
 		val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
 		mtk_w32(eth, val | MTK_RX_DMA_LRO_EN, MTK_PDMA_GLO_CFG);
 	}
@@ -3293,8 +3528,8 @@
 	}
 
 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
-		mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
-	mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
+		mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
+	mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
 
 	mtk_dma_free(eth);
 
@@ -3373,6 +3608,9 @@
 
 static int mtk_hw_init(struct mtk_eth *eth, u32 type)
 {
+	u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
+		       ETHSYS_DMA_AG_MAP_PPE;
+	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
 	int i, ret = 0;
 	u32 val;
 
@@ -3391,6 +3629,11 @@
 			goto err_disable_pm;
 	}
 
+	if (eth->ethsys)
+		regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
+				   of_dma_is_coherent(eth->dma_dev->of_node) *
+				   dma_mask);
+
 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
 		ret = device_reset(eth->dev);
 		if (ret) {
@@ -3416,8 +3659,7 @@
 	else
 		mtk_eth_cold_reset(eth);
 
-	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
-	    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
 		/* Set FE to PDMAv2 if necessary */
 		mtk_w32(eth, mtk_r32(eth, MTK_FE_GLO_MISC) | MTK_PDMA_V2, MTK_FE_GLO_MISC);
 	}
@@ -3454,10 +3696,10 @@
 	mtk_rx_irq_disable(eth, ~0);
 
 	/* FE int grouping */
-	mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
-	mtk_w32(eth, MTK_RX_DONE_INT(0), MTK_PDMA_INT_GRP2);
-	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
-	mtk_w32(eth, MTK_RX_DONE_INT(0), MTK_QDMA_INT_GRP2);
+	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
+	mtk_w32(eth, MTK_RX_DONE_INT(0), reg_map->pdma.int_grp2);
+	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
+	mtk_w32(eth, MTK_RX_DONE_INT(0), reg_map->qdma.int_grp2);
 	mtk_w32(eth, 0x21021003, MTK_FE_INT_GRP);
 	mtk_w32(eth, MTK_FE_INT_TSO_FAIL |
 		MTK_FE_INT_TSO_ILLEGAL | MTK_FE_INT_TSO_ALIGN |
@@ -4215,6 +4457,35 @@
 	return err;
 }
 
+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
+{
+	struct net_device *dev, *tmp;
+	LIST_HEAD(dev_list);
+	int i;
+
+	rtnl_lock();
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		dev = eth->netdev[i];
+
+		if (!dev || !(dev->flags & IFF_UP))
+			continue;
+
+		list_add_tail(&dev->close_list, &dev_list);
+	}
+
+	dev_close_many(&dev_list, false);
+
+	eth->dma_dev = dma_dev;
+
+	list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
+		list_del_init(&dev->close_list);
+		dev_open(dev, NULL);
+	}
+
+	rtnl_unlock();
+}
+
 static int mtk_probe(struct platform_device *pdev)
 {
 	struct device_node *mac_np;
@@ -4228,6 +4499,7 @@
 	eth->soc = of_device_get_match_data(&pdev->dev);
 
 	eth->dev = &pdev->dev;
+	eth->dma_dev = &pdev->dev;
 	eth->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(eth->base))
 		return PTR_ERR(eth->base);
@@ -4246,24 +4518,8 @@
 		eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
 	}
 
-	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
-		eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
-		eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
-	} else {
-		eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
-		eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
-	}
-
-	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
-		eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
 		eth->ip_align = NET_IP_ALIGN;
-	} else {
-		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
-		    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
-			eth->rx_dma_l4_valid = RX_DMA_L4_VALID_V2;
-		else
-			eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
-	}
 
 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) {
 		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
@@ -4300,6 +4556,16 @@
 		}
 	}
 
+	if (of_dma_is_coherent(pdev->dev.of_node)) {
+		struct regmap *cci;
+
+		cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+						      "cci-control-port");
+		/* enable CPU/bus coherency */
+		if (!IS_ERR(cci))
+			regmap_write(cci, 0, 3);
+	}
+
 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
 		eth->xgmii = devm_kzalloc(eth->dev, sizeof(*eth->xgmii),
 					  GFP_KERNEL);
@@ -4533,6 +4799,7 @@
 }
 
 static const struct mtk_soc_data mt2701_data = {
+	.reg_map = &mtk_reg_map,
 	.caps = MT7623_CAPS | MTK_HWLRO,
 	.hw_features = MTK_HW_FEATURES,
 	.required_clks = MT7623_CLKS_BITMAP,
@@ -4541,12 +4808,14 @@
 	.txrx = {
 		.txd_size = sizeof(struct mtk_tx_dma),
 		.rxd_size = sizeof(struct mtk_rx_dma),
+		.rx_dma_l4_valid = RX_DMA_L4_VALID,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
 	},
 };
 
 static const struct mtk_soc_data mt7621_data = {
+	.reg_map = &mtk_reg_map,
 	.caps = MT7621_CAPS,
 	.hw_features = MTK_HW_FEATURES,
 	.required_clks = MT7621_CLKS_BITMAP,
@@ -4554,6 +4823,7 @@
 	.has_sram = false,
 	.txrx = {
 		.txd_size = sizeof(struct mtk_tx_dma),
+		.rx_dma_l4_valid = RX_DMA_L4_VALID,
 		.rxd_size = sizeof(struct mtk_rx_dma),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
@@ -4561,6 +4831,7 @@
 };
 
 static const struct mtk_soc_data mt7622_data = {
+	.reg_map = &mtk_reg_map,
 	.ana_rgc3 = 0x2028,
 	.caps = MT7622_CAPS | MTK_HWLRO,
 	.hw_features = MTK_HW_FEATURES,
@@ -4570,12 +4841,14 @@
 	.txrx = {
 		.txd_size = sizeof(struct mtk_tx_dma),
 		.rxd_size = sizeof(struct mtk_rx_dma),
+		.rx_dma_l4_valid = RX_DMA_L4_VALID,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
 	},
 };
 
 static const struct mtk_soc_data mt7623_data = {
+	.reg_map = &mtk_reg_map,
 	.caps = MT7623_CAPS | MTK_HWLRO,
 	.hw_features = MTK_HW_FEATURES,
 	.required_clks = MT7623_CLKS_BITMAP,
@@ -4584,12 +4857,14 @@
 	.txrx = {
 		.txd_size = sizeof(struct mtk_tx_dma),
 		.rxd_size = sizeof(struct mtk_rx_dma),
+		.rx_dma_l4_valid = RX_DMA_L4_VALID,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
 	},
 };
 
 static const struct mtk_soc_data mt7629_data = {
+	.reg_map = &mtk_reg_map,
 	.ana_rgc3 = 0x128,
 	.caps = MT7629_CAPS | MTK_HWLRO,
 	.hw_features = MTK_HW_FEATURES,
@@ -4599,12 +4874,14 @@
 	.txrx = {
 		.txd_size = sizeof(struct mtk_tx_dma),
 		.rxd_size = sizeof(struct mtk_rx_dma),
+		.rx_dma_l4_valid = RX_DMA_L4_VALID,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
 	},
 };
 
 static const struct mtk_soc_data mt7986_data = {
+	.reg_map = &mt7986_reg_map,
 	.ana_rgc3 = 0x128,
 	.caps = MT7986_CAPS,
 	.hw_features = MTK_HW_FEATURES,
@@ -4613,13 +4890,15 @@
 	.has_sram = true,
 	.txrx = {
 		.txd_size = sizeof(struct mtk_tx_dma_v2),
-		.rxd_size = sizeof(struct mtk_rx_dma_v2),
+		.rxd_size = sizeof(struct mtk_rx_dma),
+		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
 		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
 	},
 };
 
 static const struct mtk_soc_data mt7981_data = {
+	.reg_map = &mt7986_reg_map,
 	.ana_rgc3 = 0x128,
 	.caps = MT7981_CAPS,
 	.hw_features = MTK_HW_FEATURES,
@@ -4628,13 +4907,15 @@
 	.has_sram = true,
 	.txrx = {
 		.txd_size = sizeof(struct mtk_tx_dma_v2),
-		.rxd_size = sizeof(struct mtk_rx_dma_v2),
+		.rxd_size = sizeof(struct mtk_rx_dma),
+		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
 		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
 	},
 };
 
 static const struct mtk_soc_data mt7988_data = {
+	.reg_map = &mt7988_reg_map,
 	.ana_rgc3 = 0x128,
 	.caps = MT7988_CAPS,
 	.hw_features = MTK_HW_FEATURES,
@@ -4644,12 +4925,14 @@
 	.txrx = {
 		.txd_size = sizeof(struct mtk_tx_dma_v2),
 		.rxd_size = sizeof(struct mtk_rx_dma_v2),
+		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
 		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
 	},
 };
 
 static const struct mtk_soc_data rt5350_data = {
+	.reg_map = &mt7628_reg_map,
 	.caps = MT7628_CAPS,
 	.hw_features = MTK_HW_FEATURES_MT7628,
 	.required_clks = MT7628_CLKS_BITMAP,
@@ -4658,6 +4941,7 @@
 	.txrx = {
 		.txd_size = sizeof(struct mtk_tx_dma),
 		.rxd_size = sizeof(struct mtk_rx_dma),
+		.rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
 	},
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index f0b3a24..c7510b3 100755
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -50,6 +50,8 @@
 #define MTK_HW_FEATURES_MT7628	(NETIF_F_SG | NETIF_F_RXCSUM)
 #define NEXT_DESP_IDX(X, Y)	(((X) + 1) & ((Y) - 1))
 
+#define MTK_QRX_OFFSET		0x10
+
 #define MTK_HW_LRO_DMA_SIZE	8
 
 #define	MTK_MAX_LRO_RX_LENGTH		(4096 * 3)
@@ -174,7 +176,11 @@
 #define WDMA_BASE(x)		(0x4800 + ((x) * 0x400))
 #define PPE_BASE(x)		((x == 2) ? 0x2E00 : 0x2200 + ((x) * 0x400))
 #elif defined(CONFIG_MEDIATEK_NETSYS_V2)
+#ifdef CONFIG_MEDIATEK_NETSYS_RX_V2
 #define PDMA_BASE               0x6000
+#else
+#define PDMA_BASE               0x4000
+#endif
 #define QDMA_BASE               0x4400
 #define WDMA_BASE(x)		(0x4800 + ((x) * 0x400))
 #define PPE_BASE(x)		(0x2200 + ((x) * 0x400))
@@ -202,7 +208,7 @@
 
 /* PDMA HW LRO Control Registers */
 #define BITS(m, n)			(~(BIT(m) - 1) & ((BIT(n) - 1) | BIT(n)))
-#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
+#if defined(CONFIG_MEDIATEK_NETSYS_RX_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
 #define MTK_MAX_RX_RING_NUM		(8)
 #define MTK_HW_LRO_RING_NUM		(4)
 #define IS_HW_LRO_RING(ring_no)		(((ring_no) > 3) && ((ring_no) < 8))
@@ -246,14 +252,14 @@
 #define MTK_LRO_MIN_RXD_SDL	(MTK_HW_LRO_SDL_REMAIN_ROOM << 16)
 
 /* PDMA RSS Control Registers */
-#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
+#if defined(CONFIG_MEDIATEK_NETSYS_RX_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
 #define MTK_PDMA_RSS_GLO_CFG		(PDMA_BASE + 0x800)
 #define MTK_RX_NAPI_NUM			(2)
 #define MTK_MAX_IRQ_NUM			(4)
 #else
-#define MTK_PDMA_RSS_GLO_CFG		0x3000
-#define MTK_RX_NAPI_NUM			(1)
-#define MTK_MAX_IRQ_NUM			(3)
+#define MTK_PDMA_RSS_GLO_CFG		0x2800
+#define MTK_RX_NAPI_NUM			(2)
+#define MTK_MAX_IRQ_NUM			(4)
 #endif
 #define MTK_RSS_RING1			(1)
 #define MTK_RSS_EN			BIT(0)
@@ -288,7 +294,11 @@
 
 /* PDMA Delay Interrupt Register */
 #define MTK_PDMA_DELAY_INT		(PDMA_BASE + 0x20c)
+#if defined(CONFIG_MEDIATEK_NETSYS_RX_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
 #define MTK_PDMA_RSS_DELAY_INT		(PDMA_BASE + 0x2c0)
+#else
+#define MTK_PDMA_RSS_DELAY_INT		(PDMA_BASE + 0x270)
+#endif
 #define MTK_PDMA_DELAY_RX_EN		BIT(15)
 #define MTK_PDMA_DELAY_RX_PINT		4
 #define MTK_PDMA_DELAY_RX_PINT_SHIFT	8
@@ -306,7 +316,7 @@
 /* PDMA Interrupt grouping registers */
 #define MTK_PDMA_INT_GRP1	(PDMA_BASE + 0x250)
 #define MTK_PDMA_INT_GRP2	(PDMA_BASE + 0x254)
-#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
+#if defined(CONFIG_MEDIATEK_NETSYS_RX_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
 #define MTK_PDMA_INT_GRP3	(PDMA_BASE + 0x258)
 #else
 #define MTK_PDMA_INT_GRP3	(PDMA_BASE + 0x22c)
@@ -315,7 +325,7 @@
 #define MTK_MAX_DELAY_INT	0x8f0f8f0f
 
 /* PDMA HW LRO IP Setting Registers */
-#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
+#if defined(CONFIG_MEDIATEK_NETSYS_RX_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
 #define MTK_LRO_RX_RING0_DIP_DW0	(PDMA_BASE + 0x414)
 #else
 #define MTK_LRO_RX_RING0_DIP_DW0	(PDMA_BASE + 0x304)
@@ -434,7 +444,7 @@
 
 /* QDMA Interrupt Status Register */
 #define MTK_QDMA_INT_STATUS	(QDMA_BASE + 0x218)
-#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
+#if defined(CONFIG_MEDIATEK_NETSYS_RX_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
 #define MTK_RX_DONE_INT(ring_no)					\
 	(MTK_HAS_CAPS(eth->soc->caps, MTK_RSS) ? (BIT(24 + (ring_no))) :	\
 	 ((ring_no) ? BIT(16 + (ring_no)) : BIT(14)))
@@ -548,6 +558,9 @@
 #define MTK_TX_DMA_BUF_SHIFT    16
 #define MTK_TX_DMA_BUF_SHIFT_V2 8
 
+#define MTK_RX_DMA_BUF_LEN      0x3fff
+#define MTK_RX_DMA_BUF_SHIFT    16
+
 #define RX_DMA_SPORT_SHIFT      19
 #define RX_DMA_SPORT_SHIFT_V2   26
 #define RX_DMA_SPORT_MASK       0x7
@@ -576,8 +589,16 @@
 /* QDMA descriptor rxd2 */
 #define RX_DMA_DONE		BIT(31)
 #define RX_DMA_LSO		BIT(30)
+#if defined(CONFIG_MEDIATEK_NETSYS_RX_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
 #define RX_DMA_PLEN0(_x)	(((_x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
 #define RX_DMA_GET_PLEN0(_x)	(((_x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
+#else
+#define RX_DMA_PLEN0(_x)	\
+	(((_x) & MTK_RX_DMA_BUF_LEN) << MTK_RX_DMA_BUF_SHIFT)
+#define RX_DMA_GET_PLEN0(_x)	\
+	(((_x) >> MTK_RX_DMA_BUF_SHIFT) & MTK_RX_DMA_BUF_LEN)
+#endif
+
 #define RX_DMA_GET_AGG_CNT(_x)	(((_x) >> 2) & 0xff)
 #define RX_DMA_GET_REV(_x)	(((_x) >> 10) & 0x1f)
 #define RX_DMA_VTAG		BIT(15)
@@ -638,7 +659,7 @@
 
 /* XMAC status registers */
 #define MTK_XGMAC_STS(x)	((x == MTK_GMAC3_ID) ? 0x1001C : 0x1000C)
-#define MTK_XGMAC_FORCE_LINK	BIT(15)
+#define MTK_XGMAC_FORCE_LINK(x)	((x == MTK_GMAC2_ID) ? BIT(31) : BIT(15))
 #define MTK_USXGMII_PCS_LINK	BIT(8)
 #define MTK_XGMAC_RX_FC		BIT(5)
 #define MTK_XGMAC_TX_FC		BIT(4)
@@ -790,6 +811,11 @@
 /* ethernet reset check idle register */
 #define ETHSYS_FE_RST_CHK_IDLE_EN 	0x28
 
+/* ethernet dma channel agent map */
+#define ETHSYS_DMA_AG_MAP	0x408
+#define ETHSYS_DMA_AG_MAP_PDMA	BIT(0)
+#define ETHSYS_DMA_AG_MAP_QDMA	BIT(1)
+#define ETHSYS_DMA_AG_MAP_PPE	BIT(2)
 
 /* SGMII subsystem config registers */
 /* Register to auto-negotiation restart */
@@ -1300,6 +1326,7 @@
 	MTK_QDMA_BIT,
 	MTK_NETSYS_V1_BIT,
 	MTK_NETSYS_V2_BIT,
+	MTK_NETSYS_RX_V2_BIT,
 	MTK_NETSYS_V3_BIT,
 	MTK_SOC_MT7628_BIT,
 	MTK_RSTCTRL_PPE1_BIT,
@@ -1350,6 +1377,7 @@
 #define MTK_QDMA		BIT_ULL(MTK_QDMA_BIT)
 #define MTK_NETSYS_V1		BIT_ULL(MTK_NETSYS_V1_BIT)
 #define MTK_NETSYS_V2		BIT_ULL(MTK_NETSYS_V2_BIT)
+#define MTK_NETSYS_RX_V2	BIT(MTK_NETSYS_RX_V2_BIT)
 #define MTK_NETSYS_V3		BIT_ULL(MTK_NETSYS_V3_BIT)
 #define MTK_SOC_MT7628		BIT_ULL(MTK_SOC_MT7628_BIT)
 #define MTK_RSTCTRL_PPE1	BIT_ULL(MTK_RSTCTRL_PPE1_BIT)
@@ -1456,7 +1484,7 @@
 
 #define MT7986_CAPS   (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \
                        MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
-                       MTK_NETSYS_V2)
+			MTK_NETSYS_V2)
 
 #define MT7981_CAPS   (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
 			MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
@@ -1468,7 +1496,8 @@
 		       MTK_NETSYS_V3 | MTK_RSTCTRL_PPE1 | MTK_RSTCTRL_PPE2 | \
 		       MTK_GMAC1_USXGMII | MTK_GMAC2_USXGMII | \
 		       MTK_GMAC3_USXGMII | MTK_MUX_GMAC123_TO_USXGMII | \
-		       MTK_GMAC2_XGMII | MTK_MUX_GMAC2_TO_XGMII | MTK_RSS)
+		       MTK_GMAC2_XGMII | MTK_MUX_GMAC2_TO_XGMII | MTK_RSS | \
+		       MTK_NETSYS_RX_V2)
 
 struct mtk_tx_dma_desc_info {
 	dma_addr_t	addr;
@@ -1482,9 +1511,55 @@
 	u8		last:1;
 };
 
+struct mtk_reg_map {
+	u32	tx_irq_mask;
+	u32	tx_irq_status;
+	struct {
+		u32	rx_ptr;		/* rx base pointer */
+		u32	rx_cnt_cfg;	/* rx max count configuration */
+		u32	pcrx_ptr;	/* rx cpu pointer */
+		u32	glo_cfg;	/* global configuration */
+		u32	rst_idx;	/* reset index */
+		u32	delay_irq;	/* delay interrupt */
+		u32	irq_status;	/* interrupt status */
+		u32	irq_mask;	/* interrupt mask */
+		u32	int_grp;	/* interrupt group1 */
+		u32	int_grp2;	/* interrupt group2 */
+	} pdma;
+	struct {
+		u32	qtx_cfg;	/* tx queue configuration */
+		u32	qtx_sch;	/* tx queue scheduler configuration */
+		u32	rx_ptr;		/* rx base pointer */
+		u32	rx_cnt_cfg;	/* rx max count configuration */
+		u32	qcrx_ptr;	/* rx cpu pointer */
+		u32	glo_cfg;	/* global configuration */
+		u32	rst_idx;	/* reset index */
+		u32	delay_irq;	/* delay interrupt */
+		u32	fc_th;		/* flow control */
+		u32	int_grp;	/* interrupt group1 */
+		u32	int_grp2;	/* interrupt group2 */
+		u32	hred2;		/* interrupt mask */
+		u32	ctx_ptr;	/* tx acquire cpu pointer */
+		u32	dtx_ptr;	/* tx acquire dma pointer */
+		u32	crx_ptr;	/* tx release cpu pointer */
+		u32	drx_ptr;	/* tx release dma pointer */
+		u32	fq_head;	/* fq head pointer */
+		u32	fq_tail;	/* fq tail pointer */
+		u32	fq_count;	/* fq free page count */
+		u32	fq_blen;	/* fq free page buffer length */
+		u32	tx_sch_rate;	/* tx scheduler rate control
+					   registers */
+	} qdma;
+	u32	gdm1_cnt;
+	u32	gdma_to_ppe0;
+	u32	ppe_base[3];
+	u32	wdma_base[3];
+};
+
 /* struct mtk_eth_data -	This is the structure holding all differences
  *				among various plaforms
- * @ana_rgc3:                   The offset for register ANA_RGC3 related to
+ * @reg_map			SoC register map.
+ * @ana_rgc3:			The offset for register ANA_RGC3 related to
  *				sgmiisys syscon
  * @caps			Flags shown the extra capability for the SoC
  * @hw_features			Flags shown HW features
@@ -1494,11 +1569,13 @@
  *				the extra setup for those pins used by GMAC.
  * @txd_size			Tx DMA descriptor size.
  * @rxd_size			Rx DMA descriptor size.
+ * @rx_dma_l4_valid		Rx DMA valid register mask.
  * @dma_max_len			Max DMA tx/rx buffer length.
  * @dma_len_offset		Tx/Rx DMA length field offset.
  */
 struct mtk_soc_data {
-	u32             ana_rgc3;
+	const struct mtk_reg_map *reg_map;
+	u32		ana_rgc3;
 	u64		caps;
 	u64		required_clks;
 	bool		required_pctl;
@@ -1507,6 +1584,7 @@
 	struct {
 		u32	txd_size;
 		u32	rxd_size;
+		u32	rx_dma_l4_valid;
 		u32	dma_max_len;
 		u32	dma_len_offset;
 	} txrx;
@@ -1573,6 +1651,7 @@
 /* struct mtk_eth -	This is the main datasructure for holding the state
  *			of the driver
  * @dev:		The device pointer
+ * @dma_dev:		The device pointer used for dma mapping/alloc
  * @base:		The mapped register i/o base
  * @page_lock:		Make sure that register operations are atomic
  * @tx_irq__lock:	Make sure that IRQ register operations are atomic
@@ -1607,6 +1686,7 @@
 
 struct mtk_eth {
 	struct device			*dev;
+	struct device			*dma_dev;
 	void __iomem			*base;
 	void __iomem			*sram_base;
 	spinlock_t			page_lock;
@@ -1642,8 +1722,6 @@
 
 	const struct mtk_soc_data	*soc;
 
-	u32				tx_int_mask_reg;
-	u32				tx_int_status_reg;
 	u32				rx_dma_l4_valid;
 	int				ip_align;
 	spinlock_t			syscfg0_lock;
@@ -1717,4 +1795,6 @@
 void mtk_usxgmii_setup_phya_an_10000(struct mtk_xgmii *ss, int mac_id);
 void mtk_usxgmii_reset(struct mtk_xgmii *ss, int mac_id);
 int mtk_dump_usxgmii(struct regmap *pmap, char *name, u32 offset, u32 range);
+
+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
 #endif /* MTK_ETH_H */
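For reference, a minimal sketch of how the new reg_map indirection is consumed: register accesses go through eth->soc->reg_map instead of per-SoC #ifdef'd offset macros. The offsets below are placeholders and mtk_r32() is assumed to be the driver's existing 32-bit read helper; this is illustrative only, not the actual mt7986/mt7988 tables.

	/* Illustrative only: placeholder offsets, not the real SoC map. */
	static const struct mtk_reg_map demo_reg_map = {
		.tx_irq_mask	= 0x1a1c,
		.tx_irq_status	= 0x1a18,
		.qdma = {
			.qtx_cfg	= 0x4400,
			.rx_ptr		= 0x4500,
		},
	};

	/* Accesses then index the map rather than fixed QDMA_BASE macros. */
	static u32 demo_read_qtx_cfg(struct mtk_eth *eth, int queue)
	{
		const struct mtk_reg_map *reg_map = eth->soc->reg_map;

		return mtk_r32(eth, reg_map->qdma.qtx_cfg + queue * 0x10);
	}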
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
index 2a3c7f8..7cd23a2 100644
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
@@ -47,7 +47,7 @@
 	u32 resv3 : 19;
 	u32 magic_tag_protect : 16;
 } __packed;
-#elif defined(CONFIG_MEDIATEK_NETSYS_V2)
+#elif defined(CONFIG_MEDIATEK_NETSYS_RX_V2)
 struct hnat_desc {
 	u32 entry : 15;
 	u32 filled : 3;
@@ -70,13 +70,13 @@
 	u32 crsn : 5;
 	u32 sport : 4;
 	u32 alg : 1;
-	u32 iface : 4;
+	u32 iface : 8;
 	u32 filled : 3;
 	u32 resv : 1;
 	u32 magic_tag_protect : 16;
 	u32 wdmaid : 8;
 	u32 rxid : 2;
-	u32 wcid : 8;
+	u32 wcid : 10;
 	u32 bssid : 6;
 } __packed;
 #endif
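The iface and wcid fields widen to 8 and 10 bits so the descriptor can carry the larger interface/station ID space of newer WiFi offload hardware (up to 256 interfaces and 1024 WCIDs). A self-contained sketch of the equivalent mask-based extraction; the field positions here are hypothetical, chosen only to illustrate the widths:

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/types.h>

	/* Hypothetical packing: 10-bit WCID and 8-bit interface index in
	 * one 32-bit descriptor word (not the real rxd layout).
	 */
	#define DEMO_RXD_WCID	GENMASK(9, 0)
	#define DEMO_RXD_IFACE	GENMASK(17, 10)

	static inline u16 demo_get_wcid(u32 rxd)
	{
		return FIELD_GET(DEMO_RXD_WCID, rxd);	/* 0..1023 */
	}

	static inline u8 demo_get_iface(u32 rxd)
	{
		return FIELD_GET(DEMO_RXD_IFACE, rxd);	/* 0..255 */
	}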
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/phy/mediatek-ge.c b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/phy/mediatek-ge.c
index 35ec82f..8b7fbf8 100644
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/phy/mediatek-ge.c
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/phy/mediatek-ge.c
@@ -19,6 +19,14 @@
 #define MTK_PHY_PAGE_EXTENDED_2		0x0002
 #define MTK_PHY_PAGE_EXTENDED_3		0x0003
 
+/* Registers on Page 3 */
+#define MTK_PHY_LPI_REG_14				(0x14)
+#define   MTK_PHY_LPI_WAKE_TIMER_1000	GENMASK(8, 0)
+
+#define MTK_PHY_LPI_REG_1c				(0x1c)
+#define   MTK_PHY_SMI_DET_ON_THRESH		GENMASK(13, 8)
+/*******************************/
+
 #define MTK_PHY_PAGE_EXTENDED_2A30	0x2a30
 #define MTK_PHY_ANARG_RG		(0x10)
 #define   MTK_PHY_TCLKOFFSET_MASK	GENMASK(12, 8)
@@ -117,6 +125,16 @@
 
 #define MTK_PHY_RG_TX_FILTER		(0xfe)
 
+#define MTK_PHY_RG_LPI_PCS_DSP_CTRL_REG120	(0x120)
+#define   MTK_PHY_LPI_SIG_EN_LO_THRESH1000_MASK	GENMASK(12, 8)
+#define   MTK_PHY_LPI_SIG_EN_HI_THRESH1000_MASK	GENMASK(4, 0)
+
+#define MTK_PHY_RG_LPI_PCS_DSP_CTRL_REG122	(0x122)
+#define   MTK_PHY_LPI_NORM_MSE_HI_THRESH1000_MASK	GENMASK(7, 0)
+
+#define MTK_PHY_RG_TESTMUX_ADC_CTRL	(0x144)
+#define   MTK_PHY_RG_TXEN_DIG_MASK		GENMASK(5, 5)
+
 #define MTK_PHY_RG_DEV1E_REG172		(0x172)
 #define   MTK_PHY_CR_TX_AMP_OFFSET_A_MASK	GENMASK(13, 8)
 #define   MTK_PHY_CR_TX_AMP_OFFSET_B_MASK	GENMASK(6, 0)
@@ -166,6 +184,9 @@
 #define MTK_PHY_RG_DEV1E_REG184		(0x180)
 #define   MTK_PHY_DASN_DAC_IN1_D_MASK	GENMASK(9, 0)
 
+#define MTK_PHY_RG_DEV1E_REG19b		(0x19b)
+#define   MTK_PHY_BYPASS_DSP_LPI_READY	BIT(8)
+
 #define MTK_PHY_RG_LP_IIR2_K1_L		(0x22a)
 #define MTK_PHY_RG_LP_IIR2_K1_U		(0x22b)
 #define MTK_PHY_RG_LP_IIR2_K2_L		(0x22c)
@@ -180,14 +201,48 @@
 #define MTK_PHY_RG_DEV1E_REG234		(0x234)
 #define   MTK_PHY_TR_OPEN_LOOP_EN_MASK	GENMASK(0, 0)
 #define   MTK_PHY_LPF_X_AVERAGE_MASK	GENMASK(7, 4)
+#define   MTK_PHY_TR_LP_IIR_EEE_EN		BIT(12)
 
 #define MTK_PHY_RG_LPF_CNT_VAL		(0x235)
 
+#define MTK_PHY_RG_DEV1E_REG238		(0x238)
+#define   MTK_PHY_LPI_SLV_SEND_TX_TIMER_MASK	GENMASK(8, 0)
+#define   MTK_PHY_LPI_SLV_SEND_TX_EN			BIT(12)
+
+#define MTK_PHY_RG_DEV1E_REG239		(0x239)
+#define   MTK_PHY_LPI_SEND_LOC_TIMER_MASK		GENMASK(8, 0)
+#define   MTK_PHY_LPI_TXPCS_LOC_RCV		BIT(12)
+
 #define MTK_PHY_RG_DEV1E_REG27C		(0x27c)
 #define   MTK_PHY_VGASTATE_FFE_THR_ST1_MASK	GENMASK(12, 8)
 #define MTK_PHY_RG_DEV1E_REG27D		(0x27d)
 #define   MTK_PHY_VGASTATE_FFE_THR_ST2_MASK	GENMASK(4, 0)
 
+#define MTK_PHY_RG_DEV1E_REG2C7		(0x2c7)
+#define   MTK_PHY_MAX_GAIN_MASK		GENMASK(4, 0)
+#define   MTK_PHY_MIN_GAIN_MASK		GENMASK(12, 8)
+
+#define MTK_PHY_RG_DEV1E_REG2D1		(0x2d1)
+#define   MTK_PHY_VCO_SLICER_THRESH_BITS_HIGH_EEE_MASK	GENMASK(7, 0)
+#define   MTK_PHY_LPI_SKIP_SD_SLV_TR				BIT(8)
+#define   MTK_PHY_LPI_TR_READY						BIT(9)
+#define   MTK_PHY_LPI_VCO_EEE_STG0_EN				BIT(10)
+
+#define MTK_PHY_RG_DEV1E_REG323		(0x323)
+#define   MTK_PHY_EEE_WAKE_MAS_INT_DC	BIT(0)
+#define   MTK_PHY_EEE_WAKE_SLV_INT_DC	BIT(4)
+
+#define MTK_PHY_RG_DEV1E_REG324		(0x324)
+#define   MTK_PHY_SMI_DETCNT_MAX_MASK	GENMASK(5, 0)
+#define   MTK_PHY_SMI_DET_MAX_EN		BIT(8)
+
+#define MTK_PHY_RG_DEV1E_REG326		(0x326)
+#define   MTK_PHY_LPI_MODE_SD_ON				BIT(0)
+#define   MTK_PHY_RESET_RANDUPD_CNT				BIT(1)
+#define   MTK_PHY_TREC_UPDATE_ENAB_CLR			BIT(2)
+#define   MTK_PHY_LPI_QUIT_WAIT_DFE_SIG_DET_OFF	BIT(4)
+#define   MTK_PHY_TR_READY_SKIP_AFE_WAKEUP		BIT(5)
+
 #define MTK_PHY_LDO_PUMP_EN_PAIRAB	(0x502)
 #define MTK_PHY_LDO_PUMP_EN_PAIRCD	(0x503)
 
@@ -530,17 +585,6 @@
 					7, 1, 4, 7,
 					7, 1, 4, 7 };
 			memcpy(bias, (const void *)tmp, sizeof(bias));
-			for (i = 0; i <= 12; i += 4) {
-				if (likely(buf[i>>2] + bias[i] >= 32)) {
-					bias[i] -= 13;
-				} else {
-					phy_modify_mmd(phydev, MDIO_MMD_VEND1,
-						0x5c, 0x7 << i, bias[i] << i);
-					bias[i+1] += 13;
-					bias[i+2] += 13;
-					bias[i+3] += 13;
-				}
-			}
 			break;
 		}
 		case 0x03a29481:
@@ -949,6 +993,118 @@
 	return 0;
 }
 
+static inline void mt798x_phy_eee(struct phy_device *phydev)
+{
+	phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+		MTK_PHY_RG_LPI_PCS_DSP_CTRL_REG120,
+		MTK_PHY_LPI_SIG_EN_LO_THRESH1000_MASK |
+		MTK_PHY_LPI_SIG_EN_HI_THRESH1000_MASK,
+		FIELD_PREP(MTK_PHY_LPI_SIG_EN_LO_THRESH1000_MASK, 0x0) |
+		FIELD_PREP(MTK_PHY_LPI_SIG_EN_HI_THRESH1000_MASK, 0x14));
+
+	phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+		MTK_PHY_RG_LPI_PCS_DSP_CTRL_REG122,
+		MTK_PHY_LPI_NORM_MSE_HI_THRESH1000_MASK,
+		FIELD_PREP(MTK_PHY_LPI_NORM_MSE_HI_THRESH1000_MASK, 0xff));
+
+	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+		MTK_PHY_RG_TESTMUX_ADC_CTRL, MTK_PHY_RG_TXEN_DIG_MASK);
+
+	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
+		MTK_PHY_RG_DEV1E_REG19b, MTK_PHY_BYPASS_DSP_LPI_READY);
+
+	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+		MTK_PHY_RG_DEV1E_REG234, MTK_PHY_TR_LP_IIR_EEE_EN);
+
+	phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG238,
+		MTK_PHY_LPI_SLV_SEND_TX_TIMER_MASK | MTK_PHY_LPI_SLV_SEND_TX_EN,
+		FIELD_PREP(MTK_PHY_LPI_SLV_SEND_TX_TIMER_MASK, 0x120));
+
+	phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG239,
+		MTK_PHY_LPI_SEND_LOC_TIMER_MASK | MTK_PHY_LPI_TXPCS_LOC_RCV,
+		FIELD_PREP(MTK_PHY_LPI_SEND_LOC_TIMER_MASK, 0x117));
+
+	phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG2C7,
+		MTK_PHY_MAX_GAIN_MASK | MTK_PHY_MIN_GAIN_MASK,
+		FIELD_PREP(MTK_PHY_MAX_GAIN_MASK, 0x8) |
+		FIELD_PREP(MTK_PHY_MIN_GAIN_MASK, 0x13));
+
+	phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG2D1,
+		MTK_PHY_VCO_SLICER_THRESH_BITS_HIGH_EEE_MASK,
+		FIELD_PREP(MTK_PHY_VCO_SLICER_THRESH_BITS_HIGH_EEE_MASK, 0x33) |
+		MTK_PHY_LPI_SKIP_SD_SLV_TR | MTK_PHY_LPI_TR_READY |
+		MTK_PHY_LPI_VCO_EEE_STG0_EN);
+
+	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG323,
+		MTK_PHY_EEE_WAKE_MAS_INT_DC | MTK_PHY_EEE_WAKE_SLV_INT_DC);
+
+	phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG324,
+		MTK_PHY_SMI_DETCNT_MAX_MASK,
+		FIELD_PREP(MTK_PHY_SMI_DETCNT_MAX_MASK, 0x3f) |
+		MTK_PHY_SMI_DET_MAX_EN);
+
+	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG326,
+		MTK_PHY_LPI_MODE_SD_ON | MTK_PHY_RESET_RANDUPD_CNT |
+		MTK_PHY_TREC_UPDATE_ENAB_CLR |
+		MTK_PHY_LPI_QUIT_WAIT_DFE_SIG_DET_OFF |
+		MTK_PHY_TR_READY_SKIP_AFE_WAKEUP);
+
+	phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
+	/* Regsigdet_sel_1000 = 0 */
+	__phy_write(phydev, 0x11, 0xb);
+	__phy_write(phydev, 0x12, 0x0);
+	__phy_write(phydev, 0x10, 0x9690);
+
+	/* REG_EEE_st2TrKf1000 = 3 */
+	__phy_write(phydev, 0x11, 0x114f);
+	__phy_write(phydev, 0x12, 0x2);
+	__phy_write(phydev, 0x10, 0x969a);
+
+	/* RegEEE_slv_wake_tr_timer_tar = 6, RegEEE_slv_remtx_timer_tar = 20 */
+	__phy_write(phydev, 0x11, 0x3028);
+	__phy_write(phydev, 0x12, 0x0);
+	__phy_write(phydev, 0x10, 0x969e);
+
+	/* RegEEE_slv_wake_int_timer_tar = 8 */
+	__phy_write(phydev, 0x11, 0x5010);
+	__phy_write(phydev, 0x12, 0x0);
+	__phy_write(phydev, 0x10, 0x96a0);
+
+	/* RegEEE_trfreeze_timer2 = 586 */
+	__phy_write(phydev, 0x11, 0x24a);
+	__phy_write(phydev, 0x12, 0x0);
+	__phy_write(phydev, 0x10, 0x96a8);
+
+	/* RegEEE100Stg1_tar = 16 */
+	__phy_write(phydev, 0x11, 0x3210);
+	__phy_write(phydev, 0x12, 0x0);
+	__phy_write(phydev, 0x10, 0x96b8);
+
+	/* REGEEE_wake_slv_tr_wait_dfesigdet_en = 1 */
+	__phy_write(phydev, 0x11, 0x1463);
+	__phy_write(phydev, 0x12, 0x0);
+	__phy_write(phydev, 0x10, 0x96ca);
+
+	/* DfeTailEnableVgaThresh1000 = 27 */
+	__phy_write(phydev, 0x11, 0x36);
+	__phy_write(phydev, 0x12, 0x0);
+	__phy_write(phydev, 0x10, 0x8f80);
+	phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
+
+	phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_3);
+	__phy_modify(phydev, MTK_PHY_LPI_REG_14, MTK_PHY_LPI_WAKE_TIMER_1000,
+		FIELD_PREP(MTK_PHY_LPI_WAKE_TIMER_1000, 0x19c));
+
+	__phy_modify(phydev, MTK_PHY_LPI_REG_1c, MTK_PHY_SMI_DET_ON_THRESH,
+		FIELD_PREP(MTK_PHY_SMI_DET_ON_THRESH, 0xc));
+	phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
+
+	phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+		MTK_PHY_RG_LPI_PCS_DSP_CTRL_REG122,
+		MTK_PHY_LPI_NORM_MSE_HI_THRESH1000_MASK,
+		FIELD_PREP(MTK_PHY_LPI_NORM_MSE_HI_THRESH1000_MASK, 0xff));
+}
+
 static inline void mt7981_phy_finetune(struct phy_device *phydev)
 {
 	u32 i;
@@ -984,8 +1140,8 @@
 	__phy_write(phydev, 0x12, 0xe);
 	__phy_write(phydev, 0x10, 0x8fb0);
 
-	/* SlvDSPreadyTime = 0xc */
-	__phy_write(phydev, 0x11, 0x671);
+	/* SlvDSPreadyTime = 24, MasDSPreadyTime = 24 */
+	__phy_write(phydev, 0x11, 0xc71);
 	__phy_write(phydev, 0x12, 0xc);
 	__phy_write(phydev, 0x10, 0x8fae);
 
@@ -999,11 +1155,23 @@
 	__phy_write(phydev, 0x12, 0x0);
 	__phy_write(phydev, 0x10, 0x8f80);
 
-	/* SSTr related */
+	/* SSTrKp1000Slv = 5 */
 	__phy_write(phydev, 0x11, 0xbaef);
 	__phy_write(phydev, 0x12, 0x2e);
 	__phy_write(phydev, 0x10, 0x968c);
 
+	/* MrvlTrFix100Kp = 3, MrvlTrFix100Kf = 2,
+	 * MrvlTrFix1000Kp = 3, MrvlTrFix1000Kf = 2
+	 */
+	__phy_write(phydev, 0x11, 0xd10a);
+	__phy_write(phydev, 0x12, 0x34);
+	__phy_write(phydev, 0x10, 0x8f82);
+
+	/* TrFreeze = 0 */
+	__phy_write(phydev, 0x11, 0x0);
+	__phy_write(phydev, 0x12, 0x0);
+	__phy_write(phydev, 0x10, 0x9686);
+
 	/* VcoSlicerThreshBitsHigh */
 	__phy_write(phydev, 0x11, 0x5555);
 	__phy_write(phydev, 0x12, 0x55);
@@ -1019,6 +1187,11 @@
 	__phy_write(phydev, 0x12, 0x3e);
 	__phy_write(phydev, 0x10, 0x8fa4);
 
+	/* FfeUpdGainForce = 4 */
+	__phy_write(phydev, 0x11, 0x240);
+	__phy_write(phydev, 0x12, 0x0);
+	__phy_write(phydev, 0x10, 0x9680);
+
 	phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
 	/* TR_OPEN_LOOP_EN = 1, lpf_x_average = 9*/
 	phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG234,
@@ -1054,6 +1227,15 @@
 		phy_write_mmd(phydev, MDIO_MMD_VEND2, i, 0x2219);
 		phy_write_mmd(phydev, MDIO_MMD_VEND2, i+1, 0x23);
 	}
+
+	/* Disable LDO pump */
+	phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_LDO_PUMP_EN_PAIRAB, 0x0);
+	phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_LDO_PUMP_EN_PAIRCD, 0x0);
+
+	/* Adjust LDO output voltage */
+	phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_LDO_OUTPUT_V, 0x2222);
+
+	mt798x_phy_eee(phydev);
 }
 
 static inline void mt7988_phy_finetune(struct phy_device *phydev)
@@ -1080,7 +1262,7 @@
 	__phy_write(phydev, 0x12, 0xe);
 	__phy_write(phydev, 0x10, 0x8fb0);
 
-	/* SlvDSPreadyTime = 0xc */
+	/* SlvDSPreadyTime = 24, MasDSPreadyTime = 12 */
 	__phy_write(phydev, 0x11, 0x671);
 	__phy_write(phydev, 0x12, 0xc);
 	__phy_write(phydev, 0x10, 0x8fae);
@@ -1165,6 +1347,8 @@
 
 	/* Adjust LDO output voltage */
 	phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_LDO_OUTPUT_V, 0x2222);
+
+	mt798x_phy_eee(phydev);
 }
 
 static int mt798x_phy_calibration(struct phy_device *phydev)
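The 0x11/0x12/0x10 write triplets used throughout the finetune/EEE code follow the PHY's token-ring access scheme: data low word to 0x11, data high word to 0x12, then the address/command word to 0x10, all while page MTK_PHY_PAGE_EXTENDED_52B5 is selected. A hedged sketch of wrapping that pattern in a helper (the helper itself is hypothetical, not part of the driver):

	/* Hypothetical wrapper around the 52b5 token-ring write sequence. */
	static void demo_tr_write(struct phy_device *phydev, u16 addr_cmd,
				  u16 data_lo, u16 data_hi)
	{
		__phy_write(phydev, 0x11, data_lo);
		__phy_write(phydev, 0x12, data_hi);
		__phy_write(phydev, 0x10, addr_cmd);
	}

	/* Usage, inside a phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5)
	 * ... phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0) section,
	 * e.g. "DfeTailEnableVgaThresh1000 = 27":
	 *
	 *	demo_tr_write(phydev, 0x8f80, 0x36, 0x0);
	 */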
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/include/net/ra_nat.h b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/include/net/ra_nat.h
index f5231ca..cfca603 100755
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/include/net/ra_nat.h
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/include/net/ra_nat.h
@@ -130,7 +130,7 @@
 	uint16_t rsv2:7;
 	u16 MAGIC_TAG_PROTECT;
 } __packed;
-#elif defined(CONFIG_MEDIATEK_NETSYS_V2)
+#elif defined(CONFIG_MEDIATEK_NETSYS_RX_V2)
 struct dmad_rx_descinfo4 {
 	uint32_t foe_entry_num:15;
 	uint32_t rsv0:3;
@@ -155,14 +155,15 @@
 struct dmad_rx_descinfo4 {
 	uint32_t foe_entry_num:14;
 	uint32_t CRSN:5;
-	uint32_t SPORT:3;
-	uint32_t rsv:1;
+	uint32_t SPORT:4;
 	uint32_t ALG:1;
 	uint32_t IF:8;
+	uint32_t ppe:1;
+	uint32_t rsv2:3;
 	uint32_t MAGIC_TAG_PROTECT: 16;
 	uint32_t WDMAID:8;
 	uint32_t RXID:2;
-	uint32_t WCID:8;
+	uint32_t WCID:10;
 	uint32_t BSSID:6;
 #if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
 	u16 SOURCE;
@@ -196,7 +197,7 @@
 #endif
 } __packed;
 
-#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+#if defined(CONFIG_MEDIATEK_NETSYS_RX_V2)
 struct head_rx_descinfo4 {
 	uint32_t foe_entry_num:14;
 	uint32_t CRSN:5;
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/flow_patch/9990-mt7622-backport-nf-hw-offload-framework-and-ups.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/flow_patch/9990-mt7622-backport-nf-hw-offload-framework-and-ups.patch
index 42ebe98..13b754e 100755
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/flow_patch/9990-mt7622-backport-nf-hw-offload-framework-and-ups.patch
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/flow_patch/9990-mt7622-backport-nf-hw-offload-framework-and-ups.patch
@@ -224,15 +224,14 @@
  };
  
  /* struct mtk_mac -	the structure that holds the info about the MACs of the
-@@ -1319,4 +1333,8 @@ int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
- void mtk_gdm_config(struct mtk_eth *eth, u32 config);
- void ethsys_reset(struct mtk_eth *eth, u32 reset_bits);
+@@ -1319,4 +1333,7 @@ int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
+ void mtk_usxgmii_reset(struct mtk_xgmii *ss, int mac_id);
+ int mtk_dump_usxgmii(struct regmap *pmap, char *name, u32 offset, u32 range);
  
 +int mtk_eth_offload_init(struct mtk_eth *eth);
 +int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
 +		     void *type_data);
-+
- int mtk_mac2xgmii_id(struct mtk_eth *eth, int mac_id);
+ void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
 diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
 new file mode 100644
 index 000000000..66298e223
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/flow_patch/9993-add-wed.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/flow_patch/9993-add-wed.patch
index b4f8e4e..04e4edc 100755
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/flow_patch/9993-add-wed.patch
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/flow_patch/9993-add-wed.patch
@@ -138,14 +138,6 @@
 index 819d8a0be..2121335a1
 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -9,6 +9,7 @@
- #include <linux/of_device.h>
- #include <linux/of_mdio.h>
- #include <linux/of_net.h>
-+#include <linux/of_address.h>
- #include <linux/mfd/syscon.h>
- #include <linux/regmap.h>
- #include <linux/clk.h>
 @@ -20,12 +21,14 @@
  #include <linux/pinctrl/devinfo.h>
  #include <linux/phylink.h>
@@ -161,84 +153,6 @@
  
  #if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
  #include "mtk_hnat/nf_hnat_mtk.h"
-@@ -1116,7 +1119,7 @@ static int mtk_init_fq_dma(struct mtk_et
- 	int i;
- 
- 	if (!eth->soc->has_sram) {
--		eth->scratch_ring = dma_alloc_coherent(eth->dev,
-+		eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
- 					       cnt * soc->txrx.txd_size,
- 					       &eth->phy_scratch_ring,
- 					       GFP_KERNEL);
-@@ -1134,10 +1137,10 @@ static int mtk_init_fq_dma(struct mtk_et
- 	if (unlikely(!eth->scratch_head))
- 		return -ENOMEM;
- 
--	dma_addr = dma_map_single(eth->dev,
-+	dma_addr = dma_map_single(eth->dma_dev,
- 				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
- 				  DMA_FROM_DEVICE);
--	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
-+	if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
- 		return -ENOMEM;
- 
- 	phy_ring_tail = eth->phy_scratch_ring +
-@@ -1201,26 +1204,26 @@ static void mtk_tx_unmap(struct mtk_eth
- {
- 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
- 		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
--			dma_unmap_single(eth->dev,
-+			dma_unmap_single(eth->dma_dev,
- 					 dma_unmap_addr(tx_buf, dma_addr0),
- 					 dma_unmap_len(tx_buf, dma_len0),
- 					 DMA_TO_DEVICE);
- 		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
--			dma_unmap_page(eth->dev,
-+			dma_unmap_page(eth->dma_dev,
- 				       dma_unmap_addr(tx_buf, dma_addr0),
- 				       dma_unmap_len(tx_buf, dma_len0),
- 				       DMA_TO_DEVICE);
- 		}
- 	} else {
- 		if (dma_unmap_len(tx_buf, dma_len0)) {
--			dma_unmap_page(eth->dev,
-+			dma_unmap_page(eth->dma_dev,
- 				       dma_unmap_addr(tx_buf, dma_addr0),
- 				       dma_unmap_len(tx_buf, dma_len0),
- 				       DMA_TO_DEVICE);
- 		}
- 
- 		if (dma_unmap_len(tx_buf, dma_len1)) {
--			dma_unmap_page(eth->dev,
-+			dma_unmap_page(eth->dma_dev,
- 				       dma_unmap_addr(tx_buf, dma_addr1),
- 				       dma_unmap_len(tx_buf, dma_len1),
- 				       DMA_TO_DEVICE);
-@@ -1454,9 +1457,9 @@ static int mtk_tx_map(struct sk_buff *sk
- 	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
- 	memset(itx_buf, 0, sizeof(*itx_buf));
- 
--	txd_info.addr = dma_map_single(eth->dev, skb->data, txd_info.size,
-+	txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
- 				       DMA_TO_DEVICE);
--	if (unlikely(dma_mapping_error(eth->dev, txd_info.addr)))
-+	if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
- 		return -ENOMEM;
- 
- 	mtk_tx_set_dma_desc(skb, dev, itxd, &txd_info);
-@@ -1497,10 +1500,10 @@ static int mtk_tx_map(struct sk_buff *sk
- 			txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
- 			txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
- 					!(frag_size - txd_info.size);
--			txd_info.addr = skb_frag_dma_map(eth->dev, frag,
-+			txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
- 							 offset, txd_info.size,
- 							 DMA_TO_DEVICE);
--			if (unlikely(dma_mapping_error(eth->dev, txd_info.addr)))
-+			if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
-  				goto err_dma;
- 
- 			mtk_tx_set_dma_desc(skb, dev, txd, &txd_info);
 @@ -1737,6 +1740,7 @@ static int mtk_poll_rx(struct napi_struc
  		struct net_device *netdev = NULL;
  		unsigned int pktlen;
@@ -247,30 +161,6 @@
  		int mac = 0;
  
  		if (eth->hwlro)
-@@ -1787,12 +1791,12 @@ static int mtk_poll_rx(struct napi_struc
- 			netdev->stats.rx_dropped++;
- 			goto release_desc;
- 		}
--		dma_addr = dma_map_single(eth->dev,
-+		dma_addr = dma_map_single(eth->dma_dev,
- 					  new_data + NET_SKB_PAD +
- 					  eth->ip_align,
- 					  ring->buf_size,
- 					  DMA_FROM_DEVICE);
--		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
-+		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
- 			skb_free_frag(new_data);
- 			netdev->stats.rx_dropped++;
- 			goto release_desc;
-@@ -1801,7 +1805,7 @@ static int mtk_poll_rx(struct napi_struc
- 		addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
- 			  ((u64)(trxd.rxd2 & 0xf)) << 32 : 0;
- 
--		dma_unmap_single(eth->dev,
-+		dma_unmap_single(eth->dma_dev,
- 				 (u64)(trxd.rxd1 | addr64),
- 				 ring->buf_size, DMA_FROM_DEVICE);
- 
 @@ -1827,6 +1831,17 @@ static int mtk_poll_rx(struct napi_struc
  			skb_checksum_none_assert(skb);
  		skb->protocol = eth_type_trans(skb, netdev);
@@ -289,91 +179,6 @@
  		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
  			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
  			    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
-@@ -2120,7 +2135,7 @@ static int mtk_tx_alloc(struct mtk_eth *
- 		goto no_tx_mem;
- 
- 	if (!eth->soc->has_sram)
--		ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
-+		ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
- 					       &ring->phys, GFP_KERNEL);
- 	else {
- 		ring->dma =  eth->scratch_ring + MTK_DMA_SIZE * sz;
-@@ -2154,7 +2169,7 @@ static int mtk_tx_alloc(struct mtk_eth *
- 	 * descriptors in ring->dma_pdma.
- 	 */
- 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
--		ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
-+		ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
- 						    &ring->phys_pdma, GFP_KERNEL);
- 		if (!ring->dma_pdma)
- 			goto no_tx_mem;
-@@ -2215,14 +2230,14 @@ static void mtk_tx_clean(struct mtk_eth
- 	}
- 
- 	if (!eth->soc->has_sram && ring->dma) {
--		dma_free_coherent(eth->dev,
-+		dma_free_coherent(eth->dma_dev,
- 				  MTK_DMA_SIZE * soc->txrx.txd_size,
- 				  ring->dma, ring->phys);
- 		ring->dma = NULL;
- 	}
- 
- 	if (ring->dma_pdma) {
--		dma_free_coherent(eth->dev,
-+		dma_free_coherent(eth->dma_dev,
- 				  MTK_DMA_SIZE * soc->txrx.txd_size,
- 				  ring->dma_pdma, ring->phys_pdma);
- 		ring->dma_pdma = NULL;
-@@ -2267,7 +2282,7 @@ static int mtk_rx_alloc(struct mtk_eth *
- 
- 	if ((!eth->soc->has_sram) || (eth->soc->has_sram
- 				&& (rx_flag != MTK_RX_FLAGS_NORMAL)))
--		ring->dma = dma_alloc_coherent(eth->dev,
-+		ring->dma = dma_alloc_coherent(eth->dma_dev,
- 					       rx_dma_size * eth->soc->txrx.rxd_size,
- 					       &ring->phys, GFP_KERNEL);
- 	else {
-@@ -2284,11 +2299,11 @@ static int mtk_rx_alloc(struct mtk_eth *
- 	for (i = 0; i < rx_dma_size; i++) {
- 		struct mtk_rx_dma_v2 *rxd;
- 
--		dma_addr_t dma_addr = dma_map_single(eth->dev,
-+		dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
- 				ring->data[i] + NET_SKB_PAD + eth->ip_align,
- 				ring->buf_size,
- 				DMA_FROM_DEVICE);
--		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
-+		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
- 			return -ENOMEM;
- 
- 		rxd = ring->dma + i * eth->soc->txrx.rxd_size;
-@@ -2360,7 +2375,7 @@ static void mtk_rx_clean(struct mtk_eth
- 					       MTK_8GB_ADDRESSING)) ?
- 				  ((u64)(rxd->rxd2 & 0xf)) << 32 : 0;
- 
--			dma_unmap_single(eth->dev,
-+			dma_unmap_single(eth->dma_dev,
- 					 (u64)(rxd->rxd1 | addr64),
- 					 ring->buf_size,
- 					 DMA_FROM_DEVICE);
-@@ -2374,7 +2389,7 @@ static void mtk_rx_clean(struct mtk_eth
- 		return;
- 
- 	if (ring->dma) {
--		dma_free_coherent(eth->dev,
-+		dma_free_coherent(eth->dma_dev,
- 				  ring->dma_size * eth->soc->txrx.rxd_size,
- 				  ring->dma,
- 				  ring->phys);
-@@ -2861,7 +2876,7 @@ static void mtk_dma_free(struct mtk_eth
- 		if (eth->netdev[i])
- 			netdev_reset_queue(eth->netdev[i]);
- 	if ( !eth->soc->has_sram && eth->scratch_ring) {
--		dma_free_coherent(eth->dev,
-+		dma_free_coherent(eth->dma_dev,
- 				  MTK_DMA_SIZE * soc->txrx.txd_size,
- 				  eth->scratch_ring, eth->phy_scratch_ring);
- 		eth->scratch_ring = NULL;
 @@ -3243,7 +3258,7 @@ static int mtk_stop(struct net_device *d
  	mtk_dma_free(eth);
  
@@ -383,87 +188,6 @@
  
  	return 0;
  }
-@@ -3320,6 +3335,8 @@ static int mtk_napi_init(struct mtk_eth
- 
- static int mtk_hw_init(struct mtk_eth *eth, u32 type)
- {
-+	u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
-+		       ETHSYS_DMA_AG_MAP_PPE;
- 	int i, ret = 0;
- 	u32 val;
- 
-@@ -3338,6 +3355,10 @@ static int mtk_hw_init(struct mtk_eth *e
- 			goto err_disable_pm;
- 	}
- 
-+	if (eth->ethsys)
-+		regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
-+				   of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
-+
- 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
- 		ret = device_reset(eth->dev);
- 		if (ret) {
-@@ -4091,6 +4112,35 @@ free_netdev:
- 	return err;
- }
- 
-+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
-+{
-+	struct net_device *dev, *tmp;
-+	LIST_HEAD(dev_list);
-+	int i;
-+
-+	rtnl_lock();
-+
-+	for (i = 0; i < MTK_MAC_COUNT; i++) {
-+		dev = eth->netdev[i];
-+
-+		if (!dev || !(dev->flags & IFF_UP))
-+			continue;
-+
-+		list_add_tail(&dev->close_list, &dev_list);
-+	}
-+
-+	dev_close_many(&dev_list, false);
-+
-+	eth->dma_dev = dma_dev;
-+
-+	list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
-+		list_del_init(&dev->close_list);
-+		dev_open(dev, NULL);
-+	}
-+
-+	rtnl_unlock();
-+}
-+
- static int mtk_probe(struct platform_device *pdev)
- {
- 	struct device_node *mac_np;
-@@ -4104,6 +4154,7 @@ static int mtk_probe(struct platform_dev
- 	eth->soc = of_device_get_match_data(&pdev->dev);
- 
- 	eth->dev = &pdev->dev;
-+	eth->dma_dev = &pdev->dev;
- 	eth->base = devm_platform_ioremap_resource(pdev, 0);
- 	if (IS_ERR(eth->base))
- 		return PTR_ERR(eth->base);
-@@ -4176,6 +4227,16 @@ static int mtk_probe(struct platform_dev
- 		}
- 	}
- 
-+	if (of_dma_is_coherent(pdev->dev.of_node)) {
-+		struct regmap *cci;
-+
-+		cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
-+						      "mediatek,cci-control");
-+		/* enable CPU/bus coherency */
-+		if (!IS_ERR(cci))
-+			regmap_write(cci, 0, 3);
-+	}
-+
- 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
- 		eth->xgmii = devm_kzalloc(eth->dev, sizeof(*eth->xgmii),
- 					  GFP_KERNEL);
 @@ -4217,6 +4278,22 @@ static int mtk_probe(struct platform_dev
  		}
  	}
@@ -518,35 +242,6 @@
  /* QDMA descriptor txd4 */
  #define TX_DMA_CHKSUM		(0x7 << 29)
  #define TX_DMA_TSO		BIT(28)
-@@ -773,6 +776,12 @@
- #define ETHSYS_FE_RST_CHK_IDLE_EN 	0x28
- 
- 
-+/* ethernet dma channel agent map */
-+#define ETHSYS_DMA_AG_MAP	0x408
-+#define ETHSYS_DMA_AG_MAP_PDMA	BIT(0)
-+#define ETHSYS_DMA_AG_MAP_QDMA	BIT(1)
-+#define ETHSYS_DMA_AG_MAP_PPE	BIT(2)
-+
- /* SGMII subsystem config registers */
- /* Register to auto-negotiation restart */
- #define SGMSYS_PCS_CONTROL_1	0x0
-@@ -1520,6 +1529,7 @@ struct mtk_phylink_priv {
- /* struct mtk_eth -	This is the main datasructure for holding the state
-  *			of the driver
-  * @dev:		The device pointer
-+ * @dev:		The device pointer used for dma mapping/alloc
-  * @base:		The mapped register i/o base
-  * @page_lock:		Make sure that register operations are atomic
-  * @tx_irq__lock:	Make sure that IRQ register operations are atomic
-@@ -1554,6 +1564,7 @@ struct mtk_phylink_priv {
- 
- struct mtk_eth {
- 	struct device			*dev;
-+	struct device			*dma_dev;
- 	void __iomem			*base;
- 	void __iomem			*sram_base;
- 	spinlock_t			page_lock;
 @@ -1596,7 +1607,7 @@ struct mtk_eth {
  	spinlock_t			syscfg0_lock;
  	struct timer_list		mtk_dma_monitor_timer;
@@ -556,15 +251,6 @@
  	struct rhashtable		flow_table;
  };
  
-@@ -1655,6 +1666,7 @@ void ethsys_reset(struct mtk_eth *eth, u
- int mtk_eth_offload_init(struct mtk_eth *eth);
- int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
- 		     void *type_data);
-+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
- 
- int mtk_mac2xgmii_id(struct mtk_eth *eth, int mac_id);
- int mtk_usxgmii_init(struct mtk_xgmii *ss, struct device_node *r);
-
 diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
 old mode 100644
 new mode 100755
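The hunks dropped from this patch (the eth->dma_dev conversion, ETHSYS_DMA_AG_MAP and mtk_eth_set_dma_device) now live in the base driver files above. mtk_eth_set_dma_device() lets another block such as WED take over DMA mapping: open netdevs are closed, eth->dma_dev is swapped, and the interfaces are reopened so the rings are re-allocated against the new device. A hedged usage sketch with a hypothetical caller (not the actual WED attach code):

	/* Hypothetical: a block that owns the coherent DMA path redirects
	 * the ethernet driver's mappings to its own device.
	 */
	static int demo_attach_dma_owner(struct mtk_eth *eth,
					 struct platform_device *pdev)
	{
		if (!of_dma_is_coherent(pdev->dev.of_node))
			return -EINVAL;

		/* Rings are torn down and rebuilt against pdev->dev. */
		mtk_eth_set_dma_device(eth, &pdev->dev);
		return 0;
	}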
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/flow_patch/9994-ethernet-update-ppe-from-mt7622-to-mt7986.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/flow_patch/9994-ethernet-update-ppe-from-mt7622-to-mt7986.patch
index 1bfb09a..8deac02 100755
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/flow_patch/9994-ethernet-update-ppe-from-mt7622-to-mt7986.patch
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/flow_patch/9994-ethernet-update-ppe-from-mt7622-to-mt7986.patch
@@ -46,14 +46,15 @@
  
  		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
  			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
-@@ -3926,12 +3937,13 @@ static const struct mtk_soc_data mt7986_data = {
+@@ -3926,13 +3937,14 @@ static const struct mtk_soc_data mt7986_data = {
  	.required_clks = MT7986_CLKS_BITMAP,
  	.required_pctl = false,
  	.has_sram = true,
 +	.offload_version = 2,
  	.txrx = {
  		.txd_size = sizeof(struct mtk_tx_dma_v2),
- 		.rxd_size = sizeof(struct mtk_rx_dma_v2),
+ 		.rxd_size = sizeof(struct mtk_rx_dma),
+ 		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
  		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
  		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
  	},
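With rx_dma_l4_valid carried in mtk_soc_data, the RX path can test the SoC-specific bit straight from eth->soc->txrx instead of a driver-global. A minimal sketch, assuming the usual convention that the bit lives in the rxd4 descriptor word:

	/* Illustrative: flag hardware-verified L4 checksums per SoC. */
	static void demo_rx_csum(struct mtk_eth *eth, struct sk_buff *skb,
				 u32 rxd4)
	{
		if (rxd4 & eth->soc->txrx.rx_dma_l4_valid)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);
	}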
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/flow_patch/9995-flow-offload-add-mkhnat-dual-ppe-new-v2.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/flow_patch/9995-flow-offload-add-mkhnat-dual-ppe-new-v2.patch
index cfb1ad6..850afe4 100755
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/flow_patch/9995-flow-offload-add-mkhnat-dual-ppe-new-v2.patch
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/flow_patch/9995-flow-offload-add-mkhnat-dual-ppe-new-v2.patch
@@ -165,21 +165,18 @@
  	struct rhashtable		flow_table;
  };
  
-@@ -1668,11 +1674,13 @@ int mtk_gmac_usxgmii_path_setup(struct m
- void mtk_gdm_config(struct mtk_eth *eth, u32 id, u32 config);
- void ethsys_reset(struct mtk_eth *eth, u32 reset_bits);
+@@ -1668,8 +1674,10 @@ int mtk_gmac_usxgmii_path_setup(struct m
+ void mtk_usxgmii_reset(struct mtk_xgmii *ss, int mac_id);
+ int mtk_dump_usxgmii(struct regmap *pmap, char *name, u32 offset, u32 range);
  
 -int mtk_eth_offload_init(struct mtk_eth *eth);
 +int mtk_eth_offload_init(struct mtk_eth *eth, int id);
  int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
  		     void *type_data);
  void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
- 
-+int mtk_ppe_debugfs_init(struct mtk_eth *eth);
 +
- int mtk_mac2xgmii_id(struct mtk_eth *eth, int mac_id);
- int mtk_usxgmii_init(struct mtk_xgmii *ss, struct device_node *r);
- int mtk_xfi_pextp_init(struct mtk_xgmii *ss, struct device_node *r);
++int mtk_ppe_debugfs_init(struct mtk_eth *eth);
+ #endif /* MTK_ETH_H */
 diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
 index d46e91178..3d6ff30ba 100755
 --- a/drivers/net/ethernet/mediatek/mtk_ppe.c
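mtk_eth_offload_init() now takes a PPE index so each instance sets up its own offload state, with mtk_ppe_debugfs_init() exposing them afterwards. A hedged sketch of how a probe path might walk the instances (the loop is hypothetical, not the actual probe code):

	/* Hypothetical: initialize flow offload for every available PPE. */
	static int demo_init_all_ppe(struct mtk_eth *eth)
	{
		int i, err;

		for (i = 0; i < eth->ppe_num; i++) {
			err = mtk_eth_offload_init(eth, i);
			if (err)
				return err;
		}

		return mtk_ppe_debugfs_init(eth);
	}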
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/flow_patch/9999-3-flow-offload-add-mtkhnat-qdma-qos.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/flow_patch/9999-3-flow-offload-add-mtkhnat-qdma-qos.patch
index d2e6ac2..570fbcd 100644
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/flow_patch/9999-3-flow-offload-add-mtkhnat-qdma-qos.patch
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/flow_patch/9999-3-flow-offload-add-mtkhnat-qdma-qos.patch
@@ -12,10 +12,10 @@
  ifdef CONFIG_DEBUG_FS
  mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
 diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-index efdd2e6..9ffc46b 100644
+index ca76047..809c735 100644
 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -3992,6 +3992,8 @@ static int mtk_probe(struct platform_device *pdev)
+@@ -4787,6 +4787,8 @@ static int mtk_probe(struct platform_device *pdev)
  		}
  
  		mtk_ppe_debugfs_init(eth);
@@ -24,15 +24,15 @@
  	}
  
  	for (i = 0; i < MTK_MAX_DEVS; i++) {
-@@ -4101,6 +4103,7 @@ static const struct mtk_soc_data mt2701_data = {
- 		.rxd_size = sizeof(struct mtk_rx_dma),
+@@ -4901,6 +4903,7 @@ static const struct mtk_soc_data mt2701_data = {
+ 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
  		.dma_max_len = MTK_TX_DMA_BUF_LEN,
  		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
 +		.qdma_tx_sch = 2,
  	},
  };
  
-@@ -4118,6 +4121,7 @@ static const struct mtk_soc_data mt7621_data = {
+@@ -4920,6 +4923,7 @@ static const struct mtk_soc_data mt7621_data = {
  		.rxd_size = sizeof(struct mtk_rx_dma),
  		.dma_max_len = MTK_TX_DMA_BUF_LEN,
  		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
@@ -40,40 +40,48 @@
  	},
  };
  
-@@ -4136,6 +4140,7 @@ static const struct mtk_soc_data mt7622_data = {
- 		.rxd_size = sizeof(struct mtk_rx_dma),
+@@ -4940,6 +4944,7 @@ static const struct mtk_soc_data mt7622_data = {
+ 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
  		.dma_max_len = MTK_TX_DMA_BUF_LEN,
  		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
 +		.qdma_tx_sch = 2,
  	},
  };
  
-@@ -4153,6 +4158,7 @@ static const struct mtk_soc_data mt7623_data = {
- 		.rxd_size = sizeof(struct mtk_rx_dma),
+@@ -4959,6 +4964,7 @@ static const struct mtk_soc_data mt7623_data = {
+ 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
  		.dma_max_len = MTK_TX_DMA_BUF_LEN,
  		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
 +		.qdma_tx_sch = 2,
  	},
  };
  
-@@ -4187,6 +4193,7 @@ static const struct mtk_soc_data mt7986_data = {
- 		.rxd_size = sizeof(struct mtk_rx_dma_v2),
+@@ -4997,6 +5003,7 @@ static const struct mtk_soc_data mt7986_data = {
+ 		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
  		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
  		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
 +		.qdma_tx_sch = 4,
  	},
  };
  
-@@ -4205,6 +4212,7 @@ static const struct mtk_soc_data mt7981_data = {
- 		.rxd_size = sizeof(struct mtk_rx_dma_v2),
+@@ -5017,6 +5024,7 @@ static const struct mtk_soc_data mt7981_data = {
+ 		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
  		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
  		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
 +		.qdma_tx_sch = 4,
  	},
  };
  
-@@ -4220,6 +4228,7 @@ static const struct mtk_soc_data rt5350_data = {
- 		.rxd_size = sizeof(struct mtk_rx_dma),
+@@ -5034,6 +5042,7 @@ static const struct mtk_soc_data mt7988_data = {
+ 		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ 		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
++		.qdma_tx_sch = 4,
+ 	},
+ };
+ 
+@@ -5051,6 +5060,7 @@ static const struct mtk_soc_data rt5350_data = {
+ 		.rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
  		.dma_max_len = MTK_TX_DMA_BUF_LEN,
  		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
 +		.qdma_tx_sch = 4,
@@ -81,10 +89,10 @@
  };
  
 diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-index c87a823..955bb27 100644
+index c6afff5..bd73c27 100644
 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-@@ -352,10 +352,21 @@
+@@ -385,10 +385,21 @@
  
  /* QDMA TX Queue Configuration Registers */
  #define MTK_QTX_CFG(x)		(QDMA_BASE + (x * 0x10))
@@ -106,7 +114,7 @@
  
  /* QDMA RX Base Pointer Register */
  #define MTK_QRX_BASE_PTR0	(QDMA_BASE + 0x100)
-@@ -373,7 +384,9 @@
+@@ -406,7 +417,9 @@
  #define MTK_QRX_DRX_IDX0	(QDMA_BASE + 0x10c)
  
  /* QDMA Page Configuration Register */
@@ -117,7 +125,7 @@
  
  /* QDMA Global Configuration Register */
  #define MTK_QDMA_GLO_CFG	(QDMA_BASE + 0x204)
-@@ -410,6 +423,9 @@
+@@ -443,6 +456,9 @@
  #define FC_THRES_DROP_EN	(7 << 16)
  #define FC_THRES_MIN		0x4444
  
@@ -126,8 +134,8 @@
 +
  /* QDMA Interrupt Status Register */
  #define MTK_QDMA_INT_STATUS	(QDMA_BASE + 0x218)
- #if defined(CONFIG_MEDIATEK_NETSYS_V2)
-@@ -444,6 +460,11 @@
+ #if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
+@@ -478,6 +494,11 @@
  /* QDMA Interrupt Mask Register */
  #define MTK_QDMA_HRED2		(QDMA_BASE + 0x244)
  
@@ -139,7 +147,7 @@
  /* QDMA TX Forward CPU Pointer Register */
  #define MTK_QTX_CTX_PTR		(QDMA_BASE +0x300)
  
-@@ -471,6 +492,14 @@
+@@ -505,6 +526,14 @@
  /* QDMA FQ Free Page Buffer Length Register */
  #define MTK_QDMA_FQ_BLEN	(QDMA_BASE +0x32c)
  
@@ -152,17 +160,17 @@
 +#define MTK_QDMA_TX_SCH_RATE_EXP	GENMASK(3, 0)
 +
  /* WDMA Registers */
+ #define MTK_WDMA_CTX_PTR(x)	(WDMA_BASE(x) + 0x8)
  #define MTK_WDMA_DTX_PTR(x)	(WDMA_BASE(x) + 0xC)
- #define MTK_WDMA_GLO_CFG(x)	(WDMA_BASE(x) + 0x204)
-@@ -1223,6 +1252,7 @@ struct mtk_soc_data {
- 		u32	rxd_size;
+@@ -1596,6 +1625,7 @@ struct mtk_soc_data {
+ 		u32	rx_dma_l4_valid;
  		u32	dma_max_len;
  		u32	dma_len_offset;
 +		u32	qdma_tx_sch;
  	} txrx;
  };
  
-@@ -1353,6 +1383,7 @@ struct mtk_eth {
+@@ -1736,6 +1766,7 @@ struct mtk_eth {
  	spinlock_t			syscfg0_lock;
  	struct timer_list		mtk_dma_monitor_timer;
  
@@ -170,13 +178,13 @@
  	u8				ppe_num;
  	struct mtk_ppe			*ppe[MTK_MAX_PPE_NUM];
  	struct rhashtable		flow_table;
-@@ -1412,4 +1443,6 @@ void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
+@@ -1815,4 +1846,6 @@ int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
  
  int mtk_ppe_debugfs_init(struct mtk_eth *eth);
- 
-+int mtk_qdma_debugfs_init(struct mtk_eth *eth);
 +
- int mtk_mac2xgmii_id(struct mtk_eth *eth, int mac_id);
++int mtk_qdma_debugfs_init(struct mtk_eth *eth);
+ #endif /* MTK_ETH_H */
 diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
 index a49275f..1767823 100755
 --- a/drivers/net/ethernet/mediatek/mtk_ppe.c
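The new MTK_QDMA_TX_SCH_RATE_MAN/_EXP fields and the per-SoC qdma_tx_sch count point at a mantissa/exponent shaper per TX scheduler. A sketch of packing a rate under the assumption that the hardware interprets it as mantissa * 10^exponent (that interpretation, and the helper itself, are assumptions rather than something this patch states):

	#include <linux/bitfield.h>

	/* Assumed encoding: rate ~= mantissa * 10^exponent in the
	 * scheduler's base unit; enable/WFQ bits are left to the caller.
	 */
	static u32 demo_qdma_rate_encode(u32 rate)
	{
		u32 exp = 0;

		while (rate > FIELD_MAX(MTK_QDMA_TX_SCH_RATE_MAN) && exp < 15) {
			rate /= 10;
			exp++;
		}

		return FIELD_PREP(MTK_QDMA_TX_SCH_RATE_MAN, rate) |
		       FIELD_PREP(MTK_QDMA_TX_SCH_RATE_EXP, exp);
	}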
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/flow_patch/9999-6-ethernet-update-ppe-from-mt7986-to-mt7988.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/flow_patch/9999-6-ethernet-update-ppe-from-mt7986-to-mt7988.patch
index e590ce2..1a13d74 100644
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/flow_patch/9999-6-ethernet-update-ppe-from-mt7986-to-mt7988.patch
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/flow_patch/9999-6-ethernet-update-ppe-from-mt7986-to-mt7988.patch
@@ -34,7 +34,7 @@
  						   2, eth->soc->hash_way, i,
  						   eth->soc->has_accounting);
  			if (!eth->ppe[i]) {
-@@ -4626,11 +4626,15 @@ static const struct mtk_soc_data mt7988_data = {
+@@ -4626,13 +4626,16 @@ static const struct mtk_soc_data mt7988_data = {
  	.required_clks = MT7988_CLKS_BITMAP,
  	.required_pctl = false,
  	.has_sram = true,
@@ -44,9 +44,10 @@
  	.txrx = {
  		.txd_size = sizeof(struct mtk_tx_dma_v2),
  		.rxd_size = sizeof(struct mtk_rx_dma_v2),
+ 		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
  		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
  		.dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
-+		.qdma_tx_sch = 4,
+ 		.qdma_tx_sch = 4,
  	},
  };
  
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/754-net-phy-add-5GBASER.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/754-net-phy-add-5GBASER.patch
index a449bcc..8165303 100644
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/754-net-phy-add-5GBASER.patch
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/754-net-phy-add-5GBASER.patch
@@ -1,3 +1,25 @@
+diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
+index daed73a..7d080d5 100644
+--- a/drivers/net/phy/marvell10g.c
++++ b/drivers/net/phy/marvell10g.c
+@@ -516,6 +516,7 @@ static void mv3310_update_interface(struct phy_device *phydev)
+ 
+ 	if ((phydev->interface == PHY_INTERFACE_MODE_SGMII ||
+ 	     phydev->interface == PHY_INTERFACE_MODE_2500BASEX ||
++	     phydev->interface == PHY_INTERFACE_MODE_5GBASER ||
+ 	     phydev->interface == PHY_INTERFACE_MODE_10GKR) && phydev->link) {
+ 		/* The PHY automatically switches its serdes interface (and
+ 		 * active PHYXS instance) between Cisco SGMII, 10GBase-KR and
+@@ -527,6 +528,9 @@ static void mv3310_update_interface(struct phy_device *phydev)
+ 		case SPEED_10000:
+ 			phydev->interface = PHY_INTERFACE_MODE_10GKR;
+ 			break;
++		case SPEED_5000:
++			phydev->interface = PHY_INTERFACE_MODE_5GBASER;
++			break;
+ 		case SPEED_2500:
+ 			phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ 			break;
 diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
 index b3f25a9..6a38a1c 100644
 --- a/drivers/net/phy/phylink.c
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/755-net-phy-sfp-add-rollball-support.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/755-net-phy-sfp-add-rollball-support.patch
index ee68b4e..6233046 100644
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/755-net-phy-sfp-add-rollball-support.patch
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/755-net-phy-sfp-add-rollball-support.patch
@@ -370,6 +370,140 @@
 +			       enum mdio_i2c_proto protocol);
  
  #endif
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index f360d92..67f34ed 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -483,62 +483,105 @@ static void phylink_resolve(struct work_struct *w)
+ 	struct phylink *pl = container_of(w, struct phylink, resolve);
+ 	struct phylink_link_state link_state;
+ 	struct net_device *ndev = pl->netdev;
+-	int link_changed;
++	bool mac_config = false;
++	bool retrigger = false;
++	bool cur_link_state;
+ 
+ 	mutex_lock(&pl->state_mutex);
++	if (pl->netdev)
++		cur_link_state = netif_carrier_ok(ndev);
++	else
++		cur_link_state = pl->old_link_state;
++
+ 	if (pl->phylink_disable_state) {
+ 		pl->mac_link_dropped = false;
+ 		link_state.link = false;
+ 	} else if (pl->mac_link_dropped) {
+ 		link_state.link = false;
++		retrigger = true;
+ 	} else {
+ 		switch (pl->cur_link_an_mode) {
+ 		case MLO_AN_PHY:
+ 			link_state = pl->phy_state;
+ 			phylink_resolve_flow(pl, &link_state);
+-			phylink_mac_config_up(pl, &link_state);
++			mac_config = link_state.link;
+ 			break;
+ 
+ 		case MLO_AN_FIXED:
+ 			phylink_get_fixed_state(pl, &link_state);
+-			phylink_mac_config_up(pl, &link_state);
++			mac_config = link_state.link;
+ 			break;
+ 
+ 		case MLO_AN_INBAND:
+ 			phylink_get_mac_state(pl, &link_state);
+ 
++			/* The PCS may have a latching link-fail indicator.
++			 * If the link was up, bring the link down and
++			 * re-trigger the resolve. Otherwise, re-read the
++			 * PCS state to get the current status of the link.
++			 */
++			if (!link_state.link) {
++				if (cur_link_state)
++					retrigger = true;
++				else
++					phylink_get_mac_state(pl,
++							      &link_state);
++			}
++
+ 			/* If we have a phy, the "up" state is the union of
+-			 * both the PHY and the MAC */
++			 * both the PHY and the MAC
++			 */
+ 			if (pl->phydev)
+ 				link_state.link &= pl->phy_state.link;
+ 
+ 			/* Only update if the PHY link is up */
+ 			if (pl->phydev && pl->phy_state.link) {
++				/* If the interface has changed, force a
++				 * link down event if the link isn't already
++				 * down, and re-resolve.
++				 */
++				if (link_state.interface !=
++				    pl->phy_state.interface) {
++					retrigger = true;
++					link_state.link = false;
++				}
+ 				link_state.interface = pl->phy_state.interface;
+ 
+ 				/* If we have a PHY, we need to update with
+-				 * the pause mode bits. */
+-				link_state.pause |= pl->phy_state.pause;
+-				phylink_resolve_flow(pl, &link_state);
+-				phylink_mac_config(pl, &link_state);
++				 * the PHY flow control bits.
++				 */
++				link_state.pause = pl->phy_state.pause;
++				mac_config = true;
+ 			}
++			phylink_resolve_flow(pl, &link_state);
+ 			break;
+ 		}
+ 	}
+ 
+-	if (pl->netdev)
+-		link_changed = (link_state.link != netif_carrier_ok(ndev));
+-	else
+-		link_changed = (link_state.link != pl->old_link_state);
++	if (mac_config) {
++		if (link_state.interface != pl->link_config.interface) {
++			/* The interface has changed, force the link down and
++			 * then reconfigure.
++			 */
++			if (cur_link_state) {
++				phylink_mac_link_down(pl);
++				cur_link_state = false;
++			}
++			phylink_mac_config(pl, &link_state);
++			pl->link_config.interface = link_state.interface;
++		} else {
++			phylink_mac_config(pl, &link_state);
++		}
++	}
+ 
+-	if (link_changed) {
++	if (link_state.link != cur_link_state) {
+ 		pl->old_link_state = link_state.link;
+ 		if (!link_state.link)
+ 			phylink_mac_link_down(pl);
+ 		else
+ 			phylink_mac_link_up(pl, link_state);
+ 	}
+-	if (!link_state.link && pl->mac_link_dropped) {
++	if (!link_state.link && retrigger) {
+ 		pl->mac_link_dropped = false;
+ 		queue_work(system_power_efficient_wq, &pl->resolve);
+ 	}
+@@ -1014,7 +1057,8 @@ void phylink_start(struct phylink *pl)
+ 		if (irq <= 0)
+ 			mod_timer(&pl->link_poll, jiffies + HZ);
+ 	}
+-	if (pl->cfg_link_an_mode == MLO_AN_FIXED && pl->get_fixed_state)
++	if ((pl->cfg_link_an_mode == MLO_AN_FIXED && pl->get_fixed_state) ||
++	    (pl->cfg_link_an_mode == MLO_AN_INBAND))
+ 		mod_timer(&pl->link_poll, jiffies + HZ);
+ 	if (pl->phydev)
+ 		phy_start(pl->phydev);
 diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
 index 42f0441..0d5ac2a 100644
 --- a/drivers/net/phy/sfp-bus.c
@@ -552,7 +686,7 @@
  
  #if IS_ENABLED(CONFIG_HWMON)
  	struct sfp_diag diag;
-@@ -303,6 +313,135 @@ static const struct of_device_id sfp_of_match[] = {
+@@ -303,6 +313,136 @@ static const struct of_device_id sfp_of_match[] = {
  };
  MODULE_DEVICE_TABLE(of, sfp_of_match);
  
@@ -636,6 +770,7 @@
 +
 +	SFP_QUIRK_M("UBNT", "UF-INSTANT", sfp_quirk_ubnt_uf_instant),
 +
++	SFP_QUIRK_F("ETU", "ESP-T5-R", sfp_fixup_rollball_cc),
 +	SFP_QUIRK_F("OEM", "SFP-10G-T", sfp_fixup_rollball_cc),
 +	SFP_QUIRK_F("OEM", "RTSFP-10", sfp_fixup_rollball_cc),
 +	SFP_QUIRK_F("OEM", "RTSFP-10G", sfp_fixup_rollball_cc),