[][Add initial mtk feed for OpenWRT v21.02]

[Description]
Add initial mtk feed for OpenWRT v21.02

[Release-log]
N/A

Change-Id: I8051c6ba87f1ccf26c02fdd88a17d66f63c0b101
Reviewed-on: https://gerrit.mediatek.inc/c/openwrt/feeds/mtk_openwrt_feeds/+/4495320
diff --git a/feed/mii_mgr/Makefile b/feed/mii_mgr/Makefile
new file mode 100755
index 0000000..166e3f5
--- /dev/null
+++ b/feed/mii_mgr/Makefile
@@ -0,0 +1,36 @@
+#
+# hua.shao@mediatek.com
+#
+# MTK Proprietary Software.
+#
+
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=mii_mgr
+PKG_RELEASE:=1
+
+PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)
+
+include $(INCLUDE_DIR)/package.mk
+include $(INCLUDE_DIR)/kernel.mk
+
+define Package/mii_mgr
+  SECTION:=MTK Properties
+  CATEGORY:=MTK Properties
+  TITLE:=mii_mgr/mii_mgr_cl45
+  SUBMENU:=Applications
+endef
+
+define Package/mii_mgr/description
+  An mdio r/w phy regs program.
+endef
+
+define Package/mii_mgr/install
+	$(INSTALL_DIR) $(1)/usr/sbin
+	$(INSTALL_BIN) $(PKG_BUILD_DIR)/mii_mgr $(1)/usr/sbin
+	$(INSTALL_BIN) $(PKG_BUILD_DIR)/mii_mgr $(1)/usr/sbin/mii_mgr_cl45
+endef
+
+
+$(eval $(call BuildPackage,mii_mgr))
+
diff --git a/feed/mii_mgr/src/Makefile b/feed/mii_mgr/src/Makefile
new file mode 100644
index 0000000..55d6a6f
--- /dev/null
+++ b/feed/mii_mgr/src/Makefile
@@ -0,0 +1,16 @@
+EXEC = mii_mgr
+
+CFLAGS += -Wall -Werror
+
+all: $(EXEC)
+
+# Link step; $(CFLAGS) is passed so target-specific flags also apply at link time.
+mii_mgr: mii_mgr.o
+	$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $^
+
+romfs:
+	$(ROMFSINST) /bin/mii_mgr
+
+clean:
+	-rm -f $(EXEC) *.elf *.gdb *.o
+
diff --git a/feed/mii_mgr/src/mii_mgr.c b/feed/mii_mgr/src/mii_mgr.c
new file mode 100644
index 0000000..34cf8d5
--- /dev/null
+++ b/feed/mii_mgr/src/mii_mgr.c
@@ -0,0 +1,131 @@
+#include <errno.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <sys/ioctl.h>
+#include <net/if.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+#include <linux/ethtool.h>
+#include <linux/mdio.h>
+#include <linux/sockios.h>
+
+void show_usage(void)
+{
+	printf("mii_mgr -g -i [ifname] -p [phy number] -r [register number]\n");
+	printf("  Get: mii_mgr -g -p 3 -r 4\n\n");
+	printf("mii_mgr -s -p [phy number] -r [register number] -v [0xvalue]\n");
+	printf("  Set: mii_mgr -s -p 4 -r 1 -v 0xff11\n");
+	printf("#NOTE: Without -i , eth0 is default ifname!\n");
+	printf("----------------------------------------------------------------------------------------\n");
+	printf("Get: mii_mgr_cl45 -g -p [port number] -d [dev number] -r [register number]\n");
+	printf("Example: mii_mgr_cl45 -g -p 3 -d 0x5 -r 0x4\n\n");
+	printf("Set: mii_mgr_cl45 -s -p [port number] -d [dev number] -r [register number] -v [value]\n");
+	printf("Example: mii_mgr_cl45 -s -p 4 -d 0x6 -r 0x1 -v 0xff11\n\n");
+}
+
+/* One SIOCxMIIREG ioctl on @ifname; *val is in/out. Returns 0 or -errno. */
+static int __phy_op(char *ifname, uint16_t phy_id, uint16_t reg_num, uint16_t *val, int cmd)
+{
+	static int sd = -1;
+	struct ifreq ifr;
+	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)(&ifr.ifr_data);
+	int err;
+
+	if (sd < 0)
+		sd = socket(AF_INET, SOCK_DGRAM, 0);
+	if (sd < 0)
+		return -errno;
+	memset(&ifr, 0, sizeof(ifr));
+	/* sizeof-1 keeps ifr_name NUL-terminated for the kernel. */
+	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name) - 1);
+
+	mii->phy_id  = phy_id;
+	mii->reg_num = reg_num;
+	mii->val_in  = *val;
+	mii->val_out = 0;
+
+	err = ioctl(sd, cmd, &ifr);
+	if (err)
+		return -errno;
+
+	*val = mii->val_out;
+	return 0;
+}
+
+int main(int argc, char *argv[])
+{
+	int opt, ret;
+	char options[] = "gsi:p:d:r:v:?t";
+	int is_write = 0, is_cl45 = 0;
+	unsigned int port = 0, dev = 0, reg_num = 0, val = 0;
+	char ifname[IFNAMSIZ] = "eth0";
+	uint16_t phy_id = 0;
+	uint16_t val16;
+	if (argc < 6) {
+		show_usage();
+		return 0;
+	}
+
+	while ((opt = getopt(argc, argv, options)) != -1) {
+		switch (opt) {
+			case 'g':
+				is_write = 0;
+				break;
+			case 's':
+				is_write = 1;
+				break;
+			case 'i':
+				/* IFNAMSIZ-1 leaves room for the NUL terminator. */
+				strncpy(ifname, optarg, IFNAMSIZ - 1);
+				ifname[IFNAMSIZ - 1] = '\0';
+				break;
+			case 'p':
+				port = strtoul(optarg, NULL, 16);
+				break;
+			case 'd':
+				dev = strtoul(optarg, NULL, 16);
+				is_cl45 = 1;
+				break;
+			case 'r':
+				reg_num = strtoul(optarg, NULL, 16);
+				break;
+			case 'v':
+				val = strtoul(optarg, NULL, 16);
+				break;
+			case '?':
+				show_usage();
+				break;
+		}
+	}
+	if (is_cl45)
+		phy_id = mdio_phy_id_c45(port, dev);
+	else
+		phy_id = port;
+	/* Use a real uint16_t; casting &val would misread on big-endian. */
+	val16 = (uint16_t)val;
+	ret = __phy_op(ifname, phy_id, reg_num, &val16, is_write ? SIOCSMIIREG : SIOCGMIIREG);
+	if (ret) {
+		fprintf(stderr, "mii ioctl failed: %s\n", strerror(-ret));
+		return 1;
+	}
+	if (!is_write)
+		val = val16;
+	if (is_write) {
+		if (is_cl45)
+			printf("Set: port%x dev%Xh_reg%Xh = 0x%04X\n",port, dev, reg_num, val);
+		else
+			printf("Set: phy[%x].reg[%x] = %04x\n",port, reg_num, val);
+	} else {
+		if (is_cl45)
+			printf("Get: port%x dev%Xh_reg%Xh = 0x%04X\n",port, dev, reg_num, val);
+		else
+			printf("Get: phy[%x].reg[%x] = %04x\n",port, reg_num, val);
+	}
+	return 0;
+}
diff --git a/feed/mt76-vendor/Makefile b/feed/mt76-vendor/Makefile
new file mode 100644
index 0000000..7436e0d
--- /dev/null
+++ b/feed/mt76-vendor/Makefile
@@ -0,0 +1,29 @@
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=mt76-vendor
+PKG_RELEASE=1
+
+PKG_LICENSE:=GPLv2
+PKG_LICENSE_FILES:=
+
+include $(INCLUDE_DIR)/package.mk
+include $(INCLUDE_DIR)/cmake.mk
+
+CMAKE_SOURCE_DIR:=$(PKG_BUILD_DIR)
+CMAKE_BINARY_DIR:=$(PKG_BUILD_DIR)
+
+define Package/mt76-vendor
+  SECTION:=devel
+  CATEGORY:=Development
+  TITLE:=vendor cmd for mt76
+  DEPENDS:=+libnl-tiny
+endef
+
+TARGET_CFLAGS += -I$(STAGING_DIR)/usr/include/libnl-tiny
+
+define Package/mt76-vendor/install
+	mkdir -p $(1)/usr/sbin
+	$(INSTALL_BIN) $(PKG_BUILD_DIR)/mt76-vendor $(1)/usr/sbin
+endef
+
+$(eval $(call BuildPackage,mt76-vendor))
diff --git a/feed/mt76-vendor/src/CMakeLists.txt b/feed/mt76-vendor/src/CMakeLists.txt
new file mode 100644
index 0000000..97059d0
--- /dev/null
+++ b/feed/mt76-vendor/src/CMakeLists.txt
@@ -0,0 +1,13 @@
+cmake_minimum_required(VERSION 2.8)
+
+PROJECT(mt76-vendor C)
+ADD_DEFINITIONS(-Os -Wall --std=gnu99 -g3)
+
+ADD_EXECUTABLE(mt76-vendor main.c)
+TARGET_LINK_LIBRARIES(mt76-vendor nl-tiny)
+
+SET(CMAKE_INSTALL_PREFIX /usr)
+
+INSTALL(TARGETS mt76-vendor
+	RUNTIME DESTINATION sbin
+)
diff --git a/feed/mt76-vendor/src/main.c b/feed/mt76-vendor/src/main.c
new file mode 100644
index 0000000..188a151
--- /dev/null
+++ b/feed/mt76-vendor/src/main.c
@@ -0,0 +1,355 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2021 Mediatek Inc. */
+#define _GNU_SOURCE
+
+#include <errno.h>
+#include <net/if.h>
+
+#include "mt76-vendor.h"
+
+struct unl unl;
+static const char *progname;
+struct csi_data *csi;
+int csi_idx;
+
+static struct nla_policy csi_ctrl_policy[NUM_MTK_VENDOR_ATTRS_CSI_CTRL] = {
+	[MTK_VENDOR_ATTR_CSI_CTRL_CFG] = { .type = NLA_NESTED },
+	[MTK_VENDOR_ATTR_CSI_CTRL_CFG_MODE] = { .type = NLA_U8 },
+	[MTK_VENDOR_ATTR_CSI_CTRL_CFG_TYPE] = { .type = NLA_U8 },
+	[MTK_VENDOR_ATTR_CSI_CTRL_CFG_VAL1] = { .type = NLA_U8 },
+	[MTK_VENDOR_ATTR_CSI_CTRL_CFG_VAL2] = { .type = NLA_U8 },
+	[MTK_VENDOR_ATTR_CSI_CTRL_MAC_ADDR] = { .type = NLA_NESTED },
+	[MTK_VENDOR_ATTR_CSI_CTRL_DUMP_NUM] = { .type = NLA_U16 },
+	[MTK_VENDOR_ATTR_CSI_CTRL_DATA] = { .type = NLA_NESTED },
+};
+
+static struct nla_policy csi_data_policy[NUM_MTK_VENDOR_ATTRS_CSI_DATA] = {
+	[MTK_VENDOR_ATTR_CSI_DATA_VER] = { .type = NLA_U8 },
+	[MTK_VENDOR_ATTR_CSI_DATA_TS] = { .type = NLA_U64 },
+	[MTK_VENDOR_ATTR_CSI_DATA_RSSI] = { .type = NLA_U8 },
+	[MTK_VENDOR_ATTR_CSI_DATA_SNR] = { .type = NLA_U8 },
+	[MTK_VENDOR_ATTR_CSI_DATA_BW] = { .type = NLA_U8 },
+	[MTK_VENDOR_ATTR_CSI_DATA_CH_IDX] = { .type = NLA_U8 },
+	[MTK_VENDOR_ATTR_CSI_DATA_TA] = { .type = NLA_NESTED },
+	[MTK_VENDOR_ATTR_CSI_DATA_I] = { .type = NLA_NESTED },
+	[MTK_VENDOR_ATTR_CSI_DATA_Q] = { .type = NLA_NESTED },
+	[MTK_VENDOR_ATTR_CSI_DATA_INFO] = { .type = NLA_U32 },
+	[MTK_VENDOR_ATTR_CSI_DATA_TX_ANT] = { .type = NLA_U8 },
+	[MTK_VENDOR_ATTR_CSI_DATA_RX_ANT] = { .type = NLA_U8 },
+	[MTK_VENDOR_ATTR_CSI_DATA_MODE] = { .type = NLA_U8 },
+	[MTK_VENDOR_ATTR_CSI_DATA_H_IDX] = { .type = NLA_U32 },
+};
+
+void usage(void)
+{
+	static const char *const commands[] = {
+		"set csi_ctrl=",
+		"dump <packet num> <filename>",
+	};
+	int i;
+
+	fprintf(stderr, "Usage:\n");
+	for (i = 0; i < ARRAY_SIZE(commands); i++)
+		printf("  %s wlanX %s\n", progname, commands[i]);
+
+	exit(1);
+}
+
+/* Netlink callback: parse one CSI dump reply into csi[csi_idx]. */
+static int mt76_dump_cb(struct nl_msg *msg, void *arg)
+{
+	struct nlattr *tb[NUM_MTK_VENDOR_ATTRS_CSI_CTRL];
+	struct nlattr *tb_data[NUM_MTK_VENDOR_ATTRS_CSI_DATA];
+	struct nlattr *attr;
+	struct nlattr *cur;
+	int rem, idx;
+	struct csi_data *c = &csi[csi_idx];
+	attr = unl_find_attr(&unl, msg, NL80211_ATTR_VENDOR_DATA);
+	if (!attr) {
+		fprintf(stderr, "Testdata attribute not found\n");
+		return NL_SKIP;
+	}
+
+	nla_parse_nested(tb, MTK_VENDOR_ATTR_CSI_CTRL_MAX,
+			 attr, csi_ctrl_policy);
+	if (!tb[MTK_VENDOR_ATTR_CSI_CTRL_DATA])
+		return NL_SKIP;
+	nla_parse_nested(tb_data, MTK_VENDOR_ATTR_CSI_DATA_MAX,
+			 tb[MTK_VENDOR_ATTR_CSI_CTRL_DATA], csi_data_policy);
+	if (!(tb_data[MTK_VENDOR_ATTR_CSI_DATA_VER] &&
+	      tb_data[MTK_VENDOR_ATTR_CSI_DATA_TS] &&
+	      tb_data[MTK_VENDOR_ATTR_CSI_DATA_RSSI] &&
+	      tb_data[MTK_VENDOR_ATTR_CSI_DATA_SNR] &&
+	      tb_data[MTK_VENDOR_ATTR_CSI_DATA_BW] &&
+	      tb_data[MTK_VENDOR_ATTR_CSI_DATA_CH_IDX] &&
+	      tb_data[MTK_VENDOR_ATTR_CSI_DATA_TA] &&
+	      tb_data[MTK_VENDOR_ATTR_CSI_DATA_I] &&
+	      tb_data[MTK_VENDOR_ATTR_CSI_DATA_Q] &&
+	      tb_data[MTK_VENDOR_ATTR_CSI_DATA_INFO] &&
+	      tb_data[MTK_VENDOR_ATTR_CSI_DATA_TX_ANT] &&
+	      tb_data[MTK_VENDOR_ATTR_CSI_DATA_RX_ANT] &&
+	      tb_data[MTK_VENDOR_ATTR_CSI_DATA_MODE] &&
+	      tb_data[MTK_VENDOR_ATTR_CSI_DATA_H_IDX])) {
+		fprintf(stderr, "Attributes error for CSI data\n");
+		return NL_SKIP;
+	}
+
+	c->rssi = nla_get_u8(tb_data[MTK_VENDOR_ATTR_CSI_DATA_RSSI]);
+	c->snr = nla_get_u8(tb_data[MTK_VENDOR_ATTR_CSI_DATA_SNR]);
+	c->data_bw = nla_get_u8(tb_data[MTK_VENDOR_ATTR_CSI_DATA_BW]);
+	c->pri_ch_idx = nla_get_u8(tb_data[MTK_VENDOR_ATTR_CSI_DATA_CH_IDX]);
+	c->rx_mode = nla_get_u8(tb_data[MTK_VENDOR_ATTR_CSI_DATA_MODE]);
+	/* Antenna attrs are NLA_U8 per policy: read one byte, not two. */
+	c->tx_idx = nla_get_u8(tb_data[MTK_VENDOR_ATTR_CSI_DATA_TX_ANT]);
+	c->rx_idx = nla_get_u8(tb_data[MTK_VENDOR_ATTR_CSI_DATA_RX_ANT]);
+	c->info = nla_get_u32(tb_data[MTK_VENDOR_ATTR_CSI_DATA_INFO]);
+	c->h_idx = nla_get_u32(tb_data[MTK_VENDOR_ATTR_CSI_DATA_H_IDX]);
+	c->ts = nla_get_u64(tb_data[MTK_VENDOR_ATTR_CSI_DATA_TS]);
+
+	/* Bounds guards: never write past the fixed-size arrays. */
+	idx = 0;
+	nla_for_each_nested(cur, tb_data[MTK_VENDOR_ATTR_CSI_DATA_TA], rem) {
+		if (idx < ETH_ALEN)
+			c->ta[idx++] = nla_get_u8(cur);
+	}
+
+	idx = 0;
+	nla_for_each_nested(cur, tb_data[MTK_VENDOR_ATTR_CSI_DATA_I], rem) {
+		if (idx < CSI_MAX_COUNT)
+			c->data_i[idx++] = nla_get_u16(cur);
+	}
+
+	idx = 0;
+	nla_for_each_nested(cur, tb_data[MTK_VENDOR_ATTR_CSI_DATA_Q], rem) {
+		if (idx < CSI_MAX_COUNT)
+			c->data_q[idx++] = nla_get_u16(cur);
+	}
+
+	csi_idx++;
+	return NL_SKIP;
+}
+
+/* Append the collected CSI records to @name as one JSON array. */
+static int mt76_csi_to_json(const char *name)
+{
+#define MAX_BUF_SIZE	6000
+/* Space left in buf after pos - keeps every snprintf bounded. */
+#define BUF_REMAIN	(MAX_BUF_SIZE - (pos - buf))
+	FILE *f;
+	int i;
+
+	f = fopen(name, "a+");
+	if (!f) {
+		printf("open failure");
+		return 1;
+	}
+
+	fwrite("[", 1, 1, f);
+
+	for (i = 0; i < csi_idx; i++) {
+		char *pos, *buf = malloc(MAX_BUF_SIZE);
+		struct csi_data *c = &csi[i];
+		int j;
+		if (!buf)
+			break;
+		pos = buf;
+		pos += snprintf(pos, BUF_REMAIN, "%c", '[');
+		pos += snprintf(pos, BUF_REMAIN, "%lld,", (long long)c->ts);
+		pos += snprintf(pos, BUF_REMAIN, "\"%02x%02x%02x%02x%02x%02x\",", c->ta[0], c->ta[1], c->ta[2], c->ta[3], c->ta[4], c->ta[5]);
+		pos += snprintf(pos, BUF_REMAIN, "%d,", c->rssi);
+		pos += snprintf(pos, BUF_REMAIN, "%u,", c->snr);
+		pos += snprintf(pos, BUF_REMAIN, "%u,", c->data_bw);
+		pos += snprintf(pos, BUF_REMAIN, "%u,", c->pri_ch_idx);
+		pos += snprintf(pos, BUF_REMAIN, "%u,", c->rx_mode);
+		pos += snprintf(pos, BUF_REMAIN, "%d,", c->tx_idx);
+		pos += snprintf(pos, BUF_REMAIN, "%d,", c->rx_idx);
+		pos += snprintf(pos, BUF_REMAIN, "%d,", c->h_idx);
+		pos += snprintf(pos, BUF_REMAIN, "%d,", c->info);
+
+		pos += snprintf(pos, BUF_REMAIN, "%c", '[');
+		for (j = 0; j < CSI_MAX_COUNT; j++) {
+			pos += snprintf(pos, BUF_REMAIN, "%d", c->data_i[j]);
+			if (j != CSI_MAX_COUNT - 1)
+				pos += snprintf(pos, BUF_REMAIN, ",");
+		}
+		pos += snprintf(pos, BUF_REMAIN, "%c,", ']');
+
+		pos += snprintf(pos, BUF_REMAIN, "%c", '[');
+		for (j = 0; j < CSI_MAX_COUNT; j++) {
+			pos += snprintf(pos, BUF_REMAIN, "%d", c->data_q[j]);
+			if (j != CSI_MAX_COUNT - 1)
+				pos += snprintf(pos, BUF_REMAIN, ",");
+		}
+		pos += snprintf(pos, BUF_REMAIN, "%c", ']');
+
+		pos += snprintf(pos, BUF_REMAIN, "%c", ']');
+		if (i != csi_idx - 1)
+			pos += snprintf(pos, BUF_REMAIN, ",");
+
+		fwrite(buf, 1, pos - buf, f);
+		free(buf);
+	}
+	fwrite("]", 1, 1, f);
+	fclose(f);
+	return 0;
+}
+
+/* Request <argv[0]> CSI records via repeated vendor dumps; JSON to argv[1]. */
+static int mt76_dump(int idx, int argc, char **argv)
+{
+	int pkt_num, ret = 0, i;
+	struct nl_msg *msg;
+	void *data;
+
+	if (argc < 2)
+		return 1;
+	pkt_num = strtol(argv[0], NULL, 10);
+
+#define CSI_DUMP_PER_NUM	3
+	csi_idx = 0;
+	csi = (struct csi_data *)calloc(pkt_num, sizeof(*csi));
+	if (!csi)
+		return 1;
+
+	for (i = 0; i < pkt_num / CSI_DUMP_PER_NUM; i++) {
+		if (unl_genl_init(&unl, "nl80211") < 0) {
+			fprintf(stderr, "Failed to connect to nl80211\n");
+			free(csi);
+			return 2;
+		}
+
+		msg = unl_genl_msg(&unl, NL80211_CMD_VENDOR, true);
+		if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, idx) ||
+		    nla_put_u32(msg, NL80211_ATTR_VENDOR_ID, MTK_NL80211_VENDOR_ID) ||
+		    nla_put_u32(msg, NL80211_ATTR_VENDOR_SUBCMD, MTK_NL80211_VENDOR_SUBCMD_CSI_CTRL))
+			return -EINVAL;
+
+		data = nla_nest_start(msg, NL80211_ATTR_VENDOR_DATA | NLA_F_NESTED);
+		if (nla_put_u16(msg, MTK_VENDOR_ATTR_CSI_CTRL_DUMP_NUM, CSI_DUMP_PER_NUM))
+			return -EINVAL;
+		nla_nest_end(msg, data);
+
+		ret = unl_genl_request(&unl, msg, mt76_dump_cb, NULL);
+		if (ret)
+			fprintf(stderr, "nl80211 call failed: %s\n", strerror(-ret));
+
+		unl_free(&unl);
+	}
+
+	mt76_csi_to_json(argv[1]);
+	free(csi);
+	return ret;
+}
+
+/* Fill the vendor nest from "csi_ctrl=<v1>,<v2>,..." plus optional "mac_addr=". */
+static int mt76_csi_ctrl(struct nl_msg *msg, int argc, char **argv)
+{
+	int idx = MTK_VENDOR_ATTR_CSI_CTRL_CFG_MODE;
+	char *val, *s1, *s2, *cur;
+	void *data;
+
+	val = strchr(argv[0], '=');
+	if (!val)
+		return -EINVAL;
+	*(val++) = 0;
+
+	s1 = s2 = strdup(val);
+	if (!s2)
+		return -ENOMEM;
+
+	data = nla_nest_start(msg, MTK_VENDOR_ATTR_CSI_CTRL_CFG | NLA_F_NESTED);
+	/* Comma-separated values map onto consecutive CFG_* attributes. */
+	while ((cur = strsep(&s1, ",")) != NULL)
+		nla_put_u8(msg, idx++, strtoul(cur, NULL, 0));
+	nla_nest_end(msg, data);
+	free(s2);
+
+	if (argc == 2 &&
+	    !strncmp(argv[1], "mac_addr", strlen("mac_addr"))) {
+		u8 a[ETH_ALEN];
+		int matches, i;
+		val = strchr(argv[1], '=');
+		if (!val)
+			return -EINVAL;
+		*(val++) = 0;
+
+		matches = sscanf(val, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
+				a, a+1, a+2, a+3, a+4, a+5);
+		if (matches != ETH_ALEN)
+			return -EINVAL;
+
+		data = nla_nest_start(msg, MTK_VENDOR_ATTR_CSI_CTRL_MAC_ADDR | NLA_F_NESTED);
+		for (i = 0; i < ETH_ALEN; i++)
+			nla_put_u8(msg, i, a[i]);
+		nla_nest_end(msg, data);
+	}
+
+	return 0;
+}
+
+/* Send one vendor CSI_CTRL "set" command built from argv. */
+static int mt76_set(int idx, int argc, char **argv)
+{
+	struct nl_msg *msg;
+	void *data;
+	int ret;
+
+	if (argc < 1)
+		return 1;
+
+	msg = unl_genl_msg(&unl, NL80211_CMD_VENDOR, false);
+	if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_VENDOR_ID, MTK_NL80211_VENDOR_ID) ||
+	    nla_put_u32(msg, NL80211_ATTR_VENDOR_SUBCMD, MTK_NL80211_VENDOR_SUBCMD_CSI_CTRL))
+		return -EINVAL;
+
+	data = nla_nest_start(msg, NL80211_ATTR_VENDOR_DATA | NLA_F_NESTED);
+
+	if (!strncmp(argv[0], "csi_ctrl", strlen("csi_ctrl")))
+		mt76_csi_ctrl(msg, argc, argv);
+
+	nla_nest_end(msg, data);
+
+	ret = unl_genl_request(&unl, msg, NULL, NULL);
+	if (ret)
+		fprintf(stderr, "nl80211 call failed: %s\n", strerror(-ret));
+
+	return ret;
+}
+
+int main(int argc, char **argv)
+{
+	const char *cmd;
+	int ret = 0;
+	int idx;
+
+	progname = argv[0];
+	if (argc < 3)
+		usage();
+
+	idx = if_nametoindex(argv[1]);
+	if (!idx) {
+		fprintf(stderr, "%s\n", strerror(errno));
+		return 2;
+	}
+
+	cmd = argv[2];
+	argv += 3;
+	argc -= 3;
+
+	if (!strcmp(cmd, "dump"))
+		ret = mt76_dump(idx, argc, argv);
+	else if (!strcmp(cmd, "set")) {
+		if (unl_genl_init(&unl, "nl80211") < 0) {
+			fprintf(stderr, "Failed to connect to nl80211\n");
+			return 2;
+		}
+
+		ret = mt76_set(idx, argc, argv);
+		unl_free(&unl);
+	}
+	else
+		usage();
+
+	return ret;
+}
diff --git a/feed/mt76-vendor/src/mt76-vendor.h b/feed/mt76-vendor/src/mt76-vendor.h
new file mode 100644
index 0000000..3407903
--- /dev/null
+++ b/feed/mt76-vendor/src/mt76-vendor.h
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
+#ifndef __MT76_TEST_H
+#define __MT76_TEST_H
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include <linux/nl80211.h>
+#include <netlink/attr.h>
+#include <unl.h>
+
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+typedef int8_t s8;
+typedef int16_t s16;
+typedef int32_t s32;
+typedef int64_t s64, ktime_t;
+
+#define MTK_NL80211_VENDOR_ID	0x0ce7
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
+#endif
+
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#endif
+
+struct nl_msg;
+struct nlattr;
+
+enum mtk_nl80211_vendor_subcmds {
+	MTK_NL80211_VENDOR_SUBCMD_CSI_CTRL = 0xc2,
+};
+
+enum mtk_vendor_attr_csi_ctrl {
+	MTK_VENDOR_ATTR_CSI_CTRL_UNSPEC,
+
+	MTK_VENDOR_ATTR_CSI_CTRL_CFG,
+	MTK_VENDOR_ATTR_CSI_CTRL_CFG_MODE,
+	MTK_VENDOR_ATTR_CSI_CTRL_CFG_TYPE,
+	MTK_VENDOR_ATTR_CSI_CTRL_CFG_VAL1,
+	MTK_VENDOR_ATTR_CSI_CTRL_CFG_VAL2,
+	MTK_VENDOR_ATTR_CSI_CTRL_MAC_ADDR,
+
+	MTK_VENDOR_ATTR_CSI_CTRL_DUMP_NUM,
+
+	MTK_VENDOR_ATTR_CSI_CTRL_DATA,
+
+	/* keep last */
+	NUM_MTK_VENDOR_ATTRS_CSI_CTRL,
+	MTK_VENDOR_ATTR_CSI_CTRL_MAX =
+		NUM_MTK_VENDOR_ATTRS_CSI_CTRL - 1
+};
+
+enum mtk_vendor_attr_csi_data {
+	MTK_VENDOR_ATTR_CSI_DATA_UNSPEC,
+	MTK_VENDOR_ATTR_CSI_DATA_PAD,
+
+	MTK_VENDOR_ATTR_CSI_DATA_VER,
+	MTK_VENDOR_ATTR_CSI_DATA_TS,
+	MTK_VENDOR_ATTR_CSI_DATA_RSSI,
+	MTK_VENDOR_ATTR_CSI_DATA_SNR,
+	MTK_VENDOR_ATTR_CSI_DATA_BW,
+	MTK_VENDOR_ATTR_CSI_DATA_CH_IDX,
+	MTK_VENDOR_ATTR_CSI_DATA_TA,
+	MTK_VENDOR_ATTR_CSI_DATA_I,
+	MTK_VENDOR_ATTR_CSI_DATA_Q,
+	MTK_VENDOR_ATTR_CSI_DATA_INFO,
+	MTK_VENDOR_ATTR_CSI_DATA_RSVD1,
+	MTK_VENDOR_ATTR_CSI_DATA_RSVD2,
+	MTK_VENDOR_ATTR_CSI_DATA_RSVD3,
+	MTK_VENDOR_ATTR_CSI_DATA_RSVD4,
+	MTK_VENDOR_ATTR_CSI_DATA_TX_ANT,
+	MTK_VENDOR_ATTR_CSI_DATA_RX_ANT,
+	MTK_VENDOR_ATTR_CSI_DATA_MODE,
+	MTK_VENDOR_ATTR_CSI_DATA_H_IDX,
+
+	/* keep last */
+	NUM_MTK_VENDOR_ATTRS_CSI_DATA,
+	MTK_VENDOR_ATTR_CSI_DATA_MAX =
+		NUM_MTK_VENDOR_ATTRS_CSI_DATA - 1
+};
+
+#define CSI_MAX_COUNT 256
+#define ETH_ALEN 6
+
+struct csi_data {
+	s16 data_i[CSI_MAX_COUNT];
+	s16 data_q[CSI_MAX_COUNT];
+	s8 rssi;
+	u8 snr;
+	ktime_t ts;
+	u8 data_bw;
+	u8 pri_ch_idx;
+	u8 ta[ETH_ALEN];
+	u32 info;
+	u8 rx_mode;
+	u32 h_idx;
+	u16 tx_idx;
+	u16 rx_idx;
+};
+
+struct vendor_field {
+	const char *name;
+	const char *prefix;
+
+	bool (*parse)(const struct vendor_field *field, int idx, struct nl_msg *msg,
+		      const char *val);
+	void (*print)(const struct vendor_field *field, struct nlattr *attr);
+
+	union {
+		struct {
+			const char * const *enum_str;
+			int enum_len;
+		};
+		struct {
+			bool (*parse2)(const struct vendor_field *field, int idx,
+				       struct nl_msg *msg, const char *val);
+			void (*print2)(const struct vendor_field *field,
+				       struct nlattr *attr);
+		};
+		struct {
+			void (*print_extra)(const struct vendor_field *field,
+					    struct nlattr **tb);
+			const struct vendor_field *fields;
+			struct nla_policy *policy;
+			int len;
+		};
+	};
+};
+
+extern struct unl unl;
+extern const struct vendor_field msg_field;
+
+void usage(void);
+
+#endif
diff --git a/feed/mtk_factory_rw/Makefile b/feed/mtk_factory_rw/Makefile
new file mode 100644
index 0000000..ad5316f
--- /dev/null
+++ b/feed/mtk_factory_rw/Makefile
@@ -0,0 +1,43 @@
+#
+# MTK-factory read and write
+#
+
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=mtk_factory_rw
+PKG_VERSION:=1
+PKG_RELEASE:=1
+
+PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)
+PKG_CONFIG_DEPENDS:=
+
+include $(INCLUDE_DIR)/package.mk
+
+define Package/mtk_factory_rw
+  SECTION:=MTK Properties
+  CATEGORY:=MTK Properties
+  SUBMENU:=Misc
+  TITLE:=mtk factory read and write
+  VERSION:=$(PKG_RELEASE)-$(REVISION)
+endef
+
+define Package/mtk_factory_rw/description
+  mtk factory's data read and write
+endef
+
+define Build/Prepare
+	mkdir -p $(PKG_BUILD_DIR)
+endef
+
+define Build/Compile/Default
+endef
+
+Build/Compile = $(Build/Compile/Default)
+
+define Package/mtk_factory_rw/install
+	$(INSTALL_DIR) $(1)/sbin
+	$(INSTALL_BIN) ./files/mtk_factory_rw.sh $(1)/sbin/mtk_factory_rw.sh
+endef
+
+$(eval $(call BuildPackage,mtk_factory_rw))
+
diff --git a/feed/mtk_factory_rw/files/mtk_factory_rw.sh b/feed/mtk_factory_rw/files/mtk_factory_rw.sh
new file mode 100755
index 0000000..7657260
--- /dev/null
+++ b/feed/mtk_factory_rw/files/mtk_factory_rw.sh
@@ -0,0 +1,160 @@
+#!/bin/sh
+
+usage()
+{
+	echo "This is a script to get or set mtk factory's data"
+	echo "-Typically, get or set the eth lan/wan mac_address-"
+	echo "Usage1: $0 <op> <side> [mac_address] "
+	echo "	<op>: -r or -w (Read or Write action)"
+	echo "	[mac_address]: MAC[1] MAC[2] MAC[3] MAC[4] MAC[5] MAC[6] (only for write action)"
+	echo "Usage2: $0 <op> <length> <offset> [data] "
+	echo "	<length>: length bytes of input"
+	echo "	<offset>: Skip offset bytes from the beginning of the input"
+	echo "Usage3: $0 -o <length> <get_from> <overwrite_to>"
+	echo "Example:"
+	echo "$0 -w lan 00 0c 43 68 55 56"
+	echo "$0 -r lan"
+	echo "$0 -w 8 0x22 11 22 33 44 55 66 77 88"
+	echo "$0 -r 8 0x22"
+	echo "$0 -o 12 0x24 0x7fff4"
+	exit 1
+}
+
+factory_name="Factory"
+factory_mtd=/dev/$(grep -i ''${factory_name}'' /proc/mtd | cut -c 1-4)
+
+#default:7622
+lan_mac_offset=0x2A
+wan_mac_offset=0x24
+
+case `cat /tmp/sysinfo/board_name` in
+	*7621*ax*)
+		# 256k - 12 byte
+		lan_mac_offset=0x3FFF4
+		wan_mac_offset=0x3FFFA
+		;;
+	*7621*)
+		lan_mac_offset=0xe000
+		wan_mac_offset=0xe006
+		;;
+	*7622*)
+		#512k -12 byte
+		lan_mac_offset=0x7FFF4
+		wan_mac_offset=0x7FFFA
+		;;
+	*7623*)
+		lan_mac_offset=0x1F800
+		wan_mac_offset=0x1F806
+		;;
+	*)
+		lan_mac_offset=0x2A
+		wan_mac_offset=0x24
+		;;
+esac
+
+#1.Read the offset's data from the Factory
+#usage: Get_offset_data length offset
+Get_offset_data()
+{
+	local length=$1
+	local offset=$2
+
+	hexdump -v -n ${length} -s ${offset} -e ''`expr ${length} - 1`'/1 "%02x-" "%02x "' ${factory_mtd}
+}
+
+overwrite_data=
+
+Get_offset_overwrite_data()
+{
+        local length=$1
+        local offset=$2
+
+        overwrite_data=`hexdump -v -n ${length} -s ${offset} -e ''\`expr ${length} - 1\`'/1 "%02x " " %02x"' ${factory_mtd}`
+}
+
+#2.Write the offset's data from the Factory
+#usage: Set_offset_data length offset data
+Set_offset_data()
+{
+	local length=$1
+	local offset=$2
+	local index=`expr $# - ${length} + 1`
+	local data=""
+
+	for j in $(seq ${index} `expr ${length} + ${index} - 1`)
+	do
+		temp=`eval echo '$'{"$j"}`
+		data=${data}"\x${temp}"
+	done
+
+	dd if=${factory_mtd} of=/tmp/Factory.backup
+	printf "${data}" | dd conv=notrunc of=/tmp/Factory.backup bs=1 seek=$((${offset}))
+	mtd write /tmp/Factory.backup ${factory_name}
+	rm -rf /tmp/Factory.backup
+}
+
+#3.Read Factory lan/wan mac address
+GetMac()
+{
+	if [ "$1" == "lan" ]; then
+		#read lan mac
+		Get_offset_data 6 ${lan_mac_offset}
+	elif [ "$1" == "wan" ]; then
+		#read wan mac
+		Get_offset_data 6 ${wan_mac_offset}
+	else
+		usage
+		exit 1
+	fi
+}
+
+
+#4.write Factory lan/wan mac address
+SetMac()
+{
+	if [ "$#" != "9" ]; then
+		echo "Mac address must be 6 bytes!"
+		exit 1
+	fi
+
+	if [ "$1" == "lan" ]; then
+		#write lan mac
+		Set_offset_data 6 ${lan_mac_offset} $@
+
+	elif [ "$1" == "wan" ]; then
+		#write wan mac
+		Set_offset_data 6 ${wan_mac_offset} $@
+	else
+		usage
+		exit 1
+	fi
+}
+
+#usage:
+# 1. Set/Get the mac_address: mtk_factory -r/-w lan/wan /data
+# 2. Set/Get the offset data: mtk_factory -r/-w length offset /data
+# 3. Overwrite from offset1 to offset2 by length byte : mtk_factory -o length from to
+if [ "$1" == "-r" ]; then
+	if [ "$2" == "lan" -o "$2" == "wan" ]; then
+		GetMac $2
+	elif [ "$2" -eq "$2" ]; then
+		Get_offset_data $2 $3
+	else
+		echo "Unknown command!"
+		usage
+		exit 1
+	fi
+elif [ "$1" == "-w" ]; then
+	if [ "$2" == "lan" -o "$2" == "wan" ]; then
+		SetMac $2 $@
+	else
+		Set_offset_data $2 $3 $@
+	fi
+elif [ "$1" == "-o" ]; then
+	Get_offset_overwrite_data $2 $3
+	Set_offset_data $2 $4 ${overwrite_data}
+else
+	echo "Unknown command!"
+	usage
+	exit 1
+fi
diff --git a/feed/mtk_failsafe/Makefile b/feed/mtk_failsafe/Makefile
new file mode 100755
index 0000000..7cbd3ca
--- /dev/null
+++ b/feed/mtk_failsafe/Makefile
@@ -0,0 +1,43 @@
+#
+# MTK failsafe init script
+#
+
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=mtk_failsafe
+PKG_VERSION:=1
+PKG_RELEASE:=1
+
+PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)
+PKG_CONFIG_DEPENDS:=
+
+include $(INCLUDE_DIR)/package.mk
+
+define Package/mtk_failsafe
+  SECTION:=MTK Properties
+  CATEGORY:=MTK Properties
+  SUBMENU:=Misc
+  TITLE:=mtk failsafe script
+  VERSION:=$(PKG_RELEASE)-$(REVISION)
+endef
+
+define Package/mtk_failsafe/description
+  mtk init script for failsafe mode
+endef
+
+define Build/Prepare
+	mkdir -p $(PKG_BUILD_DIR)
+endef
+
+define Build/Compile/Default
+endef
+
+Build/Compile = $(Build/Compile/Default)
+
+define Package/mtk_failsafe/install
+	$(INSTALL_DIR) $(1)/sbin
+	$(INSTALL_BIN) ./files/mtk_failsafe.sh $(1)/sbin/mtk_failsafe.sh
+endef
+
+$(eval $(call BuildPackage,mtk_failsafe))
+
diff --git a/feed/mtk_failsafe/files/mtk_failsafe.sh b/feed/mtk_failsafe/files/mtk_failsafe.sh
new file mode 100755
index 0000000..d9b9e60
--- /dev/null
+++ b/feed/mtk_failsafe/files/mtk_failsafe.sh
@@ -0,0 +1,39 @@
+#!/bin/sh
+
+mtk_common_init() {
+	echo "Running mtk failsafe script..."
+	echo "You can edit here : package/mtk/mtk_failsafe/file/mtk_failsafe.sh"
+	echo "mtk_common_init....."
+	mount_root
+	mount_root done
+	sync
+	echo 3 > /proc/sys/vm/drop_caches
+}
+
+mtk_wifi_init() {
+	echo "mtk_wifi_init....."
+	# once the wifi firmware bin is correct, uncomment the lines below
+        #insmod wifi_emi_loader
+        #rmmod wifi_emi_loader
+	#insmod mt_wifi
+        #ifconfig ra0 up
+        #ifconfig rax0 up
+}
+
+mtk_network_init() {
+	echo "mtk_network_init....."
+	# NOTE : LAN IP subnet should be 192.168.1.x
+        ifconfig eth0 0.0.0.0
+        brctl addbr br-lan
+        ifconfig br-lan 192.168.1.1 netmask 255.255.255.0 up
+        brctl addif br-lan eth0
+        #brctl addif br-lan ra0
+        #brctl addif br-lan rax0
+	./etc/init.d/telnet start
+	#./usr/bin/ated
+}
+
+mtk_common_init
+mtk_wifi_init
+mtk_network_init
+
diff --git a/feed/mtkhnat_util/Makefile b/feed/mtkhnat_util/Makefile
new file mode 100755
index 0000000..8ef168a
--- /dev/null
+++ b/feed/mtkhnat_util/Makefile
@@ -0,0 +1,48 @@
+#
+# MTK HNAT utility
+#
+
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=mtkhnat_util
+PKG_VERSION:=1
+PKG_RELEASE:=1
+
+PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)
+PKG_CONFIG_DEPENDS:=
+
+include $(INCLUDE_DIR)/package.mk
+
+define Package/mtkhnat_util
+  SECTION:=net
+  CATEGORY:=Network
+  TITLE:=mtk hnat utility
+  VERSION:=$(PKG_RELEASE)-$(REVISION)
+endef
+
+define Package/mtkhnat_util/description
+  mtk hnat util to init hnat module
+endef
+
+define Build/Prepare
+	mkdir -p $(PKG_BUILD_DIR)
+endef
+
+define Build/Compile/Default
+endef
+
+Build/Compile = $(Build/Compile/Default)
+
+define Package/mtkhnat_util/install
+	$(INSTALL_DIR) $(1)/sbin
+	$(INSTALL_DIR) $(1)/etc/config
+	$(INSTALL_DIR) $(1)/etc/init.d
+	$(INSTALL_DIR) $(1)/etc/uci-defaults
+
+	$(INSTALL_BIN) ./files/mtkhnat $(1)/sbin/
+	$(INSTALL_BIN) ./files/mtkhnat.config $(1)/etc/config/mtkhnat
+	$(INSTALL_BIN) ./files/mtkhnat.init $(1)/etc/init.d/mtkhnat
+	$(INSTALL_BIN) ./files/99-firewall $(1)/etc/uci-defaults	
+endef
+
+$(eval $(call BuildPackage,mtkhnat_util))
diff --git a/feed/mtkhnat_util/files/99-firewall b/feed/mtkhnat_util/files/99-firewall
new file mode 100755
index 0000000..9c72762
--- /dev/null
+++ b/feed/mtkhnat_util/files/99-firewall
@@ -0,0 +1,6 @@
+echo "iptables -t mangle -A FORWARD -m dscp --dscp-class BE -j MARK --set-mark 0" >> /etc/firewall.user
+echo "iptables -t mangle -A FORWARD -m dscp --dscp-class CS2 -j MARK --set-mark 2" >> /etc/firewall.user
+echo "iptables -t mangle -A FORWARD -m dscp --dscp-class CS4 -j MARK --set-mark 4" >> /etc/firewall.user
+echo "iptables -t mangle -A FORWARD -m dscp --dscp-class CS6 -j MARK --set-mark 6" >> /etc/firewall.user
+
+exit 0
diff --git a/feed/mtkhnat_util/files/mtkhnat b/feed/mtkhnat_util/files/mtkhnat
new file mode 100755
index 0000000..ce3ef9a
--- /dev/null
+++ b/feed/mtkhnat_util/files/mtkhnat
@@ -0,0 +1,96 @@
+#!/bin/sh
+
+. /lib/functions.sh
+
+config_load mtkhnat
+config_get enable global enable 0
+config_get hqos global hqos 0
+config_get txq_num global txq_num 16
+config_get scheduling global scheduling "wrr"
+config_get sch0_bw global sch0_bw 100000
+config_get sch1_bw global sch1_bw 100000
+
+#if enable=0, disable qdma_sch & qdma_txq
+[ "${enable}" -eq 1 ] || {
+	echo 0 ${scheduling} ${sch0_bw} > /sys/kernel/debug/hnat/qdma_sch0
+	echo 0 ${scheduling} ${sch1_bw} > /sys/kernel/debug/hnat/qdma_sch1
+	echo 1 0 0 0 0 0 4 > /sys/kernel/debug/hnat/qdma_txq0
+	for i in $(seq 1 $((txq_num - 1)))
+	do
+		echo 0 0 0 0 0 0 0 > /sys/kernel/debug/hnat/qdma_txq$i
+	done
+
+	rmmod mtkhnat
+	exit 0
+}
+
+insmod mtkhnat
+
+#if hqos=0, disable qdma_sch & qdma_txq
+[ "${hqos}" -eq 1 ] || {
+	echo 0 ${scheduling} ${sch0_bw} > /sys/kernel/debug/hnat/qdma_sch0
+	echo 0 ${scheduling} ${sch1_bw} > /sys/kernel/debug/hnat/qdma_sch1
+	echo 1 0 0 0 0 0 4 > /sys/kernel/debug/hnat/qdma_txq0
+	for i in $(seq 1 $((txq_num - 1)))
+	do
+		echo 0 0 0 0 0 0 0 > /sys/kernel/debug/hnat/qdma_txq$i
+	done
+
+	exit 0
+}
+
+# enable qdma_sch0 and qdma_sch1
+echo 1 ${scheduling} ${sch0_bw} > /sys/kernel/debug/hnat/qdma_sch0
+echo 1 ${scheduling} ${sch1_bw} > /sys/kernel/debug/hnat/qdma_sch1
+
+setup_queue() {
+	local queue_id queue_scheduler queue_minebl queue_maxebl
+	local queue_minrate queue_maxrate queue_resv minrate maxrate queue_weight
+
+	config_get queue_id $1 id 0
+	config_get queue_minrate $1 minrate 0
+	config_get queue_maxrate $1 maxrate 0
+	config_get queue_resv $1 resv 4
+	config_get queue_weight $1 weight 4
+
+	# check qid < txq max num or not for loop condition
+	[ "${queue_id}" -gt $((txq_num - 1)) ] && return 0
+
+	# start to set per queue config
+	queue_minebl=1
+	queue_maxebl=1
+	queue_scheduler=0
+
+	# if min rate = 0, set min enable = 0
+	# if max rate = 0, set max enable = 0
+	[ "${queue_minrate}" -eq 0 ] && queue_minebl=0
+	[ "${queue_maxrate}" -eq 0 ] && queue_maxebl=0
+
+	# calculate min rate according to sch0_bw
+	minrate=$((sch0_bw * $queue_minrate))
+	minrate=$((minrate / 100))
+
+	# calculate max rate according to sch0_bw
+	maxrate=$((sch0_bw * $queue_maxrate))
+	maxrate=$((maxrate / 100))
+
+	# set the queue of sch0 group(the lower half of total queues)
+	[ "${queue_id}" -le $(((txq_num / 2) - 1)) ] && \
+	echo 0 ${queue_minebl} ${minrate} ${queue_maxebl} ${maxrate} ${queue_weight} \
+		${queue_resv} > /sys/kernel/debug/hnat/qdma_txq${queue_id}
+
+	# calculate min rate according to sch1_bw
+	minrate=$((sch1_bw * $queue_minrate))
+	minrate=$((minrate / 100))
+
+	# calculate max rate according to sch1_bw
+	maxrate=$((sch1_bw * $queue_maxrate))
+	maxrate=$((maxrate / 100))
+
+	# set the queue of sch1 group(the upper half of total queues)
+	[ "${queue_id}" -gt $(((txq_num / 2) - 1)) ] && \
+	echo 1 ${queue_minebl} ${minrate} ${queue_maxebl} ${maxrate} ${queue_weight} \
+		${queue_resv} > /sys/kernel/debug/hnat/qdma_txq${queue_id}
+}
+
+config_foreach setup_queue queue
diff --git a/feed/mtkhnat_util/files/mtkhnat.config b/feed/mtkhnat_util/files/mtkhnat.config
new file mode 100755
index 0000000..f252a98
--- /dev/null
+++ b/feed/mtkhnat_util/files/mtkhnat.config
@@ -0,0 +1,921 @@
+####################################################################
+#	hqos: 1:ON, 0:OFF                                          #
+#	txq_num: TX queue count (default 16; MT7622 max 64)        #
+#	scheduling: wrr: weighted round-robin, sp: strict priority #
+#	sch0_bw: sch0 bandwidth (unit:Kbps)                        #
+#	sch1_bw: sch1 bandwidth (unit:Kbps)                        #
+####################################################################
+config global global
+	option enable 1
+	option hqos 0
+	option txq_num 16
+	option scheduling 'wrr'
+	option sch0_bw 1000000
+	option sch1_bw 1000000
+
+####################################################################
+#	id: queue id                                               #
+#	minrate: percentage of min rate limit                      #
+#	maxrate: percentage of max rate limit                      #
+#	weight: weight for queue schedule                          #
+#	resv: buffer reserved for HW/SW path                       #
+####################################################################
+config queue
+        option id 0
+        option minrate 30
+        option maxrate 100
+        option weight 4
+        option resv 4
+
+config queue
+        option id 1
+        option minrate 30
+        option maxrate 100
+        option weight 2
+        option resv 4
+
+config queue
+        option id 2
+        option minrate 30
+        option maxrate 100
+        option weight 2
+        option resv 4
+
+config queue
+        option id 3
+        option minrate 30
+        option maxrate 100
+        option weight 4
+        option resv 4
+
+config queue
+        option id 4
+        option minrate 30
+        option maxrate 100
+        option weight 6
+        option resv 4
+
+config queue
+        option id 5
+        option minrate 30
+        option maxrate 100
+        option weight 6
+        option resv 4
+
+config queue
+        option id 6
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 7
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 8
+        option minrate 30
+        option maxrate 100
+        option weight 4
+        option resv 4
+
+config queue
+        option id 9
+        option minrate 30
+        option maxrate 100
+        option weight 2
+        option resv 4
+
+config queue
+        option id 10
+        option minrate 30
+        option maxrate 100
+        option weight 2
+        option resv 4
+
+config queue
+        option id 11
+        option minrate 30
+        option maxrate 100
+        option weight 4
+        option resv 4
+
+config queue
+        option id 12
+        option minrate 30
+        option maxrate 100
+        option weight 6
+        option resv 4
+
+config queue
+        option id 13
+        option minrate 30
+        option maxrate 100
+        option weight 6
+        option resv 4
+
+config queue
+        option id 14
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 15
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+####################################################################
+#	Default setting supports 16 queues (id: 0~15)              #
+#	Only supports 64 queues for MT7622                         #
+####################################################################
+config queue
+        option id 16
+        option minrate 30
+        option maxrate 100
+        option weight 4
+        option resv 4
+
+config queue
+        option id 17
+        option minrate 30
+        option maxrate 100
+        option weight 2
+        option resv 4
+
+config queue
+        option id 18
+        option minrate 30
+        option maxrate 100
+        option weight 2
+        option resv 4
+
+config queue
+        option id 19
+        option minrate 30
+        option maxrate 100
+        option weight 4
+        option resv 4
+
+config queue
+        option id 20
+        option minrate 30
+        option maxrate 100
+        option weight 6
+        option resv 4
+
+config queue
+        option id 21
+        option minrate 30
+        option maxrate 100
+        option weight 6
+        option resv 4
+
+config queue
+        option id 22
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 23
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 24
+        option minrate 30
+        option maxrate 100
+        option weight 4
+        option resv 4
+
+config queue
+        option id 25
+        option minrate 30
+        option maxrate 100
+        option weight 2
+        option resv 4
+
+config queue
+        option id 26
+        option minrate 30
+        option maxrate 100
+        option weight 2
+        option resv 4
+
+config queue
+        option id 27
+        option minrate 30
+        option maxrate 100
+        option weight 4
+        option resv 4
+
+config queue
+        option id 28
+        option minrate 30
+        option maxrate 100
+        option weight 6
+        option resv 4
+
+config queue
+        option id 29
+        option minrate 30
+        option maxrate 100
+        option weight 6
+        option resv 4
+
+config queue
+        option id 30
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 31
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 32
+        option minrate 30
+        option maxrate 100
+        option weight 4
+        option resv 4
+
+config queue
+        option id 33
+        option minrate 30
+        option maxrate 100
+        option weight 2
+        option resv 4
+
+config queue
+        option id 34
+        option minrate 30
+        option maxrate 100
+        option weight 2
+        option resv 4
+
+config queue
+        option id 35
+        option minrate 30
+        option maxrate 100
+        option weight 4
+        option resv 4
+
+config queue
+        option id 36
+        option minrate 30
+        option maxrate 100
+        option weight 6
+        option resv 4
+
+config queue
+        option id 37
+        option minrate 30
+        option maxrate 100
+        option weight 6
+        option resv 4
+
+config queue
+        option id 38
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 39
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 40
+        option minrate 30
+        option maxrate 100
+        option weight 4
+        option resv 4
+
+config queue
+        option id 41
+        option minrate 30
+        option maxrate 100
+        option weight 2
+        option resv 4
+
+config queue
+        option id 42
+        option minrate 30
+        option maxrate 100
+        option weight 2
+        option resv 4
+
+config queue
+        option id 43
+        option minrate 30
+        option maxrate 100
+        option weight 4
+        option resv 4
+
+config queue
+        option id 44
+        option minrate 30
+        option maxrate 100
+        option weight 6
+        option resv 4
+
+config queue
+        option id 45
+        option minrate 30
+        option maxrate 100
+        option weight 6
+        option resv 4
+
+config queue
+        option id 46
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 47
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 48
+        option minrate 30
+        option maxrate 100
+        option weight 4
+        option resv 4
+
+config queue
+        option id 49
+        option minrate 30
+        option maxrate 100
+        option weight 2
+        option resv 4
+
+config queue
+        option id 50
+        option minrate 30
+        option maxrate 100
+        option weight 2
+        option resv 4
+
+config queue
+        option id 51
+        option minrate 30
+        option maxrate 100
+        option weight 4
+        option resv 4
+
+config queue
+        option id 52
+        option minrate 30
+        option maxrate 100
+        option weight 6
+        option resv 4
+
+config queue
+        option id 53
+        option minrate 30
+        option maxrate 100
+        option weight 6
+        option resv 4
+
+config queue
+        option id 54
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 55
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 56
+        option minrate 30
+        option maxrate 100
+        option weight 4
+        option resv 4
+
+config queue
+        option id 57
+        option minrate 30
+        option maxrate 100
+        option weight 2
+        option resv 4
+
+config queue
+        option id 58
+        option minrate 30
+        option maxrate 100
+        option weight 2
+        option resv 4
+
+config queue
+        option id 59
+        option minrate 30
+        option maxrate 100
+        option weight 4
+        option resv 4
+
+config queue
+        option id 60
+        option minrate 30
+        option maxrate 100
+        option weight 6
+        option resv 4
+
+config queue
+        option id 61
+        option minrate 30
+        option maxrate 100
+        option weight 6
+        option resv 4
+
+config queue
+        option id 62
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 63
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 64
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 65
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 66
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 67
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 68
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 69
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 70
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 71
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 72
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 73
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 74
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 75
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 76
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 77
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 78
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 79
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 80
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 81
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 82
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 83
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 84
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 85
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 86
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 87
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 88
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 89
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 90
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 91
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 92
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 93
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 94
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 95
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 96
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 97
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 98
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 99
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 100
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 101
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 102
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 103
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 104
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 105
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 106
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 107
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 108
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 109
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 110
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 111
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 112
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 113
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 114
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 115
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 116
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 117
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 118
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 119
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 120
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 121
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 122
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 123
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 124
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 125
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 126
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
+
+config queue
+        option id 127
+        option minrate 30
+        option maxrate 100
+        option weight 8
+        option resv 4
diff --git a/feed/mtkhnat_util/files/mtkhnat.init b/feed/mtkhnat_util/files/mtkhnat.init
new file mode 100755
index 0000000..528e62e
--- /dev/null
+++ b/feed/mtkhnat_util/files/mtkhnat.init
@@ -0,0 +1,17 @@
+#!/bin/sh /etc/rc.common
+# procd init script for the mtkhnat service: runs /sbin/mtkhnat at boot
+# and restarts it when its UCI configuration changes.
+
+# Boot start priority.
+START=19
+
+USE_PROCD=1
+NAME=mtkhnat
+PROG=/sbin/mtkhnat
+
+# Launch /sbin/mtkhnat as a procd-managed instance.
+start_service() {
+	procd_open_instance
+	procd_set_param command "${PROG}"
+	procd_close_instance
+}
+
+# Reload the service whenever the "mtkhnat" UCI config is changed.
+service_triggers() {
+	procd_add_reload_trigger "mtkhnat"
+}
diff --git a/feed/regs/Makefile b/feed/regs/Makefile
new file mode 100755
index 0000000..d0f2444
--- /dev/null
+++ b/feed/regs/Makefile
@@ -0,0 +1,39 @@
+#
+# hua.shao@mediatek.com
+#
+# MTK Property Software.
+#
+
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=regs
+PKG_RELEASE:=1
+
+PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)
+
+include $(INCLUDE_DIR)/package.mk
+include $(INCLUDE_DIR)/kernel.mk
+
+define Package/regs
+  SECTION:=MTK Properties
+  CATEGORY:=MTK Properties
+  TITLE:=A program to read/write from/to a PCI device from userspace.
+  SUBMENU:=Applications
+  DEPENDS:=+@KERNEL_DEVMEM
+endef
+
+define Package/regs/description
+  Simple program to read/write from/to a PCI device from userspace.
+endef
+
+# Nothing to configure for this package.
+define Build/Configure
+endef
+
+define Package/regs/install
+	$(INSTALL_DIR) $(1)/usr/bin
+	$(INSTALL_BIN) $(PKG_BUILD_DIR)/regs $(1)/usr/bin
+endef
+
+
+$(eval $(call BuildPackage,regs))
+
diff --git a/feed/regs/src/Makefile b/feed/regs/src/Makefile
new file mode 100644
index 0000000..bc3a12f
--- /dev/null
+++ b/feed/regs/src/Makefile
@@ -0,0 +1,13 @@
+EXEC = regs
+
+all: $(EXEC)
+
+# Single-file build: regs.c -> regs
+$(EXEC): $(EXEC).c
+	$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $@.c $(LDLIBS)
+
+# presumably a legacy install hook for ROMFS-based builds — TODO confirm
+romfs:
+	$(ROMFSINST) /bin/$(EXEC)
+
+clean:
+	-rm -f $(EXEC) *.elf *.gdb *.o
+
+
diff --git a/feed/regs/src/regs.c b/feed/regs/src/regs.c
new file mode 100755
index 0000000..43397dd
--- /dev/null
+++ b/feed/regs/src/regs.c
@@ -0,0 +1,166 @@
+/*
+ * pcimem.c: Simple program to read/write from/to a pci device from userspace.
+ *
+ *  Copyright (C) 2010, Bill Farrow (bfarrow@beyondelectronics.us)
+ *
+ *  Based on the devmem2.c code
+ *  Copyright (C) 2000, Jan-Derk Bakker (J.D.Bakker@its.tudelft.nl)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+#include <signal.h>
+#include <fcntl.h>
+#include <ctype.h>
+#include <termios.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+
+/* Report the failing line/file and errno text, then terminate with exit(1). */
+#define PRINT_ERROR \
+	do { \
+		fprintf(stderr, "Error at line %d, file %s (%d) [%s]\n", \
+		__LINE__, __FILE__, errno, strerror(errno)); exit(1); \
+	} while(0)
+
+/* /dev/mem is mapped in whole 4 KiB pages; MAP_MASK extracts the in-page offset. */
+#define MAP_SIZE 4096UL
+#define MAP_MASK (MAP_SIZE - 1)
+
+/*
+ * dump_page() - print 64 32-bit words (256 bytes) starting at vaddr,
+ * four words per line.
+ * @vaddr: virtual address to start dumping from
+ * @vbase: virtual base of the mmap()ed window
+ * @pbase: physical base of the window; (start - vbase + pbase) makes the
+ *         printed address column show the physical register address
+ */
+void dump_page(uint32_t *vaddr, uint32_t *vbase, uint32_t *pbase)
+{
+	uint32_t *end = vaddr + (MAP_SIZE >> 6);	/* 4096 >> 6 = 64 words */
+	uint32_t *start = vaddr;
+
+	while (start < end) {
+		printf("%p:%08x %08x %08x %08x\n",
+			start - vbase + pbase, start[0], start[1], start[2], start[3]);
+		start += 4;
+	}
+}
+
+/*
+ * reg_mod_bits() - read-modify-write a bit field of a 32-bit register.
+ * @virt_addr: mapped register address
+ * @data:      new field value (masked to the field width)
+ * @start_bit: lowest bit of the field, range [0, 31]
+ * @data_len:  field width in bits, range [1, 32]; start_bit + data_len <= 32
+ *
+ * Prints the range it modified; the caller prints the readback value.
+ */
+void reg_mod_bits(uint32_t *virt_addr, int data, int start_bit, int data_len)
+{
+	int mask = 0;
+	int value;
+	int i;
+
+	if ((start_bit < 0) || (start_bit > 31) ||
+	    (data_len < 1) || (data_len > 32) ||
+	    (start_bit + data_len > 32)) {
+		fprintf(stderr, "Startbit range[0~31], and DataLen range[1~32], and Startbit + DataLen <= 32\n");
+		return;
+	}
+
+	/* build a contiguous mask covering [start_bit, start_bit+data_len-1] */
+	for (i = 0; i < data_len; i++)
+		mask |= 1 << (start_bit + i);
+
+	value = *((volatile uint32_t *) virt_addr);
+	value &= ~mask;
+	value |= (data << start_bit) & mask;	/* stray ";;" removed */
+
+	/* volatile store to match the volatile read above (MMIO access) */
+	*((volatile uint32_t *) virt_addr) = value;
+
+	printf("Modify 0x%X[%d:%d]; ", data, start_bit + data_len - 1, start_bit);
+}
+
+/* Print command-line help (with examples) to stderr. */
+void usage(void)
+{
+	/* fixed typo: "[w]wite" -> "[w]rite" */
+	fprintf(stderr, "\nUsage:\tregs [Type] [ Offset:Hex ] [ Data:Hex ] [StartBit:Dec] [DataLen:Dec]\n"
+		"\tType    : access operation type : [m]odify, [w]rite, [d]ump\n"
+		"\tOffset  : offset into memory region to act upon\n"
+		"\tData    : data to be written\n"
+		"\tStartbit: Startbit of Addr that want to be modified. Range[0~31]\n"
+		"\tDataLen : Data length of Data. Range[1~32], and Startbit + DataLen <= 32\n\n"
+		"Example:\tRead/Write/Modify register \n"
+		"\tRead    : regs d 0x1b100000           //dump 0x1b100000~0x1b1000f0 \n"
+		"\tWrite   : regs w 0x1b100000 0x1234    //write 0x1b100000=0x1234\n"
+		"\tModify  : regs m 0x1b100000 0x0 29 3  //modify 0x1b100000[29:31]=0\n");
+}
+
+/*
+ * regs <type> <offset> [data] [startbit] [datalen]
+ * Map /dev/mem at the page containing <offset>, then read, write, modify
+ * or dump 32-bit registers.  Requires a kernel with /dev/mem enabled.
+ */
+int main(int argc, char **argv) {
+	int fd;
+	void *map_base = NULL;
+	void *virt_addr = NULL;
+	uint32_t read_result = 0;
+	uint32_t writeval = 0;
+	uint32_t startbit = 0;
+	uint32_t datalen = 0;
+	char *filename = NULL;
+	off_t offset = 0;
+	int access_type = 0;
+
+	if (argc < 3) {
+		usage();
+		exit(1);
+	}
+
+	access_type = tolower(argv[1][0]);
+	/* 'w' needs a data argument; 'm' needs data + startbit + datalen */
+	if ((access_type == 'w' && argc < 4) || (access_type == 'm' && argc < 6)) {
+		usage();
+		exit(1);
+	}
+
+	filename = "/dev/mem";
+	if ((fd = open(filename, O_RDWR | O_SYNC)) == -1)
+		PRINT_ERROR;
+
+	/* Map two pages so a 256-byte dump starting near the end of the
+	 * first page cannot run off the mapping. */
+	offset = strtoul(argv[2], NULL, 16);
+	map_base = mmap(0, 2*MAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, offset & ~MAP_MASK);
+	if (map_base == (void *) -1)
+		PRINT_ERROR;
+
+	virt_addr = map_base + (offset & MAP_MASK);
+	read_result = *((volatile uint32_t *) virt_addr);
+	printf("Value at 0x%llX (%p): 0x%X\n",
+	       (unsigned long long)offset, virt_addr, read_result);
+
+	switch (access_type) {
+		case 'm':
+			writeval = strtoul(argv[3], 0, 16);
+			startbit = strtoul(argv[4], 0, 10);
+			datalen  = strtoul(argv[5], 0, 10);
+			reg_mod_bits((uint32_t *)virt_addr, writeval, startbit, datalen);
+			break;
+		case 'w':
+			writeval = strtoul(argv[3], 0, 16);
+			*((uint32_t *) virt_addr) = writeval;
+			printf("Written 0x%X; ", writeval);
+			break;
+		case 'd':
+			dump_page(virt_addr, map_base, (uint32_t *)(offset & ~MAP_MASK));
+			goto out;
+		default:
+			fprintf(stderr, "Illegal data type '%c'.\n", access_type);
+			goto out;
+	}
+
+	read_result = *((volatile uint32_t *) virt_addr);
+	printf("Readback 0x%X\n", read_result);
+
+out:
+	/* unmap the same length we mapped (was MAP_SIZE: partial unmap bug) */
+	if (munmap(map_base, 2*MAP_SIZE) == -1)
+		PRINT_ERROR;
+
+	close(fd);
+	return 0;
+}
diff --git a/feed/switch/Makefile b/feed/switch/Makefile
new file mode 100755
index 0000000..0eb3d7e
--- /dev/null
+++ b/feed/switch/Makefile
@@ -0,0 +1,48 @@
+#
+# hua.shao@mediatek.com
+#
+# MTK Property Software.
+#
+
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=switch
+PKG_RELEASE:=1
+
+PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)
+include $(INCLUDE_DIR)/package.mk
+include $(INCLUDE_DIR)/kernel.mk
+
+define Package/switch
+  SECTION:=MTK Properties
+  CATEGORY:=MTK Properties
+  DEPENDS:=+libnl-tiny
+  TITLE:=Command to config switch
+  SUBMENU:=Applications
+endef
+
+define Package/switch/description
+  A program to configure the switch.
+endef
+
+# Extra include paths: kernel user headers, libnl-tiny and the package
+# build dir.  (Fixed: the list previously ended with a stray "\" line
+# continuation that joined the following blank line onto it.)
+TARGET_CPPFLAGS := \
+	-D_GNU_SOURCE \
+	-I$(LINUX_DIR)/user_headers/include \
+	-I$(STAGING_DIR)/usr/include/libnl-tiny \
+	-I$(PKG_BUILD_DIR) \
+	$(TARGET_CPPFLAGS)
+
+define Build/Compile
+	CFLAGS="$(TARGET_CPPFLAGS) $(TARGET_CFLAGS)" \
+	$(MAKE) -C $(PKG_BUILD_DIR) \
+		$(TARGET_CONFIGURE_OPTS) \
+		LIBS="$(TARGET_LDFLAGS) -lnl-tiny -lm"
+endef
+
+define Package/switch/install
+	$(INSTALL_DIR) $(1)/usr/sbin
+	$(INSTALL_DIR) $(1)/lib/network
+	$(INSTALL_BIN) $(PKG_BUILD_DIR)/switch $(1)/usr/sbin
+endef
+
+$(eval $(call BuildPackage,switch))
diff --git a/feed/switch/src/Makefile b/feed/switch/src/Makefile
new file mode 100644
index 0000000..81ae127
--- /dev/null
+++ b/feed/switch/src/Makefile
@@ -0,0 +1,14 @@
+EXEC = switch
+
+# All sources are compiled and linked in one step into the "switch" binary.
+SRC=switch_fun.c switch_753x.c switch_ioctl.c switch_netlink.c
+
+all: $(EXEC)
+
+switch: $(SRC)
+	$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(SRC) $(LDLIBS) $(LIBS)
+
+# presumably a legacy install hook for ROMFS-based builds — TODO confirm
+romfs:
+	$(ROMFSINST) /bin/switch
+
+clean:
+	-rm -f $(EXEC) *.elf *.gdb *.o
diff --git a/feed/switch/src/NOTICE b/feed/switch/src/NOTICE
new file mode 100644
index 0000000..c031eed
--- /dev/null
+++ b/feed/switch/src/NOTICE
@@ -0,0 +1,202 @@
+MediaTek (C) 2011
+
+The GNU General Public License (GPL)
+
+Version 2, June 1991
+
+Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+Everyone is permitted to copy and distribute verbatim copies
+of this license document, but changing it is not allowed.
+
+Preamble
+
+The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU
+General Public License is intended to guarantee your freedom to share and change free software--to make sure the
+software is free for all its users. This General Public License applies to most of the Free Software Foundation's
+software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is
+covered by the GNU Library General Public License instead.) You can apply it to your programs, too.
+
+When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make
+sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you
+receive source code or can get it if you want it, that you can change the software or use pieces of it in new free
+programs; and that you know you can do these things.
+
+To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to
+surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the
+software, or if you modify it.
+
+For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all
+the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them
+these terms so they know their rights.
+
+We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal
+permission to copy, distribute and/or modify the software.
+
+Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty
+for this free software. If the software is modified by someone else and passed on, we want its recipients to know that
+what they have is not the original, so that any problems introduced by others will not reflect on the original authors'
+reputations.
+
+Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors
+of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this,
+we have made it clear that any patent must be licensed for everyone's free use or not licensed at all.
+
+The precise terms and conditions for copying, distribution and modification follow.
+
+TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it
+may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or
+work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to
+say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into
+another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is
+addressed as "you".
+
+Activities other than copying, distribution and modification are not covered by this License; they are outside its
+scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents
+constitute a work based on the Program (independent of having been made by running the Program). Whether that is true
+depends on what the Program does.
+
+1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided
+that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of
+warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other
+recipients of the Program a copy of this License along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection
+in exchange for a fee.
+
+2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and
+copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of
+these conditions:
+
+a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any
+change.
+
+b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the
+Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this
+License.
+
+c) If the modified program normally reads commands interactively when run, you must cause it, when started running for
+such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright
+notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may
+redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if
+the Program itself is interactive but does not normally print such an announcement, your work based on the Program is
+not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the
+Program, and can be reasonably considered independent and separate works in themselves, then this License, and its
+terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same
+sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part
+regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you;
+rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the
+Program.
+
+In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the
+Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License.
+
+3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you also do one of the following:
+
+a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms
+of Sections 1 and 2 above on a medium customarily used for software interchange; or,
+
+b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than
+your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source
+code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange;
+or,
+
+c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This
+alternative is allowed only for noncommercial distribution and only if you received the program in object code or
+executable form with such an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for making modifications to it. For an executable work,
+complete source code means all the source code for all modules it contains, plus any associated interface definition
+files, plus the scripts used to control compilation and installation of the executable. However, as a special exception,
+the source code distributed need not include anything that is normally distributed (in either source or binary form)
+with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless
+that component itself accompanies the executable.
+
+If distribution of executable or object code is made by offering access to copy from a designated place, then offering
+equivalent access to copy the source code from the same place counts as distribution of the source code, even though
+third parties are not compelled to copy the source along with the object code.
+
+4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your
+rights under this License. However, parties who have received copies, or rights, from you under this License will not
+have their licenses terminated so long as such parties remain in full compliance.
+
+5. You are not required to accept this License, since you have not signed it. However, nothing else grants you
+permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do
+not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you
+indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or
+modifying the Program or works based on it.
+
+6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a
+license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You
+may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not
+responsible for enforcing compliance by third parties to this License.
+
+7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to
+patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the
+conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as
+to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence
+you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution
+of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy
+both it and this License would be to refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the
+section is intended to apply and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest
+validity of any such claims; this section has the sole purpose of protecting the integrity of the free software
+distribution system, which is implemented by public license practices. Many people have made generous contributions to
+the wide range of software distributed through that system in reliance on consistent application of that system; it is
+up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee
+cannot impose that choice.
+
+This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License.
+
+8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted
+interfaces, the original copyright holder who places the Program under this License may add an explicit geographical
+distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if written in the body of this License.
+
+9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time.
+Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or
+concerns.
+
+Each version is given a distinguishing version number. If the Program specifies a version number of this License which
+applies to it and "any later version", you have the option of following the terms and conditions either of that version
+or of any later version published by the Free Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software Foundation.
+
+10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are
+different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation,
+write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two
+goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of
+software generally.
+
+NO WARRANTY
+
+11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM
+"AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR
+CORRECTION.
+
+12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY
+WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL,
+SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT
+LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF
+THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY
+OF SUCH DAMAGES.
+
+END OF TERMS AND CONDITIONS
+
+
diff --git a/feed/switch/src/switch_753x.c b/feed/switch/src/switch_753x.c
new file mode 100644
index 0000000..d0e1fc2
--- /dev/null
+++ b/feed/switch/src/switch_753x.c
@@ -0,0 +1,682 @@
+/*
+ * switch_753x.c: set for 753x switch
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <linux/if.h>
+
+#include "switch_netlink.h"
+#include "switch_ioctl.h"
+#include "switch_fun.h"
+
+struct mt753x_attr *attres;
+int chip_name;
+bool nl_init_flag;
+
+static void usage(char *cmd)
+{
+	printf("==================Usage===============================================================================================================================\n");
+
+	/* 1. basic operations */
+	printf("1) mt753x switch Basic operations=================================================================================================================>>>>\n");
+	printf(" 1.1) %s devs							- list switch device id and model name  \n", cmd);
+	printf(" 1.2) %s sysctl							- show the ways to access kernel driver: netlink or ioctl \n", cmd);
+	printf(" 1.3) %s reset							- sw reset switch fsm and registers\n", cmd);
+	printf(" 1.4) %s reg r [offset]						- read the reg with default switch \n", cmd);
+	printf(" 1.5) %s reg w [offset] [value]					- write the reg with default switch \n", cmd);
+	printf(" 1.6) %s reg d [offset]						- dump the reg with default switch\n", cmd);
+	printf(" 1.7) %s dev [devid] reg r [addr]				- read the reg with the switch devid  \n", cmd);
+	printf(" 1.8) %s dev [devid] reg w [addr] [value] 			- write the regs with the switch devid \n", cmd);
+	printf(" 1.9) %s dev [devid] reg d [addr]				- dump the regs with the switch devid \n", cmd);
+	printf("																			\n");
+
+	/* 2. phy operations */
+	printf("2) mt753x switch PHY operations===================================================================================================================>>>>\n");
+	printf(" 2.1) %s phy							- dump all phy registers (clause 22)\n", cmd);
+	printf(" 2.2) %s phy [phy_addr]						- dump phy register of specific port (clause 22)\n", cmd);
+	printf(" 2.3) %s phy cl22 r [port_num] [phy_reg]			- read specific phy register of specific port by clause 22\n", cmd);
+	printf(" 2.4) %s phy cl22 w [port_num] [phy_reg] [value]		- write specific phy register of specific port by clause 22\n", cmd);
+	printf(" 2.5) %s phy cl45 r [port_num] [dev_num] [phy_reg]		- read specific phy register of specific port by clause 45\n", cmd);
+	printf(" 2.6) %s phy cl45 w [port_num] [dev_num] [phy_reg] [value]	- write specific phy register of specific port by clause 45\n", cmd);
+	printf(" 2.7) %s phy fc [port_num] [enable 0|1]				- set switch phy flow control, port is 0~4, enable is 1, disable is 0 \n", cmd);
+	printf(" 2.8) %s phy an [port_num] [enable 0|1]				- set switch phy auto-negotiation, port is 0~4, enable is 1, disable is 0 \n", cmd);
+	printf(" 2.9) %s trreg r [port_num] [ch_addr] [node_addr] [data_addr]	- read phy token-ring of specific port\n", cmd);
+	printf(" 2.10) %s trreg w [port_num] [ch_addr] [node_addr] [data_addr]	- write phy token-ring of specific port\n", cmd);
+	printf("		[high_value] [low_value]									\n");
+	printf(" 2.11) %s crossover [port_num] [mode auto|mdi|mdix]		- switch auto or force mdi/mdix mode for crossover cable\n", cmd);
+	printf("																			\n");
+
+	/* 3. mac operations */
+	printf("3) mt753x switch MAC operations====================================================================================================================>>>>\n");
+	printf(" 3.1) %s dump							- dump switch mac table\n", cmd);
+	printf(" 3.2) %s clear							- clear switch mac table\n", cmd);
+	printf(" 3.3) %s add [mac] [portmap]					- add an entry (with portmap) to switch mac table\n", cmd);
+	printf(" 3.4) %s add [mac] [portmap] [vlan id]				- add an entry (with portmap, vlan id) to switch mac table\n", cmd);
+	printf(" 3.5) %s add [mac] [portmap] [vlan id] [age]			- add an entry (with portmap, vlan id, age out time) to switch mac table\n", cmd);
+	printf(" 3.6) %s del mac [mac] vid [vid]				- delete an entry from switch mac table\n", cmd);
+	printf(" 3.7) %s del mac [mac] fid [fid]				- delete an entry from switch mac table\n", cmd);
+	printf(" 3.8) %s search mac [mac] vid [vid]				- search an entry with specific mac and vid\n", cmd);
+	printf(" 3.9) %s search mac [mac] fid [fid]				- search an entry with specific mac and fid\n", cmd);
+	printf(" 3.10) %s filt [mac]						- add a SA filtering entry (with portmap 1111111) to switch mac table\n", cmd);
+	printf(" 3.11) %s filt [mac] [portmap]					- add a SA filtering entry (with portmap)to switch mac table\n", cmd);
+	printf(" 3.12) %s filt [mac] [portmap] [vlan id]			- add a SA filtering entry (with portmap, vlan id) to switch mac table\n", cmd);
+	printf(" 3.13) %s filt [mac] [portmap] [vlan id] [age]			- add a SA filtering entry (with portmap, vlan id, age out time) to switch table\n", cmd);
+	printf(" 3.14) %s arl aging [active:0|1] [time:1~65536]			- set switch arl aging timeout value \n", cmd);
+	printf(" 3.15) %s macctl fc [enable|disable]				- set switch mac global flow control,enable is 1, disable is 0 \n", cmd);
+	printf("																			\n");
+
+	/* 4. mib counter operations */
+	printf("4) mt753x switch mib counter operations============================================================================================================>>>>\n");
+	printf(" 4.1) %s esw_cnt get						-get switch mib counters          \n", cmd);
+	printf(" 4.2) %s esw_cnt clear						-clear switch mib counters         \n", cmd);
+	printf(" 4.3) %s output_queue_cnt get					-get switch output queue counters \n", cmd);
+	printf(" 4.4) %s free_page get						-get switch system free page counters  \n", cmd);
+	printf("																			\n");
+
+	/* 5. acl function operations */
+	printf("5) mt753x switch acl function operations============================================================================================================>>>>\n");
+	printf(" 5.1) %s acl enable [port] [port_enable:0|1]			- set switch acl function enabled, port is 0~6,enable is 1, disable is 0  \n", cmd);
+	printf(" 5.2) %s acl etype add [ethtype] [portmap]			- drop L2 ethertype packets \n", cmd);
+	printf(" 5.3) %s acl dmac add [mac] [portmap]				- drop L2 dest-Mac packets \n", cmd);
+	printf(" 5.4) %s acl dip add [dip] [portmap]				- drop dip packets \n", cmd);
+	printf(" 5.5) %s acl port add [sport] [portmap]				- drop L4 UDP/TCP source port packets\n", cmd);
+	printf(" 5.6) %s acl L4 add [2bytes] [portmap]				- drop L4 packets with 2bytes payload\n", cmd);
+	printf(" 5.7) %s acl acltbl-add  [tbl_idx:0~63/255] [vawd1] [vawd2]	- set switch acl table new entry, max index-7530:63,7531:255 \n", cmd);
+	printf(" 5.8) %s acl masktbl-add [tbl_idx:0~31/127] [vawd1] [vawd2]	- set switch acl mask table new entry, max index-7530:31,7531:127   \n", cmd);
+	printf(" 5.9) %s acl ruletbl-add [tbl_idx:0~31/127] [vawd1] [vawd2]	- set switch acl rule table new entry, max index-7530:31,7531:127  \n", cmd);
+	printf(" 5.10) %s acl ratetbl-add [tbl_idx:0~31] [vawd1] [vawd2] 	- set switch acl rate table new entry  \n", cmd);
+	printf(" 5.11) %s acl dip meter [dip] [portmap][meter:kbps]		- rate limit dip packets \n", cmd);
+	printf(" 5.12) %s acl dip trtcm [dip] [portmap][CIR:kbps][CBS][PIR][PBS]- TrTCM dip packets \n", cmd);
+	printf(" 5.13) %s acl dip modup [dip] [portmap][usr_pri]		- modify usr priority from ACL \n", cmd);
+	printf(" 5.14) %s acl dip pppoe [dip] [portmap]				- pppoe header removal \n", cmd);
+	printf("																			\n");
+
+	/* 6. dip table operations */
+	printf("6) mt753x switch dip table operations=================================================================================================================>>>>\n");
+	printf(" 6.1) %s dip dump						- dump switch dip table\n", cmd);
+	printf(" 6.2) %s dip clear						- clear switch dip table\n", cmd);
+	printf(" 6.3) %s dip add [dip] [portmap]				- add a dip entry to switch table\n", cmd);
+	printf(" 6.4) %s dip del [dip]						- del a dip entry to switch table\n", cmd);
+	printf("																			\n");
+
+	/* 7. sip table operations */
+	printf("7) mt753x switch sip table operations=================================================================================================================>>>>\n");
+	printf(" 7.1) %s sip dump						- dump switch sip table\n", cmd);
+	printf(" 7.2) %s sip clear						- clear switch sip table\n", cmd);
+	printf(" 7.3) %s sip add [sip] [dip] [portmap]				- add a sip entry to switch table\n", cmd);
+	printf(" 7.4) %s sip del [sip] [dip]					- del a sip entry to switch table\n", cmd);
+	printf("																			\n");
+
+	/* 8. vlan table operations */
+	printf("8) mt753x switch vlan table operations====================================================================================================================>>>>\n");
+	printf(" 8.1) %s vlan dump (egtag)					- dump switch vlan table (with per port eg_tag setting)\n", cmd);
+	printf(" 8.2) %s vlan set [fid:0~7] [vid] [portmap]			- set vlan id and associated member at switch vlan table\n", cmd);
+	printf("			([stag:0~4095] [eg_con:0|1] [egtagPortMap 0:untagged 2:tagged]) \n");
+	printf("			Full Example: %s vlan set 0 3 10000100 0 0 20000200\n", cmd);
+	printf(" 8.3) %s vlan vid [vlan idx] [active:0|1] [vid] [portMap] 	- set switch vlan vid elements  \n", cmd);
+	printf("			[egtagPortMap] [ivl_en] [fid] [stag]							 \n");
+	printf(" 8.4) %s vlan pvid [port] [pvid]				- set switch vlan pvid  \n", cmd);
+	printf(" 8.5) %s vlan acc-frm [port] [acceptable_frame_type:0~3]	- set switch vlan acceptable_frame type : admit all frames: 0, \n", cmd);
+	printf("									admit only vlan-taged frames: 1,admit only untagged or priority-tagged frames: 2, reserved:3 \n");
+	printf(" 8.6) %s vlan port-attr [port] [attr:0~3]			- set switch vlan port attribute: user port: 0, stack port: 1, \n", cmd);
+	printf("									translation port: 2, transparent port:3        \n");
+	printf(" 8.7) %s vlan port-mode [port] [mode:0~3]			- set switch vlan port mode : port matrix mode: 0, fallback mode: 1,  \n", cmd);
+	printf("									check mode: 2, security mode:3                    \n");
+	printf(" 8.8) %s vlan eg-tag-pvc [port] [eg_tag:0~7]			- set switch vlan eg tag pvc : disable: 0, consistent: 1, reserved: 2, \n", cmd);
+	printf("									reserved:3,untagged:4,swap:5,tagged:6, stack:7                 \n");
+	printf(" 8.9) %s vlan eg-tag-pcr [port] [eg_tag:0~3]			- set switch vlan eg tag pcr : untagged: 0, swap: 1, tagged: 2, stack:3 \n", cmd);
+	printf("																			\n");
+
+	/* 9. rate limit operations */
+	printf("9) mt753x switch rate limit operations=================================================================================================================>>>>\n");
+	printf(" 9.1) %s ratectl [in_ex_gress:0|1] [port] [rate]		- set switch port ingress(1) or egress(0) rate  \n", cmd);
+	printf(" 9.2) %s ingress-rate on [port] [Kbps]				- set ingress rate limit on port n (n= 0~ switch max port) \n", cmd);
+	printf(" 9.3) %s egress-rate on [port] [Kbps]				- set egress rate limit on port n (n= 0~ switch max port) \n", cmd);
+	printf(" 9.4) %s ingress-rate off [port]				- disable ingress rate limit on port n (n= 0~ switch max port) \n", cmd);
+	printf(" 9.5) %s egress-rate off [port]					- disable egress rate limit on port n (n= 0~ switch max port)\n", cmd);
+	printf("																			\n");
+
+	/* 10. igmp operations */
+	printf("10) mt753x igmp operations===============================================================================================================================>>>>\n");
+	printf(" 10.1) %s igmpsnoop on [leaky_en] [wan_num]			- turn on IGMP snoop and router port learning\n", cmd);
+	printf("									leaky_en: 1 or 0. default 0; wan_num: 0 or 4. default 4\n");
+	printf(" 10.2) %s igmpsnoop off						- turn off IGMP snoop and router port learning\n", cmd);
+	printf(" 10.3) %s igmpsnoop enable [port#]				- enable IGMP HW leave/join/Squery/Gquery\n", cmd);
+	printf(" 10.4) %s igmpsnoop disable [port#]				- disable IGMP HW leave/join/Squery/Gquery\n", cmd);
+	printf("																			\n");
+
+	/* 11. QoS operations */
+	printf("11) mt753x QoS operations================================================================================================================================>>>>\n");
+	printf(" 11.1) %s qos sch [port:0~6] [queue:0~7] [shaper:min|max] [type:rr:0|sp:1|wfq:2]     - set switch qos sch type\n", cmd);
+	printf(" 11.2) %s qos base [port:0~6] [base]					- set switch qos base(UPW); port-based:0, tag-based:1, \n", cmd);
+	printf("									dscp-based:2, acl-based:3, arl-based:4, stag-based:5   \n");
+	printf(" 11.3) %s qos port-weight [port:0~6] [q0] [q1][q2][q3]		- set switch qos port queue weight; \n", cmd);
+	printf("				[q4][q5][q6][q7]				 [qn]: the weight of queue n, range: 1~16     \n");
+	printf(" 11.4) %s qos port-prio [port:0~6] [prio:0~7]			- set switch port qos user priority;  port is 0~6, priority is 0~7  \n", cmd);
+	printf(" 11.5) %s qos dscp-prio [dscp:0~63] [prio:0~7]			- set switch qos dscp user priority;  dscp is 0~63, priority is 0~7  \n", cmd);
+	printf(" 11.6) %s qos prio-qmap [port:0~6] [prio:0~7]  [queue:0~7]			- set switch qos priority queue map; priority is 0~7,queue is 0~7  \n", cmd);
+	printf("																			\n");
+
+	/*12. port mirror operations*/
+	printf(" 12) mt753x port mirror operations========================================================================================================================>>>>\n");
+	printf(" 12.1) %s mirror monitor [port]					- enable port mirror and indicate monitor port number\n", cmd);
+	printf(" 12.2) %s mirror target  [port]					- set port mirror target\n", cmd);
+	printf("			[direction| 0:off, 1:rx, 2:tx, 3:all]					\n");
+	printf(" 12.3) %s mirror enable [mirror_en:0|1] [mirror_port: 0-6]	- set switch mirror function enable(1) or disabled(0) for port 0~6  \n", cmd);
+	printf(" 12.4) %s mirror port-based [port] [port_tx_mir:0|1]		- set switch mirror port: target tx/rx/acl/vlan/igmp\n", cmd);
+	printf("				[port_rx_mir:0|1] [acl_mir:0|1]						\n");
+	printf("				[vlan_mis:0|1] [igmp_mir:0|1]						\n");
+	printf("																			\n");
+
+	/*13. stp function*/
+	printf(" 13) mt753x stp operations===============================================================================================================================>>>>\n");
+	printf(" 13.1) %s stp [port] [fid] [state]				- set switch spanning tree state, port is 0~6, fid is 0~7,  \n", cmd);
+	printf("									state is 0~3(Disable/Discarding:0,Blocking/Listening/Discarding:1,) \n");
+	printf("									Learning:2,Forwarding:3 \n");
+	printf("																			\n");
+
+	/*14. collision pool operations*/
+	printf("14) mt753x collision pool operations========================================================================================================================>>>>\n");
+	printf(" 14.1) %s collision-pool enable [enable 0|1]			- enable or disable collision pool\n", cmd);
+	printf(" 14.2) %s collision-pool mac dump				- dump collision pool mac table\n", cmd);
+	printf(" 14.3) %s collision-pool dip dump				- dump collision pool dip table\n", cmd);
+	printf(" 14.4) %s collision-pool sip dump				- dump collision pool sip table\n", cmd);
+	printf("																			\n");
+
+	/*15. pfc(priority flow control) operations*/
+	printf("15) mt753x pfc(priority flow control) operations==============================================================================================================>>>>\n");
+	printf(" 15.1) %s pfc enable [port] [enable 0|1]			- enable or disable port's pfc \n", cmd);
+	printf(" 15.2) %s pfc rx_counter [port]					- get port n pfc 8 up rx counter \n", cmd);
+	printf(" 15.3) %s pfc tx_counter [port]					- get port n pfc 8 up tx counter \n", cmd);
+	printf("																			\n");
+
+	/*16. EEE(802.3az) operations*/
+	printf("16) mt753x EEE(802.3az) operations==============================================================================================================>>>>\n");
+	printf(" 16.1) %s eee enable [enable 0|1] ([portMap])			- enable or disable EEE (by portMap)\n", cmd);
+	printf(" 16.2) %s eee dump ([port])					- dump EEE capability (by port)\n", cmd);
+	printf("																			\n");
+
+	exit_free();
+	exit(0);
+}
+
+/*
+ * Parse and execute the "reg" subcommands.
+ * argv[len - 3] must be "reg"; argv[len - 2] selects the operation:
+ *   r <off>        - read one register (offset parsed as hex)
+ *   w <off> <val>  - write one register
+ *   d <off>        - dump 16 rows of 4 consecutive 32-bit words
+ * Any malformed command line falls back to usage(), which exits.
+ */
+static void parse_reg_cmd(int argc, char *argv[], int len)
+{
+	unsigned int val;
+	unsigned int off;
+	int i, j;
+
+	/* Guard: the read/dump paths dereference argv[len - 1]; the
+	 * original code did so unconditionally, passing NULL to strtoul
+	 * on a truncated command line such as "... reg r". */
+	if (argc < len)
+		usage(argv[0]);
+
+	if (!strncmp(argv[len - 3], "reg", 4)) {
+		if (argv[len - 2][0] == 'r') {
+			off = strtoul(argv[len - 1], NULL, 16);
+			reg_read(off, &val);
+			printf(" Read reg=%x, value=%x\n", off, val);
+		} else if (argv[len - 2][0] == 'w') {
+			off = strtoul(argv[len - 1], NULL, 16);
+			/* write needs one extra argument: the value */
+			if (argc != len + 1)
+				usage(argv[0]);
+			val = strtoul(argv[len], NULL, 16);
+			reg_write(off, val);
+			printf(" Write reg=%x, value=%x\n", off, val);
+		} else if (argv[len - 2][0] == 'd') {
+			off = strtoul(argv[len - 1], NULL, 16);
+			/* dump 16 lines x 4 words = 256 bytes from 'off' */
+			for (i = 0; i < 16; i++) {
+				printf("0x%08x: ", off + 0x10 * i);
+				for (j = 0; j < 4; j++) {
+					reg_read(off + i * 0x10 + j * 0x4, &val);
+					printf(" 0x%08x", val);
+				}
+				printf("\n");
+			}
+		} else
+			usage(argv[0]);
+	} else
+		usage(argv[0]);
+}
+
+/*
+ * Identify the switch chip by probing its id registers.
+ * MT7530 exposes its model number in the high 16 bits of register
+ * 0x7ffc; MT7531 in the high 16 bits of register 0x781c.
+ * Returns 0x7530 or 0x7531 on a match, -1 otherwise.
+ * NOTE(review): reg_read() failures are not distinguished from an
+ * unknown id - both yield -1 via the final fall-through.
+ */
+static int get_chip_name()
+{
+	unsigned int temp;
+	/*judge 7530*/
+	reg_read((0x7ffc), &temp);
+	temp = temp >> 16;
+	if (temp == 0x7530)
+		return temp;
+	/*judge 7531*/
+	reg_read(0x781c, &temp);
+	temp = temp >> 16;
+	if (temp == 0x7531)
+		return temp;
+	return -1;
+}
+
+/*
+ * Handle "phy cl22|cl45 r|w ..." PHY register access commands.
+ *
+ * The clause-22 vs clause-45 variant is inferred from argc:
+ *   read : argc==6 -> cl22 (port reg); argc==7 -> cl45 (port dev reg);
+ *          any other count dumps all 32 PHYs instead.
+ *   write: argc==7 -> cl22 (port reg val); argc==8 -> cl45
+ *          (port dev reg val); anything else prints usage and exits.
+ * Returns the result of the underlying mii accessor; an unknown op
+ * character falls through the switch silently and returns 0.
+ * NOTE(review): argv[2]/argv[3] are dereferenced without a local argc
+ * check - presumably the caller guarantees at least 4 args; confirm.
+ */
+static int phy_operate(int argc, char *argv[])
+{
+	unsigned int port_num;
+	unsigned int dev_num;
+	unsigned int value;
+	unsigned int reg;
+	int ret = 0;
+	char op;
+
+	/* only cl22/cl45 are valid access modes */
+	if (strncmp(argv[2], "cl22", 4) && strncmp(argv[2], "cl45", 4))
+		usage(argv[0]);
+
+	op = argv[3][0];
+
+	switch(op) {
+		case 'r':
+			/* register number is always the last argument */
+			reg = strtoul(argv[argc-1], NULL, 0);
+			if (argc == 6) {
+				port_num = strtoul(argv[argc-2], NULL, 0);
+				ret = mii_mgr_read(port_num, reg, &value);
+				if (ret < 0)
+					printf(" Phy read reg fail\n");
+				else
+					printf(" Phy read reg=0x%x, value=0x%x\n", reg, value);
+			} else if (argc == 7) {
+				dev_num = strtoul(argv[argc-2], NULL, 0);
+				port_num = strtoul(argv[argc-3], NULL, 0);
+				ret = mii_mgr_c45_read(port_num, dev_num, reg, &value);
+				if (ret < 0)
+					printf(" Phy read reg fail\n");
+				else
+					printf(" Phy read reg=0x%x, value=0x%x\n", reg, value);
+			} else
+				ret = phy_dump(32);
+			break;
+		case 'w':
+			/* last two arguments are register and value */
+			reg = strtoul(argv[argc-2], NULL, 0);
+			value = strtoul(argv[argc-1], NULL, 0);
+			if (argc == 7) {
+				port_num = strtoul(argv[argc-3], NULL, 0);
+				ret = mii_mgr_write(port_num, reg, value);
+			}
+			else if (argc == 8) {
+				dev_num = strtoul(argv[argc-3], NULL, 0);
+				port_num = strtoul(argv[argc-4], NULL, 0);
+				ret = mii_mgr_c45_write(port_num, dev_num, reg, value);
+			}
+			else
+				usage(argv[0]);
+			break;
+		default:
+			break;
+	}
+
+	return ret;
+}
+
+
+/*
+ * Entry point of the mt753x switch configuration tool.
+ *
+ * Setup: allocate the netlink attribute holder, then try the DSA
+ * generic-netlink family, then the GSW family, and finally fall back
+ * to the legacy ioctl interface; each attempt is validated by reading
+ * the chip id. Afterwards argv[1] (optionally preceded by a
+ * "dev <id>" prefix) selects the operation and is dispatched to the
+ * matching handler via a strncmp chain.
+ */
+int main(int argc, char *argv[])
+{
+	int err;
+
+	/* NOTE(review): malloc return value is not checked for NULL */
+	attres = (struct mt753x_attr *)malloc(sizeof(struct mt753x_attr));
+	attres->dev_id = -1;
+	attres->port_num = -1;
+	attres->phy_dev = -1;
+	nl_init_flag = true;
+
+	/* First choice: DSA generic-netlink family */
+	err = mt753x_netlink_init(MT753X_DSA_GENL_NAME);
+	if (!err)
+		chip_name = get_chip_name();
+
+	/* dsa netlink family might not be enabled. Try gsw netlink family. */
+	if (err < 0 || chip_name < 0) {
+		err = mt753x_netlink_init(MT753X_GENL_NAME);
+		if (!err)
+			chip_name = get_chip_name();
+	}
+
+	/* Last resort: legacy ioctl interface */
+	if (err < 0 || chip_name < 0) {
+		nl_init_flag = false;
+
+		switch_ioctl_init();
+		chip_name = get_chip_name();
+		if (chip_name < 0) {
+			printf("no chip unsupport or chip id is invalid!\n");
+			exit_free();
+			exit(0);
+		}
+	}
+
+	if (argc < 2)
+		usage(argv[0]);
+
+	/* Optional "dev <id>" prefix selects a device, then shifts
+	 * argv/argc so the dispatch below is position-independent. */
+	if (!strcmp(argv[1], "dev")) {
+		attres->dev_id = strtoul(argv[2], NULL, 0);
+		argv += 2;
+		argc -= 2;
+		if (argc < 2)
+			usage(argv[0]);
+
+	}
+
+	/* Single-token commands */
+	if (argc == 2) {
+		if (!strcmp(argv[1], "devs")) {
+			attres->type = MT753X_ATTR_TYPE_MESG;
+			mt753x_list_swdev(attres, MT753X_CMD_REQUEST);
+		} else if (!strncmp(argv[1], "dump", 5)) {
+			table_dump();
+		} else if (!strncmp(argv[1], "clear", 6)) {
+			table_clear();
+			printf("done.\n");
+		} else if (!strncmp(argv[1], "reset", 5)) {
+			switch_reset(argc, argv);
+		} else if (!strncmp(argv[1], "phy", 4)) {
+			phy_dump(32); //dump all phy register
+		} else if (!strncmp(argv[1], "sysctl", 7)) {
+			/* report which backend is in use */
+			if (nl_init_flag)
+				printf("netlink(%s)\n",MT753X_GENL_NAME);
+			else
+				printf("ioctl(%s)\n",ETH_DEVNAME);
+		} else
+			usage(argv[0]);
+	} else if (!strncmp(argv[1], "arl", 4)) {
+		if (!strncmp(argv[2], "aging", 6))
+			doArlAging(argc, argv);
+	} else if (!strncmp(argv[1], "esw_cnt", 8)) {
+		if (!strncmp(argv[2], "get", 4))
+			read_mib_counters();
+		else if (!strncmp(argv[2], "clear", 6))
+			clear_mib_counters();
+		else
+			usage(argv[0]);
+	}else if (!strncmp(argv[1], "output_queue_cnt", 17)) {
+		if (!strncmp(argv[2], "get", 4))
+			read_output_queue_counters();
+		else
+			usage(argv[0]);
+	}else if (!strncmp(argv[1], "free_page", 10)) {
+		if (!strncmp(argv[2], "get", 4))
+			read_free_page_counters();
+		else
+			usage(argv[0]);
+	}
+	else if (!strncmp(argv[1], "ratectl", 8))
+		rate_control(argc, argv);
+	else if (!strncmp(argv[1], "add", 4))
+		table_add(argc, argv);
+	else if (!strncmp(argv[1], "filt", 5))
+		table_add(argc, argv);
+	else if (!strncmp(argv[1], "del", 4)) {
+		/* NOTE(review): argv[4] is dereferenced without an argc
+		 * check - crashes on a short "del" command line; confirm. */
+		if (!strncmp(argv[4], "fid", 4))
+			table_del_fid(argc, argv);
+		else if (!strncmp(argv[4], "vid", 4))
+			table_del_vid(argc, argv);
+		else
+			usage(argv[0]);
+	} else if (!strncmp(argv[1], "search", 7)) {
+		/* NOTE(review): same unchecked argv[4] access as "del" */
+		if (!strncmp(argv[4], "fid", 4))
+			table_search_mac_fid(argc, argv);
+		else if (!strncmp(argv[4], "vid", 4))
+			table_search_mac_vid(argc, argv);
+		else
+			usage(argv[0]);
+	} else if (!strncmp(argv[1], "phy", 4)) {
+		if (argc == 3) {
+			/* "phy <addr>": dump a single PHY */
+			int phy_addr = strtoul(argv[2], NULL, 0);
+			if (phy_addr < 0 || phy_addr > 31)
+				usage(argv[0]);
+			phy_dump(phy_addr);
+		} else if (argc == 5) {
+			if (!strncmp(argv[2], "fc", 2))
+				phy_set_fc(argc, argv);
+			else if (!strncmp(argv[2], "an", 2))
+				phy_set_an(argc, argv);
+			else
+				phy_dump(32);
+		} else
+			phy_operate(argc, argv);
+	} else if (!strncmp(argv[1], "trreg", 4)) {
+		if (rw_phy_token_ring(argc, argv) < 0)
+			usage(argv[0]);
+	} else if (!strncmp(argv[1], "macctl", 7)) {
+		if (argc < 3)
+			usage(argv[0]);
+		if (!strncmp(argv[2], "fc", 3))
+			global_set_mac_fc(argc, argv);
+		else if (!strncmp(argv[2], "pfc", 4))
+			set_mac_pfc(argc, argv);
+		else
+			usage(argv[0]);
+	} else if (!strncmp(argv[1], "qos", 4)) {
+		if (argc < 3)
+			usage(argv[0]);
+		if (!strncmp(argv[2], "sch", 4))
+			qos_sch_select(argc, argv);
+		else if (!strncmp(argv[2], "base", 5))
+			qos_set_base(argc, argv);
+		else if (!strncmp(argv[2], "port-weight", 12))
+			qos_wfq_set_weight(argc, argv);
+		else if (!strncmp(argv[2], "port-prio", 10))
+			qos_set_portpri(argc, argv);
+		else if (!strncmp(argv[2], "dscp-prio", 10))
+			qos_set_dscppri(argc, argv);
+		else if (!strncmp(argv[2], "prio-qmap", 10))
+			qos_pri_mapping_queue(argc, argv);
+		else
+			usage(argv[0]);
+	} else if (!strncmp(argv[1], "stp", 3)) {
+		if (argc < 3)
+			usage(argv[0]);
+		else
+			doStp(argc, argv);
+	} else if (!strncmp(argv[1], "sip", 5)) {
+		if (argc < 3)
+			usage(argv[0]);
+		if (!strncmp(argv[2], "dump", 5))
+			sip_dump();
+		else if (!strncmp(argv[2], "add", 4))
+			sip_add(argc, argv);
+		else if (!strncmp(argv[2], "del", 4))
+			sip_del(argc, argv);
+		else if (!strncmp(argv[2], "clear", 6))
+			sip_clear();
+		else
+			usage(argv[0]);
+	} else if (!strncmp(argv[1], "dip", 4)) {
+		if (argc < 3)
+			usage(argv[0]);
+		if (!strncmp(argv[2], "dump", 5))
+			dip_dump();
+		else if (!strncmp(argv[2], "add", 4))
+			dip_add(argc, argv);
+		else if (!strncmp(argv[2], "del", 4))
+			dip_del(argc, argv);
+		else if (!strncmp(argv[2], "clear", 6))
+			dip_clear();
+		else
+			usage(argv[0]);
+	} else if (!strncmp(argv[1], "mirror", 7)) {
+		if (argc < 3)
+			usage(argv[0]);
+		if (!strncmp(argv[2], "monitor", 8))
+			set_mirror_to(argc, argv);
+		else if (!strncmp(argv[2], "target", 7))
+			set_mirror_from(argc, argv);
+		else if (!strncmp(argv[2], "enable", 7))
+			doMirrorEn(argc, argv);
+		else if (!strncmp(argv[2], "port-based", 11))
+			doMirrorPortBased(argc, argv);
+		else
+			usage(argv[0]);
+	} else if (!strncmp(argv[1], "acl", 4)) {
+		if (argc < 3)
+			usage(argv[0]);
+		if (!strncmp(argv[2], "dip", 4)) {
+			if (!strncmp(argv[3], "add", 4))
+				acl_dip_add(argc, argv);
+			else if (!strncmp(argv[3], "modup", 6))
+				acl_dip_modify(argc, argv);
+			else if (!strncmp(argv[3], "pppoe", 6))
+				acl_dip_pppoe(argc, argv);
+			else if (!strncmp(argv[3], "trtcm", 4))
+				acl_dip_trtcm(argc, argv);
+			else if (!strncmp(argv[3], "meter", 6))
+				acl_dip_meter(argc, argv);
+			else
+				usage(argv[0]);
+		} else if (!strncmp(argv[2], "dmac", 6)) {
+			if (!strncmp(argv[3], "add", 4))
+				acl_mac_add(argc, argv);
+			else
+				usage(argv[0]);
+		} else if (!strncmp(argv[2], "etype", 6)) {
+			if (!strncmp(argv[3], "add", 4))
+				acl_ethertype(argc, argv);
+			else
+				usage(argv[0]);
+		} else if (!strncmp(argv[2], "port", 5)) {
+			if (!strncmp(argv[3], "add", 4))
+				acl_sp_add(argc, argv);
+			else
+				usage(argv[0]);
+		} else if (!strncmp(argv[2], "L4", 5)) {
+			if (!strncmp(argv[3], "add", 4))
+				acl_l4_add(argc, argv);
+			else
+				usage(argv[0]);
+		} else if (!strncmp(argv[2], "enable", 7))
+			acl_port_enable(argc, argv);
+		else if (!strncmp(argv[2], "acltbl-add", 11))
+			acl_table_add(argc, argv);
+		else if (!strncmp(argv[2], "masktbl-add", 12))
+			acl_mask_table_add(argc, argv);
+		else if (!strncmp(argv[2], "ruletbl-add", 12))
+			acl_rule_table_add(argc, argv);
+		else if (!strncmp(argv[2], "ratetbl-add", 12))
+			acl_rate_table_add(argc, argv);
+		else
+			usage(argv[0]);
+	} else if (!strncmp(argv[1], "vlan", 5)) {
+		if (argc < 3)
+			usage(argv[0]);
+		if (!strncmp(argv[2], "dump", 5))
+			vlan_dump(argc, argv);
+		else if (!strncmp(argv[2], "set", 4))
+			vlan_set(argc, argv);
+		else if (!strncmp(argv[2], "clear", 6))
+			vlan_clear(argc, argv);
+		else if (!strncmp(argv[2], "vid", 4))
+			doVlanSetVid(argc, argv);
+		else if (!strncmp(argv[2], "pvid", 5))
+			doVlanSetPvid(argc, argv);
+		else if (!strncmp(argv[2], "acc-frm", 8))
+			doVlanSetAccFrm(argc, argv);
+		else if (!strncmp(argv[2], "port-attr", 10))
+			doVlanSetPortAttr(argc, argv);
+		else if (!strncmp(argv[2], "port-mode", 10))
+			doVlanSetPortMode(argc, argv);
+		else if (!strncmp(argv[2], "eg-tag-pcr", 11))
+			doVlanSetEgressTagPCR(argc, argv);
+		else if (!strncmp(argv[2], "eg-tag-pvc", 11))
+			doVlanSetEgressTagPVC(argc, argv);
+		else
+			usage(argv[0]);
+	} else if (!strncmp(argv[1], "reg", 4)) {
+		parse_reg_cmd(argc, argv, 4);
+	} else if (!strncmp(argv[1], "ingress-rate", 6)) {
+		int port = 0, bw = 0;
+		/* argv[2] is "on"/"off"; only the second char is tested */
+		if (argv[2][1] == 'n') {
+			port = strtoul(argv[3], NULL, 0);
+			bw = strtoul(argv[4], NULL, 0);
+			if (ingress_rate_set(1, port, bw) == 0)
+				printf("switch port=%d, bw=%d\n", port, bw);
+		}
+		else if (argv[2][1] == 'f') {
+			if (argc != 4)
+				usage(argv[0]);
+			port = strtoul(argv[3], NULL, 0);
+			if (ingress_rate_set(0, port, bw) == 0)
+				printf("switch port=%d ingress rate limit off\n", port);
+		} else
+			usage(argv[0]);
+	} else if (!strncmp(argv[1], "egress-rate", 6)) {
+		int port = 0, bw = 0;
+		if (argv[2][1] == 'n') {
+			port = strtoul(argv[3], NULL, 0);
+			bw = strtoul(argv[4], NULL, 0);
+			if (egress_rate_set(1, port, bw) == 0)
+				printf("switch port=%d, bw=%d\n", port, bw);
+		} else if (argv[2][1] == 'f') {
+			if (argc != 4)
+				usage(argv[0]);
+			port = strtoul(argv[3], NULL, 0);
+			if (egress_rate_set(0, port, bw) == 0)
+				printf("switch port=%d egress rate limit off\n", port);
+		} else
+			usage(argv[0]);
+	} else if (!strncmp(argv[1], "igmpsnoop", 10)) {
+		if (argc < 3)
+			usage(argv[0]);
+		if (!strncmp(argv[2], "on", 3))
+			igmp_on(argc, argv);
+		else if (!strncmp(argv[2], "off", 4))
+			igmp_off();
+		else if (!strncmp(argv[2], "enable", 7))
+			igmp_enable(argc, argv);
+		else if (!strncmp(argv[2], "disable", 8))
+			igmp_disable(argc, argv);
+		else
+			usage(argv[0]);
+	} else if (!strncmp(argv[1], "collision-pool", 15)) {
+		if (argc < 3)
+			usage(argv[0]);
+		if (!strncmp(argv[2], "enable", 7))
+			collision_pool_enable(argc, argv);
+		else if (!strncmp(argv[2], "mac", 4)){
+			if (!strncmp(argv[3], "dump", 5))
+				collision_pool_mac_dump();
+			else
+				usage(argv[0]);
+		} else if (!strncmp(argv[2], "dip", 4)){
+			if (!strncmp(argv[3], "dump", 5))
+				collision_pool_dip_dump();
+			else
+				usage(argv[0]);
+		} else if (!strncmp(argv[2], "sip", 4)){
+			if (!strncmp(argv[3], "dump", 5))
+				collision_pool_sip_dump();
+			else
+				usage(argv[0]);
+			}
+		else
+			usage(argv[0]);
+	}  else if (!strncmp(argv[1], "pfc", 15)) {
+		if (argc < 4 || argc > 5)
+			usage(argv[0]);
+		if (!strncmp(argv[2], "enable", 7))
+			set_mac_pfc(argc, argv);
+		else if (!strncmp(argv[2], "rx_counter", 11)){
+			pfc_get_rx_counter(argc, argv);
+		} else if (!strncmp(argv[2], "tx_counter", 11)){
+			pfc_get_tx_counter(argc, argv);
+		} else
+			usage(argv[0]);
+	} else if (!strncmp(argv[1], "crossover", 10)) {
+		if (argc < 4)
+			usage(argv[0]);
+		else
+			phy_crossover(argc, argv);
+	} else if (!strncmp(argv[1], "eee", 4)) {
+		if (argc < 3)
+			usage(argv[0]);
+		if (!strncmp(argv[2], "enable", 7) ||
+			 !strncmp(argv[2], "disable", 8))
+			eee_enable(argc, argv);
+		else if (!strncmp(argv[2], "dump", 5))
+			eee_dump(argc, argv);
+		else
+			usage(argv[0]);
+	} else
+		usage(argv[0]);
+
+	exit_free();
+	return 0;
+}
diff --git a/feed/switch/src/switch_extend.h b/feed/switch/src/switch_extend.h
new file mode 100644
index 0000000..c352767
--- /dev/null
+++ b/feed/switch/src/switch_extend.h
@@ -0,0 +1,342 @@
+/*
+ * Build-time feature switches and the MT753x global switch (GSW)
+ * register map: block base addresses plus per-port / per-queue
+ * register address generator macros ((n) = port or queue index).
+ */
+#define atoi(x)         strtoul(x, NULL,10)
+
+#define EXTEND_SETVID_PARAM	1
+#define SQA_VERIFY		1
+#define ETHCMD_DBG		1
+#define ACTIVED	(1<<0)
+#define SWITCH_MAX_PORT		7
+
+/* address-table selector: normal ARL vs collision pool */
+#define GENERAL_TABLE		0
+#define COLLISION_TABLE		1
+
+#define GSW_BASE		0x0
+#define GSW_ARL_BASE		(GSW_BASE + 0x0000)
+#define GSW_BMU_BASE		(GSW_BASE + 0x1000)
+#define GSW_PORT_BASE		(GSW_BASE + 0x2000)
+#define GSW_MAC_BASE		(GSW_BASE + 0x3000)
+#define GSW_MIB_BASE		(GSW_BASE + 0x4000)
+#define GSW_CFG_BASE		(GSW_BASE + 0x7000)
+
+#define GSW_PCR(n)		(GSW_PORT_BASE + (n)*0x100 + 0x04)
+#define GSW_MFC			(GSW_ARL_BASE + 0x10)
+#define GSW_UPW(n)		(GSW_PORT_BASE + (n)*0x100 + 0x40)
+//#define GSW_PEM(n)		(GSW_ARL_BASE + (n)*0x4 + 0x48)
+#define GSW_PEM(n)		(GSW_PORT_BASE + (n)*0x4 + 0x44)
+
+#define GSW_MMSCR0_Q(n)		(GSW_BMU_BASE + (n)*0x8)
+#define GSW_MMSCR1_Q(n)		(GSW_BMU_BASE + (n)*0x8 + 0x04)
+
+#define GSW_PMCR(n)		(GSW_MAC_BASE + (n)*0x100)
+#define GSW_PMSR(n)		(GSW_MAC_BASE + (n)*0x100 + 0x08)
+#define GSW_PINT_EN(n)		(GSW_MAC_BASE + (n)*0x100 + 0x10)
+#define GSW_SMACCR0		(GSW_MAC_BASE + 0xe4)
+#define GSW_SMACCR1		(GSW_MAC_BASE + 0xe8)
+#define GSW_CKGCR		(GSW_MAC_BASE + 0xf0)
+
+#define GSW_ESR(n)		(GSW_MIB_BASE + (n)*0x100 + 0x00)
+#define GSW_INTS(n)		(GSW_MIB_BASE + (n)*0x100 + 0x04)
+#define GSW_TGPC(n)		(GSW_MIB_BASE + (n)*0x100 + 0x10)
+#define GSW_TBOC(n)		(GSW_MIB_BASE + (n)*0x100 + 0x14)
+#define GSW_TGOC(n)		(GSW_MIB_BASE + (n)*0x100 + 0x18)
+#define GSW_TEPC(n)		(GSW_MIB_BASE + (n)*0x100 + 0x1C)
+#define GSW_RGPC(n)		(GSW_MIB_BASE + (n)*0x100 + 0x20)
+#define GSW_RBOC(n)		(GSW_MIB_BASE + (n)*0x100 + 0x24)
+#define GSW_RGOC(n)		(GSW_MIB_BASE + (n)*0x100 + 0x28)
+#define GSW_REPC1(n)		(GSW_MIB_BASE + (n)*0x100 + 0x2C)
+#define GSW_REPC2(n)		(GSW_MIB_BASE + (n)*0x100 + 0x30)
+#define GSW_MIBCNTEN		(GSW_MIB_BASE + 0x800)
+#define GSW_AECNT1		(GSW_MIB_BASE + 0x804)
+#define GSW_AECNT2		(GSW_MIB_BASE + 0x808)
+
+#define GSW_CFG_PPSC		(GSW_CFG_BASE + 0x0)
+#define GSW_CFG_PIAC		(GSW_CFG_BASE + 0x4)
+#define GSW_CFG_GPC		(GSW_CFG_BASE + 0x14)
+
+/*
+ * Address-table (ARL), VLAN table, collision pool, flow-control and
+ * PFC register addresses plus bit-field descriptors. Each field comes
+ * as OFFT (bit offset) / LENG (bit width) / RELMASK (right-aligned
+ * mask) / MASK (mask shifted into place).
+ */
+#define MAX_VID_VALUE			(4095)
+#define MAX_VLAN_RULE			(16)
+
+
+#define REG_MFC_ADDR			(0x0010)
+#define REG_ISC_ADDR			(0x0018)
+
+#define REG_CFC_ADDR			(0x0004)
+#define REG_CFC_MIRROR_PORT_OFFT	(16)
+#define REG_CFC_MIRROR_PORT_LENG	(3)
+#define REG_CFC_MIRROR_PORT_RELMASK	(0x00000007)
+#define REG_CFC_MIRROR_PORT_MASK	(REG_CFC_MIRROR_PORT_RELMASK << REG_CFC_MIRROR_PORT_OFFT)
+#define REG_CFC_MIRROR_EN_OFFT		(19)
+#define REG_CFC_MIRROR_EN_LENG		(1)
+#define REG_CFC_MIRROR_EN_RELMASK	(0x00000001)
+#define REG_CFC_MIRROR_EN_MASK		(REG_CFC_MIRROR_EN_RELMASK << REG_CFC_MIRROR_EN_OFFT)
+
+#define REG_ATA1_ADDR			(0x0074)
+#define REG_ATA2_ADDR			(0x0078)
+
+#define REG_ATWD_ADDR			(0x007C)
+#define REG_ATWD_STATUS_OFFT		(2)
+#define REG_ATWD_STATUS_LENG		(2)
+#define REG_ATWD_STATUS_RELMASK		(0x00000003)
+#define REG_ATWD_STATUS_MASK		(REG_ATWD_STATUS_RELMASK << REG_ATWD_STATUS_OFFT)
+#define REG_ATWD_PORT_OFFT		(4)
+#define REG_ATWD_PORT_LENG		(8)
+#define REG_ATWD_PORT_RELMASK		(0x000000FF)
+#define REG_ATWD_PORT_MASK		(REG_ATWD_PORT_RELMASK << REG_ATWD_PORT_OFFT)
+#define REG_ATWD_LEAKY_EN_OFFT		(12)
+#define REG_ATWD_LEAKY_EN_LENG		(1)
+#define REG_ATWD_LEAKY_EN_RELMASK	(0x00000001)
+#define REG_ATWD_LEAKY_EN_MASK		(REG_ATWD_LEAKY_EN_RELMASK << REG_ATWD_LEAKY_EN_OFFT)
+#define REG_ATWD_EG_TAG_OFFT		(13)
+#define REG_ATWD_EG_TAG_LENG		(3)
+#define REG_ATWD_EG_TAG_RELMASK		(0x00000007)
+#define REG_ATWD_EG_TAG_MASK		(REG_ATWD_EG_TAG_RELMASK << REG_ATWD_EG_TAG_OFFT)
+#define REG_ATWD_USR_PRI_OFFT		(16)
+#define REG_ATWD_USR_PRI_LENG		(3)
+#define REG_ATWD_USR_PRI_RELMASK	(0x00000007)
+#define REG_ATWD_USR_PRI_MASK		(REG_ATWD_USR_PRI_RELMASK << REG_ATWD_USR_PRI_OFFT)
+#define REG_ATWD_SA_MIR_EN_OFFT		(19)
+#define REG_ATWD_SA_MIR_EN_LENG		(1)
+#define REG_ATWD_SA_MIR_EN_RELMASK	(0x00000001)
+#define REG_ATWD_SA_MIR_EN_MASK		(REG_ATWD_SA_MIR_EN_RELMASK << REG_ATWD_SA_MIR_EN_OFFT)
+#define REG_ATWD_SA_PORT_FW_OFFT	(20)
+#define REG_ATWD_SA_PORT_FW_LENG	(3)
+#define REG_ATWD_SA_PORT_FW_RELMASK	(0x00000007)
+#define REG_ATWD_SA_PORT_FW_MASK	(REG_ATWD_SA_PORT_FW_RELMASK << REG_ATWD_SA_PORT_FW_OFFT)
+
+#define REG_ATC_ADDR			(0x0080)
+#define REG_ATC_AC_CMD_OFFT		(0)
+#define REG_ATC_AC_CMD_LENG		(3)
+#define REG_ATC_AC_CMD_RELMASK		(0x00000007)
+#define REG_ATC_AC_CMD_MASK		(REG_ATC_AC_CMD_RELMASK << REG_ATC_AC_CMD_OFFT)
+#define REG_ATC_AC_SAT_OFFT		(4)
+#define REG_ATC_AC_SAT_LENG		(2)
+#define REG_ATC_AC_SAT_RELMASK		(0x00000003)
+#define REG_ATC_AC_SAT_MASK		(REG_ATC_AC_SAT_RELMASK << REG_ATC_AC_SAT_OFFT)
+#define REG_ATC_AC_MAT_OFFT		(8)
+#define REG_ATC_AC_MAT_LENG		(4)
+#define REG_ATC_AC_MAT_RELMASK		(0x0000000F)
+#define REG_ATC_AC_MAT_MASK		(REG_ATC_AC_MAT_RELMASK << REG_ATC_AC_MAT_OFFT)
+#define REG_AT_SRCH_HIT_OFFT		(13)
+#define REG_AT_SRCH_HIT_RELMASK		(0x00000001)
+#define REG_AT_SRCH_HIT_MASK		(REG_AT_SRCH_HIT_RELMASK << REG_AT_SRCH_HIT_OFFT)
+#define REG_AT_SRCH_END_OFFT		(14)
+#define REG_AT_SRCH_END_RELMASK		(0x00000001)
+#define REG_AT_SRCH_END_MASK		(REG_AT_SRCH_END_RELMASK << REG_AT_SRCH_END_OFFT)
+#define REG_ATC_BUSY_OFFT		(15)
+#define REG_ATC_BUSY_LENG		(1)
+#define REG_ATC_BUSY_RELMASK		(0x00000001)
+#define REG_ATC_BUSY_MASK		(REG_ATC_BUSY_RELMASK << REG_ATC_BUSY_OFFT)
+#define REG_AT_ADDR_OFFT		(16)
+#define REG_AT_ADDR_LENG		(12)
+#define REG_AT_ADDR_RELMASK		(0x00000FFF)
+#define REG_AT_ADDR_MASK		(REG_AT_ADDR_RELMASK << REG_AT_ADDR_OFFT)
+
+#define REG_TSRA1_ADDR			(0x0084)
+#define REG_TSRA2_ADDR			(0x0088)
+#define REG_ATRD_ADDR			(0x008C)
+
+#define REG_VTCR_ADDR			(0x0090)
+#define REG_VTCR_VID_OFFT		(0)
+#define REG_VTCR_VID_LENG		(12)
+#define REG_VTCR_VID_RELMASK		(0x00000FFF)
+#define REG_VTCR_VID_MASK		(REG_VTCR_VID_RELMASK << REG_VTCR_VID_OFFT)
+#define REG_VTCR_FUNC_OFFT		(12)
+#define REG_VTCR_FUNC_LENG		(4)
+#define REG_VTCR_FUNC_RELMASK		(0x0000000F)
+#define REG_VTCR_FUNC_MASK		(REG_VTCR_FUNC_RELMASK << REG_VTCR_FUNC_OFFT)
+#define REG_VTCR_IDX_INVLD_OFFT		(16)
+#define REG_VTCR_IDX_INVLD_RELMASK	(0x00000001)
+#define REG_VTCR_IDX_INVLD_MASK		(REG_VTCR_IDX_INVLD_RELMASK << REG_VTCR_IDX_INVLD_OFFT)
+#define REG_VTCR_BUSY_OFFT		(31)
+#define REG_VTCR_BUSY_RELMASK		(0x00000001)
+#define REG_VTCR_BUSY_MASK		(REG_VTCR_BUSY_RELMASK << REG_VTCR_BUSY_OFFT)
+
+#define REG_VAWD1_ADDR			(0x0094)
+#define REG_VAWD2_ADDR			(0x0098)
+#define REG_VLAN_ID_BASE		(0x0100)
+
+#define REG_CPGC_ADDR 			(0xB0)
+#define REG_CPCG_COL_EN_OFFT		(0)
+#define REG_CPCG_COL_EN_RELMASK		(0x00000001)
+#define REG_CPCG_COL_EN_MASK		(REG_CPCG_COL_EN_RELMASK << REG_CPCG_COL_EN_OFFT)
+#define REG_CPCG_COL_CLK_EN_OFFT	(1)
+#define REG_CPCG_COL_CLK_EN_RELMASK	(0x00000001)
+#define REG_CPCG_COL_CLK_EN_MASK	(REG_CPCG_COL_CLK_EN_RELMASK << REG_CPCG_COL_CLK_EN_OFFT)
+#define REG_CPCG_COL_RST_N_OFFT		(2)
+#define REG_CPCG_COL_RST_N_RELMASK	(0x00000001)
+#define REG_CPCG_COL_RST_N_MASK		(REG_CPCG_COL_RST_N_RELMASK << REG_CPCG_COL_RST_N_OFFT)
+
+#define REG_GFCCR0_ADDR			(0x1FE0)
+#define REG_FC_EN_OFFT			(31)
+#define REG_FC_EN_RELMASK		(0x00000001)
+#define REG_FC_EN_MASK			(REG_FC_EN_RELMASK << REG_FC_EN_OFFT)
+
+#define REG_PFC_CTRL_ADDR		(0x30b0)
+#define PFC_RX_COUNTER_L(n)		(0x3030 + (n)*0x100)
+#define PFC_RX_COUNTER_H(n)		(0x3034 + (n)*0x100)
+#define PFC_TX_COUNTER_L(n)		(0x3040 + (n)*0x100)
+#define PFC_TX_COUNTER_H(n)		(0x3044 + (n)*0x100)
+#define PMSR_P(n)			(0x3008 + (n)*0x100)
+
+
+/*
+ * Per-port control registers. Only the port-0 addresses are named
+ * (suffix _P0); other ports sit at +0x100 per port number.
+ */
+#define REG_SSC_P0_ADDR			(0x2000)
+
+#define REG_PCR_P0_ADDR			(0x2004)
+#define REG_PCR_VLAN_MIS_OFFT		(2)
+#define REG_PCR_VLAN_MIS_LENG		(1)
+#define REG_PCR_VLAN_MIS_RELMASK	(0x00000001)
+#define REG_PCR_VLAN_MIS_MASK		(REG_PCR_VLAN_MIS_RELMASK << REG_PCR_VLAN_MIS_OFFT)
+#define REG_PCR_ACL_MIR_OFFT		(7)
+#define REG_PCR_ACL_MIR_LENG		(1)
+#define REG_PCR_ACL_MIR_RELMASK		(0x00000001)
+#define REG_PCR_ACL_MIR_MASK		(REG_PCR_ACL_MIR_RELMASK << REG_PCR_ACL_MIR_OFFT)
+#define REG_PORT_RX_MIR_OFFT		(8)
+#define REG_PORT_RX_MIR_LENG		(1)
+#define REG_PORT_RX_MIR_RELMASK		(0x00000001)
+#define REG_PORT_RX_MIR_MASK		(REG_PORT_RX_MIR_RELMASK << REG_PORT_RX_MIR_OFFT)
+#define REG_PORT_TX_MIR_OFFT		(9)
+#define REG_PORT_TX_MIR_LENG		(1)
+#define REG_PORT_TX_MIR_RELMASK		(0x00000001)
+#define REG_PORT_TX_MIR_MASK		(REG_PORT_TX_MIR_RELMASK << REG_PORT_TX_MIR_OFFT)
+#define REG_PORT_ACL_EN_OFFT		(10)
+#define REG_PORT_ACL_EN_LENG		(1)
+#define REG_PORT_ACL_EN_RELMASK		(0x00000001)
+#define REG_PORT_ACL_EN_MASK		(REG_PORT_ACL_EN_RELMASK << REG_PORT_ACL_EN_OFFT)
+#define REG_PCR_EG_TAG_OFFT		(28)
+#define REG_PCR_EG_TAG_LENG		(2)
+#define REG_PCR_EG_TAG_RELMASK		(0x00000003)
+#define REG_PCR_EG_TAG_MASK		(REG_PCR_EG_TAG_RELMASK << REG_PCR_EG_TAG_OFFT)
+
+#define REG_PIC_P0_ADDR			(0x2008)
+#define REG_PIC_IGMP_MIR_OFFT		(19)
+#define REG_PIC_IGMP_MIR_LENG		(1)
+#define REG_PIC_IGMP_MIR_RELMASK	(0x00000001)
+#define REG_PIC_IGMP_MIR_MASK		(REG_PIC_IGMP_MIR_RELMASK << REG_PIC_IGMP_MIR_OFFT)
+
+#define REG_PSC_P0_ADDR			(0x200C)
+
+#define REG_PVC_P0_ADDR			(0x2010)
+#define REG_PVC_ACC_FRM_OFFT		(0)
+#define REG_PVC_ACC_FRM_LENG		(2)
+#define REG_PVC_ACC_FRM_RELMASK		(0x00000003)
+#define REG_PVC_ACC_FRM_MASK		(REG_PVC_ACC_FRM_RELMASK << REG_PVC_ACC_FRM_OFFT)
+#define REG_PVC_EG_TAG_OFFT		(8)
+#define REG_PVC_EG_TAG_LENG		(3)
+#define REG_PVC_EG_TAG_RELMASK		(0x00000007)
+#define REG_PVC_EG_TAG_MASK		(REG_PVC_EG_TAG_RELMASK << REG_PVC_EG_TAG_OFFT)
+
+#define REG_PPBV1_P0_ADDR		(0x2014)
+#define REG_PPBV2_P0_ADDR		(0x2018)
+#define REG_BSR_P0_ADDR			(0x201C)
+#define REG_STAG01_P0_ADDR		(0x2020)
+#define REG_STAG23_P0_ADDR		(0x2024)
+#define REG_STAG45_P0_ADDR		(0x2028)
+#define REG_STAG67_P0_ADDR		(0x202C)
+
+#define REG_CMACCR_ADDR			(0x30E0)
+#define REG_MTCC_LMT_OFFT		(9)
+#define REG_MTCC_LMT_LENG		(4)
+#define REG_MTCC_LMT_RELMASK		(0x0000000F)
+#define REG_MTCC_LMT_MASK		(REG_MTCC_LMT_RELMASK << REG_MTCC_LMT_OFFT)
+
+/*
+ * Command keyword strings and per-command usage/help text shown by the
+ * CLI. Several variants are selected by the feature switches defined
+ * at the top of this header (EXTEND_SETVID_PARAM, SQA_VERIFY,
+ * MT7530_UPW_REG_UPDATE).
+ */
+#define ETHCMD_ENABLE	"enable"
+#define ETHCMD_DISABLE	"disable"
+
+#define HELP_VLAN_PVID		"vlan pvid <port> <pvid>"
+
+#if defined(EXTEND_SETVID_PARAM) || defined(SQA_VERIFY)
+#define HELP_VLAN_VID		"vlan vid <index> <active:0|1> <vid> <portMap> <egtagPortMap>\n" \
+							"	 <ivl_en> <fid> <stag>\n"
+#else
+#define HELP_VLAN_VID		"vlan vid <index> <active:0|1> <vid> <portMap> <tagPortMap>\n"
+#endif //SQA_VERIFY
+
+//#if defined(SQA_VERIFY)
+
+#define MT7530_UPW_REG_UPDATE 1
+
+#define HELP_QOS_TYPE		"qos type <rr:0|sp:1|wfq:2>\n"
+#ifdef MT7530_UPW_REG_UPDATE
+#define HELP_QOS_BASE		"qos base <port-based:0|tag-based:1|dscp-based:2|acl-based:3|arl-based:4|stag-based:5>\n"
+#else
+#define HELP_QOS_BASE		"qos base <port-based:0|tag-based:1|dscp-based:2|acl-based:3|arl-based:4>\n"
+#endif
+#define HELP_QOS_PRIO_QMAP	"qos prio-qmap <prio:0~7> <queue:0~7>\n"
+#define HELP_QOS_PRIO_TAGMAP	"qos prio-tagmap <prio:0~7> <tag:0~7>\n"
+#define HELP_QOS_PRIO_DSCPMAP	"qos prio-dscpmap <prio:0~7> <dscp:0~63>\n"
+//#define HELP_QOS_VPRI_QMAP	"qos vprio-qmap <prio:0~7> <queue:0~7>\n"
+#define HELP_QOS_PORT_PRIO	"qos port-prio <port> <prio:0~7>\n"
+#define HELP_QOS_PORT_WEIGHT	"qos port-weight <port:0~7> <q0> <q1> <q2> <q3> <q4> <q5> <q6> <q7>\n" \
+							 " <qn>: the weight of queue n, range: 1~16\n"
+#define HELP_QOS_DSCP_PRIO	"qos dscp-prio <dscp:0~63> <prio:0~7> : for ingress\n"
+
+#define HELP_ARL_L2LEN_CHK	"arl l2len-chk <active:0|1>\n"
+
+#define HELP_ARL_AGING		"arl aging <active:0|1> <time:1~65536>\n"
+
+#define HELP_ARL_MAC_TBL_ADD	"arl mactbl-add <MacAddr> <DestPortMap>\n"\
+	                        " ** optional : <leaky_en:0|1> <eg_tag:0~7> <usr_pri:0~7> <sa_mir_en:0|1> <sa_port_fw:0~7>\n"
+
+#define HELP_ARL_DIP_TBL_ADD	"arl diptbl-add <DIP> <DestPortMap> <leaky_en:0|1> <eg_tag:0~7> <usr_pri:0~7> <status:0~3>\n"
+
+#define HELP_ARL_SIP_TBL_ADD	"arl siptbl-add <DIP> <SIP> <DestPortMap> <status:0~3>\n"
+
+#define HELP_ACL_SETPORTEN	"acl enable <port> <port_enable:0|1>\n"
+#define HELP_ACL_ACL_TBL_ADD	"arl acltbl-add <tbl_idx:0~63/255> <vawd1> <vawd2>\n"
+#define HELP_ACL_MASK_TBL_ADD	"arl masktbl-add <tbl_idx:0~31/127> <vawd1> <vawd2>\n"
+#define HELP_ACL_RULE_TBL_ADD	"arl ruletbl-add <tbl_idx:0~31/127> <vawd1> <vawd2>\n"
+#define HELP_ACL_RATE_TBL_ADD	"arl ratetbl-add <tbl_idx:0~31> <vawd1> <vawd2>\n"
+#define HELP_ACL_TRTCM_TBL_ADD	"arl trTCMtbl-add <tbl_idx:0~31> <vawd1> <vawd2>\n"
+
+
+#define HELP_VLAN_PORT_MODE	"vlan port-mode <port> <mode:0~3>\n" \
+							"<mode>: 0: port matrix mode\n" \
+							"        1: fallback mode\n" \
+							"        2: check mode\n" \
+							"        3: security mode\n"\
+
+#define HELP_VLAN_PORT_ATTR	"vlan port-attr <port> <attr:0~3>\n" \
+							"<attr>: 0: user port\n" \
+							"        1: statck port\n" \
+							"        2: translation port\n" \
+							"        3: transparent port\n"
+
+#define HELP_VLAN_EGRESS_TAG_PVC	"vlan eg-tag-pvc <port> <eg_tag:0~7>\n" \
+							 "<eg_tag>: 0: disable\n" \
+							 "          1: consistent\n" \
+							 "          2: reserved\n" \
+							 "          3: reserved\n" \
+							 "          4: untagged\n" \
+							 "          5: swap\n" \
+							 "          6: tagged\n" \
+							 "          7: stack\n"
+
+#define HELP_VLAN_EGRESS_TAG_PCR	"vlan eg-tag-pcr <port> <eg_tag:0~3>\n" \
+							 "<eg_tag>: 0: untagged\n" \
+							 "          1: swap\n" \
+							 "          2: tagged\n" \
+							 "          3: stack\n"
+
+#define HELP_VLAN_ACC_FRM	"vlan acc-frm <port> <acceptable_frame_type:0~3>\n" \
+							"<type>: 0: admit all frames\n" \
+							"        1: admit only vlan-taged frames\n" \
+							"        2: admit only untagged or priority-tagged frames\n" \
+							"        3: reserved\n"
+
+
+#define HELP_SWITCH_RESET	"switch software reset\n"
+#define HELP_MACCTL_FC		"macctl fc <enable:0|1>\n"
+#define HELP_MIRROR_EN		"mirror enable <mirror_en:0|1> <mirror_port: 0-6>\n"
+#define HELP_MIRROR_PORTBASED	"mirror port-based <port> <port_tx_mir:0|1> <port_rx_mir:0|1> <acl_mir:0|1> <vlan_mis:0|1> <igmp_mir:0|1>\n"
+
+#define HELP_PHY_AN_EN		"phyctl an <port> <auto_negotiation_en:0|1>\n"
+#define HELP_PHY_FC_EN		"phyctl fc <port> <full_duplex_pause_capable:0|1>\n"
+
+#define HELP_STP		"stp <port> <fid> <state>\n" \
+						 "<state>: 0: Disable/Discarding\n" \
+						 "         1: Blocking/Listening/Discarding\n" \
+						 "         2: Learning\n" \
+						 "         3: Forwarding\n"
+#define HELP_COLLISION_POOL_EN	"collision-pool enable [enable 0|1] \n"
+#define HELP_EEE_EN		"eee [enable|disable] ([port|portMap]) \n"
+
+//#endif //SQA_VERIFY
diff --git a/feed/switch/src/switch_fun.c b/feed/switch/src/switch_fun.c
new file mode 100755
index 0000000..aefb927
--- /dev/null
+++ b/feed/switch/src/switch_fun.c
@@ -0,0 +1,3753 @@
+/*
+* switch_fun.c: switch function sets
+*/
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdbool.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <linux/if.h>
+#include <stdbool.h>
+#include <time.h>
+
+#include "switch_extend.h"
+#include "switch_netlink.h"
+#include "switch_ioctl.h"
+#include "switch_fun.h"
+
+#define leaky_bucket 0
+
+/*
+ * getnext() - copy the token of @src that precedes @separator into @dest.
+ *
+ * Returns the number of bytes consumed from @src (token length plus one
+ * for the separator), or -1 if an argument is NULL or @separator is
+ * absent.
+ * NOTE(review): @dest is not bounds-checked; callers must size it to the
+ * input.
+ */
+static int getnext(char *src, int separator, char *dest)
+{
+	char *c;
+	int len;
+
+	if ((src == NULL) || (dest == NULL))
+		return -1;
+
+	c = strchr(src, separator);
+	if (c == NULL)
+		return -1;
+
+	len = c - src;
+	strncpy(dest, src, len);
+	dest[len] = '\0';
+	return len + 1;
+}
+
+/*
+ * str_to_ip() - parse dotted-quad IPv4 text ("a.b.c.d") into host order.
+ *
+ * On success stores (a<<24)|(b<<16)|(c<<8)|d in @ip and returns 0;
+ * returns 1 when fewer than three '.' separators are present.
+ * NOTE(review): octet values are not range-checked — atoi() results are
+ * silently truncated to unsigned char.
+ */
+static int str_to_ip(unsigned int *ip, char *str)
+{
+	int i;
+	int len;
+	char *ptr = str;
+	char buf[128];
+	unsigned char c[4];
+
+	for (i = 0; i < 3; ++i) {
+		if ((len = getnext(ptr, '.', buf)) == -1)
+			return 1;
+		c[i] = atoi(buf);
+		ptr += len;
+	}
+	c[3] = atoi(ptr);
+	*ip = (c[0] << 24) + (c[1] << 16) + (c[2] << 8) + c[3];
+	return 0;
+}
+
+/*
+ * ip_to_str() - convert IP address from number to string.
+ *
+ * Prints the in-memory bytes of @ip in reverse order: on a little-endian
+ * host this renders a host-order value built by str_to_ip() back as
+ * "a.b.c.d".
+ * NOTE(review): @str must hold at least 16 bytes; no bound is checked.
+ */
+static void ip_to_str(char *str, unsigned int ip)
+{
+	unsigned char *ptr = (unsigned char *)&ip;
+	unsigned char c[4];
+
+	c[0] = *(ptr);
+	c[1] = *(ptr + 1);
+	c[2] = *(ptr + 2);
+	c[3] = *(ptr + 3);
+	/*sprintf(str, "%d.%d.%d.%d", c[0], c[1], c[2], c[3]);*/
+	sprintf(str, "%d.%d.%d.%d", c[3], c[2], c[1], c[0]);
+}
+
+/*
+ * reg_read() - read one switch register at @offset into *@value.
+ *
+ * Uses the netlink path when initialised, otherwise falls back to the
+ * ioctl path (only when attres->dev_id == -1).  On failure prints
+ * "Read fail", zeroes *@value and returns the negative error; returns 0
+ * on success.
+ * NOTE(review): when netlink is down and dev_id != -1 no access is
+ * attempted at all and the call reports failure — confirm intended.
+ */
+int reg_read(unsigned int offset, unsigned int *value)
+{
+	int ret = -1;
+
+	if (nl_init_flag == true) {
+		ret = reg_read_netlink(attres, offset, value);
+	} else {
+		if (attres->dev_id == -1)
+			ret = reg_read_ioctl(offset, value);
+	}
+	if (ret < 0) {
+		printf("Read fail\n");
+		*value = 0;
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * reg_write() - write @value to the switch register at @offset.
+ *
+ * Same netlink/ioctl path selection as reg_read().  On failure prints
+ * "Write fail", releases resources via exit_free() and terminates the
+ * process; returns 0 on success.
+ */
+int reg_write(unsigned int offset, unsigned int value)
+{
+	int ret = -1;
+
+	if (nl_init_flag == true) {
+		ret = reg_write_netlink(attres, offset, value);
+	} else {
+		if (attres->dev_id == -1)
+			ret = reg_write_ioctl(offset, value);
+	}
+	if (ret < 0) {
+		printf("Write fail\n");
+		exit_free();
+		exit(0);
+	}
+	return 0;
+}
+
+/*
+ * mii_mgr_read() - clause-22 MDIO read of @reg on PHY @port_num (0-31).
+ *
+ * Returns 0 on success, -1 for an out-of-range PHY address; terminates
+ * the process if the underlying netlink/ioctl access fails.
+ */
+int mii_mgr_read(unsigned int port_num, unsigned int reg, unsigned int *value)
+{
+	int ret;
+
+	if (port_num > 31) {
+		printf("Invalid Port or PHY addr \n");
+		return -1;
+	}
+
+	if (nl_init_flag == true)
+		ret = phy_cl22_read_netlink(attres, port_num, reg, value);
+	else
+		ret = mii_mgr_cl22_read_ioctl(port_num, reg, value);
+
+	if (ret < 0) {
+		printf("Phy read fail\n");
+		exit_free();
+		exit(0);
+	}
+
+	return 0;
+}
+
+/*
+ * mii_mgr_write() - clause-22 MDIO write of @value to @reg on PHY
+ * @port_num (0-31).
+ *
+ * Returns 0 on success, -1 for an out-of-range PHY address; terminates
+ * the process if the underlying netlink/ioctl access fails.
+ */
+int mii_mgr_write(unsigned int port_num, unsigned int reg, unsigned int value)
+{
+	int ret;
+
+	if (port_num > 31) {
+		printf("Invalid Port or PHY addr \n");
+		return -1;
+	}
+
+	if (nl_init_flag == true)
+		ret = phy_cl22_write_netlink(attres, port_num, reg, value);
+	else
+		ret = mii_mgr_cl22_write_ioctl(port_num, reg, value);
+
+	if (ret < 0) {
+		printf("Phy write fail\n");
+		exit_free();
+		exit(0);
+	}
+
+	return 0;
+}
+
+/*
+ * mii_mgr_c45_read() - clause-45 MDIO read: device @dev, register @reg
+ * on PHY @port_num (0-31).
+ *
+ * Returns 0 on success, -1 for an out-of-range PHY address; terminates
+ * the process if the underlying netlink/ioctl access fails.
+ */
+int mii_mgr_c45_read(unsigned int port_num, unsigned int dev, unsigned int reg, unsigned int *value)
+{
+	int ret;
+
+	if (port_num > 31) {
+		printf("Invalid Port or PHY addr \n");
+		return -1;
+	}
+
+	if (nl_init_flag == true)
+		ret = phy_cl45_read_netlink(attres, port_num, dev, reg, value);
+	else
+		ret = mii_mgr_cl45_read_ioctl(port_num, dev, reg, value);
+
+	if (ret < 0) {
+		printf("Phy read fail\n");
+		exit_free();
+		exit(0);
+	}
+
+	return 0;
+}
+
+/*
+ * mii_mgr_c45_write() - clause-45 MDIO write: device @dev, register
+ * @reg on PHY @port_num (0-31).
+ *
+ * Returns 0 on success, -1 for an out-of-range PHY address; terminates
+ * the process if the underlying netlink/ioctl access fails.
+ */
+int mii_mgr_c45_write(unsigned int port_num, unsigned int dev, unsigned int reg, unsigned int value)
+{
+	int ret;
+
+	if (port_num > 31) {
+		printf("Invalid Port or PHY addr \n");
+		return -1;
+	}
+
+	if (nl_init_flag == true)
+		ret = phy_cl45_write_netlink(attres, port_num, dev, reg, value);
+	else
+		ret = mii_mgr_cl45_write_ioctl(port_num, dev, reg, value);
+
+	if (ret < 0) {
+		printf("Phy write fail\n");
+		exit_free();
+		exit(0);
+	}
+
+	return 0;
+}
+
+
+/*
+ * phy_dump() - dump the registers of PHY @phy_addr via netlink or ioctl.
+ *
+ * Returns 0 on success; terminates the process if the access fails.
+ */
+int phy_dump(int phy_addr)
+{
+	int ret;
+
+	if (nl_init_flag == true)
+		ret = phy_dump_netlink(attres, phy_addr);
+	else
+		ret = phy_dump_ioctl(phy_addr);
+
+	if (ret < 0) {
+		printf("Phy dump fail\n");
+		exit_free();
+		exit(0);
+	}
+
+	return 0;
+}
+
+/*
+ * phy_crossover() - set the MDI/MDIX crossover mode of a PHY port.
+ *
+ * argv[2] = port (0-4), argv[3] = "auto" | "mdi" | "mdix".  Reads and
+ * rewrites bits [4:3] of MMD 0x1E register MT7530_T10_TEST_CONTROL.
+ * Terminates the process if the clause-45 access fails.
+ *
+ * Fixes: validate argc before dereferencing argv[2]/argv[3] (the
+ * original crashed on missing arguments) and correct the "invaild"
+ * typos in the user-facing messages.
+ */
+void phy_crossover(int argc, char *argv[])
+{
+	unsigned int port_num;
+	unsigned int value;
+	int ret;
+
+	/* Need both <port> and <mode> before touching argv. */
+	if (argc < 4) {
+		printf("invalid parameter\n");
+		return;
+	}
+
+	port_num = strtoul(argv[2], NULL, 10);
+	if (port_num > 4) {
+		printf("invalid value, port_num:0~4\n");
+		return;
+	}
+
+	if (nl_init_flag == true)
+		ret = phy_cl45_read_netlink(attres, port_num, 0x1E, MT7530_T10_TEST_CONTROL, &value);
+	else
+		ret = mii_mgr_cl45_read_ioctl(port_num, 0x1E, MT7530_T10_TEST_CONTROL, &value);
+	if (ret < 0) {
+		printf("phy_cl45 read fail\n");
+		exit_free();
+		exit(0);
+	}
+
+	printf("mii_mgr_cl45:");
+	printf("Read:  port#=%d, device=0x%x, reg=0x%x, value=0x%x\n", port_num, 0x1E, MT7530_T10_TEST_CONTROL, value);
+
+	if (!strncmp(argv[3], "auto", 5))
+	{
+		value &= (~(0x3 << 3));
+	} else if (!strncmp(argv[3], "mdi", 4)) {
+		value &= (~(0x3 << 3));
+		value |= (0x2 << 3);
+	} else if (!strncmp(argv[3], "mdix", 5)) {
+		value |= (0x3 << 3);
+	} else {
+		printf("invalid parameter\n");
+		return;
+	}
+	printf("Write: port#=%d, device=0x%x, reg=0x%x. value=0x%x\n", port_num, 0x1E, MT7530_T10_TEST_CONTROL, value);
+
+	if (nl_init_flag == true)
+		ret = phy_cl45_write_netlink(attres, port_num, 0x1E, MT7530_T10_TEST_CONTROL, value);
+	else
+		ret = mii_mgr_cl45_write_ioctl(port_num, 0x1E, MT7530_T10_TEST_CONTROL, value);
+
+	if (ret < 0) {
+		printf("phy_cl45 write fail\n");
+		exit_free();
+		exit(0);
+	}
+}
+
+/*
+ * rw_phy_token_ring() - read or write an internal PHY token-ring register.
+ *
+ * "r" form: argv = r <port> <ch_addr> <node_addr> <data_addr> (argc 7);
+ * "w" form appends <val_h> <val_l> (argc 9).  Access goes through the
+ * token-ring page (r31 = 0x52b5), with r16 as the control word and
+ * r17/r18 as the low/high data registers.  Returns 0 on success, -1 on
+ * malformed arguments.
+ */
+int rw_phy_token_ring(int argc, char *argv[])
+{
+	int ch_addr, node_addr, data_addr;
+	unsigned int tr_reg_control;
+	unsigned int val_l = 0;
+	unsigned int val_h = 0;
+	unsigned int port_num;
+
+	if (argc < 4)
+		return -1;
+
+	if (argv[2][0] == 'r') {
+		if (argc != 7)
+			return -1;
+		mii_mgr_write(0, 0x1f, 0x52b5); // r31 = 0x52b5
+		port_num = strtoul(argv[3], NULL, 0);
+		if (port_num > MAX_PORT) {
+			printf("Illegal port index and port:0~6\n");
+			return -1;
+		}
+		ch_addr = strtoul(argv[4], NULL, 0);
+		node_addr = strtoul(argv[5], NULL, 0);
+		data_addr = strtoul(argv[6], NULL, 0);
+		printf("port = %x, ch_addr = %x, node_addr=%x, data_addr=%x\n", port_num, ch_addr, node_addr, data_addr);
+		// bit15 = enable, bit13 = read, then channel/node/data address
+		tr_reg_control = (1 << 15) | (1 << 13) | (ch_addr << 11) | (node_addr << 7) | (data_addr << 1);
+		mii_mgr_write(port_num, 16, tr_reg_control); // r16 = tr_reg_control
+		mii_mgr_read(port_num, 17, &val_l);
+		mii_mgr_read(port_num, 18, &val_h);
+		printf("switch trreg read tr_reg_control=%x, value_H=%x, value_L=%x\n", tr_reg_control, val_h, val_l);
+	} else if (argv[2][0] == 'w') {
+		if (argc != 9)
+			return -1;
+		mii_mgr_write(0, 0x1f, 0x52b5); // r31 = 0x52b5
+		port_num = strtoul(argv[3], NULL, 0);
+		if (port_num > MAX_PORT) {
+			printf("\n**Illegal port index and port:0~6\n");
+			return -1;
+		}
+		ch_addr = strtoul(argv[4], NULL, 0);
+		node_addr = strtoul(argv[5], NULL, 0);
+		data_addr = strtoul(argv[6], NULL, 0);
+		val_h = strtoul(argv[7], NULL, 0);
+		val_l = strtoul(argv[8], NULL, 0);
+		printf("port = %x, ch_addr = %x, node_addr=%x, data_addr=%x\n", port_num, ch_addr, node_addr, data_addr);
+		// bit15 = enable, bit13 clear = write; data must be staged first
+		tr_reg_control = (1 << 15) | (0 << 13) | (ch_addr << 11) | (node_addr << 7) | (data_addr << 1);
+		mii_mgr_write(port_num, 17, val_l);
+		mii_mgr_write(port_num, 18, val_h);
+		mii_mgr_write(port_num, 16, tr_reg_control); // r16 = tr_reg_control
+		printf("switch trreg Write tr_reg_control=%x, value_H=%x, value_L=%x\n", tr_reg_control, val_h, val_l);
+	} else
+		return -1;
+	return 0;
+}
+
+/*
+ * write_acl_table() - program ACL pattern entry @tbl_idx via VTCR.
+ *
+ * Writes VAWD1/VAWD2 then issues VTCR function 0x05 for the entry.
+ * Valid indices: 0-255 on MT7531, 0-63 otherwise.
+ * NOTE(review): the busy polls spin forever if the switch never clears
+ * the VTCR busy bit.
+ */
+void write_acl_table(unsigned char tbl_idx, unsigned int vawd1, unsigned int vawd2)
+{
+	unsigned int value, reg;
+	unsigned int max_index;
+
+	if (chip_name == 0x7531)
+		max_index = 256;
+	else
+		max_index = 64;
+
+	printf("Pattern_acl_tbl_idx:%d\n", tbl_idx);
+
+	if (tbl_idx >= max_index) {
+		printf(HELP_ACL_ACL_TBL_ADD);
+		return;
+	}
+
+	reg = REG_VTCR_ADDR;
+	while (1)
+	{ // wait until not busy
+		reg_read(reg, &value);
+		if ((value & REG_VTCR_BUSY_MASK) == 0) {
+			break;
+		}
+	}
+	reg_write(REG_VAWD1_ADDR, vawd1);
+	printf("write reg: %x, value: %x\n", REG_VAWD1_ADDR, vawd1);
+	reg_write(REG_VAWD2_ADDR, vawd2);
+	printf("write reg: %x, value: %x\n", REG_VAWD2_ADDR, vawd2);
+	reg = REG_VTCR_ADDR;
+	value = REG_VTCR_BUSY_MASK | (0x05 << REG_VTCR_FUNC_OFFT) | tbl_idx;
+	reg_write(reg, value);
+	printf("write reg: %x, value: %x\n", reg, value);
+
+	while (1)
+	{ 	// wait until not busy
+		reg_read(reg, &value);
+		if ((value & REG_VTCR_BUSY_MASK) == 0)
+			break;
+	}
+}
+
+/*
+ * acl_table_add() - CLI wrapper: argv[3] = entry index, argv[4]/argv[5]
+ * = VAWD1/VAWD2 in hex.
+ * NOTE(review): assumes the caller has already verified argc >= 6.
+ */
+void acl_table_add(int argc, char *argv[])
+{
+	unsigned int vawd1, vawd2;
+	unsigned char tbl_idx;
+
+	tbl_idx = atoi(argv[3]);
+	vawd1 = strtoul(argv[4], (char **)NULL, 16);
+	vawd2 = strtoul(argv[5], (char **)NULL, 16);
+	write_acl_table(tbl_idx, vawd1, vawd2);
+}
+
+/*
+ * write_acl_mask_table() - program ACL rule-mask entry @tbl_idx via VTCR.
+ *
+ * Writes VAWD1/VAWD2 then issues VTCR function 0x09 for the entry.
+ * Valid indices: 0-127 on MT7531, 0-31 otherwise.
+ * NOTE(review): the busy polls spin forever if the switch never clears
+ * the VTCR busy bit.
+ */
+void write_acl_mask_table(unsigned char tbl_idx, unsigned int vawd1, unsigned int vawd2)
+{
+	unsigned int value, reg;
+	unsigned int max_index;
+
+	if (chip_name == 0x7531)
+		max_index = 128;
+	else
+		max_index = 32;
+
+	printf("Rule_mask_tbl_idx:%d\n", tbl_idx);
+
+	if (tbl_idx >= max_index) {
+		printf(HELP_ACL_MASK_TBL_ADD);
+		return;
+	}
+	reg = REG_VTCR_ADDR;
+	while (1)
+	{ // wait until not busy
+		reg_read(reg, &value);
+		if ((value & REG_VTCR_BUSY_MASK) == 0)
+			break;
+	}
+	reg_write(REG_VAWD1_ADDR, vawd1);
+	printf("write reg: %x, value: %x\n", REG_VAWD1_ADDR, vawd1);
+	reg_write(REG_VAWD2_ADDR, vawd2);
+	printf("write reg: %x, value: %x\n", REG_VAWD2_ADDR, vawd2);
+	reg = REG_VTCR_ADDR;
+	value = REG_VTCR_BUSY_MASK | (0x09 << REG_VTCR_FUNC_OFFT) | tbl_idx;
+	reg_write(reg, value);
+	printf("write reg: %x, value: %x\n", reg, value);
+	while (1)
+	{ // wait until not busy
+		reg_read(reg, &value);
+		if ((value & REG_VTCR_BUSY_MASK) == 0)
+			break;
+	}
+}
+
+/*
+ * acl_mask_table_add() - CLI wrapper: argv[3] = entry index,
+ * argv[4]/argv[5] = VAWD1/VAWD2 in hex.
+ * NOTE(review): assumes the caller has already verified argc >= 6.
+ */
+void acl_mask_table_add(int argc, char *argv[])
+{
+	unsigned int vawd1, vawd2;
+	unsigned char tbl_idx;
+
+	tbl_idx = atoi(argv[3]);
+	vawd1 = strtoul(argv[4], (char **)NULL, 16);
+	vawd2 = strtoul(argv[5], (char **)NULL, 16);
+	write_acl_mask_table(tbl_idx, vawd1, vawd2);
+}
+
+/*
+ * write_acl_rule_table() - program ACL rule-control entry @tbl_idx via
+ * VTCR function 0x0B.  Valid indices: 0-127 on MT7531, 0-31 otherwise.
+ * NOTE(review): the busy polls spin forever if the switch never clears
+ * the VTCR busy bit.
+ */
+void write_acl_rule_table(unsigned char tbl_idx, unsigned int vawd1, unsigned int vawd2)
+{
+	unsigned int value, reg;
+	unsigned int max_index;
+
+	if (chip_name == 0x7531)
+		max_index = 128;
+	else
+		max_index = 32;
+
+	printf("Rule_control_tbl_idx:%d\n", tbl_idx);
+
+	if (tbl_idx >= max_index) { /*Check the input parameters is right or not.*/
+		printf(HELP_ACL_RULE_TBL_ADD);
+		return;
+	}
+	reg = REG_VTCR_ADDR;
+
+	while (1)
+	{ // wait until not busy
+		reg_read(reg, &value);
+		if ((value & REG_VTCR_BUSY_MASK) == 0) {
+			break;
+		}
+	}
+	reg_write(REG_VAWD1_ADDR, vawd1);
+	printf("write reg: %x, value: %x\n", REG_VAWD1_ADDR, vawd1);
+	reg_write(REG_VAWD2_ADDR, vawd2);
+	printf("write reg: %x, value: %x\n", REG_VAWD2_ADDR, vawd2);
+	reg = REG_VTCR_ADDR;
+	value = REG_VTCR_BUSY_MASK | (0x0B << REG_VTCR_FUNC_OFFT) | tbl_idx;
+	reg_write(reg, value);
+	printf("write reg: %x, value: %x\n", reg, value);
+
+	while (1)
+	{ // wait until not busy
+		reg_read(reg, &value);
+		if ((value & REG_VTCR_BUSY_MASK) == 0) {
+			break;
+		}
+	}
+}
+
+/*
+ * acl_rule_table_add() - CLI wrapper: argv[3] = entry index,
+ * argv[4]/argv[5] = VAWD1/VAWD2 in hex.
+ * NOTE(review): assumes the caller has already verified argc >= 6.
+ */
+void acl_rule_table_add(int argc, char *argv[])
+{
+	unsigned int vawd1, vawd2;
+	unsigned char tbl_idx;
+
+	tbl_idx = atoi(argv[3]);
+	vawd1 = strtoul(argv[4], (char **)NULL, 16);
+	vawd2 = strtoul(argv[5], (char **)NULL, 16);
+	write_acl_rule_table(tbl_idx, vawd1, vawd2);
+}
+
+/*
+ * write_rate_table() - program ACL rate-control entry @tbl_idx (0-31)
+ * via VTCR function 0x0D.
+ * NOTE(review): the busy polls spin forever if the switch never clears
+ * the VTCR busy bit.
+ */
+void write_rate_table(unsigned char tbl_idx, unsigned int vawd1, unsigned int vawd2)
+{
+	unsigned int value, reg;
+	unsigned int max_index = 32;
+
+	printf("Rule_action_tbl_idx:%d\n", tbl_idx);
+
+	if (tbl_idx >= max_index) {
+		printf(HELP_ACL_RATE_TBL_ADD);
+		return;
+	}
+
+	reg = REG_VTCR_ADDR;
+	while (1) { 	// wait until not busy
+		reg_read(reg, &value);
+		if ((value & REG_VTCR_BUSY_MASK) == 0)
+			break;
+	}
+
+	reg_write(REG_VAWD1_ADDR, vawd1);
+	printf("write reg: %x, value: %x\n", REG_VAWD1_ADDR, vawd1);
+	reg_write(REG_VAWD2_ADDR, vawd2);
+	printf("write reg: %x, value: %x\n", REG_VAWD2_ADDR, vawd2);
+	reg = REG_VTCR_ADDR;
+	value = REG_VTCR_BUSY_MASK | (0x0D << REG_VTCR_FUNC_OFFT) | tbl_idx;
+	reg_write(reg, value);
+	printf("write reg: %x, value: %x\n", reg, value);
+
+	while (1) { // wait until not busy
+		reg_read(reg, &value);
+		if ((value & REG_VTCR_BUSY_MASK) == 0)
+			break;
+	}
+}
+
+/*
+ * acl_rate_table_add() - CLI wrapper: argv[3] = entry index,
+ * argv[4]/argv[5] = VAWD1/VAWD2 in hex.
+ * NOTE(review): assumes the caller has already verified argc >= 6.
+ */
+void acl_rate_table_add(int argc, char *argv[])
+{
+	unsigned int vawd1, vawd2;
+	unsigned char tbl_idx;
+
+	tbl_idx = atoi(argv[3]);
+	vawd1 = strtoul(argv[4], (char **)NULL, 16);
+	vawd2 = strtoul(argv[5], (char **)NULL, 16);
+
+	write_rate_table(tbl_idx, vawd1, vawd2);
+}
+
+/*
+ * write_trTCM_table() - program trTCM meter entry @tbl_idx (0-31) via
+ * VTCR function 0x07.
+ * NOTE(review): the busy polls spin forever if the switch never clears
+ * the VTCR busy bit.
+ */
+void write_trTCM_table(unsigned char tbl_idx, unsigned int vawd1, unsigned int vawd2)
+{
+	unsigned int value, reg;
+	unsigned int max_index = 32;
+
+	printf("trTCM_tbl_idx:%d\n", tbl_idx);
+
+	if (tbl_idx >= max_index) {
+		printf(HELP_ACL_TRTCM_TBL_ADD);
+		return;
+	}
+
+	reg = REG_VTCR_ADDR;
+	while (1) { 	// wait until not busy
+		reg_read(reg, &value);
+		if ((value & REG_VTCR_BUSY_MASK) == 0)
+			break;
+	}
+
+	reg_write(REG_VAWD1_ADDR, vawd1);
+	printf("write reg: %x, value: %x\n", REG_VAWD1_ADDR, vawd1);
+	reg_write(REG_VAWD2_ADDR, vawd2);
+	printf("write reg: %x, value: %x\n", REG_VAWD2_ADDR, vawd2);
+	reg = REG_VTCR_ADDR;
+	value = REG_VTCR_BUSY_MASK | (0x07 << REG_VTCR_FUNC_OFFT) | tbl_idx;
+	reg_write(reg, value);
+	printf("write reg: %x, value: %x\n", reg, value);
+
+	while (1) { // wait until not busy
+		reg_read(reg, &value);
+		if ((value & REG_VTCR_BUSY_MASK) == 0)
+			break;
+	}
+}
+
+/*
+ * acl_parameters_pre_del() - common argv validation for the acl commands.
+ *
+ * Requires at least @len1 arguments; when @len2 == 12 also requires
+ * argv[4] to be a 12-character (MAC) string.  argv[5] must be 8
+ * characters, of which the first 7 ('0'/'1', one per port 0-6) are
+ * folded into *@port as a bitmap.  Returns 0 on success, -1 otherwise.
+ * NOTE(review): the error text says "length 7" but the check demands
+ * length 8, and the 8th character is never validated — confirm the
+ * intended portmap width (same pattern appears in dip_add/sip_add).
+ */
+int acl_parameters_pre_del(int len1, int len2, int argc, char *argv[], int *port)
+{
+	int i;
+
+	*port = 0;
+	if (argc < len1) {
+		printf("insufficient arguments!\n");
+		return -1;
+	}
+
+	if (len2 == 12)
+	{
+		if (!argv[4] || strlen(argv[4]) != len2) {
+			printf("The [%s] format error, should be of length %d\n",argv[4], len2);
+			return -1;
+		}
+	}
+
+	if (!argv[5] || strlen(argv[5]) != 8) {
+		printf("portsmap format error, should be of length 7\n");
+		return -1;
+	}
+
+	for (i = 0; i < 7; i++) {
+		if (argv[5][i] != '0' && argv[5][i] != '1') {
+			printf("portmap format error, should be of combination of 0 or 1\n");
+			return -1;
+		}
+		*port += (argv[5][i] - '0') * (1 << i);
+	}
+	return 0;
+}
+
+/*
+ * acl_compare_pattern() - install one 16-bit ACL compare entry.
+ *
+ * @comparion holds the 16-bit pattern; its upper half is forced to
+ * 0xffff as the compare mask.  @base selects the header type, @word the
+ * 16-bit word offset within it, @ports the ingress portmap, and
+ * @table_index the pattern-table slot to program.
+ */
+void acl_compare_pattern(int ports, int comparion, int base, int word, unsigned char table_index)
+{
+	unsigned int value;
+
+	comparion |= 0xffff0000; //compare mask
+
+	value = ports << 8; //w_port_map
+	value |= 0x1 << 19; //enable
+	value |= base << 16; //mac header
+	value |= word << 1;  //word offset
+
+	write_acl_table(table_index, comparion, value);
+}
+
+/*
+ * acl_mac_add() - drop frames whose MAC matches argv[4] (12 hex digits)
+ * on the ports given by the argv[5] portmap.
+ *
+ * The 48-bit MAC is split into three 16-bit compare patterns (words
+ * 0-2 of the MAC header), then a mask covering those three patterns and
+ * a drop action are installed in rule slot 0.
+ */
+void acl_mac_add(int argc, char *argv[])
+{
+	unsigned int value;
+	int ports;
+	char tmpstr[5];
+	int ret;
+
+	ret = acl_parameters_pre_del(6, 12, argc, argv, &ports);
+	if (ret < 0)
+		return;
+	//set pattern
+	strncpy(tmpstr, argv[4], 4);
+	tmpstr[4] = '\0';
+	value = strtoul(tmpstr, NULL, 16);
+	acl_compare_pattern(ports, value, 0x0, 0, 0);
+
+	strncpy(tmpstr, argv[4] + 4, 4);
+	tmpstr[4] = '\0';
+	value = strtoul(tmpstr, NULL, 16);
+	acl_compare_pattern(ports, value, 0x0, 1, 1);
+
+	strncpy(tmpstr, argv[4] + 8, 4);
+	tmpstr[4] = '\0';
+	value = strtoul(tmpstr, NULL, 16);
+	acl_compare_pattern(ports, value, 0x0, 2, 2);
+
+	//set mask
+	write_acl_mask_table(0,0x7,0);
+
+	//set action
+	value = 0x7;      //drop
+	value |= 1 << 28; //acl interrupt enable
+	value |= 1 << 27; //acl hit count
+	value |= 2 << 24; //acl hit count group index (0~3)
+	write_acl_rule_table(0,value,0);
+}
+
+/*
+ * acl_dip_meter() - rate-limit traffic to destination IP argv[4] at
+ * argv[6] Kbps on the argv[5] portmap.
+ *
+ * Limits: MT7530 up to 1,000,000 Kbps; MT7531 up to 2,500,000 Kbps.
+ * Above 1 Gbps on MT7531 the meter unit is switched from 64 Kbps to
+ * 1 Mbps via AGC register (0xC) bit 30.
+ */
+void acl_dip_meter(int argc, char *argv[])
+{
+	unsigned int value, ip_value, meter;
+	int ports;
+	int ret;
+
+	ip_value = 0;
+	ret = acl_parameters_pre_del(7, -1, argc, argv, &ports);
+	if (ret < 0)
+		return;
+
+	str_to_ip(&ip_value, argv[4]);
+	//set pattern
+	value = (ip_value >> 16);
+	acl_compare_pattern(ports, value, 0x2, 0x8, 0);
+
+	//set pattern
+	value = (ip_value & 0xffff);
+	acl_compare_pattern(ports, value, 0x2, 0x9, 1);
+
+	//set mask
+	write_acl_mask_table(0,0x3,0);
+
+	//set action
+	meter = strtoul(argv[6], NULL, 0);
+	if (((chip_name == 0x7530) && (meter > 1000000)) ||
+		((chip_name == 0x7531) && (meter > 2500000))) {
+		printf("\n**Illegal meter input, and 7530: 0~1000000Kpbs, 7531: 0~2500000Kpbs**\n");
+		return;
+	}
+	if (((chip_name == 0x7531) && (meter > 1000000))) {
+		reg_read(0xc,&value);
+		value |= 0x1 << 30;
+		reg_write(0xC,value);
+		printf("AGC: 0x%x\n",value);
+		value = meter / 1000; //unit is 1 Mbps
+	} else {
+		reg_read(0xc,&value);
+		value &= ~(0x1 << 30);
+		reg_write(0xC,value);
+		printf("AGC: 0x%x\n",value);
+		value = meter >> 6; //unit is 64 Kbps
+	}
+	value |= 0x1 << 15; //enable rate control
+	printf("Acl rate control:0x%x\n",value);
+	write_rate_table(0, value, 0);
+}
+
+/*
+ * acl_dip_trtcm() - attach a trTCM (two-rate three-color marker) to
+ * traffic destined to argv[4] on the argv[5] portmap.
+ *
+ * argv[6..9] = CIR CBS PIR PBS; rates are stored in 64-Kbps units
+ * (each limited to 65535*64), bursts are 16-bit values.  Green maps to
+ * low drop precedence, yellow to medium, red to high.
+ */
+void acl_dip_trtcm(int argc, char *argv[])
+{
+	unsigned int value, value2, ip_value;
+	unsigned int CIR, CBS, PIR, PBS;
+	int ports;
+	int ret;
+
+	ip_value = 0;
+	ret = acl_parameters_pre_del(10, -1, argc, argv, &ports);
+	if (ret < 0)
+		return;
+
+	str_to_ip(&ip_value, argv[4]);
+	//set pattern
+	value = (ip_value >> 16);
+	acl_compare_pattern(ports, value, 0x2, 0x8, 0);
+
+	//set pattern
+	value = (ip_value & 0xffff);
+	acl_compare_pattern(ports, value, 0x2, 0x9, 1);
+
+	//set CBS PBS
+	CIR = strtoul(argv[6], NULL, 0);
+	CBS = strtoul(argv[7], NULL, 0);
+	PIR = strtoul(argv[8], NULL, 0);
+	PBS = strtoul(argv[9], NULL, 0);
+
+	if (CIR > 65535*64 || CBS > 65535 || PIR > 65535*64  || PBS > 65535) {
+		printf("\n**Illegal input parameters**\n");
+		return;
+	}
+
+	value = CBS << 16; //bit16~31
+	value |= PBS;      //bit0~15
+			   //value |= 1;//valid
+	CIR = CIR >> 6;
+	PIR = PIR >> 6;
+
+	value2 = CIR << 16; //bit16~31
+	value2 |= PIR;      //bit0~15
+	write_trTCM_table(0,value,value2);
+
+	//set mask
+	write_acl_mask_table(0,0x3,0);
+
+	//set action
+	value = 0x1 << (11 + 1); //TrTCM green  meter#0 Low drop
+	value |= 0x2 << (8 + 1); //TrTCM yellow  meter#0 Med drop
+	value |= 0x3 << (5 + 1); //TrTCM red  meter#0    Hig drop
+	value |= 0x1 << 0;       //TrTCM drop pcd select
+	write_acl_rule_table(0,0,value);
+}
+
+/*
+ * acl_ethertype() - drop frames whose ethertype matches argv[4] (hex)
+ * on the argv[5] portmap.  Pattern word 6 of the MAC header is the
+ * ethertype field.
+ */
+void acl_ethertype(int argc, char *argv[])
+{
+	unsigned int value, ethertype;
+	int ports;
+	int ret;
+
+	ret = acl_parameters_pre_del(6, -1, argc, argv, &ports);
+	if (ret < 0)
+		return;
+	printf("ports:0x%x\n",ports);
+	ethertype = strtoul(argv[4], NULL, 16);
+	//set pattern
+	value = ethertype;
+	acl_compare_pattern(ports, value, 0x0, 0x6, 0);
+
+	//set mask
+	write_acl_mask_table(0,0x1,0);
+
+	//set action(drop)
+	value = 0x7;      //drop (matches the other drop actions in this file)
+	value |= 1 << 28; //acl interrupt enable
+	value |= 1 << 27; //acl hit count
+
+	write_acl_rule_table(0,value,0);
+}
+
+/*
+ * acl_dip_modify() - set the user priority of frames destined to
+ * argv[4] on the argv[5] portmap; argv[6] = priority 0-7 (hex).
+ *
+ * Fix: the original parsed argv[6] BEFORE any argument-count check,
+ * dereferencing a NULL/absent argument when fewer than 7 arguments were
+ * supplied.  Validation now runs first and requires argv[6] to exist.
+ */
+void acl_dip_modify(int argc, char *argv[])
+{
+	unsigned int value, ip_value;
+	int ports;
+	int priority;
+	int ret;
+
+	ip_value = 0;
+	/* 7 arguments are required so that argv[6] (priority) exists. */
+	ret = acl_parameters_pre_del(7, -1, argc, argv, &ports);
+	if (ret < 0)
+		return;
+
+	priority = strtoul(argv[6], NULL, 16);
+	if (priority < 0 || priority > 7) {
+		printf("\n**Illegal priority value!**\n");
+		return;
+	}
+
+	str_to_ip(&ip_value, argv[4]);
+	//set pattern
+	value = (ip_value >> 16);
+	acl_compare_pattern(ports, value, 0x2, 0x8, 0);
+
+	//set pattern
+	value = (ip_value & 0xffff);
+	acl_compare_pattern(ports, value, 0x2, 0x9, 1);
+
+	//set mask
+	write_acl_mask_table(0,0x3,0);
+
+	//set action
+	value = 0x0;      //default: no drop
+	value |= 1 << 28; //acl interrupt enable
+	value |= 1 << 27; //acl hit count
+	value |= priority << 4;  //acl UP
+	write_acl_rule_table(0,value,0);
+}
+
+/*
+ * acl_dip_pppoe() - for frames destined to argv[4] on the argv[5]
+ * portmap: strip the PPPoE header and swap source/destination MACs.
+ */
+void acl_dip_pppoe(int argc, char *argv[])
+{
+	unsigned int value, ip_value;
+	int ports;
+	int ret;
+
+	ip_value = 0;
+	ret = acl_parameters_pre_del(6, -1, argc, argv, &ports);
+	if (ret < 0)
+		return;
+
+	str_to_ip(&ip_value, argv[4]);
+	//set pattern
+	value = (ip_value >> 16);
+	acl_compare_pattern(ports, value, 0x2, 0x8, 0);
+
+	//set pattern
+	value = (ip_value & 0xffff);
+	acl_compare_pattern(ports, value, 0x2, 0x9, 1);
+
+	//set mask
+	write_acl_mask_table(0,0x3,0);
+
+	//set action
+	value = 0x0;      //default. Nodrop
+	value |= 1 << 28; //acl interrupt enable
+	value |= 1 << 27; //acl hit count
+	value |= 1 << 20; //pppoe header remove
+	value |= 1 << 21; //SA MAC SWAP
+	value |= 1 << 22; //DA MAC SWAP
+	write_acl_rule_table(0,value,7);
+}
+
+/*
+ * acl_dip_add() - drop frames destined to IP argv[4] on the argv[5]
+ * portmap.  The 32-bit DIP is matched as two 16-bit patterns (IP header
+ * words 8 and 9).
+ */
+void acl_dip_add(int argc, char *argv[])
+{
+	unsigned int value, ip_value;
+	int ports;
+	int ret;
+
+	ip_value = 0;
+	ret = acl_parameters_pre_del(6, -1, argc, argv, &ports);
+	if (ret < 0)
+		return;
+
+	str_to_ip(&ip_value, argv[4]);
+	//set pattern
+	value = (ip_value >> 16);
+	acl_compare_pattern(ports, value, 0x2, 0x8, 0);
+
+	//set pattern
+	value = (ip_value & 0xffff);
+	acl_compare_pattern(ports, value, 0x2, 0x9, 1);
+
+	//set mask
+	write_acl_mask_table(0,0x3,0);
+
+	//set action
+	//value = 0x0; //default
+	value = 0x7;      //drop
+	value |= 1 << 28; //acl interrupt enable
+	value |= 1 << 27; //acl hit count
+	value |= 2 << 24; //acl hit count group index (0~3)
+	write_acl_rule_table(0,value,0);
+}
+
+/*
+ * acl_l4_add() - drop frames whose 16-bit word at header base 0x5,
+ * word 0 matches argv[4] (hex) on the argv[5] portmap.
+ * NOTE(review): base 0x5 is presumably the L4 payload header — confirm
+ * against the switch datasheet.
+ */
+void acl_l4_add(int argc, char *argv[])
+{
+	unsigned int value;
+	int ports;
+	int ret;
+
+	ret = acl_parameters_pre_del(6, -1, argc, argv, &ports);
+	if (ret < 0)
+		return;
+
+	//set pattern
+	value = strtoul(argv[4], NULL, 16);
+	acl_compare_pattern(ports, value, 0x5, 0x0, 0);
+
+	//set rue mask
+	write_acl_mask_table(0,0x1,0);
+	//set action
+	value = 0x7; //drop
+		     //value |= 1;//valid
+	write_acl_rule_table(0,value,0);
+}
+
+/*
+ * acl_sp_add() - drop frames whose 16-bit word at header base 0x4,
+ * word 0 matches argv[4] (decimal) on the argv[5] portmap.
+ * NOTE(review): base 0x4 is presumably the TCP/UDP header (source
+ * port) — confirm against the switch datasheet.
+ */
+void acl_sp_add(int argc, char *argv[])
+{
+	unsigned int value;
+	int ports;
+	int ret;
+
+	ret = acl_parameters_pre_del(6, -1, argc, argv, &ports);
+	if (ret < 0)
+		return;
+	//set pattern
+	value = strtoul(argv[4], NULL, 0);
+	acl_compare_pattern(ports, value, 0x4, 0x0, 0);
+
+	//set rue mask
+	write_acl_mask_table(0,0x1,0);
+
+	//set action
+	value = 0x7; //drop
+		     //value |= 1;//valid
+	write_acl_rule_table(0,value,0);
+}
+
+/*
+ * acl_port_enable() - enable/disable ACL checking on one port.
+ *
+ * argv[3] = port (0..SWITCH_MAX_PORT), argv[4] = 0|1.  Read-modify-
+ * writes the ACL_EN field of the port's PCR register
+ * (0x2004 + 0x100 * port).
+ */
+void acl_port_enable(int argc, char *argv[])
+{
+	unsigned int value, reg;
+	unsigned char acl_port, acl_en;
+
+	acl_port = atoi(argv[3]);
+	acl_en = atoi(argv[4]);
+
+	printf("acl_port:%d, acl_en:%d\n", acl_port, acl_en);
+
+	/*Check the input parameters is right or not.*/
+	if ((acl_port > SWITCH_MAX_PORT) || (acl_en > 1)) {
+		printf(HELP_ACL_SETPORTEN);
+		return;
+	}
+
+	reg = REG_PCR_P0_ADDR + (0x100 * acl_port); // 0x2004[10]
+	reg_read(reg, &value);
+	value &= (~REG_PORT_ACL_EN_MASK);
+	value |= (acl_en << REG_PORT_ACL_EN_OFFT);
+
+	printf("write reg: %x, value: %x\n", reg, value);
+	reg_write(reg, value);
+}
+
+/*
+ * dip_dump_internal() - walk and print the DIP address table.
+ *
+ * @type GENERAL_TABLE scans the 2K-entry table (ATC command 0x8104),
+ * otherwise the 64-entry table (0x811c).  For each ready entry prints
+ * hash, portmap, response counter/flag/timer, the address and the raw
+ * ATRD word, then issues the search-next ATC command.  Stops at the
+ * table-end flag (ATC bit 14).
+ */
+static void dip_dump_internal(int type)
+{
+	unsigned int i, j, value, mac, mac2, value2;
+	char tmpstr[16];
+	int table_size = 0;
+	int hit_value1 = 0;
+	int hit_value2 = 0;
+
+	if(type == GENERAL_TABLE) {
+		table_size = 0x800;
+		reg_write(REG_ATC_ADDR, 0x8104); //dip search command
+		} else {
+		table_size = 0x40;
+		reg_write(REG_ATC_ADDR, 0x811c); //dip search command
+	}
+	printf("hash   port(0:6)   rsp_cnt  flag  timer    dip-address       ATRD\n");
+	for (i = 0; i < table_size; i++) {
+		while (1)
+		{
+			reg_read(REG_ATC_ADDR, &value);
+			if(type == GENERAL_TABLE) {
+				hit_value1 = value & (0x1 << 13);
+				hit_value2 = 1;
+			}else {
+				hit_value1 = value & (0x1 << 13);
+				hit_value2 = value & (0x1 << 28);
+			}
+
+			if (hit_value1 && hit_value2 ) { //search_rdy
+				reg_read(REG_ATRD_ADDR, &value2);
+				//printf("REG_ATRD_ADDR=0x%x\n\r",value2);
+
+				printf("%03x:   ", (value >> 16) & 0xfff); //hash_addr_lu
+				j = (value2 >> 4) & 0xff;		   //r_port_map
+				printf("%c", (j & 0x01) ? '1' : '-');
+				printf("%c", (j & 0x02) ? '1' : '-');
+				printf("%c", (j & 0x04) ? '1' : '-');
+				printf("%c ", (j & 0x08) ? '1' : '-');
+				printf("%c", (j & 0x10) ? '1' : '-');
+				printf("%c", (j & 0x20) ? '1' : '-');
+				printf("%c", (j & 0x40) ? '1' : '-');
+
+				reg_read(REG_TSRA2_ADDR, &mac2);
+
+				printf("     0x%4x", (mac2 & 0xffff));    //RESP_CNT
+				printf("  0x%2x", ((mac2 >> 16) & 0xff)); //RESP_FLAG
+				printf("  %3d", ((mac2 >> 24) & 0xff));   //RESP_TIMER
+									  //printf(" %4d", (value2 >> 24) & 0xff); //r_age_field
+				reg_read(REG_TSRA1_ADDR, &mac);
+				ip_to_str(tmpstr, mac);
+				printf("     %s", tmpstr);
+				printf("  0x%8x\n", value2); //ATRD
+							     //printf("%04x", ((mac2 >> 16) & 0xffff));
+							     //printf("     %c\n", (((value2 >> 20) & 0x03)== 0x03)? 'y':'-');
+				if (value & 0x4000) {
+					printf("end of table %d\n", i);
+					return;
+				}
+				break;
+			}
+			else if (value & 0x4000) { //at_table_end
+				printf("found the last entry %d (not ready)\n", i);
+				return;
+			}
+			usleep(5000);
+		}
+
+		if(type == GENERAL_TABLE)
+			reg_write(REG_ATC_ADDR, 0x8105); //search for next dip address
+		else
+			reg_write(REG_ATC_ADDR, 0x811d); //search for next dip address
+		usleep(5000);
+	}
+}
+
+/* Dump the DIP table from the general (2K-entry) address table. */
+void dip_dump(void)
+{
+	dip_dump_internal(GENERAL_TABLE);
+}
+
+/*
+ * dip_add() - insert a static DIP entry.
+ *
+ * argv[3] = IPv4 address, argv[4] = 8-character portmap whose first 7
+ * chars are '0'/'1' for ports 0-6.  Issues a single write command
+ * (ATC 0x8011) and polls the busy bit for up to 20 ms.
+ * NOTE(review): the error text says "length 7" but the check demands
+ * length 8 — confirm the intended portmap width.
+ */
+void dip_add(int argc, char *argv[])
+{
+	unsigned int value = 0;
+	unsigned int i, j;
+
+	value = 0;
+
+	str_to_ip(&value, argv[3]);
+
+	reg_write(REG_ATA1_ADDR, value);
+	printf("REG_ATA1_ADDR is 0x%x\n\r", value);
+
+#if 0
+	reg_write(REG_ATA2_ADDR, value);
+	printf("REG_ATA2_ADDR is 0x%x\n\r", value);
+#endif
+	if (!argv[4] || strlen(argv[4]) != 8) {
+		printf("portmap format error, should be of length 7\n");
+		return;
+	}
+	j = 0;
+	for (i = 0; i < 7; i++) {
+		if (argv[4][i] != '0' && argv[4][i] != '1') {
+			printf("portmap format error, should be of combination of 0 or 1\n");
+			return;
+		}
+		j += (argv[4][i] - '0') * (1 << i);
+	}
+	value = j << 4;      //w_port_map
+	value |= (0x3 << 2); //static
+
+	reg_write(REG_ATWD_ADDR, value);
+
+	usleep(5000);
+	reg_read(REG_ATWD_ADDR, &value);
+	printf("REG_ATWD_ADDR is 0x%x\n\r", value);
+
+	value = 0x8011; //single w_dip_cmd
+	reg_write(REG_ATC_ADDR, value);
+
+	usleep(1000);
+
+	for (i = 0; i < 20; i++) {
+		reg_read(REG_ATC_ADDR, &value);
+		if ((value & 0x8000) == 0) { //mac address busy
+			printf("done.\n");
+			return;
+		}
+		usleep(1000);
+	}
+	if (i == 20)
+		printf("timeout.\n");
+}
+
+/*
+ * dip_del() - delete the DIP entry for argv[3].
+ *
+ * Writes a zero status word (delete) then issues the write command
+ * (ATC 0x8011) and polls the busy bit for up to 20 ms.
+ */
+void dip_del(int argc, char *argv[])
+{
+	unsigned int i, value;
+
+	value = 0;
+	str_to_ip(&value, argv[3]);
+
+	reg_write(REG_ATA1_ADDR, value);
+
+	value = 0;
+	reg_write(REG_ATA2_ADDR, value);
+
+	value = 0; //STATUS=0, delete dip
+	reg_write(REG_ATWD_ADDR, value);
+
+	value = 0x8011; //w_dip_cmd
+	reg_write(REG_ATC_ADDR, value);
+
+	for (i = 0; i < 20; i++) {
+		reg_read(REG_ATC_ADDR, &value);
+		if ((value & 0x8000) == 0) { //mac address busy
+			if (argv[1] != NULL)
+				printf("done.\n");
+			return;
+		}
+		usleep(1000);
+	}
+	if (i == 20)
+		printf("timeout.\n");
+}
+
+/* Clear the whole DIP table (ATC command 0x8102). */
+void dip_clear(void)
+{
+
+	unsigned int value;
+
+	reg_write(REG_ATC_ADDR, 0x8102); //clear all dip
+	usleep(5000);
+	reg_read(REG_ATC_ADDR, &value);
+	printf("REG_ATC_ADDR is 0x%x\n\r", value);
+}
+
+/*
+ * sip_dump_internal() - walk and print the SIP address table.
+ *
+ * @type GENERAL_TABLE scans the 2K-entry table (ATC command 0x8204),
+ * otherwise the 64-entry table (0x822c).  For each ready entry prints
+ * hash, portmap, DIP, SIP and the raw ATRD word, then issues the
+ * search-next ATC command.  Stops at the table-end flag (ATC bit 14).
+ */
+static void sip_dump_internal(int type)
+{
+	unsigned int i, j, value, mac, mac2, value2;
+	int table_size = 0;
+	int hit_value1 = 0;
+	int hit_value2 = 0;
+	char tmpstr[16];
+
+	if (type == GENERAL_TABLE) {
+		table_size = 0x800;
+		reg_write(REG_ATC_ADDR, 0x8204); //sip search command
+		}else {
+		table_size = 0x40;
+		reg_write(REG_ATC_ADDR, 0x822c); //sip search command
+	}
+	printf("hash  port(0:6)   dip-address    sip-address      ATRD\n");
+	for (i = 0; i < table_size; i++) {
+		while (1)
+		{
+			reg_read(REG_ATC_ADDR, &value);
+			if(type == GENERAL_TABLE) {
+				hit_value1 = value & (0x1 << 13);
+				hit_value2 = 1;
+			} else {
+				hit_value1 = value & (0x1 << 13);
+				hit_value2 = value & (0x1 << 28);
+			}
+
+			if (hit_value1 && hit_value2) { //search_rdy
+				reg_read(REG_ATRD_ADDR, &value2);
+				//printf("REG_ATRD_ADDR=0x%x\n\r",value2);
+
+				printf("%03x:  ", (value >> 16) & 0xfff); //hash_addr_lu
+				j = (value2 >> 4) & 0xff;		  //r_port_map
+				printf("%c", (j & 0x01) ? '1' : '-');
+				printf("%c", (j & 0x02) ? '1' : '-');
+				printf("%c", (j & 0x04) ? '1' : '-');
+				printf("%c", (j & 0x08) ? '1' : '-');
+				printf(" %c", (j & 0x10) ? '1' : '-');
+				printf("%c", (j & 0x20) ? '1' : '-');
+				printf("%c", (j & 0x40) ? '1' : '-');
+
+				reg_read(REG_TSRA2_ADDR, &mac2);
+
+				ip_to_str(tmpstr, mac2);
+				printf("   %s", tmpstr);
+
+				//printf(" %4d", (value2 >> 24) & 0xff); //r_age_field
+				reg_read(REG_TSRA1_ADDR, &mac);
+				ip_to_str(tmpstr, mac);
+				printf("    %s", tmpstr);
+				printf("      0x%x\n", value2);
+				//printf("%04x", ((mac2 >> 16) & 0xffff));
+				//printf("     %c\n", (((value2 >> 20) & 0x03)== 0x03)? 'y':'-');
+				if (value & 0x4000) {
+					printf("end of table %d\n", i);
+					return;
+				}
+				break;
+			} else if (value & 0x4000) { //at_table_end
+				printf("found the last entry %d (not ready)\n", i);
+				return;
+			}
+			usleep(5000);
+		}
+
+	if(type == GENERAL_TABLE)
+		reg_write(REG_ATC_ADDR, 0x8205); //search for next sip address
+	else
+		reg_write(REG_ATC_ADDR, 0x822d); //search for next sip address
+	usleep(5000);
+	}
+}
+
+/* Dump the SIP table from the general (2K-entry) address table. */
+void sip_dump(void)
+{
+	sip_dump_internal(GENERAL_TABLE);
+}
+
+
+/*
+ * sip_add() - insert a static SIP entry.
+ *
+ * argv[3] = source IP, argv[4] = destination IP, argv[5] = 8-character
+ * portmap (first 7 chars used, ports 0-6).  Issues a single write
+ * command (ATC 0x8021) and polls the busy bit for up to 20 ms.
+ * NOTE(review): the error text says "length 7" but the check demands
+ * length 8 — confirm the intended portmap width.
+ */
+void sip_add(int argc, char *argv[])
+{
+	unsigned int i, j, value;
+
+	value = 0;
+	str_to_ip(&value, argv[3]); //SIP
+
+	reg_write(REG_ATA2_ADDR, value);
+	printf("REG_ATA2_ADDR is 0x%x\n\r", value);
+
+	value = 0;
+
+	str_to_ip(&value, argv[4]); //DIP
+	reg_write(REG_ATA1_ADDR, value);
+	printf("REG_ATA1_ADDR is 0x%x\n\r", value);
+
+	if (!argv[5] || strlen(argv[5]) != 8) {
+		printf("portmap format error, should be of length 7\n");
+		return;
+	}
+	j = 0;
+	for (i = 0; i < 7; i++) {
+		if (argv[5][i] != '0' && argv[5][i] != '1') {
+			printf("portmap format error, should be of combination of 0 or 1\n");
+			return;
+		}
+		j += (argv[5][i] - '0') * (1 << i);
+	}
+	value = j << 4;      //w_port_map
+	value |= (0x3 << 2); //static
+
+	reg_write(REG_ATWD_ADDR, value);
+
+	usleep(5000);
+	reg_read(REG_ATWD_ADDR, &value);
+	printf("REG_ATWD_ADDR is 0x%x\n\r", value);
+
+	value = 0x8021; //single w_sip_cmd
+	reg_write(REG_ATC_ADDR, value);
+
+	usleep(1000);
+
+	for (i = 0; i < 20; i++) {
+		reg_read(REG_ATC_ADDR, &value);
+		if ((value & 0x8000) == 0) { //mac address busy
+			printf("done.\n");
+			return;
+		}
+		usleep(1000);
+	}
+	if (i == 20)
+		printf("timeout.\n");
+}
+
+/*
+ * sip_del() - delete the SIP entry for argv[3] (SIP) / argv[4] (DIP).
+ *
+ * Writes a zero status word (delete) then issues the write command
+ * (ATC 0x8021) and polls the busy bit for up to 20 ms.
+ */
+void sip_del(int argc, char *argv[])
+{
+	unsigned int i, value;
+
+	value = 0;
+	str_to_ip(&value, argv[3]);
+
+	reg_write(REG_ATA2_ADDR, value); //SIP
+
+	str_to_ip(&value, argv[4]);
+	reg_write(REG_ATA1_ADDR, value); //DIP
+
+	value = 0; //STATUS=0, delete sip
+	reg_write(REG_ATWD_ADDR, value);
+
+	value = 0x8021; //w_sip_cmd
+	reg_write(REG_ATC_ADDR, value);
+
+	for (i = 0; i < 20; i++) {
+		reg_read(REG_ATC_ADDR, &value);
+		if ((value & 0x8000) == 0) { //mac address busy
+			if (argv[1] != NULL)
+				printf("done.\n");
+			return;
+		}
+		usleep(1000);
+	}
+	if (i == 20)
+		printf("timeout.\n");
+}
+
+/* Clear the whole SIP table (ATC command 0x8202). */
+void sip_clear(void)
+{
+	unsigned int value;
+
+	reg_write(REG_ATC_ADDR, 0x8202); //clear all sip
+	usleep(5000);
+	reg_read(REG_ATC_ADDR, &value);
+	printf("REG_ATC_ADDR is 0x%x\n\r", value);
+}
+
+/*
+ * table_dump_internal() - walk and print the MAC address table.
+ *
+ * @type GENERAL_TABLE scans 2K entries (ATC command 0x8004), otherwise
+ * the 64-entry table (0x800C).  For each valid entry prints hash,
+ * portmap, FID, VID, age in seconds (an age field of 0xFF means
+ * static), the MAC and the filter/my_mac flags.  Stops when the
+ * table-end flag is seen at the last hash index.
+ */
+static void table_dump_internal(int type)
+{
+	unsigned int i, j, value, mac, mac2, value2;
+	int table_size = 0;
+	int table_end = 0;
+	int hit_value1 = 0;
+	int hit_value2 = 0;
+
+	if (type == GENERAL_TABLE){
+		table_size = 0x800;
+		table_end = 0x7FF;
+		reg_write(REG_ATC_ADDR, 0x8004);
+	} else {
+		table_size = 0x40;
+		table_end = 0x3F;
+		reg_write(REG_ATC_ADDR, 0x800C);
+	}
+	printf("hash  port(0:6)   fid   vid  age(s)   mac-address     filter my_mac\n");
+	for (i = 0; i < table_size; i++) {
+		while (1)
+		{
+			reg_read(REG_ATC_ADDR, &value);
+			//printf("ATC =  0x%x\n", value);
+			if(type == GENERAL_TABLE) {
+				hit_value1 = value & (0x1 << 13);
+				hit_value2 = 1;
+			} else {
+				hit_value1 = value & (0x1 << 13);
+				hit_value2 = value & (0x1 << 28);
+			}
+
+			if (hit_value1 && hit_value2 && (((value >> 15) & 0x1) == 0)) {
+				printf("%03x:   ", (value >> 16) & 0xfff);
+				reg_read(REG_ATRD_ADDR, &value2);
+				j = (value2 >> 4) & 0xff; //r_port_map
+				printf("%c", (j & 0x01) ? '1' : '-');
+				printf("%c", (j & 0x02) ? '1' : '-');
+				printf("%c", (j & 0x04) ? '1' : '-');
+				printf("%c", (j & 0x08) ? '1' : '-');
+				printf("%c", (j & 0x10) ? '1' : '-');
+				printf("%c", (j & 0x20) ? '1' : '-');
+				printf("%c", (j & 0x40) ? '1' : '-');
+				printf("%c", (j & 0x80) ? '1' : '-');
+
+				reg_read(REG_TSRA2_ADDR, &mac2);
+
+				printf("   %2d", (mac2 >> 12) & 0x7); //FID
+				printf("  %4d", (mac2 & 0xfff));
+				if (((value2 >> 24) & 0xff) == 0xFF)
+					printf("   --- "); //r_age_field:static
+				else
+					printf(" %5d ", (((value2 >> 24) & 0xff)+1)*2); //r_age_field
+				reg_read(REG_TSRA1_ADDR, &mac);
+				printf("  %08x", mac);
+				printf("%04x", ((mac2 >> 16) & 0xffff));
+				printf("     %c", (((value2 >> 20) & 0x03) == 0x03) ? 'y' : '-');
+				printf("     %c\n", (((value2 >> 23) & 0x01) == 0x01) ? 'y' : '-');
+				if ((value & 0x4000) && (((value >> 16) & 0xfff) == table_end)) {
+					printf("end of table %d\n", i);
+					return;
+				}
+				break;
+			}
+			else if ((value & 0x4000) && (((value >> 15) & 0x1) == 0) && (((value >> 16) & 0xfff) == table_end)) { //at_table_end
+				printf("found the last entry %d (not ready)\n", i);
+				return;
+			}
+			else
+				usleep(5);
+		}
+
+	if(type == GENERAL_TABLE)
+		reg_write(REG_ATC_ADDR, 0x8005);//search for next address
+	else
+		reg_write(REG_ATC_ADDR, 0x800d);//search for next address
+		usleep(5);
+	}
+}
+
+void table_dump(void)
+{
+	table_dump_internal(GENERAL_TABLE);
+
+}
+
+
+void table_add(int argc, char *argv[])
+{
+	unsigned int i, j, value, is_filter, is_mymac;
+	char tmpstr[9];
+
+	is_filter = (argv[1][0] == 'f') ? 1 : 0;
+	is_mymac = (argv[1][0] == 'm') ? 1 : 0;
+	if (!argv[2] || strlen(argv[2]) != 12) {
+		printf("MAC address format error, should be of length 12\n");
+		return;
+	}
+	strncpy(tmpstr, argv[2], 8);
+	tmpstr[8] = '\0';
+	value = strtoul(tmpstr, NULL, 16);
+	reg_write(REG_ATA1_ADDR, value);
+	printf("REG_ATA1_ADDR is 0x%x\n\r", value);
+
+	strncpy(tmpstr, argv[2] + 8, 4);
+	tmpstr[4] = '\0';
+
+	value = strtoul(tmpstr, NULL, 16);
+	value = (value << 16);
+	value |= (1 << 15); //IVL=1
+
+	if (argc > 4) {
+		j = strtoul(argv[4], NULL, 0);
+		if (4095 < j) {
+			printf("wrong vid range, should be within 0~4095\n");
+			return;
+		}
+		value |= j; //vid
+	}
+
+	reg_write(REG_ATA2_ADDR, value);
+	printf("REG_ATA2_ADDR is 0x%x\n\r", value);
+
+	if (!argv[3] || strlen(argv[3]) != 8) {
+		if (is_filter)
+			argv[3] = "11111111";
+		else {
+			printf("portmap format error, should be of length 8\n");
+			return;
+		}
+	}
+	j = 0;
+	for (i = 0; i < 7; i++) {
+		if (argv[3][i] != '0' && argv[3][i] != '1') {
+			printf("portmap format error, should be of combination of 0 or 1\n");
+			return;
+		}
+		j += (argv[3][i] - '0') * (1 << i);
+	}
+	value = j << 4; //w_port_map
+
+	if (argc > 5) {
+		j = strtoul(argv[5], NULL, 0);
+		if (j < 1 || 255 < j) {
+			printf("wrong age range, should be within 1~255\n");
+			return;
+		}
+		value |= (j << 24);  //w_age_field
+		value |= (0x1 << 2); //dynamic
+	} else {
+		value |= (0xff << 24); //w_age_field
+		value |= (0x3 << 2);   //static
+	}
+
+	if (argc > 6) {
+		j = strtoul(argv[6], NULL, 0);
+		if (7 < j) {
+			printf("wrong eg-tag range, should be within 0~7\n");
+			return;
+		}
+		value |= (j << 13); //EG_TAG
+	}
+
+	if (is_filter)
+		value |= (7 << 20); //sa_filter
+
+	if (is_mymac)
+		value |= (1 << 23);
+
+	reg_write(REG_ATWD_ADDR, value);
+
+	usleep(5000);
+	reg_read(REG_ATWD_ADDR, &value);
+	printf("REG_ATWD_ADDR is 0x%x\n\r", value);
+
+	value = 0x8001; //w_mac_cmd
+	reg_write(REG_ATC_ADDR, value);
+
+	usleep(1000);
+
+	for (i = 0; i < 20; i++) {
+		reg_read(REG_ATC_ADDR, &value);
+		if ((value & 0x8000) == 0) { //mac address busy
+			printf("done.\n");
+			return;
+		}
+		usleep(1000);
+	}
+	if (i == 20)
+		printf("timeout.\n");
+}
+
/*
 * table_search_mac_vid - look up one MAC+VID entry (IVL mode) in the
 * switch address table and print it in the same format as table_dump.
 *
 * argv[3] = MAC address (12 hex digits), argv[5] = VID (0~4095).
 * NOTE(review): argc is not validated before argv[5] is read; assumes the
 * dispatcher already checked the argument count.
 */
void table_search_mac_vid(int argc, char *argv[])
{
	unsigned int i, j, value, mac, mac2, value2;
	char tmpstr[9];

	if (!argv[3] || strlen(argv[3]) != 12) {
		printf("MAC address format error, should be of length 12\n");
		return;
	}
	/* high 8 hex digits of the MAC -> ATA1 */
	strncpy(tmpstr, argv[3], 8);
	tmpstr[8] = '\0';
	value = strtoul(tmpstr, NULL, 16);
	reg_write(REG_ATA1_ADDR, value);
	//printf("REG_ATA1_ADDR is 0x%x\n\r",value);

	/* low 4 hex digits -> ATA2[31:16], plus IVL flag and VID */
	strncpy(tmpstr, argv[3] + 8, 4);
	tmpstr[4] = '\0';

	value = strtoul(tmpstr, NULL, 16);
	value = (value << 16);
	value |= (1 << 15); //IVL=1

	j = strtoul(argv[5], NULL, 0);
	if (4095 < j) {
		printf("wrong vid range, should be within 0~4095\n");
		return;
	}
	value |= j; //vid

	reg_write(REG_ATA2_ADDR, value);
	//printf("REG_ATA2_ADDR is 0x%x\n\r",value);

	value = 0x8000; // ATC single-MAC search command (busy bit set)
	reg_write(REG_ATC_ADDR, value);

	usleep(1000);

	/* poll up to 20 ms for the ATC busy bit (15) to clear */
	for (i = 0; i < 20; i++) {
		reg_read(REG_ATC_ADDR, &value);
		if ((value & 0x8000) == 0) {
			break;
		}
		usleep(1000);
	}
	if (i == 20) {
		printf("search timeout.\n");
		return;
	}

	if (value & 0x1000) { // bit 12: no matching entry
		printf("search no entry.\n");
		return;
	}

	printf("search done.\n");
	printf("hash  port(0:6)   fid   vid  age   mac-address     filter my_mac\n");

	printf("%03x:   ", (value >> 16) & 0xfff); //hash_addr_lu
	reg_read(REG_ATRD_ADDR, &value2);
	j = (value2 >> 4) & 0xff; //r_port_map
	printf("%c", (j & 0x01) ? '1' : '-');
	printf("%c", (j & 0x02) ? '1' : '-');
	printf("%c", (j & 0x04) ? '1' : '-');
	printf("%c ", (j & 0x08) ? '1' : '-');
	printf("%c", (j & 0x10) ? '1' : '-');
	printf("%c", (j & 0x20) ? '1' : '-');
	printf("%c", (j & 0x40) ? '1' : '-');
	printf("%c", (j & 0x80) ? '1' : '-');

	reg_read(REG_TSRA2_ADDR, &mac2);

	printf("   %2d", (mac2 >> 12) & 0x7); //FID
	printf("  %4d", (mac2 & 0xfff));
	printf(" %4d", (value2 >> 24) & 0xff); //r_age_field
	reg_read(REG_TSRA1_ADDR, &mac);
	printf("  %08x", mac);
	printf("%04x", ((mac2 >> 16) & 0xffff));
	printf("     %c", (((value2 >> 20) & 0x03) == 0x03) ? 'y' : '-');
	printf("     %c\n", (((value2 >> 23) & 0x01) == 0x01) ? 'y' : '-');
}
+
/*
 * table_search_mac_fid - look up one MAC+FID entry (SVL mode, IVL=0) in
 * the switch address table and print it in the same format as table_dump.
 *
 * argv[3] = MAC address (12 hex digits), argv[5] = FID (0~7).
 * NOTE(review): argc is not validated before argv[5] is read; assumes the
 * dispatcher already checked the argument count.
 */
void table_search_mac_fid(int argc, char *argv[])
{
	unsigned int i, j, value, mac, mac2, value2;
	char tmpstr[9];

	if (!argv[3] || strlen(argv[3]) != 12) {
		printf("MAC address format error, should be of length 12\n");
		return;
	}
	/* high 8 hex digits of the MAC -> ATA1 */
	strncpy(tmpstr, argv[3], 8);
	tmpstr[8] = '\0';
	value = strtoul(tmpstr, NULL, 16);
	reg_write(REG_ATA1_ADDR, value);
	//printf("REG_ATA1_ADDR is 0x%x\n\r",value);

	/* low 4 hex digits -> ATA2[31:16], IVL cleared, FID in bits [14:12] */
	strncpy(tmpstr, argv[3] + 8, 4);
	tmpstr[4] = '\0';

	value = strtoul(tmpstr, NULL, 16);
	value = (value << 16);
	value &= ~(1 << 15); //IVL=0

	j = strtoul(argv[5], NULL, 0);
	if (7 < j) {
		printf("wrong fid range, should be within 0~7\n");
		return;
	}
	value |= (j << 12); // fid
	reg_write(REG_ATA2_ADDR, value);
	//printf("REG_ATA2_ADDR is 0x%x\n\r",value);

	value = 0x8000; // ATC single-MAC search command (busy bit set)
	reg_write(REG_ATC_ADDR, value);

	usleep(1000);

	/* poll up to 20 ms for the ATC busy bit (15) to clear */
	for (i = 0; i < 20; i++) {
		reg_read(REG_ATC_ADDR, &value);
		if ((value & 0x8000) == 0) {
			break;
		}
		usleep(1000);
	}
	if (i == 20) {
		printf("search timeout.\n");
		return;
	}

	if (value & 0x1000) { // bit 12: no matching entry
		printf("search no entry.\n");
		return;
	}

	printf("search done.\n");
	printf("hash  port(0:6)   fid   vid  age   mac-address     filter my_mac\n");

	printf("%03x:   ", (value >> 16) & 0xfff); //hash_addr_lu
	reg_read(REG_ATRD_ADDR, &value2);
	j = (value2 >> 4) & 0xff; //r_port_map
	printf("%c", (j & 0x01) ? '1' : '-');
	printf("%c", (j & 0x02) ? '1' : '-');
	printf("%c", (j & 0x04) ? '1' : '-');
	printf("%c ", (j & 0x08) ? '1' : '-');
	printf("%c", (j & 0x10) ? '1' : '-');
	printf("%c", (j & 0x20) ? '1' : '-');
	printf("%c", (j & 0x40) ? '1' : '-');
	printf("%c", (j & 0x80) ? '1' : '-');

	reg_read(REG_TSRA2_ADDR, &mac2);

	printf("   %2d", (mac2 >> 12) & 0x7); //FID
	printf("  %4d", (mac2 & 0xfff));
	printf(" %4d", (value2 >> 24) & 0xff); //r_age_field
	reg_read(REG_TSRA1_ADDR, &mac);
	printf("  %08x", mac);
	printf("%04x", ((mac2 >> 16) & 0xffff));
	printf("     %c", (((value2 >> 20) & 0x03) == 0x03) ? 'y' : '-');
	printf("     %c\n", (((value2 >> 23) & 0x01) == 0x01) ? 'y' : '-');
}
+
/*
 * table_del_fid - delete a MAC entry addressed by MAC+FID (SVL mode)
 * from the switch address table.
 *
 * argv[3] = MAC address (12 hex digits), argv[5] = optional FID (0~7).
 * Clears ATWD (STATUS=0 means delete) and issues the single-MAC write
 * command (0x8001) via ATC, polling up to 20 ms for completion.
 */
void table_del_fid(int argc, char *argv[])
{
	unsigned int i, j, value;
	char tmpstr[9];

	if (!argv[3] || strlen(argv[3]) != 12) {
		printf("MAC address format error, should be of length 12\n");
		return;
	}
	/* high 8 hex digits of the MAC -> ATA1 */
	strncpy(tmpstr, argv[3], 8);
	tmpstr[8] = '\0';
	value = strtoul(tmpstr, NULL, 16);
	reg_write(REG_ATA1_ADDR, value);
	/* low 4 hex digits -> ATA2[31:16] */
	strncpy(tmpstr, argv[3] + 8, 4);
	tmpstr[4] = '\0';
	value = strtoul(tmpstr, NULL, 16);
	value = (value << 16);

	if (argc > 5) {
		j = strtoul(argv[5], NULL, 0);
		if (j > 7) {
			printf("wrong fid range, should be within 0~7\n");
			return;
		}
		value |= (j << 12); //fid
	}

	reg_write(REG_ATA2_ADDR, value);

	value = 0; //STATUS=0, delete mac
	reg_write(REG_ATWD_ADDR, value);

	value = 0x8001; //w_mac_cmd
	reg_write(REG_ATC_ADDR, value);

	for (i = 0; i < 20; i++) {
		reg_read(REG_ATC_ADDR, &value);
		if ((value & 0x8000) == 0) { // ATC busy bit cleared: command done
			if (argv[1] != NULL)
				printf("done.\n");
			return;
		}
		usleep(1000);
	}
	if (i == 20)
		printf("timeout.\n");
}
+
+void table_del_vid(int argc, char *argv[])
+{
+	unsigned int i, j, value;
+	char tmpstr[9];
+
+	if (!argv[3] || strlen(argv[3]) != 12) {
+		printf("MAC address format error, should be of length 12\n");
+		return;
+	}
+	strncpy(tmpstr, argv[3], 8);
+	tmpstr[8] = '\0';
+	value = strtoul(tmpstr, NULL, 16);
+	reg_write(REG_ATA1_ADDR, value);
+
+	strncpy(tmpstr, argv[3] + 8, 4);
+	tmpstr[4] = '\0';
+	value = strtoul(tmpstr, NULL, 16);
+	value = (value << 16);
+
+	j = strtoul(argv[5], NULL, 0);
+	if (j > 4095) {
+		printf("wrong fid range, should be within 0~4095\n");
+		return;
+	}
+	value |= j; //vid
+	value |= 1 << 15;
+	reg_write(REG_ATA2_ADDR, value);
+
+	value = 0; //STATUS=0, delete mac
+	reg_write(REG_ATWD_ADDR, value);
+
+	value = 0x8001; //w_mac_cmd
+	reg_write(REG_ATC_ADDR, value);
+
+	for (i = 0; i < 20; i++) {
+		reg_read(REG_ATC_ADDR, &value);
+		if ((value & 0x8000) == 0) { //mac address busy
+			if (argv[1] != NULL)
+				printf("done.\n");
+			return;
+		}
+		usleep(1000);
+	}
+	if (i == 20)
+		printf("timeout.\n");
+}
+
+void table_clear(void)
+{
+	unsigned int value;
+	reg_write(REG_ATC_ADDR, 0x8002);
+	usleep(5000);
+	reg_read(REG_ATC_ADDR, &value);
+
+	printf("REG_ATC_ADDR is 0x%x\n\r", value);
+}
+
+void set_mirror_to(int argc, char *argv[])
+{
+	unsigned int value;
+	int idx;
+
+	idx = strtoul(argv[3], NULL, 0);
+	if (idx < 0 || MAX_PORT < idx) {
+		printf("wrong port member, should be within 0~%d\n", MAX_PORT);
+		return;
+	}
+	if (chip_name == 0x7530) {
+
+		reg_read(REG_MFC_ADDR, &value);
+		value |= 0x1 << 3;
+		value &= 0xfffffff8;
+		value |= idx << 0;
+
+		reg_write(REG_MFC_ADDR, value);
+	} else {
+
+		reg_read(REG_CFC_ADDR, &value);
+		value &= (~REG_CFC_MIRROR_EN_MASK);
+		value |= (1 << REG_CFC_MIRROR_EN_OFFT);
+		value &= (~REG_CFC_MIRROR_PORT_MASK);
+		value |= (idx << REG_CFC_MIRROR_PORT_OFFT);
+		reg_write(REG_CFC_ADDR, value);
+	}
+}
+
+void set_mirror_from(int argc, char *argv[])
+{
+	unsigned int offset, value;
+	int idx, mirror;
+
+	idx = strtoul(argv[3], NULL, 0);
+	mirror = strtoul(argv[4], NULL, 0);
+
+	if (idx < 0 || MAX_PORT < idx) {
+		printf("wrong port member, should be within 0~%d\n", MAX_PORT);
+		return;
+	}
+
+	if (mirror < 0 || 3 < mirror) {
+		printf("wrong mirror setting, should be within 0~3\n");
+		return;
+	}
+
+	offset = (0x2004 | (idx << 8));
+	reg_read(offset, &value);
+
+	value &= 0xfffffcff;
+	value |= mirror << 8;
+
+	reg_write(offset, value);
+}
+
/*
 * vlan_dump - print every valid VLAN table entry (vid 1~4094): fid,
 * member portmap, stag and, with the optional "egtag" argument, the
 * per-port egress-tag handling decoded from VAWD2.
 *
 * For each vid a read command (0x80000000 + vid) is issued through VTCR
 * and the busy bit polled before VAWD1/VAWD2 are read back.
 */
void vlan_dump(int argc, char *argv[])
{
	unsigned int i, j, value, value2;
	int eg_tag = 0;

	if (argc == 4) {
		if (!strncmp(argv[3], "egtag", 6))
			eg_tag = 1;
	}

	if (eg_tag)
		printf("  vid  fid  portmap    s-tag\teg_tag(0:untagged 2:tagged)\n");
	else
		printf("  vid  fid  portmap    s-tag\n");

	for (i = 1; i < 4095; i++) {
		value = (0x80000000 + i); //r_vid_cmd
		reg_write(REG_VTCR_ADDR, value);

		/* poll up to 20 ms for the VTCR busy bit (31) to clear */
		for (j = 0; j < 20; j++) {
			reg_read(REG_VTCR_ADDR, &value);
			if ((value & 0x80000000) == 0) {
				break;
			}
			usleep(1000);
		}
		if (j == 20)
			printf("timeout.\n");

		reg_read(REG_VAWD1_ADDR, &value);
		reg_read(REG_VAWD2_ADDR, &value2);
		//printf("REG_VAWD1_ADDR value%d is 0x%x\n\r", i, value);
		//printf("REG_VAWD2_ADDR value%d is 0x%x\n\r", i, value2);

		if ((value & 0x01) != 0) { // VAWD1 bit 0 = entry valid
			printf(" %4d  ", i);
			printf(" %2d ", ((value & 0xe) >> 1)); // fid
			printf(" %c", (value & 0x00010000) ? '1' : '-'); // member ports, bits 16..23
			printf("%c", (value & 0x00020000) ? '1' : '-');
			printf("%c", (value & 0x00040000) ? '1' : '-');
			printf("%c", (value & 0x00080000) ? '1' : '-');
			printf("%c", (value & 0x00100000) ? '1' : '-');
			printf("%c", (value & 0x00200000) ? '1' : '-');
			printf("%c", (value & 0x00400000) ? '1' : '-');
			printf("%c", (value & 0x00800000) ? '1' : '-');
			printf("    %4d", ((value & 0xfff0) >> 4)); // stag
			if (eg_tag) {
				printf("\t");
				if ((value & (0x3 << 28)) == (0x3 << 28)) {
					/* VTAG_EN=1 and EG_CON=1 */
					printf("CONSISTENT");
				} else if (value & (0x1 << 28)) {
					/* VTAG_EN=1: two bits of egress-tag control per port */
					printf("%d", (value2 & 0x0003) >> 0);
					printf("%d", (value2 & 0x000c) >> 2);
					printf("%d", (value2 & 0x0030) >> 4);
					printf("%d", (value2 & 0x00c0) >> 6);
					printf("%d", (value2 & 0x0300) >> 8);
					printf("%d", (value2 & 0x0c00) >> 10);
					printf("%d", (value2 & 0x3000) >> 12);
					printf("%d", (value2 & 0xc000) >> 14);
				} else {
					/* VTAG_EN=0 */
					printf("DISABLED");
				}
			}
			printf("\n");
		} else {
			/*print 16 vid for reference information*/
			if (i <= 16) {
				printf(" %4d  ", i);
				printf(" %2d ", ((value & 0xe) >> 1));
				printf(" invalid\n");
			}
		}
	}
}
+
+
/*
 * timespec_diff_us - return (end - start) in microseconds.
 * Assumes end >= start; both fields are normalized (0 <= tv_nsec < 1e9).
 */
static long timespec_diff_us(struct timespec start, struct timespec end)
{
	struct timespec temp;
	unsigned long duration = 0;

	/* borrow from the seconds field when the nanosecond part underflows */
	if ((end.tv_nsec - start.tv_nsec) < 0) {
		temp.tv_sec = end.tv_sec - start.tv_sec - 1;
		temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
	} else {
		temp.tv_sec = end.tv_sec - start.tv_sec;
		temp.tv_nsec = end.tv_nsec - start.tv_nsec;
	}
	/* seconds -> microseconds */
	duration += temp.tv_sec * 1000000;
	/* nanoseconds -> microseconds. The original used ">> 10" (divide by
	 * 1024), under-reporting by ~2.4%; use an exact divide by 1000. */
	duration += temp.tv_nsec / 1000;

	return duration;
}
+
+
+void vlan_clear(int argc, char *argv[])
+{
+	unsigned int value;
+	int vid;
+	unsigned long duration_us = 0;
+	struct timespec start, end;
+
+	for (vid = 0; vid < 4096; vid++) {
+		clock_gettime(CLOCK_REALTIME, &start);
+		value = 0; //invalid
+		reg_write(REG_VAWD1_ADDR, value);
+
+		value = (0x80001000 + vid); //w_vid_cmd
+		reg_write(REG_VTCR_ADDR, value);
+		while (duration_us <= 1000) {
+			reg_read(REG_VTCR_ADDR, &value);
+			if ((value & 0x80000000) == 0) { //table busy
+				break;
+			}
+			clock_gettime(CLOCK_REALTIME, &end);
+			duration_us = timespec_diff_us(start, end);
+		}
+		if (duration_us > 1000)
+			printf("config vlan timeout: %ld.\n", duration_us);
+	}
+}
+
+void vlan_set(int argc, char *argv[])
+{
+	unsigned int vlan_mem = 0;
+	unsigned int value = 0;
+	unsigned int value2 = 0;
+	int i, vid, fid;
+	int stag = 0;
+	unsigned long eg_con = 0;
+	unsigned int eg_tag = 0;
+
+	if (argc < 5) {
+		printf("insufficient arguments!\n");
+		return;
+	}
+
+	fid = strtoul(argv[3], NULL, 0);
+	if (fid < 0 || fid > 7) {
+		printf("wrong filtering db id range, should be within 0~7\n");
+		return;
+	}
+	value |= (fid << 1);
+
+	vid = strtoul(argv[4], NULL, 0);
+	if (vid < 0 || 0xfff < vid) {
+		printf("wrong vlan id range, should be within 0~4095\n");
+		return;
+	}
+
+	if (strlen(argv[5]) != 8) {
+		printf("portmap format error, should be of length 7\n");
+		return;
+	}
+
+	vlan_mem = 0;
+	for (i = 0; i < 8; i++) {
+		if (argv[5][i] != '0' && argv[5][i] != '1') {
+			printf("portmap format error, should be of combination of 0 or 1\n");
+			return;
+		}
+		vlan_mem += (argv[5][i] - '0') * (1 << i);
+	}
+
+	/* VLAN stag */
+	if (argc > 6) {
+		stag = strtoul(argv[6], NULL, 16);
+		if (stag < 0 || 0xfff < stag) {
+			printf("wrong stag id range, should be within 0~4095\n");
+			return;
+		}
+		//printf("STAG is 0x%x\n", stag);
+	}
+
+	/* set vlan member */
+	value |= (vlan_mem << 16);
+	value |= (1 << 30);		//IVL=1
+	value |= ((stag & 0xfff) << 4); //stag
+	value |= 1;			//valid
+
+	if (argc > 7) {
+		eg_con = strtoul(argv[7], NULL, 2);
+		eg_con = !!eg_con;
+		value |= (eg_con << 29); //eg_con
+		value |= (1 << 28);      //eg tag control enable
+	}
+
+	if (argc > 8 && !eg_con) {
+		if (strlen(argv[8]) != 8) {
+			printf("egtag portmap format error, should be of length 7\n");
+			return;
+		}
+
+		for (i = 0; i < 8; i++) {
+			if (argv[8][i] < '0' || argv[8][i] > '3') {
+				printf("egtag portmap format error, should be of combination of 0 or 3\n");
+				return;
+			}
+			//eg_tag += (argv[8][i] - '0') * (1 << i * 2);
+			eg_tag |= (argv[8][i] - '0') << (i * 2);
+		}
+
+		value |= (1 << 28);    //eg tag control enable
+		value2 &= ~(0xffff);
+		value2 |= eg_tag;
+	}
+	reg_write(REG_VAWD1_ADDR, value);
+	reg_write(REG_VAWD2_ADDR, value2);
+	//printf("VAWD1=0x%08x VAWD2=0x%08x ", value, value2);
+
+	value = (0x80001000 + vid); //w_vid_cmd
+	reg_write(REG_VTCR_ADDR, value);
+	//printf("VTCR=0x%08x\n", value);
+
+	for (i = 0; i < 300; i++) {
+		usleep(1000);
+		reg_read(REG_VTCR_ADDR, &value);
+		if ((value & 0x80000000) == 0) //table busy
+			break;
+	}
+
+	if (i == 300)
+		printf("config vlan timeout.\n");
+}
+
/*
 * igmp_on - enable IGMP snooping on the switch.
 *
 * argv[3] = optional leaky_en (1 enables multicast leaky VLAN on the WAN
 * port), argv[4] = optional WAN port number (4 by default; anything else
 * is treated as WAN on P0).
 *
 * Programs the ISC register, per-port snooping controls (0x2x08), enables
 * per-port ACLs, and installs ACL rules (via the 0x90/0x94/0x98 command
 * registers) that steer IGMP/query traffic between the LAN ports, the WAN
 * port and the CPU ports. Finally forces the bridge interface to act as a
 * multicast router so eth2 receives all IGMP packets.
 * NOTE(review): the exact semantics of the ACL command words written to
 * 0x90/0x94/0x98 are taken on trust from the vendor sequence -- confirm
 * against the MT753x datasheet before changing any constant.
 */
void igmp_on(int argc, char *argv[])
{
	unsigned int leaky_en = 0;
	unsigned int wan_num = 4;
	unsigned int port, offset, value;
	char cmd[80];
	int ret;

	if (argc > 3)
		leaky_en = strtoul(argv[3], NULL, 10);
	if (argc > 4)
		wan_num = strtoul(argv[4], NULL, 10);

	if (leaky_en == 1) {
		if (wan_num == 4) {
			/* leaky VLAN on P4 (WAN), off on P0 */
			/* reg_write(0x2410, 0x810000c8); */
			reg_read(0x2410, &value);
			reg_write(0x2410, value | (1 << 3));
			/* reg_write(0x2010, 0x810000c0); */
			reg_read(0x2010, &value);
			reg_write(0x2010, value & (~(1 << 3)));
			reg_write(REG_ISC_ADDR, 0x10027d10);
		} else {
			/* leaky VLAN on P0 (WAN), off on P4 */
			/* reg_write(0x2010, 0x810000c8); */
			reg_read(0x2010, &value);
			reg_write(0x2010, value | (1 << 3));
			/* reg_write(0x2410, 0x810000c0); */
			reg_read(0x2410, &value);
			reg_write(0x2410, value & (~(1 << 3)));
			reg_write(REG_ISC_ADDR, 0x01027d01);
		}
	}
	else
		reg_write(REG_ISC_ADDR, 0x10027d60);

	reg_write(0x1c, 0x08100810);
	/* per-port IGMP snooping control registers */
	reg_write(0x2008, 0xb3ff);
	reg_write(0x2108, 0xb3ff);
	reg_write(0x2208, 0xb3ff);
	reg_write(0x2308, 0xb3ff);
	reg_write(0x2408, 0xb3ff);
	reg_write(0x2608, 0xb3ff);
	/* Enable Port ACL
	* reg_write(0x2P04, 0xff0403);
	*/
	for (port = 0; port <= 6; port++) {
		offset = 0x2004 + port * 0x100;
		reg_read(offset, &value);
		reg_write(offset, value | (1 << 10)); /* ACL enable bit */
	}

	/*IGMP query only p4 -> p5*/
	reg_write(0x94, 0x00ff0002);
	if (wan_num == 4)
		reg_write(0x98, 0x000a1008);
	else
		reg_write(0x98, 0x000a0108);
	reg_write(0x90, 0x80005000);
	reg_write(0x94, 0xff001100);
	if (wan_num == 4)
		reg_write(0x98, 0x000B1000);
	else
		reg_write(0x98, 0x000B0100);
	reg_write(0x90, 0x80005001);
	reg_write(0x94, 0x3);
	reg_write(0x98, 0x0);
	reg_write(0x90, 0x80009000);
	reg_write(0x94, 0x1a002080);
	reg_write(0x98, 0x0);
	reg_write(0x90, 0x8000b000);

	/*IGMP p5 -> p4*/
	reg_write(0x94, 0x00ff0002);
	reg_write(0x98, 0x000a2008);
	reg_write(0x90, 0x80005002);
	reg_write(0x94, 0x4);
	reg_write(0x98, 0x0);
	reg_write(0x90, 0x80009001);
	if (wan_num == 4)
		reg_write(0x94, 0x1a001080);
	else
		reg_write(0x94, 0x1a000180);
	reg_write(0x98, 0x0);
	reg_write(0x90, 0x8000b001);

	/*IGMP p0~p3 -> p6*/
	reg_write(0x94, 0x00ff0002);
	if (wan_num == 4)
		reg_write(0x98, 0x000a0f08);
	else
		reg_write(0x98, 0x000a1e08);
	reg_write(0x90, 0x80005003);
	reg_write(0x94, 0x8);
	reg_write(0x98, 0x0);
	reg_write(0x90, 0x80009002);
	reg_write(0x94, 0x1a004080);
	reg_write(0x98, 0x0);
	reg_write(0x90, 0x8000b002);

	/*IGMP query only p6 -> p0~p3*/
	reg_write(0x94, 0x00ff0002);
	reg_write(0x98, 0x000a4008);
	reg_write(0x90, 0x80005004);
	reg_write(0x94, 0xff001100);
	reg_write(0x98, 0x000B4000);
	reg_write(0x90, 0x80005005);
	reg_write(0x94, 0x30);
	reg_write(0x98, 0x0);
	reg_write(0x90, 0x80009003);
	if (wan_num == 4)
		reg_write(0x94, 0x1a000f80);
	else
		reg_write(0x94, 0x1a001e80);
	reg_write(0x98, 0x0);
	reg_write(0x90, 0x8000b003);

	/*Force eth2 to receive all igmp packets*/
	snprintf(cmd, sizeof(cmd), "echo 2 > /sys/devices/virtual/net/%s/brif/%s/multicast_router", BR_DEVNAME, ETH_DEVNAME);
	ret = system(cmd);
	if (ret)
		printf("Failed to set /sys/devices/virtual/net/%s/brif/%s/multicast_router\n",
		       BR_DEVNAME, ETH_DEVNAME);
}
+
/*
 * Disable IGMP snooping on a single port (argv[3], 0~6) by writing
 * 0x8000 to the port's snooping control register at 0x2008 + port*0x100.
 */
void igmp_disable(int argc, char *argv[])
{
	int port;

	if (argc < 4) {
		printf("insufficient arguments!\n");
		return;
	}

	port = strtoul(argv[3], NULL, 0);
	if (port < 0 || 6 < port) {
		printf("wrong port range, should be within 0~6\n");
		return;
	}

	reg_write(0x2008 | (port << 8), 0x8000);
}
+
/*
 * Enable IGMP snooping on a single port (argv[3], 0~6) by writing
 * 0x9755 to the port's snooping control register at 0x2008 + port*0x100.
 */
void igmp_enable(int argc, char *argv[])
{
	int port;

	if (argc < 4) {
		printf("insufficient arguments!\n");
		return;
	}

	port = strtoul(argv[3], NULL, 0);
	if (port < 0 || 6 < port) {
		printf("wrong port range, should be within 0~6\n");
		return;
	}

	reg_write(0x2008 | (port << 8), 0x9755);
}
+
+void igmp_off()
+{
+	unsigned int value;
+	//set ISC: IGMP Snooping Control Register (offset: 0x0018)
+	reg_read(REG_ISC_ADDR, &value);
+	value &= ~(1 << 18); //disable
+	reg_write(REG_ISC_ADDR, value);
+
+	/*restore wan port multicast leaky vlan function: default disabled*/
+	reg_read(0x2010, &value);
+	reg_write(0x2010, value & (~(1 << 3)));
+	reg_read(0x2410, &value);
+	reg_write(0x2410, value & (~(1 << 3)));
+
+	printf("config igmpsnoop off.\n");
+}
+
+void switch_reset(int argc, char *argv[])
+{
+	unsigned int value = 0;
+	/*Software Register Reset  and Software System Reset */
+	reg_write(0x7000, 0x3);
+	reg_read(0x7000, &value);
+	printf("SYS_CTRL(0x7000) register value =0x%x  \n", value);
+	if (chip_name == 0x7531) {
+		reg_write(0x7c0c, 0x11111111);
+		reg_read(0x7c0c, &value);
+		printf("GPIO Mode (0x7c0c) select value =0x%x  \n", value);
+	}
+	printf("Switch Software Reset !!! \n");
+}
+
/*
 * phy_set_fc - set the PHY's advertised full-duplex pause capability.
 *
 * argv[3] = port (0~4, the ports with internal PHYs),
 * argv[4] = pause capable (0|1).
 * Updates bits 10 (pause) and 11 (asym pause) of the auto-negotiation
 * advertisement register (PHY register 4). Returns 0 on success, -1 on
 * bad parameters.
 */
int phy_set_fc(int argc, char *argv[])
{
	unsigned int port, pause_capable;
	unsigned int phy_value;

	port = atoi(argv[3]);
	pause_capable = atoi(argv[4]);

	/*Check the input parameters is right or not.*/
	if (port > MAX_PORT - 2 || pause_capable > 1) {
		printf("Illegal parameter (port:0~4, full_duplex_pause_capable:0|1)\n");
		return -1;
	}
	printf("port=%d, full_duplex_pause_capable:%d\n", port, pause_capable);
	mii_mgr_read(port, 4, &phy_value);
	printf("read phy_value:0x%x\r\n", phy_value);
	phy_value &= (~(0x1 << 10)); // pause
	phy_value &= (~(0x1 << 11)); // asymmetric pause
	if (pause_capable == 1) {
		phy_value |= (0x1 << 10);
		phy_value |= (0x1 << 11);
	}
	mii_mgr_write(port, 4, phy_value);
	printf("write phy_value:0x%x\r\n", phy_value);
	return 0;
} /*end phy_set_fc*/
+
+int phy_set_an(int argc, char *argv[])
+{
+	unsigned int port, auto_negotiation_en;
+	unsigned int phy_value;
+
+	port = atoi(argv[3]);
+	auto_negotiation_en = atoi(argv[4]);
+
+	/*Check the input parameters is right or not.*/
+	if (port > MAX_PORT - 2 || auto_negotiation_en > 1) {
+		printf("Illegal parameter (port:0~4, auto_negotiation_en:0|1)\n");
+		return -1;
+	}
+	printf("port=%d, auto_negotiation_en:%d\n", port, auto_negotiation_en);
+	mii_mgr_read(port, 0, &phy_value);
+	printf("read phy_value:0x%x\r\n", phy_value);
+	phy_value &= (~(1 << 12));
+	phy_value |= (auto_negotiation_en << 12);
+	mii_mgr_write(port, 0, phy_value);
+	printf("write phy_value:0x%x\r\n", phy_value);
+	return 0;
+} /*end phy_set_an*/
+
+int set_mac_pfc(int argc, char *argv[])
+{
+	unsigned int value;
+	int port, enable = 0;
+
+	port = atoi(argv[3]);
+	enable = atoi(argv[4]);
+	printf("enable: %d\n", enable);
+	if (port < 0 || port > 6 || enable < 0 || enable > 1) {
+		printf("Illegal parameter (port:0~6, enable|diable:0|1) \n");
+		return -1;
+	}
+	if (chip_name == 0x7531) {
+		reg_read(REG_PFC_CTRL_ADDR, &value);
+		value &= ~(1 << port);
+		value |= (enable << port);
+		printf("write reg: %x, value: %x\n", REG_PFC_CTRL_ADDR, value);
+		reg_write(REG_PFC_CTRL_ADDR, value);
+	}
+	else
+		printf("\nCommand not support by this chip.\n");
+	return 0;
+}
+
/*
 * global_set_mac_fc - globally enable/disable MAC flow control (MT7530
 * only). argv[3] = enable (0|1). Returns 0, or -1 on bad parameters.
 *
 * NOTE(review): the reg_write(0x7000, 0x3) below is the software-reset
 * command also used by switch_reset(); it resets the switch before the
 * GFCCR0 update. Preserved as-is -- confirm this is intentional.
 */
int global_set_mac_fc(int argc, char *argv[])
{
	unsigned char enable = 0;
	unsigned int value, reg;

	if (chip_name == 0x7530) {
		enable = atoi(argv[3]);
		printf("enable: %d\n", enable);

		/*Check the input parameters is right or not.*/
		if (enable > 1) {
			printf(HELP_MACCTL_FC);
			return -1;
		}
		reg_write(0x7000, 0x3); // software reset (see NOTE above)
		reg = REG_GFCCR0_ADDR;
		reg_read(REG_GFCCR0_ADDR, &value);
		value &= (~REG_FC_EN_MASK);
		value |= (enable << REG_FC_EN_OFFT);
		printf("write reg: %x, value: %x\n", reg, value);
		reg_write(REG_GFCCR0_ADDR, value);
	} else
		printf("\r\nCommand not support by this chip.\n");
	return 0;
} /*end mac_set_fc*/
+
/*
 * qos_sch_select - choose the scheduling mode of one egress queue.
 *
 * argv[3] = port (0~6), argv[4] = queue (0~7),
 * argv[5] = "min" or "max" (which shaper of the queue to configure),
 * argv[6] = type: 0 = round robin (min only), 1 = strict priority,
 *           2 = WFQ (max only).
 * Writes the queue's min (MMSCR0) or max (MMSCR1) shaper register with
 * rate limiting disabled; bit 31 selects strict priority.
 * Returns 0 on success, -1 on bad arguments.
 */
int qos_sch_select(int argc, char *argv[])
{
	unsigned char port, queue;
	unsigned char type = 0;
	unsigned int value, reg;

	if (argc < 7)
		return -1;

	port = atoi(argv[3]);
	queue = atoi(argv[4]);
	type = atoi(argv[6]);

	if (port > 6 || queue > 7) {
		printf("\n Illegal input parameters\n");
		return -1;
	}

	if ((type != 0 && type != 1 && type != 2)) {
		printf(HELP_QOS_TYPE);
		return -1;
	}

	printf("\r\nswitch qos type: %d.\n",type);

	if (!strncmp(argv[5], "min", 4)) {

		if (type == 0) {
			/*min sharper-->round roubin, disable min sharper rate limit*/
			reg = GSW_MMSCR0_Q(queue) + 0x100 * port;
			reg_read(reg, &value);
			value = 0x0;
			reg_write(reg, value);
		} else if (type == 1) {
			/*min sharper-->sp, disable min sharper rate limit*/
			reg = GSW_MMSCR0_Q(queue) + 0x100 * port;
			reg_read(reg, &value);
			value = 0x0;
			value |= (1 << 31); // strict-priority select bit
			reg_write(reg, value);
		} else {
			printf("min sharper only support: rr or sp\n");
			return -1;
		}
	} else if (!strncmp(argv[5], "max", 4)) {
		if (type == 1) {
			/*max sharper-->sp, disable max sharper rate limit*/
			reg = GSW_MMSCR1_Q(queue) + 0x100 * port;
			reg_read(reg, &value);
			value = 0x0;
			value |= (1 << 31); // strict-priority select bit
			reg_write(reg, value);
		} else if (type == 2) {
			/*max sharper-->wfq, disable max sharper rate limit*/
			reg = GSW_MMSCR1_Q(queue) + 0x100 * port;
			reg_read(reg, &value);
			value = 0x0;
			reg_write(reg, value);
		} else {
			printf("max sharper only support: wfq or sp\n");
			return -1;
		}
	} else {
		printf("\r\nIllegal sharper:%s\n",argv[5]);
		return -1;
	}
	printf("reg:0x%x--value:0x%x\n",reg,value);

	return 0;
}
+
/*
 * get_upw - compute the user-priority-weight selector word.
 *
 * Clears the six 3-bit selector fields (bits [2:0], [6:4], [10:8],
 * [14:12], [18:16], [22:20]) of *value, then sets the field matching
 * `base` to 7 (highest weight) and all others to 2:
 *   0 = port-based, 1 = tag-based, 2 = DSCP-based,
 *   3 = acl-based, 4 = arl-based, 5 = stag-based.
 * Any other base leaves all fields cleared.
 */
void get_upw(unsigned int *value, unsigned char base)
{
	/* precomputed field patterns: 0x7 in the selected field, 0x2 elsewhere */
	static const unsigned int upw_pattern[6] = {
		0x00272222, /* 0: port-based  (field at bits [18:16]) */
		0x00222722, /* 1: tag-based   (field at bits [10:8])  */
		0x00227222, /* 2: DSCP-based  (field at bits [14:12]) */
		0x00222227, /* 3: acl-based   (field at bits [2:0])   */
		0x00722222, /* 4: arl-based   (field at bits [22:20]) */
		0x00222272, /* 5: stag-based  (field at bits [6:4])   */
	};

	*value &= ~0x00777777u; /* clear all six selector fields */
	if (base < 6)
		*value |= upw_pattern[base];
}
+
/*
 * qos_set_base - select the QoS classification base for a port.
 *
 * argv[3] = port (0~6), argv[4] = base (0~5, see get_upw; values up to
 * 6 are accepted but leave all weights cleared).
 * MT7530 has one global UPW register (0x44); MT7531 has one per port
 * (GSW_UPW(port)).
 */
void qos_set_base(int argc, char *argv[])
{
	unsigned char base = 0;
	unsigned char port;
	unsigned int value;

	if (argc < 5)
		return;

	port = atoi(argv[3]);
	base = atoi(argv[4]);

	if (base > 6) {
		printf(HELP_QOS_BASE);
		return;
	}

	if (port > 6) {
		printf("Illegal port index:%d\n",port);
		return;
	}

	printf("\r\nswitch qos base : %d. (port-based:0, tag-based:1,\
		dscp-based:2, acl-based:3, arl-based:4, stag-based:5)\n",
	       base);
	if (chip_name == 0x7530) {
		/* MT7530: single global UPW register */
		reg_read(0x44, &value);
		get_upw(&value, base);
		reg_write(0x44, value);
		printf("reg: 0x44, value: 0x%x\n", value);

	} else if (chip_name == 0x7531) {
		/* MT7531: per-port UPW register */
		reg_read(GSW_UPW(port), &value);
		get_upw(&value, base);
		reg_write(GSW_UPW(port), value);
		printf("reg:0x%x, value: 0x%x\n",GSW_UPW(port),value);

	} else {
		printf("unknown switch device");
		return;
	}
}
+
/*
 * qos_wfq_set_weight - program the WFQ weights of all 8 queues of a port.
 *
 * argv[3] = port (0~6), argv[4..11] = weights for queue 0~7, each 1~16.
 * The weight is stored minus one in bits [27:24] of each queue's MMSCR1
 * register.
 * NOTE(review): argc is not validated before argv[4..11] are read;
 * assumes the dispatcher already checked the argument count.
 */
void qos_wfq_set_weight(int argc, char *argv[])
{
	int port, weight[8], i;
	unsigned char queue;
	unsigned int reg, value;

	port = atoi(argv[3]);

	for (i = 0; i < 8; i++) {
		weight[i] = atoi(argv[i + 4]);
	}

	/* MT7530 total 7 port */
	if (port < 0 || port > 6) {
		printf(HELP_QOS_PORT_WEIGHT);
		return;
	}

	for (i = 0; i < 8; i++) {
		if (weight[i] < 1 || weight[i] > 16) {
			printf(HELP_QOS_PORT_WEIGHT);
			return;
		}
	}
	printf("port: %x, q0: %x, q1: %x, q2: %x, q3: %x, \
		q4: %x, q5: %x, q6: %x, q7: %x\n",
	       port, weight[0], weight[1], weight[2], weight[3], weight[4],
	       weight[5], weight[6], weight[7]);

	for (queue = 0; queue < 8; queue++) {
		reg = GSW_MMSCR1_Q(queue) + 0x100 * port;
		reg_read(reg, &value);
		value &= (~(0xf << 24)); //bit24~27
		value |= (((weight[queue] - 1) & 0xf) << 24); // hardware stores weight-1
		printf("reg: %x, value: %x\n", reg, value);
		reg_write(reg, value);
	}
}
+
+void qos_set_portpri(int argc, char *argv[])
+{
+	unsigned char port, prio;
+	unsigned int value;
+
+	port = atoi(argv[3]);
+	prio = atoi(argv[4]);
+
+	if (port >= 7 || prio > 7) {
+		printf(HELP_QOS_PORT_PRIO);
+		return;
+	}
+
+	reg_read(GSW_PCR(port), &value);
+	value &= (~(0x7 << 24));
+	value |= (prio << 24);
+	reg_write(GSW_PCR(port), value);
+	printf("write reg: %x, value: %x\n", GSW_PCR(port), value);
+}
+
+void qos_set_dscppri(int argc, char *argv[])
+{
+	unsigned char prio, dscp, pim_n, pim_offset;
+	unsigned int reg, value;
+
+	dscp = atoi(argv[3]);
+	prio = atoi(argv[4]);
+
+	if (dscp > 63 || prio > 7) {
+		printf(HELP_QOS_DSCP_PRIO);
+		return;
+	}
+
+	pim_n = dscp / 10;
+	pim_offset = (dscp - pim_n * 10) * 3;
+	reg = 0x0058 + pim_n * 4;
+	reg_read(reg, &value);
+	value &= (~(0x7 << pim_offset));
+	value |= ((prio & 0x7) << pim_offset);
+	reg_write(reg, value);
+	printf("write reg: %x, value: %x\n", reg, value);
+}
+
/*
 * qos_pri_mapping_queue - map a priority level to an egress queue.
 *
 * argv[3] = port (used only on MT7531, which has per-port PEM
 * registers), argv[4] = priority (0~7), argv[5] = queue (0~7).
 * Each PEM register holds two priority->queue mappings; the odd
 * priority of the pair lives in the high half, the even one in the
 * low half (field offsets differ between MT7530 and MT7531).
 */
void qos_pri_mapping_queue(int argc, char *argv[])
{
	unsigned char prio, queue, pem_n, port;
	unsigned int reg, value;

	if (argc < 6)
		return;

	port = atoi(argv[3]);
	prio = atoi(argv[4]);
	queue = atoi(argv[5]);

	if (prio > 7 || queue > 7) {
		printf(HELP_QOS_PRIO_QMAP);
		return;
	}
	if (chip_name == 0x7530) {
		/* global PEM registers at 0x48 + pair*4 */
		pem_n = prio / 2;
		reg = pem_n * 0x4 + 0x48;
		reg_read(reg, &value);
		if (prio % 2) { // odd priority: queue field at bits [26:24]
			value &= (~(0x7 << 24));
			value |= ((queue & 0x7) << 24);
		} else { // even priority: queue field at bits [10:8]
			value &= (~(0x7 << 8));
			value |= ((queue & 0x7) << 8);
		}
		reg_write(reg, value);
		printf("write reg: %x, value: %x\n", reg, value);
	} else if (chip_name == 0x7531) {
		/* per-port PEM registers */
		pem_n = prio / 2;
		reg = GSW_PEM(pem_n) + 0x100 * port;
		reg_read(reg, &value);
		if (prio % 2) { // odd priority: queue field at bits [27:25]
			value &= (~(0x7 << 25));
			value |= ((queue & 0x7) << 25);
		} else { // even priority: queue field at bits [11:9]
			value &= (~(0x7 << 9));
			value |= ((queue & 0x7) << 9);
		}
		reg_write(reg, value);
		printf("write reg: %x, value: %x\n", reg, value);
	}
	else {
		printf("unknown switch device");
		return;
	}
}
+
+/*
+ * Write one VLAN table entry on MT753x.
+ *
+ * index      : entry index (printed only; the entry is addressed by vid)
+ * active     : non-zero marks the entry valid (VAWD1 bit 0)
+ * vid        : VLAN ID, encoded into the VTCR command (0x80001000 + vid)
+ * portMap    : member-port bitmap, VAWD1 bits [23:16]
+ * tagPortMap : per-port "tagged egress" bitmap; expanded to 2 bits per
+ *              port in VAWD2, and forces eg_tag (VAWD1 bit 28) when set
+ * ivl_en     : independent VLAN learning, VAWD1 bit 30
+ * fid        : filter ID, VAWD1 bits [3:1]
+ * stag       : service tag, VAWD1 bits [15:4]
+ *
+ * Sequence: fill VAWD2 (0x98) and VAWD1 (0x94), kick VTCR (0x90), then
+ * busy-wait until VTCR bit 31 clears. The final write of 0x8002 to 0x80
+ * matches the original "switch clear" comment — presumably flushes the
+ * table change; confirm against the datasheet. Always returns 0.
+ */
+static int macMT753xVlanSetVid(unsigned char index, unsigned char active,
+			       unsigned short vid, unsigned char portMap, unsigned char tagPortMap,
+			       unsigned char ivl_en, unsigned char fid, unsigned short stag)
+{
+	unsigned int value = 0;
+	unsigned int value2 = 0;
+	unsigned int reg;
+	int i;
+
+	printf("index: %x, active: %x, vid: %x, portMap: %x, \
+		tagPortMap: %x, ivl_en: %x, fid: %x, stag: %x\n",
+	       index, active, vid, portMap, tagPortMap, ivl_en, fid, stag);
+
+	value = (portMap << 16);
+	value |= (stag << 4);
+	value |= (ivl_en << 30);
+	value |= (fid << 1);
+	value |= (active ? 1 : 0);
+
+	// total 7 ports
+	for (i = 0; i < 7; i++) {
+		if (tagPortMap & (1 << i))
+			value2 |= 0x2 << (i * 2);
+	}
+
+	if (value2)
+		value |= (1 << 28); // eg_tag
+
+	reg = 0x98; // VAWD2
+	reg_write(reg, value2);
+
+	reg = 0x94; // VAWD1
+	reg_write(reg, value);
+
+	reg = 0x90; // VTCR
+	value = (0x80001000 + vid);
+	reg_write(reg, value);
+
+	reg = 0x90; // VTCR
+	while (1) {
+		reg_read(reg, &value);
+		if ((value & 0x80000000) == 0) //table busy
+			break;
+	}
+
+	/* switch clear */
+	reg = 0x80;
+	reg_write(reg, 0x8002);
+	usleep(5000);
+	reg_read(reg, &value);
+
+	printf("SetVid: index:%d active:%d vid:%d portMap:%x tagPortMap:%x\r\n",
+	       index, active, vid, portMap, tagPortMap);
+	return 0;
+
+} /*end macMT753xVlanSetVid*/
+/*
+static int macMT753xVlanGetVtbl(unsigned short index)
+{
+	unsigned int reg, value, vawd1, vawd2;
+
+	reg = 0x90; // VTCR
+	value = (0x80000000 + index);
+
+	reg_write(reg, value);
+
+	reg = 0x90; // VTCR
+	while (1) {
+		reg_read(reg, &value);
+		if ((value & 0x80000000) == 0) //table busy
+			break;
+	}
+
+	reg = 0x94; // VAWD1
+	reg_read(reg, &vawd1);
+
+	reg = 0x98; // VAWD2
+	reg_read(reg, &vawd2);
+
+	if (vawd1 & 0x1) {
+		fprintf(stderr, "%d.%s vid:%d fid:%d portMap:0x%x \
+				tagMap:0x%x stag:0x%x ivl_en:0x%x\r\n",
+			index, (vawd1 & 0x1) ? "on" : "off", index, ((vawd1 & 0xe) >> 1),
+			(vawd1 & 0xff0000) >> 16, vawd2, (vawd1 & 0xfff0) >> 0x4, (vawd1 >> 30) & 0x1);
+	}
+	return 0;
+} */ /*end macMT753xVlanGetVtbl*/
+
+/*
+ * Set the default VLAN ID (PVID) of a port.
+ * port : 0-6; pvid is written into bits [11:0] of 0x2014 + port*0x100.
+ * The trailing 0x8002 write to 0x80 mirrors the "switch clear" step used
+ * by the other VLAN setters — purpose presumed; confirm with datasheet.
+ * Returns 0 on success, -1 for an out-of-range port.
+ */
+static int macMT753xVlanSetPvid(unsigned char port, unsigned short pvid)
+{
+	unsigned int value;
+	unsigned int reg;
+
+	/*Parameters is error*/
+	if (port > 6)
+		return -1;
+
+	reg = 0x2014 + (port * 0x100);
+	reg_read(reg, &value);
+	value &= ~0xfff;
+	value |= pvid;
+	reg_write(reg, value);
+
+	/* switch clear */
+	reg = 0x80;
+	reg_write(reg, 0x8002);
+	usleep(5000);
+	reg_read(reg, &value);
+
+	printf("SetPVID: port:%d pvid:%d\r\n", port, pvid);
+	return 0;
+}
+/*
+static int macMT753xVlanGetPvid(unsigned char port)
+{
+	unsigned int value;
+	unsigned int reg;
+
+	if (port > 6)
+		return -1;
+	reg = 0x2014 + (port * 0x100);
+	reg_read(reg, &value);
+	return (value & 0xfff);
+} */
+/*
+static int macMT753xVlanDisp(void)
+{
+	unsigned int i = 0;
+	unsigned int reg, value;
+
+	reg = 0x2604;
+	reg_read(reg, &value);
+	value &= 0x30000000;
+
+	fprintf(stderr, "VLAN function is %s\n", value ? ETHCMD_ENABLE : ETHCMD_DISABLE);
+	fprintf(stderr, "PVID e0:%02d e1:%02d e2:%02d e3:%02d e4:%02d e5:%02d e6:%02d\n",
+		macMT753xVlanGetPvid(0), macMT753xVlanGetPvid(1), macMT753xVlanGetPvid(2),
+		macMT753xVlanGetPvid(3), macMT753xVlanGetPvid(4), macMT753xVlanGetPvid(5), macMT753xVlanGetPvid(6));
+
+	for (i = 0; i < MAX_VID_VALUE; i++)
+		macMT753xVlanGetVtbl(i);
+
+	return 0;
+}*/ /*end macMT753xVlanDisp*/
+
+/*
+ * CLI wrapper: "vlan pvid <port> <pvid>".
+ * Validates argv[3]/argv[4] against SWITCH_MAX_PORT / MAX_VID_VALUE and
+ * delegates to macMT753xVlanSetPvid().
+ */
+void doVlanSetPvid(int argc, char *argv[])
+{
+	unsigned char port = 0;
+	unsigned short pvid = 0;
+
+	port = atoi(argv[3]);
+	pvid = atoi(argv[4]);
+	/*Check the input parameters is right or not.*/
+	if ((port >= SWITCH_MAX_PORT) || (pvid > MAX_VID_VALUE)) {
+		printf(HELP_VLAN_PVID);
+		return;
+	}
+
+	macMT753xVlanSetPvid(port, pvid);
+
+	printf("port:%d pvid:%d,vlancap: max_port:%d maxvid:%d\r\n",
+	       port, pvid, SWITCH_MAX_PORT, MAX_VID_VALUE);
+} /*end doVlanSetPvid*/
+
+/*
+ * CLI wrapper: "vlan vid <index> <active> <vid> <portmap> <tagportmap>
+ * [ivl_en [fid [stag]]]".
+ * Optional trailing arguments default to 0; validated values are passed
+ * to macMT753xVlanSetVid().
+ * NOTE(review): argv[6]/argv[7] are read before argc is verified —
+ * confirm callers guarantee at least 8 arguments.
+ */
+void doVlanSetVid(int argc, char *argv[])
+{
+	unsigned char index = 0;
+	unsigned char active = 0;
+	unsigned char portMap = 0;
+	unsigned char tagPortMap = 0;
+	unsigned short vid = 0;
+
+	unsigned char ivl_en = 0;
+	unsigned char fid = 0;
+	unsigned short stag = 0;
+
+	index = atoi(argv[3]);
+	active = atoi(argv[4]);
+	vid = atoi(argv[5]);
+
+	/*Check the input parameters is right or not.*/
+	if ((index >= MAX_VLAN_RULE) || (vid >= 4096) || (active > ACTIVED)) {
+		printf(HELP_VLAN_VID);
+		return;
+	}
+
+	/*CPU Port is always the membership*/
+	portMap = atoi(argv[6]);
+	tagPortMap = atoi(argv[7]);
+
+	printf("subcmd parameter argc = %d\r\n", argc);
+	if (argc >= 9) {
+		ivl_en = atoi(argv[8]);
+		if (argc >= 10) {
+			fid = atoi(argv[9]);
+			if (argc >= 11)
+				stag = atoi(argv[10]);
+		}
+	}
+	macMT753xVlanSetVid(index, active, vid, portMap, tagPortMap,
+			    ivl_en, fid, stag);
+	printf("index:%d active:%d vid:%d\r\n", index, active, vid);
+} /*end doVlanSetVid*/
+
+/*
+ * Set a port's acceptable-frame type.
+ * argv[3] = port, argv[4] = type (0..REG_PVC_ACC_FRM_RELMASK).
+ * Read-modify-write of the ACC_FRM field in the per-port PVC register
+ * (REG_PVC_P0_ADDR + port*0x100).
+ */
+void doVlanSetAccFrm(int argc, char *argv[])
+{
+	unsigned char port = 0;
+	unsigned char type = 0;
+	unsigned int value;
+	unsigned int reg;
+
+	port = atoi(argv[3]);
+	type = atoi(argv[4]);
+
+	printf("port: %d, type: %d\n", port, type);
+
+	/*Check the input parameters is right or not.*/
+	if ((port > SWITCH_MAX_PORT) || (type > REG_PVC_ACC_FRM_RELMASK)) {
+		printf(HELP_VLAN_ACC_FRM);
+		return;
+	}
+
+	reg = REG_PVC_P0_ADDR + port * 0x100;
+	reg_read(reg, &value);
+	value &= (~REG_PVC_ACC_FRM_MASK);
+	value |= ((unsigned int)type << REG_PVC_ACC_FRM_OFFT);
+
+	printf("write reg: %x, value: %x\n", reg, value);
+	reg_write(reg, value);
+} /*end doVlanSetAccFrm*/
+
+/*
+ * Set a port's VLAN attribute.
+ * argv[3] = port, argv[4] = attr (0-3; meaning of each value per the
+ * switch datasheet). Written into bits [7:6] of 0x2010 + port*0x100.
+ */
+void doVlanSetPortAttr(int argc, char *argv[])
+{
+	unsigned char port = 0;
+	unsigned char attr = 0;
+	unsigned int value;
+	unsigned int reg;
+
+	port = atoi(argv[3]);
+	attr = atoi(argv[4]);
+
+	printf("port: %x, attr: %x\n", port, attr);
+
+	/*Check the input parameters is right or not.*/
+	if (port > SWITCH_MAX_PORT || attr > 3) {
+		printf(HELP_VLAN_PORT_ATTR);
+		return;
+	}
+
+	reg = 0x2010 + port * 0x100;
+	reg_read(reg, &value);
+	/* 0xffffff3f clears bits [7:6] before inserting the new attribute */
+	value &= (0xffffff3f);
+	value |= (attr << 6);
+
+	printf("write reg: %x, value: %x\n", reg, value);
+	reg_write(reg, value);
+}
+
+/*
+ * Set a port's VLAN mode.
+ * argv[3] = port, argv[4] = mode (0-3); written into bits [1:0] of
+ * 0x2004 + port*0x100.
+ */
+void doVlanSetPortMode(int argc, char *argv[])
+{
+	unsigned char port = 0;
+	unsigned char mode = 0;
+	unsigned int value;
+	unsigned int reg;
+	port = atoi(argv[3]);
+	mode = atoi(argv[4]);
+	printf("port: %x, mode: %x\n", port, mode);
+
+	/*Check the input parameters is right or not.*/
+	if (port > SWITCH_MAX_PORT || mode > 3) {
+		printf(HELP_VLAN_PORT_MODE);
+		return;
+	}
+
+	reg = 0x2004 + port * 0x100;
+	reg_read(reg, &value);
+	value &= (~((1 << 0) | (1 << 1)));
+	value |= (mode & 0x3);
+	printf("write reg: %x, value: %x\n", reg, value);
+	reg_write(reg, value);
+}
+
+/*
+ * Set the per-port egress-tag control field in the PCR register.
+ * argv[3] = port, argv[4] = eg_tag (0..REG_PCR_EG_TAG_RELMASK).
+ */
+void doVlanSetEgressTagPCR(int argc, char *argv[])
+{
+	unsigned char port = 0;
+	unsigned char eg_tag = 0;
+	unsigned int value;
+	unsigned int reg;
+
+	port = atoi(argv[3]);
+	eg_tag = atoi(argv[4]);
+
+	printf("port: %d, eg_tag: %d\n", port, eg_tag);
+
+	/*Check the input parameters is right or not.*/
+	if ((port > SWITCH_MAX_PORT) || (eg_tag > REG_PCR_EG_TAG_RELMASK)) {
+		printf(HELP_VLAN_EGRESS_TAG_PCR);
+		return;
+	}
+
+	reg = REG_PCR_P0_ADDR + port * 0x100;
+	reg_read(reg, &value);
+	value &= (~REG_PCR_EG_TAG_MASK);
+	value |= ((unsigned int)eg_tag << REG_PCR_EG_TAG_OFFT);
+
+	printf("write reg: %x, value: %x\n", reg, value);
+	reg_write(reg, value);
+
+} /*end doVlanSetEgressTagPCR*/
+
+/*
+ * Set the per-port egress-tag control field in the PVC register.
+ * argv[3] = port, argv[4] = eg_tag (0..REG_PVC_EG_TAG_RELMASK).
+ */
+void doVlanSetEgressTagPVC(int argc, char *argv[])
+{
+	unsigned char port = 0;
+	unsigned char eg_tag = 0;
+	unsigned int value;
+	unsigned int reg;
+
+	port = atoi(argv[3]);
+	eg_tag = atoi(argv[4]);
+
+	printf("port: %d, eg_tag: %d\n", port, eg_tag);
+
+	/*Check the input parameters is right or not.*/
+	if ((port > SWITCH_MAX_PORT) || (eg_tag > REG_PVC_EG_TAG_RELMASK)) {
+		printf(HELP_VLAN_EGRESS_TAG_PVC);
+		return;
+	}
+
+	reg = REG_PVC_P0_ADDR + port * 0x100;
+	reg_read(reg, &value);
+	value &= (~REG_PVC_EG_TAG_MASK);
+	value |= ((unsigned int)eg_tag << REG_PVC_EG_TAG_OFFT);
+
+	printf("write reg: %x, value: %x\n", reg, value);
+	reg_write(reg, value);
+} /*end doVlanSetEgressTagPVC*/
+
+/*
+ * Configure ARL (MAC address table) aging.
+ *
+ * argv[3] = aging_en (0 = disable aging, 1 = enable)
+ * argv[4] = aging time, 1..65536 (unit presumed seconds — confirm)
+ *
+ * Register 0xa0: setting bit 20 disables aging. The time is split into
+ * an aging unit (bits [11:0]) and an aging count (from bit 12) so that
+ * time ~= (aging_unit + 1) * (aging_cnt + 1).
+ *
+ * Fixes: removed a stray empty statement after the declarations, and
+ * replaced the dead `time <= 0` test (time is unsigned, so only == 0 can
+ * hold) with an explicit `time == 0`.
+ */
+void doArlAging(int argc, char *argv[])
+{
+	unsigned char aging_en = 0;
+	unsigned int time = 0, aging_cnt = 0, aging_unit = 0, value, reg;
+
+	aging_en = atoi(argv[3]);
+	time = atoi(argv[4]);
+	printf("aging_en: %x, aging time: %x\n", aging_en, time);
+
+	/*Check the input parameters is right or not.*/
+	if ((aging_en != 0 && aging_en != 1) || time == 0 || time > 65536) {
+		printf(HELP_ARL_AGING);
+		return;
+	}
+
+	reg = 0xa0;
+	reg_read(reg, &value);
+	value &= (~(1 << 20));	/* bit 20 = aging disable */
+	if (!aging_en)
+		value |= (1 << 20);
+
+	/* split the requested time into (unit, count) so both fit their fields */
+	aging_unit = (time / 0x100) + 1;
+	aging_cnt = (time / aging_unit);
+	aging_unit--;
+	aging_cnt--;
+
+	value &= (0xfff00000);	/* clear the old unit/count fields (bit 20 kept) */
+	value |= ((aging_cnt << 12) | aging_unit);
+
+	printf("aging_unit: %x, aging_cnt: %x\n", aging_unit, aging_cnt);
+	printf("write reg: %x, value: %x\n", reg, value);
+
+	reg_write(reg, value);
+}
+
+/*
+ * Enable/disable port mirroring and select the capture (mirror-to) port.
+ * argv[3] = mirror_en (0|1), argv[4] = mirror port.
+ * Both fields live in the global CFC register (REG_CFC_ADDR).
+ */
+void doMirrorEn(int argc, char *argv[])
+{
+	unsigned char mirror_en;
+	unsigned char mirror_port;
+	unsigned int value, reg;
+
+	mirror_en = atoi(argv[3]);
+	mirror_port = atoi(argv[4]);
+
+	printf("mirror_en: %d, mirror_port: %d\n", mirror_en, mirror_port);
+
+	/*Check the input parameters is right or not.*/
+	if ((mirror_en > 1) || (mirror_port > REG_CFC_MIRROR_PORT_RELMASK)) {
+		printf(HELP_MIRROR_EN);
+		return;
+	}
+
+	reg = REG_CFC_ADDR;
+	reg_read(reg, &value);
+	value &= (~REG_CFC_MIRROR_EN_MASK);
+	value |= (mirror_en << REG_CFC_MIRROR_EN_OFFT);
+	value &= (~REG_CFC_MIRROR_PORT_MASK);
+	value |= (mirror_port << REG_CFC_MIRROR_PORT_OFFT);
+
+	printf("write reg: %x, value: %x\n", reg, value);
+	reg_write(reg, value);
+
+} /*end doMirrorEn*/
+
+/*
+ * Configure which traffic of a port is mirrored.
+ * argv[3] = port (0-6, CPU port allowed), argv[4] = tx mirror,
+ * argv[5] = rx mirror, argv[6] = acl mirror, argv[7] = vlan-miss mirror,
+ * argv[8] = igmp mirror (flags 0|1).
+ * tx/rx/acl/vlan-miss flags live in the port's PCR register; the IGMP
+ * flag lives in its PIC register.
+ * NOTE(review): igmp_mir is not range-checked like the other flags —
+ * confirm whether values > 1 are meaningful.
+ */
+void doMirrorPortBased(int argc, char *argv[])
+{
+	unsigned char port, port_tx_mir, port_rx_mir, vlan_mis, acl_mir, igmp_mir;
+	unsigned int value, reg;
+
+	port = atoi(argv[3]);
+	port_tx_mir = atoi(argv[4]);
+	port_rx_mir = atoi(argv[5]);
+	acl_mir = atoi(argv[6]);
+	vlan_mis = atoi(argv[7]);
+	igmp_mir = atoi(argv[8]);
+
+	printf("port:%d, port_tx_mir:%d, port_rx_mir:%d, acl_mir:%d, vlan_mis:%d, igmp_mir:%d\n", port, port_tx_mir, port_rx_mir, acl_mir, vlan_mis, igmp_mir);
+
+	/*Check the input parameters is right or not.*/
+	//if((port >= vlanCap->max_port_no) || (port_tx_mir > 1) || (port_rx_mir > 1) || (acl_mir > 1) || (vlan_mis > 1)){
+	if ((port >= 7) || (port_tx_mir > 1) || (port_rx_mir > 1) || (acl_mir > 1) || (vlan_mis > 1)) { // also allow CPU port (port6)
+		printf(HELP_MIRROR_PORTBASED);
+		return;
+	}
+
+	reg = REG_PCR_P0_ADDR + port * 0x100;
+	reg_read(reg, &value);
+	value &= ~(REG_PORT_TX_MIR_MASK | REG_PORT_RX_MIR_MASK | REG_PCR_ACL_MIR_MASK | REG_PCR_VLAN_MIS_MASK);
+	value |= (port_tx_mir << REG_PORT_TX_MIR_OFFT) + (port_rx_mir << REG_PORT_RX_MIR_OFFT);
+	value |= (acl_mir << REG_PCR_ACL_MIR_OFFT) + (vlan_mis << REG_PCR_VLAN_MIS_OFFT);
+
+	printf("write reg: %x, value: %x\n", reg, value);
+	reg_write(reg, value);
+
+	reg = REG_PIC_P0_ADDR + port * 0x100;
+	reg_read(reg, &value);
+	value &= ~(REG_PIC_IGMP_MIR_MASK);
+	value |= (igmp_mir << REG_PIC_IGMP_MIR_OFFT);
+
+	printf("write reg: %x, value: %x\n", reg, value);
+	reg_write(reg, value);
+
+} /*end doMirrorPortBased*/
+
+/*
+ * Set the spanning-tree state of a port for one FID.
+ * argv[2] = port, argv[3] = fid (0-7), argv[4] = state (0-3).
+ * Each FID occupies a 2-bit field at bit offset fid*4 in the per-port
+ * SSC register (REG_SSC_P0_ADDR + port*0x100).
+ * NOTE(review): unlike most commands here, arguments start at argv[2] —
+ * confirm against the command dispatch table.
+ */
+void doStp(int argc, char *argv[])
+{
+	unsigned char port = 0;
+	unsigned char fid = 0;
+	unsigned char state = 0;
+	unsigned int value;
+	unsigned int reg;
+
+	port = atoi(argv[2]);
+	fid = atoi(argv[3]);
+	state = atoi(argv[4]);
+
+	printf("port: %d, fid: %d, state: %d\n", port, fid, state);
+
+	/*Check the input parameters is right or not.*/
+	if ((port > MAX_PORT + 1) || (fid > 7) || (state > 3)) {
+		printf(HELP_STP);
+		return;
+	}
+
+	reg = REG_SSC_P0_ADDR + port * 0x100;
+	reg_read(reg, &value);
+	value &= (~(3 << (fid << 2)));
+	value |= ((unsigned int)state << (fid << 2));
+
+	printf("write reg: %x, value: %x\n", reg, value);
+	reg_write(reg, value);
+}
+
+/*
+ * Enable/disable ingress rate limiting on a port.
+ *
+ * on_off : 1 programs the token bucket; any other value clears the register
+ * port   : switch port (0-6), selects register 0x1800 + 0x100*port
+ * bw     : committed rate in kbps (max 1G on MT7530, 2.5G on MT7531)
+ *
+ * The rate is programmed in 32-kbps units in bits [31:16]; on MT7531 an
+ * extra bit (bit 12) covers rates whose 32-kbps count exceeds 16 bits
+ * (field layout presumed from the constants — confirm with datasheet).
+ * Returns 0 on success, -1 when bw exceeds the chip's line rate.
+ *
+ * Fix: normalized the broken space/tab indentation and dangling brace
+ * placement in the MT7531 branch; logic is unchanged.
+ */
+int ingress_rate_set(int on_off, unsigned int port, unsigned int bw)
+{
+	unsigned int reg, value;
+
+	reg = 0x1800 + (0x100 * port);
+	value = 0;
+	/*token-bucket*/
+	if (on_off == 1) {
+		if (chip_name == 0x7530) {
+			if (bw > 1000000) {
+				printf("\n**Charge rate(%d) is larger than line rate(1000000kbps)**\n",bw);
+				return -1;
+			}
+			value = ((bw / 32) << 16) + (1 << 15) + (7 << 8) + (1 << 7) + 0x0f;
+		} else if (chip_name == 0x7531) {
+			if (bw > 2500000) {
+				printf("\n**Charge rate(%d) is larger than line rate(2500000kbps)**\n",bw);
+				return -1;
+			}
+			if (bw / 32 >= 65536)	/* support the 2.5G case */
+				value = ((bw / 32) << 16) + (1 << 15) + (1 << 14) + (1 << 12) + (7 << 8) + 0xf;
+			else
+				value = ((bw / 32) << 16) + (1 << 15) + (1 << 14) + (7 << 8) + 0xf;
+		} else {
+			printf("unknow chip\n");
+		}
+	}
+
+#if leaky_bucket
+	reg_read(reg, &value);
+	value &= 0xffff0000;
+	if (on_off == 1)
+	{
+		value |= on_off << 15;
+		//7530 same as 7531
+		if (bw < 100) {
+			value |= (0x0 << 8);
+			value |= bw;
+		} else if (bw < 1000) {
+			value |= (0x1 << 8);
+			value |= bw / 10;
+		} else if (bw < 10000) {
+			value |= (0x2 << 8);
+			value |= bw / 100;
+		} else if (bw < 100000) {
+			value |= (0x3 << 8);
+			value |= bw / 1000;
+		} else {
+			value |= (0x4 << 8);
+			value |= bw / 10000;
+		}
+	}
+#endif
+	reg_write(reg, value);
+	reg = 0x1FFC;
+	reg_read(reg, &value);
+	/* NOTE(review): the read result is discarded and a fixed 0x110104 is
+	 * written back — confirm this unconditional overwrite is intended. */
+	value = 0x110104;
+	reg_write(reg, value);
+	return 0;
+}
+
+/*
+ * Enable/disable egress rate limiting on a port.
+ *
+ * on_off : 1 programs the token bucket; any other value clears the register
+ * port   : switch port, selects register 0x1040 + 0x100*port
+ * bw     : rate in kbps (max 1G on MT7530, 2.5G on MT7531); negative
+ *          values are rejected
+ *
+ * Same field layout as ingress_rate_set(). Returns 0 on success, -1 on a
+ * rate above the chip's line rate.
+ *
+ * Fix: normalized the broken space/tab indentation in the MT7531 branch;
+ * logic is unchanged.
+ */
+int egress_rate_set(int on_off, int port, int bw)
+{
+	unsigned int reg, value;
+
+	reg = 0x1040 + (0x100 * port);
+	value = 0;
+	/*token-bucket*/
+	if (on_off == 1) {
+		if (chip_name == 0x7530) {
+			if (bw < 0 || bw > 1000000) {
+				printf("\n**Charge rate(%d) is larger than line rate(1000000kbps)**\n",bw);
+				return -1;
+			}
+			value = ((bw / 32) << 16) + (1 << 15) + (7 << 8) + (1 << 7) + 0xf;
+		} else if (chip_name == 0x7531) {
+			if (bw < 0 || bw > 2500000) {
+				printf("\n**Charge rate(%d) is larger than line rate(2500000kbps)**\n",bw);
+				return -1;
+			}
+			if (bw / 32 >= 65536)	/* support 2.5G cases */
+				value = ((bw / 32) << 16) + (1 << 15) + (1 << 14) + (1 << 12) + (7 << 8) + 0xf;
+			else
+				value = ((bw / 32) << 16) + (1 << 15) + (1 << 14) + (7 << 8) + 0xf;
+		} else {
+			printf("unknow chip\n");
+		}
+	}
+	reg_write(reg, value);
+	reg = 0x10E0;
+	reg_read(reg, &value);
+	value &= 0x18;
+	reg_write(reg, value);
+
+	return 0;
+}
+
+/*
+ * Dispatch a rate-limit request.
+ * argv[2] = direction (1 = ingress, 0 = egress), argv[3] = port (0-6),
+ * argv[4] = rate in kbps. Anything else is silently ignored.
+ */
+void rate_control(int argc, char *argv[])
+{
+	unsigned char direction = atoi(argv[2]);
+	unsigned char port_no = atoi(argv[3]);
+	unsigned int kbps = atoi(argv[4]);
+
+	if (port_no > 6)
+		return;
+
+	switch (direction) {
+	case 1:	/* ingress */
+		ingress_rate_set(1, port_no, kbps);
+		break;
+	case 0:	/* egress */
+		egress_rate_set(1, port_no, kbps);
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * Enable or disable the MT7531 collision pool.
+ * argv[3] = enable (0|1).
+ * Enable sequence: assert reset, enable the clock, de-assert reset, then
+ * set the pool-enable bit; disable runs the inverse order. All bits live
+ * in REG_CPGC_ADDR. Returns 0, or -1 on a bad argument.
+ *
+ * Fix: corrected the misindented closing braces and the "enanble" comment
+ * typo, and removed stray blank lines; logic is unchanged.
+ */
+int collision_pool_enable(int argc, char *argv[])
+{
+	unsigned char enable;
+	unsigned int value, reg;
+
+	enable = atoi(argv[3]);
+
+	printf("collision pool enable: %d \n", enable);
+
+	/*Check the input parameters is right or not.*/
+	if (enable > 1) {
+		printf(HELP_COLLISION_POOL_EN);
+		return -1;
+	}
+
+	if (chip_name == 0x7531) {
+		reg = REG_CPGC_ADDR;
+		if (enable == 1) {
+			/* active reset */
+			reg_read(reg, &value);
+			value &= (~REG_CPCG_COL_RST_N_MASK);
+			reg_write(reg, value);
+
+			/* enable clock */
+			reg_read(reg, &value);
+			value &= (~REG_CPCG_COL_CLK_EN_MASK);
+			value |= (1 << REG_CPCG_COL_CLK_EN_OFFT);
+			reg_write(reg, value);
+
+			/* inactive reset */
+			reg_read(reg, &value);
+			value &= (~REG_CPCG_COL_RST_N_MASK);
+			value |= (1 << REG_CPCG_COL_RST_N_OFFT);
+			reg_write(reg, value);
+
+			/* enable collision pool */
+			reg_read(reg, &value);
+			value &= (~REG_CPCG_COL_EN_MASK);
+			value |= (1 << REG_CPCG_COL_EN_OFFT);
+			reg_write(reg, value);
+
+			reg_read(reg, &value);
+			printf("write reg: %x, value: %x\n", reg, value);
+		} else {
+			/* disable collision pool */
+			reg_read(reg, &value);
+			value &= (~REG_CPCG_COL_EN_MASK);
+			reg_write(reg, value);
+
+			/* active reset */
+			reg_read(reg, &value);
+			value &= (~REG_CPCG_COL_RST_N_MASK);
+			reg_write(reg, value);
+
+			/* inactive reset */
+			reg_read(reg, &value);
+			value &= (~REG_CPCG_COL_RST_N_MASK);
+			value |= (1 << REG_CPCG_COL_RST_N_OFFT);
+			reg_write(reg, value);
+
+			/* disable clock */
+			reg_read(reg, &value);
+			value &= (~REG_CPCG_COL_CLK_EN_MASK);
+			reg_write(reg, value);
+
+			reg_read(reg, &value);
+			printf("write reg: %x, value: %x\n", reg, value);
+		}
+	} else {
+		printf("\nCommand not support by this chip.\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Dump the MAC table entries held in the collision pool (MT7531 only).
+ * Refuses to run while the pool-enable bit in REG_CPGC_ADDR is clear.
+ */
+void collision_pool_mac_dump()
+{
+	unsigned int value, reg;
+
+	if (chip_name == 0x7531) {
+		reg = REG_CPGC_ADDR;
+		reg_read(reg, &value);
+		if(value & REG_CPCG_COL_EN_MASK)
+			table_dump_internal(COLLISION_TABLE);
+		else
+			printf("\ncollision pool is disabled, please enable it before use this command.\n");
+	}else {
+		printf("\nCommand not support by this chip.\n");
+	}
+}
+
+/*
+ * Dump the DIP table entries held in the collision pool (MT7531 only).
+ * Refuses to run while the pool-enable bit in REG_CPGC_ADDR is clear.
+ *
+ * Fix: corrected the misindented `}else` and removed stray trailing blank
+ * lines so the function matches its mac/sip siblings; logic is unchanged.
+ */
+void collision_pool_dip_dump()
+{
+	unsigned int value, reg;
+
+	if (chip_name == 0x7531) {
+		reg = REG_CPGC_ADDR;
+		reg_read(reg, &value);
+		if (value & REG_CPCG_COL_EN_MASK)
+			dip_dump_internal(COLLISION_TABLE);
+		else
+			printf("\ncollision pool is disabled, please enable it before use this command.\n");
+	} else {
+		printf("\nCommand not support by this chip.\n");
+	}
+}
+
+/*
+ * Dump the SIP table entries held in the collision pool (MT7531 only).
+ * Refuses to run while the pool-enable bit in REG_CPGC_ADDR is clear.
+ */
+void collision_pool_sip_dump()
+{
+	unsigned int value, reg;
+
+	if (chip_name == 0x7531) {
+		reg = REG_CPGC_ADDR;
+		reg_read(reg, &value);
+		if(value & REG_CPCG_COL_EN_MASK)
+			sip_dump_internal(COLLISION_TABLE);
+		else
+			printf("\ncollision pool is disabled, please enable it before use this command.\n");
+	}else {
+		printf("\nCommand not support by this chip.\n");
+	}
+
+
+}
+
+/*
+ * Print the per-user-priority PFC RX pause-on counters of a port
+ * (MT7531 only). argv[3] = port (0-6).
+ * Priorities 0-3 are one byte each in PFC_RX_COUNTER_L, 4-7 in
+ * PFC_RX_COUNTER_H. The two trailing PMSR_P reads implement the
+ * original note "for rx counter could be updated successfully" —
+ * presumably a read-to-latch quirk; confirm against the datasheet.
+ */
+void pfc_get_rx_counter(int argc, char *argv[])
+{
+	int port;
+	unsigned int value, reg;
+	unsigned int user_pri;
+
+	port = strtoul(argv[3], NULL, 0);
+	if (port < 0 || 6 < port) {
+		printf("wrong port range, should be within 0~6\n");
+		return;
+	}
+
+	if (chip_name == 0x7531) {
+		reg= PFC_RX_COUNTER_L(port);
+		reg_read(reg, &value);
+		user_pri = value & 0xff;
+		printf("\n port %d rx pfc (up=0)pause on counter is %d.\n", port,user_pri);
+		user_pri = (value & 0xff00) >> 8;
+		printf("\n port %d rx pfc (up=1)pause on counter is %d.\n", port,user_pri);
+		user_pri = (value & 0xff0000) >> 16;
+		printf("\n port %d rx pfc (up=2)pause on counter is %d.\n", port,user_pri);
+		user_pri = (value & 0xff000000) >> 24;
+		printf("\n port %d rx pfc (up=3)pause on counter is %d.\n", port,user_pri);
+
+		reg= PFC_RX_COUNTER_H(port);
+		reg_read(reg, &value);
+		user_pri = value & 0xff;
+		printf("\n port %d rx pfc (up=4)pause on counter is %d.\n", port,user_pri);
+		user_pri = (value & 0xff00) >> 8;
+		printf("\n port %d rx pfc (up=5)pause on counter is %d.\n", port,user_pri);
+		user_pri = (value & 0xff0000) >> 16;
+		printf("\n port %d rx pfc (up=6)pause on counter is %d.\n", port,user_pri);
+		user_pri = (value & 0xff000000) >> 24;
+		printf("\n port %d rx pfc (up=7)pause on counter is %d.\n", port,user_pri);
+
+		/* for rx counter could be updated successfully */
+		reg_read(PMSR_P(port), &value);
+		reg_read(PMSR_P(port), &value);
+	}else {
+		printf("\nCommand not support by this chip.\n");
+	}
+
+}
+
+/*
+ * Print the per-user-priority PFC TX pause-on counters of a port
+ * (MT7531 only). argv[3] = port (0-6).
+ * Priorities 0-3 are one byte each in PFC_TX_COUNTER_L, 4-7 in
+ * PFC_TX_COUNTER_H; the trailing PMSR_P reads mirror the RX variant
+ * (presumed read-to-latch quirk — confirm against the datasheet).
+ */
+void pfc_get_tx_counter(int argc, char *argv[])
+{
+	int port;
+	unsigned int value, reg;
+	unsigned int user_pri;
+
+	port = strtoul(argv[3], NULL, 0);
+	if (port < 0 || 6 < port) {
+		printf("wrong port range, should be within 0~6\n");
+		return;
+	}
+
+	if (chip_name == 0x7531) {
+		reg= PFC_TX_COUNTER_L(port);
+		reg_read(reg, &value);
+		user_pri = value & 0xff;
+		printf("\n port %d tx pfc (up=0)pause on counter is %d.\n", port,user_pri);
+		user_pri = (value & 0xff00) >> 8;
+		printf("\n port %d tx pfc (up=1)pause on counter is %d.\n", port,user_pri);
+		user_pri = (value & 0xff0000) >> 16;
+		printf("\n port %d tx pfc (up=2)pause on counter is %d.\n", port,user_pri);
+		user_pri = (value & 0xff000000) >> 24;
+		printf("\n port %d tx pfc (up=3)pause on counter is %d.\n", port,user_pri);
+
+		reg= PFC_TX_COUNTER_H(port);
+		reg_read(reg, &value);
+		user_pri = value & 0xff;
+		printf("\n port %d tx pfc (up=4)pause on counter is %d.\n", port,user_pri);
+		user_pri = (value & 0xff00) >> 8;
+		printf("\n port %d tx pfc (up=5)pause on counter is %d.\n", port,user_pri);
+		user_pri = (value & 0xff0000) >> 16;
+		printf("\n port %d tx pfc (up=6)pause on counter is %d.\n", port,user_pri);
+		user_pri = (value & 0xff000000) >> 24;
+		printf("\n port %d tx pfc (up=7)pause on counter is %d.\n", port,user_pri);
+
+		/* for tx counter could be updated successfully */
+		reg_read(PMSR_P(port), &value);
+		reg_read(PMSR_P(port), &value);
+	}else {
+		 printf("\nCommand not support by this chip.\n");
+	}
+}
+
+/*
+ * Print all 8 output-queue counters for ports 0-6.
+ * The counters are read indirectly: write a selector to 0x7038
+ * (base 0x220 + port*4 + pair), then read 0x7034, which returns two 8-bit
+ * queue counters (queues 2*pair and 2*pair+1) in its low 16 bits.
+ *
+ * Fix: collapsed four copy-pasted read/print stanzas into a loop; output
+ * is byte-identical to the original.
+ */
+void read_output_queue_counters()
+{
+	unsigned int port;
+	unsigned int pair;
+	unsigned int value, output_queue;
+	unsigned int base = 0x220;
+
+	for (port = 0; port < 7; port++) {
+		for (pair = 0; pair < 4; pair++) {
+			reg_write(0x7038, base + (port * 4) + pair);
+			reg_read(0x7034, &value);
+			output_queue = value & 0xff;
+			printf("\n port %d  output queue %d counter is %d.\n",
+			       port, pair * 2, output_queue);
+			output_queue = (value & 0xff00) >> 8;
+			printf("\n port %d  output queue %d counter is %d.\n",
+			       port, pair * 2 + 1, output_queue);
+		}
+	}
+}
+
+/*
+ * Print the free page-link counter and the flow-control watermarks
+ * (system / port / per-queue). Field positions and widths differ between
+ * MT7531 and the other layout (presumed MT7530), hence the two branches.
+ */
+void read_free_page_counters()
+{
+	unsigned int value;
+	unsigned int free_page,free_page_last_read;
+	unsigned int fc_free_blk_lothd,fc_free_blk_hithd;
+	unsigned int fc_port_blk_thd,fc_port_blk_hi_thd;
+	unsigned int queue[8]={0};
+
+	if (chip_name == 0x7531) {
+		/* get system free page link counter */
+		reg_read(0x1fc0, &value);
+		free_page = value & 0xFFF;
+		free_page_last_read = (value & 0xFFF0000) >> 16;
+
+		/* get system flow control watermark */
+		reg_read(0x1fe0, &value);
+		fc_free_blk_lothd = value & 0x3FF;
+		fc_free_blk_hithd = (value & 0x3FF0000) >> 16;
+
+		/* get port flow control watermark */
+		reg_read(0x1fe4, &value);
+		fc_port_blk_thd = value & 0x3FF;
+		fc_port_blk_hi_thd = (value & 0x3FF0000) >> 16;
+
+		/* get queue flow control watermark */
+		reg_read(0x1fe8, &value);
+		queue[0]= value & 0x3F;
+		queue[1]= (value & 0x3F00) >> 8;
+		queue[2]= (value & 0x3F0000) >> 16;
+		queue[3]= (value & 0x3F000000) >> 24;
+		reg_read(0x1fec, &value);
+		queue[4]= value & 0x3F;
+		queue[5]= (value & 0x3F00) >> 8;
+		queue[6]= (value & 0x3F0000) >> 16;
+		queue[7]= (value & 0x3F000000) >> 24;
+	} else {
+		/* get system free page link counter */
+		reg_read(0x1fc0, &value);
+		free_page = value & 0x3FF;
+		free_page_last_read = (value & 0x3FF0000) >> 16;
+
+		/* get system flow control watermark */
+		reg_read(0x1fe0, &value);
+		fc_free_blk_lothd = value & 0xFF;
+		fc_free_blk_hithd = (value & 0xFF00) >> 8;
+
+		/* get port flow control watermark */
+		reg_read(0x1fe0, &value);
+		fc_port_blk_thd = (value & 0xFF0000) >> 16;
+		reg_read(0x1ff4, &value);
+		fc_port_blk_hi_thd = (value & 0xFF00) >> 8;
+
+		/* get queue flow control watermark */
+		reg_read(0x1fe4, &value);
+		queue[0]= value & 0xF;
+		queue[1]= (value & 0xF0) >> 4;
+		queue[2]= (value & 0xF00) >> 8;
+		queue[3]= (value & 0xF000) >>12;
+		queue[4]= (value & 0xF0000) >>16;
+		queue[5]= (value & 0xF00000) >> 20;
+		queue[6]= (value & 0xF000000) >> 24;
+		queue[7]= (value & 0xF0000000) >> 28;
+	}
+
+	/* thresholds are stored in blocks; *2 converts to pages (presumed) */
+	printf("<===Free Page=======Current=======Last Read access=====> \n ");
+	printf("	                                                 \n ");
+	printf(" page counter      %u                %u               \n ",free_page,free_page_last_read);
+	printf("                                                        \n ");
+	printf("========================================================= \n ");
+	printf("<===Type=======High threshold======Low threshold=========\n ");
+	printf("                                                        \n ");
+	printf("  system:         %u                 %u               \n", fc_free_blk_hithd*2,  fc_free_blk_lothd*2);
+	printf("    port:         %u                 %u               \n", fc_port_blk_hi_thd*2, fc_port_blk_thd*2);
+	printf(" queue 0:         %u                 NA                \n", queue[0]);
+	printf(" queue 1:         %u                 NA                \n", queue[1]);
+	printf(" queue 2:         %u                 NA                 \n", queue[2]);
+	printf(" queue 3:         %u                 NA                \n", queue[3]);
+	printf(" queue 4:         %u                 NA                \n", queue[4]);
+	printf(" queue 5:         %u                 NA                \n", queue[5]);
+	printf(" queue 6:         %u                 NA                \n", queue[6]);
+	printf(" queue 7:         %u                 NA                \n", queue[7]);
+	printf("=========================================================\n ");
+}
+
+/*
+ * Enable/disable EEE (802.3az) advertisement on the PHY ports.
+ * argv[2] = "enable" | "disable"; optional argv[3] is either a single
+ * port number (0..MAX_PHY_PORT-1) or a 5-character 0/1 portmap
+ * (p0..p4 left to right). Without argv[3] all PHY ports are affected.
+ * Advertisement is written via clause-45 register 7.0x3c (eee_cap = 6;
+ * exact advertised modes presumed — confirm against 802.3 Annex 45).
+ * On MT7531 the global EEE switch (dev 0x1f, reg 0x403, bit 6) is set
+ * whenever any port remains enabled.
+ * NOTE(review): `value | eee_cap` is non-zero for *every* port while
+ * enabling (eee_cap == 6), so eee_en_bitmap can include ports outside
+ * port_map — confirm intended given the readback-zero workaround below.
+ */
+void eee_enable(int argc, char *argv[])
+{
+	unsigned long enable;
+	unsigned int value;
+	unsigned int eee_cap;
+	unsigned int eee_en_bitmap = 0;
+	unsigned long port_map;
+	long port_num = -1;
+	int p;
+
+	if (argc < 3)
+		goto error;
+
+	/*Check the input parameters is right or not.*/
+	if (!strncmp(argv[2], "enable", 7))
+		enable = 1;
+	else if (!strncmp(argv[2], "disable", 8))
+		enable = 0;
+	else
+		goto error;
+
+	if (argc > 3) {
+		if (strlen(argv[3]) == 1) {
+			port_num = strtol(argv[3], (char **)NULL, 10);
+			if (port_num < 0 || port_num > MAX_PHY_PORT - 1) {
+				printf("Illegal port index and port:0~4\n");
+				goto error;
+			}
+			port_map = 1 << port_num;
+		} else if (strlen(argv[3]) == 5) {
+			port_map = 0;
+			for (p = 0; p < MAX_PHY_PORT; p++) {
+				if (argv[3][p] != '0' && argv[3][p] != '1') {
+					printf("portmap format error, should be combination of 0 or 1\n");
+					goto error;
+				}
+				port_map |= ((argv[3][p] - '0') << p);
+			}
+		} else {
+			printf("port_no or portmap format error, should be length of 1 or 5\n");
+			goto error;
+		}
+	} else {
+		port_map = 0x1f;
+	}
+
+	eee_cap = (enable)? 6: 0;
+	for (p = 0; p < MAX_PHY_PORT; p++) {
+		/* port_map describe p0p1p2p3p4 from left to rignt */
+		if(!!(port_map & (1 << p)))
+			mii_mgr_c45_write(p, 0x7, 0x3c, eee_cap);
+
+		mii_mgr_c45_read(p, 0x7, 0x3c, &value);
+		/* mt7531: Always readback eee_cap = 0 when global EEE switch
+		 * is turned off.
+		 */
+		if (value | eee_cap)
+			eee_en_bitmap |= (1 << (MAX_PHY_PORT - 1 - p));
+	}
+
+	/* Turn on/off global EEE switch */
+	if (chip_name == 0x7531) {
+		mii_mgr_c45_read(0, 0x1f, 0x403, &value);
+		if (eee_en_bitmap)
+			value |= (1 << 6);
+		else
+			value &= ~(1 << 6);
+		mii_mgr_c45_write(0, 0x1f, 0x403, value);
+	} else {
+		printf("\nCommand not support by this chip.\n");
+	}
+
+	printf("EEE(802.3az) %s", (enable)? "enable": "disable");
+	if (argc == 4) {
+		if (port_num >= 0)
+			printf(" port%ld", port_num);
+		else
+			printf(" port_map: %s", argv[3]);
+	} else {
+		printf(" all ports");
+	}
+	printf("\n");
+
+	return;
+error:
+	printf(HELP_EEE_EN);
+	return;
+}
+
+/*
+ * Print EEE capability (clause-45 7.0x3c) and link-partner capability
+ * (7.0x3d) for every PHY port, or only argv[3] when given; the selected
+ * port additionally prints status register 3.0x1.
+ * NOTE(review): the range check allows port == MAX_PHY_PORT, which the
+ * dump loop (p < MAX_PHY_PORT) never reaches — confirm intended bound.
+ */
+void eee_dump(int argc, char *argv[])
+{
+	unsigned int cap, lp_cap;
+	long port = -1;
+	int p;
+
+	if (argc > 3) {
+		if (strlen(argv[3]) > 1) {
+			printf("port# format error, should be of length 1\n");
+			return;
+		}
+
+		port = strtol(argv[3], (char **)NULL, 0);
+		if (port < 0 || port > MAX_PHY_PORT) {
+			printf("port# format error, should be 0 to %d\n",
+				       MAX_PHY_PORT);
+			return;
+		}
+	}
+
+	for (p = 0; p < MAX_PHY_PORT; p++) {
+		if (port >= 0 && p != port)
+			continue;
+
+		mii_mgr_c45_read(p, 0x7, 0x3c, &cap);
+		mii_mgr_c45_read(p, 0x7, 0x3d, &lp_cap);
+		printf("port%d EEE cap=0x%02x, link partner EEE cap=0x%02x",
+		       p, cap, lp_cap);
+
+		if (port >= 0 && p == port) {
+			mii_mgr_c45_read(p, 0x3, 0x1, &cap);
+			printf(", st=0x%03x", cap);
+		}
+		printf("\n");
+	}
+}
+
+/*
+ * Print one MIB counter, read from (base + 0x100*port), for ports 0-6
+ * as right-aligned fields on a single line, then a newline.
+ */
+void dump_each_port(unsigned int base)
+{
+	unsigned int count;
+	int port;
+
+	for (port = 0; port < 7; port++) {
+		reg_read(base + port * 0x100, &count);
+		printf("%8u ", count);
+	}
+	printf("\n");
+}
+
+/*
+ * Dump all per-port MIB counters (ports 0-6) as a fixed-width table.
+ * Each row is a counter label followed by the seven per-port values read
+ * from the MIB register block at 0x4000.
+ */
+void read_mib_counters()
+{
+	static const struct {
+		const char *label;	/* row label, padded to align the columns */
+		unsigned int offset;	/* MIB counter base register */
+	} mib_rows[] = {
+		{ "Tx Drop Packet      :", 0x4000 },
+		{ "Tx CRC Error        :", 0x4004 },
+		{ "Tx Unicast Packet   :", 0x4008 },
+		{ "Tx Multicast Packet :", 0x400C },
+		{ "Tx Broadcast Packet :", 0x4010 },
+		{ "Tx Collision Event  :", 0x4014 },
+		{ "Tx Pause Packet     :", 0x402C },
+		{ "Rx Drop Packet      :", 0x4060 },
+		{ "Rx Filtering Packet :", 0x4064 },
+		{ "Rx Unicast Packet   :", 0x4068 },
+		{ "Rx Multicast Packet :", 0x406C },
+		{ "Rx Broadcast Packet :", 0x4070 },
+		{ "Rx Alignment Error  :", 0x4074 },
+		{ "Rx CRC Error\t    :", 0x4078 },
+		{ "Rx Undersize Error  :", 0x407C },
+		{ "Rx Fragment Error   :", 0x4080 },
+		{ "Rx Oversize Error   :", 0x4084 },
+		{ "Rx Jabber Error     :", 0x4088 },
+		{ "Rx Pause Packet     :", 0x408C },
+	};
+	unsigned int row;
+
+	printf("===================== %8s %8s %8s %8s %8s %8s %8s\n",
+	       "Port0", "Port1", "Port2", "Port3", "Port4", "Port5", "Port6");
+	for (row = 0; row < sizeof(mib_rows) / sizeof(mib_rows[0]); row++) {
+		printf("%s", mib_rows[row].label);
+		dump_each_port(mib_rows[row].offset);
+	}
+}
+
+/*
+ * Print a final snapshot of the MIB counters and clear them.
+ * Register 0x4fe0 is the MIB control register: 0xf0 is written before the
+ * dump and 0x800000f0 afterwards (clear command presumed from bit 31 —
+ * confirm against the datasheet).
+ */
+void clear_mib_counters()
+{
+	const unsigned int mib_cr = 0x4fe0;	/* MIB control register */
+
+	reg_write(mib_cr, 0xf0);
+	read_mib_counters();
+	reg_write(mib_cr, 0x800000f0);
+}
+
+
+/*
+ * Release global resources before exit: free the netlink attribute
+ * buffer (and null the pointer), close the switch ioctl socket, and tear
+ * down the mt753x netlink session.
+ */
+void exit_free()
+{
+	free(attres);
+	attres = NULL;
+	switch_ioctl_fini();
+	mt753x_netlink_free();
+}
diff --git a/feed/switch/src/switch_fun.h b/feed/switch/src/switch_fun.h
new file mode 100644
index 0000000..95ff4b3
--- /dev/null
+++ b/feed/switch/src/switch_fun.h
@@ -0,0 +1,144 @@
+/*
+* switch_fun.h: switch function sets
+*/
+#ifndef SWITCH_FUN_H
+#define SWITCH_FUN_H
+
+#include <stdbool.h>
+
+#define MT7530_T10_TEST_CONTROL 0x145
+
+#define MAX_PORT 6
+#define MAX_PHY_PORT 5
+#define CONFIG_MTK_7531_DVT 1
+
+extern int chip_name;
+extern struct mt753x_attr *attres;
+extern bool nl_init_flag;
+
+/*basic operation*/
+int reg_read(unsigned int offset, unsigned int *value);
+int reg_write(unsigned int offset, unsigned int value);
+int mii_mgr_read(unsigned int port_num, unsigned int reg, unsigned int *value);
+int mii_mgr_write(unsigned int port_num, unsigned int reg, unsigned int value);
+int mii_mgr_c45_read(unsigned int port_num, unsigned int dev, unsigned int reg, unsigned int *value);
+int mii_mgr_c45_write(unsigned int port_num, unsigned int dev, unsigned int reg, unsigned int value);
+
+/*phy setting*/
+int phy_dump(int phy_addr);
+void phy_crossover(int argc, char *argv[]);
+int rw_phy_token_ring(int argc, char *argv[]);
+/*arl setting*/
+void doArlAging(int argc, char *argv[]);
+
+/*acl setting*/
+void acl_mac_add(int argc, char *argv[]);
+void acl_dip_meter(int argc, char *argv[]);
+void acl_dip_trtcm(int argc, char *argv[]);
+void acl_ethertype(int argc, char *argv[]);
+void acl_ethertype(int argc, char *argv[]);
+void acl_dip_modify(int argc, char *argv[]);
+void acl_dip_pppoe(int argc, char *argv[]);
+void acl_dip_add(int argc, char *argv[]);
+void acl_l4_add(int argc, char *argv[]);
+void acl_sp_add(int argc, char *argv[]);
+
+void acl_port_enable(int argc, char *argv[]);
+void acl_table_add(int argc, char *argv[]);
+void acl_mask_table_add(int argc, char *argv[]);
+void acl_rule_table_add(int argc, char *argv[]);
+void acl_rate_table_add(int argc, char *argv[]);
+
+/*dip table*/
+void dip_dump(void);
+void dip_add(int argc, char *argv[]);
+void dip_del(int argc, char *argv[]);
+void dip_clear(void);
+
+/*sip table*/
+void sip_dump(void);
+void sip_add(int argc, char *argv[]);
+void sip_del(int argc, char *argv[]);
+void sip_clear(void);
+
+/*stp*/
+void doStp(int argc, char *argv[]);
+
+/*mac table*/
+void table_dump(void);
+void table_add(int argc, char *argv[]);
+void table_search_mac_vid(int argc, char *argv[]);
+void table_search_mac_fid(int argc, char *argv[]);
+void table_del_fid(int argc, char *argv[]);
+void table_del_vid(int argc, char *argv[]);
+void table_clear(void);
+
+/*vlan table*/
+void vlan_dump(int argc, char *argv[]);
+void vlan_clear(int argc, char *argv[]);
+void vlan_set(int argc, char *argv[]);
+
+void doVlanSetPvid(int argc, char *argv[]);
+void doVlanSetVid(int argc, char *argv[]);
+void doVlanSetAccFrm(int argc, char *argv[]);
+void doVlanSetPortAttr(int argc, char *argv[]);
+void doVlanSetPortMode(int argc, char *argv[]);
+void doVlanSetEgressTagPCR(int argc, char *argv[]);
+void doVlanSetEgressTagPVC(int argc, char *argv[]);
+
+/*igmp function*/
+void igmp_on(int argc, char *argv[]);
+void igmp_off();
+void igmp_disable(int argc, char *argv[]);
+void igmp_enable(int argc, char *argv[]);
+
+/*mirror function*/
+void set_mirror_to(int argc, char *argv[]);
+void set_mirror_from(int argc, char *argv[]);
+void doMirrorPortBased(int argc, char *argv[]);
+void doMirrorEn(int argc, char *argv[]);
+
+/*rate control*/
+void rate_control(int argc, char *argv[]);
+int ingress_rate_set(int on_off, unsigned int port, unsigned int bw);
+int egress_rate_set(int on_off, int port, int bw);
+
+/*QoS*/
+int qos_sch_select(int argc, char *argv[]);
+void qos_set_base(int argc, char *argv[]);
+void qos_wfq_set_weight(int argc, char *argv[]);
+void qos_set_portpri(int argc, char *argv[]);
+void qos_set_dscppri(int argc, char *argv[]);
+void qos_pri_mapping_queue(int argc, char *argv[]);
+
+/*flow control*/
+int global_set_mac_fc(int argc, char *argv[]);
+int phy_set_fc(int argc, char *argv[]);
+int phy_set_an(int argc, char *argv[]);
+
+/* collision pool functions */
+int collision_pool_enable(int argc, char *argv[]);
+void collision_pool_mac_dump();
+void collision_pool_dip_dump();
+void collision_pool_sip_dump();
+
+/*pfc functions*/
+int set_mac_pfc(int argc, char *argv[]);
+void pfc_get_rx_counter(int argc, char *argv[]);
+void pfc_get_tx_counter(int argc, char *argv[]);
+
+/*switch reset*/
+void switch_reset(int argc, char *argv[]);
+
+/* EEE(802.3az) function  */
+void eee_enable(int argc, char *argv[]);
+void eee_dump(int argc, char *argv[]);
+
+void read_mib_counters();
+void clear_mib_counters();
+void read_output_queue_counters();
+void read_free_page_counters();
+
+void phy_crossover(int argc, char *argv[]);
+void exit_free();
+#endif
diff --git a/feed/switch/src/switch_ioctl.c b/feed/switch/src/switch_ioctl.c
new file mode 100644
index 0000000..082eab1
--- /dev/null
+++ b/feed/switch/src/switch_ioctl.c
@@ -0,0 +1,346 @@
+/*
+ * switch_ioctl.c: switch(ioctl) set API
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <linux/if.h>
+
+#include "switch_fun.h"
+#include "switch_ioctl.h"
+
+static int esw_fd;
+
+void switch_ioctl_init(void)
+{
+	esw_fd = socket(AF_INET, SOCK_DGRAM, 0);
+	if (esw_fd < 0) {
+		perror("socket");
+		exit(0);
+	}
+}
+
+void switch_ioctl_fini(void)
+{
+	close(esw_fd);
+}
+
+int reg_read_ioctl(unsigned int offset, unsigned int *value)
+{
+	struct ifreq ifr;
+	struct ra_mii_ioctl_data mii;
+
+	strncpy(ifr.ifr_name, ETH_DEVNAME, 5);
+	ifr.ifr_data = &mii;
+
+	mii.phy_id = 0x1f;
+	mii.reg_num = offset;
+
+	if (-1 == ioctl(esw_fd, RAETH_MII_READ, &ifr)) {
+		perror("ioctl");
+		close(esw_fd);
+		exit(0);
+	}
+	*value = mii.val_out;
+	return 0;
+}
+
+int reg_read_tr(int offset, int *value)
+{
+	struct ifreq ifr;
+	struct ra_mii_ioctl_data mii;
+
+	strncpy(ifr.ifr_name, ETH_DEVNAME, 5);
+	ifr.ifr_data = &mii;
+
+	mii.phy_id = 0;
+	mii.reg_num = offset;
+
+	if (-1 == ioctl(esw_fd, RAETH_MII_READ, &ifr)) {
+		perror("ioctl");
+		close(esw_fd);
+		exit(0);
+	}
+	*value = mii.val_out;
+	return 0;
+}
+
+int reg_write_ioctl(unsigned int offset, unsigned int value)
+{
+	struct ifreq ifr;
+	struct ra_mii_ioctl_data mii;
+
+	strncpy(ifr.ifr_name, ETH_DEVNAME, 5);
+	ifr.ifr_data = &mii;
+
+	mii.phy_id = 0x1f;
+	mii.reg_num = offset;
+	mii.val_in = value;
+
+	if (-1 == ioctl(esw_fd, RAETH_MII_WRITE, &ifr)) {
+		perror("ioctl");
+		close(esw_fd);
+		exit(0);
+	}
+	return 0;
+}
+
+int reg_write_tr(int offset, int value)
+{
+	struct ifreq ifr;
+	struct ra_mii_ioctl_data mii;
+
+	strncpy(ifr.ifr_name, ETH_DEVNAME, 5);
+	ifr.ifr_data = &mii;
+
+	mii.phy_id = 0;
+	mii.reg_num = offset;
+	mii.val_in = value;
+
+	if (-1 == ioctl(esw_fd, RAETH_MII_WRITE, &ifr)) {
+		perror("ioctl");
+		close(esw_fd);
+		exit(0);
+	}
+	return 0;
+}
+
+int phy_dump_ioctl(unsigned int phy_addr)
+{
+	struct ifreq ifr;
+	struct esw_reg reg;
+
+	reg.val = phy_addr;
+	strncpy(ifr.ifr_name, ETH_DEVNAME, 5);
+	ifr.ifr_data = &reg;
+	if (-1 == ioctl(esw_fd, RAETH_ESW_PHY_DUMP, &ifr)) {
+		perror("ioctl");
+		close(esw_fd);
+		exit(0);
+	}
+	return 0;
+}
+
+int mii_mgr_cl22_read_ioctl(unsigned int port_num, unsigned int reg, unsigned int *value)
+{
+	unsigned int reg_value;
+	int loop_cnt;
+	int op_busy;
+
+	loop_cnt = 0;
+
+	/*Change to indirect access mode*/
+	/*if you need to use direct access mode, please change back manually by reset bit5*/
+	reg_read(0x7804, &reg_value);
+	if (((reg_value >> 5) & 0x1) == 0) {
+		reg_value |= 1 << 5;
+		reg_write(0x7804, reg_value);
+		printf("Change to indirect access mode:0x%x\n", reg_value);
+	}
+	reg_value = 0x80090000 | (port_num << 20) | (reg << 25);
+	reg_write(0x701c, reg_value);
+	while (1)
+	{
+		reg_read(0x701c, &reg_value);
+		op_busy = reg_value & (1 << 31);
+		if (!op_busy) {
+			reg_value = reg_value & 0xFFFF;
+			break;
+		} else if (loop_cnt < 10)
+			loop_cnt++;
+		else {
+			printf("MDIO read opeartion timeout\n");
+			reg_value = 0;
+			break;
+		}
+	}
+	printf(" PHY Indirect Access Control(0x701c) register read value =0x%x  \n", reg_value);
+	*value = reg_value;
+
+	return 0;
+}
+
+int mii_mgr_cl22_write_ioctl(unsigned int port_num, unsigned int reg, unsigned int value)
+{
+	unsigned int reg_value;
+	int loop_cnt;
+	int op_busy;
+
+	loop_cnt = 0;
+	/*Change to indirect access mode*/
+	/*if you need to use direct access mode, please change back manually by reset bit5*/
+	reg_read(0x7804, &reg_value);
+	if (((reg_value >> 5) & 0x1) == 0) {
+		reg_value |= 1 << 5;
+		reg_write(0x7804, reg_value);
+		printf("Change to indirect access mode:0x%x\n", reg_value);
+	}
+
+	reg_value = 0x80050000 | (port_num << 20) | (reg << 25) | value;
+	reg_write(0x701c, reg_value);
+	while (1)
+	{
+		reg_read(0x701c, &reg_value);
+		op_busy = reg_value & (1 << 31);
+		if (!op_busy)
+			break;
+		else if (loop_cnt < 10)
+			loop_cnt++;
+		else {
+			printf("MDIO write opeartion timeout\n");
+			break;
+		}
+	}
+
+	printf(" PHY Indirect Access Control(0x701c) register write value =0x%x  \n", reg_value);
+
+	return 0;
+}
+
+int mii_mgr_cl45_read_ioctl(unsigned int port_num, unsigned int dev,
+			    unsigned int reg, unsigned int *value)
+{
+	int sk, method, ret;
+	struct ifreq ifr;
+	struct ra_mii_ioctl_data mii;
+
+	if (!value)
+		return -1;
+
+	sk = socket(AF_INET, SOCK_DGRAM, 0);
+	if (sk < 0) {
+		printf("Open socket failed\n");
+
+		return -1;
+	}
+
+	strncpy(ifr.ifr_name, ETH_DEVNAME, 5);
+	ifr.ifr_data = &mii;
+
+	method = RAETH_MII_WRITE;
+	mii.phy_id = port_num;
+	mii.reg_num = 13;
+	mii.val_in = dev;
+	ret = ioctl(sk, method, &ifr);
+
+	method = RAETH_MII_WRITE;
+	mii.phy_id = port_num;
+	mii.reg_num = 14;
+	mii.val_in = reg;
+	ret = ioctl(sk, method, &ifr);
+
+	method = RAETH_MII_WRITE;
+	mii.phy_id = port_num;
+	mii.reg_num = 13;
+	mii.val_in = (0x6000 | dev);
+	ret = ioctl(sk, method, &ifr);
+
+	usleep(1000);
+
+	method = RAETH_MII_READ;
+	mii.phy_id = port_num;
+	mii.reg_num = 14;
+	ret = ioctl(sk, method, &ifr);
+
+	close(sk);
+	*value = mii.val_out;
+
+	return ret;
+}
+
+int mii_mgr_cl45_write_ioctl(unsigned int port_num, unsigned int dev,
+			     unsigned int reg, unsigned int value)
+{
+	int sk, method, ret;
+	struct ifreq ifr;
+	struct ra_mii_ioctl_data mii;
+
+	sk = socket(AF_INET, SOCK_DGRAM, 0);
+	if (sk < 0) {
+		printf("Open socket failed\n");
+
+		return -1;
+	}
+
+	strncpy(ifr.ifr_name, ETH_DEVNAME, 5);
+	ifr.ifr_data = &mii;
+
+	method = RAETH_MII_WRITE;
+	mii.phy_id = port_num;
+	mii.reg_num = 13;
+	mii.val_in = dev;
+	ret = ioctl(sk, method, &ifr);
+
+	method = RAETH_MII_WRITE;
+	mii.phy_id = port_num;
+	mii.reg_num = 14;
+	mii.val_in = reg;
+	ret = ioctl(sk, method, &ifr);
+
+	method = RAETH_MII_WRITE;
+	mii.phy_id = port_num;
+	mii.reg_num = 13;
+	mii.val_in = (0x6000 | dev);
+	ret = ioctl(sk, method, &ifr);
+
+	usleep(1000);
+
+	method = RAETH_MII_WRITE;
+	mii.phy_id = port_num;
+	mii.reg_num = 14;
+	mii.val_in = value;
+	ret = ioctl(sk, method, &ifr);
+
+	close(sk);
+
+	return ret;
+}
+
+int dump_gphy(void)
+{
+	int cl22_reg[6] = {0x00, 0x01, 0x04, 0x05, 0x09, 0x0A};
+	int cl45_start_reg = 0x9B;
+	int cl45_end_reg = 0xA2;
+	unsigned int value;
+	int port_num = 5;
+	int i, j, ret;
+
+	int sk, method;
+	struct ifreq ifr;
+	struct ra_mii_ioctl_data mii;
+
+	sk = socket(AF_INET, SOCK_DGRAM, 0);
+	if (sk < 0) {
+		printf("Open socket failed\n");
+		return -1;
+	}
+
+	strncpy(ifr.ifr_name, ETH_DEVNAME, 5);
+	ifr.ifr_data = &mii;
+	/* dump CL45 reg first*/
+	for (i = 0; i < port_num; i++) {
+		printf("== Port %d ==\n", i);
+		for (j = cl45_start_reg; j < (cl45_end_reg + 1); j++) {
+			ret = mii_mgr_cl45_read_ioctl(i, 0x1E, j, &value);
+			if (ret)
+				continue;
+			printf("dev1Eh_reg%xh = 0x%x\n", j, value);
+		}
+	}
+	printf("== Global ==\n");
+	for (i = 0; i < sizeof(cl22_reg) / sizeof(cl22_reg[0]); i++) {
+		method = RAETH_MII_READ;
+		mii.phy_id = 0;
+		mii.reg_num = cl22_reg[i];
+		ret = ioctl(sk, method, &ifr);
+		printf("Reg%xh = 0x%x\n", cl22_reg[i], mii.val_out);
+	}
+
+	close(sk);
+
+	return ret;
+}
diff --git a/feed/switch/src/switch_ioctl.h b/feed/switch/src/switch_ioctl.h
new file mode 100644
index 0000000..97946af
--- /dev/null
+++ b/feed/switch/src/switch_ioctl.h
@@ -0,0 +1,68 @@
+/*
+ * switch_ioctl.h: switch(ioctl) set API
+ */
+
+#ifndef SWITCH_IOCTL_H
+#define SWITCH_IOCTL_H
+
+#define ETH_DEVNAME "eth0"
+#define BR_DEVNAME "br-lan"
+
+#define RAETH_MII_READ                  0x89F3
+#define RAETH_MII_WRITE                 0x89F4
+#define RAETH_ESW_PHY_DUMP              0x89F7
+
+struct esw_reg {
+        unsigned int off;
+        unsigned int val;
+};
+
+struct ra_mii_ioctl_data {
+        __u32 phy_id;
+        __u32 reg_num;
+        __u32 val_in;
+        __u32 val_out;
+        __u32 port_num;
+        __u32 dev_addr;
+        __u32 reg_addr;
+};
+
+struct ra_switch_ioctl_data {
+        unsigned int cmd;
+        unsigned int on_off;
+        unsigned int port;
+        unsigned int bw;
+        unsigned int vid;
+        unsigned int fid;
+        unsigned int port_map;
+        unsigned int rx_port_map;
+        unsigned int tx_port_map;
+        unsigned int igmp_query_interval;
+        unsigned int reg_addr;
+        unsigned int reg_val;
+        unsigned int mode;
+        unsigned int qos_queue_num;
+        unsigned int qos_type;
+        unsigned int qos_pri;
+        unsigned int qos_dscp;
+        unsigned int qos_table_idx;
+        unsigned int qos_weight;
+        unsigned char mac[6];
+};
+
+extern int chip_name;
+
+void switch_ioctl_init(void);
+void switch_ioctl_fini(void);
+int reg_read_ioctl(unsigned int offset, unsigned int *value);
+int reg_write_ioctl(unsigned int offset, unsigned int value);
+int phy_dump_ioctl(unsigned int phy_addr);
+int mii_mgr_cl22_read_ioctl(unsigned int port_num, unsigned int reg,
+			    unsigned int *value);
+int mii_mgr_cl22_write_ioctl(unsigned int port_num, unsigned int reg,
+			     unsigned int value);
+int mii_mgr_cl45_read_ioctl(unsigned int port_num, unsigned int dev,
+			    unsigned int reg, unsigned int *value);
+int mii_mgr_cl45_write_ioctl(unsigned int port_num, unsigned int dev,
+			     unsigned int reg, unsigned int value);
+#endif
diff --git a/feed/switch/src/switch_netlink.c b/feed/switch/src/switch_netlink.c
new file mode 100644
index 0000000..90a4a19
--- /dev/null
+++ b/feed/switch/src/switch_netlink.c
@@ -0,0 +1,445 @@
+/*
+ * switch_netlink.c: switch(netlink) set API
+ *
+ * Author: Sirui Zhao <Sirui.Zhao@mediatek.com>
+ */
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netlink/netlink.h>
+#include <netlink/genl/genl.h>
+#include <netlink/genl/family.h>
+#include <netlink/genl/ctrl.h>
+
+#include "switch_netlink.h"
+
+static struct nl_sock *user_sock;
+static struct nl_cache *cache;
+static struct genl_family *family;
+static struct nlattr *attrs[MT753X_ATTR_TYPE_MAX + 1];
+
+static int wait_handler(struct nl_msg *msg, void *arg)
+{
+	int *finished = arg;
+
+	*finished = 1;
+	return NL_STOP;
+}
+
+static int list_swdevs(struct nl_msg *msg, void *arg)
+{
+	struct mt753x_attr *val = arg;
+	struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg));
+
+	if (nla_parse(attrs, MT753X_ATTR_TYPE_MAX, genlmsg_attrdata(gnlh, 0),
+		      genlmsg_attrlen(gnlh, 0), NULL) < 0)
+		goto done;
+
+	if (gnlh->cmd == MT753X_CMD_REPLY) {
+		if (attrs[MT753X_ATTR_TYPE_MESG]) {
+			val->dev_info =
+				nla_get_string(attrs[MT753X_ATTR_TYPE_MESG]);
+			printf("register switch dev:\n%s", val->dev_info);
+		}
+		else {
+			fprintf(stderr, "ERROR:No switch dev now\n");
+			goto done;
+		}
+	} else
+		goto done;
+	return 0;
+done:
+	return NL_SKIP;
+}
+
+static int construct_attrs(struct nl_msg *msg, void *arg)
+{
+	struct mt753x_attr *val = arg;
+	int type = val->type;
+
+	if (val->dev_id > -1)
+		NLA_PUT_U32(msg, MT753X_ATTR_TYPE_DEV_ID, val->dev_id);
+
+	if (val->op == 'r') {
+		if (val->phy_dev != -1)
+			NLA_PUT_U32(msg, MT753X_ATTR_TYPE_PHY_DEV, val->phy_dev);
+		if (val->port_num >= 0)
+			NLA_PUT_U32(msg, MT753X_ATTR_TYPE_PHY, val->port_num);
+		NLA_PUT_U32(msg, type, val->reg);
+	} else if (val->op == 'w') {
+		if (val->phy_dev != -1)
+			NLA_PUT_U32(msg, MT753X_ATTR_TYPE_PHY_DEV, val->phy_dev);
+		if (val->port_num >= 0)
+			NLA_PUT_U32(msg, MT753X_ATTR_TYPE_PHY, val->port_num);
+		NLA_PUT_U32(msg, type, val->reg);
+		NLA_PUT_U32(msg, MT753X_ATTR_TYPE_VAL, val->value);
+	} else {
+		printf("construct_attrs_message\n");
+		NLA_PUT_STRING(msg, type, "hello");
+	}
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+static int spilt_attrs(struct nl_msg *msg, void *arg)
+{
+	struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg));
+	struct mt753x_attr *val = arg;
+	char *str;
+
+	if (nla_parse(attrs, MT753X_ATTR_TYPE_MAX, genlmsg_attrdata(gnlh, 0),
+		      genlmsg_attrlen(gnlh, 0), NULL) < 0)
+		goto done;
+
+	if ((gnlh->cmd == MT753X_CMD_WRITE) || (gnlh->cmd == MT753X_CMD_READ)) {
+		if (attrs[MT753X_ATTR_TYPE_MESG]) {
+			str = nla_get_string(attrs[MT753X_ATTR_TYPE_MESG]);
+			printf(" %s\n", str);
+			if (!strncmp(str, "No", 2))
+				goto done;
+		}
+		if (attrs[MT753X_ATTR_TYPE_REG]) {
+			val->reg =
+			    nla_get_u32(attrs[MT753X_ATTR_TYPE_REG]);
+		}
+		if (attrs[MT753X_ATTR_TYPE_VAL]) {
+			val->value =
+			    nla_get_u32(attrs[MT753X_ATTR_TYPE_VAL]);
+		}
+	}
+	else
+		goto done;
+
+	return 0;
+done:
+	return NL_SKIP;
+}
+
+static int mt753x_request_callback(int cmd, int (*spilt)(struct nl_msg *, void *),
+				   int (*construct)(struct nl_msg *, void *),
+				   void *arg)
+{
+	struct nl_msg *msg;
+	struct nl_cb *callback = NULL;
+	int finished;
+	int flags = 0;
+	int err;
+
+	/*Allocate a netlink message buffer*/
+	msg = nlmsg_alloc();
+	if (!msg) {
+		fprintf(stderr, "Failed to allocate netlink message\n");
+		exit(1);
+	}
+	if (!construct) {
+		if (cmd == MT753X_CMD_REQUEST)
+			flags |= NLM_F_REQUEST;
+		else
+			flags |= NLM_F_DUMP;
+	}
+	genlmsg_put(msg, NL_AUTO_PID, NL_AUTO_SEQ, genl_family_get_id(family),
+		    0, flags, cmd, 0);
+
+	/*Fill attributes of the netlink message via the construct function*/
+	if (construct) {
+		err = construct(msg, arg);
+		if (err < 0) {
+			fprintf(stderr, "attributes error\n");
+			goto nal_put_failure;
+		}
+	}
+
+	/*Allocate a new callback handler*/
+	callback = nl_cb_alloc(NL_CB_CUSTOM);
+	if (!callback) {
+		fprintf(stderr, "Failed to allocate callback handler\n");
+		exit(1);
+	}
+
+	/*Send netlink message*/
+	err = nl_send_auto_complete(user_sock, msg);
+	if (err < 0) {
+		fprintf(stderr, "nl_send_auto_complete failied:%d\n", err);
+		goto out;
+	}
+	finished = 0;
+	if (spilt)
+		nl_cb_set(callback, NL_CB_VALID, NL_CB_CUSTOM, spilt, arg);
+
+	if (construct)
+		nl_cb_set(callback, NL_CB_ACK, NL_CB_CUSTOM, wait_handler,
+			  &finished);
+	else
+		nl_cb_set(callback, NL_CB_FINISH, NL_CB_CUSTOM, wait_handler,
+			  &finished);
+
+	/*receive messages from the kernel in response to the request*/
+	err = nl_recvmsgs(user_sock, callback);
+	if (err < 0)
+		goto out;
+
+	/*wait until an ACK is received for the latest not-yet-acknowledged request*/
+	if (!finished)
+		err = nl_wait_for_ack(user_sock);
+out:
+	if (callback)
+		nl_cb_put(callback);
+
+nal_put_failure:
+	nlmsg_free(msg);
+	return err;
+}
+
+void mt753x_netlink_free(void)
+{
+	if (family)
+		nl_object_put((struct nl_object *)family);
+	if (cache)
+		nl_cache_free(cache);
+	if (user_sock)
+		nl_socket_free(user_sock);
+	user_sock = NULL;
+	cache = NULL;
+	family = NULL;
+}
+
+int mt753x_netlink_init(const char *name)
+{
+	int ret;
+
+	user_sock = NULL;
+	cache = NULL;
+	family = NULL;
+
+	/*Allocate a new netlink socket*/
+	user_sock = nl_socket_alloc();
+	if (!user_sock) {
+		fprintf(stderr, "Failed to create user socket\n");
+		goto err;
+	}
+	/*Connect to the genl controller*/
+	if (genl_connect(user_sock)) {
+		fprintf(stderr, "Failed to connetct to generic netlink\n");
+		goto err;
+	}
+	/*Allocate a new nl_cache*/
+	ret = genl_ctrl_alloc_cache(user_sock, &cache);
+	if (ret < 0) {
+		fprintf(stderr, "Failed to allocate netlink cache\n");
+		goto err;
+	}
+
+	if (name == NULL)
+		return -EINVAL;
+
+	/*Look up the generic netlink family by name in the provided cache*/
+	family = genl_ctrl_search_by_name(cache, name);
+	if (!family) {
+		//fprintf(stderr,"switch(mt753x) API not be prepared\n");
+		goto err;
+	}
+	return 0;
+err:
+	mt753x_netlink_free();
+	return -EINVAL;
+}
+
+void mt753x_list_swdev(struct mt753x_attr *arg, int cmd)
+{
+	int err;
+
+	err = mt753x_request_callback(cmd, list_swdevs, NULL, arg);
+	if (err < 0)
+		fprintf(stderr, "mt753x list dev error\n");
+}
+
+static int mt753x_request(struct mt753x_attr *arg, int cmd)
+{
+	int err;
+
+	err = mt753x_request_callback(cmd, spilt_attrs, construct_attrs, arg);
+	if (err < 0) {
+		fprintf(stderr, "mt753x deal request error\n");
+		return err;
+	}
+	return 0;
+}
+
+static int phy_operate_netlink(char op, struct mt753x_attr *arg,
+			       unsigned int port_num, unsigned int phy_dev,
+			       unsigned int offset, unsigned int *value)
+{
+	int ret = 0;
+	struct mt753x_attr *attr = arg;
+
+	attr->port_num = port_num;
+	attr->phy_dev = phy_dev;
+	attr->reg = offset;
+	attr->value = -1;
+	attr->type = MT753X_ATTR_TYPE_REG;
+
+	switch (op)
+	{
+		case 'r':
+			attr->op = 'r';
+			ret = mt753x_request(attr, MT753X_CMD_READ);
+			*value = attr->value;
+			break;
+		case 'w':
+			attr->op = 'w';
+			attr->value = *value;
+			ret = mt753x_request(attr, MT753X_CMD_WRITE);
+			break;
+		default:
+			break;
+	}
+
+	return ret;
+}
+
+int reg_read_netlink(struct mt753x_attr *arg, unsigned int offset,
+		     unsigned int *value)
+{
+	int ret;
+
+	ret = phy_operate_netlink('r', arg, -1, -1, offset, value);
+	return ret;
+}
+
+int reg_write_netlink(struct mt753x_attr *arg, unsigned int offset,
+		      unsigned int value)
+{
+	int ret;
+
+	ret = phy_operate_netlink('w', arg, -1, -1, offset, &value);
+	return ret;
+}
+
+int phy_cl22_read_netlink(struct mt753x_attr *arg, unsigned int port_num,
+			  unsigned int phy_addr, unsigned int *value)
+{
+	int ret;
+
+	ret = phy_operate_netlink('r', arg, port_num, -1, phy_addr, value);
+	return ret;
+}
+
+int phy_cl22_write_netlink(struct mt753x_attr *arg, unsigned int port_num,
+			   unsigned int phy_addr, unsigned int value)
+{
+	int ret;
+
+	ret = phy_operate_netlink('w', arg, port_num, -1, phy_addr, &value);
+	return ret;
+}
+
+int phy_cl45_read_netlink(struct mt753x_attr *arg, unsigned int port_num,
+			  unsigned int phy_dev, unsigned int phy_addr,
+			  unsigned int *value)
+{
+	int ret;
+
+	ret = phy_operate_netlink('r', arg, port_num, phy_dev, phy_addr, value);
+	return ret;
+}
+
+int phy_cl45_write_netlink(struct mt753x_attr *arg, unsigned int port_num,
+			   unsigned int phy_dev, unsigned int phy_addr,
+			   unsigned int value)
+{
+	int ret;
+
+	ret = phy_operate_netlink('w', arg, port_num, phy_dev, phy_addr, &value);
+	return ret;
+}
+
+void dump_extend_phy_reg(struct mt753x_attr *arg, int port_no, int from,
+			int to, int is_local, int page_no)
+{
+        unsigned int temp = 0;
+        int r31 = 0;
+        int i = 0;
+
+        if (is_local == 0) {
+            printf("\n\nGlobal Register Page %d\n",page_no);
+            printf("===============");
+            r31 |= 0 << 15; //global
+            r31 |= ((page_no&0x7) << 12); //page no
+            phy_cl22_write_netlink(arg, port_no, 31, r31); //select global page x
+            for (i = 16; i < 32; i++) {
+                if(i%8 == 0)
+                    printf("\n");
+		phy_cl22_read_netlink(arg, port_no, i, &temp);
+                printf("%02d: %04X ", i, temp);
+            }
+        } else {
+            printf("\n\nLocal Register Port %d Page %d\n",port_no, page_no);
+            printf("===============");
+            r31 |= 1 << 15; //local
+            r31 |= ((page_no&0x7) << 12); //page no
+            phy_cl22_write_netlink(arg, port_no, 31, r31); //select global page x
+            for (i = 16; i < 32; i++) {
+                if (i%8 == 0) {
+                    printf("\n");
+                }
+		phy_cl22_read_netlink(arg, port_no, i, &temp);
+                printf("%02d: %04X ",i, temp);
+            }
+        }
+        printf("\n");
+}
+
+int phy_dump_netlink(struct mt753x_attr *arg, int phy_addr)
+{
+	int i;
+	int ret;
+	unsigned int offset, value;
+
+	if (phy_addr == 32) {
+		/*dump all phy register*/
+		for (i = 0; i < 5; i++) {
+			printf("\n[Port %d]=============", i);
+			for (offset = 0; offset < 16; offset++) {
+				if (offset % 8 == 0)
+					printf("\n");
+				ret = phy_cl22_read_netlink(arg, i, offset, &value);
+				printf("%02d: %04X ", offset, value);
+			}
+		}
+	} else {
+		printf("\n[Port %d]=============", phy_addr);
+		for (offset = 0; offset < 16; offset++) {
+			if (offset % 8 == 0)
+				printf("\n");
+			ret = phy_cl22_read_netlink(arg, phy_addr, offset, &value);
+			printf("%02d: %04X ", offset, value);
+		}
+	}
+	printf("\n");
+	for (offset = 0; offset < 5; offset++) { //global register  page 0~4
+		if (phy_addr == 32) //dump all phy register
+			dump_extend_phy_reg(arg, 0, 16, 31, 0, offset);
+		else
+			dump_extend_phy_reg(arg, phy_addr, 16, 31, 0, offset);
+	}
+
+	if (phy_addr == 32) {	//dump all phy register
+		for (offset = 0; offset < 5; offset++) { //local register port 0-port4
+			dump_extend_phy_reg(arg, offset, 16, 31, 1, 0); //dump local page 0
+			dump_extend_phy_reg(arg, offset, 16, 31, 1, 1); //dump local page 1
+			dump_extend_phy_reg(arg, offset, 16, 31, 1, 2); //dump local page 2
+			dump_extend_phy_reg(arg, offset, 16, 31, 1, 3); //dump local page 3
+		}
+	} else {
+		dump_extend_phy_reg(arg, phy_addr, 16, 31, 1, 0); //dump local page 0
+		dump_extend_phy_reg(arg, phy_addr, 16, 31, 1, 1); //dump local page 1
+		dump_extend_phy_reg(arg, phy_addr, 16, 31, 1, 2); //dump local page 2
+		dump_extend_phy_reg(arg, phy_addr, 16, 31, 1, 3); //dump local page 3
+	}
+	return ret;
+}
diff --git a/feed/switch/src/switch_netlink.h b/feed/switch/src/switch_netlink.h
new file mode 100644
index 0000000..b3f946e
--- /dev/null
+++ b/feed/switch/src/switch_netlink.h
@@ -0,0 +1,70 @@
+/*
+ * switch_netlink.h: switch(netlink) set API
+ *
+ * Author: Sirui Zhao <Sirui.Zhao@mediatek.com>
+ */
+#ifndef MT753X_NETLINK_H
+#define MT753X_NETLINK_H
+
+#define MT753X_GENL_NAME "mt753x"
+#define MT753X_DSA_GENL_NAME "mt753x_dsa"
+#define MT753X_GENL_VERSION 0X1
+
+/*add your cmd here*/
+enum {
+	MT753X_CMD_UNSPEC = 0, /*Reserved*/
+	MT753X_CMD_REQUEST,    /*user->kernel request/get-response*/
+	MT753X_CMD_REPLY,      /*kernel->user event*/
+	MT753X_CMD_READ,
+	MT753X_CMD_WRITE,
+	__MT753X_CMD_MAX,
+};
+#define MT753X_CMD_MAX (__MT753X_CMD_MAX - 1)
+
+/*define attr types */
+enum
+{
+	MT753X_ATTR_TYPE_UNSPEC = 0,
+	MT753X_ATTR_TYPE_MESG, /*MT753X message*/
+	MT753X_ATTR_TYPE_PHY,
+	MT753X_ATTR_TYPE_PHY_DEV,
+	MT753X_ATTR_TYPE_REG,
+	MT753X_ATTR_TYPE_VAL,
+	MT753X_ATTR_TYPE_DEV_NAME,
+	MT753X_ATTR_TYPE_DEV_ID,
+	__MT753X_ATTR_TYPE_MAX,
+};
+#define MT753X_ATTR_TYPE_MAX (__MT753X_ATTR_TYPE_MAX - 1)
+
+struct mt753x_attr {
+	int port_num;
+	int phy_dev;
+	int reg;
+	int value;
+	int type;
+	char op;
+	char *dev_info;
+	int dev_name;
+	int dev_id;
+};
+
+int mt753x_netlink_init(const char *name);
+void mt753x_netlink_free(void);
+void mt753x_list_swdev(struct mt753x_attr *arg, int cmd);
+int reg_read_netlink(struct mt753x_attr *arg, unsigned int offset,
+		     unsigned int *value);
+int reg_write_netlink(struct mt753x_attr *arg, unsigned int offset,
+		      unsigned int value);
+int phy_cl22_read_netlink(struct mt753x_attr *arg, unsigned int port_num,
+			  unsigned int phy_addr, unsigned int *value);
+int phy_cl22_write_netlink(struct mt753x_attr *arg, unsigned int port_num,
+			   unsigned int phy_addr, unsigned int value);
+int phy_cl45_read_netlink(struct mt753x_attr *arg, unsigned int port_num,
+			  unsigned int phy_dev, unsigned int phy_addr,
+			  unsigned int *value);
+int phy_cl45_write_netlink(struct mt753x_attr *arg, unsigned int port_num,
+			   unsigned int phy_dev, unsigned int phy_addr,
+			   unsigned int value);
+int phy_dump_netlink(struct mt753x_attr *arg, int phy_addr);
+
+#endif
diff --git a/openwrt_patches-21.02/001-target-mediatek-add-mt7986-subtarget.patch b/openwrt_patches-21.02/001-target-mediatek-add-mt7986-subtarget.patch
new file mode 100644
index 0000000..f300666
--- /dev/null
+++ b/openwrt_patches-21.02/001-target-mediatek-add-mt7986-subtarget.patch
@@ -0,0 +1,13 @@
+diff --git a/target/linux/mediatek/Makefile b/target/linux/mediatek/Makefile
+index c8ab5e0..01e993d 100644
+--- a/target/linux/mediatek/Makefile
++++ b/target/linux/mediatek/Makefile
+@@ -5,7 +5,7 @@ include $(TOPDIR)/rules.mk
+ ARCH:=arm
+ BOARD:=mediatek
+ BOARDNAME:=MediaTek Ralink ARM
+-SUBTARGETS:=mt7622 mt7623 mt7629
++SUBTARGETS:=mt7622 mt7623 mt7629 mt7986
+ FEATURES:=squashfs nand ramdisk fpu
+ 
+ KERNEL_PATCHVER:=5.4
diff --git a/openwrt_patches-21.02/101-fstool-add-mtk-patches.patch b/openwrt_patches-21.02/101-fstool-add-mtk-patches.patch
new file mode 100644
index 0000000..5afc342
--- /dev/null
+++ b/openwrt_patches-21.02/101-fstool-add-mtk-patches.patch
@@ -0,0 +1,18 @@
+diff -urN a/package/system/fstools/patches/0101-jffs2-mount-on-mtk-flash-workaround.patch b/package/system/fstools/patches/0101-jffs2-mount-on-mtk-flash-workaround.patch
+--- a/package/system/fstools/patches/0101-jffs2-mount-on-mtk-flash-workaround.patch	1970-01-01 08:00:00.000000000 +0800
++++ b/package/system/fstools/patches/0101-jffs2-mount-on-mtk-flash-workaround.patch	2020-07-30 18:16:13.178070668 +0800
+@@ -0,0 +1,14 @@
++Index: fstools-2016-12-04-84b530a7/libfstools/mtd.c
++===================================================================
++--- fstools-2016-12-04-84b530a7.orig/libfstools/mtd.c	2017-08-29 15:00:46.824333000 +0800
+++++ fstools-2016-12-04-84b530a7/libfstools/mtd.c	2017-08-29 15:02:52.848520000 +0800
++@@ -218,6 +218,9 @@
++ 	if (v->type == UBIVOLUME && deadc0de == 0xffffffff) {
++ 		return FS_JFFS2;
++ 	}
+++	if (v->type == NANDFLASH && deadc0de == 0xffffffff) {
+++		return FS_JFFS2;
+++	}
++ 
++ 	return FS_NONE;
++ }
diff --git a/openwrt_patches-21.02/103-generic-kernel-config-for-mtk-snand-driver.patch b/openwrt_patches-21.02/103-generic-kernel-config-for-mtk-snand-driver.patch
new file mode 100644
index 0000000..4df19fd
--- /dev/null
+++ b/openwrt_patches-21.02/103-generic-kernel-config-for-mtk-snand-driver.patch
@@ -0,0 +1,10 @@
+--- a/target/linux/generic/config-5.4
++++ b/target/linux/generic/config-5.4
+@@ -3273,6 +3273,7 @@ CONFIG_MTD_SPLIT_SUPPORT=y
+ # CONFIG_MTD_UIMAGE_SPLIT is not set
+ # CONFIG_MTD_VIRT_CONCAT is not set
+ # CONFIG_MTK_MMC is not set
++# CONFIG_MTK_SPI_NAND is not set
+ CONFIG_MULTIUSER=y
+ # CONFIG_MUTEX_SPIN_ON_OWNER is not set
+ # CONFIG_MV643XX_ETH is not set
diff --git a/openwrt_patches-21.02/104-enable-mtk-snand-for-mt7622.patch b/openwrt_patches-21.02/104-enable-mtk-snand-for-mt7622.patch
new file mode 100644
index 0000000..ee5059c
--- /dev/null
+++ b/openwrt_patches-21.02/104-enable-mtk-snand-for-mt7622.patch
@@ -0,0 +1,10 @@
+--- a/target/linux/mediatek/mt7622/config-5.4
++++ b/target/linux/mediatek/mt7622/config-5.4
+@@ -414,6 +414,7 @@ CONFIG_MTK_SCPSYS=y
+ CONFIG_MTK_THERMAL=y
+ CONFIG_MTK_TIMER=y
+ # CONFIG_MTK_UART_APDMA is not set
++CONFIG_MTK_SPI_NAND=y
+ CONFIG_MUTEX_SPIN_ON_OWNER=y
+ CONFIG_NEED_DMA_MAP_STATE=y
+ CONFIG_NEED_SG_DMA_LENGTH=y
diff --git a/openwrt_patches-21.02/105-enable-dm-verity-for-mt7622.patch b/openwrt_patches-21.02/105-enable-dm-verity-for-mt7622.patch
new file mode 100644
index 0000000..c4386ba
--- /dev/null
+++ b/openwrt_patches-21.02/105-enable-dm-verity-for-mt7622.patch
@@ -0,0 +1,38 @@
+diff --git a/target/linux/mediatek/mt7622/config-5.4 b/target/linux/mediatek/mt7622/config-5.4
+index edecba3..c95ad3f 100644
+--- a/target/linux/mediatek/mt7622/config-5.4
++++ b/target/linux/mediatek/mt7622/config-5.4
+@@ -114,6 +114,9 @@ CONFIG_ARM_PMU=y
+ CONFIG_ARM_PSCI_FW=y
+ CONFIG_ATA=y
+ CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y
++CONFIG_BLK_DEV_DM=y
++CONFIG_BLK_DEV_DM_BUILTIN=y
++# CONFIG_BLK_DEV_MD is not set
+ CONFIG_BLK_DEV_SD=y
+ CONFIG_BLK_MQ_PCI=y
+ CONFIG_BLK_PM=y
+@@ -220,6 +223,15 @@ CONFIG_DMA_ENGINE_RAID=y
+ CONFIG_DMA_OF=y
+ CONFIG_DMA_REMAP=y
+ CONFIG_DMA_VIRTUAL_CHANNELS=y
++CONFIG_DM_BUFIO=y
++# CONFIG_DM_CRYPT is not set
++# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set
++CONFIG_DM_INIT=y
++# CONFIG_DM_MIRROR is not set
++# CONFIG_DM_SNAPSHOT is not set
++CONFIG_DM_VERITY=y
++# CONFIG_DM_VERITY_FEC is not set
++# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set
+ CONFIG_DRM_RCAR_WRITEBACK=y
+ CONFIG_DTC=y
+ CONFIG_DYNAMIC_DEBUG=y
+@@ -378,6 +390,7 @@ CONFIG_LOCK_SPIN_ON_OWNER=y
+ CONFIG_LZO_COMPRESS=y
+ CONFIG_LZO_DECOMPRESS=y
+ CONFIG_MAGIC_SYSRQ=y
++CONFIG_MD=y
+ CONFIG_MDIO_BUS=y
+ CONFIG_MDIO_DEVICE=y
+ CONFIG_MEDIATEK_MT6577_AUXADC=y
diff --git a/openwrt_patches-21.02/200-mt7621-modify-image-load-address.patch b/openwrt_patches-21.02/200-mt7621-modify-image-load-address.patch
new file mode 100644
index 0000000..6c85a34
--- /dev/null
+++ b/openwrt_patches-21.02/200-mt7621-modify-image-load-address.patch
@@ -0,0 +1,11 @@
+--- a/target/linux/ramips/image/Makefile
++++ b/target/linux/ramips/image/Makefile
+@@ -16,7 +16,7 @@ DEVICE_VARS += SERCOMM_PAD JCG_MAXSIZE
+ 
+ loadaddr-y := 0x80000000
+ loadaddr-$(CONFIG_TARGET_ramips_rt288x) := 0x88000000
+-loadaddr-$(CONFIG_TARGET_ramips_mt7621) := 0x80001000
++loadaddr-$(CONFIG_TARGET_ramips_mt7621) := 0x81001000
+ 
+ ldrplatform-y := ralink
+ ldrplatform-$(CONFIG_TARGET_ramips_mt7621) := mt7621
diff --git a/openwrt_patches-21.02/210-mt7622-modify-ubi-support.patch b/openwrt_patches-21.02/210-mt7622-modify-ubi-support.patch
new file mode 100644
index 0000000..7f2335f
--- /dev/null
+++ b/openwrt_patches-21.02/210-mt7622-modify-ubi-support.patch
@@ -0,0 +1,43 @@
+--- a/target/linux/mediatek/image/mt7622.mk
++++ b/target/linux/mediatek/image/mt7622.mk
+@@ -46,15 +46,15 @@ define Device/mediatek_mt7622-ubi
+   DEVICE_MODEL := MTK7622 AP (UBI)
+   DEVICE_DTS := mt7622-rfb1-ubi
+   DEVICE_DTS_DIR := $(DTS_DIR)/mediatek
++  SUPPORTED_DEVICES := mediatek,mt7622,ubi
+   UBINIZE_OPTS := -E 5
+   BLOCKSIZE := 128k
+   PAGESIZE := 2048
+-  KERNEL_SIZE := 4194304
+-  IMAGE_SIZE := 32768k
++  IMAGE_SIZE := 36864k
++  KERNEL_IN_UBI := 1
+   IMAGES += factory.bin
+-  IMAGE/factory.bin := append-kernel | pad-to $$(KERNEL_SIZE) | append-ubi | \
+-                check-size $$$$(IMAGE_SIZE)
+-  IMAGE/sysupgrade.bin := sysupgrade-tar
++  IMAGE/factory.bin := append-ubi | check-size $$$$(IMAGE_SIZE)
++  IMAGE/sysupgrade.bin := sysupgrade-tar | append-metadata
+   DEVICE_PACKAGES := kmod-usb-ohci kmod-usb2 kmod-usb3 kmod-ata-ahci-mtk
+ endef
+ TARGET_DEVICES += mediatek_mt7622-ubi
+--- a/target/linux/mediatek/mt7622/base-files/lib/upgrade/platform.sh
++++ b/target/linux/mediatek/mt7622/base-files/lib/upgrade/platform.sh
+@@ -25,6 +25,17 @@ platform_check_image() {
+ 	[ "$#" -gt 1 ] && return 1
+ 
+ 	case "$board" in
++	mediatek,mt7622,ubi)
++		# tar magic `ustar`
++		magic="$(dd if="$1" bs=1 skip=257 count=5 2>/dev/null)"
++
++		[ "$magic" != "ustar" ] && {
++			echo "Invalid image type."
++			return 1
++		}
++
++		return 0
++		;;
+ 	*)
+ 		[ "$magic" != "d00dfeed" ] && {
+ 			echo "Invalid image type."
diff --git a/openwrt_patches-21.02/300-mt7622-network-setup-mac.patch b/openwrt_patches-21.02/300-mt7622-network-setup-mac.patch
new file mode 100644
index 0000000..5fd7d00
--- /dev/null
+++ b/openwrt_patches-21.02/300-mt7622-network-setup-mac.patch
@@ -0,0 +1,30 @@
+diff --git a/target/linux/mediatek/mt7622/base-files/etc/board.d/02_network b/target/linux/mediatek/mt7622/base-files/etc/board.d/02_network
+index 3a409c8..4b19c0d 100755
+--- a/target/linux/mediatek/mt7622/base-files/etc/board.d/02_network
++++ b/target/linux/mediatek/mt7622/base-files/etc/board.d/02_network
+@@ -29,9 +29,25 @@ mediatek_setup_interfaces()
+ mediatek_setup_macs()
+ {
+ 	local board="$1"
++	local part_name="Factory"
++	local lan_mac=""
++	local wan_mac=""
++	local lan_mac_offset=""
++	local wan_mac_offset=""
+ 
+ 	case $board in
++	*)
++		#512k - 12 byte
++		lan_mac_offset="0x7FFF4"
++		wan_mac_offset="0x7FFFA"
++		;;
+ 	esac
++
++	lan_mac=$(mtd_get_mac_binary $part_name $lan_mac_offset)
++	wan_mac=$(mtd_get_mac_binary $part_name $wan_mac_offset)
++
++	[ -n "$lan_mac" ] && ucidef_set_interface_macaddr "lan" "$lan_mac"
++	[ -n "$wan_mac" ] && ucidef_set_interface_macaddr "wan" "$wan_mac"
+ }
+ 
+ board_config_update
diff --git a/openwrt_patches/101-fstool-add-mtk-patches.patch b/openwrt_patches/101-fstool-add-mtk-patches.patch
new file mode 100644
index 0000000..5afc342
--- /dev/null
+++ b/openwrt_patches/101-fstool-add-mtk-patches.patch
@@ -0,0 +1,18 @@
+diff -urN a/package/system/fstools/patches/0101-jffs2-mount-on-mtk-flash-workaround.patch b/package/system/fstools/patches/0101-jffs2-mount-on-mtk-flash-workaround.patch
+--- a/package/system/fstools/patches/0101-jffs2-mount-on-mtk-flash-workaround.patch	1970-01-01 08:00:00.000000000 +0800
++++ b/package/system/fstools/patches/0101-jffs2-mount-on-mtk-flash-workaround.patch	2020-07-30 18:16:13.178070668 +0800
+@@ -0,0 +1,14 @@
++Index: fstools-2016-12-04-84b530a7/libfstools/mtd.c
++===================================================================
++--- fstools-2016-12-04-84b530a7.orig/libfstools/mtd.c	2017-08-29 15:00:46.824333000 +0800
+++++ fstools-2016-12-04-84b530a7/libfstools/mtd.c	2017-08-29 15:02:52.848520000 +0800
++@@ -218,6 +218,9 @@
++ 	if (v->type == UBIVOLUME && deadc0de == 0xffffffff) {
++ 		return FS_JFFS2;
++ 	}
+++	if (v->type == NANDFLASH && deadc0de == 0xffffffff) {
+++		return FS_JFFS2;
+++	}
++ 
++ 	return FS_NONE;
++ }
diff --git a/openwrt_patches/103-generic-kernel-config-for-mtk-snand-driver.patch b/openwrt_patches/103-generic-kernel-config-for-mtk-snand-driver.patch
new file mode 100644
index 0000000..4df19fd
--- /dev/null
+++ b/openwrt_patches/103-generic-kernel-config-for-mtk-snand-driver.patch
@@ -0,0 +1,10 @@
+--- a/target/linux/generic/config-5.4
++++ b/target/linux/generic/config-5.4
+@@ -3273,6 +3273,7 @@ CONFIG_MTD_SPLIT_SUPPORT=y
+ # CONFIG_MTD_UIMAGE_SPLIT is not set
+ # CONFIG_MTD_VIRT_CONCAT is not set
+ # CONFIG_MTK_MMC is not set
++# CONFIG_MTK_SPI_NAND is not set
+ CONFIG_MULTIUSER=y
+ # CONFIG_MUTEX_SPIN_ON_OWNER is not set
+ # CONFIG_MV643XX_ETH is not set
diff --git a/openwrt_patches/200-mt7621-modify-image-load-address.patch b/openwrt_patches/200-mt7621-modify-image-load-address.patch
new file mode 100644
index 0000000..6c85a34
--- /dev/null
+++ b/openwrt_patches/200-mt7621-modify-image-load-address.patch
@@ -0,0 +1,11 @@
+--- a/target/linux/ramips/image/Makefile
++++ b/target/linux/ramips/image/Makefile
+@@ -16,7 +16,7 @@ DEVICE_VARS += SERCOMM_PAD JCG_MAXSIZE
+ 
+ loadaddr-y := 0x80000000
+ loadaddr-$(CONFIG_TARGET_ramips_rt288x) := 0x88000000
+-loadaddr-$(CONFIG_TARGET_ramips_mt7621) := 0x80001000
++loadaddr-$(CONFIG_TARGET_ramips_mt7621) := 0x81001000
+ 
+ ldrplatform-y := ralink
+ ldrplatform-$(CONFIG_TARGET_ramips_mt7621) := mt7621
diff --git a/prepare_sdk.sh b/prepare_sdk.sh
new file mode 100755
index 0000000..c3e7b87
--- /dev/null
+++ b/prepare_sdk.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+MTK_FEEDS_DIR=${1}
+
+OPENWRT_VER=`cat ./feeds.conf.default | grep "src-git packages" | awk -F ";openwrt" '{print $2}'`
+
+if [ -z ${1} ]; then
+        MTK_FEEDS_DIR=feeds/mtk_openwrt_feed
+fi
+
+remove_patches(){
+        echo "remove conflict patches"
+        for aa in `cat ${MTK_FEEDS_DIR}/remove.patch.list`
+        do
+                echo "rm $aa"
+                rm -rf ./$aa
+        done
+}
+
+sdk_patch(){
+	files=`find ${MTK_FEEDS_DIR}/openwrt_patches${OPENWRT_VER} -name "*.patch" | sort`
+	for file in $files
+	do
+		patch -f -p1 -i ${file} || exit 1
+	done
+}
+
+sdk_patch
+#cp mtk target to OpenWRT
+cp -fpR ${MTK_FEEDS_DIR}/target ./
+#remove patch if choose to not "keep" patch
+if [ -z ${2} ]; then
+	remove_patches
+fi
+
diff --git a/remove.patch.list b/remove.patch.list
new file mode 100644
index 0000000..46d32de
--- /dev/null
+++ b/remove.patch.list
@@ -0,0 +1,6 @@
+target/linux/generic/backport-5.4/760-net-ethernet-mediatek-Integrate-GDM-PSE-setup-operat.patch
+target/linux/generic/backport-5.4/761-net-ethernet-mediatek-Refine-the-timing-of-GDM-PSE-s.patch
+target/linux/generic/backport-5.4/762-net-ethernet-mediatek-Enable-GDM-GDMA_DROP_ALL-mode.patch
+target/linux/mediatek/patches-5.4/1011-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch
+target/linux/mediatek/patches-5.4/1012-pci-pcie-mediatek-add-support-for-coherent-DMA.patch
+target/linux/generic/pending-5.4/770-*.patch
diff --git a/target/linux/generic/pending-5.4/770-17-net-ethernet-mtk_eth_soc-add-mtk-dsa-tag-rx-offload.patch b/target/linux/generic/pending-5.4/770-17-net-ethernet-mtk_eth_soc-add-mtk-dsa-tag-rx-offload.patch
new file mode 100644
index 0000000..03f8057
--- /dev/null
+++ b/target/linux/generic/pending-5.4/770-17-net-ethernet-mtk_eth_soc-add-mtk-dsa-tag-rx-offload.patch
@@ -0,0 +1,182 @@
+Index: linux-5.4.77/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+===================================================================
+--- linux-5.4.77.orig/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ linux-5.4.77/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1354,9 +1354,21 @@ static int mtk_poll_rx(struct napi_struc
+ 			skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
+ 
+ 		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
+-		    (trxd.rxd2 & RX_DMA_VTAG))
+-			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+-					       RX_DMA_VID(trxd.rxd3));
++		    (trxd.rxd2 & RX_DMA_VTAG)) {
++			__vlan_hwaccel_put_tag(skb,
++					       htons(RX_DMA_VPID(trxd.rxd3)),
++					       RX_DMA_TCI(trxd.rxd3));
++
++			/* If netdev is attached to dsa switch, the special
++			 * tag inserted in VLAN field by switch hardware can
++			 * be offload by RX HW VLAN offload. Clears the VLAN
++			 * information from @skb to avoid unexpected 8021d
++			 * handler before packet enter dsa framework.
++			 */
++			if (netdev_uses_dsa(netdev))
++				__vlan_hwaccel_clear_tag(skb);
++		}
++
+ 		if (mtk_offload_check_rx(eth, skb, trxd.rxd4) == 0) {
+ 			skb_record_rx_queue(skb, 0);
+ 			napi_gro_receive(napi, skb);
+@@ -2050,19 +2062,32 @@ static netdev_features_t mtk_fix_feature
+ 		}
+ 	}
+ 
++	if ((features & NETIF_F_HW_VLAN_CTAG_TX) && netdev_uses_dsa(dev)) {
++		netdev_info(dev, "TX vlan offload cannot be enabled when dsa is attached.\n");
++
++		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
++	}
++
+ 	return features;
+ }
+ 
+ static int mtk_set_features(struct net_device *dev, netdev_features_t features)
+ {
++	struct mtk_mac *mac = netdev_priv(dev);
++	struct mtk_eth *eth = mac->hw;
+ 	int err = 0;
+ 
+-	if (!((dev->features ^ features) & NETIF_F_LRO))
++	if (!((dev->features ^ features) & MTK_SET_FEATURES))
+ 		return 0;
+ 
+ 	if (!(features & NETIF_F_LRO))
+ 		mtk_hwlro_netdev_disable(dev);
+ 
++	if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
++		mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
++	else
++		mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
++
+ 	return err;
+ }
+ 
+@@ -2326,6 +2351,15 @@ static int mtk_open(struct net_device *d
+ 
+ 		mtk_gdm_config(eth, gdm_config);
+ 
++		/* Indicates CDM to parse the MTK special tag from CPU */
++		if (netdev_uses_dsa(dev)) {
++			u32 val;
++			val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
++			mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
++			val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
++			mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
++		}
++
+ 		napi_enable(&eth->tx_napi);
+ 		napi_enable(&eth->rx_napi);
+ 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+@@ -2500,7 +2534,7 @@ static void mtk_dim_tx(struct work_struc
+ 
+ static int mtk_hw_init(struct mtk_eth *eth)
+ {
+-	int i, val, ret;
++	int i, ret;
+ 
+ 	if (test_and_set_bit(MTK_HW_INIT, &eth->state))
+ 		return 0;
+@@ -2555,12 +2589,6 @@ static int mtk_hw_init(struct mtk_eth *e
+ 	for (i = 0; i < MTK_MAC_COUNT; i++)
+ 		mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
+ 
+-	/* Indicates CDM to parse the MTK special tag from CPU
+-	 * which also is working out for untag packets.
+-	 */
+-	val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
+-	mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
+-
+ 	/* Enable RX VLan Offloading */
+ 	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
+ 
+Index: linux-5.4.77/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+===================================================================
+--- linux-5.4.77.orig/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ linux-5.4.77/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -42,6 +42,8 @@
+ 				 NETIF_F_SG | NETIF_F_TSO | \
+ 				 NETIF_F_TSO6 | \
+ 				 NETIF_F_IPV6_CSUM)
++#define MTK_SET_FEATURES	(NETIF_F_LRO | \
++				 NETIF_F_HW_VLAN_CTAG_RX)
+ #define MTK_HW_FEATURES_MT7628	(NETIF_F_SG | NETIF_F_RXCSUM)
+ #define NEXT_DESP_IDX(X, Y)	(((X) + 1) & ((Y) - 1))
+ 
+@@ -78,6 +80,10 @@
+ #define MTK_CDMQ_IG_CTRL	0x1400
+ #define MTK_CDMQ_STAG_EN	BIT(0)
+ 
++/* CDMP Ingress Control Register */
++#define MTK_CDMP_IG_CTRL	0x400
++#define MTK_CDMP_STAG_EN	BIT(0)
++
+ /* CDMP Exgress Control Register */
+ #define MTK_CDMP_EG_CTRL	0x404
+ 
+@@ -307,7 +313,9 @@
+ #define RX_DMA_VTAG		BIT(15)
+ 
+ /* QDMA descriptor rxd3 */
+-#define RX_DMA_VID(_x)		((_x) & 0xfff)
++#define RX_DMA_VID(_x)		((_x) & VLAN_VID_MASK)
++#define RX_DMA_TCI(_x)		((_x) & (VLAN_PRIO_MASK | VLAN_VID_MASK))
++#define RX_DMA_VPID(_x)		(((_x) >> 16) & 0xffff)
+ 
+ /* QDMA descriptor rxd4 */
+ #define MTK_RXD4_FOE_ENTRY	GENMASK(13, 0)
+Index: linux-5.4.77/net/dsa/tag_mtk.c
+===================================================================
+--- linux-5.4.77.orig/net/dsa/tag_mtk.c
++++ linux-5.4.77/net/dsa/tag_mtk.c
+@@ -73,22 +73,28 @@ static struct sk_buff *mtk_tag_rcv(struc
+ 	bool is_multicast_skb = is_multicast_ether_addr(dest) &&
+ 				!is_broadcast_ether_addr(dest);
+ 
+-	if (unlikely(!pskb_may_pull(skb, MTK_HDR_LEN)))
+-		return NULL;
++	if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) {
++		hdr = ntohs(skb->vlan_proto);
++		skb->vlan_proto = 0;
++		skb->vlan_tci = 0;
++	} else {
++		if (unlikely(!pskb_may_pull(skb, MTK_HDR_LEN)))
++			return NULL;
+ 
+-	/* The MTK header is added by the switch between src addr
+-	 * and ethertype at this point, skb->data points to 2 bytes
+-	 * after src addr so header should be 2 bytes right before.
+-	 */
+-	phdr = (__be16 *)(skb->data - 2);
+-	hdr = ntohs(*phdr);
++		/* The MTK header is added by the switch between src addr
++		 * and ethertype at this point, skb->data points to 2 bytes
++		 * after src addr so header should be 2 bytes right before.
++		 */
++		phdr = (__be16 *)(skb->data - 2);
++		hdr = ntohs(*phdr);
+ 
+-	/* Remove MTK tag and recalculate checksum. */
+-	skb_pull_rcsum(skb, MTK_HDR_LEN);
++		/* Remove MTK tag and recalculate checksum. */
++		skb_pull_rcsum(skb, MTK_HDR_LEN);
+ 
+-	memmove(skb->data - ETH_HLEN,
+-		skb->data - ETH_HLEN - MTK_HDR_LEN,
+-		2 * ETH_ALEN);
++		memmove(skb->data - ETH_HLEN,
++			skb->data - ETH_HLEN - MTK_HDR_LEN,
++			2 * ETH_ALEN);
++	}
+ 
+ 	/* Get source port information */
+ 	port = (hdr & MTK_HDR_RECV_SOURCE_PORT_MASK);
diff --git a/target/linux/mediatek/base-files/etc/inittab b/target/linux/mediatek/base-files/etc/inittab
new file mode 100644
index 0000000..4374da2
--- /dev/null
+++ b/target/linux/mediatek/base-files/etc/inittab
@@ -0,0 +1,3 @@
+::sysinit:/etc/init.d/rcS S boot
+::shutdown:/etc/init.d/rcS K shutdown
+ttyS0::respawnlate:/usr/libexec/login.sh
diff --git a/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7622-rfb1-ubi.dts b/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7622-rfb1-ubi.dts
new file mode 100644
index 0000000..bf59b83
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7622-rfb1-ubi.dts
@@ -0,0 +1,618 @@
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ *
+ * SPDX-License-Identifier: (GPL-2.0-only OR MIT)
+ */
+
+/dts-v1/;
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/gpio/gpio.h>
+
+#include "mt7622.dtsi"
+#include "mt6380.dtsi"
+
+/ {
+	model = "MT7622_MT7531 RFB";
+	compatible = "mediatek,mt7622,ubi";
+
+	aliases {
+		serial0 = &uart0;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+		bootargs = "earlycon=uart8250,mmio32,0x11002000 console=ttyS0,115200n1 swiotlb=512";
+	};
+
+	cpus {
+		cpu@0 {
+			proc-supply = <&mt6380_vcpu_reg>;
+			sram-supply = <&mt6380_vm_reg>;
+		};
+
+		cpu@1 {
+			proc-supply = <&mt6380_vcpu_reg>;
+			sram-supply = <&mt6380_vm_reg>;
+		};
+	};
+
+	gpio-keys {
+		compatible = "gpio-keys";
+
+		factory {
+			label = "factory";
+			linux,code = <BTN_0>;
+			gpios = <&pio 0 GPIO_ACTIVE_HIGH>;
+		};
+
+		wps {
+			label = "wps";
+			linux,code = <KEY_WPS_BUTTON>;
+			gpios = <&pio 102 GPIO_ACTIVE_HIGH>;
+		};
+	};
+
+	gsw: gsw@0 {
+		compatible = "mediatek,mt753x";
+		mediatek,ethsys = <&ethsys>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		green {
+			label = "bpi-r64:pio:green";
+			gpios = <&pio 89 GPIO_ACTIVE_HIGH>;
+		};
+
+		red {
+			label = "bpi-r64:pio:red";
+			gpios = <&pio 88 GPIO_ACTIVE_HIGH>;
+		};
+	};
+
+	memory {
+		reg = <0 0x40000000 0 0x40000000>;
+	};
+
+	reg_1p8v: regulator-1p8v {
+		compatible = "regulator-fixed";
+		regulator-name = "fixed-1.8V";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		regulator-always-on;
+	};
+
+	reg_3p3v: regulator-3p3v {
+		compatible = "regulator-fixed";
+		regulator-name = "fixed-3.3V";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-boot-on;
+		regulator-always-on;
+	};
+
+	reg_5v: regulator-5v {
+		compatible = "regulator-fixed";
+		regulator-name = "fixed-5V";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-boot-on;
+		regulator-always-on;
+	};
+};
+
+&btif {
+	status = "okay";
+};
+
+&cir {
+	pinctrl-names = "default";
+	pinctrl-0 = <&irrx_pins>;
+	status = "okay";
+};
+
+&eth {
+	status = "okay";
+	gmac0: mac@0 {
+		compatible = "mediatek,eth-mac";
+		reg = <0>;
+		phy-mode = "2500base-x";
+
+		fixed-link {
+			speed = <2500>;
+			full-duplex;
+			pause;
+		};
+	};
+
+	gmac1: mac@1 {
+		compatible = "mediatek,eth-mac";
+		reg = <1>;
+		phy-mode = "rgmii";
+
+		fixed-link {
+			speed = <1000>;
+			full-duplex;
+			pause;
+		};
+	};
+
+	mdio: mdio-bus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+	};
+};
+
+&gsw {
+	mediatek,mdio = <&mdio>;
+	mediatek,portmap = "llllw";
+	mediatek,mdio_master_pinmux = <0>;
+	reset-gpios = <&pio 54 0>;
+	interrupt-parent = <&pio>;
+	interrupts = <53 IRQ_TYPE_LEVEL_HIGH>;
+	status = "okay";
+
+	port5: port@5 {
+		compatible = "mediatek,mt753x-port";
+		reg = <5>;
+		phy-mode = "rgmii";
+		fixed-link {
+			speed = <1000>;
+			full-duplex;
+		};
+	};
+
+	port6: port@6 {
+		compatible = "mediatek,mt753x-port";
+		reg = <6>;
+		phy-mode = "sgmii";
+		fixed-link {
+			speed = <2500>;
+			full-duplex;
+		};
+	};
+};
+
+&i2c1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c1_pins>;
+	status = "okay";
+};
+
+&i2c2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c2_pins>;
+	status = "okay";
+};
+
+&mmc0 {
+	pinctrl-names = "default", "state_uhs";
+	pinctrl-0 = <&emmc_pins_default>;
+	pinctrl-1 = <&emmc_pins_uhs>;
+	status = "okay";
+	bus-width = <8>;
+	max-frequency = <50000000>;
+	cap-mmc-highspeed;
+	mmc-hs200-1_8v;
+	vmmc-supply = <&reg_3p3v>;
+	vqmmc-supply = <&reg_1p8v>;
+	assigned-clocks = <&topckgen CLK_TOP_MSDC30_0_SEL>;
+	assigned-clock-parents = <&topckgen CLK_TOP_UNIV48M>;
+	non-removable;
+};
+
+&mmc1 {
+	pinctrl-names = "default", "state_uhs";
+	pinctrl-0 = <&sd0_pins_default>;
+	pinctrl-1 = <&sd0_pins_uhs>;
+	status = "okay";
+	bus-width = <4>;
+	max-frequency = <50000000>;
+	cap-sd-highspeed;
+	r_smpl = <1>;
+	cd-gpios = <&pio 81 GPIO_ACTIVE_LOW>;
+	vmmc-supply = <&reg_3p3v>;
+	vqmmc-supply = <&reg_3p3v>;
+	assigned-clocks = <&topckgen CLK_TOP_MSDC30_1_SEL>;
+	assigned-clock-parents = <&topckgen CLK_TOP_UNIV48M>;
+};
+
+&nandc {
+	pinctrl-names = "default";
+	pinctrl-0 = <&parallel_nand_pins>;
+	status = "disabled";
+};
+
+&nor_flash {
+	pinctrl-names = "default";
+	pinctrl-0 = <&spi_nor_pins>;
+	status = "disabled";
+
+	flash@0 {
+		compatible = "jedec,spi-nor";
+		reg = <0>;
+	};
+};
+
+&pcie0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pcie0_pins>;
+	status = "okay";
+};
+
+&pcie1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pcie1_pins>;
+	status = "okay";
+};
+
+&pio {
+	/* Attention: GPIO 90 is used to switch between PCIe@1,0 and
+	 * SATA functions. i.e. output-high: PCIe, output-low: SATA
+	 */
+	asm_sel {
+		gpio-hog;
+		gpios = <90 GPIO_ACTIVE_HIGH>;
+		output-high;
+	};
+
+	/* eMMC is shared pin with parallel NAND */
+	emmc_pins_default: emmc-pins-default {
+		mux {
+			function = "emmc", "emmc_rst";
+			groups = "emmc";
+		};
+
+		/* "NDL0","NDL1","NDL2","NDL3","NDL4","NDL5","NDL6","NDL7",
+		 * "NRB","NCLE" pins are used as DAT0,DAT1,DAT2,DAT3,DAT4,
+		 * DAT5,DAT6,DAT7,CMD,CLK for eMMC respectively
+		 */
+		conf-cmd-dat {
+			pins = "NDL0", "NDL1", "NDL2",
+			       "NDL3", "NDL4", "NDL5",
+			       "NDL6", "NDL7", "NRB";
+			input-enable;
+			bias-pull-up;
+		};
+
+		conf-clk {
+			pins = "NCLE";
+			bias-pull-down;
+		};
+	};
+
+	emmc_pins_uhs: emmc-pins-uhs {
+		mux {
+			function = "emmc";
+			groups = "emmc";
+		};
+
+		conf-cmd-dat {
+			pins = "NDL0", "NDL1", "NDL2",
+			       "NDL3", "NDL4", "NDL5",
+			       "NDL6", "NDL7", "NRB";
+			input-enable;
+			drive-strength = <4>;
+			bias-pull-up;
+		};
+
+		conf-clk {
+			pins = "NCLE";
+			drive-strength = <4>;
+			bias-pull-down;
+		};
+	};
+
+	eth_pins: eth-pins {
+		mux {
+			function = "eth";
+			groups = "mdc_mdio", "rgmii_via_gmac2";
+		};
+	};
+
+	i2c1_pins: i2c1-pins {
+		mux {
+			function = "i2c";
+			groups =  "i2c1_0";
+		};
+	};
+
+	i2c2_pins: i2c2-pins {
+		mux {
+			function = "i2c";
+			groups =  "i2c2_0";
+		};
+	};
+
+	i2s1_pins: i2s1-pins {
+		mux {
+			function = "i2s";
+			groups =  "i2s_out_mclk_bclk_ws",
+				  "i2s1_in_data",
+				  "i2s1_out_data";
+		};
+
+		conf {
+			pins = "I2S1_IN", "I2S1_OUT", "I2S_BCLK",
+			       "I2S_WS", "I2S_MCLK";
+			drive-strength = <12>;
+			bias-pull-down;
+		};
+	};
+
+	irrx_pins: irrx-pins {
+		mux {
+			function = "ir";
+			groups =  "ir_1_rx";
+		};
+	};
+
+	irtx_pins: irtx-pins {
+		mux {
+			function = "ir";
+			groups =  "ir_1_tx";
+		};
+	};
+
+	/* Parallel nand is shared pin with eMMC */
+	parallel_nand_pins: parallel-nand-pins {
+		mux {
+			function = "flash";
+			groups = "par_nand";
+		};
+	};
+
+	pcie0_pins: pcie0-pins {
+		mux {
+			function = "pcie";
+			groups = "pcie0_pad_perst",
+				 "pcie0_1_waken",
+				 "pcie0_1_clkreq";
+		};
+	};
+
+	pcie1_pins: pcie1-pins {
+		mux {
+			function = "pcie";
+			groups = "pcie1_pad_perst",
+				 "pcie1_0_waken",
+				 "pcie1_0_clkreq";
+		};
+	};
+
+	pmic_bus_pins: pmic-bus-pins {
+		mux {
+			function = "pmic";
+			groups = "pmic_bus";
+		};
+	};
+
+	pwm7_pins: pwm1-2-pins {
+		mux {
+			function = "pwm";
+			groups = "pwm_ch7_2";
+		};
+	};
+
+	wled_pins: wled-pins {
+		mux {
+			function = "led";
+			groups = "wled";
+		};
+	};
+
+	sd0_pins_default: sd0-pins-default {
+		mux {
+			function = "sd";
+			groups = "sd_0";
+		};
+
+		/* "I2S2_OUT", "I2S4_IN", "I2S3_IN", "I2S2_IN",
+		 *  "I2S4_OUT", "I2S3_OUT" are used as DAT0, DAT1,
+		 *  DAT2, DAT3, CMD, CLK for SD respectively.
+		 */
+		conf-cmd-data {
+			pins = "I2S2_OUT", "I2S4_IN", "I2S3_IN",
+			       "I2S2_IN","I2S4_OUT";
+			input-enable;
+			drive-strength = <8>;
+			bias-pull-up;
+		};
+		conf-clk {
+			pins = "I2S3_OUT";
+			drive-strength = <12>;
+			bias-pull-down;
+		};
+		conf-cd {
+			pins = "TXD3";
+			bias-pull-up;
+		};
+	};
+
+	sd0_pins_uhs: sd0-pins-uhs {
+		mux {
+			function = "sd";
+			groups = "sd_0";
+		};
+
+		conf-cmd-data {
+			pins = "I2S2_OUT", "I2S4_IN", "I2S3_IN",
+			       "I2S2_IN","I2S4_OUT";
+			input-enable;
+			bias-pull-up;
+		};
+
+		conf-clk {
+			pins = "I2S3_OUT";
+			bias-pull-down;
+		};
+	};
+
+	/* Serial NAND is shared pin with SPI-NOR */
+	serial_nand_pins: serial-nand-pins {
+		mux {
+			function = "flash";
+			groups = "snfi";
+		};
+	};
+
+	spic0_pins: spic0-pins {
+		mux {
+			function = "spi";
+			groups = "spic0_0";
+		};
+	};
+
+	spic1_pins: spic1-pins {
+		mux {
+			function = "spi";
+			groups = "spic1_0";
+		};
+	};
+
+	/* SPI-NOR is shared pin with serial NAND */
+	spi_nor_pins: spi-nor-pins {
+		mux {
+			function = "flash";
+			groups = "spi_nor";
+		};
+	};
+
+	/* serial NAND is shared pin with SPI-NOR */
+	serial_nand_pins: serial-nand-pins {
+		mux {
+			function = "flash";
+			groups = "snfi";
+		};
+	};
+
+	uart0_pins: uart0-pins {
+		mux {
+			function = "uart";
+			groups = "uart0_0_tx_rx" ;
+		};
+	};
+
+	uart2_pins: uart2-pins {
+		mux {
+			function = "uart";
+			groups = "uart2_1_tx_rx" ;
+		};
+	};
+
+	watchdog_pins: watchdog-pins {
+		mux {
+			function = "watchdog";
+			groups = "watchdog";
+		};
+	};
+};
+
+&pwm {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pwm7_pins>;
+	status = "okay";
+};
+
+&pwrap {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pmic_bus_pins>;
+
+	status = "okay";
+};
+
+&sata {
+	status = "disabled";
+};
+
+&sata_phy {
+	status = "disabled";
+};
+
+&snand {
+	pinctrl-names = "default";
+	pinctrl-0 = <&serial_nand_pins>;
+	status = "okay";
+	mediatek,quad-spi;
+
+	partitions {
+		compatible = "fixed-partitions";
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		partition@0 {
+			label = "BL2";
+			reg = <0x00000 0x0080000>;
+			read-only;
+		};
+
+		partition@80000 {
+			label = "FIP";
+			reg = <0x80000 0x0200000>;
+		};
+
+		partition@280000 {
+			label = "Config";
+			reg = <0x280000 0x0080000>;
+		};
+
+		factory: partition@300000 {
+			label = "Factory";
+			reg = <0x300000 0x0100000>;
+		};
+
+		partition@400000 {
+			label = "ubi";
+			reg = <0x400000 0x2400000>;
+		};
+	};
+};
+
+&spi0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&spic0_pins>;
+	status = "okay";
+};
+
+&spi1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&spic1_pins>;
+	status = "okay";
+};
+
+&ssusb {
+	vusb33-supply = <&reg_3p3v>;
+	vbus-supply = <&reg_5v>;
+	status = "okay";
+};
+
+&u3phy {
+	status = "okay";
+};
+
+&uart0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_pins>;
+	status = "okay";
+};
+
+&uart2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart2_pins>;
+	status = "okay";
+};
+
+&watchdog {
+	pinctrl-names = "default";
+	pinctrl-0 = <&watchdog_pins>;
+	status = "okay";
+};
+
+&wmac {
+	mediatek,mtd-eeprom = <&factory 0x0000>;
+	status = "okay";
+};
diff --git a/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986-fpga-ubi.dts b/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986-fpga-ubi.dts
new file mode 100644
index 0000000..1f4125c
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986-fpga-ubi.dts
@@ -0,0 +1,170 @@
+/dts-v1/;
+#include "mt7986-fpga.dtsi"
+/ {
+	model = "MediaTek MT7986 FPGA (UBI)";
+	compatible = "mediatek,mt7986-fpga,ubi";
+	chosen {
+		bootargs = "console=ttyS0,115200n1 loglevel=8  \
+				earlycon=uart8250,mmio32,0x11002000";
+	};
+
+	memory {
+		// fpga ddr2: 128MB*2
+		reg = <0 0x40000000 0 0x10000000>;
+	};
+
+        wsys_adie: wsys_adie@0 {
+		// fpga cases need to manually change adie_id / sku_type for dvt only
+                compatible = "mediatek,rebb-mt7986-adie";
+                adie_id = <7976>;
+                sku_type = <6000>;
+        };
+};
+
+&uart0 {
+	status = "okay";
+};
+
+&spi0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&spi_flash_pins>;
+	status = "okay";
+	spi_nor@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "jedec,spi-nor";
+		reg = <0>;
+		spi-max-frequency = <500000>;
+
+		partition@00000 {
+			label = "BL2";
+			reg = <0x00000 0x0060000>;
+		};
+		partition@60000 {
+			label = "u-boot-env";
+			reg = <0x60000 0x0010000>;
+		};
+		partition@70000 {
+			label = "Factory";
+			reg = <0x70000 0x00B0000>;
+		};
+		partition@120000 {
+			label = "BL31";
+			reg = <0x120000 0x0010000>;
+		};
+		partition@130000 {
+			label = "u-boot";
+			reg = <0x130000 0x00D0000>;
+		};
+		partition@200000 {
+			label = "firmware";
+			reg = <0x200000 0xE00000>;
+		};
+	};
+	spi_nand@1 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "spi-nand";
+		reg = <1>;
+		spi-max-frequency = <500000>;
+
+		partition@00000 {
+			label = "BL2";
+			reg = <0x00000 0x0100000>;
+		};
+		partition@100000 {
+			label = "u-boot-env";
+			reg = <0x100000 0x0080000>;
+		};
+		partition@180000 {
+			label = "Factory";
+			reg = <0x180000 0x00200000>;
+		};
+		partition@380000 {
+			label = "BL31";
+			reg = <0x380000 0x0080000>;
+		};
+		partition@400000 {
+			label = "u-boot";
+			reg = <0x400000 0x0180000>;
+		};
+		partition@580000 {
+			label = "firmware";
+			reg = <0x580000 0x7a80000>;
+		};
+	};
+};
+
+&spi1 {
+	pinctrl-names = "default";
+	/* pin shared with snfi */
+	pinctrl-0 = <&spic_pins>;
+	status = "disabled";
+};
+
+&pio {
+	spi_flash_pins: spi0-pins {
+		mux {
+			function = "flash";
+			groups = "spi0", "spi0_wp_hold";
+		};
+	};
+
+	snfi_pins: snfi-pins {
+		mux {
+			function = "flash";
+			groups = "snfi";
+		};
+	};
+
+	spic_pins: spi1-pins {
+		mux {
+			function = "spi";
+			groups = "spi1_1";
+		};
+	};
+};
+
+&watchdog {
+	status = "disabled";
+};
+
+&snand {
+	pinctrl-names = "default";
+	/* pin shared with spic */
+	pinctrl-0 = <&snfi_pins>;
+	status = "okay";
+	mediatek,quad-spi;
+
+	partitions {
+		compatible = "fixed-partitions";
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		partition@0 {
+			label = "BL2";
+			reg = <0x00000 0x0100000>;
+			read-only;
+		};
+
+		partition@100000 {
+			label = "u-boot-env";
+			reg = <0x0100000 0x0080000>;
+		};
+
+		partition@180000 {
+			label = "Factory";
+			reg = <0x180000 0x0200000>;
+		};
+
+		partition@380000 {
+			label = "FIP";
+			reg = <0x380000 0x0200000>;
+		};
+
+		partition@580000 {
+			label = "ubi";
+			reg = <0x580000 0x4000000>;
+		};
+	};
+};
diff --git a/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986-fpga.dts b/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986-fpga.dts
new file mode 100644
index 0000000..5f8ef81
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986-fpga.dts
@@ -0,0 +1,170 @@
+/dts-v1/;
+#include "mt7986-fpga.dtsi"
+/ {
+	model = "MediaTek MT7986 FPGA";
+	compatible = "mediatek,mt7986-fpga";
+	chosen {
+		bootargs = "console=ttyS0,115200n1 loglevel=8  \
+				earlycon=uart8250,mmio32,0x11002000";
+	};
+
+	memory {
+		// fpga ddr2: 128MB*2
+		reg = <0 0x40000000 0 0x10000000>;
+	};
+
+        wsys_adie: wsys_adie@0 {
+		// fpga cases need to manual change adie_id / sku_type for dvt only
+                compatible = "mediatek,rebb-mt7986-adie";
+                adie_id = <7976>;
+                sku_type = <6000>;
+        };
+};
+
+&uart0 {
+	status = "okay";
+};
+
+&spi0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&spi_flash_pins>;
+	status = "okay";
+	spi_nor@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "jedec,spi-nor";
+		reg = <0>;
+		spi-max-frequency = <500000>;
+
+		partition@00000 {
+			label = "BL2";
+			reg = <0x00000 0x0060000>;
+		};
+		partition@60000 {
+			label = "u-boot-env";
+			reg = <0x60000 0x0010000>;
+		};
+		partition@70000 {
+			label = "Factory";
+			reg = <0x70000 0x00B0000>;
+		};
+		partition@120000 {
+			label = "BL31";
+			reg = <0x120000 0x0010000>;
+		};
+		partition@130000 {
+			label = "u-boot";
+			reg = <0x130000 0x00D0000>;
+		};
+		partition@200000 {
+			label = "firmware";
+			reg = <0x200000 0xE00000>;
+		};
+	};
+	spi_nand@1 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "spi-nand";
+		reg = <1>;
+		spi-max-frequency = <500000>;
+
+		partition@00000 {
+			label = "BL2";
+			reg = <0x00000 0x0100000>;
+		};
+		partition@100000 {
+			label = "u-boot-env";
+			reg = <0x100000 0x0080000>;
+		};
+		partition@180000 {
+			label = "Factory";
+			reg = <0x180000 0x00200000>;
+		};
+		partition@380000 {
+			label = "BL31";
+			reg = <0x380000 0x0080000>;
+		};
+		partition@400000 {
+			label = "u-boot";
+			reg = <0x400000 0x0180000>;
+		};
+		partition@580000 {
+			label = "firmware";
+			reg = <0x580000 0x7a80000>;
+		};
+	};
+};
+
+&spi1 {
+	pinctrl-names = "default";
+	/* pin shared with snfi */
+	pinctrl-0 = <&spic_pins>;
+	status = "disabled";
+};
+
+&pio {
+	spi_flash_pins: spi0-pins {
+		mux {
+			function = "flash";
+			groups = "spi0", "spi0_wp_hold";
+		};
+	};
+
+	snfi_pins: snfi-pins {
+		mux {
+			function = "flash";
+			groups = "snfi";
+		};
+	};
+
+	spic_pins: spi1-pins {
+		mux {
+			function = "spi";
+			groups = "spi1_1";
+		};
+	};
+};
+
+&watchdog {
+	status = "disabled";
+};
+
+&snand {
+	pinctrl-names = "default";
+	/* pin shared with spic */
+	pinctrl-0 = <&snfi_pins>;
+	status = "okay";
+	mediatek,quad-spi;
+
+	partitions {
+		compatible = "fixed-partitions";
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		partition@0 {
+			label = "BL2";
+			reg = <0x00000 0x0080000>;
+			read-only;
+		};
+
+		partition@80000 {
+			label = "FIP";
+			reg = <0x80000 0x0200000>;
+		};
+
+		partition@280000 {
+			label = "u-boot-env";
+			reg = <0x280000 0x0080000>;
+		};
+
+		partition@300000 {
+			label = "Factory";
+			reg = <0x300000 0x0080000>;
+		};
+
+		partition@380000 {
+			label = "firmware";
+			reg = <0x380000 0x7c00000>;
+		};
+	};
+};
diff --git a/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986-fpga.dtsi b/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986-fpga.dtsi
new file mode 100644
index 0000000..d8fa26d
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986-fpga.dtsi
@@ -0,0 +1,418 @@
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Author: Sam.Shih <sam.shih@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/reset/ti-syscon.h>
+/ {
+	compatible = "mediatek,mt7986-fpga";
+	interrupt-parent = <&gic>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53";
+			enable-method = "psci";
+			reg = <0x0>;
+		};
+
+		cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53";
+			enable-method = "psci";
+			reg = <0x1>;
+		};
+	};
+
+	wed: wed@15010000 {
+		compatible = "mediatek,wed";
+		wed_num = <2>;
+		/* add this property for wed get the pci slot number. */
+		pci_slot_map = <0>, <1>;
+		reg = <0 0x15010000 0 0x1000>,
+		      <0 0x15011000 0 0x1000>;
+		interrupt-parent = <&gic>;
+		interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	wed2: wed2@15011000 {
+		compatible = "mediatek,wed2";
+		wed_num = <2>;
+		reg = <0 0x15010000 0 0x1000>,
+		      <0 0x15011000 0 0x1000>;
+		interrupt-parent = <&gic>;
+		interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	wdma: wdma@15104800 {
+		compatible = "mediatek,wed-wdma";
+		reg = <0 0x15104800 0 0x400>,
+		      <0 0x15104c00 0 0x400>;
+	};
+
+	ap2woccif: ap2woccif@151A5000 {
+		compatible = "mediatek,ap2woccif";
+		reg = <0 0x151A5000 0 0x1000>,
+		      <0 0x151AD000 0 0x1000>;
+		interrupt-parent = <&gic>;
+		interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
+        };
+
+	wocpu0_ilm: wocpu0_ilm@151E0000 {
+		compatible = "mediatek,wocpu0_ilm";
+		reg = <0 0x151E0000 0 0x8000>;
+	};
+
+        wocpu1_ilm: wocpu1_ilm@151F0000 {
+                compatible = "mediatek,wocpu1_ilm";
+                reg = <0 0x151F0000 0 0x8000>;
+        };
+
+	wocpu_dlm: wocpu_dlm@151E8000 {
+		compatible = "mediatek,wocpu_dlm";
+		reg = <0 0x151E8000 0 0x2000>,
+		      <0 0x151F8000 0 0x2000>;
+
+		resets = <&ethsysrst 0>;
+		reset-names = "wocpu_rst";
+	};
+
+	cpu_boot: wocpu_boot@15194000 {
+		compatible = "mediatek,wocpu_boot";
+		reg = <0 0x15194000 0 0x1000>;
+	};
+
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		/* 192 KiB reserved for ARM Trusted Firmware (BL31) */
+		secmon_reserved: secmon@43000000 {
+			reg = <0 0x43000000 0 0x30000>;
+			no-map;
+		};
+
+		wmcpu_emi: wmcpu-reserved@4FC00000 {
+			compatible = "mediatek,wmcpu-reserved";
+			no-map;
+			reg = <0 0x4FC00000 0 0x00100000>;
+		};
+
+		wocpu0_emi: wocpu0_emi@4FD00000 {
+			compatible = "mediatek,wocpu0_emi";
+			no-map;
+			reg = <0 0x4FD00000 0 0x40000>;
+			shared = <0>;
+		};
+
+		wocpu1_emi: wocpu1_emi@4FD40000 {
+			compatible = "mediatek,wocpu1_emi";
+			no-map;
+			reg = <0 0x4FD40000 0 0x40000>;
+			shared = <0>;
+		};
+
+		wocpu_data: wocpu_data@4FD80000 {
+			compatible = "mediatek,wocpu_data";
+			no-map;
+			reg = <0 0x4FD80000 0 0x200000>;
+			shared = <1>;
+		};
+	};
+
+	psci {
+		compatible  = "arm,psci-0.2";
+		method      = "smc";
+	};
+
+	system_clk: dummy13m {
+		compatible = "fixed-clock";
+		clock-frequency = <13000000>;
+		#clock-cells = <0>;
+	};
+
+	rtc_clk: dummy32k {
+		compatible = "fixed-clock";
+		clock-frequency = <32000>;
+		#clock-cells = <0>;
+	};
+
+	uart_clk: dummy12m {
+		compatible = "fixed-clock";
+		clock-frequency = <12000000>;
+		#clock-cells = <0>;
+	};
+
+	gpt_clk: dummy6m {
+		compatible = "fixed-clock";
+		clock-frequency = <6000000>;
+		#clock-cells = <0>;
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupt-parent = <&gic>;
+		clock-frequency = <12000000>;
+		interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+
+	};
+
+	watchdog: watchdog@1001c000 {
+		compatible = "mediatek,mt7622-wdt",
+			     "mediatek,mt6589-wdt";
+		reg = <0 0x1001c000 0 0x1000>;
+		interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+		#reset-cells = <1>;
+	};
+
+	gic: interrupt-controller@c000000 {
+		compatible = "arm,gic-v3";
+		#interrupt-cells = <3>;
+		interrupt-parent = <&gic>;
+		interrupt-controller;
+		reg = <0 0x0c000000 0 0x40000>,  /* GICD */
+		      <0 0x0c080000 0 0x200000>; /* GICR */
+
+		interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	uart0: serial@11002000 {
+		compatible = "mediatek,mt7986-uart",
+			     "mediatek,mt6577-uart";
+		reg = <0 0x11002000 0 0x400>;
+		interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&uart_clk>;
+		status = "disabled";
+	};
+
+	uart1: serial@11003000 {
+		compatible = "mediatek,mt7986-uart",
+			     "mediatek,mt6577-uart";
+		reg = <0 0x11003000 0 0x400>;
+		interrupts = <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&uart_clk>;
+		status = "disabled";
+	};
+
+	uart2: serial@11004000 {
+		compatible = "mediatek,mt7986-uart",
+			     "mediatek,mt6577-uart";
+		reg = <0 0x11004000 0 0x400>;
+		interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&uart_clk>;
+		status = "disabled";
+	};
+
+	pcie: pcie@11280000 {
+		compatible = "mediatek,mt7986-pcie";
+		device_type = "pci";
+		reg = <0 0x11280000 0 0x5000>;
+		reg-names = "port0";
+		#address-cells = <3>;
+		#size-cells = <2>;
+		interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
+		bus-range = <0x00 0xff>;
+		ranges = <0x82000000 0 0x20000000
+			  0x0 0x20000000 0 0x10000000>;
+
+		pcie0: pcie@0,0 {
+			device_type = "pci";
+			reg = <0x0000 0 0 0 0>;
+			#address-cells = <3>;
+			#size-cells = <2>;
+			ranges;
+			#interrupt-cells = <1>;
+			interrupt-map-mask = <0 0 0 7>;
+			interrupt-map = <0 0 0 1 &pcie_intc0 0>,
+					<0 0 0 2 &pcie_intc0 1>,
+					<0 0 0 3 &pcie_intc0 2>,
+					<0 0 0 4 &pcie_intc0 3>;
+			pcie_intc0: interrupt-controller {
+				interrupt-controller;
+				#address-cells = <0>;
+				#interrupt-cells = <1>;
+			};
+		};
+	};
+
+	pio: pinctrl@1001f000 {
+		compatible = "mediatek,mt7986-pinctrl";
+		reg = <0 0x1001f000 0 0x1000>,
+		      <0 0x11c30000 0 0x1000>,
+                      <0 0x11c40000 0 0x1000>,
+                      <0 0x11e20000 0 0x1000>,
+                      <0 0x11e30000 0 0x1000>,
+                      <0 0x11f00000 0 0x1000>,
+                      <0 0x11f10000 0 0x1000>,
+		      <0 0x1000b000 0 0x1000>;
+		reg-names = "gpio_base", "iocfg_rt_base", "iocfg_rb_base",
+                            "iocfg_lt_base", "iocfg_lb_base", "iocfg_tr_base",
+                            "iocfg_tl_base", "eint";
+		gpio-controller;
+		#gpio-cells = <2>;
+		gpio-ranges = <&pio 0 0 100>;
+		interrupt-controller;
+		interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-parent = <&gic>;
+		#interrupt-cells = <2>;
+	};
+
+        ethsys: syscon@15000000 {
+                #address-cells = <1>;
+                #size-cells = <1>;
+                compatible = "mediatek,mt7986-ethsys",
+                             "syscon";
+                reg = <0 0x15000000 0 0x1000>;
+                #clock-cells = <1>;
+                #reset-cells = <1>;
+
+		ethsysrst: reset-controller {
+			compatible = "ti,syscon-reset";
+			#reset-cells = <1>;
+			ti,reset-bits = <0x34 4 0x34 4 0x34 4 (ASSERT_SET | DEASSERT_CLEAR | STATUS_SET)>;
+		};
+        };
+
+        eth: ethernet@15100000 {
+                compatible = "mediatek,mt7986-eth";
+                reg = <0 0x15100000 0 0x80000>;
+                interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
+                             <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>,
+                             <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+                             <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>;
+                mediatek,ethsys = <&ethsys>;
+                #reset-cells = <1>;
+                #address-cells = <1>;
+                #size-cells = <0>;
+                status = "disabled";
+        };
+
+	snand: snfi@11005000 {
+		compatible = "mediatek,mt7986-snand";
+		reg = <0 0x11005000 0 0x1000>, <0 0x11006000 0 0x1000>;
+		reg-names = "nfi", "ecc";
+		interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>;
+		clock-names = "nfi_clk", "pad_clk", "ecc_clk";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+	};
+
+	wed_pcie: wed_pcie@10003000 {
+		compatible = "mediatek,wed_pcie";
+		reg = <0 0x10003000 0 0x10>;
+	};
+
+	wbsys: wbsys@18000000 {
+		compatible = "mediatek,wbsys";
+		reg = <0 0x18000000 0  0x1000000>;
+		interrupts = <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+					 <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>,
+					 <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>,
+					 <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>;
+		chip_id = <0x7986>;
+	};
+
+	spi0: spi@1100a000 {
+		compatible = "mediatek,ipm-spi";
+		reg = <0 0x1100a000 0 0x100>;
+		interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
+		status = "disabled";
+	};
+
+	spi1: spi@1100b000 {
+		compatible = "mediatek,ipm-spi";
+		reg = <0 0x1100b000 0 0x100>;
+		interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
+		status = "disabled";
+	};
+
+	consys: consys@10000000 {
+		compatible = "mediatek,mt7986-consys";
+		reg = <0 0x10000000 0 0x8600000>;
+		memory-region = <&wmcpu_emi>;
+	};
+
+	xhci: xhci@11200000 {
+		compatible = "mediatek,mt7986-xhci",
+			     "mediatek,mtk-xhci";
+		reg = <0 0x11200000 0 0x2e00>,
+		      <0 0x11203e00 0 0x0100>;
+		reg-names = "mac", "ippc";
+		interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
+		phys = <&u2port0 PHY_TYPE_USB2>;
+		clocks = <&system_clk>,
+		         <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>;
+		clock-names = "sys_ck",
+			      "xhci_ck",
+			      "ref_ck",
+			      "mcu_ck",
+			      "dma_ck";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		mediatek,u3p-dis-msk = <0x01>;
+		status = "okay";
+	};
+
+	usbtphy: usb-phy@11203e00 {
+		compatible = "mediatek,a60810-u2phy",
+			     "mediatek,a60931-u3phy",
+			     "mediatek,a60xxx-usbphy";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+		status = "okay";
+
+		u2port0: usb-phy@11203ed0 {
+			reg = <0 0x11203ed0 0 0x008>;
+			clocks = <&system_clk>;
+			clock-names = "ref";
+			#phy-cells = <1>;
+			status = "okay";
+		};
+
+		u3port0: usb-phy@11203ed8 {
+			reg = <0 0x11203ed8 0 0x008>;
+			clocks = <&system_clk>;
+			clock-names = "ref";
+			#phy-cells = <1>;
+			status = "disabled";
+		};
+
+		u2port1: usb-phy@11203ee0 {
+			reg = <0 0x11203ee0 0 0x008>;
+			clocks = <&system_clk>;
+			clock-names = "ref";
+			#phy-cells = <1>;
+			status = "disabled";
+		};
+	};
+};
diff --git a/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986a-mt7975-ax6000-rfb1.dts b/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986a-mt7975-ax6000-rfb1.dts
new file mode 100644
index 0000000..58d11b6
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986a-mt7975-ax6000-rfb1.dts
@@ -0,0 +1,12 @@
+/dts-v1/;
+#include "mt7986a-rfb.dtsi"
+/ {
+	model = "MediaTek MT7986a RFB";
+	compatible = "mediatek,mt7986a-rfb";
+
+	wsys_adie: wsys_adie@0 {
+		compatible = "mediatek,rebb-mt7986-adie";
+		adie_id = <7975>;
+		sku_type = <6000>;
+	};
+};
diff --git a/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986a-mt7976-ax6000-rfb2.dts b/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986a-mt7976-ax6000-rfb2.dts
new file mode 100644
index 0000000..60d2372
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986a-mt7976-ax6000-rfb2.dts
@@ -0,0 +1,12 @@
+/dts-v1/;
+#include "mt7986a-rfb.dtsi"
+/ {
+	model = "MediaTek MT7986a RFB";
+	compatible = "mediatek,mt7986a-rfb";
+
+	wsys_adie: wsys_adie@0 {
+		compatible = "mediatek,rebb-mt7986-adie";
+		adie_id = <7976>;
+		sku_type = <6000>;
+	};
+};
diff --git a/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986a-mt7976-ax7800-rfb2.dts b/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986a-mt7976-ax7800-rfb2.dts
new file mode 100644
index 0000000..5790653
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986a-mt7976-ax7800-rfb2.dts
@@ -0,0 +1,12 @@
+/dts-v1/;
+#include "mt7986a-rfb.dtsi"
+/ {
+	model = "MediaTek MT7986a RFB";
+	compatible = "mediatek,mt7986a-rfb";
+
+	wsys_adie: wsys_adie@0 {
+		compatible = "mediatek,rebb-mt7986-adie";
+		adie_id = <7976>;
+		sku_type = <7800>;
+	};
+};
diff --git a/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986a-rfb.dtsi b/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986a-rfb.dtsi
new file mode 100644
index 0000000..6013acb
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986a-rfb.dtsi
@@ -0,0 +1,248 @@
+/dts-v1/;
+#include "mt7986a.dtsi"
+/ {
+	model = "MediaTek MT7986a RFB";
+	compatible = "mediatek,mt7986a-rfb";
+	chosen {
+		bootargs = "console=ttyS0,115200n1 loglevel=8  \
+				earlycon=uart8250,mmio32,0x11002000";
+	};
+
+	memory {
+		// fpga ddr2: 128MB*2
+		reg = <0 0x40000000 0 0x10000000>;
+	};
+};
+
+&uart0 {
+	status = "okay";
+};
+
+&watchdog {
+	status = "okay";
+};
+
+&eth {
+        status = "okay";
+
+        gmac0: mac@0 {
+                compatible = "mediatek,eth-mac";
+                reg = <0>;
+                phy-mode = "2500base-x";
+
+                fixed-link {
+                        speed = <2500>;
+                        full-duplex;
+                        pause;
+                };
+        };
+
+        gmac1: mac@1 {
+                compatible = "mediatek,eth-mac";
+                reg = <1>;
+                phy-mode = "2500base-x";
+
+                fixed-link {
+                        speed = <2500>;
+                        full-duplex;
+                        pause;
+                };
+        };
+
+        mdio: mdio-bus {
+                #address-cells = <1>;
+                #size-cells = <0>;
+
+                switch@0 {
+                        compatible = "mediatek,mt7531";
+                        reg = <0>;
+                        reset-gpios = <&pio 5 0>;
+
+                        ports {
+                                #address-cells = <1>;
+                                #size-cells = <0>;
+
+                                port@0 {
+                                        reg = <0>;
+                                        label = "lan1";
+                                };
+
+                                port@1 {
+                                        reg = <1>;
+                                        label = "lan2";
+                                };
+
+                                port@2 {
+                                        reg = <2>;
+                                        label = "lan3";
+                                };
+
+                                port@3 {
+                                        reg = <3>;
+                                        label = "lan4";
+                                };
+
+                                port@4 {
+                                        reg = <4>;
+                                        label = "wan";
+                                };
+
+                                port@6 {
+                                        reg = <6>;
+                                        label = "cpu";
+                                        ethernet = <&gmac0>;
+                                        phy-mode = "2500base-x";
+
+                                        fixed-link {
+                                                speed = <2500>;
+                                                full-duplex;
+                                                pause;
+                                        };
+                                };
+                        };
+                };
+        };
+};
+
+&hnat {
+	mtketh-wan = "eth1";
+	mtketh-max-gmac = <2>;
+	status = "okay";
+};
+
+&spi0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&spi_flash_pins>;
+	cs-gpios = <0>, <0>;
+	status = "okay";
+	spi_nor@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "jedec,spi-nor";
+		reg = <0>;
+		spi-max-frequency = <20000000>;
+		spi-tx-bus-width = <4>;
+		spi-rx-bus-width = <4>;
+
+		partition@00000 {
+			label = "BL2";
+			reg = <0x00000 0x0040000>;
+		};
+		partition@40000 {
+			label = "u-boot-env";
+			reg = <0x40000 0x0010000>;
+		};
+		partition@50000 {
+			label = "Factory";
+			reg = <0x50000 0x00B0000>;
+		};
+		partition@100000 {
+			label = "FIP";
+			reg = <0x100000 0x0080000>;
+		};
+		partition@180000 {
+			label = "firmware";
+			reg = <0x180000 0xE00000>;
+		};
+	};
+	spi_nand@1 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "spi-nand";
+		reg = <1>;
+		spi-max-frequency = <20000000>;
+		spi-tx-bus-width = <4>;
+		spi-rx-bus-width = <4>;
+
+		partition@00000 {
+			label = "BL2";
+			reg = <0x00000 0x0100000>;
+		};
+		partition@100000 {
+			label = "u-boot-env";
+			reg = <0x100000 0x0080000>;
+		};
+		partition@180000 {
+			label = "Factory";
+			reg = <0x180000 0x00200000>;
+		};
+		partition@380000 {
+			label = "FIP";
+			reg = <0x380000 0x0200000>;
+		};
+		partition@580000 {
+			label = "ubi";
+			reg = <0x580000 0x4000000>;
+		};
+	};
+};
+
+&snand {
+	pinctrl-names = "default";
+	/* pin shared with spic */
+	pinctrl-0 = <&snfi_pins>;
+	status = "okay";
+	mediatek,quad-spi;
+
+	partitions {
+		compatible = "fixed-partitions";
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		partition@0 {
+			label = "BL2";
+			reg = <0x00000 0x0100000>;
+			read-only;
+		};
+
+		partition@100000 {
+			label = "u-boot-env";
+			reg = <0x0100000 0x0080000>;
+		};
+
+		partition@180000 {
+			label = "Factory";
+			reg = <0x180000 0x0200000>;
+		};
+
+		partition@380000 {
+			label = "FIP";
+			reg = <0x380000 0x0200000>;
+		};
+
+		partition@580000 {
+			label = "ubi";
+			reg = <0x580000 0x4000000>;
+		};
+	};
+};
+
+&spi1 {
+	pinctrl-names = "default";
+	/* pin shared with snfi */
+	pinctrl-0 = <&spic_pins>;
+	status = "okay";
+};
+
+&pio {
+	spi_flash_pins: spi0-pins {
+		mux {
+			function = "flash";
+			groups = "spi0", "spi0_wp_hold";
+		};
+	};
+
+	snfi_pins: snfi-pins {
+		mux {
+			function = "flash";
+			groups = "snfi";
+		};
+	};
+
+	spic_pins: spi1-pins {
+		mux {
+			function = "spi";
+			groups = "spi1_1";
+		};
+	};
+};
diff --git a/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
new file mode 100644
index 0000000..b0891ad
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
@@ -0,0 +1,485 @@
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Author: Sam.Shih <sam.shih@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/reset/ti-syscon.h>
+/ {
+	compatible = "mediatek,mt7986a-rfb";
+	interrupt-parent = <&gic>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53";
+			enable-method = "psci";
+			reg = <0x0>;
+		};
+
+		cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53";
+			enable-method = "psci";
+			reg = <0x1>;
+		};
+
+		cpu@2 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53";
+			enable-method = "psci";
+			reg = <0x2>;
+		};
+
+		cpu@3 {
+			device_type = "cpu";
+			enable-method = "psci";
+			compatible = "arm,cortex-a53";
+			reg = <0x3>;
+		};
+	};
+
+	wed: wed@15010000 {
+		compatible = "mediatek,wed";
+		wed_num = <2>;
+		/* add this property for wed get the pci slot number. */
+		pci_slot_map = <0>, <1>;
+		reg = <0 0x15010000 0 0x1000>,
+		      <0 0x15011000 0 0x1000>;
+		interrupt-parent = <&gic>;
+		interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	wed2: wed2@15011000 {
+		compatible = "mediatek,wed2";
+		wed_num = <2>;
+		reg = <0 0x15010000 0 0x1000>,
+		      <0 0x15011000 0 0x1000>;
+		interrupt-parent = <&gic>;
+		interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	wdma: wdma@15104800 {
+		compatible = "mediatek,wed-wdma";
+		reg = <0 0x15104800 0 0x400>,
+		      <0 0x15104c00 0 0x400>;
+	};
+
+	ap2woccif: ap2woccif@151A5000 {
+		compatible = "mediatek,ap2woccif";
+		reg = <0 0x151A5000 0 0x1000>,
+		      <0 0x151AD000 0 0x1000>;
+		interrupt-parent = <&gic>;
+		interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
+        };
+
+	wocpu0_ilm: wocpu0_ilm@151E0000 {
+		compatible = "mediatek,wocpu0_ilm";
+		reg = <0 0x151E0000 0 0x8000>;
+	};
+
+        wocpu1_ilm: wocpu1_ilm@151F0000 {
+                compatible = "mediatek,wocpu1_ilm";
+                reg = <0 0x151F0000 0 0x8000>;
+        };
+
+	wocpu_dlm: wocpu_dlm@151E8000 {
+		compatible = "mediatek,wocpu_dlm";
+		reg = <0 0x151E8000 0 0x2000>,
+		      <0 0x151F8000 0 0x2000>;
+
+		resets = <&ethsysrst 0>;
+		reset-names = "wocpu_rst";
+	};
+
+	cpu_boot: wocpu_boot@15194000 {
+		compatible = "mediatek,wocpu_boot";
+		reg = <0 0x15194000 0 0x1000>;
+	};
+
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		/* 192 KiB reserved for ARM Trusted Firmware (BL31) */
+		secmon_reserved: secmon@43000000 {
+			reg = <0 0x43000000 0 0x30000>;
+			no-map;
+		};
+
+		wmcpu_emi: wmcpu-reserved@4FC00000 {
+			compatible = "mediatek,wmcpu-reserved";
+			no-map;
+			reg = <0 0x4FC00000 0 0x00100000>;
+		};
+
+		wocpu0_emi: wocpu0_emi@4FD00000 {
+			compatible = "mediatek,wocpu0_emi";
+			no-map;
+			reg = <0 0x4FD00000 0 0x40000>;
+			shared = <0>;
+		};
+
+		wocpu1_emi: wocpu1_emi@4FD40000 {
+			compatible = "mediatek,wocpu1_emi";
+			no-map;
+			reg = <0 0x4FD40000 0 0x40000>;
+			shared = <0>;
+		};
+
+		wocpu_data: wocpu_data@4FD80000 {
+			compatible = "mediatek,wocpu_data";
+			no-map;
+			reg = <0 0x4FD80000 0 0x200000>;
+			shared = <1>;
+		};
+	};
+
+	psci {
+		compatible  = "arm,psci-0.2";
+		method      = "smc";
+	};
+
+	system_clk: dummy_system_clk {
+		compatible = "fixed-clock";
+		clock-frequency = <40000000>;
+		#clock-cells = <0>;
+	};
+
+	spi0_clk: dummy_spi0_clk {
+		compatible = "fixed-clock";
+		clock-frequency = <208000000>;
+		#clock-cells = <0>;
+	};
+
+	spi1_clk: dummy_spi1_clk {
+		compatible = "fixed-clock";
+		clock-frequency = <40000000>;
+		#clock-cells = <0>;
+	};
+
+	uart_clk: dummy_uart_clk {
+		compatible = "fixed-clock";
+		clock-frequency = <40000000>;
+		#clock-cells = <0>;
+	};
+
+	gpt_clk: dummy_gpt_clk {
+		compatible = "fixed-clock";
+		clock-frequency = <20000000>;
+		#clock-cells = <0>;
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupt-parent = <&gic>;
+		clock-frequency = <40000000>;
+		interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+
+	};
+
+	watchdog: watchdog@1001c000 {
+		compatible = "mediatek,mt7622-wdt",
+			     "mediatek,mt6589-wdt";
+		reg = <0 0x1001c000 0 0x1000>;
+		interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+		#reset-cells = <1>;
+	};
+
+	gic: interrupt-controller@c000000 {
+		compatible = "arm,gic-v3";
+		#interrupt-cells = <3>;
+		interrupt-parent = <&gic>;
+		interrupt-controller;
+		reg = <0 0x0c000000 0 0x40000>,  /* GICD */
+		      <0 0x0c080000 0 0x200000>; /* GICR */
+
+		interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	uart0: serial@11002000 {
+		compatible = "mediatek,mt7986-uart",
+			     "mediatek,mt6577-uart";
+		reg = <0 0x11002000 0 0x400>;
+		interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&uart_clk>;
+		status = "disabled";
+	};
+
+	uart1: serial@11003000 {
+		compatible = "mediatek,mt7986-uart",
+			     "mediatek,mt6577-uart";
+		reg = <0 0x11003000 0 0x400>;
+		interrupts = <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&uart_clk>;
+		status = "disabled";
+	};
+
+	uart2: serial@11004000 {
+		compatible = "mediatek,mt7986-uart",
+			     "mediatek,mt6577-uart";
+		reg = <0 0x11004000 0 0x400>;
+		interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&uart_clk>;
+		status = "disabled";
+	};
+
+	pcie: pcie@11280000 {
+		compatible = "mediatek,mt7986-pcie";
+		device_type = "pci";
+		reg = <0 0x11280000 0 0x5000>;
+		reg-names = "port0";
+		#address-cells = <3>;
+		#size-cells = <2>;
+		interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
+		bus-range = <0x00 0xff>;
+		ranges = <0x82000000 0 0x20000000
+			  0x0 0x20000000 0 0x10000000>;
+
+		pcie0: pcie@0,0 {
+			device_type = "pci";
+			reg = <0x0000 0 0 0 0>;
+			#address-cells = <3>;
+			#size-cells = <2>;
+			ranges;
+			#interrupt-cells = <1>;
+			interrupt-map-mask = <0 0 0 7>;
+			interrupt-map = <0 0 0 1 &pcie_intc0 0>,
+					<0 0 0 2 &pcie_intc0 1>,
+					<0 0 0 3 &pcie_intc0 2>,
+					<0 0 0 4 &pcie_intc0 3>;
+			pcie_intc0: interrupt-controller {
+				interrupt-controller;
+				#address-cells = <0>;
+				#interrupt-cells = <1>;
+			};
+		};
+	};
+
+	pio: pinctrl@1001f000 {
+		compatible = "mediatek,mt7986-pinctrl";
+		reg = <0 0x1001f000 0 0x1000>,
+		      <0 0x11c30000 0 0x1000>,
+		      <0 0x11c40000 0 0x1000>,
+		      <0 0x11e20000 0 0x1000>,
+		      <0 0x11e30000 0 0x1000>,
+		      <0 0x11f00000 0 0x1000>,
+		      <0 0x11f10000 0 0x1000>,
+		      <0 0x1000b000 0 0x1000>;
+		reg-names = "gpio_base", "iocfg_rt_base", "iocfg_rb_base",
+			    "iocfg_lt_base", "iocfg_lb_base", "iocfg_tr_base",
+			    "iocfg_tl_base", "eint";
+		gpio-controller;
+		#gpio-cells = <2>;
+		gpio-ranges = <&pio 0 0 100>;
+		interrupt-controller;
+		interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-parent = <&gic>;
+		#interrupt-cells = <2>;
+	};
+
+	ethsys: syscon@15000000 {
+                #address-cells = <1>;
+                #size-cells = <1>;
+                compatible = "mediatek,mt7986-ethsys",
+                             "syscon";
+                reg = <0 0x15000000 0 0x1000>;
+                #clock-cells = <1>;
+                #reset-cells = <1>;
+
+		ethsysrst: reset-controller {
+			compatible = "ti,syscon-reset";
+			#reset-cells = <1>;
+			ti,reset-bits = <0x34 4 0x34 4 0x34 4 (ASSERT_SET | DEASSERT_CLEAR | STATUS_SET)>;
+		};
+        };
+
+	/* Frame engine / ethernet MAC (indentation normalized to the file's tab style) */
+	eth: ethernet@15100000 {
+		compatible = "mediatek,mt7986-eth";
+		reg = <0 0x15100000 0 0x80000>;
+		interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>;
+		clock-names = "fe", "gp2", "gp1", "wocpu1", "wocpu0",
+			      "sgmii_tx250m", "sgmii_rx250m",
+			      "sgmii_cdr_ref", "sgmii_cdr_fb",
+			      "sgmii2_tx250m", "sgmii2_rx250m",
+			      "sgmii2_cdr_ref", "sgmii2_cdr_fb";
+		mediatek,ethsys = <&ethsys>;
+		mediatek,sgmiisys = <&sgmiisys0>, <&sgmiisys1>;
+		#reset-cells = <1>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+
+	/* HW NAT offload engine; shares the frame-engine register window with &eth */
+	hnat: hnat@15100000 {
+		compatible = "mediatek,mtk-hnat_v4";
+		reg = <0 0x15100000 0 0x80000>;	/* unit address fixed: must match first reg base (was @15000000) */
+		resets = <&ethsys 0>;
+		reset-names = "mtketh";
+		status = "disabled";
+
+	sgmiisys0: syscon@10060000 {
+		compatible = "mediatek,mt7986-sgmiisys", "syscon";
+		reg = <0 0x10060000 0 0x1000>;
+		#clock-cells = <1>;
+	};
+
+	sgmiisys1: syscon@10070000 {
+		compatible = "mediatek,mt7986-sgmiisys", "syscon";
+		reg = <0 0x10070000 0 0x1000>;
+		#clock-cells = <1>;
+	};
+
+	snand: snfi@11005000 {
+		compatible = "mediatek,mt7986-snand";
+		reg = <0 0x11005000 0 0x1000>, <0 0x11006000 0 0x1000>;
+		reg-names = "nfi", "ecc";
+		interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>;
+		clock-names = "nfi_clk", "pad_clk", "ecc_clk";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+	};
+
+	wbsys: wbsys@18000000 {
+		compatible = "mediatek,wbsys";
+		reg = <0 0x18000000 0  0x1000000>;
+		interrupts = <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+					 <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>,
+					 <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>,
+					 <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>;
+		chip_id = <0x7986>;
+	};
+
+	wed_pcie: wed_pcie@10003000 {
+		compatible = "mediatek,wed_pcie";
+		reg = <0 0x10003000 0 0x10>;
+	};
+
+	spi0: spi@1100a000 {
+		compatible = "mediatek,ipm-spi";
+		reg = <0 0x1100a000 0 0x100>;
+		interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&spi0_clk>,
+			 <&spi0_clk>,
+			 <&spi0_clk>;
+		clock-names = "parent-clk", "sel-clk", "spi-clk";
+		status = "disabled";
+	};
+
+	spi1: spi@1100b000 {
+		compatible = "mediatek,ipm-spi";
+		reg = <0 0x1100b000 0 0x100>;
+		interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&spi1_clk>,
+			 <&spi1_clk>,
+			 <&spi1_clk>;
+		clock-names = "parent-clk", "sel-clk", "spi-clk";
+		status = "disabled";
+	};
+
+	consys: consys@10000000 {
+		compatible = "mediatek,mt7986-consys";
+		reg = <0 0x10000000 0 0x8600000>;
+		memory-region = <&wmcpu_emi>;
+	};
+
+	xhci: xhci@11200000 {
+		compatible = "mediatek,mt7986-xhci",
+			     "mediatek,mtk-xhci";
+		reg = <0 0x11200000 0 0x2e00>,
+		      <0 0x11203e00 0 0x0100>;
+		reg-names = "mac", "ippc";
+		interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
+		phys = <&u2port0 PHY_TYPE_USB2>,
+		       <&u3port0 PHY_TYPE_USB3>,
+		       <&u2port1 PHY_TYPE_USB2>;
+		clocks = <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>;
+		clock-names = "sys_ck",
+			      "xhci_ck",
+			      "ref_ck",
+			      "mcu_ck",
+			      "dma_ck";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		status = "okay";
+	};
+
+	usbtphy: usb-phy@11e10000 {
+		compatible = "mediatek,mt7986",
+			     "mediatek,generic-tphy-v2";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+		status = "okay";
+
+		u2port0: usb-phy@11e10000 {
+			reg = <0 0x11e10000 0 0x700>;
+			clocks = <&system_clk>;
+			clock-names = "ref";
+			#phy-cells = <1>;
+			status = "okay";
+		};
+
+		u3port0: usb-phy@11e10700 {
+			reg = <0 0x11e10700 0 0x900>;
+			clocks = <&system_clk>;
+			clock-names = "ref";
+			#phy-cells = <1>;
+			status = "okay";
+		};
+
+		u2port1: usb-phy@11e11000 {
+			reg = <0 0x11e11000 0 0x700>;
+			clocks = <&system_clk>;
+			clock-names = "ref";
+			#phy-cells = <1>;
+			status = "okay";
+		};
+	};
+};
diff --git a/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986b-mt7975-ax6000-rfb1.dts b/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986b-mt7975-ax6000-rfb1.dts
new file mode 100644
index 0000000..87e54d2
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986b-mt7975-ax6000-rfb1.dts
@@ -0,0 +1,12 @@
+/dts-v1/;
+#include "mt7986b-rfb.dtsi"
+/ {
+	model = "MediaTek MT7986b RFB";
+	compatible = "mediatek,mt7986b-rfb";
+
+	wsys_adie: wsys_adie@0 {
+		compatible = "mediatek,rebb-mt7986-adie";
+		adie_id = <7975>;
+		sku_type = <6000>;
+	};
+};
diff --git a/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986b-rfb.dtsi b/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986b-rfb.dtsi
new file mode 100644
index 0000000..12cd596
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986b-rfb.dtsi
@@ -0,0 +1,248 @@
+/dts-v1/;
+#include "mt7986b.dtsi"
+/ {
+	model = "MediaTek MT7986b RFB";
+	compatible = "mediatek,mt7986b-rfb";
+	chosen {
+		bootargs = "console=ttyS0,115200n1 loglevel=8  \
+				earlycon=uart8250,mmio32,0x11002000";
+	};
+
+	memory {
+		// fpga ddr2: 128MB*2
+		reg = <0 0x40000000 0 0x10000000>;
+	};
+};
+
+&uart0 {
+	status = "okay";
+};
+
+&watchdog {
+	status = "okay";
+};
+
+/* Board-level ethernet wiring: two fixed-link 2.5G GMACs, MT7531 switch on MDIO
+ * (indentation normalized to tabs to match the rest of the file) */
+&eth {
+	status = "okay";
+
+	gmac0: mac@0 {
+		compatible = "mediatek,eth-mac";
+		reg = <0>;
+		phy-mode = "2500base-x";
+
+		fixed-link {
+			speed = <2500>;
+			full-duplex;
+			pause;
+		};
+	};
+
+	gmac1: mac@1 {
+		compatible = "mediatek,eth-mac";
+		reg = <1>;
+		phy-mode = "2500base-x";
+
+		fixed-link {
+			speed = <2500>;
+			full-duplex;
+			pause;
+		};
+	};
+
+	mdio: mdio-bus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		switch@0 {
+			compatible = "mediatek,mt7531";
+			reg = <0>;
+			reset-gpios = <&pio 5 0>;
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@0 {
+					reg = <0>;
+					label = "lan1";
+				};
+
+				port@1 {
+					reg = <1>;
+					label = "lan2";
+				};
+
+				port@2 {
+					reg = <2>;
+					label = "lan3";
+				};
+
+				port@3 {
+					reg = <3>;
+					label = "lan4";
+				};
+
+				port@4 {
+					reg = <4>;
+					label = "wan";
+				};
+
+				/* CPU port: fixed 2.5G link back to gmac0 */
+				port@6 {
+					reg = <6>;
+					label = "cpu";
+					ethernet = <&gmac0>;
+					phy-mode = "2500base-x";
+
+					fixed-link {
+						speed = <2500>;
+						full-duplex;
+						pause;
+					};
+				};
+			};
+		};
+	};
+};
+
+&hnat {
+	mtketh-wan = "eth1";
+	mtketh-max-gmac = <2>;
+	status = "okay";
+};
+
+/* SPI0: SPI-NOR on CS0, SPI-NAND on CS1. Bus-width properties renamed to the
+ * binding's spellings (spi-{tx,rx}-bus-width); the old names were ignored. */
+&spi0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&spi_flash_pins>;
+	cs-gpios = <0>, <0>;
+	status = "okay";
+	spi_nor@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "jedec,spi-nor";
+		reg = <0>;
+		spi-max-frequency = <20000000>;
+		spi-tx-bus-width = <4>;	/* was "spi-tx-buswidth": not a recognized property */
+		spi-rx-bus-width = <4>;	/* was "spi-rx-buswidth" */
+
+		partition@0 {
+			label = "BL2";
+			reg = <0x00000 0x0040000>;
+		};
+		partition@40000 {
+			label = "u-boot-env";
+			reg = <0x40000 0x0010000>;
+		};
+		partition@50000 {
+			label = "Factory";
+			reg = <0x50000 0x00B0000>;
+		};
+		partition@100000 {
+			label = "FIP";
+			reg = <0x100000 0x0080000>;
+		};
+		partition@180000 {
+			label = "firmware";
+			reg = <0x180000 0xE00000>;
+		};
+	};
+	spi_nand@1 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "spi-nand";
+		reg = <1>;
+		spi-max-frequency = <20000000>;
+		spi-tx-bus-width = <4>;	/* was "spi-tx-buswidth" */
+		spi-rx-bus-width = <4>;	/* was "spi-rx-buswidth" */
+
+		partition@0 {
+			label = "BL2";
+			reg = <0x00000 0x0100000>;
+		};
+		partition@100000 {
+			label = "u-boot-env";
+			reg = <0x100000 0x0080000>;
+		};
+		partition@180000 {
+			label = "Factory";
+			reg = <0x180000 0x00200000>;
+		};
+		partition@380000 {
+			label = "FIP";
+			reg = <0x380000 0x0200000>;
+		};
+		partition@580000 {
+			label = "ubi";
+			reg = <0x580000 0x4000000>;
+		};
+	};
+};
+
+&snand {
+	pinctrl-names = "default";
+	/* pin shared with spic */
+	pinctrl-0 = <&snfi_pins>;
+	status = "okay";
+	mediatek,quad-spi;
+
+	partitions {
+		compatible = "fixed-partitions";
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		partition@0 {
+			label = "BL2";
+			reg = <0x00000 0x0100000>;
+			read-only;
+		};
+
+		partition@100000 {
+			label = "u-boot-env";
+			reg = <0x0100000 0x0080000>;
+		};
+
+		partition@180000 {
+			label = "Factory";
+			reg = <0x180000 0x0200000>;
+		};
+
+		partition@380000 {
+			label = "FIP";
+			reg = <0x380000 0x0200000>;
+		};
+
+		partition@580000 {
+			label = "ubi";
+			reg = <0x580000 0x4000000>;
+		};
+	};
+};
+
+&spi1 {
+	pinctrl-names = "default";
+	/* pin shared with snfi */
+	pinctrl-0 = <&spic_pins>;
+	status = "okay";
+};
+
+&pio {
+	spi_flash_pins: spi0-pins {
+		mux {
+			function = "flash";
+			groups = "spi0", "spi0_wp_hold";
+		};
+	};
+
+	snfi_pins: snfi-pins {
+		mux {
+			function = "flash";
+			groups = "snfi";
+		};
+	};
+
+	spic_pins: spi1-pins {
+		mux {
+			function = "spi";
+			groups = "spi1_1";
+		};
+	};
+};
diff --git a/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986b.dtsi b/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986b.dtsi
new file mode 100644
index 0000000..544c028
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7986b.dtsi
@@ -0,0 +1,485 @@
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Author: Sam.Shih <sam.shih@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/reset/ti-syscon.h>
+/ {
+	compatible = "mediatek,mt7986b-rfb";
+	interrupt-parent = <&gic>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53";
+			enable-method = "psci";
+			reg = <0x0>;
+		};
+
+		cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53";
+			enable-method = "psci";
+			reg = <0x1>;
+		};
+
+		cpu@2 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53";
+			enable-method = "psci";
+			reg = <0x2>;
+		};
+
+		cpu@3 {
+			device_type = "cpu";
+			enable-method = "psci";
+			compatible = "arm,cortex-a53";
+			reg = <0x3>;
+		};
+	};
+
+	wed: wed@15010000 {
+		compatible = "mediatek,wed";
+		wed_num = <2>;			/* two instances; matches the two reg/interrupt entries below */
+		/* lets WED map each instance to its PCIe slot number */
+		pci_slot_map = <0>, <1>;
+		reg = <0 0x15010000 0 0x1000>,
+		      <0 0x15011000 0 0x1000>;
+		interrupt-parent = <&gic>;
+		interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	wed2: wed2@15011000 {
+		compatible = "mediatek,wed2";
+		wed_num = <2>;
+		reg = <0 0x15010000 0 0x1000>,
+		      <0 0x15011000 0 0x1000>;
+		interrupt-parent = <&gic>;
+		interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	wdma: wdma@15104800 {
+		compatible = "mediatek,wed-wdma";
+		reg = <0 0x15104800 0 0x400>,
+		      <0 0x15104c00 0 0x400>;
+	};
+
+	ap2woccif: ap2woccif@151A5000 {
+		compatible = "mediatek,ap2woccif";
+		reg = <0 0x151A5000 0 0x1000>,
+		      <0 0x151AD000 0 0x1000>;
+		interrupt-parent = <&gic>;
+		interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
+        };
+
+	wocpu0_ilm: wocpu0_ilm@151E0000 {
+		compatible = "mediatek,wocpu0_ilm";
+		reg = <0 0x151E0000 0 0x8000>;
+	};
+
+        wocpu1_ilm: wocpu1_ilm@151F0000 {
+                compatible = "mediatek,wocpu1_ilm";
+                reg = <0 0x151F0000 0 0x8000>;
+        };
+
+	wocpu_dlm: wocpu_dlm@151E8000 {
+		compatible = "mediatek,wocpu_dlm";
+		reg = <0 0x151E8000 0 0x2000>,
+		      <0 0x151F8000 0 0x2000>;
+
+		resets = <&ethsysrst 0>;
+		reset-names = "wocpu_rst";
+	};
+
+	cpu_boot: wocpu_boot@15194000 {
+		compatible = "mediatek,wocpu_boot";
+		reg = <0 0x15194000 0 0x1000>;
+	};
+
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		/* 192 KiB reserved for ARM Trusted Firmware (BL31) */
+		secmon_reserved: secmon@43000000 {
+			reg = <0 0x43000000 0 0x30000>;
+			no-map;
+		};
+
+		wmcpu_emi: wmcpu-reserved@4FC00000 {
+			compatible = "mediatek,wmcpu-reserved";
+			no-map;
+			reg = <0 0x4FC00000 0 0x00100000>;
+		};
+
+		wocpu0_emi: wocpu0_emi@4FD00000 {
+			compatible = "mediatek,wocpu0_emi";
+			no-map;
+			reg = <0 0x4FD00000 0 0x40000>;
+			shared = <0>;
+		};
+
+		wocpu1_emi: wocpu1_emi@4FD40000 {
+			compatible = "mediatek,wocpu1_emi";
+			no-map;
+			reg = <0 0x4FD40000 0 0x40000>;	/* unit address fixed to match reg (was @4FD80000); TODO confirm intended base */
+			shared = <0>;
+		};
+
+		wocpu_data: wocpu_data@4FD80000 {
+			compatible = "mediatek,wocpu_data";
+			no-map;
+			reg = <0 0x4FD80000 0 0x200000>;	/* unit address fixed to match reg (was @4FE00000) */
+			shared = <1>;
+		};
+	};
+
+	psci {
+		compatible  = "arm,psci-0.2";
+		method      = "smc";
+	};
+
+	system_clk: dummy_system_clk {
+		compatible = "fixed-clock";
+		clock-frequency = <40000000>;
+		#clock-cells = <0>;
+	};
+
+	spi0_clk: dummy_spi0_clk {
+		compatible = "fixed-clock";
+		clock-frequency = <208000000>;
+		#clock-cells = <0>;
+	};
+
+	spi1_clk: dummy_spi1_clk {
+		compatible = "fixed-clock";
+		clock-frequency = <40000000>;
+		#clock-cells = <0>;
+	};
+
+	uart_clk: dummy_uart_clk {
+		compatible = "fixed-clock";
+		clock-frequency = <40000000>;
+		#clock-cells = <0>;
+	};
+
+	gpt_clk: dummy_gpt_clk {
+		compatible = "fixed-clock";
+		clock-frequency = <20000000>;
+		#clock-cells = <0>;
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupt-parent = <&gic>;
+		clock-frequency = <40000000>;
+		interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+
+	};
+
+	watchdog: watchdog@1001c000 {
+		compatible = "mediatek,mt7622-wdt",
+			     "mediatek,mt6589-wdt";
+		reg = <0 0x1001c000 0 0x1000>;
+		interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+		#reset-cells = <1>;
+	};
+
+	gic: interrupt-controller@c000000 {
+		compatible = "arm,gic-v3";
+		#interrupt-cells = <3>;
+		interrupt-parent = <&gic>;
+		interrupt-controller;
+		reg = <0 0x0c000000 0 0x40000>,  /* GICD */
+		      <0 0x0c080000 0 0x200000>; /* GICR */
+
+		interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	uart0: serial@11002000 {
+		compatible = "mediatek,mt7986-uart",
+			     "mediatek,mt6577-uart";
+		reg = <0 0x11002000 0 0x400>;
+		interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&uart_clk>;
+		status = "disabled";
+	};
+
+	uart1: serial@11003000 {
+		compatible = "mediatek,mt7986-uart",
+			     "mediatek,mt6577-uart";
+		reg = <0 0x11003000 0 0x400>;
+		interrupts = <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&uart_clk>;
+		status = "disabled";
+	};
+
+	uart2: serial@11004000 {
+		compatible = "mediatek,mt7986-uart",
+			     "mediatek,mt6577-uart";
+		reg = <0 0x11004000 0 0x400>;
+		interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&uart_clk>;
+		status = "disabled";
+	};
+
+	pcie: pcie@11280000 {
+		compatible = "mediatek,mt7986-pcie";
+		device_type = "pci";
+		reg = <0 0x11280000 0 0x5000>;
+		reg-names = "port0";
+		#address-cells = <3>;
+		#size-cells = <2>;
+		interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
+		bus-range = <0x00 0xff>;
+		ranges = <0x82000000 0 0x20000000
+			  0x0 0x20000000 0 0x10000000>;
+
+		pcie0: pcie@0,0 {
+			device_type = "pci";
+			reg = <0x0000 0 0 0 0>;
+			#address-cells = <3>;
+			#size-cells = <2>;
+			ranges;
+			#interrupt-cells = <1>;
+			interrupt-map-mask = <0 0 0 7>;
+			interrupt-map = <0 0 0 1 &pcie_intc0 0>,
+					<0 0 0 2 &pcie_intc0 1>,
+					<0 0 0 3 &pcie_intc0 2>,
+					<0 0 0 4 &pcie_intc0 3>;
+			pcie_intc0: interrupt-controller {
+				interrupt-controller;
+				#address-cells = <0>;
+				#interrupt-cells = <1>;
+			};
+		};
+	};
+
+	pio: pinctrl@1001f000 {
+		compatible = "mediatek,mt7986-pinctrl";
+		reg = <0 0x1001f000 0 0x1000>,
+		      <0 0x11c30000 0 0x1000>,
+		      <0 0x11c40000 0 0x1000>,
+		      <0 0x11e20000 0 0x1000>,
+		      <0 0x11e30000 0 0x1000>,
+		      <0 0x11f00000 0 0x1000>,
+		      <0 0x11f10000 0 0x1000>,
+		      <0 0x1000b000 0 0x1000>;
+		reg-names = "gpio_base", "iocfg_rt_base", "iocfg_rb_base",
+			    "iocfg_lt_base", "iocfg_lb_base", "iocfg_tr_base",
+			    "iocfg_tl_base", "eint";
+		gpio-controller;
+		#gpio-cells = <2>;
+		gpio-ranges = <&pio 0 0 100>;
+		interrupt-controller;
+		interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-parent = <&gic>;
+		#interrupt-cells = <2>;
+	};
+
+	/* Ethernet subsystem clock/reset controller (indentation normalized to tabs) */
+	ethsys: syscon@15000000 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "mediatek,mt7986-ethsys",
+			     "syscon";
+		reg = <0 0x15000000 0 0x1000>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+
+		ethsysrst: reset-controller {
+			compatible = "ti,syscon-reset";
+			#reset-cells = <1>;
+			ti,reset-bits = <0x34 4 0x34 4 0x34 4 (ASSERT_SET | DEASSERT_CLEAR | STATUS_SET)>;
+		};
+
+	/* Frame engine / ethernet MAC (indentation normalized to the file's tab style) */
+	eth: ethernet@15100000 {
+		compatible = "mediatek,mt7986-eth";
+		reg = <0 0x15100000 0 0x80000>;
+		interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>;
+		clock-names = "fe", "gp2", "gp1", "wocpu1", "wocpu0",
+			      "sgmii_tx250m", "sgmii_rx250m",
+			      "sgmii_cdr_ref", "sgmii_cdr_fb",
+			      "sgmii2_tx250m", "sgmii2_rx250m",
+			      "sgmii2_cdr_ref", "sgmii2_cdr_fb";
+		mediatek,ethsys = <&ethsys>;
+		mediatek,sgmiisys = <&sgmiisys0>, <&sgmiisys1>;
+		#reset-cells = <1>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+
+	/* HW NAT offload engine; shares the frame-engine register window with &eth */
+	hnat: hnat@15100000 {
+		compatible = "mediatek,mtk-hnat_v4";
+		reg = <0 0x15100000 0 0x80000>;	/* unit address fixed: must match first reg base (was @15000000) */
+		resets = <&ethsys 0>;
+		reset-names = "mtketh";
+		status = "disabled";
+
+	sgmiisys0: syscon@10060000 {
+		compatible = "mediatek,mt7986-sgmiisys", "syscon";
+		reg = <0 0x10060000 0 0x1000>;
+		#clock-cells = <1>;
+	};
+
+	sgmiisys1: syscon@10070000 {
+		compatible = "mediatek,mt7986-sgmiisys", "syscon";
+		reg = <0 0x10070000 0 0x1000>;
+		#clock-cells = <1>;
+	};
+
+	snand: snfi@11005000 {
+		compatible = "mediatek,mt7986-snand";
+		reg = <0 0x11005000 0 0x1000>, <0 0x11006000 0 0x1000>;
+		reg-names = "nfi", "ecc";
+		interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>;
+		clock-names = "nfi_clk", "pad_clk", "ecc_clk";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+	};
+
+	wbsys: wbsys@18000000 {
+		compatible = "mediatek,wbsys";
+		reg = <0 0x18000000 0  0x1000000>;
+		interrupts = <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+					 <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>,
+					 <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>,
+					 <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>;
+		chip_id = <0x7986>;
+	};
+
+	wed_pcie: wed_pcie@10003000 {
+		compatible = "mediatek,wed_pcie";
+		reg = <0 0x10003000 0 0x10>;
+	};
+
+	spi0: spi@1100a000 {
+		compatible = "mediatek,ipm-spi";
+		reg = <0 0x1100a000 0 0x100>;
+		interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&spi0_clk>,
+			 <&spi0_clk>,
+			 <&spi0_clk>;
+		clock-names = "parent-clk", "sel-clk", "spi-clk";
+		status = "disabled";
+	};
+
+	spi1: spi@1100b000 {
+		compatible = "mediatek,ipm-spi";
+		reg = <0 0x1100b000 0 0x100>;
+		interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&spi1_clk>,
+			 <&spi1_clk>,
+			 <&spi1_clk>;
+		clock-names = "parent-clk", "sel-clk", "spi-clk";
+		status = "disabled";
+	};
+
+	consys: consys@10000000 {
+		compatible = "mediatek,mt7986-consys";
+		reg = <0 0x10000000 0 0x8600000>;
+		memory-region = <&wmcpu_emi>;
+	};
+
+	xhci: xhci@11200000 {
+		compatible = "mediatek,mt7986-xhci",
+			     "mediatek,mtk-xhci";
+		reg = <0 0x11200000 0 0x2e00>,
+		      <0 0x11203e00 0 0x0100>;
+		reg-names = "mac", "ippc";
+		interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
+		phys = <&u2port0 PHY_TYPE_USB2>,
+		       <&u3port0 PHY_TYPE_USB3>,
+		       <&u2port1 PHY_TYPE_USB2>;
+		clocks = <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>,
+			 <&system_clk>;
+		clock-names = "sys_ck",
+			      "xhci_ck",
+			      "ref_ck",
+			      "mcu_ck",
+			      "dma_ck";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		status = "okay";
+	};
+
+	usbtphy: usb-phy@11e10000 {
+		compatible = "mediatek,mt7986",
+			     "mediatek,generic-tphy-v2";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+		status = "okay";
+
+		u2port0: usb-phy@11e10000 {
+			reg = <0 0x11e10000 0 0x700>;
+			clocks = <&system_clk>;
+			clock-names = "ref";
+			#phy-cells = <1>;
+			status = "okay";
+		};
+
+		u3port0: usb-phy@11e10700 {
+			reg = <0 0x11e10700 0 0x900>;
+			clocks = <&system_clk>;
+			clock-names = "ref";
+			#phy-cells = <1>;
+			status = "okay";
+		};
+
+		u2port1: usb-phy@11e11000 {
+			reg = <0 0x11e11000 0 0x700>;
+			clocks = <&system_clk>;
+			clock-names = "ref";
+			#phy-cells = <1>;
+			status = "okay";
+		};
+	};
+};
diff --git a/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/Kconfig b/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/Kconfig
new file mode 100644
index 0000000..138b939
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/Kconfig
@@ -0,0 +1,14 @@
+#
+# Copyright (C) 2020 MediaTek Inc. All rights reserved.
+# Author: Weijie Gao <weijie.gao@mediatek.com>
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+
+config MTK_SPI_NAND
+	tristate "MediaTek SPI NAND flash controller driver"
+	depends on MTD
+	default n
+	help
+	  This option enables access to SPI-NAND flashes through the
+	  MTD interface of MediaTek SPI NAND Flash Controller
diff --git a/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/Makefile b/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/Makefile
new file mode 100644
index 0000000..a39f1ca
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/Makefile
@@ -0,0 +1,11 @@
+#
+# Copyright (C) 2020 MediaTek Inc. All rights reserved.
+# Author: Weijie Gao <weijie.gao@mediatek.com>
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+
+obj-y += mtk-snand.o mtk-snand-ecc.o mtk-snand-ids.o mtk-snand-os.o \
+	 mtk-snand-mtd.o
+
+ccflags-y += -DPRIVATE_MTK_SNAND_HEADER
diff --git a/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/mtk-snand-def.h b/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/mtk-snand-def.h
new file mode 100644
index 0000000..95c4bb3
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/mtk-snand-def.h
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Weijie Gao <weijie.gao@mediatek.com>
+ */
+
+#ifndef _MTK_SNAND_DEF_H_
+#define _MTK_SNAND_DEF_H_
+
+#include "mtk-snand-os.h"
+
+#ifdef PRIVATE_MTK_SNAND_HEADER
+#include "mtk-snand.h"
+#else
+#include <mtk-snand.h>
+#endif
+
+struct mtk_snand_plat_dev;
+
+enum snand_flash_io {
+	SNAND_IO_1_1_1,
+	SNAND_IO_1_1_2,
+	SNAND_IO_1_2_2,
+	SNAND_IO_1_1_4,
+	SNAND_IO_1_4_4,
+
+	__SNAND_IO_MAX
+};
+
+#define SPI_IO_1_1_1			BIT(SNAND_IO_1_1_1)
+#define SPI_IO_1_1_2			BIT(SNAND_IO_1_1_2)
+#define SPI_IO_1_2_2			BIT(SNAND_IO_1_2_2)
+#define SPI_IO_1_1_4			BIT(SNAND_IO_1_1_4)
+#define SPI_IO_1_4_4			BIT(SNAND_IO_1_4_4)
+
+struct snand_opcode {
+	uint8_t opcode;
+	uint8_t dummy;
+};
+
+struct snand_io_cap {
+	uint8_t caps;
+	struct snand_opcode opcodes[__SNAND_IO_MAX];
+};
+
+#define SNAND_OP(_io, _opcode, _dummy) [_io] = { .opcode = (_opcode), \
+						 .dummy = (_dummy) }
+
+#define SNAND_IO_CAP(_name, _caps, ...) \
+	struct snand_io_cap _name = { .caps = (_caps), \
+				      .opcodes = { __VA_ARGS__ } }
+
+#define SNAND_MAX_ID_LEN		4
+
+enum snand_id_type {
+	SNAND_ID_DYMMY,			/* NOTE(review): "DYMMY" is a typo for DUMMY, but the name is public API used elsewhere — kept as-is */
+	SNAND_ID_ADDR = SNAND_ID_DYMMY,	/* alias for the same ID-read style; exact wire semantics not visible in this header */
+	SNAND_ID_DIRECT,
+
+	__SNAND_ID_TYPE_MAX		/* count of ID types; not a valid value */
+};
+
+struct snand_id {
+	uint8_t type;	/* enum snand_id_type */
+	uint8_t len;
+	uint8_t id[SNAND_MAX_ID_LEN];
+};
+
+#define SNAND_ID(_type, ...) \
+	{ .type = (_type), .id = { __VA_ARGS__ }, \
+	  .len = sizeof((uint8_t[]) { __VA_ARGS__ }) }
+
+struct snand_mem_org {
+	uint16_t pagesize;
+	uint16_t sparesize;
+	uint16_t pages_per_block;
+	uint16_t blocks_per_die;
+	uint16_t planes_per_die;
+	uint16_t ndies;
+};
+
+#define SNAND_MEMORG(_ps, _ss, _ppb, _bpd, _ppd, _nd) \
+	{ .pagesize = (_ps), .sparesize = (_ss), .pages_per_block = (_ppb), \
+	  .blocks_per_die = (_bpd), .planes_per_die = (_ppd), .ndies = (_nd) }
+
+typedef int (*snand_select_die_t)(struct mtk_snand *snf, uint32_t dieidx);
+
+struct snand_flash_info {
+	const char *model;
+	struct snand_id id;
+	const struct snand_mem_org memorg;
+	const struct snand_io_cap *cap_rd;
+	const struct snand_io_cap *cap_pl;
+	snand_select_die_t select_die;
+};
+
+#define SNAND_INFO(_model, _id, _memorg, _cap_rd, _cap_pl, ...) \
+	{ .model = (_model), .id = _id, .memorg = _memorg, \
+	  .cap_rd = (_cap_rd), .cap_pl = (_cap_pl), __VA_ARGS__ }
+
+const struct snand_flash_info *snand_flash_id_lookup(enum snand_id_type type,
+						     const uint8_t *id);
+
+struct mtk_snand_soc_data {
+	uint16_t sector_size;
+	uint16_t max_sectors;
+	uint16_t fdm_size;
+	uint16_t fdm_ecc_size;
+	uint16_t fifo_size;
+
+	bool bbm_swap;
+	bool empty_page_check;
+	uint32_t mastersta_mask;
+
+	const uint8_t *spare_sizes;
+	uint32_t num_spare_size;
+};
+
+enum mtk_ecc_regs {
+	ECC_DECDONE,
+};
+
+struct mtk_ecc_soc_data {
+	const uint8_t *ecc_caps;
+	uint32_t num_ecc_cap;
+	const uint32_t *regs;
+	uint16_t mode_shift;
+	uint8_t errnum_bits;
+	uint8_t errnum_shift;
+};
+
+struct mtk_snand {
+	struct mtk_snand_plat_dev *pdev;
+
+	void __iomem *nfi_base;
+	void __iomem *ecc_base;
+
+	enum mtk_snand_soc soc;
+	const struct mtk_snand_soc_data *nfi_soc;
+	const struct mtk_ecc_soc_data *ecc_soc;
+	bool snfi_quad_spi;
+	bool quad_spi_op;
+
+	const char *model;
+	uint64_t size;
+	uint64_t die_size;
+	uint32_t erasesize;
+	uint32_t writesize;
+	uint32_t oobsize;
+
+	uint32_t num_dies;
+	snand_select_die_t select_die;
+
+	uint8_t opcode_rfc;
+	uint8_t opcode_pl;
+	uint8_t dummy_rfc;
+	uint8_t mode_rfc;
+	uint8_t mode_pl;
+
+	uint32_t writesize_mask;
+	uint32_t writesize_shift;
+	uint32_t erasesize_mask;
+	uint32_t erasesize_shift;
+	uint64_t die_mask;
+	uint32_t die_shift;
+
+	uint32_t spare_per_sector;
+	uint32_t raw_sector_size;
+	uint32_t ecc_strength;
+	uint32_t ecc_steps;
+	uint32_t ecc_bytes;
+	uint32_t ecc_parity_bits;
+
+	uint8_t *page_cache;	/* Used by read/write page */
+	uint8_t *buf_cache;	/* Used by block bad/markbad & auto_oob */
+};
+
+enum mtk_snand_log_category {
+	SNAND_LOG_NFI,
+	SNAND_LOG_SNFI,
+	SNAND_LOG_ECC,
+	SNAND_LOG_CHIP,
+
+	__SNAND_LOG_CAT_MAX
+};
+
+int mtk_ecc_setup(struct mtk_snand *snf, void *fmdaddr, uint32_t max_ecc_bytes,
+		  uint32_t msg_size);
+int mtk_snand_ecc_encoder_start(struct mtk_snand *snf);
+void mtk_snand_ecc_encoder_stop(struct mtk_snand *snf);
+int mtk_snand_ecc_decoder_start(struct mtk_snand *snf);
+void mtk_snand_ecc_decoder_stop(struct mtk_snand *snf);
+int mtk_ecc_wait_decoder_done(struct mtk_snand *snf);
+int mtk_ecc_check_decode_error(struct mtk_snand *snf, uint32_t page);
+
+int mtk_snand_mac_io(struct mtk_snand *snf, const uint8_t *out, uint32_t outlen,
+		     uint8_t *in, uint32_t inlen);
+int mtk_snand_set_feature(struct mtk_snand *snf, uint32_t addr, uint32_t val);
+
+int mtk_snand_log(struct mtk_snand_plat_dev *pdev,
+		  enum mtk_snand_log_category cat, const char *fmt, ...);
+
+#define snand_log_nfi(pdev, fmt, ...) \
+	mtk_snand_log(pdev, SNAND_LOG_NFI, fmt, ##__VA_ARGS__)
+
+#define snand_log_snfi(pdev, fmt, ...) \
+	mtk_snand_log(pdev, SNAND_LOG_SNFI, fmt, ##__VA_ARGS__)
+
+#define snand_log_ecc(pdev, fmt, ...) \
+	mtk_snand_log(pdev, SNAND_LOG_ECC, fmt, ##__VA_ARGS__)
+
+#define snand_log_chip(pdev, fmt, ...) \
+	mtk_snand_log(pdev, SNAND_LOG_CHIP, fmt, ##__VA_ARGS__)
+
+/* ffs64: 1-based index of the least-significant set bit of x; 0 when x == 0 */
+static inline int mtk_snand_ffs64(uint64_t x)
+{
+	uint32_t lo = (uint32_t)x;
+
+	if (lo)
+		return ffs(lo);
+	if (x)
+		return ffs((uint32_t)(x >> 32)) + 32;
+	return 0;
+}
+
+/* NFI dummy commands */
+#define NFI_CMD_DUMMY_READ		0x00
+#define NFI_CMD_DUMMY_WRITE		0x80
+
+/* SPI-NAND opcodes */
+#define SNAND_CMD_RESET			0xff
+#define SNAND_CMD_BLOCK_ERASE		0xd8
+#define SNAND_CMD_READ_FROM_CACHE_QUAD	0xeb
+#define SNAND_CMD_WINBOND_SELECT_DIE	0xc2
+#define SNAND_CMD_READ_FROM_CACHE_DUAL	0xbb
+#define SNAND_CMD_READID		0x9f
+#define SNAND_CMD_READ_FROM_CACHE_X4	0x6b
+#define SNAND_CMD_READ_FROM_CACHE_X2	0x3b
+#define SNAND_CMD_PROGRAM_LOAD_X4	0x32
+#define SNAND_CMD_SET_FEATURE		0x1f
+#define SNAND_CMD_READ_TO_CACHE		0x13
+#define SNAND_CMD_PROGRAM_EXECUTE	0x10
+#define SNAND_CMD_GET_FEATURE		0x0f
+#define SNAND_CMD_READ_FROM_CACHE	0x0b
+#define SNAND_CMD_WRITE_ENABLE		0x06
+#define SNAND_CMD_PROGRAM_LOAD		0x02
+
+/* SPI-NAND feature addresses */
+#define SNAND_FEATURE_MICRON_DIE_ADDR	0xd0
+#define SNAND_MICRON_DIE_SEL_1		BIT(6)
+
+#define SNAND_FEATURE_STATUS_ADDR	0xc0
+#define SNAND_STATUS_OIP		BIT(0)
+#define SNAND_STATUS_WEL		BIT(1)
+#define SNAND_STATUS_ERASE_FAIL		BIT(2)
+#define SNAND_STATUS_PROGRAM_FAIL	BIT(3)
+
+#define SNAND_FEATURE_CONFIG_ADDR	0xb0
+#define SNAND_FEATURE_QUAD_ENABLE	BIT(0)
+#define SNAND_FEATURE_ECC_EN		BIT(4)
+
+#define SNAND_FEATURE_PROTECT_ADDR	0xa0
+
+#endif /* _MTK_SNAND_DEF_H_ */
diff --git a/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/mtk-snand-ecc.c b/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/mtk-snand-ecc.c
new file mode 100644
index 0000000..57ba611
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/mtk-snand-ecc.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Weijie Gao <weijie.gao@mediatek.com>
+ */
+
+#include "mtk-snand-def.h"
+
+/* ECC registers */
+#define ECC_ENCCON			0x000
+#define ENC_EN				BIT(0)
+
+#define ECC_ENCCNFG			0x004
+#define ENC_MS_S			16
+#define ENC_BURST_EN			BIT(8)
+#define ENC_TNUM_S			0
+
+#define ECC_ENCIDLE			0x00c
+#define ENC_IDLE			BIT(0)
+
+#define ECC_DECCON			0x100
+#define DEC_EN				BIT(0)
+
+#define ECC_DECCNFG			0x104
+#define DEC_EMPTY_EN			BIT(31)
+#define DEC_CS_S			16
+#define DEC_CON_S			12
+#define   DEC_CON_CORRECT		3
+#define DEC_BURST_EN			BIT(8)
+#define DEC_TNUM_S			0
+
+#define ECC_DECIDLE			0x10c
+#define DEC_IDLE			BIT(0)
+
+#define ECC_DECENUM0			0x114
+#define ECC_DECENUM(n)			(ECC_DECENUM0 + (n) * 4)
+
+/* ECC_ENCIDLE & ECC_DECIDLE */
+#define ECC_IDLE			BIT(0)
+
+/* ENC_MODE & DEC_MODE */
+#define ECC_MODE_NFI			1
+
+#define ECC_TIMEOUT			500000
+
+/*
+ * Selectable ECC correction strengths (correctable bits per sector).
+ * The array index is what gets programmed into the TNUM field of
+ * ECC_ENCCNFG/ECC_DECCNFG (see mtk_ecc_setup()).
+ */
+static const uint8_t mt7622_ecc_caps[] = { 4, 6, 8, 10, 12 };
+
+static const uint8_t mt7986_ecc_caps[] = {
+	4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24
+};
+
+/* Offsets of registers that moved between ECC engine generations */
+static const uint32_t mt7622_ecc_regs[] = {
+	[ECC_DECDONE] = 0x11c,
+};
+
+static const uint32_t mt7986_ecc_regs[] = {
+	[ECC_DECDONE] = 0x124,
+};
+
+/*
+ * Per-SoC ECC engine description:
+ *   ecc_caps/num_ecc_cap - supported correction strengths
+ *   regs                 - generation-specific register offsets
+ *   mode_shift           - bit position of the mode field in *CNFG
+ *   errnum_bits/shift    - per-sector error-count field layout in ECC_DECENUM
+ */
+static const struct mtk_ecc_soc_data mtk_ecc_socs[__SNAND_SOC_MAX] = {
+	[SNAND_SOC_MT7622] = {
+		.ecc_caps = mt7622_ecc_caps,
+		.num_ecc_cap = ARRAY_SIZE(mt7622_ecc_caps),
+		.regs = mt7622_ecc_regs,
+		.mode_shift = 4,
+		.errnum_bits = 5,
+		.errnum_shift = 5,
+	},
+	[SNAND_SOC_MT7629] = {
+		.ecc_caps = mt7622_ecc_caps,
+		.num_ecc_cap = ARRAY_SIZE(mt7622_ecc_caps),
+		.regs = mt7622_ecc_regs,
+		.mode_shift = 4,
+		.errnum_bits = 5,
+		.errnum_shift = 5,
+	},
+	[SNAND_SOC_MT7986] = {
+		.ecc_caps = mt7986_ecc_caps,
+		.num_ecc_cap = ARRAY_SIZE(mt7986_ecc_caps),
+		.regs = mt7986_ecc_regs,
+		.mode_shift = 5,
+		.errnum_bits = 5,
+		.errnum_shift = 8,
+	},
+};
+
+/* 32-bit read of the ECC engine register at offset @reg. */
+static inline uint32_t ecc_read32(struct mtk_snand *snf, uint32_t reg)
+{
+	return readl(snf->ecc_base + reg);
+}
+
+/* 32-bit write of @val to the ECC engine register at offset @reg. */
+static inline void ecc_write32(struct mtk_snand *snf, uint32_t reg,
+			       uint32_t val)
+{
+	writel(val, snf->ecc_base + reg);
+}
+
+/* 16-bit write; used in this file for the ENCCON/DECCON enable registers. */
+static inline void ecc_write16(struct mtk_snand *snf, uint32_t reg,
+			       uint16_t val)
+{
+	writew(val, snf->ecc_base + reg);
+}
+
+/*
+ * Poll the ECC register at @reg (16-bit read) until any bit of @bits is set
+ * or ECC_TIMEOUT elapses. Returns the poll helper's status: 0 once the
+ * condition is met, non-zero on timeout.
+ * NOTE(review): timeout unit assumed microseconds per read16_poll_timeout —
+ * confirm in mtk-snand-os.h.
+ */
+static int mtk_ecc_poll(struct mtk_snand *snf, uint32_t reg, uint32_t bits)
+{
+	uint32_t val;
+
+	return read16_poll_timeout(snf->ecc_base + reg, val, (val & bits), 0,
+				   ECC_TIMEOUT);
+}
+
+/*
+ * Wait for the IDLE flag of the engine status register at @reg.
+ * Returns 0 when idle, or -EBUSY (after logging) if the engine stays busy
+ * past the poll timeout.
+ */
+static int mtk_ecc_wait_idle(struct mtk_snand *snf, uint32_t reg)
+{
+	if (!mtk_ecc_poll(snf, reg, ECC_IDLE))
+		return 0;
+
+	snand_log_ecc(snf->pdev, "ECC engine is busy\n");
+
+	return -EBUSY;
+}
+
+/*
+ * mtk_ecc_setup() - configure encoder/decoder for the current page geometry
+ * @snf:           controller instance (supplies SoC data and page layout)
+ * @fmdaddr:       unused in this function; kept for interface compatibility
+ *                 (NOTE(review): presumably the FDM buffer address — confirm
+ *                 against the NFI code)
+ * @max_ecc_bytes: spare bytes available per sector for parity
+ * @msg_size:      ECC message (sector payload) size in bytes
+ *
+ * Picks the strongest supported correction strength whose parity fits into
+ * @max_ecc_bytes and programs ECC_ENCCNFG/ECC_DECCNFG with it.
+ * Returns 0 on success, -ENOTSUPP if no strength fits, or a negative error
+ * if the engine fails to go idle.
+ */
+int mtk_ecc_setup(struct mtk_snand *snf, void *fmdaddr, uint32_t max_ecc_bytes,
+		  uint32_t msg_size)
+{
+	uint32_t val, ecc_msg_bits, ecc_strength;
+	int i, ret;
+
+	snf->ecc_soc = &mtk_ecc_socs[snf->soc];
+
+	/* Parity bits needed per correctable bit for this message size */
+	snf->ecc_parity_bits = fls(1 + 8 * msg_size);
+	ecc_strength = max_ecc_bytes * 8 / snf->ecc_parity_bits;
+
+	/*
+	 * Walk the capability table downwards for the largest fitting
+	 * strength. 'i' must be signed: with the original uint32_t index,
+	 * 'i >= 0' was always true and 'i < 0' below was dead code, so an
+	 * unsupported layout underflowed ecc_caps[] instead of failing.
+	 */
+	for (i = snf->ecc_soc->num_ecc_cap - 1; i >= 0; i--) {
+		if (snf->ecc_soc->ecc_caps[i] <= ecc_strength)
+			break;
+	}
+
+	if (unlikely(i < 0)) {
+		snand_log_ecc(snf->pdev, "Page size %u+%u is not supported\n",
+			      snf->writesize, snf->oobsize);
+		return -ENOTSUPP;
+	}
+
+	snf->ecc_strength = snf->ecc_soc->ecc_caps[i];
+	snf->ecc_bytes = DIV_ROUND_UP(snf->ecc_strength * snf->ecc_parity_bits,
+				      8);
+
+	/* Encoder config: disable first, then program message size/strength */
+	ecc_write16(snf, ECC_ENCCON, 0);
+	ret = mtk_ecc_wait_idle(snf, ECC_ENCIDLE);
+	if (ret)
+		return ret;
+
+	ecc_msg_bits = msg_size * 8;
+	val = (ecc_msg_bits << ENC_MS_S) |
+	      (ECC_MODE_NFI << snf->ecc_soc->mode_shift) | i;
+	ecc_write32(snf, ECC_ENCCNFG, val);
+
+	/* Decoder config: codeword size = message bits plus parity bits */
+	ecc_write16(snf, ECC_DECCON, 0);
+	ret = mtk_ecc_wait_idle(snf, ECC_DECIDLE);
+	if (ret)
+		return ret;
+
+	ecc_msg_bits += snf->ecc_strength * snf->ecc_parity_bits;
+	val = DEC_EMPTY_EN | (ecc_msg_bits << DEC_CS_S) |
+	      (DEC_CON_CORRECT << DEC_CON_S) |
+	      (ECC_MODE_NFI << snf->ecc_soc->mode_shift) | i;
+	ecc_write32(snf, ECC_DECCNFG, val);
+
+	return 0;
+}
+
+/*
+ * mtk_snand_ecc_encoder_start() - enable the ECC encoder
+ *
+ * If the encoder does not reach idle, force it off and wait once more so a
+ * stuck engine is recovered before being re-enabled. Always returns 0.
+ */
+int mtk_snand_ecc_encoder_start(struct mtk_snand *snf)
+{
+	if (mtk_ecc_wait_idle(snf, ECC_ENCIDLE)) {
+		ecc_write16(snf, ECC_ENCCON, 0);
+		mtk_ecc_wait_idle(snf, ECC_ENCIDLE);
+	}
+
+	ecc_write16(snf, ECC_ENCCON, ENC_EN);
+
+	return 0;
+}
+
+/* Wait for the encoder to finish (idle), then disable it. */
+void mtk_snand_ecc_encoder_stop(struct mtk_snand *snf)
+{
+	mtk_ecc_wait_idle(snf, ECC_ENCIDLE);
+	ecc_write16(snf, ECC_ENCCON, 0);
+}
+
+/*
+ * mtk_snand_ecc_decoder_start() - enable the ECC decoder
+ *
+ * Mirrors the encoder-start path: a non-idle decoder is force-disabled and
+ * waited on again before DEC_EN is set. Always returns 0.
+ */
+int mtk_snand_ecc_decoder_start(struct mtk_snand *snf)
+{
+	if (mtk_ecc_wait_idle(snf, ECC_DECIDLE)) {
+		ecc_write16(snf, ECC_DECCON, 0);
+		mtk_ecc_wait_idle(snf, ECC_DECIDLE);
+	}
+
+	ecc_write16(snf, ECC_DECCON, DEC_EN);
+
+	return 0;
+}
+
+/* Wait for the decoder to finish (idle), then disable it. */
+void mtk_snand_ecc_decoder_stop(struct mtk_snand *snf)
+{
+	mtk_ecc_wait_idle(snf, ECC_DECIDLE);
+	ecc_write16(snf, ECC_DECCON, 0);
+}
+
+/*
+ * mtk_ecc_wait_decoder_done() - wait until all sectors are decoded
+ *
+ * Each low bit of the SoC-specific DECDONE register flags one completed
+ * sector; wait for all 'ecc_steps' of them. Returns 0 on success or the
+ * poll helper's non-zero timeout status after logging.
+ */
+int mtk_ecc_wait_decoder_done(struct mtk_snand *snf)
+{
+	uint16_t val, step_mask = (1 << snf->ecc_steps) - 1;
+	uint32_t reg = snf->ecc_soc->regs[ECC_DECDONE];
+	int ret;
+
+	ret = read16_poll_timeout(snf->ecc_base + reg, val,
+				  (val & step_mask) == step_mask, 0,
+				  ECC_TIMEOUT);
+	if (ret)
+		snand_log_ecc(snf->pdev, "ECC decoder is busy\n");
+
+	return ret;
+}
+
+/*
+ * mtk_ecc_check_decode_error() - collect per-sector correction results
+ * @snf:  controller instance
+ * @page: page number, used only in the error log message
+ *
+ * Unpacks ECC_DECENUM (four sector counters per 32-bit register; field
+ * position errnum_shift and width errnum_bits are SoC-specific) for every
+ * sector of the page. Returns the total number of corrected bitflips, or
+ * -EBADMSG if any sector had more errors than the configured strength.
+ */
+int mtk_ecc_check_decode_error(struct mtk_snand *snf, uint32_t page)
+{
+	uint32_t i, regi, fi, errnum;
+	uint32_t errnum_shift = snf->ecc_soc->errnum_shift;
+	uint32_t errnum_mask = (1 << snf->ecc_soc->errnum_bits) - 1;
+	int ret = 0;
+
+	for (i = 0; i < snf->ecc_steps; i++) {
+		regi = i / 4;	/* four sector counters per DECENUM register */
+		fi = i % 4;
+
+		errnum = ecc_read32(snf, ECC_DECENUM(regi));
+		errnum = (errnum >> (fi * errnum_shift)) & errnum_mask;
+		if (!errnum)
+			continue;
+
+		if (errnum <= snf->ecc_strength) {
+			/* Correctable: keep counting unless already failed */
+			if (ret >= 0)
+				ret += errnum;
+			continue;
+		}
+
+		snand_log_ecc(snf->pdev,
+			      "Uncorrectable bitflips in page %u sect %u\n",
+			      page, i);
+		ret = -EBADMSG;
+	}
+
+	return ret;
+}
diff --git a/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/mtk-snand-ids.c b/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/mtk-snand-ids.c
new file mode 100644
index 0000000..1756ff7
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/mtk-snand-ids.c
@@ -0,0 +1,511 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Weijie Gao <weijie.gao@mediatek.com>
+ */
+
+#include "mtk-snand-def.h"
+
+static int mtk_snand_winbond_select_die(struct mtk_snand *snf, uint32_t dieidx);
+static int mtk_snand_micron_select_die(struct mtk_snand *snf, uint32_t dieidx);
+
+#define SNAND_MEMORG_512M_2K_64		SNAND_MEMORG(2048, 64, 64, 512, 1, 1)
+#define SNAND_MEMORG_1G_2K_64		SNAND_MEMORG(2048, 64, 64, 1024, 1, 1)
+#define SNAND_MEMORG_2G_2K_64		SNAND_MEMORG(2048, 64, 64, 2048, 1, 1)
+#define SNAND_MEMORG_2G_2K_120		SNAND_MEMORG(2048, 120, 64, 2048, 1, 1)
+#define SNAND_MEMORG_4G_2K_64		SNAND_MEMORG(2048, 64, 64, 4096, 1, 1)
+#define SNAND_MEMORG_1G_2K_120		SNAND_MEMORG(2048, 120, 64, 1024, 1, 1)
+#define SNAND_MEMORG_1G_2K_128		SNAND_MEMORG(2048, 128, 64, 1024, 1, 1)
+#define SNAND_MEMORG_2G_2K_128		SNAND_MEMORG(2048, 128, 64, 2048, 1, 1)
+#define SNAND_MEMORG_4G_2K_128		SNAND_MEMORG(2048, 128, 64, 4096, 1, 1)
+#define SNAND_MEMORG_4G_4K_240		SNAND_MEMORG(4096, 240, 64, 2048, 1, 1)
+#define SNAND_MEMORG_4G_4K_256		SNAND_MEMORG(4096, 256, 64, 2048, 1, 1)
+#define SNAND_MEMORG_8G_4K_256		SNAND_MEMORG(4096, 256, 64, 4096, 1, 1)
+#define SNAND_MEMORG_2G_2K_64_2P	SNAND_MEMORG(2048, 64, 64, 2048, 2, 1)
+#define SNAND_MEMORG_2G_2K_64_2D	SNAND_MEMORG(2048, 64, 64, 1024, 1, 2)
+#define SNAND_MEMORG_2G_2K_128_2P	SNAND_MEMORG(2048, 128, 64, 2048, 2, 1)
+#define SNAND_MEMORG_4G_2K_64_2P	SNAND_MEMORG(2048, 64, 64, 4096, 2, 1)
+#define SNAND_MEMORG_4G_2K_128_2P_2D	SNAND_MEMORG(2048, 128, 64, 2048, 2, 2)
+#define SNAND_MEMORG_8G_4K_256_2D	SNAND_MEMORG(4096, 256, 64, 2048, 1, 2)
+
+static const SNAND_IO_CAP(snand_cap_read_from_cache_quad,
+	SPI_IO_1_1_1 | SPI_IO_1_1_2 | SPI_IO_1_2_2 | SPI_IO_1_1_4 |
+	SPI_IO_1_4_4,
+	SNAND_OP(SNAND_IO_1_1_1, SNAND_CMD_READ_FROM_CACHE, 8),
+	SNAND_OP(SNAND_IO_1_1_2, SNAND_CMD_READ_FROM_CACHE_X2, 8),
+	SNAND_OP(SNAND_IO_1_2_2, SNAND_CMD_READ_FROM_CACHE_DUAL, 4),
+	SNAND_OP(SNAND_IO_1_1_4, SNAND_CMD_READ_FROM_CACHE_X4, 8),
+	SNAND_OP(SNAND_IO_1_4_4, SNAND_CMD_READ_FROM_CACHE_QUAD, 4));
+
+static const SNAND_IO_CAP(snand_cap_read_from_cache_quad_q2d,
+	SPI_IO_1_1_1 | SPI_IO_1_1_2 | SPI_IO_1_2_2 | SPI_IO_1_1_4 |
+	SPI_IO_1_4_4,
+	SNAND_OP(SNAND_IO_1_1_1, SNAND_CMD_READ_FROM_CACHE, 8),
+	SNAND_OP(SNAND_IO_1_1_2, SNAND_CMD_READ_FROM_CACHE_X2, 8),
+	SNAND_OP(SNAND_IO_1_2_2, SNAND_CMD_READ_FROM_CACHE_DUAL, 4),
+	SNAND_OP(SNAND_IO_1_1_4, SNAND_CMD_READ_FROM_CACHE_X4, 8),
+	SNAND_OP(SNAND_IO_1_4_4, SNAND_CMD_READ_FROM_CACHE_QUAD, 2));
+
+static const SNAND_IO_CAP(snand_cap_read_from_cache_quad_a8d,
+	SPI_IO_1_1_1 | SPI_IO_1_1_2 | SPI_IO_1_2_2 | SPI_IO_1_1_4 |
+	SPI_IO_1_4_4,
+	SNAND_OP(SNAND_IO_1_1_1, SNAND_CMD_READ_FROM_CACHE, 8),
+	SNAND_OP(SNAND_IO_1_1_2, SNAND_CMD_READ_FROM_CACHE_X2, 8),
+	SNAND_OP(SNAND_IO_1_2_2, SNAND_CMD_READ_FROM_CACHE_DUAL, 8),
+	SNAND_OP(SNAND_IO_1_1_4, SNAND_CMD_READ_FROM_CACHE_X4, 8),
+	SNAND_OP(SNAND_IO_1_4_4, SNAND_CMD_READ_FROM_CACHE_QUAD, 8));
+
+static const SNAND_IO_CAP(snand_cap_read_from_cache_x4,
+	SPI_IO_1_1_1 | SPI_IO_1_1_2 | SPI_IO_1_1_4,
+	SNAND_OP(SNAND_IO_1_1_1, SNAND_CMD_READ_FROM_CACHE, 8),
+	SNAND_OP(SNAND_IO_1_1_2, SNAND_CMD_READ_FROM_CACHE_X2, 8),
+	SNAND_OP(SNAND_IO_1_1_4, SNAND_CMD_READ_FROM_CACHE_X4, 8));
+
+static const SNAND_IO_CAP(snand_cap_read_from_cache_x4_only,
+	SPI_IO_1_1_1 | SPI_IO_1_1_4,
+	SNAND_OP(SNAND_IO_1_1_1, SNAND_CMD_READ_FROM_CACHE, 8),
+	SNAND_OP(SNAND_IO_1_1_4, SNAND_CMD_READ_FROM_CACHE_X4, 8));
+
+static const SNAND_IO_CAP(snand_cap_program_load_x1,
+	SPI_IO_1_1_1,
+	SNAND_OP(SNAND_IO_1_1_1, SNAND_CMD_PROGRAM_LOAD, 0));
+
+static const SNAND_IO_CAP(snand_cap_program_load_x4,
+	SPI_IO_1_1_1 | SPI_IO_1_1_4,
+	SNAND_OP(SNAND_IO_1_1_1, SNAND_CMD_PROGRAM_LOAD, 0),
+	SNAND_OP(SNAND_IO_1_1_4, SNAND_CMD_PROGRAM_LOAD_X4, 0));
+
+static const struct snand_flash_info snand_flash_ids[] = {
+	SNAND_INFO("W25N512GV", SNAND_ID(SNAND_ID_DYMMY, 0xef, 0xaa, 0x20),
+		   SNAND_MEMORG_512M_2K_64,
+		   &snand_cap_read_from_cache_quad,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("W25N01GV", SNAND_ID(SNAND_ID_DYMMY, 0xef, 0xaa, 0x21),
+		   SNAND_MEMORG_1G_2K_64,
+		   &snand_cap_read_from_cache_quad,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("W25M02GV", SNAND_ID(SNAND_ID_DYMMY, 0xef, 0xab, 0x21),
+		   SNAND_MEMORG_2G_2K_64_2D,
+		   &snand_cap_read_from_cache_quad,
+		   &snand_cap_program_load_x4,
+		   mtk_snand_winbond_select_die),
+	SNAND_INFO("W25N02KV", SNAND_ID(SNAND_ID_DYMMY, 0xef, 0xaa, 0x22),
+		   SNAND_MEMORG_2G_2K_128,
+		   &snand_cap_read_from_cache_quad,
+		   &snand_cap_program_load_x4),
+
+	SNAND_INFO("GD5F1GQ4UAWxx", SNAND_ID(SNAND_ID_ADDR, 0xc8, 0x10),
+		   SNAND_MEMORG_1G_2K_64,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("GD5F1GQ4UExIG", SNAND_ID(SNAND_ID_ADDR, 0xc8, 0xd1),
+		   SNAND_MEMORG_1G_2K_128,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("GD5F1GQ4UExxH", SNAND_ID(SNAND_ID_ADDR, 0xc8, 0xd9),
+		   SNAND_MEMORG_1G_2K_64,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("GD5F1GQ4xAYIG", SNAND_ID(SNAND_ID_ADDR, 0xc8, 0xf1),
+		   SNAND_MEMORG_1G_2K_64,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("GD5F2GQ4UExIG", SNAND_ID(SNAND_ID_ADDR, 0xc8, 0xd2),
+		   SNAND_MEMORG_2G_2K_128,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("GD5F2GQ5UExxH", SNAND_ID(SNAND_ID_ADDR, 0xc8, 0x32),
+		   SNAND_MEMORG_2G_2K_64,
+		   &snand_cap_read_from_cache_quad_a8d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("GD5F2GQ4xAYIG", SNAND_ID(SNAND_ID_ADDR, 0xc8, 0xf2),
+		   SNAND_MEMORG_2G_2K_64,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("GD5F4GQ4UBxIG", SNAND_ID(SNAND_ID_ADDR, 0xc8, 0xd4),
+		   SNAND_MEMORG_4G_4K_256,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("GD5F4GQ4xAYIG", SNAND_ID(SNAND_ID_ADDR, 0xc8, 0xf4),
+		   SNAND_MEMORG_4G_2K_64,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("GD5F2GQ5UExxG", SNAND_ID(SNAND_ID_DYMMY, 0xc8, 0x52),
+		   SNAND_MEMORG_2G_2K_128,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("GD5F4GQ4UCxIG", SNAND_ID(SNAND_ID_DYMMY, 0xc8, 0xb4),
+		   SNAND_MEMORG_4G_4K_256,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+
+	SNAND_INFO("MX35LF1GE4AB", SNAND_ID(SNAND_ID_DYMMY, 0xc2, 0x12),
+		   SNAND_MEMORG_1G_2K_64,
+		   &snand_cap_read_from_cache_x4,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("MX35LF1G24AD", SNAND_ID(SNAND_ID_DYMMY, 0xc2, 0x14),
+		   SNAND_MEMORG_1G_2K_128,
+		   &snand_cap_read_from_cache_quad,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("MX31LF1GE4BC", SNAND_ID(SNAND_ID_DYMMY, 0xc2, 0x1e),
+		   SNAND_MEMORG_1G_2K_64,
+		   &snand_cap_read_from_cache_x4,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("MX35LF2GE4AB", SNAND_ID(SNAND_ID_DYMMY, 0xc2, 0x22),
+		   SNAND_MEMORG_2G_2K_64,
+		   &snand_cap_read_from_cache_x4,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("MX35LF2G24AD", SNAND_ID(SNAND_ID_DYMMY, 0xc2, 0x24),
+		   SNAND_MEMORG_2G_2K_128,
+		   &snand_cap_read_from_cache_quad,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("MX35LF2GE4AD", SNAND_ID(SNAND_ID_DYMMY, 0xc2, 0x26),
+		   SNAND_MEMORG_2G_2K_128,
+		   &snand_cap_read_from_cache_x4,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("MX35LF2G14AC", SNAND_ID(SNAND_ID_DYMMY, 0xc2, 0x20),
+		   SNAND_MEMORG_2G_2K_64,
+		   &snand_cap_read_from_cache_x4,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("MX35LF4G24AD", SNAND_ID(SNAND_ID_DYMMY, 0xc2, 0x35),
+		   SNAND_MEMORG_4G_4K_256,
+		   &snand_cap_read_from_cache_quad,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("MX35LF4GE4AD", SNAND_ID(SNAND_ID_DYMMY, 0xc2, 0x37),
+		   SNAND_MEMORG_4G_4K_256,
+		   &snand_cap_read_from_cache_x4,
+		   &snand_cap_program_load_x4),
+
+	SNAND_INFO("MT29F1G01AAADD", SNAND_ID(SNAND_ID_DYMMY, 0x2c, 0x12),
+		   SNAND_MEMORG_1G_2K_64,
+		   &snand_cap_read_from_cache_x4,
+		   &snand_cap_program_load_x1),
+	SNAND_INFO("MT29F1G01ABAFD", SNAND_ID(SNAND_ID_DYMMY, 0x2c, 0x14),
+		   SNAND_MEMORG_1G_2K_128,
+		   &snand_cap_read_from_cache_quad,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("MT29F2G01AAAED", SNAND_ID(SNAND_ID_DYMMY, 0x2c, 0x9f),
+		   SNAND_MEMORG_2G_2K_64_2P,
+		   &snand_cap_read_from_cache_x4,
+		   &snand_cap_program_load_x1),
+	SNAND_INFO("MT29F2G01ABAGD", SNAND_ID(SNAND_ID_DYMMY, 0x2c, 0x24),
+		   SNAND_MEMORG_2G_2K_128_2P,
+		   &snand_cap_read_from_cache_quad,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("MT29F4G01AAADD", SNAND_ID(SNAND_ID_DYMMY, 0x2c, 0x32),
+		   SNAND_MEMORG_4G_2K_64_2P,
+		   &snand_cap_read_from_cache_x4,
+		   &snand_cap_program_load_x1),
+	SNAND_INFO("MT29F4G01ABAFD", SNAND_ID(SNAND_ID_DYMMY, 0x2c, 0x34),
+		   SNAND_MEMORG_4G_4K_256,
+		   &snand_cap_read_from_cache_quad,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("MT29F4G01ADAGD", SNAND_ID(SNAND_ID_DYMMY, 0x2c, 0x36),
+		   SNAND_MEMORG_4G_2K_128_2P_2D,
+		   &snand_cap_read_from_cache_quad,
+		   &snand_cap_program_load_x4,
+		   mtk_snand_micron_select_die),
+	SNAND_INFO("MT29F8G01ADAFD", SNAND_ID(SNAND_ID_DYMMY, 0x2c, 0x46),
+		   SNAND_MEMORG_8G_4K_256_2D,
+		   &snand_cap_read_from_cache_quad,
+		   &snand_cap_program_load_x4,
+		   mtk_snand_micron_select_die),
+
+	SNAND_INFO("TC58CVG0S3HRAIG", SNAND_ID(SNAND_ID_DYMMY, 0x98, 0xc2),
+		   SNAND_MEMORG_1G_2K_128,
+		   &snand_cap_read_from_cache_x4,
+		   &snand_cap_program_load_x1),
+	SNAND_INFO("TC58CVG1S3HRAIG", SNAND_ID(SNAND_ID_DYMMY, 0x98, 0xcb),
+		   SNAND_MEMORG_2G_2K_128,
+		   &snand_cap_read_from_cache_x4,
+		   &snand_cap_program_load_x1),
+	SNAND_INFO("TC58CVG2S0HRAIG", SNAND_ID(SNAND_ID_DYMMY, 0x98, 0xcd),
+		   SNAND_MEMORG_4G_4K_256,
+		   &snand_cap_read_from_cache_x4,
+		   &snand_cap_program_load_x1),
+	SNAND_INFO("TC58CVG0S3HRAIJ", SNAND_ID(SNAND_ID_DYMMY, 0x98, 0xe2),
+		   SNAND_MEMORG_1G_2K_128,
+		   &snand_cap_read_from_cache_x4,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("TC58CVG1S3HRAIJ", SNAND_ID(SNAND_ID_DYMMY, 0x98, 0xeb),
+		   SNAND_MEMORG_2G_2K_128,
+		   &snand_cap_read_from_cache_x4,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("TC58CVG2S0HRAIJ", SNAND_ID(SNAND_ID_DYMMY, 0x98, 0xed),
+		   SNAND_MEMORG_4G_4K_256,
+		   &snand_cap_read_from_cache_x4,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("TH58CVG3S0HRAIJ", SNAND_ID(SNAND_ID_DYMMY, 0x98, 0xe4),
+		   SNAND_MEMORG_8G_4K_256,
+		   &snand_cap_read_from_cache_x4,
+		   &snand_cap_program_load_x4),
+
+	SNAND_INFO("F50L512M41A", SNAND_ID(SNAND_ID_DYMMY, 0xc8, 0x20),
+		   SNAND_MEMORG_512M_2K_64,
+		   &snand_cap_read_from_cache_x4,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("F50L1G41A", SNAND_ID(SNAND_ID_DYMMY, 0xc8, 0x21),
+		   SNAND_MEMORG_1G_2K_64,
+		   &snand_cap_read_from_cache_x4,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("F50L1G41LB", SNAND_ID(SNAND_ID_DYMMY, 0xc8, 0x01),
+		   SNAND_MEMORG_1G_2K_64,
+		   &snand_cap_read_from_cache_quad,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("F50L2G41LB", SNAND_ID(SNAND_ID_DYMMY, 0xc8, 0x0a),
+		   SNAND_MEMORG_2G_2K_64_2D,
+		   &snand_cap_read_from_cache_quad,
+		   &snand_cap_program_load_x4,
+		   mtk_snand_winbond_select_die),
+
+	SNAND_INFO("CS11G0T0A0AA", SNAND_ID(SNAND_ID_DYMMY, 0x6b, 0x00),
+		   SNAND_MEMORG_1G_2K_128,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("CS11G0G0A0AA", SNAND_ID(SNAND_ID_DYMMY, 0x6b, 0x10),
+		   SNAND_MEMORG_1G_2K_128,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("CS11G0S0A0AA", SNAND_ID(SNAND_ID_DYMMY, 0x6b, 0x20),
+		   SNAND_MEMORG_1G_2K_64,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("CS11G1T0A0AA", SNAND_ID(SNAND_ID_DYMMY, 0x6b, 0x01),
+		   SNAND_MEMORG_2G_2K_128,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("CS11G1S0A0AA", SNAND_ID(SNAND_ID_DYMMY, 0x6b, 0x21),
+		   SNAND_MEMORG_2G_2K_64,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("CS11G2T0A0AA", SNAND_ID(SNAND_ID_DYMMY, 0x6b, 0x02),
+		   SNAND_MEMORG_4G_2K_128,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("CS11G2S0A0AA", SNAND_ID(SNAND_ID_DYMMY, 0x6b, 0x22),
+		   SNAND_MEMORG_4G_2K_64,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+
+	SNAND_INFO("EM73B044VCA", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x01),
+		   SNAND_MEMORG_512M_2K_64,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73C044SNB", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x11),
+		   SNAND_MEMORG_1G_2K_120,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73C044SNF", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x09),
+		   SNAND_MEMORG_1G_2K_128,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73C044VCA", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x18),
+		   SNAND_MEMORG_1G_2K_64,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73C044SNA", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x19),
+		   SNAND_MEMORG(2048, 64, 128, 512, 1, 1),
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73C044VCD", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x1c),
+		   SNAND_MEMORG_1G_2K_64,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73C044SND", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x1d),
+		   SNAND_MEMORG_1G_2K_64,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73D044SND", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x1e),
+		   SNAND_MEMORG_2G_2K_64,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73C044VCC", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x22),
+		   SNAND_MEMORG_1G_2K_64,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73C044VCF", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x25),
+		   SNAND_MEMORG_1G_2K_64,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73C044SNC", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x31),
+		   SNAND_MEMORG_1G_2K_128,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73D044SNC", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x0a),
+		   SNAND_MEMORG_2G_2K_120,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73D044SNA", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x12),
+		   SNAND_MEMORG_2G_2K_128,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73D044SNF", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x10),
+		   SNAND_MEMORG_2G_2K_128,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73D044VCA", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x13),
+		   SNAND_MEMORG_2G_2K_128,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73D044VCB", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x14),
+		   SNAND_MEMORG_2G_2K_64,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73D044VCD", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x17),
+		   SNAND_MEMORG_2G_2K_128,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73D044VCH", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x1b),
+		   SNAND_MEMORG_2G_2K_64,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73D044SND", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x1d),
+		   SNAND_MEMORG_2G_2K_64,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73D044VCG", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x1f),
+		   SNAND_MEMORG_2G_2K_64,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73D044VCE", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x20),
+		   SNAND_MEMORG_2G_2K_64,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73D044VCL", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x2e),
+		   SNAND_MEMORG_2G_2K_128,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73D044SNB", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x32),
+		   SNAND_MEMORG_2G_2K_128,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73E044SNA", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x03),
+		   SNAND_MEMORG_4G_4K_256,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73E044SND", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x0b),
+		   SNAND_MEMORG_4G_4K_240,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73E044SNB", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x23),
+		   SNAND_MEMORG_4G_4K_256,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73E044VCA", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x2c),
+		   SNAND_MEMORG_4G_4K_256,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73E044VCB", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x2f),
+		   SNAND_MEMORG_4G_2K_128,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73F044SNA", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x24),
+		   SNAND_MEMORG_8G_4K_256,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73F044VCA", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x2d),
+		   SNAND_MEMORG_8G_4K_256,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73E044SNE", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x0e),
+		   SNAND_MEMORG_8G_4K_256,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73C044SNG", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x0c),
+		   SNAND_MEMORG_1G_2K_120,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("EM73D044VCN", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x0f),
+		   SNAND_MEMORG_2G_2K_64,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+
+	SNAND_INFO("FM35Q1GA", SNAND_ID(SNAND_ID_DYMMY, 0xe5, 0x71),
+		   SNAND_MEMORG_1G_2K_64,
+		   &snand_cap_read_from_cache_x4,
+		   &snand_cap_program_load_x4),
+
+	SNAND_INFO("PN26G01A", SNAND_ID(SNAND_ID_DYMMY, 0xa1, 0xe1),
+		   SNAND_MEMORG_1G_2K_128,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("PN26G02A", SNAND_ID(SNAND_ID_DYMMY, 0xa1, 0xe2),
+		   SNAND_MEMORG_2G_2K_128,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+
+	SNAND_INFO("IS37SML01G1", SNAND_ID(SNAND_ID_DYMMY, 0xc8, 0x21),
+		   SNAND_MEMORG_1G_2K_64,
+		   &snand_cap_read_from_cache_x4,
+		   &snand_cap_program_load_x4),
+
+	SNAND_INFO("ATO25D1GA", SNAND_ID(SNAND_ID_DYMMY, 0x9b, 0x12),
+		   SNAND_MEMORG_1G_2K_64,
+		   &snand_cap_read_from_cache_x4_only,
+		   &snand_cap_program_load_x4),
+
+	SNAND_INFO("HYF1GQ4U", SNAND_ID(SNAND_ID_DYMMY, 0xc9, 0x51),
+		   SNAND_MEMORG_1G_2K_128,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+	SNAND_INFO("HYF2GQ4U", SNAND_ID(SNAND_ID_DYMMY, 0xc9, 0x52),
+		   SNAND_MEMORG_2G_2K_128,
+		   &snand_cap_read_from_cache_quad_q2d,
+		   &snand_cap_program_load_x4),
+};
+
+/*
+ * Select the active die on Winbond-style stacked chips (0xc2 command
+ * followed by the die index). Returns the MAC I/O status, or -EINVAL for a
+ * die index other than 0/1.
+ */
+static int mtk_snand_winbond_select_die(struct mtk_snand *snf, uint32_t dieidx)
+{
+	uint8_t cmd[2] = { SNAND_CMD_WINBOND_SELECT_DIE, (uint8_t)dieidx };
+
+	if (dieidx > 1) {
+		snand_log_chip(snf->pdev, "Invalid die index %u\n", dieidx);
+		return -EINVAL;
+	}
+
+	return mtk_snand_mac_io(snf, cmd, sizeof(cmd), NULL, 0);
+}
+
+/*
+ * Select the active die on Micron dual-die chips via the die-select feature
+ * register (0xd0, bit 6: clear = die 0, set = die 1). Returns 0 on success,
+ * -EINVAL for a die index other than 0/1, or the set-feature error.
+ */
+static int mtk_snand_micron_select_die(struct mtk_snand *snf, uint32_t dieidx)
+{
+	int ret;
+
+	if (dieidx > 1) {
+		snand_log_chip(snf->pdev, "Invalid die index %u\n", dieidx);
+		return -EINVAL;
+	}
+
+	/*
+	 * The original code wrote SNAND_MICRON_DIE_SEL_1 unconditionally,
+	 * which selected die 1 even when die 0 was requested; clear the bit
+	 * for die 0.
+	 */
+	ret = mtk_snand_set_feature(snf, SNAND_FEATURE_MICRON_DIE_ADDR,
+				    dieidx ? SNAND_MICRON_DIE_SEL_1 : 0);
+	if (ret) {
+		snand_log_chip(snf->pdev,
+			       "Failed to set die selection feature\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * snand_flash_id_lookup() - match a read ID against the known-flash table
+ * @type: ID layout the bytes were read with (dummy-byte vs address style)
+ * @id:   raw ID bytes returned by the chip
+ *
+ * Returns the matching table entry, or NULL if no entry of the same ID type
+ * compares equal over that entry's ID length.
+ */
+const struct snand_flash_info *snand_flash_id_lookup(enum snand_id_type type,
+						     const uint8_t *id)
+{
+	const struct snand_flash_info *info;
+	uint32_t n;
+
+	for (n = 0; n < ARRAY_SIZE(snand_flash_ids); n++) {
+		info = &snand_flash_ids[n];
+		if (info->id.type == type &&
+		    !memcmp(info->id.id, id, info->id.len))
+			return info;
+	}
+
+	return NULL;
+}
diff --git a/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/mtk-snand-mtd.c b/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/mtk-snand-mtd.c
new file mode 100644
index 0000000..27cfb18
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/mtk-snand-mtd.c
@@ -0,0 +1,671 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Weijie Gao <weijie.gao@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/wait.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of_platform.h>
+
+#include "mtk-snand.h"
+#include "mtk-snand-os.h"
+
+/* Per-compatible match data: which SoC variant a DT node selects. */
+struct mtk_snand_of_id {
+	enum mtk_snand_soc soc;
+};
+
+/* Driver state for one SNFI controller instance. */
+struct mtk_snand_mtd {
+	struct mtk_snand_plat_dev pdev;		/* OS glue: device + IRQ completion */
+
+	struct clk *nfi_clk;			/* NFI controller clock */
+	struct clk *pad_clk;			/* SPI pad clock */
+	struct clk *ecc_clk;			/* ECC engine clock */
+
+	void __iomem *nfi_regs;			/* mapped NFI/SNFI register block */
+	void __iomem *ecc_regs;			/* mapped ECC register block */
+
+	int irq;				/* controller IRQ, or negative if none */
+
+	bool quad_spi;				/* board wired for x4 I/O */
+	enum mtk_snand_soc soc;			/* SoC variant from OF match data */
+
+	struct mtd_info mtd;			/* MTD device registered with the core */
+	struct mtk_snand *snf;			/* lower-level controller handle */
+	struct mtk_snand_chip_info cinfo;	/* probed chip geometry */
+	uint8_t *page_cache;			/* one page + spare bounce buffer */
+	struct mutex lock;			/* serializes all flash operations */
+};
+
+/* Recover the driver state from an embedded struct mtd_info pointer. */
+#define mtd_to_msm(mtd) container_of(mtd, struct mtk_snand_mtd, mtd)
+
+/*
+ * MTD _erase hook: erase all blocks covered by [instr->addr, addr + len).
+ * The range is rounded outward to eraseblock boundaries. Stops at the first
+ * bad block (-EIO) or erase failure, recording it in instr->fail_addr.
+ */
+static int mtk_snand_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+	struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
+	u64 start_addr, end_addr;
+	int ret = 0;	/* fix: was uninitialized when the range is empty */
+
+	/* Do not allow write past end of device */
+	if ((instr->addr + instr->len) > mtd->size) {
+		dev_err(msm->pdev.dev,
+			"attempt to erase beyond end of device\n");
+		return -EINVAL;
+	}
+
+	/* Round the range outward to full eraseblocks. */
+	start_addr = instr->addr & (~mtd->erasesize_mask);
+	end_addr = instr->addr + instr->len;
+	if (end_addr & mtd->erasesize_mask) {
+		end_addr = (end_addr + mtd->erasesize_mask) &
+			   (~mtd->erasesize_mask);
+	}
+
+	mutex_lock(&msm->lock);
+
+	while (start_addr < end_addr) {
+		/* Never erase a block already marked bad. */
+		if (mtk_snand_block_isbad(msm->snf, start_addr)) {
+			instr->fail_addr = start_addr;
+			ret = -EIO;
+			break;
+		}
+
+		ret = mtk_snand_erase_block(msm->snf, start_addr);
+		if (ret) {
+			instr->fail_addr = start_addr;
+			break;
+		}
+
+		start_addr += mtd->erasesize;
+	}
+
+	mutex_unlock(&msm->lock);
+
+	return ret;
+}
+
+/*
+ * Core read loop shared by the _read_oob hook: reads whole pages into the
+ * driver's bounce buffer and copies the requested data/OOB slices out to the
+ * caller's buffers. 'addr' may start mid-page; MTD_OPS_RAW bypasses ECC.
+ * Caller must hold msm->lock and have validated the ranges.
+ */
+static int mtk_snand_mtd_read_data(struct mtk_snand_mtd *msm, uint64_t addr,
+				   struct mtd_oob_ops *ops)
+{
+	struct mtd_info *mtd = &msm->mtd;
+	size_t len, ooblen, maxooblen, chklen;
+	uint32_t col, ooboffs;
+	uint8_t *datcache, *oobcache;
+	bool raw = ops->mode == MTD_OPS_RAW ? true : false;
+	int ret;
+
+	/* Split the start address into page address + in-page column. */
+	col = addr & mtd->writesize_mask;
+	addr &= ~mtd->writesize_mask;
+	maxooblen = mtd_oobavail(mtd, ops);
+	ooboffs = ops->ooboffs;
+	ooblen = ops->ooblen;
+	len = ops->len;
+
+	/* Bounce-buffer layout: [0, writesize) data, then OOB. */
+	datcache = len ? msm->page_cache : NULL;
+	oobcache = ooblen ? msm->page_cache + mtd->writesize : NULL;
+
+	ops->oobretlen = 0;
+	ops->retlen = 0;
+
+	while (len || ooblen) {
+		if (ops->mode == MTD_OPS_AUTO_OOB)
+			ret = mtk_snand_read_page_auto_oob(msm->snf, addr,
+				datcache, oobcache, maxooblen, NULL, raw);
+		else
+			ret = mtk_snand_read_page(msm->snf, addr, datcache,
+				oobcache, raw);
+
+		if (ret < 0)
+			return ret;
+
+		if (len) {
+			/* Move data */
+			chklen = mtd->writesize - col;
+			if (chklen > len)
+				chklen = len;
+
+			memcpy(ops->datbuf + ops->retlen, datcache + col,
+			       chklen);
+			len -= chklen;
+			col = 0; /* (col + chklen) %  */
+			ops->retlen += chklen;
+		}
+
+		if (ooblen) {
+			/* Move oob */
+			chklen = maxooblen - ooboffs;
+			if (chklen > ooblen)
+				chklen = ooblen;
+
+			memcpy(ops->oobbuf + ops->oobretlen, oobcache + ooboffs,
+			       chklen);
+			ooblen -= chklen;
+			ooboffs = 0; /* (ooboffs + chklen) % maxooblen; */
+			ops->oobretlen += chklen;
+		}
+
+		addr += mtd->writesize;
+	}
+
+	return 0;
+}
+
+/*
+ * MTD _read_oob hook: validate the request (mode, data range, OOB range),
+ * then perform the read under the driver lock via mtk_snand_mtd_read_data().
+ */
+static int mtk_snand_mtd_read_oob(struct mtd_info *mtd, loff_t from,
+				  struct mtd_oob_ops *ops)
+{
+	struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
+	uint32_t maxooblen;
+	int ret;
+
+	/* No buffers: legal only if nothing was requested. */
+	if (!ops->oobbuf && !ops->datbuf) {
+		if (ops->ooblen || ops->len)
+			return -EINVAL;
+
+		return 0;
+	}
+
+	switch (ops->mode) {
+	case MTD_OPS_PLACE_OOB:
+	case MTD_OPS_AUTO_OOB:
+	case MTD_OPS_RAW:
+		break;
+	default:
+		dev_err(msm->pdev.dev, "unsupported oob mode: %u\n", ops->mode);
+		return -EINVAL;
+	}
+
+	maxooblen = mtd_oobavail(mtd, ops);
+
+	/* Do not allow read past end of device */
+	if (ops->datbuf && (from + ops->len) > mtd->size) {
+		dev_err(msm->pdev.dev,
+			"attempt to read beyond end of device\n");
+		return -EINVAL;
+	}
+
+	if (unlikely(ops->ooboffs >= maxooblen)) {
+		dev_err(msm->pdev.dev, "attempt to start read outside oob\n");
+		return -EINVAL;
+	}
+
+	/* OOB request must fit within the pages remaining after 'from'. */
+	if (unlikely(from >= mtd->size ||
+	    ops->ooboffs + ops->ooblen > ((mtd->size >> mtd->writesize_shift) -
+	    (from >> mtd->writesize_shift)) * maxooblen)) {
+		dev_err(msm->pdev.dev,
+			"attempt to read beyond end of device\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&msm->lock);
+	ret = mtk_snand_mtd_read_data(msm, from, ops);
+	mutex_unlock(&msm->lock);
+
+	return ret;
+}
+
+/*
+ * Core write loop shared by the _write_oob hook: assembles each page in the
+ * bounce buffer (padding untouched bytes with 0xff so they are not
+ * programmed) and writes it out. 'addr' may start mid-page; MTD_OPS_RAW
+ * bypasses ECC. Caller must hold msm->lock and have validated the ranges.
+ */
+static int mtk_snand_mtd_write_data(struct mtk_snand_mtd *msm, uint64_t addr,
+				    struct mtd_oob_ops *ops)
+{
+	struct mtd_info *mtd = &msm->mtd;
+	size_t len, ooblen, maxooblen, chklen, oobwrlen;
+	uint32_t col, ooboffs;
+	uint8_t *datcache, *oobcache;
+	bool raw = ops->mode == MTD_OPS_RAW ? true : false;
+	int ret;
+
+	/* Split the start address into page address + in-page column. */
+	col = addr & mtd->writesize_mask;
+	addr &= ~mtd->writesize_mask;
+	maxooblen = mtd_oobavail(mtd, ops);
+	ooboffs = ops->ooboffs;
+	ooblen = ops->ooblen;
+	len = ops->len;
+
+	/* Bounce-buffer layout: [0, writesize) data, then OOB. */
+	datcache = len ? msm->page_cache : NULL;
+	oobcache = ooblen ? msm->page_cache + mtd->writesize : NULL;
+
+	ops->oobretlen = 0;
+	ops->retlen = 0;
+
+	while (len || ooblen) {
+		if (len) {
+			/* Move data */
+			chklen = mtd->writesize - col;
+			if (chklen > len)
+				chklen = len;
+
+			/* 0xff-pad around the caller's slice: NAND no-op. */
+			memset(datcache, 0xff, col);
+			memcpy(datcache + col, ops->datbuf + ops->retlen,
+			       chklen);
+			memset(datcache + col + chklen, 0xff,
+			       mtd->writesize - col - chklen);
+			len -= chklen;
+			col = 0; /* (col + chklen) %  */
+			ops->retlen += chklen;
+		}
+
+		oobwrlen = 0;
+		if (ooblen) {
+			/* Move oob */
+			chklen = maxooblen - ooboffs;
+			if (chklen > ooblen)
+				chklen = ooblen;
+
+			memset(oobcache, 0xff, ooboffs);
+			memcpy(oobcache + ooboffs,
+			       ops->oobbuf + ops->oobretlen, chklen);
+			memset(oobcache + ooboffs + chklen, 0xff,
+			       mtd->oobsize - ooboffs - chklen);
+			oobwrlen = chklen + ooboffs;
+			ooblen -= chklen;
+			ooboffs = 0; /* (ooboffs + chklen) % maxooblen; */
+			ops->oobretlen += chklen;
+		}
+
+		if (ops->mode == MTD_OPS_AUTO_OOB)
+			ret = mtk_snand_write_page_auto_oob(msm->snf, addr,
+				datcache, oobcache, oobwrlen, NULL, raw);
+		else
+			ret = mtk_snand_write_page(msm->snf, addr, datcache,
+				oobcache, raw);
+
+		if (ret)
+			return ret;
+
+		addr += mtd->writesize;
+	}
+
+	return 0;
+}
+
+/*
+ * MTD _write_oob hook: validate the request (mode, data range, OOB range),
+ * then perform the write under the driver lock via mtk_snand_mtd_write_data().
+ */
+static int mtk_snand_mtd_write_oob(struct mtd_info *mtd, loff_t to,
+				   struct mtd_oob_ops *ops)
+{
+	struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
+	uint32_t maxooblen;
+	int ret;
+
+	/* No buffers: legal only if nothing was requested. */
+	if (!ops->oobbuf && !ops->datbuf) {
+		if (ops->ooblen || ops->len)
+			return -EINVAL;
+
+		return 0;
+	}
+
+	switch (ops->mode) {
+	case MTD_OPS_PLACE_OOB:
+	case MTD_OPS_AUTO_OOB:
+	case MTD_OPS_RAW:
+		break;
+	default:
+		dev_err(msm->pdev.dev, "unsupported oob mode: %u\n", ops->mode);
+		return -EINVAL;
+	}
+
+	maxooblen = mtd_oobavail(mtd, ops);
+
+	/* Do not allow write past end of device */
+	if (ops->datbuf && (to + ops->len) > mtd->size) {
+		dev_err(msm->pdev.dev,
+			"attempt to write beyond end of device\n");
+		return -EINVAL;
+	}
+
+	if (unlikely(ops->ooboffs >= maxooblen)) {
+		dev_err(msm->pdev.dev,
+			"attempt to start write outside oob\n");
+		return -EINVAL;
+	}
+
+	/* OOB request must fit within the pages remaining after 'to'. */
+	if (unlikely(to >= mtd->size ||
+	    ops->ooboffs + ops->ooblen > ((mtd->size >> mtd->writesize_shift) -
+	    (to >> mtd->writesize_shift)) * maxooblen)) {
+		dev_err(msm->pdev.dev,
+			"attempt to write beyond end of device\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&msm->lock);
+	ret = mtk_snand_mtd_write_data(msm, to, ops);
+	mutex_unlock(&msm->lock);
+
+	return ret;
+}
+
+/* MTD _block_isbad hook: bad-block query, serialized on the driver lock. */
+static int mtk_snand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
+{
+	struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
+	int bad;
+
+	mutex_lock(&msm->lock);
+	bad = mtk_snand_block_isbad(msm->snf, offs);
+	mutex_unlock(&msm->lock);
+
+	return bad;
+}
+
+/* MTD _block_markbad hook: mark a block bad, serialized on the driver lock. */
+static int mtk_snand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
+{
+	struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
+	int err;
+
+	mutex_lock(&msm->lock);
+	err = mtk_snand_block_markbad(msm->snf, offs);
+	mutex_unlock(&msm->lock);
+
+	return err;
+}
+
+/*
+ * OOB layout, ECC region: everything in the spare area past the per-sector
+ * FDM bytes. There is a single section.
+ */
+static int mtk_snand_ooblayout_ecc(struct mtd_info *mtd, int section,
+				   struct mtd_oob_region *oobecc)
+{
+	struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
+
+	if (section)
+		return -ERANGE;
+
+	oobecc->offset = msm->cinfo.num_sectors * msm->cinfo.fdm_size;
+	oobecc->length = mtd->oobsize - oobecc->offset;
+
+	return 0;
+}
+
+/*
+ * OOB layout, free region: one section per sector; each sector's free bytes
+ * are its FDM area minus the leading byte reserved for the bad-block marker.
+ */
+static int mtk_snand_ooblayout_free(struct mtd_info *mtd, int section,
+				    struct mtd_oob_region *oobfree)
+{
+	struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
+
+	if (section >= msm->cinfo.num_sectors)
+		return -ERANGE;
+
+	oobfree->offset = section * msm->cinfo.fdm_size + 1;
+	oobfree->length = msm->cinfo.fdm_size - 1;
+
+	return 0;
+}
+
+/* IRQ handler: delegate to the core and report whether the IRQ was ours. */
+static irqreturn_t mtk_snand_irq(int irq, void *id)
+{
+	struct mtk_snand_mtd *msm = id;
+
+	return mtk_snand_irq_process(msm->snf) > 0 ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/*
+ * Enable the NFI, pad and ECC clocks in order. On failure, any clocks
+ * already enabled here are disabled again before returning the error, so
+ * callers need no cleanup of their own.
+ */
+static int mtk_snand_enable_clk(struct mtk_snand_mtd *msm)
+{
+	int ret;
+
+	ret = clk_prepare_enable(msm->nfi_clk);
+	if (ret) {
+		dev_err(msm->pdev.dev, "unable to enable nfi clk\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(msm->pad_clk);
+	if (ret) {
+		dev_err(msm->pdev.dev, "unable to enable pad clk\n");
+		clk_disable_unprepare(msm->nfi_clk);
+		return ret;
+	}
+
+	ret = clk_prepare_enable(msm->ecc_clk);
+	if (ret) {
+		dev_err(msm->pdev.dev, "unable to enable ecc clk\n");
+		clk_disable_unprepare(msm->nfi_clk);
+		clk_disable_unprepare(msm->pad_clk);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Disable all controller clocks enabled by mtk_snand_enable_clk(). */
+static void mtk_snand_disable_clk(struct mtk_snand_mtd *msm)
+{
+	clk_disable_unprepare(msm->nfi_clk);
+	clk_disable_unprepare(msm->pad_clk);
+	clk_disable_unprepare(msm->ecc_clk);
+}
+
+/* OOB layout callbacks registered with the MTD core. */
+static const struct mtd_ooblayout_ops mtk_snand_ooblayout = {
+	.ecc = mtk_snand_ooblayout_ecc,
+	.free = mtk_snand_ooblayout_free,
+};
+
+/* Per-SoC match data referenced by the OF match table below. */
+static struct mtk_snand_of_id mt7622_soc_id = { .soc = SNAND_SOC_MT7622 };
+static struct mtk_snand_of_id mt7629_soc_id = { .soc = SNAND_SOC_MT7629 };
+static struct mtk_snand_of_id mt7986_soc_id = { .soc = SNAND_SOC_MT7986 };
+
+/* Devicetree compatibles handled by this driver. */
+static const struct of_device_id mtk_snand_ids[] = {
+	{ .compatible = "mediatek,mt7622-snand", .data = &mt7622_soc_id },
+	{ .compatible = "mediatek,mt7629-snand", .data = &mt7629_soc_id },
+	{ .compatible = "mediatek,mt7986-snand", .data = &mt7986_soc_id },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(of, mtk_snand_ids);
+
+/*
+ * Platform probe: map the NFI/ECC register blocks, acquire and enable the
+ * clocks, initialize the SNFI core, optionally hook up the IRQ, then build
+ * and register the MTD device describing the probed chip.
+ *
+ * Fix: the original error paths never disabled the clocks once
+ * mtk_snand_enable_clk() had succeeded; a new errout_clk unwind stage now
+ * covers every failure after that point.
+ */
+static int mtk_snand_probe(struct platform_device *pdev)
+{
+	struct mtk_snand_platdata mtk_snand_pdata = {};
+	struct device_node *np = pdev->dev.of_node;
+	const struct of_device_id *of_soc_id;
+	const struct mtk_snand_of_id *soc_id;
+	struct mtk_snand_mtd *msm;
+	struct mtd_info *mtd;
+	struct resource *r;
+	uint32_t size;
+	int ret;
+
+	of_soc_id = of_match_node(mtk_snand_ids, np);
+	if (!of_soc_id)
+		return -EINVAL;
+
+	soc_id = of_soc_id->data;
+
+	msm = devm_kzalloc(&pdev->dev, sizeof(*msm), GFP_KERNEL);
+	if (!msm)
+		return -ENOMEM;
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nfi");
+	msm->nfi_regs = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(msm->nfi_regs)) {
+		ret = PTR_ERR(msm->nfi_regs);
+		goto errout1;
+	}
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ecc");
+	msm->ecc_regs = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(msm->ecc_regs)) {
+		ret = PTR_ERR(msm->ecc_regs);
+		goto errout1;
+	}
+
+	msm->pdev.dev = &pdev->dev;
+	msm->quad_spi = of_property_read_bool(np, "mediatek,quad-spi");
+	msm->soc = soc_id->soc;
+
+	msm->nfi_clk = devm_clk_get(msm->pdev.dev, "nfi_clk");
+	if (IS_ERR(msm->nfi_clk)) {
+		ret = PTR_ERR(msm->nfi_clk);
+		dev_err(msm->pdev.dev, "unable to get nfi_clk, err = %d\n",
+			ret);
+		goto errout1;
+	}
+
+	msm->ecc_clk = devm_clk_get(msm->pdev.dev, "ecc_clk");
+	if (IS_ERR(msm->ecc_clk)) {
+		ret = PTR_ERR(msm->ecc_clk);
+		dev_err(msm->pdev.dev, "unable to get ecc_clk, err = %d\n",
+			ret);
+		goto errout1;
+	}
+
+	msm->pad_clk = devm_clk_get(msm->pdev.dev, "pad_clk");
+	if (IS_ERR(msm->pad_clk)) {
+		ret = PTR_ERR(msm->pad_clk);
+		dev_err(msm->pdev.dev, "unable to get pad_clk, err = %d\n",
+			ret);
+		goto errout1;
+	}
+
+	/* enable_clk cleans up after itself on failure. */
+	ret = mtk_snand_enable_clk(msm);
+	if (ret)
+		goto errout1;
+
+	/* Probe SPI-NAND Flash */
+	mtk_snand_pdata.soc = msm->soc;
+	mtk_snand_pdata.quad_spi = msm->quad_spi;
+	mtk_snand_pdata.nfi_base = msm->nfi_regs;
+	mtk_snand_pdata.ecc_base = msm->ecc_regs;
+
+	ret = mtk_snand_init(&msm->pdev, &mtk_snand_pdata, &msm->snf);
+	if (ret)
+		goto errout_clk;
+
+	/* IRQ is optional; without it the core falls back to polling. */
+	msm->irq = platform_get_irq(pdev, 0);
+	if (msm->irq >= 0) {
+		ret = devm_request_irq(msm->pdev.dev, msm->irq, mtk_snand_irq,
+				       0x0, "mtk-snand", msm);
+		if (ret) {
+			dev_err(msm->pdev.dev, "failed to request snfi irq\n");
+			goto errout2;
+		}
+
+		ret = dma_set_mask(msm->pdev.dev, DMA_BIT_MASK(32));
+		if (ret) {
+			dev_err(msm->pdev.dev, "failed to set dma mask\n");
+			goto errout3;
+		}
+	}
+
+	mtk_snand_get_chip_info(msm->snf, &msm->cinfo);
+
+	/* Bounce buffer for one full page plus its spare area. */
+	size = msm->cinfo.pagesize + msm->cinfo.sparesize;
+	msm->page_cache = devm_kmalloc(msm->pdev.dev, size, GFP_KERNEL);
+	if (!msm->page_cache) {
+		dev_err(msm->pdev.dev, "failed to allocate page cache\n");
+		ret = -ENOMEM;
+		goto errout3;
+	}
+
+	mutex_init(&msm->lock);
+
+	dev_info(msm->pdev.dev,
+		 "chip is %s, size %lluMB, page size %u, oob size %u\n",
+		 msm->cinfo.model, msm->cinfo.chipsize >> 20,
+		 msm->cinfo.pagesize, msm->cinfo.sparesize);
+
+	/* Initialize mtd for SPI-NAND */
+	mtd = &msm->mtd;
+
+	mtd->owner = THIS_MODULE;
+	mtd->dev.parent = &pdev->dev;
+	mtd->type = MTD_NANDFLASH;
+	mtd->flags = MTD_CAP_NANDFLASH;
+
+	mtd_set_of_node(mtd, np);
+
+	mtd->size = msm->cinfo.chipsize;
+	mtd->erasesize = msm->cinfo.blocksize;
+	mtd->writesize = msm->cinfo.pagesize;
+	mtd->writebufsize = mtd->writesize;
+	mtd->oobsize = msm->cinfo.sparesize;
+	mtd->oobavail = msm->cinfo.num_sectors * (msm->cinfo.fdm_size - 1);
+
+	mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
+	mtd->writesize_shift = ffs(mtd->writesize) - 1;
+	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
+	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
+
+	mtd->ooblayout = &mtk_snand_ooblayout;
+
+	mtd->ecc_strength = msm->cinfo.ecc_strength * msm->cinfo.num_sectors;
+	mtd->bitflip_threshold = (mtd->ecc_strength * 3) / 4;
+	mtd->ecc_step_size = msm->cinfo.sector_size;
+
+	mtd->_erase = mtk_snand_mtd_erase;
+	mtd->_read_oob = mtk_snand_mtd_read_oob;
+	mtd->_write_oob = mtk_snand_mtd_write_oob;
+	mtd->_block_isbad = mtk_snand_mtd_block_isbad;
+	mtd->_block_markbad = mtk_snand_mtd_block_markbad;
+
+	ret = mtd_device_register(mtd, NULL, 0);
+	if (ret) {
+		dev_err(msm->pdev.dev, "failed to register mtd partition\n");
+		goto errout4;
+	}
+
+	platform_set_drvdata(pdev, msm);
+
+	return 0;
+
+errout4:
+	devm_kfree(msm->pdev.dev, msm->page_cache);
+
+errout3:
+	if (msm->irq >= 0)
+		devm_free_irq(msm->pdev.dev, msm->irq, msm);
+
+errout2:
+	mtk_snand_cleanup(msm->snf);
+
+errout_clk:
+	/* fix: clocks were previously left enabled on these error paths */
+	mtk_snand_disable_clk(msm);
+
+errout1:
+	devm_kfree(msm->pdev.dev, msm);
+
+	platform_set_drvdata(pdev, NULL);
+
+	return ret;
+}
+
+/*
+ * Platform remove: unregister the MTD device, tear down the SNFI core,
+ * release the IRQ, disable the clocks and free driver memory — the reverse
+ * of the probe order.
+ */
+static int mtk_snand_remove(struct platform_device *pdev)
+{
+	struct mtk_snand_mtd *msm = platform_get_drvdata(pdev);
+	struct mtd_info *mtd = &msm->mtd;
+	int ret;
+
+	/* May fail if the device is still in use; abort removal then. */
+	ret = mtd_device_unregister(mtd);
+	if (ret)
+		return ret;
+
+	mtk_snand_cleanup(msm->snf);
+
+	if (msm->irq >= 0)
+		devm_free_irq(msm->pdev.dev, msm->irq, msm);
+
+	mtk_snand_disable_clk(msm);
+
+	devm_kfree(msm->pdev.dev, msm->page_cache);
+	devm_kfree(msm->pdev.dev, msm);
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+/* Platform driver glue and module metadata. */
+static struct platform_driver mtk_snand_driver = {
+	.probe = mtk_snand_probe,
+	.remove = mtk_snand_remove,
+	.driver = {
+		.name = "mtk-snand",
+		.of_match_table = mtk_snand_ids,
+	},
+};
+
+module_platform_driver(mtk_snand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Weijie Gao <weijie.gao@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek SPI-NAND Flash Controller Driver");	/* fix: "MeidaTek" typo */
diff --git a/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/mtk-snand-os.c b/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/mtk-snand-os.c
new file mode 100644
index 0000000..0c3ffec
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/mtk-snand-os.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Weijie Gao <weijie.gao@mediatek.com>
+ */
+
+#include "mtk-snand-def.h"
+
+/*
+ * Format a driver log message, prefix it with the hardware-unit name
+ * (NFI/SNFI/ECC) when one applies, and emit it via dev_warn().
+ * Returns 0 on success, -1 if the message buffer could not be allocated.
+ */
+int mtk_snand_log(struct mtk_snand_plat_dev *pdev,
+		  enum mtk_snand_log_category cat, const char *fmt, ...)
+{
+	const char *prefix;
+	va_list args;
+	char *buf;
+
+	if (cat == SNAND_LOG_NFI)
+		prefix = "NFI";
+	else if (cat == SNAND_LOG_SNFI)
+		prefix = "SNFI";
+	else if (cat == SNAND_LOG_ECC)
+		prefix = "ECC";
+	else
+		prefix = "";
+
+	va_start(args, fmt);
+	buf = kvasprintf(GFP_KERNEL, fmt, args);
+	va_end(args);
+
+	if (!buf) {
+		dev_warn(pdev->dev, "unable to print log\n");
+		return -1;
+	}
+
+	if (*prefix)
+		dev_warn(pdev->dev, "%s: %s", prefix, buf);
+	else
+		dev_warn(pdev->dev, "%s", buf);
+
+	kfree(buf);
+
+	return 0;
+}
diff --git a/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/mtk-snand-os.h b/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/mtk-snand-os.h
new file mode 100644
index 0000000..223f73f
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/mtk-snand-os.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Weijie Gao <weijie.gao@mediatek.com>
+ */
+
+#ifndef _MTK_SNAND_OS_H_
+#define _MTK_SNAND_OS_H_
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/sizes.h>
+#include <linux/iopoll.h>
+#include <linux/hrtimer.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <asm/div64.h>
+
+/* Linux-specific platform context handed to the OS-agnostic SNFI core. */
+struct mtk_snand_plat_dev {
+	struct device *dev;		/* backing platform device */
+	struct completion done;		/* signalled by the IRQ handler */
+};
+
+/* Polling helpers */
+#define read16_poll_timeout(addr, val, cond, sleep_us, timeout_us) \
+	readw_poll_timeout((addr), (val), (cond), (sleep_us), (timeout_us))
+
+#define read32_poll_timeout(addr, val, cond, sleep_us, timeout_us) \
+	readl_poll_timeout((addr), (val), (cond), (sleep_us), (timeout_us))
+
+/* Timer helpers */
+#define mtk_snand_time_t ktime_t
+
+/* Current monotonic timestamp, in the driver's opaque tick unit (ktime). */
+static inline mtk_snand_time_t timer_get_ticks(void)
+{
+	return ktime_get();
+}
+
+/* Convert a microsecond timeout into the driver's tick unit. */
+static inline mtk_snand_time_t timer_time_to_tick(uint32_t timeout_us)
+{
+	return ktime_add_us(ktime_set(0, 0), timeout_us);
+}
+
+/* True once 'timeout_tick' has elapsed since 'start_tick'. */
+static inline bool timer_is_timeout(mtk_snand_time_t start_tick,
+				    mtk_snand_time_t timeout_tick)
+{
+	ktime_t deadline = ktime_add(start_tick, timeout_tick);
+
+	return ktime_compare(ktime_get(), deadline) > 0;
+}
+
+/* Memory helpers */
+/* Zeroed, device-managed allocation for general driver state. */
+static inline void *generic_mem_alloc(struct mtk_snand_plat_dev *pdev,
+				      size_t size)
+{
+	return devm_kzalloc(pdev->dev, size, GFP_KERNEL);
+}
+/* Free memory obtained from generic_mem_alloc(). */
+static inline void generic_mem_free(struct mtk_snand_plat_dev *pdev, void *ptr)
+{
+	devm_kfree(pdev->dev, ptr);
+}
+
+/* Allocate a buffer suitable for streaming DMA mapping (kmalloc-backed). */
+static inline void *dma_mem_alloc(struct mtk_snand_plat_dev *pdev, size_t size)
+{
+	return kzalloc(size, GFP_KERNEL);
+}
+/* Free a buffer obtained from dma_mem_alloc(). */
+static inline void dma_mem_free(struct mtk_snand_plat_dev *pdev, void *ptr)
+{
+	kfree(ptr);
+}
+
+/*
+ * Map 'vaddr' for streaming DMA in the given direction and return the bus
+ * address through *dma_addr. Returns 0 on success or the dma_mapping_error()
+ * result on failure (in which case *dma_addr is left untouched).
+ */
+static inline int dma_mem_map(struct mtk_snand_plat_dev *pdev, void *vaddr,
+			      uintptr_t *dma_addr, size_t size, bool to_device)
+{
+	dma_addr_t addr;
+	int ret;
+
+	addr = dma_map_single(pdev->dev, vaddr, size,
+			      to_device ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+	ret = dma_mapping_error(pdev->dev, addr);
+	if (ret)
+		return ret;
+
+	*dma_addr = (uintptr_t)addr;
+
+	return 0;
+}
+
+/* Undo a dma_mem_map(); direction must match the original mapping. */
+static inline void dma_mem_unmap(struct mtk_snand_plat_dev *pdev,
+				 uintptr_t dma_addr, size_t size,
+				 bool to_device)
+{
+	dma_unmap_single(pdev->dev, dma_addr, size,
+			 to_device ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+}
+
+/* Interrupt helpers */
+/* Called from IRQ context: wake the waiter in irq_completion_wait(). */
+static inline void irq_completion_done(struct mtk_snand_plat_dev *pdev)
+{
+	complete(&pdev->done);
+}
+
+/* One-time initialization of the IRQ wait completion. */
+static inline void irq_completion_init(struct mtk_snand_plat_dev *pdev)
+{
+	init_completion(&pdev->done);
+}
+
+/*
+ * Block until the IRQ handler signals completion or 'timeout_us' elapses.
+ * Returns 0 on completion, -ETIMEDOUT on timeout. The 'reg'/'bit' arguments
+ * are unused here; they serve the disabled register-polling alternative kept
+ * in the #if 0 branch below.
+ */
+static inline int irq_completion_wait(struct mtk_snand_plat_dev *pdev,
+				       void __iomem *reg, uint32_t bit,
+				       uint32_t timeout_us)
+{
+#if 0
+	uint32_t val;
+
+	return read32_poll_timeout(reg, val, val & bit, 0, timeout_us);
+#else
+	int ret;
+
+	ret = wait_for_completion_timeout(&pdev->done,
+					  usecs_to_jiffies(timeout_us));
+	if (!ret)
+		return -ETIMEDOUT;
+
+	return 0;
+#endif
+}
+
+#endif /* _MTK_SNAND_OS_H_ */
diff --git a/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/mtk-snand.c b/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/mtk-snand.c
new file mode 100644
index 0000000..17254a3
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/mtk-snand.c
@@ -0,0 +1,1776 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Weijie Gao <weijie.gao@mediatek.com>
+ */
+
+#include "mtk-snand-def.h"
+
+/* NFI registers */
+#define NFI_CNFG			0x000
+#define CNFG_OP_MODE_S			12
+#define   CNFG_OP_MODE_CUST		6
+#define   CNFG_OP_MODE_PROGRAM		3
+#define CNFG_AUTO_FMT_EN		BIT(9)
+#define CNFG_HW_ECC_EN			BIT(8)
+#define CNFG_DMA_BURST_EN		BIT(2)
+#define CNFG_READ_MODE			BIT(1)
+#define CNFG_DMA_MODE			BIT(0)
+
+#define NFI_PAGEFMT			0x0004
+#define NFI_SPARE_SIZE_LS_S		16
+#define NFI_FDM_ECC_NUM_S		12
+#define NFI_FDM_NUM_S			8
+#define NFI_SPARE_SIZE_S		4
+#define NFI_SEC_SEL_512			BIT(2)
+#define NFI_PAGE_SIZE_S			0
+#define   NFI_PAGE_SIZE_512_2K		0
+#define   NFI_PAGE_SIZE_2K_4K		1
+#define   NFI_PAGE_SIZE_4K_8K		2
+#define   NFI_PAGE_SIZE_8K_16K		3
+
+#define NFI_CON				0x008
+#define CON_SEC_NUM_S			12
+#define CON_BWR				BIT(9)
+#define CON_BRD				BIT(8)
+#define CON_NFI_RST			BIT(1)
+#define CON_FIFO_FLUSH			BIT(0)
+
+#define NFI_INTR_EN			0x010
+#define NFI_INTR_STA			0x014
+#define NFI_IRQ_INTR_EN			BIT(31)
+#define NFI_IRQ_CUS_READ		BIT(8)
+#define NFI_IRQ_CUS_PG			BIT(7)
+
+#define NFI_CMD				0x020
+
+#define NFI_STRDATA			0x040
+#define STR_DATA			BIT(0)
+
+#define NFI_STA				0x060
+#define NFI_NAND_FSM			GENMASK(28, 24)
+#define NFI_FSM				GENMASK(19, 16)
+#define READ_EMPTY			BIT(12)
+
+#define NFI_FIFOSTA			0x064
+#define FIFO_WR_REMAIN_S		8
+#define FIFO_RD_REMAIN_S		0
+
+#define NFI_STRADDR			0x080
+
+#define NFI_FDM0L			0x0a0
+#define NFI_FDM0M			0x0a4
+#define NFI_FDML(n)			(NFI_FDM0L + (n) * 8)
+#define NFI_FDMM(n)			(NFI_FDM0M + (n) * 8)
+
+#define NFI_DEBUG_CON1			0x220
+#define WBUF_EN				BIT(2)
+
+#define NFI_MASTERSTA			0x224
+#define MAS_ADDR			GENMASK(11, 9)
+#define MAS_RD				GENMASK(8, 6)
+#define MAS_WR				GENMASK(5, 3)
+#define MAS_RDDLY			GENMASK(2, 0)
+#define NFI_MASTERSTA_MASK_7622		(MAS_ADDR | MAS_RD | MAS_WR | MAS_RDDLY)
+#define AHB_BUS_BUSY			BIT(1)
+#define BUS_BUSY			BIT(0)
+#define NFI_MASTERSTA_MASK_7986		(AHB_BUS_BUSY | BUS_BUSY)
+
+/* SNFI registers */
+#define SNF_MAC_CTL			0x500
+#define MAC_XIO_SEL			BIT(4)
+#define SF_MAC_EN			BIT(3)
+#define SF_TRIG				BIT(2)
+#define WIP_READY			BIT(1)
+#define WIP				BIT(0)
+
+#define SNF_MAC_OUTL			0x504
+#define SNF_MAC_INL			0x508
+
+#define SNF_RD_CTL2			0x510
+#define DATA_READ_DUMMY_S		8
+#define DATA_READ_CMD_S			0
+
+#define SNF_RD_CTL3			0x514
+
+#define SNF_PG_CTL1			0x524
+#define PG_LOAD_CMD_S			8
+
+#define SNF_PG_CTL2			0x528
+
+#define SNF_MISC_CTL			0x538
+#define SW_RST				BIT(28)
+#define FIFO_RD_LTC_S			25
+#define PG_LOAD_X4_EN			BIT(20)
+#define DATA_READ_MODE_S		16
+#define DATA_READ_MODE			GENMASK(18, 16)
+#define   DATA_READ_MODE_X1		0
+#define   DATA_READ_MODE_X2		1
+#define   DATA_READ_MODE_X4		2
+#define   DATA_READ_MODE_DUAL		5
+#define   DATA_READ_MODE_QUAD		6
+#define PG_LOAD_CUSTOM_EN		BIT(7)
+#define DATARD_CUSTOM_EN		BIT(6)
+#define CS_DESELECT_CYC_S		0
+
+#define SNF_MISC_CTL2			0x53c
+#define PROGRAM_LOAD_BYTE_NUM_S		16
+#define READ_DATA_BYTE_NUM_S		11
+
+#define SNF_DLY_CTL3			0x548
+#define SFCK_SAM_DLY_S			0
+
+#define SNF_STA_CTL1			0x550
+#define CUS_PG_DONE			BIT(28)
+#define CUS_READ_DONE			BIT(27)
+#define SPI_STATE_S			0
+#define SPI_STATE			GENMASK(3, 0)
+
+#define SNF_CFG				0x55c
+#define SPI_MODE			BIT(0)
+
+#define SNF_GPRAM			0x800
+#define SNF_GPRAM_SIZE			0xa0
+
+#define SNFI_POLL_INTERVAL		1000000
+
+/* Supported spare-area sizes (bytes per sector) for each SoC generation. */
+static const uint8_t mt7622_spare_sizes[] = { 16, 26, 27, 28 };
+
+static const uint8_t mt7986_spare_sizes[] = {
+	16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 61, 63, 64,
+	67, 74
+};
+
+/* Controller capabilities per SoC variant, indexed by enum mtk_snand_soc. */
+static const struct mtk_snand_soc_data mtk_snand_socs[__SNAND_SOC_MAX] = {
+	[SNAND_SOC_MT7622] = {
+		.sector_size = 512,
+		.max_sectors = 8,
+		.fdm_size = 8,
+		.fdm_ecc_size = 1,
+		.fifo_size = 32,
+		.bbm_swap = false,
+		.empty_page_check = false,
+		.mastersta_mask = NFI_MASTERSTA_MASK_7622,
+		.spare_sizes = mt7622_spare_sizes,
+		.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
+	},
+	[SNAND_SOC_MT7629] = {
+		.sector_size = 512,
+		.max_sectors = 8,
+		.fdm_size = 8,
+		.fdm_ecc_size = 1,
+		.fifo_size = 32,
+		.bbm_swap = true,
+		.empty_page_check = false,
+		.mastersta_mask = NFI_MASTERSTA_MASK_7622,
+		.spare_sizes = mt7622_spare_sizes,
+		.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
+	},
+	[SNAND_SOC_MT7986] = {
+		.sector_size = 1024,
+		.max_sectors = 16,
+		.fdm_size = 8,
+		.fdm_ecc_size = 1,
+		.fifo_size = 64,
+		.bbm_swap = true,
+		.empty_page_check = true,
+		.mastersta_mask = NFI_MASTERSTA_MASK_7986,
+		.spare_sizes = mt7986_spare_sizes,
+		.num_spare_size = ARRAY_SIZE(mt7986_spare_sizes)
+	},
+};
+
+/* 32-bit read of an NFI/SNFI register. */
+static inline uint32_t nfi_read32(struct mtk_snand *snf, uint32_t reg)
+{
+	return readl(snf->nfi_base + reg);
+}
+
+/* 32-bit write to an NFI/SNFI register. */
+static inline void nfi_write32(struct mtk_snand *snf, uint32_t reg,
+			       uint32_t val)
+{
+	writel(val, snf->nfi_base + reg);
+}
+
+/* 16-bit write to an NFI/SNFI register. */
+static inline void nfi_write16(struct mtk_snand *snf, uint32_t reg,
+			       uint16_t val)
+{
+	writew(val, snf->nfi_base + reg);
+}
+
+/* Read-modify-write an NFI/SNFI register: clear 'clr' bits, then set 'set'. */
+static inline void nfi_rmw32(struct mtk_snand *snf, uint32_t reg, uint32_t clr,
+			     uint32_t set)
+{
+	uint32_t val;
+
+	val = readl(snf->nfi_base + reg);
+	val &= ~clr;
+	val |= set;
+	writel(val, snf->nfi_base + reg);
+}
+
+/*
+ * Copy a byte stream into register space (the GPRAM window), packing bytes
+ * into 32-bit little-endian words. Each word is flushed once it fills up or
+ * the stream ends; 'reg' need not be word-aligned.
+ */
+static void nfi_write_data(struct mtk_snand *snf, uint32_t reg,
+			   const uint8_t *data, uint32_t len)
+{
+	uint32_t i, val = 0, es = sizeof(uint32_t);
+
+	for (i = reg; i < reg + len; i++) {
+		val |= ((uint32_t)*data++) << (8 * (i % es));
+
+		/* Flush on word boundary or at the final byte. */
+		if (i % es == es - 1 || i == reg + len - 1) {
+			nfi_write32(snf, i & ~(es - 1), val);
+			val = 0;
+		}
+	}
+}
+
+/*
+ * Copy a byte stream out of register space (the GPRAM window), unpacking
+ * 32-bit little-endian words; 'reg' need not be word-aligned.
+ */
+static void nfi_read_data(struct mtk_snand *snf, uint32_t reg, uint8_t *data,
+			  uint32_t len)
+{
+	uint32_t i, val = 0, es = sizeof(uint32_t);
+
+	for (i = reg; i < reg + len; i++) {
+		/* Fetch a fresh word at the start and on each word boundary. */
+		if (i == reg || i % es == 0)
+			val = nfi_read32(snf, i & ~(es - 1));
+
+		*data++ = (uint8_t)(val >> (8 * (i % es)));
+	}
+}
+
+/* Exchange two bad-block-marker bytes in place. */
+static inline void do_bm_swap(uint8_t *bm1, uint8_t *bm2)
+{
+	uint8_t saved = *bm2;
+
+	*bm2 = *bm1;
+	*bm1 = saved;
+}
+
+/*
+ * In raw page layout, swap the byte at the user-visible BBM position with
+ * the byte at the last sector's FDM start. No-op on SoCs without bbm_swap
+ * or on single-sector pages.
+ */
+static void mtk_snand_bm_swap_raw(struct mtk_snand *snf)
+{
+	uint32_t fdm_bbm_pos;
+
+	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
+		return;
+
+	fdm_bbm_pos = (snf->ecc_steps - 1) * snf->raw_sector_size +
+		      snf->nfi_soc->sector_size;
+	do_bm_swap(&snf->page_cache[fdm_bbm_pos],
+		   &snf->page_cache[snf->writesize]);
+}
+
+/*
+ * In formatted (ECC-decoded) page layout, swap the BBM byte between its
+ * in-data position and the last sector's FDM position. No-op on SoCs
+ * without bbm_swap or on single-sector pages.
+ */
+static void mtk_snand_bm_swap(struct mtk_snand *snf)
+{
+	uint32_t buf_bbm_pos, fdm_bbm_pos;
+
+	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
+		return;
+
+	buf_bbm_pos = snf->writesize -
+		      (snf->ecc_steps - 1) * snf->spare_per_sector;
+	fdm_bbm_pos = snf->writesize +
+		      (snf->ecc_steps - 1) * snf->nfi_soc->fdm_size;
+	do_bm_swap(&snf->page_cache[fdm_bbm_pos],
+		   &snf->page_cache[buf_bbm_pos]);
+}
+
+/*
+ * In raw page layout, swap the FDM bytes of the first and last sectors.
+ * No-op on SoCs without bbm_swap or on single-sector pages.
+ */
+static void mtk_snand_fdm_bm_swap_raw(struct mtk_snand *snf)
+{
+	uint32_t fdm_bbm_pos1, fdm_bbm_pos2;
+
+	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
+		return;
+
+	fdm_bbm_pos1 = snf->nfi_soc->sector_size;
+	fdm_bbm_pos2 = (snf->ecc_steps - 1) * snf->raw_sector_size +
+		       snf->nfi_soc->sector_size;
+	do_bm_swap(&snf->page_cache[fdm_bbm_pos1],
+		   &snf->page_cache[fdm_bbm_pos2]);
+}
+
+/*
+ * In formatted page layout, swap the FDM bytes of the first and last
+ * sectors. No-op on SoCs without bbm_swap or on single-sector pages.
+ */
+static void mtk_snand_fdm_bm_swap(struct mtk_snand *snf)
+{
+	uint32_t fdm_bbm_pos1, fdm_bbm_pos2;
+
+	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
+		return;
+
+	fdm_bbm_pos1 = snf->writesize;
+	fdm_bbm_pos2 = snf->writesize +
+		       (snf->ecc_steps - 1) * snf->nfi_soc->fdm_size;
+	do_bm_swap(&snf->page_cache[fdm_bbm_pos1],
+		   &snf->page_cache[fdm_bbm_pos2]);
+}
+
+/*
+ * Reset the NFI engine and flush its FIFOs, then wait for the bus master,
+ * the NFI/NAND state machines and both FIFOs to go idle. Returns 0 on
+ * success or a poll-timeout error.
+ */
+static int mtk_nfi_reset(struct mtk_snand *snf)
+{
+	uint32_t val, fifo_mask;
+	int ret;
+
+	nfi_write32(snf, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);
+
+	ret = read16_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
+				  !(val & snf->nfi_soc->mastersta_mask), 0,
+				  SNFI_POLL_INTERVAL);
+	if (ret) {
+		snand_log_nfi(snf->pdev,
+			      "NFI master is still busy after reset\n");
+		return ret;
+	}
+
+	ret = read32_poll_timeout(snf->nfi_base + NFI_STA, val,
+				  !(val & (NFI_FSM | NFI_NAND_FSM)), 0,
+				  SNFI_POLL_INTERVAL);
+	if (ret) {
+		snand_log_nfi(snf->pdev, "Failed to reset NFI\n");
+		return ret;
+	}
+
+	/* Both read and write FIFO occupancy counters must read zero. */
+	fifo_mask = ((snf->nfi_soc->fifo_size - 1) << FIFO_RD_REMAIN_S) |
+		    ((snf->nfi_soc->fifo_size - 1) << FIFO_WR_REMAIN_S);
+	ret = read16_poll_timeout(snf->nfi_base + NFI_FIFOSTA, val,
+				  !(val & fifo_mask), 0, SNFI_POLL_INTERVAL);
+	if (ret) {
+		snand_log_nfi(snf->pdev, "NFI FIFOs are not empty\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Soft-reset the SNFI MAC, wait for its SPI state machine to go idle, then
+ * reprogram the FIFO read latency and CS deselect cycle count. The timing
+ * write happens even if the idle wait timed out.
+ */
+static int mtk_snand_mac_reset(struct mtk_snand *snf)
+{
+	int ret;
+	uint32_t val;
+
+	nfi_rmw32(snf, SNF_MISC_CTL, 0, SW_RST);
+
+	ret = read32_poll_timeout(snf->nfi_base + SNF_STA_CTL1, val,
+				  !(val & SPI_STATE), 0, SNFI_POLL_INTERVAL);
+	if (ret)
+		snand_log_snfi(snf->pdev, "Failed to reset SNFI MAC\n");
+
+	nfi_write32(snf, SNF_MISC_CTL, (2 << FIFO_RD_LTC_S) |
+		    (10 << CS_DESELECT_CYC_S));
+
+	return ret;
+}
+
+/*
+ * Fire one MAC transaction of 'outlen' TX bytes followed by 'inlen' RX
+ * bytes (data already staged in GPRAM), then wait for the WIP handshake to
+ * complete. The MAC is always disabled again before returning.
+ */
+static int mtk_snand_mac_trigger(struct mtk_snand *snf, uint32_t outlen,
+				 uint32_t inlen)
+{
+	int ret;
+	uint32_t val;
+
+	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN);
+	nfi_write32(snf, SNF_MAC_OUTL, outlen);
+	nfi_write32(snf, SNF_MAC_INL, inlen);
+
+	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN | SF_TRIG);
+
+	ret = read32_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
+				  val & WIP_READY, 0, SNFI_POLL_INTERVAL);
+	if (ret) {
+		snand_log_snfi(snf->pdev, "Timed out waiting for WIP_READY\n");
+		goto cleanup;
+	}
+
+	ret = read32_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
+				  !(val & WIP), 0, SNFI_POLL_INTERVAL);
+	if (ret) {
+		snand_log_snfi(snf->pdev,
+			       "Timed out waiting for WIP cleared\n");
+	}
+
+cleanup:
+	nfi_write32(snf, SNF_MAC_CTL, 0);
+
+	return ret;
+}
+
+/*
+ * Generic SPI transaction through the SNFI MAC: send 'out' (outlen bytes),
+ * then optionally read back 'inlen' bytes into 'in'. The combined length
+ * must fit the GPRAM staging window. Returns 0 on success.
+ */
+int mtk_snand_mac_io(struct mtk_snand *snf, const uint8_t *out, uint32_t outlen,
+		     uint8_t *in, uint32_t inlen)
+{
+	int ret;
+
+	if (outlen + inlen > SNF_GPRAM_SIZE)
+		return -EINVAL;
+
+	mtk_snand_mac_reset(snf);
+
+	/* Stage TX bytes in GPRAM; RX lands immediately after them. */
+	nfi_write_data(snf, SNF_GPRAM, out, outlen);
+
+	ret = mtk_snand_mac_trigger(snf, outlen, inlen);
+	if (ret)
+		return ret;
+
+	if (!inlen)
+		return 0;
+
+	nfi_read_data(snf, SNF_GPRAM + outlen, in, inlen);
+
+	return 0;
+}
+
+/*
+ * Issue GET_FEATURE for the given feature address. Returns the feature
+ * byte (0..255) on success, or a negative error code on I/O failure.
+ */
+static int mtk_snand_get_feature(struct mtk_snand *snf, uint32_t addr)
+{
+	uint8_t op[2], val;
+	int ret;
+
+	op[0] = SNAND_CMD_GET_FEATURE;
+	op[1] = (uint8_t)addr;
+
+	ret = mtk_snand_mac_io(snf, op, sizeof(op), &val, 1);
+	if (ret)
+		return ret;
+
+	return val;
+}
+
+/*
+ * mtk_snand_set_feature() - write one SPI-NAND feature register
+ *
+ * Returns 0 on success, negative error code on I/O failure.
+ */
+int mtk_snand_set_feature(struct mtk_snand *snf, uint32_t addr, uint32_t val)
+{
+	uint8_t cmd[3] = { SNAND_CMD_SET_FEATURE, (uint8_t)addr,
+			   (uint8_t)val };
+
+	return mtk_snand_mac_io(snf, cmd, sizeof(cmd), NULL, 0);
+}
+
+/*
+ * mtk_snand_poll_status() - wait for the chip to leave the busy state
+ * @wait_us: maximum time to wait, in microseconds
+ *
+ * Polls the status feature register until OIP clears.  Always performs
+ * at least one read, even for a zero timeout.
+ *
+ * Returns the erase/program failure bits of the status register (0 if
+ * none set) when the chip goes idle, or -ETIMEDOUT.
+ */
+static int mtk_snand_poll_status(struct mtk_snand *snf, uint32_t wait_us)
+{
+	mtk_snand_time_t start, limit;
+	int status;
+
+	start = timer_get_ticks();
+	limit = timer_time_to_tick(wait_us);
+
+	for (;;) {
+		status = mtk_snand_get_feature(snf, SNAND_FEATURE_STATUS_ADDR);
+		if (!(status & SNAND_STATUS_OIP))
+			return status & (SNAND_STATUS_ERASE_FAIL |
+					 SNAND_STATUS_PROGRAM_FAIL);
+
+		if (timer_is_timeout(start, limit))
+			return -ETIMEDOUT;
+	}
+}
+
+/*
+ * mtk_snand_chip_reset() - issue a RESET command to the SPI-NAND chip
+ *
+ * Sends the reset opcode and waits for the chip to finish the operation.
+ * Returns 0 on success, negative error code on failure or timeout.
+ */
+int mtk_snand_chip_reset(struct mtk_snand *snf)
+{
+	uint8_t cmd = SNAND_CMD_RESET;
+	int err;
+
+	err = mtk_snand_mac_io(snf, &cmd, 1, NULL, 0);
+	if (err)
+		return err;
+
+	err = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
+
+	/* Status bits from a successful poll are not meaningful here */
+	return err < 0 ? err : 0;
+}
+
+/*
+ * mtk_snand_config_feature() - read-modify-write the configuration feature
+ * @clr: bits to clear
+ * @set: bits to set
+ *
+ * Reads the configuration register, applies @clr/@set, writes it back if
+ * it changed, then reads it again to verify the bits stuck (some bits may
+ * be read-only on a given chip).
+ *
+ * Returns 0 on success, -ENOTSUPP if the verify read-back mismatches,
+ * or a negative I/O error.
+ */
+static int mtk_snand_config_feature(struct mtk_snand *snf, uint8_t clr,
+				    uint8_t set)
+{
+	int val, newval;
+	int ret;
+
+	val = mtk_snand_get_feature(snf, SNAND_FEATURE_CONFIG_ADDR);
+	if (val < 0) {
+		snand_log_chip(snf->pdev,
+			       "Failed to get configuration feature\n");
+		return val;
+	}
+
+	newval = (val & (~clr)) | set;
+
+	if (newval == val)
+		return 0;
+
+	ret = mtk_snand_set_feature(snf, SNAND_FEATURE_CONFIG_ADDR,
+				    (uint8_t)newval);
+	/* Fix: was `if (val < 0)`, but val is known non-negative here, so
+	 * set-feature failures were silently ignored. Check ret instead. */
+	if (ret) {
+		snand_log_chip(snf->pdev,
+			       "Failed to set configuration feature\n");
+		return ret;
+	}
+
+	/* Verify the write actually took effect */
+	val = mtk_snand_get_feature(snf, SNAND_FEATURE_CONFIG_ADDR);
+	if (val < 0) {
+		snand_log_chip(snf->pdev,
+			       "Failed to get configuration feature\n");
+		return val;
+	}
+
+	if (newval != val)
+		return -ENOTSUPP;
+
+	return 0;
+}
+
+/*
+ * mtk_snand_ondie_ecc_control() - enable or disable the chip's On-Die ECC
+ *
+ * Returns 0 on success, negative error code otherwise (logged).
+ */
+static int mtk_snand_ondie_ecc_control(struct mtk_snand *snf, bool enable)
+{
+	uint8_t clr = enable ? 0 : SNAND_FEATURE_ECC_EN;
+	uint8_t set = enable ? SNAND_FEATURE_ECC_EN : 0;
+	int err;
+
+	err = mtk_snand_config_feature(snf, clr, set);
+	if (err) {
+		snand_log_chip(snf->pdev, "Failed to %s On-Die ECC engine\n",
+			       enable ? "enable" : "disable");
+	}
+
+	return err;
+}
+
+/*
+ * mtk_snand_qspi_control() - enable or disable the chip's quad-SPI mode
+ *
+ * Returns 0 on success, negative error code otherwise (logged).
+ */
+static int mtk_snand_qspi_control(struct mtk_snand *snf, bool enable)
+{
+	uint8_t clr = enable ? 0 : SNAND_FEATURE_QUAD_ENABLE;
+	uint8_t set = enable ? SNAND_FEATURE_QUAD_ENABLE : 0;
+	int err;
+
+	err = mtk_snand_config_feature(snf, clr, set);
+	if (err) {
+		snand_log_chip(snf->pdev, "Failed to %s quad spi\n",
+			       enable ? "enable" : "disable");
+	}
+
+	return err;
+}
+
+/*
+ * mtk_snand_unlock() - clear the block-protection feature register
+ *
+ * Returns 0 on success, negative error code otherwise (logged).
+ */
+static int mtk_snand_unlock(struct mtk_snand *snf)
+{
+	int err;
+
+	err = mtk_snand_set_feature(snf, SNAND_FEATURE_PROTECT_ADDR, 0);
+	if (err)
+		snand_log_chip(snf->pdev, "Failed to set protection feature\n");
+
+	return err;
+}
+
+/*
+ * mtk_snand_write_enable() - issue WRITE ENABLE and verify WEL is set
+ *
+ * Sends the write-enable opcode, then reads the status feature register
+ * and checks the WEL latch actually set.
+ *
+ * Returns 0 on success, a negative I/O error, or -ENOTSUPP if the chip
+ * did not latch write-enable.
+ */
+static int mtk_snand_write_enable(struct mtk_snand *snf)
+{
+	uint8_t op = SNAND_CMD_WRITE_ENABLE;
+	int ret, val;
+
+	ret = mtk_snand_mac_io(snf, &op, 1, NULL, 0);
+	if (ret)
+		return ret;
+
+	val = mtk_snand_get_feature(snf, SNAND_FEATURE_STATUS_ADDR);
+	if (val < 0)
+		return val;	/* Fix: was `return ret` (always 0 here),
+				 * which masked get_feature failures */
+
+	if (val & SNAND_STATUS_WEL)
+		return 0;
+
+	snand_log_chip(snf->pdev, "Failed to send write-enable command\n");
+
+	return -ENOTSUPP;
+}
+
+/*
+ * mtk_snand_select_die() - select a die on multi-die chips
+ *
+ * No-op (returns 0) when the flash provides no die-select hook.
+ */
+static int mtk_snand_select_die(struct mtk_snand *snf, uint32_t dieidx)
+{
+	return snf->select_die ? snf->select_die(snf, dieidx) : 0;
+}
+
+/*
+ * mtk_snand_select_die_address() - select the die holding @addr
+ *
+ * For multi-die chips, switches to the die containing @addr and returns
+ * the address relative to that die.  For single-die chips the address is
+ * returned unchanged.
+ */
+static uint64_t mtk_snand_select_die_address(struct mtk_snand *snf,
+					     uint64_t addr)
+{
+	if (snf->select_die) {
+		mtk_snand_select_die(snf, (uint32_t)(addr >> snf->die_shift));
+		addr &= snf->die_mask;
+	}
+
+	return addr;
+}
+
+/*
+ * mtk_snand_get_plane_address() - derive the plane-select column bit
+ *
+ * Odd-numbered blocks live on the second plane; for those, the plane bit
+ * (one position above the page-size bits) must be set in the column
+ * address.  Returns the plane bit, or 0 for the first plane.
+ */
+static uint32_t mtk_snand_get_plane_address(struct mtk_snand *snf,
+					    uint32_t page)
+{
+	uint32_t block_lsb = 1 << (snf->erasesize_shift -
+				   snf->writesize_shift);
+
+	return (page & block_lsb) ? 1 << (snf->writesize_shift + 1) : 0;
+}
+
+/*
+ * mtk_snand_page_op() - send a command taking a 24-bit page address
+ *
+ * Used for READ TO CACHE, PROGRAM EXECUTE and BLOCK ERASE.  The page
+ * address is transmitted big-endian (MSB first).
+ */
+static int mtk_snand_page_op(struct mtk_snand *snf, uint32_t page, uint8_t cmd)
+{
+	uint8_t buf[4] = { cmd, (uint8_t)(page >> 16), (uint8_t)(page >> 8),
+			   (uint8_t)page };
+
+	return mtk_snand_mac_io(snf, buf, sizeof(buf), NULL, 0);
+}
+
+/*
+ * mtk_snand_read_fdm() - copy per-sector FDM (OOB) bytes out of the NFI
+ *
+ * Each sector has up to 8 FDM bytes held in a pair of 32-bit registers
+ * (FDML = bytes 0-3, FDMM = bytes 4-7, little-endian within each word).
+ */
+static void mtk_snand_read_fdm(struct mtk_snand *snf, uint8_t *buf)
+{
+	uint8_t *dst = buf;
+	uint32_t lo, hi;
+	int sect, b;
+
+	for (sect = 0; sect < snf->ecc_steps; sect++) {
+		lo = nfi_read32(snf, NFI_FDML(sect));
+		hi = nfi_read32(snf, NFI_FDMM(sect));
+
+		for (b = 0; b < snf->nfi_soc->fdm_size; b++) {
+			if (b < 4)
+				dst[b] = lo >> (b * 8);
+			else
+				dst[b] = hi >> ((b - 4) * 8);
+		}
+
+		dst += snf->nfi_soc->fdm_size;
+	}
+}
+
+/*
+ * mtk_snand_read_cache() - DMA a page from the chip's cache into page_cache
+ * @page: page index (used for the plane bit and for ECC error reporting)
+ * @raw:  true = bypass the HW ECC engine and auto-formatting
+ *
+ * Programs the SNFI custom-read path, starts a DMA transfer into
+ * snf->page_cache, waits for the custom-read-done interrupt and (in
+ * non-raw mode) for the ECC decoder, then tears everything down.
+ *
+ * Returns 0 on success, -EBADMSG on uncorrectable ECC errors (non-raw,
+ * via mtk_ecc_check_decode_error), or another negative error code.
+ */
+static int mtk_snand_read_cache(struct mtk_snand *snf, uint32_t page, bool raw)
+{
+	uint32_t coladdr, rwbytes, mode, len;
+	uintptr_t dma_addr;
+	int ret;
+
+	/* Column address with plane bit */
+	coladdr = mtk_snand_get_plane_address(snf, page);
+
+	mtk_snand_mac_reset(snf);
+	mtk_nfi_reset(snf);
+
+	/* Command and dummy cycles */
+	nfi_write32(snf, SNF_RD_CTL2,
+		    ((uint32_t)snf->dummy_rfc << DATA_READ_DUMMY_S) |
+		    (snf->opcode_rfc << DATA_READ_CMD_S));
+
+	/* Column address */
+	nfi_write32(snf, SNF_RD_CTL3, coladdr);
+
+	/* Set read mode */
+	mode = (uint32_t)snf->mode_rfc << DATA_READ_MODE_S;
+	nfi_rmw32(snf, SNF_MISC_CTL, DATA_READ_MODE, mode | DATARD_CUSTOM_EN);
+
+	/* Set bytes to read */
+	rwbytes = snf->ecc_steps * snf->raw_sector_size;
+	nfi_write32(snf, SNF_MISC_CTL2, (rwbytes << PROGRAM_LOAD_BYTE_NUM_S) |
+		    rwbytes);
+
+	/* NFI read prepare */
+	mode = raw ? 0 : CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
+	nfi_write16(snf, NFI_CNFG, (CNFG_OP_MODE_CUST << CNFG_OP_MODE_S) |
+		    CNFG_DMA_BURST_EN | CNFG_READ_MODE | CNFG_DMA_MODE | mode);
+
+	nfi_write32(snf, NFI_CON, (snf->ecc_steps << CON_SEC_NUM_S));
+
+	/* Prepare for DMA read */
+	len = snf->writesize + snf->oobsize;
+	ret = dma_mem_map(snf->pdev, snf->page_cache, &dma_addr, len, false);
+	if (ret) {
+		snand_log_nfi(snf->pdev,
+			      "DMA map from device failed with %d\n", ret);
+		return ret;
+	}
+
+	nfi_write32(snf, NFI_STRADDR, (uint32_t)dma_addr);
+
+	if (!raw)
+		mtk_snand_ecc_decoder_start(snf);
+
+	/* Prepare for custom read interrupt */
+	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_READ);
+	irq_completion_init(snf->pdev);
+
+	/* Trigger NFI into custom mode */
+	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_READ);
+
+	/* Start DMA read */
+	nfi_rmw32(snf, NFI_CON, 0, CON_BRD);
+	nfi_write16(snf, NFI_STRDATA, STR_DATA);
+
+	/* Wait for operation finished */
+	ret = irq_completion_wait(snf->pdev, snf->nfi_base + SNF_STA_CTL1,
+				  CUS_READ_DONE, SNFI_POLL_INTERVAL);
+	if (ret) {
+		snand_log_nfi(snf->pdev,
+			      "DMA timed out for reading from cache\n");
+		goto cleanup;
+	}
+
+	if (!raw) {
+		ret = mtk_ecc_wait_decoder_done(snf);
+		if (ret)
+			goto cleanup;
+
+		/* OOB bytes are delivered via the FDM registers, not DMA */
+		mtk_snand_read_fdm(snf, snf->page_cache + snf->writesize);
+
+		/*
+		 * For new IPs, ecc error may occure on empty pages.
+		 * Use an specific indication bit to check empty page.
+		 */
+		if (snf->nfi_soc->empty_page_check &&
+		    (nfi_read32(snf, NFI_STA) & READ_EMPTY))
+			ret = 0;
+		else
+			ret = mtk_ecc_check_decode_error(snf, page);
+
+		mtk_snand_ecc_decoder_stop(snf);
+	}
+
+cleanup:
+	/* DMA cleanup */
+	dma_mem_unmap(snf->pdev, dma_addr, len, false);
+
+	/* Stop read */
+	nfi_write32(snf, NFI_CON, 0);
+
+	/* Clear SNF done flag */
+	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE);
+	nfi_write32(snf, SNF_STA_CTL1, 0);
+
+	/* Disable interrupt */
+	nfi_read32(snf, NFI_INTR_STA);
+	nfi_write32(snf, NFI_INTR_EN, 0);
+
+	nfi_rmw32(snf, SNF_MISC_CTL, DATARD_CUSTOM_EN, 0);
+
+	return ret;
+}
+
+/*
+ * mtk_snand_from_raw_page() - de-interleave a raw page from page_cache
+ *
+ * A raw page is a sequence of sectors, each laid out as
+ * [data | FDM | ECC parity].  This splits it into a contiguous data
+ * buffer (@buf) and an OOB buffer (@oob) holding all FDM bytes followed
+ * by all ECC bytes.  Either destination may be NULL to skip it.
+ */
+static void mtk_snand_from_raw_page(struct mtk_snand *snf, void *buf, void *oob)
+{
+	uint32_t sect, ecc_len;
+	uint8_t *src, *data_dst = buf, *fdm_dst = oob, *ecc_dst;
+
+	ecc_len = snf->spare_per_sector - snf->nfi_soc->fdm_size;
+	ecc_dst = (uint8_t *)oob + snf->ecc_steps * snf->nfi_soc->fdm_size;
+
+	for (sect = 0; sect < snf->ecc_steps; sect++) {
+		src = snf->page_cache + sect * snf->raw_sector_size;
+
+		if (buf) {
+			memcpy(data_dst, src, snf->nfi_soc->sector_size);
+			data_dst += snf->nfi_soc->sector_size;
+		}
+
+		src += snf->nfi_soc->sector_size;
+
+		if (oob) {
+			memcpy(fdm_dst, src, snf->nfi_soc->fdm_size);
+			fdm_dst += snf->nfi_soc->fdm_size;
+
+			memcpy(ecc_dst, src + snf->nfi_soc->fdm_size,
+			       ecc_len);
+			ecc_dst += ecc_len;
+		}
+	}
+}
+
+/*
+ * mtk_snand_do_read_page() - read one full page (data and/or OOB)
+ * @addr:   byte address of the page (chip-absolute)
+ * @buf:    destination for page data, may be NULL
+ * @oob:    destination for OOB data, may be NULL
+ * @raw:    bypass HW ECC
+ * @format: in raw mode, de-interleave sectors and apply bad-block-marker
+ *          swapping; otherwise return the raw layout as-is
+ *
+ * Returns 0 on success, -EBADMSG on uncorrectable ECC errors (data is
+ * still copied out), or another negative error code.
+ */
+static int mtk_snand_do_read_page(struct mtk_snand *snf, uint64_t addr,
+				  void *buf, void *oob, bool raw, bool format)
+{
+	uint64_t die_addr;
+	uint32_t page;
+	int ret;
+
+	die_addr = mtk_snand_select_die_address(snf, addr);
+	page = die_addr >> snf->writesize_shift;
+
+	/* Ask the chip to load the page into its internal cache */
+	ret = mtk_snand_page_op(snf, page, SNAND_CMD_READ_TO_CACHE);
+	if (ret)
+		return ret;
+
+	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
+	if (ret < 0) {
+		snand_log_chip(snf->pdev, "Read to cache command timed out\n");
+		return ret;
+	}
+
+	/* -EBADMSG falls through so the (possibly damaged) data is copied */
+	ret = mtk_snand_read_cache(snf, page, raw);
+	if (ret < 0 && ret != -EBADMSG)
+		return ret;
+
+	if (raw) {
+		if (format) {
+			mtk_snand_bm_swap_raw(snf);
+			mtk_snand_fdm_bm_swap_raw(snf);
+			mtk_snand_from_raw_page(snf, buf, oob);
+		} else {
+			if (buf)
+				memcpy(buf, snf->page_cache, snf->writesize);
+
+			if (oob) {
+				memset(oob, 0xff, snf->oobsize);
+				memcpy(oob, snf->page_cache + snf->writesize,
+				       snf->ecc_steps * snf->spare_per_sector);
+			}
+		}
+	} else {
+		mtk_snand_bm_swap(snf);
+		mtk_snand_fdm_bm_swap(snf);
+
+		if (buf)
+			memcpy(buf, snf->page_cache, snf->writesize);
+
+		if (oob) {
+			memset(oob, 0xff, snf->oobsize);
+			memcpy(oob, snf->page_cache + snf->writesize,
+			       snf->ecc_steps * snf->nfi_soc->fdm_size);
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * mtk_snand_read_page() - public page-read entry point
+ *
+ * Validates arguments (at least one of @buf/@oob, address in range) and
+ * performs a formatted page read.
+ */
+int mtk_snand_read_page(struct mtk_snand *snf, uint64_t addr, void *buf,
+			void *oob, bool raw)
+{
+	if (!snf)
+		return -EINVAL;
+
+	if (!buf && !oob)
+		return -EINVAL;
+
+	if (addr >= snf->size)
+		return -EINVAL;
+
+	return mtk_snand_do_read_page(snf, addr, buf, oob, raw, true);
+}
+
+/*
+ * mtk_snand_write_fdm() - load per-sector FDM (OOB) bytes into the NFI
+ *
+ * Mirror of mtk_snand_read_fdm(): packs up to 8 bytes per sector into
+ * the FDML/FDMM register pair, padding missing bytes with 0xff.
+ */
+static void mtk_snand_write_fdm(struct mtk_snand *snf, const uint8_t *buf)
+{
+	uint32_t fdm_size = snf->nfi_soc->fdm_size;
+	const uint8_t *src = buf;
+	uint32_t lo, hi;
+	int sect, b;
+
+	for (sect = 0; sect < snf->ecc_steps; sect++) {
+		lo = 0;
+		hi = 0;
+
+		for (b = 0; b < 4; b++)
+			lo |= (uint32_t)(b < fdm_size ? src[b] : 0xff)
+					<< (b * 8);
+
+		for (b = 4; b < 8; b++)
+			hi |= (uint32_t)(b < fdm_size ? src[b] : 0xff)
+					<< ((b - 4) * 8);
+
+		nfi_write32(snf, NFI_FDML(sect), lo);
+		nfi_write32(snf, NFI_FDMM(sect), hi);
+
+		src += fdm_size;
+	}
+}
+
+/*
+ * mtk_snand_program_load() - DMA page_cache into the chip's program buffer
+ * @page: page index (used only for the plane-select column bit)
+ * @raw:  true = bypass the HW ECC encoder and auto-formatting
+ *
+ * Mirror of mtk_snand_read_cache() for the write direction: programs the
+ * SNFI custom program-load path, DMAs snf->page_cache out, and waits for
+ * the custom-program-done interrupt.  The caller must issue PROGRAM
+ * EXECUTE afterwards to commit the data to the array.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int mtk_snand_program_load(struct mtk_snand *snf, uint32_t page,
+				  bool raw)
+{
+	uint32_t coladdr, rwbytes, mode, len;
+	uintptr_t dma_addr;
+	int ret;
+
+	/* Column address with plane bit */
+	coladdr = mtk_snand_get_plane_address(snf, page);
+
+	mtk_snand_mac_reset(snf);
+	mtk_nfi_reset(snf);
+
+	/* Write FDM registers if necessary */
+	if (!raw)
+		mtk_snand_write_fdm(snf, snf->page_cache + snf->writesize);
+
+	/* Command */
+	nfi_write32(snf, SNF_PG_CTL1, (snf->opcode_pl << PG_LOAD_CMD_S));
+
+	/* Column address */
+	nfi_write32(snf, SNF_PG_CTL2, coladdr);
+
+	/* Set write mode */
+	mode = snf->mode_pl ? PG_LOAD_X4_EN : 0;
+	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_X4_EN, mode | PG_LOAD_CUSTOM_EN);
+
+	/* Set bytes to write */
+	rwbytes = snf->ecc_steps * snf->raw_sector_size;
+	nfi_write32(snf, SNF_MISC_CTL2, (rwbytes << PROGRAM_LOAD_BYTE_NUM_S) |
+		    rwbytes);
+
+	/* NFI write prepare */
+	mode = raw ? 0 : CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
+	nfi_write16(snf, NFI_CNFG, (CNFG_OP_MODE_PROGRAM << CNFG_OP_MODE_S) |
+		    CNFG_DMA_BURST_EN | CNFG_DMA_MODE | mode);
+
+	nfi_write32(snf, NFI_CON, (snf->ecc_steps << CON_SEC_NUM_S));
+
+	/* Prepare for DMA write */
+	len = snf->writesize + snf->oobsize;
+	ret = dma_mem_map(snf->pdev, snf->page_cache, &dma_addr, len, true);
+	if (ret) {
+		snand_log_nfi(snf->pdev,
+			      "DMA map to device failed with %d\n", ret);
+		return ret;
+	}
+
+	nfi_write32(snf, NFI_STRADDR, (uint32_t)dma_addr);
+
+	if (!raw)
+		mtk_snand_ecc_encoder_start(snf);
+
+	/* Prepare for custom write interrupt */
+	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_PG);
+	irq_completion_init(snf->pdev);
+
+	/* Trigger NFI into custom mode */
+	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_WRITE);
+
+	/* Start DMA write */
+	nfi_rmw32(snf, NFI_CON, 0, CON_BWR);
+	nfi_write16(snf, NFI_STRDATA, STR_DATA);
+
+	/* Wait for operation finished */
+	ret = irq_completion_wait(snf->pdev, snf->nfi_base + SNF_STA_CTL1,
+				  CUS_PG_DONE, SNFI_POLL_INTERVAL);
+	if (ret) {
+		snand_log_nfi(snf->pdev,
+			      "DMA timed out for program load\n");
+		goto cleanup;
+	}
+
+	if (!raw)
+		mtk_snand_ecc_encoder_stop(snf);
+
+cleanup:
+	/* DMA cleanup */
+	dma_mem_unmap(snf->pdev, dma_addr, len, true);
+
+	/* Stop write.
+	 * NOTE(review): the read path uses nfi_write32() to clear NFI_CON;
+	 * this 16-bit write is presumably sufficient — confirm intent. */
+	nfi_write16(snf, NFI_CON, 0);
+
+	/* Clear SNF done flag */
+	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_PG_DONE);
+	nfi_write32(snf, SNF_STA_CTL1, 0);
+
+	/* Disable interrupt */
+	nfi_read32(snf, NFI_INTR_STA);
+	nfi_write32(snf, NFI_INTR_EN, 0);
+
+	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_CUSTOM_EN, 0);
+
+	return ret;
+}
+
+/*
+ * mtk_snand_to_raw_page() - interleave data/OOB into raw layout
+ *
+ * Inverse of mtk_snand_from_raw_page(): builds the per-sector
+ * [data | FDM | ECC parity] layout in page_cache.  With @empty_ecc the
+ * parity regions are filled with 0xff instead of being copied from @oob.
+ * Either source pointer may be NULL to leave its region as 0xff.
+ */
+static void mtk_snand_to_raw_page(struct mtk_snand *snf,
+				  const void *buf, const void *oob,
+				  bool empty_ecc)
+{
+	uint32_t sect, ecc_len;
+	const uint8_t *data_src = buf, *fdm_src = oob, *ecc_src;
+	uint8_t *dst;
+
+	ecc_len = snf->spare_per_sector - snf->nfi_soc->fdm_size;
+	ecc_src = (const uint8_t *)oob +
+		  snf->ecc_steps * snf->nfi_soc->fdm_size;
+
+	/* Start from an all-0xff (erased) image */
+	memset(snf->page_cache, 0xff, snf->writesize + snf->oobsize);
+
+	for (sect = 0; sect < snf->ecc_steps; sect++) {
+		dst = snf->page_cache + sect * snf->raw_sector_size;
+
+		if (buf) {
+			memcpy(dst, data_src, snf->nfi_soc->sector_size);
+			data_src += snf->nfi_soc->sector_size;
+		}
+
+		dst += snf->nfi_soc->sector_size;
+
+		if (oob) {
+			memcpy(dst, fdm_src, snf->nfi_soc->fdm_size);
+			fdm_src += snf->nfi_soc->fdm_size;
+			dst += snf->nfi_soc->fdm_size;
+
+			if (empty_ecc)
+				memset(dst, 0xff, ecc_len);
+			else
+				memcpy(dst, ecc_src, ecc_len);
+			ecc_src += ecc_len;
+		}
+	}
+}
+
+/*
+ * mtk_snand_is_empty_page() - check whether a write would be all-0xff
+ *
+ * Scans the page data and, for the OOB, only the ECC-protected prefix
+ * (fdm_ecc_size bytes) of each sector's FDM region.  Either pointer may
+ * be NULL to skip that check.
+ */
+static bool mtk_snand_is_empty_page(struct mtk_snand *snf, const void *buf,
+				    const void *oob)
+{
+	const uint8_t *p;
+	uint32_t i, sect;
+
+	if (buf) {
+		p = buf;
+		for (i = 0; i < snf->writesize; i++)
+			if (p[i] != 0xff)
+				return false;
+	}
+
+	if (oob) {
+		for (sect = 0; sect < snf->ecc_steps; sect++) {
+			p = (const uint8_t *)oob +
+			    sect * snf->nfi_soc->fdm_size;
+
+			for (i = 0; i < snf->nfi_soc->fdm_ecc_size; i++)
+				if (p[i] != 0xff)
+					return false;
+		}
+	}
+
+	return true;
+}
+
+/*
+ * mtk_snand_do_write_page() - write one full page (data and/or OOB)
+ * @addr:   byte address of the page (chip-absolute)
+ * @buf:    page data to write, may be NULL
+ * @oob:    OOB data to write, may be NULL
+ * @raw:    bypass HW ECC
+ * @format: in raw mode, interleave sectors and apply bad-block-marker
+ *          swapping before writing
+ *
+ * An all-0xff page is silently demoted to a raw write with 0xff parity
+ * so the ECC engine never encodes parity for erased content.
+ *
+ * Returns 0 on success, -EIO if the chip reports a program failure, or
+ * another negative error code.
+ */
+static int mtk_snand_do_write_page(struct mtk_snand *snf, uint64_t addr,
+				   const void *buf, const void *oob,
+				   bool raw, bool format)
+{
+	uint64_t die_addr;
+	bool empty_ecc = false;
+	uint32_t page;
+	int ret;
+
+	die_addr = mtk_snand_select_die_address(snf, addr);
+	page = die_addr >> snf->writesize_shift;
+
+	if (!raw && mtk_snand_is_empty_page(snf, buf, oob)) {
+		/*
+		 * If the data in the page to be ecc-ed is full 0xff,
+		 * change to raw write mode
+		 */
+		raw = true;
+		format = true;
+
+		/* fill ecc parity code region with 0xff */
+		empty_ecc = true;
+	}
+
+	/* Stage the page image in page_cache */
+	if (raw) {
+		if (format) {
+			mtk_snand_to_raw_page(snf, buf, oob, empty_ecc);
+			mtk_snand_fdm_bm_swap_raw(snf);
+			mtk_snand_bm_swap_raw(snf);
+		} else {
+			memset(snf->page_cache, 0xff,
+			       snf->writesize + snf->oobsize);
+
+			if (buf)
+				memcpy(snf->page_cache, buf, snf->writesize);
+
+			if (oob) {
+				memcpy(snf->page_cache + snf->writesize, oob,
+				       snf->ecc_steps * snf->spare_per_sector);
+			}
+		}
+	} else {
+		memset(snf->page_cache, 0xff, snf->writesize + snf->oobsize);
+		if (buf)
+			memcpy(snf->page_cache, buf, snf->writesize);
+
+		if (oob) {
+			memcpy(snf->page_cache + snf->writesize, oob,
+			       snf->ecc_steps * snf->nfi_soc->fdm_size);
+		}
+
+		mtk_snand_fdm_bm_swap(snf);
+		mtk_snand_bm_swap(snf);
+	}
+
+	/* WRITE ENABLE -> program load -> PROGRAM EXECUTE -> poll status */
+	ret = mtk_snand_write_enable(snf);
+	if (ret)
+		return ret;
+
+	ret = mtk_snand_program_load(snf, page, raw);
+	if (ret)
+		return ret;
+
+	ret = mtk_snand_page_op(snf, page, SNAND_CMD_PROGRAM_EXECUTE);
+	if (ret)
+		return ret;
+
+	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
+	if (ret < 0) {
+		snand_log_chip(snf->pdev,
+			       "Page program command timed out on page %u\n",
+			       page);
+		return ret;
+	}
+
+	if (ret & SNAND_STATUS_PROGRAM_FAIL) {
+		snand_log_chip(snf->pdev,
+			       "Page program failed on page %u\n", page);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * mtk_snand_write_page() - public page-write entry point
+ *
+ * Validates arguments (at least one of @buf/@oob, address in range) and
+ * performs a formatted page write.
+ */
+int mtk_snand_write_page(struct mtk_snand *snf, uint64_t addr, const void *buf,
+			 const void *oob, bool raw)
+{
+	if (!snf)
+		return -EINVAL;
+
+	if (!buf && !oob)
+		return -EINVAL;
+
+	if (addr >= snf->size)
+		return -EINVAL;
+
+	return mtk_snand_do_write_page(snf, addr, buf, oob, raw, true);
+}
+
+/*
+ * mtk_snand_erase_block() - erase the block containing @addr
+ *
+ * Issues WRITE ENABLE, then BLOCK ERASE with the row address of the
+ * block's first page, then polls the chip status.
+ *
+ * Returns 0 on success, -EINVAL on bad arguments, -EIO if the chip
+ * reports an erase failure, or another negative error code.
+ */
+int mtk_snand_erase_block(struct mtk_snand *snf, uint64_t addr)
+{
+	uint64_t die_addr;
+	uint32_t page, block;
+	int ret;
+
+	if (!snf)
+		return -EINVAL;
+
+	if (addr >= snf->size)
+		return -EINVAL;
+
+	die_addr = mtk_snand_select_die_address(snf, addr);
+	block = die_addr >> snf->erasesize_shift;
+	/* Row address of the first page of the block */
+	page = block << (snf->erasesize_shift - snf->writesize_shift);
+
+	ret = mtk_snand_write_enable(snf);
+	if (ret)
+		return ret;
+
+	ret = mtk_snand_page_op(snf, page, SNAND_CMD_BLOCK_ERASE);
+	if (ret)
+		return ret;
+
+	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
+	if (ret < 0) {
+		snand_log_chip(snf->pdev,
+			       "Block erase command timed out on block %u\n",
+			       block);
+		return ret;
+	}
+
+	if (ret & SNAND_STATUS_ERASE_FAIL) {
+		snand_log_chip(snf->pdev,
+			       "Block erase failed on block %u\n", block);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * mtk_snand_block_isbad_std() - bad-block check, standard marker layout
+ *
+ * Reads the OOB of the block's first page (raw, unformatted) and treats
+ * any non-0xff first byte as a bad-block marker.  Uncorrectable-data
+ * reads (-EBADMSG) still yield a marker check.
+ *
+ * Returns 1 if bad, 0 if good, negative error code on read failure.
+ */
+static int mtk_snand_block_isbad_std(struct mtk_snand *snf, uint64_t addr)
+{
+	int err;
+
+	err = mtk_snand_do_read_page(snf, addr, NULL, snf->buf_cache, true,
+				     false);
+	if (err && err != -EBADMSG)
+		return err;
+
+	return snf->buf_cache[0] != 0xff;
+}
+
+/*
+ * mtk_snand_block_isbad_mtk() - bad-block check, MTK marker layout
+ *
+ * Same as the standard variant but reads with raw formatting enabled
+ * (de-interleaved sectors), matching the MTK bad-block-marker placement.
+ *
+ * Returns 1 if bad, 0 if good, negative error code on read failure.
+ */
+static int mtk_snand_block_isbad_mtk(struct mtk_snand *snf, uint64_t addr)
+{
+	int err;
+
+	err = mtk_snand_do_read_page(snf, addr, NULL, snf->buf_cache, true,
+				     true);
+	if (err && err != -EBADMSG)
+		return err;
+
+	return snf->buf_cache[0] != 0xff;
+}
+
+/*
+ * mtk_snand_block_isbad() - public bad-block query
+ *
+ * Rounds @addr down to the block boundary and dispatches to the layout
+ * variant matching the SoC's bad-block-marker swapping scheme.
+ */
+int mtk_snand_block_isbad(struct mtk_snand *snf, uint64_t addr)
+{
+	if (!snf)
+		return -EINVAL;
+
+	if (addr >= snf->size)
+		return -EINVAL;
+
+	/* Align to the start of the block */
+	addr &= ~snf->erasesize_mask;
+
+	return snf->nfi_soc->bbm_swap ?
+	       mtk_snand_block_isbad_std(snf, addr) :
+	       mtk_snand_block_isbad_mtk(snf, addr);
+}
+
+/*
+ * mtk_snand_block_markbad_std() - write a standard bad-block marker
+ *
+ * Writes an OOB image whose first byte is zero (rest 0xff) to the
+ * block's first page, raw and unformatted.
+ */
+static int mtk_snand_block_markbad_std(struct mtk_snand *snf, uint64_t addr)
+{
+	/* Standard BBM position */
+	memset(snf->buf_cache, 0xff, snf->oobsize);
+	snf->buf_cache[0] = 0;
+
+	return mtk_snand_do_write_page(snf, addr, NULL, snf->buf_cache, true,
+				       false);
+}
+
+/*
+ * mtk_snand_block_markbad_mtk() - write an MTK-layout bad-block marker
+ *
+ * Zero-fills data and OOB of the block's first page and writes it raw
+ * with formatting enabled.
+ */
+static int mtk_snand_block_markbad_mtk(struct mtk_snand *snf, uint64_t addr)
+{
+	/* Write the whole page with zeros */
+	memset(snf->buf_cache, 0, snf->writesize + snf->oobsize);
+
+	return mtk_snand_do_write_page(snf, addr, snf->buf_cache,
+				       snf->buf_cache + snf->writesize, true,
+				       true);
+}
+
+/*
+ * mtk_snand_block_markbad() - public bad-block marking
+ *
+ * Rounds @addr down to the block boundary and dispatches to the layout
+ * variant matching the SoC's bad-block-marker swapping scheme.
+ */
+int mtk_snand_block_markbad(struct mtk_snand *snf, uint64_t addr)
+{
+	if (!snf)
+		return -EINVAL;
+
+	if (addr >= snf->size)
+		return -EINVAL;
+
+	/* Align to the start of the block */
+	addr &= ~snf->erasesize_mask;
+
+	return snf->nfi_soc->bbm_swap ?
+	       mtk_snand_block_markbad_std(snf, addr) :
+	       mtk_snand_block_markbad_mtk(snf, addr);
+}
+
+/*
+ * mtk_snand_fill_oob() - scatter user OOB bytes into the raw FDM layout
+ * @oobraw: destination FDM image (ecc_steps * fdm_size bytes)
+ * @oobbuf: user OOB data
+ * @ooblen: number of user bytes to place
+ *
+ * Byte 0 of each sector's FDM region is skipped (reserved for the
+ * bad-block marker); the remaining fdm_size-1 bytes per sector are
+ * filled in order.
+ *
+ * Returns the number of user bytes that did not fit, or -EINVAL.
+ */
+int mtk_snand_fill_oob(struct mtk_snand *snf, uint8_t *oobraw,
+		       const uint8_t *oobbuf, size_t ooblen)
+{
+	size_t remain = ooblen, chunk;
+	const uint8_t *src = oobbuf;
+	uint32_t sect;
+
+	if (!snf || !oobraw || !src)
+		return -EINVAL;
+
+	for (sect = 0; remain && sect < snf->ecc_steps; sect++) {
+		chunk = snf->nfi_soc->fdm_size - 1;
+		if (chunk > remain)
+			chunk = remain;
+
+		memcpy(oobraw + sect * snf->nfi_soc->fdm_size + 1, src,
+		       chunk);
+
+		remain -= chunk;
+		src += chunk;
+	}
+
+	return remain;
+}
+
+/*
+ * mtk_snand_transfer_oob() - gather user OOB bytes from the raw FDM layout
+ * @oobbuf: destination user buffer
+ * @ooblen: number of bytes requested
+ * @oobraw: source FDM image (ecc_steps * fdm_size bytes)
+ *
+ * Inverse of mtk_snand_fill_oob(): skips byte 0 (bad-block marker) of
+ * each sector's FDM region and copies the rest in order.
+ *
+ * Returns the number of requested bytes not available, or -EINVAL.
+ */
+int mtk_snand_transfer_oob(struct mtk_snand *snf, uint8_t *oobbuf,
+			   size_t ooblen, const uint8_t *oobraw)
+{
+	size_t remain = ooblen, chunk;
+	uint8_t *dst = oobbuf;
+	uint32_t sect;
+
+	if (!snf || !oobraw || !dst)
+		return -EINVAL;
+
+	for (sect = 0; remain && sect < snf->ecc_steps; sect++) {
+		chunk = snf->nfi_soc->fdm_size - 1;
+		if (chunk > remain)
+			chunk = remain;
+
+		memcpy(dst, oobraw + sect * snf->nfi_soc->fdm_size + 1,
+		       chunk);
+
+		remain -= chunk;
+		dst += chunk;
+	}
+
+	return remain;
+}
+
+/*
+ * mtk_snand_read_page_auto_oob() - page read with auto OOB extraction
+ *
+ * Reads the page, then unpacks the user-visible OOB bytes out of the raw
+ * FDM image.  @actualooblen (optional) receives the number of OOB bytes
+ * actually delivered.  -EBADMSG from the read is propagated after the
+ * OOB transfer so callers still get the data.
+ */
+int mtk_snand_read_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
+				 void *buf, void *oob, size_t ooblen,
+				 size_t *actualooblen, bool raw)
+{
+	int ret, remain;
+
+	if (!snf)
+		return -EINVAL;
+
+	/* No OOB requested: plain page read */
+	if (!oob)
+		return mtk_snand_read_page(snf, addr, buf, NULL, raw);
+
+	ret = mtk_snand_read_page(snf, addr, buf, snf->buf_cache, raw);
+	if (ret && ret != -EBADMSG) {
+		if (actualooblen)
+			*actualooblen = 0;
+		return ret;
+	}
+
+	remain = mtk_snand_transfer_oob(snf, oob, ooblen, snf->buf_cache);
+	if (actualooblen)
+		*actualooblen = ooblen - remain;
+
+	return ret;
+}
+
+/*
+ * mtk_snand_write_page_auto_oob() - page write with auto OOB packing
+ *
+ * Packs the user OOB bytes into a raw FDM image (unused bytes 0xff),
+ * then writes the page.  @actualooblen (optional) receives the number of
+ * OOB bytes actually placed.
+ */
+int mtk_snand_write_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
+				  const void *buf, const void *oob,
+				  size_t ooblen, size_t *actualooblen, bool raw)
+{
+	int remain;
+
+	if (!snf)
+		return -EINVAL;
+
+	/* No OOB supplied: plain page write */
+	if (!oob)
+		return mtk_snand_write_page(snf, addr, buf, NULL, raw);
+
+	memset(snf->buf_cache, 0xff, snf->oobsize);
+	remain = mtk_snand_fill_oob(snf, snf->buf_cache, oob, ooblen);
+	if (actualooblen)
+		*actualooblen = ooblen - remain;
+
+	return mtk_snand_write_page(snf, addr, buf, snf->buf_cache, raw);
+}
+
+/*
+ * mtk_snand_get_chip_info() - export flash geometry and ECC parameters
+ *
+ * Copies the probed chip/controller parameters into the caller-supplied
+ * info structure.  Returns 0, or -EINVAL on NULL arguments.
+ */
+int mtk_snand_get_chip_info(struct mtk_snand *snf,
+			    struct mtk_snand_chip_info *info)
+{
+	if (!snf || !info)
+		return -EINVAL;
+
+	/* Flash geometry */
+	info->model = snf->model;
+	info->chipsize = snf->size;
+	info->blocksize = snf->erasesize;
+	info->pagesize = snf->writesize;
+	info->sparesize = snf->oobsize;
+
+	/* Controller sector/spare layout */
+	info->num_sectors = snf->ecc_steps;
+	info->sector_size = snf->nfi_soc->sector_size;
+	info->spare_per_sector = snf->spare_per_sector;
+	info->fdm_size = snf->nfi_soc->fdm_size;
+	info->fdm_ecc_size = snf->nfi_soc->fdm_ecc_size;
+
+	/* ECC configuration */
+	info->ecc_strength = snf->ecc_strength;
+	info->ecc_bytes = snf->ecc_bytes;
+
+	return 0;
+}
+
+/*
+ * mtk_snand_irq_process() - interrupt handler hook
+ *
+ * Reads NFI_INTR_STA and NFI_INTR_EN; if an enabled interrupt is
+ * pending, disables further interrupts and signals the completion
+ * waiter.  NOTE(review): NFI_INTR_STA appears to be read-to-clear (the
+ * teardown paths also read it to ack) — confirm against the register
+ * manual.
+ *
+ * Returns 1 if an interrupt was handled, 0 if not ours, -EINVAL on NULL.
+ */
+int mtk_snand_irq_process(struct mtk_snand *snf)
+{
+	uint32_t sta, ien;
+
+	if (!snf)
+		return -EINVAL;
+
+	sta = nfi_read32(snf, NFI_INTR_STA);
+	ien = nfi_read32(snf, NFI_INTR_EN);
+
+	if (!(sta & ien))
+		return 0;
+
+	nfi_write32(snf, NFI_INTR_EN, 0);
+	irq_completion_done(snf->pdev);
+
+	return 1;
+}
+
+/*
+ * mtk_snand_select_spare_per_sector() - pick the HW spare-size setting
+ *
+ * Chooses the largest controller-supported spare size that fits the
+ * chip's per-sector spare budget and stores it in snf->spare_per_sector.
+ *
+ * Returns the index into nfi_soc->spare_sizes, or -1 if nothing fits.
+ */
+static int mtk_snand_select_spare_per_sector(struct mtk_snand *snf)
+{
+	uint32_t budget = snf->oobsize / snf->ecc_steps;
+	uint32_t mul = 1;
+	int idx;
+
+	/*
+	 * If we're using the 1KB sector size, HW will automatically
+	 * double the spare size. So we should only use half of the value.
+	 */
+	if (snf->nfi_soc->sector_size == 1024)
+		mul = 2;
+
+	budget /= mul;
+
+	/* Scan from largest to smallest supported spare size */
+	for (idx = snf->nfi_soc->num_spare_size - 1; idx >= 0; idx--) {
+		if (snf->nfi_soc->spare_sizes[idx] <= budget) {
+			snf->spare_per_sector =
+				snf->nfi_soc->spare_sizes[idx] * mul;
+			return idx;
+		}
+	}
+
+	snand_log_nfi(snf->pdev,
+		      "Page size %u+%u is not supported\n", snf->writesize,
+		      snf->oobsize);
+
+	return -1;
+}
+
+/*
+ * mtk_snand_pagefmt_setup() - program the NFI page-format register
+ *
+ * Derives the page-size index and spare-size index for the configured
+ * sector size and writes NFI_PAGEFMT.  Also computes raw_sector_size.
+ *
+ * Returns 0 on success, -ENOTSUPP for unsupported page/spare geometry.
+ */
+static int mtk_snand_pagefmt_setup(struct mtk_snand *snf)
+{
+	uint32_t spare_size_shift, pagesize_idx;
+	uint32_t sector_size_512;
+	int spare_size_idx;
+
+	if (snf->nfi_soc->sector_size == 512) {
+		sector_size_512 = NFI_SEC_SEL_512;
+		spare_size_shift = NFI_SPARE_SIZE_S;
+	} else {
+		sector_size_512 = 0;
+		spare_size_shift = NFI_SPARE_SIZE_LS_S;
+	}
+
+	switch (snf->writesize) {
+	case SZ_512:
+		pagesize_idx = NFI_PAGE_SIZE_512_2K;
+		break;
+	case SZ_2K:
+		if (snf->nfi_soc->sector_size == 512)
+			pagesize_idx = NFI_PAGE_SIZE_2K_4K;
+		else
+			pagesize_idx = NFI_PAGE_SIZE_512_2K;
+		break;
+	case SZ_4K:
+		if (snf->nfi_soc->sector_size == 512)
+			pagesize_idx = NFI_PAGE_SIZE_4K_8K;
+		else
+			pagesize_idx = NFI_PAGE_SIZE_2K_4K;
+		break;
+	case SZ_8K:
+		if (snf->nfi_soc->sector_size == 512)
+			pagesize_idx = NFI_PAGE_SIZE_8K_16K;
+		else
+			pagesize_idx = NFI_PAGE_SIZE_4K_8K;
+		break;
+	case SZ_16K:
+		pagesize_idx = NFI_PAGE_SIZE_8K_16K;
+		break;
+	default:
+		snand_log_nfi(snf->pdev, "Page size %u is not supported\n",
+			      snf->writesize);
+		return -ENOTSUPP;
+	}
+
+	/* Fix: spare_size_idx was declared uint32_t, so the `< 0` check
+	 * below could never fire and a -1 (unsupported spare geometry)
+	 * from select_spare_per_sector slipped through. Use int. */
+	spare_size_idx = mtk_snand_select_spare_per_sector(snf);
+	if (unlikely(spare_size_idx < 0))
+		return -ENOTSUPP;
+
+	snf->raw_sector_size = snf->nfi_soc->sector_size +
+			       snf->spare_per_sector;
+
+	/* Setup page format */
+	nfi_write32(snf, NFI_PAGEFMT,
+		    (snf->nfi_soc->fdm_ecc_size << NFI_FDM_ECC_NUM_S) |
+		    (snf->nfi_soc->fdm_size << NFI_FDM_NUM_S) |
+		    (spare_size_idx << spare_size_shift) |
+		    (pagesize_idx << NFI_PAGE_SIZE_S) |
+		    sector_size_512);
+
+	return 0;
+}
+
+/*
+ * mtk_snand_select_opcode() - pick the best mutually-supported I/O mode
+ *
+ * Intersects the controller capabilities with the flash's, takes the
+ * highest set bit (most capable mode), and returns its opcode and dummy
+ * count through @opcode/@dummy (@dummy may be NULL).
+ *
+ * Returns the selected snand_flash_io index, or __SNAND_IO_MAX if there
+ * is no common mode.
+ */
+static enum snand_flash_io mtk_snand_select_opcode(struct mtk_snand *snf,
+				   uint32_t snfi_caps, uint8_t *opcode,
+				   uint8_t *dummy,
+				   const struct snand_io_cap *op_cap)
+{
+	uint32_t common = snfi_caps & op_cap->caps;
+	uint32_t top = fls(common);
+
+	if (!top)
+		return __SNAND_IO_MAX;
+
+	*opcode = op_cap->opcodes[top - 1].opcode;
+	if (dummy)
+		*dummy = op_cap->opcodes[top - 1].dummy;
+
+	return top - 1;
+}
+
+/*
+ * mtk_snand_select_opcode_rfc() - choose the read-from-cache opcode/mode
+ *
+ * Stores the selected opcode, dummy count and SNFI read-mode value in
+ * the instance; marks quad_spi_op when a x4/quad mode was chosen.
+ *
+ * Returns 0 on success, -ENOTSUPP when no common mode exists.
+ */
+static int mtk_snand_select_opcode_rfc(struct mtk_snand *snf,
+				       uint32_t snfi_caps,
+				       const struct snand_io_cap *op_cap)
+{
+	static const uint8_t rfc_modes[__SNAND_IO_MAX] = {
+		[SNAND_IO_1_1_1] = DATA_READ_MODE_X1,
+		[SNAND_IO_1_1_2] = DATA_READ_MODE_X2,
+		[SNAND_IO_1_2_2] = DATA_READ_MODE_DUAL,
+		[SNAND_IO_1_1_4] = DATA_READ_MODE_X4,
+		[SNAND_IO_1_4_4] = DATA_READ_MODE_QUAD,
+	};
+	enum snand_flash_io io;
+
+	io = mtk_snand_select_opcode(snf, snfi_caps, &snf->opcode_rfc,
+				     &snf->dummy_rfc, op_cap);
+	if (io >= __SNAND_IO_MAX) {
+		snand_log_snfi(snf->pdev,
+			       "No capable opcode for read from cache\n");
+		return -ENOTSUPP;
+	}
+
+	snf->mode_rfc = rfc_modes[io];
+
+	/* Quad data lines in use: the chip's QE bit must be enabled */
+	if (io == SNAND_IO_1_1_4 || io == SNAND_IO_1_4_4)
+		snf->quad_spi_op = true;
+
+	return 0;
+}
+
+/*
+ * mtk_snand_select_opcode_pl() - choose the program-load opcode/mode
+ *
+ * Stores the selected opcode and SNFI program-load mode (0 = x1,
+ * 1 = x4) in the instance; marks quad_spi_op for x4.
+ *
+ * Returns 0 on success, -ENOTSUPP when no common mode exists.
+ */
+static int mtk_snand_select_opcode_pl(struct mtk_snand *snf, uint32_t snfi_caps,
+				      const struct snand_io_cap *op_cap)
+{
+	static const uint8_t pl_modes[__SNAND_IO_MAX] = {
+		[SNAND_IO_1_1_1] = 0,
+		[SNAND_IO_1_1_4] = 1,
+	};
+	enum snand_flash_io io;
+
+	io = mtk_snand_select_opcode(snf, snfi_caps, &snf->opcode_pl,
+				     NULL, op_cap);
+	if (io >= __SNAND_IO_MAX) {
+		snand_log_snfi(snf->pdev,
+			       "No capable opcode for program load\n");
+		return -ENOTSUPP;
+	}
+
+	snf->mode_pl = pl_modes[io];
+
+	/* Quad data lines in use: the chip's QE bit must be enabled */
+	if (io == SNAND_IO_1_1_4)
+		snf->quad_spi_op = true;
+
+	return 0;
+}
+
+/*
+ * mtk_snand_setup() - configure instance state and hardware for a chip
+ * @snand_info: entry from the flash ID table describing the probed chip
+ *
+ * Derives geometry (sizes/masks/shifts), selects I/O opcodes, programs
+ * the NFI page format and ECC engine, clears stale interrupt/done state,
+ * and initializes every die (On-Die ECC off, unprotect, quad-SPI).
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int mtk_snand_setup(struct mtk_snand *snf,
+			   const struct snand_flash_info *snand_info)
+{
+	const struct snand_mem_org *memorg = &snand_info->memorg;
+	uint32_t i, msg_size, snfi_caps;
+	int ret;
+
+	/* Calculate flash memory organization */
+	snf->model = snand_info->model;
+	snf->writesize = memorg->pagesize;
+	snf->oobsize = memorg->sparesize;
+	snf->erasesize = snf->writesize * memorg->pages_per_block;
+	snf->die_size = (uint64_t)snf->erasesize * memorg->blocks_per_die;
+	snf->size = snf->die_size * memorg->ndies;
+	snf->num_dies = memorg->ndies;
+
+	/* All sizes are powers of two, so masks/shifts are exact */
+	snf->writesize_mask = snf->writesize - 1;
+	snf->erasesize_mask = snf->erasesize - 1;
+	snf->die_mask = snf->die_size - 1;
+
+	snf->writesize_shift = ffs(snf->writesize) - 1;
+	snf->erasesize_shift = ffs(snf->erasesize) - 1;
+	snf->die_shift = mtk_snand_ffs64(snf->die_size) - 1;
+
+	snf->select_die = snand_info->select_die;
+
+	/* Determine opcodes for read from cache/program load */
+	snfi_caps = SPI_IO_1_1_1 | SPI_IO_1_1_2 | SPI_IO_1_2_2;
+	if (snf->snfi_quad_spi)
+		snfi_caps |= SPI_IO_1_1_4 | SPI_IO_1_4_4;
+
+	ret = mtk_snand_select_opcode_rfc(snf, snfi_caps, snand_info->cap_rd);
+	if (ret)
+		return ret;
+
+	ret = mtk_snand_select_opcode_pl(snf, snfi_caps, snand_info->cap_pl);
+	if (ret)
+		return ret;
+
+	/* ECC and page format */
+	snf->ecc_steps = snf->writesize / snf->nfi_soc->sector_size;
+	if (snf->ecc_steps > snf->nfi_soc->max_sectors) {
+		snand_log_nfi(snf->pdev, "Page size %u is not supported\n",
+			      snf->writesize);
+		return -ENOTSUPP;
+	}
+
+	ret = mtk_snand_pagefmt_setup(snf);
+	if (ret)
+		return ret;
+
+	/* ECC codeword = sector data + ECC-protected FDM bytes */
+	msg_size = snf->nfi_soc->sector_size + snf->nfi_soc->fdm_ecc_size;
+	ret = mtk_ecc_setup(snf, snf->nfi_base + NFI_FDM0L,
+			    snf->spare_per_sector - snf->nfi_soc->fdm_size,
+			    msg_size);
+	if (ret)
+		return ret;
+
+	nfi_write16(snf, NFI_CNFG, 0);
+
+	/* Tuning options */
+	nfi_write16(snf, NFI_DEBUG_CON1, WBUF_EN);
+	nfi_write32(snf, SNF_DLY_CTL3, (40 << SFCK_SAM_DLY_S));
+
+	/* Interrupts */
+	nfi_read32(snf, NFI_INTR_STA);
+	nfi_write32(snf, NFI_INTR_EN, 0);
+
+	/* Clear SNF done flag */
+	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE | CUS_PG_DONE);
+	nfi_write32(snf, SNF_STA_CTL1, 0);
+
+	/* Initialization on all dies */
+	for (i = 0; i < snf->num_dies; i++) {
+		mtk_snand_select_die(snf, i);
+
+		/* Disable On-Die ECC engine */
+		ret = mtk_snand_ondie_ecc_control(snf, false);
+		if (ret)
+			return ret;
+
+		/* Disable block protection */
+		mtk_snand_unlock(snf);
+
+		/* Enable/disable quad-spi */
+		mtk_snand_qspi_control(snf, snf->quad_spi_op);
+	}
+
+	mtk_snand_select_die(snf, 0);
+
+	return 0;
+}
+
+/*
+ * mtk_snand_id_probe() - identify the flash by its JEDEC ID
+ *
+ * Tries the READ ID command in two address-cycle variants (opcode plus a
+ * dummy/address byte, then opcode alone) and looks each 4-byte ID up in
+ * the flash table.
+ *
+ * Returns 0 with *@snand_info set, or -EINVAL if no table entry matches.
+ */
+static int mtk_snand_id_probe(struct mtk_snand *snf,
+			      const struct snand_flash_info **snand_info)
+{
+	uint8_t id[4], op[2];
+	int ret;
+
+	/* Read SPI-NAND JEDEC ID, OP + dummy/addr + ID */
+	op[0] = SNAND_CMD_READID;
+	op[1] = 0;
+	ret = mtk_snand_mac_io(snf, op, 2, id, sizeof(id));
+	if (ret)
+		return ret;
+
+	*snand_info = snand_flash_id_lookup(SNAND_ID_DYMMY, id);
+	if (*snand_info)
+		return 0;
+
+	/* Read SPI-NAND JEDEC ID, OP + ID */
+	op[0] = SNAND_CMD_READID;
+	ret = mtk_snand_mac_io(snf, op, 1, id, sizeof(id));
+	if (ret)
+		return ret;
+
+	/* NOTE(review): this no-dummy variant also passes SNAND_ID_DYMMY
+	 * to the lookup — confirm whether a distinct ID type (direct,
+	 * without dummy byte) was intended here. */
+	*snand_info = snand_flash_id_lookup(SNAND_ID_DYMMY, id);
+	if (*snand_info)
+		return 0;
+
+	snand_log_chip(snf->pdev,
+		       "Unrecognized SPI-NAND ID: %02x %02x %02x %02x\n",
+		       id[0], id[1], id[2], id[3]);
+
+	return -EINVAL;
+}
+
+/*
+ * mtk_snand_init() - create and initialize a driver instance
+ * @dev:   opaque device handle passed through to platform helpers
+ * @pdata: controller base addresses, SoC variant and quad-SPI policy
+ * @psnf:  receives the allocated instance on success
+ *
+ * Uses a temporary stack instance to reset the controller and chip and
+ * probe the flash ID, then allocates the real instance (with trailing
+ * buf_cache) plus a DMA-able page_cache and runs the full setup.
+ *
+ * Returns 0 on success or a negative error code; on failure nothing is
+ * left allocated.
+ */
+int mtk_snand_init(void *dev, const struct mtk_snand_platdata *pdata,
+		   struct mtk_snand **psnf)
+{
+	const struct snand_flash_info *snand_info;
+	struct mtk_snand tmpsnf, *snf;
+	uint32_t rawpage_size;
+	int ret;
+
+	if (!pdata || !psnf)
+		return -EINVAL;
+
+	if (pdata->soc >= __SNAND_SOC_MAX) {
+		snand_log_chip(dev, "Invalid SOC %u for MTK-SNAND\n",
+			       pdata->soc);
+		return -EINVAL;
+	}
+
+	/* Dummy instance only for initial reset and id probe */
+	tmpsnf.nfi_base = pdata->nfi_base;
+	tmpsnf.ecc_base = pdata->ecc_base;
+	tmpsnf.soc = pdata->soc;
+	tmpsnf.nfi_soc = &mtk_snand_socs[pdata->soc];
+	tmpsnf.pdev = dev;
+
+	/* Switch to SNFI mode */
+	writel(SPI_MODE, tmpsnf.nfi_base + SNF_CFG);
+
+	/* Reset SNFI & NFI */
+	mtk_snand_mac_reset(&tmpsnf);
+	mtk_nfi_reset(&tmpsnf);
+
+	/* Reset SPI-NAND chip */
+	ret = mtk_snand_chip_reset(&tmpsnf);
+	if (ret) {
+		snand_log_chip(dev, "Failed to reset SPI-NAND chip\n");
+		return ret;
+	}
+
+	/* Probe SPI-NAND flash by JEDEC ID */
+	ret = mtk_snand_id_probe(&tmpsnf, &snand_info);
+	if (ret)
+		return ret;
+
+	rawpage_size = snand_info->memorg.pagesize +
+		       snand_info->memorg.sparesize;
+
+	/* Allocate memory for instance and cache */
+	snf = generic_mem_alloc(dev, sizeof(*snf) + rawpage_size);
+	if (!snf) {
+		snand_log_chip(dev, "Failed to allocate memory for instance\n");
+		return -ENOMEM;
+	}
+
+	/* buf_cache lives in the same allocation, right after the struct */
+	snf->buf_cache = (uint8_t *)((uintptr_t)snf + sizeof(*snf));
+
+	/* Allocate memory for DMA buffer */
+	snf->page_cache = dma_mem_alloc(dev, rawpage_size);
+	if (!snf->page_cache) {
+		generic_mem_free(dev, snf);
+		snand_log_chip(dev,
+			       "Failed to allocate memory for DMA buffer\n");
+		return -ENOMEM;
+	}
+
+	/* Fill up instance */
+	snf->pdev = dev;
+	snf->nfi_base = pdata->nfi_base;
+	snf->ecc_base = pdata->ecc_base;
+	snf->soc = pdata->soc;
+	snf->nfi_soc = &mtk_snand_socs[pdata->soc];
+	snf->snfi_quad_spi = pdata->quad_spi;
+
+	/* Initialize SNFI & ECC engine */
+	ret = mtk_snand_setup(snf, snand_info);
+	if (ret) {
+		dma_mem_free(dev, snf->page_cache);
+		generic_mem_free(dev, snf);
+		return ret;
+	}
+
+	*psnf = snf;
+
+	return 0;
+}
+
+/*
+ * mtk_snand_cleanup() - release an instance created by mtk_snand_init()
+ *
+ * Safe to call with NULL.  Always returns 0.
+ */
+int mtk_snand_cleanup(struct mtk_snand *snf)
+{
+	if (snf) {
+		dma_mem_free(snf->pdev, snf->page_cache);
+		generic_mem_free(snf->pdev, snf);
+	}
+
+	return 0;
+}
diff --git a/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/mtk-snand.h b/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/mtk-snand.h
new file mode 100644
index 0000000..382f80c
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/mtd/mtk-snand/mtk-snand.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Weijie Gao <weijie.gao@mediatek.com>
+ */
+
+#ifndef _MTK_SNAND_H_
+#define _MTK_SNAND_H_
+
+#ifndef PRIVATE_MTK_SNAND_HEADER
+#include <stddef.h>
+#include <stdint.h>
+#include <stdbool.h>
+#endif
+
+enum mtk_snand_soc {
+	SNAND_SOC_MT7622,
+	SNAND_SOC_MT7629,
+	SNAND_SOC_MT7986,
+
+	__SNAND_SOC_MAX
+};
+
+struct mtk_snand_platdata {
+	void *nfi_base;
+	void *ecc_base;
+	enum mtk_snand_soc soc;
+	bool quad_spi;
+};
+
+struct mtk_snand_chip_info {
+	const char *model;
+	uint64_t chipsize;
+	uint32_t blocksize;
+	uint32_t pagesize;
+	uint32_t sparesize;
+	uint32_t spare_per_sector;
+	uint32_t fdm_size;
+	uint32_t fdm_ecc_size;
+	uint32_t num_sectors;
+	uint32_t sector_size;
+	uint32_t ecc_strength;
+	uint32_t ecc_bytes;
+};
+
+struct mtk_snand;
+struct snand_flash_info;
+
+int mtk_snand_init(void *dev, const struct mtk_snand_platdata *pdata,
+		   struct mtk_snand **psnf);
+int mtk_snand_cleanup(struct mtk_snand *snf);
+
+int mtk_snand_chip_reset(struct mtk_snand *snf);
+int mtk_snand_read_page(struct mtk_snand *snf, uint64_t addr, void *buf,
+			void *oob, bool raw);
+int mtk_snand_write_page(struct mtk_snand *snf, uint64_t addr, const void *buf,
+			 const void *oob, bool raw);
+int mtk_snand_erase_block(struct mtk_snand *snf, uint64_t addr);
+int mtk_snand_block_isbad(struct mtk_snand *snf, uint64_t addr);
+int mtk_snand_block_markbad(struct mtk_snand *snf, uint64_t addr);
+int mtk_snand_fill_oob(struct mtk_snand *snf, uint8_t *oobraw,
+		       const uint8_t *oobbuf, size_t ooblen);
+int mtk_snand_transfer_oob(struct mtk_snand *snf, uint8_t *oobbuf,
+			   size_t ooblen, const uint8_t *oobraw);
+int mtk_snand_read_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
+				 void *buf, void *oob, size_t ooblen,
+				 size_t *actualooblen, bool raw);
+int mtk_snand_write_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
+				  const void *buf, const void *oob,
+				  size_t ooblen, size_t *actualooblen,
+				  bool raw);
+int mtk_snand_get_chip_info(struct mtk_snand *snf,
+			    struct mtk_snand_chip_info *info);
+int mtk_snand_irq_process(struct mtk_snand *snf);
+
+#endif /* _MTK_SNAND_H_ */
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/Kconfig b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/Kconfig
new file mode 100755
index 0000000..b097f52
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/Kconfig
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_VENDOR_MEDIATEK
+	bool "MediaTek ethernet driver"
+	depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620
+	---help---
+	  If you have a Mediatek SoC with ethernet, say Y.
+
+if NET_VENDOR_MEDIATEK
+
+config NET_MEDIATEK_SOC
+	tristate "MediaTek SoC Gigabit Ethernet support"
+	select PHYLINK
+	---help---
+	  This driver supports the gigabit ethernet MACs in the
+	  MediaTek SoC family.
+
+config MEDIATEK_NETSYS_V2
+	tristate "MediaTek Ethernet NETSYS V2 support"
+	 depends on ARCH_MEDIATEK && NET_MEDIATEK_SOC
+	---help---
+	  This options enable MTK Ethernet NETSYS V2 support
+
+config NET_MEDIATEK_HNAT
+	tristate "MediaTek HW NAT support"
+	depends on NET_MEDIATEK_SOC && NF_CONNTRACK && IP_NF_NAT
+	---help---
+	  This driver supports the hardward Network Address Translation
+	  in the MediaTek MT7986/MT2701/MT7622/MT7629/MT7621 chipset
+	  family.
+
+config NET_MEDIATEK_HW_QOS
+	bool "Mediatek HW QoS support"
+	depends on NET_MEDIATEK_HNAT
+	default n
+	---help---
+	  This driver supports the hardward
+	  quality of service (QoS) control
+	  for the hardware NAT in the
+	  MediaTek chipset family.
+
+endif #NET_VENDOR_MEDIATEK
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/Makefile b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/Makefile
new file mode 100755
index 0000000..f046e73
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Mediatek SoCs built-in ethernet macs
+#
+
+obj-$(CONFIG_NET_MEDIATEK_SOC)			+= mtk_eth.o
+mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_eth_dbg.o
+obj-$(CONFIG_NET_MEDIATEK_HNAT)			+= mtk_hnat/
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.c
new file mode 100755
index 0000000..82aa6ca
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.c
@@ -0,0 +1,840 @@
+/*
+ *   Copyright (C) 2018 MediaTek Inc.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; version 2 of the License
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
+ *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
+ *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
+ */
+
+#include <linux/trace_seq.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/of_mdio.h>
+
+#include "mtk_eth_soc.h"
+#include "mtk_eth_dbg.h"
+
+struct mtk_eth_debug {
+        struct dentry *root;
+};
+
+struct mtk_eth *g_eth;
+
+struct mtk_eth_debug eth_debug;
+
+void mt7530_mdio_w32(struct mtk_eth *eth, u32 reg, u32 val)
+{
+	mutex_lock(&eth->mii_bus->mdio_lock);
+
+	_mtk_mdio_write(eth, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
+	_mtk_mdio_write(eth, 0x1f, (reg >> 2) & 0xf,  val & 0xffff);
+	_mtk_mdio_write(eth, 0x1f, 0x10, val >> 16);
+
+	mutex_unlock(&eth->mii_bus->mdio_lock);
+}
+
+u32 mt7530_mdio_r32(struct mtk_eth *eth, u32 reg)
+{
+	u16 high, low;
+
+	mutex_lock(&eth->mii_bus->mdio_lock);
+
+	_mtk_mdio_write(eth, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
+	low = _mtk_mdio_read(eth, 0x1f, (reg >> 2) & 0xf);
+	high = _mtk_mdio_read(eth, 0x1f, 0x10);
+
+	mutex_unlock(&eth->mii_bus->mdio_lock);
+
+	return (high << 16) | (low & 0xffff);
+}
+
+void mtk_switch_w32(struct mtk_eth *eth, u32 val, unsigned reg)
+{
+	mtk_w32(eth, val, reg + 0x10000);
+}
+EXPORT_SYMBOL(mtk_switch_w32);
+
+u32 mtk_switch_r32(struct mtk_eth *eth, unsigned reg)
+{
+	return mtk_r32(eth, reg + 0x10000);
+}
+EXPORT_SYMBOL(mtk_switch_r32);
+
+static int mtketh_debug_show(struct seq_file *m, void *private)
+{
+	struct mtk_eth *eth = m->private;
+	struct mtk_mac *mac = 0;
+	u32 d;
+	int  i, j = 0;
+
+	for (i = 0 ; i < MTK_MAX_DEVS ; i++) {
+		if (!eth->mac[i] ||
+		    of_phy_is_fixed_link(eth->mac[i]->of_node))
+			continue;
+		mac = eth->mac[i];
+#if 0 //FIXME
+		while (j < 30) {
+			d =  _mtk_mdio_read(eth, mac->phy_dev->addr, j);
+
+			seq_printf(m, "phy=%d, reg=0x%08x, data=0x%08x\n",
+				   mac->phy_dev->addr, j, d);
+			j++;
+		}
+#endif		
+	}
+	return 0;
+}
+
+static int mtketh_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mtketh_debug_show, inode->i_private);
+}
+
+static const struct file_operations mtketh_debug_fops = {
+	.open = mtketh_debug_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int mtketh_mt7530sw_debug_show(struct seq_file *m, void *private)
+{
+	struct mtk_eth *eth = m->private;
+	u32  offset, data;
+	int i;
+	struct mt7530_ranges {
+		u32 start;
+		u32 end;
+	} ranges[] = {
+		{0x0, 0xac},
+		{0x1000, 0x10e0},
+		{0x1100, 0x1140},
+		{0x1200, 0x1240},
+		{0x1300, 0x1340},
+		{0x1400, 0x1440},
+		{0x1500, 0x1540},
+		{0x1600, 0x1640},
+		{0x1800, 0x1848},
+		{0x1900, 0x1948},
+		{0x1a00, 0x1a48},
+		{0x1b00, 0x1b48},
+		{0x1c00, 0x1c48},
+		{0x1d00, 0x1d48},
+		{0x1e00, 0x1e48},
+		{0x1f60, 0x1ffc},
+		{0x2000, 0x212c},
+		{0x2200, 0x222c},
+		{0x2300, 0x232c},
+		{0x2400, 0x242c},
+		{0x2500, 0x252c},
+		{0x2600, 0x262c},
+		{0x3000, 0x3014},
+		{0x30c0, 0x30f8},
+		{0x3100, 0x3114},
+		{0x3200, 0x3214},
+		{0x3300, 0x3314},
+		{0x3400, 0x3414},
+		{0x3500, 0x3514},
+		{0x3600, 0x3614},
+		{0x4000, 0x40d4},
+		{0x4100, 0x41d4},
+		{0x4200, 0x42d4},
+		{0x4300, 0x43d4},
+		{0x4400, 0x44d4},
+		{0x4500, 0x45d4},
+		{0x4600, 0x46d4},
+		{0x4f00, 0x461c},
+		{0x7000, 0x7038},
+		{0x7120, 0x7124},
+		{0x7800, 0x7804},
+		{0x7810, 0x7810},
+		{0x7830, 0x7830},
+		{0x7a00, 0x7a7c},
+		{0x7b00, 0x7b04},
+		{0x7e00, 0x7e04},
+		{0x7ffc, 0x7ffc},
+	};
+
+	if (!mt7530_exist(eth))
+		return -EOPNOTSUPP;
+
+	if ((!eth->mac[0] || !of_phy_is_fixed_link(eth->mac[0]->of_node)) &&
+	    (!eth->mac[1] || !of_phy_is_fixed_link(eth->mac[1]->of_node))) {
+		seq_puts(m, "no switch found\n");
+		return 0;
+	}
+
+	for (i = 0 ; i < ARRAY_SIZE(ranges) ; i++) {
+		for (offset = ranges[i].start;
+		     offset <= ranges[i].end; offset += 4) {
+			data =  mt7530_mdio_r32(eth, offset);
+			seq_printf(m, "mt7530 switch reg=0x%08x, data=0x%08x\n",
+				   offset, data);
+		}
+	}
+
+	return 0;
+}
+
+static int mtketh_debug_mt7530sw_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mtketh_mt7530sw_debug_show, inode->i_private);
+}
+
+static const struct file_operations mtketh_debug_mt7530sw_fops = {
+	.open = mtketh_debug_mt7530sw_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static ssize_t mtketh_mt7530sw_debugfs_write(struct file *file,
+					     const char __user *ptr,
+					     size_t len, loff_t *off)
+{
+	struct mtk_eth *eth = file->private_data;
+	char buf[32], *token, *p = buf;
+	u32 reg, value, phy;
+	int ret;
+
+	if (!mt7530_exist(eth))
+		return -EOPNOTSUPP;
+
+	if (*off != 0)
+		return 0;
+
+	if (len > sizeof(buf) - 1)
+		len = sizeof(buf) - 1;
+
+	ret = strncpy_from_user(buf, ptr, len);
+	if (ret < 0)
+		return ret;
+	buf[len] = '\0';
+
+	token = strsep(&p, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtoul(token, 16, (unsigned long *)&phy))
+		return -EINVAL;
+
+	token = strsep(&p, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtoul(token, 16, (unsigned long *)&reg))
+		return -EINVAL;
+
+	token = strsep(&p, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtoul(token, 16, (unsigned long *)&value))
+		return -EINVAL;
+
+	pr_info("%s:phy=%d, reg=0x%x, val=0x%x\n", __func__,
+		0x1f, reg, value);
+	mt7530_mdio_w32(eth, reg, value);
+	pr_info("%s:phy=%d, reg=0x%x, val=0x%x confirm..\n", __func__,
+		0x1f, reg, mt7530_mdio_r32(eth, reg));
+
+	return len;
+}
+
+static ssize_t mtketh_debugfs_write(struct file *file, const char __user *ptr,
+				    size_t len, loff_t *off)
+{
+	struct mtk_eth *eth = file->private_data;
+	char buf[32], *token, *p = buf;
+	u32 reg, value, phy;
+	int ret;
+
+	if (*off != 0)
+		return 0;
+
+	if (len > sizeof(buf) - 1)
+		len = sizeof(buf) - 1;
+
+	ret = strncpy_from_user(buf, ptr, len);
+	if (ret < 0)
+		return ret;
+	buf[len] = '\0';
+
+	token = strsep(&p, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtoul(token, 16, (unsigned long *)&phy))
+		return -EINVAL;
+
+	token = strsep(&p, " ");
+
+	if (!token)
+		return -EINVAL;
+	if (kstrtoul(token, 16, (unsigned long *)&reg))
+		return -EINVAL;
+
+	token = strsep(&p, " ");
+
+	if (!token)
+		return -EINVAL;
+	if (kstrtoul(token, 16, (unsigned long *)&value))
+		return -EINVAL;
+
+	pr_info("%s:phy=%d, reg=0x%x, val=0x%x\n", __func__,
+		phy, reg, value);
+
+	_mtk_mdio_write(eth, phy,  reg, value);
+
+	pr_info("%s:phy=%d, reg=0x%x, val=0x%x confirm..\n", __func__,
+		phy, reg, _mtk_mdio_read(eth, phy, reg));
+
+	return len;
+}
+
+static ssize_t mtketh_debugfs_reset(struct file *file, const char __user *ptr,
+				    size_t len, loff_t *off)
+{
+	struct mtk_eth *eth = file->private_data;
+
+	schedule_work(&eth->pending_work);
+	return len;
+}
+
+static const struct file_operations fops_reg_w = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.write = mtketh_debugfs_write,
+	.llseek = noop_llseek,
+};
+
+static const struct file_operations fops_eth_reset = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.write = mtketh_debugfs_reset,
+	.llseek = noop_llseek,
+};
+
+static const struct file_operations fops_mt7530sw_reg_w = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.write = mtketh_mt7530sw_debugfs_write,
+	.llseek = noop_llseek,
+};
+
+void mtketh_debugfs_exit(struct mtk_eth *eth)
+{
+	debugfs_remove_recursive(eth_debug.root);
+}
+
+int mtketh_debugfs_init(struct mtk_eth *eth)
+{
+	int ret = 0;
+
+	eth_debug.root = debugfs_create_dir("mtketh", NULL);
+	if (!eth_debug.root) {
+		dev_notice(eth->dev, "%s:err at %d\n", __func__, __LINE__);
+		ret = -ENOMEM;
+	}
+
+	debugfs_create_file("phy_regs", S_IRUGO,
+			    eth_debug.root, eth, &mtketh_debug_fops);
+	debugfs_create_file("phy_reg_w", S_IFREG | S_IWUSR,
+			    eth_debug.root, eth,  &fops_reg_w);
+	debugfs_create_file("reset", S_IFREG | S_IWUSR,
+			    eth_debug.root, eth,  &fops_eth_reset);
+	if (mt7530_exist(eth)) {
+		debugfs_create_file("mt7530sw_regs", S_IRUGO,
+				    eth_debug.root, eth,
+				    &mtketh_debug_mt7530sw_fops);
+		debugfs_create_file("mt7530sw_reg_w", S_IFREG | S_IWUSR,
+				    eth_debug.root, eth,
+				    &fops_mt7530sw_reg_w);
+	}
+	return ret;
+}
+
+void mii_mgr_read_combine(struct mtk_eth *eth, u32 phy_addr, u32 phy_register,
+			  u32 *read_data)
+{
+	if (mt7530_exist(eth) && phy_addr == 31)
+		*read_data = mt7530_mdio_r32(eth, phy_register);
+
+	else
+		*read_data = _mtk_mdio_read(eth, phy_addr, phy_register);
+}
+
+void mii_mgr_write_combine(struct mtk_eth *eth, u32 phy_addr, u32 phy_register,
+			   u32 write_data)
+{
+	if (mt7530_exist(eth) && phy_addr == 31)
+		mt7530_mdio_w32(eth, phy_register, write_data);
+
+	else
+		_mtk_mdio_write(eth, phy_addr, phy_register, write_data);
+}
+
+static void mii_mgr_read_cl45(struct mtk_eth *eth, u32 port, u32 devad, u32 reg, u32 *data)
+{
+	mtk_cl45_ind_read(eth, port, devad, reg, data);
+}
+
+static void mii_mgr_write_cl45(struct mtk_eth *eth, u32 port, u32 devad, u32 reg, u32 data)
+{
+	mtk_cl45_ind_write(eth, port, devad, reg, data);
+}
+
+int mtk_do_priv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+	struct mtk_mii_ioctl_data mii;
+	struct mtk_esw_reg reg;
+
+	switch (cmd) {
+	case MTKETH_MII_READ:
+		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
+			goto err_copy;
+		mii_mgr_read_combine(eth, mii.phy_id, mii.reg_num,
+				     &mii.val_out);
+		if (copy_to_user(ifr->ifr_data, &mii, sizeof(mii)))
+			goto err_copy;
+
+		return 0;
+	case MTKETH_MII_WRITE:
+		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
+			goto err_copy;
+		mii_mgr_write_combine(eth, mii.phy_id, mii.reg_num,
+				      mii.val_in);
+
+		return 0;
+	case MTKETH_MII_READ_CL45:
+		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
+			goto err_copy;
+		mii_mgr_read_cl45(eth, mii.port_num, mii.dev_addr, mii.reg_addr,
+				  &mii.val_out);
+		if (copy_to_user(ifr->ifr_data, &mii, sizeof(mii)))
+			goto err_copy;
+
+		return 0;
+	case MTKETH_MII_WRITE_CL45:
+		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
+			goto err_copy;
+		mii_mgr_write_cl45(eth, mii.port_num, mii.dev_addr, mii.reg_addr,
+				   mii.val_in);
+		return 0;
+	case MTKETH_ESW_REG_READ:
+		if (!mt7530_exist(eth))
+			return -EOPNOTSUPP;
+		if (copy_from_user(&reg, ifr->ifr_data, sizeof(reg)))
+			goto err_copy;
+		if (reg.off > REG_ESW_MAX)
+			return -EINVAL;
+		reg.val = mtk_switch_r32(eth, reg.off);
+
+		if (copy_to_user(ifr->ifr_data, &reg, sizeof(reg)))
+			goto err_copy;
+
+		return 0;
+	case MTKETH_ESW_REG_WRITE:
+		if (!mt7530_exist(eth))
+			return -EOPNOTSUPP;
+		if (copy_from_user(&reg, ifr->ifr_data, sizeof(reg)))
+			goto err_copy;
+		if (reg.off > REG_ESW_MAX)
+			return -EINVAL;
+		mtk_switch_w32(eth, reg.val, reg.off);
+
+		return 0;
+	default:
+		break;
+	}
+
+	return -EOPNOTSUPP;
+err_copy:
+	return -EFAULT;
+}
+
+int esw_cnt_read(struct seq_file *seq, void *v)
+{
+	unsigned int pkt_cnt = 0;
+	int i = 0;
+	struct mtk_eth *eth = g_eth;
+	unsigned int mib_base = MTK_GDM1_TX_GBCNT;
+
+	seq_puts(seq, "\n		  <<CPU>>\n");
+	seq_puts(seq, "		    |\n");
+	seq_puts(seq, "+-----------------------------------------------+\n");
+	seq_puts(seq, "|		  <<PSE>>		        |\n");
+	seq_puts(seq, "+-----------------------------------------------+\n");
+	seq_puts(seq, "		   |\n");
+	seq_puts(seq, "+-----------------------------------------------+\n");
+	seq_puts(seq, "|		  <<GDMA>>		        |\n");
+	seq_printf(seq, "| GDMA1_RX_GBCNT  : %010u (Rx Good Bytes)	|\n",
+		   mtk_r32(eth, mib_base));
+	seq_printf(seq, "| GDMA1_RX_GPCNT  : %010u (Rx Good Pkts)	|\n",
+		   mtk_r32(eth, mib_base+0x08));
+	seq_printf(seq, "| GDMA1_RX_OERCNT : %010u (overflow error)	|\n",
+		   mtk_r32(eth, mib_base+0x10));
+	seq_printf(seq, "| GDMA1_RX_FERCNT : %010u (FCS error)	|\n",
+		   mtk_r32(eth, mib_base+0x14));
+	seq_printf(seq, "| GDMA1_RX_SERCNT : %010u (too short)	|\n",
+		   mtk_r32(eth, mib_base+0x18));
+	seq_printf(seq, "| GDMA1_RX_LERCNT : %010u (too long)	|\n",
+		   mtk_r32(eth, mib_base+0x1C));
+	seq_printf(seq, "| GDMA1_RX_CERCNT : %010u (checksum error)	|\n",
+		   mtk_r32(eth, mib_base+0x20));
+	seq_printf(seq, "| GDMA1_RX_FCCNT  : %010u (flow control)	|\n",
+		   mtk_r32(eth, mib_base+0x24));
+	seq_printf(seq, "| GDMA1_TX_SKIPCNT: %010u (about count)	|\n",
+		   mtk_r32(eth, mib_base+0x28));
+	seq_printf(seq, "| GDMA1_TX_COLCNT : %010u (collision count)	|\n",
+		   mtk_r32(eth, mib_base+0x2C));
+	seq_printf(seq, "| GDMA1_TX_GBCNT  : %010u (Tx Good Bytes)	|\n",
+		   mtk_r32(eth, mib_base+0x30));
+	seq_printf(seq, "| GDMA1_TX_GPCNT  : %010u (Tx Good Pkts)	|\n",
+		   mtk_r32(eth, mib_base+0x38));
+	seq_puts(seq, "|						|\n");
+	seq_printf(seq, "| GDMA2_RX_GBCNT  : %010u (Rx Good Bytes)	|\n",
+		   mtk_r32(eth, mib_base+0x40));
+	seq_printf(seq, "| GDMA2_RX_GPCNT  : %010u (Rx Good Pkts)	|\n",
+		   mtk_r32(eth, mib_base+0x48));
+	seq_printf(seq, "| GDMA2_RX_OERCNT : %010u (overflow error)	|\n",
+		   mtk_r32(eth, mib_base+0x50));
+	seq_printf(seq, "| GDMA2_RX_FERCNT : %010u (FCS error)	|\n",
+		   mtk_r32(eth, mib_base+0x54));
+	seq_printf(seq, "| GDMA2_RX_SERCNT : %010u (too short)	|\n",
+		   mtk_r32(eth, mib_base+0x58));
+	seq_printf(seq, "| GDMA2_RX_LERCNT : %010u (too long)	|\n",
+		   mtk_r32(eth, mib_base+0x5C));
+	seq_printf(seq, "| GDMA2_RX_CERCNT : %010u (checksum error)	|\n",
+		   mtk_r32(eth, mib_base+0x60));
+	seq_printf(seq, "| GDMA2_RX_FCCNT  : %010u (flow control)	|\n",
+		   mtk_r32(eth, mib_base+0x64));
+	seq_printf(seq, "| GDMA2_TX_SKIPCNT: %010u (skip)		|\n",
+		   mtk_r32(eth, mib_base+0x68));
+	seq_printf(seq, "| GDMA2_TX_COLCNT : %010u (collision)	|\n",
+		   mtk_r32(eth, mib_base+0x6C));
+	seq_printf(seq, "| GDMA2_TX_GBCNT  : %010u (Tx Good Bytes)	|\n",
+		   mtk_r32(eth, mib_base+0x70));
+	seq_printf(seq, "| GDMA2_TX_GPCNT  : %010u (Tx Good Pkts)	|\n",
+		   mtk_r32(eth, mib_base+0x78));
+	seq_puts(seq, "+-----------------------------------------------+\n");
+
+	if (!mt7530_exist(eth))
+		return 0;
+
+#define DUMP_EACH_PORT(base)					\
+	do { \
+		for (i = 0; i < 7; i++) {				\
+			pkt_cnt = mt7530_mdio_r32(eth, (base) + (i * 0x100));\
+			seq_printf(seq, "%8u ", pkt_cnt);		\
+		}							\
+		seq_puts(seq, "\n"); \
+	} while (0)
+
+	seq_printf(seq, "===================== %8s %8s %8s %8s %8s %8s %8s\n",
+		   "Port0", "Port1", "Port2", "Port3", "Port4", "Port5",
+		   "Port6");
+	seq_puts(seq, "Tx Drop Packet      :");
+	DUMP_EACH_PORT(0x4000);
+	seq_puts(seq, "Tx CRC Error        :");
+	DUMP_EACH_PORT(0x4004);
+	seq_puts(seq, "Tx Unicast Packet   :");
+	DUMP_EACH_PORT(0x4008);
+	seq_puts(seq, "Tx Multicast Packet :");
+	DUMP_EACH_PORT(0x400C);
+	seq_puts(seq, "Tx Broadcast Packet :");
+	DUMP_EACH_PORT(0x4010);
+	seq_puts(seq, "Tx Collision Event  :");
+	DUMP_EACH_PORT(0x4014);
+	seq_puts(seq, "Tx Pause Packet     :");
+	DUMP_EACH_PORT(0x402C);
+	seq_puts(seq, "Rx Drop Packet      :");
+	DUMP_EACH_PORT(0x4060);
+	seq_puts(seq, "Rx Filtering Packet :");
+	DUMP_EACH_PORT(0x4064);
+	seq_puts(seq, "Rx Unicast Packet   :");
+	DUMP_EACH_PORT(0x4068);
+	seq_puts(seq, "Rx Multicast Packet :");
+	DUMP_EACH_PORT(0x406C);
+	seq_puts(seq, "Rx Broadcast Packet :");
+	DUMP_EACH_PORT(0x4070);
+	seq_puts(seq, "Rx Alignment Error  :");
+	DUMP_EACH_PORT(0x4074);
+	seq_puts(seq, "Rx CRC Error	    :");
+	DUMP_EACH_PORT(0x4078);
+	seq_puts(seq, "Rx Undersize Error  :");
+	DUMP_EACH_PORT(0x407C);
+	seq_puts(seq, "Rx Fragment Error   :");
+	DUMP_EACH_PORT(0x4080);
+	seq_puts(seq, "Rx Oversize Error   :");
+	DUMP_EACH_PORT(0x4084);
+	seq_puts(seq, "Rx Jabber Error     :");
+	DUMP_EACH_PORT(0x4088);
+	seq_puts(seq, "Rx Pause Packet     :");
+	DUMP_EACH_PORT(0x408C);
+	mt7530_mdio_w32(eth, 0x4fe0, 0xf0);
+	mt7530_mdio_w32(eth, 0x4fe0, 0x800000f0);
+
+	seq_puts(seq, "\n");
+
+	return 0;
+}
+
+static int switch_count_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, esw_cnt_read, 0);
+}
+
+static const struct file_operations switch_count_fops = {
+	.owner = THIS_MODULE,
+	.open = switch_count_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+static struct proc_dir_entry *proc_tx_ring, *proc_rx_ring;
+
+int tx_ring_read(struct seq_file *seq, void *v)
+{
+	struct mtk_tx_ring *ring = &g_eth->tx_ring;
+	struct mtk_tx_dma *tx_ring;
+	int i = 0;
+
+	tx_ring =
+	    kmalloc(sizeof(struct mtk_tx_dma) * MTK_DMA_SIZE, GFP_KERNEL);
+	if (!tx_ring) {
+		seq_puts(seq, " allocate temp tx_ring fail.\n");
+		return 0;
+	}
+
+	for (i = 0; i < MTK_DMA_SIZE; i++)
+		tx_ring[i] = ring->dma[i];
+
+	seq_printf(seq, "free count = %d\n", (int)atomic_read(&ring->free_count));
+	seq_printf(seq, "cpu next free: %d\n", (int)(ring->next_free - ring->dma));
+	seq_printf(seq, "cpu last free: %d\n", (int)(ring->last_free - ring->dma));
+	for (i = 0; i < MTK_DMA_SIZE; i++) {
+		dma_addr_t tmp = ring->phys + i * sizeof(*tx_ring);
+
+		seq_printf(seq, "%d (%pad): %08x %08x %08x %08x", i, &tmp,
+			   *(int *)&tx_ring[i].txd1, *(int *)&tx_ring[i].txd2,
+			   *(int *)&tx_ring[i].txd3, *(int *)&tx_ring[i].txd4);
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+		seq_printf(seq, " %08x %08x %08x %08x",
+			   *(int *)&tx_ring[i].txd5, *(int *)&tx_ring[i].txd6,
+			   *(int *)&tx_ring[i].txd7, *(int *)&tx_ring[i].txd8);
+#endif
+		seq_printf(seq, "\n");
+	}
+
+	kfree(tx_ring);
+	return 0;
+}
+
+static int tx_ring_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, tx_ring_read, NULL);
+}
+
+static const struct file_operations tx_ring_fops = {
+	.owner = THIS_MODULE,
+	.open = tx_ring_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+int rx_ring_read(struct seq_file *seq, void *v)
+{
+	struct mtk_rx_ring *ring = &g_eth->rx_ring[0];
+	struct mtk_rx_dma *rx_ring;
+
+	int i = 0;
+
+	rx_ring =
+	    kmalloc(sizeof(struct mtk_rx_dma) * MTK_DMA_SIZE, GFP_KERNEL);
+	if (!rx_ring) {
+		seq_puts(seq, " allocate temp rx_ring fail.\n");
+		return 0;
+	}
+
+	for (i = 0; i < MTK_DMA_SIZE; i++)
+		rx_ring[i] = ring->dma[i];
+
+	seq_printf(seq, "next to read: %d\n",
+		   NEXT_DESP_IDX(ring->calc_idx, MTK_DMA_SIZE));
+	for (i = 0; i < MTK_DMA_SIZE; i++) {
+		seq_printf(seq, "%d: %08x %08x %08x %08x", i,
+			   *(int *)&rx_ring[i].rxd1, *(int *)&rx_ring[i].rxd2,
+			   *(int *)&rx_ring[i].rxd3, *(int *)&rx_ring[i].rxd4);
+#ifdef CONFIG_MEDIATEK_NETSYS_RX_V2
+		seq_printf(seq, " %08x %08x %08x %08x",
+			   *(int *)&rx_ring[i].rxd5, *(int *)&rx_ring[i].rxd6,
+			   *(int *)&rx_ring[i].rxd7, *(int *)&rx_ring[i].rxd8);
+#endif
+		seq_printf(seq, "\n");
+	}
+
+	kfree(rx_ring);
+	return 0;
+}
+
+static int rx_ring_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rx_ring_read, NULL);
+}
+
+static const struct file_operations rx_ring_fops = {
+	.owner = THIS_MODULE,
+	.open = rx_ring_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+int dbg_regs_read(struct seq_file *seq, void *v)
+{
+	struct mtk_eth *eth = g_eth;
+
+	seq_puts(seq, "   <<PSE DEBUG REG DUMP>>\n");
+	seq_printf(seq, "| PSE_FQFC_CFG	: %08x |\n",
+		   mtk_r32(eth, MTK_PSE_FQFC_CFG));
+	seq_printf(seq, "| PSE_IQ_STA1	: %08x |\n",
+		   mtk_r32(eth, MTK_PSE_IQ_STA(0)));
+	seq_printf(seq, "| PSE_IQ_STA2	: %08x |\n",
+		   mtk_r32(eth, MTK_PSE_IQ_STA(1)));
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_TX_V2)) {
+		seq_printf(seq, "| PSE_IQ_STA3	: %08x |\n",
+			   mtk_r32(eth, MTK_PSE_IQ_STA(2)));
+		seq_printf(seq, "| PSE_IQ_STA4	: %08x |\n",
+			   mtk_r32(eth, MTK_PSE_IQ_STA(3)));
+	}
+
+	seq_printf(seq, "| PSE_OQ_STA1	: %08x |\n",
+		   mtk_r32(eth, MTK_PSE_OQ_STA(0)));
+	seq_printf(seq, "| PSE_OQ_STA2	: %08x |\n",
+		   mtk_r32(eth, MTK_PSE_OQ_STA(1)));
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_TX_V2)) {
+		seq_printf(seq, "| PSE_OQ_STA3	: %08x |\n",
+			   mtk_r32(eth, MTK_PSE_OQ_STA(2)));
+		seq_printf(seq, "| PSE_OQ_STA4	: %08x |\n",
+			   mtk_r32(eth, MTK_PSE_OQ_STA(3)));
+	}
+
+	seq_printf(seq, "| QDMA_FQ_CNT	: %08x |\n",
+		   mtk_r32(eth, MTK_QDMA_FQ_CNT));
+	seq_printf(seq, "| FE_PSE_FREE	: %08x |\n",
+		   mtk_r32(eth, MTK_FE_PSE_FREE));
+	seq_printf(seq, "| FE_DROP_FQ	: %08x |\n",
+		   mtk_r32(eth, MTK_FE_DROP_FQ));
+	seq_printf(seq, "| FE_DROP_FC	: %08x |\n",
+		   mtk_r32(eth, MTK_FE_DROP_FC));
+	seq_printf(seq, "| FE_DROP_PPE	: %08x |\n",
+		   mtk_r32(eth, MTK_FE_DROP_PPE));
+	seq_printf(seq, "| GDM1_IG_CTRL	: %08x |\n",
+		   mtk_r32(eth, MTK_GDMA_FWD_CFG(0)));
+	seq_printf(seq, "| GDM2_IG_CTRL	: %08x |\n",
+		   mtk_r32(eth, MTK_GDMA_FWD_CFG(1)));
+	seq_printf(seq, "| MAC_P1_MCR	: %08x |\n",
+		   mtk_r32(eth, MTK_MAC_MCR(0)));
+	seq_printf(seq, "| MAC_P2_MCR	: %08x |\n",
+		   mtk_r32(eth, MTK_MAC_MCR(1)));
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_TX_V2)) {
+		seq_printf(seq, "| FE_CDM1_FSM	: %08x |\n",
+			   mtk_r32(eth, MTK_FE_CDM1_FSM));
+		seq_printf(seq, "| FE_CDM2_FSM	: %08x |\n",
+			   mtk_r32(eth, MTK_FE_CDM2_FSM));
+		seq_printf(seq, "| FE_GDM1_FSM	: %08x |\n",
+			   mtk_r32(eth, MTK_FE_GDM1_FSM));
+		seq_printf(seq, "| FE_GDM2_FSM	: %08x |\n",
+			   mtk_r32(eth, MTK_FE_GDM2_FSM));
+	}
+
+	return 0;
+}
+
+static int dbg_regs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dbg_regs_read, 0);
+}
+
+static const struct file_operations dbg_regs_fops = {
+	.owner = THIS_MODULE,
+	.open = dbg_regs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+#define PROCREG_ESW_CNT         "esw_cnt"
+#define PROCREG_TXRING          "tx_ring"
+#define PROCREG_RXRING          "rx_ring"
+#define PROCREG_DIR             "mtketh"
+#define PROCREG_DBG_REGS        "dbg_regs"
+
+struct proc_dir_entry *proc_reg_dir;
+static struct proc_dir_entry *proc_esw_cnt, *proc_dbg_regs;
+
+int debug_proc_init(struct mtk_eth *eth)
+{
+	g_eth = eth;
+
+	if (!proc_reg_dir)
+		proc_reg_dir = proc_mkdir(PROCREG_DIR, NULL);
+
+	proc_tx_ring =
+	    proc_create(PROCREG_TXRING, 0, proc_reg_dir, &tx_ring_fops);
+	if (!proc_tx_ring)
+		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_TXRING);
+
+	proc_rx_ring =
+	    proc_create(PROCREG_RXRING, 0, proc_reg_dir, &rx_ring_fops);
+	if (!proc_rx_ring)
+		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_RXRING);
+
+	proc_esw_cnt =
+	    proc_create(PROCREG_ESW_CNT, 0, proc_reg_dir, &switch_count_fops);
+	if (!proc_esw_cnt)
+		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_ESW_CNT);
+
+	proc_dbg_regs =
+	    proc_create(PROCREG_DBG_REGS, 0, proc_reg_dir, &dbg_regs_fops);
+	if (!proc_dbg_regs)
+		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_DBG_REGS);
+
+	return 0;
+}
+
+void debug_proc_exit(void)
+{
+	if (proc_tx_ring)
+		remove_proc_entry(PROCREG_TXRING, proc_reg_dir);
+	if (proc_rx_ring)
+		remove_proc_entry(PROCREG_RXRING, proc_reg_dir);
+
+	if (proc_esw_cnt)
+		remove_proc_entry(PROCREG_ESW_CNT, proc_reg_dir);
+
+	if (proc_reg_dir)
+		remove_proc_entry(PROCREG_DIR, 0);
+
+	if (proc_dbg_regs)
+		remove_proc_entry(PROCREG_DBG_REGS, proc_reg_dir);
+}
+
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.h
new file mode 100755
index 0000000..c7924f4
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.h
@@ -0,0 +1,89 @@
+/*
+ *   Copyright (C) 2018 MediaTek Inc.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; version 2 of the License
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
+ *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
+ *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
+ */
+
+#ifndef MTK_ETH_DBG_H
+#define MTK_ETH_DBG_H
+
+/* Debug Purpose Register */
+#define MTK_PSE_FQFC_CFG		0x100
+#define MTK_FE_CDM1_FSM			0x220
+#define MTK_FE_CDM2_FSM			0x224
+#define MTK_FE_GDM1_FSM			0x228
+#define MTK_FE_GDM2_FSM			0x22C
+#define MTK_FE_PSE_FREE			0x240
+#define MTK_FE_DROP_FQ			0x244
+#define MTK_FE_DROP_FC			0x248
+#define MTK_FE_DROP_PPE			0x24C
+
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+#define MTK_PSE_IQ_STA(x)		(0x180 + (x) * 0x4)
+#define MTK_PSE_OQ_STA(x)		(0x1A0 + (x) * 0x4)
+#else
+#define MTK_PSE_IQ_STA(x)		(0x110 + (x) * 0x4)
+#define MTK_PSE_OQ_STA(x)		(0x118 + (x) * 0x4)
+#endif
+
+#define MTKETH_MII_READ                  0x89F3
+#define MTKETH_MII_WRITE                 0x89F4
+#define MTKETH_ESW_REG_READ              0x89F1
+#define MTKETH_ESW_REG_WRITE             0x89F2
+#define MTKETH_MII_READ_CL45             0x89FC
+#define MTKETH_MII_WRITE_CL45            0x89FD
+#define REG_ESW_MAX                     0xFC
+
+struct mtk_esw_reg {
+	unsigned int off;
+	unsigned int val;
+};
+
+struct mtk_mii_ioctl_data {
+	unsigned int phy_id;
+	unsigned int reg_num;
+	unsigned int val_in;
+	unsigned int val_out;
+	unsigned int port_num;
+	unsigned int dev_addr;
+	unsigned int reg_addr;
+};
+
+#if defined(CONFIG_NET_DSA_MT7530) || defined(CONFIG_MT753X_GSW)
+static inline bool mt7530_exist(struct mtk_eth *eth)
+{
+	return true;
+}
+#else
+static inline bool mt7530_exist(struct mtk_eth *eth)
+{
+	return false;
+}
+#endif
+
+extern u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg);
+extern u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
+		    u32 phy_register, u32 write_data);
+
+extern u32 mtk_cl45_ind_read(struct mtk_eth *eth, u32 port, u32 devad, u32 reg, u32 *data);
+extern u32 mtk_cl45_ind_write(struct mtk_eth *eth, u32 port, u32 devad, u32 reg, u32 data);
+
+int debug_proc_init(struct mtk_eth *eth);
+void debug_proc_exit(void);
+
+int mtketh_debugfs_init(struct mtk_eth *eth);
+void mtketh_debugfs_exit(struct mtk_eth *eth);
+int mtk_do_priv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+#endif /* MTK_ETH_DBG_H */
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_path.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_path.c
new file mode 100755
index 0000000..ef11cf3
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_path.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018-2019 MediaTek Inc.
+
+/* A library for configuring path from GMAC/GDM to target PHY
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#include <linux/phy.h>
+#include <linux/regmap.h>
+
+#include "mtk_eth_soc.h"
+
+/* One configurable mux in the GMAC/GDM path fabric: a human-readable name,
+ * the SoC capability bit that says the mux exists, and the callback that
+ * programs it for a given path.
+ */
+struct mtk_eth_muxc {
+	const char	*name;
+	int		cap_bit;
+	int		(*set_path)(struct mtk_eth *eth, int path);
+};
+
+/* Map a MTK_ETH_PATH_* constant to a short printable name for log output */
+static const char *mtk_eth_path_name(int path)
+{
+	const char *name;
+
+	switch (path) {
+	case MTK_ETH_PATH_GMAC1_RGMII:
+		name = "gmac1_rgmii";
+		break;
+	case MTK_ETH_PATH_GMAC1_TRGMII:
+		name = "gmac1_trgmii";
+		break;
+	case MTK_ETH_PATH_GMAC1_SGMII:
+		name = "gmac1_sgmii";
+		break;
+	case MTK_ETH_PATH_GMAC2_RGMII:
+		name = "gmac2_rgmii";
+		break;
+	case MTK_ETH_PATH_GMAC2_SGMII:
+		name = "gmac2_sgmii";
+		break;
+	case MTK_ETH_PATH_GMAC2_GEPHY:
+		name = "gmac2_gephy";
+		break;
+	case MTK_ETH_PATH_GDM1_ESW:
+		name = "gdm1_esw";
+		break;
+	default:
+		name = "unknown path";
+		break;
+	}
+
+	return name;
+}
+
+/* Steer GDM1 traffic either to GMAC1's SGMII or to the embedded switch by
+ * toggling MTK_MUX_TO_ESW in MTK_MAC_MISC.  Paths that do not involve this
+ * mux leave the register untouched.  Always returns 0.
+ *
+ * Fix: dropped the stray ';' after the switch block (extra null statement,
+ * flagged by checkpatch / -Wextra-semi-stmt).
+ */
+static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, int path)
+{
+	bool updated = true;
+	u32 val, mask, set;
+
+	switch (path) {
+	case MTK_ETH_PATH_GMAC1_SGMII:
+		mask = ~(u32)MTK_MUX_TO_ESW;
+		set = 0;
+		break;
+	case MTK_ETH_PATH_GDM1_ESW:
+		mask = ~(u32)MTK_MUX_TO_ESW;
+		set = MTK_MUX_TO_ESW;
+		break;
+	default:
+		updated = false;
+		break;
+	}
+
+	if (updated) {
+		val = mtk_r32(eth, MTK_MAC_MISC);
+		val = (val & mask) | set;
+		mtk_w32(eth, val, MTK_MAC_MISC);
+	}
+
+	dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+		mtk_eth_path_name(path), __func__, updated);
+
+	return 0;
+}
+
+/* Attach the built-in gigabit PHY to GMAC2 via INFRA_MISC2.  Always
+ * returns 0; unrelated paths are ignored.
+ */
+static int set_mux_gmac2_gmac0_to_gephy(struct mtk_eth *eth, int path)
+{
+	unsigned int val = 0;
+	bool updated = true;
+
+	switch (path) {
+	case MTK_ETH_PATH_GMAC2_GEPHY:
+		/* regmap_update_bits() masks val with GEPHY_MAC_SEL, so the
+		 * inverted constant effectively CLEARS the GEPHY_MAC_SEL bit
+		 * (equivalent to val = 0 under this mask).
+		 */
+		val = ~(u32)GEPHY_MAC_SEL;
+		break;
+	default:
+		updated = false;
+		break;
+	}
+
+	if (updated)
+		regmap_update_bits(eth->infra, INFRA_MISC2, GEPHY_MAC_SEL, val);
+
+	dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+		mtk_eth_path_name(path), __func__, updated);
+
+	return 0;
+}
+
+/* Hand the shared USB3/QSGMII phy (QPHY) to GMAC2 for SGMII operation by
+ * setting CO_QPHY_SEL in INFRA_MISC2.  Always returns 0.
+ */
+static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, int path)
+{
+	unsigned int val = 0;
+	bool updated = true;
+
+	switch (path) {
+	case MTK_ETH_PATH_GMAC2_SGMII:
+		val = CO_QPHY_SEL;
+		break;
+	default:
+		updated = false;
+		break;
+	}
+
+	if (updated)
+		regmap_update_bits(eth->infra, INFRA_MISC2, CO_QPHY_SEL, val);
+
+	dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+		mtk_eth_path_name(path), __func__, updated);
+
+	return 0;
+}
+
+/* Route GMAC1/GMAC2 to the SGMII or RGMII block via the SGMII select bits
+ * in ETHSYS_SYSCFG0.  For the RGMII cases the select bits are cleared only
+ * when they currently point at the very GMAC being reconfigured.  Always
+ * returns 0.
+ *
+ * Fix: the RGMII branch compared 'path' against the MTK_GMAC*_RGMII
+ * capability macros while the case labels that reach this code use the
+ * MTK_ETH_PATH_GMAC*_RGMII path constants — the comparison now uses the
+ * same constant family as the case labels (matching the upstream driver).
+ * Also dropped the stray ';' after the switch block.
+ */
+static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, int path)
+{
+	unsigned int val = 0;
+	bool updated = true;
+
+	switch (path) {
+	case MTK_ETH_PATH_GMAC1_SGMII:
+		val = SYSCFG0_SGMII_GMAC1;
+		break;
+	case MTK_ETH_PATH_GMAC2_SGMII:
+		val = SYSCFG0_SGMII_GMAC2;
+		break;
+	case MTK_ETH_PATH_GMAC1_RGMII:
+	case MTK_ETH_PATH_GMAC2_RGMII:
+		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+		val &= SYSCFG0_SGMII_MASK;
+
+		if ((path == MTK_ETH_PATH_GMAC1_RGMII &&
+		     val == SYSCFG0_SGMII_GMAC1) ||
+		    (path == MTK_ETH_PATH_GMAC2_RGMII &&
+		     val == SYSCFG0_SGMII_GMAC2))
+			val = 0;
+		else
+			updated = false;
+		break;
+	default:
+		updated = false;
+		break;
+	}
+
+	if (updated)
+		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+				   SYSCFG0_SGMII_MASK, val);
+
+	dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+		mtk_eth_path_name(path), __func__, updated);
+
+	return 0;
+}
+
+/* V2-SoC variant: read-modify-write the SGMII select bits in
+ * ETHSYS_SYSCFG0 to steer GMAC1/GMAC2 between the internal GEPHY and the
+ * SGMII block.  Always returns 0.
+ *
+ * Fix: dropped the stray ';' after the switch block and added the missing
+ * 'break' in the default case for consistency with the sibling helpers.
+ */
+static int set_mux_gmac12_to_gephy_sgmii(struct mtk_eth *eth, int path)
+{
+	unsigned int val = 0;
+	bool updated = true;
+
+	regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+
+	switch (path) {
+	case MTK_ETH_PATH_GMAC1_SGMII:
+		val |= SYSCFG0_SGMII_GMAC1_V2;
+		break;
+	case MTK_ETH_PATH_GMAC2_GEPHY:
+		val &= ~(u32)SYSCFG0_SGMII_GMAC2_V2;
+		break;
+	case MTK_ETH_PATH_GMAC2_SGMII:
+		val |= SYSCFG0_SGMII_GMAC2_V2;
+		break;
+	default:
+		updated = false;
+		break;
+	}
+
+	if (updated)
+		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+				   SYSCFG0_SGMII_MASK, val);
+
+	dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+		mtk_eth_path_name(path), __func__, updated);
+
+	return 0;
+}
+
+/* All muxes in the path fabric.  mtk_eth_mux_setup() walks this table and
+ * invokes set_path() for every mux whose cap_bit the SoC advertises.
+ */
+static const struct mtk_eth_muxc mtk_eth_muxc[] = {
+	{
+		.name = "mux_gdm1_to_gmac1_esw",
+		.cap_bit = MTK_ETH_MUX_GDM1_TO_GMAC1_ESW,
+		.set_path = set_mux_gdm1_to_gmac1_esw,
+	}, {
+		.name = "mux_gmac2_gmac0_to_gephy",
+		.cap_bit = MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY,
+		.set_path = set_mux_gmac2_gmac0_to_gephy,
+	}, {
+		.name = "mux_u3_gmac2_to_qphy",
+		.cap_bit = MTK_ETH_MUX_U3_GMAC2_TO_QPHY,
+		.set_path = set_mux_u3_gmac2_to_qphy,
+	}, {
+		.name = "mux_gmac1_gmac2_to_sgmii_rgmii",
+		.cap_bit = MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII,
+		.set_path = set_mux_gmac1_gmac2_to_sgmii_rgmii,
+	}, {
+		.name = "mux_gmac12_to_gephy_sgmii",
+		.cap_bit = MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII,
+		.set_path = set_mux_gmac12_to_gephy_sgmii,
+	},
+};
+
+/* Configure every mux present on this SoC for the requested path.
+ * Returns -EINVAL when the SoC does not support the path, the first
+ * set_path() error, or 0 on success (including SoCs without any mux).
+ */
+static int mtk_eth_mux_setup(struct mtk_eth *eth, int path)
+{
+	int i;
+
+	if (!MTK_HAS_CAPS(eth->soc->caps, path)) {
+		dev_err(eth->dev, "path %s isn't support on the SoC\n",
+			mtk_eth_path_name(path));
+		return -EINVAL;
+	}
+
+	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_MUX))
+		return 0;
+
+	/* Setup MUX in path fabric */
+	for (i = 0; i < ARRAY_SIZE(mtk_eth_muxc); i++) {
+		int err;
+
+		if (!MTK_HAS_CAPS(eth->soc->caps, mtk_eth_muxc[i].cap_bit)) {
+			dev_dbg(eth->dev, "mux %s isn't present on the SoC\n",
+				mtk_eth_muxc[i].name);
+			continue;
+		}
+
+		err = mtk_eth_muxc[i].set_path(eth, path);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+/* Select the SGMII path for the given GMAC (0 -> GMAC1, otherwise GMAC2)
+ * and program all muxes along it.
+ */
+int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
+{
+	int path;
+
+	if (mac_id == 0)
+		path = MTK_ETH_PATH_GMAC1_SGMII;
+	else
+		path = MTK_ETH_PATH_GMAC2_SGMII;
+
+	/* Setup proper MUXes along the path */
+	return mtk_eth_mux_setup(eth, path);
+}
+
+/* Select the internal-GEPHY path; only GMAC2 (mac_id == 1) can reach the
+ * GEPHY, any other MAC id is rejected with -EINVAL.
+ */
+int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
+{
+	if (mac_id != 1)
+		return -EINVAL;
+
+	/* Setup proper MUXes along the path */
+	return mtk_eth_mux_setup(eth, MTK_ETH_PATH_GMAC2_GEPHY);
+}
+
+/* Select the RGMII path for the given GMAC (0 -> GMAC1, otherwise GMAC2)
+ * and program all muxes along it.
+ */
+int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id)
+{
+	int path;
+
+	if (mac_id == 0)
+		path = MTK_ETH_PATH_GMAC1_RGMII;
+	else
+		path = MTK_ETH_PATH_GMAC2_RGMII;
+
+	/* Setup proper MUXes along the path */
+	return mtk_eth_mux_setup(eth, path);
+}
+
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c
new file mode 100755
index 0000000..5aa0bc0
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -0,0 +1,3465 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
+ *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
+ *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
+ */
+
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/if_vlan.h>
+#include <linux/reset.h>
+#include <linux/tcp.h>
+#include <linux/interrupt.h>
+#include <linux/pinctrl/devinfo.h>
+#include <linux/phylink.h>
+#include <net/dsa.h>
+
+#include "mtk_eth_soc.h"
+#include "mtk_eth_dbg.h"
+
+#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
+#include "mtk_hnat/nf_hnat_mtk.h"
+#endif
+
+/* Module parameter controlling netif message verbosity; -1 keeps defaults */
+static int mtk_msg_level = -1;
+module_param_named(msg_level, mtk_msg_level, int, 0);
+MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
+
+/* Build one ethtool stat entry: name string + u64 index into mtk_hw_stats */
+#define MTK_ETHTOOL_STAT(x) { #x, \
+			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
+
+/* strings used by ethtool */
+static const struct mtk_ethtool_stats {
+	char str[ETH_GSTRING_LEN];
+	u32 offset;
+} mtk_ethtool_stats[] = {
+	MTK_ETHTOOL_STAT(tx_bytes),
+	MTK_ETHTOOL_STAT(tx_packets),
+	MTK_ETHTOOL_STAT(tx_skip),
+	MTK_ETHTOOL_STAT(tx_collisions),
+	MTK_ETHTOOL_STAT(rx_bytes),
+	MTK_ETHTOOL_STAT(rx_packets),
+	MTK_ETHTOOL_STAT(rx_overflow),
+	MTK_ETHTOOL_STAT(rx_fcs_errors),
+	MTK_ETHTOOL_STAT(rx_short_errors),
+	MTK_ETHTOOL_STAT(rx_long_errors),
+	MTK_ETHTOOL_STAT(rx_checksum_errors),
+	MTK_ETHTOOL_STAT(rx_flow_control_packets),
+};
+
+/* Clock names looked up from the device tree; indices must match the
+ * MTK_CLK_* enum used with eth->clks[] elsewhere in the driver.
+ */
+static const char * const mtk_clks_source_name[] = {
+	"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
+	"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
+	"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
+	"sgmii_ck", "eth2pll", "wocpu0","wocpu1",
+};
+
+/* Write a 32-bit value to a frame-engine register (offset from eth->base) */
+void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
+{
+	__raw_writel(val, eth->base + reg);
+}
+
+/* Read a 32-bit frame-engine register (offset from eth->base) */
+u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
+{
+	return __raw_readl(eth->base + reg);
+}
+
+/* Read-modify-write: clear @mask then set @set in register @reg.
+ * Returns the value written back.
+ *
+ * Fix: previously returned 'reg' (the register offset) instead of the
+ * computed value — almost certainly a typo; every caller in this file
+ * ignores the return value.
+ */
+u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
+{
+	u32 val;
+
+	val = mtk_r32(eth, reg);
+	val &= ~mask;
+	val |= set;
+	mtk_w32(eth, val, reg);
+	return val;
+}
+
+/* Poll until the PHY indirect-access controller is idle (PHY_IAC_ACCESS
+ * clear).  Returns 0 when idle, -1 after PHY_IAC_TIMEOUT jiffies.
+ */
+static int mtk_mdio_busy_wait(struct mtk_eth *eth)
+{
+	unsigned long t_start = jiffies;
+
+	while (1) {
+		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
+			return 0;
+		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
+			break;
+		/* busy bit is hardware-cleared; back off briefly */
+		usleep_range(10, 20);
+	}
+
+	dev_err(eth->dev, "mdio: MDIO timeout\n");
+	return -1;
+}
+
+/* Clause-22 MDIO write via the PHY indirect-access (IAC) register.
+ * Returns 0 on success; (u32)-1 on busy-wait timeout — callers must treat
+ * a non-zero return as failure despite the unsigned return type.
+ */
+u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
+			   u32 phy_register, u32 write_data)
+{
+	if (mtk_mdio_busy_wait(eth))
+		return -1;
+
+	/* IAC data field is 16 bits wide */
+	write_data &= 0xffff;
+
+	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
+		(phy_register << PHY_IAC_REG_SHIFT) |
+		(phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
+		MTK_PHY_IAC);
+
+	if (mtk_mdio_busy_wait(eth))
+		return -1;
+
+	return 0;
+}
+
+/* Clause-22 MDIO read via the PHY indirect-access (IAC) register.
+ * Returns the 16-bit register value, or 0xffff on busy-wait timeout
+ * (indistinguishable from a genuine 0xffff read).
+ */
+u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
+{
+	u32 d;
+
+	if (mtk_mdio_busy_wait(eth))
+		return 0xffff;
+
+	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
+		(phy_reg << PHY_IAC_REG_SHIFT) |
+		(phy_addr << PHY_IAC_ADDR_SHIFT),
+		MTK_PHY_IAC);
+
+	if (mtk_mdio_busy_wait(eth))
+		return 0xffff;
+
+	/* result is latched in the low 16 bits of the IAC register */
+	d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;
+
+	return d;
+}
+
+/* mii_bus .write callback; bus->priv is the mtk_eth set at registration */
+static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
+			  int phy_reg, u16 val)
+{
+	return _mtk_mdio_write(bus->priv, phy_addr, phy_reg, val);
+}
+
+/* mii_bus .read callback; bus->priv is the mtk_eth set at registration */
+static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
+{
+	return _mtk_mdio_read(bus->priv, phy_addr, phy_reg);
+}
+
+/* Indirect Clause-45 read through the Clause-22 MMD access registers
+ * (MII_MMD_ACC_CTL_REG / MII_MMD_ADDR_DATA_REG), serialised on the MDIO
+ * bus mutex.  The value lands in *data; always returns 0 (underlying MDIO
+ * failures are not propagated).
+ *
+ * Fix: re-indented with tabs — the original used spaces, breaking the
+ * file's kernel-style indentation.
+ */
+u32 mtk_cl45_ind_read(struct mtk_eth *eth, u32 port, u32 devad, u32 reg, u32 *data)
+{
+	mutex_lock(&eth->mii_bus->mdio_lock);
+
+	_mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, devad);
+	_mtk_mdio_write(eth, port, MII_MMD_ADDR_DATA_REG, reg);
+	_mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, MMD_OP_MODE_DATA | devad);
+	*data = _mtk_mdio_read(eth, port, MII_MMD_ADDR_DATA_REG);
+
+	mutex_unlock(&eth->mii_bus->mdio_lock);
+
+	return 0;
+}
+
+/* Indirect Clause-45 write, mirror of mtk_cl45_ind_read().  Always
+ * returns 0 (underlying MDIO failures are not propagated).
+ *
+ * Fix: re-indented with tabs — the original used spaces, breaking the
+ * file's kernel-style indentation.
+ */
+u32 mtk_cl45_ind_write(struct mtk_eth *eth, u32 port, u32 devad, u32 reg, u32 data)
+{
+	mutex_lock(&eth->mii_bus->mdio_lock);
+
+	_mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, devad);
+	_mtk_mdio_write(eth, port, MII_MMD_ADDR_DATA_REG, reg);
+	_mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, MMD_OP_MODE_DATA | devad);
+	_mtk_mdio_write(eth, port, MII_MMD_ADDR_DATA_REG, data);
+
+	mutex_unlock(&eth->mii_bus->mdio_lock);
+
+	return 0;
+}
+
+/* MT7621-specific GMAC0 clock setup: select the DDR PLL as TRGMII clock
+ * source when the interface is TRGMII, plain RGMII clocking otherwise.
+ * Returns -EOPNOTSUPP for TRGMII on DDR2 systems (unsupported combination).
+ */
+static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
+				     phy_interface_t interface)
+{
+	u32 val;
+
+	/* Check DDR memory type.
+	 * Currently TRGMII mode with DDR2 memory is not supported.
+	 */
+	regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
+	if (interface == PHY_INTERFACE_MODE_TRGMII &&
+	    val & SYSCFG_DRAM_TYPE_DDR2) {
+		dev_err(eth->dev,
+			"TRGMII mode with DDR2 memory is not supported!\n");
+		return -EOPNOTSUPP;
+	}
+
+	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
+		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
+
+	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
+			   ETHSYS_TRGMII_MT7621_MASK, val);
+
+	return 0;
+}
+
+/* Configure GMAC0 interface mode and TRGMII PLL/clock dividers for the
+ * given interface and link speed (non-MT7621 SoCs).  Clock-set failures
+ * are logged but not propagated.
+ */
+static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
+				   phy_interface_t interface, int speed)
+{
+	u32 val;
+	int ret;
+
+	if (interface == PHY_INTERFACE_MODE_TRGMII) {
+		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
+		/* TRGMII always runs the PLL at 500 MHz */
+		val = 500000000;
+		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
+		if (ret)
+			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
+		return;
+	}
+
+	val = (speed == SPEED_1000) ?
+		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
+	mtk_w32(eth, val, INTF_MODE);
+
+	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
+			   ETHSYS_TRGMII_CLK_SEL362_5,
+			   ETHSYS_TRGMII_CLK_SEL362_5);
+
+	val = (speed == SPEED_1000) ? 250000000 : 500000000;
+	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
+	if (ret)
+		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
+
+	/* RX/TX clock controls differ between gigabit and 10/100 rates */
+	val = (speed == SPEED_1000) ?
+		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
+	mtk_w32(eth, val, TRGMII_RCK_CTRL);
+
+	val = (speed == SPEED_1000) ?
+		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
+	mtk_w32(eth, val, TRGMII_TCK_CTRL);
+}
+
+/* phylink .mac_config callback: program the path fabric, ge_mode, SGMII
+ * block and the per-MAC control register (MCR) for the requested interface,
+ * speed, duplex and pause settings.  Errors are logged via the err_phy /
+ * init_err labels; the callback itself cannot return a status.
+ */
+static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
+			   const struct phylink_link_state *state)
+{
+	struct mtk_mac *mac = container_of(config, struct mtk_mac,
+					   phylink_config);
+	struct mtk_eth *eth = mac->hw;
+	u32 mcr_cur, mcr_new, sid, i;
+	int val, ge_mode, err;
+
+	/* MT76x8 has no hardware path settings for the MAC; for all other
+	 * SoCs, reprogram the fabric only when the interface type changed.
+	 */
+	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
+	    mac->interface != state->interface) {
+		/* Setup soc pin functions */
+		switch (state->interface) {
+		case PHY_INTERFACE_MODE_TRGMII:
+			/* TRGMII only exists on GMAC1 */
+			if (mac->id)
+				goto err_phy;
+			if (!MTK_HAS_CAPS(mac->hw->soc->caps,
+					  MTK_GMAC1_TRGMII))
+				goto err_phy;
+			/* fall through */
+		case PHY_INTERFACE_MODE_RGMII_TXID:
+		case PHY_INTERFACE_MODE_RGMII_RXID:
+		case PHY_INTERFACE_MODE_RGMII_ID:
+		case PHY_INTERFACE_MODE_RGMII:
+		case PHY_INTERFACE_MODE_MII:
+		case PHY_INTERFACE_MODE_REVMII:
+		case PHY_INTERFACE_MODE_RMII:
+			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
+				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
+				if (err)
+					goto init_err;
+			}
+			break;
+		case PHY_INTERFACE_MODE_1000BASEX:
+		case PHY_INTERFACE_MODE_2500BASEX:
+		case PHY_INTERFACE_MODE_SGMII:
+			if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
+				err = mtk_gmac_sgmii_path_setup(eth, mac->id);
+				if (err)
+					goto init_err;
+			}
+			break;
+		case PHY_INTERFACE_MODE_GMII:
+			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
+				err = mtk_gmac_gephy_path_setup(eth, mac->id);
+				if (err)
+					goto init_err;
+			}
+			break;
+		default:
+			goto err_phy;
+		}
+
+		/* Setup clock for 1st gmac (TRGMII/RGMII only) */
+		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
+		    !phy_interface_mode_is_8023z(state->interface) &&
+		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
+			if (MTK_HAS_CAPS(mac->hw->soc->caps,
+					 MTK_TRGMII_MT7621_CLK)) {
+				if (mt7621_gmac0_rgmii_adjust(mac->hw,
+							      state->interface))
+					goto err_phy;
+			} else {
+				mtk_gmac0_rgmii_adjust(mac->hw,
+						       state->interface,
+						       state->speed);
+
+				/* mt7623_pad_clk_setup */
+				for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
+					mtk_w32(mac->hw,
+						TD_DM_DRVP(8) | TD_DM_DRVN(8),
+						TRGMII_TD_ODT(i));
+
+				/* Assert/release MT7623 RXC reset */
+				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
+					TRGMII_RCK_CTRL);
+				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
+			}
+		}
+
+		/* ge_mode encodes the MII flavour in SYSCFG0 (0 = default) */
+		ge_mode = 0;
+		switch (state->interface) {
+		case PHY_INTERFACE_MODE_MII:
+		case PHY_INTERFACE_MODE_GMII:
+			ge_mode = 1;
+			break;
+		case PHY_INTERFACE_MODE_REVMII:
+			ge_mode = 2;
+			break;
+		case PHY_INTERFACE_MODE_RMII:
+			if (mac->id)
+				goto err_phy;
+			ge_mode = 3;
+			break;
+		default:
+			break;
+		}
+
+		/* put the gmac into the right mode */
+		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
+		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
+		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
+
+		mac->interface = state->interface;
+	}
+
+	/* SGMII */
+	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
+	    phy_interface_mode_is_8023z(state->interface)) {
+		/* The path GMAC to SGMII will be enabled once the SGMIISYS is
+		 * being setup done.
+		 */
+		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+
+		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+				   SYSCFG0_SGMII_MASK,
+				   ~(u32)SYSCFG0_SGMII_MASK);
+
+		/* Decide how GMAC and SGMIISYS be mapped */
+		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
+		       0 : mac->id;
+
+		/* Setup SGMIISYS with the determined property */
+		if (state->interface != PHY_INTERFACE_MODE_SGMII)
+			err = mtk_sgmii_setup_mode_force(eth->sgmii, sid,
+							 state);
+		else if (phylink_autoneg_inband(mode))
+			err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);
+
+		if (err)
+			goto init_err;
+
+		/* restore the SGMII select bits saved before the setup */
+		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+				   SYSCFG0_SGMII_MASK, val);
+	} else if (phylink_autoneg_inband(mode)) {
+		dev_err(eth->dev,
+			"In-band mode not supported in non SGMII mode!\n");
+		return;
+	}
+
+	/* Setup gmac */
+	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+	mcr_new = mcr_cur;
+	mcr_new &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
+		     MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
+		     MAC_MCR_FORCE_RX_FC);
+	mcr_new |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
+		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
+
+	/* 2.5G uses the same MCR speed encoding as 1G */
+	switch (state->speed) {
+	case SPEED_2500:
+	case SPEED_1000:
+		mcr_new |= MAC_MCR_SPEED_1000;
+		break;
+	case SPEED_100:
+		mcr_new |= MAC_MCR_SPEED_100;
+		break;
+	}
+	if (state->duplex == DUPLEX_FULL) {
+		mcr_new |= MAC_MCR_FORCE_DPX;
+		if (state->pause & MLO_PAUSE_TX)
+			mcr_new |= MAC_MCR_FORCE_TX_FC;
+		if (state->pause & MLO_PAUSE_RX)
+			mcr_new |= MAC_MCR_FORCE_RX_FC;
+	}
+
+	/* Only update control register when needed! */
+	if (mcr_new != mcr_cur)
+		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
+
+	return;
+
+err_phy:
+	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
+		mac->id, phy_modes(state->interface));
+	return;
+
+init_err:
+	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
+		mac->id, phy_modes(state->interface), err);
+}
+
+/* phylink .mac_link_state callback: decode link, duplex, speed and pause
+ * from the per-MAC status register (MSR).  Returns 1 (state valid).
+ */
+static int mtk_mac_link_state(struct phylink_config *config,
+			      struct phylink_link_state *state)
+{
+	struct mtk_mac *mac = container_of(config, struct mtk_mac,
+					   phylink_config);
+	u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
+
+	state->link = (pmsr & MAC_MSR_LINK);
+	/* MAC_MSR_DPX is bit 1, so the shift yields 0/1 for half/full —
+	 * NOTE(review): relies on MAC_MSR_DPX being BIT(1); confirm in the
+	 * header.
+	 */
+	state->duplex = (pmsr & MAC_MSR_DPX) >> 1;
+
+	switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
+	case 0:
+		state->speed = SPEED_10;
+		break;
+	case MAC_MSR_SPEED_100:
+		state->speed = SPEED_100;
+		break;
+	case MAC_MSR_SPEED_1000:
+		state->speed = SPEED_1000;
+		break;
+	default:
+		state->speed = SPEED_UNKNOWN;
+		break;
+	}
+
+	state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
+	if (pmsr & MAC_MSR_RX_FC)
+		state->pause |= MLO_PAUSE_RX;
+	if (pmsr & MAC_MSR_TX_FC)
+		state->pause |= MLO_PAUSE_TX;
+
+	return 1;
+}
+
+/* phylink .mac_an_restart callback: kick SGMII autonegotiation again */
+static void mtk_mac_an_restart(struct phylink_config *config)
+{
+	struct mtk_mac *mac = container_of(config, struct mtk_mac,
+					   phylink_config);
+
+	mtk_sgmii_restart_an(mac->hw, mac->id);
+}
+
+/* phylink .mac_link_down callback: stop both MAC directions by clearing
+ * the TX/RX enable bits in this MAC's control register.
+ */
+static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
+			      phy_interface_t interface)
+{
+	struct mtk_mac *mac = container_of(config, struct mtk_mac,
+					   phylink_config);
+	u32 reg = MTK_MAC_MCR(mac->id);
+	u32 mcr = mtk_r32(mac->hw, reg) & ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
+
+	mtk_w32(mac->hw, mcr, reg);
+}
+
+/* phylink .mac_link_up callback: enable both MAC directions by setting
+ * the TX/RX enable bits in this MAC's control register.
+ */
+static void mtk_mac_link_up(struct phylink_config *config, unsigned int mode,
+			    phy_interface_t interface,
+			    struct phy_device *phy)
+{
+	struct mtk_mac *mac = container_of(config, struct mtk_mac,
+					   phylink_config);
+	u32 reg = MTK_MAC_MCR(mac->id);
+	u32 mcr = mtk_r32(mac->hw, reg) | MAC_MCR_TX_EN | MAC_MCR_RX_EN;
+
+	mtk_w32(mac->hw, mcr, reg);
+}
+
+/* phylink .validate callback: restrict the supported/advertising link-mode
+ * masks to what this SoC and interface combination can do.  The switch
+ * relies on deliberate fall-through to accumulate slower modes.
+ */
+static void mtk_validate(struct phylink_config *config,
+			 unsigned long *supported,
+			 struct phylink_link_state *state)
+{
+	struct mtk_mac *mac = container_of(config, struct mtk_mac,
+					   phylink_config);
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+	/* Reject interfaces the SoC capability bits do not back */
+	if (state->interface != PHY_INTERFACE_MODE_NA &&
+	    state->interface != PHY_INTERFACE_MODE_MII &&
+	    state->interface != PHY_INTERFACE_MODE_GMII &&
+	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
+	      phy_interface_mode_is_rgmii(state->interface)) &&
+	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
+	      !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
+	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
+	      (state->interface == PHY_INTERFACE_MODE_SGMII ||
+	       phy_interface_mode_is_8023z(state->interface)))) {
+		linkmode_zero(supported);
+		return;
+	}
+
+	phylink_set_port_modes(mask);
+	phylink_set(mask, Autoneg);
+
+	switch (state->interface) {
+	case PHY_INTERFACE_MODE_TRGMII:
+		phylink_set(mask, 1000baseT_Full);
+		break;
+	case PHY_INTERFACE_MODE_1000BASEX:
+	case PHY_INTERFACE_MODE_2500BASEX:
+		phylink_set(mask, 1000baseX_Full);
+		phylink_set(mask, 2500baseX_Full);
+		break;
+	case PHY_INTERFACE_MODE_GMII:
+	case PHY_INTERFACE_MODE_RGMII:
+	case PHY_INTERFACE_MODE_RGMII_ID:
+	case PHY_INTERFACE_MODE_RGMII_RXID:
+	case PHY_INTERFACE_MODE_RGMII_TXID:
+		phylink_set(mask, 1000baseT_Half);
+		/* fall through */
+	case PHY_INTERFACE_MODE_SGMII:
+		phylink_set(mask, 1000baseT_Full);
+		phylink_set(mask, 1000baseX_Full);
+		/* fall through */
+	case PHY_INTERFACE_MODE_MII:
+	case PHY_INTERFACE_MODE_RMII:
+	case PHY_INTERFACE_MODE_REVMII:
+	case PHY_INTERFACE_MODE_NA:
+	default:
+		phylink_set(mask, 10baseT_Half);
+		phylink_set(mask, 10baseT_Full);
+		phylink_set(mask, 100baseT_Half);
+		phylink_set(mask, 100baseT_Full);
+		break;
+	}
+
+	/* With interface NA, advertise everything the SoC could support */
+	if (state->interface == PHY_INTERFACE_MODE_NA) {
+		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
+			phylink_set(mask, 1000baseT_Full);
+			phylink_set(mask, 1000baseX_Full);
+			phylink_set(mask, 2500baseX_Full);
+		}
+		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
+			phylink_set(mask, 1000baseT_Full);
+			phylink_set(mask, 1000baseT_Half);
+			phylink_set(mask, 1000baseX_Full);
+		}
+		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
+			phylink_set(mask, 1000baseT_Full);
+			phylink_set(mask, 1000baseT_Half);
+		}
+	}
+
+	phylink_set(mask, Pause);
+	phylink_set(mask, Asym_Pause);
+
+	linkmode_and(supported, supported, mask);
+	linkmode_and(state->advertising, state->advertising, mask);
+
+	/* We can only operate at 2500BaseX or 1000BaseX. If requested
+	 * to advertise both, only report advertising at 2500BaseX.
+	 */
+	phylink_helper_basex_speed(state);
+}
+
+/* phylink MAC operations registered for every GMAC on this driver */
+static const struct phylink_mac_ops mtk_phylink_ops = {
+	.validate = mtk_validate,
+	.mac_link_state = mtk_mac_link_state,
+	.mac_an_restart = mtk_mac_an_restart,
+	.mac_config = mtk_mac_config,
+	.mac_link_down = mtk_mac_link_down,
+	.mac_link_up = mtk_mac_link_up,
+};
+
+/* Allocate and register the MDIO bus described by the "mdio-bus" child
+ * node of the device tree.  Returns 0 on success, -ENODEV when the node
+ * is absent/disabled, -ENOMEM on allocation failure, or the registration
+ * error.  The bus itself is devm-managed; the DT node ref is dropped here.
+ */
+static int mtk_mdio_init(struct mtk_eth *eth)
+{
+	struct device_node *mii_np;
+	int ret;
+
+	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
+	if (!mii_np) {
+		dev_err(eth->dev, "no %s child node found", "mdio-bus");
+		return -ENODEV;
+	}
+
+	if (!of_device_is_available(mii_np)) {
+		ret = -ENODEV;
+		goto err_put_node;
+	}
+
+	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
+	if (!eth->mii_bus) {
+		ret = -ENOMEM;
+		goto err_put_node;
+	}
+
+	eth->mii_bus->name = "mdio";
+	eth->mii_bus->read = mtk_mdio_read;
+	eth->mii_bus->write = mtk_mdio_write;
+	/* priv carries the mtk_eth back into the read/write callbacks */
+	eth->mii_bus->priv = eth;
+	eth->mii_bus->parent = eth->dev;
+
+	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
+	ret = of_mdiobus_register(eth->mii_bus, mii_np);
+
+err_put_node:
+	of_node_put(mii_np);
+	return ret;
+}
+
+/* Unregister the MDIO bus if mtk_mdio_init() ever registered one */
+static void mtk_mdio_cleanup(struct mtk_eth *eth)
+{
+	if (eth->mii_bus)
+		mdiobus_unregister(eth->mii_bus);
+}
+
+/* Atomically clear @mask bits in the TX interrupt mask register */
+static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&eth->tx_irq_lock, flags);
+	mtk_w32(eth, mtk_r32(eth, eth->tx_int_mask_reg) & ~mask,
+		eth->tx_int_mask_reg);
+	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
+}
+
+/* Atomically set @mask bits in the TX interrupt mask register */
+static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&eth->tx_irq_lock, flags);
+	mtk_w32(eth, mtk_r32(eth, eth->tx_int_mask_reg) | mask,
+		eth->tx_int_mask_reg);
+	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
+}
+
+/* Atomically clear @mask bits in the PDMA RX interrupt mask register */
+static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&eth->rx_irq_lock, flags);
+	mtk_w32(eth, mtk_r32(eth, MTK_PDMA_INT_MASK) & ~mask,
+		MTK_PDMA_INT_MASK);
+	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
+}
+
+/* Atomically set @mask bits in the PDMA RX interrupt mask register */
+static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&eth->rx_irq_lock, flags);
+	mtk_w32(eth, mtk_r32(eth, MTK_PDMA_INT_MASK) | mask,
+		MTK_PDMA_INT_MASK);
+	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
+}
+
+/* ndo_set_mac_address: update dev->dev_addr then mirror it into the
+ * GDMA (or MT7628 SDM) MAC address registers.  Returns -EBUSY while a HW
+ * reset is in flight.
+ * NOTE(review): eth_mac_addr() has already changed dev->dev_addr when the
+ * -EBUSY path is taken, leaving software and hardware briefly out of sync
+ * — mirrors the upstream driver; confirm whether that matters here.
+ */
+static int mtk_set_mac_address(struct net_device *dev, void *p)
+{
+	int ret = eth_mac_addr(dev, p);
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+	const char *macaddr = dev->dev_addr;
+
+	if (ret)
+		return ret;
+
+	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+		return -EBUSY;
+
+	spin_lock_bh(&mac->hw->page_lock);
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+		/* MT7628 uses a single shared SDM register pair */
+		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
+			MT7628_SDM_MAC_ADRH);
+		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
+			(macaddr[4] << 8) | macaddr[5],
+			MT7628_SDM_MAC_ADRL);
+	} else {
+		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
+			MTK_GDMA_MAC_ADRH(mac->id));
+		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
+			(macaddr[4] << 8) | macaddr[5],
+			MTK_GDMA_MAC_ADRL(mac->id));
+	}
+	spin_unlock_bh(&mac->hw->page_lock);
+
+	return 0;
+}
+
+/* Fold this MAC's hardware MIB counters (clear-on-read block starting at
+ * MTK_GDM1_TX_GBCNT + per-MAC offset) into the software hw_stats.
+ * Caller must hold hw_stats->stats_lock.
+ */
+void mtk_stats_update_mac(struct mtk_mac *mac)
+{
+	struct mtk_hw_stats *hw_stats = mac->hw_stats;
+	unsigned int base = MTK_GDM1_TX_GBCNT;
+	u64 stats;
+
+	base += hw_stats->reg_offset;
+
+	u64_stats_update_begin(&hw_stats->syncp);
+
+	hw_stats->rx_bytes += mtk_r32(mac->hw, base);
+	/* +0x04 holds the high 32 bits of the rx byte counter */
+	stats =  mtk_r32(mac->hw, base + 0x04);
+	if (stats)
+		hw_stats->rx_bytes += (stats << 32);
+	hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
+	hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
+	hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
+	hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
+	hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
+	hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
+	hw_stats->rx_flow_control_packets +=
+					mtk_r32(mac->hw, base + 0x24);
+	hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
+	hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
+	hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
+	/* +0x34 holds the high 32 bits of the tx byte counter */
+	stats =  mtk_r32(mac->hw, base + 0x34);
+	if (stats)
+		hw_stats->tx_bytes += (stats << 32);
+	hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
+	u64_stats_update_end(&hw_stats->syncp);
+}
+
+/* Refresh hardware stats for every MAC whose stats lock is free; a busy
+ * lock means someone else is already pulling the counters.
+ */
+static void mtk_stats_update(struct mtk_eth *eth)
+{
+	int i;
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		struct mtk_mac *mac = eth->mac[i];
+
+		if (!mac || !mac->hw_stats)
+			continue;
+		if (!spin_trylock(&mac->hw_stats->stats_lock))
+			continue;
+		mtk_stats_update_mac(mac);
+		spin_unlock(&mac->hw_stats->stats_lock);
+	}
+}
+
+/* ndo_get_stats64: copy a consistent snapshot of hw_stats into @storage
+ * using the u64_stats seqcount retry loop; refresh the hardware counters
+ * first when the device is running and the stats lock is free.
+ */
+static void mtk_get_stats64(struct net_device *dev,
+			    struct rtnl_link_stats64 *storage)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_hw_stats *hw_stats = mac->hw_stats;
+	unsigned int start;
+
+	if (netif_running(dev) && netif_device_present(dev)) {
+		if (spin_trylock_bh(&hw_stats->stats_lock)) {
+			mtk_stats_update_mac(mac);
+			spin_unlock_bh(&hw_stats->stats_lock);
+		}
+	}
+
+	do {
+		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
+		storage->rx_packets = hw_stats->rx_packets;
+		storage->tx_packets = hw_stats->tx_packets;
+		storage->rx_bytes = hw_stats->rx_bytes;
+		storage->tx_bytes = hw_stats->tx_bytes;
+		storage->collisions = hw_stats->tx_collisions;
+		storage->rx_length_errors = hw_stats->rx_short_errors +
+			hw_stats->rx_long_errors;
+		storage->rx_over_errors = hw_stats->rx_overflow;
+		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
+		storage->rx_errors = hw_stats->rx_checksum_errors;
+		storage->tx_aborted_errors = hw_stats->tx_skip;
+	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
+
+	/* these three are maintained in dev->stats, not in hw_stats */
+	storage->tx_errors = dev->stats.tx_errors;
+	storage->rx_dropped = dev->stats.rx_dropped;
+	storage->tx_dropped = dev->stats.tx_dropped;
+}
+
+/* Size of the RX page fragment needed for @mtu, never smaller than what
+ * MTK_MAX_RX_LENGTH requires, including headroom and skb_shared_info.
+ */
+static inline int mtk_max_frag_size(int mtu)
+{
+	int eff_mtu = mtu;
+
+	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
+	if (eff_mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
+		eff_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
+
+	return SKB_DATA_ALIGN(MTK_RX_HLEN + eff_mtu) +
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+}
+
+/* Usable packet bytes inside a fragment of @frag_size after headroom and
+ * skb_shared_info; warns if that drops below MTK_MAX_RX_LENGTH.
+ */
+static inline int mtk_max_buf_size(int frag_size)
+{
+	int usable = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
+		     SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	WARN_ON(usable < MTK_MAX_RX_LENGTH);
+
+	return usable;
+}
+
+/* Snapshot a live DMA RX descriptor into @rxd field by field; READ_ONCE
+ * prevents the compiler from re-reading words the hardware may rewrite.
+ */
+static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
+				   struct mtk_rx_dma *dma_rxd)
+{
+	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
+	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
+	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
+	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
+#ifdef CONFIG_MEDIATEK_NETSYS_RX_V2
+	/* V2 descriptors carry two extra words */
+	rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
+	rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
+#endif
+}
+
+/* The QDMA engine needs a scratch "free queue": a ring of descriptors that
+ * each point one page into a contiguous scratch buffer.  On SoCs with SRAM
+ * the ring lives at MTK_ETH_SRAM_OFFSET inside the register window instead
+ * of DRAM.  Returns 0 or -ENOMEM.
+ *
+ * Fix: the first -ENOMEM return was mis-indented with spaces at the wrong
+ * depth; re-indented to kernel style.
+ * NOTE(review): later failure paths do not free the earlier allocations —
+ * mirrors the upstream driver; confirm the caller tears down on error.
+ */
+static int mtk_init_fq_dma(struct mtk_eth *eth)
+{
+	dma_addr_t phy_ring_tail;
+	int cnt = MTK_DMA_SIZE;
+	dma_addr_t dma_addr;
+	int i;
+
+	if (!eth->soc->has_sram) {
+		eth->scratch_ring = dma_alloc_coherent(eth->dev,
+					       cnt * sizeof(struct mtk_tx_dma),
+					       &eth->phy_scratch_ring,
+					       GFP_ATOMIC);
+	} else {
+		eth->scratch_ring = eth->base + MTK_ETH_SRAM_OFFSET;
+	}
+
+	if (unlikely(!eth->scratch_ring))
+		return -ENOMEM;
+
+	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
+				    GFP_KERNEL);
+	if (unlikely(!eth->scratch_head))
+		return -ENOMEM;
+
+	dma_addr = dma_map_single(eth->dev,
+				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
+				  DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+		return -ENOMEM;
+
+	phy_ring_tail = eth->phy_scratch_ring +
+			(sizeof(struct mtk_tx_dma) * (cnt - 1));
+
+	/* chain each descriptor to the next and hand it one scratch page */
+	for (i = 0; i < cnt; i++) {
+		eth->scratch_ring[i].txd1 =
+				(dma_addr + (i * MTK_QDMA_PAGE_SIZE));
+		if (i < cnt - 1)
+			eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
+				((i + 1) * sizeof(struct mtk_tx_dma)));
+		eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
+
+		eth->scratch_ring[i].txd4 = 0;
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+		if (eth->soc->has_sram && ((sizeof(struct mtk_tx_dma)) > 16)) {
+			eth->scratch_ring[i].txd5 = 0;
+			eth->scratch_ring[i].txd6 = 0;
+			eth->scratch_ring[i].txd7 = 0;
+			eth->scratch_ring[i].txd8 = 0;
+		}
+#endif
+	}
+
+	/* publish the ring to the QDMA free-queue registers */
+	mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
+	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
+	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
+	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
+
+	return 0;
+}
+
+/* Translate a QDMA physical descriptor address into its CPU mapping by
+ * applying the same byte offset within the ring.
+ */
+static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
+{
+	return (void *)ring->dma + (desc - ring->phys);
+}
+
+/* Bookkeeping entry that shares this descriptor's index in the ring */
+static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
+						    struct mtk_tx_dma *txd)
+{
+	return &ring->buf[txd - ring->dma];
+}
+
+static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
+				       struct mtk_tx_dma *dma)
+{
+	return ring->dma_pdma - ring->dma + dma;
+}
+
+static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma)
+{
+	return ((void *)dma - (void *)ring->dma) / sizeof(*dma);
+}
+
+static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
+{
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
+			dma_unmap_single(eth->dev,
+					 dma_unmap_addr(tx_buf, dma_addr0),
+					 dma_unmap_len(tx_buf, dma_len0),
+					 DMA_TO_DEVICE);
+		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
+			dma_unmap_page(eth->dev,
+				       dma_unmap_addr(tx_buf, dma_addr0),
+				       dma_unmap_len(tx_buf, dma_len0),
+				       DMA_TO_DEVICE);
+		}
+	} else {
+		if (dma_unmap_len(tx_buf, dma_len0)) {
+			dma_unmap_page(eth->dev,
+				       dma_unmap_addr(tx_buf, dma_addr0),
+				       dma_unmap_len(tx_buf, dma_len0),
+				       DMA_TO_DEVICE);
+		}
+
+		if (dma_unmap_len(tx_buf, dma_len1)) {
+			dma_unmap_page(eth->dev,
+				       dma_unmap_addr(tx_buf, dma_addr1),
+				       dma_unmap_len(tx_buf, dma_len1),
+				       DMA_TO_DEVICE);
+		}
+	}
+
+	tx_buf->flags = 0;
+	if (tx_buf->skb &&
+	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
+		dev_kfree_skb_any(tx_buf->skb);
+	tx_buf->skb = NULL;
+}
+
+static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
+			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
+			 size_t size, int idx)
+{
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
+		dma_unmap_len_set(tx_buf, dma_len0, size);
+	} else {
+		if (idx & 1) {
+			txd->txd3 = mapped_addr;
+			txd->txd2 |= TX_DMA_PLEN1(size);
+			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
+			dma_unmap_len_set(tx_buf, dma_len1, size);
+		} else {
+			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
+			txd->txd1 = mapped_addr;
+			txd->txd2 = TX_DMA_PLEN0(size);
+			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
+			dma_unmap_len_set(tx_buf, dma_len0, size);
+		}
+	}
+}
+
/* mtk_tx_map - map an skb onto TX descriptors and kick the DMA engine
 *
 * Maps the linear part of @skb into the first descriptor and each
 * fragment (split into MTK_TX_DMA_BUF_LEN chunks) into follow-on
 * descriptors, programming either the v2 (NETSYS_TX_V2) or v1
 * descriptor layout.  On PDMA-only SoCs the shadow PDMA descriptors
 * are filled in parallel via setup_tx_buf().
 *
 * Returns 0 on success, -ENOMEM when the ring is full or a DMA mapping
 * fails; on failure every descriptor taken so far is unmapped and
 * reset.  Caller holds eth->page_lock (see mtk_start_xmit()).
 */
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
		      int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma *itxd, *txd;
	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
	struct mtk_tx_buf *itx_buf, *tx_buf;
	dma_addr_t mapped_addr;
	unsigned int nr_frags;
	int i, n_desc = 1;
	u32 txd4 = 0, fport;
	u32 qid = 0;
	int k = 0;

	itxd = ring->next_free;
	itxd_pdma = qdma_to_pdma(ring, itxd);
	/* ring full: next_free caught up with last_free */
	if (itxd == ring->last_free)
		return -ENOMEM;

	itx_buf = mtk_desc_to_tx_buf(ring, itxd);
	memset(itx_buf, 0, sizeof(*itx_buf));

	mapped_addr = dma_map_single(eth->dev, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
		return -ENOMEM;

	WRITE_ONCE(itxd->txd1, mapped_addr);
	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	/* remember the source MAC so TX completion can credit the right
	 * netdev (see mtk_poll_tx_qdma())
	 */
	itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
			  MTK_TX_FLAGS_FPORT1;
	setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb),
		     k++);

	nr_frags = skb_shinfo(skb)->nr_frags;

#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
        qid = skb->mark & (MTK_QDMA_TX_MASK);
#endif

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_TX_V2)) {
		u32 txd5 = 0, txd6 = 0;
		/* set the forward port */
		fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2;
		txd4 |= fport;

		if (gso)
			txd5 |= TX_DMA_TSO_V2;

		/* TX Checksum offload */
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			txd5 |= TX_DMA_CHKSUM_V2;

		/* VLAN header offload */
		if (skb_vlan_tag_present(skb))
			txd6 |= TX_DMA_INS_VLAN_V2 | skb_vlan_tag_get(skb);

		txd4 = txd4 | TX_DMA_SWC_V2;

		/* LS0 marks last-segment only when there are no fragments */
		WRITE_ONCE(itxd->txd3, (TX_DMA_PLEN0(skb_headlen(skb)) |
				(!nr_frags * TX_DMA_LS0)));

#if defined(CONFIG_MEDIATEK_NETSYS_V2)
		WRITE_ONCE(itxd->txd5, txd5);
		WRITE_ONCE(itxd->txd6, txd6);
#endif
	} else {
		/* set the forward port */
		fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
		txd4 |= fport;

                if (gso)
                        txd4 |= TX_DMA_TSO;

                /* TX Checksum offload */
                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        txd4 |= TX_DMA_CHKSUM;

		/* VLAN header offload */
		if (skb_vlan_tag_present(skb))
			txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);

		WRITE_ONCE(itxd->txd3,
			   TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
			   (!nr_frags * TX_DMA_LS0) | QID_LOW_BITS(qid));
	}
	/* TX SG offload */
	txd = itxd;
	txd_pdma = qdma_to_pdma(ring, txd);

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
	/* HNAT-tagged skbs are redirected to forward port 4 -- presumably
	 * the PPE; confirm against the HNAT documentation
	 */
	if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_TX_V2)) {
			txd4 &= ~(0xf << TX_DMA_FPORT_SHIFT_V2);
			txd4 |= 0x4 << TX_DMA_FPORT_SHIFT_V2;
		} else {
			txd4 &= ~(0x7 << TX_DMA_FPORT_SHIFT);
			txd4 |= 0x4 << TX_DMA_FPORT_SHIFT;
		}
	}

	trace_printk("[%s] nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
		     __func__, nr_frags, HNAT_SKB_CB2(skb)->magic, txd4);
#endif

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		unsigned int offset = 0;
		int frag_size = skb_frag_size(frag);

		while (frag_size) {
			bool last_frag = false;
			unsigned int frag_map_size;
			bool new_desc = true;

			/* QDMA always advances; PDMA packs two buffers per
			 * descriptor, so only advance on odd fragments
			 */
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
			    (i & 0x1)) {
				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
				txd_pdma = qdma_to_pdma(ring, txd);
				if (txd == ring->last_free)
					goto err_dma;

				n_desc++;
			} else {
				new_desc = false;
			}


			frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
			mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
						       frag_map_size,
						       DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
				goto err_dma;

			if (i == nr_frags - 1 &&
			    (frag_size - frag_map_size) == 0)
				last_frag = true;

			WRITE_ONCE(txd->txd1, mapped_addr);

			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_TX_V2)) {
				WRITE_ONCE(txd->txd3, (TX_DMA_PLEN0(frag_map_size) |
					   last_frag * TX_DMA_LS0));
				WRITE_ONCE(txd->txd4, fport | TX_DMA_SWC_V2 |
						      QID_BITS_V2(qid));
			} else {
				WRITE_ONCE(txd->txd3,
					   (TX_DMA_SWC | QID_LOW_BITS(qid) |
					    TX_DMA_PLEN0(frag_map_size) |
					    last_frag * TX_DMA_LS0));
				WRITE_ONCE(txd->txd4,
					   fport | QID_HIGH_BITS(qid));
			}

			tx_buf = mtk_desc_to_tx_buf(ring, txd);
			if (new_desc)
				memset(tx_buf, 0, sizeof(*tx_buf));
			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
			tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
					 MTK_TX_FLAGS_FPORT1;

			setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr,
				     frag_map_size, k++);

			frag_size -= frag_map_size;
			offset += frag_map_size;
		}
	}

	/* store skb to cleanup */
	itx_buf->skb = skb;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_TX_V2))
		WRITE_ONCE(itxd->txd4, txd4 | QID_BITS_V2(qid));
	else
		WRITE_ONCE(itxd->txd4, txd4 | QID_HIGH_BITS(qid));

	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		/* odd buffer count ends in slot 0, even in slot 1 */
		if (k & 0x1)
			txd_pdma->txd2 |= TX_DMA_LS0;
		else
			txd_pdma->txd2 |= TX_DMA_LS1;
	}

	netdev_sent_queue(dev, skb->len);
	skb_tx_timestamp(skb);

	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
	atomic_sub(n_desc, &ring->free_count);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
		    !netdev_xmit_more())
			mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
	} else {
		int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd),
					     ring->dma_size);
		mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
	}

	return 0;

err_dma:
	/* walk back from the first descriptor to the failing one, undoing
	 * mappings and restoring CPU ownership
	 */
	do {
		tx_buf = mtk_desc_to_tx_buf(ring, itxd);

		/* unmap dma */
		mtk_tx_unmap(eth, tx_buf);

		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;

		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
		itxd_pdma = qdma_to_pdma(ring, itxd);
	} while (itxd != txd);

	return -ENOMEM;
}
+
+static inline int mtk_cal_txd_req(struct sk_buff *skb)
+{
+	int i, nfrags;
+	skb_frag_t *frag;
+
+	nfrags = 1;
+	if (skb_is_gso(skb)) {
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			frag = &skb_shinfo(skb)->frags[i];
+			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
+						MTK_TX_DMA_BUF_LEN);
+		}
+	} else {
+		nfrags += skb_shinfo(skb)->nr_frags;
+	}
+
+	return nfrags;
+}
+
+static int mtk_queue_stopped(struct mtk_eth *eth)
+{
+	int i;
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!eth->netdev[i])
+			continue;
+		if (netif_queue_stopped(eth->netdev[i]))
+			return 1;
+	}
+
+	return 0;
+}
+
+static void mtk_wake_queue(struct mtk_eth *eth)
+{
+	int i;
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!eth->netdev[i])
+			continue;
+		netif_wake_queue(eth->netdev[i]);
+	}
+}
+
/* ndo_start_xmit handler.  Serializes ring access with eth->page_lock,
 * reserves enough descriptors for the whole skb up front, and hands it
 * to mtk_tx_map().  Packets are dropped (counted in tx_dropped) while
 * the device is resetting or when mapping fails; the queue is stopped
 * when the ring would not fit another maximally-fragmented packet.
 */
static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device_stats *stats = &dev->stats;
	bool gso = false;
	int tx_num;

	/* normally we can rely on the stack not calling this more than once,
	 * however we have 2 queues running on the same ring so we need to lock
	 * the ring access
	 */
	spin_lock(&eth->page_lock);

	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
		goto drop;

	tx_num = mtk_cal_txd_req(skb);
	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
		netif_stop_queue(dev);
		netif_err(eth, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
		spin_unlock(&eth->page_lock);
		return NETDEV_TX_BUSY;
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0)) {
			netif_warn(eth, tx_err, dev,
				   "GSO expand head fail.\n");
			goto drop;
		}

		if (skb_shinfo(skb)->gso_type &
				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			gso = true;
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
		goto drop;

	/* keep headroom so the next xmit never races a nearly-full ring */
	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
		netif_stop_queue(dev);

	spin_unlock(&eth->page_lock);

	return NETDEV_TX_OK;

drop:
	spin_unlock(&eth->page_lock);
	stats->tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
+
+static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
+{
+	int i;
+	struct mtk_rx_ring *ring;
+	int idx;
+
+	if (!eth->hwlro)
+		return &eth->rx_ring[0];
+
+	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
+		ring = &eth->rx_ring[i];
+		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
+		if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
+			ring->calc_idx_update = true;
+			return ring;
+		}
+	}
+
+	return NULL;
+}
+
+static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
+{
+	struct mtk_rx_ring *ring;
+	int i;
+
+	if (!eth->hwlro) {
+		ring = &eth->rx_ring[0];
+		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
+	} else {
+		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
+			ring = &eth->rx_ring[i];
+			if (ring->calc_idx_update) {
+				ring->calc_idx_update = false;
+				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
+			}
+		}
+	}
+}
+
/* mtk_poll_rx - receive up to @budget packets across the RX rings
 *
 * For each completed descriptor: allocate a replacement page fragment,
 * map it, build an skb from the old buffer, apply checksum/VLAN/HNAT
 * metadata from the descriptor, hand the skb to GRO, then recycle the
 * descriptor with the new buffer.  On any per-packet failure the
 * descriptor is released back to the hardware unchanged (packet drop).
 * Returns the number of packets delivered.
 */
static int mtk_poll_rx(struct napi_struct *napi, int budget,
		       struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring;
	int idx;
	struct sk_buff *skb;
	u8 *data, *new_data;
	struct mtk_rx_dma *rxd, trxd;
	int done = 0;

	while (done < budget) {
		struct net_device *netdev;
		unsigned int pktlen;
		dma_addr_t dma_addr;
		int mac;

		ring = mtk_get_rx_ring(eth);
		if (unlikely(!ring))
			goto rx_done;

		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
		rxd = &ring->dma[idx];
		data = ring->data[idx];

		/* snapshot the descriptor before the DONE check so the
		 * fields below are read consistently
		 */
		mtk_rx_get_desc(&trxd, rxd);
		if (!(trxd.rxd2 & RX_DMA_DONE))
			break;

		/* find out which mac the packet come from. values start at 1 */
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
			mac = 0;
		} else {
#ifdef CONFIG_MEDIATEK_NETSYS_RX_V2
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
				mac = RX_DMA_GET_SPORT(trxd.rxd5) - 1;
			else
#endif
				mac = (trxd.rxd4 & RX_DMA_SPECIAL_TAG) ?
				      0 : RX_DMA_GET_SPORT(trxd.rxd4) - 1;
		}

		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
			     !eth->netdev[mac]))
			goto release_desc;

		netdev = eth->netdev[mac];

		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
			goto release_desc;

		/* alloc new buffer */
		new_data = napi_alloc_frag(ring->frag_size);
		if (unlikely(!new_data)) {
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		dma_addr = dma_map_single(eth->dev,
					  new_data + NET_SKB_PAD +
					  eth->ip_align,
					  ring->buf_size,
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
			skb_free_frag(new_data);
			netdev->stats.rx_dropped++;
			goto release_desc;
		}

		/* receive data */
		skb = build_skb(data, ring->frag_size);
		if (unlikely(!skb)) {
			skb_free_frag(new_data);
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		/* NOTE(review): buffers are mapped with eth->ip_align but
		 * reserved here with NET_IP_ALIGN -- confirm the two always
		 * match on the supported SoCs
		 */
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		dma_unmap_single(eth->dev, trxd.rxd1,
				 ring->buf_size, DMA_FROM_DEVICE);
		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
		skb->dev = netdev;
		skb_put(skb, pktlen);

		/* L4 checksum validity lives in rxd3 on v2, rxd4 on v1 */
		if ((!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2) &&
				  (trxd.rxd4 & eth->rx_dma_l4_valid)) ||
		    (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2) &&
				  (trxd.rxd3 & eth->rx_dma_l4_valid)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, netdev);

		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
				if (trxd.rxd4 & RX_DMA_VTAG_V2)
					__vlan_hwaccel_put_tag(skb,
					htons(RX_DMA_VPID_V2(trxd.rxd3,
							     trxd.rxd4)),
					RX_DMA_VID_V2(trxd.rxd4));
			} else {
				if (trxd.rxd2 & RX_DMA_VTAG)
					__vlan_hwaccel_put_tag(skb,
					htons(RX_DMA_VPID(trxd.rxd3)),
					RX_DMA_VID(trxd.rxd3));
			}

			/* If netdev is attached to dsa switch, the special
			 * tag inserted in VLAN field by switch hardware can
			 * be offload by RX HW VLAN offload. Clears the VLAN
			 * information from @skb to avoid unexpected 8021d
			 * handler before packet enter dsa framework.
			 */
			if (netdev_uses_dsa(netdev))
				__vlan_hwaccel_clear_tag(skb);
		}

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
		/* stash the raw descriptor word in the skb headroom for the
		 * HNAT layer to pick up
		 */
#ifdef CONFIG_MEDIATEK_NETSYS_RX_V2
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
			*(u32 *)(skb->head) = trxd.rxd5;
		else
#endif
			*(u32 *)(skb->head) = trxd.rxd4;

		skb_hnat_alg(skb) = 0;
		skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;

		if (skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) {
			trace_printk("[%s] reason=0x%x(force to CPU) from WAN to Ext\n",
				     __func__, skb_hnat_reason(skb));
			skb->pkt_type = PACKET_HOST;
		}

		trace_printk("[%s] rxd:(entry=%x,sport=%x,reason=%x,alg=%x\n",
			     __func__, skb_hnat_entry(skb), skb_hnat_sport(skb),
			     skb_hnat_reason(skb), skb_hnat_alg(skb));
#endif

		skb_record_rx_queue(skb, 0);
		napi_gro_receive(napi, skb);

		/* swap in the freshly-mapped buffer for this slot */
		ring->data[idx] = new_data;
		rxd->rxd1 = (unsigned int)dma_addr;

release_desc:
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
			rxd->rxd2 = RX_DMA_LSO;
		else
			rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);

		ring->calc_idx = idx;

		done++;
	}

rx_done:
	if (done) {
		/* make sure that all changes to the dma ring are flushed before
		 * we continue
		 */
		wmb();
		mtk_update_rx_cpu_idx(eth);
	}

	return done;
}
+
/* Reclaim completed QDMA TX descriptors.
 *
 * Walks from the last CPU release pointer (CRX) towards the hardware's
 * current pointer (DRX), unmapping buffers and crediting completed
 * skbs per MAC into @done/@bytes.  Returns the unused part of @budget.
 */
static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
			    unsigned int *done, unsigned int *bytes)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	u32 cpu, dma;

	cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
	dma = mtk_r32(eth, MTK_QTX_DRX_PTR);

	desc = mtk_qdma_phys_to_virt(ring, cpu);

	while ((cpu != dma) && budget) {
		u32 next_cpu = desc->txd2;
		int mac = 0;

		/* stop at the first descriptor the DMA engine still owns */
		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
			break;

		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);

		tx_buf = mtk_desc_to_tx_buf(ring, desc);
		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
			mac = 1;

		skb = tx_buf->skb;
		if (!skb)
			break;

		/* dummy-desc slots belong to fragments; only the head skb
		 * counts towards the completion statistics
		 */
		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
			bytes[mac] += skb->len;
			done[mac]++;
			budget--;
		}
		mtk_tx_unmap(eth, tx_buf);

		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = next_cpu;
	}

	/* publish the new release pointer back to the hardware */
	mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);

	return budget;
}
+
/* Reclaim completed PDMA TX descriptors (MT7628-style SoCs).
 *
 * Walks from the software index towards the hardware's DTX index,
 * unmapping buffers and crediting completed skbs (single MAC only)
 * into @done/@bytes.  Returns the unused part of @budget.
 */
static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
			    unsigned int *done, unsigned int *bytes)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	u32 cpu, dma;

	cpu = ring->cpu_idx;
	dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);

	while ((cpu != dma) && budget) {
		tx_buf = &ring->buf[cpu];
		skb = tx_buf->skb;
		if (!skb)
			break;

		/* only head skbs (not dummy-desc fragment slots) count */
		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
			bytes[0] += skb->len;
			done[0]++;
			budget--;
		}

		mtk_tx_unmap(eth, tx_buf);

		desc = &ring->dma[cpu];
		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
	}

	/* remember where to resume on the next poll */
	ring->cpu_idx = cpu;

	return budget;
}
+
+static int mtk_poll_tx(struct mtk_eth *eth, int budget)
+{
+	struct mtk_tx_ring *ring = &eth->tx_ring;
+	unsigned int done[MTK_MAX_DEVS];
+	unsigned int bytes[MTK_MAX_DEVS];
+	int total = 0, i;
+
+	memset(done, 0, sizeof(done));
+	memset(bytes, 0, sizeof(bytes));
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+		budget = mtk_poll_tx_qdma(eth, budget, done, bytes);
+	else
+		budget = mtk_poll_tx_pdma(eth, budget, done, bytes);
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!eth->netdev[i] || !done[i])
+			continue;
+		netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
+		total += done[i];
+	}
+
+	if (mtk_queue_stopped(eth) &&
+	    (atomic_read(&ring->free_count) > ring->thresh))
+		mtk_wake_queue(eth);
+
+	return total;
+}
+
+static void mtk_handle_status_irq(struct mtk_eth *eth)
+{
+	u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
+
+	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
+		mtk_stats_update(eth);
+		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
+			MTK_INT_STATUS2);
+	}
+}
+
/* NAPI TX completion poll.  Acks the TX-done interrupt, reclaims
 * descriptors, and re-enables the interrupt only once no further
 * completions are pending.  Returns @budget to request another poll,
 * otherwise the number of completed packets.
 */
static int mtk_napi_tx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
	u32 status, mask;
	int tx_done = 0;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		mtk_handle_status_irq(eth);
	/* ack before polling so a new completion edge is not lost */
	mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
	tx_done = mtk_poll_tx(eth, budget);

	if (unlikely(netif_msg_intr(eth))) {
		status = mtk_r32(eth, eth->tx_int_status_reg);
		mask = mtk_r32(eth, eth->tx_int_mask_reg);
		dev_info(eth->dev,
			 "done tx %d, intr 0x%08x/0x%x\n",
			 tx_done, status, mask);
	}

	if (tx_done == budget)
		return budget;

	/* more work arrived while polling: stay in polled mode */
	status = mtk_r32(eth, eth->tx_int_status_reg);
	if (status & MTK_TX_DONE_INT)
		return budget;

	napi_complete(napi);
	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);

	return tx_done;
}
+
/* NAPI RX poll.  Acks the RX-done interrupt, receives packets and, if
 * the status re-asserts with budget remaining, loops to pick up the
 * new work before completing NAPI and re-enabling the interrupt.
 */
static int mtk_napi_rx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
	u32 status, mask;
	int rx_done = 0;
	int remain_budget = budget;

	mtk_handle_status_irq(eth);

poll_again:
	/* ack before polling so a new DONE edge is not lost */
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
	rx_done = mtk_poll_rx(napi, remain_budget, eth);

	if (unlikely(netif_msg_intr(eth))) {
		status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
		mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
		dev_info(eth->dev,
			 "done rx %d, intr 0x%08x/0x%x\n",
			 rx_done, status, mask);
	}
	if (rx_done == remain_budget)
		return budget;

	status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
	if (status & MTK_RX_DONE_INT) {
		remain_budget -= rx_done;
		goto poll_again;
	}
	napi_complete(napi);
	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);

	/* total packets consumed across all poll_again iterations */
	return rx_done + budget - remain_budget;
}
+
/* Allocate and initialize the TX descriptor ring: software bookkeeping
 * array, QDMA descriptor chain (in coherent DRAM or SRAM after the
 * scratch ring) and, on PDMA-only SoCs, the shadow PDMA ring.  Finally
 * programs the ring base/pointer registers.  Returns 0 or -ENOMEM;
 * partially-allocated resources are presumably released by the
 * caller's error path via mtk_tx_clean() -- verify against the caller.
 */
static int mtk_tx_alloc(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i, sz = sizeof(*ring->dma);

	ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
			       GFP_KERNEL);
	if (!ring->buf)
		goto no_tx_mem;

	if (!eth->soc->has_sram)
		ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
					       &ring->phys, GFP_ATOMIC);
	else {
		/* SRAM layout: scratch ring first, then the TX ring */
		ring->dma =  eth->scratch_ring + MTK_DMA_SIZE;
		ring->phys = eth->phy_scratch_ring + MTK_DMA_SIZE * sz;
	}

	if (!ring->dma)
		goto no_tx_mem;

	/* chain the descriptors circularly via txd2 and mark them all
	 * CPU-owned / last-segment
	 */
	for (i = 0; i < MTK_DMA_SIZE; i++) {
		int next = (i + 1) % MTK_DMA_SIZE;
		u32 next_ptr = ring->phys + next * sz;

		ring->dma[i].txd2 = next_ptr;
		ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		ring->dma[i].txd4 = 0;
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
		/* v2 descriptors are larger; clear the extra words */
                if (eth->soc->has_sram && ( sz > 16)) {
                        ring->dma[i].txd5 = 0;
                        ring->dma[i].txd6 = 0;
                        ring->dma[i].txd7 = 0;
                        ring->dma[i].txd8 = 0;
                }
#endif
	}

	/* On MT7688 (PDMA only) this driver uses the ring->dma structs
	 * only as the framework. The real HW descriptors are the PDMA
	 * descriptors in ring->dma_pdma.
	 */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
						    &ring->phys_pdma,
						    GFP_ATOMIC);
		if (!ring->dma_pdma)
			goto no_tx_mem;

		for (i = 0; i < MTK_DMA_SIZE; i++) {
			ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
			ring->dma_pdma[i].txd4 = 0;
		}
	}

	ring->dma_size = MTK_DMA_SIZE;
	/* keep two descriptors back so next_free can never collide with
	 * last_free
	 */
	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
	ring->next_free = &ring->dma[0];
	ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
	ring->thresh = MAX_SKB_FRAGS;

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
		mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
		mtk_w32(eth,
			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
			MTK_QTX_CRX_PTR);
		mtk_w32(eth,
			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
			MTK_QTX_DRX_PTR);
		mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
			MTK_QTX_CFG(0));
	} else {
		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
		mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
		mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
		mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
	}

	return 0;

no_tx_mem:
	return -ENOMEM;
}
+
+static void mtk_tx_clean(struct mtk_eth *eth)
+{
+	struct mtk_tx_ring *ring = &eth->tx_ring;
+	int i;
+
+	if (ring->buf) {
+		for (i = 0; i < MTK_DMA_SIZE; i++)
+			mtk_tx_unmap(eth, &ring->buf[i]);
+		kfree(ring->buf);
+		ring->buf = NULL;
+	}
+
+	if (!eth->soc->has_sram && ring->dma) {
+		dma_free_coherent(eth->dev,
+				  MTK_DMA_SIZE * sizeof(*ring->dma),
+				  ring->dma,
+				  ring->phys);
+		ring->dma = NULL;
+	}
+
+	if (ring->dma_pdma) {
+		dma_free_coherent(eth->dev,
+				  MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
+				  ring->dma_pdma,
+				  ring->phys_pdma);
+		ring->dma_pdma = NULL;
+	}
+}
+
/* Allocate and initialize one RX ring (normal, HW-LRO or QDMA flavor):
 * per-slot page fragments, the descriptor array (coherent DRAM, or the
 * SRAM region directly after the TX ring for the normal ring on SRAM
 * SoCs), DMA mappings for every buffer, and finally the ring registers.
 * Returns 0 or an error code; buffers allocated before a failure are
 * presumably released by the caller via mtk_rx_clean() -- verify.
 */
static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
	struct mtk_rx_ring *ring;
	int rx_data_len, rx_dma_size;
	int i;

	if (rx_flag == MTK_RX_FLAGS_QDMA) {
		/* only one QDMA RX ring exists */
		if (ring_no)
			return -EINVAL;
		ring = &eth->rx_ring_qdma;
	} else {
		ring = &eth->rx_ring[ring_no];
	}

	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
	} else {
		rx_data_len = ETH_DATA_LEN;
		rx_dma_size = MTK_DMA_SIZE;
	}

	ring->frag_size = mtk_max_frag_size(rx_data_len);
	ring->buf_size = mtk_max_buf_size(ring->frag_size);
	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
			     GFP_KERNEL);
	if (!ring->data)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		ring->data[i] = netdev_alloc_frag(ring->frag_size);
		if (!ring->data[i])
			return -ENOMEM;
	}

	/* only the normal ring may live in SRAM; LRO/QDMA rings always
	 * use coherent DRAM
	 */
	if ((!eth->soc->has_sram) || (eth->soc->has_sram
				&& (rx_flag != MTK_RX_FLAGS_NORMAL)))
		ring->dma = dma_alloc_coherent(eth->dev,
					       rx_dma_size * sizeof(*ring->dma),
					       &ring->phys, GFP_ATOMIC);
	else {
		struct mtk_tx_ring *tx_ring = &eth->tx_ring;
		ring->dma =  (struct mtk_rx_dma *)(tx_ring->dma + MTK_DMA_SIZE);
		ring->phys = tx_ring->phys + MTK_DMA_SIZE * sizeof(*tx_ring->dma);
	}

	if (!ring->dma)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		dma_addr_t dma_addr = dma_map_single(eth->dev,
				ring->data[i] + NET_SKB_PAD + eth->ip_align,
				ring->buf_size,
				DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
			return -ENOMEM;
		ring->dma[i].rxd1 = (unsigned int)dma_addr;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
			ring->dma[i].rxd2 = RX_DMA_LSO;
		else
			ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);

		ring->dma[i].rxd3 = 0;
		ring->dma[i].rxd4 = 0;
#if defined(CONFIG_MEDIATEK_NETSYS_RX_V2)
		/* v2 descriptors are larger; clear the extra words */
		if (eth->soc->has_sram && ((sizeof(struct mtk_rx_dma)) > 16)) {
			ring->dma[i].rxd5 = 0;
			ring->dma[i].rxd6 = 0;
			ring->dma[i].rxd7 = 0;
			ring->dma[i].rxd8 = 0;
		}
#endif
	}
	ring->dma_size = rx_dma_size;
	ring->calc_idx_update = false;
	/* start with the CPU index one behind the first descriptor */
	ring->calc_idx = rx_dma_size - 1;
	ring->crx_idx_reg = (rx_flag == MTK_RX_FLAGS_QDMA) ?
			     MTK_QRX_CRX_IDX_CFG(ring_no) :
			     MTK_PRX_CRX_IDX_CFG(ring_no);
	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (rx_flag == MTK_RX_FLAGS_QDMA) {
		mtk_w32(eth, ring->phys, MTK_QRX_BASE_PTR_CFG(ring_no));
		mtk_w32(eth, rx_dma_size, MTK_QRX_MAX_CNT_CFG(ring_no));
		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_QDMA_RST_IDX);
	} else {
		mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no));
		mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no));
		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX);
	}

	return 0;
}
+
+static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, int in_sram)
+{
+	int i;
+
+	if (ring->data && ring->dma) {
+		for (i = 0; i < ring->dma_size; i++) {
+			if (!ring->data[i])
+				continue;
+			if (!ring->dma[i].rxd1)
+				continue;
+			dma_unmap_single(eth->dev,
+					 ring->dma[i].rxd1,
+					 ring->buf_size,
+					 DMA_FROM_DEVICE);
+			skb_free_frag(ring->data[i]);
+		}
+		kfree(ring->data);
+		ring->data = NULL;
+	}
+
+	if(in_sram)
+		return;
+
+	if (ring->dma) {
+		dma_free_coherent(eth->dev,
+				  ring->dma_size * sizeof(*ring->dma),
+				  ring->dma,
+				  ring->phys);
+		ring->dma = NULL;
+	}
+}
+
/* Program the PDMA HW LRO engine: per-ring aggregation limits and
 * timers for rings 1..MTK_MAX_RX_RING_NUM-1, then the global
 * bandwidth/refresh thresholds, and finally enable LRO.  Always
 * returns 0.
 */
static int mtk_hwlro_rx_init(struct mtk_eth *eth)
{
	int i;
	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;

	/* set LRO rings to auto-learn modes */
	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;

	/* validate LRO ring */
	ring_ctrl_dw2 |= MTK_RING_VLD;

	/* set AGE timer (unit: 20us) */
	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;

	/* set max AGG timer (unit: 20us) */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;

	/* set max LRO AGG count */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;

	/* ring 0 is the non-LRO default ring and is left untouched */
	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
	}

	/* IPv4 checksum update enable */
	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;

	/* switch priority comparison to packet count mode */
	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;

	/* bandwidth threshold setting */
	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);

	/* auto-learn score delta setting */
	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);

	/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
		MTK_PDMA_LRO_ALT_REFRESH_TIMER);

	/* set HW LRO mode & the max aggregation count for rx packets */
	lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);

	/* the minimal remaining room of SDL0 in RXD for lro aggregation */
	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;

	/* enable HW LRO */
	lro_ctrl_dw0 |= MTK_LRO_EN;

	/* write DW3 before DW0 so the enable bit lands last */
	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);

	return 0;
}
+
+static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
+{
+	int i;
+	u32 val;
+
+	/* relinquish lro rings, flush aggregated packets */
+	mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
+
+	/* wait for relinquishments done */
+	for (i = 0; i < 10; i++) {
+		val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
+		if (val & MTK_LRO_RING_RELINQUISH_DONE) {
+			msleep(20);
+			continue;
+		}
+		break;
+	}
+
+	/* invalidate lro rings */
+	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
+		mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
+
+	/* disable HW LRO */
+	mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
+}
+
+static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
+{
+	u32 reg_val;
+
+	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
+
+	/* invalidate the IP setting */
+	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
+
+	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
+
+	/* validate the IP setting */
+	mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
+}
+
+static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
+{
+	u32 reg_val;
+
+	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
+
+	/* invalidate the IP setting */
+	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
+
+	mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
+}
+
+static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
+{
+	int cnt = 0;
+	int i;
+
+	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
+		if (mac->hwlro_ip[i])
+			cnt++;
+	}
+
+	return cnt;
+}
+
+static int mtk_hwlro_add_ipaddr(struct net_device *dev,
+				struct ethtool_rxnfc *cmd)
+{
+	struct ethtool_rx_flow_spec *fsp =
+		(struct ethtool_rx_flow_spec *)&cmd->fs;
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+	int hwlro_idx;
+
+	if ((fsp->flow_type != TCP_V4_FLOW) ||
+	    (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
+	    (fsp->location > 1))
+		return -EINVAL;
+
+	mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
+	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
+
+	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
+
+	mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
+
+	return 0;
+}
+
+/* ETHTOOL_SRXCLSRLDEL backend: clear the LRO destination-IP slot at
+ * fsp->location and invalidate the matching hardware ring.
+ */
+static int mtk_hwlro_del_ipaddr(struct net_device *dev,
+				struct ethtool_rxnfc *cmd)
+{
+	struct ethtool_rx_flow_spec *fsp =
+		(struct ethtool_rx_flow_spec *)&cmd->fs;
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+	int hwlro_idx;
+
+	/* bound by the array size rather than a hard-coded "1" */
+	if (fsp->location >= MTK_MAX_LRO_IP_CNT)
+		return -EINVAL;
+
+	mac->hwlro_ip[fsp->location] = 0;
+	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
+
+	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
+
+	mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
+
+	return 0;
+}
+
+/* Drop every LRO IP rule of @dev and invalidate the matching hardware
+ * slots; called when NETIF_F_LRO is switched off.
+ */
+static void mtk_hwlro_netdev_disable(struct net_device *dev)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+	int i;
+
+	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
+		mac->hwlro_ip[i] = 0;
+		mtk_hwlro_inval_ipaddr(eth, mac->id * MTK_MAX_LRO_IP_CNT + i);
+	}
+
+	mac->hwlro_ip_cnt = 0;
+}
+
+/* ETHTOOL_GRXCLSRULE backend: report the LRO rule stored at
+ * fsp->location as a TCPv4 destination-IP match.
+ *
+ * fsp->location comes straight from userspace, so reject indices
+ * outside hwlro_ip[] before using them (out-of-bounds read otherwise).
+ */
+static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
+				    struct ethtool_rxnfc *cmd)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct ethtool_rx_flow_spec *fsp =
+		(struct ethtool_rx_flow_spec *)&cmd->fs;
+
+	if (fsp->location >= MTK_MAX_LRO_IP_CNT)
+		return -EINVAL;
+
+	/* only tcp dst ipv4 is meaningful, others are meaningless */
+	fsp->flow_type = TCP_V4_FLOW;
+	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
+	fsp->m_u.tcp_ip4_spec.ip4dst = 0;
+
+	fsp->h_u.tcp_ip4_spec.ip4src = 0;
+	fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
+	fsp->h_u.tcp_ip4_spec.psrc = 0;
+	fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
+	fsp->h_u.tcp_ip4_spec.pdst = 0;
+	fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
+	fsp->h_u.tcp_ip4_spec.tos = 0;
+	fsp->m_u.tcp_ip4_spec.tos = 0xff;
+
+	return 0;
+}
+
+/* Fill @rule_locs with the indices of the populated LRO IP slots and
+ * set cmd->rule_cnt accordingly (ETHTOOL_GRXCLSRLALL backend).
+ */
+static int mtk_hwlro_get_fdir_all(struct net_device *dev,
+				  struct ethtool_rxnfc *cmd,
+				  u32 *rule_locs)
+{
+	struct mtk_mac *priv = netdev_priv(dev);
+	int i, n = 0;
+
+	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
+		if (priv->hwlro_ip[i])
+			rule_locs[n++] = i;
+	}
+
+	cmd->rule_cnt = n;
+
+	return 0;
+}
+
+/* ndo_fix_features: veto feature changes the hardware state forbids.
+ * LRO cannot be switched off while ethtool RX flow rules are still
+ * programmed, and TX VLAN offload clashes with the DSA special tag.
+ */
+static netdev_features_t mtk_fix_features(struct net_device *dev,
+					  netdev_features_t features)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+
+	if (!(features & NETIF_F_LRO) && mtk_hwlro_get_ip_cnt(mac)) {
+		netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
+		features |= NETIF_F_LRO;
+	}
+
+	if ((features & NETIF_F_HW_VLAN_CTAG_TX) && netdev_uses_dsa(dev)) {
+		netdev_info(dev, "TX vlan offload cannot be enabled when dsa is attached.\n");
+		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+	}
+
+	return features;
+}
+
+/* ndo_set_features: apply LRO and RX VLAN offload changes. */
+static int mtk_set_features(struct net_device *dev, netdev_features_t features)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+	netdev_features_t diff = dev->features ^ features;
+
+	if (!(diff & MTK_SET_FEATURES))
+		return 0;
+
+	/* dropping LRO: flush all programmed flows and invalidate the slots */
+	if (!(features & NETIF_F_LRO))
+		mtk_hwlro_netdev_disable(dev);
+
+	/* toggle hardware VLAN tag stripping at the CDM egress */
+	mtk_w32(eth, (features & NETIF_F_HW_VLAN_CTAG_RX) ? 1 : 0,
+		MTK_CDMP_EG_CTRL);
+
+	return 0;
+}
+
+/* Wait for DMA to finish whatever it is doing before we start using it
+ * again: poll the TX/RX busy bits of the QDMA (or PDMA) global config
+ * register. Returns 0 once idle, -1 after MTK_DMA_BUSY_TIMEOUT jiffies.
+ */
+static int mtk_dma_busy_wait(struct mtk_eth *eth)
+{
+	unsigned long deadline = jiffies + MTK_DMA_BUSY_TIMEOUT;
+	u32 glo_cfg = MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ?
+		      MTK_QDMA_GLO_CFG : MTK_PDMA_GLO_CFG;
+
+	do {
+		if (!(mtk_r32(eth, glo_cfg) &
+		      (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
+			return 0;
+	} while (!time_after(jiffies, deadline));
+
+	dev_err(eth->dev, "DMA init timeout\n");
+	return -1;
+}
+
+/* Allocate and program every descriptor ring: the QDMA scratch/FQ ring,
+ * the TX ring, QDMA and PDMA RX ring 0, and (when the SoC supports
+ * HW LRO) RX rings 1..MTK_MAX_RX_RING_NUM-1. Waits for the engines to
+ * go idle first. Returns 0 or a negative errno; partially allocated
+ * rings are reclaimed later by mtk_dma_free() in the caller's path.
+ */
+static int mtk_dma_init(struct mtk_eth *eth)
+{
+	int err;
+	u32 i;
+
+	if (mtk_dma_busy_wait(eth))
+		return -EBUSY;
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+		/* QDMA needs scratch memory for internal reordering of the
+		 * descriptors
+		 */
+		err = mtk_init_fq_dma(eth);
+		if (err)
+			return err;
+	}
+
+	err = mtk_tx_alloc(eth);
+	if (err)
+		return err;
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+		err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
+		if (err)
+			return err;
+	}
+
+	/* PDMA RX ring 0 is always present */
+	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
+	if (err)
+		return err;
+
+	if (eth->hwlro) {
+		/* rings 1..3 are dedicated to hardware LRO flows */
+		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
+			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
+			if (err)
+				return err;
+		}
+		err = mtk_hwlro_rx_init(eth);
+		if (err)
+			return err;
+	}
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+		/* Enable random early drop and set drop threshold
+		 * automatically
+		 */
+		mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
+			FC_THRES_MIN, MTK_QDMA_FC_THRES);
+		mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
+	}
+
+	return 0;
+}
+
+/* Release every TX/RX descriptor ring and the QDMA scratch memory.
+ * Counterpart of mtk_dma_init(); called with the DMA engines stopped.
+ */
+static void mtk_dma_free(struct mtk_eth *eth)
+{
+	int i;
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (eth->netdev[i])
+			netdev_reset_queue(eth->netdev[i]);
+	}
+
+	/* the scratch ring lives in DRAM only when the SoC has no SRAM */
+	if (!eth->soc->has_sram && eth->scratch_ring) {
+		dma_free_coherent(eth->dev,
+				  MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
+				  eth->scratch_ring, eth->phy_scratch_ring);
+		eth->scratch_ring = NULL;
+		eth->phy_scratch_ring = 0;
+	}
+
+	mtk_tx_clean(eth);
+	mtk_rx_clean(eth, &eth->rx_ring[0], 1);
+	mtk_rx_clean(eth, &eth->rx_ring_qdma, 0);
+
+	if (eth->hwlro) {
+		mtk_hwlro_rx_uninit(eth);
+		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
+			mtk_rx_clean(eth, &eth->rx_ring[i], 0);
+	}
+
+	kfree(eth->scratch_head);
+}
+
+/* ndo_tx_timeout: a TX queue stalled past watchdog_timeo; count the
+ * error and kick the global reset worker to recover the hardware.
+ */
+static void mtk_tx_timeout(struct net_device *dev)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+
+	eth->netdev[mac->id]->stats.tx_errors++;
+	netif_err(eth, tx_err, dev, "transmit timed out\n");
+	schedule_work(&eth->pending_work);
+}
+
+/* RX interrupt: mask further RX-done interrupts and hand work to NAPI. */
+static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
+{
+	struct mtk_eth *eth = _eth;
+
+	if (unlikely(!napi_schedule_prep(&eth->rx_napi)))
+		return IRQ_HANDLED;
+
+	__napi_schedule(&eth->rx_napi);
+	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
+
+	return IRQ_HANDLED;
+}
+
+/* TX interrupt: mask further TX-done interrupts and hand work to NAPI. */
+static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
+{
+	struct mtk_eth *eth = _eth;
+
+	if (unlikely(!napi_schedule_prep(&eth->tx_napi)))
+		return IRQ_HANDLED;
+
+	__napi_schedule(&eth->tx_napi);
+	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+
+	return IRQ_HANDLED;
+}
+
+/* Shared-IRQ handler (MTK_SHARED_INT SoCs): demux RX-done and TX-done
+ * causes and dispatch to the dedicated handlers.
+ */
+static irqreturn_t mtk_handle_irq(int irq, void *_eth)
+{
+	struct mtk_eth *eth = _eth;
+
+	if ((mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) &&
+	    (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT))
+		mtk_handle_irq_rx(irq, _eth);
+
+	if ((mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) &&
+	    (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT))
+		mtk_handle_irq_tx(irq, _eth);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* ndo_poll_controller (netconsole): run the RX handler with interrupts
+ * masked.
+ *
+ * Fix: mtk_handle_irq_rx() dereferences its second argument as a
+ * struct mtk_eth *, but the original code handed it the net_device
+ * pointer — pass @eth instead.
+ */
+static void mtk_poll_controller(struct net_device *dev)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+
+	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
+	mtk_handle_irq_rx(eth->irq[2], eth);
+	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+}
+#endif
+
+/* Allocate the rings via mtk_dma_init() and switch the QDMA/PDMA
+ * engines on with the per-generation global-config bits. Returns 0 or
+ * a negative errno (rings already freed on failure).
+ */
+static int mtk_start_dma(struct mtk_eth *eth)
+{
+	u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
+	int err;
+
+	err = mtk_dma_init(eth);
+	if (err) {
+		mtk_dma_free(eth);
+		return err;
+	}
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+		/* NOTE(review): the NETSYS_TX_V2 branch sets
+		 * MTK_RX_2B_OFFSET unconditionally instead of using
+		 * rx_2b_offset — confirm this is intended when
+		 * NET_IP_ALIGN is 0
+		 */
+		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_TX_V2))
+			mtk_w32(eth,
+				MTK_TX_DMA_EN | MTK_RX_DMA_EN |
+				MTK_DMA_SIZE_32DWORDS | MTK_TX_WB_DDONE |
+				MTK_NDP_CO_PRO | MTK_MUTLI_CNT |
+				MTK_RESV_BUF | MTK_WCOMP_EN |
+				MTK_DMAD_WR_WDONE | MTK_CHK_DDONE_EN |
+				MTK_RX_2B_OFFSET, MTK_QDMA_GLO_CFG);
+		else
+			mtk_w32(eth,
+				MTK_TX_DMA_EN |
+				MTK_DMA_SIZE_32DWORDS | MTK_NDP_CO_PRO |
+				MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
+				MTK_RX_BT_32DWORDS,
+				MTK_QDMA_GLO_CFG);
+
+		/* PDMA still services the RX side on QDMA SoCs */
+		mtk_w32(eth,
+			MTK_RX_DMA_EN | rx_2b_offset |
+			MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
+			MTK_PDMA_GLO_CFG);
+	} else {
+		mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
+			MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
+			MTK_PDMA_GLO_CFG);
+	}
+
+	return 0;
+}
+
+/* Point every GDM forward-config at @config (e.g. to-PDMA or drop-all),
+ * re-enable the RX checksum engines and, for DSA-attached ports, the
+ * MTK special tag; finish with a PSE reset pulse. No-op on MT7628,
+ * which has no GDM.
+ */
+static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
+{
+	int i;
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+		return;
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		u32 val;
+
+		/* default setup the forward port to send frame to PDMA */
+		val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i)) & ~0xffff;
+
+		/* Enable RX checksum */
+		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
+		val |= config;
+
+		if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i]))
+			val |= MTK_GDMA_SPECIAL_TAG;
+
+		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
+	}
+
+	/* Reset and enable PSE */
+	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
+	mtk_w32(eth, 0, MTK_RST_GL);
+}
+
+/* ndo_open: attach the PHY, bring up the shared DMA rings on first use
+ * (both MACs run on one set of rings, refcounted via dma_refcnt), then
+ * start phylink and the TX queue.
+ *
+ * Fix: if mtk_start_dma() fails, disconnect the PHY we just attached
+ * instead of leaking the connection.
+ */
+static int mtk_open(struct net_device *dev)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+	int err;
+
+	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
+	if (err) {
+		netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
+			   err);
+		return err;
+	}
+
+	/* we run 2 netdevs on the same dma ring so we only bring it up once */
+	if (!refcount_read(&eth->dma_refcnt)) {
+		err = mtk_start_dma(eth);
+		if (err) {
+			/* don't leave the PHY attached on failure */
+			phylink_disconnect_phy(mac->phylink);
+			return err;
+		}
+
+		mtk_gdm_config(eth, MTK_GDMA_TO_PDMA);
+
+		/* Indicates CDM to parse the MTK special tag from CPU */
+		if (netdev_uses_dsa(dev)) {
+			u32 val;
+
+			val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
+			mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
+			val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
+			mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
+		}
+
+		napi_enable(&eth->tx_napi);
+		napi_enable(&eth->rx_napi);
+		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+		mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+		refcount_set(&eth->dma_refcnt, 1);
+	} else {
+		refcount_inc(&eth->dma_refcnt);
+	}
+
+	phylink_start(mac->phylink);
+	netif_start_queue(dev);
+
+	return 0;
+}
+
+/* Clear the TX/RX enable bits in @glo_cfg, then poll up to ~200 ms for
+ * the engine to drain before the rings are torn down.
+ */
+static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
+{
+	int tries;
+
+	/* stop the dma engine */
+	spin_lock_bh(&eth->page_lock);
+	mtk_w32(eth, mtk_r32(eth, glo_cfg) &
+		~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
+		glo_cfg);
+	spin_unlock_bh(&eth->page_lock);
+
+	/* wait for dma stop */
+	for (tries = 0; tries < 10; tries++) {
+		if (!(mtk_r32(eth, glo_cfg) &
+		      (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)))
+			break;
+		msleep(20);
+	}
+}
+
+/* ndo_stop: stop phylink and the TX queues for this MAC, and shut the
+ * shared DMA down only when the last of the two MACs goes away.
+ */
+static int mtk_stop(struct net_device *dev)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+
+	phylink_stop(mac->phylink);
+
+	netif_tx_disable(dev);
+
+	/* counterpart of the connect in mtk_open(); NOTE(review):
+	 * mtk_remove() and mtk_uninit() call phylink_disconnect_phy()
+	 * again afterwards — confirm the repeat disconnect is benign
+	 */
+	phylink_disconnect_phy(mac->phylink);
+
+	/* only shutdown DMA if this is the last user */
+	if (!refcount_dec_and_test(&eth->dma_refcnt))
+		return 0;
+
+	/* stop forwarding frames into the PSE before tearing rings down */
+	mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
+
+	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
+	napi_disable(&eth->tx_napi);
+	napi_disable(&eth->rx_napi);
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+		mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
+	mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
+
+	mtk_dma_free(eth);
+
+	return 0;
+}
+
+/* Pulse @reset_bits in the ethsys RSTCTRL register: assert, hold ~1 ms,
+ * deassert, then give the blocks 10 ms to come out of reset.
+ */
+static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
+{
+	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, reset_bits, reset_bits);
+	usleep_range(1000, 1100);
+	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, reset_bits, ~reset_bits);
+	mdelay(10);
+}
+
+/* Gate every ethernet clock, in reverse of mtk_clk_enable() order. */
+static void mtk_clk_disable(struct mtk_eth *eth)
+{
+	int i;
+
+	for (i = MTK_CLK_MAX - 1; i >= 0; i--)
+		clk_disable_unprepare(eth->clks[i]);
+}
+
+/* Ungate all ethernet clocks; on failure, roll back the ones already
+ * enabled and return the error.
+ */
+static int mtk_clk_enable(struct mtk_eth *eth)
+{
+	int i, ret;
+
+	for (i = 0; i < MTK_CLK_MAX; i++) {
+		ret = clk_prepare_enable(eth->clks[i]);
+		if (ret) {
+			while (--i >= 0)
+				clk_disable_unprepare(eth->clks[i]);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/* One-time hardware bring-up: power/clocks, frame-engine reset, MAC
+ * link-down defaults, interrupt delay/grouping, and (NETSYS v2) PSE
+ * queue thresholds. Idempotent via the MTK_HW_INIT state bit.
+ * Returns 0 or a negative errno.
+ *
+ * Fix: dropped the unused local `val` (compiler warning).
+ */
+static int mtk_hw_init(struct mtk_eth *eth)
+{
+	int i, ret;
+
+	if (test_and_set_bit(MTK_HW_INIT, &eth->state))
+		return 0;
+
+	pm_runtime_enable(eth->dev);
+	pm_runtime_get_sync(eth->dev);
+
+	/* NOTE(review): the error paths below do not gate these clocks
+	 * again — confirm whether mtk_clk_disable() is needed there
+	 */
+	ret = mtk_clk_enable(eth);
+	if (ret)
+		goto err_disable_pm;
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+		ret = device_reset(eth->dev);
+		if (ret) {
+			dev_err(eth->dev, "MAC reset failed!\n");
+			goto err_disable_pm;
+		}
+
+		/* enable interrupt delay for RX */
+		mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
+
+		/* disable delay and normal interrupt */
+		mtk_tx_irq_disable(eth, ~0);
+		mtk_rx_irq_disable(eth, ~0);
+
+		return 0;
+	}
+
+	/* Non-MT7628 handling... */
+	ethsys_reset(eth, RSTCTRL_FE);
+	ethsys_reset(eth, RSTCTRL_PPE);
+
+	/* Set FE to PDMAv2 if necessary */
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
+		mtk_w32(eth, mtk_r32(eth, MTK_FE_GLO_MISC) | MTK_PDMA_V2, MTK_FE_GLO_MISC);
+
+	if (eth->pctl) {
+		/* Set GE2 driving and slew rate */
+		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
+
+		/* set GE2 TDSEL */
+		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
+
+		/* set GE2 TUNE */
+		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
+	}
+
+	/* Set linkdown as the default for each GMAC. Its own MCR would be set
+	 * up with the more appropriate value when mtk_mac_config call is being
+	 * invoked.
+	 */
+	for (i = 0; i < MTK_MAC_COUNT; i++)
+		mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
+
+	/* Enable RX VLan Offloading */
+	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
+
+	/* enable interrupt delay for RX/TX */
+	mtk_w32(eth, 0x8f0f8f0f, MTK_PDMA_DELAY_INT);
+	mtk_w32(eth, 0x8f0f8f0f, MTK_QDMA_DELAY_INT);
+
+	mtk_tx_irq_disable(eth, ~0);
+	mtk_rx_irq_disable(eth, ~0);
+
+	/* FE int grouping */
+	mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
+	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
+	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
+	mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
+	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
+		/* PSE config input/output queue threshold */
+		mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
+		mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
+		mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
+		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
+		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
+		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
+		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
+		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
+
+		mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
+		mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
+		mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
+		mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
+		mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
+		mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
+		mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
+		mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
+	}
+
+	return 0;
+
+err_disable_pm:
+	pm_runtime_put_sync(eth->dev);
+	pm_runtime_disable(eth->dev);
+
+	return ret;
+}
+
+/* Undo mtk_hw_init(): gate the clocks and drop the runtime-PM
+ * reference. No-op (returns 0) when the hardware was never brought up.
+ */
+static int mtk_hw_deinit(struct mtk_eth *eth)
+{
+	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
+		return 0;
+
+	mtk_clk_disable(eth);
+	pm_runtime_put_sync(eth->dev);
+	pm_runtime_disable(eth->dev);
+
+	return 0;
+}
+
+/* ndo_init: take the MAC address from the device tree, falling back to
+ * a random one when it is missing or invalid.
+ *
+ * Fix: dropped the __init annotation. This function is called through
+ * mtk_netdev_ops at register_netdev() time; with deferred probing or a
+ * sysfs-triggered rebind that can happen after .init.text has been
+ * freed, so it must live in regular .text.
+ */
+static int mtk_init(struct net_device *dev)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+	const char *mac_addr;
+
+	mac_addr = of_get_mac_address(mac->of_node);
+	if (!IS_ERR(mac_addr))
+		ether_addr_copy(dev->dev_addr, mac_addr);
+
+	/* If the mac address is invalid, use random mac address  */
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		eth_hw_addr_random(dev);
+		dev_err(eth->dev, "generated random MAC address %pM\n",
+			dev->dev_addr);
+	}
+
+	return 0;
+}
+
+/* ndo_uninit: detach the PHY and mask every TX/RX interrupt source. */
+static void mtk_uninit(struct net_device *dev)
+{
+	struct mtk_mac *priv = netdev_priv(dev);
+
+	phylink_disconnect_phy(priv->phylink);
+	mtk_tx_irq_disable(priv->hw, ~0);
+	mtk_rx_irq_disable(priv->hw, ~0);
+}
+
+/* ndo_do_ioctl: MII register access goes to phylink; everything else is
+ * forwarded to the mtk_eth_dbg private-ioctl handler.
+ *
+ * Fix: removed the unreachable `break` after `return` and the dead
+ * trailing `return -EOPNOTSUPP` (the default case already returned).
+ */
+static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		return phylink_mii_ioctl(mac->phylink, ifr, cmd);
+	default:
+		break;
+	}
+
+	/* default invoke the mtk_eth_dbg handler */
+	return mtk_do_priv_ioctl(dev, ifr, cmd);
+}
+
+/* Reset worker (scheduled from mtk_tx_timeout): under RTNL and the
+ * MTK_RESETTING bit, bring every running netdev down, power-cycle the
+ * frame engine, and bring the previously-running netdevs back up.
+ */
+static void mtk_pending_work(struct work_struct *work)
+{
+	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
+	int err, i;
+	unsigned long restart = 0;	/* bitmask of MACs that were up */
+
+	rtnl_lock();
+
+	dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
+
+	/* serialize against ethtool/ioctl paths that test MTK_RESETTING */
+	while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
+		cpu_relax();
+
+	dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
+	/* stop all devices to make sure that dma is properly shut down */
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!eth->netdev[i])
+			continue;
+		mtk_stop(eth->netdev[i]);
+		__set_bit(i, &restart);
+	}
+	dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);
+
+	/* restart underlying hardware such as power, clock, pin mux
+	 * and the connected phy
+	 */
+	mtk_hw_deinit(eth);
+
+	if (eth->dev->pins)
+		pinctrl_select_state(eth->dev->pins->p,
+				     eth->dev->pins->default_state);
+	mtk_hw_init(eth);
+
+	/* restart DMA and enable IRQs */
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!test_bit(i, &restart))
+			continue;
+		err = mtk_open(eth->netdev[i]);
+		if (err) {
+			netif_alert(eth, ifup, eth->netdev[i],
+			      "Driver up/down cycle failed, closing device.\n");
+			dev_close(eth->netdev[i]);
+		}
+	}
+
+	dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
+
+	clear_bit_unlock(MTK_RESETTING, &eth->state);
+
+	rtnl_unlock();
+}
+
+/* Free every allocated net_device of the frame engine. Returns 0. */
+static int mtk_free_dev(struct mtk_eth *eth)
+{
+	int i;
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (eth->netdev[i])
+			free_netdev(eth->netdev[i]);
+	}
+
+	return 0;
+}
+
+/* Unregister every registered net_device of the frame engine. Returns 0. */
+static int mtk_unreg_dev(struct mtk_eth *eth)
+{
+	int i;
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (eth->netdev[i])
+			unregister_netdev(eth->netdev[i]);
+	}
+
+	return 0;
+}
+
+/* Unregister and free all netdevs, then make sure the reset worker has
+ * finished. Returns 0.
+ */
+static int mtk_cleanup(struct mtk_eth *eth)
+{
+	mtk_unreg_dev(eth);
+	mtk_free_dev(eth);
+	cancel_work_sync(&eth->pending_work);
+
+	return 0;
+}
+
+/* ethtool get_link_ksettings: defer to phylink unless a hardware reset
+ * is in flight.
+ */
+static int mtk_get_link_ksettings(struct net_device *ndev,
+				  struct ethtool_link_ksettings *cmd)
+{
+	struct mtk_mac *priv = netdev_priv(ndev);
+
+	if (unlikely(test_bit(MTK_RESETTING, &priv->hw->state)))
+		return -EBUSY;
+
+	return phylink_ethtool_ksettings_get(priv->phylink, cmd);
+}
+
+/* ethtool set_link_ksettings: defer to phylink unless a hardware reset
+ * is in flight.
+ */
+static int mtk_set_link_ksettings(struct net_device *ndev,
+				  const struct ethtool_link_ksettings *cmd)
+{
+	struct mtk_mac *priv = netdev_priv(ndev);
+
+	if (unlikely(test_bit(MTK_RESETTING, &priv->hw->state)))
+		return -EBUSY;
+
+	return phylink_ethtool_ksettings_set(priv->phylink, cmd);
+}
+
+/* ethtool get_drvinfo: report driver name, bus info and stat count. */
+static void mtk_get_drvinfo(struct net_device *dev,
+			    struct ethtool_drvinfo *info)
+{
+	struct mtk_mac *priv = netdev_priv(dev);
+	struct mtk_eth *eth = priv->hw;
+
+	strlcpy(info->driver, eth->dev->driver->name, sizeof(info->driver));
+	strlcpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info));
+	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
+}
+
+/* ethtool get_msglevel: expose the shared (per-SoC) debug message mask. */
+static u32 mtk_get_msglevel(struct net_device *dev)
+{
+	struct mtk_mac *priv = netdev_priv(dev);
+
+	return priv->hw->msg_enable;
+}
+
+/* ethtool set_msglevel: update the shared debug message mask. */
+static void mtk_set_msglevel(struct net_device *dev, u32 value)
+{
+	struct mtk_mac *priv = netdev_priv(dev);
+
+	priv->hw->msg_enable = value;
+}
+
+/* ethtool nway_reset: restart autonegotiation via phylink. */
+static int mtk_nway_reset(struct net_device *dev)
+{
+	struct mtk_mac *priv = netdev_priv(dev);
+
+	if (unlikely(test_bit(MTK_RESETTING, &priv->hw->state)))
+		return -EBUSY;
+
+	return priv->phylink ? phylink_ethtool_nway_reset(priv->phylink)
+			     : -ENOTSUPP;
+}
+
+/* ethtool get_strings: copy out the stat names for ETH_SS_STATS. */
+static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+	int i;
+
+	if (stringset != ETH_SS_STATS)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
+		memcpy(data + i * ETH_GSTRING_LEN, mtk_ethtool_stats[i].str,
+		       ETH_GSTRING_LEN);
+}
+
+/* ethtool get_sset_count: only the statistics string set is supported. */
+static int mtk_get_sset_count(struct net_device *dev, int sset)
+{
+	if (sset == ETH_SS_STATS)
+		return ARRAY_SIZE(mtk_ethtool_stats);
+
+	return -EOPNOTSUPP;
+}
+
+/* ethtool get_ethtool_stats: snapshot the MIB counters into @data.
+ * Pulls fresh values from hardware when the device is up (best effort:
+ * skipped if the stats lock is contended), then copies a consistent
+ * snapshot under the u64_stats seqcount.
+ */
+static void mtk_get_ethtool_stats(struct net_device *dev,
+				  struct ethtool_stats *stats, u64 *data)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_hw_stats *hwstats = mac->hw_stats;
+	u64 *data_src, *data_dst;
+	unsigned int start;
+	int i;
+
+	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+		return;
+
+	if (netif_running(dev) && netif_device_present(dev)) {
+		if (spin_trylock_bh(&hwstats->stats_lock)) {
+			mtk_stats_update_mac(mac);
+			spin_unlock_bh(&hwstats->stats_lock);
+		}
+	}
+
+	/* hw_stats is addressed as a flat u64 array via per-stat offsets */
+	data_src = (u64 *)hwstats;
+
+	do {
+		data_dst = data;
+		start = u64_stats_fetch_begin_irq(&hwstats->syncp);
+
+		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
+			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
+	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
+}
+
+/* ethtool get_rxnfc: report the LRO ring count and programmed flow
+ * rules. Everything here is only meaningful on LRO-capable devices.
+ */
+static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+			 u32 *rule_locs)
+{
+	struct mtk_mac *priv = netdev_priv(dev);
+
+	if (!(dev->hw_features & NETIF_F_LRO))
+		return -EOPNOTSUPP;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_GRXRINGS:
+		cmd->data = MTK_MAX_RX_RING_NUM;
+		return 0;
+	case ETHTOOL_GRXCLSRLCNT:
+		cmd->rule_cnt = priv->hwlro_ip_cnt;
+		return 0;
+	case ETHTOOL_GRXCLSRULE:
+		return mtk_hwlro_get_fdir_entry(dev, cmd);
+	case ETHTOOL_GRXCLSRLALL:
+		return mtk_hwlro_get_fdir_all(dev, cmd, rule_locs);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/* ethtool set_rxnfc: insert or delete an LRO destination-IP rule. */
+static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+	if (!(dev->hw_features & NETIF_F_LRO))
+		return -EOPNOTSUPP;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_SRXCLSRLINS:
+		return mtk_hwlro_add_ipaddr(dev, cmd);
+	case ETHTOOL_SRXCLSRLDEL:
+		return mtk_hwlro_del_ipaddr(dev, cmd);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/* ethtool entry points; the rxnfc hooks only do real work on HW-LRO
+ * capable SoCs (they check NETIF_F_LRO in hw_features themselves).
+ */
+static const struct ethtool_ops mtk_ethtool_ops = {
+	.get_link_ksettings	= mtk_get_link_ksettings,
+	.set_link_ksettings	= mtk_set_link_ksettings,
+	.get_drvinfo		= mtk_get_drvinfo,
+	.get_msglevel		= mtk_get_msglevel,
+	.set_msglevel		= mtk_set_msglevel,
+	.nway_reset		= mtk_nway_reset,
+	.get_link		= ethtool_op_get_link,
+	.get_strings		= mtk_get_strings,
+	.get_sset_count		= mtk_get_sset_count,
+	.get_ethtool_stats	= mtk_get_ethtool_stats,
+	.get_rxnfc		= mtk_get_rxnfc,
+	.set_rxnfc              = mtk_set_rxnfc,
+};
+
+/* net_device callbacks shared by both GMAC netdevs */
+static const struct net_device_ops mtk_netdev_ops = {
+	.ndo_init		= mtk_init,
+	.ndo_uninit		= mtk_uninit,
+	.ndo_open		= mtk_open,
+	.ndo_stop		= mtk_stop,
+	.ndo_start_xmit		= mtk_start_xmit,
+	.ndo_set_mac_address	= mtk_set_mac_address,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_do_ioctl		= mtk_do_ioctl,
+	.ndo_tx_timeout		= mtk_tx_timeout,
+	.ndo_get_stats64        = mtk_get_stats64,
+	.ndo_fix_features	= mtk_fix_features,
+	.ndo_set_features	= mtk_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= mtk_poll_controller,
+#endif
+};
+
+/* Parse one "mediatek,eth-mac" DT node: allocate the net_device, the
+ * per-MAC state and hardware counters, and create the phylink
+ * instance. Returns 0 or a negative errno.
+ *
+ * Fix: the error path now clears eth->netdev[id]/eth->mac[id] after
+ * free_netdev() so later cleanup (mtk_free_dev/mtk_unreg_dev) cannot
+ * operate on a dangling pointer.
+ */
+static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
+{
+	const __be32 *_id = of_get_property(np, "reg", NULL);
+	struct phylink *phylink;
+	int phy_mode, id, err;
+	struct mtk_mac *mac;
+
+	if (!_id) {
+		dev_err(eth->dev, "missing mac id\n");
+		return -EINVAL;
+	}
+
+	id = be32_to_cpup(_id);
+	if (id >= MTK_MAC_COUNT) {
+		dev_err(eth->dev, "%d is not a valid mac id\n", id);
+		return -EINVAL;
+	}
+
+	if (eth->netdev[id]) {
+		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
+		return -EINVAL;
+	}
+
+	eth->netdev[id] = alloc_etherdev(sizeof(*mac));
+	if (!eth->netdev[id]) {
+		dev_err(eth->dev, "alloc_etherdev failed\n");
+		return -ENOMEM;
+	}
+	mac = netdev_priv(eth->netdev[id]);
+	eth->mac[id] = mac;
+	mac->id = id;
+	mac->hw = eth;
+	mac->of_node = np;
+
+	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
+	mac->hwlro_ip_cnt = 0;
+
+	/* hw_stats is devm-managed; no explicit free in the error path */
+	mac->hw_stats = devm_kzalloc(eth->dev,
+				     sizeof(*mac->hw_stats),
+				     GFP_KERNEL);
+	if (!mac->hw_stats) {
+		dev_err(eth->dev, "failed to allocate counter memory\n");
+		err = -ENOMEM;
+		goto free_netdev;
+	}
+	spin_lock_init(&mac->hw_stats->stats_lock);
+	u64_stats_init(&mac->hw_stats->syncp);
+	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
+
+	/* phylink create */
+	phy_mode = of_get_phy_mode(np);
+	if (phy_mode < 0) {
+		dev_err(eth->dev, "incorrect phy-mode\n");
+		err = -EINVAL;
+		goto free_netdev;
+	}
+
+	/* mac config is not set */
+	mac->interface = PHY_INTERFACE_MODE_NA;
+	mac->mode = MLO_AN_PHY;
+	mac->speed = SPEED_UNKNOWN;
+
+	mac->phylink_config.dev = &eth->netdev[id]->dev;
+	mac->phylink_config.type = PHYLINK_NETDEV;
+
+	phylink = phylink_create(&mac->phylink_config,
+				 of_fwnode_handle(mac->of_node),
+				 phy_mode, &mtk_phylink_ops);
+	if (IS_ERR(phylink)) {
+		err = PTR_ERR(phylink);
+		goto free_netdev;
+	}
+
+	mac->phylink = phylink;
+
+	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
+	eth->netdev[id]->watchdog_timeo = 5 * HZ;
+	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
+	eth->netdev[id]->base_addr = (unsigned long)eth->base;
+
+	eth->netdev[id]->hw_features = eth->soc->hw_features;
+	if (eth->hwlro)
+		eth->netdev[id]->hw_features |= NETIF_F_LRO;
+
+	eth->netdev[id]->vlan_features = eth->soc->hw_features &
+		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
+	eth->netdev[id]->features |= eth->soc->hw_features;
+	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
+
+	eth->netdev[id]->irq = eth->irq[0];
+	eth->netdev[id]->dev.of_node = np;
+
+	return 0;
+
+free_netdev:
+	free_netdev(eth->netdev[id]);
+	/* don't leave stale pointers behind for the caller's cleanup */
+	eth->netdev[id] = NULL;
+	eth->mac[id] = NULL;
+	return err;
+}
+
+/* Platform driver .probe: map registers, resolve regmaps/clocks/IRQs
+ * from the device tree, bring the hardware up, create one netdev per
+ * "mediatek,eth-mac" child node and register them, then set up the
+ * shared NAPI contexts and debugfs/procfs helpers.
+ *
+ * Fix: check the platform_get_resource() result before dereferencing
+ * it in the SRAM path (it returns NULL when the resource is missing).
+ */
+static int mtk_probe(struct platform_device *pdev)
+{
+	struct device_node *mac_np;
+	struct mtk_eth *eth;
+	int err, i;
+
+	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
+	if (!eth)
+		return -ENOMEM;
+
+	eth->soc = of_device_get_match_data(&pdev->dev);
+
+	eth->dev = &pdev->dev;
+	eth->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(eth->base))
+		return PTR_ERR(eth->base);
+
+	/* SoCs with dedicated SRAM place the scratch ring right after the
+	 * register window
+	 */
+	if (eth->soc->has_sram) {
+		struct resource *res;
+
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (!res)
+			return -EINVAL;
+		eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
+	}
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+		eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
+		eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
+	} else {
+		eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
+		eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
+	}
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+		eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
+		eth->ip_align = NET_IP_ALIGN;
+	} else {
+		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
+			eth->rx_dma_l4_valid = RX_DMA_L4_VALID_V2;
+		else
+			eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
+	}
+
+	spin_lock_init(&eth->page_lock);
+	spin_lock_init(&eth->tx_irq_lock);
+	spin_lock_init(&eth->rx_irq_lock);
+
+	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+		eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+							      "mediatek,ethsys");
+		if (IS_ERR(eth->ethsys)) {
+			dev_err(&pdev->dev, "no ethsys regmap found\n");
+			return PTR_ERR(eth->ethsys);
+		}
+	}
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
+		eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+							     "mediatek,infracfg");
+		if (IS_ERR(eth->infra)) {
+			dev_err(&pdev->dev, "no infracfg regmap found\n");
+			return PTR_ERR(eth->infra);
+		}
+	}
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
+		eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
+					  GFP_KERNEL);
+		if (!eth->sgmii)
+			return -ENOMEM;
+
+		err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
+				     eth->soc->ana_rgc3);
+
+		if (err)
+			return err;
+	}
+
+	if (eth->soc->required_pctl) {
+		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+							    "mediatek,pctl");
+		if (IS_ERR(eth->pctl)) {
+			dev_err(&pdev->dev, "no pctl regmap found\n");
+			return PTR_ERR(eth->pctl);
+		}
+	}
+
+	/* irq[0] is shared; irq[1]/irq[2] are dedicated TX/RX lines unless
+	 * the SoC only has a single interrupt
+	 */
+	for (i = 0; i < 3; i++) {
+		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
+			eth->irq[i] = eth->irq[0];
+		else
+			eth->irq[i] = platform_get_irq(pdev, i);
+		if (eth->irq[i] < 0) {
+			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
+			return -ENXIO;
+		}
+	}
+
+	/* optional clocks stay NULL; only soc->required_clks are fatal */
+	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
+		eth->clks[i] = devm_clk_get(eth->dev,
+					    mtk_clks_source_name[i]);
+		if (IS_ERR(eth->clks[i])) {
+			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
+				return -EPROBE_DEFER;
+			if (eth->soc->required_clks & BIT(i)) {
+				dev_err(&pdev->dev, "clock %s not found\n",
+					mtk_clks_source_name[i]);
+				return -EINVAL;
+			}
+			eth->clks[i] = NULL;
+		}
+	}
+
+	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
+	INIT_WORK(&eth->pending_work, mtk_pending_work);
+
+	err = mtk_hw_init(eth);
+	if (err)
+		return err;
+
+	eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
+
+	for_each_child_of_node(pdev->dev.of_node, mac_np) {
+		if (!of_device_is_compatible(mac_np,
+					     "mediatek,eth-mac"))
+			continue;
+
+		if (!of_device_is_available(mac_np))
+			continue;
+
+		err = mtk_add_mac(eth, mac_np);
+		if (err) {
+			of_node_put(mac_np);
+			goto err_deinit_hw;
+		}
+	}
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
+		err = devm_request_irq(eth->dev, eth->irq[0],
+				       mtk_handle_irq, 0,
+				       dev_name(eth->dev), eth);
+	} else {
+		err = devm_request_irq(eth->dev, eth->irq[1],
+				       mtk_handle_irq_tx, 0,
+				       dev_name(eth->dev), eth);
+		if (err)
+			goto err_free_dev;
+
+		err = devm_request_irq(eth->dev, eth->irq[2],
+				       mtk_handle_irq_rx, 0,
+				       dev_name(eth->dev), eth);
+	}
+	if (err)
+		goto err_free_dev;
+
+	/* No MT7628/88 support yet */
+	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+		err = mtk_mdio_init(eth);
+		if (err)
+			goto err_free_dev;
+	}
+
+	for (i = 0; i < MTK_MAX_DEVS; i++) {
+		if (!eth->netdev[i])
+			continue;
+
+		err = register_netdev(eth->netdev[i]);
+		if (err) {
+			dev_err(eth->dev, "error bringing up device\n");
+			goto err_deinit_mdio;
+		} else
+			netif_info(eth, probe, eth->netdev[i],
+				   "mediatek frame engine at 0x%08lx, irq %d\n",
+				   eth->netdev[i]->base_addr, eth->irq[0]);
+	}
+
+	/* we run 2 devices on the same DMA ring so we need a dummy device
+	 * for NAPI to work
+	 */
+	init_dummy_netdev(&eth->dummy_dev);
+	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
+		       MTK_NAPI_WEIGHT);
+	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
+		       MTK_NAPI_WEIGHT);
+
+	mtketh_debugfs_init(eth);
+	debug_proc_init(eth);
+
+	platform_set_drvdata(pdev, eth);
+
+	return 0;
+
+err_deinit_mdio:
+	mtk_mdio_cleanup(eth);
+err_free_dev:
+	mtk_free_dev(eth);
+err_deinit_hw:
+	mtk_hw_deinit(eth);
+
+	return err;
+}
+
+/* Platform driver .remove: stop both MACs (which shuts the shared DMA
+ * down), power the hardware off, drop the NAPI contexts and unregister
+ * and free the netdevs.
+ */
+static int mtk_remove(struct platform_device *pdev)
+{
+	struct mtk_eth *eth = platform_get_drvdata(pdev);
+	struct mtk_mac *mac;
+	int i;
+
+	/* stop all devices to make sure that dma is properly shut down */
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!eth->netdev[i])
+			continue;
+		mtk_stop(eth->netdev[i]);
+		mac = netdev_priv(eth->netdev[i]);
+		/* NOTE(review): mtk_stop() already disconnected the PHY;
+		 * confirm this second disconnect is harmless
+		 */
+		phylink_disconnect_phy(mac->phylink);
+	}
+
+	mtk_hw_deinit(eth);
+
+	netif_napi_del(&eth->tx_napi);
+	netif_napi_del(&eth->rx_napi);
+	mtk_cleanup(eth);
+	mtk_mdio_cleanup(eth);
+
+	return 0;
+}
+
+/* Per-SoC capability tables referenced by the of_device_id match data */
+static const struct mtk_soc_data mt2701_data = {
+	.caps = MT7623_CAPS | MTK_HWLRO,
+	.hw_features = MTK_HW_FEATURES,
+	.required_clks = MT7623_CLKS_BITMAP,
+	.required_pctl = true,
+	.has_sram = false,
+};
+
+static const struct mtk_soc_data mt7621_data = {
+	.caps = MT7621_CAPS,
+	.hw_features = MTK_HW_FEATURES,
+	.required_clks = MT7621_CLKS_BITMAP,
+	.required_pctl = false,
+	.has_sram = false,
+};
+
+static const struct mtk_soc_data mt7622_data = {
+	.ana_rgc3 = 0x2028,
+	.caps = MT7622_CAPS | MTK_HWLRO,
+	.hw_features = MTK_HW_FEATURES,
+	.required_clks = MT7622_CLKS_BITMAP,
+	.required_pctl = false,
+	.has_sram = false,
+};
+
+static const struct mtk_soc_data mt7623_data = {
+	.caps = MT7623_CAPS | MTK_HWLRO,
+	.hw_features = MTK_HW_FEATURES,
+	.required_clks = MT7623_CLKS_BITMAP,
+	.required_pctl = true,
+	.has_sram = false,
+};
+
+static const struct mtk_soc_data mt7629_data = {
+	.ana_rgc3 = 0x128,
+	.caps = MT7629_CAPS | MTK_HWLRO,
+	.hw_features = MTK_HW_FEATURES,
+	.required_clks = MT7629_CLKS_BITMAP,
+	.required_pctl = false,
+	.has_sram = false,
+};
+
+/* NOTE(review): no .hw_features here, so mt7986 netdevs advertise no
+ * offloads at all — confirm this is intentional for early bring-up
+ */
+static const struct mtk_soc_data mt7986_data = {
+	.ana_rgc3 = 0x128,
+	.caps = MT7986_CAPS,
+	.required_clks = MT7986_CLKS_BITMAP,
+	.required_pctl = false,
+	.has_sram = true,
+};
+
+static const struct mtk_soc_data rt5350_data = {
+	.caps = MT7628_CAPS,
+	.hw_features = MTK_HW_FEATURES_MT7628,
+	.required_clks = MT7628_CLKS_BITMAP,
+	.required_pctl = false,
+	.has_sram = false,
+};
+
+/* Device-tree match table binding each compatible to its SoC data */
+const struct of_device_id of_mtk_match[] = {
+	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
+	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
+	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
+	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
+	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
+	{ .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
+	{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
+	{},
+};
+MODULE_DEVICE_TABLE(of, of_mtk_match);
+
+/* Platform driver glue */
+static struct platform_driver mtk_driver = {
+	.probe = mtk_probe,
+	.remove = mtk_remove,
+	.driver = {
+		.name = "mtk_soc_eth",
+		.of_match_table = of_mtk_match,
+	},
+};
+
+module_platform_driver(mtk_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
+MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.h
new file mode 100755
index 0000000..f240e63
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -0,0 +1,1091 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
+ *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
+ *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
+ */
+
+#ifndef MTK_ETH_H
+#define MTK_ETH_H
+
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/of_net.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/refcount.h>
+#include <linux/phylink.h>
+
+#define MTK_QDMA_PAGE_SIZE	2048
+#define	MTK_MAX_RX_LENGTH	1536
+#define MTK_DMA_SIZE		2048
+#define MTK_NAPI_WEIGHT		256
+#define MTK_MAC_COUNT		2
+#define MTK_RX_ETH_HLEN		(VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
+#define MTK_RX_HLEN		(NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
+#define MTK_DMA_DUMMY_DESC	0xffffffff
+#define MTK_DEFAULT_MSG_ENABLE	(NETIF_MSG_DRV | \
+				 NETIF_MSG_PROBE | \
+				 NETIF_MSG_LINK | \
+				 NETIF_MSG_TIMER | \
+				 NETIF_MSG_IFDOWN | \
+				 NETIF_MSG_IFUP | \
+				 NETIF_MSG_RX_ERR | \
+				 NETIF_MSG_TX_ERR)
+#define MTK_HW_FEATURES		(NETIF_F_IP_CSUM | \
+				 NETIF_F_RXCSUM | \
+				 NETIF_F_HW_VLAN_CTAG_TX | \
+				 NETIF_F_HW_VLAN_CTAG_RX | \
+				 NETIF_F_SG | NETIF_F_TSO | \
+				 NETIF_F_TSO6 | \
+				 NETIF_F_IPV6_CSUM)
+#define MTK_SET_FEATURES	(NETIF_F_LRO | \
+				 NETIF_F_HW_VLAN_CTAG_RX)
+#define MTK_HW_FEATURES_MT7628	(NETIF_F_SG | NETIF_F_RXCSUM)
+#define NEXT_DESP_IDX(X, Y)	(((X) + 1) & ((Y) - 1))
+
+#define MTK_MAX_RX_RING_NUM	4
+#define MTK_HW_LRO_DMA_SIZE	8
+
+#define	MTK_MAX_LRO_RX_LENGTH		(4096 * 3)
+#define	MTK_MAX_LRO_IP_CNT		2
+#define	MTK_HW_LRO_TIMER_UNIT		1	/* 20 us */
+#define	MTK_HW_LRO_REFRESH_TIME		50000	/* 1 sec. */
+#define	MTK_HW_LRO_AGG_TIME		10	/* 200us */
+#define	MTK_HW_LRO_AGE_TIME		50	/* 1ms */
+#define	MTK_HW_LRO_MAX_AGG_CNT		64
+#define	MTK_HW_LRO_BW_THRE		3000
+#define	MTK_HW_LRO_REPLACE_DELTA	1000
+#define	MTK_HW_LRO_SDL_REMAIN_ROOM	1522
+
+/* Frame Engine Global Reset Register */
+#define MTK_RST_GL		0x04
+#define RST_GL_PSE		BIT(0)
+
+/* Frame Engine Interrupt Status Register */
+#define MTK_INT_STATUS2		0x08
+#define MTK_GDM1_AF		BIT(28)
+#define MTK_GDM2_AF		BIT(29)
+
+/* PDMA HW LRO Alter Flow Timer Register */
+#define MTK_PDMA_LRO_ALT_REFRESH_TIMER	0x1c
+
+/* Frame Engine Interrupt Grouping Register */
+#define MTK_FE_INT_GRP		0x20
+
+/* CDMQ Ingress Control Register */
+#define MTK_CDMQ_IG_CTRL	0x1400
+#define MTK_CDMQ_STAG_EN	BIT(0)
+
+/* CDMP Ingress Control Register */
+#define MTK_CDMP_IG_CTRL	0x400
+#define MTK_CDMP_STAG_EN	BIT(0)
+
+/* CDMP Egress Control Register */
+#define MTK_CDMP_EG_CTRL	0x404
+
+/* GDM Egress Control Register */
+#define MTK_GDMA_FWD_CFG(x)	(0x500 + (x * 0x1000))
+#define MTK_GDMA_SPECIAL_TAG	BIT(24)
+#define MTK_GDMA_ICS_EN		BIT(22)
+#define MTK_GDMA_TCS_EN		BIT(21)
+#define MTK_GDMA_UCS_EN		BIT(20)
+#define MTK_GDMA_TO_PDMA	0x0
+#define MTK_GDMA_DROP_ALL	0x7777
+
+/* Unicast Filter MAC Address Register - Low */
+#define MTK_GDMA_MAC_ADRL(x)	(0x508 + (x * 0x1000))
+
+/* Unicast Filter MAC Address Register - High */
+#define MTK_GDMA_MAC_ADRH(x)	(0x50C + (x * 0x1000))
+
+/* Internal SRAM offset */
+#define MTK_ETH_SRAM_OFFSET	0x40000
+
+/* FE global misc reg*/
+#define MTK_FE_GLO_MISC         0x124
+
+/* PSE Input Queue Reservation Register*/
+#define PSE_IQ_REV(x)		(0x140 + ((x - 1) * 0x4))
+
+/* PSE Output Queue Threshold Register*/
+#define PSE_OQ_TH(x)		(0x160 + ((x - 1) * 0x4))
+
+#define MTK_PDMA_V2		BIT(4)
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+#define CONFIG_MEDIATEK_NETSYS_RX_V2 1	/* NOTE(review): force-defining a CONFIG_* symbol in a header bypasses Kconfig -- confirm intentional */
+
+#ifdef CONFIG_MEDIATEK_NETSYS_RX_V2
+#define PDMA_BASE               0x6000	/* always taken: the symbol is defined just above */
+#else
+#define PDMA_BASE		0x4000	/* NOTE(review): dead branch given the unconditional define above */
+#endif
+
+#define QDMA_BASE               0x4400
+#else
+#define PDMA_BASE               0x0800	/* NETSYS v1 register bases */
+#define QDMA_BASE               0x1800
+#endif
+/* PDMA RX Base Pointer Register */
+#define MTK_PRX_BASE_PTR0	(PDMA_BASE + 0x100)
+#define MTK_PRX_BASE_PTR_CFG(x)	(MTK_PRX_BASE_PTR0 + (x * 0x10))
+
+/* PDMA RX Maximum Count Register */
+#define MTK_PRX_MAX_CNT0	(MTK_PRX_BASE_PTR0 + 0x04)
+#define MTK_PRX_MAX_CNT_CFG(x)	(MTK_PRX_MAX_CNT0 + (x * 0x10))
+
+/* PDMA RX CPU Pointer Register */
+#define MTK_PRX_CRX_IDX0	(MTK_PRX_BASE_PTR0 + 0x08)
+#define MTK_PRX_CRX_IDX_CFG(x)	(MTK_PRX_CRX_IDX0 + (x * 0x10))
+
+/* PDMA HW LRO Control Registers */
+#define MTK_PDMA_LRO_CTRL_DW0	(PDMA_BASE + 0x180)
+#define MTK_LRO_EN			BIT(0)
+#define MTK_L3_CKS_UPD_EN		BIT(7)
+#define MTK_LRO_ALT_PKT_CNT_MODE	BIT(21)
+#define MTK_LRO_RING_RELINQUISH_REQ	(0x7 << 26)
+#define MTK_LRO_RING_RELINQUISH_DONE	(0x7 << 29)
+
+#define MTK_PDMA_LRO_CTRL_DW1	(MTK_PDMA_LRO_CTRL_DW0 + 0x04)
+#define MTK_PDMA_LRO_CTRL_DW2	(MTK_PDMA_LRO_CTRL_DW0 + 0x08)
+#define MTK_PDMA_LRO_CTRL_DW3	(MTK_PDMA_LRO_CTRL_DW0 + 0x0c)
+#define MTK_ADMA_MODE		BIT(15)
+#define MTK_LRO_MIN_RXD_SDL	(MTK_HW_LRO_SDL_REMAIN_ROOM << 16)
+
+/* PDMA Global Configuration Register */
+#define MTK_PDMA_GLO_CFG	(PDMA_BASE + 0x204)
+#define MTK_MULTI_EN		BIT(10)
+#define MTK_PDMA_SIZE_8DWORDS	(1 << 4)
+
+/* PDMA Reset Index Register */
+#define MTK_PDMA_RST_IDX	(PDMA_BASE + 0x208)
+#define MTK_PST_DRX_IDX0	BIT(16)
+#define MTK_PST_DRX_IDX_CFG(x)	(MTK_PST_DRX_IDX0 << (x))
+
+/* PDMA Delay Interrupt Register */
+#define MTK_PDMA_DELAY_INT		(PDMA_BASE + 0x20c)
+#define MTK_PDMA_DELAY_RX_EN		BIT(15)
+#define MTK_PDMA_DELAY_RX_PINT		4
+#define MTK_PDMA_DELAY_RX_PINT_SHIFT	8
+#define MTK_PDMA_DELAY_RX_PTIME		4
+#define MTK_PDMA_DELAY_RX_DELAY		\
+	(MTK_PDMA_DELAY_RX_EN | MTK_PDMA_DELAY_RX_PTIME | \
+	(MTK_PDMA_DELAY_RX_PINT << MTK_PDMA_DELAY_RX_PINT_SHIFT))
+
+/* PDMA Interrupt Status Register */
+#define MTK_PDMA_INT_STATUS	(PDMA_BASE + 0x220)
+
+/* PDMA Interrupt Mask Register */
+#define MTK_PDMA_INT_MASK	(PDMA_BASE + 0x228)
+
+/* PDMA HW LRO Alter Flow Delta Register */
+#define MTK_PDMA_LRO_ALT_SCORE_DELTA	(PDMA_BASE + 0x24c)
+
+/* PDMA Interrupt grouping registers */
+#define MTK_PDMA_INT_GRP1	(PDMA_BASE + 0x250)
+#define MTK_PDMA_INT_GRP2	(PDMA_BASE + 0x254)
+
+/* PDMA HW LRO IP Setting Registers */
+#define MTK_LRO_RX_RING0_DIP_DW0	(PDMA_BASE + 0x304)
+#define MTK_LRO_DIP_DW0_CFG(x)		(MTK_LRO_RX_RING0_DIP_DW0 + (x * 0x40))
+#define MTK_RING_MYIP_VLD		BIT(9)
+
+/* PDMA HW LRO Ring Control Registers */
+#define MTK_LRO_RX_RING0_CTRL_DW1	(PDMA_BASE + 0x328)
+#define MTK_LRO_RX_RING0_CTRL_DW2	(PDMA_BASE + 0x32c)
+#define MTK_LRO_RX_RING0_CTRL_DW3	(PDMA_BASE + 0x330)
+#define MTK_LRO_CTRL_DW1_CFG(x)		(MTK_LRO_RX_RING0_CTRL_DW1 + (x * 0x40))
+#define MTK_LRO_CTRL_DW2_CFG(x)		(MTK_LRO_RX_RING0_CTRL_DW2 + (x * 0x40))
+#define MTK_LRO_CTRL_DW3_CFG(x)		(MTK_LRO_RX_RING0_CTRL_DW3 + (x * 0x40))
+#define MTK_RING_AGE_TIME_L		((MTK_HW_LRO_AGE_TIME & 0x3ff) << 22)
+#define MTK_RING_AGE_TIME_H		((MTK_HW_LRO_AGE_TIME >> 10) & 0x3f)
+#define MTK_RING_AUTO_LERAN_MODE	(3 << 6)	/* NOTE(review): "LERAN" is a typo for "LEARN"; kept as-is since renaming would break users of this header */
+#define MTK_RING_VLD			BIT(8)
+#define MTK_RING_MAX_AGG_TIME		((MTK_HW_LRO_AGG_TIME & 0xffff) << 10)
+#define MTK_RING_MAX_AGG_CNT_L		((MTK_HW_LRO_MAX_AGG_CNT & 0x3f) << 26)
+#define MTK_RING_MAX_AGG_CNT_H		((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3)
+
+/* QDMA TX Queue Configuration Registers */
+#define MTK_QTX_CFG(x)		(QDMA_BASE + (x * 0x10))
+#define QDMA_RES_THRES		4
+
+/* QDMA TX Queue Scheduler Registers */
+#define MTK_QTX_SCH(x)		(QDMA_BASE + 4 + (x * 0x10))
+
+/* QDMA RX Base Pointer Register */
+#define MTK_QRX_BASE_PTR0	(QDMA_BASE + 0x100)
+#define MTK_QRX_BASE_PTR_CFG(x)	(MTK_QRX_BASE_PTR0 + ((x) * 0x10))
+
+/* QDMA RX Maximum Count Register */
+#define MTK_QRX_MAX_CNT0	(QDMA_BASE + 0x104)
+#define MTK_QRX_MAX_CNT_CFG(x)	(MTK_QRX_MAX_CNT0 + ((x) * 0x10))
+
+/* QDMA RX CPU Pointer Register */
+#define MTK_QRX_CRX_IDX0	(QDMA_BASE + 0x108)
+#define MTK_QRX_CRX_IDX_CFG(x)	(MTK_QRX_CRX_IDX0 + ((x) * 0x10))
+
+/* QDMA RX DMA Pointer Register */
+#define MTK_QRX_DRX_IDX0	(QDMA_BASE + 0x10c)
+
+/* QDMA Global Configuration Register */
+#define MTK_QDMA_GLO_CFG	(QDMA_BASE + 0x204)
+#define MTK_RX_2B_OFFSET	BIT(31)
+#define MTK_RX_BT_32DWORDS	(3 << 11)
+#define MTK_NDP_CO_PRO		BIT(10)
+#define MTK_TX_WB_DDONE		BIT(6)
+#define MTK_DMA_SIZE_16DWORDS	(2 << 4)
+#define MTK_DMA_SIZE_32DWORDS	(3 << 4)
+#define MTK_RX_DMA_BUSY		BIT(3)
+#define MTK_TX_DMA_BUSY		BIT(1)
+#define MTK_RX_DMA_EN		BIT(2)
+#define MTK_TX_DMA_EN		BIT(0)
+#define MTK_DMA_BUSY_TIMEOUT	HZ
+
+/* QDMA V2 Global Configuration Register */
+#define MTK_CHK_DDONE_EN	BIT(28)
+#define MTK_DMAD_WR_WDONE	BIT(26)
+#define MTK_WCOMP_EN		BIT(24)
+#define MTK_RESV_BUF		(0x40 << 16)
+#define MTK_MUTLI_CNT		(0x4 << 12)
+
+/* QDMA Reset Index Register */
+#define MTK_QDMA_RST_IDX	(QDMA_BASE + 0x208)
+
+/* QDMA Delay Interrupt Register */
+#define MTK_QDMA_DELAY_INT	(QDMA_BASE + 0x20c)
+
+/* QDMA Flow Control Register */
+#define MTK_QDMA_FC_THRES	(QDMA_BASE + 0x210)
+#define FC_THRES_DROP_MODE	BIT(20)
+#define FC_THRES_DROP_EN	(7 << 16)
+#define FC_THRES_MIN		0x4444
+
+/* QDMA Interrupt Status Register */
+#define MTK_QDMA_INT_STATUS	(QDMA_BASE + 0x218)
+#ifdef CONFIG_MEDIATEK_NETSYS_RX_V2
+#define MTK_RX_DONE_DLY 	BIT(14)
+#else
+#define MTK_RX_DONE_DLY 	BIT(30)
+#endif
+#define MTK_RX_DONE_INT3	BIT(19)
+#define MTK_RX_DONE_INT2	BIT(18)
+#define MTK_RX_DONE_INT1	BIT(17)
+#define MTK_RX_DONE_INT0	BIT(16)
+#define MTK_TX_DONE_INT3	BIT(3)
+#define MTK_TX_DONE_INT2	BIT(2)
+#define MTK_TX_DONE_INT1	BIT(1)
+#define MTK_TX_DONE_INT0	BIT(0)
+#define MTK_RX_DONE_INT		MTK_RX_DONE_DLY
+#define MTK_TX_DONE_DLY         BIT(28)
+#define MTK_TX_DONE_INT         MTK_TX_DONE_DLY
+
+/* QDMA Interrupt grouping registers */
+#define MTK_QDMA_INT_GRP1	(QDMA_BASE + 0x220)
+#define MTK_QDMA_INT_GRP2	(QDMA_BASE + 0x224)
+#define MTK_RLS_DONE_INT	BIT(0)
+
+/* QDMA Interrupt Status Register */
+#define MTK_QDMA_INT_MASK	(QDMA_BASE + 0x21c)
+
+/* QDMA Interrupt Mask Register */
+#define MTK_QDMA_HRED2		(QDMA_BASE + 0x244)
+
+/* QDMA TX Forward CPU Pointer Register */
+#define MTK_QTX_CTX_PTR		(QDMA_BASE +0x300)
+
+/* QDMA TX Forward DMA Pointer Register */
+#define MTK_QTX_DTX_PTR		(QDMA_BASE +0x304)
+
+/* QDMA TX Release CPU Pointer Register */
+#define MTK_QTX_CRX_PTR		(QDMA_BASE +0x310)
+
+/* QDMA TX Release DMA Pointer Register */
+#define MTK_QTX_DRX_PTR		(QDMA_BASE +0x314)
+
+/* QDMA FQ Head Pointer Register */
+#define MTK_QDMA_FQ_HEAD	(QDMA_BASE +0x320)
+
+/* QDMA FQ Head Pointer Register */
+#define MTK_QDMA_FQ_TAIL	(QDMA_BASE +0x324)
+
+/* QDMA FQ Free Page Counter Register */
+#define MTK_QDMA_FQ_CNT		(QDMA_BASE +0x328)
+
+/* QDMA FQ Free Page Buffer Length Register */
+#define MTK_QDMA_FQ_BLEN	(QDMA_BASE +0x32c)
+
+/* GMA1 Received Good Byte Count Register */
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+#define MTK_GDM1_TX_GBCNT       0x1C00
+#else
+#define MTK_GDM1_TX_GBCNT	0x2400
+#endif
+#define MTK_STAT_OFFSET		0x40
+
+/* QDMA TX NUM */
+#define MTK_QDMA_TX_NUM		16
+#define MTK_QDMA_TX_MASK	((MTK_QDMA_TX_NUM) - 1)
+#define QID_LOW_BITS(x)         ((x) & 0xf)
+#define QID_HIGH_BITS(x)        ((((x) >> 4) & 0x3) << 20)
+#define QID_BITS_V2(x)		(((x) & 0x3f) << 16)
+
+/* QDMA V2 descriptor txd6 */
+#define TX_DMA_INS_VLAN_V2         BIT(16)
+
+/* QDMA V2 descriptor txd5 */
+#define TX_DMA_CHKSUM_V2           (0x7 << 28)
+#define TX_DMA_TSO_V2              BIT(31)
+
+/* QDMA V2 descriptor txd4 */
+#define TX_DMA_FPORT_SHIFT_V2      8
+#define TX_DMA_FPORT_MASK_V2       0xf
+#define TX_DMA_SWC_V2              BIT(30)
+
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+#define MTK_TX_DMA_BUF_LEN      0xffff
+#define MTK_TX_DMA_BUF_SHIFT    8
+#else
+#define MTK_TX_DMA_BUF_LEN      0x3fff
+#define MTK_TX_DMA_BUF_SHIFT    16
+#endif
+
+#ifdef CONFIG_MEDIATEK_NETSYS_RX_V2
+#define MTK_RX_DMA_BUF_LEN      0xffff
+#define MTK_RX_DMA_BUF_SHIFT    8
+#define RX_DMA_SPORT_SHIFT      26
+#define RX_DMA_SPORT_MASK       0xf
+#else
+#define MTK_RX_DMA_BUF_LEN      0x3fff
+#define MTK_RX_DMA_BUF_SHIFT    16
+#define RX_DMA_SPORT_SHIFT      19
+#define RX_DMA_SPORT_MASK       0x7
+#endif
+
+/* QDMA descriptor txd4 */
+#define TX_DMA_CHKSUM		(0x7 << 29)
+#define TX_DMA_TSO		BIT(28)
+#define TX_DMA_FPORT_SHIFT	25
+#define TX_DMA_FPORT_MASK	0x7
+#define TX_DMA_INS_VLAN		BIT(16)
+
+/* QDMA descriptor txd3 */
+#define TX_DMA_OWNER_CPU	BIT(31)
+#define TX_DMA_LS0		BIT(30)
+#define TX_DMA_PLEN0(_x)	(((_x) & MTK_TX_DMA_BUF_LEN) << MTK_TX_DMA_BUF_SHIFT)
+#define TX_DMA_PLEN1(_x)	((_x) & MTK_TX_DMA_BUF_LEN)
+#define TX_DMA_SWC		BIT(14)
+#define TX_DMA_SDL(_x)		(TX_DMA_PLEN0(_x))
+
+/* PDMA on MT7628 */
+#define TX_DMA_DONE		BIT(31)
+#define TX_DMA_LS1		BIT(14)
+#define TX_DMA_DESP2_DEF	(TX_DMA_LS0 | TX_DMA_DONE)
+
+/* QDMA descriptor rxd2 */
+#define RX_DMA_DONE		BIT(31)
+#define RX_DMA_LSO		BIT(30)
+#define RX_DMA_PLEN0(_x)	(((_x) & MTK_RX_DMA_BUF_LEN) << MTK_RX_DMA_BUF_SHIFT)
+#define RX_DMA_GET_PLEN0(_x)	(((_x) >> MTK_RX_DMA_BUF_SHIFT) & MTK_RX_DMA_BUF_LEN)
+#define RX_DMA_VTAG		BIT(15)
+
+/* QDMA descriptor rxd3 */
+#define RX_DMA_VID(_x)		((_x) & VLAN_VID_MASK)
+#define RX_DMA_TCI(_x)		((_x) & (VLAN_PRIO_MASK | VLAN_VID_MASK))
+#define RX_DMA_VPID(_x)		(((_x) >> 16) & 0xffff)
+
+/* QDMA descriptor rxd4 */
+#define RX_DMA_L4_VALID		BIT(24)
+#define RX_DMA_L4_VALID_PDMA	BIT(30)		/* when PDMA is used */
+#define RX_DMA_SPECIAL_TAG	BIT(22)		/* switch header in packet */
+
+#define RX_DMA_GET_SPORT(_x) 	(((_x) >> RX_DMA_SPORT_SHIFT) & RX_DMA_SPORT_MASK)
+
+/* PDMA V2 descriptor rxd3 */
+#define RX_DMA_VTAG_V2          BIT(0)
+#define RX_DMA_L4_VALID_V2      BIT(2)
+
+/* PDMA V2 descriptor rxd4 */
+#define RX_DMA_VID_V2(_x)       RX_DMA_VID(_x)
+#define RX_DMA_TCI_V2(_x)	(((_x) >> 1) & (VLAN_PRIO_MASK | VLAN_VID_MASK))
+#define RX_DMA_VPID_V2(x3, x4)	((((x3) & 1) << 15) | (((x4) >> 17) & 0x7fff))
+
+/* PHY Indirect Access Control registers */
+#define MTK_PHY_IAC		0x10004
+#define PHY_IAC_ACCESS		BIT(31)
+#define PHY_IAC_READ		BIT(19)
+#define PHY_IAC_WRITE		BIT(18)
+#define PHY_IAC_START		BIT(16)
+#define PHY_IAC_ADDR_SHIFT	20
+#define PHY_IAC_REG_SHIFT	25
+#define PHY_IAC_TIMEOUT		HZ
+
+#define MTK_MAC_MISC		0x1000c
+#define MTK_MUX_TO_ESW		BIT(0)
+
+/* Mac control registers */
+#define MTK_MAC_MCR(x)		(0x10100 + (x * 0x100))
+#define MAC_MCR_MAX_RX_1536	BIT(24)
+#define MAC_MCR_IPG_CFG		(BIT(18) | BIT(16))
+#define MAC_MCR_FORCE_MODE	BIT(15)
+#define MAC_MCR_TX_EN		BIT(14)
+#define MAC_MCR_RX_EN		BIT(13)
+#define MAC_MCR_BACKOFF_EN	BIT(9)
+#define MAC_MCR_BACKPR_EN	BIT(8)
+#define MAC_MCR_FORCE_RX_FC	BIT(5)
+#define MAC_MCR_FORCE_TX_FC	BIT(4)
+#define MAC_MCR_SPEED_1000	BIT(3)
+#define MAC_MCR_SPEED_100	BIT(2)
+#define MAC_MCR_FORCE_DPX	BIT(1)
+#define MAC_MCR_FORCE_LINK	BIT(0)
+#define MAC_MCR_FORCE_LINK_DOWN	(MAC_MCR_FORCE_MODE)
+
+/* Mac status registers */
+#define MTK_MAC_MSR(x)		(0x10108 + (x * 0x100))
+#define MAC_MSR_EEE1G		BIT(7)
+#define MAC_MSR_EEE100M		BIT(6)
+#define MAC_MSR_RX_FC		BIT(5)
+#define MAC_MSR_TX_FC		BIT(4)
+#define MAC_MSR_SPEED_1000	BIT(3)
+#define MAC_MSR_SPEED_100	BIT(2)
+#define MAC_MSR_SPEED_MASK	(MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)
+#define MAC_MSR_DPX		BIT(1)
+#define MAC_MSR_LINK		BIT(0)
+
+/* TRGMII RXC control register */
+#define TRGMII_RCK_CTRL		0x10300
+#define DQSI0(x)		((x << 0) & GENMASK(6, 0))
+#define DQSI1(x)		((x << 8) & GENMASK(14, 8))
+#define RXCTL_DMWTLAT(x)	((x << 16) & GENMASK(18, 16))
+#define RXC_RST			BIT(31)
+#define RXC_DQSISEL		BIT(30)
+#define RCK_CTRL_RGMII_1000	(RXC_DQSISEL | RXCTL_DMWTLAT(2) | DQSI1(16))
+#define RCK_CTRL_RGMII_10_100	RXCTL_DMWTLAT(2)
+
+#define NUM_TRGMII_CTRL		5
+
+/* TRGMII RXC control register */
+#define TRGMII_TCK_CTRL		0x10340
+#define TXCTL_DMWTLAT(x)	((x << 16) & GENMASK(18, 16))
+#define TXC_INV			BIT(30)
+#define TCK_CTRL_RGMII_1000	TXCTL_DMWTLAT(2)
+#define TCK_CTRL_RGMII_10_100	(TXC_INV | TXCTL_DMWTLAT(2))
+
+/* TRGMII TX Drive Strength */
+#define TRGMII_TD_ODT(i)	(0x10354 + 8 * (i))
+#define  TD_DM_DRVP(x)		((x) & 0xf)
+#define  TD_DM_DRVN(x)		(((x) & 0xf) << 4)
+
+/* TRGMII Interface mode register */
+#define INTF_MODE		0x10390
+#define TRGMII_INTF_DIS		BIT(0)
+#define TRGMII_MODE		BIT(1)
+#define TRGMII_CENTRAL_ALIGNED	BIT(2)
+#define INTF_MODE_RGMII_1000    (TRGMII_MODE | TRGMII_CENTRAL_ALIGNED)
+#define INTF_MODE_RGMII_10_100  0
+
+/* GPIO port control registers for GMAC 2*/
+#define GPIO_OD33_CTRL8		0x4c0
+#define GPIO_BIAS_CTRL		0xed0
+#define GPIO_DRV_SEL10		0xf00
+
+/* ethernet subsystem chip id register */
+#define ETHSYS_CHIPID0_3	0x0
+#define ETHSYS_CHIPID4_7	0x4
+#define MT7623_ETH		7623
+#define MT7622_ETH		7622
+#define MT7621_ETH		7621
+
+/* ethernet system control register */
+#define ETHSYS_SYSCFG		0x10
+#define SYSCFG_DRAM_TYPE_DDR2	BIT(4)
+
+/* ethernet subsystem config register */
+#define ETHSYS_SYSCFG0		0x14
+#define SYSCFG0_GE_MASK		0x3
+#define SYSCFG0_GE_MODE(x, y)	(x << (12 + (y * 2)))
+#define SYSCFG0_SGMII_MASK     GENMASK(9, 8)
+#define SYSCFG0_SGMII_GMAC1    ((2 << 8) & SYSCFG0_SGMII_MASK)
+#define SYSCFG0_SGMII_GMAC2    ((3 << 8) & SYSCFG0_SGMII_MASK)
+#define SYSCFG0_SGMII_GMAC1_V2 BIT(9)
+#define SYSCFG0_SGMII_GMAC2_V2 BIT(8)
+
+
+/* ethernet subsystem clock register */
+#define ETHSYS_CLKCFG0		0x2c
+#define ETHSYS_TRGMII_CLK_SEL362_5	BIT(11)
+#define ETHSYS_TRGMII_MT7621_MASK	(BIT(5) | BIT(6))
+#define ETHSYS_TRGMII_MT7621_APLL	BIT(6)
+#define ETHSYS_TRGMII_MT7621_DDR_PLL	BIT(5)
+
+/* ethernet reset control register */
+#define ETHSYS_RSTCTRL		0x34
+#define RSTCTRL_FE		BIT(6)
+#define RSTCTRL_PPE		BIT(31)
+
+/* SGMII subsystem config registers */
+/* Register to auto-negotiation restart */
+#define SGMSYS_PCS_CONTROL_1	0x0
+#define SGMII_AN_RESTART	BIT(9)
+#define SGMII_ISOLATE		BIT(10)
+#define SGMII_AN_ENABLE		BIT(12)
+#define SGMII_LINK_STATYS	BIT(18)	/* NOTE(review): "STATYS" is a typo for "STATUS"; kept as-is to avoid breaking users of this header */
+#define SGMII_AN_ABILITY	BIT(19)
+#define SGMII_AN_COMPLETE	BIT(21)
+#define SGMII_PCS_FAULT		BIT(23)
+#define SGMII_AN_EXPANSION_CLR	BIT(30)
+
+/* Register to programmable link timer, the unit in 2 * 8ns */
+#define SGMSYS_PCS_LINK_TIMER	0x18
+#define SGMII_LINK_TIMER_DEFAULT	(0x186a0 & GENMASK(19, 0))
+
+/* Register to control remote fault */
+#define SGMSYS_SGMII_MODE		0x20
+#define SGMII_IF_MODE_BIT0		BIT(0)
+#define SGMII_SPEED_DUPLEX_AN		BIT(1)
+#define SGMII_SPEED_10			0x0
+#define SGMII_SPEED_100			BIT(2)
+#define SGMII_SPEED_1000		BIT(3)
+#define SGMII_DUPLEX_FULL		BIT(4)
+#define SGMII_IF_MODE_BIT5		BIT(5)
+#define SGMII_REMOTE_FAULT_DIS		BIT(8)
+#define SGMII_CODE_SYNC_SET_VAL		BIT(9)
+#define SGMII_CODE_SYNC_SET_EN		BIT(10)
+#define SGMII_SEND_AN_ERROR_EN		BIT(11)
+#define SGMII_IF_MODE_MASK		GENMASK(5, 1)
+
+/* Register to set SGMII speed, ANA RG_ Control Signals III*/
+#define SGMSYS_ANA_RG_CS3	0x2028
+#define RG_PHY_SPEED_MASK	(BIT(2) | BIT(3))
+#define RG_PHY_SPEED_1_25G	0x0
+#define RG_PHY_SPEED_3_125G	BIT(2)
+
+/* Register to power up QPHY */
+#define SGMSYS_QPHY_PWR_STATE_CTRL 0xe8
+#define	SGMII_PHYA_PWD		BIT(4)
+
+/* Infrasys subsystem config registers */
+#define INFRA_MISC2            0x70c
+#define CO_QPHY_SEL            BIT(0)
+#define GEPHY_MAC_SEL          BIT(1)
+
+/*MDIO control*/
+#define MII_MMD_ACC_CTL_REG             0x0d
+#define MII_MMD_ADDR_DATA_REG           0x0e
+#define MMD_OP_MODE_DATA BIT(14)
+
+/* MT7628/88 specific stuff */
+#define MT7628_PDMA_OFFSET	0x0800
+#define MT7628_SDM_OFFSET	0x0c00
+
+#define MT7628_TX_BASE_PTR0	(MT7628_PDMA_OFFSET + 0x00)
+#define MT7628_TX_MAX_CNT0	(MT7628_PDMA_OFFSET + 0x04)
+#define MT7628_TX_CTX_IDX0	(MT7628_PDMA_OFFSET + 0x08)
+#define MT7628_TX_DTX_IDX0	(MT7628_PDMA_OFFSET + 0x0c)
+#define MT7628_PST_DTX_IDX0	BIT(0)
+
+#define MT7628_SDM_MAC_ADRL	(MT7628_SDM_OFFSET + 0x0c)
+#define MT7628_SDM_MAC_ADRH	(MT7628_SDM_OFFSET + 0x10)
+
+struct mtk_rx_dma {
+	unsigned int rxd1;	/* hardware RX descriptor words (bit layouts in RX_DMA_* above) */
+	unsigned int rxd2;
+	unsigned int rxd3;
+	unsigned int rxd4;
+#ifdef CONFIG_MEDIATEK_NETSYS_RX_V2
+	unsigned int rxd5;	/* NETSYS RX v2 widens the descriptor to 8 words */
+	unsigned int rxd6;
+	unsigned int rxd7;
+	unsigned int rxd8;
+#endif
+} __packed __aligned(4);
+
+struct mtk_tx_dma {
+	unsigned int txd1;	/* hardware TX descriptor words (bit layouts in TX_DMA_* above) */
+	unsigned int txd2;
+	unsigned int txd3;
+	unsigned int txd4;
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+	unsigned int txd5;	/* NETSYS v2 widens the descriptor to 8 words */
+	unsigned int txd6;
+	unsigned int txd7;
+	unsigned int txd8;
+#endif
+} __packed __aligned(4);
+
+struct mtk_eth;
+struct mtk_mac;
+
+/* struct mtk_hw_stats - the structure that holds the traffic statistics.
+ * @stats_lock:		make sure that stats operations are atomic
+ * @reg_offset:		the status register offset of the SoC
+ * @syncp:		u64_stats_sync protecting 64-bit counter reads
+ *
+ * All of the supported SoCs have hardware counters for traffic statistics.
+ * Whenever the status IRQ triggers we can read the latest stats from these
+ * counters and store them in this struct.
+ */
+struct mtk_hw_stats {
+	u64 tx_bytes;
+	u64 tx_packets;
+	u64 tx_skip;
+	u64 tx_collisions;
+	u64 rx_bytes;
+	u64 rx_packets;
+	u64 rx_overflow;
+	u64 rx_fcs_errors;
+	u64 rx_short_errors;
+	u64 rx_long_errors;
+	u64 rx_checksum_errors;
+	u64 rx_flow_control_packets;
+
+	spinlock_t		stats_lock;
+	u32			reg_offset;
+	struct u64_stats_sync	syncp;
+};
+
+enum mtk_tx_flags {
+	/* PDMA descriptor can point at 1-2 segments. This enum allows us to
+	 * track how memory was allocated so that it can be freed properly.
+	 */
+	MTK_TX_FLAGS_SINGLE0	= 0x01,
+	MTK_TX_FLAGS_PAGE0	= 0x02,
+
+	/* MTK_TX_FLAGS_FPORTx allows tracking which port the transmitted
+	 * SKB out instead of looking up through hardware TX descriptor.
+	 */
+	MTK_TX_FLAGS_FPORT0	= 0x04,
+	MTK_TX_FLAGS_FPORT1	= 0x08,
+};
+
+/* This enum allows us to identify how the clock is defined on the array of the
+ * clock in the order
+ */
+enum mtk_clks_map {
+	MTK_CLK_ETHIF,
+	MTK_CLK_SGMIITOP,
+	MTK_CLK_ESW,
+	MTK_CLK_GP0,
+	MTK_CLK_GP1,
+	MTK_CLK_GP2,
+	MTK_CLK_FE,
+	MTK_CLK_TRGPLL,
+	MTK_CLK_SGMII_TX_250M,
+	MTK_CLK_SGMII_RX_250M,
+	MTK_CLK_SGMII_CDR_REF,
+	MTK_CLK_SGMII_CDR_FB,
+	MTK_CLK_SGMII2_TX_250M,
+	MTK_CLK_SGMII2_RX_250M,
+	MTK_CLK_SGMII2_CDR_REF,
+	MTK_CLK_SGMII2_CDR_FB,
+	MTK_CLK_SGMII_CK,
+	MTK_CLK_ETH2PLL,
+	MTK_CLK_WOCPU0,
+	MTK_CLK_WOCPU1,
+	MTK_CLK_MAX
+};
+
+#define MT7623_CLKS_BITMAP	(BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) |  \
+				 BIT(MTK_CLK_GP1) | BIT(MTK_CLK_GP2) | \
+				 BIT(MTK_CLK_TRGPLL))
+#define MT7622_CLKS_BITMAP	(BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) |  \
+				 BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \
+				 BIT(MTK_CLK_GP2) | \
+				 BIT(MTK_CLK_SGMII_TX_250M) | \
+				 BIT(MTK_CLK_SGMII_RX_250M) | \
+				 BIT(MTK_CLK_SGMII_CDR_REF) | \
+				 BIT(MTK_CLK_SGMII_CDR_FB) | \
+				 BIT(MTK_CLK_SGMII_CK) | \
+				 BIT(MTK_CLK_ETH2PLL))
+#define MT7621_CLKS_BITMAP	(0)
+#define MT7628_CLKS_BITMAP	(0)
+#define MT7629_CLKS_BITMAP	(BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) |  \
+				 BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \
+				 BIT(MTK_CLK_GP2) | BIT(MTK_CLK_FE) | \
+				 BIT(MTK_CLK_SGMII_TX_250M) | \
+				 BIT(MTK_CLK_SGMII_RX_250M) | \
+				 BIT(MTK_CLK_SGMII_CDR_REF) | \
+				 BIT(MTK_CLK_SGMII_CDR_FB) | \
+				 BIT(MTK_CLK_SGMII2_TX_250M) | \
+				 BIT(MTK_CLK_SGMII2_RX_250M) | \
+				 BIT(MTK_CLK_SGMII2_CDR_REF) | \
+				 BIT(MTK_CLK_SGMII2_CDR_FB) | \
+				 BIT(MTK_CLK_SGMII_CK) | \
+				 BIT(MTK_CLK_ETH2PLL) | BIT(MTK_CLK_SGMIITOP))
+
+#define MT7986_CLKS_BITMAP	(BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \
+                                 BIT(MTK_CLK_WOCPU1) | BIT(MTK_CLK_WOCPU0) | \
+                                 BIT(MTK_CLK_SGMII_TX_250M) | \
+                                 BIT(MTK_CLK_SGMII_RX_250M) | \
+                                 BIT(MTK_CLK_SGMII_CDR_REF) | \
+                                 BIT(MTK_CLK_SGMII_CDR_FB) | \
+                                 BIT(MTK_CLK_SGMII2_TX_250M) | \
+                                 BIT(MTK_CLK_SGMII2_RX_250M) | \
+                                 BIT(MTK_CLK_SGMII2_CDR_REF) | \
+                                 BIT(MTK_CLK_SGMII2_CDR_FB))
+
+enum mtk_dev_state {
+	MTK_HW_INIT,
+	MTK_RESETTING
+};
+
+/* struct mtk_tx_buf -	This struct holds the pointers to the memory pointed at
+ *			by the TX descriptors
+ * @skb:		The SKB pointer of the packet being sent
+ * @dma_addr0:		The base addr of the first segment
+ * @dma_len0:		The length of the first segment
+ * @dma_addr1:		The base addr of the second segment
+ * @dma_len1:		The length of the second segment
+ */
+struct mtk_tx_buf {
+	struct sk_buff *skb;
+	u32 flags;
+	DEFINE_DMA_UNMAP_ADDR(dma_addr0);
+	DEFINE_DMA_UNMAP_LEN(dma_len0);
+	DEFINE_DMA_UNMAP_ADDR(dma_addr1);
+	DEFINE_DMA_UNMAP_LEN(dma_len1);
+};
+
+/* struct mtk_tx_ring -	This struct holds info describing a TX ring
+ * @dma:		The descriptor ring
+ * @buf:		The memory pointed at by the ring
+ * @phys:		The physical addr of tx_buf
+ * @next_free:		Pointer to the next free descriptor
+ * @last_free:		Pointer to the last free descriptor
+ * @thresh:		The threshold of minimum amount of free descriptors
+ * @free_count:		QDMA uses a linked list. Track how many free descriptors
+ *			are present
+ */
+struct mtk_tx_ring {
+	struct mtk_tx_dma *dma;
+	struct mtk_tx_buf *buf;
+	dma_addr_t phys;
+	struct mtk_tx_dma *next_free;
+	struct mtk_tx_dma *last_free;
+	u16 thresh;
+	atomic_t free_count;
+	int dma_size;
+	struct mtk_tx_dma *dma_pdma;	/* For MT7628/88 PDMA handling */
+	dma_addr_t phys_pdma;
+	int cpu_idx;
+};
+
+/* PDMA rx ring mode */
+enum mtk_rx_flags {
+	MTK_RX_FLAGS_NORMAL = 0,
+	MTK_RX_FLAGS_HWLRO,
+	MTK_RX_FLAGS_QDMA,
+};
+
+/* struct mtk_rx_ring -	This struct holds info describing a RX ring
+ * @dma:		The descriptor ring
+ * @data:		The memory pointed at by the ring
+ * @phys:		The physical addr of rx_buf
+ * @frag_size:		How big can each fragment be
+ * @buf_size:		The size of each packet buffer
+ * @calc_idx:		The current head of ring
+ */
+struct mtk_rx_ring {
+	struct mtk_rx_dma *dma;
+	u8 **data;
+	dma_addr_t phys;
+	u16 frag_size;
+	u16 buf_size;
+	u16 dma_size;
+	bool calc_idx_update;
+	u16 calc_idx;
+	u32 crx_idx_reg;
+};
+
+enum mkt_eth_capabilities {
+	MTK_RGMII_BIT = 0,
+	MTK_TRGMII_BIT,
+	MTK_SGMII_BIT,
+	MTK_ESW_BIT,
+	MTK_GEPHY_BIT,
+	MTK_MUX_BIT,
+	MTK_INFRA_BIT,
+	MTK_SHARED_SGMII_BIT,
+	MTK_HWLRO_BIT,
+	MTK_SHARED_INT_BIT,
+	MTK_TRGMII_MT7621_CLK_BIT,
+	MTK_QDMA_BIT,
+	MTK_NETSYS_TX_V2_BIT,
+	MTK_NETSYS_RX_V2_BIT,
+	MTK_SOC_MT7628_BIT,
+
+	/* MUX BITS*/
+	MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
+	MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT,
+	MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT,
+	MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT,
+	MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT,
+
+	/* PATH BITS */
+	MTK_ETH_PATH_GMAC1_RGMII_BIT,
+	MTK_ETH_PATH_GMAC1_TRGMII_BIT,
+	MTK_ETH_PATH_GMAC1_SGMII_BIT,
+	MTK_ETH_PATH_GMAC2_RGMII_BIT,
+	MTK_ETH_PATH_GMAC2_SGMII_BIT,
+	MTK_ETH_PATH_GMAC2_GEPHY_BIT,
+	MTK_ETH_PATH_GDM1_ESW_BIT,
+};
+
+/* Supported hardware group on SoCs */
+#define MTK_RGMII		BIT(MTK_RGMII_BIT)
+#define MTK_TRGMII		BIT(MTK_TRGMII_BIT)
+#define MTK_SGMII		BIT(MTK_SGMII_BIT)
+#define MTK_ESW			BIT(MTK_ESW_BIT)
+#define MTK_GEPHY		BIT(MTK_GEPHY_BIT)
+#define MTK_MUX			BIT(MTK_MUX_BIT)
+#define MTK_INFRA		BIT(MTK_INFRA_BIT)
+#define MTK_SHARED_SGMII	BIT(MTK_SHARED_SGMII_BIT)
+#define MTK_HWLRO		BIT(MTK_HWLRO_BIT)
+#define MTK_SHARED_INT		BIT(MTK_SHARED_INT_BIT)
+#define MTK_TRGMII_MT7621_CLK	BIT(MTK_TRGMII_MT7621_CLK_BIT)
+#define MTK_QDMA		BIT(MTK_QDMA_BIT)
+#define MTK_NETSYS_TX_V2	BIT(MTK_NETSYS_TX_V2_BIT)
+#define MTK_NETSYS_RX_V2	BIT(MTK_NETSYS_RX_V2_BIT)
+#define MTK_SOC_MT7628		BIT(MTK_SOC_MT7628_BIT)
+
+#define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW		\
+	BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
+#define MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY	\
+	BIT(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT)
+#define MTK_ETH_MUX_U3_GMAC2_TO_QPHY		\
+	BIT(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT)
+#define MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII	\
+	BIT(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT)
+#define MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII	\
+	BIT(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT)
+
+/* Supported path present on SoCs */
+#define MTK_ETH_PATH_GMAC1_RGMII	BIT(MTK_ETH_PATH_GMAC1_RGMII_BIT)
+#define MTK_ETH_PATH_GMAC1_TRGMII	BIT(MTK_ETH_PATH_GMAC1_TRGMII_BIT)
+#define MTK_ETH_PATH_GMAC1_SGMII	BIT(MTK_ETH_PATH_GMAC1_SGMII_BIT)
+#define MTK_ETH_PATH_GMAC2_RGMII	BIT(MTK_ETH_PATH_GMAC2_RGMII_BIT)
+#define MTK_ETH_PATH_GMAC2_SGMII	BIT(MTK_ETH_PATH_GMAC2_SGMII_BIT)
+#define MTK_ETH_PATH_GMAC2_GEPHY	BIT(MTK_ETH_PATH_GMAC2_GEPHY_BIT)
+#define MTK_ETH_PATH_GDM1_ESW		BIT(MTK_ETH_PATH_GDM1_ESW_BIT)
+
+#define MTK_GMAC1_RGMII		(MTK_ETH_PATH_GMAC1_RGMII | MTK_RGMII)
+#define MTK_GMAC1_TRGMII	(MTK_ETH_PATH_GMAC1_TRGMII | MTK_TRGMII)
+#define MTK_GMAC1_SGMII		(MTK_ETH_PATH_GMAC1_SGMII | MTK_SGMII)
+#define MTK_GMAC2_RGMII		(MTK_ETH_PATH_GMAC2_RGMII | MTK_RGMII)
+#define MTK_GMAC2_SGMII		(MTK_ETH_PATH_GMAC2_SGMII | MTK_SGMII)
+#define MTK_GMAC2_GEPHY		(MTK_ETH_PATH_GMAC2_GEPHY | MTK_GEPHY)
+#define MTK_GDM1_ESW		(MTK_ETH_PATH_GDM1_ESW | MTK_ESW)
+
+/* MUXes present on SoCs */
+/* 0: GDM1 -> GMAC1, 1: GDM1 -> ESW */
+#define MTK_MUX_GDM1_TO_GMAC1_ESW (MTK_ETH_MUX_GDM1_TO_GMAC1_ESW | MTK_MUX)
+
+/* 0: GMAC2 -> GEPHY, 1: GMAC0 -> GePHY */
+#define MTK_MUX_GMAC2_GMAC0_TO_GEPHY    \
+	(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY | MTK_MUX | MTK_INFRA)
+
+/* 0: U3 -> QPHY, 1: GMAC2 -> QPHY */
+#define MTK_MUX_U3_GMAC2_TO_QPHY        \
+	(MTK_ETH_MUX_U3_GMAC2_TO_QPHY | MTK_MUX | MTK_INFRA)
+
+/* 2: GMAC1 -> SGMII, 3: GMAC2 -> SGMII */
+#define MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII      \
+	(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_MUX | \
+	MTK_SHARED_SGMII)
+
+/* 0: GMACx -> GEPHY, 1: GMACx -> SGMII where x is 1 or 2 */
+#define MTK_MUX_GMAC12_TO_GEPHY_SGMII   \
+	(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII | MTK_MUX)
+
+#define MTK_HAS_CAPS(caps, _x)		(((caps) & (_x)) == (_x))
+
+#define MT7621_CAPS  (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | \
+		      MTK_GMAC2_RGMII | MTK_SHARED_INT | \
+		      MTK_TRGMII_MT7621_CLK | MTK_QDMA)
+
+#define MT7622_CAPS  (MTK_GMAC1_RGMII | MTK_GMAC1_SGMII | MTK_GMAC2_RGMII | \
+		      MTK_GMAC2_SGMII | MTK_GDM1_ESW | \
+		      MTK_MUX_GDM1_TO_GMAC1_ESW | \
+		      MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_QDMA)
+
+#define MT7623_CAPS  (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | MTK_GMAC2_RGMII | \
+		      MTK_QDMA)
+
+#define MT7628_CAPS  (MTK_SHARED_INT | MTK_SOC_MT7628)
+
+#define MT7629_CAPS  (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
+		      MTK_GDM1_ESW | MTK_MUX_GDM1_TO_GMAC1_ESW | \
+		      MTK_MUX_GMAC2_GMAC0_TO_GEPHY | \
+		      MTK_MUX_U3_GMAC2_TO_QPHY | \
+		      MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA)
+
+#ifdef CONFIG_MEDIATEK_NETSYS_RX_V2
+#define MT7986_CAPS   (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \
+                       MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
+                       MTK_NETSYS_TX_V2 | MTK_NETSYS_RX_V2)
+#else
+#define MT7986_CAPS   (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \
+                       MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
+                       MTK_NETSYS_TX_V2)
+#endif
+
+/* struct mtk_soc_data -	This is the structure holding all differences
+ *				among various platforms
+ * @ana_rgc3:                   The offset for register ANA_RGC3 related to
+ *				sgmiisys syscon
+ * @caps			Flags shown the extra capability for the SoC
+ * @hw_features			Flags shown HW features
+ * @required_clks		Flags shown the bitmap for required clocks on
+ *				the target SoC
+ * @required_pctl		A bool value to show whether the SoC requires
+ *				the extra setup for those pins used by GMAC.
+ */
+struct mtk_soc_data {
+	u32             ana_rgc3;
+	u32		caps;	/* bitmap over enum mkt_eth_capabilities (MTK_* flags) */
+	u32		required_clks;	/* bitmap over enum mtk_clks_map */
+	bool		required_pctl;
+	netdev_features_t hw_features;
+	bool		has_sram;	/* presumably: DMA rings placed in internal SRAM (see MTK_ETH_SRAM_OFFSET) -- confirm against users */
+};
+
+/* currently no SoC has more than 2 macs */
+#define MTK_MAX_DEVS			2
+
+#define MTK_SGMII_PHYSPEED_AN          BIT(31)
+#define MTK_SGMII_PHYSPEED_MASK        GENMASK(2, 0)
+#define MTK_SGMII_PHYSPEED_1000        BIT(0)
+#define MTK_SGMII_PHYSPEED_2500        BIT(1)
+#define MTK_HAS_FLAGS(flags, _x)       (((flags) & (_x)) == (_x))
+
+/* struct mtk_sgmii -  This is the structure holding sgmii regmap and its
+ *                     characteristics
+ * @regmap:            The register map pointing at the range used to setup
+ *                     SGMII modes; one entry per MAC (MTK_MAX_DEVS)
+ * @flags:             The mode the sgmii wants to run on, one entry per MAC;
+ *                     built from the MTK_SGMII_PHYSPEED_* bits above
+ * @ana_rgc3:          The offset refers to register ANA_RGC3 related to regmap
+ */
+
+struct mtk_sgmii {
+	struct regmap   *regmap[MTK_MAX_DEVS];
+	u32             flags[MTK_MAX_DEVS];
+	u32             ana_rgc3;
+};
+
+/* struct mtk_eth -	This is the main data structure for holding the state
+ *			of the driver
+ * @dev:		The device pointer
+ * @base:		The mapped register i/o base
+ * @page_lock:		Make sure that register operations are atomic
+ * @tx_irq_lock:	Make sure that IRQ register operations are atomic
+ * @rx_irq_lock:	Make sure that IRQ register operations are atomic
+ * @dummy_dev:		we run 2 netdevs on 1 physical DMA ring and need a
+ *			dummy for NAPI to work
+ * @netdev:		The netdev instances
+ * @mac:		Each netdev is linked to a physical MAC
+ * @irq:		The IRQ that we are using
+ * @msg_enable:		Ethtool msg level
+ * @sysclk:		System clock rate - NOTE(review): unit (Hz?) not
+ *			visible here, confirm with users
+ * @ethsys:		The register map pointing at the range used to setup
+ *			MII modes
+ * @infra:              The register map pointing at the range used to setup
+ *                      SGMII and GePHY path
+ * @sgmii:		Pointer to the shared SGMII state (struct mtk_sgmii)
+ * @pctl:		The register map pointing at the range used to setup
+ *			GMAC port drive/slew values
+ * @hwlro:		Whether hardware LRO is supported/enabled - confirm
+ * @dma_refcnt:		track how many netdevs are using the DMA engine
+ * @tx_ring:		Pointer to the memory holding info about the TX ring
+ * @rx_ring:		Pointer to the memory holding info about the RX ring
+ * @rx_ring_qdma:	Pointer to the memory holding info about the QDMA RX ring
+ * @tx_napi:		The TX NAPI struct
+ * @rx_napi:		The RX NAPI struct
+ * @scratch_ring:	Newer SoCs need memory for a second HW managed TX ring
+ * @phy_scratch_ring:	physical address of scratch_ring
+ * @scratch_head:	The scratch memory that scratch_ring points to.
+ * @clks:		clock array for all clocks required
+ * @mii_bus:		If there is a bus we need to create an instance for it
+ * @pending_work:	The workqueue used to reset the dma ring
+ * @state:		Initialization and runtime state of the device
+ * @soc:		Holding specific data among various SoCs
+ * @tx_int_mask_reg:	Per-SoC TX interrupt mask register offset - presumably
+ *			set from @soc data; verify at the assignment site
+ * @tx_int_status_reg:	Per-SoC TX interrupt status register offset - verify
+ * @rx_dma_l4_valid:	RX descriptor bit flagging a valid L4 checksum - verify
+ * @ip_align:		Extra RX byte offset for IP header alignment - verify
+ */
+
+struct mtk_eth {
+	struct device			*dev;
+	void __iomem			*base;
+	spinlock_t			page_lock;
+	spinlock_t			tx_irq_lock;
+	spinlock_t			rx_irq_lock;
+	struct net_device		dummy_dev;
+	struct net_device		*netdev[MTK_MAX_DEVS];
+	struct mtk_mac			*mac[MTK_MAX_DEVS];
+	int				irq[3];
+	u32				msg_enable;
+	unsigned long			sysclk;
+	struct regmap			*ethsys;
+	struct regmap                   *infra;
+	struct mtk_sgmii                *sgmii;
+	struct regmap			*pctl;
+	bool				hwlro;
+	refcount_t			dma_refcnt;
+	struct mtk_tx_ring		tx_ring;
+	struct mtk_rx_ring		rx_ring[MTK_MAX_RX_RING_NUM];
+	struct mtk_rx_ring		rx_ring_qdma;
+	struct napi_struct		tx_napi;
+	struct napi_struct		rx_napi;
+	struct mtk_tx_dma		*scratch_ring;
+	dma_addr_t			phy_scratch_ring;
+	void				*scratch_head;
+	struct clk			*clks[MTK_CLK_MAX];
+
+	struct mii_bus			*mii_bus;
+	struct work_struct		pending_work;
+	unsigned long			state;
+
+	const struct mtk_soc_data	*soc;
+
+	u32				tx_int_mask_reg;
+	u32				tx_int_status_reg;
+	u32				rx_dma_l4_valid;
+	int				ip_align;
+};
+
+/* struct mtk_mac -	the structure that holds the info about the MACs of the
+ *			SoC
+ * @id:			The number of the MAC
+ * @interface:		Interface mode kept for detecting change in hw settings
+ * @mode:		Link autoneg mode (phylink MLO_AN_*) - presumably;
+ *			confirm at the assignment site
+ * @speed:		Current/last negotiated link speed - confirm
+ * @of_node:		Our devicetree node
+ * @phylink:		The phylink instance driving this MAC
+ * @phylink_config:	Configuration passed to phylink for this MAC
+ * @hw:			Backpointer to our main data structure
+ * @hw_stats:		Packet statistics counter
+ * @hwlro_ip:		IPv4 addresses programmed for HW LRO flow matching
+ * @hwlro_ip_cnt:	Number of valid entries in @hwlro_ip
+ */
+struct mtk_mac {
+	int				id;
+	phy_interface_t			interface;
+	unsigned int			mode;
+	int				speed;
+	struct device_node		*of_node;
+	struct phylink			*phylink;
+	struct phylink_config		phylink_config;
+	struct mtk_eth			*hw;
+	struct mtk_hw_stats		*hw_stats;
+	__be32				hwlro_ip[MTK_MAX_LRO_IP_CNT];
+	int				hwlro_ip_cnt;
+};
+
+/* the struct describing the SoC. these are declared in the soc_xyz.c files */
+extern const struct of_device_id of_mtk_match[];
+
+/* read the hardware status register */
+void mtk_stats_update_mac(struct mtk_mac *mac);
+
+void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg);
+u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
+
+int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *np,
+		   u32 ana_rgc3);
+int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id);
+int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id,
+			       const struct phylink_link_state *state);
+void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id);
+
+int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
+int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
+int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
+
+#endif /* MTK_ETH_H */
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/Makefile b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/Makefile
new file mode 100644
index 0000000..bf1bbcb
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/Makefile
@@ -0,0 +1,5 @@
+ccflags-y=-Werror
+
+obj-$(CONFIG_NET_MEDIATEK_HNAT)         += mtkhnat.o
+mtkhnat-objs := hnat.o hnat_nf_hook.o hnat_debugfs.o hnat_mcast.o
+mtkhnat-$(CONFIG_NET_DSA_MT7530)	+= hnat_stag.o
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
new file mode 100644
index 0000000..3e87791
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
@@ -0,0 +1,665 @@
+/*   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; version 2 of the License
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
+ *   Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/if.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "nf_hnat_mtk.h"
+#include "hnat.h"
+
+struct mtk_hnat *hnat_priv;
+
+int (*ra_sw_nat_hook_rx)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(ra_sw_nat_hook_rx);
+int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no) = NULL;
+EXPORT_SYMBOL(ra_sw_nat_hook_tx);
+
+void (*ppe_dev_register_hook)(struct net_device *dev) = NULL;
+EXPORT_SYMBOL(ppe_dev_register_hook);
+void (*ppe_dev_unregister_hook)(struct net_device *dev) = NULL;
+EXPORT_SYMBOL(ppe_dev_unregister_hook);
+
+/* Timer callback: switch the PPE SMA mode back to "forward to CPU and let
+ * HW build the FOE entry" (SMA_FWD_CPU_BUILD_ENTRY), re-arming hardware
+ * learning after it was parked by hnat_disable_hook(). @t is unused.
+ */
+static void hnat_sma_build_entry(struct timer_list *t)
+{
+	cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, SMA, SMA_FWD_CPU_BUILD_ENTRY);
+}
+
+/* Enable/disable the PPE cache.
+ * The CAH_X_MODE bit is pulsed 1->0 first - presumably this flushes the
+ * cache before the enable state changes (TODO confirm against the PPE
+ * datasheet) - then CAH_EN is set to @enable (0 or 1).
+ */
+void hnat_cache_ebl(int enable)
+{
+	cr_set_field(hnat_priv->ppe_base + PPE_CAH_CTRL, CAH_X_MODE, 1);
+	cr_set_field(hnat_priv->ppe_base + PPE_CAH_CTRL, CAH_X_MODE, 0);
+	cr_set_field(hnat_priv->ppe_base + PPE_CAH_CTRL, CAH_EN, enable);
+}
+
+/* Timer callback: periodically reset the PPE timestamp counter to avoid
+ * wrap-around issues. With cache and TCP/UDP aging paused, the counter at
+ * fe_base + 0x0010 is zeroed and every BIND entry's time_stamp is rewritten
+ * from the (now reset) counter, then aging/cache are re-enabled. Re-arms
+ * itself every 14400 s (4 h). @t is unused.
+ */
+static void hnat_reset_timestamp(struct timer_list *t)
+{
+	struct foe_entry *entry;
+	int hash_index;
+
+	hnat_cache_ebl(0);
+	cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, TCP_AGE, 0);
+	cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, UDP_AGE, 0);
+	writel(0, hnat_priv->fe_base + 0x0010);	/* reset HW timestamp counter */
+
+	for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
+		entry = hnat_priv->foe_table_cpu + hash_index;
+		if (entry->bfib1.state == BIND)
+			entry->bfib1.time_stamp =
+				readl(hnat_priv->fe_base + 0x0010) & (0xFFFF);
+	}
+
+	cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, TCP_AGE, 1);
+	cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, UDP_AGE, 1);
+	hnat_cache_ebl(1);
+
+	mod_timer(&hnat_priv->hnat_reset_timestamp_timer, jiffies + 14400 * HZ);
+}
+
+/* Read-modify-write: set the bits in @bs on register @reg. Not atomic with
+ * respect to concurrent writers of the same register.
+ */
+static void cr_set_bits(void __iomem *reg, u32 bs)
+{
+	u32 val = readl(reg);
+
+	val |= bs;
+	writel(val, reg);
+}
+
+/* Read-modify-write: clear the bits in @bs on register @reg. Not atomic with
+ * respect to concurrent writers of the same register.
+ */
+static void cr_clr_bits(void __iomem *reg, u32 bs)
+{
+	u32 val = readl(reg);
+
+	val &= ~bs;
+	writel(val, reg);
+}
+
+/* Read-modify-write a register bit-field.
+ * @field is the field's bit mask; @val is the field value, shifted into place
+ * via ffs() (position of the lowest set mask bit). @field must therefore be a
+ * contiguous, non-zero mask, and @val must fit within it - an oversized @val
+ * would corrupt bits above the field (callers are trusted).
+ */
+void cr_set_field(void __iomem *reg, u32 field, u32 val)
+{
+	unsigned int tv = readl(reg);
+
+	tv &= ~field;
+	tv |= ((val) << (ffs((unsigned int)field) - 1));
+	writel(tv, reg);
+}
+
+/* Boundary entries can't be used to accelerate data flow: for every 128-entry
+ * bank of the FOE table, mark the 8 known-bad offsets as static (udib1.sta=1)
+ * so the PPE never binds a flow to them. Iterates until entry_base passes the
+ * configured table size (foe_etry_num is a multiple of 128 per DEF_ETRY_NUM).
+ */
+static void exclude_boundary_entry(struct foe_entry *foe_table_cpu)
+{
+	int entry_base = 0;
+	int bad_entry, i, j;
+	struct foe_entry *foe_entry;
+	/*these entries are boundary every 128 entries*/
+	int boundary_entry_offset[8] = { 12, 25, 38, 51, 76, 89, 102, 115};
+
+	if (!foe_table_cpu)
+		return;
+
+	for (i = 0; entry_base < hnat_priv->foe_etry_num; i++) {
+		/* set boundary entries as static*/
+		for (j = 0; j < 8; j++) {
+			bad_entry = entry_base + boundary_entry_offset[j];
+			foe_entry = &foe_table_cpu[bad_entry];
+			foe_entry->udib1.sta = 1;
+		}
+		entry_base = (i + 1) * 128;
+	}
+}
+
+/* Steer a GMAC's ingress traffic to/from the PPE.
+ * @id:     0 selects GDMA1, non-zero selects GDMA2
+ * @enable: non-zero forces all frames to the PPE; zero restores forwarding
+ *          to the CPU via PDMA - but only if the register is currently in
+ *          the "all to PPE" state, so an unrelated configuration set by
+ *          someone else is left untouched.
+ */
+void set_gmac_ppe_fwd(int id, int enable)
+{
+	void __iomem *reg;
+	u32 val;
+
+	reg = hnat_priv->fe_base + (id ? GDMA2_FWD_CFG : GDMA1_FWD_CFG);
+
+	if (enable) {
+		cr_set_bits(reg, BITS_GDM_ALL_FRC_P_PPE);
+
+		return;
+	}
+
+	/*disabled */
+	val = readl(reg);
+	if ((val & GDM_ALL_FRC_MASK) == BITS_GDM_ALL_FRC_P_PPE)
+		cr_set_field(reg, GDM_ALL_FRC_MASK,
+			     BITS_GDM_ALL_FRC_P_CPU_PDMA);
+}
+
+/* Allocate and program the PPE: FOE table (and optional per-flow MIB table),
+ * hashing, aging, keep-alive, binding rate limits, and finally enable the
+ * PPE engine. Returns 0 on success, -1 on allocation failure.
+ * NOTE(review): returning -1 instead of -ENOMEM is unusual for kernel code;
+ * callers only test non-zero, but consider proper errnos. Earlier partial
+ * allocations are not freed on the later failure paths - hnat_probe() relies
+ * on hnat_stop() to clean up.
+ */
+static int hnat_start(void)
+{
+	u32 foe_table_sz;
+	u32 foe_mib_tb_sz;
+	int etry_num_cfg;
+
+	/* map the FOE table: start at the largest configured entry count and
+	 * halve it until a contiguous DMA allocation succeeds
+	 */
+	for (etry_num_cfg = DEF_ETRY_NUM_CFG ; etry_num_cfg >= 0 ; etry_num_cfg--, hnat_priv->foe_etry_num /= 2) {
+		foe_table_sz = hnat_priv->foe_etry_num * sizeof(struct foe_entry);
+		hnat_priv->foe_table_cpu = dma_alloc_coherent(
+			hnat_priv->dev, foe_table_sz, &hnat_priv->foe_table_dev, GFP_KERNEL);
+
+		if (hnat_priv->foe_table_cpu)
+			break;
+	}
+
+	if (!hnat_priv->foe_table_cpu)
+		return -1;
+	dev_info(hnat_priv->dev, "FOE entry number = %d\n", hnat_priv->foe_etry_num);
+
+	writel(hnat_priv->foe_table_dev, hnat_priv->ppe_base + PPE_TB_BASE);
+	/* NOTE(review): dma_alloc_coherent() already returns zeroed memory on
+	 * modern kernels, so this memset is likely redundant - confirm for 5.4
+	 */
+	memset(hnat_priv->foe_table_cpu, 0, foe_table_sz);
+
+	if (hnat_priv->data->version == MTK_HNAT_V1)
+		exclude_boundary_entry(hnat_priv->foe_table_cpu);
+
+	if (hnat_priv->data->per_flow_accounting) {
+		foe_mib_tb_sz = hnat_priv->foe_etry_num * sizeof(struct mib_entry);
+		hnat_priv->foe_mib_cpu = dma_alloc_coherent(hnat_priv->dev, foe_mib_tb_sz,
+						       &hnat_priv->foe_mib_dev, GFP_KERNEL);
+		if (!hnat_priv->foe_mib_cpu)
+			return -1;
+		writel(hnat_priv->foe_mib_dev, hnat_priv->ppe_base + PPE_MIB_TB_BASE);
+		memset(hnat_priv->foe_mib_cpu, 0, foe_mib_tb_sz);
+
+		/* NOTE(review): kcalloc() would guard the multiplication
+		 * against overflow here
+		 */
+		hnat_priv->acct =
+			kzalloc(hnat_priv->foe_etry_num * sizeof(struct hnat_accounting),
+				GFP_KERNEL);
+		if (!hnat_priv->acct)
+			return -1;
+	}
+	/* setup hashing */
+	cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, TB_ETRY_NUM, etry_num_cfg);
+	cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, HASH_MODE, HASH_MODE_1);
+	writel(HASH_SEED_KEY, hnat_priv->ppe_base + PPE_HASH_SEED);
+	cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, XMODE, 0);
+	cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, TB_ENTRY_SIZE, ENTRY_80B);
+	cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, SMA, SMA_FWD_CPU_BUILD_ENTRY);
+
+	/* set ip proto */
+	writel(0xFFFFFFFF, hnat_priv->ppe_base + PPE_IP_PROT_CHK);
+
+	/* setup caching */
+	hnat_cache_ebl(1);
+
+	/* enable FOE */
+	cr_set_bits(hnat_priv->ppe_base + PPE_FLOW_CFG,
+		    BIT_UDP_IP4F_NAT_EN | BIT_IPV4_NAT_EN | BIT_IPV4_NAPT_EN |
+		    BIT_IPV4_NAT_FRAG_EN | BIT_IPV4_HASH_GREK |
+		    BIT_IPV4_DSL_EN | BIT_IPV6_6RD_EN |
+		    BIT_IPV6_3T_ROUTE_EN | BIT_IPV6_5T_ROUTE_EN);
+
+	if (hnat_priv->data->version == MTK_HNAT_V4)
+		cr_set_bits(hnat_priv->ppe_base + PPE_FLOW_CFG,
+			    BIT_IPV4_MAPE_EN | BIT_IPV4_MAPT_EN);
+
+	/* setup FOE aging */
+	cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, NTU_AGE, 1);
+	cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, UNBD_AGE, 1);
+	cr_set_field(hnat_priv->ppe_base + PPE_UNB_AGE, UNB_MNP, 1000);
+	cr_set_field(hnat_priv->ppe_base + PPE_UNB_AGE, UNB_DLTA, 3);
+	cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, TCP_AGE, 1);
+	cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, UDP_AGE, 1);
+	cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, FIN_AGE, 1);
+	cr_set_field(hnat_priv->ppe_base + PPE_BND_AGE_0, UDP_DLTA, 12);
+	cr_set_field(hnat_priv->ppe_base + PPE_BND_AGE_0, NTU_DLTA, 1);
+	cr_set_field(hnat_priv->ppe_base + PPE_BND_AGE_1, FIN_DLTA, 1);
+	cr_set_field(hnat_priv->ppe_base + PPE_BND_AGE_1, TCP_DLTA, 7);
+
+	/* setup FOE ka */
+	cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, SCAN_MODE, 2);
+	cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, KA_CFG, 3);
+	cr_set_field(hnat_priv->ppe_base + PPE_KA, KA_T, 1);
+	cr_set_field(hnat_priv->ppe_base + PPE_KA, TCP_KA, 1);
+	cr_set_field(hnat_priv->ppe_base + PPE_KA, UDP_KA, 1);
+	cr_set_field(hnat_priv->ppe_base + PPE_BIND_LMT_1, NTU_KA, 1);
+
+	/* setup FOE rate limit */
+	cr_set_field(hnat_priv->ppe_base + PPE_BIND_LMT_0, QURT_LMT, 16383);
+	cr_set_field(hnat_priv->ppe_base + PPE_BIND_LMT_0, HALF_LMT, 16383);
+	cr_set_field(hnat_priv->ppe_base + PPE_BIND_LMT_1, FULL_LMT, 16383);
+	/* setup binding threshold as 30 packets per second */
+	cr_set_field(hnat_priv->ppe_base + PPE_BNDR, BIND_RATE, 0x1E);
+
+	/* setup FOE cf gen */
+	cr_set_field(hnat_priv->ppe_base + PPE_GLO_CFG, PPE_EN, 1);
+	writel(0, hnat_priv->ppe_base + PPE_DFT_CPORT); /* pdma */
+	/* writel(0x55555555, hnat_priv->ppe_base + PPE_DFT_CPORT); */ /* qdma */
+	cr_set_field(hnat_priv->ppe_base + PPE_GLO_CFG, TTL0_DRP, 0);
+
+	if (hnat_priv->data->version == MTK_HNAT_V4) {
+		writel(0xcb777, hnat_priv->ppe_base + PPE_DFT_CPORT1);
+		writel(0x7f, hnat_priv->ppe_base + PPE_SBW_CTRL);
+	}
+
+	/*enable ppe mib counter*/
+	if (hnat_priv->data->per_flow_accounting) {
+		cr_set_field(hnat_priv->ppe_base + PPE_MIB_CFG, MIB_EN, 1);
+		cr_set_field(hnat_priv->ppe_base + PPE_MIB_CFG, MIB_READ_CLEAR, 1);
+		cr_set_field(hnat_priv->ppe_base + PPE_MIB_CAH_CTRL, MIB_CAH_EN, 1);
+	}
+
+	/* NOTE(review): dev_get_by_name() result is not checked; g_ppdev may
+	 * be NULL if the ppd interface does not exist yet - confirm all users
+	 * handle that
+	 */
+	hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);
+
+	dev_info(hnat_priv->dev, "hwnat start\n");
+
+	return 0;
+}
+
+/* Poll the PPE busy flag (bit 31 of the register at ppe_base + 0x0) until it
+ * clears, sleeping 10-20 us between reads. Gives up after ~1 s (HZ jiffies).
+ * Returns 0 when idle, -1 on timeout (logged with dev_notice).
+ */
+static int ppe_busy_wait(void)
+{
+	unsigned long t_start = jiffies;
+	u32 r = 0;
+
+	while (1) {
+		r = readl((hnat_priv->ppe_base + 0x0));
+		if (!(r & BIT(31)))
+			return 0;
+		if (time_after(jiffies, t_start + HZ))
+			break;
+		usleep_range(10, 20);
+	}
+
+	dev_notice(hnat_priv->dev, "ppe:%s timeout\n", __func__);
+
+	return -1;
+}
+
+/* Tear down the PPE: re-route traffic to the DMA engine, invalidate all FOE
+ * entries, flush/disable the cache, disable the engine and its aging, and
+ * free the DMA tables and accounting memory allocated by hnat_start().
+ * Safe to call after a partial hnat_start() failure (all frees are guarded).
+ */
+static void hnat_stop(void)
+{
+	u32 foe_table_sz;
+	u32 foe_mib_tb_sz;
+	struct foe_entry *entry, *end;
+	u32 r1 = 0, r2 = 0;
+
+	/* send all traffic back to the DMA engine */
+	set_gmac_ppe_fwd(0, 0);
+	set_gmac_ppe_fwd(1, 0);
+
+	dev_info(hnat_priv->dev, "hwnat stop\n");
+
+	if (hnat_priv->foe_table_cpu) {
+		entry = hnat_priv->foe_table_cpu;
+		end = hnat_priv->foe_table_cpu + hnat_priv->foe_etry_num;
+		while (entry < end) {
+			entry->bfib1.state = INVALID;
+			entry++;
+		}
+	}
+	/* disable caching */
+	hnat_cache_ebl(0);
+
+	/* flush cache has to be ahead of hnat disable --*/
+	cr_set_field(hnat_priv->ppe_base + PPE_GLO_CFG, PPE_EN, 0);
+
+	/* disable scan mode and keep-alive */
+	cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, SCAN_MODE, 0);
+	cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, KA_CFG, 0);
+
+	/* wait for the PPE to go idle; proceed anyway on timeout */
+	ppe_busy_wait();
+
+	/* disable FOE */
+	cr_clr_bits(hnat_priv->ppe_base + PPE_FLOW_CFG,
+		    BIT_IPV4_NAPT_EN | BIT_IPV4_NAT_EN | BIT_IPV4_NAT_FRAG_EN |
+		    BIT_IPV6_HASH_GREK | BIT_IPV4_DSL_EN |
+		    BIT_IPV6_6RD_EN | BIT_IPV6_3T_ROUTE_EN |
+		    BIT_IPV6_5T_ROUTE_EN | BIT_FUC_FOE | BIT_FMC_FOE);
+
+	if (hnat_priv->data->version == MTK_HNAT_V4)
+		cr_clr_bits(hnat_priv->ppe_base + PPE_FLOW_CFG,
+			    BIT_IPV4_MAPE_EN | BIT_IPV4_MAPT_EN);
+
+	/* disable FOE aging */
+	cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, NTU_AGE, 0);
+	cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, UNBD_AGE, 0);
+	cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, TCP_AGE, 0);
+	cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, UDP_AGE, 0);
+	cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, FIN_AGE, 0);
+
+	/* presumably PSE FIFO fill levels - registers 0x100/0x10c are not
+	 * named here; confirm meaning and the reset condition below against
+	 * the frame-engine datasheet
+	 */
+	r1 = readl(hnat_priv->fe_base + 0x100);
+	r2 = readl(hnat_priv->fe_base + 0x10c);
+
+	dev_info(hnat_priv->dev, "0x100 = 0x%x, 0x10c = 0x%x\n", r1, r2);
+
+	if (((r1 & 0xff00) >> 0x8) >= (r1 & 0xff) ||
+	    ((r1 & 0xff00) >> 0x8) >= (r2 & 0xff)) {
+		dev_info(hnat_priv->dev, "reset pse\n");
+		writel(0x1, hnat_priv->fe_base + 0x4);
+	}
+
+	/* free the FOE table */
+	foe_table_sz = hnat_priv->foe_etry_num * sizeof(struct foe_entry);
+	if (hnat_priv->foe_table_cpu)
+		dma_free_coherent(hnat_priv->dev, foe_table_sz, hnat_priv->foe_table_cpu,
+				  hnat_priv->foe_table_dev);
+	writel(0, hnat_priv->ppe_base + PPE_TB_BASE);
+
+	if (hnat_priv->data->per_flow_accounting) {
+		foe_mib_tb_sz = hnat_priv->foe_etry_num * sizeof(struct mib_entry);
+		if (hnat_priv->foe_mib_cpu)
+			dma_free_coherent(hnat_priv->dev, foe_mib_tb_sz,
+					  hnat_priv->foe_mib_cpu, hnat_priv->foe_mib_dev);
+		writel(0, hnat_priv->ppe_base + PPE_MIB_TB_BASE);
+		kfree(hnat_priv->acct);
+	}
+}
+
+/* Drop every netdev reference taken by the hnat module: each registered
+ * external-interface entry (releasing its dev ref, unlinking it from the
+ * ext_if list and freeing it) plus the ppd device reference taken in
+ * hnat_start(). The ext_if array is assumed to be densely packed (iteration
+ * stops at the first NULL slot).
+ */
+static void hnat_release_netdev(void)
+{
+	int i;
+	struct extdev_entry *ext_entry;
+
+	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
+		ext_entry = hnat_priv->ext_if[i];
+		if (ext_entry->dev)
+			dev_put(ext_entry->dev);
+		ext_if_del(ext_entry);
+		kfree(ext_entry);
+	}
+
+	if (hnat_priv->g_ppdev)
+		dev_put(hnat_priv->g_ppdev);
+}
+
+static struct notifier_block nf_hnat_netdevice_nb __read_mostly = {
+	.notifier_call = nf_hnat_netdevice_event,
+};
+
+static struct notifier_block nf_hnat_netevent_nb __read_mostly = {
+	.notifier_call = nf_hnat_netevent_handler,
+};
+
+/* Activate hardware NAT: publish the RX/TX fast-path hooks consumed by the
+ * WHNAT (wifi) module when the SoC supports it (RX hook only on HNAT v4),
+ * register the netfilter hooks, and set the global hook_toggle flag.
+ * Returns 0 on success, -1 if netfilter hook registration fails.
+ */
+int hnat_enable_hook(void)
+{
+	/* register hook functions used by WHNAT module.
+	 */
+	if (hnat_priv->data->whnat) {
+		ra_sw_nat_hook_rx =
+			(hnat_priv->data->version == MTK_HNAT_V4) ?
+			 mtk_sw_nat_hook_rx : NULL;
+		ra_sw_nat_hook_tx = mtk_sw_nat_hook_tx;
+		ppe_dev_register_hook = mtk_ppe_dev_register_hook;
+		ppe_dev_unregister_hook = mtk_ppe_dev_unregister_hook;
+	}
+
+	if (hnat_register_nf_hooks())
+		return -1;
+
+	hook_toggle = 1;
+
+	return 0;
+}
+
+/* Deactivate hardware NAT: withdraw the RX/TX fast-path hooks, unregister
+ * the netfilter hooks, park the PPE in "forward to CPU only" mode and
+ * invalidate every bound FOE entry, then flush the cache. A 3 s timer later
+ * restores SMA_FWD_CPU_BUILD_ENTRY (see hnat_sma_build_entry()).
+ * Always returns 0.
+ * NOTE(review): ppe_dev_register_hook/ppe_dev_unregister_hook set by
+ * hnat_enable_hook() are not cleared here - verify that is intentional.
+ */
+int hnat_disable_hook(void)
+{
+	int hash_index;
+	struct foe_entry *entry;
+
+	ra_sw_nat_hook_tx = NULL;
+	ra_sw_nat_hook_rx = NULL;
+	hnat_unregister_nf_hooks();
+
+	cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, SMA, SMA_ONLY_FWD_CPU);
+	for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
+		entry = hnat_priv->foe_table_cpu + hash_index;
+		if (entry->bfib1.state == BIND) {
+			entry->ipv4_hnapt.udib1.state = INVALID;
+			entry->ipv4_hnapt.udib1.time_stamp =
+				readl((hnat_priv->fe_base + 0x0010)) & 0xFF;
+		}
+	}
+
+	/* clear HWNAT cache */
+	hnat_cache_ebl(1);
+
+	mod_timer(&hnat_priv->hnat_sma_build_entry_timer, jiffies + 3 * HZ);
+	hook_toggle = 0;
+
+	return 0;
+}
+
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+static struct packet_type mtk_pack_type __read_mostly = {
+	.type   = HQOS_MAGIC_TAG,
+	.func   = mtk_hqos_ptype_cb,
+};
+#endif
+
+/* Platform probe: parse the devicetree configuration (interface names, GMAC
+ * count, DSA WAN port, external devices), map the frame-engine registers,
+ * locate the PPE window for the detected HNAT version, set up debugfs, start
+ * the PPE and install the netfilter/netdevice hooks and timers.
+ * Returns 0 on success or a negative errno / -1 on failure, unwinding in
+ * reverse order via the err_out labels.
+ */
+static int hnat_probe(struct platform_device *pdev)
+{
+	int i;
+	int err = 0;
+	int index = 0;
+	struct resource *res;
+	const char *name;
+	struct device_node *np;
+	unsigned int val;
+	struct property *prop;
+	struct extdev_entry *ext_entry;
+	const struct of_device_id *match;
+
+	hnat_priv = devm_kzalloc(&pdev->dev, sizeof(struct mtk_hnat), GFP_KERNEL);
+	if (!hnat_priv)
+		return -ENOMEM;
+
+	hnat_priv->foe_etry_num = DEF_ETRY_NUM;
+
+	/* NOTE(review): of_match_device() can return NULL; match is
+	 * dereferenced without a check on the next line - verify probe can
+	 * never run without a table match, or add a NULL check.
+	 */
+	match = of_match_device(of_hnat_match, &pdev->dev);
+	hnat_priv->data = (struct mtk_hnat_data *)match->data;
+
+	hnat_priv->dev = &pdev->dev;
+	np = hnat_priv->dev->of_node;
+
+	err = of_property_read_string(np, "mtketh-wan", &name);
+	if (err < 0)
+		return -EINVAL;
+
+	/* NOTE(review): strncpy() leaves the buffer unterminated when the DT
+	 * string is >= IFNAMSIZ; applies to all interface-name copies below.
+	 */
+	strncpy(hnat_priv->wan, (char *)name, IFNAMSIZ);
+	dev_info(&pdev->dev, "wan = %s\n", hnat_priv->wan);
+
+	err = of_property_read_string(np, "mtketh-lan", &name);
+	if (err < 0)
+		strncpy(hnat_priv->lan, "eth0", IFNAMSIZ);	/* default LAN */
+	else
+		strncpy(hnat_priv->lan, (char *)name, IFNAMSIZ);
+	dev_info(&pdev->dev, "lan = %s\n", hnat_priv->lan);
+
+	err = of_property_read_string(np, "mtketh-ppd", &name);
+	if (err < 0)
+		strncpy(hnat_priv->ppd, "eth0", IFNAMSIZ);	/* default PPD */
+	else
+		strncpy(hnat_priv->ppd, (char *)name, IFNAMSIZ);
+	dev_info(&pdev->dev, "ppd = %s\n", hnat_priv->ppd);
+
+	/*get total gmac num in hnat*/
+	err = of_property_read_u32_index(np, "mtketh-max-gmac", 0, &val);
+
+	if (err < 0)
+		return -EINVAL;
+
+	hnat_priv->gmac_num = val;
+
+	dev_info(&pdev->dev, "gmac num = %d\n", hnat_priv->gmac_num);
+
+	err = of_property_read_u32_index(np, "mtkdsa-wan-port", 0, &val);
+
+	if (err < 0) {
+		hnat_priv->wan_dsa_port = NONE_DSA_PORT;
+	} else {
+		hnat_priv->wan_dsa_port = val;
+		dev_info(&pdev->dev, "wan dsa port = %d\n", hnat_priv->wan_dsa_port);
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENOENT;
+
+	hnat_priv->fe_base = devm_ioremap_nocache(&pdev->dev, res->start,
+					     res->end - res->start + 1);
+	if (!hnat_priv->fe_base)
+		return -EADDRNOTAVAIL;
+
+	/* PPE register window lives at a version-dependent offset inside the
+	 * frame-engine block
+	 */
+	hnat_priv->ppe_base = (hnat_priv->data->version == MTK_HNAT_V4) ?
+		hnat_priv->fe_base + 0x2600 : hnat_priv->fe_base + 0xe00;
+
+	err = hnat_init_debugfs(hnat_priv);
+	if (err)
+		return err;
+
+	/* collect the optional list of external (e.g. wifi) device names */
+	prop = of_find_property(np, "ext-devices", NULL);
+	for (name = of_prop_next_string(prop, NULL); name;
+	     name = of_prop_next_string(prop, name), index++) {
+		ext_entry = kzalloc(sizeof(*ext_entry), GFP_KERNEL);
+		if (!ext_entry) {
+			err = -ENOMEM;
+			goto err_out1;
+		}
+		strncpy(ext_entry->name, (char *)name, IFNAMSIZ);
+		ext_if_add(ext_entry);
+	}
+
+	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
+		ext_entry = hnat_priv->ext_if[i];
+		dev_info(&pdev->dev, "ext devices = %s\n", ext_entry->name);
+	}
+
+	/* default LAN/WAN VLAN ids - presumably matched against tagged
+	 * traffic; confirm with the nf_hook users
+	 */
+	hnat_priv->lvid = 1;
+	hnat_priv->wvid = 2;
+
+	err = hnat_start();
+	if (err)
+		goto err_out;
+
+	if (hnat_priv->data->whnat) {
+		err = whnat_adjust_nf_hooks();
+		if (err)
+			goto err_out;
+	}
+
+	err = hnat_enable_hook();
+	if (err)
+		goto err_out;
+
+	register_netdevice_notifier(&nf_hnat_netdevice_nb);
+	register_netevent_notifier(&nf_hnat_netevent_nb);
+	if (hnat_priv->data->mcast)
+		hnat_mcast_enable();
+	timer_setup(&hnat_priv->hnat_sma_build_entry_timer, hnat_sma_build_entry, 0);
+	if (hnat_priv->data->version == MTK_HNAT_V3) {
+		timer_setup(&hnat_priv->hnat_reset_timestamp_timer, hnat_reset_timestamp, 0);
+		hnat_priv->hnat_reset_timestamp_timer.expires = jiffies;
+		add_timer(&hnat_priv->hnat_reset_timestamp_timer);
+	}
+
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+	if (IS_GMAC1_MODE)
+		dev_add_pack(&mtk_pack_type);
+#endif
+
+	return 0;
+
+err_out:
+	hnat_stop();
+err_out1:
+	hnat_deinit_debugfs(hnat_priv);
+	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
+		ext_entry = hnat_priv->ext_if[i];
+		ext_if_del(ext_entry);
+		kfree(ext_entry);
+	}
+	return err;
+}
+
+/* Platform remove: undo hnat_probe() in reverse - unregister notifiers,
+ * withdraw the hooks, stop the PPE and free its tables, tear down debugfs,
+ * release netdev references, and cancel the timers (the reset-timestamp
+ * timer only exists on HNAT v3). Always returns 0.
+ */
+static int hnat_remove(struct platform_device *pdev)
+{
+	unregister_netdevice_notifier(&nf_hnat_netdevice_nb);
+	unregister_netevent_notifier(&nf_hnat_netevent_nb);
+	hnat_disable_hook();
+
+	if (hnat_priv->data->mcast)
+		hnat_mcast_disable();
+
+	hnat_stop();
+	hnat_deinit_debugfs(hnat_priv);
+	hnat_release_netdev();
+	del_timer_sync(&hnat_priv->hnat_sma_build_entry_timer);
+	if (hnat_priv->data->version == MTK_HNAT_V3)
+		del_timer_sync(&hnat_priv->hnat_reset_timestamp_timer);
+
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+	if (IS_GMAC1_MODE)
+		dev_remove_pack(&mtk_pack_type);
+#endif
+
+	return 0;
+}
+
+/* Per-HNAT-generation feature matrices, selected by the compatible strings
+ * in of_hnat_match below. The bare "mediatek,mtk-hnat" compatible maps to v3
+ * for backward compatibility with older devicetrees.
+ */
+static const struct mtk_hnat_data hnat_data_v1 = {
+	.num_of_sch = 2,
+	.whnat = false,
+	.per_flow_accounting = false,
+	.mcast = false,
+	.version = MTK_HNAT_V1,
+};
+
+static const struct mtk_hnat_data hnat_data_v2 = {
+	.num_of_sch = 2,
+	.whnat = true,
+	.per_flow_accounting = true,
+	.mcast = false,
+	.version = MTK_HNAT_V2,
+};
+
+static const struct mtk_hnat_data hnat_data_v3 = {
+	.num_of_sch = 4,
+	.whnat = false,
+	.per_flow_accounting = false,
+	.mcast = false,
+	.version = MTK_HNAT_V3,
+};
+
+static const struct mtk_hnat_data hnat_data_v4 = {
+	.num_of_sch = 4,
+	.whnat = true,
+	.per_flow_accounting = true,
+	.mcast = false,
+	.version = MTK_HNAT_V4,
+};
+
+const struct of_device_id of_hnat_match[] = {
+	{ .compatible = "mediatek,mtk-hnat", .data = &hnat_data_v3 },
+	{ .compatible = "mediatek,mtk-hnat_v1", .data = &hnat_data_v1 },
+	{ .compatible = "mediatek,mtk-hnat_v2", .data = &hnat_data_v2 },
+	{ .compatible = "mediatek,mtk-hnat_v3", .data = &hnat_data_v3 },
+	{ .compatible = "mediatek,mtk-hnat_v4", .data = &hnat_data_v4 },
+	{},
+};
+
+static struct platform_driver hnat_driver = {
+	.probe = hnat_probe,
+	.remove = hnat_remove,
+	.driver = {
+		.name = "mediatek_soc_hnat",
+		.of_match_table = of_hnat_match,
+	},
+};
+
+module_platform_driver(hnat_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_AUTHOR("John Crispin <john@phrozen.org>");
+MODULE_DESCRIPTION("Mediatek Hardware NAT");
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
new file mode 100644
index 0000000..336b4ad
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
@@ -0,0 +1,925 @@
+/*   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; version 2 of the License
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
+ *   Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/string.h>
+#include <linux/if.h>
+#include <linux/if_ether.h>
+#include <net/netevent.h>
+#include <linux/mod_devicetable.h>
+#include "hnat_mcast.h"
+
+/*--------------------------------------------------------------------------*/
+/* Register Offset*/
+/*--------------------------------------------------------------------------*/
+#define PPE_GLO_CFG 0x00
+#define PPE_FLOW_CFG 0x04
+#define PPE_IP_PROT_CHK 0x08
+#define PPE_IP_PROT_0 0x0C
+#define PPE_IP_PROT_1 0x10
+#define PPE_IP_PROT_2 0x14
+#define PPE_IP_PROT_3 0x18
+#define PPE_TB_CFG 0x1C
+#define PPE_TB_BASE 0x20
+#define PPE_TB_USED 0x24
+#define PPE_BNDR 0x28
+#define PPE_BIND_LMT_0 0x2C
+#define PPE_BIND_LMT_1 0x30
+#define PPE_KA 0x34
+#define PPE_UNB_AGE 0x38
+#define PPE_BND_AGE_0 0x3C
+#define PPE_BND_AGE_1 0x40
+#define PPE_HASH_SEED 0x44
+#define PPE_DFT_CPORT 0x48
+#define PPE_DFT_CPORT1 0x4C
+#define PPE_MCAST_PPSE 0x84
+#define PPE_MCAST_L_0 0x88
+#define PPE_MCAST_H_0 0x8C
+#define PPE_MCAST_L_1 0x90
+#define PPE_MCAST_H_1 0x94
+#define PPE_MCAST_L_2 0x98
+#define PPE_MCAST_H_2 0x9C
+#define PPE_MCAST_L_3 0xA0
+#define PPE_MCAST_H_3 0xA4
+#define PPE_MCAST_L_4 0xA8
+#define PPE_MCAST_H_4 0xAC
+#define PPE_MCAST_L_5 0xB0
+#define PPE_MCAST_H_5 0xB4
+#define PPE_MCAST_L_6 0xBC
+#define PPE_MCAST_H_6 0xC0
+#define PPE_MCAST_L_7 0xC4
+#define PPE_MCAST_H_7 0xC8
+#define PPE_MCAST_L_8 0xCC
+#define PPE_MCAST_H_8 0xD0
+#define PPE_MCAST_L_9 0xD4
+#define PPE_MCAST_H_9 0xD8
+#define PPE_MCAST_L_A 0xDC
+#define PPE_MCAST_H_A 0xE0
+#define PPE_MCAST_L_B 0xE4
+#define PPE_MCAST_H_B 0xE8
+#define PPE_MCAST_L_C 0xEC
+#define PPE_MCAST_H_C 0xF0
+#define PPE_MCAST_L_D 0xF4
+#define PPE_MCAST_H_D 0xF8
+#define PPE_MCAST_L_E 0xFC
+#define PPE_MCAST_H_E 0xE0 /* NOTE(review): duplicates PPE_MCAST_H_A (0xE0); pattern suggests a typo, but 0x100 is taken by PPE_MCAST_L_F -- verify against datasheet */
+#define PPE_MCAST_L_F 0x100
+#define PPE_MCAST_H_F 0x104
+#define PPE_MCAST_L_10 0xC00
+#define PPE_MCAST_H_10 0xC04
+#define PPE_MTU_DRP 0x108
+#define PPE_MTU_VLYR_0 0x10C
+#define PPE_MTU_VLYR_1 0x110
+#define PPE_MTU_VLYR_2 0x114
+#define PPE_VPM_TPID 0x118
+#define PPE_CAH_CTRL 0x120
+#define PPE_CAH_TAG_SRH 0x124
+#define PPE_CAH_LINE_RW 0x128
+#define PPE_CAH_WDATA 0x12C
+#define PPE_CAH_RDATA 0x130
+
+#define PPE_MIB_CFG 0X134
+#define PPE_MIB_TB_BASE 0X138
+#define PPE_MIB_SER_CR 0X13C
+#define PPE_MIB_SER_R0 0X140
+#define PPE_MIB_SER_R1 0X144
+#define PPE_MIB_SER_R2 0X148
+#define PPE_MIB_CAH_CTRL 0X150
+#define PPE_MIB_CAH_TAG_SRH 0X154
+#define PPE_MIB_CAH_LINE_RW 0X158
+#define PPE_MIB_CAH_WDATA 0X15C
+#define PPE_MIB_CAH_RDATA 0X160
+#define PPE_SBW_CTRL 0x174
+
+#define GDMA1_FWD_CFG 0x500
+#define GDMA2_FWD_CFG 0x1500
+
+#define QTX_CFG(x)		(QDMA_BASE + ((x) * 0x10))
+#define QTX_SCH(x)		(QDMA_BASE + 0x4 + ((x) * 0x10))
+#define QDMA_PAGE		(QDMA_BASE + 0x1f0)
+#define QDMA_TX_2SCH_BASE	(QDMA_BASE + 0x214)
+#define QTX_MIB_IF		(QDMA_BASE + 0x2bc)
+#define QDMA_TX_4SCH_BASE(x)	(QDMA_BASE + 0x398 + (((x) >> 1) * 0x4))
+
+/*--------------------------------------------------------------------------*/
+/* Register Mask*/
+/*--------------------------------------------------------------------------*/
+/* PPE_TB_CFG mask */
+#define TB_ETRY_NUM (0x7 << 0) /* RW */
+#define TB_ENTRY_SIZE (0x1 << 3) /* RW */
+#define SMA (0x3 << 4) /* RW */
+#define NTU_AGE (0x1 << 7) /* RW */
+#define UNBD_AGE (0x1 << 8) /* RW */
+#define TCP_AGE (0x1 << 9) /* RW */
+#define UDP_AGE (0x1 << 10) /* RW */
+#define FIN_AGE (0x1 << 11) /* RW */
+#define KA_CFG (0x3 << 12)
+#define HASH_MODE (0x3 << 14) /* RW */
+#define SCAN_MODE (0x3 << 16) /* RW */
+#define XMODE (0x3 << 18) /* RW */
+
+/*PPE_CAH_CTRL mask*/
+#define CAH_EN (0x1 << 0) /* RW */
+#define CAH_X_MODE (0x1 << 9) /* RW */
+
+/*PPE_UNB_AGE mask*/
+#define UNB_DLTA (0xff << 0) /* RW */
+#define UNB_MNP (0xffff << 16) /* RW */
+
+/*PPE_BND_AGE_0 mask*/
+#define UDP_DLTA (0xffff << 0) /* RW */
+#define NTU_DLTA (0xffff << 16) /* RW */
+
+/*PPE_BND_AGE_1 mask*/
+#define TCP_DLTA (0xffff << 0) /* RW */
+#define FIN_DLTA (0xffff << 16) /* RW */
+
+/*PPE_KA mask*/
+#define KA_T (0xffff << 0) /* RW */
+#define TCP_KA (0xff << 16) /* RW */
+#define UDP_KA (0xff << 24) /* RW */
+
+/*PPE_BIND_LMT_0 mask*/
+#define QURT_LMT (0x3ff << 0) /* RW */
+#define HALF_LMT (0x3ff << 16) /* RW */
+
+/*PPE_BIND_LMT_1 mask*/
+#define FULL_LMT (0x3fff << 0) /* RW */
+#define NTU_KA (0xff << 16) /* RW */
+
+/*PPE_BNDR mask*/
+#define BIND_RATE (0xffff << 0) /* RW */
+#define PBND_RD_PRD (0xffff << 16) /* RW */
+
+/*PPE_GLO_CFG mask*/
+#define PPE_EN (0x1 << 0) /* RW */
+#define TTL0_DRP (0x1 << 4) /* RW */
+#define MCAST_TB_EN (0x1 << 7) /* RW */
+#define MCAST_HASH (0x3 << 12) /* RW */
+
+#define MC_P3_PPSE (0xf << 12) /* RW */
+#define MC_P2_PPSE (0xf << 8) /* RW */
+#define MC_P1_PPSE (0xf << 4) /* RW */
+#define MC_P0_PPSE (0xf << 0) /* RW */
+
+#define MIB_EN (0x1 << 0) /* RW */
+#define MIB_READ_CLEAR (0X1 << 1) /* RW */
+#define MIB_CAH_EN (0X1 << 0) /* RW */
+
+/*GDMA_FWD_CFG mask */
+#define GDM_UFRC_MASK (0x7 << 12) /* RW */
+#define GDM_BFRC_MASK (0x7 << 8) /*RW*/
+#define GDM_MFRC_MASK (0x7 << 4) /*RW*/
+#define GDM_OFRC_MASK (0x7 << 0) /*RW*/
+#define GDM_ALL_FRC_MASK                                                      \
+	(GDM_UFRC_MASK | GDM_BFRC_MASK | GDM_MFRC_MASK | GDM_OFRC_MASK)
+
+/*QDMA_PAGE mask*/
+#define QTX_CFG_PAGE (0xf << 0) /* RW */
+
+/*QTX_MIB_IF mask*/
+#define MIB_ON_QTX_CFG (0x1 << 31) /* RW */
+#define VQTX_MIB_EN (0x1 << 28) /* RW */
+
+/*--------------------------------------------------------------------------*/
+/* Descriptor Structure */
+/*--------------------------------------------------------------------------*/
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+struct hnat_unbind_info_blk {
+	u32 time_stamp : 8;
+	u32 sp : 4;
+	u32 pcnt : 8;
+	u32 ilgf : 1;
+	u32 mc : 1;
+	u32 preb : 1;
+	u32 pkt_type : 5;
+	u32 state : 2;
+	u32 udp : 1;
+	u32 sta : 1;		/* static entry */
+} __packed;
+
+struct hnat_bind_info_blk {
+	u32 time_stamp : 8;
+	u32 sp : 4;
+	u32 mc : 1;
+	u32 ka : 1;		/* keep alive */
+	u32 vlan_layer : 3;
+	u32 psn : 1;		/* egress packet has PPPoE session */
+	u32 vpm : 1;		/* 0:ethertype remark, 1:0x8100(CR default) */
+	u32 ps : 1;		/* packet sampling */
+	u32 cah : 1;		/* cacheable flag */
+	u32 rmt : 1;		/* remove tunnel ip header (6rd/dslite only) */
+	u32 ttl : 1;
+	u32 pkt_type : 5;
+	u32 state : 2;
+	u32 udp : 1;
+	u32 sta : 1;		/* static entry */
+} __packed;
+
+struct hnat_info_blk2 {
+	u32 qid : 7;		/* QID in Qos Port */
+	u32 port_mg : 1;
+	u32 fqos : 1;		/* force to PSE QoS port */
+	u32 dp : 4;		/* force to PSE port x */
+	u32 mcast : 1;		/* multicast this packet to CPU */
+	u32 pcpl : 1;		/* OSBN */
+	u32 mibf : 1;
+	u32 alen : 1;
+	u32 rxid : 2;
+	u32 winfoi : 1;
+	u32 port_ag : 4;
+	u32 dscp : 8;		/* DSCP value */
+} __packed;
+
+struct hnat_winfo {
+	u32 bssid : 6;		/* WiFi Bssidx */
+	u32 wcid : 10;		/* WiFi wtable Idx */
+} __packed;
+
+#else
+struct hnat_unbind_info_blk {
+	u32 time_stamp : 8;
+	u32 pcnt : 16; /* packet count */
+	u32 preb : 1;
+	u32 pkt_type : 3;
+	u32 state : 2;
+	u32 udp : 1;
+	u32 sta : 1; /* static entry */
+} __packed;
+
+struct hnat_bind_info_blk {
+	u32 time_stamp : 15;
+	u32 ka : 1; /* keep alive */
+	u32 vlan_layer : 3;
+	u32 psn : 1; /* egress packet has PPPoE session */
+	u32 vpm : 1; /* 0:ethertype remark, 1:0x8100(CR default) */
+	u32 ps : 1; /* packet sampling */
+	u32 cah : 1; /* cacheable flag */
+	u32 rmt : 1; /* remove tunnel ip header (6rd/dslite only) */
+	u32 ttl : 1;
+	u32 pkt_type : 3;
+	u32 state : 2;
+	u32 udp : 1;
+	u32 sta : 1; /* static entry */
+} __packed;
+
+struct hnat_info_blk2 {
+	u32 qid : 4; /* QID in Qos Port */
+	u32 fqos : 1; /* force to PSE QoS port */
+	u32 dp : 3; /* force to PSE port x
+		     * 0:PSE,1:GSW, 2:GMAC,4:PPE,5:QDMA,7=DROP
+		     */
+	u32 mcast : 1; /* multicast this packet to CPU */
+	u32 pcpl : 1; /* OSBN */
+	u32 mibf : 1; /* 0:off 1:on PPE MIB counter */
+	u32 alen : 1; /* 0:post 1:pre packet length in accounting */
+	u32 port_mg : 6; /* port meter group */
+	u32 port_ag : 6; /* port account group */
+	u32 dscp : 8; /* DSCP value */
+} __packed;
+
+struct hnat_winfo {
+	u32 bssid : 6;		/* WiFi Bssidx */
+	u32 wcid : 8;		/* WiFi wtable Idx */
+	u32 rxid : 2;		/* WiFi Ring idx */
+} __packed;
+#endif
+
+/* info blk2 for WHNAT */
+struct hnat_info_blk2_whnat {
+	u32 qid : 4; /* QID[3:0] in Qos Port */
+	u32 fqos : 1; /* force to PSE QoS port */
+	u32 dp : 3; /* force to PSE port x
+		     * 0:PSE,1:GSW, 2:GMAC,4:PPE,5:QDMA,7=DROP
+		     */
+	u32 mcast : 1; /* multicast this packet to CPU */
+	u32 pcpl : 1; /* OSBN */
+	u32 mibf : 1; /* 0:off 1:on PPE MIB counter */
+	u32 alen : 1; /* 0:post 1:pre packet length in accounting */
+	u32 qid2 : 2; /* QID[5:4] in Qos Port */
+	u32 resv : 2;
+	u32 wdmaid : 1; /* 0:to pcie0 dev 1:to pcie1 dev */
+	u32 winfoi : 1; /* 0:off 1:on Wi-Fi hwnat support */
+	u32 port_ag : 6; /* port account group */
+	u32 dscp : 8; /* DSCP value */
+} __packed;
+
+struct hnat_ipv4_hnapt {
+	union {
+		struct hnat_bind_info_blk bfib1;
+		struct hnat_unbind_info_blk udib1;
+		u32 info_blk1;
+	};
+	u32 sip;
+	u32 dip;
+	u16 dport;
+	u16 sport;
+	union {
+		struct hnat_info_blk2 iblk2;
+		struct hnat_info_blk2_whnat iblk2w;
+		u32 info_blk2;
+	};
+	u32 new_sip;
+	u32 new_dip;
+	u16 new_dport;
+	u16 new_sport;
+	u16 m_timestamp; /* For mcast*/
+	u16 resv1;
+	u32 resv2;
+	u32 resv3 : 26;
+	u32 act_dp : 6; /* UDF */
+	u16 vlan1;
+	u16 etype;
+	u32 dmac_hi;
+	union {
+#if !defined(CONFIG_MEDIATEK_NETSYS_V2)
+		struct hnat_winfo winfo;
+#endif
+		u16 vlan2;
+	};
+	u16 dmac_lo;
+	u32 smac_hi;
+	u16 pppoe_id;
+	u16 smac_lo;
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+	u16 minfo;
+	struct hnat_winfo winfo;
+#endif
+} __packed;
+
+struct hnat_ipv4_dslite {
+	union {
+		struct hnat_bind_info_blk bfib1;
+		struct hnat_unbind_info_blk udib1;
+		u32 info_blk1;
+	};
+	u32 sip;
+	u32 dip;
+	u16 dport;
+	u16 sport;
+
+	u32 tunnel_sipv6_0;
+	u32 tunnel_sipv6_1;
+	u32 tunnel_sipv6_2;
+	u32 tunnel_sipv6_3;
+
+	u32 tunnel_dipv6_0;
+	u32 tunnel_dipv6_1;
+	u32 tunnel_dipv6_2;
+	u32 tunnel_dipv6_3;
+
+	u8 flow_lbl[3]; /* kept consistent with the Linux kernel layout (should be 20 bits) */
+	u8 priority;    /* kept consistent with the Linux kernel layout (should be 8 bits) */
+	u32 hop_limit : 8;
+	u32 resv2 : 18;
+	u32 act_dp : 6; /* UDF */
+
+	union {
+		struct hnat_info_blk2 iblk2;
+		struct hnat_info_blk2_whnat iblk2w;
+		u32 info_blk2;
+	};
+
+	u16 vlan1;
+	u16 etype;
+	u32 dmac_hi;
+	union {
+#if !defined(CONFIG_MEDIATEK_NETSYS_V2)
+		struct hnat_winfo winfo;
+#endif
+		u16 vlan2;
+	};
+	u16 dmac_lo;
+	u32 smac_hi;
+	u16 pppoe_id;
+	u16 smac_lo;
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+	u16 minfo;
+	struct hnat_winfo winfo;
+	u32 new_sip;
+	u32 new_dip;
+	u16 new_dport;
+	u16 new_sport;
+#endif
+} __packed;
+
+struct hnat_ipv6_3t_route {
+	union {
+		struct hnat_bind_info_blk bfib1;
+		struct hnat_unbind_info_blk udib1;
+		u32 info_blk1;
+	};
+	u32 ipv6_sip0;
+	u32 ipv6_sip1;
+	u32 ipv6_sip2;
+	u32 ipv6_sip3;
+	u32 ipv6_dip0;
+	u32 ipv6_dip1;
+	u32 ipv6_dip2;
+	u32 ipv6_dip3;
+	u32 prot : 8;
+	u32 resv : 24;
+
+	u32 resv1;
+	u32 resv2;
+	u32 resv3;
+	u32 resv4 : 26;
+	u32 act_dp : 6; /* UDF */
+
+	union {
+		struct hnat_info_blk2 iblk2;
+		struct hnat_info_blk2_whnat iblk2w;
+		u32 info_blk2;
+	};
+	u16 vlan1;
+	u16 etype;
+	u32 dmac_hi;
+	union {
+#if !defined(CONFIG_MEDIATEK_NETSYS_V2)
+		struct hnat_winfo winfo;
+#endif
+		u16 vlan2;
+	};
+	u16 dmac_lo;
+	u32 smac_hi;
+	u16 pppoe_id;
+	u16 smac_lo;
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+	u16 minfo;
+	struct hnat_winfo winfo;
+#endif
+} __packed;
+
+struct hnat_ipv6_5t_route {
+	union {
+		struct hnat_bind_info_blk bfib1;
+		struct hnat_unbind_info_blk udib1;
+		u32 info_blk1;
+	};
+	u32 ipv6_sip0;
+	u32 ipv6_sip1;
+	u32 ipv6_sip2;
+	u32 ipv6_sip3;
+	u32 ipv6_dip0;
+	u32 ipv6_dip1;
+	u32 ipv6_dip2;
+	u32 ipv6_dip3;
+	u16 dport;
+	u16 sport;
+
+	u32 resv1;
+	u32 resv2;
+	u32 resv3;
+	u32 resv4 : 26;
+	u32 act_dp : 6; /* UDF */
+
+	union {
+		struct hnat_info_blk2 iblk2;
+		struct hnat_info_blk2_whnat iblk2w;
+		u32 info_blk2;
+	};
+
+	u16 vlan1;
+	u16 etype;
+	u32 dmac_hi;
+	union {
+#if !defined(CONFIG_MEDIATEK_NETSYS_V2)
+		struct hnat_winfo winfo;
+#endif
+		u16 vlan2;
+	};
+	u16 dmac_lo;
+	u32 smac_hi;
+	u16 pppoe_id;
+	u16 smac_lo;
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+	u16 minfo;
+	struct hnat_winfo winfo;
+#endif
+} __packed;
+
+struct hnat_ipv6_6rd {
+	union {
+		struct hnat_bind_info_blk bfib1;
+		struct hnat_unbind_info_blk udib1;
+		u32 info_blk1;
+	};
+	u32 ipv6_sip0;
+	u32 ipv6_sip1;
+	u32 ipv6_sip2;
+	u32 ipv6_sip3;
+	u32 ipv6_dip0;
+	u32 ipv6_dip1;
+	u32 ipv6_dip2;
+	u32 ipv6_dip3;
+	u16 dport;
+	u16 sport;
+
+	u32 tunnel_sipv4;
+	u32 tunnel_dipv4;
+	u32 hdr_chksum : 16;
+	u32 dscp : 8;
+	u32 ttl : 8;
+	u32 flag : 3;
+	u32 resv1 : 13;
+	u32 per_flow_6rd_id : 1;
+	u32 resv2 : 9;
+	u32 act_dp : 6; /* UDF */
+
+	union {
+		struct hnat_info_blk2 iblk2;
+		struct hnat_info_blk2_whnat iblk2w;
+		u32 info_blk2;
+	};
+
+	u16 vlan1;
+	u16 etype;
+	u32 dmac_hi;
+	union {
+#if !defined(CONFIG_MEDIATEK_NETSYS_V2)
+		struct hnat_winfo winfo;
+#endif
+		u16 vlan2;
+	};
+	u16 dmac_lo;
+	u32 smac_hi;
+	u16 pppoe_id;
+	u16 smac_lo;
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+	u16 minfo;
+	struct hnat_winfo winfo;
+	u32 resv3;
+	u32 resv4;
+	u16 new_dport;
+	u16 new_sport;
+#endif
+} __packed;
+
+struct foe_entry {
+	union {
+		struct hnat_unbind_info_blk udib1;
+		struct hnat_bind_info_blk bfib1;
+		struct hnat_ipv4_hnapt ipv4_hnapt;
+		struct hnat_ipv4_dslite ipv4_dslite;
+		struct hnat_ipv6_3t_route ipv6_3t_route;
+		struct hnat_ipv6_5t_route ipv6_5t_route;
+		struct hnat_ipv6_6rd ipv6_6rd;
+	};
+};
+
+/* If user wants to change default FOE entry number, both DEF_ETRY_NUM and
+ * DEF_ETRY_NUM_CFG need to be modified.
+ */
+#define DEF_ETRY_NUM		8192
+/* feasible values : 16384, 8192, 4096, 2048, 1024 */
+#define DEF_ETRY_NUM_CFG	TABLE_8K
+/* corresponding values : TABLE_16K, TABLE_8K, TABLE_4K, TABLE_2K, TABLE_1K */
+#define MAX_EXT_DEVS		(0x3fU)
+#define MAX_IF_NUM		64
+
+struct mib_entry {
+	u32 byt_cnt_l;
+	u16 byt_cnt_h;
+	u32 pkt_cnt_l;
+	u8 pkt_cnt_h;
+	u8 resv0;
+	u32 resv1;
+} __packed;
+
+struct hnat_accounting {
+	u64 bytes;
+	u64 packets;
+};
+
+enum mtk_hnat_version {
+	MTK_HNAT_V1 = 1, /* version 1: mt7621, mt7623 */
+	MTK_HNAT_V2, /* version 2: mt7622 */
+	MTK_HNAT_V3, /* version 3: mt7629 */
+	MTK_HNAT_V4, /* version 4: mt7986 */
+};
+
+struct mtk_hnat_data {
+	u8 num_of_sch;
+	bool whnat;
+	bool per_flow_accounting;
+	bool mcast;
+	enum mtk_hnat_version version;
+};
+
+struct mtk_hnat {
+	struct device *dev;
+	void __iomem *fe_base;
+	void __iomem *ppe_base;
+	struct foe_entry *foe_table_cpu;
+	dma_addr_t foe_table_dev;
+	u8 enable;
+	u8 enable1;
+	struct dentry *root;
+	struct debugfs_regset32 *regset;
+
+	struct mib_entry *foe_mib_cpu;
+	dma_addr_t foe_mib_dev;
+	struct hnat_accounting *acct;
+	const struct mtk_hnat_data *data;
+
+	/*devices we plays for*/
+	char wan[IFNAMSIZ];
+	char lan[IFNAMSIZ];
+	char ppd[IFNAMSIZ];
+	u16 lvid;
+	u16 wvid;
+
+	struct reset_control *rstc;
+
+	u8 gmac_num;
+	u8 wan_dsa_port;
+	struct ppe_mcast_table *pmcast;
+
+	u32 foe_etry_num;
+	struct net_device *g_ppdev;
+	struct net_device *wifi_hook_if[MAX_IF_NUM];
+	struct extdev_entry *ext_if[MAX_EXT_DEVS];
+	struct timer_list hnat_sma_build_entry_timer;
+	struct timer_list hnat_reset_timestamp_timer;
+	struct timer_list hnat_mcast_check_timer;
+};
+
+struct extdev_entry {
+	char name[IFNAMSIZ];
+	struct net_device *dev;
+};
+
+struct tcpudphdr {
+	__be16 src;
+	__be16 dst;
+};
+
+enum FoeEntryState { INVALID = 0, UNBIND = 1, BIND = 2, FIN = 3 };
+
+enum FoeIpAct {
+	IPV4_HNAPT = 0,
+	IPV4_HNAT = 1,
+	IPV4_DSLITE = 3,
+	IPV6_3T_ROUTE = 4,
+	IPV6_5T_ROUTE = 5,
+	IPV6_6RD = 7,
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+	IPV4_MAP_T = 8,
+	IPV4_MAP_E = 9,
+#else
+	IPV4_MAP_T = 6,
+	IPV4_MAP_E = 6,
+#endif
+};
+
+/*--------------------------------------------------------------------------*/
+/* Common Definition*/
+/*--------------------------------------------------------------------------*/
+
+#define HNAT_SW_VER   "1.1.0"
+#define HASH_SEED_KEY 0x12345678
+
+/*PPE_TB_CFG value*/
+#define ENTRY_80B 1
+#define ENTRY_64B 0
+#define TABLE_1K 0
+#define TABLE_2K 1
+#define TABLE_4K 2
+#define TABLE_8K 3
+#define TABLE_16K 4
+#define SMA_DROP 0 /* Drop the packet */
+#define SMA_DROP2 1 /* Drop the packet */
+#define SMA_ONLY_FWD_CPU 2 /* Only Forward to CPU */
+#define SMA_FWD_CPU_BUILD_ENTRY 3 /* Forward to CPU and build new FOE entry */
+#define HASH_MODE_0 0
+#define HASH_MODE_1 1
+#define HASH_MODE_2 2
+#define HASH_MODE_3 3
+
+/*PPE_FLOW_CFG*/
+#define BIT_FUC_FOE BIT(2)
+#define BIT_FMC_FOE BIT(1)
+#define BIT_FBC_FOE BIT(0)
+#define BIT_UDP_IP4F_NAT_EN BIT(7) /*Enable IPv4 fragment + UDP packet NAT*/
+#define BIT_IPV6_3T_ROUTE_EN BIT(8)
+#define BIT_IPV6_5T_ROUTE_EN BIT(9)
+#define BIT_IPV6_6RD_EN BIT(10)
+#define BIT_IPV4_NAT_EN BIT(12)
+#define BIT_IPV4_NAPT_EN BIT(13)
+#define BIT_IPV4_DSL_EN BIT(14)
+#define BIT_MIB_BUSY BIT(16)
+#define BIT_IPV4_NAT_FRAG_EN BIT(17)
+#define BIT_IPV4_HASH_GREK BIT(19)
+#define BIT_IPV6_HASH_GREK BIT(20)
+#define BIT_IPV4_MAPE_EN BIT(21)
+#define BIT_IPV4_MAPT_EN BIT(22)
+
+/*GDMA_FWD_CFG value*/
+#define BITS_GDM_UFRC_P_PPE (NR_PPE_PORT << 12)
+#define BITS_GDM_BFRC_P_PPE (NR_PPE_PORT << 8)
+#define BITS_GDM_MFRC_P_PPE (NR_PPE_PORT << 4)
+#define BITS_GDM_OFRC_P_PPE (NR_PPE_PORT << 0)
+#define BITS_GDM_ALL_FRC_P_PPE                                              \
+	(BITS_GDM_UFRC_P_PPE | BITS_GDM_BFRC_P_PPE | BITS_GDM_MFRC_P_PPE |  \
+	 BITS_GDM_OFRC_P_PPE)
+
+#define BITS_GDM_UFRC_P_CPU_PDMA (NR_PDMA_PORT << 12)
+#define BITS_GDM_BFRC_P_CPU_PDMA (NR_PDMA_PORT << 8)
+#define BITS_GDM_MFRC_P_CPU_PDMA (NR_PDMA_PORT << 4)
+#define BITS_GDM_OFRC_P_CPU_PDMA (NR_PDMA_PORT << 0)
+#define BITS_GDM_ALL_FRC_P_CPU_PDMA                                           \
+	(BITS_GDM_UFRC_P_CPU_PDMA | BITS_GDM_BFRC_P_CPU_PDMA |               \
+	 BITS_GDM_MFRC_P_CPU_PDMA | BITS_GDM_OFRC_P_CPU_PDMA)
+
+#define BITS_GDM_UFRC_P_CPU_QDMA (NR_QDMA_PORT << 12)
+#define BITS_GDM_BFRC_P_CPU_QDMA (NR_QDMA_PORT << 8)
+#define BITS_GDM_MFRC_P_CPU_QDMA (NR_QDMA_PORT << 4)
+#define BITS_GDM_OFRC_P_CPU_QDMA (NR_QDMA_PORT << 0)
+#define BITS_GDM_ALL_FRC_P_CPU_QDMA                                           \
+	(BITS_GDM_UFRC_P_CPU_QDMA | BITS_GDM_BFRC_P_CPU_QDMA |               \
+	 BITS_GDM_MFRC_P_CPU_QDMA | BITS_GDM_OFRC_P_CPU_QDMA)
+
+#define BITS_GDM_UFRC_P_DISCARD (NR_DISCARD << 12)
+#define BITS_GDM_BFRC_P_DISCARD (NR_DISCARD << 8)
+#define BITS_GDM_MFRC_P_DISCARD (NR_DISCARD << 4)
+#define BITS_GDM_OFRC_P_DISCARD (NR_DISCARD << 0)
+#define BITS_GDM_ALL_FRC_P_DISCARD                                            \
+	(BITS_GDM_UFRC_P_DISCARD | BITS_GDM_BFRC_P_DISCARD |                 \
+	 BITS_GDM_MFRC_P_DISCARD | BITS_GDM_OFRC_P_DISCARD)
+
+#define hnat_is_enabled(hnat_priv) (hnat_priv->enable)
+#define hnat_enabled(hnat_priv) (hnat_priv->enable = 1)
+#define hnat_disabled(hnat_priv) (hnat_priv->enable = 0)
+#define hnat_is_enabled1(hnat_priv) (hnat_priv->enable1)
+#define hnat_enabled1(hnat_priv) (hnat_priv->enable1 = 1)
+#define hnat_disabled1(hnat_priv) (hnat_priv->enable1 = 0)
+
+#define entry_hnat_is_bound(e) (e->bfib1.state == BIND)
+#define entry_hnat_state(e) (e->bfib1.state)
+
+#define skb_hnat_is_hashed(skb)                                                \
+	(skb_hnat_entry(skb) != 0x3fff && skb_hnat_entry(skb) < hnat_priv->foe_etry_num)
+#define FROM_GE_LAN(skb) (skb_hnat_iface(skb) == FOE_MAGIC_GE_LAN)
+#define FROM_GE_WAN(skb) (skb_hnat_iface(skb) == FOE_MAGIC_GE_WAN)
+#define FROM_GE_PPD(skb) (skb_hnat_iface(skb) == FOE_MAGIC_GE_PPD)
+#define FROM_GE_VIRTUAL(skb) (skb_hnat_iface(skb) == FOE_MAGIC_GE_VIRTUAL)
+#define FROM_EXT(skb) (skb_hnat_iface(skb) == FOE_MAGIC_EXT)
+#define FOE_MAGIC_GE_LAN 0x1
+#define FOE_MAGIC_GE_WAN 0x2
+#define FOE_MAGIC_EXT 0x3
+#define FOE_MAGIC_GE_VIRTUAL 0x4
+#define FOE_MAGIC_GE_PPD 0x5
+#define FOE_MAGIC_WED0 0x6
+#define FOE_MAGIC_WED1 0x7
+#define FOE_INVALID 0xf
+#define index6b(i) (0x3fU - i) /* NOTE(review): 'i' is unparenthesized; expands wrongly for expression arguments -- prefer (0x3fU - (i)) */
+
+#define IPV4_HNAPT 0
+#define IPV4_HNAT 1
+#define IP_FORMAT(addr)                                                        \
+	(((unsigned char *)&addr)[3], ((unsigned char *)&addr)[2],              \
+	((unsigned char *)&addr)[1], ((unsigned char *)&addr)[0]) /* NOTE(review): outer parens make this a comma expression yielding only byte [0]; drop them if this is meant to expand into four printk arguments */
+
+/*PSE Ports*/
+#define NR_PDMA_PORT 0
+#define NR_GMAC1_PORT 1
+#define NR_GMAC2_PORT 2
+#define NR_WHNAT_WDMA_PORT 3
+#define NR_PPE_PORT 4
+#define NR_QDMA_PORT 5
+#define NR_DISCARD 7
+#define NR_WDMA0_PORT 8
+#define NR_WDMA1_PORT 9
+#define LAN_DEV_NAME hnat_priv->lan
+#define IS_WAN(dev)                                                            \
+	(!strncmp((dev)->name, hnat_priv->wan, strlen(hnat_priv->wan)))
+#define IS_LAN(dev) (!strncmp(dev->name, LAN_DEV_NAME, strlen(LAN_DEV_NAME)))
+#define IS_BR(dev) (!strncmp(dev->name, "br", 2))
+#define IS_WHNAT(dev)								\
+	((hnat_priv->data->whnat &&						\
+	 (get_wifi_hook_if_index_from_dev(dev) != 0)) ? 1 : 0)
+#define IS_EXT(dev) ((get_index_from_dev(dev) != 0) ? 1 : 0)
+#define IS_PPD(dev) (!strcmp(dev->name, hnat_priv->ppd))
+#define IS_IPV4_HNAPT(x) (((x)->bfib1.pkt_type == IPV4_HNAPT) ? 1 : 0)
+#define IS_IPV4_HNAT(x) (((x)->bfib1.pkt_type == IPV4_HNAT) ? 1 : 0)
+#define IS_IPV4_GRP(x) (IS_IPV4_HNAPT(x) | IS_IPV4_HNAT(x))
+#define IS_IPV4_DSLITE(x) (((x)->bfib1.pkt_type == IPV4_DSLITE) ? 1 : 0)
+#define IS_IPV4_MAPE(x) (((x)->bfib1.pkt_type == IPV4_MAP_E) ? 1 : 0)
+#define IS_IPV4_MAPT(x) (((x)->bfib1.pkt_type == IPV4_MAP_T) ? 1 : 0)
+#define IS_IPV6_3T_ROUTE(x) (((x)->bfib1.pkt_type == IPV6_3T_ROUTE) ? 1 : 0)
+#define IS_IPV6_5T_ROUTE(x) (((x)->bfib1.pkt_type == IPV6_5T_ROUTE) ? 1 : 0)
+#define IS_IPV6_6RD(x) (((x)->bfib1.pkt_type == IPV6_6RD) ? 1 : 0)
+#define IS_IPV6_GRP(x)                                                         \
+	(IS_IPV6_3T_ROUTE(x) | IS_IPV6_5T_ROUTE(x) | IS_IPV6_6RD(x) |          \
+	 IS_IPV4_DSLITE(x) | IS_IPV4_MAPE(x) | IS_IPV4_MAPT(x))
+#define IS_BOND_MODE (!strncmp(LAN_DEV_NAME, "bond", 4))
+#define IS_GMAC1_MODE ((hnat_priv->gmac_num == 1) ? 1 : 0)
+
+#define es(entry) (entry_state[entry->bfib1.state])
+#define ei(entry, end) (hnat_priv->foe_etry_num - (int)(end - entry))
+#define pt(entry) (packet_type[entry->ipv4_hnapt.bfib1.pkt_type])
+#define ipv4_smac(mac, e)                                                      \
+	({                                                                     \
+		mac[0] = e->ipv4_hnapt.smac_hi[3];                             \
+		mac[1] = e->ipv4_hnapt.smac_hi[2];                             \
+		mac[2] = e->ipv4_hnapt.smac_hi[1];                             \
+		mac[3] = e->ipv4_hnapt.smac_hi[0];                             \
+		mac[4] = e->ipv4_hnapt.smac_lo[1];                             \
+		mac[5] = e->ipv4_hnapt.smac_lo[0];                             \
+	})
+#define ipv4_dmac(mac, e)                                                      \
+	({                                                                     \
+		mac[0] = e->ipv4_hnapt.dmac_hi[3];                             \
+		mac[1] = e->ipv4_hnapt.dmac_hi[2];                             \
+		mac[2] = e->ipv4_hnapt.dmac_hi[1];                             \
+		mac[3] = e->ipv4_hnapt.dmac_hi[0];                             \
+		mac[4] = e->ipv4_hnapt.dmac_lo[1];                             \
+		mac[5] = e->ipv4_hnapt.dmac_lo[0];                             \
+	})
+
+#define IS_DSA_LAN(dev) (!strncmp(dev->name, "lan", 3))
+#define IS_DSA_WAN(dev) (!strncmp(dev->name, "wan", 3))
+#define NONE_DSA_PORT 0xff
+#define MAX_CRSN_NUM 32
+#define IPV6_HDR_LEN 40
+
+/*QDMA_PAGE value*/
+#define NUM_OF_Q_PER_PAGE 16
+
+/*IPv6 Header*/
+#ifndef NEXTHDR_IPIP
+#define NEXTHDR_IPIP 4
+#endif
+
+extern const struct of_device_id of_hnat_match[];
+extern struct mtk_hnat *hnat_priv;
+
+#if defined(CONFIG_NET_DSA_MT7530)
+void hnat_dsa_fill_stag(const struct net_device *netdev,
+			struct foe_entry *entry,
+			struct flow_offload_hw_path *hw_path,
+			u16 eth_proto, int mape);
+
+static inline bool hnat_dsa_is_enable(struct mtk_hnat *priv)
+{
+	return (priv->wan_dsa_port != NONE_DSA_PORT);
+}
+#else
+static inline void hnat_dsa_fill_stag(const struct net_device *netdev,
+				      struct foe_entry *entry,
+				      struct flow_offload_hw_path *hw_path,
+				      u16 eth_proto, int mape)
+{
+}
+
+static inline bool hnat_dsa_is_enable(struct mtk_hnat *priv)
+{
+	return false;
+}
+#endif
+
+void hnat_deinit_debugfs(struct mtk_hnat *h);
+int hnat_init_debugfs(struct mtk_hnat *h);
+int hnat_register_nf_hooks(void);
+void hnat_unregister_nf_hooks(void);
+int whnat_adjust_nf_hooks(void);
+int mtk_hqos_ptype_cb(struct sk_buff *skb, struct net_device *dev,
+		      struct packet_type *pt, struct net_device *unused);
+extern int dbg_cpu_reason;
+extern int debug_level;
+extern int hook_toggle;
+extern int mape_toggle;
+
+int ext_if_add(struct extdev_entry *ext_entry);
+int ext_if_del(struct extdev_entry *ext_entry);
+void cr_set_field(void __iomem *reg, u32 field, u32 val);
+int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no);
+int mtk_sw_nat_hook_rx(struct sk_buff *skb);
+void mtk_ppe_dev_register_hook(struct net_device *dev);
+void mtk_ppe_dev_unregister_hook(struct net_device *dev);
+int nf_hnat_netdevice_event(struct notifier_block *unused, unsigned long event,
+			    void *ptr);
+int nf_hnat_netevent_handler(struct notifier_block *unused, unsigned long event,
+			     void *ptr);
+uint32_t foe_dump_pkt(struct sk_buff *skb);
+uint32_t hnat_cpu_reason_cnt(struct sk_buff *skb);
+int hnat_enable_hook(void);
+int hnat_disable_hook(void);
+void hnat_cache_ebl(int enable);
+void set_gmac_ppe_fwd(int gmac_no, int enable);
+int entry_delete(int index);
+
+static inline u16 foe_timestamp(struct mtk_hnat *h)
+{
+	return (readl(hnat_priv->fe_base + 0x0010)) & 0xffff; /* NOTE(review): ignores parameter 'h'; presumably should read h->fe_base -- confirm */
+}
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_debugfs.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_debugfs.c
new file mode 100644
index 0000000..4ae9128
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_debugfs.c
@@ -0,0 +1,1952 @@
+/*   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; version 2 of the License
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
+ *   Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/iopoll.h>
+
+#include "hnat.h"
+#include "nf_hnat_mtk.h"
+#include "../mtk_eth_soc.h"
+
+/* Entry state filter for the hnat_entry dump; defaults to bound entries. */
+int dbg_entry_state = BIND;
+/* Common signature for all debugfs write handlers dispatched below. */
+typedef int (*debugfs_write_func)(int par1);
+int debug_level;	/* verbosity 0-7 used by the dump routines */
+int dbg_cpu_reason;	/* CPU reason being tracked when type 1 is selected */
+int hook_toggle;	/* presumably enables/disables the nf hooks — defined here, used elsewhere */
+int mape_toggle;	/* presumably selects MAP-E handling — defined here, used elsewhere */
+/* Per-reason trap counters; indices match the switch in hnat_cpu_reason_cnt(). */
+unsigned int dbg_cpu_reason_cnt[MAX_CRSN_NUM];
+
+/* Printable names indexed by foe_entry bfib1.state (INVALID..FIN). */
+static const char * const entry_state[] = { "INVALID", "UNBIND", "BIND", "FIN" };
+
+/* Printable names indexed by foe_entry bfib1.pkt_type. */
+static const char * const packet_type[] = {
+	"IPV4_HNAPT",    "IPV4_HNAT",     "IPV6_1T_ROUTE", "IPV4_DSLITE",
+	"IPV6_3T_ROUTE", "IPV6_5T_ROUTE", "REV",	   "IPV6_6RD",
+	"IPV4_MAP_T",    "IPV4_MAP_E",
+};
+
+/*
+ * Translate the HNAT CPU-trap reason carried in @skb into a printable
+ * string.  Unknown reasons are formatted into a small static buffer, so
+ * the returned pointer is not reentrancy-safe in that case.
+ *
+ * Fixes: the fallback used unbounded sprintf() and printed the FOE entry
+ * index (skb_hnat_entry) under a "CPU Reason Error" label; it now prints
+ * the actual reason code via bounded snprintf().  Also corrects the
+ * "hot limit" typo in the HIT_BIND_TTL_1 message.
+ */
+static uint8_t *show_cpu_reason(struct sk_buff *skb)
+{
+	static u8 buf[32];
+
+	switch (skb_hnat_reason(skb)) {
+	case TTL_0:
+		return "IPv4(IPv6) TTL(hop limit)\n";
+	case HAS_OPTION_HEADER:
+		return "Ipv4(IPv6) has option(extension) header\n";
+	case NO_FLOW_IS_ASSIGNED:
+		return "No flow is assigned\n";
+	case IPV4_WITH_FRAGMENT:
+		return "IPv4 HNAT doesn't support IPv4 /w fragment\n";
+	case IPV4_HNAPT_DSLITE_WITH_FRAGMENT:
+		return "IPv4 HNAPT/DS-Lite doesn't support IPv4 /w fragment\n";
+	case IPV4_HNAPT_DSLITE_WITHOUT_TCP_UDP:
+		return "IPv4 HNAPT/DS-Lite can't find TCP/UDP sport/dport\n";
+	case IPV6_5T_6RD_WITHOUT_TCP_UDP:
+		return "IPv6 5T-route/6RD can't find TCP/UDP sport/dport\n";
+	case TCP_FIN_SYN_RST:
+		return "Ingress packet is TCP fin/syn/rst\n";
+	case UN_HIT:
+		return "FOE Un-hit\n";
+	case HIT_UNBIND:
+		return "FOE Hit unbind\n";
+	case HIT_UNBIND_RATE_REACH:
+		return "FOE Hit unbind & rate reach\n";
+	case HIT_BIND_TCP_FIN:
+		return "Hit bind PPE TCP FIN entry\n";
+	case HIT_BIND_TTL_1:
+		return "Hit bind PPE entry and TTL(hop limit) = 1 and TTL(hop limit) - 1\n";
+	case HIT_BIND_WITH_VLAN_VIOLATION:
+		return "Hit bind and VLAN replacement violation\n";
+	case HIT_BIND_KEEPALIVE_UC_OLD_HDR:
+		return "Hit bind and keep alive with unicast old-header packet\n";
+	case HIT_BIND_KEEPALIVE_MC_NEW_HDR:
+		return "Hit bind and keep alive with multicast new-header packet\n";
+	case HIT_BIND_KEEPALIVE_DUP_OLD_HDR:
+		return "Hit bind and keep alive with duplicate old-header packet\n";
+	case HIT_BIND_FORCE_TO_CPU:
+		return "FOE Hit bind & force to CPU\n";
+	case HIT_BIND_EXCEED_MTU:
+		return "Hit bind and exceed MTU\n";
+	case HIT_BIND_MULTICAST_TO_CPU:
+		return "Hit bind multicast packet to CPU\n";
+	case HIT_BIND_MULTICAST_TO_GMAC_CPU:
+		return "Hit bind multicast packet to GMAC & CPU\n";
+	case HIT_PRE_BIND:
+		return "Pre bind\n";
+	}
+
+	/* Unknown reason: report the raw code; snprintf keeps writes in buf. */
+	snprintf(buf, sizeof(buf), "CPU Reason Error - %X\n",
+		 skb_hnat_reason(skb));
+	return buf;
+}
+
+/*
+ * Dump the FOE entry referenced by @skb's HNAT metadata to the kernel log:
+ * receive interface, CPU-trap reason, then the entry's IPs/ports and state,
+ * decoded per address family.  Always returns 1.
+ */
+uint32_t foe_dump_pkt(struct sk_buff *skb)
+{
+	struct foe_entry *entry;
+
+	entry = &hnat_priv->foe_table_cpu[skb_hnat_entry(skb)];
+	pr_info("\nRx===<FOE_Entry=%d>=====\n", skb_hnat_entry(skb));
+	pr_info("RcvIF=%s\n", skb->dev->name);
+	pr_info("FOE_Entry=%d\n", skb_hnat_entry(skb));
+	pr_info("CPU Reason=%s", show_cpu_reason(skb));
+	pr_info("ALG=%d\n", skb_hnat_alg(skb));
+	pr_info("SP=%d\n", skb_hnat_sport(skb));
+
+	/* some special alert occurred, so entry_num is useless (just skip it) */
+	if (skb_hnat_entry(skb) == 0x3fff)
+		return 1;
+
+	/* PPE: IPv4 packet=IPV4_HNAT IPv6 packet=IPV6_ROUTE */
+	if (IS_IPV4_GRP(entry)) {
+		/* entries store addresses host-ordered; convert for %pI4 */
+		__be32 saddr = htonl(entry->ipv4_hnapt.sip);
+		__be32 daddr = htonl(entry->ipv4_hnapt.dip);
+
+		pr_info("Information Block 1=%x\n",
+			entry->ipv4_hnapt.info_blk1);
+		pr_info("SIP=%pI4\n", &saddr);
+		pr_info("DIP=%pI4\n", &daddr);
+		pr_info("SPORT=%d\n", entry->ipv4_hnapt.sport);
+		pr_info("DPORT=%d\n", entry->ipv4_hnapt.dport);
+		pr_info("Information Block 2=%x\n",
+			entry->ipv4_hnapt.info_blk2);
+		pr_info("State = %s, proto = %s\n", entry->bfib1.state == 0 ?
+			"Invalid" : entry->bfib1.state == 1 ?
+			"Unbind" : entry->bfib1.state == 2 ?
+			"BIND" : entry->bfib1.state == 3 ?
+			"FIN" : "Unknown",
+			entry->ipv4_hnapt.bfib1.udp == 0 ?
+			"TCP" : entry->ipv4_hnapt.bfib1.udp == 1 ?
+			"UDP" : "Unknown");
+	} else if (IS_IPV6_GRP(entry)) {
+		pr_info("Information Block 1=%x\n",
+			entry->ipv6_5t_route.info_blk1);
+		pr_info("IPv6_SIP=%08X:%08X:%08X:%08X\n",
+			entry->ipv6_5t_route.ipv6_sip0,
+			entry->ipv6_5t_route.ipv6_sip1,
+			entry->ipv6_5t_route.ipv6_sip2,
+			entry->ipv6_5t_route.ipv6_sip3,
+			entry->ipv6_5t_route.ipv6_sip3);
+		pr_info("IPv6_DIP=%08X:%08X:%08X:%08X\n",
+			entry->ipv6_5t_route.ipv6_dip0,
+			entry->ipv6_5t_route.ipv6_dip1,
+			entry->ipv6_5t_route.ipv6_dip2,
+			entry->ipv6_5t_route.ipv6_dip3);
+		pr_info("SPORT=%d\n", entry->ipv6_5t_route.sport);
+		pr_info("DPORT=%d\n", entry->ipv6_5t_route.dport);
+		pr_info("Information Block 2=%x\n",
+			entry->ipv6_5t_route.info_blk2);
+		pr_info("State = %s, proto = %s\n", entry->bfib1.state == 0 ?
+			"Invalid" : entry->bfib1.state == 1 ?
+			"Unbind" : entry->bfib1.state == 2 ?
+			"BIND" : entry->bfib1.state == 3 ?
+			"FIN" : "Unknown",
+			entry->ipv6_5t_route.bfib1.udp == 0 ?
+			"TCP" : entry->ipv6_5t_route.bfib1.udp == 1 ?
+			"UDP" :	"Unknown");
+	} else {
+		pr_info("unknown Pkt_type=%d\n", entry->bfib1.pkt_type);
+	}
+
+	pr_info("==================================\n");
+	return 1;
+}
+
+/*
+ * Account one CPU-trapped packet: bump the diagnostic counter slot that
+ * corresponds to the hardware CPU reason carried in @skb.  Reasons with
+ * no assigned slot are ignored.  Always returns 0.
+ */
+uint32_t hnat_cpu_reason_cnt(struct sk_buff *skb)
+{
+	switch (skb_hnat_reason(skb)) {
+	case TTL_0:
+		dbg_cpu_reason_cnt[0]++;
+		break;
+	case HAS_OPTION_HEADER:
+		dbg_cpu_reason_cnt[1]++;
+		break;
+	case NO_FLOW_IS_ASSIGNED:
+		dbg_cpu_reason_cnt[2]++;
+		break;
+	case IPV4_WITH_FRAGMENT:
+		dbg_cpu_reason_cnt[3]++;
+		break;
+	case IPV4_HNAPT_DSLITE_WITH_FRAGMENT:
+		dbg_cpu_reason_cnt[4]++;
+		break;
+	case IPV4_HNAPT_DSLITE_WITHOUT_TCP_UDP:
+		dbg_cpu_reason_cnt[5]++;
+		break;
+	case IPV6_5T_6RD_WITHOUT_TCP_UDP:
+		dbg_cpu_reason_cnt[6]++;
+		break;
+	case TCP_FIN_SYN_RST:
+		dbg_cpu_reason_cnt[7]++;
+		break;
+	case UN_HIT:
+		dbg_cpu_reason_cnt[8]++;
+		break;
+	case HIT_UNBIND:
+		dbg_cpu_reason_cnt[9]++;
+		break;
+	case HIT_UNBIND_RATE_REACH:
+		dbg_cpu_reason_cnt[10]++;
+		break;
+	case HIT_BIND_TCP_FIN:
+		dbg_cpu_reason_cnt[11]++;
+		break;
+	case HIT_BIND_TTL_1:
+		dbg_cpu_reason_cnt[12]++;
+		break;
+	case HIT_BIND_WITH_VLAN_VIOLATION:
+		dbg_cpu_reason_cnt[13]++;
+		break;
+	case HIT_BIND_KEEPALIVE_UC_OLD_HDR:
+		dbg_cpu_reason_cnt[14]++;
+		break;
+	case HIT_BIND_KEEPALIVE_MC_NEW_HDR:
+		dbg_cpu_reason_cnt[15]++;
+		break;
+	case HIT_BIND_KEEPALIVE_DUP_OLD_HDR:
+		dbg_cpu_reason_cnt[16]++;
+		break;
+	case HIT_BIND_FORCE_TO_CPU:
+		dbg_cpu_reason_cnt[17]++;
+		break;
+	case HIT_BIND_EXCEED_MTU:
+		dbg_cpu_reason_cnt[18]++;
+		break;
+	case HIT_BIND_MULTICAST_TO_CPU:
+		dbg_cpu_reason_cnt[19]++;
+		break;
+	case HIT_BIND_MULTICAST_TO_GMAC_CPU:
+		dbg_cpu_reason_cnt[20]++;
+		break;
+	case HIT_PRE_BIND:
+		dbg_cpu_reason_cnt[21]++;
+		break;
+	default:
+		/* unassigned reason: nothing to count */
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Debugfs "cpu_reason" write handler, type 0: set the global debug level
+ * to @level and print the usage banner listing the supported commands and
+ * CPU-reason codes.  Always returns 0.
+ */
+int hnat_set_usage(int level)
+{
+	debug_level = level;
+	pr_info("Read cpu_reason count: cat /sys/kernel/debug/hnat/cpu_reason\n\n");
+	pr_info("====================Advanced Settings====================\n");
+	pr_info("Usage: echo [type] [option] > /sys/kernel/debug/hnat/cpu_reason\n\n");
+	pr_info("Commands:   [type] [option]\n");
+	pr_info("              0       0~7      Set debug_level(0~7), current debug_level=%d\n",
+		debug_level);
+	pr_info("              1    cpu_reason  Track entries of the set cpu_reason\n");
+	pr_info("                               Set type=1 will change debug_level=7\n");
+	pr_info("cpu_reason list:\n");
+	pr_info("                       2       IPv4(IPv6) TTL(hop limit) = 0\n");
+	pr_info("                       3       IPv4(IPv6) has option(extension) header\n");
+	pr_info("                       7       No flow is assigned\n");
+	pr_info("                       8       IPv4 HNAT doesn't support IPv4 /w fragment\n");
+	pr_info("                       9       IPv4 HNAPT/DS-Lite doesn't support IPv4 /w fragment\n");
+	pr_info("                      10       IPv4 HNAPT/DS-Lite can't find TCP/UDP sport/dport\n");
+	pr_info("                      11       IPv6 5T-route/6RD can't find TCP/UDP sport/dport\n");
+	pr_info("                      12       Ingress packet is TCP fin/syn/rst\n");
+	pr_info("                      13       FOE Un-hit\n");
+	pr_info("                      14       FOE Hit unbind\n");
+	pr_info("                      15       FOE Hit unbind & rate reach\n");
+	pr_info("                      16       Hit bind PPE TCP FIN entry\n");
+	pr_info("                      17       Hit bind PPE entry and TTL(hop limit) = 1\n");
+	pr_info("                      18       Hit bind and VLAN replacement violation\n");
+	pr_info("                      19       Hit bind and keep alive with unicast old-header packet\n");
+	pr_info("                      20       Hit bind and keep alive with multicast new-header packet\n");
+	pr_info("                      21       Hit bind and keep alive with duplicate old-header packet\n");
+	pr_info("                      22       FOE Hit bind & force to CPU\n");
+	pr_info("                      23       HIT_BIND_WITH_OPTION_HEADER\n");
+	pr_info("                      24       Switch clone multicast packet to CPU\n");
+	pr_info("                      25       Switch clone multicast packet to GMAC1 & CPU\n");
+	pr_info("                      26       HIT_PRE_BIND\n");
+	pr_info("                      27       HIT_BIND_PACKET_SAMPLING\n");
+	pr_info("                      28       Hit bind and exceed MTU\n");
+
+	return 0;
+}
+
+/*
+ * Debugfs "cpu_reason" write handler, type 1: select which CPU reason to
+ * track and force full verbosity so matching packets are dumped.
+ * Always returns 0.
+ */
+int hnat_cpu_reason(int cpu_reason)
+{
+	debug_level = 7;	/* tracking a reason implies maximum debug output */
+	dbg_cpu_reason = cpu_reason;
+	pr_info("show cpu reason = %d\n", cpu_reason);
+
+	return 0;
+}
+
+/*
+ * Debugfs "hnat_entry" write handler, type 0: set the global debug level
+ * to @level and print the usage banner for the hnat_entry interface.
+ * Always returns 0.
+ */
+int entry_set_usage(int level)
+{
+	debug_level = level;
+	pr_info("Show all entries(default state=bind): cat /sys/kernel/debug/hnat/hnat_entry\n\n");
+	pr_info("====================Advanced Settings====================\n");
+	pr_info("Usage: echo [type] [option] > /sys/kernel/debug/hnat/hnat_entry\n\n");
+	pr_info("Commands:   [type] [option]\n");
+	pr_info("              0       0~7      Set debug_level(0~7), current debug_level=%d\n",
+		debug_level);
+	pr_info("              1       0~3      Change tracking state\n");
+	pr_info("                               (0:invalid; 1:unbind; 2:bind; 3:fin)\n");
+	pr_info("              2   <entry_idx>  Show specific foe entry info. of assigned <entry_idx>\n");
+	pr_info("              3   <entry_idx>  Delete specific foe entry of assigned <entry_idx>\n");
+
+	return 0;
+}
+
+/*
+ * Debugfs "hnat_entry" write handler, type 1: set the entry-state filter
+ * used by the table dump and echo the selection.  Always returns 0.
+ */
+int entry_set_state(int state)
+{
+	static const char * const names[] = {
+		"Invalid", "Unbind", "BIND", "FIN"
+	};
+	const char *name = "Unknown";
+
+	dbg_entry_state = state;
+	if (state >= 0 && state < (int)ARRAY_SIZE(names))
+		name = names[state];
+	pr_info("ENTRY STATE = %s\n", name);
+	return 0;
+}
+
+/*
+ * Debugfs "hnat_entry" write handler, type 2: pretty-print the FOE entry
+ * at @index, decoding addresses/ports/MACs per packet type.  With
+ * debug_level >= 2 the first 20 raw 32-bit words are dumped as well.
+ * Always returns 0.
+ */
+int entry_detail(int index)
+{
+	struct foe_entry *entry;
+	struct mtk_hnat *h = hnat_priv;
+	u32 *p;
+	u32 i = 0;
+	u32 print_cnt;
+	unsigned char h_dest[ETH_ALEN];
+	unsigned char h_source[ETH_ALEN];
+	__be32 saddr, daddr, nsaddr, ndaddr;
+
+	entry = h->foe_table_cpu + index;
+	/* addresses are stored host-ordered; convert for %pI4 printing */
+	saddr = htonl(entry->ipv4_hnapt.sip);
+	daddr = htonl(entry->ipv4_hnapt.dip);
+	nsaddr = htonl(entry->ipv4_hnapt.new_sip);
+	ndaddr = htonl(entry->ipv4_hnapt.new_dip);
+	p = (uint32_t *)entry;
+	pr_info("==========<Flow Table Entry=%d (%p)>===============\n", index,
+		entry);
+	if (debug_level >= 2) {
+		/* raw hex dump of the first 20 words of the entry */
+		print_cnt = 20;
+		for (i = 0; i < print_cnt; i++)
+			pr_info("%02d: %08X\n", i, *(p + i));
+	}
+	pr_info("-----------------<Flow Info>------------------\n");
+	pr_info("Information Block 1: %08X\n", entry->ipv4_hnapt.info_blk1);
+
+	if (IS_IPV4_HNAPT(entry)) {
+		pr_info("Information Block 2: %08X (FP=%d FQOS=%d QID=%d)",
+			entry->ipv4_hnapt.info_blk2,
+			entry->ipv4_hnapt.iblk2.dp,
+			entry->ipv4_hnapt.iblk2.fqos,
+			entry->ipv4_hnapt.iblk2.qid);
+		pr_info("Create IPv4 HNAPT entry\n");
+		pr_info("IPv4 Org IP/Port: %pI4:%d->%pI4:%d\n", &saddr,
+			entry->ipv4_hnapt.sport, &daddr,
+			entry->ipv4_hnapt.dport);
+		pr_info("IPv4 New IP/Port: %pI4:%d->%pI4:%d\n", &nsaddr,
+			entry->ipv4_hnapt.new_sport, &ndaddr,
+			entry->ipv4_hnapt.new_dport);
+	} else if (IS_IPV4_HNAT(entry)) {
+		pr_info("Information Block 2: %08X\n",
+			entry->ipv4_hnapt.info_blk2);
+		pr_info("Create IPv4 HNAT entry\n");
+		pr_info("IPv4 Org IP: %pI4->%pI4\n", &saddr, &daddr);
+		pr_info("IPv4 New IP: %pI4->%pI4\n", &nsaddr, &ndaddr);
+	} else if (IS_IPV4_DSLITE(entry)) {
+		pr_info("Information Block 2: %08X\n",
+			entry->ipv4_dslite.info_blk2);
+		pr_info("Create IPv4 Ds-Lite entry\n");
+		pr_info("IPv4 Ds-Lite: %pI4:%d->%pI4:%d\n", &saddr,
+			entry->ipv4_dslite.sport, &daddr,
+			entry->ipv4_dslite.dport);
+		pr_info("EG DIPv6: %08X:%08X:%08X:%08X->%08X:%08X:%08X:%08X\n",
+			entry->ipv4_dslite.tunnel_sipv6_0,
+			entry->ipv4_dslite.tunnel_sipv6_1,
+			entry->ipv4_dslite.tunnel_sipv6_2,
+			entry->ipv4_dslite.tunnel_sipv6_3,
+			entry->ipv4_dslite.tunnel_dipv6_0,
+			entry->ipv4_dslite.tunnel_dipv6_1,
+			entry->ipv4_dslite.tunnel_dipv6_2,
+			entry->ipv4_dslite.tunnel_dipv6_3);
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+	} else if (IS_IPV4_MAPE(entry)) {
+		/* MAP-E reuses the DS-Lite layout plus translated IP/ports */
+		nsaddr = htonl(entry->ipv4_dslite.new_sip);
+		ndaddr = htonl(entry->ipv4_dslite.new_dip);
+
+		pr_info("Information Block 2: %08X\n",
+			entry->ipv4_dslite.info_blk2);
+		pr_info("Create IPv4 MAP-E entry\n");
+		pr_info("IPv4 MAP-E Org IP/Port: %pI4:%d->%pI4:%d\n",
+			&saddr,	entry->ipv4_dslite.sport,
+			&daddr,	entry->ipv4_dslite.dport);
+		pr_info("IPv4 MAP-E New IP/Port: %pI4:%d->%pI4:%d\n",
+			&nsaddr, entry->ipv4_dslite.new_sport,
+			&ndaddr, entry->ipv4_dslite.new_dport);
+		pr_info("EG DIPv6: %08X:%08X:%08X:%08X->%08X:%08X:%08X:%08X\n",
+			entry->ipv4_dslite.tunnel_sipv6_0,
+			entry->ipv4_dslite.tunnel_sipv6_1,
+			entry->ipv4_dslite.tunnel_sipv6_2,
+			entry->ipv4_dslite.tunnel_sipv6_3,
+			entry->ipv4_dslite.tunnel_dipv6_0,
+			entry->ipv4_dslite.tunnel_dipv6_1,
+			entry->ipv4_dslite.tunnel_dipv6_2,
+			entry->ipv4_dslite.tunnel_dipv6_3);
+#endif
+	} else if (IS_IPV6_3T_ROUTE(entry)) {
+		pr_info("Information Block 2: %08X\n",
+			entry->ipv6_3t_route.info_blk2);
+		pr_info("Create IPv6 3-Tuple entry\n");
+		pr_info("ING SIPv6->DIPv6: %08X:%08X:%08X:%08X-> %08X:%08X:%08X:%08X (Prot=%d)\n",
+			entry->ipv6_3t_route.ipv6_sip0,
+			entry->ipv6_3t_route.ipv6_sip1,
+			entry->ipv6_3t_route.ipv6_sip2,
+			entry->ipv6_3t_route.ipv6_sip3,
+			entry->ipv6_3t_route.ipv6_dip0,
+			entry->ipv6_3t_route.ipv6_dip1,
+			entry->ipv6_3t_route.ipv6_dip2,
+			entry->ipv6_3t_route.ipv6_dip3,
+			entry->ipv6_3t_route.prot);
+	} else if (IS_IPV6_5T_ROUTE(entry)) {
+		pr_info("Information Block 2: %08X\n",
+			entry->ipv6_5t_route.info_blk2);
+		pr_info("Create IPv6 5-Tuple entry\n");
+		pr_info("ING SIPv6->DIPv6: %08X:%08X:%08X:%08X:%d-> %08X:%08X:%08X:%08X:%d\n",
+			entry->ipv6_5t_route.ipv6_sip0,
+			entry->ipv6_5t_route.ipv6_sip1,
+			entry->ipv6_5t_route.ipv6_sip2,
+			entry->ipv6_5t_route.ipv6_sip3,
+			entry->ipv6_5t_route.sport,
+			entry->ipv6_5t_route.ipv6_dip0,
+			entry->ipv6_5t_route.ipv6_dip1,
+			entry->ipv6_5t_route.ipv6_dip2,
+			entry->ipv6_5t_route.ipv6_dip3,
+			entry->ipv6_5t_route.dport);
+	} else if (IS_IPV6_6RD(entry)) {
+		pr_info("Information Block 2: %08X\n",
+			entry->ipv6_6rd.info_blk2);
+		pr_info("Create IPv6 6RD entry\n");
+		pr_info("ING SIPv6->DIPv6: %08X:%08X:%08X:%08X:%d-> %08X:%08X:%08X:%08X:%d\n",
+			entry->ipv6_6rd.ipv6_sip0, entry->ipv6_6rd.ipv6_sip1,
+			entry->ipv6_6rd.ipv6_sip2, entry->ipv6_6rd.ipv6_sip3,
+			entry->ipv6_6rd.sport, entry->ipv6_6rd.ipv6_dip0,
+			entry->ipv6_6rd.ipv6_dip1, entry->ipv6_6rd.ipv6_dip2,
+			entry->ipv6_6rd.ipv6_dip3, entry->ipv6_6rd.dport);
+	}
+	/* smac_hi/lo and dmac_hi/lo are stored byte-swapped; rebuild MACs */
+	if (IS_IPV4_HNAPT(entry) || IS_IPV4_HNAT(entry)) {
+		*((u32 *)h_source) = swab32(entry->ipv4_hnapt.smac_hi);
+		*((u16 *)&h_source[4]) = swab16(entry->ipv4_hnapt.smac_lo);
+		*((u32 *)h_dest) = swab32(entry->ipv4_hnapt.dmac_hi);
+		*((u16 *)&h_dest[4]) = swab16(entry->ipv4_hnapt.dmac_lo);
+		pr_info("SMAC=%pM => DMAC=%pM\n", h_source, h_dest);
+		pr_info("State = %s, ",	entry->bfib1.state == 0 ?
+			"Invalid" : entry->bfib1.state == 1 ?
+			"Unbind" : entry->bfib1.state == 2 ?
+			"BIND" : entry->bfib1.state == 3 ?
+			"FIN" : "Unknown");
+		pr_info("Vlan_Layer = %u, ", entry->bfib1.vlan_layer);
+		pr_info("Eth_type = 0x%x, Vid1 = 0x%x, Vid2 = 0x%x\n",
+			entry->ipv4_hnapt.etype, entry->ipv4_hnapt.vlan1,
+			entry->ipv4_hnapt.vlan2);
+		pr_info("multicast = %d, pppoe = %d, proto = %s\n",
+			entry->ipv4_hnapt.iblk2.mcast,
+			entry->ipv4_hnapt.bfib1.psn,
+			entry->ipv4_hnapt.bfib1.udp == 0 ?
+			"TCP" :	entry->ipv4_hnapt.bfib1.udp == 1 ?
+			"UDP" : "Unknown");
+		pr_info("=========================================\n\n");
+	} else {
+		*((u32 *)h_source) = swab32(entry->ipv6_5t_route.smac_hi);
+		*((u16 *)&h_source[4]) = swab16(entry->ipv6_5t_route.smac_lo);
+		*((u32 *)h_dest) = swab32(entry->ipv6_5t_route.dmac_hi);
+		*((u16 *)&h_dest[4]) = swab16(entry->ipv6_5t_route.dmac_lo);
+		pr_info("SMAC=%pM => DMAC=%pM\n", h_source, h_dest);
+		pr_info("State = %s, ",	entry->bfib1.state == 0 ?
+			"Invalid" : entry->bfib1.state == 1 ?
+			"Unbind" : entry->bfib1.state == 2 ?
+			"BIND" : entry->bfib1.state == 3 ?
+			"FIN" : "Unknown");
+
+		pr_info("Vlan_Layer = %u, ", entry->bfib1.vlan_layer);
+		pr_info("Eth_type = 0x%x, Vid1 = 0x%x, Vid2 = 0x%x\n",
+			entry->ipv6_5t_route.etype, entry->ipv6_5t_route.vlan1,
+			entry->ipv6_5t_route.vlan2);
+		pr_info("multicast = %d, pppoe = %d, proto = %s\n",
+			entry->ipv6_5t_route.iblk2.mcast,
+			entry->ipv6_5t_route.bfib1.psn,
+			entry->ipv6_5t_route.bfib1.udp == 0 ?
+			"TCP" :	entry->ipv6_5t_route.bfib1.udp == 1 ?
+			"UDP" :	"Unknown");
+		pr_info("=========================================\n\n");
+	}
+	return 0;
+}
+
+/*
+ * entry_delete() - clear one FOE entry and flush the hardware cache.
+ * @index: entry index within the FOE table (user-controlled debugfs input).
+ *
+ * Fix: validate @index against the table size before the memset; the
+ * original wrote out of bounds for any index >= foe_etry_num.
+ *
+ * Returns 0 on success, -EINVAL for an out-of-range index.
+ */
+int entry_delete(int index)
+{
+	struct foe_entry *entry;
+	struct mtk_hnat *h = hnat_priv;
+
+	if (index < 0 || index >= h->foe_etry_num) {
+		pr_info("invalid entry idx = %d\n", index);
+		return -EINVAL;
+	}
+
+	entry = h->foe_table_cpu + index;
+	memset(entry, 0, sizeof(struct foe_entry));
+
+	/* clear HWNAT cache */
+	hnat_cache_ebl(1);
+
+	pr_info("delete entry idx = %d\n", index);
+
+	return 0;
+}
+EXPORT_SYMBOL(entry_delete);
+
+/*
+ * Debugfs "hnat_setting" write handler, type 0: set the global debug level
+ * to @level and print the usage banner for the hnat_setting interface.
+ * Always returns 0.
+ */
+int cr_set_usage(int level)
+{
+	debug_level = level;
+	pr_info("Dump hnat CR: cat /sys/kernel/debug/hnat/hnat_setting\n\n");
+	pr_info("====================Advanced Settings====================\n");
+	pr_info("Usage: echo [type] [option] > /sys/kernel/debug/hnat/hnat_setting\n\n");
+	pr_info("Commands:   [type] [option]\n");
+	pr_info("              0     0~7        Set debug_level(0~7), current debug_level=%d\n",
+		debug_level);
+	pr_info("              1     0~65535    Set binding threshold\n");
+	pr_info("              2     0~65535    Set TCP bind lifetime\n");
+	pr_info("              3     0~65535    Set FIN bind lifetime\n");
+	pr_info("              4     0~65535    Set UDP bind lifetime\n");
+	pr_info("              5     0~255      Set TCP keep alive interval\n");
+	pr_info("              6     0~255      Set UDP keep alive interval\n");
+
+	return 0;
+}
+
+/*
+ * Debugfs "hnat_setting" write handler, type 1: write @threshold to the
+ * PPE binding-threshold register (PPE_BNDR).  Always returns 0.
+ */
+int binding_threshold(int threshold)
+{
+	pr_info("Binding Threshold =%d\n", threshold);
+	writel(threshold, hnat_priv->ppe_base + PPE_BNDR);
+	return 0;
+}
+
+/*
+ * Debugfs "hnat_setting" write handler, type 2: program the bound-TCP
+ * entry aging delta (TCP_DLTA field of PPE_BND_AGE_1).  Always returns 0.
+ */
+int tcp_bind_lifetime(int tcp_life)
+{
+	pr_info("tcp_life = %d\n", tcp_life);
+	/* set Delta time for aging out an bind TCP FOE entry */
+	cr_set_field(hnat_priv->ppe_base + PPE_BND_AGE_1, TCP_DLTA, tcp_life);
+
+	return 0;
+}
+
+/*
+ * Debugfs "hnat_setting" write handler, type 3: program the bound TCP-FIN
+ * entry aging delta (FIN_DLTA field of PPE_BND_AGE_1).  Always returns 0.
+ */
+int fin_bind_lifetime(int fin_life)
+{
+	pr_info("fin_life = %d\n", fin_life);
+	/* set Delta time for aging out an bind TCP FIN FOE entry */
+	cr_set_field(hnat_priv->ppe_base + PPE_BND_AGE_1, FIN_DLTA, fin_life);
+
+	return 0;
+}
+
+/*
+ * Debugfs "hnat_setting" write handler, type 4: program the bound-UDP
+ * entry aging delta (UDP_DLTA field of PPE_BND_AGE_0).  Always returns 0.
+ */
+int udp_bind_lifetime(int udp_life)
+{
+	pr_info("udp_life = %d\n", udp_life);
+	/* set Delta time for aging out an bind UDP FOE entry */
+	cr_set_field(hnat_priv->ppe_base + PPE_BND_AGE_0, UDP_DLTA, udp_life);
+
+	return 0;
+}
+
+/*
+ * Debugfs "hnat_setting" write handler, type 5: program the keep-alive
+ * interval for bound TCP entries (TCP_KA field of PPE_KA).
+ *
+ * Fix: the interval comes from user input; clamp negative values to 0
+ * (the original only clamped the upper bound, letting a negative int
+ * reach the register field).  Always returns 0.
+ */
+int tcp_keep_alive(int tcp_interval)
+{
+	if (tcp_interval > 255) {
+		tcp_interval = 255;
+		pr_info("TCP keep alive max interval = 255\n");
+	} else if (tcp_interval < 0) {
+		tcp_interval = 0;
+		pr_info("TCP keep alive min interval = 0\n");
+	} else {
+		pr_info("tcp_interval = %d\n", tcp_interval);
+	}
+	/* Keep alive time for bind FOE TCP entry */
+	cr_set_field(hnat_priv->ppe_base + PPE_KA, TCP_KA, tcp_interval);
+
+	return 0;
+}
+
+/*
+ * Debugfs "hnat_setting" write handler, type 6: program the keep-alive
+ * interval for bound UDP entries (UDP_KA field of PPE_KA).
+ *
+ * Fixes: clamp negative user input to 0 (upper bound alone was enforced),
+ * and correct the copy-pasted "TCP/UDP" wording in the clamp message —
+ * this handler is UDP-only.  Always returns 0.
+ */
+int udp_keep_alive(int udp_interval)
+{
+	if (udp_interval > 255) {
+		udp_interval = 255;
+		pr_info("UDP keep alive max interval = 255\n");
+	} else if (udp_interval < 0) {
+		udp_interval = 0;
+		pr_info("UDP keep alive min interval = 0\n");
+	} else {
+		pr_info("udp_interval = %d\n", udp_interval);
+	}
+	/* Keep alive timer for bind FOE UDP entry */
+	cr_set_field(hnat_priv->ppe_base + PPE_KA, UDP_KA, udp_interval);
+
+	return 0;
+}
+
+/* Dispatch table for the "cpu_reason" debugfs file: index = command type. */
+static const debugfs_write_func hnat_set_func[] = {
+	[0] = hnat_set_usage,
+	[1] = hnat_cpu_reason,
+};
+
+/* Dispatch table for the "hnat_entry" debugfs file: index = command type. */
+static const debugfs_write_func entry_set_func[] = {
+	[0] = entry_set_usage,
+	[1] = entry_set_state,
+	[2] = entry_detail,
+	[3] = entry_delete,
+};
+
+/* Dispatch table for the "hnat_setting" debugfs file: index = command type. */
+static const debugfs_write_func cr_set_func[] = {
+	[0] = cr_set_usage,      [1] = binding_threshold,
+	[2] = tcp_bind_lifetime, [3] = fin_bind_lifetime,
+	[4] = udp_bind_lifetime, [5] = tcp_keep_alive,
+	[6] = udp_keep_alive,
+};
+
+/*
+ * Fetch and accumulate the hardware MIB byte/packet counters for FOE entry
+ * @index into h->acct[index].  Returns the updated accounting record, or
+ * NULL when per-flow accounting is off or the MIB engine stays busy.
+ *
+ * Fixes: the packet-count merge shifted a 24-bit value left by 16 in
+ * 32-bit arithmetic, truncating the top 8 bits (the bytes path already
+ * used a (u64) cast); also uses @h consistently instead of mixing in the
+ * hnat_priv global, and drops a dead initializer of ret.
+ */
+static struct hnat_accounting *hnat_get_count(struct mtk_hnat *h, u32 index)
+{
+	struct hnat_accounting *acount;
+	u32 val, cnt_r0, cnt_r1, cnt_r2;
+	int ret;
+
+	if (!h->data->per_flow_accounting)
+		return NULL;
+
+	/* bit 16 triggers a MIB read-out for the given entry index */
+	writel(index | (1 << 16), h->ppe_base + PPE_MIB_SER_CR);
+	ret = readx_poll_timeout_atomic(readl, h->ppe_base + PPE_MIB_SER_CR, val,
+					!(val & BIT_MIB_BUSY), 20, 10000);
+	if (ret < 0) {
+		pr_notice("mib busy,please check later\n");
+		return NULL;
+	}
+	cnt_r0 = readl(h->ppe_base + PPE_MIB_SER_R0);
+	cnt_r1 = readl(h->ppe_base + PPE_MIB_SER_R1);
+	cnt_r2 = readl(h->ppe_base + PPE_MIB_SER_R2);
+	acount = &h->acct[index];
+	/* bytes: 48-bit counter split as R0[31:0] + R1[15:0] */
+	acount->bytes += cnt_r0 + ((u64)(cnt_r1 & 0xffff) << 32);
+	/* packets: 40-bit counter split as R1[31:16] + R2[23:0] */
+	acount->packets +=
+		((cnt_r1 & 0xffff0000) >> 16) + ((u64)(cnt_r2 & 0xffffff) << 16);
+
+	return acount;
+}
+
+#define PRINT_COUNT(m, acount) {if (acount) \
+		seq_printf(m, "bytes=%llu|packets=%llu|", \
+			   acount->bytes, acount->packets); }
+/*
+ * seq_file show routine behind hnat_debug_fops: walk the entire FOE table
+ * and print one line per in-use entry, decoded per packet type, with
+ * per-flow byte/packet counters prepended when MIB accounting is enabled.
+ * Always returns 0.
+ */
+static int hnat_debug_show(struct seq_file *m, void *private)
+{
+	struct mtk_hnat *h = hnat_priv;
+	struct foe_entry *entry, *end;
+	unsigned char h_dest[ETH_ALEN];
+	unsigned char h_source[ETH_ALEN];
+	struct hnat_accounting *acount;
+	u32 entry_index = 0;
+
+	entry = h->foe_table_cpu;
+	end = h->foe_table_cpu + hnat_priv->foe_etry_num;
+	while (entry < end) {
+		/* state 0 marks an invalid/unused slot */
+		if (!entry->bfib1.state) {
+			entry++;
+			entry_index++;
+			continue;
+		}
+		acount = hnat_get_count(h, entry_index);
+		if (IS_IPV4_HNAPT(entry)) {
+			__be32 saddr = htonl(entry->ipv4_hnapt.sip);
+			__be32 daddr = htonl(entry->ipv4_hnapt.dip);
+			__be32 nsaddr = htonl(entry->ipv4_hnapt.new_sip);
+			__be32 ndaddr = htonl(entry->ipv4_hnapt.new_dip);
+
+			/* MACs are stored byte-swapped in the entry */
+			*((u32 *)h_source) = swab32(entry->ipv4_hnapt.smac_hi);
+			*((u16 *)&h_source[4]) =
+				swab16(entry->ipv4_hnapt.smac_lo);
+			*((u32 *)h_dest) = swab32(entry->ipv4_hnapt.dmac_hi);
+			*((u16 *)&h_dest[4]) =
+				swab16(entry->ipv4_hnapt.dmac_lo);
+			PRINT_COUNT(m, acount);
+			seq_printf(m,
+				   "addr=0x%p|index=%d|state=%s|type=%s|%pI4:%d->%pI4:%d=>%pI4:%d->%pI4:%d|%pM=>%pM|etype=0x%04x|info1=0x%x|info2=0x%x|vlan1=%d|vlan2=%d\n",
+				   entry, ei(entry, end), es(entry), pt(entry), &saddr,
+				   entry->ipv4_hnapt.sport, &daddr,
+				   entry->ipv4_hnapt.dport, &nsaddr,
+				   entry->ipv4_hnapt.new_sport, &ndaddr,
+				   entry->ipv4_hnapt.new_dport, h_source, h_dest,
+				   ntohs(entry->ipv4_hnapt.etype),
+				   entry->ipv4_hnapt.info_blk1,
+				   entry->ipv4_hnapt.info_blk2,
+				   entry->ipv4_hnapt.vlan1,
+				   entry->ipv4_hnapt.vlan2);
+		} else if (IS_IPV4_HNAT(entry)) {
+			__be32 saddr = htonl(entry->ipv4_hnapt.sip);
+			__be32 daddr = htonl(entry->ipv4_hnapt.dip);
+			__be32 nsaddr = htonl(entry->ipv4_hnapt.new_sip);
+			__be32 ndaddr = htonl(entry->ipv4_hnapt.new_dip);
+
+			*((u32 *)h_source) = swab32(entry->ipv4_hnapt.smac_hi);
+			*((u16 *)&h_source[4]) =
+				swab16(entry->ipv4_hnapt.smac_lo);
+			*((u32 *)h_dest) = swab32(entry->ipv4_hnapt.dmac_hi);
+			*((u16 *)&h_dest[4]) =
+				swab16(entry->ipv4_hnapt.dmac_lo);
+			PRINT_COUNT(m, acount);
+			seq_printf(m,
+				   "addr=0x%p|index=%d|state=%s|type=%s|%pI4->%pI4=>%pI4->%pI4|%pM=>%pM|etype=0x%04x|info1=0x%x|info2=0x%x|vlan1=%d|vlan2=%d\n",
+				   entry, ei(entry, end), es(entry), pt(entry), &saddr,
+				   &daddr, &nsaddr, &ndaddr, h_source, h_dest,
+				   ntohs(entry->ipv4_hnapt.etype),
+				   entry->ipv4_hnapt.info_blk1,
+				   entry->ipv4_hnapt.info_blk2,
+				   entry->ipv4_hnapt.vlan1,
+				   entry->ipv4_hnapt.vlan2);
+		} else if (IS_IPV6_5T_ROUTE(entry)) {
+			u32 ipv6_sip0 = entry->ipv6_3t_route.ipv6_sip0;
+			u32 ipv6_sip1 = entry->ipv6_3t_route.ipv6_sip1;
+			u32 ipv6_sip2 = entry->ipv6_3t_route.ipv6_sip2;
+			u32 ipv6_sip3 = entry->ipv6_3t_route.ipv6_sip3;
+			u32 ipv6_dip0 = entry->ipv6_3t_route.ipv6_dip0;
+			u32 ipv6_dip1 = entry->ipv6_3t_route.ipv6_dip1;
+			u32 ipv6_dip2 = entry->ipv6_3t_route.ipv6_dip2;
+			u32 ipv6_dip3 = entry->ipv6_3t_route.ipv6_dip3;
+
+			*((u32 *)h_source) =
+				swab32(entry->ipv6_5t_route.smac_hi);
+			*((u16 *)&h_source[4]) =
+				swab16(entry->ipv6_5t_route.smac_lo);
+			*((u32 *)h_dest) = swab32(entry->ipv6_5t_route.dmac_hi);
+			*((u16 *)&h_dest[4]) =
+				swab16(entry->ipv6_5t_route.dmac_lo);
+			PRINT_COUNT(m, acount);
+			seq_printf(m,
+				   "addr=0x%p|index=%d|state=%s|type=%s|SIP=%08x:%08x:%08x:%08x(sp=%d)->DIP=%08x:%08x:%08x:%08x(dp=%d)|%pM=>%pM|etype=0x%04x|info1=0x%x|info2=0x%x\n",
+				   entry, ei(entry, end), es(entry), pt(entry), ipv6_sip0,
+				   ipv6_sip1, ipv6_sip2, ipv6_sip3,
+				   entry->ipv6_5t_route.sport, ipv6_dip0,
+				   ipv6_dip1, ipv6_dip2, ipv6_dip3,
+				   entry->ipv6_5t_route.dport, h_source, h_dest,
+				   ntohs(entry->ipv6_5t_route.etype),
+				   entry->ipv6_5t_route.info_blk1,
+				   entry->ipv6_5t_route.info_blk2);
+		} else if (IS_IPV6_3T_ROUTE(entry)) {
+			u32 ipv6_sip0 = entry->ipv6_3t_route.ipv6_sip0;
+			u32 ipv6_sip1 = entry->ipv6_3t_route.ipv6_sip1;
+			u32 ipv6_sip2 = entry->ipv6_3t_route.ipv6_sip2;
+			u32 ipv6_sip3 = entry->ipv6_3t_route.ipv6_sip3;
+			u32 ipv6_dip0 = entry->ipv6_3t_route.ipv6_dip0;
+			u32 ipv6_dip1 = entry->ipv6_3t_route.ipv6_dip1;
+			u32 ipv6_dip2 = entry->ipv6_3t_route.ipv6_dip2;
+			u32 ipv6_dip3 = entry->ipv6_3t_route.ipv6_dip3;
+
+			*((u32 *)h_source) =
+				swab32(entry->ipv6_5t_route.smac_hi);
+			*((u16 *)&h_source[4]) =
+				swab16(entry->ipv6_5t_route.smac_lo);
+			*((u32 *)h_dest) = swab32(entry->ipv6_5t_route.dmac_hi);
+			*((u16 *)&h_dest[4]) =
+				swab16(entry->ipv6_5t_route.dmac_lo);
+			PRINT_COUNT(m, acount);
+			seq_printf(m,
+				   "addr=0x%p|index=%d|state=%s|type=%s|SIP=%08x:%08x:%08x:%08x->DIP=%08x:%08x:%08x:%08x|%pM=>%pM|etype=0x%04x|info1=0x%x|info2=0x%x\n",
+				   entry, ei(entry, end), es(entry), pt(entry), ipv6_sip0,
+				   ipv6_sip1, ipv6_sip2, ipv6_sip3, ipv6_dip0,
+				   ipv6_dip1, ipv6_dip2, ipv6_dip3, h_source,
+				   h_dest, ntohs(entry->ipv6_5t_route.etype),
+				   entry->ipv6_5t_route.info_blk1,
+				   entry->ipv6_5t_route.info_blk2);
+		} else if (IS_IPV6_6RD(entry)) {
+			u32 ipv6_sip0 = entry->ipv6_3t_route.ipv6_sip0;
+			u32 ipv6_sip1 = entry->ipv6_3t_route.ipv6_sip1;
+			u32 ipv6_sip2 = entry->ipv6_3t_route.ipv6_sip2;
+			u32 ipv6_sip3 = entry->ipv6_3t_route.ipv6_sip3;
+			u32 ipv6_dip0 = entry->ipv6_3t_route.ipv6_dip0;
+			u32 ipv6_dip1 = entry->ipv6_3t_route.ipv6_dip1;
+			u32 ipv6_dip2 = entry->ipv6_3t_route.ipv6_dip2;
+			u32 ipv6_dip3 = entry->ipv6_3t_route.ipv6_dip3;
+			__be32 tsaddr = htonl(entry->ipv6_6rd.tunnel_sipv4);
+			__be32 tdaddr = htonl(entry->ipv6_6rd.tunnel_dipv4);
+
+			*((u32 *)h_source) =
+				swab32(entry->ipv6_5t_route.smac_hi);
+			*((u16 *)&h_source[4]) =
+				swab16(entry->ipv6_5t_route.smac_lo);
+			*((u32 *)h_dest) = swab32(entry->ipv6_5t_route.dmac_hi);
+			*((u16 *)&h_dest[4]) =
+				swab16(entry->ipv6_5t_route.dmac_lo);
+			PRINT_COUNT(m, acount);
+			seq_printf(m,
+				   "addr=0x%p|index=%d|state=%s|type=%s|SIP=%08x:%08x:%08x:%08x(sp=%d)->DIP=%08x:%08x:%08x:%08x(dp=%d)|TSIP=%pI4->TDIP=%pI4|%pM=>%pM|etype=0x%04x|info1=0x%x|info2=0x%x\n",
+				   entry, ei(entry, end), es(entry), pt(entry), ipv6_sip0,
+				   ipv6_sip1, ipv6_sip2, ipv6_sip3,
+				   entry->ipv6_5t_route.sport, ipv6_dip0,
+				   ipv6_dip1, ipv6_dip2, ipv6_dip3,
+				   entry->ipv6_5t_route.dport, &tsaddr, &tdaddr,
+				   h_source, h_dest,
+				   ntohs(entry->ipv6_5t_route.etype),
+				   entry->ipv6_5t_route.info_blk1,
+				   entry->ipv6_5t_route.info_blk2);
+		} else if (IS_IPV4_DSLITE(entry)) {
+			__be32 saddr = htonl(entry->ipv4_hnapt.sip);
+			__be32 daddr = htonl(entry->ipv4_hnapt.dip);
+			u32 ipv6_tsip0 = entry->ipv4_dslite.tunnel_sipv6_0;
+			u32 ipv6_tsip1 = entry->ipv4_dslite.tunnel_sipv6_1;
+			u32 ipv6_tsip2 = entry->ipv4_dslite.tunnel_sipv6_2;
+			u32 ipv6_tsip3 = entry->ipv4_dslite.tunnel_sipv6_3;
+			u32 ipv6_tdip0 = entry->ipv4_dslite.tunnel_dipv6_0;
+			u32 ipv6_tdip1 = entry->ipv4_dslite.tunnel_dipv6_1;
+			u32 ipv6_tdip2 = entry->ipv4_dslite.tunnel_dipv6_2;
+			u32 ipv6_tdip3 = entry->ipv4_dslite.tunnel_dipv6_3;
+
+			*((u32 *)h_source) = swab32(entry->ipv4_dslite.smac_hi);
+			*((u16 *)&h_source[4]) =
+				swab16(entry->ipv4_dslite.smac_lo);
+			*((u32 *)h_dest) = swab32(entry->ipv4_dslite.dmac_hi);
+			*((u16 *)&h_dest[4]) =
+				swab16(entry->ipv4_dslite.dmac_lo);
+			PRINT_COUNT(m, acount);
+			seq_printf(m,
+				   "addr=0x%p|index=%d|state=%s|type=%s|SIP=%pI4->DIP=%pI4|TSIP=%08x:%08x:%08x:%08x->TDIP=%08x:%08x:%08x:%08x|%pM=>%pM|etype=0x%04x|info1=0x%x|info2=0x%x\n",
+				   entry, ei(entry, end), es(entry), pt(entry), &saddr,
+				   &daddr, ipv6_tsip0, ipv6_tsip1, ipv6_tsip2,
+				   ipv6_tsip3, ipv6_tdip0, ipv6_tdip1, ipv6_tdip2,
+				   ipv6_tdip3, h_source, h_dest,
+				   ntohs(entry->ipv6_5t_route.etype),
+				   entry->ipv6_5t_route.info_blk1,
+				   entry->ipv6_5t_route.info_blk2);
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+		} else if (IS_IPV4_MAPE(entry)) {
+			__be32 saddr = htonl(entry->ipv4_dslite.sip);
+			__be32 daddr = htonl(entry->ipv4_dslite.dip);
+			__be32 nsaddr = htonl(entry->ipv4_dslite.new_sip);
+			__be32 ndaddr = htonl(entry->ipv4_dslite.new_dip);
+			u32 ipv6_tsip0 = entry->ipv4_dslite.tunnel_sipv6_0;
+			u32 ipv6_tsip1 = entry->ipv4_dslite.tunnel_sipv6_1;
+			u32 ipv6_tsip2 = entry->ipv4_dslite.tunnel_sipv6_2;
+			u32 ipv6_tsip3 = entry->ipv4_dslite.tunnel_sipv6_3;
+			u32 ipv6_tdip0 = entry->ipv4_dslite.tunnel_dipv6_0;
+			u32 ipv6_tdip1 = entry->ipv4_dslite.tunnel_dipv6_1;
+			u32 ipv6_tdip2 = entry->ipv4_dslite.tunnel_dipv6_2;
+			u32 ipv6_tdip3 = entry->ipv4_dslite.tunnel_dipv6_3;
+
+			*((u32 *)h_source) = swab32(entry->ipv4_dslite.smac_hi);
+			*((u16 *)&h_source[4]) =
+				swab16(entry->ipv4_dslite.smac_lo);
+			*((u32 *)h_dest) = swab32(entry->ipv4_dslite.dmac_hi);
+			*((u16 *)&h_dest[4]) =
+				swab16(entry->ipv4_dslite.dmac_lo);
+			PRINT_COUNT(m, acount);
+			seq_printf(m,
+				   "addr=0x%p|index=%d|state=%s|type=%s|SIP=%pI4:%d->DIP=%pI4:%d|NSIP=%pI4:%d->NDIP=%pI4:%d|TSIP=%08x:%08x:%08x:%08x->TDIP=%08x:%08x:%08x:%08x|%pM=>%pM|etype=0x%04x|info1=0x%x|info2=0x%x\n",
+				   entry, ei(entry, end), es(entry), pt(entry),
+				   &saddr, entry->ipv4_dslite.sport,
+				   &daddr, entry->ipv4_dslite.dport,
+				   &nsaddr, entry->ipv4_dslite.new_sport,
+				   &ndaddr, entry->ipv4_dslite.new_dport,
+				   ipv6_tsip0, ipv6_tsip1, ipv6_tsip2,
+				   ipv6_tsip3, ipv6_tdip0, ipv6_tdip1,
+				   ipv6_tdip2, ipv6_tdip3, h_source, h_dest,
+				   ntohs(entry->ipv6_5t_route.etype),
+				   entry->ipv6_5t_route.info_blk1,
+				   entry->ipv6_5t_route.info_blk2);
+#endif
+		} else
+			seq_printf(m, "addr=0x%p|index=%d state=%s\n", entry, ei(entry, end),
+				   es(entry));
+		entry++;
+		entry_index++;
+	}
+
+	return 0;
+}
+
+/* debugfs open: bind hnat_debug_show (full FOE table dump) to a seq_file. */
+static int hnat_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, hnat_debug_show, file->private_data);
+}
+
+/* fops for the read-only "all_entry" debugfs file. */
+static const struct file_operations hnat_debug_fops = {
+	.open = hnat_debug_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/*
+ * List the registered WiFi-hook interfaces ("index:name" per line).
+ * Empty slots in hnat_priv->wifi_hook_if[] are skipped.
+ */
+static int hnat_whnat_show(struct seq_file *m, void *private)
+{
+	struct net_device *dev;
+	int idx;
+
+	for (idx = 0; idx < MAX_IF_NUM; idx++) {
+		dev = hnat_priv->wifi_hook_if[idx];
+		if (!dev)
+			continue;
+		seq_printf(m, "%d:%s\n", idx, dev->name);
+	}
+
+	return 0;
+}
+
+/* debugfs open: bind hnat_whnat_show (WiFi-hook interface list). */
+static int hnat_whnat_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, hnat_whnat_show, file->private_data);
+}
+
+/* fops for the read-only "whnat_interface" debugfs file. */
+static const struct file_operations hnat_whnat_fops = {
+	.open = hnat_whnat_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/*
+ * Dump and reset the per-CPU-reason packet counters.  The number in
+ * parentheses is the PPE "CPU reason" code.  Note: output goes to the
+ * kernel log (pr_info), not to the seq_file.
+ */
+int cpu_reason_read(struct seq_file *m, void *private)
+{
+	int i;
+
+	pr_info("============ CPU REASON =========\n");
+	pr_info("(2)IPv4(IPv6) TTL(hop limit) = %u\n", dbg_cpu_reason_cnt[0]);
+	pr_info("(3)Ipv4(IPv6) has option(extension) header = %u\n",
+		dbg_cpu_reason_cnt[1]);
+	pr_info("(7)No flow is assigned = %u\n", dbg_cpu_reason_cnt[2]);
+	pr_info("(8)IPv4 HNAT doesn't support IPv4 /w fragment = %u\n",
+		dbg_cpu_reason_cnt[3]);
+	pr_info("(9)IPv4 HNAPT/DS-Lite doesn't support IPv4 /w fragment = %u\n",
+		dbg_cpu_reason_cnt[4]);
+	pr_info("(10)IPv4 HNAPT/DS-Lite can't find TCP/UDP sport/dport = %u\n",
+		dbg_cpu_reason_cnt[5]);
+	pr_info("(11)IPv6 5T-route/6RD can't find TCP/UDP sport/dport = %u\n",
+		dbg_cpu_reason_cnt[6]);
+	pr_info("(12)Ingress packet is TCP fin/syn/rst = %u\n",
+		dbg_cpu_reason_cnt[7]);
+	pr_info("(13)FOE Un-hit = %u\n", dbg_cpu_reason_cnt[8]);
+	pr_info("(14)FOE Hit unbind = %u\n", dbg_cpu_reason_cnt[9]);
+	pr_info("(15)FOE Hit unbind & rate reach = %u\n",
+		dbg_cpu_reason_cnt[10]);
+	pr_info("(16)Hit bind PPE TCP FIN entry = %u\n",
+		dbg_cpu_reason_cnt[11]);
+	/* was "TTL(hot limit)" -- typo in the original message */
+	pr_info("(17)Hit bind PPE entry and TTL(hop limit) = 1 and TTL(hop limit) - 1 = %u\n",
+		dbg_cpu_reason_cnt[12]);
+	pr_info("(18)Hit bind and VLAN replacement violation = %u\n",
+		dbg_cpu_reason_cnt[13]);
+	pr_info("(19)Hit bind and keep alive with unicast old-header packet = %u\n",
+		dbg_cpu_reason_cnt[14]);
+	pr_info("(20)Hit bind and keep alive with multicast new-header packet = %u\n",
+		dbg_cpu_reason_cnt[15]);
+	pr_info("(21)Hit bind and keep alive with duplicate old-header packet = %u\n",
+		dbg_cpu_reason_cnt[16]);
+	pr_info("(22)FOE Hit bind & force to CPU = %u\n",
+		dbg_cpu_reason_cnt[17]);
+	/* was mislabelled "(28)"; this slot sits between reasons 22 and 24 */
+	pr_info("(23)Hit bind and exceed MTU = %u\n", dbg_cpu_reason_cnt[18]);
+	pr_info("(24)Hit bind multicast packet to CPU = %u\n",
+		dbg_cpu_reason_cnt[19]);
+	pr_info("(25)Hit bind multicast packet to GMAC & CPU = %u\n",
+		dbg_cpu_reason_cnt[20]);
+	pr_info("(26)Pre bind = %u\n", dbg_cpu_reason_cnt[21]);
+
+	/* reading the file clears all 22 counters */
+	for (i = 0; i < 22; i++)
+		dbg_cpu_reason_cnt[i] = 0;
+	return 0;
+}
+
+/* debugfs open: bind cpu_reason_read (CPU reason counters). */
+static int cpu_reason_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, cpu_reason_read, file->private_data);
+}
+
+/*
+ * Parse "<cmd> [arg]" written to the cpu_reason debugfs file and invoke
+ * hnat_set_func[cmd](arg).  Only cmd 0 and 1 are valid; anything else
+ * falls back to cmd 0 with arg 0.
+ */
+ssize_t cpu_reason_write(struct file *file, const char __user *buffer,
+			 size_t count, loff_t *data)
+{
+	char buf[32];
+	char *p_buf;
+	int len = count;
+	long arg0 = 0, arg1 = 0;
+	char *p_token = NULL;
+	char *p_delimiter = " \t";
+	int ret;
+
+	/* reject over-long input (need room for the terminating NUL) */
+	if (len >= sizeof(buf)) {
+		pr_info("input handling fail!\n");
+		return -1;
+	}
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+
+	p_buf = buf;
+	p_token = strsep(&p_buf, p_delimiter);
+	if (!p_token) {
+		arg0 = 0;
+	} else {
+		/* the original ignored kstrtol's result; fail explicitly */
+		ret = kstrtol(p_token, 10, &arg0);
+		if (ret)
+			return -EINVAL;
+	}
+
+	switch (arg0) {
+	case 0:
+	case 1:
+		p_token = strsep(&p_buf, p_delimiter);
+		if (!p_token) {
+			arg1 = 0;
+		} else {
+			ret = kstrtol(p_token, 10, &arg1);
+			if (ret)
+				return -EINVAL;
+		}
+		break;
+	default:
+		pr_info("no handler defined for command id(0x%08lx)\n\r", arg0);
+		arg0 = 0;
+		arg1 = 0;
+		break;
+	}
+
+	(*hnat_set_func[arg0])(arg1);
+
+	return len;
+}
+
+/* fops for the read/write "cpu_reason" debugfs file. */
+static const struct file_operations cpu_reason_fops = {
+	.open = cpu_reason_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = cpu_reason_write,
+	.release = single_release,
+};
+
+/*
+ * dbg_dump_entry - print one FOE entry in a one-line, type-specific format.
+ * @m:     seq_file receiving the text
+ * @entry: FOE entry to decode (hardware union layout)
+ * @index: table index shown in the output
+ *
+ * Which union view (ipv4_hnapt / ipv4_dslite / ipv6_*) is valid is
+ * decided by the IS_* type predicates below.
+ */
+void dbg_dump_entry(struct seq_file *m, struct foe_entry *entry,
+		    uint32_t index)
+{
+	__be32 saddr, daddr, nsaddr, ndaddr;
+
+	/* pre-convert the IPv4 view; only meaningful for IPv4 entry types */
+	saddr = htonl(entry->ipv4_hnapt.sip);
+	daddr = htonl(entry->ipv4_hnapt.dip);
+	nsaddr = htonl(entry->ipv4_hnapt.new_sip);
+	ndaddr = htonl(entry->ipv4_hnapt.new_dip);
+
+	if (IS_IPV4_HNAPT(entry)) {
+		seq_printf(m,
+			   "NAPT(%d): %pI4:%d->%pI4:%d => %pI4:%d->%pI4:%d\n",
+			   index, &saddr, entry->ipv4_hnapt.sport, &daddr,
+			   entry->ipv4_hnapt.dport, &nsaddr,
+			   entry->ipv4_hnapt.new_sport, &ndaddr,
+			   entry->ipv4_hnapt.new_dport);
+	} else if (IS_IPV4_HNAT(entry)) {
+		seq_printf(m, "NAT(%d): %pI4->%pI4 => %pI4->%pI4\n",
+			   index, &saddr, &daddr, &nsaddr, &ndaddr);
+	}
+
+	if (IS_IPV4_DSLITE(entry)) {
+		seq_printf(m,
+			   "IPv4 Ds-Lite(%d): %pI4:%d->%pI4:%d => %08X:%08X:%08X:%08X->%08X:%08X:%08X:%08X\n",
+			   index, &saddr, entry->ipv4_dslite.sport, &daddr,
+			   entry->ipv4_dslite.dport,
+			   entry->ipv4_dslite.tunnel_sipv6_0,
+			   entry->ipv4_dslite.tunnel_sipv6_1,
+			   entry->ipv4_dslite.tunnel_sipv6_2,
+			   entry->ipv4_dslite.tunnel_sipv6_3,
+			   entry->ipv4_dslite.tunnel_dipv6_0,
+			   entry->ipv4_dslite.tunnel_dipv6_1,
+			   entry->ipv4_dslite.tunnel_dipv6_2,
+			   entry->ipv4_dslite.tunnel_dipv6_3);
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+	} else if (IS_IPV4_MAPE(entry)) {
+		/* MAP-E reuses the ds-lite layout but also translates ports */
+		nsaddr = htonl(entry->ipv4_dslite.new_sip);
+		ndaddr = htonl(entry->ipv4_dslite.new_dip);
+
+		seq_printf(m,
+			   "IPv4 MAP-E(%d): %pI4:%d->%pI4:%d => %pI4:%d->%pI4:%d | Tunnel=%08X:%08X:%08X:%08X->%08X:%08X:%08X:%08X\n",
+			   index, &saddr, entry->ipv4_dslite.sport,
+			   &daddr, entry->ipv4_dslite.dport,
+			   &nsaddr, entry->ipv4_dslite.new_sport,
+			   &ndaddr, entry->ipv4_dslite.new_dport,
+			   entry->ipv4_dslite.tunnel_sipv6_0,
+			   entry->ipv4_dslite.tunnel_sipv6_1,
+			   entry->ipv4_dslite.tunnel_sipv6_2,
+			   entry->ipv4_dslite.tunnel_sipv6_3,
+			   entry->ipv4_dslite.tunnel_dipv6_0,
+			   entry->ipv4_dslite.tunnel_dipv6_1,
+			   entry->ipv4_dslite.tunnel_dipv6_2,
+			   entry->ipv4_dslite.tunnel_dipv6_3);
+#endif
+	} else if (IS_IPV6_3T_ROUTE(entry)) {
+		seq_printf(m,
+			   "IPv6_3T(%d): %08X:%08X:%08X:%08X => %08X:%08X:%08X:%08X (Prot=%d)\n",
+			   index, entry->ipv6_3t_route.ipv6_sip0,
+			   entry->ipv6_3t_route.ipv6_sip1,
+			   entry->ipv6_3t_route.ipv6_sip2,
+			   entry->ipv6_3t_route.ipv6_sip3,
+			   entry->ipv6_3t_route.ipv6_dip0,
+			   entry->ipv6_3t_route.ipv6_dip1,
+			   entry->ipv6_3t_route.ipv6_dip2,
+			   entry->ipv6_3t_route.ipv6_dip3,
+			   entry->ipv6_3t_route.prot);
+	} else if (IS_IPV6_5T_ROUTE(entry)) {
+		seq_printf(m,
+			   "IPv6_5T(%d): %08X:%08X:%08X:%08X:%d => %08X:%08X:%08X:%08X:%d\n",
+			   index, entry->ipv6_5t_route.ipv6_sip0,
+			   entry->ipv6_5t_route.ipv6_sip1,
+			   entry->ipv6_5t_route.ipv6_sip2,
+			   entry->ipv6_5t_route.ipv6_sip3,
+			   entry->ipv6_5t_route.sport,
+			   entry->ipv6_5t_route.ipv6_dip0,
+			   entry->ipv6_5t_route.ipv6_dip1,
+			   entry->ipv6_5t_route.ipv6_dip2,
+			   entry->ipv6_5t_route.ipv6_dip3,
+			   entry->ipv6_5t_route.dport);
+	} else if (IS_IPV6_6RD(entry)) {
+		seq_printf(m,
+			   "IPv6_6RD(%d): %08X:%08X:%08X:%08X:%d => %08X:%08X:%08X:%08X:%d\n",
+			   index, entry->ipv6_6rd.ipv6_sip0,
+			   entry->ipv6_6rd.ipv6_sip1, entry->ipv6_6rd.ipv6_sip2,
+			   entry->ipv6_6rd.ipv6_sip3, entry->ipv6_6rd.sport,
+			   entry->ipv6_6rd.ipv6_dip0, entry->ipv6_6rd.ipv6_dip1,
+			   entry->ipv6_6rd.ipv6_dip2, entry->ipv6_6rd.ipv6_dip3,
+			   entry->ipv6_6rd.dport);
+	}
+}
+
+/*
+ * Dump all FOE entries whose state matches the global dbg_entry_state
+ * filter, followed by a total count.
+ */
+int hnat_entry_read(struct seq_file *m, void *private)
+{
+	struct mtk_hnat *h = hnat_priv;
+	struct foe_entry *entry;
+	const char *state_str;
+	int matched = 0;
+	int i;
+
+	for (i = 0; i < h->foe_etry_num; i++) {
+		entry = &h->foe_table_cpu[i];
+		if (entry->bfib1.state == dbg_entry_state) {
+			matched++;
+			dbg_dump_entry(m, entry, i);
+		}
+	}
+
+	switch (dbg_entry_state) {
+	case 0:
+		state_str = "Invalid";
+		break;
+	case 1:
+		state_str = "Unbind";
+		break;
+	case 2:
+		state_str = "BIND";
+		break;
+	case 3:
+		state_str = "FIN";
+		break;
+	default:
+		state_str = "Unknown";
+		break;
+	}
+
+	seq_printf(m, "Total State = %s cnt = %d\n", state_str, matched);
+
+	return 0;
+}
+
+/*
+ * Parse "<cmd> [arg]" written to the hnat_entry debugfs file and invoke
+ * entry_set_func[cmd](arg).  Valid cmds are 0..3; anything else falls
+ * back to cmd 0 with arg 0.
+ */
+ssize_t hnat_entry_write(struct file *file, const char __user *buffer,
+			 size_t count, loff_t *data)
+{
+	char buf[32];
+	char *p_buf;
+	int len = count;
+	long arg0 = 0, arg1 = 0;
+	char *p_token = NULL;
+	char *p_delimiter = " \t";
+	int ret;
+
+	/* reject over-long input (need room for the terminating NUL) */
+	if (len >= sizeof(buf)) {
+		pr_info("input handling fail!\n");
+		return -1;
+	}
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+
+	p_buf = buf;
+	p_token = strsep(&p_buf, p_delimiter);
+	if (!p_token) {
+		arg0 = 0;
+	} else {
+		/* the original ignored kstrtol's result; fail explicitly */
+		ret = kstrtol(p_token, 10, &arg0);
+		if (ret)
+			return -EINVAL;
+	}
+
+	switch (arg0) {
+	case 0:
+	case 1:
+	case 2:
+	case 3:
+		p_token = strsep(&p_buf, p_delimiter);
+		if (!p_token) {
+			arg1 = 0;
+		} else {
+			ret = kstrtol(p_token, 10, &arg1);
+			if (ret)
+				return -EINVAL;
+		}
+		break;
+	default:
+		pr_info("no handler defined for command id(0x%08lx)\n\r", arg0);
+		arg0 = 0;
+		arg1 = 0;
+		break;
+	}
+
+	(*entry_set_func[arg0])(arg1);
+
+	return len;
+}
+
+/* debugfs open: bind hnat_entry_read (filtered FOE entry dump). */
+static int hnat_entry_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, hnat_entry_read, file->private_data);
+}
+
+/* fops for the read/write "hnat_entry" debugfs file. */
+static const struct file_operations hnat_entry_fops = {
+	.open = hnat_entry_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = hnat_entry_write,
+	.release = single_release,
+};
+
+/*
+ * Dump the PPE control registers to the kernel log, 16 bytes per line.
+ * Output goes to dmesg (pr_info), not to the seq_file.
+ *
+ * NOTE(review): the printed address is computed from h->foe_table_dev
+ * while the values are read from h->ppe_base -- looks inconsistent;
+ * confirm which base the printed addresses are meant to reflect.
+ */
+int hnat_setting_read(struct seq_file *m, void *private)
+{
+	struct mtk_hnat *h = hnat_priv;
+	int i;
+	int cr_max;
+
+	cr_max = 319 * 4;	/* span of control registers, in bytes */
+	for (i = 0; i < cr_max; i = i + 0x10) {
+		pr_info("0x%p : 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			(void *)h->foe_table_dev + i, readl(h->ppe_base + i),
+			readl(h->ppe_base + i + 4), readl(h->ppe_base + i + 8),
+			readl(h->ppe_base + i + 0xc));
+	}
+
+	return 0;
+}
+
+/* debugfs open: bind hnat_setting_read (PPE register dump). */
+static int hnat_setting_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, hnat_setting_read, file->private_data);
+}
+
+/*
+ * Parse "<cmd> [arg]" written to the hnat_setting debugfs file and
+ * invoke cr_set_func[cmd](arg).  Valid cmds are 0..6; anything else
+ * falls back to cmd 0 with arg 0.
+ */
+ssize_t hnat_setting_write(struct file *file, const char __user *buffer,
+			   size_t count, loff_t *data)
+{
+	char buf[32];
+	char *p_buf;
+	int len = count;
+	long arg0 = 0, arg1 = 0;
+	char *p_token = NULL;
+	char *p_delimiter = " \t";
+	int ret;
+
+	/* reject over-long input (need room for the terminating NUL) */
+	if (len >= sizeof(buf)) {
+		pr_info("input handling fail!\n");
+		return -1;
+	}
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+
+	p_buf = buf;
+	p_token = strsep(&p_buf, p_delimiter);
+	if (!p_token) {
+		arg0 = 0;
+	} else {
+		/* the original ignored kstrtol's result; fail explicitly */
+		ret = kstrtol(p_token, 10, &arg0);
+		if (ret)
+			return -EINVAL;
+	}
+
+	switch (arg0) {
+	case 0:
+	case 1:
+	case 2:
+	case 3:
+	case 4:
+	case 5:
+	case 6:
+		p_token = strsep(&p_buf, p_delimiter);
+		if (!p_token) {
+			arg1 = 0;
+		} else {
+			ret = kstrtol(p_token, 10, &arg1);
+			if (ret)
+				return -EINVAL;
+		}
+		break;
+	default:
+		pr_info("no handler defined for command id(0x%08lx)\n\r", arg0);
+		arg0 = 0;
+		arg1 = 0;
+		break;
+	}
+
+	(*cr_set_func[arg0])(arg1);
+
+	return len;
+}
+
+/* fops for the read/write "hnat_setting" debugfs file. */
+static const struct file_operations hnat_setting_fops = {
+	.open = hnat_setting_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = hnat_setting_write,
+	.release = single_release,
+};
+
+/*
+ * Dump the PPE multicast table to the kernel log.  Entries 0..15 live
+ * behind ppe_base, the rest behind fe_base.  Output goes to dmesg
+ * (pr_info), not to the seq_file.
+ */
+int mcast_table_dump(struct seq_file *m, void *private)
+{
+	struct mtk_hnat *h = hnat_priv;
+	struct ppe_mcast_h mcast_h;
+	struct ppe_mcast_l mcast_l;
+	void __iomem *reg_h, *reg_l;
+	u8 idx, total;
+
+	if (!h->pmcast)
+		return 0;
+
+	total = h->pmcast->max_entry;
+	pr_info("MAC | VID | PortMask | QosPortMask\n");
+	for (idx = 0; idx < total; idx++) {
+		if (idx < 0x10) {
+			reg_h = h->ppe_base + PPE_MCAST_H_0 + idx * 8;
+			reg_l = h->ppe_base + PPE_MCAST_L_0 + idx * 8;
+		} else {
+			reg_h = h->fe_base + PPE_MCAST_H_10 + (idx - 0x10) * 8;
+			reg_l = h->fe_base + PPE_MCAST_L_10 + (idx - 0x10) * 8;
+		}
+		mcast_h.u.value = readl(reg_h);
+		mcast_l.addr = readl(reg_l);
+		pr_info("%08x %d %c%c%c%c %c%c%c%c (QID=%d, mc_mpre_sel=%d)\n",
+			mcast_l.addr,
+			mcast_h.u.info.mc_vid,
+			(mcast_h.u.info.mc_px_en & 0x08) ? '1' : '-',
+			(mcast_h.u.info.mc_px_en & 0x04) ? '1' : '-',
+			(mcast_h.u.info.mc_px_en & 0x02) ? '1' : '-',
+			(mcast_h.u.info.mc_px_en & 0x01) ? '1' : '-',
+			(mcast_h.u.info.mc_px_qos_en & 0x08) ? '1' : '-',
+			(mcast_h.u.info.mc_px_qos_en & 0x04) ? '1' : '-',
+			(mcast_h.u.info.mc_px_qos_en & 0x02) ? '1' : '-',
+			(mcast_h.u.info.mc_px_qos_en & 0x01) ? '1' : '-',
+			mcast_h.u.info.mc_qos_qid +
+			((mcast_h.u.info.mc_qos_qid54) << 4),
+			mcast_h.u.info.mc_mpre_sel);
+	}
+
+	return 0;
+}
+
+/* debugfs open: bind mcast_table_dump (multicast table dump). */
+static int mcast_table_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mcast_table_dump, file->private_data);
+}
+
+/* fops for the read-only "mcast_table" debugfs file. */
+static const struct file_operations hnat_mcast_fops = {
+	.open = mcast_table_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/*
+ * List the registered external interfaces.  The ext_if[] array is
+ * scanned up to the first NULL slot, matching the original loop
+ * condition (i < MAX_EXT_DEVS && ext_if[i]).
+ */
+static int hnat_ext_show(struct seq_file *m, void *private)
+{
+	struct extdev_entry *ext;
+	int n;
+
+	for (n = 0; n < MAX_EXT_DEVS; n++) {
+		ext = hnat_priv->ext_if[n];
+		if (!ext)
+			break;
+		if (!ext->dev)
+			continue;
+		seq_printf(m, "ext devices [%d] = %s  (dev=%p, ifindex=%d)\n",
+			   n, ext->name, ext->dev, ext->dev->ifindex);
+	}
+
+	return 0;
+}
+
+/* debugfs open: bind hnat_ext_show (external interface list). */
+static int hnat_ext_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, hnat_ext_show, file->private_data);
+}
+
+/* fops for the read-only "external_interface" debugfs file. */
+static const struct file_operations hnat_ext_fops = {
+	.open = hnat_ext_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/*
+ * hnat_sched_show - report one QDMA TX scheduler's configuration.
+ * file->private_data carries the scheduler id.  Prints the enable bit,
+ * SP/WRR mode, max rate (7-bit mantissa with decimal exponent) and the
+ * list of queues currently mapped to this scheduler.
+ */
+static ssize_t hnat_sched_show(struct file *file, char __user *user_buf,
+			       size_t count, loff_t *ppos)
+{
+	long id = (long)file->private_data;
+	struct mtk_hnat *h = hnat_priv;
+	u32 qdma_tx_sch;
+	int enable;
+	int scheduling;
+	int max_rate;
+	char *buf;
+	unsigned int len = 0, buf_len = 1500;
+	ssize_t ret_cnt;
+	int scheduler, i;
+	u32 sch_reg;
+
+	buf = kzalloc(buf_len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (hnat_priv->data->num_of_sch == 4)
+		qdma_tx_sch = readl(h->fe_base + QDMA_TX_4SCH_BASE(id));
+	else
+		qdma_tx_sch = readl(h->fe_base + QDMA_TX_2SCH_BASE);
+
+	/* two schedulers share a register; odd ids use the upper halfword */
+	if (id & 0x1)
+		qdma_tx_sch >>= 16;
+	qdma_tx_sch &= 0xffff;
+	enable = !!(qdma_tx_sch & BIT(11));
+	scheduling = !!(qdma_tx_sch & BIT(15));
+	max_rate = ((qdma_tx_sch >> 4) & 0x7f);
+	/* low nibble is a decimal exponent applied to the mantissa */
+	qdma_tx_sch &= 0xf;
+	while (qdma_tx_sch--)
+		max_rate *= 10;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "EN\tScheduling\tMAX\tQueue#\n%d\t%s%16d\t", enable,
+			 (scheduling == 1) ? "WRR" : "SP", max_rate);
+
+	/* list every TX queue whose QTX_SCH points at this scheduler */
+	for (i = 0; i < MTK_QDMA_TX_NUM; i++) {
+		cr_set_field(h->fe_base + QDMA_PAGE, QTX_CFG_PAGE,
+			     (i / NUM_OF_Q_PER_PAGE));
+		sch_reg = readl(h->fe_base + QTX_SCH(i % NUM_OF_Q_PER_PAGE));
+		if (hnat_priv->data->num_of_sch == 4)
+			scheduler = (sch_reg >> 30) & 0x3;
+		else
+			scheduler = !!(sch_reg & BIT(31));
+		if (id == scheduler)
+			len += scnprintf(buf + len, buf_len - len, "%d  ", i);
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	if (len > buf_len)
+		len = buf_len;
+
+	ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+	kfree(buf);
+	return ret_cnt;
+}
+
+/*
+ * Configure a QDMA TX scheduler.  Input: "<en> <sp|wrr> <rate>".
+ * file->private_data carries the scheduler id; two schedulers share one
+ * 32-bit register (odd ids use the upper halfword).
+ */
+static ssize_t hnat_sched_write(struct file *file, const char __user *buf,
+				size_t length, loff_t *offset)
+{
+	long id = (long)file->private_data;
+	struct mtk_hnat *h = hnat_priv;
+	char line[64];
+	int enable, rate, exp = 0, shift = 0;
+	char scheduling[32];
+	size_t size;
+	u32 qdma_tx_sch;
+	u32 val = 0;
+
+	/* need room for the terminating NUL: the original "> sizeof" check
+	 * accepted a 64-byte write and left 'line' unterminated, letting
+	 * sscanf()/strlen() below run off the end of the buffer */
+	if (length >= sizeof(line))
+		return -EINVAL;
+
+	if (copy_from_user(line, buf, length))
+		return -EFAULT;
+
+	line[length] = '\0';
+
+	/* bound %s so a long mode token cannot overflow scheduling[32] */
+	if (sscanf(line, "%d %31s %d", &enable, scheduling, &rate) != 3)
+		return -EFAULT;
+
+	/* rate is stored as a 7-bit mantissa plus 4-bit decimal exponent */
+	while (rate > 127) {
+		rate /= 10;
+		exp++;
+	}
+
+	if (enable)
+		val |= BIT(11);
+	if (strcmp(scheduling, "sp") != 0)
+		val |= BIT(15);		/* anything but "sp" selects WRR */
+	val |= (rate & 0x7f) << 4;
+	val |= exp & 0xf;
+	if (id & 0x1)
+		shift = 16;
+
+	if (hnat_priv->data->num_of_sch == 4)
+		qdma_tx_sch = readl(h->fe_base + QDMA_TX_4SCH_BASE(id));
+	else
+		qdma_tx_sch = readl(h->fe_base + QDMA_TX_2SCH_BASE);
+
+	qdma_tx_sch &= ~(0xffff << shift);
+	qdma_tx_sch |= val << shift;
+	if (hnat_priv->data->num_of_sch == 4)
+		writel(qdma_tx_sch, h->fe_base + QDMA_TX_4SCH_BASE(id));
+	else
+		writel(qdma_tx_sch, h->fe_base + QDMA_TX_2SCH_BASE);
+
+	size = strlen(line);
+	*offset += size;
+
+	return length;
+}
+
+/* fops for the per-scheduler "qdma_schN" debugfs files. */
+static const struct file_operations hnat_sched_fops = {
+	.open = simple_open,
+	.read = hnat_sched_show,
+	.write = hnat_sched_write,
+	.llseek = default_llseek,
+};
+
+/*
+ * hnat_queue_show - report one QDMA TX queue's configuration.
+ * file->private_data carries the queue id.  Prints the assigned
+ * scheduler, hw/sw reservations, min/max shaper settings (7-bit
+ * mantissa with decimal exponent) and, on HW >= V2, the MIB packet
+ * and drop counters read via the QTX debug mode.
+ */
+static ssize_t hnat_queue_show(struct file *file, char __user *user_buf,
+			       size_t count, loff_t *ppos)
+{
+	struct mtk_hnat *h = hnat_priv;
+	long id = (long)file->private_data;
+	u32 qtx_sch;
+	u32 qtx_cfg;
+	int scheduler;
+	int min_rate_en;
+	int min_rate;
+	int min_rate_exp;
+	int max_rate_en;
+	int max_weight;
+	int max_rate;
+	int max_rate_exp;
+	char *buf;
+	unsigned int len = 0, buf_len = 1500;
+	ssize_t ret_cnt;
+
+	buf = kzalloc(buf_len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* queues are paged; select the page holding this queue first */
+	cr_set_field(h->fe_base + QDMA_PAGE, QTX_CFG_PAGE, (id / NUM_OF_Q_PER_PAGE));
+	qtx_cfg = readl(h->fe_base + QTX_CFG(id % NUM_OF_Q_PER_PAGE));
+	qtx_sch = readl(h->fe_base + QTX_SCH(id % NUM_OF_Q_PER_PAGE));
+	if (hnat_priv->data->num_of_sch == 4)
+		scheduler = (qtx_sch >> 30) & 0x3;
+	else
+		scheduler = !!(qtx_sch & BIT(31));
+	min_rate_en = !!(qtx_sch & BIT(27));
+	min_rate = (qtx_sch >> 20) & 0x7f;
+	min_rate_exp = (qtx_sch >> 16) & 0xf;
+	max_rate_en = !!(qtx_sch & BIT(11));
+	max_weight = (qtx_sch >> 12) & 0xf;
+	max_rate = (qtx_sch >> 4) & 0x7f;
+	max_rate_exp = qtx_sch & 0xf;
+	/* expand mantissa * 10^exponent */
+	while (min_rate_exp--)
+		min_rate *= 10;
+
+	while (max_rate_exp--)
+		max_rate *= 10;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "scheduler: %d\nhw resv: %d\nsw resv: %d\n", scheduler,
+			 (qtx_cfg >> 8) & 0xff, qtx_cfg & 0xff);
+
+	if (hnat_priv->data->version != MTK_HNAT_V1) {
+		/* Switch to debug mode */
+		cr_set_field(h->fe_base + QTX_MIB_IF, MIB_ON_QTX_CFG, 1);
+		cr_set_field(h->fe_base + QTX_MIB_IF, VQTX_MIB_EN, 1);
+		qtx_cfg = readl(h->fe_base + QTX_CFG(id % NUM_OF_Q_PER_PAGE));
+		qtx_sch = readl(h->fe_base + QTX_SCH(id % NUM_OF_Q_PER_PAGE));
+		len += scnprintf(buf + len, buf_len - len,
+				 "packet count: %u\n", qtx_cfg);
+		len += scnprintf(buf + len, buf_len - len,
+				 "packet drop: %u\n\n", qtx_sch);
+
+		/* Recover to normal mode */
+		cr_set_field(hnat_priv->fe_base + QTX_MIB_IF,
+			     MIB_ON_QTX_CFG, 0);
+		cr_set_field(hnat_priv->fe_base + QTX_MIB_IF, VQTX_MIB_EN, 0);
+	}
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "      EN     RATE     WEIGHT\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "----------------------------\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "max%5d%9d%9d\n", max_rate_en, max_rate, max_weight);
+	len += scnprintf(buf + len, buf_len - len,
+			 "min%5d%9d        -\n", min_rate_en, min_rate);
+
+	if (len > buf_len)
+		len = buf_len;
+
+	ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+	kfree(buf);
+	return ret_cnt;
+}
+
+/*
+ * Configure one QDMA TX queue.  Input:
+ * "<scheduler> <min_en> <min_rate> <max_en> <max_rate> <weight> <resv>".
+ * file->private_data carries the queue id.
+ */
+static ssize_t hnat_queue_write(struct file *file, const char __user *buf,
+				size_t length, loff_t *offset)
+{
+	long id = (long)file->private_data;
+	struct mtk_hnat *h = hnat_priv;
+	char line[64];
+	int max_enable, max_rate, max_exp = 0;
+	int min_enable, min_rate, min_exp = 0;
+	int weight;
+	int resv;
+	int scheduler;
+	size_t size;
+	u32 qtx_sch;
+
+	/* need room for the terminating NUL: the original "> sizeof" check
+	 * accepted a 64-byte write and left 'line' unterminated, letting
+	 * sscanf()/strlen() below run off the end of the buffer */
+	if (length >= sizeof(line))
+		return -EINVAL;
+
+	if (copy_from_user(line, buf, length))
+		return -EFAULT;
+
+	line[length] = '\0';
+
+	/* select the register page for this queue, then fetch its shaper */
+	cr_set_field(h->fe_base + QDMA_PAGE, QTX_CFG_PAGE, (id / NUM_OF_Q_PER_PAGE));
+	qtx_sch = readl(h->fe_base + QTX_SCH(id % NUM_OF_Q_PER_PAGE));
+
+	if (sscanf(line, "%d %d %d %d %d %d %d", &scheduler, &min_enable, &min_rate,
+		   &max_enable, &max_rate, &weight, &resv) != 7)
+		return -EFAULT;
+
+	/* rates are stored as 7-bit mantissa plus 4-bit decimal exponent */
+	while (max_rate > 127) {
+		max_rate /= 10;
+		max_exp++;
+	}
+
+	while (min_rate > 127) {
+		min_rate /= 10;
+		min_exp++;
+	}
+
+	qtx_sch &= 0x70000000;
+	if (hnat_priv->data->num_of_sch == 4)
+		qtx_sch |= (scheduler & 0x3) << 30;
+	else
+		qtx_sch |= (scheduler & 0x1) << 31;
+	if (min_enable)
+		qtx_sch |= BIT(27);
+	qtx_sch |= (min_rate & 0x7f) << 20;
+	qtx_sch |= (min_exp & 0xf) << 16;
+	if (max_enable)
+		qtx_sch |= BIT(11);
+	qtx_sch |= (weight & 0xf) << 12;
+	qtx_sch |= (max_rate & 0x7f) << 4;
+	qtx_sch |= max_exp & 0xf;
+	writel(qtx_sch, h->fe_base + QTX_SCH(id % NUM_OF_Q_PER_PAGE));
+
+	/* same reservation value goes to both hw (bits 15:8) and sw (7:0) */
+	resv &= 0xff;
+	qtx_sch = readl(h->fe_base + QTX_CFG(id % NUM_OF_Q_PER_PAGE));
+	qtx_sch &= 0xffff0000;
+	qtx_sch |= (resv << 8) | resv;
+	writel(qtx_sch, h->fe_base + QTX_CFG(id % NUM_OF_Q_PER_PAGE));
+
+	size = strlen(line);
+	*offset += size;
+
+	return length;
+}
+
+/* fops for the per-queue "qdma_txqN" debugfs files. */
+static const struct file_operations hnat_queue_fops = {
+	.open = simple_open,
+	.read = hnat_queue_show,
+	.write = hnat_queue_write,
+	.llseek = default_llseek,
+};
+
+/*
+ * Select the PPD network interface by name.  Takes a reference on the
+ * new device and drops the one previously held in g_ppdev.
+ */
+static ssize_t hnat_ppd_if_write(struct file *file, const char __user *buffer,
+				 size_t count, loff_t *data)
+{
+	char name[IFNAMSIZ] = { 0 };
+	struct net_device *ndev;
+	char *ifname, *cursor;
+
+	if (count >= IFNAMSIZ)
+		return -EFAULT;
+
+	if (copy_from_user(name, buffer, count))
+		return -EFAULT;
+
+	/* strip trailing newline/space; buffer is zero-filled so the
+	 * token is always NUL-terminated */
+	cursor = name;
+	ifname = strsep(&cursor, "\n\r ");
+	ndev = dev_get_by_name(&init_net, ifname);
+	if (!ndev) {
+		pr_info("no such device!\n");
+		return count;
+	}
+
+	if (hnat_priv->g_ppdev)
+		dev_put(hnat_priv->g_ppdev);
+	hnat_priv->g_ppdev = ndev;
+
+	strncpy(hnat_priv->ppd, ifname, IFNAMSIZ);
+	pr_info("hnat_priv ppd = %s\n", hnat_priv->ppd);
+
+	return count;
+}
+
+/* Report the configured PPD interface name and the held device, if any
+ * (output goes to the kernel log, not the seq_file). */
+static int hnat_ppd_if_read(struct seq_file *m, void *private)
+{
+	pr_info("hnat_priv ppd = %s\n", hnat_priv->ppd);
+
+	if (!hnat_priv->g_ppdev)
+		pr_info("hnat_priv g_ppdev is null!\n");
+	else
+		pr_info("hnat_priv g_ppdev name = %s\n",
+			hnat_priv->g_ppdev->name);
+
+	return 0;
+}
+
+/* debugfs open: bind hnat_ppd_if_read (PPD interface status). */
+static int hnat_ppd_if_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, hnat_ppd_if_read, file->private_data);
+}
+
+/* fops for the read/write "hnat_ppd_if" debugfs file. */
+static const struct file_operations hnat_ppd_if_fops = {
+	.open = hnat_ppd_if_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = hnat_ppd_if_write,
+	.release = single_release,
+};
+
+/* Report whether MAP-E or DS-Lite mode is active (via kernel log). */
+static int hnat_mape_toggle_read(struct seq_file *m, void *private)
+{
+	const char *mode = mape_toggle ? "mape" : "ds-lite";
+
+	pr_info("value=%d, %s is enabled now!\n", mape_toggle, mode);
+
+	return 0;
+}
+
+/* debugfs open: bind hnat_mape_toggle_read (MAP-E/DS-Lite mode). */
+static int hnat_mape_toggle_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, hnat_mape_toggle_read, file->private_data);
+}
+
+/*
+ * Toggle MAP-E ('1') vs DS-Lite ('0') mode.  Only the first written
+ * byte is examined.
+ */
+static ssize_t hnat_mape_toggle_write(struct file *file, const char __user *buffer,
+				      size_t count, loff_t *data)
+{
+	char buf;
+
+	if (!count)
+		return 0;
+
+	/* copy exactly one byte: the original copied 'count' bytes into
+	 * this single-byte variable, smashing the stack for any write
+	 * longer than one byte (e.g. "echo 1" writes "1\n") */
+	if (copy_from_user(&buf, buffer, 1))
+		return -EFAULT;
+
+	if (buf == '1' && !mape_toggle) {
+		pr_info("mape is going to be enabled, ds-lite is going to be disabled !\n");
+		mape_toggle = 1;
+	} else if (buf == '0' && mape_toggle) {
+		pr_info("ds-lite is going to be enabled, mape is going to be disabled !\n");
+		mape_toggle = 0;
+	}
+
+	return count;
+}
+
+/* fops for the read/write "mape_toggle" debugfs file. */
+static const struct file_operations hnat_mape_toggle_fops = {
+	.open = hnat_mape_toggle_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = hnat_mape_toggle_write,
+	.release = single_release,
+};
+
+/* Report whether the HNAT netfilter hooks are enabled (via kernel log). */
+static int hnat_hook_toggle_read(struct seq_file *m, void *private)
+{
+	const char *state = hook_toggle ? "enabled" : "disabled";
+
+	pr_info("value=%d, hook is %s now!\n", hook_toggle, state);
+
+	return 0;
+}
+
+/* debugfs open: bind hnat_hook_toggle_read (hook enable state). */
+static int hnat_hook_toggle_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, hnat_hook_toggle_read, file->private_data);
+}
+
+/*
+ * Enable ('1') or disable ('0') the HNAT hooks.  Only the first byte of
+ * the write is examined; writes longer than 8 bytes are rejected.
+ */
+static ssize_t hnat_hook_toggle_write(struct file *file, const char __user *buffer,
+				      size_t count, loff_t *data)
+{
+	char buf[8];
+	int len = count;
+
+	if (len > 8)
+		return -EFAULT;
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	switch (buf[0]) {
+	case '1':
+		if (!hook_toggle) {
+			pr_info("hook is going to be enabled !\n");
+			hnat_enable_hook();
+		}
+		break;
+	case '0':
+		if (hook_toggle) {
+			pr_info("hook is going to be disabled !\n");
+			hnat_disable_hook();
+		}
+		break;
+	}
+
+	return len;
+}
+
+/* fops for the read/write "hook_toggle" debugfs file. */
+static const struct file_operations hnat_hook_toggle_fops = {
+	.open = hnat_hook_toggle_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = hnat_hook_toggle_write,
+	.release = single_release,
+};
+
+/* Report the driver software version and hardware version (kernel log). */
+static int hnat_version_read(struct seq_file *m, void *private)
+{
+	const char *sw_ver = HNAT_SW_VER;
+
+	pr_info("HNAT SW version : %s\nHNAT HW version : %d\n",
+		sw_ver, hnat_priv->data->version);
+
+	return 0;
+}
+
+/* debugfs open: bind hnat_version_read (SW/HW version info). */
+static int hnat_version_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, hnat_version_read, file->private_data);
+}
+
+/* fops for the read-only "hnat_version" debugfs file. */
+static const struct file_operations hnat_version_fops = {
+	.open = hnat_version_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/*
+ * Fetch the accounting counters for FOE entry 'index'.
+ * Returns 0 on success, -1 when no accounting data exists or the entry
+ * is not in BIND state.  Exported for other modules.
+ */
+int get_ppe_mib(int index, u64 *pkt_cnt, u64 *byte_cnt)
+{
+	struct mtk_hnat *h = hnat_priv;
+	struct foe_entry *entry = &h->foe_table_cpu[index];
+	struct hnat_accounting *acount = hnat_get_count(h, index);
+
+	if (!acount)
+		return -1;
+
+	if (entry->bfib1.state != BIND)
+		return -1;
+
+	*pkt_cnt = acount->packets;
+	*byte_cnt = acount->bytes;
+
+	return 0;
+}
+EXPORT_SYMBOL(get_ppe_mib);
+
+/* Return non-zero when FOE entry 'index' is in BIND state.  Exported. */
+int is_entry_binding(int index)
+{
+	const struct foe_entry *entry = &hnat_priv->foe_table_cpu[index];
+
+	return entry->bfib1.state == BIND;
+}
+EXPORT_SYMBOL(is_entry_binding);
+
+/* Build a debugfs_reg32 entry from a PPE_<nm> register-offset macro. */
+#define dump_register(nm)                                                      \
+	{                                                                      \
+		.name = __stringify(nm), .offset = PPE_##nm,                   \
+	}
+
+/* Register map exposed through the debugfs "regdump" file (offsets are
+ * the PPE_* macros; values are read relative to h->ppe_base). */
+static const struct debugfs_reg32 hnat_regs[] = {
+	dump_register(GLO_CFG),     dump_register(FLOW_CFG),
+	dump_register(IP_PROT_CHK), dump_register(IP_PROT_0),
+	dump_register(IP_PROT_1),   dump_register(IP_PROT_2),
+	dump_register(IP_PROT_3),   dump_register(TB_CFG),
+	dump_register(TB_BASE),     dump_register(TB_USED),
+	dump_register(BNDR),	dump_register(BIND_LMT_0),
+	dump_register(BIND_LMT_1),  dump_register(KA),
+	dump_register(UNB_AGE),     dump_register(BND_AGE_0),
+	dump_register(BND_AGE_1),   dump_register(HASH_SEED),
+	dump_register(DFT_CPORT),   dump_register(MCAST_PPSE),
+	dump_register(MCAST_L_0),   dump_register(MCAST_H_0),
+	dump_register(MCAST_L_1),   dump_register(MCAST_H_1),
+	dump_register(MCAST_L_2),   dump_register(MCAST_H_2),
+	dump_register(MCAST_L_3),   dump_register(MCAST_H_3),
+	dump_register(MCAST_L_4),   dump_register(MCAST_H_4),
+	dump_register(MCAST_L_5),   dump_register(MCAST_H_5),
+	dump_register(MCAST_L_6),   dump_register(MCAST_H_6),
+	dump_register(MCAST_L_7),   dump_register(MCAST_H_7),
+	dump_register(MCAST_L_8),   dump_register(MCAST_H_8),
+	dump_register(MCAST_L_9),   dump_register(MCAST_H_9),
+	dump_register(MCAST_L_A),   dump_register(MCAST_H_A),
+	dump_register(MCAST_L_B),   dump_register(MCAST_H_B),
+	dump_register(MCAST_L_C),   dump_register(MCAST_H_C),
+	dump_register(MCAST_L_D),   dump_register(MCAST_H_D),
+	dump_register(MCAST_L_E),   dump_register(MCAST_H_E),
+	dump_register(MCAST_L_F),   dump_register(MCAST_H_F),
+	dump_register(MTU_DRP),     dump_register(MTU_VLYR_0),
+	dump_register(MTU_VLYR_1),  dump_register(MTU_VLYR_2),
+	/* NOTE(review): VPM_TPID appears twice -- the second entry was
+	 * probably meant to be a different register; confirm. */
+	dump_register(VPM_TPID),    dump_register(VPM_TPID),
+	dump_register(CAH_CTRL),    dump_register(CAH_TAG_SRH),
+	dump_register(CAH_LINE_RW), dump_register(CAH_WDATA),
+	dump_register(CAH_RDATA),
+};
+
+/*
+ * Create the "hnat" debugfs directory and all its files: the register
+ * dump, the table/status views, the control files and the per-scheduler
+ * and per-queue QDMA files.  Returns 0 or a negative errno; on failure
+ * everything created so far is removed.
+ */
+int hnat_init_debugfs(struct mtk_hnat *h)
+{
+	int ret = 0;
+	struct dentry *root;
+	struct dentry *file;
+	long i;
+	char name[16];
+
+	root = debugfs_create_dir("hnat", NULL);
+	if (!root) {
+		dev_notice(h->dev, "%s:err at %d\n", __func__, __LINE__);
+		ret = -ENOMEM;
+		goto err0;
+	}
+	h->root = root;
+	h->regset = kzalloc(sizeof(*h->regset), GFP_KERNEL);
+	if (!h->regset) {
+		dev_notice(h->dev, "%s:err at %d\n", __func__, __LINE__);
+		ret = -ENOMEM;
+		goto err1;
+	}
+	h->regset->regs = hnat_regs;
+	h->regset->nregs = ARRAY_SIZE(hnat_regs);
+	h->regset->base = h->ppe_base;
+
+	file = debugfs_create_regset32("regdump", S_IRUGO, root, h->regset);
+	if (!file) {
+		dev_notice(h->dev, "%s:err at %d\n", __func__, __LINE__);
+		ret = -ENOMEM;
+		goto err1;
+	}
+	/* Files with a .write handler get S_IWUSR: the original passed
+	 * "S_IRUGO | S_IRUGO" (a no-op duplicate), leaving the writable
+	 * files without any write-permission bit. */
+	debugfs_create_file("all_entry", S_IRUGO, root, h, &hnat_debug_fops);
+	debugfs_create_file("external_interface", S_IRUGO, root, h,
+			    &hnat_ext_fops);
+	debugfs_create_file("whnat_interface", S_IRUGO, root, h,
+			    &hnat_whnat_fops);
+	debugfs_create_file("cpu_reason", S_IRUGO | S_IWUSR, root, h,
+			    &cpu_reason_fops);
+	debugfs_create_file("hnat_entry", S_IRUGO | S_IWUSR, root, h,
+			    &hnat_entry_fops);
+	debugfs_create_file("hnat_setting", S_IRUGO | S_IWUSR, root, h,
+			    &hnat_setting_fops);
+	debugfs_create_file("mcast_table", S_IRUGO, root, h,
+			    &hnat_mcast_fops);
+	debugfs_create_file("hook_toggle", S_IRUGO | S_IWUSR, root, h,
+			    &hnat_hook_toggle_fops);
+	debugfs_create_file("mape_toggle", S_IRUGO | S_IWUSR, root, h,
+			    &hnat_mape_toggle_fops);
+	debugfs_create_file("hnat_version", S_IRUGO, root, h,
+			    &hnat_version_fops);
+	debugfs_create_file("hnat_ppd_if", S_IRUGO | S_IWUSR, root, h,
+			    &hnat_ppd_if_fops);
+
+	/* one file per QDMA scheduler; the id rides in private_data */
+	for (i = 0; i < hnat_priv->data->num_of_sch; i++) {
+		snprintf(name, sizeof(name), "qdma_sch%ld", i);
+		debugfs_create_file(name, S_IRUGO | S_IWUSR, root, (void *)i,
+				    &hnat_sched_fops);
+	}
+
+	/* one file per QDMA TX queue */
+	for (i = 0; i < MTK_QDMA_TX_NUM; i++) {
+		snprintf(name, sizeof(name), "qdma_txq%ld", i);
+		debugfs_create_file(name, S_IRUGO | S_IWUSR, root, (void *)i,
+				    &hnat_queue_fops);
+	}
+
+	return 0;
+
+err1:
+	debugfs_remove_recursive(root);
+err0:
+	return ret;
+}
+
+/* Tear down the "hnat" debugfs tree and free the regdump regset. */
+void hnat_deinit_debugfs(struct mtk_hnat *h)
+{
+	debugfs_remove_recursive(h->root);
+	h->root = NULL;
+	kfree(h->regset);
+}
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_mcast.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_mcast.c
new file mode 100644
index 0000000..79e4bd0
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_mcast.c
@@ -0,0 +1,347 @@
+/*   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; version 2 of the License
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   Copyright (C) 2014-2016 Zhiqiang Yang <zhiqiang.yang@mediatek.com>
+ */
+#include <net/sock.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_bridge.h>
+#include "hnat.h"
+
+/* *
+ * mcast_entry_get - Returns the index of an unused entry
+ * or an already existed entry in mtbl
+ */
+static int mcast_entry_get(u16 vlan_id, u32 dst_mac)
+{
+	int index = -1;
+	u8 i;
+	struct ppe_mcast_group *p = hnat_priv->pmcast->mtbl;
+	u8 max = hnat_priv->pmcast->max_entry;
+
+	/* Fix: original advanced p only when neither branch hit, so the
+	 * match test kept inspecting a stale entry; index by i instead. */
+	for (i = 0; i < max; i++) {
+		if (p[i].valid && p[i].vid == vlan_id &&
+		    p[i].mac_hi == dst_mac) {
+			index = i; /* reuse the already existing group */
+			break;
+		}
+		if (index == -1 && !p[i].valid)
+			index = i; /*get the first unused entry index*/
+	}
+	if (index == -1)
+		pr_info("%s:group table is full\n", __func__);
+
+	return index;
+}
+
+static void get_mac_from_mdb_entry(struct br_mdb_entry *entry,
+				   u32 *mac_hi, u16 *mac_lo)
+{
+	switch (ntohs(entry->addr.proto)) {
+	case ETH_P_IP:
+		*mac_lo = 0x0100; /* IPv4 mcast MAC prefix 01:00:5e */
+		*mac_hi = swab32((entry->addr.u.ip4 & 0xfffffe00) + 0x5e); /* NOTE(review): confirm mask maps low 23 bits per RFC 1112 */
+		break;
+	case ETH_P_IPV6:
+		*mac_lo = 0x3333; /* IPv6 mcast MAC prefix 33:33 */
+		*mac_hi = swab32(entry->addr.u.ip6.s6_addr32[3]); /* low 32 bits of the group address */
+		break;
+	}
+	trace_printk("%s:group mac_h=0x%08x, mac_l=0x%04x\n",
+		     __func__, *mac_hi, *mac_lo);
+}
+
+/*set_hnat_mtbl - set ppe multicast register*/
+static int set_hnat_mtbl(struct ppe_mcast_group *group, int index)
+{
+	struct ppe_mcast_h mcast_h;
+	struct ppe_mcast_l mcast_l;
+	u16 mac_lo = group->mac_lo;
+	u32 mac_hi = group->mac_hi;
+	u8 mc_port = group->mc_port;
+	void __iomem *reg;
+
+	mcast_h.u.value = 0;
+	mcast_l.addr = 0;
+	if (mac_lo == 0x0100)
+		mcast_h.u.info.mc_mpre_sel = 0;
+	else if (mac_lo == 0x3333)
+		mcast_h.u.info.mc_mpre_sel = 1;
+
+	mcast_h.u.info.mc_px_en = mc_port;
+	mcast_l.addr = mac_hi;
+	mcast_h.u.info.valid = group->valid;
+	trace_printk("%s:index=%d,group info=0x%x,addr=0x%x\n",
+		     __func__, index, mcast_h.u.value, mcast_l.addr);
+	if (index < 0x10) {
+		reg = hnat_priv->ppe_base + PPE_MCAST_H_0 + ((index) * 8);
+		writel(mcast_h.u.value, reg);
+		reg = hnat_priv->ppe_base + PPE_MCAST_L_0 + ((index) * 8);
+		writel(mcast_l.addr, reg);
+	} else {
+		index = index - 0x10;
+		reg = hnat_priv->fe_base + PPE_MCAST_H_10 + ((index) * 8);
+		writel(mcast_h.u.value, reg);
+		reg = hnat_priv->fe_base + PPE_MCAST_L_10 + ((index) * 8);
+		writel(mcast_l.addr, reg); /* fix: was rewriting mcast_h into the L register */
+	}
+
+	return 0;
+}
+
+/**
+ * hnat_mcast_table_update -
+ *	1.get a valid group entry
+ *	2.update group info
+ *		a.update eif&oif count
+ *		b.eif ==0 & oif == 0,delete it from group table
+ *		c.oif != 0,set mc forward port to cpu,else do not forward to cpu
+ *	3.set the group info to ppe register
+ */
+static int hnat_mcast_table_update(int type, struct br_mdb_entry *entry)
+{
+	struct net_device *dev;
+	u32 mac_hi;
+	u16 mac_lo;
+	int index;
+	struct ppe_mcast_group *group;
+
+	rcu_read_lock();
+	dev = dev_get_by_index_rcu(&init_net, entry->ifindex);
+	if (!dev) {
+		rcu_read_unlock();
+		return -ENODEV;
+	}
+	rcu_read_unlock(); /* NOTE(review): dev used below without a held ref — confirm safety */
+
+	get_mac_from_mdb_entry(entry, &mac_hi, &mac_lo);
+	index = mcast_entry_get(entry->vid, mac_hi);
+	if (index == -1)
+		return -1;
+
+	group = &hnat_priv->pmcast->mtbl[index];
+	group->mac_hi = mac_hi;
+	group->mac_lo = mac_lo;
+	switch (type) {
+	case RTM_NEWMDB:
+		if (IS_LAN(dev) || IS_WAN(dev))
+			group->eif++;
+		else
+			group->oif++;
+		group->vid = entry->vid;
+		group->valid = true;
+		break;
+	case RTM_DELMDB:
+		if (group->valid) {
+			if (IS_LAN(dev) || IS_WAN(dev))
+				group->eif--;
+			else
+				group->oif--;
+		}
+		break;
+	}
+	trace_printk("%s:devname=%s,eif=%d,oif=%d\n", __func__,
+		     dev->name, group->eif, group->oif);
+	if (group->valid) {
+		if (group->oif && group->eif)
+			/*eth&wifi both in group,forward to cpu&GDMA1*/
+			group->mc_port = (MCAST_TO_PDMA | MCAST_TO_GDMA1); /* fix: bitwise OR, not logical */
+		else if (group->oif)
+			/*only wifi in group,forward to cpu only*/
+			group->mc_port = MCAST_TO_PDMA;
+		else
+			/*only eth in group,forward to GDMA1 only*/
+			group->mc_port = MCAST_TO_GDMA1;
+		if (!group->oif && !group->eif)
+			/*nobody in this group,clear the entry*/
+			memset(group, 0, sizeof(struct ppe_mcast_group));
+		set_hnat_mtbl(group, index);
+	}
+
+	return 0;
+}
+
+static void hnat_mcast_nlmsg_handler(struct work_struct *work)
+{
+	struct sk_buff *skb = NULL;
+	struct nlmsghdr *nlh;
+	struct nlattr *nest, *nest2, *info;
+	struct br_port_msg *bpm;
+	struct br_mdb_entry *entry;
+	struct ppe_mcast_table *pmcast;
+	struct sock *sk;
+
+	pmcast = container_of(work, struct ppe_mcast_table, work);
+	sk = pmcast->msock->sk;
+
+	while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
+		nlh = nlmsg_hdr(skb);
+		if (!nlmsg_ok(nlh, skb->len)) {
+			kfree_skb(skb);
+			continue;
+		}
+		bpm = nlmsg_data(nlh);
+		nest = nlmsg_find_attr(nlh, sizeof(*bpm), MDBA_MDB); /* fix: header len is struct size, not pointer size */
+		if (!nest) {
+			kfree_skb(skb);
+			continue;
+		}
+		nest2 = nla_find_nested(nest, MDBA_MDB_ENTRY);
+		if (nest2) {
+			info = nla_find_nested(nest2, MDBA_MDB_ENTRY_INFO);
+			if (!info) {
+				kfree_skb(skb);
+				continue;
+			}
+
+			entry = (struct br_mdb_entry *)nla_data(info);
+			trace_printk("%s:cmd=0x%2x,ifindex=0x%x,state=0x%x",
+				     __func__, nlh->nlmsg_type,
+				     entry->ifindex, entry->state);
+			trace_printk("vid=0x%x,ip=0x%x,proto=0x%x\n",
+				     entry->vid, entry->addr.u.ip4,
+				     entry->addr.proto);
+			hnat_mcast_table_update(nlh->nlmsg_type, entry);
+		}
+		kfree_skb(skb);
+	}
+}
+
+static void hnat_mcast_nlmsg_rcv(struct sock *sk)
+{
+	struct ppe_mcast_table *pmcast = hnat_priv->pmcast;
+
+	/* sk_data_ready callback: defer parsing of the queued netlink
+	 * skbs to the dedicated mcast workqueue */
+	queue_work(pmcast->queue, &pmcast->work);
+}
+
+static struct socket *hnat_mcast_netlink_open(struct net *net)
+{
+	struct socket *sock = NULL;
+	int ret;
+	struct sockaddr_nl addr = {0}; /* fix: zero nl_pad and padding before bind() */
+
+	ret = sock_create_kern(net, PF_NETLINK, SOCK_RAW, NETLINK_ROUTE, &sock);
+	if (ret < 0)
+		goto out;
+
+	sock->sk->sk_data_ready = hnat_mcast_nlmsg_rcv;
+	addr.nl_family = PF_NETLINK;
+	addr.nl_pid = 65536; /*fix me:how to get an unique id?*/
+	addr.nl_groups = RTMGRP_MDB;
+	ret = sock->ops->bind(sock, (struct sockaddr *)&addr, sizeof(addr));
+	if (ret < 0)
+		goto out;
+
+	return sock;
+out:
+	if (sock)
+		sock_release(sock);
+
+	return NULL;
+}
+
+static void hnat_mcast_check_timestamp(struct timer_list *t)
+{
+	struct foe_entry *entry;
+	int hash_index;
+	u16 e_ts, foe_ts;
+
+	for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
+		entry = hnat_priv->foe_table_cpu + hash_index;
+		if (entry->bfib1.sta == 1) { /* 'sta' flag set — presumably static/mcast entries */
+			e_ts = (entry->ipv4_hnapt.m_timestamp) & 0xffff;
+			foe_ts = foe_timestamp(hnat_priv); /* current 16-bit FoE time */
+			if ((foe_ts - e_ts) > 0x3000)
+				foe_ts = (~(foe_ts)) & 0xffff; /* NOTE(review): wrap-around compensation — confirm */
+			if (abs(foe_ts - e_ts) > 20)
+				entry_delete(hash_index); /* stale beyond 20 ticks: age it out */
+		}
+	}
+	mod_timer(&hnat_priv->hnat_mcast_check_timer, jiffies + 10 * HZ); /* re-arm every 10 s */
+}
+
+int hnat_mcast_enable(void)
+{
+	struct ppe_mcast_table *pmcast;
+
+	pmcast = kzalloc(sizeof(*pmcast), GFP_KERNEL);
+	if (!pmcast)
+		return -1; /* fix: err path dereferenced pmcast when NULL */
+
+	if (hnat_priv->data->version == MTK_HNAT_V1)
+		pmcast->max_entry = 0x10;
+	else
+		pmcast->max_entry = MAX_MCAST_ENTRY;
+
+	INIT_WORK(&pmcast->work, hnat_mcast_nlmsg_handler);
+	pmcast->queue = create_singlethread_workqueue("ppe_mcast");
+	if (!pmcast->queue)
+		goto err;
+
+	pmcast->msock = hnat_mcast_netlink_open(&init_net);
+	if (!pmcast->msock)
+		goto err;
+
+	hnat_priv->pmcast = pmcast;
+
+	/* mt7629 should checkout mcast entry life time manualy */
+	if (hnat_priv->data->version == MTK_HNAT_V3) {
+		timer_setup(&hnat_priv->hnat_mcast_check_timer,
+			    hnat_mcast_check_timestamp, 0);
+		hnat_priv->hnat_mcast_check_timer.expires = jiffies;
+		add_timer(&hnat_priv->hnat_mcast_check_timer);
+	}
+
+	/* Enable multicast table lookup */
+	cr_set_field(hnat_priv->ppe_base + PPE_GLO_CFG, MCAST_TB_EN, 1);
+	/* multicast port0 map to PDMA */
+	cr_set_field(hnat_priv->ppe_base + PPE_MCAST_PPSE, MC_P0_PPSE, 0);
+	/* multicast port1 map to GMAC1 */
+	cr_set_field(hnat_priv->ppe_base + PPE_MCAST_PPSE, MC_P1_PPSE, 1);
+	/* multicast port2 map to GMAC2 */
+	cr_set_field(hnat_priv->ppe_base + PPE_MCAST_PPSE, MC_P2_PPSE, 2);
+	/* multicast port3 map to QDMA */
+	cr_set_field(hnat_priv->ppe_base + PPE_MCAST_PPSE, MC_P3_PPSE, 5);
+
+	return 0;
+err:
+	if (pmcast->queue)
+		destroy_workqueue(pmcast->queue);
+	if (pmcast->msock)
+		sock_release(pmcast->msock);
+	kfree(pmcast);
+
+	return -1;
+}
+
+int hnat_mcast_disable(void)
+{
+	struct ppe_mcast_table *pmcast = hnat_priv->pmcast;
+
+	if (hnat_priv->data->version == MTK_HNAT_V3)
+		del_timer_sync(&hnat_priv->hnat_mcast_check_timer);
+
+	/* fix: original read pmcast->msock/queue before the NULL check */
+	if (pmcast) {
+		/* cancel pending netlink work before tearing down */
+		flush_work(&pmcast->work);
+		destroy_workqueue(pmcast->queue);
+		sock_release(pmcast->msock);
+		kfree(pmcast);
+		hnat_priv->pmcast = NULL; /* avoid dangling pointer on re-enable */
+	}
+
+	return 0;
+}
+
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_mcast.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_mcast.h
new file mode 100644
index 0000000..048bc58
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_mcast.h
@@ -0,0 +1,69 @@
+/*   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; version 2 of the License
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   Copyright (C) 2014-2016 Zhiqiang Yang <zhiqiang.yang@mediatek.com>
+ */
+
+#ifndef NF_HNAT_MCAST_H
+#define NF_HNAT_MCAST_H
+
+#define RTMGRP_IPV4_MROUTE 0x20
+#define RTMGRP_MDB 0x2000000
+
+#define MAX_MCAST_ENTRY 64
+
+#define MCAST_TO_PDMA (0x1 << 0)
+#define MCAST_TO_GDMA1 (0x1 << 1)
+#define MCAST_TO_GDMA2 (0x1 << 2)
+
+struct ppe_mcast_group {
+	u32 mac_hi; /*multicast mac addr*/
+	u16 mac_lo; /*multicast mac addr: 0x0100 (IPv4) or 0x3333 (IPv6)*/
+	u16 vid;
+	u8 mc_port; /*1:forward to cpu,2:forward to GDMA1,4:forward to GDMA2*/
+	u8 eif; /*num of eth if added to multi group. */
+	u8 oif; /* num of other if added to multi group ,ex wifi.*/
+	bool valid; /* entry slot in use */
+};
+
+struct ppe_mcast_table {
+	struct workqueue_struct *queue; /* single-thread wq running 'work' */
+	struct work_struct work; /* parses queued MDB netlink messages */
+	struct socket *msock; /* kernel netlink socket bound to RTMGRP_MDB */
+	struct ppe_mcast_group mtbl[MAX_MCAST_ENTRY];
+	u8 max_entry; /* 0x10 on HNAT v1, MAX_MCAST_ENTRY otherwise */
+};
+
+struct ppe_mcast_h {
+	union {
+		u32 value; /* raw register image */
+		struct {
+			u32 mc_vid:12;
+			u32 mc_qos_qid54:2; /* mt7622 only */
+			u32 valid:1; /* entry valid bit */
+			u32 rev1:1;
+			/*0:forward to cpu,1:forward to GDMA1*/
+			u32 mc_px_en:4;
+			u32 mc_mpre_sel:2; /* MAC prefix: 0=01:00 (IPv4), 1=33:33 (IPv6); cf. set_hnat_mtbl() */
+			u32 mc_vid_cmp:1;
+			u32 rev2:1;
+			u32 mc_px_qos_en:4;
+			u32 mc_qos_qid:4;
+		} info;
+	} u;
+};
+
+struct ppe_mcast_l {
+	u32 addr; /* group MAC image (mac_hi); written by set_hnat_mtbl() */
+};
+
+int hnat_mcast_enable(void);
+int hnat_mcast_disable(void);
+
+#endif
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
new file mode 100644
index 0000000..fe495ce
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
@@ -0,0 +1,2138 @@
+/*   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; version 2 of the License
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
+ *   Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/netfilter_bridge.h>
+#include <linux/netfilter_ipv6.h>
+
+#include <net/arp.h>
+#include <net/neighbour.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_flow_table.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+
+#include "nf_hnat_mtk.h"
+#include "hnat.h"
+
+#include "../mtk_eth_soc.h"
+
+#define do_ge2ext_fast(dev, skb)                                               \
+	((IS_LAN(dev) || IS_WAN(dev) || IS_PPD(dev)) && \
+	 skb_hnat_is_hashed(skb) && \
+	 skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU)
+#define do_ext2ge_fast_learn(dev, skb)                                         \
+	(IS_PPD(dev) &&                                                        \
+	 (skb_hnat_sport(skb) == NR_PDMA_PORT ||                           \
+	  skb_hnat_sport(skb) == NR_QDMA_PORT) &&                       \
+	  ((get_dev_from_index(skb->vlan_tci & VLAN_VID_MASK)) ||   \
+		 get_wandev_from_index(skb->vlan_tci & VLAN_VID_MASK)))
+#define do_mape_w2l_fast(dev, skb)                                          \
+		(mape_toggle && IS_WAN(dev) && (!is_from_mape(skb)))
+
+static struct ipv6hdr mape_l2w_v6h;
+static struct ipv6hdr mape_w2l_v6h;
+static inline uint8_t get_wifi_hook_if_index_from_dev(const struct net_device *dev)
+{
+	int i;
+
+	for (i = 1; i < MAX_IF_NUM; i++) { /* slot 0 is reserved: 0 means "not registered" */
+		if (hnat_priv->wifi_hook_if[i] == dev)
+			return i;
+	}
+
+	return 0; /* dev is not a wifi-hooked interface */
+}
+
+static inline int get_ext_device_number(void)
+{
+	int count = 0;
+
+	/* ext_if[] is kept packed from index 0; stop at the first hole */
+	while (count < MAX_EXT_DEVS && hnat_priv->ext_if[count])
+		count++;
+	return count;
+}
+
+static inline int find_extif_from_devname(const char *name)
+{
+	int i;
+	struct extdev_entry *ext_entry;
+
+	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
+		ext_entry = hnat_priv->ext_if[i];
+		if (!strcmp(name, ext_entry->name))
+			return 1; /* name matches a registered ext interface */
+	}
+	return 0; /* not a registered ext interface name */
+}
+
+static inline int get_index_from_dev(const struct net_device *dev)
+{
+	int i;
+	struct extdev_entry *ext_entry;
+
+	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
+		ext_entry = hnat_priv->ext_if[i];
+		if (dev == ext_entry->dev)
+			return ext_entry->dev->ifindex; /* ifindex doubles as the ext dev id */
+	}
+	return 0; /* 0: dev is not a registered ext device */
+}
+
+static inline struct net_device *get_dev_from_index(int index)
+{
+	int i;
+	struct extdev_entry *ext_entry;
+	struct net_device *dev = 0;
+
+	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
+		ext_entry = hnat_priv->ext_if[i];
+		if (ext_entry->dev && index == ext_entry->dev->ifindex) {
+			dev = ext_entry->dev; /* borrowed pointer: no reference taken */
+			break;
+		}
+	}
+	return dev;
+}
+
+static inline struct net_device *get_wandev_from_index(int index)
+{
+	struct net_device *wandev = dev_get_by_name(&init_net, hnat_priv->wan);
+
+	if (wandev && wandev->ifindex == index) /* fix: guard NULL from dev_get_by_name() */
+		return wandev; /* caller receives the held reference */
+	if (wandev)
+		dev_put(wandev); /* fix: drop the otherwise-leaked reference */
+	return NULL;
+}
+
+static inline int extif_set_dev(struct net_device *dev)
+{
+	int i;
+	struct extdev_entry *ext_entry;
+
+	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
+		ext_entry = hnat_priv->ext_if[i];
+		if (!strcmp(dev->name, ext_entry->name) && !ext_entry->dev) {
+			dev_hold(dev); /* hold a ref while ext_if[] points at dev */
+			ext_entry->dev = dev;
+			pr_info("%s(%s)\n", __func__, dev->name);
+
+			return ext_entry->dev->ifindex;
+		}
+	}
+
+	return -1; /* no free entry registered under this name */
+}
+
+static inline int extif_put_dev(struct net_device *dev)
+{
+	int i;
+	struct extdev_entry *ext_entry;
+
+	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
+		ext_entry = hnat_priv->ext_if[i];
+		if (ext_entry->dev == dev) {
+			int ifindex = dev->ifindex; /* fix: save before clearing/releasing */
+			ext_entry->dev = NULL;
+			pr_info("%s(%s)\n", __func__, dev->name);
+			dev_put(dev); /* balance dev_hold() in extif_set_dev() */
+			return ifindex; /* fix: original deref'd ext_entry->dev after NULLing it */
+		}
+	}
+
+	return -1;
+}
+
+int ext_if_add(struct extdev_entry *ext_entry)
+{
+	int len = get_ext_device_number();
+	if (len < MAX_EXT_DEVS) /* fix: don't write past ext_if[] when table is full */
+		hnat_priv->ext_if[len++] = ext_entry;
+	return len; /* number of registered ext devices after the insert */
+}
+
+int ext_if_del(struct extdev_entry *ext_entry)
+{
+	int i, j;
+
+	for (i = 0; i < MAX_EXT_DEVS; i++) {
+		if (hnat_priv->ext_if[i] == ext_entry) {
+			/* shift the packed tail down over the removed slot;
+			 * fix: test the bound before the array element */
+			for (j = i; j < MAX_EXT_DEVS - 1 && hnat_priv->ext_if[j]; j++)
+				hnat_priv->ext_if[j] = hnat_priv->ext_if[j + 1];
+			hnat_priv->ext_if[j] = NULL;
+			break;
+		}
+	}
+	return i;
+}
+
+void foe_clear_all_bind_entries(struct net_device *dev)
+{
+	int hash_index;
+	struct foe_entry *entry;
+
+	if (!IS_LAN(dev) && !IS_WAN(dev) &&
+	    !find_extif_from_devname(dev->name) &&
+	    !dev->netdev_ops->ndo_flow_offload_check)
+		return; /* dev takes no part in hnat offload: nothing to clear */
+
+	cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, SMA, SMA_ONLY_FWD_CPU); /* NOTE(review): presumably stops HW binding while we invalidate */
+	for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
+		entry = hnat_priv->foe_table_cpu + hash_index;
+		if (entry->bfib1.state == BIND) {
+			entry->ipv4_hnapt.udib1.state = INVALID;
+			entry->ipv4_hnapt.udib1.time_stamp =
+				readl((hnat_priv->fe_base + 0x0010)) & 0xFF; /* NOTE(review): FE reg 0x10 appears to hold the FoE time */
+		}
+	}
+
+	/* clear HWNAT cache */
+	hnat_cache_ebl(1);
+
+	mod_timer(&hnat_priv->hnat_sma_build_entry_timer, jiffies + 3 * HZ); /* restore entry building later */
+}
+
+static void gmac_ppe_fwd_enable(struct net_device *dev)
+{
+	if (IS_LAN(dev) || IS_GMAC1_MODE) /* LAN (or GMAC1-only mode) -> GMAC1 */
+		set_gmac_ppe_fwd(0, 1);
+	else if (IS_WAN(dev)) /* WAN traffic -> GMAC2 */
+		set_gmac_ppe_fwd(1, 1);
+}
+
+int nf_hnat_netdevice_event(struct notifier_block *unused, unsigned long event,
+			    void *ptr)
+{
+	struct net_device *dev;
+
+	dev = netdev_notifier_info_to_dev(ptr);
+
+	switch (event) {
+	case NETDEV_UP:
+		gmac_ppe_fwd_enable(dev);
+
+		extif_set_dev(dev); /* claim a matching ext_if[] slot, if any */
+
+		break;
+	case NETDEV_GOING_DOWN:
+		if (!get_wifi_hook_if_index_from_dev(dev)) /* wifi-hooked devs keep their slot */
+			extif_put_dev(dev);
+
+		foe_clear_all_bind_entries(dev); /* invalidate bound FoE entries for this dev */
+
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+void foe_clear_entry(struct neighbour *neigh)
+{
+	u32 *daddr = (u32 *)neigh->primary_key;
+	unsigned char h_dest[ETH_ALEN];
+	struct foe_entry *entry;
+	int hash_index;
+	u32 dip;
+
+	dip = (u32)(*daddr);
+
+	for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
+		entry = hnat_priv->foe_table_cpu + hash_index;
+		if (entry->bfib1.state == BIND &&
+		    entry->ipv4_hnapt.new_dip == ntohl(dip)) {
+			*((u32 *)h_dest) = swab32(entry->ipv4_hnapt.dmac_hi);
+			*((u16 *)&h_dest[4]) =
+				swab16(entry->ipv4_hnapt.dmac_lo);
+			if (memcmp(h_dest, neigh->ha, ETH_ALEN) != 0) { /* fix: MACs may contain 0x00 — strncmp stops early */
+				pr_info("%s: state=%d\n", __func__,
+					neigh->nud_state);
+				cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, SMA,
+					     SMA_ONLY_FWD_CPU);
+
+				entry->ipv4_hnapt.udib1.state = INVALID;
+				entry->ipv4_hnapt.udib1.time_stamp =
+					readl((hnat_priv->fe_base + 0x0010)) & 0xFF;
+
+				/* clear HWNAT cache */
+				hnat_cache_ebl(1);
+
+				mod_timer(&hnat_priv->hnat_sma_build_entry_timer,
+					  jiffies + 3 * HZ);
+
+				pr_info("Delete old entry: dip =%pI4\n", &dip);
+				pr_info("Old mac= %pM\n", h_dest);
+				pr_info("New mac= %pM\n", neigh->ha);
+			}
+		}
+	}
+}
+
+int nf_hnat_netevent_handler(struct notifier_block *unused, unsigned long event,
+			     void *ptr)
+{
+	struct net_device *dev = NULL;
+	struct neighbour *neigh = NULL;
+
+	switch (event) {
+	case NETEVENT_NEIGH_UPDATE: /* ARP/ND change: re-check bound FoE MACs */
+		neigh = ptr;
+		dev = neigh->dev;
+		if (dev)
+			foe_clear_entry(neigh); /* invalidate entries whose MAC went stale */
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+unsigned int mape_add_ipv6_hdr(struct sk_buff *skb, struct ipv6hdr mape_ip6h)
+{
+	struct ethhdr *eth = NULL;
+	struct ipv6hdr *ip6h = NULL;
+	struct iphdr *iph = NULL;
+
+	if (skb_headroom(skb) < IPV6_HDR_LEN || skb_shared(skb) ||
+	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
+		return -1; /* not enough headroom / not safe to rewrite in place */
+	}
+
+	/* point to L3 */
+	memcpy(skb->data - IPV6_HDR_LEN - ETH_HLEN, skb_push(skb, ETH_HLEN), ETH_HLEN); /* NOTE(review): relocates the MAC header down by IPV6_HDR_LEN — verify */
+	memcpy(skb_push(skb, IPV6_HDR_LEN - ETH_HLEN), &mape_ip6h, IPV6_HDR_LEN); /* NOTE(review): push length differs from copy length — confirm intended */
+
+	eth = (struct ethhdr *)(skb->data - ETH_HLEN);
+	eth->h_proto = htons(ETH_P_IPV6);
+	skb->protocol = htons(ETH_P_IPV6);
+
+	iph = (struct iphdr *)(skb->data + IPV6_HDR_LEN);
+	ip6h = (struct ipv6hdr *)(skb->data);
+	ip6h->payload_len = iph->tot_len; /* maybe different with ipv4 */
+
+	skb_set_network_header(skb, 0);
+	skb_set_transport_header(skb, iph->ihl * 4 + IPV6_HDR_LEN);
+	return 0;
+}
+
+static void fix_skb_packet_type(struct sk_buff *skb, struct net_device *dev,
+				struct ethhdr *eth)
+{
+	skb->pkt_type = PACKET_HOST; /* default: unicast to us */
+	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
+		if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
+			skb->pkt_type = PACKET_BROADCAST;
+		else
+			skb->pkt_type = PACKET_MULTICAST;
+	}
+}
+
+unsigned int do_hnat_ext_to_ge(struct sk_buff *skb, const struct net_device *in,
+			       const char *func)
+{
+	if (hnat_priv->g_ppdev && hnat_priv->g_ppdev->flags & IFF_UP) {
+		u16 vlan_id = 0;
+		skb_set_network_header(skb, 0);
+		skb_push(skb, ETH_HLEN);
+		set_to_ppe(skb); /* tag skb so the PPE path picks it up on re-inject */
+
+		vlan_id = skb_vlan_tag_get_id(skb);
+		if (vlan_id) { /* re-materialize an existing HW-accel vlan tag in the frame */
+			skb = vlan_insert_tag(skb, skb->vlan_proto, skb->vlan_tci);
+			if (!skb)
+				return -1;
+		}
+
+		/*set where we come from*/
+		skb->vlan_proto = htons(ETH_P_8021Q);
+		skb->vlan_tci =
+			(VLAN_CFI_MASK | (in->ifindex & VLAN_VID_MASK)); /* stash ingress ifindex in the VID */
+		trace_printk(
+			"%s: vlan_prot=0x%x, vlan_tci=%x, in->name=%s, skb->dev->name=%s\n",
+			__func__, ntohs(skb->vlan_proto), skb->vlan_tci,
+			in->name, hnat_priv->g_ppdev->name);
+		skb->dev = hnat_priv->g_ppdev;
+		dev_queue_xmit(skb); /* bounce through the PPD device for learning */
+		trace_printk("%s: called from %s successfully\n", __func__, func);
+		return 0;
+	}
+
+	trace_printk("%s: called from %s fail\n", __func__, func);
+	return -1;
+}
+
+unsigned int do_hnat_ext_to_ge2(struct sk_buff *skb, const char *func)
+{
+	struct ethhdr *eth = eth_hdr(skb);
+	struct net_device *dev;
+	struct foe_entry *entry;
+
+	trace_printk("%s: vlan_prot=0x%x, vlan_tci=%x\n", __func__,
+		     ntohs(skb->vlan_proto), skb->vlan_tci);
+
+	dev = get_dev_from_index(skb->vlan_tci & VLAN_VID_MASK); /* VID carries the ingress ifindex stashed by do_hnat_ext_to_ge() */
+
+	if (dev) {
+		/*set where we to go*/
+		skb->dev = dev;
+		skb->vlan_proto = 0;
+		skb->vlan_tci = 0;
+
+		if (ntohs(eth->h_proto) == ETH_P_8021Q) { /* strip the in-frame tag added earlier */
+			skb = skb_vlan_untag(skb);
+			if (unlikely(!skb))
+				return -1;
+		}
+
+		if (IS_BOND_MODE &&
+		    (((hnat_priv->data->version == MTK_HNAT_V4) &&
+				(skb_hnat_entry(skb) != 0x7fff)) ||
+		     ((hnat_priv->data->version != MTK_HNAT_V4) &&
+				(skb_hnat_entry(skb) != 0x3fff))))
+			skb_set_hash(skb, skb_hnat_entry(skb) >> 1, PKT_HASH_TYPE_L4); /* steer bonding by FoE hash */
+
+		set_from_extge(skb);
+		fix_skb_packet_type(skb, skb->dev, eth);
+		netif_rx(skb);
+		trace_printk("%s: called from %s successfully\n", __func__,
+			     func);
+		return 0;
+	} else {
+		/* MapE WAN --> LAN/WLAN PingPong. */
+		dev = get_wandev_from_index(skb->vlan_tci & VLAN_VID_MASK);
+		if (mape_toggle && dev) {
+			if (!mape_add_ipv6_hdr(skb, mape_w2l_v6h)) { /* re-add the saved outer v6 header */
+				skb_set_mac_header(skb, -ETH_HLEN);
+				skb->dev = dev;
+				set_from_mape(skb);
+				skb->vlan_proto = 0;
+				skb->vlan_tci = 0;
+				fix_skb_packet_type(skb, skb->dev, eth_hdr(skb));
+				entry = &hnat_priv->foe_table_cpu[skb_hnat_entry(skb)];
+				entry->bfib1.pkt_type = IPV4_HNAPT; /* inner flow is NAT'ed IPv4 */
+				netif_rx(skb);
+				return 0;
+			}
+		}
+		trace_printk("%s: called from %s fail\n", __func__, func);
+		return -1;
+	}
+}
+
+unsigned int do_hnat_ge_to_ext(struct sk_buff *skb, const char *func)
+{
+	/*set where we to go*/
+	u8 index;
+	struct foe_entry *entry;
+	struct net_device *dev;
+
+	entry = &hnat_priv->foe_table_cpu[skb_hnat_entry(skb)];
+
+	if (IS_IPV4_GRP(entry))
+		index = entry->ipv4_hnapt.act_dp; /* act_dp = target ext dev ifindex */
+	else
+		index = entry->ipv6_5t_route.act_dp;
+
+	skb->dev = get_dev_from_index(index);
+
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+	if (eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) { /* strip the HQoS pseudo-vlan tag */
+		skb = skb_unshare(skb, GFP_ATOMIC);
+		if (!skb)
+			return NF_ACCEPT;
+
+		if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
+			return NF_ACCEPT;
+
+		skb_pull_rcsum(skb, VLAN_HLEN);
+
+		memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - VLAN_HLEN,
+			2 * ETH_ALEN); /* move src+dst MACs over the removed tag */
+	}
+#endif
+
+	if (skb->dev) {
+		skb_set_network_header(skb, 0);
+		skb_push(skb, ETH_HLEN);
+		dev_queue_xmit(skb); /* hand off to the external (e.g. wifi) device */
+		trace_printk("%s: called from %s successfully\n", __func__,
+			     func);
+		return 0;
+	} else {
+		if (mape_toggle) {
+			/* Add ipv6 header mape for lan/wlan -->wan */
+			dev = get_wandev_from_index(index);
+			if (dev) {
+				if (!mape_add_ipv6_hdr(skb, mape_l2w_v6h)) {
+					skb_set_network_header(skb, 0);
+					skb_push(skb, ETH_HLEN);
+					skb_set_mac_header(skb, 0);
+					skb->dev = dev;
+					dev_queue_xmit(skb);
+					return 0;
+				}
+				trace_printk("%s: called from %s fail[MapE]\n", __func__,
+					     func);
+				return -1;
+			}
+		}
+	}
+	/*if external devices is down, invalidate related ppe entry*/
+	if (entry_hnat_is_bound(entry)) {
+		entry->bfib1.state = INVALID;
+		if (IS_IPV4_GRP(entry))
+			entry->ipv4_hnapt.act_dp = 0;
+		else
+			entry->ipv6_5t_route.act_dp = 0;
+
+		/* clear HWNAT cache */
+		hnat_cache_ebl(1);
+	}
+	trace_printk("%s: called from %s fail, index=%x\n", __func__,
+		     func, index);
+	return -1;
+}
+
+static void pre_routing_print(struct sk_buff *skb, const struct net_device *in,
+			      const struct net_device *out, const char *func)
+{	/* trace helper: dumps the skb's hnat metadata, tagged with the caller name */
+	trace_printk(
+		"[%s]: %s(iif=0x%x CB2=0x%x)-->%s (ppe_hash=0x%x) sport=0x%x reason=0x%x alg=0x%x from %s\n",
+		__func__, in->name, skb_hnat_iface(skb),
+		HNAT_SKB_CB2(skb)->magic, out->name, skb_hnat_entry(skb),
+		skb_hnat_sport(skb), skb_hnat_reason(skb), skb_hnat_alg(skb),
+		func);
+}
+
+static void post_routing_print(struct sk_buff *skb, const struct net_device *in,
+			       const struct net_device *out, const char *func)
+{	/* trace helper: same metadata dump as pre_routing_print, post-routing side */
+	trace_printk(
+		"[%s]: %s(iif=0x%x, CB2=0x%x)-->%s (ppe_hash=0x%x) sport=0x%x reason=0x%x alg=0x%x from %s\n",
+		__func__, in->name, skb_hnat_iface(skb),
+		HNAT_SKB_CB2(skb)->magic, out->name, skb_hnat_entry(skb),
+		skb_hnat_sport(skb), skb_hnat_reason(skb), skb_hnat_alg(skb),
+		func);
+}
+
+static inline void hnat_set_iif(const struct nf_hook_state *state,
+				struct sk_buff *skb, int val)
+{
+	if (IS_LAN(state->in)) { /* classify the ingress device into a FOE magic */
+		skb_hnat_iface(skb) = FOE_MAGIC_GE_LAN;
+	} else if (IS_PPD(state->in)) {
+		skb_hnat_iface(skb) = FOE_MAGIC_GE_PPD;
+	} else if (IS_EXT(state->in)) {
+		skb_hnat_iface(skb) = FOE_MAGIC_EXT;
+	} else if (IS_WAN(state->in)) {
+		skb_hnat_iface(skb) = FOE_MAGIC_GE_WAN;
+	} else if (state->in->netdev_ops->ndo_flow_offload_check) {
+		skb_hnat_iface(skb) = FOE_MAGIC_GE_VIRTUAL;
+	} else if (!IS_BR(state->in)) {
+		skb_hnat_iface(skb) = FOE_INVALID; /* unknown ingress */
+
+		if (is_magic_tag_valid(skb) && IS_SPACE_AVAILABLE_HEAD(skb))
+			memset(skb_hnat_info(skb), 0, FOE_INFO_LEN); /* scrub stale FoE info in headroom */
+	}
+}
+
+static inline void hnat_set_alg(const struct nf_hook_state *state,
+				struct sk_buff *skb, int val)
+{
+	skb_hnat_alg(skb) = val; /* store the ALG flag in the skb's FoE info */
+}
+
+static inline void hnat_set_head_frags(const struct nf_hook_state *state,
+				       struct sk_buff *head_skb, int val,
+				       void (*fn)(const struct nf_hook_state *state,
+						  struct sk_buff *skb, int val))
+{
+	struct sk_buff *frag;
+
+	/* apply fn to the head skb, then to every fragment chained
+	 * on its frag_list */
+	fn(state, head_skb, val);
+	for (frag = skb_shinfo(head_skb)->frag_list; frag; frag = frag->next)
+		fn(state, frag, val);
+}
+
+unsigned int do_hnat_mape_w2l_fast(struct sk_buff *skb, const struct net_device *in,
+				   const char *func)
+{
+	struct ipv6hdr *ip6h = ipv6_hdr(skb);
+	struct iphdr _iphdr;
+	struct iphdr *iph;
+	struct ethhdr *eth;
+
+	/* WAN -> LAN/WLAN MapE. */
+	if (mape_toggle && (ip6h->nexthdr == NEXTHDR_IPIP)) {
+		iph = skb_header_pointer(skb, IPV6_HDR_LEN, sizeof(_iphdr), &_iphdr);
+		if (unlikely(!iph)) /* fix: inner header may be absent/truncated */
+			return -1;
+		switch (iph->protocol) {
+		case IPPROTO_UDP:
+		case IPPROTO_TCP:
+			break;
+		default:
+			return -1;
+		}
+		mape_w2l_v6h = *ip6h; /* save the outer header for the return path */
+
+		/* Remove ipv6 header. */
+		memcpy(skb->data + IPV6_HDR_LEN - ETH_HLEN,
+		       skb->data - ETH_HLEN, ETH_HLEN);
+		skb_pull(skb, IPV6_HDR_LEN - ETH_HLEN);
+		skb_set_mac_header(skb, 0);
+		skb_set_network_header(skb, ETH_HLEN);
+		skb_set_transport_header(skb, ETH_HLEN + sizeof(_iphdr));
+
+		eth = eth_hdr(skb);
+		eth->h_proto = htons(ETH_P_IP);
+		set_to_ppe(skb);
+
+		skb->vlan_proto = htons(ETH_P_8021Q);
+		skb->vlan_tci = (VLAN_CFI_MASK | (in->ifindex & VLAN_VID_MASK)); /* stash ingress ifindex */
+
+		if (!hnat_priv->g_ppdev)
+			hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);
+
+		skb->dev = hnat_priv->g_ppdev;
+		skb->protocol = htons(ETH_P_IP);
+		dev_queue_xmit(skb); /* re-inject via PPD for learning */
+
+		return 0;
+	}
+	return -1;
+}
+
+static unsigned int is_ppe_support_type(struct sk_buff *skb)
+{
+	struct ethhdr *eth = NULL;
+	struct iphdr *iph = NULL;
+	struct ipv6hdr *ip6h = NULL;
+	struct iphdr _iphdr;
+
+	eth = eth_hdr(skb);
+	if (is_broadcast_ether_addr(eth->h_dest))
+		return 0;
+
+	switch (ntohs(skb->protocol)) {
+	case ETH_P_IP:
+		iph = ip_hdr(skb);
+
+		/* do not accelerate non tcp/udp traffic */
+		if ((iph->protocol == IPPROTO_TCP) ||
+		    (iph->protocol == IPPROTO_UDP) ||
+		    (iph->protocol == IPPROTO_IPV6)) {
+			return 1;
+		}
+
+		break;
+	case ETH_P_IPV6:
+		ip6h = ipv6_hdr(skb);
+
+		if ((ip6h->nexthdr == NEXTHDR_TCP) ||
+		    (ip6h->nexthdr == NEXTHDR_UDP)) {
+			return 1;
+		} else if (ip6h->nexthdr == NEXTHDR_IPIP) {
+			iph = skb_header_pointer(skb, IPV6_HDR_LEN,
+						 sizeof(_iphdr), &_iphdr);
+			if (unlikely(!iph)) /* fix: truncated inner header */
+				return 0;
+			if ((iph->protocol == IPPROTO_TCP) ||
+			    (iph->protocol == IPPROTO_UDP)) {
+				return 1;
+			}
+		}
+
+		break;
+	case ETH_P_8021Q:
+		return 1;
+	}
+
+	return 0;
+}
+
+static unsigned int
+mtk_hnat_ipv6_nf_pre_routing(void *priv, struct sk_buff *skb,
+			     const struct nf_hook_state *state)
+{
+	if (!is_ppe_support_type(skb)) {
+		hnat_set_head_frags(state, skb, 1, hnat_set_alg); /* alg=1 on head + all frags */
+		return NF_ACCEPT;
+	}
+
+	hnat_set_head_frags(state, skb, -1, hnat_set_iif); /* classify ingress iface (val unused by hnat_set_iif) */
+
+	pre_routing_print(skb, state->in, state->out, __func__);
+
+	if ((skb_hnat_iface(skb) == FOE_MAGIC_WED0) ||
+	    (skb_hnat_iface(skb) == FOE_MAGIC_WED1))
+		return NF_ACCEPT; /* WED traffic: no software fast path here */
+
+	/* packets from external devices -> xxx ,step 1 , learning stage & bound stage*/
+	if (do_ext2ge_fast_try(state->in, skb)) {
+		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
+			return NF_STOLEN;
+		if (!skb)
+			goto drop;
+		return NF_ACCEPT;
+	}
+
+	/* packets form ge -> external device
+	 * For standalone wan interface
+	 */
+	if (do_ge2ext_fast(state->in, skb)) {
+		if (!do_hnat_ge_to_ext(skb, __func__))
+			return NF_STOLEN;
+		goto drop;
+	}
+
+	/* MapE need remove ipv6 header and pingpong. */
+	if (do_mape_w2l_fast(state->in, skb)) {
+		if (!do_hnat_mape_w2l_fast(skb, state->in, __func__))
+			return NF_STOLEN;
+		else
+			return NF_ACCEPT;
+	}
+
+	if (is_from_mape(skb))
+		clr_from_extge(skb); /* MapE pingpong packets are not ext-GE traffic */
+
+	return NF_ACCEPT;
+drop:
+	printk_ratelimited(KERN_WARNING
+				"%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
+				__func__, state->in->name, skb_hnat_iface(skb),
+				HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
+				skb_hnat_sport(skb), skb_hnat_reason(skb),
+				skb_hnat_alg(skb));
+
+	return NF_DROP;
+}
+
+static unsigned int
+mtk_hnat_ipv4_nf_pre_routing(void *priv, struct sk_buff *skb,
+			     const struct nf_hook_state *state)
+{
+	if (!is_ppe_support_type(skb)) {
+		hnat_set_head_frags(state, skb, 1, hnat_set_alg); /* alg=1 on head + all frags */
+		return NF_ACCEPT;
+	}
+
+	hnat_set_head_frags(state, skb, -1, hnat_set_iif); /* classify ingress iface */
+
+	pre_routing_print(skb, state->in, state->out, __func__);
+
+	if ((skb_hnat_iface(skb) == FOE_MAGIC_WED0) ||
+	    (skb_hnat_iface(skb) == FOE_MAGIC_WED1))
+		return NF_ACCEPT; /* WED traffic: no software fast path here */
+
+	/* packets from external devices -> xxx ,step 1 , learning stage & bound stage*/
+	if (do_ext2ge_fast_try(state->in, skb)) {
+		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
+			return NF_STOLEN;
+		if (!skb)
+			goto drop;
+		return NF_ACCEPT;
+	}
+
+	/* packets form ge -> external device
+	 * For standalone wan interface
+	 */
+	if (do_ge2ext_fast(state->in, skb)) {
+		if (!do_hnat_ge_to_ext(skb, __func__))
+			return NF_STOLEN;
+		goto drop;
+	}
+
+	return NF_ACCEPT;
+drop:
+	printk_ratelimited(KERN_WARNING
+				"%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
+				__func__, state->in->name, skb_hnat_iface(skb),
+				HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
+				skb_hnat_sport(skb), skb_hnat_reason(skb),
+				skb_hnat_alg(skb));
+
+	return NF_DROP;
+}
+
/*
 * Bridge NF_BR_LOCAL_IN hook.
 *
 * Performs the same ext<->GE learning as the pre-routing hooks, but for
 * bridged traffic. With HW QoS + WHNAT, recovers the FOE entry index and
 * CPU reason from the magic HQOS VLAN tag inserted on the TX side.
 * Returns NF_ACCEPT / NF_STOLEN / NF_DROP as the pre-routing hooks do.
 */
static unsigned int
mtk_hnat_br_nf_local_in(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
{
#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
	struct vlan_ethhdr *veth;

	if (hnat_priv->data->whnat) {
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);

		/* NOTE(review): h_proto is __be16 but HQOS_MAGIC_TAG is
		 * compared without htons() here, while stores elsewhere use
		 * htons(HQOS_MAGIC_TAG) — verify against the macro definition.
		 */
		if (eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
			/* Entry index is carried in the VLAN TCI (14 bits). */
			skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
			skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
		}
	}
#endif

	if (!HAS_HQOS_MAGIC_TAG(skb) && !is_ppe_support_type(skb)) {
		/* Not offloadable: set the ALG flag so the PPE skips it. */
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
		return NF_ACCEPT;
	}

	hnat_set_head_frags(state, skb, -1, hnat_set_iif);

	pre_routing_print(skb, state->in, state->out, __func__);

	/* Debug aid: count/dump packets per PPE CPU reason. */
	if (unlikely(debug_level >= 7)) {
		hnat_cpu_reason_cnt(skb);
		if (skb_hnat_reason(skb) == dbg_cpu_reason)
			foe_dump_pkt(skb);
	}

	/* Packets from WED (Wi-Fi offload) ingress need no learning here. */
	if ((skb_hnat_iface(skb) == FOE_MAGIC_WED0) ||
	    (skb_hnat_iface(skb) == FOE_MAGIC_WED1))
		return NF_ACCEPT;

	/* packets from external devices -> xxx, step 1, learning stage & bound stage */
	if ((skb_hnat_iface(skb) == FOE_MAGIC_EXT) && !is_from_extge(skb) &&
	    !is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
		/* Lazily resolve and cache the PPD net_device. */
		if (!hnat_priv->g_ppdev)
			hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);

		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
			return NF_STOLEN;
		if (!skb)
			goto drop;
		return NF_ACCEPT;
	}

	if (hnat_priv->data->whnat) {
		if (skb_hnat_iface(skb) == FOE_MAGIC_EXT)
			clr_from_extge(skb);

		/* packets from external devices -> xxx, step 2, learning stage */
#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
		if (do_ext2ge_fast_learn(state->in, skb) && (eth_hdr(skb)->h_proto != HQOS_MAGIC_TAG)) {
#else
		if (do_ext2ge_fast_learn(state->in, skb)) {
#endif
			if (!do_hnat_ext_to_ge2(skb, __func__))
				return NF_STOLEN;
			goto drop;
		}

		/* packets from GE -> external device */
		if (do_ge2ext_fast(state->in, skb)) {
			if (!do_hnat_ge_to_ext(skb, __func__))
				return NF_STOLEN;
			goto drop;
		}
	}

	/* MapE need remove ipv6 header and pingpong. (bridge mode) */
	if (do_mape_w2l_fast(state->in, skb)) {
		if (!do_hnat_mape_w2l_fast(skb, state->in, __func__))
			return NF_STOLEN;
		else
			return NF_ACCEPT;
	}

	return NF_ACCEPT;
drop:
	printk_ratelimited(KERN_WARNING
				"%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
				__func__, state->in->name, skb_hnat_iface(skb),
				HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
				skb_hnat_sport(skb), skb_hnat_reason(skb),
				skb_hnat_alg(skb));

	return NF_DROP;
}
+
/*
 * Resolve the L2 next hop for an IPv6 flow and rewrite the packet's
 * Ethernet source/destination addresses in place.
 *
 * For PPPoE paths the MACs come straight from hw_path. Otherwise the
 * neighbour cache is consulted under rcu_read_lock_bh().
 * Returns 0 on success, -1 (as unsigned) when no usable neighbour exists.
 */
static unsigned int hnat_ipv6_get_nexthop(struct sk_buff *skb,
					  const struct net_device *out,
					  struct flow_offload_hw_path *hw_path)
{
	const struct in6_addr *ipv6_nexthop;
	struct neighbour *neigh = NULL;
	struct dst_entry *dst = skb_dst(skb);
	struct ethhdr *eth;

	if (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) {
		/* PPPoE already knows both endpoints; no neighbour lookup. */
		memcpy(eth_hdr(skb)->h_source, hw_path->eth_src, ETH_ALEN);
		memcpy(eth_hdr(skb)->h_dest, hw_path->eth_dest, ETH_ALEN);
		return 0;
	}

	rcu_read_lock_bh();
	ipv6_nexthop =
		rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, ipv6_nexthop);
	if (unlikely(!neigh)) {
		dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI6)\n", __func__,
			   &ipv6_hdr(skb)->daddr);
		rcu_read_unlock_bh();
		return -1;
	}

	/* why do we get all zero ethernet address ? */
	if (!is_valid_ether_addr(neigh->ha)) {
		rcu_read_unlock_bh();
		return -1;
	}

	if (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPIP) {
		/* copy ether type for DS-Lite and MapE: the Ethernet header
		 * sits ETH_HLEN before skb->data for these tunnel packets.
		 */
		eth = (struct ethhdr *)(skb->data - ETH_HLEN);
		eth->h_proto = skb->protocol;
	} else {
		eth = eth_hdr(skb);
	}

	/* Must happen before the unlock: neigh->ha is only RCU-protected. */
	ether_addr_copy(eth->h_dest, neigh->ha);
	ether_addr_copy(eth->h_source, out->dev_addr);

	rcu_read_unlock_bh();

	return 0;
}
+
/*
 * Resolve the L2 next hop for an IPv4 flow and rewrite the packet's
 * Ethernet source/destination addresses in place.
 *
 * PPPoE paths use the MACs recorded in hw_path; everything else goes
 * through the ARP neighbour cache under rcu_read_lock_bh().
 * Returns 0 on success, -1 (as unsigned) when no usable neighbour exists.
 */
static unsigned int hnat_ipv4_get_nexthop(struct sk_buff *skb,
					  const struct net_device *out,
					  struct flow_offload_hw_path *hw_path)
{
	u32 nexthop;
	struct neighbour *neigh;
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = (__force struct net_device *)out;

	if (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) {
		/* PPPoE already knows both endpoints; no neighbour lookup. */
		memcpy(eth_hdr(skb)->h_source, hw_path->eth_src, ETH_ALEN);
		memcpy(eth_hdr(skb)->h_dest, hw_path->eth_dest, ETH_ALEN);
		return 0;
	}

	rcu_read_lock_bh();
	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh)) {
		dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI4)\n", __func__,
			   &ip_hdr(skb)->daddr);
		rcu_read_unlock_bh();
		return -1;
	}

	/* why do we get all zero ethernet address ? */
	if (!is_valid_ether_addr(neigh->ha)) {
		rcu_read_unlock_bh();
		return -1;
	}

	/* Must happen before the unlock: neigh->ha is only RCU-protected. */
	memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN);
	memcpy(eth_hdr(skb)->h_source, out->dev_addr, ETH_ALEN);

	rcu_read_unlock_bh();

	return 0;
}
+
+static u16 ppe_get_chkbase(struct iphdr *iph)
+{
+	u16 org_chksum = ntohs(iph->check);
+	u16 org_tot_len = ntohs(iph->tot_len);
+	u16 org_id = ntohs(iph->id);
+	u16 chksum_tmp, tot_len_tmp, id_tmp;
+	u32 tmp = 0;
+	u16 chksum_base = 0;
+
+	chksum_tmp = ~(org_chksum);
+	tot_len_tmp = ~(org_tot_len);
+	id_tmp = ~(org_id);
+	tmp = chksum_tmp + tot_len_tmp + id_tmp;
+	tmp = ((tmp >> 16) & 0x7) + (tmp & 0xFFFF);
+	tmp = ((tmp >> 16) & 0x7) + (tmp & 0xFFFF);
+	chksum_base = tmp & 0xFFFF;
+
+	return chksum_base;
+}
+
/*
 * Fill the L2 (MAC + PPPoE session) fields of a FOE entry.
 *
 * The entry is taken and returned by value; callers overwrite their copy.
 * MAC addresses are stored byte-swapped in the hardware entry: the first
 * four bytes go into *_hi via swab32(), the last two into *_lo via swab16().
 * Only the pkt_type-appropriate union view is written.
 */
struct foe_entry ppe_fill_L2_info(struct ethhdr *eth, struct foe_entry entry,
				  struct flow_offload_hw_path *hw_path)
{
	switch (entry.bfib1.pkt_type) {
	case IPV4_HNAPT:
	case IPV4_HNAT:
		entry.ipv4_hnapt.dmac_hi = swab32(*((u32 *)eth->h_dest));
		entry.ipv4_hnapt.dmac_lo = swab16(*((u16 *)&eth->h_dest[4]));
		entry.ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
		entry.ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
		entry.ipv4_hnapt.pppoe_id = hw_path->pppoe_sid;
		break;
	case IPV4_DSLITE:
	case IPV4_MAP_E:
	case IPV6_6RD:
	case IPV6_5T_ROUTE:
	case IPV6_3T_ROUTE:
		/* All IPv6-style entries share the ipv6_5t_route layout. */
		entry.ipv6_5t_route.dmac_hi = swab32(*((u32 *)eth->h_dest));
		entry.ipv6_5t_route.dmac_lo = swab16(*((u16 *)&eth->h_dest[4]));
		entry.ipv6_5t_route.smac_hi = swab32(*((u32 *)eth->h_source));
		entry.ipv6_5t_route.smac_lo =
			swab16(*((u16 *)&eth->h_source[4]));
		entry.ipv6_5t_route.pppoe_id = hw_path->pppoe_sid;
		break;
	}
	return entry;
}
+
/*
 * Fill the info-block (bfib1/iblk2) control fields of a FOE entry:
 * PPPoE insertion, VLAN layering, TTL decrement, cache/keep-alive flags,
 * the hardware timestamp, and multicast/port-aging settings.
 *
 * The entry is taken and returned by value; callers overwrite their copy.
 */
struct foe_entry ppe_fill_info_blk(struct ethhdr *eth, struct foe_entry entry,
				   struct flow_offload_hw_path *hw_path)
{
	entry.bfib1.psn = (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) ? 1 : 0;
	entry.bfib1.vlan_layer += (hw_path->flags & FLOW_OFFLOAD_PATH_VLAN) ? 1 : 0;
	entry.bfib1.vpm = (entry.bfib1.vlan_layer) ? 1 : 0;
	entry.bfib1.ttl = 1;
	entry.bfib1.cah = 1;
	entry.bfib1.ka = 1;
	/* Timestamp width depends on the HNAT hardware generation. */
	entry.bfib1.time_stamp = (hnat_priv->data->version == MTK_HNAT_V4) ?
		readl(hnat_priv->fe_base + 0x0010) & (0xFF) :
		readl(hnat_priv->fe_base + 0x0010) & (0x7FFF);

	switch (entry.bfib1.pkt_type) {
	case IPV4_HNAPT:
	case IPV4_HNAT:
		if (is_multicast_ether_addr(&eth->h_dest[0])) {
			entry.ipv4_hnapt.iblk2.mcast = 1;
			if (hnat_priv->data->version == MTK_HNAT_V3) {
				entry.bfib1.sta = 1;
				entry.ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
			}
		} else {
			entry.ipv4_hnapt.iblk2.mcast = 0;
		}

		/* port_ag field width differs between HW generations. */
		entry.ipv4_hnapt.iblk2.port_ag =
			(hnat_priv->data->version == MTK_HNAT_V4) ? 0x3f : 0xf;
		break;
	case IPV4_DSLITE:
	case IPV4_MAP_E:
	case IPV6_6RD:
	case IPV6_5T_ROUTE:
	case IPV6_3T_ROUTE:
		if (is_multicast_ether_addr(&eth->h_dest[0])) {
			entry.ipv6_5t_route.iblk2.mcast = 1;
			if (hnat_priv->data->version == MTK_HNAT_V3) {
				entry.bfib1.sta = 1;
				/* NOTE(review): writes ipv4_hnapt.m_timestamp
				 * in the IPv6 branch — presumably the union
				 * members alias at the same offset; confirm
				 * against the foe_entry layout.
				 */
				entry.ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
			}
		} else {
			entry.ipv6_5t_route.iblk2.mcast = 0;
		}

		entry.ipv6_5t_route.iblk2.port_ag =
			(hnat_priv->data->version == MTK_HNAT_V4) ? 0x3f : 0xf;
		break;
	}
	return entry;
}
+
+static void ppe_fill_flow_lbl(struct foe_entry *entry, struct ipv6hdr *ip6h)
+{
+	entry->ipv4_dslite.flow_lbl[0] = ip6h->flow_lbl[2];
+	entry->ipv4_dslite.flow_lbl[1] = ip6h->flow_lbl[1];
+	entry->ipv4_dslite.flow_lbl[2] = ip6h->flow_lbl[0];
+}
+
/*
 * skb_to_hnat_info() - translate a slow-path skb into a PPE FOE entry.
 *
 * Builds a complete foe_entry from the packet headers (IPv4 NAPT/NAT,
 * DS-Lite, MAP-E, 6RD, IPv6 routing), fills the L2 and info-block fields,
 * selects the destination port (GMAC/PDMA), applies the QoS queue id, and
 * finally commits the entry to the hardware table at *foe. When the flow
 * is not handled by the Wi-Fi warp engine (whnat), the entry is moved to
 * BIND state here; otherwise binding happens later in mtk_sw_nat_hook_tx().
 *
 * Returns 0 on success or when the packet is deliberately not bound;
 * -1 for unsupported protocol/packet-type combinations.
 */
static unsigned int skb_to_hnat_info(struct sk_buff *skb,
				     const struct net_device *dev,
				     struct foe_entry *foe,
				     struct flow_offload_hw_path *hw_path)
{
	struct foe_entry entry = { 0 };
	int whnat = IS_WHNAT(dev);
	struct ethhdr *eth;
	struct iphdr *iph;
	struct ipv6hdr *ip6h;
	struct tcpudphdr _ports;
	const struct tcpudphdr *pptr;
	u32 gmac = NR_DISCARD;		/* destination port; NR_DISCARD = unresolved */
	int udp = 0;			/* 1 when the L4 protocol is UDP */
	u32 qid = 0;			/* QoS queue id taken from skb->mark */
	int mape = 0;			/* 1 for the MAP-E LAN->WAN case */

	if (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPIP)
		/* point to ethernet header for DS-Lite and MapE */
		eth = (struct ethhdr *)(skb->data - ETH_HLEN);
	else
		eth = eth_hdr(skb);
	if (is_multicast_ether_addr(eth->h_dest)) {
		/* do not bind multicast if PPE mcast not enabled */
		if (!hnat_priv->pmcast)
			return 0;
	}

	entry.bfib1.pkt_type = foe->udib1.pkt_type; /* Get packet type state */
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
	entry.bfib1.sp = foe->udib1.sp;
#endif

	/* Populate the L3/L4 fields according to the outer EtherType. */
	switch (ntohs(eth->h_proto)) {
	case ETH_P_IP:
		iph = ip_hdr(skb);
		switch (iph->protocol) {
		case IPPROTO_UDP:
			udp = 1;
			/* fallthrough */
		case IPPROTO_TCP:
			entry.ipv4_hnapt.etype = htons(ETH_P_IP);

			/* DS-Lite WAN->LAN */
			if (entry.ipv4_hnapt.bfib1.pkt_type == IPV4_DSLITE ||
			    entry.ipv4_hnapt.bfib1.pkt_type == IPV4_MAP_E) {
				/* Inner 5-tuple comes from the learned FOE entry. */
				entry.ipv4_dslite.sip = foe->ipv4_dslite.sip;
				entry.ipv4_dslite.dip = foe->ipv4_dslite.dip;
				entry.ipv4_dslite.sport =
					foe->ipv4_dslite.sport;
				entry.ipv4_dslite.dport =
					foe->ipv4_dslite.dport;

#if defined(CONFIG_MEDIATEK_NETSYS_V2)
				if (entry.bfib1.pkt_type == IPV4_MAP_E) {
					/* MAP-E also needs the translated
					 * (post-NAT) addresses and ports.
					 */
					pptr = skb_header_pointer(skb,
								  iph->ihl * 4,
								  sizeof(_ports),
								  &_ports);

					entry.ipv4_dslite.new_sip =
							ntohl(iph->saddr);
					entry.ipv4_dslite.new_dip =
							ntohl(iph->daddr);
					entry.ipv4_dslite.new_sport =
							ntohs(pptr->src);
					entry.ipv4_dslite.new_dport =
							ntohs(pptr->dst);
				}
#endif

				/* Outer IPv6 tunnel endpoints from the FOE entry. */
				entry.ipv4_dslite.tunnel_sipv6_0 =
					foe->ipv4_dslite.tunnel_sipv6_0;
				entry.ipv4_dslite.tunnel_sipv6_1 =
					foe->ipv4_dslite.tunnel_sipv6_1;
				entry.ipv4_dslite.tunnel_sipv6_2 =
					foe->ipv4_dslite.tunnel_sipv6_2;
				entry.ipv4_dslite.tunnel_sipv6_3 =
					foe->ipv4_dslite.tunnel_sipv6_3;

				entry.ipv4_dslite.tunnel_dipv6_0 =
					foe->ipv4_dslite.tunnel_dipv6_0;
				entry.ipv4_dslite.tunnel_dipv6_1 =
					foe->ipv4_dslite.tunnel_dipv6_1;
				entry.ipv4_dslite.tunnel_dipv6_2 =
					foe->ipv4_dslite.tunnel_dipv6_2;
				entry.ipv4_dslite.tunnel_dipv6_3 =
					foe->ipv4_dslite.tunnel_dipv6_3;

				entry.ipv4_dslite.bfib1.rmt = 1; /* remove tunnel header */
				entry.ipv4_dslite.iblk2.dscp = iph->tos;
				entry.ipv4_dslite.vlan1 = hw_path->vlan_id;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_dslite.iblk2.mibf = 1;

			} else {
				/* Plain IPv4 NAPT/NAT. */
				entry.ipv4_hnapt.iblk2.dscp = iph->tos;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_hnapt.iblk2.mibf = 1;

				entry.ipv4_hnapt.vlan1 = hw_path->vlan_id;

				/* Stack the ingress VLAN tag for GE-WAN -> LAN. */
				if (skb->vlan_tci && FROM_GE_WAN(skb) && IS_LAN(dev)) {
					entry.bfib1.vlan_layer += 1;

					if (entry.ipv4_hnapt.vlan1)
						entry.ipv4_hnapt.vlan2 = (skb->vlan_tci & VLAN_VID_MASK);
					else
						entry.ipv4_hnapt.vlan1 = (skb->vlan_tci & VLAN_VID_MASK);
				}

				/* Original tuple from the FOE entry; translated
				 * tuple from the (post-NAT) packet headers.
				 */
				entry.ipv4_hnapt.sip = foe->ipv4_hnapt.sip;
				entry.ipv4_hnapt.dip = foe->ipv4_hnapt.dip;
				entry.ipv4_hnapt.sport = foe->ipv4_hnapt.sport;
				entry.ipv4_hnapt.dport = foe->ipv4_hnapt.dport;

				entry.ipv4_hnapt.new_sip = ntohl(iph->saddr);
				entry.ipv4_hnapt.new_dip = ntohl(iph->daddr);
			}

			entry.ipv4_hnapt.bfib1.udp = udp;
			if (IS_IPV4_HNAPT(foe)) {
				pptr = skb_header_pointer(skb, iph->ihl * 4,
							  sizeof(_ports),
							  &_ports);
				entry.ipv4_hnapt.new_sport = ntohs(pptr->src);
				entry.ipv4_hnapt.new_dport = ntohs(pptr->dst);
			}

			break;

		default:
			return -1;	/* only TCP/UDP are offloadable */
		}
		trace_printk(
			"[%s]skb->head=%p, skb->data=%p,ip_hdr=%p, skb->len=%d, skb->data_len=%d\n",
			__func__, skb->head, skb->data, iph, skb->len,
			skb->data_len);
		break;

	case ETH_P_IPV6:
		ip6h = ipv6_hdr(skb);
		switch (ip6h->nexthdr) {
		case NEXTHDR_UDP:
			udp = 1;
			/* fallthrough */
		case NEXTHDR_TCP: /* IPv6-5T or IPv6-3T */
			entry.ipv6_5t_route.etype = htons(ETH_P_IPV6);

			entry.ipv6_5t_route.vlan1 = hw_path->vlan_id;

			/* Stack the ingress VLAN tag for GE-WAN -> LAN. */
			if (skb->vlan_tci && FROM_GE_WAN(skb) && IS_LAN(dev)) {
				entry.bfib1.vlan_layer += 1;

				if (entry.ipv6_5t_route.vlan1)
					entry.ipv6_5t_route.vlan2 = (skb->vlan_tci & VLAN_VID_MASK);
				else
					entry.ipv6_5t_route.vlan1 = (skb->vlan_tci & VLAN_VID_MASK);
			}

			if (hnat_priv->data->per_flow_accounting)
				entry.ipv6_5t_route.iblk2.mibf = 1;
			entry.ipv6_5t_route.bfib1.udp = udp;

			/* 6RD WAN->LAN: keep the outer IPv4 tunnel endpoints. */
			if (IS_IPV6_6RD(foe)) {
				entry.ipv6_5t_route.bfib1.rmt = 1;
				entry.ipv6_6rd.tunnel_sipv4 =
					foe->ipv6_6rd.tunnel_sipv4;
				entry.ipv6_6rd.tunnel_dipv4 =
					foe->ipv6_6rd.tunnel_dipv4;
			}

			entry.ipv6_3t_route.ipv6_sip0 =
				foe->ipv6_3t_route.ipv6_sip0;
			entry.ipv6_3t_route.ipv6_sip1 =
				foe->ipv6_3t_route.ipv6_sip1;
			entry.ipv6_3t_route.ipv6_sip2 =
				foe->ipv6_3t_route.ipv6_sip2;
			entry.ipv6_3t_route.ipv6_sip3 =
				foe->ipv6_3t_route.ipv6_sip3;

			entry.ipv6_3t_route.ipv6_dip0 =
				foe->ipv6_3t_route.ipv6_dip0;
			entry.ipv6_3t_route.ipv6_dip1 =
				foe->ipv6_3t_route.ipv6_dip1;
			entry.ipv6_3t_route.ipv6_dip2 =
				foe->ipv6_3t_route.ipv6_dip2;
			entry.ipv6_3t_route.ipv6_dip3 =
				foe->ipv6_3t_route.ipv6_dip3;

			if (IS_IPV6_5T_ROUTE(foe) || IS_IPV6_6RD(foe)) {
				entry.ipv6_5t_route.sport =
					foe->ipv6_5t_route.sport;
				entry.ipv6_5t_route.dport =
					foe->ipv6_5t_route.dport;
			}
			/* DSCP = traffic class: 4 bits from priority, 4 from flow_lbl[0]. */
			entry.ipv6_5t_route.iblk2.dscp =
				(ip6h->priority << 4 |
				 (ip6h->flow_lbl[0] >> 4));
			break;

		case NEXTHDR_IPIP:
			/* IPv4-in-IPv6: DS-Lite or MAP-E, LAN->WAN direction. */
			if ((!mape_toggle &&
			     entry.bfib1.pkt_type == IPV4_DSLITE) ||
			    (mape_toggle &&
			     entry.bfib1.pkt_type == IPV4_MAP_E)) {
				/* DS-Lite LAN->WAN */
				entry.ipv4_dslite.bfib1.udp =
					foe->ipv4_dslite.bfib1.udp;
				entry.ipv4_dslite.sip = foe->ipv4_dslite.sip;
				entry.ipv4_dslite.dip = foe->ipv4_dslite.dip;
				entry.ipv4_dslite.sport =
					foe->ipv4_dslite.sport;
				entry.ipv4_dslite.dport =
					foe->ipv4_dslite.dport;

				/* Outer tunnel endpoints come from the packet. */
				entry.ipv4_dslite.tunnel_sipv6_0 =
					ntohl(ip6h->saddr.s6_addr32[0]);
				entry.ipv4_dslite.tunnel_sipv6_1 =
					ntohl(ip6h->saddr.s6_addr32[1]);
				entry.ipv4_dslite.tunnel_sipv6_2 =
					ntohl(ip6h->saddr.s6_addr32[2]);
				entry.ipv4_dslite.tunnel_sipv6_3 =
					ntohl(ip6h->saddr.s6_addr32[3]);

				entry.ipv4_dslite.tunnel_dipv6_0 =
					ntohl(ip6h->daddr.s6_addr32[0]);
				entry.ipv4_dslite.tunnel_dipv6_1 =
					ntohl(ip6h->daddr.s6_addr32[1]);
				entry.ipv4_dslite.tunnel_dipv6_2 =
					ntohl(ip6h->daddr.s6_addr32[2]);
				entry.ipv4_dslite.tunnel_dipv6_3 =
					ntohl(ip6h->daddr.s6_addr32[3]);

				ppe_fill_flow_lbl(&entry, ip6h);

				entry.ipv4_dslite.priority = ip6h->priority;
				entry.ipv4_dslite.hop_limit = ip6h->hop_limit;
				entry.ipv4_dslite.vlan1 = hw_path->vlan_id;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_dslite.iblk2.mibf = 1;
			} else if (mape_toggle &&
				   entry.bfib1.pkt_type == IPV4_HNAPT) {
				/* MapE LAN -> WAN */
				mape = 1;
				entry.ipv4_hnapt.iblk2.dscp =
					foe->ipv4_hnapt.iblk2.dscp;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_hnapt.iblk2.mibf = 1;

				entry.ipv4_hnapt.vlan1 = hw_path->vlan_id;

				entry.ipv4_hnapt.sip = foe->ipv4_hnapt.sip;
				entry.ipv4_hnapt.dip = foe->ipv4_hnapt.dip;
				entry.ipv4_hnapt.sport = foe->ipv4_hnapt.sport;
				entry.ipv4_hnapt.dport = foe->ipv4_hnapt.dport;

				entry.ipv4_hnapt.new_sip =
					foe->ipv4_hnapt.new_sip;
				entry.ipv4_hnapt.new_dip =
					foe->ipv4_hnapt.new_dip;
				entry.ipv4_hnapt.etype = htons(ETH_P_IP);

#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
				/* qid field width depends on HW generation. */
				entry.ipv4_hnapt.iblk2.qid =
					(hnat_priv->data->version == MTK_HNAT_V4) ?
					 skb->mark & 0x7f : skb->mark & 0xf;
				entry.ipv4_hnapt.iblk2.fqos = 1;
#endif

				entry.ipv4_hnapt.bfib1.udp =
					foe->ipv4_hnapt.bfib1.udp;

				entry.ipv4_hnapt.new_sport =
					foe->ipv4_hnapt.new_sport;
				entry.ipv4_hnapt.new_dport =
					foe->ipv4_hnapt.new_dport;
				/* Save the outer IPv6 header for the RX-side
				 * MAP-E pingpong reconstruction.
				 */
				mape_l2w_v6h = *ip6h;
			}
			break;

		default:
			return -1;	/* only TCP/UDP/IPIP are offloadable */
		}

		trace_printk(
			"[%s]skb->head=%p, skb->data=%p,ipv6_hdr=%p, skb->len=%d, skb->data_len=%d\n",
			__func__, skb->head, skb->data, ip6h, skb->len,
			skb->data_len);
		break;

	default:
		/* Non-IP EtherType: only valid for a learned 6RD entry
		 * (IPv6-in-IPv4, LAN->WAN).
		 */
		ip6h = ipv6_hdr(skb);
		iph = ip_hdr(skb);
		switch (entry.bfib1.pkt_type) {
		case IPV6_6RD: /* 6RD LAN->WAN */
			entry.ipv6_6rd.ipv6_sip0 = foe->ipv6_6rd.ipv6_sip0;
			entry.ipv6_6rd.ipv6_sip1 = foe->ipv6_6rd.ipv6_sip1;
			entry.ipv6_6rd.ipv6_sip2 = foe->ipv6_6rd.ipv6_sip2;
			entry.ipv6_6rd.ipv6_sip3 = foe->ipv6_6rd.ipv6_sip3;

			entry.ipv6_6rd.ipv6_dip0 = foe->ipv6_6rd.ipv6_dip0;
			entry.ipv6_6rd.ipv6_dip1 = foe->ipv6_6rd.ipv6_dip1;
			entry.ipv6_6rd.ipv6_dip2 = foe->ipv6_6rd.ipv6_dip2;
			entry.ipv6_6rd.ipv6_dip3 = foe->ipv6_6rd.ipv6_dip3;

			entry.ipv6_6rd.sport = foe->ipv6_6rd.sport;
			entry.ipv6_6rd.dport = foe->ipv6_6rd.dport;
			entry.ipv6_6rd.tunnel_sipv4 = ntohl(iph->saddr);
			entry.ipv6_6rd.tunnel_dipv4 = ntohl(iph->daddr);
			/* Base values so the PPE can patch checksum/id/flags
			 * of the generated outer IPv4 header incrementally.
			 */
			entry.ipv6_6rd.hdr_chksum = ppe_get_chkbase(iph);
			entry.ipv6_6rd.flag = (ntohs(iph->frag_off) >> 13);
			entry.ipv6_6rd.ttl = iph->ttl;
			entry.ipv6_6rd.dscp = iph->tos;
			entry.ipv6_6rd.per_flow_6rd_id = 1;
			entry.ipv6_6rd.vlan1 = hw_path->vlan_id;
			if (hnat_priv->data->per_flow_accounting)
				entry.ipv6_6rd.iblk2.mibf = 1;
			break;

		default:
			return -1;
		}
	}

	/* Fill Layer2 Info.*/
	entry = ppe_fill_L2_info(eth, entry, hw_path);

	/* Fill Info Blk*/
	entry = ppe_fill_info_blk(eth, entry, hw_path);

	/* Resolve the destination port (gmac) from the egress device. */
	if (IS_LAN(dev)) {
		if (IS_DSA_LAN(dev))
			hnat_dsa_fill_stag(dev, &entry, hw_path,
					   ntohs(eth->h_proto), mape);

		if (IS_BOND_MODE)
			/* Spread bonded flows across GMACs by entry hash. */
			gmac = ((skb_hnat_entry(skb) >> 1) % hnat_priv->gmac_num) ?
				 NR_GMAC2_PORT : NR_GMAC1_PORT;
		else
			gmac = NR_GMAC1_PORT;
	} else if (IS_WAN(dev)) {
		if (IS_DSA_WAN(dev))
			hnat_dsa_fill_stag(dev, &entry, hw_path,
					   ntohs(eth->h_proto), mape);
		if (mape_toggle && mape == 1) {
			/* MAP-E goes back to the CPU for encapsulation. */
			gmac = NR_PDMA_PORT;
			/* Set act_dp = wan_dev */
			entry.ipv4_hnapt.act_dp = dev->ifindex;
		} else {
			gmac = (IS_GMAC1_MODE) ? NR_GMAC1_PORT : NR_GMAC2_PORT;
		}
	} else if (IS_EXT(dev) && (FROM_GE_PPD(skb) || FROM_GE_LAN(skb) ||
		   FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) {
		/* Without whnat, tag the source GE with a VLAN so the RX
		 * hook can tell LAN (vlan 1) from WAN/virtual (vlan 2).
		 */
		if (!hnat_priv->data->whnat && IS_GMAC1_MODE) {
			entry.bfib1.vpm = 1;
			entry.bfib1.vlan_layer = 1;

			if (FROM_GE_LAN(skb))
				entry.ipv4_hnapt.vlan1 = 1;
			else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
				entry.ipv4_hnapt.vlan1 = 2;
		}

		trace_printk("learn of lan or wan(iif=%x) --> %s(ext)\n",
			     skb_hnat_iface(skb), dev->name);
		/* To CPU then stolen by pre-routing hnat hook of LAN/WAN
		 * Current setting is PDMA RX.
		 */
		gmac = NR_PDMA_PORT;
		if (IS_IPV4_GRP(foe))
			entry.ipv4_hnapt.act_dp = dev->ifindex;
		else
			entry.ipv6_5t_route.act_dp = dev->ifindex;
	} else {
		printk_ratelimited(KERN_WARNING
					"Unknown case of dp, iif=%x --> %s\n",
					skb_hnat_iface(skb), dev->name);

		return 0;
	}

	qid = skb->mark & (MTK_QDMA_TX_MASK);

	/* Commit destination port, port_mg and QoS queue per entry family. */
	if (IS_IPV4_GRP(foe)) {
		entry.ipv4_hnapt.iblk2.dp = gmac;
		entry.ipv4_hnapt.iblk2.port_mg =
			(hnat_priv->data->version == MTK_HNAT_V1) ? 0x3f : 0;
#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
		if (hnat_priv->data->version == MTK_HNAT_V4) {
			entry.ipv4_hnapt.iblk2.qid = qid & 0x7f;
		} else {
			/* qid[5:0]= port_mg[1:0]+ qid[3:0] */
			entry.ipv4_hnapt.iblk2.qid = qid & 0xf;
			if (hnat_priv->data->version != MTK_HNAT_V1)
				entry.ipv4_hnapt.iblk2.port_mg |=
					((qid >> 4) & 0x3);

			/* Without whnat, carry the entry index to the CPU in
			 * a magic HQOS VLAN tag so TX can find the entry.
			 */
			if (((IS_EXT(dev) && (FROM_GE_LAN(skb) ||
			      FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) ||
			      ((mape_toggle && mape == 1) && !FROM_EXT(skb))) &&
			      (!whnat)) {
				entry.ipv4_hnapt.etype = htons(HQOS_MAGIC_TAG);
				entry.ipv4_hnapt.vlan1 = skb_hnat_entry(skb);
				entry.bfib1.vlan_layer = 1;
			}
		}

		if (FROM_EXT(skb) || skb_hnat_sport(skb) == NR_QDMA_PORT)
			entry.ipv4_hnapt.iblk2.fqos = 0;
		else
			entry.ipv4_hnapt.iblk2.fqos = 1;
#else
		entry.ipv4_hnapt.iblk2.fqos = 0;
#endif
	} else {
		entry.ipv6_5t_route.iblk2.dp = gmac;
		entry.ipv6_5t_route.iblk2.port_mg =
			(hnat_priv->data->version == MTK_HNAT_V1) ? 0x3f : 0;
#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
		if (hnat_priv->data->version == MTK_HNAT_V4) {
			entry.ipv6_5t_route.iblk2.qid = qid & 0x7f;
		} else {
			/* qid[5:0]= port_mg[1:0]+ qid[3:0] */
			entry.ipv6_5t_route.iblk2.qid = qid & 0xf;
			if (hnat_priv->data->version != MTK_HNAT_V1)
				entry.ipv6_5t_route.iblk2.port_mg |=
							((qid >> 4) & 0x3);

			if (IS_EXT(dev) && (FROM_GE_LAN(skb) ||
			    FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb)) &&
			    (!whnat)) {
				entry.ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG);
				entry.ipv6_5t_route.vlan1 = skb_hnat_entry(skb);
				entry.bfib1.vlan_layer = 1;
			}
		}

		if (FROM_EXT(skb))
			entry.ipv6_5t_route.iblk2.fqos = 0;
		else
			entry.ipv6_5t_route.iblk2.fqos = 1;
#else
		entry.ipv6_5t_route.iblk2.fqos = 0;
#endif
	}

	/* Commit the assembled entry to the hardware-visible table. */
	memcpy(foe, &entry, sizeof(entry));
	/*reset statistic for this entry*/
	if (hnat_priv->data->per_flow_accounting)
		memset(&hnat_priv->acct[skb_hnat_entry(skb)], 0,
		       sizeof(struct mib_entry));

	/* Ensure all entry fields are visible before flipping the state. */
	wmb();
	/* The INFO2.port_mg and 2nd VLAN ID fields of PPE entry are redefined
	 * by Wi-Fi whnat engine. These data and INFO2.dp will be updated and
	 * the entry is set to BIND state in mtk_sw_nat_hook_tx().
	 */
	if (!whnat)
		foe->bfib1.state = BIND;

	return 0;
}
+
+int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no)
+{
+	struct foe_entry *entry;
+	struct ethhdr *eth;
+
+	if (skb_hnat_alg(skb) || !is_magic_tag_valid(skb) || !IS_SPACE_AVAILABLE_HEAD(skb))
+		return NF_ACCEPT;
+
+	trace_printk(
+		"[%s]entry=%x reason=%x gmac_no=%x wdmaid=%x rxid=%x wcid=%x bssid=%x\n",
+		__func__, skb_hnat_entry(skb), skb_hnat_reason(skb), gmac_no,
+		skb_hnat_wdma_id(skb), skb_hnat_bss_id(skb),
+		skb_hnat_wc_id(skb), skb_hnat_rx_id(skb));
+
+	if (!skb_hnat_is_hashed(skb))
+		return NF_ACCEPT;
+
+	entry = &hnat_priv->foe_table_cpu[skb_hnat_entry(skb)];
+	if (entry_hnat_is_bound(entry))
+		return NF_ACCEPT;
+
+	if (skb_hnat_reason(skb) != HIT_UNBIND_RATE_REACH)
+		return NF_ACCEPT;
+
+	eth = eth_hdr(skb);
+	if (is_multicast_ether_addr(eth->h_dest)) {
+		/*not bind multicast if PPE mcast not enable*/
+		if (!hnat_priv->pmcast)
+			return NF_ACCEPT;
+	}
+
+	/* Some mt_wifi virtual interfaces, such as apcli,
+	 * will change the smac for specail purpose.
+	 */
+	switch (entry->bfib1.pkt_type) {
+	case IPV4_HNAPT:
+	case IPV4_HNAT:
+		entry->ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
+		entry->ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
+		break;
+	case IPV4_DSLITE:
+	case IPV4_MAP_E:
+	case IPV6_6RD:
+	case IPV6_5T_ROUTE:
+	case IPV6_3T_ROUTE:
+		entry->ipv6_5t_route.smac_hi = swab32(*((u32 *)eth->h_source));
+		entry->ipv6_5t_route.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
+		break;
+	}
+
+	/* MT7622 wifi hw_nat not support QoS */
+	if (IS_IPV4_GRP(entry)) {
+		entry->ipv4_hnapt.iblk2.fqos = 0;
+		if (gmac_no == NR_WHNAT_WDMA_PORT) {
+			entry->ipv4_hnapt.winfo.bssid = skb_hnat_bss_id(skb);
+			entry->ipv4_hnapt.winfo.wcid = skb_hnat_wc_id(skb);
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+			entry->ipv4_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
+			entry->ipv4_hnapt.iblk2.winfoi = 1;
+#else
+			entry->ipv4_hnapt.winfo.rxid = skb_hnat_rx_id(skb);
+			entry->ipv4_hnapt.iblk2w.winfoi = 1;
+			entry->ipv4_hnapt.iblk2w.wdmaid = skb_hnat_wdma_id(skb);
+#endif
+		} else {
+			if (IS_GMAC1_MODE && !hnat_dsa_is_enable(hnat_priv)) {
+				entry->bfib1.vpm = 1;
+				entry->bfib1.vlan_layer = 1;
+
+				if (FROM_GE_LAN(skb))
+					entry->ipv4_hnapt.vlan1 = 1;
+				else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
+					entry->ipv4_hnapt.vlan1 = 2;
+			}
+
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+			if (FROM_GE_LAN(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb)) {
+				entry->bfib1.vpm = 0;
+				entry->bfib1.vlan_layer = 1;
+				entry->ipv4_hnapt.etype = htons(HQOS_MAGIC_TAG);
+				entry->ipv4_hnapt.vlan1 = skb_hnat_entry(skb);
+				entry->ipv4_hnapt.iblk2.fqos = 1;
+			}
+#endif
+		}
+		entry->ipv4_hnapt.iblk2.dp = gmac_no;
+	} else {
+		entry->ipv6_5t_route.iblk2.fqos = 0;
+		if (gmac_no == NR_WHNAT_WDMA_PORT) {
+			entry->ipv6_5t_route.winfo.bssid = skb_hnat_bss_id(skb);
+			entry->ipv6_5t_route.winfo.wcid = skb_hnat_wc_id(skb);
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+			entry->ipv6_5t_route.iblk2.rxid = skb_hnat_rx_id(skb);
+			entry->ipv6_5t_route.iblk2.winfoi = 1;
+#else
+			entry->ipv6_5t_route.winfo.rxid = skb_hnat_rx_id(skb);
+			entry->ipv6_5t_route.iblk2w.winfoi = 1;
+			entry->ipv6_5t_route.iblk2w.wdmaid = skb_hnat_wdma_id(skb);
+#endif
+		} else {
+			if (IS_GMAC1_MODE && !hnat_dsa_is_enable(hnat_priv)) {
+				entry->bfib1.vpm = 1;
+				entry->bfib1.vlan_layer = 1;
+
+				if (FROM_GE_LAN(skb))
+					entry->ipv6_5t_route.vlan1 = 1;
+				else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
+					entry->ipv6_5t_route.vlan1 = 2;
+			}
+
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+			if (FROM_GE_LAN(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb)) {
+				entry->bfib1.vpm = 0;
+				entry->bfib1.vlan_layer = 1;
+				entry->ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG);
+				entry->ipv6_5t_route.vlan1 = skb_hnat_entry(skb);
+				entry->ipv6_5t_route.iblk2.fqos = 1;
+			}
+#endif
+		}
+		entry->ipv6_5t_route.iblk2.dp = gmac_no;
+	}
+
+	entry->bfib1.state = BIND;
+
+	return NF_ACCEPT;
+}
+
+int mtk_sw_nat_hook_rx(struct sk_buff *skb)
+{
+	if (!IS_SPACE_AVAILABLE_HEAD(skb))
+		return NF_ACCEPT;
+
+	skb_hnat_alg(skb) = 0;
+	skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
+
+	if (skb_hnat_iface(skb) == FOE_MAGIC_WED0)
+		skb_hnat_sport(skb) = NR_WDMA0_PORT;
+	else if (skb_hnat_iface(skb) == FOE_MAGIC_WED1)
+		skb_hnat_sport(skb) = NR_WDMA1_PORT;
+
+	return NF_ACCEPT;
+}
+
+void mtk_ppe_dev_register_hook(struct net_device *dev)
+{
+	int i, number = 0;
+	struct extdev_entry *ext_entry;
+
+	if (!strncmp(dev->name, "wds", 3))
+		return;
+
+	for (i = 1; i < MAX_IF_NUM; i++) {
+		if (hnat_priv->wifi_hook_if[i] == dev) {
+			pr_info("%s : %s has been registered in wifi_hook_if table[%d]\n",
+				__func__, dev->name, i);
+			return;
+		}
+		if (!hnat_priv->wifi_hook_if[i]) {
+			if (find_extif_from_devname(dev->name)) {
+				extif_set_dev(dev);
+				goto add_wifi_hook_if;
+			}
+
+			number = get_ext_device_number();
+			if (number >= MAX_EXT_DEVS) {
+				pr_info("%s : extdev array is full. %s is not registered\n",
+					__func__, dev->name);
+				return;
+			}
+
+			ext_entry = kzalloc(sizeof(*ext_entry), GFP_KERNEL);
+			if (!ext_entry)
+				return;
+
+			strncpy(ext_entry->name, dev->name, IFNAMSIZ);
+			dev_hold(dev);
+			ext_entry->dev = dev;
+			ext_if_add(ext_entry);
+
+add_wifi_hook_if:
+			dev_hold(dev);
+			hnat_priv->wifi_hook_if[i] = dev;
+
+			break;
+		}
+	}
+	pr_info("%s : ineterface %s register (%d)\n", __func__, dev->name, i);
+}
+
+void mtk_ppe_dev_unregister_hook(struct net_device *dev)
+{
+	int i;
+
+	for (i = 1; i < MAX_IF_NUM; i++) {
+		if (hnat_priv->wifi_hook_if[i] == dev) {
+			hnat_priv->wifi_hook_if[i] = NULL;
+			dev_put(dev);
+
+			break;
+		}
+	}
+
+	extif_put_dev(dev);
+	pr_info("%s : ineterface %s set null (%d)\n", __func__, dev->name, i);
+}
+
+static unsigned int mtk_hnat_accel_type(struct sk_buff *skb)
+{
+	struct dst_entry *dst;
+	struct nf_conn *ct;
+	enum ip_conntrack_info ctinfo;
+	const struct nf_conn_help *help;
+
+	/* Do not accelerate 1st round of xfrm flow, and 2nd round of xfrm flow
+	 * is from local_out which is also filtered in sanity check.
+	 */
+	dst = skb_dst(skb);
+	if (dst && dst_xfrm(dst))
+		return 0;
+
+	ct = nf_ct_get(skb, &ctinfo);
+	if (!ct)
+		return 1;
+
+	/* rcu_read_lock()ed by nf_hook_slow */
+	help = nfct_help(ct);
+	if (help && rcu_dereference(help->helper))
+		return 0;
+
+	return 1;
+}
+
/*
 * Common POST_ROUTING work shared by the IPv4 and IPv6 hooks.
 *
 * Validates the skb's HNAT tag, resolves the real egress device through
 * ndo_flow_offload_check(), then dispatches on the PPE CPU reason:
 *  - HIT_UNBIND_RATE_REACH: resolve the next hop via @fn and build/bind
 *    the FOE entry (skb_to_hnat_info).
 *  - HIT_BIND_KEEPALIVE_DUP_OLD_HDR: keep-alive duplicate; refresh the
 *    mcast timestamp and signal the caller to drop bound duplicates.
 *  - HIT_BIND_MULTICAST_*: drop if the PPE already forwarded to GMAC.
 *
 * Returns 0 to let the caller NF_ACCEPT, non-zero (-1) to NF_DROP.
 */
static unsigned int mtk_hnat_nf_post_routing(
	struct sk_buff *skb, const struct net_device *out,
	unsigned int (*fn)(struct sk_buff *, const struct net_device *,
			   struct flow_offload_hw_path *),
	const char *func)
{
	struct foe_entry *entry;
	struct flow_offload_hw_path hw_path = { .dev = (struct net_device*)out,
						.virt_dev = (struct net_device*)out };
	const struct net_device *arp_dev = out;

	if (skb_hnat_alg(skb) || unlikely(!is_magic_tag_valid(skb) ||
					  !IS_SPACE_AVAILABLE_HEAD(skb)))
		return 0;

	if (unlikely(!skb_hnat_is_hashed(skb)))
		return 0;

	/* Let the driver redirect us to the underlying physical device. */
	if (out->netdev_ops->ndo_flow_offload_check) {
		if (out->netdev_ops->ndo_flow_offload_check(&hw_path))
			return 0;
		out = (IS_GMAC1_MODE) ? hw_path.virt_dev : hw_path.dev;
	}

	if (!IS_LAN(out) && !IS_WAN(out) && !IS_EXT(out))
		return 0;

	trace_printk("[%s] case hit, %x-->%s, reason=%x\n", __func__,
		     skb_hnat_iface(skb), out->name, skb_hnat_reason(skb));

	entry = &hnat_priv->foe_table_cpu[skb_hnat_entry(skb)];

	switch (skb_hnat_reason(skb)) {
	case HIT_UNBIND_RATE_REACH:
		if (entry_hnat_is_bound(entry))
			break;

		if (fn && !mtk_hnat_accel_type(skb))
			break;

		/* Next-hop resolution failed: skip binding, keep sw path. */
		if (fn && fn(skb, arp_dev, &hw_path))
			break;

		skb_to_hnat_info(skb, out, entry, &hw_path);
		break;
	case HIT_BIND_KEEPALIVE_DUP_OLD_HDR:
		if (fn && !mtk_hnat_accel_type(skb))
			break;

		/* update mcast timestamp*/
		if (hnat_priv->data->version == MTK_HNAT_V3 &&
		    hnat_priv->data->mcast && entry->bfib1.sta == 1)
			entry->ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);

		if (entry_hnat_is_bound(entry)) {
			/* Keep-alive duplicate of a bound flow: drop it. */
			memset(skb_hnat_info(skb), 0, FOE_INFO_LEN);

			return -1;
		}
		break;
	case HIT_BIND_MULTICAST_TO_CPU:
	case HIT_BIND_MULTICAST_TO_GMAC_CPU:
		/*do not forward to gdma again,if ppe already done it*/
		if (IS_LAN(out) || IS_WAN(out))
			return -1;
		break;
	}

	return 0;
}
+
+/* IPv6 LOCAL_OUT hook.
+ *
+ * For a flow that is hashed but not yet bound (HIT_UNBIND_RATE_REACH)
+ * and carries IPv4-in-IPv6 (NEXTHDR_IPIP):
+ *  - MAP-E enabled (mape_toggle): record the inner IPv4 5-tuple into the
+ *    FOE entry so the PPE can rebuild the inner header once bound.
+ *  - MAP-E disabled: mark the entry as DS-Lite.
+ * Always returns NF_ACCEPT; this hook only annotates the FOE entry.
+ *
+ * Fix: skb_header_pointer() returns NULL when the requested range is not
+ * present in the skb; both results were dereferenced unconditionally.
+ */
+static unsigned int
+mtk_hnat_ipv6_nf_local_out(void *priv, struct sk_buff *skb,
+			   const struct nf_hook_state *state)
+{
+	struct foe_entry *entry;
+	struct ipv6hdr *ip6h;
+	struct iphdr _iphdr;
+	const struct iphdr *iph;
+	struct tcpudphdr _ports;
+	const struct tcpudphdr *pptr;
+	int udp = 0;
+
+	if (unlikely(!skb_hnat_is_hashed(skb)))
+		return NF_ACCEPT;
+
+	entry = &hnat_priv->foe_table_cpu[skb_hnat_entry(skb)];
+	if (skb_hnat_reason(skb) == HIT_UNBIND_RATE_REACH) {
+		ip6h = ipv6_hdr(skb);
+		if (ip6h->nexthdr == NEXTHDR_IPIP) {
+			/* Map-E LAN->WAN: need to record orig info before fn. */
+			if (mape_toggle) {
+				iph = skb_header_pointer(skb, IPV6_HDR_LEN,
+							 sizeof(_iphdr), &_iphdr);
+				if (unlikely(!iph))
+					return NF_ACCEPT;
+
+				switch (iph->protocol) {
+				case IPPROTO_UDP:
+					udp = 1;
+					/* fall through */
+				case IPPROTO_TCP:
+				break;
+
+				default:
+					return NF_ACCEPT;
+				}
+
+				pptr = skb_header_pointer(skb, IPV6_HDR_LEN + iph->ihl * 4,
+							  sizeof(_ports), &_ports);
+				if (unlikely(!pptr))
+					return NF_ACCEPT;
+
+				entry->bfib1.udp = udp;
+
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+				entry->bfib1.pkt_type = IPV4_MAP_E;
+				entry->ipv4_dslite.iblk2.dscp = iph->tos;
+				entry->ipv4_dslite.new_sip = ntohl(iph->saddr);
+				entry->ipv4_dslite.new_dip = ntohl(iph->daddr);
+				entry->ipv4_dslite.new_sport = ntohs(pptr->src);
+				entry->ipv4_dslite.new_dport = ntohs(pptr->dst);
+#else
+				entry->ipv4_hnapt.iblk2.dscp = iph->tos;
+				entry->ipv4_hnapt.new_sip = ntohl(iph->saddr);
+				entry->ipv4_hnapt.new_dip = ntohl(iph->daddr);
+				entry->ipv4_hnapt.new_sport = ntohs(pptr->src);
+				entry->ipv4_hnapt.new_dport = ntohs(pptr->dst);
+#endif
+			} else {
+				entry->bfib1.pkt_type = IPV4_DSLITE;
+			}
+		}
+	}
+	return NF_ACCEPT;
+}
+
+/* IPv6 POST_ROUTING hook: hand the skb to mtk_hnat_nf_post_routing()
+ * with the IPv6 next-hop resolver. Returns NF_ACCEPT when that call
+ * returns 0 (offload handled or not applicable), otherwise logs the
+ * hnat metadata via trace_printk() and drops the packet.
+ */
+static unsigned int
+mtk_hnat_ipv6_nf_post_routing(void *priv, struct sk_buff *skb,
+			      const struct nf_hook_state *state)
+{
+	post_routing_print(skb, state->in, state->out, __func__);
+
+	if (!mtk_hnat_nf_post_routing(skb, state->out, hnat_ipv6_get_nexthop,
+				      __func__))
+		return NF_ACCEPT;
+
+	/* Offload path failed: emit diagnostics and drop. */
+	trace_printk(
+		"%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
+		__func__, skb_hnat_iface(skb), state->out->name, HNAT_SKB_CB2(skb)->magic,
+		skb_hnat_entry(skb), skb_hnat_sport(skb), skb_hnat_reason(skb),
+		skb_hnat_alg(skb));
+
+	return NF_DROP;
+}
+
+/* IPv4 POST_ROUTING hook: identical flow to the IPv6 variant above it in
+ * the file, but resolves the next hop with hnat_ipv4_get_nexthop().
+ * NF_ACCEPT on success/not-applicable, NF_DROP (with trace) on failure.
+ */
+static unsigned int
+mtk_hnat_ipv4_nf_post_routing(void *priv, struct sk_buff *skb,
+			      const struct nf_hook_state *state)
+{
+	post_routing_print(skb, state->in, state->out, __func__);
+
+	if (!mtk_hnat_nf_post_routing(skb, state->out, hnat_ipv4_get_nexthop,
+				      __func__))
+		return NF_ACCEPT;
+
+	/* Offload path failed: emit diagnostics and drop. */
+	trace_printk(
+		"%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
+		__func__, skb_hnat_iface(skb), state->out->name, HNAT_SKB_CB2(skb)->magic,
+		skb_hnat_entry(skb), skb_hnat_sport(skb), skb_hnat_reason(skb),
+		skb_hnat_alg(skb));
+
+	return NF_DROP;
+}
+
+/* Bridge PRE_ROUTING hook ("pong" stage for HW QoS).
+ *
+ * With CONFIG_NET_MEDIATEK_HW_QOS, frames tagged with HQOS_MAGIC_TAG
+ * carry the FOE entry index in the VLAN TCI; recover it and force the
+ * HIT_BIND_FORCE_TO_CPU reason.
+ * Then runs the two fast-learning stages:
+ *  - external device -> GMAC (do_hnat_ext_to_ge2), and
+ *  - GMAC -> external device (do_hnat_ge_to_ext).
+ * Returns NF_STOLEN when a fast path consumed the skb, NF_DROP if the
+ * fast path failed, NF_ACCEPT otherwise.
+ */
+static unsigned int
+mtk_pong_hqos_handler(void *priv, struct sk_buff *skb,
+		      const struct nf_hook_state *state)
+{
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
+
+	/* NOTE(review): h_proto is big-endian on the wire; comparing it to
+	 * the host-order constant HQOS_MAGIC_TAG without htons() looks
+	 * endian-dependent — confirm against the tagging side.
+	 */
+	if (eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
+		skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
+		skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
+	}
+#endif
+
+	if (skb_hnat_iface(skb) == FOE_MAGIC_EXT)
+		clr_from_extge(skb);
+
+	/* packets from external devices -> xxx ,step 2, learning stage */
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+	if (do_ext2ge_fast_learn(state->in, skb) && (eth_hdr(skb)->h_proto != HQOS_MAGIC_TAG)) {
+#else
+	if (do_ext2ge_fast_learn(state->in, skb)) {
+#endif
+		if (!do_hnat_ext_to_ge2(skb, __func__))
+			return NF_STOLEN;
+		goto drop;
+	}
+
+	/* packets form ge -> external device */
+	if (do_ge2ext_fast(state->in, skb)) {
+		if (!do_hnat_ge_to_ext(skb, __func__))
+			return NF_STOLEN;
+		goto drop;
+	}
+
+	return NF_ACCEPT;
+drop:
+	printk_ratelimited(KERN_WARNING
+				"%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
+				__func__, state->in->name, skb_hnat_iface(skb),
+				HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
+				skb_hnat_sport(skb), skb_hnat_reason(skb),
+				skb_hnat_alg(skb));
+
+	return NF_DROP;
+}
+
+/* Bridge LOCAL_OUT hook: same post-routing offload attempt as the L3
+ * hooks, but with no next-hop resolver (bridged traffic needs none).
+ * NF_ACCEPT on success/not-applicable, NF_DROP (with trace) on failure.
+ *
+ * Fix: pass NULL rather than literal 0 for the function-pointer
+ * argument (kernel sparse/checkpatch idiom; behavior unchanged).
+ */
+static unsigned int
+mtk_hnat_br_nf_local_out(void *priv, struct sk_buff *skb,
+			 const struct nf_hook_state *state)
+{
+	post_routing_print(skb, state->in, state->out, __func__);
+
+	if (!mtk_hnat_nf_post_routing(skb, state->out, NULL, __func__))
+		return NF_ACCEPT;
+
+	trace_printk(
+		"%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
+		__func__, skb_hnat_iface(skb), state->out->name, HNAT_SKB_CB2(skb)->magic,
+		skb_hnat_entry(skb), skb_hnat_sport(skb), skb_hnat_reason(skb),
+		skb_hnat_alg(skb));
+
+	return NF_DROP;
+}
+
+/* IPv4 LOCAL_OUT hook: prevent locally-originated flows from being
+ * bound by the PPE. 6RD (IPv4 proto == IPPROTO_IPV6) entries are
+ * re-typed to IPV6_6RD; all head fragments get the ALG bit set via
+ * hnat_set_head_frags()/hnat_set_alg. Always returns NF_ACCEPT
+ * (NF_DROP only on headroom-reallocation failure).
+ */
+static unsigned int
+mtk_hnat_ipv4_nf_local_out(void *priv, struct sk_buff *skb,
+			   const struct nf_hook_state *state)
+{
+	struct sk_buff *new_skb;
+	struct foe_entry *entry;
+	struct iphdr *iph;
+
+	if (!skb_hnat_is_hashed(skb))
+		return NF_ACCEPT;
+
+	entry = &hnat_priv->foe_table_cpu[skb_hnat_entry(skb)];
+
+	/* Ensure there is room for the hnat descriptor at skb->head. */
+	if (unlikely(skb_headroom(skb) < FOE_INFO_LEN)) {
+		new_skb = skb_realloc_headroom(skb, FOE_INFO_LEN);
+		if (!new_skb) {
+			dev_info(hnat_priv->dev, "%s:drop\n", __func__);
+			return NF_DROP;
+		}
+		/* NOTE(review): the original skb is freed here but only the
+		 * local variable is reassigned — the netfilter core still
+		 * holds the old pointer after NF_ACCEPT. Looks like a
+		 * use-after-free window; confirm how callers handle this.
+		 */
+		dev_kfree_skb(skb);
+		skb = new_skb;
+	}
+
+	/* Make the flow from local not be bound. */
+	iph = ip_hdr(skb);
+	if (iph->protocol == IPPROTO_IPV6) {
+		entry->udib1.pkt_type = IPV6_6RD;
+		hnat_set_head_frags(state, skb, 0, hnat_set_alg);
+	} else {
+		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
+	}
+
+	return NF_ACCEPT;
+}
+
+/* Bridge FORWARD hook (installed by whnat_adjust_nf_hooks() in place of
+ * mtk_pong_hqos_handler): for ext-device -> ext-device bridging, set
+ * the ALG bit on head fragments so the PPE does not bind the flow.
+ */
+static unsigned int mtk_hnat_br_nf_forward(void *priv,
+					   struct sk_buff *skb,
+					   const struct nf_hook_state *state)
+{
+	if (unlikely(IS_EXT(state->in) && IS_EXT(state->out)))
+		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
+
+	return NF_ACCEPT;
+}
+
+/* Netfilter hook table: IPv4/IPv6 PRE_ROUTING, POST_ROUTING and
+ * LOCAL_OUT, plus bridge-layer hooks. The bridge entries are re-homed
+ * at runtime by whnat_adjust_nf_hooks() when WiFi HNAT is active.
+ */
+static struct nf_hook_ops mtk_hnat_nf_ops[] __read_mostly = {
+	{
+		.hook = mtk_hnat_ipv4_nf_pre_routing,
+		.pf = NFPROTO_IPV4,
+		.hooknum = NF_INET_PRE_ROUTING,
+		.priority = NF_IP_PRI_FIRST + 1,
+	},
+	{
+		.hook = mtk_hnat_ipv6_nf_pre_routing,
+		.pf = NFPROTO_IPV6,
+		.hooknum = NF_INET_PRE_ROUTING,
+		.priority = NF_IP_PRI_FIRST + 1,
+	},
+	{
+		.hook = mtk_hnat_ipv6_nf_post_routing,
+		.pf = NFPROTO_IPV6,
+		.hooknum = NF_INET_POST_ROUTING,
+		.priority = NF_IP_PRI_LAST,
+	},
+	{
+		.hook = mtk_hnat_ipv6_nf_local_out,
+		.pf = NFPROTO_IPV6,
+		.hooknum = NF_INET_LOCAL_OUT,
+		.priority = NF_IP_PRI_LAST,
+	},
+	{
+		.hook = mtk_hnat_ipv4_nf_post_routing,
+		.pf = NFPROTO_IPV4,
+		.hooknum = NF_INET_POST_ROUTING,
+		.priority = NF_IP_PRI_LAST,
+	},
+	{
+		.hook = mtk_hnat_ipv4_nf_local_out,
+		.pf = NFPROTO_IPV4,
+		.hooknum = NF_INET_LOCAL_OUT,
+		.priority = NF_IP_PRI_LAST,
+	},
+	{
+		.hook = mtk_hnat_br_nf_local_in,
+		.pf = NFPROTO_BRIDGE,
+		.hooknum = NF_BR_LOCAL_IN,
+		.priority = NF_BR_PRI_FIRST,
+	},
+	{
+		.hook = mtk_hnat_br_nf_local_out,
+		.pf = NFPROTO_BRIDGE,
+		.hooknum = NF_BR_LOCAL_OUT,
+		.priority = NF_BR_PRI_LAST - 1,
+	},
+	{
+		.hook = mtk_pong_hqos_handler,
+		.pf = NFPROTO_BRIDGE,
+		.hooknum = NF_BR_PRE_ROUTING,
+		.priority = NF_BR_PRI_FIRST,
+	},
+};
+
+/* Register all HNAT hooks on init_net. Returns 0 or a negative errno
+ * from nf_register_net_hooks().
+ */
+int hnat_register_nf_hooks(void)
+{
+	return nf_register_net_hooks(&init_net, mtk_hnat_nf_ops, ARRAY_SIZE(mtk_hnat_nf_ops));
+}
+
+/* Unregister the HNAT hook table registered by hnat_register_nf_hooks(). */
+void hnat_unregister_nf_hooks(void)
+{
+	nf_unregister_net_hooks(&init_net, mtk_hnat_nf_ops, ARRAY_SIZE(mtk_hnat_nf_ops));
+}
+
+/* Re-home the bridge hooks for the WiFi-HNAT (whnat) configuration:
+ *  - br_nf_local_in  moves to NF_BR_PRE_ROUTING,
+ *  - br_nf_local_out moves to NF_BR_POST_ROUTING,
+ *  - the pong/hqos PRE_ROUTING handler is replaced by the FORWARD-stage
+ *    ext-to-ext marker mtk_hnat_br_nf_forward.
+ * Must be called before hnat_register_nf_hooks(). Always returns 0.
+ */
+int whnat_adjust_nf_hooks(void)
+{
+	struct nf_hook_ops *hook = mtk_hnat_nf_ops;
+	unsigned int n = ARRAY_SIZE(mtk_hnat_nf_ops);
+
+	/* NOTE(review): the address of a static array is never NULL, so
+	 * this guard can never trigger — dead code kept as-is.
+	 */
+	if (!hook)
+		return -1;
+
+	while (n-- > 0) {
+		if (hook[n].hook == mtk_hnat_br_nf_local_in) {
+			hook[n].hooknum = NF_BR_PRE_ROUTING;
+		} else if (hook[n].hook == mtk_hnat_br_nf_local_out) {
+			hook[n].hooknum = NF_BR_POST_ROUTING;
+		} else if (hook[n].hook == mtk_pong_hqos_handler) {
+			hook[n].hook = mtk_hnat_br_nf_forward;
+			hook[n].hooknum = NF_BR_FORWARD;
+			hook[n].priority = NF_BR_PRI_LAST - 1;
+		}
+	}
+
+	return 0;
+}
+
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+/* packet_type receive callback for HW-QoS tagged frames: recover the
+ * FOE entry index from the VLAN TCI (lower 14 bits), force the
+ * HIT_BIND_FORCE_TO_CPU reason, and push the frame out through the
+ * GMAC -> external-device fast path. Always returns 0.
+ */
+int mtk_hqos_ptype_cb(struct sk_buff *skb, struct net_device *dev,
+		      struct packet_type *pt, struct net_device *unused)
+{
+	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
+
+	skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
+	skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
+
+	do_hnat_ge_to_ext(skb, __func__);
+
+	return 0;
+}
+#endif
+
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_stag.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_stag.c
new file mode 100644
index 0000000..b0fabfb
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_stag.c
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2020 MediaTek Inc.
+ * Author: Landen Chao <landen.chao@mediatek.com>
+ */
+
+#include <linux/of_device.h>
+#include <net/netfilter/nf_flow_table.h>
+#include "hnat.h"
+
+/* Fill the DSA special tag into the FOE entry's etype field so frames
+ * forwarded by the PPE through the CPU port carry the switch-port
+ * directive.
+ *
+ * The port bit is derived from the DSA port's DT "reg" property; bit 8
+ * additionally signals that a VLAN tag follows the special tag.
+ * For MAP-E LAN->WAN (mape != 0) IPv6 entries are bound to the CPU, so
+ * no tag is written.
+ *
+ * Fix: of_get_property() returns NULL when the property is absent;
+ * be32_to_cpup() would then dereference NULL. Bail out early instead.
+ */
+void hnat_dsa_fill_stag(const struct net_device *netdev,
+			struct foe_entry *entry,
+			struct flow_offload_hw_path *hw_path,
+			u16 eth_proto,
+			int mape)
+{
+	const struct net_device *ndev;
+	const unsigned int *port_reg;
+	int port_index;
+	u16 sp_tag;
+
+	if (hw_path->flags & FLOW_OFFLOAD_PATH_VLAN)
+		ndev = hw_path->dev;
+	else
+		ndev = netdev;
+
+	port_reg = of_get_property(ndev->dev.of_node, "reg", NULL);
+	if (unlikely(!port_reg))
+		return;
+
+	port_index = be32_to_cpup(port_reg);
+	sp_tag = BIT(port_index);
+
+	if (!entry->bfib1.vlan_layer)
+		entry->bfib1.vlan_layer = 1;
+	else
+		/* VLAN existence indicator */
+		sp_tag |= BIT(8);
+	entry->bfib1.vpm = 0;
+
+	switch (eth_proto) {
+	case ETH_P_IP:
+		if (entry->ipv4_hnapt.bfib1.pkt_type == IPV4_DSLITE)
+			entry->ipv4_dslite.etype = sp_tag;
+		else
+			entry->ipv4_hnapt.etype = sp_tag;
+		break;
+	case ETH_P_IPV6:
+		/* In the case MAPE LAN --> WAN, binding entry is to CPU.
+		 * Do not add special tag.
+		 */
+		if (!mape)
+			/* etype offset of ipv6 entries are the same. */
+			entry->ipv6_5t_route.etype = sp_tag;
+
+		break;
+	default:
+		pr_info("DSA + HNAT unsupport protocol\n");
+	}
+}
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
new file mode 100644
index 0000000..bd857f4
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
@@ -0,0 +1,126 @@
+/*   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; version 2 of the License
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
+ *   Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef NF_HNAT_MTK_H
+#define NF_HNAT_MTK_H
+
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include "../mtk_eth_soc.h"
+
+#define HNAT_SKB_CB2(__skb) ((struct hnat_skb_cb2 *)&((__skb)->cb[44]))
+struct hnat_skb_cb2 {
+	__u32 magic;
+};
+
+#if defined(CONFIG_MEDIATEK_NETSYS_RX_V2)
+/* HNAT metadata layout, accessed through the skb_hnat_* macros below
+ * (stored at skb->head). NETSYS RX v2 variant: wider FOE entry index
+ * (15 bits) and wcid (10 bits).
+ */
+struct hnat_desc {
+	u32 entry : 15;			/* FOE table entry index */
+	u32 resv0 : 3;
+	u32 crsn : 5;			/* CPU reason (HIT_* codes) */
+	u32 resv1 : 3;
+	u32 sport : 4;			/* source port */
+	u32 resv2 : 1;
+	u32 alg : 1;			/* ALG flag: do not bind */
+	u32 iface : 4;			/* ingress interface id */
+	u32 resv3 : 4;
+	u32 magic_tag_protect : 16;	/* HNAT_MAGIC_TAG validity check */
+	u32 wdmaid : 2;
+	u32 rxid : 2;
+	u32 wcid : 10;
+	u32 bssid : 6;
+} __packed;
+#else
+/* Legacy layout: 14-bit entry index, 8-bit wdmaid/wcid. */
+struct hnat_desc {
+	u32 entry : 14;
+	u32 crsn : 5;
+	u32 sport : 4;
+	u32 alg : 1;
+	u32 iface : 4;
+	u32 resv : 4;
+	u32 magic_tag_protect : 16;
+	u32 wdmaid : 8;
+	u32 rxid : 2;
+	u32 wcid : 8;
+	u32 bssid : 6;
+} __packed;
+#endif
+
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+#define HQOS_MAGIC_TAG 0x5678
+#define HAS_HQOS_MAGIC_TAG(skb) (skb->protocol == HQOS_MAGIC_TAG)
+#else
+#define HAS_HQOS_MAGIC_TAG(skb) NULL
+#endif
+
+#define HNAT_MAGIC_TAG 0x6789
+#define WIFI_INFO_LEN 3
+#define FOE_INFO_LEN (10 + WIFI_INFO_LEN)
+#define IS_SPACE_AVAILABLE_HEAD(skb)                                           \
+	((((skb_headroom(skb) >= FOE_INFO_LEN) ? 1 : 0)))
+
+#define skb_hnat_info(skb) ((struct hnat_desc *)(skb->head))
+#define skb_hnat_magic(skb) (((struct hnat_desc *)(skb->head))->magic)
+#define skb_hnat_reason(skb) (((struct hnat_desc *)(skb->head))->crsn)
+#define skb_hnat_entry(skb) (((struct hnat_desc *)(skb->head))->entry)
+#define skb_hnat_sport(skb) (((struct hnat_desc *)(skb->head))->sport)
+#define skb_hnat_alg(skb) (((struct hnat_desc *)(skb->head))->alg)
+#define skb_hnat_iface(skb) (((struct hnat_desc *)(skb->head))->iface)
+#define skb_hnat_magic_tag(skb) (((struct hnat_desc *)((skb)->head))->magic_tag_protect)
+#define skb_hnat_wdma_id(skb) (((struct hnat_desc *)((skb)->head))->wdmaid)
+#define skb_hnat_rx_id(skb) (((struct hnat_desc *)((skb)->head))->rxid)
+#define skb_hnat_wc_id(skb) (((struct hnat_desc *)((skb)->head))->wcid)
+#define skb_hnat_bss_id(skb) (((struct hnat_desc *)((skb)->head))->bssid)
+#define do_ext2ge_fast_try(dev, skb) (IS_EXT(dev) && !is_from_extge(skb))
+#define set_from_extge(skb) (HNAT_SKB_CB2(skb)->magic = 0x78786688)
+#define clr_from_extge(skb) (HNAT_SKB_CB2(skb)->magic = 0x0)
+#define set_to_ppe(skb) (HNAT_SKB_CB2(skb)->magic = 0x78681415)
+#define is_from_extge(skb) (HNAT_SKB_CB2(skb)->magic == 0x78786688)
+#define is_magic_tag_valid(skb) (skb_hnat_magic_tag(skb) == HNAT_MAGIC_TAG)
+#define set_from_mape(skb) (HNAT_SKB_CB2(skb)->magic = 0x78787788)
+#define is_from_mape(skb) (HNAT_SKB_CB2(skb)->magic == 0x78787788)
+#define is_unreserved_port(hdr)						       \
+	((ntohs(hdr->source) > 1023) && (ntohs(hdr->dest) > 1023))
+
+#define TTL_0 0x02
+#define HAS_OPTION_HEADER 0x03
+#define NO_FLOW_IS_ASSIGNED 0x07
+#define IPV4_WITH_FRAGMENT 0x08
+#define IPV4_HNAPT_DSLITE_WITH_FRAGMENT 0x09
+#define IPV4_HNAPT_DSLITE_WITHOUT_TCP_UDP 0x0A
+#define IPV6_5T_6RD_WITHOUT_TCP_UDP 0x0B
+#define TCP_FIN_SYN_RST                                                        \
+	0x0C /* Ingress packet is TCP fin/syn/rst (for IPv4 NAPT/DS-Lite or IPv6 5T-route/6RD) */
+#define UN_HIT 0x0D /* FOE Un-hit */
+#define HIT_UNBIND 0x0E /* FOE Hit unbind */
+#define HIT_UNBIND_RATE_REACH 0x0F
+#define HIT_BIND_TCP_FIN 0x10
+#define HIT_BIND_TTL_1 0x11
+#define HIT_BIND_WITH_VLAN_VIOLATION 0x12
+#define HIT_BIND_KEEPALIVE_UC_OLD_HDR 0x13
+#define HIT_BIND_KEEPALIVE_MC_NEW_HDR 0x14
+#define HIT_BIND_KEEPALIVE_DUP_OLD_HDR 0x15
+#define HIT_BIND_FORCE_TO_CPU 0x16
+#define HIT_BIND_WITH_OPTION_HEADER 0x17
+#define HIT_BIND_MULTICAST_TO_CPU 0x18
+#define HIT_BIND_MULTICAST_TO_GMAC_CPU 0x19
+#define HIT_PRE_BIND 0x1A
+#define HIT_BIND_PACKET_SAMPLING 0x1B
+#define HIT_BIND_EXCEED_MTU 0x1C
+
+u32 hnat_tx(struct sk_buff *skb);
+u32 hnat_set_skb_info(struct sk_buff *skb, u32 *rxd);
+u32 hnat_reg(struct net_device *, void __iomem *);
+u32 hnat_unreg(void);
+
+#endif
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_sgmii.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_sgmii.c
new file mode 100755
index 0000000..4db27df
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018-2019 MediaTek Inc.
+
+/* A library for MediaTek SGMII circuit
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include "mtk_eth_soc.h"
+
+/* Resolve the "mediatek,sgmiisys" syscon phandles under node @r into
+ * regmaps (up to MTK_MAX_DEVS) and record @ana_rgc3, the per-SoC offset
+ * of the speed-selection register. Returns 0 on success or the
+ * PTR_ERR() of a failed syscon lookup.
+ */
+int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3)
+{
+	struct device_node *np;
+	int i;
+
+	ss->ana_rgc3 = ana_rgc3;
+
+	for (i = 0; i < MTK_MAX_DEVS; i++) {
+		np = of_parse_phandle(r, "mediatek,sgmiisys", i);
+		if (!np)
+			break;	/* fewer instances than MTK_MAX_DEVS is fine */
+
+		ss->regmap[i] = syscon_node_to_regmap(np);
+		if (IS_ERR(ss->regmap[i]))
+			return PTR_ERR(ss->regmap[i]);
+	}
+
+	return 0;
+}
+
+/* Configure SGMII instance @id for autonegotiation: program the link
+ * timer, disable remote-fault, restart AN, then release the QPHY from
+ * power-down. Returns -EINVAL if the instance has no regmap, else 0.
+ */
+int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id)
+{
+	unsigned int val;
+
+	if (!ss->regmap[id])
+		return -EINVAL;
+
+	/* Setup the link timer and QPHY power up inside SGMIISYS */
+	regmap_write(ss->regmap[id], SGMSYS_PCS_LINK_TIMER,
+		     SGMII_LINK_TIMER_DEFAULT);
+
+	regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val);
+	val |= SGMII_REMOTE_FAULT_DIS;
+	regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val);
+
+	regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val);
+	val |= SGMII_AN_RESTART;
+	regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val);
+
+	/* Release PHYA power down state */
+	regmap_read(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
+	val &= ~SGMII_PHYA_PWD;
+	regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, val);
+
+	return 0;
+}
+
+/* Configure SGMII instance @id in forced (no-AN) mode from the phylink
+ * @state: select PHY lane speed (3.125G for 2500BASE-X), disable AN,
+ * program speed/duplex in SGMSYS_SGMII_MODE, and release the QPHY from
+ * power-down. Returns -EINVAL if the instance has no regmap, else 0.
+ *
+ * Fix: drop the stray ';' after the switch block (empty statement that
+ * trips -Wextra; removed upstream as well) and make the "speed bits
+ * left untouched" fallthrough explicit with a default case.
+ */
+int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id,
+			       const struct phylink_link_state *state)
+{
+	unsigned int val;
+
+	if (!ss->regmap[id])
+		return -EINVAL;
+
+	regmap_read(ss->regmap[id], ss->ana_rgc3, &val);
+	val &= ~RG_PHY_SPEED_MASK;
+	if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+		val |= RG_PHY_SPEED_3_125G;
+	regmap_write(ss->regmap[id], ss->ana_rgc3, val);
+
+	/* Disable SGMII AN */
+	regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val);
+	val &= ~SGMII_AN_ENABLE;
+	regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val);
+
+	/* SGMII force mode setting */
+	regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val);
+	val &= ~SGMII_IF_MODE_MASK;
+
+	switch (state->speed) {
+	case SPEED_10:
+		val |= SGMII_SPEED_10;
+		break;
+	case SPEED_100:
+		val |= SGMII_SPEED_100;
+		break;
+	case SPEED_2500:
+	case SPEED_1000:
+		val |= SGMII_SPEED_1000;
+		break;
+	default:
+		break;
+	}
+
+	if (state->duplex == DUPLEX_FULL)
+		val |= SGMII_DUPLEX_FULL;
+
+	regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val);
+
+	/* Release PHYA power down state */
+	regmap_read(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
+	val &= ~SGMII_PHYA_PWD;
+	regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, val);
+
+	return 0;
+}
+
+/* Restart SGMII autonegotiation for GMAC @mac_id. When the SoC shares a
+ * single SGMII instance (MTK_SHARED_SGMII), instance 0 is used for any
+ * GMAC; otherwise the instance index equals the GMAC id. No-op if the
+ * selected instance has no regmap.
+ */
+void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id)
+{
+	struct mtk_sgmii *ss = eth->sgmii;
+	unsigned int val, sid;
+
+	/* Decide how GMAC and SGMIISYS be mapped */
+	sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
+	       0 : mac_id;
+
+	if (!ss->regmap[sid])
+		return;
+
+	regmap_read(ss->regmap[sid], SGMSYS_PCS_CONTROL_1, &val);
+	val |= SGMII_AN_RESTART;
+	regmap_write(ss->regmap[sid], SGMSYS_PCS_CONTROL_1, val);
+}
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/Kconfig b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/Kconfig
new file mode 100644
index 0000000..f5be18e
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/Kconfig
@@ -0,0 +1,39 @@
+config RAETH
+	tristate "Mediatek Ethernet GMAC"
+	---help---
+	  This driver supports Mediatek gigabit ethernet family of
+	  adapters.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about Mediatek Ethernet devices. If you say Y,
+	  you will be asked for your specific card in the following questions.
+
+if RAETH
+
+config  GE1_SGMII_FORCE_2500
+	bool "SGMII_FORCE_2500 (GigaSW)"
+	depends on RAETH
+	---help---
+	  If you want to use sgmii force 2500.
+	  Please enable GE1_SGMII_FORCE_2500.
+	  Switch must support SGMII interface.
+	  This config will impact switch app makefile.
+
+config ETH_SKB_ALLOC_SELECT
+	bool "SKB Allocation API Select"
+
+choice
+	prompt "SKB Allocation API Selection"
+	depends on ETH_SKB_ALLOC_SELECT
+	default ETH_PAGE_ALLOC_SKB
+
+config  ETH_SLAB_ALLOC_SKB
+	bool "SLAB skb allocation"
+
+config  ETH_PAGE_ALLOC_SKB
+	bool "Page skb allocation"
+
+endchoice
+
+endif 	# RAETH
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/Makefile b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/Makefile
new file mode 100644
index 0000000..e72dd58
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/Makefile
@@ -0,0 +1,24 @@
+obj-$(CONFIG_RAETH) += raeth.o
+raeth-objs := raether.o raether_pdma.o ra_mac.o mii_mgr.o ra_switch.o ra_dbg_proc.o
+raeth-objs += raether_qdma.o
+raeth-objs += raether_rss.o
+
+ifeq ($(CONFIG_RAETH_ETHTOOL),y)
+raeth-objs += ra_ethtool.o
+endif
+
+raeth-objs += raether_hwlro.o
+raeth-objs += ra_dbg_hwlro.o
+#raeth-objs += ra_dbg_hwioc.o
+
+ccflags-y += -Idrivers/net/ethernet/raeth
+ccflags-y += -Iinclude/linux/
+
+ifeq ($(CONFIG_RAETH_PDMA_DVT),y)
+raeth-objs += dvt/raether_pdma_dvt.o
+obj-m += dvt/pkt_gen.o
+obj-m += dvt/pkt_gen_udp_frag.o
+obj-m += dvt/pkt_gen_tcp_frag.o
+endif
+
+#ccflags-y += -Werror
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/mii_mgr.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/mii_mgr.c
new file mode 100644
index 0000000..7da2517
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/mii_mgr.c
@@ -0,0 +1,338 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "mii_mgr.h"
+
+/* Enable (an_status == 1) or disable the switch's PHY autonegotiation
+ * polling by toggling bit 31 of the ESW_PHY_POLLING register.
+ * NOTE(review): dereferences ESW_PHY_POLLING as a raw pointer rather
+ * than through an ioremap cookie, and (1 << 31) shifts into the sign
+ * bit of int — confirm this matches the rest of the driver's register
+ * access style.
+ */
+void set_an_polling(u32 an_status)
+{
+	if (an_status == 1)
+		*(unsigned long *)(ESW_PHY_POLLING) |= (1 << 31);
+	else
+		*(unsigned long *)(ESW_PHY_POLLING) &= ~(1 << 31);
+}
+
+/* Clause-22 MDIO read of @phy_register on @phy_addr via
+ * MDIO_PHY_CONTROL_0. Returns 1 on success (*read_data holds the
+ * 16-bit value) or 0 on busy/timeout (5 s bound on each wait loop).
+ * Serialized against other MDIO users by ei_local->mdio_lock.
+ * Per the wait loops below, bit 31 of MDIO_PHY_CONTROL_0 is the
+ * busy/start flag.
+ */
+u32 __mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data)
+{
+	u32 status = 0;
+	u32 rc = 0;
+	unsigned long t_start = jiffies;
+	u32 data = 0;
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ei_local->mdio_lock, flags);
+	/* We enable mdio gpio purpose register, and disable it when exit. */
+	enable_mdio(1);
+
+	/* make sure previous read operation is complete */
+	while (1) {
+		/* 0 : Read/write operation complete */
+		if (!(sys_reg_read(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
+			break;
+		} else if (time_after(jiffies, t_start + 5 * HZ)) {
+			enable_mdio(0);
+			rc = 0;
+			pr_err("\n MDIO Read operation is ongoing !!\n");
+			goto out;
+		}
+	}
+
+	/* Command word: opcode (read), PHY address, register address. */
+	data =
+	    (0x01 << 16) | (0x02 << 18) | (phy_addr << 20) | (phy_register <<
+							      25);
+	sys_reg_write(MDIO_PHY_CONTROL_0, data);
+	sys_reg_write(MDIO_PHY_CONTROL_0, (data | (1 << 31)));
+
+	/* make sure read operation is complete */
+	t_start = jiffies;
+	while (1) {
+		if (!(sys_reg_read(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
+			status = sys_reg_read(MDIO_PHY_CONTROL_0);
+			*read_data = (u32)(status & 0x0000FFFF);
+
+			enable_mdio(0);
+			rc = 1;
+			goto out;
+		} else if (time_after(jiffies, t_start + 5 * HZ)) {
+			enable_mdio(0);
+			rc = 0;
+			pr_err
+			    ("\n MDIO Read operation Time Out!!\n");
+			goto out;
+		}
+	}
+out:
+	spin_unlock_irqrestore(&ei_local->mdio_lock, flags);
+	return rc;
+}
+
+/* Clause-22 MDIO write of @write_data to @phy_register on @phy_addr.
+ * Returns 1 on success, 0 on busy/timeout (5 s bound per wait loop).
+ * Serialized by ei_local->mdio_lock; mirrors __mii_mgr_read() above.
+ */
+u32 __mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data)
+{
+	unsigned long t_start = jiffies;
+	u32 data;
+	u32 rc = 0;
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ei_local->mdio_lock, flags);
+	enable_mdio(1);
+
+	/* make sure previous write operation is complete */
+	while (1) {
+		if (!(sys_reg_read(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
+			break;
+		} else if (time_after(jiffies, t_start + 5 * HZ)) {
+			enable_mdio(0);
+			rc = 0;
+			pr_err("\n MDIO Write operation ongoing\n");
+			goto out;
+		}
+	}
+
+	/* Command word: opcode (write), PHY address, register, data. */
+	data =
+	    (0x01 << 16) | (1 << 18) | (phy_addr << 20) | (phy_register << 25) |
+	    write_data;
+	sys_reg_write(MDIO_PHY_CONTROL_0, data);
+	sys_reg_write(MDIO_PHY_CONTROL_0, (data | (1 << 31))); /*start*/
+	/* pr_err("\n Set Command [0x%08X] to PHY !!\n",MDIO_PHY_CONTROL_0); */
+
+	t_start = jiffies;
+
+	/* make sure write operation is complete */
+	while (1) {
+		if (!(sys_reg_read(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
+			enable_mdio(0);
+			rc = 1;
+			goto out;
+		} else if (time_after(jiffies, t_start + 5 * HZ)) {
+			enable_mdio(0);
+			rc = 0;
+			pr_err("\n MDIO Write operation Time Out\n");
+			goto out;
+		}
+	}
+out:
+	spin_unlock_irqrestore(&ei_local->mdio_lock, flags);
+	return rc;
+}
+
+/* Public MDIO read. For forced RGMII/TRGMII architectures and
+ * phy_addr == 31 (the embedded switch), the 32-bit register is reached
+ * through the switch's paged access scheme: write the page (reg 0x1f),
+ * read the low word, then the high word (reg 0x10), with AN polling
+ * temporarily disabled around the sequence. Otherwise a plain
+ * clause-22 read. Returns 1 on success, 0 on failure.
+ */
+u32 mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	u32 low_word;
+	u32 high_word;
+	u32 an_status = 0;
+
+	if ((ei_local->architecture &
+	     (GE1_RGMII_FORCE_1000 | GE1_TRGMII_FORCE_2000 |
+	      GE1_TRGMII_FORCE_2600)) && (phy_addr == 31)) {
+		/* Remember whether polling was on so it can be restored. */
+		an_status = (*(unsigned long *)(ESW_PHY_POLLING) & (1 << 31));
+		if (an_status)
+			set_an_polling(0);
+		if (__mii_mgr_write
+		    (phy_addr, 0x1f, ((phy_register >> 6) & 0x3FF))) {
+			if (__mii_mgr_read
+			    (phy_addr, (phy_register >> 2) & 0xF, &low_word)) {
+				if (__mii_mgr_read
+				    (phy_addr, (0x1 << 4), &high_word)) {
+					*read_data =
+					    (high_word << 16) | (low_word &
+								 0xFFFF);
+					if (an_status)
+						set_an_polling(1);
+					return 1;
+				}
+			}
+		}
+		if (an_status)
+			set_an_polling(1);
+	} else {
+		if (__mii_mgr_read(phy_addr, phy_register, read_data))
+			return 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(mii_mgr_read);
+
+/* Public MDIO write; mirror of mii_mgr_read(). For the embedded switch
+ * (phy_addr == 31) on forced RGMII/TRGMII architectures, writes go
+ * through the paged scheme: page (reg 0x1f), low word, then high word
+ * (reg 0x10), with AN polling suspended. Returns 1 on success, 0 on
+ * failure.
+ */
+u32 mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	u32 an_status = 0;
+
+	if ((ei_local->architecture &
+	     (GE1_RGMII_FORCE_1000 | GE1_TRGMII_FORCE_2000 |
+	      GE1_TRGMII_FORCE_2600)) && (phy_addr == 31)) {
+		an_status = (*(unsigned long *)(ESW_PHY_POLLING) & (1 << 31));
+		if (an_status)
+			set_an_polling(0);
+		if (__mii_mgr_write
+		    (phy_addr, 0x1f, (phy_register >> 6) & 0x3FF)) {
+			if (__mii_mgr_write
+			    (phy_addr, ((phy_register >> 2) & 0xF),
+			     write_data & 0xFFFF)) {
+				if (__mii_mgr_write
+				    (phy_addr, (0x1 << 4), write_data >> 16)) {
+					if (an_status)
+						set_an_polling(1);
+					return 1;
+				}
+			}
+		}
+		if (an_status)
+			set_an_polling(1);
+	} else {
+		if (__mii_mgr_write(phy_addr, phy_register, write_data))
+			return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(mii_mgr_write);
+
+/* Clause-45 address phase: latch @reg_addr for @dev_addr on @port_num
+ * (opcode 0b00). Returns 1 on success, 0 on busy/timeout.
+ * Locking is the callers' responsibility (mii_mgr_read_cl45/
+ * mii_mgr_write_cl45 hold mdio_lock around this call).
+ * NOTE(review): on the first timeout path this returns `rc`, which is
+ * always 0 here — equivalent to `return 0` but worth tidying.
+ */
+u32 mii_mgr_cl45_set_address(u32 port_num, u32 dev_addr, u32 reg_addr)
+{
+	u32 rc = 0;
+	unsigned long t_start = jiffies;
+	u32 data = 0;
+
+	enable_mdio(1);
+
+	while (1) {
+		if (!(sys_reg_read(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
+			break;
+		} else if (time_after(jiffies, t_start + 5 * HZ)) {
+			enable_mdio(0);
+			pr_err("\n MDIO Read operation is ongoing !!\n");
+			return rc;
+		}
+	}
+	data =
+	    (dev_addr << 25) | (port_num << 20) | (0x00 << 18) | (0x00 << 16) |
+	    reg_addr;
+	sys_reg_write(MDIO_PHY_CONTROL_0, data);
+	sys_reg_write(MDIO_PHY_CONTROL_0, (data | (1 << 31)));
+
+	t_start = jiffies;
+	while (1) {
+		if (!(sys_reg_read(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
+			enable_mdio(0);
+			return 1;
+		} else if (time_after(jiffies, t_start + 5 * HZ)) {
+			enable_mdio(0);
+			pr_err("\n MDIO Write operation Time Out\n");
+			return 0;
+		}
+	}
+}
+
+/* Clause-45 MDIO read: address phase via mii_mgr_cl45_set_address(),
+ * then a data-read command (opcode 0b11). Returns 1 on success
+ * (*read_data holds the 16-bit value), 0 on busy/timeout.
+ * Holds ei_local->mdio_lock for the whole address+data sequence.
+ */
+u32 mii_mgr_read_cl45(u32 port_num, u32 dev_addr, u32 reg_addr, u32 *read_data)
+{
+	u32 status = 0;
+	u32 rc = 0;
+	unsigned long t_start = jiffies;
+	u32 data = 0;
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ei_local->mdio_lock, flags);
+	/* set address first */
+	mii_mgr_cl45_set_address(port_num, dev_addr, reg_addr);
+	/* udelay(10); */
+
+	enable_mdio(1);
+
+	while (1) {
+		if (!(sys_reg_read(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
+			break;
+		} else if (time_after(jiffies, t_start + 5 * HZ)) {
+			enable_mdio(0);
+			rc = 0;
+			pr_err("\n MDIO Read operation is ongoing !!\n");
+			goto out;
+		}
+	}
+	data =
+	    (dev_addr << 25) | (port_num << 20) | (0x03 << 18) | (0x00 << 16) |
+	    reg_addr;
+	sys_reg_write(MDIO_PHY_CONTROL_0, data);
+	sys_reg_write(MDIO_PHY_CONTROL_0, (data | (1 << 31)));
+	t_start = jiffies;
+	while (1) {
+		if (!(sys_reg_read(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
+			*read_data =
+			    (sys_reg_read(MDIO_PHY_CONTROL_0) & 0x0000FFFF);
+			enable_mdio(0);
+			rc = 1;
+			goto out;
+		} else if (time_after(jiffies, t_start + 5 * HZ)) {
+			enable_mdio(0);
+			rc = 0;
+			pr_err
+			    ("\n MDIO Read operation Time Out!!\n");
+			goto out;
+		}
+		status = sys_reg_read(MDIO_PHY_CONTROL_0);
+	}
+out:
+	spin_unlock_irqrestore(&ei_local->mdio_lock, flags);
+	return rc;
+}
+
+/* Clause-45 MDIO write: address phase via mii_mgr_cl45_set_address(),
+ * then a data-write command (opcode 0b01). Returns 1 on success, 0 on
+ * busy/timeout. Holds ei_local->mdio_lock for the whole sequence.
+ */
+u32 mii_mgr_write_cl45(u32 port_num, u32 dev_addr, u32 reg_addr, u32 write_data)
+{
+	u32 rc = 0;
+	unsigned long t_start = jiffies;
+	u32 data = 0;
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ei_local->mdio_lock, flags);
+	/* set address first */
+	mii_mgr_cl45_set_address(port_num, dev_addr, reg_addr);
+	/* udelay(10); */
+
+	enable_mdio(1);
+	while (1) {
+		if (!(sys_reg_read(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
+			break;
+		} else if (time_after(jiffies, t_start + 5 * HZ)) {
+			enable_mdio(0);
+			rc = 0;
+			pr_err("\n MDIO Read operation is ongoing !!\n");
+			goto out;
+		}
+	}
+
+	data =
+	    (dev_addr << 25) | (port_num << 20) | (0x01 << 18) | (0x00 << 16) |
+	    write_data;
+	sys_reg_write(MDIO_PHY_CONTROL_0, data);
+	sys_reg_write(MDIO_PHY_CONTROL_0, (data | (1 << 31)));
+
+	t_start = jiffies;
+
+	while (1) {
+		if (!(sys_reg_read(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
+			enable_mdio(0);
+			rc = 1;
+			goto out;
+		} else if (time_after(jiffies, t_start + 5 * HZ)) {
+			enable_mdio(0);
+			rc = 0;
+			pr_err("\n MDIO Write operation Time Out\n");
+			goto out;
+		}
+	}
+out:
+	spin_unlock_irqrestore(&ei_local->mdio_lock, flags);
+	return rc;
+}
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/mii_mgr.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/mii_mgr.h
new file mode 100644
index 0000000..f8e0517
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/mii_mgr.h
@@ -0,0 +1,27 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/netdevice.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+#include "raether.h"
+
+extern struct net_device *dev_raether;
+
+#define PHY_CONTROL_0		0x0004
+#define MDIO_PHY_CONTROL_0	(RALINK_ETH_MAC_BASE + PHY_CONTROL_0)
+#define enable_mdio(x)
+
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_dbg_hwioc.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_dbg_hwioc.c
new file mode 100644
index 0000000..1132903
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_dbg_hwioc.c
@@ -0,0 +1,306 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "raether.h"
+#include "ra_dbg_proc.h"
+
+#define MCSI_A_PMU_CTL		0x10390100	/* PMU CTRL */
+#define MCSI_A_PMU_CYC_CNT	0x10399004	/* Cycle counter */
+#define MCSI_A_PMU_CYC_CTL	0x10399008	/* Cycle counter CTRL */
+
+#define MCSI_A_PMU_EVN_SEL0	0x1039A000	/* EVENT SELECT 0 */
+#define MCSI_A_PMU_EVN_CNT0	0x1039A004	/* Event Count 0 */
+#define MCSI_A_PMU_EVN_CTL0	0x1039A008	/* Event Count control 0 */
+
+#define MCSI_A_PMU_EVN_SEL1	0x1039B000	/* EVENT SELECT 1 */
+#define MCSI_A_PMU_EVN_CNT1	0x1039B004	/* Event Count 1 */
+#define MCSI_A_PMU_EVN_CTL1	0x1039B008	/* Event Count control 1 */
+
+#define MCSI_A_PMU_EVN_SEL2	0x1039C000	/* EVENT SELECT 2 */
+#define MCSI_A_PMU_EVN_CNT2	0x1039C004	/* Event Count 2 */
+#define MCSI_A_PMU_EVN_CTL2	0x1039C008	/* Event Count control 2 */
+
+#define MCSI_A_PMU_EVN_SEL3	0x1039D000	/* EVENT SELECT 3 */
+#define MCSI_A_PMU_EVN_CNT3	0x1039D004	/* Event Count 3 */
+#define MCSI_A_PMU_EVN_CTL3	0x1039D008	/* Event Count control 3 */
+
+#define PMU_EVN_SEL_S0 (0x0 << 5)
+#define PMU_EVN_SEL_S1 (0x1 << 5)
+#define PMU_EVN_SEL_S2 (0x2 << 5)
+#define PMU_EVN_SEL_S3 (0x3 << 5)
+#define PMU_EVN_SEL_S4 (0x4 << 5)
+#define PMU_EVN_SEL_S5 (0x5 << 5)
+#define PMU_EVN_SEL_M0 (0x6 << 5)
+#define PMU_EVN_SEL_M1 (0x7 << 5)
+#define PMU_EVN_SEL_M2 (0x8 << 5)
+
+#define PMU_EVN_READ_ANY    0x0
+#define PMU_EVN_READ_SNOOP  0x3
+#define PMU_EVN_READ_HIT    0xA
+#define PMU_EVN_WRITE_ANY   0xC
+#define PMU_EVN_WU_SNOOP    0x10
+#define PMU_EVN_WLU_SNOOP   0x11
+
+#define PMU_0_SEL   (PMU_EVN_SEL_S2 | PMU_EVN_READ_SNOOP)
+#define PMU_1_SEL   (PMU_EVN_SEL_S2 | PMU_EVN_READ_HIT)
+#define PMU_2_SEL   (PMU_EVN_SEL_S4 | PMU_EVN_READ_SNOOP)
+#define PMU_3_SEL   (PMU_EVN_SEL_S4 | PMU_EVN_READ_HIT)
+
+#define MCSI_A_PMU_CTL_BASE	MCSI_A_PMU_CTL
+#define MCSI_A_PMU_CNT0_BASE	MCSI_A_PMU_EVN_SEL0
+#define MCSI_A_PMU_CNT1_BASE	MCSI_A_PMU_EVN_SEL1
+#define MCSI_A_PMU_CNT2_BASE	MCSI_A_PMU_EVN_SEL2
+#define MCSI_A_PMU_CNT3_BASE	MCSI_A_PMU_EVN_SEL3
+
+typedef int (*IOC_SET_FUNC) (int par1, int par2, int par3);
+static struct proc_dir_entry *proc_hw_io_coherent;
+
+unsigned int reg_pmu_evn_phys[] = {
+	MCSI_A_PMU_CNT0_BASE,
+	MCSI_A_PMU_CNT1_BASE,
+	MCSI_A_PMU_CNT2_BASE,
+	MCSI_A_PMU_CNT3_BASE,
+};
+
+int ioc_pmu_cnt_config(int pmu_no, int interface, int event)
+{
+	void *reg_pmu_cnt;
+	unsigned int pmu_sel;
+
+	reg_pmu_cnt = ioremap(reg_pmu_evn_phys[pmu_no], 0x10);
+
+	/* Event Select Register
+	 * bit[31:8]	-> Reserved
+	 * bit[7:5]	-> Event code to define which interface to monitor
+	 * bit[4:0]	-> Event code to define which event to monitor
+	 */
+	pmu_sel = (interface << 5) | event;
+	sys_reg_write(reg_pmu_cnt, pmu_sel);
+
+	/*Counter Control Registers
+	 * bit[31:1]	-> Reserved
+	 * bit[0:0]	-> Counter enable
+	 */
+	sys_reg_write(reg_pmu_cnt + 0x8, 0x1);
+
+	iounmap(reg_pmu_cnt);
+
+	return 0;
+}
+
+int ioc_pmu_ctl_config(int enable, int ignore1, int ignore2)
+{
+	void *reg_pmu_ctl;
+
+	reg_pmu_ctl = ioremap(MCSI_A_PMU_CTL_BASE, 0x10);
+
+	/*Performance Monitor Control Register
+	 * bit[31:16]	-> Reserved
+	 * bit[15:12]	-> Specifies the number of counters implemented
+	 * bit[11:6]	-> Reserved
+	 * bit[5:5]	-> DP: Disables cycle counter
+	 * bit[4:4]	-> EX: Enable export of the events to the event bus
+	 * bit[3:3]	-> CCD: Cycle count divider
+	 * bit[2:2]	-> CCR: Cycle counter reset
+	 * bit[1:1]	-> RST: Performance counter reset
+	 * bit[0:0]	-> CEN: Enable bit
+	 */
+	if (enable) {
+		sys_reg_write(reg_pmu_ctl, BIT(1));
+		sys_reg_write(reg_pmu_ctl, BIT(0));
+	} else {
+		sys_reg_write(reg_pmu_ctl, 0x0);
+	}
+
+	iounmap(reg_pmu_ctl);
+
+	return 0;
+}
+
+int ioc_set_usage(int ignore1, int ignore2, int ignore3)
+{
+	pr_info("<Usage> echo \"[OP Mode] [Arg1] [Arg2 | Arg3]\" > /proc/%s\n\r",
+		PROCREG_HW_IO_COHERENT);
+	pr_info("\tControl PMU counter: echo \"1 [Enable]\" > /proc/%s\n\r",
+		PROCREG_HW_IO_COHERENT);
+	pr_info("\t\t[Enable]:\n\r\t\t\t1: enable\n\r\t\t\t0: disable\n\r");
+	pr_info("\tConfigure PMU counter: echo \"2 [CNT No.] [IF] [EVN]\" > /proc/%s\n\r",
+		PROCREG_HW_IO_COHERENT);
+	pr_info("\t\t[CNT No.]: 0/1/2/3 PMU Counter\n\r");
+	pr_info("\t\t[IF]:\n\r");
+	pr_info("\t\t\t0: PMU_EVN_SEL_S0\n\r");
+	pr_info("\t\t\t1: PMU_EVN_SEL_S1\n\r");
+	pr_info("\t\t\t2: PMU_EVN_SEL_S2\n\r");
+	pr_info("\t\t\t3: PMU_EVN_SEL_S3\n\r");
+	pr_info("\t\t\t4: PMU_EVN_SEL_S4\n\r");
+	pr_info("\t\t\t5: PMU_EVN_SEL_S5\n\r");
+	pr_info("\t\t\t6: PMU_EVN_SEL_M0\n\r");
+	pr_info("\t\t\t7: PMU_EVN_SEL_M1\n\r");
+	pr_info("\t\t\t8: PMU_EVN_SEL_M2\n\r");
+	pr_info("\t\t[EVN]:\n\r");
+	pr_info("\t\t\t0: PMU_EVN_READ_ANY\n\r");
+	pr_info("\t\t\t3: PMU_EVN_READ_SNOOP\n\r");
+	pr_info("\t\t\tA: PMU_EVN_READ_HIT\n\r");
+	pr_info("\t\t\tC: PMU_EVN_WRITE_ANY\n\r");
+	pr_info("\t\t\t10: PMU_EVN_WU_SNOOP\n\r");
+	pr_info("\t\t\t11: PMU_EVN_WLU_SNOOP\n\r");
+
+	return 0;
+}
+
+static const IOC_SET_FUNC iocoherent_set_func[] = {
+	[0] = ioc_set_usage,
+	[1] = ioc_pmu_ctl_config,
+	[2] = ioc_pmu_cnt_config,
+};
+
+ssize_t ioc_pmu_write(struct file *file, const char __user *buffer,
+		      size_t count, loff_t *data)
+{
+	char buf[32];
+	char *p_buf;
+	size_t len = count;
+	long arg0 = 0, arg1 = 0, arg2 = 0, arg3 = 0;
+	char *p_token = NULL;
+	char *p_delimiter = " \t";
+	int ret;
+
+	if (len >= sizeof(buf)) {
+		pr_err("input handling fail!\n");
+		len = sizeof(buf) - 1;
+		return -1;
+	}
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	pr_info("write parameter data = %s\n\r", buf);
+
+	p_buf = buf;
+	p_token = strsep(&p_buf, p_delimiter);
+	if (!p_token)
+		arg0 = 0;
+	else
+		ret = kstrtol(p_token, 16, &arg0);
+
+	switch (arg0) {
+	case 1:
+		p_token = strsep(&p_buf, p_delimiter);
+		if (!p_token)
+			arg1 = 0;
+		else
+			ret = kstrtol(p_token, 16, &arg1);
+		break;
+	case 2:
+		p_token = strsep(&p_buf, p_delimiter);
+		if (!p_token)
+			arg1 = 0;
+		else
+			ret = kstrtol(p_token, 16, &arg1);
+		p_token = strsep(&p_buf, p_delimiter);
+		if (!p_token)
+			arg2 = 0;
+		else
+			ret = kstrtol(p_token, 16, &arg2);
+		p_token = strsep(&p_buf, p_delimiter);
+		if (!p_token)
+			arg3 = 0;
+		else
+			ret = kstrtol(p_token, 16, &arg3);
+		break;
+	}
+
+	if (arg0 >= 0 && arg0 < ARRAY_SIZE(iocoherent_set_func) &&
+	    iocoherent_set_func[arg0]) {
+		(*iocoherent_set_func[arg0]) (arg1, arg2, arg3);
+	} else {
+		pr_info("no handler defined for command id(0x%08lx)\n\r", arg0);
+		(*iocoherent_set_func[0]) (0, 0, 0);
+	}
+
+	return len;
+}
+
+int ioc_pmu_read(struct seq_file *seq, void *v)
+{
+	void __iomem *reg_virt_0, *reg_virt_1, *reg_virt_2, *reg_virt_3;
+
+	reg_virt_0 = ioremap(MCSI_A_PMU_EVN_SEL0, 0x10);
+	reg_virt_1 = ioremap(MCSI_A_PMU_EVN_SEL1, 0x10);
+	reg_virt_2 = ioremap(MCSI_A_PMU_EVN_SEL2, 0x10);
+	reg_virt_3 = ioremap(MCSI_A_PMU_EVN_SEL3, 0x10);
+
+	seq_printf(seq, "MCSI_A_PMU_EVN_SEL0 = 0x%x\n",
+		   sys_reg_read(reg_virt_0));
+	seq_printf(seq, "MCSI_A_PMU_EVN_CNT0 = 0x%x\n",
+		   sys_reg_read(reg_virt_0 + 0x4));
+	seq_printf(seq, "MCSI_A_PMU_EVN_CTL0 = 0x%x\n",
+		   sys_reg_read(reg_virt_0 + 0x8));
+	seq_printf(seq, "MCSI_A_PMU_EVN_SEL1 = 0x%x\n",
+		   sys_reg_read(reg_virt_1));
+	seq_printf(seq, "MCSI_A_PMU_EVN_CNT1 = 0x%x\n",
+		   sys_reg_read(reg_virt_1 + 0x4));
+	seq_printf(seq, "MCSI_A_PMU_EVN_CTL1 = 0x%x\n",
+		   sys_reg_read(reg_virt_1 + 0x8));
+
+	seq_printf(seq, "MCSI_A_PMU_EVN_SEL2 = 0x%x\n",
+		   sys_reg_read(reg_virt_2));
+	seq_printf(seq, "MCSI_A_PMU_EVN_CNT2 = 0x%x\n",
+		   sys_reg_read(reg_virt_2 + 0x4));
+	seq_printf(seq, "MCSI_A_PMU_EVN_CTL2 = 0x%x\n",
+		   sys_reg_read(reg_virt_2 + 0x8));
+
+	seq_printf(seq, "MCSI_A_PMU_EVN_SEL3 = 0x%x\n",
+		   sys_reg_read(reg_virt_3));
+	seq_printf(seq, "MCSI_A_PMU_EVN_CNT3 = 0x%x\n",
+		   sys_reg_read(reg_virt_3 + 0x4));
+	seq_printf(seq, "MCSI_A_PMU_EVN_CTL3 = 0x%x\n",
+		   sys_reg_read(reg_virt_3 + 0x8));
+
+	iounmap(reg_virt_0);
+	iounmap(reg_virt_1);
+	iounmap(reg_virt_2);
+	iounmap(reg_virt_3);
+	return 0;
+}
+
+static int ioc_pmu_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ioc_pmu_read, NULL);
+}
+
+static const struct file_operations ioc_pmu_fops = {
+	.owner = THIS_MODULE,
+	.open = ioc_pmu_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = ioc_pmu_write,
+	.release = single_release
+};
+
+void hwioc_debug_proc_init(struct proc_dir_entry *proc_reg_dir)
+{
+	proc_hw_io_coherent =
+	     proc_create(PROCREG_HW_IO_COHERENT, 0, proc_reg_dir,
+			 &ioc_pmu_fops);
+	if (!proc_hw_io_coherent)
+		pr_err("FAIL to create %s PROC!\n", PROCREG_HW_IO_COHERENT);
+}
+EXPORT_SYMBOL(hwioc_debug_proc_init);
+
+void hwioc_debug_proc_exit(struct proc_dir_entry *proc_reg_dir)
+{
+	if (proc_hw_io_coherent)
+		remove_proc_entry(PROCREG_HW_IO_COHERENT, proc_reg_dir);
+}
+EXPORT_SYMBOL(hwioc_debug_proc_exit);
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_dbg_hwlro.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_dbg_hwlro.c
new file mode 100644
index 0000000..1ecad66
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_dbg_hwlro.c
@@ -0,0 +1,629 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "raether.h"
+#include "raether_hwlro.h"
+#include "ra_dbg_proc.h"
+
+/* HW LRO proc */
+#define HW_LRO_RING_NUM 3
+#define MAX_HW_LRO_AGGR 64
+
+typedef int (*HWLRO_DBG_FUNC) (int par1, int par2);
+unsigned int hw_lro_agg_num_cnt[HW_LRO_RING_NUM][MAX_HW_LRO_AGGR + 1];
+unsigned int hw_lro_agg_size_cnt[HW_LRO_RING_NUM][16];
+unsigned int hw_lro_tot_agg_cnt[HW_LRO_RING_NUM];
+unsigned int hw_lro_tot_flush_cnt[HW_LRO_RING_NUM];
+
+/* HW LRO flush reason proc */
+#define HW_LRO_AGG_FLUSH        (1)
+#define HW_LRO_AGE_FLUSH        (2)
+#define HW_LRO_NOT_IN_SEQ_FLUSH (3)
+#define HW_LRO_TIMESTAMP_FLUSH  (4)
+#define HW_LRO_NON_RULE_FLUSH   (5)
+
+unsigned int hw_lro_agg_flush_cnt[HW_LRO_RING_NUM];
+unsigned int hw_lro_age_flush_cnt[HW_LRO_RING_NUM];
+unsigned int hw_lro_seq_flush_cnt[HW_LRO_RING_NUM];
+unsigned int hw_lro_timestamp_flush_cnt[HW_LRO_RING_NUM];
+unsigned int hw_lro_norule_flush_cnt[HW_LRO_RING_NUM];
+
+static struct proc_dir_entry *proc_rx_ring1, *proc_rx_ring2, *proc_rx_ring3;
+static struct proc_dir_entry *proc_hw_lro_stats, *proc_hw_lro_auto_tlb;
+
+int rx_lro_ring_read(struct seq_file *seq, void *v,
+		     struct PDMA_rxdesc *rx_ring_p)
+{
+	struct PDMA_rxdesc *rx_ring;
+	int i = 0;
+
+	rx_ring =
+	    kmalloc(sizeof(struct PDMA_rxdesc) * NUM_LRO_RX_DESC, GFP_KERNEL);
+	if (!rx_ring) {
+		seq_puts(seq, " allocate temp rx_ring fail.\n");
+		return 0;
+	}
+
+	for (i = 0; i < NUM_LRO_RX_DESC; i++)
+		memcpy(&rx_ring[i], &rx_ring_p[i], sizeof(struct PDMA_rxdesc));
+
+	for (i = 0; i < NUM_LRO_RX_DESC; i++) {
+		seq_printf(seq, "%d: %08x %08x %08x %08x\n", i,
+			   *(int *)&rx_ring[i].rxd_info1,
+			   *(int *)&rx_ring[i].rxd_info2,
+			   *(int *)&rx_ring[i].rxd_info3,
+			   *(int *)&rx_ring[i].rxd_info4);
+	}
+
+	kfree(rx_ring);
+	return 0;
+}
+
+int rx_ring1_read(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	rx_lro_ring_read(seq, v, ei_local->rx_ring[1]);
+
+	return 0;
+}
+
+int rx_ring2_read(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	rx_lro_ring_read(seq, v, ei_local->rx_ring[2]);
+
+	return 0;
+}
+
+int rx_ring3_read(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	rx_lro_ring_read(seq, v, ei_local->rx_ring[3]);
+
+	return 0;
+}
+
+static int rx_ring1_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rx_ring1_read, NULL);
+}
+
+static int rx_ring2_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rx_ring2_read, NULL);
+}
+
+static int rx_ring3_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rx_ring3_read, NULL);
+}
+
+static const struct file_operations rx_ring1_fops = {
+	.owner = THIS_MODULE,
+	.open = rx_ring1_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+static const struct file_operations rx_ring2_fops = {
+	.owner = THIS_MODULE,
+	.open = rx_ring2_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+static const struct file_operations rx_ring3_fops = {
+	.owner = THIS_MODULE,
+	.open = rx_ring3_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+static int hw_lro_len_update(unsigned int agg_size)
+{
+	int len_idx;
+
+	if (agg_size > 65000)
+		len_idx = 13;
+	else if (agg_size > 60000)
+		len_idx = 12;
+	else if (agg_size > 55000)
+		len_idx = 11;
+	else if (agg_size > 50000)
+		len_idx = 10;
+	else if (agg_size > 45000)
+		len_idx = 9;
+	else if (agg_size > 40000)
+		len_idx = 8;
+	else if (agg_size > 35000)
+		len_idx = 7;
+	else if (agg_size > 30000)
+		len_idx = 6;
+	else if (agg_size > 25000)
+		len_idx = 5;
+	else if (agg_size > 20000)
+		len_idx = 4;
+	else if (agg_size > 15000)
+		len_idx = 3;
+	else if (agg_size > 10000)
+		len_idx = 2;
+	else if (agg_size > 5000)
+		len_idx = 1;
+	else
+		len_idx = 0;
+
+	return len_idx;
+}
+
+void hw_lro_stats_update(unsigned int ring_num, struct PDMA_rxdesc *rx_ring)
+{
+	unsigned int agg_cnt = rx_ring->rxd_info2.LRO_AGG_CNT;
+	unsigned int agg_size = (rx_ring->rxd_info2.PLEN1 << 14) |
+				 rx_ring->rxd_info2.PLEN0;
+
+	if ((ring_num > 0) && (ring_num < 4)) {
+		hw_lro_agg_size_cnt[ring_num - 1]
+				   [hw_lro_len_update(agg_size)]++;
+		hw_lro_agg_num_cnt[ring_num - 1][agg_cnt]++;
+		hw_lro_tot_flush_cnt[ring_num - 1]++;
+		hw_lro_tot_agg_cnt[ring_num - 1] += agg_cnt;
+	}
+}
+
+void hw_lro_flush_stats_update(unsigned int ring_num,
+			       struct PDMA_rxdesc *rx_ring)
+{
+	unsigned int flush_reason = rx_ring->rxd_info2.REV;
+
+	if ((ring_num > 0) && (ring_num < 4)) {
+		if ((flush_reason & 0x7) == HW_LRO_AGG_FLUSH)
+			hw_lro_agg_flush_cnt[ring_num - 1]++;
+		else if ((flush_reason & 0x7) == HW_LRO_AGE_FLUSH)
+			hw_lro_age_flush_cnt[ring_num - 1]++;
+		else if ((flush_reason & 0x7) == HW_LRO_NOT_IN_SEQ_FLUSH)
+			hw_lro_seq_flush_cnt[ring_num - 1]++;
+		else if ((flush_reason & 0x7) == HW_LRO_TIMESTAMP_FLUSH)
+			hw_lro_timestamp_flush_cnt[ring_num - 1]++;
+		else if ((flush_reason & 0x7) == HW_LRO_NON_RULE_FLUSH)
+			hw_lro_norule_flush_cnt[ring_num - 1]++;
+	}
+}
+EXPORT_SYMBOL(hw_lro_flush_stats_update);
+
+ssize_t hw_lro_stats_write(struct file *file, const char __user *buffer,
+			   size_t count, loff_t *data)
+{
+	memset(hw_lro_agg_num_cnt, 0, sizeof(hw_lro_agg_num_cnt));
+	memset(hw_lro_agg_size_cnt, 0, sizeof(hw_lro_agg_size_cnt));
+	memset(hw_lro_tot_agg_cnt, 0, sizeof(hw_lro_tot_agg_cnt));
+	memset(hw_lro_tot_flush_cnt, 0, sizeof(hw_lro_tot_flush_cnt));
+	memset(hw_lro_agg_flush_cnt, 0, sizeof(hw_lro_agg_flush_cnt));
+	memset(hw_lro_age_flush_cnt, 0, sizeof(hw_lro_age_flush_cnt));
+	memset(hw_lro_seq_flush_cnt, 0, sizeof(hw_lro_seq_flush_cnt));
+	memset(hw_lro_timestamp_flush_cnt, 0,
+	       sizeof(hw_lro_timestamp_flush_cnt));
+	memset(hw_lro_norule_flush_cnt, 0, sizeof(hw_lro_norule_flush_cnt));
+
+	pr_info("clear hw lro cnt table\n");
+
+	return count;
+}
+
+int hw_lro_stats_read(struct seq_file *seq, void *v)
+{
+	int i;
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	seq_puts(seq, "HW LRO statistic dump:\n");
+
+	/* Agg number count */
+	seq_puts(seq, "Cnt:   RING1 | RING2 | RING3 | Total\n");
+	for (i = 0; i <= MAX_HW_LRO_AGGR; i++) {
+		seq_printf(seq, " %d :      %d        %d        %d        %d\n",
+			   i, hw_lro_agg_num_cnt[0][i],
+			   hw_lro_agg_num_cnt[1][i], hw_lro_agg_num_cnt[2][i],
+			   hw_lro_agg_num_cnt[0][i] + hw_lro_agg_num_cnt[1][i] +
+			   hw_lro_agg_num_cnt[2][i]);
+	}
+
+	/* Total agg count */
+	seq_puts(seq, "Total agg:   RING1 | RING2 | RING3 | Total\n");
+	seq_printf(seq, "                %d      %d      %d      %d\n",
+		   hw_lro_tot_agg_cnt[0], hw_lro_tot_agg_cnt[1],
+		   hw_lro_tot_agg_cnt[2],
+		   hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
+		   hw_lro_tot_agg_cnt[2]);
+
+	/* Total flush count */
+	seq_puts(seq, "Total flush:   RING1 | RING2 | RING3 | Total\n");
+	seq_printf(seq, "                %d      %d      %d      %d\n",
+		   hw_lro_tot_flush_cnt[0], hw_lro_tot_flush_cnt[1],
+		   hw_lro_tot_flush_cnt[2],
+		   hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
+		   hw_lro_tot_flush_cnt[2]);
+
+	/* Avg agg count */
+	seq_puts(seq, "Avg agg:   RING1 | RING2 | RING3 | Total\n");
+	seq_printf(seq, "                %d      %d      %d      %d\n",
+		   (hw_lro_tot_flush_cnt[0]) ? hw_lro_tot_agg_cnt[0] /
+		   hw_lro_tot_flush_cnt[0] : 0,
+		   (hw_lro_tot_flush_cnt[1]) ? hw_lro_tot_agg_cnt[1] /
+		   hw_lro_tot_flush_cnt[1] : 0,
+		   (hw_lro_tot_flush_cnt[2]) ? hw_lro_tot_agg_cnt[2] /
+		   hw_lro_tot_flush_cnt[2] : 0,
+		   (hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
+		    hw_lro_tot_flush_cnt[2]) ? ((hw_lro_tot_agg_cnt[0] +
+						 hw_lro_tot_agg_cnt[1] +
+						 hw_lro_tot_agg_cnt[2]) /
+						(hw_lro_tot_flush_cnt[0] +
+						 hw_lro_tot_flush_cnt[1] +
+						 hw_lro_tot_flush_cnt[2])) : 0);
+
+	/*  Statistics of aggregation size counts */
+	seq_puts(seq, "HW LRO flush pkt len:\n");
+	seq_puts(seq, " Length  | RING1  | RING2  | RING3  | Total\n");
+	for (i = 0; i < 15; i++) {
+		seq_printf(seq, "%d~%d: %d      %d      %d      %d\n", i * 5000,
+			   (i + 1) * 5000, hw_lro_agg_size_cnt[0][i],
+			   hw_lro_agg_size_cnt[1][i], hw_lro_agg_size_cnt[2][i],
+			   hw_lro_agg_size_cnt[0][i] +
+			   hw_lro_agg_size_cnt[1][i] +
+			   hw_lro_agg_size_cnt[2][i]);
+	}
+
+	/* CONFIG_RAETH_HW_LRO_REASON_DBG */
+	if (ei_local->features & FE_HW_LRO_DBG) {
+		seq_puts(seq, "Flush reason:   RING1 | RING2 | RING3 | Total\n");
+		seq_printf(seq, "AGG timeout:      %d      %d      %d      %d\n",
+			   hw_lro_agg_flush_cnt[0], hw_lro_agg_flush_cnt[1],
+			   hw_lro_agg_flush_cnt[2],
+			   (hw_lro_agg_flush_cnt[0] + hw_lro_agg_flush_cnt[1] +
+			    hw_lro_agg_flush_cnt[2])
+		    );
+		seq_printf(seq, "AGE timeout:      %d      %d      %d      %d\n",
+			   hw_lro_age_flush_cnt[0], hw_lro_age_flush_cnt[1],
+			   hw_lro_age_flush_cnt[2],
+			   (hw_lro_age_flush_cnt[0] + hw_lro_age_flush_cnt[1] +
+			    hw_lro_age_flush_cnt[2])
+		    );
+		seq_printf(seq, "Not in-sequence:  %d      %d      %d      %d\n",
+			   hw_lro_seq_flush_cnt[0], hw_lro_seq_flush_cnt[1],
+			   hw_lro_seq_flush_cnt[2],
+			   (hw_lro_seq_flush_cnt[0] + hw_lro_seq_flush_cnt[1] +
+			    hw_lro_seq_flush_cnt[2])
+		    );
+		seq_printf(seq, "Timestamp:        %d      %d      %d      %d\n",
+			   hw_lro_timestamp_flush_cnt[0],
+			   hw_lro_timestamp_flush_cnt[1],
+			   hw_lro_timestamp_flush_cnt[2],
+			   (hw_lro_timestamp_flush_cnt[0] +
+			    hw_lro_timestamp_flush_cnt[1] +
+			    hw_lro_timestamp_flush_cnt[2])
+		    );
+		seq_printf(seq, "No LRO rule:      %d      %d      %d      %d\n",
+			   hw_lro_norule_flush_cnt[0],
+			   hw_lro_norule_flush_cnt[1],
+			   hw_lro_norule_flush_cnt[2],
+			   (hw_lro_norule_flush_cnt[0] +
+			    hw_lro_norule_flush_cnt[1] +
+			    hw_lro_norule_flush_cnt[2])
+		    );
+	}
+
+	return 0;
+}
+
+static int hw_lro_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, hw_lro_stats_read, NULL);
+}
+
+static const struct file_operations hw_lro_stats_fops = {
+	.owner = THIS_MODULE,
+	.open = hw_lro_stats_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = hw_lro_stats_write,
+	.release = single_release
+};
+
+int hwlro_agg_cnt_ctrl(int par1, int par2)
+{
+	SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING1, par2);
+	SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING2, par2);
+	SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING3, par2);
+	return 0;
+}
+
+int hwlro_agg_time_ctrl(int par1, int par2)
+{
+	SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING1, par2);
+	SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING2, par2);
+	SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING3, par2);
+	return 0;
+}
+
+int hwlro_age_time_ctrl(int par1, int par2)
+{
+	SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, par2);
+	SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, par2);
+	SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, par2);
+	return 0;
+}
+
+int hwlro_threshold_ctrl(int par1, int par2)
+{
+	/* bandwidth threshold setting */
+	SET_PDMA_LRO_BW_THRESHOLD(par2);
+	return 0;
+}
+
+int hwlro_ring_enable_ctrl(int par1, int par2)
+{
+	if (!par2) {
+		pr_info("[hwlro_ring_enable_ctrl]Disable HW LRO rings\n");
+		SET_PDMA_RXRING_VALID(ADMA_RX_RING0, 0);
+		SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 0);
+		SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 0);
+		SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 0);
+	} else {
+		pr_info("[hwlro_ring_enable_ctrl]Enable HW LRO rings\n");
+		SET_PDMA_RXRING_VALID(ADMA_RX_RING0, 1);
+		SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
+		SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 1);
+		SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 1);
+	}
+
+	return 0;
+}
+
+static const HWLRO_DBG_FUNC hw_lro_dbg_func[] = {
+	[0] = hwlro_agg_cnt_ctrl,
+	[1] = hwlro_agg_time_ctrl,
+	[2] = hwlro_age_time_ctrl,
+	[3] = hwlro_threshold_ctrl,
+	[4] = hwlro_ring_enable_ctrl,
+};
+
+ssize_t hw_lro_auto_tlb_write(struct file *file, const char __user *buffer,
+			      size_t count, loff_t *data)
+{
+	char buf[32];
+	char *p_buf;
+	size_t len = count;
+	long x = 0, y = 0;
+	char *p_token = NULL;
+	char *p_delimiter = " \t";
+	int ret;
+
+	pr_info("[hw_lro_auto_tlb_write]write parameter len = %d\n\r",
+		(int)len);
+	if (len >= sizeof(buf)) {
+		pr_info("input handling fail!\n");
+		len = sizeof(buf) - 1;
+		return -1;
+	}
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	pr_info("[hw_lro_auto_tlb_write]write parameter data = %s\n\r", buf);
+
+	p_buf = buf;
+	p_token = strsep(&p_buf, p_delimiter);
+	if (!p_token)
+		x = 0;
+	else
+		ret = kstrtol(p_token, 10, &x);
+
+	p_token = strsep(&p_buf, "\t\n ");
+	if (p_token) {
+		ret = kstrtol(p_token, 10, &y);
+		pr_info("y = %ld\n\r", y);
+	}
+
+	if (x >= 0 && x < ARRAY_SIZE(hw_lro_dbg_func) &&
+	    hw_lro_dbg_func[x]) {
+		(*hw_lro_dbg_func[x]) (x, y);
+	}
+
+	return count;
+}
+
+void hw_lro_auto_tlb_dump(struct seq_file *seq, unsigned int index)
+{
+	int i;
+	struct PDMA_LRO_AUTO_TLB_INFO pdma_lro_auto_tlb;
+	unsigned int tlb_info[9];
+	unsigned int dw_len, cnt, priority;
+	unsigned int entry;
+
+	if (index > 4)
+		index = index - 1;
+	entry = (index * 9) + 1;
+
+	/* read valid entries of the auto-learn table */
+	sys_reg_write(PDMA_FE_ALT_CF8, entry);
+
+	/* seq_printf(seq, "\nEntry = %d\n", entry); */
+	for (i = 0; i < 9; i++) {
+		tlb_info[i] = sys_reg_read(PDMA_FE_ALT_SEQ_CFC);
+		/* seq_printf(seq, "tlb_info[%d] = 0x%x\n", i, tlb_info[i]); */
+	}
+	memcpy(&pdma_lro_auto_tlb, tlb_info,
+	       sizeof(struct PDMA_LRO_AUTO_TLB_INFO));
+
+	dw_len = pdma_lro_auto_tlb.auto_tlb_info7.DW_LEN;
+	cnt = pdma_lro_auto_tlb.auto_tlb_info6.CNT;
+
+	if (sys_reg_read(ADMA_LRO_CTRL_DW0) & PDMA_LRO_ALT_SCORE_MODE)
+		priority = cnt;		/* packet count */
+	else
+		priority = dw_len;	/* byte count */
+
+	/* dump valid entries of the auto-learn table */
+	if (index >= 4)
+		seq_printf(seq, "\n===== TABLE Entry: %d (Act) =====\n", index);
+	else
+		seq_printf(seq, "\n===== TABLE Entry: %d (LRU) =====\n", index);
+	if (pdma_lro_auto_tlb.auto_tlb_info8.IPV4) {
+		seq_printf(seq, "SIP = 0x%x:0x%x:0x%x:0x%x (IPv4)\n",
+			   pdma_lro_auto_tlb.auto_tlb_info4.SIP3,
+			   pdma_lro_auto_tlb.auto_tlb_info3.SIP2,
+			   pdma_lro_auto_tlb.auto_tlb_info2.SIP1,
+			   pdma_lro_auto_tlb.auto_tlb_info1.SIP0);
+	} else {
+		seq_printf(seq, "SIP = 0x%x:0x%x:0x%x:0x%x (IPv6)\n",
+			   pdma_lro_auto_tlb.auto_tlb_info4.SIP3,
+			   pdma_lro_auto_tlb.auto_tlb_info3.SIP2,
+			   pdma_lro_auto_tlb.auto_tlb_info2.SIP1,
+			   pdma_lro_auto_tlb.auto_tlb_info1.SIP0);
+	}
+	seq_printf(seq, "DIP_ID = %d\n",
+		   pdma_lro_auto_tlb.auto_tlb_info8.DIP_ID);
+	seq_printf(seq, "TCP SPORT = %d | TCP DPORT = %d\n",
+		   pdma_lro_auto_tlb.auto_tlb_info0.STP,
+		   pdma_lro_auto_tlb.auto_tlb_info0.DTP);
+	seq_printf(seq, "VLAN_VID_VLD = %d\n",
+		   pdma_lro_auto_tlb.auto_tlb_info6.VLAN_VID_VLD);
+	seq_printf(seq, "VLAN1 = %d | VLAN2 = %d | VLAN3 = %d | VLAN4 =%d\n",
+		   (pdma_lro_auto_tlb.auto_tlb_info5.VLAN_VID0 & 0xfff),
+		   ((pdma_lro_auto_tlb.auto_tlb_info5.VLAN_VID0 >> 12) & 0xfff),
+		   ((pdma_lro_auto_tlb.auto_tlb_info6.VLAN_VID1 << 8) |
+		   ((pdma_lro_auto_tlb.auto_tlb_info5.VLAN_VID0 >> 24)
+		     & 0xfff)),
+		   ((pdma_lro_auto_tlb.auto_tlb_info6.VLAN_VID1 >> 4) & 0xfff));
+	seq_printf(seq, "TPUT = %d | FREQ = %d\n", dw_len, cnt);
+	seq_printf(seq, "PRIORITY = %d\n", priority);
+}
+
+int hw_lro_auto_tlb_read(struct seq_file *seq, void *v)
+{
+	int i;
+	unsigned int reg_val;
+	unsigned int reg_op1, reg_op2, reg_op3, reg_op4;
+	unsigned int agg_cnt, agg_time, age_time;
+
+	seq_puts(seq, "Usage of /proc/mt76xx/hw_lro_auto_tlb:\n");
+	seq_puts(seq, "echo [function] [setting] > /proc/mt76xx/hw_lro_auto_tlb\n");
+	seq_puts(seq, "Functions:\n");
+	seq_puts(seq, "[0] = hwlro_agg_cnt_ctrl\n");
+	seq_puts(seq, "[1] = hwlro_agg_time_ctrl\n");
+	seq_puts(seq, "[2] = hwlro_age_time_ctrl\n");
+	seq_puts(seq, "[3] = hwlro_threshold_ctrl\n");
+	seq_puts(seq, "[4] = hwlro_ring_enable_ctrl\n\n");
+
+	/* Read valid entries of the auto-learn table */
+	sys_reg_write(PDMA_FE_ALT_CF8, 0);
+	reg_val = sys_reg_read(PDMA_FE_ALT_SEQ_CFC);
+
+	seq_printf(seq,
+		   "HW LRO Auto-learn Table: (PDMA_LRO_ALT_CFC_RSEQ_DBG=0x%x)\n",
+		   reg_val);
+
+	for (i = 7; i >= 0; i--) {
+		if (reg_val & (1 << i))
+			hw_lro_auto_tlb_dump(seq, i);
+	}
+
+	/* Read the agg_time/age_time/agg_cnt of LRO rings */
+	seq_puts(seq, "\nHW LRO Ring Settings\n");
+	for (i = 1; i <= 3; i++) {
+		reg_op1 = sys_reg_read(LRO_RX_RING0_CTRL_DW1 + (i * 0x40));
+		reg_op2 = sys_reg_read(LRO_RX_RING0_CTRL_DW2 + (i * 0x40));
+		reg_op3 = sys_reg_read(LRO_RX_RING0_CTRL_DW3 + (i * 0x40));
+		reg_op4 = sys_reg_read(ADMA_LRO_CTRL_DW2);
+		agg_cnt =
+		    ((reg_op3 & 0x03) << PDMA_LRO_AGG_CNT_H_OFFSET) |
+		    ((reg_op2 >> PDMA_LRO_RING_AGG_CNT1_OFFSET) & 0x3f);
+		agg_time = (reg_op2 >> PDMA_LRO_RING_AGG_OFFSET) & 0xffff;
+		age_time =
+		    ((reg_op2 & 0x03f) << PDMA_LRO_AGE_H_OFFSET) |
+		    ((reg_op1 >> PDMA_LRO_RING_AGE1_OFFSET) & 0x3ff);
+		seq_printf(seq,
+			   "Ring[%d]: MAX_AGG_CNT=%d, AGG_TIME=%d, AGE_TIME=%d, Threshold=%d\n",
+			   i, agg_cnt, agg_time, age_time, reg_op4);
+	}
+	seq_puts(seq, "\n");
+
+	return 0;
+}
+
+static int hw_lro_auto_tlb_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, hw_lro_auto_tlb_read, NULL);
+}
+
+static const struct file_operations hw_lro_auto_tlb_fops = {
+	.owner = THIS_MODULE,
+	.open = hw_lro_auto_tlb_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = hw_lro_auto_tlb_write,
+	.release = single_release
+};
+
+int hwlro_debug_proc_init(struct proc_dir_entry *proc_reg_dir)
+{
+	proc_rx_ring1 =
+	     proc_create(PROCREG_RXRING1, 0, proc_reg_dir, &rx_ring1_fops);
+	if (!proc_rx_ring1)
+		pr_info("!! FAIL to create %s PROC !!\n", PROCREG_RXRING1);
+
+	proc_rx_ring2 =
+	     proc_create(PROCREG_RXRING2, 0, proc_reg_dir, &rx_ring2_fops);
+	if (!proc_rx_ring2)
+		pr_info("!! FAIL to create %s PROC !!\n", PROCREG_RXRING2);
+
+	proc_rx_ring3 =
+	     proc_create(PROCREG_RXRING3, 0, proc_reg_dir, &rx_ring3_fops);
+	if (!proc_rx_ring3)
+		pr_info("!! FAIL to create %s PROC !!\n", PROCREG_RXRING3);
+
+	proc_hw_lro_stats =
+	     proc_create(PROCREG_HW_LRO_STATS, 0, proc_reg_dir,
+			 &hw_lro_stats_fops);
+	if (!proc_hw_lro_stats)
+		pr_info("!! FAIL to create %s PROC !!\n", PROCREG_HW_LRO_STATS);
+
+	proc_hw_lro_auto_tlb =
+	     proc_create(PROCREG_HW_LRO_AUTO_TLB, 0, proc_reg_dir,
+			 &hw_lro_auto_tlb_fops);
+	if (!proc_hw_lro_auto_tlb)
+		pr_info("!! FAIL to create %s PROC !!\n",
+			PROCREG_HW_LRO_AUTO_TLB);
+
+	return 0;
+}
+EXPORT_SYMBOL(hwlro_debug_proc_init);
+
+void hwlro_debug_proc_exit(struct proc_dir_entry *proc_reg_dir)
+{
+	if (proc_rx_ring1)
+		remove_proc_entry(PROCREG_RXRING1, proc_reg_dir);
+	if (proc_rx_ring2)
+		remove_proc_entry(PROCREG_RXRING2, proc_reg_dir);
+	if (proc_rx_ring3)
+		remove_proc_entry(PROCREG_RXRING3, proc_reg_dir);
+	if (proc_hw_lro_stats)
+		remove_proc_entry(PROCREG_HW_LRO_STATS, proc_reg_dir);
+	if (proc_hw_lro_auto_tlb)
+		remove_proc_entry(PROCREG_HW_LRO_AUTO_TLB, proc_reg_dir);
+}
+EXPORT_SYMBOL(hwlro_debug_proc_exit);
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_dbg_proc.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_dbg_proc.c
new file mode 100644
index 0000000..468dc84
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_dbg_proc.c
@@ -0,0 +1,1672 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "raether.h"
+#include "ra_dbg_proc.h"
+#include "ra_ethtool.h"
+
+int txd_cnt[MAX_SKB_FRAGS / 2 + 1];
+int tso_cnt[16];
+
+#define MAX_AGGR 64
+#define MAX_DESC  8
+int lro_stats_cnt[MAX_AGGR + 1];
+int lro_flush_cnt[MAX_AGGR + 1];
+int lro_len_cnt1[16];
+/* int lro_len_cnt2[16]; */
+int aggregated[MAX_DESC];
+int lro_aggregated;
+int lro_flushed;
+int lro_nodesc;
+int force_flush;
+int tot_called1;
+int tot_called2;
+
+struct raeth_int_t raeth_int;
+struct proc_dir_entry *proc_reg_dir;
+static struct proc_dir_entry *proc_gmac, *proc_sys_cp0, *proc_tx_ring,
+*proc_rx_ring, *proc_skb_free;
+static struct proc_dir_entry *proc_gmac2;
+static struct proc_dir_entry *proc_ra_snmp;
+static struct proc_dir_entry *proc_num_of_txd, *proc_tso_len;
+static struct proc_dir_entry *proc_sche;
+static struct proc_dir_entry *proc_int_dbg;
+static struct proc_dir_entry *proc_set_lan_ip;
+/*extern unsigned int M2Q_table[64];
+ * extern struct QDMA_txdesc *free_head;
+ * extern struct SFQ_table *sfq0;
+ * extern struct SFQ_table *sfq1;
+ * extern struct SFQ_table *sfq2;
+ * extern struct SFQ_table *sfq3;
+ */
+
+/* seq_file show callback: dump GDMA RX counters, flow-control
+ * configuration and per-port packet counters when the USER_SNMPD
+ * feature is enabled; emits nothing otherwise.
+ */
+static int ra_snmp_seq_show(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & USER_SNMPD) {
+		seq_printf(seq, "rx counters: %x %x %x %x %x %x %x\n",
+			   sys_reg_read(GDMA_RX_GBCNT0),
+			   sys_reg_read(GDMA_RX_GPCNT0),
+			   sys_reg_read(GDMA_RX_OERCNT0),
+			   sys_reg_read(GDMA_RX_FERCNT0),
+			   sys_reg_read(GDMA_RX_SERCNT0),
+			   sys_reg_read(GDMA_RX_LERCNT0),
+			   sys_reg_read(GDMA_RX_CERCNT0));
+		seq_printf(seq, "fc config: %x %x %p %x\n",
+			   sys_reg_read(CDMA_FC_CFG),
+			   sys_reg_read(GDMA1_FC_CFG),
+			   PDMA_FC_CFG, sys_reg_read(PDMA_FC_CFG));
+		seq_printf(seq, "ports: %x %x %x %x %x %x\n",
+			   sys_reg_read(PORT0_PKCOUNT),
+			   sys_reg_read(PORT1_PKCOUNT),
+			   sys_reg_read(PORT2_PKCOUNT),
+			   sys_reg_read(PORT3_PKCOUNT),
+			   sys_reg_read(PORT4_PKCOUNT),
+			   sys_reg_read(PORT5_PKCOUNT));
+	}
+
+	return 0;
+}
+
+/* proc open callback for the SNMP counter file.
+ * Always set up the seq_file: the show callback itself checks
+ * USER_SNMPD, so an open with the feature disabled simply yields an
+ * empty read. Previously this returned 0 without calling
+ * single_open(), leaving file->private_data NULL and causing a NULL
+ * dereference in the subsequent seq_read().
+ */
+static int ra_snmp_seq_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ra_snmp_seq_show, NULL);
+}
+
+/* Read-only seq_file proc ops for the SNMP counter dump. */
+static const struct file_operations ra_snmp_seq_fops = {
+	.owner = THIS_MODULE,
+	.open = ra_snmp_seq_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+/* Routine Name : get_ring_usage(mode, index)
+ * Description: calculate ring usage for tx/rx rings
+ * Mode 1 : Tx Ring
+ * Mode 2 : Rx Ring
+ */
+/* Return the number of in-use descriptors of a DMA ring.
+ * @mode: 1 = TX ring, 2 = RX ring
+ * @i:    ring index (only TX ring 0 is supported)
+ */
+int get_ring_usage(int mode, int i)
+{
+	unsigned long tx_ctx_idx, tx_dtx_idx, tx_usage;
+	unsigned long rx_calc_idx, rx_drx_idx, rx_usage;
+
+	struct PDMA_rxdesc *rxring;
+	struct PDMA_txdesc *txring;
+
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (mode == 2) {
+		/* cpu point to the next descriptor of rx dma ring */
+		rx_calc_idx = *(unsigned long *)RX_CALC_IDX0;
+		rx_drx_idx = *(unsigned long *)RX_DRX_IDX0;
+		rxring = (struct PDMA_rxdesc *)RX_BASE_PTR0;
+
+		rx_usage =
+		    (rx_drx_idx - rx_calc_idx - 1 + num_rx_desc) % num_rx_desc;
+		if (rx_calc_idx == rx_drx_idx) {
+			/* Bugfix: the full/empty disambiguation used to
+			 * assign tx_usage here, so the corrected value was
+			 * discarded and a stale rx_usage was returned.
+			 */
+			if (rxring[rx_drx_idx].rxd_info2.DDONE_bit == 1)
+				rx_usage = num_rx_desc;	/* ring full */
+			else
+				rx_usage = 0;		/* ring empty */
+		}
+		return rx_usage;
+	}
+
+	switch (i) {
+	case 0:
+		tx_ctx_idx = *(unsigned long *)TX_CTX_IDX0;
+		tx_dtx_idx = *(unsigned long *)TX_DTX_IDX0;
+		txring = ei_local->tx_ring0;
+		break;
+	default:
+		pr_debug("get_tx_idx failed %d %d\n", mode, i);
+		return 0;
+	}
+
+	tx_usage = (tx_ctx_idx - tx_dtx_idx + num_tx_desc) % num_tx_desc;
+	if (tx_ctx_idx == tx_dtx_idx) {
+		if (txring[tx_ctx_idx].txd_info2.DDONE_bit == 1)
+			tx_usage = 0;		/* ring empty */
+		else
+			tx_usage = num_tx_desc;	/* ring full */
+	}
+	return tx_usage;
+}
+
+/* Dump PDMA interrupt, delay-interrupt and TX/RX ring registers into
+ * @s. TX ring registers are read only when the device is not in QDMA
+ * mode (FE_QDMA clear); usage counters come from get_ring_usage().
+ * NOTE(review): only ring 0's usage is computed but four TX rings are
+ * printed — presumably intentional, confirm.
+ */
+void dump_reg(struct seq_file *s)
+{
+	int fe_int_enable;
+	int rx_usage;
+	int dly_int_cfg;
+	int rx_base_ptr0;
+	int rx_max_cnt0;
+	int rx_calc_idx0;
+	int rx_drx_idx0;
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	int tx_usage = 0;
+	int tx_base_ptr[4];
+	int tx_max_cnt[4];
+	int tx_ctx_idx[4];
+	int tx_dtx_idx[4];
+	int i;
+
+	fe_int_enable = sys_reg_read(FE_INT_ENABLE);
+	rx_usage = get_ring_usage(2, 0);
+
+	dly_int_cfg = sys_reg_read(DLY_INT_CFG);
+
+	if (!(ei_local->features & FE_QDMA)) {
+		tx_usage = get_ring_usage(1, 0);
+
+		tx_base_ptr[0] = sys_reg_read(TX_BASE_PTR0);
+		tx_max_cnt[0] = sys_reg_read(TX_MAX_CNT0);
+		tx_ctx_idx[0] = sys_reg_read(TX_CTX_IDX0);
+		tx_dtx_idx[0] = sys_reg_read(TX_DTX_IDX0);
+
+		tx_base_ptr[1] = sys_reg_read(TX_BASE_PTR1);
+		tx_max_cnt[1] = sys_reg_read(TX_MAX_CNT1);
+		tx_ctx_idx[1] = sys_reg_read(TX_CTX_IDX1);
+		tx_dtx_idx[1] = sys_reg_read(TX_DTX_IDX1);
+
+		tx_base_ptr[2] = sys_reg_read(TX_BASE_PTR2);
+		tx_max_cnt[2] = sys_reg_read(TX_MAX_CNT2);
+		tx_ctx_idx[2] = sys_reg_read(TX_CTX_IDX2);
+		tx_dtx_idx[2] = sys_reg_read(TX_DTX_IDX2);
+
+		tx_base_ptr[3] = sys_reg_read(TX_BASE_PTR3);
+		tx_max_cnt[3] = sys_reg_read(TX_MAX_CNT3);
+		tx_ctx_idx[3] = sys_reg_read(TX_CTX_IDX3);
+		tx_dtx_idx[3] = sys_reg_read(TX_DTX_IDX3);
+	}
+
+	rx_base_ptr0 = sys_reg_read(RX_BASE_PTR0);
+	rx_max_cnt0 = sys_reg_read(RX_MAX_CNT0);
+	rx_calc_idx0 = sys_reg_read(RX_CALC_IDX0);
+	rx_drx_idx0 = sys_reg_read(RX_DRX_IDX0);
+
+	seq_printf(s, "\n\nFE_INT_ENABLE  : 0x%08x\n", fe_int_enable);
+
+	if (!(ei_local->features & FE_QDMA))
+		seq_printf(s, "TxRing PktCnt: %d/%d\n", tx_usage, num_tx_desc);
+
+	seq_printf(s, "RxRing PktCnt: %d/%d\n\n", rx_usage, num_rx_desc);
+	seq_printf(s, "DLY_INT_CFG    : 0x%08x\n", dly_int_cfg);
+
+	if (!(ei_local->features & FE_QDMA)) {
+		for (i = 0; i < 4; i++) {
+			seq_printf(s, "TX_BASE_PTR%d   : 0x%08x\n", i,
+				   tx_base_ptr[i]);
+			seq_printf(s, "TX_MAX_CNT%d    : 0x%08x\n", i,
+				   tx_max_cnt[i]);
+			seq_printf(s, "TX_CTX_IDX%d	: 0x%08x\n", i,
+				   tx_ctx_idx[i]);
+			seq_printf(s, "TX_DTX_IDX%d	: 0x%08x\n", i,
+				   tx_dtx_idx[i]);
+		}
+	}
+
+	seq_printf(s, "RX_BASE_PTR0   : 0x%08x\n", rx_base_ptr0);
+	seq_printf(s, "RX_MAX_CNT0    : 0x%08x\n", rx_max_cnt0);
+	seq_printf(s, "RX_CALC_IDX0   : 0x%08x\n", rx_calc_idx0);
+	seq_printf(s, "RX_DRX_IDX0    : 0x%08x\n", rx_drx_idx0);
+
+	if (ei_local->features & FE_ETHTOOL)
+		seq_printf(s,
+			   "The current PHY address selected by ethtool is %d\n",
+			   get_current_phy_address());
+}
+
+/* seq_file show callback: render the frame-engine register dump. */
+int reg_read_main(struct seq_file *seq, void *v)
+{
+	(void)v;		/* iterator token unused for single_open */
+
+	dump_reg(seq);
+
+	return 0;
+}
+
+/* seq_file start: positions 0 .. num_tx_desc-1 are valid. */
+static void *seq_skb_free_start(struct seq_file *seq, loff_t *pos)
+{
+	if (*pos >= num_tx_desc)
+		return NULL;
+
+	return pos;
+}
+
+/* seq_file next: advance the index; stop past the last descriptor. */
+static void *seq_skb_free_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	++(*pos);
+
+	return (*pos < num_tx_desc) ? pos : NULL;
+}
+
+/* seq_file stop: no per-iteration resources to release. */
+static void seq_skb_free_stop(struct seq_file *seq, void *v)
+{
+	/* Nothing to do */
+}
+
+/* seq_file show: print one skb_free[] slot, raw, as "<idx>: <hex>".
+ * The pointer is type-punned to int for display — on 64-bit this
+ * prints only the low 32 bits; presumably intentional for debugging.
+ */
+static int seq_skb_free_show(struct seq_file *seq, void *v)
+{
+	int i = *(loff_t *)v;
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	seq_printf(seq, "%d: %08x\n", i, *(int *)&ei_local->skb_free[i]);
+
+	return 0;
+}
+
+/* seq iterator over the TX skb_free[] table. */
+static const struct seq_operations seq_skb_free_ops = {
+	.start = seq_skb_free_start,
+	.next = seq_skb_free_next,
+	.stop = seq_skb_free_stop,
+	.show = seq_skb_free_show
+};
+
+/* proc open: attach the skb_free seq iterator. */
+static int skb_free_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &seq_skb_free_ops);
+}
+
+/* Read-only proc ops for the skb_free table dump. */
+static const struct file_operations skb_free_fops = {
+	.owner = THIS_MODULE,
+	.open = skb_free_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release
+};
+
+/* seq_file show: dump QDMA state for chips with 64 physical queues
+ * (queues are banked 16 per QDMA_PAGE). Sections: free-queue counts,
+ * the two TX schedulers, per-queue config/rate limits, optional SFQ
+ * virtual-queue info, flow-control thresholds, FSM state and the
+ * M2Q mark-to-queue table. No-op (returns 0) when FE_QDMA is clear.
+ */
+int qdma_read_64queue(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & FE_QDMA) {
+		unsigned int temp, i;
+		unsigned int sw_fq, hw_fq;
+		unsigned int min_en, min_rate, max_en, max_rate, sch, weight;
+		unsigned int queue, tx_des_cnt, hw_resv, sw_resv, queue_head,
+		    queue_tail, queue_no;
+		struct net_device *dev = dev_raether;
+		/* Shadows the outer ei_local; same underlying device. */
+		struct END_DEVICE *ei_local = netdev_priv(dev);
+
+		seq_puts(seq, "==== General Information ====\n");
+		temp = sys_reg_read(QDMA_FQ_CNT);
+		sw_fq = (temp & 0xFFFF0000) >> 16;
+		hw_fq = (temp & 0x0000FFFF);
+		seq_printf(seq, "SW TXD: %d/%d; HW TXD: %d/%d\n", sw_fq,
+			   num_tx_desc, hw_fq, NUM_QDMA_PAGE);
+		seq_printf(seq, "SW TXD virtual start address: 0x%p\n",
+			   ei_local->txd_pool);
+		seq_printf(seq, "HW TXD virtual start address: 0x%p\n\n",
+			   free_head);
+
+		seq_puts(seq, "==== Scheduler Information ====\n");
+		temp = sys_reg_read(QDMA_TX_SCH);
+		max_en = (temp & 0x00000800) >> 11;
+		max_rate = (temp & 0x000007F0) >> 4;
+		/* rate field: 7-bit mantissa scaled by a decimal exponent */
+		for (i = 0; i < (temp & 0x0000000F); i++)
+			max_rate *= 10;
+		seq_printf(seq, "SCH1 rate control:%d. Rate is %dKbps.\n",
+			   max_en, max_rate);
+		max_en = (temp & 0x08000000) >> 27;
+		max_rate = (temp & 0x07F00000) >> 20;
+		for (i = 0; i < (temp & 0x000F0000); i++)
+			max_rate *= 10;
+		seq_printf(seq, "SCH2 rate control:%d. Rate is %dKbps.\n\n",
+			   max_en, max_rate);
+
+		seq_puts(seq, "==== Physical Queue Information ====\n");
+		sys_reg_write(QDMA_PAGE, 0);
+		for (queue = 0; queue < 64; queue++) {
+			/* select the register page for this queue bank */
+			if (queue < 16) {
+				sys_reg_write(QDMA_PAGE, 0);
+				queue_no = queue;
+			} else if (queue > 15 && queue <= 31) {
+				sys_reg_write(QDMA_PAGE, 1);
+				queue_no = queue % 16;
+			} else if (queue > 31 && queue <= 47) {
+				sys_reg_write(QDMA_PAGE, 2);
+				queue_no = queue % 32;
+			} else if (queue > 47 && queue <= 63) {
+				sys_reg_write(QDMA_PAGE, 3);
+				queue_no = queue % 48;
+			}
+
+			temp = sys_reg_read(QTX_CFG_0 + 0x10 * queue_no);
+			tx_des_cnt = (temp & 0xffff0000) >> 16;
+			hw_resv = (temp & 0xff00) >> 8;
+			sw_resv = (temp & 0xff);
+			temp = sys_reg_read(QTX_CFG_0 + (0x10 * queue_no) + 0x4);
+			sch = (temp >> 31) + 1;
+			min_en = (temp & 0x8000000) >> 27;
+			min_rate = (temp & 0x7f00000) >> 20;
+			for (i = 0; i < (temp & 0xf0000) >> 16; i++)
+				min_rate *= 10;
+			max_en = (temp & 0x800) >> 11;
+			max_rate = (temp & 0x7f0) >> 4;
+			for (i = 0; i < (temp & 0xf); i++)
+				max_rate *= 10;
+			weight = (temp & 0xf000) >> 12;
+			queue_head = sys_reg_read(QTX_HEAD_0 + 0x10 * queue_no);
+			queue_tail = sys_reg_read(QTX_TAIL_0 + 0x10 * queue_no);
+
+			seq_printf(seq, "Queue#%d Information:\n", queue);
+			seq_printf(seq,
+				   "%d packets in the queue; head address is 0x%08x, tail address is 0x%08x.\n",
+				   tx_des_cnt, queue_head, queue_tail);
+			seq_printf(seq,
+				   "HW_RESV: %d; SW_RESV: %d; SCH: %d; Weighting: %d\n",
+				   hw_resv, sw_resv, sch, weight);
+			seq_printf(seq,
+				   "Min_Rate_En is %d, Min_Rate is %dKbps; Max_Rate_En is %d, Max_Rate is %dKbps.\n\n",
+				   min_en, min_rate, max_en, max_rate);
+		}
+		if (ei_local->features & FE_HW_SFQ) {
+			seq_puts(seq, "==== Virtual Queue Information ====\n");
+			seq_printf(seq,
+				   "VQTX_TB_BASE_0:0x%p;VQTX_TB_BASE_1:0x%p;VQTX_TB_BASE_2:0x%p;VQTX_TB_BASE_3:0x%p\n",
+				   sfq0, sfq1, sfq2, sfq3);
+			temp = sys_reg_read(VQTX_NUM);
+			seq_printf(seq,
+				   "VQTX_NUM_0:0x%01x;VQTX_NUM_1:0x%01x;VQTX_NUM_2:0x%01x;VQTX_NUM_3:0x%01x\n\n",
+				   temp & 0xF, (temp & 0xF0) >> 4,
+				   (temp & 0xF00) >> 8, (temp & 0xF000) >> 12);
+		}
+
+		seq_puts(seq, "==== Flow Control Information ====\n");
+		temp = sys_reg_read(QDMA_FC_THRES);
+		seq_printf(seq,
+			   "SW_DROP_EN:%x; SW_DROP_FFA:%d; SW_DROP_MODE:%d\n",
+			   (temp & 0x1000000) >> 24, (temp & 0x2000000) >> 25,
+			   (temp & 0x30000000) >> 28);
+		seq_printf(seq,
+			   "WH_DROP_EN:%x; HW_DROP_FFA:%d; HW_DROP_MODE:%d\n",
+			   (temp & 0x10000) >> 16, (temp & 0x20000) >> 17,
+			   (temp & 0x300000) >> 20);
+		seq_printf(seq, "SW_DROP_FSTVQ_MODE:%d;SW_DROP_FSTVQ:%d\n",
+			   (temp & 0xC0000000) >> 30,
+			   (temp & 0x08000000) >> 27);
+		seq_printf(seq, "HW_DROP_FSTVQ_MODE:%d;HW_DROP_FSTVQ:%d\n",
+			   (temp & 0xC00000) >> 22, (temp & 0x080000) >> 19);
+
+		seq_puts(seq, "\n==== FSM Information\n");
+		temp = sys_reg_read(QDMA_DMA);
+		seq_printf(seq, "VQTB_FSM:0x%01x\n", (temp & 0x0F000000) >> 24);
+		seq_printf(seq, "FQ_FSM:0x%01x\n", (temp & 0x000F0000) >> 16);
+		seq_printf(seq, "TX_FSM:0x%01x\n", (temp & 0x00000F00) >> 8);
+		seq_printf(seq, "RX_FSM:0x%01x\n\n", (temp & 0x0000000f));
+
+		seq_puts(seq, "==== M2Q Information ====\n");
+		for (i = 0; i < 64; i += 8) {
+			seq_printf(seq,
+				   " (%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)\n",
+				   i, M2Q_table[i], i + 1, M2Q_table[i + 1],
+				   i + 2, M2Q_table[i + 2], i + 3,
+				   M2Q_table[i + 3], i + 4, M2Q_table[i + 4],
+				   i + 5, M2Q_table[i + 5], i + 6,
+				   M2Q_table[i + 6], i + 7, M2Q_table[i + 7]);
+		}
+
+		/* NOTE(review): QDMA_PAGE is left at the last selected page
+		 * (3) here, whereas pse_qdma_drop_cnt() restores page 0 after
+		 * a similar walk — confirm whether it should be restored.
+		 */
+		return 0;
+	} else {
+		return 0;
+	}
+}
+
+/* seq_file show: dump QDMA state for chips with 16 physical queues.
+ * Near-duplicate of qdma_read_64queue() without the QDMA_PAGE bank
+ * switching. No-op (returns 0) when FE_QDMA is clear.
+ */
+int qdma_read(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & FE_QDMA) {
+		unsigned int temp, i;
+		unsigned int sw_fq, hw_fq;
+		unsigned int min_en, min_rate, max_en, max_rate, sch, weight;
+		unsigned int queue, tx_des_cnt, hw_resv, sw_resv, queue_head,
+		    queue_tail;
+		struct net_device *dev = dev_raether;
+		/* Shadows the outer ei_local; same underlying device. */
+		struct END_DEVICE *ei_local = netdev_priv(dev);
+
+		seq_puts(seq, "==== General Information ====\n");
+		temp = sys_reg_read(QDMA_FQ_CNT);
+		sw_fq = (temp & 0xFFFF0000) >> 16;
+		hw_fq = (temp & 0x0000FFFF);
+		seq_printf(seq, "SW TXD: %d/%d; HW TXD: %d/%d\n", sw_fq,
+			   num_tx_desc, hw_fq, NUM_QDMA_PAGE);
+		seq_printf(seq, "SW TXD virtual start address: 0x%p\n",
+			   ei_local->txd_pool);
+		seq_printf(seq, "HW TXD virtual start address: 0x%p\n\n",
+			   free_head);
+
+		seq_puts(seq, "==== Scheduler Information ====\n");
+		temp = sys_reg_read(QDMA_TX_SCH);
+		max_en = (temp & 0x00000800) >> 11;
+		max_rate = (temp & 0x000007F0) >> 4;
+		/* rate field: 7-bit mantissa scaled by a decimal exponent */
+		for (i = 0; i < (temp & 0x0000000F); i++)
+			max_rate *= 10;
+		seq_printf(seq, "SCH1 rate control:%d. Rate is %dKbps.\n",
+			   max_en, max_rate);
+		max_en = (temp & 0x08000000) >> 27;
+		max_rate = (temp & 0x07F00000) >> 20;
+		for (i = 0; i < (temp & 0x000F0000); i++)
+			max_rate *= 10;
+		seq_printf(seq, "SCH2 rate control:%d. Rate is %dKbps.\n\n",
+			   max_en, max_rate);
+
+		seq_puts(seq, "==== Physical Queue Information ====\n");
+		for (queue = 0; queue < 16; queue++) {
+			temp = sys_reg_read(QTX_CFG_0 + 0x10 * queue);
+			tx_des_cnt = (temp & 0xffff0000) >> 16;
+			hw_resv = (temp & 0xff00) >> 8;
+			sw_resv = (temp & 0xff);
+			temp = sys_reg_read(QTX_CFG_0 + (0x10 * queue) + 0x4);
+			sch = (temp >> 31) + 1;
+			min_en = (temp & 0x8000000) >> 27;
+			min_rate = (temp & 0x7f00000) >> 20;
+			for (i = 0; i < (temp & 0xf0000) >> 16; i++)
+				min_rate *= 10;
+			max_en = (temp & 0x800) >> 11;
+			max_rate = (temp & 0x7f0) >> 4;
+			for (i = 0; i < (temp & 0xf); i++)
+				max_rate *= 10;
+			weight = (temp & 0xf000) >> 12;
+			queue_head = sys_reg_read(QTX_HEAD_0 + 0x10 * queue);
+			queue_tail = sys_reg_read(QTX_TAIL_0 + 0x10 * queue);
+
+			seq_printf(seq, "Queue#%d Information:\n", queue);
+			seq_printf(seq,
+				   "%d packets in the queue; head address is 0x%08x, tail address is 0x%08x.\n",
+				   tx_des_cnt, queue_head, queue_tail);
+			seq_printf(seq,
+				   "HW_RESV: %d; SW_RESV: %d; SCH: %d; Weighting: %d\n",
+				   hw_resv, sw_resv, sch, weight);
+			seq_printf(seq,
+				   "Min_Rate_En is %d, Min_Rate is %dKbps; Max_Rate_En is %d, Max_Rate is %dKbps.\n\n",
+				   min_en, min_rate, max_en, max_rate);
+		}
+		if (ei_local->features & FE_HW_SFQ) {
+			seq_puts(seq, "==== Virtual Queue Information ====\n");
+			seq_printf(seq,
+				   "VQTX_TB_BASE_0:0x%p;VQTX_TB_BASE_1:0x%p;VQTX_TB_BASE_2:0x%p;VQTX_TB_BASE_3:0x%p\n",
+				   sfq0, sfq1, sfq2, sfq3);
+			temp = sys_reg_read(VQTX_NUM);
+			seq_printf(seq,
+				   "VQTX_NUM_0:0x%01x;VQTX_NUM_1:0x%01x;VQTX_NUM_2:0x%01x;VQTX_NUM_3:0x%01x\n\n",
+				   temp & 0xF, (temp & 0xF0) >> 4,
+				   (temp & 0xF00) >> 8, (temp & 0xF000) >> 12);
+		}
+
+		seq_puts(seq, "==== Flow Control Information ====\n");
+		temp = sys_reg_read(QDMA_FC_THRES);
+		seq_printf(seq,
+			   "SW_DROP_EN:%x; SW_DROP_FFA:%d; SW_DROP_MODE:%d\n",
+			   (temp & 0x1000000) >> 24, (temp & 0x2000000) >> 25,
+			   (temp & 0x30000000) >> 28);
+		seq_printf(seq,
+			   "WH_DROP_EN:%x; HW_DROP_FFA:%d; HW_DROP_MODE:%d\n",
+			   (temp & 0x10000) >> 16, (temp & 0x20000) >> 17,
+			   (temp & 0x300000) >> 20);
+		seq_printf(seq, "SW_DROP_FSTVQ_MODE:%d;SW_DROP_FSTVQ:%d\n",
+			   (temp & 0xC0000000) >> 30,
+			   (temp & 0x08000000) >> 27);
+		seq_printf(seq, "HW_DROP_FSTVQ_MODE:%d;HW_DROP_FSTVQ:%d\n",
+			   (temp & 0xC00000) >> 22, (temp & 0x080000) >> 19);
+
+		seq_puts(seq, "\n==== FSM Information\n");
+		temp = sys_reg_read(QDMA_DMA);
+		seq_printf(seq, "VQTB_FSM:0x%01x\n", (temp & 0x0F000000) >> 24);
+		seq_printf(seq, "FQ_FSM:0x%01x\n", (temp & 0x000F0000) >> 16);
+		seq_printf(seq, "TX_FSM:0x%01x\n", (temp & 0x00000F00) >> 8);
+		seq_printf(seq, "RX_FSM:0x%01x\n\n", (temp & 0x0000000f));
+
+		seq_puts(seq, "==== M2Q Information ====\n");
+		for (i = 0; i < 64; i += 8) {
+			seq_printf(seq,
+				   " (%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)\n",
+				   i, M2Q_table[i], i + 1, M2Q_table[i + 1],
+				   i + 2, M2Q_table[i + 2], i + 3,
+				   M2Q_table[i + 3], i + 4, M2Q_table[i + 4],
+				   i + 5, M2Q_table[i + 5], i + 6,
+				   M2Q_table[i + 6], i + 7, M2Q_table[i + 7]);
+		}
+
+		return 0;
+	} else {
+		return 0;
+	}
+}
+
+/* proc open: attach the 16-queue QDMA dump. */
+static int qdma_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, qdma_read, NULL);
+}
+
+/* Read-only proc ops for the QDMA state dump. */
+static const struct file_operations qdma_fops = {
+	.owner = THIS_MODULE,
+	.open = qdma_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+/* seq_file show: snapshot TX ring 0 into a temporary buffer, then
+ * print each descriptor's four info words in hex. Returns 0 even when
+ * the temporary allocation fails (the read just yields no output).
+ */
+int tx_ring_read(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	struct PDMA_txdesc *tx_ring;
+	int i = 0;
+
+	tx_ring = kmalloc_array(num_tx_desc, sizeof(*tx_ring), GFP_KERNEL);
+
+	if (!tx_ring)
+		/*seq_puts(seq, " allocate temp tx_ring fail.\n"); */
+		return 0;
+
+	/* copy first so printing does not race the live DMA ring */
+	for (i = 0; i < num_tx_desc; i++)
+		tx_ring[i] = ei_local->tx_ring0[i];
+
+	for (i = 0; i < num_tx_desc; i++) {
+		seq_printf(seq, "%d: %08x %08x %08x %08x\n", i,
+			   *(int *)&tx_ring[i].txd_info1,
+			   *(int *)&tx_ring[i].txd_info2,
+			   *(int *)&tx_ring[i].txd_info3,
+			   *(int *)&tx_ring[i].txd_info4);
+	}
+
+	kfree(tx_ring);
+	return 0;
+}
+
+/* proc open: pick the dump routine by DMA mode — PDMA ring dump when
+ * FE_QDMA is clear, otherwise the 64-queue variant on MT7622 and the
+ * 16-queue variant elsewhere. The final else is unreachable (the two
+ * FE_QDMA tests are exhaustive) and kept for defensiveness.
+ */
+static int tx_ring_open(struct inode *inode, struct file *file)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (!(ei_local->features & FE_QDMA)) {
+		return single_open(file, tx_ring_read, NULL);
+	} else if (ei_local->features & FE_QDMA) {
+		if (ei_local->chip_name == MT7622_FE)
+			return single_open(file, qdma_read_64queue, NULL);
+		else
+			return single_open(file, qdma_read, NULL);
+	} else {
+		return 0;
+	}
+}
+
+/* Read-only proc ops for the TX ring / QDMA dump. */
+static const struct file_operations tx_ring_fops = {
+	.owner = THIS_MODULE,
+	.open = tx_ring_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+/* seq_file show: snapshot RX ring 0 into a temporary buffer, then
+ * print each descriptor's four info words in hex. Returns 0 even when
+ * the temporary allocation fails (the read just yields no output).
+ */
+int rx_ring_read(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	struct PDMA_rxdesc *rx_ring;
+	int i = 0;
+
+	rx_ring = kmalloc_array(num_rx_desc, sizeof(*rx_ring), GFP_KERNEL);
+	if (!rx_ring)
+		/*seq_puts(seq, " allocate temp rx_ring fail.\n"); */
+		return 0;
+
+	/* copy first so printing does not race the live DMA ring */
+	for (i = 0; i < num_rx_desc; i++) {
+		memcpy(&rx_ring[i], &ei_local->rx_ring[0][i],
+		       sizeof(struct PDMA_rxdesc));
+	}
+
+	for (i = 0; i < num_rx_desc; i++) {
+		seq_printf(seq, "%d: %08x %08x %08x %08x\n", i,
+			   *(int *)&rx_ring[i].rxd_info1,
+			   *(int *)&rx_ring[i].rxd_info2,
+			   *(int *)&rx_ring[i].rxd_info3,
+			   *(int *)&rx_ring[i].rxd_info4);
+	}
+
+	kfree(rx_ring);
+	return 0;
+}
+
+/* proc open: attach the RX ring 0 dump. */
+static int rx_ring_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rx_ring_read, NULL);
+}
+
+/* Read-only proc ops for the RX ring dump. */
+static const struct file_operations rx_ring_fops = {
+	.owner = THIS_MODULE,
+	.open = rx_ring_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+/* Account one TSO transmit in the per-TXD-count histogram.
+ * @num_of_txd: number of TX descriptors the skb consumed.
+ * Bugfix: bound-check the index — txd_cnt[] has only
+ * MAX_SKB_FRAGS / 2 + 1 slots, so an out-of-range count previously
+ * wrote past the end of the array.
+ */
+int num_of_txd_update(int num_of_txd)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if ((ei_local->features & FE_TSO) &&
+	    num_of_txd >= 0 && num_of_txd < (int)ARRAY_SIZE(txd_cnt))
+		txd_cnt[num_of_txd]++;
+	return 0;
+}
+
+/* seq_file start for the TXD-count histogram: prints the header and
+ * yields positions 0 .. MAX_SKB_FRAGS/2; empty when FE_TSO is off.
+ */
+static void *seq_tso_txd_num_start(struct seq_file *seq, loff_t *pos)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & FE_TSO) {
+		seq_puts(seq, "TXD | Count\n");
+		if (*pos < (MAX_SKB_FRAGS / 2 + 1))
+			return pos;
+	}
+	return NULL;
+}
+
+/* seq_file next: advance the histogram index; stop past the last slot
+ * or when FE_TSO is off.
+ */
+static void *seq_tso_txd_num_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & FE_TSO) {
+		(*pos)++;
+		if (*pos >= (MAX_SKB_FRAGS / 2 + 1))
+			return NULL;
+		return pos;
+	} else {
+		return NULL;
+	}
+}
+
+/* seq_file stop: no per-iteration resources to release. */
+static void seq_tso_txd_num_stop(struct seq_file *seq, void *v)
+{
+	/* Nothing to do */
+}
+
+/* seq_file show: print one histogram slot as "<txd count>: <hits>". */
+static int seq_tso_txd_num_show(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & FE_TSO) {
+		int i = *(loff_t *)v;
+
+		seq_printf(seq, "%d: %d\n", i, txd_cnt[i]);
+	}
+	return 0;
+}
+
+/* proc write handler: any write clears the TXD-count histogram.
+ * The written bytes themselves are ignored. Returns @count on success;
+ * returns 0 when FE_TSO is off (caller will see a zero-length write).
+ */
+ssize_t num_of_txd_write(struct file *file, const char __user *buffer,
+			 size_t count, loff_t *data)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & FE_TSO) {
+		memset(txd_cnt, 0, sizeof(txd_cnt));
+		pr_debug("clear txd cnt table\n");
+		return count;
+	} else {
+		return 0;
+	}
+}
+
+/* Account one TSO transmit in the length histogram.
+ * @tso_len: total TSO payload length in bytes.
+ * Buckets are 5000 bytes wide: slot 0 covers lengths <= 5000,
+ * slot k (1..13) covers 5000*k+1 .. 5000*(k+1), slot 14 covers
+ * everything above 70000. Always returns 0.
+ */
+int tso_len_update(int tso_len)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	int bucket;
+
+	if (ei_local->features & FE_TSO) {
+		if (tso_len <= 5000)
+			bucket = 0;
+		else if (tso_len > 70000)
+			bucket = 14;
+		else
+			bucket = (tso_len - 1) / 5000;
+
+		tso_cnt[bucket]++;
+	}
+	return 0;
+}
+
+/* proc write handler: any write clears the TSO length histogram.
+ * The written bytes themselves are ignored. Returns @count on success;
+ * returns 0 when FE_TSO is off (caller will see a zero-length write).
+ */
+ssize_t tso_len_write(struct file *file, const char __user *buffer,
+		      size_t count, loff_t *data)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & FE_TSO) {
+		memset(tso_cnt, 0, sizeof(tso_cnt));
+		pr_debug("clear tso cnt table\n");
+		return count;
+	} else {
+		return 0;
+	}
+}
+
+/* seq_file start for the TSO length histogram: prints the header and
+ * yields positions 0..14 (the 15 buckets); empty when FE_TSO is off.
+ */
+static void *seq_tso_len_start(struct seq_file *seq, loff_t *pos)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & FE_TSO) {
+		seq_puts(seq, " Length  | Count\n");
+		if (*pos < 15)
+			return pos;
+	}
+	return NULL;
+}
+
+/* seq_file next: advance the bucket index; stop past bucket 14 or when
+ * FE_TSO is off.
+ */
+static void *seq_tso_len_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & FE_TSO) {
+		(*pos)++;
+		if (*pos >= 15)
+			return NULL;
+		return pos;
+	} else {
+		return NULL;
+	}
+}
+
+/* seq_file stop: no per-iteration resources to release. */
+static void seq_tso_len_stop(struct seq_file *seq, void *v)
+{
+	/* Nothing to do */
+}
+
+/* seq_file show: print one bucket as "<low>~<high>: <hits>", where the
+ * range label is bucket_index * 5000 to (bucket_index + 1) * 5000.
+ */
+static int seq_tso_len_show(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & FE_TSO) {
+		int i = *(loff_t *)v;
+
+		seq_printf(seq, "%d~%d: %d\n", i * 5000, (i + 1) * 5000,
+			   tso_cnt[i]);
+	}
+	return 0;
+}
+
+/* seq iterator over the TXD-count histogram. */
+static const struct seq_operations seq_tso_txd_num_ops = {
+	.start = seq_tso_txd_num_start,
+	.next = seq_tso_txd_num_next,
+	.stop = seq_tso_txd_num_stop,
+	.show = seq_tso_txd_num_show
+};
+
+/* proc open: attach the TXD-count histogram iterator. */
+static int tso_txd_num_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &seq_tso_txd_num_ops);
+}
+
+/* proc ops for the TXD-count histogram; writing clears the table. */
+static const struct file_operations tso_txd_num_fops = {
+	.owner = THIS_MODULE,
+	.open = tso_txd_num_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = num_of_txd_write,
+	.release = seq_release
+};
+
+/* seq iterator over the TSO length histogram. */
+static const struct seq_operations seq_tso_len_ops = {
+	.start = seq_tso_len_start,
+	.next = seq_tso_len_next,
+	.stop = seq_tso_len_stop,
+	.show = seq_tso_len_show
+};
+
+/* proc open: attach the TSO length histogram iterator. */
+static int tso_len_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &seq_tso_len_ops);
+}
+
+/* proc ops for the TSO length histogram; writing clears the table. */
+static const struct file_operations tso_len_fops = {
+	.owner = THIS_MODULE,
+	.open = tso_len_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = tso_len_write,
+	.release = seq_release
+};
+
+static struct proc_dir_entry *proc_esw_cnt;
+static struct proc_dir_entry *proc_eth_cnt;
+
+/* Dump the internal gigabit switch per-port MIB counters (ports 0-6)
+ * into @seq. Each DUMP_EACH_PORT() call prints one counter register
+ * for every port; the offsets are the switch MIB register addresses.
+ */
+void internal_gsw_cnt_read(struct seq_file *seq)
+{
+	unsigned int pkt_cnt = 0;
+	int i = 0;
+
+	seq_printf(seq,
+		   "===================== %8s %8s %8s %8s %8s %8s %8s\n",
+		   "Port0", "Port1", "Port2", "Port3", "Port4",
+		   "Port5", "Port6");
+	seq_puts(seq, "Tx Drop Packet      :");
+	DUMP_EACH_PORT(0x4000);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Tx CRC Error        :");
+	DUMP_EACH_PORT(0x4004);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Tx Unicast Packet   :");
+	DUMP_EACH_PORT(0x4008);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Tx Multicast Packet :");
+	DUMP_EACH_PORT(0x400C);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Tx Broadcast Packet :");
+	DUMP_EACH_PORT(0x4010);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Tx Collision Event  :");
+	DUMP_EACH_PORT(0x4014);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Tx Pause Packet     :");
+	DUMP_EACH_PORT(0x402C);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Rx Drop Packet      :");
+	DUMP_EACH_PORT(0x4060);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Rx Filtering Packet :");
+	DUMP_EACH_PORT(0x4064);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Rx Unicast Packet   :");
+	DUMP_EACH_PORT(0x4068);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Rx Multicast Packet :");
+	DUMP_EACH_PORT(0x406C);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Rx Broadcast Packet :");
+	DUMP_EACH_PORT(0x4070);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Rx Alignment Error  :");
+	DUMP_EACH_PORT(0x4074);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Rx CRC Error     :");
+	DUMP_EACH_PORT(0x4078);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Rx Undersize Error  :");
+	DUMP_EACH_PORT(0x407C);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Rx Fragment Error   :");
+	DUMP_EACH_PORT(0x4080);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Rx Oversize Error   :");
+	DUMP_EACH_PORT(0x4084);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Rx Jabber Error     :");
+	DUMP_EACH_PORT(0x4088);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Rx Pause Packet     :");
+	DUMP_EACH_PORT(0x408C);
+	/* NOTE(review): presumably this register-write pair clears the
+	 * switch MIB counters after the dump — confirm against the GSW
+	 * datasheet for register 0x4fe0.
+	 */
+	mii_mgr_write(31, 0x4fe0, 0xf0);
+	mii_mgr_write(31, 0x4fe0, 0x800000f0);
+
+	seq_puts(seq, "\n");
+}
+
+/* Print PSE free-queue / drop counters and per-queue QDMA packet and
+ * drop counters to the kernel log (pr_info, not seq_file). QTX_MIB_IF
+ * is switched to MIB mode for the walk and restored afterwards; the
+ * queue banks are selected via QDMA_PAGE and the page is restored to 0
+ * when done.
+ */
+void pse_qdma_drop_cnt(void)
+{
+	u8 i;
+
+	pr_info("       <<PSE DROP CNT>>\n");
+	pr_info("| FQ_PCNT_MIN : %010u |\n",
+		(sys_reg_read(FE_PSE_FREE) & 0xff0000) >> 16);
+	pr_info("| FQ_PCNT     : %010u |\n",
+		sys_reg_read(FE_PSE_FREE) & 0x00ff);
+	pr_info("| FE_DROP_FQ  : %010u |\n",
+		sys_reg_read(FE_DROP_FQ));
+	pr_info("| FE_DROP_FC  : %010u |\n",
+		sys_reg_read(FE_DROP_FC));
+	pr_info("| FE_DROP_PPE : %010u |\n",
+		sys_reg_read(FE_DROP_PPE));
+	pr_info("\n       <<QDMA PKT/DROP CNT>>\n");
+
+	sys_reg_write(QTX_MIB_IF, 0x90000000);
+	for (i = 0; i < NUM_PQ; i++) {
+		if (i <= 15) {
+			sys_reg_write(QDMA_PAGE, 0);
+			pr_info("QDMA Q%d PKT CNT: %010u, DROP CNT: %010u\n", i,
+				sys_reg_read(QTX_CFG_0 + i * 16),
+				sys_reg_read(QTX_SCH_0 + i * 16));
+		} else if (i > 15 && i <= 31) {
+			sys_reg_write(QDMA_PAGE, 1);
+			pr_info("QDMA Q%d PKT CNT: %010u, DROP CNT: %010u\n", i,
+				sys_reg_read(QTX_CFG_0 + (i - 16) * 16),
+				sys_reg_read(QTX_SCH_0 + (i - 16) * 16));
+		} else if (i > 31 && i <= 47) {
+			sys_reg_write(QDMA_PAGE, 2);
+			pr_info("QDMA Q%d PKT CNT: %010u, DROP CNT: %010u\n", i,
+				sys_reg_read(QTX_CFG_0 + (i - 32) * 16),
+				sys_reg_read(QTX_SCH_0 + (i - 32) * 16));
+		} else if (i > 47 && i <= 63) {
+			sys_reg_write(QDMA_PAGE, 3);
+			pr_info("QDMA Q%d PKT CNT: %010u, DROP CNT: %010u\n", i,
+				sys_reg_read(QTX_CFG_0 + (i - 48) * 16),
+				sys_reg_read(QTX_SCH_0 + (i - 48) * 16));
+		}
+	}
+	/* restore page select and leave MIB mode */
+	sys_reg_write(QDMA_PAGE, 0);
+	sys_reg_write(QTX_MIB_IF, 0x0);
+}
+
+/* Dump the embedded 10/100 switch per-port good/bad packet counters
+ * into @seq as an ASCII diagram. Counter registers pack RX in the low
+ * 16 bits and TX in the high 16 bits of each word at the listed
+ * ETHDMASYS_ETH_SW_BASE offsets.
+ */
+void embedded_sw_cnt_read(struct seq_file *seq)
+{
+	seq_puts(seq, "\n       <<CPU>>\n");
+	seq_puts(seq, "           |\n");
+	seq_puts(seq, "                      ^\n");
+	seq_printf(seq, "                      | Port6 Rx:%08u Good Pkt\n",
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0xE0) & 0xFFFF);
+	seq_printf(seq, "                      | Port6 Tx:%08u Good Pkt\n",
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0xE0) >> 16);
+	seq_puts(seq, "+---------------------v-------------------------+\n");
+	seq_puts(seq, "|            P6                |\n");
+	seq_puts(seq, "|           <<10/100 Embedded Switch>>         |\n");
+	seq_puts(seq, "|     P0    P1    P2     P3     P4     P5       |\n");
+	seq_puts(seq, "+-----------------------------------------------+\n");
+	seq_puts(seq, "       |     |     |     |       |      |\n");
+	seq_printf(seq,
+		   "Port0 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n",
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0xE8) & 0xFFFF,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0x150) & 0xFFFF,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0xE8) >> 16,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0x150) >> 16);
+
+	seq_printf(seq,
+		   "Port1 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n",
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0xEC) & 0xFFFF,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0x154) & 0xFFFF,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0xEC) >> 16,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0x154) >> 16);
+
+	seq_printf(seq,
+		   "Port2 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n",
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0xF0) & 0xFFFF,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0x158) & 0xFFFF,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0xF0) >> 16,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0x158) >> 16);
+
+	seq_printf(seq,
+		   "Port3 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n",
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0xF4) & 0xFFFF,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0x15C) & 0xFFFF,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0xF4) >> 16,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0x15c) >> 16);
+
+	seq_printf(seq,
+		   "Port4 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n",
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0xF8) & 0xFFFF,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0x160) & 0xFFFF,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0xF8) >> 16,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0x160) >> 16);
+
+	seq_printf(seq,
+		   "Port5 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n",
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0xFC) & 0xFFFF,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0x164) & 0xFFFF,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0xFC) >> 16,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0x164) >> 16);
+}
+
+/* seq_file show for the PSE/QDMA drop counter proc file.
+ * NOTE(review): pse_qdma_drop_cnt() prints via pr_info() to the kernel
+ * log rather than into @seq, so reading this proc file produces empty
+ * output — confirm this is intentional.
+ */
+int eth_cnt_read(struct seq_file *seq, void *v)
+{
+	pse_qdma_drop_cnt();
+	return 0;
+}
+
+/* seq_file show handler for PROCREG_ESW_CNT: dumps the frame-engine
+ * GDMA1/GDMA2 MIB counters (good/error byte and packet counts read
+ * from RALINK_FRAME_ENGINE_BASE + 0x1Cxx) and then chains to the
+ * per-port switch counter dump matching the chip in use.
+ */
+int esw_cnt_read(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	seq_puts(seq, "                  <<CPU>>\n");
+	seq_puts(seq, "+-----------------------------------------------+\n");
+	seq_puts(seq, "|		  <<PSE>>		        |\n");
+	seq_puts(seq, "+-----------------------------------------------+\n");
+	seq_puts(seq, "+-----------------------------------------------+\n");
+	seq_puts(seq, "|		  <<GDMA>>		        |\n");
+
+	seq_printf(seq,
+		   "| GDMA1_RX_GBCNT  : %010u (Rx Good Bytes)	|\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C00));
+	seq_printf(seq,
+		   "| GDMA1_RX_GPCNT  : %010u (Rx Good Pkts)	|\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C08));
+	seq_printf(seq,
+		   "| GDMA1_RX_OERCNT : %010u (overflow error)	|\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C10));
+	seq_printf(seq, "| GDMA1_RX_FERCNT : %010u (FCS error)	|\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C14));
+	seq_printf(seq, "| GDMA1_RX_SERCNT : %010u (too short)	|\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C18));
+	seq_printf(seq, "| GDMA1_RX_LERCNT : %010u (too long)	|\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C1C));
+	seq_printf(seq,
+		   "| GDMA1_RX_CERCNT : %010u (checksum error)	|\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C20));
+	seq_printf(seq,
+		   "| GDMA1_RX_FCCNT  : %010u (flow control)	|\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C24));
+	seq_printf(seq,
+		   "| GDMA1_TX_SKIPCNT: %010u (about count)	|\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C28));
+	seq_printf(seq,
+		   "| GDMA1_TX_COLCNT : %010u (collision count)	|\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C2C));
+	seq_printf(seq,
+		   "| GDMA1_TX_GBCNT  : %010u (Tx Good Bytes)	|\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C30));
+	seq_printf(seq,
+		   "| GDMA1_TX_GPCNT  : %010u (Tx Good Pkts)	|\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C38));
+	seq_puts(seq, "|						|\n");
+	seq_printf(seq,
+		   "| GDMA2_RX_GBCNT  : %010u (Rx Good Bytes)	|\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C40));
+	seq_printf(seq,
+		   "| GDMA2_RX_GPCNT  : %010u (Rx Good Pkts)	|\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C48));
+	seq_printf(seq,
+		   "| GDMA2_RX_OERCNT : %010u (overflow error)	|\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C50));
+	seq_printf(seq, "| GDMA2_RX_FERCNT : %010u (FCS error)	|\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C54));
+	seq_printf(seq, "| GDMA2_RX_SERCNT : %010u (too short)	|\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C58));
+	seq_printf(seq, "| GDMA2_RX_LERCNT : %010u (too long)	|\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C5C));
+	seq_printf(seq,
+		   "| GDMA2_RX_CERCNT : %010u (checksum error)	|\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C60));
+	seq_printf(seq,
+		   "| GDMA2_RX_FCCNT  : %010u (flow control)	|\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C64));
+	seq_printf(seq,
+		   "| GDMA2_TX_SKIPCNT: %010u (skip)		|\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C68));
+	seq_printf(seq, "| GDMA2_TX_COLCNT : %010u (collision)	|\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C6C));
+	seq_printf(seq,
+		   "| GDMA2_TX_GBCNT  : %010u (Tx Good Bytes)	|\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C70));
+	seq_printf(seq,
+		   "| GDMA2_TX_GPCNT  : %010u (Tx Good Pkts)	|\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C78));
+
+	seq_puts(seq, "+-----------------------------------------------+\n");
+
+	seq_puts(seq, "\n");
+
+	/* MT7623/MT7621 dump the internal gigabit switch counters;
+	 * RAETH_ESW architectures dump the embedded switch instead
+	 */
+	if ((ei_local->chip_name == MT7623_FE) || ei_local->chip_name == MT7621_FE)
+		internal_gsw_cnt_read(seq);
+	if (ei_local->architecture & RAETH_ESW)
+		embedded_sw_cnt_read(seq);
+
+	return 0;
+}
+
+/* proc open: bind esw_cnt_read() as the single_open show routine */
+static int switch_count_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, esw_cnt_read, NULL);
+}
+
+/* proc open: bind eth_cnt_read() as the single_open show routine */
+static int eth_count_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, eth_cnt_read, NULL);
+}
+
+/* fops for PROCREG_ESW_CNT: read-only seq_file dump of switch MIBs */
+static const struct file_operations switch_count_fops = {
+	.owner = THIS_MODULE,
+	.open = switch_count_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+/* fops for PROCREG_ETH_CNT: read-only seq_file dump of PSE/QDMA drops */
+static const struct file_operations eth_count_fops = {
+	.owner = THIS_MODULE,
+	.open = eth_count_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+/* proc write procedure */
+/* Write handler for PROCREG_GMAC: select the PHY address used by the
+ * mii helpers for GMAC1 (DEV_NAME, "eth2" by default).
+ *
+ * Accepted input: "<ifname> <phyid>" (e.g. "eth2 1") or a bare decimal
+ * PHY id.  Returns @count on success, -EFAULT on copy/parse failure,
+ * 0 when FE_ETHTOOL/FE_GE2_SUPPORT are not enabled.
+ */
+static ssize_t change_phyid(struct file *file,
+			    const char __user *buffer, size_t count,
+			    loff_t *data)
+{
+	int val = 0;
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & (FE_ETHTOOL | FE_GE2_SUPPORT)) {
+		char buf[32];
+		struct net_device *cur_dev_p;
+		struct END_DEVICE *ei_local;
+		char if_name[64];
+		unsigned int phy_id;
+
+		/* keep one byte for the NUL terminator so the parsers
+		 * below never run off the end of buf (the original
+		 * clamped to sizeof(buf), leaving it unterminated)
+		 */
+		if (count > sizeof(buf) - 1)
+			count = sizeof(buf) - 1;
+		memset(buf, 0, sizeof(buf));
+		if (copy_from_user(buf, buffer, count))
+			return -EFAULT;
+
+		/* determine interface name */
+		strncpy(if_name, DEV_NAME, sizeof(if_name) - 1);	/* "eth2" by default */
+		if (isalpha(buf[0])) {
+			/* "<ifname> <phyid>" form: both fields must match */
+			val = sscanf(buf, "%4s %1u", if_name, &phy_id);
+			if (val != 2)
+				return -EFAULT;
+		} else {
+			/* kstrtouint() returns 0 on success and stores the
+			 * parsed value via the output pointer; its return
+			 * value is an error code, not the number
+			 */
+			if (kstrtouint(buf, 10, &phy_id))
+				return -EFAULT;
+		}
+		cur_dev_p = dev_get_by_name(&init_net, DEV_NAME);
+
+		if (!cur_dev_p)
+			return -EFAULT;
+
+		ei_local = netdev_priv(cur_dev_p);
+
+		ei_local->mii_info.phy_id = (unsigned char)phy_id;
+		dev_put(cur_dev_p);	/* balance dev_get_by_name() */
+		return count;
+	} else {
+		return 0;
+	}
+}
+
+/* Write handler for PROCREG_GMAC2: select the PHY address used by the
+ * mii helpers for the pseudo GMAC2 interface (DEV2_NAME, "eth3").
+ * Same input format and fixes as change_phyid() above.
+ */
+static ssize_t change_gmac2_phyid(struct file *file,
+				  const char __user *buffer,
+				  size_t count, loff_t *data)
+{
+	int val = 0;
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & (FE_ETHTOOL | FE_GE2_SUPPORT)) {
+		char buf[32];
+		struct net_device *cur_dev_p;
+		struct PSEUDO_ADAPTER *p_pseudo_ad;
+		char if_name[64];
+		unsigned int phy_id;
+
+		/* keep one byte for the NUL terminator */
+		if (count > sizeof(buf) - 1)
+			count = sizeof(buf) - 1;
+		memset(buf, 0, sizeof(buf));
+		if (copy_from_user(buf, buffer, count))
+			return -EFAULT;
+		/* determine interface name */
+		strncpy(if_name, DEV2_NAME, sizeof(if_name) - 1);	/* "eth3" by default */
+		if (isalpha(buf[0])) {
+			/* "<ifname> <phyid>" form: both fields must match */
+			val = sscanf(buf, "%4s %1u", if_name, &phy_id);
+			if (val != 2)
+				return -EFAULT;
+		} else {
+			/* kstrtouint() returns an error code; the parsed
+			 * value is stored through the output pointer
+			 */
+			if (kstrtouint(buf, 10, &phy_id))
+				return -EFAULT;
+		}
+		cur_dev_p = dev_get_by_name(&init_net, DEV2_NAME);
+
+		if (!cur_dev_p)
+			return -EFAULT;
+		p_pseudo_ad = netdev_priv(cur_dev_p);
+		p_pseudo_ad->mii_info.phy_id = (unsigned char)phy_id;
+		dev_put(cur_dev_p);	/* balance dev_get_by_name() */
+		return count;
+	} else {
+		return 0;
+	}
+}
+
+/* fops for PROCREG_GMAC2: write-only, sets the GMAC2 PHY address */
+static const struct file_operations gmac2_fops = {
+	.owner = THIS_MODULE,
+	.write = change_gmac2_phyid
+};
+
+/* proc open: bind reg_read_main() as the single_open show routine */
+static int gmac_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, reg_read_main, NULL);
+}
+
+/* fops for PROCREG_GMAC: read dumps registers via reg_read_main(),
+ * write changes the GMAC1 PHY address via change_phyid()
+ */
+static const struct file_operations gmac_fops = {
+	.owner = THIS_MODULE,
+	.open = gmac_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = change_phyid,
+	.release = single_release
+};
+
+/* #if defined(TASKLET_WORKQUEUE_SW) */
+
+/* seq_file show handler for PROCREG_SCHE: reports whether the driver
+ * was initialized with (init_schedule) and is currently running on
+ * (working_schedule) a workqueue (1) or a tasklet (0).  Prints nothing
+ * when TASKLET_WORKQUEUE_SW is not enabled.
+ */
+static int schedule_read(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & TASKLET_WORKQUEUE_SW) {
+		if (init_schedule == 1)
+			seq_printf(seq,
+				   "Initialize Raeth with workqueque<%d>\n",
+				   init_schedule);
+		else
+			seq_printf(seq,
+				   "Initialize Raeth with tasklet<%d>\n",
+				   init_schedule);
+		if (working_schedule == 1)
+			seq_printf(seq,
+				   "Raeth is running at workqueque<%d>\n",
+				   working_schedule);
+		else
+			seq_printf(seq,
+				   "Raeth is running at tasklet<%d>\n",
+				   working_schedule);
+	}
+
+	return 0;
+}
+
+/* Write handler for PROCREG_SCHE: change init_schedule (tasklet vs
+ * workqueue selection applied at next init; the running mode is not
+ * switched, as the debug message notes).
+ */
+static ssize_t schedule_write(struct file *file,
+			      const char __user *buffer, size_t count,
+			      loff_t *data)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & TASKLET_WORKQUEUE_SW) {
+		char buf[8];
+		int old;
+
+		/* clamp to the buffer and NUL-terminate; the original
+		 * copied an unchecked count into a 2-byte stack buffer
+		 */
+		if (count > sizeof(buf) - 1)
+			count = sizeof(buf) - 1;
+		memset(buf, 0, sizeof(buf));
+		if (copy_from_user(buf, buffer, count))
+			return -EFAULT;
+		old = init_schedule;
+		/* kstrtoint() returns an error code; the parsed value is
+		 * stored through the output pointer
+		 */
+		if (kstrtoint(buf, 10, &init_schedule))
+			return -EFAULT;
+		pr_debug
+		    ("ChangeRaethInitScheduleFrom <%d> to <%d>\n",
+		     old, init_schedule);
+		pr_debug("Not running schedule at present !\n");
+
+		return count;
+	} else {
+		return 0;
+	}
+}
+
+/* proc open: bind schedule_read() as the single_open show routine */
+static int schedule_switch_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, schedule_read, NULL);
+}
+
+/* fops for PROCREG_SCHE: read current mode, write new init mode */
+static const struct file_operations schedule_sw_fops = {
+	.owner = THIS_MODULE,
+	.open = schedule_switch_open,
+	.read = seq_read,
+	.write = schedule_write,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+/* Accumulate per-cause interrupt statistics into the global raeth_int
+ * structure.  @int_status is the raw interrupt status word; each set
+ * cause bit bumps its matching counter.  No-op (returns 0) unless
+ * FE_RAETH_INT_DBG is enabled.  Counters are dumped and cleared by
+ * int_dbg_read().
+ */
+int int_stats_update(unsigned int int_status)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & FE_RAETH_INT_DBG) {
+		/* coherence / descriptor error causes */
+		if (int_status & (RX_COHERENT | TX_COHERENT | RXD_ERROR)) {
+			if (int_status & RX_COHERENT)
+				raeth_int.RX_COHERENT_CNT++;
+			if (int_status & TX_COHERENT)
+				raeth_int.TX_COHERENT_CNT++;
+			if (int_status & RXD_ERROR)
+				raeth_int.RXD_ERROR_CNT++;
+		}
+		/* delayed-interrupt causes, one per RX ring */
+		if (int_status &
+		    (RX_DLY_INT | RING1_RX_DLY_INT | RING2_RX_DLY_INT |
+		     RING3_RX_DLY_INT)) {
+			if (int_status & RX_DLY_INT)
+				raeth_int.RX_DLY_INT_CNT++;
+			if (int_status & RING1_RX_DLY_INT)
+				raeth_int.RING1_RX_DLY_INT_CNT++;
+			if (int_status & RING2_RX_DLY_INT)
+				raeth_int.RING2_RX_DLY_INT_CNT++;
+			if (int_status & RING3_RX_DLY_INT)
+				raeth_int.RING3_RX_DLY_INT_CNT++;
+		}
+		if (int_status & (TX_DLY_INT))
+			raeth_int.TX_DLY_INT_CNT++;
+		/* RX completion causes, one per ring */
+		if (int_status &
+		    (RX_DONE_INT0 | RX_DONE_INT1 | RX_DONE_INT2 |
+		     RX_DONE_INT3)) {
+			if (int_status & RX_DONE_INT0)
+				raeth_int.RX_DONE_INT0_CNT++;
+			if (int_status & RX_DONE_INT1)
+				raeth_int.RX_DONE_INT1_CNT++;
+			if (int_status & RX_DONE_INT2)
+				raeth_int.RX_DONE_INT2_CNT++;
+			if (int_status & RX_DONE_INT3)
+				raeth_int.RX_DONE_INT3_CNT++;
+		}
+		/* TX completion causes, one per ring */
+		if (int_status &
+		    (TX_DONE_INT0 | TX_DONE_INT1 | TX_DONE_INT2 |
+		     TX_DONE_INT3)) {
+			if (int_status & TX_DONE_INT0)
+				raeth_int.TX_DONE_INT0_CNT++;
+			if (int_status & TX_DONE_INT1)
+				raeth_int.TX_DONE_INT1_CNT++;
+			if (int_status & TX_DONE_INT2)
+				raeth_int.TX_DONE_INT2_CNT++;
+			if (int_status & TX_DONE_INT3)
+				raeth_int.TX_DONE_INT3_CNT++;
+		}
+		/* alternate-replace causes */
+		if (int_status &
+		    (ALT_RPLC_INT1 | ALT_RPLC_INT2 | ALT_RPLC_INT3)) {
+			if (int_status & ALT_RPLC_INT1)
+				raeth_int.ALT_RPLC_INT1_CNT++;
+			if (int_status & ALT_RPLC_INT2)
+				raeth_int.ALT_RPLC_INT2_CNT++;
+			if (int_status & ALT_RPLC_INT3)
+				raeth_int.ALT_RPLC_INT3_CNT++;
+		}
+	}
+	return 0;
+}
+
+/* seq_file show handler for PROCREG_INT_DBG: dump the interrupt
+ * counters gathered by int_stats_update().  Note: the counters are
+ * zeroed after every dump, so each read reports activity since the
+ * previous read.
+ */
+static int int_dbg_read(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & FE_RAETH_INT_DBG) {
+		seq_puts(seq, "Raether Interrupt Statistics\n");
+		seq_printf(seq, "RX_COHERENT = %d\n",
+			   raeth_int.RX_COHERENT_CNT);
+		seq_printf(seq, "RX_DLY_INT = %d\n", raeth_int.RX_DLY_INT_CNT);
+		seq_printf(seq, "TX_COHERENT = %d\n",
+			   raeth_int.TX_COHERENT_CNT);
+		seq_printf(seq, "TX_DLY_INT = %d\n", raeth_int.TX_DLY_INT_CNT);
+		seq_printf(seq, "RING3_RX_DLY_INT = %d\n",
+			   raeth_int.RING3_RX_DLY_INT_CNT);
+		seq_printf(seq, "RING2_RX_DLY_INT = %d\n",
+			   raeth_int.RING2_RX_DLY_INT_CNT);
+		seq_printf(seq, "RING1_RX_DLY_INT = %d\n",
+			   raeth_int.RING1_RX_DLY_INT_CNT);
+		seq_printf(seq, "RXD_ERROR = %d\n", raeth_int.RXD_ERROR_CNT);
+		seq_printf(seq, "ALT_RPLC_INT3 = %d\n",
+			   raeth_int.ALT_RPLC_INT3_CNT);
+		seq_printf(seq, "ALT_RPLC_INT2 = %d\n",
+			   raeth_int.ALT_RPLC_INT2_CNT);
+		seq_printf(seq, "ALT_RPLC_INT1 = %d\n",
+			   raeth_int.ALT_RPLC_INT1_CNT);
+		seq_printf(seq, "RX_DONE_INT3 = %d\n",
+			   raeth_int.RX_DONE_INT3_CNT);
+		seq_printf(seq, "RX_DONE_INT2 = %d\n",
+			   raeth_int.RX_DONE_INT2_CNT);
+		seq_printf(seq, "RX_DONE_INT1 = %d\n",
+			   raeth_int.RX_DONE_INT1_CNT);
+		seq_printf(seq, "RX_DONE_INT0 = %d\n",
+			   raeth_int.RX_DONE_INT0_CNT);
+		seq_printf(seq, "TX_DONE_INT3 = %d\n",
+			   raeth_int.TX_DONE_INT3_CNT);
+		seq_printf(seq, "TX_DONE_INT2 = %d\n",
+			   raeth_int.TX_DONE_INT2_CNT);
+		seq_printf(seq, "TX_DONE_INT1 = %d\n",
+			   raeth_int.TX_DONE_INT1_CNT);
+		seq_printf(seq, "TX_DONE_INT0 = %d\n",
+			   raeth_int.TX_DONE_INT0_CNT);
+
+		/* restart accumulation after every dump */
+		memset(&raeth_int, 0, sizeof(raeth_int));
+	}
+	return 0;
+}
+
+/* proc open for PROCREG_INT_DBG.  NOTE(review): when FE_RAETH_INT_DBG
+ * is disabled this returns 0 without calling single_open(), while the
+ * fops still route .read to seq_read (NULL private_data).  In practice
+ * debug_proc_init() only creates this entry when the flag is set, so
+ * the branch should be unreachable -- confirm before relying on it.
+ */
+static int int_dbg_open(struct inode *inode, struct file *file)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & FE_RAETH_INT_DBG) {
+		/* memset(&raeth_int, 0, sizeof(raeth_int)); */
+		return single_open(file, int_dbg_read, NULL);
+	} else {
+		return 0;
+	}
+}
+
+/* Write handler stub for PROCREG_INT_DBG: writes are accepted but
+ * ignored (returns 0 bytes consumed).
+ */
+static ssize_t int_dbg_write(struct file *file, const char __user *buffer,
+			     size_t count, loff_t *data)
+{
+	return 0;
+}
+
+/* fops for PROCREG_INT_DBG: read dumps and clears counters; write is
+ * a no-op stub
+ */
+static const struct file_operations int_dbg_sw_fops = {
+	.owner = THIS_MODULE,
+	.open = int_dbg_open,
+	.read = seq_read,
+	.write = int_dbg_write
+};
+
+/* seq_file show handler for PROCREG_SET_LAN_IP: print the LAN IPv4
+ * address string currently stored in the driver private data.
+ */
+static int set_lan_ip_read(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	seq_printf(seq, "ei_local->lan_ip4_addr = %s\n",
+		   ei_local->lan_ip4_addr);
+
+	return 0;
+}
+
+/* proc open: bind set_lan_ip_read() as the single_open show routine */
+static int set_lan_ip_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, set_lan_ip_read, NULL);
+}
+
+/* Write handler for PROCREG_SET_LAN_IP: store the LAN IPv4 address
+ * string and, with FE_HW_LRO, program it into the HW LRO "my IP"
+ * filter.  Returns @count on success, -EFAULT on oversize input or
+ * copy failure.
+ */
+static ssize_t set_lan_ip_write(struct file *file,
+				const char __user *buffer, size_t count,
+				loff_t *data)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	char ip_tmp[IP4_ADDR_LEN];
+
+	/* reserve one byte for the NUL terminator (the original allowed
+	 * count == IP4_ADDR_LEN, leaving ip_tmp unterminated)
+	 */
+	if (count >= IP4_ADDR_LEN)
+		return -EFAULT;
+
+	if (copy_from_user(ip_tmp, buffer, count))
+		return -EFAULT;
+	ip_tmp[count] = '\0';
+
+	/* copy count bytes plus the terminator */
+	strncpy(ei_local->lan_ip4_addr, ip_tmp, count + 1);
+
+	pr_info("[%s]LAN IP = %s\n", __func__, ei_local->lan_ip4_addr);
+
+
+	if (ei_local->features & FE_HW_LRO)
+		fe_set_hw_lro_my_ip(ei_local->lan_ip4_addr);
+
+	return count;
+}
+
+/* fops for PROCREG_SET_LAN_IP: read shows the stored address, write
+ * updates it
+ */
+static const struct file_operations set_lan_ip_fops = {
+	.owner = THIS_MODULE,
+	.open = set_lan_ip_open,
+	.read = seq_read,
+	.write = set_lan_ip_write
+};
+
+/* Create the raeth debug entries under the PROCREG_DIR proc directory.
+ * Feature-dependent entries (LRO/RSS, IO-coherence, GMAC2, TSO, SNMP,
+ * eth_cnt, schedule, int_dbg) are only created when the matching
+ * feature flag or chip id is present.  Always returns 0; individual
+ * creation failures are only logged via pr_debug.
+ */
+int debug_proc_init(void)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (!proc_reg_dir)
+		proc_reg_dir = proc_mkdir(PROCREG_DIR, NULL);
+
+	/* HW LRO and RSS are mutually exclusive debug sets */
+	if (ei_local->features & FE_HW_LRO)
+		hwlro_debug_proc_init(proc_reg_dir);
+	else if (ei_local->features & (FE_RSS_4RING | FE_RSS_2RING))
+		rss_debug_proc_init(proc_reg_dir);
+
+	if (ei_local->features & FE_HW_IOCOHERENT)
+		hwioc_debug_proc_init(proc_reg_dir);
+	proc_gmac = proc_create(PROCREG_GMAC, 0, proc_reg_dir, &gmac_fops);
+	if (!proc_gmac)
+		pr_debug("!! FAIL to create %s PROC !!\n", PROCREG_GMAC);
+
+	if (ei_local->features & (FE_ETHTOOL | FE_GE2_SUPPORT)) {
+		proc_gmac2 =
+		    proc_create(PROCREG_GMAC2, 0, proc_reg_dir, &gmac2_fops);
+		if (!proc_gmac2)
+			pr_debug("!! FAIL to create %s PROC !!\n",
+				 PROCREG_GMAC2);
+	}
+	proc_skb_free =
+	    proc_create(PROCREG_SKBFREE, 0, proc_reg_dir, &skb_free_fops);
+	if (!proc_skb_free)
+		pr_debug("!! FAIL to create %s PROC !!\n", PROCREG_SKBFREE);
+	proc_tx_ring = proc_create(PROCREG_TXRING, 0, proc_reg_dir,
+				   &tx_ring_fops);
+	if (!proc_tx_ring)
+		pr_debug("!! FAIL to create %s PROC !!\n", PROCREG_TXRING);
+	proc_rx_ring = proc_create(PROCREG_RXRING, 0,
+				   proc_reg_dir, &rx_ring_fops);
+	if (!proc_rx_ring)
+		pr_debug("!! FAIL to create %s PROC !!\n", PROCREG_RXRING);
+
+	if (ei_local->features & FE_TSO) {
+		proc_num_of_txd =
+		    proc_create(PROCREG_NUM_OF_TXD, 0, proc_reg_dir,
+				&tso_txd_num_fops);
+		if (!proc_num_of_txd)
+			pr_debug("!! FAIL to create %s PROC !!\n",
+				 PROCREG_NUM_OF_TXD);
+		proc_tso_len =
+		    proc_create(PROCREG_TSO_LEN, 0, proc_reg_dir,
+				&tso_len_fops);
+		if (!proc_tso_len)
+			pr_debug("!! FAIL to create %s PROC !!\n",
+				 PROCREG_TSO_LEN);
+	}
+
+	if (ei_local->features & USER_SNMPD) {
+		proc_ra_snmp =
+		    proc_create(PROCREG_SNMP, S_IRUGO, proc_reg_dir,
+				&ra_snmp_seq_fops);
+		if (!proc_ra_snmp)
+			pr_debug("!! FAIL to create %s PROC !!\n",
+				 PROCREG_SNMP);
+	}
+	proc_esw_cnt =
+	    proc_create(PROCREG_ESW_CNT, 0, proc_reg_dir, &switch_count_fops);
+	if (!proc_esw_cnt)
+		pr_debug("!! FAIL to create %s PROC !!\n", PROCREG_ESW_CNT);
+
+	/* QDMA drop counters exist only on MT7622/Leopard */
+	if (ei_local->chip_name == MT7622_FE || ei_local->chip_name == LEOPARD_FE) {
+		proc_eth_cnt =
+			proc_create(PROCREG_ETH_CNT, 0, proc_reg_dir, &eth_count_fops);
+		if (!proc_eth_cnt)
+			pr_debug("!! FAIL to create %s PROC !!\n", PROCREG_ETH_CNT);
+	}
+
+	if (ei_local->features & TASKLET_WORKQUEUE_SW) {
+		proc_sche =
+		    proc_create(PROCREG_SCHE, 0, proc_reg_dir,
+				&schedule_sw_fops);
+		if (!proc_sche)
+			pr_debug("!! FAIL to create %s PROC !!\n",
+				 PROCREG_SCHE);
+	}
+
+	if (ei_local->features & FE_RAETH_INT_DBG) {
+		proc_int_dbg =
+		    proc_create(PROCREG_INT_DBG, 0, proc_reg_dir,
+				&int_dbg_sw_fops);
+		if (!proc_int_dbg)
+			pr_debug("!! FAIL to create %s PROC !!\n",
+				 PROCREG_INT_DBG);
+	}
+
+	/* Set LAN IP address */
+	proc_set_lan_ip =
+	    proc_create(PROCREG_SET_LAN_IP, 0, proc_reg_dir, &set_lan_ip_fops);
+	if (!proc_set_lan_ip)
+		pr_debug("!! FAIL to create %s PROC !!\n", PROCREG_SET_LAN_IP);
+
+	pr_debug("PROC INIT OK!\n");
+	return 0;
+}
+
+/* Remove every proc entry created by debug_proc_init(), under the same
+ * feature/chip conditions, so module unload leaves no stale entries.
+ */
+void debug_proc_exit(void)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & FE_HW_LRO)
+		hwlro_debug_proc_exit(proc_reg_dir);
+	else if (ei_local->features & (FE_RSS_4RING | FE_RSS_2RING))
+		rss_debug_proc_exit(proc_reg_dir);
+
+	if (ei_local->features & FE_HW_IOCOHERENT)
+		hwioc_debug_proc_exit(proc_reg_dir);
+
+	if (proc_sys_cp0)
+		remove_proc_entry(PROCREG_CP0, proc_reg_dir);
+
+	if (proc_gmac)
+		remove_proc_entry(PROCREG_GMAC, proc_reg_dir);
+
+	if (ei_local->features & (FE_ETHTOOL | FE_GE2_SUPPORT)) {
+		/* fix: was removing PROCREG_GMAC a second time; drop the
+		 * GMAC2 entry created in debug_proc_init() instead
+		 */
+		if (proc_gmac2)
+			remove_proc_entry(PROCREG_GMAC2, proc_reg_dir);
+	}
+
+	if (proc_skb_free)
+		remove_proc_entry(PROCREG_SKBFREE, proc_reg_dir);
+
+	if (proc_tx_ring)
+		remove_proc_entry(PROCREG_TXRING, proc_reg_dir);
+
+	if (proc_rx_ring)
+		remove_proc_entry(PROCREG_RXRING, proc_reg_dir);
+
+	if (ei_local->features & FE_TSO) {
+		if (proc_num_of_txd)
+			remove_proc_entry(PROCREG_NUM_OF_TXD, proc_reg_dir);
+
+		if (proc_tso_len)
+			remove_proc_entry(PROCREG_TSO_LEN, proc_reg_dir);
+	}
+
+	if (ei_local->features & USER_SNMPD) {
+		if (proc_ra_snmp)
+			remove_proc_entry(PROCREG_SNMP, proc_reg_dir);
+	}
+
+	if (proc_esw_cnt)
+		remove_proc_entry(PROCREG_ESW_CNT, proc_reg_dir);
+
+	if (ei_local->chip_name == MT7622_FE || ei_local->chip_name == LEOPARD_FE) {
+		if (proc_eth_cnt)
+			remove_proc_entry(PROCREG_ETH_CNT, proc_reg_dir);
+	}
+
+	/* the following entries were created in debug_proc_init() but
+	 * never removed by the original exit path (proc entry leak)
+	 */
+	if (ei_local->features & TASKLET_WORKQUEUE_SW) {
+		if (proc_sche)
+			remove_proc_entry(PROCREG_SCHE, proc_reg_dir);
+	}
+
+	if (ei_local->features & FE_RAETH_INT_DBG) {
+		if (proc_int_dbg)
+			remove_proc_entry(PROCREG_INT_DBG, proc_reg_dir);
+	}
+
+	if (proc_set_lan_ip)
+		remove_proc_entry(PROCREG_SET_LAN_IP, proc_reg_dir);
+
+	/* if (proc_reg_dir) */
+	/* remove_proc_entry(PROCREG_DIR, 0); */
+
+	pr_debug("proc exit\n");
+}
+EXPORT_SYMBOL(proc_reg_dir);
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_dbg_proc.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_dbg_proc.h
new file mode 100644
index 0000000..8acb29e
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_dbg_proc.h
@@ -0,0 +1,95 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef RA_DBG_PROC_H
+#define RA_DBG_PROC_H
+
+#include <linux/ctype.h>
+#include <linux/proc_fs.h>
+#include "raeth_config.h"
+
+extern struct net_device *dev_raether;
+
+void dump_qos(void);
+void dump_reg(struct seq_file *s);
+void dump_cp0(void);
+
+int debug_proc_init(void);
+void debug_proc_exit(void);
+
+int tso_len_update(int tso_len);
+int num_of_txd_update(int num_of_txd);
+#ifdef CONFIG_RAETH_LRO
+int lro_stats_update(struct net_lro_mgr *lro_mgr, bool all_flushed);
+#endif
+extern unsigned int M2Q_table[64];
+extern struct QDMA_txdesc *free_head;
+extern struct SFQ_table *sfq0;
+extern struct SFQ_table *sfq1;
+extern struct SFQ_table *sfq2;
+extern struct SFQ_table *sfq3;
+extern int init_schedule;
+extern int working_schedule;
+/* Per-cause interrupt hit counters, filled by int_stats_update() and
+ * dumped/cleared by the int_dbg proc read handler.
+ */
+struct raeth_int_t {
+	unsigned int RX_COHERENT_CNT;
+	unsigned int RX_DLY_INT_CNT;
+	unsigned int TX_COHERENT_CNT;
+	unsigned int TX_DLY_INT_CNT;
+	unsigned int RING3_RX_DLY_INT_CNT;
+	unsigned int RING2_RX_DLY_INT_CNT;
+	unsigned int RING1_RX_DLY_INT_CNT;
+	unsigned int RXD_ERROR_CNT;
+	unsigned int ALT_RPLC_INT3_CNT;
+	unsigned int ALT_RPLC_INT2_CNT;
+	unsigned int ALT_RPLC_INT1_CNT;
+	unsigned int RX_DONE_INT3_CNT;
+	unsigned int RX_DONE_INT2_CNT;
+	unsigned int RX_DONE_INT1_CNT;
+	unsigned int RX_DONE_INT0_CNT;
+	unsigned int TX_DONE_INT3_CNT;
+	unsigned int TX_DONE_INT2_CNT;
+	unsigned int TX_DONE_INT1_CNT;
+	unsigned int TX_DONE_INT0_CNT;
+};
+
+int int_stats_update(unsigned int int_status);
+
+/* Dump one MIB counter for switch ports 0..6 (register stride 0x100).
+ * Expects `i`, `pkt_cnt` and `seq` to be declared in the caller's
+ * scope.  Fix: the original left a line-continuation after the closing
+ * brace, silently gluing the next source line into the macro body.
+ */
+#define DUMP_EACH_PORT(base)					\
+	for (i = 0; i < 7; i++) {				\
+		mii_mgr_read(31, (base) + (i * 0x100), &pkt_cnt); \
+		seq_printf(seq, "%8u ", pkt_cnt);		\
+	}
+
+/* HW LRO functions */
+int hwlro_debug_proc_init(struct proc_dir_entry *proc_reg_dir);
+void hwlro_debug_proc_exit(struct proc_dir_entry *proc_reg_dir);
+
+int rss_debug_proc_init(struct proc_dir_entry *proc_reg_dir);
+void rss_debug_proc_exit(struct proc_dir_entry *proc_reg_dir);
+
+/* HW IO-Coherent functions */
+#ifdef	CONFIG_RAETH_HW_IOCOHERENT
+void hwioc_debug_proc_init(struct proc_dir_entry *proc_reg_dir);
+void hwioc_debug_proc_exit(struct proc_dir_entry *proc_reg_dir);
+#else
+static inline void hwioc_debug_proc_init(struct proc_dir_entry *proc_reg_dir)
+{
+}
+
+static inline void hwioc_debug_proc_exit(struct proc_dir_entry *proc_reg_dir)
+{
+}
+#endif /* CONFIG_RAETH_HW_IOCOHERENT */
+
+#endif
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_ethtool.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_ethtool.c
new file mode 100644
index 0000000..9ff7e0e
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_ethtool.c
@@ -0,0 +1,168 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "raether.h"
+#include "ra_ethtool.h"
+
+#define RAETHER_DRIVER_NAME		"raether"
+#define RA_NUM_STATS			4
+
+/* Return the PHY address currently programmed for DEV_NAME, or 0 if
+ * the device does not exist.  Fix: dev_get_by_name() takes a device
+ * reference that the original never released.
+ */
+unsigned char get_current_phy_address(void)
+{
+	struct net_device *cur_dev_p;
+	struct END_DEVICE *ei_local;
+	unsigned char phy_id;
+
+	cur_dev_p = dev_get_by_name(&init_net, DEV_NAME);
+	if (!cur_dev_p)
+		return 0;
+	ei_local = netdev_priv(cur_dev_p);
+	phy_id = ei_local->mii_info.phy_id;
+	dev_put(cur_dev_p);	/* balance dev_get_by_name() */
+	return phy_id;
+}
+
+#define MII_CR_ADDR			0x00
+#define MII_CR_MR_AUTONEG_ENABLE	BIT(12)
+#define MII_CR_MR_RESTART_NEGOTIATION	BIT(9)
+
+#define AUTO_NEGOTIATION_ADVERTISEMENT	0x04
+#define AN_PAUSE			BIT(10)
+
+/* ethtool get_link: report GMAC1 link state via the generic mii layer */
+u32 et_get_link(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+
+	return mii_link_ok(&ei_local->mii_info);
+}
+
+/* ethtool get_settings: fill @cmd from the GMAC1 mii state */
+int et_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+
+	mii_ethtool_gset(&ei_local->mii_info, cmd);
+	return 0;
+}
+
+/* mii_mgr_read wrapper for mii.o ethtool */
+/* Note: the @phy_id argument is ignored; the address stored in
+ * ei_local->mii_info.phy_id is used instead.
+ */
+int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+	unsigned int result;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+
+	mii_mgr_read((unsigned int)ei_local->mii_info.phy_id,
+		     (unsigned int)location, &result);
+/* printk("\n%s mii.o query= phy_id:%d\n",dev->name, phy_id);*/
+/*printk("address:%d retval:%x\n", location, result); */
+	return (int)result;
+}
+
+/* mii_mgr_write wrapper for mii.o ethtool */
+/* Note: the @phy_id argument is ignored; the address stored in
+ * ei_local->mii_info.phy_id is used instead.
+ */
+void mdio_write(struct net_device *dev, int phy_id, int location, int value)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+
+	mii_mgr_write((unsigned int)ei_local->mii_info.phy_id,
+		      (unsigned int)location, (unsigned int)value);
+/* printk("mii.o write= phy_id:%d\n", phy_id);*/
+/*printk("address:%d value:%x\n", location, value); */
+}
+
+/* #ifdef CONFIG_PSEUDO_SUPPORT */
+/*We unable to re-use the Raether functions because it is hard to tell
+ * where the calling from is. From eth2 or eth3?
+ *
+ * These code size is around 950 bytes.
+ */
+
+/* ethtool get_link for the pseudo (GMAC2) interface; reports 0 when
+ * GE2 support is not enabled.
+ */
+u32 et_virt_get_link(struct net_device *dev)
+{
+	struct PSEUDO_ADAPTER *pad = netdev_priv(dev);
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (!(ei_local->features & FE_GE2_SUPPORT))
+		return 0;
+
+	return mii_link_ok(&pad->mii_info);
+}
+
+/* ethtool get_settings for the pseudo (GMAC2) interface; @cmd is left
+ * untouched when GE2 support is not enabled.  Always returns 0.
+ */
+int et_virt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct PSEUDO_ADAPTER *pad = netdev_priv(dev);
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (!(ei_local->features & FE_GE2_SUPPORT))
+		return 0;
+
+	mii_ethtool_gset(&pad->mii_info, cmd);
+	return 0;
+}
+
+/* mii_mgr_read wrapper for the pseudo (GMAC2) interface.  The @phy_id
+ * argument is ignored; the address stored in the pseudo adapter's
+ * mii_info is used.  Returns 0 when GE2 support is not enabled.
+ */
+int mdio_virt_read(struct net_device *dev, int phy_id, int location)
+{
+	struct PSEUDO_ADAPTER *pad = netdev_priv(dev);
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	unsigned int regval;
+
+	if (!(ei_local->features & FE_GE2_SUPPORT))
+		return 0;
+
+	mii_mgr_read((unsigned int)pad->mii_info.phy_id,
+		     (unsigned int)location, &regval);
+	return (int)regval;
+}
+
+/* mii_mgr_write wrapper for the pseudo (GMAC2) interface.  The @phy_id
+ * argument is ignored; the address stored in the pseudo adapter's
+ * mii_info is used.  No-op when GE2 support is not enabled.
+ */
+void mdio_virt_write(struct net_device *dev, int phy_id, int location,
+		     int value)
+{
+	struct PSEUDO_ADAPTER *pad = netdev_priv(dev);
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (!(ei_local->features & FE_GE2_SUPPORT))
+		return;
+
+	mii_mgr_write((unsigned int)pad->mii_info.phy_id,
+		      (unsigned int)location, (unsigned int)value);
+}
+
+/* Initialize the generic mii_if_info structure for GMAC1: wire up the
+ * mdio accessors, masks, gmii capability probe, and the default PHY
+ * address (1).
+ */
+void ethtool_init(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+
+	/* init mii structure */
+	ei_local->mii_info.dev = dev;
+	ei_local->mii_info.mdio_read = mdio_read;
+	ei_local->mii_info.mdio_write = mdio_write;
+	ei_local->mii_info.phy_id_mask = 0x1f;
+	ei_local->mii_info.reg_num_mask = 0x1f;
+	ei_local->mii_info.supports_gmii =
+	    mii_check_gmii_support(&ei_local->mii_info);
+
+	/* TODO:   phy_id: 0~4 */
+	ei_local->mii_info.phy_id = 1;
+}
+
+/* Initialize the generic mii_if_info structure for the pseudo (GMAC2)
+ * interface: virtual mdio accessors, masks, default PHY address 0x1e,
+ * and the gmii capability probe.
+ */
+void ethtool_virt_init(struct net_device *dev)
+{
+	struct PSEUDO_ADAPTER *p_pseudo_ad = netdev_priv(dev);
+
+	/* init mii structure */
+	p_pseudo_ad->mii_info.dev = dev;
+	p_pseudo_ad->mii_info.mdio_read = mdio_virt_read;
+	p_pseudo_ad->mii_info.mdio_write = mdio_virt_write;
+	p_pseudo_ad->mii_info.phy_id_mask = 0x1f;
+	p_pseudo_ad->mii_info.reg_num_mask = 0x1f;
+	p_pseudo_ad->mii_info.phy_id = 0x1e;
+	p_pseudo_ad->mii_info.supports_gmii =
+	    mii_check_gmii_support(&p_pseudo_ad->mii_info);
+}
+
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_ethtool.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_ethtool.h
new file mode 100644
index 0000000..cff52e2
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_ethtool.h
@@ -0,0 +1,34 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef RA_ETHTOOL_H
+#define RA_ETHTOOL_H
+
+extern struct net_device *dev_raether;
+
+/* ethtool related */
+void ethtool_init(struct net_device *dev);
+int et_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
+u32 et_get_link(struct net_device *dev);
+unsigned char get_current_phy_address(void);
+int mdio_read(struct net_device *dev, int phy_id, int location);
+void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+
+/* for pseudo interface */
+void ethtool_virt_init(struct net_device *dev);
+int et_virt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
+u32 et_virt_get_link(struct net_device *dev);
+int mdio_virt_read(struct net_device *dev, int phy_id, int location);
+void mdio_virt_write(struct net_device *dev, int phy_id, int location,
+		     int value);
+
+#endif
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_ioctl.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_ioctl.h
new file mode 100644
index 0000000..b94cb33
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_ioctl.h
@@ -0,0 +1,179 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _RAETH_IOCTL_H
+#define _RAETH_IOCTL_H
+
+/* ioctl commands (0x89F0.. is the SIOCDEVPRIVATE private range) */
+#define RAETH_SW_IOCTL          0x89F0
+#define RAETH_ESW_REG_READ		0x89F1
+#define RAETH_ESW_REG_WRITE		0x89F2
+#define RAETH_MII_READ			0x89F3
+#define RAETH_MII_WRITE			0x89F4
+#define RAETH_ESW_INGRESS_RATE		0x89F5
+#define RAETH_ESW_EGRESS_RATE		0x89F6
+#define RAETH_ESW_PHY_DUMP		0x89F7
+#define RAETH_QDMA_IOCTL		0x89F8
+#define RAETH_EPHY_IOCTL		0x89F9
+#define RAETH_MII_READ_CL45             0x89FC
+#define RAETH_MII_WRITE_CL45            0x89FD
+#define RAETH_QDMA_SFQ_WEB_ENABLE       0x89FE
+#define RAETH_SET_LAN_IP		0x89FF
+
+/* switch ioctl sub-commands (go in ra_switch_ioctl_data.cmd) */
+#define SW_IOCTL_SET_EGRESS_RATE        0x0000
+#define SW_IOCTL_SET_INGRESS_RATE       0x0001
+#define SW_IOCTL_SET_VLAN               0x0002
+#define SW_IOCTL_DUMP_VLAN              0x0003
+#define SW_IOCTL_DUMP_TABLE             0x0004
+#define SW_IOCTL_ADD_L2_ADDR            0x0005
+#define SW_IOCTL_DEL_L2_ADDR            0x0006
+#define SW_IOCTL_ADD_MCAST_ADDR         0x0007
+#define SW_IOCTL_DEL_MCAST_ADDR         0x0008
+#define SW_IOCTL_DUMP_MIB               0x0009
+#define SW_IOCTL_ENABLE_IGMPSNOOP       0x000A
+#define SW_IOCTL_DISABLE_IGMPSNOOP      0x000B
+#define SW_IOCTL_SET_PORT_TRUNK         0x000C
+#define SW_IOCTL_GET_PORT_TRUNK         0x000D
+#define SW_IOCTL_SET_PORT_MIRROR        0x000E
+#define SW_IOCTL_GET_PHY_STATUS         0x000F
+#define SW_IOCTL_READ_REG               0x0010
+#define SW_IOCTL_WRITE_REG              0x0011
+#define SW_IOCTL_QOS_EN                 0x0012
+#define SW_IOCTL_QOS_SET_TABLE2TYPE     0x0013
+#define SW_IOCTL_QOS_GET_TABLE2TYPE     0x0014
+#define SW_IOCTL_QOS_SET_PORT2TABLE     0x0015
+#define SW_IOCTL_QOS_GET_PORT2TABLE     0x0016
+#define SW_IOCTL_QOS_SET_PORT2PRI       0x0017
+#define SW_IOCTL_QOS_GET_PORT2PRI       0x0018
+#define SW_IOCTL_QOS_SET_DSCP2PRI       0x0019
+#define SW_IOCTL_QOS_GET_DSCP2PRI       0x001a
+#define SW_IOCTL_QOS_SET_PRI2QUEUE      0x001b
+#define SW_IOCTL_QOS_GET_PRI2QUEUE      0x001c
+#define SW_IOCTL_QOS_SET_QUEUE_WEIGHT   0x001d
+#define SW_IOCTL_QOS_GET_QUEUE_WEIGHT   0x001e
+#define SW_IOCTL_SET_PHY_TEST_MODE      0x001f
+#define SW_IOCTL_GET_PHY_REG            0x0020
+#define SW_IOCTL_SET_PHY_REG            0x0021
+#define SW_IOCTL_VLAN_TAG               0x0022
+#define SW_IOCTL_CLEAR_TABLE            0x0023
+#define SW_IOCTL_CLEAR_VLAN             0x0024
+#define SW_IOCTL_SET_VLAN_MODE          0x0025
+
+/*****************QDMA IOCTL DATA (qdma_ioctl_data.cmd)*************/
+#define RAETH_QDMA_REG_READ		0x0000
+#define RAETH_QDMA_REG_WRITE		0x0001
+#define RAETH_QDMA_QUEUE_MAPPING        0x0002
+#define RAETH_QDMA_READ_CPU_CLK         0x0003
+/*********************************************/
+/******************EPHY IOCTL DATA (ephy_ioctl_data.cmd)************/
+/*MT7622 10/100 phy cal*/
+#define RAETH_VBG_IEXT_CALIBRATION	0x0000
+#define RAETH_TXG_R50_CALIBRATION	0x0001
+#define RAETH_TXG_OFFSET_CALIBRATION	0x0002
+#define RAETH_TXG_AMP_CALIBRATION	0x0003
+#define GE_TXG_R50_CALIBRATION		0x0004
+#define GE_TXG_OFFSET_CALIBRATION	0x0005
+#define GE_TXG_AMP_CALIBRATION		0x0006
+/*********************************************/
+#define REG_ESW_WT_MAC_MFC              0x10
+#define REG_ESW_ISC                     0x18
+#define REG_ESW_WT_MAC_ATA1             0x74
+#define REG_ESW_WT_MAC_ATA2             0x78
+#define REG_ESW_WT_MAC_ATWD             0x7C
+#define REG_ESW_WT_MAC_ATC              0x80
+
+#define REG_ESW_TABLE_TSRA1		0x84
+#define REG_ESW_TABLE_TSRA2		0x88
+#define REG_ESW_TABLE_ATRD		0x8C
+
+#define REG_ESW_VLAN_VTCR		0x90
+#define REG_ESW_VLAN_VAWD1		0x94
+#define REG_ESW_VLAN_VAWD2		0x98
+
+#if defined(CONFIG_MACH_MT7623)
+#define REG_ESW_VLAN_ID_BASE		0x100
+#else
+#define REG_ESW_VLAN_ID_BASE          0x50
+#endif
+#define REG_ESW_VLAN_MEMB_BASE		0x70
+#define REG_ESW_TABLE_SEARCH		0x24
+#define REG_ESW_TABLE_STATUS0		0x28
+#define REG_ESW_TABLE_STATUS1		0x2C
+#define REG_ESW_TABLE_STATUS2		0x30
+#define REG_ESW_WT_MAC_AD0		0x34
+#define REG_ESW_WT_MAC_AD1		0x38
+#define REG_ESW_WT_MAC_AD2		0x3C
+
+#if defined(CONFIG_MACH_MT7623)
+#define REG_ESW_MAX         0xFC
+#else
+#define REG_ESW_MAX			0x16C
+#endif
+#define REG_HQOS_MAX			0x3FFF
+
+/* generic switch-register offset/value pair for read/write ioctls */
+struct esw_reg {
+	unsigned int off;	/* register offset */
+	unsigned int val;	/* register value (in for write, out for read) */
+};
+
+/* MII access descriptor shared by clause-22 and clause-45 ioctls */
+struct ra_mii_ioctl_data {
+	__u32 phy_id;	/* PHY address (clause 22) */
+	__u32 reg_num;	/* register number (clause 22) */
+	__u32 val_in;	/* value to write */
+	__u32 val_out;	/* value read back */
+	__u32 port_num;	/* port/PHY address for clause-45 access */
+	__u32 dev_addr;	/* MMD device address (clause 45) */
+	__u32 reg_addr;	/* MMD register address (clause 45) */
+};
+
+/* argument block for RAETH_SW_IOCTL; which fields matter depends on cmd */
+struct ra_switch_ioctl_data {
+	unsigned int cmd;	/* one of SW_IOCTL_* */
+	unsigned int on_off;
+	unsigned int port;
+	unsigned int bw;
+	unsigned int vid;
+	unsigned int fid;
+	unsigned int port_map;
+	unsigned int rx_port_map;
+	unsigned int tx_port_map;
+	unsigned int igmp_query_interval;
+	unsigned int reg_addr;
+	unsigned int reg_val;
+	unsigned int mode;
+	unsigned int qos_queue_num;
+	unsigned int qos_type;
+	unsigned int qos_pri;
+	unsigned int qos_dscp;
+	unsigned int qos_table_idx;
+	unsigned int qos_weight;
+	unsigned char mac[6];
+};
+
+struct qdma_ioctl_data {
+	unsigned int cmd;	/* one of RAETH_QDMA_* */
+	unsigned int off;
+	unsigned int val;
+};
+
+struct ephy_ioctl_data {
+	unsigned int cmd;	/* one of the PHY calibration commands above */
+};
+
+struct esw_rate {
+	unsigned int on_off;
+	unsigned int port;
+	unsigned int bw;	/*Mbps */
+};
+#endif	/* _RAETH_IOCTL_H */
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_mac.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_mac.c
new file mode 100644
index 0000000..ad822bb
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_mac.c
@@ -0,0 +1,179 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "raether.h"
+
+void enable_auto_negotiate(struct END_DEVICE *ei_local)	/* configure the switch's PHY auto-polling address window and enable it */
+{
+	u32 reg_value;	/* working copy of ESW_PHY_POLLING */
+	pr_info("=================================\n");
+	pr_info("enable_auto_negotiate\n");
+
+	/* FIXME: we don't know how to deal with PHY end addr */
+	reg_value = sys_reg_read(ESW_PHY_POLLING);
+	reg_value |= (1U << 31);	/* enable bit; 1U — shifting signed 1 into bit 31 is undefined behavior */
+	reg_value &= ~(0x1f);	/* clear start-address field [4:0] */
+	reg_value &= ~(0x1f << 8);	/* clear end-address field [12:8] */
+
+	if (ei_local->architecture & (GE2_RGMII_AN | GE2_SGMII_AN)) {
+		/* setup PHY address for auto polling (Start Addr). */
+		/* avoid end phy address = 0 */
+		reg_value |= ((mac_to_gigaphy_mode_addr2 - 1) & 0x1f);
+		/* setup PHY address for auto polling (End Addr). */
+		reg_value |= (mac_to_gigaphy_mode_addr2 << 8);
+	} else if (ei_local->architecture & (GE1_RGMII_AN | GE1_SGMII_AN | LEOPARD_EPHY)) {
+		/* setup PHY address for auto polling (Start Addr). */
+		reg_value |= (mac_to_gigaphy_mode_addr << 0);
+		/* setup PHY address for auto polling (End Addr). */
+		reg_value |= ((mac_to_gigaphy_mode_addr + 1) << 8);
+	}
+
+	sys_reg_write(ESW_PHY_POLLING, reg_value);
+}
+
+void ra2880stop(struct END_DEVICE *ei_local)	/* halt the frame-engine DMA (TX writeback, RX and TX engines) */
+{
+	unsigned int glo_cfg;	/* snapshot of DMA_GLO_CFG */
+
+	pr_info("ra2880stop()...");
+
+	glo_cfg = sys_reg_read(DMA_GLO_CFG);
+	glo_cfg &= ~(TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);	/* disable all three engines at once */
+	sys_reg_write(DMA_GLO_CFG, glo_cfg);
+
+	pr_info("Done\n");
+}
+
+void set_mac_address(unsigned char p[6])	/* program GDMA1's unicast MAC address registers */
+{
+	unsigned long mac_hi, mac_lo;
+
+	mac_hi = (p[0] << 8) | p[1];	/* bytes 0-1 go to the high register */
+	mac_lo = (unsigned long)((p[2] << 24) | (p[3] << 16) | (p[4] << 8) | p[5]);	/* bytes 2-5 */
+
+	sys_reg_write(GDMA1_MAC_ADRH, mac_hi);
+	sys_reg_write(GDMA1_MAC_ADRL, mac_lo);
+}
+
+void set_mac2_address(unsigned char p[6])	/* program GDMA2's unicast MAC address registers */
+{
+	unsigned long mac_hi, mac_lo;
+
+	mac_hi = (p[0] << 8) | p[1];	/* bytes 0-1 go to the high register */
+	mac_lo = (unsigned long)((p[2] << 24) | (p[3] << 16) | (p[4] << 8) | p[5]);	/* bytes 2-5 */
+
+	sys_reg_write(GDMA2_MAC_ADRH, mac_hi);
+	sys_reg_write(GDMA2_MAC_ADRL, mac_lo);
+}
+
+static int getnext(const char *src, int separator, char *dest)	/* copy chars up to separator into dest; return offset past it, or -1 */
+{
+	char *sep;	/* first occurrence of separator, or NULL */
+	int span;	/* number of characters before the separator */
+
+	if (!src || !dest)
+		return -1;	/* nothing to parse from/into */
+
+	sep = strchr(src, separator);
+	if (!sep) {	/* no separator left: copy the remaining tail */
+		strcpy(dest, src);	/* NOTE(review): unbounded copy — caller must size dest for the whole string */
+		return -1;
+	}
+	span = sep - src;
+	memcpy(dest, src, span);	/* no NUL can occur before sep, so this equals the original strncpy */
+	dest[span] = '\0';
+	return span + 1;	/* advance past the separator */
+}
+
+int str_to_ip(unsigned int *ip, const char *str)	/* parse dotted-quad "a.b.c.d" into host-order u32; 0 on success */
+{
+	int len;
+	const char *ptr = str;
+	char buf[128];	/* scratch for one dotted-quad component */
+	unsigned char c[4];
+	int i;
+	int ret;
+
+	for (i = 0; i < 3; ++i) {
+		len = getnext(ptr, '.', buf);
+		if (len == -1)
+			return 1;	/* parse error */
+
+		ret = kstrtou8(buf, 10, &c[i]);	/* fix: kstrtoul via a (unsigned long *) cast wrote sizeof(long) bytes into a 1-byte slot */
+		if (ret)
+			return ret;
+
+		ptr += len;
+	}
+	ret = kstrtou8(ptr, 0, &c[3]);	/* base 0 kept as before (accepts 0x../0.. forms for the last octet) */
+	if (ret)
+		return ret;
+
+	*ip = (c[0] << 24) + (c[1] << 16) + (c[2] << 8) + c[3];
+
+	return 0;
+}
+
+void set_ge1_force_1000(void)	/* put GMAC1 into forced mode */
+{
+	sys_reg_write(ETHDMASYS_ETH_MAC_BASE + 0x100, 0x2105e33b);	/* GMAC1 MCR; presumably forced 1000M full duplex — TODO confirm bit meanings */
+}
+
+void set_ge2_force_1000(void)	/* put GMAC2 into forced mode */
+{
+	sys_reg_write(ETHDMASYS_ETH_MAC_BASE + 0x200, 0x2105e33b);	/* GMAC2 MCR; same value as the GMAC1 variant */
+}
+
+void set_ge1_an(void)	/* put GMAC1 into auto-negotiation mode */
+{
+	sys_reg_write(ETHDMASYS_ETH_MAC_BASE + 0x100, 0x21056300);	/* GMAC1 MCR; AN-mode value — TODO confirm bit meanings */
+}
+
+void set_ge2_an(void)	/* put GMAC2 into auto-negotiation mode */
+{
+	sys_reg_write(ETHDMASYS_ETH_MAC_BASE + 0x200, 0x21056300);	/* GMAC2 MCR; same value as the GMAC1 variant */
+}
+
+void set_ge2_gmii(void)	/* switch GE2's ETHSYS mux to (R)GMII mode */
+{
+	void __iomem *virt_addr;
+	unsigned int reg_value;
+
+	virt_addr = ioremap(ETHSYS_BASE, 0x20);	/* NOTE(review): return value unchecked; a failed ioremap would oops below */
+	reg_value = sys_reg_read(virt_addr + 0x14);
+	/* clears bits [15:14] and [8], then sets bit 14 — presumably [15:14]=RGMII/mode select, [8]=SGMII enable; TODO confirm against ETHSYS datasheet */
+	reg_value = reg_value & (~0xc100);
+	reg_value = reg_value | 0x4000;
+	sys_reg_write(virt_addr + 0x14, reg_value);
+	iounmap(virt_addr);
+}
+
+void set_ge0_gmii(void)	/* switch GE0's ETHSYS mux to GMII mode */
+{
+	void __iomem *virt_addr;
+	unsigned int reg_value;
+
+	virt_addr = ioremap(ETHSYS_BASE, 0x20);	/* NOTE(review): return value unchecked; a failed ioremap would oops below */
+	reg_value = sys_reg_read(virt_addr + 0x14);
+	/* clears bits [15:14], then sets bit 10 — differs from set_ge2_gmii's bit choice; TODO confirm against ETHSYS datasheet */
+	reg_value = reg_value & (~0xc000);
+	reg_value = reg_value | 0x400;
+	sys_reg_write(virt_addr + 0x14, reg_value);
+	iounmap(virt_addr);
+}
+
+void set_ge2_force_link_down(void)	/* force GMAC2 link down */
+{
+	sys_reg_write(ETHDMASYS_ETH_MAC_BASE + 0x200, 0x2105e300);	/* GMAC2 MCR; low byte cleared vs the force-1000 value (0x2105e33b) */
+}
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_mac.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_mac.h
new file mode 100644
index 0000000..c329703
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_mac.h
@@ -0,0 +1,30 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef RA_MAC_H
+#define RA_MAC_H
+
+void ra2880stop(struct END_DEVICE *ei_local);	/* halt frame-engine DMA */
+void set_mac_address(unsigned char p[6]);	/* program GDMA1 MAC address */
+void set_mac2_address(unsigned char p[6]);	/* program GDMA2 MAC address */
+int str_to_ip(unsigned int *ip, const char *str);	/* parse dotted-quad string; 0 on success */
+void enable_auto_negotiate(struct END_DEVICE *ei_local);	/* enable switch PHY auto-polling */
+void set_ge1_force_1000(void);
+void set_ge2_force_1000(void);
+void set_ge1_an(void);
+void set_ge2_an(void);
+void set_ge2_gmii(void);
+void set_ge0_gmii(void);
+void set_ge2_force_link_down(void);
+#endif
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_switch.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_switch.c
new file mode 100644
index 0000000..f677a8c
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_switch.c
@@ -0,0 +1,4249 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "raether.h"
+#include "ra_switch.h"
+#include "ra_mac.h"
+#include "raeth_reg.h"
+
+#define MT7622_CHIP_ID 0x08000008
+
+void reg_bit_zero(void __iomem *addr, unsigned int bit, unsigned int len)	/* clear bit field [bit, bit+len) in a MMIO register */
+{
+	unsigned int reg_val;	/* unsigned (was int): ~(1 << 31) on a signed int is undefined behavior; now matches reg_bit_one */
+	unsigned int i;
+
+	reg_val = sys_reg_read(addr);
+	for (i = 0; i < len; i++)
+		reg_val &= ~(1u << (bit + i));	/* 1u avoids shifting into the sign bit */
+	sys_reg_write(addr, reg_val);
+}
+
+void reg_bit_one(void __iomem *addr, unsigned int bit, unsigned int len)	/* set bit field [bit, bit+len) in a MMIO register */
+{
+	unsigned int reg_val;
+	unsigned int i;
+
+	reg_val = sys_reg_read(addr);
+	for (i = 0; i < len; i++)
+		reg_val |= 1u << (bit + i);	/* 1u (was 1): shifting signed 1 by 31 is undefined behavior */
+	sys_reg_write(addr, reg_val);
+}
+
+u8 fe_cal_flag;	/* 1 once FE TX-amplitude cal (MDI path) finished — set in fe_cal_tx_amp() */
+u8 fe_cal_flag_mdix;	/* 1 once FE TX-amplitude cal (MDIX path) finished — set in fe_cal_tx_amp_mdix() */
+u8 fe_cal_tx_offset_flag;
+u8 fe_cal_tx_offset_flag_mdix;
+u8 fe_cal_r50_flag;
+u8 fe_cal_vbg_flag;
+u32 iext_cal_result;
+u32 r50_p0_cal_result;
+u8 ge_cal_r50_flag;
+u8 ge_cal_tx_offset_flag;
+u8 ge_cal_flag;
+int show_time;
+static u8 ephy_addr_base;	/* PHY address offset: MDIO address = port_num + ephy_addr_base */
+
+/* zcal-code -> trim-code lookup tables; presumably empirical MTK calibration data ("50ohm_new") */
+const u8 ZCAL_TO_R50OHM_TBL_100[64] = {
+	127, 121, 116, 115, 111, 109, 108, 104,
+	102, 99, 97, 96, 77, 76, 73, 72,
+	70, 69, 67, 66, 47, 46, 45, 43,
+	42, 41, 40, 38, 37, 36, 35, 34,
+	32, 16, 15, 14, 13, 12, 11, 10,
+	9, 8, 7, 6, 6, 5, 4, 4,
+	3, 2, 2, 1, 1, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0
+};
+
+const u8 ZCAL_TO_R50ohm_GE_TBL_100[64] = {
+	63, 63, 63, 63, 63, 63, 63, 63,
+	63, 63, 63, 63, 63, 63, 63, 60,
+	57, 55, 53, 51, 48, 46, 44, 42,
+	40, 38, 37, 36, 34, 32, 30, 28,
+	27, 26, 25, 23, 22, 21, 19, 18,
+	16, 15, 14, 13, 12, 11, 10, 9,
+	8, 7, 6, 5, 4, 4, 3, 2,
+	1, 0, 0, 0, 0, 0, 0, 0
+};
+
+const u8 ZCAL_TO_R50ohm_GE_TBL[64] = {	/* identical contents to the _100 variant above */
+	63, 63, 63, 63, 63, 63, 63, 63,
+	63, 63, 63, 63, 63, 63, 63, 60,
+	57, 55, 53, 51, 48, 46, 44, 42,
+	40, 38, 37, 36, 34, 32, 30, 28,
+	27, 26, 25, 23, 22, 21, 19, 18,
+	16, 15, 14, 13, 12, 11, 10, 9,
+	8, 7, 6, 5, 4, 4, 3, 2,
+	1, 0, 0, 0, 0, 0, 0, 0
+};
+
+const u8 ZCAL_TO_REXT_TBL[64] = {
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 1, 1, 1, 1, 1,
+	1, 2, 2, 2, 2, 2, 2, 3,
+	3, 3, 3, 3, 3, 4, 4, 4,
+	4, 4, 4, 4, 5, 5, 5, 5,
+	5, 5, 6, 6, 6, 6, 6, 6,
+	7, 7, 7, 7, 7, 7, 7, 7,
+	7, 7, 7, 7, 7, 7, 7, 7
+};
+
+const u8 ZCAL_TO_FILTER_TBL[64] = {
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 1, 1,
+	1, 2, 2, 2, 3, 3, 3, 4,
+	4, 4, 4, 5, 5, 5, 6, 6,
+	7, 7, 7, 8, 8, 8, 9, 9,
+	9, 10, 10, 10, 11, 11, 11, 11,
+	12, 12, 12, 12, 12, 12, 12, 12
+};
+
+void tc_phy_write_g_reg(u8 port_num, u8 page_num,
+			u8 reg_num, u32 reg_data)	/* write a PHY register in the paged "global" space */
+{
+	u32 page_sel;	/* value for register 31, the page selector */
+
+	page_sel = 0;	/* bit15 = 0 selects the global register space */
+	page_sel |= (page_num & 0x7) << 12;	/* global page index */
+	mii_mgr_write(port_num, 31, page_sel);	/* switch to the global page */
+	mii_mgr_write(port_num, reg_num, reg_data);	/* then write the target register */
+}
+
+void tc_phy_write_l_reg(u8 port_no, u8 page_no,
+			u8 reg_num, u32 reg_data)	/* write a PHY register in the paged "local" space */
+{
+	u32 page_sel;	/* value for register 31, the page selector */
+
+	page_sel = 1 << 15;	/* bit15 = 1 selects the local register space */
+	page_sel |= (page_no & 0x7) << 12;	/* local page index */
+	mii_mgr_write(port_no, 31, page_sel);	/* switch to the local page */
+	mii_mgr_write(port_no, reg_num, reg_data);	/* then write the target register */
+}
+
+u32 tc_phy_read_g_reg(u8 port_num, u8 page_num, u8 reg_num)	/* read a PHY register from the paged "global" space */
+{
+	u32 value;	/* read-back data */
+	u32 page_sel;	/* value for register 31, the page selector */
+
+	page_sel = 0;	/* bit15 = 0 selects the global register space */
+	page_sel |= (page_num & 0x7) << 12;	/* global page index */
+
+	mii_mgr_write(port_num, 31, page_sel);	/* switch to the global page */
+	mii_mgr_read(port_num, reg_num, &value);	/* then fetch the target register */
+	return value;
+}
+
+u32 tc_phy_read_l_reg(u8 port_no, u8 page_no, u8 reg_num)	/* read a PHY register from the paged "local" space */
+{
+	u32 value;	/* read-back data */
+	u32 page_sel;	/* value for register 31, the page selector */
+
+	page_sel = 1 << 15;	/* bit15 = 1 selects the local register space */
+	page_sel |= (page_no & 0x7) << 12;	/* local page index */
+	mii_mgr_write(port_no, 31, page_sel);	/* switch to the local page */
+	mii_mgr_read(port_no, reg_num, &value);	/* then fetch the target register */
+	return value;
+}
+
+u32 tc_phy_read_dev_reg(u32 port_num, u32 dev_addr, u32 reg_addr)	/* clause-45 MDIO read (MMD dev_addr/reg_addr) */
+{
+	u32 phy_val;
+
+	mii_mgr_read_cl45(port_num, dev_addr, reg_addr, &phy_val);
+	return phy_val;
+}
+
+void tc_phy_write_dev_reg(u32 port_num, u32 dev_addr, u32 reg_addr, u32 write_data)	/* clause-45 MDIO write (MMD dev_addr/reg_addr) */
+{
+	mii_mgr_write_cl45(port_num, dev_addr, reg_addr, write_data);
+}
+
+u32 tc_mii_read(u32 phy_addr, u32 phy_register)	/* plain clause-22 MDIO read */
+{
+	u32 phy_val;
+
+	mii_mgr_read(phy_addr, phy_register, &phy_val);
+	return phy_val;
+}
+
+void tc_mii_write(u32 phy_addr, u32 phy_register, u32 write_data)	/* plain clause-22 MDIO write */
+{
+	mii_mgr_write(phy_addr, phy_register, write_data);
+}
+
+void clear_ckinv_ana_txvos(void)	/* tear down the analog-cal control bits in g7r24 after a calibration pass */
+{
+	u16 g7r24_tmp;
+	/* clear RG_CAL_CKINV / RG_ANA_CALEN / RG_TXVOS_CALEN one bit at a time (read-modify-write each step) */
+	/* g7r24[13]:0x0, RG_ANA_CALEN_P0 */
+	g7r24_tmp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, (g7r24_tmp & (~0x2000)));
+
+	/* g7r24[14]:0x0, RG_CAL_CKINV_P0 */
+	g7r24_tmp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, (g7r24_tmp & (~0x4000)));
+
+	/* g7r24[12]:0x0, DA_TXVOS_CALEN_P0 */
+	g7r24_tmp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, (g7r24_tmp & (~0x1000)));
+	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, 0);	/* finally zero the whole control register */
+}
+
+u8 all_fe_ana_cal_wait_txamp(u32 delay, u8 port_num)	/* strobe cal-enable and poll for completion; returns 1 = done, 0 = timeout */
+{				/* for EN7512 FE // allen_20160616 */
+	u8 all_ana_cal_status;
+	u16 cnt, g7r24_temp;
+
+	tc_phy_write_l_reg(FE_CAL_COMMON, 4, 23, (0x0000));
+	g7r24_temp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, g7r24_temp & (~0x10));	/* toggle g7r24[4] low... */
+	g7r24_temp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, g7r24_temp | 0x10);	/* ...then high: pulse the cal-start strobe */
+
+	cnt = 1000;	/* up to 1000 polls of `delay` microseconds each */
+	do {
+		udelay(delay);
+		cnt--;
+		all_ana_cal_status =
+		    ((tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) >> 1) & 0x1);	/* g7r24[1] = cal-done flag */
+	} while ((all_ana_cal_status == 0) && (cnt != 0));
+
+	tc_phy_write_l_reg(FE_CAL_COMMON, 4, 23, (0x0000));
+	tc_phy_write_l_reg(port_num, 4, 23, (0x0000));
+	g7r24_temp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, g7r24_temp & (~0x10));	/* drop the strobe again */
+	return all_ana_cal_status;
+}
+
+u8 all_fe_ana_cal_wait(u32 delay, u8 port_num)	/* like all_fe_ana_cal_wait_txamp, but also clears the per-port l4r23 first; 1 = done, 0 = timeout */
+{
+	u8 all_ana_cal_status;
+	u16 cnt, g7r24_temp;
+
+	tc_phy_write_l_reg(FE_CAL_COMMON, 4, 23, (0x0000));
+	tc_phy_write_l_reg(port_num, 4, 23, (0x0000));
+
+	g7r24_temp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, g7r24_temp & (~0x10));	/* toggle g7r24[4] low... */
+	g7r24_temp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, g7r24_temp | 0x10);	/* ...then high: pulse the cal-start strobe */
+	cnt = 1000;	/* up to 1000 polls of `delay` microseconds each */
+	do {
+		udelay(delay);
+		cnt--;
+		all_ana_cal_status =
+		    ((tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) >> 1) & 0x1);	/* g7r24[1] = cal-done flag */
+
+	} while ((all_ana_cal_status == 0) && (cnt != 0));
+
+	tc_phy_write_l_reg(FE_CAL_COMMON, 4, 23, (0x0000));
+	tc_phy_write_l_reg(port_num, 4, 23, (0x0000));
+	g7r24_temp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, g7r24_temp & (~0x10));	/* drop the strobe again */
+
+	return all_ana_cal_status;
+}
+
+void fe_cal_tx_amp(u8 port_num, u32 delay)	/* iterative FE TX-amplitude calibration (MDI path) for one port; delay = poll interval in us */
+{
+	u8 all_ana_cal_status;
+	int ad_cal_comp_out_init;	/* comparator's initial state; decides search direction */
+	u16 l3r25_temp, l0r26_temp, l2r20_temp;
+	u16 l2r23_temp = 0;	/* DA_TX_I2MPB code under test (l2r23[5:0]) */
+	int calibration_polarity;
+	u8 tx_amp_reg_shift = 0;
+	int tx_amp_temp = 0, cnt = 0, phyaddr, tx_amp_cnt = 0;
+	u16 tx_amp_final;
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);	/* only chip_name is used below */
+
+	phyaddr = port_num + ephy_addr_base;
+	tx_amp_temp = 0x20;	/* start the search mid-scale of the 6-bit code */
+	/* *** Tx Amp Cal start ********************** */
+
+/*Set device in 100M mode*/
+	tc_phy_write_l_reg(port_num, 0, 0, 0x2100);
+/*TXG output DC differential 1V*/
+	tc_phy_write_g_reg(port_num, 2, 25, 0x10c0);
+
+	tc_phy_write_g_reg(port_num, 1, 26, (0x8000 | DAC_IN_2V));
+	tc_phy_write_g_reg(port_num, 4, 21, (0x0800));	/* set default */
+	tc_phy_write_l_reg(port_num, 0, 30, (0x02c0));
+	tc_phy_write_l_reg(port_num, 4, 21, (0x0000));
+
+	tc_phy_write_l_reg(FE_CAL_COMMON, 3, 25, (0xc800));
+	tc_phy_write_l_reg(port_num, 3, 25, (0xc800));
+
+	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, 0x7000);	/* enable CKINV/ANA_CALEN/TXVOS_CALEN */
+
+	l3r25_temp = tc_phy_read_l_reg(port_num, 3, 25);
+	tc_phy_write_l_reg(port_num, 3, 25, (l3r25_temp | 0x400));	/* RG_TXG_CALEN = 1 */
+
+	/*decide which port calibration RG_ZCALEN by port_num*/
+	l3r25_temp = tc_phy_read_l_reg(port_num, 3, 25);
+	l3r25_temp = l3r25_temp | 0x1000;
+	l3r25_temp = l3r25_temp & ~(0x200);
+	tc_phy_write_l_reg(port_num, 3, 25, l3r25_temp);
+
+	/*DA_PGA_MDIX_STASTUS_P0=0(L0R26[15:14] = 0x01*/
+	l0r26_temp = tc_phy_read_l_reg(port_num, 0, 26);
+	l0r26_temp = l0r26_temp & (~0xc000);	/* NOTE(review): this read-modify value is discarded by the fixed write below */
+	tc_phy_write_l_reg(port_num, 0, 26, 0x5203);/* Kant */
+
+	/*RG_RX2TX_EN_P0=0(L2R20[10] =0),*/
+	l2r20_temp = tc_phy_read_l_reg(port_num, 2, 20);
+	l2r20_temp = l2r20_temp & (~0x400);
+	tc_phy_write_l_reg(port_num, 2, 20, l2r20_temp);
+	tc_phy_write_l_reg(port_num, 2, 23, (tx_amp_temp));	/* load the initial amplitude code */
+
+	all_ana_cal_status = all_fe_ana_cal_wait_txamp(delay, port_num);
+
+	if (all_ana_cal_status == 0) {
+		all_ana_cal_status = ANACAL_ERROR;
+		pr_info(" FE Tx amp AnaCal ERROR! (init)  \r\n");
+	}
+
+	tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);	/* dummy read before sampling the comparator — presumably to settle the bus */
+	ad_cal_comp_out_init = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1;
+
+	if (ad_cal_comp_out_init == 1)
+		calibration_polarity = -1;	/* step the code down */
+	else
+		calibration_polarity = 1;	/* step the code up */
+
+	tx_amp_temp += calibration_polarity;
+	cnt = 0;
+	tx_amp_cnt = 0;
+	while (all_ana_cal_status < ANACAL_ERROR) {	/* loop until FINISH, ERROR or SATURATION */
+		tc_phy_write_l_reg(port_num, 2, 23, (tx_amp_temp));
+		l2r23_temp = tc_phy_read_l_reg(port_num, 2, 23);
+		cnt++;
+		tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+		tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+		all_ana_cal_status = all_fe_ana_cal_wait_txamp(delay, port_num);
+
+		if (((tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24)) & 0x1) !=
+		    ad_cal_comp_out_init) {	/* comparator flipped: crossed the target amplitude */
+			all_ana_cal_status = ANACAL_FINISH;
+			fe_cal_flag = 1;
+		}
+		if (all_ana_cal_status == 0) {
+			all_ana_cal_status = ANACAL_ERROR;
+			pr_info(" FE Tx amp AnaCal ERROR! (%d)  \r\n", cnt);
+		} else if ((tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1) !=
+			   ad_cal_comp_out_init) {	/* re-check; re-arms with the new comparator state */
+			tx_amp_cnt++;
+			all_ana_cal_status = ANACAL_FINISH;
+			tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+			tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+			ad_cal_comp_out_init =
+			    tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1;
+		} else {
+			if ((l2r23_temp == 0x3f) || (l2r23_temp == 0x00)) {	/* code hit either end of the 6-bit range */
+				all_ana_cal_status = ANACAL_SATURATION;
+				pr_info
+				    (" Tx amp Cal Saturation(%d)(%x)(%x)\r\n",
+				     cnt, tc_phy_read_l_reg(0, 3, 25),
+				     tc_phy_read_l_reg(1, 3, 25));
+				pr_info
+				    (" Tx amp Cal Saturation(%x)(%x)(%x)\r\n",
+				     tc_phy_read_l_reg(2, 3, 25),
+				     tc_phy_read_l_reg(3, 3, 25),
+				     tc_phy_read_l_reg(0, 2, 30));
+				/* tx_amp_temp += calibration_polarity; */
+			} else {
+				tx_amp_temp += calibration_polarity;	/* keep stepping toward the crossover */
+			}
+		}
+	}
+
+	if ((all_ana_cal_status == ANACAL_ERROR) ||
+	    (all_ana_cal_status == ANACAL_SATURATION)) {
+		l2r23_temp = tc_phy_read_l_reg(port_num, 2, 23);
+		tc_phy_write_l_reg(port_num, 2, 23,
+				   ((tx_amp_temp << tx_amp_reg_shift)));	/* fall back to the last tried code */
+		l2r23_temp = tc_phy_read_l_reg(port_num, 2, 23);
+		pr_info("[%d] %s, ANACAL_SATURATION\n", port_num, __func__);
+	} else {
+		if (ei_local->chip_name == MT7622_FE) {	/* per-port trim offsets; presumably empirical factory values */
+			if (port_num == 0)
+				l2r23_temp = l2r23_temp + 10;
+			else if (port_num == 1)
+				l2r23_temp = l2r23_temp + 11;
+			else if (port_num == 2)
+				l2r23_temp = l2r23_temp + 10;
+			else if (port_num == 3)
+				l2r23_temp = l2r23_temp + 9;
+			else if (port_num == 4)
+				l2r23_temp = l2r23_temp + 10;
+		} else if (ei_local->chip_name == LEOPARD_FE) {
+			if (port_num == 1)
+				l2r23_temp = l2r23_temp + 3;
+			else if (port_num == 2)
+				l2r23_temp = l2r23_temp + 3;
+			else if (port_num == 3)
+				l2r23_temp = l2r23_temp + 3 - 2;
+			else if (port_num == 4)
+				l2r23_temp = l2r23_temp + 2 - 1 + 2;
+		}
+
+		tc_phy_write_l_reg(port_num, 2, 23, ((l2r23_temp) << tx_amp_reg_shift));
+		fe_cal_flag = 1;
+	}
+
+	tx_amp_final = tc_phy_read_l_reg(port_num, 2, 23) & 0x3f;
+	tc_phy_write_l_reg(port_num, 2, 24, ((tx_amp_final + 15)  << 8) | 0x20);	/* derive the l2r24 companion code from the result */
+
+	if (ei_local->chip_name == LEOPARD_FE) {	/* Leopard gets extra per-port adjustment of the companion code */
+		if (port_num == 1)
+			tc_phy_write_l_reg(port_num, 2, 24, ((tx_amp_final + 15 - 4)  << 8) | 0x20);
+		else if (port_num == 2)
+			tc_phy_write_l_reg(port_num, 2, 24, ((tx_amp_final + 15 + 2)  << 8) | 0x20);
+		else if (port_num == 3)
+			tc_phy_write_l_reg(port_num, 2, 24, ((tx_amp_final + 15 + 4)  << 8) | 0x20);
+		else if (port_num == 4)
+			tc_phy_write_l_reg(port_num, 2, 24, ((tx_amp_final + 15 + 4)  << 8) | 0x20);
+	}
+
+	pr_info("[%d] - tx_amp_final = 0x%x\n", port_num, tx_amp_final);
+
+	/*clear RG_CAL_CKINV/RG_ANA_CALEN/RG_TXVOS_CALEN*/
+	clear_ckinv_ana_txvos();
+
+	tc_phy_write_l_reg(port_num, 3, 25, 0x0000);
+	tc_phy_write_l_reg(FE_CAL_COMMON, 3, 25, 0x0000);
+	tc_phy_write_g_reg(port_num, 1, 26, 0);	/* release the DAC test input */
+	/* *** Tx Amp Cal end *** */
+}
+
+void fe_cal_tx_amp_mdix(u8 port_num, u32 delay)	/* FE TX-amplitude calibration for the MDIX (crossover) path; delay = poll interval in us */
+{
+	u8 all_ana_cal_status;
+	int ad_cal_comp_out_init;	/* comparator's initial state; decides search direction */
+	u16 l3r25_temp, l4r26_temp, l0r26_temp;
+	u16 l2r20_temp, l4r26_temp_amp;
+	int calibration_polarity;
+	int tx_amp_temp = 0, cnt = 0, phyaddr, tx_amp_cnt = 0;
+	u16 tx_amp_mdix_final;
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);	/* only chip_name is used below */
+
+	phyaddr = port_num + ephy_addr_base;
+	tx_amp_temp = 0x20;	/* start the search mid-scale of the 6-bit code */
+/*Set device in 100M mode*/
+	tc_phy_write_l_reg(port_num, 0, 0, 0x2100);
+/* NOTE(review): labeled "0V" here but writes the same 0x10c0 the MDI path labels "1V" — one of the two comments looks stale */
+	tc_phy_write_g_reg(port_num, 2, 25, 0x10c0);
+
+	tc_phy_write_g_reg(port_num, 1, 26, (0x8000 | DAC_IN_2V));
+	tc_phy_write_g_reg(port_num, 4, 21, (0x0800));	/* set default */
+	tc_phy_write_l_reg(port_num, 0, 30, (0x02c0));/*0x3f80  // l0r30[9], [7], [6], [1]*/
+	tc_phy_write_l_reg(port_num, 4, 21, (0x0000));
+	tc_phy_write_l_reg(FE_CAL_COMMON, 3, 25, (0xc800));
+	tc_phy_write_l_reg(port_num, 3, 25, (0xc800));	/* 0xca00 */
+	/* *** Tx Amp Cal start ********************** */
+	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, 0x7000);	/* enable CKINV/ANA_CALEN/TXVOS_CALEN */
+	/* pr_info(" g7r24[%d] = %x\n", port_num, tc_phy_read_g_reg(port_num, 7, 24)); */
+
+	/*RG_TXG_CALEN =1 l3r25[10]by port number*/
+	l3r25_temp = tc_phy_read_l_reg(port_num, 3, 25);
+	tc_phy_write_l_reg(port_num, 3, 25, (l3r25_temp | 0x400));
+	/*decide which port calibration RG_ZCALEN l3r25[12] by port_num*/
+	l3r25_temp = tc_phy_read_l_reg(port_num, 3, 25);
+	l3r25_temp = l3r25_temp | 0x1000;
+	l3r25_temp = l3r25_temp & ~(0x200);
+	tc_phy_write_l_reg(port_num, 3, 25, l3r25_temp);
+
+	/*DA_PGA_MDIX_STASTUS_P0=0(L0R26[15:14] = 0x10) & RG_RX2TX_EN_P0=0(L2R20[10] =1),*/
+	l0r26_temp = tc_phy_read_l_reg(port_num, 0, 26);
+	l0r26_temp = l0r26_temp & (~0xc000);	/* NOTE(review): this read-modify value is discarded by the fixed write below */
+	tc_phy_write_l_reg(port_num, 0, 26, 0x9203); /* Kant */
+	l2r20_temp = tc_phy_read_l_reg(port_num, 2, 20);
+	l2r20_temp = l2r20_temp | 0x400;	/* RX2TX loopback enabled for the MDIX path */
+	tc_phy_write_l_reg(port_num, 2, 20, l2r20_temp);
+
+	l3r25_temp = tc_phy_read_l_reg(port_num, 3, 25);
+	tc_phy_write_l_reg(port_num, 3, 25, (l3r25_temp | 0x0400));
+/*DA_TX_I2MPB_MDIX L4R26[5:0]*/
+	l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
+	/* pr_info("111l4r26 =%x\n", tc_phy_read_l_reg(port_num, 4, 26)); */
+	l4r26_temp = l4r26_temp & (~0x3f);
+	tc_phy_write_l_reg(port_num, 4, 26, (l4r26_temp | tx_amp_temp));	/* load the initial amplitude code */
+	/* pr_info("222l4r26 =%x\n", tc_phy_read_l_reg(port_num, 4, 26)); */
+	all_ana_cal_status = all_fe_ana_cal_wait_txamp(delay, port_num);
+
+	if (all_ana_cal_status == 0) {
+		all_ana_cal_status = ANACAL_ERROR;
+		pr_info(" FE Tx amp mdix AnaCal ERROR! (init)  \r\n");
+	}
+
+	tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);	/* dummy reads before sampling the comparator */
+	tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+	/*ad_cal_comp_out_init = (tc_phy_read_l_reg(FE_CAL_COMMON, 4, 23) >> 4) & 0x1;*/
+	ad_cal_comp_out_init = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1;
+	/* pr_info("mdix ad_cal_comp_out_init = %d\n", ad_cal_comp_out_init); */
+	if (ad_cal_comp_out_init == 1) {
+		calibration_polarity = -1;	/* step the code down */
+		/* tx_amp_temp = 0x10; */
+	} else {
+		calibration_polarity = 1;	/* step the code up */
+	}
+	tx_amp_temp += calibration_polarity;
+	cnt = 0;
+	tx_amp_cnt = 0;
+	while (all_ana_cal_status < ANACAL_ERROR) {	/* loop until FINISH, ERROR or SATURATION */
+		l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
+		l4r26_temp = l4r26_temp & (~0x3f);
+		tc_phy_write_l_reg(port_num, 4, 26, (l4r26_temp | tx_amp_temp));
+		l4r26_temp = (tc_phy_read_l_reg(port_num, 4, 26));
+		l4r26_temp_amp = (tc_phy_read_l_reg(port_num, 4, 26)) & 0x3f;
+		cnt++;
+
+		tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+		tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+		all_ana_cal_status = all_fe_ana_cal_wait_txamp(delay, port_num);
+
+		if (((tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24)) & 0x1) !=
+		    ad_cal_comp_out_init) {	/* comparator flipped: crossed the target amplitude */
+			all_ana_cal_status = ANACAL_FINISH;
+			fe_cal_flag_mdix = 1;
+		}
+		if (all_ana_cal_status == 0) {
+			all_ana_cal_status = ANACAL_ERROR;
+			pr_info(" FE Tx amp mdix AnaCal ERROR! (%d)  \r\n", cnt);
+		} else if (((tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24)) & 0x1) !=
+			   ad_cal_comp_out_init) {	/* re-check; re-arms with the new comparator state */
+			all_ana_cal_status = ANACAL_FINISH;
+			tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+			tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+			ad_cal_comp_out_init =
+			    (tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24)) & 0x1;
+		} else {
+			if ((l4r26_temp_amp == 0x3f) || (l4r26_temp_amp == 0x00)) {	/* code hit either end of the 6-bit range */
+				all_ana_cal_status = ANACAL_SATURATION;
+				pr_info
+				    (" Tx amp Cal mdix Saturation(%d)(%x)(%x)\r\n",
+				     cnt, tc_phy_read_l_reg(0, 3, 25),
+				     tc_phy_read_l_reg(1, 3, 25));
+				pr_info
+				    (" Tx amp Cal mdix Saturation(%x)(%x)(%x)\r\n",
+				     tc_phy_read_l_reg(2, 3, 25),
+				     tc_phy_read_l_reg(3, 3, 25),
+				     tc_phy_read_l_reg(0, 2, 30));
+				/* tx_amp_temp += calibration_polarity; */
+			} else {
+				tx_amp_temp += calibration_polarity;	/* keep stepping toward the crossover */
+			}
+		}
+	}
+
+	if ((all_ana_cal_status == ANACAL_ERROR) ||
+	    (all_ana_cal_status == ANACAL_SATURATION)) {
+		l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
+		pr_info(" FE-%d Tx amp AnaCal mdix Saturation! (%d)(l4r26=0x%x)  \r\n",
+			phyaddr, cnt, l4r26_temp);
+		tc_phy_write_l_reg(port_num, 4, 26,
+				   ((l4r26_temp & (~0x3f)) | tx_amp_temp));	/* fall back to the last tried code */
+		l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
+		pr_info(" FE-%d Tx amp AnaCal mdix Saturation! (%d)(l4r26=0x%x)  \r\n",
+			phyaddr, cnt, l4r26_temp);
+		pr_info("[%d] %s, ANACAL_SATURATION\n", port_num, __func__);
+	} else {
+		if (ei_local->chip_name == MT7622_FE) {	/* per-port trim offsets; presumably empirical factory values */
+			if (port_num == 0) {
+				l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
+				l4r26_temp = l4r26_temp + 10;
+			} else if (port_num == 1) {
+				l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
+				l4r26_temp = l4r26_temp + 11;
+			} else if (port_num == 2) {
+				l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
+				l4r26_temp = l4r26_temp + 9;
+			} else if (port_num == 3) {
+				l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
+				l4r26_temp = l4r26_temp + 9;
+			} else if (port_num == 4) {
+				l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
+				l4r26_temp = l4r26_temp + 9;
+			}
+		} else if (ei_local->chip_name == LEOPARD_FE) {
+			if (port_num == 1) {
+				l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
+				l4r26_temp = l4r26_temp + 4 - 2;
+			} else if (port_num == 2) {
+				l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
+				l4r26_temp = l4r26_temp + 3 - 1;
+			} else if (port_num == 3) {
+				l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
+				l4r26_temp = l4r26_temp + 4 - 3;
+			} else if (port_num == 4) {
+				l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
+				l4r26_temp = l4r26_temp + 4 - 2 + 1;
+			}
+		}
+		tc_phy_write_l_reg(port_num, 4, 26, l4r26_temp);
+		fe_cal_flag_mdix = 1;
+	}
+
+	tx_amp_mdix_final = tc_phy_read_l_reg(port_num, 4, 26) & 0x3f;
+	tc_phy_write_l_reg(port_num, 4, 27, ((tx_amp_mdix_final + 15) << 8) | 0x20);	/* derive the l4r27 companion code from the result */
+	if (ei_local->chip_name == LEOPARD_FE) {	/* Leopard gets extra per-port adjustment of the companion code */
+		if (port_num == 2)
+			tc_phy_write_l_reg(port_num, 4, 27,
+					   ((tx_amp_mdix_final + 15 + 1)  << 8) | 0x20);
+		else if (port_num == 3)
+			tc_phy_write_l_reg(port_num, 4, 27,
+					   ((tx_amp_mdix_final + 15 + 4)  << 8) | 0x20);
+		else if (port_num == 4)
+			tc_phy_write_l_reg(port_num, 4, 27,
+					   ((tx_amp_mdix_final + 15 + 4)  << 8) | 0x20);
+	}
+	pr_info("[%d] - tx_amp_mdix_final = 0x%x\n", port_num, tx_amp_mdix_final);
+
+	clear_ckinv_ana_txvos();
+	tc_phy_write_l_reg(port_num, 3, 25, 0x0000);
+	tc_phy_write_l_reg(FE_CAL_COMMON, 3, 25, 0x0000);
+	tc_phy_write_g_reg(port_num, 1, 26, 0);	/* release the DAC test input */
+	/* *** Tx Amp Cal end *** */
+}
+
+void fe_cal_tx_offset(u8 port_num, u32 delay)
+{
+	u8 all_ana_cal_status;
+	int ad_cal_comp_out_init;
+	u16 l3r25_temp, l2r20_temp;
+	u16 g4r21_temp, l0r30_temp, l4r17_temp, l0r26_temp;
+	int calibration_polarity, tx_offset_temp;
+	int cal_temp = 0;
+	u8 tx_offset_reg_shift;
+	u8 cnt = 0, phyaddr, tx_amp_cnt = 0;
+	u16 tx_offset_final;
+
+	phyaddr = port_num + ephy_addr_base;
+/*Set device in 100M mode*/
+	tc_phy_write_l_reg(port_num, 0, 0, 0x2100);
+
+	/*// g4r21[11]:Hw bypass tx offset cal, Fw cal*/
+	g4r21_temp = tc_phy_read_g_reg(port_num, 4, 21);
+	tc_phy_write_g_reg(port_num, 4, 21, (g4r21_temp | 0x0800));
+
+	/*l0r30[9], [7], [6], [1]*/
+	l0r30_temp = tc_phy_read_l_reg(port_num, 0, 30);
+	tc_phy_write_l_reg(port_num, 0, 30, (l0r30_temp | 0x02c0));
+
+	/* tx_offset_temp = TX_AMP_OFFSET_0MV; */
+	tx_offset_temp = 0x20;
+	tx_offset_reg_shift = 8;
+	tc_phy_write_g_reg(port_num, 1, 26, (0x8000 | DAC_IN_0V));
+
+	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, 0x3000);
+	/* pr_info(" g7r24[%d] = %x\n", port_num, tc_phy_read_g_reg(port_num, 7, 24)); */
+	/*RG_TXG_CALEN =1 by port number*/
+	l3r25_temp = tc_phy_read_l_reg(port_num, 3, 25);
+	tc_phy_write_l_reg(port_num, 3, 25, (l3r25_temp | 0x400));
+	/*decide which port calibration RG_ZCALEN by port_num*/
+	l3r25_temp = tc_phy_read_l_reg(port_num, 3, 25);
+	tc_phy_write_l_reg(port_num, 3, 25, (l3r25_temp | 0x1000));
+
+	/*DA_PGA_MDIX_STASTUS_P0=0(L0R26[15:14] = 0x01) & RG_RX2TX_EN_P0=0(L2R20[10] =0),*/
+	l0r26_temp = tc_phy_read_l_reg(port_num, 0, 26);
+	l0r26_temp = l0r26_temp & (~0xc000);
+	/* tc_phy_write_l_reg(port_num, 0, 26, (l0r26_temp | 0x4000)); */
+	tc_phy_write_l_reg(port_num, 0, 26, 0x5203);/* Kant */
+	/* pr_info("l0r26[%d] = %x\n", port_num, tc_phy_read_l_reg(port_num, 0, 26)); */
+	l2r20_temp = tc_phy_read_l_reg(port_num, 2, 20);
+	l2r20_temp = l2r20_temp & (~0x400);
+	tc_phy_write_l_reg(port_num, 2, 20, l2r20_temp);
+	/* pr_info("l2r20[%d] = %x\n", port_num, tc_phy_read_l_reg(port_num, 2, 20)); */
+
+	tc_phy_write_l_reg(port_num, 4, 17, (0x0000));
+	l4r17_temp = tc_phy_read_l_reg(port_num, 4, 17);
+	tc_phy_write_l_reg(port_num, 4, 17,
+			   l4r17_temp |
+			   (tx_offset_temp << tx_offset_reg_shift));
+/*wat AD_CAL_CLK = 1*/
+	all_ana_cal_status = all_fe_ana_cal_wait(delay, port_num);
+	if (all_ana_cal_status == 0) {
+		all_ana_cal_status = ANACAL_ERROR;
+		pr_info(" FE Tx offset AnaCal ERROR! (init)  \r\n");
+	}
+
+	tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+	tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+/*GET AD_CAL_COMP_OUT g724[0]*/
+	/*ad_cal_comp_out_init = (tc_phy_read_l_reg(FE_CAL_COMMON, 4, 23) >> 4) & 0x1;*/
+	ad_cal_comp_out_init = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1;
+
+	if (ad_cal_comp_out_init == 1)
+		calibration_polarity = -1;
+	else
+		calibration_polarity = 1;
+	cnt = 0;
+	tx_amp_cnt = 0;
+	tx_offset_temp += calibration_polarity;
+
+	while ((all_ana_cal_status < ANACAL_ERROR) && (cnt < 254)) {
+		cnt++;
+		cal_temp = tx_offset_temp;
+		tc_phy_write_l_reg(port_num, 4, 17,
+				   (cal_temp << tx_offset_reg_shift));
+
+		tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+		tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+		all_ana_cal_status = all_fe_ana_cal_wait(delay, port_num);
+
+		if (all_ana_cal_status == 0) {
+			all_ana_cal_status = ANACAL_ERROR;
+			pr_info(" FE Tx offset AnaCal ERROR! (%d)  \r\n", cnt);
+		} else if ((tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1) !=
+			   ad_cal_comp_out_init) {
+			all_ana_cal_status = ANACAL_FINISH;
+			tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+			tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+
+			ad_cal_comp_out_init =
+			    tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1;
+		} else {
+			l4r17_temp = tc_phy_read_l_reg(port_num, 4, 17);
+
+			if ((tx_offset_temp == 0x3f) || (tx_offset_temp == 0x00)) {
+				all_ana_cal_status = ANACAL_SATURATION;
+				pr_info("tx offset ANACAL_SATURATION\n");
+			} else {
+				tx_offset_temp += calibration_polarity;
+			}
+		}
+	}
+
+	if ((all_ana_cal_status == ANACAL_ERROR) ||
+	    (all_ana_cal_status == ANACAL_SATURATION)) {
+		tx_offset_temp = TX_AMP_OFFSET_0MV;
+		l4r17_temp = tc_phy_read_l_reg(port_num, 4, 17);
+		tc_phy_write_l_reg(port_num, 4, 17,
+				   (l4r17_temp |
+				    (tx_offset_temp << tx_offset_reg_shift)));
+		pr_info("[%d] %s, ANACAL_SATURATION\n", port_num, __func__);
+	} else {
+		fe_cal_tx_offset_flag = 1;
+	}
+	tx_offset_final = (tc_phy_read_l_reg(port_num, 4, 17) & 0x3f00) >> 8;
+	pr_info("[%d] - tx_offset_final = 0x%x\n", port_num, tx_offset_final);
+
+	clear_ckinv_ana_txvos();
+	tc_phy_write_l_reg(port_num, 3, 25, 0x0000);
+	tc_phy_write_l_reg(FE_CAL_COMMON, 3, 25, 0x0000);
+	tc_phy_write_g_reg(port_num, 1, 26, 0);
+}
+
+void fe_cal_tx_offset_mdix(u8 port_num, u32 delay)
+{				/* for MT7622 */
+	u8 all_ana_cal_status;
+	int ad_cal_comp_out_init;
+	u16 l3r25_temp, l2r20_temp, l4r26_temp;
+	u16 g4r21_temp, l0r30_temp, l0r26_temp;
+	int calibration_polarity, tx_offset_temp;
+	int cal_temp = 0;
+	u8 tx_offset_reg_shift;
+	u8 cnt = 0, phyaddr;
+	u16 tx_offset_final_mdix;
+
+	phyaddr = port_num + ephy_addr_base;
+/*Set device in 100M mode*/
+	tc_phy_write_l_reg(port_num, 0, 0, 0x2100);
+
+	/*// g4r21[11]:Hw bypass tx offset cal, Fw cal*/
+	g4r21_temp = tc_phy_read_g_reg(port_num, 4, 21);
+	tc_phy_write_g_reg(port_num, 4, 21, (g4r21_temp | 0x0800));
+
+	/*l0r30[9], [7], [6], [1]*/
+	l0r30_temp = tc_phy_read_l_reg(port_num, 0, 30);
+	tc_phy_write_l_reg(port_num, 0, 30, (l0r30_temp | 0x02c0));
+
+	tx_offset_temp = 0x20;
+	tx_offset_reg_shift = 8;
+	tc_phy_write_g_reg(port_num, 1, 26, (0x8000 | DAC_IN_0V));
+
+	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, 0x3000);
+
+	/*RG_TXG_CALEN =1 by port number*/
+	l3r25_temp = tc_phy_read_l_reg(port_num, 3, 25);
+	tc_phy_write_l_reg(port_num, 3, 25, (l3r25_temp | 0x400));
+
+	/*decide which port calibration RG_ZCALEN by port_num*/
+	l3r25_temp = tc_phy_read_l_reg(port_num, 3, 25);
+	tc_phy_write_l_reg(port_num, 3, 25, (l3r25_temp | 0x1000));
+
+	/*DA_PGA_MDIX_STASTUS_P0=0(L0R26[15:14] = 0x10) & RG_RX2TX_EN_P0=1(L2R20[10] =1),*/
+	l0r26_temp = tc_phy_read_l_reg(port_num, 0, 26);
+	l0r26_temp = l0r26_temp & (~0xc000);
+	tc_phy_write_l_reg(port_num, 0, 26, 0x9203); /* Kant */
+	l2r20_temp = tc_phy_read_l_reg(port_num, 2, 20);
+	l2r20_temp = l2r20_temp | 0x400;
+	tc_phy_write_l_reg(port_num, 2, 20, l2r20_temp);
+
+	l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
+	tc_phy_write_l_reg(port_num, 4, 26, l4r26_temp & (~0x3f00));
+	tc_phy_write_l_reg(port_num, 4, 26,
+			   (l4r26_temp & ~0x3f00) | (cal_temp << tx_offset_reg_shift));
+
+	all_ana_cal_status = all_fe_ana_cal_wait(delay, port_num);
+	if (all_ana_cal_status == 0) {
+		all_ana_cal_status = ANACAL_ERROR;
+		pr_info(" FE Tx offset mdix AnaCal ERROR! (init)  \r\n");
+	}
+
+	tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+	tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+
+	ad_cal_comp_out_init = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1;
+
+	if (ad_cal_comp_out_init == 1)
+		calibration_polarity = -1;
+	else
+		calibration_polarity = 1;
+
+	cnt = 0;
+	tx_offset_temp += calibration_polarity;
+	while ((all_ana_cal_status < ANACAL_ERROR) && (cnt < 254)) {
+		cnt++;
+		cal_temp = tx_offset_temp;
+		l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
+		tc_phy_write_l_reg(port_num, 4, 26,
+				   (l4r26_temp & ~0x3f00) | (cal_temp << tx_offset_reg_shift));
+
+		tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+		tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+		all_ana_cal_status = all_fe_ana_cal_wait(delay, port_num);
+
+		if (all_ana_cal_status == 0) {
+			all_ana_cal_status = ANACAL_ERROR;
+			pr_info(" FE Tx offset mdix AnaCal ERROR! (%d)  \r\n", cnt);
+		} else if ((tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1) !=
+			   ad_cal_comp_out_init) {
+			all_ana_cal_status = ANACAL_FINISH;
+			tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+			tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+			ad_cal_comp_out_init =
+			    tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1;
+		} else {
+			l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
+
+			if ((tx_offset_temp == 0x3f) || (tx_offset_temp == 0x00)) {
+				all_ana_cal_status = ANACAL_SATURATION;
+				pr_info("tx offset ANACAL_SATURATION\n");
+			} else {
+				tx_offset_temp += calibration_polarity;
+			}
+		}
+	}
+
+	if ((all_ana_cal_status == ANACAL_ERROR) ||
+	    (all_ana_cal_status == ANACAL_SATURATION)) {
+		tx_offset_temp = TX_AMP_OFFSET_0MV;
+		l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
+		tc_phy_write_l_reg(port_num, 4, 26,
+				   (l4r26_temp & (~0x3f00)) | (cal_temp << tx_offset_reg_shift));
+		pr_info("[%d] %s, ANACAL_SATURATION\n", port_num, __func__);
+	} else {
+		fe_cal_tx_offset_flag_mdix = 1;
+	}
+	tx_offset_final_mdix = (tc_phy_read_l_reg(port_num, 4, 26) & 0x3f00) >> 8;
+	pr_info("[%d] - tx_offset_final_mdix = 0x%x\n", port_num, tx_offset_final_mdix);
+
+	clear_ckinv_ana_txvos();
+	tc_phy_write_l_reg(port_num, 3, 25, 0x0000);
+	tc_phy_write_l_reg(FE_CAL_COMMON, 3, 25, 0x0000);
+	tc_phy_write_g_reg(port_num, 1, 26, 0);
+}
+
+void set_r50_leopard(u8 port_num, u32 r50_cal_result)
+{
+	int rg_zcal_ctrl_tx, rg_zcal_ctrl_rx;
+	u16 l4r22_temp;
+
+	rg_zcal_ctrl_rx = 0;
+	rg_zcal_ctrl_tx = 0;
+	pr_info("r50_cal_result  = 0x%x\n", r50_cal_result);
+	if (port_num == 0) {
+		rg_zcal_ctrl_tx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result)];
+		rg_zcal_ctrl_rx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result)];
+	}
+	if (port_num == 1) {
+		rg_zcal_ctrl_tx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result)] + 4;
+		rg_zcal_ctrl_rx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result)] + 4;
+	}
+	if (port_num == 2) {
+		rg_zcal_ctrl_tx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result)] + 4;
+		rg_zcal_ctrl_rx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result)] + 6;
+	}
+	if (port_num == 3) {
+		rg_zcal_ctrl_tx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result)] + 5;
+		rg_zcal_ctrl_rx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result)] + 6;
+	}
+	if (port_num == 4) {
+		rg_zcal_ctrl_tx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result)] + 4;
+		rg_zcal_ctrl_rx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result)] + 4;
+	}
+	if (rg_zcal_ctrl_tx > 0x7f)
+		rg_zcal_ctrl_tx = 0x7f;
+	if (rg_zcal_ctrl_rx > 0x7f)
+		rg_zcal_ctrl_rx = 0x7f;
+/*R50OHM_RSEL_TX= LP4R22[14:8]*/
+	tc_phy_write_l_reg(port_num, 4, 22, ((rg_zcal_ctrl_tx << 8)));
+	l4r22_temp = tc_phy_read_l_reg(port_num, 4, 22);
+/*R50OHM_RSEL_RX= LP4R22[6:0]*/
+	tc_phy_write_l_reg(port_num, 4, 22,
+			   (l4r22_temp | (rg_zcal_ctrl_rx << 0)));
+	fe_cal_r50_flag = 1;
+	pr_info("[%d] - r50 final result l4r22[%d] = %x\n", port_num,
+		port_num, tc_phy_read_l_reg(port_num, 4, 22));
+}
+
+void set_r50_mt7622(u8 port_num, u32 r50_cal_result)
+{
+	int rg_zcal_ctrl_tx, rg_zcal_ctrl_rx;
+	u16 l4r22_temp;
+
+	rg_zcal_ctrl_rx = 0;
+	rg_zcal_ctrl_tx = 0;
+	pr_info("r50_cal_result  = 0x%x\n", r50_cal_result);
+
+	if (port_num == 0) {
+		rg_zcal_ctrl_tx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result - 5)];
+		rg_zcal_ctrl_rx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result - 5)];
+	}
+	if (port_num == 1) {
+		rg_zcal_ctrl_tx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result - 3)];
+		rg_zcal_ctrl_rx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result - 3)];
+	}
+	if (port_num == 2) {
+		rg_zcal_ctrl_tx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result - 4)];
+		rg_zcal_ctrl_rx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result - 5)];
+	}
+	if (port_num == 3) {
+		rg_zcal_ctrl_tx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result - 4)];
+		rg_zcal_ctrl_rx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result - 3)];
+	}
+	if (port_num == 4) {
+		rg_zcal_ctrl_tx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result - 4)];
+		rg_zcal_ctrl_rx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result - 5)];
+	}
+/*R50OHM_RSEL_TX= LP4R22[14:8]*/
+	tc_phy_write_l_reg(port_num, 4, 22, ((rg_zcal_ctrl_tx << 8)));
+	l4r22_temp = tc_phy_read_l_reg(port_num, 4, 22);
+/*R50OHM_RSEL_RX= LP4R22[6:0]*/
+	tc_phy_write_l_reg(port_num, 4, 22,
+			   (l4r22_temp | (rg_zcal_ctrl_rx << 0)));
+	fe_cal_r50_flag = 1;
+	pr_info("[%d] - r50 final result l4r22[%d] = %x\n", port_num,
+		port_num, tc_phy_read_l_reg(port_num, 4, 22));
+}
+
+void fe_ge_r50_common(u8 port_num)
+{
+	u16 l3r25_temp, g7r24_tmp, l4r23_temp;
+	u8 phyaddr;
+
+	phyaddr = port_num;
+	tc_phy_write_l_reg(port_num, 0, 0, 0x2100);
+	/*g2r25[7:5]:0x110, BG voltage output*/
+	tc_phy_write_g_reg(FE_CAL_COMMON, 2, 25, 0xf0c0);
+
+	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, 0x0000);
+	/*g7r24[13]:0x01, RG_ANA_CALEN_P0=1*/
+	g7r24_tmp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, (g7r24_tmp | 0x2000));
+	/*g7r24[14]:0x01, RG_CAL_CKINV_P0=1*/
+	g7r24_tmp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, (g7r24_tmp | 0x4000));
+
+	/*g7r24[12]:0x01, DA_TXVOS_CALEN_P0=0*/
+	g7r24_tmp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, (g7r24_tmp & (~0x1000)));
+
+	/*DA_R50OHM_CAL_EN l4r23[0] = 0*/
+	l4r23_temp = tc_phy_read_l_reg(port_num, 4, 23);
+	l4r23_temp = l4r23_temp & ~(0x01);
+	tc_phy_write_l_reg(port_num, 4, 23, l4r23_temp);
+
+	/*RG_REXT_CALEN l2r25[13] = 0*/
+	l3r25_temp = tc_phy_read_l_reg(FE_CAL_COMMON, 3, 25);
+	tc_phy_write_l_reg(FE_CAL_COMMON, 3, 25, (l3r25_temp & (~0x2000)));
+}
+
/*
 * fe_cal_r50 - calibrate the 50-ohm termination (R50) for one FE port.
 *
 * Sweeps the 6-bit zcal code in g7r24[11:6] one step at a time (direction
 * chosen from the initial comparator reading g7r24[0]) until the comparator
 * flips, the code saturates at 0x00/0x3f, or the analog-cal wait fails.
 * The resulting code is then translated into per-chip TX/RX R50 trims via
 * set_r50_mt7622() / set_r50_leopard(), which also set fe_cal_r50_flag on
 * the success path here.
 *
 * @port_num: PHY port index (0..4)
 * @delay:    per-poll delay, forwarded to all_fe_ana_cal_wait()
 */
void fe_cal_r50(u8 port_num, u32 delay)
{
	int rg_zcal_ctrl, all_ana_cal_status, rg_zcal_ctrl_tx, rg_zcal_ctrl_rx;
	int ad_cal_comp_out_init;
	u16 l3r25_temp, l0r4, g7r24_tmp, l4r23_temp;
	int calibration_polarity;
	u8 cnt = 0, phyaddr;
	struct END_DEVICE *ei_local = netdev_priv(dev_raether);

	phyaddr = port_num + ephy_addr_base;
	/* force 100M mode for the duration of the calibration */
	tc_phy_write_l_reg(port_num, 0, 0, 0x2100);
	/*g2r25[7:5]:0x110, BG voltage output*/
	tc_phy_write_g_reg(FE_CAL_COMMON, 2, 25, 0xf0c0);

	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, 0x0000);
	/*g7r24[13]:0x01, RG_ANA_CALEN_P0=1*/
	g7r24_tmp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, (g7r24_tmp | 0x2000));
	/*g7r24[14]:0x01, RG_CAL_CKINV_P0=1*/
	g7r24_tmp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, (g7r24_tmp | 0x4000));

	/*g7r24[12]:0x01, DA_TXVOS_CALEN_P0=0*/
	g7r24_tmp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, (g7r24_tmp & (~0x1000)));

	/* pr_info("g7r24 = %x\n", g7r24_tmp); */

	/*DA_R50OHM_CAL_EN l4r23[0] = 1*/
	l4r23_temp = tc_phy_read_l_reg(port_num, 4, 23);
	tc_phy_write_l_reg(port_num, 4, 23, (l4r23_temp | (0x01)));

	/*RG_REXT_CALEN l2r25[13] = 0*/
	l3r25_temp = tc_phy_read_l_reg(FE_CAL_COMMON, 3, 25);
	tc_phy_write_l_reg(FE_CAL_COMMON, 3, 25, (l3r25_temp & (~0x2000)));

	/*decide which port calibration RG_ZCALEN by port_num*/
	l3r25_temp = tc_phy_read_l_reg(port_num, 3, 25);
	tc_phy_write_l_reg(port_num, 3, 25, (l3r25_temp | 0x1000));

	rg_zcal_ctrl = 0x20;	/* start with 0 dB */
	/* program the initial zcal code into g7r24[11:6] */
	g7r24_tmp = (tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & (~0xfc0));
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, g7r24_tmp | ((rg_zcal_ctrl & 0x3f) << 6));

	/*wait AD_CAL_COMP_OUT = 1*/
	all_ana_cal_status = all_fe_ana_cal_wait(delay, port_num);
	if (all_ana_cal_status == 0) {
		all_ana_cal_status = ANACAL_ERROR;
		pr_info(" FE R50 AnaCal ERROR! (init)   \r\n");
	}

	/* initial comparator polarity decides the sweep direction */
	ad_cal_comp_out_init = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1;

	if (ad_cal_comp_out_init == 1)
		calibration_polarity = -1;
	else
		calibration_polarity = 1;

	cnt = 0;
	while ((all_ana_cal_status < ANACAL_ERROR) && (cnt < 254)) {
		cnt++;

		rg_zcal_ctrl += calibration_polarity;
		g7r24_tmp = (tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & (~0xfc0));
		tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, g7r24_tmp | ((rg_zcal_ctrl & 0x3f) << 6));
		all_ana_cal_status = all_fe_ana_cal_wait(delay, port_num);

		if (all_ana_cal_status == 0) {
			all_ana_cal_status = ANACAL_ERROR;
			pr_info(" FE R50 AnaCal ERROR! (%d)  \r\n", cnt);
		} else if ((tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1) !=
			ad_cal_comp_out_init) {
			/* comparator flipped: the sweep has converged */
			all_ana_cal_status = ANACAL_FINISH;
		} else {
			if ((rg_zcal_ctrl == 0x3F) || (rg_zcal_ctrl == 0x00)) {
				all_ana_cal_status = ANACAL_SATURATION;
				pr_info(" FE R50 AnaCal Saturation! (%d)  \r\n",
					cnt);
			} else {
				/* NOTE(review): l0r4 is a dead store; the
				 * register read may matter for hardware
				 * side effects, so it is kept — confirm.
				 */
				l0r4 = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
				l0r4 = l0r4 & 0x1;
			}
		}
	}
	if (port_num == 0)
		r50_p0_cal_result = rg_zcal_ctrl;

	if ((all_ana_cal_status == ANACAL_ERROR) ||
	    (all_ana_cal_status == ANACAL_SATURATION)) {
		rg_zcal_ctrl = 0x20;	/* 0 dB */
		/* NOTE(review): rg_zcal_ctrl_tx/rx are never read after this
		 * point (set_r50_* recompute the trims from rg_zcal_ctrl);
		 * these two stores are dead.
		 */
		rg_zcal_ctrl_tx = 0x7f;
		rg_zcal_ctrl_rx = 0x7f;
		pr_info("[%d] %s, ANACAL_SATURATION\n", port_num, __func__);
	} else {
		fe_cal_r50_flag = 1;
	}
	if (ei_local->chip_name == MT7622_FE)
		set_r50_mt7622(port_num, rg_zcal_ctrl);
	else if (ei_local->chip_name == LEOPARD_FE)
		set_r50_leopard(port_num, rg_zcal_ctrl);

	clear_ckinv_ana_txvos();
	tc_phy_write_l_reg(port_num, 3, 25, 0x0000);
	tc_phy_write_l_reg(FE_CAL_COMMON, 3, 25, 0x0000);
}
+
/*
 * fe_cal_vbg - calibrate the bandgap voltage / external-resistor (IEXT)
 * reference shared by all FE ports.
 *
 * Sweeps the 6-bit zcal code in g7r24[11:6] until the comparator output
 * g7r24[0] flips, stores the result in the global iext_cal_result, mirrors
 * it into l3r26[11:6], and programs the bandgap trim in g2r22[11:9]
 * (Leopard maps the code through ZCAL_TO_REXT_TBL first; MT7622 uses the
 * top three bits of the code directly).
 *
 * Side effect: resets the global ephy_addr_base to 0.
 *
 * @port_num: first PHY port index; RG_ZCALEN is cleared for ports
 *            port_num..4 before the sweep
 * @delay:    per-poll delay, forwarded to all_fe_ana_cal_wait()
 */
void fe_cal_vbg(u8 port_num, u32 delay)
{
	int rg_zcal_ctrl, all_ana_cal_status;
	int ad_cal_comp_out_init, port_no;
	u16 l3r25_temp, l0r4, g7r24_tmp, l3r26_temp;
	int calibration_polarity;
	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
	u16 g2r22_temp, rg_bg_rasel;
	u8 cnt = 0, phyaddr;

	rg_bg_rasel = 0;
	ephy_addr_base = 0;
	/* NOTE(review): phyaddr is computed but never used below */
	phyaddr = port_num + ephy_addr_base;

	tc_phy_write_g_reg(FE_CAL_COMMON, 2, 25, 0x30c0);
	tc_phy_write_g_reg(FE_CAL_COMMON, 0, 25, 0x0030);
	/* enable the common analog-cal clock/enable bits in g7r24 */
	g7r24_tmp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, (g7r24_tmp | 0x2000));
	g7r24_tmp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, (g7r24_tmp | 0x4000));

	g7r24_tmp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, (g7r24_tmp & (~0x1000)));

	/* RG_REXT_CALEN l3r25[13] = 1 on the common port */
	l3r25_temp = tc_phy_read_l_reg(FE_CAL_COMMON, 3, 25);
	tc_phy_write_l_reg(FE_CAL_COMMON, 3, 25, (l3r25_temp | 0x2000));

	/* clear per-port RG_ZCALEN (l3r25[12]) for ports port_num..4 */
	for (port_no = port_num; port_no < 5; port_no++) {
		l3r25_temp = tc_phy_read_l_reg(port_no, 3, 25);
		tc_phy_write_l_reg(port_no, 3, 25, (l3r25_temp & (~0x1000)));
	}
	/* NOTE(review): other cal flows start the sweep from 0x20 as the
	 * "0 dB" midpoint; this one starts from 0 — confirm intent.
	 */
	rg_zcal_ctrl = 0x0;	/* start with 0 dB */

	g7r24_tmp = (tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & (~0xfc0));
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, g7r24_tmp | ((rg_zcal_ctrl & 0x3f) << 6));

	all_ana_cal_status = all_fe_ana_cal_wait(delay, port_num);
	if (all_ana_cal_status == 0) {
		all_ana_cal_status = ANACAL_ERROR;
		pr_info(" fe_cal_vbg ERROR! (init)   \r\n");
	}
	/* initial comparator polarity decides the sweep direction */
	ad_cal_comp_out_init = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1;

	if (ad_cal_comp_out_init == 1)
		calibration_polarity = -1;
	else
		calibration_polarity = 1;

	cnt = 0;
	while ((all_ana_cal_status < ANACAL_ERROR) && (cnt < 254)) {
		cnt++;
		rg_zcal_ctrl += calibration_polarity;
		g7r24_tmp = (tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & (~0xfc0));
		tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, g7r24_tmp | ((rg_zcal_ctrl & 0x3f) << 6));
		all_ana_cal_status = all_fe_ana_cal_wait(delay, port_num);

		if (all_ana_cal_status == 0) {
			all_ana_cal_status = ANACAL_ERROR;
			pr_info("VBG ERROR(%d)status=%d\n", cnt, all_ana_cal_status);
		} else if ((tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1) !=
			ad_cal_comp_out_init) {
			/* comparator flipped: the sweep has converged */
			all_ana_cal_status = ANACAL_FINISH;
		} else {
			if ((rg_zcal_ctrl == 0x3F) || (rg_zcal_ctrl == 0x00)) {
				all_ana_cal_status = ANACAL_SATURATION;
				pr_info(" VBG0 AnaCal Saturation! (%d)  \r\n",
					cnt);
			} else {
				/* NOTE(review): l0r4 is a dead store; the
				 * register read may matter for hardware
				 * side effects, so it is kept — confirm.
				 */
				l0r4 = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
				l0r4 = l0r4 & 0x1;
			}
		}
	}
	if ((all_ana_cal_status == ANACAL_ERROR) ||
	    (all_ana_cal_status == ANACAL_SATURATION)) {
		rg_zcal_ctrl = 0x20;	/* 0 dB */
	} else {
		fe_cal_vbg_flag = 1;
	}

	/* read the final code back from g7r24[11:6] and latch it globally */
	rg_zcal_ctrl = (tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & (0xfc0)) >> 6;
	iext_cal_result = rg_zcal_ctrl;
	pr_info("iext_cal_result = 0x%x\n", iext_cal_result);
	if (ei_local->chip_name == LEOPARD_FE)
		rg_bg_rasel =  ZCAL_TO_REXT_TBL[rg_zcal_ctrl];

	/* mirror the code into l3r26[11:6] on the common port */
	l3r26_temp = tc_phy_read_l_reg(FE_CAL_COMMON, 3, 26);
	l3r26_temp = l3r26_temp & (~0xfc0);
	tc_phy_write_l_reg(FE_CAL_COMMON, 3, 26, l3r26_temp | ((rg_zcal_ctrl & 0x3f) << 6));

	g2r22_temp = tc_phy_read_g_reg(FE_CAL_COMMON, 2, 22);
	g2r22_temp = g2r22_temp & (~0xe00);/*[11:9]*/

	if (ei_local->chip_name == LEOPARD_FE) {
		rg_bg_rasel = rg_bg_rasel & 0x7;
		tc_phy_write_g_reg(FE_CAL_COMMON, 2, 22,
				   g2r22_temp | (rg_bg_rasel << 9));
	} else if (ei_local->chip_name == MT7622_FE) {
		rg_zcal_ctrl = rg_zcal_ctrl & 0x38;
		tc_phy_write_g_reg(FE_CAL_COMMON, 2, 22,
				   g2r22_temp | (((rg_zcal_ctrl & 0x38) >> 3) << 9));
	}
	clear_ckinv_ana_txvos();

	tc_phy_write_l_reg(port_num, 3, 25, 0x0000);
	tc_phy_write_l_reg(FE_CAL_COMMON, 3, 25, 0x0000);
}
+
+#define CALDLY 40
+
/*
 * do_fe_phy_all_analog_cal - run the full analog calibration sequence for
 * one FE port.
 *
 * Order: chip defaults / eye-diagram tuning, then VBG+IEXT (only on the
 * chip's designated iext port), R50 termination, TX offset (MDI and MDIX),
 * and TX amplitude (MDI and MDIX).  Each stage is retried up to three
 * times, gated on the corresponding fe_cal_*_flag global, which the stage
 * sets on success and this function clears again after the retry loop.
 * Finally the scratched registers are restored and auto-negotiation plus
 * flow control are re-enabled.
 *
 * Side effect: resets the global ephy_addr_base to 0.
 *
 * @port_num: PHY port index (0..4)
 */
void do_fe_phy_all_analog_cal(u8 port_num)
{
	u16 l0r26_temp, l0r30_temp, l3r25_tmp;
	u8 cnt = 0, phyaddr, i, iext_port;
	u32 iext_s, iext_e, r50_s, r50_e, txo_s, txo_e, txa_s, txa_e;
	struct END_DEVICE *ei_local = netdev_priv(dev_raether);

	iext_port = 0;
	ephy_addr_base = 0;
	phyaddr = port_num + ephy_addr_base;
	/* save l0r26/l0r30 so they can be restored after calibration */
	l0r26_temp = tc_phy_read_l_reg(port_num, 0, 26);
	tc_phy_write_l_reg(port_num, 0, 26, 0x5600);
	tc_phy_write_l_reg(port_num, 4, 21, 0x0000);
	tc_phy_write_l_reg(port_num, 0, 0, 0x2100);	/* force 100M mode */

	l0r30_temp = tc_phy_read_l_reg(port_num, 0, 30);

/*eye pic.*/
	tc_phy_write_g_reg(port_num, 5, 20, 0x0170);
	tc_phy_write_g_reg(port_num, 5, 23, 0x0220);
	tc_phy_write_g_reg(port_num, 5, 24, 0x0206);
	tc_phy_write_g_reg(port_num, 5, 26, 0x0370);
	tc_phy_write_g_reg(port_num, 5, 27, 0x02f2);
	tc_phy_write_g_reg(port_num, 5, 29, 0x001b);
	tc_phy_write_g_reg(port_num, 5, 30, 0x0002);
/*Yiron default setting*/
	for (i = port_num; i < 5; i++) {
		tc_phy_write_g_reg(i, 3, 23, 0x0);
		tc_phy_write_l_reg(i, 3, 23, 0x2004);
		tc_phy_write_l_reg(i, 2, 21, 0x8551);
		tc_phy_write_l_reg(i, 4, 17, 0x2000);
		tc_phy_write_g_reg(i, 7, 20, 0x7c62);
		tc_phy_write_l_reg(i, 4, 20, 0x4444);
		tc_phy_write_l_reg(i, 2, 22, 0x1011);
		tc_phy_write_l_reg(i, 4, 28, 0x1011);
		tc_phy_write_l_reg(i, 4, 19, 0x2222);
		tc_phy_write_l_reg(i, 4, 29, 0x2222);
		tc_phy_write_l_reg(i, 2, 28, 0x3444);
		tc_phy_write_l_reg(i, 2, 29, 0x04c6);
		tc_phy_write_l_reg(i, 4, 30, 0x0006);
		tc_phy_write_l_reg(i, 5, 16, 0x04c6);
	}
	/* Leopard-specific defaults and eye-diagram tuning */
	if (ei_local->chip_name == LEOPARD_FE) {
		tc_phy_write_l_reg(port_num, 0, 20, 0x0c0c);
		tc_phy_write_dev_reg(0, 0x1e, 0x017d, 0x0000);
		tc_phy_write_dev_reg(0, 0x1e, 0x017e, 0x0000);
		tc_phy_write_dev_reg(0, 0x1e, 0x017f, 0x0000);
		tc_phy_write_dev_reg(0, 0x1e, 0x0180, 0x0000);
		tc_phy_write_dev_reg(0, 0x1e, 0x0181, 0x0000);
		tc_phy_write_dev_reg(0, 0x1e, 0x0182, 0x0000);
		tc_phy_write_dev_reg(0, 0x1e, 0x0183, 0x0000);
		tc_phy_write_dev_reg(0, 0x1e, 0x0184, 0x0000);
		tc_phy_write_dev_reg(0, 0x1e, 0x00db, 0x0000);
		tc_phy_write_dev_reg(0, 0x1e, 0x00dc, 0x0000);
		tc_phy_write_dev_reg(0, 0x1e, 0x003e, 0x0000);
		tc_phy_write_dev_reg(0, 0x1e, 0x00dd, 0x0000);

		/*eye pic.*/
		tc_phy_write_g_reg(1, 5, 19, 0x0100);
		tc_phy_write_g_reg(1, 5, 20, 0x0161);
		tc_phy_write_g_reg(1, 5, 21, 0x00f0);
		tc_phy_write_g_reg(1, 5, 22, 0x0046);
		tc_phy_write_g_reg(1, 5, 23, 0x0210);
		tc_phy_write_g_reg(1, 5, 24, 0x0206);
		tc_phy_write_g_reg(1, 5, 25, 0x0238);
		tc_phy_write_g_reg(1, 5, 26, 0x0360);
		tc_phy_write_g_reg(1, 5, 27, 0x02f2);
		tc_phy_write_g_reg(1, 5, 28, 0x0240);
		tc_phy_write_g_reg(1, 5, 29, 0x0010);
		tc_phy_write_g_reg(1, 5, 30, 0x0002);
	}
	/* the chip-wide VBG/IEXT cal runs only from one designated port */
	if (ei_local->chip_name == MT7622_FE)
		iext_port = 0;
	else if (ei_local->chip_name == LEOPARD_FE)
		iext_port = 1;

	if (port_num == iext_port) {
			/*****VBG & IEXT Calibration*****/
		cnt = 0;
		while ((fe_cal_vbg_flag == 0) && (cnt < 0x03)) {
			iext_s = jiffies;
			fe_cal_vbg(port_num, 1);	/* allen_20160608 */
			iext_e = jiffies;
			if (show_time)
				pr_info("port[%d] fe_cal_vbg time = %u\n",
					port_num, (iext_e - iext_s) * 4);
			cnt++;
			if (fe_cal_vbg_flag == 0)
				pr_info(" FE-%d VBG wait! (%d)  \r\n", phyaddr, cnt);
		}
		fe_cal_vbg_flag = 0;
		/**** VBG & IEXT Calibration end ****/
	}

	/* *** R50 Cal start *************************************** */
	cnt = 0;
	while ((fe_cal_r50_flag == 0) && (cnt < 0x03)) {
		r50_s = jiffies;

		fe_cal_r50(port_num, 1);

		r50_e = jiffies;
		if (show_time)
			pr_info("port[%d] fe_r50 time = %u\n",
				port_num, (r50_e - r50_s) * 4);
		cnt++;
		if (fe_cal_r50_flag == 0)
			pr_info(" FE-%d R50 wait! (%d)  \r\n", phyaddr, cnt);
	}
	fe_cal_r50_flag = 0;
	cnt = 0;
	/* *** R50 Cal end *** */
	/* *** Tx offset Cal start ********************************* */

	cnt = 0;
	while ((fe_cal_tx_offset_flag == 0) && (cnt < 0x03)) {
		txo_s = jiffies;
		fe_cal_tx_offset(port_num, CALDLY);
		txo_e = jiffies;
		if (show_time)
			pr_info("port[%d] fe_cal_tx_offset time = %u\n",
				port_num, (txo_e - txo_s) * 4);
		cnt++;
	}
	fe_cal_tx_offset_flag = 0;
	cnt = 0;

	while ((fe_cal_tx_offset_flag_mdix == 0) && (cnt < 0x03)) {
		txo_s = jiffies;
		fe_cal_tx_offset_mdix(port_num, CALDLY);
		txo_e = jiffies;
		if (show_time)
			pr_info("port[%d] fe_cal_tx_offset_mdix time = %u\n",
				port_num, (txo_e - txo_s) * 4);
		cnt++;
	}
	fe_cal_tx_offset_flag_mdix = 0;
	cnt = 0;
	/* *** Tx offset Cal end *** */

	/* *** Tx Amp Cal start ************************************** */
	cnt = 0;
	while ((fe_cal_flag == 0) && (cnt < 0x3)) {
		txa_s = jiffies;
		fe_cal_tx_amp(port_num, CALDLY);	/* allen_20160608 */
		txa_e = jiffies;
		if (show_time)
			pr_info("port[%d] fe_cal_tx_amp time = %u\n",
				port_num, (txa_e - txa_s) * 4);
		cnt++;
	}
	fe_cal_flag = 0;
	cnt = 0;
	while ((fe_cal_flag_mdix == 0) && (cnt < 0x3)) {
		txa_s = jiffies;
		fe_cal_tx_amp_mdix(port_num, CALDLY);
		txa_e = jiffies;
		if (show_time)
			pr_info("port[%d] fe_cal_tx_amp_mdix time = %u\n",
				port_num, (txa_e - txa_s) * 4);
		cnt++;
	}
	fe_cal_flag_mdix = 0;
	cnt = 0;

	/* restore scratched registers and leave calibration mode */
	l3r25_tmp = tc_phy_read_l_reg(port_num, 3, 25);
	l3r25_tmp = l3r25_tmp & ~(0x1000);/*[12] RG_ZCALEN = 0*/
	tc_phy_write_l_reg(port_num, 3, 25, l3r25_tmp);
	tc_phy_write_g_reg(port_num, 1, 26, 0x0000);
	tc_phy_write_l_reg(port_num, 0, 26, l0r26_temp);
	tc_phy_write_l_reg(port_num, 0, 30, l0r30_temp);
	tc_phy_write_g_reg(port_num, 1, 26, 0x0000);
	tc_phy_write_l_reg(port_num, 0, 0, 0x3100);	/* re-enable AN */
	/*enable flow control*/
	tc_phy_write_g_reg(port_num, 0, 4, 0x5e1);
}
+
+u8 all_ge_ana_cal_wait(unsigned int delay, u8 port_num) /* for EN7512 */
+{
+	u8 all_ana_cal_status;
+	u16 cnt, g7r24_temp;
+
+	g7r24_temp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, g7r24_temp & (~0x10));
+	g7r24_temp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, g7r24_temp | 0x10);
+
+	cnt = 1000;
+	do {
+		udelay(delay);
+		cnt--;
+		all_ana_cal_status =
+		    ((tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) >> 1) & 0x1);
+
+	} while ((all_ana_cal_status == 0) && (cnt != 0));
+	g7r24_temp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, g7r24_temp & (~0x10));
+
+	return all_ana_cal_status;
+}
+
/*
 * ge_cal_rext - calibrate the GE PHY external-resistor (Rext/Iext) reference.
 *
 * Sweeps the 6-bit zcal code in MMD 0x1e reg 0xe0[5:0] from the 0x20
 * midpoint one step at a time (direction chosen from the initial comparator
 * bit 0x1e/0x17a[8]) until the comparator flips or the code saturates.
 * On success the code is written to 0x1e/0xe0 (both nibbles) and its top
 * three bits to 0x1f/0x115, and ge_cal_flag is set.
 *
 * NOTE(review): the while loop has no cnt bound (unlike the FE sweeps'
 * cnt < 254); termination relies on the 0x00/0x3f saturation guard.
 * NOTE(review): ANACAL_SATURATION falls into the success branch below and
 * still sets ge_cal_flag, programming the 0x20 default — confirm intent.
 *
 * @phyaddr: PHY MDIO address
 * @delay:   microseconds per poll, forwarded to all_ge_ana_cal_wait()
 */
void ge_cal_rext(u8 phyaddr, unsigned int delay)
{
	u8	rg_zcal_ctrl, all_ana_cal_status;
	u16	ad_cal_comp_out_init;
	u16	dev1e_e0_ana_cal_r5;
	int	calibration_polarity;
	u8	cnt = 0;
	u16	dev1e_17a_tmp, dev1e_e0_tmp;

	/* *** Iext/Rext Cal start ************ */
	all_ana_cal_status = ANACAL_INIT;
	/* analog calibration enable, Rext calibration enable */
	/* 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a */
	/* 1e_dc[0]:rg_txvos_calen */
	/* 1e_e1[4]:rg_cal_refsel(0:1.2V) */
	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00db, 0x1110);
	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dc, 0x0000);
	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00e1, 0x0000);

	rg_zcal_ctrl = 0x20;/* start with 0 dB */
	dev1e_e0_ana_cal_r5 = tc_phy_read_dev_reg(phyaddr, 0x1e, 0x00e0);
	/* 1e_e0[5:0]:rg_zcal_ctrl */
	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00e0, (rg_zcal_ctrl));
	all_ana_cal_status = all_ge_ana_cal_wait(delay, phyaddr);/* delay 20 usec */
	if (all_ana_cal_status == 0) {
		all_ana_cal_status = ANACAL_ERROR;
		pr_info(" GE Rext AnaCal ERROR!   \r\n");
	}
	/* 1e_17a[8]:ad_cal_comp_out */
	ad_cal_comp_out_init = (tc_phy_read_dev_reg(phyaddr, 0x1e, 0x017a) >> 8) & 0x1;
	if (ad_cal_comp_out_init == 1)
		calibration_polarity = -1;
	else /* ad_cal_comp_out_init == 0 */
		calibration_polarity = 1;

	cnt = 0;
	while (all_ana_cal_status < ANACAL_ERROR) {
		cnt++;
		rg_zcal_ctrl += calibration_polarity;
		tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00e0, (rg_zcal_ctrl));
		all_ana_cal_status = all_ge_ana_cal_wait(delay, phyaddr); /* delay 20 usec */
		dev1e_17a_tmp = tc_phy_read_dev_reg(phyaddr, 0x1e, 0x017a);
		if (all_ana_cal_status == 0) {
			all_ana_cal_status = ANACAL_ERROR;
			pr_info("  GE Rext AnaCal ERROR!   \r\n");
		} else if (((dev1e_17a_tmp >> 8) & 0x1) != ad_cal_comp_out_init) {
			/* comparator flipped: the sweep has converged */
			all_ana_cal_status = ANACAL_FINISH;
			pr_info("  GE Rext AnaCal Done! (%d)(0x%x)  \r\n", cnt, rg_zcal_ctrl);
		} else {
			dev1e_17a_tmp = tc_phy_read_dev_reg(phyaddr, 0x1e, 0x017a);
			dev1e_e0_tmp =	tc_phy_read_dev_reg(phyaddr, 0x1e, 0xe0);
			if ((rg_zcal_ctrl == 0x3F) || (rg_zcal_ctrl == 0x00)) {
				all_ana_cal_status = ANACAL_SATURATION;  /* need to FT(IC fail?) */
				pr_info(" GE Rext AnaCal Saturation!  \r\n");
				rg_zcal_ctrl = 0x20;  /* 0 dB */
			} else {
				pr_info(" GE Rxet cal (%d)(%d)(%d)(0x%x)  \r\n",
					cnt, ad_cal_comp_out_init,
				((dev1e_17a_tmp >> 8) & 0x1), dev1e_e0_tmp);
			}
		}
	}

	if (all_ana_cal_status == ANACAL_ERROR) {
		rg_zcal_ctrl = 0x20;  /* 0 dB */
		tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00e0, (dev1e_e0_ana_cal_r5 | rg_zcal_ctrl));
	} else {
		/* program the final code, mirror it into both nibbles of
		 * 1e_e0, and export the top 3 bits to 1f_115
		 */
		tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00e0, (dev1e_e0_ana_cal_r5 | rg_zcal_ctrl));
		tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00e0, ((rg_zcal_ctrl << 8) | rg_zcal_ctrl));
		/* ****  1f_115[2:0] = rg_zcal_ctrl[5:3]  // Mog review */
		tc_phy_write_dev_reg(phyaddr, 0x1f, 0x0115, ((rg_zcal_ctrl & 0x3f) >> 3));
		pr_info("  GE Rext AnaCal Done! (%d)(0x%x)  \r\n", cnt, rg_zcal_ctrl);
		ge_cal_flag = 1;
	}
	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00db, 0x0000);
	/* *** Iext/Rext Cal end *** */
}
+
+/*
+ * ge_cal_r50 - GE PHY R50 termination calibration for pairs A-D.
+ * @phyaddr: PHY address under calibration
+ * @delay:   settle delay forwarded to all_ge_ana_cal_wait()
+ *
+ * For each pair the zcal code is swept by +/-1 from the 0 dB midpoint
+ * (0x20) until the analog comparator (bit 0 of global reg 7/24 on
+ * FE_CAL_COMMON) flips, then the final code is mapped through
+ * ZCAL_TO_R50ohm_GE_TBL / ZCAL_TO_FILTER_TBL and programmed into dev1e
+ * 0x174/0x175 (R50) and 0x3a0..0x3a3 (filter).  Sets ge_cal_r50_flag.
+ *
+ * Fix vs. previous revision: the saturation pr_info() passed (cnt,
+ * rg_zcal_ctrl) against a format of "rg_zcal_ctrl = 0x%x(%d)" — the
+ * arguments were swapped; they are now in format order.
+ */
+void ge_cal_r50(u8 phyaddr, unsigned int delay)
+{
+	u8	rg_zcal_ctrl, all_ana_cal_status, i;
+	u16	ad_cal_comp_out_init;
+	u16	dev1e_e0_ana_cal_r5;
+	int	calibration_polarity;
+	u16	cal_pair, val_tmp, g7r24_tmp;
+	u16	dev1e_174_tmp, dev1e_175_tmp, l3r25_temp;
+	u8	rg_zcal_ctrl_filter, cnt = 0;
+
+	/* *** R50 Cal start***************** */
+	fe_ge_r50_common(phyaddr);
+	/* 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a */
+	/* 1e_dc[0]:rg_txvos_calen */
+	/*disable RG_ZCALEN*/
+	/*decide which port calibration RG_ZCALEN by port_num*/
+	for (i = 1; i <= 4; i++) {
+		l3r25_temp = tc_phy_read_l_reg(i, 3, 25);
+		l3r25_temp = l3r25_temp & ~(0x1000);
+		tc_phy_write_l_reg(i, 3, 25, l3r25_temp);
+	}
+	for (cal_pair = ANACAL_PAIR_A; cal_pair <= ANACAL_PAIR_D; cal_pair++) {
+		rg_zcal_ctrl = 0x20;/* start with 0 dB */
+		dev1e_e0_ana_cal_r5 = (tc_phy_read_dev_reg(phyaddr, 0x1e, 0x00e0) & (~0x003f));
+		/* 1e_e0[5:0]:rg_zcal_ctrl */
+		/* select which pair's zcal path is enabled via 1e_dd */
+		if (cal_pair == ANACAL_PAIR_A) {
+	/* 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a */
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dd, 0x1000);
+		} else if (cal_pair == ANACAL_PAIR_B) {
+	/* 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a */
+	/* 1e_dc[12]:rg_zcalen_b */
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dd, 0x0100);
+		} else if (cal_pair == ANACAL_PAIR_C) {
+	/* 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a */
+	/* 1e_dc[8]:rg_zcalen_c */
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dd, 0x0010);
+
+		} else {/* if(cal_pair == ANACAL_PAIR_D) */
+
+	/* 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a */
+	/* 1e_dc[4]:rg_zcalen_d */
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dd, 0x0001);
+		}
+		rg_zcal_ctrl = 0x20;	/* start with 0 dB */
+		/* g7r24[11:6] carries the zcal code on FE_CAL_COMMON */
+		g7r24_tmp = (tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & (~0xfc0));
+		tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, g7r24_tmp | ((rg_zcal_ctrl & 0x3f) << 6));
+
+		/*wait AD_CAL_COMP_OUT = 1*/
+		all_ana_cal_status = all_ge_ana_cal_wait(delay, phyaddr);
+		if (all_ana_cal_status == 0) {
+			all_ana_cal_status = ANACAL_ERROR;
+			pr_info(" GE R50 AnaCal ERROR! (init)   \r\n");
+		}
+		/* initial comparator output decides sweep direction */
+		ad_cal_comp_out_init = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1;
+		if (ad_cal_comp_out_init == 1)
+			calibration_polarity = -1;
+		else
+			calibration_polarity = 1;
+
+		cnt = 0;
+		/* sweep until comparator flips, errors, or saturates */
+		while ((all_ana_cal_status < ANACAL_ERROR) && (cnt < 254)) {
+			cnt++;
+
+			rg_zcal_ctrl += calibration_polarity;
+			g7r24_tmp = (tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & (~0xfc0));
+			tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24,
+					   g7r24_tmp | ((rg_zcal_ctrl & 0x3f) << 6));
+			all_ana_cal_status = all_ge_ana_cal_wait(delay, phyaddr);
+
+			if (all_ana_cal_status == 0) {
+				all_ana_cal_status = ANACAL_ERROR;
+				pr_info(" GE R50 AnaCal ERROR! (%d)  \r\n", cnt);
+			} else if ((tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1) !=
+				ad_cal_comp_out_init) {
+				all_ana_cal_status = ANACAL_FINISH;
+			} else {
+				if ((rg_zcal_ctrl == 0x3F) || (rg_zcal_ctrl == 0x00)) {
+					all_ana_cal_status = ANACAL_SATURATION;
+					/* args in format order: hex code first, then count */
+					pr_info(" GE R50 Cal Sat! rg_zcal_ctrl = 0x%x(%d)\n",
+						rg_zcal_ctrl, cnt);
+				}
+			}
+		}
+
+		if ((all_ana_cal_status == ANACAL_ERROR) ||
+		    (all_ana_cal_status == ANACAL_SATURATION)) {
+			rg_zcal_ctrl = 0x20;  /* 0 dB */
+			rg_zcal_ctrl_filter = 8; /*default value*/
+		} else {
+			/*DA_TX_R50*/
+			rg_zcal_ctrl_filter = rg_zcal_ctrl;
+			rg_zcal_ctrl = ZCAL_TO_R50ohm_GE_TBL[rg_zcal_ctrl];
+			/*DA_TX_FILTER*/
+			rg_zcal_ctrl_filter = ZCAL_TO_FILTER_TBL[rg_zcal_ctrl_filter];
+			rg_zcal_ctrl_filter = rg_zcal_ctrl_filter & 0xf;
+			rg_zcal_ctrl_filter = rg_zcal_ctrl_filter << 8 | rg_zcal_ctrl_filter;
+		}
+		/* commit the per-pair result only on a clean finish */
+		if (all_ana_cal_status == ANACAL_FINISH) {
+			if (cal_pair == ANACAL_PAIR_A) {
+				dev1e_174_tmp = tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0174);
+				dev1e_174_tmp = dev1e_174_tmp & ~(0xff00);
+				if (rg_zcal_ctrl > 4) {
+					val_tmp = (((rg_zcal_ctrl - 4) << 8) & 0xff00) |
+						dev1e_174_tmp;
+				} else {
+					val_tmp = (((0) << 8) & 0xff00) | dev1e_174_tmp;
+				}
+
+				tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0174, val_tmp);
+				tc_phy_write_dev_reg(phyaddr, 0x1e, 0x03a0, rg_zcal_ctrl_filter);
+
+				pr_info("R50_PAIR_A : 1e_174 = 0x%x\n",
+					tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0174));
+				pr_info("R50_PAIR_A : 1e_3a0 = 0x%x\n",
+					tc_phy_read_dev_reg(phyaddr, 0x1e, 0x03a0));
+
+			} else if (cal_pair == ANACAL_PAIR_B) {
+				dev1e_174_tmp = tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0174);
+				dev1e_174_tmp = dev1e_174_tmp & (~0x007f);
+				if (rg_zcal_ctrl > 2) {
+					val_tmp = (((rg_zcal_ctrl - 2) << 0) & 0xff) |
+						dev1e_174_tmp;
+				} else {
+					val_tmp = (((0) << 0) & 0xff) |
+						dev1e_174_tmp;
+				}
+				tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0174, val_tmp);
+				tc_phy_write_dev_reg(phyaddr, 0x1e, 0x03a1, rg_zcal_ctrl_filter);
+				pr_info("R50_PAIR_B : 1e_174 = 0x%x\n",
+					tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0174));
+				pr_info("R50_PAIR_B : 1e_3a1 = 0x%x\n",
+					tc_phy_read_dev_reg(phyaddr, 0x1e, 0x03a1));
+			} else if (cal_pair == ANACAL_PAIR_C) {
+				dev1e_175_tmp = tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0175);
+				dev1e_175_tmp =  dev1e_175_tmp & (~0x7f00);
+				if (rg_zcal_ctrl > 4) {
+					val_tmp = dev1e_175_tmp |
+						(((rg_zcal_ctrl - 4) << 8) & 0xff00);
+				} else {
+					val_tmp = dev1e_175_tmp | (((0) << 8) & 0xff00);
+				}
+				tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0175, val_tmp);
+				tc_phy_write_dev_reg(phyaddr, 0x1e, 0x03a2, rg_zcal_ctrl_filter);
+				pr_info("R50_PAIR_C : 1e_175 = 0x%x\n",
+					tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0175));
+				pr_info("R50_PAIR_C : 1e_3a2 = 0x%x\n",
+					tc_phy_read_dev_reg(phyaddr, 0x1e, 0x03a2));
+
+			} else {/* if(cal_pair == ANACAL_PAIR_D) */
+				dev1e_175_tmp = tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0175);
+				dev1e_175_tmp = dev1e_175_tmp & (~0x007f);
+				if (rg_zcal_ctrl > 6) {
+					val_tmp = dev1e_175_tmp |
+						(((rg_zcal_ctrl - 6)  << 0) & 0xff);
+				} else {
+					val_tmp = dev1e_175_tmp |
+						(((0)  << 0) & 0xff);
+				}
+
+				tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0175, val_tmp);
+				tc_phy_write_dev_reg(phyaddr, 0x1e, 0x03a3, rg_zcal_ctrl_filter);
+				pr_info("R50_PAIR_D : 1e_175 = 0x%x\n",
+					tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0175));
+				pr_info("R50_PAIR_D : 1e_3a3 = 0x%x\n",
+					tc_phy_read_dev_reg(phyaddr, 0x1e, 0x03a3));
+			}
+		}
+	}
+	clear_ckinv_ana_txvos();
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00db, 0x0000);
+	ge_cal_r50_flag = 1;
+	/* *** R50 Cal end *** */
+}
+
+/*
+ * ge_cal_tx_amp - GE PHY Tx amplitude calibration for pairs A-D.
+ * @phyaddr: PHY address under calibration
+ * @delay:   settle delay forwarded to all_ge_ana_cal_wait()
+ *
+ * For each pair: force the Tx DACs into calibration mode, sweep the
+ * amplitude code by +/-1 from 0x20 until the analog comparator (bit 0
+ * of global reg 7/24 on FE_CAL_COMMON) flips, then write pair-specific
+ * 1000M (tx_amp_reg) and 100M (tx_amp_reg_100) amplitude values derived
+ * from the found code with fixed per-pair offsets.  Sets ge_cal_flag.
+ * Code is left byte-identical; comments only.
+ */
+void ge_cal_tx_amp(u8 phyaddr, unsigned int delay)
+{
+	u8	all_ana_cal_status;
+	u16	ad_cal_comp_out_init;
+	int	calibration_polarity;
+	u16	cal_pair;
+	u8	tx_amp_reg_shift;
+	u16	reg_temp, val_tmp, l3r25_temp, val_tmp_100;
+	u8	tx_amp_temp, tx_amp_reg, cnt = 0, tx_amp_reg_100;
+
+	u16	tx_amp_temp_L, tx_amp_temp_M;
+	u16	tx_amp_L_100, tx_amp_M_100;
+	/* *** Tx Amp Cal start ***/
+	/* force 1000M full duplex, AN off (BMCR = 0x0140) */
+	tc_phy_write_l_reg(0, 0, 0, 0x0140);
+
+	/* analog/DAC setup for amplitude calibration; 0x80f0 forces the
+	 * Tx DAC inputs on all pairs (1e_17d..1e_184)
+	 */
+	tc_phy_write_dev_reg(0, 0x1e, 0x3e, 0xf808);
+	tc_phy_write_dev_reg(0, 0x1e, 0x145, 0x5010);
+	tc_phy_write_dev_reg(0, 0x1e, 0x17d, 0x80f0);
+	tc_phy_write_dev_reg(0, 0x1e, 0x17e, 0x80f0);
+	tc_phy_write_dev_reg(0, 0x1e, 0x17f, 0x80f0);
+	tc_phy_write_dev_reg(0, 0x1e, 0x180, 0x80f0);
+	tc_phy_write_dev_reg(0, 0x1e, 0x181, 0x80f0);
+	tc_phy_write_dev_reg(0, 0x1e, 0x182, 0x80f0);
+	tc_phy_write_dev_reg(0, 0x1e, 0x183, 0x80f0);
+	tc_phy_write_dev_reg(0, 0x1e, 0x184, 0x80f0);
+	/* 1e_db/1e_dc: enable analog calibration + Tx offset cal circuit */
+	tc_phy_write_dev_reg(0, 0x1e, 0x00db, 0x1000);
+	tc_phy_write_dev_reg(0, 0x1e, 0x00dc, 0x0001);
+	tc_phy_write_dev_reg(0, 0x1f, 0x300, 0x4);
+	tc_phy_write_dev_reg(0, 0x1f, 0x27a, 0x33);
+	tc_phy_write_g_reg(1, 2, 25, 0xf020);
+	tc_phy_write_dev_reg(0, 0x1f, 0x300, 0x14);
+	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, 0x7000);
+	l3r25_temp = tc_phy_read_l_reg(FE_CAL_COMMON, 3, 25);
+	l3r25_temp = l3r25_temp | 0x200;
+	tc_phy_write_l_reg(FE_CAL_COMMON, 3, 25, l3r25_temp);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x11, 0xff00);
+	tc_phy_write_dev_reg(phyaddr, 0x1f, 0x273, 0);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0xc9, 0xffff);
+	tc_phy_write_g_reg(1, 2, 25, 0xb020);
+
+	for (cal_pair = ANACAL_PAIR_A; cal_pair <= ANACAL_PAIR_D; cal_pair++) {
+		tx_amp_temp = 0x20;	/* start with 0 dB */
+		tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, 0x7000);
+		/* per-pair selection: 1e_dd enables the pair's cal path;
+		 * tx_amp_reg/tx_amp_reg_100 are the pair's 1000M/100M
+		 * amplitude registers, tx_amp_reg_shift the field position
+		 */
+		if (cal_pair == ANACAL_PAIR_A) {
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dd, 0x1000);
+			reg_temp = (tc_phy_read_dev_reg(phyaddr, 0x1e, 0x012) & (~0xfc00));
+			tx_amp_reg_shift = 10;
+			tx_amp_reg = 0x12;
+			tx_amp_reg_100 = 0x16;
+		} else if (cal_pair == ANACAL_PAIR_B) {
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dd, 0x0100);
+			reg_temp = (tc_phy_read_dev_reg(phyaddr, 0x1e, 0x017) & (~0x3f00));
+			tx_amp_reg_shift = 8;
+			tx_amp_reg = 0x17;
+			tx_amp_reg_100 = 0x18;
+		} else if (cal_pair == ANACAL_PAIR_C) {
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dd, 0x0010);
+			reg_temp = (tc_phy_read_dev_reg(phyaddr, 0x1e, 0x019) & (~0x3f00));
+			tx_amp_reg_shift = 8;
+			tx_amp_reg = 0x19;
+			tx_amp_reg_100 = 0x20;
+		} else {/* if(cal_pair == ANACAL_PAIR_D) */
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dd, 0x0001);
+			reg_temp = (tc_phy_read_dev_reg(phyaddr, 0x1e, 0x021) & (~0x3f00));
+			tx_amp_reg_shift = 8;
+			tx_amp_reg = 0x21;
+			tx_amp_reg_100 = 0x22;
+		}
+		/* 1e_12, 1e_17, 1e_19, 1e_21 */
+		val_tmp = tx_amp_temp | (tx_amp_temp << tx_amp_reg_shift);
+		tc_phy_write_dev_reg(phyaddr, 0x1e, tx_amp_reg, val_tmp);
+		tc_phy_write_dev_reg(phyaddr, 0x1e, tx_amp_reg_100, val_tmp);
+		all_ana_cal_status = all_ge_ana_cal_wait(delay, phyaddr);
+		if (all_ana_cal_status == 0) {
+			all_ana_cal_status = ANACAL_ERROR;
+			pr_info(" GE Tx amp AnaCal ERROR!   \r\n");
+		}
+/* 1e_17a[8]:ad_cal_comp_out */
+		/* initial comparator output decides sweep direction */
+		ad_cal_comp_out_init = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1;
+		if (ad_cal_comp_out_init == 1)
+			calibration_polarity = -1;
+		else
+			calibration_polarity = 1;
+
+		cnt = 0;
+		/* sweep by +/-1; terminates on comparator flip, error, or
+		 * saturation at 0x00/0x3f (code moves monotonically)
+		 */
+		while (all_ana_cal_status < ANACAL_ERROR) {
+			cnt++;
+			tx_amp_temp += calibration_polarity;
+
+			val_tmp = (tx_amp_temp | (tx_amp_temp << tx_amp_reg_shift));
+			tc_phy_write_dev_reg(phyaddr, 0x1e, tx_amp_reg, val_tmp);
+			tc_phy_write_dev_reg(phyaddr, 0x1e, tx_amp_reg_100, val_tmp);
+			all_ana_cal_status = all_ge_ana_cal_wait(delay, phyaddr);
+			if (all_ana_cal_status == 0) {
+				all_ana_cal_status = ANACAL_ERROR;
+				pr_info(" GE Tx amp AnaCal ERROR!\n");
+			} else if ((tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1) !=
+				    ad_cal_comp_out_init) {
+				all_ana_cal_status = ANACAL_FINISH;
+			} else {
+				if ((tx_amp_temp == 0x3f) || (tx_amp_temp == 0x00)) {
+					all_ana_cal_status = ANACAL_SATURATION;
+					pr_info(" GE Tx amp AnaCal Saturation!  \r\n");
+				}
+			}
+		}
+		if (all_ana_cal_status == ANACAL_ERROR) {
+			pr_info("ANACAL_ERROR\n");
+			tx_amp_temp = 0x20;
+			val_tmp = (reg_temp | (tx_amp_temp << tx_amp_reg_shift));
+			tc_phy_write_dev_reg(phyaddr, 0x1e, tx_amp_reg, val_tmp);
+		}
+
+		if (all_ana_cal_status == ANACAL_FINISH) {
+			/* per-pair empirical offsets for the 1000M M/L codes */
+			if (cal_pair == ANACAL_PAIR_A) {
+				tx_amp_temp_M = tx_amp_temp + 9;
+				tx_amp_temp_L = tx_amp_temp + 18;
+			} else if (cal_pair == ANACAL_PAIR_B) {
+				tx_amp_temp_M = tx_amp_temp + 8;
+				tx_amp_temp_L = tx_amp_temp + 22;
+			} else if (cal_pair == ANACAL_PAIR_C) {
+				tx_amp_temp_M = tx_amp_temp + 9;
+				tx_amp_temp_L = tx_amp_temp + 9;
+			} else if (cal_pair == ANACAL_PAIR_D) {
+				tx_amp_temp_M = tx_amp_temp + 9;
+				tx_amp_temp_L = tx_amp_temp + 9;
+			}
+			/* clamp both codes to the 6-bit maximum */
+			if (tx_amp_temp_L >= 0x3f)
+				tx_amp_temp_L = 0x3f;
+			if (tx_amp_temp_M >= 0x3f)
+				tx_amp_temp_M = 0x3f;
+			val_tmp = ((tx_amp_temp_L) |
+				((tx_amp_temp_M) << tx_amp_reg_shift));
+			/* per-pair offsets for the 100M codes, clamped to
+			 * [0, 0x3f]
+			 */
+			if (cal_pair == ANACAL_PAIR_A) {
+				if (tx_amp_temp < 6)
+					tx_amp_M_100 = 0;
+				else
+					tx_amp_M_100 = tx_amp_temp - 6;
+
+				if ((tx_amp_temp + 9) >= 0x3f)
+					tx_amp_L_100 = 0x3f;
+				else
+					tx_amp_L_100 = tx_amp_temp + 9;
+				val_tmp_100 = ((tx_amp_L_100) |
+					((tx_amp_M_100) << tx_amp_reg_shift));
+			} else if (cal_pair == ANACAL_PAIR_B) {
+				if (tx_amp_temp < 7)
+					tx_amp_M_100 = 0;
+				else
+					tx_amp_M_100 = tx_amp_temp - 7;
+
+				if ((tx_amp_temp + 8) >= 0x3f)
+					tx_amp_L_100 = 0x3f;
+				else
+					tx_amp_L_100 = tx_amp_temp + 8;
+				val_tmp_100 = ((tx_amp_L_100) |
+					((tx_amp_M_100) << tx_amp_reg_shift));
+			} else if (cal_pair == ANACAL_PAIR_C) {
+				if ((tx_amp_temp + 9) >= 0x3f)
+					tx_amp_L_100 = 0x3f;
+				else
+					tx_amp_L_100 = tx_amp_temp + 9;
+				tx_amp_M_100 = tx_amp_L_100;
+				val_tmp_100 = ((tx_amp_L_100) |
+					((tx_amp_M_100) << tx_amp_reg_shift));
+			} else if (cal_pair == ANACAL_PAIR_D) {
+				if ((tx_amp_temp + 9) >= 0x3f)
+					tx_amp_L_100 = 0x3f;
+				else
+					tx_amp_L_100 = tx_amp_temp + 9;
+
+				tx_amp_M_100 = tx_amp_L_100;
+				val_tmp_100 = ((tx_amp_L_100) |
+					((tx_amp_M_100) << tx_amp_reg_shift));
+			}
+
+			tc_phy_write_dev_reg(phyaddr, 0x1e, tx_amp_reg, val_tmp);
+			tc_phy_write_dev_reg(phyaddr, 0x1e, tx_amp_reg_100, val_tmp_100);
+
+			if (cal_pair == ANACAL_PAIR_A) {
+				pr_info("TX_AMP_PAIR_A : 1e_%x = 0x%x\n",
+					tx_amp_reg,
+					tc_phy_read_dev_reg(phyaddr, 0x1e, tx_amp_reg));
+				pr_info("TX_AMP_PAIR_A : 1e_%x = 0x%x\n",
+					tx_amp_reg_100,
+					tc_phy_read_dev_reg(phyaddr, 0x1e, tx_amp_reg_100));
+			} else if (cal_pair == ANACAL_PAIR_B) {
+				pr_info("TX_AMP_PAIR_B : 1e_%x = 0x%x\n",
+					tx_amp_reg,
+					tc_phy_read_dev_reg(phyaddr, 0x1e, tx_amp_reg));
+				pr_info("TX_AMP_PAIR_B : 1e_%x = 0x%x\n",
+					tx_amp_reg_100,
+					tc_phy_read_dev_reg(phyaddr, 0x1e, tx_amp_reg_100));
+			} else if (cal_pair == ANACAL_PAIR_C) {
+				pr_info("TX_AMP_PAIR_C : 1e_%x = 0x%x\n",
+					tx_amp_reg,
+					tc_phy_read_dev_reg(phyaddr, 0x1e, tx_amp_reg));
+				pr_info("TX_AMP_PAIR_C : 1e_%x = 0x%x\n",
+					tx_amp_reg_100,
+					tc_phy_read_dev_reg(phyaddr, 0x1e, tx_amp_reg_100));
+
+			} else {/* if(cal_pair == ANACAL_PAIR_D) */
+				pr_info("TX_AMP_PAIR_D : 1e_%x = 0x%x\n",
+					tx_amp_reg,
+					tc_phy_read_dev_reg(phyaddr, 0x1e, tx_amp_reg));
+				pr_info("TX_AMP_PAIR_D : 1e_%x = 0x%x\n",
+					tx_amp_reg_100,
+					tc_phy_read_dev_reg(phyaddr, 0x1e, tx_amp_reg_100));
+			}
+		}
+	}
+
+	ge_cal_flag = 1;
+	pr_info("GE_TX_AMP END\n");
+	/* release the forced Tx DAC inputs set at function entry */
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x017d, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x017e, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x017f, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0180, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0181, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0182, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0183, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0184, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1f, 0x273, 0x2000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0xc9, 0x0fff);
+	tc_phy_write_g_reg(1, 2, 25, 0xb020);
+	tc_phy_write_dev_reg(0, 0x1e, 0x145, 0x1000);
+
+/* disable analog calibration circuit */
+/* disable Tx offset calibration circuit */
+/* disable Tx VLD force mode */
+/* disable Tx offset/amplitude calibration circuit */
+
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00db, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dc, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x003e, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dd, 0x0000);
+	/* *** Tx Amp Cal end *** */
+}
+
+/*
+ * ge_cal_tx_offset - GE PHY Tx DC-offset calibration for pairs A-D.
+ * @phyaddr: PHY address under calibration
+ * @delay:   settle delay forwarded to all_ge_ana_cal_wait()
+ *
+ * For each pair: force the pair's Tx DACs to DAC_IN_0V, then sweep the
+ * 6-bit offset code in 1e_172/1e_173 by +/-1 from 0x20 until the
+ * analog comparator (bit 0 of global reg 7/24 on FE_CAL_COMMON) flips,
+ * saturates (0x00/0x3f) or errors.  Sets ge_cal_tx_offset_flag and
+ * restores the cal circuitry on exit.  Code byte-identical; comments
+ * only.
+ */
+void ge_cal_tx_offset(u8 phyaddr, unsigned int delay)
+{
+	u8	all_ana_cal_status;
+	u16	ad_cal_comp_out_init;
+	int	calibration_polarity, tx_offset_temp;
+	u16	cal_pair, cal_temp;
+	u8	tx_offset_reg_shift;
+	u16	tx_offset_reg, reg_temp, val_tmp;
+	u8	cnt = 0;	/* iteration counter (diagnostic only) */
+
+	/* force 100M full duplex, AN off (BMCR = 0x2100) */
+	tc_phy_write_l_reg(0, 0, 0, 0x2100);
+
+	/* 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a */
+	/* 1e_dc[0]:rg_txvos_calen */
+	/* 1e_96[15]:bypass_tx_offset_cal, Hw bypass, Fw cal */
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00db, 0x0100);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dc, 0x0001);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0096, 0x8000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x003e, 0xf808);/* 1e_3e */
+	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, 0x3000);
+
+	for (cal_pair = ANACAL_PAIR_A; cal_pair <= ANACAL_PAIR_D; cal_pair++) {
+		tx_offset_temp = 0x20;
+
+		/* per-pair selection: 1e_dd enables the pair's cal path;
+		 * the pair's DAC-force regs are driven to 0 V and the
+		 * offset field location in 1e_172/1e_173 is recorded
+		 */
+		if (cal_pair == ANACAL_PAIR_A) {
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x145, 0x5010);
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dd, 0x1000);
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x017d, (0x8000 | DAC_IN_0V));
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0181, (0x8000 | DAC_IN_0V));
+			reg_temp = (tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0172) & (~0x3f00));
+			tx_offset_reg_shift = 8;/* 1e_172[13:8] */
+			tx_offset_reg = 0x0172;
+
+		} else if (cal_pair == ANACAL_PAIR_B) {
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x145, 0x5018);
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dd, 0x0100);
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x017e, (0x8000 | DAC_IN_0V));
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0182, (0x8000 | DAC_IN_0V));
+			reg_temp = (tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0172) & (~0x003f));
+			tx_offset_reg_shift = 0;
+			tx_offset_reg = 0x0172;/* 1e_172[5:0] */
+		} else if (cal_pair == ANACAL_PAIR_C) {
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dd, 0x0010);
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x017f, (0x8000 | DAC_IN_0V));
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0183, (0x8000 | DAC_IN_0V));
+			reg_temp = (tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0173) & (~0x3f00));
+			tx_offset_reg_shift = 8;
+			tx_offset_reg = 0x0173;/* 1e_173[13:8] */
+		} else {/* if(cal_pair == ANACAL_PAIR_D) */
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dd, 0x0001);
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0180, (0x8000 | DAC_IN_0V));
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0184, (0x8000 | DAC_IN_0V));
+			reg_temp = (tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0173) & (~0x003f));
+			tx_offset_reg_shift = 0;
+			tx_offset_reg = 0x0173;/* 1e_173[5:0] */
+		}
+		/* 1e_172, 1e_173 */
+		val_tmp =  (reg_temp | (tx_offset_temp << tx_offset_reg_shift));
+		tc_phy_write_dev_reg(phyaddr, 0x1e, tx_offset_reg, val_tmp);
+
+		all_ana_cal_status = all_ge_ana_cal_wait(delay, phyaddr); /* delay 20 usec */
+		if (all_ana_cal_status == 0) {
+			all_ana_cal_status = ANACAL_ERROR;
+			pr_info(" GE Tx offset AnaCal ERROR!   \r\n");
+		}
+		/* initial comparator output decides sweep direction */
+		ad_cal_comp_out_init = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1;
+		if (ad_cal_comp_out_init == 1)
+			calibration_polarity = -1;
+		else
+			calibration_polarity = 1;
+
+		cnt = 0;
+		/* first step is applied before the loop; inside the loop
+		 * the code advances only while neither finished nor
+		 * saturated, so the sweep is bounded by 0x00/0x3f
+		 */
+		tx_offset_temp += calibration_polarity;
+		while (all_ana_cal_status < ANACAL_ERROR) {
+			cnt++;
+			cal_temp = tx_offset_temp;
+			val_tmp = (reg_temp | (cal_temp << tx_offset_reg_shift));
+			tc_phy_write_dev_reg(phyaddr, 0x1e, tx_offset_reg, val_tmp);
+
+			all_ana_cal_status = all_ge_ana_cal_wait(delay, phyaddr);
+			if (all_ana_cal_status == 0) {
+				all_ana_cal_status = ANACAL_ERROR;
+				pr_info(" GE Tx offset AnaCal ERROR!   \r\n");
+			} else if ((tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1) !=
+				    ad_cal_comp_out_init) {
+				all_ana_cal_status = ANACAL_FINISH;
+			} else {
+				if ((tx_offset_temp == 0x3f) || (tx_offset_temp == 0x00)) {
+					all_ana_cal_status = ANACAL_SATURATION;
+					pr_info("GE tx offset ANACAL_SATURATION\n");
+					/* tx_amp_temp += calibration_polarity; */
+				} else {
+					tx_offset_temp += calibration_polarity;
+				}
+			}
+		}
+		if (all_ana_cal_status == ANACAL_ERROR) {
+			/* fall back to the mid-scale (0 dB) code on error */
+			tx_offset_temp = 0x20;
+			val_tmp = (reg_temp | (tx_offset_temp << tx_offset_reg_shift));
+			tc_phy_write_dev_reg(phyaddr, 0x1e, tx_offset_reg, val_tmp);
+		}
+
+		if (all_ana_cal_status == ANACAL_FINISH) {
+			if (cal_pair == ANACAL_PAIR_A) {
+				pr_info("TX_OFFSET_PAIR_A : 1e_%x = 0x%x\n",
+					tx_offset_reg,
+				tc_phy_read_dev_reg(phyaddr, 0x1e, tx_offset_reg));
+			} else if (cal_pair == ANACAL_PAIR_B) {
+				pr_info("TX_OFFSET_PAIR_B : 1e_%x = 0x%x\n",
+					tx_offset_reg,
+				tc_phy_read_dev_reg(phyaddr, 0x1e, tx_offset_reg));
+			} else if (cal_pair == ANACAL_PAIR_C) {
+				pr_info("TX_OFFSET_PAIR_C : 1e_%x = 0x%x\n",
+					tx_offset_reg,
+				tc_phy_read_dev_reg(phyaddr, 0x1e, tx_offset_reg));
+
+			} else {/* if(cal_pair == ANACAL_PAIR_D) */
+				pr_info("TX_OFFSET_PAIR_D : 1e_%x = 0x%x\n",
+					tx_offset_reg,
+				tc_phy_read_dev_reg(phyaddr, 0x1e, tx_offset_reg));
+			}
+		}
+	}
+	ge_cal_tx_offset_flag = 1;
+	clear_ckinv_ana_txvos();
+	/* release the forced Tx DAC inputs */
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x017d, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x017e, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x017f, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0180, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0181, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0182, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0183, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0184, 0x0000);
+/* disable analog calibration circuit */
+/* disable Tx offset calibration circuit */
+/* disable Tx VLD force mode */
+/* disable Tx offset/amplitude calibration circuit */
+
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00db, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dc, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x003e, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dd, 0x0000);
+}
+
+/*
+ * do_ge_phy_all_analog_cal - run the full GE PHY analog calibration.
+ * @phyaddr: PHY address under calibration
+ *
+ * Sequence: save BMCR, force 1000M/AN-off, enable BG voltage, then run
+ * (optionally) Rext, R50, Tx offset and Tx amplitude calibration, a
+ * hardware-driven Rx offset calibration, restore BMCR, and finally
+ * program fixed eye-diagram / Tx-bias tuning values.
+ */
+void do_ge_phy_all_analog_cal(u8 phyaddr)
+{
+	u16	reg0_temp, dev1e_145_temp, reg_temp;
+	u16	reg_tmp;
+
+	tc_mii_write(phyaddr, 0x1f, 0x0000);/* g0 */
+	reg0_temp = tc_mii_read(phyaddr, 0x0);/* keep the default value */
+/* set [12]AN disable, [8]full duplex, [13/6]1000Mbps */
+	tc_mii_write(phyaddr, 0x0,  0x0140);
+
+	tc_phy_write_dev_reg(phyaddr, 0x1f, 0x0100, 0xc000);/* BG voltage output */
+	dev1e_145_temp = tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0145);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0145, 0x1010);/* fix mdi */
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0185, 0x0000);/* disable tx slew control */
+
+	tc_phy_write_dev_reg(phyaddr, 0x1f, 0x27c, 0x1f1f);
+	tc_phy_write_dev_reg(phyaddr, 0x1f, 0x27c, 0x3300);
+	tc_phy_write_dev_reg(phyaddr, 0x1f, 0x273, 0);
+
+	/* set 1e_11[15:12] */
+	reg_tmp = tc_phy_read_dev_reg(phyaddr, 0x1e, 0x11);
+	reg_tmp = reg_tmp | (0xf << 12);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x11, reg_tmp);
+
+	/* calibration start ============ */
+	/* NOTE(review): ge_cal_flag is pre-set to 1, so the while loop
+	 * below never runs and ge_cal_rext() is effectively skipped; the
+	 * original comment suggests this is intentional — confirm.
+	 */
+	ge_cal_flag = 1; /*GE calibration not calibration*/
+	while (ge_cal_flag == 0)
+		ge_cal_rext(phyaddr, 100);
+
+	/* *** R50 Cal start ***************************** */
+	/*phyaddress = 0*/
+	ge_cal_r50(phyaddr, CALDLY);
+	/* *** R50 Cal end *** */
+
+	/* *** Tx offset Cal start *********************** */
+	ge_cal_tx_offset(phyaddr, CALDLY);
+	/* *** Tx offset Cal end *** */
+
+	/* *** Tx Amp Cal start *** */
+	ge_cal_tx_amp(phyaddr, CALDLY);
+	/* *** Tx Amp Cal end *** */
+
+	/* *** Rx offset Cal start *************** */
+	/* 1e_96[15]:bypass_tx_offset_cal, Hw bypass, Fw cal */
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0096, 0x8000);
+	/* tx/rx_cal_criteria_value */
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0037, 0x0033);
+	/* [14]: bypass all calibration, [11]: bypass adc offset cal analog */
+	reg_temp = (tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0039) & (~0x4800));
+	/* rx offset cal by Hw setup */
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0039, reg_temp);
+	/* [12]: enable rtune calibration */
+	reg_temp = (tc_phy_read_dev_reg(phyaddr, 0x1f, 0x0107) & (~0x1000));
+	/* disable rtune calibration */
+	tc_phy_write_dev_reg(phyaddr, 0x1f, 0x0107, reg_temp);
+	/* 1e_171[8:7]: bypass tx/rx dc offset cancellation process */
+	reg_temp = (tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0171) & (~0x0180));
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0171, (reg_temp | 0x0180));
+	reg_temp = tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0039);
+	/* rx offset calibration start */
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0039, (reg_temp | 0x2000));
+	/* rx offset calibration end */
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0039, (reg_temp & (~0x2000)));
+	mdelay(10);	/* mdelay for Hw calibration finish */
+	reg_temp = (tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0171) & (~0x0180));
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0171, reg_temp);
+
+	/* restore saved BMCR and pre-calibration register state */
+	tc_mii_write(phyaddr, 0x0,  reg0_temp);
+	tc_phy_write_dev_reg(phyaddr, 0x1f, 0x0100, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0145, dev1e_145_temp);
+	tc_phy_write_dev_reg(phyaddr, 0x1f, 0x273, 0x2000);
+	/* *** Rx offset Cal end *** */
+	/*eye pic*/
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0, 0x018d);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x1, 0x01c7);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x2, 0x01c0);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x3, 0x003a);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x4, 0x0206);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x5, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x6, 0x038a);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x7, 0x03c8);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x8, 0x03c0);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x9, 0x0235);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0xa, 0x0008);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0xb, 0x0000);
+
+	/*tmp maybe changed*/
+	tc_phy_write_dev_reg(phyaddr, 0x1f, 0x27c, 0x1111);
+	tc_phy_write_dev_reg(phyaddr, 0x1f, 0x27b, 0x47);
+	tc_phy_write_dev_reg(phyaddr, 0x1f, 0x273, 0x2200);
+
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x3a8, 0x0810);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x3aa, 0x0008);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x3ab, 0x0810);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x3ad, 0x0008);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x3ae, 0x0106);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x3b0, 0x0001);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x3b1, 0x0106);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x3b3, 0x0001);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x18c, 0x0001);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x18d, 0x0001);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x18e, 0x0001);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x18f, 0x0001);
+
+	/*da_tx_bias1_b_tx_standby = 5'b10 (dev1eh_reg3aah[12:8])*/
+	reg_tmp = tc_phy_read_dev_reg(phyaddr, 0x1e, 0x3aa);
+	reg_tmp = reg_tmp & ~(0x1f00);
+	reg_tmp = reg_tmp | 0x2 << 8;
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x3aa, reg_tmp);
+
+	/*da_tx_bias1_a_tx_standby = 5'b10 (dev1eh_reg3a9h[4:0])*/
+	reg_tmp = tc_phy_read_dev_reg(phyaddr, 0x1e, 0x3a9);
+	reg_tmp = reg_tmp & ~(0x1f);
+	reg_tmp = reg_tmp | 0x2;
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x3a9, reg_tmp);
+}
+
+/* Compiled-out helpers kept for reference: whole-chip EPHY calibration
+ * entry points for MT7622 (5 FE ports) and Leopard (4 FE + 1 GE).
+ */
+#if 0
+/* Calibrate all 5 MT7622 FE PHYs and optionally log elapsed jiffies. */
+static void mt7622_ephy_cal(void)
+{
+	int i;
+	unsigned long t_s, t_e;
+
+	t_s = jiffies;
+	for (i = 0; i < 5; i++)
+		do_fe_phy_all_analog_cal(i);
+	t_e = jiffies;
+	if (show_time)
+		pr_info("cal time = %lu\n", (t_e - t_s) * 4);
+}
+
+/* Calibrate Leopard FE PHYs 1-4 plus the GE PHY at address 0.
+ * NOTE(review): if dbg were ever 0, t_s/t_e would be read
+ * uninitialized by the show_time print below — confirm before
+ * re-enabling this block.
+ */
+static void leopard_ephy_cal(void)
+{
+	int i, dbg;
+	unsigned long t_s, t_e;
+
+	dbg = 1;
+	if (dbg) {
+		t_s = jiffies;
+		for (i = 1; i < 5; i++)
+			do_fe_phy_all_analog_cal(i);
+
+		do_ge_phy_all_analog_cal(0);
+
+		t_e = jiffies;
+	}
+	if (show_time)
+		pr_info("cal time = %lu\n", (t_e - t_s) * 4);
+}
+#endif
+/* Busy-wait used between TRGMII calibration steps: issue 320 dummy
+ * reads of an ethernet switch register so the delay scales with bus
+ * access time rather than CPU speed.
+ */
+static void wait_loop(void)
+{
+	int read_data;
+	int remaining = 320;
+
+	while (remaining-- > 0)
+		read_data = sys_reg_read(RALINK_ETH_SW_BASE + 0x108);
+}
+
+/*
+ * trgmii_calibration_7623 - tune MT7623-side TRGMII RX delays.
+ *
+ * Phase 1: with MT7530 in training mode, step the RXC delay until the
+ * edge-check error counters and the received training word (0x55) are
+ * good on all 5 RX lanes.  Phase 2: per lane, sweep RXD delay upward to
+ * find the first passing tap (tap_a), continue to the last passing tap
+ * (tap_b), and program the midpoint (tap_a + tap_b) / 2.  Finally take
+ * MT7530 out of training mode.
+ */
+static void trgmii_calibration_7623(void)
+{
+	/* minimum delay for all correct */
+	unsigned int tap_a[5] = {
+		0, 0, 0, 0, 0
+	};
+	/* maximum delay for all correct */
+	unsigned int tap_b[5] = {
+		0, 0, 0, 0, 0
+	};
+	unsigned int final_tap[5];
+	unsigned int rxc_step_size;
+	unsigned int rxd_step_size;
+	unsigned int read_data;
+	unsigned int tmp;
+	unsigned int rd_wd;
+	int i;
+	unsigned int err_cnt[5];
+	unsigned int init_toggle_data;
+	unsigned int err_flag[5];
+	unsigned int err_total_flag;
+	unsigned int training_word;
+	unsigned int rd_tap;
+
+	void __iomem *TRGMII_7623_base;
+	void __iomem *TRGMII_7623_RD_0;
+	void __iomem *temp_addr;
+
+	TRGMII_7623_base = ETHDMASYS_ETH_SW_BASE + 0x0300;
+	TRGMII_7623_RD_0 = TRGMII_7623_base + 0x10;	/* lane-0 RD reg; lane i at +i*8 */
+	rxd_step_size = 0x1;
+	rxc_step_size = 0x4;
+	init_toggle_data = 0x00000055;
+	training_word = 0x000000AC;
+
+	/* RX clock gating in MT7623 */
+	reg_bit_zero(TRGMII_7623_base + 0x04, 30, 2);
+	/* Assert RX  reset in MT7623 */
+	reg_bit_one(TRGMII_7623_base + 0x00, 31, 1);
+	/* Set TX OE edge in  MT7623 */
+	reg_bit_one(TRGMII_7623_base + 0x78, 13, 1);
+	/* Disable RX clock gating in MT7623 */
+	reg_bit_one(TRGMII_7623_base + 0x04, 30, 2);
+	/* Release RX reset in MT7623 */
+	reg_bit_zero(TRGMII_7623_base, 31, 1);
+
+	for (i = 0; i < 5; i++)
+		/* Set bslip_en = 1 */
+		reg_bit_one(TRGMII_7623_RD_0 + i * 8, 31, 1);
+
+	/* Enable Training Mode in MT7530 */
+	mii_mgr_read(0x1F, 0x7A40, &read_data);
+	read_data |= 0xc0000000;
+	mii_mgr_write(0x1F, 0x7A40, read_data);
+
+	/* Phase 1: RXC delay scan.
+	 * NOTE(review): err_total_flag is the AND of the per-lane
+	 * err_flag values (1 = lane failed), so the loop exits when every
+	 * lane reports an error or the RXC tap reaches 0x68 — confirm
+	 * this polarity matches the hardware's intent.
+	 */
+	err_total_flag = 0;
+	read_data = 0x0;
+	while (err_total_flag == 0 && read_data != 0x68) {
+		/* Enable EDGE CHK in MT7623 */
+		for (i = 0; i < 5; i++) {
+			reg_bit_zero(TRGMII_7623_RD_0 + i * 8, 28, 4);
+			reg_bit_one(TRGMII_7623_RD_0 + i * 8, 31, 1);
+		}
+		wait_loop();
+		err_total_flag = 1;
+		for (i = 0; i < 5; i++) {
+			/* err_cnt: RD reg bits [11:8]; rd_wd: bits [23:16] */
+			tmp = sys_reg_read(TRGMII_7623_RD_0 + i * 8);
+			err_cnt[i] = (tmp >> 8) & 0x0000000f;
+
+			tmp = sys_reg_read(TRGMII_7623_RD_0 + i * 8);
+			rd_wd = (tmp >> 16) & 0x000000ff;
+
+			if (err_cnt[i] != 0)
+				err_flag[i] = 1;
+			else if (rd_wd != 0x55)
+				err_flag[i] = 1;
+			else
+				err_flag[i] = 0;
+			err_total_flag = err_flag[i] & err_total_flag;
+		}
+
+		/* Disable EDGE CHK in MT7623 */
+		for (i = 0; i < 5; i++) {
+			reg_bit_one(TRGMII_7623_RD_0 + i * 8, 30, 1);
+			reg_bit_zero(TRGMII_7623_RD_0 + i * 8, 28, 2);
+			reg_bit_zero(TRGMII_7623_RD_0 + i * 8, 31, 1);
+		}
+		wait_loop();
+		/* Adjust RXC delay */
+		/* RX clock gating in MT7623 */
+		reg_bit_zero(TRGMII_7623_base + 0x04, 30, 2);
+		read_data = sys_reg_read(TRGMII_7623_base);
+		/* advance the 7-bit RXC tap (bits [6:0], mirrored into the
+		 * next byte) by rxc_step_size while scanning, or by 16
+		 * once the exit condition is met
+		 */
+		if (err_total_flag == 0) {
+			tmp = (read_data & 0x0000007f) + rxc_step_size;
+			read_data >>= 8;
+			read_data &= 0xffffff80;
+			read_data |= tmp;
+			read_data <<= 8;
+			read_data &= 0xffffff80;
+			read_data |= tmp;
+			sys_reg_write(TRGMII_7623_base, read_data);
+		} else {
+			tmp = (read_data & 0x0000007f) + 16;
+			read_data >>= 8;
+			read_data &= 0xffffff80;
+			read_data |= tmp;
+			read_data <<= 8;
+			read_data &= 0xffffff80;
+			read_data |= tmp;
+			sys_reg_write(TRGMII_7623_base, read_data);
+		}
+		read_data &= 0x000000ff;
+		/* Disable RX clock gating in MT7623 */
+		reg_bit_one(TRGMII_7623_base + 0x04, 30, 2);
+		for (i = 0; i < 5; i++)
+			reg_bit_one(TRGMII_7623_RD_0 + i * 8, 31, 1);
+	}
+	/* Phase 2a: per lane, find the first passing RXD tap (tap_a) */
+	/* Read RD_WD MT7623 */
+	for (i = 0; i < 5; i++) {
+		temp_addr = TRGMII_7623_RD_0 + i * 8;
+		rd_tap = 0;
+		while (err_flag[i] != 0 && rd_tap != 128) {
+			/* Enable EDGE CHK in MT7623 */
+			tmp = sys_reg_read(temp_addr);
+			tmp |= 0x40000000;
+			reg_bit_zero(temp_addr, 28, 4);
+			reg_bit_one(temp_addr, 30, 1);
+			wait_loop();
+			read_data = sys_reg_read(temp_addr);
+			/* Read MT7623 Errcnt */
+			err_cnt[i] = (read_data >> 8) & 0x0000000f;
+			rd_wd = (read_data >> 16) & 0x000000ff;
+			if (err_cnt[i] != 0 || rd_wd != 0x55)
+				err_flag[i] = 1;
+			else
+				err_flag[i] = 0;
+			/* Disable EDGE CHK in MT7623 */
+			reg_bit_zero(temp_addr, 28, 2);
+			reg_bit_zero(temp_addr, 31, 1);
+			tmp |= 0x40000000;
+			sys_reg_write(temp_addr, tmp & 0x4fffffff);
+			wait_loop();
+			if (err_flag[i] != 0) {
+				/* Add RXD delay in MT7623 */
+				rd_tap = (read_data & 0x7f) + rxd_step_size;
+
+				read_data = (read_data & 0xffffff80) | rd_tap;
+				sys_reg_write(temp_addr, read_data);
+				tap_a[i] = rd_tap;
+			} else {
+				/* jump 48 taps ahead to start the tap_b scan */
+				rd_tap = (read_data & 0x0000007f) + 48;
+				read_data = (read_data & 0xffffff80) | rd_tap;
+				sys_reg_write(temp_addr, read_data);
+			}
+		}
+		pr_info("MT7623 %dth bit  Tap_a = %d\n", i, tap_a[i]);
+	}
+	/* Phase 2b: per lane, walk forward to the last passing tap (tap_b)
+	 * and program the midpoint.
+	 * NOTE(review): rd_tap, err_flag[i] and read_data carry over from
+	 * the phase-2a loop into this one without per-lane reset — if a
+	 * lane's inner while never executes, stale values from the
+	 * previous lane are used at the midpoint write below; confirm
+	 * this is intended.
+	 */
+	for (i = 0; i < 5; i++) {
+		while ((err_flag[i] == 0) && (rd_tap != 128)) {
+			read_data = sys_reg_read(TRGMII_7623_RD_0 + i * 8);
+			/* Add RXD delay in MT7623 */
+			rd_tap = (read_data & 0x7f) + rxd_step_size;
+
+			read_data = (read_data & 0xffffff80) | rd_tap;
+			sys_reg_write(TRGMII_7623_RD_0 + i * 8, read_data);
+
+			/* Enable EDGE CHK in MT7623 */
+			tmp = sys_reg_read(TRGMII_7623_RD_0 + i * 8);
+			tmp |= 0x40000000;
+			sys_reg_write(TRGMII_7623_RD_0 + i * 8,
+				      (tmp & 0x4fffffff));
+			wait_loop();
+			read_data = sys_reg_read(TRGMII_7623_RD_0 + i * 8);
+			/* Read MT7623 Errcnt */
+			err_cnt[i] = (read_data >> 8) & 0xf;
+			rd_wd = (read_data >> 16) & 0x000000ff;
+			if (err_cnt[i] != 0 || rd_wd != 0x55)
+				err_flag[i] = 1;
+			else
+				err_flag[i] = 0;
+
+			/* Disable EDGE CHK in MT7623 */
+			tmp = sys_reg_read(TRGMII_7623_RD_0 + i * 8);
+			tmp |= 0x40000000;
+			sys_reg_write(TRGMII_7623_RD_0 + i * 8,
+				      (tmp & 0x4fffffff));
+			wait_loop();
+		}
+		tap_b[i] = rd_tap;	/* -rxd_step_size; */
+		pr_info("MT7623 %dth bit  Tap_b = %d\n", i, tap_b[i]);
+		/* Calculate RXD delay = (TAP_A + TAP_B)/2 */
+		final_tap[i] = (tap_a[i] + tap_b[i]) / 2;
+		read_data = (read_data & 0xffffff80) | final_tap[i];
+		sys_reg_write(TRGMII_7623_RD_0 + i * 8, read_data);
+	}
+
+	/* take MT7530 out of training mode */
+	mii_mgr_read(0x1F, 0x7A40, &read_data);
+	read_data &= 0x3fffffff;
+	mii_mgr_write(0x1F, 0x7A40, read_data);
+}
+
+/*
+ * trgmii_calibration_7530() - RX timing calibration on the MT7530 side of
+ * the TRGMII link between the MT7623 MAC and the MT7530 switch.
+ *
+ * Mirror of trgmii_calibration_7623(): first step the RX clock (RXC) delay
+ * until the 0x55 training pattern is received error-free on all five data
+ * bits, then sweep each bit's RXD delay tap to find the working window
+ * [TAP_A, TAP_B] and program its midpoint.  All MT7530 register accesses
+ * go over MDIO (mii_mgr_read/mii_mgr_write, phy address 0x1F).
+ *
+ * NOTE(review): the "Disable EDGE CHK" sequences also set bit 30
+ * (read_data |= 0x40000000), same as the enable sequences -- kept as-is,
+ * presumed to be the intended hardware toggle protocol; confirm against
+ * the MT7530 TRGMII register description.
+ */
+static void trgmii_calibration_7530(void)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	unsigned int tap_a[5] = {
+		0, 0, 0, 0, 0
+	};
+	unsigned int tap_b[5] = {
+		0, 0, 0, 0, 0
+	};
+	unsigned int final_tap[5];
+	unsigned int rxc_step_size;
+	unsigned int rxd_step_size;
+	unsigned int read_data;
+	unsigned int tmp = 0;
+	int i;
+	unsigned int err_cnt[5];
+	unsigned int rd_wd;
+	unsigned int err_flag[5];
+	unsigned int err_total_flag;
+	unsigned int rd_tap;
+
+	void __iomem *TRGMII_7623_base;
+	u32 TRGMII_7530_RD_0;
+	u32 TRGMII_7530_base;
+
+	TRGMII_7623_base = ETHDMASYS_ETH_SW_BASE + 0x0300;
+	TRGMII_7530_base = 0x7A00;
+	TRGMII_7530_RD_0 = TRGMII_7530_base + 0x10;
+	rxd_step_size = 0x1;
+	rxc_step_size = 0x8;
+
+	reg_bit_one(TRGMII_7623_base + 0x40, 31, 1);
+	mii_mgr_read(0x1F, 0x7a10, &read_data);
+
+	/* RX clock gating in MT7530 */
+	mii_mgr_read(0x1F, TRGMII_7530_base + 0x04, &read_data);
+	read_data &= 0x3fffffff;
+	mii_mgr_write(0x1F, TRGMII_7530_base + 0x04, read_data);
+
+	/* Set TX OE edge in  MT7530 */
+	mii_mgr_read(0x1F, TRGMII_7530_base + 0x78, &read_data);
+	read_data |= 0x00002000;
+	mii_mgr_write(0x1F, TRGMII_7530_base + 0x78, read_data);
+
+	/* Assert RX  reset in MT7530 */
+	mii_mgr_read(0x1F, TRGMII_7530_base, &read_data);
+	read_data |= 0x80000000;
+	mii_mgr_write(0x1F, TRGMII_7530_base, read_data);
+
+	/* Release RX reset in MT7530 */
+	mii_mgr_read(0x1F, TRGMII_7530_base, &read_data);
+	read_data &= 0x7fffffff;
+	mii_mgr_write(0x1F, TRGMII_7530_base, read_data);
+
+	/* Disable RX clock gating in MT7530 */
+	mii_mgr_read(0x1F, TRGMII_7530_base + 0x04, &read_data);
+	read_data |= 0xC0000000;
+	mii_mgr_write(0x1F, TRGMII_7530_base + 0x04, read_data);
+
+	/*Enable Training Mode in MT7623 */
+	reg_bit_zero(TRGMII_7623_base + 0x40, 30, 1);
+	if (ei_local->architecture & GE1_TRGMII_FORCE_2000)
+		reg_bit_one(TRGMII_7623_base + 0x40, 30, 2);
+	else
+		reg_bit_one(TRGMII_7623_base + 0x40, 31, 1);
+	reg_bit_zero(TRGMII_7623_base + 0x78, 8, 4);
+	reg_bit_zero(TRGMII_7623_base + 0x50, 8, 4);
+	reg_bit_zero(TRGMII_7623_base + 0x58, 8, 4);
+	reg_bit_zero(TRGMII_7623_base + 0x60, 8, 4);
+	reg_bit_zero(TRGMII_7623_base + 0x68, 8, 4);
+	reg_bit_zero(TRGMII_7623_base + 0x70, 8, 4);
+	reg_bit_one(TRGMII_7623_base + 0x78, 11, 1);
+
+	/*
+	 * Phase 1: step the common RXC delay (rxc_step_size per iteration)
+	 * until at least one data bit receives the pattern cleanly, or the
+	 * tap value wraps to 0x68.
+	 */
+	err_total_flag = 0;
+	read_data = 0x0;
+	while (err_total_flag == 0 && (read_data != 0x68)) {
+		/* Enable EDGE CHK in MT7530 */
+		for (i = 0; i < 5; i++) {
+			mii_mgr_read(0x1F, TRGMII_7530_RD_0 + i * 8,
+				     &read_data);
+			read_data |= 0x40000000;
+			read_data &= 0x4fffffff;
+			mii_mgr_write(0x1F, TRGMII_7530_RD_0 + i * 8,
+				      read_data);
+			wait_loop();
+			mii_mgr_read(0x1F, TRGMII_7530_RD_0 + i * 8,
+				     &err_cnt[i]);
+			err_cnt[i] >>= 8;
+			err_cnt[i] &= 0x0000ff0f;
+			rd_wd = err_cnt[i] >> 8;
+			rd_wd &= 0x000000ff;
+			err_cnt[i] &= 0x0000000f;
+			/* bit fails on any error, or wrong training word */
+			if (err_cnt[i] != 0)
+				err_flag[i] = 1;
+			else if (rd_wd != 0x55)
+				err_flag[i] = 1;
+			else
+				err_flag[i] = 0;
+
+			if (i == 0)
+				err_total_flag = err_flag[i];
+			else
+				err_total_flag = err_flag[i] & err_total_flag;
+			/* Disable EDGE CHK in MT7530 */
+			mii_mgr_read(0x1F, TRGMII_7530_RD_0 + i * 8,
+				     &read_data);
+			read_data |= 0x40000000;
+			read_data &= 0x4fffffff;
+			mii_mgr_write(0x1F, TRGMII_7530_RD_0 + i * 8,
+				      read_data);
+			wait_loop();
+		}
+		/*Adjust RXC delay */
+		if (err_total_flag == 0) {
+			/* Assert RX  reset in MT7530 */
+			mii_mgr_read(0x1F, TRGMII_7530_base, &read_data);
+			read_data |= 0x80000000;
+			mii_mgr_write(0x1F, TRGMII_7530_base, read_data);
+
+			/* RX clock gating in MT7530 */
+			mii_mgr_read(0x1F, TRGMII_7530_base + 0x04, &read_data);
+			read_data &= 0x3fffffff;
+			mii_mgr_write(0x1F, TRGMII_7530_base + 0x04, read_data);
+
+			/* step RXC tap (bits [6:0]) while held in reset */
+			mii_mgr_read(0x1F, TRGMII_7530_base, &read_data);
+			tmp = read_data;
+			tmp &= 0x0000007f;
+			tmp += rxc_step_size;
+			read_data &= 0xffffff80;
+			read_data |= tmp;
+			mii_mgr_write(0x1F, TRGMII_7530_base, read_data);
+			mii_mgr_read(0x1F, TRGMII_7530_base, &read_data);
+
+			/* Release RX reset in MT7530 */
+			mii_mgr_read(0x1F, TRGMII_7530_base, &read_data);
+			read_data &= 0x7fffffff;
+			mii_mgr_write(0x1F, TRGMII_7530_base, read_data);
+
+			/* Disable RX clock gating in MT7530 */
+			mii_mgr_read(0x1F, TRGMII_7530_base + 0x04, &read_data);
+			read_data |= 0xc0000000;
+			mii_mgr_write(0x1F, TRGMII_7530_base + 0x04, read_data);
+		}
+		read_data = tmp;	/* loop condition checks the tap value */
+	}
+	/* Read RD_WD MT7530 */
+	/*
+	 * Phase 2a: for each failing bit, increase its RXD tap until the
+	 * pattern locks; the first passing tap is TAP_A (window start).
+	 */
+	for (i = 0; i < 5; i++) {
+		rd_tap = 0;
+		while (err_flag[i] != 0 && rd_tap != 128) {
+			/* Enable EDGE CHK in MT7530 */
+			mii_mgr_read(0x1F, TRGMII_7530_RD_0 + i * 8,
+				     &read_data);
+			read_data |= 0x40000000;
+			read_data &= 0x4fffffff;
+			mii_mgr_write(0x1F, TRGMII_7530_RD_0 + i * 8,
+				      read_data);
+			wait_loop();
+			err_cnt[i] = (read_data >> 8) & 0x0000000f;
+			rd_wd = (read_data >> 16) & 0x000000ff;
+			if (err_cnt[i] != 0 || rd_wd != 0x55)
+				err_flag[i] = 1;
+			else
+				err_flag[i] = 0;
+
+			if (err_flag[i] != 0) {
+				/* Add RXD delay in MT7530 */
+				rd_tap = (read_data & 0x7f) + rxd_step_size;
+				read_data = (read_data & 0xffffff80) | rd_tap;
+				mii_mgr_write(0x1F, TRGMII_7530_RD_0 + i * 8,
+					      read_data);
+				tap_a[i] = rd_tap;
+			} else {
+				/* Record the min delay TAP_A */
+				tap_a[i] = (read_data & 0x0000007f);
+				rd_tap = tap_a[i] + 0x4;
+				read_data = (read_data & 0xffffff80) | rd_tap;
+				mii_mgr_write(0x1F, TRGMII_7530_RD_0 + i * 8,
+					      read_data);
+			}
+
+			/* Disable EDGE CHK in MT7530 */
+			mii_mgr_read(0x1F, TRGMII_7530_RD_0 + i * 8,
+				     &read_data);
+			read_data |= 0x40000000;
+			read_data &= 0x4fffffff;
+			mii_mgr_write(0x1F, TRGMII_7530_RD_0 + i * 8,
+				      read_data);
+			wait_loop();
+		}
+		pr_info("MT7530 %dth bit  Tap_a = %d\n", i, tap_a[i]);
+	}
+	/*
+	 * Phase 2b: keep increasing each bit's RXD tap until errors return
+	 * (or the tap saturates at 128); that edge is TAP_B (window end).
+	 */
+	for (i = 0; i < 5; i++) {
+		rd_tap = 0;
+		while (err_flag[i] == 0 && (rd_tap != 128)) {
+			/* Enable EDGE CHK in MT7530 */
+			mii_mgr_read(0x1F, TRGMII_7530_RD_0 + i * 8,
+				     &read_data);
+			read_data |= 0x40000000;
+			read_data &= 0x4fffffff;
+			mii_mgr_write(0x1F, TRGMII_7530_RD_0 + i * 8,
+				      read_data);
+			wait_loop();
+			err_cnt[i] = (read_data >> 8) & 0x0000000f;
+			rd_wd = (read_data >> 16) & 0x000000ff;
+			if (err_cnt[i] != 0 || rd_wd != 0x55)
+				err_flag[i] = 1;
+			else
+				err_flag[i] = 0;
+
+			if (err_flag[i] == 0 && (rd_tap != 128)) {
+				/* Add RXD delay in MT7530 */
+				rd_tap = (read_data & 0x7f) + rxd_step_size;
+				read_data = (read_data & 0xffffff80) | rd_tap;
+				mii_mgr_write(0x1F, TRGMII_7530_RD_0 + i * 8,
+					      read_data);
+			}
+			/* Disable EDGE CHK in MT7530 */
+			mii_mgr_read(0x1F, TRGMII_7530_RD_0 + i * 8,
+				     &read_data);
+			read_data |= 0x40000000;
+			read_data &= 0x4fffffff;
+			mii_mgr_write(0x1F, TRGMII_7530_RD_0 + i * 8,
+				      read_data);
+			wait_loop();
+		}
+		tap_b[i] = rd_tap;	/* - rxd_step_size; */
+		pr_info("MT7530 %dth bit  Tap_b = %d\n", i, tap_b[i]);
+		/* Calculate RXD delay = (TAP_A + TAP_B)/2 */
+		final_tap[i] = (tap_a[i] + tap_b[i]) / 2;
+		read_data = (read_data & 0xffffff80) | final_tap[i];
+		mii_mgr_write(0x1F, TRGMII_7530_RD_0 + i * 8, read_data);
+	}
+	/* leave training mode */
+	if (ei_local->architecture & GE1_TRGMII_FORCE_2000)
+		reg_bit_zero(TRGMII_7623_base + 0x40, 31, 1);
+	else
+		reg_bit_zero(TRGMII_7623_base + 0x40, 30, 2);
+}
+
+/*
+ * mt7530_trgmii_clock_setting() - program the MT7530 TRGMII PLL for the
+ * board's crystal and run the RX calibration.
+ *
+ * @xtal_mode: 1 = 25 MHz crystal, 2 = 40 MHz crystal (other values skip
+ *             the PLL divider write and use the 40 MHz 0x87 settings).
+ *
+ * The cl45 writes below target MMD 0x1f registers 0x400..0x410 on PHY
+ * address 0 (MT7530 core PLL block); the exact sequence and delays come
+ * from the MT7623 bring-up procedure and are order-dependent.
+ */
+static void mt7530_trgmii_clock_setting(u32 xtal_mode)
+{
+	u32 reg_value;
+	/* TRGMII Clock */
+	mii_mgr_write_cl45(0, 0x1f, 0x410, 0x1);
+	if (xtal_mode == 1) {	/* 25MHz */
+		mii_mgr_write_cl45(0, 0x1f, 0x404, MT7530_TRGMII_PLL_25M);
+	} else if (xtal_mode == 2) {	/* 40MHz */
+		mii_mgr_write_cl45(0, 0x1f, 0x404, MT7530_TRGMII_PLL_40M);
+	}
+	mii_mgr_write_cl45(0, 0x1f, 0x405, 0);
+	if (xtal_mode == 1)	/* 25MHz */
+		mii_mgr_write_cl45(0, 0x1f, 0x409, 0x57);
+	else
+		mii_mgr_write_cl45(0, 0x1f, 0x409, 0x87);
+
+	if (xtal_mode == 1)	/* 25MHz */
+		mii_mgr_write_cl45(0, 0x1f, 0x40a, 0x57);
+	else
+		mii_mgr_write_cl45(0, 0x1f, 0x40a, 0x87);
+
+	/* PLL bias / LPF enable, then system PLL and dividers */
+	mii_mgr_write_cl45(0, 0x1f, 0x403, 0x1800);
+	mii_mgr_write_cl45(0, 0x1f, 0x403, 0x1c00);
+	mii_mgr_write_cl45(0, 0x1f, 0x401, 0xc020);
+	mii_mgr_write_cl45(0, 0x1f, 0x406, 0xa030);
+	mii_mgr_write_cl45(0, 0x1f, 0x406, 0xa038);
+	usleep_range(120, 130);	/* for MT7623 bring up test */
+	mii_mgr_write_cl45(0, 0x1f, 0x410, 0x3);
+
+	/* switch MT7530 P6 interface mode (reg 0x7830 low bits) */
+	mii_mgr_read(31, 0x7830, &reg_value);
+	reg_value &= 0xFFFFFFFC;
+	reg_value |= 0x00000001;
+	mii_mgr_write(31, 0x7830, reg_value);
+
+	mii_mgr_read(31, 0x7a40, &reg_value);
+	reg_value &= ~(0x1 << 30);
+	reg_value &= ~(0x1 << 28);
+	mii_mgr_write(31, 0x7a40, reg_value);
+
+	mii_mgr_write(31, 0x7a78, 0x55);
+	usleep_range(100, 110);	/* for mt7623 bring up test */
+
+	/* Release MT7623 RXC reset */
+	reg_bit_zero(ETHDMASYS_ETH_SW_BASE + 0x0300, 31, 1);
+
+	/* calibrate both link partners, then pulse RX reset on each side */
+	trgmii_calibration_7623();
+	trgmii_calibration_7530();
+	/* Assert RX  reset in MT7623 */
+	reg_bit_one(ETHDMASYS_ETH_SW_BASE + 0x0300, 31, 1);
+	/* Release RX reset in MT7623 */
+	reg_bit_zero(ETHDMASYS_ETH_SW_BASE + 0x0300, 31, 1);
+	mii_mgr_read(31, 0x7a00, &reg_value);
+	reg_value |= (0x1 << 31);
+	mii_mgr_write(31, 0x7a00, reg_value);
+	mdelay(1);
+	reg_value &= ~(0x1 << 31);
+	mii_mgr_write(31, 0x7a00, reg_value);
+	mdelay(100);
+}
+
+/*
+ * trgmii_set_7621() - select the TRGMII clock source on MT7621.
+ *
+ * Holds the frame engine and GMAC in reset (RSTCTRL bits RALINK_FE_RST /
+ * RALINK_ETH_RST) while reprogramming CLK_CFG_0 bits [6:5] to 0b01, then
+ * releases the resets.
+ */
+void trgmii_set_7621(void)
+{
+	u32 val;
+	u32 val_0;
+
+	val = sys_reg_read(RSTCTRL);
+	/* MT7621 need to reset GMAC and FE first */
+	val = val | RALINK_FE_RST | RALINK_ETH_RST;
+	sys_reg_write(RSTCTRL, val);
+
+	/* set TRGMII clock: clear CLK_CFG_0[6:5], then set bit 5 */
+	val_0 = sys_reg_read(CLK_CFG_0);
+	val_0 &= 0xffffff9f;
+	val_0 |= (0x1 << 5);
+	sys_reg_write(CLK_CFG_0, val_0);
+	mdelay(1);
+	val_0 = sys_reg_read(CLK_CFG_0);
+	pr_info("set CLK_CFG_0 = 0x%x\n", val_0);
+	val = val & ~(RALINK_FE_RST | RALINK_ETH_RST);
+	sys_reg_write(RSTCTRL, val);
+	pr_info("trgmii_set_7621 Completed!!\n");
+}
+
+/*
+ * trgmii_set_7530() - bring up the MT7530 TRGMII PLL via indirect MDIO.
+ *
+ * MT7621 path: each register write uses the clause-22 indirect access
+ * sequence (write reg 13 = devad, reg 14 = address, reg 13 = 0x4000|devad,
+ * reg 14 = data) against MMD 0x1f of PHY 0 -- the same registers that
+ * mt7530_trgmii_clock_setting() programs with mii_mgr_write_cl45().
+ * Crystal type is read from HWTRAP (0x7800) bits [10:9]:
+ * 0x3 = 25 MHz, 0x2 = 40 MHz.
+ */
+void trgmii_set_7530(void)
+{
+	u32 regValue;
+
+	/* PLL divider (reg 0x404) per crystal frequency */
+	mii_mgr_write(0, 13, 0x1f);
+	mii_mgr_write(0, 14, 0x404);
+	mii_mgr_write(0, 13, 0x401f);
+	mii_mgr_read(31, 0x7800, &regValue);
+	regValue = (regValue >> 9) & 0x3;
+	if (regValue == 0x3)
+		mii_mgr_write(0, 14, 0x0C00);/*25Mhz XTAL for 150Mhz CLK */
+	 else if (regValue == 0x2)
+		mii_mgr_write(0, 14, 0x0780);/*40Mhz XTAL for 150Mhz CLK */
+
+	mdelay(1);
+
+	mii_mgr_write(0, 13, 0x1f);
+	mii_mgr_write(0, 14, 0x409);
+	mii_mgr_write(0, 13, 0x401f);
+	if (regValue == 0x3) /* 25MHz */
+		mii_mgr_write(0, 14, 0x57);
+	else
+		mii_mgr_write(0, 14, 0x87);
+	mdelay(1);
+
+	mii_mgr_write(0, 13, 0x1f);
+	mii_mgr_write(0, 14, 0x40a);
+	mii_mgr_write(0, 13, 0x401f);
+	if (regValue == 0x3) /* 25MHz */
+		mii_mgr_write(0, 14, 0x57);
+	else
+		mii_mgr_write(0, 14, 0x87);
+
+/* PLL BIAS en */
+	mii_mgr_write(0, 13, 0x1f);
+	mii_mgr_write(0, 14, 0x403);
+	mii_mgr_write(0, 13, 0x401f);
+	mii_mgr_write(0, 14, 0x1800);
+	mdelay(1);
+
+/* BIAS LPF en */
+	mii_mgr_write(0, 13, 0x1f);
+	mii_mgr_write(0, 14, 0x403);
+	mii_mgr_write(0, 13, 0x401f);
+	mii_mgr_write(0, 14, 0x1c00);
+
+/* sys PLL en */
+	mii_mgr_write(0, 13, 0x1f);
+	mii_mgr_write(0, 14, 0x401);
+	mii_mgr_write(0, 13, 0x401f);
+	mii_mgr_write(0, 14, 0xc020);
+
+/* LCDDDS PWDS */
+	mii_mgr_write(0, 13, 0x1f);
+	mii_mgr_write(0, 14, 0x406);
+	mii_mgr_write(0, 13, 0x401f);
+	mii_mgr_write(0, 14, 0xa030);
+	mdelay(1);
+
+/* GSW_2X_CLK */
+	mii_mgr_write(0, 13, 0x1f);
+	mii_mgr_write(0, 14, 0x410);
+	mii_mgr_write(0, 13, 0x401f);
+	mii_mgr_write(0, 14, 0x0003);
+	/* NOTE(review): repeats the 0x410=0x0003 write via the cl45 helper;
+	 * looks redundant with the four writes above -- confirm intent.
+	 */
+	mii_mgr_write_cl45(0, 0x1f, 0x410, 0x0003);
+
+/* enable P6 */
+	mii_mgr_write(31, 0x3600, 0x5e33b);
+
+/* enable TRGMII */
+	mii_mgr_write(31, 0x7830, 0x1);
+
+	pr_info("trgmii_set_7530 Completed!!\n");
+}
+
+/*
+ * is_switch_vlan_table_busy() - wait for the MT7530 VLAN table to go idle.
+ *
+ * Polls VTCR (switch reg 0x90) until the busy bit (bit 31) clears,
+ * sleeping 70 ms between attempts, for at most 20 attempts.  Logs a
+ * timeout message if the table never becomes idle.
+ */
+static void is_switch_vlan_table_busy(void)
+{
+	unsigned int vtcr = 0;
+	int retry;
+
+	for (retry = 0; retry < 20; retry++) {
+		mii_mgr_read(31, 0x90, &vtcr);
+		if (!(vtcr & 0x80000000))
+			break;	/* busy bit cleared, table idle */
+		mdelay(70);
+	}
+	if (retry == 20)
+		pr_info("set vlan timeout value=0x%x.\n", vtcr);
+}
+
+/*
+ * lan_wan_partition() - program the MT7530 LAN/WAN VLAN split.
+ *
+ * Depending on ei_local->architecture, partitions the switch as either
+ * WLLLL (WAN on port 0) or LLLLW (WAN on port 4).  In both layouts every
+ * port is put in security mode (PCR 0x2n04 = 0xff0003), LAN ports get
+ * PVID 1 and the WAN + CPU-facing port get PVID 2 (PVC/PPBV 0x2n14),
+ * and the two VLAN entries are written through VAWD1 (0x94) / VTCR
+ * (0x90, 0x8000100v = write VID v), waiting for the table between writes.
+ */
+static void lan_wan_partition(void)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	/*Set  MT7530 */
+	if (ei_local->architecture & WAN_AT_P0) {
+		pr_info("set LAN/WAN WLLLL\n");
+		/*WLLLL, wan at P0 */
+		/*LAN/WAN ports as security mode */
+		mii_mgr_write(31, 0x2004, 0xff0003);	/* port0 */
+		mii_mgr_write(31, 0x2104, 0xff0003);	/* port1 */
+		mii_mgr_write(31, 0x2204, 0xff0003);	/* port2 */
+		mii_mgr_write(31, 0x2304, 0xff0003);	/* port3 */
+		mii_mgr_write(31, 0x2404, 0xff0003);	/* port4 */
+		mii_mgr_write(31, 0x2504, 0xff0003);	/* port5 */
+		mii_mgr_write(31, 0x2604, 0xff0003);	/* port6 */
+
+		/*set PVID */
+		mii_mgr_write(31, 0x2014, 0x10002);	/* port0 */
+		mii_mgr_write(31, 0x2114, 0x10001);	/* port1 */
+		mii_mgr_write(31, 0x2214, 0x10001);	/* port2 */
+		mii_mgr_write(31, 0x2314, 0x10001);	/* port3 */
+		mii_mgr_write(31, 0x2414, 0x10001);	/* port4 */
+		mii_mgr_write(31, 0x2514, 0x10002);	/* port5 */
+		mii_mgr_write(31, 0x2614, 0x10001);	/* port6 */
+		/*port6 */
+		/*VLAN member */
+		is_switch_vlan_table_busy();
+		mii_mgr_write(31, 0x94, 0x405e0001);	/* VAWD1 */
+		mii_mgr_write(31, 0x90, 0x80001001);	/* VTCR, VID=1 */
+		is_switch_vlan_table_busy();
+
+		mii_mgr_write(31, 0x94, 0x40210001);	/* VAWD1 */
+		mii_mgr_write(31, 0x90, 0x80001002);	/* VTCR, VID=2 */
+		is_switch_vlan_table_busy();
+	}
+	if (ei_local->architecture & WAN_AT_P4) {
+		pr_info("set LAN/WAN LLLLW\n");
+		/*LLLLW, wan at P4 */
+		/*LAN/WAN ports as security mode */
+		mii_mgr_write(31, 0x2004, 0xff0003);	/* port0 */
+		mii_mgr_write(31, 0x2104, 0xff0003);	/* port1 */
+		mii_mgr_write(31, 0x2204, 0xff0003);	/* port2 */
+		mii_mgr_write(31, 0x2304, 0xff0003);	/* port3 */
+		mii_mgr_write(31, 0x2404, 0xff0003);	/* port4 */
+		mii_mgr_write(31, 0x2504, 0xff0003);	/* port5 */
+		mii_mgr_write(31, 0x2604, 0xff0003);	/* port6 */
+
+		/*set PVID */
+		mii_mgr_write(31, 0x2014, 0x10001);	/* port0 */
+		mii_mgr_write(31, 0x2114, 0x10001);	/* port1 */
+		mii_mgr_write(31, 0x2214, 0x10001);	/* port2 */
+		mii_mgr_write(31, 0x2314, 0x10001);	/* port3 */
+		mii_mgr_write(31, 0x2414, 0x10002);	/* port4 */
+		mii_mgr_write(31, 0x2514, 0x10002);	/* port5 */
+		mii_mgr_write(31, 0x2614, 0x10001);	/* port6 */
+
+		/*VLAN member */
+		is_switch_vlan_table_busy();
+		mii_mgr_write(31, 0x94, 0x404f0001);	/* VAWD1 */
+		mii_mgr_write(31, 0x90, 0x80001001);	/* VTCR, VID=1 */
+		is_switch_vlan_table_busy();
+		mii_mgr_write(31, 0x94, 0x40300001);	/* VAWD1 */
+		mii_mgr_write(31, 0x90, 0x80001002);	/* VTCR, VID=2 */
+		is_switch_vlan_table_busy();
+	}
+}
+
+/*
+ * mt7530_phy_setting() - per-port tweaks for the five MT7530 internal PHYs.
+ *
+ * For PHY addresses 0..4: disables EEE (MMD 7 reg 0x3c), enables hardware
+ * auto-downshift, and applies vendor-recommended timing/analog tunings via
+ * the test-page (reg 31 page select) and MMD 0x1e registers.
+ */
+static void mt7530_phy_setting(void)
+{
+	u32 i;
+	u32 reg_value;
+
+	for (i = 0; i < 5; i++) {
+		/* Disable EEE */
+		mii_mgr_write_cl45(i, 0x7, 0x3c, 0);
+		/* Enable HW auto downshift */
+		mii_mgr_write(i, 31, 0x1);
+		mii_mgr_read(i, 0x14, &reg_value);
+		reg_value |= (1 << 4);
+		mii_mgr_write(i, 0x14, reg_value);
+		/* Increase SlvDPSready time */
+		mii_mgr_write(i, 31, 0x52b5);
+		mii_mgr_write(i, 16, 0xafae);
+		mii_mgr_write(i, 18, 0x2f);
+		mii_mgr_write(i, 16, 0x8fae);
+		/* Increase post_update_timer */
+		mii_mgr_write(i, 31, 0x3);
+		mii_mgr_write(i, 17, 0x4b);
+		/* Adjust 100_mse_threshold */
+		mii_mgr_write_cl45(i, 0x1e, 0x123, 0xffff);
+		/* Disable mcc */
+		mii_mgr_write_cl45(i, 0x1e, 0xa6, 0x300);
+	}
+}
+
+/*
+ * setup_internal_gsw() - full bring-up of the internal MT7530 gigabit
+ * switch behind GE1 (MT7623/MT7621 path).
+ *
+ * Sequence: select TRGMII/RGMII clocking, hardware-reset the switch
+ * (GPIO + sysctl reset bits), wait for HWTRAP to read non-zero, power
+ * down the internal PHYs during configuration, set up GE1 (force
+ * 1000M/FD) and optionally GE2, program HW-TRAP (0x7804) port routing,
+ * pick PLL settings from the crystal type in HWTRAP (0x7800), tune pad
+ * driving/delays on both MT7623 and MT7530, run TRGMII calibration on
+ * MT7623, apply the LAN/WAN partition and PHY tweaks, then power the
+ * PHYs back up and enable switch interrupts.
+ *
+ * All magic register values are board bring-up constants; the ordering
+ * is significant, so the body is left untouched.
+ */
+static void setup_internal_gsw(void)
+{
+	void __iomem *gpio_base_virt = ioremap(ETH_GPIO_BASE, 0x1000);
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	u32 reg_value;
+	u32 xtal_mode;
+	u32 i;
+
+	/* pad clock select depends on forced TRGMII speed */
+	if (ei_local->architecture &
+	    (GE1_TRGMII_FORCE_2000 | GE1_TRGMII_FORCE_2600))
+		reg_bit_one(RALINK_SYSCTL_BASE + 0x2c, 11, 1);
+	else
+		reg_bit_zero(RALINK_SYSCTL_BASE + 0x2c, 11, 1);
+	reg_bit_one(ETHDMASYS_ETH_SW_BASE + 0x0390, 1, 1);	/* TRGMII mode */
+
+	#if defined(CONFIG_GE1_RGMII_FORCE_1200)
+
+	if (ei_local->chip_name == MT7621_FE)
+		trgmii_set_7621();
+
+	#endif
+
+	/*Hardware reset Switch */
+
+	reg_bit_zero((void __iomem *)gpio_base_virt + 0x520, 1, 1);
+	mdelay(1);
+	reg_bit_one((void __iomem *)gpio_base_virt + 0x520, 1, 1);
+	mdelay(100);
+
+	/* Assert MT7623 RXC reset */
+	reg_bit_one(ETHDMASYS_ETH_SW_BASE + 0x0300, 31, 1);
+	/*For MT7623 reset MT7530 */
+	reg_bit_one(RALINK_SYSCTL_BASE + 0x34, 2, 1);
+	mdelay(1);
+	reg_bit_zero(RALINK_SYSCTL_BASE + 0x34, 2, 1);
+	mdelay(100);
+
+	/* Wait for Switch Reset Completed */
+	for (i = 0; i < 100; i++) {
+		mdelay(10);
+		mii_mgr_read(31, 0x7800, &reg_value);
+		if (reg_value != 0) {
+			pr_info("MT7530 Reset Completed!!\n");
+			break;
+		}
+		if (i == 99)
+			pr_info("MT7530 Reset Timeout!!\n");
+	}
+
+	for (i = 0; i <= 4; i++) {
+		/*turn off PHY */
+		mii_mgr_read(i, 0x0, &reg_value);
+		reg_value |= (0x1 << 11);	/* BMCR power-down bit */
+		mii_mgr_write(i, 0x0, reg_value);
+	}
+	mii_mgr_write(31, 0x7000, 0x3);	/* reset switch */
+	usleep_range(100, 110);
+
+	#if defined(CONFIG_GE1_RGMII_FORCE_1200)
+
+	if (ei_local->chip_name == MT7621_FE) {
+	trgmii_set_7530();
+	/* enable MDIO to control MT7530 */
+	reg_value = sys_reg_read(RALINK_SYSCTL_BASE + 0x60);
+	reg_value &= ~(0x3 << 12);
+	sys_reg_write(RALINK_SYSCTL_BASE + 0x60, reg_value);
+	}
+
+	#endif
+
+	/* (GE1, Force 1000M/FD, FC ON) */
+	sys_reg_write(RALINK_ETH_SW_BASE + 0x100, 0x2105e33b);
+	mii_mgr_write(31, 0x3600, 0x5e33b);
+	mii_mgr_read(31, 0x3600, &reg_value);
+	/* (GE2, Link down) */
+	sys_reg_write(RALINK_ETH_SW_BASE + 0x200, 0x00008000);
+
+	/* build the new HW-TRAP value in reg_value before writing it back */
+	mii_mgr_read(31, 0x7804, &reg_value);
+	reg_value &= ~(1 << 8);	/* Enable Port 6 */
+	reg_value |= (1 << 6);	/* Disable Port 5 */
+	reg_value |= (1 << 13);	/* Port 5 as GMAC, no Internal PHY */
+
+	if (ei_local->architecture & GMAC2) {
+		/*RGMII2=Normal mode */
+		reg_bit_zero(RALINK_SYSCTL_BASE + 0x60, 15, 1);
+
+		/*GMAC2= RGMII mode */
+		reg_bit_zero(SYSCFG1, 14, 2);
+		if (ei_local->architecture & GE2_RGMII_AN) {
+			mii_mgr_write(31, 0x3500, 0x56300);
+			/* (GE2, auto-polling) */
+			sys_reg_write(RALINK_ETH_SW_BASE + 0x200, 0x21056300);
+			reg_value |= (1 << 6);	/* disable MT7530 P5 */
+			enable_auto_negotiate(ei_local);
+
+		} else {
+			/* MT7530 P5 Force 1000 */
+			mii_mgr_write(31, 0x3500, 0x5e33b);
+			/* (GE2, Force 1000) */
+			sys_reg_write(RALINK_ETH_SW_BASE + 0x200, 0x2105e33b);
+			reg_value &= ~(1 << 6);	/* enable MT7530 P5 */
+			reg_value |= ((1 << 7) | (1 << 13) | (1 << 16));
+			if (ei_local->architecture & WAN_AT_P0)
+				reg_value |= (1 << 20);
+			else
+				reg_value &= ~(1 << 20);
+		}
+	}
+	reg_value &= ~(1 << 5);
+	reg_value |= (1 << 16);	/* change HW-TRAP */
+	pr_info("change HW-TRAP to 0x%x\n", reg_value);
+	mii_mgr_write(31, 0x7804, reg_value);
+	/* crystal type from HWTRAP bits [10:9]: 0x3=25MHz, 0x2=40MHz */
+	mii_mgr_read(31, 0x7800, &reg_value);
+	reg_value = (reg_value >> 9) & 0x3;
+	if (reg_value == 0x3) {	/* 25Mhz Xtal */
+		xtal_mode = 1;
+		/*Do Nothing */
+	} else if (reg_value == 0x2) {	/* 40Mhz */
+		xtal_mode = 2;
+		/* disable MT7530 core clock */
+		mii_mgr_write_cl45(0, 0x1f, 0x410, 0x0);
+
+		mii_mgr_write_cl45(0, 0x1f, 0x40d, 0x2020);
+		mii_mgr_write_cl45(0, 0x1f, 0x40e, 0x119);
+		mii_mgr_write_cl45(0, 0x1f, 0x40d, 0x2820);
+		usleep_range(20, 30);	/* suggest by CD */
+	#if defined(CONFIG_GE1_RGMII_FORCE_1200)
+		mii_mgr_write_cl45(0, 0x1f, 0x410, 0x3);
+	#else
+		mii_mgr_write_cl45(0, 0x1f, 0x410, 0x1);
+	#endif
+
+	} else {
+		xtal_mode = 3;
+	 /* TODO */}
+
+	/* set MT7530 central align */
+	#if !defined(CONFIG_GE1_RGMII_FORCE_1200)  /* for RGMII 1000HZ */
+	mii_mgr_read(31, 0x7830, &reg_value);
+	reg_value &= ~1;
+	reg_value |= 1 << 1;
+	mii_mgr_write(31, 0x7830, reg_value);
+
+	mii_mgr_read(31, 0x7a40, &reg_value);
+	reg_value &= ~(1 << 30);
+	mii_mgr_write(31, 0x7a40, reg_value);
+
+	reg_value = 0x855;
+	mii_mgr_write(31, 0x7a78, reg_value);
+	#endif
+
+	mii_mgr_write(31, 0x7b00, 0x104);	/* delay setting for 10/1000M */
+	mii_mgr_write(31, 0x7b04, 0x10);	/* delay setting for 10/1000M */
+
+	/*Tx Driving */
+	mii_mgr_write(31, 0x7a54, 0x88);	/* lower GE1 driving */
+	mii_mgr_write(31, 0x7a5c, 0x88);	/* lower GE1 driving */
+	mii_mgr_write(31, 0x7a64, 0x88);	/* lower GE1 driving */
+	mii_mgr_write(31, 0x7a6c, 0x88);	/* lower GE1 driving */
+	mii_mgr_write(31, 0x7a74, 0x88);	/* lower GE1 driving */
+	mii_mgr_write(31, 0x7a7c, 0x88);	/* lower GE1 driving */
+	mii_mgr_write(31, 0x7810, 0x11);	/* lower GE2 driving */
+	/*Set MT7623 TX Driving */
+	sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x0354, 0x88);
+	sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x035c, 0x88);
+	sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x0364, 0x88);
+	sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x036c, 0x88);
+	sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x0374, 0x88);
+	sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x037c, 0x88);
+
+	/* Set GE2 driving and slew rate */
+	if (ei_local->architecture & GE2_RGMII_AN)
+		sys_reg_write((void __iomem *)gpio_base_virt + 0xf00, 0xe00);
+	else
+		sys_reg_write((void __iomem *)gpio_base_virt + 0xf00, 0xa00);
+	/* set GE2 TDSEL */
+	sys_reg_write((void __iomem *)gpio_base_virt + 0x4c0, 0x5);
+	/* set GE2 TUNE */
+	sys_reg_write((void __iomem *)gpio_base_virt + 0xed0, 0);
+
+	if (ei_local->chip_name == MT7623_FE)
+		mt7530_trgmii_clock_setting(xtal_mode);
+	if (ei_local->architecture & GE1_RGMII_FORCE_1000) {
+		sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x0350, 0x55);
+		sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x0358, 0x55);
+		sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x0360, 0x55);
+		sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x0368, 0x55);
+		sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x0370, 0x55);
+		sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x0378, 0x855);
+	}
+
+	lan_wan_partition();
+	mt7530_phy_setting();
+	for (i = 0; i <= 4; i++) {
+		/*turn on PHY */
+		mii_mgr_read(i, 0x0, &reg_value);
+		reg_value &= ~(0x1 << 11);	/* clear BMCR power-down */
+		mii_mgr_write(i, 0x0, reg_value);
+	}
+
+	mii_mgr_read(31, 0x7808, &reg_value);
+	reg_value |= (3 << 16);	/* Enable INTR */
+	mii_mgr_write(31, 0x7808, reg_value);
+
+	iounmap(gpio_base_virt);
+}
+
+/*
+ * setup_external_gsw() - configure GE1/GE2 MACs for an external gigabit
+ * switch: both RGMII pads in normal mode, both GMACs in RGMII mode, GE1
+ * forced link-down and GE2 forced 1000M/FD with flow control on.
+ */
+void setup_external_gsw(void)
+{
+	/* reduce RGMII2 PAD driving strength */
+	reg_bit_zero(PAD_RGMII2_MDIO_CFG, 4, 2);
+	/*enable MDIO */
+	reg_bit_zero(RALINK_SYSCTL_BASE + 0x60, 12, 2);
+
+	/*RGMII1=Normal mode */
+	reg_bit_zero(RALINK_SYSCTL_BASE + 0x60, 14, 1);
+	/*GMAC1= RGMII mode */
+	reg_bit_zero(SYSCFG1, 12, 2);
+
+	/* (GE1, Link down) */
+	sys_reg_write(RALINK_ETH_SW_BASE + 0x100, 0x00008000);
+
+	/*RGMII2=Normal mode */
+	reg_bit_zero(RALINK_SYSCTL_BASE + 0x60, 15, 1);
+	/*GMAC2= RGMII mode */
+	reg_bit_zero(SYSCFG1, 14, 2);
+
+	/* (GE2, Force 1000M/FD, FC ON) */
+	sys_reg_write(RALINK_ETH_SW_BASE + 0x200, 0x2105e33b);
+}
+
+/*
+ * is_marvell_gigaphy() - detect a Marvell giga-PHY on the GE port.
+ *
+ * Reads PHY ID registers 2 and 3 and compares them against the Marvell
+ * IDs.  Returns 1 when both match, 0 otherwise (including MDIO read
+ * failure).
+ *
+ * NOTE(review): @ge is unused -- the PHY address is chosen from
+ * ei_local->architecture (GE1_RGMII_AN) instead; confirm that is the
+ * intended selection for callers passing ge == 2.
+ */
+int is_marvell_gigaphy(int ge)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	u32 phy_id0 = 0, phy_id1 = 0, phy_address;
+
+	if (ei_local->architecture & GE1_RGMII_AN)
+		phy_address = mac_to_gigaphy_mode_addr;
+	else
+		phy_address = mac_to_gigaphy_mode_addr2;
+
+	if (!mii_mgr_read(phy_address, 2, &phy_id0)) {
+		pr_info("\n Read PhyID 1 is Fail!!\n");
+		phy_id0 = 0;
+	}
+	if (!mii_mgr_read(phy_address, 3, &phy_id1)) {
+		pr_info("\n Read PhyID 2 is Fail!!\n");
+		phy_id1 = 0;
+	}
+
+	if ((phy_id0 == EV_MARVELL_PHY_ID0) && (phy_id1 == EV_MARVELL_PHY_ID1))
+		return 1;
+	return 0;
+}
+
+/*
+ * is_vtss_gigaphy() - detect a Vitesse giga-PHY on the GE port.
+ *
+ * Reads PHY ID registers 2 and 3 and compares them against the Vitesse
+ * IDs.  Returns 1 when both match, 0 otherwise (including MDIO read
+ * failure).
+ *
+ * NOTE(review): @ge is unused -- the PHY address comes from
+ * ei_local->architecture (GE1_RGMII_AN), same as is_marvell_gigaphy().
+ */
+int is_vtss_gigaphy(int ge)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	u32 phy_id0 = 0, phy_id1 = 0, phy_address;
+
+	if (ei_local->architecture & GE1_RGMII_AN)
+		phy_address = mac_to_gigaphy_mode_addr;
+	else
+		phy_address = mac_to_gigaphy_mode_addr2;
+
+	if (!mii_mgr_read(phy_address, 2, &phy_id0)) {
+		pr_info("\n Read PhyID 1 is Fail!!\n");
+		phy_id0 = 0;
+	}
+	if (!mii_mgr_read(phy_address, 3, &phy_id1)) {
+		pr_info("\n Read PhyID 2 is Fail!!\n");
+		phy_id1 = 0;
+	}
+
+	if ((phy_id0 == EV_VTSS_PHY_ID0) && (phy_id1 == EV_VTSS_PHY_ID1))
+		return 1;
+	return 0;
+}
+
+/*
+ * fe_sw_preinit() - power up and hard-reset the external MT7530 before
+ * the frame engine touches it.
+ *
+ * Looks up the gsw platform data from the switch device-tree node,
+ * enables its core supply at 1.0 V, and either enables the 3.3 V rail
+ * (MCM package) or pulses the discrete reset GPIO low->high with a
+ * 100 ms settle time.
+ *
+ * NOTE(review): regulator_set_voltage() return values are ignored here;
+ * only regulator_enable() failures are logged.
+ */
+void fe_sw_preinit(struct END_DEVICE *ei_local)
+{
+	struct device_node *np = ei_local->switch_np;
+	struct platform_device *pdev = of_find_device_by_node(np);
+	struct mtk_gsw *gsw;
+	int ret;
+
+	gsw = platform_get_drvdata(pdev);
+	if (!gsw) {
+		pr_info("Failed to get gsw\n");
+		return;
+	}
+
+	regulator_set_voltage(gsw->supply, 1000000, 1000000);
+	ret = regulator_enable(gsw->supply);
+	if (ret)
+		pr_info("Failed to enable mt7530 power: %d\n", ret);
+
+	if (gsw->mcm) {
+		/* switch is in-package: just turn on its 3.3 V rail */
+		regulator_set_voltage(gsw->b3v, 3300000, 3300000);
+		ret = regulator_enable(gsw->b3v);
+		if (ret)
+			dev_err(&pdev->dev, "Failed to enable b3v: %d\n", ret);
+	} else {
+		/* discrete switch: pulse the reset line */
+		ret = devm_gpio_request(&pdev->dev, gsw->reset_pin,
+					"mediatek,reset-pin");
+		if (ret)
+			pr_info("fail to devm_gpio_request\n");
+
+		gpio_direction_output(gsw->reset_pin, 0);
+		usleep_range(1000, 1100);
+		gpio_set_value(gsw->reset_pin, 1);
+		mdelay(100);
+		devm_gpio_free(&pdev->dev, gsw->reset_pin);
+	}
+}
+
+/*
+ * set_sgmii_force_link() - put GMAC1 or GMAC2 SGMII into forced-link mode.
+ *
+ * @port_num: 1 or 2, selects the SGMII block (SGMII_REG_BASE0/1 and its
+ *            PHYA companion); any other value is rejected.
+ * @speed:    written into PHYA reg 0x28 bits [3:2] (SGMII GEN2 rate).
+ *
+ * Enables the port's SGMII path in ETHSYS, forces the MAC to 1000M,
+ * disables SGMII autonegotiation, applies the forced-mode setting and
+ * releases PHYA power-down.
+ */
+void set_sgmii_force_link(int port_num, int speed)
+{
+	void __iomem *virt_addr;
+	unsigned int reg_value;
+	unsigned int sgmii_reg_phya, sgmii_reg;
+
+	virt_addr = ioremap(ETHSYS_BASE, 0x20);
+	reg_value = sys_reg_read(virt_addr + 0x14);
+
+	if (port_num == 1) {
+		reg_value |= SGMII_CONFIG_0;
+		sgmii_reg_phya = SGMII_REG_PHYA_BASE0;
+		sgmii_reg = SGMII_REG_BASE0;
+		set_ge1_force_1000();
+	} else if (port_num == 2) {
+		reg_value |= SGMII_CONFIG_1;
+		sgmii_reg_phya = SGMII_REG_PHYA_BASE1;
+		sgmii_reg = SGMII_REG_BASE1;
+		set_ge2_force_1000();
+	} else {
+		/* invalid port: the register bases below would otherwise be
+		 * used uninitialized
+		 */
+		iounmap(virt_addr);
+		return;
+	}
+
+	sys_reg_write(virt_addr + 0x14, reg_value);
+	reg_value = sys_reg_read(virt_addr + 0x14);
+	iounmap(virt_addr);
+
+	/* Set SGMII GEN2 speed(2.5G) */
+	virt_addr = ioremap(sgmii_reg_phya, 0x100);
+	reg_value = sys_reg_read(virt_addr + 0x28);
+	reg_value |= speed << 2;
+	sys_reg_write(virt_addr + 0x28, reg_value);
+	iounmap(virt_addr);
+
+	virt_addr = ioremap(sgmii_reg, 0x100);
+	/* disable SGMII AN */
+	reg_value = sys_reg_read(virt_addr);
+	reg_value &= ~(1 << 12);
+	sys_reg_write(virt_addr, reg_value);
+	/* SGMII force mode setting */
+	reg_value = sys_reg_read(virt_addr + 0x20);
+	sys_reg_write(virt_addr + 0x20, 0x31120019);
+	reg_value = sys_reg_read(virt_addr + 0x20);
+	/* Release PHYA power down state */
+	reg_value = sys_reg_read(virt_addr + 0xe8);
+	reg_value &= ~(1 << 4);
+	sys_reg_write(virt_addr + 0xe8, reg_value);
+	iounmap(virt_addr);
+}
+
+/*
+ * set_sgmii_an() - put GMAC1 or GMAC2 SGMII into autonegotiation mode.
+ *
+ * @port_num: 1 or 2, selects the SGMII block; any other value is
+ *            rejected.
+ *
+ * Enables the port's SGMII path in ETHSYS, sets the MAC to auto-polling,
+ * programs the SGMII link timer, disables remote fault, restarts AN and
+ * releases PHYA power-down.
+ */
+void set_sgmii_an(int port_num)
+{
+	void __iomem *virt_addr;
+	unsigned int reg_value;
+	unsigned int sgmii_reg, sgmii_reg_phya;
+
+	virt_addr = ioremap(ETHSYS_BASE, 0x20);
+	reg_value = sys_reg_read(virt_addr + 0x14);
+
+	if (port_num == 1) {
+		reg_value |= SGMII_CONFIG_0;
+		sgmii_reg_phya = SGMII_REG_PHYA_BASE0;
+		sgmii_reg = SGMII_REG_BASE0;
+	} else if (port_num == 2) {
+		reg_value |= SGMII_CONFIG_1;
+		sgmii_reg_phya = SGMII_REG_PHYA_BASE1;
+		sgmii_reg = SGMII_REG_BASE1;
+	} else {
+		/* invalid port: sgmii_reg would otherwise be used
+		 * uninitialized below
+		 */
+		iounmap(virt_addr);
+		return;
+	}
+
+	sys_reg_write(virt_addr + 0x14, reg_value);
+	iounmap(virt_addr);
+
+	/* set auto polling */
+	virt_addr = ioremap(ETHSYS_MAC_BASE, 0x300);
+	sys_reg_write(virt_addr + (0x100 * port_num), 0x21056300);
+	iounmap(virt_addr);
+
+	virt_addr = ioremap(sgmii_reg, 0x100);
+	/* set link timer */
+	sys_reg_write(virt_addr + 0x18, 0x186a0);
+	/* disable remote fault */
+	reg_value = sys_reg_read(virt_addr + 0x20);
+	reg_value |= 1 << 8;
+	sys_reg_write(virt_addr + 0x20, reg_value);
+	/* restart an */
+	reg_value = sys_reg_read(virt_addr);
+	reg_value |= 1 << 9;
+	sys_reg_write(virt_addr, reg_value);
+	/* Release PHYA power down state */
+	reg_value = sys_reg_read(virt_addr + 0xe8);
+	reg_value &= ~(1 << 4);
+	sys_reg_write(virt_addr + 0xe8, reg_value);
+	iounmap(virt_addr);
+}
+
+/*
+ * mt7622_esw_5port_gpio() - analog/bias tuning for the MT7622 embedded
+ * 5-port switch PHYs via paged PHY registers.
+ *
+ * Writing register 31 selects a register page (G2=0x2000, G3=0x3000,
+ * G7=0x7000 on PHY 0; L0=0x8000, L3=0xB000 per port PHY).  The sequence
+ * flips individual bias/LDO/bandgap bits on the global pages and per-port
+ * analog bits on the local pages; values are vendor bring-up constants
+ * and the ordering is significant, so the body is left untouched.
+ */
+static void mt7622_esw_5port_gpio(void)
+{
+	u32 ret, value, i;
+
+	mii_mgr_write(0, 31, 0x2000); /* change G2 page */
+
+	ret = mii_mgr_read(0, 31, &value);
+	pr_debug("(%d) R31: %x!\n", ret, value);
+
+	/* NOTE(review): the read result is discarded; G2_R25 is simply
+	 * forced to 0xf020 here
+	 */
+	mii_mgr_read(0, 25, &value);
+	value = 0xf020;
+	mii_mgr_write(0, 25, value);
+	mii_mgr_read(0, 25, &value);
+	pr_debug("G2_R25: %x!\n", value);
+
+	mii_mgr_write(0, 31, 0x7000); /* change G7 page */
+	mii_mgr_read(0, 22, &value);
+
+	if (value & 0x8000) {
+		pr_debug("G7_R22[15]: 1\n");
+	} else {
+		mii_mgr_write(0, 22, (value | (1 << 15)));
+		pr_debug("G7_R22[15]: set to 1\n");
+	}
+
+	mii_mgr_write(0, 31, 0x3000); /* change G3 page */
+	mii_mgr_read(0, 16, &value);
+	value |= (1 << 3);
+	mii_mgr_write(0, 16, value);
+
+	mii_mgr_read(0, 16, &value);
+	pr_debug("G3_R16: %x!\n", value);
+
+	mii_mgr_write(0, 31, 0x7000); /* change G7 page */
+	mii_mgr_read(0, 22, &value);
+	value |= (1 << 5);
+	mii_mgr_write(0, 22, value);
+
+	/* clear then set G7_R24 bit 14 (toggle) */
+	mii_mgr_read(0, 24, &value);
+	value &= 0xDFFF;
+	mii_mgr_write(0, 24, value);
+
+	mii_mgr_read(0, 24, &value);
+	value |= (1 << 14);
+	mii_mgr_write(0, 24, value);
+
+	mii_mgr_read(0, 22, &value);
+	pr_debug("G7_R22: %x!\n", value);
+
+	mii_mgr_read(0, 24, &value);
+	pr_debug("G7_R24: %x!\n", value);
+
+	/* per-port local-page analog settings for PHYs 0..4 */
+	for (i = 0; i <= 4; i++) {
+		mii_mgr_write(i, 31, 0x8000); /* change L0 page */
+
+		mii_mgr_read(i, 30, &value);
+		value |= 0x3FFF;
+		mii_mgr_write(i, 30, value);
+		mii_mgr_read(i, 30, &value);
+		pr_debug("port %d L0_R30: %x!\n", i, value);
+
+		mii_mgr_write(i, 31, 0xB000); /* change L3 page */
+
+		mii_mgr_read(i, 26, &value);
+		value |= (1 << 12);
+		mii_mgr_write(i, 26, value);
+
+		mii_mgr_read(i, 26, &value);
+		pr_debug("port %d L3_R26: %x!\n", i, value);
+
+		mii_mgr_read(i, 25, &value);
+		value |= (1 << 8);
+		value |= (1 << 12);
+		mii_mgr_write(i, 25, value);
+
+		mii_mgr_read(i, 25, &value);
+		pr_debug("port %d L3_R25: %x!\n", i, value);
+	}
+
+	mii_mgr_write(0, 31, 0x2000); /* change G2 page */
+
+	mii_mgr_read(0, 25, &value);
+
+	pr_debug("G2_R25 before: %x!\n", value);
+	/* value &= 0xFFFF3FFF; */
+	/* G2_R25: 1020!-->0020 */
+	/* value &= 0xFFFF2FFF; */
+	value = 0x20;
+	mii_mgr_write(0, 25, value);
+
+	mii_mgr_read(0, 25, &value);
+	pr_debug("G2_R25: %x!\n", value);
+
+	/* LDO */
+	mii_mgr_write(0, 31, 0x7000); /* change G7 page */
+
+	mii_mgr_read(0, 16, &value);
+	value |= (1 << 2);
+	mii_mgr_write(0, 16, value);
+
+	mii_mgr_read(0, 16, &value);
+	pr_debug("G7_R16: %x!\n", value);
+
+	/* BG */
+	mii_mgr_write(0, 31, 0x2000); /* change G2 page */
+
+	mii_mgr_read(0, 22, &value);
+	value |= (1 << 12);
+	value |= (1 << 13);
+	value |= (1 << 14);
+	mii_mgr_write(0, 22, value);
+
+	mii_mgr_read(0, 22, &value);
+	pr_debug("G2_R22: %x!\n", value);
+
+	/* then clear bit 15 again */
+	mii_mgr_read(0, 22, &value);
+	value &= 0x7FFF;
+	mii_mgr_write(0, 22, value);
+
+	mii_mgr_read(0, 22, &value);
+	pr_debug("G2_R22: %x!\n", value);
+}
+
+/*
+ * leopard_gmii_config() - route the Leopard internal giga-PHY to GMAC0
+ * or GMAC2 and set the matching port-5/6 MAC configuration.
+ *
+ * @enable: nonzero routes the gphy to GMAC0 (INFRA bit 1 set), enables
+ *          port 5 in AN mode with port 6 fixed, and starts autoneg on
+ *          PHY address 0; zero routes it to GMAC2 and restores the
+ *          fixed-mode register values.
+ *
+ * Also forces GEPHY_CTRL0 (GPIO_GO_BASE) to 0x10000820 in both paths.
+ */
+void leopard_gmii_config(u8 enable)
+{
+	unsigned int reg_value = 0;
+	void __iomem *gpio_base_virt, *infra_base_virt;
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	/*bit[1]: gphy connect GMAC0 or GMAC2 1:GMAC0. 0:GMAC2*/
+	/*bit[0]: Co-QPHY path selection 0:U3path, 1:SGMII*/
+	infra_base_virt = ioremap(INFRA_BASE, 0x10);
+	reg_value = sys_reg_read(infra_base_virt);
+	if (enable) {
+		reg_value = reg_value | 0x02;
+		sys_reg_write(infra_base_virt, reg_value);
+
+		mac_to_gigaphy_mode_addr = 0;
+		enable_auto_negotiate(ei_local);
+
+		/*port5 enable*/
+		sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x90, 0x00007f7f);
+		/*port5 an mode, port6 fix*/
+		sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0xc8, 0x20503bfa);
+	} else {
+			reg_value = reg_value & (~0x2);
+			sys_reg_write(infra_base_virt, reg_value);
+			sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x84, 0);
+			sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x90, 0x10007f7f);
+			sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0xc8, 0x05503f38);
+	}
+	/*10000710	GEPHY_CTRL0[9:6] = 0 */
+	gpio_base_virt = ioremap(GPIO_GO_BASE, 0x10);
+	reg_value = sys_reg_read(gpio_base_virt);
+	/*reg_value = reg_value & ~(0xfffff3cf);*/
+	reg_value = 0x10000820;
+	sys_reg_write(gpio_base_virt, reg_value);
+	iounmap(gpio_base_virt);
+	iounmap(infra_base_virt);
+}
+
+/* fe_sw_init - one-time MAC/PHY/switch bring-up, dispatched on the
+ * architecture flags and chip_name stored in the raether private data.
+ *
+ * Handles the GE1/GE2 RGMII/TRGMII/SGMII permutations as well as the
+ * embedded-switch (ESW/EPHY) variants for MT7621/MT7622/MT7623/Leopard.
+ * All ioremap()ed windows are released before the function returns.
+ *
+ * Fix: the ETHSYS_BASE mapping used for the "clear SGMII setting" step
+ * was previously leaked (ioremap without iounmap).
+ */
+void fe_sw_init(void)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	unsigned int reg_value = 0;
+	void __iomem *gpio_base_virt, *infra_base_virt, *ethsys_base_virt;
+	//int i;
+	//u16 r0_tmp;
+
+	/* Case1: MT7623/MT7622 GE1 + GigaPhy */
+	if (ei_local->architecture & GE1_RGMII_AN) {
+		//enable_auto_negotiate(ei_local);
+		if (is_marvell_gigaphy(1)) {
+			if (ei_local->features & FE_FPGA_MODE) {
+				mii_mgr_read(mac_to_gigaphy_mode_addr, 9,
+					     &reg_value);
+				/* turn off 1000Base-T Advertisement
+				 * (9.9=1000Full, 9.8=1000Half)
+				 */
+				reg_value &= ~(3 << 8);
+				mii_mgr_write(mac_to_gigaphy_mode_addr,
+					      9, reg_value);
+
+				/*10Mbps, debug */
+				mii_mgr_write(mac_to_gigaphy_mode_addr,
+					      4, 0x461);
+
+				mii_mgr_read(mac_to_gigaphy_mode_addr, 0,
+					     &reg_value);
+				reg_value |= 1 << 9;	/* restart AN */
+				mii_mgr_write(mac_to_gigaphy_mode_addr,
+					      0, reg_value);
+			}
+		}
+		if (is_vtss_gigaphy(1)) {
+			/* adjust RGMII clock skew via Vitesse extended page 1 */
+			mii_mgr_write(mac_to_gigaphy_mode_addr, 31, 1);
+			mii_mgr_read(mac_to_gigaphy_mode_addr, 28,
+				     &reg_value);
+			pr_info("Vitesse phy skew: %x --> ", reg_value);
+			reg_value |= (0x3 << 12);
+			reg_value &= ~(0x3 << 14);
+			pr_info("%x\n", reg_value);
+			mii_mgr_write(mac_to_gigaphy_mode_addr, 28,
+				      reg_value);
+			mii_mgr_write(mac_to_gigaphy_mode_addr, 31, 0);
+		}
+	}
+
+	/* Case2: RT3883/MT7621 GE2 + GigaPhy */
+	if (ei_local->architecture & GE2_RGMII_AN) {
+#if(0)
+		leopard_gmii_config(0);
+		enable_auto_negotiate(ei_local);
+		set_ge2_an();
+		set_ge2_gmii();
+		if (ei_local->chip_name == LEOPARD_FE) {
+			for (i = 1; i < 5; i++)
+				do_fe_phy_all_analog_cal(i);
+
+			do_ge_phy_all_analog_cal(0);
+		}
+#endif
+		if (is_marvell_gigaphy(2)) {
+			mii_mgr_read(mac_to_gigaphy_mode_addr2, 9,
+				     &reg_value);
+			/* turn off 1000Base-T Advertisement
+			 * (9.9=1000Full, 9.8=1000Half)
+			 */
+			reg_value &= ~(3 << 8);
+			mii_mgr_write(mac_to_gigaphy_mode_addr2, 9,
+				      reg_value);
+
+			mii_mgr_read(mac_to_gigaphy_mode_addr2, 20,
+				     &reg_value);
+			/* Add delay to RX_CLK for RXD Outputs */
+			reg_value |= 1 << 7;
+			mii_mgr_write(mac_to_gigaphy_mode_addr2, 20,
+				      reg_value);
+
+			mii_mgr_read(mac_to_gigaphy_mode_addr2, 0,
+				     &reg_value);
+			reg_value |= 1 << 15;	/* PHY Software Reset */
+			mii_mgr_write(mac_to_gigaphy_mode_addr2, 0,
+				      reg_value);
+			if (ei_local->features & FE_FPGA_MODE) {
+				mii_mgr_read(mac_to_gigaphy_mode_addr2,
+					     9, &reg_value);
+				/* turn off 1000Base-T Advertisement
+				 * (9.9=1000Full, 9.8=1000Half)
+				 */
+				reg_value &= ~(3 << 8);
+				mii_mgr_write(mac_to_gigaphy_mode_addr2,
+					      9, reg_value);
+
+				/*10Mbps, debug */
+				mii_mgr_write(mac_to_gigaphy_mode_addr2,
+					      4, 0x461);
+
+				mii_mgr_read(mac_to_gigaphy_mode_addr2,
+					     0, &reg_value);
+				reg_value |= 1 << 9;	/* restart AN */
+				mii_mgr_write(mac_to_gigaphy_mode_addr2,
+					      0, reg_value);
+			}
+		}
+		if (is_vtss_gigaphy(2)) {
+			mii_mgr_write(mac_to_gigaphy_mode_addr2, 31, 1);
+			mii_mgr_read(mac_to_gigaphy_mode_addr2, 28,
+				     &reg_value);
+			pr_info("Vitesse phy skew: %x --> ", reg_value);
+			reg_value |= (0x3 << 12);
+			reg_value &= ~(0x3 << 14);
+			pr_info("%x\n", reg_value);
+			mii_mgr_write(mac_to_gigaphy_mode_addr2, 28,
+				      reg_value);
+			mii_mgr_write(mac_to_gigaphy_mode_addr2, 31, 0);
+		}
+	}
+
+	/* Case3:  MT7623 GE1 + Internal GigaSW */
+	if (ei_local->architecture &
+	    (GE1_RGMII_FORCE_1000 | GE1_TRGMII_FORCE_2000 |
+	     GE1_TRGMII_FORCE_2600)) {
+		if ((ei_local->chip_name == MT7623_FE) ||
+		    (ei_local->chip_name == MT7621_FE))
+			setup_internal_gsw();
+		/* TODO
+		 * else if (ei_local->features & FE_FPGA_MODE)
+		 * setup_fpga_gsw();
+		 * else
+		 * sys_reg_write(MDIO_CFG, INIT_VALUE_OF_FORCE_1000_FD);
+		 */
+	}
+
+	/* Case4: MT7623 GE2 + GigaSW */
+	if (ei_local->architecture & GE2_RGMII_FORCE_1000) {
+		set_ge2_force_1000();
+		if (ei_local->chip_name == MT7623_FE)
+			setup_external_gsw();
+	}
+	/*TODO
+	 * else
+	 * sys_reg_write(MDIO_CFG2, INIT_VALUE_OF_FORCE_1000_FD);
+	 */
+
+	/* Case5: MT7622 embedded switch */
+	if (ei_local->architecture & RAETH_ESW) {
+		reg_value = sys_reg_read(ETHDMASYS_ETH_MAC_BASE + 0xC);
+		reg_value = reg_value | 0x1;
+		sys_reg_write(ETHDMASYS_ETH_MAC_BASE + 0xC, reg_value);
+
+		if (ei_local->architecture & MT7622_EPHY) {
+			/* route AGPIO to the 5-port ephy, set pinmux */
+			gpio_base_virt = ioremap(GPIO_GO_BASE, 0x100);
+			sys_reg_write(gpio_base_virt + 0xF0, 0xE0FFFFFF);
+			iounmap(gpio_base_virt);
+			gpio_base_virt = ioremap(GPIO_MODE_BASE, 0x100);
+			reg_value = sys_reg_read(gpio_base_virt + 0x90);
+			reg_value &= 0x0000ffff;
+			reg_value |= 0x22220000;
+			sys_reg_write(gpio_base_virt + 0x90, reg_value);
+			iounmap(gpio_base_virt);
+			sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x84, 0);
+			sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x90, 0x10007f7f);
+			sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0xc8, 0x05503f38);
+		} else if (ei_local->architecture & LEOPARD_EPHY) {
+			set_ge1_an();
+			/*port0 force link down*/
+			sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x84, 0x8000000);
+			sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x8c, 0x02404040);
+			sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x98, 0x00007f7f);
+			sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x04, 0xfbffffff);
+			sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x9c, 0x0008a041);
+#if(0)
+			if (ei_local->architecture & LEOPARD_EPHY_GMII) {
+				leopard_gmii_config(1);
+				set_ge0_gmii();
+			} else {
+				leopard_gmii_config(0);
+			}
+#endif
+		}
+	}
+
+	/* clear SGMII setting */
+	if ((ei_local->chip_name == LEOPARD_FE) || (ei_local->chip_name == MT7622_FE)) {
+		ethsys_base_virt = ioremap(ETHSYS_BASE, 0x20);
+		reg_value = sys_reg_read(ethsys_base_virt + 0x14);
+		reg_value &= ~(3 << 8);
+		sys_reg_write(ethsys_base_virt + 0x14, reg_value);
+		iounmap(ethsys_base_virt);	/* was leaked before this fix */
+	}
+
+	if (ei_local->architecture & GE1_SGMII_FORCE_2500)
+		set_sgmii_force_link(1, 1);
+	else if (ei_local->architecture & GE1_SGMII_AN) {
+		enable_auto_negotiate(ei_local);
+		set_sgmii_an(1);
+	}
+	if (ei_local->chip_name == LEOPARD_FE) {
+		if (ei_local->architecture & GE2_RAETH_SGMII) {
+			/*bit[1]: gphy connect GMAC0 or GMAC2 1:GMAC0. 0:GMAC2*/
+			/*bit[0]: Co-QPHY path selection 0:U3path, 1:SGMII*/
+			infra_base_virt = ioremap(INFRA_BASE, 0x10);
+			reg_value = sys_reg_read(infra_base_virt);
+			reg_value = reg_value | 0x01;
+			sys_reg_write(infra_base_virt, reg_value);
+			iounmap(infra_base_virt);
+		}
+	}
+
+	if (ei_local->architecture & GE2_SGMII_FORCE_2500)
+		set_sgmii_force_link(2, 1);
+	else if (ei_local->architecture & GE2_SGMII_AN) {
+		enable_auto_negotiate(ei_local);
+		set_sgmii_an(2);
+	}
+
+	if (ei_local->architecture & MT7622_EPHY) {
+		//mt7622_ephy_cal();
+	} else if (ei_local->architecture & LEOPARD_EPHY) {
+#if(0)
+		leopard_ephy_cal();
+		tc_phy_write_l_reg(2, 1, 18, 0x21f);
+		tc_phy_write_l_reg(2, 1, 18, 0x22f);
+		tc_phy_write_l_reg(2, 1, 18, 0x23f);
+		tc_phy_write_l_reg(2, 1, 18, 0x24f);
+		tc_phy_write_l_reg(2, 1, 18, 0x4f);
+		tc_phy_write_l_reg(4, 1, 18, 0x21f);
+		tc_phy_write_l_reg(4, 1, 18, 0x22f);
+		tc_phy_write_l_reg(4, 1, 18, 0x2f);
+		r0_tmp = tc_phy_read_l_reg(3, 0, 0);
+		r0_tmp = r0_tmp | 0x200;
+		tc_phy_write_l_reg(3, 0, 0, r0_tmp);
+#endif
+	}
+
+	if (ei_local->chip_name == MT7621_FE) {
+		clk_prepare_enable(ei_local->clks[MTK_CLK_GP0]);
+
+		/* switch to esw */
+		sys_reg_write(ETHDMASYS_ETH_MAC_BASE + 0xC, 0x1);
+
+		/* set agpio to 5-port ephy */
+		gpio_base_virt = ioremap(GPIO_GO_BASE, 0x100);
+		reg_value = sys_reg_read(gpio_base_virt + 0xF0);
+		reg_value &= 0xE0FFFFFF;
+		sys_reg_write(gpio_base_virt + 0xF0, reg_value);
+		iounmap(gpio_base_virt);
+
+		/* set ephy to 5-port gpio mode */
+		mt7622_esw_5port_gpio();
+
+		/* set agpio to 0-port ephy */
+		gpio_base_virt = ioremap(GPIO_GO_BASE, 0x100);
+		reg_value = sys_reg_read(gpio_base_virt + 0xF0);
+		reg_value |= BITS(24, 28);
+		sys_reg_write(gpio_base_virt + 0xF0, reg_value);
+		iounmap(gpio_base_virt);
+
+		/* switch back to gmac1 */
+		sys_reg_write(ETHDMASYS_ETH_MAC_BASE + 0xC, 0x0);
+
+		clk_disable_unprepare(ei_local->clks[MTK_CLK_GP0]);
+	}
+
+	if (ei_local->chip_name == MT7622_FE) {
+		if (ei_local->features & FE_GE2_SUPPORT) {
+			gpio_base_virt = ioremap(GPIO_GO_BASE + 0x100, 0x100);
+			reg_value = sys_reg_read(gpio_base_virt + 0x70);
+			reg_value = reg_value | (1 << 30);
+			sys_reg_write(gpio_base_virt + 0x70, reg_value);
+			reg_value = sys_reg_read(gpio_base_virt + 0x8c);
+			reg_value = reg_value | (1 << 24);
+			sys_reg_write(gpio_base_virt + 0x8c, reg_value);
+			iounmap(gpio_base_virt);
+		}
+	}
+}
+
+/* fe_sw_deinit - power down the external/embedded switch on teardown.
+ * @ei_local: raether private data holding the switch device-tree node.
+ *
+ * Looks up the gsw platform driver data for the switch node, disables its
+ * regulators, then parks the EPHY pins back to GPIO mode so the PHYs stop
+ * driving the lines.  Returns silently when no gsw was probed.
+ */
+void fe_sw_deinit(struct END_DEVICE *ei_local)
+{
+	struct device_node *np = ei_local->switch_np;
+	struct platform_device *pdev = of_find_device_by_node(np);
+	void __iomem *gpio_base_virt;
+	unsigned int reg_value;
+	struct mtk_gsw *gsw;
+	int ret;
+
+	gsw = platform_get_drvdata(pdev);
+	if (!gsw)
+		return;	/* probe never ran or already removed */
+
+	ret = regulator_disable(gsw->supply);
+	if (ret)
+		dev_err(&pdev->dev, "Failed to disable mt7530 power: %d\n", ret);
+
+	if (gsw->mcm) {
+		ret = regulator_disable(gsw->b3v);
+		if (ret)
+			dev_err(&pdev->dev, "Failed to disable b3v: %d\n", ret);
+	}
+
+	if (ei_local->architecture & MT7622_EPHY) {
+		/* set ephy to 5-port gpio mode */
+		mt7622_esw_5port_gpio();
+
+		/* set agpio to 0-port ephy */
+		gpio_base_virt = ioremap(GPIO_GO_BASE, 0x100);
+		reg_value = sys_reg_read(gpio_base_virt + 0xF0);
+		reg_value |= BITS(24, 28);
+		sys_reg_write(gpio_base_virt + 0xF0, reg_value);
+		iounmap(gpio_base_virt);
+	} else if (ei_local->architecture & LEOPARD_EPHY) {
+		mt7622_esw_5port_gpio();
+		/*10000710	GEPHY_CTRL0[9:6] = 1 */
+		gpio_base_virt = ioremap(GPIO_GO_BASE, 0x10);
+		reg_value = sys_reg_read(gpio_base_virt);
+		reg_value = reg_value | 0x3c0;
+		sys_reg_write(gpio_base_virt, reg_value);
+		iounmap(gpio_base_virt);
+
+		gpio_base_virt = ioremap(GPIO_MODE_BASE, 0x100);
+		/*10217310	GPIO_MODE1 [31:16] = 0x0*/
+		reg_value = sys_reg_read(gpio_base_virt + 0x10);
+		reg_value &= 0x0000ffff;
+		/* NOTE(review): the next mask is redundant — the line above
+		 * already cleared bits [31:16]; kept for byte-identity.
+		 */
+		reg_value = reg_value & (~0xffff0000);
+		sys_reg_write(gpio_base_virt + 0x10, reg_value);
+
+		/*10217320	GPIO_MODE2(gpio17/18/21/22/23)*/
+		reg_value = sys_reg_read(gpio_base_virt + 0x20);
+		reg_value = reg_value & (~0xfff00fff);
+		sys_reg_write(gpio_base_virt + 0x20, reg_value);
+		iounmap(gpio_base_virt);
+	}
+}
+
+/* esw_link_status_changed - log the new link state of one switch port.
+ * @port_no: switch port index (selects PMSR at 0x3008 + port*0x100).
+ * @dev_id:  opaque cookie from the interrupt handler; unused here.
+ *
+ * Reads the per-port MAC status register via MDIO (phy addr 31 = switch
+ * global access) and reports bit0 (link up) on the console.
+ */
+static void esw_link_status_changed(int port_no, void *dev_id)
+{
+	unsigned int reg_val;
+
+	mii_mgr_read(31, (0x3008 + (port_no * 0x100)), &reg_val);
+	if (reg_val & 0x1)
+		pr_info("ESW: Link Status Changed - Port%d Link UP\n", port_no);
+	else
+		pr_info("ESW: Link Status Changed - Port%d Link Down\n",
+			port_no);
+}
+
+/* gsw_interrupt - external gigabit switch (MT7530-class) IRQ handler.
+ *
+ * Reads the switch global interrupt status register (0x700c via MDIO
+ * phy addr 31), reports each port whose link-change bit is set, then
+ * acks all five link-change bits by writing 0x1f back.  The MDIO
+ * transactions are serialized under ei_local->page_lock.
+ */
+irqreturn_t gsw_interrupt(int irq, void *resv)
+{
+	unsigned long flags;
+	unsigned int reg_int_val;
+	struct net_device *dev = dev_raether;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	void *dev_id = NULL;
+
+	spin_lock_irqsave(&ei_local->page_lock, flags);
+	mii_mgr_read(31, 0x700c, &reg_int_val);
+
+	if (reg_int_val & P4_LINK_CH)
+		esw_link_status_changed(4, dev_id);
+
+	if (reg_int_val & P3_LINK_CH)
+		esw_link_status_changed(3, dev_id);
+	if (reg_int_val & P2_LINK_CH)
+		esw_link_status_changed(2, dev_id);
+	if (reg_int_val & P1_LINK_CH)
+		esw_link_status_changed(1, dev_id);
+	if (reg_int_val & P0_LINK_CH)
+		esw_link_status_changed(0, dev_id);
+
+	mii_mgr_write(31, 0x700c, 0x1f);	/* ack switch link change */
+	spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+/* phy_tr_dbg - read one Token-Ring debug-node register from a TC PHY.
+ * @phyaddr:   MDIO address of the PHY.
+ * @type:      debug node name ("DSPF", "PMA", "TR", "PCS", "FFE", "EC",
+ *             "ECT", "NC", "DFEDC", "DEC", "CRC", "AN", "CMI", "SUPV");
+ *             matched by prefix, longest prefixes checked first.
+ * @data_addr: register address inside the node (masked to 6 bits).
+ * @ch_num:    channel for the per-channel nodes (FFE/EC/ECT/NC/DFEDC/CRC).
+ *
+ * Switches the PHY to the token-ring debug page (0x52B5), issues a read
+ * command, combines the low/high data registers into one 24-bit value,
+ * logs it and restores page 0.  Returns the value, or 0xFFFF for an
+ * unknown @type.
+ *
+ * Fix: "ECT" is now tested before "EC" — previously strncmp(type, "EC", 2)
+ * matched first, so the ECT branch (node 0x01) was unreachable and "ECT"
+ * silently read node 0x00 instead.
+ */
+u32 phy_tr_dbg(u8 phyaddr, char *type, u32 data_addr, u8 ch_num)
+{
+	u16 page_reg = 31;
+	u32 token_ring_debug_reg = 0x52B5;
+	u32 token_ring_control_reg = 0x10;
+	u32 token_ring_low_data_reg = 0x11;
+	u32 token_ring_high_data_reg = 0x12;
+	u16 ch_addr = 0;
+	u32 node_addr = 0;
+	u32 value = 0;
+	u32 value_high = 0;
+	u32 value_low = 0;
+
+	if (strncmp(type, "DSPF", 4) == 0) {
+		/* DSP Filter Debug Node*/
+		ch_addr = 0x02;
+		node_addr = 0x0D;
+	} else if (strncmp(type, "PMA", 3) == 0) {
+		/*PMA Debug Node*/
+		ch_addr = 0x01;
+		node_addr = 0x0F;
+	} else if (strncmp(type, "TR", 2) == 0) {
+		/* Timing Recovery  Debug Node */
+		ch_addr = 0x01;
+		node_addr = 0x0D;
+	} else if (strncmp(type, "PCS", 3) == 0) {
+		/* R1000PCS Debug Node */
+		ch_addr = 0x02;
+		node_addr = 0x0F;
+	} else if (strncmp(type, "FFE", 3) == 0) {
+		/* FFE Debug Node */
+		ch_addr = ch_num;
+		node_addr = 0x04;
+	} else if (strncmp(type, "ECT", 3) == 0) {
+		/* EC/Tail Debug Node — must precede the "EC" prefix test */
+		ch_addr = ch_num;
+		node_addr = 0x01;
+	} else if (strncmp(type, "EC", 2) == 0) {
+		/* ECC Debug Node */
+		ch_addr = ch_num;
+		node_addr = 0x00;
+	} else if (strncmp(type, "NC", 2) == 0) {
+		/* EC/NC Debug Node */
+		ch_addr = ch_num;
+		node_addr = 0x01;
+	} else if (strncmp(type, "DFEDC", 5) == 0) {
+		/* DFETail/DC Debug Node */
+		ch_addr = ch_num;
+		node_addr = 0x05;
+	} else if (strncmp(type, "DEC", 3) == 0) {
+		/* R1000DEC Debug Node */
+		ch_addr = 0x00;
+		node_addr = 0x07;
+	} else if (strncmp(type, "CRC", 3) == 0) {
+		/* R1000CRC Debug Node */
+		ch_addr = ch_num;
+		node_addr = 0x06;
+	} else if (strncmp(type, "AN", 2) == 0) {
+		/* Autoneg Debug Node */
+		ch_addr = 0x00;
+		node_addr = 0x0F;
+	} else if (strncmp(type, "CMI", 3) == 0) {
+		/* CMI Debug Node */
+		ch_addr = 0x03;
+		node_addr = 0x0F;
+	} else if (strncmp(type, "SUPV", 4) == 0) {
+		/* SUPV PHY  Debug Node */
+		ch_addr = 0x00;
+		node_addr = 0x0D;
+	} else {
+		pr_info("Wrong TR register Type !");
+		return 0xFFFF;
+	}
+	data_addr = data_addr & 0x3F;	/* node register space is 6 bits */
+
+	tc_mii_write(phyaddr, page_reg, token_ring_debug_reg);
+	/* bit15 = enable, bit13 = read command */
+	tc_mii_write(phyaddr, token_ring_control_reg,
+		     (1 << 15) | (1 << 13) | (ch_addr << 11) | (node_addr << 7) | (data_addr << 1));
+
+	value_low = tc_mii_read(phyaddr, token_ring_low_data_reg);
+	value_high = tc_mii_read(phyaddr, token_ring_high_data_reg);
+	value = value_low + ((value_high & 0x00FF) << 16);
+	pr_info("*%s => Phyaddr=%d, ch_addr=%d, node_addr=0x%X, data_addr=0x%X , value=0x%X\r\n",
+		type, phyaddr, ch_addr, node_addr, data_addr, value);
+	tc_mii_write(phyaddr, page_reg, 0x00);/* V1.11 */
+
+	return value;
+}
+
+/* esw_show_debug_log - dump PHY DSP diagnostics for one port.
+ * @phy_addr: MDIO address of the PHY to inspect.
+ *
+ * Prints the four per-pair VGA state fields from the PMA token-ring
+ * debug node (reg 0x38), then the local MSE readings for pairs A-D
+ * from MMD 0x1E registers 0x9B/0x9C (high byte = A/C, low byte = B/D).
+ */
+void esw_show_debug_log(u32 phy_addr)
+{
+	u32 val;
+
+	val = phy_tr_dbg(phy_addr, "PMA", 0x38, 0);
+	pr_info("VgaStateA =0x%x\n", ((val >> 4) & 0x1F));
+	pr_info("VgaStateB =0x%x\n", ((val >> 9) & 0x1F));
+	pr_info("VgaStateC =0x%x\n", ((val >> 14) & 0x1F));
+	pr_info("VgaStateD =0x%x\n", ((val >> 19) & 0x1F));
+
+	/* pairA */
+	val = tc_phy_read_dev_reg(phy_addr, 0x1E, 0x9B);
+	pr_info("XX0 0x1E,0x9B =0x%x\n", val);
+	val = (val >> 8) & 0xFF;
+	pr_info("AA0 lch_mse_mdcA =0x%x\r\n", val);
+
+	/* Pair B */
+	val = tc_phy_read_dev_reg(phy_addr, 0x1E, 0x9B);
+	pr_info("XX1 0x1E,0x9B =0x%x\n", val);
+	val = (val) & 0xFF;	/* V1.16 */
+	pr_info("AA1 lch_mse_mdcB =0x%x\r\n", val);
+	/* Pair C */
+	val = tc_phy_read_dev_reg(phy_addr, 0x1E, 0x9C);
+	pr_info("XX2 0x1E,0x9C =0x%x\n", val);
+	val = (val >> 8) & 0xFF;
+	pr_info("AA2 lch_mse_mdcC =0x%x\r\n", val);
+
+	/* Pair D */
+	val = tc_phy_read_dev_reg(phy_addr, 0x1E, 0x9C);
+	pr_info("XX3 0x1E,0x9C =0x%x\n", val);
+	val = (val) & 0xFF;	/* V1.16 */
+	pr_info("AA3 lch_mse_mdcD =0x%x\r\n", val);
+}
+
+/* esw_interrupt - embedded switch IRQ handler (link-change polling).
+ *
+ * Masks and acks the switch interrupt, then polls the five PHYs' BMSR
+ * (reg 1, bit2 = link status) and logs transitions against a cached
+ * copy.  For Leopard, port0 (the giga port) additionally dumps DSP
+ * diagnostics on every transition.  The mask is re-enabled on exit.
+ *
+ * NOTE(review): port_status is function-static, so this assumes a
+ * single switch instance — confirm if multiple instances are possible.
+ */
+irqreturn_t esw_interrupt(int irq, void *resv)
+{
+	unsigned long flags;
+	u32 phy_val;
+	int i;
+	static unsigned int port_status[5] = {0, 0, 0, 0, 0};
+	struct net_device *dev = dev_raether;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+
+	spin_lock_irqsave(&ei_local->page_lock, flags);
+	/* disable irq mask and ack irq status */
+	sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x4, 0xffffffff);
+	sys_reg_write(ETHDMASYS_ETH_SW_BASE, 0x04000000);
+	spin_unlock_irqrestore(&ei_local->page_lock, flags);
+	for (i = 0; i < 5; i++) {
+		mii_mgr_read(i, 1, &phy_val);
+		if (port_status[i] != ((phy_val & 0x4) >> 2)) {
+			if (port_status[i] == 0) {
+				port_status[i] = 1;
+				pr_info("ESW: Link Status Changed - Port%d Link Up\n", i);
+			} else {
+				port_status[i] = 0;
+				pr_info("ESW: Link Status Changed - Port%d Link Down\n", i);
+			}
+			if (ei_local->architecture & LEOPARD_EPHY) {
+				if (i == 0)
+					esw_show_debug_log(i);/*port0 giga port*/
+			}
+		}
+	}
+	/* enable irq mask */
+	sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x4, 0xfbffffff);
+	return IRQ_HANDLED;
+}
+
+/* ephy_ioctl - dispatch EPHY analog-calibration requests from userspace.
+ * @dev:        net device the ioctl arrived on (unused here).
+ * @ifr:        raw ifreq (unused here; already decoded by the caller).
+ * @ioctl_data: decoded command block; only ->cmd is consumed.
+ *
+ * Each calibration is retried up to 3 times until its global completion
+ * flag (set by the fe_cal_* / ge_cal_* routines) goes non-zero.  FE
+ * commands iterate over ports 0-4; GE commands run on port 0 only
+ * (port_num is still 0 when those cases execute).
+ *
+ * Returns 0 on success, 1 for an unknown command.
+ *
+ * NOTE(review): in the VBG and R50 cases, cnt and the completion flag
+ * are reset once *outside* the port loop, unlike the offset/amp cases
+ * which reset them per port — so after the first port succeeds the
+ * remaining ports skip the retry loop entirely.  This may be intentional
+ * (shared/common calibration, cf. FE_CAL_COMMON) — confirm with the
+ * calibration routines.
+ */
+int ephy_ioctl(struct net_device *dev, struct ifreq *ifr,
+	       struct ephy_ioctl_data *ioctl_data)
+{
+	int ret = 0;
+	unsigned int cmd;
+	u8 cnt = 0;
+	u8 port_num = 0;
+
+	cmd = ioctl_data->cmd;
+	pr_info("%s : cmd =%x\n", __func__, cmd);
+	switch (cmd) {
+	case RAETH_VBG_IEXT_CALIBRATION:
+		cnt = 0;
+		fe_cal_vbg_flag = 0; /*restart calibration*/
+		for (port_num = 0; port_num < 5; port_num++) {
+			while ((fe_cal_vbg_flag == 0) && (cnt < 0x3)) {
+				fe_cal_vbg(port_num, 1);
+				cnt++;
+				if (fe_cal_vbg_flag == 0)
+					pr_info(" VBG wait! (%d)\n", cnt);
+			}
+		}
+		break;
+
+	case RAETH_TXG_R50_CALIBRATION:
+		cnt = 0;
+		fe_cal_r50_flag = 0;
+		for (port_num = 0; port_num < 5; port_num++) {
+			while ((fe_cal_r50_flag == 0) && (cnt < 0x3)) {
+				fe_cal_r50(port_num, 1);
+				cnt++;
+				if (fe_cal_r50_flag == 0)
+					pr_info(" FE R50 wait! (%d)\n", cnt);
+			}
+		}
+		break;
+
+	case RAETH_TXG_OFFSET_CALIBRATION:
+		/* per-port: reset retry state for every port */
+		for (port_num = 0; port_num < 5; port_num++) {
+			cnt = 0;
+			fe_cal_tx_offset_flag = 0;
+			while ((fe_cal_tx_offset_flag == 0) && (cnt < 0x3)) {
+				fe_cal_tx_offset(port_num, 100);
+				cnt++;
+				if (fe_cal_tx_offset_flag == 0)
+					pr_info("FeTxOffsetAnaCal wait!(%d)\n",
+						cnt);
+			}
+			cnt = 0;
+			fe_cal_tx_offset_flag_mdix = 0;
+			while ((fe_cal_tx_offset_flag_mdix == 0) && (cnt < 0x3)) {
+				fe_cal_tx_offset_mdix(port_num, 100);
+				cnt++;
+				if (fe_cal_tx_offset_flag_mdix == 0)
+					pr_info
+					    ("FeTxOffsetAnaCal mdix wait!(%d)\n",
+					     cnt);
+			}
+		}
+		break;
+
+	case RAETH_TXG_AMP_CALIBRATION:
+		/* per-port: reset retry state for every port */
+		for (port_num = 0; port_num < 5; port_num++) {
+			cnt = 0;
+			fe_cal_flag = 0;
+			while ((fe_cal_flag == 0) && (cnt < 0x3)) {
+				fe_cal_tx_amp(port_num, 300);
+				cnt++;
+				if (fe_cal_flag == 0)
+					pr_info("FETxAmpAnaCal wait!(%d)\n",
+						cnt);
+			}
+			cnt = 0;
+			fe_cal_flag_mdix = 0;
+			while ((fe_cal_flag_mdix == 0) && (cnt < 0x3)) {
+				fe_cal_tx_amp_mdix(port_num, 300);
+				cnt++;
+				if (fe_cal_flag_mdix == 0)
+					pr_info
+					    ("FETxAmpAnaCal mdix wait!(%d)\n",
+					     cnt);
+			}
+		}
+		break;
+
+	case GE_TXG_R50_CALIBRATION:
+		cnt = 0;
+		ge_cal_r50_flag = 0;
+		while ((ge_cal_r50_flag == 0) && (cnt < 0x3)) {
+			ge_cal_r50(0, 20);
+			cnt++;
+			if (ge_cal_r50_flag == 0)
+				pr_info(" GE R50 wait! (%d)\n", cnt);
+		}
+		break;
+
+	case GE_TXG_OFFSET_CALIBRATION:
+		cnt = 0;
+		ge_cal_tx_offset_flag = 0;
+		while ((ge_cal_tx_offset_flag == 0) && (cnt < 0x3)) {
+			ge_cal_tx_offset(port_num, 20);
+			cnt++;
+			if (ge_cal_tx_offset_flag == 0)
+				pr_info("GeTxOffsetAnaCal wait!(%d)\n",
+					cnt);
+		}
+		break;
+
+	case GE_TXG_AMP_CALIBRATION:
+		cnt = 0;
+		ge_cal_flag = 0;
+		while ((ge_cal_flag == 0) && (cnt < 0x3)) {
+			ge_cal_tx_amp(port_num, 20);
+			cnt++;
+			if (ge_cal_flag == 0)
+				pr_info("GETxAmpAnaCal wait!(%d)\n",
+					cnt);
+		}
+		break;
+	default:
+		ret = 1;
+		break;
+	}
+
+	return ret;
+}
+
+/* Device-tree compatibles bound to this gigabit-switch driver. */
+static const struct of_device_id mediatek_gsw_match[] = {
+	{.compatible = "mediatek,mt7623-gsw"},
+	{.compatible = "mediatek,mt7621-gsw"},
+	{},	/* sentinel */
+};
+
+MODULE_DEVICE_TABLE(of, mediatek_gsw_match);
+
+/* mtk_gsw_probe - bind the gigabit switch: parse DT resources and stash
+ * them in a struct mtk_gsw as platform driver data.
+ *
+ * Collects the switch IRQ, the ethsys/pctl regmaps, the reset GPIO and
+ * pinctrl "reset" state (standalone MT7530 only; skipped for MCM), plus
+ * the mt7530/b3v regulators.  Returns 0 on success or a negative errno.
+ *
+ * Fixes over the original:
+ *  - irq_of_parse_and_map() returns 0 on failure (never negative), so
+ *    the "< 0" check could never fire;
+ *  - of_parse_phandle() returns NULL on failure, not ERR_PTR(), and
+ *    PTR_ERR(NULL) would have returned 0 (false success);
+ *  - the result of syscon_node_to_regmap() is now the value checked;
+ *  - pinctrl_get() returns ERR_PTR() on failure, never NULL, so the
+ *    old "if (gsw->pins)" error path was dead code;
+ *  - the device_node reference from of_parse_phandle() is dropped.
+ */
+static int mtk_gsw_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct device_node *pctl;
+	struct mtk_gsw *gsw;
+	int err;
+	const char *pm;
+
+	gsw = devm_kzalloc(&pdev->dev, sizeof(struct mtk_gsw), GFP_KERNEL);
+	if (!gsw)
+		return -ENOMEM;
+
+	gsw->dev = &pdev->dev;
+	gsw->trgmii_force = 2000;
+	gsw->irq = irq_of_parse_and_map(np, 0);
+	if (gsw->irq <= 0)	/* 0 = mapping failed */
+		return -EINVAL;
+
+	err = of_property_read_string(pdev->dev.of_node, "mcm", &pm);
+	if (!err && !strcasecmp(pm, "enable")) {
+		gsw->mcm = true;
+		pr_info("== MT7530 MCM ==\n");
+	}
+
+	gsw->ethsys = syscon_regmap_lookup_by_phandle(np, "mediatek,ethsys");
+	if (IS_ERR(gsw->ethsys)) {
+		pr_err("fail at %s %d\n", __func__, __LINE__);
+		return PTR_ERR(gsw->ethsys);
+	}
+
+	if (!gsw->mcm) {
+		gsw->reset_pin = of_get_named_gpio(np, "mediatek,reset-pin", 0);
+		if (gsw->reset_pin < 0) {
+			pr_err("fail at %s %d\n", __func__, __LINE__);
+			return -1;
+		}
+		pr_debug("reset_pin_port= %d\n", gsw->reset_pin);
+
+		pctl = of_parse_phandle(np, "mediatek,pctl-regmap", 0);
+		if (!pctl) {	/* of_parse_phandle() returns NULL, not ERR_PTR */
+			pr_err("fail at %s %d\n", __func__, __LINE__);
+			return -ENODEV;
+		}
+
+		gsw->pctl = syscon_node_to_regmap(pctl);
+		of_node_put(pctl);	/* drop ref taken by of_parse_phandle() */
+		if (IS_ERR(gsw->pctl)) {
+			pr_err("fail at %s %d\n", __func__, __LINE__);
+			return PTR_ERR(gsw->pctl);
+		}
+
+		gsw->pins = pinctrl_get(&pdev->dev);
+		if (!IS_ERR(gsw->pins)) {	/* pinctrl_get() never returns NULL */
+			gsw->ps_reset =
+			    pinctrl_lookup_state(gsw->pins, "reset");
+
+			if (IS_ERR(gsw->ps_reset)) {
+				dev_err(&pdev->dev,
+					"failed to lookup the gsw_reset state\n");
+				return PTR_ERR(gsw->ps_reset);
+			}
+		} else {
+			dev_err(&pdev->dev, "gsw get pinctrl fail\n");
+			return PTR_ERR(gsw->pins);
+		}
+	}
+
+	gsw->supply = devm_regulator_get(&pdev->dev, "mt7530");
+	if (IS_ERR(gsw->supply)) {
+		pr_info("fail at %s %d\n", __func__, __LINE__);
+		return PTR_ERR(gsw->supply);
+	}
+
+	if (gsw->mcm) {
+		gsw->b3v = devm_regulator_get(&pdev->dev, "b3v");
+		if (IS_ERR(gsw->b3v))
+			return PTR_ERR(gsw->b3v);
+	}
+
+	gsw->wllll = of_property_read_bool(np, "mediatek,wllll");
+
+	platform_set_drvdata(pdev, gsw);
+
+	return 0;
+}
+
+/* mtk_gsw_remove - unbind the switch; all resources are devm-managed,
+ * so only the driver-data pointer needs clearing.
+ */
+static int mtk_gsw_remove(struct platform_device *pdev)
+{
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+/* Platform driver glue; matched via the device-tree table above. */
+static struct platform_driver gsw_driver = {
+	.probe = mtk_gsw_probe,
+	.remove = mtk_gsw_remove,
+	.driver = {
+		   .name = "mtk-gsw",
+		   .owner = THIS_MODULE,
+		   .of_match_table = mediatek_gsw_match,
+		   },
+};
+
+module_platform_driver(gsw_driver);
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_switch.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_switch.h
new file mode 100644
index 0000000..7d3a9ee
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_switch.h
@@ -0,0 +1,95 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef RA_SWITCH_H
+#define RA_SWITCH_H
+
+extern struct net_device *dev_raether;
+#define ANACAL_INIT		0x01
+#define ANACAL_ERROR		0xFD
+#define ANACAL_SATURATION	0xFE
+#define	ANACAL_FINISH		0xFF
+#define ANACAL_PAIR_A		0
+#define ANACAL_PAIR_B		1
+#define ANACAL_PAIR_C		2
+#define ANACAL_PAIR_D		3
+#define DAC_IN_0V		0x000
+#define DAC_IN_2V		0x0f0
+#define TX_AMP_OFFSET_0MV	0x20
+#define TX_AMP_OFFSET_VALID_BITS	6
+#define FE_CAL_P0			0
+#define FE_CAL_P1			1
+#if defined(CONFIG_MACH_LEOPARD)
+#define FE_CAL_COMMON			1
+#else
+#define FE_CAL_COMMON			0
+#endif
+
+void fe_sw_init(void);
+void fe_sw_preinit(struct END_DEVICE *ei_local);
+void fe_sw_deinit(struct END_DEVICE *ei_local);
+void sw_ioctl(struct ra_switch_ioctl_data *ioctl_data);
+irqreturn_t esw_interrupt(int irq, void *resv);
+irqreturn_t gsw_interrupt(int irq, void *resv);
+
+/* struct mtk_gsw -	the structure that holds the SoC specific data
+ * @eth:		Back-pointer to the ethernet driver context
+ * @dev:		The Device struct
+ * @base:		The base address
+ * @piac_offset:	The PIAC base may change depending on SoC
+ * @irq:		The IRQ we are using
+ * @port4:		The port4 mode on MT7620
+ * @autopoll:		Is MDIO autopolling enabled
+ * @ethsys:		The ethsys register map
+ * @pctl:		The pin control register map
+ * @trgmii_force:	Forced TRGMII rate in Mbps (default 2000)
+ * @wllll:		Set from the "mediatek,wllll" DT property
+ * @mcm:		True when the switch is the MCM (in-package) variant
+ * @pins:		Pinctrl handle (standalone MT7530 only)
+ * @ps_default:		Default pinctrl state
+ * @ps_reset:		"reset" pinctrl state
+ * @reset_pin:		GPIO number of the switch reset line
+ * @supply:		mt7530 core supply regulator
+ * @b3v:		Extra 3.3V regulator used by the MCM variant
+ */
+struct mtk_gsw {
+	struct mtk_eth		*eth;
+	struct device		*dev;
+	void __iomem		*base;
+	u32			piac_offset;
+	int			irq;
+	int			port4;
+	unsigned long int	autopoll;
+
+	struct regmap		*ethsys;
+	struct regmap		*pctl;
+
+	int			trgmii_force;
+	bool			wllll;
+	bool			mcm;
+	struct pinctrl *pins;
+	struct pinctrl_state *ps_default;
+	struct pinctrl_state *ps_reset;
+	int reset_pin;
+	struct regulator *supply;
+	struct regulator *b3v;
+};
+
+extern u8 fe_cal_flag;
+extern u8 fe_cal_flag_mdix;
+extern u8 fe_cal_tx_offset_flag;
+extern u8 fe_cal_tx_offset_flag_mdix;
+extern u8 fe_cal_r50_flag;
+extern u8 fe_cal_vbg_flag;
+void fe_cal_r50(u8 port_num, u32 delay);
+void fe_cal_tx_amp(u8 port_num, u32 delay);
+void fe_cal_tx_amp_mdix(u8 port_num, u32 delay);
+void fe_cal_tx_offset(u8 port_num, u32 delay);
+void fe_cal_tx_offset_mdix(u8 port_num, u32 delay);
+void fe_cal_vbg(u8 port_num, u32 delay);
+/*giga port calibration*/
+void ge_cal_r50(u8 port_num, u32 delay);
+void ge_cal_tx_amp(u8 port_num, u32 delay);
+void ge_cal_tx_offset(u8 port_num, u32 delay);
+void do_ge_phy_all_analog_cal(u8 phyaddr);
+#endif
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raeth_config.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raeth_config.h
new file mode 100644
index 0000000..428bbf7
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raeth_config.h
@@ -0,0 +1,329 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef RAETH_CONFIG_H
+#define RAETH_CONFIG_H
+
+/* compile flag for features */
+#define DELAY_INT
+
+#define CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+/*#define CONFIG_QDMA_QOS_WEB*/
+#define CONFIG_QDMA_QOS_MARK
+
+#if !defined(CONFIG_SOC_MT7621)
+#define CONFIG_RAETH_NAPI
+#define CONFIG_RAETH_TX_RX_INT_SEPARATION
+#define CONFIG_RAETH_NAPI_TX_RX
+//#define CONFIG_RAETH_NAPI_RX_ONLY
+#endif
+
+#if defined(CONFIG_SOC_MT7621)
+#define CONFIG_GE1_RGMII_FORCE_1000
+#define CONFIG_GE1_RGMII_FORCE_1200
+#define CONFIG_RA_NETWORK_TASKLET_BH
+#endif
+/*CONFIG_RA_NETWORK_TASKLET_BH*/
+/*CONFIG_RA_NETWORK_WORKQUEUE_BH*/
+/*CONFIG_RAETH_SPECIAL_TAG*/
+#define CONFIG_RAETH_CHECKSUM_OFFLOAD
+#if !defined(CONFIG_SOC_MT7621)
+//#define CONFIG_RAETH_HW_LRO
+#endif
+/* #define CONFIG_RAETH_HW_LRO_FORCE */
+/* #define CONFIG_RAETH_HW_LRO_DVT */
+//#define CONFIG_RAETH_HW_VLAN_TX
+/*CONFIG_RAETH_HW_VLAN_RX*/
+#define CONFIG_RAETH_TSO
+/*#define CONFIG_RAETH_ETHTOOL*/
+#define CONFIG_RAETH_QDMA
+/*CONFIG_RAETH_QDMATX_QDMARX*/
+/*CONFIG_HW_SFQ*/
+//#define CONFIG_RAETH_HW_IOCOHERENT
+#define	CONFIG_RAETH_GMAC2
+/*#define CONFIG_RAETH_RSS_4RING*/
+/*#define CONFIG_RAETH_RSS_2RING*/
+/* definitions */
+#ifdef	DELAY_INT
+#define FE_DLY_INT	BIT(0)
+#else
+#define FE_DLY_INT	(0)
+#endif
+#ifdef	CONFIG_RAETH_HW_LRO
+#define FE_HW_LRO	BIT(1)
+#else
+#define FE_HW_LRO	(0)
+#endif
+#ifdef	CONFIG_RAETH_HW_LRO_FORCE
+#define FE_HW_LRO_FPORT	BIT(2)
+#else
+#define FE_HW_LRO_FPORT	(0)
+#endif
+#ifdef	CONFIG_RAETH_LRO
+#define FE_SW_LRO	BIT(3)
+#else
+#define FE_SW_LRO	(0)
+#endif
+#ifdef	CONFIG_RAETH_QDMA
+#define FE_QDMA		BIT(4)
+#else
+#define FE_QDMA		(0)
+#endif
+#ifdef	CONFIG_RAETH_NAPI
+#define FE_INT_NAPI	BIT(5)
+#else
+#define FE_INT_NAPI	(0)
+#endif
+#ifdef	CONFIG_RA_NETWORK_WORKQUEUE_BH
+#define FE_INT_WORKQ	BIT(6)
+#else
+#define FE_INT_WORKQ	(0)
+#endif
+#ifdef	CONFIG_RA_NETWORK_TASKLET_BH
+#define FE_INT_TASKLET	BIT(7)
+#else
+#define FE_INT_TASKLET	(0)
+#endif
+#ifdef	CONFIG_RAETH_TX_RX_INT_SEPARATION
+#define FE_IRQ_SEPARATE	BIT(8)
+#else
+#define FE_IRQ_SEPARATE	(0)
+#endif
+#define FE_GE2_SUPPORT	BIT(9)
+#ifdef	CONFIG_RAETH_ETHTOOL
+#define FE_ETHTOOL	BIT(10)
+#else
+#define FE_ETHTOOL	(0)
+#endif
+#ifdef	CONFIG_RAETH_CHECKSUM_OFFLOAD
+#define FE_CSUM_OFFLOAD	BIT(11)
+#else
+#define FE_CSUM_OFFLOAD	(0)
+#endif
+#ifdef	CONFIG_RAETH_TSO
+#define FE_TSO		BIT(12)
+#else
+#define FE_TSO		(0)
+#endif
+#ifdef	CONFIG_RAETH_TSOV6
+#define FE_TSO_V6	BIT(13)
+#else
+#define FE_TSO_V6	(0)
+#endif
+#ifdef	CONFIG_RAETH_HW_VLAN_TX
+#define FE_HW_VLAN_TX	BIT(14)
+#else
+#define FE_HW_VLAN_TX	(0)
+#endif
+#ifdef	CONFIG_RAETH_HW_VLAN_RX
+#define FE_HW_VLAN_RX	BIT(15)
+#else
+#define FE_HW_VLAN_RX	(0)
+#endif
+#ifdef	CONFIG_RAETH_QDMA
+#define FE_QDMA_TX	BIT(16)
+#else
+#define FE_QDMA_TX	(0)
+#endif
+#ifdef	CONFIG_RAETH_QDMATX_QDMARX
+#define FE_QDMA_RX	BIT(17)
+#else
+#define FE_QDMA_RX	(0)
+#endif
+#ifdef	CONFIG_HW_SFQ
+#define FE_HW_SFQ	BIT(18)
+#else
+#define FE_HW_SFQ	(0)
+#endif
+#define FE_HW_IOCOHERENT BIT(19)
+
+/* NOTE(review): FE_RSS_4RING reuses BIT(20) (same as FE_FPGA_MODE) and
+ * FE_RSS_2RING reuses BIT(2) (same as FE_HW_LRO_FPORT).  If the paired
+ * CONFIG options can be enabled together, the feature bits alias —
+ * confirm the configs are mutually exclusive or renumber the bits.
+ */
+#ifdef	CONFIG_MTK_FPGA
+#define FE_FPGA_MODE	BIT(20)
+#else
+#define FE_FPGA_MODE	(0)
+#endif
+
+#ifdef CONFIG_RAETH_RSS_4RING
+#define FE_RSS_4RING	BIT(20)
+#else
+#define FE_RSS_4RING	(0)
+#endif
+
+#ifdef CONFIG_RAETH_RSS_2RING
+#define FE_RSS_2RING	BIT(2)
+#else
+#define FE_RSS_2RING	(0)
+#endif
+
+#ifdef	CONFIG_RAETH_HW_LRO_REASON_DBG
+#define FE_HW_LRO_DBG	BIT(21)
+#else
+#define FE_HW_LRO_DBG	(0)
+#endif
+#ifdef CONFIG_RAETH_INT_DBG
+#define FE_RAETH_INT_DBG	BIT(22)
+#else
+#define FE_RAETH_INT_DBG	(0)
+#endif
+#ifdef CONFIG_USER_SNMPD
+#define USER_SNMPD	BIT(23)
+#else
+#define USER_SNMPD	(0)
+#endif
+#ifdef CONFIG_TASKLET_WORKQUEUE_SW
+#define TASKLET_WORKQUEUE_SW	BIT(24)
+#else
+#define TASKLET_WORKQUEUE_SW	(0)
+#endif
+#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+#define FE_HW_NAT	BIT(25)
+#else
+#define FE_HW_NAT	(0)
+#endif
+#ifdef	CONFIG_RAETH_NAPI_TX_RX
+#define FE_INT_NAPI_TX_RX	BIT(26)
+#else
+#define FE_INT_NAPI_TX_RX	(0)
+#endif
+#ifdef	CONFIG_QDMA_MQ
+#define QDMA_MQ       BIT(27)
+#else
+#define QDMA_MQ       (0)
+#endif
+#ifdef	CONFIG_RAETH_NAPI_RX_ONLY
+#define FE_INT_NAPI_RX_ONLY	BIT(28)
+#else
+#define FE_INT_NAPI_RX_ONLY	(0)
+#endif
+#ifdef	CONFIG_QDMA_SUPPORT_QOS
+#define FE_QDMA_FQOS	BIT(29)
+#else
+#define FE_QDMA_FQOS	(0)
+#endif
+
+#ifdef	CONFIG_QDMA_QOS_WEB
+#define QDMA_QOS_WEB	BIT(30)
+#else
+#define QDMA_QOS_WEB	(0)
+#endif
+
+#ifdef	CONFIG_QDMA_QOS_MARK
+#define QDMA_QOS_MARK	BIT(31)
+#else
+#define QDMA_QOS_MARK	(0)
+#endif
+
+/* Chip identifiers compared against ei_local->chip_name.
+ * NOTE(review): LEOPARD_FE and MT7986_FE share the value 1985, so
+ * chip_name comparisons cannot tell them apart — confirm this aliasing
+ * is intentional.
+ */
+#define MT7626_FE	(7626)
+#define MT7623_FE	(7623)
+#define MT7622_FE	(7622)
+#define MT7621_FE	(7621)
+#define LEOPARD_FE		(1985)
+#define MT7986_FE		(1985)
+
+#define GMAC2 BIT(0)
+#define LAN_WAN_SUPPORT BIT(1)
+#define WAN_AT_P0 BIT(2)
+#define WAN_AT_P4 BIT(3)
+#if defined(CONFIG_GE1_RGMII_FORCE_1000)
+#define    GE1_RGMII_FORCE_1000		BIT(4)
+#define    GE1_TRGMII_FORCE_2000	(0)
+#define    GE1_TRGMII_FORCE_2600	(0)
+#define    MT7530_TRGMII_PLL_25M	(0x0A00)
+#define    MT7530_TRGMII_PLL_40M	(0x0640)
+#elif defined(CONFIG_GE1_TRGMII_FORCE_2000)
+#define    GE1_TRGMII_FORCE_2000	BIT(5)
+#define    GE1_RGMII_FORCE_1000		(0)
+#define    GE1_TRGMII_FORCE_2600	(0)
+#define    MT7530_TRGMII_PLL_25M	(0x1400)
+#define    MT7530_TRGMII_PLL_40M	(0x0C80)
+#elif defined(CONFIG_GE1_TRGMII_FORCE_2600)
+#define    GE1_TRGMII_FORCE_2600	BIT(6)
+#define    GE1_RGMII_FORCE_1000		(0)
+#define    GE1_TRGMII_FORCE_2000	(0)
+#define    MT7530_TRGMII_PLL_25M	(0x1A00)
+#define    MT7530_TRGMII_PLL_40M	(0x1040)
+#define    TRGMII
+#else
+#define    GE1_RGMII_FORCE_1000		(0)
+#define    GE1_TRGMII_FORCE_2000	(0)
+#define    GE1_TRGMII_FORCE_2600	(0)
+#define    MT7530_TRGMII_PLL_25M	(0)
+#define    MT7530_TRGMII_PLL_40M	(0)
+#endif
+
+#define    GE1_RGMII_AN    BIT(7)
+#define    GE1_SGMII_AN    BIT(8)
+#define    GE1_SGMII_FORCE_2500    BIT(9)
+#define    GE1_RGMII_ONE_EPHY    BIT(10)
+#define    RAETH_ESW    BIT(11)
+#define    GE1_RGMII_NONE    BIT(12)
+#define    GE2_RGMII_FORCE_1000    BIT(13)
+#define    GE2_RGMII_AN    BIT(14)
+#define    GE2_INTERNAL_GPHY    BIT(15)
+#define    GE2_SGMII_AN    BIT(16)
+#define    GE2_SGMII_FORCE_2500    BIT(17)
+#define    MT7622_EPHY    BIT(18)
+#define    RAETH_SGMII	BIT(19)
+#define    GE2_RAETH_SGMII	BIT(20)
+#define    LEOPARD_EPHY	BIT(21)
+#define    SGMII_SWITCH	BIT(22)
+#define    LEOPARD_EPHY_GMII BIT(23)
+/* /#ifndef CONFIG_MAC_TO_GIGAPHY_MODE_ADDR */
+/* #define CONFIG_MAC_TO_GIGAPHY_MODE_ADDR (0) */
+/* #endif */
+/* #ifndef CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2 */
+/* #define CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2 (0) */
+/* #endif */
+
+/* macros */
+/* fe_features_config - OR every compile-time feature flag (FE_*) into
+ * end_device->features.  Flags whose CONFIG option is off expand to 0,
+ * so the macro is a no-op for disabled features.  Statement-expression
+ * style block macro; callers invoke it like a function.
+ */
+#define fe_features_config(end_device)	\
+{					\
+end_device->features = 0;		\
+end_device->features |= FE_DLY_INT;	\
+end_device->features |= FE_HW_LRO;	\
+end_device->features |= FE_HW_LRO_FPORT;\
+end_device->features |= FE_HW_LRO_DBG;	\
+end_device->features |= FE_SW_LRO;	\
+end_device->features |= FE_QDMA;	\
+end_device->features |= FE_INT_NAPI;	\
+end_device->features |= FE_INT_WORKQ;	\
+end_device->features |= FE_INT_TASKLET;	\
+end_device->features |= FE_IRQ_SEPARATE;\
+end_device->features |= FE_ETHTOOL;	\
+end_device->features |= FE_CSUM_OFFLOAD;\
+end_device->features |= FE_TSO;		\
+end_device->features |= FE_TSO_V6;	\
+end_device->features |= FE_HW_VLAN_TX;	\
+end_device->features |= FE_HW_VLAN_RX;	\
+end_device->features |= FE_QDMA_TX;	\
+end_device->features |= FE_QDMA_RX;	\
+end_device->features |= FE_HW_SFQ;	\
+end_device->features |= FE_FPGA_MODE;	\
+end_device->features |= FE_HW_NAT;	\
+end_device->features |= FE_INT_NAPI_TX_RX; \
+end_device->features |= FE_INT_NAPI_RX_ONLY; \
+end_device->features |= FE_QDMA_FQOS;	\
+end_device->features |= QDMA_QOS_WEB;	\
+end_device->features |= QDMA_QOS_MARK;	\
+end_device->features |= FE_RSS_4RING;	\
+end_device->features |= FE_RSS_2RING;	\
+}
+
+/* fe_architecture_config - seed end_device->architecture with the
+ * compile-time GE1 TRGMII force-rate flags (0 when the CONFIG options
+ * are off); other architecture bits are set at runtime.
+ */
+#define fe_architecture_config(end_device)              \
+{                                                       \
+end_device->architecture = 0;                           \
+end_device->architecture |= GE1_TRGMII_FORCE_2000;      \
+end_device->architecture |= GE1_TRGMII_FORCE_2600;      \
+}
+#endif
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raeth_reg.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raeth_reg.h
new file mode 100644
index 0000000..df57115
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raeth_reg.h
@@ -0,0 +1,1366 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef RAETH_REG_H
+#define RAETH_REG_H
+
+#include <linux/mii.h>		/* for struct mii_if_info in ra2882ethreg.h */
+#include <linux/version.h>	/* check linux version */
+#include <linux/interrupt.h>	/* for "struct tasklet_struct" */
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/workqueue.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+
+#include "raether.h"
+
+#define MAX_PACKET_SIZE	1514
+#define	MIN_PACKET_SIZE 60
+#if defined(CONFIG_MACH_MT7623) || defined(CONFIG_SOC_MT7621)
+#define MAX_PTXD_LEN 0x3fff	/* 16k */
+#define MAX_QTXD_LEN 0x3fff	/* 16k */
+#else
+#define MAX_PTXD_LEN 0x3fff	/* 16k */
+#define MAX_QTXD_LEN 0xffff
+#endif
+
+#define phys_to_bus(a) (a)
+
+extern void __iomem *ethdma_sysctl_base;
+extern void __iomem *ethdma_frame_engine_base;
+
+/* bits range: for example BITS(16,23) = 0xFF0000
+ *   ==>  (BIT(m)-1)   = 0x0000FFFF     ~(BIT(m)-1)   => 0xFFFF0000
+ *   ==>  (BIT(n+1)-1) = 0x00FFFFFF
+ */
+#define BITS(m, n)   (~(BIT(m) - 1) & ((BIT(n) - 1) | BIT(n)))
+
+#define ETHER_ADDR_LEN  6
+
+/*  Phy Vender ID list */
+
+#define EV_ICPLUS_PHY_ID0 0x0243
+#define EV_ICPLUS_PHY_ID1 0x0D90
+#define EV_MARVELL_PHY_ID0 0x0141
+#define EV_MARVELL_PHY_ID1 0x0CC2
+#define EV_VTSS_PHY_ID0 0x0007
+#define EV_VTSS_PHY_ID1 0x0421
+
+#define ETHSYS_BASE 0x1b000000
+#define SGMII_CONFIG_0	BIT(9) /*SGMII path enable of GMAC1*/
+#define SGMII_CONFIG_1	BIT(8) /*SGMII path enable of GMAC2*/
+
+#if defined(CONFIG_PINCTRL_MT7622)
+#define SGMII_REG_BASE0	(0x1b128000)
+#define SGMII_REG_PHYA_BASE0	(0x1b12a000)
+#define SGMII_REG_BASE1	(0)
+#define SGMII_REG_PHYA_BASE1	(0)
+#elif defined(CONFIG_MACH_LEOPARD)
+#define SGMII_REG_BASE0	(0x1b128000)
+#define SGMII_REG_PHYA_BASE0	(0x1b128100)
+#define SGMII_REG_BASE1	(0x1b130000)
+#define SGMII_REG_PHYA_BASE1	(0x1b130100)
+#else
+#define SGMII_REG_BASE0	(0)
+#define SGMII_REG_PHYA_BASE0	(0)
+#define SGMII_REG_BASE1	(0)
+#define SGMII_REG_PHYA_BASE1	(0)
+#endif
+#define ETHSYS_MAC_BASE	(0x1b110000)
+
+#if defined(CONFIG_MACH_LEOPARD)
+#define FE_RSTCTL   0x1B000034
+#define INFRA_BASE  0x1000070C
+#define GEPHY_CTRL0 0x10000710
+#define GPIO_GO_BASE GEPHY_CTRL0
+#define GPIO_MODE_BASE 0x10217300
+#else
+#define INFRA_BASE  0
+#define FE_RSTCTL   0
+#define GPIO_GO_BASE 0x10211800
+#define GPIO_MODE_BASE 0x10211300
+#endif
+
+/* ETHDMASYS base address
+ * for I2S/PCM/GDMA/HSDMA/FE/GMAC
+ */
+#define ETHDMASYS_BASE			ethdma_sysctl_base
+#define ETHDMASYS_FRAME_ENGINE_BASE	ethdma_frame_engine_base
+
+#define ETHDMASYS_SYSCTL_BASE            ETHDMASYS_BASE
+#define ETHDMASYS_PPE_BASE		(ETHDMASYS_FRAME_ENGINE_BASE + 0x0C00)
+#define ETHDMASYS_ETH_MAC_BASE		(ETHDMASYS_FRAME_ENGINE_BASE + 0x10000)
+#if defined(CONFIG_MACH_MT7623) || defined(CONFIG_SOC_MT7621)
+#define ETHDMASYS_ETH_SW_BASE       (ETHDMASYS_FRAME_ENGINE_BASE + 0x10000)
+#else
+#define ETHDMASYS_ETH_SW_BASE		(ETHDMASYS_FRAME_ENGINE_BASE + 0x18000)
+#endif
+
+#define RALINK_FRAME_ENGINE_BASE	ETHDMASYS_FRAME_ENGINE_BASE
+#define RALINK_PPE_BASE                 ETHDMASYS_PPE_BASE
+#define RALINK_SYSCTL_BASE		ETHDMASYS_SYSCTL_BASE
+#define RALINK_ETH_MAC_BASE		ETHDMASYS_ETH_MAC_BASE
+#define RALINK_ETH_SW_BASE		ETHDMASYS_ETH_SW_BASE
+
+#define RSTCTL_FE_RST			BIT(6)
+#define RALINK_FE_RST			RSTCTL_FE_RST
+
+#define RSTCTL_ETH_RST			BIT(23)
+#define RALINK_ETH_RST			RSTCTL_ETH_RST
+
+/* FE_INT_STATUS: PDMA interrupt status/enable register bit positions */
+#define RX_COHERENT      BIT(31)
+#define RX_DLY_INT       BIT(30)
+#define TX_COHERENT      BIT(29)
+#define TX_DLY_INT       BIT(28)
+#define RING3_RX_DLY_INT    BIT(27)
+#define RING2_RX_DLY_INT    BIT(26)
+#define RING1_RX_DLY_INT    BIT(25)
+#define RING0_RX_DLY_INT    BIT(30) /* NOTE(review): aliases RX_DLY_INT (bit 30) and breaks the RING3..RING1 = 27..25 sequence — ring 0 presumably shares the legacy delay-int bit; confirm against the datasheet */
+
+#define RSS_RX_INT0	 (RX_DONE_INT0 | RX_DONE_INT1 | \
+			  RING0_RX_DLY_INT | RING1_RX_DLY_INT)
+
+#define RSS_RX_RING0	 (RX_DONE_INT0 | RING0_RX_DLY_INT)
+#define RSS_RX_RING1	 (RX_DONE_INT1 | RING1_RX_DLY_INT)
+#define RSS_RX_RING2	 (RX_DONE_INT2 | RING2_RX_DLY_INT)
+#define RSS_RX_RING3	 (RX_DONE_INT3 | RING3_RX_DLY_INT)
+
+#define RSS_RX_INT1	 (RX_DONE_INT2 | RX_DONE_INT3 | \
+			  RING2_RX_DLY_INT | RING3_RX_DLY_INT)
+
+#define RSS_RX_DLY_INT0	(RING0_RX_DLY_INT | RING1_RX_DLY_INT)
+#define RSS_RX_DLY_INT1	(RING2_RX_DLY_INT | RING3_RX_DLY_INT)
+
+#define RSS_RX_DLY_INT	 (RING0_RX_DLY_INT | RING1_RX_DLY_INT | \
+			  RING2_RX_DLY_INT | RING3_RX_DLY_INT)
+
+#define RXD_ERROR	 BIT(24)
+#define ALT_RPLC_INT3    BIT(23)
+#define ALT_RPLC_INT2    BIT(22)
+#define ALT_RPLC_INT1    BIT(21)
+
+#define RX_DONE_INT3     BIT(19)
+#define RX_DONE_INT2     BIT(18)
+#define RX_DONE_INT1     BIT(17)
+#define RX_DONE_INT0     BIT(16)
+
+#define TX_DONE_INT3     BIT(3)
+#define TX_DONE_INT2     BIT(2)
+#define TX_DONE_INT1     BIT(1)
+#define TX_DONE_INT0     BIT(0)
+
+#define RLS_COHERENT     BIT(29)
+#define RLS_DLY_INT      BIT(28)
+#define RLS_DONE_INT     BIT(0)
+
+#define FE_INT_ALL		(TX_DONE_INT3 | TX_DONE_INT2 | \
+				 TX_DONE_INT1 | TX_DONE_INT0 | \
+				 RX_DONE_INT0 | RX_DONE_INT1 | \
+				 RX_DONE_INT2 | RX_DONE_INT3)
+
+#define QFE_INT_ALL		(RLS_DONE_INT | RX_DONE_INT0 | \
+				 RX_DONE_INT1 | RX_DONE_INT2 | RX_DONE_INT3)
+#define QFE_INT_DLY_INIT	(RLS_DLY_INT | RX_DLY_INT)
+#define RX_INT_ALL		(RX_DONE_INT0 | RX_DONE_INT1 | \
+				 RX_DONE_INT2 | RX_DONE_INT3 | \
+				 RING0_RX_DLY_INT | RING1_RX_DLY_INT | \
+				 RING2_RX_DLY_INT | RING3_RX_DLY_INT | RX_DLY_INT)
+#define TX_INT_ALL		(TX_DONE_INT0 | TX_DLY_INT)
+
+#define NUM_QDMA_PAGE	    512
+#define QDMA_PAGE_SIZE      2048
+
+/* SW_INT_STATUS */
+#define ESW_PHY_POLLING		(RALINK_ETH_MAC_BASE + 0x0000)
+#define MAC1_WOL		(RALINK_ETH_SW_BASE + 0x0110)
+#define WOL_INT_CLR		BIT(17)
+#define WOL_INT_EN		BIT(1)
+#define WOL_EN			BIT(0)
+
+#define P5_LINK_CH		BIT(5)
+#define P4_LINK_CH		BIT(4)
+#define P3_LINK_CH		BIT(3)
+#define P2_LINK_CH		BIT(2)
+#define P1_LINK_CH		BIT(1)
+#define P0_LINK_CH		BIT(0)
+
+#define RX_BUF_ALLOC_SIZE	2000
+#define FASTPATH_HEADROOM	64
+
+#define ETHER_BUFFER_ALIGN	32	/* Align on a cache line */
+
+#define ETHER_ALIGNED_RX_SKB_ADDR(addr) \
+	((((unsigned long)(addr) + ETHER_BUFFER_ALIGN - 1) & \
+	~(ETHER_BUFFER_ALIGN - 1)) - (unsigned long)(addr)) /* yields the byte offset needed to round addr up to ETHER_BUFFER_ALIGN — i.e. the padding, not the aligned address itself, despite the name */
+
+struct PSEUDO_ADAPTER {
+	struct net_device *raeth_dev;
+	struct net_device *pseudo_dev;
+	struct net_device_stats stat;
+	struct mii_if_info mii_info;
+};
+
+#define MAX_PSEUDO_ENTRY               1
+
+/* Register Categories Definition */
+#if 0
+#define FE_PSE_OFFSET 0x0000
+#define CDMA_OFFSET 0x0400
+#define GDM1_OFFSET 0x0500
+#define ADMA_OFFSET 0x0800
+#define CDMQ_OFFSET 0x1400
+#define GDM2_OFFSET 0x1500
+#define CDM_OFFSET 0x1600
+#define QDMA_OFFSET 0x1800
+#define RSS_OFFSET 0x3000
+#define EDMA0_OFFSET 0x3800
+#define EDMA1_OFFSET 0x3C00
+#else
+#define FE_PSE_OFFSET 0x0000
+#define CDMA_OFFSET 0x0400
+#define GDM1_OFFSET 0x0500
+#define ADMA_OFFSET 0x4000
+#define CDMQ_OFFSET 0x1400
+#define GDM2_OFFSET 0x1500
+#define CDM_OFFSET 0x1600
+#define QDMA_OFFSET 0x4400
+#define RSS_OFFSET 0x2800
+#define EDMA0_OFFSET 0x3800
+#define EDMA1_OFFSET 0x3C00
+#endif
+
+/* Register Map Detail */
+/* FE/PSE */
+#define SYSCFG1			            (RALINK_SYSCTL_BASE + 0x14)
+#define CLK_CFG_0		            (RALINK_SYSCTL_BASE + 0x2C)
+#define PAD_RGMII2_MDIO_CFG     (RALINK_SYSCTL_BASE + 0x58)
+#define FE_GLO_CFG		 (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x00)
+#define FE_RST_GL		   (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x04)
+#define FE_INT_STATUS2 (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x08)
+#define FOE_TS_T	     (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x10)
+#define FE_INT_ENABLE2 (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x0c)
+#define FE_INT_GRP		 (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x20)
+#define PSE_FQ_CFG     (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x40)
+#define CDMA_FC_CFG    (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x44)
+#define GDMA1_FC_CFG   (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x48)
+#define GDMA2_FC_CFG   (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x4C)
+#define CDMA_OQ_STA    (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x50)
+#define GDMA1_OQ_STA   (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x54)
+#define GDMA2_OQ_STA   (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x58)
+#define PSE_IQ_STA     (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x5C)
+
+#define MAC1_LINK	BIT(24)
+#define MAC2_LINK	BIT(25)
+#define PDMA_FC_CFG	  (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x100)
+#define FE_GLO_MISC		(RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x124)
+#define PSE_IQ_REV1		(RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x140)
+#define PSE_IQ_REV2		(RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x144)
+#define PSE_IQ_REV3		(RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x148)
+#define PSE_IQ_REV4		(RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x14C)
+#define PSE_IQ_REV5		(RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x150)
+#define PSE_IQ_REV6		(RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x154)
+#define PSE_IQ_REV7		(RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x158)
+#define PSE_IQ_REV8		(RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x15C)
+#define PSE_OQ_TH1		(RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x160)
+#define PSE_OQ_TH2		(RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x164)
+#define PSE_OQ_TH3		(RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x168)
+#define PSE_OQ_TH4		(RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x16C)
+#define PSE_OQ_TH5		(RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x170)
+#define PSE_OQ_TH6		(RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x174)
+#define PSE_OQ_TH7		(RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x178)
+#define PSE_OQ_TH8		(RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x17C)
+#define	FE_PSE_FREE		(RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x240)
+#define FE_DROP_FQ		(RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x244)
+#define FE_DROP_FC		(RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x248)
+#define FE_DROP_PPE		(RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x24c)
+/* GDM1 */
+#define GDMA1_FWD_CFG       (RALINK_FRAME_ENGINE_BASE + GDM1_OFFSET + 0x00)
+#define GDMA1_SHPR_CFG      (RALINK_FRAME_ENGINE_BASE + GDM1_OFFSET + 0x04)
+#define GDMA1_MAC_ADRL      (RALINK_FRAME_ENGINE_BASE + GDM1_OFFSET + 0x08)
+#define GDMA1_MAC_ADRH      (RALINK_FRAME_ENGINE_BASE + GDM1_OFFSET + 0x0C)
+#define GDMA1_SCH_CFG       GDMA1_SHPR_CFG
+/* CDMA */                                        
+#define CDMA_CSG_CFG     (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x000)
+#define CDMP_IG_CTRL     (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x000)
+#define CDMP_EG_CTRL     (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x004)
+#define GDMA_TX_GBCNT0   (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x300)
+#define GDMA_TX_GPCNT0   (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x304)
+#define GDMA_TX_SKIPCNT0 (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x308)
+#define GDMA_TX_COLCNT0  (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x30C)
+#define GDMA_RX_GBCNT0   (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x320)
+#define GDMA_RX_GPCNT0   (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x324)
+#define GDMA_RX_OERCNT0  (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x328)
+#define GDMA_RX_FERCNT0  (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x32C)
+#define GDMA_RX_SERCNT0  (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x330)
+#define GDMA_RX_LERCNT0  (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x334)
+#define GDMA_RX_CERCNT0  (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x338)
+#define GDMA_RX_FCCNT1   (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x33C)
+/* ADMA */
+#define TX_BASE_PTR0	(RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x000)
+#define TX_MAX_CNT0	  (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x004)
+#define TX_CTX_IDX0	  (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x008)
+#define TX_DTX_IDX0	  (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x00C)
+#define TX_BASE_PTR1	(RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x010)
+#define TX_MAX_CNT1	  (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x014)
+#define TX_CTX_IDX1	  (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x018)
+#define TX_DTX_IDX1	  (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x01C)
+#define TX_BASE_PTR2	(RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x020)
+#define TX_MAX_CNT2	  (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x024)
+#define TX_CTX_IDX2	  (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x028)
+#define TX_DTX_IDX2	  (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x02C)
+#define TX_BASE_PTR3	(RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x030)
+#define TX_MAX_CNT3	  (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x034)
+#define TX_CTX_IDX3	  (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x038)
+#define TX_DTX_IDX3	  (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x03C)
+#define RX_BASE_PTR0	(RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x100)
+#define RX_MAX_CNT0	  (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x104)
+#define RX_CALC_IDX0	(RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x108)
+#define RX_DRX_IDX0	  (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x10C)
+#define RX_BASE_PTR1	(RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x110)
+#define RX_MAX_CNT1	  (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x114)
+#define RX_CALC_IDX1	(RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x118)
+#define RX_DRX_IDX1	  (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x11C)
+#define RX_BASE_PTR2	(RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x120)
+#define RX_MAX_CNT2	  (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x124)
+#define RX_CALC_IDX2	(RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x128)
+#define RX_DRX_IDX2	  (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x12C)
+#define RX_BASE_PTR3	(RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x130)
+#define RX_MAX_CNT3	  (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x134)
+#define RX_CALC_IDX3	(RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x138)
+#define RX_DRX_IDX3	  (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x13C)
+#define PDMA_INFO	    (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x200)
+#define PDMA_GLO_CFG	(RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x204)
+#define PDMA_RST_IDX	(RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x208)
+#define PDMA_RST_CFG	(PDMA_RST_IDX)
+#define DLY_INT_CFG	  (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x20C)
+#define FREEQ_THRES	  (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x210)
+#define INT_STATUS	  (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x220)
+#define FE_INT_STATUS	(INT_STATUS)
+#define INT_MASK	    (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x228)
+#define FE_INT_ENABLE	(INT_MASK)
+#define SCH_Q01_CFG	  (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x280)
+#define SCH_Q23_CFG	  (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x284)
+#define PDMA_INT_GRP1	(RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x250)
+#define PDMA_INT_GRP2	(RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x254)
+#define PDMA_INT_GRP3	(RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x22c)
+/* GDM2 */
+#define GDMA2_FWD_CFG       (RALINK_FRAME_ENGINE_BASE + GDM2_OFFSET + 0x00)
+#define GDMA2_SHPR_CFG      (RALINK_FRAME_ENGINE_BASE + GDM2_OFFSET + 0x04)
+#define GDMA2_MAC_ADRL      (RALINK_FRAME_ENGINE_BASE + GDM2_OFFSET + 0x08)
+#define GDMA2_MAC_ADRH      (RALINK_FRAME_ENGINE_BASE + GDM2_OFFSET + 0x0C)
+#define GDMA2_SCH_CFG       GDMA2_SHPR_CFG
+/* QDMA */
+#define  QTX_CFG_0          (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x000)
+#define  QTX_SCH_0          (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x004)
+#define  QTX_HEAD_0         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x008)
+#define  QTX_TAIL_0         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x00C)
+#define  QTX_CFG_1          (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x010)
+#define  QTX_SCH_1          (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x014)
+#define  QTX_HEAD_1         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x018)
+#define  QTX_TAIL_1         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x01C)
+#define  QTX_CFG_2          (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x020)
+#define  QTX_SCH_2          (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x024)
+#define  QTX_HEAD_2         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x028)
+#define  QTX_TAIL_2         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x02C)
+#define  QTX_CFG_3          (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x030)
+#define  QTX_SCH_3          (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x034)
+#define  QTX_HEAD_3         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x038)
+#define  QTX_TAIL_3         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x03C)
+#define  QTX_CFG_4          (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x040)
+#define  QTX_SCH_4          (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x044)
+#define  QTX_HEAD_4         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x048)
+#define  QTX_TAIL_4         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x04C)
+#define  QTX_CFG_5          (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x050)
+#define  QTX_SCH_5          (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x054)
+#define  QTX_HEAD_5         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x058)
+#define  QTX_TAIL_5         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x05C)
+#define  QTX_CFG_6          (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x060)
+#define  QTX_SCH_6          (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x064)
+#define  QTX_HEAD_6         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x068)
+#define  QTX_TAIL_6         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x06C)
+#define  QTX_CFG_7          (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x070)
+#define  QTX_SCH_7          (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x074)
+#define  QTX_HEAD_7         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x078)
+#define  QTX_TAIL_7         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x07C)
+#define  QTX_CFG_8          (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x080)
+#define  QTX_SCH_8          (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x084)
+#define  QTX_HEAD_8         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x088)
+#define  QTX_TAIL_8         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x08C)
+#define  QTX_CFG_9          (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x090)
+#define  QTX_SCH_9          (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x094)
+#define  QTX_HEAD_9         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x098)
+#define  QTX_TAIL_9         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x09C)
+#define  QTX_CFG_10         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0A0)
+#define  QTX_SCH_10         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0A4)
+#define  QTX_HEAD_10        (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0A8)
+#define  QTX_TAIL_10        (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0AC)
+#define  QTX_CFG_11         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0B0)
+#define  QTX_SCH_11         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0B4)
+#define  QTX_HEAD_11        (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0B8)
+#define  QTX_TAIL_11        (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0BC)
+#define  QTX_CFG_12         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0C0)
+#define  QTX_SCH_12         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0C4)
+#define  QTX_HEAD_12        (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0C8)
+#define  QTX_TAIL_12        (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0CC)
+#define  QTX_CFG_13         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0D0)
+#define  QTX_SCH_13         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0D4)
+#define  QTX_HEAD_13        (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0D8)
+#define  QTX_TAIL_13        (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0DC)
+#define  QTX_CFG_14         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0E0)
+#define  QTX_SCH_14         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0E4)
+#define  QTX_HEAD_14        (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0E8)
+#define  QTX_TAIL_14        (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0EC)
+#define  QTX_CFG_15         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0F0)
+#define  QTX_SCH_15         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0F4)
+#define  QTX_HEAD_15        (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0F8)
+#define  QTX_TAIL_15        (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0FC)
+#define  QRX_BASE_PTR_0     (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x100)
+#define  QRX_MAX_CNT_0      (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x104)
+#define  QRX_CRX_IDX_0      (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x108)
+#define  QRX_DRX_IDX_0      (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x10C)
+#define  QRX_BASE_PTR_1     (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x110)
+#define  QRX_MAX_CNT_1      (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x114)
+#define  QRX_CRX_IDX_1      (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x118)
+#define  QRX_DRX_IDX_1      (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x11C)
+#define  VQTX_TB_BASE_0     (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x180)
+#define  VQTX_TB_BASE_1     (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x184)
+#define  VQTX_TB_BASE_2     (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x188)
+#define  VQTX_TB_BASE_3     (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x18C)
+#define  QDMA_INFO          (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x200)
+#define  QDMA_GLO_CFG       (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x204)
+#define  QDMA_RST_IDX       (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x208)
+#define  QDMA_RST_CFG       (QDMA_RST_IDX)
+#define  QDMA_DELAY_INT     (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x20C)
+#define  QDMA_FC_THRES      (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x210)
+#define  QDMA_TX_SCH        (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x214)
+#define  QDMA_INT_STS       (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x218)
+#define  QFE_INT_STATUS		  (QDMA_INT_STS)
+#define  QDMA_INT_MASK      (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x21C)
+#define  QFE_INT_ENABLE		  (QDMA_INT_MASK)
+#define  QDMA_TRTCM         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x220)
+#define  QDMA_DATA0         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x224)
+#define  QDMA_DATA1         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x228)
+#define  QDMA_RED_THRES     (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x22C)
+#define  QDMA_TEST          (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x230)
+#define  QDMA_DMA           (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x234)
+#define  QDMA_BMU           (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x238)
+#define  QDMA_HRED1         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x240)
+#define  QDMA_HRED2         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x244)
+#define  QDMA_SRED1         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x248)
+#define  QDMA_SRED2         (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x24C)
+#define  QTX_MIB_IF         (RALINK_FRAME_ENGINE_BASE + 0x1abc)
+#define  QTX_CTX_PTR        (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x300)
+#define  QTX_DTX_PTR        (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x304)
+#define  QTX_FWD_CNT        (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x308)
+#define  QTX_CRX_PTR        (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x310)
+#define  QTX_DRX_PTR        (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x314)
+#define  QTX_RLS_CNT        (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x318)
+#define  QDMA_FQ_HEAD       (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x320)
+#define  QDMA_FQ_TAIL       (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x324)
+#define  QDMA_FQ_CNT        (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x328)
+#define  QDMA_FQ_BLEN       (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x32C)
+#define  QTX_Q0MIN_BK       (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x350)
+#define  QTX_Q1MIN_BK       (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x354)
+#define  QTX_Q2MIN_BK       (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x358)
+#define  QTX_Q3MIN_BK       (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x35C)
+#define  QTX_Q0MAX_BK       (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x360)
+#define  QTX_Q1MAX_BK       (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x364)
+#define  QTX_Q2MAX_BK       (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x368)
+#define  QTX_Q3MAX_BK       (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x36C)
+#define  QDMA_INT_GRP1	    (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x220)
+#define  QDMA_INT_GRP2	    (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x224)
+
+#define DELAY_INT_INIT		0x8f0f8f0f
+#define FE_INT_DLY_INIT		(TX_DLY_INT | RX_DLY_INT)
+#define RSS_INT_DLY_INT_2RING	(RING0_RX_DLY_INT | RING1_RX_DLY_INT)
+#define RSS_INT_DLY_INT		(RING0_RX_DLY_INT | RING1_RX_DLY_INT | \
+				 RING2_RX_DLY_INT | RING3_RX_DLY_INT | TX_DLY_INT)
+
+/* LRO global control */
+/* Bits [15:0]:LRO_ALT_RFSH_TIMER, Bits [20:16]:LRO_ALT_TICK_TIMER */
+#define LRO_ALT_REFRESH_TIMER   (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x001C)
+
+/* LRO auto-learn table info */
+#define PDMA_FE_ALT_CF8		  (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x0300)
+#define PDMA_FE_ALT_SGL_CFC	(RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x0304)
+#define PDMA_FE_ALT_SEQ_CFC	(RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x0308)
+
+/* LRO controls */
+#define ADMA_LRO_CTRL_OFFSET    (ADMA_OFFSET + 0x180)
+/*Bit [0]:LRO_EN, Bit [1]:LRO_IPv6_EN, Bit [2]:MULTIPLE_NON_LRO_RX_RING_EN,
+ * Bit [3]:MULTIPLE_RXD_PREFETCH_EN, Bit [4]:RXD_PREFETCH_EN,
+ * Bit [5]:LRO_DLY_INT_EN, Bit [6]:LRO_CRSN_BNW, Bit [7]:L3_CKS_UPD_EN,
+ * Bit [20]:first_ineligible_pkt_redirect_en, Bit [21]:cr_lro_alt_score_mode,
+ * Bit [22]:cr_lro_alt_rplc_mode, Bit [23]:cr_lro_l4_ctrl_psh_en,
+ * Bits [28:26]:LRO_RING_RELINGUISH_REQ, Bits [31:29]:LRO_RING_RELINGUISH_DONE
+ */
+#define ADMA_LRO_CTRL_DW0 (RALINK_FRAME_ENGINE_BASE + \
+			   ADMA_LRO_CTRL_OFFSET + 0x00)
+/* Bits [31:0]:LRO_CPU_REASON */
+#define ADMA_LRO_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE + \
+			   ADMA_LRO_CTRL_OFFSET + 0x04)
+/* Bits [31:0]:AUTO_LEARN_LRO_ELIGIBLE_THRESHOLD */
+#define ADMA_LRO_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE + \
+			   ADMA_LRO_CTRL_OFFSET + 0x08)
+/*Bits [7:0]:LRO_MAX_AGGREGATED_CNT,
+ * Bits [11:8]:LRO_VLAN_EN, Bits [13:12]:LRO_VLAN_VID_CMP_DEPTH,
+ * Bit [14]:ADMA_FW_RSTN_REQ, Bit [15]:ADMA_MODE, Bits [31:16]:LRO_MIN_RXD_SDL0
+ */
+#define ADMA_LRO_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE + \
+			   ADMA_LRO_CTRL_OFFSET + 0x0C)
+
+/* LRO RX delay interrupt configurations */
+#define LRO_RX1_DLY_INT        (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x0270)
+#define LRO_RX2_DLY_INT        (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x0274)
+#define LRO_RX3_DLY_INT        (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x0278)
+
+/* LRO auto-learn configurations */
+#define PDMA_LRO_ATL_OVERFLOW_ADJ_OFFSET    (ADMA_OFFSET + 0x190)
+#define PDMA_LRO_ATL_OVERFLOW_ADJ (RALINK_FRAME_ENGINE_BASE + \
+				   PDMA_LRO_ATL_OVERFLOW_ADJ_OFFSET)
+#define LRO_ALT_SCORE_DELTA   (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x024c)
+
+/* LRO agg timer configurations */
+#define LRO_MAX_AGG_TIME       (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET  + 0x025c)
+
+/* LRO configurations of RX ring #0 */
+#define LRO_RXRING0_OFFSET          (ADMA_OFFSET + 0x300)
+#define LRO_RX_RING0_DIP_DW0 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING0_OFFSET + 0x04)
+#define LRO_RX_RING0_DIP_DW1 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING0_OFFSET + 0x08)
+#define LRO_RX_RING0_DIP_DW2 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING0_OFFSET + 0x0C)
+#define LRO_RX_RING0_DIP_DW3 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING0_OFFSET + 0x10)
+#define LRO_RX_RING0_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING0_OFFSET + 0x28)
+/* Bit [8]:RING0_VLD, Bit [9]:RING0_MYIP_VLD */
+#define LRO_RX_RING0_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING0_OFFSET + 0x2C)
+#define LRO_RX_RING0_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING0_OFFSET + 0x30)
+/* LRO configurations of RX ring #1 */
+#define LRO_RXRING1_OFFSET          (ADMA_OFFSET + 0x340)
+#define LRO_RX_RING1_STP_DTP_DW (RALINK_FRAME_ENGINE_BASE + \
+				 LRO_RXRING1_OFFSET + 0x00)
+#define LRO_RX_RING1_DIP_DW0 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING1_OFFSET + 0x04)
+#define LRO_RX_RING1_DIP_DW1 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING1_OFFSET + 0x08)
+#define LRO_RX_RING1_DIP_DW2 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING1_OFFSET + 0x0C)
+#define LRO_RX_RING1_DIP_DW3 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING1_OFFSET + 0x10)
+#define LRO_RX_RING1_SIP_DW0 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING1_OFFSET + 0x14)
+#define LRO_RX_RING1_SIP_DW1 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING1_OFFSET + 0x18)
+#define LRO_RX_RING1_SIP_DW2 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING1_OFFSET + 0x1C)
+#define LRO_RX_RING1_SIP_DW3 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING1_OFFSET + 0x20)
+#define LRO_RX_RING1_CTRL_DW0 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING1_OFFSET + 0x24)
+#define LRO_RX_RING1_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING1_OFFSET + 0x28)
+#define LRO_RX_RING1_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING1_OFFSET + 0x2C)
+#define LRO_RX_RING1_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING1_OFFSET + 0x30)
+#define LRO_RXRING2_OFFSET          (ADMA_OFFSET + 0x380)
+#define LRO_RX_RING2_STP_DTP_DW (RALINK_FRAME_ENGINE_BASE + \
+				 LRO_RXRING2_OFFSET + 0x00)
+#define LRO_RX_RING2_DIP_DW0 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING2_OFFSET + 0x04)
+#define LRO_RX_RING2_DIP_DW1 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING2_OFFSET + 0x08)
+#define LRO_RX_RING2_DIP_DW2 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING2_OFFSET + 0x0C)
+#define LRO_RX_RING2_DIP_DW3 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING2_OFFSET + 0x10)
+#define LRO_RX_RING2_SIP_DW0 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING2_OFFSET + 0x14)
+#define LRO_RX_RING2_SIP_DW1 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING2_OFFSET + 0x18)
+#define LRO_RX_RING2_SIP_DW2 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING2_OFFSET + 0x1C)
+#define LRO_RX_RING2_SIP_DW3 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING2_OFFSET + 0x20)
+#define LRO_RX_RING2_CTRL_DW0 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING2_OFFSET + 0x24)
+#define LRO_RX_RING2_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING2_OFFSET + 0x28)
+#define LRO_RX_RING2_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING2_OFFSET + 0x2C)
+#define LRO_RX_RING2_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING2_OFFSET + 0x30)
+#define LRO_RXRING3_OFFSET          (ADMA_OFFSET + 0x3C0)
+#define LRO_RX_RING3_STP_DTP_DW (RALINK_FRAME_ENGINE_BASE + \
+				 LRO_RXRING3_OFFSET + 0x00)
+#define LRO_RX_RING3_DIP_DW0 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING3_OFFSET + 0x04)
+#define LRO_RX_RING3_DIP_DW1 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING3_OFFSET + 0x08)
+#define LRO_RX_RING3_DIP_DW2 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING3_OFFSET + 0x0C)
+#define LRO_RX_RING3_DIP_DW3 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING3_OFFSET + 0x10)
+#define LRO_RX_RING3_SIP_DW0 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING3_OFFSET + 0x14)
+#define LRO_RX_RING3_SIP_DW1 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING3_OFFSET + 0x18)
+#define LRO_RX_RING3_SIP_DW2 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING3_OFFSET + 0x1C)
+#define LRO_RX_RING3_SIP_DW3 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING3_OFFSET + 0x20)
+#define LRO_RX_RING3_CTRL_DW0 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING3_OFFSET + 0x24)
+#define LRO_RX_RING3_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING3_OFFSET + 0x28)
+#define LRO_RX_RING3_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING3_OFFSET + 0x2C)
+#define LRO_RX_RING3_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING3_OFFSET + 0x30)
+
+#define ADMA_DBG_OFFSET	(ADMA_OFFSET + 0x230)
+#define ADMA_TX_DBG0	(RALINK_FRAME_ENGINE_BASE + ADMA_DBG_OFFSET + 0x00)
+#define ADMA_TX_DBG1	(RALINK_FRAME_ENGINE_BASE + ADMA_DBG_OFFSET + 0x04)
+#define ADMA_RX_DBG0	(RALINK_FRAME_ENGINE_BASE + ADMA_DBG_OFFSET + 0x08)
+#define ADMA_RX_DBG1	(RALINK_FRAME_ENGINE_BASE + ADMA_DBG_OFFSET + 0x0C)
+
+/********RSS CR ************/
+#define ADMA_RSS_GLO_CFG	(RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x00)
+#define ADMA_RSS_INDR_TABLE_DW0	(RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x50)
+#define ADMA_RSS_INDR_TABLE_DW1	(RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x54)
+#define ADMA_RSS_INDR_TABLE_DW2	(RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x58)
+#define ADMA_RSS_INDR_TABLE_DW3	(RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x5C)
+#define ADMA_RSS_INDR_TABLE_DW4	(RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x60)
+#define ADMA_RSS_INDR_TABLE_DW5	(RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x64)
+#define ADMA_RSS_INDR_TABLE_DW6	(RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x68)
+#define ADMA_RSS_INDR_TABLE_DW7	(RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x6C)
+
+#define ADMA_RSS_HASH_KEY_DW0	(RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x20)
+#define ADMA_RSS_HASH_KEY_DW1	(RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x24)
+#define ADMA_RSS_HASH_KEY_DW2	(RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x28)
+#define ADMA_RSS_HASH_KEY_DW3	(RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x2C)
+#define ADMA_RSS_HASH_KEY_DW4	(RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x30)
+#define ADMA_RSS_HASH_KEY_DW5	(RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x34)
+#define ADMA_RSS_HASH_KEY_DW6	(RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x38)
+#define ADMA_RSS_HASH_KEY_DW7	(RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x3C)
+#define ADMA_RSS_HASH_KEY_DW8	(RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x40)
+#define ADMA_RSS_HASH_KEY_DW9	(RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x44)
+/* LRO RX ring mode */
+#define PDMA_RX_NORMAL_MODE         (0x0)
+#define PDMA_RX_PSE_MODE            (0x1)
+#define PDMA_RX_FORCE_PORT          (0x2)
+#define PDMA_RX_AUTO_LEARN          (0x3)
+
+#define ADMA_RX_RING0   (0)
+#define ADMA_RX_RING1   (1)
+#define ADMA_RX_RING2   (2)
+#define ADMA_RX_RING3   (3)
+
+#define ADMA_RX_LEN0_MASK   (0x3fff)
+#define ADMA_RX_LEN1_MASK   (0x3)
+
+#define SET_ADMA_RX_LEN0(x)    ((x) & ADMA_RX_LEN0_MASK)
+#define SET_ADMA_RX_LEN1(x)    ((x) & ADMA_RX_LEN1_MASK)
+
+#define QDMA_PAGE	(ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x1F0)
+
+/*SFQ use*/
+#define VQTX_TB_BASE0 (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0180)
+#define VQTX_TB_BASE1 (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0184)
+#define VQTX_TB_BASE2 (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0188)
+#define VQTX_TB_BASE3 (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x018C)
+#define VQTX_GLO       (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0280)
+#define VQTX_INVLD_PTR (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x028C)
+#define VQTX_NUM       (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0290)
+#define VQTX_SCH       (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0298)
+#define VQTX_HASH_CFG  (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x02A0)
+#define VQTX_HASH_SD   (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x02A4)
+#define VQTX_VLD_CFG   (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x02B0)
+#define VQTX_MIB_IF    (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x02BC)
+#define VQTX_MIB_PCNT  (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x02C0)
+#define VQTX_MIB_BCNT0 (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x02C4)
+#define VQTX_MIB_BCNT1 (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x02C8)
+#define VQTX_0_BIND_QID	(PQ0 << 0)
+#define VQTX_1_BIND_QID (PQ1 << 8)
+#define VQTX_2_BIND_QID (PQ2 << 16)
+#define VQTX_3_BIND_QID (PQ3 << 24)
+#define VQTX_4_BIND_QID (PQ4 << 0)
+#define VQTX_5_BIND_QID (PQ5 << 8)
+#define VQTX_6_BIND_QID (PQ6 << 16)
+#define VQTX_7_BIND_QID (PQ7 << 24)
+#define VQTX_TB_BASE4 (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0190)
+#define VQTX_TB_BASE5 (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0194)
+#define VQTX_TB_BASE6 (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0198)
+#define VQTX_TB_BASE7 (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x019C)
+#define VQTX_0_3_BIND_QID (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0xBC0)
+#define VQTX_4_7_BIND_QID (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0xBC4)
+#define PQ0	0
+#define PQ1	1
+#define PQ2	15
+#define PQ3	16
+#define PQ4	30
+#define PQ5	31
+#define PQ6	43
+#define PQ7	63
+
+#if defined(CONFIG_MACH_MT7623)
+#define VQ_NUM0	256
+#define VQ_NUM1	256
+#define VQ_NUM2	256
+#define VQ_NUM3	256
+#define VQ_NUM4	0
+#define VQ_NUM5	0
+#define VQ_NUM6	0
+#define VQ_NUM7	0
+#define VQTX_NUM_0  (4 << 0)
+#define VQTX_NUM_1  (4 << 4)
+#define VQTX_NUM_2  (4 << 8)
+#define VQTX_NUM_3  (4 << 12)
+#define VQTX_NUM_4   0
+#define VQTX_NUM_5   0
+#define VQTX_NUM_6   0
+#define VQTX_NUM_7   0
+#else
+#define VQ_NUM0	128
+#define VQ_NUM1	128
+#define VQ_NUM2	128
+#define VQ_NUM3	128
+#define VQ_NUM4	128
+#define VQ_NUM5	128
+#define VQ_NUM6	128
+#define VQ_NUM7	128
+#define VQTX_NUM_0  (3 << 0)
+#define VQTX_NUM_1  (3 << 4)
+#define VQTX_NUM_2  (3 << 8)
+#define VQTX_NUM_3  (3 << 12)
+#define VQTX_NUM_4  (3 << 16)
+#define VQTX_NUM_5  (3 << 20)
+#define VQTX_NUM_6  (3 << 24)
+#define VQTX_NUM_7  (3 << 28)
+#endif
+
+#define VQTX_MIB_EN BIT(17)
+
+/* HW IO-COHERENT base address */
+#if defined(CONFIG_MACH_LEOPARD)
+#define HW_IOC_BASE	0x1B000080
+#define IOC_OFFSET	4
+#else
+#define HW_IOC_BASE	0x1B000400
+#define IOC_OFFSET	8
+#endif
+
+/*=========================================
+ *    SFQ Table Format define
+ *=========================================
+ */
+struct SFQ_INFO1_T {
+	unsigned int VQHPTR;
+};
+
+struct SFQ_INFO2_T {
+	unsigned int VQTPTR;
+};
+
+struct SFQ_INFO3_T {
+	unsigned int QUE_DEPTH:16;
+	unsigned int DEFICIT_CNT:16;
+};
+
+struct SFQ_INFO4_T {
+	unsigned int RESV;
+};
+
+struct SFQ_INFO5_T {
+	unsigned int PKT_CNT;
+};
+
+struct SFQ_INFO6_T {
+	unsigned int BYTE_CNT;
+};
+
+struct SFQ_INFO7_T {
+	unsigned int BYTE_CNT;
+};
+
+struct SFQ_INFO8_T {
+	unsigned int RESV;
+};
+
+struct SFQ_table {
+	struct SFQ_INFO1_T sfq_info1;
+	struct SFQ_INFO2_T sfq_info2;
+	struct SFQ_INFO3_T sfq_info3;
+	struct SFQ_INFO4_T sfq_info4;
+	struct SFQ_INFO5_T sfq_info5;
+	struct SFQ_INFO6_T sfq_info6;
+	struct SFQ_INFO7_T sfq_info7;
+	struct SFQ_INFO8_T sfq_info8;
+};
+
+#if defined(CONFIG_RAETH_HW_LRO) || defined(CONFIG_RAETH_MULTIPLE_RX_RING)
+#define FE_GDM_RXID1_OFFSET	(0x0130)
+#define FE_GDM_RXID1		(RALINK_FRAME_ENGINE_BASE + FE_GDM_RXID1_OFFSET)
+#define GDM_VLAN_PRI7_RXID_SEL	BITS(30, 31)
+#define GDM_VLAN_PRI6_RXID_SEL	BITS(28, 29)
+#define GDM_VLAN_PRI5_RXID_SEL	BITS(26, 27)
+#define GDM_VLAN_PRI4_RXID_SEL	BITS(24, 25)
+#define GDM_VLAN_PRI3_RXID_SEL	BITS(22, 23)
+#define GDM_VLAN_PRI2_RXID_SEL	BITS(20, 21)
+#define GDM_VLAN_PRI1_RXID_SEL	BITS(18, 19)
+#define GDM_VLAN_PRI0_RXID_SEL	BITS(16, 17)
+#define GDM_TCP_ACK_RXID_SEL	BITS(4, 5)
+#define GDM_TCP_ACK_WZPC	BIT(3)
+#define GDM_RXID_PRI_SEL	BITS(0, 2)
+
+#define FE_GDM_RXID2_OFFSET	(0x0134)
+#define FE_GDM_RXID2		(RALINK_FRAME_ENGINE_BASE + FE_GDM_RXID2_OFFSET)
+#define GDM_STAG7_RXID_SEL	BITS(30, 31)
+#define GDM_STAG6_RXID_SEL	BITS(28, 29)
+#define GDM_STAG5_RXID_SEL	BITS(26, 27)
+#define GDM_STAG4_RXID_SEL	BITS(24, 25)
+#define GDM_STAG3_RXID_SEL	BITS(22, 23)
+#define GDM_STAG2_RXID_SEL	BITS(20, 21)
+#define GDM_STAG1_RXID_SEL	BITS(18, 19)
+#define GDM_STAG0_RXID_SEL	BITS(16, 17)
+#define GDM_PID2_RXID_SEL	BITS(2, 3)
+#define GDM_PID1_RXID_SEL	BITS(0, 1)
+
+#define GDM_PRI_PID              (0)
+#define GDM_PRI_VLAN_PID         (1)
+#define GDM_PRI_ACK_PID          (2)
+#define GDM_PRI_VLAN_ACK_PID     (3)
+#define GDM_PRI_ACK_VLAN_PID     (4)
+
+#define SET_GDM_VLAN_PRI_RXID_SEL(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(FE_GDM_RXID1); \
+reg_val &= ~(0x03 << (((x) << 1) + 16));   \
+reg_val |= ((y) & 0x3) << (((x) << 1) + 16);  \
+sys_reg_write(FE_GDM_RXID1, reg_val); \
+}
+
+#define SET_GDM_TCP_ACK_RXID_SEL(x) \
+{ \
+unsigned int reg_val = sys_reg_read(FE_GDM_RXID1); \
+reg_val &= ~(GDM_TCP_ACK_RXID_SEL);   \
+reg_val |= ((x) & 0x3) << 4;  \
+sys_reg_write(FE_GDM_RXID1, reg_val); \
+}
+
+#define SET_GDM_TCP_ACK_WZPC(x) \
+{ \
+unsigned int reg_val = sys_reg_read(FE_GDM_RXID1); \
+reg_val &= ~(GDM_TCP_ACK_WZPC);   \
+reg_val |= ((x) & 0x1) << 3;  \
+sys_reg_write(FE_GDM_RXID1, reg_val); \
+}
+
+#define SET_GDM_RXID_PRI_SEL(x) \
+{ \
+unsigned int reg_val = sys_reg_read(FE_GDM_RXID1); \
+reg_val &= ~(GDM_RXID_PRI_SEL);   \
+reg_val |= (x) & 0x7;  \
+sys_reg_write(FE_GDM_RXID1, reg_val); \
+}
+
+#define GDM_STAG_RXID_SEL(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(FE_GDM_RXID2); \
+reg_val &= ~(0x03 << (((x) << 1) + 16));   \
+reg_val |= ((y) & 0x3) << (((x) << 1) + 16);  \
+sys_reg_write(FE_GDM_RXID2, reg_val); \
+}
+
+#define SET_GDM_PID2_RXID_SEL(x) \
+{ \
+unsigned int reg_val = sys_reg_read(FE_GDM_RXID2); \
+reg_val &= ~(GDM_PID2_RXID_SEL);   \
+reg_val |= ((x) & 0x3) << 2;  \
+sys_reg_write(FE_GDM_RXID2, reg_val); \
+}
+
+#define SET_GDM_PID1_RXID_SEL(x) \
+{ \
+unsigned int reg_val = sys_reg_read(FE_GDM_RXID2); \
+reg_val &= ~(GDM_PID1_RXID_SEL);   \
+reg_val |= ((x) & 0x3);  \
+sys_reg_write(FE_GDM_RXID2, reg_val); \
+}
+
+#endif /* CONFIG_RAETH_HW_LRO || CONFIG_RAETH_MULTIPLE_RX_RING */
+/* Per Port Packet Counts in RT3052, added by bobtseng 2009.4.17. */
+#define	PORT0_PKCOUNT		(0xb01100e8)
+#define	PORT1_PKCOUNT		(0xb01100ec)
+#define	PORT2_PKCOUNT		(0xb01100f0)
+#define	PORT3_PKCOUNT		(0xb01100f4)
+#define	PORT4_PKCOUNT		(0xb01100f8)
+#define	PORT5_PKCOUNT		(0xb01100fc)
+
+#define sys_reg_read(phys)	 (__raw_readl((void __iomem *)phys))
+#define sys_reg_write(phys, val) (__raw_writel(val, (void __iomem *)phys))
+
+/* ====================================== */
+#define GDM1_DISPAD       BIT(18)
+#define GDM1_DISCRC       BIT(17)
+
+/* GDMA1 uni-cast frames destination port */
+#define GDM1_ICS_EN	   (0x1 << 22)
+#define GDM1_TCS_EN	   (0x1 << 21)
+#define GDM1_UCS_EN	   (0x1 << 20)
+#define GDM1_JMB_EN	   (0x1 << 19)
+#define GDM1_STRPCRC	   (0x1 << 16)
+#define GDM1_UFRC_P_CPU     (0 << 12)
+
+/* GDMA1 broad-cast MAC address frames */
+#define GDM1_BFRC_P_CPU     (0 << 8)
+
+/* GDMA1 multi-cast MAC address frames */
+#define GDM1_MFRC_P_CPU     (0 << 4)
+
+/* GDMA1 other MAC address frames destination port */
+#define GDM1_OFRC_P_CPU     (0 << 0)
+
+/* checksum generator registers are removed */
+#define ICS_GEN_EN          (0 << 2)
+#define UCS_GEN_EN          (0 << 1)
+#define TCS_GEN_EN          (0 << 0)
+
+/* MDIO_CFG     bit */
+#define MDIO_CFG_GP1_FC_TX	BIT(11)
+#define MDIO_CFG_GP1_FC_RX	BIT(10)
+
+/* ====================================== */
+/* ====================================== */
+#define GP1_LNK_DWN     BIT(9)
+#define GP1_AN_FAIL     BIT(8)
+/* ====================================== */
+/* ====================================== */
+#define PSE_RESET       BIT(0)
+/* ====================================== */
+#define PST_DRX_IDX3       BIT(19)
+#define PST_DRX_IDX2       BIT(18)
+#define PST_DRX_IDX1       BIT(17)
+#define PST_DRX_IDX0       BIT(16)
+#define PST_DTX_IDX3       BIT(3)
+#define PST_DTX_IDX2       BIT(2)
+#define PST_DTX_IDX1       BIT(1)
+#define PST_DTX_IDX0       BIT(0)
+
+#define RX_2B_OFFSET	  BIT(31)
+#define CSR_CLKGATE_BYP	  BIT(30)
+#define MULTI_EN	  BIT(10)
+#define DESC_32B_EN	  BIT(8)
+#define TX_WB_DDONE       BIT(6)
+#define RX_DMA_BUSY       BIT(3)
+#define TX_DMA_BUSY       BIT(1)
+#define RX_DMA_EN         BIT(2)
+#define TX_DMA_EN         BIT(0)
+
+#define PDMA_BT_SIZE_4DWORDS		(0 << 4)
+#define PDMA_BT_SIZE_8DWORDS		BIT(4)
+#define PDMA_BT_SIZE_16DWORDS		(2 << 4)
+#define PDMA_BT_SIZE_32DWORDS		(3 << 4)
+#define PDMA_DESC_32B_E             (1 << 8)
+
+#define ADMA_RX_BT_SIZE_4DWORDS		(0 << 11)
+#define ADMA_RX_BT_SIZE_8DWORDS		BIT(11)
+#define ADMA_RX_BT_SIZE_16DWORDS	(2 << 11)
+#define ADMA_RX_BT_SIZE_32DWORDS	(3 << 11)
+
+/* Register bits.
+ */
+
+#define MACCFG_RXEN	BIT(2)
+#define MACCFG_TXEN	BIT(3)
+#define MACCFG_PROMISC	BIT(18)
+#define MACCFG_RXMCAST	BIT(19)
+#define MACCFG_FDUPLEX	BIT(20)
+#define MACCFG_PORTSEL	BIT(27)
+#define MACCFG_HBEATDIS	BIT(28)
+
+#define DMACTL_SR	BIT(1)	/* Start/Stop Receive */
+#define DMACTL_ST	BIT(13)	/* Start/Stop Transmission Command */
+
+#define DMACFG_SWR	BIT(0)	/* Software Reset */
+#define DMACFG_BURST32		(32 << 8)
+
+#define DMASTAT_TS		0x00700000	/* Transmit Process State */
+#define DMASTAT_RS		0x000e0000	/* Receive Process State */
+
+#define MACCFG_INIT		0   /* (MACCFG_FDUPLEX) // | MACCFG_PORTSEL) */
+
+/* Descriptor bits.
+ */
+#define R_OWN		0x80000000	/* Own Bit */
+#define RD_RER		0x02000000	/* Receive End Of Ring */
+#define RD_LS		0x00000100	/* Last Descriptor */
+#define RD_ES		0x00008000	/* Error Summary */
+#define RD_CHAIN	0x01000000	/* Chained */
+
+/* Word 0 */
+#define T_OWN		0x80000000	/* Own Bit */
+#define TD_ES		0x00008000	/* Error Summary */
+
+/* Word 1 */
+#define TD_LS		0x40000000	/* Last Segment */
+#define TD_FS		0x20000000	/* First Segment */
+#define TD_TER		0x08000000	/* Transmit End Of Ring */
+#define TD_CHAIN	0x01000000	/* Chained */
+
+#define TD_SET		0x08000000	/* Setup Packet */
+
+#define POLL_DEMAND 1
+
+#define RSTCTL	(0x34)
+#define RSTCTL_RSTENET1	BIT(19)
+#define RSTCTL_RSTENET2	BIT(20)
+
+#define INIT_VALUE_OF_RT2883_PSE_FQ_CFG		0xff908000
+#define INIT_VALUE_OF_PSE_FQFC_CFG		0x80504000
+#define INIT_VALUE_OF_FORCE_100_FD		0x1001BC01
+#define INIT_VALUE_OF_FORCE_1000_FD		0x1F01DC01
+
+/* Define Whole FE Reset Register */
+#define RSTCTRL			(RALINK_SYSCTL_BASE + 0x34)
+#define RT2880_AGPIOCFG_REG	(RALINK_SYSCTL_BASE + 0x3C)
+
+/*=========================================
+ *    PDMA RX Descriptor Format define
+ *=========================================
+ */
+
+struct PDMA_RXD_INFO1_T {
+	unsigned int PDP0;
+};
+
+struct PDMA_RXD_INFO2_T {
+	unsigned int PLEN1:2;
+	unsigned int LRO_AGG_CNT:8;
+	unsigned int REV:3;
+	unsigned int FOE_ENTRY_32:1;
+	unsigned int REV1:1;
+	unsigned int TAG:1;
+	unsigned int PLEN0:14;
+	unsigned int LS0:1;
+	unsigned int DDONE_bit:1;
+};
+
+struct PDMA_RXD_INFO3_T {
+	unsigned int VID:16;
+	unsigned int TPID:16;
+};
+
+struct PDMA_RXD_INFO4_T {
+	unsigned int FOE_ENTRY:14;
+	unsigned int CRSN:5;
+	unsigned int SP:4;
+	unsigned int L4F:1;
+	unsigned int L4VLD:1;
+	unsigned int TACK:1;
+	unsigned int IP4F:1;
+	unsigned int IP4:1;
+	unsigned int IP6:1;
+	unsigned int UN_USE1:3;
+};
+
+struct PDMA_rxdesc {
+	struct PDMA_RXD_INFO1_T rxd_info1;
+	struct PDMA_RXD_INFO2_T rxd_info2;
+	struct PDMA_RXD_INFO3_T rxd_info3;
+	struct PDMA_RXD_INFO4_T rxd_info4;
+#ifdef CONFIG_32B_DESC
+	unsigned int rxd_info5;
+	unsigned int rxd_info6;
+	unsigned int rxd_info7;
+	unsigned int rxd_info8;
+#endif
+};
+
+/*=========================================
+ *    PDMA TX Descriptor Format define
+ *=========================================
+ */
+struct PDMA_TXD_INFO1_T {
+	unsigned int SDP0;
+};
+
+struct PDMA_TXD_INFO2_T {
+	unsigned int SDL1:14;
+	unsigned int LS1_bit:1;
+	unsigned int BURST_bit:1;
+	unsigned int SDL0:14;
+	unsigned int LS0_bit:1;
+	unsigned int DDONE_bit:1;
+};
+
+struct PDMA_TXD_INFO3_T {
+	unsigned int SDP1;
+};
+
+struct PDMA_TXD_INFO4_T {
+	unsigned int VLAN_TAG:17;	/* INSV(1)+VPRI(3)+CFI(1)+VID(12) */
+	unsigned int RESV:2;
+	unsigned int UDF:5;
+	unsigned int FPORT:4;
+	unsigned int TSO:1;
+	unsigned int TUI_CO:3;
+};
+
+struct PDMA_txdesc {
+	struct PDMA_TXD_INFO1_T txd_info1;
+	struct PDMA_TXD_INFO2_T txd_info2;
+	struct PDMA_TXD_INFO3_T txd_info3;
+	struct PDMA_TXD_INFO4_T txd_info4;
+#ifdef CONFIG_32B_DESC
+	unsigned int txd_info5;
+	unsigned int txd_info6;
+	unsigned int txd_info7;
+	unsigned int txd_info8;
+#endif
+};
+
+/*=========================================
+ *    QDMA TX Descriptor Format define
+ *=========================================
+ */
+struct QDMA_TXD_INFO1_T {
+	unsigned int SDP;
+};
+
+struct QDMA_TXD_INFO2_T {
+	unsigned int NDP;
+};
+
+struct QDMA_TXD_INFO3_T {
+	unsigned int RSV0:6;
+	unsigned int RSV1:2;
+	unsigned int SDL:16;
+	unsigned int RSV2:6;
+	unsigned int LS:1;
+	unsigned int DDONE:1;
+};
+
+struct QDMA_TXD_INFO4_T {
+	unsigned int RSV0:6;
+	unsigned int RSV1:2;
+	unsigned int FPORT:4;
+	unsigned int RSV2:2;
+	unsigned int RSV3:2;
+	unsigned int QID:7;
+	unsigned int RSV4:1;
+	unsigned int RSV5:6;
+	unsigned int SWC:1;
+	unsigned int BURST:1;
+};
+
+struct QDMA_TXD_INFO5_T {
+	unsigned int PROT:3;
+	unsigned int RSV0:2;
+	unsigned int IPOFST:7;
+	unsigned int RSV1:2;
+	unsigned int VQID:10;
+	unsigned int RSV2:2;
+	unsigned int VQID0:1;
+	unsigned int RSV3:1;
+	unsigned int TUI_CO:3;
+	unsigned int TSO:1;
+};
+
+struct QDMA_TXD_INFO6_T {
+    unsigned int VLAN_TAG_1:16;
+    unsigned int INSV_1:1;
+    unsigned int RSV0:14;
+    unsigned int INSV_0:1;
+};
+
+struct QDMA_TXD_INFO7_T {
+    unsigned int VLAN_TAG_0:16;
+    unsigned int VPID_0:16;
+};
+
+struct QDMA_TXD_INFO8_T {
+    unsigned int RSV;
+};
+
+struct QDMA_txdesc {
+	struct QDMA_TXD_INFO1_T txd_info1;
+	struct QDMA_TXD_INFO2_T txd_info2;
+	struct QDMA_TXD_INFO3_T txd_info3;
+	struct QDMA_TXD_INFO4_T txd_info4;
+	struct QDMA_TXD_INFO5_T txd_info5;
+	struct QDMA_TXD_INFO6_T txd_info6;
+	struct QDMA_TXD_INFO7_T txd_info7;
+	struct QDMA_TXD_INFO8_T txd_info8;
+};
+
+#define QTXD_LEN (sizeof(struct QDMA_txdesc))
+#define PHY_ENABLE_AUTO_NEGO	0x1000
+#define PHY_RESTART_AUTO_NEGO	0x0200
+
+/* PHY_STAT_REG = 1; */
+#define PHY_AUTO_NEGO_COMP	0x0020
+#define PHY_LINK_STATUS		0x0004
+
+/* PHY_AUTO_NEGO_REG = 4; */
+#define PHY_CAP_10_HALF		0x0020
+#define PHY_CAP_10_FULL		0x0040
+#define	PHY_CAP_100_HALF	0x0080
+#define	PHY_CAP_100_FULL	0x0100
+
+/* proc definition */
+
+#define PROCREG_CONTROL_FILE      "/var/run/procreg_control"
+#if 0
+#if defined(CONFIG_MACH_MT7623)
+#define PROCREG_DIR             "mt7623"
+#elif defined(CONFIG_MACH_LEOPARD)
+#define PROCREG_DIR             "leopard"
+#elif defined(CONFIG_PINCTRL_MT7622)
+#define PROCREG_DIR             "mt7622"
+#elif defined(CONFIG_SOC_MT7621)
+#define PROCREG_DIR             "mt7621"
+#endif
+#endif
+#define PROCREG_DIR             "panther"
+#define PROCREG_SKBFREE		"skb_free"
+#define PROCREG_TXRING		"tx_ring"
+#define PROCREG_RXRING		"rx_ring"
+#define PROCREG_RXRING1		"rx_ring1"
+#define PROCREG_RXRING2		"rx_ring2"
+#define PROCREG_RXRING3		"rx_ring3"
+#define PROCREG_NUM_OF_TXD	"num_of_txd"
+#define PROCREG_TSO_LEN		"tso_len"
+#define PROCREG_LRO_STATS	"lro_stats"
+#define PROCREG_HW_LRO_STATS	"hw_lro_stats"
+#define PROCREG_HW_LRO_AUTO_TLB	"hw_lro_auto_tlb"
+#define PROCREG_HW_IO_COHERENT	"hw_iocoherent"
+#define PROCREG_GMAC		"gmac"
+#define PROCREG_GMAC2           "gmac2"
+#define PROCREG_CP0		"cp0"
+#define PROCREG_RAQOS		"qos"
+#define PROCREG_READ_VAL	"regread_value"
+#define PROCREG_WRITE_VAL	"regwrite_value"
+#define PROCREG_ADDR		"reg_addr"
+#define PROCREG_CTL		"procreg_control"
+#define PROCREG_RXDONE_INTR	"rxdone_intr_count"
+#define PROCREG_ESW_INTR	"esw_intr_count"
+#define PROCREG_ESW_CNT		"esw_cnt"
+#define PROCREG_ETH_CNT		"eth_cnt"
+#define PROCREG_SNMP		"snmp"
+#define PROCREG_SET_LAN_IP	"set_lan_ip"
+#if defined(TASKLET_WORKQUEUE_SW)
+#define PROCREG_SCHE		"schedule"
+#endif
+#define PROCREG_QDMA            "qdma"
+#define PROCREG_INT_DBG		"int_dbg"
+struct rt2880_reg_op_data {
+	char name[64];
+	unsigned int reg_addr;
+	unsigned int op;
+	unsigned int reg_value;
+};
+
+struct lro_counters {
+	u32 lro_aggregated;
+	u32 lro_flushed;
+	u32 lro_no_desc;
+};
+
+struct lro_para_struct {
+	unsigned int lan_ip1;
+};
+
+struct parse_result {
+	/* layer2 header */
+	u8 dmac[6];
+	u8 smac[6];
+
+	/* vlan header */
+	u16 vlan_tag;
+	u16 vlan1_gap;
+	u16 vlan1;
+	u16 vlan2_gap;
+	u16 vlan2;
+	u16 vlan_layer;
+
+	/* pppoe header */
+	u32 pppoe_gap;
+	u16 ppp_tag;
+	u16 pppoe_sid;
+
+	/* layer3 header */
+	u16 eth_type;
+	struct iphdr iph;
+	struct ipv6hdr ip6h;
+
+	/* layer4 header */
+	struct tcphdr th;
+	struct udphdr uh;
+
+	u32 pkt_type;
+	u8 is_mcast;
+};
+
+#define DMA_GLO_CFG PDMA_GLO_CFG
+
+#if defined(CONFIG_RAETH_QDMATX_QDMARX)
+#define GDMA1_FWD_PORT 0x5555
+#define GDMA2_FWD_PORT 0x5555
+#elif defined(CONFIG_RAETH_PDMATX_QDMARX)
+#define GDMA1_FWD_PORT 0x5555
+#define GDMA2_FWD_PORT 0x5555
+#else
+#define GDMA1_FWD_PORT 0x0000
+#define GDMA2_FWD_PORT 0x0000
+#endif
+
+#if defined(CONFIG_RAETH_QDMATX_QDMARX)
+#define RAETH_RX_CALC_IDX0 QRX_CRX_IDX_0
+#define RAETH_RX_CALC_IDX1 QRX_CRX_IDX_1
+#elif defined(CONFIG_RAETH_PDMATX_QDMARX)
+#define RAETH_RX_CALC_IDX0 QRX_CRX_IDX_0
+#define RAETH_RX_CALC_IDX1 QRX_CRX_IDX_1
+#else
+#define RAETH_RX_CALC_IDX0 RX_CALC_IDX0
+#define RAETH_RX_CALC_IDX1 RX_CALC_IDX1
+#endif
+#define RAETH_RX_CALC_IDX2 RX_CALC_IDX2
+#define RAETH_RX_CALC_IDX3 RX_CALC_IDX3
+#define RAETH_FE_INT_STATUS FE_INT_STATUS
+#define RAETH_FE_INT_ALL FE_INT_ALL
+#define RAETH_FE_INT_ENABLE FE_INT_ENABLE
+#define RAETH_FE_INT_DLY_INIT FE_INT_DLY_INIT
+#define RAETH_FE_INT_SETTING (RX_DONE_INT0 | RX_DONE_INT1 | \
+			      TX_DONE_INT0 | TX_DONE_INT1 | \
+			      TX_DONE_INT2 | TX_DONE_INT3)
+#define QFE_INT_SETTING (RX_DONE_INT0 | RX_DONE_INT1 | \
+			 TX_DONE_INT0 | TX_DONE_INT1 | \
+			 TX_DONE_INT2 | TX_DONE_INT3)
+#define RAETH_TX_DLY_INT TX_DLY_INT
+#define RAETH_TX_DONE_INT0 TX_DONE_INT0
+#define RAETH_DLY_INT_CFG DLY_INT_CFG
+
+/* io-coherent for ethdmasys */
+#define	IOC_ETH_PDMA	BIT(0)
+#define	IOC_ETH_QDMA	BIT(1)
+
+#endif	/* RAETH_REG_H */
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether.c
new file mode 100644
index 0000000..913eb9b
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether.c
@@ -0,0 +1,3294 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "raether.h"
+#include "ra_mac.h"
+#include "ra_ioctl.h"
+#include "ra_switch.h"
+#include "raether_hwlro.h"
+#include "ra_ethtool.h"
+
+void __iomem *ethdma_sysctl_base;
+#if defined(CONFIG_HW_NAT)  || defined(CONFIG_RA_HW_NAT_MODULE)
+EXPORT_SYMBOL(ethdma_sysctl_base);
+#endif
+void __iomem *ethdma_frame_engine_base;
+#if defined(CONFIG_HW_NAT)  || defined(CONFIG_RA_HW_NAT_MODULE)
+EXPORT_SYMBOL(ethdma_frame_engine_base);
+#endif
+struct net_device *dev_raether;
+#if defined(CONFIG_HW_NAT)  || defined(CONFIG_RA_HW_NAT_MODULE)
+EXPORT_SYMBOL(dev_raether);
+#endif
+void __iomem *ethdma_mac_base;
+
+static int pending_recv;
+
+/* LRO support */
+unsigned int lan_ip;
+struct lro_para_struct lro_para;
+u32 gmac1_txq_num;
+EXPORT_SYMBOL(gmac1_txq_num);
+u32 gmac1_txq_txd_num;
+EXPORT_SYMBOL(gmac1_txq_txd_num);
+u32 gmac1_txd_num;
+EXPORT_SYMBOL(gmac1_txd_num);
+u32 gmac2_txq_num;
+EXPORT_SYMBOL(gmac2_txq_num);
+u32 gmac2_txq_txd_num;
+EXPORT_SYMBOL(gmac2_txq_txd_num);
+u32 gmac2_txd_num;
+EXPORT_SYMBOL(gmac2_txd_num);
+u32 num_rx_desc;
+EXPORT_SYMBOL(num_rx_desc);
+u32 num_tx_max_process;
+EXPORT_SYMBOL(num_tx_max_process);
+u32 num_tx_desc;
+EXPORT_SYMBOL(num_tx_desc);
+u32 total_txq_num;
+EXPORT_SYMBOL(total_txq_num);
+
+static const char *const mtk_clks_source_name[] = {
+	"ethif", "esw", "gp0", "gp1", "gp2",
+	"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
+	"sgmii1_tx250m", "sgmii1_rx250m", "sgmii1_cdr_ref", "sgmii1_cdr_fb",
+	"trgpll", "sgmipll", "eth1pll", "eth2pll", "eth", "sgmiitop"
+};
+
+/* reset frame engine */
+static void fe_reset(void)
+{
+	u32 val;
+
+	val = sys_reg_read(RSTCTRL);
+	val = val | RALINK_FE_RST;
+	sys_reg_write(RSTCTRL, val);
+
+	val = val & ~(RALINK_FE_RST);
+	sys_reg_write(RSTCTRL, val);
+}
+
+static void fe_gmac_reset(void)
+{
+	u32 val;
+	/*Reset GMAC */
+	/* sys_reg_write(RALINK_SYSCTL_BASE + 0x34, 0x00800000); */
+	/* sys_reg_write(RALINK_SYSCTL_BASE + 0x34, 0x00000000); */
+	val = sys_reg_read(RALINK_SYSCTL_BASE + 0x34);
+	val |= (1 << 23);
+	sys_reg_write(RALINK_SYSCTL_BASE + 0x34, val);
+	val &= ~(1 << 23);
+	sys_reg_write(RALINK_SYSCTL_BASE + 0x34, val);
+}
+
+/* Set the hardware MAC address. */
+static int ei_set_mac_addr(struct net_device *dev, void *p)
+{
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	if (netif_running(dev))
+		return -EBUSY;
+
+	set_mac_address(dev->dev_addr);
+
+	return 0;
+}
+
+static int ei_set_mac2_addr(struct net_device *dev, void *p)
+{
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	if (netif_running(dev))
+		return -EBUSY;
+
+	set_mac2_address(dev->dev_addr);
+
+	return 0;
+}
+
+static void ei_reset_statistics(struct END_DEVICE *ei_local)
+{
+	ei_local->stat.tx_packets = 0;
+	ei_local->stat.tx_bytes = 0;
+	ei_local->stat.tx_dropped = 0;
+	ei_local->stat.tx_errors = 0;
+	ei_local->stat.tx_aborted_errors = 0;
+	ei_local->stat.tx_carrier_errors = 0;
+	ei_local->stat.tx_fifo_errors = 0;
+	ei_local->stat.tx_heartbeat_errors = 0;
+	ei_local->stat.tx_window_errors = 0;
+
+	ei_local->stat.rx_packets = 0;
+	ei_local->stat.rx_bytes = 0;
+	ei_local->stat.rx_dropped = 0;
+	ei_local->stat.rx_errors = 0;
+	ei_local->stat.rx_length_errors = 0;
+	ei_local->stat.rx_over_errors = 0;
+	ei_local->stat.rx_crc_errors = 0;
+	ei_local->stat.rx_frame_errors = 0;
+	ei_local->stat.rx_fifo_errors = 0;
+	ei_local->stat.rx_missed_errors = 0;
+
+	ei_local->stat.collisions = 0;
+}
+
+static inline void fe_rx_desc_init(struct PDMA_rxdesc *rx_ring,
+				   dma_addr_t dma_addr)
+{
+	rx_ring->rxd_info1.PDP0 = dma_addr;
+	rx_ring->rxd_info2.PLEN0 = MAX_RX_LENGTH;
+	rx_ring->rxd_info2.LS0 = 0;
+	rx_ring->rxd_info2.DDONE_bit = 0;
+}
+
+static int rt2880_eth_recv(struct net_device *dev,
+			   struct napi_struct *napi, int budget)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	struct PSEUDO_ADAPTER *p_ad = netdev_priv(ei_local->pseudo_dev);
+	struct sk_buff *rx_skb;
+	unsigned int length = 0;
+	int rx_processed = 0;
+	struct PDMA_rxdesc *rx_ring, *rx_ring_next;
+	unsigned int rx_dma_owner_idx, rx_next_idx;
+	void *rx_data, *rx_data_next, *new_data;
+	unsigned int skb_size;
+
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+	rx_dma_owner_idx = (ei_local->rx_calc_idx[0] + 1) % num_rx_desc;
+#else
+	rx_dma_owner_idx = (sys_reg_read(RAETH_RX_CALC_IDX0) + 1) % num_rx_desc;
+#endif
+	rx_ring = &ei_local->rx_ring[0][rx_dma_owner_idx];
+	rx_data = ei_local->netrx_skb_data[0][rx_dma_owner_idx];
+
+	skb_size = SKB_DATA_ALIGN(MAX_RX_LENGTH + NET_IP_ALIGN + NET_SKB_PAD) +
+	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	for (;;) {
+		dma_addr_t dma_addr;
+
+		if ((rx_processed++ > budget) ||
+		    (rx_ring->rxd_info2.DDONE_bit == 0))
+			break;
+
+		rx_next_idx = (rx_dma_owner_idx + 1) % num_rx_desc;
+		rx_ring_next = &ei_local->rx_ring[0][rx_next_idx];
+		rx_data_next = ei_local->netrx_skb_data[0][rx_next_idx];
+		prefetch(rx_ring_next);
+
+		/* We have to check the free memory size is big enough
+		 * before pass the packet to cpu
+		 */
+		new_data = raeth_alloc_skb_data(skb_size, GFP_ATOMIC);
+
+		if (unlikely(!new_data)) {
+			pr_err("skb not available...\n");
+			goto skb_err;
+		}
+
+		dma_addr = dma_map_single(dev->dev.parent,
+					  new_data + NET_SKB_PAD,
+					  MAX_RX_LENGTH, DMA_FROM_DEVICE);
+
+		if (unlikely(dma_mapping_error(dev->dev.parent, dma_addr))) {
+			pr_err("[%s]dma_map_single() failed...\n", __func__);
+			raeth_free_skb_data(new_data);
+			goto skb_err;
+		}
+
+		rx_skb = raeth_build_skb(rx_data, skb_size);
+
+		if (unlikely(!rx_skb)) {
+			put_page(virt_to_head_page(rx_data));
+			pr_err("build_skb failed\n");
+			goto skb_err;
+		}
+		skb_reserve(rx_skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+		length = rx_ring->rxd_info2.PLEN0;
+		dma_unmap_single(dev->dev.parent,
+				 rx_ring->rxd_info1.PDP0,
+				 length, DMA_FROM_DEVICE);
+
+		prefetch(rx_skb->data);
+
+		/* skb processing */
+		skb_put(rx_skb, length);
+
+		/* rx packet from GE2 */
+		if (rx_ring->rxd_info4.SP == 2) {
+			if (likely(ei_local->pseudo_dev)) {
+				rx_skb->dev = ei_local->pseudo_dev;
+				rx_skb->protocol =
+				    eth_type_trans(rx_skb,
+						   ei_local->pseudo_dev);
+			} else {
+				pr_err("pseudo_dev is still not initialize ");
+				pr_err("but receive packet from GMAC2\n");
+			}
+		} else {
+			rx_skb->dev = dev;
+			rx_skb->protocol = eth_type_trans(rx_skb, dev);
+		}
+
+		/* rx checksum offload */
+		if (rx_ring->rxd_info4.L4VLD)
+			rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			rx_skb->ip_summed = CHECKSUM_NONE;
+
+#if defined(CONFIG_HW_NAT)  || defined(CONFIG_RA_HW_NAT_MODULE)
+		if (ppe_hook_rx_eth) {
+			if (IS_SPACE_AVAILABLE_HEAD(rx_skb)) {
+				*(uint32_t *)(FOE_INFO_START_ADDR_HEAD(rx_skb)) =
+					*(uint32_t *)&rx_ring->rxd_info4;
+				FOE_ALG_HEAD(rx_skb) = 0;
+				FOE_MAGIC_TAG_HEAD(rx_skb) = FOE_MAGIC_GE;
+				FOE_TAG_PROTECT_HEAD(rx_skb) = TAG_PROTECT;
+			}
+			if (IS_SPACE_AVAILABLE_TAIL(rx_skb)) {
+				*(uint32_t *)(FOE_INFO_START_ADDR_TAIL(rx_skb) + 2) =
+					*(uint32_t *)&rx_ring->rxd_info4;
+				FOE_ALG_TAIL(rx_skb) = 0;
+				FOE_MAGIC_TAG_TAIL(rx_skb) = FOE_MAGIC_GE;
+				FOE_TAG_PROTECT_TAIL(rx_skb) = TAG_PROTECT;
+			}
+		}
+#endif
+
+	if (ei_local->features & FE_HW_VLAN_RX) {
+		if (rx_ring->rxd_info2.TAG)
+			__vlan_hwaccel_put_tag(rx_skb,
+					       htons(ETH_P_8021Q),
+					       rx_ring->rxd_info3.VID);
+	}
+
+/* ra_sw_nat_hook_rx return 1 --> continue
+ * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
+ */
+#if defined(CONFIG_HW_NAT)  || defined(CONFIG_RA_HW_NAT_MODULE)
+		if ((!ppe_hook_rx_eth) ||
+		    (ppe_hook_rx_eth && ppe_hook_rx_eth(rx_skb))) {
+#endif
+			if (ei_local->features & FE_INT_NAPI)
+				/* napi_gro_receive(napi, rx_skb); */
+				netif_receive_skb(rx_skb);
+			else
+				netif_rx(rx_skb);
+
+#if defined(CONFIG_HW_NAT)  || defined(CONFIG_RA_HW_NAT_MODULE)
+		}
+#endif
+
+		if (rx_ring->rxd_info4.SP == 2) {
+			p_ad->stat.rx_packets++;
+			p_ad->stat.rx_bytes += length;
+		} else {
+			ei_local->stat.rx_packets++;
+			ei_local->stat.rx_bytes += length;
+		}
+
+		/* init RX desc. */
+		fe_rx_desc_init(rx_ring, dma_addr);
+		ei_local->netrx_skb_data[0][rx_dma_owner_idx] = new_data;
+
+		/* make sure that all changes to the dma ring are flushed before
+		 * we continue
+		 */
+		wmb();
+
+		sys_reg_write(RAETH_RX_CALC_IDX0, rx_dma_owner_idx);
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+		ei_local->rx_calc_idx[0] = rx_dma_owner_idx;
+#endif
+
+		/* Update to Next packet point that was received.
+		 */
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+		rx_dma_owner_idx = rx_next_idx;
+#else
+		rx_dma_owner_idx =
+		    (sys_reg_read(RAETH_RX_CALC_IDX0) + 1) % num_rx_desc;
+#endif
+
+		/* use prefetched variable */
+		rx_ring = rx_ring_next;
+		rx_data = rx_data_next;
+	}			/* for */
+
+	return rx_processed;
+
+skb_err:
+	/* rx packet from GE2 */
+	if (rx_ring->rxd_info4.SP == 2)
+		p_ad->stat.rx_dropped++;
+	else
+		ei_local->stat.rx_dropped++;
+
+	/* Discard the rx packet */
+	fe_rx_desc_init(rx_ring, rx_ring->rxd_info1.PDP0);
+
+	/* make sure that all changes to the dma ring
+	 * are flushed before we continue
+	 */
+	wmb();
+
+	sys_reg_write(RAETH_RX_CALC_IDX0, rx_dma_owner_idx);
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+	ei_local->rx_calc_idx[0] = rx_dma_owner_idx;
+#endif
+
+	return (budget + 1);
+}
+
+/* Combined TX+RX NAPI poll handler.
+ * Acks pending TX/RX interrupt status, runs TX housekeeping and the RX
+ * receive path, and re-enables both interrupt masks once the RX budget
+ * was not exhausted.
+ */
+static int raeth_poll_full(struct napi_struct *napi, int budget)
+{
+	struct END_DEVICE *ei_local =
+	    container_of(napi, struct END_DEVICE, napi);
+	struct net_device *netdev = ei_local->netdev;
+	unsigned long reg_int_val_rx, reg_int_val_tx;
+	unsigned long reg_int_mask_rx, reg_int_mask_tx;
+	unsigned long flags;
+	int tx_done = 0, rx_done = 0;
+
+	reg_int_val_tx = sys_reg_read(ei_local->fe_tx_int_status);
+	reg_int_val_rx = sys_reg_read(ei_local->fe_rx_int_status);
+
+	if (reg_int_val_tx & ei_local->tx_mask) {
+		/* Clear TX interrupt status */
+		/* NOTE(review): only TX_DLY_INT | TX_DONE_INT0 are acked
+		 * here although the full tx_mask was tested above — confirm
+		 * this narrower ack is intentional.
+		 */
+		sys_reg_write(ei_local->fe_tx_int_status, (TX_DLY_INT | TX_DONE_INT0));
+		tx_done = ei_local->ei_xmit_housekeeping(netdev,
+							 num_tx_max_process);
+	}
+
+	if (reg_int_val_rx & ei_local->rx_mask) {
+		/* Clear RX interrupt status */
+		sys_reg_write(ei_local->fe_rx_int_status, RX_INT_ALL);
+		rx_done = ei_local->ei_eth_recv(netdev, napi, budget);
+	}
+
+	/* Budget exhausted: stay scheduled, keep interrupts masked. */
+	if (rx_done >= budget)
+		return budget;
+
+	napi_complete(napi);
+
+	spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+	/* Enable TX/RX interrupts */
+	reg_int_mask_tx = sys_reg_read(ei_local->fe_tx_int_enable);
+	sys_reg_write(ei_local->fe_tx_int_enable,
+		      reg_int_mask_tx | ei_local->tx_mask);
+	reg_int_mask_rx = sys_reg_read(ei_local->fe_rx_int_enable);
+	sys_reg_write(ei_local->fe_rx_int_enable,
+		      reg_int_mask_rx | ei_local->rx_mask);
+
+	spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+	return rx_done;
+}
+
+/* NAPI poll handler for RSS RX ring 0: drain up to @budget packets,
+ * then complete NAPI and unmask this ring's delayed-RX interrupt.
+ */
+static int raeth_poll_rx_rss0(struct napi_struct *napi, int budget)
+{
+	struct END_DEVICE *ei_local =
+	    container_of(napi, struct END_DEVICE, napi_rx_rss0);
+	struct net_device *netdev = ei_local->netdev;
+	unsigned long reg_int_mask_rx;
+	unsigned long flags;
+	int rx_done = 0;
+
+	rx_done = ei_local->ei_eth_recv_rss0(netdev, napi, budget);
+	/* Budget exhausted: stay scheduled, interrupt remains masked. */
+	if (rx_done >= budget)
+		return budget;
+
+	napi_complete(napi);
+
+	spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+	/* Enable RX interrupt */
+	reg_int_mask_rx = sys_reg_read(ei_local->fe_rx_int_enable);
+	sys_reg_write(ei_local->fe_rx_int_enable,
+		      (reg_int_mask_rx | RING0_RX_DLY_INT));
+
+	spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+	return rx_done;
+}
+
+/* NAPI poll handler for RSS RX ring 1 (same pattern as ring 0). */
+static int raeth_poll_rx_rss1(struct napi_struct *napi, int budget)
+{
+	struct END_DEVICE *ei_local =
+	    container_of(napi, struct END_DEVICE, napi_rx_rss1);
+	struct net_device *netdev = ei_local->netdev;
+	unsigned long reg_int_mask_rx;
+	unsigned long flags;
+	int rx_done = 0;
+
+	rx_done = ei_local->ei_eth_recv_rss1(netdev, napi, budget);
+	if (rx_done >= budget)
+		return budget;
+
+	napi_complete(napi);
+
+	spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+	/* Enable RX interrupt */
+	reg_int_mask_rx = sys_reg_read(ei_local->fe_rx_int_enable);
+	sys_reg_write(ei_local->fe_rx_int_enable,
+		      (reg_int_mask_rx | RING1_RX_DLY_INT));
+
+	spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+	return rx_done;
+}
+
+/* NAPI poll handler for RSS RX ring 2 (same pattern as ring 0). */
+static int raeth_poll_rx_rss2(struct napi_struct *napi, int budget)
+{
+	struct END_DEVICE *ei_local =
+	    container_of(napi, struct END_DEVICE, napi_rx_rss2);
+	struct net_device *netdev = ei_local->netdev;
+	unsigned long reg_int_mask_rx;
+	unsigned long flags;
+	int rx_done = 0;
+
+	rx_done = ei_local->ei_eth_recv_rss2(netdev, napi, budget);
+	if (rx_done >= budget)
+		return budget;
+
+	napi_complete(napi);
+
+	spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+	/* Enable RX interrupt */
+	reg_int_mask_rx = sys_reg_read(ei_local->fe_rx_int_enable);
+	sys_reg_write(ei_local->fe_rx_int_enable,
+		      (reg_int_mask_rx | RING2_RX_DLY_INT));
+
+	spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+	return rx_done;
+}
+
+/* NAPI poll handler for RSS RX ring 3 (same pattern as ring 0). */
+static int raeth_poll_rx_rss3(struct napi_struct *napi, int budget)
+{
+	struct END_DEVICE *ei_local =
+	    container_of(napi, struct END_DEVICE, napi_rx_rss3);
+	struct net_device *netdev = ei_local->netdev;
+	unsigned long reg_int_mask_rx;
+	unsigned long flags;
+	int rx_done = 0;
+
+	rx_done = ei_local->ei_eth_recv_rss3(netdev, napi, budget);
+	if (rx_done >= budget)
+		return budget;
+
+	napi_complete(napi);
+
+	spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+	/* Enable RX interrupt */
+	reg_int_mask_rx = sys_reg_read(ei_local->fe_rx_int_enable);
+	sys_reg_write(ei_local->fe_rx_int_enable,
+		      (reg_int_mask_rx | RING3_RX_DLY_INT));
+
+	spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+	return rx_done;
+}
+
+/* RX-only NAPI poll handler (non-RSS): drain up to @budget packets via
+ * the registered receive routine, then complete NAPI and unmask the
+ * device's RX interrupt set.
+ */
+static int raeth_poll_rx(struct napi_struct *napi, int budget)
+{
+	struct END_DEVICE *ei_local =
+	    container_of(napi, struct END_DEVICE, napi_rx);
+	struct net_device *netdev = ei_local->netdev;
+	unsigned long reg_int_mask_rx;
+	unsigned long flags;
+	int rx_done = 0;
+
+	rx_done = ei_local->ei_eth_recv(netdev, napi, budget);
+	if (rx_done >= budget)
+		return budget;
+
+	napi_complete(napi);
+
+	spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+	/* Enable RX interrupt */
+	reg_int_mask_rx = sys_reg_read(ei_local->fe_rx_int_enable);
+	sys_reg_write(ei_local->fe_rx_int_enable,
+		      (reg_int_mask_rx | ei_local->rx_mask));
+
+	spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+	return rx_done;
+}
+
+/* TX-only NAPI poll handler: ack TX interrupt status, reclaim completed
+ * TX descriptors, then complete NAPI and unmask TX interrupts.
+ */
+static int raeth_poll_tx(struct napi_struct *napi, int budget)
+{
+	struct END_DEVICE *ei_local =
+	    container_of(napi, struct END_DEVICE, napi_tx);
+	struct net_device *netdev = ei_local->netdev;
+	unsigned long reg_int_val_tx;
+	unsigned long reg_int_mask_tx;
+	unsigned long flags;
+	int tx_done = 0;
+
+	reg_int_val_tx = sys_reg_read(ei_local->fe_tx_int_status);
+
+	if (reg_int_val_tx & ei_local->tx_mask) {
+		/* Clear TX interrupt status */
+		sys_reg_write(ei_local->fe_tx_int_status, TX_INT_ALL);
+		tx_done = ei_local->ei_xmit_housekeeping(netdev,
+							 num_tx_max_process);
+	}
+
+	napi_complete(napi);
+
+	spin_lock_irqsave(&ei_local->irq_lock, flags);
+	/* Enable TX interrupts */
+	reg_int_mask_tx = sys_reg_read(ei_local->fe_tx_int_enable);
+	sys_reg_write(ei_local->fe_tx_int_enable,
+		      reg_int_mask_tx | ei_local->tx_mask);
+
+	spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+	/* NOTE(review): always reports 1 (< budget) so NAPI never
+	 * reschedules on TX work alone; tx_done is computed but unused —
+	 * confirm the constant return is intentional.
+	 */
+	return 1;
+}
+
+/* Bind the TX/RX fast-path callbacks and the interrupt status/enable
+ * register banks according to the feature flags: QDMA vs PDMA for each
+ * direction, and HW-LRO / RSS-4ring / RSS-2ring / plain receive.
+ */
+static void ei_func_register(struct END_DEVICE *ei_local)
+{
+	/* TX handling */
+	if (ei_local->features & FE_QDMA_TX) {
+		ei_local->ei_start_xmit = ei_qdma_start_xmit;
+		ei_local->ei_xmit_housekeeping = ei_qdma_xmit_housekeeping;
+		ei_local->fe_tx_int_status = (void __iomem *)QFE_INT_STATUS;
+		ei_local->fe_tx_int_enable = (void __iomem *)QFE_INT_ENABLE;
+	} else {
+		ei_local->ei_start_xmit = ei_pdma_start_xmit;
+		ei_local->ei_xmit_housekeeping = ei_pdma_xmit_housekeeping;
+		ei_local->fe_tx_int_status =
+		    (void __iomem *)RAETH_FE_INT_STATUS;
+		ei_local->fe_tx_int_enable =
+		    (void __iomem *)RAETH_FE_INT_ENABLE;
+	}
+
+	/* RX handling */
+	if (ei_local->features & FE_QDMA_RX) {
+		ei_local->fe_rx_int_status = (void __iomem *)QFE_INT_STATUS;
+		ei_local->fe_rx_int_enable = (void __iomem *)QFE_INT_ENABLE;
+	} else {
+		ei_local->fe_rx_int_status =
+		    (void __iomem *)RAETH_FE_INT_STATUS;
+		ei_local->fe_rx_int_enable =
+		    (void __iomem *)RAETH_FE_INT_ENABLE;
+	}
+
+	/* HW LRO handling */
+	/* Receive-routine selection is mutually exclusive:
+	 * HW LRO takes priority, then 4-ring RSS, then 2-ring RSS,
+	 * falling back to the plain single-ring receive path.
+	 */
+	if (ei_local->features & FE_HW_LRO) {
+		ei_local->ei_eth_recv = fe_hw_lro_recv;
+	} else if (ei_local->features & FE_RSS_4RING) {
+		ei_local->ei_eth_recv_rss0 = fe_rss0_recv;
+		ei_local->ei_eth_recv_rss1 = fe_rss1_recv;
+		ei_local->ei_eth_recv_rss2 = fe_rss2_recv;
+		ei_local->ei_eth_recv_rss3 = fe_rss3_recv;
+	} else if (ei_local->features & FE_RSS_2RING) {
+		ei_local->ei_eth_recv_rss0 = fe_rss0_recv;
+		ei_local->ei_eth_recv_rss1 = fe_rss1_recv;
+	} else {
+		ei_local->ei_eth_recv = rt2880_eth_recv;
+	}
+}
+
+/* One-time device initialisation: reset the frame engine, register the
+ * appropriate NAPI contexts on a dummy netdev (two net_devices share one
+ * DMA ring), initialise locks, bind fast-path callbacks, and run
+ * MT7621-specific GMAC/switch setup. Always returns 0.
+ */
+static int __init ei_init(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+
+	fe_reset();
+
+	if (ei_local->features & FE_INT_NAPI) {
+		/* we run 2 devices on the same DMA ring */
+		/* so we need a dummy device for NAPI to work */
+		init_dummy_netdev(&ei_local->dummy_dev);
+
+		/* NAPI topology depends on the interrupt model:
+		 * separate TX/RX contexts, RX-only (per-RSS-ring when RSS
+		 * is enabled), or a single combined TX+RX context.
+		 */
+		if (ei_local->features & FE_INT_NAPI_TX_RX) {
+			netif_napi_add(&ei_local->dummy_dev, &ei_local->napi_rx,
+				       raeth_poll_rx, MTK_NAPI_WEIGHT);
+			netif_napi_add(&ei_local->dummy_dev, &ei_local->napi_tx,
+				       raeth_poll_tx, MTK_NAPI_WEIGHT);
+
+		} else if (ei_local->features & FE_INT_NAPI_RX_ONLY) {
+			if (ei_local->features & FE_RSS_4RING) {
+				netif_napi_add(&ei_local->dummy_dev,
+					       &ei_local->napi_rx_rss0,
+					       raeth_poll_rx_rss0, MTK_NAPI_WEIGHT);
+				netif_napi_add(&ei_local->dummy_dev,
+					       &ei_local->napi_rx_rss1,
+					       raeth_poll_rx_rss1, MTK_NAPI_WEIGHT);
+				netif_napi_add(&ei_local->dummy_dev,
+					       &ei_local->napi_rx_rss2,
+					       raeth_poll_rx_rss2, MTK_NAPI_WEIGHT);
+				netif_napi_add(&ei_local->dummy_dev,
+					       &ei_local->napi_rx_rss3,
+					       raeth_poll_rx_rss3, MTK_NAPI_WEIGHT);
+			} else if (ei_local->features & FE_RSS_2RING) {
+				netif_napi_add(&ei_local->dummy_dev,
+					       &ei_local->napi_rx_rss0,
+					       raeth_poll_rx_rss0, MTK_NAPI_WEIGHT);
+				netif_napi_add(&ei_local->dummy_dev,
+					       &ei_local->napi_rx_rss1,
+					       raeth_poll_rx_rss1, MTK_NAPI_WEIGHT);
+			} else {
+				netif_napi_add(&ei_local->dummy_dev,
+					       &ei_local->napi_rx,
+					       raeth_poll_rx, MTK_NAPI_WEIGHT);
+			}
+		} else {
+			netif_napi_add(&ei_local->dummy_dev, &ei_local->napi,
+				       raeth_poll_full, MTK_NAPI_WEIGHT);
+		}
+	}
+
+	spin_lock_init(&ei_local->page_lock);
+	spin_lock_init(&ei_local->irq_lock);
+	spin_lock_init(&ei_local->mdio_lock);
+	ether_setup(dev);
+
+	ei_func_register(ei_local);
+
+	/* init  my IP */
+	strncpy(ei_local->lan_ip4_addr, FE_DEFAULT_LAN_IP, IP4_ADDR_LEN);
+
+	if (ei_local->chip_name == MT7621_FE) {
+		fe_gmac_reset();
+		fe_sw_init();
+	}
+
+	return 0;
+}
+
+/* Tear down the primary (and, with FE_GE2_SUPPORT, the pseudo GE2)
+ * net_device and release the debug proc entries.
+ *
+ * Fix: ei_local is the private area of @dev and is freed together with
+ * it by free_netdev(). The original code read ei_local->features and
+ * ei_local->pseudo_dev AFTER free_netdev(dev) — a use-after-free. The
+ * pseudo device is now torn down first, while ei_local is still valid.
+ */
+static void ei_uninit(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+
+	/* Release the GE2 pseudo device before ei_local's storage is
+	 * freed along with @dev below.
+	 */
+	if (ei_local->features & FE_GE2_SUPPORT) {
+		unregister_netdev(ei_local->pseudo_dev);
+		free_netdev(ei_local->pseudo_dev);
+	}
+
+	unregister_netdev(dev);
+	free_netdev(dev);
+
+	pr_info("Free ei_local and unregister netdev...\n");
+
+	debug_proc_exit();
+}
+/* Ensure @dev has a usable MAC address (randomising it if invalid) and
+ * program it into the hardware via ei_set_mac_addr().
+ */
+static void ei_mac_addr_setting(struct net_device *dev)
+{
+	/* If the mac address is invalid, use random mac address  */
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		random_ether_addr(dev->dev_addr);
+		dev->addr_assign_type = NET_ADDR_RANDOM;
+	}
+
+	ei_set_mac_addr(dev, dev->dev_addr);
+}
+
+/* Ensure the second (GE2) net_device has a usable MAC address.
+ * NOTE(review): unlike ei_mac_addr_setting() this does not push the
+ * address to hardware — presumably GMAC2 is programmed elsewhere;
+ * confirm this asymmetry is intentional.
+ */
+static void ei_mac2_addr_setting(struct net_device *dev)
+{
+	/* If the mac address is invalid, use random mac address  */
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		random_ether_addr(dev->dev_addr);
+		dev->addr_assign_type = NET_ADDR_RANDOM;
+	}
+}
+
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+/* Seed the software copies of the RX CPU-index registers so the fast
+ * path can track ring positions from variables instead of re-reading
+ * the hardware registers. Rings 1-3 only exist with HW-LRO / RSS.
+ */
+static void fe_dma_rx_cal_idx_init(struct END_DEVICE *ei_local)
+{
+	if (unlikely(ei_local->features & FE_QDMA_RX)) {
+		ei_local->rx_calc_idx[0] = sys_reg_read(QRX_CRX_IDX_0);
+	} else {		/* PDMA RX */
+		ei_local->rx_calc_idx[0] = sys_reg_read(RX_CALC_IDX0);
+		if (ei_local->features & (FE_HW_LRO | FE_RSS_4RING)) {
+			ei_local->rx_calc_idx[1] = sys_reg_read(RX_CALC_IDX1);
+			ei_local->rx_calc_idx[2] = sys_reg_read(RX_CALC_IDX2);
+			ei_local->rx_calc_idx[3] = sys_reg_read(RX_CALC_IDX3);
+		} else if (ei_local->features & FE_RSS_2RING) {
+			ei_local->rx_calc_idx[1] = sys_reg_read(RX_CALC_IDX1);
+		}
+	}
+}
+#endif
+
+/* Bring up DMA for the PDMA-TX + PDMA-RX configuration: wait for the
+ * engine to go idle, initialise RX (plus optional HW-LRO / RSS rings)
+ * and TX rings, apply the PDMA global config and enable ADMA RXD
+ * prefetch. Returns 0 on success or the first failing step's error.
+ */
+static inline int ei_init_ptx_prx(struct net_device *dev)
+{
+	int err;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+
+	err = fe_pdma_wait_dma_idle();
+	if (err)
+		return err;
+
+	err = fe_pdma_rx_dma_init(dev);
+	if (err)
+		return err;
+
+	if (ei_local->features & FE_HW_LRO) {
+		err = fe_hw_lro_init(dev);
+		if (err)
+			return err;
+	} else if (ei_local->features & FE_RSS_4RING) {
+		err = fe_rss_4ring_init(dev);
+		if (err)
+			return err;
+	} else if (ei_local->features & FE_RSS_2RING) {
+		err = fe_rss_2ring_init(dev);
+		if (err)
+			return err;
+	}
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+	fe_dma_rx_cal_idx_init(ei_local);
+#endif
+
+	err = fe_pdma_tx_dma_init(dev);
+	if (err)
+		return err;
+
+	set_fe_pdma_glo_cfg();
+
+	/* enable RXD prefetch of ADMA */
+	SET_PDMA_LRO_RXD_PREFETCH_EN(ADMA_RXD_PREFETCH_EN |
+				     ADMA_MULTI_RXD_PREFETCH_EN);
+
+	return 0;
+}
+
+/* Bring up DMA for the QDMA-TX + PDMA-RX configuration: wait for both
+ * engines to go idle, initialise QDMA and PDMA RX rings (plus optional
+ * HW-LRO / RSS rings) and the QDMA TX ring, apply both global configs
+ * and enable ADMA RXD prefetch. Returns 0 or the first error.
+ */
+static inline int ei_init_qtx_prx(struct net_device *dev)
+{
+	int err;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+
+	err = fe_pdma_wait_dma_idle();
+	if (err)
+		return err;
+
+	err = fe_qdma_wait_dma_idle();
+	if (err)
+		return err;
+
+	err = fe_qdma_rx_dma_init(dev);
+	if (err)
+		return err;
+
+	err = fe_pdma_rx_dma_init(dev);
+	if (err)
+		return err;
+
+	if (ei_local->features & FE_HW_LRO) {
+		err = fe_hw_lro_init(dev);
+		if (err)
+			return err;
+	} else if (ei_local->features & FE_RSS_4RING) {
+		err = fe_rss_4ring_init(dev);
+		if (err)
+			return err;
+	} else if (ei_local->features & FE_RSS_2RING) {
+		err = fe_rss_2ring_init(dev);
+		if (err)
+			return err;
+	}
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+	fe_dma_rx_cal_idx_init(ei_local);
+#endif
+
+	err = fe_qdma_tx_dma_init(dev);
+	if (err)
+		return err;
+
+	set_fe_pdma_glo_cfg();
+	set_fe_qdma_glo_cfg();
+
+	/* enable RXD prefetch of ADMA */
+	SET_PDMA_LRO_RXD_PREFETCH_EN(ADMA_RXD_PREFETCH_EN |
+				     ADMA_MULTI_RXD_PREFETCH_EN);
+
+	return 0;
+}
+
+/* Bring up DMA for the QDMA-TX + QDMA-RX configuration: wait for the
+ * QDMA engine to go idle, initialise RX and TX rings and apply the
+ * QDMA global config. Returns 0 or the first failing step's error.
+ */
+static inline int ei_init_qtx_qrx(struct net_device *dev)
+{
+	int err;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+
+	err = fe_qdma_wait_dma_idle();
+	if (err)
+		return err;
+
+	err = fe_qdma_rx_dma_init(dev);
+	if (err)
+		return err;
+
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+	fe_dma_rx_cal_idx_init(ei_local);
+#endif
+
+	err = fe_qdma_tx_dma_init(dev);
+	if (err)
+		return err;
+
+	set_fe_qdma_glo_cfg();
+
+	return 0;
+}
+
+/* Dispatch DMA bring-up to the helper matching the TX/RX engine
+ * combination selected by the feature flags (QDMA/QDMA, QDMA/PDMA,
+ * or PDMA/PDMA). Returns the helper's result.
+ */
+static int ei_init_dma(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+
+	if ((ei_local->features & FE_QDMA_TX) &&
+	    (ei_local->features & FE_QDMA_RX))
+		return ei_init_qtx_qrx(dev);
+
+	if (ei_local->features & FE_QDMA_TX)
+		return ei_init_qtx_prx(dev);
+	else
+		return ei_init_ptx_prx(dev);
+}
+
+/* Release all TX/RX DMA ring memory, mirroring ei_init_dma(): QDMA or
+ * PDMA TX, PDMA RX unless QDMA-RX is in use, then the optional
+ * HW-LRO / RSS ring resources.
+ */
+static void ei_deinit_dma(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+
+	if (ei_local->features & FE_QDMA_TX) {
+		fe_qdma_tx_dma_deinit(dev);
+		fe_qdma_rx_dma_deinit(dev);
+	} else {
+		fe_pdma_tx_dma_deinit(dev);
+	}
+
+	if (!(ei_local->features & FE_QDMA_RX))
+		fe_pdma_rx_dma_deinit(dev);
+
+	if (ei_local->features & FE_HW_LRO)
+		fe_hw_lro_deinit(dev);
+	else if (ei_local->features & FE_RSS_4RING)
+		fe_rss_4ring_deinit(dev);
+	else if (ei_local->features & FE_RSS_2RING)
+		fe_rss_2ring_deinit(dev);
+
+	pr_info("Free TX/RX Ring Memory!\n");
+}
+
+/* MT7623 PSE reset workaround to do PSE reset */
+/* Sequence (under page_lock): assert FE_GLO_MISC bit 0, assert
+ * ADMA_LRO_CTRL_DW3 bit 14, then poll ADMA_RX_DBG0 (up to ~100 * 10 ms)
+ * until the RX state machine is idle and the FIFOs drain, and finally
+ * deassert both bits. Bit-field decoding of ADMA_RX_DBG0 follows the
+ * vendor register layout.
+ */
+void fe_do_reset(void)
+{
+	u32 adma_rx_dbg0_r = 0;
+	u32 dbg_rx_curr_state, rx_fifo_wcnt;
+	u32 dbg_cdm_lro_rinf_afifo_rempty, dbg_cdm_eof_rdy_afifo_empty;
+	u32 reg_tmp, loop_count;
+	unsigned long flags;
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	ei_local->fe_reset_times++;
+	/* do CDM/PDMA reset */
+	pr_crit("[%s] CDM/PDMA reset (%d times)!!!\n", __func__,
+		ei_local->fe_reset_times);
+	spin_lock_irqsave(&ei_local->page_lock, flags);
+	reg_tmp = sys_reg_read(FE_GLO_MISC);
+	reg_tmp |= 0x1;
+	sys_reg_write(FE_GLO_MISC, reg_tmp);
+	mdelay(10);
+	reg_tmp = sys_reg_read(ADMA_LRO_CTRL_DW3);
+	reg_tmp |= (0x1 << 14);
+	sys_reg_write(ADMA_LRO_CTRL_DW3, reg_tmp);
+	loop_count = 0;
+	do {
+		adma_rx_dbg0_r = sys_reg_read(ADMA_RX_DBG0);
+		dbg_rx_curr_state = (adma_rx_dbg0_r >> 16) & 0x7f;
+		rx_fifo_wcnt = (adma_rx_dbg0_r >> 8) & 0x3f;
+		dbg_cdm_lro_rinf_afifo_rempty = (adma_rx_dbg0_r >> 7) & 0x1;
+		dbg_cdm_eof_rdy_afifo_empty = (adma_rx_dbg0_r >> 6) & 0x1;
+		loop_count++;
+		if (loop_count >= 100) {
+			/* Give up after ~1 s and deassert anyway. */
+			pr_err("[%s] loop_count timeout!!!\n", __func__);
+			break;
+		}
+		mdelay(10);
+	} while (((dbg_rx_curr_state != 0x17) && (dbg_rx_curr_state != 0x00)) ||
+		 (rx_fifo_wcnt != 0) ||
+		 (!dbg_cdm_lro_rinf_afifo_rempty) ||
+		 (!dbg_cdm_eof_rdy_afifo_empty));
+	reg_tmp = sys_reg_read(ADMA_LRO_CTRL_DW3);
+	reg_tmp &= 0xffffbfff;
+	sys_reg_write(ADMA_LRO_CTRL_DW3, reg_tmp);
+	reg_tmp = sys_reg_read(FE_GLO_MISC);
+	reg_tmp &= 0xfffffffe;
+	sys_reg_write(FE_GLO_MISC, reg_tmp);
+	spin_unlock_irqrestore(&ei_local->page_lock, flags);
+}
+
+/* MT7623 PSE reset workaround to poll if PSE hang */
+/* Kthread body: every FE_RESET_POLLING_MS, sample ADMA_RX_DBG0 and, if
+ * the debug bits match the vendor's PSE-P0-hang signature, trigger
+ * fe_do_reset(). Exits when kthread_should_stop() is set.
+ */
+static int fe_reset_thread(void *data)
+{
+	u32 adma_rx_dbg0_r = 0;
+	u32 dbg_rx_curr_state, rx_fifo_wcnt;
+	u32 dbg_cdm_lro_rinf_afifo_rempty, dbg_cdm_eof_rdy_afifo_empty;
+
+	pr_info("%s called\n", __func__);
+
+	for (;;) {
+		adma_rx_dbg0_r = sys_reg_read(ADMA_RX_DBG0);
+		dbg_rx_curr_state = (adma_rx_dbg0_r >> 16) & 0x7f;
+		rx_fifo_wcnt = (adma_rx_dbg0_r >> 8) & 0x3f;
+		dbg_cdm_lro_rinf_afifo_rempty = (adma_rx_dbg0_r >> 7) & 0x1;
+		dbg_cdm_eof_rdy_afifo_empty = (adma_rx_dbg0_r >> 6) & 0x1;
+
+		/* check if PSE P0 hang */
+		/* NOTE(review): hang signature tests (rx_fifo_wcnt & 0x20)
+		 * i.e. FIFO half-full bit, not a full count — assumed to
+		 * match the vendor's errata criteria; confirm.
+		 */
+		if (dbg_cdm_lro_rinf_afifo_rempty &&
+		    dbg_cdm_eof_rdy_afifo_empty &&
+		    (rx_fifo_wcnt & 0x20) &&
+		    ((dbg_rx_curr_state == 0x17) ||
+		     (dbg_rx_curr_state == 0x00))) {
+			fe_do_reset();
+		}
+
+		msleep_interruptible(FE_RESET_POLLING_MS);
+		if (kthread_should_stop())
+			break;
+	}
+
+	pr_info("%s leaved\n", __func__);
+	return 0;
+}
+
+/* Kthread body: every PHY_POLLING_MS, poll PHY address 0 over MDIO
+ * (clause-22 status/advertisement and clause-45 link/EEE registers),
+ * fold link speed, duplex, flow control and EEE results into a MAC
+ * control value written at ETHDMASYS_ETH_MAC_BASE + 0x200, or force
+ * the link down when no link is reported. Bit positions follow the
+ * vendor MAC-control register layout.
+ */
+static int phy_polling_thread(void *data)
+{
+	unsigned int link_status, link_speed, duplex;
+	unsigned int local_eee, lp_eee;
+	unsigned int fc_phy, fc_lp;
+	unsigned int val_tmp;
+
+	pr_info("%s called\n", __func__);
+	val_tmp = 1;
+	for (;;) {
+		/* BMSR bit 2 = link status */
+		mii_mgr_read(0x0, 0x1, &link_status);
+		link_status = (link_status >> 2) & 0x1;
+		if (link_status) {
+			/* Pause/asym-pause (bits 11:10) must match between
+			 * our advertisement (reg 4) and partner (reg 5)
+			 * before enabling TX/RX flow control (bits 5:4).
+			 */
+			mii_mgr_read(0x0, 0x4, &fc_phy);
+			mii_mgr_read(0x0, 0x5, &fc_lp);
+			if ((fc_phy & 0xc00) == (fc_lp & 0xc00))
+				val_tmp = val_tmp | 0x30;
+			else
+				val_tmp = val_tmp & (~0x30);
+			mii_mgr_read_cl45(0, 0x1e, 0xa2, &link_speed);
+			duplex = link_speed & 0x20;
+			if (duplex)
+				val_tmp = val_tmp | 0x2;
+			else
+				val_tmp = val_tmp & (~0x2);
+			/* Speed field: 0x04 -> 100M, 0x08 -> 1G (bits 3:2
+			 * of the MAC control value); otherwise 10M.
+			 */
+			link_speed = link_speed & 0xe;
+			val_tmp = val_tmp & (~0xc);
+			if (link_speed == 0x04)
+				val_tmp = val_tmp | (0x4);
+			else if (link_speed == 0x08)
+				val_tmp = val_tmp | (0x8);
+			mii_mgr_read_cl45(0, 0x7, 0x3c, &local_eee);
+			mii_mgr_read_cl45(0, 0x7, 0x3d, &lp_eee);
+			if ((local_eee & 0x4) == 4 && (lp_eee & 0x4) == 4)/*1g eee*/
+				val_tmp = val_tmp | 0x80;
+			if ((local_eee & 0x2) == 2 && ((lp_eee & 0x2) == 2))/*100m eee*/
+				val_tmp = val_tmp | 0x40;
+			val_tmp = val_tmp & 0xff;
+			sys_reg_write(ETHDMASYS_ETH_MAC_BASE + 0x200, 0x2105e300 | val_tmp);
+		} else {
+			/*force link down*/
+			set_ge2_force_link_down();
+		}
+
+		msleep_interruptible(PHY_POLLING_MS);
+		if (kthread_should_stop())
+			break;
+	}
+
+	pr_info("%s leaved\n", __func__);
+	return 0;
+}
+
+#if 0
+/* Dead code: these two NAPI interrupt handlers are compiled out and
+ * kept only for reference; the active equivalents are the *_napi_sep
+ * handlers below. Consider deleting if they remain unused.
+ */
+static irqreturn_t ei_interrupt_napi_rx_only(int irq, void *dev_id)
+{
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	unsigned int reg_int_mask;
+	unsigned long flags;
+
+	if (likely(napi_schedule_prep(&ei_local->napi_rx))) {
+		spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+		/* Clear RX interrupt status */
+		sys_reg_write(ei_local->fe_rx_int_status, RX_INT_ALL);
+
+		/* Disable RX interrupt */
+		reg_int_mask = sys_reg_read(ei_local->fe_rx_int_enable);
+		sys_reg_write(ei_local->fe_rx_int_enable,
+			      reg_int_mask & ~(RX_INT_ALL));
+
+		spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+		__napi_schedule(&ei_local->napi_rx);
+	}
+
+	return IRQ_HANDLED;
+}
+static irqreturn_t ei_interrupt_napi(int irq, void *dev_id)
+{
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	unsigned long flags;
+
+	if (likely(napi_schedule_prep(&ei_local->napi))) {
+		spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+		/* Disable TX interrupt */
+		sys_reg_write(ei_local->fe_tx_int_enable, 0);
+		/* Disable RX interrupt */
+		sys_reg_write(ei_local->fe_rx_int_enable, 0);
+
+		spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+		__napi_schedule(&ei_local->napi);
+	}
+
+	return IRQ_HANDLED;
+}
+#endif
+/* IRQ handler for the separate-TX/RX NAPI model: mask TX and RX
+ * interrupts and schedule both NAPI contexts; each poll handler
+ * re-enables its own interrupts when done.
+ */
+static irqreturn_t ei_interrupt_napi_sep(int irq, void *dev_id)
+{
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	unsigned int reg_int_mask;
+	unsigned long flags;
+
+	if (likely(napi_schedule_prep(&ei_local->napi_tx))) {
+		spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+		/* Disable TX interrupt */
+		reg_int_mask = sys_reg_read(ei_local->fe_tx_int_enable);
+		sys_reg_write(ei_local->fe_tx_int_enable,
+			      reg_int_mask & ~(TX_INT_ALL));
+
+		spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+		__napi_schedule(&ei_local->napi_tx);
+	}
+
+	if (likely(napi_schedule_prep(&ei_local->napi_rx))) {
+		spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+		/* Disable RX interrupt */
+		reg_int_mask = sys_reg_read(ei_local->fe_rx_int_enable);
+		sys_reg_write(ei_local->fe_rx_int_enable,
+			      reg_int_mask & ~(RX_INT_ALL));
+
+		spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+		__napi_schedule(&ei_local->napi_rx);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* Legacy (non-NAPI) combined IRQ handler: gathers PDMA and (optional)
+ * QDMA interrupt status, acks it, runs TX housekeeping inline, and
+ * defers RX to a workqueue or tasklet with RX interrupts masked.
+ * Uses the file-scope pending_recv flag to remember RX work that
+ * arrived while the TX ring was full.
+ */
+static irqreturn_t ei_interrupt(int irq, void *dev_id)
+{
+	unsigned long reg_int_val = 0;
+	unsigned long reg_int_val_p = 0;
+	unsigned long reg_int_val_q = 0;
+	unsigned long reg_int_mask = 0;
+	unsigned int recv = 0;
+
+	unsigned int transmit __maybe_unused = 0;
+	unsigned long flags;
+
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+
+	if (!dev) {
+		pr_err("net_interrupt(): irq %x for unknown device.\n",
+		       IRQ_ENET0);
+		return IRQ_NONE;
+	}
+
+	spin_lock_irqsave(&ei_local->irq_lock, flags);
+	reg_int_val_p = sys_reg_read(RAETH_FE_INT_STATUS);
+
+	if (ei_local->features & FE_QDMA)
+		reg_int_val_q = sys_reg_read(QFE_INT_STATUS);
+	reg_int_val = reg_int_val_p | reg_int_val_q;
+
+	if (reg_int_val & ei_local->rx_mask)
+		recv = 1;
+	if (reg_int_val & ei_local->tx_mask)
+		transmit = 1;
+	if (ei_local->features & FE_QDMA)
+		sys_reg_write(QFE_INT_STATUS, reg_int_val_q);
+
+	ei_local->ei_xmit_housekeeping(dev, num_tx_max_process);
+
+	/* QWERT */
+	sys_reg_write(RAETH_FE_INT_STATUS, reg_int_val_p);
+
+	if (((recv == 1) || (pending_recv == 1)) &&
+	    (ei_local->tx_ring_full == 0)) {
+		/* Mask RX interrupts before handing RX work to the
+		 * deferred context (workqueue or tasklet).
+		 */
+		reg_int_mask = sys_reg_read(RAETH_FE_INT_ENABLE);
+
+		sys_reg_write(RAETH_FE_INT_ENABLE,
+			      reg_int_mask & ~(ei_local->rx_mask));
+		/*QDMA RX*/
+		if (ei_local->features & FE_QDMA) {
+			reg_int_mask = sys_reg_read(QFE_INT_ENABLE);
+			if (ei_local->features & FE_DLY_INT)
+				sys_reg_write(QFE_INT_ENABLE,
+					      reg_int_mask & ~(RX_DLY_INT));
+			else
+				sys_reg_write(QFE_INT_ENABLE,
+					      reg_int_mask & ~(RX_DONE_INT0 |
+							       RX_DONE_INT1 |
+							       RX_DONE_INT2 |
+							       RX_DONE_INT3));
+		}
+
+		pending_recv = 0;
+
+		if (ei_local->features & FE_INT_WORKQ)
+			schedule_work(&ei_local->rx_wq);
+		else
+			tasklet_hi_schedule(&ei_local->rx_tasklet);
+	} else if (recv == 1 && ei_local->tx_ring_full == 1) {
+		/* TX ring full: postpone RX processing to a later IRQ. */
+		pending_recv = 1;
+	}
+	spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+/* RX IRQ handler for the combined-NAPI model: if any RX interrupt is
+ * pending, mask both TX and RX and schedule the shared NAPI context;
+ * otherwise ack whatever non-TX status was raised.
+ */
+static irqreturn_t ei_rx_interrupt_napi(int irq, void *dev_id)
+{
+	struct net_device *netdev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local;
+	unsigned int reg_int_val;
+	unsigned long flags;
+
+	if (unlikely(!netdev)) {
+		pr_info("net_interrupt(): irq %x for unknown device.\n", irq);
+		return IRQ_NONE;
+	}
+	ei_local = netdev_priv(netdev);
+
+	reg_int_val = sys_reg_read(ei_local->fe_rx_int_status);
+	if (likely(reg_int_val & RX_INT_ALL)) {
+		if (likely(napi_schedule_prep(&ei_local->napi))) {
+			spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+			/* Disable RX interrupt */
+			sys_reg_write(ei_local->fe_rx_int_enable, 0);
+			/* Disable TX interrupt */
+			sys_reg_write(ei_local->fe_tx_int_enable, 0);
+
+			spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+			__napi_schedule(&ei_local->napi);
+		}
+	} else {
+		spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+		/* Ack other interrupt status except TX irqs */
+		reg_int_val &= ~(TX_INT_ALL);
+		sys_reg_write(ei_local->fe_rx_int_status, reg_int_val);
+
+		spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* RX IRQ handler for RSS group 0 (rings 0 and 1 sharing one IRQ line):
+ * for each ring with pending status, mask and ack that ring's
+ * interrupt, then schedule its per-ring NAPI context.
+ */
+static irqreturn_t ei_rx_interrupt_napi_g0(int irq, void *dev_id)
+{
+	struct net_device *netdev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local;
+	unsigned int reg_int_val, reg_int_val_0, reg_int_val_1, reg_int_mask;
+	unsigned long flags;
+
+	if (unlikely(!netdev)) {
+		pr_info("net_interrupt(): irq %x for unknown device.\n", irq);
+		return IRQ_NONE;
+	}
+	ei_local = netdev_priv(netdev);
+
+	reg_int_val = sys_reg_read(ei_local->fe_rx_int_status);
+	reg_int_val_0 = reg_int_val & RSS_RX_RING0;
+	reg_int_val_1 = reg_int_val & RSS_RX_RING1;
+	if (likely(reg_int_val_0)) {
+		spin_lock_irqsave(&ei_local->irq_lock, flags);
+		/* Disable RX interrupt */
+		reg_int_mask = sys_reg_read(ei_local->fe_rx_int_enable);
+		sys_reg_write(ei_local->fe_rx_int_enable, reg_int_mask & ~(RSS_RX_RING0));
+		/* Clear RX interrupt status */
+		sys_reg_write(ei_local->fe_rx_int_status, RSS_RX_RING0);
+
+		spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+	}
+	if (likely(reg_int_val_1)) {
+		spin_lock_irqsave(&ei_local->irq_lock, flags);
+		/* Disable RX interrupt */
+		reg_int_mask = sys_reg_read(ei_local->fe_rx_int_enable);
+		sys_reg_write(ei_local->fe_rx_int_enable, reg_int_mask & ~(RSS_RX_RING1));
+		/* Clear RX interrupt status */
+		sys_reg_write(ei_local->fe_rx_int_status, RSS_RX_RING1);
+		spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+	}
+	/* Schedule NAPI only after both rings' interrupts are masked. */
+	if (likely(reg_int_val_0)) {
+		if (likely(napi_schedule_prep(&ei_local->napi_rx_rss0)))
+			__napi_schedule(&ei_local->napi_rx_rss0);
+	}
+
+	if (likely(reg_int_val_1)) {
+		if (likely(napi_schedule_prep(&ei_local->napi_rx_rss1)))
+			__napi_schedule(&ei_local->napi_rx_rss1);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* Dedicated RX IRQ handler for RSS ring 0: mask and ack the ring's
+ * interrupt, then schedule its NAPI context.
+ */
+static irqreturn_t ei_rx_interrupt_napi_rss0(int irq, void *dev_id)
+{
+	struct net_device *netdev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local;
+	unsigned int reg_int_val, reg_int_val_0, reg_int_mask;
+	unsigned long flags;
+
+	if (unlikely(!netdev)) {
+		pr_info("net_interrupt(): irq %x for unknown device.\n", irq);
+		return IRQ_NONE;
+	}
+	ei_local = netdev_priv(netdev);
+
+	reg_int_val = sys_reg_read(ei_local->fe_rx_int_status);
+	reg_int_val_0 = reg_int_val & RSS_RX_RING0;
+	if (likely(reg_int_val_0)) {
+		spin_lock_irqsave(&ei_local->irq_lock, flags);
+		/* Disable RX interrupt */
+		reg_int_mask = sys_reg_read(ei_local->fe_rx_int_enable);
+		sys_reg_write(ei_local->fe_rx_int_enable, reg_int_mask & ~(RSS_RX_RING0));
+		/* Clear RX interrupt status */
+		sys_reg_write(ei_local->fe_rx_int_status, RSS_RX_RING0);
+
+		spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+	}
+	if (likely(reg_int_val_0)) {
+		if (likely(napi_schedule_prep(&ei_local->napi_rx_rss0)))
+			__napi_schedule(&ei_local->napi_rx_rss0);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* Dedicated RX IRQ handler for RSS ring 1: mask and ack the ring's
+ * interrupt, then schedule its NAPI context.
+ */
+static irqreturn_t ei_rx_interrupt_napi_rss1(int irq, void *dev_id)
+{
+	struct net_device *netdev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local;
+	unsigned int reg_int_val, reg_int_val_1, reg_int_mask;
+	unsigned long flags;
+
+	if (unlikely(!netdev)) {
+		pr_info("net_interrupt(): irq %x for unknown device.\n", irq);
+		return IRQ_NONE;
+	}
+	ei_local = netdev_priv(netdev);
+
+	reg_int_val = sys_reg_read(ei_local->fe_rx_int_status);
+	reg_int_val_1 = reg_int_val & RSS_RX_RING1;
+
+	if (likely(reg_int_val_1)) {
+		spin_lock_irqsave(&ei_local->irq_lock, flags);
+		/* Disable RX interrupt */
+		reg_int_mask = sys_reg_read(ei_local->fe_rx_int_enable);
+		sys_reg_write(ei_local->fe_rx_int_enable, reg_int_mask & ~(RSS_RX_RING1));
+		/* Clear RX interrupt status */
+		sys_reg_write(ei_local->fe_rx_int_status, RSS_RX_RING1);
+		spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+	}
+
+	if (likely(reg_int_val_1)) {
+		if (likely(napi_schedule_prep(&ei_local->napi_rx_rss1)))
+			__napi_schedule(&ei_local->napi_rx_rss1);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* RX IRQ handler for RSS group 1 (rings 2 and 3 sharing one IRQ line):
+ * same mask/ack-then-schedule pattern as group 0.
+ */
+static irqreturn_t ei_rx_interrupt_napi_g1(int irq, void *dev_id)
+{
+	struct net_device *netdev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local;
+	unsigned int reg_int_val, reg_int_val_0, reg_int_val_1, reg_int_mask;
+	unsigned long flags;
+
+	if (unlikely(!netdev)) {
+		pr_info("net_interrupt(): irq %x for unknown device.\n", irq);
+		return IRQ_NONE;
+	}
+	ei_local = netdev_priv(netdev);
+
+	reg_int_val = sys_reg_read(ei_local->fe_rx_int_status);
+	/* reg_int_val_0/_1 track rings 2/3 respectively here. */
+	reg_int_val_0 = reg_int_val & RSS_RX_RING2;
+	reg_int_val_1 = reg_int_val & RSS_RX_RING3;
+	if (likely(reg_int_val_0)) {
+		spin_lock_irqsave(&ei_local->irq_lock, flags);
+		/* Disable RX interrupt */
+		reg_int_mask = sys_reg_read(ei_local->fe_rx_int_enable);
+		sys_reg_write(ei_local->fe_rx_int_enable, reg_int_mask & ~(RSS_RX_RING2));
+		/* Clear RX interrupt status */
+		sys_reg_write(ei_local->fe_rx_int_status, RSS_RX_RING2);
+		spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+	}
+	if (likely(reg_int_val_1)) {
+		spin_lock_irqsave(&ei_local->irq_lock, flags);
+		/* Disable RX interrupt */
+		reg_int_mask = sys_reg_read(ei_local->fe_rx_int_enable);
+		sys_reg_write(ei_local->fe_rx_int_enable, reg_int_mask & ~(RSS_RX_RING3));
+		/* Clear RX interrupt status */
+		sys_reg_write(ei_local->fe_rx_int_status, RSS_RX_RING3);
+		spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+	}
+	if (likely(reg_int_val_0)) {
+		if (likely(napi_schedule_prep(&ei_local->napi_rx_rss2)))
+			__napi_schedule(&ei_local->napi_rx_rss2);
+	}
+
+	if (likely(reg_int_val_1)) {
+		if (likely(napi_schedule_prep(&ei_local->napi_rx_rss3)))
+			__napi_schedule(&ei_local->napi_rx_rss3);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* RX IRQ handler for the separate-TX/RX NAPI model: ack and mask all
+ * RX interrupts and schedule the RX NAPI context; stray non-TX status
+ * with no RX bits set is simply acked.
+ */
+static irqreturn_t ei_rx_interrupt_napi_sep(int irq, void *dev_id)
+{
+	struct net_device *netdev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local;
+	unsigned int reg_int_val, reg_int_mask;
+	unsigned long flags;
+
+	//pr_info("enter ei_rx_interrupt_napi_sep\n");
+	if (unlikely(!netdev)) {
+		pr_info("net_interrupt(): irq %x for unknown device.\n", irq);
+		return IRQ_NONE;
+	}
+	ei_local = netdev_priv(netdev);
+
+	reg_int_val = sys_reg_read(ei_local->fe_rx_int_status);
+	if (likely(reg_int_val & RX_INT_ALL)) {
+		if (likely(napi_schedule_prep(&ei_local->napi_rx))) {
+			spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+			/* Clear RX interrupt status */
+			sys_reg_write(ei_local->fe_rx_int_status, RX_INT_ALL);
+
+			/* Disable RX interrupt */
+			reg_int_mask = sys_reg_read(ei_local->fe_rx_int_enable);
+			sys_reg_write(ei_local->fe_rx_int_enable,
+				      reg_int_mask & ~(RX_INT_ALL));
+
+			spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+			__napi_schedule(&ei_local->napi_rx);
+		}
+	} else {
+		spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+		/* Ack other interrupt status except TX irqs */
+		reg_int_val &= ~(TX_INT_ALL);
+		sys_reg_write(ei_local->fe_rx_int_status, reg_int_val);
+
+		spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+	}
+
+	//pr_info("leave ei_rx_interrupt_napi_sep\n");
+	return IRQ_HANDLED;
+}
+
+/* Legacy (non-NAPI) RX IRQ handler: ack RX status, and if RX work is
+ * pending (or was deferred by pending_recv) and the TX ring is not
+ * full, mask RX interrupts and hand processing to the tasklet or
+ * workqueue; otherwise remember the work in pending_recv.
+ */
+static irqreturn_t ei_rx_interrupt(int irq, void *dev_id)
+{
+	unsigned long reg_int_val;
+	unsigned long reg_int_mask;
+	unsigned int recv = 0;
+	unsigned long flags;
+
+	struct net_device *netdev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local;
+
+	if (unlikely(!netdev)) {
+		pr_info("net_interrupt(): irq %x for unknown device.\n", irq);
+		return IRQ_NONE;
+	}
+	ei_local = netdev_priv(netdev);
+
+	spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+	reg_int_val = sys_reg_read(ei_local->fe_rx_int_status);
+	if (reg_int_val & RX_INT_ALL)
+		recv = 1;
+
+	/* Clear RX interrupt status */
+	sys_reg_write(ei_local->fe_rx_int_status, RX_INT_ALL);
+
+	if (likely(((recv == 1) || (pending_recv == 1)) &&
+		   (ei_local->tx_ring_full == 0))) {
+		reg_int_mask = sys_reg_read(ei_local->fe_rx_int_enable);
+		/* Disable RX interrupt */
+		sys_reg_write(ei_local->fe_rx_int_enable,
+			      reg_int_mask & ~(RX_INT_ALL));
+		pending_recv = 0;
+
+		if (likely(ei_local->features & FE_INT_TASKLET))
+			tasklet_hi_schedule(&ei_local->rx_tasklet);
+		else
+			schedule_work(&ei_local->rx_wq);
+	} else if (recv == 1 && ei_local->tx_ring_full == 1) {
+		/* TX ring full: postpone RX processing to a later IRQ. */
+		pending_recv = 1;
+	}
+
+	spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+/* TX IRQ handler for the combined NAPI scheme (one napi context serves
+ * both directions).  On a TX interrupt it masks BOTH the TX and RX enable
+ * registers before scheduling ei_local->napi, since that single poll
+ * routine handles both and is expected to re-enable them.
+ *
+ * @irq:    interrupt number (diagnostics only)
+ * @dev_id: the raeth net_device passed to request_irq()
+ *
+ * Returns IRQ_HANDLED, or IRQ_NONE when dev_id is missing.
+ */
+static irqreturn_t ei_tx_interrupt_napi(int irq, void *dev_id)
+{
+	struct net_device *netdev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local;
+	unsigned long flags;
+	unsigned int reg_int_val;
+
+	if (unlikely(!netdev)) {
+		pr_info("net_interrupt(): irq %x for unknown device.\n", irq);
+		return IRQ_NONE;
+	}
+	ei_local = netdev_priv(netdev);
+
+	reg_int_val = sys_reg_read(ei_local->fe_tx_int_status);
+	if (likely(reg_int_val & TX_INT_ALL)) {
+		if (likely(napi_schedule_prep(&ei_local->napi))) {
+			spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+			/* Disable TX interrupt */
+			sys_reg_write(ei_local->fe_tx_int_enable, 0);
+			/* Disable RX interrupt */
+			sys_reg_write(ei_local->fe_rx_int_enable, 0);
+
+			spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+			__napi_schedule(&ei_local->napi);
+		}
+	} else {
+		spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+		/* Ack other interrupt status except RX irqs */
+		reg_int_val &= ~(RX_INT_ALL);
+		sys_reg_write(ei_local->fe_tx_int_status, reg_int_val);
+
+		spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* TX-only IRQ handler for the separated-IRQ NAPI scheme.  Masks only the
+ * TX interrupt bits and schedules the dedicated TX NAPI context; the RX
+ * line has its own handler (ei_rx_interrupt_napi_sep).
+ *
+ * @irq:    interrupt number (diagnostics only)
+ * @dev_id: the raeth net_device passed to request_irq()
+ *
+ * Returns IRQ_HANDLED, or IRQ_NONE when dev_id is missing.
+ */
+static irqreturn_t ei_tx_interrupt_napi_sep(int irq, void *dev_id)
+{
+	struct net_device *netdev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local;
+	unsigned long flags;
+	unsigned int reg_int_val, reg_int_mask;
+
+	if (unlikely(!netdev)) {
+		pr_info("net_interrupt(): irq %x for unknown device.\n", irq);
+		return IRQ_NONE;
+	}
+	ei_local = netdev_priv(netdev);
+
+	reg_int_val = sys_reg_read(ei_local->fe_tx_int_status);
+	if (likely(reg_int_val & TX_INT_ALL)) {
+		if (likely(napi_schedule_prep(&ei_local->napi_tx))) {
+			spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+			/* Disable TX interrupt until the NAPI poll re-arms it */
+			reg_int_mask = sys_reg_read(ei_local->fe_tx_int_enable);
+			sys_reg_write(ei_local->fe_tx_int_enable,
+				      reg_int_mask & ~(TX_INT_ALL));
+
+			spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+			__napi_schedule(&ei_local->napi_tx);
+		}
+	} else {
+		spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+		/* Ack other interrupt status except RX irqs */
+		reg_int_val &= ~(RX_INT_ALL);
+		sys_reg_write(ei_local->fe_tx_int_status, reg_int_val);
+
+		spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* TX IRQ handler for the non-NAPI scheme.  Runs TX ring housekeeping
+ * synchronously in hard-IRQ context: mask TX interrupts, ack the status,
+ * reclaim completed descriptors, then restore the TX mask.  Everything is
+ * done under irq_lock to serialize against the other handlers.
+ *
+ * @irq:    interrupt number (diagnostics only)
+ * @dev_id: the raeth net_device passed to request_irq()
+ *
+ * Returns IRQ_HANDLED, or IRQ_NONE when dev_id is missing.
+ */
+static irqreturn_t ei_tx_interrupt(int irq, void *dev_id)
+{
+	struct net_device *netdev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local;
+	unsigned long flags;
+	unsigned long reg_int_val, reg_int_mask;
+
+	if (unlikely(!netdev)) {
+		pr_info("net_interrupt(): irq %x for unknown device.\n", irq);
+		return IRQ_NONE;
+	}
+	ei_local = netdev_priv(netdev);
+
+	spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+	reg_int_val = sys_reg_read(ei_local->fe_tx_int_status);
+
+	if (likely(reg_int_val & TX_INT_ALL)) {
+		/* Disable TX interrupt while reclaiming descriptors */
+		reg_int_mask = sys_reg_read(ei_local->fe_tx_int_enable);
+		sys_reg_write(ei_local->fe_tx_int_enable,
+			      reg_int_mask & ~(TX_INT_ALL));
+		/* Clear TX interrupt status */
+		sys_reg_write(ei_local->fe_tx_int_status, TX_INT_ALL);
+		/* Reclaim up to num_tx_max_process completed TX descriptors */
+		ei_local->ei_xmit_housekeeping(netdev, num_tx_max_process);
+
+		/* Re-enable TX interrupt */
+		reg_int_mask = sys_reg_read(ei_local->fe_tx_int_enable);
+		sys_reg_write(ei_local->fe_tx_int_enable,
+			      reg_int_mask | ei_local->tx_mask);
+	}
+
+	spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+/* NOTE(review): dead code, compiled out with #if 0.  Kept for reference;
+ * it was the frame-engine link-change handler that applied an SGMII flow
+ * control workaround for 100/10 Mbps links on MAC1/MAC2.  The matching
+ * request_irq() call in fe_int_enable() is commented out as well.
+ */
+#if 0
+static irqreturn_t ei_fe_interrupt(int irq, void *dev_id)
+{
+	struct net_device *netdev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local;
+	unsigned long flags;
+	unsigned int reg_val;
+	unsigned int speed_mode;
+
+	if (unlikely(!netdev)) {
+		pr_info("net_interrupt(): irq %x for unknown device.\n", irq);
+		return IRQ_NONE;
+	}
+	ei_local = netdev_priv(netdev);
+
+	spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+	/* not to apply SGMII FC ECO for 100/10 */
+	if (ei_local->architecture & GE1_SGMII_AN) {
+		/* disable fe int */
+		sys_reg_write(FE_INT_ENABLE2, 0);
+		sys_reg_write(FE_INT_STATUS2, MAC1_LINK);
+		reg_val = sys_reg_read(ethdma_mac_base + 0x108);
+		if (reg_val & 0x1) {
+			speed_mode = (reg_val & 0x8) >> 3;
+			/* speed_mode: 0 for 100/10; 1 for else */
+			reg_val = sys_reg_read(ethdma_mac_base + 0x8);
+			if (speed_mode == 0)
+				reg_val |= 1 << 7;
+			else if (speed_mode == 1)
+				reg_val &= ~(1 << 7);
+			sys_reg_write(ethdma_mac_base + 0x8, reg_val);
+		}
+		sys_reg_write(FE_INT_ENABLE2, MAC1_LINK);
+	} else if (ei_local->architecture & GE2_SGMII_AN) {
+		/* disable fe int */
+		sys_reg_write(FE_INT_ENABLE2, 0);
+		sys_reg_write(FE_INT_STATUS2, MAC2_LINK);
+		reg_val = sys_reg_read(ethdma_mac_base + 0x208);
+		if (reg_val & 0x1) {
+			speed_mode = (reg_val & 0x8) >> 3;
+			/* speed_mode: 0 for 100/10; 1 for else */
+			reg_val = sys_reg_read(ethdma_mac_base + 0x8);
+			if (speed_mode == 0)
+				reg_val |= 1 << 7;
+			else if (speed_mode == 1)
+				reg_val &= ~(1 << 7);
+			sys_reg_write(ethdma_mac_base + 0x8, reg_val);
+		}
+		sys_reg_write(FE_INT_ENABLE2, MAC2_LINK);
+	}
+		spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+	return IRQ_HANDLED;
+}
+#endif
+
+/* Deferred RX processing shared by the tasklet and workqueue paths.
+ * Drains up to NUM_RX_MAX_PROCESS packets; reschedules itself while work
+ * remains, and re-arms the RX interrupt mask once the ring is empty.
+ */
+static inline void ei_receive(void)
+{
+	struct net_device *dev = dev_raether;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	unsigned long irq_flags;
+	unsigned long mask;
+	int processed;
+
+	if (ei_local->tx_ring_full != 0) {
+		/* TX ring congested: retry later at normal priority. */
+		if (likely(ei_local->features & FE_INT_TASKLET))
+			tasklet_schedule(&ei_local->rx_tasklet);
+		else
+			schedule_work(&ei_local->rx_wq);
+		return;
+	}
+
+	processed = ei_local->ei_eth_recv(dev, NULL, NUM_RX_MAX_PROCESS);
+	if (processed > NUM_RX_MAX_PROCESS) {
+		/* Budget exhausted: more packets pending, poll again soon. */
+		if (likely(ei_local->features & FE_INT_TASKLET))
+			tasklet_hi_schedule(&ei_local->rx_tasklet);
+		else
+			schedule_work(&ei_local->rx_wq);
+		return;
+	}
+
+	/* Ring drained: let the hardware interrupt us again. */
+	spin_lock_irqsave(&ei_local->irq_lock, irq_flags);
+	mask = sys_reg_read(ei_local->fe_rx_int_enable);
+	sys_reg_write(ei_local->fe_rx_int_enable, mask | ei_local->rx_mask);
+	spin_unlock_irqrestore(&ei_local->irq_lock, irq_flags);
+}
+
+/* Tasklet trampoline: signature required by tasklet_init(). */
+static void ei_receive_tasklet(unsigned long unused)
+{
+	ei_receive();
+}
+
+/* Workqueue trampoline: signature required by INIT_WORK(). */
+static void ei_receive_workq(struct work_struct *work)
+{
+	ei_receive();
+}
+
+/* Request the frame-engine IRQ lines and program the interrupt masks,
+ * delay-interrupt configuration, and IRQ-group routing for the selected
+ * feature set (NAPI vs tasklet/workqueue, delayed interrupts, RSS 2/4
+ * ring).  Also enables the NAPI contexts or initializes the RX deferral
+ * mechanism.  Called from ei_open().
+ *
+ * NOTE(review): the request_irq() results (err0..err3) are collected but
+ * never checked; a failed request is silently ignored and the function
+ * always returns 0.  Verify whether error unwinding is needed here.
+ *
+ * @dev: the raeth net_device being opened
+ *
+ * Returns 0 unconditionally.
+ */
+static int fe_int_enable(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	//struct device_node *np = ei_local->switch_np;
+	//struct platform_device *pdev = of_find_device_by_node(np);
+	int err0 = 0, err1 = 0, err2 = 0, err3 = 0;
+	//struct mtk_gsw *gsw;
+	unsigned int reg_val = 0;
+	unsigned long flags;
+
+	pr_err("fe_int_enable\n");
+	if (ei_local->architecture & (GE1_SGMII_AN | GE2_SGMII_AN)) {
+		//err0 = request_irq(ei_local->irq0, ei_fe_interrupt,
+				  // IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE, dev->name, dev);
+	} else if (ei_local->features & FE_INT_NAPI) {
+		if (ei_local->features & FE_INT_NAPI_TX_RX)
+			err0 =
+			    request_irq(ei_local->irq0, ei_interrupt_napi_sep,
+					IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE, dev->name, dev);
+	} else
+		err0 =
+		    request_irq(ei_local->irq0, ei_interrupt, IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE,
+				dev->name, dev);
+
+	/* Per-direction IRQ lines: which handlers get attached depends on
+	 * the NAPI mode and (for RX-only NAPI) the RSS ring count.
+	 */
+	if (ei_local->features & FE_IRQ_SEPARATE) {
+		if (ei_local->features & FE_INT_NAPI) {
+			pr_err("FE_INT_NAPI\n");
+			if (ei_local->features & FE_INT_NAPI_TX_RX) {
+				pr_err("FE_INT_NAPI_TX_RX\n");
+				err1 =
+				    request_irq(ei_local->irq1,
+						ei_tx_interrupt_napi_sep,
+						IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE,
+						"eth_tx", dev);
+				err2 =
+				    request_irq(ei_local->irq2,
+						ei_rx_interrupt_napi_sep,
+						IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE,
+						"eth_rx", dev);
+			} else if (ei_local->features & FE_INT_NAPI_RX_ONLY) {
+				pr_err("FE_INT_NAPI_RX_ONLY\n");
+
+
+				if (ei_local->features & FE_RSS_4RING) {
+					err2 =
+					    request_irq(ei_local->irq2,
+							ei_rx_interrupt_napi_g0,
+							IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE,
+							"eth_rx_g0", dev);
+					err3 =
+					    request_irq(ei_local->irq3,
+							ei_rx_interrupt_napi_g1,
+							IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE,
+							"eth_rx_g1", dev);
+				} else if (ei_local->features & FE_RSS_2RING) {
+					err2 =
+					    request_irq(ei_local->irq2,
+							ei_rx_interrupt_napi_rss0,
+							IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE,
+							"eth_rx_0", dev);
+					err3 =
+					    request_irq(ei_local->irq3,
+							ei_rx_interrupt_napi_rss1,
+							IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE,
+							"eth_rx_1", dev);
+				}
+			} else {
+				pr_err("else\n");
+				err1 =
+				    request_irq(ei_local->irq1,
+						ei_tx_interrupt_napi,
+						IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE,
+						"eth_tx", dev);
+				err2 =
+				    request_irq(ei_local->irq2,
+						ei_rx_interrupt_napi,
+						IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE,
+						"eth_rx", dev);
+			}
+		} else {
+			pr_err("not FE_INT_NAPI\n");
+			err1 =
+			    request_irq(ei_local->irq1, ei_tx_interrupt,
+					IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE, "eth_tx", dev);
+			err2 =
+			    request_irq(ei_local->irq2, ei_rx_interrupt,
+					IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE, "eth_rx", dev);
+		}
+	}
+	pr_info("!!!!!! request done\n");
+
+
+
+	spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+	/* Select the TX/RX mask bits that the handlers will re-arm with */
+	if (ei_local->features & FE_DLY_INT) {
+		ei_local->tx_mask = RLS_DLY_INT;
+
+		if (ei_local->features & FE_RSS_4RING)
+			ei_local->rx_mask = RSS_RX_DLY_INT;
+		else if (ei_local->features & FE_RSS_2RING)
+			ei_local->rx_mask = RSS_RX_DLY_INT0;
+		else
+			ei_local->rx_mask = RX_DLY_INT;
+	} else {
+		ei_local->tx_mask = TX_DONE_INT0;
+		ei_local->rx_mask = RX_DONE_INT0 | RX_DONE_INT1 | RX_DONE_INT2 | RX_DONE_INT3;
+	}
+
+	/* Enable PDMA interrupts */
+	if (ei_local->features & FE_DLY_INT) {
+		sys_reg_write(RAETH_DLY_INT_CFG, DELAY_INT_INIT);
+		if (ei_local->features & FE_RSS_4RING) {
+			sys_reg_write(LRO_RX1_DLY_INT, DELAY_INT_INIT);
+			sys_reg_write(LRO_RX2_DLY_INT, DELAY_INT_INIT);
+			sys_reg_write(LRO_RX3_DLY_INT, DELAY_INT_INIT);
+			sys_reg_write(RAETH_FE_INT_ENABLE, RSS_INT_DLY_INT);
+		} else if (ei_local->features & FE_RSS_2RING) {
+			sys_reg_write(LRO_RX1_DLY_INT, DELAY_INT_INIT);
+			sys_reg_write(RAETH_FE_INT_ENABLE, RSS_INT_DLY_INT_2RING);
+		} else {
+			sys_reg_write(RAETH_FE_INT_ENABLE, RAETH_FE_INT_DLY_INIT);
+		}
+	} else {
+		sys_reg_write(RAETH_FE_INT_ENABLE, RAETH_FE_INT_ALL);
+	}
+
+	/* Enable QDMA interrupts */
+	if (ei_local->features & FE_QDMA) {
+		if (ei_local->features & FE_DLY_INT) {
+			sys_reg_write(QDMA_DELAY_INT, DELAY_INT_INIT);
+			sys_reg_write(QFE_INT_ENABLE, QFE_INT_DLY_INIT);
+		} else {
+			sys_reg_write(QFE_INT_ENABLE, QFE_INT_ALL);
+		}
+	}
+
+	/* Link-change interrupts for SGMII autonegotiation on these SoCs */
+	if (ei_local->chip_name == MT7622_FE || ei_local->chip_name == LEOPARD_FE) {
+		if (ei_local->architecture & GE1_SGMII_AN)
+			sys_reg_write(FE_INT_ENABLE2, MAC1_LINK);
+		else if (ei_local->architecture & GE2_SGMII_AN)
+			sys_reg_write(FE_INT_ENABLE2, MAC2_LINK);
+	}
+
+	/* IRQ separation settings */
+	if (ei_local->features & FE_IRQ_SEPARATE) {
+		if (ei_local->features & FE_DLY_INT) {
+			/* PDMA setting */
+			sys_reg_write(PDMA_INT_GRP1, TX_DLY_INT);
+
+			if (ei_local->features & FE_RSS_4RING) {
+				/* Enable multipe rx ring delay interrupt */
+				reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0);
+				reg_val |= PDMA_LRO_DLY_INT_EN;
+				sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val);
+				sys_reg_write(PDMA_INT_GRP2, (RING0_RX_DLY_INT | RING1_RX_DLY_INT));
+				sys_reg_write(PDMA_INT_GRP3, (RING2_RX_DLY_INT | RING3_RX_DLY_INT));
+
+			} else if (ei_local->features & FE_RSS_2RING) {
+				reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0);
+				reg_val |= PDMA_LRO_DLY_INT_EN;
+				sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val);
+				sys_reg_write(PDMA_INT_GRP2, RING0_RX_DLY_INT);
+				sys_reg_write(PDMA_INT_GRP3, RING1_RX_DLY_INT);
+			} else {
+				sys_reg_write(PDMA_INT_GRP2, RX_DLY_INT);
+			}
+			/* QDMA setting */
+			sys_reg_write(QDMA_INT_GRP1, RLS_DLY_INT);
+			sys_reg_write(QDMA_INT_GRP2, RX_DLY_INT);
+		} else {
+			/* PDMA setting */
+			sys_reg_write(PDMA_INT_GRP1, TX_DONE_INT0);
+
+			/* QDMA setting */
+			sys_reg_write(QDMA_INT_GRP1, RLS_DONE_INT);
+			sys_reg_write(QDMA_INT_GRP2, RX_DONE_INT0 | RX_DONE_INT1);
+
+			if (ei_local->features & FE_RSS_4RING) {
+				sys_reg_write(PDMA_INT_GRP2, (RX_DONE_INT0 | RX_DONE_INT1));
+				sys_reg_write(PDMA_INT_GRP3, (RX_DONE_INT2 | RX_DONE_INT3));
+			} else if (ei_local->features & FE_RSS_2RING) {
+				sys_reg_write(PDMA_INT_GRP2, RX_DONE_INT0);
+				sys_reg_write(PDMA_INT_GRP3, RX_DONE_INT1);
+			} else {
+				sys_reg_write(PDMA_INT_GRP2, RX_DONE_INT0 | RX_DONE_INT1 |
+					      RX_DONE_INT2 | RX_DONE_INT3);
+			}
+		}
+		/*leopard fe_int[0~3][223,224,225,219]*/
+		if (ei_local->features & (FE_RSS_4RING | FE_RSS_2RING))
+			sys_reg_write(FE_INT_GRP, 0x21021030);
+		else
+			sys_reg_write(FE_INT_GRP, 0x21021000);
+	}
+
+	/* Arm the deferral mechanism / NAPI contexts for the chosen mode */
+	if (ei_local->features & FE_INT_TASKLET) {
+		tasklet_init(&ei_local->rx_tasklet, ei_receive_tasklet, 0);
+	} else if (ei_local->features & FE_INT_WORKQ) {
+		INIT_WORK(&ei_local->rx_wq, ei_receive_workq);
+	} else {
+		if (ei_local->features & FE_INT_NAPI_TX_RX) {
+			napi_enable(&ei_local->napi_tx);
+			napi_enable(&ei_local->napi_rx);
+		} else if (ei_local->features & FE_INT_NAPI_RX_ONLY) {
+			if (ei_local->features & FE_RSS_4RING) {
+				napi_enable(&ei_local->napi_rx_rss0);
+				napi_enable(&ei_local->napi_rx_rss1);
+				napi_enable(&ei_local->napi_rx_rss2);
+				napi_enable(&ei_local->napi_rx_rss3);
+			} else if (ei_local->features & FE_RSS_2RING) {
+				napi_enable(&ei_local->napi_rx_rss0);
+				napi_enable(&ei_local->napi_rx_rss1);
+			} else {
+				napi_enable(&ei_local->napi_rx);
+			}
+		} else {
+			napi_enable(&ei_local->napi);
+		}
+	}
+
+	spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+	return 0;
+}
+
+/* Release the IRQ lines and tear down the deferral/NAPI machinery set up
+ * by fe_int_enable().  Called from ei_close().
+ *
+ * NOTE(review): the free_irq() calls are not perfectly symmetric with
+ * fe_int_enable(): irq1 is freed whenever FE_IRQ_SEPARATE is set, but in
+ * the FE_INT_NAPI_RX_ONLY + RSS configurations irq1 was never requested;
+ * likewise irq0 is freed unconditionally although the SGMII_AN path left
+ * its request_irq() commented out.  Verify against the enable path.
+ *
+ * @dev: the raeth net_device being closed
+ *
+ * Returns 0 unconditionally.
+ */
+static int fe_int_disable(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+
+	/*always request irq0*/
+	free_irq(ei_local->irq0, dev);
+
+	if (ei_local->features & FE_IRQ_SEPARATE) {
+		free_irq(ei_local->irq1, dev);
+		free_irq(ei_local->irq2, dev);
+	}
+
+	if (ei_local->architecture & RAETH_ESW)
+		free_irq(ei_local->esw_irq, dev);
+
+	if (ei_local->features & (FE_RSS_4RING | FE_RSS_2RING))
+		free_irq(ei_local->irq3, dev);
+
+	cancel_work_sync(&ei_local->reset_task);
+
+	/* Stop whichever deferral mechanism fe_int_enable() armed */
+	if (ei_local->features & FE_INT_WORKQ)
+		cancel_work_sync(&ei_local->rx_wq);
+	else if (ei_local->features & FE_INT_TASKLET)
+		tasklet_kill(&ei_local->rx_tasklet);
+
+	if (ei_local->features & FE_INT_NAPI) {
+		if (ei_local->features & FE_INT_NAPI_TX_RX) {
+			napi_disable(&ei_local->napi_tx);
+			napi_disable(&ei_local->napi_rx);
+		} else if (ei_local->features & FE_INT_NAPI_RX_ONLY) {
+			if (ei_local->features & FE_RSS_4RING) {
+				napi_disable(&ei_local->napi_rx_rss0);
+				napi_disable(&ei_local->napi_rx_rss1);
+				napi_disable(&ei_local->napi_rx_rss2);
+				napi_disable(&ei_local->napi_rx_rss3);
+			} else if (ei_local->features & FE_RSS_2RING) {
+				napi_disable(&ei_local->napi_rx_rss0);
+				napi_disable(&ei_local->napi_rx_rss1);
+			} else {
+				napi_disable(&ei_local->napi_rx);
+			}
+		} else {
+			napi_disable(&ei_local->napi);
+		}
+	}
+
+	return 0;
+}
+
+/* Program the GDMA forwarding, VLAN offload, and checksum offload
+ * configuration for GMAC1 (and GMAC2 when FE_GE2_SUPPORT is set), and
+ * mirror the resulting capabilities into dev->features.  Ends by pulsing
+ * FE_RST_GL to reset the PSE after reprogramming.
+ *
+ * @dev: the raeth net_device being configured
+ *
+ * Returns 1 unconditionally.
+ */
+int forward_config(struct net_device *dev)
+{
+	unsigned int reg_val, reg_csg;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	unsigned int reg_val2 = 0;
+
+	if (ei_local->features & FE_HW_VLAN_TX) {
+		/*VLAN_IDX 0 = VLAN_ID 0
+		 * .........
+		 * VLAN_IDX 15 = VLAN ID 15
+		 *
+		 */
+		/* frame engine will push VLAN tag
+		 * regarding to VIDX field in Tx desc.
+		 */
+		sys_reg_write(RALINK_FRAME_ENGINE_BASE + 0xa8, 0x00010000);
+		sys_reg_write(RALINK_FRAME_ENGINE_BASE + 0xac, 0x00030002);
+		sys_reg_write(RALINK_FRAME_ENGINE_BASE + 0xb0, 0x00050004);
+		sys_reg_write(RALINK_FRAME_ENGINE_BASE + 0xb4, 0x00070006);
+		sys_reg_write(RALINK_FRAME_ENGINE_BASE + 0xb8, 0x00090008);
+		sys_reg_write(RALINK_FRAME_ENGINE_BASE + 0xbc, 0x000b000a);
+		sys_reg_write(RALINK_FRAME_ENGINE_BASE + 0xc0, 0x000d000c);
+		sys_reg_write(RALINK_FRAME_ENGINE_BASE + 0xc4, 0x000f000e);
+	}
+
+	reg_val = sys_reg_read(GDMA1_FWD_CFG);
+	reg_csg = sys_reg_read(CDMA_CSG_CFG);
+
+	if (ei_local->features & FE_GE2_SUPPORT)
+		reg_val2 = sys_reg_read(GDMA2_FWD_CFG);
+
+	/* set unicast/multicast/broadcast frame to cpu */
+	reg_val &= ~0xFFFF;
+	reg_val |= GDMA1_FWD_PORT;
+	reg_csg &= ~0x7;
+
+	if (ei_local->features & FE_HW_VLAN_TX)
+		dev->features |= NETIF_F_HW_VLAN_CTAG_TX;
+
+	if (ei_local->features & FE_HW_VLAN_RX) {
+		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+		/* enable HW VLAN RX */
+		sys_reg_write(CDMP_EG_CTRL, 1);
+	}
+	if (ei_local->features & FE_CSUM_OFFLOAD) {
+		/* enable ipv4 header checksum check */
+		reg_val |= GDM1_ICS_EN;
+		reg_csg |= ICS_GEN_EN;
+
+		/* enable tcp checksum check */
+		reg_val |= GDM1_TCS_EN;
+		reg_csg |= TCS_GEN_EN;
+
+		/* enable udp checksum check */
+		reg_val |= GDM1_UCS_EN;
+		reg_csg |= UCS_GEN_EN;
+
+		/* NOTE(review): the GDM1_* bit macros are reused for the
+		 * GDMA2 register below — presumably both GDMA config
+		 * registers share the same bit layout; confirm in the
+		 * register map.
+		 */
+		if (ei_local->features & FE_GE2_SUPPORT) {
+			reg_val2 &= ~0xFFFF;
+			reg_val2 |= GDMA2_FWD_PORT;
+			reg_val2 |= GDM1_ICS_EN;
+			reg_val2 |= GDM1_TCS_EN;
+			reg_val2 |= GDM1_UCS_EN;
+		}
+
+		if (ei_local->features & FE_HW_LRO)
+			dev->features |= NETIF_F_HW_CSUM;
+		else
+			/* Can checksum TCP/UDP over IPv4 */
+			dev->features |= NETIF_F_IP_CSUM;
+
+		if (ei_local->features & FE_TSO) {
+			dev->features |= NETIF_F_SG;
+			dev->features |= NETIF_F_TSO;
+		}
+
+		if (ei_local->features & FE_TSO_V6) {
+			dev->features |= NETIF_F_TSO6;
+			/* Can checksum TCP/UDP over IPv6 */
+			dev->features |= NETIF_F_IPV6_CSUM;
+		}
+	} else {		/* Checksum offload disabled */
+		/* disable ipv4 header checksum check */
+		reg_val &= ~GDM1_ICS_EN;
+		reg_csg &= ~ICS_GEN_EN;
+
+		/* disable tcp checksum check */
+		reg_val &= ~GDM1_TCS_EN;
+		reg_csg &= ~TCS_GEN_EN;
+
+		/* disable udp checksum check */
+		reg_val &= ~GDM1_UCS_EN;
+		reg_csg &= ~UCS_GEN_EN;
+
+		if (ei_local->features & FE_GE2_SUPPORT) {
+			reg_val2 &= ~GDM1_ICS_EN;
+			reg_val2 &= ~GDM1_TCS_EN;
+			reg_val2 &= ~GDM1_UCS_EN;
+		}
+
+		/* disable checksum TCP/UDP over IPv4 */
+		dev->features &= ~NETIF_F_IP_CSUM;
+	}
+
+	sys_reg_write(GDMA1_FWD_CFG, reg_val);
+	sys_reg_write(CDMA_CSG_CFG, reg_csg);
+	if (ei_local->features & FE_GE2_SUPPORT)
+		sys_reg_write(GDMA2_FWD_CFG, reg_val2);
+
+	dev->vlan_features = dev->features;
+
+	/*FE_RST_GLO register definition -
+	 *Bit 0: PSE Rest
+	 *Reset PSE after re-programming PSE_FQ_CFG.
+	 */
+	reg_val = 0x1;
+	sys_reg_write(FE_RST_GL, reg_val);
+	sys_reg_write(FE_RST_GL, 0);	/* update for RSTCTL issue */
+
+	/* NOTE(review): the values read below are discarded — possibly
+	 * intended as posted-write flushes after the reset pulse; confirm
+	 * whether these reads are required.
+	 */
+	reg_csg = sys_reg_read(CDMA_CSG_CFG);
+	reg_val = sys_reg_read(GDMA1_FWD_CFG);
+
+	if (ei_local->features & FE_GE2_SUPPORT)
+		reg_val = sys_reg_read(GDMA2_FWD_CFG);
+
+	return 1;
+}
+
+/* Reset every counter in the pseudo interface's net_device_stats.
+ *
+ * The previous implementation zeroed each field individually and missed
+ * several members of struct net_device_stats (e.g. multicast,
+ * rx_compressed, tx_compressed), which could leave stale values behind
+ * when an interface is re-opened.  Clearing the whole structure resets
+ * every current and future counter and is a strict superset of the old
+ * behavior.
+ *
+ * @p_ad: pseudo adapter whose statistics block is reset
+ */
+void virtif_setup_statistics(struct PSEUDO_ADAPTER *p_ad)
+{
+	memset(&p_ad->stat, 0, sizeof(p_ad->stat));
+}
+
+/* ndo_open for the GMAC2 pseudo interface: reset its counters, mirror the
+ * parent raeth device's hardware VLAN offload flags, and start the queue.
+ * Always succeeds.
+ */
+int virtualif_open(struct net_device *dev)
+{
+	struct PSEUDO_ADAPTER *vif = netdev_priv(dev);
+	struct END_DEVICE *raeth = netdev_priv(vif->raeth_dev);
+
+	/* Zero all counters before traffic starts flowing. */
+	virtif_setup_statistics(vif);
+
+	/* Inherit the parent's VLAN offload capabilities. */
+	if (raeth->features & FE_HW_VLAN_TX)
+		dev->features |= NETIF_F_HW_VLAN_CTAG_TX;
+	if (raeth->features & FE_HW_VLAN_RX)
+		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+
+	netif_start_queue(vif->pseudo_dev);
+
+	return 0;
+}
+
+/* ndo_stop for the pseudo interface: stop its TX queue.  Always succeeds. */
+int virtualif_close(struct net_device *dev)
+{
+	struct PSEUDO_ADAPTER *vif = netdev_priv(dev);
+
+	pr_info("%s: ===> virtualif_close\n", dev->name);
+	netif_stop_queue(vif->pseudo_dev);
+	return 0;
+}
+
+/* ndo_start_xmit for the pseudo interface: hand the skb to the parent
+ * raeth device's xmit hook with gmac_no 2.  Drops the packet silently
+ * when the parent interface is down.  Always reports success.
+ */
+int virtualif_send_packets(struct sk_buff *p_skb, struct net_device *dev)
+{
+	struct PSEUDO_ADAPTER *vif = netdev_priv(dev);
+	struct END_DEVICE *raeth;
+
+	/* Parent interface down: drop instead of transmitting. */
+	if (!(vif->raeth_dev->flags & IFF_UP)) {
+		dev_kfree_skb_any(p_skb);
+		return 0;
+	}
+
+	p_skb->dev = vif->pseudo_dev;
+	raeth = netdev_priv(vif->raeth_dev);
+	raeth->ei_start_xmit(p_skb, vif->raeth_dev, 2);
+	return 0;
+}
+
+/* ndo_get_stats for the pseudo interface: expose its private counters. */
+struct net_device_stats *virtualif_get_stats(struct net_device *dev)
+{
+	struct PSEUDO_ADAPTER *vif = netdev_priv(dev);
+
+	return &vif->stat;
+}
+
+/* ndo_do_ioctl for the pseudo interface: MII register read/write via the
+ * shared MDIO manager.
+ *
+ * Fix: the previous code stored the copy_from_user()/copy_to_user()
+ * results in an unused variable and ignored them, so on a faulting user
+ * pointer it would operate on an uninitialized ra_mii_ioctl_data.  Fault
+ * now propagates as -EFAULT, per kernel convention.
+ *
+ * @net_dev: pseudo net_device (unused)
+ * @ifr:     ifreq whose ifr_data points at a struct ra_mii_ioctl_data
+ * @cmd:     RAETH_MII_READ or RAETH_MII_WRITE
+ *
+ * Returns 0 on success, -EFAULT on a bad user pointer, -EOPNOTSUPP for
+ * unknown commands.
+ */
+int virtualif_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
+{
+	struct ra_mii_ioctl_data mii;
+
+	switch (cmd) {
+	case RAETH_MII_READ:
+		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
+			return -EFAULT;
+		mii_mgr_read(mii.phy_id, mii.reg_num, &mii.val_out);
+		if (copy_to_user(ifr->ifr_data, &mii, sizeof(mii)))
+			return -EFAULT;
+		break;
+
+	case RAETH_MII_WRITE:
+		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
+			return -EFAULT;
+		mii_mgr_write(mii.phy_id, mii.reg_num, mii.val_in);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+/* ndo_change_mtu: accept MTUs in [64, 1500] only.
+ *
+ * Fix: the previous code first rejected new_mtu > 4096, then separately
+ * rejected new_mtu > 1500, making the 4096 bound dead code.  The two
+ * checks are merged into the single effective range; accepted/rejected
+ * values are unchanged.
+ *
+ * @dev:     the raeth net_device
+ * @new_mtu: requested MTU in bytes
+ *
+ * Returns 0 on success, -EINVAL for an out-of-range MTU, -ENXIO when the
+ * private data pointer is missing.
+ */
+static int ei_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+
+	if (!ei_local) {
+		pr_emerg
+		    ("%s: %s passed a non-existent private pointer from net_dev!\n",
+		     dev->name, __func__);
+		return -ENXIO;
+	}
+
+	/* Effective supported range is [64, 1500]. */
+	if (new_mtu < 64 || new_mtu > 1500)
+		return -EINVAL;
+
+	dev->mtu = new_mtu;
+
+	return 0;
+}
+/* NOTE(review): dead code, compiled out with #if 0.  Kept for reference;
+ * it enabled runtime PM and the PLL/interface/SGMII clocks in dependency
+ * order, mirroring ei_clock_disable() below.
+ */
+#if 0
+static int ei_clock_enable(struct END_DEVICE *ei_local)
+{
+	unsigned long rate;
+	int ret;
+	void __iomem *clk_virt_base;
+	unsigned int reg_value;
+
+	pm_runtime_enable(ei_local->dev);
+	pm_runtime_get_sync(ei_local->dev);
+
+	clk_prepare_enable(ei_local->clks[MTK_CLK_ETH1PLL]);
+	clk_prepare_enable(ei_local->clks[MTK_CLK_ETH2PLL]);
+	clk_prepare_enable(ei_local->clks[MTK_CLK_ETHIF]);
+	clk_prepare_enable(ei_local->clks[MTK_CLK_ESW]);
+	clk_prepare_enable(ei_local->clks[MTK_CLK_GP1]);
+	clk_prepare_enable(ei_local->clks[MTK_CLK_GP2]);
+	/*enable frame engine clock*/
+	if (ei_local->chip_name == LEOPARD_FE)
+		clk_prepare_enable(ei_local->clks[MTK_CLK_FE]);
+
+	if (ei_local->architecture & RAETH_ESW)
+		clk_prepare_enable(ei_local->clks[MTK_CLK_GP0]);
+
+	if (ei_local->architecture &
+	    (GE1_TRGMII_FORCE_2000 | GE1_TRGMII_FORCE_2600)) {
+		ret = clk_set_rate(ei_local->clks[MTK_CLK_TRGPLL], 500000000);
+		if (ret)
+			pr_err("Failed to set mt7530 trgmii pll: %d\n", ret);
+		rate = clk_get_rate(ei_local->clks[MTK_CLK_TRGPLL]);
+		pr_info("TRGMII_PLL rate = %ld\n", rate);
+		clk_prepare_enable(ei_local->clks[MTK_CLK_TRGPLL]);
+	}
+
+	if (ei_local->architecture & RAETH_SGMII) {
+		if (ei_local->chip_name == LEOPARD_FE)
+			clk_prepare_enable(ei_local->clks[MTK_CLK_SGMII_TOP]);
+		clk_prepare_enable(ei_local->clks[MTK_CLK_SGMIPLL]);
+		clk_prepare_enable(ei_local->clks[MTK_CLK_SGMII_TX250M]);
+		clk_prepare_enable(ei_local->clks[MTK_CLK_SGMII_RX250M]);
+		clk_prepare_enable(ei_local->clks[MTK_CLK_SGMII_CDR_REF]);
+		clk_prepare_enable(ei_local->clks[MTK_CLK_SGMII_CDR_FB]);
+	}
+
+	if (ei_local->architecture & GE2_RAETH_SGMII) {
+		clk_virt_base = ioremap(0x102100C0, 0x10);
+		reg_value = sys_reg_read(clk_virt_base);
+		reg_value = reg_value & (~0x8000);	/*[bit15] = 0 */
+		/*pdn_sgmii_re_1 1: Enable clock off */
+		sys_reg_write(clk_virt_base, reg_value);
+		iounmap(clk_virt_base);
+		clk_prepare_enable(ei_local->clks[MTK_CLK_SGMIPLL]);
+		clk_prepare_enable(ei_local->clks[MTK_CLK_SGMII1_TX250M]);
+		clk_prepare_enable(ei_local->clks[MTK_CLK_SGMII1_RX250M]);
+		clk_prepare_enable(ei_local->clks[MTK_CLK_SGMII1_CDR_REF]);
+		clk_prepare_enable(ei_local->clks[MTK_CLK_SGMII1_CDR_FB]);
+	}
+
+	return 0;
+}
+#endif
+/* Gate the ethernet clocks in reverse of the enable order (feature
+ * clocks first, then interface clocks, PLLs last) and drop the runtime
+ * PM reference.  The ordering is deliberate; do not reorder the calls.
+ *
+ * @ei_local: driver private data holding the clk handles
+ *
+ * Returns 0 unconditionally.
+ */
+static int ei_clock_disable(struct END_DEVICE *ei_local)
+{
+	if (ei_local->chip_name == LEOPARD_FE)
+		clk_disable_unprepare(ei_local->clks[MTK_CLK_FE]);
+	if (ei_local->architecture & RAETH_ESW)
+		clk_disable_unprepare(ei_local->clks[MTK_CLK_GP0]);
+
+	if (ei_local->architecture &
+	    (GE1_TRGMII_FORCE_2000 | GE1_TRGMII_FORCE_2600))
+		clk_disable_unprepare(ei_local->clks[MTK_CLK_TRGPLL]);
+
+	if (ei_local->architecture & RAETH_SGMII) {
+		clk_disable_unprepare(ei_local->clks[MTK_CLK_SGMII_TX250M]);
+		clk_disable_unprepare(ei_local->clks[MTK_CLK_SGMII_RX250M]);
+		clk_disable_unprepare(ei_local->clks[MTK_CLK_SGMII_CDR_REF]);
+		clk_disable_unprepare(ei_local->clks[MTK_CLK_SGMII_CDR_FB]);
+		clk_disable_unprepare(ei_local->clks[MTK_CLK_SGMIPLL]);
+	}
+
+	clk_disable_unprepare(ei_local->clks[MTK_CLK_GP2]);
+	clk_disable_unprepare(ei_local->clks[MTK_CLK_GP1]);
+	clk_disable_unprepare(ei_local->clks[MTK_CLK_ESW]);
+	clk_disable_unprepare(ei_local->clks[MTK_CLK_ETHIF]);
+	clk_disable_unprepare(ei_local->clks[MTK_CLK_ETH2PLL]);
+	clk_disable_unprepare(ei_local->clks[MTK_CLK_ETH1PLL]);
+
+	pm_runtime_put_sync(ei_local->dev);
+	pm_runtime_disable(ei_local->dev);
+
+	return 0;
+}
+
+/* ethtool ops for the main raeth device; only link status is exposed. */
+static struct ethtool_ops ra_ethtool_ops = {
+	.get_link = et_get_link,
+};
+
+/* ethtool ops for the GMAC2 pseudo device; only link status is exposed. */
+static struct ethtool_ops ra_virt_ethtool_ops = {
+	.get_link = et_virt_get_link,
+};
+
+/* net_device_ops for the GMAC2 pseudo interface created by
+ * raeth_init_pseudo(); TX is forwarded to the parent raeth device.
+ */
+static const struct net_device_ops virtualif_netdev_ops = {
+	.ndo_open = virtualif_open,
+	.ndo_stop = virtualif_close,
+	.ndo_start_xmit = virtualif_send_packets,
+	.ndo_get_stats = virtualif_get_stats,
+	.ndo_set_mac_address = ei_set_mac2_addr,
+	.ndo_change_mtu = ei_change_mtu,
+	.ndo_do_ioctl = virtualif_ioctl,
+	.ndo_validate_addr = eth_validate_addr,
+};
+
+/* Allocate and register the GMAC2 pseudo net_device(s) (DEV2_NAME) that
+ * forward their traffic through the parent raeth device.
+ *
+ * Fix: the return value of register_netdev() was ignored; on failure the
+ * half-initialized net_device leaked and p_ad->pseudo_dev kept pointing
+ * at an unregistered device.  Registration failure is now logged and the
+ * device freed.
+ *
+ * @p_ad:    parent driver private data (records the pseudo_dev pointer)
+ * @net_dev: the parent raeth net_device
+ */
+void raeth_init_pseudo(struct END_DEVICE *p_ad, struct net_device *net_dev)
+{
+	int index;
+	struct net_device *dev;
+	struct PSEUDO_ADAPTER *p_pseudo_ad;
+	struct END_DEVICE *ei_local = netdev_priv(net_dev);
+
+	for (index = 0; index < MAX_PSEUDO_ENTRY; index++) {
+		dev = alloc_etherdev_mqs(sizeof(struct PSEUDO_ADAPTER),
+					 gmac2_txq_num, 1);
+		if (!dev) {
+			pr_err("alloc_etherdev for PSEUDO_ADAPTER failed.\n");
+			return;
+		}
+		strncpy(dev->name, DEV2_NAME, sizeof(dev->name) - 1);
+		netif_set_real_num_tx_queues(dev, gmac2_txq_num);
+		netif_set_real_num_rx_queues(dev, 1);
+
+		ei_mac2_addr_setting(dev);
+		/*set my mac*/
+		set_mac2_address(dev->dev_addr);
+		ether_setup(dev);
+		p_pseudo_ad = netdev_priv(dev);
+
+		/* Cross-link pseudo and parent devices */
+		p_pseudo_ad->pseudo_dev = dev;
+		p_pseudo_ad->raeth_dev = net_dev;
+		p_ad->pseudo_dev = dev;
+
+		dev->netdev_ops = &virtualif_netdev_ops;
+
+		if (ei_local->features & FE_HW_LRO)
+			dev->features |= NETIF_F_HW_CSUM;
+		else
+			/* Can checksum TCP/UDP over IPv4 */
+			dev->features |= NETIF_F_IP_CSUM;
+
+		if (ei_local->features & FE_TSO) {
+			dev->features |= NETIF_F_SG;
+			dev->features |= NETIF_F_TSO;
+		}
+
+		if (ei_local->features & FE_TSO_V6) {
+			dev->features |= NETIF_F_TSO6;
+			/* Can checksum TCP/UDP over IPv6 */
+			dev->features |= NETIF_F_IPV6_CSUM;
+		}
+
+		dev->vlan_features = dev->features;
+
+		if (ei_local->features & FE_ETHTOOL) {
+			dev->ethtool_ops = &ra_virt_ethtool_ops;
+			ethtool_virt_init(dev);
+		}
+
+		/* Register this device; free it if the core rejects it */
+		if (register_netdev(dev)) {
+			pr_err("register_netdev for PSEUDO_ADAPTER failed.\n");
+			free_netdev(dev);
+			return;
+		}
+	}
+}
+
+/* Program the PSE input-queue reservation and output-queue threshold
+ * registers with fixed values.  The constants are hardware tuning values
+ * from MediaTek — presumably per-port FIFO page reservations/thresholds
+ * packed two 16-bit fields per register; see the PSE register map before
+ * changing any of them.
+ */
+void ei_set_pse_threshold(void)
+{
+
+	sys_reg_write(PSE_IQ_REV1, 0x001a000e);
+	sys_reg_write(PSE_IQ_REV2, 0x01ff001a);
+	sys_reg_write(PSE_IQ_REV3, 0x000e01ff);
+	sys_reg_write(PSE_IQ_REV4, 0x000e000e);
+	sys_reg_write(PSE_IQ_REV5, 0x000e000e);
+	sys_reg_write(PSE_IQ_REV6, 0x000e000e);
+	sys_reg_write(PSE_IQ_REV7, 0x000e000e);
+	sys_reg_write(PSE_IQ_REV8, 0x000e000e);
+	sys_reg_write(PSE_OQ_TH1, 0x000f000a);
+	sys_reg_write(PSE_OQ_TH2, 0x001a000f);
+	sys_reg_write(PSE_OQ_TH3, 0x000f001a);
+	sys_reg_write(PSE_OQ_TH4, 0x01ff000f);
+	sys_reg_write(PSE_OQ_TH5, 0x000f000f);
+	sys_reg_write(PSE_OQ_TH6, 0x0006000f);
+	sys_reg_write(PSE_OQ_TH7, 0x00060006);
+	sys_reg_write(PSE_OQ_TH8, 0x00060006);
+}
+
+/* ndo_open for the raeth device: reserve the module, initialize DMA,
+ * frame engine and switch, start the queue, enable interrupts, set the
+ * MAC address and (on LEOPARD) configure PHY LEDs / polling.
+ *
+ * Fix: every error return after the successful try_module_get() leaked a
+ * module reference, preventing the module from ever being unloaded after
+ * a failed open.  Each error path now drops the reference.
+ *
+ * NOTE(review): the late error paths still do not unwind earlier steps
+ * (DMA init, fe_int_enable, netif_start_queue) — pre-existing behavior
+ * left unchanged here; verify whether fuller unwinding is needed.
+ *
+ * @dev: the raeth net_device
+ *
+ * Returns 0 on success, a negative errno (or -1 for module-get failure)
+ * otherwise.
+ */
+int ei_open(struct net_device *dev)
+{
+	int err;
+	struct END_DEVICE *ei_local;
+
+
+	ei_local = netdev_priv(dev);
+
+	if (!ei_local) {
+		pr_err("%s: ei_open passed a non-existent device!\n",
+		       dev->name);
+		return -ENXIO;
+	}
+
+	if (!try_module_get(THIS_MODULE)) {
+		pr_err("%s: Cannot reserve module\n", __func__);
+		return -1;
+	}
+
+	pr_info("Raeth %s (", RAETH_VERSION);
+	if (ei_local->features & FE_INT_NAPI)
+		pr_info("NAPI\n");
+	else if (ei_local->features & FE_INT_TASKLET)
+		pr_info("Tasklet");
+	else if (ei_local->features & FE_INT_WORKQ)
+		pr_info("Workqueue");
+	pr_info(")\n");
+
+	ei_reset_statistics(ei_local);
+
+	ei_set_pse_threshold();
+
+	err = ei_init_dma(dev);
+	if (err) {
+		module_put(THIS_MODULE);
+		return err;
+	}
+
+	if (ei_local->chip_name != MT7621_FE) {
+		fe_gmac_reset();
+		fe_sw_init();
+	}
+
+	/* initialize fe and switch register */
+	if (ei_local->chip_name != LEOPARD_FE)
+		fe_sw_preinit(ei_local);
+
+
+	forward_config(dev);
+
+	if ((ei_local->chip_name == MT7623_FE) &&
+	    (ei_local->features & FE_HW_LRO)) {
+		ei_local->kreset_task =
+		    kthread_create(fe_reset_thread, NULL, "FE_reset_kthread");
+		if (IS_ERR(ei_local->kreset_task)) {
+			module_put(THIS_MODULE);
+			return PTR_ERR(ei_local->kreset_task);
+		}
+		wake_up_process(ei_local->kreset_task);
+	}
+
+	netif_start_queue(dev);
+
+	fe_int_enable(dev);
+
+	/*set hw my mac address*/
+	set_mac_address(dev->dev_addr);
+	if (ei_local->chip_name == LEOPARD_FE) {
+		/*phy led enable*/
+		mii_mgr_write_cl45(0, 0x1f, 0x21, 0x8008);
+		mii_mgr_write_cl45(0, 0x1f, 0x24, 0x8007);
+		mii_mgr_write_cl45(0, 0x1f, 0x25, 0x3f);
+		if ((ei_local->architecture & GE2_RGMII_AN)) {
+			mii_mgr_write(0, 9, 0x200);
+			mii_mgr_write(0, 0, 0x1340);
+			if (mac_to_gigaphy_mode_addr2 == 0) {
+				ei_local->kphy_poll_task =
+				    kthread_create(phy_polling_thread, NULL, "phy_polling_kthread");
+				if (IS_ERR(ei_local->kphy_poll_task)) {
+					module_put(THIS_MODULE);
+					return PTR_ERR(ei_local->kphy_poll_task);
+				}
+				wake_up_process(ei_local->kphy_poll_task);
+			}
+		} else if (ei_local->architecture & LEOPARD_EPHY_GMII) {
+			mii_mgr_write(0, 9, 0x200);
+			mii_mgr_write(0, 0, 0x1340);
+		}
+	}
+	return 0;
+}
+
+/* ndo_stop for the raeth device: reset the frame engine, stop helper
+ * kthreads, quiesce the queue, disable interrupts, tear down DMA and the
+ * switch, then release the module reference taken in ei_open().
+ *
+ * NOTE(review): kphy_poll_task is stopped whenever GE2_RGMII_AN is set,
+ * but ei_open() only created it when mac_to_gigaphy_mode_addr2 == 0 as
+ * well — verify that kthread_stop() cannot be reached with a task that
+ * was never started.
+ *
+ * @dev: the raeth net_device
+ *
+ * Returns 0 unconditionally.
+ */
+int ei_close(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+
+	fe_reset();
+
+	if ((ei_local->chip_name == MT7623_FE) &&
+	    (ei_local->features & FE_HW_LRO))
+		kthread_stop(ei_local->kreset_task);
+
+	if (ei_local->chip_name == LEOPARD_FE) {
+		if (ei_local->architecture & GE2_RGMII_AN)
+			kthread_stop(ei_local->kphy_poll_task);
+	}
+
+	netif_stop_queue(dev);
+	ra2880stop(ei_local);
+
+	fe_int_disable(dev);
+
+	if (ei_local->features & FE_GE2_SUPPORT)
+		virtualif_close(ei_local->pseudo_dev);
+
+	ei_deinit_dma(dev);
+
+	if (ei_local->chip_name != LEOPARD_FE)
+		fe_sw_deinit(ei_local);
+
+	module_put(THIS_MODULE);
+
+	return 0;
+}
+
+/* ndo_start_xmit shim: forward to the registered xmit hook for GMAC 1. */
+static int ei_start_xmit_fake(struct sk_buff *skb, struct net_device *dev)
+{
+	struct END_DEVICE *priv = netdev_priv(dev);
+
+	return priv->ei_start_xmit(skb, dev, 1);
+}
+
+/* ndo_get_stats for the main raeth device: expose its private counters. */
+struct net_device_stats *ra_get_stats(struct net_device *dev)
+{
+	struct END_DEVICE *priv = netdev_priv(dev);
+
+	return &priv->stat;
+}
+
+/* dump_phy_reg - pretty-print a range of PHY registers via MDIO.
+ * @port_no:  PHY/port address used for the MDIO accesses
+ * @from:     first register to dump (inclusive)
+ * @to:       last register to dump (inclusive)
+ * @is_local: 0 = global register page, 1 = per-port local page
+ * @page_no:  page index (0..7) programmed into register 31
+ *
+ * Selects the requested page through register 31 (bit 15 selects
+ * local/global, bits 14:12 the page number), then reads and prints
+ * registers @from..@to, eight per line.
+ *
+ * Fix: the previous implementation duplicated the dump loop in both
+ * branches and ignored @from/@to (the loop was hard-coded to 16..31).
+ * The single loop below honours the parameters while producing
+ * identical output for all existing callers, which pass (16, 31).
+ */
+void dump_phy_reg(int port_no, int from, int to, int is_local, int page_no)
+{
+	u32 i = 0;
+	u32 temp = 0;
+	u32 r31 = 0;
+
+	if (is_local == 0)
+		pr_info("\n\nGlobal Register Page %d\n", page_no);
+	else
+		pr_info("\n\nLocal Register Port %d Page %d\n", port_no,
+			page_no);
+	pr_info("===============");
+
+	r31 |= (is_local ? 1 : 0) << 15;	/* local/global selector */
+	r31 |= ((page_no & 0x7) << 12);	/* page no */
+	mii_mgr_write(port_no, 31, r31);	/* select page x */
+
+	for (i = from; i <= to; i++) {
+		if (i % 8 == 0)
+			pr_info("\n");
+		mii_mgr_read(port_no, i, &temp);
+		pr_info("%02d: %04X ", i, temp);
+	}
+	pr_info("\n");
+}
+
+/* ei_ioctl - ndo_do_ioctl handler for the driver's private ioctls:
+ * clause-22/clause-45 MII register access, embedded-switch register
+ * read/write and PHY dumps, per-port ingress/egress rate limiting,
+ * LAN IP updates (feeds HW LRO), and QDMA/EPHY sub-ioctls.
+ *
+ * NOTE(review): copy_from_user()/copy_to_user() return values are
+ * stored in 'result' but never checked, so a faulting user buffer is
+ * silently ignored.
+ * NOTE(review): the user-memory copies happen while holding page_lock
+ * via spin_lock_irq(); copy_from_user() may sleep on a page fault,
+ * which is not allowed in atomic context — confirm.
+ */
+int ei_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct esw_reg reg;
+	struct esw_rate ratelimit;
+	struct qdma_ioctl_data qdma_data;
+	struct ephy_ioctl_data ephy_data;
+
+	unsigned int offset = 0;
+	unsigned int value = 0;
+	int ret = 0;
+	unsigned long result;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	struct ra_mii_ioctl_data mii;
+	char ip_tmp[IP4_ADDR_LEN];
+
+	spin_lock_irq(&ei_local->page_lock);
+
+	switch (cmd) {
+	case RAETH_MII_READ:
+		result = copy_from_user(&mii, ifr->ifr_data, sizeof(mii));
+		mii_mgr_read(mii.phy_id, mii.reg_num, &mii.val_out);
+		result = copy_to_user(ifr->ifr_data, &mii, sizeof(mii));
+		break;
+
+	case RAETH_MII_WRITE:
+		result = copy_from_user(&mii, ifr->ifr_data, sizeof(mii));
+		mii_mgr_write(mii.phy_id, mii.reg_num, mii.val_in);
+		break;
+	case RAETH_MII_READ_CL45:
+		result = copy_from_user(&mii, ifr->ifr_data, sizeof(mii));
+		mii_mgr_read_cl45(mii.port_num, mii.dev_addr, mii.reg_addr,
+				  &mii.val_out);
+		result = copy_to_user(ifr->ifr_data, &mii, sizeof(mii));
+		break;
+	case RAETH_MII_WRITE_CL45:
+		result = copy_from_user(&mii, ifr->ifr_data, sizeof(mii));
+		mii_mgr_write_cl45(mii.port_num, mii.dev_addr, mii.reg_addr,
+				   mii.val_in);
+		break;
+	case RAETH_ESW_REG_READ:
+		/* direct MMIO read of a switch register, bounds-checked */
+		result = copy_from_user(&reg, ifr->ifr_data, sizeof(reg));
+		if (reg.off > REG_ESW_MAX) {
+			ret = -EINVAL;
+			break;
+		}
+		reg.val = sys_reg_read(RALINK_ETH_SW_BASE + reg.off);
+		result = copy_to_user(ifr->ifr_data, &reg, sizeof(reg));
+		break;
+	case RAETH_ESW_REG_WRITE:
+		result = copy_from_user(&reg, ifr->ifr_data, sizeof(reg));
+		if (reg.off > REG_ESW_MAX) {
+			ret = -EINVAL;
+			break;
+		}
+		sys_reg_write(RALINK_ETH_SW_BASE + reg.off, reg.val);
+		break;
+	case RAETH_ESW_PHY_DUMP:
+		/* reg.val selects one port; 32 means "all ports" */
+		result = copy_from_user(&reg, ifr->ifr_data, sizeof(reg));
+		/* SPEC defined Register 0~15
+		 * Global Register 16~31 for each page
+		 * Local Register 16~31 for each page
+		 */
+		pr_info("SPEC defined Register");
+		if (reg.val == 32) {	/* dump all phy register */
+			int i = 0;
+
+			for (i = 0; i < 5; i++) {
+				pr_info("\n[Port %d]===============", i);
+				for (offset = 0; offset < 16; offset++) {
+					if (offset % 8 == 0)
+						pr_info("\n");
+					mii_mgr_read(i, offset, &value);
+					pr_info("%02d: %04X ", offset, value);
+				}
+			}
+		} else {
+			pr_info("\n[Port %d]===============", reg.val);
+			for (offset = 0; offset < 16; offset++) {
+				if (offset % 8 == 0)
+					pr_info("\n");
+				mii_mgr_read(reg.val, offset, &value);
+				pr_info("%02d: %04X ", offset, value);
+			}
+		}
+
+		/* global register  page 0~4 */
+		for (offset = 0; offset < 5; offset++) {
+			if (reg.val == 32)	/* dump all phy register */
+				dump_phy_reg(0, 16, 31, 0, offset);
+			else
+				dump_phy_reg(reg.val, 16, 31, 0, offset);
+		}
+
+		if (reg.val == 32) {	/* dump all phy register */
+			/* local register port 0-port4 */
+			for (offset = 0; offset < 5; offset++) {
+				/* dump local page 0 */
+				dump_phy_reg(offset, 16, 31, 1, 0);
+				/* dump local page 1 */
+				dump_phy_reg(offset, 16, 31, 1, 1);
+				/* dump local page 2 */
+				dump_phy_reg(offset, 16, 31, 1, 2);
+				/* dump local page 3 */
+				dump_phy_reg(offset, 16, 31, 1, 3);
+			}
+		} else {
+			/* dump local page 0 */
+			dump_phy_reg(reg.val, 16, 31, 1, 0);
+			/* dump local page 1 */
+			dump_phy_reg(reg.val, 16, 31, 1, 1);
+			/* dump local page 2 */
+			dump_phy_reg(reg.val, 16, 31, 1, 2);
+			/* dump local page 3 */
+			dump_phy_reg(reg.val, 16, 31, 1, 3);
+		}
+		break;
+
+	case RAETH_ESW_INGRESS_RATE:
+		/* per-port ingress limit: pick a power-of-ten unit (bits
+		 * 10:8) so the mantissa fits in the low byte
+		 */
+		result = copy_from_user(&ratelimit, ifr->ifr_data,
+					sizeof(ratelimit));
+		offset = 0x1800 + (0x100 * ratelimit.port);
+		value = sys_reg_read(RALINK_ETH_SW_BASE + offset);
+
+		value &= 0xffff0000;
+		if (ratelimit.on_off == 1) {
+			value |= (ratelimit.on_off << 15);
+			if (ratelimit.bw < 100) {
+				value |= (0x0 << 8);
+				value |= ratelimit.bw;
+			} else if (ratelimit.bw < 1000) {
+				value |= (0x1 << 8);
+				value |= ratelimit.bw / 10;
+			} else if (ratelimit.bw < 10000) {
+				value |= (0x2 << 8);
+				value |= ratelimit.bw / 100;
+			} else if (ratelimit.bw < 100000) {
+				value |= (0x3 << 8);
+				value |= ratelimit.bw / 1000;
+			} else {
+				value |= (0x4 << 8);
+				value |= ratelimit.bw / 10000;
+			}
+		}
+		pr_info("offset = 0x%4x value=0x%x\n\r", offset, value);
+		/* NOTE(review): register read above is MMIO but the write
+		 * back goes through mii_mgr_write(0x1f, ...) — confirm the
+		 * switch expects this indirect write path.
+		 */
+		mii_mgr_write(0x1f, offset, value);
+		break;
+
+	case RAETH_ESW_EGRESS_RATE:
+		/* same encoding as the ingress case, different CSR bank */
+		result = copy_from_user(&ratelimit, ifr->ifr_data,
+					sizeof(ratelimit));
+		offset = 0x1040 + (0x100 * ratelimit.port);
+		value = sys_reg_read(RALINK_ETH_SW_BASE + offset);
+
+		value &= 0xffff0000;
+		if (ratelimit.on_off == 1) {
+			value |= (ratelimit.on_off << 15);
+			if (ratelimit.bw < 100) {
+				value |= (0x0 << 8);
+				value |= ratelimit.bw;
+			} else if (ratelimit.bw < 1000) {
+				value |= (0x1 << 8);
+				value |= ratelimit.bw / 10;
+			} else if (ratelimit.bw < 10000) {
+				value |= (0x2 << 8);
+				value |= ratelimit.bw / 100;
+			} else if (ratelimit.bw < 100000) {
+				value |= (0x3 << 8);
+				value |= ratelimit.bw / 1000;
+			} else {
+				value |= (0x4 << 8);
+				value |= ratelimit.bw / 10000;
+			}
+		}
+		pr_info("offset = 0x%4x value=0x%x\n\r", offset, value);
+		mii_mgr_write(0x1f, offset, value);
+		break;
+
+	case RAETH_SET_LAN_IP:
+		result = copy_from_user(ip_tmp, ifr->ifr_data, IP4_ADDR_LEN);
+		strncpy(ei_local->lan_ip4_addr, ip_tmp, IP4_ADDR_LEN);
+		pr_info("RAETH_SET_LAN_IP: %s\n", ei_local->lan_ip4_addr);
+
+
+		/* HW LRO matches flows against the LAN IP */
+		if (ei_local->features & FE_HW_LRO)
+			fe_set_hw_lro_my_ip(ei_local->lan_ip4_addr);
+		break;
+
+	case RAETH_QDMA_IOCTL:
+
+		ret =
+		    copy_from_user(&qdma_data, ifr->ifr_data,
+				   sizeof(qdma_data));
+		ei_qdma_ioctl(dev, ifr, &qdma_data);
+
+		break;
+
+	case RAETH_EPHY_IOCTL:
+
+		ret =
+		    copy_from_user(&ephy_data, ifr->ifr_data,
+				   sizeof(ephy_data));
+		ephy_ioctl(dev, ifr, &ephy_data);
+
+		break;
+
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+
+	spin_unlock_irq(&ei_local->page_lock);
+	return ret;
+}
+
+/* net_device_ops table for the raeth GE1 interface; installed by
+ * raeth_setup_dev_fptable().
+ */
+static const struct net_device_ops ei_netdev_ops = {
+	.ndo_init = ei_init,
+	.ndo_uninit = ei_uninit,
+	.ndo_open = ei_open,
+	.ndo_stop = ei_close,
+	.ndo_start_xmit = ei_start_xmit_fake,
+	.ndo_get_stats = ra_get_stats,
+	.ndo_set_mac_address = ei_set_mac_addr,
+	.ndo_change_mtu = ei_change_mtu,
+	.ndo_do_ioctl = ei_ioctl,
+	.ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = raeth_poll_full,
+#endif
+};
+
+/* raeth_setup_dev_fptable - attach the driver's netdev/ethtool op
+ * tables and TX watchdog timeout to @dev before registration.
+ */
+void raeth_setup_dev_fptable(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+
+	dev->netdev_ops = &ei_netdev_ops;
+
+	if (ei_local->features & FE_ETHTOOL)
+		dev->ethtool_ops = &ra_ethtool_ops;
+
+/* TX hang detection threshold for the netdev watchdog */
+#define TX_TIMEOUT (5 * HZ)
+	dev->watchdog_timeo = TX_TIMEOUT;
+}
+
+/* ei_ioc_setting - configure DMA ops according to whether hardware IO
+ * coherence is available.  With HW IOC the coherence switch is enabled
+ * via MMIO and the main device gets coherent DMA ops; the QDMA child
+ * device is coherent only when FQOS is off.  Without HW IOC all
+ * devices fall back to non-coherent DMA ops.
+ *
+ * NOTE(review): 0x10395000 is written as a magic physical address here
+ * ("S4 coherence function") — a named constant in the header would be
+ * preferable; confirm against the SoC datasheet.
+ */
+void ei_ioc_setting(struct platform_device *pdev, struct END_DEVICE *ei_local)
+{
+	void __iomem *reg_virt;
+	/* unsigned int reg_val; */
+
+	if (ei_local->features & FE_HW_IOCOHERENT) {
+		pr_info("[Raether] HW IO coherent is enabled !\n");
+		/* enable S4 coherence function */
+		reg_virt = ioremap(0x10395000, 0x10);
+		sys_reg_write(reg_virt, 0x00000003);
+
+		/* Enable ETHSYS io coherence path */
+		/*reg_virt = ioremap(HW_IOC_BASE, 0x10);*/
+		/*reg_virt += IOC_OFFSET;*/
+		/*reg_val = sys_reg_read(reg_virt);*/
+
+		/*if (ei_local->features & FE_QDMA_FQOS)*/
+		/*	reg_val |= IOC_ETH_PDMA;*/
+		/*else*/
+		/*	reg_val |= IOC_ETH_PDMA | IOC_ETH_QDMA;*/
+
+		/*sys_reg_write(reg_virt, reg_val);*/
+		/*reg_virt -= IOC_OFFSET;*/
+		iounmap(reg_virt);
+
+		/* last arg = coherent flag for the arch DMA ops */
+		arch_setup_dma_ops(&pdev->dev, 0, 0, NULL, TRUE);
+
+		if (ei_local->features & FE_QDMA_FQOS)
+			arch_setup_dma_ops(&ei_local->qdma_pdev->dev,
+					   0, 0, NULL, FALSE);
+		else
+			arch_setup_dma_ops(&ei_local->qdma_pdev->dev,
+					   0, 0, NULL, TRUE);
+	} else {
+		pr_info("[Raether] HW IO coherent is disabled !\n");
+		arch_setup_dma_ops(&pdev->dev, 0, 0, NULL, FALSE);
+		arch_setup_dma_ops(&ei_local->qdma_pdev->dev,
+				   0, 0, NULL, FALSE);
+	}
+}
+
+/* fe_chip_name_config - derive ei_local->chip_name from the ethernet
+ * node's DT "compatible" string and log the detected chip.
+ *
+ * Fix: the original only checked the of_property_read_string() result
+ * on the first comparison; when the property was missing, the
+ * remaining strcasecmp() calls dereferenced the uninitialised @pm
+ * pointer.  Bail out early instead; behaviour for valid DTs is
+ * unchanged.
+ */
+void fe_chip_name_config(struct END_DEVICE *ei_local, struct platform_device *pdev)
+{
+	const char *pm;
+	int ret;
+
+	ret = of_property_read_string(pdev->dev.of_node, "compatible", &pm);
+	if (ret) {
+		/* no compatible string: leave chip_name untouched */
+		pr_info("CHIP_ID error\n");
+		return;
+	}
+
+	if (!strcasecmp(pm, "mediatek,mt7621-eth")) {
+		ei_local->chip_name = MT7621_FE;
+		pr_info("CHIP_ID = MT7621\n");
+	} else if (!strcasecmp(pm, "mediatek,mt7622-raeth")) {
+		ei_local->chip_name = MT7622_FE;
+		pr_info("CHIP_ID = MT7622\n");
+	} else if (!strcasecmp(pm, "mediatek,mt7623-eth")) {
+		ei_local->chip_name = MT7623_FE;
+		pr_info("CHIP_ID = MT7623\n");
+	} else if (!strcasecmp(pm, "mediatek,leopard-eth")) {
+		ei_local->chip_name = LEOPARD_FE;
+		pr_info("CHIP_ID = LEOPARD_FE\n");
+	} else if (!strcasecmp(pm, "mediatek,mt7986-eth")) {
+		ei_local->chip_name = MT7986_FE;
+		pr_info("CHIP_ID = MT7986_FE\n");
+	} else {
+		pr_info("CHIP_ID error\n");
+	}
+}
+
+/* raeth_set_wol - enable or disable Wake-on-LAN on MAC1 by
+ * read-modify-writing the WOL control register.
+ */
+void raeth_set_wol(bool enable)
+{
+	unsigned int wol = sys_reg_read(MAC1_WOL);
+
+	if (enable)
+		wol |= (WOL_INT_CLR | WOL_INT_EN | WOL_EN);
+	else
+		wol &= ~(WOL_INT_EN | WOL_EN);
+
+	sys_reg_write(MAC1_WOL, wol);
+}
+
+/* Compiled-out system PM hooks: suspend arms Wake-on-LAN, resume
+ * disarms it.  Kept for the (disabled) raeth_pm_ops table below.
+ */
+#if (0)
+static int raeth_resume(struct device *dev)
+{
+	raeth_set_wol(false);
+	return 0;
+}
+
+static int raeth_suspend(struct device *dev)
+{
+	raeth_set_wol(true);
+	return 0;
+}
+#endif
+/* PHY addresses parsed from the DT for GMAC1/GMAC2 AN modes. */
+u32 mac_to_gigaphy_mode_addr;
+u32 mac_to_gigaphy_mode_addr2;
+
+/* raeth_arch_setting - parse the GMAC1/GMAC2 wiring description from
+ * the device tree and accumulate the corresponding flags into
+ * ei_local->architecture / ei_local->features.
+ *
+ * Fix: the original checked the of_property_read_string() result only
+ * on the first branch of each if/else-if chain, so a missing DT
+ * property made the remaining strcasecmp() calls read the
+ * uninitialised @pm pointer.  Every comparison is now guarded by the
+ * corresponding read result; behaviour for well-formed DTs is
+ * unchanged (a missing property now deterministically takes the error
+ * / "no connect" path).
+ */
+void raeth_arch_setting(struct END_DEVICE *ei_local, struct platform_device *pdev)
+{
+	const char *pm;
+	int ret;
+	u32 val;
+
+	/* optional dedicated-WAN port */
+	ret = of_property_read_string(pdev->dev.of_node, "wan_at", &pm);
+	if (!ret) {
+		ei_local->architecture |= LAN_WAN_SUPPORT;
+		if (!strcasecmp(pm, "p4")) {
+			ei_local->architecture |= WAN_AT_P4;
+			pr_info("WAN at P4\n");
+		} else if (!strcasecmp(pm, "p0")) {
+			ei_local->architecture |= WAN_AT_P0;
+			pr_info("WAN at P0\n");
+		}
+	}
+
+	/* GMAC1 wiring */
+	ret = of_property_read_string(pdev->dev.of_node, "gmac1-support", &pm);
+	if (!ret && !strcasecmp(pm, "sgmii-1")) {
+		ei_local->architecture |= RAETH_SGMII;
+		pr_info("GMAC1 support SGMII\n");
+		ret = of_property_read_string(pdev->dev.of_node, "sgmii-mode-1", &pm);
+		if (!ret && !strcasecmp(pm, "force-2500")) {
+			pr_info("GE1_SGMII_FORCE_2500\n");
+			ei_local->architecture |= GE1_SGMII_FORCE_2500;
+		} else if (!ret && !strcasecmp(pm, "an")) {
+			pr_info("GE1_SGMII_AN\n");
+			ei_local->architecture |= GE1_SGMII_AN;
+			of_property_read_u32(pdev->dev.of_node, "gmac1-phy-address", &val);
+			mac_to_gigaphy_mode_addr = val;
+			pr_info("mac_to_gigaphy_mode_addr = 0x%x\n", mac_to_gigaphy_mode_addr);
+		}
+	} else if (!ret && !strcasecmp(pm, "rgmii-1")) {
+		pr_info("GMAC1 support rgmii\n");
+		ret = of_property_read_string(pdev->dev.of_node, "rgmii-mode-1", &pm);
+		if (!ret && !strcasecmp(pm, "force-1000")) {
+			pr_info("GE1_RGMII_FORCE_1000\n");
+			ei_local->architecture |= GE1_RGMII_FORCE_1000;
+		} else if (!ret && !strcasecmp(pm, "an")) {
+			pr_info("GE1_RGMII_AN\n");
+			of_property_read_u32(pdev->dev.of_node, "gmac1-phy-address", &val);
+			mac_to_gigaphy_mode_addr = val;
+			ei_local->architecture |= GE1_RGMII_AN;
+			pr_info("mac_to_gigaphy_mode_addr = 0x%x\n", mac_to_gigaphy_mode_addr);
+		} else if (!ret && !strcasecmp(pm, "one-ephy")) {
+			pr_info("GE1_RGMII_ONE_EPHY\n");
+			ei_local->architecture |= GE1_RGMII_ONE_EPHY;
+		}
+
+	} else if (!ret && !strcasecmp(pm, "esw")) {
+		pr_info("Embedded 5-Port Switch\n");
+		ei_local->architecture |= RAETH_ESW;
+		if (ei_local->chip_name == MT7622_FE) {
+			ei_local->architecture |= MT7622_EPHY;
+		} else if (ei_local->chip_name == LEOPARD_FE) {
+			ret = of_property_read_string(pdev->dev.of_node, "gmac0", &pm);
+			if (!ret && !strcasecmp(pm, "gmii"))
+				ei_local->architecture |= LEOPARD_EPHY_GMII;
+			ei_local->architecture |= LEOPARD_EPHY;
+		}
+	} else if (!ret && !strcasecmp(pm, "none")) {
+		pr_info("GE1_RGMII_NONE\n");
+		ei_local->architecture |= GE1_RGMII_NONE;
+	} else {
+		pr_info("GE1 dts parsing error\n");
+	}
+
+	/* GMAC2 wiring (optional) */
+	ret = of_property_read_string(pdev->dev.of_node, "gmac2-support", &pm);
+	if (!ret) {
+		ei_local->architecture |= GMAC2;
+		ei_local->features |= FE_GE2_SUPPORT;
+		if (!strcasecmp(pm, "sgmii-2")) {
+			ei_local->architecture |= GE2_RAETH_SGMII;
+			pr_info("GMAC2 support SGMII\n");
+			ret = of_property_read_string(pdev->dev.of_node, "sgmii-mode-2", &pm);
+			if (!ret && !strcasecmp(pm, "force-2500")) {
+				pr_info("GE2_SGMII_FORCE_2500\n");
+				ei_local->architecture |= GE2_SGMII_FORCE_2500;
+				ret = of_property_read_string(pdev->dev.of_node, "gmac2-force", &pm);
+				if (!ret && !strcasecmp(pm, "sgmii-switch")) {
+					ei_local->architecture |= SGMII_SWITCH;
+					pr_info("GE2_SGMII_FORCE LINK SWITCH\n");
+				}
+			} else if (!ret && !strcasecmp(pm, "an")) {
+				pr_info("GE2_SGMII_AN\n");
+				ei_local->architecture |= GE2_SGMII_AN;
+				of_property_read_u32(pdev->dev.of_node, "gmac2-phy-address", &val);
+				mac_to_gigaphy_mode_addr2 = val;
+			}
+		} else if (!strcasecmp(pm, "rgmii-2")) {
+			pr_info("GMAC2 support rgmii\n");
+			ret = of_property_read_string(pdev->dev.of_node, "rgmii-mode-2", &pm);
+			if (!ret && !strcasecmp(pm, "force-1000")) {
+				pr_info("GE2_RGMII_FORCE_1000\n");
+				ei_local->architecture |= GE2_RGMII_FORCE_1000;
+			} else if (!ret && !strcasecmp(pm, "an")) {
+				pr_info("RGMII_AN (External GigaPhy)\n");
+				of_property_read_u32(pdev->dev.of_node, "gmac2-phy-address", &val);
+				mac_to_gigaphy_mode_addr2 = val;
+				pr_info("mac_to_gigaphy_mode_addr2 = 0x%x\n", mac_to_gigaphy_mode_addr2);
+				ei_local->architecture |= GE2_RGMII_AN;
+			} else if (!ret && !strcasecmp(pm, "an-internal")) {
+				pr_info("RGMII_AN (Internal GigaPhy)\n");
+				ei_local->architecture |= GE2_INTERNAL_GPHY;
+			}
+		} else {
+			pr_info("GE2 no connect\n");
+		}
+	} else {
+		pr_info("GE2 no connect\n");
+	}
+}
+
+/* fe_tx_rx_dec - read TX/RX descriptor sizing from the device tree,
+ * derive the global queue/descriptor counts, and allocate the
+ * per-descriptor bookkeeping arrays in ei_local.
+ *
+ * NOTE(review): the of_property_read_u32() results are unchecked, so a
+ * missing DT property leaves 'val' uninitialised before it is copied
+ * into the globals — verify all five properties are mandatory in the
+ * DT binding.
+ * NOTE(review): the kmalloc_array() results are not checked for NULL;
+ * allocation failure would be first noticed as a crash later.
+ */
+void fe_tx_rx_dec(struct END_DEVICE *ei_local, struct platform_device *pdev)
+{
+	u32 val;
+	u8 i;
+
+	of_property_read_u32(pdev->dev.of_node, "gmac1_txq_num", &val);
+	gmac1_txq_num = val;
+	of_property_read_u32(pdev->dev.of_node, "gmac1_txq_txd_num", &val);
+	gmac1_txq_txd_num = val;
+	gmac1_txd_num = gmac1_txq_num * gmac1_txq_txd_num;
+
+	of_property_read_u32(pdev->dev.of_node, "gmac2_txq_num", &val);
+	gmac2_txq_num = val;
+	of_property_read_u32(pdev->dev.of_node, "gmac2_txq_txd_num", &val);
+	gmac2_txq_txd_num = val;
+	gmac2_txd_num = gmac2_txq_num * gmac2_txq_txd_num;
+
+	num_tx_desc = gmac1_txd_num + gmac2_txd_num;
+	total_txq_num = gmac1_txq_num + gmac2_txq_num;
+
+	of_property_read_u32(pdev->dev.of_node, "num_rx_desc", &val);
+	num_rx_desc = val;
+	num_tx_max_process = num_tx_desc;
+
+	/* per-TX-descriptor and per-queue bookkeeping arrays */
+	ei_local->free_skb = kmalloc_array(num_tx_desc, sizeof(struct sk_buff *), GFP_KERNEL);
+
+	ei_local->free_txd_num = kmalloc_array(total_txq_num, sizeof(atomic_t), GFP_KERNEL);
+	ei_local->free_txd_head = kmalloc_array(total_txq_num, sizeof(unsigned int), GFP_KERNEL);
+	ei_local->free_txd_tail = kmalloc_array(total_txq_num, sizeof(unsigned int), GFP_KERNEL);
+	ei_local->txd_pool_info = kmalloc_array(num_tx_desc, sizeof(unsigned int), GFP_KERNEL);
+	ei_local->skb_free = kmalloc_array(num_tx_desc, sizeof(struct sk_buff *), GFP_KERNEL);
+	ei_local->rls_cnt = kmalloc_array(total_txq_num, sizeof(unsigned int), GFP_KERNEL);
+	/* one skb-data pointer table per RX ring */
+	for (i = 0; i < MAX_RX_RING_NUM; i++)
+		ei_local->netrx_skb_data[i] =
+			kmalloc_array(num_rx_desc, sizeof(void *), GFP_KERNEL);
+	ei_local->netrx0_skb_data = kmalloc_array(num_rx_desc, sizeof(void *), GFP_KERNEL);
+}
+
+/* static struct wakeup_source eth_wake_lock; */
+
+/* rather_probe - platform probe: allocate the GE1 net_device, parse
+ * feature/architecture/chip configuration from the DT, map register
+ * windows, fetch IRQs, install the op tables and register the netdev
+ * (plus the GE2 pseudo device when supported).
+ *
+ * NOTE(review): the ioremap error paths below 'return' directly and
+ * leak 'netdev' — they should 'goto err_free_dev'.
+ * NOTE(review): of_iomap() returns NULL on failure, not an ERR_PTR,
+ * so the IS_ERR() check on ethdma_sysctl_base never fires — confirm.
+ * (Function name "rather_probe" appears to be a typo of
+ * "raether_probe"; kept as-is since the driver struct references it.)
+ */
+static int rather_probe(struct platform_device *pdev)
+{
+	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	struct END_DEVICE *ei_local;
+	struct net_device *netdev;
+	struct device_node *node;
+	const char *mac_addr;
+	int ret;
+	//int i;
+
+	netdev = alloc_etherdev_mqs(sizeof(struct END_DEVICE),
+				    1, 1);
+	if (!netdev)
+		return -ENOMEM;
+
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+
+	dev_raether = netdev;
+	ei_local = netdev_priv(netdev);
+	ei_local->dev = &pdev->dev;
+	ei_local->netdev = netdev;
+	/* fill features/architecture/chip_name, then size + allocate
+	 * the descriptor bookkeeping
+	 */
+	fe_features_config(ei_local);
+	fe_architecture_config(ei_local);
+	fe_chip_name_config(ei_local, pdev);
+	raeth_arch_setting(ei_local, pdev);
+	fe_tx_rx_dec(ei_local, pdev);
+
+	ret = of_property_read_bool(pdev->dev.of_node, "dma-coherent");
+	if (ret) {
+		pr_err("HW_IOC supported\n");
+		ei_local->features |= FE_HW_IOCOHERENT;
+	}
+
+	/* FQOS + HW IOC needs a separate (non-coherent) QDMA device;
+	 * otherwise QDMA shares this platform device
+	 */
+	if ((ei_local->features & FE_HW_IOCOHERENT) &&
+	    (ei_local->features & FE_QDMA_FQOS)) {
+		pr_err("HW_IOC supported\n");
+		ei_local->qdma_pdev =
+			platform_device_alloc("QDMA", PLATFORM_DEVID_AUTO);
+		if (!ei_local->qdma_pdev) {
+			dev_err(&pdev->dev,
+				"QDMA platform device allocate fail!\n");
+			ret = -ENOMEM;
+			goto err_free_dev;
+		}
+
+		ei_local->qdma_pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+		ei_local->qdma_pdev->dev.dma_mask =
+			&ei_local->qdma_pdev->dev.coherent_dma_mask;
+	} else {
+		ei_local->qdma_pdev = pdev;
+	}
+
+	/* iomap registers */
+	node = of_parse_phandle(pdev->dev.of_node, "mediatek,ethsys", 0);
+	ethdma_sysctl_base = of_iomap(node, 0);
+	if (IS_ERR(ethdma_sysctl_base)) {
+		dev_err(&pdev->dev, "no ethdma_sysctl_base found\n");
+		return PTR_ERR(ethdma_sysctl_base);
+	}
+
+	ethdma_frame_engine_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ethdma_frame_engine_base)) {
+		dev_err(&pdev->dev, "no ethdma_frame_engine_base found\n");
+		return PTR_ERR(ethdma_frame_engine_base);
+	}
+
+	/* NOTE(review): unchecked ioremap of a magic MAC base address */
+	ethdma_mac_base = ioremap(0x15110000, 0x300);
+
+	/* get clock ctrl */
+#if (0)
+	if (ei_local->chip_name != MT7621_FE) {
+		for (i = 0; i < ARRAY_SIZE(ei_local->clks); i++) {
+			ei_local->clks[i] = devm_clk_get(&pdev->dev,
+							 mtk_clks_source_name[i]);
+			if (IS_ERR(ei_local->clks[i])) {
+				if (PTR_ERR(ei_local->clks[i]) == -EPROBE_DEFER)
+					pr_info("!!!!!EPROBE_DEFER!!!!!\n");
+				pr_info("!!!!ENODEV!!!!! clks = %s\n", mtk_clks_source_name[i]);
+			}
+		}
+	}
+#endif
+
+	/* get gsw device node */
+	ei_local->switch_np = of_parse_phandle(pdev->dev.of_node,
+					       "mediatek,switch", 0);
+
+#if 0
+	/* get MAC address */
+	mac_addr = of_get_mac_address(pdev->dev.of_node);
+	if (mac_addr)
+		ether_addr_copy(netdev->dev_addr, mac_addr);
+#endif
+
+	/* get IRQs */
+	ei_local->irq0 = platform_get_irq(pdev, 0);
+	if (ei_local->chip_name != MT7621_FE) {
+		ei_local->irq1 = platform_get_irq(pdev, 1);
+		ei_local->irq2 = platform_get_irq(pdev, 2);
+	}
+	if (ei_local->features & (FE_RSS_4RING | FE_RSS_2RING)) {
+		ei_local->irq3 = platform_get_irq(pdev, 3);
+	}
+
+	pr_err("ei_local->irq0 = %d; ei_local->irq1 = %d; ei_local->irq2 = %d\n", ei_local->irq0, ei_local->irq1, ei_local->irq2);
+#if (0)
+	if (ei_local->architecture & RAETH_ESW) {
+		if (ei_local->architecture & MT7622_EPHY)
+			ei_local->esw_irq = platform_get_irq(pdev, 3);
+		else if (ei_local->architecture & LEOPARD_EPHY)
+			ei_local->esw_irq = platform_get_irq(pdev, 4);
+		pr_info("ei_local->esw_irq = %d\n", ei_local->esw_irq);
+	}
+
+	if (0)
+		ei_clock_enable(ei_local);
+
+	if (ei_local->chip_name == MT7622_FE || ei_local->chip_name == LEOPARD_FE)
+		ei_ioc_setting(pdev, ei_local);
+#endif
+	raeth_setup_dev_fptable(netdev);
+//alive 01
+	ei_mac_addr_setting(netdev);
+//dead 03
+	strncpy(netdev->name, DEV_NAME, sizeof(netdev->name) - 1);
+	netif_set_real_num_tx_queues(netdev, gmac1_txq_num);
+	netif_set_real_num_rx_queues(netdev, 1);
+
+	netdev->addr_len = 6;
+//dead 02
+	netdev->base_addr = (unsigned long)RALINK_FRAME_ENGINE_BASE;
+	/* NOTE(review): 0x2105e303 programs both GMAC MCR registers —
+	 * meaning of the magic value not visible here; verify.
+	 */
+	sys_reg_write(ETHDMASYS_ETH_MAC_BASE + 0x100, 0x2105e303);
+	sys_reg_write(ETHDMASYS_ETH_MAC_BASE + 0x200, 0x2105e303);
+	/* net_device structure Init */
+	pr_info
+	    ("%s  %d rx/%d tx descriptors allocated, mtu = %d!\n",
+	     RAETH_VERSION, num_rx_desc, num_tx_desc, netdev->mtu);
+//dead 01
+	if (ei_local->features & FE_ETHTOOL)
+		ethtool_init(netdev);
+	ret = debug_proc_init();
+	if (ret) {
+		dev_err(&pdev->dev, "error set debug proc\n");
+		goto err_free_dev;
+	}
+	/* Register net device for the driver */
+	ret = register_netdev(netdev);
+	if (ret) {
+		dev_err(&pdev->dev, "error bringing up device\n");
+		goto err_free_dev;
+	}
+	/*keep ethsys power domain on*/
+	device_init_wakeup(&pdev->dev, true);
+
+	pr_info("device_init_wakeup\n");
+	if (ei_local->features & FE_GE2_SUPPORT) {
+		if (!ei_local->pseudo_dev)
+			raeth_init_pseudo(ei_local, netdev);
+
+		if (!ei_local->pseudo_dev)
+			pr_info("Open pseudo_dev failed.\n");
+		else
+			virtualif_open(ei_local->pseudo_dev);
+	}
+	return 0;
+
+err_free_dev:
+	free_netdev(netdev);
+	return ret;
+}
+
+/* raether_remove - platform remove: release the dedicated QDMA device
+ * (when FQOS allocated one) and gate the ethernet clocks.
+ *
+ * NOTE(review): calling dev.release() directly is unusual — normally
+ * platform_device_put()/unregister() drops the reference; also the
+ * netdev registered in probe is not unregistered here.  Verify.
+ */
+static int raether_remove(struct platform_device *pdev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & FE_QDMA_FQOS)
+		if (ei_local->qdma_pdev)
+			ei_local->qdma_pdev->dev.release
+				(&ei_local->qdma_pdev->dev);
+
+	ei_clock_disable(ei_local);
+
+	return 0;
+}
+
+/* System-sleep PM ops (currently compiled out; see raeth_suspend/
+ * raeth_resume above).
+ */
+#if (0)
+static const struct dev_pm_ops raeth_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(raeth_suspend, raeth_resume)
+};
+#endif
+/* driver name reported to the platform bus */
+static const char raeth_string[] = "RAETH_DRV";
+
+/* DT compatibles handled by this driver; must stay in sync with
+ * fe_chip_name_config()
+ */
+static const struct of_device_id raether_of_ids[] = {
+	{.compatible = "mediatek,mt7623-eth"},
+	{.compatible = "mediatek,mt7622-raeth"},
+	{.compatible = "mediatek,mt7621-eth"},
+	{.compatible = "mediatek,leopard-eth"},
+	{.compatible = "mediatek,mt7986-eth"},
+	{},
+};
+
+/* Platform driver glue; module init/exit are generated by
+ * module_platform_driver().
+ */
+static struct platform_driver raeth_driver = {
+	.probe = rather_probe,
+	.remove = raether_remove,
+	.driver = {
+		   .name = raeth_string,
+		   .owner = THIS_MODULE,
+		   .of_match_table = raether_of_ids,
+		   /* .pm = &raeth_pm_ops, */
+		   },
+};
+
+module_platform_driver(raeth_driver);
+MODULE_LICENSE("GPL");
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether.h
new file mode 100644
index 0000000..5316905
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether.h
@@ -0,0 +1,463 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef RA2882ETHEND_H
+#define RA2882ETHEND_H
+
+#include "raeth_config.h"
+#include "raeth_reg.h"
+#include "ra_dbg_proc.h"
+#include "ra_ioctl.h"
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/if_ether.h>
+#include <linux/fs.h>
+#include <linux/mii.h>
+#include <linux/uaccess.h>
+#if defined(CONFIG_RAETH_TSO)
+#include <linux/tcp.h>
+#include <net/ipv6.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <linux/in.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_pppox.h>
+#endif
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/ppp_defs.h>
+
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_net.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/dma-mapping.h>
+
+#if defined(CONFIG_MACH_MT7623)
+#include <linux/delay.h>
+#endif
+#include <linux/kthread.h>
+#include <linux/prefetch.h>
+
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#if defined(CONFIG_HW_NAT)  || defined(CONFIG_RA_HW_NAT_MODULE)
+#include <net/ra_nat.h>
+#endif
+
+/* Boolean helpers used by arch_setup_dma_ops() callers in raether.c */
+#ifndef TRUE
+#define TRUE 1
+#endif
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#define ETH_GPIO_BASE	0x10005000
+
+/* Static TX queue/descriptor sizing; without QDMA_MQ the counts are
+ * read from the DT at probe time instead (see fe_tx_rx_dec()).
+ */
+#if defined(CONFIG_QDMA_MQ)
+#define GMAC1_TXQ_NUM 3
+#define GMAC1_TXQ_TXD_NUM 512
+#define GMAC1_TXD_NUM (GMAC1_TXQ_NUM * GMAC1_TXQ_TXD_NUM)
+#define GMAC2_TXQ_NUM 1
+#define GMAC2_TXQ_TXD_NUM 128
+#define GMAC2_TXD_NUM (GMAC2_TXQ_NUM * GMAC2_TXQ_TXD_NUM)
+#define NUM_TX_DESC (GMAC1_TXD_NUM + GMAC2_TXD_NUM)
+#define TOTAL_TXQ_NUM (GMAC1_TXQ_NUM + GMAC2_TXQ_NUM)
+#else
+#define TOTAL_TXQ_NUM 2
+#endif
+
+/* RX descriptor sizing and QDMA queue reservation */
+#if defined(CONFIG_MACH_MT7623)
+#define NUM_RX_DESC     2048
+#define NUM_QRX_DESC 16
+#define NUM_PQ_RESV 4
+#define FFA 2048
+#define QUEUE_OFFSET 0x10
+#else
+#define NUM_QRX_DESC 16
+#define NUM_PQ_RESV 4
+#define FFA 512
+#define QUEUE_OFFSET 0x10
+#endif
+
+#if defined(CONFIG_PINCTRL_MT7622)
+#define NUM_PQ 64
+#else
+#define NUM_PQ 16
+#endif
+/* #define NUM_TX_MAX_PROCESS NUM_TX_DESC */
+#define NUM_RX_MAX_PROCESS 16
+
+#define MAX_RX_RING_NUM	4
+#define NUM_LRO_RX_DESC	16
+
+#define	MAX_RX_LENGTH	1536
+
+/* netdev names for GE1/GE2 */
+#if defined(CONFIG_SUPPORT_OPENWRT)
+#define DEV_NAME        "eth0"
+#define DEV2_NAME       "eth1"
+#else
+#define DEV_NAME        "eth2"
+#define DEV2_NAME       "eth3"
+#endif
+
+#if defined(CONFIG_MACH_MT7623)
+#define GMAC0_OFFSET    0xE000
+#define GMAC2_OFFSET    0xE006
+#else
+#define GMAC0_OFFSET    0x28
+#define GMAC2_OFFSET    0x22
+#endif
+
+/* fallback IRQ numbers when not taken from the DT */
+#if defined(CONFIG_MACH_MT7623)
+#define IRQ_ENET0       232
+#define IRQ_ENET1       231
+#define IRQ_ENET2       230
+#else
+/* NOTE(Nelson): prom version started from 20150806 */
+#define IRQ_ENET0       255
+#define IRQ_ENET1       256
+#define IRQ_ENET2       257
+#endif
+#define MTK_NAPI_WEIGHT	64
+
+#define RAETH_VERSION	"STD_v0.1"
+
+/* MT7623 PSE reset workaround */
+#define	FE_RESET_POLLING_MS	(5000)
+
+/*LEOPARD POLLING*/
+#define PHY_POLLING_MS		(1000)
+#define FE_DEFAULT_LAN_IP	"192.168.1.1"
+#define IP4_ADDR_LEN		16
+
+#if defined(CONFIG_SOC_MT7621)
+#define MT_TRIGGER_LOW	0
+#else
+#define MT_TRIGGER_LOW	IRQF_TRIGGER_LOW
+#endif
+
+/* This enum allows us to identify how the clock is defined on the array of the
+ * clock in the order
+ * (indexes END_DEVICE.clks[]; order presumably must match the driver's
+ * clock-name table — confirm against mtk_clks_source_name)
+ */
+enum mtk_clks_map {
+	MTK_CLK_ETHIF,
+	MTK_CLK_ESW,
+	MTK_CLK_GP0,
+	MTK_CLK_GP1,
+	MTK_CLK_GP2,
+	MTK_CLK_SGMII_TX250M,
+	MTK_CLK_SGMII_RX250M,
+	MTK_CLK_SGMII_CDR_REF,
+	MTK_CLK_SGMII_CDR_FB,
+	MTK_CLK_SGMII1_TX250M,
+	MTK_CLK_SGMII1_RX250M,
+	MTK_CLK_SGMII1_CDR_REF,
+	MTK_CLK_SGMII1_CDR_FB,
+	MTK_CLK_TRGPLL,
+	MTK_CLK_SGMIPLL,
+	MTK_CLK_ETH1PLL,
+	MTK_CLK_ETH2PLL,
+	MTK_CLK_FE,
+	MTK_CLK_SGMII_TOP,
+	MTK_CLK_MAX	/* array size marker */
+};
+
+/* END_DEVICE - per-interface private state, stored in netdev_priv()
+ * of the GE1 net_device.  Holds DMA ring bookkeeping (PDMA + QDMA),
+ * NAPI contexts, IRQ numbers, DT-derived feature/architecture flags
+ * and the chip-specific function pointers installed at probe time.
+ */
+struct END_DEVICE {
+	struct device *dev;
+	unsigned int tx_cpu_owner_idx0;
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+	unsigned int rx_calc_idx[MAX_RX_RING_NUM];
+#endif
+	unsigned int tx_ring_full;
+	unsigned int tx_full;	/* NOTE(Nelso): unused, can remove */
+
+	/* PDMA TX  PTR */
+	dma_addr_t phy_tx_ring0;
+
+	/* QDMA TX  PTR */
+	struct platform_device *qdma_pdev;
+	/* struct sk_buff *free_skb[NUM_TX_DESC]; */
+	struct sk_buff **free_skb;
+	unsigned int tx_dma_ptr;
+	unsigned int tx_cpu_ptr;
+	unsigned int tx_cpu_idx;
+	unsigned int rls_cpu_idx;
+	/* atomic_t  free_txd_num[TOTAL_TXQ_NUM]; */
+	atomic_t  *free_txd_num;
+	/* unsigned int free_txd_head[TOTAL_TXQ_NUM]; */
+	/* unsigned int free_txd_tail[TOTAL_TXQ_NUM]; */
+	unsigned int *free_txd_head;
+	unsigned int *free_txd_tail;
+	struct QDMA_txdesc *txd_pool;
+	dma_addr_t phy_txd_pool;
+	/* unsigned int txd_pool_info[NUM_TX_DESC]; */
+	unsigned int *txd_pool_info;
+	struct QDMA_txdesc *free_head;
+	unsigned int phy_free_head;
+	unsigned int *free_page_head;
+	dma_addr_t phy_free_page_head;
+	struct PDMA_rxdesc *qrx_ring;
+	dma_addr_t phy_qrx_ring;
+
+	/* TSO */
+	unsigned int skb_txd_num;
+
+	/* MT7623 workaround */
+	struct work_struct reset_task;
+
+	/* workqueue_bh */
+	struct work_struct rx_wq;
+
+	/* tasklet_bh */
+	struct tasklet_struct rx_tasklet;
+
+	/* struct sk_buff *skb_free[NUM_TX_DESC]; */
+	struct sk_buff **skb_free;
+	unsigned int free_idx;
+
+	struct net_device_stats stat;	/* The new statistics table. */
+	spinlock_t page_lock;	/* spin_lock for cr access critial section */
+	spinlock_t irq_lock;	/* spin_lock for isr critial section */
+	spinlock_t mdio_lock;   /* spin_lock for mdio reg access */
+	struct PDMA_txdesc *tx_ring0;
+	struct PDMA_rxdesc *rx_ring[MAX_RX_RING_NUM];
+	dma_addr_t phy_rx_ring[MAX_RX_RING_NUM];
+
+	/* void *netrx_skb_data[MAX_RX_RING_NUM][NUM_RX_DESC]; */
+	void **netrx_skb_data[MAX_RX_RING_NUM];
+
+	/* struct sk_buff *netrx0_skbuf[NUM_RX_DESC]; */
+	/*struct sk_buff **netrx0_skbuf;*/
+	void **netrx0_skb_data;
+	/* napi */
+	struct napi_struct napi;
+	struct napi_struct napi_rx;
+	struct napi_struct napi_rx_rss0;
+	struct napi_struct napi_rx_rss1;
+	struct napi_struct napi_rx_rss2;
+	struct napi_struct napi_rx_rss3;
+	struct napi_struct napi_tx;
+	struct net_device dummy_dev;
+
+	/* clock control */
+	struct clk	*clks[MTK_CLK_MAX];
+
+	/* gsw device node */
+	struct device_node *switch_np;
+
+	/* GE1 support */
+	struct net_device *netdev;
+	/* GE2 support */
+	struct net_device *pseudo_dev;
+	unsigned int is_pseudo;
+
+	struct mii_if_info mii_info;
+	struct lro_counters lro_counters;
+	struct vlan_group *vlgrp;
+
+	/* virtual base addr from device tree */
+	void __iomem *ethdma_sysctl_base;
+
+	unsigned int irq0;
+	unsigned int irq1;
+	unsigned int irq2;
+	unsigned int irq3;
+	unsigned int esw_irq;
+	void __iomem *fe_tx_int_status;
+	void __iomem *fe_tx_int_enable;
+	void __iomem *fe_rx_int_status;
+	void __iomem *fe_rx_int_enable;
+
+	/* FE_* capability bits, filled by fe_features_config() */
+	unsigned int features;
+	/* chip id (MT7621_FE/.../LEOPARD_FE), see fe_chip_name_config() */
+	unsigned int chip_name;
+	/* wiring flags parsed from the DT, see raeth_arch_setting() */
+	unsigned int architecture;
+
+	/* IP address */
+	char lan_ip4_addr[IP4_ADDR_LEN];
+
+	/* Function pointers */
+	int (*ei_start_xmit)(struct sk_buff *skb, struct net_device *netdev,
+			     int gmac_no);
+	int (*ei_xmit_housekeeping)(struct net_device *netdev, int budget);
+	int (*ei_eth_recv)(struct net_device *dev,
+			   struct napi_struct *napi,
+			   int budget);
+	int (*ei_eth_recv_rss0)(struct net_device *dev,
+				struct napi_struct *napi,
+			   int budget);
+	int (*ei_eth_recv_rss1)(struct net_device *dev,
+				struct napi_struct *napi,
+			   int budget);
+	int (*ei_eth_recv_rss2)(struct net_device *dev,
+				struct napi_struct *napi,
+			   int budget);
+	int (*ei_eth_recv_rss3)(struct net_device *dev,
+				struct napi_struct *napi,
+			   int budget);
+	int (*ei_fill_tx_desc)(struct net_device *dev,
+			       unsigned long *tx_cpu_owner_idx,
+			       struct sk_buff *skb, int gmac_no);
+
+	/* MT7623 PSE reset workaround */
+	struct task_struct *kreset_task;
+	struct task_struct *kphy_poll_task;
+	unsigned int fe_reset_times;
+	unsigned int tx_mask;
+	unsigned int rx_mask;
+	unsigned int *rls_cnt;
+};
+
+/* --- public driver entry points (raether.c) --- */
+struct net_device_stats *ra_get_stats(struct net_device *dev);
+
+int ei_open(struct net_device *dev);
+int ei_close(struct net_device *dev);
+
+int ra2882eth_init(void);
+void ra2882eth_cleanup_module(void);
+
+/* --- MDIO access helpers (clause 22 and clause 45) --- */
+u32 mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data);
+u32 mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data);
+u32 mii_mgr_cl45_set_address(u32 port_num, u32 dev_addr, u32 reg_addr);
+u32 mii_mgr_read_cl45(u32 port_num, u32 dev_addr, u32 reg_addr,
+		      u32 *read_data);
+u32 mii_mgr_write_cl45(u32 port_num, u32 dev_addr, u32 reg_addr,
+		       u32 write_data);
+
+/* HNAT functions */
+/* NOTE(review): with CONFIG_RA_NAT_NONE these are *static* pointers,
+ * giving every translation unit that includes this header its own
+ * (unused) copy — presumably intentional stubbing; confirm.
+ */
+#if defined(CONFIG_RA_NAT_NONE)
+static int (*ppe_hook_rx_eth)(struct sk_buff *skb);
+static int (*ppe_hook_tx_eth)(struct sk_buff *skb, int gmac_no);
+#else
+extern int (*ppe_hook_rx_eth)(struct sk_buff *skb);
+extern int (*ppe_hook_tx_eth)(struct sk_buff *skb, int gmac_no);
+#endif
+
+/* PDMA functions */
+int fe_pdma_wait_dma_idle(void);
+int fe_pdma_rx_dma_init(struct net_device *dev);
+int fe_pdma_tx_dma_init(struct net_device *dev);
+void fe_pdma_rx_dma_deinit(struct net_device *dev);
+void fe_pdma_tx_dma_deinit(struct net_device *dev);
+void set_fe_pdma_glo_cfg(void);
+int ei_pdma_start_xmit(struct sk_buff *skb, struct net_device *dev,
+		       int gmac_no);
+int ei_pdma_xmit_housekeeping(struct net_device *netdev,
+			      int budget);
+int fe_fill_tx_desc(struct net_device *dev,
+		    unsigned long *tx_cpu_owner_idx,
+		    struct sk_buff *skb,
+		    int gmac_no);
+int fe_fill_tx_desc_tso(struct net_device *dev,
+			unsigned long *tx_cpu_owner_idx,
+			struct sk_buff *skb,
+			int gmac_no);
+
+/* QDMA functions */
+int fe_qdma_wait_dma_idle(void);
+int fe_qdma_rx_dma_init(struct net_device *dev);
+int fe_qdma_tx_dma_init(struct net_device *dev);
+void fe_qdma_rx_dma_deinit(struct net_device *dev);
+void fe_qdma_tx_dma_deinit(struct net_device *dev);
+void set_fe_qdma_glo_cfg(void);
+int ei_qdma_start_xmit(struct sk_buff *skb, struct net_device *dev,
+		       int gmac_no);
+int ei_qdma_xmit_housekeeping(struct net_device *netdev, int budget);
+int ei_qdma_ioctl(struct net_device *dev, struct ifreq *ifr,
+		  struct qdma_ioctl_data *ioctl_data);
+int ephy_ioctl(struct net_device *dev, struct ifreq *ifr,
+	       struct ephy_ioctl_data *ioctl_data);
+/* HW LRO functions */
+int fe_hw_lro_init(struct net_device *dev);
+void fe_hw_lro_deinit(struct net_device *dev);
+int fe_hw_lro_recv(struct net_device *dev,
+		   struct napi_struct *napi,
+		   int budget);
+void fe_set_hw_lro_my_ip(char *lan_ip_addr);
+
+int fe_rss_4ring_init(struct net_device *dev);
+void fe_rss_4ring_deinit(struct net_device *dev);
+int fe_rss_2ring_init(struct net_device *dev);
+void fe_rss_2ring_deinit(struct net_device *dev);
+int fe_rss0_recv(struct net_device *dev,
+		 struct napi_struct *napi,
+		   int budget);
+int fe_rss1_recv(struct net_device *dev,
+		 struct napi_struct *napi,
+		   int budget);
+int fe_rss2_recv(struct net_device *dev,
+		 struct napi_struct *napi,
+		   int budget);
+int fe_rss3_recv(struct net_device *dev,
+		 struct napi_struct *napi,
+		   int budget);
+/* Allocate backing storage for one rx buffer.
+ * With CONFIG_ETH_SLAB_ALLOC_SKB the buffer comes from the slab
+ * (plain kmalloc); otherwise a page fragment from the per-cpu netdev
+ * frag cache is used.  Note the frag path ignores @flags.
+ */
+static inline void *raeth_alloc_skb_data(size_t size, gfp_t flags)
+{
+#ifdef CONFIG_ETH_SLAB_ALLOC_SKB
+	return kmalloc(size, flags);
+#else
+	return netdev_alloc_frag(size);
+#endif
+}
+
+/* Release a buffer obtained from raeth_alloc_skb_data(); the free
+ * routine must match the allocator selected by CONFIG_ETH_SLAB_ALLOC_SKB.
+ */
+static inline void raeth_free_skb_data(void *addr)
+{
+#ifdef CONFIG_ETH_SLAB_ALLOC_SKB
+	kfree(addr);
+#else
+	skb_free_frag(addr);
+#endif
+}
+
+/* Wrap an rx data buffer in an sk_buff via build_skb().
+ * For the slab allocator frag_size must be 0, which tells build_skb()
+ * the area was kmalloc'ed rather than a page fragment.
+ */
+static inline struct sk_buff *raeth_build_skb(void *data,
+					      unsigned int frag_size)
+{
+#ifdef CONFIG_ETH_SLAB_ALLOC_SKB
+	return build_skb(data, 0);
+#else
+	return build_skb(data, frag_size);
+#endif
+}
+
+extern u32 gmac1_txq_num;
+extern u32 gmac1_txq_txd_num;
+extern u32 gmac1_txd_num;
+extern u32 gmac2_txq_num;
+extern u32 gmac2_txq_txd_num;
+extern u32 gmac2_txd_num;
+extern u32 num_rx_desc;
+extern u32 num_tx_max_process;
+extern u32 num_tx_desc;
+extern u32 total_txq_num;
+extern u32 mac_to_gigaphy_mode_addr;
+extern u32 mac_to_gigaphy_mode_addr2;
+#endif
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_hwlro.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_hwlro.c
new file mode 100644
index 0000000..9d76dd0
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_hwlro.c
@@ -0,0 +1,619 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "raether.h"
+#include "raether_hwlro.h"
+#include "ra_mac.h"
+
+/* HW LRO Force port */
+/* Configure LRO rx ring 1 in "force port" mode: only the flow matching
+ * the hard-coded TCP ports (1122 -> 3344) and IPv4 addresses below is
+ * aggregated.  Only used when FE_HW_LRO_FPORT is set (bring-up/testing);
+ * normal operation uses set_fe_lro_auto_cfg() instead.
+ */
+int set_fe_lro_ring1_cfg(struct net_device *dev)
+{
+	unsigned int ip;
+
+	pr_debug("set_fe_lro_ring1_cfg()\n");
+
+	/* 1. Set RX ring mode to force port */
+	SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_FORCE_PORT);
+
+	/* 2. Configure lro ring */
+	/* 2.1 set src/destination TCP ports */
+	SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING1, 1122);
+	SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING1, 3344);
+	/* 2.2 set src/destination IPs (hard-coded test addresses) */
+	str_to_ip(&ip, "10.10.10.3");
+	sys_reg_write(LRO_RX_RING1_SIP_DW0, ip);
+	str_to_ip(&ip, "10.10.10.254");
+	sys_reg_write(LRO_RX_RING1_DIP_DW0, ip);
+	/* 2.3 IPv4 force port mode */
+	SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING1, 1);
+	/* 2.4 IPv6 force port mode */
+	SET_PDMA_RXRING_IPV6_FORCE_MODE(ADMA_RX_RING1, 1);
+
+	/* 3. Set Age timer (HW_LRO_AGE_TIME ticks) */
+	SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, HW_LRO_AGE_TIME);
+
+	/* 4. Valid LRO ring */
+	SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
+
+	return 0;
+}
+
+/* Same as set_fe_lro_ring1_cfg() but for LRO rx ring 2, with its own
+ * hard-coded test flow (TCP 5566 -> 7788).
+ */
+int set_fe_lro_ring2_cfg(struct net_device *dev)
+{
+	unsigned int ip;
+
+	pr_debug("set_fe_lro_ring2_cfg()\n");
+
+	/* 1. Set RX ring mode to force port */
+	SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_FORCE_PORT);
+
+	/* 2. Configure lro ring */
+	/* 2.1 set src/destination TCP ports */
+	SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING2, 5566);
+	SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING2, 7788);
+	/* 2.2 set src/destination IPs (hard-coded test addresses) */
+	str_to_ip(&ip, "10.10.10.3");
+	sys_reg_write(LRO_RX_RING2_SIP_DW0, ip);
+	str_to_ip(&ip, "10.10.10.254");
+	sys_reg_write(LRO_RX_RING2_DIP_DW0, ip);
+	/* 2.3 IPv4 force port mode */
+	SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING2, 1);
+	/* 2.4 IPv6 force port mode */
+	SET_PDMA_RXRING_IPV6_FORCE_MODE(ADMA_RX_RING2, 1);
+
+	/* 3. Set Age timer (HW_LRO_AGE_TIME ticks) */
+	SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, HW_LRO_AGE_TIME);
+
+	/* 4. Valid LRO ring */
+	SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 1);
+
+	return 0;
+}
+
+/* Same as set_fe_lro_ring1_cfg() but for LRO rx ring 3, with its own
+ * hard-coded test flow (TCP 9900 -> 99).
+ */
+int set_fe_lro_ring3_cfg(struct net_device *dev)
+{
+	unsigned int ip;
+
+	pr_debug("set_fe_lro_ring3_cfg()\n");
+
+	/* 1. Set RX ring mode to force port */
+	SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_FORCE_PORT);
+
+	/* 2. Configure lro ring */
+	/* 2.1 set src/destination TCP ports */
+	SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING3, 9900);
+	SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING3, 99);
+	/* 2.2 set src/destination IPs (hard-coded test addresses) */
+	str_to_ip(&ip, "10.10.10.3");
+	sys_reg_write(LRO_RX_RING3_SIP_DW0, ip);
+	str_to_ip(&ip, "10.10.10.254");
+	sys_reg_write(LRO_RX_RING3_DIP_DW0, ip);
+	/* 2.3 IPv4 force port mode */
+	SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING3, 1);
+	/* 2.4 IPv6 force port mode */
+	SET_PDMA_RXRING_IPV6_FORCE_MODE(ADMA_RX_RING3, 1);
+
+	/* 3. Set Age timer (HW_LRO_AGE_TIME ticks) */
+	SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, HW_LRO_AGE_TIME);
+
+	/* 4. Valid LRO ring */
+	SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 1);
+
+	return 0;
+}
+
+/* Global LRO configuration for force-port mode: program aggregation
+ * limits, enable descriptor prefetch and checksum fix-up, wait for the
+ * engine to go idle, then set the LRO enable bit.
+ */
+int set_fe_lro_glo_cfg(struct net_device *dev)
+{
+	unsigned int reg_val = 0;
+
+	pr_debug("set_fe_lro_glo_cfg()\n");
+
+	/* 1. Set max AGG timer (HW_LRO_AGG_TIME ticks; the header comments
+	 * this value as ~200us, not the 10 msec the old comment claimed —
+	 * TODO confirm tick unit against the datasheet)
+	 */
+	SET_PDMA_LRO_MAX_AGG_TIME(HW_LRO_AGG_TIME);
+
+	/* 2. Set max LRO agg count */
+	SET_PDMA_LRO_MAX_AGG_CNT(HW_LRO_MAX_AGG_CNT);
+
+	/* 3. PDMA prefetch enable setting */
+	SET_PDMA_LRO_RXD_PREFETCH_EN(ADMA_RXD_PREFETCH_EN |
+				     ADMA_MULTI_RXD_PREFETCH_EN);
+
+	/* 4. IPv4 checksum update enable */
+	SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
+
+	/* 5. Polling relinguish — NOTE(review): unbounded busy-wait; if the
+	 * hardware never clears PDMA_LRO_RELINGUISH this spins forever.
+	 */
+	while (1) {
+		if (sys_reg_read(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH)
+			pr_warn("Polling HW LRO RELINGUISH...\n");
+		else
+			break;
+	}
+
+	/* 6. Enable LRO */
+	reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0);
+	reg_val |= PDMA_LRO_EN;
+	sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val);
+
+	return 0;
+}
+
+/* Program the LAN IPv4 address as "my IP" on rx ring 0 so the LRO
+ * engine can recognise locally-terminated flows.  The three upper
+ * DIP dwords are zeroed (IPv4-only entry), then the entry is validated.
+ *
+ * @lan_ip4_addr: dotted-quad string, parsed by str_to_ip()
+ */
+void fe_set_hw_lro_my_ip(char *lan_ip_addr)
+{
+	unsigned int lan_ip;
+
+	str_to_ip(&lan_ip, lan_ip_addr);
+	pr_info("[%s]lan_ip_addr = %s (lan_ip = 0x%x)\n",
+		__func__, lan_ip_addr, lan_ip);
+
+	/* Set my IP_1: LAN IP */
+	sys_reg_write(LRO_RX_RING0_DIP_DW0, lan_ip);
+	sys_reg_write(LRO_RX_RING0_DIP_DW1, 0);
+	sys_reg_write(LRO_RX_RING0_DIP_DW2, 0);
+	sys_reg_write(LRO_RX_RING0_DIP_DW3, 0);
+	SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING0, 1);
+}
+
+/* HW LRO Auto-learn */
+/* Configure HW LRO in auto-learn mode (the normal operating mode):
+ * the engine learns aggregatable flows by itself on rings 1~3 using
+ * the LAN IP programmed via fe_set_hw_lro_my_ip(), then LRO is enabled
+ * with the PPE cpu-reason black list fully masked.
+ */
+int set_fe_lro_auto_cfg(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	unsigned int reg_val = 0;
+
+	pr_debug("set_fe_lro_auto_cfg()\n");
+
+	fe_set_hw_lro_my_ip(ei_local->lan_ip4_addr);
+
+	/* Set RX ring1~3 to auto-learn modes */
+	SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_AUTO_LEARN);
+	SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_AUTO_LEARN);
+	SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_AUTO_LEARN);
+
+	/* Valid LRO ring */
+	SET_PDMA_RXRING_VALID(ADMA_RX_RING0, 1);
+	SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
+	SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 1);
+	SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 1);
+
+	/* Set AGE timer */
+	SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, HW_LRO_AGE_TIME);
+	SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, HW_LRO_AGE_TIME);
+	SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, HW_LRO_AGE_TIME);
+
+	/* Set max AGG timer */
+	SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING1, HW_LRO_AGG_TIME);
+	SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING2, HW_LRO_AGG_TIME);
+	SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING3, HW_LRO_AGG_TIME);
+
+	/* Set max LRO agg count */
+	SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING1, HW_LRO_MAX_AGG_CNT);
+	SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING2, HW_LRO_MAX_AGG_CNT);
+	SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING3, HW_LRO_MAX_AGG_CNT);
+
+	/* IPv6 LRO enable */
+	SET_PDMA_LRO_IPV6_EN(1);
+
+	/* IPv4 checksum update enable */
+	SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
+
+	/* TCP push option check disable */
+	/* SET_PDMA_LRO_IPV4_CTRL_PUSH_EN(0); */
+
+	/* PDMA prefetch enable setting */
+	SET_PDMA_LRO_RXD_PREFETCH_EN(ADMA_RXD_PREFETCH_EN |
+				     ADMA_MULTI_RXD_PREFETCH_EN);
+
+	/* switch priority comparison to packet count mode */
+	SET_PDMA_LRO_ALT_SCORE_MODE(PDMA_LRO_ALT_PKT_CNT_MODE);
+
+	/* bandwidth threshold setting */
+	SET_PDMA_LRO_BW_THRESHOLD(HW_LRO_BW_THRE);
+
+	/* auto-learn score delta setting */
+	sys_reg_write(LRO_ALT_SCORE_DELTA, HW_LRO_REPLACE_DELTA);
+
+	/* Set ALT timer to 20us: (unit: 20us) */
+	SET_PDMA_LRO_ALT_REFRESH_TIMER_UNIT(HW_LRO_TIMER_UNIT);
+	/* Set ALT refresh timer to 1 sec. (unit: 20us) */
+	SET_PDMA_LRO_ALT_REFRESH_TIMER(HW_LRO_REFRESH_TIME);
+
+	/* the least remaining room of SDL0 in RXD for lro aggregation */
+	SET_PDMA_LRO_MIN_RXD_SDL(HW_LRO_SDL_REMAIN_ROOM);
+
+	/* Polling relinguish — NOTE(review): unbounded busy-wait; if the
+	 * hardware never clears PDMA_LRO_RELINGUISH this spins forever.
+	 */
+	while (1) {
+		if (sys_reg_read(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH)
+			pr_warn("Polling HW LRO RELINGUISH...\n");
+		else
+			break;
+	}
+
+	/* Enable HW LRO */
+	reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0);
+	reg_val |= PDMA_LRO_EN;
+
+	/*enable cpu reason black list*/
+	reg_val |= PDMA_LRO_CRSN_BNW;
+	sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val);
+
+	/*no use PPE cpu reason 0xff*/
+	sys_reg_write(ADMA_LRO_CTRL_DW1, 0xffffffff);
+
+	return 0;
+}
+
+/* Allocate and initialise the HW LRO rx descriptor rings (rings 1~3),
+ * program the ring base/count registers, then apply either the
+ * force-port (FE_HW_LRO_FPORT) or the auto-learn LRO configuration.
+ *
+ * @dev: master GMAC net device; DMA mappings use dev->dev.parent.
+ *
+ * Returns 0 on success, -ENOMEM if a descriptor ring or rx buffer
+ * cannot be allocated or mapped.
+ * NOTE(review): on failure, buffers allocated before the failing
+ * iteration are not released here — confirm the caller tears down via
+ * fe_hw_lro_deinit() or equivalent.
+ */
+int fe_hw_lro_init(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	int skb_size;
+	int i, j;
+
+	/* data area + shared_info tail, as required by build_skb() */
+	skb_size = SKB_DATA_ALIGN(MAX_LRO_RX_LENGTH + NET_IP_ALIGN) +
+		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	/* Initial RX Ring 1 ~ 3 */
+	for (i = 1; i < MAX_RX_RING_NUM; i++) {
+		ei_local->rx_ring[i] =
+			dma_alloc_coherent(dev->dev.parent,
+					   NUM_LRO_RX_DESC *
+					   sizeof(struct PDMA_rxdesc),
+					   &ei_local->phy_rx_ring[i],
+					   GFP_ATOMIC | __GFP_ZERO);
+		/* Fix: the ring allocation was previously unchecked; a
+		 * failed dma_alloc_coherent() led to a NULL dereference in
+		 * the descriptor-init loop below.
+		 */
+		if (!ei_local->rx_ring[i]) {
+			pr_err("rx ring %d descriptor allocation failed!\n",
+			       i);
+			goto no_rx_mem;
+		}
+		for (j = 0; j < NUM_LRO_RX_DESC; j++) {
+			ei_local->netrx_skb_data[i][j] =
+				raeth_alloc_skb_data(skb_size, GFP_KERNEL);
+
+			if (!ei_local->netrx_skb_data[i][j]) {
+				pr_err("rx skbuff buffer allocation failed!\n");
+				goto no_rx_mem;
+			}
+
+			/* hand the descriptor to the DMA engine: owned by
+			 * HW (DDONE=0), max payload split across PLEN0/1
+			 */
+			memset(&ei_local->rx_ring[i][j], 0,
+			       sizeof(struct PDMA_rxdesc));
+			ei_local->rx_ring[i][j].rxd_info2.DDONE_bit = 0;
+			ei_local->rx_ring[i][j].rxd_info2.LS0 = 0;
+			ei_local->rx_ring[i][j].rxd_info2.PLEN0 =
+			    SET_ADMA_RX_LEN0(MAX_LRO_RX_LENGTH);
+			ei_local->rx_ring[i][j].rxd_info2.PLEN1 =
+			    SET_ADMA_RX_LEN1(MAX_LRO_RX_LENGTH >> 14);
+			ei_local->rx_ring[i][j].rxd_info1.PDP0 =
+			    dma_map_single(dev->dev.parent,
+					   ei_local->netrx_skb_data[i][j] +
+					   NET_SKB_PAD,
+					   MAX_LRO_RX_LENGTH, DMA_FROM_DEVICE);
+			if (unlikely
+			    (dma_mapping_error
+			     (dev->dev.parent,
+			      ei_local->rx_ring[i][j].rxd_info1.PDP0))) {
+				pr_err("[%s]dma_map_single() failed...\n",
+				       __func__);
+				goto no_rx_mem;
+			}
+		}
+		pr_info("\nphy_rx_ring[%d] = 0x%08x, rx_ring[%d] = 0x%p\n",
+			i, (unsigned int)ei_local->phy_rx_ring[i],
+			i, (void __iomem *)ei_local->rx_ring[i]);
+	}
+
+	/* publish ring base/size/index to the PDMA and reset each ring */
+	sys_reg_write(RX_BASE_PTR3, phys_to_bus((u32)ei_local->phy_rx_ring[3]));
+	sys_reg_write(RX_MAX_CNT3, cpu_to_le32((u32)NUM_LRO_RX_DESC));
+	sys_reg_write(RX_CALC_IDX3, cpu_to_le32((u32)(NUM_LRO_RX_DESC - 1)));
+	sys_reg_write(PDMA_RST_CFG, PST_DRX_IDX3);
+	sys_reg_write(RX_BASE_PTR2, phys_to_bus((u32)ei_local->phy_rx_ring[2]));
+	sys_reg_write(RX_MAX_CNT2, cpu_to_le32((u32)NUM_LRO_RX_DESC));
+	sys_reg_write(RX_CALC_IDX2, cpu_to_le32((u32)(NUM_LRO_RX_DESC - 1)));
+	sys_reg_write(PDMA_RST_CFG, PST_DRX_IDX2);
+	sys_reg_write(RX_BASE_PTR1, phys_to_bus((u32)ei_local->phy_rx_ring[1]));
+	sys_reg_write(RX_MAX_CNT1, cpu_to_le32((u32)NUM_LRO_RX_DESC));
+	sys_reg_write(RX_CALC_IDX1, cpu_to_le32((u32)(NUM_LRO_RX_DESC - 1)));
+	sys_reg_write(PDMA_RST_CFG, PST_DRX_IDX1);
+
+	if (ei_local->features & FE_HW_LRO_FPORT) {
+		set_fe_lro_ring1_cfg(dev);
+		set_fe_lro_ring2_cfg(dev);
+		set_fe_lro_ring3_cfg(dev);
+		set_fe_lro_glo_cfg(dev);
+	} else {
+		set_fe_lro_auto_cfg(dev);
+	}
+
+	return 0;
+
+no_rx_mem:
+	return -ENOMEM;
+}
+
+/* Tear down the HW LRO rx rings allocated by fe_hw_lro_init(): free the
+ * coherent descriptor rings and every rx data buffer for rings 1~3.
+ * NOTE(review): buffers are freed without dma_unmap_single() on the
+ * still-mapped descriptors — confirm DMA is stopped and unmapping is
+ * handled (or unnecessary) before this is called.
+ */
+void fe_hw_lro_deinit(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	int i, j;
+
+	for (i = 1; i < MAX_RX_RING_NUM; i++) {
+		/* free RX Ring */
+		dma_free_coherent(dev->dev.parent,
+				  NUM_LRO_RX_DESC * sizeof(struct PDMA_rxdesc),
+				  ei_local->rx_ring[i],
+				  ei_local->phy_rx_ring[i]);
+		/* free RX data */
+		for (j = 0; j < NUM_LRO_RX_DESC; j++) {
+			raeth_free_skb_data(ei_local->netrx_skb_data[i][j]);
+			ei_local->netrx_skb_data[i][j] = NULL;
+		}
+	}
+}
+
+/* Re-arm one rx descriptor with a fresh buffer and return ownership to
+ * the DMA engine (DDONE=0).  LRO rings (1~3) use the large split length
+ * (PLEN0/PLEN1); ring 0 is the normal ring with a plain MAX_RX_LENGTH.
+ * Note: @ei_local is currently unused here (kept for call-site symmetry).
+ */
+static inline void hw_lro_rx_desc_init(struct END_DEVICE *ei_local,
+				       struct PDMA_rxdesc *rx_ring,
+				       unsigned int rx_ring_no,
+				       dma_addr_t dma_addr)
+{
+	if (rx_ring_no != 0) {
+		/* lro ring */
+		rx_ring->rxd_info2.PLEN0 =
+		    SET_ADMA_RX_LEN0(MAX_LRO_RX_LENGTH);
+		rx_ring->rxd_info2.PLEN1 =
+		    SET_ADMA_RX_LEN1(MAX_LRO_RX_LENGTH >> 14);
+	} else
+		/* normal ring */
+		rx_ring->rxd_info2.PLEN0 = MAX_RX_LENGTH;
+
+	rx_ring->rxd_info1.PDP0 = dma_addr;
+	rx_ring->rxd_info2.LS0 = 0;
+	rx_ring->rxd_info2.DDONE_bit = 0;
+}
+
+/* Pick the next rx ring to service: the lowest-numbered ring whose
+ * next descriptor (rx_idx[i]) has been completed by hardware (DDONE=1).
+ * Falls back to ring 0 (the normal ring) when nothing is ready; the
+ * caller re-checks DDONE before consuming.
+ */
+static int get_hw_lro_rx_ring(struct END_DEVICE *ei_local,
+			      unsigned int rx_idx[])
+{
+	int i;
+
+	for (i = 0; i < MAX_RX_RING_NUM; i++)
+		if (ei_local->rx_ring[i][rx_idx[i]].rxd_info2.DDONE_bit == 1)
+			return i;
+
+	return 0;
+}
+
+/* Address of the RX_CALC_IDX register for a given ring; the per-ring
+ * register blocks are spaced 0x10 apart (hence the << 4).
+ */
+static inline void __iomem *get_rx_cal_idx_reg(unsigned int rx_ring_no)
+{
+	return (void __iomem *)(RAETH_RX_CALC_IDX0 + (rx_ring_no << 4));
+}
+
+/* HW LRO receive poll loop.  Services ring 0 (normal) and LRO rings
+ * 1~3, picking whichever ring has a completed descriptor each pass.
+ * For every received (possibly HW-aggregated) frame: allocate a
+ * replacement buffer first, build an skb around the old buffer, steer
+ * it to GMAC1 (dev) or GMAC2 (pseudo_dev) based on rxd_info4.SP, run
+ * optional HW-NAT hooks, hand it to the stack, then re-arm and publish
+ * the descriptor back to the PDMA.
+ *
+ * Returns the number of loop iterations consumed (<= budget + 1), or
+ * budget + 1 from the allocation-failure path so the caller keeps
+ * polling scheduled.
+ */
+int fe_hw_lro_recv(struct net_device *dev,
+		   struct napi_struct *napi,
+		   int budget)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	struct PSEUDO_ADAPTER *p_ad = netdev_priv(ei_local->pseudo_dev);
+	struct sk_buff *rx_skb;
+	struct PDMA_rxdesc *rx_ring, *rx_ring_next;
+	void *rx_data, *rx_data_next, *new_data;
+	unsigned int length = 0;
+	unsigned int rx_ring_no = 0, rx_ring_no_next = 0;
+	unsigned int rx_dma_owner_idx, rx_dma_owner_idx_next;
+	unsigned int rx_dma_owner_lro[MAX_RX_RING_NUM];
+	unsigned int skb_size, map_size;
+	void __iomem *rx_calc_idx_reg;
+	int rx_processed = 0;
+
+	/* get cpu owner indexes of rx rings (ring 0 uses num_rx_desc,
+	 * LRO rings use NUM_LRO_RX_DESC)
+	 */
+	rx_dma_owner_lro[0] = (ei_local->rx_calc_idx[0] + 1) % num_rx_desc;
+	rx_dma_owner_lro[1] = (ei_local->rx_calc_idx[1] + 1) % NUM_LRO_RX_DESC;
+	rx_dma_owner_lro[2] = (ei_local->rx_calc_idx[2] + 1) % NUM_LRO_RX_DESC;
+	rx_dma_owner_lro[3] = (ei_local->rx_calc_idx[3] + 1) % NUM_LRO_RX_DESC;
+
+	rx_ring_no =  get_hw_lro_rx_ring(ei_local, rx_dma_owner_lro);
+	rx_dma_owner_idx = rx_dma_owner_lro[rx_ring_no];
+	rx_ring = &ei_local->rx_ring[rx_ring_no][rx_dma_owner_idx];
+	rx_data = ei_local->netrx_skb_data[rx_ring_no][rx_dma_owner_idx];
+	rx_calc_idx_reg = get_rx_cal_idx_reg(rx_ring_no);
+
+	for (;;) {
+		dma_addr_t dma_addr;
+
+		/* NOTE(review): post-increment means up to budget + 1
+		 * packets are handled per call — confirm intended.
+		 */
+		if ((rx_processed++ > budget) ||
+		    (rx_ring->rxd_info2.DDONE_bit == 0))
+			break;
+
+		/* prefetch the next handling RXD */
+		if (rx_ring_no == 0) {
+			rx_dma_owner_lro[rx_ring_no] =
+				(rx_dma_owner_idx + 1) % num_rx_desc;
+			skb_size =
+			   SKB_DATA_ALIGN(MAX_RX_LENGTH + NET_IP_ALIGN +
+					  NET_SKB_PAD) +
+			   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+			map_size = MAX_RX_LENGTH;
+		} else {
+			rx_dma_owner_lro[rx_ring_no] =
+				(rx_dma_owner_idx + 1) % NUM_LRO_RX_DESC;
+			skb_size =
+			   SKB_DATA_ALIGN(MAX_LRO_RX_LENGTH + NET_IP_ALIGN +
+					  NET_SKB_PAD) +
+			   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+			map_size = MAX_LRO_RX_LENGTH;
+		}
+
+		rx_ring_no_next =  get_hw_lro_rx_ring(ei_local,
+						      rx_dma_owner_lro);
+		rx_dma_owner_idx_next = rx_dma_owner_lro[rx_ring_no_next];
+		rx_ring_next =
+			&ei_local->rx_ring
+				[rx_ring_no_next][rx_dma_owner_idx_next];
+		rx_data_next =
+			ei_local->netrx_skb_data
+				[rx_ring_no_next][rx_dma_owner_idx_next];
+		prefetch(rx_ring_next);
+
+		/* We have to check the free memory size is big enough
+		 * before pass the packet to cpu
+		 */
+		new_data = raeth_alloc_skb_data(skb_size, GFP_ATOMIC);
+
+		if (unlikely(!new_data)) {
+			pr_err("skb not available...\n");
+			goto skb_err;
+		}
+
+		dma_addr = dma_map_single(dev->dev.parent,
+					  new_data + NET_SKB_PAD,
+					  map_size,
+					  DMA_FROM_DEVICE);
+
+		if (unlikely(dma_mapping_error(dev->dev.parent, dma_addr))) {
+			pr_err("[%s]dma_map_single() failed...\n", __func__);
+			raeth_free_skb_data(new_data);
+			goto skb_err;
+		}
+
+		rx_skb = raeth_build_skb(rx_data, skb_size);
+
+		if (unlikely(!rx_skb)) {
+			/* NOTE(review): new_data and its DMA mapping are
+			 * leaked on this path — confirm and fix separately.
+			 */
+			put_page(virt_to_head_page(rx_data));
+			pr_err("build_skb failed\n");
+			goto skb_err;
+		}
+		skb_reserve(rx_skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+		/* total aggregated length is split across PLEN1:PLEN0 */
+		length = (rx_ring->rxd_info2.PLEN1 << 14) |
+			 rx_ring->rxd_info2.PLEN0;
+		dma_unmap_single(dev->dev.parent,
+				 rx_ring->rxd_info1.PDP0,
+				 length, DMA_FROM_DEVICE);
+
+		prefetch(rx_skb->data);
+
+		/* skb processing */
+		skb_put(rx_skb, length);
+
+		/* rx packet from GE2 */
+		if (rx_ring->rxd_info4.SP == 2) {
+			if (ei_local->pseudo_dev) {
+				rx_skb->dev = ei_local->pseudo_dev;
+				rx_skb->protocol =
+				    eth_type_trans(rx_skb,
+						   ei_local->pseudo_dev);
+			} else {
+				pr_err
+				    ("pseudo_dev is still not initialize ");
+				pr_err
+				    ("but receive packet from GMAC2\n");
+			}
+		} else {
+			rx_skb->dev = dev;
+			rx_skb->protocol = eth_type_trans(rx_skb, dev);
+		}
+
+		/* rx checksum offload */
+		if (likely(rx_ring->rxd_info4.L4VLD))
+			rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			rx_skb->ip_summed = CHECKSUM_NONE;
+
+#if defined(CONFIG_RA_HW_NAT)  || defined(CONFIG_RA_HW_NAT_MODULE)
+		/* stash rxd_info4 in skb head/tail room for the PPE hook */
+		if (ppe_hook_rx_eth) {
+			if (IS_SPACE_AVAILABLE_HEAD(rx_skb)) {
+				*(uint32_t *)(FOE_INFO_START_ADDR_HEAD(rx_skb)) =
+					*(uint32_t *)&rx_ring->rxd_info4;
+				FOE_ALG_HEAD(rx_skb) = 0;
+				FOE_MAGIC_TAG_HEAD(rx_skb) = FOE_MAGIC_GE;
+				FOE_TAG_PROTECT_HEAD(rx_skb) = TAG_PROTECT;
+			}
+			if (IS_SPACE_AVAILABLE_TAIL(rx_skb)) {
+				*(uint32_t *)(FOE_INFO_START_ADDR_TAIL(rx_skb) + 2) =
+					*(uint32_t *)&rx_ring->rxd_info4;
+				FOE_ALG_TAIL(rx_skb) = 0;
+				FOE_MAGIC_TAG_TAIL(rx_skb) = FOE_MAGIC_GE;
+				FOE_TAG_PROTECT_TAIL(rx_skb) = TAG_PROTECT;
+			}
+		}
+#endif
+
+		/* HW LRO aggregation statistics */
+		if (ei_local->features & FE_HW_LRO_DBG) {
+			hw_lro_stats_update(rx_ring_no, rx_ring);
+			hw_lro_flush_stats_update(rx_ring_no, rx_ring);
+		}
+
+		if (ei_local->features & FE_HW_VLAN_RX) {
+			if (rx_ring->rxd_info2.TAG)
+				__vlan_hwaccel_put_tag(rx_skb,
+						       htons(ETH_P_8021Q),
+						       rx_ring->rxd_info3.VID);
+		}
+/* ra_sw_nat_hook_rx return 1 --> continue
+ * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
+ */
+#if defined(CONFIG_RA_HW_NAT)  || defined(CONFIG_RA_HW_NAT_MODULE)
+		if ((!ppe_hook_rx_eth) ||
+		    (ppe_hook_rx_eth && ppe_hook_rx_eth(rx_skb))) {
+#endif
+			if (ei_local->features & FE_INT_NAPI)
+			/* napi_gro_receive(napi, rx_skb); */
+				netif_receive_skb(rx_skb);
+			else
+				netif_rx(rx_skb);
+
+#if defined(CONFIG_RA_HW_NAT)  || defined(CONFIG_RA_HW_NAT_MODULE)
+		}
+#endif
+
+		/* account to the device the packet arrived on */
+		if (rx_ring->rxd_info4.SP == 2) {
+			p_ad->stat.rx_packets++;
+			p_ad->stat.rx_bytes += length;
+		} else {
+			ei_local->stat.rx_packets++;
+			ei_local->stat.rx_bytes += length;
+		}
+
+		/* Init RX desc. */
+		hw_lro_rx_desc_init(ei_local,
+				    rx_ring,
+				    rx_ring_no,
+				    dma_addr);
+		ei_local->netrx_skb_data[rx_ring_no][rx_dma_owner_idx] =
+			new_data;
+
+		/* make sure that all changes to the dma ring are flushed
+		 * before we continue
+		 */
+		wmb();
+
+		sys_reg_write(rx_calc_idx_reg, rx_dma_owner_idx);
+		ei_local->rx_calc_idx[rx_ring_no] = rx_dma_owner_idx;
+
+		/* use prefetched variable */
+		rx_dma_owner_idx = rx_dma_owner_idx_next;
+		rx_ring_no = rx_ring_no_next;
+		rx_ring = rx_ring_next;
+		rx_data = rx_data_next;
+		rx_calc_idx_reg = get_rx_cal_idx_reg(rx_ring_no);
+	}	/* for */
+
+	return rx_processed;
+
+skb_err:
+	/* rx packet from GE2 */
+	if (rx_ring->rxd_info4.SP == 2)
+		p_ad->stat.rx_dropped++;
+	else
+		ei_local->stat.rx_dropped++;
+
+	/* Discard the rx packet: re-arm the descriptor with its old
+	 * buffer so the ring keeps flowing
+	 */
+	hw_lro_rx_desc_init(ei_local,
+			    rx_ring,
+			    rx_ring_no,
+			    rx_ring->rxd_info1.PDP0);
+	sys_reg_write(rx_calc_idx_reg, rx_dma_owner_idx);
+	ei_local->rx_calc_idx[rx_ring_no] = rx_dma_owner_idx;
+
+	return (budget + 1);
+}
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_hwlro.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_hwlro.h
new file mode 100644
index 0000000..c319aca
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_hwlro.h
@@ -0,0 +1,403 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef RA_HWLRO_H
+#define RA_HWLRO_H
+
+#include "raeth_reg.h"
+
+/* HW LRO tunables.  Timer values are in hardware ticks; the inline
+ * time comments below do not all agree with the "10 msec" comments in
+ * raether_hwlro.c — TODO confirm the tick unit against the datasheet.
+ */
+#define	HW_LRO_TIMER_UNIT   1
+#define	HW_LRO_REFRESH_TIME 50000
+#define	HW_LRO_MAX_AGG_CNT	64
+#define	HW_LRO_AGG_DELTA	1
+#define	MAX_LRO_RX_LENGTH	(PAGE_SIZE * 3)
+#define	HW_LRO_AGG_TIME		10	/* 200us */
+#define	HW_LRO_AGE_TIME		50	/* 1ms */
+#define	HW_LRO_BW_THRE	        3000
+#define	HW_LRO_REPLACE_DELTA    1000
+#define	HW_LRO_SDL_REMAIN_ROOM	1522
+
+/* Per-dword views of one HW LRO auto-learn TLB entry, plus the
+ * aggregate struct holding all nine dwords.  Field meanings are taken
+ * from the names; DTP/STP presumably are the destination/source TCP
+ * ports matched by the entry — confirm against the PDMA datasheet.
+ */
+struct PDMA_LRO_AUTO_TLB_INFO0_T {
+	unsigned int DTP:16;
+	unsigned int STP:16;
+};
+
+struct PDMA_LRO_AUTO_TLB_INFO1_T {
+	unsigned int SIP0:32;
+};
+
+struct PDMA_LRO_AUTO_TLB_INFO2_T {
+	unsigned int SIP1:32;
+};
+
+struct PDMA_LRO_AUTO_TLB_INFO3_T {
+	unsigned int SIP2:32;
+};
+
+struct PDMA_LRO_AUTO_TLB_INFO4_T {
+	unsigned int SIP3:32;
+};
+
+struct PDMA_LRO_AUTO_TLB_INFO5_T {
+	unsigned int VLAN_VID0:32;
+};
+
+struct PDMA_LRO_AUTO_TLB_INFO6_T {
+	unsigned int VLAN_VID1:16;
+	unsigned int VLAN_VID_VLD:4;
+	unsigned int CNT:12;
+};
+
+struct PDMA_LRO_AUTO_TLB_INFO7_T {
+	unsigned int DW_LEN:32;
+};
+
+struct PDMA_LRO_AUTO_TLB_INFO8_T {
+	unsigned int DIP_ID:2;
+	unsigned int IPV6:1;
+	unsigned int IPV4:1;
+	unsigned int RESV:27;
+	unsigned int VALID:1;
+};
+
+/* full auto-learn TLB entry: dwords 0..8 in register order */
+struct PDMA_LRO_AUTO_TLB_INFO {
+	struct PDMA_LRO_AUTO_TLB_INFO0_T auto_tlb_info0;
+	struct PDMA_LRO_AUTO_TLB_INFO1_T auto_tlb_info1;
+	struct PDMA_LRO_AUTO_TLB_INFO2_T auto_tlb_info2;
+	struct PDMA_LRO_AUTO_TLB_INFO3_T auto_tlb_info3;
+	struct PDMA_LRO_AUTO_TLB_INFO4_T auto_tlb_info4;
+	struct PDMA_LRO_AUTO_TLB_INFO5_T auto_tlb_info5;
+	struct PDMA_LRO_AUTO_TLB_INFO6_T auto_tlb_info6;
+	struct PDMA_LRO_AUTO_TLB_INFO7_T auto_tlb_info7;
+	struct PDMA_LRO_AUTO_TLB_INFO8_T auto_tlb_info8;
+};
+
+/* Bit masks and shift amounts for the ADMA/LRO control registers and
+ * the per-ring LRO_RX_RINGx control/port dwords.  Masks use BIT()/
+ * BITS(); each *_OFFSET pairs with the mask of the same name.
+ */
+#define PDMA_LRO_EN             BIT(0)
+#define PDMA_LRO_IPV6_EN        BIT(1)
+#define PDMA_LRO_CRSN_BNW       BIT(6)
+#define PDMA_LRO_IPV4_CSUM_UPDATE_EN    BIT(7)
+#define PDMA_LRO_IPV4_CTRL_PUSH_EN	BIT(23)
+#define PDMA_LRO_RXD_PREFETCH_EN        BITS(3, 4)
+#define PDMA_NON_LRO_MULTI_EN   BIT(2)
+#define PDMA_LRO_DLY_INT_EN             BIT(5)
+#define PDMA_LRO_FUSH_REQ               BITS(26, 28)
+#define PDMA_LRO_RELINGUISH     BITS(29, 31)
+#define PDMA_LRO_FREQ_PRI_ADJ   BITS(16, 19)
+#define PDMA_LRO_TPUT_PRE_ADJ           BITS(8, 11)
+#define PDMA_LRO_TPUT_PRI_ADJ           BITS(12, 15)
+#define PDMA_LRO_ALT_SCORE_MODE         BIT(21)
+#define PDMA_LRO_RING_AGE1      BITS(22, 31)
+#define PDMA_LRO_RING_AGE2      BITS(0, 5)
+#define PDMA_LRO_RING_AGG               BITS(10, 25)
+#define PDMA_LRO_RING_AGG_CNT1          BITS(26, 31)
+#define PDMA_LRO_RING_AGG_CNT2          BITS(0, 1)
+#define PDMA_LRO_ALT_TICK_TIMER         BITS(16, 20)
+#define PDMA_LRO_LRO_MIN_RXD_SDL0       BITS(16, 31)
+
+#define PDMA_LRO_DLY_INT_EN_OFFSET          (5)
+#define PDMA_LRO_TPUT_PRE_ADJ_OFFSET        (8)
+#define PDMA_LRO_FREQ_PRI_ADJ_OFFSET    (16)
+#define PDMA_LRO_LRO_MIN_RXD_SDL0_OFFSET    (16)
+#define PDMA_LRO_TPUT_PRI_ADJ_OFFSET        (12)
+#define PDMA_LRO_ALT_SCORE_MODE_OFFSET      (21)
+#define PDMA_LRO_FUSH_REQ_OFFSET            (26)
+#define PDMA_NON_LRO_MULTI_EN_OFFSET        (2)
+#define PDMA_LRO_IPV6_EN_OFFSET             (1)
+#define PDMA_LRO_RXD_PREFETCH_EN_OFFSET     (3)
+#define PDMA_LRO_IPV4_CSUM_UPDATE_EN_OFFSET (7)
+#define PDMA_LRO_IPV4_CTRL_PUSH_EN_OFFSET   (23)
+#define PDMA_LRO_ALT_TICK_TIMER_OFFSET      (16)
+
+#define PDMA_LRO_TPUT_OVERFLOW_ADJ  BITS(12, 31)
+#define PDMA_LRO_CNT_OVERFLOW_ADJ   BITS(0, 11)
+
+#define PDMA_LRO_TPUT_OVERFLOW_ADJ_OFFSET   (12)
+#define PDMA_LRO_CNT_OVERFLOW_ADJ_OFFSET    (0)
+
+/* values for the ALT_SCORE_MODE bit */
+#define PDMA_LRO_ALT_BYTE_CNT_MODE  (0)
+#define PDMA_LRO_ALT_PKT_CNT_MODE   (1)
+
+/* LRO_RX_RING1_CTRL_DW1 offsets  */
+#define PDMA_LRO_AGE_H_OFFSET           (10)
+#define PDMA_LRO_RING_AGE1_OFFSET       (22)
+#define PDMA_LRO_RING_AGG_CNT1_OFFSET   (26)
+/* LRO_RX_RING1_CTRL_DW2 offsets  */
+#define PDMA_RX_MODE_OFFSET             (6)
+#define PDMA_RX_PORT_VALID_OFFSET       (8)
+#define PDMA_RX_MYIP_VALID_OFFSET       (9)
+#define PDMA_LRO_RING_AGE2_OFFSET       (0)
+#define PDMA_LRO_RING_AGG_OFFSET        (10)
+#define PDMA_LRO_RING_AGG_CNT2_OFFSET   (0)
+/* LRO_RX_RING1_CTRL_DW3 offsets  */
+#define PDMA_LRO_AGG_CNT_H_OFFSET       (6)
+/* LRO_RX_RING1_STP_DTP_DW offsets */
+#define PDMA_RX_TCP_SRC_PORT_OFFSET     (16)
+#define PDMA_RX_TCP_DEST_PORT_OFFSET    (0)
+/* LRO_RX_RING1_CTRL_DW0 offsets */
+#define PDMA_RX_IPV4_FORCE_OFFSET       (1)
+#define PDMA_RX_IPV6_FORCE_OFFSET       (0)
+
+#define ADMA_MULTI_RXD_PREFETCH_EN	BIT(3)
+#define ADMA_RXD_PREFETCH_EN		BIT(4)
+
+/* Read-modify-write accessor macros for the ADMA/LRO registers: each
+ * reads the register, clears the target field, ORs in the (masked)
+ * value and writes it back.  Ring-indexed variants step per-ring
+ * register blocks with ((x) << 6), i.e. a 0x40 stride.
+ *
+ * NOTE(review): these macros expand to a bare { ... } block instead of
+ * the kernel-idiomatic do { ... } while (0), so a call site like
+ * `if (cond) SET_...(v); else ...` would mis-parse.  Confirm no such
+ * call sites exist before relying on them in conditionals.
+ */
+#define SET_PDMA_LRO_MAX_AGG_CNT(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW3); \
+reg_val &= ~0xff;   \
+reg_val |= ((x) & 0xff);  \
+sys_reg_write(ADMA_LRO_CTRL_DW3, reg_val); \
+}
+
+#define SET_PDMA_LRO_FLUSH_REQ(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_FUSH_REQ;   \
+reg_val |= ((x) & 0x7) << PDMA_LRO_FUSH_REQ_OFFSET;  \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_LRO_IPV6_EN(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_IPV6_EN;   \
+reg_val |= ((x) & 0x1) << PDMA_LRO_IPV6_EN_OFFSET;  \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_LRO_RXD_PREFETCH_EN(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_RXD_PREFETCH_EN;   \
+reg_val |= (x);  \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_IPV4_CSUM_UPDATE_EN;   \
+reg_val |= ((x) & 0x1) << PDMA_LRO_IPV4_CSUM_UPDATE_EN_OFFSET;  \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_LRO_IPV4_CTRL_PUSH_EN(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_IPV4_CTRL_PUSH_EN;   \
+reg_val |= ((x) & 0x1) << PDMA_LRO_IPV4_CTRL_PUSH_EN_OFFSET;  \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_NON_LRO_MULTI_EN(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~(PDMA_NON_LRO_MULTI_EN);   \
+reg_val |= ((x) & 0x1) << PDMA_NON_LRO_MULTI_EN_OFFSET;  \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_LRO_FREQ_PRI_ADJ(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_FREQ_PRI_ADJ;   \
+reg_val |= ((x) & 0xf) << PDMA_LRO_FREQ_PRI_ADJ_OFFSET;  \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_LRO_TPUT_PRE_ADJ(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_TPUT_PRE_ADJ;   \
+reg_val |= ((x) & 0xf) << PDMA_LRO_TPUT_PRE_ADJ_OFFSET;  \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_LRO_TPUT_PRI_ADJ(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_TPUT_PRI_ADJ;   \
+reg_val |= ((x) & 0xf) << PDMA_LRO_TPUT_PRI_ADJ_OFFSET;  \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_LRO_ALT_SCORE_MODE(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_ALT_SCORE_MODE;   \
+reg_val |= ((x) & 0x1) << PDMA_LRO_ALT_SCORE_MODE_OFFSET;  \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_LRO_DLY_INT_EN(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_DLY_INT_EN;   \
+reg_val |= ((x) & 0x1) << PDMA_LRO_DLY_INT_EN_OFFSET;  \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+/* whole-register write; the initial read is discarded */
+#define SET_PDMA_LRO_BW_THRESHOLD(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW2); \
+reg_val = (x);  \
+sys_reg_write(ADMA_LRO_CTRL_DW2, reg_val); \
+}
+
+#define SET_PDMA_LRO_MIN_RXD_SDL(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW3); \
+reg_val &= ~PDMA_LRO_LRO_MIN_RXD_SDL0;   \
+reg_val |= ((x) & 0xffff) << PDMA_LRO_LRO_MIN_RXD_SDL0_OFFSET;  \
+sys_reg_write(ADMA_LRO_CTRL_DW3, reg_val); \
+}
+
+#define SET_PDMA_LRO_TPUT_OVERFLOW_ADJ(x) \
+{ \
+unsigned int reg_val = sys_reg_read(PDMA_LRO_ATL_OVERFLOW_ADJ); \
+reg_val &= ~PDMA_LRO_TPUT_OVERFLOW_ADJ;   \
+reg_val |= ((x) & 0xfffff) << PDMA_LRO_TPUT_OVERFLOW_ADJ_OFFSET;  \
+sys_reg_write(PDMA_LRO_ATL_OVERFLOW_ADJ, reg_val); \
+}
+
+#define SET_PDMA_LRO_CNT_OVERFLOW_ADJ(x) \
+{ \
+unsigned int reg_val = sys_reg_read(PDMA_LRO_ATL_OVERFLOW_ADJ); \
+reg_val &= ~PDMA_LRO_CNT_OVERFLOW_ADJ;   \
+reg_val |= ((x) & 0xfff) << PDMA_LRO_CNT_OVERFLOW_ADJ_OFFSET;  \
+sys_reg_write(PDMA_LRO_ATL_OVERFLOW_ADJ, reg_val); \
+}
+
+#define SET_PDMA_LRO_ALT_REFRESH_TIMER_UNIT(x) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_ALT_REFRESH_TIMER); \
+reg_val &= ~PDMA_LRO_ALT_TICK_TIMER;   \
+reg_val |= ((x) & 0x1f) << PDMA_LRO_ALT_TICK_TIMER_OFFSET;  \
+sys_reg_write(LRO_ALT_REFRESH_TIMER, reg_val); \
+}
+
+#define SET_PDMA_LRO_ALT_REFRESH_TIMER(x) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_ALT_REFRESH_TIMER); \
+reg_val &= ~0xffff;   \
+reg_val |= ((x) & 0xffff);  \
+sys_reg_write(LRO_ALT_REFRESH_TIMER, reg_val); \
+}
+
+#define SET_PDMA_LRO_MAX_AGG_TIME(x) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_MAX_AGG_TIME); \
+reg_val &= ~0xffff;   \
+reg_val |= ((x) & 0xffff);  \
+sys_reg_write(LRO_MAX_AGG_TIME, reg_val); \
+}
+
+/* per-ring variants below: (x) is the ring number, 0x40 register stride */
+#define SET_PDMA_RXRING_MODE(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
+reg_val &= ~(0x3 << PDMA_RX_MODE_OFFSET);   \
+reg_val |= (y) << PDMA_RX_MODE_OFFSET;  \
+sys_reg_write(LRO_RX_RING0_CTRL_DW2 + ((x) << 6), reg_val); \
+}
+
+#define SET_PDMA_RXRING_MYIP_VALID(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
+reg_val &= ~(0x1 << PDMA_RX_MYIP_VALID_OFFSET); \
+reg_val |= ((y) & 0x1) << PDMA_RX_MYIP_VALID_OFFSET;    \
+sys_reg_write(LRO_RX_RING0_CTRL_DW2 + ((x) << 6), reg_val); \
+}
+
+#define SET_PDMA_RXRING_VALID(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
+reg_val &= ~(0x1 << PDMA_RX_PORT_VALID_OFFSET); \
+reg_val |= ((y) & 0x1) << PDMA_RX_PORT_VALID_OFFSET;    \
+sys_reg_write(LRO_RX_RING0_CTRL_DW2 + ((x) << 6), reg_val); \
+}
+
+#define SET_PDMA_RXRING_TCP_SRC_PORT(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_RX_RING1_STP_DTP_DW + \
+				    (((x) - 1) << 6)); \
+reg_val &= ~(0xffff << PDMA_RX_TCP_SRC_PORT_OFFSET);    \
+reg_val |= (y) << PDMA_RX_TCP_SRC_PORT_OFFSET;    \
+sys_reg_write(LRO_RX_RING1_STP_DTP_DW + (((x) - 1) << 6), reg_val); \
+}
+
+#define SET_PDMA_RXRING_TCP_DEST_PORT(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_RX_RING1_STP_DTP_DW + \
+				    (((x) - 1) << 6)); \
+reg_val &= ~(0xffff << PDMA_RX_TCP_DEST_PORT_OFFSET);    \
+reg_val |= (y) << PDMA_RX_TCP_DEST_PORT_OFFSET;    \
+sys_reg_write(LRO_RX_RING1_STP_DTP_DW + (((x) - 1) << 6), reg_val); \
+}
+
+#define SET_PDMA_RXRING_IPV4_FORCE_MODE(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_RX_RING1_CTRL_DW0 + (((x) - 1) << 6)); \
+reg_val &= ~(0x1 << PDMA_RX_IPV4_FORCE_OFFSET);    \
+reg_val |= (y) << PDMA_RX_IPV4_FORCE_OFFSET;    \
+sys_reg_write(LRO_RX_RING1_CTRL_DW0 + (((x) - 1) << 6), reg_val); \
+}
+
+#define SET_PDMA_RXRING_IPV6_FORCE_MODE(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_RX_RING1_CTRL_DW0 + (((x) - 1) << 6)); \
+reg_val &= ~(0x1 << PDMA_RX_IPV6_FORCE_OFFSET);    \
+reg_val |= (y) << PDMA_RX_IPV6_FORCE_OFFSET;    \
+sys_reg_write(LRO_RX_RING1_CTRL_DW0 + (((x) - 1) << 6), reg_val); \
+}
+
+#define SET_PDMA_RXRING_AGE_TIME(x, y) \
+{ \
+unsigned int reg_val1 = sys_reg_read(LRO_RX_RING0_CTRL_DW1 + ((x) << 6)); \
+unsigned int reg_val2 = sys_reg_read(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
+reg_val1 &= ~PDMA_LRO_RING_AGE1;    \
+reg_val2 &= ~PDMA_LRO_RING_AGE2;    \
+reg_val1 |= ((y) & 0x3ff) << PDMA_LRO_RING_AGE1_OFFSET;    \
+reg_val2 |= (((y) >> PDMA_LRO_AGE_H_OFFSET) & 0x03f) << \
+	    PDMA_LRO_RING_AGE2_OFFSET;\
+sys_reg_write(LRO_RX_RING0_CTRL_DW1 + ((x) << 6), reg_val1); \
+sys_reg_write(LRO_RX_RING0_CTRL_DW2 + ((x) << 6), reg_val2); \
+}
+
+#define SET_PDMA_RXRING_AGG_TIME(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
+reg_val &= ~PDMA_LRO_RING_AGG;    \
+reg_val |= ((y) & 0xffff) << PDMA_LRO_RING_AGG_OFFSET;    \
+sys_reg_write(LRO_RX_RING0_CTRL_DW2 + ((x) << 6), reg_val); \
+}
+
+#define SET_PDMA_RXRING_MAX_AGG_CNT(x, y) \
+{ \
+unsigned int reg_val1 = sys_reg_read(LRO_RX_RING1_CTRL_DW2 + \
+				     (((x) - 1) << 6)); \
+unsigned int reg_val2 = sys_reg_read(LRO_RX_RING1_CTRL_DW3 + \
+				     (((x) - 1) << 6)); \
+reg_val1 &= ~PDMA_LRO_RING_AGG_CNT1;    \
+reg_val2 &= ~PDMA_LRO_RING_AGG_CNT2;    \
+reg_val1 |= ((y) & 0x3f) << PDMA_LRO_RING_AGG_CNT1_OFFSET;    \
+reg_val2 |= (((y) >> PDMA_LRO_AGG_CNT_H_OFFSET) & 0x03) << \
+	     PDMA_LRO_RING_AGG_CNT2_OFFSET;    \
+sys_reg_write(LRO_RX_RING1_CTRL_DW2 + (((x) - 1) << 6), reg_val1); \
+sys_reg_write(LRO_RX_RING1_CTRL_DW3 + (((x) - 1) << 6), reg_val2); \
+}
+
+/* HW LRO debug functions */
+/* Update the per-ring LRO statistics for a descriptor received on
+ * @ring_num.  Implemented elsewhere in the driver (not visible here).
+ */
+void hw_lro_stats_update(unsigned int ring_num,
+			 struct PDMA_rxdesc *rx_ring);
+/* Update the LRO flush-reason statistics for @ring_num.  Implemented
+ * elsewhere in the driver (not visible here).
+ */
+void hw_lro_flush_stats_update(unsigned int ring_num,
+			       struct PDMA_rxdesc *rx_ring);
+
+#endif
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_pdma.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_pdma.c
new file mode 100644
index 0000000..344f3d5
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_pdma.c
@@ -0,0 +1,770 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "raether.h"
+
+/* Poll PDMA_GLO_CFG until both the TX and RX DMA engines report idle.
+ *
+ * Returns 0 once neither busy bit is set, or -1 after 1001 polls
+ * without the engines going idle.
+ */
+int fe_pdma_wait_dma_idle(void)
+{
+	unsigned int glo_cfg;
+	unsigned int poll;
+
+	for (poll = 0; poll <= 1000; poll++) {
+		glo_cfg = sys_reg_read(PDMA_GLO_CFG);
+		if (glo_cfg & RX_DMA_BUSY) {
+			pr_warn("\n  RX_DMA_BUSY !!! ");
+			continue;
+		}
+		if (glo_cfg & TX_DMA_BUSY) {
+			pr_warn("\n  TX_DMA_BUSY !!! ");
+			continue;
+		}
+		/* both engines idle */
+		return 0;
+	}
+
+	return -1;
+}
+
+/* Allocate and program PDMA RX ring 0.
+ *
+ * Allocates the descriptor ring in coherent DMA memory plus one
+ * receive buffer per descriptor, DMA-maps each buffer into its
+ * descriptor, then programs the controller with the ring base, size
+ * and initial CPU index.
+ *
+ * Fix vs. original: the dma_alloc_coherent() return value is now
+ * checked — the original dereferenced a NULL ring on allocation
+ * failure.
+ *
+ * Returns 0 on success, -ENOMEM on any allocation or mapping failure.
+ */
+int fe_pdma_rx_dma_init(struct net_device *dev)
+{
+	int i;
+	unsigned int skb_size;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	dma_addr_t dma_addr;
+
+	/* headroom + payload + shared-info tail, each cache-aligned */
+	skb_size = SKB_DATA_ALIGN(MAX_RX_LENGTH + NET_IP_ALIGN + NET_SKB_PAD) +
+		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	/* Initial RX Ring 0 */
+	ei_local->rx_ring[0] = dma_alloc_coherent(dev->dev.parent,
+						num_rx_desc *
+						sizeof(struct PDMA_rxdesc),
+						&ei_local->phy_rx_ring[0],
+						GFP_ATOMIC | __GFP_ZERO);
+	if (!ei_local->rx_ring[0]) {
+		pr_err("rx ring allocation failed!");
+		goto no_rx_mem;
+	}
+	pr_debug("\nphy_rx_ring[0] = 0x%08x, rx_ring[0] = 0x%p\n",
+		 (unsigned int)ei_local->phy_rx_ring[0],
+		 (void *)ei_local->rx_ring[0]);
+
+	for (i = 0; i < num_rx_desc; i++) {
+		ei_local->netrx_skb_data[0][i] =
+			raeth_alloc_skb_data(skb_size, GFP_KERNEL);
+		if (!ei_local->netrx_skb_data[0][i]) {
+			pr_err("rx skbuff buffer allocation failed!");
+			goto no_rx_mem;
+		}
+
+		/* descriptor starts HW-owned (DDONE = 0) and points at
+		 * the data area just past the reserved headroom
+		 */
+		memset(&ei_local->rx_ring[0][i], 0, sizeof(struct PDMA_rxdesc));
+		ei_local->rx_ring[0][i].rxd_info2.DDONE_bit = 0;
+		ei_local->rx_ring[0][i].rxd_info2.LS0 = 0;
+		ei_local->rx_ring[0][i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
+		dma_addr = dma_map_single(dev->dev.parent,
+					  ei_local->netrx_skb_data[0][i] +
+					  NET_SKB_PAD,
+					  MAX_RX_LENGTH,
+					  DMA_FROM_DEVICE);
+		ei_local->rx_ring[0][i].rxd_info1.PDP0 = dma_addr;
+		if (unlikely
+		    (dma_mapping_error
+		     (dev->dev.parent,
+		      ei_local->rx_ring[0][i].rxd_info1.PDP0))) {
+			pr_err("[%s]dma_map_single() failed...\n", __func__);
+			goto no_rx_mem;
+		}
+	}
+
+	/* Tell the adapter where the RX rings are located. */
+	sys_reg_write(RX_BASE_PTR0, phys_to_bus((u32)ei_local->phy_rx_ring[0]));
+	sys_reg_write(RX_MAX_CNT0, cpu_to_le32((u32)num_rx_desc));
+	sys_reg_write(RX_CALC_IDX0, cpu_to_le32((u32)(num_rx_desc - 1)));
+
+	sys_reg_write(PDMA_RST_CFG, PST_DRX_IDX0);
+
+	return 0;
+
+no_rx_mem:
+	return -ENOMEM;
+}
+
+/* Allocate and program PDMA TX ring 0.
+ *
+ * Every descriptor starts software-owned (DDONE = 1) with LS0 set, so
+ * the transmit path claims a slot by clearing DDONE.
+ *
+ * Fix vs. original: the dma_alloc_coherent() return value is now
+ * checked — the original wrote through a NULL ring pointer in the
+ * init loop on allocation failure.
+ *
+ * Returns 0 on success, -ENOMEM if the ring cannot be allocated.
+ */
+int fe_pdma_tx_dma_init(struct net_device *dev)
+{
+	int i;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+
+	for (i = 0; i < num_tx_desc; i++)
+		ei_local->skb_free[i] = 0;
+
+	ei_local->tx_ring_full = 0;
+	ei_local->free_idx = 0;
+	ei_local->tx_ring0 =
+	    dma_alloc_coherent(dev->dev.parent,
+			       num_tx_desc * sizeof(struct PDMA_txdesc),
+			       &ei_local->phy_tx_ring0,
+			       GFP_ATOMIC | __GFP_ZERO);
+	if (!ei_local->tx_ring0) {
+		pr_err("tx ring allocation failed!");
+		return -ENOMEM;
+	}
+	pr_debug("\nphy_tx_ring = 0x%08x, tx_ring = 0x%p\n",
+		 (unsigned int)ei_local->phy_tx_ring0,
+		 (void *)ei_local->tx_ring0);
+
+	for (i = 0; i < num_tx_desc; i++) {
+		memset(&ei_local->tx_ring0[i], 0, sizeof(struct PDMA_txdesc));
+		ei_local->tx_ring0[i].txd_info2.LS0_bit = 1;
+		ei_local->tx_ring0[i].txd_info2.DDONE_bit = 1;
+	}
+
+	/* Tell the adapter where the TX rings are located. */
+	sys_reg_write(TX_BASE_PTR0, phys_to_bus((u32)ei_local->phy_tx_ring0));
+	sys_reg_write(TX_MAX_CNT0, cpu_to_le32((u32)num_tx_desc));
+	sys_reg_write(TX_CTX_IDX0, 0);
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+	ei_local->tx_cpu_owner_idx0 = 0;
+#endif
+	sys_reg_write(PDMA_RST_CFG, PST_DTX_IDX0);
+
+	return 0;
+}
+
+/* Tear down PDMA RX ring 0: release the coherent descriptor ring and
+ * every receive buffer.
+ *
+ * Fix vs. original: the ring pointer and each buffer pointer are now
+ * NULL-guarded (matching fe_pdma_tx_dma_deinit()), so the function is
+ * safe to call on a partially initialised device, e.g. after
+ * fe_pdma_rx_dma_init() failed midway.
+ */
+void fe_pdma_rx_dma_deinit(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	int i;
+
+	/* free RX Ring */
+	if (ei_local->rx_ring[0])
+		dma_free_coherent(dev->dev.parent,
+				  num_rx_desc * sizeof(struct PDMA_rxdesc),
+				  ei_local->rx_ring[0],
+				  ei_local->phy_rx_ring[0]);
+
+	/* free RX data */
+	for (i = 0; i < num_rx_desc; i++) {
+		if (!ei_local->netrx_skb_data[0][i])
+			continue;
+		raeth_free_skb_data(ei_local->netrx_skb_data[0][i]);
+		ei_local->netrx_skb_data[0][i] = NULL;
+	}
+}
+
+/* Tear down PDMA TX ring 0: release the coherent descriptor ring and
+ * drop any skbs that were still pending transmission.
+ */
+void fe_pdma_tx_dma_deinit(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	struct sk_buff *pending;
+	int idx;
+
+	/* Release the descriptor ring if it was ever allocated. */
+	if (ei_local->tx_ring0)
+		dma_free_coherent(dev->dev.parent,
+				  num_tx_desc *
+				  sizeof(struct PDMA_txdesc),
+				  ei_local->tx_ring0,
+				  ei_local->phy_tx_ring0);
+
+	/* Free queued skbs; 0xFFFFFFFF entries are scatter-gather
+	 * continuation markers, not real skb pointers.
+	 */
+	for (idx = 0; idx < num_tx_desc; idx++) {
+		pending = ei_local->skb_free[idx];
+		if (pending != 0 &&
+		    pending != (struct sk_buff *)0xFFFFFFFF)
+			dev_kfree_skb_any(pending);
+	}
+}
+
+/* Program the PDMA global configuration: enable the TX/RX engines,
+ * DDONE write-back, multi-descriptor mode and the DMA burst sizes.
+ */
+void set_fe_pdma_glo_cfg(void)
+{
+	unsigned int glo_cfg;
+
+	glo_cfg = TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN |
+		  PDMA_BT_SIZE_16DWORDS | MULTI_EN |
+		  ADMA_RX_BT_SIZE_32DWORDS;
+	/* RX_2B_OFFSET is deliberately left disabled here. */
+
+	sys_reg_write(PDMA_GLO_CFG, glo_cfg);
+}
+
+/* @brief Number of PDMA TX descriptor slots needed to cover @size
+ * bytes, at MAX_PTXD_LEN bytes per slot.
+ *
+ * @parm size  buffer length in bytes
+ *
+ * @return ceil(size / MAX_PTXD_LEN); 0 when size == 0
+ *
+ * The original counted slots in a loop (O(size / MAX_PTXD_LEN));
+ * this computes the same value with one division, written as
+ * 1 + (size - 1) / LEN so it cannot overflow for any unsigned size.
+ */
+static inline unsigned int pdma_cal_frag_txd_num(unsigned int size)
+{
+	if (size == 0)
+		return 0;
+	return 1 + (size - 1) / MAX_PTXD_LEN;
+}
+
+/* Build one (non-TSO) TX descriptor for @skb at *tx_cpu_owner_idx.
+ *
+ * Fix vs. original: the txd_info2/txd_info4 temporaries are now
+ * zero-initialised.  They are stack variables copied wholesale into
+ * the live descriptor, and the original left every field a given
+ * configuration did not touch (e.g. TUI_CO without FE_CSUM_OFFLOAD,
+ * VLAN_TAG without FE_HW_VLAN_TX) as stack garbage handed to the DMA
+ * engine.
+ *
+ * @gmac_no selects the forward port; HW-NAT may redirect the packet
+ * to the PPE port (4) instead.  Always returns 0.
+ */
+int fe_fill_tx_desc(struct net_device *dev,
+		    unsigned long *tx_cpu_owner_idx,
+		    struct sk_buff *skb,
+		    int gmac_no)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	struct PDMA_txdesc *tx_ring = &ei_local->tx_ring0[*tx_cpu_owner_idx];
+	struct PDMA_TXD_INFO2_T txd_info2_tmp = { 0 };
+	struct PDMA_TXD_INFO4_T txd_info4_tmp = { 0 };
+
+	tx_ring->txd_info1.SDP0 = virt_to_phys(skb->data);
+	txd_info2_tmp.SDL0 = skb->len;
+	txd_info4_tmp.FPORT = gmac_no;
+	txd_info4_tmp.TSO = 0;
+
+	if (ei_local->features & FE_CSUM_OFFLOAD) {
+		if (skb->ip_summed == CHECKSUM_PARTIAL)
+			txd_info4_tmp.TUI_CO = 7;
+		else
+			txd_info4_tmp.TUI_CO = 0;
+	}
+
+	if (ei_local->features & FE_HW_VLAN_TX) {
+		/* 0x10000 presumably marks the tag as valid for
+		 * insertion — TODO confirm against the PDMA datasheet.
+		 */
+		if (skb_vlan_tag_present(skb))
+			txd_info4_tmp.VLAN_TAG =
+				0x10000 | skb_vlan_tag_get(skb);
+		else
+			txd_info4_tmp.VLAN_TAG = 0;
+	}
+#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+	if (IS_MAGIC_TAG_PROTECT_VALID_HEAD(skb)) {
+		if (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_PPE) {
+			if (ppe_hook_rx_eth) {
+				/* PPE */
+				txd_info4_tmp.FPORT = 4;
+				FOE_MAGIC_TAG(skb) = 0;
+			}
+		}
+	} else if (IS_MAGIC_TAG_PROTECT_VALID_TAIL(skb)) {
+		if (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_PPE) {
+			if (ppe_hook_rx_eth) {
+				/* PPE */
+				txd_info4_tmp.FPORT = 4;
+				FOE_MAGIC_TAG(skb) = 0;
+			}
+		}
+	}
+#endif
+
+	/* single-segment packet; clearing DDONE hands it to hardware */
+	txd_info2_tmp.LS0_bit = 1;
+	txd_info2_tmp.DDONE_bit = 0;
+
+	tx_ring->txd_info4 = txd_info4_tmp;
+	tx_ring->txd_info2 = txd_info2_tmp;
+
+	return 0;
+}
+
+/* Emit TX descriptors for the linear (non-paged) part of a TSO skb.
+ *
+ * Each PDMA descriptor carries two buffer slots (SDP0/SDP1).  A new
+ * descriptor is claimed only when ei_local->skb_txd_num is even; odd
+ * counts fill the second slot of the current descriptor.  LS0/LS1 is
+ * set on the final chunk only when there are no page fragments to
+ * follow (nr_frags == 0).
+ *
+ * NOTE(review): the DDONE busy-wait bumps tx_errors on every spin
+ * iteration, so the counter over-counts while the ring is full —
+ * confirm this is intentional.  Always returns 0.
+ */
+static int fe_fill_tx_tso_data(struct END_DEVICE *ei_local,
+			       unsigned int frag_offset,
+			       unsigned int frag_size,
+			       unsigned long *tx_cpu_owner_idx,
+			       unsigned int nr_frags,
+			       int gmac_no)
+{
+	struct PSEUDO_ADAPTER *p_ad;
+	unsigned int size;
+	unsigned int frag_txd_num;
+	struct PDMA_txdesc *tx_ring;
+
+	frag_txd_num = pdma_cal_frag_txd_num(frag_size);
+	tx_ring = &ei_local->tx_ring0[*tx_cpu_owner_idx];
+
+	while (frag_txd_num > 0) {
+		/* chunk at most MAX_PTXD_LEN bytes per buffer slot */
+		if (frag_size < MAX_PTXD_LEN)
+			size = frag_size;
+		else
+			size = MAX_PTXD_LEN;
+
+		if (ei_local->skb_txd_num % 2 == 0) {
+			/* even count: advance to a fresh descriptor and
+			 * wait until the hardware has released it
+			 */
+			*tx_cpu_owner_idx =
+			    (*tx_cpu_owner_idx + 1) % num_tx_desc;
+			tx_ring = &ei_local->tx_ring0[*tx_cpu_owner_idx];
+
+			while (tx_ring->txd_info2.DDONE_bit == 0) {
+				if (gmac_no == 2) {
+					p_ad =
+					    netdev_priv(ei_local->pseudo_dev);
+					p_ad->stat.tx_errors++;
+				} else {
+					ei_local->stat.tx_errors++;
+				}
+			}
+			tx_ring->txd_info1.SDP0 = frag_offset;
+			tx_ring->txd_info2.SDL0 = size;
+			if (((nr_frags == 0)) && (frag_txd_num == 1))
+				tx_ring->txd_info2.LS0_bit = 1;
+			else
+				tx_ring->txd_info2.LS0_bit = 0;
+			tx_ring->txd_info2.DDONE_bit = 0;
+			tx_ring->txd_info4.FPORT = gmac_no;
+		} else {
+			/* odd count: fill the second slot of the
+			 * descriptor claimed on the previous pass
+			 */
+			tx_ring->txd_info3.SDP1 = frag_offset;
+			tx_ring->txd_info2.SDL1 = size;
+			if (((nr_frags == 0)) && (frag_txd_num == 1))
+				tx_ring->txd_info2.LS1_bit = 1;
+			else
+				tx_ring->txd_info2.LS1_bit = 0;
+		}
+		frag_offset += size;
+		frag_size -= size;
+		frag_txd_num--;
+		ei_local->skb_txd_num++;
+	}
+
+	return 0;
+}
+
+/* Emit TX descriptors for the paged fragments of a TSO skb.
+ *
+ * Mirrors fe_fill_tx_tso_data(): two buffer slots per descriptor,
+ * selected by the parity of ei_local->skb_txd_num, with each fragment
+ * chunked to at most MAX_PTXD_LEN bytes and DMA-mapped via
+ * skb_frag_dma_map().  LS0/LS1 is set only on the last chunk of the
+ * last fragment.
+ *
+ * On a mapping failure the err_dma path walks the already-consumed
+ * descriptors, unmaps them and restores their free state (DDONE/LS
+ * bits), then returns -1.
+ * NOTE(review): the unwind loop keys its slot parity off the *current*
+ * ei_local->skb_txd_num and keeps incrementing it, rather than
+ * replaying from the parity the forward pass started with — confirm
+ * the unwind pairs slots correctly in all cases.
+ */
+static int fe_fill_tx_tso_frag(struct net_device *netdev,
+			       struct sk_buff *skb,
+			       unsigned long *tx_cpu_owner_idx,
+			       int gmac_no)
+{
+	struct END_DEVICE *ei_local = netdev_priv(netdev);
+	struct PSEUDO_ADAPTER *p_ad;
+	unsigned int size;
+	unsigned int frag_txd_num;
+	skb_frag_t * frag;
+	unsigned int nr_frags;
+	unsigned int frag_offset, frag_size;
+	struct PDMA_txdesc *tx_ring;
+	int i = 0, j = 0, unmap_idx = 0;
+
+	nr_frags = skb_shinfo(skb)->nr_frags;
+	tx_ring = &ei_local->tx_ring0[*tx_cpu_owner_idx];
+
+	for (i = 0; i < nr_frags; i++) {
+		frag = &skb_shinfo(skb)->frags[i];
+		frag_offset = 0;
+		frag_size = skb_frag_size(frag);
+		frag_txd_num = pdma_cal_frag_txd_num(frag_size);
+
+		while (frag_txd_num > 0) {
+			/* at most MAX_PTXD_LEN bytes per buffer slot */
+			if (frag_size < MAX_PTXD_LEN)
+				size = frag_size;
+			else
+				size = MAX_PTXD_LEN;
+
+			if (ei_local->skb_txd_num % 2 == 0) {
+				/* even: claim the next descriptor and
+				 * spin until hardware releases it
+				 */
+				*tx_cpu_owner_idx =
+					(*tx_cpu_owner_idx + 1) % num_tx_desc;
+				tx_ring =
+					&ei_local->tx_ring0[*tx_cpu_owner_idx];
+
+				while (tx_ring->txd_info2.DDONE_bit == 0) {
+					if (gmac_no == 2) {
+						p_ad =
+						    netdev_priv
+						    (ei_local->pseudo_dev);
+						p_ad->stat.tx_errors++;
+					} else {
+						ei_local->stat.tx_errors++;
+					}
+				}
+
+				tx_ring->txd_info1.SDP0 = skb_frag_dma_map(netdev->dev.parent, frag, frag_offset, size, DMA_TO_DEVICE);
+
+				if (unlikely
+				    (dma_mapping_error
+				     (netdev->dev.parent,
+				      tx_ring->txd_info1.SDP0))) {
+					pr_err
+					    ("[%s]dma_map_page() failed\n",
+					     __func__);
+					goto err_dma;
+				}
+
+				tx_ring->txd_info2.SDL0 = size;
+
+				/* last chunk of last fragment ends the
+				 * packet
+				 */
+				if ((frag_txd_num == 1) &&
+				    (i == (nr_frags - 1)))
+					tx_ring->txd_info2.LS0_bit = 1;
+				else
+					tx_ring->txd_info2.LS0_bit = 0;
+				tx_ring->txd_info2.DDONE_bit = 0;
+				tx_ring->txd_info4.FPORT = gmac_no;
+			} else {
+				/* odd: second slot of current descriptor */
+				tx_ring->txd_info3.SDP1 = skb_frag_dma_map(netdev->dev.parent, frag, frag_offset, size, DMA_TO_DEVICE);
+
+				if (unlikely
+				    (dma_mapping_error
+				     (netdev->dev.parent,
+				      tx_ring->txd_info3.SDP1))) {
+					pr_err
+					    ("[%s]dma_map_page() failed\n",
+					     __func__);
+					goto err_dma;
+				}
+				tx_ring->txd_info2.SDL1 = size;
+				if ((frag_txd_num == 1) &&
+				    (i == (nr_frags - 1)))
+					tx_ring->txd_info2.LS1_bit = 1;
+				else
+					tx_ring->txd_info2.LS1_bit = 0;
+			}
+			frag_offset += size;
+			frag_size -= size;
+			frag_txd_num--;
+			ei_local->skb_txd_num++;
+		}
+	}
+
+	return 0;
+
+err_dma:
+	/* unmap dma */
+	j = *tx_cpu_owner_idx;
+	unmap_idx = i;
+	for (i = 0; i < unmap_idx; i++) {
+		frag = &skb_shinfo(skb)->frags[i];
+		frag_size = skb_frag_size(frag);
+		frag_txd_num = pdma_cal_frag_txd_num(frag_size);
+
+		while (frag_txd_num > 0) {
+			if (frag_size < MAX_PTXD_LEN)
+				size = frag_size;
+			else
+				size = MAX_PTXD_LEN;
+			if (ei_local->skb_txd_num % 2 == 0) {
+				j = (j + 1) % num_tx_desc;
+				dma_unmap_page(netdev->dev.parent,
+					       ei_local->tx_ring0[j].
+					       txd_info1.SDP0,
+					       ei_local->tx_ring0[j].
+					       txd_info2.SDL0, DMA_TO_DEVICE);
+				/* reinit txd */
+				ei_local->tx_ring0[j].txd_info2.LS0_bit = 1;
+				ei_local->tx_ring0[j].txd_info2.DDONE_bit = 1;
+			} else {
+				dma_unmap_page(netdev->dev.parent,
+					       ei_local->tx_ring0[j].
+					       txd_info3.SDP1,
+					       ei_local->tx_ring0[j].
+					       txd_info2.SDL1, DMA_TO_DEVICE);
+				/* reinit txd */
+				ei_local->tx_ring0[j].txd_info2.LS1_bit = 1;
+			}
+			frag_size -= size;
+			frag_txd_num--;
+			ei_local->skb_txd_num++;
+		}
+	}
+
+	return -1;
+}
+
+/* Build the descriptor chain for a TSO/scatter-gather skb.
+ *
+ * Fills the first descriptor from the linear area, hands the
+ * remainder of the linear area to fe_fill_tx_tso_data() and the page
+ * fragments to fe_fill_tx_tso_frag().  When the skb is actually
+ * segmented (gso_segs > 1) the MSS is stashed in the TCP checksum
+ * field for the hardware and the header is synced back to the device.
+ * The first descriptor's DDONE bit is cleared last, after the whole
+ * chain is built.
+ *
+ * Returns 0 on success or the negative error from the fragment fill.
+ */
+int fe_fill_tx_desc_tso(struct net_device *dev,
+			unsigned long *tx_cpu_owner_idx,
+			struct sk_buff *skb,
+			int gmac_no)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	struct iphdr *iph = NULL;
+	struct ipv6hdr *ip6h = NULL;
+	struct tcphdr *th = NULL;
+	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+	unsigned int len, offset;
+	int err;
+	struct PDMA_txdesc *tx_ring = &ei_local->tx_ring0[*tx_cpu_owner_idx];
+
+	tx_ring->txd_info4.FPORT = gmac_no;
+	tx_ring->txd_info4.TSO = 0;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		tx_ring->txd_info4.TUI_CO = 7;
+	else
+		tx_ring->txd_info4.TUI_CO = 0;
+
+	if (ei_local->features & FE_HW_VLAN_TX) {
+		/* 0x10000 presumably marks the tag valid — confirm
+		 * against the PDMA datasheet
+		 */
+		if (skb_vlan_tag_present(skb))
+			tx_ring->txd_info4.VLAN_TAG =
+				0x10000 | skb_vlan_tag_get(skb);
+		else
+			tx_ring->txd_info4.VLAN_TAG = 0;
+	}
+#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+	if (IS_MAGIC_TAG_PROTECT_VALID_HEAD(skb)) {
+		if (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_PPE) {
+			if (ppe_hook_rx_eth) {
+				/* PPE */
+				tx_ring->txd_info4.FPORT = 4;
+				FOE_MAGIC_TAG(skb) = 0;
+			}
+		}
+	} else if (IS_MAGIC_TAG_PROTECT_VALID_TAIL(skb)) {
+		if (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_PPE) {
+			if (ppe_hook_rx_eth) {
+				/* PPE */
+				tx_ring->txd_info4.FPORT = 4;
+				FOE_MAGIC_TAG(skb) = 0;
+			}
+		}
+	}
+#endif
+	/* slot 0 of the first descriptor is consumed below */
+	ei_local->skb_txd_num = 1;
+
+	/* skb data handle */
+	len = skb->len - skb->data_len;
+	offset = virt_to_phys(skb->data);
+	tx_ring->txd_info1.SDP0 = offset;
+	if (len < MAX_PTXD_LEN) {
+		/* whole linear area fits in the first slot */
+		tx_ring->txd_info2.SDL0 = len;
+		tx_ring->txd_info2.LS0_bit = nr_frags ? 0 : 1;
+		len = 0;
+	} else {
+		tx_ring->txd_info2.SDL0 = MAX_PTXD_LEN;
+		tx_ring->txd_info2.LS0_bit = 0;
+		len -= MAX_PTXD_LEN;
+		offset += MAX_PTXD_LEN;
+	}
+
+	if (len > 0)
+		fe_fill_tx_tso_data(ei_local, offset, len,
+				    tx_cpu_owner_idx, nr_frags, gmac_no);
+
+	/* skb fragments handle */
+	if (nr_frags > 0) {
+		err = fe_fill_tx_tso_frag(dev, skb, tx_cpu_owner_idx, gmac_no);
+		if (unlikely(err))
+			return err;
+	}
+
+	/* fill in MSS info in tcp checksum field */
+	if (skb_shinfo(skb)->gso_segs > 1) {
+		/* TCP over IPv4 */
+		iph = (struct iphdr *)skb_network_header(skb);
+		if ((iph->version == 4) && (iph->protocol == IPPROTO_TCP)) {
+			th = (struct tcphdr *)skb_transport_header(skb);
+			tx_ring->txd_info4.TSO = 1;
+			th->check = htons(skb_shinfo(skb)->gso_size);
+			/* push the modified header out to DMA memory */
+			dma_sync_single_for_device(dev->dev.parent,
+						   virt_to_phys(th),
+						   sizeof(struct tcphdr),
+						   DMA_TO_DEVICE);
+		}
+
+		/* TCP over IPv6 */
+		if (ei_local->features & FE_TSO_V6) {
+			ip6h = (struct ipv6hdr *)skb_network_header(skb);
+			if ((ip6h->nexthdr == NEXTHDR_TCP) &&
+			    (ip6h->version == 6)) {
+				th = (struct tcphdr *)skb_transport_header(skb);
+				tx_ring->txd_info4.TSO = 1;
+				th->check = htons(skb_shinfo(skb)->gso_size);
+				dma_sync_single_for_device(dev->dev.parent,
+							   virt_to_phys(th),
+							   sizeof(struct
+								  tcphdr),
+							   DMA_TO_DEVICE);
+			}
+		}
+	}
+	/* hand the first descriptor (and thus the chain) to hardware */
+	tx_ring->txd_info2.DDONE_bit = 0;
+
+	return 0;
+}
+
+/* Queue @skb on PDMA TX ring 0 and kick the DMA engine.
+ *
+ * Waits (busy-spin) for the current and the following descriptor to
+ * be hardware-released, fills them via the TSO or plain path depending
+ * on @num_of_frag, then publishes the new CPU index to TX_CTX_IDX0.
+ * Returns the transmitted byte count, or a negative error from the
+ * descriptor-fill helpers.
+ *
+ * NOTE(review): the first wait loop NULL-checks ei_local->pseudo_dev
+ * for gmac_no == 2 but the second one dereferences it unchecked —
+ * confirm pseudo_dev is guaranteed non-NULL once a fill succeeded.
+ * The busy-waits also bump tx_errors every spin iteration, so the
+ * counter over-counts under ring pressure.
+ */
+static inline int rt2880_pdma_eth_send(struct net_device *dev,
+				       struct sk_buff *skb, int gmac_no,
+				       unsigned int num_of_frag)
+{
+	unsigned int length = skb->len;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+	unsigned long tx_cpu_owner_idx0 = ei_local->tx_cpu_owner_idx0;
+#else
+	unsigned long tx_cpu_owner_idx0 = sys_reg_read(TX_CTX_IDX0);
+#endif
+	struct PSEUDO_ADAPTER *p_ad;
+	int err;
+
+	/* wait for the slot we are about to fill to be HW-released */
+	while (ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.DDONE_bit == 0) {
+		if (gmac_no == 2) {
+			if (ei_local->pseudo_dev) {
+				p_ad = netdev_priv(ei_local->pseudo_dev);
+				p_ad->stat.tx_errors++;
+			} else {
+				pr_err
+				    ("pseudo_dev is still not initialize ");
+				pr_err
+				    ("but receive packet from GMAC2\n");
+			}
+		} else {
+			ei_local->stat.tx_errors++;
+		}
+	}
+
+	if (num_of_frag > 1)
+		err = fe_fill_tx_desc_tso(dev, &tx_cpu_owner_idx0,
+					  skb, gmac_no);
+	else
+		err = fe_fill_tx_desc(dev, &tx_cpu_owner_idx0, skb, gmac_no);
+	if (err)
+		return err;
+
+	/* advance past the chain and wait for the next slot too */
+	tx_cpu_owner_idx0 = (tx_cpu_owner_idx0 + 1) % num_tx_desc;
+	while (ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.DDONE_bit == 0) {
+		if (gmac_no == 2) {
+			p_ad = netdev_priv(ei_local->pseudo_dev);
+			p_ad->stat.tx_errors++;
+		} else {
+			ei_local->stat.tx_errors++;
+		}
+	}
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+	ei_local->tx_cpu_owner_idx0 = tx_cpu_owner_idx0;
+#endif
+	/* make sure that all changes to the dma ring are flushed before we
+	 * continue
+	 */
+	wmb();
+
+	/* doorbell: hand the new descriptors to the DMA engine */
+	sys_reg_write(TX_CTX_IDX0, cpu_to_le32((u32)tx_cpu_owner_idx0));
+
+	if (gmac_no == 2) {
+		p_ad = netdev_priv(ei_local->pseudo_dev);
+		p_ad->stat.tx_packets++;
+		p_ad->stat.tx_bytes += length;
+	} else {
+		ei_local->stat.tx_packets++;
+		ei_local->stat.tx_bytes += length;
+	}
+
+	return length;
+}
+
+/* PDMA transmit entry point (ndo_start_xmit back-end).
+ *
+ * Offers the packet to the HW-NAT hook first; otherwise computes how
+ * many descriptors the (possibly TSO) skb needs, checks ring space,
+ * queues it via rt2880_pdma_eth_send() and records the skb in
+ * skb_free[] for housekeeping (continuation slots get the magic
+ * 0xFFFFFFFF marker).  Always returns NETDEV_TX_OK; on failure the
+ * skb is freed and counted as dropped.
+ *
+ * Fix vs. original: the inner duplicate
+ * "#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)"
+ * guard around the FOE_MAGIC_TAG check was redundant (identical to
+ * the enclosing guard) and has been folded away; behavior under every
+ * configuration is unchanged.
+ */
+int ei_pdma_start_xmit(struct sk_buff *skb, struct net_device *dev, int gmac_no)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	unsigned long tx_cpu_owner_idx;
+	unsigned int tx_cpu_owner_idx_next, tx_cpu_owner_idx_next2;
+	unsigned int num_of_txd, num_of_frag;
+	unsigned int nr_frags = skb_shinfo(skb)->nr_frags, i;
+	skb_frag_t *frag;
+	struct PSEUDO_ADAPTER *p_ad;
+	unsigned int tx_cpu_cal_idx;
+
+#if defined(CONFIG_RA_HW_NAT)  || defined(CONFIG_RA_HW_NAT_MODULE)
+	/* offer untagged packets to the HW-NAT TX hook first */
+	if (ppe_hook_tx_eth) {
+		if (FOE_MAGIC_TAG(skb) != FOE_MAGIC_PPE)
+			if (ppe_hook_tx_eth(skb, gmac_no) != 1) {
+				dev_kfree_skb_any(skb);
+				return 0;
+			}
+	}
+#endif
+
+	netif_trans_update(dev);	/* save the timestamp */
+	spin_lock(&ei_local->page_lock);
+	dma_sync_single_for_device(dev->dev.parent, virt_to_phys(skb->data),
+				   skb->len, DMA_TO_DEVICE);
+
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+	tx_cpu_owner_idx = ei_local->tx_cpu_owner_idx0;
+#else
+	tx_cpu_owner_idx = sys_reg_read(TX_CTX_IDX0);
+#endif
+
+	if (ei_local->features & FE_TSO) {
+		/* count buffer slots: linear part plus each fragment;
+		 * two slots pack into one descriptor
+		 */
+		num_of_txd = pdma_cal_frag_txd_num(skb->len - skb->data_len);
+		if (nr_frags != 0) {
+			for (i = 0; i < nr_frags; i++) {
+				frag = &skb_shinfo(skb)->frags[i];
+				num_of_txd +=
+				    pdma_cal_frag_txd_num(skb_frag_size(frag));
+			}
+		}
+		num_of_frag = num_of_txd;
+		num_of_txd = (num_of_txd + 1) >> 1;
+	} else {
+		num_of_frag = 1;
+		num_of_txd = 1;
+	}
+
+	tx_cpu_owner_idx_next = (tx_cpu_owner_idx + num_of_txd) % num_tx_desc;
+
+	if ((ei_local->skb_free[tx_cpu_owner_idx_next] == 0) &&
+	    (ei_local->skb_free[tx_cpu_owner_idx] == 0)) {
+		if (rt2880_pdma_eth_send(dev, skb, gmac_no, num_of_frag) < 0) {
+			dev_kfree_skb_any(skb);
+			if (gmac_no == 2) {
+				p_ad = netdev_priv(ei_local->pseudo_dev);
+				p_ad->stat.tx_dropped++;
+			} else {
+				ei_local->stat.tx_dropped++;
+			}
+			goto tx_err;
+		}
+
+		tx_cpu_owner_idx_next2 =
+		    (tx_cpu_owner_idx_next + 1) % num_tx_desc;
+
+		if (ei_local->skb_free[tx_cpu_owner_idx_next2] != 0)
+			ei_local->tx_ring_full = 1;
+	} else {
+		/* no room in the ring: drop the packet */
+		if (gmac_no == 2) {
+			p_ad = netdev_priv(ei_local->pseudo_dev);
+			p_ad->stat.tx_dropped++;
+		} else {
+			ei_local->stat.tx_dropped++;
+		}
+
+		dev_kfree_skb_any(skb);
+		spin_unlock(&ei_local->page_lock);
+		return NETDEV_TX_OK;
+	}
+
+	/* SG: use multiple TXD to send the packet (only have one skb) */
+	tx_cpu_cal_idx = (tx_cpu_owner_idx + num_of_txd - 1) % num_tx_desc;
+	ei_local->skb_free[tx_cpu_cal_idx] = skb;
+	while (--num_of_txd)
+		/* MAGIC ID */
+		ei_local->skb_free[(--tx_cpu_cal_idx) % num_tx_desc] =
+			(struct sk_buff *)0xFFFFFFFF;
+
+tx_err:
+	/* success path deliberately falls through to the unlock */
+	spin_unlock(&ei_local->page_lock);
+	return NETDEV_TX_OK;
+}
+
+/* Reclaim completed TX descriptors.
+ *
+ * Walks forward from free_idx, freeing up to @budget skbs whose
+ * descriptors the hardware has marked done (DDONE == 1), and clears
+ * the ring-full flag.  Returns the number of descriptors processed.
+ */
+int ei_pdma_xmit_housekeeping(struct net_device *netdev, int budget)
+{
+	struct END_DEVICE *ei_local = netdev_priv(netdev);
+	struct PDMA_txdesc *ring = ei_local->tx_ring0;
+	unsigned long idx = ei_local->free_idx;
+	int reclaimed = 0;
+
+	while (budget &&
+	       (ei_local->skb_free[idx] != 0) &&
+	       (ring[idx].txd_info2.DDONE_bit == 1)) {
+		/* 0xFFFFFFFF marks an SG continuation slot with no skb
+		 * of its own
+		 */
+		if (ei_local->skb_free[idx] !=
+		    (struct sk_buff *)0xFFFFFFFF)
+			dev_kfree_skb_any(ei_local->skb_free[idx]);
+
+		ei_local->skb_free[idx] = 0;
+		idx = (idx + 1) % num_tx_desc;
+		budget--;
+		reclaimed++;
+	}
+
+	ei_local->tx_ring_full = 0;
+	ei_local->free_idx = idx;
+
+	return reclaimed;
+}
+
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_qdma.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_qdma.c
new file mode 100644
index 0000000..a2414c4
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_qdma.c
@@ -0,0 +1,1509 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "raether.h"
+#include "ra_ioctl.h"
+#include "raether_qdma.h"
+
+/* Head of the QDMA forwarding free-descriptor queue (see fq_qdma_init) */
+struct QDMA_txdesc *free_head;
+
+/* ioctl: skb->mark to queue mapping table, indexed by mark (0..63) */
+unsigned int M2Q_table[64] = { 0 };
+EXPORT_SYMBOL(M2Q_table);
+unsigned int lan_wan_separate;	/* QoS web UI: separate LAN/WAN queues */
+EXPORT_SYMBOL(lan_wan_separate);
+/* Marker stored in skb_free[] for continuation TXDs of a multi-TXD
+ * packet; such slots carry no skb to free.
+ * NOTE(review): 32-bit magic cast -- confirm it cannot alias a real
+ * pointer on 64-bit kernels.
+ */
+struct sk_buff *magic_id = (struct sk_buff *)0xFFFFFFFF;
+
+/* CONFIG_HW_SFQ */
+unsigned int web_sfq_enable;	/* non-zero when web UI enabled HW SFQ */
+#define HW_SFQ_UP 3
+#define HW_SFQ_DL 1
+
+#define sfq_debug 0
+struct SFQ_table *sfq0;
+struct SFQ_table *sfq1;
+struct SFQ_table *sfq2;
+struct SFQ_table *sfq3;
+
+#define KSEG1                   0xa0000000
+#define PHYS_TO_VIRT(x)         phys_to_virt(x)
+#define VIRT_TO_PHYS(x)         virt_to_phys(x)
+/* extern void set_fe_dma_glo_cfg(void); */
+/* Scratch result shared by sfq_parse_layer_info() and the send paths */
+struct parse_result sfq_parse_result;
+
+/**
+ *
+ * @brief: get the TXD index from its address
+ *
+ * @param: cpu_ptr
+ *
+ * @return: TXD index
+ */
+
+/**
+ * @brief calculate how many TXDs are needed to carry a payload of the
+ * given size (each TXD holds at most MAX_QTXD_LEN bytes)
+ *
+ * @param size  payload length in bytes
+ *
+ * @return number of TXDs required (0 when size is 0)
+ */
+
+static inline unsigned int cal_frag_txd_num(unsigned int size)
+{
+	unsigned int txd_cnt = 0;
+
+	/* Consume the payload one MAX_QTXD_LEN chunk at a time; the
+	 * final (possibly partial) chunk also takes one descriptor.
+	 */
+	while (size > 0) {
+		txd_cnt++;
+		if (size > MAX_QTXD_LEN)
+			size -= MAX_QTXD_LEN;
+		else
+			size = 0;
+	}
+
+	return txd_cnt;
+}
+
+/**
+ * @brief pop one free TXD index off ring @ring_no's free list
+ *
+ * txd_pool_info[] is used as a linked free list: each entry stores the
+ * index of the next free descriptor, so the head simply follows one
+ * link.  The per-ring free counter is decremented.
+ *
+ * @param ei_local  driver private data
+ * @param ring_no   TX ring (queue) number
+ *
+ * @return index of the descriptor removed from the pool
+ *
+ * NOTE(review): no empty-list check here -- callers must guarantee
+ * free_txd_num[ring_no] > 0 before calling.
+ */
+static inline int get_free_txd(struct END_DEVICE *ei_local, int ring_no)
+{
+	unsigned int tmp_idx;
+
+	tmp_idx = ei_local->free_txd_head[ring_no];
+	ei_local->free_txd_head[ring_no] = ei_local->txd_pool_info[tmp_idx];
+	atomic_sub(1, &ei_local->free_txd_num[ring_no]);
+	return tmp_idx;
+}
+
+/* Translate a TXD pool index into the DMA (physical) address of that
+ * descriptor inside the coherent txd_pool allocation.
+ */
+static inline unsigned int get_phy_addr(struct END_DEVICE *ei_local,
+					unsigned int idx)
+{
+	return ei_local->phy_txd_pool + (idx * QTXD_LEN);
+}
+
+/**
+ * @brief return a TXD to the free list
+ *
+ * Links @free_txd_idx after the current tail and makes it the new tail.
+ *
+ * @param ei_local      driver private data
+ * @param free_txd_idx  descriptor index being released
+ *
+ * NOTE(review): always uses free_txd_tail[0], while get_free_txd() is
+ * per-ring, and the free counter is not incremented here -- confirm
+ * all descriptors are recycled through ring 0 and that callers adjust
+ * the counter.
+ */
+static inline void put_free_txd(struct END_DEVICE *ei_local, int free_txd_idx)
+{
+	ei_local->txd_pool_info[ei_local->free_txd_tail[0]] = free_txd_idx;
+	ei_local->free_txd_tail[0] = free_txd_idx;
+}
+
+/* Partition the TXD pool into per-queue free lists: GMAC1 gets
+ * gmac1_txq_num queues of gmac1_txq_txd_num descriptors starting at
+ * index 0; GMAC2's queues follow starting at gmac1_txd_num.  Each
+ * queue's head/tail delimit a contiguous index range; the links
+ * between entries come from txd_pool_info[], set up by the caller.
+ */
+void init_pseudo_link_list(struct END_DEVICE *ei_local)
+{
+	int i;
+
+	for (i = 0; i < gmac1_txq_num; i++) {
+		atomic_set(&ei_local->free_txd_num[i], gmac1_txq_txd_num);
+		ei_local->free_txd_head[i] = gmac1_txq_txd_num * i;
+		ei_local->free_txd_tail[i] = gmac1_txq_txd_num * (i + 1) - 1;
+	}
+	for (i = 0; i < gmac2_txq_num; i++) {
+		atomic_set(&ei_local->free_txd_num[i + gmac1_txq_num],
+			   gmac2_txq_txd_num);
+		ei_local->free_txd_head[i + gmac1_txq_num] =
+		    gmac1_txd_num + gmac2_txq_txd_num * i;
+		ei_local->free_txd_tail[i + gmac1_txq_num] =
+		    gmac1_txd_num + gmac2_txq_txd_num * (i + 1) - 1;
+	}
+}
+
+/* Map a TXD pool index back to the TX queue (ring) that owns it,
+ * reversing the layout established by init_pseudo_link_list().
+ * Logs and returns 0 for an out-of-range index.
+ */
+static inline int ring_no_mapping(int txd_idx)
+{
+	int i;
+
+	if (txd_idx < gmac1_txd_num) {
+		for (i = 0; i < gmac1_txq_num; i++) {
+			if (txd_idx < (gmac1_txq_txd_num * (i + 1)))
+				return i;
+		}
+	}
+
+	/* Index falls in GMAC2's region; rebase it before scanning. */
+	txd_idx -= gmac1_txd_num;
+	for (i = 0; i < gmac2_txq_num; i++) {
+		if (txd_idx < (gmac2_txq_txd_num * (i + 1)))
+			return (i + gmac1_txq_num);
+	}
+	pr_err("txd index out of range\n");
+	return 0;
+}
+
+/*define qdma initial alloc*/
+/**
+ * @brief allocate and initialize the QDMA TX descriptor pool
+ *
+ * Allocates a coherent pool of num_tx_desc TXDs, chains them into a
+ * free list, primes the CPU/DMA transmit and release ring pointers
+ * with one null TXD each, and reserves per-physical-queue descriptor
+ * counts in hardware.
+ *
+ * @return  0: fail
+ *	    1: success
+ */
+bool qdma_tx_desc_alloc(void)
+{
+	struct net_device *dev = dev_raether;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	unsigned int txd_idx;
+	int i = 0;
+
+	ei_local->txd_pool =
+	    dma_alloc_coherent(&ei_local->qdma_pdev->dev,
+			       QTXD_LEN * num_tx_desc,
+			       &ei_local->phy_txd_pool, GFP_KERNEL);
+	pr_err("txd_pool=%p phy_txd_pool=%p\n", ei_local->txd_pool,
+	       (void *)ei_local->phy_txd_pool);
+
+	if (!ei_local->txd_pool) {
+		pr_err("adapter->txd_pool allocation failed!\n");
+		return 0;
+	}
+	pr_err("ei_local->skb_free start address is 0x%p.\n",
+	       ei_local->skb_free);
+	/* Chain txd_pool_info[] into a free list (entry i links to i+1)
+	 * and mark every TXD last-segment/done so hardware skips them.
+	 */
+	for (i = 0; i < num_tx_desc; i++) {
+		ei_local->skb_free[i] = 0;
+		ei_local->txd_pool_info[i] = i + 1;
+		ei_local->txd_pool[i].txd_info3.LS = 1;
+		ei_local->txd_pool[i].txd_info3.DDONE = 1;
+	}
+
+	init_pseudo_link_list(ei_local);
+
+	/* get free txd from txd pool */
+	txd_idx = get_free_txd(ei_local, 0);
+	ei_local->tx_cpu_idx = txd_idx;
+	/* add null TXD for transmit */
+	sys_reg_write(QTX_CTX_PTR, get_phy_addr(ei_local, txd_idx));
+	sys_reg_write(QTX_DTX_PTR, get_phy_addr(ei_local, txd_idx));
+
+	/* get free txd from txd pool */
+	txd_idx = get_free_txd(ei_local, 0);
+	ei_local->rls_cpu_idx = txd_idx;
+	/* add null TXD for release */
+	sys_reg_write(QTX_CRX_PTR, get_phy_addr(ei_local, txd_idx));
+	sys_reg_write(QTX_DRX_PTR, get_phy_addr(ei_local, txd_idx));
+
+	/*Reserve 4 TXD for each physical queue */
+	/* NOTE(review): the loop bound is hard-coded to 16 rather than
+	 * NUM_PQ (see the commented-out line) -- confirm 16 covers all
+	 * physical queues for these chips.
+	 */
+	if (ei_local->chip_name == MT7623_FE || ei_local->chip_name == MT7621_FE ||
+	    ei_local->chip_name == LEOPARD_FE) {
+		//for (i = 0; i < NUM_PQ; i++)
+		for (i = 0; i < 16; i++)
+			sys_reg_write(QTX_CFG_0 + QUEUE_OFFSET * i,
+				      (NUM_PQ_RESV | (NUM_PQ_RESV << 8)));
+	}
+
+	sys_reg_write(QTX_SCH_1, 0x80000000);
+#if 0
+	if (ei_local->chip_name == MT7622_FE) {
+		for (i = 0; i < NUM_PQ; i++) {
+			if (i <= 15) {
+				sys_reg_write(QDMA_PAGE, 0);
+				sys_reg_write(QTX_CFG_0 + QUEUE_OFFSET * i,
+					      (NUM_PQ_RESV |
+					       (NUM_PQ_RESV << 8)));
+			} else if (i > 15 && i <= 31) {
+				sys_reg_write(QDMA_PAGE, 1);
+				sys_reg_write(QTX_CFG_0 +
+					      QUEUE_OFFSET * (i - 16),
+					      (NUM_PQ_RESV |
+					       (NUM_PQ_RESV << 8)));
+			} else if (i > 31 && i <= 47) {
+				sys_reg_write(QDMA_PAGE, 2);
+				sys_reg_write(QTX_CFG_0 +
+					      QUEUE_OFFSET * (i - 32),
+					      (NUM_PQ_RESV |
+					       (NUM_PQ_RESV << 8)));
+			} else if (i > 47 && i <= 63) {
+				sys_reg_write(QDMA_PAGE, 3);
+				sys_reg_write(QTX_CFG_0 +
+					      QUEUE_OFFSET * (i - 48),
+					      (NUM_PQ_RESV |
+					       (NUM_PQ_RESV << 8)));
+			}
+		}
+		sys_reg_write(QDMA_PAGE, 0);
+	}
+#endif
+
+	return 1;
+}
+
+/* sfq_init - configure hardware SFQ (virtual queue) tables.
+ *
+ * Programs the VQTX registers, then allocates one coherent SFQ table
+ * per virtual queue (4 tables, or 8 on MT7622/LEOPARD) and hands their
+ * physical addresses to hardware.  Entries are poisoned with
+ * 0xdeadbeef head/tail pointers so unused slots are recognizable.
+ *
+ * Returns 0 on success, 1 on allocation failure (note: inverted from
+ * the usual bool convention used by qdma_tx_desc_alloc()).
+ *
+ * NOTE(review): the local sfq0..sfq3 shadow the file-scope globals of
+ * the same names, so the globals are never set here -- confirm this is
+ * intended.
+ * NOTE(review): each table is memset/initialized BEFORE its NULL check
+ * below; a failed dma_alloc_coherent() would be dereferenced first.
+ */
+bool sfq_init(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	unsigned int reg_val;
+	dma_addr_t sfq_phy0;
+	dma_addr_t sfq_phy1;
+	dma_addr_t sfq_phy2;
+	dma_addr_t sfq_phy3;
+	struct SFQ_table *sfq0 = NULL;
+	struct SFQ_table *sfq1 = NULL;
+	struct SFQ_table *sfq2 = NULL;
+	struct SFQ_table *sfq3 = NULL;
+
+	dma_addr_t sfq_phy4;
+	dma_addr_t sfq_phy5;
+	dma_addr_t sfq_phy6;
+	dma_addr_t sfq_phy7;
+	struct SFQ_table *sfq4 = NULL;
+	struct SFQ_table *sfq5 = NULL;
+	struct SFQ_table *sfq6 = NULL;
+	struct SFQ_table *sfq7 = NULL;
+
+	int i = 0;
+
+	reg_val = sys_reg_read(VQTX_GLO);
+	reg_val = reg_val | VQTX_MIB_EN;
+	/* Virtual table extends to 32bytes */
+	sys_reg_write(VQTX_GLO, reg_val);
+	reg_val = sys_reg_read(VQTX_GLO);
+	/* MT7622/LEOPARD expose 8 virtual queues; older chips have 4. */
+	if (ei_local->chip_name == MT7622_FE || ei_local->chip_name == LEOPARD_FE) {
+		sys_reg_write(VQTX_NUM,
+			      (VQTX_NUM_0) | (VQTX_NUM_1) | (VQTX_NUM_2) |
+			      (VQTX_NUM_3) | (VQTX_NUM_4) | (VQTX_NUM_5) |
+			      (VQTX_NUM_6) | (VQTX_NUM_7));
+	} else {
+		sys_reg_write(VQTX_NUM,
+			      (VQTX_NUM_0) | (VQTX_NUM_1) | (VQTX_NUM_2) |
+			      (VQTX_NUM_3));
+	}
+
+	/* 10 s change hash algorithm */
+	sys_reg_write(VQTX_HASH_CFG, 0xF002710);
+
+	if (ei_local->chip_name == MT7622_FE || ei_local->chip_name == LEOPARD_FE)
+		sys_reg_write(VQTX_VLD_CFG, 0xeca86420)
+	else
+		sys_reg_write(VQTX_VLD_CFG, 0xc840);
+	sys_reg_write(VQTX_HASH_SD, 0x0D);
+	sys_reg_write(QDMA_FC_THRES, 0x9b9b4444);
+	sys_reg_write(QDMA_HRED1, 0);
+	sys_reg_write(QDMA_HRED2, 0);
+	sys_reg_write(QDMA_SRED1, 0);
+	sys_reg_write(QDMA_SRED2, 0);
+	if (ei_local->chip_name == MT7622_FE || ei_local->chip_name == LEOPARD_FE) {
+		sys_reg_write(VQTX_0_3_BIND_QID,
+			      (VQTX_0_BIND_QID) | (VQTX_1_BIND_QID) |
+			      (VQTX_2_BIND_QID) | (VQTX_3_BIND_QID));
+		sys_reg_write(VQTX_4_7_BIND_QID,
+			      (VQTX_4_BIND_QID) | (VQTX_5_BIND_QID) |
+			      (VQTX_6_BIND_QID) | (VQTX_7_BIND_QID));
+		pr_err("VQTX_0_3_BIND_QID =%x\n",
+		       sys_reg_read(VQTX_0_3_BIND_QID));
+		pr_err("VQTX_4_7_BIND_QID =%x\n",
+		       sys_reg_read(VQTX_4_7_BIND_QID));
+	}
+
+	sfq0 = dma_alloc_coherent(&ei_local->qdma_pdev->dev,
+				  VQ_NUM0 * sizeof(struct SFQ_table), &sfq_phy0,
+				  GFP_KERNEL);
+
+	memset(sfq0, 0x0, VQ_NUM0 * sizeof(struct SFQ_table));
+	for (i = 0; i < VQ_NUM0; i++) {
+		sfq0[i].sfq_info1.VQHPTR = 0xdeadbeef;
+		sfq0[i].sfq_info2.VQTPTR = 0xdeadbeef;
+	}
+	sfq1 = dma_alloc_coherent(&ei_local->qdma_pdev->dev,
+				  VQ_NUM1 * sizeof(struct SFQ_table), &sfq_phy1,
+				  GFP_KERNEL);
+	memset(sfq1, 0x0, VQ_NUM1 * sizeof(struct SFQ_table));
+	for (i = 0; i < VQ_NUM1; i++) {
+		sfq1[i].sfq_info1.VQHPTR = 0xdeadbeef;
+		sfq1[i].sfq_info2.VQTPTR = 0xdeadbeef;
+	}
+
+	sfq2 = dma_alloc_coherent(&ei_local->qdma_pdev->dev,
+				  VQ_NUM2 * sizeof(struct SFQ_table), &sfq_phy2,
+				  GFP_KERNEL);
+	memset(sfq2, 0x0, VQ_NUM2 * sizeof(struct SFQ_table));
+	for (i = 0; i < VQ_NUM2; i++) {
+		sfq2[i].sfq_info1.VQHPTR = 0xdeadbeef;
+		sfq2[i].sfq_info2.VQTPTR = 0xdeadbeef;
+	}
+
+	sfq3 = dma_alloc_coherent(&ei_local->qdma_pdev->dev,
+				  VQ_NUM3 * sizeof(struct SFQ_table), &sfq_phy3,
+				  GFP_KERNEL);
+	memset(sfq3, 0x0, VQ_NUM3 * sizeof(struct SFQ_table));
+	for (i = 0; i < VQ_NUM3; i++) {
+		sfq3[i].sfq_info1.VQHPTR = 0xdeadbeef;
+		sfq3[i].sfq_info2.VQTPTR = 0xdeadbeef;
+	}
+	/* NOTE(review): this check comes after the tables were already
+	 * written -- a NULL return above would have crashed by now.
+	 */
+	if (unlikely((!sfq0)) || unlikely((!sfq1)) ||
+	    unlikely((!sfq2)) || unlikely((!sfq3))) {
+		pr_err("QDMA SFQ0~3 VQ not available...\n");
+		return 1;
+	}
+	if (ei_local->chip_name == MT7622_FE || ei_local->chip_name == LEOPARD_FE) {
+		sfq4 =
+		    dma_alloc_coherent(&ei_local->qdma_pdev->dev,
+				       VQ_NUM4 * sizeof(struct SFQ_table),
+				       &sfq_phy4, GFP_KERNEL);
+		memset(sfq4, 0x0, VQ_NUM4 * sizeof(struct SFQ_table));
+		for (i = 0; i < VQ_NUM4; i++) {
+			sfq4[i].sfq_info1.VQHPTR = 0xdeadbeef;
+			sfq4[i].sfq_info2.VQTPTR = 0xdeadbeef;
+		}
+		sfq5 =
+		    dma_alloc_coherent(&ei_local->qdma_pdev->dev,
+				       VQ_NUM5 * sizeof(struct SFQ_table),
+				       &sfq_phy5, GFP_KERNEL);
+		memset(sfq5, 0x0, VQ_NUM5 * sizeof(struct SFQ_table));
+		for (i = 0; i < VQ_NUM5; i++) {
+			sfq5[i].sfq_info1.VQHPTR = 0xdeadbeef;
+			sfq5[i].sfq_info2.VQTPTR = 0xdeadbeef;
+		}
+		sfq6 =
+		    dma_alloc_coherent(&ei_local->qdma_pdev->dev,
+				       VQ_NUM6 * sizeof(struct SFQ_table),
+				       &sfq_phy6, GFP_KERNEL);
+		memset(sfq6, 0x0, VQ_NUM6 * sizeof(struct SFQ_table));
+		for (i = 0; i < VQ_NUM6; i++) {
+			sfq6[i].sfq_info1.VQHPTR = 0xdeadbeef;
+			sfq6[i].sfq_info2.VQTPTR = 0xdeadbeef;
+		}
+		sfq7 =
+		    dma_alloc_coherent(&ei_local->qdma_pdev->dev,
+				       VQ_NUM7 * sizeof(struct SFQ_table),
+				       &sfq_phy7, GFP_KERNEL);
+		memset(sfq7, 0x0, VQ_NUM7 * sizeof(struct SFQ_table));
+		for (i = 0; i < VQ_NUM7; i++) {
+			sfq7[i].sfq_info1.VQHPTR = 0xdeadbeef;
+			sfq7[i].sfq_info2.VQTPTR = 0xdeadbeef;
+		}
+		if (unlikely((!sfq4)) || unlikely((!sfq5)) ||
+		    unlikely((!sfq6)) || unlikely((!sfq7))) {
+			pr_err("QDMA SFQ4~7 VQ not available...\n");
+			return 1;
+		}
+	}
+
+	pr_err("*****sfq_phy0 is 0x%p!!!*******\n", (void *)sfq_phy0);
+	pr_err("*****sfq_phy1 is 0x%p!!!*******\n", (void *)sfq_phy1);
+	pr_err("*****sfq_phy2 is 0x%p!!!*******\n", (void *)sfq_phy2);
+	pr_err("*****sfq_phy3 is 0x%p!!!*******\n", (void *)sfq_phy3);
+	pr_err("*****sfq_virt0 is 0x%p!!!*******\n", sfq0);
+	pr_err("*****sfq_virt1 is 0x%p!!!*******\n", sfq1);
+	pr_err("*****sfq_virt2 is 0x%p!!!*******\n", sfq2);
+	pr_err("*****sfq_virt3 is 0x%p!!!*******\n", sfq3);
+	if (ei_local->chip_name == MT7622_FE || ei_local->chip_name == LEOPARD_FE) {
+		pr_err("*****sfq_phy4 is 0x%p!!!*******\n", (void *)sfq_phy4);
+		pr_err("*****sfq_phy5 is 0x%p!!!*******\n", (void *)sfq_phy5);
+		pr_err("*****sfq_phy6 is 0x%p!!!*******\n", (void *)sfq_phy6);
+		pr_err("*****sfq_phy7 is 0x%p!!!*******\n", (void *)sfq_phy7);
+		pr_err("*****sfq_virt4 is 0x%p!!!*******\n", sfq4);
+		pr_err("*****sfq_virt5 is 0x%p!!!*******\n", sfq5);
+		pr_err("*****sfq_virt6 is 0x%p!!!*******\n", sfq6);
+		pr_err("*****sfq_virt7 is 0x%p!!!*******\n", sfq7);
+	}
+
+	/* Hand the table physical addresses to the VQTX engine. */
+	sys_reg_write(VQTX_TB_BASE0, (u32)sfq_phy0);
+	sys_reg_write(VQTX_TB_BASE1, (u32)sfq_phy1);
+	sys_reg_write(VQTX_TB_BASE2, (u32)sfq_phy2);
+	sys_reg_write(VQTX_TB_BASE3, (u32)sfq_phy3);
+	if (ei_local->chip_name == MT7622_FE || ei_local->chip_name == LEOPARD_FE) {
+		sys_reg_write(VQTX_TB_BASE4, (u32)sfq_phy4);
+		sys_reg_write(VQTX_TB_BASE5, (u32)sfq_phy5);
+		sys_reg_write(VQTX_TB_BASE6, (u32)sfq_phy6);
+		sys_reg_write(VQTX_TB_BASE7, (u32)sfq_phy7);
+	}
+
+	return 0;
+}
+
+/* fq_qdma_init - set up the QDMA forwarding free queue (FQ).
+ *
+ * Allocates NUM_QDMA_PAGE descriptors plus one data page per
+ * descriptor, points each TXD at its page (SDP), chains them via NDP,
+ * and programs the FQ head/tail/count/buffer-length registers.
+ *
+ * Returns 1 on success, 0 on allocation failure (no cleanup of the
+ * first allocation is done on the second failure path).
+ */
+bool fq_qdma_init(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	/* struct QDMA_txdesc *free_head = NULL; */
+	dma_addr_t phy_free_head;
+	dma_addr_t phy_free_tail;
+	unsigned int *free_page_head = NULL;
+	dma_addr_t phy_free_page_head;
+	int i;
+
+	free_head = dma_alloc_coherent(&ei_local->qdma_pdev->dev,
+				       NUM_QDMA_PAGE *
+				       QTXD_LEN, &phy_free_head, GFP_KERNEL);
+
+	if (unlikely(!free_head)) {
+		pr_err("QDMA FQ decriptor not available...\n");
+		return 0;
+	}
+	memset(free_head, 0x0, QTXD_LEN * NUM_QDMA_PAGE);
+
+	free_page_head =
+	    dma_alloc_coherent(&ei_local->qdma_pdev->dev,
+			       NUM_QDMA_PAGE * QDMA_PAGE_SIZE,
+			       &phy_free_page_head, GFP_KERNEL);
+
+	if (unlikely(!free_page_head)) {
+		pr_err("QDMA FQ page not available...\n");
+		return 0;
+	}
+	/* Point each TXD at its page and chain it to the next TXD; the
+	 * last descriptor is left unlinked (NDP stays 0).
+	 */
+	for (i = 0; i < NUM_QDMA_PAGE; i++) {
+		free_head[i].txd_info1.SDP =
+		    (phy_free_page_head + (i * QDMA_PAGE_SIZE));
+		if (i < (NUM_QDMA_PAGE - 1)) {
+			free_head[i].txd_info2.NDP =
+			    (phy_free_head + ((i + 1) * QTXD_LEN));
+		}
+		free_head[i].txd_info3.SDL = QDMA_PAGE_SIZE;
+	}
+	phy_free_tail =
+	    (phy_free_head + (u32)((NUM_QDMA_PAGE - 1) * QTXD_LEN));
+
+	pr_err("phy_free_head is 0x%p!!!\n", (void *)phy_free_head);
+	pr_err("phy_free_tail_phy is 0x%p!!!\n", (void *)phy_free_tail);
+	sys_reg_write(QDMA_FQ_HEAD, (u32)phy_free_head);
+	sys_reg_write(QDMA_FQ_TAIL, (u32)phy_free_tail);
+	sys_reg_write(QDMA_FQ_CNT, ((num_tx_desc << 16) | NUM_QDMA_PAGE));
+	sys_reg_write(QDMA_FQ_BLEN, QDMA_PAGE_SIZE << 16);
+	pr_info("gmac1_txd_num:%d; gmac2_txd_num:%d; num_tx_desc:%d\n",
+		gmac1_txd_num, gmac2_txd_num, num_tx_desc);
+	ei_local->free_head = free_head;
+	ei_local->phy_free_head = phy_free_head;
+	ei_local->free_page_head = free_page_head;
+	ei_local->phy_free_page_head = phy_free_page_head;
+	ei_local->tx_ring_full = 0;
+	return 1;
+}
+
+/* Protocol class produced by sfq_parse_layer_info():
+ * 1 = IPv4 other, 2 = IPV4_HNAPT, 3 = IPV6_3T, 4 = IPV6_5T.
+ */
+int sfq_prot;
+
+#if (sfq_debug)
+int udp_source_port;
+int tcp_source_port;
+int ack_packt;	/* 1 when the frame looked like a TCP ACK of interest */
+#endif
+/* sfq_parse_layer_info - extract L2/L3/L4 headers for HW SFQ hashing.
+ *
+ * Fills the global sfq_parse_result (MACs, ethertype, optional VLAN
+ * offset, IP and TCP/UDP headers) and sets the global sfq_prot class.
+ *
+ * Returns 1 for a fragmented IPv4 packet (caller should not use L4
+ * info), 0 otherwise.
+ *
+ * NOTE(review): sfq_parse_result.ppp_tag is zeroed by the memset and
+ * never assigned before being tested, so the ETH_P_PPP_SES branches
+ * can never match as written -- confirm whether PPPoE support is
+ * intended here.
+ * NOTE(review): sfq_prot is a global and is not reset for non-IP
+ * frames, so it retains the value from the previous packet.
+ */
+int sfq_parse_layer_info(struct sk_buff *skb)
+{
+	struct vlan_hdr *vh_sfq = NULL;
+	struct ethhdr *eth_sfq = NULL;
+	struct iphdr *iph_sfq = NULL;
+	struct ipv6hdr *ip6h_sfq = NULL;
+	struct tcphdr *th_sfq = NULL;
+	struct udphdr *uh_sfq = NULL;
+
+	memset(&sfq_parse_result, 0, sizeof(sfq_parse_result));
+	eth_sfq = (struct ethhdr *)skb->data;
+	ether_addr_copy(sfq_parse_result.dmac, eth_sfq->h_dest);
+	ether_addr_copy(sfq_parse_result.smac, eth_sfq->h_source);
+	/* memcpy(sfq_parse_result.dmac, eth_sfq->h_dest, ETH_ALEN); */
+	/* memcpy(sfq_parse_result.smac, eth_sfq->h_source, ETH_ALEN); */
+	sfq_parse_result.eth_type = eth_sfq->h_proto;
+
+	/* Skip one 802.1Q tag if present; vlan1_gap offsets later headers. */
+	if (sfq_parse_result.eth_type == htons(ETH_P_8021Q)) {
+		sfq_parse_result.vlan1_gap = VLAN_HLEN;
+		vh_sfq = (struct vlan_hdr *)(skb->data + ETH_HLEN);
+		sfq_parse_result.eth_type = vh_sfq->h_vlan_encapsulated_proto;
+	} else {
+		sfq_parse_result.vlan1_gap = 0;
+	}
+
+	/* set layer4 start addr */
+	if ((sfq_parse_result.eth_type == htons(ETH_P_IP)) ||
+	    (sfq_parse_result.eth_type == htons(ETH_P_PPP_SES) &&
+	     sfq_parse_result.ppp_tag == htons(PPP_IP))) {
+		iph_sfq =
+		    (struct iphdr *)(skb->data + ETH_HLEN +
+				     (sfq_parse_result.vlan1_gap));
+
+		/* prepare layer3/layer4 info */
+		memcpy(&sfq_parse_result.iph, iph_sfq, sizeof(struct iphdr));
+		if (iph_sfq->protocol == IPPROTO_TCP) {
+			th_sfq =
+			    (struct tcphdr *)(skb->data + ETH_HLEN +
+					      (sfq_parse_result.vlan1_gap) +
+					      (iph_sfq->ihl * 4));
+			memcpy(&sfq_parse_result.th, th_sfq,
+			       sizeof(struct tcphdr));
+#if (sfq_debug)
+			tcp_source_port = ntohs(sfq_parse_result.th.source);
+			udp_source_port = 0;
+			/* tcp ack packet */
+			if (ntohl(sfq_parse_result.iph.saddr) == 0xa0a0a04)
+				ack_packt = 1;
+			else
+				ack_packt = 0;
+#endif
+			sfq_prot = 2;	/* IPV4_HNAPT */
+			if (iph_sfq->frag_off & htons(IP_MF | IP_OFFSET))
+				return 1;
+		} else if (iph_sfq->protocol == IPPROTO_UDP) {
+			uh_sfq =
+			    (struct udphdr *)(skb->data + ETH_HLEN +
+					      (sfq_parse_result.vlan1_gap) +
+					      iph_sfq->ihl * 4);
+			memcpy(&sfq_parse_result.uh, uh_sfq,
+			       sizeof(struct udphdr));
+#if (sfq_debug)
+			udp_source_port = ntohs(sfq_parse_result.uh.source);
+			tcp_source_port = 0;
+			ack_packt = 0;
+#endif
+			sfq_prot = 2;	/* IPV4_HNAPT */
+			if (iph_sfq->frag_off & htons(IP_MF | IP_OFFSET))
+				return 1;
+		} else {
+			sfq_prot = 1;
+		}
+	} else if (sfq_parse_result.eth_type == htons(ETH_P_IPV6) ||
+		   (sfq_parse_result.eth_type == htons(ETH_P_PPP_SES) &&
+		    sfq_parse_result.ppp_tag == htons(PPP_IPV6))) {
+		ip6h_sfq =
+		    (struct ipv6hdr *)(skb->data + ETH_HLEN +
+				       (sfq_parse_result.vlan1_gap));
+		if (ip6h_sfq->nexthdr == NEXTHDR_TCP) {
+			sfq_prot = 4;	/* IPV6_5T */
+#if (sfq_debug)
+			if (ntohl(sfq_parse_result.ip6h.saddr.s6_addr32[3]) ==
+			    8)
+				ack_packt = 1;
+			else
+				ack_packt = 0;
+#endif
+		} else if (ip6h_sfq->nexthdr == NEXTHDR_UDP) {
+#if (sfq_debug)
+			ack_packt = 0;
+#endif
+			sfq_prot = 4;	/* IPV6_5T */
+
+		} else {
+			sfq_prot = 3;	/* IPV6_3T */
+		}
+	}
+	return 0;
+}
+
+/* rt2880_qdma_eth_send - transmit one linear skb through QDMA.
+ *
+ * Stages a descriptor in a stack-local dummy_desc, then under the page
+ * lock copies it into the TXD currently owned by the CPU, records the
+ * skb for housekeeping, advances tx_cpu_idx to a freshly popped TXD,
+ * and kicks hardware via QTX_CTX_PTR.
+ *
+ * @gmac_no selects the forward port (2 = pseudo GE2 device), @ring_no
+ * the TX queue.  Returns the transmitted length.
+ *
+ * NOTE(review): dummy_desc is never zero-initialized and txd_info7 is
+ * never assigned, yet it is copied into the ring below -- stack
+ * garbage reaches the descriptor; confirm hardware ignores info7 here.
+ */
+int rt2880_qdma_eth_send(struct END_DEVICE *ei_local, struct net_device *dev,
+			 struct sk_buff *skb, int gmac_no, int ring_no)
+{
+	unsigned int length = skb->len;
+	struct QDMA_txdesc *cpu_ptr, *prev_cpu_ptr;
+	struct QDMA_txdesc dummy_desc;
+	struct PSEUDO_ADAPTER *p_ad;
+	unsigned long flags;
+	unsigned int next_txd_idx, qidx;
+
+	cpu_ptr = &dummy_desc;
+	/* 2. prepare data */
+	dma_sync_single_for_device(&ei_local->qdma_pdev->dev,
+				   virt_to_phys(skb->data),
+				   skb->len, DMA_TO_DEVICE);
+	/* cpu_ptr->txd_info1.SDP = VIRT_TO_PHYS(skb->data); */
+	cpu_ptr->txd_info1.SDP = virt_to_phys(skb->data);
+	cpu_ptr->txd_info3.SDL = skb->len;
+	if (ei_local->features & FE_HW_SFQ) {
+		sfq_parse_layer_info(skb);
+		cpu_ptr->txd_info5.VQID0 = 1;	/* 1:HW hash 0:CPU */
+		cpu_ptr->txd_info5.PROT = sfq_prot;
+		/* no vlan */
+		cpu_ptr->txd_info5.IPOFST = 14 + (sfq_parse_result.vlan1_gap);
+	}
+	cpu_ptr->txd_info4.FPORT = gmac_no;
+
+	if (ei_local->features & FE_CSUM_OFFLOAD) {
+		if (skb->ip_summed == CHECKSUM_PARTIAL)
+			cpu_ptr->txd_info5.TUI_CO = 7;
+		else
+			cpu_ptr->txd_info5.TUI_CO = 0;
+	}
+
+	if (ei_local->features & FE_HW_VLAN_TX) {
+		if (skb_vlan_tag_present(skb)) {
+			cpu_ptr->txd_info6.INSV_1 = 1;
+			cpu_ptr->txd_info6.VLAN_TAG_1 = skb_vlan_tag_get(skb);
+			    cpu_ptr->txd_info4.QID = skb_vlan_tag_get(skb);
+		} else {
+			cpu_ptr->txd_info4.QID = ring_no;
+			cpu_ptr->txd_info6.INSV_1 = 0;
+			cpu_ptr->txd_info6.VLAN_TAG_1 = 0;
+		}
+	} else {
+		cpu_ptr->txd_info6.INSV_1 = 0;
+		cpu_ptr->txd_info6.VLAN_TAG_1 = 0;
+	}
+	/* NOTE(review): unconditionally overwrites the QID chosen in the
+	 * VLAN block above; only the mark/web-UI blocks below can change
+	 * it again.  Confirm the VLAN-based QID is intentionally dead.
+	 */
+	cpu_ptr->txd_info4.QID = 0;
+	/* cpu_ptr->txd_info4.QID = ring_no; */
+
+	if ((ei_local->features & QDMA_QOS_MARK) && (skb->mark != 0)) {
+		if (skb->mark < 64) {
+			qidx = M2Q_table[skb->mark];
+			/* NOTE(review): the first store is immediately
+			 * overwritten -- only the low 4 bits survive.
+			 */
+			cpu_ptr->txd_info4.QID = ((qidx & 0x30) >> 4);
+			cpu_ptr->txd_info4.QID = (qidx & 0x0f);
+		} else {
+			pr_debug("skb->mark out of range\n");
+			cpu_ptr->txd_info4.QID = 0;
+			cpu_ptr->txd_info4.QID = 0;
+		}
+	}
+	/* QoS Web UI used */
+	if ((ei_local->features & QDMA_QOS_WEB) && (lan_wan_separate == 1)) {
+		if (web_sfq_enable == 1 && (skb->mark == 2)) {
+			if (gmac_no == 1)
+				cpu_ptr->txd_info4.QID = HW_SFQ_DL;
+			else
+				cpu_ptr->txd_info4.QID = HW_SFQ_UP;
+		} else if (gmac_no == 2) {
+			cpu_ptr->txd_info4.QID += 8;
+		}
+	}
+#if defined(CONFIG_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+	if (IS_MAGIC_TAG_PROTECT_VALID_HEAD(skb)) {
+		if (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_PPE) {
+			if (ppe_hook_rx_eth) {
+				cpu_ptr->txd_info4.FPORT = 3;	/* PPE */
+				FOE_MAGIC_TAG(skb) = 0;
+			}
+		}
+	} else if (IS_MAGIC_TAG_PROTECT_VALID_TAIL(skb)) {
+		if (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_PPE) {
+			if (ppe_hook_rx_eth) {
+				cpu_ptr->txd_info4.FPORT = 3;	/* PPE */
+				FOE_MAGIC_TAG(skb) = 0;
+			}
+		}
+	}
+#endif
+
+	/* dma_sync_single_for_device(NULL, virt_to_phys(skb->data), */
+	/* skb->len, DMA_TO_DEVICE); */
+	cpu_ptr->txd_info4.SWC = 1;
+
+	/* 5. move CPU_PTR to new TXD */
+	cpu_ptr->txd_info5.TSO = 0;
+	cpu_ptr->txd_info3.LS = 1;
+	cpu_ptr->txd_info3.DDONE = 0;
+	next_txd_idx = get_free_txd(ei_local, ring_no);
+	cpu_ptr->txd_info2.NDP = get_phy_addr(ei_local, next_txd_idx);
+	spin_lock_irqsave(&ei_local->page_lock, flags);
+	prev_cpu_ptr = ei_local->txd_pool + ei_local->tx_cpu_idx;
+	/* update skb_free */
+	ei_local->skb_free[ei_local->tx_cpu_idx] = skb;
+	/* update tx cpu idx */
+	ei_local->tx_cpu_idx = next_txd_idx;
+	/* update txd info -- info3 (with DDONE) is written last so the
+	 * descriptor is not released to hardware half-filled
+	 */
+	prev_cpu_ptr->txd_info1 = dummy_desc.txd_info1;
+	prev_cpu_ptr->txd_info2 = dummy_desc.txd_info2;
+	prev_cpu_ptr->txd_info4 = dummy_desc.txd_info4;
+	prev_cpu_ptr->txd_info5 = dummy_desc.txd_info5;
+	prev_cpu_ptr->txd_info6 = dummy_desc.txd_info6;
+	prev_cpu_ptr->txd_info7 = dummy_desc.txd_info7;
+	prev_cpu_ptr->txd_info3 = dummy_desc.txd_info3;
+	/* NOTE: add memory barrier to avoid
+	 * DMA access memory earlier than memory written
+	 */
+	wmb();
+	/* update CPU pointer */
+	sys_reg_write(QTX_CTX_PTR,
+		      get_phy_addr(ei_local, ei_local->tx_cpu_idx));
+	spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+	if (ei_local->features & FE_GE2_SUPPORT) {
+		if (gmac_no == 2) {
+			if (ei_local->pseudo_dev) {
+				p_ad = netdev_priv(ei_local->pseudo_dev);
+				p_ad->stat.tx_packets++;
+
+				p_ad->stat.tx_bytes += length;
+			}
+		} else {
+			ei_local->stat.tx_packets++;
+			ei_local->stat.tx_bytes += skb->len;
+		}
+	} else {
+		ei_local->stat.tx_packets++;
+		ei_local->stat.tx_bytes += skb->len;
+	}
+	if (ei_local->features & FE_INT_NAPI) {
+		if (ei_local->tx_full == 1) {
+			ei_local->tx_full = 0;
+			netif_wake_queue(dev);
+		}
+	}
+
+	return length;
+}
+
+/* rt2880_qdma_eth_send_tso - transmit a (possibly) multi-fragment /
+ * TSO skb through QDMA.
+ *
+ * Splits the linear part and each page fragment into TXDs of at most
+ * MAX_QTXD_LEN bytes.  Continuation TXDs are recorded in skb_free[] as
+ * magic_id; only the last-segment TXD stores the skb, so housekeeping
+ * frees it exactly once.  For gso_segs > 1 the hardware TSO bit is set
+ * and the TCP checksum field is overloaded with the MSS (presumably
+ * consumed by the QDMA TSO engine -- confirm against the QDMA spec).
+ *
+ * Returns the transmitted length.
+ *
+ * NOTE(review): init_dummy_desc is never zero-initialized and its
+ * txd_info7 is never assigned, yet it is copied into the ring at the
+ * end -- stack garbage reaches the first descriptor.
+ */
+int rt2880_qdma_eth_send_tso(struct END_DEVICE *ei_local,
+			     struct net_device *dev, struct sk_buff *skb,
+			     int gmac_no, int ring_no)
+{
+	unsigned int length = skb->len;
+	struct QDMA_txdesc *cpu_ptr, *prev_cpu_ptr;
+	struct QDMA_txdesc dummy_desc;
+	struct QDMA_txdesc init_dummy_desc;
+	int ctx_idx;
+	struct iphdr *iph = NULL;
+	struct QDMA_txdesc *init_cpu_ptr;
+	struct tcphdr *th = NULL;
+	skb_frag_t * frag;
+	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+	unsigned int len, size, frag_txd_num, qidx;
+	dma_addr_t offset;
+	unsigned long flags;
+	int i;
+	int init_qid, init_qid1;
+	struct ipv6hdr *ip6h = NULL;
+	struct PSEUDO_ADAPTER *p_ad;
+
+	init_cpu_ptr = &init_dummy_desc;
+	cpu_ptr = &init_dummy_desc;
+
+	/* len = length of the linear (non-paged) part only */
+	len = length - skb->data_len;
+	dma_sync_single_for_device(&ei_local->qdma_pdev->dev,
+				   virt_to_phys(skb->data),
+				   len,
+				   DMA_TO_DEVICE);
+	offset = virt_to_phys(skb->data);
+	cpu_ptr->txd_info1.SDP = offset;
+	if (len > MAX_QTXD_LEN) {
+		cpu_ptr->txd_info3.SDL = MAX_QTXD_LEN;
+		cpu_ptr->txd_info3.LS = 0;
+		len -= MAX_QTXD_LEN;
+		offset += MAX_QTXD_LEN;
+	} else {
+		cpu_ptr->txd_info3.SDL = len;
+		cpu_ptr->txd_info3.LS = nr_frags ? 0 : 1;
+		len = 0;
+	}
+	if (ei_local->features & FE_HW_SFQ) {
+		sfq_parse_layer_info(skb);
+
+		cpu_ptr->txd_info5.VQID0 = 1;
+		cpu_ptr->txd_info5.PROT = sfq_prot;
+		/* no vlan */
+		cpu_ptr->txd_info5.IPOFST = 14 + (sfq_parse_result.vlan1_gap);
+	}
+	if (gmac_no == 1)
+		cpu_ptr->txd_info4.FPORT = 1;
+	else
+		cpu_ptr->txd_info4.FPORT = 2;
+
+	cpu_ptr->txd_info5.TSO = 0;
+	cpu_ptr->txd_info4.QID = 0;
+	/* cpu_ptr->txd_info4.QID = ring_no; */
+	if ((ei_local->features & QDMA_QOS_MARK) && (skb->mark != 0)) {
+		if (skb->mark < 64) {
+			qidx = M2Q_table[skb->mark];
+			cpu_ptr->txd_info4.QID = qidx;
+
+		} else {
+			pr_debug("skb->mark out of range\n");
+			cpu_ptr->txd_info4.QID = 0;
+
+		}
+	}
+	if (ei_local->features & FE_CSUM_OFFLOAD) {
+		if (skb->ip_summed == CHECKSUM_PARTIAL)
+			cpu_ptr->txd_info5.TUI_CO = 7;
+		else
+			cpu_ptr->txd_info5.TUI_CO = 0;
+	}
+
+	if (ei_local->features & FE_HW_VLAN_TX) {
+		if (skb_vlan_tag_present(skb)) {
+			cpu_ptr->txd_info6.INSV_1 = 1;
+			cpu_ptr->txd_info6.VLAN_TAG_1 = skb_vlan_tag_get(skb);
+			cpu_ptr->txd_info4.QID = skb_vlan_tag_get(skb);
+		} else {
+			cpu_ptr->txd_info4.QID = ring_no;
+			cpu_ptr->txd_info6.INSV_1 = 0;
+			cpu_ptr->txd_info6.VLAN_TAG_1 = 0;
+		}
+	} else {
+		cpu_ptr->txd_info6.INSV_1 = 0;
+		cpu_ptr->txd_info6.VLAN_TAG_1 = 0;
+	}
+
+	if ((ei_local->features & FE_GE2_SUPPORT) && (lan_wan_separate == 1)) {
+		if (web_sfq_enable == 1 && (skb->mark == 2)) {
+			if (gmac_no == 1)
+				cpu_ptr->txd_info4.QID = HW_SFQ_DL;
+			else
+				cpu_ptr->txd_info4.QID = HW_SFQ_UP;
+		} else if (gmac_no == 2) {
+			cpu_ptr->txd_info4.QID += 8;
+		}
+	}
+	/*debug multi tx queue */
+	/* NOTE(review): init_qid and init_qid1 always hold the same
+	 * value and are stored back-to-back below -- one is redundant.
+	 */
+	init_qid = cpu_ptr->txd_info4.QID;
+	init_qid1 = cpu_ptr->txd_info4.QID;
+#if defined(CONFIG_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+	if (IS_MAGIC_TAG_PROTECT_VALID_HEAD(skb)) {
+		if (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_PPE) {
+			if (ppe_hook_rx_eth) {
+				cpu_ptr->txd_info4.FPORT = 3;	/* PPE */
+				FOE_MAGIC_TAG(skb) = 0;
+			}
+		}
+	} else if (IS_MAGIC_TAG_PROTECT_VALID_TAIL(skb)) {
+		if (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_PPE) {
+			if (ppe_hook_rx_eth) {
+				cpu_ptr->txd_info4.FPORT = 3;	/* PPE */
+				FOE_MAGIC_TAG(skb) = 0;
+			}
+		}
+	}
+#endif
+
+	cpu_ptr->txd_info4.SWC = 1;
+
+	ctx_idx = get_free_txd(ei_local, ring_no);
+	cpu_ptr->txd_info2.NDP = get_phy_addr(ei_local, ctx_idx);
+	/*prev_cpu_ptr->txd_info1 = dummy_desc.txd_info1;
+	 *prev_cpu_ptr->txd_info2 = dummy_desc.txd_info2;
+	 *prev_cpu_ptr->txd_info3 = dummy_desc.txd_info3;
+	 *prev_cpu_ptr->txd_info4 = dummy_desc.txd_info4;
+	 */
+	/* Remaining linear data: emit one TXD per MAX_QTXD_LEN chunk. */
+	if (len > 0) {
+		frag_txd_num = cal_frag_txd_num(len);
+		for (frag_txd_num = frag_txd_num; frag_txd_num > 0;
+		     frag_txd_num--) {
+			if (len < MAX_QTXD_LEN)
+				size = len;
+			else
+				size = MAX_QTXD_LEN;
+
+			cpu_ptr = (ei_local->txd_pool + (ctx_idx));
+			dummy_desc.txd_info1 = cpu_ptr->txd_info1;
+			dummy_desc.txd_info2 = cpu_ptr->txd_info2;
+			dummy_desc.txd_info3 = cpu_ptr->txd_info3;
+			dummy_desc.txd_info4 = cpu_ptr->txd_info4;
+			dummy_desc.txd_info5 = cpu_ptr->txd_info5;
+			dummy_desc.txd_info6 = cpu_ptr->txd_info6;
+			dummy_desc.txd_info7 = cpu_ptr->txd_info7;
+			prev_cpu_ptr = cpu_ptr;
+			cpu_ptr = &dummy_desc;
+			cpu_ptr->txd_info4.QID = init_qid;
+			cpu_ptr->txd_info4.QID = init_qid1;
+			cpu_ptr->txd_info1.SDP = offset;
+			cpu_ptr->txd_info3.SDL = size;
+			if ((nr_frags == 0) && (frag_txd_num == 1))
+				cpu_ptr->txd_info3.LS = 1;
+			else
+				cpu_ptr->txd_info3.LS = 0;
+			cpu_ptr->txd_info3.DDONE = 0;
+			cpu_ptr->txd_info4.SWC = 1;
+			/* only the final TXD owns the skb for freeing */
+			if (cpu_ptr->txd_info3.LS == 1)
+				ei_local->skb_free[ctx_idx] = skb;
+			else
+				ei_local->skb_free[ctx_idx] = magic_id;
+			ctx_idx = get_free_txd(ei_local, ring_no);
+			cpu_ptr->txd_info2.NDP =
+			    get_phy_addr(ei_local, ctx_idx);
+			prev_cpu_ptr->txd_info1 = dummy_desc.txd_info1;
+			prev_cpu_ptr->txd_info2 = dummy_desc.txd_info2;
+			prev_cpu_ptr->txd_info3 = dummy_desc.txd_info3;
+			prev_cpu_ptr->txd_info4 = dummy_desc.txd_info4;
+			prev_cpu_ptr->txd_info5 = dummy_desc.txd_info5;
+			prev_cpu_ptr->txd_info6 = dummy_desc.txd_info6;
+			prev_cpu_ptr->txd_info7 = dummy_desc.txd_info7;
+			offset += size;
+			len -= size;
+		}
+	}
+
+	for (i = 0; i < nr_frags; i++) {
+		/* 1. set or get init value for current fragment */
+		offset = 0;
+		frag = &skb_shinfo(skb)->frags[i];
+		len = skb_frag_size(frag);
+		frag_txd_num = cal_frag_txd_num(len);
+		for (frag_txd_num = frag_txd_num;
+		     frag_txd_num > 0; frag_txd_num--) {
+			/* 2. size will be assigned to SDL
+			 * and can't be larger than MAX_TXD_LEN
+			 */
+			if (len < MAX_QTXD_LEN)
+				size = len;
+			else
+				size = MAX_QTXD_LEN;
+
+			/* 3. Update TXD info */
+			cpu_ptr = (ei_local->txd_pool + (ctx_idx));
+			dummy_desc.txd_info1 = cpu_ptr->txd_info1;
+			dummy_desc.txd_info2 = cpu_ptr->txd_info2;
+			dummy_desc.txd_info3 = cpu_ptr->txd_info3;
+			dummy_desc.txd_info4 = cpu_ptr->txd_info4;
+			dummy_desc.txd_info5 = cpu_ptr->txd_info5;
+			dummy_desc.txd_info6 = cpu_ptr->txd_info6;
+			dummy_desc.txd_info7 = cpu_ptr->txd_info7;
+			prev_cpu_ptr = cpu_ptr;
+			cpu_ptr = &dummy_desc;
+			cpu_ptr->txd_info4.QID = init_qid;
+			cpu_ptr->txd_info4.QID = init_qid1;
+			cpu_ptr->txd_info1.SDP = skb_frag_dma_map(&ei_local->qdma_pdev->dev, frag, offset, size, DMA_TO_DEVICE);
+			if (unlikely(dma_mapping_error
+					(&ei_local->qdma_pdev->dev,
+					 cpu_ptr->txd_info1.SDP)))
+				pr_err("[%s]dma_map_page() failed...\n",
+				       __func__);
+
+			cpu_ptr->txd_info3.SDL = size;
+
+			if ((i == (nr_frags - 1)) && (frag_txd_num == 1))
+				cpu_ptr->txd_info3.LS = 1;
+			else
+				cpu_ptr->txd_info3.LS = 0;
+			cpu_ptr->txd_info3.DDONE = 0;
+			cpu_ptr->txd_info4.SWC = 1;
+			/* 4. Update skb_free for housekeeping */
+			if (cpu_ptr->txd_info3.LS == 1)
+				ei_local->skb_free[ctx_idx] = skb;
+			else
+				ei_local->skb_free[ctx_idx] = magic_id;
+
+			/* 5. Get next TXD */
+			ctx_idx = get_free_txd(ei_local, ring_no);
+			cpu_ptr->txd_info2.NDP =
+			    get_phy_addr(ei_local, ctx_idx);
+			prev_cpu_ptr->txd_info1 = dummy_desc.txd_info1;
+			prev_cpu_ptr->txd_info2 = dummy_desc.txd_info2;
+			prev_cpu_ptr->txd_info3 = dummy_desc.txd_info3;
+			prev_cpu_ptr->txd_info4 = dummy_desc.txd_info4;
+			prev_cpu_ptr->txd_info5 = dummy_desc.txd_info5;
+			prev_cpu_ptr->txd_info6 = dummy_desc.txd_info6;
+			prev_cpu_ptr->txd_info7 = dummy_desc.txd_info7;
+			/* 6. Update offset and len. */
+			offset += size;
+			len -= size;
+		}
+	}
+
+	if (skb_shinfo(skb)->gso_segs > 1) {
+		/* TsoLenUpdate(skb->len); */
+
+		/* TCP over IPv4 */
+		iph = (struct iphdr *)skb_network_header(skb);
+		if ((iph->version == 4) && (iph->protocol == IPPROTO_TCP)) {
+			th = (struct tcphdr *)skb_transport_header(skb);
+
+			init_cpu_ptr->txd_info5.TSO = 1;
+
+			/* MSS is passed to hardware in the checksum field */
+			th->check = htons(skb_shinfo(skb)->gso_size);
+
+			dma_sync_single_for_device(&ei_local->qdma_pdev->dev,
+						   virt_to_phys(th),
+						   sizeof(struct
+							  tcphdr),
+						   DMA_TO_DEVICE);
+		}
+		if (ei_local->features & FE_TSO_V6) {
+			ip6h = (struct ipv6hdr *)skb_network_header(skb);
+			if ((ip6h->nexthdr == NEXTHDR_TCP) &&
+			    (ip6h->version == 6)) {
+				th = (struct tcphdr *)skb_transport_header(skb);
+				init_cpu_ptr->txd_info5.TSO = 1;
+				th->check = htons(skb_shinfo(skb)->gso_size);
+				dma_sync_single_for_device(&ei_local->qdma_pdev->dev,
+							   virt_to_phys(th),
+							   sizeof(struct
+								  tcphdr),
+							   DMA_TO_DEVICE);
+			}
+		}
+
+		if (ei_local->features & FE_HW_SFQ) {
+			init_cpu_ptr->txd_info5.VQID0 = 1;
+			init_cpu_ptr->txd_info5.PROT = sfq_prot;
+			/* no vlan */
+			init_cpu_ptr->txd_info5.IPOFST =
+			    14 + (sfq_parse_result.vlan1_gap);
+		}
+	}
+	/* dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE); */
+
+	/* Publish the first descriptor last, under the lock. */
+	init_cpu_ptr->txd_info3.DDONE = 0;
+	spin_lock_irqsave(&ei_local->page_lock, flags);
+	prev_cpu_ptr = ei_local->txd_pool + ei_local->tx_cpu_idx;
+	ei_local->skb_free[ei_local->tx_cpu_idx] = magic_id;
+	ei_local->tx_cpu_idx = ctx_idx;
+	prev_cpu_ptr->txd_info1 = init_dummy_desc.txd_info1;
+	prev_cpu_ptr->txd_info2 = init_dummy_desc.txd_info2;
+	prev_cpu_ptr->txd_info4 = init_dummy_desc.txd_info4;
+	prev_cpu_ptr->txd_info3 = init_dummy_desc.txd_info3;
+	prev_cpu_ptr->txd_info5 = init_dummy_desc.txd_info5;
+	prev_cpu_ptr->txd_info6 = init_dummy_desc.txd_info6;
+	prev_cpu_ptr->txd_info7 = init_dummy_desc.txd_info7;
+
+	/* NOTE: add memory barrier to avoid
+	 * DMA access memory earlier than memory written
+	 */
+	wmb();
+	sys_reg_write(QTX_CTX_PTR,
+		      get_phy_addr(ei_local, ei_local->tx_cpu_idx));
+	spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+	if (ei_local->features & FE_GE2_SUPPORT) {
+		if (gmac_no == 2) {
+			if (ei_local->pseudo_dev) {
+				p_ad = netdev_priv(ei_local->pseudo_dev);
+				p_ad->stat.tx_packets++;
+				p_ad->stat.tx_bytes += length;
+			}
+		} else {
+			ei_local->stat.tx_packets++;
+			ei_local->stat.tx_bytes += skb->len;
+		}
+	} else {
+		ei_local->stat.tx_packets++;
+		ei_local->stat.tx_bytes += skb->len;
+	}
+	if (ei_local->features & FE_INT_NAPI) {
+		if (ei_local->tx_full == 1) {
+			ei_local->tx_full = 0;
+			netif_wake_queue(dev);
+		}
+	}
+
+	return length;
+}
+
+/* QDMA functions */
+/* fe_qdma_wait_dma_idle - poll QDMA_GLO_CFG until both the RX and TX
+ * DMA engines report idle.
+ *
+ * Returns 0 when idle, -1 if the engines are still busy after the
+ * bounded number of polls.  BUGFIX: the original looped forever,
+ * printed one pr_err() per iteration (log flood), and its trailing
+ * "return -1" was unreachable, so callers could never detect a stuck
+ * DMA engine.
+ */
+int fe_qdma_wait_dma_idle(void)
+{
+	unsigned int reg_val;
+	unsigned int retry;
+
+	/* Bounded busy-wait; each poll re-reads the live register. */
+	for (retry = 0; retry < 10000; retry++) {
+		reg_val = sys_reg_read(QDMA_GLO_CFG);
+		if (reg_val & (RX_DMA_BUSY | TX_DMA_BUSY))
+			continue;
+		return 0;
+	}
+
+	pr_err("QDMA not idle: GLO_CFG=0x%08x (RX busy:%u TX busy:%u)\n",
+	       reg_val, !!(reg_val & RX_DMA_BUSY), !!(reg_val & TX_DMA_BUSY));
+	return -1;
+}
+
+/* fe_qdma_rx_dma_init - allocate and program QDMA RX ring 0.
+ *
+ * Allocates a coherent descriptor ring plus one data buffer per
+ * descriptor, maps each buffer for device DMA, then points the
+ * controller at the ring and resets its RX index.
+ *
+ * Returns 0 on success, -ENOMEM on any allocation/mapping failure.
+ * BUGFIX: the dma_alloc_coherent() result was dereferenced without a
+ * NULL check, and the error path leaked every buffer allocated before
+ * the failure as well as the ring itself.
+ */
+int fe_qdma_rx_dma_init(struct net_device *dev)
+{
+	int i;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	unsigned int skb_size;
+	/* Initial QDMA RX Ring */
+
+	/* Buffer layout expected by build_skb(): headroom + payload,
+	 * then the skb_shared_info tail, each region cache aligned.
+	 */
+	skb_size = SKB_DATA_ALIGN(MAX_RX_LENGTH + NET_IP_ALIGN + NET_SKB_PAD) +
+		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	ei_local->qrx_ring =
+	    dma_alloc_coherent(&ei_local->qdma_pdev->dev,
+			       NUM_QRX_DESC * sizeof(struct PDMA_rxdesc),
+			       &ei_local->phy_qrx_ring,
+			       GFP_ATOMIC | __GFP_ZERO);
+	if (!ei_local->qrx_ring) {
+		pr_err("qrx_ring allocation failed!\n");
+		return -ENOMEM;
+	}
+	for (i = 0; i < NUM_QRX_DESC; i++) {
+		ei_local->netrx0_skb_data[i] =
+		    raeth_alloc_skb_data(skb_size, GFP_KERNEL);
+		if (!ei_local->netrx0_skb_data[i]) {
+			pr_err("rx skbuff buffer allocation failed!");
+			goto no_rx_mem;
+		}
+
+		memset(&ei_local->qrx_ring[i], 0, sizeof(struct PDMA_rxdesc));
+		ei_local->qrx_ring[i].rxd_info2.DDONE_bit = 0;
+		ei_local->qrx_ring[i].rxd_info2.LS0 = 0;
+		ei_local->qrx_ring[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
+		ei_local->qrx_ring[i].rxd_info1.PDP0 =
+		    dma_map_single(&ei_local->qdma_pdev->dev,
+				   ei_local->netrx0_skb_data[i] +
+				   NET_SKB_PAD,
+				   MAX_RX_LENGTH,
+				   DMA_FROM_DEVICE);
+		if (unlikely
+		    (dma_mapping_error
+		     (&ei_local->qdma_pdev->dev,
+		      ei_local->qrx_ring[i].rxd_info1.PDP0))) {
+			pr_err("[%s]dma_map_single() failed...\n", __func__);
+			/* release the buffer that could not be mapped */
+			raeth_free_skb_data(ei_local->netrx0_skb_data[i]);
+			ei_local->netrx0_skb_data[i] = NULL;
+			goto no_rx_mem;
+		}
+	}
+	pr_err("\nphy_qrx_ring = 0x%p, qrx_ring = 0x%p\n",
+	       (void *)ei_local->phy_qrx_ring, ei_local->qrx_ring);
+
+	/* Tell the adapter where the RX rings are located. */
+	sys_reg_write(QRX_BASE_PTR_0,
+		      phys_to_bus((u32)ei_local->phy_qrx_ring));
+	sys_reg_write(QRX_MAX_CNT_0, cpu_to_le32((u32)NUM_QRX_DESC));
+	sys_reg_write(QRX_CRX_IDX_0, cpu_to_le32((u32)(NUM_QRX_DESC - 1)));
+
+	sys_reg_write(QDMA_RST_CFG, PST_DRX_IDX0);
+	ei_local->rx_ring[0] = ei_local->qrx_ring;
+
+	return 0;
+
+no_rx_mem:
+	/* Unwind: free every buffer allocated before the failure, then
+	 * the descriptor ring (all of this used to leak).
+	 */
+	while (--i >= 0) {
+		raeth_free_skb_data(ei_local->netrx0_skb_data[i]);
+		ei_local->netrx0_skb_data[i] = NULL;
+	}
+	dma_free_coherent(&ei_local->qdma_pdev->dev,
+			  NUM_QRX_DESC * sizeof(struct PDMA_rxdesc),
+			  ei_local->qrx_ring, ei_local->phy_qrx_ring);
+	ei_local->qrx_ring = NULL;
+	return -ENOMEM;
+}
+
+/* fe_qdma_tx_dma_init - set up the QDMA TX side: optional HW-SFQ
+ * scheduler, the TX descriptor pool (with one dummy NULL TXD handed to
+ * the HW), and the forward queue.
+ *
+ * Returns 0 on success, -1 if either allocation step fails.
+ */
+int fe_qdma_tx_dma_init(struct net_device *dev)
+{
+	bool pass;
+	/* NOTE(review): private state is taken from the global
+	 * dev_raether rather than the 'dev' argument passed to
+	 * sfq_init()/fq_qdma_init() below — presumably both refer to the
+	 * same device; confirm against the caller.
+	 */
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & FE_HW_SFQ)
+		sfq_init(dev);
+	/*tx desc alloc, add a NULL TXD to HW */
+	pass = qdma_tx_desc_alloc();
+	if (!pass)
+		return -1;
+
+	pass = fq_qdma_init(dev);
+	if (!pass)
+		return -1;
+
+	return 0;
+}
+
+/* Tear down QDMA RX ring 0: return the coherent descriptor ring to the
+ * DMA allocator, then drop every receive data buffer.
+ */
+void fe_qdma_rx_dma_deinit(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	int idx;
+
+	/* descriptor ring */
+	dma_free_coherent(&ei_local->qdma_pdev->dev,
+			  NUM_QRX_DESC * sizeof(struct PDMA_rxdesc),
+			  ei_local->qrx_ring, ei_local->phy_qrx_ring);
+
+	/* per-descriptor data buffers */
+	for (idx = 0; idx < NUM_QRX_DESC; idx++) {
+		raeth_free_skb_data(ei_local->netrx0_skb_data[idx]);
+		ei_local->netrx0_skb_data[idx] = NULL;
+	}
+}
+
+/* fe_qdma_tx_dma_deinit - free the TX descriptor pool, the forward
+ * queue pages, and any skbs still attached to descriptors.
+ */
+void fe_qdma_tx_dma_deinit(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	int i;
+
+	/* free TX Ring */
+	if (ei_local->txd_pool)
+		dma_free_coherent(&ei_local->qdma_pdev->dev,
+				  num_tx_desc * QTXD_LEN,
+				  ei_local->txd_pool, ei_local->phy_txd_pool);
+	if (ei_local->free_head)
+		dma_free_coherent(&ei_local->qdma_pdev->dev,
+				  NUM_QDMA_PAGE * QTXD_LEN,
+				  ei_local->free_head, ei_local->phy_free_head);
+	if (ei_local->free_page_head)
+		dma_free_coherent(&ei_local->qdma_pdev->dev,
+				  NUM_QDMA_PAGE * QDMA_PAGE_SIZE,
+				  ei_local->free_page_head,
+				  ei_local->phy_free_page_head);
+
+	/* Free TX data.  CONSISTENCY FIX: compare against the shared
+	 * magic_id sentinel (as the send/housekeeping paths in this file
+	 * do) instead of a hard-coded (struct sk_buff *)0xFFFFFFFF cast,
+	 * which duplicates the magic value and is not 64-bit safe.
+	 */
+	for (i = 0; i < num_tx_desc; i++) {
+		if (ei_local->skb_free[i] != magic_id &&
+		    ei_local->skb_free[i])
+			dev_kfree_skb_any(ei_local->skb_free[i]);
+	}
+}
+
+/* set_fe_qdma_glo_cfg - program the QDMA global configuration and
+ * enable the TX/RX DMA engines.
+ *
+ * NOTE(review): the value composed in dma_glo_cfg is written and then
+ * immediately superseded by the hard-coded 0x95404475 write at the
+ * end, so the final register contents come from that constant —
+ * presumably a validated bring-up value; confirm against the
+ * QDMA_GLO_CFG register map.
+ */
+void set_fe_qdma_glo_cfg(void)
+{
+	unsigned int reg_val;
+	unsigned int dma_glo_cfg = 0;
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	/* keep only the low byte of the current config before
+	 * reprogramming
+	 */
+	reg_val = sys_reg_read(QDMA_GLO_CFG);
+	reg_val &= 0x000000FF;
+
+	sys_reg_write(QDMA_GLO_CFG, reg_val);
+	reg_val = sys_reg_read(QDMA_GLO_CFG);	/* read back; value unused */
+
+	/* Enable random early drop and set drop threshold automatically */
+	if (!(ei_local->features & FE_HW_SFQ))
+		sys_reg_write(QDMA_FC_THRES, 0x4444);
+	sys_reg_write(QDMA_HRED2, 0x0);
+
+	dma_glo_cfg =
+	    (TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN | PDMA_BT_SIZE_16DWORDS | PDMA_DESC_32B_E);
+	dma_glo_cfg |= (RX_2B_OFFSET);
+	sys_reg_write(QDMA_GLO_CFG, dma_glo_cfg);
+
+	pr_err("Enable QDMA TX NDP coherence check and re-read mechanism\n");
+	reg_val = sys_reg_read(QDMA_GLO_CFG);
+	/* bit 10 / bit 20: NDP coherence check + re-read */
+	reg_val = reg_val | 0x400 | 0x100000;
+	sys_reg_write(QDMA_GLO_CFG, reg_val);
+	//sys_reg_write(QDMA_GLO_CFG, 0x95404575);
+	sys_reg_write(QDMA_GLO_CFG, 0x95404475);
+	pr_err("***********QDMA_GLO_CFG=%x\n", sys_reg_read(QDMA_GLO_CFG));
+}
+
+/* ei_qdma_start_xmit - QDMA transmit entry for GMAC1/GMAC2.
+ *
+ * @skb:     packet to transmit (consumed on every path)
+ * @dev:     transmitting net_device
+ * @gmac_no: 1 or 2; selects the TX ring group and per-GMAC counters
+ *
+ * Estimates how many TX descriptors the packet needs (linear part plus
+ * each S/G fragment under TSO), hands it to the QDMA send path when
+ * enough free descriptors remain, otherwise drops it and bumps
+ * tx_dropped.  Always returns 0 — the skb is never requeued.
+ */
+int ei_qdma_start_xmit(struct sk_buff *skb, struct net_device *dev, int gmac_no)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	unsigned int num_of_txd = 0;
+	unsigned int nr_frags = skb_shinfo(skb)->nr_frags, i;
+	skb_frag_t * frag;
+	struct PSEUDO_ADAPTER *p_ad;
+	int ring_no;
+
+	/* queues of GMAC2 sit after GMAC1's gmac1_txq_num queues */
+	ring_no = skb->queue_mapping + (gmac_no - 1) * gmac1_txq_num;
+
+#if defined(CONFIG_HW_NAT)  || defined(CONFIG_RA_HW_NAT_MODULE)
+	/* give the HW-NAT hook first claim on the packet; != 1 means it
+	 * was consumed (or rejected) and must not reach the DMA ring
+	 */
+	if (ppe_hook_tx_eth) {
+		if (ppe_hook_tx_eth(skb, gmac_no) != 1) {
+			dev_kfree_skb_any(skb);
+			return 0;
+		}
+	}
+#endif
+
+//	dev->trans_start = jiffies;	/* save the timestamp */
+	netif_trans_update(dev);
+	/*spin_lock_irqsave(&ei_local->page_lock, flags); */
+
+	/* check free_txd_num before calling rt288_eth_send() */
+
+	if (ei_local->features & FE_TSO) {
+		/* descriptors for the linear area ... */
+		num_of_txd += cal_frag_txd_num(skb->len - skb->data_len);
+		/* ... plus one count per scatter/gather fragment */
+		if (nr_frags != 0) {
+			for (i = 0; i < nr_frags; i++) {
+				frag = &skb_shinfo(skb)->frags[i];
+				num_of_txd += cal_frag_txd_num(skb_frag_size(frag));
+			}
+		}
+	} else {
+		num_of_txd = 1;
+	}
+
+/* if ((ei_local->free_txd_num > num_of_txd + 1)) { */
+	/* need num_of_txd descriptors plus one spare dummy TXD */
+	if (likely(atomic_read(&ei_local->free_txd_num[ring_no]) >
+		   (num_of_txd + 1))) {
+		if (num_of_txd == 1)
+			rt2880_qdma_eth_send(ei_local, dev, skb,
+					     gmac_no, ring_no);
+		else
+			rt2880_qdma_eth_send_tso(ei_local, dev, skb,
+						 gmac_no, ring_no);
+	} else {
+		/* ring full: account the drop on the right device */
+		if (ei_local->features & FE_GE2_SUPPORT) {
+			if (gmac_no == 2) {
+				if (ei_local->pseudo_dev) {
+					p_ad =
+					    netdev_priv(ei_local->pseudo_dev);
+					p_ad->stat.tx_dropped++;
+				}
+			} else {
+				ei_local->stat.tx_dropped++;
+			}
+		} else {
+			ei_local->stat.tx_dropped++;
+		}
+		/* kfree_skb(skb); */
+		dev_kfree_skb_any(skb);
+		/* spin_unlock_irqrestore(&ei_local->page_lock, flags); */
+		return 0;
+	}
+	/* spin_unlock_irqrestore(&ei_local->page_lock, flags); */
+	return 0;
+}
+
+/* ei_qdma_xmit_housekeeping - reclaim TX descriptors the QDMA engine
+ * has finished with.
+ *
+ * Walks from the last CPU release index up to the hardware's DRX
+ * pointer, returning each TXD to its ring's free list and freeing the
+ * attached skb; under TSO, entries holding the magic_id sentinel carry
+ * no skb and are skipped.  Finally publishes the new free counts,
+ * wakes the TX queues and writes the CRX pointer back to hardware.
+ *
+ * NOTE(review): the budget argument is currently unused — the loop
+ * always drains up to the hardware pointer.  Always returns 0.
+ */
+int ei_qdma_xmit_housekeeping(struct net_device *netdev, int budget)
+{
+	struct END_DEVICE *ei_local = netdev_priv(netdev);
+
+	dma_addr_t dma_ptr;
+	struct QDMA_txdesc *cpu_ptr = NULL;
+	dma_addr_t tmp_ptr;
+	unsigned int ctx_offset = 0;
+	unsigned int dtx_offset = 0;
+	unsigned int rls_cnt[TOTAL_TXQ_NUM] = { 0 };
+	int ring_no;
+	int i;
+
+	/* hardware's current release position, as a pool offset */
+	dma_ptr = (dma_addr_t)sys_reg_read(QTX_DRX_PTR);
+	ctx_offset = ei_local->rls_cpu_idx;
+	dtx_offset = (dma_ptr - ei_local->phy_txd_pool) / QTXD_LEN;
+	cpu_ptr = (ei_local->txd_pool + (ctx_offset));
+	while (ctx_offset != dtx_offset) {
+		/* 1. keep cpu next TXD */
+		tmp_ptr = (dma_addr_t)cpu_ptr->txd_info2.NDP;
+		ring_no = ring_no_mapping(ctx_offset);
+		rls_cnt[ring_no]++;
+		/* 2. release TXD */
+		ei_local->txd_pool_info[ei_local->free_txd_tail[ring_no]] =
+		    ctx_offset;
+		ei_local->free_txd_tail[ring_no] = ctx_offset;
+		/* atomic_add(1, &ei_local->free_txd_num[ring_no]); */
+		/* 3. update ctx_offset and free skb memory */
+		ctx_offset = (tmp_ptr - ei_local->phy_txd_pool) / QTXD_LEN;
+		if (ei_local->features & FE_TSO) {
+			/* magic_id marks a TSO continuation TXD: no skb */
+			if (ei_local->skb_free[ctx_offset] != magic_id) {
+				dev_kfree_skb_any(ei_local->skb_free
+						  [ctx_offset]);
+			}
+		} else {
+			dev_kfree_skb_any(ei_local->skb_free[ctx_offset]);
+		}
+		ei_local->skb_free[ctx_offset] = 0;
+		/* 4. update cpu_ptr */
+		cpu_ptr = (ei_local->txd_pool + ctx_offset);
+	}
+	/* publish the reclaimed counts in one atomic add per ring */
+	for (i = 0; i < TOTAL_TXQ_NUM; i++) {
+		if (rls_cnt[i] > 0)
+			atomic_add(rls_cnt[i], &ei_local->free_txd_num[i]);
+	}
+	/* atomic_add(rls_cnt, &ei_local->free_txd_num[0]); */
+	ei_local->rls_cpu_idx = ctx_offset;
+	netif_wake_queue(netdev);
+	if (ei_local->features & FE_GE2_SUPPORT)
+		netif_wake_queue(ei_local->pseudo_dev);
+	ei_local->tx_ring_full = 0;
+	/* tell hardware how far the CPU has consumed */
+	sys_reg_write(QTX_CRX_PTR,
+		      (ei_local->phy_txd_pool + (ctx_offset * QTXD_LEN)));
+
+	return 0;
+}
+
+/* ei_qdma_ioctl - private ioctl handler for QDMA register access and
+ * QoS configuration.
+ *
+ * @dev:  owning net_device
+ * @ifr:  user request; ifr_data receives the result for reads
+ * @data: decoded command block (cmd, off, val)
+ *
+ * On MT7622 the per-queue QTX registers are windowed: QDMA_PAGE selects
+ * which group of 16 queues offsets 0x000-0x0ff address; offsets
+ * 0x100/0x200/0x300 map to pages 1/2/3.  Offset 0x777 is a magic alias
+ * for CR 0x1b101a14 (page 0, offset 0x214).
+ *
+ * Returns 0 on success, -EINVAL for out-of-range offsets, -EFAULT on
+ * copy_to_user failure, 1 for unknown commands.
+ */
+int ei_qdma_ioctl(struct net_device *dev, struct ifreq *ifr,
+		  struct qdma_ioctl_data *data)
+{
+	int ret = 0;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	unsigned int cmd;
+
+	cmd = data->cmd;
+
+	switch (cmd) {
+	case RAETH_QDMA_REG_READ:
+
+		if (data->off > REG_HQOS_MAX) {
+			ret = -EINVAL;
+			break;
+		}
+
+		if (ei_local->chip_name == MT7622_FE) {	/* harry */
+			unsigned int page = 0;
+
+			/* q16~q31: 0x100 <= data->off < 0x200
+			 * q32~q47: 0x200 <= data->off < 0x300
+			 * q48~q63: 0x300 <= data->off < 0x400
+			 */
+			if (data->off >= 0x100 && data->off < 0x200) {
+				page = 1;
+				data->off = data->off - 0x100;
+			} else if (data->off >= 0x200 && data->off < 0x300) {
+				page = 2;
+				data->off = data->off - 0x200;
+			} else if (data->off >= 0x300 && data->off < 0x400) {
+				page = 3;
+				data->off = data->off - 0x300;
+			} else {
+				page = 0;
+			}
+			/*magic number for ioctl identify CR 0x1b101a14*/
+			if (data->off == 0x777) {
+				page = 0;
+				data->off = 0x214;
+			}
+
+			sys_reg_write(QDMA_PAGE, page);
+			/* pr_debug("page=%d, data->off =%x\n", page, data->off); */
+		}
+
+		data->val = sys_reg_read(QTX_CFG_0 + data->off);
+		pr_info("read reg off:%x val:%x\n", data->off, data->val);
+		ret = copy_to_user(ifr->ifr_data, data, sizeof(*data));
+		/* always restore the register window to page 0 */
+		sys_reg_write(QDMA_PAGE, 0);
+		if (ret) {
+			pr_info("ret=%d\n", ret);
+			ret = -EFAULT;
+		}
+		break;
+	case RAETH_QDMA_REG_WRITE:
+
+		if (data->off > REG_HQOS_MAX) {
+			ret = -EINVAL;
+			break;
+		}
+
+		if (ei_local->chip_name == MT7622_FE) {	/* harry */
+			unsigned int page = 0;
+			/*QoS must enable QDMA drop packet policy*/
+			sys_reg_write(QDMA_FC_THRES, 0x83834444);
+			/* q16~q31: 0x100 <= data->off < 0x200
+			 * q32~q47: 0x200 <= data->off < 0x300
+			 * q48~q63: 0x300 <= data->off < 0x400
+			 */
+			if (data->off >= 0x100 && data->off < 0x200) {
+				page = 1;
+				data->off = data->off - 0x100;
+			} else if (data->off >= 0x200 && data->off < 0x300) {
+				page = 2;
+				data->off = data->off - 0x200;
+			} else if (data->off >= 0x300 && data->off < 0x400) {
+				page = 3;
+				data->off = data->off - 0x300;
+			} else {
+				page = 0;
+			}
+			/*magic number for ioctl identify CR 0x1b101a14*/
+			if (data->off == 0x777) {
+				page = 0;
+				data->off = 0x214;
+			}
+			sys_reg_write(QDMA_PAGE, page);
+			/*pr_info("data->val =%x\n", data->val);*/
+			sys_reg_write(QTX_CFG_0 + data->off, data->val);
+			sys_reg_write(QDMA_PAGE, 0);
+		} else {
+			sys_reg_write(QTX_CFG_0 + data->off, data->val);
+		}
+		/* pr_info("write reg off:%x val:%x\n", data->off, data->val); */
+		break;
+	case RAETH_QDMA_QUEUE_MAPPING:
+		/* bit 8 of 'off' toggles LAN/WAN separation; the low
+		 * byte indexes the mark-to-queue table
+		 */
+		if ((data->off & 0x100) == 0x100) {
+			lan_wan_separate = 1;
+			data->off &= 0xff;
+		} else {
+			lan_wan_separate = 0;
+			data->off &= 0xff;
+		}
+		M2Q_table[data->off] = data->val;
+		break;
+	case RAETH_QDMA_SFQ_WEB_ENABLE:
+		if (ei_local->features & FE_HW_SFQ) {
+			if ((data->val) == 0x1)
+				web_sfq_enable = 1;
+			else
+				web_sfq_enable = 0;
+		} else {
+			ret = -EINVAL;
+		}
+		break;
+	default:
+		ret = 1;
+		break;
+	}
+
+	return ret;
+}
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_qdma.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_qdma.h
new file mode 100644
index 0000000..ce1af4d
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_qdma.h
@@ -0,0 +1,20 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef RAETHER_QDMA_H
+#define RAETHER_QDMA_H
+
+/* Global net_device owned by the raeth core driver. */
+extern struct net_device *dev_raether;
+/* NOTE(review): this header declares set_fe_dma_glo_cfg(), but the
+ * QDMA source defines set_fe_qdma_glo_cfg() — confirm which symbol is
+ * meant to be exported here.
+ */
+void set_fe_dma_glo_cfg(void);
+
+#endif
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_rss.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_rss.c
new file mode 100644
index 0000000..972c4e0
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_rss.c
@@ -0,0 +1,1222 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "raether.h"
+#include "raether_rss.h"
+#include "raether_hwlro.h"
+#include "ra_mac.h"
+
+static struct proc_dir_entry *proc_rss_ring1, *proc_rss_ring2, *proc_rss_ring3;
+
+/* fe_rss_4ring_init - allocate RSS RX rings 1..3 and enable 4-ring RSS.
+ *
+ * Allocates a coherent descriptor ring and per-descriptor data buffers
+ * for each ring, programs the ring base/count/index registers, then
+ * configures the RSS hash types and indirection table and enables RSS.
+ *
+ * Returns 0 on success, -ENOMEM on any allocation/mapping failure.
+ * BUGFIX: the dma_alloc_coherent() result was dereferenced without a
+ * NULL check, and the error path leaked every ring/buffer allocated
+ * before the failure.
+ */
+int fe_rss_4ring_init(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	int skb_size;
+	int i, j;
+
+	skb_size = SKB_DATA_ALIGN(MAX_RX_LENGTH + NET_IP_ALIGN) +
+		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	/* Initial RX Ring 1 ~ 3 */
+	for (i = 1; i < MAX_RX_RING_NUM; i++) {
+		ei_local->rx_ring[i] =
+			dma_alloc_coherent(dev->dev.parent,
+					   NUM_RSS_RX_DESC *
+					   sizeof(struct PDMA_rxdesc),
+					   &ei_local->phy_rx_ring[i],
+					   GFP_ATOMIC | __GFP_ZERO);
+		if (!ei_local->rx_ring[i]) {
+			pr_info("rx ring %d allocation failed!\n", i);
+			goto no_rx_mem;
+		}
+		for (j = 0; j < NUM_RSS_RX_DESC; j++) {
+			ei_local->netrx_skb_data[i][j] =
+				raeth_alloc_skb_data(skb_size, GFP_KERNEL);
+
+			if (!ei_local->netrx_skb_data[i][j]) {
+				pr_info("rx skbuff buffer allocation failed!\n");
+				goto no_rx_mem;
+			}
+
+			memset(&ei_local->rx_ring[i][j], 0,
+			       sizeof(struct PDMA_rxdesc));
+			ei_local->rx_ring[i][j].rxd_info2.DDONE_bit = 0;
+			ei_local->rx_ring[i][j].rxd_info2.LS0 = 0;
+			ei_local->rx_ring[i][j].rxd_info2.PLEN0 =
+			    SET_ADMA_RX_LEN0(MAX_RX_LENGTH);
+			ei_local->rx_ring[i][j].rxd_info1.PDP0 =
+			    dma_map_single(dev->dev.parent,
+					   ei_local->netrx_skb_data[i][j] +
+					   NET_SKB_PAD,
+					   MAX_RX_LENGTH, DMA_FROM_DEVICE);
+			if (unlikely
+			    (dma_mapping_error
+			     (dev->dev.parent,
+			      ei_local->rx_ring[i][j].rxd_info1.PDP0))) {
+				pr_info("[%s]dma_map_single() failed...\n",
+					__func__);
+				goto no_rx_mem;
+			}
+		}
+		pr_info("\nphy_rx_ring[%d] = 0x%08x, rx_ring[%d] = 0x%p\n",
+			i, (unsigned int)ei_local->phy_rx_ring[i],
+			i, (void __iomem *)ei_local->rx_ring[i]);
+	}
+
+	sys_reg_write(RX_BASE_PTR3, phys_to_bus((u32)ei_local->phy_rx_ring[3]));
+	sys_reg_write(RX_MAX_CNT3, cpu_to_le32((u32)NUM_RSS_RX_DESC));
+	sys_reg_write(RX_CALC_IDX3, cpu_to_le32((u32)(NUM_RSS_RX_DESC - 1)));
+	sys_reg_write(PDMA_RST_CFG, PST_DRX_IDX3);
+	sys_reg_write(RX_BASE_PTR2, phys_to_bus((u32)ei_local->phy_rx_ring[2]));
+	sys_reg_write(RX_MAX_CNT2, cpu_to_le32((u32)NUM_RSS_RX_DESC));
+	sys_reg_write(RX_CALC_IDX2, cpu_to_le32((u32)(NUM_RSS_RX_DESC - 1)));
+	sys_reg_write(PDMA_RST_CFG, PST_DRX_IDX2);
+	sys_reg_write(RX_BASE_PTR1, phys_to_bus((u32)ei_local->phy_rx_ring[1]));
+	sys_reg_write(RX_MAX_CNT1, cpu_to_le32((u32)NUM_RSS_RX_DESC));
+	sys_reg_write(RX_CALC_IDX1, cpu_to_le32((u32)(NUM_RSS_RX_DESC - 1)));
+	sys_reg_write(PDMA_RST_CFG, PST_DRX_IDX1);
+
+	/* 1. Set RX ring1~3 to pse modes */
+	SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_PSE_MODE);
+	SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_PSE_MODE);
+	SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_PSE_MODE);
+
+	/* 2. Enable non-lro multiple rx */
+	SET_PDMA_NON_LRO_MULTI_EN(1);  /* MRX EN */
+
+	/*Hash Type*/
+	SET_PDMA_RSS_IPV4_TYPE(7);
+	SET_PDMA_RSS_IPV6_TYPE(7);
+	/* 3. Select the size of indirection table */
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW0, 0x39393939);
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW1, 0x93939393);
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW2, 0x39399393);
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW3, 0x93933939);
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW4, 0x39393939);
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW5, 0x93939393);
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW6, 0x39399393);
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW7, 0x93933939);
+	/* 4. Pause */
+	SET_PDMA_RSS_CFG_REQ(1);
+
+	/* 5. Enable RSS */
+	SET_PDMA_RSS_EN(1);
+
+	/* 6. Release pause */
+	SET_PDMA_RSS_CFG_REQ(0);
+
+	return 0;
+
+no_rx_mem:
+	/* Unwind: free every partially allocated ring and its buffers
+	 * (all of this used to leak).
+	 */
+	for (i = 1; i < MAX_RX_RING_NUM; i++) {
+		if (!ei_local->rx_ring[i])
+			continue;
+		for (j = 0; j < NUM_RSS_RX_DESC; j++) {
+			if (!ei_local->netrx_skb_data[i][j])
+				continue;
+			raeth_free_skb_data(ei_local->netrx_skb_data[i][j]);
+			ei_local->netrx_skb_data[i][j] = NULL;
+		}
+		dma_free_coherent(dev->dev.parent,
+				  NUM_RSS_RX_DESC * sizeof(struct PDMA_rxdesc),
+				  ei_local->rx_ring[i],
+				  ei_local->phy_rx_ring[i]);
+		ei_local->rx_ring[i] = NULL;
+	}
+	return -ENOMEM;
+}
+
+/* Release RSS RX rings 1..3: each coherent descriptor ring goes back
+ * to the DMA allocator, then every attached data buffer is freed.
+ */
+void fe_rss_4ring_deinit(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	int ring, slot;
+
+	for (ring = 1; ring < MAX_RX_RING_NUM; ring++) {
+		/* descriptor ring */
+		dma_free_coherent(dev->dev.parent,
+				  NUM_RSS_RX_DESC * sizeof(struct PDMA_rxdesc),
+				  ei_local->rx_ring[ring],
+				  ei_local->phy_rx_ring[ring]);
+		/* per-descriptor data buffers */
+		for (slot = 0; slot < NUM_RSS_RX_DESC; slot++) {
+			raeth_free_skb_data(ei_local->netrx_skb_data[ring][slot]);
+			ei_local->netrx_skb_data[ring][slot] = NULL;
+		}
+	}
+}
+
+/* fe_rss_2ring_init - allocate RSS RX ring 1 and enable 2-ring RSS.
+ *
+ * Same flow as fe_rss_4ring_init() but with a single extra ring and a
+ * uniform indirection table.
+ *
+ * Returns 0 on success, -ENOMEM on any allocation/mapping failure.
+ * BUGFIX: the dma_alloc_coherent() result was dereferenced without a
+ * NULL check, and the error path leaked every ring/buffer allocated
+ * before the failure.
+ */
+int fe_rss_2ring_init(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	int skb_size;
+	int i, j;
+
+	skb_size = SKB_DATA_ALIGN(MAX_RX_LENGTH + NET_IP_ALIGN) +
+		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	for (i = 1; i < MAX_RX_RING_NUM_2RING; i++) {
+		ei_local->rx_ring[i] =
+			dma_alloc_coherent(dev->dev.parent,
+					   NUM_RSS_RX_DESC *
+					   sizeof(struct PDMA_rxdesc),
+					   &ei_local->phy_rx_ring[i],
+					   GFP_ATOMIC | __GFP_ZERO);
+		if (!ei_local->rx_ring[i]) {
+			pr_info("rx ring %d allocation failed!\n", i);
+			goto no_rx_mem;
+		}
+		for (j = 0; j < NUM_RSS_RX_DESC; j++) {
+			ei_local->netrx_skb_data[i][j] =
+				raeth_alloc_skb_data(skb_size, GFP_KERNEL);
+
+			if (!ei_local->netrx_skb_data[i][j]) {
+				pr_info("rx skbuff buffer allocation failed!\n");
+				goto no_rx_mem;
+			}
+
+			memset(&ei_local->rx_ring[i][j], 0,
+			       sizeof(struct PDMA_rxdesc));
+			ei_local->rx_ring[i][j].rxd_info2.DDONE_bit = 0;
+			ei_local->rx_ring[i][j].rxd_info2.LS0 = 0;
+			ei_local->rx_ring[i][j].rxd_info2.PLEN0 =
+			    SET_ADMA_RX_LEN0(MAX_RX_LENGTH);
+			ei_local->rx_ring[i][j].rxd_info1.PDP0 =
+			    dma_map_single(dev->dev.parent,
+					   ei_local->netrx_skb_data[i][j] +
+					   NET_SKB_PAD,
+					   MAX_RX_LENGTH, DMA_FROM_DEVICE);
+			if (unlikely
+			    (dma_mapping_error
+			     (dev->dev.parent,
+			      ei_local->rx_ring[i][j].rxd_info1.PDP0))) {
+				pr_info("[%s]dma_map_single() failed...\n",
+					__func__);
+				goto no_rx_mem;
+			}
+		}
+		pr_info("\nphy_rx_ring[%d] = 0x%08x, rx_ring[%d] = 0x%p\n",
+			i, (unsigned int)ei_local->phy_rx_ring[i],
+			i, (void __iomem *)ei_local->rx_ring[i]);
+	}
+
+	sys_reg_write(RX_BASE_PTR1, phys_to_bus((u32)ei_local->phy_rx_ring[1]));
+	sys_reg_write(RX_MAX_CNT1, cpu_to_le32((u32)NUM_RSS_RX_DESC));
+	sys_reg_write(RX_CALC_IDX1, cpu_to_le32((u32)(NUM_RSS_RX_DESC - 1)));
+	sys_reg_write(PDMA_RST_CFG, PST_DRX_IDX1);
+
+	/* 1. Set RX ring1 to pse mode */
+	SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_PSE_MODE);
+
+	/* 2. Enable non-lro multiple rx */
+	SET_PDMA_NON_LRO_MULTI_EN(1);  /* MRX EN */
+
+	/*Hash Type*/
+	SET_PDMA_RSS_IPV4_TYPE(7);
+	SET_PDMA_RSS_IPV6_TYPE(7);
+	/* 3. Select the size of indirection table */
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW0, 0x44444444);
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW1, 0x44444444);
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW2, 0x44444444);
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW3, 0x44444444);
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW4, 0x44444444);
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW5, 0x44444444);
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW6, 0x44444444);
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW7, 0x44444444);
+	/* 4. Pause */
+	SET_PDMA_RSS_CFG_REQ(1);
+
+	/* 5. Enable RSS */
+	SET_PDMA_RSS_EN(1);
+
+	/* 6. Release pause */
+	SET_PDMA_RSS_CFG_REQ(0);
+
+	return 0;
+
+no_rx_mem:
+	/* Unwind: free every partially allocated ring and its buffers
+	 * (all of this used to leak).
+	 */
+	for (i = 1; i < MAX_RX_RING_NUM_2RING; i++) {
+		if (!ei_local->rx_ring[i])
+			continue;
+		for (j = 0; j < NUM_RSS_RX_DESC; j++) {
+			if (!ei_local->netrx_skb_data[i][j])
+				continue;
+			raeth_free_skb_data(ei_local->netrx_skb_data[i][j]);
+			ei_local->netrx_skb_data[i][j] = NULL;
+		}
+		dma_free_coherent(dev->dev.parent,
+				  NUM_RSS_RX_DESC * sizeof(struct PDMA_rxdesc),
+				  ei_local->rx_ring[i],
+				  ei_local->phy_rx_ring[i]);
+		ei_local->rx_ring[i] = NULL;
+	}
+	return -ENOMEM;
+}
+
+/* Release the extra 2-ring-RSS RX ring (ring 1): the coherent
+ * descriptor ring goes back to the DMA allocator, then every attached
+ * data buffer is freed.
+ */
+void fe_rss_2ring_deinit(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	int ring, slot;
+
+	for (ring = 1; ring < MAX_RX_RING_NUM_2RING; ring++) {
+		/* descriptor ring */
+		dma_free_coherent(dev->dev.parent,
+				  NUM_RSS_RX_DESC * sizeof(struct PDMA_rxdesc),
+				  ei_local->rx_ring[ring],
+				  ei_local->phy_rx_ring[ring]);
+		/* per-descriptor data buffers */
+		for (slot = 0; slot < NUM_RSS_RX_DESC; slot++) {
+			raeth_free_skb_data(ei_local->netrx_skb_data[ring][slot]);
+			ei_local->netrx_skb_data[ring][slot] = NULL;
+		}
+	}
+}
+
+/* Re-arm one RSS RX descriptor with a fresh data buffer.
+ *
+ * DDONE_bit is written last: clearing it marks the descriptor as not
+ * yet completed, i.e. usable by the DMA engine again (the receive loop
+ * treats DDONE_bit == 1 as "packet ready").
+ *
+ * NOTE(review): ei_local and rx_ring_no are currently unused here —
+ * presumably kept for signature symmetry with the HW-LRO variant;
+ * confirm before removing.
+ */
+static inline void hw_rss_rx_desc_init(struct END_DEVICE *ei_local,
+				       struct PDMA_rxdesc *rx_ring,
+				       unsigned int rx_ring_no,
+				       dma_addr_t dma_addr)
+{
+	rx_ring->rxd_info2.PLEN0 = MAX_RX_LENGTH;	/* buffer length */
+	rx_ring->rxd_info1.PDP0 = dma_addr;	/* DMA address of new buffer */
+	rx_ring->rxd_info2.LS0 = 0;
+	rx_ring->rxd_info2.DDONE_bit = 0;	/* hand back to the engine */
+}
+
+/* Map an RX ring number to its RX_CALC_IDX register address; the
+ * per-ring register blocks are spaced 0x10 bytes apart from ring 0.
+ */
+static inline void __iomem *get_rx_cal_idx_reg(unsigned int rx_ring_no)
+{
+	return (void __iomem *)(RAETH_RX_CALC_IDX0 + (rx_ring_no * 0x10));
+}
+
+/* fe_rss0_recv - receive polling for RSS ring 0.
+ *
+ * Walks ring 0 from the last CPU index and, for each completed
+ * descriptor (DDONE set): allocates a replacement data buffer, builds
+ * an skb around the old buffer (zero-copy via raeth_build_skb), feeds
+ * it through the optional HW-NAT hook into the network stack, re-arms
+ * the descriptor with the new buffer, and publishes the consumed index
+ * via RX_CALC_IDX0.
+ *
+ * Returns the number of loop iterations executed; on buffer
+ * allocation/mapping failure the current packet is dropped in place
+ * and (budget + 1) is returned so the caller keeps polling.
+ */
+int fe_rss0_recv(struct net_device *dev,
+		 struct napi_struct *napi,
+		   int budget)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	struct PSEUDO_ADAPTER *p_ad = netdev_priv(ei_local->pseudo_dev);
+	struct sk_buff *rx_skb;
+	struct PDMA_rxdesc *rx_ring, *rx_ring_next;
+	void *rx_data, *rx_data_next, *new_data;
+	unsigned int length = 0;
+	unsigned int rx_ring_no = 0, rx_ring_no_next = 0;
+	unsigned int rx_dma_owner_idx, rx_dma_owner_idx_next;
+	unsigned int rx_dma_owner_lro[MAX_RX_RING_NUM];
+	unsigned int skb_size, map_size;
+	/* void __iomem *rx_calc_idx_reg; */
+	int rx_processed = 0;
+
+	/* get cpu owner indexes of rx rings */
+	rx_dma_owner_lro[0] = (ei_local->rx_calc_idx[0] + 1) % num_rx_desc;
+
+	rx_ring_no =  0;
+	rx_dma_owner_idx = rx_dma_owner_lro[rx_ring_no];
+	rx_ring = &ei_local->rx_ring[rx_ring_no][rx_dma_owner_idx];
+	rx_data = ei_local->netrx_skb_data[rx_ring_no][rx_dma_owner_idx];
+	/* rx_calc_idx_reg = get_rx_cal_idx_reg(rx_ring_no); */
+
+	for (;;) {
+		dma_addr_t dma_addr;
+
+		/* stop at the budget or at the first descriptor still
+		 * owned by hardware (DDONE not set)
+		 */
+		if ((rx_processed++ > budget) ||
+		    (rx_ring->rxd_info2.DDONE_bit == 0))
+			break;
+
+		/* prefetch the next handling RXD */
+
+		rx_dma_owner_lro[rx_ring_no] =
+				(rx_dma_owner_idx + 1) % num_rx_desc;
+		/* replacement buffer sized for build_skb(): headroom +
+		 * payload + skb_shared_info tail
+		 */
+		skb_size =
+			   SKB_DATA_ALIGN(MAX_RX_LENGTH + NET_IP_ALIGN +
+					  NET_SKB_PAD) +
+			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+		map_size = MAX_RX_LENGTH;
+
+		/* rx_ring_no_next =  get_rss_rx_ring(ei_local, rx_dma_owner_lro, group); */
+		rx_ring_no_next =  rx_ring_no;
+		rx_dma_owner_idx_next = rx_dma_owner_lro[rx_ring_no_next];
+
+		rx_ring_next =
+			&ei_local->rx_ring
+				[rx_ring_no_next][rx_dma_owner_idx_next];
+		rx_data_next =
+			ei_local->netrx_skb_data
+				[rx_ring_no_next][rx_dma_owner_idx_next];
+		prefetch(rx_ring_next);
+
+		/* We have to check the free memory size is big enough
+		 * before pass the packet to cpu
+		 */
+		new_data = raeth_alloc_skb_data(skb_size, GFP_ATOMIC);
+
+		if (unlikely(!new_data)) {
+			pr_info("skb not available...\n");
+			goto skb_err;
+		}
+
+		dma_addr = dma_map_single(dev->dev.parent,
+					  new_data + NET_SKB_PAD,
+					  map_size,
+					  DMA_FROM_DEVICE);
+
+		if (unlikely(dma_mapping_error(dev->dev.parent, dma_addr))) {
+			pr_info("[%s]dma_map_single() failed...\n", __func__);
+			raeth_free_skb_data(new_data);
+			goto skb_err;
+		}
+
+		/* wrap the filled buffer in an skb without copying */
+		rx_skb = raeth_build_skb(rx_data, skb_size);
+
+		if (unlikely(!rx_skb)) {
+			put_page(virt_to_head_page(rx_data));
+			pr_info("build_skb failed\n");
+			goto skb_err;
+		}
+		skb_reserve(rx_skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+		length = rx_ring->rxd_info2.PLEN0;
+		dma_unmap_single(dev->dev.parent,
+				 rx_ring->rxd_info1.PDP0,
+				 length, DMA_FROM_DEVICE);
+
+		prefetch(rx_skb->data);
+
+		/* skb processing */
+		skb_put(rx_skb, length);
+
+		/* rx packet from GE2 */
+		if (rx_ring->rxd_info4.SP == 2) {
+			if (ei_local->pseudo_dev) {
+				rx_skb->dev = ei_local->pseudo_dev;
+				rx_skb->protocol =
+				    eth_type_trans(rx_skb,
+						   ei_local->pseudo_dev);
+			} else {
+				pr_info
+				    ("pseudo_dev is still not initialize ");
+				pr_info
+				    ("but receive packet from GMAC2\n");
+			}
+		} else {
+			rx_skb->dev = dev;
+			rx_skb->protocol = eth_type_trans(rx_skb, dev);
+		}
+
+		/* rx checksum offload */
+		if (likely(rx_ring->rxd_info4.L4VLD))
+			rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			rx_skb->ip_summed = CHECKSUM_NONE;
+
+#if defined(CONFIG_RA_HW_NAT)  || defined(CONFIG_RA_HW_NAT_MODULE)
+		/* stash the RX descriptor's FOE info in the skb head/tail
+		 * room for the HW-NAT hook
+		 */
+		if (ppe_hook_rx_eth) {
+			if (IS_SPACE_AVAILABLE_HEAD(rx_skb)) {
+				*(uint32_t *)(FOE_INFO_START_ADDR_HEAD(rx_skb)) =
+					*(uint32_t *)&rx_ring->rxd_info4;
+				FOE_ALG_HEAD(rx_skb) = 0;
+				FOE_MAGIC_TAG_HEAD(rx_skb) = FOE_MAGIC_GE;
+				FOE_TAG_PROTECT_HEAD(rx_skb) = TAG_PROTECT;
+			}
+			if (IS_SPACE_AVAILABLE_TAIL(rx_skb)) {
+				*(uint32_t *)(FOE_INFO_START_ADDR_TAIL(rx_skb) + 2) =
+					*(uint32_t *)&rx_ring->rxd_info4;
+				FOE_ALG_TAIL(rx_skb) = 0;
+				FOE_MAGIC_TAG_TAIL(rx_skb) = FOE_MAGIC_GE;
+				FOE_TAG_PROTECT_TAIL(rx_skb) = TAG_PROTECT;
+			}
+		}
+#endif
+		if (ei_local->features & FE_HW_VLAN_RX) {
+			if (rx_ring->rxd_info2.TAG)
+				__vlan_hwaccel_put_tag(rx_skb,
+						       htons(ETH_P_8021Q),
+						       rx_ring->rxd_info3.VID);
+		}
+/* ra_sw_nat_hook_rx return 1 --> continue
+ * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
+ */
+#if defined(CONFIG_RA_HW_NAT)  || defined(CONFIG_RA_HW_NAT_MODULE)
+		if ((!ppe_hook_rx_eth) ||
+		    (ppe_hook_rx_eth && ppe_hook_rx_eth(rx_skb))) {
+#endif
+			if (ei_local->features & FE_INT_NAPI) {
+			/* napi_gro_receive(napi, rx_skb); */
+				netif_receive_skb(rx_skb);
+			} else {
+				netif_rx(rx_skb);
+			}
+
+#if defined(CONFIG_RA_HW_NAT)  || defined(CONFIG_RA_HW_NAT_MODULE)
+		}
+#endif
+
+		/* account on the source GMAC's counters */
+		if (rx_ring->rxd_info4.SP == 2) {
+			p_ad->stat.rx_packets++;
+			p_ad->stat.rx_bytes += length;
+		} else {
+			ei_local->stat.rx_packets++;
+			ei_local->stat.rx_bytes += length;
+		}
+
+		/* Init RX desc. */
+		hw_rss_rx_desc_init(ei_local,
+				    rx_ring,
+				    rx_ring_no,
+				    dma_addr);
+		ei_local->netrx_skb_data[rx_ring_no][rx_dma_owner_idx] =
+			new_data;
+
+		/* make sure that all changes to the dma ring are flushed before
+		  * we continue
+		  */
+		wmb();
+		sys_reg_write(RAETH_RX_CALC_IDX0, rx_dma_owner_idx);
+		ei_local->rx_calc_idx[rx_ring_no] = rx_dma_owner_idx;
+
+		/* use prefetched variable */
+		rx_dma_owner_idx = rx_dma_owner_idx_next;
+		rx_ring_no = rx_ring_no_next;
+		rx_ring = rx_ring_next;
+		rx_data = rx_data_next;
+		/* rx_calc_idx_reg = get_rx_cal_idx_reg(rx_ring_no); */
+	}	/* for */
+
+	return rx_processed;
+
+skb_err:
+	/* rx packet from GE2 */
+	if (rx_ring->rxd_info4.SP == 2)
+		p_ad->stat.rx_dropped++;
+	else
+		ei_local->stat.rx_dropped++;
+
+	/* Discard the rx packet: re-arm the descriptor with its old
+	 * buffer so the ring keeps flowing
+	 */
+	hw_rss_rx_desc_init(ei_local,
+			    rx_ring,
+			    rx_ring_no,
+			    rx_ring->rxd_info1.PDP0);
+	sys_reg_write(RAETH_RX_CALC_IDX0, rx_dma_owner_idx);
+	ei_local->rx_calc_idx[rx_ring_no] = rx_dma_owner_idx;
+
+	return (budget + 1);
+}
+
+int fe_rss1_recv(struct net_device *dev,
+		 struct napi_struct *napi,
+		   int budget)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	struct PSEUDO_ADAPTER *p_ad = netdev_priv(ei_local->pseudo_dev);
+	struct sk_buff *rx_skb;
+	struct PDMA_rxdesc *rx_ring, *rx_ring_next;
+	void *rx_data, *rx_data_next, *new_data;
+	unsigned int length = 0;
+	unsigned int rx_ring_no = 0, rx_ring_no_next = 0;
+	unsigned int rx_dma_owner_idx, rx_dma_owner_idx_next;
+	unsigned int rx_dma_owner_lro[MAX_RX_RING_NUM];
+	unsigned int skb_size, map_size;
+	/* void __iomem *rx_calc_idx_reg; */
+	int rx_processed = 0;
+
+	/* get cpu owner indexes of rx rings */
+	rx_dma_owner_lro[1] = (ei_local->rx_calc_idx[1] + 1) % NUM_RSS_RX_DESC;
+
+	rx_ring_no = 1;
+	rx_dma_owner_idx = rx_dma_owner_lro[rx_ring_no];
+	rx_ring = &ei_local->rx_ring[rx_ring_no][rx_dma_owner_idx];
+	rx_data = ei_local->netrx_skb_data[rx_ring_no][rx_dma_owner_idx];
+	/* rx_calc_idx_reg = get_rx_cal_idx_reg(rx_ring_no); */
+
+	for (;;) {
+		dma_addr_t dma_addr;
+
+		if ((rx_processed++ > budget) ||
+		    (rx_ring->rxd_info2.DDONE_bit == 0))
+			break;
+
+		/* prefetch the next handling RXD */
+
+		rx_dma_owner_lro[rx_ring_no] =
+				(rx_dma_owner_idx + 1) % NUM_RSS_RX_DESC;
+		skb_size =
+			   SKB_DATA_ALIGN(MAX_RX_LENGTH + NET_IP_ALIGN +
+					  NET_SKB_PAD) +
+			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+		map_size = MAX_RX_LENGTH;
+
+		/* rx_ring_no_next =  get_rss_rx_ring(ei_local, rx_dma_owner_lro, group); */
+		rx_ring_no_next =  rx_ring_no;
+		rx_dma_owner_idx_next = rx_dma_owner_lro[rx_ring_no_next];
+
+		rx_ring_next =
+			&ei_local->rx_ring
+				[rx_ring_no_next][rx_dma_owner_idx_next];
+		rx_data_next =
+			ei_local->netrx_skb_data
+				[rx_ring_no_next][rx_dma_owner_idx_next];
+		prefetch(rx_ring_next);
+
+		/* We have to check the free memory size is big enough
+		 * before pass the packet to cpu
+		 */
+		new_data = raeth_alloc_skb_data(skb_size, GFP_ATOMIC);
+
+		if (unlikely(!new_data)) {
+			pr_info("skb not available...\n");
+			goto skb_err;
+		}
+
+		dma_addr = dma_map_single(dev->dev.parent,
+					  new_data + NET_SKB_PAD,
+					  map_size,
+					  DMA_FROM_DEVICE);
+
+		if (unlikely(dma_mapping_error(dev->dev.parent, dma_addr))) {
+			pr_info("[%s]dma_map_single() failed...\n", __func__);
+			raeth_free_skb_data(new_data);
+			goto skb_err;
+		}
+
+		rx_skb = raeth_build_skb(rx_data, skb_size);
+
+		if (unlikely(!rx_skb)) {
+			put_page(virt_to_head_page(rx_data));
+			pr_info("build_skb failed\n");
+			goto skb_err;
+		}
+		skb_reserve(rx_skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+		length = rx_ring->rxd_info2.PLEN0;
+		dma_unmap_single(dev->dev.parent,
+				 rx_ring->rxd_info1.PDP0,
+				 length, DMA_FROM_DEVICE);
+
+		prefetch(rx_skb->data);
+
+		/* skb processing */
+		skb_put(rx_skb, length);
+
+		/* rx packet from GE2 */
+		if (rx_ring->rxd_info4.SP == 2) {
+			if (ei_local->pseudo_dev) {
+				rx_skb->dev = ei_local->pseudo_dev;
+				rx_skb->protocol =
+				    eth_type_trans(rx_skb,
+						   ei_local->pseudo_dev);
+			} else {
+				pr_info
+				    ("pseudo_dev is still not initialize ");
+				pr_info
+				    ("but receive packet from GMAC2\n");
+			}
+		} else {
+			rx_skb->dev = dev;
+			rx_skb->protocol = eth_type_trans(rx_skb, dev);
+		}
+
+		/* rx checksum offload */
+		if (likely(rx_ring->rxd_info4.L4VLD))
+			rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			rx_skb->ip_summed = CHECKSUM_NONE;
+
+#if defined(CONFIG_RA_HW_NAT)  || defined(CONFIG_RA_HW_NAT_MODULE)
+		if (ppe_hook_rx_eth) {
+			*(uint32_t *)(FOE_INFO_START_ADDR_HEAD(rx_skb)) =
+				*(uint32_t *)&rx_ring->rxd_info4;
+			*(uint32_t *)(FOE_INFO_START_ADDR_TAIL(rx_skb) + 2) =
+				*(uint32_t *)&rx_ring->rxd_info4;
+			FOE_ALG_HEAD(rx_skb) = 0;
+			FOE_ALG_TAIL(rx_skb) = 0;
+			FOE_MAGIC_TAG_HEAD(rx_skb) = FOE_MAGIC_GE;
+			FOE_MAGIC_TAG_TAIL(rx_skb) = FOE_MAGIC_GE;
+			FOE_TAG_PROTECT_HEAD(rx_skb) = TAG_PROTECT;
+			FOE_TAG_PROTECT_TAIL(rx_skb) = TAG_PROTECT;
+		}
+#endif
+		if (ei_local->features & FE_HW_VLAN_RX) {
+			if (rx_ring->rxd_info2.TAG)
+				__vlan_hwaccel_put_tag(rx_skb,
+						       htons(ETH_P_8021Q),
+						       rx_ring->rxd_info3.VID);
+		}
+/* ra_sw_nat_hook_rx return 1 --> continue
+ * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
+ */
+#if defined(CONFIG_RA_HW_NAT)  || defined(CONFIG_RA_HW_NAT_MODULE)
+		if ((!ppe_hook_rx_eth) ||
+		    (ppe_hook_rx_eth && ppe_hook_rx_eth(rx_skb))) {
+#endif
+			if (ei_local->features & FE_INT_NAPI) {
+			/* napi_gro_receive(napi, rx_skb); */
+				netif_receive_skb(rx_skb);
+			} else {
+				netif_rx(rx_skb);
+			}
+
+#if defined(CONFIG_RA_HW_NAT)  || defined(CONFIG_RA_HW_NAT_MODULE)
+		}
+#endif
+
+		if (rx_ring->rxd_info4.SP == 2) {
+			p_ad->stat.rx_packets++;
+			p_ad->stat.rx_bytes += length;
+		} else {
+			ei_local->stat.rx_packets++;
+			ei_local->stat.rx_bytes += length;
+		}
+
+		/* Init RX desc. */
+		hw_rss_rx_desc_init(ei_local,
+				    rx_ring,
+				    rx_ring_no,
+				    dma_addr);
+		ei_local->netrx_skb_data[rx_ring_no][rx_dma_owner_idx] =
+			new_data;
+
+		/* make sure that all changes to the dma ring are flushed before
+		  * we continue
+		  */
+		wmb();
+		sys_reg_write(RAETH_RX_CALC_IDX1, rx_dma_owner_idx);
+		ei_local->rx_calc_idx[rx_ring_no] = rx_dma_owner_idx;
+
+		/* use prefetched variable */
+		rx_dma_owner_idx = rx_dma_owner_idx_next;
+		rx_ring_no = rx_ring_no_next;
+		rx_ring = rx_ring_next;
+		rx_data = rx_data_next;
+		/* rx_calc_idx_reg = get_rx_cal_idx_reg(rx_ring_no); */
+	}	/* for */
+
+	return rx_processed;
+
+skb_err:
+	/* rx packet from GE2 */
+	if (rx_ring->rxd_info4.SP == 2)
+		p_ad->stat.rx_dropped++;
+	else
+		ei_local->stat.rx_dropped++;
+
+	/* Discard the rx packet */
+	hw_rss_rx_desc_init(ei_local,
+			    rx_ring,
+			    rx_ring_no,
+			    rx_ring->rxd_info1.PDP0);
+	sys_reg_write(RAETH_RX_CALC_IDX1, rx_dma_owner_idx);
+	ei_local->rx_calc_idx[rx_ring_no] = rx_dma_owner_idx;
+
+	return (budget + 1);
+}
+
+/* NAPI RX poll for RSS ring 2.
+ *
+ * Reaps completed descriptors from RX ring 2, hands each packet to the
+ * stack (optionally through the HW-NAT PPE hook), refills the descriptor
+ * with a freshly mapped buffer and advances RAETH_RX_CALC_IDX2.
+ *
+ * Returns the number of loop iterations performed, or (budget + 1) when
+ * a buffer allocation/DMA-mapping failure forced an early drop.
+ */
+int fe_rss2_recv(struct net_device *dev,
+		 struct napi_struct *napi,
+		   int budget)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	struct PSEUDO_ADAPTER *p_ad = netdev_priv(ei_local->pseudo_dev);
+	struct sk_buff *rx_skb;
+	struct PDMA_rxdesc *rx_ring, *rx_ring_next;
+	void *rx_data, *rx_data_next, *new_data;
+	unsigned int length = 0;
+	unsigned int rx_ring_no = 0, rx_ring_no_next = 0;
+	unsigned int rx_dma_owner_idx, rx_dma_owner_idx_next;
+	unsigned int rx_dma_owner_lro[MAX_RX_RING_NUM];
+	unsigned int skb_size, map_size;
+	/* void __iomem *rx_calc_idx_reg; */
+	int rx_processed = 0;
+
+	/* get cpu owner indexes of rx rings */
+	rx_dma_owner_lro[2] = (ei_local->rx_calc_idx[2] + 1) % NUM_RSS_RX_DESC;
+
+	rx_ring_no =  2;
+	rx_dma_owner_idx = rx_dma_owner_lro[rx_ring_no];
+	rx_ring = &ei_local->rx_ring[rx_ring_no][rx_dma_owner_idx];
+	rx_data = ei_local->netrx_skb_data[rx_ring_no][rx_dma_owner_idx];
+	/* rx_calc_idx_reg = get_rx_cal_idx_reg(rx_ring_no); */
+
+	for (;;) {
+		dma_addr_t dma_addr;
+
+		/* stop when the budget is exhausted or the next
+		 * descriptor is still owned by the DMA engine
+		 */
+		if ((rx_processed++ > budget) ||
+		    (rx_ring->rxd_info2.DDONE_bit == 0))
+			break;
+
+		/* prefetch the next handling RXD */
+
+		rx_dma_owner_lro[rx_ring_no] =
+				(rx_dma_owner_idx + 1) % NUM_RSS_RX_DESC;
+		skb_size =
+			   SKB_DATA_ALIGN(MAX_RX_LENGTH + NET_IP_ALIGN +
+					  NET_SKB_PAD) +
+			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+		map_size = MAX_RX_LENGTH;
+
+		/* rx_ring_no_next =  get_rss_rx_ring(ei_local, rx_dma_owner_lro, group); */
+		rx_ring_no_next =  rx_ring_no;
+		rx_dma_owner_idx_next = rx_dma_owner_lro[rx_ring_no_next];
+
+		rx_ring_next =
+			&ei_local->rx_ring
+				[rx_ring_no_next][rx_dma_owner_idx_next];
+		rx_data_next =
+			ei_local->netrx_skb_data
+				[rx_ring_no_next][rx_dma_owner_idx_next];
+		prefetch(rx_ring_next);
+
+		/* We have to check the free memory size is big enough
+		 * before pass the packet to cpu
+		 */
+		new_data = raeth_alloc_skb_data(skb_size, GFP_ATOMIC);
+
+		if (unlikely(!new_data)) {
+			pr_info("skb not available...\n");
+			goto skb_err;
+		}
+
+		dma_addr = dma_map_single(dev->dev.parent,
+					  new_data + NET_SKB_PAD,
+					  map_size,
+					  DMA_FROM_DEVICE);
+
+		if (unlikely(dma_mapping_error(dev->dev.parent, dma_addr))) {
+			pr_info("[%s]dma_map_single() failed...\n", __func__);
+			raeth_free_skb_data(new_data);
+			goto skb_err;
+		}
+
+		rx_skb = raeth_build_skb(rx_data, skb_size);
+
+		if (unlikely(!rx_skb)) {
+			put_page(virt_to_head_page(rx_data));
+			pr_info("build_skb failed\n");
+			goto skb_err;
+		}
+		skb_reserve(rx_skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+		length = rx_ring->rxd_info2.PLEN0;
+		/* the old buffer was mapped with map_size (MAX_RX_LENGTH);
+		 * the DMA API requires unmapping with the same size that
+		 * was used for the mapping, not the received packet length
+		 */
+		dma_unmap_single(dev->dev.parent,
+				 rx_ring->rxd_info1.PDP0,
+				 MAX_RX_LENGTH, DMA_FROM_DEVICE);
+
+		prefetch(rx_skb->data);
+
+		/* skb processing */
+		skb_put(rx_skb, length);
+
+		/* rx packet from GE2 */
+		if (rx_ring->rxd_info4.SP == 2) {
+			if (ei_local->pseudo_dev) {
+				rx_skb->dev = ei_local->pseudo_dev;
+				rx_skb->protocol =
+				    eth_type_trans(rx_skb,
+						   ei_local->pseudo_dev);
+			} else {
+				pr_info
+				    ("pseudo_dev is still not initialize ");
+				pr_info
+				    ("but receive packet from GMAC2\n");
+			}
+		} else {
+			rx_skb->dev = dev;
+			rx_skb->protocol = eth_type_trans(rx_skb, dev);
+		}
+
+		/* rx checksum offload */
+		if (likely(rx_ring->rxd_info4.L4VLD))
+			rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			rx_skb->ip_summed = CHECKSUM_NONE;
+
+#if defined(CONFIG_RA_HW_NAT)  || defined(CONFIG_RA_HW_NAT_MODULE)
+		if (ppe_hook_rx_eth) {
+			*(uint32_t *)(FOE_INFO_START_ADDR_HEAD(rx_skb)) =
+				*(uint32_t *)&rx_ring->rxd_info4;
+			*(uint32_t *)(FOE_INFO_START_ADDR_TAIL(rx_skb) + 2) =
+				*(uint32_t *)&rx_ring->rxd_info4;
+			FOE_ALG_HEAD(rx_skb) = 0;
+			FOE_ALG_TAIL(rx_skb) = 0;
+			FOE_MAGIC_TAG_HEAD(rx_skb) = FOE_MAGIC_GE;
+			FOE_MAGIC_TAG_TAIL(rx_skb) = FOE_MAGIC_GE;
+			FOE_TAG_PROTECT_HEAD(rx_skb) = TAG_PROTECT;
+			FOE_TAG_PROTECT_TAIL(rx_skb) = TAG_PROTECT;
+		}
+#endif
+		if (ei_local->features & FE_HW_VLAN_RX) {
+			if (rx_ring->rxd_info2.TAG)
+				__vlan_hwaccel_put_tag(rx_skb,
+						       htons(ETH_P_8021Q),
+						       rx_ring->rxd_info3.VID);
+		}
+/* ra_sw_nat_hook_rx return 1 --> continue
+ * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
+ */
+#if defined(CONFIG_RA_HW_NAT)  || defined(CONFIG_RA_HW_NAT_MODULE)
+		if ((!ppe_hook_rx_eth) ||
+		    (ppe_hook_rx_eth && ppe_hook_rx_eth(rx_skb))) {
+#endif
+			if (ei_local->features & FE_INT_NAPI) {
+			/* napi_gro_receive(napi, rx_skb); */
+				netif_receive_skb(rx_skb);
+			} else {
+				netif_rx(rx_skb);
+			}
+
+#if defined(CONFIG_RA_HW_NAT)  || defined(CONFIG_RA_HW_NAT_MODULE)
+		}
+#endif
+
+		if (rx_ring->rxd_info4.SP == 2) {
+			p_ad->stat.rx_packets++;
+			p_ad->stat.rx_bytes += length;
+		} else {
+			ei_local->stat.rx_packets++;
+			ei_local->stat.rx_bytes += length;
+		}
+
+		/* Init RX desc. */
+		hw_rss_rx_desc_init(ei_local,
+				    rx_ring,
+				    rx_ring_no,
+				    dma_addr);
+		ei_local->netrx_skb_data[rx_ring_no][rx_dma_owner_idx] =
+			new_data;
+
+		/* make sure that all changes to the dma ring are flushed before
+		  * we continue
+		  */
+		wmb();
+
+		sys_reg_write(RAETH_RX_CALC_IDX2, rx_dma_owner_idx);
+		ei_local->rx_calc_idx[rx_ring_no] = rx_dma_owner_idx;
+
+		/* use prefetched variable */
+		rx_dma_owner_idx = rx_dma_owner_idx_next;
+		rx_ring_no = rx_ring_no_next;
+		rx_ring = rx_ring_next;
+		rx_data = rx_data_next;
+		/* rx_calc_idx_reg = get_rx_cal_idx_reg(rx_ring_no); */
+	}	/* for */
+
+	return rx_processed;
+
+skb_err:
+	/* rx packet from GE2 */
+	if (rx_ring->rxd_info4.SP == 2)
+		p_ad->stat.rx_dropped++;
+	else
+		ei_local->stat.rx_dropped++;
+
+	/* Discard the rx packet: re-arm the descriptor with its old buffer */
+	hw_rss_rx_desc_init(ei_local,
+			    rx_ring,
+			    rx_ring_no,
+			    rx_ring->rxd_info1.PDP0);
+	sys_reg_write(RAETH_RX_CALC_IDX2, rx_dma_owner_idx);
+	ei_local->rx_calc_idx[rx_ring_no] = rx_dma_owner_idx;
+
+	return (budget + 1);
+}
+
+/* NAPI RX poll for RSS ring 3.
+ *
+ * Reaps completed descriptors from RX ring 3, hands each packet to the
+ * stack (optionally through the HW-NAT PPE hook), refills the descriptor
+ * with a freshly mapped buffer and advances RAETH_RX_CALC_IDX3.
+ *
+ * Returns the number of loop iterations performed, or (budget + 1) when
+ * a buffer allocation/DMA-mapping failure forced an early drop.
+ */
+int fe_rss3_recv(struct net_device *dev,
+		 struct napi_struct *napi,
+		   int budget)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	struct PSEUDO_ADAPTER *p_ad = netdev_priv(ei_local->pseudo_dev);
+	struct sk_buff *rx_skb;
+	struct PDMA_rxdesc *rx_ring, *rx_ring_next;
+	void *rx_data, *rx_data_next, *new_data;
+	unsigned int length = 0;
+	unsigned int rx_ring_no = 0, rx_ring_no_next = 0;
+	unsigned int rx_dma_owner_idx, rx_dma_owner_idx_next;
+	unsigned int rx_dma_owner_lro[MAX_RX_RING_NUM];
+	unsigned int skb_size, map_size;
+	/* void __iomem *rx_calc_idx_reg; */
+	int rx_processed = 0;
+
+	/* get cpu owner indexes of rx rings */
+	rx_dma_owner_lro[3] = (ei_local->rx_calc_idx[3] + 1) % NUM_RSS_RX_DESC;
+	rx_ring_no =  3;
+	rx_dma_owner_idx = rx_dma_owner_lro[rx_ring_no];
+	rx_ring = &ei_local->rx_ring[rx_ring_no][rx_dma_owner_idx];
+	rx_data = ei_local->netrx_skb_data[rx_ring_no][rx_dma_owner_idx];
+	/* rx_calc_idx_reg = get_rx_cal_idx_reg(rx_ring_no); */
+
+	for (;;) {
+		dma_addr_t dma_addr;
+
+		/* stop when the budget is exhausted or the next
+		 * descriptor is still owned by the DMA engine
+		 */
+		if ((rx_processed++ > budget) ||
+		    (rx_ring->rxd_info2.DDONE_bit == 0))
+			break;
+
+		/* prefetch the next handling RXD */
+
+		rx_dma_owner_lro[rx_ring_no] =
+				(rx_dma_owner_idx + 1) % NUM_RSS_RX_DESC;
+		skb_size =
+			   SKB_DATA_ALIGN(MAX_RX_LENGTH + NET_IP_ALIGN +
+					  NET_SKB_PAD) +
+			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+		map_size = MAX_RX_LENGTH;
+
+		/* rx_ring_no_next =  get_rss_rx_ring(ei_local, rx_dma_owner_lro, group); */
+		rx_ring_no_next =  rx_ring_no;
+		rx_dma_owner_idx_next = rx_dma_owner_lro[rx_ring_no_next];
+
+		rx_ring_next =
+			&ei_local->rx_ring
+				[rx_ring_no_next][rx_dma_owner_idx_next];
+		rx_data_next =
+			ei_local->netrx_skb_data
+				[rx_ring_no_next][rx_dma_owner_idx_next];
+		prefetch(rx_ring_next);
+
+		/* We have to check the free memory size is big enough
+		 * before pass the packet to cpu
+		 */
+		new_data = raeth_alloc_skb_data(skb_size, GFP_ATOMIC);
+
+		if (unlikely(!new_data)) {
+			pr_info("skb not available...\n");
+			goto skb_err;
+		}
+
+		dma_addr = dma_map_single(dev->dev.parent,
+					  new_data + NET_SKB_PAD,
+					  map_size,
+					  DMA_FROM_DEVICE);
+
+		if (unlikely(dma_mapping_error(dev->dev.parent, dma_addr))) {
+			pr_info("[%s]dma_map_single() failed...\n", __func__);
+			raeth_free_skb_data(new_data);
+			goto skb_err;
+		}
+
+		rx_skb = raeth_build_skb(rx_data, skb_size);
+
+		if (unlikely(!rx_skb)) {
+			put_page(virt_to_head_page(rx_data));
+			pr_info("build_skb failed\n");
+			goto skb_err;
+		}
+		skb_reserve(rx_skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+		length = rx_ring->rxd_info2.PLEN0;
+		/* the old buffer was mapped with map_size (MAX_RX_LENGTH);
+		 * the DMA API requires unmapping with the same size that
+		 * was used for the mapping, not the received packet length
+		 */
+		dma_unmap_single(dev->dev.parent,
+				 rx_ring->rxd_info1.PDP0,
+				 MAX_RX_LENGTH, DMA_FROM_DEVICE);
+
+		prefetch(rx_skb->data);
+
+		/* skb processing */
+		skb_put(rx_skb, length);
+
+		/* rx packet from GE2 */
+		if (rx_ring->rxd_info4.SP == 2) {
+			if (ei_local->pseudo_dev) {
+				rx_skb->dev = ei_local->pseudo_dev;
+				rx_skb->protocol =
+				    eth_type_trans(rx_skb,
+						   ei_local->pseudo_dev);
+			} else {
+				pr_info
+				    ("pseudo_dev is still not initialize ");
+				pr_info
+				    ("but receive packet from GMAC2\n");
+			}
+		} else {
+			rx_skb->dev = dev;
+			rx_skb->protocol = eth_type_trans(rx_skb, dev);
+		}
+
+		/* rx checksum offload */
+		if (likely(rx_ring->rxd_info4.L4VLD))
+			rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			rx_skb->ip_summed = CHECKSUM_NONE;
+
+#if defined(CONFIG_RA_HW_NAT)  || defined(CONFIG_RA_HW_NAT_MODULE)
+		if (ppe_hook_rx_eth) {
+			*(uint32_t *)(FOE_INFO_START_ADDR_HEAD(rx_skb)) =
+				*(uint32_t *)&rx_ring->rxd_info4;
+			*(uint32_t *)(FOE_INFO_START_ADDR_TAIL(rx_skb) + 2) =
+				*(uint32_t *)&rx_ring->rxd_info4;
+			FOE_ALG_HEAD(rx_skb) = 0;
+			FOE_ALG_TAIL(rx_skb) = 0;
+			FOE_MAGIC_TAG_HEAD(rx_skb) = FOE_MAGIC_GE;
+			FOE_MAGIC_TAG_TAIL(rx_skb) = FOE_MAGIC_GE;
+			FOE_TAG_PROTECT_HEAD(rx_skb) = TAG_PROTECT;
+			FOE_TAG_PROTECT_TAIL(rx_skb) = TAG_PROTECT;
+		}
+#endif
+		if (ei_local->features & FE_HW_VLAN_RX) {
+			if (rx_ring->rxd_info2.TAG)
+				__vlan_hwaccel_put_tag(rx_skb,
+						       htons(ETH_P_8021Q),
+						       rx_ring->rxd_info3.VID);
+		}
+/* ra_sw_nat_hook_rx return 1 --> continue
+ * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
+ */
+#if defined(CONFIG_RA_HW_NAT)  || defined(CONFIG_RA_HW_NAT_MODULE)
+		if ((!ppe_hook_rx_eth) ||
+		    (ppe_hook_rx_eth && ppe_hook_rx_eth(rx_skb))) {
+#endif
+			if (ei_local->features & FE_INT_NAPI) {
+			/* napi_gro_receive(napi, rx_skb); */
+				netif_receive_skb(rx_skb);
+			} else {
+				netif_rx(rx_skb);
+			}
+
+#if defined(CONFIG_RA_HW_NAT)  || defined(CONFIG_RA_HW_NAT_MODULE)
+		}
+#endif
+
+		if (rx_ring->rxd_info4.SP == 2) {
+			p_ad->stat.rx_packets++;
+			p_ad->stat.rx_bytes += length;
+		} else {
+			ei_local->stat.rx_packets++;
+			ei_local->stat.rx_bytes += length;
+		}
+
+		/* Init RX desc. */
+		hw_rss_rx_desc_init(ei_local,
+				    rx_ring,
+				    rx_ring_no,
+				    dma_addr);
+		ei_local->netrx_skb_data[rx_ring_no][rx_dma_owner_idx] =
+			new_data;
+
+		/* make sure that all changes to the dma ring are flushed before
+		  * we continue
+		  */
+		wmb();
+
+		sys_reg_write(RAETH_RX_CALC_IDX3, rx_dma_owner_idx);
+		ei_local->rx_calc_idx[rx_ring_no] = rx_dma_owner_idx;
+
+		/* use prefetched variable */
+		rx_dma_owner_idx = rx_dma_owner_idx_next;
+		rx_ring_no = rx_ring_no_next;
+		rx_ring = rx_ring_next;
+		rx_data = rx_data_next;
+		/* rx_calc_idx_reg = get_rx_cal_idx_reg(rx_ring_no); */
+	}	/* for */
+
+	return rx_processed;
+
+skb_err:
+	/* rx packet from GE2 */
+	if (rx_ring->rxd_info4.SP == 2)
+		p_ad->stat.rx_dropped++;
+	else
+		ei_local->stat.rx_dropped++;
+
+	/* Discard the rx packet: re-arm the descriptor with its old buffer */
+	hw_rss_rx_desc_init(ei_local,
+			    rx_ring,
+			    rx_ring_no,
+			    rx_ring->rxd_info1.PDP0);
+	sys_reg_write(RAETH_RX_CALC_IDX3, rx_dma_owner_idx);
+	ei_local->rx_calc_idx[rx_ring_no] = rx_dma_owner_idx;
+
+	return (budget + 1);
+}
+
+/* Snapshot and dump one RSS RX descriptor ring to a seq_file.
+ *
+ * A kernel-memory copy of the ring is taken first so the dump stays
+ * self-consistent while the DMA engine keeps updating the live ring.
+ * Always returns 0 (seq_file show convention); an allocation failure
+ * is reported into the seq_file output instead of as an error code.
+ */
+int rx_rss_ring_read(struct seq_file *seq, void *v,
+		     struct PDMA_rxdesc *rx_ring_p)
+{
+	struct PDMA_rxdesc *rx_ring;
+	int i = 0;
+
+	/* kmalloc_array() guards against overflow of the size multiply */
+	rx_ring = kmalloc_array(NUM_RSS_RX_DESC, sizeof(struct PDMA_rxdesc),
+				GFP_KERNEL);
+	if (!rx_ring) {
+		seq_puts(seq, " allocate temp rx_ring fail.\n");
+		return 0;
+	}
+
+	/* one bulk copy instead of a per-descriptor loop */
+	memcpy(rx_ring, rx_ring_p,
+	       sizeof(struct PDMA_rxdesc) * NUM_RSS_RX_DESC);
+
+	for (i = 0; i < NUM_RSS_RX_DESC; i++) {
+		seq_printf(seq, "%d: %08x %08x %08x %08x\n", i,
+			   *(int *)&rx_ring[i].rxd_info1,
+			   *(int *)&rx_ring[i].rxd_info2,
+			   *(int *)&rx_ring[i].rxd_info3,
+			   *(int *)&rx_ring[i].rxd_info4);
+	}
+
+	kfree(rx_ring);
+	return 0;
+}
+
+/* seq_file show callback: dump RSS RX ring 1 of the global raeth device */
+int rss_ring1_read(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local;
+
+	ei_local = netdev_priv(dev_raether);
+	return rx_rss_ring_read(seq, v, ei_local->rx_ring[1]);
+}
+
+/* seq_file show callback: dump RSS RX ring 2 of the global raeth device */
+int rss_ring2_read(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local;
+
+	ei_local = netdev_priv(dev_raether);
+	return rx_rss_ring_read(seq, v, ei_local->rx_ring[2]);
+}
+
+/* seq_file show callback: dump RSS RX ring 3 of the global raeth device */
+int rss_ring3_read(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local;
+
+	ei_local = netdev_priv(dev_raether);
+	return rx_rss_ring_read(seq, v, ei_local->rx_ring[3]);
+}
+
+/* proc open hook: bind the ring-1 dump to this file via single_open() */
+static int rx_ring1_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rss_ring1_read, NULL);
+}
+
+/* proc open hook: bind the ring-2 dump to this file via single_open() */
+static int rx_ring2_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rss_ring2_read, NULL);
+}
+
+/* proc open hook: bind the ring-3 dump to this file via single_open() */
+static int rx_ring3_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rss_ring3_read, NULL);
+}
+
+/* file_operations for the RSS ring-1 proc entry (read-only seq_file) */
+static const struct file_operations rss_ring1_fops = {
+	.owner = THIS_MODULE,
+	.open = rx_ring1_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+/* file_operations for the RSS ring-2 proc entry (read-only seq_file) */
+static const struct file_operations rss_ring2_fops = {
+	.owner = THIS_MODULE,
+	.open = rx_ring2_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+/* file_operations for the RSS ring-3 proc entry (read-only seq_file) */
+static const struct file_operations rss_ring3_fops = {
+	.owner = THIS_MODULE,
+	.open = rx_ring3_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+/* Create the three RSS ring-dump proc entries under proc_reg_dir.
+ * Creation failures are only logged (the entries are debug aids, not
+ * required for operation); the function always returns 0.
+ */
+int rss_debug_proc_init(struct proc_dir_entry *proc_reg_dir)
+{
+	proc_rss_ring1 =
+	     proc_create(PROCREG_RXRING1, 0, proc_reg_dir, &rss_ring1_fops);
+	if (!proc_rss_ring1)
+		pr_info("!! FAIL to create %s PROC !!\n", PROCREG_RXRING1);
+
+	proc_rss_ring2 =
+	     proc_create(PROCREG_RXRING2, 0, proc_reg_dir, &rss_ring2_fops);
+	if (!proc_rss_ring2)
+		pr_info("!! FAIL to create %s PROC !!\n", PROCREG_RXRING2);
+
+	proc_rss_ring3 =
+	     proc_create(PROCREG_RXRING3, 0, proc_reg_dir, &rss_ring3_fops);
+	if (!proc_rss_ring3)
+		pr_info("!! FAIL to create %s PROC !!\n", PROCREG_RXRING3);
+
+	return 0;
+}
+EXPORT_SYMBOL(rss_debug_proc_init);
+
+/* Remove the RSS ring-dump proc entries created by rss_debug_proc_init().
+ * Each entry is checked before removal so a partially failed init is
+ * cleaned up safely.
+ */
+void rss_debug_proc_exit(struct proc_dir_entry *proc_reg_dir)
+{
+	if (proc_rss_ring1)
+		remove_proc_entry(PROCREG_RXRING1, proc_reg_dir);
+	if (proc_rss_ring2)
+		remove_proc_entry(PROCREG_RXRING2, proc_reg_dir);
+	if (proc_rss_ring3)
+		remove_proc_entry(PROCREG_RXRING3, proc_reg_dir);
+}
+EXPORT_SYMBOL(rss_debug_proc_exit);
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_rss.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_rss.h
new file mode 100644
index 0000000..07c073f
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_rss.h
@@ -0,0 +1,104 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef RA_RSS_H
+#define RA_RSS_H
+
+#include "raeth_reg.h"
+
+#define NUM_RSS_RX_DESC   1024
+#define MAX_RX_RING_NUM_2RING 2
+
+/******RSS define*******/
+#define PDMA_RSS_EN             BIT(0)
+#define PDMA_RSS_BUSY		BIT(1)
+#define PDMA_RSS_CFG_REQ	BIT(2)
+#define PDMA_RSS_CFG_RDY	BIT(3)
+#define PDMA_RSS_INDR_TBL_SIZE		BITS(4, 6)
+#define PDMA_RSS_IPV6_TYPE		BITS(8, 10)
+#define PDMA_RSS_IPV4_TYPE		BITS(12, 14)
+#define PDMA_RSS_IPV6_TUPLE_EN		BITS(16, 20)
+#define PDMA_RSS_IPV4_TUPLE_EN		BITS(24, 28)
+
+#define PDMA_RSS_EN_OFFSET        (0)
+#define PDMA_RSS_BUSY_OFFSET      (1)
+#define PDMA_RSS_CFG_REQ_OFFSET	  (2)
+#define PDMA_RSS_CFG_RDY_OFFSET	  (3)
+#define PDMA_RSS_INDR_TBL_SIZE_OFFSET	(4)
+#define PDMA_RSS_IPV6_TYPE_OFFSET	(8)
+#define PDMA_RSS_IPV4_TYPE_OFFSET	(12)
+#define PDMA_RSS_IPV6_TUPLE_EN_OFFSET	(16)
+#define PDMA_RSS_IPV4_TUPLE_EN_OFFSET	(24)
+
+/* Read-modify-write helpers for ADMA_RSS_GLO_CFG.
+ * Each macro clears its field, inserts the masked argument at the
+ * field's bit offset and writes the register back.
+ */
+
+/* Enable/disable the RSS engine (bit 0; x is 0 or 1) */
+#define SET_PDMA_RSS_EN(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_RSS_GLO_CFG); \
+reg_val &= ~(PDMA_RSS_EN);   \
+reg_val |= ((x) & 0x1) << PDMA_RSS_EN_OFFSET;  \
+sys_reg_write(ADMA_RSS_GLO_CFG, reg_val); \
+}
+
+/* Raise/clear the RSS configure-request handshake bit (bit 2) */
+#define SET_PDMA_RSS_CFG_REQ(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_RSS_GLO_CFG); \
+reg_val &= ~(PDMA_RSS_CFG_REQ);   \
+reg_val |= ((x) & 0x1) << PDMA_RSS_CFG_REQ_OFFSET;  \
+sys_reg_write(ADMA_RSS_GLO_CFG, reg_val); \
+}
+
+/* Select the IPv4 RSS hash type (3-bit field at bits 12..14) */
+#define SET_PDMA_RSS_IPV4_TYPE(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_RSS_GLO_CFG); \
+reg_val &= ~(PDMA_RSS_IPV4_TYPE);   \
+reg_val |= ((x) & 0x7) << PDMA_RSS_IPV4_TYPE_OFFSET;  \
+sys_reg_write(ADMA_RSS_GLO_CFG, reg_val); \
+}
+
+/* Select the IPv6 RSS hash type (3-bit field at bits 8..10) */
+#define SET_PDMA_RSS_IPV6_TYPE(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_RSS_GLO_CFG); \
+reg_val &= ~(PDMA_RSS_IPV6_TYPE);   \
+reg_val |= ((x) & 0x7) << PDMA_RSS_IPV6_TYPE_OFFSET;  \
+sys_reg_write(ADMA_RSS_GLO_CFG, reg_val); \
+}
+
+/* Select which header fields feed the IPv4 RSS hash.
+ * Fix: clear the IPV4_TUPLE_EN field (bits 24..28) that this macro
+ * writes — not the unrelated IPV4_TYPE field (copy-paste bug) — and
+ * keep all five enable bits of the argument (field is BITS(24, 28)).
+ */
+#define SET_PDMA_RSS_IPV4_TUPLE_TYPE(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_RSS_GLO_CFG); \
+reg_val &= ~(PDMA_RSS_IPV4_TUPLE_EN);   \
+reg_val |= ((x) & 0x1f) << PDMA_RSS_IPV4_TUPLE_EN_OFFSET;  \
+sys_reg_write(ADMA_RSS_GLO_CFG, reg_val); \
+}
+
+/* Select which header fields feed the IPv6 RSS hash.
+ * Same fix as above for the IPV6_TUPLE_EN field (bits 16..20).
+ */
+#define SET_PDMA_RSS_IPV6_TUPLE_TYPE(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_RSS_GLO_CFG); \
+reg_val &= ~(PDMA_RSS_IPV6_TUPLE_EN);   \
+reg_val |= ((x) & 0x1f) << PDMA_RSS_IPV6_TUPLE_EN_OFFSET;  \
+sys_reg_write(ADMA_RSS_GLO_CFG, reg_val); \
+}
+
+/* Set the RSS indirection-table size code (3-bit field at bits 4..6) */
+#define SET_PDMA_RSS_INDR_TBL_SIZE(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_RSS_GLO_CFG); \
+reg_val &= ~(PDMA_RSS_INDR_TBL_SIZE);   \
+reg_val |= ((x) & 0x7) << PDMA_RSS_INDR_TBL_SIZE_OFFSET;  \
+sys_reg_write(ADMA_RSS_GLO_CFG, reg_val); \
+}
+
+/* Write raw value y into RSS register x (no read-modify-write) */
+#define SET_PDMA_RSS_CR_VALUE(x, y) \
+{ \
+unsigned int reg_val = y; \
+sys_reg_write(x, reg_val); \
+}
+
+#endif
diff --git a/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/Makefile b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/Makefile
new file mode 100755
index 0000000..e304fcb
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for MediaTek MT753x gigabit switch
+#
+
+obj-$(CONFIG_MT753X_GSW)	+= mt753x.o
+
+mt753x-$(CONFIG_SWCONFIG)	+= mt753x_swconfig.o
+
+mt753x-y			+= mt753x_mdio.o mt7530.o mt7531.o \
+					mt753x_common.o mt753x_vlan.o mt753x_nl.o
+
diff --git a/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt7530.c b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt7530.c
new file mode 100755
index 0000000..7853e27
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt7530.c
@@ -0,0 +1,644 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Weijie Gao <weijie.gao@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+
+#include "mt753x.h"
+#include "mt753x_regs.h"
+
+/* MT7530 registers */
+
+/* Unique fields of PMCR for MT7530 */
+#define FORCE_MODE			BIT(15)
+
+/* Unique fields of GMACCR for MT7530 */
+#define VLAN_SUPT_NO_S			14
+#define VLAN_SUPT_NO_M			0x1c000
+#define LATE_COL_DROP			BIT(13)
+
+/* Unique fields of (M)HWSTRAP for MT7530 */
+#define BOND_OPTION			BIT(24)
+#define P5_PHY0_SEL			BIT(20)
+#define CHG_TRAP			BIT(16)
+#define LOOPDET_DIS			BIT(14)
+#define P5_INTF_SEL_GMAC5		BIT(13)
+#define SMI_ADDR_S			11
+#define SMI_ADDR_M			0x1800
+#define XTAL_FSEL_S			9
+#define XTAL_FSEL_M			0x600
+#define P6_INTF_DIS			BIT(8)
+#define P5_INTF_MODE_RGMII		BIT(7)
+#define P5_INTF_DIS_S			BIT(6)
+#define C_MDIO_BPS_S			BIT(5)
+#define EEPROM_EN_S			BIT(4)
+
+/* PHY EEE Register bitmap of define */
+#define PHY_DEV07			0x07
+#define PHY_DEV07_REG_03C		0x3c
+
+/* PHY Extend Register 0x14 bitmap of define */
+#define PHY_EXT_REG_14			0x14
+
+/* Fields of PHY_EXT_REG_14 */
+#define PHY_EN_DOWN_SHFIT		BIT(4)
+
+/* PHY Token Ring Register 0x10 bitmap of define */
+#define PHY_TR_REG_10			0x10
+
+/* PHY Token Ring Register 0x12 bitmap of define */
+#define PHY_TR_REG_12			0x12
+
+/* PHY LPI PCS/DSP Control Register bitmap of define */
+#define PHY_LPI_REG_11			0x11
+
+/* PHY DEV 0x1e Register bitmap of define */
+#define PHY_DEV1E			0x1e
+#define PHY_DEV1E_REG_123		0x123
+#define PHY_DEV1E_REG_A6		0xa6
+
+/* Values of XTAL_FSEL */
+#define XTAL_20MHZ			1
+#define XTAL_40MHZ			2
+#define XTAL_25MHZ			3
+
+/* Top single control CR define */
+#define TOP_SIG_CTRL			0x7808
+
+/* TOP_SIG_CTRL Register bitmap of define */
+#define OUTPUT_INTR_S			16
+#define OUTPUT_INTR_M			0x30000
+
+#define P6ECR				0x7830
+#define P6_INTF_MODE_TRGMII		BIT(0)
+
+#define TRGMII_TXCTRL			0x7a40
+#define TRAIN_TXEN			BIT(31)
+#define TXC_INV				BIT(30)
+#define TX_DOEO				BIT(29)
+#define TX_RST				BIT(28)
+
+#define TRGMII_TD0_CTRL			0x7a50
+#define TRGMII_TD1_CTRL			0x7a58
+#define TRGMII_TD2_CTRL			0x7a60
+#define TRGMII_TD3_CTRL			0x7a68
+#define TRGMII_TXCTL_CTRL		0x7a70
+#define TRGMII_TCK_CTRL			0x7a78
+#define TRGMII_TD_CTRL(n)		(0x7a50 + (n) * 8)
+#define NUM_TRGMII_CTRL			6
+#define TX_DMPEDRV			BIT(31)
+#define TX_DM_SR			BIT(15)
+#define TX_DMERODT			BIT(14)
+#define TX_DMOECTL			BIT(13)
+#define TX_TAP_S			8
+#define TX_TAP_M			0xf00
+#define TX_TRAIN_WD_S			0
+#define TX_TRAIN_WD_M			0xff
+
+#define TRGMII_TD0_ODT			0x7a54
+#define TRGMII_TD1_ODT			0x7a5c
+#define TRGMII_TD2_ODT			0x7a64
+#define TRGMII_TD3_ODT			0x7a6c
+#define TRGMII_TXCTL_ODT		0x7574
+#define TRGMII_TCK_ODT			0x757c
+#define TRGMII_TD_ODT(n)		(0x7a54 + (n) * 8)
+#define NUM_TRGMII_ODT			6
+#define TX_DM_DRVN_PRE_S		30
+#define TX_DM_DRVN_PRE_M		0xc0000000
+#define TX_DM_DRVP_PRE_S		28
+#define TX_DM_DRVP_PRE_M		0x30000000
+#define TX_DM_TDSEL_S			24
+#define TX_DM_TDSEL_M			0xf000000
+#define TX_ODTEN			BIT(23)
+#define TX_DME_PRE			BIT(20)
+#define TX_DM_DRVNT0			BIT(19)
+#define TX_DM_DRVPT0			BIT(18)
+#define TX_DM_DRVNTE			BIT(17)
+#define TX_DM_DRVPTE			BIT(16)
+#define TX_DM_ODTN_S			12
+#define TX_DM_ODTN_M			0x7000
+#define TX_DM_ODTP_S			8
+#define TX_DM_ODTP_M			0x700
+#define TX_DM_DRVN_S			4
+#define TX_DM_DRVN_M			0xf0
+#define TX_DM_DRVP_S			0
+#define TX_DM_DRVP_M			0x0f
+
+#define P5RGMIIRXCR			0x7b00
+#define CSR_RGMII_RCTL_CFG_S		24
+#define CSR_RGMII_RCTL_CFG_M		0x7000000
+#define CSR_RGMII_RXD_CFG_S		16
+#define CSR_RGMII_RXD_CFG_M		0x70000
+#define CSR_RGMII_EDGE_ALIGN		BIT(8)
+#define CSR_RGMII_RXC_90DEG_CFG_S	4
+#define CSR_RGMII_RXC_90DEG_CFG_M	0xf0
+#define CSR_RGMII_RXC_0DEG_CFG_S	0
+#define CSR_RGMII_RXC_0DEG_CFG_M	0x0f
+
+#define P5RGMIITXCR			0x7b04
+#define CSR_RGMII_TXEN_CFG_S		16
+#define CSR_RGMII_TXEN_CFG_M		0x70000
+#define CSR_RGMII_TXD_CFG_S		8
+#define CSR_RGMII_TXD_CFG_M		0x700
+#define CSR_RGMII_TXC_CFG_S		0
+#define CSR_RGMII_TXC_CFG_M		0x1f
+
+#define CHIP_REV			0x7ffc
+#define CHIP_NAME_S			16
+#define CHIP_NAME_M			0xffff0000
+#define CHIP_REV_S			0
+#define CHIP_REV_M			0x0f
+
+/* MMD registers */
+#define CORE_PLL_GROUP2			0x401
+#define RG_SYSPLL_EN_NORMAL		BIT(15)
+#define RG_SYSPLL_VODEN			BIT(14)
+#define RG_SYSPLL_POSDIV_S		5
+#define RG_SYSPLL_POSDIV_M		0x60
+
+#define CORE_PLL_GROUP4			0x403
+#define RG_SYSPLL_DDSFBK_EN		BIT(12)
+#define RG_SYSPLL_BIAS_EN		BIT(11)
+#define RG_SYSPLL_BIAS_LPF_EN		BIT(10)
+
+#define CORE_PLL_GROUP5			0x404
+#define RG_LCDDS_PCW_NCPO1_S		0
+#define RG_LCDDS_PCW_NCPO1_M		0xffff
+
+#define CORE_PLL_GROUP6			0x405
+#define RG_LCDDS_PCW_NCPO0_S		0
+#define RG_LCDDS_PCW_NCPO0_M		0xffff
+
+#define CORE_PLL_GROUP7			0x406
+#define RG_LCDDS_PWDB			BIT(15)
+#define RG_LCDDS_ISO_EN			BIT(13)
+#define RG_LCCDS_C_S			4
+#define RG_LCCDS_C_M			0x70
+#define RG_LCDDS_PCW_NCPO_CHG		BIT(3)
+
+#define CORE_PLL_GROUP10		0x409
+#define RG_LCDDS_SSC_DELTA_S		0
+#define RG_LCDDS_SSC_DELTA_M		0xfff
+
+#define CORE_PLL_GROUP11		0x40a
+#define RG_LCDDS_SSC_DELTA1_S		0
+#define RG_LCDDS_SSC_DELTA1_M		0xfff
+
+#define CORE_GSWPLL_GCR_1		0x040d
+#define GSWPLL_PREDIV_S			14
+#define GSWPLL_PREDIV_M			0xc000
+#define GSWPLL_POSTDIV_200M_S		12
+#define GSWPLL_POSTDIV_200M_M		0x3000
+#define GSWPLL_EN_PRE			BIT(11)
+#define GSWPLL_FBKSEL			BIT(10)
+#define GSWPLL_BP			BIT(9)
+#define GSWPLL_BR			BIT(8)
+#define GSWPLL_FBKDIV_200M_S		0
+#define GSWPLL_FBKDIV_200M_M		0xff
+
+#define CORE_GSWPLL_GCR_2		0x040e
+#define GSWPLL_POSTDIV_500M_S		8
+#define GSWPLL_POSTDIV_500M_M		0x300
+#define GSWPLL_FBKDIV_500M_S		0
+#define GSWPLL_FBKDIV_500M_M		0xff
+
+#define TRGMII_GSW_CLK_CG		0x0410
+#define TRGMIICK_EN			BIT(1)
+#define GSWCK_EN			BIT(0)
+
+/* Read a Clause-22 PHY register through the host MDIO bus.
+ * Switch-local PHY indexes (< MT753X_NUM_PHYS) are translated onto the
+ * bus address space using the switch's PHY base address.
+ */
+static int mt7530_mii_read(struct gsw_mt753x *gsw, int phy, int reg)
+{
+	int addr = phy;
+
+	if (addr < MT753X_NUM_PHYS)
+		addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK;
+
+	return mdiobus_read(gsw->host_bus, addr, reg);
+}
+
+/* Write a Clause-22 PHY register through the host MDIO bus.
+ * Switch-local PHY indexes (< MT753X_NUM_PHYS) are translated onto the
+ * bus address space using the switch's PHY base address.
+ */
+static void mt7530_mii_write(struct gsw_mt753x *gsw, int phy, int reg, u16 val)
+{
+	int addr = phy;
+
+	if (addr < MT753X_NUM_PHYS)
+		addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK;
+
+	mdiobus_write(gsw->host_bus, addr, reg, val);
+}
+
+/* Read an MMD register via the Clause-22 indirect access registers.
+ * The four bus operations form one indirect transaction (set address
+ * function + target register, then data function + read), so the whole
+ * sequence is serialized under the host bus mdio_lock.
+ */
+static int mt7530_mmd_read(struct gsw_mt753x *gsw, int addr, int devad, u16 reg)
+{
+	u16 val;
+
+	/* translate switch-local PHY index onto the host bus address */
+	if (addr < MT753X_NUM_PHYS)
+		addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK;
+
+	mutex_lock(&gsw->host_bus->mdio_lock);
+
+	/* function = address, select the MMD device */
+	gsw->host_bus->write(gsw->host_bus, addr, MII_MMD_ACC_CTL_REG,
+			     (MMD_ADDR << MMD_CMD_S) |
+			     ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
+
+	/* latch the target register address */
+	gsw->host_bus->write(gsw->host_bus, addr, MII_MMD_ADDR_DATA_REG, reg);
+
+	/* function = data, same MMD device */
+	gsw->host_bus->write(gsw->host_bus, addr, MII_MMD_ACC_CTL_REG,
+			     (MMD_DATA << MMD_CMD_S) |
+			     ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
+
+	/* fetch the register contents */
+	val = gsw->host_bus->read(gsw->host_bus, addr, MII_MMD_ADDR_DATA_REG);
+
+	mutex_unlock(&gsw->host_bus->mdio_lock);
+
+	return val;
+}
+
+/* Write an MMD register via the Clause-22 indirect access registers.
+ * Mirrors mt7530_mmd_read(): address phase, then data phase, all under
+ * the host bus mdio_lock so the indirect sequence cannot interleave.
+ */
+static void mt7530_mmd_write(struct gsw_mt753x *gsw, int addr, int devad,
+			     u16 reg, u16 val)
+{
+	/* translate switch-local PHY index onto the host bus address */
+	if (addr < MT753X_NUM_PHYS)
+		addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK;
+
+	mutex_lock(&gsw->host_bus->mdio_lock);
+
+	/* function = address, select the MMD device */
+	gsw->host_bus->write(gsw->host_bus, addr, MII_MMD_ACC_CTL_REG,
+		      (MMD_ADDR << MMD_CMD_S) |
+		      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
+
+	/* latch the target register address */
+	gsw->host_bus->write(gsw->host_bus, addr, MII_MMD_ADDR_DATA_REG, reg);
+
+	/* function = data, same MMD device */
+	gsw->host_bus->write(gsw->host_bus, addr, MII_MMD_ACC_CTL_REG,
+		      (MMD_DATA << MMD_CMD_S) |
+		      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
+
+	/* write the new register contents */
+	gsw->host_bus->write(gsw->host_bus, addr, MII_MMD_ADDR_DATA_REG, val);
+
+	mutex_unlock(&gsw->host_bus->mdio_lock);
+}
+
+/* Write an MT7530 core-domain register: these live behind MMD device
+ * 0x1f of PHY address 0 rather than in the normal switch register space.
+ */
+static void mt7530_core_reg_write(struct gsw_mt753x *gsw, u32 reg, u32 val)
+{
+	gsw->mmd_write(gsw, 0, 0x1f, reg, val);
+}
+
+/* Bring up the MT7530 core PLL and clocks for TRGMII operation.
+ * The register values and mdelay() settling points follow a fixed
+ * hardware bring-up sequence — do not reorder.
+ */
+static void mt7530_trgmii_setting(struct gsw_mt753x *gsw)
+{
+	u16 i;
+
+	/* program the LCDDS PCW/SSC dividers for the TRGMII clock rate */
+	mt7530_core_reg_write(gsw, CORE_PLL_GROUP5, 0x0780);
+	mdelay(1);
+	mt7530_core_reg_write(gsw, CORE_PLL_GROUP6, 0);
+	mt7530_core_reg_write(gsw, CORE_PLL_GROUP10, 0x87);
+	mdelay(1);
+	mt7530_core_reg_write(gsw, CORE_PLL_GROUP11, 0x87);
+
+	/* PLL BIAS enable */
+	mt7530_core_reg_write(gsw, CORE_PLL_GROUP4,
+			      RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN);
+	mdelay(1);
+
+	/* PLL LPF enable */
+	mt7530_core_reg_write(gsw, CORE_PLL_GROUP4,
+			      RG_SYSPLL_DDSFBK_EN |
+			      RG_SYSPLL_BIAS_EN | RG_SYSPLL_BIAS_LPF_EN);
+
+	/* sys PLL enable */
+	mt7530_core_reg_write(gsw, CORE_PLL_GROUP2,
+			      RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
+			      (1 << RG_SYSPLL_POSDIV_S));
+
+	/* LCDDDS PWDS */
+	mt7530_core_reg_write(gsw, CORE_PLL_GROUP7,
+			      (3 << RG_LCCDS_C_S) |
+			      RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
+	mdelay(1);
+
+	/* Enable MT7530 TRGMII clock */
+	mt7530_core_reg_write(gsw, TRGMII_GSW_CLK_CG, GSWCK_EN | TRGMIICK_EN);
+
+	/* lower Tx Driving */
+	for (i = 0 ; i < NUM_TRGMII_ODT; i++)
+		mt753x_reg_write(gsw, TRGMII_TD_ODT(i),
+				 (4 << TX_DM_DRVP_S) | (4 << TX_DM_DRVN_S));
+}
+
+/* Configure the MT7530 core PLL groups for RGMII operation on port 6.
+ * Clears TX clock inversion and programs the TX tap/training window in
+ * the (shared) TRGMII clock control register.
+ */
+static void mt7530_rgmii_setting(struct gsw_mt753x *gsw)
+{
+	u32 val;
+
+	mt7530_core_reg_write(gsw, CORE_PLL_GROUP5, 0x0c80);
+	mdelay(1);
+	mt7530_core_reg_write(gsw, CORE_PLL_GROUP6, 0);
+	mt7530_core_reg_write(gsw, CORE_PLL_GROUP10, 0x87);
+	mdelay(1);
+	mt7530_core_reg_write(gsw, CORE_PLL_GROUP11, 0x87);
+
+	/* Do not invert the TX clock in RGMII mode */
+	val = mt753x_reg_read(gsw, TRGMII_TXCTRL);
+	val &= ~TXC_INV;
+	mt753x_reg_write(gsw, TRGMII_TXCTRL, val);
+
+	mt753x_reg_write(gsw, TRGMII_TCK_CTRL,
+			 (8 << TX_TAP_S) | (0x55 << TX_TRAIN_WD_S));
+}
+
+/* Configure the MT7530 MAC ports 5 and 6 from gsw->port5_cfg/port6_cfg.
+ *
+ * Builds the hardware-strap override (MHWSTRAP), the port-6 extended
+ * control (P6ECR) and both port MAC control registers (PMCR), then
+ * commits them in one pass at the end.  A disabled port is left in
+ * FORCE_MODE with its interface disabled in the strap.
+ *
+ * Port 5 may optionally be wired straight to PHY 0 or PHY 4 when the
+ * device tree provides a "phy-address" property; any other address (or
+ * a missing property) keeps the default port5-to-GMAC5 routing and
+ * falls through to the port-6 parsing via the parse_p6 label.
+ *
+ * Returns 0 (no failure paths).
+ */
+static int mt7530_mac_port_setup(struct gsw_mt753x *gsw)
+{
+	u32 hwstrap, p6ecr = 0, p5mcr, p6mcr, phyad;
+
+	hwstrap = mt753x_reg_read(gsw, MHWSTRAP);
+	hwstrap &= ~(P6_INTF_DIS | P5_INTF_MODE_RGMII | P5_INTF_DIS_S);
+	hwstrap |= P5_INTF_SEL_GMAC5;
+	if (!gsw->port5_cfg.enabled) {
+		p5mcr = FORCE_MODE;
+		hwstrap |= P5_INTF_DIS_S;
+	} else {
+		p5mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
+			MAC_MODE | MAC_TX_EN | MAC_RX_EN |
+			BKOFF_EN | BACKPR_EN;
+
+		if (gsw->port5_cfg.force_link) {
+			p5mcr |= FORCE_MODE | FORCE_LINK | FORCE_RX_FC |
+				 FORCE_TX_FC;
+			p5mcr |= gsw->port5_cfg.speed << FORCE_SPD_S;
+
+			if (gsw->port5_cfg.duplex)
+				p5mcr |= FORCE_DPX;
+		}
+
+		switch (gsw->port5_cfg.phy_mode) {
+		case PHY_INTERFACE_MODE_MII:
+		case PHY_INTERFACE_MODE_GMII:
+			break;
+		case PHY_INTERFACE_MODE_RGMII:
+			hwstrap |= P5_INTF_MODE_RGMII;
+			break;
+		default:
+			/* Unsupported mode: fall back to a disabled port */
+			dev_info(gsw->dev, "%s is not supported by port5\n",
+				 phy_modes(gsw->port5_cfg.phy_mode));
+			p5mcr = FORCE_MODE;
+			hwstrap |= P5_INTF_DIS_S;
+		}
+
+		/* Port5 to PHY direct mode */
+		if (of_property_read_u32(gsw->port5_cfg.np, "phy-address",
+					 &phyad))
+			goto parse_p6;
+
+		if (phyad != 0 && phyad != 4) {
+			dev_info(gsw->dev,
+				 "Only PHY 0/4 can be connected to Port 5\n");
+			goto parse_p6;
+		}
+
+		hwstrap &= ~P5_INTF_SEL_GMAC5;
+		if (phyad == 0)
+			hwstrap |= P5_PHY0_SEL;
+		else
+			hwstrap &= ~P5_PHY0_SEL;
+	}
+
+parse_p6:
+	if (!gsw->port6_cfg.enabled) {
+		p6mcr = FORCE_MODE;
+		hwstrap |= P6_INTF_DIS;
+	} else {
+		p6mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
+			MAC_MODE | MAC_TX_EN | MAC_RX_EN |
+			BKOFF_EN | BACKPR_EN;
+
+		if (gsw->port6_cfg.force_link) {
+			p6mcr |= FORCE_MODE | FORCE_LINK | FORCE_RX_FC |
+				 FORCE_TX_FC;
+			p6mcr |= gsw->port6_cfg.speed << FORCE_SPD_S;
+
+			if (gsw->port6_cfg.duplex)
+				p6mcr |= FORCE_DPX;
+		}
+
+		switch (gsw->port6_cfg.phy_mode) {
+		case PHY_INTERFACE_MODE_RGMII:
+			p6ecr = BIT(1);
+			break;
+		case PHY_INTERFACE_MODE_TRGMII:
+			/* set MT7530 central align */
+			p6ecr = BIT(0);
+			break;
+		default:
+			dev_info(gsw->dev, "%s is not supported by port6\n",
+				 phy_modes(gsw->port6_cfg.phy_mode));
+			p6mcr = FORCE_MODE;
+			hwstrap |= P6_INTF_DIS;
+		}
+	}
+
+	/* Commit strap, port-6 extended control, and both PMCRs */
+	mt753x_reg_write(gsw, MHWSTRAP, hwstrap);
+	mt753x_reg_write(gsw, P6ECR, p6ecr);
+
+	mt753x_reg_write(gsw, PMCR(5), p5mcr);
+	mt753x_reg_write(gsw, PMCR(6), p6mcr);
+
+	return 0;
+}
+
+/* Bring up the MT7530 core clock PLL and the port-6 interface clocks.
+ *
+ * Only the 40MHz XTAL strap is handled; other strap values are left as
+ * a TODO.  The PHY access mode strap (C_MDIO_BPS) is re-applied here,
+ * then TRGMII or RGMII clocking is selected from the port-6 config, and
+ * the port-5 RGMII RX/TX delays are programmed last.
+ */
+static void mt7530_core_pll_setup(struct gsw_mt753x *gsw)
+{
+	u32 hwstrap;
+
+	hwstrap = mt753x_reg_read(gsw, HWSTRAP);
+
+	switch ((hwstrap & XTAL_FSEL_M) >> XTAL_FSEL_S) {
+	case XTAL_40MHZ:
+		/* Disable MT7530 core clock */
+		mt7530_core_reg_write(gsw, TRGMII_GSW_CLK_CG, 0);
+
+		/* disable MT7530 PLL */
+		mt7530_core_reg_write(gsw, CORE_GSWPLL_GCR_1,
+				      (2 << GSWPLL_POSTDIV_200M_S) |
+				      (32 << GSWPLL_FBKDIV_200M_S));
+
+		/* For MT7530 core clock = 500Mhz */
+		mt7530_core_reg_write(gsw, CORE_GSWPLL_GCR_2,
+				      (1 << GSWPLL_POSTDIV_500M_S) |
+				      (25 << GSWPLL_FBKDIV_500M_S));
+
+		/* Enable MT7530 PLL */
+		mt7530_core_reg_write(gsw, CORE_GSWPLL_GCR_1,
+				      (2 << GSWPLL_POSTDIV_200M_S) |
+				      (32 << GSWPLL_FBKDIV_200M_S) |
+				      GSWPLL_EN_PRE);
+
+		usleep_range(20, 40);
+
+		/* Enable MT7530 core clock */
+		mt7530_core_reg_write(gsw, TRGMII_GSW_CLK_CG, GSWCK_EN);
+		break;
+	default:
+		/* TODO: PLL settings for 20/25MHz */
+		break;
+	}
+
+	/* Re-assert the PHY access mode override in the strap register */
+	hwstrap = mt753x_reg_read(gsw, HWSTRAP);
+	hwstrap |= CHG_TRAP;
+	if (gsw->direct_phy_access)
+		hwstrap &= ~C_MDIO_BPS_S;
+	else
+		hwstrap |= C_MDIO_BPS_S;
+
+	mt753x_reg_write(gsw, MHWSTRAP, hwstrap);
+
+	if (gsw->port6_cfg.enabled &&
+	    gsw->port6_cfg.phy_mode == PHY_INTERFACE_MODE_TRGMII) {
+		mt7530_trgmii_setting(gsw);
+	} else {
+		/* RGMII */
+		mt7530_rgmii_setting(gsw);
+	}
+
+	/* delay setting for 10/1000M */
+	mt753x_reg_write(gsw, P5RGMIIRXCR,
+			 CSR_RGMII_EDGE_ALIGN |
+			 (2 << CSR_RGMII_RXC_0DEG_CFG_S));
+	mt753x_reg_write(gsw, P5RGMIITXCR, 0x14 << CSR_RGMII_TXC_CFG_S);
+}
+
+/* Probe for an MT7530 by checking the chip-name field of CHIP_REV.
+ * On a match, optionally fill @crev with the revision and name, and
+ * return 0; otherwise return -ENODEV.
+ */
+static int mt7530_sw_detect(struct gsw_mt753x *gsw, struct chip_rev *crev)
+{
+	u32 id;
+
+	id = mt753x_reg_read(gsw, CHIP_REV);
+
+	if (((id & CHIP_NAME_M) >> CHIP_NAME_S) != MT7530)
+		return -ENODEV;
+
+	if (crev) {
+		crev->rev = id & CHIP_REV_M;
+		crev->name = "MT7530";
+	}
+
+	return 0;
+}
+
+/* Apply per-PHY tuning to every internal MT7530 PHY: disable EEE,
+ * enable hardware auto-downshift, adjust DSP/LPI timing and the 100M
+ * MSE threshold, and disable mcc.
+ *
+ * NOTE(review): register 0x1f appears to act as a page-select (pages
+ * 0x1, 0x52b5, 0x3, then restored to 0) — confirm against the PHY
+ * datasheet before reordering these writes.
+ */
+static void mt7530_phy_setting(struct gsw_mt753x *gsw)
+{
+	int i;
+	u32 val;
+
+	for (i = 0; i < MT753X_NUM_PHYS; i++) {
+		/* Disable EEE */
+		gsw->mmd_write(gsw, i, PHY_DEV07, PHY_DEV07_REG_03C, 0);
+
+		/* Enable HW auto downshift */
+		gsw->mii_write(gsw, i, 0x1f, 0x1);
+		val = gsw->mii_read(gsw, i, PHY_EXT_REG_14);
+		val |= PHY_EN_DOWN_SHFIT;
+		gsw->mii_write(gsw, i, PHY_EXT_REG_14, val);
+
+		/* Increase SlvDPSready time */
+		gsw->mii_write(gsw, i, 0x1f, 0x52b5);
+		gsw->mii_write(gsw, i, PHY_TR_REG_10, 0xafae);
+		gsw->mii_write(gsw, i, PHY_TR_REG_12, 0x2f);
+		gsw->mii_write(gsw, i, PHY_TR_REG_10, 0x8fae);
+
+		/* Increase post_update_timer */
+		gsw->mii_write(gsw, i, 0x1f, 0x3);
+		gsw->mii_write(gsw, i, PHY_LPI_REG_11, 0x4b);
+		gsw->mii_write(gsw, i, 0x1f, 0);
+
+		/* Adjust 100_mse_threshold */
+		gsw->mmd_write(gsw, i, PHY_DEV1E, PHY_DEV1E_REG_123, 0xffff);
+
+		/* Disable mcc */
+		gsw->mmd_write(gsw, i, PHY_DEV1E, PHY_DEV1E_REG_A6, 0x300);
+	}
+}
+
+/* Read the DT flag choosing direct (true) vs. indirect PHY access */
+static inline bool get_phy_access_mode(const struct device_node *np)
+{
+	bool direct_access;
+
+	direct_access = of_property_read_bool(np, "mt7530,direct-phy-access");
+
+	return direct_access;
+}
+
+/* One-time MT7530 initialization:
+ *  - select direct or indirect PHY access per the device tree and
+ *    install the matching mii/mmd accessor callbacks,
+ *  - derive the PHY address base from the SMI address strap,
+ *  - power down all PHYs, force both MAC ports link-down, soft-reset
+ *    the switch, and program global MAC and interrupt-output settings,
+ *  - finally run PLL and MAC port setup.
+ *
+ * Returns 0 (no failure paths).
+ */
+static int mt7530_sw_init(struct gsw_mt753x *gsw)
+{
+	int i;
+	u32 val;
+
+	gsw->direct_phy_access = get_phy_access_mode(gsw->dev->of_node);
+
+	/* Force MT7530 to use (in)direct PHY access */
+	val = mt753x_reg_read(gsw, HWSTRAP);
+	val |= CHG_TRAP;
+	if (gsw->direct_phy_access)
+		val &= ~C_MDIO_BPS_S;
+	else
+		val |= C_MDIO_BPS_S;
+	mt753x_reg_write(gsw, MHWSTRAP, val);
+
+	/* Read PHY address base from HWSTRAP */
+	gsw->phy_base  = (((val & SMI_ADDR_M) >> SMI_ADDR_S) << 3) + 8;
+	gsw->phy_base &= MT753X_SMI_ADDR_MASK;
+
+	if (gsw->direct_phy_access) {
+		gsw->mii_read = mt7530_mii_read;
+		gsw->mii_write = mt7530_mii_write;
+		gsw->mmd_read = mt7530_mmd_read;
+		gsw->mmd_write = mt7530_mmd_write;
+	} else {
+		gsw->mii_read = mt753x_mii_read;
+		gsw->mii_write = mt753x_mii_write;
+		gsw->mmd_read = mt753x_mmd_ind_read;
+		gsw->mmd_write = mt753x_mmd_ind_write;
+	}
+
+	/* Power down all PHYs before resetting; post_init powers them up */
+	for (i = 0; i < MT753X_NUM_PHYS; i++) {
+		val = gsw->mii_read(gsw, i, MII_BMCR);
+		val |= BMCR_PDOWN;
+		gsw->mii_write(gsw, i, MII_BMCR, val);
+	}
+
+	/* Force MAC link down before reset */
+	mt753x_reg_write(gsw, PMCR(5), FORCE_MODE);
+	mt753x_reg_write(gsw, PMCR(6), FORCE_MODE);
+
+	/* Switch soft reset */
+	/* BUG: sw reset causes gsw int flooding */
+	mt753x_reg_write(gsw, SYS_CTRL, SW_PHY_RST | SW_SYS_RST | SW_REG_RST);
+	usleep_range(10, 20);
+
+	/* global mac control settings configuration */
+	mt753x_reg_write(gsw, GMACCR,
+			 LATE_COL_DROP | (15 << MTCC_LMT_S) |
+			 (2 << MAX_RX_JUMBO_S) | RX_PKT_LEN_MAX_JUMBO);
+
+	/* Output INTR selected */
+	val = mt753x_reg_read(gsw, TOP_SIG_CTRL);
+	val &= ~OUTPUT_INTR_M;
+	val |= (3 << OUTPUT_INTR_S);
+	mt753x_reg_write(gsw, TOP_SIG_CTRL, val);
+
+	mt7530_core_pll_setup(gsw);
+	mt7530_mac_port_setup(gsw);
+
+	return 0;
+}
+
+/* Post-init: apply per-PHY tuning, then release every internal PHY
+ * from the power-down state entered during mt7530_sw_init().
+ * Returns 0 (no failure paths).
+ */
+static int mt7530_sw_post_init(struct gsw_mt753x *gsw)
+{
+	u32 bmcr;
+	int phy;
+
+	mt7530_phy_setting(gsw);
+
+	for (phy = 0; phy < MT753X_NUM_PHYS; phy++) {
+		bmcr = gsw->mii_read(gsw, phy, MII_BMCR);
+		bmcr &= ~BMCR_PDOWN;
+		gsw->mii_write(gsw, phy, MII_BMCR, bmcr);
+	}
+
+	return 0;
+}
+
+/* MT7530 chip-family descriptor registered with the mt753x core */
+struct mt753x_sw_id mt7530_id = {
+	.model = MT7530,
+	.detect = mt7530_sw_detect,
+	.init = mt7530_sw_init,
+	.post_init = mt7530_sw_post_init
+};
diff --git a/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt7531.c b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt7531.c
new file mode 100755
index 0000000..7253042
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt7531.c
@@ -0,0 +1,1058 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Zhanguo Ju <zhanguo.ju@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/hrtimer.h>
+
+#include "mt753x.h"
+#include "mt753x_regs.h"
+
+/* MT7531 registers */
+#define SGMII_REG_BASE			0x5000
+#define SGMII_REG_PORT_BASE		0x1000
+#define SGMII_REG(p, r)			(SGMII_REG_BASE + \
+					(p) * SGMII_REG_PORT_BASE + (r))
+#define PCS_CONTROL_1(p)		SGMII_REG(p, 0x00)
+#define SGMII_MODE(p)			SGMII_REG(p, 0x20)
+#define QPHY_PWR_STATE_CTRL(p)		SGMII_REG(p, 0xe8)
+#define ANA_CKBG(p)			SGMII_REG(p, 0x100)
+#define ANA_DA_FORCE_MODE1(p)		SGMII_REG(p, 0x110)
+#define PHYA_CTRL_SIGNAL3(p)		SGMII_REG(p, 0x128)
+#define PHYA_ANA_SYSPLL(p)		SGMII_REG(p, 0x158)
+
+/* Fields of PCS_CONTROL_1 */
+#define SGMII_LINK_STATUS		BIT(18)
+#define SGMII_AN_ENABLE			BIT(12)
+#define SGMII_AN_RESTART		BIT(9)
+
+/* Fields of SGMII_MODE */
+#define SGMII_REMOTE_FAULT_DIS		BIT(8)
+#define SGMII_IF_MODE_FORCE_DUPLEX	BIT(4)
+#define SGMII_IF_MODE_FORCE_SPEED_S	0x2
+#define SGMII_IF_MODE_FORCE_SPEED_M	0x0c
+#define SGMII_IF_MODE_ADVERT_AN		BIT(1)
+
+/* Values of SGMII_IF_MODE_FORCE_SPEED */
+#define SGMII_IF_MODE_FORCE_SPEED_10	0
+#define SGMII_IF_MODE_FORCE_SPEED_100	1
+#define SGMII_IF_MODE_FORCE_SPEED_1000	2
+
+/* Fields of QPHY_PWR_STATE_CTRL */
+#define PHYA_PWD			BIT(4)
+
+/* Fields of ANA_CKBG */
+#define SSUSB_PLL_SSC_EN		BIT(21)
+
+/* Fields of ANA_DA_FORCE_MODE1 */
+#define FORCE_PLL_SSC_EN		BIT(30)
+
+/* Fields of PHYA_CTRL_SIGNAL3 */
+#define RG_TPHY_SPEED_S			2
+#define RG_TPHY_SPEED_M			0x0c
+
+/* Values of RG_TPHY_SPEED */
+#define RG_TPHY_SPEED_1000		0
+#define RG_TPHY_SPEED_2500		1
+
+/* Fields of PHYA_ANA_SYSPLL */
+#define RG_VUSB10_ON			BIT(29)
+
+/* Unique fields of (M)HWSTRAP for MT7531 */
+#define XTAL_FSEL_S			7
+#define XTAL_FSEL_M			BIT(7)
+#define PHY_EN				BIT(6)
+#define CHG_STRAP			BIT(8)
+
+/* Efuse Register Define */
+#define GBE_EFUSE			0x7bc8
+#define GBE_SEL_EFUSE_EN		BIT(0)
+
+/* PHY ENABLE Register bitmap define */
+#define PHY_DEV1F			0x1f
+#define PHY_DEV1F_REG_44		0x44
+#define PHY_DEV1F_REG_104		0x104
+#define PHY_DEV1F_REG_10A		0x10a
+#define PHY_DEV1F_REG_10B		0x10b
+#define PHY_DEV1F_REG_10C		0x10c
+#define PHY_DEV1F_REG_10D		0x10d
+#define PHY_DEV1F_REG_268		0x268
+#define PHY_DEV1F_REG_269		0x269
+#define PHY_DEV1F_REG_26A		0x26A
+#define PHY_DEV1F_REG_403		0x403
+
+/* Fields of PHY_DEV1F_REG_403 */
+#define GBE_EFUSE_SETTING		BIT(3)
+#define PHY_EN_BYPASS_MODE		BIT(4)
+#define POWER_ON_OFF			BIT(5)
+#define PHY_PLL_M			GENMASK(9, 8)
+#define PHY_PLL_SEL(x)			(((x) << 8) & GENMASK(9, 8))
+
+/* PHY EEE Register bitmap of define */
+#define PHY_DEV07			0x07
+#define PHY_DEV07_REG_03C		0x3c
+
+/* PHY Extend Register 0x14 bitmap of define */
+#define PHY_EXT_REG_14			0x14
+
+/* Fields of PHY_EXT_REG_14 */
+#define PHY_EN_DOWN_SHFIT		BIT(4)
+
+/* PHY Extend Register 0x17 bitmap of define */
+#define PHY_EXT_REG_17			0x17
+
+/* Fields of PHY_EXT_REG_17 */
+#define PHY_LINKDOWN_POWER_SAVING_EN	BIT(4)
+
+/* PHY PMA Register 0x17 bitmap of define */
+#define SLV_DSP_READY_TIME_S		15
+#define SLV_DSP_READY_TIME_M		(0xff << SLV_DSP_READY_TIME_S)
+
+/* PHY PMA Register 0x18 bitmap of define */
+#define ENABLE_RANDOM_UPDATE_TRIGGER	BIT(8)
+
+/* PHY DEV 0x1e Register bitmap of define */
+#define PHY_DEV1E			0x1e
+#define PHY_TX_MLT3_BASE		0x0
+#define PHY_DEV1E_REG_13		0x13
+#define PHY_DEV1E_REG_14		0x14
+#define PHY_DEV1E_REG_41		0x41
+#define PHY_DEV1E_REG_A6		0xa6
+#define PHY_DEV1E_REG_0C6		0x0c6
+#define PHY_DEV1E_REG_0FE		0x0fe
+#define PHY_DEV1E_REG_123		0x123
+#define PHY_DEV1E_REG_141		0x141
+#define PHY_DEV1E_REG_189		0x189
+#define PHY_DEV1E_REG_234		0x234
+
+/* Fields of PHY_DEV1E_REG_0C6 */
+#define PHY_POWER_SAVING_S		8
+#define PHY_POWER_SAVING_M		0x300
+#define PHY_POWER_SAVING_TX		0x0
+
+/* Fields of PHY_DEV1E_REG_189 */
+#define DESCRAMBLER_CLEAR_EN		0x1
+
+/* Fields of PHY_DEV1E_REG_234 */
+#define TR_OPEN_LOOP_EN			BIT(0)
+
+/* Port debug count register */
+#define DBG_CNT_BASE			0x3018
+#define DBG_CNT_PORT_BASE		0x100
+#define DBG_CNT(p)			(DBG_CNT_BASE + \
+					(p) * DBG_CNT_PORT_BASE)
+#define DIS_CLR				BIT(31)
+
+/* Values of XTAL_FSEL_S */
+#define XTAL_40MHZ			0
+#define XTAL_25MHZ			1
+
+#define PLLGP_EN			0x7820
+#define EN_COREPLL			BIT(2)
+#define SW_CLKSW			BIT(1)
+#define SW_PLLGP			BIT(0)
+
+#define PLLGP_CR0			0x78a8
+#define RG_COREPLL_EN			BIT(22)
+#define RG_COREPLL_POSDIV_S		23
+#define RG_COREPLL_POSDIV_M		0x3800000
+#define RG_COREPLL_SDM_PCW_S		1
+#define RG_COREPLL_SDM_PCW_M		0x3ffffe
+#define RG_COREPLL_SDM_PCW_CHG		BIT(0)
+
+/* TOP Signals Status Register */
+#define TOP_SIG_SR			0x780c
+#define PAD_MCM_SMI_EN			BIT(0)
+#define PAD_DUAL_SGMII_EN		BIT(1)
+
+/* RGMII and SGMII PLL clock */
+#define ANA_PLLGP_CR2			0x78b0
+#define ANA_PLLGP_CR5			0x78bc
+
+/* GPIO mode define */
+#define GPIO_MODE_REGS(x)		(0x7c0c + (((x) / 8) * 4))
+#define GPIO_MODE_S			4
+
+/* GPIO GROUP IOLB SMT0 Control */
+#define SMT0_IOLB			0x7f04
+#define SMT_IOLB_5_SMI_MDC_EN		BIT(5)
+
+/* Unique fields of PMCR for MT7531 */
+#define FORCE_MODE_EEE1G		BIT(25)
+#define FORCE_MODE_EEE100		BIT(26)
+#define FORCE_MODE_TX_FC		BIT(27)
+#define FORCE_MODE_RX_FC		BIT(28)
+#define FORCE_MODE_DPX			BIT(29)
+#define FORCE_MODE_SPD			BIT(30)
+#define FORCE_MODE_LNK			BIT(31)
+#define FORCE_MODE			BIT(15)
+
+#define CHIP_REV			0x781C
+#define CHIP_NAME_S			16
+#define CHIP_NAME_M			0xffff0000
+#define CHIP_REV_S			0
+#define CHIP_REV_M			0x0f
+#define CHIP_REV_E1			0x0
+
+#define CLKGEN_CTRL			0x7500
+#define CLK_SKEW_OUT_S			8
+#define CLK_SKEW_OUT_M			0x300
+#define CLK_SKEW_IN_S			6
+#define CLK_SKEW_IN_M			0xc0
+#define RXCLK_NO_DELAY			BIT(5)
+#define TXCLK_NO_REVERSE		BIT(4)
+#define GP_MODE_S			1
+#define GP_MODE_M			0x06
+#define GP_CLK_EN			BIT(0)
+
+#define CPGC_CTRL			0xB0
+#define COL_EN				BIT(0)
+#define COL_CLK_EN			BIT(1)
+#define COL_RST_N			BIT(2)
+#define COL_BUSY			BIT(3)
+
+/* Values of GP_MODE */
+#define GP_MODE_RGMII			0
+#define GP_MODE_MII			1
+#define GP_MODE_REV_MII			2
+
+/* Values of CLK_SKEW_IN */
+#define CLK_SKEW_IN_NO_CHANGE		0
+#define CLK_SKEW_IN_DELAY_100PPS	1
+#define CLK_SKEW_IN_DELAY_200PPS	2
+#define CLK_SKEW_IN_REVERSE		3
+
+/* Values of CLK_SKEW_OUT */
+#define CLK_SKEW_OUT_NO_CHANGE		0
+#define CLK_SKEW_OUT_DELAY_100PPS	1
+#define CLK_SKEW_OUT_DELAY_200PPS	2
+#define CLK_SKEW_OUT_REVERSE		3
+
+/* Proprietory Control Register of Internal Phy device 0x1e */
+#define RXADC_CONTROL_3			0xc2
+#define RXADC_LDO_CONTROL_2		0xd3
+
+/* Proprietory Control Register of Internal Phy device 0x1f */
+#define TXVLD_DA_271			0x271
+#define TXVLD_DA_272			0x272
+#define TXVLD_DA_273			0x273
+
+/* gpio pinmux pins and functions define */
+static int gpio_int_pins[] = {0};		/* GPIO 0: interrupt */
+static int gpio_int_funcs[] = {1};
+/* MDC/MDIO candidates: index 0 = 1st group (AE), index 1 = 2nd group (BE);
+ * see mt7531_set_gpio_pinmux()
+ */
+static int gpio_mdc_pins[] = {11, 20};
+static int gpio_mdc_funcs[] = {2, 2};
+static int gpio_mdio_pins[] = {12, 21};
+static int gpio_mdio_funcs[] = {2, 2};
+
+/* Configure an MT7531 SGMII port (5 or 6) in force (non-AN) mode.
+ *
+ * port_base (0 for port 5, 1 for port 6) indexes the per-port SGMII
+ * register block.  Unsupported speed values fall back to 1G.  After
+ * programming, the PHYA power-down is released and SGMII_LINK_STATUS
+ * is polled for up to 2 seconds.
+ *
+ * NOTE(review): the poll loop busy-waits with no sleep/cpu_relax for
+ * up to 2s — consider a short usleep_range() per iteration.
+ *
+ * Returns 0 on link-up, -EINVAL for a non-SGMII port, -ETIMEDOUT if
+ * the link never comes up.
+ */
+static int mt7531_set_port_sgmii_force_mode(struct gsw_mt753x *gsw, u32 port,
+					    struct mt753x_port_cfg *port_cfg)
+{
+	u32 speed, port_base, val;
+	ktime_t timeout;
+	u32 timeout_us;
+
+	if (port < 5 || port >= MT753X_NUM_PORTS) {
+		dev_info(gsw->dev, "port %d is not a SGMII port\n", port);
+		return -EINVAL;
+	}
+
+	port_base = port - 5;
+
+	switch (port_cfg->speed) {
+	case MAC_SPD_1000:
+		speed = RG_TPHY_SPEED_1000;
+		break;
+	case MAC_SPD_2500:
+		speed = RG_TPHY_SPEED_2500;
+		break;
+	default:
+		dev_info(gsw->dev, "invalid SGMII speed idx %d for port %d\n",
+			 port_cfg->speed, port);
+
+		speed = RG_TPHY_SPEED_1000;
+	}
+
+	/* Step 1: Speed select register setting */
+	val = mt753x_reg_read(gsw, PHYA_CTRL_SIGNAL3(port_base));
+	val &= ~RG_TPHY_SPEED_M;
+	val |= speed << RG_TPHY_SPEED_S;
+	mt753x_reg_write(gsw, PHYA_CTRL_SIGNAL3(port_base), val);
+
+	/* Step 2 : Disable AN */
+	val = mt753x_reg_read(gsw, PCS_CONTROL_1(port_base));
+	val &= ~SGMII_AN_ENABLE;
+	mt753x_reg_write(gsw, PCS_CONTROL_1(port_base), val);
+
+	/* Step 3: SGMII force mode setting */
+	val = mt753x_reg_read(gsw, SGMII_MODE(port_base));
+	val &= ~SGMII_IF_MODE_ADVERT_AN;
+	val &= ~SGMII_IF_MODE_FORCE_SPEED_M;
+	val |= SGMII_IF_MODE_FORCE_SPEED_1000 << SGMII_IF_MODE_FORCE_SPEED_S;
+	val |= SGMII_IF_MODE_FORCE_DUPLEX;
+	/* For sgmii force mode, 0 is full duplex and 1 is half duplex */
+	if (port_cfg->duplex)
+		val &= ~SGMII_IF_MODE_FORCE_DUPLEX;
+
+	mt753x_reg_write(gsw, SGMII_MODE(port_base), val);
+
+	/* Step 4: XXX: Disable Link partner's AN and set force mode */
+
+	/* Step 5: XXX: Special setting for PHYA ==> reserved for flexible */
+
+	/* Step 6 : Release PHYA power down state */
+	val = mt753x_reg_read(gsw, QPHY_PWR_STATE_CTRL(port_base));
+	val &= ~PHYA_PWD;
+	mt753x_reg_write(gsw, QPHY_PWR_STATE_CTRL(port_base), val);
+
+	/* Step 7 : Polling SGMII_LINK_STATUS */
+	timeout_us = 2000000;
+	timeout = ktime_add_us(ktime_get(), timeout_us);
+	while (1) {
+		val = mt753x_reg_read(gsw, PCS_CONTROL_1(port_base));
+		val &= SGMII_LINK_STATUS;
+
+		if (val)
+			break;
+
+		if (ktime_compare(ktime_get(), timeout) > 0)
+			return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/* Configure an MT7531 SGMII port (5 or 6) in autonegotiation mode.
+ *
+ * port_base (0 for port 5, 1 for port 6) indexes the per-port SGMII
+ * register block; the raw port number must never be used as the index
+ * (SGMII_REG(5, ...) would address far outside the SGMII block).
+ * Unsupported speed values fall back to 1G.  After AN restart,
+ * SGMII_LINK_STATUS is polled for up to 2 seconds.
+ *
+ * Fix: steps 2 and 5 previously indexed SGMII_MODE()/PCS_CONTROL_1()
+ * with `port` instead of `port_base`, writing the wrong registers; they
+ * now use `port_base`, consistent with steps 1 and 7 and with
+ * mt7531_set_port_sgmii_force_mode().
+ *
+ * Returns 0 on link-up, -EINVAL for a non-SGMII port, -ETIMEDOUT if
+ * the link never comes up.
+ */
+static int mt7531_set_port_sgmii_an_mode(struct gsw_mt753x *gsw, u32 port,
+					 struct mt753x_port_cfg *port_cfg)
+{
+	u32 speed, port_base, val;
+	ktime_t timeout;
+	u32 timeout_us;
+
+	if (port < 5 || port >= MT753X_NUM_PORTS) {
+		dev_info(gsw->dev, "port %d is not a SGMII port\n", port);
+		return -EINVAL;
+	}
+
+	port_base = port - 5;
+
+	switch (port_cfg->speed) {
+	case MAC_SPD_1000:
+		speed = RG_TPHY_SPEED_1000;
+		break;
+	case MAC_SPD_2500:
+		speed = RG_TPHY_SPEED_2500;
+		break;
+	default:
+		dev_info(gsw->dev, "invalid SGMII speed idx %d for port %d\n",
+			 port_cfg->speed, port);
+
+		speed = RG_TPHY_SPEED_1000;
+	}
+
+	/* Step 1: Speed select register setting */
+	val = mt753x_reg_read(gsw, PHYA_CTRL_SIGNAL3(port_base));
+	val &= ~RG_TPHY_SPEED_M;
+	val |= speed << RG_TPHY_SPEED_S;
+	mt753x_reg_write(gsw, PHYA_CTRL_SIGNAL3(port_base), val);
+
+	/* Step 2: Remote fault disable */
+	val = mt753x_reg_read(gsw, SGMII_MODE(port_base));
+	val |= SGMII_REMOTE_FAULT_DIS;
+	mt753x_reg_write(gsw, SGMII_MODE(port_base), val);
+
+	/* Step 3: Setting Link partner's AN enable = 1 */
+
+	/* Step 4: Setting Link partner's device ability for speed/duplex */
+
+	/* Step 5: AN re-start */
+	val = mt753x_reg_read(gsw, PCS_CONTROL_1(port_base));
+	val |= SGMII_AN_RESTART;
+	mt753x_reg_write(gsw, PCS_CONTROL_1(port_base), val);
+
+	/* Step 6: Special setting for PHYA ==> reserved for flexible */
+
+	/* Step 7 : Polling SGMII_LINK_STATUS */
+	timeout_us = 2000000;
+	timeout = ktime_add_us(ktime_get(), timeout_us);
+	while (1) {
+		val = mt753x_reg_read(gsw, PCS_CONTROL_1(port_base));
+		val &= SGMII_LINK_STATUS;
+
+		if (val)
+			break;
+
+		if (ktime_compare(ktime_get(), timeout) > 0)
+			return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/* Enable or disable spread-spectrum clocking (SSC) for an SGMII port.
+ * Toggles the SSC enable bit in both PLL control registers of the
+ * port's SGMII unit (port 5 -> unit 0, port 6 -> unit 1).
+ */
+static void mt7531_sgmii_ssc(struct gsw_mt753x *gsw, u32 port, int enable)
+{
+	u32 base = port - 5;
+	u32 reg;
+
+	reg = mt753x_reg_read(gsw, ANA_CKBG(base));
+	if (enable)
+		reg |= SSUSB_PLL_SSC_EN;
+	else
+		reg &= ~SSUSB_PLL_SSC_EN;
+	mt753x_reg_write(gsw, ANA_CKBG(base), reg);
+
+	reg = mt753x_reg_read(gsw, ANA_DA_FORCE_MODE1(base));
+	if (enable)
+		reg |= FORCE_PLL_SSC_EN;
+	else
+		reg &= ~FORCE_PLL_SSC_EN;
+	mt753x_reg_write(gsw, ANA_DA_FORCE_MODE1(base), reg);
+}
+
+/* Program CLKGEN_CTRL for RGMII on port 5 (the only MT7531 port with
+ * an RGMII pad): enable the clock, select RGMII mode, and apply no
+ * TX reverse / RX delay / in-out skew.
+ * Returns 0 on success, -EINVAL for any other port.
+ */
+static int mt7531_set_port_rgmii(struct gsw_mt753x *gsw, u32 port)
+{
+	u32 ctrl;
+
+	if (port != 5) {
+		dev_info(gsw->dev, "RGMII mode is not available for port %d\n",
+			 port);
+		return -EINVAL;
+	}
+
+	ctrl = mt753x_reg_read(gsw, CLKGEN_CTRL);
+	/* Clear all multi-bit fields first, then set every bit in one OR */
+	ctrl &= ~(GP_MODE_M | CLK_SKEW_IN_M | CLK_SKEW_OUT_M);
+	ctrl |= GP_CLK_EN |
+		(GP_MODE_RGMII << GP_MODE_S) |
+		TXCLK_NO_REVERSE |
+		RXCLK_NO_DELAY |
+		(CLK_SKEW_IN_NO_CHANGE << CLK_SKEW_IN_S) |
+		(CLK_SKEW_OUT_NO_CHANGE << CLK_SKEW_OUT_S);
+	mt753x_reg_write(gsw, CLKGEN_CTRL, ctrl);
+
+	return 0;
+}
+
+/* Configure one MT7531 MAC port (5 or 6) from @port_cfg: build the PMCR
+ * value, then apply the interface-specific setup (RGMII or SGMII with
+ * force/AN mode and SSC).  Disabled ports and unsupported PHY modes are
+ * left with only FORCE_MODE_LNK set (link forced down).
+ *
+ * Returns 0, or -EINVAL for a non-MAC port.
+ */
+static int mt7531_mac_port_setup(struct gsw_mt753x *gsw, u32 port,
+				 struct mt753x_port_cfg *port_cfg)
+{
+	u32 pmcr;
+	u32 speed;
+
+	if (port < 5 || port >= MT753X_NUM_PORTS) {
+		dev_info(gsw->dev, "port %d is not a MAC port\n", port);
+		return -EINVAL;
+	}
+
+	if (port_cfg->enabled) {
+		pmcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
+		       MAC_MODE | MAC_TX_EN | MAC_RX_EN |
+		       BKOFF_EN | BACKPR_EN;
+
+		if (port_cfg->force_link) {
+			/* PMCR's speed field 0x11 is reserved,
+			 * sw should set 0x10
+			 */
+			speed = port_cfg->speed;
+			if (port_cfg->speed == MAC_SPD_2500)
+				speed = MAC_SPD_1000;
+
+			pmcr |= FORCE_MODE_LNK | FORCE_LINK |
+				FORCE_MODE_SPD | FORCE_MODE_DPX |
+				FORCE_MODE_RX_FC | FORCE_MODE_TX_FC |
+				FORCE_RX_FC | FORCE_TX_FC |
+				(speed << FORCE_SPD_S);
+
+			if (port_cfg->duplex)
+				pmcr |= FORCE_DPX;
+		}
+	} else {
+		pmcr = FORCE_MODE_LNK;
+	}
+
+	switch (port_cfg->phy_mode) {
+	case PHY_INTERFACE_MODE_RGMII:
+		mt7531_set_port_rgmii(gsw, port);
+		break;
+	case PHY_INTERFACE_MODE_SGMII:
+		if (port_cfg->force_link)
+			mt7531_set_port_sgmii_force_mode(gsw, port, port_cfg);
+		else
+			mt7531_set_port_sgmii_an_mode(gsw, port, port_cfg);
+
+		mt7531_sgmii_ssc(gsw, port, port_cfg->ssc_on);
+		break;
+	default:
+		if (port_cfg->enabled)
+			dev_info(gsw->dev, "%s is not supported by port %d\n",
+				 phy_modes(port_cfg->phy_mode), port);
+
+		pmcr = FORCE_MODE_LNK;
+	}
+
+	mt753x_reg_write(gsw, PMCR(port), pmcr);
+
+	return 0;
+}
+
+/* Bring up the MT7531 core PLL (COREPLL) to a 500MHz core clock.
+ *
+ * On revisions after E1 the crystal frequency is inferred from
+ * TOP_SIG_SR (PAD_MCM_SMI_EN set => 40MHz); on E1 it comes from the
+ * HWSTRAP XTAL_FSEL field.  Dual-SGMII (AE) parts need no additional
+ * setting and return early.
+ *
+ * The programming sequence is identical for 25MHz and 40MHz crystals;
+ * only the SDM_PCW feedback divider word differs, so the previously
+ * duplicated switch arms are folded into one sequence with @pcw chosen
+ * up front (behavior unchanged, including the no-op for an unknown
+ * strap value).
+ */
+static void mt7531_core_pll_setup(struct gsw_mt753x *gsw)
+{
+	u32 val;
+	u32 top_sig;
+	u32 hwstrap;
+	u32 xtal;
+	u32 pcw;
+
+	val = mt753x_reg_read(gsw, CHIP_REV);
+	top_sig = mt753x_reg_read(gsw, TOP_SIG_SR);
+	hwstrap = mt753x_reg_read(gsw, HWSTRAP);
+	if ((val & CHIP_REV_M) > 0)
+		xtal = (top_sig & PAD_MCM_SMI_EN) ? XTAL_40MHZ : XTAL_25MHZ;
+	else
+		xtal = (hwstrap & XTAL_FSEL_M) >> XTAL_FSEL_S;
+
+	/* dump HW strap and XTAL */
+	dev_info(gsw->dev, "HWSTRAP=0x%x XTAL=%dMHz\n", hwstrap,
+		 (xtal == XTAL_25MHZ) ? 25 : 40);
+
+	/* Only BE needs additional setting */
+	if (top_sig & PAD_DUAL_SGMII_EN)
+		return;
+
+	/* Disable Port5 SGMII clearly */
+	val = mt753x_reg_read(gsw, PHYA_ANA_SYSPLL(0));
+	val &= ~RG_VUSB10_ON;
+	mt753x_reg_write(gsw, PHYA_ANA_SYSPLL(0), val);
+
+	/* Pick the feedback divider word for the crystal frequency */
+	switch (xtal) {
+	case XTAL_25MHZ:
+		pcw = 0x140000;
+		break;
+	case XTAL_40MHZ:
+		pcw = 0x190000;
+		break;
+	default:
+		/* Unknown strap: leave the PLL untouched (as before) */
+		return;
+	}
+
+	/* Step 1 : Disable MT7531 COREPLL */
+	val = mt753x_reg_read(gsw, PLLGP_EN);
+	val &= ~EN_COREPLL;
+	mt753x_reg_write(gsw, PLLGP_EN, val);
+
+	/* Step 2: switch to XTAL output */
+	val = mt753x_reg_read(gsw, PLLGP_EN);
+	val |= SW_CLKSW;
+	mt753x_reg_write(gsw, PLLGP_EN, val);
+
+	val = mt753x_reg_read(gsw, PLLGP_CR0);
+	val &= ~RG_COREPLL_EN;
+	mt753x_reg_write(gsw, PLLGP_CR0, val);
+
+	/* Step 3: disable PLLGP and enable program PLLGP */
+	val = mt753x_reg_read(gsw, PLLGP_EN);
+	val |= SW_PLLGP;
+	mt753x_reg_write(gsw, PLLGP_EN, val);
+
+	/* Step 4: program COREPLL output frequency to 500MHz */
+	val = mt753x_reg_read(gsw, PLLGP_CR0);
+	val &= ~RG_COREPLL_POSDIV_M;
+	val |= 2 << RG_COREPLL_POSDIV_S;
+	mt753x_reg_write(gsw, PLLGP_CR0, val);
+	usleep_range(25, 35);
+
+	val = mt753x_reg_read(gsw, PLLGP_CR0);
+	val &= ~RG_COREPLL_SDM_PCW_M;
+	val |= pcw << RG_COREPLL_SDM_PCW_S;
+	mt753x_reg_write(gsw, PLLGP_CR0, val);
+
+	/* Set feedback divide ratio update signal to high */
+	val = mt753x_reg_read(gsw, PLLGP_CR0);
+	val |= RG_COREPLL_SDM_PCW_CHG;
+	mt753x_reg_write(gsw, PLLGP_CR0, val);
+	/* Wait for at least 16 XTAL clocks */
+	usleep_range(10, 20);
+
+	/* Step 5: set feedback divide ratio update signal to low */
+	val = mt753x_reg_read(gsw, PLLGP_CR0);
+	val &= ~RG_COREPLL_SDM_PCW_CHG;
+	mt753x_reg_write(gsw, PLLGP_CR0, val);
+
+	/* Enable 325M clock for SGMII */
+	mt753x_reg_write(gsw, ANA_PLLGP_CR5, 0xad0000);
+
+	/* Enable 250SSC clock for RGMII */
+	mt753x_reg_write(gsw, ANA_PLLGP_CR2, 0x4f40000);
+
+	/* Step 6: Enable MT7531 PLL */
+	val = mt753x_reg_read(gsw, PLLGP_CR0);
+	val |= RG_COREPLL_EN;
+	mt753x_reg_write(gsw, PLLGP_CR0, val);
+
+	val = mt753x_reg_read(gsw, PLLGP_EN);
+	val |= EN_COREPLL;
+	mt753x_reg_write(gsw, PLLGP_EN, val);
+	usleep_range(25, 35);
+}
+
+/* Placeholder: internal PHY calibration is not implemented for MT7531
+ * in this version; always reports success.
+ */
+static int mt7531_internal_phy_calibration(struct gsw_mt753x *gsw)
+{
+	return 0;
+}
+
+/* Probe for an MT7531 via the chip-name field of CHIP_REV.  On a
+ * match, optionally fill @crev; the variant name is derived from the
+ * dual-SGMII pad strap (AE has both SGMII units, BE does not).
+ * Returns 0 on match, -ENODEV otherwise.
+ */
+static int mt7531_sw_detect(struct gsw_mt753x *gsw, struct chip_rev *crev)
+{
+	u32 id, topsig;
+
+	id = mt753x_reg_read(gsw, CHIP_REV);
+
+	if (((id & CHIP_NAME_M) >> CHIP_NAME_S) != MT7531)
+		return -ENODEV;
+
+	if (crev) {
+		topsig = mt753x_reg_read(gsw, TOP_SIG_SR);
+
+		crev->rev = id & CHIP_REV_M;
+		crev->name = topsig & PAD_DUAL_SGMII_EN ?
+			     "MT7531AE" : "MT7531BE";
+	}
+
+	return 0;
+}
+
+/* Select the function @mode for GPIO pin @pin.  Each GPIO_MODE register
+ * packs eight 4-bit function fields (GPIO_MODE_S bits per pin).
+ */
+static void pinmux_set_mux_7531(struct gsw_mt753x *gsw, u32 pin, u32 mode)
+{
+	u32 shift = (pin & 7) * GPIO_MODE_S;
+	u32 val;
+
+	val = mt753x_reg_read(gsw, GPIO_MODE_REGS(pin));
+	val &= ~(0xf << shift);
+	val |= mode << shift;
+	mt753x_reg_write(gsw, GPIO_MODE_REGS(pin), val);
+}
+
+/* Apply GPIO pinmux settings: always route GPIO 0 to interrupt mode,
+ * and optionally route an MDC/MDIO pin pair for MDIO-master operation
+ * when the DT property "mediatek,mdio_master_pinmux" selects group 1
+ * (AE) or 2 (BE).  A missing property leaves group at 0 (do nothing) —
+ * the ignored of_property_read_u32() return is deliberate.
+ *
+ * Returns 0 (no failure paths).
+ */
+static int mt7531_set_gpio_pinmux(struct gsw_mt753x *gsw)
+{
+	u32 group = 0;
+	struct device_node *np = gsw->dev->of_node;
+
+	/* Set GPIO 0 interrupt mode */
+	pinmux_set_mux_7531(gsw, gpio_int_pins[0], gpio_int_funcs[0]);
+
+	of_property_read_u32(np, "mediatek,mdio_master_pinmux", &group);
+
+	/* group = 0: do nothing, 1: 1st group (AE), 2: 2nd group (BE) */
+	if (group > 0 && group <= 2) {
+		group--;
+		pinmux_set_mux_7531(gsw, gpio_mdc_pins[group],
+				    gpio_mdc_funcs[group]);
+		pinmux_set_mux_7531(gsw, gpio_mdio_pins[group],
+				    gpio_mdio_funcs[group]);
+	}
+
+	return 0;
+}
+
+/* Program the internal PHY PLL on MT7531 E1 only (later revisions skip
+ * this — see the early return on CHIP_REV_M > 0).  The divider and
+ * compensation values depend on the XTAL strap (25MHz vs 40MHz); the
+ * PLL is then power-cycled via PHY_DEV1F_REG_10D to latch the setting.
+ * All writes go through MMD device 0x1f of PHY 0.
+ */
+static void mt7531_phy_pll_setup(struct gsw_mt753x *gsw)
+{
+	u32 hwstrap;
+	u32 val;
+
+	val = mt753x_reg_read(gsw, CHIP_REV);
+	if ((val & CHIP_REV_M) > 0)
+		return;
+
+	hwstrap = mt753x_reg_read(gsw, HWSTRAP);
+
+	switch ((hwstrap & XTAL_FSEL_M) >> XTAL_FSEL_S) {
+	case XTAL_25MHZ:
+		/* disable pll auto calibration */
+		gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_104, 0x608);
+
+		/* change pll sel */
+		val = gsw->mmd_read(gsw, 0, PHY_DEV1F,
+				     PHY_DEV1F_REG_403);
+		val &= ~(PHY_PLL_M);
+		val |= PHY_PLL_SEL(3);
+		gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_403, val);
+
+		/* set divider ratio */
+		gsw->mmd_write(gsw, 0, PHY_DEV1F,
+			       PHY_DEV1F_REG_10A, 0x1009);
+
+		/* set divider ratio */
+		gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_10B, 0x7c6);
+
+		/* capacitance and resistance adjustment */
+		gsw->mmd_write(gsw, 0, PHY_DEV1F,
+			       PHY_DEV1F_REG_10C, 0xa8be);
+
+		break;
+	case XTAL_40MHZ:
+		/* disable pll auto calibration */
+		gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_104, 0x608);
+
+		/* change pll sel */
+		val = gsw->mmd_read(gsw, 0, PHY_DEV1F,
+				     PHY_DEV1F_REG_403);
+		val &= ~(PHY_PLL_M);
+		val |= PHY_PLL_SEL(3);
+		gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_403, val);
+
+		/* set divider ratio */
+		gsw->mmd_write(gsw, 0, PHY_DEV1F,
+			       PHY_DEV1F_REG_10A, 0x1018);
+
+		/* set divider ratio */
+		gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_10B, 0xc676);
+
+		/* capacitance and resistance adjustment */
+		gsw->mmd_write(gsw, 0, PHY_DEV1F,
+			       PHY_DEV1F_REG_10C, 0xd8be);
+		break;
+	}
+
+	/* power down pll. additional delay is not required via mdio access */
+	gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_10D, 0x10);
+
+	/* power up pll */
+	gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_10D, 0x14);
+}
+
+/* 12 registers for TX_MLT3 waveform tuning.
+ *    012 345 678 9ab
+ *  1    __
+ *     _/  \_
+ *  0_/      \
+ *            \_    _/
+ * -1           \__/
+ */
+static void mt7531_phy_100m_eye_diag_setting(struct gsw_mt753x *gsw, u32 port)
+{
+	/* One coefficient per consecutive register from PHY_TX_MLT3_BASE */
+	static const u16 tx_mlt3_coef[] = {
+		0x187, 0x1c9, 0x1c6, 0x182, 0x208, 0x205,
+		0x384, 0x3cb, 0x3c4, 0x30a, 0x00b, 0x002,
+	};
+	u32 i;
+
+	for (i = 0; i < ARRAY_SIZE(tx_mlt3_coef); i++)
+		gsw->mmd_write(gsw, port, PHY_DEV1E,
+			       PHY_TX_MLT3_BASE + i, tx_mlt3_coef[i]);
+}
+
+/* Apply per-PHY tuning to every internal MT7531 PHY: 100M eye-diagram
+ * coefficients, HW auto-downshift, DSP timing-recovery tweaks (via the
+ * token-ring access helpers), link-down power saving, and asymmetric
+ * pause advertisement.
+ */
+static void mt7531_phy_setting(struct gsw_mt753x *gsw)
+{
+	int i;
+	u32 val;
+
+	for (i = 0; i < MT753X_NUM_PHYS; i++) {
+		mt7531_phy_100m_eye_diag_setting(gsw, i);
+
+		/* Enable HW auto downshift */
+		gsw->mii_write(gsw, i, 0x1f, 0x1);
+		val = gsw->mii_read(gsw, i, PHY_EXT_REG_14);
+		val |= PHY_EN_DOWN_SHFIT;
+		gsw->mii_write(gsw, i, PHY_EXT_REG_14, val);
+
+		/* Decrease SlvDPSready time */
+		val = mt753x_tr_read(gsw, i, PMA_CH, PMA_NOD, PMA_17);
+		val &= ~SLV_DSP_READY_TIME_M;
+		val |= 0xc << SLV_DSP_READY_TIME_S;
+		mt753x_tr_write(gsw, i, PMA_CH, PMA_NOD, PMA_17, val);
+
+		/* Enable Random Update Mechanism */
+		val = mt753x_tr_read(gsw, i, PMA_CH, PMA_NOD, PMA_18);
+		val |= ENABLE_RANDOM_UPDATE_TRIGGER;
+		mt753x_tr_write(gsw, i, PMA_CH, PMA_NOD, PMA_18, val);
+
+		/* PHY link down power saving enable */
+		val = gsw->mii_read(gsw, i, PHY_EXT_REG_17);
+		val |= PHY_LINKDOWN_POWER_SAVING_EN;
+		gsw->mii_write(gsw, i, PHY_EXT_REG_17, val);
+
+		val = gsw->mmd_read(gsw, i, PHY_DEV1E, PHY_DEV1E_REG_0C6);
+		val &= ~PHY_POWER_SAVING_M;
+		val |= PHY_POWER_SAVING_TX << PHY_POWER_SAVING_S;
+		gsw->mmd_write(gsw, i, PHY_DEV1E, PHY_DEV1E_REG_0C6, val);
+
+		/* Timing Recovery for GbE slave mode */
+		mt753x_tr_write(gsw, i, PMA_CH, PMA_NOD, PMA_01, 0x6fb90a);
+		mt753x_tr_write(gsw, i, DSP_CH, DSP_NOD, DSP_06, 0x2ebaef);
+		val = gsw->mmd_read(gsw, i, PHY_DEV1E, PHY_DEV1E_REG_234);
+		val |= TR_OPEN_LOOP_EN;
+		gsw->mmd_write(gsw, i, PHY_DEV1E, PHY_DEV1E_REG_234, val);
+
+		/* Enable Asymmetric Pause Capability */
+		val = gsw->mii_read(gsw, i, MII_ADVERTISE);
+		val |= ADVERTISE_PAUSE_ASYM;
+		gsw->mii_write(gsw, i, MII_ADVERTISE, val);
+	}
+}
+
+/* Analog line-driver tuning for one internal PHY port: ADC timing,
+ * driver currents/gain, RX echo filter, HVGA bias, class-AB drivers,
+ * DAC delays and compensation cap. Same registers, same values and
+ * the same write order as before, expressed as a table walk.
+ */
+static void mt7531_adjust_line_driving(struct gsw_mt753x *gsw, u32 port)
+{
+	static const struct {
+		u16 devad;
+		u16 reg;
+		u16 val;
+	} ld_tune[] = {
+		/* ADC timing margin window for LDO calibration */
+		{ PHY_DEV1E, RXADC_LDO_CONTROL_2,  0x2222 },
+		/* AD sample timing */
+		{ PHY_DEV1E, RXADC_CONTROL_3,      0x4444 },
+		/* Line driver current for different modes */
+		{ PHY_DEV1F, TXVLD_DA_271,         0x2ca5 },
+		{ PHY_DEV1F, TXVLD_DA_272,         0xc6b  },
+		/* Line driver gain for 10BT from 1000BT calibration result */
+		{ PHY_DEV1F, TXVLD_DA_273,         0x3000 },
+		/* RX echo path filter */
+		{ PHY_DEV1E, PHY_DEV1E_REG_0FE,    0x2    },
+		/* RX HVGA bias current */
+		{ PHY_DEV1E, PHY_DEV1E_REG_41,     0x3333 },
+		/* TX class AB drivers 1 and 2 */
+		{ PHY_DEV1F, PHY_DEV1F_REG_268,    0x384  },
+		{ PHY_DEV1F, PHY_DEV1F_REG_269,    0x1114 },
+		/* DAC delay for TX pairs */
+		{ PHY_DEV1E, PHY_DEV1E_REG_13,     0x404  },
+		{ PHY_DEV1E, PHY_DEV1E_REG_14,     0x404  },
+		/* DAC digital delay for TX delay */
+		{ PHY_DEV1F, PHY_DEV1F_REG_44,     0xc0   },
+		/* Line driver compensation cap (stability vs. higher current) */
+		{ PHY_DEV1F, PHY_DEV1F_REG_26A,    0x3333 },
+	};
+	u32 i;
+
+	for (i = 0; i < ARRAY_SIZE(ld_tune); i++)
+		gsw->mmd_write(gsw, port, ld_tune[i].devad, ld_tune[i].reg,
+			       ld_tune[i].val);
+}
+
+/* Disable Energy-Efficient Ethernet on one internal PHY and apply the
+ * related DSP workarounds (descrambler clear, EEE slave-mode rollback,
+ * 100M MSE threshold, mcc off).
+ * NOTE(review): 0x1e/0x2d1 below are raw devad/register numbers with no
+ * named macro — confirm which register this is against the datasheet.
+ */
+static void mt7531_eee_setting(struct gsw_mt753x *gsw, u32 port)
+{
+	u32 val;
+
+	/* Disable EEE */
+	gsw->mmd_write(gsw, port, PHY_DEV07, PHY_DEV07_REG_03C, 0);
+
+	/* Disable generate signal to clear the scramble_lock when lpi mode */
+	val = gsw->mmd_read(gsw, port, PHY_DEV1E, PHY_DEV1E_REG_189);
+	val &= ~DESCRAMBLER_CLEAR_EN;
+	gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_DEV1E_REG_189, val);
+
+	/* Roll back EEE Slave Mode */
+	gsw->mmd_write(gsw, port, 0x1e, 0x2d1, 0);
+	mt753x_tr_write(gsw, port, DSP_CH, DSP_NOD, DSP_08, 0x1b);
+	mt753x_tr_write(gsw, port, DSP_CH, DSP_NOD, DSP_0f, 0);
+	mt753x_tr_write(gsw, port, DSP_CH, DSP_NOD, DSP_10, 0x5000);
+
+	/* Adjust 100_mse_threshold */
+	gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_DEV1E_REG_123, 0xffff);
+
+	/* Disable mcc */
+	gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_DEV1E_REG_A6, 0x300);
+}
+
+/* Toggle the AFIFO counter auto-clear bit on every port's debug
+ * counter register: enable != 0 clears DIS_CLR, enable == 0 sets it.
+ * Single read-modify-write loop instead of the duplicated branches.
+ */
+static void mt7531_afifo_reset(struct gsw_mt753x *gsw, int enable)
+{
+	u32 val;
+	int p;
+
+	for (p = 0; p < MT753X_NUM_PORTS; p++) {
+		val = mt753x_reg_read(gsw, DBG_CNT(p));
+		if (enable)
+			val &= ~DIS_CLR;
+		else
+			val |= DIS_CLR;
+		mt753x_reg_write(gsw, DBG_CNT(p), val);
+	}
+}
+
+/* One-time MT7531 bring-up: install the direct MDIO accessors, isolate
+ * the internal PHYs, soft-reset the switch core, then configure pinmux,
+ * core PLL, MAC ports 5/6, jumbo-frame limits, collision poll and the
+ * AFIFO workaround. Always returns 0.
+ */
+static int mt7531_sw_init(struct gsw_mt753x *gsw)
+{
+	int i;
+	u32 val;
+
+	/* Internal PHYs live at smi_addr+1.. on the host bus; this base is
+	 * used by the mii/mmd accessors to remap addresses 0..4.
+	 */
+	gsw->phy_base = (gsw->smi_addr + 1) & MT753X_SMI_ADDR_MASK;
+
+	gsw->mii_read = mt753x_mii_read;
+	gsw->mii_write = mt753x_mii_write;
+	gsw->mmd_read = mt753x_mmd_read;
+	gsw->mmd_write = mt753x_mmd_write;
+
+	gsw->hw_phy_cal = of_property_read_bool(gsw->dev->of_node, "mediatek,hw_phy_cal");
+
+	/* Isolate internal PHYs so no link comes up during initialization */
+	for (i = 0; i < MT753X_NUM_PHYS; i++) {
+		val = gsw->mii_read(gsw, i, MII_BMCR);
+		val |= BMCR_ISOLATE;
+		gsw->mii_write(gsw, i, MII_BMCR, val);
+	}
+
+	/* Force MAC link down before reset */
+	mt753x_reg_write(gsw, PMCR(5), FORCE_MODE_LNK);
+	mt753x_reg_write(gsw, PMCR(6), FORCE_MODE_LNK);
+
+	/* Switch soft reset */
+	mt753x_reg_write(gsw, SYS_CTRL, SW_SYS_RST | SW_REG_RST);
+	usleep_range(10, 20);
+
+	/* Enable MDC input Schmitt Trigger */
+	val = mt753x_reg_read(gsw, SMT0_IOLB);
+	mt753x_reg_write(gsw, SMT0_IOLB, val | SMT_IOLB_5_SMI_MDC_EN);
+
+	/* Set 7531 gpio pinmux */
+	mt7531_set_gpio_pinmux(gsw);
+
+	mt7531_core_pll_setup(gsw);
+	mt7531_mac_port_setup(gsw, 5, &gsw->port5_cfg);
+	mt7531_mac_port_setup(gsw, 6, &gsw->port6_cfg);
+
+	/* Global mac control settings */
+	mt753x_reg_write(gsw, GMACCR,
+			 (15 << MTCC_LMT_S) | (15 << MAX_RX_JUMBO_S) |
+			 RX_PKT_LEN_MAX_JUMBO);
+
+	/* Enable Collision Poll */
+	/* Sequential writes: clock on, then release reset, then enable */
+	val = mt753x_reg_read(gsw, CPGC_CTRL);
+	val |= COL_CLK_EN;
+	mt753x_reg_write(gsw, CPGC_CTRL, val);
+	val |= COL_RST_N;
+	mt753x_reg_write(gsw, CPGC_CTRL, val);
+	val |= COL_EN;
+	mt753x_reg_write(gsw, CPGC_CTRL, val);
+
+	/* Disable AFIFO reset for extra short IPG */
+	mt7531_afifo_reset(gsw, 0);
+
+	return 0;
+}
+
+/* Second-stage MT7531 init, run after MDIO/swconfig registration:
+ * put PHYs into constant-Tx mode, power-cycle them around the PHY PLL
+ * setup, apply per-PHY tuning, de-isolate, then line-driver/EEE
+ * adjustments and final calibration. Always returns 0.
+ */
+static int mt7531_sw_post_init(struct gsw_mt753x *gsw)
+{
+	int i;
+	u32 val;
+
+	/* Let internal PHYs only Tx constant data in configure stage. */
+	for (i = 0; i < MT753X_NUM_PHYS; i++)
+		gsw->mmd_write(gsw, i, PHY_DEV1E, PHY_DEV1E_REG_141, 0x200);
+
+	/* Internal PHYs might be enabled by HW Bootstrapping, or bootloader.
+	 * Turn off PHYs before setup PHY PLL.
+	 */
+	val = gsw->mmd_read(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_403);
+	val |= PHY_EN_BYPASS_MODE;
+	val |= POWER_ON_OFF;
+	gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_403, val);
+
+	mt7531_phy_pll_setup(gsw);
+
+	/* Enable Internal PHYs before phy setting */
+	val = gsw->mmd_read(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_403);
+	val |= PHY_EN_BYPASS_MODE;
+	val &= ~POWER_ON_OFF;
+	gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_403, val);
+
+	mt7531_phy_setting(gsw);
+
+	/* Release the BMCR isolate bit set in mt7531_sw_init() */
+	for (i = 0; i < MT753X_NUM_PHYS; i++) {
+		val = gsw->mii_read(gsw, i, MII_BMCR);
+		val &= ~BMCR_ISOLATE;
+		gsw->mii_write(gsw, i, MII_BMCR, val);
+	}
+
+	for (i = 0; i < MT753X_NUM_PHYS; i++) {
+		mt7531_adjust_line_driving(gsw, i);
+		mt7531_eee_setting(gsw, i);
+	}
+
+	/* Restore internal PHYs normal Tx function after configure stage. */
+	for (i = 0; i < MT753X_NUM_PHYS; i++)
+		gsw->mmd_write(gsw, i, PHY_DEV1E, PHY_DEV1E_REG_141, 0x0);
+
+	mt7531_internal_phy_calibration(gsw);
+
+	return 0;
+}
+
+/* MT7531 hooks picked up by the common mt753x probe code (see the
+ * mt753x_sw_ids[] table in mt753x_mdio.c).
+ */
+struct mt753x_sw_id mt7531_id = {
+	.model = MT7531,
+	.detect = mt7531_sw_detect,
+	.init = mt7531_sw_init,
+	.post_init = mt7531_sw_post_init
+};
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Zhanguo Ju <zhanguo.ju@mediatek.com>");
+MODULE_DESCRIPTION("Driver for MediaTek MT753x Gigabit Switch");
diff --git a/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x.h b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x.h
new file mode 100755
index 0000000..732bda1
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Weijie Gao <weijie.gao@mediatek.com>
+ */
+
+#ifndef _MT753X_H_
+#define _MT753X_H_
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/of_mdio.h>
+#include <linux/workqueue.h>
+#include <linux/gpio/consumer.h>
+
+#ifdef CONFIG_SWCONFIG
+#include <linux/switch.h>
+#endif
+
+#include "mt753x_vlan.h"
+
+#define MT753X_DFL_CPU_PORT	6
+#define MT753X_NUM_PHYS		5
+
+#define MT753X_DFL_SMI_ADDR	0x1f
+#define MT753X_SMI_ADDR_MASK	0x1f
+
+struct gsw_mt753x;
+
+/* Supported switch models; values match the chip's numeric name. */
+enum mt753x_model {
+	MT7530 = 0x7530,
+	MT7531 = 0x7531
+};
+
+/* Per-MAC-port (ports 5/6) configuration parsed from the device tree
+ * by mt753x_load_port_cfg().
+ */
+struct mt753x_port_cfg {
+	struct device_node *np;		/* the "mediatek,mt753x-port" node */
+	int phy_mode;			/* from of_get_phy_mode() */
+	u32 enabled: 1;			/* a valid DT node was found */
+	u32 force_link: 1;		/* fixed-link child present */
+	u32 speed: 2;			/* MAC_SPD_* encoding */
+	u32 duplex: 1;			/* 1 = full duplex (fixed-link) */
+	bool ssc_on;			/* "mediatek,ssc-on" property */
+	bool stag_on;			/* "mediatek,stag-on" property */
+};
+
+/* One internal PHY, wrapped in a dummy netdev so phylib can drive
+ * link-change callbacks for it (see mt753x_connect_internal_phys()).
+ */
+struct mt753x_phy {
+	struct gsw_mt753x *gsw;		/* owning switch instance */
+	struct net_device netdev;	/* dummy netdev for phylib */
+	struct phy_device *phydev;	/* NULL when not connected */
+};
+
+/* Runtime state for one MT753x switch instance. Instances are kept on
+ * the global mt753x_devs list and looked up by id.
+ */
+struct gsw_mt753x {
+	u32 id;				/* assigned at registration */
+
+	struct device *dev;
+	struct mii_bus *host_bus;	/* bus the switch hangs off */
+	struct mii_bus *gphy_bus;	/* bus exposing the internal PHYs */
+	struct mutex mii_lock;	/* MII access lock */
+	u32 smi_addr;			/* switch SMI address on host_bus */
+	u32 phy_base;			/* first internal-PHY address */
+	int direct_phy_access;
+
+	enum mt753x_model model;
+	const char *name;		/* chip revision name from detect() */
+
+	struct mt753x_port_cfg port5_cfg;
+	struct mt753x_port_cfg port6_cfg;
+
+	bool hw_phy_cal;		/* "mediatek,hw_phy_cal" DT flag */
+	bool phy_status_poll;		/* "mediatek,phy-poll" DT flag */
+	struct mt753x_phy phys[MT753X_NUM_PHYS];
+//	int phy_irqs[PHY_MAX_ADDR]; //FIXME 
+
+	int phy_link_sts;
+
+	int irq;			/* platform IRQ, < 0 if absent */
+	int reset_pin;			/* reset GPIO, -1 when MCM reset */
+	struct work_struct irq_worker;
+
+#ifdef CONFIG_SWCONFIG
+	struct switch_dev swdev;
+	u32 cpu_port;
+#endif
+
+	int global_vlan_enable;
+	struct mt753x_vlan_entry vlan_entries[MT753X_NUM_VLANS];
+	struct mt753x_port_entry port_entries[MT753X_NUM_PORTS];
+
+	/* Model-specific register accessors, installed by the init() hook */
+	int (*mii_read)(struct gsw_mt753x *gsw, int phy, int reg);
+	void (*mii_write)(struct gsw_mt753x *gsw, int phy, int reg, u16 val);
+
+	int (*mmd_read)(struct gsw_mt753x *gsw, int addr, int devad, u16 reg);
+	void (*mmd_write)(struct gsw_mt753x *gsw, int addr, int devad, u16 reg,
+			  u16 val);
+
+	struct list_head list;		/* entry in mt753x_devs */
+};
+
+/* Chip revision info filled in by a model's detect() hook. */
+struct chip_rev {
+	const char *name;
+	u32 rev;
+};
+
+/* Per-model hooks; each supported chip provides one instance and the
+ * probe code calls detect() on each until one succeeds.
+ */
+struct mt753x_sw_id {
+	enum mt753x_model model;
+	int (*detect)(struct gsw_mt753x *gsw, struct chip_rev *crev);
+	int (*init)(struct gsw_mt753x *gsw);
+	int (*post_init)(struct gsw_mt753x *gsw);
+};
+
+extern struct list_head mt753x_devs;
+
+struct gsw_mt753x *mt753x_get_gsw(u32 id);
+struct gsw_mt753x *mt753x_get_first_gsw(void);
+void mt753x_put_gsw(void);
+void mt753x_lock_gsw(void);
+
+u32 mt753x_reg_read(struct gsw_mt753x *gsw, u32 reg);
+void mt753x_reg_write(struct gsw_mt753x *gsw, u32 reg, u32 val);
+
+int mt753x_mii_read(struct gsw_mt753x *gsw, int phy, int reg);
+void mt753x_mii_write(struct gsw_mt753x *gsw, int phy, int reg, u16 val);
+
+int mt753x_mmd_read(struct gsw_mt753x *gsw, int addr, int devad, u16 reg);
+void mt753x_mmd_write(struct gsw_mt753x *gsw, int addr, int devad, u16 reg,
+		      u16 val);
+
+int mt753x_mmd_ind_read(struct gsw_mt753x *gsw, int addr, int devad, u16 reg);
+void mt753x_mmd_ind_write(struct gsw_mt753x *gsw, int addr, int devad, u16 reg,
+			  u16 val);
+
+int mt753x_tr_read(struct gsw_mt753x *gsw, int addr, u8 ch, u8 node, u8 daddr);
+void mt753x_tr_write(struct gsw_mt753x *gsw, int addr, u8 ch, u8 node, u8 daddr,
+		     u32 data);
+
+void mt753x_irq_worker(struct work_struct *work);
+void mt753x_irq_enable(struct gsw_mt753x *gsw);
+
+int mt753x_phy_calibration(struct gsw_mt753x *gsw, u8 phyaddr);
+int extphy_init(struct gsw_mt753x *gsw, int addr);
+
+/* MDIO Indirect Access Registers */
+#define MII_MMD_ACC_CTL_REG		0x0d
+#define MMD_CMD_S			14
+#define MMD_CMD_M			0xc000
+#define MMD_DEVAD_S			0
+#define MMD_DEVAD_M			0x1f
+
+/* MMD_CMD: MMD commands */
+#define MMD_ADDR			0
+#define MMD_DATA			1
+
+#define MII_MMD_ADDR_DATA_REG		0x0e
+
+/* Procedure of MT753x Internal Register Access
+ *
+ * 1. Internal Register Address
+ *
+ *    The MT753x has a 16-bit register address and each register is 32-bit.
+ *    This means the lowest two bits are not used as the register address is
+ *    4-byte aligned.
+ *
+ *    Rest of the valid bits are divided into two parts:
+ *      Bit 15..6 is the Page address
+ *      Bit 5..2 is the low address
+ *
+ *    -------------------------------------------------------------------
+ *    | 15  14  13  12  11  10   9   8   7   6 | 5   4   3   2 | 1   0  |
+ *    |----------------------------------------|---------------|--------|
+ *    |              Page Address              |    Address    | Unused |
+ *    -------------------------------------------------------------------
+ *
+ * 2. MDIO access timing
+ *
+ *    The MT753x uses the following MDIO timing for a single register read
+ *
+ *      Phase 1: Write Page Address
+ *    -------------------------------------------------------------------
+ *    | ST | OP | PHY_ADDR | TYPE | RSVD | TA |  RSVD |    PAGE_ADDR    |
+ *    -------------------------------------------------------------------
+ *    | 01 | 01 |   11111  |   1  | 1111 | xx | 00000 | REG_ADDR[15..6] |
+ *    -------------------------------------------------------------------
+ *
+ *      Phase 2: Write low Address & Read low word
+ *    -------------------------------------------------------------------
+ *    | ST | OP | PHY_ADDR | TYPE |    LOW_ADDR    | TA |      DATA     |
+ *    -------------------------------------------------------------------
+ *    | 01 | 10 |   11111  |   0  | REG_ADDR[5..2] | xx |  DATA[15..0]  |
+ *    -------------------------------------------------------------------
+ *
+ *      Phase 3: Read high word
+ *    -------------------------------------------------------------------
+ *    | ST | OP | PHY_ADDR | TYPE | RSVD | TA |           DATA          |
+ *    -------------------------------------------------------------------
+ *    | 01 | 10 |   11111  |   1  | 0000 | xx |       DATA[31..16]      |
+ *    -------------------------------------------------------------------
+ *
+ *    The MT753x uses the following MDIO timing for a single register write
+ *
+ *      Phase 1: Write Page Address (The same as read)
+ *
+ *      Phase 2: Write low Address and low word
+ *    -------------------------------------------------------------------
+ *    | ST | OP | PHY_ADDR | TYPE |    LOW_ADDR    | TA |      DATA     |
+ *    -------------------------------------------------------------------
+ *    | 01 | 01 |   11111  |   0  | REG_ADDR[5..2] | xx |  DATA[15..0]  |
+ *    -------------------------------------------------------------------
+ *
+ *      Phase 3: write high word
+ *    -------------------------------------------------------------------
+ *    | ST | OP | PHY_ADDR | TYPE | RSVD | TA |           DATA          |
+ *    -------------------------------------------------------------------
+ *    | 01 | 01 |   11111  |   1  | 0000 | xx |       DATA[31..16]      |
+ *    -------------------------------------------------------------------
+ *
+ */
+
+/* Internal Register Address fields */
+#define MT753X_REG_PAGE_ADDR_S		6
+#define MT753X_REG_PAGE_ADDR_M		0xffc0
+#define MT753X_REG_ADDR_S		2
+#define MT753X_REG_ADDR_M		0x3c
+#endif /* _MT753X_H_ */
diff --git a/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_mdio.c b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_mdio.c
new file mode 100755
index 0000000..06a1114
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_mdio.c
@@ -0,0 +1,861 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Weijie Gao <weijie.gao@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/reset.h>
+#include <linux/hrtimer.h>
+#include <linux/mii.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/of_net.h>
+#include <linux/of_irq.h>
+#include <linux/phy.h>
+
+#include "mt753x.h"
+#include "mt753x_swconfig.h"
+#include "mt753x_regs.h"
+#include "mt753x_nl.h"
+#include "mt7530.h"
+#include "mt7531.h"
+
+static u32 mt753x_id;
+struct list_head mt753x_devs;
+static DEFINE_MUTEX(mt753x_devs_lock);
+
+static struct mt753x_sw_id *mt753x_sw_ids[] = {
+	&mt7530_id,
+	&mt7531_id,
+};
+
+/* Read a 32-bit switch register via the host MDIO bus using the
+ * 3-phase scheme documented in mt753x.h: write the page address to
+ * phy-reg 0x1f, read the low 16 bits at the in-page address, then the
+ * high 16 bits at phy-reg 0x10. Serialized by the host bus mdio_lock.
+ */
+u32 mt753x_reg_read(struct gsw_mt753x *gsw, u32 reg)
+{
+	u32 high, low;
+
+	mutex_lock(&gsw->host_bus->mdio_lock);
+
+	gsw->host_bus->write(gsw->host_bus, gsw->smi_addr, 0x1f,
+		(reg & MT753X_REG_PAGE_ADDR_M) >> MT753X_REG_PAGE_ADDR_S);
+
+	low = gsw->host_bus->read(gsw->host_bus, gsw->smi_addr,
+		(reg & MT753X_REG_ADDR_M) >> MT753X_REG_ADDR_S);
+
+	high = gsw->host_bus->read(gsw->host_bus, gsw->smi_addr, 0x10);
+
+	mutex_unlock(&gsw->host_bus->mdio_lock);
+
+	return (high << 16) | (low & 0xffff);
+}
+
+/* Write a 32-bit switch register: page address to phy-reg 0x1f, low
+ * 16 bits at the in-page address, high 16 bits at phy-reg 0x10.
+ * Serialized by the host bus mdio_lock.
+ */
+void mt753x_reg_write(struct gsw_mt753x *gsw, u32 reg, u32 val)
+{
+	mutex_lock(&gsw->host_bus->mdio_lock);
+
+	gsw->host_bus->write(gsw->host_bus, gsw->smi_addr, 0x1f,
+		(reg & MT753X_REG_PAGE_ADDR_M) >> MT753X_REG_PAGE_ADDR_S);
+
+	gsw->host_bus->write(gsw->host_bus, gsw->smi_addr,
+		(reg & MT753X_REG_ADDR_M) >> MT753X_REG_ADDR_S, val & 0xffff);
+
+	gsw->host_bus->write(gsw->host_bus, gsw->smi_addr, 0x10, val >> 16);
+
+	mutex_unlock(&gsw->host_bus->mdio_lock);
+}
+
+/* Wait until the PHY indirect-access unit is idle (PHY_ACS_ST clear).
+ * Returns 0 when idle, -ETIMEDOUT after ~100ms of polling.
+ */
+static int mt753x_phy_iac_wait(struct gsw_mt753x *gsw)
+{
+	ktime_t timeout;
+	u32 val;
+
+	timeout = ktime_add_us(ktime_get(), 100000);
+	while (1) {
+		val = mt753x_reg_read(gsw, PHY_IAC);
+
+		if ((val & PHY_ACS_ST) == 0)
+			return 0;
+
+		if (ktime_compare(ktime_get(), timeout) > 0)
+			return -ETIMEDOUT;
+	}
+}
+
+/* Indirect MDIO clause 22/45 access through the PHY_IAC register.
+ * cmd selects read/write/address phase, st selects C22 vs C45 framing.
+ * Returns read data (for read commands), 0 for writes, or a negative
+ * errno on timeout. Callers serialize via gsw->mii_lock.
+ * (The duplicated busy-wait loops were factored into
+ * mt753x_phy_iac_wait(); behavior is unchanged.)
+ */
+static int mt753x_mii_rw(struct gsw_mt753x *gsw, int phy, int reg, u16 data,
+			 u32 cmd, u32 st)
+{
+	u32 val;
+	int ret;
+
+	/* Wait for any previous transaction to complete */
+	ret = mt753x_phy_iac_wait(gsw);
+	if (ret)
+		return ret;
+
+	val = (st << MDIO_ST_S) |
+	      ((cmd << MDIO_CMD_S) & MDIO_CMD_M) |
+	      ((phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) |
+	      ((reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M);
+
+	if (cmd == MDIO_CMD_WRITE || cmd == MDIO_CMD_ADDR)
+		val |= data & MDIO_RW_DATA_M;
+
+	/* Kick off the transaction */
+	mt753x_reg_write(gsw, PHY_IAC, val | PHY_ACS_ST);
+
+	ret = mt753x_phy_iac_wait(gsw);
+	if (ret)
+		return ret;
+
+	if (cmd == MDIO_CMD_READ || cmd == MDIO_CMD_READ_C45) {
+		val = mt753x_reg_read(gsw, PHY_IAC);
+		return val & MDIO_RW_DATA_M;
+	}
+
+	return 0;
+}
+
+/* Clause-22 PHY register read. Addresses 0..MT753X_NUM_PHYS-1 are
+ * remapped to the internal PHYs at phy_base. Returns the register
+ * value or a negative errno on timeout.
+ */
+int mt753x_mii_read(struct gsw_mt753x *gsw, int phy, int reg)
+{
+	int val;
+
+	if (phy < MT753X_NUM_PHYS)
+		phy = (gsw->phy_base + phy) & MT753X_SMI_ADDR_MASK;
+
+	mutex_lock(&gsw->mii_lock);
+	val = mt753x_mii_rw(gsw, phy, reg, 0, MDIO_CMD_READ, MDIO_ST_C22);
+	mutex_unlock(&gsw->mii_lock);
+
+	return val;
+}
+
+/* Clause-22 PHY register write; internal-PHY address remapping as in
+ * mt753x_mii_read(). Timeouts are silently ignored.
+ */
+void mt753x_mii_write(struct gsw_mt753x *gsw, int phy, int reg, u16 val)
+{
+	if (phy < MT753X_NUM_PHYS)
+		phy = (gsw->phy_base + phy) & MT753X_SMI_ADDR_MASK;
+
+	mutex_lock(&gsw->mii_lock);
+	mt753x_mii_rw(gsw, phy, reg, val, MDIO_CMD_WRITE, MDIO_ST_C22);
+	mutex_unlock(&gsw->mii_lock);
+}
+
+/* Native clause-45 MMD read: address phase then read phase through the
+ * indirect access unit. Returns the register value or a negative errno
+ * on timeout.
+ */
+int mt753x_mmd_read(struct gsw_mt753x *gsw, int addr, int devad, u16 reg)
+{
+	int val;
+
+	if (addr < MT753X_NUM_PHYS)
+		addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK;
+
+	mutex_lock(&gsw->mii_lock);
+	mt753x_mii_rw(gsw, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
+	val = mt753x_mii_rw(gsw, addr, devad, 0, MDIO_CMD_READ_C45,
+			    MDIO_ST_C45);
+	mutex_unlock(&gsw->mii_lock);
+
+	return val;
+}
+
+/* Native clause-45 MMD write: address phase then data phase. Timeouts
+ * are silently ignored.
+ */
+void mt753x_mmd_write(struct gsw_mt753x *gsw, int addr, int devad, u16 reg,
+		      u16 val)
+{
+	if (addr < MT753X_NUM_PHYS)
+		addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK;
+
+	mutex_lock(&gsw->mii_lock);
+	mt753x_mii_rw(gsw, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
+	mt753x_mii_rw(gsw, addr, devad, val, MDIO_CMD_WRITE, MDIO_ST_C45);
+	mutex_unlock(&gsw->mii_lock);
+}
+
+/* Clause-45 register read via the clause-22 indirect MMD registers
+ * (MII_MMD_ACC_CTL_REG / MII_MMD_ADDR_DATA_REG): latch the register
+ * address in MMD_ADDR mode, then switch to MMD_DATA mode and read.
+ * Returns the 16-bit value, or a negative errno on timeout.
+ */
+int mt753x_mmd_ind_read(struct gsw_mt753x *gsw, int addr, int devad, u16 reg)
+{
+	/* Fix: was u16, which truncated a negative error code from
+	 * mt753x_mii_rw() into a bogus positive register value.
+	 */
+	int val;
+
+	if (addr < MT753X_NUM_PHYS)
+		addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK;
+
+	mutex_lock(&gsw->mii_lock);
+
+	mt753x_mii_rw(gsw, addr, MII_MMD_ACC_CTL_REG,
+		      (MMD_ADDR << MMD_CMD_S) |
+		      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M),
+		      MDIO_CMD_WRITE, MDIO_ST_C22);
+
+	mt753x_mii_rw(gsw, addr, MII_MMD_ADDR_DATA_REG, reg,
+		      MDIO_CMD_WRITE, MDIO_ST_C22);
+
+	mt753x_mii_rw(gsw, addr, MII_MMD_ACC_CTL_REG,
+		      (MMD_DATA << MMD_CMD_S) |
+		      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M),
+		      MDIO_CMD_WRITE, MDIO_ST_C22);
+
+	val = mt753x_mii_rw(gsw, addr, MII_MMD_ADDR_DATA_REG, 0,
+			    MDIO_CMD_READ, MDIO_ST_C22);
+
+	mutex_unlock(&gsw->mii_lock);
+
+	return val;
+}
+
+/* Clause-45 register write via the clause-22 indirect MMD registers:
+ * latch the address in MMD_ADDR mode, then write the data in MMD_DATA
+ * mode. Timeouts are silently ignored.
+ */
+void mt753x_mmd_ind_write(struct gsw_mt753x *gsw, int addr, int devad, u16 reg,
+			  u16 val)
+{
+	if (addr < MT753X_NUM_PHYS)
+		addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK;
+
+	mutex_lock(&gsw->mii_lock);
+
+	mt753x_mii_rw(gsw, addr, MII_MMD_ACC_CTL_REG,
+		      (MMD_ADDR << MMD_CMD_S) |
+		      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M),
+		      MDIO_CMD_WRITE, MDIO_ST_C22);
+
+	mt753x_mii_rw(gsw, addr, MII_MMD_ADDR_DATA_REG, reg,
+		      MDIO_CMD_WRITE, MDIO_ST_C22);
+
+	mt753x_mii_rw(gsw, addr, MII_MMD_ACC_CTL_REG,
+		      (MMD_DATA << MMD_CMD_S) |
+		      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M),
+		      MDIO_CMD_WRITE, MDIO_ST_C22);
+
+	mt753x_mii_rw(gsw, addr, MII_MMD_ADDR_DATA_REG, val,
+		      MDIO_CMD_WRITE, MDIO_ST_C22);
+
+	mutex_unlock(&gsw->mii_lock);
+}
+
+/* Return 1 if the fixed-link node declares "full-duplex", else 0. */
+static inline int mt753x_get_duplex(const struct device_node *np)
+{
+	return of_property_read_bool(np, "full-duplex");
+}
+
+/* Parse "mediatek,mt753x-port" children of the switch node and fill
+ * gsw->port5_cfg / port6_cfg (only MAC ports 5 and 6 are configurable).
+ * Reads phy-mode, an optional fixed-link {speed, full-duplex} child,
+ * and the mediatek,ssc-on / mediatek,stag-on flags.
+ */
+static void mt753x_load_port_cfg(struct gsw_mt753x *gsw)
+{
+	struct device_node *port_np;
+	struct device_node *fixed_link_node;
+	struct mt753x_port_cfg *port_cfg;
+	u32 port;
+
+	for_each_child_of_node(gsw->dev->of_node, port_np) {
+		if (!of_device_is_compatible(port_np, "mediatek,mt753x-port"))
+			continue;
+
+		if (!of_device_is_available(port_np))
+			continue;
+
+		if (of_property_read_u32(port_np, "reg", &port))
+			continue;
+
+		switch (port) {
+		case 5:
+			port_cfg = &gsw->port5_cfg;
+			break;
+		case 6:
+			port_cfg = &gsw->port6_cfg;
+			break;
+		default:
+			continue;
+		}
+
+		if (port_cfg->enabled) {
+			/* Fix: report the duplicated port number, not the
+			 * previously parsed phy_mode value.
+			 */
+			dev_info(gsw->dev, "duplicated node for port%d\n",
+				 port);
+			continue;
+		}
+
+		port_cfg->np = port_np;
+
+		port_cfg->phy_mode = of_get_phy_mode(port_np);
+		if (port_cfg->phy_mode < 0) {
+			dev_info(gsw->dev, "incorrect phy-mode %d\n", port);
+			continue;
+		}
+
+		fixed_link_node = of_get_child_by_name(port_np, "fixed-link");
+		if (fixed_link_node) {
+			u32 speed;
+
+			port_cfg->force_link = 1;
+			port_cfg->duplex = mt753x_get_duplex(fixed_link_node);
+
+			if (of_property_read_u32(fixed_link_node, "speed",
+						 &speed)) {
+				/* Fix: drop the node reference on this
+				 * early-exit path too (was leaked).
+				 */
+				of_node_put(fixed_link_node);
+				continue;
+			}
+
+			of_node_put(fixed_link_node);
+
+			switch (speed) {
+			case 10:
+				port_cfg->speed = MAC_SPD_10;
+				break;
+			case 100:
+				port_cfg->speed = MAC_SPD_100;
+				break;
+			case 1000:
+				port_cfg->speed = MAC_SPD_1000;
+				break;
+			case 2500:
+				port_cfg->speed = MAC_SPD_2500;
+				break;
+			default:
+				dev_info(gsw->dev, "incorrect speed %d\n",
+					 speed);
+				continue;
+			}
+		}
+
+		port_cfg->ssc_on = of_property_read_bool(port_cfg->np,
+							 "mediatek,ssc-on");
+		port_cfg->stag_on = of_property_read_bool(port_cfg->np,
+							  "mediatek,stag-on");
+		port_cfg->enabled = 1;
+	}
+}
+
+/* Write a token-ring (TR) debug register of an internal PHY: select
+ * the TR page via the CL22 page-control register, wait for the unit to
+ * be idle, load the payload into PHY_TR_LOW/HIGH_DATA, kick off the
+ * write, wait for completion, then restore page 0. Each wait times out
+ * silently after ~100ms (the page is still restored via the out label).
+ */
+void mt753x_tr_write(struct gsw_mt753x *gsw, int addr, u8 ch, u8 node, u8 daddr,
+		     u32 data)
+{
+	ktime_t timeout;
+	u32 timeout_us;
+	u32 val;
+
+	if (addr < MT753X_NUM_PHYS)
+		addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK;
+
+	gsw->mii_write(gsw, addr, PHY_CL22_PAGE_CTRL, PHY_TR_PAGE);
+
+	/* Dummy read; result is discarded — presumably latches the page
+	 * switch. TODO confirm it is required.
+	 */
+	val = gsw->mii_read(gsw, addr, PHY_TR_CTRL);
+
+	timeout_us = 100000;
+	timeout = ktime_add_us(ktime_get(), timeout_us);
+	while (1) {
+		val = gsw->mii_read(gsw, addr, PHY_TR_CTRL);
+
+		if (!!(val & PHY_TR_PKT_XMT_STA))
+			break;
+
+		if (ktime_compare(ktime_get(), timeout) > 0)
+			goto out;
+	}
+
+	gsw->mii_write(gsw, addr, PHY_TR_LOW_DATA, PHY_TR_LOW_VAL(data));
+	gsw->mii_write(gsw, addr, PHY_TR_HIGH_DATA, PHY_TR_HIGH_VAL(data));
+	val = PHY_TR_PKT_XMT_STA | (PHY_TR_WRITE << PHY_TR_WR_S) |
+	      (ch << PHY_TR_CH_ADDR_S) | (node << PHY_TR_NODE_ADDR_S) |
+	      (daddr << PHY_TR_DATA_ADDR_S);
+	gsw->mii_write(gsw, addr, PHY_TR_CTRL, val);
+
+	timeout_us = 100000;
+	timeout = ktime_add_us(ktime_get(), timeout_us);
+	while (1) {
+		val = gsw->mii_read(gsw, addr, PHY_TR_CTRL);
+
+		if (!!(val & PHY_TR_PKT_XMT_STA))
+			break;
+
+		if (ktime_compare(ktime_get(), timeout) > 0)
+			goto out;
+	}
+out:
+	gsw->mii_write(gsw, addr, PHY_CL22_PAGE_CTRL, 0);
+}
+
+/* Read a token-ring (TR) debug register of an internal PHY. Mirrors
+ * mt753x_tr_write(): page select, wait idle, issue the read command,
+ * wait, fetch low/high data, restore page 0.
+ * Returns the assembled value or -ETIMEDOUT.
+ * NOTE(review): val_h is u8, so only bits 23..16 are kept from the
+ * high-data register — consistent with the 24-bit TR payloads written
+ * elsewhere (e.g. 0x6fb90a), but confirm against the datasheet.
+ */
+int mt753x_tr_read(struct gsw_mt753x *gsw, int addr, u8 ch, u8 node, u8 daddr)
+{
+	ktime_t timeout;
+	u32 timeout_us;
+	u32 val;
+	u8 val_h;
+
+	if (addr < MT753X_NUM_PHYS)
+		addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK;
+
+	gsw->mii_write(gsw, addr, PHY_CL22_PAGE_CTRL, PHY_TR_PAGE);
+
+	/* Dummy read; result is intentionally unused — TODO confirm */
+	val = gsw->mii_read(gsw, addr, PHY_TR_CTRL);
+
+	timeout_us = 100000;
+	timeout = ktime_add_us(ktime_get(), timeout_us);
+	while (1) {
+		val = gsw->mii_read(gsw, addr, PHY_TR_CTRL);
+
+		if (!!(val & PHY_TR_PKT_XMT_STA))
+			break;
+
+		if (ktime_compare(ktime_get(), timeout) > 0) {
+			gsw->mii_write(gsw, addr, PHY_CL22_PAGE_CTRL, 0);
+			return -ETIMEDOUT;
+		}
+	}
+
+	val = PHY_TR_PKT_XMT_STA | (PHY_TR_READ << PHY_TR_WR_S) |
+	      (ch << PHY_TR_CH_ADDR_S) | (node << PHY_TR_NODE_ADDR_S) |
+	      (daddr << PHY_TR_DATA_ADDR_S);
+	gsw->mii_write(gsw, addr, PHY_TR_CTRL, val);
+
+	timeout_us = 100000;
+	timeout = ktime_add_us(ktime_get(), timeout_us);
+	while (1) {
+		val = gsw->mii_read(gsw, addr, PHY_TR_CTRL);
+
+		if (!!(val & PHY_TR_PKT_XMT_STA))
+			break;
+
+		if (ktime_compare(ktime_get(), timeout) > 0) {
+			gsw->mii_write(gsw, addr, PHY_CL22_PAGE_CTRL, 0);
+			return -ETIMEDOUT;
+		}
+	}
+
+	val = gsw->mii_read(gsw, addr, PHY_TR_LOW_DATA);
+	val_h = gsw->mii_read(gsw, addr, PHY_TR_HIGH_DATA);
+	val |= (val_h << 16);
+
+	gsw->mii_write(gsw, addr, PHY_CL22_PAGE_CTRL, 0);
+
+	return val;
+}
+
+/* Register a new switch instance: assign the next global id and append
+ * it to the mt753x_devs list under the device-list lock.
+ */
+static void mt753x_add_gsw(struct gsw_mt753x *gsw)
+{
+	mutex_lock(&mt753x_devs_lock);
+	gsw->id = mt753x_id++;
+	INIT_LIST_HEAD(&gsw->list);
+	list_add_tail(&gsw->list, &mt753x_devs);
+	mutex_unlock(&mt753x_devs_lock);
+}
+
+/* Unlink a switch instance from the global device list. */
+static void mt753x_remove_gsw(struct gsw_mt753x *gsw)
+{
+	mutex_lock(&mt753x_devs_lock);
+	list_del(&gsw->list);
+	mutex_unlock(&mt753x_devs_lock);
+}
+
+
+/* Look up a switch instance by id.
+ * LOCKING: on success the device-list lock is intentionally left held;
+ * the caller must release it with mt753x_put_gsw(). Returns NULL (lock
+ * already released) when no matching instance exists.
+ */
+struct gsw_mt753x *mt753x_get_gsw(u32 id)
+{
+	struct gsw_mt753x *dev;
+
+	mutex_lock(&mt753x_devs_lock);
+
+	list_for_each_entry(dev, &mt753x_devs, list) {
+		if (dev->id == id)
+			return dev;
+	}
+
+	mutex_unlock(&mt753x_devs_lock);
+
+	return NULL;
+}
+
+/* Return the first registered switch instance, or NULL if none.
+ * LOCKING: like mt753x_get_gsw(), a non-NULL return leaves the
+ * device-list lock held for the caller to drop via mt753x_put_gsw().
+ */
+struct gsw_mt753x *mt753x_get_first_gsw(void)
+{
+	struct gsw_mt753x *dev;
+
+	mutex_lock(&mt753x_devs_lock);
+
+	list_for_each_entry(dev, &mt753x_devs, list)
+		return dev;
+
+	mutex_unlock(&mt753x_devs_lock);
+
+	return NULL;
+}
+
+/* Release the device-list lock taken by a successful
+ * mt753x_get_gsw()/mt753x_get_first_gsw() or by mt753x_lock_gsw().
+ */
+void mt753x_put_gsw(void)
+{
+	mutex_unlock(&mt753x_devs_lock);
+}
+
+/* Take the device-list lock for external iteration over mt753x_devs. */
+void mt753x_lock_gsw(void)
+{
+	mutex_lock(&mt753x_devs_lock);
+}
+
+/* Hard-reset the switch, either through the "mcm" reset controller
+ * (MCM package, "mediatek,mcm" DT flag) or by toggling the reset-gpios
+ * pin. Returns 0 on success or a negative errno.
+ */
+static int mt753x_hw_reset(struct gsw_mt753x *gsw)
+{
+	struct device_node *np = gsw->dev->of_node;
+	struct reset_control *rstc;
+	int mcm;
+	int ret = -EINVAL;
+
+	mcm = of_property_read_bool(np, "mediatek,mcm");
+	if (mcm) {
+		rstc = devm_reset_control_get(gsw->dev, "mcm");
+		if (IS_ERR(rstc)) {
+			dev_err(gsw->dev, "Missing reset ctrl of switch\n");
+			/* Fix: propagate the real error code; the old
+			 * "ret = IS_ERR(rstc)" returned +1, which callers
+			 * treating 0/negative as the contract would miss.
+			 */
+			return PTR_ERR(rstc);
+		}
+
+		reset_control_assert(rstc);
+		msleep(30);
+		reset_control_deassert(rstc);
+
+		/* No GPIO in the MCM case; mark it unused for remove() */
+		gsw->reset_pin = -1;
+		return 0;
+	}
+
+	gsw->reset_pin = of_get_named_gpio(np, "reset-gpios", 0);
+	if (gsw->reset_pin < 0) {
+		dev_err(gsw->dev, "Missing reset pin of switch\n");
+		return ret;
+	}
+
+	ret = devm_gpio_request(gsw->dev, gsw->reset_pin, "mt753x-reset");
+	if (ret) {
+		dev_info(gsw->dev, "Failed to request gpio %d\n",
+			 gsw->reset_pin);
+		return ret;
+	}
+
+	/* Assert reset, hold, release, then wait for the chip to come up */
+	gpio_direction_output(gsw->reset_pin, 0);
+	msleep(30);
+	gpio_set_value(gsw->reset_pin, 1);
+	msleep(500);
+
+	return 0;
+}
+#if 1 //XDXDXDXD
+/* mii_bus accessor: forward reads on the internal-PHY bus to the
+ * model-specific mii_read hook.
+ */
+static int mt753x_mdio_read(struct mii_bus *bus, int addr, int reg)
+{
+	struct gsw_mt753x *gsw = bus->priv;
+
+	return gsw->mii_read(gsw, addr, reg);
+}
+
+/* mii_bus accessor: forward writes to the mii_write hook; the hook
+ * returns void, so this always reports success.
+ */
+static int mt753x_mdio_write(struct mii_bus *bus, int addr, int reg, u16 val)
+{
+	struct gsw_mt753x *gsw = bus->priv;
+
+	gsw->mii_write(gsw, addr, reg, val);
+
+	return 0;
+}
+
+static const struct net_device_ops mt753x_dummy_netdev_ops = {
+};
+
+/* phylib link-change callback for the dummy netdevs wrapping the
+ * internal PHYs; only logs link state transitions.
+ */
+static void mt753x_phy_link_handler(struct net_device *dev)
+{
+	struct mt753x_phy *phy = container_of(dev, struct mt753x_phy, netdev);
+	struct phy_device *phydev = phy->phydev;
+	struct gsw_mt753x *gsw = phy->gsw;
+	u32 port = phy - gsw->phys;	/* index within gsw->phys[] */
+
+	if (!phydev->link) {
+		dev_info(gsw->dev, "Port %d Link is Down\n", port);
+		return;
+	}
+
+	dev_info(gsw->dev,
+		 "Port %d Link is Up - %s/%s - flow control %s\n",
+		 port, phy_speed_to_str(phydev->speed),
+		 (phydev->duplex == DUPLEX_FULL) ? "Full" : "Half",
+		 phydev->pause ? "rx/tx" : "off");
+}
+
+/* Attach phylib to every internal PHY listed under the mdio-bus node:
+ * wrap each in a dummy netdev, connect it with
+ * mt753x_phy_link_handler() as the link callback and start polling.
+ * PHYs with a missing/invalid reg or phy-mode are skipped.
+ */
+static void mt753x_connect_internal_phys(struct gsw_mt753x *gsw,
+					 struct device_node *mii_np)
+{
+	struct device_node *phy_np;
+	struct mt753x_phy *phy;
+	int phy_mode;
+	u32 phyad;
+
+	if (!mii_np)
+		return;
+
+	for_each_child_of_node(mii_np, phy_np) {
+		if (of_property_read_u32(phy_np, "reg", &phyad))
+			continue;
+
+		if (phyad >= MT753X_NUM_PHYS)
+			continue;
+
+		phy_mode = of_get_phy_mode(phy_np);
+		if (phy_mode < 0) {
+			dev_info(gsw->dev, "incorrect phy-mode %d for PHY %d\n",
+				 phy_mode, phyad);
+			continue;
+		}
+
+		phy = &gsw->phys[phyad];
+		phy->gsw = gsw;
+
+		init_dummy_netdev(&phy->netdev);
+		phy->netdev.netdev_ops = &mt753x_dummy_netdev_ops;
+
+		phy->phydev = of_phy_connect(&phy->netdev, phy_np,
+					mt753x_phy_link_handler, 0, phy_mode);
+		if (!phy->phydev) {
+			dev_info(gsw->dev, "could not connect to PHY %d\n",
+				 phyad);
+			continue;
+		}
+
+		phy_start(phy->phydev);
+	}
+}
+
+/* Stop and detach every internal PHY previously attached by
+ * mt753x_connect_internal_phys(); safe to call when none were.
+ */
+static void mt753x_disconnect_internal_phys(struct gsw_mt753x *gsw)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(gsw->phys); i++) {
+		struct phy_device *phydev = gsw->phys[i].phydev;
+
+		if (!phydev)
+			continue;
+
+		phy_stop(phydev);
+		phy_disconnect(phydev);
+		gsw->phys[i].phydev = NULL;
+	}
+}
+
+/* Allocate and register the mii_bus exposing the switch's internal
+ * PHYs (optionally described by an "mdio-bus" child node), then — if
+ * phy-poll is enabled — connect phylib to those PHYs.
+ * Returns 0 on success or a negative errno; on registration failure
+ * gphy_bus is freed and reset to NULL so remove() can detect it.
+ * Note: the err_put_node label is also the normal exit path.
+ */
+static int mt753x_mdio_register(struct gsw_mt753x *gsw)
+{
+	struct device_node *mii_np;
+	int i, ret;
+
+	mii_np = of_get_child_by_name(gsw->dev->of_node, "mdio-bus");
+	if (mii_np && !of_device_is_available(mii_np)) {
+		ret = -ENODEV;
+		goto err_put_node;
+	}
+
+	gsw->gphy_bus = devm_mdiobus_alloc(gsw->dev);
+	if (!gsw->gphy_bus) {
+		ret = -ENOMEM;
+		goto err_put_node;
+	}
+
+	gsw->gphy_bus->name = "mt753x_mdio";
+	gsw->gphy_bus->read = mt753x_mdio_read;
+	gsw->gphy_bus->write = mt753x_mdio_write;
+	gsw->gphy_bus->priv = gsw;
+	gsw->gphy_bus->parent = gsw->dev;
+	gsw->gphy_bus->phy_mask = BIT(MT753X_NUM_PHYS) - 1;
+//	gsw->gphy_bus->irq = gsw->phy_irqs;
+
+	/* No interrupt wiring for the PHYs; poll them all */
+	for (i = 0; i < PHY_MAX_ADDR; i++)
+		gsw->gphy_bus->irq[i] = PHY_POLL;
+
+	if (mii_np)
+		snprintf(gsw->gphy_bus->id, MII_BUS_ID_SIZE, "%s@%s",
+			 mii_np->name, gsw->dev->of_node->name)
+	else
+		snprintf(gsw->gphy_bus->id, MII_BUS_ID_SIZE, "mdio@%s",
+			 gsw->dev->of_node->name);
+
+	ret = of_mdiobus_register(gsw->gphy_bus, mii_np);
+
+	if (ret) {
+		devm_mdiobus_free(gsw->dev, gsw->gphy_bus);
+		gsw->gphy_bus = NULL;
+	} else {
+		if (gsw->phy_status_poll)
+			mt753x_connect_internal_phys(gsw, mii_np);
+	}
+
+err_put_node:
+	if (mii_np)
+		of_node_put(mii_np);
+
+	return ret;
+}
+#endif
+
+/* Top-half switch interrupt handler: mask the line and defer the real
+ * work to irq_worker (which presumably re-enables the IRQ — confirm in
+ * mt753x_irq_worker()).
+ */
+static irqreturn_t mt753x_irq_handler(int irq, void *dev)
+{
+	struct gsw_mt753x *gsw = dev;
+
+	disable_irq_nosync(gsw->irq);
+
+	schedule_work(&gsw->irq_worker);
+
+	return IRQ_HANDLED;
+}
+
+/* Platform probe: locate the host MDIO bus via the "mediatek,mdio"
+ * phandle (deferring if not ready), hard-reset the switch, parse DT
+ * configuration, detect the chip model, run its init/post_init hooks,
+ * and register IRQ handling, the internal-PHY MDIO bus and swconfig.
+ * NOTE(review): the "mdio" node reference from of_parse_phandle() is
+ * never of_node_put() on any path, and the mii_bus reference taken by
+ * of_mdio_find_bus() is never released — both leak; confirm and fix.
+ */
+static int mt753x_probe(struct platform_device *pdev)
+{
+	struct gsw_mt753x *gsw;
+	struct mt753x_sw_id *sw;
+	struct device_node *np = pdev->dev.of_node;
+	struct device_node *mdio;
+	struct mii_bus *mdio_bus;
+	int ret = -EINVAL;
+	struct chip_rev rev;
+	struct mt753x_mapping *map;
+	int i;
+
+	mdio = of_parse_phandle(np, "mediatek,mdio", 0);
+	if (!mdio)
+		return -EINVAL;
+
+	mdio_bus = of_mdio_find_bus(mdio);
+	if (!mdio_bus)
+		return -EPROBE_DEFER;
+
+	gsw = devm_kzalloc(&pdev->dev, sizeof(struct gsw_mt753x), GFP_KERNEL);
+	if (!gsw)
+		return -ENOMEM;
+
+	gsw->host_bus = mdio_bus;
+	gsw->dev = &pdev->dev;
+	mutex_init(&gsw->mii_lock);
+
+	/* Switch hard reset */
+	if (mt753x_hw_reset(gsw))
+		goto fail;
+
+	/* Fetch the SMI address dirst */
+	if (of_property_read_u32(np, "mediatek,smi-addr", &gsw->smi_addr))
+		gsw->smi_addr = MT753X_DFL_SMI_ADDR;
+
+	/* Get LAN/WAN port mapping */
+	map = mt753x_find_mapping(np);
+	if (map) {
+		mt753x_apply_mapping(gsw, map);
+		gsw->global_vlan_enable = 1;
+		dev_info(gsw->dev, "LAN/WAN VLAN setting=%s\n", map->name);
+	}
+
+	/* Load MAC port configurations */
+	mt753x_load_port_cfg(gsw);
+
+	/* Check for valid switch and then initialize */
+	/* sw is assigned only when a detect() succeeds; the bounds check
+	 * after the loop guarantees it is set before use below.
+	 */
+	for (i = 0; i < ARRAY_SIZE(mt753x_sw_ids); i++) {
+		if (!mt753x_sw_ids[i]->detect(gsw, &rev)) {
+			sw = mt753x_sw_ids[i];
+
+			gsw->name = rev.name;
+			gsw->model = sw->model;
+
+			dev_info(gsw->dev, "Switch is MediaTek %s rev %d",
+				 gsw->name, rev.rev);
+
+			/* Initialize the switch */
+			ret = sw->init(gsw);
+			if (ret)
+				goto fail;
+
+			break;
+		}
+	}
+
+	if (i >= ARRAY_SIZE(mt753x_sw_ids)) {
+		dev_err(gsw->dev, "No mt753x switch found\n");
+		goto fail;
+	}
+
+	gsw->irq = platform_get_irq(pdev, 0);
+	if (gsw->irq >= 0) {
+		ret = devm_request_irq(gsw->dev, gsw->irq, mt753x_irq_handler,
+				       0, dev_name(gsw->dev), gsw);
+		if (ret) {
+			dev_err(gsw->dev, "Failed to request irq %d\n",
+				gsw->irq);
+			goto fail;
+		}
+
+		INIT_WORK(&gsw->irq_worker, mt753x_irq_worker);
+	}
+
+	platform_set_drvdata(pdev, gsw);
+
+	gsw->phy_status_poll = of_property_read_bool(gsw->dev->of_node,
+						     "mediatek,phy-poll");
+
+	mt753x_add_gsw(gsw);
+#if 1 //XDXD
+	/* NOTE(review): return value ignored — registration failure is
+	 * silently tolerated here; confirm that is intended.
+	 */
+	mt753x_mdio_register(gsw);
+#endif
+
+	mt753x_swconfig_init(gsw);
+
+	if (sw->post_init)
+		sw->post_init(gsw);
+
+	if (gsw->irq >= 0)
+		mt753x_irq_enable(gsw);
+
+	return 0;
+
+fail:
+	devm_kfree(&pdev->dev, gsw);
+
+	return ret;
+}
+
+/* Platform remove: tear down in reverse of probe — cancel deferred IRQ
+ * work, free the reset GPIO, destroy swconfig, disconnect PHYs and
+ * unregister the internal-PHY bus, then drop the instance.
+ * NOTE(review): gphy_bus is NULL when mt753x_mdio_register() failed,
+ * and mdiobus_unregister(NULL) would oops — confirm and guard.
+ */
+static int mt753x_remove(struct platform_device *pdev)
+{
+	struct gsw_mt753x *gsw = platform_get_drvdata(pdev);
+
+	if (gsw->irq >= 0)
+		cancel_work_sync(&gsw->irq_worker);
+
+	if (gsw->reset_pin >= 0)
+		devm_gpio_free(&pdev->dev, gsw->reset_pin);
+
+#ifdef CONFIG_SWCONFIG
+	mt753x_swconfig_destroy(gsw);
+#endif
+
+#if 1 //XDXD
+	mt753x_disconnect_internal_phys(gsw);
+
+	mdiobus_unregister(gsw->gphy_bus);
+#endif
+
+	mt753x_remove_gsw(gsw);
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static const struct of_device_id mt753x_ids[] = {
+	{ .compatible = "mediatek,mt753x" },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(of, mt753x_ids);
+
+static struct platform_driver mt753x_driver = {
+	.probe = mt753x_probe,
+	.remove = mt753x_remove,
+	.driver = {
+		.name = "mt753x",
+		.of_match_table = mt753x_ids,
+	},
+};
+
+/* Module init: register the platform driver and, only on success,
+ * bring up the netlink interface.
+ * Fix: previously mt753x_nl_init() ran even when driver registration
+ * failed, leaving the netlink family alive with no driver behind it.
+ */
+static int __init mt753x_init(void)
+{
+	int ret;
+
+	INIT_LIST_HEAD(&mt753x_devs);
+	ret = platform_driver_register(&mt753x_driver);
+	if (ret)
+		return ret;
+
+	mt753x_nl_init();
+
+	return 0;
+}
+module_init(mt753x_init);
+
+/* Module exit: tear down the netlink interface before unregistering
+ * the platform driver (reverse of mt753x_init()).
+ */
+static void __exit mt753x_exit(void)
+{
+	mt753x_nl_exit();
+
+	platform_driver_unregister(&mt753x_driver);
+}
+module_exit(mt753x_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Weijie Gao <weijie.gao@mediatek.com>");
+MODULE_DESCRIPTION("Driver for MediaTek MT753x Gigabit Switch");
diff --git a/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_nl.c b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_nl.c
new file mode 100755
index 0000000..a04c701
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_nl.c
@@ -0,0 +1,381 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Sirui Zhao <Sirui.Zhao@mediatek.com>
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <net/genetlink.h>
+
+#include "mt753x.h"
+#include "mt753x_nl.h"
+
+struct mt753x_nl_cmd_item {
+	enum mt753x_cmd cmd;	/* genl command this entry handles */
+	bool require_dev;	/* true: resolve a switch device before dispatch */
+	int (*process)(struct genl_info *info, struct gsw_mt753x *gsw);	/* handler */
+	u32 nr_required_attrs;	/* number of entries in required_attrs */
+	const enum mt753x_attr *required_attrs;	/* attrs that must be present */
+};
+
+static int mt753x_nl_response(struct sk_buff *skb, struct genl_info *info);
+
+static const struct nla_policy mt753x_nl_cmd_policy[] = {
+	[MT753X_ATTR_TYPE_MESG] = { .type = NLA_STRING },
+	[MT753X_ATTR_TYPE_PHY] = { .type = NLA_S32 },
+	[MT753X_ATTR_TYPE_REG] = { .type = NLA_S32 },
+	[MT753X_ATTR_TYPE_VAL] = { .type = NLA_S32 },
+	[MT753X_ATTR_TYPE_DEV_NAME] = { .type = NLA_S32 },	/* NOTE(review): a "name" attr typed NLA_S32 looks odd - confirm against userspace tool */
+	[MT753X_ATTR_TYPE_DEV_ID] = { .type = NLA_S32 },
+	[MT753X_ATTR_TYPE_DEVAD] = { .type = NLA_S32 },	/* clause-45 device address */
+};
+
+static const struct genl_ops mt753x_nl_ops[] = {
+	{
+		.cmd = MT753X_CMD_REQUEST,
+		.doit = mt753x_nl_response,	/* all cmds funnel through one dispatcher */
+//		.policy = mt753x_nl_cmd_policy,
+		.flags = GENL_ADMIN_PERM,	/* requires CAP_NET_ADMIN */
+	}, {
+		.cmd = MT753X_CMD_READ,
+		.doit = mt753x_nl_response,
+//		.policy = mt753x_nl_cmd_policy,
+		.flags = GENL_ADMIN_PERM,
+	}, {
+		.cmd = MT753X_CMD_WRITE,
+		.doit = mt753x_nl_response,
+//		.policy = mt753x_nl_cmd_policy,
+		.flags = GENL_ADMIN_PERM,
+	},
+};
+
+static struct genl_family mt753x_nl_family = {
+	.name =		MT753X_GENL_NAME,
+	.version =	MT753X_GENL_VERSION,
+	.maxattr =	MT753X_NR_ATTR_TYPE,
+	.ops =		mt753x_nl_ops,
+	.n_ops =	ARRAY_SIZE(mt753x_nl_ops),
+	.policy =	mt753x_nl_cmd_policy,	/* family-wide policy replaces the per-op ones commented out above */
+};
+
+static int mt753x_nl_list_devs(char *buff, int size)
+{
+	struct gsw_mt753x *gsw;
+	int total = 0;	/* bytes stored in buff so far */
+
+	memset(buff, 0, size);
+
+	mt753x_lock_gsw();
+
+	list_for_each_entry(gsw, &mt753x_devs, list) {
+		/* scnprintf() is bounded by the remaining space and returns
+		 * the bytes actually stored, so unlike the former
+		 * snprintf()+strncat() pair it can never overrun buff. */
+		total += scnprintf(buff + total, size - total,
+				   "id: %d, model: %s, node: %s\n",
+				   gsw->id, gsw->name,
+				   gsw->dev->of_node->name);
+	}
+
+	mt753x_put_gsw();
+
+	return total;
+}
+
+static int mt753x_nl_prepare_reply(struct genl_info *info, u8 cmd,
+				   struct sk_buff **skbp)	/* out: allocated reply skb */
+{
+	struct sk_buff *msg;
+	void *reply;
+
+	if (!info)
+		return -EINVAL;
+
+	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	/* Construct send-back message header */
+	reply = genlmsg_put(msg, info->snd_portid, info->snd_seq,
+			    &mt753x_nl_family, 0, cmd);
+	if (!reply) {
+		nlmsg_free(msg);
+		return -EINVAL;
+	}
+
+	*skbp = msg;	/* caller owns msg: send via mt753x_nl_send_reply() or free */
+	return 0;
+}
+
+static int mt753x_nl_send_reply(struct sk_buff *skb, struct genl_info *info)
+{
+	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
+	void *reply = genlmsg_data(genlhdr);
+
+	/* Finalize a generic netlink message (update message header) */
+	genlmsg_end(skb, reply);
+
+	/* reply to a request */
+	return genlmsg_reply(skb, info);
+}
+
+static s32 mt753x_nl_get_s32(struct genl_info *info, enum mt753x_attr attr,
+			     s32 defval)	/* returned when attr is absent */
+{
+	struct nlattr *na;
+
+	na = info->attrs[attr];
+	if (na)
+		return nla_get_s32(na);
+
+	return defval;
+}
+
+static int mt753x_nl_get_u32(struct genl_info *info, enum mt753x_attr attr,
+			     u32 *val)	/* left untouched when attr is absent */
+{
+	struct nlattr *na;
+
+	na = info->attrs[attr];
+	if (na) {
+		*val = nla_get_u32(na);
+		return 0;
+	}
+
+	return -1;	/* callers treat non-zero as "attribute missing" */
+}
+
+static struct gsw_mt753x *mt753x_nl_parse_find_gsw(struct genl_info *info)
+{
+	struct gsw_mt753x *gsw;
+	struct nlattr *na;
+	int gsw_id;
+
+	na = info->attrs[MT753X_ATTR_TYPE_DEV_ID];
+	if (na) {
+		gsw_id = nla_get_s32(na);
+		if (gsw_id >= 0)
+			gsw = mt753x_get_gsw(gsw_id);	/* look up by explicit id */
+		else
+			gsw = mt753x_get_first_gsw();	/* negative id means "any" */
+	} else {
+		gsw = mt753x_get_first_gsw();
+	}
+
+	return gsw;	/* presumably released via mt753x_put_gsw() by the caller - confirm */
+}
+
+static int mt753x_nl_get_swdevs(struct genl_info *info, struct gsw_mt753x *gsw)
+{
+	struct sk_buff *rep_skb = NULL;
+	char dev_info[512];	/* one text line per registered switch */
+	int ret;
+
+	ret = mt753x_nl_list_devs(dev_info, sizeof(dev_info));
+	if (!ret) {
+		pr_info("No switch registered\n");
+		return -EINVAL;
+	}
+
+	ret = mt753x_nl_prepare_reply(info, MT753X_CMD_REPLY, &rep_skb);
+	if (ret < 0)
+		goto err;
+
+	ret = nla_put_string(rep_skb, MT753X_ATTR_TYPE_MESG, dev_info);
+	if (ret < 0)
+		goto err;
+
+	return mt753x_nl_send_reply(rep_skb, info);
+
+err:
+	if (rep_skb)
+		nlmsg_free(rep_skb);
+
+	return ret;
+}
+
+static int mt753x_nl_reply_read(struct genl_info *info, struct gsw_mt753x *gsw)
+{
+	struct sk_buff *rep_skb = NULL;
+	s32 phy, devad, reg;
+	int value;
+	int ret = -EINVAL;	/* was 0: a missing REG attr used to "succeed" silently */
+
+	phy = mt753x_nl_get_s32(info, MT753X_ATTR_TYPE_PHY, -1);
+	devad = mt753x_nl_get_s32(info, MT753X_ATTR_TYPE_DEVAD, -1);
+	reg = mt753x_nl_get_s32(info, MT753X_ATTR_TYPE_REG, -1);
+
+	if (reg < 0)	/* register number is mandatory */
+		goto err;
+
+	ret = mt753x_nl_prepare_reply(info, MT753X_CMD_READ, &rep_skb);
+	if (ret < 0)
+		goto err;
+
+	if (phy >= 0) {
+		if (devad < 0)
+			value = gsw->mii_read(gsw, phy, reg);	/* clause-22 PHY read */
+		else
+			value = gsw->mmd_read(gsw, phy, devad, reg);	/* clause-45 MMD read */
+	} else {
+		value = mt753x_reg_read(gsw, reg);	/* switch register read */
+	}
+
+	ret = nla_put_s32(rep_skb, MT753X_ATTR_TYPE_REG, reg);
+	if (ret < 0)
+		goto err;
+
+	ret = nla_put_s32(rep_skb, MT753X_ATTR_TYPE_VAL, value);
+	if (ret < 0)
+		goto err;
+
+	return mt753x_nl_send_reply(rep_skb, info);
+
+err:
+	if (rep_skb)
+		nlmsg_free(rep_skb);
+
+	return ret;
+}
+
+static int mt753x_nl_reply_write(struct genl_info *info, struct gsw_mt753x *gsw)
+{
+	struct sk_buff *rep_skb = NULL;
+	s32 phy, devad, reg;
+	u32 value;
+	int ret = -EINVAL;	/* was 0: missing REG/VAL attrs used to "succeed" silently */
+
+	phy = mt753x_nl_get_s32(info, MT753X_ATTR_TYPE_PHY, -1);
+	devad = mt753x_nl_get_s32(info, MT753X_ATTR_TYPE_DEVAD, -1);
+	reg = mt753x_nl_get_s32(info, MT753X_ATTR_TYPE_REG, -1);
+
+	if (mt753x_nl_get_u32(info, MT753X_ATTR_TYPE_VAL, &value))
+		goto err;	/* value to write is mandatory */
+
+	if (reg < 0)	/* register number is mandatory */
+		goto err;
+
+	ret = mt753x_nl_prepare_reply(info, MT753X_CMD_WRITE, &rep_skb);
+	if (ret < 0)
+		goto err;
+
+	if (phy >= 0) {
+		if (devad < 0)
+			gsw->mii_write(gsw, phy, reg, value);	/* clause-22 PHY write */
+		else
+			gsw->mmd_write(gsw, phy, devad, reg, value);	/* clause-45 MMD write */
+	} else {
+		mt753x_reg_write(gsw, reg, value);	/* switch register write */
+	}
+
+	ret = nla_put_s32(rep_skb, MT753X_ATTR_TYPE_REG, reg);
+	if (ret < 0)
+		goto err;
+
+	ret = nla_put_s32(rep_skb, MT753X_ATTR_TYPE_VAL, value);
+	if (ret < 0)
+		goto err;
+
+	return mt753x_nl_send_reply(rep_skb, info);
+
+err:
+	if (rep_skb)
+		nlmsg_free(rep_skb);
+
+	return ret;
+}
+
+static const enum mt753x_attr mt753x_nl_cmd_read_attrs[] = {
+	MT753X_ATTR_TYPE_REG	/* READ needs at least a register number */
+};
+
+static const enum mt753x_attr mt753x_nl_cmd_write_attrs[] = {
+	MT753X_ATTR_TYPE_REG,
+	MT753X_ATTR_TYPE_VAL	/* WRITE additionally needs the value */
+};
+
+static const struct mt753x_nl_cmd_item mt753x_nl_cmds[] = {
+	{
+		.cmd = MT753X_CMD_REQUEST,
+		.require_dev = false,	/* listing devices needs no target switch */
+		.process = mt753x_nl_get_swdevs
+	}, {
+		.cmd = MT753X_CMD_READ,
+		.require_dev = true,
+		.process = mt753x_nl_reply_read,
+		.required_attrs = mt753x_nl_cmd_read_attrs,
+		.nr_required_attrs = ARRAY_SIZE(mt753x_nl_cmd_read_attrs),
+	}, {
+		.cmd = MT753X_CMD_WRITE,
+		.require_dev = true,
+		.process = mt753x_nl_reply_write,
+		.required_attrs = mt753x_nl_cmd_write_attrs,
+		.nr_required_attrs = ARRAY_SIZE(mt753x_nl_cmd_write_attrs),
+	}
+};
+
+static int mt753x_nl_response(struct sk_buff *skb, struct genl_info *info)
+{
+	struct genlmsghdr *hdr = nlmsg_data(info->nlhdr);
+	const struct mt753x_nl_cmd_item *cmditem = NULL;
+	struct gsw_mt753x *gsw = NULL;
+	u32 sat_req_attrs = 0;	/* count of required attrs actually present */
+	int i, ret;
+
+	for (i = 0; i < ARRAY_SIZE(mt753x_nl_cmds); i++) {
+		if (hdr->cmd == mt753x_nl_cmds[i].cmd) {
+			cmditem = &mt753x_nl_cmds[i];
+			break;
+		}
+	}
+
+	if (!cmditem) {
+		pr_info("mt753x-nl: unknown cmd %u\n", hdr->cmd);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < cmditem->nr_required_attrs; i++) {
+		if (info->attrs[cmditem->required_attrs[i]])
+			sat_req_attrs++;
+	}
+
+	if (sat_req_attrs != cmditem->nr_required_attrs) {
+		pr_info("mt753x-nl: missing required attr(s) for cmd %u\n",
+			hdr->cmd);
+		return -EINVAL;
+	}
+
+	if (cmditem->require_dev) {
+		gsw = mt753x_nl_parse_find_gsw(info);
+		if (!gsw) {
+			pr_info("mt753x-nl: failed to find switch dev\n");
+			return -EINVAL;
+		}
+	}
+
+	ret = cmditem->process(info, gsw);
+
+	mt753x_put_gsw();	/* NOTE(review): runs even when require_dev was false and no device was taken - confirm get/put pairing */
+
+	return ret;
+}
+
+int __init mt753x_nl_init(void)
+{
+	int ret;
+
+	ret = genl_register_family(&mt753x_nl_family);
+	if (ret) {
+		pr_info("mt753x-nl: genl_register_family failed\n");	/* message now names the API actually called */
+		return ret;
+	}
+
+	return 0;
+}
+
+void __exit mt753x_nl_exit(void)
+{
+	genl_unregister_family(&mt753x_nl_family);	/* undo mt753x_nl_init() */
+}
diff --git a/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_regs.h b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_regs.h
new file mode 100755
index 0000000..1784873
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_regs.h
@@ -0,0 +1,344 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Weijie Gao <weijie.gao@mediatek.com>
+ */
+
+#ifndef _MT753X_REGS_H_
+#define _MT753X_REGS_H_
+
+#include <linux/bitops.h>
+
+/* Values of Egress TAG Control */
+#define ETAG_CTRL_UNTAG			0
+#define ETAG_CTRL_TAG			2
+#define ETAG_CTRL_SWAP			1
+#define ETAG_CTRL_STACK			3
+
+#define VTCR				0x90
+#define VAWD1				0x94
+#define VAWD2				0x98
+
+/* Fields of VTCR */
+#define VTCR_BUSY			BIT(31)
+#define IDX_INVLD			BIT(16)
+#define VTCR_FUNC_S			12
+#define VTCR_FUNC_M			0xf000
+#define VTCR_VID_S			0
+#define VTCR_VID_M			0xfff
+
+/* Values of VTCR_FUNC */
+#define VTCR_READ_VLAN_ENTRY		0
+#define VTCR_WRITE_VLAN_ENTRY		1
+#define VTCR_INVD_VLAN_ENTRY		2
+#define VTCR_ENABLE_VLAN_ENTRY		3
+#define VTCR_READ_ACL_ENTRY		4
+#define VTCR_WRITE_ACL_ENTRY		5
+#define VTCR_READ_TRTCM_TABLE		6
+#define VTCR_WRITE_TRTCM_TABLE		7
+#define VTCR_READ_ACL_MASK_ENTRY	8
+#define VTCR_WRITE_ACL_MASK_ENTRY	9
+#define VTCR_READ_ACL_RULE_ENTRY	10
+#define VTCR_WRITE_ACL_RULE_ENTRY	11
+#define VTCR_READ_ACL_RATE_ENTRY	12
+#define VTCR_WRITE_ACL_RATE_ENTRY	13
+
+/* VLAN entry fields */
+/* VAWD1 */
+#define PORT_STAG			BIT(31)
+#define IVL_MAC				BIT(30)
+#define EG_CON				BIT(29)
+#define VTAG_EN				BIT(28)
+#define COPY_PRI			BIT(27)
+#define USER_PRI_S			24
+#define USER_PRI_M			0x7000000
+#define PORT_MEM_S			16
+#define PORT_MEM_M			0xff0000
+#define S_TAG1_S			4
+#define S_TAG1_M			0xfff0
+#define FID_S				1
+#define FID_M				0x0e
+#define VENTRY_VALID			BIT(0)
+
+/* VAWD2 */
+#define S_TAG2_S			16
+#define S_TAG2_M			0xffff0000
+#define PORT_ETAG_S(p)			((p) * 2)
+#define PORT_ETAG_M			0x03
+
+#define PORT_CTRL_BASE			0x2000
+#define PORT_CTRL_PORT_OFFSET		0x100
+#define PORT_CTRL_REG(p, r)		(PORT_CTRL_BASE + \
+					(p) * PORT_CTRL_PORT_OFFSET +  (r))
+#define CKGCR(p)			PORT_CTRL_REG(p, 0x00)
+#define PCR(p)				PORT_CTRL_REG(p, 0x04)
+#define PIC(p)				PORT_CTRL_REG(p, 0x08)
+#define PSC(p)				PORT_CTRL_REG(p, 0x0c)
+#define PVC(p)				PORT_CTRL_REG(p, 0x10)
+#define PPBV1(p)			PORT_CTRL_REG(p, 0x14)
+#define PPBV2(p)			PORT_CTRL_REG(p, 0x18)
+#define BSR(p)				PORT_CTRL_REG(p, 0x1c)
+#define STAG01(p)			PORT_CTRL_REG(p, 0x20)	/* was object-like, expanding an undefined 'p'; now matches CKGCR(p)..BSR(p) */
+#define STAG23(p)			PORT_CTRL_REG(p, 0x24)
+#define STAG45(p)			PORT_CTRL_REG(p, 0x28)
+#define STAG67(p)			PORT_CTRL_REG(p, 0x2c)
+
+#define PPBV(p, g)			(PPBV1(p) + ((g) / 2) * 4)
+
+/* Fields of PCR */
+#define MLDV2_EN			BIT(30)
+#define EG_TAG_S			28
+#define EG_TAG_M			0x30000000
+#define PORT_PRI_S			24
+#define PORT_PRI_M			0x7000000
+#define PORT_MATRIX_S			16
+#define PORT_MATRIX_M			0xff0000
+#define UP2DSCP_EN			BIT(12)
+#define UP2TAG_EN			BIT(11)
+#define ACL_EN				BIT(10)
+#define PORT_TX_MIR			BIT(9)
+#define PORT_RX_MIR			BIT(8)
+#define ACL_MIR				BIT(7)
+#define MIS_PORT_FW_S			4
+#define MIS_PORT_FW_M			0x70
+#define VLAN_MIS			BIT(2)
+#define PORT_VLAN_S			0
+#define PORT_VLAN_M			0x03
+
+/* Values of PORT_VLAN */
+#define PORT_MATRIX_MODE		0
+#define FALLBACK_MODE			1
+#define CHECK_MODE			2
+#define SECURITY_MODE			3
+
+/* Fields of PVC */
+#define STAG_VPID_S			16
+#define STAG_VPID_M			0xffff0000
+#define DIS_PVID			BIT(15)
+#define FORCE_PVID			BIT(14)
+#define PT_VPM				BIT(12)
+#define PT_OPTION			BIT(11)
+#define PVC_EG_TAG_S			8
+#define PVC_EG_TAG_M			0x700
+#define VLAN_ATTR_S			6
+#define VLAN_ATTR_M			0xc0
+#define PVC_PORT_STAG			BIT(5)
+#define BC_LKYV_EN			BIT(4)
+#define MC_LKYV_EN			BIT(3)
+#define UC_LKYV_EN			BIT(2)
+#define ACC_FRM_S			0
+#define ACC_FRM_M			0x03
+
+/* Values of VLAN_ATTR */
+#define VA_USER_PORT			0
+#define VA_STACK_PORT			1
+#define VA_TRANSLATION_PORT		2
+#define VA_TRANSPARENT_PORT		3
+
+/* Fields of PPBV */
+#define GRP_PORT_PRI_S(g)		(((g) % 2) * 16 + 13)
+#define GRP_PORT_PRI_M			0x07
+#define GRP_PORT_VID_S(g)		(((g) % 2) * 16)
+#define GRP_PORT_VID_M			0xfff
+
+#define PORT_MAC_CTRL_BASE		0x3000
+#define PORT_MAC_CTRL_PORT_OFFSET	0x100
+#define PORT_MAC_CTRL_REG(p, r)		(PORT_MAC_CTRL_BASE + \
+					(p) * PORT_MAC_CTRL_PORT_OFFSET + (r))
+#define PMCR(p)				PORT_MAC_CTRL_REG(p, 0x00)
+#define PMEEECR(p)			PORT_MAC_CTRL_REG(p, 0x04)
+#define PMSR(p)				PORT_MAC_CTRL_REG(p, 0x08)
+#define PINT_EN(p)			PORT_MAC_CTRL_REG(p, 0x10)
+#define PINT_STS(p)			PORT_MAC_CTRL_REG(p, 0x14)
+
+#define GMACCR				(PORT_MAC_CTRL_BASE + 0xe0)
+#define TXCRC_EN			BIT(19)
+#define RXCRC_EN			BIT(18)
+#define PRMBL_LMT_EN			BIT(17)
+#define MTCC_LMT_S			9
+#define MTCC_LMT_M			0x1e00
+#define MAX_RX_JUMBO_S			2
+#define MAX_RX_JUMBO_M			0x3c
+#define MAX_RX_PKT_LEN_S		0
+#define MAX_RX_PKT_LEN_M		0x3
+
+/* Values of MAX_RX_PKT_LEN */
+#define RX_PKT_LEN_1518			0
+#define RX_PKT_LEN_1536			1
+#define RX_PKT_LEN_1522			2
+#define RX_PKT_LEN_MAX_JUMBO		3
+
+/* Fields of PMCR */
+#define IPG_CFG_S			18
+#define IPG_CFG_M			0xc0000
+#define EXT_PHY				BIT(17)
+#define MAC_MODE			BIT(16)
+#define MAC_TX_EN			BIT(14)
+#define MAC_RX_EN			BIT(13)
+#define MAC_PRE				BIT(11)
+#define BKOFF_EN			BIT(9)
+#define BACKPR_EN			BIT(8)
+#define FORCE_EEE1G			BIT(7)
+#define FORCE_EEE1000			BIT(6)
+#define FORCE_RX_FC			BIT(5)
+#define FORCE_TX_FC			BIT(4)
+#define FORCE_SPD_S			2
+#define FORCE_SPD_M			0x0c
+#define FORCE_DPX			BIT(1)
+#define FORCE_LINK			BIT(0)
+
+/* Fields of PMSR */
+#define EEE1G_STS			BIT(7)
+#define EEE100_STS			BIT(6)
+#define RX_FC_STS			BIT(5)
+#define TX_FC_STS			BIT(4)
+#define MAC_SPD_STS_S			2
+#define MAC_SPD_STS_M			0x0c
+#define MAC_DPX_STS			BIT(1)
+#define MAC_LNK_STS			BIT(0)
+
+/* Values of MAC_SPD_STS */
+#define MAC_SPD_10			0
+#define MAC_SPD_100			1
+#define MAC_SPD_1000			2
+#define MAC_SPD_2500			3
+
+/* Values of IPG_CFG */
+#define IPG_96BIT			0
+#define IPG_96BIT_WITH_SHORT_IPG	1
+#define IPG_64BIT			2
+
+#define MIB_COUNTER_BASE		0x4000
+#define MIB_COUNTER_PORT_OFFSET		0x100
+#define MIB_COUNTER_REG(p, r)		(MIB_COUNTER_BASE + \
+					(p) * MIB_COUNTER_PORT_OFFSET + (r))
+#define STATS_TDPC			0x00
+#define STATS_TCRC			0x04
+#define STATS_TUPC			0x08
+#define STATS_TMPC			0x0C
+#define STATS_TBPC			0x10
+#define STATS_TCEC			0x14
+#define STATS_TSCEC			0x18
+#define STATS_TMCEC			0x1C
+#define STATS_TDEC			0x20
+#define STATS_TLCEC			0x24
+#define STATS_TXCEC			0x28
+#define STATS_TPPC			0x2C
+#define STATS_TL64PC			0x30
+#define STATS_TL65PC			0x34
+#define STATS_TL128PC			0x38
+#define STATS_TL256PC			0x3C
+#define STATS_TL512PC			0x40
+#define STATS_TL1024PC			0x44
+#define STATS_TOC			0x48
+#define STATS_RDPC			0x60
+#define STATS_RFPC			0x64
+#define STATS_RUPC			0x68
+#define STATS_RMPC			0x6C
+#define STATS_RBPC			0x70
+#define STATS_RAEPC			0x74
+#define STATS_RCEPC			0x78
+#define STATS_RUSPC			0x7C
+#define STATS_RFEPC			0x80
+#define STATS_ROSPC			0x84
+#define STATS_RJEPC			0x88
+#define STATS_RPPC			0x8C
+#define STATS_RL64PC			0x90
+#define STATS_RL65PC			0x94
+#define STATS_RL128PC			0x98
+#define STATS_RL256PC			0x9C
+#define STATS_RL512PC			0xA0
+#define STATS_RL1024PC			0xA4
+#define STATS_ROC			0xA8
+#define STATS_RDPC_CTRL			0xB0
+#define STATS_RDPC_ING			0xB4
+#define STATS_RDPC_ARL			0xB8
+
+#define SYS_CTRL			0x7000
+#define SW_PHY_RST			BIT(2)
+#define SW_SYS_RST			BIT(1)
+#define SW_REG_RST			BIT(0)
+
+#define SYS_INT_EN			0x7008
+#define SYS_INT_STS			0x700c
+#define MAC_PC_INT			BIT(16)
+#define PHY_INT(p)			BIT((p) + 8)
+#define PHY_LC_INT(p)			BIT(p)
+
+#define PHY_IAC				0x701c
+#define PHY_ACS_ST			BIT(31)
+#define MDIO_REG_ADDR_S			25
+#define MDIO_REG_ADDR_M			0x3e000000
+#define MDIO_PHY_ADDR_S			20
+#define MDIO_PHY_ADDR_M			0x1f00000
+#define MDIO_CMD_S			18
+#define MDIO_CMD_M			0xc0000
+#define MDIO_ST_S			16
+#define MDIO_ST_M			0x30000
+#define MDIO_RW_DATA_S			0
+#define MDIO_RW_DATA_M			0xffff
+
+/* MDIO_CMD: MDIO commands */
+#define MDIO_CMD_ADDR			0
+#define MDIO_CMD_WRITE			1
+#define MDIO_CMD_READ			2
+#define MDIO_CMD_READ_C45		3
+
+/* MDIO_ST: MDIO start field */
+#define MDIO_ST_C45			0
+#define MDIO_ST_C22			1
+
+#define HWSTRAP				0x7800
+#define MHWSTRAP			0x7804
+
+/* Internal GPHY Page Control Register */
+#define PHY_CL22_PAGE_CTRL		0x1f
+#define PHY_TR_PAGE			0x52b5
+
+/* Internal GPHY Token Ring Access Registers */
+#define PHY_TR_CTRL			0x10
+#define PHY_TR_LOW_DATA			0x11
+#define PHY_TR_HIGH_DATA		0x12
+
+/* Fields of PHY_TR_CTRL */
+#define PHY_TR_PKT_XMT_STA		BIT(15)
+#define PHY_TR_WR_S			13
+#define PHY_TR_CH_ADDR_S		11
+#define PHY_TR_NODE_ADDR_S		7
+#define PHY_TR_DATA_ADDR_S		1
+
+enum phy_tr_wr {
+	PHY_TR_WRITE = 0,
+	PHY_TR_READ = 1,
+};
+
+/* Helper macro for GPHY Token Ring Access */
+#define PHY_TR_LOW_VAL(x)		((x) & 0xffff)
+#define PHY_TR_HIGH_VAL(x)		(((x) & 0xff0000) >> 16)
+
+/* Token Ring Channels */
+#define PMA_CH				0x1
+#define DSP_CH				0x2
+
+/* Token Ring Nodes */
+#define PMA_NOD				0xf
+#define DSP_NOD				0xd
+
+/* Token Ring register range */
+enum tr_pma_reg_addr {
+	PMA_MIN = 0x0,
+	PMA_01  = 0x1,
+	PMA_17  = 0x17,
+	PMA_18  = 0x18,
+	PMA_MAX = 0x3d,
+};
+
+enum tr_dsp_reg_addr {
+	DSP_MIN = 0x0,
+	DSP_06  = 0x6,
+	DSP_08  = 0x8,
+	DSP_0f  = 0xf,
+	DSP_10  = 0x10,
+	DSP_MAX = 0x3e,
+};
+#endif /* _MT753X_REGS_H_ */
diff --git a/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_swconfig.c b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_swconfig.c
new file mode 100755
index 0000000..7a05952
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_swconfig.c
@@ -0,0 +1,517 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Weijie Gao <weijie.gao@mediatek.com>
+ */
+
+#include <linux/if.h>
+#include <linux/list.h>
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/netlink.h>
+#include <linux/bitops.h>
+#include <net/genetlink.h>
+#include <linux/delay.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/lockdep.h>
+#include <linux/workqueue.h>
+#include <linux/of_device.h>
+
+#include "mt753x.h"
+#include "mt753x_swconfig.h"
+#include "mt753x_regs.h"
+
+#define MT753X_PORT_MIB_TXB_ID	18	/* TxByte */
+#define MT753X_PORT_MIB_RXB_ID	37	/* RxByte */
+
+#define MIB_DESC(_s, _o, _n)   \
+	{                       \
+		.size = (_s),   \
+		.offset = (_o), \
+		.name = (_n),   \
+	}
+
+struct mt753x_mib_desc {
+	unsigned int size;
+	unsigned int offset;
+	const char *name;
+};
+
+static const struct mt753x_mib_desc mt753x_mibs[] = {
+	MIB_DESC(1, STATS_TDPC, "TxDrop"),
+	MIB_DESC(1, STATS_TCRC, "TxCRC"),
+	MIB_DESC(1, STATS_TUPC, "TxUni"),
+	MIB_DESC(1, STATS_TMPC, "TxMulti"),
+	MIB_DESC(1, STATS_TBPC, "TxBroad"),
+	MIB_DESC(1, STATS_TCEC, "TxCollision"),
+	MIB_DESC(1, STATS_TSCEC, "TxSingleCol"),
+	MIB_DESC(1, STATS_TMCEC, "TxMultiCol"),
+	MIB_DESC(1, STATS_TDEC, "TxDefer"),
+	MIB_DESC(1, STATS_TLCEC, "TxLateCol"),
+	MIB_DESC(1, STATS_TXCEC, "TxExcCol"),
+	MIB_DESC(1, STATS_TPPC, "TxPause"),
+	MIB_DESC(1, STATS_TL64PC, "Tx64Byte"),
+	MIB_DESC(1, STATS_TL65PC, "Tx65Byte"),
+	MIB_DESC(1, STATS_TL128PC, "Tx128Byte"),
+	MIB_DESC(1, STATS_TL256PC, "Tx256Byte"),
+	MIB_DESC(1, STATS_TL512PC, "Tx512Byte"),
+	MIB_DESC(1, STATS_TL1024PC, "Tx1024Byte"),
+	MIB_DESC(2, STATS_TOC, "TxByte"),
+	MIB_DESC(1, STATS_RDPC, "RxDrop"),
+	MIB_DESC(1, STATS_RFPC, "RxFiltered"),
+	MIB_DESC(1, STATS_RUPC, "RxUni"),
+	MIB_DESC(1, STATS_RMPC, "RxMulti"),
+	MIB_DESC(1, STATS_RBPC, "RxBroad"),
+	MIB_DESC(1, STATS_RAEPC, "RxAlignErr"),
+	MIB_DESC(1, STATS_RCEPC, "RxCRC"),
+	MIB_DESC(1, STATS_RUSPC, "RxUnderSize"),
+	MIB_DESC(1, STATS_RFEPC, "RxFragment"),
+	MIB_DESC(1, STATS_ROSPC, "RxOverSize"),
+	MIB_DESC(1, STATS_RJEPC, "RxJabber"),
+	MIB_DESC(1, STATS_RPPC, "RxPause"),
+	MIB_DESC(1, STATS_RL64PC, "Rx64Byte"),
+	MIB_DESC(1, STATS_RL65PC, "Rx65Byte"),
+	MIB_DESC(1, STATS_RL128PC, "Rx128Byte"),
+	MIB_DESC(1, STATS_RL256PC, "Rx256Byte"),
+	MIB_DESC(1, STATS_RL512PC, "Rx512Byte"),
+	MIB_DESC(1, STATS_RL1024PC, "Rx1024Byte"),
+	MIB_DESC(2, STATS_ROC, "RxByte"),
+	MIB_DESC(1, STATS_RDPC_CTRL, "RxCtrlDrop"),
+	MIB_DESC(1, STATS_RDPC_ING, "RxIngDrop"),
+	MIB_DESC(1, STATS_RDPC_ARL, "RxARLDrop")
+};
+
+enum {
+	/* Global attributes. */
+	MT753X_ATTR_ENABLE_VLAN,
+};
+
+static int mt753x_get_vlan_enable(struct switch_dev *dev,
+				  const struct switch_attr *attr,
+				  struct switch_val *val)
+{
+	struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
+
+	val->value.i = gsw->global_vlan_enable;	/* 1 = VLAN mode, 0 = port isolation (see apply_config) */
+
+	return 0;
+}
+
+static int mt753x_set_vlan_enable(struct switch_dev *dev,
+				  const struct switch_attr *attr,
+				  struct switch_val *val)
+{
+	struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
+
+	gsw->global_vlan_enable = val->value.i != 0;	/* takes effect on next apply_config */
+
+	return 0;
+}
+
+static int mt753x_get_port_pvid(struct switch_dev *dev, int port, int *val)
+{
+	struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
+
+	if (port < 0 || port >= MT753X_NUM_PORTS)	/* reject negatives too, as get_port_link does */
+		return -EINVAL;
+
+	*val = mt753x_reg_read(gsw, PPBV1(port));
+	*val &= GRP_PORT_VID_M;	/* keep only the 12-bit VID field */
+
+	return 0;
+}
+
+static int mt753x_set_port_pvid(struct switch_dev *dev, int port, int pvid)
+{
+	struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
+
+	if (port < 0 || port >= MT753X_NUM_PORTS)	/* negative port indexed port_entries out of bounds */
+		return -EINVAL;
+
+	if (pvid < MT753X_MIN_VID || pvid > MT753X_MAX_VID)
+		return -EINVAL;
+
+	gsw->port_entries[port].pvid = pvid;	/* cached only; presumably written to hw on apply_config - confirm */
+
+	return 0;
+}
+
+static int mt753x_get_vlan_ports(struct switch_dev *dev, struct switch_val *val)
+{
+	struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
+	u32 member;
+	u32 etags;
+	int i;
+
+	val->len = 0;
+
+	if (val->port_vlan < 0 || val->port_vlan >= MT753X_NUM_VLANS)
+		return -EINVAL;
+
+	mt753x_vlan_ctrl(gsw, VTCR_READ_VLAN_ENTRY, val->port_vlan);	/* latch the entry into VAWD1/VAWD2 */
+
+	member = mt753x_reg_read(gsw, VAWD1);
+	member &= PORT_MEM_M;
+	member >>= PORT_MEM_S;	/* member = port membership bitmap */
+
+	etags = mt753x_reg_read(gsw, VAWD2);	/* per-port egress tag control */
+
+	for (i = 0; i < MT753X_NUM_PORTS; i++) {
+		struct switch_port *p;
+		int etag;
+
+		if (!(member & BIT(i)))
+			continue;	/* port not in this VLAN */
+
+		p = &val->value.ports[val->len++];
+		p->id = i;
+
+		etag = (etags >> PORT_ETAG_S(i)) & PORT_ETAG_M;
+
+		if (etag == ETAG_CTRL_TAG)
+			p->flags |= BIT(SWITCH_PORT_FLAG_TAGGED);
+		else if (etag != ETAG_CTRL_UNTAG)
+			dev_info(gsw->dev,
+				 "vlan egress tag control neither untag nor tag.\n");
+	}
+
+	return 0;
+}
+
+static int mt753x_set_vlan_ports(struct switch_dev *dev, struct switch_val *val)
+{
+	struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
+	u8 member = 0;	/* membership bitmap to cache */
+	u8 etags = 0;	/* tagged-egress bitmap to cache */
+	int i;
+
+	if (val->port_vlan < 0 || val->port_vlan >= MT753X_NUM_VLANS ||
+	    val->len > MT753X_NUM_PORTS)
+		return -EINVAL;
+
+	for (i = 0; i < val->len; i++) {
+		struct switch_port *p = &val->value.ports[i];
+
+		if (p->id >= MT753X_NUM_PORTS)
+			return -EINVAL;
+
+		member |= BIT(p->id);
+
+		if (p->flags & BIT(SWITCH_PORT_FLAG_TAGGED))
+			etags |= BIT(p->id);
+	}
+
+	gsw->vlan_entries[val->port_vlan].member = member;
+	gsw->vlan_entries[val->port_vlan].etags = etags;	/* cached; applied by apply_config -> mt753x_apply_vlan_config */
+
+	return 0;
+}
+
+static int mt753x_set_vid(struct switch_dev *dev,
+			  const struct switch_attr *attr,
+			  struct switch_val *val)
+{
+	struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
+	int vlan;
+	u16 vid;
+
+	vlan = val->port_vlan;	/* vlan table index */
+	vid = (u16)val->value.i;
+
+	if (vlan < 0 || vlan >= MT753X_NUM_VLANS)
+		return -EINVAL;
+
+	if (vid < MT753X_MIN_VID || vid > MT753X_MAX_VID)
+		return -EINVAL;
+
+	gsw->vlan_entries[vlan].vid = vid;	/* cached; flushed with the rest of the vlan entry */
+	return 0;
+}
+
+static int mt753x_get_vid(struct switch_dev *dev,
+			  const struct switch_attr *attr,
+			  struct switch_val *val)
+{
+	val->value.i = val->port_vlan;	/* NOTE(review): echoes the table index, not the vlan_entries[].vid stored by set_vid - confirm intent */
+	return 0;
+}
+
+static int mt753x_get_port_link(struct switch_dev *dev, int port,
+				struct switch_port_link *link)
+{
+	struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
+	u32 speed, pmsr;
+
+	if (port < 0 || port >= MT753X_NUM_PORTS)
+		return -EINVAL;
+
+	pmsr = mt753x_reg_read(gsw, PMSR(port));	/* per-port MAC status register */
+
+	link->link = pmsr & MAC_LNK_STS;
+	link->duplex = pmsr & MAC_DPX_STS;
+	speed = (pmsr & MAC_SPD_STS_M) >> MAC_SPD_STS_S;	/* 2-bit field: all 4 values handled below */
+
+	switch (speed) {
+	case MAC_SPD_10:
+		link->speed = SWITCH_PORT_SPEED_10;
+		break;
+	case MAC_SPD_100:
+		link->speed = SWITCH_PORT_SPEED_100;
+		break;
+	case MAC_SPD_1000:
+		link->speed = SWITCH_PORT_SPEED_1000;
+		break;
+	case MAC_SPD_2500:
+		/* TODO: swconfig has no support for 2500 now */
+		link->speed = SWITCH_PORT_SPEED_UNKNOWN;
+		break;
+	}
+
+	return 0;
+}
+
+static int mt753x_set_port_link(struct switch_dev *dev, int port,
+				struct switch_port_link *link)
+{
+#ifndef MODULE
+	if (port >= MT753X_NUM_PHYS)
+		return -EINVAL;
+
+	return switch_generic_set_link(dev, port, link);	/* built-in only; presumably unavailable to modules - see #ifndef */
+#else
+	return -ENOTSUPP;
+#endif
+}
+
+static u64 get_mib_counter(struct gsw_mt753x *gsw, int i, int port)
+{
+	unsigned int offset;
+	u64 lo, hi, hi2;
+
+	offset = mt753x_mibs[i].offset;
+
+	if (mt753x_mibs[i].size == 1)
+		return mt753x_reg_read(gsw, MIB_COUNTER_REG(port, offset));	/* single 32-bit counter */
+
+	do {
+		hi = mt753x_reg_read(gsw, MIB_COUNTER_REG(port, offset + 4));
+		lo = mt753x_reg_read(gsw, MIB_COUNTER_REG(port, offset));
+		hi2 = mt753x_reg_read(gsw, MIB_COUNTER_REG(port, offset + 4));
+	} while (hi2 != hi);	/* re-read high word to detect a carry between the two reads */
+
+	return (hi << 32) | lo;
+}
+
+static int mt753x_get_port_mib(struct switch_dev *dev,
+			       const struct switch_attr *attr,
+			       struct switch_val *val)
+{
+	static char buf[4096];	/* NOTE(review): shared static scratch - assumes swconfig serializes attr reads; confirm */
+	struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
+	int i, len = 0;
+
+	if (val->port_vlan < 0 || val->port_vlan >= MT753X_NUM_PORTS)	/* negative check added, matching siblings */
+		return -EINVAL;
+
+	len += snprintf(buf + len, sizeof(buf) - len,
+			"Port %d MIB counters\n", val->port_vlan);
+
+	for (i = 0; i < ARRAY_SIZE(mt753x_mibs); ++i) {
+		u64 counter;
+
+		len += snprintf(buf + len, sizeof(buf) - len,
+				"%-11s: ", mt753x_mibs[i].name);
+		counter = get_mib_counter(gsw, i, val->port_vlan);
+		len += snprintf(buf + len, sizeof(buf) - len, "%llu\n",
+				counter);
+	}
+
+	val->value.s = buf;
+	val->len = len;
+	return 0;
+}
+
+static int mt753x_get_port_stats(struct switch_dev *dev, int port,
+				 struct switch_port_stats *stats)
+{
+	struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
+
+	if (port < 0 || port >= MT753X_NUM_PORTS)
+		return -EINVAL;
+
+	stats->tx_bytes = get_mib_counter(gsw, MT753X_PORT_MIB_TXB_ID, port);	/* TxByte MIB entry */
+	stats->rx_bytes = get_mib_counter(gsw, MT753X_PORT_MIB_RXB_ID, port);	/* RxByte MIB entry */
+
+	return 0;
+}
+
+static void mt753x_port_isolation(struct gsw_mt753x *gsw)
+{
+	int i;
+
+	for (i = 0; i < MT753X_NUM_PORTS; i++)
+		mt753x_reg_write(gsw, PCR(i),
+				 BIT(gsw->cpu_port) << PORT_MATRIX_S);	/* each port may reach only the CPU port */
+
+	mt753x_reg_write(gsw, PCR(gsw->cpu_port), PORT_MATRIX_M);	/* CPU port may reach all ports */
+
+	for (i = 0; i < MT753X_NUM_PORTS; i++) {
+		u32 pvc_mode = 0x8100 << STAG_VPID_S;	/* standard 802.1Q TPID */
+
+		if ((gsw->port5_cfg.stag_on && i == 5) ||
+		    (gsw->port6_cfg.stag_on && i == 6))
+			pvc_mode |= PVC_PORT_STAG;	/* service-tag mode on configured CPU-facing ports */
+		else
+			pvc_mode |= (VA_TRANSPARENT_PORT << VLAN_ATTR_S);
+
+		mt753x_reg_write(gsw, PVC(i), pvc_mode);
+	}
+}
+
+static int mt753x_apply_config(struct switch_dev *dev)
+{
+	struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
+
+	if (!gsw->global_vlan_enable) {
+		mt753x_port_isolation(gsw);	/* non-VLAN mode: CPU-port isolation matrix */
+		return 0;
+	}
+
+	mt753x_apply_vlan_config(gsw);	/* VLAN mode: flush cached entries to hardware */
+
+	return 0;
+}
+
+static int mt753x_reset_switch(struct switch_dev *dev)
+{
+	struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
+	int i;
+
+	memset(gsw->port_entries, 0, sizeof(gsw->port_entries));
+	memset(gsw->vlan_entries, 0, sizeof(gsw->vlan_entries));
+
+	/* set default vid of each vlan to the same number of vlan, so the vid
+	 * won't need be set explicitly.
+	 */
+	for (i = 0; i < MT753X_NUM_VLANS; i++)
+		gsw->vlan_entries[i].vid = i;
+
+	return 0;
+}
+
+static int mt753x_phy_read16(struct switch_dev *dev, int addr, u8 reg,
+			     u16 *value)
+{
+	struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
+
+	*value = gsw->mii_read(gsw, addr, reg);	/* delegate to the switch's MDIO accessor */
+
+	return 0;
+}
+
+static int mt753x_phy_write16(struct switch_dev *dev, int addr, u8 reg,
+			      u16 value)
+{
+	struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
+
+	gsw->mii_write(gsw, addr, reg, value);
+
+	return 0;
+}
+
+static const struct switch_attr mt753x_global[] = {
+	{
+		.type = SWITCH_TYPE_INT,
+		.name = "enable_vlan",
+		.description = "VLAN mode (1:enabled)",
+		.max = 1,
+		.id = MT753X_ATTR_ENABLE_VLAN,
+		.get = mt753x_get_vlan_enable,
+		.set = mt753x_set_vlan_enable,
+	}
+};
+
+static const struct switch_attr mt753x_port[] = {
+	{
+		.type = SWITCH_TYPE_STRING,
+		.name = "mib",
+		.description = "Get MIB counters for port",
+		.get = mt753x_get_port_mib,
+		.set = NULL,
+	},
+};
+
+static const struct switch_attr mt753x_vlan[] = {
+	{
+		.type = SWITCH_TYPE_INT,
+		.name = "vid",
+		.description = "VLAN ID (0-4094)",
+		.set = mt753x_set_vid,
+		.get = mt753x_get_vid,
+		.max = 4094,
+	},
+};
+
+static const struct switch_dev_ops mt753x_swdev_ops = {
+	.attr_global = {
+		.attr = mt753x_global,
+		.n_attr = ARRAY_SIZE(mt753x_global),
+	},
+	.attr_port = {
+		.attr = mt753x_port,
+		.n_attr = ARRAY_SIZE(mt753x_port),
+	},
+	.attr_vlan = {
+		.attr = mt753x_vlan,
+		.n_attr = ARRAY_SIZE(mt753x_vlan),
+	},
+	.get_vlan_ports = mt753x_get_vlan_ports,
+	.set_vlan_ports = mt753x_set_vlan_ports,
+	.get_port_pvid = mt753x_get_port_pvid,
+	.set_port_pvid = mt753x_set_port_pvid,
+	.get_port_link = mt753x_get_port_link,
+	.set_port_link = mt753x_set_port_link,
+	.get_port_stats = mt753x_get_port_stats,
+	.apply_config = mt753x_apply_config,
+	.reset_switch = mt753x_reset_switch,
+	.phy_read16 = mt753x_phy_read16,
+	.phy_write16 = mt753x_phy_write16,
+};
+
+int mt753x_swconfig_init(struct gsw_mt753x *gsw)
+{
+	struct device_node *np = gsw->dev->of_node;
+	struct switch_dev *swdev;
+	int ret;
+
+	if (of_property_read_u32(np, "mediatek,cpuport", &gsw->cpu_port))
+		gsw->cpu_port = MT753X_DFL_CPU_PORT;	/* DT override absent: fall back to default */
+
+	swdev = &gsw->swdev;
+
+	swdev->name = gsw->name;
+	swdev->alias = gsw->name;
+	swdev->cpu_port = gsw->cpu_port;
+	swdev->ports = MT753X_NUM_PORTS;
+	swdev->vlans = MT753X_NUM_VLANS;
+	swdev->ops = &mt753x_swdev_ops;
+
+	ret = register_switch(swdev, NULL);
+	if (ret) {
+		dev_notice(gsw->dev, "Failed to register switch %s\n",
+			   swdev->name);
+		return ret;
+	}
+
+	mt753x_apply_config(swdev);	/* start from a known isolation/vlan state */
+
+	return 0;
+}
+
+void mt753x_swconfig_destroy(struct gsw_mt753x *gsw)
+{
+	unregister_switch(&gsw->swdev);	/* undo register_switch() from init */
+}
diff --git a/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_vlan.c b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_vlan.c
new file mode 100755
index 0000000..c806566
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_vlan.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ */
+
+#include "mt753x.h"
+#include "mt753x_regs.h"
+
+/*
+ * Built-in port mappings selectable with the "mediatek,portmap" DT
+ * property.  The name appears to encode per-port roles left to right
+ * ('l' = LAN, 'w' = WAN) -- TODO confirm against board device trees.
+ * pvids has one entry per switch port; members/etags/vids describe up
+ * to three VLAN table entries (entry 0 unused).
+ */
+struct mt753x_mapping mt753x_def_mapping[] = {
+	{
+		.name = "llllw",
+		.pvids = { 1, 1, 1, 1, 2, 2, 1 },
+		.members = { 0, 0x4f, 0x30 },
+		.etags = { 0, 0, 0 },
+		.vids = { 0, 1, 2 },
+	}, {
+		.name = "wllll",
+		.pvids = { 2, 1, 1, 1, 1, 2, 1 },
+		.members = { 0, 0x5e, 0x21 },
+		.etags = { 0, 0, 0 },
+		.vids = { 0, 1, 2 },
+	}, {
+		.name = "lwlll",
+		.pvids = { 1, 2, 1, 1, 1, 2, 1 },
+		.members = { 0, 0x5d, 0x22 },
+		.etags = { 0, 0, 0 },
+		.vids = { 0, 1, 2 },
+	},
+};
+
+/*
+ * mt753x_vlan_ctrl - issue a VLAN table command and wait for completion
+ * @gsw: switch instance
+ * @cmd: VTCR function code
+ * @val: VID / index argument for the command
+ *
+ * Kicks the VLAN table control register and polls the BUSY bit for up
+ * to ~300 ms; logs a message if the hardware never goes idle.
+ */
+void mt753x_vlan_ctrl(struct gsw_mt753x *gsw, u32 cmd, u32 val)
+{
+	int i;
+
+	mt753x_reg_write(gsw, VTCR,
+			 VTCR_BUSY | ((cmd << VTCR_FUNC_S) & VTCR_FUNC_M) |
+			 (val & VTCR_VID_M));
+
+	/* poll until the switch clears BUSY; ~1 ms per iteration */
+	for (i = 0; i < 300; i++) {
+		u32 vtcr = mt753x_reg_read(gsw, VTCR);
+
+		if (!(vtcr & VTCR_BUSY))
+			break;
+
+		usleep_range(1000, 1100);
+	}
+
+	if (i == 300)
+		dev_info(gsw->dev, "vtcr timeout\n");
+}
+
+/*
+ * mt753x_write_vlan_entry - program one VLAN table entry
+ * @gsw:   switch instance
+ * @vlan:  logical entry index (currently unused -- the entry is
+ *         addressed by @vid; kept for call-site symmetry)
+ * @vid:   VLAN ID used as the hardware table index
+ * @ports: member port bitmap (0 invalidates the entry)
+ * @etags: per-port egress-tag bitmap (1 = tagged, 0 = untagged)
+ */
+static void mt753x_write_vlan_entry(struct gsw_mt753x *gsw, int vlan, u16 vid,
+				    u8 ports, u8 etags)
+{
+	int port;
+	u32 val;
+
+	/* vlan port membership */
+	if (ports)
+		mt753x_reg_write(gsw, VAWD1,
+				 IVL_MAC | VTAG_EN | VENTRY_VALID |
+				 ((ports << PORT_MEM_S) & PORT_MEM_M));
+	else
+		mt753x_reg_write(gsw, VAWD1, 0);
+
+	/* egress mode */
+	val = 0;
+	for (port = 0; port < MT753X_NUM_PORTS; port++) {
+		if (etags & BIT(port))
+			val |= ETAG_CTRL_TAG << PORT_ETAG_S(port);
+		else
+			val |= ETAG_CTRL_UNTAG << PORT_ETAG_S(port);
+	}
+	mt753x_reg_write(gsw, VAWD2, val);
+
+	/* write to vlan table */
+	mt753x_vlan_ctrl(gsw, VTCR_WRITE_VLAN_ENTRY, vid);
+}
+
+/*
+ * mt753x_apply_vlan_config - push the cached VLAN/PVID state to hardware
+ * @gsw: switch instance
+ *
+ * Sequence: force all ports into security mode, classify each port as
+ * tag/untag based on VLAN membership, program per-port PVC attributes,
+ * clear and rewrite the whole VLAN table, then set per-port PVIDs.
+ * Register write order matters here; do not reorder.
+ */
+void mt753x_apply_vlan_config(struct gsw_mt753x *gsw)
+{
+	int i, j;
+	u8 tag_ports;
+	u8 untag_ports;
+
+	/* set all ports as security mode */
+	for (i = 0; i < MT753X_NUM_PORTS; i++)
+		mt753x_reg_write(gsw, PCR(i),
+				 PORT_MATRIX_M | SECURITY_MODE);
+
+	/* check if a port is used in tag/untag vlan egress mode */
+	tag_ports = 0;
+	untag_ports = 0;
+
+	for (i = 0; i < MT753X_NUM_VLANS; i++) {
+		u8 member = gsw->vlan_entries[i].member;
+		u8 etags = gsw->vlan_entries[i].etags;
+
+		if (!member)
+			continue;
+
+		for (j = 0; j < MT753X_NUM_PORTS; j++) {
+			if (!(member & BIT(j)))
+				continue;
+
+			if (etags & BIT(j))
+				tag_ports |= 1u << j;
+			else
+				untag_ports |= 1u << j;
+		}
+	}
+
+	/* set all untag-only ports as transparent and the rest as user port */
+	for (i = 0; i < MT753X_NUM_PORTS; i++) {
+		/* 0x8100 = standard 802.1Q TPID used as the service tag VPID */
+		u32 pvc_mode = 0x8100 << STAG_VPID_S;
+
+		if (untag_ports & BIT(i) && !(tag_ports & BIT(i)))
+			pvc_mode = (0x8100 << STAG_VPID_S) |
+				(VA_TRANSPARENT_PORT << VLAN_ATTR_S);
+
+		if ((gsw->port5_cfg.stag_on && i == 5) ||
+		    (gsw->port6_cfg.stag_on && i == 6))
+			pvc_mode = (0x8100 << STAG_VPID_S) | PVC_PORT_STAG;
+
+		mt753x_reg_write(gsw, PVC(i), pvc_mode);
+	}
+
+	/* first clear the switch vlan table */
+	for (i = 0; i < MT753X_NUM_VLANS; i++)
+		mt753x_write_vlan_entry(gsw, i, i, 0, 0);
+
+	/* now program only vlans with members to avoid
+	 * clobbering remapped entries in later iterations
+	 */
+	for (i = 0; i < MT753X_NUM_VLANS; i++) {
+		u16 vid = gsw->vlan_entries[i].vid;
+		u8 member = gsw->vlan_entries[i].member;
+		u8 etags = gsw->vlan_entries[i].etags;
+
+		if (member)
+			mt753x_write_vlan_entry(gsw, i, vid, member, etags);
+	}
+
+	/* Port Default PVID */
+	for (i = 0; i < MT753X_NUM_PORTS; i++) {
+		int vlan = gsw->port_entries[i].pvid;
+		u16 pvid = 0;
+		u32 val;
+
+		/* only use the PVID if that VLAN actually has members */
+		if (vlan < MT753X_NUM_VLANS && gsw->vlan_entries[vlan].member)
+			pvid = gsw->vlan_entries[vlan].vid;
+
+		val = mt753x_reg_read(gsw, PPBV1(i));
+		val &= ~GRP_PORT_VID_M;
+		val |= pvid;
+		mt753x_reg_write(gsw, PPBV1(i), val);
+	}
+}
+
+/*
+ * mt753x_find_mapping - look up the built-in portmap named in the DT
+ * @np: switch device node
+ *
+ * Return: matching entry of mt753x_def_mapping[], or NULL when the
+ * "mediatek,portmap" property is absent or names no known mapping.
+ */
+struct mt753x_mapping *mt753x_find_mapping(struct device_node *np)
+{
+	struct mt753x_mapping *m;
+	const char *map;
+
+	if (of_property_read_string(np, "mediatek,portmap", &map))
+		return NULL;
+
+	for (m = mt753x_def_mapping;
+	     m < mt753x_def_mapping + ARRAY_SIZE(mt753x_def_mapping); m++)
+		if (!strcmp(map, m->name))
+			return m;
+
+	return NULL;
+}
+
+/*
+ * mt753x_apply_mapping - copy a built-in portmap into the cached state
+ * @gsw: switch instance
+ * @map: mapping to install (per-port PVIDs plus VLAN member/etag/vid)
+ *
+ * Only updates the software cache; mt753x_apply_vlan_config() pushes
+ * the result to hardware.
+ */
+void mt753x_apply_mapping(struct gsw_mt753x *gsw, struct mt753x_mapping *map)
+{
+	int port, vlan;
+
+	for (port = 0; port < MT753X_NUM_PORTS; port++)
+		gsw->port_entries[port].pvid = map->pvids[port];
+
+	for (vlan = 0; vlan < MT753X_NUM_VLANS; vlan++) {
+		gsw->vlan_entries[vlan].member = map->members[vlan];
+		gsw->vlan_entries[vlan].etags = map->etags[vlan];
+		gsw->vlan_entries[vlan].vid = map->vids[vlan];
+	}
+}
diff --git a/target/linux/mediatek/files-5.4/drivers/pci/controller/pcie-mediatek-gen3.c b/target/linux/mediatek/files-5.4/drivers/pci/controller/pcie-mediatek-gen3.c
new file mode 100644
index 0000000..3d7a60d
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -0,0 +1,1072 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MediaTek PCIe host controller driver.
+ *
+ * Copyright (c) 2020 MediaTek Inc.
+ * Author: Jianjun Wang <jianjun.wang@mediatek.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_clk.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include "../pci.h"
+
+#define PCIE_SETTING_REG		0x80
+#define PCIE_PCI_IDS_1			0x9c
+/* parenthesize macro args so shifted/or'ed arguments expand safely */
+#define PCI_CLASS(class)		((class) << 8)
+#define PCIE_RC_MODE			BIT(0)
+
+#define PCIE_CFGNUM_REG			0x140
+#define PCIE_CFG_DEVFN(devfn)		((devfn) & GENMASK(7, 0))
+#define PCIE_CFG_BUS(bus)		(((bus) << 8) & GENMASK(15, 8))
+#define PCIE_CFG_BYTE_EN(bytes)		(((bytes) << 16) & GENMASK(19, 16))
+#define PCIE_CFG_FORCE_BYTE_EN		BIT(20)
+#define PCIE_CFG_OFFSET_ADDR		0x1000
+#define PCIE_CFG_HEADER(bus, devfn) \
+	(PCIE_CFG_BUS(bus) | PCIE_CFG_DEVFN(devfn))
+
+#define PCIE_RST_CTRL_REG		0x148
+#define PCIE_MAC_RSTB			BIT(0)
+#define PCIE_PHY_RSTB			BIT(1)
+#define PCIE_BRG_RSTB			BIT(2)
+#define PCIE_PE_RSTB			BIT(3)
+
+#define PCIE_LTSSM_STATUS_REG		0x150
+#define PCIE_LTSSM_STATE_MASK		GENMASK(28, 24)
+#define PCIE_LTSSM_STATE(val)		(((val) & PCIE_LTSSM_STATE_MASK) >> 24)
+#define PCIE_LTSSM_STATE_L2_IDLE	0x14
+
+#define PCIE_LINK_STATUS_REG		0x154
+#define PCIE_PORT_LINKUP		BIT(8)
+
+#define PCIE_MSI_SET_NUM		8
+#define PCIE_MSI_IRQS_PER_SET		32
+#define PCIE_MSI_IRQS_NUM \
+	(PCIE_MSI_IRQS_PER_SET * (PCIE_MSI_SET_NUM))
+
+#define PCIE_INT_ENABLE_REG		0x180
+#define PCIE_MSI_MASK			GENMASK(PCIE_MSI_SET_NUM + 8 - 1, 8)
+#define PCIE_MSI_SHIFT			8
+#define PCIE_INTX_SHIFT			24
+#define PCIE_INTX_MASK			GENMASK(27, 24)
+
+#define PCIE_INT_STATUS_REG		0x184
+#define PCIE_MSI_SET_ENABLE_REG		0x190
+
+#define PCIE_ICMD_PM_REG		0x198
+#define PCIE_TURN_OFF_LINK		BIT(4)
+
+#define PCIE_MSI_ADDR_BASE_REG		0xc00
+#define PCIE_MSI_SET_OFFSET		0x10
+#define PCIE_MSI_STATUS_OFFSET		0x04
+#define PCIE_MSI_ENABLE_OFFSET		0x08
+
+#define PCIE_TRANS_TABLE_BASE_REG	0x800
+#define PCIE_ATR_SRC_ADDR_MSB_OFFSET	0x4
+#define PCIE_ATR_TRSL_ADDR_LSB_OFFSET	0x8
+#define PCIE_ATR_TRSL_ADDR_MSB_OFFSET	0xc
+#define PCIE_ATR_TRSL_PARAM_OFFSET	0x10
+#define PCIE_ATR_TLB_SET_OFFSET		0x20
+
+#define PCIE_MAX_TRANS_TABLES		8
+#define PCIE_ATR_EN			BIT(0)
+#define PCIE_ATR_SIZE(size) \
+	(((((size) - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN)
+#define PCIE_ATR_ID(id)			((id) & GENMASK(3, 0))
+#define PCIE_ATR_TYPE_MEM		PCIE_ATR_ID(0)
+#define PCIE_ATR_TYPE_IO		PCIE_ATR_ID(1)
+#define PCIE_ATR_TLP_TYPE(type)		(((type) << 16) & GENMASK(18, 16))
+#define PCIE_ATR_TLP_TYPE_MEM		PCIE_ATR_TLP_TYPE(0)
+#define PCIE_ATR_TLP_TYPE_IO		PCIE_ATR_TLP_TYPE(2)
+
+/**
+ * struct mtk_pcie_msi - MSI information for each set
+ * @base: IO mapped register base of this MSI set
+ * @irq: Linux IRQ number servicing this set (chained from the port IRQ)
+ * @index: MSI set number (0 .. PCIE_MSI_SET_NUM - 1)
+ * @msg_addr: MSI message (doorbell) address written by endpoints
+ * @domain: IRQ domain the per-MSI interrupts are mapped in
+ */
+struct mtk_pcie_msi {
+	void __iomem *base;
+	unsigned int irq;
+	int index;
+	phys_addr_t msg_addr;
+	struct irq_domain *domain;
+};
+
+/**
+ * struct mtk_pcie_port - PCIe port information
+ * @dev: PCIe device
+ * @base: IO mapped register base
+ * @reg_base: Physical register base
+ * @mac_reset: mac reset control
+ * @phy_reset: phy reset control
+ * @phy: PHY controller block
+ * @clks: PCIe clocks
+ * @num_clks: PCIe clocks count for this port
+ * @busnr: root bus number parsed from the DT bus-range
+ * @irq: PCIe controller interrupt number
+ * @intx_domain: legacy INTx IRQ domain
+ * @msi_domain: MSI IRQ domain
+ * @msi_top_domain: MSI IRQ top domain
+ * @msi_info: MSI sets information
+ * @lock: lock protecting IRQ bit map
+ * @msi_irq_in_use: bit map for assigned MSI IRQ
+ */
+struct mtk_pcie_port {
+	struct device *dev;
+	void __iomem *base;
+	phys_addr_t reg_base;
+	struct reset_control *mac_reset;
+	struct reset_control *phy_reset;
+	struct phy *phy;
+	struct clk_bulk_data *clks;
+	int num_clks;
+	unsigned int busnr;
+
+	int irq;
+	struct irq_domain *intx_domain;
+	struct irq_domain *msi_domain;
+	struct irq_domain *msi_top_domain;
+	struct mtk_pcie_msi **msi_info;
+	struct mutex lock;
+	DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
+};
+
+/**
+ * mtk_pcie_config_tlp_header
+ * @bus: PCI bus to query
+ * @devfn: device/function number
+ * @where: offset in config space
+ * @size: data size in TLP header
+ *
+ * Set byte enable field and device information in configuration TLP header.
+ */
+static void mtk_pcie_config_tlp_header(struct pci_bus *bus, unsigned int devfn,
+					int where, int size)
+{
+	struct mtk_pcie_port *port = bus->sysdata;
+	int bytes;
+	u32 val;
+
+	/* byte-enable mask: 'size' ones shifted to the byte lane of 'where' */
+	bytes = (GENMASK(size - 1, 0) & 0xf) << (where & 0x3);
+
+	val = PCIE_CFG_FORCE_BYTE_EN | PCIE_CFG_BYTE_EN(bytes) |
+	      PCIE_CFG_HEADER(bus->number, devfn);
+
+	writel(val, port->base + PCIE_CFGNUM_REG);
+}
+
+/*
+ * Map a config access to the shared config window.  @devfn is not part
+ * of the address: the target bus/devfn are programmed into
+ * PCIE_CFGNUM_REG by mtk_pcie_config_tlp_header() before each access.
+ */
+static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+				      int where)
+{
+	struct mtk_pcie_port *port = bus->sysdata;
+
+	return port->base + PCIE_CFG_OFFSET_ADDR + where;
+}
+
+/* Config read: program the TLP header, then do a 32-bit window read */
+static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
+				int where, int size, u32 *val)
+{
+	mtk_pcie_config_tlp_header(bus, devfn, where, size);
+
+	return pci_generic_config_read32(bus, devfn, where, size, val);
+}
+
+/*
+ * Config write: sub-dword values are shifted into their byte lane and
+ * written as a full dword; the byte enables set in the TLP header make
+ * the hardware update only the requested bytes.
+ */
+static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
+				 int where, int size, u32 val)
+{
+	mtk_pcie_config_tlp_header(bus, devfn, where, size);
+
+	if (size <= 2)
+		val <<= (where & 0x3) * 8;
+
+	return pci_generic_config_write32(bus, devfn, where, 4, val);
+}
+
+/* Root bus config-space accessors */
+static struct pci_ops mtk_pcie_ops = {
+	.map_bus = mtk_pcie_map_bus,
+	.read  = mtk_pcie_config_read,
+	.write = mtk_pcie_config_write,
+};
+
+/*
+ * mtk_pcie_set_trans_table - program one address translation window
+ * @port: PCIe port information
+ * @cpu_addr: CPU-side window base
+ * @pci_addr: PCI-side (translated) base
+ * @size: window size; encoded as log2 via fls(), so non-power-of-two
+ *        sizes are effectively rounded down -- assumed power of two
+ * @type: IORESOURCE_IO or IORESOURCE_MEM
+ * @num: translation table index (< PCIE_MAX_TRANS_TABLES)
+ *
+ * Return: 0 on success, -ENODEV when the table index is out of range.
+ */
+static int mtk_pcie_set_trans_table(struct mtk_pcie_port *port,
+				    resource_size_t cpu_addr,
+				    resource_size_t pci_addr,
+				    resource_size_t size,
+				    unsigned long type, int num)
+{
+	void __iomem *table;
+	u32 val = 0;
+
+	if (num >= PCIE_MAX_TRANS_TABLES) {
+		dev_notice(port->dev, "not enough translate table[%d] for addr: %#llx, limited to [%d]\n",
+			   num, (unsigned long long) cpu_addr,
+			   PCIE_MAX_TRANS_TABLES);
+		return -ENODEV;
+	}
+
+	table = port->base + PCIE_TRANS_TABLE_BASE_REG +
+		num * PCIE_ATR_TLB_SET_OFFSET;
+
+	writel(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(size) - 1), table);
+	writel(upper_32_bits(cpu_addr), table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
+	writel(lower_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
+	writel(upper_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);
+
+	if (type == IORESOURCE_IO)
+		val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
+	else
+		val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;
+
+	writel(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);
+
+	return 0;
+}
+
+/*
+ * mtk_pcie_startup_port - bring the link up and program translation windows
+ * @port: PCIe port information
+ *
+ * Configures RC mode and the bridge class code, sequences the MAC/PHY/
+ * bridge resets and PERST#, waits up to 50 ms for link-up, then installs
+ * one translation window per IO/MEM bridge window.
+ *
+ * Return: 0 on success, negative errno on link or window setup failure.
+ */
+static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
+{
+	struct resource_entry *entry;
+	struct pci_host_bridge *host = pci_host_bridge_from_priv(port);
+	unsigned int table_index = 0;
+	int err;
+	u32 val;
+
+	/* Set as RC mode */
+	val = readl(port->base + PCIE_SETTING_REG);
+	val |= PCIE_RC_MODE;
+	writel(val, port->base + PCIE_SETTING_REG);
+
+	/* Set class code */
+	val = readl(port->base + PCIE_PCI_IDS_1);
+	val &= ~GENMASK(31, 8);
+	val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI << 8);
+	writel(val, port->base + PCIE_PCI_IDS_1);
+
+	/* Assert all reset signals */
+	val = readl(port->base + PCIE_RST_CTRL_REG);
+	val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
+	writel(val, port->base + PCIE_RST_CTRL_REG);
+
+	/* De-assert reset signals */
+	val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB);
+	writel(val, port->base + PCIE_RST_CTRL_REG);
+
+	/* Delay 100ms to wait the reference clocks become stable */
+	usleep_range(100 * 1000, 120 * 1000);
+
+	/* De-assert PERST# signal */
+	val &= ~PCIE_PE_RSTB;
+	writel(val, port->base + PCIE_RST_CTRL_REG);
+
+	/* Check if the link is up or not */
+	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_REG, val,
+			!!(val & PCIE_PORT_LINKUP), 20,
+			50 * USEC_PER_MSEC);
+	if (err) {
+		val = readl(port->base + PCIE_LTSSM_STATUS_REG);
+		dev_notice(port->dev, "PCIe link down, ltssm reg val: %#x\n",
+			   val);
+		return err;
+	}
+
+	/* Set PCIe translation windows */
+	resource_list_for_each_entry(entry, &host->windows) {
+		struct resource *res = entry->res;
+		unsigned long type = resource_type(res);
+		resource_size_t cpu_addr;
+		resource_size_t pci_addr;
+		resource_size_t size;
+		const char *range_type;
+
+		if (type == IORESOURCE_IO) {
+			cpu_addr = pci_pio_to_address(res->start);
+			range_type = "IO";
+		} else if (type == IORESOURCE_MEM) {
+			cpu_addr = res->start;
+			range_type = "MEM";
+		} else {
+			continue;
+		}
+
+		pci_addr = res->start - entry->offset;
+		size = resource_size(res);
+		err = mtk_pcie_set_trans_table(port, cpu_addr, pci_addr, size,
+					       type, table_index);
+		if (err)
+			return err;
+
+		dev_dbg(port->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
+			range_type, table_index, (unsigned long long) cpu_addr,
+			(unsigned long long) pci_addr,
+			(unsigned long long) size);
+
+		table_index++;
+	}
+
+	return 0;
+}
+
+/* Map a global MSI hwirq to the mtk_pcie_msi set that owns it */
+static inline struct mtk_pcie_msi *mtk_get_msi_info(struct mtk_pcie_port *port,
+						    unsigned long hwirq)
+{
+	return port->msi_info[hwirq / PCIE_MSI_IRQS_PER_SET];
+}
+
+/*
+ * All MSI/INTx interrupts are chained behind the single port interrupt,
+ * so affinity changes are delegated to the parent port IRQ chip.
+ */
+static int mtk_pcie_set_affinity(struct irq_data *data,
+				 const struct cpumask *mask, bool force)
+{
+	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+	struct irq_data *port_data = irq_get_irq_data(port->irq);
+	struct irq_chip *port_chip = irq_data_get_irq_chip(port_data);
+	int ret;
+
+	if (!port_chip || !port_chip->irq_set_affinity)
+		return -EINVAL;
+
+	ret = port_chip->irq_set_affinity(port_data, mask, force);
+
+	irq_data_update_effective_affinity(data, mask);
+
+	return ret;
+}
+
+/*
+ * Compose the MSI message: the address is the owning set's doorbell,
+ * the data is the interrupt's bit index within that set.
+ */
+static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+	struct mtk_pcie_msi *msi_info;
+	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+	unsigned long hwirq;
+
+	msi_info = mtk_get_msi_info(port, data->hwirq);
+	hwirq =	data->hwirq % PCIE_MSI_IRQS_PER_SET;
+
+	msg->address_hi = 0;
+	msg->address_lo = lower_32_bits(msi_info->msg_addr);
+	msg->data = hwirq;
+	dev_dbg(port->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n",
+		hwirq, msg->address_hi, msg->address_lo, msg->data);
+}
+
+/* Ack: clear the per-set status bit (write-1-to-clear) */
+static void mtk_msi_irq_ack(struct irq_data *data)
+{
+	struct mtk_pcie_msi *msi_info;
+	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+	unsigned long hwirq;
+
+	msi_info = mtk_get_msi_info(port, data->hwirq);
+	hwirq =	data->hwirq % PCIE_MSI_IRQS_PER_SET;
+
+	writel(BIT(hwirq), msi_info->base + PCIE_MSI_STATUS_OFFSET);
+}
+
+/* Mask: clear the per-set enable bit, then mask at the PCI device too */
+static void mtk_msi_irq_mask(struct irq_data *data)
+{
+	struct mtk_pcie_msi *msi_info;
+	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+	unsigned long hwirq;
+	u32 val;
+
+	msi_info = mtk_get_msi_info(port, data->hwirq);
+	hwirq =	data->hwirq % PCIE_MSI_IRQS_PER_SET;
+
+	val = readl(msi_info->base + PCIE_MSI_ENABLE_OFFSET);
+	val &= ~BIT(hwirq);
+	writel(val, msi_info->base + PCIE_MSI_ENABLE_OFFSET);
+
+	pci_msi_mask_irq(data);
+}
+
+/* Unmask: set the per-set enable bit, then unmask at the PCI device */
+static void mtk_msi_irq_unmask(struct irq_data *data)
+{
+	struct mtk_pcie_msi *msi_info;
+	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+	unsigned long hwirq;
+	u32 val;
+
+	msi_info = mtk_get_msi_info(port, data->hwirq);
+	hwirq =	data->hwirq % PCIE_MSI_IRQS_PER_SET;
+
+	val = readl(msi_info->base + PCIE_MSI_ENABLE_OFFSET);
+	val |= BIT(hwirq);
+	writel(val, msi_info->base + PCIE_MSI_ENABLE_OFFSET);
+
+	pci_msi_unmask_irq(data);
+}
+
+/* irq_chip for individual MSI interrupts (edge, ack-before-handle) */
+static struct irq_chip mtk_msi_irq_chip = {
+	.irq_ack		= mtk_msi_irq_ack,
+	.irq_compose_msi_msg	= mtk_compose_msi_msg,
+	.irq_mask		= mtk_msi_irq_mask,
+	.irq_unmask		= mtk_msi_irq_unmask,
+	.irq_set_affinity	= mtk_pcie_set_affinity,
+	.name			= "PCIe",
+};
+
+/*
+ * Allocate a contiguous, power-of-two-aligned region of MSI hwirqs
+ * (multi-MSI requires aligned vectors).
+ *
+ * NOTE(review): -ENOSPC is returned through the unsigned
+ * irq_hw_number_t return type; callers appear to rely on the core
+ * treating the wrapped value as failure -- verify against the msi_domain
+ * core of this kernel version.
+ */
+static irq_hw_number_t mtk_pcie_msi_get_hwirq(struct msi_domain_info *info,
+					      msi_alloc_info_t *arg)
+{
+	struct msi_desc *entry = arg->desc;
+	struct mtk_pcie_port *port = info->chip_data;
+	int hwirq;
+
+	mutex_lock(&port->lock);
+
+	hwirq = bitmap_find_free_region(port->msi_irq_in_use, PCIE_MSI_IRQS_NUM,
+					order_base_2(entry->nvec_used));
+	if (hwirq < 0) {
+		mutex_unlock(&port->lock);
+		return -ENOSPC;
+	}
+
+	mutex_unlock(&port->lock);
+
+	return hwirq;
+}
+
+/* Release one MSI hwirq back to the allocation bitmap */
+static void mtk_pcie_msi_free(struct irq_domain *domain,
+			      struct msi_domain_info *info, unsigned int virq)
+{
+	struct irq_data *data = irq_domain_get_irq_data(domain, virq);
+	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+
+	mutex_lock(&port->lock);
+
+	bitmap_clear(port->msi_irq_in_use, data->hwirq, 1);
+
+	mutex_unlock(&port->lock);
+}
+
+/* Domain ops/info template; chip_data is filled per-port at init time */
+static struct msi_domain_ops mtk_msi_domain_ops = {
+	.get_hwirq	= mtk_pcie_msi_get_hwirq,
+	.msi_free	= mtk_pcie_msi_free,
+};
+
+static struct msi_domain_info mtk_msi_domain_info = {
+	.flags		= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_PCI_MSIX |
+			   MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI),
+	.chip		= &mtk_msi_irq_chip,
+	.ops		= &mtk_msi_domain_ops,
+	.handler	= handle_edge_irq,
+	.handler_name	= "MSI",
+};
+
+/* EOI: clear this MSI set's aggregated bit in the top-level status reg */
+static void mtk_msi_top_irq_eoi(struct irq_data *data)
+{
+	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+	unsigned long msi_irq = data->hwirq + PCIE_MSI_SHIFT;
+
+	writel(BIT(msi_irq), port->base + PCIE_INT_STATUS_REG);
+}
+
+/* irq_chip for the per-set "top" interrupts (EOI only) */
+static struct irq_chip mtk_msi_top_irq_chip = {
+	.irq_eoi	= mtk_msi_top_irq_eoi,
+	.name		= "PCIe",
+};
+
+/*
+ * Chained handler for one MSI set: demultiplex all pending, enabled
+ * MSIs of the set.  The status register is re-read until it is clear so
+ * MSIs arriving during handling are not lost.
+ */
+static void mtk_pcie_msi_handler(struct irq_desc *desc)
+{
+	struct mtk_pcie_msi *msi_info = irq_desc_get_handler_data(desc);
+	struct irq_chip *irqchip = irq_desc_get_chip(desc);
+	unsigned long msi_enable, msi_status;
+	unsigned int virq;
+	irq_hw_number_t bit, hwirq;
+
+	chained_irq_enter(irqchip, desc);
+
+	msi_enable = readl(msi_info->base + PCIE_MSI_ENABLE_OFFSET);
+	while ((msi_status = readl(msi_info->base + PCIE_MSI_STATUS_OFFSET))) {
+		msi_status &= msi_enable;
+		for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) {
+			hwirq = bit + msi_info->index * PCIE_MSI_IRQS_PER_SET;
+			virq = irq_find_mapping(msi_info->domain, hwirq);
+			generic_handle_irq(virq);
+		}
+	}
+
+	chained_irq_exit(irqchip, desc);
+}
+
+/* Map a top-domain hwirq (= MSI set index) to its chained handler */
+static int mtk_msi_top_domain_map(struct irq_domain *domain,
+				    unsigned int virq, irq_hw_number_t hwirq)
+{
+	struct mtk_pcie_port *port = domain->host_data;
+	struct mtk_pcie_msi *msi_info = port->msi_info[hwirq];
+
+	irq_domain_set_info(domain, virq, hwirq,
+			    &mtk_msi_top_irq_chip, domain->host_data,
+			    mtk_pcie_msi_handler, msi_info, NULL);
+
+	return 0;
+}
+
+/* Domain ops for the MSI "top" (per-set) domain */
+static const struct irq_domain_ops mtk_msi_top_domain_ops = {
+	.map = mtk_msi_top_domain_map,
+};
+
+/* Disable one INTx line in the controller's interrupt-enable register */
+static void mtk_intx_mask(struct irq_data *data)
+{
+	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+	u32 val;
+
+	val = readl(port->base + PCIE_INT_ENABLE_REG);
+	val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT);
+	writel(val, port->base + PCIE_INT_ENABLE_REG);
+}
+
+/* Enable one INTx line in the controller's interrupt-enable register */
+static void mtk_intx_unmask(struct irq_data *data)
+{
+	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+	u32 val;
+
+	val = readl(port->base + PCIE_INT_ENABLE_REG);
+	val |= BIT(data->hwirq + PCIE_INTX_SHIFT);
+	writel(val, port->base + PCIE_INT_ENABLE_REG);
+}
+
+/* EOI for INTx: clear the status bit only after the IRQ was serviced */
+static void mtk_intx_eoi(struct irq_data *data)
+{
+	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+	unsigned long hwirq;
+
+	/*
+	 * As an emulated level IRQ, its interrupt status will remain
+	 * until the corresponding de-assert message is received; hence that
+	 * the status can only be cleared when the interrupt has been serviced.
+	 */
+	hwirq = data->hwirq + PCIE_INTX_SHIFT;
+	writel(BIT(hwirq), port->base + PCIE_INT_STATUS_REG);
+}
+
+/* irq_chip for legacy INTx (fasteoi flow) */
+static struct irq_chip mtk_intx_irq_chip = {
+	.irq_mask		= mtk_intx_mask,
+	.irq_unmask		= mtk_intx_unmask,
+	.irq_eoi		= mtk_intx_eoi,
+	.irq_set_affinity	= mtk_pcie_set_affinity,
+	.name			= "PCIe",
+};
+
+/* Bind an INTx hwirq to the fasteoi handler and the port chip data */
+static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+			     irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler_name(irq, &mtk_intx_irq_chip,
+				      handle_fasteoi_irq, "INTx");
+	irq_set_chip_data(irq, domain->host_data);
+
+	return 0;
+}
+
+/* Domain ops for the legacy INTx domain */
+static const struct irq_domain_ops intx_domain_ops = {
+	.map = mtk_pcie_intx_map,
+};
+
+/*
+ * mtk_pcie_init_irq_domains - create INTx and MSI IRQ domains
+ * @port: PCIe port information
+ * @node: controller device node
+ *
+ * Sets up the legacy INTx domain, the PCI MSI domain and the chained
+ * "top" domain (one interrupt per MSI set), allocates the per-set state
+ * and enables all PCIE_MSI_SET_NUM sets in hardware.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int mtk_pcie_init_irq_domains(struct mtk_pcie_port *port,
+				     struct device_node *node)
+{
+	struct device *dev = port->dev;
+	struct device_node *intc_node;
+	struct fwnode_handle *fwnode = of_node_to_fwnode(node);
+	struct mtk_pcie_msi *msi_info;
+	struct msi_domain_info *info;
+	int i, ret;
+
+	/* Setup INTx */
+	intc_node = of_get_child_by_name(node, "interrupt-controller");
+	if (!intc_node) {
+		dev_notice(dev, "missing PCIe Intc node\n");
+		return -ENODEV;
+	}
+
+	port->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
+						  &intx_domain_ops, port);
+	if (!port->intx_domain) {
+		dev_notice(dev, "failed to get INTx IRQ domain\n");
+		return -ENODEV;
+	}
+
+	/* Setup MSI */
+	mutex_init(&port->lock);
+
+	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+	if (!info) {
+		/* don't leak the INTx domain created above */
+		ret = -ENOMEM;
+		goto err_msi_domain;
+	}
+
+	memcpy(info, &mtk_msi_domain_info, sizeof(*info));
+	info->chip_data = port;
+
+	port->msi_domain = pci_msi_create_irq_domain(fwnode, info, NULL);
+	if (!port->msi_domain) {
+		dev_info(dev, "failed to create MSI domain\n");
+		ret = -ENODEV;
+		goto err_msi_domain;
+	}
+
+	/* Enable MSI and setup PCIe domains */
+	port->msi_top_domain = irq_domain_add_hierarchy(NULL, 0, 0, node,
+							&mtk_msi_top_domain_ops,
+							port);
+	if (!port->msi_top_domain) {
+		dev_info(dev, "failed to create MSI top domain\n");
+		ret = -ENODEV;
+		goto err_msi_top_domain;
+	}
+
+	/*
+	 * Array of PCIE_MSI_SET_NUM pointers.  The previous allocation of
+	 * PCIE_MSI_SET_NUM *bytes* was far too small on 64-bit and caused
+	 * out-of-bounds writes below.
+	 */
+	port->msi_info = devm_kcalloc(dev, PCIE_MSI_SET_NUM,
+				      sizeof(*port->msi_info), GFP_KERNEL);
+	if (!port->msi_info) {
+		ret = -ENOMEM;
+		goto err_msi_info;
+	}
+
+	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
+		int offset = i * PCIE_MSI_SET_OFFSET;
+		u32 val;
+
+		msi_info = devm_kzalloc(dev, sizeof(*msi_info), GFP_KERNEL);
+		if (!msi_info) {
+			ret = -ENOMEM;
+			goto err_msi_set;
+		}
+
+		msi_info->base = port->base + PCIE_MSI_ADDR_BASE_REG + offset;
+		msi_info->msg_addr = port->reg_base + PCIE_MSI_ADDR_BASE_REG +
+				     offset;
+
+		/* program the doorbell address endpoints will write to */
+		writel(lower_32_bits(msi_info->msg_addr), msi_info->base);
+
+		msi_info->index = i;
+		msi_info->domain = port->msi_domain;
+
+		port->msi_info[i] = msi_info;
+
+		/* Alloc IRQ for each MSI set */
+		msi_info->irq = irq_create_mapping(port->msi_top_domain, i);
+		if (!msi_info->irq) {
+			dev_info(dev, "allocate MSI top IRQ failed\n");
+			ret = -ENOSPC;
+			goto err_msi_set;
+		}
+
+		val = readl(port->base + PCIE_INT_ENABLE_REG);
+		val |= BIT(i + PCIE_MSI_SHIFT);
+		writel(val, port->base + PCIE_INT_ENABLE_REG);
+
+		val = readl(port->base + PCIE_MSI_SET_ENABLE_REG);
+		val |= BIT(i);
+		writel(val, port->base + PCIE_MSI_SET_ENABLE_REG);
+	}
+
+	return 0;
+
+err_msi_set:
+	while (i-- > 0) {
+		msi_info = port->msi_info[i];
+		irq_dispose_mapping(msi_info->irq);
+	}
+err_msi_info:
+	irq_domain_remove(port->msi_top_domain);
+err_msi_top_domain:
+	irq_domain_remove(port->msi_domain);
+err_msi_domain:
+	irq_domain_remove(port->intx_domain);
+
+	return ret;
+}
+
+/*
+ * mtk_pcie_irq_teardown - undo mtk_pcie_setup_irq()
+ * @port: PCIe port information
+ *
+ * Detaches the chained handler, removes all IRQ domains and disposes
+ * the per-set and port IRQ mappings.
+ */
+static void mtk_pcie_irq_teardown(struct mtk_pcie_port *port)
+{
+	struct mtk_pcie_msi *msi_info;
+	int i;
+
+	irq_set_chained_handler_and_data(port->irq, NULL, NULL);
+
+	if (port->intx_domain)
+		irq_domain_remove(port->intx_domain);
+
+	if (port->msi_domain)
+		irq_domain_remove(port->msi_domain);
+
+	if (port->msi_top_domain) {
+		for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
+			msi_info = port->msi_info[i];
+			irq_dispose_mapping(msi_info->irq);
+		}
+
+		irq_domain_remove(port->msi_top_domain);
+	}
+
+	irq_dispose_mapping(port->irq);
+}
+
+/*
+ * Top-level chained handler: demultiplex the port interrupt into
+ * pending INTx lines (bits 24-27) and pending MSI sets (bits 8-15).
+ */
+static void mtk_pcie_irq_handler(struct irq_desc *desc)
+{
+	struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
+	struct irq_chip *irqchip = irq_desc_get_chip(desc);
+	unsigned long status;
+	unsigned int virq;
+	irq_hw_number_t irq_bit = PCIE_INTX_SHIFT;
+
+	chained_irq_enter(irqchip, desc);
+
+	status = readl(port->base + PCIE_INT_STATUS_REG);
+	if (status & PCIE_INTX_MASK) {
+		for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX +
+				      PCIE_INTX_SHIFT) {
+			virq = irq_find_mapping(port->intx_domain,
+						irq_bit - PCIE_INTX_SHIFT);
+			generic_handle_irq(virq);
+		}
+	}
+
+	if (status & PCIE_MSI_MASK) {
+		irq_bit = PCIE_MSI_SHIFT;
+		for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM +
+				      PCIE_MSI_SHIFT) {
+			virq = irq_find_mapping(port->msi_top_domain,
+						irq_bit - PCIE_MSI_SHIFT);
+			generic_handle_irq(virq);
+		}
+	}
+
+	chained_irq_exit(irqchip, desc);
+}
+
+/*
+ * mtk_pcie_setup_irq - create IRQ domains and attach the chained handler
+ * @port: PCIe port information
+ * @node: controller device node
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
+			      struct device_node *node)
+{
+	struct device *dev = port->dev;
+	struct platform_device *pdev = to_platform_device(dev);
+	int err;
+
+	err = mtk_pcie_init_irq_domains(port, node);
+	if (err) {
+		dev_notice(dev, "failed to init PCIe IRQ domain\n");
+		return err;
+	}
+
+	port->irq = platform_get_irq(pdev, 0);
+	if (port->irq < 0)
+		return port->irq;
+
+	irq_set_chained_handler_and_data(port->irq, mtk_pcie_irq_handler, port);
+
+	return 0;
+}
+
+/*
+ * mtk_pcie_clk_init - fetch and enable all clocks listed in the DT
+ * @port: PCIe port information
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int mtk_pcie_clk_init(struct mtk_pcie_port *port)
+{
+	int ret;
+
+	port->num_clks = devm_clk_bulk_get_all(port->dev, &port->clks);
+	if (port->num_clks < 0) {
+		dev_notice(port->dev, "failed to get PCIe clock\n");
+		return port->num_clks;
+	}
+
+	ret = clk_bulk_prepare_enable(port->num_clks, port->clks);
+	if (ret) {
+		dev_notice(port->dev, "failed to enable PCIe clocks\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * mtk_pcie_power_up - power the port: PHY resets/init, MAC reset,
+ * runtime PM and clocks.  Error paths unwind in reverse order.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int mtk_pcie_power_up(struct mtk_pcie_port *port)
+{
+	struct device *dev = port->dev;
+	int err;
+
+	port->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
+	if (IS_ERR(port->phy_reset))
+		return PTR_ERR(port->phy_reset);
+
+	/* PHY power on and enable pipe clock */
+	port->phy = devm_phy_optional_get(dev, "pcie-phy");
+	if (IS_ERR(port->phy))
+		return PTR_ERR(port->phy);
+
+	reset_control_deassert(port->phy_reset);
+
+	err = phy_power_on(port->phy);
+	if (err) {
+		dev_notice(dev, "failed to power on PCIe phy\n");
+		goto err_phy_on;
+	}
+
+	err = phy_init(port->phy);
+	if (err) {
+		dev_notice(dev, "failed to initialize PCIe phy\n");
+		goto err_phy_init;
+	}
+
+	port->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac");
+	if (IS_ERR(port->mac_reset)) {
+		err = PTR_ERR(port->mac_reset);
+		goto err_mac_rst;
+	}
+
+	reset_control_deassert(port->mac_reset);
+
+	/* MAC power on and enable transaction layer clocks */
+	pm_runtime_enable(dev);
+	pm_runtime_get_sync(dev);
+
+	err = mtk_pcie_clk_init(port);
+	if (err) {
+		dev_notice(dev, "clock init failed\n");
+		goto err_clk_init;
+	}
+
+	return 0;
+
+err_clk_init:
+	pm_runtime_put_sync(dev);
+	pm_runtime_disable(dev);
+	reset_control_assert(port->mac_reset);
+err_mac_rst:
+	phy_exit(port->phy);
+err_phy_init:
+	phy_power_off(port->phy);
+err_phy_on:
+	reset_control_assert(port->phy_reset);
+
+	return err;
+}
+
+/* Reverse of mtk_pcie_power_up(): clocks, runtime PM, MAC, then PHY */
+static void mtk_pcie_power_down(struct mtk_pcie_port *port)
+{
+	clk_bulk_disable_unprepare(port->num_clks, port->clks);
+
+	pm_runtime_put_sync(port->dev);
+	pm_runtime_disable(port->dev);
+	reset_control_assert(port->mac_reset);
+
+	phy_power_off(port->phy);
+	phy_exit(port->phy);
+	reset_control_assert(port->phy_reset);
+}
+
+/*
+ * mtk_pcie_setup - parse resources, power up, bring up link and IRQs
+ * @port: PCIe port information
+ *
+ * Return: 0 on success; powers the port back down on any failure after
+ * power-up.
+ */
+static int mtk_pcie_setup(struct mtk_pcie_port *port)
+{
+	struct device *dev = port->dev;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct pci_host_bridge *host = pci_host_bridge_from_priv(port);
+	struct list_head *windows = &host->windows;
+	struct resource *regs, *bus;
+	int err;
+
+	err = pci_parse_request_of_pci_ranges(dev, windows, &bus);
+	if (err)
+		return err;
+
+	port->busnr = bus->start;
+
+	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
+	port->base = devm_ioremap_resource(dev, regs);
+	if (IS_ERR(port->base)) {
+		dev_notice(dev, "failed to map register base\n");
+		return PTR_ERR(port->base);
+	}
+
+	port->reg_base = regs->start;
+
+	/* Don't touch the hardware registers before power up */
+	err = mtk_pcie_power_up(port);
+	if (err)
+		return err;
+
+	/* Try link up */
+	err = mtk_pcie_startup_port(port);
+	if (err) {
+		dev_notice(dev, "PCIe startup failed\n");
+		goto err_setup;
+	}
+
+	err = mtk_pcie_setup_irq(port, dev->of_node);
+	if (err)
+		goto err_setup;
+
+	dev_info(dev, "PCIe link up success!\n");
+
+	return 0;
+
+err_setup:
+	mtk_pcie_power_down(port);
+
+	return err;
+}
+
+/*
+ * Undo the logical PIO range registered while parsing the DT ranges,
+ * so a later re-probe can register it again.
+ */
+static void release_io_range(struct device *dev)
+{
+	struct logic_pio_hwaddr *iorange = NULL;
+
+	iorange = find_io_range_by_fwnode(&dev->of_node->fwnode);
+	if (iorange) {
+		logic_pio_unregister_range(iorange);
+		kfree(iorange);
+	}
+}
+
+/*
+ * mtk_pcie_probe - allocate the host bridge, set up the port and probe
+ * the PCI hierarchy.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int mtk_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct mtk_pcie_port *port;
+	struct pci_host_bridge *host;
+	int err;
+
+	host = devm_pci_alloc_host_bridge(dev, sizeof(*port));
+	if (!host)
+		return -ENOMEM;
+
+	port = pci_host_bridge_priv(host);
+
+	port->dev = dev;
+	platform_set_drvdata(pdev, port);
+
+	err = mtk_pcie_setup(port);
+	if (err)
+		goto release_resource;
+
+	host->busnr = port->busnr;
+	host->dev.parent = port->dev;
+	host->map_irq = of_irq_parse_and_map_pci;
+	host->swizzle_irq = pci_common_swizzle;
+	host->ops = &mtk_pcie_ops;
+	host->sysdata = port;
+
+	err = pci_host_probe(host);
+	if (err) {
+		mtk_pcie_irq_teardown(port);
+		mtk_pcie_power_down(port);
+		goto release_resource;
+	}
+
+	return 0;
+
+release_resource:
+	release_io_range(dev);
+	pci_free_resource_list(&host->windows);
+
+	return err;
+}
+
+/* Remove: tear down the PCI hierarchy, IRQs and power in that order */
+static int mtk_pcie_remove(struct platform_device *pdev)
+{
+	struct mtk_pcie_port *port = platform_get_drvdata(pdev);
+	struct pci_host_bridge *host = pci_host_bridge_from_priv(port);
+
+	pci_lock_rescan_remove();
+	pci_stop_root_bus(host->bus);
+	pci_remove_root_bus(host->bus);
+	pci_unlock_rescan_remove();
+
+	mtk_pcie_irq_teardown(port);
+	mtk_pcie_power_down(port);
+
+	return 0;
+}
+
+/*
+ * Request link turn-off and poll (up to 50 ms) for the LTSSM to reach
+ * the L2 idle state.  Return: 0 on success, -ETIMEDOUT otherwise.
+ */
+static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_pcie_port *port)
+{
+	u32 val;
+
+	val = readl(port->base + PCIE_ICMD_PM_REG);
+	val |= PCIE_TURN_OFF_LINK;
+	writel(val, port->base + PCIE_ICMD_PM_REG);
+
+	/* Check the link is L2 */
+	return readl_poll_timeout(port->base + PCIE_LTSSM_STATUS_REG, val,
+				  (PCIE_LTSSM_STATE(val) ==
+				   PCIE_LTSSM_STATE_L2_IDLE), 20,
+				   50 * USEC_PER_MSEC);
+}
+
+/*
+ * Suspend: put the link into L2, assert PERST#, then gate clocks and
+ * power off the PHY.
+ */
+static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
+{
+	struct mtk_pcie_port *port = dev_get_drvdata(dev);
+	int err;
+	u32 val;
+
+	/* Trigger link to L2 state */
+	err = mtk_pcie_turn_off_link(port);
+	if (err) {
+		dev_notice(port->dev, "can not enter L2 state\n");
+		return err;
+	}
+
+	/* Pull down the PERST# pin */
+	val = readl(port->base + PCIE_RST_CTRL_REG);
+	val |= PCIE_PE_RSTB;
+	writel(val, port->base + PCIE_RST_CTRL_REG);
+
+	dev_dbg(port->dev, "enter L2 state success");
+
+	clk_bulk_disable_unprepare(port->num_clks, port->clks);
+
+	phy_power_off(port->phy);
+
+	return 0;
+}
+
+/*
+ * Resume: power the PHY back on, re-enable clocks and redo link bring-up
+ * plus translation-window programming.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
+{
+	struct mtk_pcie_port *port = dev_get_drvdata(dev);
+	int err;
+
+	phy_power_on(port->phy);
+
+	err = clk_bulk_prepare_enable(port->num_clks, port->clks);
+	if (err) {
+		dev_dbg(dev, "failed to enable PCIe clocks\n");
+		phy_power_off(port->phy);	/* undo phy_power_on() above */
+		return err;
+	}
+
+	err = mtk_pcie_startup_port(port);
+	if (err) {
+		dev_notice(port->dev, "resume failed\n");
+		return err;
+	}
+
+	dev_dbg(port->dev, "resume done\n");
+
+	return 0;
+}
+
+static const struct dev_pm_ops mtk_pcie_pm_ops = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
+				      mtk_pcie_resume_noirq)
+};
+
+static const struct of_device_id mtk_pcie_of_match[] = {
+	{ .compatible = "mediatek,mt8192-pcie" },
+	{},
+};
+/* export the match table so the module can be autoloaded from DT */
+MODULE_DEVICE_TABLE(of, mtk_pcie_of_match);
+
+static struct platform_driver mtk_pcie_driver = {
+	.probe = mtk_pcie_probe,
+	.remove = mtk_pcie_remove,
+	.driver = {
+		.name = "mtk-pcie",
+		.of_match_table = mtk_pcie_of_match,
+		.pm = &mtk_pcie_pm_ops,
+	},
+};
+
+module_platform_driver(mtk_pcie_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/target/linux/mediatek/files-5.4/drivers/pinctrl/mediatek/pinctrl-mt7986.c b/target/linux/mediatek/files-5.4/drivers/pinctrl/mediatek/pinctrl-mt7986.c
new file mode 100644
index 0000000..27fd850
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/pinctrl/mediatek/pinctrl-mt7986.c
@@ -0,0 +1,1064 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The MT7986 driver is based on the Linux generic pinctrl binding.
+ *
+ * Copyright (C) 2020 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ */
+
+#include "pinctrl-moore.h"
+
+#define MT7986_PIN(_number, _name)				\
+	MTK_PIN(_number, _name, 0, _number, DRV_GRP1)
+
+#define PIN_FIELD_BASE(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, _x_bits)	\
+	PIN_FIELD_CALC(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit,	\
+		       _x_bits, 32, 0)
+
+#define PINS_FIELD_BASE(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, _x_bits)	\
+	PIN_FIELD_CALC(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit,	\
+		      _x_bits, 32, 1)
+
+static const struct mtk_pin_field_calc mt7986_pin_mode_range[] = {
+	PIN_FIELD(0, 100, 0x300, 0x10, 0, 4),
+};
+
+static const struct mtk_pin_field_calc mt7986_pin_dir_range[] = {
+	PIN_FIELD(0, 100, 0x0, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt7986_pin_di_range[] = {
+	PIN_FIELD(0, 100, 0x200, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt7986_pin_do_range[] = {
+	PIN_FIELD(0, 100, 0x100, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt7986_pin_ies_range[] = {
+	PIN_FIELD_BASE(0, 0, 2, 0x40, 0x10, 17, 1),
+	PIN_FIELD_BASE(1, 1, 3, 0x20, 0x10, 10, 1),
+	PIN_FIELD_BASE(2, 2, 3, 0x20, 0x10, 11, 1),
+	PIN_FIELD_BASE(3, 3, 4, 0x20, 0x10, 0, 1),
+	PIN_FIELD_BASE(4, 4, 4, 0x20, 0x10, 1, 1),
+	PIN_FIELD_BASE(5, 5, 2, 0x40, 0x10, 0, 1),
+	PIN_FIELD_BASE(6, 6, 2, 0x40, 0x10, 1, 1),
+	PIN_FIELD_BASE(7, 7, 3, 0x20, 0x10, 0, 1),
+	PIN_FIELD_BASE(8, 8, 3, 0x20, 0x10, 1, 1),
+	PIN_FIELD_BASE(9, 9, 3, 0x20, 0x10, 2, 1),
+	PIN_FIELD_BASE(10, 10, 3, 0x20, 0x10, 3, 1),
+	PIN_FIELD_BASE(11, 11, 2, 0x40, 0x10, 8, 1),
+	PIN_FIELD_BASE(12, 12, 2, 0x40, 0x10, 9, 1),
+	PIN_FIELD_BASE(13, 13, 2, 0x40, 0x10, 10, 1),
+	PIN_FIELD_BASE(14, 14, 2, 0x40, 0x10, 11, 1),
+	PIN_FIELD_BASE(15, 15, 2, 0x40, 0x10, 2, 1),
+	PIN_FIELD_BASE(16, 16, 2, 0x40, 0x10, 3, 1),
+	PIN_FIELD_BASE(17, 17, 2, 0x40, 0x10, 4, 1),
+	PIN_FIELD_BASE(18, 18, 2, 0x40, 0x10, 5, 1),
+	PIN_FIELD_BASE(19, 19, 2, 0x40, 0x10, 6, 1),
+	PIN_FIELD_BASE(20, 20, 2, 0x40, 0x10, 7, 1),
+	PIN_FIELD_BASE(21, 21, 1, 0x30, 0x10, 12, 1),
+	PIN_FIELD_BASE(22, 22, 1, 0x30, 0x10, 13, 1),
+	PIN_FIELD_BASE(23, 23, 1, 0x30, 0x10, 14, 1),
+	PIN_FIELD_BASE(24, 24, 1, 0x30, 0x10, 18, 1),
+	PIN_FIELD_BASE(25, 25, 1, 0x30, 0x10, 17, 1),
+	PIN_FIELD_BASE(26, 26, 1, 0x30, 0x10, 15, 1),
+	PIN_FIELD_BASE(27, 27, 1, 0x30, 0x10, 16, 1),
+	PIN_FIELD_BASE(28, 28, 1, 0x30, 0x10, 19, 1),
+	PIN_FIELD_BASE(29, 29, 1, 0x30, 0x10, 20, 1),
+	PIN_FIELD_BASE(30, 30, 1, 0x30, 0x10, 23, 1),
+	PIN_FIELD_BASE(31, 31, 1, 0x30, 0x10, 22, 1),
+	PIN_FIELD_BASE(32, 32, 1, 0x30, 0x10, 21, 1),
+	PIN_FIELD_BASE(33, 33, 3, 0x20, 0x10, 4, 1),
+	PIN_FIELD_BASE(34, 34, 3, 0x20, 0x10, 8, 1),
+	PIN_FIELD_BASE(35, 35, 3, 0x20, 0x10, 7, 1),
+	PIN_FIELD_BASE(36, 36, 3, 0x20, 0x10, 5, 1),
+	PIN_FIELD_BASE(37, 37, 3, 0x20, 0x10, 6, 1),
+	PIN_FIELD_BASE(38, 38, 3, 0x20, 0x10, 9, 1),
+	PIN_FIELD_BASE(39, 39, 2, 0x40, 0x10, 18, 1),
+	PIN_FIELD_BASE(40, 40, 2, 0x40, 0x10, 19, 1),
+	PIN_FIELD_BASE(41, 41, 2, 0x40, 0x10, 12, 1),
+	PIN_FIELD_BASE(42, 42, 2, 0x40, 0x10, 22, 1),
+	PIN_FIELD_BASE(43, 43, 2, 0x40, 0x10, 23, 1),
+	PIN_FIELD_BASE(44, 44, 2, 0x40, 0x10, 20, 1),
+	PIN_FIELD_BASE(45, 45, 2, 0x40, 0x10, 21, 1),
+	PIN_FIELD_BASE(46, 46, 2, 0x40, 0x10, 26, 1),
+	PIN_FIELD_BASE(47, 47, 2, 0x40, 0x10, 27, 1),
+	PIN_FIELD_BASE(48, 48, 2, 0x40, 0x10, 24, 1),
+	PIN_FIELD_BASE(49, 49, 2, 0x40, 0x10, 25, 1),
+	PIN_FIELD_BASE(50, 50, 1, 0x30, 0x10, 2, 1),
+	PIN_FIELD_BASE(51, 51, 1, 0x30, 0x10, 3, 1),
+	PIN_FIELD_BASE(52, 52, 1, 0x30, 0x10, 4, 1),
+	PIN_FIELD_BASE(53, 53, 1, 0x30, 0x10, 5, 1),
+	PIN_FIELD_BASE(54, 54, 1, 0x30, 0x10, 6, 1),
+	PIN_FIELD_BASE(55, 55, 1, 0x30, 0x10, 7, 1),
+	PIN_FIELD_BASE(56, 56, 1, 0x30, 0x10, 8, 1),
+	PIN_FIELD_BASE(57, 57, 1, 0x30, 0x10, 9, 1),
+	PIN_FIELD_BASE(58, 58, 1, 0x30, 0x10, 1, 1),
+	PIN_FIELD_BASE(59, 59, 1, 0x30, 0x10, 0, 1),
+	PIN_FIELD_BASE(60, 60, 1, 0x30, 0x10, 10, 1),
+	PIN_FIELD_BASE(61, 61, 1, 0x30, 0x10, 11, 1),
+	PIN_FIELD_BASE(62, 62, 2, 0x40, 0x10, 15, 1),
+	PIN_FIELD_BASE(63, 63, 2, 0x40, 0x10, 14, 1),
+	PIN_FIELD_BASE(64, 64, 2, 0x40, 0x10, 13, 1),
+	PIN_FIELD_BASE(65, 65, 2, 0x40, 0x10, 16, 1),
+	PIN_FIELD_BASE(66, 66, 4, 0x20, 0x10, 2, 1),
+	PIN_FIELD_BASE(67, 67, 4, 0x20, 0x10, 3, 1),
+	PIN_FIELD_BASE(68, 68, 4, 0x20, 0x10, 4, 1),
+	PIN_FIELD_BASE(69, 69, 5, 0x30, 0x10, 1, 1),
+	PIN_FIELD_BASE(70, 70, 5, 0x30, 0x10, 0, 1),
+	PIN_FIELD_BASE(71, 71, 5, 0x30, 0x10, 16, 1),
+	PIN_FIELD_BASE(72, 72, 5, 0x30, 0x10, 14, 1),
+	PIN_FIELD_BASE(73, 73, 5, 0x30, 0x10, 15, 1),
+	PIN_FIELD_BASE(74, 74, 5, 0x30, 0x10, 4, 1),
+	PIN_FIELD_BASE(75, 75, 5, 0x30, 0x10, 6, 1),
+	PIN_FIELD_BASE(76, 76, 5, 0x30, 0x10, 7, 1),
+	PIN_FIELD_BASE(77, 77, 5, 0x30, 0x10, 8, 1),
+	PIN_FIELD_BASE(78, 78, 5, 0x30, 0x10, 2, 1),
+	PIN_FIELD_BASE(79, 79, 5, 0x30, 0x10, 3, 1),
+	PIN_FIELD_BASE(80, 80, 5, 0x30, 0x10, 9, 1),
+	PIN_FIELD_BASE(81, 81, 5, 0x30, 0x10, 10, 1),
+	PIN_FIELD_BASE(82, 82, 5, 0x30, 0x10, 11, 1),
+	PIN_FIELD_BASE(83, 83, 5, 0x30, 0x10, 12, 1),
+	PIN_FIELD_BASE(84, 84, 5, 0x30, 0x10, 13, 1),
+	PIN_FIELD_BASE(85, 85, 5, 0x30, 0x10, 5, 1),
+	PIN_FIELD_BASE(86, 86, 6, 0x30, 0x10, 1, 1),
+	PIN_FIELD_BASE(87, 87, 6, 0x30, 0x10, 0, 1),
+	PIN_FIELD_BASE(88, 88, 6, 0x30, 0x10, 14, 1),
+	PIN_FIELD_BASE(89, 89, 6, 0x30, 0x10, 12, 1),
+	PIN_FIELD_BASE(90, 90, 6, 0x30, 0x10, 13, 1),
+	PIN_FIELD_BASE(91, 91, 6, 0x30, 0x10, 4, 1),
+	PIN_FIELD_BASE(92, 92, 6, 0x30, 0x10, 5, 1),
+	PIN_FIELD_BASE(93, 93, 6, 0x30, 0x10, 6, 1),
+	PIN_FIELD_BASE(94, 94, 6, 0x30, 0x10, 7, 1),
+	PIN_FIELD_BASE(95, 95, 6, 0x30, 0x10, 2, 1),
+	PIN_FIELD_BASE(96, 96, 6, 0x30, 0x10, 3, 1),
+	PIN_FIELD_BASE(97, 97, 6, 0x30, 0x10, 8, 1),
+	PIN_FIELD_BASE(98, 98, 6, 0x30, 0x10, 9, 1),
+	PIN_FIELD_BASE(99, 99, 6, 0x30, 0x10, 10, 1),
+	PIN_FIELD_BASE(100, 100, 6, 0x30, 0x10, 11, 1),
+};
+
+static const struct mtk_pin_field_calc mt7986_pin_smt_range[] = {
+	PIN_FIELD_BASE(0, 0, 2, 0xf0, 0x10, 17, 1),
+	PIN_FIELD_BASE(1, 1, 3, 0x90, 0x10, 10, 1),
+	PIN_FIELD_BASE(2, 2, 3, 0x90, 0x10, 11, 1),
+	PIN_FIELD_BASE(3, 3, 4, 0x90, 0x10, 0, 1),
+	PIN_FIELD_BASE(4, 4, 4, 0x90, 0x10, 1, 1),
+	PIN_FIELD_BASE(5, 5, 2, 0xf0, 0x10, 0, 1),
+	PIN_FIELD_BASE(6, 6, 2, 0xf0, 0x10, 1, 1),
+	PIN_FIELD_BASE(7, 7, 3, 0x90, 0x10, 0, 1),
+	PIN_FIELD_BASE(8, 8, 3, 0x90, 0x10, 1, 1),
+	PIN_FIELD_BASE(9, 9, 3, 0x90, 0x10, 2, 1),
+	PIN_FIELD_BASE(10, 10, 3, 0x90, 0x10, 3, 1),
+	PIN_FIELD_BASE(11, 11, 2, 0xf0, 0x10, 8, 1),
+	PIN_FIELD_BASE(12, 12, 2, 0xf0, 0x10, 9, 1),
+	PIN_FIELD_BASE(13, 13, 2, 0xf0, 0x10, 10, 1),
+	PIN_FIELD_BASE(14, 14, 2, 0xf0, 0x10, 11, 1),
+	PIN_FIELD_BASE(15, 15, 2, 0xf0, 0x10, 2, 1),
+	PIN_FIELD_BASE(16, 16, 2, 0xf0, 0x10, 3, 1),
+	PIN_FIELD_BASE(17, 17, 2, 0xf0, 0x10, 4, 1),
+	PIN_FIELD_BASE(18, 18, 2, 0xf0, 0x10, 5, 1),
+	PIN_FIELD_BASE(19, 19, 2, 0xf0, 0x10, 6, 1),
+	PIN_FIELD_BASE(20, 20, 2, 0xf0, 0x10, 7, 1),
+	PIN_FIELD_BASE(21, 21, 1, 0xc0, 0x10, 12, 1),
+	PIN_FIELD_BASE(22, 22, 1, 0xc0, 0x10, 13, 1),
+	PIN_FIELD_BASE(23, 23, 1, 0xc0, 0x10, 14, 1),
+	PIN_FIELD_BASE(24, 24, 1, 0xc0, 0x10, 18, 1),
+	PIN_FIELD_BASE(25, 25, 1, 0xc0, 0x10, 17, 1),
+	PIN_FIELD_BASE(26, 26, 1, 0xc0, 0x10, 15, 1),
+	PIN_FIELD_BASE(27, 27, 1, 0xc0, 0x10, 16, 1),
+	PIN_FIELD_BASE(28, 28, 1, 0xc0, 0x10, 19, 1),
+	PIN_FIELD_BASE(29, 29, 1, 0xc0, 0x10, 20, 1),
+	PIN_FIELD_BASE(30, 30, 1, 0xc0, 0x10, 23, 1),
+	PIN_FIELD_BASE(31, 31, 1, 0xc0, 0x10, 22, 1),
+	PIN_FIELD_BASE(32, 32, 1, 0xc0, 0x10, 21, 1),
+	PIN_FIELD_BASE(33, 33, 3, 0x90, 0x10, 4, 1),
+	PIN_FIELD_BASE(34, 34, 3, 0x90, 0x10, 8, 1),
+	PIN_FIELD_BASE(35, 35, 3, 0x90, 0x10, 7, 1),
+	PIN_FIELD_BASE(36, 36, 3, 0x90, 0x10, 5, 1),
+	PIN_FIELD_BASE(37, 37, 3, 0x90, 0x10, 6, 1),
+	PIN_FIELD_BASE(38, 38, 3, 0x90, 0x10, 9, 1),
+	PIN_FIELD_BASE(39, 39, 2, 0xf0, 0x10, 18, 1),
+	PIN_FIELD_BASE(40, 40, 2, 0xf0, 0x10, 19, 1),
+	PIN_FIELD_BASE(41, 41, 2, 0xf0, 0x10, 12, 1),
+	PIN_FIELD_BASE(42, 42, 2, 0xf0, 0x10, 22, 1),
+	PIN_FIELD_BASE(43, 43, 2, 0xf0, 0x10, 23, 1),
+	PIN_FIELD_BASE(44, 44, 2, 0xf0, 0x10, 20, 1),
+	PIN_FIELD_BASE(45, 45, 2, 0xf0, 0x10, 21, 1),
+	PIN_FIELD_BASE(46, 46, 2, 0xf0, 0x10, 26, 1),
+	PIN_FIELD_BASE(47, 47, 2, 0xf0, 0x10, 27, 1),
+	PIN_FIELD_BASE(48, 48, 2, 0xf0, 0x10, 24, 1),
+	PIN_FIELD_BASE(49, 49, 2, 0xf0, 0x10, 25, 1),
+	PIN_FIELD_BASE(50, 50, 1, 0xc0, 0x10, 2, 1),
+	PIN_FIELD_BASE(51, 51, 1, 0xc0, 0x10, 3, 1),
+	PIN_FIELD_BASE(52, 52, 1, 0xc0, 0x10, 4, 1),
+	PIN_FIELD_BASE(53, 53, 1, 0xc0, 0x10, 5, 1),
+	PIN_FIELD_BASE(54, 54, 1, 0xc0, 0x10, 6, 1),
+	PIN_FIELD_BASE(55, 55, 1, 0xc0, 0x10, 7, 1),
+	PIN_FIELD_BASE(56, 56, 1, 0xc0, 0x10, 8, 1),
+	PIN_FIELD_BASE(57, 57, 1, 0xc0, 0x10, 9, 1),
+	PIN_FIELD_BASE(58, 58, 1, 0xc0, 0x10, 1, 1),
+	PIN_FIELD_BASE(59, 59, 1, 0xc0, 0x10, 0, 1),
+	PIN_FIELD_BASE(60, 60, 1, 0xc0, 0x10, 10, 1),
+	PIN_FIELD_BASE(61, 61, 1, 0xc0, 0x10, 11, 1),
+	PIN_FIELD_BASE(62, 62, 2, 0xf0, 0x10, 15, 1),
+	PIN_FIELD_BASE(63, 63, 2, 0xf0, 0x10, 14, 1),
+	PIN_FIELD_BASE(64, 64, 2, 0xf0, 0x10, 13, 1),
+	PIN_FIELD_BASE(65, 65, 2, 0xf0, 0x10, 16, 1),
+	PIN_FIELD_BASE(66, 66, 4, 0x90, 0x10, 2, 1),
+	PIN_FIELD_BASE(67, 67, 4, 0x90, 0x10, 3, 1),
+	PIN_FIELD_BASE(68, 68, 4, 0x90, 0x10, 4, 1),
+	PIN_FIELD_BASE(69, 69, 5, 0x80, 0x10, 1, 1),
+	PIN_FIELD_BASE(70, 70, 5, 0x80, 0x10, 0, 1),
+	PIN_FIELD_BASE(71, 71, 5, 0x80, 0x10, 16, 1),
+	PIN_FIELD_BASE(72, 72, 5, 0x80, 0x10, 14, 1),
+	PIN_FIELD_BASE(73, 73, 5, 0x80, 0x10, 15, 1),
+	PIN_FIELD_BASE(74, 74, 5, 0x80, 0x10, 4, 1),
+	PIN_FIELD_BASE(75, 75, 5, 0x80, 0x10, 6, 1),
+	PIN_FIELD_BASE(76, 76, 5, 0x80, 0x10, 7, 1),
+	PIN_FIELD_BASE(77, 77, 5, 0x80, 0x10, 8, 1),
+	PIN_FIELD_BASE(78, 78, 5, 0x80, 0x10, 2, 1),
+	PIN_FIELD_BASE(79, 79, 5, 0x80, 0x10, 3, 1),
+	PIN_FIELD_BASE(80, 80, 5, 0x80, 0x10, 9, 1),
+	PIN_FIELD_BASE(81, 81, 5, 0x80, 0x10, 10, 1),
+	PIN_FIELD_BASE(82, 82, 5, 0x80, 0x10, 11, 1),
+	PIN_FIELD_BASE(83, 83, 5, 0x80, 0x10, 12, 1),
+	PIN_FIELD_BASE(84, 84, 5, 0x80, 0x10, 13, 1),
+	PIN_FIELD_BASE(85, 85, 5, 0x80, 0x10, 5, 1),
+	PIN_FIELD_BASE(86, 86, 6, 0x70, 0x10, 1, 1),
+	PIN_FIELD_BASE(87, 87, 6, 0x70, 0x10, 0, 1),
+	PIN_FIELD_BASE(88, 88, 6, 0x70, 0x10, 14, 1),
+	PIN_FIELD_BASE(89, 89, 6, 0x70, 0x10, 12, 1),
+	PIN_FIELD_BASE(90, 90, 6, 0x70, 0x10, 13, 1),
+	PIN_FIELD_BASE(91, 91, 6, 0x70, 0x10, 4, 1),
+	PIN_FIELD_BASE(92, 92, 6, 0x70, 0x10, 5, 1),
+	PIN_FIELD_BASE(93, 93, 6, 0x70, 0x10, 6, 1),
+	PIN_FIELD_BASE(94, 94, 6, 0x70, 0x10, 7, 1),
+	PIN_FIELD_BASE(95, 95, 6, 0x70, 0x10, 2, 1),
+	PIN_FIELD_BASE(96, 96, 6, 0x70, 0x10, 3, 1),
+	PIN_FIELD_BASE(97, 97, 6, 0x70, 0x10, 8, 1),
+	PIN_FIELD_BASE(98, 98, 6, 0x70, 0x10, 9, 1),
+	PIN_FIELD_BASE(99, 99, 6, 0x70, 0x10, 10, 1),
+	PIN_FIELD_BASE(100, 100, 6, 0x70, 0x10, 11, 1),
+};
+
+static const struct mtk_pin_field_calc mt7986_pin_pu_range[] = {
+	PIN_FIELD_BASE(69, 69, 5, 0x50, 0x10, 1, 1),
+	PIN_FIELD_BASE(70, 70, 5, 0x50, 0x10, 0, 1),
+	PIN_FIELD_BASE(71, 71, 5, 0x50, 0x10, 16, 1),
+	PIN_FIELD_BASE(72, 72, 5, 0x50, 0x10, 14, 1),
+	PIN_FIELD_BASE(73, 73, 5, 0x50, 0x10, 15, 1),
+	PIN_FIELD_BASE(74, 74, 5, 0x50, 0x10, 4, 1),
+	PIN_FIELD_BASE(75, 75, 5, 0x50, 0x10, 6, 1),
+	PIN_FIELD_BASE(76, 76, 5, 0x50, 0x10, 7, 1),
+	PIN_FIELD_BASE(77, 77, 5, 0x50, 0x10, 8, 1),
+	PIN_FIELD_BASE(78, 78, 5, 0x50, 0x10, 2, 1),
+	PIN_FIELD_BASE(79, 79, 5, 0x50, 0x10, 3, 1),
+	PIN_FIELD_BASE(80, 80, 5, 0x50, 0x10, 9, 1),
+	PIN_FIELD_BASE(81, 81, 5, 0x50, 0x10, 10, 1),
+	PIN_FIELD_BASE(82, 82, 5, 0x50, 0x10, 11, 1),
+	PIN_FIELD_BASE(83, 83, 5, 0x50, 0x10, 12, 1),
+	PIN_FIELD_BASE(84, 84, 5, 0x50, 0x10, 13, 1),
+	PIN_FIELD_BASE(85, 85, 5, 0x50, 0x10, 5, 1),
+	PIN_FIELD_BASE(86, 86, 6, 0x50, 0x10, 1, 1),
+	PIN_FIELD_BASE(87, 87, 6, 0x50, 0x10, 0, 1),
+	PIN_FIELD_BASE(88, 88, 6, 0x50, 0x10, 14, 1),
+	PIN_FIELD_BASE(89, 89, 6, 0x50, 0x10, 12, 1),
+	PIN_FIELD_BASE(90, 90, 6, 0x50, 0x10, 13, 1),
+	PIN_FIELD_BASE(91, 91, 6, 0x50, 0x10, 4, 1),
+	PIN_FIELD_BASE(92, 92, 6, 0x50, 0x10, 5, 1),
+	PIN_FIELD_BASE(93, 93, 6, 0x50, 0x10, 6, 1),
+	PIN_FIELD_BASE(94, 94, 6, 0x50, 0x10, 7, 1),
+	PIN_FIELD_BASE(95, 95, 6, 0x50, 0x10, 2, 1),
+	PIN_FIELD_BASE(96, 96, 6, 0x50, 0x10, 3, 1),
+	PIN_FIELD_BASE(97, 97, 6, 0x50, 0x10, 8, 1),
+	PIN_FIELD_BASE(98, 98, 6, 0x50, 0x10, 9, 1),
+	PIN_FIELD_BASE(99, 99, 6, 0x50, 0x10, 10, 1),
+	PIN_FIELD_BASE(100, 100, 6, 0x50, 0x10, 11, 1),
+};
+
+static const struct mtk_pin_field_calc mt7986_pin_pd_range[] = {
+	PIN_FIELD_BASE(69, 69, 5, 0x40, 0x10, 1, 1),
+	PIN_FIELD_BASE(70, 70, 5, 0x40, 0x10, 0, 1),
+	PIN_FIELD_BASE(71, 71, 5, 0x40, 0x10, 16, 1),
+	PIN_FIELD_BASE(72, 72, 5, 0x40, 0x10, 14, 1),
+	PIN_FIELD_BASE(73, 73, 5, 0x40, 0x10, 15, 1),
+	PIN_FIELD_BASE(74, 74, 5, 0x40, 0x10, 4, 1),
+	PIN_FIELD_BASE(75, 75, 5, 0x40, 0x10, 6, 1),
+	PIN_FIELD_BASE(76, 76, 5, 0x40, 0x10, 7, 1),
+	PIN_FIELD_BASE(77, 77, 5, 0x40, 0x10, 8, 1),
+	PIN_FIELD_BASE(78, 78, 5, 0x40, 0x10, 2, 1),
+	PIN_FIELD_BASE(79, 79, 5, 0x40, 0x10, 3, 1),
+	PIN_FIELD_BASE(80, 80, 5, 0x40, 0x10, 9, 1),
+	PIN_FIELD_BASE(81, 81, 5, 0x40, 0x10, 10, 1),
+	PIN_FIELD_BASE(82, 82, 5, 0x40, 0x10, 11, 1),
+	PIN_FIELD_BASE(83, 83, 5, 0x40, 0x10, 12, 1),
+	PIN_FIELD_BASE(84, 84, 5, 0x40, 0x10, 13, 1),
+	PIN_FIELD_BASE(85, 85, 5, 0x40, 0x10, 5, 1),
+	PIN_FIELD_BASE(86, 86, 6, 0x40, 0x10, 1, 1),
+	PIN_FIELD_BASE(87, 87, 6, 0x40, 0x10, 0, 1),
+	PIN_FIELD_BASE(88, 88, 6, 0x40, 0x10, 14, 1),
+	PIN_FIELD_BASE(89, 89, 6, 0x40, 0x10, 12, 1),
+	PIN_FIELD_BASE(90, 90, 6, 0x40, 0x10, 13, 1),
+	PIN_FIELD_BASE(91, 91, 6, 0x40, 0x10, 4, 1),
+	PIN_FIELD_BASE(92, 92, 6, 0x40, 0x10, 5, 1),
+	PIN_FIELD_BASE(93, 93, 6, 0x40, 0x10, 6, 1),
+	PIN_FIELD_BASE(94, 94, 6, 0x40, 0x10, 7, 1),
+	PIN_FIELD_BASE(95, 95, 6, 0x40, 0x10, 2, 1),
+	PIN_FIELD_BASE(96, 96, 6, 0x40, 0x10, 3, 1),
+	PIN_FIELD_BASE(97, 97, 6, 0x40, 0x10, 8, 1),
+	PIN_FIELD_BASE(98, 98, 6, 0x40, 0x10, 9, 1),
+	PIN_FIELD_BASE(99, 99, 6, 0x40, 0x10, 10, 1),
+	PIN_FIELD_BASE(100, 100, 6, 0x40, 0x10, 11, 1),
+};
+
+static const struct mtk_pin_field_calc mt7986_pin_drv_range[] = {
+	PIN_FIELD_BASE(0, 0, 2, 0x10, 0x10, 21, 3),
+	PIN_FIELD_BASE(1, 1, 3, 0x10, 0x10, 0, 3),
+	PIN_FIELD_BASE(2, 2, 3, 0x10, 0x10, 3, 3),
+	PIN_FIELD_BASE(3, 3, 4, 0x00, 0x10, 0, 1),
+	PIN_FIELD_BASE(4, 4, 4, 0x00, 0x10, 1, 1),
+	PIN_FIELD_BASE(5, 5, 2, 0x00, 0x10, 0, 3),
+	PIN_FIELD_BASE(6, 6, 2, 0x00, 0x10, 21, 3),
+	PIN_FIELD_BASE(7, 7, 3, 0x00, 0x10, 0, 3),
+	PIN_FIELD_BASE(8, 8, 3, 0x00, 0x10, 3, 3),
+	PIN_FIELD_BASE(9, 9, 3, 0x00, 0x10, 6, 3),
+	PIN_FIELD_BASE(10, 10, 3, 0x00, 0x10, 9, 3),
+	PIN_FIELD_BASE(11, 11, 2, 0x00, 0x10, 24, 3),
+	PIN_FIELD_BASE(12, 12, 2, 0x00, 0x10, 27, 3),
+	PIN_FIELD_BASE(13, 13, 2, 0x10, 0x10, 0, 3),
+	PIN_FIELD_BASE(14, 14, 2, 0x10, 0x10, 3, 3),
+	PIN_FIELD_BASE(15, 15, 2, 0x00, 0x10, 3, 3),
+	PIN_FIELD_BASE(16, 16, 2, 0x00, 0x10, 6, 3),
+	PIN_FIELD_BASE(17, 17, 2, 0x00, 0x10, 9, 3),
+	PIN_FIELD_BASE(18, 18, 2, 0x00, 0x10, 12, 3),
+	PIN_FIELD_BASE(19, 19, 2, 0x00, 0x10, 15, 3),
+	PIN_FIELD_BASE(20, 20, 2, 0x00, 0x10, 18, 3),
+	PIN_FIELD_BASE(21, 21, 1, 0x10, 0x10, 6, 3),
+	PIN_FIELD_BASE(22, 22, 1, 0x10, 0x10, 9, 3),
+	PIN_FIELD_BASE(23, 23, 1, 0x10, 0x10, 12, 3),
+	PIN_FIELD_BASE(24, 24, 1, 0x10, 0x10, 24, 3),
+	PIN_FIELD_BASE(25, 25, 1, 0x10, 0x10, 21, 3),
+	PIN_FIELD_BASE(26, 26, 1, 0x10, 0x10, 15, 3),
+	PIN_FIELD_BASE(27, 27, 1, 0x10, 0x10, 18, 3),
+	PIN_FIELD_BASE(28, 28, 1, 0x10, 0x10, 27, 3),
+	PIN_FIELD_BASE(29, 29, 1, 0x20, 0x10, 0, 3),
+	PIN_FIELD_BASE(30, 30, 1, 0x20, 0x10, 9, 3),
+	PIN_FIELD_BASE(31, 31, 1, 0x20, 0x10, 6, 3),
+	PIN_FIELD_BASE(32, 32, 1, 0x20, 0x10, 3, 3),
+	PIN_FIELD_BASE(33, 33, 3, 0x00, 0x10, 12, 3),
+	PIN_FIELD_BASE(34, 34, 3, 0x00, 0x10, 24, 3),
+	PIN_FIELD_BASE(35, 35, 3, 0x00, 0x10, 21, 3),
+	PIN_FIELD_BASE(36, 36, 3, 0x00, 0x10, 15, 3),
+	PIN_FIELD_BASE(37, 37, 3, 0x00, 0x10, 18, 3),
+	PIN_FIELD_BASE(38, 38, 3, 0x00, 0x10, 27, 3),
+	PIN_FIELD_BASE(39, 39, 2, 0x10, 0x10, 27, 3),
+	PIN_FIELD_BASE(40, 40, 2, 0x20, 0x10, 0, 3),
+	PIN_FIELD_BASE(41, 41, 2, 0x10, 0x10, 6, 3),
+	PIN_FIELD_BASE(42, 42, 2, 0x20, 0x10, 9, 3),
+	PIN_FIELD_BASE(43, 43, 2, 0x20, 0x10, 12, 3),
+	PIN_FIELD_BASE(44, 44, 2, 0x20, 0x10, 3, 3),
+	PIN_FIELD_BASE(45, 45, 2, 0x20, 0x10, 6, 3),
+	PIN_FIELD_BASE(46, 46, 2, 0x20, 0x10, 21, 3),
+	PIN_FIELD_BASE(47, 47, 2, 0x20, 0x10, 24, 3),
+	PIN_FIELD_BASE(48, 48, 2, 0x20, 0x10, 15, 3),
+	PIN_FIELD_BASE(49, 49, 2, 0x20, 0x10, 18, 3),
+	PIN_FIELD_BASE(50, 50, 1, 0x00, 0x10, 6, 3),
+	PIN_FIELD_BASE(51, 51, 1, 0x00, 0x10, 9, 3),
+	PIN_FIELD_BASE(52, 52, 1, 0x00, 0x10, 12, 3),
+	PIN_FIELD_BASE(53, 53, 1, 0x00, 0x10, 15, 3),
+	PIN_FIELD_BASE(54, 54, 1, 0x00, 0x10, 18, 3),
+	PIN_FIELD_BASE(55, 55, 1, 0x00, 0x10, 21, 3),
+	PIN_FIELD_BASE(56, 56, 1, 0x00, 0x10, 24, 3),
+	PIN_FIELD_BASE(57, 57, 1, 0x00, 0x10, 27, 3),
+	PIN_FIELD_BASE(58, 58, 1, 0x00, 0x10, 3, 3),
+	PIN_FIELD_BASE(59, 59, 1, 0x00, 0x10, 0, 3),
+	PIN_FIELD_BASE(60, 60, 1, 0x10, 0x10, 0, 3),
+	PIN_FIELD_BASE(61, 61, 1, 0x10, 0x10, 3, 3),
+	PIN_FIELD_BASE(62, 62, 2, 0x10, 0x10, 15, 3),
+	PIN_FIELD_BASE(63, 63, 2, 0x10, 0x10, 12, 3),
+	PIN_FIELD_BASE(64, 64, 2, 0x10, 0x10, 9, 3),
+	PIN_FIELD_BASE(65, 65, 2, 0x10, 0x10, 18, 3),
+	PIN_FIELD_BASE(66, 66, 4, 0x00, 0x10, 2, 3),
+	PIN_FIELD_BASE(67, 67, 4, 0x00, 0x10, 5, 3),
+	PIN_FIELD_BASE(68, 68, 4, 0x00, 0x10, 8, 3),
+	PIN_FIELD_BASE(69, 69, 5, 0x00, 0x10, 3, 3),
+	PIN_FIELD_BASE(70, 70, 5, 0x00, 0x10, 0, 3),
+	PIN_FIELD_BASE(71, 71, 5, 0x10, 0x10, 18, 3),
+	PIN_FIELD_BASE(72, 72, 5, 0x10, 0x10, 12, 3),
+	PIN_FIELD_BASE(73, 73, 5, 0x10, 0x10, 15, 3),
+	PIN_FIELD_BASE(74, 74, 5, 0x00, 0x10, 15, 3),
+	PIN_FIELD_BASE(75, 75, 5, 0x00, 0x10, 18, 3),
+	PIN_FIELD_BASE(76, 76, 5, 0x00, 0x10, 21, 3),
+	PIN_FIELD_BASE(77, 77, 5, 0x00, 0x10, 24, 3),
+	PIN_FIELD_BASE(78, 78, 5, 0x00, 0x10, 6, 3),
+	PIN_FIELD_BASE(79, 79, 5, 0x00, 0x10, 9, 3),
+	PIN_FIELD_BASE(80, 80, 5, 0x00, 0x10, 27, 3),
+	PIN_FIELD_BASE(81, 81, 5, 0x10, 0x10, 0, 3),
+	PIN_FIELD_BASE(82, 82, 5, 0x10, 0x10, 3, 3),
+	PIN_FIELD_BASE(83, 83, 5, 0x10, 0x10, 6, 3),
+	PIN_FIELD_BASE(84, 84, 5, 0x10, 0x10, 9, 3),
+	PIN_FIELD_BASE(85, 85, 5, 0x00, 0x10, 12, 3),
+	PIN_FIELD_BASE(86, 86, 6, 0x00, 0x10, 3, 3),
+	PIN_FIELD_BASE(87, 87, 6, 0x00, 0x10, 0, 3),
+	PIN_FIELD_BASE(88, 88, 6, 0x10, 0x10, 12, 3),
+	PIN_FIELD_BASE(89, 89, 6, 0x10, 0x10, 6, 3),
+	PIN_FIELD_BASE(90, 90, 6, 0x10, 0x10, 9, 3),
+	PIN_FIELD_BASE(91, 91, 6, 0x00, 0x10, 12, 3),
+	PIN_FIELD_BASE(92, 92, 6, 0x00, 0x10, 15, 3),
+	PIN_FIELD_BASE(93, 93, 6, 0x00, 0x10, 18, 3),
+	PIN_FIELD_BASE(94, 94, 6, 0x00, 0x10, 21, 3),
+	PIN_FIELD_BASE(95, 95, 6, 0x00, 0x10, 6, 3),
+	PIN_FIELD_BASE(96, 96, 6, 0x00, 0x10, 9, 3),
+	PIN_FIELD_BASE(97, 97, 6, 0x00, 0x10, 24, 3),
+	PIN_FIELD_BASE(98, 98, 6, 0x00, 0x10, 27, 3),
+	PIN_FIELD_BASE(99, 99, 6, 0x10, 0x10, 2, 3),
+	PIN_FIELD_BASE(100, 100, 6, 0x10, 0x10, 5, 3),
+};
+
+static const struct mtk_pin_field_calc mt7986_pin_pupd_range[] = {
+	PIN_FIELD_BASE(0, 0, 2, 0x60, 0x10, 17, 1),
+	PIN_FIELD_BASE(1, 1, 3, 0x30, 0x10, 10, 1),
+	PIN_FIELD_BASE(2, 2, 3, 0x30, 0x10, 11, 1),
+	PIN_FIELD_BASE(3, 3, 4, 0x40, 0x10, 0, 1),
+	PIN_FIELD_BASE(4, 4, 4, 0x40, 0x10, 1, 1),
+	PIN_FIELD_BASE(5, 5, 2, 0x60, 0x10, 0, 1),
+	PIN_FIELD_BASE(6, 6, 2, 0x60, 0x10, 1, 1),
+	PIN_FIELD_BASE(7, 7, 3, 0x30, 0x10, 0, 1),
+	PIN_FIELD_BASE(8, 8, 3, 0x30, 0x10, 1, 1),
+	PIN_FIELD_BASE(9, 9, 3, 0x30, 0x10, 2, 1),
+	PIN_FIELD_BASE(10, 10, 3, 0x30, 0x10, 3, 1),
+	PIN_FIELD_BASE(11, 11, 2, 0x60, 0x10, 8, 1),
+	PIN_FIELD_BASE(12, 12, 2, 0x60, 0x10, 9, 1),
+	PIN_FIELD_BASE(13, 13, 2, 0x60, 0x10, 10, 1),
+	PIN_FIELD_BASE(14, 14, 2, 0x60, 0x10, 11, 1),
+	PIN_FIELD_BASE(15, 15, 2, 0x60, 0x10, 2, 1),
+	PIN_FIELD_BASE(16, 16, 2, 0x60, 0x10, 3, 1),
+	PIN_FIELD_BASE(17, 17, 2, 0x60, 0x10, 4, 1),
+	PIN_FIELD_BASE(18, 18, 2, 0x60, 0x10, 5, 1),
+	PIN_FIELD_BASE(19, 19, 2, 0x60, 0x10, 6, 1),
+	PIN_FIELD_BASE(20, 20, 2, 0x60, 0x10, 7, 1),
+	PIN_FIELD_BASE(21, 21, 1, 0x40, 0x10, 12, 1),
+	PIN_FIELD_BASE(22, 22, 1, 0x40, 0x10, 13, 1),
+	PIN_FIELD_BASE(23, 23, 1, 0x40, 0x10, 14, 1),
+	PIN_FIELD_BASE(24, 24, 1, 0x40, 0x10, 18, 1),
+	PIN_FIELD_BASE(25, 25, 1, 0x40, 0x10, 17, 1),
+	PIN_FIELD_BASE(26, 26, 1, 0x40, 0x10, 15, 1),
+	PIN_FIELD_BASE(27, 27, 1, 0x40, 0x10, 16, 1),
+	PIN_FIELD_BASE(28, 28, 1, 0x40, 0x10, 19, 1),
+	PIN_FIELD_BASE(29, 29, 1, 0x40, 0x10, 20, 1),
+	PIN_FIELD_BASE(30, 30, 1, 0x40, 0x10, 23, 1),
+	PIN_FIELD_BASE(31, 31, 1, 0x40, 0x10, 22, 1),
+	PIN_FIELD_BASE(32, 32, 1, 0x40, 0x10, 21, 1),
+	PIN_FIELD_BASE(33, 33, 3, 0x30, 0x10, 4, 1),
+	PIN_FIELD_BASE(34, 34, 3, 0x30, 0x10, 8, 1),
+	PIN_FIELD_BASE(35, 35, 3, 0x30, 0x10, 7, 1),
+	PIN_FIELD_BASE(36, 36, 3, 0x30, 0x10, 5, 1),
+	PIN_FIELD_BASE(37, 37, 3, 0x30, 0x10, 6, 1),
+	PIN_FIELD_BASE(38, 38, 3, 0x30, 0x10, 9, 1),
+	PIN_FIELD_BASE(39, 39, 2, 0x60, 0x10, 18, 1),
+	PIN_FIELD_BASE(40, 40, 2, 0x60, 0x10, 19, 1),
+	PIN_FIELD_BASE(41, 41, 2, 0x60, 0x10, 12, 1),
+	PIN_FIELD_BASE(42, 42, 2, 0x60, 0x10, 22, 1),
+	PIN_FIELD_BASE(43, 43, 2, 0x60, 0x10, 23, 1),
+	PIN_FIELD_BASE(44, 44, 2, 0x60, 0x10, 20, 1),
+	PIN_FIELD_BASE(45, 45, 2, 0x60, 0x10, 21, 1),
+	PIN_FIELD_BASE(46, 46, 2, 0x60, 0x10, 26, 1),
+	PIN_FIELD_BASE(47, 47, 2, 0x60, 0x10, 27, 1),
+	PIN_FIELD_BASE(48, 48, 2, 0x60, 0x10, 24, 1),
+	PIN_FIELD_BASE(49, 49, 2, 0x60, 0x10, 25, 1),
+	PIN_FIELD_BASE(50, 50, 1, 0x40, 0x10, 2, 1),
+	PIN_FIELD_BASE(51, 51, 1, 0x40, 0x10, 3, 1),
+	PIN_FIELD_BASE(52, 52, 1, 0x40, 0x10, 4, 1),
+	PIN_FIELD_BASE(53, 53, 1, 0x40, 0x10, 5, 1),
+	PIN_FIELD_BASE(54, 54, 1, 0x40, 0x10, 6, 1),
+	PIN_FIELD_BASE(55, 55, 1, 0x40, 0x10, 7, 1),
+	PIN_FIELD_BASE(56, 56, 1, 0x40, 0x10, 8, 1),
+	PIN_FIELD_BASE(57, 57, 1, 0x40, 0x10, 9, 1),
+	PIN_FIELD_BASE(58, 58, 1, 0x40, 0x10, 1, 1),
+	PIN_FIELD_BASE(59, 59, 1, 0x40, 0x10, 0, 1),
+	PIN_FIELD_BASE(60, 60, 1, 0x40, 0x10, 10, 1),
+	PIN_FIELD_BASE(61, 61, 1, 0x40, 0x10, 11, 1),
+	PIN_FIELD_BASE(62, 62, 2, 0x60, 0x10, 15, 1),
+	PIN_FIELD_BASE(63, 63, 2, 0x60, 0x10, 14, 1),
+	PIN_FIELD_BASE(64, 64, 2, 0x60, 0x10, 13, 1),
+	PIN_FIELD_BASE(65, 65, 2, 0x60, 0x10, 16, 1),
+	PIN_FIELD_BASE(66, 66, 4, 0x40, 0x10, 2, 1),
+	PIN_FIELD_BASE(67, 67, 4, 0x40, 0x10, 3, 1),
+	PIN_FIELD_BASE(68, 68, 4, 0x40, 0x10, 4, 1),
+};
+
+static const struct mtk_pin_field_calc mt7986_pin_r0_range[] = {
+	PIN_FIELD_BASE(0, 0, 2, 0x70, 0x10, 17, 1),
+	PIN_FIELD_BASE(1, 1, 3, 0x40, 0x10, 10, 1),
+	PIN_FIELD_BASE(2, 2, 3, 0x40, 0x10, 11, 1),
+	PIN_FIELD_BASE(3, 3, 4, 0x50, 0x10, 0, 1),
+	PIN_FIELD_BASE(4, 4, 4, 0x50, 0x10, 1, 1),
+	PIN_FIELD_BASE(5, 5, 2, 0x70, 0x10, 0, 1),
+	PIN_FIELD_BASE(6, 6, 2, 0x70, 0x10, 1, 1),
+	PIN_FIELD_BASE(7, 7, 3, 0x40, 0x10, 0, 1),
+	PIN_FIELD_BASE(8, 8, 3, 0x40, 0x10, 1, 1),
+	PIN_FIELD_BASE(9, 9, 3, 0x40, 0x10, 2, 1),
+	PIN_FIELD_BASE(10, 10, 3, 0x40, 0x10, 3, 1),
+	PIN_FIELD_BASE(11, 11, 2, 0x70, 0x10, 8, 1),
+	PIN_FIELD_BASE(12, 12, 2, 0x70, 0x10, 9, 1),
+	PIN_FIELD_BASE(13, 13, 2, 0x70, 0x10, 10, 1),
+	PIN_FIELD_BASE(14, 14, 2, 0x70, 0x10, 11, 1),
+	PIN_FIELD_BASE(15, 15, 2, 0x70, 0x10, 2, 1),
+	PIN_FIELD_BASE(16, 16, 2, 0x70, 0x10, 3, 1),
+	PIN_FIELD_BASE(17, 17, 2, 0x70, 0x10, 4, 1),
+	PIN_FIELD_BASE(18, 18, 2, 0x70, 0x10, 5, 1),
+	PIN_FIELD_BASE(19, 19, 2, 0x70, 0x10, 6, 1),
+	PIN_FIELD_BASE(20, 20, 2, 0x70, 0x10, 7, 1),
+	PIN_FIELD_BASE(21, 21, 1, 0x50, 0x10, 12, 1),
+	PIN_FIELD_BASE(22, 22, 1, 0x50, 0x10, 13, 1),
+	PIN_FIELD_BASE(23, 23, 1, 0x50, 0x10, 14, 1),
+	PIN_FIELD_BASE(24, 24, 1, 0x50, 0x10, 18, 1),
+	PIN_FIELD_BASE(25, 25, 1, 0x50, 0x10, 17, 1),
+	PIN_FIELD_BASE(26, 26, 1, 0x50, 0x10, 15, 1),
+	PIN_FIELD_BASE(27, 27, 1, 0x50, 0x10, 16, 1),
+	PIN_FIELD_BASE(28, 28, 1, 0x50, 0x10, 19, 1),
+	PIN_FIELD_BASE(29, 29, 1, 0x50, 0x10, 20, 1),
+	PIN_FIELD_BASE(30, 30, 1, 0x50, 0x10, 23, 1),
+	PIN_FIELD_BASE(31, 31, 1, 0x50, 0x10, 22, 1),
+	PIN_FIELD_BASE(32, 32, 1, 0x50, 0x10, 21, 1),
+	PIN_FIELD_BASE(33, 33, 3, 0x40, 0x10, 4, 1),
+	PIN_FIELD_BASE(34, 34, 3, 0x40, 0x10, 8, 1),
+	PIN_FIELD_BASE(35, 35, 3, 0x40, 0x10, 7, 1),
+	PIN_FIELD_BASE(36, 36, 3, 0x40, 0x10, 5, 1),
+	PIN_FIELD_BASE(37, 37, 3, 0x40, 0x10, 6, 1),
+	PIN_FIELD_BASE(38, 38, 3, 0x40, 0x10, 9, 1),
+	PIN_FIELD_BASE(39, 39, 2, 0x70, 0x10, 18, 1),
+	PIN_FIELD_BASE(40, 40, 2, 0x70, 0x10, 19, 1),
+	PIN_FIELD_BASE(41, 41, 2, 0x70, 0x10, 12, 1),
+	PIN_FIELD_BASE(42, 42, 2, 0x70, 0x10, 22, 1),
+	PIN_FIELD_BASE(43, 43, 2, 0x70, 0x10, 23, 1),
+	PIN_FIELD_BASE(44, 44, 2, 0x70, 0x10, 20, 1),
+	PIN_FIELD_BASE(45, 45, 2, 0x70, 0x10, 21, 1),
+	PIN_FIELD_BASE(46, 46, 2, 0x70, 0x10, 26, 1),
+	PIN_FIELD_BASE(47, 47, 2, 0x70, 0x10, 27, 1),
+	PIN_FIELD_BASE(48, 48, 2, 0x70, 0x10, 24, 1),
+	PIN_FIELD_BASE(49, 49, 2, 0x70, 0x10, 25, 1),
+	PIN_FIELD_BASE(50, 50, 1, 0x50, 0x10, 2, 1),
+	PIN_FIELD_BASE(51, 51, 1, 0x50, 0x10, 3, 1),
+	PIN_FIELD_BASE(52, 52, 1, 0x50, 0x10, 4, 1),
+	PIN_FIELD_BASE(53, 53, 1, 0x50, 0x10, 5, 1),
+	PIN_FIELD_BASE(54, 54, 1, 0x50, 0x10, 6, 1),
+	PIN_FIELD_BASE(55, 55, 1, 0x50, 0x10, 7, 1),
+	PIN_FIELD_BASE(56, 56, 1, 0x50, 0x10, 8, 1),
+	PIN_FIELD_BASE(57, 57, 1, 0x50, 0x10, 9, 1),
+	PIN_FIELD_BASE(58, 58, 1, 0x50, 0x10, 1, 1),
+	PIN_FIELD_BASE(59, 59, 1, 0x50, 0x10, 0, 1),
+	PIN_FIELD_BASE(60, 60, 1, 0x50, 0x10, 10, 1),
+	PIN_FIELD_BASE(61, 61, 1, 0x50, 0x10, 11, 1),
+	PIN_FIELD_BASE(62, 62, 2, 0x70, 0x10, 15, 1),
+	PIN_FIELD_BASE(63, 63, 2, 0x70, 0x10, 14, 1),
+	PIN_FIELD_BASE(64, 64, 2, 0x70, 0x10, 13, 1),
+	PIN_FIELD_BASE(65, 65, 2, 0x70, 0x10, 16, 1),
+	PIN_FIELD_BASE(66, 66, 4, 0x50, 0x10, 2, 1),
+	PIN_FIELD_BASE(67, 67, 4, 0x50, 0x10, 3, 1),
+	PIN_FIELD_BASE(68, 68, 4, 0x50, 0x10, 4, 1),
+};
+
+static const struct mtk_pin_field_calc mt7986_pin_r1_range[] = {
+	PIN_FIELD_BASE(0, 0, 2, 0x80, 0x10, 17, 1),
+	PIN_FIELD_BASE(1, 1, 3, 0x50, 0x10, 10, 1),
+	PIN_FIELD_BASE(2, 2, 3, 0x50, 0x10, 11, 1),
+	PIN_FIELD_BASE(3, 3, 4, 0x60, 0x10, 0, 1),
+	PIN_FIELD_BASE(4, 4, 4, 0x60, 0x10, 1, 1),
+	PIN_FIELD_BASE(5, 5, 2, 0x80, 0x10, 0, 1),
+	PIN_FIELD_BASE(6, 6, 2, 0x80, 0x10, 1, 1),
+	PIN_FIELD_BASE(7, 7, 3, 0x50, 0x10, 0, 1),
+	PIN_FIELD_BASE(8, 8, 3, 0x50, 0x10, 1, 1),
+	PIN_FIELD_BASE(9, 9, 3, 0x50, 0x10, 2, 1),
+	PIN_FIELD_BASE(10, 10, 3, 0x50, 0x10, 3, 1),
+	PIN_FIELD_BASE(11, 11, 2, 0x80, 0x10, 8, 1),
+	PIN_FIELD_BASE(12, 12, 2, 0x80, 0x10, 9, 1),
+	PIN_FIELD_BASE(13, 13, 2, 0x80, 0x10, 10, 1),
+	PIN_FIELD_BASE(14, 14, 2, 0x80, 0x10, 11, 1),
+	PIN_FIELD_BASE(15, 15, 2, 0x80, 0x10, 2, 1),
+	PIN_FIELD_BASE(16, 16, 2, 0x80, 0x10, 3, 1),
+	PIN_FIELD_BASE(17, 17, 2, 0x80, 0x10, 4, 1),
+	PIN_FIELD_BASE(18, 18, 2, 0x80, 0x10, 5, 1),
+	PIN_FIELD_BASE(19, 19, 2, 0x80, 0x10, 6, 1),
+	PIN_FIELD_BASE(20, 20, 2, 0x80, 0x10, 7, 1),
+	PIN_FIELD_BASE(21, 21, 1, 0x60, 0x10, 12, 1),
+	PIN_FIELD_BASE(22, 22, 1, 0x60, 0x10, 13, 1),
+	PIN_FIELD_BASE(23, 23, 1, 0x60, 0x10, 14, 1),
+	PIN_FIELD_BASE(24, 24, 1, 0x60, 0x10, 18, 1),
+	PIN_FIELD_BASE(25, 25, 1, 0x60, 0x10, 17, 1),
+	PIN_FIELD_BASE(26, 26, 1, 0x60, 0x10, 15, 1),
+	PIN_FIELD_BASE(27, 27, 1, 0x60, 0x10, 16, 1),
+	PIN_FIELD_BASE(28, 28, 1, 0x60, 0x10, 19, 1),
+	PIN_FIELD_BASE(29, 29, 1, 0x60, 0x10, 20, 1),
+	PIN_FIELD_BASE(30, 30, 1, 0x60, 0x10, 23, 1),
+	PIN_FIELD_BASE(31, 31, 1, 0x60, 0x10, 22, 1),
+	PIN_FIELD_BASE(32, 32, 1, 0x60, 0x10, 21, 1),
+	PIN_FIELD_BASE(33, 33, 3, 0x50, 0x10, 4, 1),
+	PIN_FIELD_BASE(34, 34, 3, 0x50, 0x10, 8, 1),
+	PIN_FIELD_BASE(35, 35, 3, 0x50, 0x10, 7, 1),
+	PIN_FIELD_BASE(36, 36, 3, 0x50, 0x10, 5, 1),
+	PIN_FIELD_BASE(37, 37, 3, 0x50, 0x10, 6, 1),
+	PIN_FIELD_BASE(38, 38, 3, 0x50, 0x10, 9, 1),
+	PIN_FIELD_BASE(39, 39, 2, 0x80, 0x10, 18, 1),
+	PIN_FIELD_BASE(40, 40, 2, 0x80, 0x10, 19, 1),
+	PIN_FIELD_BASE(41, 41, 2, 0x80, 0x10, 12, 1),
+	PIN_FIELD_BASE(42, 42, 2, 0x80, 0x10, 22, 1),
+	PIN_FIELD_BASE(43, 43, 2, 0x80, 0x10, 23, 1),
+	PIN_FIELD_BASE(44, 44, 2, 0x80, 0x10, 20, 1),
+	PIN_FIELD_BASE(45, 45, 2, 0x80, 0x10, 21, 1),
+	PIN_FIELD_BASE(46, 46, 2, 0x80, 0x10, 26, 1),
+	PIN_FIELD_BASE(47, 47, 2, 0x80, 0x10, 27, 1),
+	PIN_FIELD_BASE(48, 48, 2, 0x80, 0x10, 24, 1),
+	PIN_FIELD_BASE(49, 49, 2, 0x80, 0x10, 25, 1),
+	PIN_FIELD_BASE(50, 50, 1, 0x60, 0x10, 2, 1),
+	PIN_FIELD_BASE(51, 51, 1, 0x60, 0x10, 3, 1),
+	PIN_FIELD_BASE(52, 52, 1, 0x60, 0x10, 4, 1),
+	PIN_FIELD_BASE(53, 53, 1, 0x60, 0x10, 5, 1),
+	PIN_FIELD_BASE(54, 54, 1, 0x60, 0x10, 6, 1),
+	PIN_FIELD_BASE(55, 55, 1, 0x60, 0x10, 7, 1),
+	PIN_FIELD_BASE(56, 56, 1, 0x60, 0x10, 8, 1),
+	PIN_FIELD_BASE(57, 57, 1, 0x60, 0x10, 9, 1),
+	PIN_FIELD_BASE(58, 58, 1, 0x60, 0x10, 1, 1),
+	PIN_FIELD_BASE(59, 59, 1, 0x60, 0x10, 0, 1),
+	PIN_FIELD_BASE(60, 60, 1, 0x60, 0x10, 10, 1),
+	PIN_FIELD_BASE(61, 61, 1, 0x60, 0x10, 11, 1),
+	PIN_FIELD_BASE(62, 62, 2, 0x80, 0x10, 15, 1),
+	PIN_FIELD_BASE(63, 63, 2, 0x80, 0x10, 14, 1),
+	PIN_FIELD_BASE(64, 64, 2, 0x80, 0x10, 13, 1),
+	PIN_FIELD_BASE(65, 65, 2, 0x80, 0x10, 16, 1),
+	PIN_FIELD_BASE(66, 66, 4, 0x60, 0x10, 2, 1),
+	PIN_FIELD_BASE(67, 67, 4, 0x60, 0x10, 3, 1),
+	PIN_FIELD_BASE(68, 68, 4, 0x60, 0x10, 4, 1),
+};
+
+static const struct mtk_pin_reg_calc mt7986_reg_cals[] = {
+	[PINCTRL_PIN_REG_MODE] = MTK_RANGE(mt7986_pin_mode_range),
+	[PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt7986_pin_dir_range),
+	[PINCTRL_PIN_REG_DI] = MTK_RANGE(mt7986_pin_di_range),
+	[PINCTRL_PIN_REG_DO] = MTK_RANGE(mt7986_pin_do_range),
+	[PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt7986_pin_smt_range),
+	[PINCTRL_PIN_REG_IES] = MTK_RANGE(mt7986_pin_ies_range),
+	[PINCTRL_PIN_REG_PU] = MTK_RANGE(mt7986_pin_pu_range),
+	[PINCTRL_PIN_REG_PD] = MTK_RANGE(mt7986_pin_pd_range),
+	[PINCTRL_PIN_REG_DRV] = MTK_RANGE(mt7986_pin_drv_range),
+	[PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt7986_pin_pupd_range),
+	[PINCTRL_PIN_REG_R0] = MTK_RANGE(mt7986_pin_r0_range),
+	[PINCTRL_PIN_REG_R1] = MTK_RANGE(mt7986_pin_r1_range),
+};
+
+static const struct mtk_pin_desc mt7986_pins[] = {
+	MT7986_PIN(0, "SYS_WATCHDOG"),
+	MT7986_PIN(1, "WF2G_LED"),
+	MT7986_PIN(2, "WF5G_LED"),
+	MT7986_PIN(3, "I2C_SCL"),
+	MT7986_PIN(4, "I2C_SDA"),
+	MT7986_PIN(5, "GPIO_0"),
+	MT7986_PIN(6, "GPIO_1"),
+	MT7986_PIN(7, "GPIO_2"),
+	MT7986_PIN(8, "GPIO_3"),
+	MT7986_PIN(9, "GPIO_4"),
+	MT7986_PIN(10, "GPIO_5"),
+	MT7986_PIN(11, "GPIO_6"),
+	MT7986_PIN(12, "GPIO_7"),
+	MT7986_PIN(13, "GPIO_8"),
+	MT7986_PIN(14, "GPIO_9"),
+	MT7986_PIN(15, "GPIO_10"),
+	MT7986_PIN(16, "GPIO_11"),
+	MT7986_PIN(17, "GPIO_12"),
+	MT7986_PIN(18, "GPIO_13"),
+	MT7986_PIN(19, "GPIO_14"),
+	MT7986_PIN(20, "GPIO_15"),
+	MT7986_PIN(21, "PWM0"),
+	MT7986_PIN(22, "PWM1"),
+	MT7986_PIN(23, "SPI0_CLK"),
+	MT7986_PIN(24, "SPI0_MOSI"),
+	MT7986_PIN(25, "SPI0_MISO"),
+	MT7986_PIN(26, "SPI0_CS"),
+	MT7986_PIN(27, "SPI0_HOLD"),
+	MT7986_PIN(28, "SPI0_WP"),
+	MT7986_PIN(29, "SPI1_CLK"),
+	MT7986_PIN(30, "SPI1_MOSI"),
+	MT7986_PIN(31, "SPI1_MISO"),
+	MT7986_PIN(32, "SPI1_CS"),
+	MT7986_PIN(33, "SPI2_CLK"),
+	MT7986_PIN(34, "SPI2_MOSI"),
+	MT7986_PIN(35, "SPI2_MISO"),
+	MT7986_PIN(36, "SPI2_CS"),
+	MT7986_PIN(37, "SPI2_HOLD"),
+	MT7986_PIN(38, "SPI2_WP"),
+	MT7986_PIN(39, "UART0_RXD"),
+	MT7986_PIN(40, "UART0_TXD"),
+	MT7986_PIN(41, "PCIE_PERESET_N"),
+	MT7986_PIN(42, "UART1_RXD"),
+	MT7986_PIN(43, "UART1_TXD"),
+	MT7986_PIN(44, "UART1_CTS"),
+	MT7986_PIN(45, "UART1_RTS"),
+	MT7986_PIN(46, "UART2_RXD"),
+	MT7986_PIN(47, "UART2_TXD"),
+	MT7986_PIN(48, "UART2_CTS"),
+	MT7986_PIN(49, "UART2_RTS"),
+	MT7986_PIN(50, "EMMC_DATA_0"),
+	MT7986_PIN(51, "EMMC_DATA_1"),
+	MT7986_PIN(52, "EMMC_DATA_2"),
+	MT7986_PIN(53, "EMMC_DATA_3"),
+	MT7986_PIN(54, "EMMC_DATA_4"),
+	MT7986_PIN(55, "EMMC_DATA_5"),
+	MT7986_PIN(56, "EMMC_DATA_6"),
+	MT7986_PIN(57, "EMMC_DATA_7"),
+	MT7986_PIN(58, "EMMC_CMD"),
+	MT7986_PIN(59, "EMMC_CK"),
+	MT7986_PIN(60, "EMMC_DSL"),
+	MT7986_PIN(61, "EMMC_RSTB"),
+	MT7986_PIN(62, "PCM_DTX"),
+	MT7986_PIN(63, "PCM_DRX"),
+	MT7986_PIN(64, "PCM_CLK"),
+	MT7986_PIN(65, "PCM_FS"),
+	MT7986_PIN(66, "MT7531_INT"),
+	MT7986_PIN(67, "SMI_MDC"),
+	MT7986_PIN(68, "SMI_MDIO"),
+	MT7986_PIN(69, "WF0_DIG_RESETB"),
+	MT7986_PIN(70, "WF0_CBA_RESETB"),
+	MT7986_PIN(71, "WF0_XO_REQ"),
+	MT7986_PIN(72, "WF0_TOP_CLK"),
+	MT7986_PIN(73, "WF0_TOP_DATA"),
+	MT7986_PIN(74, "WF0_HB1"),
+	MT7986_PIN(75, "WF0_HB2"),
+	MT7986_PIN(76, "WF0_HB3"),
+	MT7986_PIN(77, "WF0_HB4"),
+	MT7986_PIN(78, "WF0_HB0"),
+	MT7986_PIN(79, "WF0_HB0_B"),
+	MT7986_PIN(80, "WF0_HB5"),
+	MT7986_PIN(81, "WF0_HB6"),
+	MT7986_PIN(82, "WF0_HB7"),
+	MT7986_PIN(83, "WF0_HB8"),
+	MT7986_PIN(84, "WF0_HB9"),
+	MT7986_PIN(85, "WF0_HB10"),
+	MT7986_PIN(86, "WF1_DIG_RESETB"),
+	MT7986_PIN(87, "WF1_CBA_RESETB"),
+	MT7986_PIN(88, "WF1_XO_REQ"),
+	MT7986_PIN(89, "WF1_TOP_CLK"),
+	MT7986_PIN(90, "WF1_TOP_DATA"),
+	MT7986_PIN(91, "WF1_HB1"),
+	MT7986_PIN(92, "WF1_HB2"),
+	MT7986_PIN(93, "WF1_HB3"),
+	MT7986_PIN(94, "WF1_HB4"),
+	MT7986_PIN(95, "WF1_HB0"),
+	MT7986_PIN(96, "WF1_HB0_B"),
+	MT7986_PIN(97, "WF1_HB5"),
+	MT7986_PIN(98, "WF1_HB6"),
+	MT7986_PIN(99, "WF1_HB7"),
+	MT7986_PIN(100, "WF1_HB8"),
+};
+
+/* List all pin groups that enable a particular hardware block, along with
+ * the mux function value applied to each pin in the group. A hardware
+ * block may be routable through several alternative pinouts, so multiple
+ * groups can exist for the same function.
+ */
+
+/* Per-group pin lists and the mux function value programmed on each pin.
+ * Each <name>_pins[] array pairs index-for-index with <name>_funcs[].
+ */
+
+/* SYS_WATCHDOG */
+static int mt7986_watchdog_pins[] = { 0, };
+static int mt7986_watchdog_funcs[] = { 1, };
+
+/* WF2G_LED(1), WF5G_LED(1) */
+static int mt7986_wifi_led_pins[] = { 1, 2, };
+static int mt7986_wifi_led_funcs[] = { 1, 1, };
+
+/* I2C */
+static int mt7986_i2c_pins[] = { 3, 4, };
+static int mt7986_i2c_funcs[] = { 1, 1, };
+
+/* UART1 */
+static int mt7986_uart1_0_pins[] = { 7, 8, 9, 10, };
+static int mt7986_uart1_0_funcs[] = { 3, 3, 3, 3, };
+
+/* JTAG */
+static int mt7986_jtag_pins[] = { 11, 12, 13, 14, 15, };
+static int mt7986_jtag_funcs[] = { 1, 1, 1, 1, 1, };
+
+/* SPI1 */
+static int mt7986_spi1_0_pins[] = { 11, 12, 13, 14, };
+static int mt7986_spi1_0_funcs[] = { 3, 3, 3, 3, };
+
+/* PWM */
+static int mt7986_pwm1_1_pins[] = { 20, };
+static int mt7986_pwm1_1_funcs[] = { 2, };
+
+/* PWM */
+static int mt7986_pwm0_pins[] = { 21, };
+static int mt7986_pwm0_funcs[] = { 1, };
+
+/* PWM */
+static int mt7986_pwm1_0_pins[] = { 22, };
+static int mt7986_pwm1_0_funcs[] = { 1, };
+
+/* EMMC */
+static int mt7986_emmc_45_pins[] = { 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, };
+static int mt7986_emmc_45_funcs[] = { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, };
+
+/* SNFI */
+static int mt7986_snfi_pins[] = { 23, 24, 25, 26, 27, 28, };
+static int mt7986_snfi_funcs[] = { 1, 1, 1, 1, 1, 1, };
+
+/* SPI1 */
+static int mt7986_spi1_1_pins[] = { 23, 24, 25, 26, };
+static int mt7986_spi1_1_funcs[] = { 3, 3, 3, 3, };
+
+/* UART1 */
+static int mt7986_uart1_1_pins[] = { 23, 24, 25, 26, };
+static int mt7986_uart1_1_funcs[] = { 4, 4, 4, 4, };
+
+/* SPI1 */
+static int mt7986_spi1_2_pins[] = { 29, 30, 31, 32, };
+static int mt7986_spi1_2_funcs[] = { 1, 1, 1, 1, };
+
+/* UART1 */
+static int mt7986_uart1_2_pins[] = { 29, 30, 31, 32, };
+static int mt7986_uart1_2_funcs[] = { 3, 3, 3, 3, };
+
+/* UART2 */
+static int mt7986_uart2_0_pins[] = { 29, 30, 31, 32, };
+static int mt7986_uart2_0_funcs[] = { 4, 4, 4, 4, };
+
+/* SPI0 */
+static int mt7986_spi0_pins[] = { 33, 34, 35, 36, };
+static int mt7986_spi0_funcs[] = { 1, 1, 1, 1, };
+
+/* SPI0 */
+static int mt7986_spi0_wp_hold_pins[] = { 37, 38, };
+static int mt7986_spi0_wp_hold_funcs[] = { 1, 1, };
+
+/* UART2 */
+static int mt7986_uart2_1_pins[] = { 33, 34, 35, 36, };
+static int mt7986_uart2_1_funcs[] = { 3, 3, 3, 3, };
+
+/* UART1 */
+static int mt7986_uart1_3_rx_tx_pins[] = { 35, 36, };
+static int mt7986_uart1_3_rx_tx_funcs[] = { 2, 2, };
+
+/* UART1 */
+static int mt7986_uart1_3_cts_rts_pins[] = { 37, 38, };
+static int mt7986_uart1_3_cts_rts_funcs[] = { 2, 2, };
+
+/* SPI1 */
+static int mt7986_spi1_3_pins[] = { 33, 34, 35, 36, };
+static int mt7986_spi1_3_funcs[] = { 4, 4, 4, 4, };
+
+/* UART0 */
+static int mt7986_uart0_pins[] = { 39, 40, };
+static int mt7986_uart0_funcs[] = { 1, 1, };
+
+/* PCIE_PERESET_N */
+static int mt7986_pcie_reset_pins[] = { 41, };
+static int mt7986_pcie_reset_funcs[] = { 1, };
+
+/* UART1 */
+static int mt7986_uart1_pins[] = { 42, 43, 44, 45, };
+static int mt7986_uart1_funcs[] = { 1, 1, 1, 1, };
+
+/* UART2 */
+static int mt7986_uart2_pins[] = { 46, 47, 48, 49, };
+static int mt7986_uart2_funcs[] = { 1, 1, 1, 1, };
+
+/* EMMC */
+static int mt7986_emmc_51_pins[] = { 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, };
+static int mt7986_emmc_51_funcs[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, };
+
+/* PCM */
+static int mt7986_pcm_pins[] = { 62, 63, 64, 65, };
+static int mt7986_pcm_funcs[] = { 1, 1, 1, 1, };
+
+/* MT7531_INT */
+static int mt7986_switch_int_pins[] = { 66, };
+static int mt7986_switch_int_funcs[] = { 1, };
+
+/* MDC_MDIO */
+static int mt7986_mdc_mdio_pins[] = { 67, 68, };
+static int mt7986_mdc_mdio_funcs[] = { 1, 1, };
+
+/* WF0_MODE1 */
+static int mt7986_wf0_mode1_pins[] = { 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85 };
+static int mt7986_wf0_mode1_funcs[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
+
+/* WF0_HB */
+static int mt7986_wf0_hb_pins[] = { 74, 75, 76, 77, 78 };
+static int mt7986_wf0_hb_funcs[] = { 2, 2, 2, 2, 2 };
+
+/* WF0_MODE3 */
+static int mt7986_wf0_mode3_pins[] = { 74, 75, 76, 77, 78, 80 };
+static int mt7986_wf0_mode3_funcs[] = { 3, 3, 3, 3, 3, 3 };
+
+/* WF1_HB */
+static int mt7986_wf1_hb_pins[] = { 79, 80, 81, 82, 83, 84, 85 };
+static int mt7986_wf1_hb_funcs[] = { 2, 2, 2, 2, 2, 2, 2 };
+
+/* WF1_MODE1 */
+static int mt7986_wf1_mode1_pins[] = { 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100 };
+static int mt7986_wf1_mode1_funcs[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
+
+/* WF1_MODE2 */
+static int mt7986_wf1_mode2_pins[] = { 91, 92, 93, 94, 95, 97 };
+static int mt7986_wf1_mode2_funcs[] = { 2, 2, 2, 2, 2 };
+
+/* Group descriptor table: each entry names a group and binds the
+ * <name>_pins[] / <name>_funcs[] pair declared above. The @GPIO(a,b)
+ * comments give the pin range and, in parentheses, the mux value.
+ */
+static const struct group_desc mt7986_groups[] = {
+	/*  @GPIO(0): SYS_WATCHDOG(1) */
+        PINCTRL_PIN_GROUP("watchdog", mt7986_watchdog),
+	/*  @GPIO(1,2): WF2G_LED(1), WF5G_LED(1) */
+        PINCTRL_PIN_GROUP("wifi_led", mt7986_wifi_led),
+	/*  @GPIO(3,4): I2C(1) */
+        PINCTRL_PIN_GROUP("i2c", mt7986_i2c),
+	/*  @GPIO(7,10): UART1(3) */
+        PINCTRL_PIN_GROUP("uart1_0", mt7986_uart1_0),
+	/*  @GPIO(11,15): JTAG(1) */
+        PINCTRL_PIN_GROUP("jtag", mt7986_jtag),
+	/*  @GPIO(11,15): SPI1(3) */
+        PINCTRL_PIN_GROUP("spi1_0", mt7986_spi1_0),
+	/*  @GPIO(20): PWM(2) */
+        PINCTRL_PIN_GROUP("pwm1_1", mt7986_pwm1_1),
+	/*  @GPIO(21): PWM(1) */
+        PINCTRL_PIN_GROUP("pwm0", mt7986_pwm0),
+	/*  @GPIO(22): PWM(1) */
+        PINCTRL_PIN_GROUP("pwm1_0", mt7986_pwm1_0),
+	/*  @GPIO(22,32): EMMC(2) */
+        PINCTRL_PIN_GROUP("emmc_45", mt7986_emmc_45),
+	/*  @GPIO(23,28): SNFI(1) */
+        PINCTRL_PIN_GROUP("snfi", mt7986_snfi),
+	/*  @GPIO(23,26): SPI1(3) */
+        PINCTRL_PIN_GROUP("spi1_1", mt7986_spi1_1),
+	/*  @GPIO(23,26): UART1(4) */
+        PINCTRL_PIN_GROUP("uart1_1", mt7986_uart1_1),
+	/*  @GPIO(29,32): SPI1(1) */
+        PINCTRL_PIN_GROUP("spi1_2", mt7986_spi1_2),
+	/*  @GPIO(29,32): UART1(3) */
+        PINCTRL_PIN_GROUP("uart1_2", mt7986_uart1_2),
+	/*  @GPIO(29,32): UART2(4) */
+        PINCTRL_PIN_GROUP("uart2_0", mt7986_uart2_0),
+	/*  @GPIO(33,36): SPI0(1) */
+        PINCTRL_PIN_GROUP("spi0", mt7986_spi0),
+	/*  @GPIO(37,38): SPI0(1) */
+        PINCTRL_PIN_GROUP("spi0_wp_hold", mt7986_spi0_wp_hold),
+	/*  @GPIO(33,36): UART2(3) */
+        PINCTRL_PIN_GROUP("uart2_1", mt7986_uart2_1),
+	/*  @GPIO(35,36): UART1(2) */
+        PINCTRL_PIN_GROUP("uart1_3_rx_tx", mt7986_uart1_3_rx_tx),
+	/*  @GPIO(37,38): UART1(2) */
+        PINCTRL_PIN_GROUP("uart1_3_cts_rts", mt7986_uart1_3_cts_rts),
+	/*  @GPIO(33,36): SPI1(4) */
+        PINCTRL_PIN_GROUP("spi1_3", mt7986_spi1_3),
+	/*  @GPIO(39,40): UART0(1) */
+        PINCTRL_PIN_GROUP("uart0", mt7986_uart0),
+	/*  @GPIO(41): PCIE_PERESET_N(1) */
+        PINCTRL_PIN_GROUP("pcie_reset", mt7986_pcie_reset),
+	/*  @GPIO(42,45): UART1(1) */
+        PINCTRL_PIN_GROUP("uart1", mt7986_uart1),
+	/*  @GPIO(46,49): UART2(1) */
+        PINCTRL_PIN_GROUP("uart2", mt7986_uart2),
+	/*  @GPIO(50,61): EMMC(1) */
+        PINCTRL_PIN_GROUP("emmc_51", mt7986_emmc_51),
+	/*  @GPIO(62,65): PCM(1) */
+        PINCTRL_PIN_GROUP("pcm", mt7986_pcm),
+	/*  @GPIO(66): MT7531_INT(1) */
+        PINCTRL_PIN_GROUP("switch_int", mt7986_switch_int),
+	/*  @GPIO(67,68): MDC_MDIO(1) */
+        PINCTRL_PIN_GROUP("mdc_mdio", mt7986_mdc_mdio),
+	/*  @GPIO(69,85): WF0_MODE1(1) */
+        PINCTRL_PIN_GROUP("wf0_mode1", mt7986_wf0_mode1),
+	/*  @GPIO(74,78): WF0_HB(2) */
+        PINCTRL_PIN_GROUP("wf0_hb", mt7986_wf0_hb),
+	/*  @GPIO(74,80): WF0_MODE3(3) */
+        PINCTRL_PIN_GROUP("wf0_mode3", mt7986_wf0_mode3),
+	/*  @GPIO(79,85): WF1_HB(2) */
+        PINCTRL_PIN_GROUP("wf1_hb", mt7986_wf1_hb),
+	/*  @GPIO(86,100): WF1_MODE1(1) */
+        PINCTRL_PIN_GROUP("wf1_mode1", mt7986_wf1_mode1),
+	/*  @GPIO(91,97): WF1_MODE2(2) */
+        PINCTRL_PIN_GROUP("wf1_mode2", mt7986_wf1_mode2),
+};
+
+/* Join groups that provide the same capability from the user's point of
+ * view, so they can be selected by function name through the device tree.
+ */
+/* Group-name lists consumed by mt7986_functions[] below; each list maps a
+ * function name to the selectable groups for that function.
+ */
+static const char *mt7986_ethernet_groups[] = { "mdc_mdio", "wf0_mode1", "wf0_hb",
+						"wf0_mode3", "wf1_hb", "wf1_mode1", "wf1_mode2" };
+static const char *mt7986_i2c_groups[] = { "i2c", };
+static const char *mt7986_led_groups[] = { "wifi_led", };
+static const char *mt7986_pwm_groups[] = { "pwm0", "pwm1_0", "pwm1_1", };
+static const char *mt7986_spi_groups[] = { "spi0", "spi1_0", "spi1_1",
+					   "spi1_2", "spi1_3", };
+static const char *mt7986_uart_groups[] = { "uart1_0", "uart1_1", "uart1_2",
+					    "uart1_3_rx_tx", "uart1_3_cts_rts",
+					    "uart2_0", "uart2_1",
+					    "uart0", "uart1", "uart2", };
+static const char *mt7986_wdt_groups[] = { "watchdog", };
+static const char *mt7986_flash_groups[] = { "snfi", "emmc_45", "emmc_51", "spi0", "spi0_wp_hold"};
+
+/* pinmux function table: function name -> list of candidate groups. */
+static const struct function_desc mt7986_functions[] = {
+	{"eth",	mt7986_ethernet_groups, ARRAY_SIZE(mt7986_ethernet_groups)},
+	{"i2c", mt7986_i2c_groups, ARRAY_SIZE(mt7986_i2c_groups)},
+	{"led",	mt7986_led_groups, ARRAY_SIZE(mt7986_led_groups)},
+	{"pwm",	mt7986_pwm_groups, ARRAY_SIZE(mt7986_pwm_groups)},
+	{"spi",	mt7986_spi_groups, ARRAY_SIZE(mt7986_spi_groups)},
+	{"uart", mt7986_uart_groups, ARRAY_SIZE(mt7986_uart_groups)},
+	{"watchdog", mt7986_wdt_groups, ARRAY_SIZE(mt7986_wdt_groups)},
+	{"flash", mt7986_flash_groups, ARRAY_SIZE(mt7986_flash_groups)},
+};
+
+/* External-interrupt (EINT) controller parameters for this SoC.
+ * ap_num equals the pin count so every pin has an EINT slot;
+ * db_cnt is the number of debounce channels.
+ */
+static const struct mtk_eint_hw mt7986_eint_hw = {
+	.port_mask = 7,
+	.ports     = 7,
+	.ap_num    = ARRAY_SIZE(mt7986_pins),
+	.db_cnt    = 16,
+};
+
+/* Names of the MMIO register regions expected in the device tree, in the
+ * order the core driver indexes them.
+ */
+static const char * const mt7986_pinctrl_register_base_names[] = {
+	"gpio_base", "iocfg_rt_base", "iocfg_rb_base", "iocfg_lt_base",
+	"iocfg_lb_base", "iocfg_tr_base", "iocfg_tl_base",
+};
+
+/* SoC description handed to the shared "moore" pinctrl core: pin/group/
+ * function tables plus the per-SoC pinconf callbacks.
+ */
+static struct mtk_pin_soc mt7986_data = {
+	.reg_cal = mt7986_reg_cals,
+	.pins = mt7986_pins,
+	.npins = ARRAY_SIZE(mt7986_pins),
+	.grps = mt7986_groups,
+	.ngrps = ARRAY_SIZE(mt7986_groups),
+	.funcs = mt7986_functions,
+	.nfuncs = ARRAY_SIZE(mt7986_functions),
+	.eint_hw = &mt7986_eint_hw,
+	.gpio_m = 0,			/* mux value 0 selects GPIO mode */
+	.ies_present = false,		/* no input-enable (IES) control bits */
+	.base_names = mt7986_pinctrl_register_base_names,
+	.nbase_names = ARRAY_SIZE(mt7986_pinctrl_register_base_names),
+	.bias_disable_set = mtk_pinconf_bias_disable_set,
+	.bias_disable_get = mtk_pinconf_bias_disable_get,
+	.bias_set = mtk_pinconf_bias_set,
+	.bias_get = mtk_pinconf_bias_get,
+	.drive_set = mtk_pinconf_drive_set_rev1,
+	.drive_get = mtk_pinconf_drive_get_rev1,
+	.adv_pull_get = mtk_pinconf_adv_pull_get,
+	.adv_pull_set = mtk_pinconf_adv_pull_set,
+};
+
+/* Device-tree match table (driver is built-in; no MODULE_DEVICE_TABLE). */
+static const struct of_device_id mt7986_pinctrl_of_match[] = {
+	{ .compatible = "mediatek,mt7986-pinctrl", },
+	{}
+};
+
+/* Probe: delegate entirely to the shared moore pinctrl core with the
+ * MT7986 SoC description. Returns 0 or a negative errno from the core.
+ */
+static int mt7986_pinctrl_probe(struct platform_device *pdev)
+{
+	return mtk_moore_pinctrl_probe(pdev, &mt7986_data);
+}
+
+/* Platform driver glue; matched against the DT compatible above. */
+static struct platform_driver mt7986_pinctrl_driver = {
+	.driver = {
+		.name = "mt7986-pinctrl",
+		.of_match_table = mt7986_pinctrl_of_match,
+	},
+	.probe = mt7986_pinctrl_probe,
+};
+
+/* Register early (arch_initcall) so pinctrl is available before consumers
+ * such as MMC/SPI probe.
+ */
+static int __init mt7986_pinctrl_init(void)
+{
+	return platform_driver_register(&mt7986_pinctrl_driver);
+}
+arch_initcall(mt7986_pinctrl_init);
diff --git a/target/linux/mediatek/files-5.4/include/net/ra_nat.h b/target/linux/mediatek/files-5.4/include/net/ra_nat.h
new file mode 100755
index 0000000..7ac691c
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/include/net/ra_nat.h
@@ -0,0 +1,537 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ */
+
+#ifndef _RA_NAT_WANTED
+#define _RA_NAT_WANTED
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+
+
+/* IPv6 next-header value for IPv4-in-IPv6 (protocol 4), if not already
+ * provided by the kernel headers.
+ */
+#ifndef NEXTHDR_IPIP
+#define NEXTHDR_IPIP 4
+#endif
+
+/* VLAN tag helpers operating on skb->vlan_tci.
+ * NOTE(review): relies on the legacy VLAN_TAG_PRESENT bit (kernel <= 5.4);
+ * confirm target kernel before reuse.
+ */
+#define hwnat_vlan_tx_tag_present(__skb)     ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
+#define hwnat_vlan_tag_get(__skb)         ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
+
+/* When the HW NAT engine is built in these helpers come from the hnat
+ * driver; otherwise provide empty inline stubs so callers can invoke them
+ * unconditionally.
+ */
+#if defined(CONFIG_HW_NAT)
+extern void hwnat_magic_tag_set_zero(struct sk_buff *skb);
+extern void hwnat_check_magic_tag(struct sk_buff *skb);
+extern void hwnat_set_headroom_zero(struct sk_buff *skb);
+extern void hwnat_set_tailroom_zero(struct sk_buff *skb);
+extern void hwnat_copy_headroom(u8 *data, struct sk_buff *skb);
+extern void hwnat_copy_tailroom(u8 *data, int size, struct sk_buff *skb);
+extern void hwnat_setup_dma_ops(struct device *dev, bool coherent);
+#else
+
+static inline void hwnat_magic_tag_set_zero(struct sk_buff *skb)
+{
+}
+
+static inline void hwnat_check_magic_tag(struct sk_buff *skb)
+{
+}
+
+static inline void hwnat_set_headroom_zero(struct sk_buff *skb)
+{
+}
+
+static inline void hwnat_set_tailroom_zero(struct sk_buff *skb)
+{
+}
+
+static inline void hwnat_copy_headroom(u8 *data, struct sk_buff *skb)
+{
+}
+
+static inline void hwnat_copy_tailroom(u8 *data, int size, struct sk_buff *skb)
+{
+}
+
+/* Stub was missing: without it, any CONFIG_HW_NAT=n caller of
+ * hwnat_setup_dma_ops() fails to build/link.
+ */
+static inline void hwnat_setup_dma_ops(struct device *dev, bool coherent)
+{
+}
+
+#endif
+/* CRSN (CPU reason) codes reported by the PPE when a packet is punted to
+ * the CPU instead of being hardware-forwarded.
+ */
+enum foe_cpu_reason {
+	TTL_0 = 0x02,		/* IPv4(IPv6) TTL(hop limit) = 0 */
+	/* IPv4(IPv6) has option(extension) header */
+	HAS_OPTION_HEADER = 0x03,
+	NO_FLOW_IS_ASSIGNED = 0x07,	/* No flow is assigned */
+	/* IPv4 HNAT doesn't support IPv4 /w fragment */
+	IPV4_WITH_FRAGMENT = 0x08,
+	/* IPv4 HNAPT/DS-Lite doesn't support IPv4 /w fragment */
+	IPV4_HNAPT_DSLITE_WITH_FRAGMENT = 0x09,
+	/* IPv4 HNAPT/DS-Lite can't find TCP/UDP sport/dport */
+	IPV4_HNAPT_DSLITE_WITHOUT_TCP_UDP = 0x0A,
+	/* IPv6 5T-route/6RD can't find TCP/UDP sport/dport */
+	IPV6_5T_6RD_WITHOUT_TCP_UDP = 0x0B,
+	/* Ingress packet is TCP fin/syn/rst */
+	/*(for IPv4 NAPT/DS-Lite or IPv6 5T-route/6RD) */
+	TCP_FIN_SYN_RST = 0x0C,
+	UN_HIT = 0x0D,		/* FOE Un-hit */
+	HIT_UNBIND = 0x0E,	/* FOE Hit unbind */
+	/* FOE Hit unbind & rate reach */
+	HIT_UNBIND_RATE_REACH = 0x0F,
+	HIT_BIND_TCP_FIN = 0x10,	/* Hit bind PPE TCP FIN entry */
+	/* Hit bind PPE entry and TTL(hop limit) = 1 */
+	/* and TTL(hot limit) - 1 */
+	HIT_BIND_TTL_1 = 0x11,
+	/* Hit bind and VLAN replacement violation */
+	/*(Ingress 1(0) VLAN layers and egress 4(3 or 4) VLAN layers) */
+	HIT_BIND_WITH_VLAN_VIOLATION = 0x12,
+	/* Hit bind and keep alive with unicast old-header packet */
+	HIT_BIND_KEEPALIVE_UC_OLD_HDR = 0x13,
+	/* Hit bind and keep alive with multicast new-header packet */
+	HIT_BIND_KEEPALIVE_MC_NEW_HDR = 0x14,
+	/* Hit bind and keep alive with duplicate old-header packet */
+	HIT_BIND_KEEPALIVE_DUP_OLD_HDR = 0x15,
+	/* FOE Hit bind & force to CPU */
+	HIT_BIND_FORCE_TO_CPU = 0x16,
+	/* Hit bind and remove tunnel IP header, */
+	/* but inner IP has option/next header */
+	HIT_BIND_WITH_OPTION_HEADER = 0x17,
+	/* Hit bind and exceed MTU */
+	HIT_BIND_EXCEED_MTU = 0x1C,
+	HIT_BIND_PACKET_SAMPLING = 0x1B,	/*  PS packet */
+	/*  Switch clone multicast packet to CPU */
+	HIT_BIND_MULTICAST_TO_CPU = 0x18,
+	/*  Switch clone multicast packet to GMAC1 & CPU */
+	HIT_BIND_MULTICAST_TO_GMAC_CPU = 0x19,
+	HIT_PRE_BIND = 0x1A	/*  Pre-bind */
+};
+
+/* Maximum number of external interfaces tracked in dst_port[]. */
+#define MAX_IF_NUM 64
+
+/* FOE metadata layout when carried in the DMAD descriptor area at
+ * skb->head. Bitfield layout must match hardware/driver expectations
+ * exactly — do not reorder or resize fields.
+ */
+struct dmad_rx_descinfo4 {
+	uint32_t foe_entry_num:15;	/* FOE table entry index */
+	uint32_t rsv0:3;
+	uint32_t CRSN:5;		/* CPU reason, see enum foe_cpu_reason */
+	uint32_t rsv1:3;
+	uint32_t SPORT:4;		/* source port id */
+	uint32_t ppe:1;			/* which PPE instance */
+	uint32_t ALG:1;
+	uint32_t IF:8;			/* interface magic tag */
+	uint32_t WDMAID:2;
+	uint32_t RXID:2;
+	uint32_t WCID:10;		/* WLAN client id */
+	uint32_t BSSID:6;
+	uint32_t rsv3:4;
+	uint16_t minfo:1;
+	uint16_t ntype:3;
+	uint16_t chid:8;
+	uint16_t rsv4:4;
+	u16 MAGIC_TAG_PROTECT;		/* must equal TAG_PROTECT when valid */
+} __packed;
+
+/* FOE metadata layout when stored in the skb tail room (PDMA path).
+ * Note the FOE entry index is split: foe_entry_num (low 14 bits) plus
+ * foe_entry_num_1 (bit 14); see FOE_ENTRY_NUM_TAIL().
+ */
+struct pdma_rx_desc_info4 {
+	u16 MAGIC_TAG_PROTECT;		/* must equal TAG_PROTECT when valid */
+	uint32_t foe_entry_num:14;
+	uint32_t CRSN:5;
+	uint32_t SPORT:4;
+	uint32_t rsv:6;
+	uint32_t foe_entry_num_1:1;	/* MSB (bit 14) of the entry index */
+	uint32_t ppe:1;
+	uint32_t ALG:1;
+	uint32_t IF:8;
+	uint32_t WDMAID:2;
+	uint32_t RXID:2;
+	uint32_t WCID:10;
+	uint32_t BSSID:6;
+	uint32_t rsv2:4;
+	uint16_t minfo:1;
+	uint16_t ntype:3;
+	uint16_t chid:8;
+	uint16_t rsv3:4;
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+	u16 SOURCE;
+	u16 DEST;
+#endif
+} __packed;
+
+/* FOE metadata layout when stored in the skb head room. Same split
+ * entry-index encoding as pdma_rx_desc_info4, with the protect tag at
+ * the end instead of the start.
+ */
+struct head_rx_descinfo4 {
+	uint32_t foe_entry_num:14;
+	uint32_t CRSN:5;
+	uint32_t SPORT:4;
+	uint32_t rsv:6;
+	uint32_t foe_entry_num_1:1;	/* MSB (bit 14) of the entry index */
+	uint32_t ppe:1;
+	uint32_t ALG:1;
+	uint32_t IF:8;
+	uint32_t WDMAID:2;
+	uint32_t RXID:2;
+	uint32_t WCID:10;
+	uint32_t BSSID:6;
+	uint32_t rsv2:4;
+	uint16_t minfo:1;
+	uint16_t ntype:3;
+	uint16_t chid:8;
+	uint16_t rsv3:4;
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+	u16 SOURCE;
+	u16 DEST;
+#endif
+	u16 MAGIC_TAG_PROTECT;		/* must equal TAG_PROTECT when valid */
+} __packed;
+
+/* FOE metadata layout when stored in skb->cb (at CB_OFFSET). Guarded by
+ * two protect tags, one at each end, which must match each other.
+ */
+struct cb_rx_desc_info4 {
+	u16 MAGIC_TAG_PROTECT0;
+	uint32_t foe_entry_num:15;
+	uint32_t CRSN:5;
+	uint32_t SPORT:4;
+	uint32_t ALG:1;
+	uint32_t rsv:7;
+	uint16_t IF:8;
+	uint16_t WDMAID:2;
+	uint16_t RXID:2;
+	uint16_t WCID:10;
+	uint16_t BSSID:6;
+	uint16_t rsv1:4;
+	uint16_t minfo:1;
+	uint16_t ntype:3;
+	uint16_t chid:8;
+	uint16_t rsv2:4;
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+	u16 SOURCE;
+	u16 DEST;
+#endif
+	u16 MAGIC_TAG_PROTECT1;
+} __packed;
+
+
+
+/* Length (bytes) of the FOE metadata block in the skb head/tail room. */
+#define FOE_INFO_LEN		    12
+#define WIFI_INFO_LEN		    6
+
+
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+/* PPTP/L2TP needs extra room; #undef first so the redefinition with a
+ * different value is not a C macro-redefinition violation (C11 6.10.3p2).
+ */
+#undef FOE_INFO_LEN
+#define FOE_INFO_LEN		    (6 + 4 + WIFI_INFO_LEN)
+#define FOE_MAGIC_FASTPATH	    0x77
+#define FOE_MAGIC_L2TPPATH	    0x78
+#endif
+
+/* Interface magic tags written into the IF field of the descriptors. */
+#define FOE_MAGIC_PCI		    0x73
+#define FOE_MAGIC_WLAN		    0x74
+#define FOE_MAGIC_GE		    0x75
+#define FOE_MAGIC_PPE		    0x76
+/* NOTE(review): 0x78 collides with FOE_MAGIC_L2TPPATH when
+ * CONFIG_RA_HW_NAT_PPTP_L2TP is enabled — confirm this is intended.
+ */
+#define FOE_MAGIC_WED0		    0x78
+#define FOE_MAGIC_WED1		    0x79
+#define FOE_MAGIC_MED		    0x80
+#define FOE_MAGIC_EDMA0		    0x81
+#define FOE_MAGIC_EDMA1		    0x82
+#define TAG_PROTECT                 0x6789
+/* Where the FOE info block is stored for a given skb. */
+#define USE_HEAD_ROOM               0
+#define USE_TAIL_ROOM               1
+#define USE_CB                      2
+#define ALL_INFO_ERROR              3
+
+/**************************DMAD FORMAT********************************/
+/* Accessors for FOE metadata interpreted as dmad_rx_descinfo4 at skb->head. */
+#define FOE_TAG_PROTECT(skb)  \
+	(((struct dmad_rx_descinfo4 *)((skb)->head))->MAGIC_TAG_PROTECT)
+
+#define FOE_ENTRY_NUM(skb)  \
+	(((struct dmad_rx_descinfo4 *)((skb)->head))->foe_entry_num)
+#define FOE_ALG(skb)  \
+	(((struct dmad_rx_descinfo4 *)((skb)->head))->ALG)
+#define FOE_AI(skb)  \
+	(((struct dmad_rx_descinfo4 *)((skb)->head))->CRSN)
+#define FOE_SP(skb)  \
+	(((struct dmad_rx_descinfo4 *)((skb)->head))->SPORT)
+#define FOE_MAGIC_TAG(skb)  \
+	(((struct dmad_rx_descinfo4 *)((skb)->head))->IF)
+#define FOE_WDMA_ID(skb)  \
+	(((struct dmad_rx_descinfo4 *)((skb)->head))->WDMAID)
+#define FOE_RX_ID(skb)	(((struct dmad_rx_descinfo4 *)((skb)->head))->RXID)
+#define FOE_WC_ID(skb)	(((struct dmad_rx_descinfo4 *)((skb)->head))->WCID)
+#define FOE_BSS_ID(skb)	(((struct dmad_rx_descinfo4 *)((skb)->head))->BSSID)
+#define FOE_PPE(skb)	(((struct dmad_rx_descinfo4 *)((skb)->head))->ppe)
+
+/***********************HEAD FORMAT*************************************/
+/* Accessors for FOE metadata interpreted as head_rx_descinfo4 at skb->head. */
+
+#define FOE_TAG_PROTECT_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->MAGIC_TAG_PROTECT)
+#define FOE_ENTRY_NUM_LSB_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->foe_entry_num)
+#define FOE_ENTRY_NUM_MSB_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->foe_entry_num_1)
+
+/* Recombine the split 15-bit FOE entry index (bit 14 | bits 13..0). */
+#define FOE_ENTRY_NUM_HEAD(skb)  \
+	(((FOE_ENTRY_NUM_MSB_HEAD(skb) & 0x1) << 14) | FOE_ENTRY_NUM_LSB_HEAD(skb))
+
+
+#define FOE_ALG_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->ALG)
+#define FOE_AI_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->CRSN)
+#define FOE_SP_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->SPORT)
+#define FOE_MAGIC_TAG_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->IF)
+
+
+#define FOE_WDMA_ID_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->WDMAID)
+#define FOE_RX_ID_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->RXID)
+#define FOE_WC_ID_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->WCID)
+#define FOE_BSS_ID_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->BSSID)
+/* Fixed: the struct field is lowercase 'ppe'; '->PPE' did not compile. */
+#define FOE_PPE_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->ppe)
+
+/****************************TAIL FORMAT***************************************/
+/* Accessors for FOE metadata stored in the last FOE_INFO_LEN bytes of the
+ * skb data area, interpreted as pdma_rx_desc_info4.
+ */
+#define FOE_TAG_PROTECT_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->MAGIC_TAG_PROTECT)
+#define FOE_ENTRY_NUM_LSB_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->foe_entry_num)
+
+#define FOE_ENTRY_NUM_MSB_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->foe_entry_num_1)
+/* Recombine the split 15-bit FOE entry index (bit 14 | bits 13..0). */
+#define FOE_ENTRY_NUM_TAIL(skb)  \
+	(((FOE_ENTRY_NUM_MSB_TAIL(skb) & 0x1) << 14) | FOE_ENTRY_NUM_LSB_TAIL(skb))
+#define FOE_ALG_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->ALG)
+#define FOE_AI_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->CRSN)
+#define FOE_SP_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->SPORT)
+#define FOE_MAGIC_TAG_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->IF)
+
+#define FOE_WDMA_ID_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->WDMAID)
+#define FOE_RX_ID_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->RXID)
+#define FOE_WC_ID_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->WCID)
+#define FOE_BSS_ID_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->BSSID)
+
+#define FOE_PPE_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->ppe)
+/*********************************************************************/
+#define FOE_WDMA_ID_CB(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->head))->WDMAID)
+#define FOE_RX_ID_CB(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->head))->RXID)
+#define FOE_WC_ID_CB(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->head))->WCID)
+#define FOE_BSS_ID_CB(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->head))->BSSID)
+
+#define FOE_MINFO(skb)	(((struct head_rx_descinfo4 *)((skb)->head))->minfo)
+#define FOE_MINFO_NTYPE(skb)	(((struct head_rx_descinfo4 *)((skb)->head))->ntype)
+#define FOE_MINFO_CHID(skb)	(((struct head_rx_descinfo4 *)((skb)->head))->chid)
+#define FOE_MINFO_HEAD(skb)	(((struct head_rx_descinfo4 *)((skb)->head))->minfo)
+#define FOE_MINFO_NTYPE_HEAD(skb)	(((struct head_rx_descinfo4 *)((skb)->head))->ntype)
+#define FOE_MINFO_CHID_HEAD(skb)	(((struct head_rx_descinfo4 *)((skb)->head))->chid)
+
+#define FOE_MINFO_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->minfo)
+#define FOE_MINFO_NTYPE_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->ntype)
+#define FOE_MINFO_CHID_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->chid)
+
+/* PPTP/L2TP source/dest port accessors (head layout). */
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+#define FOE_SOURCE(skb)	(((struct head_rx_descinfo4 *)((skb)->head))->SOURCE)
+#define FOE_DEST(skb)	(((struct head_rx_descinfo4 *)((skb)->head))->DEST)
+#endif
+
+/* True when the skb headroom can hold the FOE info block.
+ * (Copy-paste duplicates of this and the macros below were removed.)
+ */
+#define IS_SPACE_AVAILABLE_HEAD(skb)  \
+	((((skb_headroom(skb) >= FOE_INFO_LEN) ? 1 : 0)))
+#define FOE_INFO_START_ADDR_HEAD(skb)	(skb->head)
+
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+#define FOE_SOURCE_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->SOURCE)
+#define FOE_DEST_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->DEST)
+#endif
+
+/* True when the skb tailroom can hold the FOE info block. */
+#define IS_SPACE_AVAILABLE_TAIL(skb)  \
+	(((skb_tailroom(skb) >= FOE_INFO_LEN) ? 1 : 0))
+#define FOE_INFO_START_ADDR_TAIL(skb)  \
+	((unsigned char *)(long)(skb_end_pointer(skb) - FOE_INFO_LEN))
+
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+#define FOE_SOURCE_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->SOURCE)
+#define FOE_DEST_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->DEST)
+#endif
+
+/* change the position of skb_CB if necessary */
+#define CB_OFFSET		    40
+/* CB storage is always present inside the skb, so space is unconditional. */
+#define IS_SPACE_AVAILABLE_CB(skb)    1
+#define FOE_INFO_START_ADDR_CB(skb)    (skb->cb +  CB_OFFSET)
+#define FOE_TAG_PROTECT_CB0(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->MAGIC_TAG_PROTECT0)
+#define FOE_TAG_PROTECT_CB1(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->MAGIC_TAG_PROTECT1)
+#define FOE_ENTRY_NUM_CB(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->foe_entry_num)
+#define FOE_ALG_CB(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->ALG)
+#define FOE_AI_CB(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->CRSN)
+#define FOE_SP_CB(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->SPORT)
+#define FOE_MAGIC_TAG_CB(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->IF)
+
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+#define FOE_SOURCE_CB(skb)	(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->SOURCE)
+#define FOE_DEST_CB(skb)	(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->DEST)
+#endif
+
+/* Validity checks: the protect tag(s) must hold TAG_PROTECT; the CB form
+ * also requires both guard tags to agree.
+ */
+#define IS_MAGIC_TAG_PROTECT_VALID_HEAD(skb)  \
+	(FOE_TAG_PROTECT_HEAD(skb) == TAG_PROTECT)
+#define IS_MAGIC_TAG_PROTECT_VALID_TAIL(skb)  \
+	(FOE_TAG_PROTECT_TAIL(skb) == TAG_PROTECT)
+#define IS_MAGIC_TAG_PROTECT_VALID_CB(skb)  \
+	((FOE_TAG_PROTECT_CB0(skb) == TAG_PROTECT) && \
+	(FOE_TAG_PROTECT_CB0(skb) == FOE_TAG_PROTECT_CB1(skb)))
+
+/* Did the packet ingress from PCIe WLAN / WLAN / GE, per the magic tag?
+ * NOTE(review): only the HEAD variant also accepts FOE_MAGIC_GE — confirm
+ * whether the TAIL/CB variants should too.
+ */
+#define IS_IF_PCIE_WLAN_HEAD(skb)  \
+	((FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_PCI) || \
+	(FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_WLAN) || \
+	(FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_GE))
+
+#define IS_IF_PCIE_WLAN_TAIL(skb)  \
+	((FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_PCI) || \
+	(FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_WLAN))
+
+#define IS_IF_PCIE_WLAN_CB(skb)  \
+	((FOE_MAGIC_TAG_CB(skb) == FOE_MAGIC_PCI) || \
+	(FOE_MAGIC_TAG_CB(skb) == FOE_MAGIC_WLAN))
+
+/* macros */
+/* Clear the head and tail magic tags if they carry a PCI/WLAN/GE marker
+ * and the corresponding room actually holds a FOE info block. Statement
+ * macro: evaluates skb multiple times.
+ */
+#define magic_tag_set_zero(skb) \
+{ \
+	if ((FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_PCI) || \
+	    (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_WLAN) || \
+	    (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_GE)) { \
+		if (IS_SPACE_AVAILABLE_HEAD(skb)) \
+			FOE_MAGIC_TAG_HEAD(skb) = 0; \
+	} \
+	if ((FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_PCI) || \
+	    (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_WLAN) || \
+	    (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_GE)) { \
+		if (IS_SPACE_AVAILABLE_TAIL(skb)) \
+			FOE_MAGIC_TAG_TAIL(skb) = 0; \
+	} \
+}
+
+/* Mark the flow as un-hit (clear magic tag, set CRSN = UN_HIT) for TCP
+ * packets or UDP packets that are not L2TP (dest port 1701), so they are
+ * not accelerated. No-op unless CONFIG_RA_HW_NAT_PPTP_L2TP is set.
+ * NOTE(review): udp_hdr() is evaluated for any non-TCP protocol, not only
+ * UDP — confirm callers only pass TCP/UDP packets.
+ */
+static inline void hwnat_set_l2tp_unhit(struct iphdr *iph, struct sk_buff *skb)
+{
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+	/* only clear headroom for TCP OR non-L2TP packets */
+	if ((iph->protocol == 0x6) || (ntohs(udp_hdr(skb)->dest) != 1701)) {
+		if (IS_SPACE_AVAILABLE_HEAD(skb)) {
+			FOE_MAGIC_TAG(skb) = 0;
+			FOE_AI(skb) = UN_HIT;
+		}
+	}
+#endif
+}
+
+/* NOTE(review): the parameters are passed by value, so these assignments
+ * have no effect on the caller — this function is currently a no-op.
+ * It likely needs u32 * parameters; left unchanged because that would
+ * alter the public signature.
+ */
+static inline void hwnat_set_l2tp_fast_path(u32 l2tp_fast_path, u32 pptp_fast_path)
+{
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+	l2tp_fast_path = 1;
+	pptp_fast_path = 0;
+#endif
+}
+
+/* NOTE(review): same issue as hwnat_set_l2tp_fast_path — the by-value
+ * parameter makes this assignment invisible to the caller (no-op).
+ */
+static inline void hwnat_clear_l2tp_fast_path(u32 l2tp_fast_path)
+{
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+	l2tp_fast_path = 0;
+#endif
+}
+
+/* #define CONFIG_HW_NAT_IPI */
+/* Optional IPI-based load spreading for HW-NAT RX processing. Disabled by
+ * default (CONFIG_HW_NAT_IPI is commented out above).
+ */
+#if defined(CONFIG_HW_NAT_IPI)
+extern int debug_level;
+int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+		struct rps_dev_flow **rflowp);
+uint32_t ppe_extif_rx_handler(struct sk_buff *skb);
+int hitbind_force_to_cpu_handler(struct sk_buff *skb, struct foe_entry *entry);
+/* NOTE(review): file-scope arrays sized by num_possible_cpus() are
+ * variably modified types, which is not valid C at file scope — this
+ * section will not compile if CONFIG_HW_NAT_IPI is ever enabled.
+ */
+extern unsigned int ipidbg[num_possible_cpus()][10];
+extern unsigned int ipidbg2[num_possible_cpus()][10];
+/* #define HNAT_IPI_RXQUEUE	1 */
+#define HNAT_IPI_DQ		1
+#define HNAT_IPI_HASH_NORMAL	0
+#define HNAT_IPI_HASH_VTAG		1
+#define HNAT_IPI_HASH_FROM_EXTIF	2
+#define HNAT_IPI_HASH_FROM_GMAC		4
+
+/* Per-CPU IPI queueing state; queue representation picked at build time. */
+struct hnat_ipi_s {
+#if defined(HNAT_IPI_DQ)
+	struct sk_buff_head     skb_input_queue;
+	struct sk_buff_head     skb_process_queue;
+#elif defined(HNAT_IPI_RXQUEUE)
+	atomic_t rx_queue_num;
+	unsigned int rx_queue_ridx;
+	unsigned int rx_queue_widx;
+	struct sk_buff **rx_queue;
+#else
+	/* unsigned int dummy0[0]; */
+	struct sk_buff_head     skb_ipi_queue;
+	/* unsigned int dummy1[8]; */
+#endif
+	unsigned long time_rec, recv_time;
+	unsigned int ipi_accum;
+	/*hwnat ipi use*/
+	spinlock_t      ipilock;
+	struct tasklet_struct smp_func_call_tsk;
+} ____cacheline_aligned_in_smp;
+
+/* IPI drop/dispatch statistics. */
+struct hnat_ipi_stat {
+	unsigned long drop_pkt_num_from_extif;
+	unsigned long drop_pkt_num_from_ppehit;
+	unsigned int smp_call_cnt_from_extif;
+	unsigned int smp_call_cnt_from_ppehit;
+	atomic_t cpu_status;
+	/* atomic_t cpu_status_from_extif; */
+	/* atomic_t cpu_status_from_ppehit; */
+
+	/* atomic_t hook_status_from_extif; */
+	/* atomic_t hook_status_from_ppehit; */
+} ____cacheline_aligned_in_smp;
+
+/* Both paths currently share the single cpu_status field. */
+#define cpu_status_from_extif	cpu_status
+#define cpu_status_from_ppehit	cpu_status
+
+/* Runtime tuning knobs for the IPI paths. */
+struct hnat_ipi_cfg {
+	unsigned int enable_from_extif;
+	unsigned int enable_from_ppehit;
+	unsigned int queue_thresh_from_extif;
+	unsigned int queue_thresh_from_ppehit;
+	unsigned int drop_pkt_from_extif;
+	unsigned int drop_pkt_from_ppehit;
+	unsigned int ipi_cnt_mod_from_extif;
+	unsigned int ipi_cnt_mod_from_ppehit;
+} ____cacheline_aligned_in_smp;
+
+int hnat_ipi_init(void);
+int hnat_ipi_de_init(void);
+#endif
+
+/* RX ring identifiers used by the hooks. */
+#define QDMA_RX		5
+#define PDMA_RX		0
+
+
+#endif /* _RA_NAT_WANTED */
diff --git a/target/linux/mediatek/files-5.4/net/nat/foe_hook/Makefile b/target/linux/mediatek/files-5.4/net/nat/foe_hook/Makefile
new file mode 100755
index 0000000..b0d41e5
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/net/nat/foe_hook/Makefile
@@ -0,0 +1,5 @@
+# Always build the foe_hook module object (built-in) from its two parts:
+# hook_base.c (core rx/tx hook pointers) and hook_ext.c (extended hooks).
+obj-y		+= foe_hook.o
+
+foe_hook-objs	+= hook_base.o
+foe_hook-objs	+= hook_ext.o
+
diff --git a/target/linux/mediatek/files-5.4/net/nat/foe_hook/hook_base.c b/target/linux/mediatek/files-5.4/net/nat/foe_hook/hook_base.c
new file mode 100755
index 0000000..2e41170
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/net/nat/foe_hook/hook_base.c
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <net/ra_nat.h>
+/* NOTE(review): PURPOSE is defined but never used in this file. */
+#define PURPOSE "FAST_NAT_SUPPORT"
+
+/* Core HW-NAT hook pointers. NULL until the hnat driver installs its
+ * handlers; callers must check for NULL before invoking.
+ */
+int (*ra_sw_nat_hook_rx)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(ra_sw_nat_hook_rx);
+
+int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no) = NULL;
+EXPORT_SYMBOL(ra_sw_nat_hook_tx);
diff --git a/target/linux/mediatek/files-5.4/net/nat/foe_hook/hook_ext.c b/target/linux/mediatek/files-5.4/net/nat/foe_hook/hook_ext.c
new file mode 100755
index 0000000..72afec4
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/net/nat/foe_hook/hook_ext.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <net/ra_nat.h>
+
+struct net_device	*dst_port[MAX_IF_NUM];
+EXPORT_SYMBOL(dst_port);
+u8 dst_port_type[MAX_IF_NUM];
+EXPORT_SYMBOL(dst_port_type);
+
+struct foe_entry *ppe_virt_foe_base_tmp;
+EXPORT_SYMBOL(ppe_virt_foe_base_tmp);
+struct foe_entry *ppe1_virt_foe_base_tmp;
+EXPORT_SYMBOL(ppe1_virt_foe_base_tmp);
+
+int (*ppe_hook_rx_wifi)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(ppe_hook_rx_wifi);
+int (*ppe_hook_tx_wifi)(struct sk_buff *skb, int gmac_no) = NULL;
+EXPORT_SYMBOL(ppe_hook_tx_wifi);
+
+int (*ppe_hook_rx_modem)(struct sk_buff *skb, u32 cpu_reason, u32 foe_entry_num) = NULL;
+EXPORT_SYMBOL(ppe_hook_rx_modem);
+int (*ppe_hook_tx_modem)(struct sk_buff *skb, u32 net_type, u32 channel_id) = NULL;
+EXPORT_SYMBOL(ppe_hook_tx_modem);
+
+int (*ppe_hook_rx_eth)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(ppe_hook_rx_eth);
+int (*ppe_hook_tx_eth)(struct sk_buff *skb, int gmac_no) = NULL;
+EXPORT_SYMBOL(ppe_hook_tx_eth);
+
+int (*ppe_hook_rx_ext)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(ppe_hook_rx_ext);
+int (*ppe_hook_tx_ext)(struct sk_buff *skb, int gmac_no) = NULL;
+EXPORT_SYMBOL(ppe_hook_tx_ext);
+
+void (*ppe_dev_register_hook)(struct net_device *dev) = NULL;
+EXPORT_SYMBOL(ppe_dev_register_hook);
+void (*ppe_dev_unregister_hook)(struct net_device *dev) = NULL;
+EXPORT_SYMBOL(ppe_dev_unregister_hook);
+
+void hwnat_magic_tag_set_zero(struct sk_buff *skb)
+{
+	if ((FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_PCI) ||
+	    (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_WLAN) ||
+	    (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_GE)) {
+		if (IS_SPACE_AVAILABLE_HEAD(skb))
+			FOE_MAGIC_TAG_HEAD(skb) = 0;
+	}
+	if ((FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_PCI) ||
+	    (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_WLAN) ||
+	    (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_GE)) {
+		if (IS_SPACE_AVAILABLE_TAIL(skb))
+			FOE_MAGIC_TAG_TAIL(skb) = 0;
+	}
+}
+EXPORT_SYMBOL(hwnat_magic_tag_set_zero);
+
+void hwnat_check_magic_tag(struct sk_buff *skb)
+{
+	if (IS_SPACE_AVAILABLE_HEAD(skb)) {
+		FOE_MAGIC_TAG_HEAD(skb) = 0;
+		FOE_AI_HEAD(skb) = UN_HIT;
+	}
+	if (IS_SPACE_AVAILABLE_TAIL(skb)) {
+		FOE_MAGIC_TAG_TAIL(skb) = 0;
+		FOE_AI_TAIL(skb) = UN_HIT;
+	}
+}
+EXPORT_SYMBOL(hwnat_check_magic_tag);
+
+void hwnat_set_headroom_zero(struct sk_buff *skb)
+{
+	if (skb->cloned != 1) {
+		if (IS_MAGIC_TAG_PROTECT_VALID_HEAD(skb) ||
+		    (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_PPE)) {
+			if (IS_SPACE_AVAILABLE_HEAD(skb))
+				memset(FOE_INFO_START_ADDR_HEAD(skb), 0,
+				       FOE_INFO_LEN);
+		}
+	}
+}
+EXPORT_SYMBOL(hwnat_set_headroom_zero);
+
+void hwnat_set_tailroom_zero(struct sk_buff *skb)
+{
+	if (skb->cloned != 1) {
+		if (IS_MAGIC_TAG_PROTECT_VALID_TAIL(skb) ||
+		    (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_PPE)) {
+			if (IS_SPACE_AVAILABLE_TAIL(skb))
+				memset(FOE_INFO_START_ADDR_TAIL(skb), 0,
+				       FOE_INFO_LEN);
+		}
+	}
+}
+EXPORT_SYMBOL(hwnat_set_tailroom_zero);
+
+void hwnat_copy_headroom(u8 *data, struct sk_buff *skb)
+{
+	memcpy(data, skb->head, FOE_INFO_LEN);
+}
+EXPORT_SYMBOL(hwnat_copy_headroom);
+
+void hwnat_copy_tailroom(u8 *data, int size, struct sk_buff *skb)
+{
+	memcpy((data + size - FOE_INFO_LEN),
+	       (skb_end_pointer(skb) - FOE_INFO_LEN),
+	       FOE_INFO_LEN);
+}
+EXPORT_SYMBOL(hwnat_copy_tailroom);
+
+void hwnat_setup_dma_ops(struct device *dev, bool coherent)
+{
+	arch_setup_dma_ops(dev, 0, 0, NULL, coherent);
+}
+EXPORT_SYMBOL(hwnat_setup_dma_ops);
+
diff --git a/target/linux/mediatek/image/mt7986.mk b/target/linux/mediatek/image/mt7986.mk
new file mode 100644
index 0000000..d36cafa
--- /dev/null
+++ b/target/linux/mediatek/image/mt7986.mk
@@ -0,0 +1,128 @@
+KERNEL_LOADADDR := 0x44080000
+
+define Device/mt7986a-mt7975-ax6000-rfb1
+  DEVICE_VENDOR := MediaTek
+  DEVICE_MODEL := mt7986a-mt7975-ax6000-rfb1
+  DEVICE_DTS := mt7986a-mt7975-ax6000-rfb1
+  DEVICE_DTS_DIR := $(DTS_DIR)/mediatek
+endef
+TARGET_DEVICES += mt7986a-mt7975-ax6000-rfb1
+
+define Device/mt7986a-mt7975-ax6000-rfb1-snand
+  DEVICE_VENDOR := MediaTek
+  DEVICE_MODEL := mt7986a-mt7975-ax6000-rfb1 (SPI-NAND,UBI)
+  DEVICE_DTS := mt7986a-mt7975-ax6000-rfb1
+  DEVICE_DTS_DIR := $(DTS_DIR)/mediatek
+  SUPPORTED_DEVICES := mediatek,mt7986-rfb-snand
+  UBINIZE_OPTS := -E 5
+  BLOCKSIZE := 128k
+  PAGESIZE := 2048
+  IMAGE_SIZE := 65536k
+  KERNEL_IN_UBI := 1
+  IMAGES += factory.bin
+  IMAGE/factory.bin := append-ubi | check-size $$$$(IMAGE_SIZE)
+  IMAGE/sysupgrade.bin := sysupgrade-tar | append-metadata
+endef
+TARGET_DEVICES += mt7986a-mt7975-ax6000-rfb1-snand
+
+define Device/mt7986a-mt7976-ax6000-rfb2
+  DEVICE_VENDOR := MediaTek
+  DEVICE_MODEL := mt7986a-mt7976-ax6000-rfb2
+  DEVICE_DTS := mt7986a-mt7976-ax6000-rfb2
+  DEVICE_DTS_DIR := $(DTS_DIR)/mediatek
+endef
+TARGET_DEVICES += mt7986a-mt7976-ax6000-rfb2
+
+define Device/mt7986a-mt7976-ax6000-rfb2-snand
+  DEVICE_VENDOR := MediaTek
+  DEVICE_MODEL := mt7986a-mt7976-ax6000-rfb2 (SPI-NAND,UBI)
+  DEVICE_DTS := mt7986a-mt7976-ax6000-rfb2
+  DEVICE_DTS_DIR := $(DTS_DIR)/mediatek
+  SUPPORTED_DEVICES := mediatek,mt7986-rfb-snand
+  UBINIZE_OPTS := -E 5
+  BLOCKSIZE := 128k
+  PAGESIZE := 2048
+  IMAGE_SIZE := 65536k
+  KERNEL_IN_UBI := 1
+  IMAGES += factory.bin
+  IMAGE/factory.bin := append-ubi | check-size $$$$(IMAGE_SIZE)
+  IMAGE/sysupgrade.bin := sysupgrade-tar | append-metadata
+endef
+TARGET_DEVICES += mt7986a-mt7976-ax6000-rfb2-snand
+
+define Device/mt7986a-mt7976-ax7800-rfb2
+  DEVICE_VENDOR := MediaTek
+  DEVICE_MODEL := mt7986a-mt7976-ax7800-rfb2
+  DEVICE_DTS := mt7986a-mt7976-ax7800-rfb2
+  DEVICE_DTS_DIR := $(DTS_DIR)/mediatek
+endef
+TARGET_DEVICES += mt7986a-mt7976-ax7800-rfb2
+
+define Device/mt7986a-mt7976-ax7800-rfb2-snand
+  DEVICE_VENDOR := MediaTek
+  DEVICE_MODEL := mt7986a-mt7976-ax7800-rfb2 (SPI-NAND,UBI)
+  DEVICE_DTS := mt7986a-mt7976-ax7800-rfb2
+  DEVICE_DTS_DIR := $(DTS_DIR)/mediatek
+  SUPPORTED_DEVICES := mediatek,mt7986-rfb-snand
+  UBINIZE_OPTS := -E 5
+  BLOCKSIZE := 128k
+  PAGESIZE := 2048
+  IMAGE_SIZE := 65536k
+  KERNEL_IN_UBI := 1
+  IMAGES += factory.bin
+  IMAGE/factory.bin := append-ubi | check-size $$$$(IMAGE_SIZE)
+  IMAGE/sysupgrade.bin := sysupgrade-tar | append-metadata
+endef
+TARGET_DEVICES += mt7986a-mt7976-ax7800-rfb2-snand
+
+define Device/mt7986b-mt7975-ax6000-rfb1
+  DEVICE_VENDOR := MediaTek
+  DEVICE_MODEL := mt7986b-mt7975-ax6000-rfb1
+  DEVICE_DTS := mt7986b-mt7975-ax6000-rfb1
+  DEVICE_DTS_DIR := $(DTS_DIR)/mediatek
+endef
+TARGET_DEVICES += mt7986b-mt7975-ax6000-rfb1
+
+define Device/mt7986b-mt7975-ax6000-rfb1-snand
+  DEVICE_VENDOR := MediaTek
+  DEVICE_MODEL := mt7986b-mt7975-ax6000-rfb1 (SPI-NAND,UBI)
+  DEVICE_DTS := mt7986b-mt7975-ax6000-rfb1
+  DEVICE_DTS_DIR := $(DTS_DIR)/mediatek
+  SUPPORTED_DEVICES := mediatek,mt7986-rfb-snand
+  UBINIZE_OPTS := -E 5
+  BLOCKSIZE := 128k
+  PAGESIZE := 2048
+  IMAGE_SIZE := 65536k
+  KERNEL_IN_UBI := 1
+  IMAGES += factory.bin
+  IMAGE/factory.bin := append-ubi | check-size $$$$(IMAGE_SIZE)
+  IMAGE/sysupgrade.bin := sysupgrade-tar | append-metadata
+endef
+TARGET_DEVICES += mt7986b-mt7975-ax6000-rfb1-snand
+
+define Device/mediatek_mt7986-fpga
+  DEVICE_VENDOR := MediaTek
+  DEVICE_MODEL := MTK7986 FPGA
+  DEVICE_DTS := mt7986-fpga
+  DEVICE_DTS_DIR := $(DTS_DIR)/mediatek
+  IMAGE/sysupgrade.bin := append-kernel | pad-to 256k | \
+       append-rootfs | pad-rootfs | append-metadata
+endef
+TARGET_DEVICES += mediatek_mt7986-fpga
+
+define Device/mediatek_mt7986-fpga-ubi
+  DEVICE_VENDOR := MediaTek
+  DEVICE_MODEL := MTK7986 FPGA (UBI)
+  DEVICE_DTS := mt7986-fpga-ubi
+  DEVICE_DTS_DIR := $(DTS_DIR)/mediatek
+  SUPPORTED_DEVICES := mediatek,mt7986-fpga,ubi
+  UBINIZE_OPTS := -E 5
+  BLOCKSIZE := 128k
+  PAGESIZE := 2048
+  IMAGE_SIZE := 65536k
+  KERNEL_IN_UBI := 1
+  IMAGES += factory.bin
+  IMAGE/factory.bin := append-ubi | check-size $$$$(IMAGE_SIZE)
+  IMAGE/sysupgrade.bin := sysupgrade-tar | append-metadata
+endef
+TARGET_DEVICES += mediatek_mt7986-fpga-ubi
diff --git a/target/linux/mediatek/modules.mk b/target/linux/mediatek/modules.mk
new file mode 100644
index 0000000..67a983a
--- /dev/null
+++ b/target/linux/mediatek/modules.mk
@@ -0,0 +1,83 @@
+define KernelPackage/ata-ahci-mtk
+  TITLE:=MediaTek AHCI Serial ATA support
+  KCONFIG:=CONFIG_AHCI_MTK
+  FILES:= \
+	$(LINUX_DIR)/drivers/ata/ahci_mtk.ko \
+	$(LINUX_DIR)/drivers/ata/libahci_platform.ko
+  AUTOLOAD:=$(call AutoLoad,40,libahci libahci_platform ahci_mtk,1)
+  $(call AddDepends/ata)
+  DEPENDS+=@TARGET_mediatek_mt7622
+endef
+
+define KernelPackage/ata-ahci-mtk/description
+ MediaTek AHCI Serial ATA host controllers
+endef
+
+$(eval $(call KernelPackage,ata-ahci-mtk))
+
+define KernelPackage/btmtkuart
+  SUBMENU:=Other modules
+  TITLE:=MediaTek HCI UART driver
+  DEPENDS:=@TARGET_mediatek_mt7622 +kmod-bluetooth +mt7622bt-firmware
+  KCONFIG:=CONFIG_BT_MTKUART
+  FILES:= \
+        $(LINUX_DIR)/drivers/bluetooth/btmtkuart.ko
+  AUTOLOAD:=$(call AutoProbe,btmtkuart)
+endef
+
+$(eval $(call KernelPackage,btmtkuart))
+
+define KernelPackage/sdhci-mtk
+  SUBMENU:=Other modules
+  TITLE:=MediaTek SDHCI driver
+  DEPENDS:=@TARGET_mediatek_mt7622 +kmod-sdhci
+  KCONFIG:=CONFIG_MMC_MTK
+  FILES:= \
+	$(LINUX_DIR)/drivers/mmc/host/mtk-sd.ko
+  AUTOLOAD:=$(call AutoProbe,mtk-sd,1)
+endef
+
+$(eval $(call KernelPackage,sdhci-mtk))
+
+define KernelPackage/crypto-hw-mtk
+  TITLE:= MediaTek's Crypto Engine module
+  DEPENDS:=@TARGET_mediatek
+  KCONFIG:= \
+	CONFIG_CRYPTO_HW=y \
+	CONFIG_CRYPTO_AES=y \
+	CONFIG_CRYPTO_AEAD=y \
+	CONFIG_CRYPTO_SHA1=y \
+	CONFIG_CRYPTO_SHA256=y \
+	CONFIG_CRYPTO_SHA512=y \
+	CONFIG_CRYPTO_HMAC=y \
+	CONFIG_CRYPTO_DEV_MEDIATEK
+  FILES:=$(LINUX_DIR)/drivers/crypto/mediatek/mtk-crypto.ko
+  AUTOLOAD:=$(call AutoLoad,90,mtk-crypto)
+  $(call AddDepends/crypto)
+endef
+
+define KernelPackage/crypto-hw-mtk/description
+  MediaTek's EIP97 Cryptographic Engine driver.
+endef
+
+$(eval $(call KernelPackage,crypto-hw-mtk))
+
+define KernelPackage/mediatek_hnat
+  SUBMENU:=Network Devices
+  TITLE:=MediaTek HNAT module
+  DEPENDS:=@TARGET_mediatek +kmod-nf-conntrack
+  KCONFIG:= \
+	CONFIG_BRIDGE_NETFILTER=y \
+	CONFIG_NETFILTER_FAMILY_BRIDGE=y \
+	CONFIG_NET_MEDIATEK_HNAT \
+	CONFIG_NET_MEDIATEK_HW_QOS=n
+  FILES:= \
+        $(LINUX_DIR)/drivers/net/ethernet/mediatek/mtk_hnat/mtkhnat.ko
+endef
+
+define KernelPackage/mediatek_hnat/description
+  Kernel modules for MediaTek HW NAT offloading
+endef
+
+$(eval $(call KernelPackage,mediatek_hnat))
+
diff --git a/target/linux/mediatek/mt7986/base-files/etc/board.d/02_network b/target/linux/mediatek/mt7986/base-files/etc/board.d/02_network
new file mode 100755
index 0000000..2947c26
--- /dev/null
+++ b/target/linux/mediatek/mt7986/base-files/etc/board.d/02_network
@@ -0,0 +1,52 @@
+#!/bin/sh
+
+. /lib/functions.sh
+. /lib/functions/uci-defaults.sh
+. /lib/functions/system.sh
+
+mediatek_setup_interfaces()
+{
+	local board="$1"
+
+	case $board in
+	*fpga*)
+		ucidef_set_interfaces_lan_wan "eth0" "eth1"
+		ucidef_add_switch "switch0" \
+			"0:lan" "1:lan" "2:lan" "3:lan" "4:wan" "6u@eth0" "5u@eth1"
+		;;
+	*)
+		ucidef_set_interfaces_lan_wan "lan0 lan1 lan2 lan3" wan
+		;;
+	esac
+}
+
+mediatek_setup_macs()
+{
+	local board="$1"
+	local part_name="Factory"
+	local lan_mac=""
+	local wan_mac=""
+	local lan_mac_offset=""
+	local wan_mac_offset=""
+
+	case $board in
+	*)
+		lan_mac_offset="0x2A"
+		wan_mac_offset="0x24"
+		;;
+	esac
+
+	lan_mac=$(mtd_get_mac_binary $part_name $lan_mac_offset)
+	wan_mac=$(mtd_get_mac_binary $part_name $wan_mac_offset)
+
+	[ -n "$lan_mac" ] && ucidef_set_interface_macaddr "lan" "$lan_mac"
+	[ -n "$wan_mac" ] && ucidef_set_interface_macaddr "wan" "$wan_mac"
+}
+
+board_config_update
+board=$(board_name)
+mediatek_setup_interfaces $board
+mediatek_setup_macs $board
+board_config_flush
+
+exit 0
diff --git a/target/linux/mediatek/mt7986/base-files/lib/preinit/98_10_mtk_failsafe_init b/target/linux/mediatek/mt7986/base-files/lib/preinit/98_10_mtk_failsafe_init
new file mode 100755
index 0000000..99c3978
--- /dev/null
+++ b/target/linux/mediatek/mt7986/base-files/lib/preinit/98_10_mtk_failsafe_init
@@ -0,0 +1,9 @@
+#!/bin/sh
+# Copyright (C) 2006-2015 OpenWrt.org
+# Copyright (C) 2010 Vertical Communications
+
+failsafe_mtk_init() {
+	/sbin/mtk_failsafe.sh
+}
+
+boot_hook_add failsafe failsafe_mtk_init
diff --git a/target/linux/mediatek/mt7986/base-files/lib/upgrade/platform.sh b/target/linux/mediatek/mt7986/base-files/lib/upgrade/platform.sh
new file mode 100755
index 0000000..1b4de86
--- /dev/null
+++ b/target/linux/mediatek/mt7986/base-files/lib/upgrade/platform.sh
@@ -0,0 +1,47 @@
+platform_do_upgrade() {
+	local board=$(board_name)
+
+	case "$board" in
+	mediatek,mt7986-fpga,ubi |\
+	mediatek,mt7986-rfb-snand)
+		nand_do_upgrade "$1"
+		;;
+	*)
+		default_do_upgrade "$1"
+		;;
+	esac
+}
+
+PART_NAME=firmware
+
+platform_check_image() {
+	local board=$(board_name)
+	local magic="$(get_magic_long "$1")"
+
+	[ "$#" -gt 1 ] && return 1
+
+	case "$board" in
+	mediatek,mt7986-fpga,ubi |\
+	mediatek,mt7986-rfb-snand)
+		# tar magic `ustar`
+		magic="$(dd if="$1" bs=1 skip=257 count=5 2>/dev/null)"
+
+		[ "$magic" != "ustar" ] && {
+			echo "Invalid image type."
+			return 1
+		}
+
+		return 0
+		;;
+	*)
+		[ "$magic" != "d00dfeed" ] && {
+			echo "Invalid image type."
+			return 1
+		}
+		return 0
+		;;
+	esac
+
+	return 0
+}
+
diff --git a/target/linux/mediatek/mt7986/config-5.4 b/target/linux/mediatek/mt7986/config-5.4
new file mode 100644
index 0000000..0635622
--- /dev/null
+++ b/target/linux/mediatek/mt7986/config-5.4
@@ -0,0 +1,599 @@
+CONFIG_64BIT=y
+CONFIG_AHCI_MTK=y
+CONFIG_ARCH_CLOCKSOURCE_DATA=y
+CONFIG_ARCH_DMA_ADDR_T_64BIT=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y
+CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
+CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y
+CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN=y
+CONFIG_ARCH_HAS_DMA_PREP_COHERENT=y
+CONFIG_ARCH_HAS_ELF_RANDOMIZE=y
+CONFIG_ARCH_HAS_FAST_MULTIPLIER=y
+CONFIG_ARCH_HAS_FORTIFY_SOURCE=y
+CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y
+CONFIG_ARCH_HAS_GIGANTIC_PAGE=y
+CONFIG_ARCH_HAS_KCOV=y
+CONFIG_ARCH_HAS_KEEPINITRD=y
+CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y
+CONFIG_ARCH_HAS_PTE_DEVMAP=y
+CONFIG_ARCH_HAS_PTE_SPECIAL=y
+CONFIG_ARCH_HAS_SETUP_DMA_OPS=y
+CONFIG_ARCH_HAS_SET_DIRECT_MAP=y
+CONFIG_ARCH_HAS_SET_MEMORY=y
+CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y
+CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
+CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU=y
+CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE=y
+CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y
+CONFIG_ARCH_HAS_TICK_BROADCAST=y
+CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
+CONFIG_ARCH_INLINE_READ_LOCK=y
+CONFIG_ARCH_INLINE_READ_LOCK_BH=y
+CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y
+CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y
+CONFIG_ARCH_INLINE_READ_UNLOCK=y
+CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y
+CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y
+CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y
+CONFIG_ARCH_INLINE_SPIN_LOCK=y
+CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y
+CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y
+CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y
+CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y
+CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y
+CONFIG_ARCH_INLINE_SPIN_UNLOCK=y
+CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y
+CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y
+CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y
+CONFIG_ARCH_INLINE_WRITE_LOCK=y
+CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y
+CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y
+CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y
+CONFIG_ARCH_INLINE_WRITE_UNLOCK=y
+CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y
+CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y
+CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y
+CONFIG_ARCH_KEEP_MEMBLOCK=y
+CONFIG_ARCH_MEDIATEK=y
+CONFIG_ARCH_MMAP_RND_BITS=18
+CONFIG_ARCH_MMAP_RND_BITS_MAX=24
+CONFIG_ARCH_MMAP_RND_BITS_MIN=18
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=11
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11
+CONFIG_ARCH_PROC_KCORE_TEXT=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_ARCH_SPARSEMEM_DEFAULT=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
+CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
+CONFIG_ARCH_SUPPORTS_INT128=y
+CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
+CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
+CONFIG_ARCH_SUPPORTS_UPROBES=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
+CONFIG_ARCH_USE_MEMREMAP_PROT=y
+CONFIG_ARCH_USE_QUEUED_RWLOCKS=y
+CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y
+CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y
+CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y
+CONFIG_ARCH_WANT_FRAME_POINTERS=y
+CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
+CONFIG_ARM64=y
+CONFIG_ARM64_4K_PAGES=y
+CONFIG_ARM64_CNP=y
+CONFIG_ARM64_CONT_SHIFT=4
+CONFIG_ARM64_ERRATUM_1165522=y
+CONFIG_ARM64_ERRATUM_1286807=y
+CONFIG_ARM64_ERRATUM_1418040=y
+CONFIG_ARM64_HW_AFDBM=y
+CONFIG_ARM64_PAGE_SHIFT=12
+CONFIG_ARM64_PAN=y
+CONFIG_ARM64_PA_BITS=48
+CONFIG_ARM64_PA_BITS_48=y
+CONFIG_ARM64_PTR_AUTH=y
+CONFIG_ARM64_SSBD=y
+CONFIG_ARM64_SVE=y
+# CONFIG_ARM64_SW_TTBR0_PAN is not set
+CONFIG_ARM64_TAGGED_ADDR_ABI=y
+CONFIG_ARM64_UAO=y
+CONFIG_ARM64_VA_BITS=39
+CONFIG_ARM64_VA_BITS_39=y
+CONFIG_ARM64_VHE=y
+CONFIG_ARM64_WORKAROUND_REPEAT_TLBI=y
+# CONFIG_ARMV8_DEPRECATED is not set
+CONFIG_ARM_AMBA=y
+CONFIG_ARM_ARCH_TIMER=y
+CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y
+CONFIG_ARM_GIC=y
+CONFIG_ARM_GIC_V2M=y
+CONFIG_ARM_GIC_V3=y
+CONFIG_ARM_GIC_V3_ITS=y
+CONFIG_ARM_GIC_V3_ITS_PCI=y
+CONFIG_ARM_MEDIATEK_CPUFREQ=y
+CONFIG_ARM_PMU=y
+CONFIG_ARM_PSCI_FW=y
+CONFIG_ATA=y
+CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_BLK_DEV_DM_BUILTIN=y
+# CONFIG_BLK_DEV_MD is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_MQ_PCI=y
+CONFIG_BLK_PM=y
+CONFIG_BLK_SCSI_REQUEST=y
+CONFIG_BLOCK_COMPAT=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_BT=y
+CONFIG_BT_BCM=y
+CONFIG_BT_BREDR=y
+CONFIG_BT_DEBUGFS=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_BCM=y
+# CONFIG_BT_HCIUART_INTEL is not set
+# CONFIG_BT_HCIUART_NOKIA is not set
+CONFIG_BT_HCIUART_QCA=y
+CONFIG_BT_HCIUART_SERDEV=y
+CONFIG_BT_HCIVHCI=y
+CONFIG_BT_HS=y
+CONFIG_BT_LE=y
+CONFIG_BT_MTKUART=y
+CONFIG_BT_QCA=y
+CONFIG_CAVIUM_TX2_ERRATUM_219=y
+CONFIG_CC_HAS_KASAN_GENERIC=y
+CONFIG_CLKDEV_LOOKUP=y
+CONFIG_CLKSRC_MMIO=y
+CONFIG_CLOCK_THERMAL=y
+CONFIG_CLONE_BACKWARDS=y
+CONFIG_COMMON_CLK=y
+CONFIG_COMMON_CLK_MEDIATEK=y
+CONFIG_COMMON_CLK_MT2712=y
+# CONFIG_COMMON_CLK_MT2712_BDPSYS is not set
+# CONFIG_COMMON_CLK_MT2712_IMGSYS is not set
+# CONFIG_COMMON_CLK_MT2712_JPGDECSYS is not set
+# CONFIG_COMMON_CLK_MT2712_MFGCFG is not set
+# CONFIG_COMMON_CLK_MT2712_MMSYS is not set
+# CONFIG_COMMON_CLK_MT2712_VDECSYS is not set
+# CONFIG_COMMON_CLK_MT2712_VENCSYS is not set
+# CONFIG_COMMON_CLK_MT6779 is not set
+# CONFIG_COMMON_CLK_MT6797 is not set
+CONFIG_COMMON_CLK_MT7622=y
+CONFIG_COMMON_CLK_MT7622_AUDSYS=y
+CONFIG_COMMON_CLK_MT7622_ETHSYS=y
+CONFIG_COMMON_CLK_MT7622_HIFSYS=y
+# CONFIG_COMMON_CLK_MT8173 is not set
+CONFIG_COMMON_CLK_MT8183=y
+# CONFIG_COMMON_CLK_MT8183_AUDIOSYS is not set
+# CONFIG_COMMON_CLK_MT8183_CAMSYS is not set
+# CONFIG_COMMON_CLK_MT8183_IMGSYS is not set
+# CONFIG_COMMON_CLK_MT8183_IPU_ADL is not set
+# CONFIG_COMMON_CLK_MT8183_IPU_CONN is not set
+# CONFIG_COMMON_CLK_MT8183_IPU_CORE0 is not set
+# CONFIG_COMMON_CLK_MT8183_IPU_CORE1 is not set
+# CONFIG_COMMON_CLK_MT8183_MFGCFG is not set
+# CONFIG_COMMON_CLK_MT8183_MMSYS is not set
+# CONFIG_COMMON_CLK_MT8183_VDECSYS is not set
+# CONFIG_COMMON_CLK_MT8183_VENCSYS is not set
+CONFIG_COMMON_CLK_MT8516=y
+# CONFIG_COMMON_CLK_MT8516_AUDSYS is not set
+CONFIG_COMPAT=y
+CONFIG_COMPAT_32BIT_TIME=y
+CONFIG_COMPAT_BINFMT_ELF=y
+CONFIG_COMPAT_NETLINK_MESSAGES=y
+CONFIG_COMPAT_OLD_SIGACTION=y
+CONFIG_CONSOLE_LOGLEVEL_DEFAULT=15
+# CONFIG_CPUFREQ_DT is not set
+CONFIG_CPU_FREQ=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ATTR_SET=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_RMAP=y
+CONFIG_CPU_THERMAL=y
+CONFIG_CRC16=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_CMAC=y
+CONFIG_CRYPTO_DRBG=y
+CONFIG_CRYPTO_DRBG_HMAC=y
+CONFIG_CRYPTO_DRBG_MENU=y
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_ECC=y
+CONFIG_CRYPTO_ECDH=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_JITTERENTROPY=y
+CONFIG_CRYPTO_KPP=y
+CONFIG_CRYPTO_KPP2=y
+CONFIG_CRYPTO_LIB_SHA256=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+CONFIG_CRYPTO_NULL2=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_RNG_DEFAULT=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_DCACHE_WORD_ACCESS=y
+CONFIG_DEBUG_MISC=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMADEVICES=y
+CONFIG_DMATEST=y
+CONFIG_DMA_DIRECT_REMAP=y
+CONFIG_DMA_ENGINE=y
+CONFIG_DMA_ENGINE_RAID=y
+CONFIG_DMA_OF=y
+CONFIG_DMA_REMAP=y
+CONFIG_DMA_VIRTUAL_CHANNELS=y
+CONFIG_DM_BUFIO=y
+# CONFIG_DM_CRYPT is not set
+# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set
+CONFIG_DM_INIT=y
+# CONFIG_DM_MIRROR is not set
+# CONFIG_DM_SNAPSHOT is not set
+CONFIG_DM_VERITY=y
+# CONFIG_DM_VERITY_FEC is not set
+# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set
+CONFIG_DRM_RCAR_WRITEBACK=y
+CONFIG_DTC=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_EDAC_SUPPORT=y
+CONFIG_EFI_EARLYCON=y
+CONFIG_EINT_MTK=y
+CONFIG_FIXED_PHY=y
+CONFIG_FIX_EARLYCON_MEM=y
+# CONFIG_FLATMEM_MANUAL is not set
+CONFIG_FONT_8x16=y
+CONFIG_FONT_AUTOSELECT=y
+CONFIG_FONT_SUPPORT=y
+CONFIG_FRAME_POINTER=y
+CONFIG_FUJITSU_ERRATUM_010001=y
+CONFIG_FW_LOADER_PAGED_BUF=y
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_GENERIC_ARCH_TOPOLOGY=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_GENERIC_CPU_AUTOPROBE=y
+CONFIG_GENERIC_CPU_VULNERABILITIES=y
+CONFIG_GENERIC_CSUM=y
+CONFIG_GENERIC_EARLY_IOREMAP=y
+CONFIG_GENERIC_GETTIMEOFDAY=y
+CONFIG_GENERIC_IDLE_POLL_SETUP=y
+CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
+CONFIG_GENERIC_IRQ_MULTI_HANDLER=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_GENERIC_IRQ_SHOW_LEVEL=y
+CONFIG_GENERIC_MSI_IRQ=y
+CONFIG_GENERIC_MSI_IRQ_DOMAIN=y
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_PHY=y
+CONFIG_GENERIC_PINCONF=y
+CONFIG_GENERIC_PINCTRL_GROUPS=y
+CONFIG_GENERIC_PINMUX_FUNCTIONS=y
+CONFIG_GENERIC_SCHED_CLOCK=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
+CONFIG_GENERIC_STRNCPY_FROM_USER=y
+CONFIG_GENERIC_STRNLEN_USER=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GLOB=y
+CONFIG_GPIOLIB=y
+CONFIG_HANDLE_DOMAIN_IRQ=y
+CONFIG_HARDEN_BRANCH_PREDICTOR=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_HAS_DMA=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT_MAP=y
+CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
+CONFIG_HAVE_ARCH_AUDITSYSCALL=y
+CONFIG_HAVE_ARCH_BITREVERSE=y
+CONFIG_HAVE_ARCH_HUGE_VMAP=y
+CONFIG_HAVE_ARCH_JUMP_LABEL=y
+CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y
+CONFIG_HAVE_ARCH_KASAN=y
+CONFIG_HAVE_ARCH_KASAN_SW_TAGS=y
+CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y
+CONFIG_HAVE_ARCH_PFN_VALID=y
+CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y
+CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
+CONFIG_HAVE_ARCH_STACKLEAK=y
+CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
+CONFIG_HAVE_ARCH_VMAP_STACK=y
+CONFIG_HAVE_ARM_SMCCC=y
+CONFIG_HAVE_ASM_MODVERSIONS=y
+CONFIG_HAVE_CLK=y
+CONFIG_HAVE_CLK_PREPARE=y
+CONFIG_HAVE_CMPXCHG_DOUBLE=y
+CONFIG_HAVE_CMPXCHG_LOCAL=y
+CONFIG_HAVE_CONTEXT_TRACKING=y
+CONFIG_HAVE_COPY_THREAD_TLS=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_HAVE_DEBUG_BUGVERBOSE=y
+CONFIG_HAVE_DEBUG_KMEMLEAK=y
+CONFIG_HAVE_DMA_CONTIGUOUS=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_EBPF_JIT=y
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_HAVE_FAST_GUP=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y
+CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_GENERIC_VDSO=y
+CONFIG_HAVE_HW_BREAKPOINT=y
+CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
+CONFIG_HAVE_MEMORY_PRESENT=y
+CONFIG_HAVE_NET_DSA=y
+CONFIG_HAVE_PATA_PLATFORM=y
+CONFIG_HAVE_PCI=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_HAVE_PERF_REGS=y
+CONFIG_HAVE_PERF_USER_STACK_DUMP=y
+CONFIG_HAVE_RCU_TABLE_FREE=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_RSEQ=y
+CONFIG_HAVE_SCHED_AVG_IRQ=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
+CONFIG_HAVE_UID16=y
+CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
+CONFIG_HOLES_IN_ZONE=y
+# CONFIG_HW_NAT is not set
+CONFIG_HZ=250
+CONFIG_HZ_250=y
+CONFIG_ICPLUS_PHY=y
+CONFIG_IIO=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_INLINE_READ_LOCK=y
+CONFIG_INLINE_READ_LOCK_BH=y
+CONFIG_INLINE_READ_LOCK_IRQ=y
+CONFIG_INLINE_READ_LOCK_IRQSAVE=y
+CONFIG_INLINE_READ_UNLOCK_BH=y
+CONFIG_INLINE_READ_UNLOCK_IRQRESTORE=y
+CONFIG_INLINE_SPIN_LOCK=y
+CONFIG_INLINE_SPIN_LOCK_BH=y
+CONFIG_INLINE_SPIN_LOCK_IRQ=y
+CONFIG_INLINE_SPIN_LOCK_IRQSAVE=y
+CONFIG_INLINE_SPIN_TRYLOCK=y
+CONFIG_INLINE_SPIN_TRYLOCK_BH=y
+CONFIG_INLINE_SPIN_UNLOCK_BH=y
+CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE=y
+CONFIG_INLINE_WRITE_LOCK=y
+CONFIG_INLINE_WRITE_LOCK_BH=y
+CONFIG_INLINE_WRITE_LOCK_IRQ=y
+CONFIG_INLINE_WRITE_LOCK_IRQSAVE=y
+CONFIG_INLINE_WRITE_UNLOCK_BH=y
+CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE=y
+CONFIG_IO_URING=y
+CONFIG_IRQCHIP=y
+CONFIG_IRQ_DOMAIN=y
+CONFIG_IRQ_DOMAIN_HIERARCHY=y
+CONFIG_IRQ_FORCED_THREADING=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_IRQ_WORK=y
+CONFIG_JUMP_LABEL=y
+CONFIG_LIBFDT=y
+CONFIG_LOCK_DEBUGGING_SUPPORT=y
+CONFIG_LOCK_SPIN_ON_OWNER=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_MD=y
+CONFIG_MDIO_BUS=y
+CONFIG_MDIO_DEVICE=y
+CONFIG_MEDIATEK_MT6577_AUXADC=y
+CONFIG_MEDIATEK_NETSYS_V2=y
+CONFIG_MEDIATEK_WATCHDOG=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEMFD_CREATE=y
+CONFIG_MESSAGE_LOGLEVEL_DEFAULT=7
+CONFIG_MFD_SYSCON=y
+CONFIG_MIGRATION=y
+CONFIG_MMC=y
+CONFIG_MMC_MTK=y
+# CONFIG_MMC_TIFM_SD is not set
+CONFIG_MODULES_TREE_LOOKUP=y
+CONFIG_MODULES_USE_ELF_RELA=y
+CONFIG_MT753X_GSW=y
+CONFIG_MTD_NAND_CORE=y
+CONFIG_MTD_NAND_ECC_SW_HAMMING=y
+CONFIG_MTD_NAND_MTK=y
+CONFIG_MTD_RAW_NAND=y
+CONFIG_MTD_SPI_NAND=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_MTD_SPLIT_FIRMWARE=y
+CONFIG_MTD_SPLIT_FIT_FW=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_BEB_LIMIT=20
+CONFIG_MTD_UBI_BLOCK=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+# CONFIG_MTK_CMDQ is not set
+# CONFIG_MTK_CQDMA is not set
+CONFIG_MTK_EFUSE=y
+CONFIG_MTK_HSDMA=y
+CONFIG_MTK_INFRACFG=y
+CONFIG_MTK_PMIC_WRAP=y
+CONFIG_MTK_SCPSYS=y
+CONFIG_MTK_THERMAL=y
+CONFIG_MTK_TIMER=y
+# CONFIG_MTK_UART_APDMA is not set
+CONFIG_MTK_SPI_NAND=y
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_NEED_SG_DMA_LENGTH=y
+CONFIG_NET_FLOW_LIMIT=y
+CONFIG_NET_MEDIATEK_SOC=y
+CONFIG_NET_VENDOR_MEDIATEK=y
+CONFIG_NLS=y
+CONFIG_NO_HZ_COMMON=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NR_CPUS=2
+CONFIG_NVMEM=y
+CONFIG_NVMEM_SYSFS=y
+# CONFIG_OCTEONTX2_AF is not set
+CONFIG_OF=y
+CONFIG_OF_ADDRESS=y
+CONFIG_OF_EARLY_FLATTREE=y
+CONFIG_OF_FLATTREE=y
+CONFIG_OF_GPIO=y
+CONFIG_OF_IRQ=y
+CONFIG_OF_KOBJ=y
+CONFIG_OF_MDIO=y
+CONFIG_OF_NET=y
+CONFIG_OLD_SIGSUSPEND3=y
+CONFIG_PADATA=y
+CONFIG_PARTITION_PERCPU=y
+CONFIG_PCI=y
+# CONFIG_PCIE_AL is not set
+CONFIG_PCIE_MEDIATEK=y
+CONFIG_PCIE_MEDIATEK_GEN3=y
+CONFIG_PCI_DEBUG=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCI_DOMAINS_GENERIC=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_MSI_IRQ_DOMAIN=y
+CONFIG_PERF_EVENTS=y
+CONFIG_PGTABLE_LEVELS=3
+CONFIG_PHYLIB=y
+CONFIG_PHYLINK=y
+CONFIG_PHYS_ADDR_T_64BIT=y
+CONFIG_PHY_MTK_TPHY=y
+# CONFIG_PHY_MTK_UFS is not set
+# CONFIG_PHY_MTK_XSPHY is not set
+CONFIG_PINCTRL=y
+# CONFIG_PINCTRL_MT2712 is not set
+# CONFIG_PINCTRL_MT6765 is not set
+# CONFIG_PINCTRL_MT6797 is not set
+# CONFIG_PINCTRL_MT7622 is not set
+CONFIG_PINCTRL_MT7986=y
+CONFIG_HW_RANDOM=y
+# CONFIG_PINCTRL_MT8173 is not set
+# CONFIG_PINCTRL_MT8183 is not set
+CONFIG_PINCTRL_MT8516=y
+CONFIG_PINCTRL_MTK=y
+CONFIG_PINCTRL_MTK_MOORE=y
+CONFIG_PM=y
+CONFIG_PM_CLK=y
+CONFIG_PM_GENERIC_DOMAINS=y
+CONFIG_PM_GENERIC_DOMAINS_OF=y
+CONFIG_PM_OPP=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_PRINTK_TIME=y
+CONFIG_PWM=y
+CONFIG_PWM_MEDIATEK=y
+# CONFIG_PWM_MTK_DISP is not set
+CONFIG_PWM_SYSFS=y
+CONFIG_QUEUED_RWLOCKS=y
+CONFIG_QUEUED_SPINLOCKS=y
+# CONFIG_RAETH is not set
+CONFIG_RAS=y
+CONFIG_RATIONAL=y
+# CONFIG_RAVE_SP_CORE is not set
+CONFIG_RCU_NEED_SEGCBLIST=y
+CONFIG_RCU_STALL_COMMON=y
+CONFIG_REALTEK_PHY=y
+CONFIG_REFCOUNT_FULL=y
+CONFIG_REGMAP=y
+CONFIG_REGMAP_MMIO=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_MT6380=y
+CONFIG_RESET_CONTROLLER=y
+CONFIG_RESET_TI_SYSCON=y
+CONFIG_RFS_ACCEL=y
+CONFIG_RODATA_FULL_DEFAULT_ENABLED=y
+CONFIG_RPS=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_MT7622=y
+CONFIG_RTC_I2C_AND_SPI=y
+# CONFIG_RTL8367S_GSW is not set
+CONFIG_RWSEM_SPIN_ON_OWNER=y
+CONFIG_SCHED_MC=y
+CONFIG_SCSI=y
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_SERIAL_8250_FSL=y
+CONFIG_SERIAL_8250_MT6577=y
+CONFIG_SERIAL_8250_NR_UARTS=3
+CONFIG_SERIAL_8250_RUNTIME_UARTS=3
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
+CONFIG_SERIAL_MCTRL_GPIO=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SG_POOL=y
+CONFIG_SMP=y
+CONFIG_SPARSEMEM=y
+CONFIG_SPARSEMEM_EXTREME=y
+CONFIG_SPARSEMEM_MANUAL=y
+CONFIG_SPARSEMEM_VMEMMAP=y
+CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_SPI=y
+CONFIG_SPI_MASTER=y
+CONFIG_SPI_MEM=y
+CONFIG_SPI_MT65XX=y
+# CONFIG_SPI_MTK_NOR is not set
+CONFIG_SPI_MTK_SNFI=y
+CONFIG_SRCU=y
+CONFIG_SWIOTLB=y
+CONFIG_SWPHY=y
+CONFIG_SYSCTL_EXCEPTION_TRACE=y
+CONFIG_SYSVIPC_COMPAT=y
+CONFIG_SYS_SUPPORTS_HUGETLBFS=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
+CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0
+CONFIG_THERMAL_EMULATION=y
+CONFIG_THERMAL_GOV_BANG_BANG=y
+CONFIG_THERMAL_GOV_FAIR_SHARE=y
+CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
+CONFIG_THERMAL_GOV_STEP_WISE=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_OF=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THREAD_INFO_IN_TASK=y
+CONFIG_TICK_CPU_ACCOUNTING=y
+CONFIG_TIMER_OF=y
+CONFIG_TIMER_PROBE=y
+CONFIG_TREE_RCU=y
+CONFIG_TREE_SRCU=y
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+CONFIG_UBIFS_FS_LZO=y
+CONFIG_UBIFS_FS_ZLIB=y
+# CONFIG_UCLAMP_TASK is not set
+# CONFIG_UNMAP_KERNEL_AT_EL0 is not set
+CONFIG_USB=y
+CONFIG_USB_COMMON=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_UAS=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_MTK=y
+# CONFIG_USB_XHCI_PLATFORM is not set
+CONFIG_VMAP_STACK=y
+CONFIG_WATCHDOG_CORE=y
+CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC=y
+CONFIG_WATCHDOG_PRETIMEOUT_GOV=y
+# CONFIG_WATCHDOG_PRETIMEOUT_GOV_NOOP is not set
+CONFIG_WATCHDOG_PRETIMEOUT_GOV_PANIC=y
+CONFIG_WATCHDOG_PRETIMEOUT_GOV_SEL=m
+CONFIG_WATCHDOG_SYSFS=y
+CONFIG_XPS=y
+CONFIG_ZONE_DMA32=y
diff --git a/target/linux/mediatek/mt7986/profiles/default.mk b/target/linux/mediatek/mt7986/profiles/default.mk
new file mode 100755
index 0000000..2ef570b
--- /dev/null
+++ b/target/linux/mediatek/mt7986/profiles/default.mk
@@ -0,0 +1,15 @@
+#
+# Copyright (C) 2015 OpenWrt.org
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+
+define Profile/Default
+	NAME:=Default Profile (minimum package set)
+endef
+
+define Profile/Default/Description
+	Default package set compatible with most boards.
+endef
+$(eval $(call Profile,Default))
diff --git a/target/linux/mediatek/mt7986/target.mk b/target/linux/mediatek/mt7986/target.mk
new file mode 100755
index 0000000..3ec9208
--- /dev/null
+++ b/target/linux/mediatek/mt7986/target.mk
@@ -0,0 +1,11 @@
+ARCH:=aarch64
+SUBTARGET:=mt7986
+BOARDNAME:=MT7986
+CPU_TYPE:=cortex-a53
+FEATURES:=squashfs nand ramdisk
+
+KERNELNAME:=Image dtbs
+
+define Target/Description
+	Build firmware images for MediaTek MT7986 ARM based boards.
+endef
diff --git a/target/linux/mediatek/patches-5.4/0020-dts-mt7622-enable-new-mtk-snand-for-ubi.patch b/target/linux/mediatek/patches-5.4/0020-dts-mt7622-enable-new-mtk-snand-for-ubi.patch
new file mode 100644
index 0000000..3a9e061
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/0020-dts-mt7622-enable-new-mtk-snand-for-ubi.patch
@@ -0,0 +1,23 @@
+--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+@@ -567,6 +567,20 @@
+ 		status = "disabled";
+ 	};
+ 
++	snand: snfi@1100d000 {
++		compatible = "mediatek,mt7622-snand";
++		reg = <0 0x1100d000 0 0x1000>, <0 0x1100e000 0 0x1000>;
++		reg-names = "nfi", "ecc";
++		interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
++		clocks = <&pericfg CLK_PERI_NFI_PD>,
++			 <&pericfg CLK_PERI_SNFI_PD>,
++			 <&pericfg CLK_PERI_NFIECC_PD>;
++		clock-names = "nfi_clk", "pad_clk", "ecc_clk";
++		#address-cells = <1>;
++		#size-cells = <0>;
++		status = "disabled";
++	};
++
+ 	nor_flash: spi@11014000 {
+ 		compatible = "mediatek,mt7622-nor",
+ 			     "mediatek,mt8173-nor";
diff --git a/target/linux/mediatek/patches-5.4/0100-hwnat_Kconfig_Makefile.patch b/target/linux/mediatek/patches-5.4/0100-hwnat_Kconfig_Makefile.patch
new file mode 100755
index 0000000..e0ac7ab
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/0100-hwnat_Kconfig_Makefile.patch
@@ -0,0 +1,33 @@
+--- a/net/Kconfig	2020-04-29 17:25:49.750444000 +0800
++++ b/net/Kconfig	2020-04-29 17:42:40.950424000 +0800
+@@ -451,6 +451,18 @@
+ 	  migration of VMs with direct attached VFs by failing over to the
+ 	  paravirtual datapath when the VF is unplugged.
+ 
++config HW_NAT
++	bool "HW NAT support"
++	default n
++	---help---
++	 This feature provides a fast path to support network lan/wan nat.
++	 If you need hw_nat engine to reduce cpu loading, please say Y.
++
++	  Note that the answer to this question doesn't directly affect the
++	  kernel: saying N will just cause the configurator to skip all
++	  the questions about Mediatek Ethernet devices. If you say Y,
++	  you will be asked for your specific card in the following questions.
++
+ endif   # if NET
+ 
+ # Used by archs to tell that they support BPF JIT compiler plus which flavour.
+--- a/net/Makefile	2020-04-23 16:36:46.000000000 +0800
++++ b/net/Makefile	2020-04-29 17:42:58.106487000 +0800
+@@ -62,6 +62,9 @@
+ obj-$(CONFIG_6LOWPAN)		+= 6lowpan/
+ obj-$(CONFIG_IEEE802154)	+= ieee802154/
+ obj-$(CONFIG_MAC802154)		+= mac802154/
++ifeq ($(CONFIG_HW_NAT),y)
++obj-y                           += nat/foe_hook/
++endif
+ 
+ ifeq ($(CONFIG_NET),y)
+ obj-$(CONFIG_SYSCTL)		+= sysctl_net.o
diff --git a/target/linux/mediatek/patches-5.4/0200-show_model_name_in_cpuinfo_on_arm64.patch b/target/linux/mediatek/patches-5.4/0200-show_model_name_in_cpuinfo_on_arm64.patch
new file mode 100644
index 0000000..98e5ab6
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/0200-show_model_name_in_cpuinfo_on_arm64.patch
@@ -0,0 +1,16 @@
+Index: linux-5.4.70/arch/arm64/kernel/cpuinfo.c
+===================================================================
+--- linux-5.4.70.orig/arch/arm64/kernel/cpuinfo.c
++++ linux-5.4.70/arch/arm64/kernel/cpuinfo.c
+@@ -139,9 +139,8 @@ static int c_show(struct seq_file *m, vo
+ 		 * "processor".  Give glibc what it expects.
+ 		 */
+ 		seq_printf(m, "processor\t: %d\n", i);
+-		if (compat)
+-			seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n",
+-				   MIDR_REVISION(midr), COMPAT_ELF_PLATFORM);
++		seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n",
++			   MIDR_REVISION(midr), COMPAT_ELF_PLATFORM);
+ 
+ 		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+ 			   loops_per_jiffy / (500000UL/HZ),
diff --git a/target/linux/mediatek/patches-5.4/0504-macsec-revert-async-support.patch b/target/linux/mediatek/patches-5.4/0504-macsec-revert-async-support.patch
new file mode 100644
index 0000000..d52db50
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/0504-macsec-revert-async-support.patch
@@ -0,0 +1,12 @@
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -1309,8 +1309,7 @@
+ 	struct crypto_aead *tfm;
+ 	int ret;
+ 
+-	/* Pick a sync gcm(aes) cipher to ensure order is preserved. */
+-	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
++	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
+ 
+ 	if (IS_ERR(tfm))
+ 		return tfm;
diff --git a/target/linux/mediatek/patches-5.4/0666-spi-mediatek-support-IPM-Design.patch b/target/linux/mediatek/patches-5.4/0666-spi-mediatek-support-IPM-Design.patch
new file mode 100644
index 0000000..b8b2dc7
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/0666-spi-mediatek-support-IPM-Design.patch
@@ -0,0 +1,636 @@
+From 675b477b2a50b2fb97f35944756f89644bf70092 Mon Sep 17 00:00:00 2001
+From: Qii Wang <qii.wang@mediatek.com>
+Date: Tue, 5 Jan 2021 16:48:39 +0800
+Subject: [PATCH] spi: mediatek: support IPM Design
+
+[Description]
+1. support single mode;
+2. support dual/quad mode with spi-mem framework.
+
+Signed-off-by: Leilk Liu <leilk.liu@mediatek.com>
+Reviewed-by: Qii Wang <qii.wang@mediatek.com>
+---
+ drivers/spi/spi-mt65xx.c                 | 395 +++++++++++++++++++++--
+ include/linux/platform_data/spi-mt65xx.h |   2 +-
+ 2 files changed, 370 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
+index 8acf24f7c..9183c64e4 100644
+--- a/drivers/spi/spi-mt65xx.c
++++ b/drivers/spi/spi-mt65xx.c
+@@ -17,6 +17,7 @@
+ #include <linux/platform_data/spi-mt65xx.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/spi/spi.h>
++#include <linux/spi/spi-mem.h>
+ #include <linux/dma-mapping.h>
+ 
+ #define SPI_CFG0_REG                      0x0000
+@@ -31,6 +32,7 @@
+ #define SPI_CFG2_REG                      0x0028
+ #define SPI_TX_SRC_REG_64                 0x002c
+ #define SPI_RX_DST_REG_64                 0x0030
++#define SPI_CFG3_IPM_REG                  0x0040
+ 
+ #define SPI_CFG0_SCK_HIGH_OFFSET          0
+ #define SPI_CFG0_SCK_LOW_OFFSET           8
+@@ -42,13 +44,15 @@
+ #define SPI_CFG1_CS_IDLE_OFFSET           0
+ #define SPI_CFG1_PACKET_LOOP_OFFSET       8
+ #define SPI_CFG1_PACKET_LENGTH_OFFSET     16
+-#define SPI_CFG1_GET_TICK_DLY_OFFSET      30
++#define SPI_CFG1_GET_TICKDLY_OFFSET       29
+ 
++#define SPI_CFG1_GET_TICKDLY_MASK	  GENMASK(31, 29)
+ #define SPI_CFG1_CS_IDLE_MASK             0xff
+ #define SPI_CFG1_PACKET_LOOP_MASK         0xff00
+ #define SPI_CFG1_PACKET_LENGTH_MASK       0x3ff0000
++#define SPI_CFG1_IPM_PACKET_LENGTH_MASK   GENMASK(31, 16)
+ #define SPI_CFG2_SCK_HIGH_OFFSET          0
+-#define SPI_CFG2_SCK_LOW_OFFSET           16
++#define SPI_CFG2_SCK_LOW_OFFSET		  16
+ 
+ #define SPI_CMD_ACT                  BIT(0)
+ #define SPI_CMD_RESUME               BIT(1)
+@@ -67,6 +71,25 @@
+ #define SPI_CMD_TX_ENDIAN            BIT(15)
+ #define SPI_CMD_FINISH_IE            BIT(16)
+ #define SPI_CMD_PAUSE_IE             BIT(17)
++#define SPI_CMD_IPM_NONIDLE_MODE     BIT(19)
++#define SPI_CMD_IPM_SPIM_LOOP        BIT(21)
++#define SPI_CMD_IPM_GET_TICKDLY_OFFSET    22
++
++#define SPI_CMD_IPM_GET_TICKDLY_MASK	GENMASK(24, 22)
++
++#define PIN_MODE_CFG(x)	((x) / 2)
++
++#define SPI_CFG3_IPM_PIN_MODE_OFFSET		0
++#define SPI_CFG3_IPM_HALF_DUPLEX_DIR		BIT(2)
++#define SPI_CFG3_IPM_HALF_DUPLEX_EN		BIT(3)
++#define SPI_CFG3_IPM_XMODE_EN			BIT(4)
++#define SPI_CFG3_IPM_NODATA_FLAG		BIT(5)
++#define SPI_CFG3_IPM_CMD_BYTELEN_OFFSET		8
++#define SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET	12
++
++#define SPI_CFG3_IPM_CMD_PIN_MODE_MASK		GENMASK(1, 0)
++#define SPI_CFG3_IPM_CMD_BYTELEN_MASK		GENMASK(11, 8)
++#define SPI_CFG3_IPM_ADDR_BYTELEN_MASK		GENMASK(15, 12)
+ 
+ #define MT8173_SPI_MAX_PAD_SEL 3
+ 
+@@ -77,6 +100,9 @@
+ 
+ #define MTK_SPI_MAX_FIFO_SIZE 32U
+ #define MTK_SPI_PACKET_SIZE 1024
++#define MTK_SPI_IPM_PACKET_SIZE SZ_64K
++#define MTK_SPI_IPM_PACKET_LOOP SZ_256
++
+ #define MTK_SPI_32BITS_MASK  (0xffffffff)
+ 
+ #define DMA_ADDR_EXT_BITS (36)
+@@ -90,6 +116,9 @@ struct mtk_spi_compatible {
+ 	bool enhance_timing;
+ 	/* some IC support DMA addr extension */
+ 	bool dma_ext;
++	/* the IPM IP design improve some feature, and support dual/quad mode */
++	bool ipm_design;
++	bool support_quad;
+ };
+ 
+ struct mtk_spi {
+@@ -104,6 +133,12 @@ struct mtk_spi {
+ 	struct scatterlist *tx_sgl, *rx_sgl;
+ 	u32 tx_sgl_len, rx_sgl_len;
+ 	const struct mtk_spi_compatible *dev_comp;
++
++	struct completion spimem_done;
++	bool use_spimem;
++	struct device *dev;
++	dma_addr_t tx_dma;
++	dma_addr_t rx_dma;
+ };
+ 
+ static const struct mtk_spi_compatible mtk_common_compat;
+@@ -112,6 +147,14 @@ static const struct mtk_spi_compatible mt2712_compat = {
+ 	.must_tx = true,
+ };
+ 
++static const struct mtk_spi_compatible ipm_compat = {
++	.must_tx = true,
++	.enhance_timing = true,
++	.dma_ext = true,
++	.ipm_design = true,
++	.support_quad = true,
++};
++
+ static const struct mtk_spi_compatible mt6765_compat = {
+ 	.need_pad_sel = true,
+ 	.must_tx = true,
+@@ -140,11 +183,14 @@ static const struct mtk_spi_compatible mt8183_compat = {
+  * supplies it.
+  */
+ static const struct mtk_chip_config mtk_default_chip_info = {
+-	.cs_pol = 0,
+ 	.sample_sel = 0,
++	.get_tick_dly = 0,
+ };
+ 
+ static const struct of_device_id mtk_spi_of_match[] = {
++	{ .compatible = "mediatek,ipm-spi",
++		.data = (void *)&ipm_compat,
++	},
+ 	{ .compatible = "mediatek,mt2701-spi",
+ 		.data = (void *)&mtk_common_compat,
+ 	},
+@@ -190,19 +236,48 @@ static void mtk_spi_reset(struct mtk_spi *mdata)
+ 	writel(reg_val, mdata->base + SPI_CMD_REG);
+ }
+ 
+-static int mtk_spi_prepare_message(struct spi_master *master,
+-				   struct spi_message *msg)
++static int mtk_spi_hw_init(struct spi_master *master,
++			   struct spi_device *spi)
+ {
+ 	u16 cpha, cpol;
+ 	u32 reg_val;
+-	struct spi_device *spi = msg->spi;
+ 	struct mtk_chip_config *chip_config = spi->controller_data;
+ 	struct mtk_spi *mdata = spi_master_get_devdata(master);
+ 
+ 	cpha = spi->mode & SPI_CPHA ? 1 : 0;
+ 	cpol = spi->mode & SPI_CPOL ? 1 : 0;
+ 
++	if (mdata->dev_comp->enhance_timing) {
++		if (mdata->dev_comp->ipm_design) {
++			/* CFG3 reg only used for spi-mem,
++			 * here write to default value
++			 */
++			writel(0x0, mdata->base + SPI_CFG3_IPM_REG);
++
++			reg_val = readl(mdata->base + SPI_CMD_REG);
++			reg_val &= ~SPI_CMD_IPM_GET_TICKDLY_MASK;
++			reg_val |= chip_config->get_tick_dly
++				   << SPI_CMD_IPM_GET_TICKDLY_OFFSET;
++			writel(reg_val, mdata->base + SPI_CMD_REG);
++		} else {
++			reg_val = readl(mdata->base + SPI_CFG1_REG);
++			reg_val &= ~SPI_CFG1_GET_TICKDLY_MASK;
++			reg_val |= chip_config->get_tick_dly
++				   << SPI_CFG1_GET_TICKDLY_OFFSET;
++			writel(reg_val, mdata->base + SPI_CFG1_REG);
++		}
++	}
++
+ 	reg_val = readl(mdata->base + SPI_CMD_REG);
++	if (mdata->dev_comp->ipm_design) {
++		/* SPI transfer without idle time until packet length done */
++		reg_val |= SPI_CMD_IPM_NONIDLE_MODE;
++		if (spi->mode & SPI_LOOP)
++			reg_val |= SPI_CMD_IPM_SPIM_LOOP;
++		else
++			reg_val &= ~SPI_CMD_IPM_SPIM_LOOP;
++	}
++
+ 	if (cpha)
+ 		reg_val |= SPI_CMD_CPHA;
+ 	else
+@@ -231,10 +306,12 @@ static int mtk_spi_prepare_message(struct spi_master *master,
+ #endif
+ 
+ 	if (mdata->dev_comp->enhance_timing) {
+-		if (chip_config->cs_pol)
++		/* set CS polarity */
++		if (spi->mode & SPI_CS_HIGH)
+ 			reg_val |= SPI_CMD_CS_POL;
+ 		else
+ 			reg_val &= ~SPI_CMD_CS_POL;
++
+ 		if (chip_config->sample_sel)
+ 			reg_val |= SPI_CMD_SAMPLE_SEL;
+ 		else
+@@ -260,11 +337,20 @@ static int mtk_spi_prepare_message(struct spi_master *master,
+ 	return 0;
+ }
+ 
++static int mtk_spi_prepare_message(struct spi_master *master,
++				   struct spi_message *msg)
++{
++	return mtk_spi_hw_init(master, msg->spi);
++}
++
+ static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
+ {
+ 	u32 reg_val;
+ 	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
+ 
++	if (spi->mode & SPI_CS_HIGH)
++		enable = !enable;
++
+ 	reg_val = readl(mdata->base + SPI_CMD_REG);
+ 	if (!enable) {
+ 		reg_val |= SPI_CMD_PAUSE_EN;
+@@ -278,14 +364,14 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
+ }
+ 
+ static void mtk_spi_prepare_transfer(struct spi_master *master,
+-				     struct spi_transfer *xfer)
++				     u32 speed_hz)
+ {
+ 	u32 spi_clk_hz, div, sck_time, cs_time, reg_val;
+ 	struct mtk_spi *mdata = spi_master_get_devdata(master);
+ 
+ 	spi_clk_hz = clk_get_rate(mdata->spi_clk);
+-	if (xfer->speed_hz < spi_clk_hz / 2)
+-		div = DIV_ROUND_UP(spi_clk_hz, xfer->speed_hz);
++	if (speed_hz < spi_clk_hz / 2)
++		div = DIV_ROUND_UP(spi_clk_hz, speed_hz);
+ 	else
+ 		div = 1;
+ 
+@@ -323,12 +409,24 @@ static void mtk_spi_setup_packet(struct spi_master *master)
+ 	u32 packet_size, packet_loop, reg_val;
+ 	struct mtk_spi *mdata = spi_master_get_devdata(master);
+ 
+-	packet_size = min_t(u32, mdata->xfer_len, MTK_SPI_PACKET_SIZE);
++	if (mdata->dev_comp->ipm_design)
++		packet_size = min_t(u32,
++				    mdata->xfer_len,
++				    MTK_SPI_IPM_PACKET_SIZE);
++	else
++		packet_size = min_t(u32,
++				    mdata->xfer_len,
++				    MTK_SPI_PACKET_SIZE);
++
+ 	packet_loop = mdata->xfer_len / packet_size;
+ 
+ 	reg_val = readl(mdata->base + SPI_CFG1_REG);
+-	reg_val &= ~(SPI_CFG1_PACKET_LENGTH_MASK | SPI_CFG1_PACKET_LOOP_MASK);
++	if (mdata->dev_comp->ipm_design)
++		reg_val &= ~SPI_CFG1_IPM_PACKET_LENGTH_MASK;
++	else
++		reg_val &= ~SPI_CFG1_PACKET_LENGTH_MASK;
+ 	reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
++	reg_val &= ~SPI_CFG1_PACKET_LOOP_MASK;
+ 	reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
+ 	writel(reg_val, mdata->base + SPI_CFG1_REG);
+ }
+@@ -423,7 +521,7 @@ static int mtk_spi_fifo_transfer(struct spi_master *master,
+ 	mdata->cur_transfer = xfer;
+ 	mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len);
+ 	mdata->num_xfered = 0;
+-	mtk_spi_prepare_transfer(master, xfer);
++	mtk_spi_prepare_transfer(master, xfer->speed_hz);
+ 	mtk_spi_setup_packet(master);
+ 
+ 	cnt = xfer->len / 4;
+@@ -455,7 +553,7 @@ static int mtk_spi_dma_transfer(struct spi_master *master,
+ 	mdata->cur_transfer = xfer;
+ 	mdata->num_xfered = 0;
+ 
+-	mtk_spi_prepare_transfer(master, xfer);
++	mtk_spi_prepare_transfer(master, xfer->speed_hz);
+ 
+ 	cmd = readl(mdata->base + SPI_CMD_REG);
+ 	if (xfer->tx_buf)
+@@ -532,6 +630,13 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
+ 	else
+ 		mdata->state = MTK_SPI_IDLE;
+ 
++	/* SPI-MEM ops */
++	if (mdata->use_spimem) {
++		complete(&mdata->spimem_done);
++
++		return IRQ_HANDLED;
++	}
++
+ 	if (!master->can_dma(master, master->cur_msg->spi, trans)) {
+ 		if (trans->rx_buf) {
+ 			cnt = mdata->xfer_len / 4;
+@@ -615,12 +720,241 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
+ 	return IRQ_HANDLED;
+ }
+ 
++static bool mtk_spi_mem_supports_op(struct spi_mem *mem,
++				     const struct spi_mem_op *op)
++{
++	if (op->data.buswidth > 4 || op->addr.buswidth > 4 ||
++	    op->dummy.buswidth > 4 || op->cmd.buswidth > 4)
++		return false;
++
++	if (op->addr.nbytes && op->dummy.nbytes &&
++	    op->addr.buswidth != op->dummy.buswidth)
++		return false;
++
++	if (op->addr.nbytes + op->dummy.nbytes > 16)
++		return false;
++
++	if (op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
++		if (op->data.nbytes / MTK_SPI_IPM_PACKET_SIZE >
++		    MTK_SPI_IPM_PACKET_LOOP ||
++		    op->data.nbytes % MTK_SPI_IPM_PACKET_SIZE != 0)
++			return false;
++	}
++
++	if (op->data.dir == SPI_MEM_DATA_IN &&
++	    !IS_ALIGNED((size_t)op->data.buf.in, 4))
++		return false;
++
++	return true;
++}
++
++static void mtk_spi_mem_setup_dma_xfer(struct spi_master *master,
++				   const struct spi_mem_op *op)
++{
++	struct mtk_spi *mdata = spi_master_get_devdata(master);
++
++	writel((u32)(mdata->tx_dma & MTK_SPI_32BITS_MASK),
++	       mdata->base + SPI_TX_SRC_REG);
++#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
++	if (mdata->dev_comp->dma_ext)
++		writel((u32)(mdata->tx_dma >> 32),
++		       mdata->base + SPI_TX_SRC_REG_64);
++#endif
++
++	if (op->data.dir == SPI_MEM_DATA_IN) {
++		writel((u32)(mdata->rx_dma & MTK_SPI_32BITS_MASK),
++			   mdata->base + SPI_RX_DST_REG);
++#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
++		if (mdata->dev_comp->dma_ext)
++			writel((u32)(mdata->rx_dma >> 32),
++				   mdata->base + SPI_RX_DST_REG_64);
++#endif
++	}
++}
++
++static int mtk_spi_transfer_wait(struct spi_mem *mem,
++				 const struct spi_mem_op *op)
++{
++	struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
++	unsigned long long ms = 1;
++
++	if (op->data.dir == SPI_MEM_NO_DATA)
++		ms = 8LL * 1000LL * 32;
++	else
++		ms = 8LL * 1000LL * op->data.nbytes;
++	do_div(ms, mem->spi->max_speed_hz);
++	ms += ms + 1000; /* 1s tolerance */
++
++	if (ms > UINT_MAX)
++		ms = UINT_MAX;
++
++	if (!wait_for_completion_timeout(&mdata->spimem_done,
++					 msecs_to_jiffies(ms))) {
++		dev_err(mdata->dev, "spi-mem transfer timeout\n");
++		return -ETIMEDOUT;
++	}
++
++	return 0;
++}
++
++static int mtk_spi_mem_exec_op(struct spi_mem *mem,
++				const struct spi_mem_op *op)
++{
++	struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
++	u32 reg_val, nio = 1, tx_size;
++	char *tx_tmp_buf;
++	int ret = 0;
++
++	mdata->use_spimem = true;
++	reinit_completion(&mdata->spimem_done);
++
++	mtk_spi_reset(mdata);
++	mtk_spi_hw_init(mem->spi->master, mem->spi);
++	mtk_spi_prepare_transfer(mem->spi->master, mem->spi->max_speed_hz);
++
++	reg_val = readl(mdata->base + SPI_CFG3_IPM_REG);
++	/* opcode byte len */
++	reg_val &= ~SPI_CFG3_IPM_CMD_BYTELEN_MASK;
++	reg_val |= 1 << SPI_CFG3_IPM_CMD_BYTELEN_OFFSET;
++
++	/* addr & dummy byte len */
++	reg_val &= ~SPI_CFG3_IPM_ADDR_BYTELEN_MASK;
++	if (op->addr.nbytes || op->dummy.nbytes)
++		reg_val |= (op->addr.nbytes + op->dummy.nbytes) <<
++			    SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET;
++
++	/* data byte len */
++	if (op->data.dir == SPI_MEM_NO_DATA) {
++		reg_val |= SPI_CFG3_IPM_NODATA_FLAG;
++		writel(0, mdata->base + SPI_CFG1_REG);
++	} else {
++		reg_val &= ~SPI_CFG3_IPM_NODATA_FLAG;
++		mdata->xfer_len = op->data.nbytes;
++		mtk_spi_setup_packet(mem->spi->master);
++	}
++
++	if (op->addr.nbytes || op->dummy.nbytes) {
++		if (op->addr.buswidth == 1 || op->dummy.buswidth == 1)
++			reg_val |= SPI_CFG3_IPM_XMODE_EN;
++		else
++			reg_val &= ~SPI_CFG3_IPM_XMODE_EN;
++	}
++
++	if (op->addr.buswidth == 2 ||
++	    op->dummy.buswidth == 2 ||
++	    op->data.buswidth == 2)
++		nio = 2;
++	else if (op->addr.buswidth == 4 ||
++		 op->dummy.buswidth == 4 ||
++		 op->data.buswidth == 4)
++		nio = 4;
++
++	reg_val &= ~SPI_CFG3_IPM_CMD_PIN_MODE_MASK;
++	reg_val |= PIN_MODE_CFG(nio) << SPI_CFG3_IPM_PIN_MODE_OFFSET;
++
++	reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
++	if (op->data.dir == SPI_MEM_DATA_IN)
++		reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
++	else
++		reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_DIR;
++	writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
++
++	tx_size = 1 + op->addr.nbytes + op->dummy.nbytes;
++	if (op->data.dir == SPI_MEM_DATA_OUT)
++		tx_size += op->data.nbytes;
++
++	tx_size = max(tx_size, (u32)32);
++
++	tx_tmp_buf = kzalloc(tx_size, GFP_KERNEL | GFP_DMA);
++	if (!tx_tmp_buf)
++		return -ENOMEM;
++
++	tx_tmp_buf[0] = op->cmd.opcode;
++
++	if (op->addr.nbytes) {
++		int i;
++
++		for (i = 0; i < op->addr.nbytes; i++)
++			tx_tmp_buf[i + 1] = op->addr.val >>
++					(8 * (op->addr.nbytes - i - 1));
++	}
++
++	if (op->dummy.nbytes)
++		memset(tx_tmp_buf + op->addr.nbytes + 1,
++		       0xff,
++		       op->dummy.nbytes);
++
++	if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
++		memcpy(tx_tmp_buf + op->dummy.nbytes + op->addr.nbytes + 1,
++		       op->data.buf.out,
++		       op->data.nbytes);
++
++	mdata->tx_dma = dma_map_single(mdata->dev, tx_tmp_buf,
++				       tx_size, DMA_TO_DEVICE);
++	if (dma_mapping_error(mdata->dev, mdata->tx_dma)) {
++		ret = -ENOMEM;
++		goto err_exit;
++	}
++
++	if (op->data.dir == SPI_MEM_DATA_IN) {
++		mdata->rx_dma = dma_map_single(mdata->dev,
++					       op->data.buf.in,
++					       op->data.nbytes,
++					       DMA_FROM_DEVICE);
++		if (dma_mapping_error(mdata->dev, mdata->rx_dma)) {
++			ret = -ENOMEM;
++			goto unmap_tx_dma;
++		}
++	}
++
++	reg_val = readl(mdata->base + SPI_CMD_REG);
++	reg_val |= SPI_CMD_TX_DMA;
++	if (op->data.dir == SPI_MEM_DATA_IN)
++		reg_val |= SPI_CMD_RX_DMA;
++	writel(reg_val, mdata->base + SPI_CMD_REG);
++
++	mtk_spi_mem_setup_dma_xfer(mem->spi->master, op);
++
++	mtk_spi_enable_transfer(mem->spi->master);
++
++	/* Wait for the interrupt. */
++	ret = mtk_spi_transfer_wait(mem, op);
++	if (ret)
++		goto unmap_rx_dma;
++
++	/* spi disable dma */
++	reg_val = readl(mdata->base + SPI_CMD_REG);
++	reg_val &= ~SPI_CMD_TX_DMA;
++	if (op->data.dir == SPI_MEM_DATA_IN)
++		reg_val &= ~SPI_CMD_RX_DMA;
++	writel(reg_val, mdata->base + SPI_CMD_REG);
++
++	if (op->data.dir == SPI_MEM_DATA_IN)
++		dma_unmap_single(mdata->dev, mdata->rx_dma,
++				 op->data.nbytes, DMA_FROM_DEVICE);
++unmap_rx_dma:
++	dma_unmap_single(mdata->dev, mdata->rx_dma,
++			 op->data.nbytes, DMA_FROM_DEVICE);
++unmap_tx_dma:
++	dma_unmap_single(mdata->dev, mdata->tx_dma,
++			 tx_size, DMA_TO_DEVICE);
++err_exit:
++	kfree(tx_tmp_buf);
++	mdata->use_spimem = false;
++
++	return ret;
++}
++
++static const struct spi_controller_mem_ops mtk_spi_mem_ops = {
++	.supports_op = mtk_spi_mem_supports_op,
++	.exec_op = mtk_spi_mem_exec_op,
++};
++
+ static int mtk_spi_probe(struct platform_device *pdev)
+ {
+ 	struct spi_master *master;
+ 	struct mtk_spi *mdata;
+ 	const struct of_device_id *of_id;
+-	struct resource *res;
+ 	int i, irq, ret, addr_bits;
+ 
+ 	master = spi_alloc_master(&pdev->dev, sizeof(*mdata));
+@@ -629,7 +963,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 	}
+ 
+-	master->auto_runtime_pm = true;
++//	master->auto_runtime_pm = true;
+ 	master->dev.of_node = pdev->dev.of_node;
+ 	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+ 
+@@ -648,9 +982,25 @@ static int mtk_spi_probe(struct platform_device *pdev)
+ 
+ 	mdata = spi_master_get_devdata(master);
+ 	mdata->dev_comp = of_id->data;
++
++	if (mdata->dev_comp->enhance_timing)
++		master->mode_bits |= SPI_CS_HIGH;
++
+ 	if (mdata->dev_comp->must_tx)
+ 		master->flags = SPI_MASTER_MUST_TX;
+ 
++	if (mdata->dev_comp->ipm_design)
++		master->mode_bits |= SPI_LOOP;
++
++	if (mdata->dev_comp->support_quad) {
++		master->mem_ops = &mtk_spi_mem_ops;
++		master->mode_bits |= SPI_RX_DUAL | SPI_TX_DUAL |
++				     SPI_RX_QUAD | SPI_TX_QUAD;
++
++		mdata->dev = &pdev->dev;
++		init_completion(&mdata->spimem_done);
++	}
++
+ 	if (mdata->dev_comp->need_pad_sel) {
+ 		mdata->pad_num = of_property_count_u32_elems(
+ 			pdev->dev.of_node,
+@@ -683,15 +1033,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	platform_set_drvdata(pdev, master);
+-
+-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+-	if (!res) {
+-		ret = -ENODEV;
+-		dev_err(&pdev->dev, "failed to determine base address\n");
+-		goto err_put_master;
+-	}
+-
+-	mdata->base = devm_ioremap_resource(&pdev->dev, res);
++	mdata->base = devm_platform_ioremap_resource(pdev, 0);
+ 	if (IS_ERR(mdata->base)) {
+ 		ret = PTR_ERR(mdata->base);
+ 		goto err_put_master;
+@@ -713,6 +1055,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
+ 		goto err_put_master;
+ 	}
+ 
++/*
+ 	mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk");
+ 	if (IS_ERR(mdata->parent_clk)) {
+ 		ret = PTR_ERR(mdata->parent_clk);
+@@ -750,7 +1093,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
+ 	clk_disable_unprepare(mdata->spi_clk);
+ 
+ 	pm_runtime_enable(&pdev->dev);
+-
++*/
+ 	ret = devm_spi_register_master(&pdev->dev, master);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
+diff --git a/include/linux/platform_data/spi-mt65xx.h b/include/linux/platform_data/spi-mt65xx.h
+index f0e6d6483..fae9bc15c 100644
+--- a/include/linux/platform_data/spi-mt65xx.h
++++ b/include/linux/platform_data/spi-mt65xx.h
+@@ -11,7 +11,7 @@
+ 
+ /* Board specific platform_data */
+ struct mtk_chip_config {
+-	u32 cs_pol;
+ 	u32 sample_sel;
++	u32 get_tick_dly;
+ };
+ #endif
+-- 
+2.17.1
+
diff --git a/target/linux/mediatek/patches-5.4/0666-spi-mtk-nor-fix-timeout-calculation-overflow.patch b/target/linux/mediatek/patches-5.4/0666-spi-mtk-nor-fix-timeout-calculation-overflow.patch
new file mode 100644
index 0000000..86b2089
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/0666-spi-mtk-nor-fix-timeout-calculation-overflow.patch
@@ -0,0 +1,179 @@
+From patchwork Tue Sep 22 11:49:02 2020
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+X-Patchwork-Submitter: Chuanhong Guo <gch981213@gmail.com>
+X-Patchwork-Id: 11792387
+Return-Path: 
+ <SRS0=i66O=C7=lists.infradead.org=linux-arm-kernel-bounces+patchwork-linux-arm=patchwork.kernel.org@kernel.org>
+Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org
+ [172.30.200.123])
+	by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 21EB0618
+	for <patchwork-linux-arm@patchwork.kernel.org>;
+ Tue, 22 Sep 2020 11:51:33 +0000 (UTC)
+Received: from merlin.infradead.org (merlin.infradead.org [205.233.59.134])
+	(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
+	(No client certificate requested)
+	by mail.kernel.org (Postfix) with ESMTPS id E15FF221EB
+	for <patchwork-linux-arm@patchwork.kernel.org>;
+ Tue, 22 Sep 2020 11:51:32 +0000 (UTC)
+Authentication-Results: mail.kernel.org;
+	dkim=pass (2048-bit key) header.d=lists.infradead.org
+ header.i=@lists.infradead.org header.b="KBg/skkC";
+	dkim=fail reason="signature verification failed" (2048-bit key)
+ header.d=gmail.com header.i=@gmail.com header.b="Gtqp4rrT"
+DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org E15FF221EB
+Authentication-Results: mail.kernel.org;
+ dmarc=fail (p=none dis=none) header.from=gmail.com
+Authentication-Results: mail.kernel.org;
+ spf=none
+ smtp.mailfrom=linux-arm-kernel-bounces+patchwork-linux-arm=patchwork.kernel.org@lists.infradead.org
+DKIM-Signature: v=1; a=rsa-sha256; q=dns/txt; c=relaxed/relaxed;
+	d=lists.infradead.org; s=merlin.20170209; h=Sender:Content-Transfer-Encoding:
+	Content-Type:Cc:List-Subscribe:List-Help:List-Post:List-Archive:
+	List-Unsubscribe:List-Id:MIME-Version:Message-Id:Date:Subject:To:From:
+	Reply-To:Content-ID:Content-Description:Resent-Date:Resent-From:Resent-Sender
+	:Resent-To:Resent-Cc:Resent-Message-ID:In-Reply-To:References:List-Owner;
+	bh=Xg61WV47qNPjINdHDPnF6T3q8GN8f9evwhTMdYR0Zqs=; b=KBg/skkCvnF7/8AlleTay0p/H2
+	hC4Lzo+slWhX5/eepUEXzhTr5ORf4Dx9gD65UEuordKQKFpg6Y9ApoGaYtmBJ0vABdAZt+oVG4sFf
+	K3z3CYV6EZ5qvwsZt53Xm3YsHojgu+Lnc/MGgGWBRjCtTP7gshm480pZ0w6ADgHvrym5hNajUF6+5
+	zMm5Wwq34jxUApGU7k5FAPsvO5ctYCuhECq/mLB6tplCVh3/+XLdSiHMUlY17fh+xs732kgaDotuQ
+	QYgXtDmMB1pVKCq5cf3Bcuz7Ww47vLSx4rBxtdB/vpp2w9SdrU6K8Q7DuJ3+XrGfbMhKtBU5ektA8
+	GxEUUaKw==;
+Received: from localhost ([::1] helo=merlin.infradead.org)
+	by merlin.infradead.org with esmtp (Exim 4.92.3 #3 (Red Hat Linux))
+	id 1kKgo2-0000Ze-Fb; Tue, 22 Sep 2020 11:50:00 +0000
+Received: from mail-pg1-x543.google.com ([2607:f8b0:4864:20::543])
+ by merlin.infradead.org with esmtps (Exim 4.92.3 #3 (Red Hat Linux))
+ id 1kKgnr-0000Vv-6z; Tue, 22 Sep 2020 11:49:49 +0000
+Received: by mail-pg1-x543.google.com with SMTP id o25so6798387pgm.0;
+ Tue, 22 Sep 2020 04:49:46 -0700 (PDT)
+DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20161025;
+ h=from:to:cc:subject:date:message-id:mime-version
+ :content-transfer-encoding;
+ bh=EJwpKrbgqo/Jc/SWHvyAGB9CrpkZ5L1Hzq9tInFHTYk=;
+ b=Gtqp4rrTgM1+bYxfUQXe+lfPcgHRW6GccdN42Iszl6ozMbezvftl1BUcKE22S6eFW3
+ Vs+lcKZN9Eh9C53YAAd0cuZYhJ2GqlfGNLA/9SyB7s/gIwHqO9Cuu17YpB9dAFfEUxoS
+ 825uUcTeRe6BTagZAh2/MBluiMY3TszRi94MbOftxUg+wSqp0wMAPe9RN0gAEc/l2xgK
+ 8PhXbZv3uItI4QqoKYiz93vrF/zYhj+oGTI44g2li2fpAgCNL7lXCpSE2C9NsEe+YqTw
+ aO5A3W8t4jvp8oCJEvr/MWY1ZZLd1fVJ17W3aGXoDi/7EUcAvX9G5Ee7U68UXGMtty/d
+ z5Nw==
+X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed;
+ d=1e100.net; s=20161025;
+ h=x-gm-message-state:from:to:cc:subject:date:message-id:mime-version
+ :content-transfer-encoding;
+ bh=EJwpKrbgqo/Jc/SWHvyAGB9CrpkZ5L1Hzq9tInFHTYk=;
+ b=XhcpP16zYyJr/qCT9JbO3fn8RyfI44xJL3hvgNrlcr4ljkEZ4TF6OfyhjdEZYeeA3C
+ kLlWuAqrSn6mweuhS2LZ0BV5QL/YYaVO4wP4B/y3j+tNbnW3JNM0NtEY19pOtaM4vYK/
+ tPuNxld5RvJWxQ9BLs8hH6y7j/ob6oDug170P5YkwK6Wa/FLCi2bw92/vldhdnFP/Nny
+ 1bbiWRVls1Ra/Q3z90tGViMkBdlcff6MI9DR5M6a1HTQN7kN9rLDCMGs3r9XVComY07N
+ ECbrZbL+iJwuRuT43RAUxE72X/Pn0WYD20unzITf8bta92usNDRgEuxc1bLyL+uHxgUk
+ YQKA==
+X-Gm-Message-State: AOAM531Xr1Bg4uwupCAPpH4eBWVrXGALjIWa+5AVNZ8w6ltS4BGgWv6b
+ e4g6ycKnUp/KalpJhOMi90o=
+X-Google-Smtp-Source: 
+ ABdhPJx36OliaaLkiX3ZeZNNWgd/qSKiRor2X0eeHScDrjMSi5bTiEzAfX5j7hkQgqz8ZUT0qqLRNA==
+X-Received: by 2002:a63:1863:: with SMTP id 35mr3131307pgy.413.1600775385014;
+ Tue, 22 Sep 2020 04:49:45 -0700 (PDT)
+Received: from guoguo-omen.lan ([156.96.148.94])
+ by smtp.gmail.com with ESMTPSA id r4sm2223750pjf.4.2020.09.22.04.49.42
+ (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256);
+ Tue, 22 Sep 2020 04:49:44 -0700 (PDT)
+From: Chuanhong Guo <gch981213@gmail.com>
+To: linux-spi@vger.kernel.org
+Subject: [PATCH v2] spi: spi-mtk-nor: fix timeout calculation overflow
+Date: Tue, 22 Sep 2020 19:49:02 +0800
+Message-Id: <20200922114905.2942859-1-gch981213@gmail.com>
+X-Mailer: git-send-email 2.26.2
+MIME-Version: 1.0
+X-CRM114-Version: 20100106-BlameMichelson ( TRE 0.8.0 (BSD) ) MR-646709E3 
+X-CRM114-CacheID: sfid-20200922_074948_345420_69207EBE 
+X-CRM114-Status: GOOD (  12.60  )
+X-Spam-Score: 2.6 (++)
+X-Spam-Report: SpamAssassin version 3.4.4 on merlin.infradead.org summary:
+ Content analysis details:   (2.6 points)
+ pts rule name              description
+ ---- ----------------------
+ --------------------------------------------------
+ 2.6 RCVD_IN_SBL            RBL: Received via a relay in Spamhaus SBL
+ [156.96.148.94 listed in zen.spamhaus.org]
+ -0.0 RCVD_IN_DNSWL_NONE     RBL: Sender listed at https://www.dnswl.org/,
+ no trust [2607:f8b0:4864:20:0:0:0:543 listed in]
+ [list.dnswl.org]
+ 0.0 FREEMAIL_FROM          Sender email is commonly abused enduser mail
+ provider [gch981213[at]gmail.com]
+ 0.2 FREEMAIL_ENVFROM_END_DIGIT Envelope-from freemail username ends
+ in digit [gch981213[at]gmail.com]
+ -0.0 SPF_PASS               SPF: sender matches SPF record
+ 0.0 SPF_HELO_NONE          SPF: HELO does not publish an SPF Record
+ -0.1 DKIM_VALID Message has at least one valid DKIM or DK signature
+ -0.1 DKIM_VALID_AU          Message has a valid DKIM or DK signature from
+ author's domain
+ -0.1 DKIM_VALID_EF          Message has a valid DKIM or DK signature from
+ envelope-from domain
+ 0.1 DKIM_SIGNED            Message has a DKIM or DK signature,
+ not necessarily
+ valid
+X-BeenThere: linux-arm-kernel@lists.infradead.org
+X-Mailman-Version: 2.1.29
+Precedence: list
+List-Id: <linux-arm-kernel.lists.infradead.org>
+List-Unsubscribe: 
+ <http://lists.infradead.org/mailman/options/linux-arm-kernel>,
+ <mailto:linux-arm-kernel-request@lists.infradead.org?subject=unsubscribe>
+List-Archive: <http://lists.infradead.org/pipermail/linux-arm-kernel/>
+List-Post: <mailto:linux-arm-kernel@lists.infradead.org>
+List-Help: <mailto:linux-arm-kernel-request@lists.infradead.org?subject=help>
+List-Subscribe: 
+ <http://lists.infradead.org/mailman/listinfo/linux-arm-kernel>,
+ <mailto:linux-arm-kernel-request@lists.infradead.org?subject=subscribe>
+Cc: linux-kernel@vger.kernel.org, stable@vger.kernel.org,
+ Mark Brown <broonie@kernel.org>, linux-mediatek@lists.infradead.org,
+ bayi.cheng@mediatek.com, Matthias Brugger <matthias.bgg@gmail.com>,
+ Chuanhong Guo <gch981213@gmail.com>, linux-arm-kernel@lists.infradead.org
+Sender: "linux-arm-kernel" <linux-arm-kernel-bounces@lists.infradead.org>
+Errors-To: 
+ linux-arm-kernel-bounces+patchwork-linux-arm=patchwork.kernel.org@lists.infradead.org
+
+CLK_TO_US macro is used to calculate potential transfer time for various
+timeout handling. However it overflows on transfer bigger than 512 bytes
+because it first did (len * 8 * 1000000).
+This controller typically operates at 45MHz. This patch did 2 things:
+1. calculate clock / 1000000 first
+2. add a 4M transfer size cap so that the final timeout in DMA reading
+   doesn't overflow
+
+Fixes: 881d1ee9fe81f ("spi: add support for mediatek spi-nor controller")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Chuanhong Guo <gch981213@gmail.com>
+---
+
+Change since v1: fix transfer size cap to 4M
+
+ drivers/spi/spi-mtk-nor.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c
+index 6e6ca2b8e6c82..62f5ff2779884 100644
+--- a/drivers/spi/spi-mtk-nor.c
++++ b/drivers/spi/spi-mtk-nor.c
+@@ -89,7 +89,7 @@
+ // Buffered page program can do one 128-byte transfer
+ #define MTK_NOR_PP_SIZE			128
+ 
+-#define CLK_TO_US(sp, clkcnt)		((clkcnt) * 1000000 / sp->spi_freq)
++#define CLK_TO_US(sp, clkcnt)		DIV_ROUND_UP(clkcnt, sp->spi_freq / 1000000)
+ 
+ struct mtk_nor {
+ 	struct spi_controller *ctlr;
+@@ -177,6 +177,10 @@ static int mtk_nor_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+ 	if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
+ 		if ((op->data.dir == SPI_MEM_DATA_IN) &&
+ 		    mtk_nor_match_read(op)) {
++			// limit size to prevent timeout calculation overflow
++			if (op->data.nbytes > 0x400000)
++				op->data.nbytes = 0x400000;
++
+ 			if ((op->addr.val & MTK_NOR_DMA_ALIGN_MASK) ||
+ 			    (op->data.nbytes < MTK_NOR_DMA_ALIGN))
+ 				op->data.nbytes = 1;
diff --git a/target/linux/mediatek/patches-5.4/0667-spi-mediatek-fix-timeout-for-large-data.patch b/target/linux/mediatek/patches-5.4/0667-spi-mediatek-fix-timeout-for-large-data.patch
new file mode 100644
index 0000000..a04f5d6
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/0667-spi-mediatek-fix-timeout-for-large-data.patch
@@ -0,0 +1,34 @@
+--- a/drivers/spi/spi-mt65xx.c
++++ b/drivers/spi/spi-mt65xx.c
+@@ -720,6 +720,23 @@ static irqreturn_t mtk_spi_interrupt(int
+ 	return IRQ_HANDLED;
+ }
+ 
++static int mtk_spi_mem_adjust_op_size(struct spi_mem *mem,
++                                      struct spi_mem_op *op)
++{
++	int opcode_len;
++
++	if(!op->data.nbytes)
++		return 0;
++
++	if (op->data.dir != SPI_MEM_NO_DATA) {
++		opcode_len = 1 + op->addr.nbytes + op->dummy.nbytes;
++		if (opcode_len + op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE)
++			op->data.nbytes = MTK_SPI_IPM_PACKET_SIZE -opcode_len;
++	}
++
++	return 0;
++}
++
+ static bool mtk_spi_mem_supports_op(struct spi_mem *mem,
+ 				     const struct spi_mem_op *op)
+ {
+@@ -946,6 +963,7 @@ err_exit:
+ }
+ 
+ static const struct spi_controller_mem_ops mtk_spi_mem_ops = {
++	.adjust_op_size = mtk_spi_mem_adjust_op_size,
+ 	.supports_op = mtk_spi_mem_supports_op,
+ 	.exec_op = mtk_spi_mem_exec_op,
+ };
diff --git a/target/linux/mediatek/patches-5.4/0668-spi-mediatek-fix-dma-unmap-twice.patch b/target/linux/mediatek/patches-5.4/0668-spi-mediatek-fix-dma-unmap-twice.patch
new file mode 100644
index 0000000..31562bf
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/0668-spi-mediatek-fix-dma-unmap-twice.patch
@@ -0,0 +1,16 @@
+--- a/drivers/spi/spi-mt65xx.c
++++ b/drivers/spi/spi-mt65xx.c
+@@ -946,12 +946,10 @@ static int mtk_spi_mem_exec_op(struct sp
+ 		reg_val &= ~SPI_CMD_RX_DMA;
+ 	writel(reg_val, mdata->base + SPI_CMD_REG);
+ 
++unmap_rx_dma:
+ 	if (op->data.dir == SPI_MEM_DATA_IN)
+ 		dma_unmap_single(mdata->dev, mdata->rx_dma,
+ 				 op->data.nbytes, DMA_FROM_DEVICE);
+-unmap_rx_dma:
+-	dma_unmap_single(mdata->dev, mdata->rx_dma,
+-			 op->data.nbytes, DMA_FROM_DEVICE);
+ unmap_tx_dma:
+ 	dma_unmap_single(mdata->dev, mdata->tx_dma,
+ 			 tx_size, DMA_TO_DEVICE);
diff --git a/target/linux/mediatek/patches-5.4/0669-fix-SPIM-NAND-and-NOR-probing.patch b/target/linux/mediatek/patches-5.4/0669-fix-SPIM-NAND-and-NOR-probing.patch
new file mode 100644
index 0000000..582771b
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/0669-fix-SPIM-NAND-and-NOR-probing.patch
@@ -0,0 +1,33 @@
+--- a/drivers/spi/spi-mt65xx.c
++++ b/drivers/spi/spi-mt65xx.c
+@@ -1073,7 +1073,7 @@ static int mtk_spi_probe(struct platform
+ 		goto err_put_master;
+ 	}
+ 
+-/*
++
+ 	mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk");
+ 	if (IS_ERR(mdata->parent_clk)) {
+ 		ret = PTR_ERR(mdata->parent_clk);
+@@ -1101,17 +1101,17 @@ static int mtk_spi_probe(struct platform
+ 		goto err_put_master;
+ 	}
+ 
+-	ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
++	/*ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
+ 	if (ret < 0) {
+ 		dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret);
+ 		clk_disable_unprepare(mdata->spi_clk);
+ 		goto err_put_master;
+ 	}
+ 
+-	clk_disable_unprepare(mdata->spi_clk);
++	clk_disable_unprepare(mdata->sel_clk);*/
++
++	//pm_runtime_enable(&pdev->dev);
+ 
+-	pm_runtime_enable(&pdev->dev);
+-*/
+ 	ret = devm_spi_register_master(&pdev->dev, master);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
diff --git a/target/linux/mediatek/patches-5.4/0701-fix-mtk-nfi-driver-dependency.patch b/target/linux/mediatek/patches-5.4/0701-fix-mtk-nfi-driver-dependency.patch
new file mode 100644
index 0000000..3023076
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/0701-fix-mtk-nfi-driver-dependency.patch
@@ -0,0 +1,10 @@
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -429,6 +429,7 @@ config SPI_MT65XX
+ 
+ config SPI_MTK_SNFI
+ 	tristate "MediaTek SPI NAND interface"
++	depends on MTD
+ 	select MTD_SPI_NAND
+ 	help
+ 	  This selects the SPI NAND FLASH interface(SNFI),
diff --git a/target/linux/mediatek/patches-5.4/1002-mtkhnat-add-support-for-virtual-interface-acceleration.patch b/target/linux/mediatek/patches-5.4/1002-mtkhnat-add-support-for-virtual-interface-acceleration.patch
new file mode 100644
index 0000000..150087a
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/1002-mtkhnat-add-support-for-virtual-interface-acceleration.patch
@@ -0,0 +1,127 @@
+diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
+index 3d73c0c..960ade1 100644
+--- a/include/net/netfilter/nf_flow_table.h
++++ b/include/net/netfilter/nf_flow_table.h
+@@ -92,9 +92,12 @@ struct flow_offload {
+ #define FLOW_OFFLOAD_PATH_VLAN		BIT(1)
+ #define FLOW_OFFLOAD_PATH_PPPOE		BIT(2)
+ #define FLOW_OFFLOAD_PATH_DSA		BIT(3)
++#define FLOW_OFFLOAD_PATH_DSLITE	BIT(4)
++#define FLOW_OFFLOAD_PATH_6RD		BIT(5)
+ 
+ struct flow_offload_hw_path {
+ 	struct net_device *dev;
++	struct net_device *virt_dev;
+ 	u32 flags;
+ 
+ 	u8 eth_src[ETH_ALEN];
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
+index be6801524..c51af70f6 100644
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -761,6 +761,7 @@ static int vlan_dev_flow_offload_check(struct flow_offload_hw_path *path)
+ 	path->flags |= FLOW_OFFLOAD_PATH_VLAN;
+ 	path->vlan_proto = vlan->vlan_proto;
+ 	path->vlan_id = vlan->vlan_id;
++	path->virt_dev = dev;
+ 	path->dev = vlan->real_dev;
+ 
+ 	if (vlan->real_dev->netdev_ops->ndo_flow_offload_check)
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 1b7e3141c..da4e34f74 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -57,6 +57,11 @@
+ #include <net/netns/generic.h>
+ #include <net/dst_metadata.h>
+ 
++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
++#include <linux/netfilter.h>
++#include <net/netfilter/nf_flow_table.h>
++#endif
++
+ MODULE_AUTHOR("Ville Nuorvala");
+ MODULE_DESCRIPTION("IPv6 tunneling device");
+ MODULE_LICENSE("GPL");
+@@ -1880,6 +1885,22 @@ int ip6_tnl_get_iflink(const struct net_device *dev)
+ }
+ EXPORT_SYMBOL(ip6_tnl_get_iflink);
+ 
++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
++static int ipip6_dev_flow_offload_check(struct flow_offload_hw_path *path)
++{
++	struct net_device *dev = path->dev;
++	struct ip6_tnl *tnl = netdev_priv(dev);
++
++	if (path->flags & FLOW_OFFLOAD_PATH_DSLITE)
++		return -EEXIST;
++
++	path->flags |= FLOW_OFFLOAD_PATH_DSLITE;
++	path->dev = tnl->dev;
++
++	return 0;
++}
++#endif /* CONFIG_NF_FLOW_TABLE */
++
+ int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
+ 			  unsigned int num)
+ {
+@@ -1941,6 +1962,9 @@ static const struct net_device_ops ip6_tnl_netdev_ops = {
+ 	.ndo_change_mtu = ip6_tnl_change_mtu,
+ 	.ndo_get_stats	= ip6_get_stats,
+ 	.ndo_get_iflink = ip6_tnl_get_iflink,
++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
++	.ndo_flow_offload_check = ipip6_dev_flow_offload_check,
++#endif
+ };
+ 
+ #define IPXIPX_FEATURES (NETIF_F_SG |		\
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 98954830c..42b6e8c4c 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -52,6 +52,11 @@
+ #include <net/net_namespace.h>
+ #include <net/netns/generic.h>
+ 
++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
++#include <linux/netfilter.h>
++#include <net/netfilter/nf_flow_table.h>
++#endif
++
+ /*
+    This version of net/ipv6/sit.c is cloned of net/ipv4/ip_gre.c
+ 
+@@ -1345,6 +1350,22 @@ ipip6_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ 	return err;
+ }
+ 
++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
++static int ipip6_dev_flow_offload_check(struct flow_offload_hw_path *path)
++{
++	struct net_device *dev = path->dev;
++	struct ip_tunnel *tnl = netdev_priv(dev);
++
++	if (path->flags & FLOW_OFFLOAD_PATH_6RD)
++		return -EEXIST;
++
++	path->flags |= FLOW_OFFLOAD_PATH_6RD;
++	path->dev = tnl->dev;
++
++	return 0;
++}
++#endif /* CONFIG_NF_FLOW_TABLE */
++
+ static const struct net_device_ops ipip6_netdev_ops = {
+ 	.ndo_init	= ipip6_tunnel_init,
+ 	.ndo_uninit	= ipip6_tunnel_uninit,
+@@ -1352,6 +1373,9 @@ static const struct net_device_ops ipip6_netdev_ops = {
+ 	.ndo_do_ioctl	= ipip6_tunnel_ioctl,
+ 	.ndo_get_stats64 = ip_tunnel_get_stats64,
+ 	.ndo_get_iflink = ip_tunnel_get_iflink,
++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
++	.ndo_flow_offload_check = ipip6_dev_flow_offload_check,
++#endif
+ };
+ 
+ static void ipip6_dev_free(struct net_device *dev)
diff --git a/target/linux/mediatek/patches-5.4/1015-pcie-add-pcie-gen3-upstream-driver.patch b/target/linux/mediatek/patches-5.4/1015-pcie-add-pcie-gen3-upstream-driver.patch
new file mode 100644
index 0000000..4b99d9d
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/1015-pcie-add-pcie-gen3-upstream-driver.patch
@@ -0,0 +1,36 @@
+diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
+index 70e0782..67988f8 100644
+--- a/drivers/pci/controller/Kconfig
++++ b/drivers/pci/controller/Kconfig
+@@ -241,6 +241,19 @@ config PCIE_MEDIATEK
+ 	  Say Y here if you want to enable PCIe controller support on
+ 	  MediaTek SoCs.
+ 
++config PCIE_MEDIATEK_GEN3
++	tristate "MediaTek Gen3 PCIe controller"
++	depends on ARCH_MEDIATEK || COMPILE_TEST
++	depends on PCI_MSI_IRQ_DOMAIN
++	help
++	  Adds support for PCIe Gen3 MAC controller for MediaTek SoCs.
++	  This PCIe controller is compatible with Gen3, Gen2 and Gen1 speed,
++	  and support up to 256 MSI interrupt numbers for
++	  multi-function devices.
++
++	  Say Y here if you want to enable Gen3 PCIe controller support on
++	  MediaTek SoCs.
++
+ config PCIE_MOBIVEIL
+ 	bool "Mobiveil AXI PCIe controller"
+ 	depends on ARCH_ZYNQMP || COMPILE_TEST
+diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
+index a2a22c9..54a496a 100644
+--- a/drivers/pci/controller/Makefile
++++ b/drivers/pci/controller/Makefile
+@@ -27,6 +27,7 @@ obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
+ obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o
+ obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
+ obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
++obj-$(CONFIG_PCIE_MEDIATEK_GEN3) += pcie-mediatek-gen3.o
+ obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o
+ obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
+ obj-$(CONFIG_VMD) += vmd.o
diff --git a/target/linux/mediatek/patches-5.4/1023-kgdb-add-interrupt-control.patch b/target/linux/mediatek/patches-5.4/1023-kgdb-add-interrupt-control.patch
new file mode 100644
index 0000000..e0ee954
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/1023-kgdb-add-interrupt-control.patch
@@ -0,0 +1,42 @@
+diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
+index 1a157ca..258fe4b 100644
+--- a/arch/arm64/kernel/kgdb.c
++++ b/arch/arm64/kernel/kgdb.c
+@@ -18,6 +18,10 @@
+ #include <asm/debug-monitors.h>
+ #include <asm/insn.h>
+ #include <asm/traps.h>
++#include <asm/ptrace.h>
++
++
++static DEFINE_PER_CPU(unsigned int, kgdb_pstate);
+ 
+ struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
+ 	{ "x0", 8, offsetof(struct pt_regs, regs[0])},
+@@ -206,6 +210,8 @@ int kgdb_arch_handle_exception(int exception_vector, int signo,
+ 		err = 0;
+ 		break;
+ 	case 's':
++		__this_cpu_write(kgdb_pstate, linux_regs->pstate);
++		linux_regs->pstate |= PSR_I_BIT;
+ 		/*
+ 		 * Update step address value with address passed
+ 		 * with step packet.
+@@ -249,9 +255,17 @@ NOKPROBE_SYMBOL(kgdb_compiled_brk_fn);
+ 
+ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
+ {
++	unsigned int pstate;
++
+ 	if (!kgdb_single_step)
+ 		return DBG_HOOK_ERROR;
++	kernel_disable_single_step();
+ 
++	pstate = __this_cpu_read(kgdb_pstate);
++	if (pstate & PSR_I_BIT)
++		regs->pstate |= PSR_I_BIT;
++	else
++		regs->pstate &= ~PSR_I_BIT;
+ 	kgdb_handle_exception(0, SIGTRAP, 0, regs);
+ 	return DBG_HOOK_HANDLED;
+ }
diff --git a/target/linux/mediatek/patches-5.4/1024-pcie-add-multi-MSI-support.patch b/target/linux/mediatek/patches-5.4/1024-pcie-add-multi-MSI-support.patch
new file mode 100644
index 0000000..5cf486c
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/1024-pcie-add-multi-MSI-support.patch
@@ -0,0 +1,64 @@
+diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
+index 2a54fa7a3..132b3204c 100644
+--- a/drivers/pci/controller/pcie-mediatek.c
++++ b/drivers/pci/controller/pcie-mediatek.c
+@@ -446,24 +446,24 @@ static int mtk_pcie_irq_domain_alloc(struct irq_domain *domain, unsigned int vir
+ 				     unsigned int nr_irqs, void *args)
+ {
+ 	struct mtk_pcie_port *port = domain->host_data;
+-	unsigned long bit;
++	int bit, i;
+ 
+-	WARN_ON(nr_irqs != 1);
+ 	mutex_lock(&port->lock);
+ 
+-	bit = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM);
+-	if (bit >= MTK_MSI_IRQS_NUM) {
++	bit = bitmap_find_free_region(port->msi_irq_in_use, MTK_MSI_IRQS_NUM,
++							order_base_2(nr_irqs));
++	if (bit < 0) {
+ 		mutex_unlock(&port->lock);
+ 		return -ENOSPC;
+ 	}
+ 
+-	__set_bit(bit, port->msi_irq_in_use);
+-
+ 	mutex_unlock(&port->lock);
+ 
+-	irq_domain_set_info(domain, virq, bit, &mtk_msi_bottom_irq_chip,
+-			    domain->host_data, handle_edge_irq,
+-			    NULL, NULL);
++	for (i = 0; i < nr_irqs; i++) {
++		irq_domain_set_info(domain, virq + i, bit + i,
++				    &mtk_msi_bottom_irq_chip, domain->host_data,
++				    handle_edge_irq, NULL, NULL);
++	}
+ 
+ 	return 0;
+ }
+@@ -501,7 +501,7 @@ static struct irq_chip mtk_msi_irq_chip = {
+ 
+ static struct msi_domain_info mtk_msi_domain_info = {
+ 	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+-		   MSI_FLAG_PCI_MSIX),
++		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
+ 	.chip	= &mtk_msi_irq_chip,
+ };
+ 
+@@ -633,14 +633,14 @@ static void mtk_pcie_intr_handler(struct irq_desc *desc)
+ 		if (status & MSI_STATUS){
+ 			unsigned long imsi_status;
+ 
++			/* Clear MSI interrupt status */
++			writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
+ 			while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
+ 				for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) {
+ 					virq = irq_find_mapping(port->inner_domain, bit);
+ 					generic_handle_irq(virq);
+ 				}
+ 			}
+-			/* Clear MSI interrupt status */
+-			writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
+ 		}
+ 	}
+ 
diff --git a/target/linux/mediatek/patches-5.4/400-mtd-add-mtk-snand-driver.patch b/target/linux/mediatek/patches-5.4/400-mtd-add-mtk-snand-driver.patch
new file mode 100644
index 0000000..185c55d
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/400-mtd-add-mtk-snand-driver.patch
@@ -0,0 +1,21 @@
+--- a/drivers/mtd/Kconfig
++++ b/drivers/mtd/Kconfig
+@@ -228,6 +228,8 @@ source "drivers/mtd/ubi/Kconfig"
+ 
+ source "drivers/mtd/hyperbus/Kconfig"
+ 
++source "drivers/mtd/mtk-snand/Kconfig"
++
+ source "drivers/mtd/composite/Kconfig"
+ 
+ endif # MTD
+--- a/drivers/mtd/Makefile
++++ b/drivers/mtd/Makefile
+@@ -33,5 +33,7 @@ obj-$(CONFIG_MTD_SPI_NOR)	+= spi-nor/
+ obj-$(CONFIG_MTD_UBI)		+= ubi/
+ obj-$(CONFIG_MTD_HYPERBUS)	+= hyperbus/
+ 
++obj-$(CONFIG_MTK_SPI_NAND)	+= mtk-snand/
++
+ # Composite drivers must be loaded last
+ obj-y				+= composite/
diff --git a/target/linux/mediatek/patches-5.4/401-pinctrl-add-mt7986-driver.patch b/target/linux/mediatek/patches-5.4/401-pinctrl-add-mt7986-driver.patch
new file mode 100644
index 0000000..a02873d
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/401-pinctrl-add-mt7986-driver.patch
@@ -0,0 +1,30 @@
+diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
+index 701f9af..9109f91 100644
+--- a/drivers/pinctrl/mediatek/Kconfig
++++ b/drivers/pinctrl/mediatek/Kconfig
+@@ -100,6 +100,13 @@ config PINCTRL_MT7622
+ 	default ARM64 && ARCH_MEDIATEK
+ 	select PINCTRL_MTK_MOORE
+ 
++config PINCTRL_MT7986
++	bool "Mediatek MT7986 pin control"
++	depends on OF
++	depends on ARM64 || COMPILE_TEST
++	default ARM64 && ARCH_MEDIATEK
++	select PINCTRL_MTK_MOORE
++
+ config PINCTRL_MT8173
+ 	bool "Mediatek MT8173 pin control"
+ 	depends on OF
+diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile
+index a74325a..d408585 100644
+--- a/drivers/pinctrl/mediatek/Makefile
++++ b/drivers/pinctrl/mediatek/Makefile
+@@ -15,6 +15,7 @@ obj-$(CONFIG_PINCTRL_MT6797)	+= pinctrl-mt6797.o
+ obj-$(CONFIG_PINCTRL_MT7622)	+= pinctrl-mt7622.o
+ obj-$(CONFIG_PINCTRL_MT7623)	+= pinctrl-mt7623.o
+ obj-$(CONFIG_PINCTRL_MT7629)	+= pinctrl-mt7629.o
++obj-$(CONFIG_PINCTRL_MT7986)	+= pinctrl-mt7986.o
+ obj-$(CONFIG_PINCTRL_MT8173)	+= pinctrl-mt8173.o
+ obj-$(CONFIG_PINCTRL_MT8183)	+= pinctrl-mt8183.o
+ obj-$(CONFIG_PINCTRL_MT8516)	+= pinctrl-mt8516.o
diff --git a/target/linux/mediatek/patches-5.4/730-net-ethernet-mtk_eth_soc-add-mtk-dsa-tag-rx-offload.patch b/target/linux/mediatek/patches-5.4/730-net-ethernet-mtk_eth_soc-add-mtk-dsa-tag-rx-offload.patch
new file mode 100644
index 0000000..6b10584
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/730-net-ethernet-mtk_eth_soc-add-mtk-dsa-tag-rx-offload.patch
@@ -0,0 +1,44 @@
+--- linux-5.4.77.orig/net/dsa/tag_mtk.c
++++ linux-5.4.77/net/dsa/tag_mtk.c
+@@ -73,22 +73,28 @@ static struct sk_buff *mtk_tag_rcv(struc
+ 	bool is_multicast_skb = is_multicast_ether_addr(dest) &&
+ 				!is_broadcast_ether_addr(dest);
+ 
+-	if (unlikely(!pskb_may_pull(skb, MTK_HDR_LEN)))
+-		return NULL;
++	if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) {
++		hdr = ntohs(skb->vlan_proto);
++		skb->vlan_proto = 0;
++		skb->vlan_tci = 0;
++	} else {
++		if (unlikely(!pskb_may_pull(skb, MTK_HDR_LEN)))
++			return NULL;
+ 
+-	/* The MTK header is added by the switch between src addr
+-	 * and ethertype at this point, skb->data points to 2 bytes
+-	 * after src addr so header should be 2 bytes right before.
+-	 */
+-	phdr = (__be16 *)(skb->data - 2);
+-	hdr = ntohs(*phdr);
++		/* The MTK header is added by the switch between src addr
++		 * and ethertype at this point, skb->data points to 2 bytes
++		 * after src addr so header should be 2 bytes right before.
++		 */
++		phdr = (__be16 *)(skb->data - 2);
++		hdr = ntohs(*phdr);
+ 
+-	/* Remove MTK tag and recalculate checksum. */
+-	skb_pull_rcsum(skb, MTK_HDR_LEN);
++		/* Remove MTK tag and recalculate checksum. */
++		skb_pull_rcsum(skb, MTK_HDR_LEN);
+ 
+-	memmove(skb->data - ETH_HLEN,
+-		skb->data - ETH_HLEN - MTK_HDR_LEN,
+-		2 * ETH_ALEN);
++		memmove(skb->data - ETH_HLEN,
++			skb->data - ETH_HLEN - MTK_HDR_LEN,
++			2 * ETH_ALEN);
++	}
+ 
+ 	/* Get source port information */
+ 	port = (hdr & MTK_HDR_RECV_SOURCE_PORT_MASK);
diff --git a/target/linux/mediatek/patches-5.4/738-mt7531-gsw-internal_phy_calibration.patch b/target/linux/mediatek/patches-5.4/738-mt7531-gsw-internal_phy_calibration.patch
new file mode 100755
index 0000000..361eca6
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/738-mt7531-gsw-internal_phy_calibration.patch
@@ -0,0 +1,1282 @@
+Index: drivers/net/phy/mtk/mt753x/Makefile
+===================================================================
+--- a/drivers/net/phy/mtk/mt753x/Makefile
++++ b/drivers/net/phy/mtk/mt753x/Makefile
+@@ -7,5 +7,5 @@ obj-$(CONFIG_MT753X_GSW)	+= mt753x.o
+ mt753x-$(CONFIG_SWCONFIG)	+= mt753x_swconfig.o
+ 
+ mt753x-y			+= mt753x_mdio.o mt7530.o mt7531.o \
+-					mt753x_common.o mt753x_vlan.o mt753x_nl.o
++					mt753x_common.o mt753x_vlan.o mt753x_nl.o mt753x_phy.o
+ 
+Index: drivers/net/phy/mtk/mt753x/mt7531.c
+===================================================================
+--- a/drivers/net/phy/mtk/mt753x/mt7531.c
++++ b/drivers/net/phy/mtk/mt753x/mt7531.c
+@@ -658,6 +658,27 @@ static void mt7531_core_pll_setup(struct
+ 
+ static int mt7531_internal_phy_calibration(struct gsw_mt753x *gsw)
+ {
++	u32 i, val;
++	int ret;
++
++	dev_info(gsw->dev,">>>>>>>>>>>>>>>>>>>>>>>>>>>>> START CALIBRATION:\n");
++
++	/* gphy value from sw path */
++	val = gsw->mmd_read(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_403);
++	val |= GBE_EFUSE_SETTING;
++	gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_403, val);
++
++	for (i = 0; i < 5; i++) {
++		dev_info(gsw->dev, "-------- gephy-calbration (port:%d) --------\n",
++			 i);
++		ret = mt753x_phy_calibration(gsw, i);
++
++		/* set Auto-negotiation with giga extension. */
++		gsw->mii_write(gsw, i, 0, 0x1340);
++		if (ret)
++			return ret;
++	}
++
+ 	return 0;
+ }
+ 
+Index: drivers/net/phy/mtk/mt753x/mt753x.h
+===================================================================
+--- a/drivers/net/phy/mtk/mt753x/mt753x.h
++++ b/drivers/net/phy/mtk/mt753x/mt753x.h
+@@ -140,6 +140,8 @@ void mt753x_irq_enable(struct gsw_mt753x
+ int mt753x_phy_calibration(struct gsw_mt753x *gsw, u8 phyaddr);
+ int extphy_init(struct gsw_mt753x *gsw, int addr);
+ 
++int mt753x_phy_calibration(struct gsw_mt753x *gsw, u8 phyaddr);
++
+ /* MDIO Indirect Access Registers */
+ #define MII_MMD_ACC_CTL_REG		0x0d
+ #define MMD_CMD_S			14
+Index: drivers/net/phy/mtk/mt753x/mt753x_phy.c
+===================================================================
+new file mode 100644
+--- /dev/null
++++ b/drivers/net/phy/mtk/mt753x/mt753x_phy.c
+@@ -0,0 +1,1069 @@
++// SPDX-License-Identifier:	GPL-2.0+
++/*
++ * Common part for MediaTek MT753x gigabit switch
++ *
++ * Copyright (C) 2018 MediaTek Inc. All Rights Reserved.
++ *
++ * Author: Weijie Gao <weijie.gao@mediatek.com>
++ */
++
++#include <linux/kernel.h>
++#include <linux/delay.h>
++
++#include "mt753x.h"
++#include "mt753x_regs.h"
++#include "mt753x_phy.h"
++
++u32 tc_phy_read_dev_reg(struct gsw_mt753x *gsw, u32 port_num, u32 dev_addr, u32 reg_addr)
++{
++	u32 phy_val;
++    phy_val = gsw->mmd_read(gsw, port_num, dev_addr, reg_addr);
++    
++    //printk("switch phy cl45 r %d 0x%x 0x%x = %x\n",port_num, dev_addr, reg_addr, phy_val);
++	//switch_phy_read_cl45(port_num, dev_addr, reg_addr, &phy_val);
++	return phy_val;
++}
++
++void tc_phy_write_dev_reg(struct gsw_mt753x *gsw, u32 port_num, u32 dev_addr, u32 reg_addr, u32 write_data)
++{
++	u32 phy_val;
++    gsw->mmd_write(gsw, port_num, dev_addr, reg_addr, write_data);
++    phy_val = gsw->mmd_read(gsw, port_num, dev_addr, reg_addr);
++    //printk("switch phy cl45 w %d 0x%x 0x%x 0x%x --> read back 0x%x\n",port_num, dev_addr, reg_addr, write_data, phy_val);
++	//switch_phy_write_cl45(port_num, dev_addr, reg_addr, write_data);
++}
++
++void switch_phy_write(struct gsw_mt753x *gsw, u32 port_num, u32 reg_addr, u32 write_data){
++	gsw->mii_write(gsw, port_num, reg_addr, write_data);
++}
++
++u32 switch_phy_read(struct gsw_mt753x *gsw, u32 port_num, u32 reg_addr){
++	return gsw->mii_read(gsw, port_num, reg_addr);
++}
++
++const u8 MT753x_ZCAL_TO_R50ohm_GE_TBL_100[64] = {
++	127, 127, 127, 127, 127, 127, 127, 127,
++	127, 127, 127, 127, 127, 123, 122, 117,
++	115, 112, 103, 100, 98, 87, 85, 83,
++	81, 72, 70, 68, 66, 64, 55, 53,
++	52, 50, 49, 48, 38, 36, 35, 34,
++	33, 32, 22, 21, 20, 19, 18, 17,
++	16, 7, 6, 5, 4, 3, 2, 1,
++	0, 0, 0, 0, 0, 0, 0, 0
++};
++
++const u8 MT753x_TX_OFFSET_TBL[64] = {
++	0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18,
++	0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
++	0xf, 0xe, 0xd, 0xc, 0xb, 0xa, 0x9, 0x8,
++	0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0x1, 0x0,
++	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
++	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
++	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
++	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f
++};
++
++u8 ge_cal_flag;
++
++u8 all_ge_ana_cal_wait(struct gsw_mt753x *gsw, u32 delay, u32 phyaddr) // for EN7512 
++{
++	u8 all_ana_cal_status;	
++	u32 cnt, tmp_1e_17c;
++	//tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x017c, 0x0001);	// da_calin_flag pull high
++	tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x17c, 0x0001);
++	//printk("delay = %d\n", delay);
++	
++	cnt = 10000;
++	do {
++		udelay(delay);
++		cnt--;
++		all_ana_cal_status = tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0x17b) & 0x1;
++
++	} while ((all_ana_cal_status == 0) && (cnt != 0));
++
++
++	if(all_ana_cal_status == 1) {
++		tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x17c, 0);
++		return all_ana_cal_status;
++	} else {
++		tmp_1e_17c = tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0x17c);
++		if ((tmp_1e_17c & 0x1) != 1) {
++			pr_info("FIRST MDC/MDIO write error\n");
++			pr_info("FIRST 1e_17c = %x\n", tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0x17c));
++
++		}
++		printk("re-K again\n");
++        
++		tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x17c, 0);
++		tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x17c, 0x0001);
++		cnt = 10000;
++		do {
++			udelay(delay);
++			cnt--;
++			tmp_1e_17c = tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0x17c);
++			if ((tmp_1e_17c & 0x1) != 1) {
++				pr_info("SECOND MDC/MDIO write error\n");
++				pr_info("SECOND 1e_17c = %x\n", tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0x17c));
++				tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x17c, 0x0001);
++				tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x17c, 0x0001);
++				tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x17c, 0x0001);
++			}
++		} while ((cnt != 0) && (tmp_1e_17c == 0));
++
++		cnt = 10000;
++		do {
++			udelay(delay);
++			cnt--;
++			all_ana_cal_status = tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0x17b) & 0x1;
++	
++		} while ((all_ana_cal_status == 0) && (cnt != 0));
++	
++		tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x17c, 0);
++	}
++
++    if(all_ana_cal_status == 0){
++        pr_info("!!!!!!!!!!!! dev1Eh_reg17b ERROR\n");
++    }
++	
++	return all_ana_cal_status;
++}
++
++
++
++
++int ge_cal_rext(struct gsw_mt753x *gsw, u8 phyaddr, u32 delay)
++{
++	u8 rg_zcal_ctrl, all_ana_cal_status;
++	u16 ad_cal_comp_out_init;
++	u16 dev1e_e0_ana_cal_r5;
++	int calibration_polarity;
++	u8 cnt = 0;
++	u16 dev1e_17a_tmp, dev1e_e0_tmp;
++
++	/* *** Iext/Rext Cal start ************ */
++	all_ana_cal_status = ANACAL_INIT;
++	/* analog calibration enable, Rext calibration enable */
++	/* 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a */
++	/* 1e_dc[0]:rg_txvos_calen */
++	/* 1e_e1[4]:rg_cal_refsel(0:1.2V) */
++	//tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00db, 0x1110)
++	tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00db, 0x1110);
++	//tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dc, 0x0000);
++	tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00dc, 0);
++	//tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00e1, 0x0000);
++	//tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00e1, 0x10);
++	
++	rg_zcal_ctrl = 0x20;/* start with 0 dB */
++	dev1e_e0_ana_cal_r5 = tc_phy_read_dev_reg(gsw,  PHY0, 0x1e, 0xe0); // get default value
++	/* 1e_e0[5:0]:rg_zcal_ctrl */
++	tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0xe0, rg_zcal_ctrl);
++	all_ana_cal_status = all_ge_ana_cal_wait(gsw, delay, phyaddr);/* delay 20 usec */
++
++	if (all_ana_cal_status == 0) {
++		all_ana_cal_status = ANACAL_ERROR;
++		printk(" GE Rext AnaCal ERROR init!   \r\n");
++		return -1;
++	}
++	/* 1e_17a[8]:ad_cal_comp_out */
++	ad_cal_comp_out_init = (tc_phy_read_dev_reg(gsw,  PHY0, 0x1e, 0x017a) >> 8) & 0x1;
++	if (ad_cal_comp_out_init == 1)
++		calibration_polarity = -1;
++	else /* ad_cal_comp_out_init == 0 */
++		calibration_polarity = 1;
++	cnt = 0;
++	while (all_ana_cal_status < ANACAL_ERROR) {
++		cnt++;
++		rg_zcal_ctrl += calibration_polarity;
++		tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0xe0, (rg_zcal_ctrl));
++		all_ana_cal_status = all_ge_ana_cal_wait(gsw, delay, phyaddr); /* delay 20 usec */
++		dev1e_17a_tmp = tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0x017a);
++		if (all_ana_cal_status == 0) {
++			all_ana_cal_status = ANACAL_ERROR;
++			printk("  GE Rext AnaCal ERROR 2!   \r\n");
++			return -1;
++		} else if (((dev1e_17a_tmp >> 8) & 0x1) != ad_cal_comp_out_init) {
++			all_ana_cal_status = ANACAL_FINISH;
++			//printk("  GE Rext AnaCal Done! (%d)(0x%x)  \r\n", cnt, rg_zcal_ctrl);
++		} else {
++			dev1e_17a_tmp = tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0x017a);
++			dev1e_e0_tmp =	tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0xe0);
++			if ((rg_zcal_ctrl == 0x3F) || (rg_zcal_ctrl == 0x00)) {
++				all_ana_cal_status = ANACAL_SATURATION;  /* need to FT(IC fail?) */
++				printk(" GE Rext AnaCal Saturation!  \r\n");
++				rg_zcal_ctrl = 0x20;  /* 0 dB */
++			} 
++		}
++	}
++
++	if (all_ana_cal_status == ANACAL_ERROR) {
++		rg_zcal_ctrl = 0x20;  /* 0 dB */
++		tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00e0, (dev1e_e0_ana_cal_r5 | rg_zcal_ctrl));
++	} else if(all_ana_cal_status == ANACAL_FINISH){
++		//tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00e0, (dev1e_e0_ana_cal_r5 | rg_zcal_ctrl));
++		tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00e0, ((rg_zcal_ctrl << 8) | rg_zcal_ctrl));
++		printk("0x1e-e0 = %x\n", tc_phy_read_dev_reg(gsw,  PHY0, 0x1e, 0x00e0));
++		/* ****  1f_115[2:0] = rg_zcal_ctrl[5:3]  // Mog review */
++		tc_phy_write_dev_reg(gsw, PHY0, 0x1f, 0x0115, ((rg_zcal_ctrl & 0x3f) >> 3));
++		printk("0x1f-115 = %x\n", tc_phy_read_dev_reg(gsw,  PHY0, 0x1f, 0x115));
++		printk("  GE Rext AnaCal Done! (%d)(0x%x)  \r\n", cnt, rg_zcal_ctrl);
++		ge_cal_flag = 1;
++	} else {
++		printk("GE Rxet cal something wrong2\n");
++	}
++	tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00db, 0x0000);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00db, 0x0000);
++	tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00dc, 0x0000);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dc, 0x0000);
++
++	return 0;
++}
++
++//-----------------------------------------------------------------
++int ge_cal_r50(struct gsw_mt753x *gsw, u8 phyaddr, u32 delay)
++{
++	u8 rg_zcal_ctrl, all_ana_cal_status, calibration_pair;
++	u16 ad_cal_comp_out_init;
++	u16 dev1e_e0_ana_cal_r5;
++	int calibration_polarity;
++	u8 cnt = 0;
++	tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00db, 0x1100);	// 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a
++	tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00dc, 0x0000);	// 1e_dc[0]:rg_txvos_calen
++
++	for(calibration_pair = ANACAL_PAIR_A; calibration_pair <= ANACAL_PAIR_D; calibration_pair ++) {
++		rg_zcal_ctrl = 0x20;  						// start with 0 dB
++		dev1e_e0_ana_cal_r5 = (tc_phy_read_dev_reg(gsw,  PHY0, 0x1e, 0x00e0) & (~0x003f));
++		tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00e0, (dev1e_e0_ana_cal_r5 | rg_zcal_ctrl));	// 1e_e0[5:0]:rg_zcal_ctrl
++		if(calibration_pair == ANACAL_PAIR_A)
++		{
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00db, 0x1101);	// 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dc, 0x0000);	
++			//printk("R50 pair A 1e_db=%x 1e_db=%x\n", tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x00db), tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x00dc));
++
++		}
++		else if(calibration_pair == ANACAL_PAIR_B)
++		{
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00db, 0x1100);	// 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dc, 0x1000);	// 1e_dc[12]:rg_zcalen_b
++			//printk("R50 pair B 1e_db=%x 1e_db=%x\n", tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x00db),tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x00dc));
++
++		}
++		else if(calibration_pair == ANACAL_PAIR_C)
++		{
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00db, 0x1100);	// 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dc, 0x0100);	// 1e_dc[8]:rg_zcalen_c
++			//printk("R50 pair C 1e_db=%x 1e_db=%x\n", tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x00db), tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x00dc));
++
++		}
++		else // if(calibration_pair == ANACAL_PAIR_D)
++		{
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00db, 0x1100);	// 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dc, 0x0010);	// 1e_dc[4]:rg_zcalen_d
++			//printk("R50 pair D 1e_db=%x 1e_db=%x\n", tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x00db), tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x00dc));
++
++		}
++
++		all_ana_cal_status = all_ge_ana_cal_wait(gsw, delay, phyaddr); // delay 20 usec
++		if(all_ana_cal_status == 0)
++		{
++			all_ana_cal_status = ANACAL_ERROR;	
++			printk( "GE R50 AnaCal ERROR init!   \r\n");
++			return -1;
++		}
++	
++		ad_cal_comp_out_init = (tc_phy_read_dev_reg(gsw,  PHY0, 0x1e, 0x017a)>>8) & 0x1;		// 1e_17a[8]:ad_cal_comp_out	
++		if(ad_cal_comp_out_init == 1)
++			calibration_polarity = -1;
++		else
++			calibration_polarity = 1;
++
++		cnt = 0;
++		while(all_ana_cal_status < ANACAL_ERROR)
++		{
++			cnt ++;
++			rg_zcal_ctrl += calibration_polarity;
++			tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00e0, (dev1e_e0_ana_cal_r5 | rg_zcal_ctrl));
++			all_ana_cal_status = all_ge_ana_cal_wait(gsw, delay, phyaddr); // delay 20 usec
++
++			if(all_ana_cal_status == 0)
++			{
++				all_ana_cal_status = ANACAL_ERROR;	
++				printk( "  GE R50 AnaCal ERROR 2!   \r\n");
++				return -1;
++			}
++			else if(((tc_phy_read_dev_reg(gsw,  PHY0, 0x1e, 0x017a)>>8)&0x1) != ad_cal_comp_out_init) 
++			{
++				all_ana_cal_status = ANACAL_FINISH;	
++			}
++			else {
++				if((rg_zcal_ctrl == 0x3F)||(rg_zcal_ctrl == 0x00))	
++				{
++					all_ana_cal_status = ANACAL_SATURATION;  // need to FT
++					printk( " GE R50 AnaCal Saturation!  \r\n");
++				}
++			}
++		}
++		
++		if(all_ana_cal_status == ANACAL_ERROR) {	
++			rg_zcal_ctrl = 0x20;  // 0 dB
++			//tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00e0, (dev1e_e0_ana_cal_r5 | rg_zcal_ctrl));
++		}
++		else {
++			rg_zcal_ctrl = MT753x_ZCAL_TO_R50ohm_GE_TBL_100[rg_zcal_ctrl - 9];	// wait Mog zcal/r50 mapping table
++			printk( " GE R50 AnaCal Done! (%d) (0x%x)(0x%x) \r\n", cnt, rg_zcal_ctrl, (rg_zcal_ctrl|0x80));
++		}
++		
++		if(calibration_pair == ANACAL_PAIR_A) {
++			ad_cal_comp_out_init = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x0174) & (~0x7f00);
++			//ad_cal_comp_out_init = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x0174);
++			//printk( " GE-a 1e_174(0x%x)(0x%x), 1e_175(0x%x)  \r\n", tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x0174), ad_cal_comp_out_init, tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x0175));
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0174, (ad_cal_comp_out_init | (((rg_zcal_ctrl<<8)&0xff00) | 0x8000)));	// 1e_174[15:8]
++			//printk( " GE-a 1e_174(0x%x), 1e_175(0x%x)  \r\n", tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x0174), tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x0175));
++		}
++		else if(calibration_pair == ANACAL_PAIR_B) {
++			ad_cal_comp_out_init = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x0174) & (~0x007f);
++			//ad_cal_comp_out_init = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x0174);
++			//printk( " GE-b 1e_174(0x%x)(0x%x), 1e_175(0x%x)  \r\n", tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x0174), ad_cal_comp_out_init, tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x0175));
++			
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0174, (ad_cal_comp_out_init | (((rg_zcal_ctrl<<0)&0x00ff) | 0x0080)));	// 1e_174[7:0]
++			//printk( " GE-b 1e_174(0x%x), 1e_175(0x%x)  \r\n", tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x0174), tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x0175));
++		}
++		else if(calibration_pair == ANACAL_PAIR_C) {
++			ad_cal_comp_out_init = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x0175) & (~0x7f00);
++			//ad_cal_comp_out_init = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x0175);
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0175, (ad_cal_comp_out_init | (((rg_zcal_ctrl<<8)&0xff00) | 0x8000)));	// 1e_175[15:8]
++			//printk( " GE-c 1e_174(0x%x), 1e_175(0x%x)  \r\n", tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x0174), tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x0175));
++		} else {// if(calibration_pair == ANACAL_PAIR_D) 
++			ad_cal_comp_out_init = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x0175) & (~0x007f);
++			//ad_cal_comp_out_init = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x0175);
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0175, (ad_cal_comp_out_init | (((rg_zcal_ctrl<<0)&0x00ff) | 0x0080)));	// 1e_175[7:0]
++			//printk( " GE-d 1e_174(0x%x), 1e_175(0x%x)  \r\n", tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x0174), tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x0175));
++		}
++		//tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00e0, ((rg_zcal_ctrl<<8)|rg_zcal_ctrl));
++	}
++	
++	printk( " GE 1e_174(0x%x), 1e_175(0x%x)  \r\n", tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x0174), tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x0175));
++	tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00db, 0x0000);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00db, 0x0000);
++	tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00dc, 0x0000);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dc, 0x0000);
++
++	return 0;
++}
++
++/*
++ * ge_cal_tx_offset - search the Tx DC-offset code for each twisted pair
++ * (A..D) of one GE PHY port.
++ *
++ * gsw:     switch context handed to the tc_phy_* MDIO accessors
++ * phyaddr: PHY address of the port under calibration
++ * delay:   settle delay (usec) forwarded to all_ge_ana_cal_wait()
++ *
++ * For each pair the routine starts from the middle of MT753x_TX_OFFSET_TBL
++ * (index 31), picks a search direction from the initial comparator output
++ * (dev 0x1e reg 0x17a bit[8]) and steps the table index one entry at a time
++ * until the comparator flips (ANACAL_FINISH) or the index saturates at
++ * 0/0x3f (ANACAL_SATURATION).  Returns 0 on success, -1 if the analog-cal
++ * ready flag never asserts.  NOTE(review): tabl_idx may reach 0x3f, so
++ * MT753x_TX_OFFSET_TBL is assumed to hold at least 64 entries - confirm
++ * against the table definition.
++ */
++int ge_cal_tx_offset(struct gsw_mt753x *gsw,  u8 phyaddr, u32 delay)
++{
++	u8 all_ana_cal_status, calibration_pair;
++	u16 ad_cal_comp_out_init;
++	int calibration_polarity, tx_offset_temp;
++	u8 tx_offset_reg_shift, tabl_idx, i;
++	u8 cnt = 0;
++	u16 tx_offset_reg, reg_temp, cal_temp;
++	//switch_phy_write(phyaddr, R0, 0x2100);//harry tmp
++	tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00db, 0x0100);	// 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a
++	tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00dc, 0x0001);	// 1e_dc[0]:rg_txvos_calen
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0096, 0x8000);	// 1e_96[15]:bypass_tx_offset_cal, Hw bypass, Fw cal
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x003e, 0xf808);	// 1e_3e
++	/* Clear the per-pair calibration-enable register (1e_dd) on PHYs 0-4. */
++	for(i = 0; i <= 4; i++)
++		tc_phy_write_dev_reg(gsw, i, 0x1e, 0x00dd, 0x0000);	
++	for(calibration_pair = ANACAL_PAIR_A; calibration_pair <= ANACAL_PAIR_D; calibration_pair ++)
++	{
++		/* Start the search from the table midpoint. */
++		tabl_idx = 31;
++		tx_offset_temp = MT753x_TX_OFFSET_TBL[tabl_idx];
++
++		/* Per-pair setup: enable that pair's cal path, drive 0 V into both
++		 * DACs, and select which 1e_172/1e_173 bit-field holds its code. */
++		if(calibration_pair == ANACAL_PAIR_A) {
++			//tc_phy_write_dev_reg(phyaddr, 0x1e, 0x145, 0x5010);
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dd, 0x1000);				// 1e_dd[12]:rg_txg_calen_a
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x017d, (0x8000|DAC_IN_0V));	// 1e_17d:dac_in0_a
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0181, (0x8000|DAC_IN_0V));	// 1e_181:dac_in1_a
++			//printk("tx offset pairA 1e_dd = %x, 1e_17d=%x, 1e_181=%x\n", tc_phy_read_dev_reg(phyaddr, 0x1e, 0x00dd), tc_phy_read_dev_reg(phyaddr, 0x1e, 0x017d), tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0181));
++			reg_temp = (tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0172) & (~0x3f00));
++			tx_offset_reg_shift = 8;									// 1e_172[13:8]
++			tx_offset_reg = 0x0172;
++
++			//tc_phy_write_dev_reg(phyaddr, 0x1e, tx_offset_reg, (reg_temp|(tx_offset_temp<<tx_offset_reg_shift)));
++		} else if(calibration_pair == ANACAL_PAIR_B) {
++			//tc_phy_write_dev_reg(phyaddr, 0x1e, 0x145, 0x5018);
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dd, 0x0100);				// 1e_dd[8]:rg_txg_calen_b
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x017e, (0x8000|DAC_IN_0V));	// 1e_17e:dac_in0_b
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0182, (0x8000|DAC_IN_0V));	// 1e_182:dac_in1_b
++			//printk("tx offset pairB 1e_dd = %x, 1e_17d=%x, 1e_181=%x\n", tc_phy_read_dev_reg(phyaddr, 0x1e, 0x00dd), tc_phy_read_dev_reg(phyaddr, 0x1e, 0x017d), tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0181));
++			reg_temp = (tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0172) & (~0x003f));
++			tx_offset_reg_shift = 0;									// 1e_172[5:0]
++			tx_offset_reg = 0x0172;
++			//tc_phy_write_dev_reg(phyaddr, 0x1e, tx_offset_reg, (reg_temp|(tx_offset_temp<<tx_offset_reg_shift)));
++		} else if(calibration_pair == ANACAL_PAIR_C) {
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dd, 0x0010);				// 1e_dd[4]:rg_txg_calen_c
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x017f, (0x8000|DAC_IN_0V));	// 1e_17f:dac_in0_c
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0183, (0x8000|DAC_IN_0V));	// 1e_183:dac_in1_c
++			reg_temp = (tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0173) & (~0x3f00));
++			//printk("tx offset pairC 1e_dd = %x, 1e_17d=%x, 1e_181=%x\n", tc_phy_read_dev_reg(phyaddr, 0x1e, 0x00dd), tc_phy_read_dev_reg(phyaddr, 0x1e, 0x017d), tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0181));
++			tx_offset_reg_shift = 8;									// 1e_173[13:8]
++			tx_offset_reg = 0x0173;
++			//tc_phy_write_dev_reg(phyaddr, 0x1e, tx_offset_reg, (reg_temp|(tx_offset_temp<<tx_offset_reg_shift)));
++		} else {// if(calibration_pair == ANACAL_PAIR_D)
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dd, 0x0001);				// 1e_dd[0]:rg_txg_calen_d
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0180, (0x8000|DAC_IN_0V));	// 1e_180:dac_in0_d
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0184, (0x8000|DAC_IN_0V));	// 1e_184:dac_in1_d
++			//printk("tx offset pairD 1e_dd = %x, 1e_17d=%x, 1e_181=%x\n", tc_phy_read_dev_reg(phyaddr, 0x1e, 0x00dd), tc_phy_read_dev_reg(phyaddr, 0x1e, 0x017d), tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0181));
++			reg_temp = (tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0173) & (~0x003f));
++			tx_offset_reg_shift = 0;									// 1e_173[5:0]
++			tx_offset_reg = 0x0173;
++			//tc_phy_write_dev_reg(phyaddr, 0x1e, tx_offset_reg, (reg_temp|(tx_offset_temp<<tx_offset_reg_shift)));
++		}
++		tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_offset_reg, (reg_temp|(tx_offset_temp<<tx_offset_reg_shift)));	// 1e_172, 1e_173
++		all_ana_cal_status = all_ge_ana_cal_wait(gsw, delay, phyaddr); // delay 20 usec
++		if(all_ana_cal_status == 0) {
++			all_ana_cal_status = ANACAL_ERROR;	
++			printk( " GE Tx offset AnaCal ERROR init!   \r\n");
++			return -1;
++		}
++	
++		/* Choose the search direction from the first comparator sample. */
++		ad_cal_comp_out_init = (tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0x017a)>>8) & 0x1;		// 1e_17a[8]:ad_cal_comp_out	
++		if(ad_cal_comp_out_init == 1)
++			calibration_polarity = 1;
++		else
++			calibration_polarity = -1;
++
++		cnt = 0;
++		//printk("TX offset cnt = %d, tabl_idx= %x, offset_val = %x\n", cnt, tabl_idx, MT753x_TX_OFFSET_TBL[tabl_idx]);
++		/* Step through the table until the comparator output flips or the
++		 * index saturates; any wait timeout aborts the whole calibration. */
++		while(all_ana_cal_status < ANACAL_ERROR) {
++			
++			cnt ++;
++			tabl_idx += calibration_polarity;
++			//tx_offset_temp += calibration_polarity;
++			//cal_temp = tx_offset_temp;
++			cal_temp = MT753x_TX_OFFSET_TBL[tabl_idx];
++			//printk("TX offset cnt = %d, tabl_idx= %x, offset_val = %x\n", cnt, tabl_idx, MT753x_TX_OFFSET_TBL[tabl_idx]);
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_offset_reg, (reg_temp|(cal_temp<<tx_offset_reg_shift)));
++
++			all_ana_cal_status = all_ge_ana_cal_wait(gsw, delay, phyaddr); // delay 20 usec
++			if(all_ana_cal_status == 0) {
++				all_ana_cal_status = ANACAL_ERROR;	
++				printk( " GE Tx offset AnaCal ERROR init 2!   \r\n");
++				return -1;
++			} else if(((tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0x017a)>>8)&0x1) != ad_cal_comp_out_init) {
++				all_ana_cal_status = ANACAL_FINISH;	
++			} else {
++				if((tabl_idx == 0)||(tabl_idx == 0x3f)) {
++					all_ana_cal_status = ANACAL_SATURATION;  // need to FT
++					printk( " GE Tx offset AnaCal Saturation!  \r\n");
++				}
++			}
++		}
++		
++		/* On failure fall back to the 0 mV offset code; note the loop above
++		 * only leaves ANACAL_FINISH or ANACAL_SATURATION here, the timeout
++		 * path has already returned -1. */
++		if(all_ana_cal_status == ANACAL_ERROR) {	
++			tx_offset_temp = TX_AMP_OFFSET_0MV;
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_offset_reg, (reg_temp|(tx_offset_temp<<tx_offset_reg_shift)));
++		} else {
++			printk( " GE Tx offset AnaCal Done! (pair-%d)(%d)(0x%x) 0x1e_%x=0x%x\n", calibration_pair, cnt, MT753x_TX_OFFSET_TBL[tabl_idx], tx_offset_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_offset_reg));
++		}
++	}
++
++	/* All pairs done: release the DAC force values ... */
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x017d, 0x0000);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x017e, 0x0000);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x017f, 0x0000);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0180, 0x0000);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0181, 0x0000);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0182, 0x0000);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0183, 0x0000);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0184, 0x0000);
++	
++	/* ... and disable every calibration circuit that was enabled above. */
++	tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00db, 0x0000);	// disable analog calibration circuit
++	tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00dc, 0x0000);	// disable Tx offset calibration circuit
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00db, 0x0000);	// disable analog calibration circuit
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dc, 0x0000);	// disable Tx offset calibration circuit
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x003e, 0x0000);	// disable Tx VLD force mode
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dd, 0x0000);	// disable Tx offset/amplitude calibration circuit	
++
++	return 0;
++}
++
++/*
++ * ge_cal_tx_amp - calibrate the Tx amplitude code for each twisted pair
++ * (A..D) of one GE PHY port.
++ *
++ * gsw:     switch context handed to the tc_phy_* MDIO accessors
++ * phyaddr: PHY address of the port under calibration
++ * delay:   settle delay (usec) forwarded to all_ge_ana_cal_wait()
++ *
++ * Starting from code 0x20 (0 dB midpoint) the loop steps the amplitude
++ * code up or down - direction chosen from the first comparator sample of
++ * dev 0x1e reg 0x17a bit[8] - until the comparator flips, then applies
++ * per-port/per-pair empirical trims and writes the derived 1000M and
++ * test-mode/100M amplitude fields (1e_12/16/17/18/19/20/21/22).
++ * Registers 1e_11 and 1f_300 are saved on entry and restored on exit.
++ * Returns 0 on success, -1 when the analog-cal ready flag never asserts.
++ */
++int ge_cal_tx_amp(struct gsw_mt753x *gsw, u8 phyaddr, u32 delay)
++{
++	u8	all_ana_cal_status, calibration_pair, i;
++	u16	ad_cal_comp_out_init;
++	int	calibration_polarity;
++	u32	tx_amp_reg_shift; 
++	u16	reg_temp;
++	u32	tx_amp_temp, tx_amp_reg, cnt=0, tx_amp_reg_100;
++	u32	debug_tmp, reg_backup, reg_tmp; 
++	u32	orig_1e_11, orig_1f_300;
++
++	tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00db, 0x1100);	// 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a
++	tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00dc, 0x0001);	// 1e_dc[0]:rg_txvos_calen
++	tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00e1, 0x0010);	// 1e_e1[4]:select 1V
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x003e, 0xf808);	// 1e_3e:enable Tx VLD
++
++	/* Save 1e_11 and 1f_300 so they can be restored after calibration. */
++	orig_1e_11 = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x11);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x11, 0xff00);
++//	tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x27a, 0x33);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0xc9, 0xffff);
++	orig_1f_300 = tc_phy_read_dev_reg(gsw, phyaddr, 0x1f, 0x300);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x300, 0x4);
++	/* Clear the per-pair calibration-enable register (1e_dd) on PHYs 0-4. */
++	for(i = 0; i <= 4; i++)
++		tc_phy_write_dev_reg(gsw, i, 0x1e, 0x00dd, 0x0000);
++	for(calibration_pair = ANACAL_PAIR_A; calibration_pair <= ANACAL_PAIR_D; calibration_pair ++) {
++		tx_amp_temp = 0x20;	// start with 0 dB
++
++		/* Per-pair setup: enable that pair's amp-cal path, drive 2 V into
++		 * both DACs, and select the 1000M and 100M amplitude registers. */
++		if(calibration_pair == ANACAL_PAIR_A) {
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dd, 0x1000);				// 1e_dd[12]:tx_a amp calibration enable
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x017d, (0x8000|DAC_IN_2V));	// 1e_17d:dac_in0_a	
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0181, (0x8000|DAC_IN_2V));	// 1e_181:dac_in1_a
++			reg_temp = (tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x012) & (~0xfc00));
++			tx_amp_reg_shift = 10;										// 1e_12[15:10]
++			tx_amp_reg = 0x12;
++			tx_amp_reg_100 = 0x16;
++		} else if(calibration_pair == ANACAL_PAIR_B) {
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dd, 0x0100);				// 1e_dd[8]:tx_b amp calibration enable
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x017e, (0x8000|DAC_IN_2V));	// 1e_17e:dac_in0_b
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0182, (0x8000|DAC_IN_2V));	// 1e_182:dac_in1_b
++			reg_temp = (tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x017) & (~0x3f00));
++			tx_amp_reg_shift = 8;										// 1e_17[13:8]
++			tx_amp_reg = 0x17;
++			tx_amp_reg_100 = 0x18;
++		} else if(calibration_pair == ANACAL_PAIR_C) {
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dd, 0x0010);				// 1e_dd[4]:tx_c amp calibration enable
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x017f, (0x8000|DAC_IN_2V));	// 1e_17f:dac_in0_c
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0183, (0x8000|DAC_IN_2V));	// 1e_183:dac_in1_c
++			reg_temp = (tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x019) & (~0x3f00));
++			tx_amp_reg_shift = 8;										// 1e_19[13:8]
++			tx_amp_reg = 0x19;
++			tx_amp_reg_100 = 0x20;
++		} else { //if(calibration_pair == ANACAL_PAIR_D)
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dd, 0x0001);				// 1e_dd[0]:tx_d amp calibration enable
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0180, (0x8000|DAC_IN_2V));	// 1e_180:dac_in0_d
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0184, (0x8000|DAC_IN_2V));	// 1e_184:dac_in1_d
++			reg_temp = (tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x021) & (~0x3f00));
++			tx_amp_reg_shift = 8;										// 1e_21[13:8]
++			tx_amp_reg = 0x21;
++			tx_amp_reg_100 = 0x22;
++		}
++		tc_phy_write_dev_reg( gsw, phyaddr, 0x1e, tx_amp_reg, (tx_amp_temp|(tx_amp_temp<<tx_amp_reg_shift)));	// 1e_12, 1e_17, 1e_19, 1e_21
++		tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100, (tx_amp_temp|(tx_amp_temp<<tx_amp_reg_shift)));
++		all_ana_cal_status = all_ge_ana_cal_wait(gsw, delay, phyaddr); 	// delay 20 usec
++		if(all_ana_cal_status == 0) {
++			all_ana_cal_status = ANACAL_ERROR;	
++			printk( " GE Tx amp AnaCal ERROR init init!   \r\n");
++			return -1;
++		}
++	
++		/* Choose the search direction from the first comparator sample. */
++		ad_cal_comp_out_init = (tc_phy_read_dev_reg(gsw,  PHY0, 0x1e, 0x017a)>>8) & 0x1;		// 1e_17a[8]:ad_cal_comp_out
++		if(ad_cal_comp_out_init == 1)
++			calibration_polarity = -1;
++		else
++			calibration_polarity = 1;
++
++		cnt =0;
++		/* Step the amplitude code until the comparator output flips. */
++		while(all_ana_cal_status < ANACAL_ERROR) {
++			cnt ++;
++			tx_amp_temp += calibration_polarity;
++			//printk("tx_amp : %x, 1e %x = %x\n", tx_amp_temp, tx_amp_reg, (reg_temp|(tx_amp_temp<<tx_amp_reg_shift)));
++			tc_phy_write_dev_reg( gsw, phyaddr, 0x1e, tx_amp_reg, (tx_amp_temp|(tx_amp_temp<<tx_amp_reg_shift)));
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100, (tx_amp_temp|(tx_amp_temp<<tx_amp_reg_shift)));
++			all_ana_cal_status = all_ge_ana_cal_wait(gsw, delay, phyaddr); // delay 20 usec
++			if(all_ana_cal_status == 0) {
++				all_ana_cal_status = ANACAL_ERROR;	
++				printk( " GE Tx amp AnaCal ERROR 2!   \r\n");
++				return -1;
++			} else if(((tc_phy_read_dev_reg(gsw,  PHY0, 0x1e, 0x017a)>>8)&0x1) != ad_cal_comp_out_init) {
++				//printk("TX AMP ANACAL_FINISH\n");
++				all_ana_cal_status = ANACAL_FINISH;
++				/* Comparator flipped: apply per-port, per-pair empirical
++				 * trims to the raw code.  NOTE(review): these constants
++				 * look like board/silicon tuning values - confirm origin. */
++				if (phyaddr == 0) {
++					if (calibration_pair == ANACAL_PAIR_A)
++						tx_amp_temp = tx_amp_temp - 2;
++					else if(calibration_pair == ANACAL_PAIR_B)
++						tx_amp_temp = tx_amp_temp - 1;
++					else if(calibration_pair == ANACAL_PAIR_C)
++						tx_amp_temp = tx_amp_temp - 2;
++					else if(calibration_pair == ANACAL_PAIR_D)
++						tx_amp_temp = tx_amp_temp - 1;
++				} else if (phyaddr == 1) {
++					if (calibration_pair == ANACAL_PAIR_A)
++						tx_amp_temp = tx_amp_temp - 1;
++					else if(calibration_pair == ANACAL_PAIR_B)
++						tx_amp_temp = tx_amp_temp ;
++					else if(calibration_pair == ANACAL_PAIR_C)
++						tx_amp_temp = tx_amp_temp - 1;
++					else if(calibration_pair == ANACAL_PAIR_D)
++						tx_amp_temp = tx_amp_temp - 1;
++				} else if (phyaddr == 2) {
++					if (calibration_pair == ANACAL_PAIR_A)
++						tx_amp_temp = tx_amp_temp;
++					else if(calibration_pair == ANACAL_PAIR_B)
++						tx_amp_temp = tx_amp_temp - 1;
++					else if(calibration_pair == ANACAL_PAIR_C)
++						tx_amp_temp = tx_amp_temp;
++					else if(calibration_pair == ANACAL_PAIR_D)
++						tx_amp_temp = tx_amp_temp - 1;
++				} else if (phyaddr == 3) {
++					tx_amp_temp = tx_amp_temp;
++				} else if (phyaddr == 4) {
++					if (calibration_pair == ANACAL_PAIR_A)
++						tx_amp_temp = tx_amp_temp;
++					else if(calibration_pair == ANACAL_PAIR_B)
++						tx_amp_temp = tx_amp_temp - 1;
++					else if(calibration_pair == ANACAL_PAIR_C)
++						tx_amp_temp = tx_amp_temp;
++					else if(calibration_pair == ANACAL_PAIR_D)
++						tx_amp_temp = tx_amp_temp;
++				}								
++				/* Commit the trimmed code to both amplitude registers. */
++				reg_temp = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, tx_amp_reg)&(~0xff00);
++				tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((tx_amp_temp)<<tx_amp_reg_shift)));
++				tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg, (tx_amp_temp|((tx_amp_temp)<<tx_amp_reg_shift)));
++				/* Second round of per-port corrections applied on top of
++				 * the committed values; register-specific offsets below
++				 * are likewise empirical - TODO confirm. */
++				if (phyaddr == 0) {
++					if ((tx_amp_reg == 0x12) || (tx_amp_reg == 0x17)) {
++						//printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++						tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg, ((tx_amp_temp|((tx_amp_temp)<<tx_amp_reg_shift)) + 7));
++						//printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++					}
++					if (tx_amp_reg_100 == 0x16) {
++						//printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++						tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((tx_amp_temp+1+4)<<tx_amp_reg_shift)));
++						//printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++					}
++					if (tx_amp_reg_100 == 0x18) {
++						//printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++						tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((tx_amp_temp+4)<<tx_amp_reg_shift)));
++						//printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++					}
++				} else if (phyaddr == 1) {
++					if (tx_amp_reg == 0x12) {
++						//printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++						tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg, ((tx_amp_temp|((tx_amp_temp)<<tx_amp_reg_shift)) + 9));
++						//printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++					}
++					if (tx_amp_reg == 0x17){
++						//printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++						tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg, ((tx_amp_temp|((tx_amp_temp)<<tx_amp_reg_shift)) + 7));
++						//printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++					}
++					if (tx_amp_reg_100 == 0x16) {
++						//printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++						tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((tx_amp_temp+4)<<tx_amp_reg_shift)));
++						//printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++					}
++					if (tx_amp_reg_100 == 0x18) {
++						//printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++						tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((tx_amp_temp-1+4)<<tx_amp_reg_shift)));
++						//printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++					}
++				} else if (phyaddr == 2) {
++					if ((tx_amp_reg == 0x12) || (tx_amp_reg == 0x17)) {
++						//printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++						tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg, ((tx_amp_temp|((tx_amp_temp)<<tx_amp_reg_shift)) + 6));
++						//printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++					}
++					if ((tx_amp_reg_100 == 0x16) || (tx_amp_reg_100 == 0x18)) {
++						//printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++						tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((tx_amp_temp-1+4)<<tx_amp_reg_shift)));
++						//printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++					}
++				} else if (phyaddr == 3) {
++					if (tx_amp_reg == 0x12) {
++						//printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++						tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg, ((tx_amp_temp|((tx_amp_temp)<<tx_amp_reg_shift)) + 4));
++						//printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++					}
++					if (tx_amp_reg == 0x17) {
++						//printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++						tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg, ((tx_amp_temp|((tx_amp_temp)<<tx_amp_reg_shift)) + 7));
++						//printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++					}
++					if (tx_amp_reg_100 == 0x16) {
++						//printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++						tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((tx_amp_temp-2+4)<<tx_amp_reg_shift)));
++						//printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++					}
++					if (tx_amp_reg_100 == 0x18) {
++						//printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++						tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((tx_amp_temp-1+3)<<tx_amp_reg_shift)));
++						//printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++					}
++				} else if (phyaddr == 4) {
++					if ((tx_amp_reg == 0x12) || (tx_amp_reg == 0x17)) {
++						//printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++						tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg, ((tx_amp_temp|((tx_amp_temp)<<tx_amp_reg_shift)) + 5));
++						//printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++					}
++					if (tx_amp_reg_100 == 0x16) {
++						//printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++						tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((tx_amp_temp-2+4)<<tx_amp_reg_shift)));
++						//printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++					}
++					if (tx_amp_reg_100 == 0x18) {
++						//printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++						tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((tx_amp_temp-1+4)<<tx_amp_reg_shift)));
++						//printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++					}
++				}	
++
++				/* Derive the final 1000M and test-mode amplitude fields by
++				 * subtracting 8 from the calibrated code in each register.
++				 * NOTE(review): pairs A/B clear the whole register before
++				 * rewriting while pairs C/D mask-preserve the other bits -
++				 * confirm whether that asymmetry is intentional. */
++				if (calibration_pair == ANACAL_PAIR_A){
++					reg_backup = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x12);
++					reg_tmp = ((reg_backup & 0xfc00) >> 10);
++					reg_tmp -= 8;
++                                       reg_backup = 0x0000;
++                                       reg_backup |= ((reg_tmp << 10) | (reg_tmp << 0));
++					tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x12, reg_backup);
++					reg_backup = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x12);
++					//printk("PORT[%d] 1e.012 = %x (OFFSET_1000M_PAIR_A)\n", phyaddr, reg_backup);
++					reg_backup = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x16);
++					reg_tmp = ((reg_backup & 0x3f) >> 0);
++					reg_tmp -= 8;
++					reg_backup = (reg_backup & (~0x3f));
++					reg_backup |= (reg_tmp << 0);
++					tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x16, reg_backup);
++					reg_backup = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x16);
++					//printk("PORT[%d] 1e.016 = %x (OFFSET_TESTMODE_1000M_PAIR_A)\n", phyaddr, reg_backup);
++				}
++				else if(calibration_pair == ANACAL_PAIR_B){
++					reg_backup = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x17);
++					reg_tmp = ((reg_backup & 0x3f00) >> 8);
++					reg_tmp -= 8;
++                                       reg_backup = 0x0000;
++                                       reg_backup |= ((reg_tmp << 8) | (reg_tmp << 0));
++					tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x17, reg_backup);
++					reg_backup = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x17);
++					//printk("PORT[%d] 1e.017 = %x (OFFSET_1000M_PAIR_B)\n", phyaddr, reg_backup);
++					reg_backup = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x18);
++					reg_tmp = ((reg_backup & 0x3f) >> 0);
++					reg_tmp -= 8;
++					reg_backup = (reg_backup & (~0x3f));
++					reg_backup |= (reg_tmp << 0);
++					tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x18, reg_backup);
++					reg_backup = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x18);
++					//printk("PORT[%d] 1e.018 = %x (OFFSET_TESTMODE_1000M_PAIR_B)\n", phyaddr, reg_backup);
++				}
++				else if(calibration_pair == ANACAL_PAIR_C){
++					reg_backup = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x19);
++					reg_tmp = ((reg_backup & 0x3f00) >> 8);
++					reg_tmp -= 8;
++					reg_backup = (reg_backup & (~0x3f00));
++					reg_backup |= (reg_tmp << 8);
++					tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x19, reg_backup);
++					reg_backup = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x19);
++					//printk("PORT[%d] 1e.019 = %x (OFFSET_1000M_PAIR_C)\n", phyaddr, reg_backup);
++					reg_backup = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x20);
++					reg_tmp = ((reg_backup & 0x3f) >> 0);
++					reg_tmp -= 8;
++					reg_backup = (reg_backup & (~0x3f));
++					reg_backup |= (reg_tmp << 0);
++					tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x20, reg_backup);
++					reg_backup = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x20);
++					//printk("PORT[%d] 1e.020 = %x (OFFSET_TESTMODE_1000M_PAIR_C)\n", phyaddr, reg_backup);
++				}
++				else if(calibration_pair == ANACAL_PAIR_D){
++					reg_backup = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x21);
++					reg_tmp = ((reg_backup & 0x3f00) >> 8);
++					reg_tmp -= 8;
++					reg_backup = (reg_backup & (~0x3f00));
++					reg_backup |= (reg_tmp << 8);
++					tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x21, reg_backup);
++					reg_backup = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x21);
++					//printk("PORT[%d] 1e.021 = %x (OFFSET_1000M_PAIR_D)\n", phyaddr, reg_backup);
++					reg_backup = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x22);
++					reg_tmp = ((reg_backup & 0x3f) >> 0);
++					reg_tmp -= 8;
++					reg_backup = (reg_backup & (~0x3f));
++					reg_backup |= (reg_tmp << 0);
++					tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x22, reg_backup);
++					reg_backup = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x22);
++					//printk("PORT[%d] 1e.022 = %x (OFFSET_TESTMODE_1000M_PAIR_D)\n", phyaddr, reg_backup);
++				}
++
++				/* Read-back of the final registers; results only feed the
++				 * (commented-out) debug prints. */
++				if (calibration_pair == ANACAL_PAIR_A){
++					//printk("PORT (%d) TX_AMP PAIR (A) FINAL CALIBRATION RESULT\n", phyaddr);
++					debug_tmp = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x12);
++					//printk("1e.012 = 0x%x\n", debug_tmp);
++					debug_tmp = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x16);
++					//printk("1e.016 = 0x%x\n", debug_tmp);
++				}
++	
++				else if(calibration_pair == ANACAL_PAIR_B){
++					//printk("PORT (%d) TX_AMP PAIR (A) FINAL CALIBRATION RESULT\n", phyaddr);
++					debug_tmp = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x17);
++					//printk("1e.017 = 0x%x\n", debug_tmp);
++					debug_tmp = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x18);
++					//printk("1e.018 = 0x%x\n", debug_tmp);
++				}
++				else if(calibration_pair == ANACAL_PAIR_C){
++					//printk("PORT (%d) TX_AMP PAIR (A) FINAL CALIBRATION RESULT\n", phyaddr);
++					debug_tmp = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x19);
++					//printk("1e.019 = 0x%x\n", debug_tmp);
++					debug_tmp = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x20);
++					//printk("1e.020 = 0x%x\n", debug_tmp);
++				}
++				else if(calibration_pair == ANACAL_PAIR_D){
++					//printk("PORT (%d) TX_AMP PAIR (A) FINAL CALIBRATION RESULT\n", phyaddr);
++					debug_tmp = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x21);
++					//printk("1e.021 = 0x%x\n", debug_tmp);
++					debug_tmp = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x22);
++					//printk("1e.022 = 0x%x\n", debug_tmp);
++				}
++
++
++				printk( " GE Tx amp AnaCal Done! (pair-%d)(1e_%x = 0x%x)\n", calibration_pair, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++				
++			} else {
++				if((tx_amp_temp == 0x3f)||(tx_amp_temp == 0x00)) {
++					all_ana_cal_status = ANACAL_SATURATION;  // need to FT
++					printk( " GE Tx amp AnaCal Saturation!  \r\n");
++				}
++			}
++		}
++
++		/* On error fall back to the 0 dB midpoint code. */
++		if(all_ana_cal_status == ANACAL_ERROR) {	
++			tx_amp_temp = 0x20;
++			tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg, (reg_temp|(tx_amp_temp<<tx_amp_reg_shift)));
++		}
++	}
++	/* All pairs done: release the DAC force values. */
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x017d, 0x0000);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x017e, 0x0000);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x017f, 0x0000);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0180, 0x0000);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0181, 0x0000);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0182, 0x0000);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0183, 0x0000);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0184, 0x0000);
++	
++	/* disable analog calibration circuit */
++	tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00db, 0x0000);
++	tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00dc, 0x0000);	// disable Tx offset calibration circuit
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00db, 0x0000);	// disable analog calibration circuit
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dc, 0x0000);	// disable Tx offset calibration circuit
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x003e, 0x0000);	// disable Tx VLD force mode
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dd, 0x0000);	// disable Tx offset/amplitude calibration circuit
++	
++	
++
++	//tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x273, 0x2000);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0xc9, 0x0fff);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x145, 0x1000);
++
++	/* Restore CR to default */
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x11, orig_1e_11);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x300, orig_1f_300);
++
++	return 0;
++}
++
++//-----------------------------------------------------------------
++
++int phy_calibration(struct gsw_mt753x *gsw, u8 phyaddr)
++{
++	//u32	reg_tmp,reg_tmp0, reg_tmp1, i;
++	u32 reg_tmp;
++	u32 CALDLY = 40;
++	u32 orig_1e_11, orig_1e_185, orig_1e_e1, orig_1f_100;
++	int ret;
++	/* set [12]AN disable, [8]full duplex, [13/6]1000Mbps */
++	//tc_phy_write_dev_reg(phyaddr, 0x0,  0x0140);
++	switch_phy_write(gsw, phyaddr, R0, 0x140);
++
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x145, 0x1010);/* fix mdi */
++	orig_1e_185 = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, RG_185);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, RG_185, 0);/* disable tx slew control */
++	orig_1f_100 = tc_phy_read_dev_reg(gsw, phyaddr, 0x1f, 0x100);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x100, 0xc000);/* BG voltage output */
++	//tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x403, 0x1099); //bypass efuse
++
++#if (1)
++	//	1f_27c[12:8] cr_da_tx_i2mpb_10m	Trimming TX bias setup(@10M)
++	//tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x27c, 0x1f1f);
++	//tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x27c, 0x3300);
++
++	//reg_tmp1 = tc_phy_read_dev_reg(gsw,  PHY0, 0x1f, 0x27c);
++	//dev1Fh_reg273h TXVLD DA register	- Adjust voltage mode TX amplitude.
++	//tc_phy_write_dev_reg(phyaddr, 0x1f, 0x273, 0);
++	//tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x273, 0x1000);
++	//reg_tmp1 = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1f, 0x273);
++	//printk("reg_tmp1273 = %x\n", reg_tmp1);
++	/*1e_11 TX  overshoot Enable (PAIR A/B/C/D) in gbe mode*/
++
++	orig_1e_11 = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x11);
++	reg_tmp = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x11);
++	reg_tmp = reg_tmp | (0xf << 12);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x11, reg_tmp);
++	orig_1e_e1 = tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0x00e1);
++	tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00e1, 0x10);
++	/* calibration start ============ */
++	printk("CALDLY = %d\n", CALDLY);
++	if(ge_cal_flag == 0){
++		ret = ge_cal_rext(gsw, 0, CALDLY);
++		if (ret == -1){
++			printk("ge_cal_rext error K port =%d\n", phyaddr);
++			return ret;
++		}
++		ge_cal_flag = 1;
++	}
++
++	/* *** R50 Cal start ***************************** */
++	/*phyaddress = 0*/
++	ret = ge_cal_r50(gsw, phyaddr, CALDLY);
++	if (ret == -1){
++		printk("R50 error K port =%d\n", phyaddr);
++		return ret;
++	}
++	/* *** R50 Cal end *** */
++	/* *** Tx offset Cal start *********************** */
++	ret = ge_cal_tx_offset(gsw, phyaddr, CALDLY);
++	if (ret == -1){
++		printk("ge_cal_tx_offset error K port =%d\n", phyaddr);
++		return ret;
++	}
++	/* *** Tx offset Cal end *** */
++
++	/* *** Tx Amp Cal start *** */
++	ret = ge_cal_tx_amp(gsw, phyaddr, CALDLY);
++	if (ret == -1){
++		printk("ge_cal_tx_amp error K port =%d\n", phyaddr);
++		return ret;
++	}
++	/* *** Tx Amp Cal end *** */
++	/*tmp maybe changed*/
++	//tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x27c, 0x1111);
++	//tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x27b, 0x47);
++	//tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x273, 0x2000);
++
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x3a8, 0x0810);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x3aa, 0x0008);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x3ab, 0x0810);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x3ad, 0x0008);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x3ae, 0x0106);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x3b0, 0x0001);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x3b1, 0x0106);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x3b3, 0x0001);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x18c, 0x0001);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x18d, 0x0001);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x18e, 0x0001);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x18f, 0x0001);
++
++	/*da_tx_bias1_b_tx_standby = 5'b10 (dev1eh_reg3aah[12:8])*/
++	reg_tmp = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x3aa);
++	reg_tmp = reg_tmp & ~(0x1f00);
++	reg_tmp = reg_tmp | 0x2 << 8;
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x3aa, reg_tmp);
++
++	/*da_tx_bias1_a_tx_standby = 5'b10 (dev1eh_reg3a9h[4:0])*/
++	reg_tmp = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1e, 0x3a9);
++	reg_tmp = reg_tmp & ~(0x1f);
++	reg_tmp = reg_tmp | 0x2;
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x3a9, reg_tmp);
++
++	/* Restore CR to default */
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, RG_185, orig_1e_185);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x100, orig_1f_100);
++	tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x11, orig_1e_11);
++	tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00e1, orig_1e_e1);
++#endif
++	return 0;
++}
++
++void rx_dc_offset(struct gsw_mt753x *gsw, u8 phyaddr)
++{
++    pr_info("PORT %d RX_DC_OFFSET\n", phyaddr);
++    tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x96, 0x8000);
++    tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x37, 0x3);
++    tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x107, 0x4000);
++    tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x171, 0x1e5);
++    tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x39, 0x200f);
++    udelay(40);
++    tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x39, 0x000f);
++    udelay(40);
++    tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x171, 0x65);
++}
++
++void check_rx_dc_offset_pair_a(struct gsw_mt753x *gsw, u8 phyaddr)
++{
++    u32 reg_tmp;
++
++    tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x15, (phyaddr << 13) | 0x114f);
++    reg_tmp = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1f, 0x1a);
++    reg_tmp = reg_tmp & 0xff;
++    pr_info("before pairA output = %x\n", reg_tmp);
++    udelay(40);
++    tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x15, (phyaddr << 13) | 0x1142);
++    udelay(40);
++    reg_tmp = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1f, 0x1a);
++    reg_tmp = reg_tmp & 0xff;   
++    pr_info("after pairA output = %x\n", reg_tmp);
++    if ((reg_tmp & 0x80) != 0)
++        reg_tmp = (~reg_tmp) + 1;
++    if ((reg_tmp & 0xff) >4)
++        pr_info("pairA RX_DC_OFFSET error");
++}
++
++void check_rx_dc_offset_pair_b(struct gsw_mt753x *gsw, u8 phyaddr)
++{
++    u32 reg_tmp;
++
++    tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x15, (phyaddr << 13) | 0x1151);
++    reg_tmp = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1f, 0x1a);
++    reg_tmp = reg_tmp & 0xff;
++    pr_info("before pairB output = %x\n", reg_tmp);
++    udelay(40);
++    tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x15, (phyaddr << 13) | 0x1143);
++    udelay(40);
++    reg_tmp = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1f, 0x1a);
++    reg_tmp = reg_tmp & 0xff;   
++    pr_info("after pairB output = %x\n", reg_tmp);
++    if ((reg_tmp & 0x80) != 0)
++        reg_tmp = (~reg_tmp) + 1;
++    if ((reg_tmp & 0xff) >4)
++        pr_info("pairB RX_DC_OFFSET error");
++}
++
++void check_rx_dc_offset_pair_c(struct gsw_mt753x *gsw, u8 phyaddr)
++{
++    u32 reg_tmp;
++
++    tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x15, (phyaddr << 13) | 0x1153);
++    reg_tmp = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1f, 0x1a);
++    reg_tmp = reg_tmp & 0xff;
++    pr_info("before pairC output = %x\n", reg_tmp);
++    udelay(40);
++    tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x15, (phyaddr << 13) | 0x1144);
++    udelay(40);
++    reg_tmp = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1f, 0x1a);
++    reg_tmp = reg_tmp & 0xff;   
++    pr_info("after pairC output = %x\n", reg_tmp);
++    if ((reg_tmp & 0x80) != 0)
++        reg_tmp = (~reg_tmp) + 1;
++    if ((reg_tmp & 0xff) >4)
++        pr_info("pairC RX_DC_OFFSET error");
++}
++
++void check_rx_dc_offset_pair_d(struct gsw_mt753x *gsw, u8 phyaddr)
++{
++    u32 reg_tmp;
++
++    tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x15, (phyaddr << 13) | 0x1155);
++    reg_tmp = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1f, 0x1a);
++    reg_tmp = reg_tmp & 0xff;
++    pr_info("before pairD output = %x\n", reg_tmp);
++    udelay(40);
++    tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x15, (phyaddr << 13) | 0x1145);
++    udelay(40);
++    reg_tmp = tc_phy_read_dev_reg(gsw,  phyaddr, 0x1f, 0x1a);
++    reg_tmp = reg_tmp & 0xff;   
++    pr_info("after pairD output = %x\n", reg_tmp);
++    if ((reg_tmp & 0x80) != 0)
++        reg_tmp = (~reg_tmp) + 1;
++    if ((reg_tmp & 0xff) >4)
++        pr_info("pairD RX_DC_OFFSET error");
++}
++
++
++int mt753x_phy_calibration(struct gsw_mt753x *gsw, u8 phyaddr){
++
++	int ret;
++
++	ret = phy_calibration(gsw, phyaddr);
++
++	rx_dc_offset(gsw, phyaddr);
++	check_rx_dc_offset_pair_a(gsw, phyaddr);
++	check_rx_dc_offset_pair_b(gsw, phyaddr);
++	check_rx_dc_offset_pair_c(gsw, phyaddr);
++	check_rx_dc_offset_pair_d(gsw, phyaddr);
++
++	return ret;
++}
+Index: drivers/net/phy/mtk/mt753x/mt753x_phy.h
+===================================================================
+new file mode 100644
+--- /dev/null
++++ b/drivers/net/phy/mtk/mt753x/mt753x_phy.h
+@@ -0,0 +1,145 @@
++/* SPDX-License-Identifier:	GPL-2.0+ */
++/*
++ * Register definitions for MediaTek MT753x Gigabit switches
++ *
++ * Copyright (C) 2018 MediaTek Inc. All Rights Reserved.
++ *
++ * Author: Weijie Gao <weijie.gao@mediatek.com>
++ */
++
++#ifndef _MT753X_PHY_H_
++#define _MT753X_PHY_H_
++
++#include <linux/bitops.h>
++
++/*phy calibration use*/
++#define DEV_1E				0x1E
++/*global device 0x1f, always set P0*/
++#define DEV_1F				0x1F
++
++
++/************IEXT/REXT CAL***************/
++/* bits range: for example BITS(16,23) = 0xFF0000*/
++#define BITS(m, n)   (~(BIT(m) - 1) & ((BIT(n) - 1) | BIT(n)))
++#define ANACAL_INIT			0x01
++#define ANACAL_ERROR			0xFD
++#define ANACAL_SATURATION		0xFE
++#define	ANACAL_FINISH			0xFF
++#define ANACAL_PAIR_A			0
++#define ANACAL_PAIR_B			1
++#define ANACAL_PAIR_C			2
++#define ANACAL_PAIR_D			3
++#define DAC_IN_0V			0x00
++#define DAC_IN_2V			0xf0
++#define TX_AMP_OFFSET_0MV		0x20
++#define TX_AMP_OFFSET_VALID_BITS	6
++
++#define R0				0
++#define PHY0				0
++#define PHY1				1
++#define PHY2				2
++#define PHY3				3
++#define PHY4				4
++#define ANA_TEST_MODE			BITS(8, 15)
++#define TST_TCLK_SEL			BITS(6, 7)
++#define ANA_TEST_VGA_RG			0x100
++
++#define FORCE_MDI_CROSS_OVER		BITS(3, 4)
++#define T10_TEST_CTL_RG			0x145
++#define RG_185				0x185
++#define RG_TX_SLEW			BIT(0)
++#define ANA_CAL_0			0xdb
++#define RG_CAL_CKINV			BIT(12)
++#define RG_ANA_CALEN			BIT(8)
++#define RG_REXT_CALEN			BIT(4)
++#define RG_ZCALEN_A			BIT(0)
++#define ANA_CAL_1			0xdc
++#define RG_ZCALEN_B			BIT(12)
++#define RG_ZCALEN_C			BIT(8)
++#define RG_ZCALEN_D			BIT(4)
++#define RG_TXVOS_CALEN			BIT(0)
++#define ANA_CAL_6			0xe1
++#define RG_CAL_REFSEL			BIT(4)
++#define RG_CAL_COMP_PWD			BIT(0)
++#define ANA_CAL_5			0xe0
++#define RG_REXT_TRIM			BITS(8, 13)
++#define RG_ZCAL_CTRL			BITS(0, 5)
++#define RG_17A				0x17a
++#define AD_CAL_COMP_OUT			BIT(8)
++#define RG_17B				0x17b
++#define AD_CAL_CLK			BIT(0)
++#define RG_17C				0x17c
++#define DA_CALIN_FLAG			BIT(0)
++/************R50 CAL****************************/
++#define RG_174				0x174
++#define RG_R50OHM_RSEL_TX_A_EN		BIT(15)
++#define CR_R50OHM_RSEL_TX_A		BITS(8, 14)
++#define RG_R50OHM_RSEL_TX_B_EN		BIT(7)
++#define CR_R50OHM_RSEL_TX_B		BITS(0, 6)
++#define RG_175				0x175
++#define RG_R50OHM_RSEL_TX_C_EN		BIT(15)
++#define CR_R50OHM_RSEL_TX_C		BITS(8, 14)
++#define RG_R50OHM_RSEL_TX_D_EN		BIT(7)
++#define CR_R50OHM_RSEL_TX_D		BITS(0, 6)
++/**********TX offset Calibration***************************/
++#define RG_95				0x96
++#define BYPASS_TX_OFFSET_CAL		BIT(15)
++#define RG_3E				0x3e
++#define BYPASS_PD_TXVLD_A		BIT(15)
++#define BYPASS_PD_TXVLD_B		BIT(14)
++#define BYPASS_PD_TXVLD_C		BIT(13)
++#define BYPASS_PD_TXVLD_D		BIT(12)
++#define BYPASS_PD_TX_10M		BIT(11)
++#define POWER_DOWN_TXVLD_A		BIT(7)
++#define POWER_DOWN_TXVLD_B		BIT(6)
++#define POWER_DOWN_TXVLD_C		BIT(5)
++#define POWER_DOWN_TXVLD_D		BIT(4)
++#define POWER_DOWN_TX_10M		BIT(3)
++#define RG_DD				0xdd
++#define RG_TXG_CALEN_A			BIT(12)
++#define RG_TXG_CALEN_B			BIT(8)
++#define RG_TXG_CALEN_C			BIT(4)
++#define RG_TXG_CALEN_D			BIT(0)
++#define RG_17D				0x17D
++#define FORCE_DASN_DAC_IN0_A		BIT(15)
++#define DASN_DAC_IN0_A			BITS(0, 9)
++#define RG_17E				0x17E
++#define FORCE_DASN_DAC_IN0_B		BIT(15)
++#define DASN_DAC_IN0_B			BITS(0, 9)
++#define RG_17F				0x17F
++
++#define FORCE_DASN_DAC_IN0_C		BIT(15)
++#define DASN_DAC_IN0_C			BITS(0, 9)
++#define RG_180				0x180
++#define FORCE_DASN_DAC_IN0_D		BIT(15)
++#define DASN_DAC_IN0_D			BITS(0, 9)
++
++#define RG_181				0x181
++#define FORCE_DASN_DAC_IN1_A		BIT(15)
++#define DASN_DAC_IN1_A			BITS(0, 9)
++#define RG_182				0x182
++#define FORCE_DASN_DAC_IN1_B		BIT(15)
++#define DASN_DAC_IN1_B			BITS(0, 9)
++#define RG_183				0x183
++#define FORCE_DASN_DAC_IN1_C		BIT(15)
++#define DASN_DAC_IN1_C			BITS(0, 9)
++#define RG_184				0x184
++#define FORCE_DASN_DAC_IN1_D		BIT(15)
++#define DASN_DAC_IN1_D			BITS(0, 9)
++#define RG_172				0x172
++#define CR_TX_AMP_OFFSET_A		BITS(8, 13)
++#define CR_TX_AMP_OFFSET_B		BITS(0, 5)
++#define RG_173				0x173
++#define CR_TX_AMP_OFFSET_C		BITS(8, 13)
++#define CR_TX_AMP_OFFSET_D		BITS(0, 5)
++/**********TX Amp Calibration ***************************/
++#define RG_12				0x12
++#define DA_TX_I2MPB_A_GBE		BITS(10, 15)
++#define RG_17				0x17
++#define DA_TX_I2MPB_B_GBE		BITS(8, 13)
++#define RG_19				0x19
++#define DA_TX_I2MPB_C_GBE		BITS(8, 13)
++#define RG_21				0x21
++#define DA_TX_I2MPB_D_GBE		BITS(8, 13)
++
++#endif /* _MT753X_PHY_H_ */
diff --git a/target/linux/mediatek/patches-5.4/739-mt7531-gsw-port5_external_phy_init.patch b/target/linux/mediatek/patches-5.4/739-mt7531-gsw-port5_external_phy_init.patch
new file mode 100755
index 0000000..0d88c60
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/739-mt7531-gsw-port5_external_phy_init.patch
@@ -0,0 +1,156 @@
+From 9206472ba03032aea120604e8637b52408ca4b3a Mon Sep 17 00:00:00 2001
+From: Landen Chao <landen.chao@mediatek.com>
+Date: Fri, 29 May 2020 15:12:35 +0800
+Subject: [PATCH 2/2] 740_patch
+
+Change-Id: I7e0164751702f573d5185c4290ff78688f42f603
+---
+ drivers/net/phy/mtk/mt753x/Makefile        |  3 +-
+ drivers/net/phy/mtk/mt753x/mt7531.c        |  3 +
+ drivers/net/phy/mtk/mt753x/mt753x.h        |  1 +
+ drivers/net/phy/mtk/mt753x/mt753x_extphy.c | 69 ++++++++++++++++++++++
+ drivers/net/phy/mtk/mt753x/mt753x_extphy.h | 18 ++++++
+ 5 files changed, 93 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/net/phy/mtk/mt753x/mt753x_extphy.c
+ create mode 100644 drivers/net/phy/mtk/mt753x/mt753x_extphy.h
+
+diff --git a/drivers/net/phy/mtk/mt753x/Makefile b/drivers/net/phy/mtk/mt753x/Makefile
+index 384b0ff7..694ffa83 100644
+--- a/drivers/net/phy/mtk/mt753x/Makefile
++++ b/drivers/net/phy/mtk/mt753x/Makefile
+@@ -7,5 +7,6 @@ obj-$(CONFIG_MT753X_GSW)	+= mt753x.o
+ mt753x-$(CONFIG_SWCONFIG)	+= mt753x_swconfig.o
+ 
+ mt753x-y			+= mt753x_mdio.o mt7530.o mt7531.o \
+-					mt753x_common.o mt753x_vlan.o mt753x_nl.o mt753x_phy.o
++					mt753x_common.o mt753x_vlan.o mt753x_nl.o mt753x_phy.o \
++					mt753x_extphy.o
+ 
+diff --git a/drivers/net/phy/mtk/mt753x/mt7531.c b/drivers/net/phy/mtk/mt753x/mt7531.c
+index 04729835..4a2943b1 100644
+--- a/drivers/net/phy/mtk/mt753x/mt7531.c
++++ b/drivers/net/phy/mtk/mt753x/mt7531.c
+@@ -265,6 +265,9 @@ static int mt7531_set_port_sgmii_force_mode(struct gsw_mt753x *gsw, u32 port,
+ 		return -EINVAL;
+ 	}
+ 
++	if (port == 5)
++		extphy_init(gsw, port);
++
+ 	port_base = port - 5;
+ 
+ 	switch (port_cfg->speed) {
+diff --git a/drivers/net/phy/mtk/mt753x/mt753x.h b/drivers/net/phy/mtk/mt753x/mt753x.h
+index 5053a7d7..a3f343cd 100644
+--- a/drivers/net/phy/mtk/mt753x/mt753x.h
++++ b/drivers/net/phy/mtk/mt753x/mt753x.h
+@@ -154,6 +154,7 @@ void mt753x_irq_worker(struct work_struct *work);
+ void mt753x_irq_enable(struct gsw_mt753x *gsw);
+ 
+ int mt753x_phy_calibration(struct gsw_mt753x *gsw, u8 phyaddr);
++int extphy_init(struct gsw_mt753x *gsw, int addr);
+ 
+ /* MDIO Indirect Access Registers */
+ #define MII_MMD_ACC_CTL_REG		0x0d
+diff --git a/drivers/net/phy/mtk/mt753x/mt753x_extphy.c b/drivers/net/phy/mtk/mt753x/mt753x_extphy.c
+new file mode 100644
+index 00000000..f58e8a62
+--- /dev/null
++++ b/drivers/net/phy/mtk/mt753x/mt753x_extphy.c
+@@ -0,0 +1,69 @@
++/*
++ * Driver for MediaTek MT7531 gigabit switch
++ *
++ * Copyright (C) 2018 MediaTek Inc. All Rights Reserved.
++ *
++ * Author: Landen Chao <landen.chao@mediatek.com>
++ *
++ * SPDX-License-Identifier:	GPL-2.0+
++ */
++
++#include <linux/kernel.h>
++#include <linux/mii.h>
++
++#include "mt753x.h"
++#include "mt753x_regs.h"
++#include "mt753x_extphy.h"
++
++int gpy211_init(struct gsw_mt753x *gsw, int addr)
++{
++	/* Enable rate adaption */
++	gsw->mmd_write(gsw, addr, 0x1e, 0x8, 0x24e2);
++
++	return 0;
++}
++
++static struct mt753x_extphy_id extphy_tbl[] = {
++        {0x67c9de00, 0xfffffff0, gpy211_init},
++};
++
++static u32 get_cl22_phy_id(struct gsw_mt753x *gsw, int addr)
++{
++	int phy_reg;
++	u32 phy_id = 0;
++
++	phy_reg = gsw->mii_read(gsw, addr, MII_PHYSID1);
++	if (phy_reg < 0)
++		return 0;
++	phy_id = (phy_reg & 0xffff) << 16;
++
++	/* Grab the bits from PHYIR2, and put them in the lower half */
++	phy_reg = gsw->mii_read(gsw, addr, MII_PHYSID2);
++	if (phy_reg < 0)
++		return 0;
++
++	phy_id |= (phy_reg & 0xffff);
++
++	return phy_id;
++}
++
++static inline bool phy_id_is_match(u32 id, struct mt753x_extphy_id *phy)
++{
++	return ((id & phy->phy_id_mask) == (phy->phy_id & phy->phy_id_mask));
++}
++
++int extphy_init(struct gsw_mt753x *gsw, int addr)
++{
++	int i;
++	u32 phy_id;
++	struct mt753x_extphy_id *extphy;
++
++	phy_id = get_cl22_phy_id(gsw, addr);
++	for (i = 0; i < ARRAY_SIZE(extphy_tbl); i++) {
++		extphy = &extphy_tbl[i];
++		if(phy_id_is_match(phy_id, extphy))
++			extphy->init(gsw, addr);
++	}
++
++	return 0;
++}
+diff --git a/drivers/net/phy/mtk/mt753x/mt753x_extphy.h b/drivers/net/phy/mtk/mt753x/mt753x_extphy.h
+new file mode 100644
+index 00000000..2b72c8a9
+--- /dev/null
++++ b/drivers/net/phy/mtk/mt753x/mt753x_extphy.h
+@@ -0,0 +1,18 @@
++/*
++ * Driver for MediaTek MT753x gigabit switch
++ *
++ * Copyright (C) 2018 MediaTek Inc. All Rights Reserved.
++ *
++ * Author: Landen Chao <landen.chao@mediatek.com>
++ *
++ * SPDX-License-Identifier:	GPL-2.0+
++ */
++
++#ifndef _MT753X_EXTPHY_H_
++#define _MT753X_EXTPHY_H_
++struct mt753x_extphy_id {
++        u32 phy_id;
++        u32 phy_id_mask;
++	int (*init)(struct gsw_mt753x *gsw, int addr);
++};
++#endif
+-- 
+2.17.1
+
diff --git a/target/linux/ramips/patches-5.4/0099-mt7621-add-l2c-er35-workaround.patch b/target/linux/ramips/patches-5.4/0099-mt7621-add-l2c-er35-workaround.patch
new file mode 100644
index 0000000..07da3d2
--- /dev/null
+++ b/target/linux/ramips/patches-5.4/0099-mt7621-add-l2c-er35-workaround.patch
@@ -0,0 +1,142 @@
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -2466,6 +2466,17 @@ config SB1_PASS_2_1_WORKAROUNDS
+ 	depends on CPU_SB1 && CPU_SB1_PASS_2
+ 	default y
+ 
++config MIPS_ER35_WORKAROUNDS
++	bool
++	depends on SYS_SUPPORTS_MIPS_CPS
++	select ZONE_DMA
++	default y
++
++config MIPS_ER35_RESERVED_SPACE
++	hex
++	default 0x1000000
++	depends on MIPS_ER35_WORKAROUNDS
++
+ choice
+ 	prompt "SmartMIPS or microMIPS ASE support"
+ 
+--- a/arch/mips/include/asm/dma.h
++++ b/arch/mips/include/asm/dma.h
+@@ -87,6 +87,8 @@
+ #if defined(CONFIG_SGI_IP22) || defined(CONFIG_SGI_IP28)
+ /* don't care; ISA bus master won't work, ISA slave DMA supports 32bit addr */
+ #define MAX_DMA_ADDRESS		PAGE_OFFSET
++#elif defined(CONFIG_MIPS_ER35_WORKAROUNDS)
++#define MAX_DMA_ADDRESS		(PAGE_OFFSET + CONFIG_MIPS_ER35_RESERVED_SPACE)
+ #else
+ #define MAX_DMA_ADDRESS		(PAGE_OFFSET + 0x01000000)
+ #endif
+--- a/arch/mips/kernel/head.S
++++ b/arch/mips/kernel/head.S
+@@ -22,6 +22,7 @@
+ #include <asm/irqflags.h>
+ #include <asm/regdef.h>
+ #include <asm/mipsregs.h>
++#include <asm/cacheops.h>
+ #include <asm/stackframe.h>
+ 
+ #include <kernel-entry-init.h>
+@@ -93,6 +94,67 @@ NESTED(kernel_entry, 16, sp)			# kernel entry point
+ 
+ 	setup_c0_status_pri
+ 
++#ifdef CONFIG_MIPS_ER35_WORKAROUNDS
++	/* Jump to KSEG1 so that we can perform cache related operations */
++	PTR_LA	t0, 0f
++	li	t1, 5
++	ins	t0, t1, 29, 3
++	jr	t0
++	nop
++0:
++
++	/* Calculate L2 Cache size */
++	MFC0	t0, CP0_CONFIG, 2
++	ext	t1, t0, 4, 4
++	li	t2, 2
++	sllv	t1, t2, t1		/* Cache line size */
++
++	ext	t2, t0, 8, 4
++	li	t3, 64
++	sllv	t2, t3, t2		/* Sets per way */
++
++	ext	t3, t0, 0, 4
++	addiu	t3, 1			/* Number of ways */
++
++	mul	t2, t2, t3		/* Number of sets */
++
++
++	/* Flush L2 Cache before setting CCA overrides */
++	move	t3, zero
++1:
++	cache	Index_Writeback_Inv_SD, 0(t3)
++	sub	t2, 1
++	add	t3, t3, t1
++	bne	t2, zero, 1b
++	nop
++
++	sync
++
++	/*
++	 * Override bottom CONFIG_MIPS_ER35_RESERVED_SPACE of DDR to
++	 * Uncached (which will be reserved as DMA zone)
++	 */
++	MFC0	t0, CP0_CMGCRBASE
++	PTR_SLL	t0, t0, 4
++	li	t1, 5
++	ins	t0, t1, 29, 3
++
++	/* GCR_REG2_MASK */
++	lui	t1, (~((CONFIG_MIPS_ER35_RESERVED_SPACE - 1) >> 16)) & 0xffff
++	ori	t1, t1, 0x0051
++	sw	t1, 0xb8(t0)
++
++	/* GCR_REG2_BASE */
++	sw	zero, 0xb0(t0)
++
++	/* Set default override to Write-through */
++	lw	t1, 0x08(t0)
++	li	t2, 0xffff8000
++	and	t1, t1, t2
++	ori	t1, 0x10
++	sw	t1, 0x08(t0)
++#endif
++
+ 	/* We might not get launched at the address the kernel is linked to,
+ 	   so we jump there.  */
+ 	PTR_LA	t0, 0f
+--- a/arch/mips/ralink/Kconfig
++++ b/arch/mips/ralink/Kconfig
+@@ -59,6 +59,8 @@ choice
+ 		select HAVE_PCI if PCI_MT7621
+ 		select WEAK_REORDERING_BEYOND_LLSC
+ 		select GENERIC_CLOCKEVENTS_BROADCAST
++		select MIPS_ER35_WORKAROUNDS
++
+ endchoice
+ 
+ choice
+--- a/arch/mips/ralink/Platform
++++ b/arch/mips/ralink/Platform
+@@ -30,5 +30,5 @@ cflags-$(CONFIG_SOC_MT7620)	+= -I$(srctree)/arch/mips/include/asm/mach-ralink/mt
+ 
+ # Ralink MT7621
+ #
+-load-$(CONFIG_SOC_MT7621)	+= 0xffffffff80001000
++load-$(CONFIG_SOC_MT7621)	+= 0xffffffff80001000+$(CONFIG_MIPS_ER35_RESERVED_SPACE)
+ cflags-$(CONFIG_SOC_MT7621)	+= -I$(srctree)/arch/mips/include/asm/mach-ralink/mt7621
+--- a/kernel/dma/direct.c
++++ b/kernel/dma/direct.c
+@@ -91,6 +91,10 @@ struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
+ 	struct page *page = NULL;
+ 	u64 phys_mask;
+ 
++#ifdef CONFIG_MIPS_ER35_WORKAROUNDS
++	gfp |= __GFP_DMA;
++#endif
++
+ 	if (attrs & DMA_ATTR_NO_WARN)
+ 		gfp |= __GFP_NOWARN;
+