[][Add initial mtk feed for OpenWRT v21.02]
[Description]
Add initial mtk feed for OpenWRT v21.02
[Release-log]
N/A
Change-Id: I8051c6ba87f1ccf26c02fdd88a17d66f63c0b101
Reviewed-on: https://gerrit.mediatek.inc/c/openwrt/feeds/mtk_openwrt_feeds/+/4495320
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/Kconfig b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/Kconfig
new file mode 100755
index 0000000..b097f52
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/Kconfig
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_VENDOR_MEDIATEK
+ bool "MediaTek ethernet driver"
+ depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620
+ ---help---
+ If you have a Mediatek SoC with ethernet, say Y.
+
+if NET_VENDOR_MEDIATEK
+
+config NET_MEDIATEK_SOC
+ tristate "MediaTek SoC Gigabit Ethernet support"
+ select PHYLINK
+ ---help---
+ This driver supports the gigabit ethernet MACs in the
+ MediaTek SoC family.
+
+config MEDIATEK_NETSYS_V2
+ tristate "MediaTek Ethernet NETSYS V2 support"
+ depends on ARCH_MEDIATEK && NET_MEDIATEK_SOC
+ ---help---
+ This option enables MTK Ethernet NETSYS V2 support
+
+config NET_MEDIATEK_HNAT
+ tristate "MediaTek HW NAT support"
+ depends on NET_MEDIATEK_SOC && NF_CONNTRACK && IP_NF_NAT
+ ---help---
+ This driver supports the hardware Network Address Translation
+ in the MediaTek MT7986/MT2701/MT7622/MT7629/MT7621 chipset
+ family.
+
+config NET_MEDIATEK_HW_QOS
+ bool "Mediatek HW QoS support"
+ depends on NET_MEDIATEK_HNAT
+ default n
+ ---help---
+ This driver supports the hardware
+ quality of service (QoS) control
+ for the hardware NAT in the
+ MediaTek chipset family.
+
+endif #NET_VENDOR_MEDIATEK
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/Makefile b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/Makefile
new file mode 100755
index 0000000..f046e73
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Mediatek SoCs built-in ethernet macs
+#
+
+obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
+mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_eth_dbg.o
+obj-$(CONFIG_NET_MEDIATEK_HNAT) += mtk_hnat/
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.c
new file mode 100755
index 0000000..82aa6ca
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.c
@@ -0,0 +1,840 @@
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
+ */
+
+#include <linux/trace_seq.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/of_mdio.h>
+
+#include "mtk_eth_soc.h"
+#include "mtk_eth_dbg.h"
+
+struct mtk_eth_debug {
+ struct dentry *root;
+};
+
+struct mtk_eth *g_eth;
+
+struct mtk_eth_debug eth_debug;
+
+void mt7530_mdio_w32(struct mtk_eth *eth, u32 reg, u32 val)
+{
+ mutex_lock(ð->mii_bus->mdio_lock);
+
+ _mtk_mdio_write(eth, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
+ _mtk_mdio_write(eth, 0x1f, (reg >> 2) & 0xf, val & 0xffff);
+ _mtk_mdio_write(eth, 0x1f, 0x10, val >> 16);
+
+ mutex_unlock(ð->mii_bus->mdio_lock);
+}
+
+u32 mt7530_mdio_r32(struct mtk_eth *eth, u32 reg)
+{
+ u16 high, low;
+
+ mutex_lock(ð->mii_bus->mdio_lock);
+
+ _mtk_mdio_write(eth, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
+ low = _mtk_mdio_read(eth, 0x1f, (reg >> 2) & 0xf);
+ high = _mtk_mdio_read(eth, 0x1f, 0x10);
+
+ mutex_unlock(ð->mii_bus->mdio_lock);
+
+ return (high << 16) | (low & 0xffff);
+}
+
+void mtk_switch_w32(struct mtk_eth *eth, u32 val, unsigned reg)
+{
+ mtk_w32(eth, val, reg + 0x10000);
+}
+EXPORT_SYMBOL(mtk_switch_w32);
+
+u32 mtk_switch_r32(struct mtk_eth *eth, unsigned reg)
+{
+ return mtk_r32(eth, reg + 0x10000);
+}
+EXPORT_SYMBOL(mtk_switch_r32);
+
+static int mtketh_debug_show(struct seq_file *m, void *private)
+{
+ struct mtk_eth *eth = m->private;
+ struct mtk_mac *mac = 0;
+ u32 d;
+ int i, j = 0;
+
+ for (i = 0 ; i < MTK_MAX_DEVS ; i++) {
+ if (!eth->mac[i] ||
+ of_phy_is_fixed_link(eth->mac[i]->of_node))
+ continue;
+ mac = eth->mac[i];
+#if 0 //FIXME
+ while (j < 30) {
+ d = _mtk_mdio_read(eth, mac->phy_dev->addr, j);
+
+ seq_printf(m, "phy=%d, reg=0x%08x, data=0x%08x\n",
+ mac->phy_dev->addr, j, d);
+ j++;
+ }
+#endif
+ }
+ return 0;
+}
+
+static int mtketh_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mtketh_debug_show, inode->i_private);
+}
+
+static const struct file_operations mtketh_debug_fops = {
+ .open = mtketh_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int mtketh_mt7530sw_debug_show(struct seq_file *m, void *private)
+{
+ struct mtk_eth *eth = m->private;
+ u32 offset, data;
+ int i;
+ struct mt7530_ranges {
+ u32 start;
+ u32 end;
+ } ranges[] = {
+ {0x0, 0xac},
+ {0x1000, 0x10e0},
+ {0x1100, 0x1140},
+ {0x1200, 0x1240},
+ {0x1300, 0x1340},
+ {0x1400, 0x1440},
+ {0x1500, 0x1540},
+ {0x1600, 0x1640},
+ {0x1800, 0x1848},
+ {0x1900, 0x1948},
+ {0x1a00, 0x1a48},
+ {0x1b00, 0x1b48},
+ {0x1c00, 0x1c48},
+ {0x1d00, 0x1d48},
+ {0x1e00, 0x1e48},
+ {0x1f60, 0x1ffc},
+ {0x2000, 0x212c},
+ {0x2200, 0x222c},
+ {0x2300, 0x232c},
+ {0x2400, 0x242c},
+ {0x2500, 0x252c},
+ {0x2600, 0x262c},
+ {0x3000, 0x3014},
+ {0x30c0, 0x30f8},
+ {0x3100, 0x3114},
+ {0x3200, 0x3214},
+ {0x3300, 0x3314},
+ {0x3400, 0x3414},
+ {0x3500, 0x3514},
+ {0x3600, 0x3614},
+ {0x4000, 0x40d4},
+ {0x4100, 0x41d4},
+ {0x4200, 0x42d4},
+ {0x4300, 0x43d4},
+ {0x4400, 0x44d4},
+ {0x4500, 0x45d4},
+ {0x4600, 0x46d4},
+ {0x4f00, 0x461c},
+ {0x7000, 0x7038},
+ {0x7120, 0x7124},
+ {0x7800, 0x7804},
+ {0x7810, 0x7810},
+ {0x7830, 0x7830},
+ {0x7a00, 0x7a7c},
+ {0x7b00, 0x7b04},
+ {0x7e00, 0x7e04},
+ {0x7ffc, 0x7ffc},
+ };
+
+ if (!mt7530_exist(eth))
+ return -EOPNOTSUPP;
+
+ if ((!eth->mac[0] || !of_phy_is_fixed_link(eth->mac[0]->of_node)) &&
+ (!eth->mac[1] || !of_phy_is_fixed_link(eth->mac[1]->of_node))) {
+ seq_puts(m, "no switch found\n");
+ return 0;
+ }
+
+ for (i = 0 ; i < ARRAY_SIZE(ranges) ; i++) {
+ for (offset = ranges[i].start;
+ offset <= ranges[i].end; offset += 4) {
+ data = mt7530_mdio_r32(eth, offset);
+ seq_printf(m, "mt7530 switch reg=0x%08x, data=0x%08x\n",
+ offset, data);
+ }
+ }
+
+ return 0;
+}
+
+static int mtketh_debug_mt7530sw_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mtketh_mt7530sw_debug_show, inode->i_private);
+}
+
+static const struct file_operations mtketh_debug_mt7530sw_fops = {
+ .open = mtketh_debug_mt7530sw_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static ssize_t mtketh_mt7530sw_debugfs_write(struct file *file,
+ const char __user *ptr,
+ size_t len, loff_t *off)
+{
+ struct mtk_eth *eth = file->private_data;
+ char buf[32], *token, *p = buf;
+ u32 reg, value, phy;
+ int ret;
+
+ if (!mt7530_exist(eth))
+ return -EOPNOTSUPP;
+
+ if (*off != 0)
+ return 0;
+
+ if (len > sizeof(buf) - 1)
+ len = sizeof(buf) - 1;
+
+ ret = strncpy_from_user(buf, ptr, len);
+ if (ret < 0)
+ return ret;
+ buf[len] = '\0';
+
+ token = strsep(&p, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 16, &phy)) /* kstrtou32: avoid casting u32* to unsigned long* (stack overwrite on 64-bit) */
+ return -EINVAL;
+
+ token = strsep(&p, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 16, &reg))
+ return -EINVAL;
+
+ token = strsep(&p, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 16, &value))
+ return -EINVAL;
+
+ pr_info("%s:phy=%d, reg=0x%x, val=0x%x\n", __func__,
+ 0x1f, reg, value);
+ mt7530_mdio_w32(eth, reg, value);
+ pr_info("%s:phy=%d, reg=0x%x, val=0x%x confirm..\n", __func__,
+ 0x1f, reg, mt7530_mdio_r32(eth, reg));
+
+ return len;
+}
+
+static ssize_t mtketh_debugfs_write(struct file *file, const char __user *ptr,
+ size_t len, loff_t *off)
+{
+ struct mtk_eth *eth = file->private_data;
+ char buf[32], *token, *p = buf;
+ u32 reg, value, phy;
+ int ret;
+
+ if (*off != 0)
+ return 0;
+
+ if (len > sizeof(buf) - 1)
+ len = sizeof(buf) - 1;
+
+ ret = strncpy_from_user(buf, ptr, len);
+ if (ret < 0)
+ return ret;
+ buf[len] = '\0';
+
+ token = strsep(&p, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 16, &phy)) /* kstrtou32: avoid casting u32* to unsigned long* (stack overwrite on 64-bit) */
+ return -EINVAL;
+
+ token = strsep(&p, " ");
+
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 16, &reg))
+ return -EINVAL;
+
+ token = strsep(&p, " ");
+
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 16, &value))
+ return -EINVAL;
+
+ pr_info("%s:phy=%d, reg=0x%x, val=0x%x\n", __func__,
+ phy, reg, value);
+
+ _mtk_mdio_write(eth, phy, reg, value);
+
+ pr_info("%s:phy=%d, reg=0x%x, val=0x%x confirm..\n", __func__,
+ phy, reg, _mtk_mdio_read(eth, phy, reg));
+
+ return len;
+}
+
+static ssize_t mtketh_debugfs_reset(struct file *file, const char __user *ptr,
+ size_t len, loff_t *off)
+{
+ struct mtk_eth *eth = file->private_data;
+
+ schedule_work(ð->pending_work);
+ return len;
+}
+
+static const struct file_operations fops_reg_w = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = mtketh_debugfs_write,
+ .llseek = noop_llseek,
+};
+
+static const struct file_operations fops_eth_reset = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = mtketh_debugfs_reset,
+ .llseek = noop_llseek,
+};
+
+static const struct file_operations fops_mt7530sw_reg_w = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = mtketh_mt7530sw_debugfs_write,
+ .llseek = noop_llseek,
+};
+
+void mtketh_debugfs_exit(struct mtk_eth *eth)
+{
+ debugfs_remove_recursive(eth_debug.root);
+}
+
+int mtketh_debugfs_init(struct mtk_eth *eth)
+{
+ int ret = 0;
+
+ eth_debug.root = debugfs_create_dir("mtketh", NULL);
+ if (!eth_debug.root) {
+ dev_notice(eth->dev, "%s:err at %d\n", __func__, __LINE__);
+ return -ENOMEM; /* bail out: creating files under a NULL root would land them in the debugfs top level */
+ }
+
+ debugfs_create_file("phy_regs", S_IRUGO,
+ eth_debug.root, eth, &mtketh_debug_fops);
+ debugfs_create_file("phy_reg_w", S_IFREG | S_IWUSR,
+ eth_debug.root, eth, &fops_reg_w);
+ debugfs_create_file("reset", S_IFREG | S_IWUSR,
+ eth_debug.root, eth, &fops_eth_reset);
+ if (mt7530_exist(eth)) {
+ debugfs_create_file("mt7530sw_regs", S_IRUGO,
+ eth_debug.root, eth,
+ &mtketh_debug_mt7530sw_fops);
+ debugfs_create_file("mt7530sw_reg_w", S_IFREG | S_IWUSR,
+ eth_debug.root, eth,
+ &fops_mt7530sw_reg_w);
+ }
+ return ret;
+}
+
+void mii_mgr_read_combine(struct mtk_eth *eth, u32 phy_addr, u32 phy_register,
+ u32 *read_data)
+{
+ if (mt7530_exist(eth) && phy_addr == 31)
+ *read_data = mt7530_mdio_r32(eth, phy_register);
+
+ else
+ *read_data = _mtk_mdio_read(eth, phy_addr, phy_register);
+}
+
+void mii_mgr_write_combine(struct mtk_eth *eth, u32 phy_addr, u32 phy_register,
+ u32 write_data)
+{
+ if (mt7530_exist(eth) && phy_addr == 31)
+ mt7530_mdio_w32(eth, phy_register, write_data);
+
+ else
+ _mtk_mdio_write(eth, phy_addr, phy_register, write_data);
+}
+
+static void mii_mgr_read_cl45(struct mtk_eth *eth, u32 port, u32 devad, u32 reg, u32 *data)
+{
+ mtk_cl45_ind_read(eth, port, devad, reg, data);
+}
+
+static void mii_mgr_write_cl45(struct mtk_eth *eth, u32 port, u32 devad, u32 reg, u32 data)
+{
+ mtk_cl45_ind_write(eth, port, devad, reg, data);
+}
+
+int mtk_do_priv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ struct mtk_mii_ioctl_data mii;
+ struct mtk_esw_reg reg;
+
+ switch (cmd) {
+ case MTKETH_MII_READ:
+ if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
+ goto err_copy;
+ mii_mgr_read_combine(eth, mii.phy_id, mii.reg_num,
+ &mii.val_out);
+ if (copy_to_user(ifr->ifr_data, &mii, sizeof(mii)))
+ goto err_copy;
+
+ return 0;
+ case MTKETH_MII_WRITE:
+ if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
+ goto err_copy;
+ mii_mgr_write_combine(eth, mii.phy_id, mii.reg_num,
+ mii.val_in);
+
+ return 0;
+ case MTKETH_MII_READ_CL45:
+ if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
+ goto err_copy;
+ mii_mgr_read_cl45(eth, mii.port_num, mii.dev_addr, mii.reg_addr,
+ &mii.val_out);
+ if (copy_to_user(ifr->ifr_data, &mii, sizeof(mii)))
+ goto err_copy;
+
+ return 0;
+ case MTKETH_MII_WRITE_CL45:
+ if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
+ goto err_copy;
+ mii_mgr_write_cl45(eth, mii.port_num, mii.dev_addr, mii.reg_addr,
+ mii.val_in);
+ return 0;
+ case MTKETH_ESW_REG_READ:
+ if (!mt7530_exist(eth))
+ return -EOPNOTSUPP;
+ if (copy_from_user(®, ifr->ifr_data, sizeof(reg)))
+ goto err_copy;
+ if (reg.off > REG_ESW_MAX)
+ return -EINVAL;
+ reg.val = mtk_switch_r32(eth, reg.off);
+
+ if (copy_to_user(ifr->ifr_data, ®, sizeof(reg)))
+ goto err_copy;
+
+ return 0;
+ case MTKETH_ESW_REG_WRITE:
+ if (!mt7530_exist(eth))
+ return -EOPNOTSUPP;
+ if (copy_from_user(®, ifr->ifr_data, sizeof(reg)))
+ goto err_copy;
+ if (reg.off > REG_ESW_MAX)
+ return -EINVAL;
+ mtk_switch_w32(eth, reg.val, reg.off);
+
+ return 0;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+err_copy:
+ return -EFAULT;
+}
+
+int esw_cnt_read(struct seq_file *seq, void *v)
+{
+ unsigned int pkt_cnt = 0;
+ int i = 0;
+ struct mtk_eth *eth = g_eth;
+ unsigned int mib_base = MTK_GDM1_TX_GBCNT;
+
+ seq_puts(seq, "\n <<CPU>>\n");
+ seq_puts(seq, " |\n");
+ seq_puts(seq, "+-----------------------------------------------+\n");
+ seq_puts(seq, "| <<PSE>> |\n");
+ seq_puts(seq, "+-----------------------------------------------+\n");
+ seq_puts(seq, " |\n");
+ seq_puts(seq, "+-----------------------------------------------+\n");
+ seq_puts(seq, "| <<GDMA>> |\n");
+ seq_printf(seq, "| GDMA1_RX_GBCNT : %010u (Rx Good Bytes) |\n",
+ mtk_r32(eth, mib_base));
+ seq_printf(seq, "| GDMA1_RX_GPCNT : %010u (Rx Good Pkts) |\n",
+ mtk_r32(eth, mib_base+0x08));
+ seq_printf(seq, "| GDMA1_RX_OERCNT : %010u (overflow error) |\n",
+ mtk_r32(eth, mib_base+0x10));
+ seq_printf(seq, "| GDMA1_RX_FERCNT : %010u (FCS error) |\n",
+ mtk_r32(eth, mib_base+0x14));
+ seq_printf(seq, "| GDMA1_RX_SERCNT : %010u (too short) |\n",
+ mtk_r32(eth, mib_base+0x18));
+ seq_printf(seq, "| GDMA1_RX_LERCNT : %010u (too long) |\n",
+ mtk_r32(eth, mib_base+0x1C));
+ seq_printf(seq, "| GDMA1_RX_CERCNT : %010u (checksum error) |\n",
+ mtk_r32(eth, mib_base+0x20));
+ seq_printf(seq, "| GDMA1_RX_FCCNT : %010u (flow control) |\n",
+ mtk_r32(eth, mib_base+0x24));
+ seq_printf(seq, "| GDMA1_TX_SKIPCNT: %010u (about count) |\n",
+ mtk_r32(eth, mib_base+0x28));
+ seq_printf(seq, "| GDMA1_TX_COLCNT : %010u (collision count) |\n",
+ mtk_r32(eth, mib_base+0x2C));
+ seq_printf(seq, "| GDMA1_TX_GBCNT : %010u (Tx Good Bytes) |\n",
+ mtk_r32(eth, mib_base+0x30));
+ seq_printf(seq, "| GDMA1_TX_GPCNT : %010u (Tx Good Pkts) |\n",
+ mtk_r32(eth, mib_base+0x38));
+ seq_puts(seq, "| |\n");
+ seq_printf(seq, "| GDMA2_RX_GBCNT : %010u (Rx Good Bytes) |\n",
+ mtk_r32(eth, mib_base+0x40));
+ seq_printf(seq, "| GDMA2_RX_GPCNT : %010u (Rx Good Pkts) |\n",
+ mtk_r32(eth, mib_base+0x48));
+ seq_printf(seq, "| GDMA2_RX_OERCNT : %010u (overflow error) |\n",
+ mtk_r32(eth, mib_base+0x50));
+ seq_printf(seq, "| GDMA2_RX_FERCNT : %010u (FCS error) |\n",
+ mtk_r32(eth, mib_base+0x54));
+ seq_printf(seq, "| GDMA2_RX_SERCNT : %010u (too short) |\n",
+ mtk_r32(eth, mib_base+0x58));
+ seq_printf(seq, "| GDMA2_RX_LERCNT : %010u (too long) |\n",
+ mtk_r32(eth, mib_base+0x5C));
+ seq_printf(seq, "| GDMA2_RX_CERCNT : %010u (checksum error) |\n",
+ mtk_r32(eth, mib_base+0x60));
+ seq_printf(seq, "| GDMA2_RX_FCCNT : %010u (flow control) |\n",
+ mtk_r32(eth, mib_base+0x64));
+ seq_printf(seq, "| GDMA2_TX_SKIPCNT: %010u (skip) |\n",
+ mtk_r32(eth, mib_base+0x68));
+ seq_printf(seq, "| GDMA2_TX_COLCNT : %010u (collision) |\n",
+ mtk_r32(eth, mib_base+0x6C));
+ seq_printf(seq, "| GDMA2_TX_GBCNT : %010u (Tx Good Bytes) |\n",
+ mtk_r32(eth, mib_base+0x70));
+ seq_printf(seq, "| GDMA2_TX_GPCNT : %010u (Tx Good Pkts) |\n",
+ mtk_r32(eth, mib_base+0x78));
+ seq_puts(seq, "+-----------------------------------------------+\n");
+
+ if (!mt7530_exist(eth))
+ return 0;
+
+#define DUMP_EACH_PORT(base) \
+ do { \
+ for (i = 0; i < 7; i++) { \
+ pkt_cnt = mt7530_mdio_r32(eth, (base) + (i * 0x100));\
+ seq_printf(seq, "%8u ", pkt_cnt); \
+ } \
+ seq_puts(seq, "\n"); \
+ } while (0)
+
+ seq_printf(seq, "===================== %8s %8s %8s %8s %8s %8s %8s\n",
+ "Port0", "Port1", "Port2", "Port3", "Port4", "Port5",
+ "Port6");
+ seq_puts(seq, "Tx Drop Packet :");
+ DUMP_EACH_PORT(0x4000);
+ seq_puts(seq, "Tx CRC Error :");
+ DUMP_EACH_PORT(0x4004);
+ seq_puts(seq, "Tx Unicast Packet :");
+ DUMP_EACH_PORT(0x4008);
+ seq_puts(seq, "Tx Multicast Packet :");
+ DUMP_EACH_PORT(0x400C);
+ seq_puts(seq, "Tx Broadcast Packet :");
+ DUMP_EACH_PORT(0x4010);
+ seq_puts(seq, "Tx Collision Event :");
+ DUMP_EACH_PORT(0x4014);
+ seq_puts(seq, "Tx Pause Packet :");
+ DUMP_EACH_PORT(0x402C);
+ seq_puts(seq, "Rx Drop Packet :");
+ DUMP_EACH_PORT(0x4060);
+ seq_puts(seq, "Rx Filtering Packet :");
+ DUMP_EACH_PORT(0x4064);
+ seq_puts(seq, "Rx Unicast Packet :");
+ DUMP_EACH_PORT(0x4068);
+ seq_puts(seq, "Rx Multicast Packet :");
+ DUMP_EACH_PORT(0x406C);
+ seq_puts(seq, "Rx Broadcast Packet :");
+ DUMP_EACH_PORT(0x4070);
+ seq_puts(seq, "Rx Alignment Error :");
+ DUMP_EACH_PORT(0x4074);
+ seq_puts(seq, "Rx CRC Error :");
+ DUMP_EACH_PORT(0x4078);
+ seq_puts(seq, "Rx Undersize Error :");
+ DUMP_EACH_PORT(0x407C);
+ seq_puts(seq, "Rx Fragment Error :");
+ DUMP_EACH_PORT(0x4080);
+ seq_puts(seq, "Rx Oversize Error :");
+ DUMP_EACH_PORT(0x4084);
+ seq_puts(seq, "Rx Jabber Error :");
+ DUMP_EACH_PORT(0x4088);
+ seq_puts(seq, "Rx Pause Packet :");
+ DUMP_EACH_PORT(0x408C);
+ mt7530_mdio_w32(eth, 0x4fe0, 0xf0);
+ mt7530_mdio_w32(eth, 0x4fe0, 0x800000f0);
+
+ seq_puts(seq, "\n");
+
+ return 0;
+}
+
+static int switch_count_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, esw_cnt_read, 0);
+}
+
+static const struct file_operations switch_count_fops = {
+ .owner = THIS_MODULE,
+ .open = switch_count_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+
+static struct proc_dir_entry *proc_tx_ring, *proc_rx_ring;
+
+int tx_ring_read(struct seq_file *seq, void *v)
+{
+ struct mtk_tx_ring *ring = &g_eth->tx_ring;
+ struct mtk_tx_dma *tx_ring;
+ int i = 0;
+
+ tx_ring =
+ kmalloc(sizeof(struct mtk_tx_dma) * MTK_DMA_SIZE, GFP_KERNEL);
+ if (!tx_ring) {
+ seq_puts(seq, " allocate temp tx_ring fail.\n");
+ return 0;
+ }
+
+ for (i = 0; i < MTK_DMA_SIZE; i++)
+ tx_ring[i] = ring->dma[i];
+
+ seq_printf(seq, "free count = %d\n", (int)atomic_read(&ring->free_count));
+ seq_printf(seq, "cpu next free: %d\n", (int)(ring->next_free - ring->dma));
+ seq_printf(seq, "cpu last free: %d\n", (int)(ring->last_free - ring->dma));
+ for (i = 0; i < MTK_DMA_SIZE; i++) {
+ dma_addr_t tmp = ring->phys + i * sizeof(*tx_ring);
+
+ seq_printf(seq, "%d (%pad): %08x %08x %08x %08x", i, &tmp,
+ *(int *)&tx_ring[i].txd1, *(int *)&tx_ring[i].txd2,
+ *(int *)&tx_ring[i].txd3, *(int *)&tx_ring[i].txd4);
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+ seq_printf(seq, " %08x %08x %08x %08x",
+ *(int *)&tx_ring[i].txd5, *(int *)&tx_ring[i].txd6,
+ *(int *)&tx_ring[i].txd7, *(int *)&tx_ring[i].txd8);
+#endif
+ seq_printf(seq, "\n");
+ }
+
+ kfree(tx_ring);
+ return 0;
+}
+
+static int tx_ring_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tx_ring_read, NULL);
+}
+
+static const struct file_operations tx_ring_fops = {
+ .owner = THIS_MODULE,
+ .open = tx_ring_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+
+int rx_ring_read(struct seq_file *seq, void *v)
+{
+ struct mtk_rx_ring *ring = &g_eth->rx_ring[0];
+ struct mtk_rx_dma *rx_ring;
+
+ int i = 0;
+
+ rx_ring =
+ kmalloc(sizeof(struct mtk_rx_dma) * MTK_DMA_SIZE, GFP_KERNEL);
+ if (!rx_ring) {
+ seq_puts(seq, " allocate temp rx_ring fail.\n");
+ return 0;
+ }
+
+ for (i = 0; i < MTK_DMA_SIZE; i++)
+ rx_ring[i] = ring->dma[i];
+
+ seq_printf(seq, "next to read: %d\n",
+ NEXT_DESP_IDX(ring->calc_idx, MTK_DMA_SIZE));
+ for (i = 0; i < MTK_DMA_SIZE; i++) {
+ seq_printf(seq, "%d: %08x %08x %08x %08x", i,
+ *(int *)&rx_ring[i].rxd1, *(int *)&rx_ring[i].rxd2,
+ *(int *)&rx_ring[i].rxd3, *(int *)&rx_ring[i].rxd4);
+#ifdef CONFIG_MEDIATEK_NETSYS_RX_V2
+ seq_printf(seq, " %08x %08x %08x %08x",
+ *(int *)&rx_ring[i].rxd5, *(int *)&rx_ring[i].rxd6,
+ *(int *)&rx_ring[i].rxd7, *(int *)&rx_ring[i].rxd8);
+#endif
+ seq_printf(seq, "\n");
+ }
+
+ kfree(rx_ring);
+ return 0;
+}
+
+static int rx_ring_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, rx_ring_read, NULL);
+}
+
+static const struct file_operations rx_ring_fops = {
+ .owner = THIS_MODULE,
+ .open = rx_ring_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+
+int dbg_regs_read(struct seq_file *seq, void *v)
+{
+ struct mtk_eth *eth = g_eth;
+
+ seq_puts(seq, " <<PSE DEBUG REG DUMP>>\n");
+ seq_printf(seq, "| PSE_FQFC_CFG : %08x |\n",
+ mtk_r32(eth, MTK_PSE_FQFC_CFG));
+ seq_printf(seq, "| PSE_IQ_STA1 : %08x |\n",
+ mtk_r32(eth, MTK_PSE_IQ_STA(0)));
+ seq_printf(seq, "| PSE_IQ_STA2 : %08x |\n",
+ mtk_r32(eth, MTK_PSE_IQ_STA(1)));
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_TX_V2)) {
+ seq_printf(seq, "| PSE_IQ_STA3 : %08x |\n",
+ mtk_r32(eth, MTK_PSE_IQ_STA(2)));
+ seq_printf(seq, "| PSE_IQ_STA4 : %08x |\n",
+ mtk_r32(eth, MTK_PSE_IQ_STA(3)));
+ }
+
+ seq_printf(seq, "| PSE_OQ_STA1 : %08x |\n",
+ mtk_r32(eth, MTK_PSE_OQ_STA(0)));
+ seq_printf(seq, "| PSE_OQ_STA2 : %08x |\n",
+ mtk_r32(eth, MTK_PSE_OQ_STA(1)));
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_TX_V2)) {
+ seq_printf(seq, "| PSE_OQ_STA3 : %08x |\n",
+ mtk_r32(eth, MTK_PSE_OQ_STA(2)));
+ seq_printf(seq, "| PSE_OQ_STA4 : %08x |\n",
+ mtk_r32(eth, MTK_PSE_OQ_STA(3)));
+ }
+
+ seq_printf(seq, "| QDMA_FQ_CNT : %08x |\n",
+ mtk_r32(eth, MTK_QDMA_FQ_CNT));
+ seq_printf(seq, "| FE_PSE_FREE : %08x |\n",
+ mtk_r32(eth, MTK_FE_PSE_FREE));
+ seq_printf(seq, "| FE_DROP_FQ : %08x |\n",
+ mtk_r32(eth, MTK_FE_DROP_FQ));
+ seq_printf(seq, "| FE_DROP_FC : %08x |\n",
+ mtk_r32(eth, MTK_FE_DROP_FC));
+ seq_printf(seq, "| FE_DROP_PPE : %08x |\n",
+ mtk_r32(eth, MTK_FE_DROP_PPE));
+ seq_printf(seq, "| GDM1_IG_CTRL : %08x |\n",
+ mtk_r32(eth, MTK_GDMA_FWD_CFG(0)));
+ seq_printf(seq, "| GDM2_IG_CTRL : %08x |\n",
+ mtk_r32(eth, MTK_GDMA_FWD_CFG(1)));
+ seq_printf(seq, "| MAC_P1_MCR : %08x |\n",
+ mtk_r32(eth, MTK_MAC_MCR(0)));
+ seq_printf(seq, "| MAC_P2_MCR : %08x |\n",
+ mtk_r32(eth, MTK_MAC_MCR(1)));
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_TX_V2)) {
+ seq_printf(seq, "| FE_CDM1_FSM : %08x |\n",
+ mtk_r32(eth, MTK_FE_CDM1_FSM));
+ seq_printf(seq, "| FE_CDM2_FSM : %08x |\n",
+ mtk_r32(eth, MTK_FE_CDM2_FSM));
+ seq_printf(seq, "| FE_GDM1_FSM : %08x |\n",
+ mtk_r32(eth, MTK_FE_GDM1_FSM));
+ seq_printf(seq, "| FE_GDM2_FSM : %08x |\n",
+ mtk_r32(eth, MTK_FE_GDM2_FSM));
+ }
+
+ return 0;
+}
+
+static int dbg_regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dbg_regs_read, 0);
+}
+
+static const struct file_operations dbg_regs_fops = {
+ .owner = THIS_MODULE,
+ .open = dbg_regs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+
+#define PROCREG_ESW_CNT "esw_cnt"
+#define PROCREG_TXRING "tx_ring"
+#define PROCREG_RXRING "rx_ring"
+#define PROCREG_DIR "mtketh"
+#define PROCREG_DBG_REGS "dbg_regs"
+
+struct proc_dir_entry *proc_reg_dir;
+static struct proc_dir_entry *proc_esw_cnt, *proc_dbg_regs;
+
+int debug_proc_init(struct mtk_eth *eth)
+{
+ g_eth = eth;
+
+ if (!proc_reg_dir)
+ proc_reg_dir = proc_mkdir(PROCREG_DIR, NULL);
+
+ proc_tx_ring =
+ proc_create(PROCREG_TXRING, 0, proc_reg_dir, &tx_ring_fops);
+ if (!proc_tx_ring)
+ pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_TXRING);
+
+ proc_rx_ring =
+ proc_create(PROCREG_RXRING, 0, proc_reg_dir, &rx_ring_fops);
+ if (!proc_rx_ring)
+ pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_RXRING);
+
+ proc_esw_cnt =
+ proc_create(PROCREG_ESW_CNT, 0, proc_reg_dir, &switch_count_fops);
+ if (!proc_esw_cnt)
+ pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_ESW_CNT);
+
+ proc_dbg_regs =
+ proc_create(PROCREG_DBG_REGS, 0, proc_reg_dir, &dbg_regs_fops);
+ if (!proc_dbg_regs)
+ pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_DBG_REGS);
+
+ return 0;
+}
+
+void debug_proc_exit(void)
+{
+ if (proc_tx_ring)
+ remove_proc_entry(PROCREG_TXRING, proc_reg_dir);
+ if (proc_rx_ring)
+ remove_proc_entry(PROCREG_RXRING, proc_reg_dir);
+
+ if (proc_esw_cnt)
+ remove_proc_entry(PROCREG_ESW_CNT, proc_reg_dir);
+
+ if (proc_dbg_regs)
+ remove_proc_entry(PROCREG_DBG_REGS, proc_reg_dir);
+
+ if (proc_reg_dir) /* remove the directory last, after all entries under it are gone */
+ remove_proc_entry(PROCREG_DIR, NULL);
+}
+
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.h
new file mode 100755
index 0000000..c7924f4
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
+ */
+
+#ifndef MTK_ETH_DBG_H
+#define MTK_ETH_DBG_H
+
+/* Debug Purpose Register */
+#define MTK_PSE_FQFC_CFG 0x100
+#define MTK_FE_CDM1_FSM 0x220
+#define MTK_FE_CDM2_FSM 0x224
+#define MTK_FE_GDM1_FSM 0x228
+#define MTK_FE_GDM2_FSM 0x22C
+#define MTK_FE_PSE_FREE 0x240
+#define MTK_FE_DROP_FQ 0x244
+#define MTK_FE_DROP_FC 0x248
+#define MTK_FE_DROP_PPE 0x24C
+
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+#define MTK_PSE_IQ_STA(x) (0x180 + (x) * 0x4)
+#define MTK_PSE_OQ_STA(x) (0x1A0 + (x) * 0x4)
+#else
+#define MTK_PSE_IQ_STA(x) (0x110 + (x) * 0x4)
+#define MTK_PSE_OQ_STA(x) (0x118 + (x) * 0x4)
+#endif
+
+#define MTKETH_MII_READ 0x89F3
+#define MTKETH_MII_WRITE 0x89F4
+#define MTKETH_ESW_REG_READ 0x89F1
+#define MTKETH_ESW_REG_WRITE 0x89F2
+#define MTKETH_MII_READ_CL45 0x89FC
+#define MTKETH_MII_WRITE_CL45 0x89FD
+#define REG_ESW_MAX 0xFC
+
+struct mtk_esw_reg {
+ unsigned int off;
+ unsigned int val;
+};
+
+struct mtk_mii_ioctl_data {
+ unsigned int phy_id;
+ unsigned int reg_num;
+ unsigned int val_in;
+ unsigned int val_out;
+ unsigned int port_num;
+ unsigned int dev_addr;
+ unsigned int reg_addr;
+};
+
+#if defined(CONFIG_NET_DSA_MT7530) || defined(CONFIG_MT753X_GSW)
+static inline bool mt7530_exist(struct mtk_eth *eth)
+{
+ return true;
+}
+#else
+static inline bool mt7530_exist(struct mtk_eth *eth)
+{
+ return false;
+}
+#endif
+
+extern u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg);
+extern u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
+ u32 phy_register, u32 write_data);
+
+extern u32 mtk_cl45_ind_read(struct mtk_eth *eth, u32 port, u32 devad, u32 reg, u32 *data);
+extern u32 mtk_cl45_ind_write(struct mtk_eth *eth, u32 port, u32 devad, u32 reg, u32 data);
+
+int debug_proc_init(struct mtk_eth *eth);
+void debug_proc_exit(void);
+
+int mtketh_debugfs_init(struct mtk_eth *eth);
+void mtketh_debugfs_exit(struct mtk_eth *eth);
+int mtk_do_priv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+#endif /* MTK_ETH_DBG_H */
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_path.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_path.c
new file mode 100755
index 0000000..ef11cf3
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_path.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018-2019 MediaTek Inc.
+
+/* A library for configuring path from GMAC/GDM to target PHY
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#include <linux/phy.h>
+#include <linux/regmap.h>
+
+#include "mtk_eth_soc.h"
+
+struct mtk_eth_muxc {
+ const char *name;
+ int cap_bit;
+ int (*set_path)(struct mtk_eth *eth, int path);
+};
+
+static const char *mtk_eth_path_name(int path)
+{
+ switch (path) {
+ case MTK_ETH_PATH_GMAC1_RGMII:
+ return "gmac1_rgmii";
+ case MTK_ETH_PATH_GMAC1_TRGMII:
+ return "gmac1_trgmii";
+ case MTK_ETH_PATH_GMAC1_SGMII:
+ return "gmac1_sgmii";
+ case MTK_ETH_PATH_GMAC2_RGMII:
+ return "gmac2_rgmii";
+ case MTK_ETH_PATH_GMAC2_SGMII:
+ return "gmac2_sgmii";
+ case MTK_ETH_PATH_GMAC2_GEPHY:
+ return "gmac2_gephy";
+ case MTK_ETH_PATH_GDM1_ESW:
+ return "gdm1_esw";
+ default:
+ return "unknown path";
+ }
+}
+
+static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, int path)
+{
+ bool updated = true;
+ u32 val, mask, set;
+
+ switch (path) {
+ case MTK_ETH_PATH_GMAC1_SGMII:
+ mask = ~(u32)MTK_MUX_TO_ESW;
+ set = 0;
+ break;
+ case MTK_ETH_PATH_GDM1_ESW:
+ mask = ~(u32)MTK_MUX_TO_ESW;
+ set = MTK_MUX_TO_ESW;
+ break;
+ default:
+ updated = false;
+ break;
+ }
+
+ if (updated) {
+ val = mtk_r32(eth, MTK_MAC_MISC);
+ val = (val & mask) | set;
+ mtk_w32(eth, val, MTK_MAC_MISC);
+ }
+
+ dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+ mtk_eth_path_name(path), __func__, updated);
+
+ return 0;
+}
+
+static int set_mux_gmac2_gmac0_to_gephy(struct mtk_eth *eth, int path)
+{
+ unsigned int val = 0;
+ bool updated = true;
+
+ switch (path) {
+ case MTK_ETH_PATH_GMAC2_GEPHY:
+ val = ~(u32)GEPHY_MAC_SEL;
+ break;
+ default:
+ updated = false;
+ break;
+ }
+
+ if (updated)
+ regmap_update_bits(eth->infra, INFRA_MISC2, GEPHY_MAC_SEL, val);
+
+ dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+ mtk_eth_path_name(path), __func__, updated);
+
+ return 0;
+}
+
+static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, int path)
+{
+ unsigned int val = 0;
+ bool updated = true;
+
+ switch (path) {
+ case MTK_ETH_PATH_GMAC2_SGMII:
+ val = CO_QPHY_SEL;
+ break;
+ default:
+ updated = false;
+ break;
+ }
+
+ if (updated)
+ regmap_update_bits(eth->infra, INFRA_MISC2, CO_QPHY_SEL, val);
+
+ dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+ mtk_eth_path_name(path), __func__, updated);
+
+ return 0;
+}
+
+static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, int path)
+{
+ unsigned int val = 0;
+ bool updated = true;
+
+ switch (path) {
+ case MTK_ETH_PATH_GMAC1_SGMII:
+ val = SYSCFG0_SGMII_GMAC1;
+ break;
+ case MTK_ETH_PATH_GMAC2_SGMII:
+ val = SYSCFG0_SGMII_GMAC2;
+ break;
+ case MTK_ETH_PATH_GMAC1_RGMII:
+ case MTK_ETH_PATH_GMAC2_RGMII:
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+ val &= SYSCFG0_SGMII_MASK;
+
+ if ((path == MTK_ETH_PATH_GMAC1_RGMII && val == SYSCFG0_SGMII_GMAC1) ||
+ (path == MTK_ETH_PATH_GMAC2_RGMII && val == SYSCFG0_SGMII_GMAC2))
+ val = 0;
+ else
+ updated = false;
+ break;
+ default:
+ updated = false;
+ break;
+ }
+
+ if (updated)
+ regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_MASK, val);
+
+ dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+ mtk_eth_path_name(path), __func__, updated);
+
+ return 0;
+}
+
+static int set_mux_gmac12_to_gephy_sgmii(struct mtk_eth *eth, int path)
+{
+ unsigned int val = 0;
+ bool updated = true;
+
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+
+ switch (path) {
+ case MTK_ETH_PATH_GMAC1_SGMII:
+ val |= SYSCFG0_SGMII_GMAC1_V2;
+ break;
+ case MTK_ETH_PATH_GMAC2_GEPHY:
+ val &= ~(u32)SYSCFG0_SGMII_GMAC2_V2;
+ break;
+ case MTK_ETH_PATH_GMAC2_SGMII:
+ val |= SYSCFG0_SGMII_GMAC2_V2;
+ break;
+ default:
+ updated = false;
+ }
+
+ if (updated)
+ regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_MASK, val);
+
+ dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+ mtk_eth_path_name(path), __func__, updated);
+
+ return 0;
+}
+
+static const struct mtk_eth_muxc mtk_eth_muxc[] = {
+ {
+ .name = "mux_gdm1_to_gmac1_esw",
+ .cap_bit = MTK_ETH_MUX_GDM1_TO_GMAC1_ESW,
+ .set_path = set_mux_gdm1_to_gmac1_esw,
+ }, {
+ .name = "mux_gmac2_gmac0_to_gephy",
+ .cap_bit = MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY,
+ .set_path = set_mux_gmac2_gmac0_to_gephy,
+ }, {
+ .name = "mux_u3_gmac2_to_qphy",
+ .cap_bit = MTK_ETH_MUX_U3_GMAC2_TO_QPHY,
+ .set_path = set_mux_u3_gmac2_to_qphy,
+ }, {
+ .name = "mux_gmac1_gmac2_to_sgmii_rgmii",
+ .cap_bit = MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII,
+ .set_path = set_mux_gmac1_gmac2_to_sgmii_rgmii,
+ }, {
+ .name = "mux_gmac12_to_gephy_sgmii",
+ .cap_bit = MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII,
+ .set_path = set_mux_gmac12_to_gephy_sgmii,
+ },
+};
+
+static int mtk_eth_mux_setup(struct mtk_eth *eth, int path)
+{
+ int i, err = 0;
+
+ if (!MTK_HAS_CAPS(eth->soc->caps, path)) {
+ dev_err(eth->dev, "path %s isn't support on the SoC\n",
+ mtk_eth_path_name(path));
+ return -EINVAL;
+ }
+
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_MUX))
+ return 0;
+
+ /* Setup MUX in path fabric */
+ for (i = 0; i < ARRAY_SIZE(mtk_eth_muxc); i++) {
+ if (MTK_HAS_CAPS(eth->soc->caps, mtk_eth_muxc[i].cap_bit)) {
+ err = mtk_eth_muxc[i].set_path(eth, path);
+ if (err)
+ goto out;
+ } else {
+ dev_dbg(eth->dev, "mux %s isn't present on the SoC\n",
+ mtk_eth_muxc[i].name);
+ }
+ }
+
+out:
+ return err;
+}
+
+int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
+{
+ int err, path;
+
+ path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_SGMII :
+ MTK_ETH_PATH_GMAC2_SGMII;
+
+ /* Setup proper MUXes along the path */
+ err = mtk_eth_mux_setup(eth, path);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
+{
+ int err, path = 0;
+
+ if (mac_id == 1)
+ path = MTK_ETH_PATH_GMAC2_GEPHY;
+
+ if (!path)
+ return -EINVAL;
+
+ /* Setup proper MUXes along the path */
+ err = mtk_eth_mux_setup(eth, path);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id)
+{
+ int err, path;
+
+ path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_RGMII :
+ MTK_ETH_PATH_GMAC2_RGMII;
+
+ /* Setup proper MUXes along the path */
+ err = mtk_eth_mux_setup(eth, path);
+ if (err)
+ return err;
+
+ return 0;
+}
+
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c
new file mode 100755
index 0000000..5aa0bc0
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -0,0 +1,3465 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
+ */
+
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/if_vlan.h>
+#include <linux/reset.h>
+#include <linux/tcp.h>
+#include <linux/interrupt.h>
+#include <linux/pinctrl/devinfo.h>
+#include <linux/phylink.h>
+#include <net/dsa.h>
+
+#include "mtk_eth_soc.h"
+#include "mtk_eth_dbg.h"
+
+#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
+#include "mtk_hnat/nf_hnat_mtk.h"
+#endif
+
+static int mtk_msg_level = -1;
+module_param_named(msg_level, mtk_msg_level, int, 0);
+MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
+
+/* Expand to { "name", index } where index is the u64 slot of member x
+ * inside struct mtk_hw_stats — used to fetch stats generically below.
+ */
+#define MTK_ETHTOOL_STAT(x) { #x, \
+ offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
+
+/* strings used by ethtool */
+static const struct mtk_ethtool_stats {
+ char str[ETH_GSTRING_LEN]; /* name reported via get_strings */
+ u32 offset; /* u64 index into struct mtk_hw_stats */
+} mtk_ethtool_stats[] = {
+ MTK_ETHTOOL_STAT(tx_bytes),
+ MTK_ETHTOOL_STAT(tx_packets),
+ MTK_ETHTOOL_STAT(tx_skip),
+ MTK_ETHTOOL_STAT(tx_collisions),
+ MTK_ETHTOOL_STAT(rx_bytes),
+ MTK_ETHTOOL_STAT(rx_packets),
+ MTK_ETHTOOL_STAT(rx_overflow),
+ MTK_ETHTOOL_STAT(rx_fcs_errors),
+ MTK_ETHTOOL_STAT(rx_short_errors),
+ MTK_ETHTOOL_STAT(rx_long_errors),
+ MTK_ETHTOOL_STAT(rx_checksum_errors),
+ MTK_ETHTOOL_STAT(rx_flow_control_packets),
+};
+
+/* Clock names looked up from the device tree.
+ * NOTE(review): presumably indexed by the MTK_CLK_* enum in
+ * mtk_eth_soc.h (MTK_CLK_TRGPLL is used below) — confirm ordering.
+ */
+static const char * const mtk_clks_source_name[] = {
+ "ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
+ "sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
+ "sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
+ "sgmii_ck", "eth2pll", "wocpu0","wocpu1",
+};
+
+/* Write @val to the frame-engine register at offset @reg (no barrier). */
+void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
+{
+ __raw_writel(val, eth->base + reg);
+}
+
+/* Read the frame-engine register at offset @reg (no barrier). */
+u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
+{
+ return __raw_readl(eth->base + reg);
+}
+
+/* Read-modify-write helper: clear the @mask bits of register @reg and
+ * OR in @set.  Returns the value that was written back.
+ */
+u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
+{
+	u32 val;
+
+	val = mtk_r32(eth, reg);
+	val &= ~mask;
+	val |= set;
+	mtk_w32(eth, val, reg);
+
+	/* Return the new register value, not the register offset */
+	return val;
+}
+
+/* Poll MTK_PHY_IAC until the PHY indirect-access engine goes idle.
+ * Returns 0 when idle, -1 after PHY_IAC_TIMEOUT jiffies have elapsed.
+ */
+static int mtk_mdio_busy_wait(struct mtk_eth *eth)
+{
+	unsigned long deadline = jiffies + PHY_IAC_TIMEOUT;
+
+	for (;;) {
+		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
+			return 0;
+		if (time_after(jiffies, deadline))
+			break;
+		usleep_range(10, 20);
+	}
+
+	dev_err(eth->dev, "mdio: MDIO timeout\n");
+	return -1;
+}
+
+/* Raw clause-22 MDIO write through the PHY Indirect Access Control reg.
+ * Returns 0 on success.
+ * NOTE(review): return type is u32 yet -1 is the busy/timeout error
+ * value — callers must treat the result as signed; confirm intent.
+ */
+u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
+ u32 phy_register, u32 write_data)
+{
+ if (mtk_mdio_busy_wait(eth))
+ return -1;
+
+ /* Data field of the IAC command is 16 bits wide */
+ write_data &= 0xffff;
+
+ mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
+ (phy_register << PHY_IAC_REG_SHIFT) |
+ (phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
+ MTK_PHY_IAC);
+
+ /* Wait for the engine to finish shifting the frame out */
+ if (mtk_mdio_busy_wait(eth))
+ return -1;
+
+ return 0;
+}
+
+/* Raw clause-22 MDIO read through the PHY Indirect Access Control reg.
+ * Returns the 16-bit register value, or 0xffff on engine busy/timeout
+ * (indistinguishable from a genuine all-ones read).
+ */
+u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
+{
+ u32 d;
+
+ if (mtk_mdio_busy_wait(eth))
+ return 0xffff;
+
+ mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
+ (phy_reg << PHY_IAC_REG_SHIFT) |
+ (phy_addr << PHY_IAC_ADDR_SHIFT),
+ MTK_PHY_IAC);
+
+ if (mtk_mdio_busy_wait(eth))
+ return 0xffff;
+
+ /* Result is returned in the low 16 bits of the IAC register */
+ d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;
+
+ return d;
+}
+
+/* mii_bus .write hook: forward to the raw MDIO accessor. */
+static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
+			  int phy_reg, u16 val)
+{
+	return _mtk_mdio_write((struct mtk_eth *)bus->priv, phy_addr,
+			       phy_reg, val);
+}
+
+/* mii_bus .read hook: forward to the raw MDIO accessor. */
+static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
+{
+	return _mtk_mdio_read((struct mtk_eth *)bus->priv, phy_addr, phy_reg);
+}
+
+/* Clause-45 indirect register read via the clause-22 MMD access
+ * registers (address phase then data phase), serialized against other
+ * bus users with the mii_bus mdio_lock.  The value lands in *data;
+ * always returns 0 (the underlying accessor errors are not surfaced).
+ */
+u32 mtk_cl45_ind_read(struct mtk_eth *eth, u32 port, u32 devad, u32 reg, u32 *data)
+{
+ mutex_lock(&eth->mii_bus->mdio_lock);
+
+ _mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, devad);
+ _mtk_mdio_write(eth, port, MII_MMD_ADDR_DATA_REG, reg);
+ _mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, MMD_OP_MODE_DATA | devad);
+ *data = _mtk_mdio_read(eth, port, MII_MMD_ADDR_DATA_REG);
+
+ mutex_unlock(&eth->mii_bus->mdio_lock);
+
+ return 0;
+}
+
+/* Clause-45 indirect register write via the clause-22 MMD access
+ * registers, serialized with the mii_bus mdio_lock.  Always returns 0
+ * (underlying accessor errors are not surfaced).
+ */
+u32 mtk_cl45_ind_write(struct mtk_eth *eth, u32 port, u32 devad, u32 reg, u32 data)
+{
+ mutex_lock(&eth->mii_bus->mdio_lock);
+
+ _mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, devad);
+ _mtk_mdio_write(eth, port, MII_MMD_ADDR_DATA_REG, reg);
+ _mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, MMD_OP_MODE_DATA | devad);
+ _mtk_mdio_write(eth, port, MII_MMD_ADDR_DATA_REG, data);
+
+ mutex_unlock(&eth->mii_bus->mdio_lock);
+
+ return 0;
+}
+
+/* MT7621 flavour of the GMAC0 RGMII/TRGMII clock setup: selects the
+ * DDR PLL as TRGMII clock source in ETHSYS_CLKCFG0.  Returns
+ * -EOPNOTSUPP for TRGMII on DDR2 systems, 0 otherwise.
+ */
+static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
+ phy_interface_t interface)
+{
+ u32 val;
+
+ /* Check DDR memory type.
+ * Currently TRGMII mode with DDR2 memory is not supported.
+ */
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
+ if (interface == PHY_INTERFACE_MODE_TRGMII &&
+ val & SYSCFG_DRAM_TYPE_DDR2) {
+ dev_err(eth->dev,
+ "TRGMII mode with DDR2 memory is not supported!\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Route the DDR PLL to TRGMII only when TRGMII is in use */
+ val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
+ ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
+
+ regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
+ ETHSYS_TRGMII_MT7621_MASK, val);
+
+ return 0;
+}
+
+/* Program GMAC0 interface mode and TRGPLL clocking for RGMII/TRGMII.
+ * For TRGMII the PLL is fixed at 500 MHz; for RGMII the interface
+ * mode, PLL rate and RX/TX clock controls are chosen from @speed.
+ */
+static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
+ phy_interface_t interface, int speed)
+{
+ u32 val;
+ int ret;
+
+ if (interface == PHY_INTERFACE_MODE_TRGMII) {
+ mtk_w32(eth, TRGMII_MODE, INTF_MODE);
+ val = 500000000;
+ ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
+ if (ret)
+ dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
+ return;
+ }
+
+ /* RGMII: interface timing depends on gigabit vs 10/100 link speed */
+ val = (speed == SPEED_1000) ?
+ INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
+ mtk_w32(eth, val, INTF_MODE);
+
+ regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
+ ETHSYS_TRGMII_CLK_SEL362_5,
+ ETHSYS_TRGMII_CLK_SEL362_5);
+
+ val = (speed == SPEED_1000) ? 250000000 : 500000000;
+ ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
+ if (ret)
+ dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
+
+ val = (speed == SPEED_1000) ?
+ RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
+ mtk_w32(eth, val, TRGMII_RCK_CTRL);
+
+ val = (speed == SPEED_1000) ?
+ TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
+ mtk_w32(eth, val, TRGMII_TCK_CTRL);
+}
+
+/* phylink .mac_config callback: reprogram the GMAC for the requested
+ * PHY interface mode.  On an interface change this selects the SoC
+ * path MUXes, the GE mode bits in ETHSYS_SYSCFG0 and (for GMAC0) the
+ * TRGMII clocking; for SGMII/802.3z it also configures the SGMIISYS
+ * block before re-enabling the GMAC-to-SGMII path, and finally it
+ * rewrites the MAC control register from @state.
+ */
+static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
+			   const struct phylink_link_state *state)
+{
+	struct mtk_mac *mac = container_of(config, struct mtk_mac,
+					   phylink_config);
+	struct mtk_eth *eth = mac->hw;
+	u32 mcr_cur, mcr_new, sid, i;
+	/* err must start at 0: in the SGMII block below, interface ==
+	 * SGMII with a non-in-band mode assigns neither branch, and the
+	 * subsequent "if (err)" would otherwise read an uninitialized
+	 * value (undefined behavior).
+	 */
+	int val, ge_mode, err = 0;
+
+	/* MT76x8 has no hardware settings between for the MAC */
+	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
+	    mac->interface != state->interface) {
+		/* Setup soc pin functions */
+		switch (state->interface) {
+		case PHY_INTERFACE_MODE_TRGMII:
+			if (mac->id)
+				goto err_phy;
+			if (!MTK_HAS_CAPS(mac->hw->soc->caps,
+					  MTK_GMAC1_TRGMII))
+				goto err_phy;
+			/* fall through */
+		case PHY_INTERFACE_MODE_RGMII_TXID:
+		case PHY_INTERFACE_MODE_RGMII_RXID:
+		case PHY_INTERFACE_MODE_RGMII_ID:
+		case PHY_INTERFACE_MODE_RGMII:
+		case PHY_INTERFACE_MODE_MII:
+		case PHY_INTERFACE_MODE_REVMII:
+		case PHY_INTERFACE_MODE_RMII:
+			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
+				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
+				if (err)
+					goto init_err;
+			}
+			break;
+		case PHY_INTERFACE_MODE_1000BASEX:
+		case PHY_INTERFACE_MODE_2500BASEX:
+		case PHY_INTERFACE_MODE_SGMII:
+			if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
+				err = mtk_gmac_sgmii_path_setup(eth, mac->id);
+				if (err)
+					goto init_err;
+			}
+			break;
+		case PHY_INTERFACE_MODE_GMII:
+			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
+				err = mtk_gmac_gephy_path_setup(eth, mac->id);
+				if (err)
+					goto init_err;
+			}
+			break;
+		default:
+			goto err_phy;
+		}
+
+		/* Setup clock for 1st gmac */
+		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
+		    !phy_interface_mode_is_8023z(state->interface) &&
+		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
+			if (MTK_HAS_CAPS(mac->hw->soc->caps,
+					 MTK_TRGMII_MT7621_CLK)) {
+				if (mt7621_gmac0_rgmii_adjust(mac->hw,
+							      state->interface))
+					goto err_phy;
+			} else {
+				mtk_gmac0_rgmii_adjust(mac->hw,
+						       state->interface,
+						       state->speed);
+
+				/* mt7623_pad_clk_setup */
+				for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
+					mtk_w32(mac->hw,
+						TD_DM_DRVP(8) | TD_DM_DRVN(8),
+						TRGMII_TD_ODT(i));
+
+				/* Assert/release MT7623 RXC reset */
+				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
+					TRGMII_RCK_CTRL);
+				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
+			}
+		}
+
+		/* Translate the PHY interface into the SYSCFG0 GE mode */
+		ge_mode = 0;
+		switch (state->interface) {
+		case PHY_INTERFACE_MODE_MII:
+		case PHY_INTERFACE_MODE_GMII:
+			ge_mode = 1;
+			break;
+		case PHY_INTERFACE_MODE_REVMII:
+			ge_mode = 2;
+			break;
+		case PHY_INTERFACE_MODE_RMII:
+			if (mac->id)
+				goto err_phy;
+			ge_mode = 3;
+			break;
+		default:
+			break;
+		}
+
+		/* put the gmac into the right mode */
+		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
+		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
+		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
+
+		mac->interface = state->interface;
+	}
+
+	/* SGMII */
+	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
+	    phy_interface_mode_is_8023z(state->interface)) {
+		/* The path GMAC to SGMII will be enabled once the SGMIISYS is
+		 * being setup done.
+		 */
+		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+
+		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+				   SYSCFG0_SGMII_MASK,
+				   ~(u32)SYSCFG0_SGMII_MASK);
+
+		/* Decide how GMAC and SGMIISYS be mapped */
+		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
+		       0 : mac->id;
+
+		/* Setup SGMIISYS with the determined property */
+		if (state->interface != PHY_INTERFACE_MODE_SGMII)
+			err = mtk_sgmii_setup_mode_force(eth->sgmii, sid,
+							 state);
+		else if (phylink_autoneg_inband(mode))
+			err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);
+
+		if (err)
+			goto init_err;
+
+		/* Restore the saved SGMII enable bits */
+		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+				   SYSCFG0_SGMII_MASK, val);
+	} else if (phylink_autoneg_inband(mode)) {
+		dev_err(eth->dev,
+			"In-band mode not supported in non SGMII mode!\n");
+		return;
+	}
+
+	/* Setup gmac */
+	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+	mcr_new = mcr_cur;
+	mcr_new &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
+		     MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
+		     MAC_MCR_FORCE_RX_FC);
+	mcr_new |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
+		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
+
+	/* 2.5G uses the same MCR speed encoding as 1G */
+	switch (state->speed) {
+	case SPEED_2500:
+	case SPEED_1000:
+		mcr_new |= MAC_MCR_SPEED_1000;
+		break;
+	case SPEED_100:
+		mcr_new |= MAC_MCR_SPEED_100;
+		break;
+	}
+	if (state->duplex == DUPLEX_FULL) {
+		mcr_new |= MAC_MCR_FORCE_DPX;
+		if (state->pause & MLO_PAUSE_TX)
+			mcr_new |= MAC_MCR_FORCE_TX_FC;
+		if (state->pause & MLO_PAUSE_RX)
+			mcr_new |= MAC_MCR_FORCE_RX_FC;
+	}
+
+	/* Only update control register when needed! */
+	if (mcr_new != mcr_cur)
+		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
+
+	return;
+
+err_phy:
+	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
+		mac->id, phy_modes(state->interface));
+	return;
+
+init_err:
+	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
+		mac->id, phy_modes(state->interface), err);
+}
+
+/* phylink .mac_link_state callback: decode link, duplex, speed and
+ * pause from the MAC status register.  Returns 1 (state valid).
+ */
+static int mtk_mac_link_state(struct phylink_config *config,
+ struct phylink_link_state *state)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
+
+ state->link = (pmsr & MAC_MSR_LINK);
+ /* NOTE(review): assumes MAC_MSR_DPX is bit 1, so >> 1 yields 0/1
+ * matching DUPLEX_HALF/DUPLEX_FULL — confirm against the register
+ * layout in mtk_eth_soc.h.
+ */
+ state->duplex = (pmsr & MAC_MSR_DPX) >> 1;
+
+ switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
+ case 0:
+ state->speed = SPEED_10;
+ break;
+ case MAC_MSR_SPEED_100:
+ state->speed = SPEED_100;
+ break;
+ case MAC_MSR_SPEED_1000:
+ state->speed = SPEED_1000;
+ break;
+ default:
+ state->speed = SPEED_UNKNOWN;
+ break;
+ }
+
+ /* Keep only the pause bits, then refresh them from the MAC */
+ state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
+ if (pmsr & MAC_MSR_RX_FC)
+ state->pause |= MLO_PAUSE_RX;
+ if (pmsr & MAC_MSR_TX_FC)
+ state->pause |= MLO_PAUSE_TX;
+
+ return 1;
+}
+
+/* phylink .mac_an_restart callback: kick a new SGMII autoneg cycle. */
+static void mtk_mac_an_restart(struct phylink_config *config)
+{
+	struct mtk_mac *mac;
+
+	mac = container_of(config, struct mtk_mac, phylink_config);
+	mtk_sgmii_restart_an(mac->hw, mac->id);
+}
+
+/* phylink .mac_link_down callback: stop the MAC transmit/receive
+ * paths; all other MCR settings are left untouched.
+ */
+static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
+			      phy_interface_t interface)
+{
+	struct mtk_mac *mac = container_of(config, struct mtk_mac,
+					   phylink_config);
+
+	mtk_m32(mac->hw, MAC_MCR_TX_EN | MAC_MCR_RX_EN, 0,
+		MTK_MAC_MCR(mac->id));
+}
+
+/* phylink .mac_link_up callback: re-enable the MAC transmit/receive
+ * paths; all other MCR settings are left untouched.
+ */
+static void mtk_mac_link_up(struct phylink_config *config, unsigned int mode,
+			    phy_interface_t interface,
+			    struct phy_device *phy)
+{
+	struct mtk_mac *mac = container_of(config, struct mtk_mac,
+					   phylink_config);
+
+	mtk_m32(mac->hw, 0, MAC_MCR_TX_EN | MAC_MCR_RX_EN,
+		MTK_MAC_MCR(mac->id));
+}
+
+/* phylink .validate callback: reduce @supported and the advertising
+ * mask to what this GMAC can do for @state->interface.  An interface
+ * the SoC cannot provide clears @supported entirely.
+ */
+static void mtk_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ /* Reject interfaces the SoC capabilities cannot back:
+ * RGMII needs MTK_RGMII, TRGMII needs MTK_TRGMII on GMAC0 only,
+ * SGMII/802.3z need MTK_SGMII; MII/GMII/NA always pass.
+ */
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != PHY_INTERFACE_MODE_MII &&
+ state->interface != PHY_INTERFACE_MODE_GMII &&
+ !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
+ phy_interface_mode_is_rgmii(state->interface)) &&
+ !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
+ !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
+ !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
+ (state->interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_8023z(state->interface)))) {
+ linkmode_zero(supported);
+ return;
+ }
+
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Autoneg);
+
+ /* Cases deliberately fall through: each interface adds its own
+ * top speeds, then inherits the slower modes below it.
+ */
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_TRGMII:
+ phylink_set(mask, 1000baseT_Full);
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ phylink_set(mask, 1000baseX_Full);
+ phylink_set(mask, 2500baseX_Full);
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ phylink_set(mask, 1000baseT_Half);
+ /* fall through */
+ case PHY_INTERFACE_MODE_SGMII:
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ /* fall through */
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_REVMII:
+ case PHY_INTERFACE_MODE_NA:
+ default:
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+ break;
+ }
+
+ /* For PHY_INTERFACE_MODE_NA advertise everything the SoC offers */
+ if (state->interface == PHY_INTERFACE_MODE_NA) {
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ phylink_set(mask, 2500baseX_Full);
+ }
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseT_Half);
+ phylink_set(mask, 1000baseX_Full);
+ }
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseT_Half);
+ }
+ }
+
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
+
+ /* We can only operate at 2500BaseX or 1000BaseX. If requested
+ * to advertise both, only report advertising at 2500BaseX.
+ */
+ phylink_helper_basex_speed(state);
+}
+
+/* phylink MAC operations shared by all GMACs of this frame engine */
+static const struct phylink_mac_ops mtk_phylink_ops = {
+ .validate = mtk_validate,
+ .mac_link_state = mtk_mac_link_state,
+ .mac_an_restart = mtk_mac_an_restart,
+ .mac_config = mtk_mac_config,
+ .mac_link_down = mtk_mac_link_down,
+ .mac_link_up = mtk_mac_link_up,
+};
+
+/* Allocate and register the MDIO bus described by the "mdio-bus" child
+ * node of the ethernet DT node.  Returns 0 on success, -ENODEV when
+ * the node is missing/disabled, -ENOMEM or the registration error.
+ * The bus itself is devm-allocated; only registration needs undoing.
+ */
+static int mtk_mdio_init(struct mtk_eth *eth)
+{
+ struct device_node *mii_np;
+ int ret;
+
+ mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
+ if (!mii_np) {
+ dev_err(eth->dev, "no %s child node found", "mdio-bus");
+ return -ENODEV;
+ }
+
+ if (!of_device_is_available(mii_np)) {
+ ret = -ENODEV;
+ goto err_put_node;
+ }
+
+ eth->mii_bus = devm_mdiobus_alloc(eth->dev);
+ if (!eth->mii_bus) {
+ ret = -ENOMEM;
+ goto err_put_node;
+ }
+
+ eth->mii_bus->name = "mdio";
+ eth->mii_bus->read = mtk_mdio_read;
+ eth->mii_bus->write = mtk_mdio_write;
+ eth->mii_bus->priv = eth;
+ eth->mii_bus->parent = eth->dev;
+
+ snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
+ ret = of_mdiobus_register(eth->mii_bus, mii_np);
+
+ /* Node reference is dropped on both success and error paths */
+err_put_node:
+ of_node_put(mii_np);
+ return ret;
+}
+
+/* Unregister the MDIO bus if one exists; the allocation is devm-owned. */
+static void mtk_mdio_cleanup(struct mtk_eth *eth)
+{
+	if (eth->mii_bus)
+		mdiobus_unregister(eth->mii_bus);
+}
+
+/* Clear @mask bits in the TX interrupt mask register under tx_irq_lock. */
+static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&eth->tx_irq_lock, flags);
+	mtk_w32(eth, mtk_r32(eth, eth->tx_int_mask_reg) & ~mask,
+		eth->tx_int_mask_reg);
+	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
+}
+
+/* Set @mask bits in the TX interrupt mask register under tx_irq_lock. */
+static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&eth->tx_irq_lock, flags);
+	mtk_w32(eth, mtk_r32(eth, eth->tx_int_mask_reg) | mask,
+		eth->tx_int_mask_reg);
+	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
+}
+
+/* Clear @mask bits in MTK_PDMA_INT_MASK under rx_irq_lock. */
+static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&eth->rx_irq_lock, flags);
+	mtk_w32(eth, mtk_r32(eth, MTK_PDMA_INT_MASK) & ~mask,
+		MTK_PDMA_INT_MASK);
+	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
+}
+
+/* Set @mask bits in MTK_PDMA_INT_MASK under rx_irq_lock. */
+static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&eth->rx_irq_lock, flags);
+	mtk_w32(eth, mtk_r32(eth, MTK_PDMA_INT_MASK) | mask,
+		MTK_PDMA_INT_MASK);
+	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
+}
+
+/* ndo_set_mac_address: store the new address in the netdev and mirror
+ * it into the GDMA (or MT7628 SDM) MAC address registers.
+ * NOTE(review): eth_mac_addr() already updated dev->dev_addr before
+ * the RESETTING check, so on -EBUSY the soft and hard addresses can
+ * diverge until the next call — confirm this is acceptable.
+ */
+static int mtk_set_mac_address(struct net_device *dev, void *p)
+{
+ int ret = eth_mac_addr(dev, p);
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ const char *macaddr = dev->dev_addr;
+
+ if (ret)
+ return ret;
+
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ return -EBUSY;
+
+ spin_lock_bh(&mac->hw->page_lock);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ /* MT7628 uses a single shared SDM address register pair */
+ mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
+ MT7628_SDM_MAC_ADRH);
+ mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
+ (macaddr[4] << 8) | macaddr[5],
+ MT7628_SDM_MAC_ADRL);
+ } else {
+ /* Other SoCs have per-GMAC address registers */
+ mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
+ MTK_GDMA_MAC_ADRH(mac->id));
+ mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
+ (macaddr[4] << 8) | macaddr[5],
+ MTK_GDMA_MAC_ADRL(mac->id));
+ }
+ spin_unlock_bh(&mac->hw->page_lock);
+
+ return 0;
+}
+
+/* Accumulate the per-MAC hardware MIB counters (GDM counter window at
+ * MTK_GDM1_TX_GBCNT + reg_offset) into mac->hw_stats under the u64
+ * stats sync.  Caller must hold hw_stats->stats_lock.
+ * NOTE(review): counters are added, which presumes the hardware
+ * counters clear on read — confirm against the GDM MIB spec.
+ */
+void mtk_stats_update_mac(struct mtk_mac *mac)
+{
+ struct mtk_hw_stats *hw_stats = mac->hw_stats;
+ unsigned int base = MTK_GDM1_TX_GBCNT;
+ u64 stats;
+
+ base += hw_stats->reg_offset;
+
+ u64_stats_update_begin(&hw_stats->syncp);
+
+ hw_stats->rx_bytes += mtk_r32(mac->hw, base);
+ /* 0x04 holds the high 32 bits of the RX byte counter */
+ stats = mtk_r32(mac->hw, base + 0x04);
+ if (stats)
+ hw_stats->rx_bytes += (stats << 32);
+ hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
+ hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
+ hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
+ hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
+ hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
+ hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
+ hw_stats->rx_flow_control_packets +=
+ mtk_r32(mac->hw, base + 0x24);
+ hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
+ hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
+ hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
+ /* 0x34 holds the high 32 bits of the TX byte counter */
+ stats = mtk_r32(mac->hw, base + 0x34);
+ if (stats)
+ hw_stats->tx_bytes += (stats << 32);
+ hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
+ u64_stats_update_end(&hw_stats->syncp);
+}
+
+/* Refresh hardware counters for every MAC whose stats lock is free;
+ * contended MACs are simply skipped until the next pass.
+ */
+static void mtk_stats_update(struct mtk_eth *eth)
+{
+	int i;
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		struct mtk_mac *mac = eth->mac[i];
+
+		if (!mac || !mac->hw_stats)
+			continue;
+		if (!spin_trylock(&mac->hw_stats->stats_lock))
+			continue;
+		mtk_stats_update_mac(mac);
+		spin_unlock(&mac->hw_stats->stats_lock);
+	}
+}
+
+/* ndo_get_stats64: copy a consistent snapshot of the accumulated HW
+ * stats into @storage, refreshing from hardware first when the device
+ * is running and the stats lock is uncontended.
+ */
+static void mtk_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_hw_stats *hw_stats = mac->hw_stats;
+ unsigned int start;
+
+ if (netif_running(dev) && netif_device_present(dev)) {
+ if (spin_trylock_bh(&hw_stats->stats_lock)) {
+ mtk_stats_update_mac(mac);
+ spin_unlock_bh(&hw_stats->stats_lock);
+ }
+ }
+
+ /* seqcount retry loop guarantees a torn-free 64-bit snapshot */
+ do {
+ start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
+ storage->rx_packets = hw_stats->rx_packets;
+ storage->tx_packets = hw_stats->tx_packets;
+ storage->rx_bytes = hw_stats->rx_bytes;
+ storage->tx_bytes = hw_stats->tx_bytes;
+ storage->collisions = hw_stats->tx_collisions;
+ storage->rx_length_errors = hw_stats->rx_short_errors +
+ hw_stats->rx_long_errors;
+ storage->rx_over_errors = hw_stats->rx_overflow;
+ storage->rx_crc_errors = hw_stats->rx_fcs_errors;
+ storage->rx_errors = hw_stats->rx_checksum_errors;
+ storage->tx_aborted_errors = hw_stats->tx_skip;
+ } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
+
+ /* Software-maintained drop/error counts live in dev->stats */
+ storage->tx_errors = dev->stats.tx_errors;
+ storage->rx_dropped = dev->stats.rx_dropped;
+ storage->tx_dropped = dev->stats.tx_dropped;
+}
+
+/* RX fragment allocation size for @mtu, including skb_shared_info
+ * overhead, clamped so the data area fits MTK_MAX_RX_LENGTH.
+ */
+static inline int mtk_max_frag_size(int mtu)
+{
+	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
+	int eff_mtu = max(mtu, MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN);
+
+	return SKB_DATA_ALIGN(MTK_RX_HLEN + eff_mtu) +
+	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+}
+
+/* Usable RX data bytes inside a fragment of @frag_size once skb
+ * headroom and shared-info overhead are subtracted.
+ */
+static inline int mtk_max_buf_size(int frag_size)
+{
+	int overhead = NET_SKB_PAD + NET_IP_ALIGN +
+		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	int buf_size = frag_size - overhead;
+
+	WARN_ON(buf_size < MTK_MAX_RX_LENGTH);
+
+	return buf_size;
+}
+
+/* Snapshot a DMA-owned RX descriptor into @rxd using READ_ONCE so the
+ * compiler cannot tear or re-read words while the hardware updates it.
+ * NETSYS v2 descriptors carry two extra words (rxd5/rxd6).
+ */
+static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
+ struct mtk_rx_dma *dma_rxd)
+{
+ rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
+ rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
+ rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
+ rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
+#ifdef CONFIG_MEDIATEK_NETSYS_RX_V2
+ rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
+ rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
+#endif
+}
+
+/* the qdma core needs scratch memory to be setup */
+/* Build the QDMA free-queue: a ring of MTK_DMA_SIZE descriptors (in
+ * coherent DMA memory, or in SoC SRAM when available), each pointing
+ * at one MTK_QDMA_PAGE_SIZE scratch page, chained head to tail.
+ * Returns 0 on success or -ENOMEM.
+ * NOTE(review): on the dma_map_single failure path the kcalloc'd
+ * scratch_head is not freed here — presumably released by the caller's
+ * teardown via eth->scratch_head; confirm.
+ */
+static int mtk_init_fq_dma(struct mtk_eth *eth)
+{
+ dma_addr_t phy_ring_tail;
+ int cnt = MTK_DMA_SIZE;
+ dma_addr_t dma_addr;
+ int i;
+
+ if (!eth->soc->has_sram) {
+ eth->scratch_ring = dma_alloc_coherent(eth->dev,
+ cnt * sizeof(struct mtk_tx_dma),
+ &eth->phy_scratch_ring,
+ GFP_ATOMIC);
+ } else {
+ /* SRAM-backed ring lives at a fixed offset in the register space */
+ eth->scratch_ring = eth->base + MTK_ETH_SRAM_OFFSET;
+ }
+
+ if (unlikely(!eth->scratch_ring))
+ return -ENOMEM;
+
+ eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
+ GFP_KERNEL);
+ if (unlikely(!eth->scratch_head))
+ return -ENOMEM;
+
+ dma_addr = dma_map_single(eth->dev,
+ eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+ return -ENOMEM;
+
+ phy_ring_tail = eth->phy_scratch_ring +
+ (sizeof(struct mtk_tx_dma) * (cnt - 1));
+
+ /* txd1 = page address, txd2 = next descriptor, txd3 = page length */
+ for (i = 0; i < cnt; i++) {
+ eth->scratch_ring[i].txd1 =
+ (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
+ if (i < cnt - 1)
+ eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
+ ((i + 1) * sizeof(struct mtk_tx_dma)));
+ eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
+
+ eth->scratch_ring[i].txd4 = 0;
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+ if (eth->soc->has_sram && ((sizeof(struct mtk_tx_dma)) > 16)) {
+ eth->scratch_ring[i].txd5 = 0;
+ eth->scratch_ring[i].txd6 = 0;
+ eth->scratch_ring[i].txd7 = 0;
+ eth->scratch_ring[i].txd8 = 0;
+ }
+#endif
+ }
+
+ /* Hand the ring to the QDMA engine */
+ mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
+ mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
+ mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
+ mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
+
+ return 0;
+}
+
+/* Translate a QDMA physical descriptor address into its CPU pointer. */
+static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
+{
+	return (void *)ring->dma + (desc - ring->phys);
+}
+
+/* Return the software tx_buf slot paired with descriptor @txd. */
+static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
+						    struct mtk_tx_dma *txd)
+{
+	return ring->buf + (txd - ring->dma);
+}
+
+/* Map a QDMA descriptor to the same index in the PDMA shadow ring. */
+static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
+				       struct mtk_tx_dma *dma)
+{
+	return ring->dma_pdma + (dma - ring->dma);
+}
+
+/* Index of descriptor @dma within the TX ring. */
+static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma)
+{
+	return dma - ring->dma;
+}
+
+/* Undo the DMA mappings recorded in @tx_buf and free its skb (unless
+ * it is the MTK_DMA_DUMMY_DESC placeholder).  QDMA slots carry one
+ * single/page mapping; PDMA slots may carry two (len0/len1).
+ */
+static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
+ dma_unmap_single(eth->dev,
+ dma_unmap_addr(tx_buf, dma_addr0),
+ dma_unmap_len(tx_buf, dma_len0),
+ DMA_TO_DEVICE);
+ } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
+ dma_unmap_page(eth->dev,
+ dma_unmap_addr(tx_buf, dma_addr0),
+ dma_unmap_len(tx_buf, dma_len0),
+ DMA_TO_DEVICE);
+ }
+ } else {
+ /* PDMA: a slot may hold up to two buffer mappings */
+ if (dma_unmap_len(tx_buf, dma_len0)) {
+ dma_unmap_page(eth->dev,
+ dma_unmap_addr(tx_buf, dma_addr0),
+ dma_unmap_len(tx_buf, dma_len0),
+ DMA_TO_DEVICE);
+ }
+
+ if (dma_unmap_len(tx_buf, dma_len1)) {
+ dma_unmap_page(eth->dev,
+ dma_unmap_addr(tx_buf, dma_addr1),
+ dma_unmap_len(tx_buf, dma_len1),
+ DMA_TO_DEVICE);
+ }
+ }
+
+ tx_buf->flags = 0;
+ /* The dummy marker means "no skb owned here" — do not free it */
+ if (tx_buf->skb &&
+ (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
+ dev_kfree_skb_any(tx_buf->skb);
+ tx_buf->skb = NULL;
+}
+
+/* Record @mapped_addr/@size for later unmapping and, for PDMA, also
+ * fill the shadow descriptor: even @idx uses the txd1/PLEN0 slot (and
+ * plants the dummy-skb marker), odd @idx uses the txd3/PLEN1 slot.
+ * QDMA only needs the unmap bookkeeping.
+ */
+static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
+ struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
+ size_t size, int idx)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
+ dma_unmap_len_set(tx_buf, dma_len0, size);
+ } else {
+ if (idx & 1) {
+ /* second buffer of a PDMA descriptor pair */
+ txd->txd3 = mapped_addr;
+ txd->txd2 |= TX_DMA_PLEN1(size);
+ dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
+ dma_unmap_len_set(tx_buf, dma_len1, size);
+ } else {
+ /* first buffer: mark the slot as not owning an skb */
+ tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
+ txd->txd1 = mapped_addr;
+ txd->txd2 = TX_DMA_PLEN0(size);
+ dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
+ dma_unmap_len_set(tx_buf, dma_len0, size);
+ }
+ }
+}
+
+/* Map @skb (head + page frags) onto @tx_num TX descriptors and kick
+ * the DMA engine.  Handles both QDMA (linked descriptors, doorbell at
+ * MTK_QTX_CTX_PTR) and legacy PDMA (shadow ring, index doorbell).
+ * Returns 0 on success or -ENOMEM, unwinding all mappings on failure.
+ * Caller holds eth->page_lock.
+ */
+static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
+ int tx_num, struct mtk_tx_ring *ring, bool gso)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ struct mtk_tx_dma *itxd, *txd;
+ struct mtk_tx_dma *itxd_pdma, *txd_pdma;
+ struct mtk_tx_buf *itx_buf, *tx_buf;
+ dma_addr_t mapped_addr;
+ unsigned int nr_frags;
+ int i, n_desc = 1;
+ u32 txd4 = 0, fport;
+ u32 qid = 0;
+ int k = 0;
+
+ itxd = ring->next_free;
+ itxd_pdma = qdma_to_pdma(ring, itxd);
+ if (itxd == ring->last_free)
+ return -ENOMEM;
+
+ itx_buf = mtk_desc_to_tx_buf(ring, itxd);
+ memset(itx_buf, 0, sizeof(*itx_buf));
+
+ /* Map the linear part of the skb first */
+ mapped_addr = dma_map_single(eth->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
+ return -ENOMEM;
+
+ WRITE_ONCE(itxd->txd1, mapped_addr);
+ itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
+ itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
+ MTK_TX_FLAGS_FPORT1;
+ setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb),
+ k++);
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+ /* HW QoS: queue id is carried in skb->mark */
+ qid = skb->mark & (MTK_QDMA_TX_MASK);
+#endif
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_TX_V2)) {
+ u32 txd5 = 0, txd6 = 0;
+ /* set the forward port */
+ fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2;
+ txd4 |= fport;
+
+ if (gso)
+ txd5 |= TX_DMA_TSO_V2;
+
+ /* TX Checksum offload */
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ txd5 |= TX_DMA_CHKSUM_V2;
+
+ /* VLAN header offload */
+ if (skb_vlan_tag_present(skb))
+ txd6 |= TX_DMA_INS_VLAN_V2 | skb_vlan_tag_get(skb);
+
+ txd4 = txd4 | TX_DMA_SWC_V2;
+
+ /* LS0 set only when there are no frags to follow */
+ WRITE_ONCE(itxd->txd3, (TX_DMA_PLEN0(skb_headlen(skb)) |
+ (!nr_frags * TX_DMA_LS0)));
+
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+ WRITE_ONCE(itxd->txd5, txd5);
+ WRITE_ONCE(itxd->txd6, txd6);
+#endif
+ } else {
+ /* set the forward port */
+ fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
+ txd4 |= fport;
+
+ if (gso)
+ txd4 |= TX_DMA_TSO;
+
+ /* TX Checksum offload */
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ txd4 |= TX_DMA_CHKSUM;
+
+ /* VLAN header offload */
+ if (skb_vlan_tag_present(skb))
+ txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
+
+ WRITE_ONCE(itxd->txd3,
+ TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
+ (!nr_frags * TX_DMA_LS0) | QID_LOW_BITS(qid));
+ }
+ /* TX SG offload */
+ txd = itxd;
+ txd_pdma = qdma_to_pdma(ring, txd);
+
+#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
+ /* HNAT-tagged skbs are redirected to forward port 4 (PPE) */
+ if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_TX_V2)) {
+ txd4 &= ~(0xf << TX_DMA_FPORT_SHIFT_V2);
+ txd4 |= 0x4 << TX_DMA_FPORT_SHIFT_V2;
+ } else {
+ txd4 &= ~(0x7 << TX_DMA_FPORT_SHIFT);
+ txd4 |= 0x4 << TX_DMA_FPORT_SHIFT;
+ }
+ }
+
+ trace_printk("[%s] nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
+ __func__, nr_frags, HNAT_SKB_CB2(skb)->magic, txd4);
+#endif
+
+ /* Map every page frag, splitting frags larger than the DMA buffer */
+ for (i = 0; i < nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ unsigned int offset = 0;
+ int frag_size = skb_frag_size(frag);
+
+ while (frag_size) {
+ bool last_frag = false;
+ unsigned int frag_map_size;
+ bool new_desc = true;
+
+ /* PDMA packs two buffers per descriptor: only odd
+ * buffers advance to a fresh descriptor
+ */
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
+ (i & 0x1)) {
+ txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
+ txd_pdma = qdma_to_pdma(ring, txd);
+ if (txd == ring->last_free)
+ goto err_dma;
+
+ n_desc++;
+ } else {
+ new_desc = false;
+ }
+
+
+ frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
+ mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
+ frag_map_size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
+ goto err_dma;
+
+ if (i == nr_frags - 1 &&
+ (frag_size - frag_map_size) == 0)
+ last_frag = true;
+
+ WRITE_ONCE(txd->txd1, mapped_addr);
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_TX_V2)) {
+ WRITE_ONCE(txd->txd3, (TX_DMA_PLEN0(frag_map_size) |
+ last_frag * TX_DMA_LS0));
+ WRITE_ONCE(txd->txd4, fport | TX_DMA_SWC_V2 |
+ QID_BITS_V2(qid));
+ } else {
+ WRITE_ONCE(txd->txd3,
+ (TX_DMA_SWC | QID_LOW_BITS(qid) |
+ TX_DMA_PLEN0(frag_map_size) |
+ last_frag * TX_DMA_LS0));
+ WRITE_ONCE(txd->txd4,
+ fport | QID_HIGH_BITS(qid));
+ }
+
+ tx_buf = mtk_desc_to_tx_buf(ring, txd);
+ if (new_desc)
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
+ tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
+ tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
+ MTK_TX_FLAGS_FPORT1;
+
+ setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr,
+ frag_map_size, k++);
+
+ frag_size -= frag_map_size;
+ offset += frag_map_size;
+ }
+ }
+
+ /* store skb to cleanup */
+ itx_buf->skb = skb;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_TX_V2))
+ WRITE_ONCE(itxd->txd4, txd4 | QID_BITS_V2(qid));
+ else
+ WRITE_ONCE(itxd->txd4, txd4 | QID_HIGH_BITS(qid));
+
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ /* PDMA: flag last-segment on whichever half was used last */
+ if (k & 0x1)
+ txd_pdma->txd2 |= TX_DMA_LS0;
+ else
+ txd_pdma->txd2 |= TX_DMA_LS1;
+ }
+
+ netdev_sent_queue(dev, skb->len);
+ skb_tx_timestamp(skb);
+
+ ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
+ atomic_sub(n_desc, &ring->free_count);
+
+ /* make sure that all changes to the dma ring are flushed before we
+ * continue
+ */
+ wmb();
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
+ !netdev_xmit_more())
+ mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
+ } else {
+ int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd),
+ ring->dma_size);
+ mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
+ }
+
+ return 0;
+
+err_dma:
+ /* Walk back from the first descriptor, unmapping and resetting */
+ do {
+ tx_buf = mtk_desc_to_tx_buf(ring, itxd);
+
+ /* unmap dma */
+ mtk_tx_unmap(eth, tx_buf);
+
+ itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
+
+ itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
+ itxd_pdma = qdma_to_pdma(ring, itxd);
+ } while (itxd != txd);
+
+ return -ENOMEM;
+}
+
+static inline int mtk_cal_txd_req(struct sk_buff *skb)
+{
+ int i, nfrags;
+ skb_frag_t *frag;
+
+ nfrags = 1;
+ if (skb_is_gso(skb)) {
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ nfrags += DIV_ROUND_UP(skb_frag_size(frag),
+ MTK_TX_DMA_BUF_LEN);
+ }
+ } else {
+ nfrags += skb_shinfo(skb)->nr_frags;
+ }
+
+ return nfrags;
+}
+
+static int mtk_queue_stopped(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i])
+ continue;
+ if (netif_queue_stopped(eth->netdev[i]))
+ return 1;
+ }
+
+ return 0;
+}
+
+static void mtk_wake_queue(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i])
+ continue;
+ netif_wake_queue(eth->netdev[i]);
+ }
+}
+
/* ndo_start_xmit handler.  Both MACs transmit through one shared DMA
 * ring, so ring access is serialized with eth->page_lock instead of
 * relying on the per-device xmit lock alone.
 */
static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device_stats *stats = &dev->stats;
	bool gso = false;
	int tx_num;

	/* normally we can rely on the stack not calling this more than once,
	 * however we have 2 queues running on the same ring so we need to lock
	 * the ring access
	 */
	spin_lock(&eth->page_lock);

	/* drop everything while a HW reset is in progress */
	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
		goto drop;

	/* reserve the worst-case descriptor count up-front so mapping
	 * can never run out mid-packet
	 */
	tx_num = mtk_cal_txd_req(skb);
	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
		netif_stop_queue(dev);
		netif_err(eth, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
		spin_unlock(&eth->page_lock);
		return NETDEV_TX_BUSY;
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0)) {
			netif_warn(eth, tx_err, dev,
				   "GSO expand head fail.\n");
			goto drop;
		}

		if (skb_shinfo(skb)->gso_type &
		    (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			gso = true;
			/* HW picks up the MSS from the TCP checksum field */
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
		goto drop;

	/* stop early while the lock is still held so the next xmit
	 * cannot race us into ring exhaustion
	 */
	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
		netif_stop_queue(dev);

	spin_unlock(&eth->page_lock);

	return NETDEV_TX_OK;

drop:
	spin_unlock(&eth->page_lock);
	stats->tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
+
+static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
+{
+ int i;
+ struct mtk_rx_ring *ring;
+ int idx;
+
+ if (!eth->hwlro)
+ return ð->rx_ring[0];
+
+ for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
+ ring = ð->rx_ring[i];
+ idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
+ if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
+ ring->calc_idx_update = true;
+ return ring;
+ }
+ }
+
+ return NULL;
+}
+
+static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
+{
+ struct mtk_rx_ring *ring;
+ int i;
+
+ if (!eth->hwlro) {
+ ring = ð->rx_ring[0];
+ mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
+ } else {
+ for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
+ ring = ð->rx_ring[i];
+ if (ring->calc_idx_update) {
+ ring->calc_idx_update = false;
+ mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
+ }
+ }
+ }
+}
+
/* NAPI RX poll: consume up to @budget completed descriptors across all
 * RX rings, refill every consumed slot with a fresh page fragment and
 * hand the packet to the stack via GRO.  Returns packets received.
 */
static int mtk_poll_rx(struct napi_struct *napi, int budget,
		       struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring;
	int idx;
	struct sk_buff *skb;
	u8 *data, *new_data;
	struct mtk_rx_dma *rxd, trxd;
	int done = 0;

	while (done < budget) {
		struct net_device *netdev;
		unsigned int pktlen;
		dma_addr_t dma_addr;
		int mac;

		ring = mtk_get_rx_ring(eth);
		if (unlikely(!ring))
			goto rx_done;

		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
		rxd = &ring->dma[idx];
		data = ring->data[idx];

		/* work on a snapshot; the DMA engine may still own @rxd */
		mtk_rx_get_desc(&trxd, rxd);
		if (!(trxd.rxd2 & RX_DMA_DONE))
			break;

		/* find out which mac the packet come from. values start at 1 */
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
			mac = 0;
		} else {
#ifdef CONFIG_MEDIATEK_NETSYS_RX_V2
			/* NETSYS v2 moved the source port into rxd5 */
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
				mac = RX_DMA_GET_SPORT(trxd.rxd5) - 1;
			else
#endif
				/* special-tagged (DSA) frames map to MAC 0 */
				mac = (trxd.rxd4 & RX_DMA_SPECIAL_TAG) ?
				      0 : RX_DMA_GET_SPORT(trxd.rxd4) - 1;
		}

		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
			     !eth->netdev[mac]))
			goto release_desc;

		netdev = eth->netdev[mac];

		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
			goto release_desc;

		/* alloc new buffer first; on failure the old buffer is
		 * simply recycled and the packet dropped
		 */
		new_data = napi_alloc_frag(ring->frag_size);
		if (unlikely(!new_data)) {
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		dma_addr = dma_map_single(eth->dev,
					  new_data + NET_SKB_PAD +
					  eth->ip_align,
					  ring->buf_size,
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
			skb_free_frag(new_data);
			netdev->stats.rx_dropped++;
			goto release_desc;
		}

		/* receive data */
		skb = build_skb(data, ring->frag_size);
		if (unlikely(!skb)) {
			skb_free_frag(new_data);
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		/* NOTE(review): mapping used eth->ip_align but the reserve
		 * here uses NET_IP_ALIGN — confirm the two always match
		 */
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		dma_unmap_single(eth->dev, trxd.rxd1,
				 ring->buf_size, DMA_FROM_DEVICE);
		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
		skb->dev = netdev;
		skb_put(skb, pktlen);

		/* L4 checksum validity lives in rxd4 (v1) or rxd3 (v2) */
		if ((!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2) &&
		     (trxd.rxd4 & eth->rx_dma_l4_valid)) ||
		    (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2) &&
		     (trxd.rxd3 & eth->rx_dma_l4_valid)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, netdev);

		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
				if (trxd.rxd4 & RX_DMA_VTAG_V2)
					__vlan_hwaccel_put_tag(skb,
					htons(RX_DMA_VPID_V2(trxd.rxd3,
							     trxd.rxd4)),
					RX_DMA_VID_V2(trxd.rxd4));
			} else {
				if (trxd.rxd2 & RX_DMA_VTAG)
					__vlan_hwaccel_put_tag(skb,
					htons(RX_DMA_VPID(trxd.rxd3)),
					RX_DMA_VID(trxd.rxd3));
			}

			/* If netdev is attached to dsa switch, the special
			 * tag inserted in VLAN field by switch hardware can
			 * be offload by RX HW VLAN offload. Clears the VLAN
			 * information from @skb to avoid unexpected 8021d
			 * handler before packet enter dsa framework.
			 */
			if (netdev_uses_dsa(netdev))
				__vlan_hwaccel_clear_tag(skb);
		}

#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
		/* stash the raw HNAT descriptor word in the skb headroom
		 * for the HW NAT driver to inspect later
		 */
#ifdef CONFIG_MEDIATEK_NETSYS_RX_V2
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
			*(u32 *)(skb->head) = trxd.rxd5;
		else
#endif
			*(u32 *)(skb->head) = trxd.rxd4;

		skb_hnat_alg(skb) = 0;
		skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;

		if (skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) {
			trace_printk("[%s] reason=0x%x(force to CPU) from WAN to Ext\n",
				     __func__, skb_hnat_reason(skb));
			skb->pkt_type = PACKET_HOST;
		}

		trace_printk("[%s] rxd:(entry=%x,sport=%x,reason=%x,alg=%x\n",
			     __func__, skb_hnat_entry(skb), skb_hnat_sport(skb),
			     skb_hnat_reason(skb), skb_hnat_alg(skb));
#endif

		skb_record_rx_queue(skb, 0);
		napi_gro_receive(napi, skb);

		ring->data[idx] = new_data;
		rxd->rxd1 = (unsigned int)dma_addr;

release_desc:
		/* return the slot to the DMA engine (old or new buffer) */
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
			rxd->rxd2 = RX_DMA_LSO;
		else
			rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);

		ring->calc_idx = idx;

		done++;
	}

rx_done:
	if (done) {
		/* make sure that all changes to the dma ring are flushed before
		 * we continue
		 */
		wmb();
		mtk_update_rx_cpu_idx(eth);
	}

	return done;
}
+
/* Reclaim completed QDMA TX descriptors.  Walks the linked ring from
 * the last CPU release pointer towards the DMA pointer, unmapping each
 * buffer and crediting per-MAC done/bytes counters for
 * netdev_completed_queue().  Returns the unused budget.
 */
static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
			    unsigned int *done, unsigned int *bytes)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	u32 cpu, dma;

	cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
	dma = mtk_r32(eth, MTK_QTX_DRX_PTR);

	desc = mtk_qdma_phys_to_virt(ring, cpu);

	while ((cpu != dma) && budget) {
		u32 next_cpu = desc->txd2;
		int mac = 0;

		/* stop at the first descriptor the DMA engine still owns */
		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
			break;

		/* advance to the next linked descriptor; the tx_buf
		 * bookkeeping entry belongs to that slot
		 */
		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);

		tx_buf = mtk_desc_to_tx_buf(ring, desc);
		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
			mac = 1;

		skb = tx_buf->skb;
		if (!skb)
			break;

		/* MTK_DMA_DUMMY_DESC marks continuation slots of a
		 * multi-descriptor packet; only real skbs are counted
		 */
		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
			bytes[mac] += skb->len;
			done[mac]++;
			budget--;
		}
		mtk_tx_unmap(eth, tx_buf);

		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = next_cpu;
	}

	/* hand the reclaimed slots back to the hardware */
	mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);

	return budget;
}
+
/* Reclaim completed PDMA TX descriptors (MT7628-style index ring).
 * Walks from the software CPU index to the HW DMA index; all traffic is
 * accounted to MAC 0.  Returns the unused budget.
 */
static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
			    unsigned int *done, unsigned int *bytes)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	u32 cpu, dma;

	cpu = ring->cpu_idx;
	dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);

	while ((cpu != dma) && budget) {
		tx_buf = &ring->buf[cpu];
		skb = tx_buf->skb;
		if (!skb)
			break;

		/* dummy entries are continuation slots; don't count them */
		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
			bytes[0] += skb->len;
			done[0]++;
			budget--;
		}

		mtk_tx_unmap(eth, tx_buf);

		desc = &ring->dma[cpu];
		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
	}

	ring->cpu_idx = cpu;

	return budget;
}
+
+static int mtk_poll_tx(struct mtk_eth *eth, int budget)
+{
+ struct mtk_tx_ring *ring = ð->tx_ring;
+ unsigned int done[MTK_MAX_DEVS];
+ unsigned int bytes[MTK_MAX_DEVS];
+ int total = 0, i;
+
+ memset(done, 0, sizeof(done));
+ memset(bytes, 0, sizeof(bytes));
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ budget = mtk_poll_tx_qdma(eth, budget, done, bytes);
+ else
+ budget = mtk_poll_tx_pdma(eth, budget, done, bytes);
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i] || !done[i])
+ continue;
+ netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
+ total += done[i];
+ }
+
+ if (mtk_queue_stopped(eth) &&
+ (atomic_read(&ring->free_count) > ring->thresh))
+ mtk_wake_queue(eth);
+
+ return total;
+}
+
+static void mtk_handle_status_irq(struct mtk_eth *eth)
+{
+ u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
+
+ if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
+ mtk_stats_update(eth);
+ mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
+ MTK_INT_STATUS2);
+ }
+}
+
/* NAPI TX completion poll.  Acks the TX-done interrupt before
 * reclaiming so a completion arriving mid-poll re-raises the status
 * bit, and only re-enables the interrupt once the ring is drained.
 */
static int mtk_napi_tx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
	u32 status, mask;
	int tx_done = 0;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		mtk_handle_status_irq(eth);
	/* ack first, then poll — see function comment */
	mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
	tx_done = mtk_poll_tx(eth, budget);

	if (unlikely(netif_msg_intr(eth))) {
		status = mtk_r32(eth, eth->tx_int_status_reg);
		mask = mtk_r32(eth, eth->tx_int_mask_reg);
		dev_info(eth->dev,
			 "done tx %d, intr 0x%08x/0x%x\n",
			 tx_done, status, mask);
	}

	/* budget exhausted: stay scheduled */
	if (tx_done == budget)
		return budget;

	/* more completions arrived while polling: stay scheduled */
	status = mtk_r32(eth, eth->tx_int_status_reg);
	if (status & MTK_TX_DONE_INT)
		return budget;

	napi_complete(napi);
	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);

	return tx_done;
}
+
/* NAPI RX poll.  Acks RX-done before polling and loops as long as new
 * RX-done status appears and budget remains, so bursts are drained in
 * one NAPI invocation.  Returns the number of packets consumed (capped
 * at @budget when work remains).
 */
static int mtk_napi_rx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
	u32 status, mask;
	int rx_done = 0;
	int remain_budget = budget;

	mtk_handle_status_irq(eth);

poll_again:
	/* ack before polling so new arrivals re-raise the bit */
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
	rx_done = mtk_poll_rx(napi, remain_budget, eth);

	if (unlikely(netif_msg_intr(eth))) {
		status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
		mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
		dev_info(eth->dev,
			 "done rx %d, intr 0x%08x/0x%x\n",
			 rx_done, status, mask);
	}
	/* budget used up: stay scheduled */
	if (rx_done == remain_budget)
		return budget;

	/* new packets arrived during the poll: go around again with the
	 * budget that is left
	 */
	status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
	if (status & MTK_RX_DONE_INT) {
		remain_budget -= rx_done;
		goto poll_again;
	}
	napi_complete(napi);
	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);

	/* total consumed across all poll_again iterations */
	return rx_done + budget - remain_budget;
}
+
/* Allocate and initialize the TX descriptor ring (the QDMA linked list
 * plus, on PDMA-only SoCs, the parallel PDMA ring) and program the ring
 * base registers.  Returns 0 on success or -ENOMEM; partially built
 * state is released later by mtk_tx_clean() via mtk_dma_free().
 */
static int mtk_tx_alloc(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i, sz = sizeof(*ring->dma);

	ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
			    GFP_KERNEL);
	if (!ring->buf)
		goto no_tx_mem;

	/* descriptors live either in coherent DRAM or carved out of
	 * on-chip SRAM right after the FQ scratch ring
	 */
	if (!eth->soc->has_sram)
		ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
					       &ring->phys, GFP_ATOMIC);
	else {
		ring->dma = eth->scratch_ring + MTK_DMA_SIZE;
		ring->phys = eth->phy_scratch_ring + MTK_DMA_SIZE * sz;
	}

	if (!ring->dma)
		goto no_tx_mem;

	for (i = 0; i < MTK_DMA_SIZE; i++) {
		int next = (i + 1) % MTK_DMA_SIZE;
		u32 next_ptr = ring->phys + next * sz;

		/* link each descriptor to the next; CPU owns them all */
		ring->dma[i].txd2 = next_ptr;
		ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		ring->dma[i].txd4 = 0;
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
		/* v2 descriptors are wider (> 16 bytes); clear the extra
		 * words as well
		 */
		if (eth->soc->has_sram && (sz > 16)) {
			ring->dma[i].txd5 = 0;
			ring->dma[i].txd6 = 0;
			ring->dma[i].txd7 = 0;
			ring->dma[i].txd8 = 0;
		}
#endif
	}

	/* On MT7688 (PDMA only) this driver uses the ring->dma structs
	 * only as the framework. The real HW descriptors are the PDMA
	 * descriptors in ring->dma_pdma.
	 */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
						    &ring->phys_pdma,
						    GFP_ATOMIC);
		if (!ring->dma_pdma)
			goto no_tx_mem;

		for (i = 0; i < MTK_DMA_SIZE; i++) {
			ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
			ring->dma_pdma[i].txd4 = 0;
		}
	}

	ring->dma_size = MTK_DMA_SIZE;
	/* keep two descriptors in reserve so next_free never catches
	 * up with last_free
	 */
	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
	ring->next_free = &ring->dma[0];
	ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
	ring->thresh = MAX_SKB_FRAGS;

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
		mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
		mtk_w32(eth,
			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
			MTK_QTX_CRX_PTR);
		mtk_w32(eth,
			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
			MTK_QTX_DRX_PTR);
		mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
			MTK_QTX_CFG(0));
	} else {
		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
		mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
		mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
		mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
	}

	return 0;

no_tx_mem:
	return -ENOMEM;
}
+
+static void mtk_tx_clean(struct mtk_eth *eth)
+{
+ struct mtk_tx_ring *ring = ð->tx_ring;
+ int i;
+
+ if (ring->buf) {
+ for (i = 0; i < MTK_DMA_SIZE; i++)
+ mtk_tx_unmap(eth, &ring->buf[i]);
+ kfree(ring->buf);
+ ring->buf = NULL;
+ }
+
+ if (!eth->soc->has_sram && ring->dma) {
+ dma_free_coherent(eth->dev,
+ MTK_DMA_SIZE * sizeof(*ring->dma),
+ ring->dma,
+ ring->phys);
+ ring->dma = NULL;
+ }
+
+ if (ring->dma_pdma) {
+ dma_free_coherent(eth->dev,
+ MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
+ ring->dma_pdma,
+ ring->phys_pdma);
+ ring->dma_pdma = NULL;
+ }
+}
+
/* Allocate one RX ring (@ring_no) of the requested flavor — QDMA,
 * normal PDMA or HW-LRO — populate every slot with a mapped page
 * fragment and program the ring registers.  Returns 0 or -ENOMEM.
 *
 * NOTE(review): on mid-loop allocation failure the frags and the data
 * array leak when ring->dma was never set, since mtk_rx_clean() only
 * frees data when ring->dma is non-NULL — confirm against cleanup path.
 */
static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
	struct mtk_rx_ring *ring;
	int rx_data_len, rx_dma_size;
	int i;

	if (rx_flag == MTK_RX_FLAGS_QDMA) {
		/* only one QDMA RX ring exists */
		if (ring_no)
			return -EINVAL;
		ring = &eth->rx_ring_qdma;
	} else {
		ring = &eth->rx_ring[ring_no];
	}

	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
	} else {
		rx_data_len = ETH_DATA_LEN;
		rx_dma_size = MTK_DMA_SIZE;
	}

	ring->frag_size = mtk_max_frag_size(rx_data_len);
	ring->buf_size = mtk_max_buf_size(ring->frag_size);
	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
			     GFP_KERNEL);
	if (!ring->data)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		ring->data[i] = netdev_alloc_frag(ring->frag_size);
		if (!ring->data[i])
			return -ENOMEM;
	}

	/* only the normal ring can live in SRAM (after the TX ring);
	 * everything else uses coherent DRAM
	 */
	if ((!eth->soc->has_sram) || (eth->soc->has_sram
	    && (rx_flag != MTK_RX_FLAGS_NORMAL)))
		ring->dma = dma_alloc_coherent(eth->dev,
					       rx_dma_size * sizeof(*ring->dma),
					       &ring->phys, GFP_ATOMIC);
	else {
		struct mtk_tx_ring *tx_ring = &eth->tx_ring;

		ring->dma = (struct mtk_rx_dma *)(tx_ring->dma + MTK_DMA_SIZE);
		ring->phys = tx_ring->phys + MTK_DMA_SIZE * sizeof(*tx_ring->dma);
	}

	if (!ring->dma)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		dma_addr_t dma_addr = dma_map_single(eth->dev,
				ring->data[i] + NET_SKB_PAD + eth->ip_align,
				ring->buf_size,
				DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
			return -ENOMEM;
		ring->dma[i].rxd1 = (unsigned int)dma_addr;

		/* hand the slot to the DMA engine */
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
			ring->dma[i].rxd2 = RX_DMA_LSO;
		else
			ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);

		ring->dma[i].rxd3 = 0;
		ring->dma[i].rxd4 = 0;
#if defined(CONFIG_MEDIATEK_NETSYS_RX_V2)
		/* v2 descriptors are wider; clear the extra words too */
		if (eth->soc->has_sram && ((sizeof(struct mtk_rx_dma)) > 16)) {
			ring->dma[i].rxd5 = 0;
			ring->dma[i].rxd6 = 0;
			ring->dma[i].rxd7 = 0;
			ring->dma[i].rxd8 = 0;
		}
#endif
	}
	ring->dma_size = rx_dma_size;
	ring->calc_idx_update = false;
	/* CPU index starts one behind the DMA index */
	ring->calc_idx = rx_dma_size - 1;
	ring->crx_idx_reg = (rx_flag == MTK_RX_FLAGS_QDMA) ?
			    MTK_QRX_CRX_IDX_CFG(ring_no) :
			    MTK_PRX_CRX_IDX_CFG(ring_no);
	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (rx_flag == MTK_RX_FLAGS_QDMA) {
		mtk_w32(eth, ring->phys, MTK_QRX_BASE_PTR_CFG(ring_no));
		mtk_w32(eth, rx_dma_size, MTK_QRX_MAX_CNT_CFG(ring_no));
		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_QDMA_RST_IDX);
	} else {
		mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no));
		mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no));
		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX);
	}

	return 0;
}
+
+static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, int in_sram)
+{
+ int i;
+
+ if (ring->data && ring->dma) {
+ for (i = 0; i < ring->dma_size; i++) {
+ if (!ring->data[i])
+ continue;
+ if (!ring->dma[i].rxd1)
+ continue;
+ dma_unmap_single(eth->dev,
+ ring->dma[i].rxd1,
+ ring->buf_size,
+ DMA_FROM_DEVICE);
+ skb_free_frag(ring->data[i]);
+ }
+ kfree(ring->data);
+ ring->data = NULL;
+ }
+
+ if(in_sram)
+ return;
+
+ if (ring->dma) {
+ dma_free_coherent(eth->dev,
+ ring->dma_size * sizeof(*ring->dma),
+ ring->dma,
+ ring->phys);
+ ring->dma = NULL;
+ }
+}
+
/* Program the HW LRO engine: put rings 1..N into auto-learn mode with
 * age/aggregation timers and limits, set the bandwidth threshold,
 * auto-learn score delta and refresh timer, then enable LRO.  Always
 * returns 0.
 */
static int mtk_hwlro_rx_init(struct mtk_eth *eth)
{
	int i;
	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;

	/* set LRO rings to auto-learn modes */
	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;

	/* validate LRO ring */
	ring_ctrl_dw2 |= MTK_RING_VLD;

	/* set AGE timer (unit: 20us) */
	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;

	/* set max AGG timer (unit: 20us) */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;

	/* set max LRO AGG count */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;

	/* ring 0 stays a normal RX ring; only 1..N do LRO */
	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
	}

	/* IPv4 checksum update enable */
	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;

	/* switch priority comparison to packet count mode */
	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;

	/* bandwidth threshold setting */
	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);

	/* auto-learn score delta setting */
	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);

	/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
		MTK_PDMA_LRO_ALT_REFRESH_TIMER);

	/* set HW LRO mode & the max aggregation count for rx packets */
	lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);

	/* the minimal remaining room of SDL0 in RXD for lro aggregation */
	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;

	/* enable HW LRO */
	lro_ctrl_dw0 |= MTK_LRO_EN;

	/* write DW3 before DW0 so the enable bit lands last */
	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);

	return 0;
}
+
+static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
+{
+ int i;
+ u32 val;
+
+ /* relinquish lro rings, flush aggregated packets */
+ mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
+
+ /* wait for relinquishments done */
+ for (i = 0; i < 10; i++) {
+ val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
+ if (val & MTK_LRO_RING_RELINQUISH_DONE) {
+ msleep(20);
+ continue;
+ }
+ break;
+ }
+
+ /* invalidate lro rings */
+ for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
+ mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
+
+ /* disable HW LRO */
+ mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
+}
+
+static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
+{
+ u32 reg_val;
+
+ reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
+
+ /* invalidate the IP setting */
+ mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
+
+ mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
+
+ /* validate the IP setting */
+ mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
+}
+
+static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
+{
+ u32 reg_val;
+
+ reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
+
+ /* invalidate the IP setting */
+ mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
+
+ mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
+}
+
+static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
+{
+ int cnt = 0;
+ int i;
+
+ for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
+ if (mac->hwlro_ip[i])
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static int mtk_hwlro_add_ipaddr(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ int hwlro_idx;
+
+ if ((fsp->flow_type != TCP_V4_FLOW) ||
+ (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
+ (fsp->location > 1))
+ return -EINVAL;
+
+ mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
+ hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
+
+ mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
+
+ mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
+
+ return 0;
+}
+
+static int mtk_hwlro_del_ipaddr(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ int hwlro_idx;
+
+ if (fsp->location > 1)
+ return -EINVAL;
+
+ mac->hwlro_ip[fsp->location] = 0;
+ hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
+
+ mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
+
+ mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
+
+ return 0;
+}
+
+static void mtk_hwlro_netdev_disable(struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ int i, hwlro_idx;
+
+ for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
+ mac->hwlro_ip[i] = 0;
+ hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
+
+ mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
+ }
+
+ mac->hwlro_ip_cnt = 0;
+}
+
+static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ /* only tcp dst ipv4 is meaningful, others are meaningless */
+ fsp->flow_type = TCP_V4_FLOW;
+ fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
+ fsp->m_u.tcp_ip4_spec.ip4dst = 0;
+
+ fsp->h_u.tcp_ip4_spec.ip4src = 0;
+ fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
+ fsp->h_u.tcp_ip4_spec.psrc = 0;
+ fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
+ fsp->h_u.tcp_ip4_spec.pdst = 0;
+ fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
+ fsp->h_u.tcp_ip4_spec.tos = 0;
+ fsp->m_u.tcp_ip4_spec.tos = 0xff;
+
+ return 0;
+}
+
+static int mtk_hwlro_get_fdir_all(struct net_device *dev,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ int cnt = 0;
+ int i;
+
+ for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
+ if (mac->hwlro_ip[i]) {
+ rule_locs[cnt] = i;
+ cnt++;
+ }
+ }
+
+ cmd->rule_cnt = cnt;
+
+ return 0;
+}
+
+static netdev_features_t mtk_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ if (!(features & NETIF_F_LRO)) {
+ struct mtk_mac *mac = netdev_priv(dev);
+ int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
+
+ if (ip_cnt) {
+ netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
+
+ features |= NETIF_F_LRO;
+ }
+ }
+
+ if ((features & NETIF_F_HW_VLAN_CTAG_TX) && netdev_uses_dsa(dev)) {
+ netdev_info(dev, "TX vlan offload cannot be enabled when dsa is attached.\n");
+
+ features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+ }
+
+ return features;
+}
+
+static int mtk_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ int err = 0;
+
+ if (!((dev->features ^ features) & MTK_SET_FEATURES))
+ return 0;
+
+ if (!(features & NETIF_F_LRO))
+ mtk_hwlro_netdev_disable(dev);
+
+ if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
+ mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
+ else
+ mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
+
+ return err;
+}
+
+/* wait for DMA to finish whatever it is doing before we start using it again */
+static int mtk_dma_busy_wait(struct mtk_eth *eth)
+{
+ unsigned long t_start = jiffies;
+
+ while (1) {
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
+ (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
+ return 0;
+ } else {
+ if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
+ (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
+ return 0;
+ }
+
+ if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
+ break;
+ }
+
+ dev_err(eth->dev, "DMA init timeout\n");
+ return -1;
+}
+
/* One-shot DMA bring-up: wait for the engines to idle, then allocate
 * the QDMA scratch/FQ ring, the TX ring, the QDMA / normal / HW-LRO RX
 * rings and program QDMA flow control.  Errors are returned to the
 * caller, which frees partial allocations via mtk_dma_free().
 */
static int mtk_dma_init(struct mtk_eth *eth)
{
	int err;
	u32 i;

	if (mtk_dma_busy_wait(eth))
		return -EBUSY;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		/* QDMA needs scratch memory for internal reordering of the
		 * descriptors
		 */
		err = mtk_init_fq_dma(eth);
		if (err)
			return err;
	}

	err = mtk_tx_alloc(eth);
	if (err)
		return err;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
		if (err)
			return err;
	}

	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
	if (err)
		return err;

	if (eth->hwlro) {
		/* rings 1..N are dedicated to HW LRO */
		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
			if (err)
				return err;
		}
		err = mtk_hwlro_rx_init(eth);
		if (err)
			return err;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		/* Enable random early drop and set drop threshold
		 * automatically
		 */
		mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
			FC_THRES_MIN, MTK_QDMA_FC_THRES);
		mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
	}

	return 0;
}
+
+static void mtk_dma_free(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < MTK_MAC_COUNT; i++)
+ if (eth->netdev[i])
+ netdev_reset_queue(eth->netdev[i]);
+ if ( !eth->soc->has_sram && eth->scratch_ring) {
+ dma_free_coherent(eth->dev,
+ MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
+ eth->scratch_ring,
+ eth->phy_scratch_ring);
+ eth->scratch_ring = NULL;
+ eth->phy_scratch_ring = 0;
+ }
+ mtk_tx_clean(eth);
+ mtk_rx_clean(eth, ð->rx_ring[0],1);
+ mtk_rx_clean(eth, ð->rx_ring_qdma,0);
+
+ if (eth->hwlro) {
+ mtk_hwlro_rx_uninit(eth);
+ for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
+ mtk_rx_clean(eth, ð->rx_ring[i],0);
+ }
+
+ kfree(eth->scratch_head);
+}
+
+static void mtk_tx_timeout(struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+
+ eth->netdev[mac->id]->stats.tx_errors++;
+ netif_err(eth, tx_err, dev,
+ "transmit timed out\n");
+ schedule_work(ð->pending_work);
+}
+
+static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
+{
+ struct mtk_eth *eth = _eth;
+
+ if (likely(napi_schedule_prep(ð->rx_napi))) {
+ __napi_schedule(ð->rx_napi);
+ mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
+{
+ struct mtk_eth *eth = _eth;
+
+ if (likely(napi_schedule_prep(ð->tx_napi))) {
+ __napi_schedule(ð->tx_napi);
+ mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mtk_handle_irq(int irq, void *_eth)
+{
+ struct mtk_eth *eth = _eth;
+
+ if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) {
+ if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT)
+ mtk_handle_irq_rx(irq, _eth);
+ }
+ if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
+ if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
+ mtk_handle_irq_tx(irq, _eth);
+ }
+
+ return IRQ_HANDLED;
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: mask both DMA interrupts, drain RX synchronously
 * through the normal RX IRQ path, then restore the interrupt masks.
 */
static void mtk_poll_controller(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
	mtk_handle_irq_rx(eth->irq[2], dev);
	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
}
#endif
+
/* Allocate all rings via mtk_dma_init() and enable the DMA engines with
 * the SoC-appropriate global-config bits: QDMA plus PDMA-RX on QDMA
 * SoCs, PDMA alone otherwise.  On allocation failure everything is
 * freed again before the error is returned.
 */
static int mtk_start_dma(struct mtk_eth *eth)
{
	u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
	int err;

	err = mtk_dma_init(eth);
	if (err) {
		mtk_dma_free(eth);
		return err;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		/* NOTE(review): the V2 branch hard-codes MTK_RX_2B_OFFSET
		 * instead of using rx_2b_offset — confirm this is intended
		 * when NET_IP_ALIGN is 0
		 */
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_TX_V2))
			mtk_w32(eth,
				MTK_TX_DMA_EN | MTK_RX_DMA_EN |
				MTK_DMA_SIZE_32DWORDS | MTK_TX_WB_DDONE |
				MTK_NDP_CO_PRO | MTK_MUTLI_CNT |
				MTK_RESV_BUF | MTK_WCOMP_EN |
				MTK_DMAD_WR_WDONE | MTK_CHK_DDONE_EN |
				MTK_RX_2B_OFFSET, MTK_QDMA_GLO_CFG);
		else
			mtk_w32(eth,
				MTK_TX_DMA_EN |
				MTK_DMA_SIZE_32DWORDS | MTK_NDP_CO_PRO |
				MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
				MTK_RX_BT_32DWORDS,
				MTK_QDMA_GLO_CFG);

		/* PDMA still handles the RX path on QDMA SoCs */
		mtk_w32(eth,
			MTK_RX_DMA_EN | rx_2b_offset |
			MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
			MTK_PDMA_GLO_CFG);
	} else {
		mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
			MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
			MTK_PDMA_GLO_CFG);
	}

	return 0;
}
+
/* Program every GDM forward-config register: route ingress frames to
 * @config (e.g. PDMA, or drop-all during shutdown), enable IP/TCP/UDP
 * RX checksum offload, keep the MTK special tag on DSA-attached ports,
 * then pulse a PSE reset.  No-op on MT7628, which has no GDM.
 */
static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
{
	int i;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
		return;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));

		/* default setup the forward port to send frame to PDMA */
		val &= ~0xffff;

		/* Enable RX checksum */
		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;

		val |= config;

		if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i]))
			val |= MTK_GDMA_SPECIAL_TAG;

		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
	}
	/* Reset and enable PSE */
	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
	mtk_w32(eth, 0, MTK_RST_GL);
}
+
+/* ndo_open: attach the PHY via phylink and, for the first user of the
+ * shared DMA rings, bring up DMA, GDM forwarding, NAPI and interrupts.
+ *
+ * Fix: if mtk_start_dma() fails we must disconnect the PHY we just
+ * attached before returning, otherwise a later open would attach twice.
+ * The previous inner "int err" shadowed the outer one and is removed.
+ */
+static int mtk_open(struct net_device *dev)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+	int err;
+
+	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
+	if (err) {
+		netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
+			   err);
+		return err;
+	}
+
+	/* we run 2 netdevs on the same dma ring so we only bring it up once */
+	if (!refcount_read(&eth->dma_refcnt)) {
+		err = mtk_start_dma(eth);
+		if (err) {
+			/* don't leave the PHY attached when DMA failed */
+			phylink_disconnect_phy(mac->phylink);
+			return err;
+		}
+
+		mtk_gdm_config(eth, MTK_GDMA_TO_PDMA);
+
+		/* Indicates CDM to parse the MTK special tag from CPU */
+		if (netdev_uses_dsa(dev)) {
+			u32 val;
+
+			val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
+			mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
+			val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
+			mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
+		}
+
+		napi_enable(&eth->tx_napi);
+		napi_enable(&eth->rx_napi);
+		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+		mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+		refcount_set(&eth->dma_refcnt, 1);
+	} else {
+		refcount_inc(&eth->dma_refcnt);
+	}
+
+	phylink_start(mac->phylink);
+	netif_start_queue(dev);
+	return 0;
+}
+
+/* Disable TX/RX in the given global-config register and poll (up to
+ * 10 x 20ms) for the engine to report idle.
+ */
+static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
+{
+	int tries;
+	u32 glo;
+
+	/* clear the enable bits so the engine winds down */
+	spin_lock_bh(&eth->page_lock);
+	glo = mtk_r32(eth, glo_cfg);
+	glo &= ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN);
+	mtk_w32(eth, glo, glo_cfg);
+	spin_unlock_bh(&eth->page_lock);
+
+	/* wait for dma stop */
+	for (tries = 10; tries > 0; tries--) {
+		glo = mtk_r32(eth, glo_cfg);
+		if (!(glo & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)))
+			break;
+		msleep(20);
+	}
+}
+
+/* ndo_stop: detach the PHY and, when the last user of the shared DMA
+ * rings goes away, drop GDM forwarding, quiesce IRQs/NAPI, stop both
+ * DMA engines and free the rings.
+ */
+static int mtk_stop(struct net_device *dev)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+
+	phylink_stop(mac->phylink);
+
+	netif_tx_disable(dev);
+
+	/* NOTE(review): mtk_uninit() and mtk_remove() also call
+	 * phylink_disconnect_phy() -- confirm there is no double
+	 * disconnect on those paths.
+	 */
+	phylink_disconnect_phy(mac->phylink);
+
+	/* only shutdown DMA if this is the last user */
+	if (!refcount_dec_and_test(&eth->dma_refcnt))
+		return 0;
+
+	/* stop forwarding frames into the PSE before tearing down DMA */
+	mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
+
+	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
+	napi_disable(&eth->tx_napi);
+	napi_disable(&eth->rx_napi);
+
+	/* QDMA only exists on some SoCs; PDMA is always stopped */
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+		mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
+	mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
+
+	mtk_dma_free(eth);
+
+	return 0;
+}
+
+/* Pulse the given bits in ETHSYS_RSTCTRL: assert, hold ~1ms, deassert,
+ * then wait 10ms for the blocks to come out of reset.
+ */
+static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
+{
+	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
+			   reset_bits,
+			   reset_bits);
+
+	usleep_range(1000, 1100);
+	/* ~reset_bits is masked by reset_bits, i.e. the bits are cleared */
+	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
+			   reset_bits,
+			   ~reset_bits);
+	mdelay(10);
+}
+
+/* Gate every ethernet clock, in reverse of the enable order. */
+static void mtk_clk_disable(struct mtk_eth *eth)
+{
+	int i = MTK_CLK_MAX;
+
+	while (i-- > 0)
+		clk_disable_unprepare(eth->clks[i]);
+}
+
+/* Ungate every ethernet clock.  On failure, clocks enabled so far are
+ * rolled back and the clk error code is returned.
+ */
+static int mtk_clk_enable(struct mtk_eth *eth)
+{
+	int i, err = 0;
+
+	for (i = 0; i < MTK_CLK_MAX; i++) {
+		err = clk_prepare_enable(eth->clks[i]);
+		if (err)
+			break;
+	}
+
+	if (!err)
+		return 0;
+
+	/* unwind the clocks that were enabled before the failure */
+	while (i-- > 0)
+		clk_disable_unprepare(eth->clks[i]);
+
+	return err;
+}
+
+/* One-time hardware bring-up: runtime PM and clocks, SoC reset, pad
+ * control, MAC link-down defaults, interrupt delay/grouping and (on
+ * NETSYS RX v2) PSE queue thresholds.  Guarded by the MTK_HW_INIT bit.
+ *
+ * Fixes: the unused local "val" is removed (compiler warning), and the
+ * MT7628 device_reset() failure path now disables the clocks that were
+ * enabled just above instead of leaking them.
+ */
+static int mtk_hw_init(struct mtk_eth *eth)
+{
+	int i, ret;
+
+	if (test_and_set_bit(MTK_HW_INIT, &eth->state))
+		return 0;
+
+	pm_runtime_enable(eth->dev);
+	pm_runtime_get_sync(eth->dev);
+
+	ret = mtk_clk_enable(eth);
+	if (ret)
+		goto err_disable_pm;
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+		ret = device_reset(eth->dev);
+		if (ret) {
+			dev_err(eth->dev, "MAC reset failed!\n");
+			goto err_disable_clks;
+		}
+
+		/* enable interrupt delay for RX */
+		mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
+
+		/* disable delay and normal interrupt */
+		mtk_tx_irq_disable(eth, ~0);
+		mtk_rx_irq_disable(eth, ~0);
+
+		return 0;
+	}
+
+	/* Non-MT7628 handling... */
+	ethsys_reset(eth, RSTCTRL_FE);
+	ethsys_reset(eth, RSTCTRL_PPE);
+
+	/* Set FE to PDMAv2 if necessary */
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
+		mtk_w32(eth, mtk_r32(eth, MTK_FE_GLO_MISC) | MTK_PDMA_V2, MTK_FE_GLO_MISC);
+
+	if (eth->pctl) {
+		/* Set GE2 driving and slew rate */
+		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
+
+		/* set GE2 TDSEL */
+		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
+
+		/* set GE2 TUNE */
+		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
+	}
+
+	/* Set linkdown as the default for each GMAC. Its own MCR would be set
+	 * up with the more appropriate value when mtk_mac_config call is being
+	 * invoked.
+	 */
+	for (i = 0; i < MTK_MAC_COUNT; i++)
+		mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
+
+	/* Enable RX VLan Offloading */
+	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
+
+	/* enable interrupt delay for RX/TX */
+	mtk_w32(eth, 0x8f0f8f0f, MTK_PDMA_DELAY_INT);
+	mtk_w32(eth, 0x8f0f8f0f, MTK_QDMA_DELAY_INT);
+
+	mtk_tx_irq_disable(eth, ~0);
+	mtk_rx_irq_disable(eth, ~0);
+
+	/* FE int grouping */
+	mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
+	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
+	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
+	mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
+	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
+		/* PSE config input/output queue threshold */
+		mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
+		mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
+		mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
+		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
+		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
+		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
+		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
+		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
+
+		mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
+		mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
+		mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
+		mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
+		mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
+		mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
+		mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
+		mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
+	}
+
+	return 0;
+
+err_disable_clks:
+	mtk_clk_disable(eth);
+err_disable_pm:
+	pm_runtime_put_sync(eth->dev);
+	pm_runtime_disable(eth->dev);
+	/* NOTE(review): the MTK_HW_INIT state bit stays set on failure, so a
+	 * retry of mtk_hw_init() would be a no-op -- confirm intended.
+	 */
+	return ret;
+}
+
+/* Undo mtk_hw_init(): gate the clocks and drop the runtime-PM reference.
+ * Idempotent thanks to the MTK_HW_INIT state bit.
+ */
+static int mtk_hw_deinit(struct mtk_eth *eth)
+{
+	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
+		return 0;
+
+	mtk_clk_disable(eth);
+
+	pm_runtime_put_sync(eth->dev);
+	pm_runtime_disable(eth->dev);
+
+	return 0;
+}
+
+/* ndo_init callback: program the MAC address from the device tree,
+ * falling back to a random address when DT provides none or an invalid
+ * one.
+ *
+ * Fix: dropped the __init annotation.  This function is reached through
+ * netdev_ops whenever a netdev is (re)registered, which can happen after
+ * init memory has been discarded -- __init here is a section mismatch.
+ */
+static int mtk_init(struct net_device *dev)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+	const char *mac_addr;
+
+	mac_addr = of_get_mac_address(mac->of_node);
+	if (!IS_ERR(mac_addr))
+		ether_addr_copy(dev->dev_addr, mac_addr);
+
+	/* If the mac address is invalid, use random mac address */
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		eth_hw_addr_random(dev);
+		dev_err(eth->dev, "generated random MAC address %pM\n",
+			dev->dev_addr);
+	}
+
+	return 0;
+}
+
+/* ndo_uninit: detach the PHY and mask all TX/RX interrupts. */
+static void mtk_uninit(struct net_device *dev)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+
+	/* NOTE(review): mtk_stop() also disconnects the PHY -- confirm the
+	 * stop+uninit sequence cannot disconnect twice.
+	 */
+	phylink_disconnect_phy(mac->phylink);
+	mtk_tx_irq_disable(eth, ~0);
+	mtk_rx_irq_disable(eth, ~0);
+}
+
+/* ndo_do_ioctl: MII ioctls go to phylink; everything else falls through
+ * to the mtk_eth_dbg private-ioctl handler.
+ *
+ * Fix: removed the unreachable "break" after "return" in the default
+ * case and the unreachable trailing "return -EOPNOTSUPP".
+ */
+static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		return phylink_mii_ioctl(mac->phylink, ifr, cmd);
+	default:
+		/* default invoke the mtk_eth_dbg handler */
+		return mtk_do_priv_ioctl(dev, ifr, cmd);
+	}
+}
+
+/* Reset worker: under RTNL, stop every registered netdev, re-initialize
+ * the hardware from scratch and bring the previously-running devices
+ * back up.  Scheduled via eth->pending_work.
+ */
+static void mtk_pending_work(struct work_struct *work)
+{
+	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
+	int err, i;
+	unsigned long restart = 0;
+
+	rtnl_lock();
+
+	dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
+
+	/* spin until we own the MTK_RESETTING flag */
+	while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
+		cpu_relax();
+
+	dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
+	/* stop all devices to make sure that dma is properly shut down */
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!eth->netdev[i])
+			continue;
+		mtk_stop(eth->netdev[i]);
+		/* remember which devices to restart afterwards */
+		__set_bit(i, &restart);
+	}
+	dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);
+
+	/* restart underlying hardware such as power, clock, pin mux
+	 * and the connected phy
+	 */
+	mtk_hw_deinit(eth);
+
+	if (eth->dev->pins)
+		pinctrl_select_state(eth->dev->pins->p,
+				     eth->dev->pins->default_state);
+	mtk_hw_init(eth);
+
+	/* restart DMA and enable IRQs */
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!test_bit(i, &restart))
+			continue;
+		err = mtk_open(eth->netdev[i]);
+		if (err) {
+			netif_alert(eth, ifup, eth->netdev[i],
+				    "Driver up/down cycle failed, closing device.\n");
+			dev_close(eth->netdev[i]);
+		}
+	}
+
+	dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
+
+	clear_bit_unlock(MTK_RESETTING, &eth->state);
+
+	rtnl_unlock();
+}
+
+/* Free every allocated netdev.  Callers must have unregistered them
+ * first (see mtk_cleanup()).
+ */
+static int mtk_free_dev(struct mtk_eth *eth)
+{
+	int i;
+
+	for (i = 0; i < MTK_MAC_COUNT; i++)
+		if (eth->netdev[i])
+			free_netdev(eth->netdev[i]);
+
+	return 0;
+}
+
+/* Unregister every netdev that was registered in mtk_probe(). */
+static int mtk_unreg_dev(struct mtk_eth *eth)
+{
+	int i;
+
+	for (i = 0; i < MTK_MAC_COUNT; i++)
+		if (eth->netdev[i])
+			unregister_netdev(eth->netdev[i]);
+
+	return 0;
+}
+
+/* Tear down all netdevs: unregister first, then free, then make sure
+ * the reset worker is not still running.
+ */
+static int mtk_cleanup(struct mtk_eth *eth)
+{
+	mtk_unreg_dev(eth);
+	mtk_free_dev(eth);
+	cancel_work_sync(&eth->pending_work);
+
+	return 0;
+}
+
+/* ethtool get_link_ksettings: delegate to phylink unless a global reset
+ * is in flight.
+ */
+static int mtk_get_link_ksettings(struct net_device *ndev,
+				  struct ethtool_link_ksettings *cmd)
+{
+	struct mtk_mac *mac = netdev_priv(ndev);
+	struct mtk_eth *hw = mac->hw;
+
+	if (unlikely(test_bit(MTK_RESETTING, &hw->state)))
+		return -EBUSY;
+
+	return phylink_ethtool_ksettings_get(mac->phylink, cmd);
+}
+
+/* ethtool set_link_ksettings: delegate to phylink unless a global reset
+ * is in flight.
+ */
+static int mtk_set_link_ksettings(struct net_device *ndev,
+				  const struct ethtool_link_ksettings *cmd)
+{
+	struct mtk_mac *mac = netdev_priv(ndev);
+	struct mtk_eth *hw = mac->hw;
+
+	if (unlikely(test_bit(MTK_RESETTING, &hw->state)))
+		return -EBUSY;
+
+	return phylink_ethtool_ksettings_set(mac->phylink, cmd);
+}
+
+/* ethtool get_drvinfo: report driver name, bus info and stat count. */
+static void mtk_get_drvinfo(struct net_device *dev,
+			    struct ethtool_drvinfo *info)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct device *hwdev = mac->hw->dev;
+
+	strlcpy(info->driver, hwdev->driver->name, sizeof(info->driver));
+	strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
+	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
+}
+
+/* ethtool get_msglevel: return the shared netif message-level mask. */
+static u32 mtk_get_msglevel(struct net_device *dev)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *hw = mac->hw;
+
+	return hw->msg_enable;
+}
+
+/* ethtool set_msglevel: update the shared netif message-level mask. */
+static void mtk_set_msglevel(struct net_device *dev, u32 value)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *hw = mac->hw;
+
+	hw->msg_enable = value;
+}
+
+/* ethtool nway_reset: restart autonegotiation through phylink. */
+static int mtk_nway_reset(struct net_device *dev)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+
+	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+		return -EBUSY;
+
+	return mac->phylink ? phylink_ethtool_nway_reset(mac->phylink)
+			    : -ENOTSUPP;
+}
+
+/* ethtool get_strings: emit the stat names for ETH_SS_STATS. */
+static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+	u8 *p = data;
+	int i;
+
+	if (stringset != ETH_SS_STATS)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
+		memcpy(p, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
+		p += ETH_GSTRING_LEN;
+	}
+}
+
+/* ethtool get_sset_count: only ETH_SS_STATS is supported. */
+static int mtk_get_sset_count(struct net_device *dev, int sset)
+{
+	if (sset != ETH_SS_STATS)
+		return -EOPNOTSUPP;
+
+	return ARRAY_SIZE(mtk_ethtool_stats);
+}
+
+/* ethtool get_ethtool_stats: refresh the MAC counters (best effort, only
+ * if the stats lock is uncontended) and copy them out through the
+ * u64_stats seqcount so readers never see a torn snapshot.
+ */
+static void mtk_get_ethtool_stats(struct net_device *dev,
+				  struct ethtool_stats *stats, u64 *data)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_hw_stats *hwstats = mac->hw_stats;
+	u64 *data_src, *data_dst;
+	unsigned int start;
+	int i;
+
+	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+		return;
+
+	if (netif_running(dev) && netif_device_present(dev)) {
+		/* trylock: skip the refresh rather than block userspace */
+		if (spin_trylock_bh(&hwstats->stats_lock)) {
+			mtk_stats_update_mac(mac);
+			spin_unlock_bh(&hwstats->stats_lock);
+		}
+	}
+
+	/* stat offsets index directly into the hw_stats struct as u64s */
+	data_src = (u64 *)hwstats;
+
+	/* retry the copy if a writer raced with us */
+	do {
+		data_dst = data;
+		start = u64_stats_fetch_begin_irq(&hwstats->syncp);
+
+		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
+			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
+	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
+}
+
+/* ethtool get_rxnfc: all supported sub-commands require the device to
+ * advertise hardware LRO; otherwise -EOPNOTSUPP is returned.
+ */
+static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+			 u32 *rule_locs)
+{
+	bool hwlro = !!(dev->hw_features & NETIF_F_LRO);
+	int ret = -EOPNOTSUPP;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_GRXRINGS:
+		if (hwlro) {
+			cmd->data = MTK_MAX_RX_RING_NUM;
+			ret = 0;
+		}
+		break;
+	case ETHTOOL_GRXCLSRLCNT:
+		if (hwlro) {
+			struct mtk_mac *mac = netdev_priv(dev);
+
+			cmd->rule_cnt = mac->hwlro_ip_cnt;
+			ret = 0;
+		}
+		break;
+	case ETHTOOL_GRXCLSRULE:
+		if (hwlro)
+			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
+		break;
+	case ETHTOOL_GRXCLSRLALL:
+		if (hwlro)
+			ret = mtk_hwlro_get_fdir_all(dev, cmd, rule_locs);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/* ethtool set_rxnfc: insert/delete HW LRO IP-address rules; only valid
+ * when the device advertises hardware LRO.
+ */
+static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+	bool hwlro = !!(dev->hw_features & NETIF_F_LRO);
+	int ret = -EOPNOTSUPP;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_SRXCLSRLINS:
+		if (hwlro)
+			ret = mtk_hwlro_add_ipaddr(dev, cmd);
+		break;
+	case ETHTOOL_SRXCLSRLDEL:
+		if (hwlro)
+			ret = mtk_hwlro_del_ipaddr(dev, cmd);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/* ethtool operations shared by every MAC netdev. */
+static const struct ethtool_ops mtk_ethtool_ops = {
+	.get_link_ksettings = mtk_get_link_ksettings,
+	.set_link_ksettings = mtk_set_link_ksettings,
+	.get_drvinfo = mtk_get_drvinfo,
+	.get_msglevel = mtk_get_msglevel,
+	.set_msglevel = mtk_set_msglevel,
+	.nway_reset = mtk_nway_reset,
+	.get_link = ethtool_op_get_link,
+	.get_strings = mtk_get_strings,
+	.get_sset_count = mtk_get_sset_count,
+	.get_ethtool_stats = mtk_get_ethtool_stats,
+	.get_rxnfc = mtk_get_rxnfc,
+	.set_rxnfc = mtk_set_rxnfc,
+};
+
+/* net_device operations shared by every MAC netdev. */
+static const struct net_device_ops mtk_netdev_ops = {
+	.ndo_init		= mtk_init,
+	.ndo_uninit		= mtk_uninit,
+	.ndo_open		= mtk_open,
+	.ndo_stop		= mtk_stop,
+	.ndo_start_xmit		= mtk_start_xmit,
+	.ndo_set_mac_address	= mtk_set_mac_address,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_do_ioctl		= mtk_do_ioctl,
+	.ndo_tx_timeout		= mtk_tx_timeout,
+	.ndo_get_stats64        = mtk_get_stats64,
+	.ndo_fix_features	= mtk_fix_features,
+	.ndo_set_features	= mtk_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= mtk_poll_controller,
+#endif
+};
+
+/* Create one netdev + phylink instance for an "eth-mac" DT child node.
+ * The MAC id comes from the node's "reg" property and indexes
+ * eth->netdev[]/eth->mac[].
+ *
+ * Fix: the error path now clears eth->netdev[id] after free_netdev(),
+ * so later cleanup (mtk_free_dev()/mtk_remove()) cannot free or use the
+ * stale pointer a second time.
+ */
+static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
+{
+	const __be32 *_id = of_get_property(np, "reg", NULL);
+	struct phylink *phylink;
+	int phy_mode, id, err;
+	struct mtk_mac *mac;
+
+	if (!_id) {
+		dev_err(eth->dev, "missing mac id\n");
+		return -EINVAL;
+	}
+
+	id = be32_to_cpup(_id);
+	if (id >= MTK_MAC_COUNT) {
+		dev_err(eth->dev, "%d is not a valid mac id\n", id);
+		return -EINVAL;
+	}
+
+	if (eth->netdev[id]) {
+		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
+		return -EINVAL;
+	}
+
+	eth->netdev[id] = alloc_etherdev(sizeof(*mac));
+	if (!eth->netdev[id]) {
+		dev_err(eth->dev, "alloc_etherdev failed\n");
+		return -ENOMEM;
+	}
+	mac = netdev_priv(eth->netdev[id]);
+	eth->mac[id] = mac;
+	mac->id = id;
+	mac->hw = eth;
+	mac->of_node = np;
+
+	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
+	mac->hwlro_ip_cnt = 0;
+
+	mac->hw_stats = devm_kzalloc(eth->dev,
+				     sizeof(*mac->hw_stats),
+				     GFP_KERNEL);
+	if (!mac->hw_stats) {
+		dev_err(eth->dev, "failed to allocate counter memory\n");
+		err = -ENOMEM;
+		goto free_netdev;
+	}
+	spin_lock_init(&mac->hw_stats->stats_lock);
+	u64_stats_init(&mac->hw_stats->syncp);
+	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
+
+	/* phylink create */
+	phy_mode = of_get_phy_mode(np);
+	if (phy_mode < 0) {
+		dev_err(eth->dev, "incorrect phy-mode\n");
+		err = -EINVAL;
+		goto free_netdev;
+	}
+
+	/* mac config is not set */
+	mac->interface = PHY_INTERFACE_MODE_NA;
+	mac->mode = MLO_AN_PHY;
+	mac->speed = SPEED_UNKNOWN;
+
+	mac->phylink_config.dev = &eth->netdev[id]->dev;
+	mac->phylink_config.type = PHYLINK_NETDEV;
+
+	phylink = phylink_create(&mac->phylink_config,
+				 of_fwnode_handle(mac->of_node),
+				 phy_mode, &mtk_phylink_ops);
+	if (IS_ERR(phylink)) {
+		err = PTR_ERR(phylink);
+		goto free_netdev;
+	}
+
+	mac->phylink = phylink;
+
+	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
+	eth->netdev[id]->watchdog_timeo = 5 * HZ;
+	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
+	eth->netdev[id]->base_addr = (unsigned long)eth->base;
+
+	eth->netdev[id]->hw_features = eth->soc->hw_features;
+	if (eth->hwlro)
+		eth->netdev[id]->hw_features |= NETIF_F_LRO;
+
+	eth->netdev[id]->vlan_features = eth->soc->hw_features &
+		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
+	eth->netdev[id]->features |= eth->soc->hw_features;
+	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
+
+	eth->netdev[id]->irq = eth->irq[0];
+	eth->netdev[id]->dev.of_node = np;
+
+	return 0;
+
+free_netdev:
+	free_netdev(eth->netdev[id]);
+	/* drop the stale pointer so cleanup paths cannot double free */
+	eth->netdev[id] = NULL;
+	eth->mac[id] = NULL;
+	return err;
+}
+
+/* Platform probe: map registers, resolve syscon regmaps, SGMII, pctl,
+ * IRQs and clocks from the device tree, init the hardware, create one
+ * netdev per "eth-mac" child node, request IRQs, init MDIO and finally
+ * register the netdevs and NAPI contexts.
+ */
+static int mtk_probe(struct platform_device *pdev)
+{
+	struct device_node *mac_np;
+	struct mtk_eth *eth;
+	int err, i;
+
+	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
+	if (!eth)
+		return -ENOMEM;
+
+	eth->soc = of_device_get_match_data(&pdev->dev);
+
+	eth->dev = &pdev->dev;
+	eth->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(eth->base))
+		return PTR_ERR(eth->base);
+
+	/* SoCs with internal SRAM place the scratch ring inside it.
+	 * NOTE(review): platform_get_resource() is not NULL-checked here.
+	 */
+	if(eth->soc->has_sram) {
+		struct resource *res;
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
+	}
+
+	/* TX interrupt lives in QDMA when available, PDMA otherwise */
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+		eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
+		eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
+	} else {
+		eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
+		eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
+	}
+
+	/* pick the RX descriptor "L4 valid" flag for this SoC generation */
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+		eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
+		eth->ip_align = NET_IP_ALIGN;
+	} else {
+		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
+			eth->rx_dma_l4_valid = RX_DMA_L4_VALID_V2;
+		else
+			eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
+	}
+
+	spin_lock_init(&eth->page_lock);
+	spin_lock_init(&eth->tx_irq_lock);
+	spin_lock_init(&eth->rx_irq_lock);
+
+	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+		eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+							      "mediatek,ethsys");
+		if (IS_ERR(eth->ethsys)) {
+			dev_err(&pdev->dev, "no ethsys regmap found\n");
+			return PTR_ERR(eth->ethsys);
+		}
+	}
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
+		eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+							     "mediatek,infracfg");
+		if (IS_ERR(eth->infra)) {
+			dev_err(&pdev->dev, "no infracfg regmap found\n");
+			return PTR_ERR(eth->infra);
+		}
+	}
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
+		eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
+					  GFP_KERNEL);
+		if (!eth->sgmii)
+			return -ENOMEM;
+
+		err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
+				     eth->soc->ana_rgc3);
+
+		if (err)
+			return err;
+	}
+
+	if (eth->soc->required_pctl) {
+		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+							    "mediatek,pctl");
+		if (IS_ERR(eth->pctl)) {
+			dev_err(&pdev->dev, "no pctl regmap found\n");
+			return PTR_ERR(eth->pctl);
+		}
+	}
+
+	/* three IRQs: shared-IRQ SoCs reuse IRQ 0 for all of them */
+	for (i = 0; i < 3; i++) {
+		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
+			eth->irq[i] = eth->irq[0];
+		else
+			eth->irq[i] = platform_get_irq(pdev, i);
+		if (eth->irq[i] < 0) {
+			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
+			return -ENXIO;
+		}
+	}
+
+	/* optional clocks stay NULL; required ones are fatal if missing */
+	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
+		eth->clks[i] = devm_clk_get(eth->dev,
+					    mtk_clks_source_name[i]);
+		if (IS_ERR(eth->clks[i])) {
+			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
+				return -EPROBE_DEFER;
+			if (eth->soc->required_clks & BIT(i)) {
+				dev_err(&pdev->dev, "clock %s not found\n",
+					mtk_clks_source_name[i]);
+				return -EINVAL;
+			}
+			eth->clks[i] = NULL;
+		}
+	}
+
+	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
+	INIT_WORK(&eth->pending_work, mtk_pending_work);
+
+	err = mtk_hw_init(eth);
+	if (err)
+		return err;
+
+	eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
+
+	for_each_child_of_node(pdev->dev.of_node, mac_np) {
+		if (!of_device_is_compatible(mac_np,
+					     "mediatek,eth-mac"))
+			continue;
+
+		if (!of_device_is_available(mac_np))
+			continue;
+
+		err = mtk_add_mac(eth, mac_np);
+		if (err) {
+			of_node_put(mac_np);
+			goto err_deinit_hw;
+		}
+	}
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
+		err = devm_request_irq(eth->dev, eth->irq[0],
+				       mtk_handle_irq, 0,
+				       dev_name(eth->dev), eth);
+	} else {
+		err = devm_request_irq(eth->dev, eth->irq[1],
+				       mtk_handle_irq_tx, 0,
+				       dev_name(eth->dev), eth);
+		if (err)
+			goto err_free_dev;
+
+		err = devm_request_irq(eth->dev, eth->irq[2],
+				       mtk_handle_irq_rx, 0,
+				       dev_name(eth->dev), eth);
+	}
+	if (err)
+		goto err_free_dev;
+
+	/* No MT7628/88 support yet */
+	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+		err = mtk_mdio_init(eth);
+		if (err)
+			goto err_free_dev;
+	}
+
+	/* NOTE(review): this loop iterates MTK_MAX_DEVS while the rest of
+	 * the driver uses MTK_MAC_COUNT, and an error here does not
+	 * unregister netdevs registered in earlier iterations -- confirm.
+	 */
+	for (i = 0; i < MTK_MAX_DEVS; i++) {
+		if (!eth->netdev[i])
+			continue;
+
+		err = register_netdev(eth->netdev[i]);
+		if (err) {
+			dev_err(eth->dev, "error bringing up device\n");
+			goto err_deinit_mdio;
+		} else
+			netif_info(eth, probe, eth->netdev[i],
+				   "mediatek frame engine at 0x%08lx, irq %d\n",
+				   eth->netdev[i]->base_addr, eth->irq[0]);
+	}
+
+	/* we run 2 devices on the same DMA ring so we need a dummy device
+	 * for NAPI to work
+	 */
+	init_dummy_netdev(&eth->dummy_dev);
+	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
+		       MTK_NAPI_WEIGHT);
+	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
+		       MTK_NAPI_WEIGHT);
+
+	mtketh_debugfs_init(eth);
+	debug_proc_init(eth);
+
+	platform_set_drvdata(pdev, eth);
+
+	return 0;
+
+err_deinit_mdio:
+	mtk_mdio_cleanup(eth);
+err_free_dev:
+	mtk_free_dev(eth);
+err_deinit_hw:
+	mtk_hw_deinit(eth);
+
+	return err;
+}
+
+/* Platform remove: stop every netdev, power the hardware down and tear
+ * down NAPI, netdevs and the MDIO bus.
+ */
+static int mtk_remove(struct platform_device *pdev)
+{
+	struct mtk_eth *eth = platform_get_drvdata(pdev);
+	struct mtk_mac *mac;
+	int i;
+
+	/* stop all devices to make sure that dma is properly shut down */
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!eth->netdev[i])
+			continue;
+		mtk_stop(eth->netdev[i]);
+		mac = netdev_priv(eth->netdev[i]);
+		/* NOTE(review): mtk_stop() above already calls
+		 * phylink_disconnect_phy() -- confirm this second call
+		 * cannot double disconnect.
+		 */
+		phylink_disconnect_phy(mac->phylink);
+	}
+
+	mtk_hw_deinit(eth);
+
+	netif_napi_del(&eth->tx_napi);
+	netif_napi_del(&eth->rx_napi);
+	mtk_cleanup(eth);
+	mtk_mdio_cleanup(eth);
+
+	return 0;
+}
+
+/* Per-SoC configuration tables, selected through of_mtk_match below and
+ * consumed via of_device_get_match_data() in mtk_probe().
+ */
+static const struct mtk_soc_data mt2701_data = {
+	.caps = MT7623_CAPS | MTK_HWLRO,
+	.hw_features = MTK_HW_FEATURES,
+	.required_clks = MT7623_CLKS_BITMAP,
+	.required_pctl = true,
+	.has_sram = false,
+};
+
+static const struct mtk_soc_data mt7621_data = {
+	.caps = MT7621_CAPS,
+	.hw_features = MTK_HW_FEATURES,
+	.required_clks = MT7621_CLKS_BITMAP,
+	.required_pctl = false,
+	.has_sram = false,
+};
+
+static const struct mtk_soc_data mt7622_data = {
+	.ana_rgc3 = 0x2028,
+	.caps = MT7622_CAPS | MTK_HWLRO,
+	.hw_features = MTK_HW_FEATURES,
+	.required_clks = MT7622_CLKS_BITMAP,
+	.required_pctl = false,
+	.has_sram = false,
+};
+
+static const struct mtk_soc_data mt7623_data = {
+	.caps = MT7623_CAPS | MTK_HWLRO,
+	.hw_features = MTK_HW_FEATURES,
+	.required_clks = MT7623_CLKS_BITMAP,
+	.required_pctl = true,
+	.has_sram = false,
+};
+
+static const struct mtk_soc_data mt7629_data = {
+	.ana_rgc3 = 0x128,
+	.caps = MT7629_CAPS | MTK_HWLRO,
+	.hw_features = MTK_HW_FEATURES,
+	.required_clks = MT7629_CLKS_BITMAP,
+	.required_pctl = false,
+	.has_sram = false,
+};
+
+/* NOTE(review): mt7986_data sets no .hw_features, so its netdevs would
+ * advertise no offload features -- confirm this omission is intentional.
+ */
+static const struct mtk_soc_data mt7986_data = {
+	.ana_rgc3 = 0x128,
+	.caps = MT7986_CAPS,
+	.required_clks = MT7986_CLKS_BITMAP,
+	.required_pctl = false,
+	.has_sram = true,
+};
+
+static const struct mtk_soc_data rt5350_data = {
+	.caps = MT7628_CAPS,
+	.hw_features = MTK_HW_FEATURES_MT7628,
+	.required_clks = MT7628_CLKS_BITMAP,
+	.required_pctl = false,
+	.has_sram = false,
+};
+
+/* Device-tree match table mapping compatibles to the SoC data above. */
+const struct of_device_id of_mtk_match[] = {
+	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
+	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
+	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
+	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
+	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
+	{ .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
+	{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
+	{},
+};
+MODULE_DEVICE_TABLE(of, of_mtk_match);
+
+/* Platform driver glue and module metadata. */
+static struct platform_driver mtk_driver = {
+	.probe = mtk_probe,
+	.remove = mtk_remove,
+	.driver = {
+		.name = "mtk_soc_eth",
+		.of_match_table = of_mtk_match,
+	},
+};
+
+module_platform_driver(mtk_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
+MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.h
new file mode 100755
index 0000000..f240e63
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -0,0 +1,1091 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
+ */
+
+#ifndef MTK_ETH_H
+#define MTK_ETH_H
+
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/of_net.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/refcount.h>
+#include <linux/phylink.h>
+
+#define MTK_QDMA_PAGE_SIZE 2048
+#define MTK_MAX_RX_LENGTH 1536
+#define MTK_DMA_SIZE 2048
+#define MTK_NAPI_WEIGHT 256
+#define MTK_MAC_COUNT 2
+#define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
+#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
+#define MTK_DMA_DUMMY_DESC 0xffffffff
+#define MTK_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+#define MTK_HW_FEATURES (NETIF_F_IP_CSUM | \
+ NETIF_F_RXCSUM | \
+ NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_SG | NETIF_F_TSO | \
+ NETIF_F_TSO6 | \
+ NETIF_F_IPV6_CSUM)
+#define MTK_SET_FEATURES (NETIF_F_LRO | \
+ NETIF_F_HW_VLAN_CTAG_RX)
+#define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM)
+#define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
+
+#define MTK_MAX_RX_RING_NUM 4
+#define MTK_HW_LRO_DMA_SIZE 8
+
+#define MTK_MAX_LRO_RX_LENGTH (4096 * 3)
+#define MTK_MAX_LRO_IP_CNT 2
+#define MTK_HW_LRO_TIMER_UNIT 1 /* 20 us */
+#define MTK_HW_LRO_REFRESH_TIME 50000 /* 1 sec. */
+#define MTK_HW_LRO_AGG_TIME 10 /* 200us */
+#define MTK_HW_LRO_AGE_TIME 50 /* 1ms */
+#define MTK_HW_LRO_MAX_AGG_CNT 64
+#define MTK_HW_LRO_BW_THRE 3000
+#define MTK_HW_LRO_REPLACE_DELTA 1000
+#define MTK_HW_LRO_SDL_REMAIN_ROOM 1522
+
+/* Frame Engine Global Reset Register */
+#define MTK_RST_GL 0x04
+#define RST_GL_PSE BIT(0)
+
+/* Frame Engine Interrupt Status Register */
+#define MTK_INT_STATUS2 0x08
+#define MTK_GDM1_AF BIT(28)
+#define MTK_GDM2_AF BIT(29)
+
+/* PDMA HW LRO Alter Flow Timer Register */
+#define MTK_PDMA_LRO_ALT_REFRESH_TIMER 0x1c
+
+/* Frame Engine Interrupt Grouping Register */
+#define MTK_FE_INT_GRP 0x20
+
+/* CDMQ Ingress Control Register */
+#define MTK_CDMQ_IG_CTRL 0x1400
+#define MTK_CDMQ_STAG_EN BIT(0)
+
+/* CDMP Ingress Control Register */
+#define MTK_CDMP_IG_CTRL 0x400
+#define MTK_CDMP_STAG_EN BIT(0)
+
+/* CDMP Egress Control Register */
+#define MTK_CDMP_EG_CTRL 0x404
+
+/* GDM Egress Control Register */
+#define MTK_GDMA_FWD_CFG(x) (0x500 + (x * 0x1000))
+#define MTK_GDMA_SPECIAL_TAG BIT(24)
+#define MTK_GDMA_ICS_EN BIT(22)
+#define MTK_GDMA_TCS_EN BIT(21)
+#define MTK_GDMA_UCS_EN BIT(20)
+#define MTK_GDMA_TO_PDMA 0x0
+#define MTK_GDMA_DROP_ALL 0x7777
+
+/* Unicast Filter MAC Address Register - Low */
+#define MTK_GDMA_MAC_ADRL(x) (0x508 + (x * 0x1000))
+
+/* Unicast Filter MAC Address Register - High */
+#define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
+
+/* Internal SRAM offset */
+#define MTK_ETH_SRAM_OFFSET 0x40000
+
+/* FE global misc reg */
+#define MTK_FE_GLO_MISC 0x124
+
+/* PSE Input Queue Reservation Register */
+#define PSE_IQ_REV(x) (0x140 + ((x - 1) * 0x4))
+
+/* PSE Output Queue Threshold Register */
+#define PSE_OQ_TH(x) (0x160 + ((x - 1) * 0x4))
+
+#define MTK_PDMA_V2 BIT(4)
+/* NOTE(review): defining a CONFIG_* symbol in a driver header shadows
+ * the Kconfig namespace; CONFIG_MEDIATEK_NETSYS_RX_V2 is forced on
+ * whenever CONFIG_MEDIATEK_NETSYS_V2 is set -- confirm intentional.
+ */
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+#define CONFIG_MEDIATEK_NETSYS_RX_V2 1
+
+/* NETSYS v2 moved the PDMA block; QDMA stays at 0x4400 */
+#ifdef CONFIG_MEDIATEK_NETSYS_RX_V2
+#define PDMA_BASE 0x6000
+#else
+#define PDMA_BASE 0x4000
+#endif
+
+#define QDMA_BASE 0x4400
+#else
+/* NETSYS v1 register map */
+#define PDMA_BASE 0x0800
+#define QDMA_BASE 0x1800
+#endif
+/* PDMA RX Base Pointer Register */
+#define MTK_PRX_BASE_PTR0 (PDMA_BASE + 0x100)
+#define MTK_PRX_BASE_PTR_CFG(x) (MTK_PRX_BASE_PTR0 + (x * 0x10))
+
+/* PDMA RX Maximum Count Register */
+#define MTK_PRX_MAX_CNT0 (MTK_PRX_BASE_PTR0 + 0x04)
+#define MTK_PRX_MAX_CNT_CFG(x) (MTK_PRX_MAX_CNT0 + (x * 0x10))
+
+/* PDMA RX CPU Pointer Register */
+#define MTK_PRX_CRX_IDX0 (MTK_PRX_BASE_PTR0 + 0x08)
+#define MTK_PRX_CRX_IDX_CFG(x) (MTK_PRX_CRX_IDX0 + (x * 0x10))
+
+/* PDMA HW LRO Control Registers */
+#define MTK_PDMA_LRO_CTRL_DW0 (PDMA_BASE + 0x180)
+#define MTK_LRO_EN BIT(0)
+#define MTK_L3_CKS_UPD_EN BIT(7)
+#define MTK_LRO_ALT_PKT_CNT_MODE BIT(21)
+#define MTK_LRO_RING_RELINQUISH_REQ (0x7 << 26)
+#define MTK_LRO_RING_RELINQUISH_DONE (0x7 << 29)
+
+#define MTK_PDMA_LRO_CTRL_DW1 (MTK_PDMA_LRO_CTRL_DW0 + 0x04)
+#define MTK_PDMA_LRO_CTRL_DW2 (MTK_PDMA_LRO_CTRL_DW0 + 0x08)
+#define MTK_PDMA_LRO_CTRL_DW3 (MTK_PDMA_LRO_CTRL_DW0 + 0x0c)
+#define MTK_ADMA_MODE BIT(15)
+#define MTK_LRO_MIN_RXD_SDL (MTK_HW_LRO_SDL_REMAIN_ROOM << 16)
+
+/* PDMA Global Configuration Register */
+#define MTK_PDMA_GLO_CFG (PDMA_BASE + 0x204)
+#define MTK_MULTI_EN BIT(10)
+#define MTK_PDMA_SIZE_8DWORDS (1 << 4)
+
+/* PDMA Reset Index Register */
+#define MTK_PDMA_RST_IDX (PDMA_BASE + 0x208)
+#define MTK_PST_DRX_IDX0 BIT(16)
+#define MTK_PST_DRX_IDX_CFG(x) (MTK_PST_DRX_IDX0 << (x))
+
+/* PDMA Delay Interrupt Register */
+#define MTK_PDMA_DELAY_INT (PDMA_BASE + 0x20c)
+#define MTK_PDMA_DELAY_RX_EN BIT(15)
+#define MTK_PDMA_DELAY_RX_PINT 4
+#define MTK_PDMA_DELAY_RX_PINT_SHIFT 8
+#define MTK_PDMA_DELAY_RX_PTIME 4
+#define MTK_PDMA_DELAY_RX_DELAY \
+ (MTK_PDMA_DELAY_RX_EN | MTK_PDMA_DELAY_RX_PTIME | \
+ (MTK_PDMA_DELAY_RX_PINT << MTK_PDMA_DELAY_RX_PINT_SHIFT))
+
+/* PDMA Interrupt Status Register */
+#define MTK_PDMA_INT_STATUS (PDMA_BASE + 0x220)
+
+/* PDMA Interrupt Mask Register */
+#define MTK_PDMA_INT_MASK (PDMA_BASE + 0x228)
+
+/* PDMA HW LRO Alter Flow Delta Register */
+#define MTK_PDMA_LRO_ALT_SCORE_DELTA (PDMA_BASE + 0x24c)
+
+/* PDMA Interrupt grouping registers */
+#define MTK_PDMA_INT_GRP1 (PDMA_BASE + 0x250)
+#define MTK_PDMA_INT_GRP2 (PDMA_BASE + 0x254)
+
+/* PDMA HW LRO IP Setting Registers */
+#define MTK_LRO_RX_RING0_DIP_DW0 (PDMA_BASE + 0x304)
+#define MTK_LRO_DIP_DW0_CFG(x) (MTK_LRO_RX_RING0_DIP_DW0 + (x * 0x40))
+#define MTK_RING_MYIP_VLD BIT(9)
+
+/* PDMA HW LRO Ring Control Registers */
+#define MTK_LRO_RX_RING0_CTRL_DW1 (PDMA_BASE + 0x328)
+#define MTK_LRO_RX_RING0_CTRL_DW2 (PDMA_BASE + 0x32c)
+#define MTK_LRO_RX_RING0_CTRL_DW3 (PDMA_BASE + 0x330)
+#define MTK_LRO_CTRL_DW1_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW1 + (x * 0x40))
+#define MTK_LRO_CTRL_DW2_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW2 + (x * 0x40))
+#define MTK_LRO_CTRL_DW3_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW3 + (x * 0x40))
+#define MTK_RING_AGE_TIME_L ((MTK_HW_LRO_AGE_TIME & 0x3ff) << 22)
+#define MTK_RING_AGE_TIME_H ((MTK_HW_LRO_AGE_TIME >> 10) & 0x3f)
+#define MTK_RING_AUTO_LERAN_MODE (3 << 6)
+#define MTK_RING_VLD BIT(8)
+#define MTK_RING_MAX_AGG_TIME ((MTK_HW_LRO_AGG_TIME & 0xffff) << 10)
+#define MTK_RING_MAX_AGG_CNT_L ((MTK_HW_LRO_MAX_AGG_CNT & 0x3f) << 26)
+#define MTK_RING_MAX_AGG_CNT_H ((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3)
+
+/* QDMA TX Queue Configuration Registers */
+#define MTK_QTX_CFG(x) (QDMA_BASE + (x * 0x10))
+#define QDMA_RES_THRES 4
+
+/* QDMA TX Queue Scheduler Registers */
+#define MTK_QTX_SCH(x) (QDMA_BASE + 4 + (x * 0x10))
+
+/* QDMA RX Base Pointer Register */
+#define MTK_QRX_BASE_PTR0 (QDMA_BASE + 0x100)
+#define MTK_QRX_BASE_PTR_CFG(x) (MTK_QRX_BASE_PTR0 + ((x) * 0x10))
+
+/* QDMA RX Maximum Count Register */
+#define MTK_QRX_MAX_CNT0 (QDMA_BASE + 0x104)
+#define MTK_QRX_MAX_CNT_CFG(x) (MTK_QRX_MAX_CNT0 + ((x) * 0x10))
+
+/* QDMA RX CPU Pointer Register */
+#define MTK_QRX_CRX_IDX0 (QDMA_BASE + 0x108)
+#define MTK_QRX_CRX_IDX_CFG(x) (MTK_QRX_CRX_IDX0 + ((x) * 0x10))
+
+/* QDMA RX DMA Pointer Register */
+#define MTK_QRX_DRX_IDX0 (QDMA_BASE + 0x10c)
+
+/* QDMA Global Configuration Register */
+#define MTK_QDMA_GLO_CFG (QDMA_BASE + 0x204)
+#define MTK_RX_2B_OFFSET BIT(31)
+#define MTK_RX_BT_32DWORDS (3 << 11)
+#define MTK_NDP_CO_PRO BIT(10)
+#define MTK_TX_WB_DDONE BIT(6)
+#define MTK_DMA_SIZE_16DWORDS (2 << 4)
+#define MTK_DMA_SIZE_32DWORDS (3 << 4)
+#define MTK_RX_DMA_BUSY BIT(3)
+#define MTK_TX_DMA_BUSY BIT(1)
+#define MTK_RX_DMA_EN BIT(2)
+#define MTK_TX_DMA_EN BIT(0)
+#define MTK_DMA_BUSY_TIMEOUT HZ
+
+/* QDMA V2 Global Configuration Register */
+#define MTK_CHK_DDONE_EN BIT(28)
+#define MTK_DMAD_WR_WDONE BIT(26)
+#define MTK_WCOMP_EN BIT(24)
+#define MTK_RESV_BUF (0x40 << 16)
+#define MTK_MUTLI_CNT (0x4 << 12)
+
+/* QDMA Reset Index Register */
+#define MTK_QDMA_RST_IDX (QDMA_BASE + 0x208)
+
+/* QDMA Delay Interrupt Register */
+#define MTK_QDMA_DELAY_INT (QDMA_BASE + 0x20c)
+
+/* QDMA Flow Control Register */
+#define MTK_QDMA_FC_THRES (QDMA_BASE + 0x210)
+#define FC_THRES_DROP_MODE BIT(20)
+#define FC_THRES_DROP_EN (7 << 16)
+#define FC_THRES_MIN 0x4444
+
+/* QDMA Interrupt Status Register */
+#define MTK_QDMA_INT_STATUS (QDMA_BASE + 0x218)
+#ifdef CONFIG_MEDIATEK_NETSYS_RX_V2
+#define MTK_RX_DONE_DLY BIT(14)
+#else
+#define MTK_RX_DONE_DLY BIT(30)
+#endif
+#define MTK_RX_DONE_INT3 BIT(19)
+#define MTK_RX_DONE_INT2 BIT(18)
+#define MTK_RX_DONE_INT1 BIT(17)
+#define MTK_RX_DONE_INT0 BIT(16)
+#define MTK_TX_DONE_INT3 BIT(3)
+#define MTK_TX_DONE_INT2 BIT(2)
+#define MTK_TX_DONE_INT1 BIT(1)
+#define MTK_TX_DONE_INT0 BIT(0)
+#define MTK_RX_DONE_INT MTK_RX_DONE_DLY
+#define MTK_TX_DONE_DLY BIT(28)
+#define MTK_TX_DONE_INT MTK_TX_DONE_DLY
+
+/* QDMA Interrupt grouping registers */
+#define MTK_QDMA_INT_GRP1 (QDMA_BASE + 0x220)
+#define MTK_QDMA_INT_GRP2 (QDMA_BASE + 0x224)
+#define MTK_RLS_DONE_INT BIT(0)
+
+/* QDMA Interrupt Mask Register */
+#define MTK_QDMA_INT_MASK (QDMA_BASE + 0x21c)
+
+/* QDMA HRED2 (hardware RED drop) Register */
+#define MTK_QDMA_HRED2 (QDMA_BASE + 0x244)
+
+/* QDMA TX Forward CPU Pointer Register */
+#define MTK_QTX_CTX_PTR (QDMA_BASE +0x300)
+
+/* QDMA TX Forward DMA Pointer Register */
+#define MTK_QTX_DTX_PTR (QDMA_BASE +0x304)
+
+/* QDMA TX Release CPU Pointer Register */
+#define MTK_QTX_CRX_PTR (QDMA_BASE +0x310)
+
+/* QDMA TX Release DMA Pointer Register */
+#define MTK_QTX_DRX_PTR (QDMA_BASE +0x314)
+
+/* QDMA FQ Head Pointer Register */
+#define MTK_QDMA_FQ_HEAD (QDMA_BASE +0x320)
+
+/* QDMA FQ Tail Pointer Register */
+#define MTK_QDMA_FQ_TAIL (QDMA_BASE +0x324)
+
+/* QDMA FQ Free Page Counter Register */
+#define MTK_QDMA_FQ_CNT (QDMA_BASE +0x328)
+
+/* QDMA FQ Free Page Buffer Length Register */
+#define MTK_QDMA_FQ_BLEN (QDMA_BASE +0x32c)
+
+/* GMA1 Received Good Byte Count Register */
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+#define MTK_GDM1_TX_GBCNT 0x1C00
+#else
+#define MTK_GDM1_TX_GBCNT 0x2400
+#endif
+#define MTK_STAT_OFFSET 0x40
+
+/* QDMA TX NUM */
+#define MTK_QDMA_TX_NUM 16
+#define MTK_QDMA_TX_MASK ((MTK_QDMA_TX_NUM) - 1)
+#define QID_LOW_BITS(x) ((x) & 0xf)
+#define QID_HIGH_BITS(x) ((((x) >> 4) & 0x3) << 20)
+#define QID_BITS_V2(x) (((x) & 0x3f) << 16)
+
+/* QDMA V2 descriptor txd6 */
+#define TX_DMA_INS_VLAN_V2 BIT(16)
+
+/* QDMA V2 descriptor txd5 */
+#define TX_DMA_CHKSUM_V2 (0x7 << 28)
+#define TX_DMA_TSO_V2 BIT(31)
+
+/* QDMA V2 descriptor txd4 */
+#define TX_DMA_FPORT_SHIFT_V2 8
+#define TX_DMA_FPORT_MASK_V2 0xf
+#define TX_DMA_SWC_V2 BIT(30)
+
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+#define MTK_TX_DMA_BUF_LEN 0xffff
+#define MTK_TX_DMA_BUF_SHIFT 8
+#else
+#define MTK_TX_DMA_BUF_LEN 0x3fff
+#define MTK_TX_DMA_BUF_SHIFT 16
+#endif
+
+#ifdef CONFIG_MEDIATEK_NETSYS_RX_V2
+#define MTK_RX_DMA_BUF_LEN 0xffff
+#define MTK_RX_DMA_BUF_SHIFT 8
+#define RX_DMA_SPORT_SHIFT 26
+#define RX_DMA_SPORT_MASK 0xf
+#else
+#define MTK_RX_DMA_BUF_LEN 0x3fff
+#define MTK_RX_DMA_BUF_SHIFT 16
+#define RX_DMA_SPORT_SHIFT 19
+#define RX_DMA_SPORT_MASK 0x7
+#endif
+
+/* QDMA descriptor txd4 */
+#define TX_DMA_CHKSUM (0x7 << 29)
+#define TX_DMA_TSO BIT(28)
+#define TX_DMA_FPORT_SHIFT 25
+#define TX_DMA_FPORT_MASK 0x7
+#define TX_DMA_INS_VLAN BIT(16)
+
+/* QDMA descriptor txd3 */
+#define TX_DMA_OWNER_CPU BIT(31)
+#define TX_DMA_LS0 BIT(30)
+#define TX_DMA_PLEN0(_x) (((_x) & MTK_TX_DMA_BUF_LEN) << MTK_TX_DMA_BUF_SHIFT)
+#define TX_DMA_PLEN1(_x) ((_x) & MTK_TX_DMA_BUF_LEN)
+#define TX_DMA_SWC BIT(14)
+#define TX_DMA_SDL(_x) (TX_DMA_PLEN0(_x))
+
+/* PDMA on MT7628 */
+#define TX_DMA_DONE BIT(31)
+#define TX_DMA_LS1 BIT(14)
+#define TX_DMA_DESP2_DEF (TX_DMA_LS0 | TX_DMA_DONE)
+
+/* QDMA descriptor rxd2 */
+#define RX_DMA_DONE BIT(31)
+#define RX_DMA_LSO BIT(30)
+#define RX_DMA_PLEN0(_x) (((_x) & MTK_RX_DMA_BUF_LEN) << MTK_RX_DMA_BUF_SHIFT)
+#define RX_DMA_GET_PLEN0(_x) (((_x) >> MTK_RX_DMA_BUF_SHIFT) & MTK_RX_DMA_BUF_LEN)
+#define RX_DMA_VTAG BIT(15)
+
+/* QDMA descriptor rxd3 */
+#define RX_DMA_VID(_x) ((_x) & VLAN_VID_MASK)
+#define RX_DMA_TCI(_x) ((_x) & (VLAN_PRIO_MASK | VLAN_VID_MASK))
+#define RX_DMA_VPID(_x) (((_x) >> 16) & 0xffff)
+
+/* QDMA descriptor rxd4 */
+#define RX_DMA_L4_VALID BIT(24)
+#define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
+#define RX_DMA_SPECIAL_TAG BIT(22) /* switch header in packet */
+
+#define RX_DMA_GET_SPORT(_x) (((_x) >> RX_DMA_SPORT_SHIFT) & RX_DMA_SPORT_MASK)
+
+/* PDMA V2 descriptor rxd3 */
+#define RX_DMA_VTAG_V2 BIT(0)
+#define RX_DMA_L4_VALID_V2 BIT(2)
+
+/* PDMA V2 descriptor rxd4 */
+#define RX_DMA_VID_V2(_x) RX_DMA_VID(_x)
+#define RX_DMA_TCI_V2(_x) (((_x) >> 1) & (VLAN_PRIO_MASK | VLAN_VID_MASK))
+#define RX_DMA_VPID_V2(x3, x4) ((((x3) & 1) << 15) | (((x4) >> 17) & 0x7fff))
+
+/* PHY Indirect Access Control registers */
+#define MTK_PHY_IAC 0x10004
+#define PHY_IAC_ACCESS BIT(31)
+#define PHY_IAC_READ BIT(19)
+#define PHY_IAC_WRITE BIT(18)
+#define PHY_IAC_START BIT(16)
+#define PHY_IAC_ADDR_SHIFT 20
+#define PHY_IAC_REG_SHIFT 25
+#define PHY_IAC_TIMEOUT HZ
+
+#define MTK_MAC_MISC 0x1000c
+#define MTK_MUX_TO_ESW BIT(0)
+
+/* Mac control registers */
+#define MTK_MAC_MCR(x) (0x10100 + (x * 0x100))
+#define MAC_MCR_MAX_RX_1536 BIT(24)
+#define MAC_MCR_IPG_CFG (BIT(18) | BIT(16))
+#define MAC_MCR_FORCE_MODE BIT(15)
+#define MAC_MCR_TX_EN BIT(14)
+#define MAC_MCR_RX_EN BIT(13)
+#define MAC_MCR_BACKOFF_EN BIT(9)
+#define MAC_MCR_BACKPR_EN BIT(8)
+#define MAC_MCR_FORCE_RX_FC BIT(5)
+#define MAC_MCR_FORCE_TX_FC BIT(4)
+#define MAC_MCR_SPEED_1000 BIT(3)
+#define MAC_MCR_SPEED_100 BIT(2)
+#define MAC_MCR_FORCE_DPX BIT(1)
+#define MAC_MCR_FORCE_LINK BIT(0)
+#define MAC_MCR_FORCE_LINK_DOWN (MAC_MCR_FORCE_MODE)
+
+/* Mac status registers */
+#define MTK_MAC_MSR(x) (0x10108 + (x * 0x100))
+#define MAC_MSR_EEE1G BIT(7)
+#define MAC_MSR_EEE100M BIT(6)
+#define MAC_MSR_RX_FC BIT(5)
+#define MAC_MSR_TX_FC BIT(4)
+#define MAC_MSR_SPEED_1000 BIT(3)
+#define MAC_MSR_SPEED_100 BIT(2)
+#define MAC_MSR_SPEED_MASK (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)
+#define MAC_MSR_DPX BIT(1)
+#define MAC_MSR_LINK BIT(0)
+
+/* TRGMII RXC control register */
+#define TRGMII_RCK_CTRL 0x10300
+#define DQSI0(x) ((x << 0) & GENMASK(6, 0))
+#define DQSI1(x) ((x << 8) & GENMASK(14, 8))
+#define RXCTL_DMWTLAT(x) ((x << 16) & GENMASK(18, 16))
+#define RXC_RST BIT(31)
+#define RXC_DQSISEL BIT(30)
+#define RCK_CTRL_RGMII_1000 (RXC_DQSISEL | RXCTL_DMWTLAT(2) | DQSI1(16))
+#define RCK_CTRL_RGMII_10_100 RXCTL_DMWTLAT(2)
+
+#define NUM_TRGMII_CTRL 5
+
+/* TRGMII TXC control register */
+#define TRGMII_TCK_CTRL 0x10340
+#define TXCTL_DMWTLAT(x) ((x << 16) & GENMASK(18, 16))
+#define TXC_INV BIT(30)
+#define TCK_CTRL_RGMII_1000 TXCTL_DMWTLAT(2)
+#define TCK_CTRL_RGMII_10_100 (TXC_INV | TXCTL_DMWTLAT(2))
+
+/* TRGMII TX Drive Strength */
+#define TRGMII_TD_ODT(i) (0x10354 + 8 * (i))
+#define TD_DM_DRVP(x) ((x) & 0xf)
+#define TD_DM_DRVN(x) (((x) & 0xf) << 4)
+
+/* TRGMII Interface mode register */
+#define INTF_MODE 0x10390
+#define TRGMII_INTF_DIS BIT(0)
+#define TRGMII_MODE BIT(1)
+#define TRGMII_CENTRAL_ALIGNED BIT(2)
+#define INTF_MODE_RGMII_1000 (TRGMII_MODE | TRGMII_CENTRAL_ALIGNED)
+#define INTF_MODE_RGMII_10_100 0
+
+/* GPIO port control registers for GMAC 2*/
+#define GPIO_OD33_CTRL8 0x4c0
+#define GPIO_BIAS_CTRL 0xed0
+#define GPIO_DRV_SEL10 0xf00
+
+/* ethernet subsystem chip id register */
+#define ETHSYS_CHIPID0_3 0x0
+#define ETHSYS_CHIPID4_7 0x4
+#define MT7623_ETH 7623
+#define MT7622_ETH 7622
+#define MT7621_ETH 7621
+
+/* ethernet system control register */
+#define ETHSYS_SYSCFG 0x10
+#define SYSCFG_DRAM_TYPE_DDR2 BIT(4)
+
+/* ethernet subsystem config register */
+#define ETHSYS_SYSCFG0 0x14
+#define SYSCFG0_GE_MASK 0x3
+#define SYSCFG0_GE_MODE(x, y) (x << (12 + (y * 2)))
+#define SYSCFG0_SGMII_MASK GENMASK(9, 8)
+#define SYSCFG0_SGMII_GMAC1 ((2 << 8) & SYSCFG0_SGMII_MASK)
+#define SYSCFG0_SGMII_GMAC2 ((3 << 8) & SYSCFG0_SGMII_MASK)
+#define SYSCFG0_SGMII_GMAC1_V2 BIT(9)
+#define SYSCFG0_SGMII_GMAC2_V2 BIT(8)
+
+
+/* ethernet subsystem clock register */
+#define ETHSYS_CLKCFG0 0x2c
+#define ETHSYS_TRGMII_CLK_SEL362_5 BIT(11)
+#define ETHSYS_TRGMII_MT7621_MASK (BIT(5) | BIT(6))
+#define ETHSYS_TRGMII_MT7621_APLL BIT(6)
+#define ETHSYS_TRGMII_MT7621_DDR_PLL BIT(5)
+
+/* ethernet reset control register */
+#define ETHSYS_RSTCTRL 0x34
+#define RSTCTRL_FE BIT(6)
+#define RSTCTRL_PPE BIT(31)
+
+/* SGMII subsystem config registers */
+/* Register to auto-negotiation restart */
+#define SGMSYS_PCS_CONTROL_1 0x0
+#define SGMII_AN_RESTART BIT(9)
+#define SGMII_ISOLATE BIT(10)
+#define SGMII_AN_ENABLE BIT(12)
+#define SGMII_LINK_STATYS BIT(18)
+#define SGMII_AN_ABILITY BIT(19)
+#define SGMII_AN_COMPLETE BIT(21)
+#define SGMII_PCS_FAULT BIT(23)
+#define SGMII_AN_EXPANSION_CLR BIT(30)
+
+/* Register to programmable link timer, the unit in 2 * 8ns */
+#define SGMSYS_PCS_LINK_TIMER 0x18
+#define SGMII_LINK_TIMER_DEFAULT (0x186a0 & GENMASK(19, 0))
+
+/* Register to control remote fault */
+#define SGMSYS_SGMII_MODE 0x20
+#define SGMII_IF_MODE_BIT0 BIT(0)
+#define SGMII_SPEED_DUPLEX_AN BIT(1)
+#define SGMII_SPEED_10 0x0
+#define SGMII_SPEED_100 BIT(2)
+#define SGMII_SPEED_1000 BIT(3)
+#define SGMII_DUPLEX_FULL BIT(4)
+#define SGMII_IF_MODE_BIT5 BIT(5)
+#define SGMII_REMOTE_FAULT_DIS BIT(8)
+#define SGMII_CODE_SYNC_SET_VAL BIT(9)
+#define SGMII_CODE_SYNC_SET_EN BIT(10)
+#define SGMII_SEND_AN_ERROR_EN BIT(11)
+#define SGMII_IF_MODE_MASK GENMASK(5, 1)
+
+/* Register to set SGMII speed, ANA RG_ Control Signals III*/
+#define SGMSYS_ANA_RG_CS3 0x2028
+#define RG_PHY_SPEED_MASK (BIT(2) | BIT(3))
+#define RG_PHY_SPEED_1_25G 0x0
+#define RG_PHY_SPEED_3_125G BIT(2)
+
+/* Register to power up QPHY */
+#define SGMSYS_QPHY_PWR_STATE_CTRL 0xe8
+#define SGMII_PHYA_PWD BIT(4)
+
+/* Infrasys subsystem config registers */
+#define INFRA_MISC2 0x70c
+#define CO_QPHY_SEL BIT(0)
+#define GEPHY_MAC_SEL BIT(1)
+
+/*MDIO control*/
+#define MII_MMD_ACC_CTL_REG 0x0d
+#define MII_MMD_ADDR_DATA_REG 0x0e
+#define MMD_OP_MODE_DATA BIT(14)
+
+/* MT7628/88 specific stuff */
+#define MT7628_PDMA_OFFSET 0x0800
+#define MT7628_SDM_OFFSET 0x0c00
+
+#define MT7628_TX_BASE_PTR0 (MT7628_PDMA_OFFSET + 0x00)
+#define MT7628_TX_MAX_CNT0 (MT7628_PDMA_OFFSET + 0x04)
+#define MT7628_TX_CTX_IDX0 (MT7628_PDMA_OFFSET + 0x08)
+#define MT7628_TX_DTX_IDX0 (MT7628_PDMA_OFFSET + 0x0c)
+#define MT7628_PST_DTX_IDX0 BIT(0)
+
+#define MT7628_SDM_MAC_ADRL (MT7628_SDM_OFFSET + 0x0c)
+#define MT7628_SDM_MAC_ADRH (MT7628_SDM_OFFSET + 0x10)
+
+/* struct mtk_rx_dma - RX DMA descriptor as laid out by the hardware.
+ * rxd1..rxd4 exist on all SoCs; rxd5..rxd8 are only present on
+ * NETSYS RX V2 capable chips (descriptor grows from 16 to 32 bytes).
+ * Field bit layouts are given by the RX_DMA_* macros above.
+ */
+struct mtk_rx_dma {
+ unsigned int rxd1;
+ unsigned int rxd2;
+ unsigned int rxd3;
+ unsigned int rxd4;
+#ifdef CONFIG_MEDIATEK_NETSYS_RX_V2
+ unsigned int rxd5;
+ unsigned int rxd6;
+ unsigned int rxd7;
+ unsigned int rxd8;
+#endif
+} __packed __aligned(4);
+
+/* struct mtk_tx_dma - TX DMA descriptor as laid out by the hardware.
+ * txd1..txd4 exist on all SoCs; txd5..txd8 are only present on
+ * NETSYS V2 capable chips (descriptor grows from 16 to 32 bytes).
+ * Field bit layouts are given by the TX_DMA_* macros above.
+ */
+struct mtk_tx_dma {
+ unsigned int txd1;
+ unsigned int txd2;
+ unsigned int txd3;
+ unsigned int txd4;
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+ unsigned int txd5;
+ unsigned int txd6;
+ unsigned int txd7;
+ unsigned int txd8;
+#endif
+} __packed __aligned(4);
+
+struct mtk_eth;
+struct mtk_mac;
+
+/* struct mtk_hw_stats - the structure that holds the traffic statistics.
+ * @stats_lock: make sure that stats operations are atomic
+ * @reg_offset: the status register offset of the SoC
+ * @syncp: u64_stats_sync sequence counter protecting 64-bit reads
+ *
+ * All of the supported SoCs have hardware counters for traffic statistics.
+ * Whenever the status IRQ triggers we can read the latest stats from these
+ * counters and store them in this struct.
+ */
+struct mtk_hw_stats {
+ /* TX counters mirrored from the per-GDM hardware MIB registers */
+ u64 tx_bytes;
+ u64 tx_packets;
+ u64 tx_skip;
+ u64 tx_collisions;
+ /* RX counters mirrored from the per-GDM hardware MIB registers */
+ u64 rx_bytes;
+ u64 rx_packets;
+ u64 rx_overflow;
+ u64 rx_fcs_errors;
+ u64 rx_short_errors;
+ u64 rx_long_errors;
+ u64 rx_checksum_errors;
+ u64 rx_flow_control_packets;
+
+ spinlock_t stats_lock;
+ /* per-MAC offset added to MTK_GDM1_TX_GBCNT (see MTK_STAT_OFFSET) */
+ u32 reg_offset;
+ struct u64_stats_sync syncp;
+};
+
+/* Flag bits stored in mtk_tx_buf.flags for each in-flight TX buffer. */
+enum mtk_tx_flags {
+ /* PDMA descriptor can point at 1-2 segments. This enum allows us to
+ * track how memory was allocated so that it can be freed properly.
+ */
+ MTK_TX_FLAGS_SINGLE0 = 0x01,
+ MTK_TX_FLAGS_PAGE0 = 0x02,
+
+ /* MTK_TX_FLAGS_FPORTx allows tracking which port the transmitted
+ * SKB out instead of looking up through hardware TX descriptor.
+ */
+ MTK_TX_FLAGS_FPORT0 = 0x04,
+ MTK_TX_FLAGS_FPORT1 = 0x08,
+};
+
+/* This enum allows us to identify how the clock is defined on the array of the
+ * clock in the order
+ */
+enum mtk_clks_map {
+ MTK_CLK_ETHIF,
+ MTK_CLK_SGMIITOP,
+ MTK_CLK_ESW,
+ MTK_CLK_GP0,
+ MTK_CLK_GP1,
+ MTK_CLK_GP2,
+ MTK_CLK_FE,
+ MTK_CLK_TRGPLL,
+ MTK_CLK_SGMII_TX_250M,
+ MTK_CLK_SGMII_RX_250M,
+ MTK_CLK_SGMII_CDR_REF,
+ MTK_CLK_SGMII_CDR_FB,
+ MTK_CLK_SGMII2_TX_250M,
+ MTK_CLK_SGMII2_RX_250M,
+ MTK_CLK_SGMII2_CDR_REF,
+ MTK_CLK_SGMII2_CDR_FB,
+ MTK_CLK_SGMII_CK,
+ MTK_CLK_ETH2PLL,
+ MTK_CLK_WOCPU0,
+ MTK_CLK_WOCPU1,
+ /* array-size sentinel (see mtk_eth.clks), not a real clock */
+ MTK_CLK_MAX
+};
+
+#define MT7623_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \
+ BIT(MTK_CLK_GP1) | BIT(MTK_CLK_GP2) | \
+ BIT(MTK_CLK_TRGPLL))
+#define MT7622_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \
+ BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \
+ BIT(MTK_CLK_GP2) | \
+ BIT(MTK_CLK_SGMII_TX_250M) | \
+ BIT(MTK_CLK_SGMII_RX_250M) | \
+ BIT(MTK_CLK_SGMII_CDR_REF) | \
+ BIT(MTK_CLK_SGMII_CDR_FB) | \
+ BIT(MTK_CLK_SGMII_CK) | \
+ BIT(MTK_CLK_ETH2PLL))
+#define MT7621_CLKS_BITMAP (0)
+#define MT7628_CLKS_BITMAP (0)
+#define MT7629_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \
+ BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \
+ BIT(MTK_CLK_GP2) | BIT(MTK_CLK_FE) | \
+ BIT(MTK_CLK_SGMII_TX_250M) | \
+ BIT(MTK_CLK_SGMII_RX_250M) | \
+ BIT(MTK_CLK_SGMII_CDR_REF) | \
+ BIT(MTK_CLK_SGMII_CDR_FB) | \
+ BIT(MTK_CLK_SGMII2_TX_250M) | \
+ BIT(MTK_CLK_SGMII2_RX_250M) | \
+ BIT(MTK_CLK_SGMII2_CDR_REF) | \
+ BIT(MTK_CLK_SGMII2_CDR_FB) | \
+ BIT(MTK_CLK_SGMII_CK) | \
+ BIT(MTK_CLK_ETH2PLL) | BIT(MTK_CLK_SGMIITOP))
+
+#define MT7986_CLKS_BITMAP (BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \
+ BIT(MTK_CLK_WOCPU1) | BIT(MTK_CLK_WOCPU0) | \
+ BIT(MTK_CLK_SGMII_TX_250M) | \
+ BIT(MTK_CLK_SGMII_RX_250M) | \
+ BIT(MTK_CLK_SGMII_CDR_REF) | \
+ BIT(MTK_CLK_SGMII_CDR_FB) | \
+ BIT(MTK_CLK_SGMII2_TX_250M) | \
+ BIT(MTK_CLK_SGMII2_RX_250M) | \
+ BIT(MTK_CLK_SGMII2_CDR_REF) | \
+ BIT(MTK_CLK_SGMII2_CDR_FB))
+
+/* Bit numbers for the mtk_eth.state bitmap tracking driver lifecycle. */
+enum mtk_dev_state {
+ MTK_HW_INIT,
+ MTK_RESETTING
+};
+
+/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
+ * by the TX descriptors
+ * @skb: The SKB pointer of the packet being sent
+ * @flags: enum mtk_tx_flags bits tracking mapping type and forward port
+ * @dma_addr0: The base addr of the first segment
+ * @dma_len0: The length of the first segment
+ * @dma_addr1: The base addr of the second segment
+ * @dma_len1: The length of the second segment
+ */
+struct mtk_tx_buf {
+ struct sk_buff *skb;
+ /* enum mtk_tx_flags: how the segments were mapped / which FPORT */
+ u32 flags;
+ DEFINE_DMA_UNMAP_ADDR(dma_addr0);
+ DEFINE_DMA_UNMAP_LEN(dma_len0);
+ DEFINE_DMA_UNMAP_ADDR(dma_addr1);
+ DEFINE_DMA_UNMAP_LEN(dma_len1);
+};
+
+/* struct mtk_tx_ring - This struct holds info describing a TX ring
+ * @dma: The descriptor ring
+ * @buf: The memory pointed at by the ring
+ * @phys: The physical addr of tx_buf
+ * @next_free: Pointer to the next free descriptor
+ * @last_free: Pointer to the last free descriptor
+ * @thresh: The threshold of minimum amount of free descriptors
+ * @free_count: QDMA uses a linked list. Track how many free descriptors
+ * are present
+ */
+struct mtk_tx_ring {
+ struct mtk_tx_dma *dma;
+ struct mtk_tx_buf *buf;
+ dma_addr_t phys;
+ struct mtk_tx_dma *next_free;
+ struct mtk_tx_dma *last_free;
+ u16 thresh;
+ atomic_t free_count;
+ /* number of descriptors in @dma */
+ int dma_size;
+ struct mtk_tx_dma *dma_pdma; /* For MT7628/88 PDMA handling */
+ /* physical address of @dma_pdma */
+ dma_addr_t phys_pdma;
+ /* CPU index into the PDMA ring (MT7628/88 only) */
+ int cpu_idx;
+};
+
+/* PDMA rx ring mode */
+/* PDMA rx ring mode */
+enum mtk_rx_flags {
+ MTK_RX_FLAGS_NORMAL = 0,
+ /* ring dedicated to hardware LRO aggregation */
+ MTK_RX_FLAGS_HWLRO,
+ /* ring driven by the QDMA engine instead of PDMA */
+ MTK_RX_FLAGS_QDMA,
+};
+
+/* struct mtk_rx_ring - This struct holds info describing a RX ring
+ * @dma: The descriptor ring
+ * @data: The memory pointed at by the ring
+ * @phys: The physical addr of rx_buf
+ * @frag_size: How big can each fragment be
+ * @buf_size: The size of each packet buffer
+ * @calc_idx: The current head of ring
+ */
+struct mtk_rx_ring {
+ struct mtk_rx_dma *dma;
+ u8 **data;
+ dma_addr_t phys;
+ u16 frag_size;
+ u16 buf_size;
+ /* number of descriptors in @dma */
+ u16 dma_size;
+ /* true when @calc_idx must still be written back to hardware */
+ bool calc_idx_update;
+ u16 calc_idx;
+ /* register offset of this ring's CPU index (CRX_IDX) */
+ u32 crx_idx_reg;
+};
+
+/* Capability/mux/path bit numbers; combined into per-SoC bitmaps via the
+ * BIT() wrappers below and tested with MTK_HAS_CAPS().
+ * NOTE(review): enum tag is spelled "mkt" (sic); kept as-is since renaming
+ * could break other users of the tag.
+ */
+enum mkt_eth_capabilities {
+ MTK_RGMII_BIT = 0,
+ MTK_TRGMII_BIT,
+ MTK_SGMII_BIT,
+ MTK_ESW_BIT,
+ MTK_GEPHY_BIT,
+ MTK_MUX_BIT,
+ MTK_INFRA_BIT,
+ MTK_SHARED_SGMII_BIT,
+ MTK_HWLRO_BIT,
+ MTK_SHARED_INT_BIT,
+ MTK_TRGMII_MT7621_CLK_BIT,
+ MTK_QDMA_BIT,
+ MTK_NETSYS_TX_V2_BIT,
+ MTK_NETSYS_RX_V2_BIT,
+ MTK_SOC_MT7628_BIT,
+
+ /* MUX BITS*/
+ MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
+ MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT,
+ MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT,
+ MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT,
+ MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT,
+
+ /* PATH BITS */
+ MTK_ETH_PATH_GMAC1_RGMII_BIT,
+ MTK_ETH_PATH_GMAC1_TRGMII_BIT,
+ MTK_ETH_PATH_GMAC1_SGMII_BIT,
+ MTK_ETH_PATH_GMAC2_RGMII_BIT,
+ MTK_ETH_PATH_GMAC2_SGMII_BIT,
+ MTK_ETH_PATH_GMAC2_GEPHY_BIT,
+ MTK_ETH_PATH_GDM1_ESW_BIT,
+};
+
+/* Supported hardware group on SoCs */
+#define MTK_RGMII BIT(MTK_RGMII_BIT)
+#define MTK_TRGMII BIT(MTK_TRGMII_BIT)
+#define MTK_SGMII BIT(MTK_SGMII_BIT)
+#define MTK_ESW BIT(MTK_ESW_BIT)
+#define MTK_GEPHY BIT(MTK_GEPHY_BIT)
+#define MTK_MUX BIT(MTK_MUX_BIT)
+#define MTK_INFRA BIT(MTK_INFRA_BIT)
+#define MTK_SHARED_SGMII BIT(MTK_SHARED_SGMII_BIT)
+#define MTK_HWLRO BIT(MTK_HWLRO_BIT)
+#define MTK_SHARED_INT BIT(MTK_SHARED_INT_BIT)
+#define MTK_TRGMII_MT7621_CLK BIT(MTK_TRGMII_MT7621_CLK_BIT)
+#define MTK_QDMA BIT(MTK_QDMA_BIT)
+#define MTK_NETSYS_TX_V2 BIT(MTK_NETSYS_TX_V2_BIT)
+#define MTK_NETSYS_RX_V2 BIT(MTK_NETSYS_RX_V2_BIT)
+#define MTK_SOC_MT7628 BIT(MTK_SOC_MT7628_BIT)
+
+#define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \
+ BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
+#define MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY \
+ BIT(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT)
+#define MTK_ETH_MUX_U3_GMAC2_TO_QPHY \
+ BIT(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT)
+#define MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \
+ BIT(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT)
+#define MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII \
+ BIT(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT)
+
+/* Supported path present on SoCs */
+#define MTK_ETH_PATH_GMAC1_RGMII BIT(MTK_ETH_PATH_GMAC1_RGMII_BIT)
+#define MTK_ETH_PATH_GMAC1_TRGMII BIT(MTK_ETH_PATH_GMAC1_TRGMII_BIT)
+#define MTK_ETH_PATH_GMAC1_SGMII BIT(MTK_ETH_PATH_GMAC1_SGMII_BIT)
+#define MTK_ETH_PATH_GMAC2_RGMII BIT(MTK_ETH_PATH_GMAC2_RGMII_BIT)
+#define MTK_ETH_PATH_GMAC2_SGMII BIT(MTK_ETH_PATH_GMAC2_SGMII_BIT)
+#define MTK_ETH_PATH_GMAC2_GEPHY BIT(MTK_ETH_PATH_GMAC2_GEPHY_BIT)
+#define MTK_ETH_PATH_GDM1_ESW BIT(MTK_ETH_PATH_GDM1_ESW_BIT)
+
+#define MTK_GMAC1_RGMII (MTK_ETH_PATH_GMAC1_RGMII | MTK_RGMII)
+#define MTK_GMAC1_TRGMII (MTK_ETH_PATH_GMAC1_TRGMII | MTK_TRGMII)
+#define MTK_GMAC1_SGMII (MTK_ETH_PATH_GMAC1_SGMII | MTK_SGMII)
+#define MTK_GMAC2_RGMII (MTK_ETH_PATH_GMAC2_RGMII | MTK_RGMII)
+#define MTK_GMAC2_SGMII (MTK_ETH_PATH_GMAC2_SGMII | MTK_SGMII)
+#define MTK_GMAC2_GEPHY (MTK_ETH_PATH_GMAC2_GEPHY | MTK_GEPHY)
+#define MTK_GDM1_ESW (MTK_ETH_PATH_GDM1_ESW | MTK_ESW)
+
+/* MUXes present on SoCs */
+/* 0: GDM1 -> GMAC1, 1: GDM1 -> ESW */
+#define MTK_MUX_GDM1_TO_GMAC1_ESW (MTK_ETH_MUX_GDM1_TO_GMAC1_ESW | MTK_MUX)
+
+/* 0: GMAC2 -> GEPHY, 1: GMAC0 -> GePHY */
+#define MTK_MUX_GMAC2_GMAC0_TO_GEPHY \
+ (MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY | MTK_MUX | MTK_INFRA)
+
+/* 0: U3 -> QPHY, 1: GMAC2 -> QPHY */
+#define MTK_MUX_U3_GMAC2_TO_QPHY \
+ (MTK_ETH_MUX_U3_GMAC2_TO_QPHY | MTK_MUX | MTK_INFRA)
+
+/* 2: GMAC1 -> SGMII, 3: GMAC2 -> SGMII */
+#define MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \
+ (MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_MUX | \
+ MTK_SHARED_SGMII)
+
+/* 0: GMACx -> GEPHY, 1: GMACx -> SGMII where x is 1 or 2 */
+#define MTK_MUX_GMAC12_TO_GEPHY_SGMII \
+ (MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII | MTK_MUX)
+
+#define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x))
+
+#define MT7621_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | \
+ MTK_GMAC2_RGMII | MTK_SHARED_INT | \
+ MTK_TRGMII_MT7621_CLK | MTK_QDMA)
+
+#define MT7622_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_SGMII | MTK_GMAC2_RGMII | \
+ MTK_GMAC2_SGMII | MTK_GDM1_ESW | \
+ MTK_MUX_GDM1_TO_GMAC1_ESW | \
+ MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_QDMA)
+
+#define MT7623_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | MTK_GMAC2_RGMII | \
+ MTK_QDMA)
+
+#define MT7628_CAPS (MTK_SHARED_INT | MTK_SOC_MT7628)
+
+#define MT7629_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
+ MTK_GDM1_ESW | MTK_MUX_GDM1_TO_GMAC1_ESW | \
+ MTK_MUX_GMAC2_GMAC0_TO_GEPHY | \
+ MTK_MUX_U3_GMAC2_TO_QPHY | \
+ MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA)
+
+#ifdef CONFIG_MEDIATEK_NETSYS_RX_V2
+#define MT7986_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \
+ MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
+ MTK_NETSYS_TX_V2 | MTK_NETSYS_RX_V2)
+#else
+#define MT7986_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \
+ MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
+ MTK_NETSYS_TX_V2)
+#endif
+
+/* struct mtk_eth_data - This is the structure holding all differences
+ * among various platforms
+ * @ana_rgc3: The offset for register ANA_RGC3 related to
+ * sgmiisys syscon
+ * @caps Flags shown the extra capability for the SoC
+ * @hw_features Flags shown HW features
+ * @required_clks Flags shown the bitmap for required clocks on
+ * the target SoC
+ * @required_pctl A bool value to show whether the SoC requires
+ * the extra setup for those pins used by GMAC.
+ */
+struct mtk_soc_data {
+ u32 ana_rgc3;
+ u32 caps;
+ u32 required_clks;
+ bool required_pctl;
+ netdev_features_t hw_features;
+ /* presumably: descriptor rings live in dedicated SRAM on this SoC —
+ * TODO confirm against the ring allocation code
+ */
+ bool has_sram;
+};
+
+/* currently no SoC has more than 2 macs */
+#define MTK_MAX_DEVS 2
+
+#define MTK_SGMII_PHYSPEED_AN BIT(31)
+#define MTK_SGMII_PHYSPEED_MASK GENMASK(2, 0)
+#define MTK_SGMII_PHYSPEED_1000 BIT(0)
+#define MTK_SGMII_PHYSPEED_2500 BIT(1)
+#define MTK_HAS_FLAGS(flags, _x) (((flags) & (_x)) == (_x))
+
+/* struct mtk_sgmii - This is the structure holding sgmii regmap and its
+ * characteristics
+ * @regmap: The register map pointing at the range used to setup
+ * SGMII modes
+ * @flags: The enum refers to which mode the sgmii wants to run on
+ * @ana_rgc3: The offset refers to register ANA_RGC3 related to regmap
+ */
+
+struct mtk_sgmii {
+ /* one regmap/flags pair per SGMII unit, indexed by unit id */
+ struct regmap *regmap[MTK_MAX_DEVS];
+ /* MTK_SGMII_PHYSPEED_* flags per unit */
+ u32 flags[MTK_MAX_DEVS];
+ u32 ana_rgc3;
+};
+
+/* struct mtk_eth - This is the main data structure for holding the state
+ * of the driver
+ * @dev: The device pointer
+ * @base: The mapped register i/o base
+ * @page_lock: Make sure that register operations are atomic
+ * @tx_irq_lock: Make sure that IRQ register operations are atomic
+ * @rx_irq_lock: Make sure that IRQ register operations are atomic
+ * @dummy_dev: we run 2 netdevs on 1 physical DMA ring and need a
+ * dummy for NAPI to work
+ * @netdev: The netdev instances
+ * @mac: Each netdev is linked to a physical MAC
+ * @irq: The IRQ that we are using
+ * @msg_enable: Ethtool msg level
+ * @ethsys: The register map pointing at the range used to setup
+ * MII modes
+ * @infra: The register map pointing at the range used to setup
+ * SGMII and GePHY path
+ * @pctl: The register map pointing at the range used to setup
+ * GMAC port drive/slew values
+ * @dma_refcnt: track how many netdevs are using the DMA engine
+ * @tx_ring: Pointer to the memory holding info about the TX ring
+ * @rx_ring: Pointer to the memory holding info about the RX ring
+ * @rx_ring_qdma: Pointer to the memory holding info about the QDMA RX ring
+ * @tx_napi: The TX NAPI struct
+ * @rx_napi: The RX NAPI struct
+ * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
+ * @phy_scratch_ring: physical address of scratch_ring
+ * @scratch_head: The scratch memory that scratch_ring points to.
+ * @clks: clock array for all clocks required
+ * @mii_bus: If there is a bus we need to create an instance for it
+ * @pending_work: The workqueue used to reset the dma ring
+ * @state: Initialization and runtime state of the device
+ * @soc: Holding specific data among various SoCs
+ */
+
+struct mtk_eth {
+ struct device *dev;
+ void __iomem *base;
+ spinlock_t page_lock;
+ spinlock_t tx_irq_lock;
+ spinlock_t rx_irq_lock;
+ struct net_device dummy_dev;
+ struct net_device *netdev[MTK_MAX_DEVS];
+ struct mtk_mac *mac[MTK_MAX_DEVS];
+ int irq[3];
+ u32 msg_enable;
+ /* NOTE(review): core clock rate in Hz, presumably — confirm where set */
+ unsigned long sysclk;
+ struct regmap *ethsys;
+ struct regmap *infra;
+ /* SGMII register maps/flags (see struct mtk_sgmii) */
+ struct mtk_sgmii *sgmii;
+ struct regmap *pctl;
+ /* true when hardware LRO is available/enabled on this device */
+ bool hwlro;
+ refcount_t dma_refcnt;
+ struct mtk_tx_ring tx_ring;
+ struct mtk_rx_ring rx_ring[MTK_MAX_RX_RING_NUM];
+ struct mtk_rx_ring rx_ring_qdma;
+ struct napi_struct tx_napi;
+ struct napi_struct rx_napi;
+ struct mtk_tx_dma *scratch_ring;
+ dma_addr_t phy_scratch_ring;
+ void *scratch_head;
+ struct clk *clks[MTK_CLK_MAX];
+
+ struct mii_bus *mii_bus;
+ struct work_struct pending_work;
+ unsigned long state;
+
+ const struct mtk_soc_data *soc;
+
+ /* per-SoC TX interrupt mask/status register offsets (QDMA vs PDMA) */
+ u32 tx_int_mask_reg;
+ u32 tx_int_status_reg;
+ /* descriptor bit indicating L4 checksum valid on this SoC */
+ u32 rx_dma_l4_valid;
+ /* extra RX headroom bytes for IP header alignment — confirm (0 or 2) */
+ int ip_align;
+};
+
+/* struct mtk_mac - the structure that holds the info about the MACs of the
+ * SoC
+ * @id: The number of the MAC
+ * @interface: Interface mode kept for detecting change in hw settings
+ * @of_node: Our devicetree node
+ * @hw: Backpointer to our main data structure
+ * @hw_stats: Packet statistics counter
+ */
+struct mtk_mac {
+ int id;
+ phy_interface_t interface;
+ /* MLO_* phylink operation mode — TODO confirm against phylink users */
+ unsigned int mode;
+ /* last programmed link speed */
+ int speed;
+ struct device_node *of_node;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
+ struct mtk_eth *hw;
+ struct mtk_hw_stats *hw_stats;
+ /* IPv4 addresses currently programmed into the HW LRO filters */
+ __be32 hwlro_ip[MTK_MAX_LRO_IP_CNT];
+ int hwlro_ip_cnt;
+};
+
+/* the struct describing the SoC. these are declared in the soc_xyz.c files */
+extern const struct of_device_id of_mtk_match[];
+
+/* read the hardware status register */
+void mtk_stats_update_mac(struct mtk_mac *mac);
+
+void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg);
+u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
+
+int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *np,
+ u32 ana_rgc3);
+int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id);
+int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id,
+ const struct phylink_link_state *state);
+void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id);
+
+int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
+int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
+int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
+
+#endif /* MTK_ETH_H */
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/Makefile b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/Makefile
new file mode 100644
index 0000000..bf1bbcb
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/Makefile
@@ -0,0 +1,5 @@
+ccflags-y=-Werror
+
+obj-$(CONFIG_NET_MEDIATEK_HNAT) += mtkhnat.o
+mtkhnat-objs := hnat.o hnat_nf_hook.o hnat_debugfs.o hnat_mcast.o
+mtkhnat-$(CONFIG_NET_DSA_MT7530) += hnat_stag.o
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
new file mode 100644
index 0000000..3e87791
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
@@ -0,0 +1,665 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
+ * Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/if.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "nf_hnat_mtk.h"
+#include "hnat.h"
+
+/* Global driver context, allocated once in hnat_probe(). */
+struct mtk_hnat *hnat_priv;
+
+/* RX/TX fast-path hook pointers, NULL while acceleration is disabled.
+ * Exported so other modules can invoke the PPE fast path; installed by
+ * hnat_enable_hook() and cleared by hnat_disable_hook().
+ */
+int (*ra_sw_nat_hook_rx)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(ra_sw_nat_hook_rx);
+int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no) = NULL;
+EXPORT_SYMBOL(ra_sw_nat_hook_tx);
+
+/* Callbacks invoked when a PPE-capable net_device (un)registers. */
+void (*ppe_dev_register_hook)(struct net_device *dev) = NULL;
+EXPORT_SYMBOL(ppe_dev_register_hook);
+void (*ppe_dev_unregister_hook)(struct net_device *dev) = NULL;
+EXPORT_SYMBOL(ppe_dev_unregister_hook);
+
+/* Timer callback (armed by hnat_disable_hook()): switch the PPE back to
+ * SMA_FWD_CPU_BUILD_ENTRY so the hardware resumes building FOE entries.
+ */
+static void hnat_sma_build_entry(struct timer_list *t)
+{
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, SMA, SMA_FWD_CPU_BUILD_ENTRY);
+}
+
+/* Enable (1) or disable (0) the PPE cache.  The CAH_X_MODE high/low pulse
+ * beforehand appears to invalidate the cache contents -- confirm against
+ * the PPE programming guide.
+ */
+void hnat_cache_ebl(int enable)
+{
+ cr_set_field(hnat_priv->ppe_base + PPE_CAH_CTRL, CAH_X_MODE, 1);
+ cr_set_field(hnat_priv->ppe_base + PPE_CAH_CTRL, CAH_X_MODE, 0);
+ cr_set_field(hnat_priv->ppe_base + PPE_CAH_CTRL, CAH_EN, enable);
+}
+
+/* Periodic timer callback (HNAT v3 only, see hnat_probe()): zero the
+ * frame-engine timestamp counter at fe_base+0x10 and refresh the
+ * time_stamp of every BIND entry, presumably to keep aging sane across
+ * counter wrap -- confirm.  Re-arms itself every 14400 s (4 h).
+ */
+static void hnat_reset_timestamp(struct timer_list *t)
+{
+ struct foe_entry *entry;
+ int hash_index;
+
+ /* quiesce: stop caching and TCP/UDP aging while the table is rewritten */
+ hnat_cache_ebl(0);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, TCP_AGE, 0);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, UDP_AGE, 0);
+ writel(0, hnat_priv->fe_base + 0x0010);
+
+ for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
+ entry = hnat_priv->foe_table_cpu + hash_index;
+ if (entry->bfib1.state == BIND)
+ entry->bfib1.time_stamp =
+ readl(hnat_priv->fe_base + 0x0010) & (0xFFFF);
+ }
+
+ /* resume aging and caching */
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, TCP_AGE, 1);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, UDP_AGE, 1);
+ hnat_cache_ebl(1);
+
+ mod_timer(&hnat_priv->hnat_reset_timestamp_timer, jiffies + 14400 * HZ);
+}
+
+/* Read-modify-write helper: OR the bits @bs into the register at @reg. */
+static void cr_set_bits(void __iomem *reg, u32 bs)
+{
+ u32 val = readl(reg);
+
+ val |= bs;
+ writel(val, reg);
+}
+
+/* Read-modify-write helper: clear the bits @bs in the register at @reg. */
+static void cr_clr_bits(void __iomem *reg, u32 bs)
+{
+ u32 val = readl(reg);
+
+ val &= ~bs;
+ writel(val, reg);
+}
+
+/* Replace the register bits selected by mask @field with @val, shifted up
+ * to the mask's lowest set bit (ffs).  NOTE(review): @val is not masked to
+ * the field width here -- an out-of-range value would corrupt neighbouring
+ * bits, so callers must pass values that fit the field.
+ */
+void cr_set_field(void __iomem *reg, u32 field, u32 val)
+{
+ unsigned int tv = readl(reg);
+
+ tv &= ~field;
+ tv |= ((val) << (ffs((unsigned int)field) - 1));
+ writel(tv, reg);
+}
+
+/*boundary entry can't be used to accelerate data flow*/
+/* Mark eight fixed "boundary" slots in every 128-entry group as static
+ * (udib1.sta = 1) so the hardware never binds a flow to them.  Only used
+ * for MTK_HNAT_V1 -- see hnat_start().
+ */
+static void exclude_boundary_entry(struct foe_entry *foe_table_cpu)
+{
+ int entry_base = 0;
+ int bad_entry, i, j;
+ struct foe_entry *foe_entry;
+ /*these entries are boundary every 128 entries*/
+ int boundary_entry_offset[8] = { 12, 25, 38, 51, 76, 89, 102, 115};
+
+ if (!foe_table_cpu)
+ return;
+
+ /* walk the table one 128-entry group at a time */
+ for (i = 0; entry_base < hnat_priv->foe_etry_num; i++) {
+ /* set boundary entries as static*/
+ for (j = 0; j < 8; j++) {
+ bad_entry = entry_base + boundary_entry_offset[j];
+ foe_entry = &foe_table_cpu[bad_entry];
+ foe_entry->udib1.sta = 1;
+ }
+ entry_base = (i + 1) * 128;
+ }
+}
+
+/* Steer GDMA RX forwarding for GMAC @id (0 => GDMA1, otherwise GDMA2):
+ * when enabling, force all frame classes to the PPE port; when disabling,
+ * restore forwarding to the CPU/PDMA port -- but only if the register is
+ * still in the all-to-PPE state this driver programmed, so an externally
+ * modified configuration is left untouched.
+ */
+void set_gmac_ppe_fwd(int id, int enable)
+{
+ void __iomem *reg;
+ u32 val;
+
+ reg = hnat_priv->fe_base + (id ? GDMA2_FWD_CFG : GDMA1_FWD_CFG);
+
+ if (enable) {
+ cr_set_bits(reg, BITS_GDM_ALL_FRC_P_PPE);
+
+ return;
+ }
+
+ /*disabled */
+ val = readl(reg);
+ if ((val & GDM_ALL_FRC_MASK) == BITS_GDM_ALL_FRC_P_PPE)
+ cr_set_field(reg, GDM_ALL_FRC_MASK,
+ BITS_GDM_ALL_FRC_P_CPU_PDMA);
+}
+
+/* Allocate the FOE (and optionally MIB) tables, hand them to the PPE, and
+ * program hashing, aging, keep-alive, binding rate limits and default CPU
+ * ports.  Returns 0 on success, -1 on allocation failure.  Note that on
+ * failure the partially allocated DMA memory is released later by
+ * hnat_stop() on the probe error path.
+ */
+static int hnat_start(void)
+{
+ u32 foe_table_sz;
+ u32 foe_mib_tb_sz;
+ int etry_num_cfg;
+
+ /* mapp the FOE table */
+ /* Try the configured size first; on DMA allocation failure halve the
+  * entry count (stepping etry_num_cfg down to match) until it fits.
+  */
+ for (etry_num_cfg = DEF_ETRY_NUM_CFG ; etry_num_cfg >= 0 ; etry_num_cfg--, hnat_priv->foe_etry_num /= 2) {
+ foe_table_sz = hnat_priv->foe_etry_num * sizeof(struct foe_entry);
+ hnat_priv->foe_table_cpu = dma_alloc_coherent(
+ hnat_priv->dev, foe_table_sz, &hnat_priv->foe_table_dev, GFP_KERNEL);
+
+ if (hnat_priv->foe_table_cpu)
+ break;
+ }
+
+ if (!hnat_priv->foe_table_cpu)
+ return -1;
+ dev_info(hnat_priv->dev, "FOE entry number = %d\n", hnat_priv->foe_etry_num);
+
+ /* point the PPE at the DMA table and clear it */
+ writel(hnat_priv->foe_table_dev, hnat_priv->ppe_base + PPE_TB_BASE);
+ memset(hnat_priv->foe_table_cpu, 0, foe_table_sz);
+
+ if (hnat_priv->data->version == MTK_HNAT_V1)
+ exclude_boundary_entry(hnat_priv->foe_table_cpu);
+
+ /* optional per-flow accounting: MIB table + software counters */
+ if (hnat_priv->data->per_flow_accounting) {
+ foe_mib_tb_sz = hnat_priv->foe_etry_num * sizeof(struct mib_entry);
+ hnat_priv->foe_mib_cpu = dma_alloc_coherent(hnat_priv->dev, foe_mib_tb_sz,
+ &hnat_priv->foe_mib_dev, GFP_KERNEL);
+ if (!hnat_priv->foe_mib_cpu)
+ return -1;
+ writel(hnat_priv->foe_mib_dev, hnat_priv->ppe_base + PPE_MIB_TB_BASE);
+ memset(hnat_priv->foe_mib_cpu, 0, foe_mib_tb_sz);
+
+ hnat_priv->acct =
+ kzalloc(hnat_priv->foe_etry_num * sizeof(struct hnat_accounting),
+ GFP_KERNEL);
+ if (!hnat_priv->acct)
+ return -1;
+ }
+ /* setup hashing */
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, TB_ETRY_NUM, etry_num_cfg);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, HASH_MODE, HASH_MODE_1);
+ writel(HASH_SEED_KEY, hnat_priv->ppe_base + PPE_HASH_SEED);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, XMODE, 0);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, TB_ENTRY_SIZE, ENTRY_80B);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, SMA, SMA_FWD_CPU_BUILD_ENTRY);
+
+ /* set ip proto */
+ writel(0xFFFFFFFF, hnat_priv->ppe_base + PPE_IP_PROT_CHK);
+
+ /* setup caching */
+ hnat_cache_ebl(1);
+
+ /* enable FOE */
+ cr_set_bits(hnat_priv->ppe_base + PPE_FLOW_CFG,
+ BIT_UDP_IP4F_NAT_EN | BIT_IPV4_NAT_EN | BIT_IPV4_NAPT_EN |
+ BIT_IPV4_NAT_FRAG_EN | BIT_IPV4_HASH_GREK |
+ BIT_IPV4_DSL_EN | BIT_IPV6_6RD_EN |
+ BIT_IPV6_3T_ROUTE_EN | BIT_IPV6_5T_ROUTE_EN);
+
+ if (hnat_priv->data->version == MTK_HNAT_V4)
+ cr_set_bits(hnat_priv->ppe_base + PPE_FLOW_CFG,
+ BIT_IPV4_MAPE_EN | BIT_IPV4_MAPT_EN);
+
+ /* setup FOE aging */
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, NTU_AGE, 1);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, UNBD_AGE, 1);
+ cr_set_field(hnat_priv->ppe_base + PPE_UNB_AGE, UNB_MNP, 1000);
+ cr_set_field(hnat_priv->ppe_base + PPE_UNB_AGE, UNB_DLTA, 3);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, TCP_AGE, 1);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, UDP_AGE, 1);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, FIN_AGE, 1);
+ cr_set_field(hnat_priv->ppe_base + PPE_BND_AGE_0, UDP_DLTA, 12);
+ cr_set_field(hnat_priv->ppe_base + PPE_BND_AGE_0, NTU_DLTA, 1);
+ cr_set_field(hnat_priv->ppe_base + PPE_BND_AGE_1, FIN_DLTA, 1);
+ cr_set_field(hnat_priv->ppe_base + PPE_BND_AGE_1, TCP_DLTA, 7);
+
+ /* setup FOE ka */
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, SCAN_MODE, 2);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, KA_CFG, 3);
+ cr_set_field(hnat_priv->ppe_base + PPE_KA, KA_T, 1);
+ cr_set_field(hnat_priv->ppe_base + PPE_KA, TCP_KA, 1);
+ cr_set_field(hnat_priv->ppe_base + PPE_KA, UDP_KA, 1);
+ cr_set_field(hnat_priv->ppe_base + PPE_BIND_LMT_1, NTU_KA, 1);
+
+ /* setup FOE rate limit */
+ cr_set_field(hnat_priv->ppe_base + PPE_BIND_LMT_0, QURT_LMT, 16383);
+ cr_set_field(hnat_priv->ppe_base + PPE_BIND_LMT_0, HALF_LMT, 16383);
+ cr_set_field(hnat_priv->ppe_base + PPE_BIND_LMT_1, FULL_LMT, 16383);
+ /* setup binding threshold as 30 packets per second */
+ cr_set_field(hnat_priv->ppe_base + PPE_BNDR, BIND_RATE, 0x1E);
+
+ /* setup FOE cf gen */
+ cr_set_field(hnat_priv->ppe_base + PPE_GLO_CFG, PPE_EN, 1);
+ writel(0, hnat_priv->ppe_base + PPE_DFT_CPORT); /* pdma */
+ /* writel(0x55555555, hnat_priv->ppe_base + PPE_DFT_CPORT); */ /* qdma */
+ cr_set_field(hnat_priv->ppe_base + PPE_GLO_CFG, TTL0_DRP, 0);
+
+ if (hnat_priv->data->version == MTK_HNAT_V4) {
+ writel(0xcb777, hnat_priv->ppe_base + PPE_DFT_CPORT1);
+ writel(0x7f, hnat_priv->ppe_base + PPE_SBW_CTRL);
+ }
+
+ /*enable ppe mib counter*/
+ if (hnat_priv->data->per_flow_accounting) {
+ cr_set_field(hnat_priv->ppe_base + PPE_MIB_CFG, MIB_EN, 1);
+ cr_set_field(hnat_priv->ppe_base + PPE_MIB_CFG, MIB_READ_CLEAR, 1);
+ cr_set_field(hnat_priv->ppe_base + PPE_MIB_CAH_CTRL, MIB_CAH_EN, 1);
+ }
+
+ /* NOTE(review): result not checked -- g_ppdev may be NULL here if the
+  * "mtketh-ppd" device does not exist yet; confirm callers tolerate that.
+  */
+ hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);
+
+ dev_info(hnat_priv->dev, "hwnat start\n");
+
+ return 0;
+}
+
+/* Poll bit 31 of the register at ppe_base+0 (busy flag, presumably --
+ * confirm against the PPE datasheet) until it clears, sleeping 10-20 us
+ * per iteration, for at most ~1 s.  Returns 0 when idle, -1 on timeout.
+ */
+static int ppe_busy_wait(void)
+{
+ unsigned long t_start = jiffies;
+ u32 r = 0;
+
+ while (1) {
+ r = readl((hnat_priv->ppe_base + 0x0));
+ if (!(r & BIT(31)))
+ return 0;
+ if (time_after(jiffies, t_start + HZ))
+ break;
+ usleep_range(10, 20);
+ }
+
+ dev_notice(hnat_priv->dev, "ppe:%s timeout\n", __func__);
+
+ return -1;
+}
+
+/* Tear down the PPE: reroute traffic back to the DMA engine, invalidate
+ * all FOE entries, disable the engine/aging/keep-alive, and free the DMA
+ * tables allocated by hnat_start().  Safe to call on a partially started
+ * instance (all frees are NULL-checked).
+ */
+static void hnat_stop(void)
+{
+ u32 foe_table_sz;
+ u32 foe_mib_tb_sz;
+ struct foe_entry *entry, *end;
+ u32 r1 = 0, r2 = 0;
+
+ /* send all traffic back to the DMA engine */
+ set_gmac_ppe_fwd(0, 0);
+ set_gmac_ppe_fwd(1, 0);
+
+ dev_info(hnat_priv->dev, "hwnat stop\n");
+
+ if (hnat_priv->foe_table_cpu) {
+ entry = hnat_priv->foe_table_cpu;
+ end = hnat_priv->foe_table_cpu + hnat_priv->foe_etry_num;
+ while (entry < end) {
+ entry->bfib1.state = INVALID;
+ entry++;
+ }
+ }
+ /* disable caching */
+ hnat_cache_ebl(0);
+
+ /* flush cache has to be ahead of hnat disable --*/
+ cr_set_field(hnat_priv->ppe_base + PPE_GLO_CFG, PPE_EN, 0);
+
+ /* disable scan mode and keep-alive */
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, SCAN_MODE, 0);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, KA_CFG, 0);
+
+ /* wait for the engine to go idle before clearing its config */
+ ppe_busy_wait();
+
+ /* disable FOE */
+ /* NOTE(review): this clears BIT_IPV6_HASH_GREK while hnat_start() set
+  * BIT_IPV4_HASH_GREK (which is left set here) -- confirm whether the
+  * asymmetry is intentional.
+  */
+ cr_clr_bits(hnat_priv->ppe_base + PPE_FLOW_CFG,
+ BIT_IPV4_NAPT_EN | BIT_IPV4_NAT_EN | BIT_IPV4_NAT_FRAG_EN |
+ BIT_IPV6_HASH_GREK | BIT_IPV4_DSL_EN |
+ BIT_IPV6_6RD_EN | BIT_IPV6_3T_ROUTE_EN |
+ BIT_IPV6_5T_ROUTE_EN | BIT_FUC_FOE | BIT_FMC_FOE);
+
+ if (hnat_priv->data->version == MTK_HNAT_V4)
+ cr_clr_bits(hnat_priv->ppe_base + PPE_FLOW_CFG,
+ BIT_IPV4_MAPE_EN | BIT_IPV4_MAPT_EN);
+
+ /* disable FOE aging */
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, NTU_AGE, 0);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, UNBD_AGE, 0);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, TCP_AGE, 0);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, UDP_AGE, 0);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, FIN_AGE, 0);
+
+ r1 = readl(hnat_priv->fe_base + 0x100);
+ r2 = readl(hnat_priv->fe_base + 0x10c);
+
+ dev_info(hnat_priv->dev, "0x100 = 0x%x, 0x10c = 0x%x\n", r1, r2);
+
+ /* reset the PSE if the FE counters look stuck -- exact semantics of
+  * these register fields are undocumented here; confirm with datasheet.
+  */
+ if (((r1 & 0xff00) >> 0x8) >= (r1 & 0xff) ||
+ ((r1 & 0xff00) >> 0x8) >= (r2 & 0xff)) {
+ dev_info(hnat_priv->dev, "reset pse\n");
+ writel(0x1, hnat_priv->fe_base + 0x4);
+ }
+
+ /* free the FOE table */
+ foe_table_sz = hnat_priv->foe_etry_num * sizeof(struct foe_entry);
+ if (hnat_priv->foe_table_cpu)
+ dma_free_coherent(hnat_priv->dev, foe_table_sz, hnat_priv->foe_table_cpu,
+ hnat_priv->foe_table_dev);
+ writel(0, hnat_priv->ppe_base + PPE_TB_BASE);
+
+ if (hnat_priv->data->per_flow_accounting) {
+ foe_mib_tb_sz = hnat_priv->foe_etry_num * sizeof(struct mib_entry);
+ if (hnat_priv->foe_mib_cpu)
+ dma_free_coherent(hnat_priv->dev, foe_mib_tb_sz,
+ hnat_priv->foe_mib_cpu, hnat_priv->foe_mib_dev);
+ writel(0, hnat_priv->ppe_base + PPE_MIB_TB_BASE);
+ kfree(hnat_priv->acct);
+ }
+}
+
+/* Drop every external-device entry (releasing the net_device references
+ * taken elsewhere) and the ppd device reference.  The loop stops at the
+ * first NULL slot, so ext_if[] is assumed to be densely packed.
+ */
+static void hnat_release_netdev(void)
+{
+ int i;
+ struct extdev_entry *ext_entry;
+
+ for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
+ ext_entry = hnat_priv->ext_if[i];
+ if (ext_entry->dev)
+ dev_put(ext_entry->dev);
+ ext_if_del(ext_entry);
+ kfree(ext_entry);
+ }
+
+ if (hnat_priv->g_ppdev)
+ dev_put(hnat_priv->g_ppdev);
+}
+
+/* Notifier blocks for net_device and netevent changes; the handlers live
+ * in hnat_nf_hook.c.  Registered/unregistered in hnat_probe()/hnat_remove().
+ */
+static struct notifier_block nf_hnat_netdevice_nb __read_mostly = {
+ .notifier_call = nf_hnat_netdevice_event,
+};
+
+static struct notifier_block nf_hnat_netevent_nb __read_mostly = {
+ .notifier_call = nf_hnat_netevent_handler,
+};
+
+/* Turn software-side acceleration on: install the RX/TX fast-path and
+ * PPE-device hooks (WHNAT-capable SoCs only; RX hook additionally requires
+ * HNAT v4) and register the netfilter hooks.  Returns 0 on success, -1 if
+ * netfilter hook registration fails.
+ */
+int hnat_enable_hook(void)
+{
+ /* register hook functions used by WHNAT module.
+ */
+ if (hnat_priv->data->whnat) {
+ ra_sw_nat_hook_rx =
+ (hnat_priv->data->version == MTK_HNAT_V4) ?
+ mtk_sw_nat_hook_rx : NULL;
+ ra_sw_nat_hook_tx = mtk_sw_nat_hook_tx;
+ ppe_dev_register_hook = mtk_ppe_dev_register_hook;
+ ppe_dev_unregister_hook = mtk_ppe_dev_unregister_hook;
+ }
+
+ if (hnat_register_nf_hooks())
+ return -1;
+
+ hook_toggle = 1;
+
+ return 0;
+}
+
+/* Turn software-side acceleration off: clear the fast-path hooks, drop the
+ * netfilter hooks, invalidate every bound FOE entry and flush the cache.
+ * A 3 s timer then restores hardware entry building (hnat_sma_build_entry).
+ * NOTE(review): ppe_dev_register_hook/ppe_dev_unregister_hook are not
+ * cleared here, unlike the ra_sw_nat hooks -- confirm this is intended.
+ */
+int hnat_disable_hook(void)
+{
+ int hash_index;
+ struct foe_entry *entry;
+
+ ra_sw_nat_hook_tx = NULL;
+ ra_sw_nat_hook_rx = NULL;
+ hnat_unregister_nf_hooks();
+
+ /* stop the hardware from building new entries, then invalidate all
+  * currently bound entries
+  */
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, SMA, SMA_ONLY_FWD_CPU);
+ for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
+ entry = hnat_priv->foe_table_cpu + hash_index;
+ if (entry->bfib1.state == BIND) {
+ entry->ipv4_hnapt.udib1.state = INVALID;
+ entry->ipv4_hnapt.udib1.time_stamp =
+ readl((hnat_priv->fe_base + 0x0010)) & 0xFF;
+ }
+ }
+
+ /* clear HWNAT cache */
+ hnat_cache_ebl(1);
+
+ mod_timer(&hnat_priv->hnat_sma_build_entry_timer, jiffies + 3 * HZ);
+ hook_toggle = 0;
+
+ return 0;
+}
+
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+/* Packet handler for the HW-QoS magic ethertype; added/removed in
+ * probe/remove when running in GMAC1 mode.
+ */
+static struct packet_type mtk_pack_type __read_mostly = {
+ .type = HQOS_MAGIC_TAG,
+ .func = mtk_hqos_ptype_cb,
+};
+#endif
+
+/* Platform-driver probe: parse the DT properties (interface names, GMAC
+ * count, DSA WAN port, external devices), map the frame-engine registers,
+ * start the PPE and install the acceleration hooks and notifiers.
+ * Returns 0 on success or a negative errno / hnat error code; on failure
+ * everything started so far is unwound via the err_out labels.
+ */
+static int hnat_probe(struct platform_device *pdev)
+{
+ int i;
+ int err = 0;
+ int index = 0;
+ struct resource *res;
+ const char *name;
+ struct device_node *np;
+ unsigned int val;
+ struct property *prop;
+ struct extdev_entry *ext_entry;
+ const struct of_device_id *match;
+
+ hnat_priv = devm_kzalloc(&pdev->dev, sizeof(struct mtk_hnat), GFP_KERNEL);
+ if (!hnat_priv)
+ return -ENOMEM;
+
+ hnat_priv->foe_etry_num = DEF_ETRY_NUM;
+
+ match = of_match_device(of_hnat_match, &pdev->dev);
+ hnat_priv->data = (struct mtk_hnat_data *)match->data;
+
+ hnat_priv->dev = &pdev->dev;
+ np = hnat_priv->dev->of_node;
+
+ /* "mtketh-wan" is mandatory; lan/ppd default to "eth0".
+  * NOTE(review): strncpy(..., IFNAMSIZ) leaves the buffer unterminated
+  * when the DT string is exactly IFNAMSIZ bytes -- confirm names are
+  * bounded, or bound the copy to IFNAMSIZ - 1.
+  */
+ err = of_property_read_string(np, "mtketh-wan", &name);
+ if (err < 0)
+ return -EINVAL;
+
+ strncpy(hnat_priv->wan, (char *)name, IFNAMSIZ);
+ dev_info(&pdev->dev, "wan = %s\n", hnat_priv->wan);
+
+ err = of_property_read_string(np, "mtketh-lan", &name);
+ if (err < 0)
+ strncpy(hnat_priv->lan, "eth0", IFNAMSIZ);
+ else
+ strncpy(hnat_priv->lan, (char *)name, IFNAMSIZ);
+ dev_info(&pdev->dev, "lan = %s\n", hnat_priv->lan);
+
+ err = of_property_read_string(np, "mtketh-ppd", &name);
+ if (err < 0)
+ strncpy(hnat_priv->ppd, "eth0", IFNAMSIZ);
+ else
+ strncpy(hnat_priv->ppd, (char *)name, IFNAMSIZ);
+ dev_info(&pdev->dev, "ppd = %s\n", hnat_priv->ppd);
+
+ /*get total gmac num in hnat*/
+ err = of_property_read_u32_index(np, "mtketh-max-gmac", 0, &val);
+
+ if (err < 0)
+ return -EINVAL;
+
+ hnat_priv->gmac_num = val;
+
+ dev_info(&pdev->dev, "gmac num = %d\n", hnat_priv->gmac_num);
+
+ err = of_property_read_u32_index(np, "mtkdsa-wan-port", 0, &val);
+
+ if (err < 0) {
+ hnat_priv->wan_dsa_port = NONE_DSA_PORT;
+ } else {
+ hnat_priv->wan_dsa_port = val;
+ dev_info(&pdev->dev, "wan dsa port = %d\n", hnat_priv->wan_dsa_port);
+ }
+
+ /* map the frame-engine register window; the PPE block sits at a
+  * version-dependent offset inside it
+  */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOENT;
+
+ hnat_priv->fe_base = devm_ioremap_nocache(&pdev->dev, res->start,
+ res->end - res->start + 1);
+ if (!hnat_priv->fe_base)
+ return -EADDRNOTAVAIL;
+
+ hnat_priv->ppe_base = (hnat_priv->data->version == MTK_HNAT_V4) ?
+ hnat_priv->fe_base + 0x2600 : hnat_priv->fe_base + 0xe00;
+
+ err = hnat_init_debugfs(hnat_priv);
+ if (err)
+ return err;
+
+ /* register each device named in the optional "ext-devices" property */
+ prop = of_find_property(np, "ext-devices", NULL);
+ for (name = of_prop_next_string(prop, NULL); name;
+ name = of_prop_next_string(prop, name), index++) {
+ ext_entry = kzalloc(sizeof(*ext_entry), GFP_KERNEL);
+ if (!ext_entry) {
+ err = -ENOMEM;
+ goto err_out1;
+ }
+ strncpy(ext_entry->name, (char *)name, IFNAMSIZ);
+ ext_if_add(ext_entry);
+ }
+
+ for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
+ ext_entry = hnat_priv->ext_if[i];
+ dev_info(&pdev->dev, "ext devices = %s\n", ext_entry->name);
+ }
+
+ hnat_priv->lvid = 1;
+ hnat_priv->wvid = 2;
+
+ err = hnat_start();
+ if (err)
+ goto err_out;
+
+ if (hnat_priv->data->whnat) {
+ err = whnat_adjust_nf_hooks();
+ if (err)
+ goto err_out;
+ }
+
+ err = hnat_enable_hook();
+ if (err)
+ goto err_out;
+
+ register_netdevice_notifier(&nf_hnat_netdevice_nb);
+ register_netevent_notifier(&nf_hnat_netevent_nb);
+ if (hnat_priv->data->mcast)
+ hnat_mcast_enable();
+ timer_setup(&hnat_priv->hnat_sma_build_entry_timer, hnat_sma_build_entry, 0);
+ if (hnat_priv->data->version == MTK_HNAT_V3) {
+ timer_setup(&hnat_priv->hnat_reset_timestamp_timer, hnat_reset_timestamp, 0);
+ hnat_priv->hnat_reset_timestamp_timer.expires = jiffies;
+ add_timer(&hnat_priv->hnat_reset_timestamp_timer);
+ }
+
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+ if (IS_GMAC1_MODE)
+ dev_add_pack(&mtk_pack_type);
+#endif
+
+ return 0;
+
+err_out:
+ hnat_stop();
+err_out1:
+ hnat_deinit_debugfs(hnat_priv);
+ for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
+ ext_entry = hnat_priv->ext_if[i];
+ ext_if_del(ext_entry);
+ kfree(ext_entry);
+ }
+ return err;
+}
+
+/* Platform-driver remove: mirror of hnat_probe() -- drop notifiers and
+ * hooks, stop the PPE, tear down debugfs, release device references and
+ * kill the timers.
+ */
+static int hnat_remove(struct platform_device *pdev)
+{
+ unregister_netdevice_notifier(&nf_hnat_netdevice_nb);
+ unregister_netevent_notifier(&nf_hnat_netevent_nb);
+ hnat_disable_hook();
+
+ if (hnat_priv->data->mcast)
+ hnat_mcast_disable();
+
+ hnat_stop();
+ hnat_deinit_debugfs(hnat_priv);
+ hnat_release_netdev();
+ del_timer_sync(&hnat_priv->hnat_sma_build_entry_timer);
+ if (hnat_priv->data->version == MTK_HNAT_V3)
+ del_timer_sync(&hnat_priv->hnat_reset_timestamp_timer);
+
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+ if (IS_GMAC1_MODE)
+ dev_remove_pack(&mtk_pack_type);
+#endif
+
+ return 0;
+}
+
+/* Per-generation capability sets, selected via the OF match table below:
+ * number of QDMA schedulers, WHNAT (WiFi offload) hook support, per-flow
+ * MIB accounting, multicast offload, and the PPE version.
+ */
+static const struct mtk_hnat_data hnat_data_v1 = {
+ .num_of_sch = 2,
+ .whnat = false,
+ .per_flow_accounting = false,
+ .mcast = false,
+ .version = MTK_HNAT_V1,
+};
+
+static const struct mtk_hnat_data hnat_data_v2 = {
+ .num_of_sch = 2,
+ .whnat = true,
+ .per_flow_accounting = true,
+ .mcast = false,
+ .version = MTK_HNAT_V2,
+};
+
+static const struct mtk_hnat_data hnat_data_v3 = {
+ .num_of_sch = 4,
+ .whnat = false,
+ .per_flow_accounting = false,
+ .mcast = false,
+ .version = MTK_HNAT_V3,
+};
+
+static const struct mtk_hnat_data hnat_data_v4 = {
+ .num_of_sch = 4,
+ .whnat = true,
+ .per_flow_accounting = true,
+ .mcast = false,
+ .version = MTK_HNAT_V4,
+};
+
+const struct of_device_id of_hnat_match[] = {
+ /* bare "mediatek,mtk-hnat" maps to the v3 data set */
+ { .compatible = "mediatek,mtk-hnat", .data = &hnat_data_v3 },
+ { .compatible = "mediatek,mtk-hnat_v1", .data = &hnat_data_v1 },
+ { .compatible = "mediatek,mtk-hnat_v2", .data = &hnat_data_v2 },
+ { .compatible = "mediatek,mtk-hnat_v3", .data = &hnat_data_v3 },
+ { .compatible = "mediatek,mtk-hnat_v4", .data = &hnat_data_v4 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_hnat_match);
+
+/* Platform driver glue and module boilerplate. */
+static struct platform_driver hnat_driver = {
+ .probe = hnat_probe,
+ .remove = hnat_remove,
+ .driver = {
+ .name = "mediatek_soc_hnat",
+ .of_match_table = of_hnat_match,
+ },
+};
+
+module_platform_driver(hnat_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_AUTHOR("John Crispin <john@phrozen.org>");
+MODULE_DESCRIPTION("Mediatek Hardware NAT");
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
new file mode 100644
index 0000000..336b4ad
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
@@ -0,0 +1,925 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
+ * Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/string.h>
+#include <linux/if.h>
+#include <linux/if_ether.h>
+#include <net/netevent.h>
+#include <linux/mod_devicetable.h>
+#include "hnat_mcast.h"
+
+/*--------------------------------------------------------------------------*/
+/* Register Offset*/
+/*--------------------------------------------------------------------------*/
+/* Offsets are relative to ppe_base (version-dependent, see hnat_probe()). */
+#define PPE_GLO_CFG 0x00
+#define PPE_FLOW_CFG 0x04
+#define PPE_IP_PROT_CHK 0x08
+#define PPE_IP_PROT_0 0x0C
+#define PPE_IP_PROT_1 0x10
+#define PPE_IP_PROT_2 0x14
+#define PPE_IP_PROT_3 0x18
+#define PPE_TB_CFG 0x1C
+#define PPE_TB_BASE 0x20
+#define PPE_TB_USED 0x24
+#define PPE_BNDR 0x28
+#define PPE_BIND_LMT_0 0x2C
+#define PPE_BIND_LMT_1 0x30
+#define PPE_KA 0x34
+#define PPE_UNB_AGE 0x38
+#define PPE_BND_AGE_0 0x3C
+#define PPE_BND_AGE_1 0x40
+#define PPE_HASH_SEED 0x44
+#define PPE_DFT_CPORT 0x48
+#define PPE_DFT_CPORT1 0x4C
+#define PPE_MCAST_PPSE 0x84
+#define PPE_MCAST_L_0 0x88
+#define PPE_MCAST_H_0 0x8C
+#define PPE_MCAST_L_1 0x90
+#define PPE_MCAST_H_1 0x94
+#define PPE_MCAST_L_2 0x98
+#define PPE_MCAST_H_2 0x9C
+#define PPE_MCAST_L_3 0xA0
+#define PPE_MCAST_H_3 0xA4
+#define PPE_MCAST_L_4 0xA8
+#define PPE_MCAST_H_4 0xAC
+#define PPE_MCAST_L_5 0xB0
+#define PPE_MCAST_H_5 0xB4
+#define PPE_MCAST_L_6 0xBC
+#define PPE_MCAST_H_6 0xC0
+#define PPE_MCAST_L_7 0xC4
+#define PPE_MCAST_H_7 0xC8
+#define PPE_MCAST_L_8 0xCC
+#define PPE_MCAST_H_8 0xD0
+#define PPE_MCAST_L_9 0xD4
+#define PPE_MCAST_H_9 0xD8
+#define PPE_MCAST_L_A 0xDC
+#define PPE_MCAST_H_A 0xE0
+#define PPE_MCAST_L_B 0xE4
+#define PPE_MCAST_H_B 0xE8
+#define PPE_MCAST_L_C 0xEC
+#define PPE_MCAST_H_C 0xF0
+#define PPE_MCAST_L_D 0xF4
+#define PPE_MCAST_H_D 0xF8
+#define PPE_MCAST_L_E 0xFC
+/* NOTE(review): 0xE0 duplicates PPE_MCAST_H_A and breaks the +4 stride of
+ * this block -- looks like a typo; confirm the value against the datasheet.
+ */
+#define PPE_MCAST_H_E 0xE0
+#define PPE_MCAST_L_F 0x100
+#define PPE_MCAST_H_F 0x104
+#define PPE_MCAST_L_10 0xC00
+#define PPE_MCAST_H_10 0xC04
+#define PPE_MTU_DRP 0x108
+#define PPE_MTU_VLYR_0 0x10C
+#define PPE_MTU_VLYR_1 0x110
+#define PPE_MTU_VLYR_2 0x114
+#define PPE_VPM_TPID 0x118
+#define PPE_CAH_CTRL 0x120
+#define PPE_CAH_TAG_SRH 0x124
+#define PPE_CAH_LINE_RW 0x128
+#define PPE_CAH_WDATA 0x12C
+#define PPE_CAH_RDATA 0x130
+
+#define PPE_MIB_CFG 0X134
+#define PPE_MIB_TB_BASE 0X138
+#define PPE_MIB_SER_CR 0X13C
+#define PPE_MIB_SER_R0 0X140
+#define PPE_MIB_SER_R1 0X144
+#define PPE_MIB_SER_R2 0X148
+#define PPE_MIB_CAH_CTRL 0X150
+#define PPE_MIB_CAH_TAG_SRH 0X154
+#define PPE_MIB_CAH_LINE_RW 0X158
+#define PPE_MIB_CAH_WDATA 0X15C
+#define PPE_MIB_CAH_RDATA 0X160
+#define PPE_SBW_CTRL 0x174
+
+/* GDMA forward-config registers are relative to fe_base */
+#define GDMA1_FWD_CFG 0x500
+#define GDMA2_FWD_CFG 0x1500
+
+#define QTX_CFG(x) (QDMA_BASE + ((x) * 0x10))
+#define QTX_SCH(x) (QDMA_BASE + 0x4 + ((x) * 0x10))
+#define QDMA_PAGE (QDMA_BASE + 0x1f0)
+#define QDMA_TX_2SCH_BASE (QDMA_BASE + 0x214)
+#define QTX_MIB_IF (QDMA_BASE + 0x2bc)
+#define QDMA_TX_4SCH_BASE(x) (QDMA_BASE + 0x398 + (((x) >> 1) * 0x4))
+
+/*--------------------------------------------------------------------------*/
+/* Register Mask*/
+/*--------------------------------------------------------------------------*/
+/* Field masks below are consumed by cr_set_field(), which shifts the value
+ * to the mask's lowest set bit.
+ */
+/* PPE_TB_CFG mask */
+#define TB_ETRY_NUM (0x7 << 0) /* RW */
+#define TB_ENTRY_SIZE (0x1 << 3) /* RW */
+#define SMA (0x3 << 4) /* RW */
+#define NTU_AGE (0x1 << 7) /* RW */
+#define UNBD_AGE (0x1 << 8) /* RW */
+#define TCP_AGE (0x1 << 9) /* RW */
+#define UDP_AGE (0x1 << 10) /* RW */
+#define FIN_AGE (0x1 << 11) /* RW */
+#define KA_CFG (0x3 << 12)
+#define HASH_MODE (0x3 << 14) /* RW */
+#define SCAN_MODE (0x3 << 16) /* RW */
+#define XMODE (0x3 << 18) /* RW */
+
+/*PPE_CAH_CTRL mask*/
+#define CAH_EN (0x1 << 0) /* RW */
+#define CAH_X_MODE (0x1 << 9) /* RW */
+
+/*PPE_UNB_AGE mask*/
+#define UNB_DLTA (0xff << 0) /* RW */
+#define UNB_MNP (0xffff << 16) /* RW */
+
+/*PPE_BND_AGE_0 mask*/
+#define UDP_DLTA (0xffff << 0) /* RW */
+#define NTU_DLTA (0xffff << 16) /* RW */
+
+/*PPE_BND_AGE_1 mask*/
+#define TCP_DLTA (0xffff << 0) /* RW */
+#define FIN_DLTA (0xffff << 16) /* RW */
+
+/*PPE_KA mask*/
+#define KA_T (0xffff << 0) /* RW */
+#define TCP_KA (0xff << 16) /* RW */
+#define UDP_KA (0xff << 24) /* RW */
+
+/*PPE_BIND_LMT_0 mask*/
+#define QURT_LMT (0x3ff << 0) /* RW */
+#define HALF_LMT (0x3ff << 16) /* RW */
+
+/*PPE_BIND_LMT_1 mask*/
+#define FULL_LMT (0x3fff << 0) /* RW */
+#define NTU_KA (0xff << 16) /* RW */
+
+/*PPE_BNDR mask*/
+#define BIND_RATE (0xffff << 0) /* RW */
+#define PBND_RD_PRD (0xffff << 16) /* RW */
+
+/*PPE_GLO_CFG mask*/
+#define PPE_EN (0x1 << 0) /* RW */
+#define TTL0_DRP (0x1 << 4) /* RW */
+#define MCAST_TB_EN (0x1 << 7) /* RW */
+#define MCAST_HASH (0x3 << 12) /* RW */
+
+#define MC_P3_PPSE (0xf << 12) /* RW */
+#define MC_P2_PPSE (0xf << 8) /* RW */
+#define MC_P1_PPSE (0xf << 4) /* RW */
+#define MC_P0_PPSE (0xf << 0) /* RW */
+
+#define MIB_EN (0x1 << 0) /* RW */
+#define MIB_READ_CLEAR (0X1 << 1) /* RW */
+#define MIB_CAH_EN (0X1 << 0) /* RW */
+
+/*GDMA_FWD_CFG mask */
+#define GDM_UFRC_MASK (0x7 << 12) /* RW */
+#define GDM_BFRC_MASK (0x7 << 8) /*RW*/
+#define GDM_MFRC_MASK (0x7 << 4) /*RW*/
+#define GDM_OFRC_MASK (0x7 << 0) /*RW*/
+#define GDM_ALL_FRC_MASK \
+ (GDM_UFRC_MASK | GDM_BFRC_MASK | GDM_MFRC_MASK | GDM_OFRC_MASK)
+
+/*QDMA_PAGE mask*/
+#define QTX_CFG_PAGE (0xf << 0) /* RW */
+
+/*QTX_MIB_IF mask*/
+#define MIB_ON_QTX_CFG (0x1 << 31) /* RW */
+#define VQTX_MIB_EN (0x1 << 28) /* RW */
+
+/*--------------------------------------------------------------------------*/
+/* Descriptor Structure */
+/*--------------------------------------------------------------------------*/
+/* Bitfield layouts of the FOE entry info blocks.  These mirror the table
+ * the PPE reads via DMA (its base is written to PPE_TB_BASE), so field
+ * order and widths must not be changed.  The NETSYS V2 hardware uses a
+ * different layout, selected at compile time below.
+ */
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+struct hnat_unbind_info_blk {
+ u32 time_stamp : 8;
+ u32 sp : 4;
+ u32 pcnt : 8;
+ u32 ilgf : 1;
+ u32 mc : 1;
+ u32 preb : 1;
+ u32 pkt_type : 5;
+ u32 state : 2;
+ u32 udp : 1;
+ u32 sta : 1; /* static entry */
+} __packed;
+
+struct hnat_bind_info_blk {
+ u32 time_stamp : 8;
+ u32 sp : 4;
+ u32 mc : 1;
+ u32 ka : 1; /* keep alive */
+ u32 vlan_layer : 3;
+ u32 psn : 1; /* egress packet has PPPoE session */
+ u32 vpm : 1; /* 0:ethertype remark, 1:0x8100(CR default) */
+ u32 ps : 1; /* packet sampling */
+ u32 cah : 1; /* cacheable flag */
+ u32 rmt : 1; /* remove tunnel ip header (6rd/dslite only) */
+ u32 ttl : 1;
+ u32 pkt_type : 5;
+ u32 state : 2;
+ u32 udp : 1;
+ u32 sta : 1; /* static entry */
+} __packed;
+
+struct hnat_info_blk2 {
+ u32 qid : 7; /* QID in Qos Port */
+ u32 port_mg : 1;
+ u32 fqos : 1; /* force to PSE QoS port */
+ u32 dp : 4; /* force to PSE port x */
+ u32 mcast : 1; /* multicast this packet to CPU */
+ u32 pcpl : 1; /* OSBN */
+ u32 mibf : 1;
+ u32 alen : 1;
+ u32 rxid : 2;
+ u32 winfoi : 1;
+ u32 port_ag : 4;
+ u32 dscp : 8; /* DSCP value */
+} __packed;
+
+struct hnat_winfo {
+ u32 bssid : 6; /* WiFi Bssidx */
+ u32 wcid : 10; /* WiFi wtable Idx */
+} __packed;
+
+#else
+/* pre-NETSYS-V2 layouts */
+struct hnat_unbind_info_blk {
+ u32 time_stamp : 8;
+ u32 pcnt : 16; /* packet count */
+ u32 preb : 1;
+ u32 pkt_type : 3;
+ u32 state : 2;
+ u32 udp : 1;
+ u32 sta : 1; /* static entry */
+} __packed;
+
+struct hnat_bind_info_blk {
+ u32 time_stamp : 15;
+ u32 ka : 1; /* keep alive */
+ u32 vlan_layer : 3;
+ u32 psn : 1; /* egress packet has PPPoE session */
+ u32 vpm : 1; /* 0:ethertype remark, 1:0x8100(CR default) */
+ u32 ps : 1; /* packet sampling */
+ u32 cah : 1; /* cacheable flag */
+ u32 rmt : 1; /* remove tunnel ip header (6rd/dslite only) */
+ u32 ttl : 1;
+ u32 pkt_type : 3;
+ u32 state : 2;
+ u32 udp : 1;
+ u32 sta : 1; /* static entry */
+} __packed;
+
+struct hnat_info_blk2 {
+ u32 qid : 4; /* QID in Qos Port */
+ u32 fqos : 1; /* force to PSE QoS port */
+ u32 dp : 3; /* force to PSE port x
+ * 0:PSE,1:GSW, 2:GMAC,4:PPE,5:QDMA,7=DROP
+ */
+ u32 mcast : 1; /* multicast this packet to CPU */
+ u32 pcpl : 1; /* OSBN */
+ u32 mibf : 1; /* 0:off 1:on PPE MIB counter */
+ u32 alen : 1; /* 0:post 1:pre packet length in accounting */
+ u32 port_mg : 6; /* port meter group */
+ u32 port_ag : 6; /* port account group */
+ u32 dscp : 8; /* DSCP value */
+} __packed;
+
+struct hnat_winfo {
+ u32 bssid : 6; /* WiFi Bssidx */
+ u32 wcid : 8; /* WiFi wtable Idx */
+ u32 rxid : 2; /* WiFi Ring idx */
+} __packed;
+#endif
+
+/* info blk2 for WHNAT */
+/* Variant of hnat_info_blk2 used on the WiFi-offload (WHNAT) path; shares
+ * the same 32-bit footprint via the union in each entry type below.
+ */
+struct hnat_info_blk2_whnat {
+ u32 qid : 4; /* QID[3:0] in Qos Port */
+ u32 fqos : 1; /* force to PSE QoS port */
+ u32 dp : 3; /* force to PSE port x
+ * 0:PSE,1:GSW, 2:GMAC,4:PPE,5:QDMA,7=DROP
+ */
+ u32 mcast : 1; /* multicast this packet to CPU */
+ u32 pcpl : 1; /* OSBN */
+ u32 mibf : 1; /* 0:off 1:on PPE MIB counter */
+ u32 alen : 1; /* 0:post 1:pre packet length in accounting */
+ u32 qid2 : 2; /* QID[5:4] in Qos Port */
+ u32 resv : 2;
+ u32 wdmaid : 1; /* 0:to pcie0 dev 1:to pcie1 dev */
+ u32 winfoi : 1; /* 0:off 1:on Wi-Fi hwnat support */
+ u32 port_ag : 6; /* port account group */
+ u32 dscp : 8; /* DSCP value */
+} __packed;
+
+/* FOE entry layout for IPv4 NAT/NAPT flows (original and translated
+ * 5-tuple plus egress L2 rewrite info).
+ */
+struct hnat_ipv4_hnapt {
+ union {
+ struct hnat_bind_info_blk bfib1;
+ struct hnat_unbind_info_blk udib1;
+ u32 info_blk1;
+ };
+ u32 sip;
+ u32 dip;
+ u16 dport;
+ u16 sport;
+ union {
+ struct hnat_info_blk2 iblk2;
+ struct hnat_info_blk2_whnat iblk2w;
+ u32 info_blk2;
+ };
+ u32 new_sip;
+ u32 new_dip;
+ u16 new_dport;
+ u16 new_sport;
+ u16 m_timestamp; /* For mcast*/
+ u16 resv1;
+ u32 resv2;
+ u32 resv3 : 26;
+ u32 act_dp : 6; /* UDF */
+ u16 vlan1;
+ u16 etype;
+ u32 dmac_hi;
+ union {
+#if !defined(CONFIG_MEDIATEK_NETSYS_V2)
+ struct hnat_winfo winfo;
+#endif
+ u16 vlan2;
+ };
+ u16 dmac_lo;
+ u32 smac_hi;
+ u16 pppoe_id;
+ u16 smac_lo;
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+ u16 minfo;
+ struct hnat_winfo winfo;
+#endif
+} __packed;
+
+/* FOE entry layout for DS-Lite flows: the inner IPv4 4-tuple plus the
+ * IPv6 tunnel endpoints and IPv6 header fields to regenerate on egress.
+ */
+struct hnat_ipv4_dslite {
+ union {
+ struct hnat_bind_info_blk bfib1;
+ struct hnat_unbind_info_blk udib1;
+ u32 info_blk1;
+ };
+ u32 sip;
+ u32 dip;
+ u16 dport;
+ u16 sport;
+
+ u32 tunnel_sipv6_0;
+ u32 tunnel_sipv6_1;
+ u32 tunnel_sipv6_2;
+ u32 tunnel_sipv6_3;
+
+ u32 tunnel_dipv6_0;
+ u32 tunnel_dipv6_1;
+ u32 tunnel_dipv6_2;
+ u32 tunnel_dipv6_3;
+
+ u8 flow_lbl[3]; /* in order to consist with Linux kernel (should be 20bits) */
+ u8 priority; /* in order to consist with Linux kernel (should be 8bits) */
+ u32 hop_limit : 8;
+ u32 resv2 : 18;
+ u32 act_dp : 6; /* UDF */
+
+ union {
+ struct hnat_info_blk2 iblk2;
+ struct hnat_info_blk2_whnat iblk2w;
+ u32 info_blk2;
+ };
+
+ u16 vlan1;
+ u16 etype;
+ u32 dmac_hi;
+ union {
+#if !defined(CONFIG_MEDIATEK_NETSYS_V2)
+ struct hnat_winfo winfo;
+#endif
+ u16 vlan2;
+ };
+ u16 dmac_lo;
+ u32 smac_hi;
+ u16 pppoe_id;
+ u16 smac_lo;
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+ u16 minfo;
+ struct hnat_winfo winfo;
+ u32 new_sip;
+ u32 new_dip;
+ u16 new_dport;
+ u16 new_sport;
+#endif
+} __packed;
+
+/* FOE entry layout for IPv6 3-tuple routed flows (src/dst address plus
+ * IP protocol, no ports).
+ */
+struct hnat_ipv6_3t_route {
+ union {
+ struct hnat_bind_info_blk bfib1;
+ struct hnat_unbind_info_blk udib1;
+ u32 info_blk1;
+ };
+ u32 ipv6_sip0;
+ u32 ipv6_sip1;
+ u32 ipv6_sip2;
+ u32 ipv6_sip3;
+ u32 ipv6_dip0;
+ u32 ipv6_dip1;
+ u32 ipv6_dip2;
+ u32 ipv6_dip3;
+ u32 prot : 8;
+ u32 resv : 24;
+
+ u32 resv1;
+ u32 resv2;
+ u32 resv3;
+ u32 resv4 : 26;
+ u32 act_dp : 6; /* UDF */
+
+ union {
+ struct hnat_info_blk2 iblk2;
+ struct hnat_info_blk2_whnat iblk2w;
+ u32 info_blk2;
+ };
+ u16 vlan1;
+ u16 etype;
+ u32 dmac_hi;
+ union {
+#if !defined(CONFIG_MEDIATEK_NETSYS_V2)
+ struct hnat_winfo winfo;
+#endif
+ u16 vlan2;
+ };
+ u16 dmac_lo;
+ u32 smac_hi;
+ u16 pppoe_id;
+ u16 smac_lo;
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+ u16 minfo;
+ struct hnat_winfo winfo;
+#endif
+} __packed;
+
+/* FOE entry layout for IPv6 5-tuple routed flows (addresses plus ports). */
+struct hnat_ipv6_5t_route {
+ union {
+ struct hnat_bind_info_blk bfib1;
+ struct hnat_unbind_info_blk udib1;
+ u32 info_blk1;
+ };
+ u32 ipv6_sip0;
+ u32 ipv6_sip1;
+ u32 ipv6_sip2;
+ u32 ipv6_sip3;
+ u32 ipv6_dip0;
+ u32 ipv6_dip1;
+ u32 ipv6_dip2;
+ u32 ipv6_dip3;
+ u16 dport;
+ u16 sport;
+
+ u32 resv1;
+ u32 resv2;
+ u32 resv3;
+ u32 resv4 : 26;
+ u32 act_dp : 6; /* UDF */
+
+ union {
+ struct hnat_info_blk2 iblk2;
+ struct hnat_info_blk2_whnat iblk2w;
+ u32 info_blk2;
+ };
+
+ u16 vlan1;
+ u16 etype;
+ u32 dmac_hi;
+ union {
+#if !defined(CONFIG_MEDIATEK_NETSYS_V2)
+ struct hnat_winfo winfo;
+#endif
+ u16 vlan2;
+ };
+ u16 dmac_lo;
+ u32 smac_hi;
+ u16 pppoe_id;
+ u16 smac_lo;
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+ u16 minfo;
+ struct hnat_winfo winfo;
+#endif
+} __packed;
+
+/* PPE FOE entry view for an IPv6-over-IPv4 6RD tunnel flow.  The inner
+ * IPv6 5-tuple is followed by the outer IPv4 tunnel endpoints.  Layout
+ * mirrors the hardware table; keep __packed and member order intact.
+ */
+struct hnat_ipv6_6rd {
+ union {
+ struct hnat_bind_info_blk bfib1;
+ struct hnat_unbind_info_blk udib1;
+ u32 info_blk1;
+ };
+ u32 ipv6_sip0;
+ u32 ipv6_sip1;
+ u32 ipv6_sip2;
+ u32 ipv6_sip3;
+ u32 ipv6_dip0;
+ u32 ipv6_dip1;
+ u32 ipv6_dip2;
+ u32 ipv6_dip3;
+ u16 dport;
+ u16 sport;
+
+ /* outer IPv4 tunnel header fields */
+ u32 tunnel_sipv4;
+ u32 tunnel_dipv4;
+ /* presumably the precomputed outer IPv4 header checksum - confirm */
+ u32 hdr_chksum : 16;
+ u32 dscp : 8;
+ u32 ttl : 8;
+ u32 flag : 3;
+ u32 resv1 : 13;
+ u32 per_flow_6rd_id : 1;
+ u32 resv2 : 9;
+ u32 act_dp : 6; /* UDF */
+
+ union {
+ struct hnat_info_blk2 iblk2;
+ struct hnat_info_blk2_whnat iblk2w;
+ u32 info_blk2;
+ };
+
+ u16 vlan1;
+ u16 etype;
+ u32 dmac_hi;
+ union {
+#if !defined(CONFIG_MEDIATEK_NETSYS_V2)
+ struct hnat_winfo winfo;
+#endif
+ u16 vlan2;
+ };
+ u16 dmac_lo;
+ u32 smac_hi;
+ u16 pppoe_id;
+ u16 smac_lo;
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+ /* NETSYS v2 entries are wider and carry extra WLAN info fields */
+ u16 minfo;
+ struct hnat_winfo winfo;
+ u32 resv3;
+ u32 resv4;
+ u16 new_dport;
+ u16 new_sport;
+#endif
+} __packed;
+
+/* One FOE table slot.  The union overlays every supported packet-type
+ * layout on the same hardware entry; bfib1.pkt_type (see enum FoeIpAct
+ * and the IS_IPV4_*/IS_IPV6_* macros) selects which view is valid.
+ */
+struct foe_entry {
+ union {
+ struct hnat_unbind_info_blk udib1;
+ struct hnat_bind_info_blk bfib1;
+ struct hnat_ipv4_hnapt ipv4_hnapt;
+ struct hnat_ipv4_dslite ipv4_dslite;
+ struct hnat_ipv6_3t_route ipv6_3t_route;
+ struct hnat_ipv6_5t_route ipv6_5t_route;
+ struct hnat_ipv6_6rd ipv6_6rd;
+ };
+};
+
+/* If user wants to change default FOE entry number, both DEF_ETRY_NUM and
+ * DEF_ETRY_NUM_CFG need to be modified.
+ */
+#define DEF_ETRY_NUM 8192
+/* feasible values : 16384, 8192, 4096, 2048, 1024 */
+#define DEF_ETRY_NUM_CFG TABLE_8K
+/* corresponding values : TABLE_16K, TABLE_8K, TABLE_4K, TABLE_2K, TABLE_1K */
+#define MAX_EXT_DEVS (0x3fU) /* capacity of mtk_hnat.ext_if[] */
+#define MAX_IF_NUM 64 /* capacity of mtk_hnat.wifi_hook_if[] */
+
+/* Raw per-entry hardware MIB counters: a 48-bit byte count and a 40-bit
+ * packet count, each split across low/high words.  Layout mirrors the
+ * hardware MIB record (hence __packed).
+ */
+struct mib_entry {
+ u32 byt_cnt_l;
+ u16 byt_cnt_h;
+ u32 pkt_cnt_l;
+ u8 pkt_cnt_h;
+ u8 resv0;
+ u32 resv1;
+} __packed;
+
+/* 64-bit software totals of per-flow byte/packet counts (accumulated
+ * from the hardware MIB entries when per_flow_accounting is enabled). */
+struct hnat_accounting {
+ u64 bytes;
+ u64 packets;
+};
+
+/* PPE/HNAT hardware generation, selected per SoC at probe time. */
+enum mtk_hnat_version {
+ MTK_HNAT_V1 = 1, /* version 1: mt7621, mt7623 */
+ MTK_HNAT_V2, /* version 2: mt7622 */
+ MTK_HNAT_V3, /* version 3: mt7629 */
+ MTK_HNAT_V4, /* version 4: mt7986 */
+};
+
+/* Per-SoC feature/capability description bound to the driver instance. */
+struct mtk_hnat_data {
+ u8 num_of_sch; /* presumably QoS scheduler count - confirm vs users */
+ bool whnat; /* WiFi warp-HNAT offload available (see IS_WHNAT()) */
+ bool per_flow_accounting; /* gates the per-entry MIB counters */
+ bool mcast; /* multicast offload supported - TODO confirm */
+ enum mtk_hnat_version version;
+};
+
+/* Driver-global HNAT state; a single instance is exported as hnat_priv. */
+struct mtk_hnat {
+ struct device *dev;
+ void __iomem *fe_base; /* frame-engine register window */
+ void __iomem *ppe_base; /* PPE register window */
+ struct foe_entry *foe_table_cpu; /* CPU view of the FOE table */
+ dma_addr_t foe_table_dev; /* DMA address of the FOE table */
+ u8 enable; /* see hnat_is_enabled()/hnat_enabled() */
+ u8 enable1; /* see hnat_is_enabled1()/hnat_enabled1() */
+ struct dentry *root; /* debugfs root directory */
+ struct debugfs_regset32 *regset;
+
+ struct mib_entry *foe_mib_cpu; /* CPU view of per-entry MIB counters */
+ dma_addr_t foe_mib_dev;
+ struct hnat_accounting *acct; /* accumulated per-flow totals */
+ const struct mtk_hnat_data *data; /* per-SoC capabilities */
+
+ /*devices we plays for*/
+ char wan[IFNAMSIZ];
+ char lan[IFNAMSIZ];
+ char ppd[IFNAMSIZ];
+ u16 lvid;
+ u16 wvid;
+
+ struct reset_control *rstc;
+
+ u8 gmac_num;
+ u8 wan_dsa_port; /* NONE_DSA_PORT when DSA is not in use */
+ struct ppe_mcast_table *pmcast;
+
+ u32 foe_etry_num; /* number of FOE table entries */
+ struct net_device *g_ppdev;
+ struct net_device *wifi_hook_if[MAX_IF_NUM];
+ struct extdev_entry *ext_if[MAX_EXT_DEVS];
+ struct timer_list hnat_sma_build_entry_timer;
+ struct timer_list hnat_reset_timestamp_timer;
+ struct timer_list hnat_mcast_check_timer;
+};
+
+/* External net_device registered into mtk_hnat.ext_if[], tracked by
+ * interface name. */
+struct extdev_entry {
+ char name[IFNAMSIZ];
+ struct net_device *dev;
+};
+
+/* Overlay for the first four bytes of a TCP/UDP header: the big-endian
+ * source and destination ports. */
+struct tcpudphdr {
+ __be16 src;
+ __be16 dst;
+};
+
+/* FOE entry lifecycle states stored in bfib1.state. */
+enum FoeEntryState { INVALID = 0, UNBIND = 1, BIND = 2, FIN = 3 };
+
+/* bfib1.pkt_type encodings.  Note that MAP-T/MAP-E use different codes
+ * on NETSYS v2 hardware; on older hardware both map to 6. */
+enum FoeIpAct {
+ IPV4_HNAPT = 0,
+ IPV4_HNAT = 1,
+ IPV4_DSLITE = 3,
+ IPV6_3T_ROUTE = 4,
+ IPV6_5T_ROUTE = 5,
+ IPV6_6RD = 7,
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+ IPV4_MAP_T = 8,
+ IPV4_MAP_E = 9,
+#else
+ IPV4_MAP_T = 6,
+ IPV4_MAP_E = 6,
+#endif
+};
+
+/*--------------------------------------------------------------------------*/
+/* Common Definition*/
+/*--------------------------------------------------------------------------*/
+
+#define HNAT_SW_VER "1.1.0"
+#define HASH_SEED_KEY 0x12345678
+
+/* PPE_TB_CFG register field values */
+#define ENTRY_80B 1 /* 80-byte FOE entries */
+#define ENTRY_64B 0 /* 64-byte FOE entries */
+#define TABLE_1K 0
+#define TABLE_2K 1
+#define TABLE_4K 2
+#define TABLE_8K 3
+#define TABLE_16K 4
+/* "SMA": action on hash collision / unbound packets */
+#define SMA_DROP 0 /* Drop the packet */
+#define SMA_DROP2 1 /* Drop the packet */
+#define SMA_ONLY_FWD_CPU 2 /* Only Forward to CPU */
+#define SMA_FWD_CPU_BUILD_ENTRY 3 /* Forward to CPU and build new FOE entry */
+#define HASH_MODE_0 0
+#define HASH_MODE_1 1
+#define HASH_MODE_2 2
+#define HASH_MODE_3 3
+
+/* PPE_FLOW_CFG register bits: enable hardware offload per packet type */
+#define BIT_FUC_FOE BIT(2)
+#define BIT_FMC_FOE BIT(1)
+#define BIT_FBC_FOE BIT(0)
+#define BIT_UDP_IP4F_NAT_EN BIT(7) /*Enable IPv4 fragment + UDP packet NAT*/
+#define BIT_IPV6_3T_ROUTE_EN BIT(8)
+#define BIT_IPV6_5T_ROUTE_EN BIT(9)
+#define BIT_IPV6_6RD_EN BIT(10)
+#define BIT_IPV4_NAT_EN BIT(12)
+#define BIT_IPV4_NAPT_EN BIT(13)
+#define BIT_IPV4_DSL_EN BIT(14)
+#define BIT_MIB_BUSY BIT(16)
+#define BIT_IPV4_NAT_FRAG_EN BIT(17)
+#define BIT_IPV4_HASH_GREK BIT(19)
+#define BIT_IPV6_HASH_GREK BIT(20)
+#define BIT_IPV4_MAPE_EN BIT(21)
+#define BIT_IPV4_MAPT_EN BIT(22)
+
+/* GDMA_FWD_CFG values: select the destination PSE port for each frame
+ * class via the four 4-bit fields at offsets 12/8/4/0 (U/B/M/OFRC are
+ * presumably unicast/broadcast/multicast/other frame forward config -
+ * confirm against the datasheet). */
+#define BITS_GDM_UFRC_P_PPE (NR_PPE_PORT << 12)
+#define BITS_GDM_BFRC_P_PPE (NR_PPE_PORT << 8)
+#define BITS_GDM_MFRC_P_PPE (NR_PPE_PORT << 4)
+#define BITS_GDM_OFRC_P_PPE (NR_PPE_PORT << 0)
+#define BITS_GDM_ALL_FRC_P_PPE \
+ (BITS_GDM_UFRC_P_PPE | BITS_GDM_BFRC_P_PPE | BITS_GDM_MFRC_P_PPE | \
+ BITS_GDM_OFRC_P_PPE)
+
+#define BITS_GDM_UFRC_P_CPU_PDMA (NR_PDMA_PORT << 12)
+#define BITS_GDM_BFRC_P_CPU_PDMA (NR_PDMA_PORT << 8)
+#define BITS_GDM_MFRC_P_CPU_PDMA (NR_PDMA_PORT << 4)
+#define BITS_GDM_OFRC_P_CPU_PDMA (NR_PDMA_PORT << 0)
+#define BITS_GDM_ALL_FRC_P_CPU_PDMA \
+ (BITS_GDM_UFRC_P_CPU_PDMA | BITS_GDM_BFRC_P_CPU_PDMA | \
+ BITS_GDM_MFRC_P_CPU_PDMA | BITS_GDM_OFRC_P_CPU_PDMA)
+
+#define BITS_GDM_UFRC_P_CPU_QDMA (NR_QDMA_PORT << 12)
+#define BITS_GDM_BFRC_P_CPU_QDMA (NR_QDMA_PORT << 8)
+#define BITS_GDM_MFRC_P_CPU_QDMA (NR_QDMA_PORT << 4)
+#define BITS_GDM_OFRC_P_CPU_QDMA (NR_QDMA_PORT << 0)
+#define BITS_GDM_ALL_FRC_P_CPU_QDMA \
+ (BITS_GDM_UFRC_P_CPU_QDMA | BITS_GDM_BFRC_P_CPU_QDMA | \
+ BITS_GDM_MFRC_P_CPU_QDMA | BITS_GDM_OFRC_P_CPU_QDMA)
+
+#define BITS_GDM_UFRC_P_DISCARD (NR_DISCARD << 12)
+#define BITS_GDM_BFRC_P_DISCARD (NR_DISCARD << 8)
+#define BITS_GDM_MFRC_P_DISCARD (NR_DISCARD << 4)
+#define BITS_GDM_OFRC_P_DISCARD (NR_DISCARD << 0)
+#define BITS_GDM_ALL_FRC_P_DISCARD \
+ (BITS_GDM_UFRC_P_DISCARD | BITS_GDM_BFRC_P_DISCARD | \
+ BITS_GDM_MFRC_P_DISCARD | BITS_GDM_OFRC_P_DISCARD)
+
+/* CAUTION: hnat_enabled()/hnat_disabled() ASSIGN the flag despite the
+ * predicate-sounding names; only hnat_is_enabled() is a test. */
+#define hnat_is_enabled(hnat_priv) (hnat_priv->enable)
+#define hnat_enabled(hnat_priv) (hnat_priv->enable = 1)
+#define hnat_disabled(hnat_priv) (hnat_priv->enable = 0)
+#define hnat_is_enabled1(hnat_priv) (hnat_priv->enable1)
+#define hnat_enabled1(hnat_priv) (hnat_priv->enable1 = 1)
+#define hnat_disabled1(hnat_priv) (hnat_priv->enable1 = 0)
+
+#define entry_hnat_is_bound(e) (e->bfib1.state == BIND)
+#define entry_hnat_state(e) (e->bfib1.state)
+
+/* 0x3fff is the "no entry" sentinel reported by hardware */
+#define skb_hnat_is_hashed(skb) \
+ (skb_hnat_entry(skb) != 0x3fff && skb_hnat_entry(skb) < hnat_priv->foe_etry_num)
+#define FROM_GE_LAN(skb) (skb_hnat_iface(skb) == FOE_MAGIC_GE_LAN)
+#define FROM_GE_WAN(skb) (skb_hnat_iface(skb) == FOE_MAGIC_GE_WAN)
+#define FROM_GE_PPD(skb) (skb_hnat_iface(skb) == FOE_MAGIC_GE_PPD)
+#define FROM_GE_VIRTUAL(skb) (skb_hnat_iface(skb) == FOE_MAGIC_GE_VIRTUAL)
+#define FROM_EXT(skb) (skb_hnat_iface(skb) == FOE_MAGIC_EXT)
+#define FOE_MAGIC_GE_LAN 0x1
+#define FOE_MAGIC_GE_WAN 0x2
+#define FOE_MAGIC_EXT 0x3
+#define FOE_MAGIC_GE_VIRTUAL 0x4
+#define FOE_MAGIC_GE_PPD 0x5
+#define FOE_MAGIC_WED0 0x6
+#define FOE_MAGIC_WED1 0x7
+#define FOE_INVALID 0xf
+#define index6b(i) (0x3fU - i)
+
+/* NOTE(review): these duplicate the IPV4_HNAPT/IPV4_HNAT enumerators of
+ * enum FoeIpAct with identical values; the macros shadow the enum names. */
+#define IPV4_HNAPT 0
+#define IPV4_HNAT 1
+/* NOTE(review): as defined this expands to a parenthesized comma
+ * expression (a single value), not a printf argument list for
+ * "%d.%d.%d.%d" - looks broken or unused; confirm callers. */
+#define IP_FORMAT(addr) \
+ (((unsigned char *)&addr)[3], ((unsigned char *)&addr)[2], \
+ ((unsigned char *)&addr)[1], ((unsigned char *)&addr)[0])
+
+/*PSE Ports*/
+#define NR_PDMA_PORT 0
+#define NR_GMAC1_PORT 1
+#define NR_GMAC2_PORT 2
+#define NR_WHNAT_WDMA_PORT 3
+#define NR_PPE_PORT 4
+#define NR_QDMA_PORT 5
+#define NR_DISCARD 7
+#define NR_WDMA0_PORT 8
+#define NR_WDMA1_PORT 9
+#define LAN_DEV_NAME hnat_priv->lan
+/* interface classification by configured name prefix */
+#define IS_WAN(dev) \
+ (!strncmp((dev)->name, hnat_priv->wan, strlen(hnat_priv->wan)))
+#define IS_LAN(dev) (!strncmp(dev->name, LAN_DEV_NAME, strlen(LAN_DEV_NAME)))
+#define IS_BR(dev) (!strncmp(dev->name, "br", 2))
+#define IS_WHNAT(dev) \
+ ((hnat_priv->data->whnat && \
+ (get_wifi_hook_if_index_from_dev(dev) != 0)) ? 1 : 0)
+#define IS_EXT(dev) ((get_index_from_dev(dev) != 0) ? 1 : 0)
+#define IS_PPD(dev) (!strcmp(dev->name, hnat_priv->ppd))
+/* pkt_type predicates; the *_GRP forms combine 0/1 results with
+ * bitwise OR, which is equivalent to logical OR here */
+#define IS_IPV4_HNAPT(x) (((x)->bfib1.pkt_type == IPV4_HNAPT) ? 1 : 0)
+#define IS_IPV4_HNAT(x) (((x)->bfib1.pkt_type == IPV4_HNAT) ? 1 : 0)
+#define IS_IPV4_GRP(x) (IS_IPV4_HNAPT(x) | IS_IPV4_HNAT(x))
+#define IS_IPV4_DSLITE(x) (((x)->bfib1.pkt_type == IPV4_DSLITE) ? 1 : 0)
+#define IS_IPV4_MAPE(x) (((x)->bfib1.pkt_type == IPV4_MAP_E) ? 1 : 0)
+#define IS_IPV4_MAPT(x) (((x)->bfib1.pkt_type == IPV4_MAP_T) ? 1 : 0)
+#define IS_IPV6_3T_ROUTE(x) (((x)->bfib1.pkt_type == IPV6_3T_ROUTE) ? 1 : 0)
+#define IS_IPV6_5T_ROUTE(x) (((x)->bfib1.pkt_type == IPV6_5T_ROUTE) ? 1 : 0)
+#define IS_IPV6_6RD(x) (((x)->bfib1.pkt_type == IPV6_6RD) ? 1 : 0)
+#define IS_IPV6_GRP(x) \
+ (IS_IPV6_3T_ROUTE(x) | IS_IPV6_5T_ROUTE(x) | IS_IPV6_6RD(x) | \
+ IS_IPV4_DSLITE(x) | IS_IPV4_MAPE(x) | IS_IPV4_MAPT(x))
+#define IS_BOND_MODE (!strncmp(LAN_DEV_NAME, "bond", 4))
+#define IS_GMAC1_MODE ((hnat_priv->gmac_num == 1) ? 1 : 0)
+
+#define es(entry) (entry_state[entry->bfib1.state])
+#define ei(entry, end) (hnat_priv->foe_etry_num - (int)(end - entry))
+#define pt(entry) (packet_type[entry->ipv4_hnapt.bfib1.pkt_type])
+/* NOTE(review): smac_hi/dmac_hi are declared as u32 scalars in the entry
+ * structs; the [] indexing below only compiles against a byte-array
+ * layout - these macros appear stale or unused, confirm before use. */
+#define ipv4_smac(mac, e) \
+ ({ \
+ mac[0] = e->ipv4_hnapt.smac_hi[3]; \
+ mac[1] = e->ipv4_hnapt.smac_hi[2]; \
+ mac[2] = e->ipv4_hnapt.smac_hi[1]; \
+ mac[3] = e->ipv4_hnapt.smac_hi[0]; \
+ mac[4] = e->ipv4_hnapt.smac_lo[1]; \
+ mac[5] = e->ipv4_hnapt.smac_lo[0]; \
+ })
+#define ipv4_dmac(mac, e) \
+ ({ \
+ mac[0] = e->ipv4_hnapt.dmac_hi[3]; \
+ mac[1] = e->ipv4_hnapt.dmac_hi[2]; \
+ mac[2] = e->ipv4_hnapt.dmac_hi[1]; \
+ mac[3] = e->ipv4_hnapt.dmac_hi[0]; \
+ mac[4] = e->ipv4_hnapt.dmac_lo[1]; \
+ mac[5] = e->ipv4_hnapt.dmac_lo[0]; \
+ })
+
+#define IS_DSA_LAN(dev) (!strncmp(dev->name, "lan", 3))
+#define IS_DSA_WAN(dev) (!strncmp(dev->name, "wan", 3))
+#define NONE_DSA_PORT 0xff
+#define MAX_CRSN_NUM 32 /* size of the CPU-reason counter array */
+#define IPV6_HDR_LEN 40
+
+/*QDMA_PAGE value*/
+#define NUM_OF_Q_PER_PAGE 16
+
+/*IPv6 Header*/
+#ifndef NEXTHDR_IPIP
+#define NEXTHDR_IPIP 4
+#endif
+
+extern const struct of_device_id of_hnat_match[];
+extern struct mtk_hnat *hnat_priv;
+
+#if defined(CONFIG_NET_DSA_MT7530)
+/* Fill the DSA special tag fields of @entry for switch-attached ports;
+ * implemented in the DSA-aware part of the driver. */
+void hnat_dsa_fill_stag(const struct net_device *netdev,
+ struct foe_entry *entry,
+ struct flow_offload_hw_path *hw_path,
+ u16 eth_proto, int mape);
+
+/* DSA handling is active when a WAN switch port was resolved at probe. */
+static inline bool hnat_dsa_is_enable(struct mtk_hnat *priv)
+{
+ return (priv->wan_dsa_port != NONE_DSA_PORT);
+}
+#else
+/* No-op stubs when the MT7530 DSA driver is not built in. */
+static inline void hnat_dsa_fill_stag(const struct net_device *netdev,
+ struct foe_entry *entry,
+ struct flow_offload_hw_path *hw_path,
+ u16 eth_proto, int mape)
+{
+}
+
+static inline bool hnat_dsa_is_enable(struct mtk_hnat *priv)
+{
+ return false;
+}
+#endif
+
+/* Entry points implemented across the hnat sub-modules (debugfs,
+ * netfilter hooks, external-device bookkeeping, register helpers). */
+void hnat_deinit_debugfs(struct mtk_hnat *h);
+int hnat_init_debugfs(struct mtk_hnat *h);
+int hnat_register_nf_hooks(void);
+void hnat_unregister_nf_hooks(void);
+int whnat_adjust_nf_hooks(void);
+int mtk_hqos_ptype_cb(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *unused);
+/* debugfs-controlled knobs defined in hnat_debugfs.c */
+extern int dbg_cpu_reason;
+extern int debug_level;
+extern int hook_toggle;
+extern int mape_toggle;
+
+int ext_if_add(struct extdev_entry *ext_entry);
+int ext_if_del(struct extdev_entry *ext_entry);
+void cr_set_field(void __iomem *reg, u32 field, u32 val);
+int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no);
+int mtk_sw_nat_hook_rx(struct sk_buff *skb);
+void mtk_ppe_dev_register_hook(struct net_device *dev);
+void mtk_ppe_dev_unregister_hook(struct net_device *dev);
+int nf_hnat_netdevice_event(struct notifier_block *unused, unsigned long event,
+ void *ptr);
+int nf_hnat_netevent_handler(struct notifier_block *unused, unsigned long event,
+ void *ptr);
+uint32_t foe_dump_pkt(struct sk_buff *skb);
+uint32_t hnat_cpu_reason_cnt(struct sk_buff *skb);
+int hnat_enable_hook(void);
+int hnat_disable_hook(void);
+void hnat_cache_ebl(int enable);
+void set_gmac_ppe_fwd(int gmac_no, int enable);
+int entry_delete(int index);
+
+/* Return the current 16-bit FOE timestamp read from frame-engine
+ * register 0x0010 of the given instance. */
+static inline u16 foe_timestamp(struct mtk_hnat *h)
+{
+ /* Use the instance passed by the caller; the old code ignored @h and
+ * always read through the hnat_priv global. */
+ return readl(h->fe_base + 0x0010) & 0xffff;
+}
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_debugfs.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_debugfs.c
new file mode 100644
index 0000000..4ae9128
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_debugfs.c
@@ -0,0 +1,1952 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
+ * Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/iopoll.h>
+
+#include "hnat.h"
+#include "nf_hnat_mtk.h"
+#include "../mtk_eth_soc.h"
+
+int dbg_entry_state = BIND; /* entry-state filter for the hnat_entry dump */
+typedef int (*debugfs_write_func)(int par1);
+int debug_level; /* verbosity, 0-7 (see hnat_set_usage()) */
+int dbg_cpu_reason; /* CPU reason being tracked when debug_level == 7 */
+int hook_toggle;
+int mape_toggle;
+unsigned int dbg_cpu_reason_cnt[MAX_CRSN_NUM]; /* per-reason hit counters */
+
+static const char * const entry_state[] = { "INVALID", "UNBIND", "BIND", "FIN" };
+
+/* indexed by bfib1.pkt_type (see the pt() macro); "REV" is a reserved
+ * encoding with no corresponding packet type */
+static const char * const packet_type[] = {
+ "IPV4_HNAPT", "IPV4_HNAT", "IPV6_1T_ROUTE", "IPV4_DSLITE",
+ "IPV6_3T_ROUTE", "IPV6_5T_ROUTE", "REV", "IPV6_6RD",
+ "IPV4_MAP_T", "IPV4_MAP_E",
+};
+
+/* Map the HNAT CPU reason carried in @skb to a human-readable string.
+ * Returns a string literal for known reasons, or a small static scratch
+ * buffer for unknown codes (not reentrant - debug path only). */
+static uint8_t *show_cpu_reason(struct sk_buff *skb)
+{
+ static u8 buf[32];
+
+ switch (skb_hnat_reason(skb)) {
+ case TTL_0:
+ return "IPv4(IPv6) TTL(hop limit)\n";
+ case HAS_OPTION_HEADER:
+ return "Ipv4(IPv6) has option(extension) header\n";
+ case NO_FLOW_IS_ASSIGNED:
+ return "No flow is assigned\n";
+ case IPV4_WITH_FRAGMENT:
+ return "IPv4 HNAT doesn't support IPv4 /w fragment\n";
+ case IPV4_HNAPT_DSLITE_WITH_FRAGMENT:
+ return "IPv4 HNAPT/DS-Lite doesn't support IPv4 /w fragment\n";
+ case IPV4_HNAPT_DSLITE_WITHOUT_TCP_UDP:
+ return "IPv4 HNAPT/DS-Lite can't find TCP/UDP sport/dport\n";
+ case IPV6_5T_6RD_WITHOUT_TCP_UDP:
+ return "IPv6 5T-route/6RD can't find TCP/UDP sport/dport\n";
+ case TCP_FIN_SYN_RST:
+ return "Ingress packet is TCP fin/syn/rst\n";
+ case UN_HIT:
+ return "FOE Un-hit\n";
+ case HIT_UNBIND:
+ return "FOE Hit unbind\n";
+ case HIT_UNBIND_RATE_REACH:
+ return "FOE Hit unbind & rate reach\n";
+ case HIT_BIND_TCP_FIN:
+ return "Hit bind PPE TCP FIN entry\n";
+ case HIT_BIND_TTL_1:
+ return "Hit bind PPE entry and TTL(hop limit) = 1 and TTL(hot limit) - 1\n";
+ case HIT_BIND_WITH_VLAN_VIOLATION:
+ return "Hit bind and VLAN replacement violation\n";
+ case HIT_BIND_KEEPALIVE_UC_OLD_HDR:
+ return "Hit bind and keep alive with unicast old-header packet\n";
+ case HIT_BIND_KEEPALIVE_MC_NEW_HDR:
+ return "Hit bind and keep alive with multicast new-header packet\n";
+ case HIT_BIND_KEEPALIVE_DUP_OLD_HDR:
+ return "Hit bind and keep alive with duplicate old-header packet\n";
+ case HIT_BIND_FORCE_TO_CPU:
+ return "FOE Hit bind & force to CPU\n";
+ case HIT_BIND_EXCEED_MTU:
+ return "Hit bind and exceed MTU\n";
+ case HIT_BIND_MULTICAST_TO_CPU:
+ return "Hit bind multicast packet to CPU\n";
+ case HIT_BIND_MULTICAST_TO_GMAC_CPU:
+ return "Hit bind multicast packet to GMAC & CPU\n";
+ case HIT_PRE_BIND:
+ return "Pre bind\n";
+ }
+
+ /* Unknown code: report the reason value itself (the old code printed
+ * the FOE entry index here, which is misleading), bounded to buf. */
+ snprintf(buf, sizeof(buf), "CPU Reason Error - %X\n",
+ skb_hnat_reason(skb));
+ return buf;
+}
+
+/* Dump the FOE entry associated with a received skb to the kernel log:
+ * receive interface, entry index, CPU reason, then the per-packet-type
+ * tuple and state/protocol fields.  Always returns 1. */
+uint32_t foe_dump_pkt(struct sk_buff *skb)
+{
+ struct foe_entry *entry;
+
+ entry = &hnat_priv->foe_table_cpu[skb_hnat_entry(skb)];
+ pr_info("\nRx===<FOE_Entry=%d>=====\n", skb_hnat_entry(skb));
+ pr_info("RcvIF=%s\n", skb->dev->name);
+ pr_info("FOE_Entry=%d\n", skb_hnat_entry(skb));
+ pr_info("CPU Reason=%s", show_cpu_reason(skb));
+ pr_info("ALG=%d\n", skb_hnat_alg(skb));
+ pr_info("SP=%d\n", skb_hnat_sport(skb));
+
+ /* some special alert occurred, so entry_num is useless (just skip it) */
+ if (skb_hnat_entry(skb) == 0x3fff)
+ return 1;
+
+ /* PPE: IPv4 packet=IPV4_HNAT IPv6 packet=IPV6_ROUTE */
+ if (IS_IPV4_GRP(entry)) {
+ /* table stores addresses host-ordered; swap for %pI4 */
+ __be32 saddr = htonl(entry->ipv4_hnapt.sip);
+ __be32 daddr = htonl(entry->ipv4_hnapt.dip);
+
+ pr_info("Information Block 1=%x\n",
+ entry->ipv4_hnapt.info_blk1);
+ pr_info("SIP=%pI4\n", &saddr);
+ pr_info("DIP=%pI4\n", &daddr);
+ pr_info("SPORT=%d\n", entry->ipv4_hnapt.sport);
+ pr_info("DPORT=%d\n", entry->ipv4_hnapt.dport);
+ pr_info("Information Block 2=%x\n",
+ entry->ipv4_hnapt.info_blk2);
+ pr_info("State = %s, proto = %s\n", entry->bfib1.state == 0 ?
+ "Invalid" : entry->bfib1.state == 1 ?
+ "Unbind" : entry->bfib1.state == 2 ?
+ "BIND" : entry->bfib1.state == 3 ?
+ "FIN" : "Unknown",
+ entry->ipv4_hnapt.bfib1.udp == 0 ?
+ "TCP" : entry->ipv4_hnapt.bfib1.udp == 1 ?
+ "UDP" : "Unknown");
+ } else if (IS_IPV6_GRP(entry)) {
+ pr_info("Information Block 1=%x\n",
+ entry->ipv6_5t_route.info_blk1);
+ pr_info("IPv6_SIP=%08X:%08X:%08X:%08X\n",
+ entry->ipv6_5t_route.ipv6_sip0,
+ entry->ipv6_5t_route.ipv6_sip1,
+ entry->ipv6_5t_route.ipv6_sip2,
+ entry->ipv6_5t_route.ipv6_sip3);
+ pr_info("IPv6_DIP=%08X:%08X:%08X:%08X\n",
+ entry->ipv6_5t_route.ipv6_dip0,
+ entry->ipv6_5t_route.ipv6_dip1,
+ entry->ipv6_5t_route.ipv6_dip2,
+ entry->ipv6_5t_route.ipv6_dip3);
+ pr_info("SPORT=%d\n", entry->ipv6_5t_route.sport);
+ pr_info("DPORT=%d\n", entry->ipv6_5t_route.dport);
+ pr_info("Information Block 2=%x\n",
+ entry->ipv6_5t_route.info_blk2);
+ pr_info("State = %s, proto = %s\n", entry->bfib1.state == 0 ?
+ "Invalid" : entry->bfib1.state == 1 ?
+ "Unbind" : entry->bfib1.state == 2 ?
+ "BIND" : entry->bfib1.state == 3 ?
+ "FIN" : "Unknown",
+ entry->ipv6_5t_route.bfib1.udp == 0 ?
+ "TCP" : entry->ipv6_5t_route.bfib1.udp == 1 ?
+ "UDP" : "Unknown");
+ } else {
+ pr_info("unknown Pkt_type=%d\n", entry->bfib1.pkt_type);
+ }
+
+ pr_info("==================================\n");
+ return 1;
+}
+
+/* Bump the per-CPU-reason hit counter for @skb's reason code; the fixed
+ * switch maps each (non-contiguous) hardware reason value onto a dense
+ * dbg_cpu_reason_cnt[] slot.  Always returns 0. */
+uint32_t hnat_cpu_reason_cnt(struct sk_buff *skb)
+{
+ switch (skb_hnat_reason(skb)) {
+ case TTL_0:
+ dbg_cpu_reason_cnt[0]++;
+ return 0;
+ case HAS_OPTION_HEADER:
+ dbg_cpu_reason_cnt[1]++;
+ return 0;
+ case NO_FLOW_IS_ASSIGNED:
+ dbg_cpu_reason_cnt[2]++;
+ return 0;
+ case IPV4_WITH_FRAGMENT:
+ dbg_cpu_reason_cnt[3]++;
+ return 0;
+ case IPV4_HNAPT_DSLITE_WITH_FRAGMENT:
+ dbg_cpu_reason_cnt[4]++;
+ return 0;
+ case IPV4_HNAPT_DSLITE_WITHOUT_TCP_UDP:
+ dbg_cpu_reason_cnt[5]++;
+ return 0;
+ case IPV6_5T_6RD_WITHOUT_TCP_UDP:
+ dbg_cpu_reason_cnt[6]++;
+ return 0;
+ case TCP_FIN_SYN_RST:
+ dbg_cpu_reason_cnt[7]++;
+ return 0;
+ case UN_HIT:
+ dbg_cpu_reason_cnt[8]++;
+ return 0;
+ case HIT_UNBIND:
+ dbg_cpu_reason_cnt[9]++;
+ return 0;
+ case HIT_UNBIND_RATE_REACH:
+ dbg_cpu_reason_cnt[10]++;
+ return 0;
+ case HIT_BIND_TCP_FIN:
+ dbg_cpu_reason_cnt[11]++;
+ return 0;
+ case HIT_BIND_TTL_1:
+ dbg_cpu_reason_cnt[12]++;
+ return 0;
+ case HIT_BIND_WITH_VLAN_VIOLATION:
+ dbg_cpu_reason_cnt[13]++;
+ return 0;
+ case HIT_BIND_KEEPALIVE_UC_OLD_HDR:
+ dbg_cpu_reason_cnt[14]++;
+ return 0;
+ case HIT_BIND_KEEPALIVE_MC_NEW_HDR:
+ dbg_cpu_reason_cnt[15]++;
+ return 0;
+ case HIT_BIND_KEEPALIVE_DUP_OLD_HDR:
+ dbg_cpu_reason_cnt[16]++;
+ return 0;
+ case HIT_BIND_FORCE_TO_CPU:
+ dbg_cpu_reason_cnt[17]++;
+ return 0;
+ case HIT_BIND_EXCEED_MTU:
+ dbg_cpu_reason_cnt[18]++;
+ return 0;
+ case HIT_BIND_MULTICAST_TO_CPU:
+ dbg_cpu_reason_cnt[19]++;
+ return 0;
+ case HIT_BIND_MULTICAST_TO_GMAC_CPU:
+ dbg_cpu_reason_cnt[20]++;
+ return 0;
+ case HIT_PRE_BIND:
+ dbg_cpu_reason_cnt[21]++;
+ return 0;
+ }
+
+ /* unknown reasons are silently ignored (no counter slot) */
+ return 0;
+}
+
+/* debugfs "cpu_reason" type=0 handler: set the verbosity level and print
+ * the usage/help text to the kernel log.  Always returns 0. */
+int hnat_set_usage(int level)
+{
+ debug_level = level;
+ pr_info("Read cpu_reason count: cat /sys/kernel/debug/hnat/cpu_reason\n\n");
+ pr_info("====================Advanced Settings====================\n");
+ pr_info("Usage: echo [type] [option] > /sys/kernel/debug/hnat/cpu_reason\n\n");
+ pr_info("Commands: [type] [option]\n");
+ pr_info(" 0 0~7 Set debug_level(0~7), current debug_level=%d\n",
+ debug_level);
+ pr_info(" 1 cpu_reason Track entries of the set cpu_reason\n");
+ pr_info(" Set type=1 will change debug_level=7\n");
+ pr_info("cpu_reason list:\n");
+ pr_info(" 2 IPv4(IPv6) TTL(hop limit) = 0\n");
+ pr_info(" 3 IPv4(IPv6) has option(extension) header\n");
+ pr_info(" 7 No flow is assigned\n");
+ pr_info(" 8 IPv4 HNAT doesn't support IPv4 /w fragment\n");
+ pr_info(" 9 IPv4 HNAPT/DS-Lite doesn't support IPv4 /w fragment\n");
+ pr_info(" 10 IPv4 HNAPT/DS-Lite can't find TCP/UDP sport/dport\n");
+ pr_info(" 11 IPv6 5T-route/6RD can't find TCP/UDP sport/dport\n");
+ pr_info(" 12 Ingress packet is TCP fin/syn/rst\n");
+ pr_info(" 13 FOE Un-hit\n");
+ pr_info(" 14 FOE Hit unbind\n");
+ pr_info(" 15 FOE Hit unbind & rate reach\n");
+ pr_info(" 16 Hit bind PPE TCP FIN entry\n");
+ pr_info(" 17 Hit bind PPE entry and TTL(hop limit) = 1\n");
+ pr_info(" 18 Hit bind and VLAN replacement violation\n");
+ pr_info(" 19 Hit bind and keep alive with unicast old-header packet\n");
+ pr_info(" 20 Hit bind and keep alive with multicast new-header packet\n");
+ pr_info(" 21 Hit bind and keep alive with duplicate old-header packet\n");
+ pr_info(" 22 FOE Hit bind & force to CPU\n");
+ pr_info(" 23 HIT_BIND_WITH_OPTION_HEADER\n");
+ pr_info(" 24 Switch clone multicast packet to CPU\n");
+ pr_info(" 25 Switch clone multicast packet to GMAC1 & CPU\n");
+ pr_info(" 26 HIT_PRE_BIND\n");
+ pr_info(" 27 HIT_BIND_PACKET_SAMPLING\n");
+ pr_info(" 28 Hit bind and exceed MTU\n");
+
+ return 0;
+}
+
+/* debugfs "cpu_reason" type=1 handler: track packets whose CPU reason
+ * matches @cpu_reason, forcing verbose debug_level=7 as documented in
+ * hnat_set_usage().  Always returns 0. */
+int hnat_cpu_reason(int cpu_reason)
+{
+ dbg_cpu_reason = cpu_reason;
+ debug_level = 7;
+ pr_info("show cpu reason = %d\n", cpu_reason);
+
+ return 0;
+}
+
+/* debugfs "hnat_entry" type=0 handler: set the verbosity level and print
+ * the usage/help text to the kernel log.  Always returns 0. */
+int entry_set_usage(int level)
+{
+ debug_level = level;
+ pr_info("Show all entries(default state=bind): cat /sys/kernel/debug/hnat/hnat_entry\n\n");
+ pr_info("====================Advanced Settings====================\n");
+ pr_info("Usage: echo [type] [option] > /sys/kernel/debug/hnat/hnat_entry\n\n");
+ pr_info("Commands: [type] [option]\n");
+ pr_info(" 0 0~7 Set debug_level(0~7), current debug_level=%d\n",
+ debug_level);
+ pr_info(" 1 0~3 Change tracking state\n");
+ pr_info(" (0:invalid; 1:unbind; 2:bind; 3:fin)\n");
+ pr_info(" 2 <entry_idx> Show specific foe entry info. of assigned <entry_idx>\n");
+ pr_info(" 3 <entry_idx> Delete specific foe entry of assigned <entry_idx>\n");
+
+ return 0;
+}
+
+/* debugfs "hnat_entry" type=1 handler: record which FOE entry state the
+ * entry dump tracks and echo the selection to the kernel log. */
+int entry_set_state(int state)
+{
+ static const char * const state_names[] = {
+ "Invalid", "Unbind", "BIND", "FIN"
+ };
+ const char *name = "Unknown";
+
+ dbg_entry_state = state;
+ if (state >= 0 &&
+ state < (int)(sizeof(state_names) / sizeof(state_names[0])))
+ name = state_names[state];
+ pr_info("ENTRY STATE = %s\n", name);
+ return 0;
+}
+
+/* Dump one FOE entry in full: optional raw hex words (debug_level >= 2),
+ * then the per-packet-type tuple, MAC addresses, VLAN/PPPoE fields and
+ * binding state.  Always returns 0. */
+int entry_detail(int index)
+{
+ struct foe_entry *entry;
+ struct mtk_hnat *h = hnat_priv;
+ u32 *p;
+ u32 i = 0;
+ u32 print_cnt;
+ unsigned char h_dest[ETH_ALEN];
+ unsigned char h_source[ETH_ALEN];
+ __be32 saddr, daddr, nsaddr, ndaddr;
+
+ entry = h->foe_table_cpu + index;
+ /* read through the ipv4_hnapt union view up front; harmless for
+ * other packet types since only the matching branch prints them */
+ saddr = htonl(entry->ipv4_hnapt.sip);
+ daddr = htonl(entry->ipv4_hnapt.dip);
+ nsaddr = htonl(entry->ipv4_hnapt.new_sip);
+ ndaddr = htonl(entry->ipv4_hnapt.new_dip);
+ p = (uint32_t *)entry;
+ pr_info("==========<Flow Table Entry=%d (%p)>===============\n", index,
+ entry);
+ if (debug_level >= 2) {
+ print_cnt = 20;
+ for (i = 0; i < print_cnt; i++)
+ pr_info("%02d: %08X\n", i, *(p + i));
+ }
+ pr_info("-----------------<Flow Info>------------------\n");
+ pr_info("Information Block 1: %08X\n", entry->ipv4_hnapt.info_blk1);
+
+ if (IS_IPV4_HNAPT(entry)) {
+ pr_info("Information Block 2: %08X (FP=%d FQOS=%d QID=%d)",
+ entry->ipv4_hnapt.info_blk2,
+ entry->ipv4_hnapt.iblk2.dp,
+ entry->ipv4_hnapt.iblk2.fqos,
+ entry->ipv4_hnapt.iblk2.qid);
+ pr_info("Create IPv4 HNAPT entry\n");
+ pr_info("IPv4 Org IP/Port: %pI4:%d->%pI4:%d\n", &saddr,
+ entry->ipv4_hnapt.sport, &daddr,
+ entry->ipv4_hnapt.dport);
+ pr_info("IPv4 New IP/Port: %pI4:%d->%pI4:%d\n", &nsaddr,
+ entry->ipv4_hnapt.new_sport, &ndaddr,
+ entry->ipv4_hnapt.new_dport);
+ } else if (IS_IPV4_HNAT(entry)) {
+ pr_info("Information Block 2: %08X\n",
+ entry->ipv4_hnapt.info_blk2);
+ pr_info("Create IPv4 HNAT entry\n");
+ pr_info("IPv4 Org IP: %pI4->%pI4\n", &saddr, &daddr);
+ pr_info("IPv4 New IP: %pI4->%pI4\n", &nsaddr, &ndaddr);
+ } else if (IS_IPV4_DSLITE(entry)) {
+ pr_info("Information Block 2: %08X\n",
+ entry->ipv4_dslite.info_blk2);
+ pr_info("Create IPv4 Ds-Lite entry\n");
+ pr_info("IPv4 Ds-Lite: %pI4:%d->%pI4:%d\n", &saddr,
+ entry->ipv4_dslite.sport, &daddr,
+ entry->ipv4_dslite.dport);
+ pr_info("EG DIPv6: %08X:%08X:%08X:%08X->%08X:%08X:%08X:%08X\n",
+ entry->ipv4_dslite.tunnel_sipv6_0,
+ entry->ipv4_dslite.tunnel_sipv6_1,
+ entry->ipv4_dslite.tunnel_sipv6_2,
+ entry->ipv4_dslite.tunnel_sipv6_3,
+ entry->ipv4_dslite.tunnel_dipv6_0,
+ entry->ipv4_dslite.tunnel_dipv6_1,
+ entry->ipv4_dslite.tunnel_dipv6_2,
+ entry->ipv4_dslite.tunnel_dipv6_3);
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+ } else if (IS_IPV4_MAPE(entry)) {
+ /* MAP-E reuses the dslite layout's new_sip/new_dip fields */
+ nsaddr = htonl(entry->ipv4_dslite.new_sip);
+ ndaddr = htonl(entry->ipv4_dslite.new_dip);
+
+ pr_info("Information Block 2: %08X\n",
+ entry->ipv4_dslite.info_blk2);
+ pr_info("Create IPv4 MAP-E entry\n");
+ pr_info("IPv4 MAP-E Org IP/Port: %pI4:%d->%pI4:%d\n",
+ &saddr, entry->ipv4_dslite.sport,
+ &daddr, entry->ipv4_dslite.dport);
+ pr_info("IPv4 MAP-E New IP/Port: %pI4:%d->%pI4:%d\n",
+ &nsaddr, entry->ipv4_dslite.new_sport,
+ &ndaddr, entry->ipv4_dslite.new_dport);
+ pr_info("EG DIPv6: %08X:%08X:%08X:%08X->%08X:%08X:%08X:%08X\n",
+ entry->ipv4_dslite.tunnel_sipv6_0,
+ entry->ipv4_dslite.tunnel_sipv6_1,
+ entry->ipv4_dslite.tunnel_sipv6_2,
+ entry->ipv4_dslite.tunnel_sipv6_3,
+ entry->ipv4_dslite.tunnel_dipv6_0,
+ entry->ipv4_dslite.tunnel_dipv6_1,
+ entry->ipv4_dslite.tunnel_dipv6_2,
+ entry->ipv4_dslite.tunnel_dipv6_3);
+#endif
+ } else if (IS_IPV6_3T_ROUTE(entry)) {
+ pr_info("Information Block 2: %08X\n",
+ entry->ipv6_3t_route.info_blk2);
+ pr_info("Create IPv6 3-Tuple entry\n");
+ pr_info("ING SIPv6->DIPv6: %08X:%08X:%08X:%08X-> %08X:%08X:%08X:%08X (Prot=%d)\n",
+ entry->ipv6_3t_route.ipv6_sip0,
+ entry->ipv6_3t_route.ipv6_sip1,
+ entry->ipv6_3t_route.ipv6_sip2,
+ entry->ipv6_3t_route.ipv6_sip3,
+ entry->ipv6_3t_route.ipv6_dip0,
+ entry->ipv6_3t_route.ipv6_dip1,
+ entry->ipv6_3t_route.ipv6_dip2,
+ entry->ipv6_3t_route.ipv6_dip3,
+ entry->ipv6_3t_route.prot);
+ } else if (IS_IPV6_5T_ROUTE(entry)) {
+ pr_info("Information Block 2: %08X\n",
+ entry->ipv6_5t_route.info_blk2);
+ pr_info("Create IPv6 5-Tuple entry\n");
+ pr_info("ING SIPv6->DIPv6: %08X:%08X:%08X:%08X:%d-> %08X:%08X:%08X:%08X:%d\n",
+ entry->ipv6_5t_route.ipv6_sip0,
+ entry->ipv6_5t_route.ipv6_sip1,
+ entry->ipv6_5t_route.ipv6_sip2,
+ entry->ipv6_5t_route.ipv6_sip3,
+ entry->ipv6_5t_route.sport,
+ entry->ipv6_5t_route.ipv6_dip0,
+ entry->ipv6_5t_route.ipv6_dip1,
+ entry->ipv6_5t_route.ipv6_dip2,
+ entry->ipv6_5t_route.ipv6_dip3,
+ entry->ipv6_5t_route.dport);
+ } else if (IS_IPV6_6RD(entry)) {
+ pr_info("Information Block 2: %08X\n",
+ entry->ipv6_6rd.info_blk2);
+ pr_info("Create IPv6 6RD entry\n");
+ pr_info("ING SIPv6->DIPv6: %08X:%08X:%08X:%08X:%d-> %08X:%08X:%08X:%08X:%d\n",
+ entry->ipv6_6rd.ipv6_sip0, entry->ipv6_6rd.ipv6_sip1,
+ entry->ipv6_6rd.ipv6_sip2, entry->ipv6_6rd.ipv6_sip3,
+ entry->ipv6_6rd.sport, entry->ipv6_6rd.ipv6_dip0,
+ entry->ipv6_6rd.ipv6_dip1, entry->ipv6_6rd.ipv6_dip2,
+ entry->ipv6_6rd.ipv6_dip3, entry->ipv6_6rd.dport);
+ }
+ if (IS_IPV4_HNAPT(entry) || IS_IPV4_HNAT(entry)) {
+ /* MACs are stored byte-swapped in the table; undo for %pM */
+ *((u32 *)h_source) = swab32(entry->ipv4_hnapt.smac_hi);
+ *((u16 *)&h_source[4]) = swab16(entry->ipv4_hnapt.smac_lo);
+ *((u32 *)h_dest) = swab32(entry->ipv4_hnapt.dmac_hi);
+ *((u16 *)&h_dest[4]) = swab16(entry->ipv4_hnapt.dmac_lo);
+ pr_info("SMAC=%pM => DMAC=%pM\n", h_source, h_dest);
+ pr_info("State = %s, ", entry->bfib1.state == 0 ?
+ "Invalid" : entry->bfib1.state == 1 ?
+ "Unbind" : entry->bfib1.state == 2 ?
+ "BIND" : entry->bfib1.state == 3 ?
+ "FIN" : "Unknown");
+ pr_info("Vlan_Layer = %u, ", entry->bfib1.vlan_layer);
+ pr_info("Eth_type = 0x%x, Vid1 = 0x%x, Vid2 = 0x%x\n",
+ entry->ipv4_hnapt.etype, entry->ipv4_hnapt.vlan1,
+ entry->ipv4_hnapt.vlan2);
+ pr_info("multicast = %d, pppoe = %d, proto = %s\n",
+ entry->ipv4_hnapt.iblk2.mcast,
+ entry->ipv4_hnapt.bfib1.psn,
+ entry->ipv4_hnapt.bfib1.udp == 0 ?
+ "TCP" : entry->ipv4_hnapt.bfib1.udp == 1 ?
+ "UDP" : "Unknown");
+ pr_info("=========================================\n\n");
+ } else {
+ *((u32 *)h_source) = swab32(entry->ipv6_5t_route.smac_hi);
+ *((u16 *)&h_source[4]) = swab16(entry->ipv6_5t_route.smac_lo);
+ *((u32 *)h_dest) = swab32(entry->ipv6_5t_route.dmac_hi);
+ *((u16 *)&h_dest[4]) = swab16(entry->ipv6_5t_route.dmac_lo);
+ pr_info("SMAC=%pM => DMAC=%pM\n", h_source, h_dest);
+ pr_info("State = %s, ", entry->bfib1.state == 0 ?
+ "Invalid" : entry->bfib1.state == 1 ?
+ "Unbind" : entry->bfib1.state == 2 ?
+ "BIND" : entry->bfib1.state == 3 ?
+ "FIN" : "Unknown");
+
+ pr_info("Vlan_Layer = %u, ", entry->bfib1.vlan_layer);
+ pr_info("Eth_type = 0x%x, Vid1 = 0x%x, Vid2 = 0x%x\n",
+ entry->ipv6_5t_route.etype, entry->ipv6_5t_route.vlan1,
+ entry->ipv6_5t_route.vlan2);
+ pr_info("multicast = %d, pppoe = %d, proto = %s\n",
+ entry->ipv6_5t_route.iblk2.mcast,
+ entry->ipv6_5t_route.bfib1.psn,
+ entry->ipv6_5t_route.bfib1.udp == 0 ?
+ "TCP" : entry->ipv6_5t_route.bfib1.udp == 1 ?
+ "UDP" : "Unknown");
+ pr_info("=========================================\n\n");
+ }
+ return 0;
+}
+
+/* Zero out the FOE entry at @index and flush the PPE cache so the
+ * hardware stops using the stale binding.  Always returns 0. */
+int entry_delete(int index)
+{
+ struct foe_entry *slot = hnat_priv->foe_table_cpu + index;
+
+ memset(slot, 0, sizeof(*slot));
+
+ /* clear HWNAT cache */
+ hnat_cache_ebl(1);
+
+ pr_info("delete entry idx = %d\n", index);
+ return 0;
+}
+EXPORT_SYMBOL(entry_delete);
+
+/* debugfs "hnat_setting" type=0 handler: set the verbosity level and
+ * print the usage/help text to the kernel log.  Always returns 0. */
+int cr_set_usage(int level)
+{
+ debug_level = level;
+ pr_info("Dump hnat CR: cat /sys/kernel/debug/hnat/hnat_setting\n\n");
+ pr_info("====================Advanced Settings====================\n");
+ pr_info("Usage: echo [type] [option] > /sys/kernel/debug/hnat/hnat_setting\n\n");
+ pr_info("Commands: [type] [option]\n");
+ pr_info(" 0 0~7 Set debug_level(0~7), current debug_level=%d\n",
+ debug_level);
+ pr_info(" 1 0~65535 Set binding threshold\n");
+ pr_info(" 2 0~65535 Set TCP bind lifetime\n");
+ pr_info(" 3 0~65535 Set FIN bind lifetime\n");
+ pr_info(" 4 0~65535 Set UDP bind lifetime\n");
+ pr_info(" 5 0~255 Set TCP keep alive interval\n");
+ pr_info(" 6 0~255 Set UDP keep alive interval\n");
+
+ return 0;
+}
+
+/* Program the PPE binding-threshold register (PPE_BNDR) that gates
+ * unbind -> bind promotion.  Always returns 0. */
+int binding_threshold(int threshold)
+{
+ pr_info("Binding Threshold =%d\n", threshold);
+ writel(threshold, hnat_priv->ppe_base + PPE_BNDR);
+ return 0;
+}
+
+/* Program the ageing delta for bound TCP FOE entries
+ * (PPE_BND_AGE_1.TCP_DLTA).  Always returns 0. */
+int tcp_bind_lifetime(int tcp_life)
+{
+ pr_info("tcp_life = %d\n", tcp_life);
+ /* set Delta time for aging out a bound TCP FOE entry */
+ cr_set_field(hnat_priv->ppe_base + PPE_BND_AGE_1, TCP_DLTA, tcp_life);
+
+ return 0;
+}
+
+/* Set the aging delta for bound TCP entries that have seen FIN (time
+ * units per hardware spec -- TODO confirm). */
+int fin_bind_lifetime(int fin_life)
+{
+ pr_info("fin_life = %d\n", fin_life);
+ /* set Delta time for aging out an bind TCP FIN FOE entry */
+ cr_set_field(hnat_priv->ppe_base + PPE_BND_AGE_1, FIN_DLTA, fin_life);
+
+ return 0;
+}
+
+/* Set the aging delta for bound UDP FOE entries (time units per
+ * hardware spec -- TODO confirm). */
+int udp_bind_lifetime(int udp_life)
+{
+ pr_info("udp_life = %d\n", udp_life);
+ /* set Delta time for aging out an bind UDP FOE entry */
+ cr_set_field(hnat_priv->ppe_base + PPE_BND_AGE_0, UDP_DLTA, udp_life);
+
+ return 0;
+}
+
+/* Program the TCP keep-alive interval for bound FOE entries.  The
+ * TCP_KA register field is 8 bits wide, so values above 255 are
+ * clamped.  Negative input is passed through unchecked -- NOTE(review):
+ * confirm cr_set_field masks it to the field width.
+ */
+int tcp_keep_alive(int tcp_interval)
+{
+ if (tcp_interval > 255) {
+ tcp_interval = 255;
+ pr_info("TCP keep alive max interval = 255\n");
+ } else {
+ pr_info("tcp_interval = %d\n", tcp_interval);
+ }
+ /* Keep alive time for bind FOE TCP entry */
+ cr_set_field(hnat_priv->ppe_base + PPE_KA, TCP_KA, tcp_interval);
+
+ return 0;
+}
+
+/* Program the UDP keep-alive interval for bound FOE entries; the
+ * UDP_KA register field is 8 bits wide, so values above 255 are
+ * clamped.
+ */
+int udp_keep_alive(int udp_interval)
+{
+ if (udp_interval > 255) {
+ udp_interval = 255;
+ /* NOTE(review): log text says "TCP/UDP" -- looks copy/pasted
+  * from tcp_keep_alive(); confirm before relying on the wording.
+  */
+ pr_info("TCP/UDP keep alive max interval = 255\n");
+ } else {
+ pr_info("udp_interval = %d\n", udp_interval);
+ }
+ /* Keep alive timer for bind FOE UDP entry */
+ cr_set_field(hnat_priv->ppe_base + PPE_KA, UDP_KA, udp_interval);
+
+ return 0;
+}
+
+/* Dispatch table for cpu_reason_write(): index = first token of the
+ * debugfs command line. */
+static const debugfs_write_func hnat_set_func[] = {
+ [0] = hnat_set_usage,
+ [1] = hnat_cpu_reason,
+};
+
+/* Dispatch table for hnat_entry_write(): index = first token of the
+ * debugfs command line (0=usage, 1=set state filter, 2=dump entry,
+ * 3=delete entry). */
+static const debugfs_write_func entry_set_func[] = {
+ [0] = entry_set_usage,
+ [1] = entry_set_state,
+ [2] = entry_detail,
+ [3] = entry_delete,
+};
+
+/* Dispatch table for hnat_setting_write(); entries mirror the usage
+ * text printed by cr_set_usage(). */
+static const debugfs_write_func cr_set_func[] = {
+ [0] = cr_set_usage, [1] = binding_threshold,
+ [2] = tcp_bind_lifetime, [3] = fin_bind_lifetime,
+ [4] = udp_bind_lifetime, [5] = tcp_keep_alive,
+ [6] = udp_keep_alive,
+};
+
+/* Read the hardware per-flow MIB counters for FOE entry @index and
+ * accumulate them into the software accounting record h->acct[index].
+ *
+ * Returns the updated record, or NULL when per-flow accounting is
+ * disabled or the MIB engine stays busy past the poll timeout.
+ */
+static struct hnat_accounting *hnat_get_count(struct mtk_hnat *h, u32 index)
+{
+ struct hnat_accounting *acount;
+ u32 val, cnt_r0, cnt_r1, cnt_r2;
+ int ret = -1;
+
+ if (!hnat_priv->data->per_flow_accounting)
+ return NULL;
+
+ /* bit 16 triggers a MIB fetch for the requested entry index */
+ writel(index | (1 << 16), h->ppe_base + PPE_MIB_SER_CR);
+ ret = readx_poll_timeout_atomic(readl, h->ppe_base + PPE_MIB_SER_CR, val,
+ !(val & BIT_MIB_BUSY), 20, 10000);
+ if (ret < 0) {
+ pr_notice("mib busy,please check later\n");
+ return NULL;
+ }
+ /* byte count spans R0 (low 32 bits) and R1 (low 16 bits); packet
+  * count spans R1 (high 16 bits) and R2 (low 24 bits)
+  */
+ cnt_r0 = readl(h->ppe_base + PPE_MIB_SER_R0);
+ cnt_r1 = readl(h->ppe_base + PPE_MIB_SER_R1);
+ cnt_r2 = readl(h->ppe_base + PPE_MIB_SER_R2);
+ acount = &h->acct[index];
+ acount->bytes += cnt_r0 + ((u64)(cnt_r1 & 0xffff) << 32);
+ acount->packets +=
+ ((cnt_r1 & 0xffff0000) >> 16) + ((cnt_r2 & 0xffffff) << 16);
+
+ return acount;
+}
+
+#define PRINT_COUNT(m, acount) {if (acount) \
+ seq_printf(m, "bytes=%llu|packets=%llu|", \
+ acount->bytes, acount->packets); }
+/* seq_file show routine: walk the whole FOE table and print one
+ * formatted line per valid entry, decoded according to the entry's
+ * packet type (IPv4 NAT/NAPT, IPv6 3T/5T route, 6RD, DS-Lite, and --
+ * on NETSYS v2 -- MAP-E).  Per-flow byte/packet counters are prepended
+ * when accounting is enabled.
+ */
+static int hnat_debug_show(struct seq_file *m, void *private)
+{
+ struct mtk_hnat *h = hnat_priv;
+ struct foe_entry *entry, *end;
+ unsigned char h_dest[ETH_ALEN];
+ unsigned char h_source[ETH_ALEN];
+ struct hnat_accounting *acount;
+ u32 entry_index = 0;
+
+ entry = h->foe_table_cpu;
+ end = h->foe_table_cpu + hnat_priv->foe_etry_num;
+ while (entry < end) {
+ /* skip invalid (state == 0) entries */
+ if (!entry->bfib1.state) {
+ entry++;
+ entry_index++;
+ continue;
+ }
+ acount = hnat_get_count(h, entry_index);
+ /* IPv4 NAPT: addresses and L4 ports are rewritten */
+ if (IS_IPV4_HNAPT(entry)) {
+ __be32 saddr = htonl(entry->ipv4_hnapt.sip);
+ __be32 daddr = htonl(entry->ipv4_hnapt.dip);
+ __be32 nsaddr = htonl(entry->ipv4_hnapt.new_sip);
+ __be32 ndaddr = htonl(entry->ipv4_hnapt.new_dip);
+
+ /* assemble printable MACs from the byte-swapped hi/lo fields */
+ *((u32 *)h_source) = swab32(entry->ipv4_hnapt.smac_hi);
+ *((u16 *)&h_source[4]) =
+ swab16(entry->ipv4_hnapt.smac_lo);
+ *((u32 *)h_dest) = swab32(entry->ipv4_hnapt.dmac_hi);
+ *((u16 *)&h_dest[4]) =
+ swab16(entry->ipv4_hnapt.dmac_lo);
+ PRINT_COUNT(m, acount);
+ seq_printf(m,
+ "addr=0x%p|index=%d|state=%s|type=%s|%pI4:%d->%pI4:%d=>%pI4:%d->%pI4:%d|%pM=>%pM|etype=0x%04x|info1=0x%x|info2=0x%x|vlan1=%d|vlan2=%d\n",
+ entry, ei(entry, end), es(entry), pt(entry), &saddr,
+ entry->ipv4_hnapt.sport, &daddr,
+ entry->ipv4_hnapt.dport, &nsaddr,
+ entry->ipv4_hnapt.new_sport, &ndaddr,
+ entry->ipv4_hnapt.new_dport, h_source, h_dest,
+ ntohs(entry->ipv4_hnapt.etype),
+ entry->ipv4_hnapt.info_blk1,
+ entry->ipv4_hnapt.info_blk2,
+ entry->ipv4_hnapt.vlan1,
+ entry->ipv4_hnapt.vlan2);
+ /* IPv4 NAT: addresses rewritten, ports untouched */
+ } else if (IS_IPV4_HNAT(entry)) {
+ __be32 saddr = htonl(entry->ipv4_hnapt.sip);
+ __be32 daddr = htonl(entry->ipv4_hnapt.dip);
+ __be32 nsaddr = htonl(entry->ipv4_hnapt.new_sip);
+ __be32 ndaddr = htonl(entry->ipv4_hnapt.new_dip);
+
+ *((u32 *)h_source) = swab32(entry->ipv4_hnapt.smac_hi);
+ *((u16 *)&h_source[4]) =
+ swab16(entry->ipv4_hnapt.smac_lo);
+ *((u32 *)h_dest) = swab32(entry->ipv4_hnapt.dmac_hi);
+ *((u16 *)&h_dest[4]) =
+ swab16(entry->ipv4_hnapt.dmac_lo);
+ PRINT_COUNT(m, acount);
+ seq_printf(m,
+ "addr=0x%p|index=%d|state=%s|type=%s|%pI4->%pI4=>%pI4->%pI4|%pM=>%pM|etype=0x%04x|info1=0x%x|info2=0x%x|vlan1=%d|vlan2=%d\n",
+ entry, ei(entry, end), es(entry), pt(entry), &saddr,
+ &daddr, &nsaddr, &ndaddr, h_source, h_dest,
+ ntohs(entry->ipv4_hnapt.etype),
+ entry->ipv4_hnapt.info_blk1,
+ entry->ipv4_hnapt.info_blk2,
+ entry->ipv4_hnapt.vlan1,
+ entry->ipv4_hnapt.vlan2);
+ /* IPv6 5-tuple route: includes L4 ports */
+ } else if (IS_IPV6_5T_ROUTE(entry)) {
+ u32 ipv6_sip0 = entry->ipv6_3t_route.ipv6_sip0;
+ u32 ipv6_sip1 = entry->ipv6_3t_route.ipv6_sip1;
+ u32 ipv6_sip2 = entry->ipv6_3t_route.ipv6_sip2;
+ u32 ipv6_sip3 = entry->ipv6_3t_route.ipv6_sip3;
+ u32 ipv6_dip0 = entry->ipv6_3t_route.ipv6_dip0;
+ u32 ipv6_dip1 = entry->ipv6_3t_route.ipv6_dip1;
+ u32 ipv6_dip2 = entry->ipv6_3t_route.ipv6_dip2;
+ u32 ipv6_dip3 = entry->ipv6_3t_route.ipv6_dip3;
+
+ *((u32 *)h_source) =
+ swab32(entry->ipv6_5t_route.smac_hi);
+ *((u16 *)&h_source[4]) =
+ swab16(entry->ipv6_5t_route.smac_lo);
+ *((u32 *)h_dest) = swab32(entry->ipv6_5t_route.dmac_hi);
+ *((u16 *)&h_dest[4]) =
+ swab16(entry->ipv6_5t_route.dmac_lo);
+ PRINT_COUNT(m, acount);
+ seq_printf(m,
+ "addr=0x%p|index=%d|state=%s|type=%s|SIP=%08x:%08x:%08x:%08x(sp=%d)->DIP=%08x:%08x:%08x:%08x(dp=%d)|%pM=>%pM|etype=0x%04x|info1=0x%x|info2=0x%x\n",
+ entry, ei(entry, end), es(entry), pt(entry), ipv6_sip0,
+ ipv6_sip1, ipv6_sip2, ipv6_sip3,
+ entry->ipv6_5t_route.sport, ipv6_dip0,
+ ipv6_dip1, ipv6_dip2, ipv6_dip3,
+ entry->ipv6_5t_route.dport, h_source, h_dest,
+ ntohs(entry->ipv6_5t_route.etype),
+ entry->ipv6_5t_route.info_blk1,
+ entry->ipv6_5t_route.info_blk2);
+ /* IPv6 3-tuple route: addresses + protocol only */
+ } else if (IS_IPV6_3T_ROUTE(entry)) {
+ u32 ipv6_sip0 = entry->ipv6_3t_route.ipv6_sip0;
+ u32 ipv6_sip1 = entry->ipv6_3t_route.ipv6_sip1;
+ u32 ipv6_sip2 = entry->ipv6_3t_route.ipv6_sip2;
+ u32 ipv6_sip3 = entry->ipv6_3t_route.ipv6_sip3;
+ u32 ipv6_dip0 = entry->ipv6_3t_route.ipv6_dip0;
+ u32 ipv6_dip1 = entry->ipv6_3t_route.ipv6_dip1;
+ u32 ipv6_dip2 = entry->ipv6_3t_route.ipv6_dip2;
+ u32 ipv6_dip3 = entry->ipv6_3t_route.ipv6_dip3;
+
+ *((u32 *)h_source) =
+ swab32(entry->ipv6_5t_route.smac_hi);
+ *((u16 *)&h_source[4]) =
+ swab16(entry->ipv6_5t_route.smac_lo);
+ *((u32 *)h_dest) = swab32(entry->ipv6_5t_route.dmac_hi);
+ *((u16 *)&h_dest[4]) =
+ swab16(entry->ipv6_5t_route.dmac_lo);
+ PRINT_COUNT(m, acount);
+ seq_printf(m,
+ "addr=0x%p|index=%d|state=%s|type=%s|SIP=%08x:%08x:%08x:%08x->DIP=%08x:%08x:%08x:%08x|%pM=>%pM|etype=0x%04x|info1=0x%x|info2=0x%x\n",
+ entry, ei(entry, end), es(entry), pt(entry), ipv6_sip0,
+ ipv6_sip1, ipv6_sip2, ipv6_sip3, ipv6_dip0,
+ ipv6_dip1, ipv6_dip2, ipv6_dip3, h_source,
+ h_dest, ntohs(entry->ipv6_5t_route.etype),
+ entry->ipv6_5t_route.info_blk1,
+ entry->ipv6_5t_route.info_blk2);
+ /* 6RD: IPv6 flow plus the IPv4 tunnel endpoints */
+ } else if (IS_IPV6_6RD(entry)) {
+ u32 ipv6_sip0 = entry->ipv6_3t_route.ipv6_sip0;
+ u32 ipv6_sip1 = entry->ipv6_3t_route.ipv6_sip1;
+ u32 ipv6_sip2 = entry->ipv6_3t_route.ipv6_sip2;
+ u32 ipv6_sip3 = entry->ipv6_3t_route.ipv6_sip3;
+ u32 ipv6_dip0 = entry->ipv6_3t_route.ipv6_dip0;
+ u32 ipv6_dip1 = entry->ipv6_3t_route.ipv6_dip1;
+ u32 ipv6_dip2 = entry->ipv6_3t_route.ipv6_dip2;
+ u32 ipv6_dip3 = entry->ipv6_3t_route.ipv6_dip3;
+ __be32 tsaddr = htonl(entry->ipv6_6rd.tunnel_sipv4);
+ __be32 tdaddr = htonl(entry->ipv6_6rd.tunnel_dipv4);
+
+ *((u32 *)h_source) =
+ swab32(entry->ipv6_5t_route.smac_hi);
+ *((u16 *)&h_source[4]) =
+ swab16(entry->ipv6_5t_route.smac_lo);
+ *((u32 *)h_dest) = swab32(entry->ipv6_5t_route.dmac_hi);
+ *((u16 *)&h_dest[4]) =
+ swab16(entry->ipv6_5t_route.dmac_lo);
+ PRINT_COUNT(m, acount);
+ seq_printf(m,
+ "addr=0x%p|index=%d|state=%s|type=%s|SIP=%08x:%08x:%08x:%08x(sp=%d)->DIP=%08x:%08x:%08x:%08x(dp=%d)|TSIP=%pI4->TDIP=%pI4|%pM=>%pM|etype=0x%04x|info1=0x%x|info2=0x%x\n",
+ entry, ei(entry, end), es(entry), pt(entry), ipv6_sip0,
+ ipv6_sip1, ipv6_sip2, ipv6_sip3,
+ entry->ipv6_5t_route.sport, ipv6_dip0,
+ ipv6_dip1, ipv6_dip2, ipv6_dip3,
+ entry->ipv6_5t_route.dport, &tsaddr, &tdaddr,
+ h_source, h_dest,
+ ntohs(entry->ipv6_5t_route.etype),
+ entry->ipv6_5t_route.info_blk1,
+ entry->ipv6_5t_route.info_blk2);
+ /* DS-Lite: IPv4 flow plus the IPv6 tunnel endpoints */
+ } else if (IS_IPV4_DSLITE(entry)) {
+ __be32 saddr = htonl(entry->ipv4_hnapt.sip);
+ __be32 daddr = htonl(entry->ipv4_hnapt.dip);
+ u32 ipv6_tsip0 = entry->ipv4_dslite.tunnel_sipv6_0;
+ u32 ipv6_tsip1 = entry->ipv4_dslite.tunnel_sipv6_1;
+ u32 ipv6_tsip2 = entry->ipv4_dslite.tunnel_sipv6_2;
+ u32 ipv6_tsip3 = entry->ipv4_dslite.tunnel_sipv6_3;
+ u32 ipv6_tdip0 = entry->ipv4_dslite.tunnel_dipv6_0;
+ u32 ipv6_tdip1 = entry->ipv4_dslite.tunnel_dipv6_1;
+ u32 ipv6_tdip2 = entry->ipv4_dslite.tunnel_dipv6_2;
+ u32 ipv6_tdip3 = entry->ipv4_dslite.tunnel_dipv6_3;
+
+ *((u32 *)h_source) = swab32(entry->ipv4_dslite.smac_hi);
+ *((u16 *)&h_source[4]) =
+ swab16(entry->ipv4_dslite.smac_lo);
+ *((u32 *)h_dest) = swab32(entry->ipv4_dslite.dmac_hi);
+ *((u16 *)&h_dest[4]) =
+ swab16(entry->ipv4_dslite.dmac_lo);
+ PRINT_COUNT(m, acount);
+ seq_printf(m,
+ "addr=0x%p|index=%d|state=%s|type=%s|SIP=%pI4->DIP=%pI4|TSIP=%08x:%08x:%08x:%08x->TDIP=%08x:%08x:%08x:%08x|%pM=>%pM|etype=0x%04x|info1=0x%x|info2=0x%x\n",
+ entry, ei(entry, end), es(entry), pt(entry), &saddr,
+ &daddr, ipv6_tsip0, ipv6_tsip1, ipv6_tsip2,
+ ipv6_tsip3, ipv6_tdip0, ipv6_tdip1, ipv6_tdip2,
+ ipv6_tdip3, h_source, h_dest,
+ ntohs(entry->ipv6_5t_route.etype),
+ entry->ipv6_5t_route.info_blk1,
+ entry->ipv6_5t_route.info_blk2);
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+ /* MAP-E (NETSYS v2 only): NAPT rewrite plus IPv6 tunnel */
+ } else if (IS_IPV4_MAPE(entry)) {
+ __be32 saddr = htonl(entry->ipv4_dslite.sip);
+ __be32 daddr = htonl(entry->ipv4_dslite.dip);
+ __be32 nsaddr = htonl(entry->ipv4_dslite.new_sip);
+ __be32 ndaddr = htonl(entry->ipv4_dslite.new_dip);
+ u32 ipv6_tsip0 = entry->ipv4_dslite.tunnel_sipv6_0;
+ u32 ipv6_tsip1 = entry->ipv4_dslite.tunnel_sipv6_1;
+ u32 ipv6_tsip2 = entry->ipv4_dslite.tunnel_sipv6_2;
+ u32 ipv6_tsip3 = entry->ipv4_dslite.tunnel_sipv6_3;
+ u32 ipv6_tdip0 = entry->ipv4_dslite.tunnel_dipv6_0;
+ u32 ipv6_tdip1 = entry->ipv4_dslite.tunnel_dipv6_1;
+ u32 ipv6_tdip2 = entry->ipv4_dslite.tunnel_dipv6_2;
+ u32 ipv6_tdip3 = entry->ipv4_dslite.tunnel_dipv6_3;
+
+ *((u32 *)h_source) = swab32(entry->ipv4_dslite.smac_hi);
+ *((u16 *)&h_source[4]) =
+ swab16(entry->ipv4_dslite.smac_lo);
+ *((u32 *)h_dest) = swab32(entry->ipv4_dslite.dmac_hi);
+ *((u16 *)&h_dest[4]) =
+ swab16(entry->ipv4_dslite.dmac_lo);
+ PRINT_COUNT(m, acount);
+ seq_printf(m,
+ "addr=0x%p|index=%d|state=%s|type=%s|SIP=%pI4:%d->DIP=%pI4:%d|NSIP=%pI4:%d->NDIP=%pI4:%d|TSIP=%08x:%08x:%08x:%08x->TDIP=%08x:%08x:%08x:%08x|%pM=>%pM|etype=0x%04x|info1=0x%x|info2=0x%x\n",
+ entry, ei(entry, end), es(entry), pt(entry),
+ &saddr, entry->ipv4_dslite.sport,
+ &daddr, entry->ipv4_dslite.dport,
+ &nsaddr, entry->ipv4_dslite.new_sport,
+ &ndaddr, entry->ipv4_dslite.new_dport,
+ ipv6_tsip0, ipv6_tsip1, ipv6_tsip2,
+ ipv6_tsip3, ipv6_tdip0, ipv6_tdip1,
+ ipv6_tdip2, ipv6_tdip3, h_source, h_dest,
+ ntohs(entry->ipv6_5t_route.etype),
+ entry->ipv6_5t_route.info_blk1,
+ entry->ipv6_5t_route.info_blk2);
+#endif
+ } else
+ seq_printf(m, "addr=0x%p|index=%d state=%s\n", entry, ei(entry, end),
+ es(entry));
+ entry++;
+ entry_index++;
+ }
+
+ return 0;
+}
+
+/* debugfs open hook: bind the FOE-table dump show routine. */
+static int hnat_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hnat_debug_show, file->private_data);
+}
+
+/* File operations for the read-only FOE-table dump debugfs node. */
+static const struct file_operations hnat_debug_fops = {
+ .open = hnat_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* seq_file show routine: list the registered WiFi hook interfaces,
+ * one "index:name" pair per line; empty slots are skipped.
+ * (The original had a redundant "else continue;" branch at the end of
+ * the loop body, which is a no-op.)
+ */
+static int hnat_whnat_show(struct seq_file *m, void *private)
+{
+ int i;
+ struct net_device *dev;
+
+ for (i = 0; i < MAX_IF_NUM; i++) {
+ dev = hnat_priv->wifi_hook_if[i];
+ if (dev)
+ seq_printf(m, "%d:%s\n", i, dev->name);
+ }
+
+ return 0;
+}
+
+/* debugfs open hook: bind the WiFi hook interface listing. */
+static int hnat_whnat_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hnat_whnat_show, file->private_data);
+}
+
+/* File operations for the read-only WiFi hook interface list node. */
+static const struct file_operations hnat_whnat_fops = {
+ .open = hnat_whnat_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* Dump the per-CPU-reason packet counters to the kernel log and reset
+ * them.  The parenthesized number is the hardware CPU reason code.
+ *
+ * NOTE(review): despite being a seq_file show routine this prints via
+ * pr_info, so the output lands in dmesg rather than the debugfs file.
+ */
+int cpu_reason_read(struct seq_file *m, void *private)
+{
+ int i;
+
+ pr_info("============ CPU REASON =========\n");
+ pr_info("(2)IPv4(IPv6) TTL(hop limit) = %u\n", dbg_cpu_reason_cnt[0]);
+ pr_info("(3)Ipv4(IPv6) has option(extension) header = %u\n",
+ dbg_cpu_reason_cnt[1]);
+ pr_info("(7)No flow is assigned = %u\n", dbg_cpu_reason_cnt[2]);
+ pr_info("(8)IPv4 HNAT doesn't support IPv4 /w fragment = %u\n",
+ dbg_cpu_reason_cnt[3]);
+ pr_info("(9)IPv4 HNAPT/DS-Lite doesn't support IPv4 /w fragment = %u\n",
+ dbg_cpu_reason_cnt[4]);
+ pr_info("(10)IPv4 HNAPT/DS-Lite can't find TCP/UDP sport/dport = %u\n",
+ dbg_cpu_reason_cnt[5]);
+ pr_info("(11)IPv6 5T-route/6RD can't find TCP/UDP sport/dport = %u\n",
+ dbg_cpu_reason_cnt[6]);
+ pr_info("(12)Ingress packet is TCP fin/syn/rst = %u\n",
+ dbg_cpu_reason_cnt[7]);
+ pr_info("(13)FOE Un-hit = %u\n", dbg_cpu_reason_cnt[8]);
+ pr_info("(14)FOE Hit unbind = %u\n", dbg_cpu_reason_cnt[9]);
+ pr_info("(15)FOE Hit unbind & rate reach = %u\n",
+ dbg_cpu_reason_cnt[10]);
+ pr_info("(16)Hit bind PPE TCP FIN entry = %u\n",
+ dbg_cpu_reason_cnt[11]);
+ pr_info("(17)Hit bind PPE entry and TTL(hop limit) = 1 and TTL(hot limit) - 1 = %u\n",
+ dbg_cpu_reason_cnt[12]);
+ pr_info("(18)Hit bind and VLAN replacement violation = %u\n",
+ dbg_cpu_reason_cnt[13]);
+ pr_info("(19)Hit bind and keep alive with unicast old-header packet = %u\n",
+ dbg_cpu_reason_cnt[14]);
+ pr_info("(20)Hit bind and keep alive with multicast new-header packet = %u\n",
+ dbg_cpu_reason_cnt[15]);
+ pr_info("(21)Hit bind and keep alive with duplicate old-header packet = %u\n",
+ dbg_cpu_reason_cnt[16]);
+ pr_info("(22)FOE Hit bind & force to CPU = %u\n",
+ dbg_cpu_reason_cnt[17]);
+ /* label fixed: this counter is CPU reason 23 (was mislabelled "(28)",
+  * breaking the otherwise sequential 22,23,24,25,26 listing)
+  */
+ pr_info("(23)Hit bind and exceed MTU =%u\n", dbg_cpu_reason_cnt[18]);
+ pr_info("(24)Hit bind multicast packet to CPU = %u\n",
+ dbg_cpu_reason_cnt[19]);
+ pr_info("(25)Hit bind multicast packet to GMAC & CPU = %u\n",
+ dbg_cpu_reason_cnt[20]);
+ pr_info("(26)Pre bind = %u\n", dbg_cpu_reason_cnt[21]);
+
+ /* counters are read-and-clear at this level */
+ for (i = 0; i < 22; i++)
+ dbg_cpu_reason_cnt[i] = 0;
+ return 0;
+}
+
+/* debugfs open hook: bind the CPU-reason counter dump. */
+static int cpu_reason_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, cpu_reason_read, file->private_data);
+}
+
+/* debugfs write handler: parse "<cmd> <arg>" and dispatch through
+ * hnat_set_func[] (0 = usage/debug level, 1 = cpu-reason trap).
+ * Unknown commands are remapped to command 0.
+ */
+ssize_t cpu_reason_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *data)
+{
+ char buf[32];
+ char *p_buf;
+ int len = count;
+ long arg0 = 0, arg1 = 0;
+ char *p_token = NULL;
+ char *p_delimiter = " \t";
+ int ret;
+
+ /* reject over-long input; buf keeps one byte for the terminator */
+ if (len >= sizeof(buf)) {
+ pr_info("input handling fail!\n");
+ len = sizeof(buf) - 1;
+ return -1;
+ }
+
+ if (copy_from_user(buf, buffer, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+
+ p_buf = buf;
+ p_token = strsep(&p_buf, p_delimiter);
+ /* NOTE(review): kstrtol's return value is ignored; malformed input
+  * leaves arg0/arg1 at 0 and still invokes handler 0.
+  */
+ if (!p_token)
+ arg0 = 0;
+ else
+ ret = kstrtol(p_token, 10, &arg0);
+
+ /* the switch doubles as bounds validation for the table index */
+ switch (arg0) {
+ case 0:
+ case 1:
+ p_token = strsep(&p_buf, p_delimiter);
+ if (!p_token)
+ arg1 = 0;
+ else
+ ret = kstrtol(p_token, 10, &arg1);
+ break;
+ default:
+ pr_info("no handler defined for command id(0x%08lx)\n\r", arg0);
+ arg0 = 0;
+ arg1 = 0;
+ break;
+ }
+
+ (*hnat_set_func[arg0])(arg1);
+
+ return len;
+}
+
+/* File operations for the read/write CPU-reason debugfs node. */
+static const struct file_operations cpu_reason_fops = {
+ .open = cpu_reason_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = cpu_reason_write,
+ .release = single_release,
+};
+
+/* Print a compact one-line summary of FOE entry @index into @m,
+ * formatted per the entry's packet type.
+ *
+ * The IPv4 addresses are pre-decoded through the ipv4_hnapt union view
+ * regardless of entry type; they are only consumed by the IPv4
+ * branches, so this is harmless for IPv6 entries.
+ */
+void dbg_dump_entry(struct seq_file *m, struct foe_entry *entry,
+ uint32_t index)
+{
+ __be32 saddr, daddr, nsaddr, ndaddr;
+
+ saddr = htonl(entry->ipv4_hnapt.sip);
+ daddr = htonl(entry->ipv4_hnapt.dip);
+ nsaddr = htonl(entry->ipv4_hnapt.new_sip);
+ ndaddr = htonl(entry->ipv4_hnapt.new_dip);
+
+ if (IS_IPV4_HNAPT(entry)) {
+ seq_printf(m,
+ "NAPT(%d): %pI4:%d->%pI4:%d => %pI4:%d->%pI4:%d\n",
+ index, &saddr, entry->ipv4_hnapt.sport, &daddr,
+ entry->ipv4_hnapt.dport, &nsaddr,
+ entry->ipv4_hnapt.new_sport, &ndaddr,
+ entry->ipv4_hnapt.new_dport);
+ } else if (IS_IPV4_HNAT(entry)) {
+ seq_printf(m, "NAT(%d): %pI4->%pI4 => %pI4->%pI4\n",
+ index, &saddr, &daddr, &nsaddr, &ndaddr);
+ }
+
+ if (IS_IPV4_DSLITE(entry)) {
+ seq_printf(m,
+ "IPv4 Ds-Lite(%d): %pI4:%d->%pI4:%d => %08X:%08X:%08X:%08X->%08X:%08X:%08X:%08X\n",
+ index, &saddr, entry->ipv4_dslite.sport, &daddr,
+ entry->ipv4_dslite.dport,
+ entry->ipv4_dslite.tunnel_sipv6_0,
+ entry->ipv4_dslite.tunnel_sipv6_1,
+ entry->ipv4_dslite.tunnel_sipv6_2,
+ entry->ipv4_dslite.tunnel_sipv6_3,
+ entry->ipv4_dslite.tunnel_dipv6_0,
+ entry->ipv4_dslite.tunnel_dipv6_1,
+ entry->ipv4_dslite.tunnel_dipv6_2,
+ entry->ipv4_dslite.tunnel_dipv6_3);
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+ } else if (IS_IPV4_MAPE(entry)) {
+ /* MAP-E re-reads the rewritten pair from the dslite view */
+ nsaddr = htonl(entry->ipv4_dslite.new_sip);
+ ndaddr = htonl(entry->ipv4_dslite.new_dip);
+
+ seq_printf(m,
+ "IPv4 MAP-E(%d): %pI4:%d->%pI4:%d => %pI4:%d->%pI4:%d | Tunnel=%08X:%08X:%08X:%08X->%08X:%08X:%08X:%08X\n",
+ index, &saddr, entry->ipv4_dslite.sport,
+ &daddr, entry->ipv4_dslite.dport,
+ &nsaddr, entry->ipv4_dslite.new_sport,
+ &ndaddr, entry->ipv4_dslite.new_dport,
+ entry->ipv4_dslite.tunnel_sipv6_0,
+ entry->ipv4_dslite.tunnel_sipv6_1,
+ entry->ipv4_dslite.tunnel_sipv6_2,
+ entry->ipv4_dslite.tunnel_sipv6_3,
+ entry->ipv4_dslite.tunnel_dipv6_0,
+ entry->ipv4_dslite.tunnel_dipv6_1,
+ entry->ipv4_dslite.tunnel_dipv6_2,
+ entry->ipv4_dslite.tunnel_dipv6_3);
+#endif
+ } else if (IS_IPV6_3T_ROUTE(entry)) {
+ seq_printf(m,
+ "IPv6_3T(%d): %08X:%08X:%08X:%08X => %08X:%08X:%08X:%08X (Prot=%d)\n",
+ index, entry->ipv6_3t_route.ipv6_sip0,
+ entry->ipv6_3t_route.ipv6_sip1,
+ entry->ipv6_3t_route.ipv6_sip2,
+ entry->ipv6_3t_route.ipv6_sip3,
+ entry->ipv6_3t_route.ipv6_dip0,
+ entry->ipv6_3t_route.ipv6_dip1,
+ entry->ipv6_3t_route.ipv6_dip2,
+ entry->ipv6_3t_route.ipv6_dip3,
+ entry->ipv6_3t_route.prot);
+ } else if (IS_IPV6_5T_ROUTE(entry)) {
+ seq_printf(m,
+ "IPv6_5T(%d): %08X:%08X:%08X:%08X:%d => %08X:%08X:%08X:%08X:%d\n",
+ index, entry->ipv6_5t_route.ipv6_sip0,
+ entry->ipv6_5t_route.ipv6_sip1,
+ entry->ipv6_5t_route.ipv6_sip2,
+ entry->ipv6_5t_route.ipv6_sip3,
+ entry->ipv6_5t_route.sport,
+ entry->ipv6_5t_route.ipv6_dip0,
+ entry->ipv6_5t_route.ipv6_dip1,
+ entry->ipv6_5t_route.ipv6_dip2,
+ entry->ipv6_5t_route.ipv6_dip3,
+ entry->ipv6_5t_route.dport);
+ } else if (IS_IPV6_6RD(entry)) {
+ seq_printf(m,
+ "IPv6_6RD(%d): %08X:%08X:%08X:%08X:%d => %08X:%08X:%08X:%08X:%d\n",
+ index, entry->ipv6_6rd.ipv6_sip0,
+ entry->ipv6_6rd.ipv6_sip1, entry->ipv6_6rd.ipv6_sip2,
+ entry->ipv6_6rd.ipv6_sip3, entry->ipv6_6rd.sport,
+ entry->ipv6_6rd.ipv6_dip0, entry->ipv6_6rd.ipv6_dip1,
+ entry->ipv6_6rd.ipv6_dip2, entry->ipv6_6rd.ipv6_dip3,
+ entry->ipv6_6rd.dport);
+ }
+}
+
+/* seq_file show routine: dump every FOE entry whose state matches the
+ * global dbg_entry_state filter (set via hnat_entry_write), then print
+ * the total count for that state.
+ */
+int hnat_entry_read(struct seq_file *m, void *private)
+{
+ struct mtk_hnat *h = hnat_priv;
+ struct foe_entry *entry, *end;
+ int hash_index;
+ int cnt;
+
+ hash_index = 0;
+ cnt = 0;
+ entry = h->foe_table_cpu;
+ end = h->foe_table_cpu + hnat_priv->foe_etry_num;
+
+ while (entry < end) {
+ if (entry->bfib1.state == dbg_entry_state) {
+ cnt++;
+ dbg_dump_entry(m, entry, hash_index);
+ }
+ hash_index++;
+ entry++;
+ }
+
+ /* state codes: 0=Invalid 1=Unbind 2=BIND 3=FIN */
+ seq_printf(m, "Total State = %s cnt = %d\n",
+ dbg_entry_state == 0 ?
+ "Invalid" : dbg_entry_state == 1 ?
+ "Unbind" : dbg_entry_state == 2 ?
+ "BIND" : dbg_entry_state == 3 ?
+ "FIN" : "Unknown", cnt);
+
+ return 0;
+}
+
+/* debugfs write handler: parse "<cmd> <arg>" and dispatch through
+ * entry_set_func[] (0=usage, 1=state filter, 2=dump entry, 3=delete
+ * entry).  Unknown commands are remapped to command 0.
+ */
+ssize_t hnat_entry_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *data)
+{
+ char buf[32];
+ char *p_buf;
+ int len = count;
+ long arg0 = 0, arg1 = 0;
+ char *p_token = NULL;
+ char *p_delimiter = " \t";
+ int ret;
+
+ /* reject over-long input; buf keeps one byte for the terminator */
+ if (len >= sizeof(buf)) {
+ pr_info("input handling fail!\n");
+ len = sizeof(buf) - 1;
+ return -1;
+ }
+
+ if (copy_from_user(buf, buffer, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+
+ p_buf = buf;
+ p_token = strsep(&p_buf, p_delimiter);
+ /* NOTE(review): kstrtol's return value is ignored; malformed input
+  * leaves arg0/arg1 at 0 and still invokes handler 0.
+  */
+ if (!p_token)
+ arg0 = 0;
+ else
+ ret = kstrtol(p_token, 10, &arg0);
+
+ /* the switch doubles as bounds validation for the table index */
+ switch (arg0) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ p_token = strsep(&p_buf, p_delimiter);
+ if (!p_token)
+ arg1 = 0;
+ else
+ ret = kstrtol(p_token, 10, &arg1);
+ break;
+ default:
+ pr_info("no handler defined for command id(0x%08lx)\n\r", arg0);
+ arg0 = 0;
+ arg1 = 0;
+ break;
+ }
+
+ (*entry_set_func[arg0])(arg1);
+
+ return len;
+}
+
+/* debugfs open hook: bind the state-filtered entry dump. */
+static int hnat_entry_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hnat_entry_read, file->private_data);
+}
+
+/* File operations for the read/write hnat_entry debugfs node. */
+static const struct file_operations hnat_entry_fops = {
+ .open = hnat_entry_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = hnat_entry_write,
+ .release = single_release,
+};
+
+/* Dump the first 319 PPE control registers (offsets 0x0-0x4fc) to the
+ * kernel log, 16 bytes per line.
+ *
+ * Fix: the printed address now matches the register location the
+ * values are read from (ppe_base + i); the original printed the FOE
+ * table DMA address (foe_table_dev + i), which did not correspond to
+ * the dumped data.
+ */
+int hnat_setting_read(struct seq_file *m, void *private)
+{
+ struct mtk_hnat *h = hnat_priv;
+ int i;
+ int cr_max;
+
+ cr_max = 319 * 4;
+ for (i = 0; i < cr_max; i = i + 0x10) {
+ pr_info("0x%p : 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ (void *)h->ppe_base + i, readl(h->ppe_base + i),
+ readl(h->ppe_base + i + 4), readl(h->ppe_base + i + 8),
+ readl(h->ppe_base + i + 0xc));
+ }
+
+ return 0;
+}
+
+/* debugfs open hook: bind the PPE register dump. */
+static int hnat_setting_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hnat_setting_read, file->private_data);
+}
+
+/* debugfs write handler: parse "<type> <option>" and dispatch through
+ * cr_set_func[] (see cr_set_usage() for the command list).  Unknown
+ * commands are remapped to command 0.
+ */
+ssize_t hnat_setting_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *data)
+{
+ char buf[32];
+ char *p_buf;
+ int len = count;
+ long arg0 = 0, arg1 = 0;
+ char *p_token = NULL;
+ char *p_delimiter = " \t";
+ int ret;
+
+ /* reject over-long input; buf keeps one byte for the terminator */
+ if (len >= sizeof(buf)) {
+ pr_info("input handling fail!\n");
+ len = sizeof(buf) - 1;
+ return -1;
+ }
+
+ if (copy_from_user(buf, buffer, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+
+ p_buf = buf;
+ p_token = strsep(&p_buf, p_delimiter);
+ /* NOTE(review): kstrtol's return value is ignored; malformed input
+  * leaves arg0/arg1 at 0 and still invokes handler 0.
+  */
+ if (!p_token)
+ arg0 = 0;
+ else
+ ret = kstrtol(p_token, 10, &arg0);
+
+ /* the switch doubles as bounds validation for the table index */
+ switch (arg0) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ p_token = strsep(&p_buf, p_delimiter);
+ if (!p_token)
+ arg1 = 0;
+ else
+ ret = kstrtol(p_token, 10, &arg1);
+ break;
+ default:
+ pr_info("no handler defined for command id(0x%08lx)\n\r", arg0);
+ arg0 = 0;
+ arg1 = 0;
+ break;
+ }
+
+ (*cr_set_func[arg0])(arg1);
+
+ return len;
+}
+
+/* File operations for the read/write hnat_setting debugfs node. */
+static const struct file_operations hnat_setting_fops = {
+ .open = hnat_setting_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = hnat_setting_write,
+ .release = single_release,
+};
+
+/* Dump the PPE multicast table: the first 16 entries live in PPE
+ * register space, the remainder in the frame-engine register space.
+ * Port masks are rendered as one flag character per port.
+ *
+ * NOTE(review): despite being a seq_file show routine this prints via
+ * pr_info, so the output lands in dmesg rather than the debugfs file.
+ */
+int mcast_table_dump(struct seq_file *m, void *private)
+{
+ struct mtk_hnat *h = hnat_priv;
+ struct ppe_mcast_h mcast_h;
+ struct ppe_mcast_l mcast_l;
+ u8 i, max;
+ void __iomem *reg;
+
+ if (!h->pmcast)
+ return 0;
+
+ max = h->pmcast->max_entry;
+ pr_info("MAC | VID | PortMask | QosPortMask\n");
+ for (i = 0; i < max; i++) {
+ if (i < 0x10) {
+ reg = h->ppe_base + PPE_MCAST_H_0 + i * 8;
+ mcast_h.u.value = readl(reg);
+ reg = h->ppe_base + PPE_MCAST_L_0 + i * 8;
+ mcast_l.addr = readl(reg);
+ } else {
+ reg = h->fe_base + PPE_MCAST_H_10 + (i - 0x10) * 8;
+ mcast_h.u.value = readl(reg);
+ reg = h->fe_base + PPE_MCAST_L_10 + (i - 0x10) * 8;
+ mcast_l.addr = readl(reg);
+ }
+ /* QID is split across the 4-bit mc_qos_qid field and the
+  * two-bit mc_qos_qid54 extension (bits 5:4)
+  */
+ pr_info("%08x %d %c%c%c%c %c%c%c%c (QID=%d, mc_mpre_sel=%d)\n",
+ mcast_l.addr,
+ mcast_h.u.info.mc_vid,
+ (mcast_h.u.info.mc_px_en & 0x08) ? '1' : '-',
+ (mcast_h.u.info.mc_px_en & 0x04) ? '1' : '-',
+ (mcast_h.u.info.mc_px_en & 0x02) ? '1' : '-',
+ (mcast_h.u.info.mc_px_en & 0x01) ? '1' : '-',
+ (mcast_h.u.info.mc_px_qos_en & 0x08) ? '1' : '-',
+ (mcast_h.u.info.mc_px_qos_en & 0x04) ? '1' : '-',
+ (mcast_h.u.info.mc_px_qos_en & 0x02) ? '1' : '-',
+ (mcast_h.u.info.mc_px_qos_en & 0x01) ? '1' : '-',
+ mcast_h.u.info.mc_qos_qid +
+ ((mcast_h.u.info.mc_qos_qid54) << 4),
+ mcast_h.u.info.mc_mpre_sel);
+ }
+
+ return 0;
+}
+
+/* debugfs open hook: bind the multicast table dump. */
+static int mcast_table_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mcast_table_dump, file->private_data);
+}
+
+/* File operations for the read-only multicast table debugfs node. */
+static const struct file_operations hnat_mcast_fops = {
+ .open = mcast_table_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* seq_file show routine: list registered external (ext) interfaces;
+ * iteration stops at the first empty slot in ext_if[].
+ */
+static int hnat_ext_show(struct seq_file *m, void *private)
+{
+ int i;
+ struct extdev_entry *ext_entry;
+
+ for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
+ ext_entry = hnat_priv->ext_if[i];
+ if (ext_entry->dev)
+ seq_printf(m, "ext devices [%d] = %s (dev=%p, ifindex=%d)\n",
+ i, ext_entry->name, ext_entry->dev,
+ ext_entry->dev->ifindex);
+ }
+
+ return 0;
+}
+
+/* debugfs open hook: bind the ext interface listing. */
+static int hnat_ext_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hnat_ext_show, file->private_data);
+}
+
+/* File operations for the read-only ext interface list node. */
+static const struct file_operations hnat_ext_fops = {
+ .open = hnat_ext_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* debugfs read handler for QDMA TX scheduler @id (id is stashed in
+ * file->private_data): render the enable bit, SP/WRR mode, decoded max
+ * rate, and the list of queues bound to this scheduler into a kmalloc
+ * buffer, then copy it to user space.
+ */
+static ssize_t hnat_sched_show(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ long id = (long)file->private_data;
+ struct mtk_hnat *h = hnat_priv;
+ u32 qdma_tx_sch;
+ int enable;
+ int scheduling;
+ int max_rate;
+ char *buf;
+ unsigned int len = 0, buf_len = 1500;
+ ssize_t ret_cnt;
+ int scheduler, i;
+ u32 sch_reg;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (hnat_priv->data->num_of_sch == 4)
+ qdma_tx_sch = readl(h->fe_base + QDMA_TX_4SCH_BASE(id));
+ else
+ qdma_tx_sch = readl(h->fe_base + QDMA_TX_2SCH_BASE);
+
+ /* two schedulers share one register: odd ids use the high half-word */
+ if (id & 0x1)
+ qdma_tx_sch >>= 16;
+ qdma_tx_sch &= 0xffff;
+ enable = !!(qdma_tx_sch & BIT(11));
+ scheduling = !!(qdma_tx_sch & BIT(15));
+ /* rate = 7-bit mantissa (bits 10:4) times 10^exponent (bits 3:0) */
+ max_rate = ((qdma_tx_sch >> 4) & 0x7f);
+ qdma_tx_sch &= 0xf;
+ while (qdma_tx_sch--)
+ max_rate *= 10;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "EN\tScheduling\tMAX\tQueue#\n%d\t%s%16d\t", enable,
+ (scheduling == 1) ? "WRR" : "SP", max_rate);
+
+ /* scan every TX queue and report the ones bound to this scheduler */
+ for (i = 0; i < MTK_QDMA_TX_NUM; i++) {
+ cr_set_field(h->fe_base + QDMA_PAGE, QTX_CFG_PAGE,
+ (i / NUM_OF_Q_PER_PAGE));
+ sch_reg = readl(h->fe_base + QTX_SCH(i % NUM_OF_Q_PER_PAGE));
+ if (hnat_priv->data->num_of_sch == 4)
+ scheduler = (sch_reg >> 30) & 0x3;
+ else
+ scheduler = !!(sch_reg & BIT(31));
+ if (id == scheduler)
+ len += scnprintf(buf + len, buf_len - len, "%d ", i);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ if (len > buf_len)
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ kfree(buf);
+ return ret_cnt;
+}
+
+/* debugfs write handler for QDMA TX scheduler @id.
+ *
+ * Input format: "<enable> <sp|wrr> <max_rate>".  The rate is encoded
+ * as a 7-bit mantissa with a base-10 exponent and programmed into the
+ * register half-word belonging to scheduler @id.
+ *
+ * Fixes: the original accepted length == sizeof(line), leaving the
+ * buffer unterminated for the sscanf/strlen calls below, and used an
+ * unbounded %s that could overflow scheduling[32].
+ */
+static ssize_t hnat_sched_write(struct file *file, const char __user *buf,
+ size_t length, loff_t *offset)
+{
+ long id = (long)file->private_data;
+ struct mtk_hnat *h = hnat_priv;
+ char line[64];
+ int enable, rate, exp = 0, shift = 0;
+ char scheduling[32];
+ size_t size;
+ u32 qdma_tx_sch;
+ u32 val = 0;
+
+ /* reserve one byte for the NUL terminator */
+ if (length >= sizeof(line))
+ return -EINVAL;
+
+ if (copy_from_user(line, buf, length))
+ return -EFAULT;
+
+ line[length] = '\0';
+
+ /* %31s bounds the token to the size of 'scheduling' */
+ if (sscanf(line, "%d %31s %d", &enable, scheduling, &rate) != 3)
+ return -EFAULT;
+
+ /* normalize rate into 7-bit mantissa + base-10 exponent */
+ while (rate > 127) {
+ rate /= 10;
+ exp++;
+ }
+
+ if (enable)
+ val |= BIT(11);
+ if (strcmp(scheduling, "sp") != 0)
+ val |= BIT(15);
+ val |= (rate & 0x7f) << 4;
+ val |= exp & 0xf;
+ /* two schedulers share one register: odd ids use the high half-word */
+ if (id & 0x1)
+ shift = 16;
+
+ if (hnat_priv->data->num_of_sch == 4)
+ qdma_tx_sch = readl(h->fe_base + QDMA_TX_4SCH_BASE(id));
+ else
+ qdma_tx_sch = readl(h->fe_base + QDMA_TX_2SCH_BASE);
+
+ qdma_tx_sch &= ~(0xffff << shift);
+ qdma_tx_sch |= val << shift;
+ if (hnat_priv->data->num_of_sch == 4)
+ writel(qdma_tx_sch, h->fe_base + QDMA_TX_4SCH_BASE(id));
+ else
+ writel(qdma_tx_sch, h->fe_base + QDMA_TX_2SCH_BASE);
+
+ size = strlen(line);
+ *offset += size;
+
+ return length;
+}
+
+/* File operations for the per-scheduler QDMA debugfs nodes. */
+static const struct file_operations hnat_sched_fops = {
+ .open = simple_open,
+ .read = hnat_sched_show,
+ .write = hnat_sched_write,
+ .llseek = default_llseek,
+};
+
+/* debugfs read handler for QDMA TX queue @id (id is stashed in
+ * file->private_data): decode the queue's scheduler binding, hw/sw
+ * buffer reservations and min/max rate shaping from QTX_CFG/QTX_SCH,
+ * and -- on HNAT v2+ -- read the per-queue packet/drop MIB counters,
+ * then copy the rendered text to user space.
+ */
+static ssize_t hnat_queue_show(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct mtk_hnat *h = hnat_priv;
+ long id = (long)file->private_data;
+ u32 qtx_sch;
+ u32 qtx_cfg;
+ int scheduler;
+ int min_rate_en;
+ int min_rate;
+ int min_rate_exp;
+ int max_rate_en;
+ int max_weight;
+ int max_rate;
+ int max_rate_exp;
+ char *buf;
+ unsigned int len = 0, buf_len = 1500;
+ ssize_t ret_cnt;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* queues are register-paged; select the page holding queue @id */
+ cr_set_field(h->fe_base + QDMA_PAGE, QTX_CFG_PAGE, (id / NUM_OF_Q_PER_PAGE));
+ qtx_cfg = readl(h->fe_base + QTX_CFG(id % NUM_OF_Q_PER_PAGE));
+ qtx_sch = readl(h->fe_base + QTX_SCH(id % NUM_OF_Q_PER_PAGE));
+ if (hnat_priv->data->num_of_sch == 4)
+ scheduler = (qtx_sch >> 30) & 0x3;
+ else
+ scheduler = !!(qtx_sch & BIT(31));
+ min_rate_en = !!(qtx_sch & BIT(27));
+ /* rates are stored as 7-bit mantissa plus base-10 exponent */
+ min_rate = (qtx_sch >> 20) & 0x7f;
+ min_rate_exp = (qtx_sch >> 16) & 0xf;
+ max_rate_en = !!(qtx_sch & BIT(11));
+ max_weight = (qtx_sch >> 12) & 0xf;
+ max_rate = (qtx_sch >> 4) & 0x7f;
+ max_rate_exp = qtx_sch & 0xf;
+ while (min_rate_exp--)
+ min_rate *= 10;
+
+ while (max_rate_exp--)
+ max_rate *= 10;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "scheduler: %d\nhw resv: %d\nsw resv: %d\n", scheduler,
+ (qtx_cfg >> 8) & 0xff, qtx_cfg & 0xff);
+
+ if (hnat_priv->data->version != MTK_HNAT_V1) {
+ /* Switch to debug mode */
+ cr_set_field(h->fe_base + QTX_MIB_IF, MIB_ON_QTX_CFG, 1);
+ cr_set_field(h->fe_base + QTX_MIB_IF, VQTX_MIB_EN, 1);
+ /* in MIB mode QTX_CFG/QTX_SCH read back as counters */
+ qtx_cfg = readl(h->fe_base + QTX_CFG(id % NUM_OF_Q_PER_PAGE));
+ qtx_sch = readl(h->fe_base + QTX_SCH(id % NUM_OF_Q_PER_PAGE));
+ len += scnprintf(buf + len, buf_len - len,
+ "packet count: %u\n", qtx_cfg);
+ len += scnprintf(buf + len, buf_len - len,
+ "packet drop: %u\n\n", qtx_sch);
+
+ /* Recover to normal mode */
+ cr_set_field(hnat_priv->fe_base + QTX_MIB_IF,
+ MIB_ON_QTX_CFG, 0);
+ cr_set_field(hnat_priv->fe_base + QTX_MIB_IF, VQTX_MIB_EN, 0);
+ }
+
+ len += scnprintf(buf + len, buf_len - len,
+ " EN RATE WEIGHT\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "----------------------------\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "max%5d%9d%9d\n", max_rate_en, max_rate, max_weight);
+ len += scnprintf(buf + len, buf_len - len,
+ "min%5d%9d -\n", min_rate_en, min_rate);
+
+ if (len > buf_len)
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ kfree(buf);
+ return ret_cnt;
+}
+
+/* debugfs write handler for QDMA TX queue @id.
+ *
+ * Input format: "<scheduler> <min_en> <min_rate> <max_en> <max_rate>
+ * <weight> <resv>".  Rates are encoded as 7-bit mantissa plus base-10
+ * exponent; @resv programs both hw and sw buffer reservations.
+ *
+ * Fixes: user input is now validated (and NUL-terminated) BEFORE any
+ * hardware register is touched; the original accepted length ==
+ * sizeof(line), leaving the buffer unterminated for sscanf, and
+ * selected the QDMA register page before rejecting bad input.
+ */
+static ssize_t hnat_queue_write(struct file *file, const char __user *buf,
+ size_t length, loff_t *offset)
+{
+ long id = (long)file->private_data;
+ struct mtk_hnat *h = hnat_priv;
+ char line[64];
+ int max_enable, max_rate, max_exp = 0;
+ int min_enable, min_rate, min_exp = 0;
+ int weight;
+ int resv;
+ int scheduler;
+ size_t size;
+ u32 qtx_sch;
+
+ /* reserve one byte for the NUL terminator */
+ if (length >= sizeof(line))
+ return -EINVAL;
+
+ if (copy_from_user(line, buf, length))
+ return -EFAULT;
+
+ line[length] = '\0';
+
+ if (sscanf(line, "%d %d %d %d %d %d %d", &scheduler, &min_enable, &min_rate,
+ &max_enable, &max_rate, &weight, &resv) != 7)
+ return -EFAULT;
+
+ /* select the register page holding queue @id, then fetch QTX_SCH */
+ cr_set_field(h->fe_base + QDMA_PAGE, QTX_CFG_PAGE, (id / NUM_OF_Q_PER_PAGE));
+ qtx_sch = readl(h->fe_base + QTX_SCH(id % NUM_OF_Q_PER_PAGE));
+
+ /* normalize rates into 7-bit mantissa + base-10 exponent */
+ while (max_rate > 127) {
+ max_rate /= 10;
+ max_exp++;
+ }
+
+ while (min_rate > 127) {
+ min_rate /= 10;
+ min_exp++;
+ }
+
+ qtx_sch &= 0x70000000;
+ if (hnat_priv->data->num_of_sch == 4)
+ qtx_sch |= (scheduler & 0x3) << 30;
+ else
+ qtx_sch |= (scheduler & 0x1) << 31;
+ if (min_enable)
+ qtx_sch |= BIT(27);
+ qtx_sch |= (min_rate & 0x7f) << 20;
+ qtx_sch |= (min_exp & 0xf) << 16;
+ if (max_enable)
+ qtx_sch |= BIT(11);
+ qtx_sch |= (weight & 0xf) << 12;
+ qtx_sch |= (max_rate & 0x7f) << 4;
+ qtx_sch |= max_exp & 0xf;
+ writel(qtx_sch, h->fe_base + QTX_SCH(id % NUM_OF_Q_PER_PAGE));
+
+ /* program hw (bits 15:8) and sw (bits 7:0) buffer reservations */
+ resv &= 0xff;
+ qtx_sch = readl(h->fe_base + QTX_CFG(id % NUM_OF_Q_PER_PAGE));
+ qtx_sch &= 0xffff0000;
+ qtx_sch |= (resv << 8) | resv;
+ writel(qtx_sch, h->fe_base + QTX_CFG(id % NUM_OF_Q_PER_PAGE));
+
+ size = strlen(line);
+ *offset += size;
+
+ return length;
+}
+
+/* debugfs ops for the per-queue "qdma_txqN" files: reads dump the queue's
+ * scheduler/rate configuration, writes reprogram it (hnat_queue_write).
+ */
+static const struct file_operations hnat_queue_fops = {
+ .open = simple_open,
+ .read = hnat_queue_show,
+ .write = hnat_queue_write,
+ .llseek = default_llseek,
+};
+
+/* debugfs write handler: select the PPD (pingpong) device by name.
+ * Accepts an interface name optionally followed by whitespace/newline.
+ *
+ * Fixes vs. original: oversized input now returns -EINVAL (a caller
+ * error), not -EFAULT (which means a bad user pointer), and the copied
+ * name is explicitly NUL-terminated.
+ */
+static ssize_t hnat_ppd_if_write(struct file *file, const char __user *buffer,
+				 size_t count, loff_t *data)
+{
+	char buf[IFNAMSIZ];
+	struct net_device *dev;
+	char *p, *tmp;
+
+	if (count >= IFNAMSIZ)
+		return -EINVAL;
+
+	memset(buf, 0, IFNAMSIZ);
+	if (copy_from_user(buf, buffer, count))
+		return -EFAULT;
+
+	/* strip trailing newline/space so "eth0\n" matches "eth0" */
+	tmp = buf;
+	p = strsep(&tmp, "\n\r ");
+	dev = dev_get_by_name(&init_net, p);
+
+	if (dev) {
+		/* swap in the new device, dropping the old reference */
+		if (hnat_priv->g_ppdev)
+			dev_put(hnat_priv->g_ppdev);
+		hnat_priv->g_ppdev = dev;
+
+		strncpy(hnat_priv->ppd, p, IFNAMSIZ);
+		hnat_priv->ppd[IFNAMSIZ - 1] = '\0';
+		pr_info("hnat_priv ppd = %s\n", hnat_priv->ppd);
+	} else {
+		pr_info("no such device!\n");
+	}
+
+	return count;
+}
+
+/* Dump the currently selected PPD (pingpong) device to the kernel log. */
+static int hnat_ppd_if_read(struct seq_file *m, void *private)
+{
+	pr_info("hnat_priv ppd = %s\n", hnat_priv->ppd);
+
+	if (!hnat_priv->g_ppdev)
+		pr_info("hnat_priv g_ppdev is null!\n");
+	else
+		pr_info("hnat_priv g_ppdev name = %s\n",
+			hnat_priv->g_ppdev->name);
+
+	return 0;
+}
+
+/* debugfs open: route reads through hnat_ppd_if_read() via seq_file. */
+static int hnat_ppd_if_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hnat_ppd_if_read, file->private_data);
+}
+
+/* debugfs ops for "hnat_ppd_if": read logs the selection, write sets it. */
+static const struct file_operations hnat_ppd_if_fops = {
+ .open = hnat_ppd_if_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = hnat_ppd_if_write,
+ .release = single_release,
+};
+
+/* Log whether MAP-E (1) or DS-Lite (0) handling is currently active. */
+static int hnat_mape_toggle_read(struct seq_file *m, void *private)
+{
+ pr_info("value=%d, %s is enabled now!\n", mape_toggle, (mape_toggle) ? "mape" : "ds-lite");
+
+ return 0;
+}
+
+/* debugfs open: route reads through hnat_mape_toggle_read() via seq_file. */
+static int hnat_mape_toggle_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hnat_mape_toggle_read, file->private_data);
+}
+
+/* debugfs write handler: '1' enables MAP-E, '0' enables DS-Lite.
+ *
+ * Fixes vs. original: the old code did copy_from_user(&buf, buffer, count)
+ * into a ONE-byte stack variable, a stack buffer overflow for any write
+ * longer than one byte (e.g. "echo 1" writes two bytes).  Only the first
+ * character is examined, so copy exactly one byte.
+ */
+static ssize_t hnat_mape_toggle_write(struct file *file, const char __user *buffer,
+				      size_t count, loff_t *data)
+{
+	char buf;
+
+	if (count < 1)
+		return -EINVAL;
+
+	if (copy_from_user(&buf, buffer, 1))
+		return -EFAULT;
+
+	if (buf == '1' && !mape_toggle) {
+		pr_info("mape is going to be enabled, ds-lite is going to be disabled !\n");
+		mape_toggle = 1;
+	} else if (buf == '0' && mape_toggle) {
+		pr_info("ds-lite is going to be enabled, mape is going to be disabled !\n");
+		mape_toggle = 0;
+	}
+
+	return count;
+}
+
+/* debugfs ops for "mape_toggle": read logs state, write flips it. */
+static const struct file_operations hnat_mape_toggle_fops = {
+ .open = hnat_mape_toggle_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = hnat_mape_toggle_write,
+ .release = single_release,
+};
+
+/* Log whether the HNAT netfilter hooks are currently registered. */
+static int hnat_hook_toggle_read(struct seq_file *m, void *private)
+{
+ pr_info("value=%d, hook is %s now!\n", hook_toggle, (hook_toggle) ? "enabled" : "disabled");
+
+ return 0;
+}
+
+/* debugfs open: route reads through hnat_hook_toggle_read() via seq_file. */
+static int hnat_hook_toggle_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hnat_hook_toggle_read, file->private_data);
+}
+
+/* debugfs write handler: '1' registers the HNAT hooks, '0' removes them.
+ *
+ * Fixes vs. original: a zero-length write previously skipped the copy and
+ * then read uninitialized stack memory at buf[0]; the buffer is now
+ * zeroed and empty writes are rejected.
+ */
+static ssize_t hnat_hook_toggle_write(struct file *file, const char __user *buffer,
+				      size_t count, loff_t *data)
+{
+	char buf[8] = {0};
+
+	if (count < 1 || count > sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, buffer, count))
+		return -EFAULT;
+
+	if (buf[0] == '1' && !hook_toggle) {
+		pr_info("hook is going to be enabled !\n");
+		hnat_enable_hook();
+	} else if (buf[0] == '0' && hook_toggle) {
+		pr_info("hook is going to be disabled !\n");
+		hnat_disable_hook();
+	}
+
+	return count;
+}
+
+/* debugfs ops for "hook_toggle": read logs state, write flips it. */
+static const struct file_operations hnat_hook_toggle_fops = {
+ .open = hnat_hook_toggle_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = hnat_hook_toggle_write,
+ .release = single_release,
+};
+
+/* Log the driver (SW) version string and the detected HW NETSYS version. */
+static int hnat_version_read(struct seq_file *m, void *private)
+{
+ pr_info("HNAT SW version : %s\nHNAT HW version : %d\n", HNAT_SW_VER, hnat_priv->data->version);
+
+ return 0;
+}
+
+/* debugfs open: route reads through hnat_version_read() via seq_file. */
+static int hnat_version_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hnat_version_read, file->private_data);
+}
+
+/* debugfs ops for the read-only "hnat_version" file. */
+static const struct file_operations hnat_version_fops = {
+ .open = hnat_version_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* Fetch the hardware accounting counters (packets/bytes) for FOE entry
+ * @index.  Returns 0 on success, -1 when the entry has no counters or is
+ * not in the BIND state.
+ */
+int get_ppe_mib(int index, u64 *pkt_cnt, u64 *byte_cnt)
+{
+	struct mtk_hnat *h = hnat_priv;
+	struct foe_entry *entry = h->foe_table_cpu + index;
+	struct hnat_accounting *acount = hnat_get_count(h, index);
+
+	if (!acount || entry->bfib1.state != BIND)
+		return -1;
+
+	*pkt_cnt = acount->packets;
+	*byte_cnt = acount->bytes;
+
+	return 0;
+}
+EXPORT_SYMBOL(get_ppe_mib);
+
+/* Return nonzero when FOE entry @index is in the BIND state. */
+int is_entry_binding(int index)
+{
+	const struct foe_entry *entry = hnat_priv->foe_table_cpu + index;
+
+	return entry->bfib1.state == BIND;
+}
+EXPORT_SYMBOL(is_entry_binding);
+
+/* Build one debugfs_reg32 entry from a PPE_* register offset macro. */
+#define dump_register(nm) \
+ { \
+ .name = __stringify(nm), .offset = PPE_##nm, \
+ }
+
+/* PPE register map dumped by the "regdump" debugfs file (relative to
+ * h->ppe_base; see hnat_init_debugfs()).
+ */
+static const struct debugfs_reg32 hnat_regs[] = {
+ dump_register(GLO_CFG), dump_register(FLOW_CFG),
+ dump_register(IP_PROT_CHK), dump_register(IP_PROT_0),
+ dump_register(IP_PROT_1), dump_register(IP_PROT_2),
+ dump_register(IP_PROT_3), dump_register(TB_CFG),
+ dump_register(TB_BASE), dump_register(TB_USED),
+ dump_register(BNDR), dump_register(BIND_LMT_0),
+ dump_register(BIND_LMT_1), dump_register(KA),
+ dump_register(UNB_AGE), dump_register(BND_AGE_0),
+ dump_register(BND_AGE_1), dump_register(HASH_SEED),
+ dump_register(DFT_CPORT), dump_register(MCAST_PPSE),
+ dump_register(MCAST_L_0), dump_register(MCAST_H_0),
+ dump_register(MCAST_L_1), dump_register(MCAST_H_1),
+ dump_register(MCAST_L_2), dump_register(MCAST_H_2),
+ dump_register(MCAST_L_3), dump_register(MCAST_H_3),
+ dump_register(MCAST_L_4), dump_register(MCAST_H_4),
+ dump_register(MCAST_L_5), dump_register(MCAST_H_5),
+ dump_register(MCAST_L_6), dump_register(MCAST_H_6),
+ dump_register(MCAST_L_7), dump_register(MCAST_H_7),
+ dump_register(MCAST_L_8), dump_register(MCAST_H_8),
+ dump_register(MCAST_L_9), dump_register(MCAST_H_9),
+ dump_register(MCAST_L_A), dump_register(MCAST_H_A),
+ dump_register(MCAST_L_B), dump_register(MCAST_H_B),
+ dump_register(MCAST_L_C), dump_register(MCAST_H_C),
+ dump_register(MCAST_L_D), dump_register(MCAST_H_D),
+ dump_register(MCAST_L_E), dump_register(MCAST_H_E),
+ dump_register(MCAST_L_F), dump_register(MCAST_H_F),
+ dump_register(MTU_DRP), dump_register(MTU_VLYR_0),
+ dump_register(MTU_VLYR_1), dump_register(MTU_VLYR_2),
+ /* NOTE(review): VPM_TPID is listed twice -- possibly a copy/paste of
+  * a second TPID register; confirm against the PPE register manual.
+  */
+ dump_register(VPM_TPID), dump_register(VPM_TPID),
+ dump_register(CAH_CTRL), dump_register(CAH_TAG_SRH),
+ dump_register(CAH_LINE_RW), dump_register(CAH_WDATA),
+ dump_register(CAH_RDATA),
+};
+
+/* Create the "hnat" debugfs directory and all control files.
+ * Returns 0 on success or a negative errno; on failure everything that
+ * was created is torn down again.
+ *
+ * Fixes vs. original: several files were created with the redundant mode
+ * "S_IRUGO | S_IRUGO"; a single S_IRUGO is equivalent.
+ * NOTE(review): files that carry a .write handler may have been meant to
+ * get a writable mode bit instead -- confirm intent; debugfs write still
+ * works for root regardless of the mode shown.
+ */
+int hnat_init_debugfs(struct mtk_hnat *h)
+{
+	struct dentry *root;
+	struct dentry *file;
+	int ret = 0;
+	char name[16];
+	long i;
+
+	root = debugfs_create_dir("hnat", NULL);
+	if (!root) {
+		dev_notice(h->dev, "%s:err at %d\n", __func__, __LINE__);
+		ret = -ENOMEM;
+		goto err0;
+	}
+	h->root = root;
+
+	/* register dump of the PPE block (see hnat_regs[]) */
+	h->regset = kzalloc(sizeof(*h->regset), GFP_KERNEL);
+	if (!h->regset) {
+		dev_notice(h->dev, "%s:err at %d\n", __func__, __LINE__);
+		ret = -ENOMEM;
+		goto err1;
+	}
+	h->regset->regs = hnat_regs;
+	h->regset->nregs = ARRAY_SIZE(hnat_regs);
+	h->regset->base = h->ppe_base;
+
+	file = debugfs_create_regset32("regdump", S_IRUGO, root, h->regset);
+	if (!file) {
+		dev_notice(h->dev, "%s:err at %d\n", __func__, __LINE__);
+		ret = -ENOMEM;
+		goto err1;
+	}
+	debugfs_create_file("all_entry", S_IRUGO, root, h, &hnat_debug_fops);
+	debugfs_create_file("external_interface", S_IRUGO, root, h,
+			    &hnat_ext_fops);
+	debugfs_create_file("whnat_interface", S_IRUGO, root, h,
+			    &hnat_whnat_fops);
+	debugfs_create_file("cpu_reason", S_IFREG | S_IRUGO, root, h,
+			    &cpu_reason_fops);
+	debugfs_create_file("hnat_entry", S_IRUGO, root, h,
+			    &hnat_entry_fops);
+	debugfs_create_file("hnat_setting", S_IRUGO, root, h,
+			    &hnat_setting_fops);
+	debugfs_create_file("mcast_table", S_IRUGO, root, h,
+			    &hnat_mcast_fops);
+	debugfs_create_file("hook_toggle", S_IRUGO, root, h,
+			    &hnat_hook_toggle_fops);
+	debugfs_create_file("mape_toggle", S_IRUGO, root, h,
+			    &hnat_mape_toggle_fops);
+	debugfs_create_file("hnat_version", S_IRUGO, root, h,
+			    &hnat_version_fops);
+	debugfs_create_file("hnat_ppd_if", S_IRUGO, root, h,
+			    &hnat_ppd_if_fops);
+
+	/* one file per QDMA scheduler and per TX queue; the index is
+	 * smuggled through the private_data pointer
+	 */
+	for (i = 0; i < hnat_priv->data->num_of_sch; i++) {
+		snprintf(name, sizeof(name), "qdma_sch%ld", i);
+		debugfs_create_file(name, S_IRUGO, root, (void *)i,
+				    &hnat_sched_fops);
+	}
+
+	for (i = 0; i < MTK_QDMA_TX_NUM; i++) {
+		snprintf(name, sizeof(name), "qdma_txq%ld", i);
+		debugfs_create_file(name, S_IRUGO, root, (void *)i,
+				    &hnat_queue_fops);
+	}
+
+	return 0;
+
+err1:
+	debugfs_remove_recursive(root);
+err0:
+	return ret;
+}
+
+/* Tear down the debugfs tree created by hnat_init_debugfs() and free the
+ * register-dump descriptor.
+ */
+void hnat_deinit_debugfs(struct mtk_hnat *h)
+{
+	debugfs_remove_recursive(h->root);
+	kfree(h->regset);
+	h->root = NULL;
+}
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_mcast.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_mcast.c
new file mode 100644
index 0000000..79e4bd0
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_mcast.c
@@ -0,0 +1,347 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2014-2016 Zhiqiang Yang <zhiqiang.yang@mediatek.com>
+ */
+#include <net/sock.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_bridge.h>
+#include "hnat.h"
+
+/* *
+ * mcast_entry_get - Returns the index of an unused entry
+ * or an already existed entry in mtbl
+ */
+static int mcast_entry_get(u16 vlan_id, u32 dst_mac)
+{
+ int index = -1;
+ u8 i;
+ struct ppe_mcast_group *p = hnat_priv->pmcast->mtbl;
+ u8 max = hnat_priv->pmcast->max_entry;
+
+ for (i = 0; i < max; i++) {
+ if ((index == -1) && (!p->valid)) {
+ index = i; /*get the first unused entry index*/
+ continue;
+ }
+ /* NOTE(review): the "continue" above skips the p++ at the loop
+  * bottom, so once a free slot is found p stops advancing and the
+  * vid/mac comparison below runs against the wrong table entry.
+  * p should be advanced on every iteration (e.g. in the for
+  * clause) -- confirm and fix.
+  */
+ if ((p->vid == vlan_id) && (p->mac_hi == dst_mac)) {
+ index = i;
+ break;
+ }
+ p++;
+ }
+ if (index == -1)
+ pr_info("%s:group table is full\n", __func__);
+
+ return index;
+}
+
+/* Derive the L2 multicast MAC address from an MDB entry's L3 group
+ * address.  *mac_hi receives the low four address bytes (byte-swapped),
+ * *mac_lo the leading two bytes.
+ *
+ * Fixes vs. original: the outputs were left uninitialized for protocols
+ * other than IPv4/IPv6 and then read by trace_printk() and the caller.
+ */
+static void get_mac_from_mdb_entry(struct br_mdb_entry *entry,
+				   u32 *mac_hi, u16 *mac_lo)
+{
+	*mac_hi = 0;
+	*mac_lo = 0;
+
+	switch (ntohs(entry->addr.proto)) {
+	case ETH_P_IP:
+		/* IPv4 multicast: 01:00:5e + low 23 bits of the group IP */
+		*mac_lo = 0x0100;
+		*mac_hi = swab32((entry->addr.u.ip4 & 0xfffffe00) + 0x5e);
+		break;
+	case ETH_P_IPV6:
+		/* IPv6 multicast: 33:33 + low 32 bits of the group IP */
+		*mac_lo = 0x3333;
+		*mac_hi = swab32(entry->addr.u.ip6.s6_addr32[3]);
+		break;
+	}
+	trace_printk("%s:group mac_h=0x%08x, mac_l=0x%04x\n",
+		     __func__, *mac_hi, *mac_lo);
+}
+
+/* set_hnat_mtbl - program one PPE multicast table entry (pair of H/L
+ * registers).  Entries 0x0..0xf live in the PPE block, 0x10 and up in the
+ * frame-engine block.
+ *
+ * Fixes vs. original: the upper-bank low-word register (PPE_MCAST_L_10)
+ * was written with mcast_h.u.value instead of the MAC word (copy/paste
+ * bug), so those entries never matched.
+ */
+static int set_hnat_mtbl(struct ppe_mcast_group *group, int index)
+{
+	struct ppe_mcast_h mcast_h;
+	struct ppe_mcast_l mcast_l;
+	u16 mac_lo = group->mac_lo;
+	u32 mac_hi = group->mac_hi;
+	u8 mc_port = group->mc_port;
+	void __iomem *reg;
+
+	mcast_h.u.value = 0;
+	mcast_l.addr = 0;
+	/* MAC prefix select: 0 = 01:00 (IPv4), 1 = 33:33 (IPv6) */
+	if (mac_lo == 0x0100)
+		mcast_h.u.info.mc_mpre_sel = 0;
+	else if (mac_lo == 0x3333)
+		mcast_h.u.info.mc_mpre_sel = 1;
+
+	mcast_h.u.info.mc_px_en = mc_port;
+	mcast_l.addr = mac_hi;
+	mcast_h.u.info.valid = group->valid;
+	trace_printk("%s:index=%d,group info=0x%x,addr=0x%x\n",
+		     __func__, index, mcast_h.u.value, mcast_l.addr);
+	if (index < 0x10) {
+		reg = hnat_priv->ppe_base + PPE_MCAST_H_0 + ((index) * 8);
+		writel(mcast_h.u.value, reg);
+		reg = hnat_priv->ppe_base + PPE_MCAST_L_0 + ((index) * 8);
+		writel(mcast_l.addr, reg);
+	} else {
+		index = index - 0x10;
+		reg = hnat_priv->fe_base + PPE_MCAST_H_10 + ((index) * 8);
+		writel(mcast_h.u.value, reg);
+		reg = hnat_priv->fe_base + PPE_MCAST_L_10 + ((index) * 8);
+		writel(mcast_l.addr, reg);
+	}
+
+	return 0;
+}
+
+/**
+ * hnat_mcast_table_update -
+ * 1.get a valid group entry
+ * 2.update group info
+ * a.update eif&oif count
+ * b.eif ==0 & oif == 0,delete it from group table
+ * c.oif != 0,set mc forward port to cpu,else do not forward to cpu
+ * 3.set the group info to ppe register
+ *
+ * Fixes vs. original: the port mask used a logical "||" (always 1)
+ * instead of a bitwise "|", and the net_device looked up under RCU was
+ * used after rcu_read_unlock() without holding a reference.
+ */
+static int hnat_mcast_table_update(int type, struct br_mdb_entry *entry)
+{
+	struct ppe_mcast_group *group;
+	struct net_device *dev;
+	u32 mac_hi;
+	u16 mac_lo;
+	int index;
+	int ret = -1;
+
+	/* take a proper reference so the device cannot disappear while we
+	 * still use it below
+	 */
+	dev = dev_get_by_index(&init_net, entry->ifindex);
+	if (!dev)
+		return -ENODEV;
+
+	get_mac_from_mdb_entry(entry, &mac_hi, &mac_lo);
+	index = mcast_entry_get(entry->vid, mac_hi);
+	if (index == -1)
+		goto out;
+
+	group = &hnat_priv->pmcast->mtbl[index];
+	group->mac_hi = mac_hi;
+	group->mac_lo = mac_lo;
+	switch (type) {
+	case RTM_NEWMDB:
+		if (IS_LAN(dev) || IS_WAN(dev))
+			group->eif++;
+		else
+			group->oif++;
+		group->vid = entry->vid;
+		group->valid = true;
+		break;
+	case RTM_DELMDB:
+		if (group->valid) {
+			if (IS_LAN(dev) || IS_WAN(dev))
+				group->eif--;
+			else
+				group->oif--;
+		}
+		break;
+	}
+	trace_printk("%s:devname=%s,eif=%d,oif=%d\n", __func__,
+		     dev->name, group->eif, group->oif);
+	if (group->valid) {
+		if (group->oif && group->eif)
+			/* eth & wifi both in group: forward to cpu & GDMA1
+			 * (bitwise OR of the two port bits)
+			 */
+			group->mc_port = MCAST_TO_PDMA | MCAST_TO_GDMA1;
+		else if (group->oif)
+			/* only wifi in group: forward to cpu only */
+			group->mc_port = MCAST_TO_PDMA;
+		else
+			/* only eth in group: forward to GDMA1 only */
+			group->mc_port = MCAST_TO_GDMA1;
+		if (!group->oif && !group->eif)
+			/* nobody in this group: clear the entry */
+			memset(group, 0, sizeof(struct ppe_mcast_group));
+		set_hnat_mtbl(group, index);
+	}
+	ret = 0;
+out:
+	dev_put(dev);
+	return ret;
+}
+
+/* Workqueue handler: drain the kernel netlink socket and apply every
+ * bridge MDB (multicast database) notification to the PPE multicast
+ * table via hnat_mcast_table_update().
+ *
+ * Fixes vs. original: nlmsg_find_attr() was called with sizeof(bpm) --
+ * the size of a POINTER -- instead of sizeof(*bpm); that only matched
+ * the br_port_msg payload size by accident on 64-bit builds.
+ */
+static void hnat_mcast_nlmsg_handler(struct work_struct *work)
+{
+	struct nlattr *nest, *nest2, *info;
+	struct ppe_mcast_table *pmcast;
+	struct br_mdb_entry *entry;
+	struct br_port_msg *bpm;
+	struct nlmsghdr *nlh;
+	struct sk_buff *skb;
+	struct sock *sk;
+
+	pmcast = container_of(work, struct ppe_mcast_table, work);
+	sk = pmcast->msock->sk;
+
+	while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
+		nlh = nlmsg_hdr(skb);
+		if (!nlmsg_ok(nlh, skb->len)) {
+			kfree_skb(skb);
+			continue;
+		}
+		bpm = nlmsg_data(nlh);
+		/* skip the br_port_msg header, not the pointer size */
+		nest = nlmsg_find_attr(nlh, sizeof(*bpm), MDBA_MDB);
+		if (!nest) {
+			kfree_skb(skb);
+			continue;
+		}
+		nest2 = nla_find_nested(nest, MDBA_MDB_ENTRY);
+		if (nest2) {
+			info = nla_find_nested(nest2, MDBA_MDB_ENTRY_INFO);
+			if (!info) {
+				kfree_skb(skb);
+				continue;
+			}
+
+			entry = (struct br_mdb_entry *)nla_data(info);
+			trace_printk("%s:cmd=0x%2x,ifindex=0x%x,state=0x%x",
+				     __func__, nlh->nlmsg_type,
+				     entry->ifindex, entry->state);
+			trace_printk("vid=0x%x,ip=0x%x,proto=0x%x\n",
+				     entry->vid, entry->addr.u.ip4,
+				     entry->addr.proto);
+			hnat_mcast_table_update(nlh->nlmsg_type, entry);
+		}
+		kfree_skb(skb);
+	}
+}
+
+/* sk_data_ready callback for the MDB netlink socket: defer processing to
+ * the workqueue, since we must not block in this context.
+ */
+static void hnat_mcast_nlmsg_rcv(struct sock *sk)
+{
+	struct ppe_mcast_table *pmcast = hnat_priv->pmcast;
+
+	queue_work(pmcast->queue, &pmcast->work);
+}
+
+/* Open a kernel NETLINK_ROUTE socket subscribed to bridge MDB
+ * notifications and hook its data-ready callback.  Returns the socket or
+ * NULL on failure.
+ *
+ * Fixes vs. original: the sockaddr_nl was bound with uninitialized
+ * padding (nl_pad etc. were never cleared); zero the whole struct first.
+ */
+static struct socket *hnat_mcast_netlink_open(struct net *net)
+{
+	struct sockaddr_nl addr;
+	struct socket *sock = NULL;
+	int ret;
+
+	ret = sock_create_kern(net, PF_NETLINK, SOCK_RAW, NETLINK_ROUTE, &sock);
+	if (ret < 0)
+		goto out;
+
+	sock->sk->sk_data_ready = hnat_mcast_nlmsg_rcv;
+
+	memset(&addr, 0, sizeof(addr));
+	addr.nl_family = PF_NETLINK;
+	addr.nl_pid = 65536; /*fix me:how to get an unique id?*/
+	addr.nl_groups = RTMGRP_MDB;
+	ret = sock->ops->bind(sock, (struct sockaddr *)&addr, sizeof(addr));
+	if (ret < 0)
+		goto out;
+
+	return sock;
+out:
+	if (sock)
+		sock_release(sock);
+
+	return NULL;
+}
+
+/* Periodic (10 s) timer: age out static multicast FOE entries whose HW
+ * timestamp has drifted too far from the current FOE timestamp.  Only
+ * needed on chips that cannot age multicast entries in hardware (see the
+ * MTK_HNAT_V3 gate in hnat_mcast_enable()).
+ */
+static void hnat_mcast_check_timestamp(struct timer_list *t)
+{
+ struct foe_entry *entry;
+ int hash_index;
+ u16 e_ts, foe_ts;
+
+ for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
+ entry = hnat_priv->foe_table_cpu + hash_index;
+ if (entry->bfib1.sta == 1) {
+ e_ts = (entry->ipv4_hnapt.m_timestamp) & 0xffff;
+ foe_ts = foe_timestamp(hnat_priv);
+ /* NOTE(review): this looks like 16-bit wraparound compensation
+  * (treat a gap > 0x3000 as a counter wrap before the distance
+  * check below) -- confirm against the PPE timestamp semantics.
+  */
+ if ((foe_ts - e_ts) > 0x3000)
+ foe_ts = (~(foe_ts)) & 0xffff;
+ if (abs(foe_ts - e_ts) > 20)
+ entry_delete(hash_index);
+ }
+ }
+ mod_timer(&hnat_priv->hnat_mcast_check_timer, jiffies + 10 * HZ);
+}
+
+/* Enable PPE multicast offload: allocate the group table, start the MDB
+ * netlink listener and workqueue, and program the multicast port map.
+ * Returns 0 on success, -1 on failure.
+ *
+ * Fixes vs. original: the shared error path dereferenced 'pmcast' even
+ * when the kzalloc itself had failed (NULL pointer dereference); the
+ * cleanup is now staged per acquired resource.
+ */
+int hnat_mcast_enable(void)
+{
+	struct ppe_mcast_table *pmcast;
+
+	pmcast = kzalloc(sizeof(*pmcast), GFP_KERNEL);
+	if (!pmcast)
+		return -1;
+
+	if (hnat_priv->data->version == MTK_HNAT_V1)
+		pmcast->max_entry = 0x10;
+	else
+		pmcast->max_entry = MAX_MCAST_ENTRY;
+
+	INIT_WORK(&pmcast->work, hnat_mcast_nlmsg_handler);
+	pmcast->queue = create_singlethread_workqueue("ppe_mcast");
+	if (!pmcast->queue)
+		goto err_free;
+
+	pmcast->msock = hnat_mcast_netlink_open(&init_net);
+	if (!pmcast->msock)
+		goto err_destroy_wq;
+
+	hnat_priv->pmcast = pmcast;
+
+	/* mt7629 should checkout mcast entry life time manualy */
+	if (hnat_priv->data->version == MTK_HNAT_V3) {
+		timer_setup(&hnat_priv->hnat_mcast_check_timer,
+			    hnat_mcast_check_timestamp, 0);
+		hnat_priv->hnat_mcast_check_timer.expires = jiffies;
+		add_timer(&hnat_priv->hnat_mcast_check_timer);
+	}
+
+	/* Enable multicast table lookup */
+	cr_set_field(hnat_priv->ppe_base + PPE_GLO_CFG, MCAST_TB_EN, 1);
+	/* multicast port0 map to PDMA */
+	cr_set_field(hnat_priv->ppe_base + PPE_MCAST_PPSE, MC_P0_PPSE, 0);
+	/* multicast port1 map to GMAC1 */
+	cr_set_field(hnat_priv->ppe_base + PPE_MCAST_PPSE, MC_P1_PPSE, 1);
+	/* multicast port2 map to GMAC2 */
+	cr_set_field(hnat_priv->ppe_base + PPE_MCAST_PPSE, MC_P2_PPSE, 2);
+	/* multicast port3 map to QDMA */
+	cr_set_field(hnat_priv->ppe_base + PPE_MCAST_PPSE, MC_P3_PPSE, 5);
+
+	return 0;
+
+err_destroy_wq:
+	destroy_workqueue(pmcast->queue);
+err_free:
+	kfree(pmcast);
+
+	return -1;
+}
+
+/* Disable PPE multicast offload and release everything acquired by
+ * hnat_mcast_enable().
+ *
+ * Fixes vs. original: pmcast->msock/queue/work were dereferenced BEFORE
+ * the "if (pmcast)" NULL check, crashing when mcast was never enabled.
+ */
+int hnat_mcast_disable(void)
+{
+	struct ppe_mcast_table *pmcast = hnat_priv->pmcast;
+
+	if (!pmcast)
+		return 0;
+
+	if (hnat_priv->data->version == MTK_HNAT_V3)
+		del_timer_sync(&hnat_priv->hnat_mcast_check_timer);
+
+	flush_work(&pmcast->work);
+	destroy_workqueue(pmcast->queue);
+	sock_release(pmcast->msock);
+	hnat_priv->pmcast = NULL;
+	kfree(pmcast);
+
+	return 0;
+}
+
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_mcast.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_mcast.h
new file mode 100644
index 0000000..048bc58
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_mcast.h
@@ -0,0 +1,69 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2014-2016 Zhiqiang Yang <zhiqiang.yang@mediatek.com>
+ */
+
+#ifndef NF_HNAT_MCAST_H
+#define NF_HNAT_MCAST_H
+
+/* rtnetlink multicast group masks used when binding the kernel socket */
+#define RTMGRP_IPV4_MROUTE 0x20
+#define RTMGRP_MDB 0x2000000
+
+/* size of the software multicast group table (HW supports up to this) */
+#define MAX_MCAST_ENTRY 64
+
+/* mc_px_en port bits: which ports a multicast group is forwarded to */
+#define MCAST_TO_PDMA (0x1 << 0)
+#define MCAST_TO_GDMA1 (0x1 << 1)
+#define MCAST_TO_GDMA2 (0x1 << 2)
+
+/* Software shadow of one PPE multicast table entry (keyed by vid+mac). */
+struct ppe_mcast_group {
+ u32 mac_hi; /*multicast mac addr*/
+ u16 mac_lo; /*multicast mac addr*/
+ u16 vid;
+ u8 mc_port; /*1:forward to cpu,2:forward to GDMA1,4:forward to GDMA2*/
+ u8 eif; /*num of eth if added to multi group. */
+ u8 oif; /* num of other if added to multi group ,ex wifi.*/
+ bool valid; /* entry is in use (mirrored into the HW valid bit) */
+};
+
+/* Top-level multicast offload state: the netlink listener socket, the
+ * workqueue that processes its messages, and the group shadow table.
+ */
+struct ppe_mcast_table {
+ struct workqueue_struct *queue;
+ struct work_struct work;
+ struct socket *msock;
+ struct ppe_mcast_group mtbl[MAX_MCAST_ENTRY];
+ u8 max_entry; /* entries actually usable on this chip (<= MAX_MCAST_ENTRY) */
+};
+
+/* Layout of a PPE_MCAST_H_x register (control word of a table entry). */
+struct ppe_mcast_h {
+ union {
+ u32 value;
+ struct {
+ u32 mc_vid:12;
+ u32 mc_qos_qid54:2; /* mt7622 only */
+ u32 valid:1;
+ u32 rev1:1;
+ /*0:forward to cpu,1:forward to GDMA1*/
+ u32 mc_px_en:4;
+ u32 mc_mpre_sel:2; /* 0=01:00, 2=33:33 */
+ u32 mc_vid_cmp:1;
+ u32 rev2:1;
+ u32 mc_px_qos_en:4;
+ u32 mc_qos_qid:4;
+ } info;
+ } u;
+};
+
+/* Layout of a PPE_MCAST_L_x register: low 4 bytes of the group MAC. */
+struct ppe_mcast_l {
+ u32 addr;
+};
+
+int hnat_mcast_enable(void);
+int hnat_mcast_disable(void);
+
+#endif
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
new file mode 100644
index 0000000..fe495ce
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
@@ -0,0 +1,2138 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
+ * Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/netfilter_bridge.h>
+#include <linux/netfilter_ipv6.h>
+
+#include <net/arp.h>
+#include <net/neighbour.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_flow_table.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+
+#include "nf_hnat_mtk.h"
+#include "hnat.h"
+
+#include "../mtk_eth_soc.h"
+
+/* Fast-path test: packet from GMAC/PPD already hashed by the PPE and
+ * marked "force to CPU" -- can be bridged straight to the ext device.
+ */
+#define do_ge2ext_fast(dev, skb) \
+ ((IS_LAN(dev) || IS_WAN(dev) || IS_PPD(dev)) && \
+ skb_hnat_is_hashed(skb) && \
+ skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU)
+/* Fast-learn test: looped-back packet on the PPD whose VLAN tag encodes
+ * the originating ext/WAN ifindex (see do_hnat_ext_to_ge()).
+ */
+#define do_ext2ge_fast_learn(dev, skb) \
+ (IS_PPD(dev) && \
+ (skb_hnat_sport(skb) == NR_PDMA_PORT || \
+ skb_hnat_sport(skb) == NR_QDMA_PORT) && \
+ ((get_dev_from_index(skb->vlan_tci & VLAN_VID_MASK)) || \
+ get_wandev_from_index(skb->vlan_tci & VLAN_VID_MASK)))
+/* MAP-E WAN-to-LAN test: only when MAP-E mode is on and the packet has
+ * not already been through the MAP-E pingpong path.
+ */
+#define do_mape_w2l_fast(dev, skb) \
+ (mape_toggle && IS_WAN(dev) && (!is_from_mape(skb)))
+
+static struct ipv6hdr mape_l2w_v6h;
+static struct ipv6hdr mape_w2l_v6h;
+/* Look up @dev in the wifi hook-interface table; return its slot index,
+ * or 0 when it is not registered (slot 0 is never used).
+ */
+static inline uint8_t get_wifi_hook_if_index_from_dev(const struct net_device *dev)
+{
+	int idx;
+
+	for (idx = 1; idx < MAX_IF_NUM; idx++)
+		if (hnat_priv->wifi_hook_if[idx] == dev)
+			return idx;
+
+	return 0;
+}
+
+/* Count the populated slots in the external-interface table. */
+static inline int get_ext_device_number(void)
+{
+	int cnt = 0;
+
+	while (cnt < MAX_EXT_DEVS && hnat_priv->ext_if[cnt])
+		cnt++;
+
+	return cnt;
+}
+
+/* Return 1 when @name matches a registered external interface, else 0. */
+static inline int find_extif_from_devname(const char *name)
+{
+	struct extdev_entry *entry;
+	int i;
+
+	for (i = 0; i < MAX_EXT_DEVS; i++) {
+		entry = hnat_priv->ext_if[i];
+		if (!entry)
+			break;
+		if (strcmp(name, entry->name) == 0)
+			return 1;
+	}
+	return 0;
+}
+
+/* Return the ifindex of @dev when it is a registered (and attached)
+ * external interface, or 0 when it is not.
+ */
+static inline int get_index_from_dev(const struct net_device *dev)
+{
+	struct extdev_entry *entry;
+	int i;
+
+	for (i = 0; i < MAX_EXT_DEVS; i++) {
+		entry = hnat_priv->ext_if[i];
+		if (!entry)
+			break;
+		if (entry->dev == dev)
+			return entry->dev->ifindex;
+	}
+	return 0;
+}
+
+/* Map an ifindex back to a registered external net_device, or NULL. */
+static inline struct net_device *get_dev_from_index(int index)
+{
+	struct extdev_entry *entry;
+	int i;
+
+	for (i = 0; i < MAX_EXT_DEVS; i++) {
+		entry = hnat_priv->ext_if[i];
+		if (!entry)
+			break;
+		if (entry->dev && entry->dev->ifindex == index)
+			return entry->dev;
+	}
+	return NULL;
+}
+
+/* Return the WAN net_device (with a reference held) when its ifindex is
+ * @index, otherwise NULL.
+ *
+ * Fixes vs. original: dev_get_by_name() can return NULL (crash on the
+ * old unconditional dereference), and on a mismatch the reference it
+ * took was leaked -- it is now dropped with dev_put().
+ */
+static inline struct net_device *get_wandev_from_index(int index)
+{
+	struct net_device *wandev;
+
+	wandev = dev_get_by_name(&init_net, hnat_priv->wan);
+	if (!wandev)
+		return NULL;
+
+	if (wandev->ifindex == index)
+		return wandev;
+
+	dev_put(wandev);
+	return NULL;
+}
+
+/* Attach @dev to its matching (by name) external-interface slot, taking
+ * a device reference.  Returns the ifindex on success, -1 when no empty
+ * matching slot exists.
+ */
+static inline int extif_set_dev(struct net_device *dev)
+{
+	struct extdev_entry *entry;
+	int i;
+
+	for (i = 0; i < MAX_EXT_DEVS; i++) {
+		entry = hnat_priv->ext_if[i];
+		if (!entry)
+			break;
+		if (strcmp(dev->name, entry->name) == 0 && !entry->dev) {
+			dev_hold(dev);
+			entry->dev = dev;
+			pr_info("%s(%s)\n", __func__, dev->name);
+
+			return entry->dev->ifindex;
+		}
+	}
+
+	return -1;
+}
+
+/* Detach @dev from its external-interface slot and drop the reference
+ * taken by extif_set_dev().  Returns the ifindex, or -1 if not found.
+ *
+ * Fixes vs. original: the ifindex was read through ext_entry->dev AFTER
+ * the slot had been set to NULL -- a guaranteed NULL dereference on the
+ * success path.  Save it first.
+ */
+static inline int extif_put_dev(struct net_device *dev)
+{
+	struct extdev_entry *ext_entry;
+	int ifindex;
+	int i;
+
+	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
+		ext_entry = hnat_priv->ext_if[i];
+		if (ext_entry->dev == dev) {
+			ifindex = dev->ifindex;
+			pr_info("%s(%s)\n", __func__, dev->name);
+			ext_entry->dev = NULL;
+			dev_put(dev);
+
+			return ifindex;
+		}
+	}
+
+	return -1;
+}
+
+/* Append @ext_entry to the external-interface table.  Returns the new
+ * number of entries (unchanged when the table is already full).
+ *
+ * Fixes vs. original: no bounds check -- a full table caused an
+ * out-of-bounds write at hnat_priv->ext_if[MAX_EXT_DEVS].
+ */
+int ext_if_add(struct extdev_entry *ext_entry)
+{
+	int len = get_ext_device_number();
+
+	if (len < MAX_EXT_DEVS)
+		hnat_priv->ext_if[len++] = ext_entry;
+
+	return len;
+}
+
+/* Remove @ext_entry from the external-interface table, shifting the
+ * following entries down to keep the array compact.  Returns the index
+ * the entry occupied (MAX_EXT_DEVS when it was not found).
+ */
+int ext_if_del(struct extdev_entry *ext_entry)
+{
+	int i, j;
+
+	for (i = 0; i < MAX_EXT_DEVS; i++) {
+		if (hnat_priv->ext_if[i] != ext_entry)
+			continue;
+		for (j = i; j < MAX_EXT_DEVS - 1 && hnat_priv->ext_if[j]; j++)
+			hnat_priv->ext_if[j] = hnat_priv->ext_if[j + 1];
+		hnat_priv->ext_if[j] = NULL;
+		break;
+	}
+
+	return i;
+}
+
+/* Invalidate every bound FOE entry when an offload-relevant device (LAN,
+ * WAN, registered ext interface, or a flow-offload-capable device) goes
+ * down, then flush the HW cache.  Entries are rebuilt later by the
+ * hnat_sma_build_entry_timer.
+ */
+void foe_clear_all_bind_entries(struct net_device *dev)
+{
+ int hash_index;
+ struct foe_entry *entry;
+
+ /* ignore devices the PPE never offloads traffic for */
+ if (!IS_LAN(dev) && !IS_WAN(dev) &&
+ !find_extif_from_devname(dev->name) &&
+ !dev->netdev_ops->ndo_flow_offload_check)
+ return;
+
+ /* stop HW from re-binding while we invalidate the table */
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, SMA, SMA_ONLY_FWD_CPU);
+ for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
+ entry = hnat_priv->foe_table_cpu + hash_index;
+ if (entry->bfib1.state == BIND) {
+ entry->ipv4_hnapt.udib1.state = INVALID;
+ entry->ipv4_hnapt.udib1.time_stamp =
+ readl((hnat_priv->fe_base + 0x0010)) & 0xFF;
+ }
+ }
+
+ /* clear HWNAT cache */
+ hnat_cache_ebl(1);
+
+ mod_timer(&hnat_priv->hnat_sma_build_entry_timer, jiffies + 3 * HZ);
+}
+
+/* Enable PPE forwarding on the GMAC that carries traffic for @dev. */
+static void gmac_ppe_fwd_enable(struct net_device *dev)
+{
+	if (IS_LAN(dev) || IS_GMAC1_MODE) {
+		set_gmac_ppe_fwd(0, 1);
+		return;
+	}
+
+	if (IS_WAN(dev))
+		set_gmac_ppe_fwd(1, 1);
+}
+
+/* netdevice notifier: on NETDEV_UP enable PPE forwarding and attach the
+ * device to its ext slot; on NETDEV_GOING_DOWN detach it (unless it is a
+ * wifi hook interface) and invalidate all bound FOE entries.
+ */
+int nf_hnat_netdevice_event(struct notifier_block *unused, unsigned long event,
+ void *ptr)
+{
+ struct net_device *dev;
+
+ dev = netdev_notifier_info_to_dev(ptr);
+
+ switch (event) {
+ case NETDEV_UP:
+ gmac_ppe_fwd_enable(dev);
+
+ extif_set_dev(dev);
+
+ break;
+ case NETDEV_GOING_DOWN:
+ /* wifi hook interfaces are managed through the wifi table */
+ if (!get_wifi_hook_if_index_from_dev(dev))
+ extif_put_dev(dev);
+
+ foe_clear_all_bind_entries(dev);
+
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+/* Invalidate bound FOE entries whose cached destination MAC no longer
+ * matches the neighbour's current hardware address (the neighbour moved
+ * or changed MAC), so the next packet re-learns through software.
+ *
+ * Fixes vs. original: the MAC comparison used strncmp(), which stops at
+ * the first 0x00 byte -- MAC addresses are raw bytes and may legally
+ * contain zeros, so stale entries could be missed.  Use memcmp().
+ */
+void foe_clear_entry(struct neighbour *neigh)
+{
+	u32 *daddr = (u32 *)neigh->primary_key;
+	unsigned char h_dest[ETH_ALEN];
+	struct foe_entry *entry;
+	int hash_index;
+	u32 dip;
+
+	dip = (u32)(*daddr);
+
+	for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
+		entry = hnat_priv->foe_table_cpu + hash_index;
+		if (entry->bfib1.state == BIND &&
+		    entry->ipv4_hnapt.new_dip == ntohl(dip)) {
+			/* reconstruct the entry's cached DMAC */
+			*((u32 *)h_dest) = swab32(entry->ipv4_hnapt.dmac_hi);
+			*((u16 *)&h_dest[4]) =
+				swab16(entry->ipv4_hnapt.dmac_lo);
+			if (memcmp(h_dest, neigh->ha, ETH_ALEN) != 0) {
+				pr_info("%s: state=%d\n", __func__,
+					neigh->nud_state);
+				cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, SMA,
+					     SMA_ONLY_FWD_CPU);
+
+				entry->ipv4_hnapt.udib1.state = INVALID;
+				entry->ipv4_hnapt.udib1.time_stamp =
+					readl((hnat_priv->fe_base + 0x0010)) & 0xFF;
+
+				/* clear HWNAT cache */
+				hnat_cache_ebl(1);
+
+				mod_timer(&hnat_priv->hnat_sma_build_entry_timer,
+					  jiffies + 3 * HZ);
+
+				pr_info("Delete old entry: dip =%pI4\n", &dip);
+				pr_info("Old mac= %pM\n", h_dest);
+				pr_info("New mac= %pM\n", neigh->ha);
+			}
+		}
+	}
+}
+
+/* netevent notifier: on neighbour updates, flush FOE entries that still
+ * carry the neighbour's old MAC address.
+ */
+int nf_hnat_netevent_handler(struct notifier_block *unused, unsigned long event,
+			     void *ptr)
+{
+	struct neighbour *neigh;
+
+	if (event == NETEVENT_NEIGH_UPDATE) {
+		neigh = ptr;
+		if (neigh->dev)
+			foe_clear_entry(neigh);
+	}
+
+	return NOTIFY_DONE;
+}
+
+/* Prepend the cached MAP-E IPv6 header to an IPv4 packet in place,
+ * turning it into an IPv4-in-IPv6 frame.  Returns 0 on success, -1 when
+ * the skb has no headroom or cannot be written.
+ * NOTE(review): the push/copy offset arithmetic below is delicate --
+ * in particular the IPV6_HDR_LEN - ETH_HLEN push paired with the manual
+ * ether-header copy; confirm against a packet capture before touching.
+ */
+unsigned int mape_add_ipv6_hdr(struct sk_buff *skb, struct ipv6hdr mape_ip6h)
+{
+ struct ethhdr *eth = NULL;
+ struct ipv6hdr *ip6h = NULL;
+ struct iphdr *iph = NULL;
+
+ if (skb_headroom(skb) < IPV6_HDR_LEN || skb_shared(skb) ||
+ (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
+ return -1;
+ }
+
+ /* point to L3 */
+ memcpy(skb->data - IPV6_HDR_LEN - ETH_HLEN, skb_push(skb, ETH_HLEN), ETH_HLEN);
+ memcpy(skb_push(skb, IPV6_HDR_LEN - ETH_HLEN), &mape_ip6h, IPV6_HDR_LEN);
+
+ eth = (struct ethhdr *)(skb->data - ETH_HLEN);
+ eth->h_proto = htons(ETH_P_IPV6);
+ skb->protocol = htons(ETH_P_IPV6);
+
+ iph = (struct iphdr *)(skb->data + IPV6_HDR_LEN);
+ ip6h = (struct ipv6hdr *)(skb->data);
+ ip6h->payload_len = iph->tot_len; /* maybe different with ipv4 */
+
+ skb_set_network_header(skb, 0);
+ skb_set_transport_header(skb, iph->ihl * 4 + IPV6_HDR_LEN);
+ return 0;
+}
+
+/* Classify skb->pkt_type from the destination MAC, mirroring the
+ * decision eth_type_trans() would make for this device.
+ */
+static void fix_skb_packet_type(struct sk_buff *skb, struct net_device *dev,
+				struct ethhdr *eth)
+{
+	skb->pkt_type = PACKET_HOST;
+
+	if (likely(!is_multicast_ether_addr(eth->h_dest)))
+		return;
+
+	if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
+		skb->pkt_type = PACKET_BROADCAST;
+	else
+		skb->pkt_type = PACKET_MULTICAST;
+}
+
+/* Pingpong step 1 (ext -> GE): loop a packet received on an external
+ * (e.g. wifi) interface back out through the PPD device so the PPE can
+ * learn it.  The originating ifindex is smuggled in the VLAN TCI (with
+ * VLAN_CFI_MASK as a marker) and recovered in do_hnat_ext_to_ge2().
+ * Returns 0 when the packet was consumed, -1 on failure.
+ */
+unsigned int do_hnat_ext_to_ge(struct sk_buff *skb, const struct net_device *in,
+ const char *func)
+{
+ if (hnat_priv->g_ppdev && hnat_priv->g_ppdev->flags & IFF_UP) {
+ u16 vlan_id = 0;
+ skb_set_network_header(skb, 0);
+ skb_push(skb, ETH_HLEN);
+ set_to_ppe(skb);
+
+ /* preserve a real VLAN tag in-band before reusing the TCI */
+ vlan_id = skb_vlan_tag_get_id(skb);
+ if (vlan_id) {
+ skb = vlan_insert_tag(skb, skb->vlan_proto, skb->vlan_tci);
+ if (!skb)
+ return -1;
+ }
+
+ /*set where we come from*/
+ skb->vlan_proto = htons(ETH_P_8021Q);
+ skb->vlan_tci =
+ (VLAN_CFI_MASK | (in->ifindex & VLAN_VID_MASK));
+ trace_printk(
+ "%s: vlan_prot=0x%x, vlan_tci=%x, in->name=%s, skb->dev->name=%s\n",
+ __func__, ntohs(skb->vlan_proto), skb->vlan_tci,
+ in->name, hnat_priv->g_ppdev->name);
+ skb->dev = hnat_priv->g_ppdev;
+ dev_queue_xmit(skb);
+ trace_printk("%s: called from %s successfully\n", __func__, func);
+ return 0;
+ }
+
+ trace_printk("%s: called from %s fail\n", __func__, func);
+ return -1;
+}
+
+/* Pingpong step 2 (GE -> stack): a looped-back packet arrives with the
+ * originating ifindex encoded in the VLAN TCI (see do_hnat_ext_to_ge()).
+ * Restore it to the original ext device and re-inject it, or handle the
+ * MAP-E WAN->LAN pingpong case.  Returns 0 when consumed, -1 on failure.
+ */
+unsigned int do_hnat_ext_to_ge2(struct sk_buff *skb, const char *func)
+{
+ struct ethhdr *eth = eth_hdr(skb);
+ struct net_device *dev;
+ struct foe_entry *entry;
+
+ trace_printk("%s: vlan_prot=0x%x, vlan_tci=%x\n", __func__,
+ ntohs(skb->vlan_proto), skb->vlan_tci);
+
+ /* recover the originating ext device from the encoded ifindex */
+ dev = get_dev_from_index(skb->vlan_tci & VLAN_VID_MASK);
+
+ if (dev) {
+ /*set where we to go*/
+ skb->dev = dev;
+ skb->vlan_proto = 0;
+ skb->vlan_tci = 0;
+
+ /* a real VLAN tag was preserved in-band; strip it again */
+ if (ntohs(eth->h_proto) == ETH_P_8021Q) {
+ skb = skb_vlan_untag(skb);
+ if (unlikely(!skb))
+ return -1;
+ }
+
+ /* spread bonded-link traffic by the PPE hash (0x7fff/0x3fff
+  * mean "not hashed" on V4 / pre-V4 respectively)
+  */
+ if (IS_BOND_MODE &&
+ (((hnat_priv->data->version == MTK_HNAT_V4) &&
+ (skb_hnat_entry(skb) != 0x7fff)) ||
+ ((hnat_priv->data->version != MTK_HNAT_V4) &&
+ (skb_hnat_entry(skb) != 0x3fff))))
+ skb_set_hash(skb, skb_hnat_entry(skb) >> 1, PKT_HASH_TYPE_L4);
+
+ set_from_extge(skb);
+ fix_skb_packet_type(skb, skb->dev, eth);
+ netif_rx(skb);
+ trace_printk("%s: called from %s successfully\n", __func__,
+ func);
+ return 0;
+ } else {
+ /* MapE WAN --> LAN/WLAN PingPong. */
+ dev = get_wandev_from_index(skb->vlan_tci & VLAN_VID_MASK);
+ if (mape_toggle && dev) {
+ if (!mape_add_ipv6_hdr(skb, mape_w2l_v6h)) {
+ skb_set_mac_header(skb, -ETH_HLEN);
+ skb->dev = dev;
+ set_from_mape(skb);
+ skb->vlan_proto = 0;
+ skb->vlan_tci = 0;
+ fix_skb_packet_type(skb, skb->dev, eth_hdr(skb));
+ entry = &hnat_priv->foe_table_cpu[skb_hnat_entry(skb)];
+ entry->bfib1.pkt_type = IPV4_HNAPT;
+ netif_rx(skb);
+ return 0;
+ }
+ }
+ trace_printk("%s: called from %s fail\n", __func__, func);
+ return -1;
+ }
+}
+
+/* Fast-forward a PPE-bound packet from a GMAC to the external device
+ * recorded in its FOE entry (act_dp), or perform the MAP-E LAN->WAN
+ * encapsulation.  When the target device is gone, the stale FOE entry is
+ * invalidated so HW stops forwarding to it.  Returns 0 when consumed,
+ * -1 on failure.
+ */
+unsigned int do_hnat_ge_to_ext(struct sk_buff *skb, const char *func)
+{
+ /*set where we to go*/
+ u8 index;
+ struct foe_entry *entry;
+ struct net_device *dev;
+
+ entry = &hnat_priv->foe_table_cpu[skb_hnat_entry(skb)];
+
+ /* act_dp stores the destination ext-device index learned at bind */
+ if (IS_IPV4_GRP(entry))
+ index = entry->ipv4_hnapt.act_dp;
+ else
+ index = entry->ipv6_5t_route.act_dp;
+
+ skb->dev = get_dev_from_index(index);
+
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+ /* strip the pseudo-VLAN HQoS tag inserted by the TX path */
+ if (eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb)
+ return NF_ACCEPT;
+
+ if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
+ return NF_ACCEPT;
+
+ skb_pull_rcsum(skb, VLAN_HLEN);
+
+ memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - VLAN_HLEN,
+ 2 * ETH_ALEN);
+ }
+#endif
+
+ if (skb->dev) {
+ skb_set_network_header(skb, 0);
+ skb_push(skb, ETH_HLEN);
+ dev_queue_xmit(skb);
+ trace_printk("%s: called from %s successfully\n", __func__,
+ func);
+ return 0;
+ } else {
+ if (mape_toggle) {
+ /* Add ipv6 header mape for lan/wlan -->wan */
+ dev = get_wandev_from_index(index);
+ if (dev) {
+ if (!mape_add_ipv6_hdr(skb, mape_l2w_v6h)) {
+ skb_set_network_header(skb, 0);
+ skb_push(skb, ETH_HLEN);
+ skb_set_mac_header(skb, 0);
+ skb->dev = dev;
+ dev_queue_xmit(skb);
+ return 0;
+ }
+ trace_printk("%s: called from %s fail[MapE]\n", __func__,
+ func);
+ return -1;
+ }
+ }
+ }
+ /*if external devices is down, invalidate related ppe entry*/
+ if (entry_hnat_is_bound(entry)) {
+ entry->bfib1.state = INVALID;
+ if (IS_IPV4_GRP(entry))
+ entry->ipv4_hnapt.act_dp = 0;
+ else
+ entry->ipv6_5t_route.act_dp = 0;
+
+ /* clear HWNAT cache */
+ hnat_cache_ebl(1);
+ }
+ trace_printk("%s: called from %s fail, index=%x\n", __func__,
+ func, index);
+ return -1;
+}
+
+static void pre_routing_print(struct sk_buff *skb, const struct net_device *in,
+ const struct net_device *out, const char *func)
+{
+ trace_printk(
+ "[%s]: %s(iif=0x%x CB2=0x%x)-->%s (ppe_hash=0x%x) sport=0x%x reason=0x%x alg=0x%x from %s\n",
+ __func__, in->name, skb_hnat_iface(skb),
+ HNAT_SKB_CB2(skb)->magic, out->name, skb_hnat_entry(skb),
+ skb_hnat_sport(skb), skb_hnat_reason(skb), skb_hnat_alg(skb),
+ func);
+}
+
+static void post_routing_print(struct sk_buff *skb, const struct net_device *in,
+ const struct net_device *out, const char *func)
+{
+ trace_printk(
+ "[%s]: %s(iif=0x%x, CB2=0x%x)-->%s (ppe_hash=0x%x) sport=0x%x reason=0x%x alg=0x%x from %s\n",
+ __func__, in->name, skb_hnat_iface(skb),
+ HNAT_SKB_CB2(skb)->magic, out->name, skb_hnat_entry(skb),
+ skb_hnat_sport(skb), skb_hnat_reason(skb), skb_hnat_alg(skb),
+ func);
+}
+
/* Tag the skb with the FOE "incoming interface" magic that matches
 * the netfilter ingress device; the PPE later uses this to pick the
 * learn direction.  Branch order is significant: LAN/PPD/EXT/WAN take
 * priority over the generic virtual-interface case.  "val" is unused;
 * the signature matches the hnat_set_head_frags() callback type.
 */
static inline void hnat_set_iif(const struct nf_hook_state *state,
				struct sk_buff *skb, int val)
{
	if (IS_LAN(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_LAN;
	} else if (IS_PPD(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_PPD;
	} else if (IS_EXT(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_EXT;
	} else if (IS_WAN(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_WAN;
	} else if (state->in->netdev_ops->ndo_flow_offload_check) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_VIRTUAL;
	} else if (!IS_BR(state->in)) {
		/* unknown non-bridge ingress: mark invalid and scrub any
		 * stale FOE info left in the headroom
		 */
		skb_hnat_iface(skb) = FOE_INVALID;

		if (is_magic_tag_valid(skb) && IS_SPACE_AVAILABLE_HEAD(skb))
			memset(skb_hnat_info(skb), 0, FOE_INFO_LEN);
	}
}
+
/* Set the skb's FOE "ALG" flag; a non-zero value excludes the flow
 * from hardware acceleration.  "state" is unused; the signature
 * matches the hnat_set_head_frags() callback type.
 */
static inline void hnat_set_alg(const struct nf_hook_state *state,
				struct sk_buff *skb, int val)
{
	skb_hnat_alg(skb) = val;
}
+
+static inline void hnat_set_head_frags(const struct nf_hook_state *state,
+ struct sk_buff *head_skb, int val,
+ void (*fn)(const struct nf_hook_state *state,
+ struct sk_buff *skb, int val))
+{
+ struct sk_buff *segs = skb_shinfo(head_skb)->frag_list;
+
+ fn(state, head_skb, val);
+ while (segs) {
+ fn(state, segs, val);
+ segs = segs->next;
+ }
+}
+
+unsigned int do_hnat_mape_w2l_fast(struct sk_buff *skb, const struct net_device *in,
+ const char *func)
+{
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ struct iphdr _iphdr;
+ struct iphdr *iph;
+ struct ethhdr *eth;
+
+ /* WAN -> LAN/WLAN MapE. */
+ if (mape_toggle && (ip6h->nexthdr == NEXTHDR_IPIP)) {
+ iph = skb_header_pointer(skb, IPV6_HDR_LEN, sizeof(_iphdr), &_iphdr);
+ switch (iph->protocol) {
+ case IPPROTO_UDP:
+ case IPPROTO_TCP:
+ break;
+ default:
+ return -1;
+ }
+ mape_w2l_v6h = *ip6h;
+
+ /* Remove ipv6 header. */
+ memcpy(skb->data + IPV6_HDR_LEN - ETH_HLEN,
+ skb->data - ETH_HLEN, ETH_HLEN);
+ skb_pull(skb, IPV6_HDR_LEN - ETH_HLEN);
+ skb_set_mac_header(skb, 0);
+ skb_set_network_header(skb, ETH_HLEN);
+ skb_set_transport_header(skb, ETH_HLEN + sizeof(_iphdr));
+
+ eth = eth_hdr(skb);
+ eth->h_proto = htons(ETH_P_IP);
+ set_to_ppe(skb);
+
+ skb->vlan_proto = htons(ETH_P_8021Q);
+ skb->vlan_tci =
+ (VLAN_CFI_MASK | (in->ifindex & VLAN_VID_MASK));
+
+ if (!hnat_priv->g_ppdev)
+ hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);
+
+ skb->dev = hnat_priv->g_ppdev;
+ skb->protocol = htons(ETH_P_IP);
+
+ dev_queue_xmit(skb);
+
+ return 0;
+ }
+ return -1;
+}
+
+static unsigned int is_ppe_support_type(struct sk_buff *skb)
+{
+ struct ethhdr *eth = NULL;
+ struct iphdr *iph = NULL;
+ struct ipv6hdr *ip6h = NULL;
+ struct iphdr _iphdr;
+
+ eth = eth_hdr(skb);
+ if (is_broadcast_ether_addr(eth->h_dest))
+ return 0;
+
+ switch (ntohs(skb->protocol)) {
+ case ETH_P_IP:
+ iph = ip_hdr(skb);
+
+ /* do not accelerate non tcp/udp traffic */
+ if ((iph->protocol == IPPROTO_TCP) ||
+ (iph->protocol == IPPROTO_UDP) ||
+ (iph->protocol == IPPROTO_IPV6)) {
+ return 1;
+ }
+
+ break;
+ case ETH_P_IPV6:
+ ip6h = ipv6_hdr(skb);
+
+ if ((ip6h->nexthdr == NEXTHDR_TCP) ||
+ (ip6h->nexthdr == NEXTHDR_UDP)) {
+ return 1;
+ } else if (ip6h->nexthdr == NEXTHDR_IPIP) {
+ iph = skb_header_pointer(skb, IPV6_HDR_LEN,
+ sizeof(_iphdr), &_iphdr);
+
+ if ((iph->protocol == IPPROTO_TCP) ||
+ (iph->protocol == IPPROTO_UDP)) {
+ return 1;
+ }
+
+ }
+
+ break;
+ case ETH_P_8021Q:
+ return 1;
+ }
+
+ return 0;
+}
+
/* NF_INET_PRE_ROUTING hook for IPv6: tag the ingress interface on the
 * skb (and every fragment), then divert ext<->GE and MapE pingpong
 * traffic before the kernel routes it.  Unsupported packets get the
 * ALG flag so the PPE never binds them.
 */
static unsigned int
mtk_hnat_ipv6_nf_pre_routing(void *priv, struct sk_buff *skb,
			     const struct nf_hook_state *state)
{
	if (!is_ppe_support_type(skb)) {
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
		return NF_ACCEPT;
	}

	/* the value (-1) is ignored by hnat_set_iif */
	hnat_set_head_frags(state, skb, -1, hnat_set_iif);

	pre_routing_print(skb, state->in, state->out, __func__);

	/* WED ingress is handled by the WHNAT path, not here */
	if ((skb_hnat_iface(skb) == FOE_MAGIC_WED0) ||
	    (skb_hnat_iface(skb) == FOE_MAGIC_WED1))
		return NF_ACCEPT;

	/* packets from external devices -> xxx ,step 1 , learning stage & bound stage*/
	if (do_ext2ge_fast_try(state->in, skb)) {
		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
			return NF_STOLEN;
		/* NOTE(review): skb is a local copy, so this check looks
		 * always-false — confirm do_hnat_ext_to_ge's contract
		 */
		if (!skb)
			goto drop;
		return NF_ACCEPT;
	}

	/* packets form ge -> external device
	 * For standalone wan interface
	 */
	if (do_ge2ext_fast(state->in, skb)) {
		if (!do_hnat_ge_to_ext(skb, __func__))
			return NF_STOLEN;
		goto drop;
	}

	/* MapE need remove ipv6 header and pingpong. */
	if (do_mape_w2l_fast(state->in, skb)) {
		if (!do_hnat_mape_w2l_fast(skb, state->in, __func__))
			return NF_STOLEN;
		else
			return NF_ACCEPT;
	}

	if (is_from_mape(skb))
		clr_from_extge(skb);

	return NF_ACCEPT;
drop:
	printk_ratelimited(KERN_WARNING
			   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
			   __func__, state->in->name, skb_hnat_iface(skb),
			   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
			   skb_hnat_sport(skb), skb_hnat_reason(skb),
			   skb_hnat_alg(skb));

	return NF_DROP;
}
+
/* NF_INET_PRE_ROUTING hook for IPv4: same flow as the IPv6 variant,
 * minus the MapE pingpong handling — tag the ingress interface, then
 * divert ext<->GE traffic before routing.
 */
static unsigned int
mtk_hnat_ipv4_nf_pre_routing(void *priv, struct sk_buff *skb,
			     const struct nf_hook_state *state)
{
	if (!is_ppe_support_type(skb)) {
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
		return NF_ACCEPT;
	}

	/* the value (-1) is ignored by hnat_set_iif */
	hnat_set_head_frags(state, skb, -1, hnat_set_iif);

	pre_routing_print(skb, state->in, state->out, __func__);

	/* WED ingress is handled by the WHNAT path, not here */
	if ((skb_hnat_iface(skb) == FOE_MAGIC_WED0) ||
	    (skb_hnat_iface(skb) == FOE_MAGIC_WED1))
		return NF_ACCEPT;

	/* packets from external devices -> xxx ,step 1 , learning stage & bound stage*/
	if (do_ext2ge_fast_try(state->in, skb)) {
		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
			return NF_STOLEN;
		if (!skb)
			goto drop;
		return NF_ACCEPT;
	}

	/* packets form ge -> external device
	 * For standalone wan interface
	 */
	if (do_ge2ext_fast(state->in, skb)) {
		if (!do_hnat_ge_to_ext(skb, __func__))
			return NF_STOLEN;
		goto drop;
	}

	return NF_ACCEPT;
drop:
	printk_ratelimited(KERN_WARNING
			   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
			   __func__, state->in->name, skb_hnat_iface(skb),
			   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
			   skb_hnat_sport(skb), skb_hnat_reason(skb),
			   skb_hnat_alg(skb));

	return NF_DROP;
}
+
/* NF_BR_LOCAL_IN hook: handles bridged traffic for the whnat (Wi-Fi
 * HNAT) path — recovers HQoS metadata from the pseudo VLAN tag, tags
 * the ingress interface, and diverts ext<->GE and MapE pingpong
 * packets at the bridge level.
 */
static unsigned int
mtk_hnat_br_nf_local_in(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
{
#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
	struct vlan_ethhdr *veth;

	/* the HQoS tag smuggles the FOE hash in the VLAN TCI; restore it */
	if (hnat_priv->data->whnat) {
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);

		if (eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
			skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
			skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
		}
	}
#endif

	if (!HAS_HQOS_MAGIC_TAG(skb) && !is_ppe_support_type(skb)) {
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
		return NF_ACCEPT;
	}

	/* the value (-1) is ignored by hnat_set_iif */
	hnat_set_head_frags(state, skb, -1, hnat_set_iif);

	pre_routing_print(skb, state->in, state->out, __func__);

	/* debug aid: per-CPU-reason counters and optional packet dump */
	if (unlikely(debug_level >= 7)) {
		hnat_cpu_reason_cnt(skb);
		if (skb_hnat_reason(skb) == dbg_cpu_reason)
			foe_dump_pkt(skb);
	}

	/* WED ingress is handled by the WHNAT path, not here */
	if ((skb_hnat_iface(skb) == FOE_MAGIC_WED0) ||
	    (skb_hnat_iface(skb) == FOE_MAGIC_WED1))
		return NF_ACCEPT;

	/* packets from external devices -> xxx ,step 1 , learning stage & bound stage*/
	if ((skb_hnat_iface(skb) == FOE_MAGIC_EXT) && !is_from_extge(skb) &&
	    !is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
		/* lazily resolve the PPE pingpong device */
		if (!hnat_priv->g_ppdev)
			hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);

		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
			return NF_STOLEN;
		if (!skb)
			goto drop;
		return NF_ACCEPT;
	}

	if (hnat_priv->data->whnat) {
		if (skb_hnat_iface(skb) == FOE_MAGIC_EXT)
			clr_from_extge(skb);

		/* packets from external devices -> xxx ,step 2, learning stage */
#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
		if (do_ext2ge_fast_learn(state->in, skb) && (eth_hdr(skb)->h_proto != HQOS_MAGIC_TAG)) {
#else
		if (do_ext2ge_fast_learn(state->in, skb)) {
#endif
			if (!do_hnat_ext_to_ge2(skb, __func__))
				return NF_STOLEN;
			goto drop;
		}

		/* packets form ge -> external device */
		if (do_ge2ext_fast(state->in, skb)) {
			if (!do_hnat_ge_to_ext(skb, __func__))
				return NF_STOLEN;
			goto drop;
		}
	}

	/* MapE need remove ipv6 header and pingpong. (bridge mode) */
	if (do_mape_w2l_fast(state->in, skb)) {
		if (!do_hnat_mape_w2l_fast(skb, state->in, __func__))
			return NF_STOLEN;
		else
			return NF_ACCEPT;
	}

	return NF_ACCEPT;
drop:
	printk_ratelimited(KERN_WARNING
			   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
			   __func__, state->in->name, skb_hnat_iface(skb),
			   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
			   skb_hnat_sport(skb), skb_hnat_reason(skb),
			   skb_hnat_alg(skb));

	return NF_DROP;
}
+
/* Fill in the L2 addresses for an egress IPv6 flow: source MAC from
 * the output device, destination MAC from the neighbour cache (or
 * from the precomputed PPPoE path when present).
 *
 * Returns 0 on success, -1 when no (valid) neighbour exists.  The
 * return type is unsigned but callers only test for non-zero.
 */
static unsigned int hnat_ipv6_get_nexthop(struct sk_buff *skb,
					  const struct net_device *out,
					  struct flow_offload_hw_path *hw_path)
{
	const struct in6_addr *ipv6_nexthop;
	struct neighbour *neigh = NULL;
	struct dst_entry *dst = skb_dst(skb);
	struct ethhdr *eth;

	/* PPPoE path already resolved the MAC addresses at offload setup */
	if (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) {
		memcpy(eth_hdr(skb)->h_source, hw_path->eth_src, ETH_ALEN);
		memcpy(eth_hdr(skb)->h_dest, hw_path->eth_dest, ETH_ALEN);
		return 0;
	}

	/* neighbour table lookup must run under RCU-bh */
	rcu_read_lock_bh();
	ipv6_nexthop =
		rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, ipv6_nexthop);
	if (unlikely(!neigh)) {
		dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI6)\n", __func__,
			   &ipv6_hdr(skb)->daddr);
		rcu_read_unlock_bh();
		return -1;
	}

	/* why do we get all zero ethernet address ? */
	if (!is_valid_ether_addr(neigh->ha)) {
		rcu_read_unlock_bh();
		return -1;
	}

	if (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPIP) {
		/*copy ether type for DS-Lite and MapE */
		eth = (struct ethhdr *)(skb->data - ETH_HLEN);
		eth->h_proto = skb->protocol;
	} else {
		eth = eth_hdr(skb);
	}

	ether_addr_copy(eth->h_dest, neigh->ha);
	ether_addr_copy(eth->h_source, out->dev_addr);

	rcu_read_unlock_bh();

	return 0;
}
+
/* Fill in the L2 addresses for an egress IPv4 flow: source MAC from
 * the output device, destination MAC from the neighbour cache (or
 * from the precomputed PPPoE path when present).
 *
 * Returns 0 on success, -1 when no (valid) neighbour exists.
 */
static unsigned int hnat_ipv4_get_nexthop(struct sk_buff *skb,
					  const struct net_device *out,
					  struct flow_offload_hw_path *hw_path)
{
	u32 nexthop;
	struct neighbour *neigh;
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = (__force struct net_device *)out;

	/* PPPoE path already resolved the MAC addresses at offload setup */
	if (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) {
		memcpy(eth_hdr(skb)->h_source, hw_path->eth_src, ETH_ALEN);
		memcpy(eth_hdr(skb)->h_dest, hw_path->eth_dest, ETH_ALEN);
		return 0;
	}

	/* neighbour table lookup must run under RCU-bh */
	rcu_read_lock_bh();
	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh)) {
		dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI4)\n", __func__,
			   &ip_hdr(skb)->daddr);
		rcu_read_unlock_bh();
		return -1;
	}

	/* why do we get all zero ethernet address ? */
	if (!is_valid_ether_addr(neigh->ha)) {
		rcu_read_unlock_bh();
		return -1;
	}

	/* NOTE(review): the IPv6 variant uses ether_addr_copy() here;
	 * memcpy is functionally equivalent
	 */
	memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN);
	memcpy(eth_hdr(skb)->h_source, out->dev_addr, ETH_ALEN);

	rcu_read_unlock_bh();

	return 0;
}
+
+static u16 ppe_get_chkbase(struct iphdr *iph)
+{
+ u16 org_chksum = ntohs(iph->check);
+ u16 org_tot_len = ntohs(iph->tot_len);
+ u16 org_id = ntohs(iph->id);
+ u16 chksum_tmp, tot_len_tmp, id_tmp;
+ u32 tmp = 0;
+ u16 chksum_base = 0;
+
+ chksum_tmp = ~(org_chksum);
+ tot_len_tmp = ~(org_tot_len);
+ id_tmp = ~(org_id);
+ tmp = chksum_tmp + tot_len_tmp + id_tmp;
+ tmp = ((tmp >> 16) & 0x7) + (tmp & 0xFFFF);
+ tmp = ((tmp >> 16) & 0x7) + (tmp & 0xFFFF);
+ chksum_base = tmp & 0xFFFF;
+
+ return chksum_base;
+}
+
+struct foe_entry ppe_fill_L2_info(struct ethhdr *eth, struct foe_entry entry,
+ struct flow_offload_hw_path *hw_path)
+{
+ switch (entry.bfib1.pkt_type) {
+ case IPV4_HNAPT:
+ case IPV4_HNAT:
+ entry.ipv4_hnapt.dmac_hi = swab32(*((u32 *)eth->h_dest));
+ entry.ipv4_hnapt.dmac_lo = swab16(*((u16 *)ð->h_dest[4]));
+ entry.ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
+ entry.ipv4_hnapt.smac_lo = swab16(*((u16 *)ð->h_source[4]));
+ entry.ipv4_hnapt.pppoe_id = hw_path->pppoe_sid;
+ break;
+ case IPV4_DSLITE:
+ case IPV4_MAP_E:
+ case IPV6_6RD:
+ case IPV6_5T_ROUTE:
+ case IPV6_3T_ROUTE:
+ entry.ipv6_5t_route.dmac_hi = swab32(*((u32 *)eth->h_dest));
+ entry.ipv6_5t_route.dmac_lo = swab16(*((u16 *)ð->h_dest[4]));
+ entry.ipv6_5t_route.smac_hi = swab32(*((u32 *)eth->h_source));
+ entry.ipv6_5t_route.smac_lo =
+ swab16(*((u16 *)ð->h_source[4]));
+ entry.ipv6_5t_route.pppoe_id = hw_path->pppoe_sid;
+ break;
+ }
+ return entry;
+}
+
+struct foe_entry ppe_fill_info_blk(struct ethhdr *eth, struct foe_entry entry,
+ struct flow_offload_hw_path *hw_path)
+{
+ entry.bfib1.psn = (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) ? 1 : 0;
+ entry.bfib1.vlan_layer += (hw_path->flags & FLOW_OFFLOAD_PATH_VLAN) ? 1 : 0;
+ entry.bfib1.vpm = (entry.bfib1.vlan_layer) ? 1 : 0;
+ entry.bfib1.ttl = 1;
+ entry.bfib1.cah = 1;
+ entry.bfib1.ka = 1;
+ entry.bfib1.time_stamp = (hnat_priv->data->version == MTK_HNAT_V4) ?
+ readl(hnat_priv->fe_base + 0x0010) & (0xFF) :
+ readl(hnat_priv->fe_base + 0x0010) & (0x7FFF);
+
+ switch (entry.bfib1.pkt_type) {
+ case IPV4_HNAPT:
+ case IPV4_HNAT:
+ if (is_multicast_ether_addr(ð->h_dest[0])) {
+ entry.ipv4_hnapt.iblk2.mcast = 1;
+ if (hnat_priv->data->version == MTK_HNAT_V3) {
+ entry.bfib1.sta = 1;
+ entry.ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
+ }
+ } else {
+ entry.ipv4_hnapt.iblk2.mcast = 0;
+ }
+
+ entry.ipv4_hnapt.iblk2.port_ag =
+ (hnat_priv->data->version == MTK_HNAT_V4) ? 0x3f : 0xf;
+ break;
+ case IPV4_DSLITE:
+ case IPV4_MAP_E:
+ case IPV6_6RD:
+ case IPV6_5T_ROUTE:
+ case IPV6_3T_ROUTE:
+ if (is_multicast_ether_addr(ð->h_dest[0])) {
+ entry.ipv6_5t_route.iblk2.mcast = 1;
+ if (hnat_priv->data->version == MTK_HNAT_V3) {
+ entry.bfib1.sta = 1;
+ entry.ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
+ }
+ } else {
+ entry.ipv6_5t_route.iblk2.mcast = 0;
+ }
+
+ entry.ipv6_5t_route.iblk2.port_ag =
+ (hnat_priv->data->version == MTK_HNAT_V4) ? 0x3f : 0xf;
+ break;
+ }
+ return entry;
+}
+
+static void ppe_fill_flow_lbl(struct foe_entry *entry, struct ipv6hdr *ip6h)
+{
+ entry->ipv4_dslite.flow_lbl[0] = ip6h->flow_lbl[2];
+ entry->ipv4_dslite.flow_lbl[1] = ip6h->flow_lbl[1];
+ entry->ipv4_dslite.flow_lbl[2] = ip6h->flow_lbl[0];
+}
+
/* Translate a software-learned flow into a complete FOE (PPE) entry.
 *
 * Builds the entry in a local copy from the packet headers, the
 * partially-filled entry the hardware wrote at learn time (@foe) and
 * the offload path info (@hw_path); decides the destination port
 * (GMAC1/GMAC2/PDMA) from @dev; then commits it with memcpy().  The
 * entry is set to BIND state here unless @dev is a whnat device, in
 * which case mtk_sw_nat_hook_tx() completes the binding.
 *
 * Returns 0 on success or deliberate no-bind, -1 for unsupported
 * protocol combinations.
 */
static unsigned int skb_to_hnat_info(struct sk_buff *skb,
				     const struct net_device *dev,
				     struct foe_entry *foe,
				     struct flow_offload_hw_path *hw_path)
{
	struct foe_entry entry = { 0 };
	int whnat = IS_WHNAT(dev);
	struct ethhdr *eth;
	struct iphdr *iph;
	struct ipv6hdr *ip6h;
	struct tcpudphdr _ports;
	const struct tcpudphdr *pptr;
	u32 gmac = NR_DISCARD;
	int udp = 0;
	u32 qid = 0;
	int mape = 0;

	if (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPIP)
		/* point to ethernet header for DS-Lite and MapE */
		eth = (struct ethhdr *)(skb->data - ETH_HLEN);
	else
		eth = eth_hdr(skb);
	if (is_multicast_ether_addr(eth->h_dest)) {
		/*do not bind multicast if PPE mcast not enable*/
		if (!hnat_priv->pmcast)
			return 0;
	}

	entry.bfib1.pkt_type = foe->udib1.pkt_type; /* Get packte type state*/
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
	entry.bfib1.sp = foe->udib1.sp;
#endif

	switch (ntohs(eth->h_proto)) {
	case ETH_P_IP:
		iph = ip_hdr(skb);
		switch (iph->protocol) {
		case IPPROTO_UDP:
			udp = 1;
			/* fallthrough */
		case IPPROTO_TCP:
			entry.ipv4_hnapt.etype = htons(ETH_P_IP);

			/* DS-Lite WAN->LAN */
			if (entry.ipv4_hnapt.bfib1.pkt_type == IPV4_DSLITE ||
			    entry.ipv4_hnapt.bfib1.pkt_type == IPV4_MAP_E) {
				/* inner 5-tuple comes from the learned entry */
				entry.ipv4_dslite.sip = foe->ipv4_dslite.sip;
				entry.ipv4_dslite.dip = foe->ipv4_dslite.dip;
				entry.ipv4_dslite.sport =
					foe->ipv4_dslite.sport;
				entry.ipv4_dslite.dport =
					foe->ipv4_dslite.dport;

#if defined(CONFIG_MEDIATEK_NETSYS_V2)
				if (entry.bfib1.pkt_type == IPV4_MAP_E) {
					/* NOTE(review): pptr is used without a
					 * NULL check; skb_header_pointer() can
					 * return NULL on truncated packets
					 */
					pptr = skb_header_pointer(skb,
								  iph->ihl * 4,
								  sizeof(_ports),
								  &_ports);

					entry.ipv4_dslite.new_sip =
						ntohl(iph->saddr);
					entry.ipv4_dslite.new_dip =
						ntohl(iph->daddr);
					entry.ipv4_dslite.new_sport =
						ntohs(pptr->src);
					entry.ipv4_dslite.new_dport =
						ntohs(pptr->dst);
				}
#endif

				entry.ipv4_dslite.tunnel_sipv6_0 =
					foe->ipv4_dslite.tunnel_sipv6_0;
				entry.ipv4_dslite.tunnel_sipv6_1 =
					foe->ipv4_dslite.tunnel_sipv6_1;
				entry.ipv4_dslite.tunnel_sipv6_2 =
					foe->ipv4_dslite.tunnel_sipv6_2;
				entry.ipv4_dslite.tunnel_sipv6_3 =
					foe->ipv4_dslite.tunnel_sipv6_3;

				entry.ipv4_dslite.tunnel_dipv6_0 =
					foe->ipv4_dslite.tunnel_dipv6_0;
				entry.ipv4_dslite.tunnel_dipv6_1 =
					foe->ipv4_dslite.tunnel_dipv6_1;
				entry.ipv4_dslite.tunnel_dipv6_2 =
					foe->ipv4_dslite.tunnel_dipv6_2;
				entry.ipv4_dslite.tunnel_dipv6_3 =
					foe->ipv4_dslite.tunnel_dipv6_3;

				entry.ipv4_dslite.bfib1.rmt = 1;
				entry.ipv4_dslite.iblk2.dscp = iph->tos;
				entry.ipv4_dslite.vlan1 = hw_path->vlan_id;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_dslite.iblk2.mibf = 1;

			} else {
				/* plain IPv4 NAPT/NAT */
				entry.ipv4_hnapt.iblk2.dscp = iph->tos;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_hnapt.iblk2.mibf = 1;

				entry.ipv4_hnapt.vlan1 = hw_path->vlan_id;

				/* carry the WAN-side VLAN into a second tag */
				if (skb->vlan_tci && FROM_GE_WAN(skb) && IS_LAN(dev)) {
					entry.bfib1.vlan_layer += 1;

					if (entry.ipv4_hnapt.vlan1)
						entry.ipv4_hnapt.vlan2 = (skb->vlan_tci & VLAN_VID_MASK);
					else
						entry.ipv4_hnapt.vlan1 = (skb->vlan_tci & VLAN_VID_MASK);
				}

				entry.ipv4_hnapt.sip = foe->ipv4_hnapt.sip;
				entry.ipv4_hnapt.dip = foe->ipv4_hnapt.dip;
				entry.ipv4_hnapt.sport = foe->ipv4_hnapt.sport;
				entry.ipv4_hnapt.dport = foe->ipv4_hnapt.dport;

				entry.ipv4_hnapt.new_sip = ntohl(iph->saddr);
				entry.ipv4_hnapt.new_dip = ntohl(iph->daddr);
			}

			entry.ipv4_hnapt.bfib1.udp = udp;
			if (IS_IPV4_HNAPT(foe)) {
				/* NOTE(review): pptr used without NULL check,
				 * see the MAP-E case above
				 */
				pptr = skb_header_pointer(skb, iph->ihl * 4,
							  sizeof(_ports),
							  &_ports);
				entry.ipv4_hnapt.new_sport = ntohs(pptr->src);
				entry.ipv4_hnapt.new_dport = ntohs(pptr->dst);
			}

			break;

		default:
			return -1;
		}
		trace_printk(
			"[%s]skb->head=%p, skb->data=%p,ip_hdr=%p, skb->len=%d, skb->data_len=%d\n",
			__func__, skb->head, skb->data, iph, skb->len,
			skb->data_len);
		break;

	case ETH_P_IPV6:
		ip6h = ipv6_hdr(skb);
		switch (ip6h->nexthdr) {
		case NEXTHDR_UDP:
			udp = 1;
			/* fallthrough */
		case NEXTHDR_TCP: /* IPv6-5T or IPv6-3T */
			entry.ipv6_5t_route.etype = htons(ETH_P_IPV6);

			entry.ipv6_5t_route.vlan1 = hw_path->vlan_id;

			/* carry the WAN-side VLAN into a second tag */
			if (skb->vlan_tci && FROM_GE_WAN(skb) && IS_LAN(dev)) {
				entry.bfib1.vlan_layer += 1;

				if (entry.ipv6_5t_route.vlan1)
					entry.ipv6_5t_route.vlan2 = (skb->vlan_tci & VLAN_VID_MASK);
				else
					entry.ipv6_5t_route.vlan1 = (skb->vlan_tci & VLAN_VID_MASK);
			}

			if (hnat_priv->data->per_flow_accounting)
				entry.ipv6_5t_route.iblk2.mibf = 1;
			entry.ipv6_5t_route.bfib1.udp = udp;

			/* 6RD LAN->WAN keeps the learned IPv4 tunnel endpoints */
			if (IS_IPV6_6RD(foe)) {
				entry.ipv6_5t_route.bfib1.rmt = 1;
				entry.ipv6_6rd.tunnel_sipv4 =
					foe->ipv6_6rd.tunnel_sipv4;
				entry.ipv6_6rd.tunnel_dipv4 =
					foe->ipv6_6rd.tunnel_dipv4;
			}

			entry.ipv6_3t_route.ipv6_sip0 =
				foe->ipv6_3t_route.ipv6_sip0;
			entry.ipv6_3t_route.ipv6_sip1 =
				foe->ipv6_3t_route.ipv6_sip1;
			entry.ipv6_3t_route.ipv6_sip2 =
				foe->ipv6_3t_route.ipv6_sip2;
			entry.ipv6_3t_route.ipv6_sip3 =
				foe->ipv6_3t_route.ipv6_sip3;

			entry.ipv6_3t_route.ipv6_dip0 =
				foe->ipv6_3t_route.ipv6_dip0;
			entry.ipv6_3t_route.ipv6_dip1 =
				foe->ipv6_3t_route.ipv6_dip1;
			entry.ipv6_3t_route.ipv6_dip2 =
				foe->ipv6_3t_route.ipv6_dip2;
			entry.ipv6_3t_route.ipv6_dip3 =
				foe->ipv6_3t_route.ipv6_dip3;

			if (IS_IPV6_5T_ROUTE(foe) || IS_IPV6_6RD(foe)) {
				entry.ipv6_5t_route.sport =
					foe->ipv6_5t_route.sport;
				entry.ipv6_5t_route.dport =
					foe->ipv6_5t_route.dport;
			}
			/* traffic class = priority nibble + top of flow label */
			entry.ipv6_5t_route.iblk2.dscp =
				(ip6h->priority << 4 |
				 (ip6h->flow_lbl[0] >> 4));
			break;

		case NEXTHDR_IPIP:
			if ((!mape_toggle &&
			     entry.bfib1.pkt_type == IPV4_DSLITE) ||
			    (mape_toggle &&
			     entry.bfib1.pkt_type == IPV4_MAP_E)) {
				/* DS-Lite LAN->WAN */
				entry.ipv4_dslite.bfib1.udp =
					foe->ipv4_dslite.bfib1.udp;
				entry.ipv4_dslite.sip = foe->ipv4_dslite.sip;
				entry.ipv4_dslite.dip = foe->ipv4_dslite.dip;
				entry.ipv4_dslite.sport =
					foe->ipv4_dslite.sport;
				entry.ipv4_dslite.dport =
					foe->ipv4_dslite.dport;

				/* outer tunnel comes from the live IPv6 header */
				entry.ipv4_dslite.tunnel_sipv6_0 =
					ntohl(ip6h->saddr.s6_addr32[0]);
				entry.ipv4_dslite.tunnel_sipv6_1 =
					ntohl(ip6h->saddr.s6_addr32[1]);
				entry.ipv4_dslite.tunnel_sipv6_2 =
					ntohl(ip6h->saddr.s6_addr32[2]);
				entry.ipv4_dslite.tunnel_sipv6_3 =
					ntohl(ip6h->saddr.s6_addr32[3]);

				entry.ipv4_dslite.tunnel_dipv6_0 =
					ntohl(ip6h->daddr.s6_addr32[0]);
				entry.ipv4_dslite.tunnel_dipv6_1 =
					ntohl(ip6h->daddr.s6_addr32[1]);
				entry.ipv4_dslite.tunnel_dipv6_2 =
					ntohl(ip6h->daddr.s6_addr32[2]);
				entry.ipv4_dslite.tunnel_dipv6_3 =
					ntohl(ip6h->daddr.s6_addr32[3]);

				ppe_fill_flow_lbl(&entry, ip6h);

				entry.ipv4_dslite.priority = ip6h->priority;
				entry.ipv4_dslite.hop_limit = ip6h->hop_limit;
				entry.ipv4_dslite.vlan1 = hw_path->vlan_id;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_dslite.iblk2.mibf = 1;
			} else if (mape_toggle &&
				   entry.bfib1.pkt_type == IPV4_HNAPT) {
				/* MapE LAN -> WAN */
				mape = 1;
				entry.ipv4_hnapt.iblk2.dscp =
					foe->ipv4_hnapt.iblk2.dscp;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_hnapt.iblk2.mibf = 1;

				entry.ipv4_hnapt.vlan1 = hw_path->vlan_id;

				entry.ipv4_hnapt.sip = foe->ipv4_hnapt.sip;
				entry.ipv4_hnapt.dip = foe->ipv4_hnapt.dip;
				entry.ipv4_hnapt.sport = foe->ipv4_hnapt.sport;
				entry.ipv4_hnapt.dport = foe->ipv4_hnapt.dport;

				entry.ipv4_hnapt.new_sip =
					foe->ipv4_hnapt.new_sip;
				entry.ipv4_hnapt.new_dip =
					foe->ipv4_hnapt.new_dip;
				entry.ipv4_hnapt.etype = htons(ETH_P_IP);

#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
				/* qid width: 7 bits on v4, 4 bits before */
				entry.ipv4_hnapt.iblk2.qid =
					(hnat_priv->data->version == MTK_HNAT_V4) ?
					 skb->mark & 0x7f : skb->mark & 0xf;
				entry.ipv4_hnapt.iblk2.fqos = 1;
#endif

				entry.ipv4_hnapt.bfib1.udp =
					foe->ipv4_hnapt.bfib1.udp;

				entry.ipv4_hnapt.new_sport =
					foe->ipv4_hnapt.new_sport;
				entry.ipv4_hnapt.new_dport =
					foe->ipv4_hnapt.new_dport;
				/* remember outer header for the TX direction */
				mape_l2w_v6h = *ip6h;
			}
			break;

		default:
			return -1;
		}

		trace_printk(
			"[%s]skb->head=%p, skb->data=%p,ipv6_hdr=%p, skb->len=%d, skb->data_len=%d\n",
			__func__, skb->head, skb->data, ip6h, skb->len,
			skb->data_len);
		break;

	default:
		/* non-IP ethertype: only 6RD LAN->WAN (IPv6 carried in an
		 * IPv4 tunnel) is expected here
		 */
		ip6h = ipv6_hdr(skb);
		iph = ip_hdr(skb);
		switch (entry.bfib1.pkt_type) {
		case IPV6_6RD: /* 6RD LAN->WAN */
			entry.ipv6_6rd.ipv6_sip0 = foe->ipv6_6rd.ipv6_sip0;
			entry.ipv6_6rd.ipv6_sip1 = foe->ipv6_6rd.ipv6_sip1;
			entry.ipv6_6rd.ipv6_sip2 = foe->ipv6_6rd.ipv6_sip2;
			entry.ipv6_6rd.ipv6_sip3 = foe->ipv6_6rd.ipv6_sip3;

			entry.ipv6_6rd.ipv6_dip0 = foe->ipv6_6rd.ipv6_dip0;
			entry.ipv6_6rd.ipv6_dip1 = foe->ipv6_6rd.ipv6_dip1;
			entry.ipv6_6rd.ipv6_dip2 = foe->ipv6_6rd.ipv6_dip2;
			entry.ipv6_6rd.ipv6_dip3 = foe->ipv6_6rd.ipv6_dip3;

			entry.ipv6_6rd.sport = foe->ipv6_6rd.sport;
			entry.ipv6_6rd.dport = foe->ipv6_6rd.dport;
			entry.ipv6_6rd.tunnel_sipv4 = ntohl(iph->saddr);
			entry.ipv6_6rd.tunnel_dipv4 = ntohl(iph->daddr);
			entry.ipv6_6rd.hdr_chksum = ppe_get_chkbase(iph);
			entry.ipv6_6rd.flag = (ntohs(iph->frag_off) >> 13);
			entry.ipv6_6rd.ttl = iph->ttl;
			entry.ipv6_6rd.dscp = iph->tos;
			entry.ipv6_6rd.per_flow_6rd_id = 1;
			entry.ipv6_6rd.vlan1 = hw_path->vlan_id;
			if (hnat_priv->data->per_flow_accounting)
				entry.ipv6_6rd.iblk2.mibf = 1;
			break;

		default:
			return -1;
		}
	}

	/* Fill Layer2 Info.*/
	entry = ppe_fill_L2_info(eth, entry, hw_path);

	/* Fill Info Blk*/
	entry = ppe_fill_info_blk(eth, entry, hw_path);

	/* pick the destination port from the egress device */
	if (IS_LAN(dev)) {
		if (IS_DSA_LAN(dev))
			hnat_dsa_fill_stag(dev, &entry, hw_path,
					   ntohs(eth->h_proto), mape);

		/* bond mode: spread flows across GMACs by FOE hash */
		if (IS_BOND_MODE)
			gmac = ((skb_hnat_entry(skb) >> 1) % hnat_priv->gmac_num) ?
				 NR_GMAC2_PORT : NR_GMAC1_PORT;
		else
			gmac = NR_GMAC1_PORT;
	} else if (IS_WAN(dev)) {
		if (IS_DSA_WAN(dev))
			hnat_dsa_fill_stag(dev, &entry, hw_path,
					   ntohs(eth->h_proto), mape);
		if (mape_toggle && mape == 1) {
			gmac = NR_PDMA_PORT;
			/* Set act_dp = wan_dev */
			entry.ipv4_hnapt.act_dp = dev->ifindex;
		} else {
			gmac = (IS_GMAC1_MODE) ? NR_GMAC1_PORT : NR_GMAC2_PORT;
		}
	} else if (IS_EXT(dev) && (FROM_GE_PPD(skb) || FROM_GE_LAN(skb) ||
		   FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) {
		/* non-whnat path marks the origin in a pseudo VLAN tag */
		if (!hnat_priv->data->whnat && IS_GMAC1_MODE) {
			entry.bfib1.vpm = 1;
			entry.bfib1.vlan_layer = 1;

			if (FROM_GE_LAN(skb))
				entry.ipv4_hnapt.vlan1 = 1;
			else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
				entry.ipv4_hnapt.vlan1 = 2;
		}

		trace_printk("learn of lan or wan(iif=%x) --> %s(ext)\n",
			     skb_hnat_iface(skb), dev->name);
		/* To CPU then stolen by pre-routing hant hook of LAN/WAN
		 * Current setting is PDMA RX.
		 */
		gmac = NR_PDMA_PORT;
		if (IS_IPV4_GRP(foe))
			entry.ipv4_hnapt.act_dp = dev->ifindex;
		else
			entry.ipv6_5t_route.act_dp = dev->ifindex;
	} else {
		printk_ratelimited(KERN_WARNING
					"Unknown case of dp, iif=%x --> %s\n",
					skb_hnat_iface(skb), dev->name);

		return 0;
	}

	qid = skb->mark & (MTK_QDMA_TX_MASK);

	if (IS_IPV4_GRP(foe)) {
		entry.ipv4_hnapt.iblk2.dp = gmac;
		entry.ipv4_hnapt.iblk2.port_mg =
			(hnat_priv->data->version == MTK_HNAT_V1) ? 0x3f : 0;
#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
		if (hnat_priv->data->version == MTK_HNAT_V4) {
			entry.ipv4_hnapt.iblk2.qid = qid & 0x7f;
		} else {
			/* qid[5:0]= port_mg[1:0]+ qid[3:0] */
			entry.ipv4_hnapt.iblk2.qid = qid & 0xf;
			if (hnat_priv->data->version != MTK_HNAT_V1)
				entry.ipv4_hnapt.iblk2.port_mg |=
					((qid >> 4) & 0x3);

			/* hand the FOE hash to the HQoS path via a magic tag */
			if (((IS_EXT(dev) && (FROM_GE_LAN(skb) ||
			      FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) ||
			      ((mape_toggle && mape == 1) && !FROM_EXT(skb))) &&
			      (!whnat)) {
				entry.ipv4_hnapt.etype = htons(HQOS_MAGIC_TAG);
				entry.ipv4_hnapt.vlan1 = skb_hnat_entry(skb);
				entry.bfib1.vlan_layer = 1;
			}
		}

		if (FROM_EXT(skb) || skb_hnat_sport(skb) == NR_QDMA_PORT)
			entry.ipv4_hnapt.iblk2.fqos = 0;
		else
			entry.ipv4_hnapt.iblk2.fqos = 1;
#else
		entry.ipv4_hnapt.iblk2.fqos = 0;
#endif
	} else {
		entry.ipv6_5t_route.iblk2.dp = gmac;
		entry.ipv6_5t_route.iblk2.port_mg =
			(hnat_priv->data->version == MTK_HNAT_V1) ? 0x3f : 0;
#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
		if (hnat_priv->data->version == MTK_HNAT_V4) {
			entry.ipv6_5t_route.iblk2.qid = qid & 0x7f;
		} else {
			/* qid[5:0]= port_mg[1:0]+ qid[3:0] */
			entry.ipv6_5t_route.iblk2.qid = qid & 0xf;
			if (hnat_priv->data->version != MTK_HNAT_V1)
				entry.ipv6_5t_route.iblk2.port_mg |=
					((qid >> 4) & 0x3);

			/* hand the FOE hash to the HQoS path via a magic tag */
			if (IS_EXT(dev) && (FROM_GE_LAN(skb) ||
			    FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb)) &&
			    (!whnat)) {
				entry.ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG);
				entry.ipv6_5t_route.vlan1 = skb_hnat_entry(skb);
				entry.bfib1.vlan_layer = 1;
			}
		}

		if (FROM_EXT(skb))
			entry.ipv6_5t_route.iblk2.fqos = 0;
		else
			entry.ipv6_5t_route.iblk2.fqos = 1;
#else
		entry.ipv6_5t_route.iblk2.fqos = 0;
#endif
	}

	/* commit the fully-built entry to the table the PPE reads */
	memcpy(foe, &entry, sizeof(entry));
	/*reset statistic for this entry*/
	if (hnat_priv->data->per_flow_accounting)
		memset(&hnat_priv->acct[skb_hnat_entry(skb)], 0,
		       sizeof(struct mib_entry));

	/* ensure the entry body is visible before flipping state to BIND */
	wmb();
	/* The INFO2.port_mg and 2nd VLAN ID fields of PPE entry are redefined
	 * by Wi-Fi whnat engine. These data and INFO2.dp will be updated and
	 * the entry is set to BIND state in mtk_sw_nat_hook_tx().
	 */
	if (!whnat)
		foe->bfib1.state = BIND;

	return 0;
}
+
+int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no)
+{
+ struct foe_entry *entry;
+ struct ethhdr *eth;
+
+ if (skb_hnat_alg(skb) || !is_magic_tag_valid(skb) || !IS_SPACE_AVAILABLE_HEAD(skb))
+ return NF_ACCEPT;
+
+ trace_printk(
+ "[%s]entry=%x reason=%x gmac_no=%x wdmaid=%x rxid=%x wcid=%x bssid=%x\n",
+ __func__, skb_hnat_entry(skb), skb_hnat_reason(skb), gmac_no,
+ skb_hnat_wdma_id(skb), skb_hnat_bss_id(skb),
+ skb_hnat_wc_id(skb), skb_hnat_rx_id(skb));
+
+ if (!skb_hnat_is_hashed(skb))
+ return NF_ACCEPT;
+
+ entry = &hnat_priv->foe_table_cpu[skb_hnat_entry(skb)];
+ if (entry_hnat_is_bound(entry))
+ return NF_ACCEPT;
+
+ if (skb_hnat_reason(skb) != HIT_UNBIND_RATE_REACH)
+ return NF_ACCEPT;
+
+ eth = eth_hdr(skb);
+ if (is_multicast_ether_addr(eth->h_dest)) {
+ /*not bind multicast if PPE mcast not enable*/
+ if (!hnat_priv->pmcast)
+ return NF_ACCEPT;
+ }
+
+ /* Some mt_wifi virtual interfaces, such as apcli,
+ * will change the smac for specail purpose.
+ */
+ switch (entry->bfib1.pkt_type) {
+ case IPV4_HNAPT:
+ case IPV4_HNAT:
+ entry->ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
+ entry->ipv4_hnapt.smac_lo = swab16(*((u16 *)ð->h_source[4]));
+ break;
+ case IPV4_DSLITE:
+ case IPV4_MAP_E:
+ case IPV6_6RD:
+ case IPV6_5T_ROUTE:
+ case IPV6_3T_ROUTE:
+ entry->ipv6_5t_route.smac_hi = swab32(*((u32 *)eth->h_source));
+ entry->ipv6_5t_route.smac_lo = swab16(*((u16 *)ð->h_source[4]));
+ break;
+ }
+
+ /* MT7622 wifi hw_nat not support QoS */
+ if (IS_IPV4_GRP(entry)) {
+ entry->ipv4_hnapt.iblk2.fqos = 0;
+ if (gmac_no == NR_WHNAT_WDMA_PORT) {
+ entry->ipv4_hnapt.winfo.bssid = skb_hnat_bss_id(skb);
+ entry->ipv4_hnapt.winfo.wcid = skb_hnat_wc_id(skb);
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+ entry->ipv4_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
+ entry->ipv4_hnapt.iblk2.winfoi = 1;
+#else
+ entry->ipv4_hnapt.winfo.rxid = skb_hnat_rx_id(skb);
+ entry->ipv4_hnapt.iblk2w.winfoi = 1;
+ entry->ipv4_hnapt.iblk2w.wdmaid = skb_hnat_wdma_id(skb);
+#endif
+ } else {
+ if (IS_GMAC1_MODE && !hnat_dsa_is_enable(hnat_priv)) {
+ entry->bfib1.vpm = 1;
+ entry->bfib1.vlan_layer = 1;
+
+ if (FROM_GE_LAN(skb))
+ entry->ipv4_hnapt.vlan1 = 1;
+ else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
+ entry->ipv4_hnapt.vlan1 = 2;
+ }
+
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+ if (FROM_GE_LAN(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb)) {
+ entry->bfib1.vpm = 0;
+ entry->bfib1.vlan_layer = 1;
+ entry->ipv4_hnapt.etype = htons(HQOS_MAGIC_TAG);
+ entry->ipv4_hnapt.vlan1 = skb_hnat_entry(skb);
+ entry->ipv4_hnapt.iblk2.fqos = 1;
+ }
+#endif
+ }
+ entry->ipv4_hnapt.iblk2.dp = gmac_no;
+ } else {
+ entry->ipv6_5t_route.iblk2.fqos = 0;
+ if (gmac_no == NR_WHNAT_WDMA_PORT) {
+ entry->ipv6_5t_route.winfo.bssid = skb_hnat_bss_id(skb);
+ entry->ipv6_5t_route.winfo.wcid = skb_hnat_wc_id(skb);
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+ entry->ipv6_5t_route.iblk2.rxid = skb_hnat_rx_id(skb);
+ entry->ipv6_5t_route.iblk2.winfoi = 1;
+#else
+ entry->ipv6_5t_route.winfo.rxid = skb_hnat_rx_id(skb);
+ entry->ipv6_5t_route.iblk2w.winfoi = 1;
+ entry->ipv6_5t_route.iblk2w.wdmaid = skb_hnat_wdma_id(skb);
+#endif
+ } else {
+ if (IS_GMAC1_MODE && !hnat_dsa_is_enable(hnat_priv)) {
+ entry->bfib1.vpm = 1;
+ entry->bfib1.vlan_layer = 1;
+
+ if (FROM_GE_LAN(skb))
+ entry->ipv6_5t_route.vlan1 = 1;
+ else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
+ entry->ipv6_5t_route.vlan1 = 2;
+ }
+
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+ if (FROM_GE_LAN(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb)) {
+ entry->bfib1.vpm = 0;
+ entry->bfib1.vlan_layer = 1;
+ entry->ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG);
+ entry->ipv6_5t_route.vlan1 = skb_hnat_entry(skb);
+ entry->ipv6_5t_route.iblk2.fqos = 1;
+ }
+#endif
+ }
+ entry->ipv6_5t_route.iblk2.dp = gmac_no;
+ }
+
+ entry->bfib1.state = BIND;
+
+ return NF_ACCEPT;
+}
+
+/* RX hook called by the ethernet driver for every received skb:
+ * stamp the packet with the HNAT magic tag and translate the WED
+ * ingress ring into the corresponding PPE WDMA source port.
+ * Always returns NF_ACCEPT.
+ */
+int mtk_sw_nat_hook_rx(struct sk_buff *skb)
+{
+	/* headroom too small to carry the hnat descriptor: nothing to do */
+	if (!IS_SPACE_AVAILABLE_HEAD(skb))
+		return NF_ACCEPT;
+
+	skb_hnat_alg(skb) = 0;
+	skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
+
+	/* map WED ingress interface to the matching WDMA source port */
+	if (skb_hnat_iface(skb) == FOE_MAGIC_WED0)
+		skb_hnat_sport(skb) = NR_WDMA0_PORT;
+	else if (skb_hnat_iface(skb) == FOE_MAGIC_WED1)
+		skb_hnat_sport(skb) = NR_WDMA1_PORT;
+
+	return NF_ACCEPT;
+}
+
+/* Register a (typically wireless) net device with the HNAT driver so
+ * traffic through it can be offloaded.  The device is recorded in
+ * wifi_hook_if[] and, unless it matches a statically known external
+ * interface name, also added to the extdev table.  One dev reference
+ * is held per table the device is inserted into.
+ */
+void mtk_ppe_dev_register_hook(struct net_device *dev)
+{
+	int i, number = 0;
+	struct extdev_entry *ext_entry;
+
+	/* wds virtual links are never offloaded */
+	if (!strncmp(dev->name, "wds", 3))
+		return;
+
+	for (i = 1; i < MAX_IF_NUM; i++) {
+		if (hnat_priv->wifi_hook_if[i] == dev) {
+			pr_info("%s : %s has been registered in wifi_hook_if table[%d]\n",
+				__func__, dev->name, i);
+			return;
+		}
+		if (!hnat_priv->wifi_hook_if[i]) {
+			if (find_extif_from_devname(dev->name)) {
+				extif_set_dev(dev);
+				goto add_wifi_hook_if;
+			}
+
+			number = get_ext_device_number();
+			if (number >= MAX_EXT_DEVS) {
+				pr_info("%s : extdev array is full. %s is not registered\n",
+					__func__, dev->name);
+				return;
+			}
+
+			ext_entry = kzalloc(sizeof(*ext_entry), GFP_KERNEL);
+			if (!ext_entry)
+				return;
+
+			/* kzalloc() zeroed the buffer; copying at most
+			 * IFNAMSIZ - 1 bytes guarantees NUL termination,
+			 * which strncpy() alone does not (CERT STR32-C).
+			 */
+			strncpy(ext_entry->name, dev->name, IFNAMSIZ - 1);
+			dev_hold(dev);		/* reference for extdev table */
+			ext_entry->dev = dev;
+			ext_if_add(ext_entry);
+
+add_wifi_hook_if:
+			dev_hold(dev);		/* reference for wifi_hook_if[] */
+			hnat_priv->wifi_hook_if[i] = dev;
+
+			break;
+		}
+	}
+	pr_info("%s : ineterface %s register (%d)\n", __func__, dev->name, i);
+}
+
+/* Undo mtk_ppe_dev_register_hook(): clear the device's wifi_hook_if[]
+ * slot (dropping that reference) and release its extdev entry, if any.
+ */
+void mtk_ppe_dev_unregister_hook(struct net_device *dev)
+{
+	int idx;
+
+	for (idx = 1; idx < MAX_IF_NUM; idx++) {
+		if (hnat_priv->wifi_hook_if[idx] != dev)
+			continue;
+
+		hnat_priv->wifi_hook_if[idx] = NULL;
+		dev_put(dev);
+		break;
+	}
+
+	extif_put_dev(dev);
+	pr_info("%s : ineterface %s set null (%d)\n", __func__, dev->name, idx);
+}
+
+/* Decide whether this flow is eligible for HW acceleration.
+ * Returns 1 when the flow may be bound into the PPE, 0 when it must
+ * stay on the software path: xfrm (IPsec) flows, and conntrack entries
+ * with an ALG helper attached (the helper rewrites payload the PPE
+ * would bypass).
+ */
+static unsigned int mtk_hnat_accel_type(struct sk_buff *skb)
+{
+	struct dst_entry *dst;
+	struct nf_conn *ct;
+	enum ip_conntrack_info ctinfo;
+	const struct nf_conn_help *help;
+
+	/* Do not accelerate 1st round of xfrm flow, and 2nd round of xfrm flow
+	 * is from local_out which is also filtered in sanity check.
+	 */
+	dst = skb_dst(skb);
+	if (dst && dst_xfrm(dst))
+		return 0;
+
+	/* no conntrack entry attached: nothing forbids acceleration */
+	ct = nf_ct_get(skb, &ctinfo);
+	if (!ct)
+		return 1;
+
+	/* rcu_read_lock()ed by nf_hook_slow */
+	help = nfct_help(ct);
+	if (help && rcu_dereference(help->helper))
+		return 0;
+
+	return 1;
+}
+
+/* Common post-routing worker shared by the IPv4/IPv6/bridge hooks.
+ * @fn is the per-family nexthop resolver (may be NULL for bridge).
+ * Returns 0 to accept the packet, non-zero (-1) when the caller should
+ * drop it (e.g. a duplicate keepalive whose entry is already bound).
+ */
+static unsigned int mtk_hnat_nf_post_routing(
+	struct sk_buff *skb, const struct net_device *out,
+	unsigned int (*fn)(struct sk_buff *, const struct net_device *,
+			   struct flow_offload_hw_path *),
+	const char *func)
+{
+	struct foe_entry *entry;
+	struct flow_offload_hw_path hw_path = { .dev = (struct net_device*)out,
+						.virt_dev = (struct net_device*)out };
+	const struct net_device *arp_dev = out;
+
+	/* skip ALG-marked packets and skbs without a valid hnat descriptor */
+	if (skb_hnat_alg(skb) || unlikely(!is_magic_tag_valid(skb) ||
+					  !IS_SPACE_AVAILABLE_HEAD(skb)))
+		return 0;
+
+	if (unlikely(!skb_hnat_is_hashed(skb)))
+		return 0;
+
+	/* let the egress driver translate to its underlying device
+	 * (e.g. DSA/VLAN); GMAC1 mode keeps the virtual device
+	 */
+	if (out->netdev_ops->ndo_flow_offload_check) {
+		if (out->netdev_ops->ndo_flow_offload_check(&hw_path))
+			return 0;
+		out = (IS_GMAC1_MODE) ? hw_path.virt_dev : hw_path.dev;
+	}
+
+	/* only LAN/WAN/external egress devices can be offloaded */
+	if (!IS_LAN(out) && !IS_WAN(out) && !IS_EXT(out))
+		return 0;
+
+	trace_printk("[%s] case hit, %x-->%s, reason=%x\n", __func__,
+		     skb_hnat_iface(skb), out->name, skb_hnat_reason(skb));
+
+	entry = &hnat_priv->foe_table_cpu[skb_hnat_entry(skb)];
+
+	switch (skb_hnat_reason(skb)) {
+	/* unbound flow that crossed the binding rate threshold: bind it */
+	case HIT_UNBIND_RATE_REACH:
+		if (entry_hnat_is_bound(entry))
+			break;
+
+		if (fn && !mtk_hnat_accel_type(skb))
+			break;
+
+		if (fn && fn(skb, arp_dev, &hw_path))
+			break;
+
+		skb_to_hnat_info(skb, out, entry, &hw_path);
+		break;
+	case HIT_BIND_KEEPALIVE_DUP_OLD_HDR:
+		if (fn && !mtk_hnat_accel_type(skb))
+			break;
+
+		/* update mcast timestamp*/
+		if (hnat_priv->data->version == MTK_HNAT_V3 &&
+		    hnat_priv->data->mcast && entry->bfib1.sta == 1)
+			entry->ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
+
+		/* duplicate keepalive of an already-bound flow: the HW
+		 * forwarded the original, so drop this software copy
+		 */
+		if (entry_hnat_is_bound(entry)) {
+			memset(skb_hnat_info(skb), 0, FOE_INFO_LEN);
+
+			return -1;
+		}
+		break;
+	case HIT_BIND_MULTICAST_TO_CPU:
+	case HIT_BIND_MULTICAST_TO_GMAC_CPU:
+		/*do not forward to gdma again,if ppe already done it*/
+		if (IS_LAN(out) || IS_WAN(out))
+			return -1;
+		break;
+	}
+
+	return 0;
+}
+
+/* IPv6 LOCAL_OUT hook: prepare DS-Lite / MAP-E tunnelling state for
+ * locally generated IPv4-in-IPv6 packets while the FOE entry is still
+ * unbound.  With mape_toggle set (MAP-E LAN->WAN), the inner IPv4
+ * header and L4 ports are recorded in the entry so the PPE can rebuild
+ * the encapsulation; otherwise the entry is marked plain DS-Lite.
+ */
+static unsigned int
+mtk_hnat_ipv6_nf_local_out(void *priv, struct sk_buff *skb,
+			   const struct nf_hook_state *state)
+{
+	struct foe_entry *entry;
+	struct ipv6hdr *ip6h;
+	struct iphdr _iphdr;
+	const struct iphdr *iph;
+	struct tcpudphdr _ports;
+	const struct tcpudphdr *pptr;
+	int udp = 0;
+
+	if (unlikely(!skb_hnat_is_hashed(skb)))
+		return NF_ACCEPT;
+
+	entry = &hnat_priv->foe_table_cpu[skb_hnat_entry(skb)];
+	if (skb_hnat_reason(skb) == HIT_UNBIND_RATE_REACH) {
+		ip6h = ipv6_hdr(skb);
+		if (ip6h->nexthdr == NEXTHDR_IPIP) {
+			/* Map-E LAN->WAN: need to record orig info before fn. */
+			if (mape_toggle) {
+				iph = skb_header_pointer(skb, IPV6_HDR_LEN,
+							 sizeof(_iphdr), &_iphdr);
+				/* inner IPv4 header absent or truncated */
+				if (unlikely(!iph))
+					return NF_ACCEPT;
+
+				switch (iph->protocol) {
+				case IPPROTO_UDP:
+					udp = 1;
+					/* fallthrough */
+				case IPPROTO_TCP:
+					break;
+
+				default:
+					return NF_ACCEPT;
+				}
+
+				pptr = skb_header_pointer(skb, IPV6_HDR_LEN + iph->ihl * 4,
+							  sizeof(_ports), &_ports);
+				/* inner L4 header absent or truncated */
+				if (unlikely(!pptr))
+					return NF_ACCEPT;
+
+				entry->bfib1.udp = udp;
+
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+				entry->bfib1.pkt_type = IPV4_MAP_E;
+				entry->ipv4_dslite.iblk2.dscp = iph->tos;
+				entry->ipv4_dslite.new_sip = ntohl(iph->saddr);
+				entry->ipv4_dslite.new_dip = ntohl(iph->daddr);
+				entry->ipv4_dslite.new_sport = ntohs(pptr->src);
+				entry->ipv4_dslite.new_dport = ntohs(pptr->dst);
+#else
+				entry->ipv4_hnapt.iblk2.dscp = iph->tos;
+				entry->ipv4_hnapt.new_sip = ntohl(iph->saddr);
+				entry->ipv4_hnapt.new_dip = ntohl(iph->daddr);
+				entry->ipv4_hnapt.new_sport = ntohs(pptr->src);
+				entry->ipv4_hnapt.new_dport = ntohs(pptr->dst);
+#endif
+			} else {
+				entry->bfib1.pkt_type = IPV4_DSLITE;
+			}
+		}
+	}
+	return NF_ACCEPT;
+}
+
+/* IPv6 POST_ROUTING hook: delegate to the common post-routing worker
+ * with the IPv6 nexthop resolver; drop when it reports failure.
+ */
+static unsigned int
+mtk_hnat_ipv6_nf_post_routing(void *priv, struct sk_buff *skb,
+			      const struct nf_hook_state *state)
+{
+	post_routing_print(skb, state->in, state->out, __func__);
+
+	if (mtk_hnat_nf_post_routing(skb, state->out, hnat_ipv6_get_nexthop,
+				     __func__)) {
+		trace_printk(
+			"%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
+			__func__, skb_hnat_iface(skb), state->out->name,
+			HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
+			skb_hnat_sport(skb), skb_hnat_reason(skb),
+			skb_hnat_alg(skb));
+		return NF_DROP;
+	}
+
+	return NF_ACCEPT;
+}
+
+/* IPv4 POST_ROUTING hook: delegate to the common post-routing worker
+ * with the IPv4 nexthop resolver; drop when it reports failure.
+ */
+static unsigned int
+mtk_hnat_ipv4_nf_post_routing(void *priv, struct sk_buff *skb,
+			      const struct nf_hook_state *state)
+{
+	post_routing_print(skb, state->in, state->out, __func__);
+
+	if (mtk_hnat_nf_post_routing(skb, state->out, hnat_ipv4_get_nexthop,
+				     __func__)) {
+		trace_printk(
+			"%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
+			__func__, skb_hnat_iface(skb), state->out->name,
+			HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
+			skb_hnat_sport(skb), skb_hnat_reason(skb),
+			skb_hnat_alg(skb));
+		return NF_DROP;
+	}
+
+	return NF_ACCEPT;
+}
+
+/* Bridge PRE_ROUTING hook handling the HW-QoS "pong" path and the
+ * learning stages between external (wifi) devices and the GMACs.
+ * Packets fully handled here are stolen; failures are dropped.
+ */
+static unsigned int
+mtk_pong_hqos_handler(void *priv, struct sk_buff *skb,
+		      const struct nf_hook_state *state)
+{
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
+
+	/* HQOS-tagged frame: recover the FOE entry index from the VLAN TCI.
+	 * NOTE(review): h_proto is big-endian on the wire but is compared
+	 * against the host-order constant HQOS_MAGIC_TAG without htons();
+	 * confirm intended byte order on little-endian targets.
+	 */
+	if (eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
+		skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
+		skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
+	}
+#endif
+
+	if (skb_hnat_iface(skb) == FOE_MAGIC_EXT)
+		clr_from_extge(skb);
+
+	/* packets from external devices -> xxx ,step 2, learning stage */
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+	if (do_ext2ge_fast_learn(state->in, skb) && (eth_hdr(skb)->h_proto != HQOS_MAGIC_TAG)) {
+#else
+	if (do_ext2ge_fast_learn(state->in, skb)) {
+#endif
+		if (!do_hnat_ext_to_ge2(skb, __func__))
+			return NF_STOLEN;
+		goto drop;
+	}
+
+	/* packets form ge -> external device */
+	if (do_ge2ext_fast(state->in, skb)) {
+		if (!do_hnat_ge_to_ext(skb, __func__))
+			return NF_STOLEN;
+		goto drop;
+	}
+
+	return NF_ACCEPT;
+drop:
+	printk_ratelimited(KERN_WARNING
+			   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
+			   __func__, state->in->name, skb_hnat_iface(skb),
+			   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
+			   skb_hnat_sport(skb), skb_hnat_reason(skb),
+			   skb_hnat_alg(skb));
+
+	return NF_DROP;
+}
+
+/* Bridge LOCAL_OUT hook: run the common post-routing worker without a
+ * nexthop resolver (bridged traffic needs no L3 nexthop lookup).
+ */
+static unsigned int
+mtk_hnat_br_nf_local_out(void *priv, struct sk_buff *skb,
+			 const struct nf_hook_state *state)
+{
+	post_routing_print(skb, state->in, state->out, __func__);
+
+	/* NULL, not 0: the third argument is a function pointer */
+	if (!mtk_hnat_nf_post_routing(skb, state->out, NULL, __func__))
+		return NF_ACCEPT;
+
+	trace_printk(
+		"%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
+		__func__, skb_hnat_iface(skb), state->out->name, HNAT_SKB_CB2(skb)->magic,
+		skb_hnat_entry(skb), skb_hnat_sport(skb), skb_hnat_reason(skb),
+		skb_hnat_alg(skb));
+
+	return NF_DROP;
+}
+
+/* IPv4 LOCAL_OUT hook: prevent locally generated flows from being
+ * bound by the PPE (sets the ALG bit via hnat_set_head_frags), and tag
+ * 6RD-encapsulated packets so the entry records the right packet type.
+ */
+static unsigned int
+mtk_hnat_ipv4_nf_local_out(void *priv, struct sk_buff *skb,
+			   const struct nf_hook_state *state)
+{
+	struct sk_buff *new_skb;
+	struct foe_entry *entry;
+	struct iphdr *iph;
+
+	if (!skb_hnat_is_hashed(skb))
+		return NF_ACCEPT;
+
+	entry = &hnat_priv->foe_table_cpu[skb_hnat_entry(skb)];
+
+	/* grow headroom so the hnat descriptor fits in front of the data.
+	 * NOTE(review): the reallocated skb only replaces the local
+	 * variable; callers up the stack still hold the freed original —
+	 * verify the netfilter core path tolerates this.
+	 */
+	if (unlikely(skb_headroom(skb) < FOE_INFO_LEN)) {
+		new_skb = skb_realloc_headroom(skb, FOE_INFO_LEN);
+		if (!new_skb) {
+			dev_info(hnat_priv->dev, "%s:drop\n", __func__);
+			return NF_DROP;
+		}
+		dev_kfree_skb(skb);
+		skb = new_skb;
+	}
+
+	/* Make the flow from local not be bound. */
+	iph = ip_hdr(skb);
+	if (iph->protocol == IPPROTO_IPV6) {
+		/* IPv6-in-IPv4 (6RD) egress: record the tunnel packet type */
+		entry->udib1.pkt_type = IPV6_6RD;
+		hnat_set_head_frags(state, skb, 0, hnat_set_alg);
+	} else {
+		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
+	}
+
+	return NF_ACCEPT;
+}
+
+/* Bridge FORWARD hook (installed by whnat_adjust_nf_hooks): ext->ext
+ * bridging cannot be offloaded, so mark such packets to skip the PPE.
+ */
+static unsigned int mtk_hnat_br_nf_forward(void *priv,
+					   struct sk_buff *skb,
+					   const struct nf_hook_state *state)
+{
+	bool ext_to_ext = IS_EXT(state->in) && IS_EXT(state->out);
+
+	if (unlikely(ext_to_ext))
+		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
+
+	return NF_ACCEPT;
+}
+
+/* Netfilter hook table for the HNAT driver.  The bridge entries may be
+ * rewired at runtime by whnat_adjust_nf_hooks() for WHNAT operation.
+ */
+static struct nf_hook_ops mtk_hnat_nf_ops[] __read_mostly = {
+	/* learning/classification as early as possible on ingress */
+	{
+		.hook = mtk_hnat_ipv4_nf_pre_routing,
+		.pf = NFPROTO_IPV4,
+		.hooknum = NF_INET_PRE_ROUTING,
+		.priority = NF_IP_PRI_FIRST + 1,
+	},
+	{
+		.hook = mtk_hnat_ipv6_nf_pre_routing,
+		.pf = NFPROTO_IPV6,
+		.hooknum = NF_INET_PRE_ROUTING,
+		.priority = NF_IP_PRI_FIRST + 1,
+	},
+	/* binding decisions run last, after NAT/mangle have finished */
+	{
+		.hook = mtk_hnat_ipv6_nf_post_routing,
+		.pf = NFPROTO_IPV6,
+		.hooknum = NF_INET_POST_ROUTING,
+		.priority = NF_IP_PRI_LAST,
+	},
+	{
+		.hook = mtk_hnat_ipv6_nf_local_out,
+		.pf = NFPROTO_IPV6,
+		.hooknum = NF_INET_LOCAL_OUT,
+		.priority = NF_IP_PRI_LAST,
+	},
+	{
+		.hook = mtk_hnat_ipv4_nf_post_routing,
+		.pf = NFPROTO_IPV4,
+		.hooknum = NF_INET_POST_ROUTING,
+		.priority = NF_IP_PRI_LAST,
+	},
+	{
+		.hook = mtk_hnat_ipv4_nf_local_out,
+		.pf = NFPROTO_IPV4,
+		.hooknum = NF_INET_LOCAL_OUT,
+		.priority = NF_IP_PRI_LAST,
+	},
+	/* bridge-level hooks for bridged (non-routed) traffic */
+	{
+		.hook = mtk_hnat_br_nf_local_in,
+		.pf = NFPROTO_BRIDGE,
+		.hooknum = NF_BR_LOCAL_IN,
+		.priority = NF_BR_PRI_FIRST,
+	},
+	{
+		.hook = mtk_hnat_br_nf_local_out,
+		.pf = NFPROTO_BRIDGE,
+		.hooknum = NF_BR_LOCAL_OUT,
+		.priority = NF_BR_PRI_LAST - 1,
+	},
+	{
+		.hook = mtk_pong_hqos_handler,
+		.pf = NFPROTO_BRIDGE,
+		.hooknum = NF_BR_PRE_ROUTING,
+		.priority = NF_BR_PRI_FIRST,
+	},
+};
+
+/* Install every HNAT netfilter hook on the init network namespace. */
+int hnat_register_nf_hooks(void)
+{
+	return nf_register_net_hooks(&init_net, mtk_hnat_nf_ops,
+				     ARRAY_SIZE(mtk_hnat_nf_ops));
+}
+
+/* Remove every HNAT netfilter hook from the init network namespace. */
+void hnat_unregister_nf_hooks(void)
+{
+	nf_unregister_net_hooks(&init_net, mtk_hnat_nf_ops,
+				ARRAY_SIZE(mtk_hnat_nf_ops));
+}
+
+/* Rewire the bridge hooks for WHNAT mode before registration:
+ * local-in moves to pre-routing, local-out to post-routing, and the
+ * HQoS pong handler is replaced by the forward-stage handler.
+ * Returns 0 on success, -1 if the hook table is unavailable.
+ */
+int whnat_adjust_nf_hooks(void)
+{
+	struct nf_hook_ops *ops = mtk_hnat_nf_ops;
+	unsigned int i;
+
+	if (!ops)
+		return -1;
+
+	for (i = 0; i < ARRAY_SIZE(mtk_hnat_nf_ops); i++) {
+		if (ops[i].hook == mtk_hnat_br_nf_local_in) {
+			ops[i].hooknum = NF_BR_PRE_ROUTING;
+		} else if (ops[i].hook == mtk_hnat_br_nf_local_out) {
+			ops[i].hooknum = NF_BR_POST_ROUTING;
+		} else if (ops[i].hook == mtk_pong_hqos_handler) {
+			ops[i].hook = mtk_hnat_br_nf_forward;
+			ops[i].hooknum = NF_BR_FORWARD;
+			ops[i].priority = NF_BR_PRI_LAST - 1;
+		}
+	}
+
+	return 0;
+}
+
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+/* packet_type receive callback for HQOS-tagged frames: recover the FOE
+ * entry index from the VLAN TCI and hand the packet to the GE->ext
+ * fast path.  Always returns 0 (frame consumed by do_hnat_ge_to_ext).
+ */
+int mtk_hqos_ptype_cb(struct sk_buff *skb, struct net_device *dev,
+		      struct packet_type *pt, struct net_device *unused)
+{
+	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
+
+	/* low 14 bits of the TCI carry the FOE entry index */
+	skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
+	skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
+
+	do_hnat_ge_to_ext(skb, __func__);
+
+	return 0;
+}
+#endif
+
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_stag.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_stag.c
new file mode 100644
index 0000000..b0fabfb
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_stag.c
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2020 MediaTek Inc.
+ * Author: Landen Chao <landen.chao@mediatek.com>
+ */
+
+#include <linux/of_device.h>
+#include <net/netfilter/nf_flow_table.h>
+#include "hnat.h"
+
+/* Fill the FOE entry's etype with the MT753x special tag so the switch
+ * forwards the offloaded frame out of the right DSA port.
+ * @mape: non-zero for the MAP-E LAN->WAN case, where the entry is bound
+ *        to the CPU and no special tag must be inserted.
+ */
+void hnat_dsa_fill_stag(const struct net_device *netdev,
+			struct foe_entry *entry,
+			struct flow_offload_hw_path *hw_path,
+			u16 eth_proto,
+			int mape)
+{
+	const struct net_device *ndev;
+	const unsigned int *port_reg;
+	int port_index;
+	u16 sp_tag;
+
+	if (hw_path->flags & FLOW_OFFLOAD_PATH_VLAN)
+		ndev = hw_path->dev;
+	else
+		ndev = netdev;
+
+	/* the DSA slave's "reg" property is the switch port number */
+	port_reg = of_get_property(ndev->dev.of_node, "reg", NULL);
+	if (!port_reg)
+		return;
+
+	port_index = be32_to_cpup(port_reg);
+	sp_tag = BIT(port_index);
+
+	if (!entry->bfib1.vlan_layer) {
+		entry->bfib1.vlan_layer = 1;
+	} else {
+		/* VLAN existence indicator */
+		sp_tag |= BIT(8);
+	}
+	/* insert the tag verbatim; do not treat it as a VLAN TCI */
+	entry->bfib1.vpm = 0;
+
+	switch (eth_proto) {
+	case ETH_P_IP:
+		if (entry->ipv4_hnapt.bfib1.pkt_type == IPV4_DSLITE)
+			entry->ipv4_dslite.etype = sp_tag;
+		else
+			entry->ipv4_hnapt.etype = sp_tag;
+		break;
+	case ETH_P_IPV6:
+		/* In the case MAPE LAN --> WAN, binding entry is to CPU.
+		 * Do not add special tag.
+		 */
+		if (!mape)
+			/* etype offset of ipv6 entries are the same. */
+			entry->ipv6_5t_route.etype = sp_tag;
+
+		break;
+	default:
+		pr_info("DSA + HNAT unsupport protocol\n");
+	}
+}
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
new file mode 100644
index 0000000..bd857f4
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
@@ -0,0 +1,126 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
+ * Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef NF_HNAT_MTK_H
+#define NF_HNAT_MTK_H
+
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include "../mtk_eth_soc.h"
+
+#define HNAT_SKB_CB2(__skb) ((struct hnat_skb_cb2 *)&((__skb)->cb[44]))
+struct hnat_skb_cb2 {
+ __u32 magic;
+};
+
+#if defined(CONFIG_MEDIATEK_NETSYS_RX_V2)
+struct hnat_desc {
+ u32 entry : 15;
+ u32 resv0 : 3;
+ u32 crsn : 5;
+ u32 resv1 : 3;
+ u32 sport : 4;
+ u32 resv2 : 1;
+ u32 alg : 1;
+ u32 iface : 4;
+ u32 resv3 : 4;
+ u32 magic_tag_protect : 16;
+ u32 wdmaid : 2;
+ u32 rxid : 2;
+ u32 wcid : 10;
+ u32 bssid : 6;
+} __packed;
+#else
+struct hnat_desc {
+ u32 entry : 14;
+ u32 crsn : 5;
+ u32 sport : 4;
+ u32 alg : 1;
+ u32 iface : 4;
+ u32 resv : 4;
+ u32 magic_tag_protect : 16;
+ u32 wdmaid : 8;
+ u32 rxid : 2;
+ u32 wcid : 8;
+ u32 bssid : 6;
+} __packed;
+#endif
+
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+#define HQOS_MAGIC_TAG 0x5678
+/* NOTE(review): skb->protocol is big-endian; confirm whether this
+ * comparison should use htons(HQOS_MAGIC_TAG) on little-endian SoCs.
+ */
+#define HAS_HQOS_MAGIC_TAG(skb) (skb->protocol == HQOS_MAGIC_TAG)
+#else
+/* plain integer "false" — NULL is a pointer constant and was being
+ * misused in boolean context
+ */
+#define HAS_HQOS_MAGIC_TAG(skb) (0)
+#endif
+
+#define HNAT_MAGIC_TAG 0x6789
+#define WIFI_INFO_LEN 3
+#define FOE_INFO_LEN (10 + WIFI_INFO_LEN)
+#define IS_SPACE_AVAILABLE_HEAD(skb) \
+ ((((skb_headroom(skb) >= FOE_INFO_LEN) ? 1 : 0)))
+
+#define skb_hnat_info(skb) ((struct hnat_desc *)(skb->head))
+#define skb_hnat_magic(skb) (((struct hnat_desc *)(skb->head))->magic)
+#define skb_hnat_reason(skb) (((struct hnat_desc *)(skb->head))->crsn)
+#define skb_hnat_entry(skb) (((struct hnat_desc *)(skb->head))->entry)
+#define skb_hnat_sport(skb) (((struct hnat_desc *)(skb->head))->sport)
+#define skb_hnat_alg(skb) (((struct hnat_desc *)(skb->head))->alg)
+#define skb_hnat_iface(skb) (((struct hnat_desc *)(skb->head))->iface)
+#define skb_hnat_magic_tag(skb) (((struct hnat_desc *)((skb)->head))->magic_tag_protect)
+#define skb_hnat_wdma_id(skb) (((struct hnat_desc *)((skb)->head))->wdmaid)
+#define skb_hnat_rx_id(skb) (((struct hnat_desc *)((skb)->head))->rxid)
+#define skb_hnat_wc_id(skb) (((struct hnat_desc *)((skb)->head))->wcid)
+#define skb_hnat_bss_id(skb) (((struct hnat_desc *)((skb)->head))->bssid)
+#define do_ext2ge_fast_try(dev, skb) (IS_EXT(dev) && !is_from_extge(skb))
+#define set_from_extge(skb) (HNAT_SKB_CB2(skb)->magic = 0x78786688)
+#define clr_from_extge(skb) (HNAT_SKB_CB2(skb)->magic = 0x0)
+#define set_to_ppe(skb) (HNAT_SKB_CB2(skb)->magic = 0x78681415)
+#define is_from_extge(skb) (HNAT_SKB_CB2(skb)->magic == 0x78786688)
+#define is_magic_tag_valid(skb) (skb_hnat_magic_tag(skb) == HNAT_MAGIC_TAG)
+#define set_from_mape(skb) (HNAT_SKB_CB2(skb)->magic = 0x78787788)
+#define is_from_mape(skb) (HNAT_SKB_CB2(skb)->magic == 0x78787788)
+#define is_unreserved_port(hdr) \
+ ((ntohs(hdr->source) > 1023) && (ntohs(hdr->dest) > 1023))
+
+#define TTL_0 0x02
+#define HAS_OPTION_HEADER 0x03
+#define NO_FLOW_IS_ASSIGNED 0x07
+#define IPV4_WITH_FRAGMENT 0x08
+#define IPV4_HNAPT_DSLITE_WITH_FRAGMENT 0x09
+#define IPV4_HNAPT_DSLITE_WITHOUT_TCP_UDP 0x0A
+#define IPV6_5T_6RD_WITHOUT_TCP_UDP 0x0B
+#define TCP_FIN_SYN_RST \
+ 0x0C /* Ingress packet is TCP fin/syn/rst (for IPv4 NAPT/DS-Lite or IPv6 5T-route/6RD) */
+#define UN_HIT 0x0D /* FOE Un-hit */
+#define HIT_UNBIND 0x0E /* FOE Hit unbind */
+#define HIT_UNBIND_RATE_REACH 0x0F
+#define HIT_BIND_TCP_FIN 0x10
+#define HIT_BIND_TTL_1 0x11
+#define HIT_BIND_WITH_VLAN_VIOLATION 0x12
+#define HIT_BIND_KEEPALIVE_UC_OLD_HDR 0x13
+#define HIT_BIND_KEEPALIVE_MC_NEW_HDR 0x14
+#define HIT_BIND_KEEPALIVE_DUP_OLD_HDR 0x15
+#define HIT_BIND_FORCE_TO_CPU 0x16
+#define HIT_BIND_WITH_OPTION_HEADER 0x17
+#define HIT_BIND_MULTICAST_TO_CPU 0x18
+#define HIT_BIND_MULTICAST_TO_GMAC_CPU 0x19
+#define HIT_PRE_BIND 0x1A
+#define HIT_BIND_PACKET_SAMPLING 0x1B
+#define HIT_BIND_EXCEED_MTU 0x1C
+
+u32 hnat_tx(struct sk_buff *skb);
+u32 hnat_set_skb_info(struct sk_buff *skb, u32 *rxd);
+u32 hnat_reg(struct net_device *, void __iomem *);
+u32 hnat_unreg(void);
+
+#endif
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_sgmii.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_sgmii.c
new file mode 100755
index 0000000..4db27df
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018-2019 MediaTek Inc.
+
+/* A library for MediaTek SGMII circuit
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include "mtk_eth_soc.h"
+
+/* Look up the SGMIISYS syscon regmaps referenced by the ethernet node
+ * and record the ANA_RGC3 register offset.  Returns 0 on success or a
+ * negative errno when a referenced syscon cannot be mapped.
+ */
+int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3)
+{
+	struct device_node *np;
+	int i;
+
+	ss->ana_rgc3 = ana_rgc3;
+
+	for (i = 0; i < MTK_MAX_DEVS; i++) {
+		np = of_parse_phandle(r, "mediatek,sgmiisys", i);
+		if (!np)
+			break;
+
+		ss->regmap[i] = syscon_node_to_regmap(np);
+		/* drop the reference taken by of_parse_phandle(); the
+		 * syscon layer keeps its own
+		 */
+		of_node_put(np);
+		if (IS_ERR(ss->regmap[i]))
+			return PTR_ERR(ss->regmap[i]);
+	}
+
+	return 0;
+}
+
+/* Configure SGMIISYS instance @id for 1000Base-X autonegotiation:
+ * program the link timer, disable remote-fault handling, restart AN,
+ * and release the QPHY power-down.  Returns 0 or -EINVAL when the
+ * instance has no regmap.
+ */
+int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id)
+{
+	unsigned int val;
+
+	if (!ss->regmap[id])
+		return -EINVAL;
+
+	/* Setup the link timer and QPHY power up inside SGMIISYS */
+	regmap_write(ss->regmap[id], SGMSYS_PCS_LINK_TIMER,
+		     SGMII_LINK_TIMER_DEFAULT);
+
+	regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val);
+	val |= SGMII_REMOTE_FAULT_DIS;
+	regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val);
+
+	regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val);
+	val |= SGMII_AN_RESTART;
+	regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val);
+
+	/* power up the QPHY analog front end */
+	regmap_read(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
+	val &= ~SGMII_PHYA_PWD;
+	regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, val);
+
+	return 0;
+}
+
+/* Configure SGMIISYS instance @id for fixed-speed (no AN) operation
+ * according to @state: select the PHY line rate, disable SGMII AN,
+ * force speed/duplex, and power up the QPHY.  Returns 0 or -EINVAL
+ * when the instance has no regmap.
+ */
+int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id,
+			       const struct phylink_link_state *state)
+{
+	unsigned int val;
+
+	if (!ss->regmap[id])
+		return -EINVAL;
+
+	/* 2500Base-X needs the 3.125 GHz line rate */
+	regmap_read(ss->regmap[id], ss->ana_rgc3, &val);
+	val &= ~RG_PHY_SPEED_MASK;
+	if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+		val |= RG_PHY_SPEED_3_125G;
+	regmap_write(ss->regmap[id], ss->ana_rgc3, val);
+
+	/* Disable SGMII AN */
+	regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val);
+	val &= ~SGMII_AN_ENABLE;
+	regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val);
+
+	/* SGMII force mode setting */
+	regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val);
+	val &= ~SGMII_IF_MODE_MASK;
+
+	switch (state->speed) {
+	case SPEED_10:
+		val |= SGMII_SPEED_10;
+		break;
+	case SPEED_100:
+		val |= SGMII_SPEED_100;
+		break;
+	case SPEED_2500:
+	case SPEED_1000:
+		val |= SGMII_SPEED_1000;
+		break;
+	default:
+		/* unsupported speeds keep the cleared (10M) field */
+		break;
+	}
+
+	if (state->duplex == DUPLEX_FULL)
+		val |= SGMII_DUPLEX_FULL;
+
+	regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val);
+
+	/* Release PHYA power down state */
+	regmap_read(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
+	val &= ~SGMII_PHYA_PWD;
+	regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, val);
+
+	return 0;
+}
+
+/* Kick a new SGMII autonegotiation round for the given GMAC. */
+void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id)
+{
+	struct mtk_sgmii *ss = eth->sgmii;
+	unsigned int val, sid;
+
+	/* Decide how GMAC and SGMIISYS be mapped: with a shared SGMII,
+	 * every GMAC uses instance 0.
+	 */
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII))
+		sid = 0;
+	else
+		sid = mac_id;
+
+	if (!ss->regmap[sid])
+		return;
+
+	regmap_read(ss->regmap[sid], SGMSYS_PCS_CONTROL_1, &val);
+	regmap_write(ss->regmap[sid], SGMSYS_PCS_CONTROL_1,
+		     val | SGMII_AN_RESTART);
+}
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/Kconfig b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/Kconfig
new file mode 100644
index 0000000..f5be18e
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/Kconfig
@@ -0,0 +1,39 @@
+config RAETH
+ tristate "Mediatek Ethernet GMAC"
+ ---help---
+ This driver supports Mediatek gigabit ethernet family of
+ adapters.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Mediatek Ethernet devices. If you say Y,
+ you will be asked for your specific card in the following questions.
+
+if RAETH
+
+config GE1_SGMII_FORCE_2500
+ bool "SGMII_FORCE_2500 (GigaSW)"
+ depends on RAETH
+ ---help---
+ If you want to use sgmii force 2500.
+ Please enable GE1_SGMII_FORCE_2500.
+ Switch must support SGMII interface.
+ This config will impact switch app makefile.
+
+config ETH_SKB_ALLOC_SELECT
+ bool "SKB Allocation API Select"
+
+choice
+ prompt "SKB Allocation API Selection"
+ depends on ETH_SKB_ALLOC_SELECT
+ default ETH_PAGE_ALLOC_SKB
+
+config ETH_SLAB_ALLOC_SKB
+ bool "SLAB skb allocation"
+
+config ETH_PAGE_ALLOC_SKB
+ bool "Page skb allocation"
+
+endchoice
+
+endif # RAETH
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/Makefile b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/Makefile
new file mode 100644
index 0000000..e72dd58
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/Makefile
@@ -0,0 +1,24 @@
+obj-$(CONFIG_RAETH) += raeth.o
+raeth-objs := raether.o raether_pdma.o ra_mac.o mii_mgr.o ra_switch.o ra_dbg_proc.o
+raeth-objs += raether_qdma.o
+raeth-objs += raether_rss.o
+
+ifeq ($(CONFIG_RAETH_ETHTOOL),y)
+raeth-objs += ra_ethtool.o
+endif
+
+raeth-objs += raether_hwlro.o
+raeth-objs += ra_dbg_hwlro.o
+#raeth-objs += ra_dbg_hwioc.o
+
+ccflags-y += -Idrivers/net/ethernet/raeth
+ccflags-y += -Iinclude/linux/
+
+ifeq ($(CONFIG_RAETH_PDMA_DVT),y)
+raeth-objs += dvt/raether_pdma_dvt.o
+obj-m += dvt/pkt_gen.o
+obj-m += dvt/pkt_gen_udp_frag.o
+obj-m += dvt/pkt_gen_tcp_frag.o
+endif
+
+#ccflags-y += -Werror
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/mii_mgr.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/mii_mgr.c
new file mode 100644
index 0000000..7da2517
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/mii_mgr.c
@@ -0,0 +1,338 @@
+/* Copyright 2016 MediaTek Inc.
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "mii_mgr.h"
+
+/* Enable (an_status == 1) or disable the embedded switch's PHY
+ * auto-polling via bit 31 of ESW_PHY_POLLING.
+ * NOTE(review): ESW_PHY_POLLING is dereferenced as a raw pointer —
+ * presumably an ioremapped virtual address; confirm.
+ */
+void set_an_polling(u32 an_status)
+{
+	if (an_status == 1)
+		*(unsigned long *)(ESW_PHY_POLLING) |= (1 << 31);
+	else
+		*(unsigned long *)(ESW_PHY_POLLING) &= ~(1 << 31);
+}
+
+/* Issue a Clause-22 MDIO read through MDIO_PHY_CONTROL_0.
+ * Returns 1 on success with the 16-bit result in *read_data, 0 on
+ * timeout.  Serialised with ei_local->mdio_lock; each busy-wait on the
+ * controller's busy bit (bit 31) gives up after 5 seconds.
+ */
+u32 __mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data)
+{
+	u32 status = 0;
+	u32 rc = 0;
+	unsigned long t_start = jiffies;
+	u32 data = 0;
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ei_local->mdio_lock, flags);
+	/* We enable mdio gpio purpose register, and disable it when exit. */
+	enable_mdio(1);
+
+	/* make sure previous read operation is complete */
+	while (1) {
+		/* 0 : Read/write operation complete */
+		if (!(sys_reg_read(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
+			break;
+		} else if (time_after(jiffies, t_start + 5 * HZ)) {
+			enable_mdio(0);
+			rc = 0;
+			pr_err("\n MDIO Read operation is ongoing !!\n");
+			goto out;
+		}
+	}
+
+	/* compose the command word and trigger it with the start bit */
+	data = (0x01 << 16) | (0x02 << 18) | (phy_addr << 20) | (phy_register << 25);
+	sys_reg_write(MDIO_PHY_CONTROL_0, data);
+	sys_reg_write(MDIO_PHY_CONTROL_0, (data | (1 << 31)));
+
+	/* make sure read operation is complete */
+	t_start = jiffies;
+	while (1) {
+		if (!(sys_reg_read(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
+			/* result lives in the low 16 bits of the register */
+			status = sys_reg_read(MDIO_PHY_CONTROL_0);
+			*read_data = (u32)(status & 0x0000FFFF);
+
+			enable_mdio(0);
+			rc = 1;
+			goto out;
+		} else if (time_after(jiffies, t_start + 5 * HZ)) {
+			enable_mdio(0);
+			rc = 0;
+			pr_err("\n MDIO Read operation Time Out!!\n");
+			goto out;
+		}
+	}
+out:
+	spin_unlock_irqrestore(&ei_local->mdio_lock, flags);
+	return rc;
+}
+
+/* Issue a Clause-22 MDIO write through MDIO_PHY_CONTROL_0.
+ * Returns 1 on success, 0 on timeout.  Serialised with
+ * ei_local->mdio_lock; busy-waits give up after 5 seconds.
+ */
+u32 __mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data)
+{
+	unsigned long t_start = jiffies;
+	u32 data;
+	u32 rc = 0;
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ei_local->mdio_lock, flags);
+	enable_mdio(1);
+
+	/* make sure previous write operation is complete */
+	while (1) {
+		if (!(sys_reg_read(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
+			break;
+		} else if (time_after(jiffies, t_start + 5 * HZ)) {
+			enable_mdio(0);
+			rc = 0;
+			pr_err("\n MDIO Write operation ongoing\n");
+			goto out;
+		}
+	}
+
+	/* compose command word with the data in the low 16 bits */
+	data = (0x01 << 16) | (1 << 18) | (phy_addr << 20) | (phy_register << 25) |
+	       write_data;
+	sys_reg_write(MDIO_PHY_CONTROL_0, data);
+	sys_reg_write(MDIO_PHY_CONTROL_0, (data | (1 << 31))); /*start*/
+	/* pr_err("\n Set Command [0x%08X] to PHY !!\n",MDIO_PHY_CONTROL_0); */
+
+	t_start = jiffies;
+
+	/* make sure write operation is complete */
+	while (1) {
+		if (!(sys_reg_read(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
+			enable_mdio(0);
+			rc = 1;
+			goto out;
+		} else if (time_after(jiffies, t_start + 5 * HZ)) {
+			enable_mdio(0);
+			rc = 0;
+			pr_err("\n MDIO Write operation Time Out\n");
+			goto out;
+		}
+	}
+out:
+	spin_unlock_irqrestore(&ei_local->mdio_lock, flags);
+	return rc;
+}
+
+/* Public MDIO read.  Returns 1 on success, 0 on failure.
+ * On forced-link GE1 architectures, phy_addr 31 addresses the internal
+ * switch, whose 32-bit registers are reached through a paged scheme:
+ * write the page (reg 0x1f), read low and high halves, and temporarily
+ * suspend PHY auto-polling around the sequence.
+ */
+u32 mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	u32 low_word;
+	u32 high_word;
+	u32 an_status = 0;
+
+	if ((ei_local->architecture &
+	     (GE1_RGMII_FORCE_1000 | GE1_TRGMII_FORCE_2000 |
+	      GE1_TRGMII_FORCE_2600)) && (phy_addr == 31)) {
+		/* remember polling state so it can be restored */
+		an_status = (*(unsigned long *)(ESW_PHY_POLLING) & (1 << 31));
+		if (an_status)
+			set_an_polling(0);
+		if (__mii_mgr_write
+		    (phy_addr, 0x1f, ((phy_register >> 6) & 0x3FF))) {
+			if (__mii_mgr_read
+			    (phy_addr, (phy_register >> 2) & 0xF, &low_word)) {
+				if (__mii_mgr_read
+				    (phy_addr, (0x1 << 4), &high_word)) {
+					*read_data =
+					    (high_word << 16) | (low_word &
+								 0xFFFF);
+					if (an_status)
+						set_an_polling(1);
+					return 1;
+				}
+			}
+		}
+		if (an_status)
+			set_an_polling(1);
+	} else {
+		/* plain Clause-22 PHY access */
+		if (__mii_mgr_read(phy_addr, phy_register, read_data))
+			return 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(mii_mgr_read);
+
+/* Public MDIO write, mirroring mii_mgr_read(): phy_addr 31 on forced-
+ * link GE1 architectures uses the paged switch-register scheme (page,
+ * low half, high half) with auto-polling suspended.  Returns 1 on
+ * success, 0 on failure.
+ */
+u32 mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	u32 an_status = 0;
+
+	if ((ei_local->architecture &
+	     (GE1_RGMII_FORCE_1000 | GE1_TRGMII_FORCE_2000 |
+	      GE1_TRGMII_FORCE_2600)) && (phy_addr == 31)) {
+		an_status = (*(unsigned long *)(ESW_PHY_POLLING) & (1 << 31));
+		if (an_status)
+			set_an_polling(0);
+		if (__mii_mgr_write
+		    (phy_addr, 0x1f, (phy_register >> 6) & 0x3FF)) {
+			if (__mii_mgr_write
+			    (phy_addr, ((phy_register >> 2) & 0xF),
+			     write_data & 0xFFFF)) {
+				if (__mii_mgr_write
+				    (phy_addr, (0x1 << 4), write_data >> 16)) {
+					if (an_status)
+						set_an_polling(1);
+					return 1;
+				}
+			}
+		}
+		if (an_status)
+			set_an_polling(1);
+	} else {
+		/* plain Clause-22 PHY access */
+		if (__mii_mgr_write(phy_addr, phy_register, write_data))
+			return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(mii_mgr_write);
+
+/* Clause-45 address phase: latch @reg_addr into @dev_addr of the PHY
+ * at @port_num before a read/write phase.  Returns 1 on success, 0 on
+ * timeout.  No locking here — the cl45 read/write callers in this file
+ * invoke it with ei_local->mdio_lock already held.
+ */
+u32 mii_mgr_cl45_set_address(u32 port_num, u32 dev_addr, u32 reg_addr)
+{
+	u32 rc = 0;
+	unsigned long t_start = jiffies;
+	u32 data = 0;
+
+	enable_mdio(1);
+
+	/* wait for the controller to go idle (busy bit 31 clear) */
+	while (1) {
+		if (!(sys_reg_read(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
+			break;
+		} else if (time_after(jiffies, t_start + 5 * HZ)) {
+			enable_mdio(0);
+			pr_err("\n MDIO Read operation is ongoing !!\n");
+			return rc;
+		}
+	}
+	data = (dev_addr << 25) | (port_num << 20) | (0x00 << 18) | (0x00 << 16) |
+	       reg_addr;
+	sys_reg_write(MDIO_PHY_CONTROL_0, data);
+	sys_reg_write(MDIO_PHY_CONTROL_0, (data | (1 << 31)));
+
+	/* wait for the address cycle to complete */
+	t_start = jiffies;
+	while (1) {
+		if (!(sys_reg_read(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
+			enable_mdio(0);
+			return 1;
+		} else if (time_after(jiffies, t_start + 5 * HZ)) {
+			enable_mdio(0);
+			pr_err("\n MDIO Write operation Time Out\n");
+			return 0;
+		}
+	}
+}
+
+/* Clause-45 MDIO read: address phase followed by a data-read phase.
+ * Returns 1 on success with the 16-bit result in *read_data, 0 on
+ * timeout.  Serialised with ei_local->mdio_lock.
+ */
+u32 mii_mgr_read_cl45(u32 port_num, u32 dev_addr, u32 reg_addr, u32 *read_data)
+{
+	u32 status = 0;
+	u32 rc = 0;
+	unsigned long t_start = jiffies;
+	u32 data = 0;
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ei_local->mdio_lock, flags);
+	/* set address first */
+	mii_mgr_cl45_set_address(port_num, dev_addr, reg_addr);
+	/* udelay(10); */
+
+	enable_mdio(1);
+
+	/* wait for the controller to go idle */
+	while (1) {
+		if (!(sys_reg_read(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
+			break;
+		} else if (time_after(jiffies, t_start + 5 * HZ)) {
+			enable_mdio(0);
+			rc = 0;
+			pr_err("\n MDIO Read operation is ongoing !!\n");
+			goto out;
+		}
+	}
+	data = (dev_addr << 25) | (port_num << 20) | (0x03 << 18) | (0x00 << 16) |
+	       reg_addr;
+	sys_reg_write(MDIO_PHY_CONTROL_0, data);
+	sys_reg_write(MDIO_PHY_CONTROL_0, (data | (1 << 31)));
+	t_start = jiffies;
+	while (1) {
+		if (!(sys_reg_read(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
+			*read_data =
+			    (sys_reg_read(MDIO_PHY_CONTROL_0) & 0x0000FFFF);
+			enable_mdio(0);
+			rc = 1;
+			goto out;
+		} else if (time_after(jiffies, t_start + 5 * HZ)) {
+			enable_mdio(0);
+			rc = 0;
+			pr_err("\n MDIO Read operation Time Out!!\n");
+			goto out;
+		}
+		/* NOTE(review): this trailing read appears redundant —
+		 * status is never consumed after the loop exits
+		 */
+		status = sys_reg_read(MDIO_PHY_CONTROL_0);
+	}
+out:
+	spin_unlock_irqrestore(&ei_local->mdio_lock, flags);
+	return rc;
+}
+
+/* Clause-45 MDIO write: address phase followed by a data-write phase.
+ * Returns 1 on success, 0 on timeout.  Serialised with
+ * ei_local->mdio_lock.
+ */
+u32 mii_mgr_write_cl45(u32 port_num, u32 dev_addr, u32 reg_addr, u32 write_data)
+{
+	u32 rc = 0;
+	unsigned long t_start = jiffies;
+	u32 data = 0;
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ei_local->mdio_lock, flags);
+	/* set address first */
+	mii_mgr_cl45_set_address(port_num, dev_addr, reg_addr);
+	/* udelay(10); */
+
+	enable_mdio(1);
+	/* wait for the controller to go idle */
+	while (1) {
+		if (!(sys_reg_read(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
+			break;
+		} else if (time_after(jiffies, t_start + 5 * HZ)) {
+			enable_mdio(0);
+			rc = 0;
+			pr_err("\n MDIO Read operation is ongoing !!\n");
+			goto out;
+		}
+	}
+
+	data = (dev_addr << 25) | (port_num << 20) | (0x01 << 18) | (0x00 << 16) |
+	       write_data;
+	sys_reg_write(MDIO_PHY_CONTROL_0, data);
+	sys_reg_write(MDIO_PHY_CONTROL_0, (data | (1 << 31)));
+
+	t_start = jiffies;
+
+	/* wait for the data cycle to complete */
+	while (1) {
+		if (!(sys_reg_read(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
+			enable_mdio(0);
+			rc = 1;
+			goto out;
+		} else if (time_after(jiffies, t_start + 5 * HZ)) {
+			enable_mdio(0);
+			rc = 0;
+			pr_err("\n MDIO Write operation Time Out\n");
+			goto out;
+		}
+	}
+out:
+	spin_unlock_irqrestore(&ei_local->mdio_lock, flags);
+	return rc;
+}
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/mii_mgr.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/mii_mgr.h
new file mode 100644
index 0000000..f8e0517
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/mii_mgr.h
@@ -0,0 +1,27 @@
+/* Copyright 2016 MediaTek Inc.
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/netdevice.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+#include "raether.h"
+
+/* global net_device registered by the raether driver core */
+extern struct net_device *dev_raether;
+
+/* MDIO control register: offset within the MAC register block */
+#define PHY_CONTROL_0 0x0004
+#define MDIO_PHY_CONTROL_0 (RALINK_ETH_MAC_BASE + PHY_CONTROL_0)
+/* intentionally a no-op on this platform; kept so callers need no #ifdef */
+#define enable_mdio(x)
+
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_dbg_hwioc.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_dbg_hwioc.c
new file mode 100644
index 0000000..1132903
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_dbg_hwioc.c
@@ -0,0 +1,306 @@
+/* Copyright 2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "raether.h"
+#include "ra_dbg_proc.h"
+
+#define MCSI_A_PMU_CTL 0x10390100 /* PMU CTRL */
+#define MCSI_A_PMU_CYC_CNT 0x10399004 /* Cycle counter */
+#define MCSI_A_PMU_CYC_CTL 0x10399008 /* Cycle counter CTRL */
+
+#define MCSI_A_PMU_EVN_SEL0 0x1039A000 /* EVENT SELECT 0 */
+#define MCSI_A_PMU_EVN_CNT0 0x1039A004 /* Event Count 0 */
+#define MCSI_A_PMU_EVN_CTL0 0x1039A008 /* Event Count control 0 */
+
+#define MCSI_A_PMU_EVN_SEL1 0x1039B000 /* EVENT SELECT 1 */
+#define MCSI_A_PMU_EVN_CNT1 0x1039B004 /* Event Count 1 */
+#define MCSI_A_PMU_EVN_CTL1 0x1039B008 /* Event Count control 1 */
+
+#define MCSI_A_PMU_EVN_SEL2 0x1039C000 /* EVENT SELECT 2 */
+#define MCSI_A_PMU_EVN_CNT2 0x1039C004 /* Event Count 2 */
+#define MCSI_A_PMU_EVN_CTL2 0x1039C008 /* Event Count control 2 */
+
+#define MCSI_A_PMU_EVN_SEL3 0x1039D000 /* EVENT SELECT 3 */
+#define MCSI_A_PMU_EVN_CNT3 0x1039D004 /* Event Count 3 */
+#define MCSI_A_PMU_EVN_CTL3 0x1039D008 /* Event Count control 3 */
+
+#define PMU_EVN_SEL_S0 (0x0 << 5)
+#define PMU_EVN_SEL_S1 (0x1 << 5)
+#define PMU_EVN_SEL_S2 (0x2 << 5)
+#define PMU_EVN_SEL_S3 (0x3 << 5)
+#define PMU_EVN_SEL_S4 (0x4 << 5)
+#define PMU_EVN_SEL_S5 (0x5 << 5)
+#define PMU_EVN_SEL_M0 (0x6 << 5)
+#define PMU_EVN_SEL_M1 (0x7 << 5)
+#define PMU_EVN_SEL_M2 (0x8 << 5)
+
+#define PMU_EVN_READ_ANY 0x0
+#define PMU_EVN_READ_SNOOP 0x3
+#define PMU_EVN_READ_HIT 0xA
+#define PMU_EVN_WRITE_ANY 0xC
+#define PMU_EVN_WU_SNOOP 0x10
+#define PMU_EVN_WLU_SNOOP 0x11
+
+#define PMU_0_SEL (PMU_EVN_SEL_S2 | PMU_EVN_READ_SNOOP)
+#define PMU_1_SEL (PMU_EVN_SEL_S2 | PMU_EVN_READ_HIT)
+#define PMU_2_SEL (PMU_EVN_SEL_S4 | PMU_EVN_READ_SNOOP)
+#define PMU_3_SEL (PMU_EVN_SEL_S4 | PMU_EVN_READ_HIT)
+
+#define MCSI_A_PMU_CTL_BASE MCSI_A_PMU_CTL
+#define MCSI_A_PMU_CNT0_BASE MCSI_A_PMU_EVN_SEL0
+#define MCSI_A_PMU_CNT1_BASE MCSI_A_PMU_EVN_SEL1
+#define MCSI_A_PMU_CNT2_BASE MCSI_A_PMU_EVN_SEL2
+#define MCSI_A_PMU_CNT3_BASE MCSI_A_PMU_EVN_SEL3
+
+/* dispatch signature for proc-write command handlers */
+typedef int (*IOC_SET_FUNC) (int par1, int par2, int par3);
+static struct proc_dir_entry *proc_hw_io_coherent;
+
+/* physical base address of each PMU event-counter register group,
+ * indexed by counter number 0..3
+ */
+unsigned int reg_pmu_evn_phys[] = {
+ MCSI_A_PMU_CNT0_BASE,
+ MCSI_A_PMU_CNT1_BASE,
+ MCSI_A_PMU_CNT2_BASE,
+ MCSI_A_PMU_CNT3_BASE,
+};
+
+/* ioc_pmu_cnt_config - select an event on one MCSI-A PMU counter and
+ * enable it.
+ * @pmu_no: counter index (0..3), used to index reg_pmu_evn_phys[]
+ * @interface: interface-select code, placed in SEL bits [7:5]
+ * @event: event code, placed in SEL bits [4:0]
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int ioc_pmu_cnt_config(int pmu_no, int interface, int event)
+{
+ void *reg_pmu_cnt;
+ unsigned int pmu_sel;
+
+ /* pmu_no arrives from the proc write path (user controlled);
+ * validate it before indexing the physical-address table.
+ */
+ if (pmu_no < 0 || pmu_no >= (int)ARRAY_SIZE(reg_pmu_evn_phys)) {
+ pr_err("invalid PMU counter index (%d)\n", pmu_no);
+ return -EINVAL;
+ }
+
+ reg_pmu_cnt = ioremap(reg_pmu_evn_phys[pmu_no], 0x10);
+ if (!reg_pmu_cnt)
+ return -ENOMEM;
+
+ /* Event Select Register
+ * bit[31:8] -> Reserved
+ * bit[7:5] -> Event code to define which interface to monitor
+ * bit[4:0] -> Event code to define which event to monitor
+ */
+ pmu_sel = (interface << 5) | event;
+ sys_reg_write(reg_pmu_cnt, pmu_sel);
+
+ /*Counter Control Registers
+ * bit[31:1] -> Reserved
+ * bit[0:0] -> Counter enable
+ */
+ sys_reg_write(reg_pmu_cnt + 0x8, 0x1);
+
+ iounmap(reg_pmu_cnt);
+
+ return 0;
+}
+
+/* ioc_pmu_ctl_config - global enable/disable of the MCSI-A PMU.
+ * @enable: non-zero -> reset all counters then enable; zero -> disable
+ * @ignore1, @ignore2: unused, present only to match IOC_SET_FUNC
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int ioc_pmu_ctl_config(int enable, int ignore1, int ignore2)
+{
+ void *reg_pmu_ctl;
+
+ reg_pmu_ctl = ioremap(MCSI_A_PMU_CTL_BASE, 0x10);
+ /* ioremap can fail; the original dereferenced the result blindly */
+ if (!reg_pmu_ctl)
+ return -ENOMEM;
+
+ /*Performance Monitor Control Register
+ * bit[31:16] -> Reserved
+ * bit[15:12] -> Specifies the number of counters implemented
+ * bit[11:6] -> Reserved
+ * bit[5:5] -> DP: Disables cycle counter
+ * bit[4:4] -> EX: Enable export of the events to the event bus
+ * bit[3:3] -> CCD: Cycle count divider
+ * bit[2:2] -> CCR: Cycle counter reset
+ * bit[1:1] -> RST: Performance counter reset
+ * bit[0:0] -> CEN: Enable bit
+ */
+ if (enable) {
+ /* pulse RST first, then set CEN */
+ sys_reg_write(reg_pmu_ctl, BIT(1));
+ sys_reg_write(reg_pmu_ctl, BIT(0));
+ } else {
+ sys_reg_write(reg_pmu_ctl, 0x0);
+ }
+
+ iounmap(reg_pmu_ctl);
+
+ return 0;
+}
+
+/* ioc_set_usage - print the command syntax of the hw_io_coherent proc
+ * node to the kernel log.  Registered as handler 0 and also invoked as
+ * the fallback for unknown commands.  Parameters are unused; they only
+ * satisfy the IOC_SET_FUNC signature.  Always returns 0.
+ */
+int ioc_set_usage(int ignore1, int ignore2, int ignore3)
+{
+ pr_info("<Usage> echo \"[OP Mode] [Arg1] [Arg2 | Arg3]\" > /proc/%s\n\r",
+ PROCREG_HW_IO_COHERENT);
+ pr_info("\tControl PMU counter: echo \"1 [Enable]\" > /proc/%s\n\r",
+ PROCREG_HW_IO_COHERENT);
+ pr_info("\t\t[Enable]:\n\r\t\t\t1: enable\n\r\t\t\t0: disable\n\r");
+ pr_info("\tConfigure PMU counter: echo \"2 [CNT No.] [IF] [EVN]\" > /proc/%s\n\r",
+ PROCREG_HW_IO_COHERENT);
+ pr_info("\t\t[CNT No.]: 0/1/2/3 PMU Counter\n\r");
+ pr_info("\t\t[IF]:\n\r");
+ pr_info("\t\t\t0: PMU_EVN_SEL_S0\n\r");
+ pr_info("\t\t\t1: PMU_EVN_SEL_S1\n\r");
+ pr_info("\t\t\t2: PMU_EVN_SEL_S2\n\r");
+ pr_info("\t\t\t3: PMU_EVN_SEL_S3\n\r");
+ pr_info("\t\t\t4: PMU_EVN_SEL_S4\n\r");
+ pr_info("\t\t\t5: PMU_EVN_SEL_S5\n\r");
+ pr_info("\t\t\t6: PMU_EVN_SEL_M0\n\r");
+ pr_info("\t\t\t7: PMU_EVN_SEL_M1\n\r");
+ pr_info("\t\t\t8: PMU_EVN_SEL_M2\n\r");
+ pr_info("\t\t[EVN]:\n\r");
+ pr_info("\t\t\t0: PMU_EVN_READ_ANY\n\r");
+ pr_info("\t\t\t3: PMU_EVN_READ_SNOOP\n\r");
+ pr_info("\t\t\tA: PMU_EVN_READ_HIT\n\r");
+ pr_info("\t\t\tC: PMU_EVN_WRITE_ANY\n\r");
+ pr_info("\t\t\t10: PMU_EVN_WU_SNOOP\n\r");
+ pr_info("\t\t\t11: PMU_EVN_WLU_SNOOP\n\r");
+
+ return 0;
+}
+
+/* proc-write dispatch table; index = first token of the command line */
+static const IOC_SET_FUNC iocoherent_set_func[] = {
+ [0] = ioc_set_usage,
+ [1] = ioc_pmu_ctl_config,
+ [2] = ioc_pmu_cnt_config,
+};
+
+/* ioc_pmu_write - proc write handler for the HW IO-coherence PMU node.
+ * Input is whitespace-separated hex tokens: "<op> [arg1] [arg2] [arg3]".
+ * op 1 enables/disables the PMU, op 2 programs one counter, anything
+ * else dumps the usage text.  Returns bytes consumed or negative errno.
+ */
+ssize_t ioc_pmu_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *data)
+{
+ char buf[32];
+ char *p_buf;
+ int len = count;
+ long arg0 = 0, arg1 = 0, arg2 = 0, arg3 = 0;
+ char *p_token = NULL;
+ char *p_delimiter = " \t";
+
+ /* compare count itself: assigning an oversized count to the int
+ * 'len' first could truncate before the check
+ */
+ if (count >= sizeof(buf)) {
+ pr_err("input handling fail!\n");
+ return -1;
+ }
+
+ if (copy_from_user(buf, buffer, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ pr_info("write parameter data = %s\n\r", buf);
+
+ p_buf = buf;
+ p_token = strsep(&p_buf, p_delimiter);
+ /* a missing or unparsable op falls through to the usage dump */
+ if (p_token && kstrtol(p_token, 16, &arg0))
+ arg0 = 0;
+
+ switch (arg0) {
+ case 1:
+ p_token = strsep(&p_buf, p_delimiter);
+ if (p_token && kstrtol(p_token, 16, &arg1))
+ arg1 = 0;
+ break;
+ case 2:
+ p_token = strsep(&p_buf, p_delimiter);
+ if (p_token && kstrtol(p_token, 16, &arg1))
+ arg1 = 0;
+ p_token = strsep(&p_buf, p_delimiter);
+ if (p_token && kstrtol(p_token, 16, &arg2))
+ arg2 = 0;
+ p_token = strsep(&p_buf, p_delimiter);
+ if (p_token && kstrtol(p_token, 16, &arg3))
+ arg3 = 0;
+ break;
+ }
+
+ /* Range-check arg0 BEFORE indexing: it is user controlled and the
+ * previous order read iocoherent_set_func[arg0] out of bounds for
+ * negative or too-large values.
+ */
+ if (arg0 >= 0 && arg0 < (long)ARRAY_SIZE(iocoherent_set_func) &&
+ iocoherent_set_func[arg0]) {
+ (*iocoherent_set_func[arg0]) (arg1, arg2, arg3);
+ } else {
+ pr_info("no handler defined for command id(0x%08lx)\n\r", arg0);
+ (*iocoherent_set_func[0]) (0, 0, 0);
+ }
+
+ return len;
+}
+
+/* ioc_pmu_read - seq_file show handler: dump the SEL/CNT/CTL registers
+ * of all four MCSI-A PMU counters.  Each counter group is remapped,
+ * read at offsets 0x0/0x4/0x8, then unmapped.  Always returns 0.
+ *
+ * NOTE(review): the four ioremap() return values are not checked before
+ * being dereferenced -- confirm failure here is impossible or add checks.
+ */
+int ioc_pmu_read(struct seq_file *seq, void *v)
+{
+ void __iomem *reg_virt_0, *reg_virt_1, *reg_virt_2, *reg_virt_3;
+
+ reg_virt_0 = ioremap(MCSI_A_PMU_EVN_SEL0, 0x10);
+ reg_virt_1 = ioremap(MCSI_A_PMU_EVN_SEL1, 0x10);
+ reg_virt_2 = ioremap(MCSI_A_PMU_EVN_SEL2, 0x10);
+ reg_virt_3 = ioremap(MCSI_A_PMU_EVN_SEL3, 0x10);
+
+ seq_printf(seq, "MCSI_A_PMU_EVN_SEL0 = 0x%x\n",
+ sys_reg_read(reg_virt_0));
+ seq_printf(seq, "MCSI_A_PMU_EVN_CNT0 = 0x%x\n",
+ sys_reg_read(reg_virt_0 + 0x4));
+ seq_printf(seq, "MCSI_A_PMU_EVN_CTL0 = 0x%x\n",
+ sys_reg_read(reg_virt_0 + 0x8));
+ seq_printf(seq, "MCSI_A_PMU_EVN_SEL1 = 0x%x\n",
+ sys_reg_read(reg_virt_1));
+ seq_printf(seq, "MCSI_A_PMU_EVN_CNT1 = 0x%x\n",
+ sys_reg_read(reg_virt_1 + 0x4));
+ seq_printf(seq, "MCSI_A_PMU_EVN_CTL1 = 0x%x\n",
+ sys_reg_read(reg_virt_1 + 0x8));
+
+ seq_printf(seq, "MCSI_A_PMU_EVN_SEL2 = 0x%x\n",
+ sys_reg_read(reg_virt_2));
+ seq_printf(seq, "MCSI_A_PMU_EVN_CNT2 = 0x%x\n",
+ sys_reg_read(reg_virt_2 + 0x4));
+ seq_printf(seq, "MCSI_A_PMU_EVN_CTL2 = 0x%x\n",
+ sys_reg_read(reg_virt_2 + 0x8));
+
+ seq_printf(seq, "MCSI_A_PMU_EVN_SEL3 = 0x%x\n",
+ sys_reg_read(reg_virt_3));
+ seq_printf(seq, "MCSI_A_PMU_EVN_CNT3 = 0x%x\n",
+ sys_reg_read(reg_virt_3 + 0x4));
+ seq_printf(seq, "MCSI_A_PMU_EVN_CTL3 = 0x%x\n",
+ sys_reg_read(reg_virt_3 + 0x8));
+
+ iounmap(reg_virt_0);
+ iounmap(reg_virt_1);
+ iounmap(reg_virt_2);
+ iounmap(reg_virt_3);
+ return 0;
+}
+
+/* proc open: bind the seq_file show routine for the PMU dump */
+static int ioc_pmu_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ioc_pmu_read, NULL);
+}
+
+/* file operations for /proc/<dir>/hw_io_coherent (read = register dump,
+ * write = command parser in ioc_pmu_write)
+ */
+static const struct file_operations ioc_pmu_fops = {
+ .owner = THIS_MODULE,
+ .open = ioc_pmu_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = ioc_pmu_write,
+ .release = single_release
+};
+
+/* Register the hw_io_coherent proc entry under @proc_reg_dir. */
+void hwioc_debug_proc_init(struct proc_dir_entry *proc_reg_dir)
+{
+ proc_hw_io_coherent = proc_create(PROCREG_HW_IO_COHERENT, 0,
+ proc_reg_dir, &ioc_pmu_fops);
+ if (proc_hw_io_coherent)
+ return;
+
+ pr_err("FAIL to create %s PROC!\n", PROCREG_HW_IO_COHERENT);
+}
+EXPORT_SYMBOL(hwioc_debug_proc_init);
+
+/* Remove the hw_io_coherent proc entry if it was created. */
+void hwioc_debug_proc_exit(struct proc_dir_entry *proc_reg_dir)
+{
+ if (!proc_hw_io_coherent)
+ return;
+
+ remove_proc_entry(PROCREG_HW_IO_COHERENT, proc_reg_dir);
+}
+EXPORT_SYMBOL(hwioc_debug_proc_exit);
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_dbg_hwlro.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_dbg_hwlro.c
new file mode 100644
index 0000000..1ecad66
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_dbg_hwlro.c
@@ -0,0 +1,629 @@
+/* Copyright 2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "raether.h"
+#include "raether_hwlro.h"
+#include "ra_dbg_proc.h"
+
+/* HW LRO proc */
+#define HW_LRO_RING_NUM 3
+#define MAX_HW_LRO_AGGR 64
+
+/* dispatch signature for hw_lro_auto_tlb proc-write handlers */
+typedef int (*HWLRO_DBG_FUNC) (int par1, int par2);
+/* per-ring histograms: aggregation count and 5000-byte size buckets */
+unsigned int hw_lro_agg_num_cnt[HW_LRO_RING_NUM][MAX_HW_LRO_AGGR + 1];
+unsigned int hw_lro_agg_size_cnt[HW_LRO_RING_NUM][16];
+unsigned int hw_lro_tot_agg_cnt[HW_LRO_RING_NUM];
+unsigned int hw_lro_tot_flush_cnt[HW_LRO_RING_NUM];
+
+/* HW LRO flush reason proc */
+#define HW_LRO_AGG_FLUSH (1)
+#define HW_LRO_AGE_FLUSH (2)
+#define HW_LRO_NOT_IN_SEQ_FLUSH (3)
+#define HW_LRO_TIMESTAMP_FLUSH (4)
+#define HW_LRO_NON_RULE_FLUSH (5)
+
+/* per-ring counters, one array per flush reason above */
+unsigned int hw_lro_agg_flush_cnt[HW_LRO_RING_NUM];
+unsigned int hw_lro_age_flush_cnt[HW_LRO_RING_NUM];
+unsigned int hw_lro_seq_flush_cnt[HW_LRO_RING_NUM];
+unsigned int hw_lro_timestamp_flush_cnt[HW_LRO_RING_NUM];
+unsigned int hw_lro_norule_flush_cnt[HW_LRO_RING_NUM];
+
+static struct proc_dir_entry *proc_rx_ring1, *proc_rx_ring2, *proc_rx_ring3;
+static struct proc_dir_entry *proc_hw_lro_stats, *proc_hw_lro_auto_tlb;
+
+/* Dump all NUM_LRO_RX_DESC descriptors of one LRO RX ring to a seq
+ * file, one "index: info1 info2 info3 info4" line per descriptor.
+ * Always returns 0 (even on allocation failure, which is reported
+ * into the seq file instead).
+ */
+int rx_lro_ring_read(struct seq_file *seq, void *v,
+ struct PDMA_rxdesc *rx_ring_p)
+{
+ struct PDMA_rxdesc *snapshot;
+ int idx;
+
+ snapshot =
+ kmalloc(sizeof(struct PDMA_rxdesc) * NUM_LRO_RX_DESC, GFP_KERNEL);
+ if (!snapshot) {
+ seq_puts(seq, " allocate temp rx_ring fail.\n");
+ return 0;
+ }
+
+ /* copy the live ring before printing so the dump is consistent */
+ memcpy(snapshot, rx_ring_p,
+ sizeof(struct PDMA_rxdesc) * NUM_LRO_RX_DESC);
+
+ for (idx = 0; idx < NUM_LRO_RX_DESC; idx++) {
+ seq_printf(seq, "%d: %08x %08x %08x %08x\n", idx,
+ *(int *)&snapshot[idx].rxd_info1,
+ *(int *)&snapshot[idx].rxd_info2,
+ *(int *)&snapshot[idx].rxd_info3,
+ *(int *)&snapshot[idx].rxd_info4);
+ }
+
+ kfree(snapshot);
+ return 0;
+}
+
+/* seq_file show handler: dump HW LRO RX ring 1. */
+int rx_ring1_read(struct seq_file *seq, void *v)
+{
+ struct END_DEVICE *ei = netdev_priv(dev_raether);
+
+ rx_lro_ring_read(seq, v, ei->rx_ring[1]);
+
+ return 0;
+}
+
+/* seq_file show handler: dump HW LRO RX ring 2. */
+int rx_ring2_read(struct seq_file *seq, void *v)
+{
+ struct END_DEVICE *ei = netdev_priv(dev_raether);
+
+ rx_lro_ring_read(seq, v, ei->rx_ring[2]);
+
+ return 0;
+}
+
+/* seq_file show handler: dump HW LRO RX ring 3. */
+int rx_ring3_read(struct seq_file *seq, void *v)
+{
+ struct END_DEVICE *ei = netdev_priv(dev_raether);
+
+ rx_lro_ring_read(seq, v, ei->rx_ring[3]);
+
+ return 0;
+}
+
+/* proc open: bind ring-1 dump to the seq_file interface */
+static int rx_ring1_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, rx_ring1_read, NULL);
+}
+
+/* proc open: bind ring-2 dump to the seq_file interface */
+static int rx_ring2_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, rx_ring2_read, NULL);
+}
+
+/* proc open: bind ring-3 dump to the seq_file interface */
+static int rx_ring3_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, rx_ring3_read, NULL);
+}
+
+/* read-only proc interface for the ring-1 descriptor dump */
+static const struct file_operations rx_ring1_fops = {
+ .owner = THIS_MODULE,
+ .open = rx_ring1_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+
+/* read-only proc interface for the ring-2 descriptor dump */
+static const struct file_operations rx_ring2_fops = {
+ .owner = THIS_MODULE,
+ .open = rx_ring2_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+
+/* read-only proc interface for the ring-3 descriptor dump */
+static const struct file_operations rx_ring3_fops = {
+ .owner = THIS_MODULE,
+ .open = rx_ring3_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+
+/* hw_lro_len_update - bucket an aggregated flush length into one of 14
+ * histogram bins.  Bin i covers lengths in (i*5000, (i+1)*5000]; any
+ * length above 65000 lands in bin 13; length 0 lands in bin 0.
+ */
+static int hw_lro_len_update(unsigned int agg_size)
+{
+ int len_idx;
+
+ if (agg_size == 0)
+ return 0;
+
+ /* (size-1)/5000 makes each bucket's upper bound inclusive,
+ * matching the original strict ">" ladder
+ */
+ len_idx = (agg_size - 1) / 5000;
+ if (len_idx > 13)
+ len_idx = 13;
+
+ return len_idx;
+}
+
+/* hw_lro_stats_update - account one flushed LRO burst on rings 1..3.
+ * @ring_num: hardware ring index (only 1..3 are counted)
+ * @rx_ring: descriptor whose rxd_info2 carries LRO_AGG_CNT and the
+ * split payload length (PLEN1:PLEN0, PLEN1 is the high part)
+ * Updates the size histogram, the per-count histogram, and the
+ * flush/aggregate totals for the ring.
+ */
+void hw_lro_stats_update(unsigned int ring_num, struct PDMA_rxdesc *rx_ring)
+{
+ unsigned int agg_cnt = rx_ring->rxd_info2.LRO_AGG_CNT;
+ unsigned int agg_size = (rx_ring->rxd_info2.PLEN1 << 14) |
+ rx_ring->rxd_info2.PLEN0;
+
+ if ((ring_num > 0) && (ring_num < 4)) {
+ hw_lro_agg_size_cnt[ring_num - 1]
+ [hw_lro_len_update(agg_size)]++;
+ hw_lro_agg_num_cnt[ring_num - 1][agg_cnt]++;
+ hw_lro_tot_flush_cnt[ring_num - 1]++;
+ hw_lro_tot_agg_cnt[ring_num - 1] += agg_cnt;
+ }
+}
+
+/* hw_lro_flush_stats_update - count why the HW flushed an LRO burst.
+ * @ring_num: hardware ring index (only 1..3 are counted)
+ * @rx_ring: descriptor whose rxd_info2.REV low 3 bits encode the
+ * flush reason (HW_LRO_*_FLUSH)
+ */
+void hw_lro_flush_stats_update(unsigned int ring_num,
+ struct PDMA_rxdesc *rx_ring)
+{
+ unsigned int flush_reason = rx_ring->rxd_info2.REV;
+
+ if (ring_num == 0 || ring_num >= 4)
+ return;
+
+ switch (flush_reason & 0x7) {
+ case HW_LRO_AGG_FLUSH:
+ hw_lro_agg_flush_cnt[ring_num - 1]++;
+ break;
+ case HW_LRO_AGE_FLUSH:
+ hw_lro_age_flush_cnt[ring_num - 1]++;
+ break;
+ case HW_LRO_NOT_IN_SEQ_FLUSH:
+ hw_lro_seq_flush_cnt[ring_num - 1]++;
+ break;
+ case HW_LRO_TIMESTAMP_FLUSH:
+ hw_lro_timestamp_flush_cnt[ring_num - 1]++;
+ break;
+ case HW_LRO_NON_RULE_FLUSH:
+ hw_lro_norule_flush_cnt[ring_num - 1]++;
+ break;
+ default:
+ /* unknown reason codes are ignored, as before */
+ break;
+ }
+}
+EXPORT_SYMBOL(hw_lro_flush_stats_update);
+
+/* hw_lro_stats_write - proc write handler: any write clears every HW
+ * LRO statistics table (histograms, totals and flush-reason counters).
+ * The written payload is ignored; returns @count so the write appears
+ * fully consumed.
+ */
+ssize_t hw_lro_stats_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *data)
+{
+ memset(hw_lro_agg_num_cnt, 0, sizeof(hw_lro_agg_num_cnt));
+ memset(hw_lro_agg_size_cnt, 0, sizeof(hw_lro_agg_size_cnt));
+ memset(hw_lro_tot_agg_cnt, 0, sizeof(hw_lro_tot_agg_cnt));
+ memset(hw_lro_tot_flush_cnt, 0, sizeof(hw_lro_tot_flush_cnt));
+ memset(hw_lro_agg_flush_cnt, 0, sizeof(hw_lro_agg_flush_cnt));
+ memset(hw_lro_age_flush_cnt, 0, sizeof(hw_lro_age_flush_cnt));
+ memset(hw_lro_seq_flush_cnt, 0, sizeof(hw_lro_seq_flush_cnt));
+ memset(hw_lro_timestamp_flush_cnt, 0,
+ sizeof(hw_lro_timestamp_flush_cnt));
+ memset(hw_lro_norule_flush_cnt, 0, sizeof(hw_lro_norule_flush_cnt));
+
+ pr_info("clear hw lro cnt table\n");
+
+ return count;
+}
+
+/* hw_lro_stats_read - seq_file show handler: render all HW LRO
+ * statistics (per-ring aggregation histograms, totals, averages,
+ * length buckets, and -- when FE_HW_LRO_DBG is enabled -- the
+ * flush-reason breakdown).  Always returns 0.
+ */
+int hw_lro_stats_read(struct seq_file *seq, void *v)
+{
+ int i;
+ struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+ seq_puts(seq, "HW LRO statistic dump:\n");
+
+ /* Agg number count */
+ seq_puts(seq, "Cnt: RING1 | RING2 | RING3 | Total\n");
+ for (i = 0; i <= MAX_HW_LRO_AGGR; i++) {
+ seq_printf(seq, " %d : %d %d %d %d\n",
+ i, hw_lro_agg_num_cnt[0][i],
+ hw_lro_agg_num_cnt[1][i], hw_lro_agg_num_cnt[2][i],
+ hw_lro_agg_num_cnt[0][i] + hw_lro_agg_num_cnt[1][i] +
+ hw_lro_agg_num_cnt[2][i]);
+ }
+
+ /* Total agg count */
+ seq_puts(seq, "Total agg: RING1 | RING2 | RING3 | Total\n");
+ seq_printf(seq, " %d %d %d %d\n",
+ hw_lro_tot_agg_cnt[0], hw_lro_tot_agg_cnt[1],
+ hw_lro_tot_agg_cnt[2],
+ hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] +
+ hw_lro_tot_agg_cnt[2]);
+
+ /* Total flush count */
+ seq_puts(seq, "Total flush: RING1 | RING2 | RING3 | Total\n");
+ seq_printf(seq, " %d %d %d %d\n",
+ hw_lro_tot_flush_cnt[0], hw_lro_tot_flush_cnt[1],
+ hw_lro_tot_flush_cnt[2],
+ hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
+ hw_lro_tot_flush_cnt[2]);
+
+ /* Avg agg count; every divisor is guarded against zero */
+ seq_puts(seq, "Avg agg: RING1 | RING2 | RING3 | Total\n");
+ seq_printf(seq, " %d %d %d %d\n",
+ (hw_lro_tot_flush_cnt[0]) ? hw_lro_tot_agg_cnt[0] /
+ hw_lro_tot_flush_cnt[0] : 0,
+ (hw_lro_tot_flush_cnt[1]) ? hw_lro_tot_agg_cnt[1] /
+ hw_lro_tot_flush_cnt[1] : 0,
+ (hw_lro_tot_flush_cnt[2]) ? hw_lro_tot_agg_cnt[2] /
+ hw_lro_tot_flush_cnt[2] : 0,
+ (hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] +
+ hw_lro_tot_flush_cnt[2]) ? ((hw_lro_tot_agg_cnt[0] +
+ hw_lro_tot_agg_cnt[1] +
+ hw_lro_tot_agg_cnt[2]) /
+ (hw_lro_tot_flush_cnt[0] +
+ hw_lro_tot_flush_cnt[1] +
+ hw_lro_tot_flush_cnt[2])) : 0);
+
+ /* Statistics of aggregation size counts */
+ seq_puts(seq, "HW LRO flush pkt len:\n");
+ seq_puts(seq, " Length | RING1 | RING2 | RING3 | Total\n");
+ for (i = 0; i < 15; i++) {
+ seq_printf(seq, "%d~%d: %d %d %d %d\n", i * 5000,
+ (i + 1) * 5000, hw_lro_agg_size_cnt[0][i],
+ hw_lro_agg_size_cnt[1][i], hw_lro_agg_size_cnt[2][i],
+ hw_lro_agg_size_cnt[0][i] +
+ hw_lro_agg_size_cnt[1][i] +
+ hw_lro_agg_size_cnt[2][i]);
+ }
+
+ /* CONFIG_RAETH_HW_LRO_REASON_DBG */
+ if (ei_local->features & FE_HW_LRO_DBG) {
+ seq_puts(seq, "Flush reason: RING1 | RING2 | RING3 | Total\n");
+ seq_printf(seq, "AGG timeout: %d %d %d %d\n",
+ hw_lro_agg_flush_cnt[0], hw_lro_agg_flush_cnt[1],
+ hw_lro_agg_flush_cnt[2],
+ (hw_lro_agg_flush_cnt[0] + hw_lro_agg_flush_cnt[1] +
+ hw_lro_agg_flush_cnt[2])
+ );
+ seq_printf(seq, "AGE timeout: %d %d %d %d\n",
+ hw_lro_age_flush_cnt[0], hw_lro_age_flush_cnt[1],
+ hw_lro_age_flush_cnt[2],
+ (hw_lro_age_flush_cnt[0] + hw_lro_age_flush_cnt[1] +
+ hw_lro_age_flush_cnt[2])
+ );
+ seq_printf(seq, "Not in-sequence: %d %d %d %d\n",
+ hw_lro_seq_flush_cnt[0], hw_lro_seq_flush_cnt[1],
+ hw_lro_seq_flush_cnt[2],
+ (hw_lro_seq_flush_cnt[0] + hw_lro_seq_flush_cnt[1] +
+ hw_lro_seq_flush_cnt[2])
+ );
+ seq_printf(seq, "Timestamp: %d %d %d %d\n",
+ hw_lro_timestamp_flush_cnt[0],
+ hw_lro_timestamp_flush_cnt[1],
+ hw_lro_timestamp_flush_cnt[2],
+ (hw_lro_timestamp_flush_cnt[0] +
+ hw_lro_timestamp_flush_cnt[1] +
+ hw_lro_timestamp_flush_cnt[2])
+ );
+ seq_printf(seq, "No LRO rule: %d %d %d %d\n",
+ hw_lro_norule_flush_cnt[0],
+ hw_lro_norule_flush_cnt[1],
+ hw_lro_norule_flush_cnt[2],
+ (hw_lro_norule_flush_cnt[0] +
+ hw_lro_norule_flush_cnt[1] +
+ hw_lro_norule_flush_cnt[2])
+ );
+ }
+
+ return 0;
+}
+
+/* proc open: bind the LRO statistics dump to the seq_file interface */
+static int hw_lro_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hw_lro_stats_read, NULL);
+}
+
+/* proc interface for LRO statistics (read = dump, write = clear) */
+static const struct file_operations hw_lro_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = hw_lro_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = hw_lro_stats_write,
+ .release = single_release
+};
+
+/* Set the maximum per-flow aggregation count on all three LRO rings.
+ * @par1 is unused (dispatch-table signature); @par2 is the new count.
+ */
+int hwlro_agg_cnt_ctrl(int par1, int par2)
+{
+ SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING1, par2);
+ SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING2, par2);
+ SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING3, par2);
+ return 0;
+}
+
+/* Set the aggregation timeout on all three LRO rings.
+ * @par1 is unused; @par2 is the new aggregation time.
+ */
+int hwlro_agg_time_ctrl(int par1, int par2)
+{
+ SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING1, par2);
+ SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING2, par2);
+ SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING3, par2);
+ return 0;
+}
+
+/* Set the flow age-out time on all three LRO rings.
+ * @par1 is unused; @par2 is the new age time.
+ */
+int hwlro_age_time_ctrl(int par1, int par2)
+{
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, par2);
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, par2);
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, par2);
+ return 0;
+}
+
+/* Set the global LRO bandwidth threshold.
+ * @par1 is unused; @par2 is the new threshold value.
+ */
+int hwlro_threshold_ctrl(int par1, int par2)
+{
+ /* bandwidth threshold setting */
+ SET_PDMA_LRO_BW_THRESHOLD(par2);
+ return 0;
+}
+
+/* Enable (@par2 non-zero) or disable (@par2 == 0) all four HW LRO RX
+ * rings at once.  @par1 is unused (dispatch-table signature).
+ */
+int hwlro_ring_enable_ctrl(int par1, int par2)
+{
+ int valid = par2 ? 1 : 0;
+
+ if (valid)
+ pr_info("[hwlro_ring_enable_ctrl]Enable HW LRO rings\n");
+ else
+ pr_info("[hwlro_ring_enable_ctrl]Disable HW LRO rings\n");
+
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING0, valid);
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING1, valid);
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING2, valid);
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING3, valid);
+
+ return 0;
+}
+
+/* auto_tlb proc-write dispatch table; index = first token of the input */
+static const HWLRO_DBG_FUNC hw_lro_dbg_func[] = {
+ [0] = hwlro_agg_cnt_ctrl,
+ [1] = hwlro_agg_time_ctrl,
+ [2] = hwlro_age_time_ctrl,
+ [3] = hwlro_threshold_ctrl,
+ [4] = hwlro_ring_enable_ctrl,
+};
+
+/* hw_lro_auto_tlb_write - proc write handler for HW LRO tuning.
+ * Input is two decimal tokens: "<func> <value>", where <func> selects
+ * an entry of hw_lro_dbg_func[] (agg cnt / agg time / age time /
+ * threshold / ring enable).  Returns bytes consumed or negative errno.
+ */
+ssize_t hw_lro_auto_tlb_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *data)
+{
+ char buf[32];
+ char *p_buf;
+ int len = count;
+ long x = 0, y = 0;
+ char *p_token = NULL;
+ char *p_delimiter = " \t";
+
+ pr_info("[hw_lro_auto_tlb_write]write parameter len = %d\n\r",
+ (int)len);
+ /* check count itself; assigning it to the int 'len' first could
+ * truncate before the bound test
+ */
+ if (count >= sizeof(buf)) {
+ pr_info("input handling fail!\n");
+ return -1;
+ }
+
+ if (copy_from_user(buf, buffer, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ pr_info("[hw_lro_auto_tlb_write]write parameter data = %s\n\r", buf);
+
+ p_buf = buf;
+ p_token = strsep(&p_buf, p_delimiter);
+ /* missing/unparsable function index defaults to 0 */
+ if (p_token && kstrtol(p_token, 10, &x))
+ x = 0;
+
+ p_token = strsep(&p_buf, "\t\n ");
+ if (p_token) {
+ if (kstrtol(p_token, 10, &y))
+ y = 0;
+ pr_info("y = %ld\n\r", y);
+ }
+
+ /* Range-check x BEFORE indexing: it is user controlled and the
+ * previous order read hw_lro_dbg_func[x] out of bounds for
+ * negative or too-large values.
+ */
+ if (x >= 0 && x < (long)ARRAY_SIZE(hw_lro_dbg_func) &&
+ hw_lro_dbg_func[x]) {
+ (*hw_lro_dbg_func[x]) (x, y);
+ }
+
+ return count;
+}
+
+/* hw_lro_auto_tlb_dump - print one entry of the LRO auto-learn table.
+ * @index: table slot as reported by the valid-entry bitmap.
+ *
+ * Reads the entry (9 words) through the CF8/CFC indirect window,
+ * unpacks it and prints flow key (IPs, ports, VLANs), byte/packet
+ * counters and the priority metric selected by ALT_SCORE_MODE.
+ *
+ * NOTE(review): indexes above 4 are shifted down by one before the
+ * register access and then labelled "(Act)" vs "(LRU)" -- presumably
+ * slots 0-3 are the LRU section and 5-8 the active section of the
+ * table; confirm against the NETSYS programming guide.
+ */
+void hw_lro_auto_tlb_dump(struct seq_file *seq, unsigned int index)
+{
+ int i;
+ struct PDMA_LRO_AUTO_TLB_INFO pdma_lro_auto_tlb;
+ unsigned int tlb_info[9];
+ unsigned int dw_len, cnt, priority;
+ unsigned int entry;
+
+ if (index > 4)
+ index = index - 1;
+ entry = (index * 9) + 1;
+
+ /* read valid entries of the auto-learn table */
+ sys_reg_write(PDMA_FE_ALT_CF8, entry);
+
+ /* seq_printf(seq, "\nEntry = %d\n", entry); */
+ /* CFC auto-increments: nine consecutive reads fetch the whole entry */
+ for (i = 0; i < 9; i++) {
+ tlb_info[i] = sys_reg_read(PDMA_FE_ALT_SEQ_CFC);
+ /* seq_printf(seq, "tlb_info[%d] = 0x%x\n", i, tlb_info[i]); */
+ }
+ memcpy(&pdma_lro_auto_tlb, tlb_info,
+ sizeof(struct PDMA_LRO_AUTO_TLB_INFO));
+
+ dw_len = pdma_lro_auto_tlb.auto_tlb_info7.DW_LEN;
+ cnt = pdma_lro_auto_tlb.auto_tlb_info6.CNT;
+
+ /* score mode decides whether flows compete by packets or by bytes */
+ if (sys_reg_read(ADMA_LRO_CTRL_DW0) & PDMA_LRO_ALT_SCORE_MODE)
+ priority = cnt; /* packet count */
+ else
+ priority = dw_len; /* byte count */
+
+ /* dump valid entries of the auto-learn table */
+ if (index >= 4)
+ seq_printf(seq, "\n===== TABLE Entry: %d (Act) =====\n", index);
+ else
+ seq_printf(seq, "\n===== TABLE Entry: %d (LRU) =====\n", index);
+ if (pdma_lro_auto_tlb.auto_tlb_info8.IPV4) {
+ seq_printf(seq, "SIP = 0x%x:0x%x:0x%x:0x%x (IPv4)\n",
+ pdma_lro_auto_tlb.auto_tlb_info4.SIP3,
+ pdma_lro_auto_tlb.auto_tlb_info3.SIP2,
+ pdma_lro_auto_tlb.auto_tlb_info2.SIP1,
+ pdma_lro_auto_tlb.auto_tlb_info1.SIP0);
+ } else {
+ seq_printf(seq, "SIP = 0x%x:0x%x:0x%x:0x%x (IPv6)\n",
+ pdma_lro_auto_tlb.auto_tlb_info4.SIP3,
+ pdma_lro_auto_tlb.auto_tlb_info3.SIP2,
+ pdma_lro_auto_tlb.auto_tlb_info2.SIP1,
+ pdma_lro_auto_tlb.auto_tlb_info1.SIP0);
+ }
+ seq_printf(seq, "DIP_ID = %d\n",
+ pdma_lro_auto_tlb.auto_tlb_info8.DIP_ID);
+ seq_printf(seq, "TCP SPORT = %d | TCP DPORT = %d\n",
+ pdma_lro_auto_tlb.auto_tlb_info0.STP,
+ pdma_lro_auto_tlb.auto_tlb_info0.DTP);
+ seq_printf(seq, "VLAN_VID_VLD = %d\n",
+ pdma_lro_auto_tlb.auto_tlb_info6.VLAN_VID_VLD);
+ /* four 12-bit VIDs are packed across the VLAN_VID0/VLAN_VID1 words */
+ seq_printf(seq, "VLAN1 = %d | VLAN2 = %d | VLAN3 = %d | VLAN4 =%d\n",
+ (pdma_lro_auto_tlb.auto_tlb_info5.VLAN_VID0 & 0xfff),
+ ((pdma_lro_auto_tlb.auto_tlb_info5.VLAN_VID0 >> 12) & 0xfff),
+ ((pdma_lro_auto_tlb.auto_tlb_info6.VLAN_VID1 << 8) |
+ ((pdma_lro_auto_tlb.auto_tlb_info5.VLAN_VID0 >> 24)
+ & 0xfff)),
+ ((pdma_lro_auto_tlb.auto_tlb_info6.VLAN_VID1 >> 4) & 0xfff));
+ seq_printf(seq, "TPUT = %d | FREQ = %d\n", dw_len, cnt);
+ seq_printf(seq, "PRIORITY = %d\n", priority);
+}
+
+/* hw_lro_auto_tlb_read - seq_file show handler: print the write-side
+ * usage help, dump every valid auto-learn table entry (walking the
+ * valid bitmap from bit 7 down to 0), then decode the per-ring
+ * agg/age timing registers.  Always returns 0.
+ */
+int hw_lro_auto_tlb_read(struct seq_file *seq, void *v)
+{
+ int i;
+ unsigned int reg_val;
+ unsigned int reg_op1, reg_op2, reg_op3, reg_op4;
+ unsigned int agg_cnt, agg_time, age_time;
+
+ seq_puts(seq, "Usage of /proc/mt76xx/hw_lro_auto_tlb:\n");
+ seq_puts(seq, "echo [function] [setting] > /proc/mt76xx/hw_lro_auto_tlb\n");
+ seq_puts(seq, "Functions:\n");
+ seq_puts(seq, "[0] = hwlro_agg_cnt_ctrl\n");
+ seq_puts(seq, "[1] = hwlro_agg_time_ctrl\n");
+ seq_puts(seq, "[2] = hwlro_age_time_ctrl\n");
+ seq_puts(seq, "[3] = hwlro_threshold_ctrl\n");
+ seq_puts(seq, "[4] = hwlro_ring_enable_ctrl\n\n");
+
+ /* Read valid entries of the auto-learn table */
+ sys_reg_write(PDMA_FE_ALT_CF8, 0);
+ reg_val = sys_reg_read(PDMA_FE_ALT_SEQ_CFC);
+
+ seq_printf(seq,
+ "HW LRO Auto-learn Table: (PDMA_LRO_ALT_CFC_RSEQ_DBG=0x%x)\n",
+ reg_val);
+
+ /* each set bit marks a valid table entry */
+ for (i = 7; i >= 0; i--) {
+ if (reg_val & (1 << i))
+ hw_lro_auto_tlb_dump(seq, i);
+ }
+
+ /* Read the agg_time/age_time/agg_cnt of LRO rings */
+ seq_puts(seq, "\nHW LRO Ring Settings\n");
+ for (i = 1; i <= 3; i++) {
+ /* each ring's control block is 0x40 bytes apart */
+ reg_op1 = sys_reg_read(LRO_RX_RING0_CTRL_DW1 + (i * 0x40));
+ reg_op2 = sys_reg_read(LRO_RX_RING0_CTRL_DW2 + (i * 0x40));
+ reg_op3 = sys_reg_read(LRO_RX_RING0_CTRL_DW3 + (i * 0x40));
+ reg_op4 = sys_reg_read(ADMA_LRO_CTRL_DW2);
+ /* fields are split across DW boundaries; stitch high/low parts */
+ agg_cnt =
+ ((reg_op3 & 0x03) << PDMA_LRO_AGG_CNT_H_OFFSET) |
+ ((reg_op2 >> PDMA_LRO_RING_AGG_CNT1_OFFSET) & 0x3f);
+ agg_time = (reg_op2 >> PDMA_LRO_RING_AGG_OFFSET) & 0xffff;
+ age_time =
+ ((reg_op2 & 0x03f) << PDMA_LRO_AGE_H_OFFSET) |
+ ((reg_op1 >> PDMA_LRO_RING_AGE1_OFFSET) & 0x3ff);
+ seq_printf(seq,
+ "Ring[%d]: MAX_AGG_CNT=%d, AGG_TIME=%d, AGE_TIME=%d, Threshold=%d\n",
+ i, agg_cnt, agg_time, age_time, reg_op4);
+ }
+ seq_puts(seq, "\n");
+
+ return 0;
+}
+
+/* proc open: bind the auto-learn table dump to the seq_file interface */
+static int hw_lro_auto_tlb_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hw_lro_auto_tlb_read, NULL);
+}
+
+/* proc interface for the auto-learn table (read = dump, write = tune) */
+static const struct file_operations hw_lro_auto_tlb_fops = {
+ .owner = THIS_MODULE,
+ .open = hw_lro_auto_tlb_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = hw_lro_auto_tlb_write,
+ .release = single_release
+};
+
+/* hwlro_debug_proc_init - create all HW LRO debug proc entries under
+ * @proc_reg_dir (ring 1-3 dumps, statistics, auto-learn table).
+ * Failures are logged but not fatal; always returns 0.
+ */
+int hwlro_debug_proc_init(struct proc_dir_entry *proc_reg_dir)
+{
+ proc_rx_ring1 =
+ proc_create(PROCREG_RXRING1, 0, proc_reg_dir, &rx_ring1_fops);
+ if (!proc_rx_ring1)
+ pr_info("!! FAIL to create %s PROC !!\n", PROCREG_RXRING1);
+
+ proc_rx_ring2 =
+ proc_create(PROCREG_RXRING2, 0, proc_reg_dir, &rx_ring2_fops);
+ if (!proc_rx_ring2)
+ pr_info("!! FAIL to create %s PROC !!\n", PROCREG_RXRING2);
+
+ proc_rx_ring3 =
+ proc_create(PROCREG_RXRING3, 0, proc_reg_dir, &rx_ring3_fops);
+ if (!proc_rx_ring3)
+ pr_info("!! FAIL to create %s PROC !!\n", PROCREG_RXRING3);
+
+ proc_hw_lro_stats =
+ proc_create(PROCREG_HW_LRO_STATS, 0, proc_reg_dir,
+ &hw_lro_stats_fops);
+ if (!proc_hw_lro_stats)
+ pr_info("!! FAIL to create %s PROC !!\n", PROCREG_HW_LRO_STATS);
+
+ proc_hw_lro_auto_tlb =
+ proc_create(PROCREG_HW_LRO_AUTO_TLB, 0, proc_reg_dir,
+ &hw_lro_auto_tlb_fops);
+ if (!proc_hw_lro_auto_tlb)
+ pr_info("!! FAIL to create %s PROC !!\n",
+ PROCREG_HW_LRO_AUTO_TLB);
+
+ return 0;
+}
+EXPORT_SYMBOL(hwlro_debug_proc_init);
+
+/* Remove every proc entry registered by hwlro_debug_proc_init(),
+ * skipping any that failed to be created.
+ */
+void hwlro_debug_proc_exit(struct proc_dir_entry *proc_reg_dir)
+{
+ const struct {
+ struct proc_dir_entry *entry;
+ const char *name;
+ } entries[] = {
+ { proc_rx_ring1, PROCREG_RXRING1 },
+ { proc_rx_ring2, PROCREG_RXRING2 },
+ { proc_rx_ring3, PROCREG_RXRING3 },
+ { proc_hw_lro_stats, PROCREG_HW_LRO_STATS },
+ { proc_hw_lro_auto_tlb, PROCREG_HW_LRO_AUTO_TLB },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(entries); i++) {
+ if (entries[i].entry)
+ remove_proc_entry(entries[i].name, proc_reg_dir);
+ }
+}
+EXPORT_SYMBOL(hwlro_debug_proc_exit);
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_dbg_proc.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_dbg_proc.c
new file mode 100644
index 0000000..468dc84
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_dbg_proc.c
@@ -0,0 +1,1672 @@
+/* Copyright 2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "raether.h"
+#include "ra_dbg_proc.h"
+#include "ra_ethtool.h"
+
+int txd_cnt[MAX_SKB_FRAGS / 2 + 1];
+int tso_cnt[16];
+
+#define MAX_AGGR 64
+#define MAX_DESC 8
+int lro_stats_cnt[MAX_AGGR + 1];
+int lro_flush_cnt[MAX_AGGR + 1];
+int lro_len_cnt1[16];
+/* int lro_len_cnt2[16]; */
+int aggregated[MAX_DESC];
+int lro_aggregated;
+int lro_flushed;
+int lro_nodesc;
+int force_flush;
+int tot_called1;
+int tot_called2;
+
+struct raeth_int_t raeth_int;
+struct proc_dir_entry *proc_reg_dir;
+static struct proc_dir_entry *proc_gmac, *proc_sys_cp0, *proc_tx_ring,
+*proc_rx_ring, *proc_skb_free;
+static struct proc_dir_entry *proc_gmac2;
+static struct proc_dir_entry *proc_ra_snmp;
+static struct proc_dir_entry *proc_num_of_txd, *proc_tso_len;
+static struct proc_dir_entry *proc_sche;
+static struct proc_dir_entry *proc_int_dbg;
+static struct proc_dir_entry *proc_set_lan_ip;
+/*extern unsigned int M2Q_table[64];
+ * extern struct QDMA_txdesc *free_head;
+ * extern struct SFQ_table *sfq0;
+ * extern struct SFQ_table *sfq1;
+ * extern struct SFQ_table *sfq2;
+ * extern struct SFQ_table *sfq3;
+ */
+
+/* seq_file show handler: dump the GDMA RX error counters, flow-control
+ * configuration and per-port packet counts for the SNMP daemon.
+ * Prints nothing when USER_SNMPD is not enabled.
+ */
+static int ra_snmp_seq_show(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & USER_SNMPD) {
+		/* Overflow / FCS / short / long / checksum error counters. */
+		seq_printf(seq, "rx counters: %x %x %x %x %x %x %x\n",
+			   sys_reg_read(GDMA_RX_GBCNT0),
+			   sys_reg_read(GDMA_RX_GPCNT0),
+			   sys_reg_read(GDMA_RX_OERCNT0),
+			   sys_reg_read(GDMA_RX_FERCNT0),
+			   sys_reg_read(GDMA_RX_SERCNT0),
+			   sys_reg_read(GDMA_RX_LERCNT0),
+			   sys_reg_read(GDMA_RX_CERCNT0));
+		seq_printf(seq, "fc config: %x %x %p %x\n",
+			   sys_reg_read(CDMA_FC_CFG),
+			   sys_reg_read(GDMA1_FC_CFG),
+			   PDMA_FC_CFG, sys_reg_read(PDMA_FC_CFG));
+		seq_printf(seq, "ports: %x %x %x %x %x %x\n",
+			   sys_reg_read(PORT0_PKCOUNT),
+			   sys_reg_read(PORT1_PKCOUNT),
+			   sys_reg_read(PORT2_PKCOUNT),
+			   sys_reg_read(PORT3_PKCOUNT),
+			   sys_reg_read(PORT4_PKCOUNT),
+			   sys_reg_read(PORT5_PKCOUNT));
+	}
+
+	return 0;
+}
+
+/* open handler for the SNMP counter proc entry.
+ *
+ * Bug fix: the original returned 0 (success) without calling
+ * single_open() when USER_SNMPD is disabled, which left
+ * file->private_data NULL and made a subsequent seq_read() dereference
+ * a NULL seq_file.  Fail the open explicitly instead.
+ */
+static int ra_snmp_seq_open(struct inode *inode, struct file *file)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (!(ei_local->features & USER_SNMPD))
+		return -EPERM;
+
+	return single_open(file, ra_snmp_seq_show, NULL);
+}
+
+/* Read-only proc fops for the SNMP counter dump. */
+static const struct file_operations ra_snmp_seq_fops = {
+	.owner = THIS_MODULE,
+	.open = ra_snmp_seq_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+/* get_ring_usage() - count the in-use descriptors of a DMA ring.
+ * @mode: 1 = TX ring, 2 = RX ring
+ * @i:    ring index (only ring 0 is supported for TX)
+ *
+ * Returns the number of occupied descriptors, or 0 for an unknown ring.
+ * When the CPU and DMA indices are equal the ring is either completely
+ * full or completely empty; the descriptor's DDONE bit disambiguates.
+ */
+int get_ring_usage(int mode, int i)
+{
+	unsigned long tx_ctx_idx, tx_dtx_idx, tx_usage;
+	unsigned long rx_calc_idx, rx_drx_idx, rx_usage;
+
+	struct PDMA_rxdesc *rxring;
+	struct PDMA_txdesc *txring;
+
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (mode == 2) {
+		/* cpu point to the next descriptor of rx dma ring */
+		rx_calc_idx = *(unsigned long *)RX_CALC_IDX0;
+		rx_drx_idx = *(unsigned long *)RX_DRX_IDX0;
+		rxring = (struct PDMA_rxdesc *)RX_BASE_PTR0;
+
+		rx_usage =
+		    (rx_drx_idx - rx_calc_idx - 1 + num_rx_desc) % num_rx_desc;
+		if (rx_calc_idx == rx_drx_idx) {
+			/* Bug fix: this branch used to assign tx_usage,
+			 * so the corrected value was discarded and a
+			 * stale rx_usage (num_rx_desc - 1) was returned.
+			 */
+			if (rxring[rx_drx_idx].rxd_info2.DDONE_bit == 1)
+				rx_usage = num_rx_desc;
+			else
+				rx_usage = 0;
+		}
+		return rx_usage;
+	}
+
+	switch (i) {
+	case 0:
+		tx_ctx_idx = *(unsigned long *)TX_CTX_IDX0;
+		tx_dtx_idx = *(unsigned long *)TX_DTX_IDX0;
+		txring = ei_local->tx_ring0;
+		break;
+	default:
+		pr_debug("get_tx_idx failed %d %d\n", mode, i);
+		return 0;
+	}
+
+	tx_usage = (tx_ctx_idx - tx_dtx_idx + num_tx_desc) % num_tx_desc;
+	if (tx_ctx_idx == tx_dtx_idx) {
+		if (txring[tx_ctx_idx].txd_info2.DDONE_bit == 1)
+			tx_usage = 0;
+		else
+			tx_usage = num_tx_desc;
+	}
+	return tx_usage;
+}
+
+/* Dump the frame-engine interrupt, delay-interrupt and PDMA TX/RX ring
+ * registers (base/max/cpu/dma indices) into the given seq_file.  The
+ * four PDMA TX rings are only dumped when QDMA is not in use.
+ */
+void dump_reg(struct seq_file *s)
+{
+	int fe_int_enable;
+	int rx_usage;
+	int dly_int_cfg;
+	int rx_base_ptr0;
+	int rx_max_cnt0;
+	int rx_calc_idx0;
+	int rx_drx_idx0;
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	int tx_usage = 0;
+	int tx_base_ptr[4];
+	int tx_max_cnt[4];
+	int tx_ctx_idx[4];
+	int tx_dtx_idx[4];
+	int i;
+
+	fe_int_enable = sys_reg_read(FE_INT_ENABLE);
+	rx_usage = get_ring_usage(2, 0);
+
+	dly_int_cfg = sys_reg_read(DLY_INT_CFG);
+
+	/* PDMA-only: snapshot all four TX ring register sets first so the
+	 * printed values are as close to one point in time as possible.
+	 */
+	if (!(ei_local->features & FE_QDMA)) {
+		tx_usage = get_ring_usage(1, 0);
+
+		tx_base_ptr[0] = sys_reg_read(TX_BASE_PTR0);
+		tx_max_cnt[0] = sys_reg_read(TX_MAX_CNT0);
+		tx_ctx_idx[0] = sys_reg_read(TX_CTX_IDX0);
+		tx_dtx_idx[0] = sys_reg_read(TX_DTX_IDX0);
+
+		tx_base_ptr[1] = sys_reg_read(TX_BASE_PTR1);
+		tx_max_cnt[1] = sys_reg_read(TX_MAX_CNT1);
+		tx_ctx_idx[1] = sys_reg_read(TX_CTX_IDX1);
+		tx_dtx_idx[1] = sys_reg_read(TX_DTX_IDX1);
+
+		tx_base_ptr[2] = sys_reg_read(TX_BASE_PTR2);
+		tx_max_cnt[2] = sys_reg_read(TX_MAX_CNT2);
+		tx_ctx_idx[2] = sys_reg_read(TX_CTX_IDX2);
+		tx_dtx_idx[2] = sys_reg_read(TX_DTX_IDX2);
+
+		tx_base_ptr[3] = sys_reg_read(TX_BASE_PTR3);
+		tx_max_cnt[3] = sys_reg_read(TX_MAX_CNT3);
+		tx_ctx_idx[3] = sys_reg_read(TX_CTX_IDX3);
+		tx_dtx_idx[3] = sys_reg_read(TX_DTX_IDX3);
+	}
+
+	rx_base_ptr0 = sys_reg_read(RX_BASE_PTR0);
+	rx_max_cnt0 = sys_reg_read(RX_MAX_CNT0);
+	rx_calc_idx0 = sys_reg_read(RX_CALC_IDX0);
+	rx_drx_idx0 = sys_reg_read(RX_DRX_IDX0);
+
+	seq_printf(s, "\n\nFE_INT_ENABLE : 0x%08x\n", fe_int_enable);
+
+	if (!(ei_local->features & FE_QDMA))
+		seq_printf(s, "TxRing PktCnt: %d/%d\n", tx_usage, num_tx_desc);
+
+	seq_printf(s, "RxRing PktCnt: %d/%d\n\n", rx_usage, num_rx_desc);
+	seq_printf(s, "DLY_INT_CFG : 0x%08x\n", dly_int_cfg);
+
+	if (!(ei_local->features & FE_QDMA)) {
+		for (i = 0; i < 4; i++) {
+			seq_printf(s, "TX_BASE_PTR%d : 0x%08x\n", i,
+				   tx_base_ptr[i]);
+			seq_printf(s, "TX_MAX_CNT%d : 0x%08x\n", i,
+				   tx_max_cnt[i]);
+			seq_printf(s, "TX_CTX_IDX%d : 0x%08x\n", i,
+				   tx_ctx_idx[i]);
+			seq_printf(s, "TX_DTX_IDX%d : 0x%08x\n", i,
+				   tx_dtx_idx[i]);
+		}
+	}
+
+	seq_printf(s, "RX_BASE_PTR0 : 0x%08x\n", rx_base_ptr0);
+	seq_printf(s, "RX_MAX_CNT0 : 0x%08x\n", rx_max_cnt0);
+	seq_printf(s, "RX_CALC_IDX0 : 0x%08x\n", rx_calc_idx0);
+	seq_printf(s, "RX_DRX_IDX0 : 0x%08x\n", rx_drx_idx0);
+
+	if (ei_local->features & FE_ETHTOOL)
+		seq_printf(s,
+			   "The current PHY address selected by ethtool is %d\n",
+			   get_current_phy_address());
+}
+
+/* seq_file show handler: dump the frame-engine registers. */
+int reg_read_main(struct seq_file *seq, void *v)
+{
+	dump_reg(seq);
+
+	return 0;
+}
+
+/* seq iterator start: one position per TX descriptor slot. */
+static void *seq_skb_free_start(struct seq_file *seq, loff_t *pos)
+{
+	if (*pos >= num_tx_desc)
+		return NULL;
+
+	return pos;
+}
+
+/* seq iterator step: advance until all TX descriptors are shown. */
+static void *seq_skb_free_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	++(*pos);
+
+	return (*pos < num_tx_desc) ? pos : NULL;
+}
+
+/* seq iterator stop: no lock was taken in start(), so nothing to undo. */
+static void seq_skb_free_stop(struct seq_file *seq, void *v)
+{
+	/* Nothing to do */
+}
+
+/* Print one skb_free[] slot as a raw 32-bit word.  NOTE(review): the
+ * *(int *)& cast assumes the slot is (at least) pointer-word sized and
+ * 4-byte alignable — kept as-is, verify against skb_free's declaration.
+ */
+static int seq_skb_free_show(struct seq_file *seq, void *v)
+{
+	int i = *(loff_t *)v;
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	seq_printf(seq, "%d: %08x\n", i, *(int *)&ei_local->skb_free[i]);
+
+	return 0;
+}
+
+/* Iterator table for the skb_free dump. */
+static const struct seq_operations seq_skb_free_ops = {
+	.start = seq_skb_free_start,
+	.next = seq_skb_free_next,
+	.stop = seq_skb_free_stop,
+	.show = seq_skb_free_show
+};
+
+/* open handler: attach the skb_free iterator to the file. */
+static int skb_free_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &seq_skb_free_ops);
+}
+
+/* Read-only proc fops for the skb_free table. */
+static const struct file_operations skb_free_fops = {
+	.owner = THIS_MODULE,
+	.open = skb_free_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release
+};
+
+/* Dump the QDMA state (64-queue layout, MT7622) to a seq_file:
+ * free-queue counters, the two TX schedulers, all 64 physical queues,
+ * the SFQ virtual queues, flow-control thresholds, the DMA FSM state
+ * and the M2Q mapping table.  No-op when QDMA is not enabled.
+ */
+int qdma_read_64queue(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	unsigned int temp, i;
+	unsigned int sw_fq, hw_fq;
+	unsigned int min_en, min_rate, max_en, max_rate, sch, weight;
+	unsigned int queue, tx_des_cnt, hw_resv, sw_resv, queue_head,
+		     queue_tail, queue_no;
+
+	if (!(ei_local->features & FE_QDMA))
+		return 0;
+
+	seq_puts(seq, "==== General Information ====\n");
+	temp = sys_reg_read(QDMA_FQ_CNT);
+	sw_fq = (temp & 0xFFFF0000) >> 16;
+	hw_fq = (temp & 0x0000FFFF);
+	seq_printf(seq, "SW TXD: %d/%d; HW TXD: %d/%d\n", sw_fq,
+		   num_tx_desc, hw_fq, NUM_QDMA_PAGE);
+	seq_printf(seq, "SW TXD virtual start address: 0x%p\n",
+		   ei_local->txd_pool);
+	seq_printf(seq, "HW TXD virtual start address: 0x%p\n\n",
+		   free_head);
+
+	seq_puts(seq, "==== Scheduler Information ====\n");
+	temp = sys_reg_read(QDMA_TX_SCH);
+	max_en = (temp & 0x00000800) >> 11;
+	max_rate = (temp & 0x000007F0) >> 4;
+	for (i = 0; i < (temp & 0x0000000F); i++)
+		max_rate *= 10;
+	seq_printf(seq, "SCH1 rate control:%d. Rate is %dKbps.\n",
+		   max_en, max_rate);
+	max_en = (temp & 0x08000000) >> 27;
+	max_rate = (temp & 0x07F00000) >> 20;
+	/* Bug fix: the SCH2 exponent field must be shifted down before
+	 * use; the loop previously compared i against the unshifted mask
+	 * (up to 0xF0000 iterations) and produced a bogus rate.
+	 */
+	for (i = 0; i < ((temp & 0x000F0000) >> 16); i++)
+		max_rate *= 10;
+	seq_printf(seq, "SCH2 rate control:%d. Rate is %dKbps.\n\n",
+		   max_en, max_rate);
+
+	seq_puts(seq, "==== Physical Queue Information ====\n");
+	sys_reg_write(QDMA_PAGE, 0);
+	for (queue = 0; queue < 64; queue++) {
+		/* Queues are banked 16 per QDMA register page. */
+		if (queue < 16) {
+			sys_reg_write(QDMA_PAGE, 0);
+			queue_no = queue;
+		} else if (queue <= 31) {
+			sys_reg_write(QDMA_PAGE, 1);
+			queue_no = queue % 16;
+		} else if (queue <= 47) {
+			sys_reg_write(QDMA_PAGE, 2);
+			queue_no = queue % 32;
+		} else {
+			sys_reg_write(QDMA_PAGE, 3);
+			queue_no = queue % 48;
+		}
+
+		temp = sys_reg_read(QTX_CFG_0 + 0x10 * queue_no);
+		tx_des_cnt = (temp & 0xffff0000) >> 16;
+		hw_resv = (temp & 0xff00) >> 8;
+		sw_resv = (temp & 0xff);
+		temp = sys_reg_read(QTX_CFG_0 + (0x10 * queue_no) + 0x4);
+		sch = (temp >> 31) + 1;
+		min_en = (temp & 0x8000000) >> 27;
+		min_rate = (temp & 0x7f00000) >> 20;
+		for (i = 0; i < (temp & 0xf0000) >> 16; i++)
+			min_rate *= 10;
+		max_en = (temp & 0x800) >> 11;
+		max_rate = (temp & 0x7f0) >> 4;
+		for (i = 0; i < (temp & 0xf); i++)
+			max_rate *= 10;
+		weight = (temp & 0xf000) >> 12;
+		queue_head = sys_reg_read(QTX_HEAD_0 + 0x10 * queue_no);
+		queue_tail = sys_reg_read(QTX_TAIL_0 + 0x10 * queue_no);
+
+		seq_printf(seq, "Queue#%d Information:\n", queue);
+		seq_printf(seq,
+			   "%d packets in the queue; head address is 0x%08x, tail address is 0x%08x.\n",
+			   tx_des_cnt, queue_head, queue_tail);
+		seq_printf(seq,
+			   "HW_RESV: %d; SW_RESV: %d; SCH: %d; Weighting: %d\n",
+			   hw_resv, sw_resv, sch, weight);
+		seq_printf(seq,
+			   "Min_Rate_En is %d, Min_Rate is %dKbps; Max_Rate_En is %d, Max_Rate is %dKbps.\n\n",
+			   min_en, min_rate, max_en, max_rate);
+	}
+	if (ei_local->features & FE_HW_SFQ) {
+		seq_puts(seq, "==== Virtual Queue Information ====\n");
+		seq_printf(seq,
+			   "VQTX_TB_BASE_0:0x%p;VQTX_TB_BASE_1:0x%p;VQTX_TB_BASE_2:0x%p;VQTX_TB_BASE_3:0x%p\n",
+			   sfq0, sfq1, sfq2, sfq3);
+		temp = sys_reg_read(VQTX_NUM);
+		seq_printf(seq,
+			   "VQTX_NUM_0:0x%01x;VQTX_NUM_1:0x%01x;VQTX_NUM_2:0x%01x;VQTX_NUM_3:0x%01x\n\n",
+			   temp & 0xF, (temp & 0xF0) >> 4,
+			   (temp & 0xF00) >> 8, (temp & 0xF000) >> 12);
+	}
+
+	seq_puts(seq, "==== Flow Control Information ====\n");
+	temp = sys_reg_read(QDMA_FC_THRES);
+	seq_printf(seq,
+		   "SW_DROP_EN:%x; SW_DROP_FFA:%d; SW_DROP_MODE:%d\n",
+		   (temp & 0x1000000) >> 24, (temp & 0x2000000) >> 25,
+		   (temp & 0x30000000) >> 28);
+	seq_printf(seq,
+		   "WH_DROP_EN:%x; HW_DROP_FFA:%d; HW_DROP_MODE:%d\n",
+		   (temp & 0x10000) >> 16, (temp & 0x20000) >> 17,
+		   (temp & 0x300000) >> 20);
+	seq_printf(seq, "SW_DROP_FSTVQ_MODE:%d;SW_DROP_FSTVQ:%d\n",
+		   (temp & 0xC0000000) >> 30,
+		   (temp & 0x08000000) >> 27);
+	seq_printf(seq, "HW_DROP_FSTVQ_MODE:%d;HW_DROP_FSTVQ:%d\n",
+		   (temp & 0xC00000) >> 22, (temp & 0x080000) >> 19);
+
+	seq_puts(seq, "\n==== FSM Information\n");
+	temp = sys_reg_read(QDMA_DMA);
+	seq_printf(seq, "VQTB_FSM:0x%01x\n", (temp & 0x0F000000) >> 24);
+	seq_printf(seq, "FQ_FSM:0x%01x\n", (temp & 0x000F0000) >> 16);
+	seq_printf(seq, "TX_FSM:0x%01x\n", (temp & 0x00000F00) >> 8);
+	seq_printf(seq, "RX_FSM:0x%01x\n\n", (temp & 0x0000000f));
+
+	seq_puts(seq, "==== M2Q Information ====\n");
+	for (i = 0; i < 64; i += 8) {
+		seq_printf(seq,
+			   " (%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)\n",
+			   i, M2Q_table[i], i + 1, M2Q_table[i + 1],
+			   i + 2, M2Q_table[i + 2], i + 3,
+			   M2Q_table[i + 3], i + 4, M2Q_table[i + 4],
+			   i + 5, M2Q_table[i + 5], i + 6,
+			   M2Q_table[i + 6], i + 7, M2Q_table[i + 7]);
+	}
+
+	return 0;
+}
+
+/* Dump the QDMA state (16-queue layout) to a seq_file: free-queue
+ * counters, the two TX schedulers, the 16 physical queues, the SFQ
+ * virtual queues, flow-control thresholds, the DMA FSM state and the
+ * M2Q mapping table.  No-op when QDMA is not enabled.
+ */
+int qdma_read(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	unsigned int temp, i;
+	unsigned int sw_fq, hw_fq;
+	unsigned int min_en, min_rate, max_en, max_rate, sch, weight;
+	unsigned int queue, tx_des_cnt, hw_resv, sw_resv, queue_head,
+		     queue_tail;
+
+	if (!(ei_local->features & FE_QDMA))
+		return 0;
+
+	seq_puts(seq, "==== General Information ====\n");
+	temp = sys_reg_read(QDMA_FQ_CNT);
+	sw_fq = (temp & 0xFFFF0000) >> 16;
+	hw_fq = (temp & 0x0000FFFF);
+	seq_printf(seq, "SW TXD: %d/%d; HW TXD: %d/%d\n", sw_fq,
+		   num_tx_desc, hw_fq, NUM_QDMA_PAGE);
+	seq_printf(seq, "SW TXD virtual start address: 0x%p\n",
+		   ei_local->txd_pool);
+	seq_printf(seq, "HW TXD virtual start address: 0x%p\n\n",
+		   free_head);
+
+	seq_puts(seq, "==== Scheduler Information ====\n");
+	temp = sys_reg_read(QDMA_TX_SCH);
+	max_en = (temp & 0x00000800) >> 11;
+	max_rate = (temp & 0x000007F0) >> 4;
+	for (i = 0; i < (temp & 0x0000000F); i++)
+		max_rate *= 10;
+	seq_printf(seq, "SCH1 rate control:%d. Rate is %dKbps.\n",
+		   max_en, max_rate);
+	max_en = (temp & 0x08000000) >> 27;
+	max_rate = (temp & 0x07F00000) >> 20;
+	/* Bug fix: the SCH2 exponent field must be shifted down before
+	 * use; the loop previously compared i against the unshifted mask
+	 * (up to 0xF0000 iterations) and produced a bogus rate.
+	 */
+	for (i = 0; i < ((temp & 0x000F0000) >> 16); i++)
+		max_rate *= 10;
+	seq_printf(seq, "SCH2 rate control:%d. Rate is %dKbps.\n\n",
+		   max_en, max_rate);
+
+	seq_puts(seq, "==== Physical Queue Information ====\n");
+	for (queue = 0; queue < 16; queue++) {
+		temp = sys_reg_read(QTX_CFG_0 + 0x10 * queue);
+		tx_des_cnt = (temp & 0xffff0000) >> 16;
+		hw_resv = (temp & 0xff00) >> 8;
+		sw_resv = (temp & 0xff);
+		temp = sys_reg_read(QTX_CFG_0 + (0x10 * queue) + 0x4);
+		sch = (temp >> 31) + 1;
+		min_en = (temp & 0x8000000) >> 27;
+		min_rate = (temp & 0x7f00000) >> 20;
+		for (i = 0; i < (temp & 0xf0000) >> 16; i++)
+			min_rate *= 10;
+		max_en = (temp & 0x800) >> 11;
+		max_rate = (temp & 0x7f0) >> 4;
+		for (i = 0; i < (temp & 0xf); i++)
+			max_rate *= 10;
+		weight = (temp & 0xf000) >> 12;
+		queue_head = sys_reg_read(QTX_HEAD_0 + 0x10 * queue);
+		queue_tail = sys_reg_read(QTX_TAIL_0 + 0x10 * queue);
+
+		seq_printf(seq, "Queue#%d Information:\n", queue);
+		seq_printf(seq,
+			   "%d packets in the queue; head address is 0x%08x, tail address is 0x%08x.\n",
+			   tx_des_cnt, queue_head, queue_tail);
+		seq_printf(seq,
+			   "HW_RESV: %d; SW_RESV: %d; SCH: %d; Weighting: %d\n",
+			   hw_resv, sw_resv, sch, weight);
+		seq_printf(seq,
+			   "Min_Rate_En is %d, Min_Rate is %dKbps; Max_Rate_En is %d, Max_Rate is %dKbps.\n\n",
+			   min_en, min_rate, max_en, max_rate);
+	}
+	if (ei_local->features & FE_HW_SFQ) {
+		seq_puts(seq, "==== Virtual Queue Information ====\n");
+		seq_printf(seq,
+			   "VQTX_TB_BASE_0:0x%p;VQTX_TB_BASE_1:0x%p;VQTX_TB_BASE_2:0x%p;VQTX_TB_BASE_3:0x%p\n",
+			   sfq0, sfq1, sfq2, sfq3);
+		temp = sys_reg_read(VQTX_NUM);
+		seq_printf(seq,
+			   "VQTX_NUM_0:0x%01x;VQTX_NUM_1:0x%01x;VQTX_NUM_2:0x%01x;VQTX_NUM_3:0x%01x\n\n",
+			   temp & 0xF, (temp & 0xF0) >> 4,
+			   (temp & 0xF00) >> 8, (temp & 0xF000) >> 12);
+	}
+
+	seq_puts(seq, "==== Flow Control Information ====\n");
+	temp = sys_reg_read(QDMA_FC_THRES);
+	seq_printf(seq,
+		   "SW_DROP_EN:%x; SW_DROP_FFA:%d; SW_DROP_MODE:%d\n",
+		   (temp & 0x1000000) >> 24, (temp & 0x2000000) >> 25,
+		   (temp & 0x30000000) >> 28);
+	seq_printf(seq,
+		   "WH_DROP_EN:%x; HW_DROP_FFA:%d; HW_DROP_MODE:%d\n",
+		   (temp & 0x10000) >> 16, (temp & 0x20000) >> 17,
+		   (temp & 0x300000) >> 20);
+	seq_printf(seq, "SW_DROP_FSTVQ_MODE:%d;SW_DROP_FSTVQ:%d\n",
+		   (temp & 0xC0000000) >> 30,
+		   (temp & 0x08000000) >> 27);
+	seq_printf(seq, "HW_DROP_FSTVQ_MODE:%d;HW_DROP_FSTVQ:%d\n",
+		   (temp & 0xC00000) >> 22, (temp & 0x080000) >> 19);
+
+	seq_puts(seq, "\n==== FSM Information\n");
+	temp = sys_reg_read(QDMA_DMA);
+	seq_printf(seq, "VQTB_FSM:0x%01x\n", (temp & 0x0F000000) >> 24);
+	seq_printf(seq, "FQ_FSM:0x%01x\n", (temp & 0x000F0000) >> 16);
+	seq_printf(seq, "TX_FSM:0x%01x\n", (temp & 0x00000F00) >> 8);
+	seq_printf(seq, "RX_FSM:0x%01x\n\n", (temp & 0x0000000f));
+
+	seq_puts(seq, "==== M2Q Information ====\n");
+	for (i = 0; i < 64; i += 8) {
+		seq_printf(seq,
+			   " (%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)\n",
+			   i, M2Q_table[i], i + 1, M2Q_table[i + 1],
+			   i + 2, M2Q_table[i + 2], i + 3,
+			   M2Q_table[i + 3], i + 4, M2Q_table[i + 4],
+			   i + 5, M2Q_table[i + 5], i + 6,
+			   M2Q_table[i + 6], i + 7, M2Q_table[i + 7]);
+	}
+
+	return 0;
+}
+
+/* open handler: attach the 16-queue QDMA dump to the file. */
+static int qdma_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, qdma_read, NULL);
+}
+
+/* Read-only proc fops for the QDMA dump. */
+static const struct file_operations qdma_fops = {
+	.owner = THIS_MODULE,
+	.open = qdma_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+/* Dump the PDMA TX ring.  The ring is snapshotted into a temporary
+ * buffer first so the printout stays consistent while the DMA engine
+ * keeps running; each descriptor is printed as four raw 32-bit words.
+ * Returns 0 even when the temporary buffer cannot be allocated.
+ */
+int tx_ring_read(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	struct PDMA_txdesc *snapshot;
+	int idx;
+
+	snapshot = kmalloc_array(num_tx_desc, sizeof(*snapshot), GFP_KERNEL);
+	if (!snapshot)
+		return 0;
+
+	memcpy(snapshot, ei_local->tx_ring0,
+	       num_tx_desc * sizeof(*snapshot));
+
+	for (idx = 0; idx < num_tx_desc; idx++)
+		seq_printf(seq, "%d: %08x %08x %08x %08x\n", idx,
+			   *(int *)&snapshot[idx].txd_info1,
+			   *(int *)&snapshot[idx].txd_info2,
+			   *(int *)&snapshot[idx].txd_info3,
+			   *(int *)&snapshot[idx].txd_info4);
+
+	kfree(snapshot);
+	return 0;
+}
+
+/* open handler for the TX ring proc entry.  QDMA-capable chips expose
+ * the QDMA queue dump instead of the legacy PDMA TX ring; MT7622 uses
+ * the 64-queue layout.  (The original code had an unreachable final
+ * branch that returned 0 from open without any seq_file setup; it has
+ * been removed.)
+ */
+static int tx_ring_open(struct inode *inode, struct file *file)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (!(ei_local->features & FE_QDMA))
+		return single_open(file, tx_ring_read, NULL);
+
+	if (ei_local->chip_name == MT7622_FE)
+		return single_open(file, qdma_read_64queue, NULL);
+
+	return single_open(file, qdma_read, NULL);
+}
+
+/* Read-only proc fops for the TX ring / QDMA dump. */
+static const struct file_operations tx_ring_fops = {
+	.owner = THIS_MODULE,
+	.open = tx_ring_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+/* Dump RX ring 0.  As with the TX dump, descriptors are copied into a
+ * temporary buffer before printing so the output is self-consistent;
+ * each descriptor is printed as four raw 32-bit words.  Returns 0 even
+ * when the temporary buffer cannot be allocated.
+ */
+int rx_ring_read(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	struct PDMA_rxdesc *snapshot;
+	int idx;
+
+	snapshot = kmalloc_array(num_rx_desc, sizeof(*snapshot), GFP_KERNEL);
+	if (!snapshot)
+		return 0;
+
+	for (idx = 0; idx < num_rx_desc; idx++)
+		snapshot[idx] = ei_local->rx_ring[0][idx];
+
+	for (idx = 0; idx < num_rx_desc; idx++)
+		seq_printf(seq, "%d: %08x %08x %08x %08x\n", idx,
+			   *(int *)&snapshot[idx].rxd_info1,
+			   *(int *)&snapshot[idx].rxd_info2,
+			   *(int *)&snapshot[idx].rxd_info3,
+			   *(int *)&snapshot[idx].rxd_info4);
+
+	kfree(snapshot);
+	return 0;
+}
+
+/* open handler: attach the RX ring dump to the file. */
+static int rx_ring_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rx_ring_read, NULL);
+}
+
+/* Read-only proc fops for the RX ring dump. */
+static const struct file_operations rx_ring_fops = {
+	.owner = THIS_MODULE,
+	.open = rx_ring_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+/* Account one packet in the TXD-per-packet histogram.  Only maintained
+ * when TSO support is enabled; always returns 0.
+ */
+int num_of_txd_update(int num_of_txd)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & FE_TSO)
+		++txd_cnt[num_of_txd];
+
+	return 0;
+}
+
+/* seq iterator start for the TXD histogram; prints the column header
+ * and yields nothing unless TSO is enabled.
+ */
+static void *seq_tso_txd_num_start(struct seq_file *seq, loff_t *pos)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (!(ei_local->features & FE_TSO))
+		return NULL;
+
+	seq_puts(seq, "TXD | Count\n");
+	if (*pos >= (MAX_SKB_FRAGS / 2 + 1))
+		return NULL;
+
+	return pos;
+}
+
+/* seq iterator step for the TXD histogram. */
+static void *seq_tso_txd_num_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (!(ei_local->features & FE_TSO))
+		return NULL;
+
+	++(*pos);
+	return (*pos < (MAX_SKB_FRAGS / 2 + 1)) ? pos : NULL;
+}
+
+/* seq iterator stop: no lock was taken in start(), so nothing to undo. */
+static void seq_tso_txd_num_stop(struct seq_file *seq, void *v)
+{
+	/* Nothing to do */
+}
+
+/* Print one TXD histogram bucket. */
+static int seq_tso_txd_num_show(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	int bucket;
+
+	if (ei_local->features & FE_TSO) {
+		bucket = *(loff_t *)v;
+		seq_printf(seq, "%d: %d\n", bucket, txd_cnt[bucket]);
+	}
+
+	return 0;
+}
+
+/* Any write to the proc entry clears the TXD histogram; the written
+ * bytes themselves are ignored.
+ */
+ssize_t num_of_txd_write(struct file *file, const char __user *buffer,
+			 size_t count, loff_t *data)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (!(ei_local->features & FE_TSO))
+		return 0;
+
+	memset(txd_cnt, 0, sizeof(txd_cnt));
+	pr_debug("clear txd cnt table\n");
+
+	return count;
+}
+
+/* Account one TSO burst in the length histogram.  Bucket b (1..14)
+ * holds lengths with tso_len > 5000 * b (so bucket 14 is everything
+ * above 70000); bucket 0 catches the rest.  This descending-threshold
+ * scan is equivalent to the original 15-branch if/else ladder.
+ * Only maintained when TSO is enabled; always returns 0.
+ */
+int tso_len_update(int tso_len)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	int bucket;
+
+	if (ei_local->features & FE_TSO) {
+		for (bucket = 14; bucket > 0; bucket--) {
+			if (tso_len > 5000 * bucket)
+				break;
+		}
+		tso_cnt[bucket]++;
+	}
+
+	return 0;
+}
+
+/* Any write to the proc entry clears the TSO length histogram; the
+ * written bytes themselves are ignored.
+ */
+ssize_t tso_len_write(struct file *file, const char __user *buffer,
+		      size_t count, loff_t *data)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (!(ei_local->features & FE_TSO))
+		return 0;
+
+	memset(tso_cnt, 0, sizeof(tso_cnt));
+	pr_debug("clear tso cnt table\n");
+
+	return count;
+}
+
+/* seq iterator start for the TSO length histogram (15 buckets). */
+static void *seq_tso_len_start(struct seq_file *seq, loff_t *pos)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (!(ei_local->features & FE_TSO))
+		return NULL;
+
+	seq_puts(seq, " Length | Count\n");
+	return (*pos < 15) ? pos : NULL;
+}
+
+/* seq iterator step for the TSO length histogram. */
+static void *seq_tso_len_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (!(ei_local->features & FE_TSO))
+		return NULL;
+
+	++(*pos);
+	return (*pos < 15) ? pos : NULL;
+}
+
+/* seq iterator stop: no lock was taken in start(), so nothing to undo. */
+static void seq_tso_len_stop(struct seq_file *seq, void *v)
+{
+	/* Nothing to do */
+}
+
+/* Print one 5000-byte length bucket as "low~high: count". */
+static int seq_tso_len_show(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	int bucket;
+
+	if (ei_local->features & FE_TSO) {
+		bucket = *(loff_t *)v;
+		seq_printf(seq, "%d~%d: %d\n", bucket * 5000,
+			   (bucket + 1) * 5000, tso_cnt[bucket]);
+	}
+
+	return 0;
+}
+
+/* Iterator table for the TXD-per-packet histogram. */
+static const struct seq_operations seq_tso_txd_num_ops = {
+	.start = seq_tso_txd_num_start,
+	.next = seq_tso_txd_num_next,
+	.stop = seq_tso_txd_num_stop,
+	.show = seq_tso_txd_num_show
+};
+
+/* open handler: attach the TXD histogram iterator to the file. */
+static int tso_txd_num_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &seq_tso_txd_num_ops);
+}
+
+/* proc fops for the TXD histogram; writing clears the table. */
+static const struct file_operations tso_txd_num_fops = {
+	.owner = THIS_MODULE,
+	.open = tso_txd_num_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = num_of_txd_write,
+	.release = seq_release
+};
+
+/* Iterator table for the TSO length histogram. */
+static const struct seq_operations seq_tso_len_ops = {
+	.start = seq_tso_len_start,
+	.next = seq_tso_len_next,
+	.stop = seq_tso_len_stop,
+	.show = seq_tso_len_show
+};
+
+/* open handler: attach the TSO length histogram iterator to the file. */
+static int tso_len_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &seq_tso_len_ops);
+}
+
+/* proc fops for the TSO length histogram; writing clears the table. */
+static const struct file_operations tso_len_fops = {
+	.owner = THIS_MODULE,
+	.open = tso_len_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = tso_len_write,
+	.release = seq_release
+};
+
+static struct proc_dir_entry *proc_esw_cnt;
+static struct proc_dir_entry *proc_eth_cnt;
+
+/* Dump the internal gigabit switch per-port MIB counters (TX/RX drop,
+ * CRC, unicast/multicast/broadcast, pause, size errors) for ports 0-6.
+ * DUMP_EACH_PORT reads the given MIB register offset for every port
+ * into the seq_file.  NOTE(review): the trailing mii_mgr_write pair to
+ * register 0x4fe0 presumably clears/latches the MIB counters after the
+ * dump — confirm against the switch datasheet.
+ */
+void internal_gsw_cnt_read(struct seq_file *seq)
+{
+	unsigned int pkt_cnt = 0;
+	int i = 0;
+
+	seq_printf(seq,
+		   "===================== %8s %8s %8s %8s %8s %8s %8s\n",
+		   "Port0", "Port1", "Port2", "Port3", "Port4",
+		   "Port5", "Port6");
+	seq_puts(seq, "Tx Drop Packet :");
+	DUMP_EACH_PORT(0x4000);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Tx CRC Error :");
+	DUMP_EACH_PORT(0x4004);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Tx Unicast Packet :");
+	DUMP_EACH_PORT(0x4008);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Tx Multicast Packet :");
+	DUMP_EACH_PORT(0x400C);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Tx Broadcast Packet :");
+	DUMP_EACH_PORT(0x4010);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Tx Collision Event :");
+	DUMP_EACH_PORT(0x4014);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Tx Pause Packet :");
+	DUMP_EACH_PORT(0x402C);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Rx Drop Packet :");
+	DUMP_EACH_PORT(0x4060);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Rx Filtering Packet :");
+	DUMP_EACH_PORT(0x4064);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Rx Unicast Packet :");
+	DUMP_EACH_PORT(0x4068);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Rx Multicast Packet :");
+	DUMP_EACH_PORT(0x406C);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Rx Broadcast Packet :");
+	DUMP_EACH_PORT(0x4070);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Rx Alignment Error :");
+	DUMP_EACH_PORT(0x4074);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Rx CRC Error :");
+	DUMP_EACH_PORT(0x4078);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Rx Undersize Error :");
+	DUMP_EACH_PORT(0x407C);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Rx Fragment Error :");
+	DUMP_EACH_PORT(0x4080);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Rx Oversize Error :");
+	DUMP_EACH_PORT(0x4084);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Rx Jabber Error :");
+	DUMP_EACH_PORT(0x4088);
+	seq_puts(seq, "\n");
+	seq_puts(seq, "Rx Pause Packet :");
+	DUMP_EACH_PORT(0x408C);
+	mii_mgr_write(31, 0x4fe0, 0xf0);
+	mii_mgr_write(31, 0x4fe0, 0x800000f0);
+
+	seq_puts(seq, "\n");
+}
+
+/* Print the PSE free-queue / drop counters and the per-queue QDMA
+ * packet and drop counters to the kernel log (pr_info), not a seq_file.
+ * QTX_MIB_IF is switched to MIB mode (0x90000000) so that QTX_CFG_0 and
+ * QTX_SCH_0 read back packet/drop counts, then restored afterwards.
+ * Queues are banked 16 per QDMA register page.
+ * NOTE(review): queues >= 64 (if NUM_PQ is ever larger) are silently
+ * skipped by the if/else chain — confirm NUM_PQ <= 64.
+ */
+void pse_qdma_drop_cnt(void)
+{
+	u8 i;
+
+	pr_info(" <<PSE DROP CNT>>\n");
+	pr_info("| FQ_PCNT_MIN : %010u |\n",
+		(sys_reg_read(FE_PSE_FREE) & 0xff0000) >> 16);
+	pr_info("| FQ_PCNT : %010u |\n",
+		sys_reg_read(FE_PSE_FREE) & 0x00ff);
+	pr_info("| FE_DROP_FQ : %010u |\n",
+		sys_reg_read(FE_DROP_FQ));
+	pr_info("| FE_DROP_FC : %010u |\n",
+		sys_reg_read(FE_DROP_FC));
+	pr_info("| FE_DROP_PPE : %010u |\n",
+		sys_reg_read(FE_DROP_PPE));
+	pr_info("\n <<QDMA PKT/DROP CNT>>\n");
+
+	sys_reg_write(QTX_MIB_IF, 0x90000000);
+	for (i = 0; i < NUM_PQ; i++) {
+		if (i <= 15) {
+			sys_reg_write(QDMA_PAGE, 0);
+			pr_info("QDMA Q%d PKT CNT: %010u, DROP CNT: %010u\n", i,
+				sys_reg_read(QTX_CFG_0 + i * 16),
+				sys_reg_read(QTX_SCH_0 + i * 16));
+		} else if (i > 15 && i <= 31) {
+			sys_reg_write(QDMA_PAGE, 1);
+			pr_info("QDMA Q%d PKT CNT: %010u, DROP CNT: %010u\n", i,
+				sys_reg_read(QTX_CFG_0 + (i - 16) * 16),
+				sys_reg_read(QTX_SCH_0 + (i - 16) * 16));
+		} else if (i > 31 && i <= 47) {
+			sys_reg_write(QDMA_PAGE, 2);
+			pr_info("QDMA Q%d PKT CNT: %010u, DROP CNT: %010u\n", i,
+				sys_reg_read(QTX_CFG_0 + (i - 32) * 16),
+				sys_reg_read(QTX_SCH_0 + (i - 32) * 16));
+		} else if (i > 47 && i <= 63) {
+			sys_reg_write(QDMA_PAGE, 3);
+			pr_info("QDMA Q%d PKT CNT: %010u, DROP CNT: %010u\n", i,
+				sys_reg_read(QTX_CFG_0 + (i - 48) * 16),
+				sys_reg_read(QTX_SCH_0 + (i - 48) * 16));
+		}
+	}
+	/* Restore page 0 and leave MIB access mode. */
+	sys_reg_write(QDMA_PAGE, 0);
+	sys_reg_write(QTX_MIB_IF, 0x0);
+}
+
+/* Draw an ASCII diagram of the embedded 10/100 switch and dump the
+ * per-port good/bad packet counters.  Each 32-bit counter register
+ * packs two 16-bit values: low half = RX/good, high half = TX/bad
+ * (per the field usage below).
+ */
+void embedded_sw_cnt_read(struct seq_file *seq)
+{
+	seq_puts(seq, "\n <<CPU>>\n");
+	seq_puts(seq, " |\n");
+	seq_puts(seq, " ^\n");
+	seq_printf(seq, " | Port6 Rx:%08u Good Pkt\n",
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0xE0) & 0xFFFF);
+	seq_printf(seq, " | Port6 Tx:%08u Good Pkt\n",
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0xE0) >> 16);
+	seq_puts(seq, "+---------------------v-------------------------+\n");
+	seq_puts(seq, "| P6 |\n");
+	seq_puts(seq, "| <<10/100 Embedded Switch>> |\n");
+	seq_puts(seq, "| P0 P1 P2 P3 P4 P5 |\n");
+	seq_puts(seq, "+-----------------------------------------------+\n");
+	seq_puts(seq, " | | | | | |\n");
+	seq_printf(seq,
+		   "Port0 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n",
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0xE8) & 0xFFFF,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0x150) & 0xFFFF,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0xE8) >> 16,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0x150) >> 16);
+
+	seq_printf(seq,
+		   "Port1 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n",
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0xEC) & 0xFFFF,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0x154) & 0xFFFF,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0xEC) >> 16,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0x154) >> 16);
+
+	seq_printf(seq,
+		   "Port2 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n",
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0xF0) & 0xFFFF,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0x158) & 0xFFFF,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0xF0) >> 16,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0x158) >> 16);
+
+	seq_printf(seq,
+		   "Port3 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n",
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0xF4) & 0xFFFF,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0x15C) & 0xFFFF,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0xF4) >> 16,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0x15c) >> 16);
+
+	seq_printf(seq,
+		   "Port4 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n",
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0xF8) & 0xFFFF,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0x160) & 0xFFFF,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0xF8) >> 16,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0x160) >> 16);
+
+	seq_printf(seq,
+		   "Port5 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n",
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0xFC) & 0xFFFF,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0x164) & 0xFFFF,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0xFC) >> 16,
+		   sys_reg_read(ETHDMASYS_ETH_SW_BASE + 0x164) >> 16);
+}
+
+/* seq_file show handler: the PSE/QDMA drop counters go to the kernel
+ * log (pse_qdma_drop_cnt() uses pr_info internally), not the seq_file.
+ */
+int eth_cnt_read(struct seq_file *seq, void *v)
+{
+	pse_qdma_drop_cnt();
+
+	return 0;
+}
+
+/* /proc read handler: dump the frame-engine GDMA1/GDMA2 MIB counters,
+ * then chain to the internal-GSW or embedded-switch counter dump
+ * depending on the detected chip.
+ */
+int esw_cnt_read(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	seq_puts(seq, " <<CPU>>\n");
+	seq_puts(seq, "+-----------------------------------------------+\n");
+	seq_puts(seq, "| <<PSE>> |\n");
+	seq_puts(seq, "+-----------------------------------------------+\n");
+	seq_puts(seq, "+-----------------------------------------------+\n");
+	seq_puts(seq, "| <<GDMA>> |\n");
+
+	/* GDMA1 MIB counters (frame engine base + 0x1C00..0x1C38) */
+	seq_printf(seq,
+		   "| GDMA1_RX_GBCNT : %010u (Rx Good Bytes) |\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C00));
+	seq_printf(seq,
+		   "| GDMA1_RX_GPCNT : %010u (Rx Good Pkts) |\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C08));
+	seq_printf(seq,
+		   "| GDMA1_RX_OERCNT : %010u (overflow error) |\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C10));
+	seq_printf(seq, "| GDMA1_RX_FERCNT : %010u (FCS error) |\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C14));
+	seq_printf(seq, "| GDMA1_RX_SERCNT : %010u (too short) |\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C18));
+	seq_printf(seq, "| GDMA1_RX_LERCNT : %010u (too long) |\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C1C));
+	seq_printf(seq,
+		   "| GDMA1_RX_CERCNT : %010u (checksum error) |\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C20));
+	seq_printf(seq,
+		   "| GDMA1_RX_FCCNT : %010u (flow control) |\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C24));
+	seq_printf(seq,
+		   "| GDMA1_TX_SKIPCNT: %010u (about count) |\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C28));
+	seq_printf(seq,
+		   "| GDMA1_TX_COLCNT : %010u (collision count) |\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C2C));
+	seq_printf(seq,
+		   "| GDMA1_TX_GBCNT : %010u (Tx Good Bytes) |\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C30));
+	seq_printf(seq,
+		   "| GDMA1_TX_GPCNT : %010u (Tx Good Pkts) |\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C38));
+	seq_puts(seq, "| |\n");
+	/* GDMA2 MIB counters (frame engine base + 0x1C40..0x1C78) */
+	seq_printf(seq,
+		   "| GDMA2_RX_GBCNT : %010u (Rx Good Bytes) |\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C40));
+	seq_printf(seq,
+		   "| GDMA2_RX_GPCNT : %010u (Rx Good Pkts) |\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C48));
+	seq_printf(seq,
+		   "| GDMA2_RX_OERCNT : %010u (overflow error) |\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C50));
+	seq_printf(seq, "| GDMA2_RX_FERCNT : %010u (FCS error) |\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C54));
+	seq_printf(seq, "| GDMA2_RX_SERCNT : %010u (too short) |\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C58));
+	seq_printf(seq, "| GDMA2_RX_LERCNT : %010u (too long) |\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C5C));
+	seq_printf(seq,
+		   "| GDMA2_RX_CERCNT : %010u (checksum error) |\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C60));
+	seq_printf(seq,
+		   "| GDMA2_RX_FCCNT : %010u (flow control) |\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C64));
+	seq_printf(seq,
+		   "| GDMA2_TX_SKIPCNT: %010u (skip) |\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C68));
+	seq_printf(seq, "| GDMA2_TX_COLCNT : %010u (collision) |\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C6C));
+	seq_printf(seq,
+		   "| GDMA2_TX_GBCNT : %010u (Tx Good Bytes) |\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C70));
+	seq_printf(seq,
+		   "| GDMA2_TX_GPCNT : %010u (Tx Good Pkts) |\n",
+		   sys_reg_read(RALINK_FRAME_ENGINE_BASE + 0x1C78));
+
+	seq_puts(seq, "+-----------------------------------------------+\n");
+
+	seq_puts(seq, "\n");
+
+	/* chip-specific switch counter dumps */
+	if ((ei_local->chip_name == MT7623_FE) || ei_local->chip_name == MT7621_FE)
+		internal_gsw_cnt_read(seq);
+	if (ei_local->architecture & RAETH_ESW)
+		embedded_sw_cnt_read(seq);
+
+	return 0;
+}
+
+/* proc open: bind esw_cnt_read to this file via seq_file single_open */
+static int switch_count_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, esw_cnt_read, NULL);
+}
+
+/* proc open: bind eth_cnt_read to this file via seq_file single_open */
+static int eth_count_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, eth_cnt_read, NULL);
+}
+
+/* fops for the switch counter proc entry (read-only seq_file) */
+static const struct file_operations switch_count_fops = {
+	.owner = THIS_MODULE,
+	.open = switch_count_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+/* fops for the ethernet counter proc entry (read-only seq_file) */
+static const struct file_operations eth_count_fops = {
+	.owner = THIS_MODULE,
+	.open = eth_count_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+/* proc write procedure */
+/* proc write handler: change the PHY address used for GMAC1 MII ops.
+ * Accepts either "<ifname> <phy_id>" or a bare decimal phy_id.
+ *
+ * Fixes vs. original:
+ *  - kstrtol() reports the value through its third argument (passing
+ *    NULL discarded the parsed value);
+ *  - cap the copy at sizeof(buf) - 1 so buf is always NUL-terminated;
+ *  - drop the netdev reference taken by dev_get_by_name();
+ *  - use %u for the unsigned phy_id in sscanf.
+ */
+static ssize_t change_phyid(struct file *file,
+			    const char __user *buffer, size_t count,
+			    loff_t *data)
+{
+	int val = 0;
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & (FE_ETHTOOL | FE_GE2_SUPPORT)) {
+		char buf[32];
+		struct net_device *cur_dev_p;
+		struct END_DEVICE *ei_dev;
+		char if_name[64];
+		unsigned int phy_id;
+		long num;
+
+		/* leave room for the terminating NUL */
+		if (count > sizeof(buf) - 1)
+			count = sizeof(buf) - 1;
+		memset(buf, 0, sizeof(buf));
+		if (copy_from_user(buf, buffer, count))
+			return -EFAULT;
+
+		/* determine interface name */
+		strncpy(if_name, DEV_NAME, sizeof(if_name) - 1); /* "eth2" by default */
+		if_name[sizeof(if_name) - 1] = '\0';
+		if (isalpha(buf[0])) {
+			val = sscanf(buf, "%4s %1u", if_name, &phy_id);
+			if (val == -1)
+				return -EFAULT;
+		} else {
+			if (kstrtol(buf, 10, &num))
+				return -EFAULT;
+			phy_id = (unsigned int)num;
+		}
+		cur_dev_p = dev_get_by_name(&init_net, DEV_NAME);
+
+		if (!cur_dev_p)
+			return -EFAULT;
+
+		ei_dev = netdev_priv(cur_dev_p);
+
+		ei_dev->mii_info.phy_id = (unsigned char)phy_id;
+		/* release the reference taken by dev_get_by_name() */
+		dev_put(cur_dev_p);
+		return count;
+	} else {
+		return 0;
+	}
+}
+
+/* proc write handler: change the PHY address used for the GMAC2 pseudo
+ * interface.  Same input format and fixes as change_phyid():
+ * proper kstrtol() usage, guaranteed NUL termination, dev_put() after
+ * dev_get_by_name(), and %u for the unsigned phy_id.
+ */
+static ssize_t change_gmac2_phyid(struct file *file,
+				  const char __user *buffer,
+				  size_t count, loff_t *data)
+{
+	int val = 0;
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & (FE_ETHTOOL | FE_GE2_SUPPORT)) {
+		char buf[32];
+		struct net_device *cur_dev_p;
+		struct PSEUDO_ADAPTER *p_pseudo_ad;
+		char if_name[64];
+		unsigned int phy_id;
+		long num;
+
+		/* leave room for the terminating NUL */
+		if (count > sizeof(buf) - 1)
+			count = sizeof(buf) - 1;
+		memset(buf, 0, sizeof(buf));
+		if (copy_from_user(buf, buffer, count))
+			return -EFAULT;
+		/* determine interface name */
+		strncpy(if_name, DEV2_NAME, sizeof(if_name) - 1); /* "eth3" by default */
+		if_name[sizeof(if_name) - 1] = '\0';
+		if (isalpha(buf[0])) {
+			val = sscanf(buf, "%4s %1u", if_name, &phy_id);
+			if (val == -1)
+				return -EFAULT;
+		} else {
+			if (kstrtol(buf, 10, &num))
+				return -EFAULT;
+			phy_id = (unsigned int)num;
+		}
+		cur_dev_p = dev_get_by_name(&init_net, DEV2_NAME);
+
+		if (!cur_dev_p)
+			return -EFAULT;
+		p_pseudo_ad = netdev_priv(cur_dev_p);
+		p_pseudo_ad->mii_info.phy_id = (unsigned char)phy_id;
+		/* release the reference taken by dev_get_by_name() */
+		dev_put(cur_dev_p);
+		return count;
+	} else {
+		return 0;
+	}
+}
+
+/* fops for the GMAC2 proc entry (write-only: set pseudo-if PHY address) */
+static const struct file_operations gmac2_fops = {
+	.owner = THIS_MODULE,
+	.write = change_gmac2_phyid
+};
+
+/* proc open: bind reg_read_main to this file via seq_file single_open */
+static int gmac_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, reg_read_main, NULL);
+}
+
+/* fops for the GMAC proc entry: read dumps registers, write sets phy_id */
+static const struct file_operations gmac_fops = {
+	.owner = THIS_MODULE,
+	.open = gmac_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = change_phyid,
+	.release = single_release
+};
+
+/* #if defined(TASKLET_WORKQUEUE_SW) */
+
+/* /proc read handler: report whether raeth was initialized with and is
+ * currently running in workqueue (1) or tasklet (0) mode.
+ * NOTE(review): "workqueque" is a typo in user-visible output; left as-is
+ * in case scripts parse it.
+ */
+static int schedule_read(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & TASKLET_WORKQUEUE_SW) {
+		if (init_schedule == 1)
+			seq_printf(seq,
+				   "Initialize Raeth with workqueque<%d>\n",
+				   init_schedule);
+		else
+			seq_printf(seq,
+				   "Initialize Raeth with tasklet<%d>\n",
+				   init_schedule);
+		if (working_schedule == 1)
+			seq_printf(seq,
+				   "Raeth is running at workqueque<%d>\n",
+				   working_schedule);
+		else
+			seq_printf(seq,
+				   "Raeth is running at tasklet<%d>\n",
+				   working_schedule);
+	}
+
+	return 0;
+}
+
+/* proc write handler: select tasklet (0) vs workqueue (1) init mode.
+ *
+ * Fixes vs. original: the unchecked copy_from_user() into char buf[2]
+ * could overflow the stack buffer for count > 2; the buffer was never
+ * NUL-terminated; and kstrtol() was called with a NULL result pointer,
+ * discarding the parsed value.
+ */
+static ssize_t schedule_write(struct file *file,
+			      const char __user *buffer, size_t count,
+			      loff_t *data)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & TASKLET_WORKQUEUE_SW) {
+		char buf[8];
+		size_t len = count;
+		long num;
+		int old;
+
+		/* bound the copy and keep room for the NUL terminator */
+		if (len > sizeof(buf) - 1)
+			len = sizeof(buf) - 1;
+		if (copy_from_user(buf, buffer, len))
+			return -EFAULT;
+		buf[len] = '\0';
+		old = init_schedule;
+		if (kstrtol(buf, 10, &num))
+			return -EFAULT;
+		init_schedule = (int)num;
+		pr_debug
+		    ("ChangeRaethInitScheduleFrom <%d> to <%d>\n",
+		     old, init_schedule);
+		pr_debug("Not running schedule at present !\n");
+
+		return count;
+	} else {
+		return 0;
+	}
+}
+
+/* proc open: bind schedule_read to this file via seq_file single_open */
+static int schedule_switch_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, schedule_read, NULL);
+}
+
+/* fops for the tasklet/workqueue schedule proc entry (read + write) */
+static const struct file_operations schedule_sw_fops = {
+	.owner = THIS_MODULE,
+	.open = schedule_switch_open,
+	.read = seq_read,
+	.write = schedule_write,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+/* Accumulate per-source interrupt statistics from a raw frame-engine
+ * interrupt status word.  Counting only happens when FE_RAETH_INT_DBG
+ * is enabled; each set status bit bumps its dedicated counter.
+ */
+int int_stats_update(unsigned int int_status)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (!(ei_local->features & FE_RAETH_INT_DBG))
+		return 0;
+
+	/* coherency / descriptor errors */
+	if (int_status & RX_COHERENT)
+		raeth_int.RX_COHERENT_CNT++;
+	if (int_status & TX_COHERENT)
+		raeth_int.TX_COHERENT_CNT++;
+	if (int_status & RXD_ERROR)
+		raeth_int.RXD_ERROR_CNT++;
+
+	/* delayed interrupts */
+	if (int_status & RX_DLY_INT)
+		raeth_int.RX_DLY_INT_CNT++;
+	if (int_status & RING1_RX_DLY_INT)
+		raeth_int.RING1_RX_DLY_INT_CNT++;
+	if (int_status & RING2_RX_DLY_INT)
+		raeth_int.RING2_RX_DLY_INT_CNT++;
+	if (int_status & RING3_RX_DLY_INT)
+		raeth_int.RING3_RX_DLY_INT_CNT++;
+	if (int_status & TX_DLY_INT)
+		raeth_int.TX_DLY_INT_CNT++;
+
+	/* per-ring RX completion */
+	if (int_status & RX_DONE_INT0)
+		raeth_int.RX_DONE_INT0_CNT++;
+	if (int_status & RX_DONE_INT1)
+		raeth_int.RX_DONE_INT1_CNT++;
+	if (int_status & RX_DONE_INT2)
+		raeth_int.RX_DONE_INT2_CNT++;
+	if (int_status & RX_DONE_INT3)
+		raeth_int.RX_DONE_INT3_CNT++;
+
+	/* per-ring TX completion */
+	if (int_status & TX_DONE_INT0)
+		raeth_int.TX_DONE_INT0_CNT++;
+	if (int_status & TX_DONE_INT1)
+		raeth_int.TX_DONE_INT1_CNT++;
+	if (int_status & TX_DONE_INT2)
+		raeth_int.TX_DONE_INT2_CNT++;
+	if (int_status & TX_DONE_INT3)
+		raeth_int.TX_DONE_INT3_CNT++;
+
+	/* alternate replacement events */
+	if (int_status & ALT_RPLC_INT1)
+		raeth_int.ALT_RPLC_INT1_CNT++;
+	if (int_status & ALT_RPLC_INT2)
+		raeth_int.ALT_RPLC_INT2_CNT++;
+	if (int_status & ALT_RPLC_INT3)
+		raeth_int.ALT_RPLC_INT3_CNT++;
+
+	return 0;
+}
+
+/* /proc read handler: print the accumulated interrupt statistics and
+ * then reset all counters (read-and-clear semantics).
+ */
+static int int_dbg_read(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & FE_RAETH_INT_DBG) {
+		seq_puts(seq, "Raether Interrupt Statistics\n");
+		seq_printf(seq, "RX_COHERENT = %d\n",
+			   raeth_int.RX_COHERENT_CNT);
+		seq_printf(seq, "RX_DLY_INT = %d\n", raeth_int.RX_DLY_INT_CNT);
+		seq_printf(seq, "TX_COHERENT = %d\n",
+			   raeth_int.TX_COHERENT_CNT);
+		seq_printf(seq, "TX_DLY_INT = %d\n", raeth_int.TX_DLY_INT_CNT);
+		seq_printf(seq, "RING3_RX_DLY_INT = %d\n",
+			   raeth_int.RING3_RX_DLY_INT_CNT);
+		seq_printf(seq, "RING2_RX_DLY_INT = %d\n",
+			   raeth_int.RING2_RX_DLY_INT_CNT);
+		seq_printf(seq, "RING1_RX_DLY_INT = %d\n",
+			   raeth_int.RING1_RX_DLY_INT_CNT);
+		seq_printf(seq, "RXD_ERROR = %d\n", raeth_int.RXD_ERROR_CNT);
+		seq_printf(seq, "ALT_RPLC_INT3 = %d\n",
+			   raeth_int.ALT_RPLC_INT3_CNT);
+		seq_printf(seq, "ALT_RPLC_INT2 = %d\n",
+			   raeth_int.ALT_RPLC_INT2_CNT);
+		seq_printf(seq, "ALT_RPLC_INT1 = %d\n",
+			   raeth_int.ALT_RPLC_INT1_CNT);
+		seq_printf(seq, "RX_DONE_INT3 = %d\n",
+			   raeth_int.RX_DONE_INT3_CNT);
+		seq_printf(seq, "RX_DONE_INT2 = %d\n",
+			   raeth_int.RX_DONE_INT2_CNT);
+		seq_printf(seq, "RX_DONE_INT1 = %d\n",
+			   raeth_int.RX_DONE_INT1_CNT);
+		seq_printf(seq, "RX_DONE_INT0 = %d\n",
+			   raeth_int.RX_DONE_INT0_CNT);
+		seq_printf(seq, "TX_DONE_INT3 = %d\n",
+			   raeth_int.TX_DONE_INT3_CNT);
+		seq_printf(seq, "TX_DONE_INT2 = %d\n",
+			   raeth_int.TX_DONE_INT2_CNT);
+		seq_printf(seq, "TX_DONE_INT1 = %d\n",
+			   raeth_int.TX_DONE_INT1_CNT);
+		seq_printf(seq, "TX_DONE_INT0 = %d\n",
+			   raeth_int.TX_DONE_INT0_CNT);
+
+		/* clear counters after reporting */
+		memset(&raeth_int, 0, sizeof(raeth_int));
+	}
+	return 0;
+}
+
+/* proc open: bind int_dbg_read via single_open when interrupt
+ * debugging is compiled in.
+ */
+static int int_dbg_open(struct inode *inode, struct file *file)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & FE_RAETH_INT_DBG) {
+		/* memset(&raeth_int, 0, sizeof(raeth_int)); */
+		return single_open(file, int_dbg_read, NULL);
+	} else {
+		/* NOTE(review): returning 0 without single_open() leaves
+		 * file->private_data unset; harmless today because the proc
+		 * entry is only created when FE_RAETH_INT_DBG is set, but
+		 * a subsequent read would dereference a NULL seq_file.
+		 */
+		return 0;
+	}
+}
+
+/* proc write handler: no settings to change, so consume the input.
+ * Fix: the original returned 0, which makes userspace (e.g. echo/shell
+ * redirection) retry the write forever; returning count acknowledges it.
+ */
+static ssize_t int_dbg_write(struct file *file, const char __user *buffer,
+			     size_t count, loff_t *data)
+{
+	return count;
+}
+
+/* fops for the interrupt-statistics proc entry.
+ * Fix: int_dbg_open() uses single_open(), so .release must be
+ * single_release or each open leaks the seq_file state; .llseek added
+ * to match the other seq_file entries in this file.
+ */
+static const struct file_operations int_dbg_sw_fops = {
+	.owner = THIS_MODULE,
+	.open = int_dbg_open,
+	.read = seq_read,
+	.write = int_dbg_write,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+/* /proc read handler: show the LAN IPv4 address string currently stored
+ * in the driver (used by HW LRO when enabled).
+ */
+static int set_lan_ip_read(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	seq_printf(seq, "ei_local->lan_ip4_addr = %s\n",
+		   ei_local->lan_ip4_addr);
+
+	return 0;
+}
+
+/* proc open: bind set_lan_ip_read to this file via single_open */
+static int set_lan_ip_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, set_lan_ip_read, NULL);
+}
+
+/* proc write handler: store the LAN IPv4 address string and, when HW LRO
+ * is enabled, program it into the LRO engine.
+ *
+ * Fix: the original accepted count == IP4_ADDR_LEN and never terminated
+ * the copied buffer, so both ip_tmp and lan_ip4_addr could end up
+ * without a NUL -- the pr_info("%s") below would then read past the
+ * buffer.  Reserve one byte and terminate explicitly.
+ */
+static ssize_t set_lan_ip_write(struct file *file,
+				const char __user *buffer, size_t count,
+				loff_t *data)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	char ip_tmp[IP4_ADDR_LEN];
+
+	if (count >= IP4_ADDR_LEN)
+		return -EFAULT;
+
+	if (copy_from_user(ip_tmp, buffer, count))
+		return -EFAULT;
+	ip_tmp[count] = '\0';
+
+	/* count + 1 copies the terminator as well */
+	strncpy(ei_local->lan_ip4_addr, ip_tmp, count + 1);
+
+	pr_info("[%s]LAN IP = %s\n", __func__, ei_local->lan_ip4_addr);
+
+	if (ei_local->features & FE_HW_LRO)
+		fe_set_hw_lro_my_ip(ei_local->lan_ip4_addr);
+
+	return count;
+}
+
+/* fops for the LAN-IP proc entry.
+ * Fix: set_lan_ip_open() uses single_open(), so .release must be
+ * single_release or each open leaks the seq_file state; .llseek added
+ * for consistency with the other seq_file entries.
+ */
+static const struct file_operations set_lan_ip_fops = {
+	.owner = THIS_MODULE,
+	.open = set_lan_ip_open,
+	.read = seq_read,
+	.write = set_lan_ip_write,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+/* Create the driver's /proc debug entries under PROCREG_DIR.
+ * Feature-gated entries (LRO/RSS/IOC, GE2, TSO, SNMP, schedule,
+ * interrupt debug, per-chip eth counters) are only created when the
+ * corresponding feature/chip is present.  Failures are logged but not
+ * treated as fatal.  Always returns 0.
+ */
+int debug_proc_init(void)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (!proc_reg_dir)
+		proc_reg_dir = proc_mkdir(PROCREG_DIR, NULL);
+
+	if (ei_local->features & FE_HW_LRO)
+		hwlro_debug_proc_init(proc_reg_dir);
+	else if (ei_local->features & (FE_RSS_4RING | FE_RSS_2RING))
+		rss_debug_proc_init(proc_reg_dir);
+
+	if (ei_local->features & FE_HW_IOCOHERENT)
+		hwioc_debug_proc_init(proc_reg_dir);
+	proc_gmac = proc_create(PROCREG_GMAC, 0, proc_reg_dir, &gmac_fops);
+	if (!proc_gmac)
+		pr_debug("!! FAIL to create %s PROC !!\n", PROCREG_GMAC);
+
+	if (ei_local->features & (FE_ETHTOOL | FE_GE2_SUPPORT)) {
+		proc_gmac2 =
+		    proc_create(PROCREG_GMAC2, 0, proc_reg_dir, &gmac2_fops);
+		if (!proc_gmac2)
+			pr_debug("!! FAIL to create %s PROC !!\n",
+				 PROCREG_GMAC2);
+	}
+	proc_skb_free =
+	    proc_create(PROCREG_SKBFREE, 0, proc_reg_dir, &skb_free_fops);
+	if (!proc_skb_free)
+		pr_debug("!! FAIL to create %s PROC !!\n", PROCREG_SKBFREE);
+	proc_tx_ring = proc_create(PROCREG_TXRING, 0, proc_reg_dir,
+				   &tx_ring_fops);
+	if (!proc_tx_ring)
+		pr_debug("!! FAIL to create %s PROC !!\n", PROCREG_TXRING);
+	proc_rx_ring = proc_create(PROCREG_RXRING, 0,
+				   proc_reg_dir, &rx_ring_fops);
+	if (!proc_rx_ring)
+		pr_debug("!! FAIL to create %s PROC !!\n", PROCREG_RXRING);
+
+	if (ei_local->features & FE_TSO) {
+		proc_num_of_txd =
+		    proc_create(PROCREG_NUM_OF_TXD, 0, proc_reg_dir,
+				&tso_txd_num_fops);
+		if (!proc_num_of_txd)
+			pr_debug("!! FAIL to create %s PROC !!\n",
+				 PROCREG_NUM_OF_TXD);
+		proc_tso_len =
+		    proc_create(PROCREG_TSO_LEN, 0, proc_reg_dir,
+				&tso_len_fops);
+		if (!proc_tso_len)
+			pr_debug("!! FAIL to create %s PROC !!\n",
+				 PROCREG_TSO_LEN);
+	}
+
+	if (ei_local->features & USER_SNMPD) {
+		proc_ra_snmp =
+		    proc_create(PROCREG_SNMP, S_IRUGO, proc_reg_dir,
+				&ra_snmp_seq_fops);
+		if (!proc_ra_snmp)
+			pr_debug("!! FAIL to create %s PROC !!\n",
+				 PROCREG_SNMP);
+	}
+	proc_esw_cnt =
+	    proc_create(PROCREG_ESW_CNT, 0, proc_reg_dir, &switch_count_fops);
+	if (!proc_esw_cnt)
+		pr_debug("!! FAIL to create %s PROC !!\n", PROCREG_ESW_CNT);
+
+	/* eth counters only exist on MT7622 / Leopard */
+	if (ei_local->chip_name == MT7622_FE || ei_local->chip_name == LEOPARD_FE) {
+		proc_eth_cnt =
+		    proc_create(PROCREG_ETH_CNT, 0, proc_reg_dir, &eth_count_fops);
+		if (!proc_eth_cnt)
+			pr_debug("!! FAIL to create %s PROC !!\n", PROCREG_ETH_CNT);
+	}
+
+	if (ei_local->features & TASKLET_WORKQUEUE_SW) {
+		proc_sche =
+		    proc_create(PROCREG_SCHE, 0, proc_reg_dir,
+				&schedule_sw_fops);
+		if (!proc_sche)
+			pr_debug("!! FAIL to create %s PROC !!\n",
+				 PROCREG_SCHE);
+	}
+
+	if (ei_local->features & FE_RAETH_INT_DBG) {
+		proc_int_dbg =
+		    proc_create(PROCREG_INT_DBG, 0, proc_reg_dir,
+				&int_dbg_sw_fops);
+		if (!proc_int_dbg)
+			pr_debug("!! FAIL to create %s PROC !!\n",
+				 PROCREG_INT_DBG);
+	}
+
+	/* Set LAN IP address */
+	proc_set_lan_ip =
+	    proc_create(PROCREG_SET_LAN_IP, 0, proc_reg_dir, &set_lan_ip_fops);
+	if (!proc_set_lan_ip)
+		pr_debug("!! FAIL to create %s PROC !!\n", PROCREG_SET_LAN_IP);
+
+	pr_debug("PROC INIT OK!\n");
+	return 0;
+}
+
+/* Remove the /proc entries created by debug_proc_init().
+ * Fix: the GE2 branch removed PROCREG_GMAC a second time instead of
+ * removing PROCREG_GMAC2, so the gmac2 entry was leaked on unload.
+ */
+void debug_proc_exit(void)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & FE_HW_LRO)
+		hwlro_debug_proc_exit(proc_reg_dir);
+	else if (ei_local->features & (FE_RSS_4RING | FE_RSS_2RING))
+		rss_debug_proc_exit(proc_reg_dir);
+
+	if (ei_local->features & FE_HW_IOCOHERENT)
+		hwioc_debug_proc_exit(proc_reg_dir);
+
+	if (proc_sys_cp0)
+		remove_proc_entry(PROCREG_CP0, proc_reg_dir);
+
+	if (proc_gmac)
+		remove_proc_entry(PROCREG_GMAC, proc_reg_dir);
+
+	if (ei_local->features & (FE_ETHTOOL | FE_GE2_SUPPORT)) {
+		if (proc_gmac2)
+			remove_proc_entry(PROCREG_GMAC2, proc_reg_dir);
+	}
+
+	if (proc_skb_free)
+		remove_proc_entry(PROCREG_SKBFREE, proc_reg_dir);
+
+	if (proc_tx_ring)
+		remove_proc_entry(PROCREG_TXRING, proc_reg_dir);
+
+	if (proc_rx_ring)
+		remove_proc_entry(PROCREG_RXRING, proc_reg_dir);
+
+	if (ei_local->features & FE_TSO) {
+		if (proc_num_of_txd)
+			remove_proc_entry(PROCREG_NUM_OF_TXD, proc_reg_dir);
+
+		if (proc_tso_len)
+			remove_proc_entry(PROCREG_TSO_LEN, proc_reg_dir);
+	}
+
+	if (ei_local->features & USER_SNMPD) {
+		if (proc_ra_snmp)
+			remove_proc_entry(PROCREG_SNMP, proc_reg_dir);
+	}
+
+	if (proc_esw_cnt)
+		remove_proc_entry(PROCREG_ESW_CNT, proc_reg_dir);
+
+	if (ei_local->chip_name == MT7622_FE || ei_local->chip_name == LEOPARD_FE) {
+		if (proc_eth_cnt)
+			remove_proc_entry(PROCREG_ETH_CNT, proc_reg_dir);
+	}
+
+	/* if (proc_reg_dir) */
+	/* remove_proc_entry(PROCREG_DIR, 0); */
+
+	pr_debug("proc exit\n");
+}
+EXPORT_SYMBOL(proc_reg_dir);
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_dbg_proc.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_dbg_proc.h
new file mode 100644
index 0000000..8acb29e
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_dbg_proc.h
@@ -0,0 +1,95 @@
+/* Copyright 2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef RA_DBG_PROC_H
+#define RA_DBG_PROC_H
+
+#include <linux/ctype.h>
+#include <linux/proc_fs.h>
+#include "raeth_config.h"
+
+extern struct net_device *dev_raether;
+
+void dump_qos(void);
+void dump_reg(struct seq_file *s);
+void dump_cp0(void);
+
+int debug_proc_init(void);
+void debug_proc_exit(void);
+
+int tso_len_update(int tso_len);
+int num_of_txd_update(int num_of_txd);
+#ifdef CONFIG_RAETH_LRO
+int lro_stats_update(struct net_lro_mgr *lro_mgr, bool all_flushed);
+#endif
+extern unsigned int M2Q_table[64];
+extern struct QDMA_txdesc *free_head;
+extern struct SFQ_table *sfq0;
+extern struct SFQ_table *sfq1;
+extern struct SFQ_table *sfq2;
+extern struct SFQ_table *sfq3;
+extern int init_schedule;
+extern int working_schedule;
+/* Per-source interrupt event counters, filled by int_stats_update()
+ * and reported (then cleared) through the int_dbg /proc entry.
+ */
+struct raeth_int_t {
+	unsigned int RX_COHERENT_CNT;
+	unsigned int RX_DLY_INT_CNT;
+	unsigned int TX_COHERENT_CNT;
+	unsigned int TX_DLY_INT_CNT;
+	unsigned int RING3_RX_DLY_INT_CNT;
+	unsigned int RING2_RX_DLY_INT_CNT;
+	unsigned int RING1_RX_DLY_INT_CNT;
+	unsigned int RXD_ERROR_CNT;
+	unsigned int ALT_RPLC_INT3_CNT;
+	unsigned int ALT_RPLC_INT2_CNT;
+	unsigned int ALT_RPLC_INT1_CNT;
+	unsigned int RX_DONE_INT3_CNT;
+	unsigned int RX_DONE_INT2_CNT;
+	unsigned int RX_DONE_INT1_CNT;
+	unsigned int RX_DONE_INT0_CNT;
+	unsigned int TX_DONE_INT3_CNT;
+	unsigned int TX_DONE_INT2_CNT;
+	unsigned int TX_DONE_INT1_CNT;
+	unsigned int TX_DONE_INT0_CNT;
+};
+
+int int_stats_update(unsigned int int_status);
+
+/* Read the per-port packet counter at (base) for switch ports 0..6 and
+ * print each as an 8-wide unsigned value.  Relies on 'i', 'pkt_cnt' and
+ * 'seq' being declared at the expansion site.
+ * Fix: removed the stray trailing '\' after the closing brace, which
+ * silently spliced the source line following the macro definition into
+ * the macro body.
+ */
+#define DUMP_EACH_PORT(base) \
+	for (i = 0; i < 7; i++) { \
+		mii_mgr_read(31, (base) + (i * 0x100), &pkt_cnt); \
+		seq_printf(seq, "%8u ", pkt_cnt); \
+	}
+
+/* HW LRO functions */
+int hwlro_debug_proc_init(struct proc_dir_entry *proc_reg_dir);
+void hwlro_debug_proc_exit(struct proc_dir_entry *proc_reg_dir);
+
+int rss_debug_proc_init(struct proc_dir_entry *proc_reg_dir);
+void rss_debug_proc_exit(struct proc_dir_entry *proc_reg_dir);
+
+/* HW IO-Coherent functions */
+#ifdef CONFIG_RAETH_HW_IOCOHERENT
+void hwioc_debug_proc_init(struct proc_dir_entry *proc_reg_dir);
+void hwioc_debug_proc_exit(struct proc_dir_entry *proc_reg_dir);
+#else
+static inline void hwioc_debug_proc_init(struct proc_dir_entry *proc_reg_dir)
+{
+}
+
+static inline void hwioc_debug_proc_exit(struct proc_dir_entry *proc_reg_dir)
+{
+}
+#endif /* CONFIG_RAETH_HW_IOCOHERENT */
+
+#endif
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_ethtool.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_ethtool.c
new file mode 100644
index 0000000..9ff7e0e
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_ethtool.c
@@ -0,0 +1,168 @@
+/* Copyright 2016 MediaTek Inc.
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "raether.h"
+#include "ra_ethtool.h"
+
+#define RAETHER_DRIVER_NAME "raether"
+#define RA_NUM_STATS 4
+
+/* Return the PHY address recorded for the DEV_NAME netdev, or 0 when
+ * the device does not exist.
+ * Fix: dev_get_by_name() takes a reference on the netdev; the original
+ * never released it, leaking a reference per call.
+ */
+unsigned char get_current_phy_address(void)
+{
+	struct net_device *cur_dev_p;
+	struct END_DEVICE *ei_local;
+	unsigned char phy_id;
+
+	cur_dev_p = dev_get_by_name(&init_net, DEV_NAME);
+	if (!cur_dev_p)
+		return 0;
+	ei_local = netdev_priv(cur_dev_p);
+	phy_id = ei_local->mii_info.phy_id;
+	dev_put(cur_dev_p);
+	return phy_id;
+}
+
+#define MII_CR_ADDR 0x00
+#define MII_CR_MR_AUTONEG_ENABLE BIT(12)
+#define MII_CR_MR_RESTART_NEGOTIATION BIT(9)
+
+#define AUTO_NEGOTIATION_ADVERTISEMENT 0x04
+#define AN_PAUSE BIT(10)
+
+/* ethtool get_link: report link state via the generic MII helper */
+u32 et_get_link(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+
+	return mii_link_ok(&ei_local->mii_info);
+}
+
+/* ethtool get_settings: fill *cmd from the MII state; always 0 */
+int et_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+
+	mii_ethtool_gset(&ei_local->mii_info, cmd);
+	return 0;
+}
+
+/* mii_mgr_read wrapper for mii.o ethtool */
+/* mii_mgr_read wrapper for mii.o ethtool.
+ * NOTE(review): the phy_id argument is ignored; the address stored in
+ * ei_local->mii_info.phy_id is used instead -- confirm this is intended.
+ */
+int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+	unsigned int result;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+
+	mii_mgr_read((unsigned int)ei_local->mii_info.phy_id,
+		     (unsigned int)location, &result);
+	return (int)result;
+}
+
+/* mii_mgr_write wrapper for mii.o ethtool */
+void mdio_write(struct net_device *dev, int phy_id, int location, int value)
+{
+ struct END_DEVICE *ei_local = netdev_priv(dev);
+
+ mii_mgr_write((unsigned int)ei_local->mii_info.phy_id,
+ (unsigned int)location, (unsigned int)value);
+/* printk("mii.o write= phy_id:%d\n", phy_id);*/
+/*printk("address:%d value:%x\n", location, value); */
+}
+
+/* #ifdef CONFIG_PSEUDO_SUPPORT */
+/*We unable to re-use the Raether functions because it is hard to tell
+ * where the calling from is. From eth2 or eth3?
+ *
+ * These code size is around 950 bytes.
+ */
+
+/* ethtool get_link for the pseudo (GMAC2) interface; reports 0 when
+ * GE2 support is absent.
+ */
+u32 et_virt_get_link(struct net_device *dev)
+{
+	struct PSEUDO_ADAPTER *pseudo = netdev_priv(dev);
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (!(ei_local->features & FE_GE2_SUPPORT))
+		return 0;
+
+	return mii_link_ok(&pseudo->mii_info);
+}
+
+/* ethtool get_settings for the pseudo (GMAC2) interface; *cmd is only
+ * filled when GE2 support is present.  Always returns 0.
+ */
+int et_virt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct PSEUDO_ADAPTER *pseudo = netdev_priv(dev);
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (!(ei_local->features & FE_GE2_SUPPORT))
+		return 0;
+
+	mii_ethtool_gset(&pseudo->mii_info, cmd);
+	return 0;
+}
+
+/* MII read for the pseudo (GMAC2) interface; returns 0 when GE2
+ * support is absent.  Uses the phy address stored in the pseudo
+ * adapter, not the phy_id argument.
+ */
+int mdio_virt_read(struct net_device *dev, int phy_id, int location)
+{
+	struct PSEUDO_ADAPTER *pseudo = netdev_priv(dev);
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	unsigned int value = 0;
+
+	if (!(ei_local->features & FE_GE2_SUPPORT))
+		return 0;
+
+	mii_mgr_read((unsigned int)pseudo->mii_info.phy_id,
+		     (unsigned int)location, &value);
+	return (int)value;
+}
+
+/* MII write for the pseudo (GMAC2) interface; a no-op when GE2 support
+ * is absent.  Uses the phy address stored in the pseudo adapter, not
+ * the phy_id argument.
+ */
+void mdio_virt_write(struct net_device *dev, int phy_id, int location,
+		     int value)
+{
+	struct PSEUDO_ADAPTER *pseudo = netdev_priv(dev);
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (!(ei_local->features & FE_GE2_SUPPORT))
+		return;
+
+	mii_mgr_write((unsigned int)pseudo->mii_info.phy_id,
+		      (unsigned int)location, (unsigned int)value);
+}
+
+/* Initialize the generic MII glue (mii_if_info) for the main GMAC:
+ * register accessors, mask widths, GMII capability, and the default
+ * PHY address.
+ */
+void ethtool_init(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+
+	/* init mii structure */
+	ei_local->mii_info.dev = dev;
+	ei_local->mii_info.mdio_read = mdio_read;
+	ei_local->mii_info.mdio_write = mdio_write;
+	ei_local->mii_info.phy_id_mask = 0x1f;
+	ei_local->mii_info.reg_num_mask = 0x1f;
+	ei_local->mii_info.supports_gmii =
+	    mii_check_gmii_support(&ei_local->mii_info);
+
+	/* TODO: phy_id: 0~4 */
+	ei_local->mii_info.phy_id = 1;
+}
+
+/* Initialize the generic MII glue for the pseudo (GMAC2) interface,
+ * using the virt mdio accessors and PHY address 0x1e.
+ */
+void ethtool_virt_init(struct net_device *dev)
+{
+	struct PSEUDO_ADAPTER *p_pseudo_ad = netdev_priv(dev);
+
+	/* init mii structure */
+	p_pseudo_ad->mii_info.dev = dev;
+	p_pseudo_ad->mii_info.mdio_read = mdio_virt_read;
+	p_pseudo_ad->mii_info.mdio_write = mdio_virt_write;
+	p_pseudo_ad->mii_info.phy_id_mask = 0x1f;
+	p_pseudo_ad->mii_info.reg_num_mask = 0x1f;
+	p_pseudo_ad->mii_info.phy_id = 0x1e;
+	p_pseudo_ad->mii_info.supports_gmii =
+	    mii_check_gmii_support(&p_pseudo_ad->mii_info);
+}
+
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_ethtool.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_ethtool.h
new file mode 100644
index 0000000..cff52e2
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_ethtool.h
@@ -0,0 +1,34 @@
+/* Copyright 2016 MediaTek Inc.
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef RA_ETHTOOL_H
+#define RA_ETHTOOL_H
+
+extern struct net_device *dev_raether;
+
+/* ethtool related */
+void ethtool_init(struct net_device *dev);
+int et_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
+u32 et_get_link(struct net_device *dev);
+unsigned char get_current_phy_address(void);
+int mdio_read(struct net_device *dev, int phy_id, int location);
+void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+
+/* for pseudo interface */
+void ethtool_virt_init(struct net_device *dev);
+int et_virt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
+u32 et_virt_get_link(struct net_device *dev);
+int mdio_virt_read(struct net_device *dev, int phy_id, int location);
+void mdio_virt_write(struct net_device *dev, int phy_id, int location,
+ int value);
+
+#endif
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_ioctl.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_ioctl.h
new file mode 100644
index 0000000..b94cb33
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_ioctl.h
@@ -0,0 +1,179 @@
+/* Copyright 2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _RAETH_IOCTL_H
+#define _RAETH_IOCTL_H
+
+/* ioctl commands */
+#define RAETH_SW_IOCTL 0x89F0
+#define RAETH_ESW_REG_READ 0x89F1
+#define RAETH_ESW_REG_WRITE 0x89F2
+#define RAETH_MII_READ 0x89F3
+#define RAETH_MII_WRITE 0x89F4
+#define RAETH_ESW_INGRESS_RATE 0x89F5
+#define RAETH_ESW_EGRESS_RATE 0x89F6
+#define RAETH_ESW_PHY_DUMP 0x89F7
+#define RAETH_QDMA_IOCTL 0x89F8
+#define RAETH_EPHY_IOCTL 0x89F9
+#define RAETH_MII_READ_CL45 0x89FC
+#define RAETH_MII_WRITE_CL45 0x89FD
+#define RAETH_QDMA_SFQ_WEB_ENABLE 0x89FE
+#define RAETH_SET_LAN_IP 0x89FF
+
+/* switch ioctl commands */
+#define SW_IOCTL_SET_EGRESS_RATE 0x0000
+#define SW_IOCTL_SET_INGRESS_RATE 0x0001
+#define SW_IOCTL_SET_VLAN 0x0002
+#define SW_IOCTL_DUMP_VLAN 0x0003
+#define SW_IOCTL_DUMP_TABLE 0x0004
+#define SW_IOCTL_ADD_L2_ADDR 0x0005
+#define SW_IOCTL_DEL_L2_ADDR 0x0006
+#define SW_IOCTL_ADD_MCAST_ADDR 0x0007
+#define SW_IOCTL_DEL_MCAST_ADDR 0x0008
+#define SW_IOCTL_DUMP_MIB 0x0009
+#define SW_IOCTL_ENABLE_IGMPSNOOP 0x000A
+#define SW_IOCTL_DISABLE_IGMPSNOOP 0x000B
+#define SW_IOCTL_SET_PORT_TRUNK 0x000C
+#define SW_IOCTL_GET_PORT_TRUNK 0x000D
+#define SW_IOCTL_SET_PORT_MIRROR 0x000E
+#define SW_IOCTL_GET_PHY_STATUS 0x000F
+#define SW_IOCTL_READ_REG 0x0010
+#define SW_IOCTL_WRITE_REG 0x0011
+#define SW_IOCTL_QOS_EN 0x0012
+#define SW_IOCTL_QOS_SET_TABLE2TYPE 0x0013
+#define SW_IOCTL_QOS_GET_TABLE2TYPE 0x0014
+#define SW_IOCTL_QOS_SET_PORT2TABLE 0x0015
+#define SW_IOCTL_QOS_GET_PORT2TABLE 0x0016
+#define SW_IOCTL_QOS_SET_PORT2PRI 0x0017
+#define SW_IOCTL_QOS_GET_PORT2PRI 0x0018
+#define SW_IOCTL_QOS_SET_DSCP2PRI 0x0019
+#define SW_IOCTL_QOS_GET_DSCP2PRI 0x001a
+#define SW_IOCTL_QOS_SET_PRI2QUEUE 0x001b
+#define SW_IOCTL_QOS_GET_PRI2QUEUE 0x001c
+#define SW_IOCTL_QOS_SET_QUEUE_WEIGHT 0x001d
+#define SW_IOCTL_QOS_GET_QUEUE_WEIGHT 0x001e
+#define SW_IOCTL_SET_PHY_TEST_MODE 0x001f
+#define SW_IOCTL_GET_PHY_REG 0x0020
+#define SW_IOCTL_SET_PHY_REG 0x0021
+#define SW_IOCTL_VLAN_TAG 0x0022
+#define SW_IOCTL_CLEAR_TABLE 0x0023
+#define SW_IOCTL_CLEAR_VLAN 0x0024
+#define SW_IOCTL_SET_VLAN_MODE 0x0025
+
+/*****************QDMA IOCTL DATA*************/
+#define RAETH_QDMA_REG_READ 0x0000
+#define RAETH_QDMA_REG_WRITE 0x0001
+#define RAETH_QDMA_QUEUE_MAPPING 0x0002
+#define RAETH_QDMA_READ_CPU_CLK 0x0003
+/*********************************************/
+/******************EPHY IOCTL DATA************/
+/*MT7622 10/100 phy cal*/
+#define RAETH_VBG_IEXT_CALIBRATION 0x0000
+#define RAETH_TXG_R50_CALIBRATION 0x0001
+#define RAETH_TXG_OFFSET_CALIBRATION 0x0002
+#define RAETH_TXG_AMP_CALIBRATION 0x0003
+#define GE_TXG_R50_CALIBRATION 0x0004
+#define GE_TXG_OFFSET_CALIBRATION 0x0005
+#define GE_TXG_AMP_CALIBRATION 0x0006
+/*********************************************/
+#define REG_ESW_WT_MAC_MFC 0x10
+#define REG_ESW_ISC 0x18
+#define REG_ESW_WT_MAC_ATA1 0x74
+#define REG_ESW_WT_MAC_ATA2 0x78
+#define REG_ESW_WT_MAC_ATWD 0x7C
+#define REG_ESW_WT_MAC_ATC 0x80
+
+#define REG_ESW_TABLE_TSRA1 0x84
+#define REG_ESW_TABLE_TSRA2 0x88
+#define REG_ESW_TABLE_ATRD 0x8C
+
+#define REG_ESW_VLAN_VTCR 0x90
+#define REG_ESW_VLAN_VAWD1 0x94
+#define REG_ESW_VLAN_VAWD2 0x98
+
+#if defined(CONFIG_MACH_MT7623)
+#define REG_ESW_VLAN_ID_BASE 0x100
+#else
+#define REG_ESW_VLAN_ID_BASE 0x50
+#endif
+#define REG_ESW_VLAN_MEMB_BASE 0x70
+#define REG_ESW_TABLE_SEARCH 0x24
+#define REG_ESW_TABLE_STATUS0 0x28
+#define REG_ESW_TABLE_STATUS1 0x2C
+#define REG_ESW_TABLE_STATUS2 0x30
+#define REG_ESW_WT_MAC_AD0 0x34
+#define REG_ESW_WT_MAC_AD1 0x38
+#define REG_ESW_WT_MAC_AD2 0x3C
+
+#if defined(CONFIG_MACH_MT7623)
+#define REG_ESW_MAX 0xFC
+#else
+#define REG_ESW_MAX 0x16C
+#endif
+#define REG_HQOS_MAX 0x3FFF
+
+struct esw_reg {
+ unsigned int off;
+ unsigned int val;
+};
+
+struct ra_mii_ioctl_data {
+ __u32 phy_id;
+ __u32 reg_num;
+ __u32 val_in;
+ __u32 val_out;
+ __u32 port_num;
+ __u32 dev_addr;
+ __u32 reg_addr;
+};
+
+struct ra_switch_ioctl_data {
+ unsigned int cmd;
+ unsigned int on_off;
+ unsigned int port;
+ unsigned int bw;
+ unsigned int vid;
+ unsigned int fid;
+ unsigned int port_map;
+ unsigned int rx_port_map;
+ unsigned int tx_port_map;
+ unsigned int igmp_query_interval;
+ unsigned int reg_addr;
+ unsigned int reg_val;
+ unsigned int mode;
+ unsigned int qos_queue_num;
+ unsigned int qos_type;
+ unsigned int qos_pri;
+ unsigned int qos_dscp;
+ unsigned int qos_table_idx;
+ unsigned int qos_weight;
+ unsigned char mac[6];
+};
+
+struct qdma_ioctl_data {
+ unsigned int cmd;
+ unsigned int off;
+ unsigned int val;
+};
+
+struct ephy_ioctl_data {
+ unsigned int cmd;
+};
+
+struct esw_rate {
+ unsigned int on_off;
+ unsigned int port;
+ unsigned int bw; /*Mbps */
+};
+#endif /* _RAETH_IOCTL_H */
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_mac.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_mac.c
new file mode 100644
index 0000000..ad822bb
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_mac.c
@@ -0,0 +1,179 @@
+/* Copyright 2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "raether.h"
+
+void enable_auto_negotiate(struct END_DEVICE *ei_local)
+{
+ u32 reg_value;
+ pr_info("=================================\n");
+ pr_info("enable_auto_negotiate\n");
+
+ /* FIXME: we don't know how to deal with PHY end addr */
+ reg_value = sys_reg_read(ESW_PHY_POLLING);
+ reg_value |= (1 << 31);
+ reg_value &= ~(0x1f);
+ reg_value &= ~(0x1f << 8);
+
+ if (ei_local->architecture & (GE2_RGMII_AN | GE2_SGMII_AN)) {
+ /* setup PHY address for auto polling (Start Addr). */
+ /*avoid end phy address = 0 */
+ reg_value |= ((mac_to_gigaphy_mode_addr2 - 1) & 0x1f);
+ /* setup PHY address for auto polling (End Addr). */
+ reg_value |= (mac_to_gigaphy_mode_addr2 << 8);
+ } else if (ei_local->architecture & (GE1_RGMII_AN | GE1_SGMII_AN | LEOPARD_EPHY)) {
+ /* setup PHY address for auto polling (Start Addr). */
+ reg_value |= (mac_to_gigaphy_mode_addr << 0);
+ /* setup PHY address for auto polling (End Addr). */
+ reg_value |= ((mac_to_gigaphy_mode_addr + 1) << 8);
+ }
+
+ sys_reg_write(ESW_PHY_POLLING, reg_value);
+}
+
+void ra2880stop(struct END_DEVICE *ei_local)
+{
+ unsigned int reg_value;
+
+ pr_info("ra2880stop()...");
+
+ reg_value = sys_reg_read(DMA_GLO_CFG);
+ reg_value &= ~(TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
+ sys_reg_write(DMA_GLO_CFG, reg_value);
+
+ pr_info("Done\n");
+}
+
+void set_mac_address(unsigned char p[6])
+{
+ unsigned long reg_value;
+
+ reg_value = (p[0] << 8) | (p[1]);
+ sys_reg_write(GDMA1_MAC_ADRH, reg_value);
+
+ reg_value = (unsigned long)((p[2] << 24) | (p[3] << 16) | (p[4] << 8) | p[5]);
+ sys_reg_write(GDMA1_MAC_ADRL, reg_value);
+}
+
+void set_mac2_address(unsigned char p[6])
+{
+ unsigned long reg_value;
+
+ reg_value = (p[0] << 8) | (p[1]);
+ sys_reg_write(GDMA2_MAC_ADRH, reg_value);
+
+ reg_value = (unsigned long)((p[2] << 24) | (p[3] << 16) | (p[4] << 8) | p[5]);
+ sys_reg_write(GDMA2_MAC_ADRL, reg_value);
+}
+
/* Copy into @dest the characters of @src that precede the first
 * occurrence of @separator.
 *
 * Returns the offset of the character after the separator (token length
 * plus one), or -1 when @src/@dest is NULL or no separator remains — in
 * the latter case the whole of @src is copied to @dest.
 * NOTE(review): @dest is assumed large enough for the copied token —
 * callers must guarantee this.
 */
static int getnext(const char *src, int separator, char *dest)
{
	const char *sep;
	size_t n;

	if (!src || !dest)
		return -1;

	sep = strchr(src, separator);
	if (!sep) {
		strcpy(dest, src);
		return -1;
	}

	n = (size_t)(sep - src);
	memcpy(dest, src, n);
	dest[n] = '\0';
	return (int)n + 1;
}
+
/* Parse a dotted-quad IPv4 string ("a.b.c.d") into a host-order u32.
 *
 * Returns 0 on success, 1 when a dot separator is missing, or the
 * error code from kstrtoul() when an octet is not numeric.
 */
int str_to_ip(unsigned int *ip, const char *str)
{
	int len;
	const char *ptr = str;
	char buf[128];
	unsigned long c[4];
	int i;
	int ret;

	for (i = 0; i < 3; ++i) {
		len = getnext(ptr, '.', buf);
		if (len == -1)
			return 1; /* parse error */

		/* Parse into a full unsigned long slot: the original
		 * cast of &c[i] (a single unsigned char) to
		 * "unsigned long *" made kstrtoul() store sizeof(long)
		 * bytes, clobbering the neighbouring array elements
		 * and stack.
		 */
		ret = kstrtoul(buf, 10, &c[i]);
		if (ret)
			return ret;

		ptr += len;
	}
	ret = kstrtoul(ptr, 0, &c[3]);
	if (ret)
		return ret;

	/* Mask each octet to 8 bits, matching the original truncation
	 * to unsigned char.
	 */
	*ip = ((c[0] & 0xff) << 24) | ((c[1] & 0xff) << 16) |
	      ((c[2] & 0xff) << 8) | (c[3] & 0xff);

	return 0;
}
+
/* Write GE1's MAC control register (MAC base + 0x100) with the fixed
 * vendor magic 0x2105e33b — presumably forced 1000M full-duplex
 * link-up; confirm bit meanings against the SoC datasheet.
 */
void set_ge1_force_1000(void)
{
	sys_reg_write(ETHDMASYS_ETH_MAC_BASE + 0x100, 0x2105e33b);
}
+
/* Write GE2's MAC control register (MAC base + 0x200) with the fixed
 * vendor magic 0x2105e33b — presumably forced 1000M full-duplex
 * link-up; confirm bit meanings against the SoC datasheet.
 */
void set_ge2_force_1000(void)
{
	sys_reg_write(ETHDMASYS_ETH_MAC_BASE + 0x200, 0x2105e33b);
}
+
/* Write GE1's MAC control register with 0x21056300 — per the function
 * name this selects auto-negotiation mode; confirm against the
 * datasheet.
 */
void set_ge1_an(void)
{
	sys_reg_write(ETHDMASYS_ETH_MAC_BASE + 0x100, 0x21056300);
}
+
/* Write GE2's MAC control register with 0x21056300 — per the function
 * name this selects auto-negotiation mode; confirm against the
 * datasheet.
 */
void set_ge2_an(void)
{
	sys_reg_write(ETHDMASYS_ETH_MAC_BASE + 0x200, 0x21056300);
}
+
+void set_ge2_gmii(void)
+{
+ void __iomem *virt_addr;
+ unsigned int reg_value;
+
+ virt_addr = ioremap(ETHSYS_BASE, 0x20);
+ reg_value = sys_reg_read(virt_addr + 0x14);
+ /*[15:14] =0 RGMII, [8] = 0 SGMII disable*/
+ reg_value = reg_value & (~0xc100);
+ reg_value = reg_value | 0x4000;
+ sys_reg_write(virt_addr + 0x14, reg_value);
+ iounmap(virt_addr);
+}
+
+void set_ge0_gmii(void)
+{
+ void __iomem *virt_addr;
+ unsigned int reg_value;
+
+ virt_addr = ioremap(ETHSYS_BASE, 0x20);
+ reg_value = sys_reg_read(virt_addr + 0x14);
+ /*[15:14] =0 RGMII, [8] = 0 SGMII disable*/
+ reg_value = reg_value & (~0xc000);
+ reg_value = reg_value | 0x400;
+ sys_reg_write(virt_addr + 0x14, reg_value);
+ iounmap(virt_addr);
+}
+
/* Write GE2's MAC control register with 0x2105e300 — per the function
 * name this forces the link down; confirm against the datasheet.
 */
void set_ge2_force_link_down(void)
{
	sys_reg_write(ETHDMASYS_ETH_MAC_BASE + 0x200, 0x2105e300);
}
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_mac.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_mac.h
new file mode 100644
index 0000000..c329703
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_mac.h
@@ -0,0 +1,30 @@
+/* Copyright 2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#ifndef RA_MAC_H
#define RA_MAC_H

/* Forward declaration keeps this header self-contained: without it, a
 * bare "struct END_DEVICE *" in a prototype would declare a distinct
 * prototype-scope type if this header were included before raether.h.
 */
struct END_DEVICE;

/* Stop the frame engine's DMA (clears TX/RX DMA enables). */
void ra2880stop(struct END_DEVICE *ei_local);
/* Program the GDMA1/GDMA2 unicast MAC address-match registers. */
void set_mac_address(unsigned char p[6]);
void set_mac2_address(unsigned char p[6]);
/* Parse "a.b.c.d" into a host-order IPv4 address; 0 on success. */
int str_to_ip(unsigned int *ip, const char *str);
/* Configure and enable switch PHY auto-polling. */
void enable_auto_negotiate(struct END_DEVICE *ei_local);
/* MAC force-mode / autoneg / pinmux helpers. */
void set_ge1_force_1000(void);
void set_ge2_force_1000(void);
void set_ge1_an(void);
void set_ge2_an(void);
void set_ge2_gmii(void);
void set_ge0_gmii(void);
void set_ge2_force_link_down(void);
#endif /* RA_MAC_H */
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_switch.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_switch.c
new file mode 100644
index 0000000..f677a8c
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_switch.c
@@ -0,0 +1,4249 @@
+/* Copyright 2016 MediaTek Inc.
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "raether.h"
+#include "ra_switch.h"
+#include "ra_mac.h"
+#include "raeth_reg.h"
+
+#define MT7622_CHIP_ID 0x08000008
+
+void reg_bit_zero(void __iomem *addr, unsigned int bit, unsigned int len)
+{
+ int reg_val;
+ int i;
+
+ reg_val = sys_reg_read(addr);
+ for (i = 0; i < len; i++)
+ reg_val &= ~(1 << (bit + i));
+ sys_reg_write(addr, reg_val);
+}
+
+void reg_bit_one(void __iomem *addr, unsigned int bit, unsigned int len)
+{
+ unsigned int reg_val;
+ unsigned int i;
+
+ reg_val = sys_reg_read(addr);
+ for (i = 0; i < len; i++)
+ reg_val |= 1 << (bit + i);
+ sys_reg_write(addr, reg_val);
+}
+
/* One-shot status flags for the FE/GE analog calibration steps: each
 * is set to 1 when the corresponding calibration step completes.
 */
u8 fe_cal_flag;
u8 fe_cal_flag_mdix;
u8 fe_cal_tx_offset_flag;
u8 fe_cal_tx_offset_flag_mdix;
u8 fe_cal_r50_flag;
u8 fe_cal_vbg_flag;
/* Cached calibration results shared between calibration steps. */
u32 iext_cal_result;
u32 r50_p0_cal_result;
u8 ge_cal_r50_flag;
u8 ge_cal_tx_offset_flag;
u8 ge_cal_flag;
int show_time;
/* Base PHY address added to a port number to form its MDIO address. */
static u8 ephy_addr_base;

/* 50ohm_new*/
/* Vendor-provided lookup tables mapping a 6-bit zcal comparator code
 * to the corresponding tuning code.  Do not edit by hand.
 */
const u8 ZCAL_TO_R50OHM_TBL_100[64] = {
	127, 121, 116, 115, 111, 109, 108, 104,
	102, 99, 97, 96, 77, 76, 73, 72,
	70, 69, 67, 66, 47, 46, 45, 43,
	42, 41, 40, 38, 37, 36, 35, 34,
	32, 16, 15, 14, 13, 12, 11, 10,
	9, 8, 7, 6, 6, 5, 4, 4,
	3, 2, 2, 1, 1, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0
};

/* zcal code -> GE R50 code, 100M variant (currently identical to the
 * gigabit table below).
 */
const u8 ZCAL_TO_R50ohm_GE_TBL_100[64] = {
	63, 63, 63, 63, 63, 63, 63, 63,
	63, 63, 63, 63, 63, 63, 63, 60,
	57, 55, 53, 51, 48, 46, 44, 42,
	40, 38, 37, 36, 34, 32, 30, 28,
	27, 26, 25, 23, 22, 21, 19, 18,
	16, 15, 14, 13, 12, 11, 10, 9,
	8, 7, 6, 5, 4, 4, 3, 2,
	1, 0, 0, 0, 0, 0, 0, 0
};

/* zcal code -> GE R50 termination code. */
const u8 ZCAL_TO_R50ohm_GE_TBL[64] = {
	63, 63, 63, 63, 63, 63, 63, 63,
	63, 63, 63, 63, 63, 63, 63, 60,
	57, 55, 53, 51, 48, 46, 44, 42,
	40, 38, 37, 36, 34, 32, 30, 28,
	27, 26, 25, 23, 22, 21, 19, 18,
	16, 15, 14, 13, 12, 11, 10, 9,
	8, 7, 6, 5, 4, 4, 3, 2,
	1, 0, 0, 0, 0, 0, 0, 0
};

/* zcal code -> REXT code. */
const u8 ZCAL_TO_REXT_TBL[64] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 1, 1, 1, 1, 1,
	1, 2, 2, 2, 2, 2, 2, 3,
	3, 3, 3, 3, 3, 4, 4, 4,
	4, 4, 4, 4, 5, 5, 5, 5,
	5, 5, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7
};

/* zcal code -> filter code. */
const u8 ZCAL_TO_FILTER_TBL[64] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 1, 1,
	1, 2, 2, 2, 3, 3, 3, 4,
	4, 4, 4, 5, 5, 5, 6, 6,
	7, 7, 7, 8, 8, 8, 9, 9,
	9, 10, 10, 10, 11, 11, 11, 11,
	12, 12, 12, 12, 12, 12, 12, 12
};
+
+void tc_phy_write_g_reg(u8 port_num, u8 page_num,
+ u8 reg_num, u32 reg_data)
+{
+ u32 r31 = 0;
+
+ r31 |= 0 << 15; /* global */
+ r31 |= ((page_num & 0x7) << 12); /* page no */
+ mii_mgr_write(port_num, 31, r31); /* change Global page */
+ mii_mgr_write(port_num, reg_num, reg_data);
+}
+
+void tc_phy_write_l_reg(u8 port_no, u8 page_no,
+ u8 reg_num, u32 reg_data)
+{
+ u32 r31 = 0;
+
+ r31 |= 1 << 15; /* local */
+ r31 |= ((page_no & 0x7) << 12); /* page no */
+ mii_mgr_write(port_no, 31, r31); /* select local page x */
+ mii_mgr_write(port_no, reg_num, reg_data);
+}
+
+u32 tc_phy_read_g_reg(u8 port_num, u8 page_num, u8 reg_num)
+{
+ u32 phy_val;
+
+ u32 r31 = 0;
+
+ r31 |= 0 << 15; /* global */
+ r31 |= ((page_num & 0x7) << 12); /* page no */
+ mii_mgr_write(port_num, 31, r31); /* change Global page */
+ mii_mgr_read(port_num, reg_num, &phy_val);
+ return phy_val;
+}
+
+u32 tc_phy_read_l_reg(u8 port_no, u8 page_no, u8 reg_num)
+{
+ u32 phy_val;
+ u32 r31 = 0;
+
+ r31 |= 1 << 15; /* local */
+ r31 |= ((page_no & 0x7) << 12); /* page no */
+ mii_mgr_write(port_no, 31, r31); /* select local page x */
+ mii_mgr_read(port_no, reg_num, &phy_val);
+ return phy_val;
+}
+
/* Clause-45 MDIO read: return register @reg_addr of MMD @dev_addr on
 * the PHY at @port_num.
 */
u32 tc_phy_read_dev_reg(u32 port_num, u32 dev_addr, u32 reg_addr)
{
	u32 phy_val;

	mii_mgr_read_cl45(port_num, dev_addr, reg_addr, &phy_val);
	return phy_val;
}
+
/* Clause-45 MDIO write: write @write_data to register @reg_addr of MMD
 * @dev_addr on the PHY at @port_num.
 */
void tc_phy_write_dev_reg(u32 port_num, u32 dev_addr, u32 reg_addr, u32 write_data)
{
	mii_mgr_write_cl45(port_num, dev_addr, reg_addr, write_data);
}
+
/* Clause-22 MDIO read: return register @phy_register of the PHY at
 * @phy_addr.
 */
u32 tc_mii_read(u32 phy_addr, u32 phy_register)
{
	u32 phy_val;

	mii_mgr_read(phy_addr, phy_register, &phy_val);
	return phy_val;
}
+
/* Clause-22 MDIO write: write @write_data to register @phy_register of
 * the PHY at @phy_addr.
 */
void tc_mii_write(u32 phy_addr, u32 phy_register, u32 write_data)
{
	mii_mgr_write(phy_addr, phy_register, write_data);
}
+
/* Disable the analog-calibration control bits in global page 7
 * register 24 of the common calibration port (FE_CAL_COMMON).
 */
void clear_ckinv_ana_txvos(void)
{
	u16 g7r24_tmp;
	/*clear RG_CAL_CKINV/RG_ANA_CALEN/RG_TXVOS_CALEN*/
	/*g7r24[13]:0x0, RG_ANA_CALEN_P0*/
	g7r24_tmp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, (g7r24_tmp & (~0x2000)));

	/*g7r24[14]:0x0, RG_CAL_CKINV_P0*/
	g7r24_tmp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, (g7r24_tmp & (~0x4000)));

	/*g7r24[12]:0x0, DA_TXVOS_CALEN_P0*/
	g7r24_tmp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, (g7r24_tmp & (~0x1000)));
	/* NOTE(review): this final write zeroes the whole register,
	 * which makes the three read-modify-writes above redundant —
	 * confirm whether clearing every other g7r24 bit is intended.
	 */
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, 0);
}
+
/* Run one analog-calibration comparison cycle (tx-amp flavour) and
 * poll for its completion.
 *
 * Pulses g7r24 bit 4 on the common calibration port low-then-high to
 * start a cycle, then polls g7r24 bit 1 (completion indication,
 * inferred from usage — confirm against the EPHY programming guide)
 * up to 1000 times with @delay microseconds between polls.  l4r23 of
 * the common port and @port_num is cleared around the cycle.
 * Returns the final completion bit: 1 on success, 0 on timeout.
 */
u8 all_fe_ana_cal_wait_txamp(u32 delay, u8 port_num)
{ /* for EN7512 FE // allen_20160616 */
	u8 all_ana_cal_status;
	u16 cnt, g7r24_temp;

	tc_phy_write_l_reg(FE_CAL_COMMON, 4, 23, (0x0000));
	/* pulse g7r24[4]: clear then set to kick a comparison cycle */
	g7r24_temp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, g7r24_temp & (~0x10));
	g7r24_temp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, g7r24_temp | 0x10);

	cnt = 1000;
	do {
		udelay(delay);
		cnt--;
		all_ana_cal_status =
			((tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) >> 1) & 0x1);
	} while ((all_ana_cal_status == 0) && (cnt != 0));

	tc_phy_write_l_reg(FE_CAL_COMMON, 4, 23, (0x0000));
	tc_phy_write_l_reg(port_num, 4, 23, (0x0000));
	g7r24_temp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, g7r24_temp & (~0x10));
	return all_ana_cal_status;
}
+
/* Run one analog-calibration comparison cycle and poll for its
 * completion.  Identical to all_fe_ana_cal_wait_txamp() except that
 * l4r23 of @port_num is also cleared before the cycle starts.
 *
 * Returns the final completion bit (g7r24[1], inferred from usage):
 * 1 on success, 0 on timeout after 1000 polls of @delay us each.
 */
u8 all_fe_ana_cal_wait(u32 delay, u8 port_num)
{
	u8 all_ana_cal_status;
	u16 cnt, g7r24_temp;

	tc_phy_write_l_reg(FE_CAL_COMMON, 4, 23, (0x0000));
	tc_phy_write_l_reg(port_num, 4, 23, (0x0000));

	/* pulse g7r24[4]: clear then set to kick a comparison cycle */
	g7r24_temp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, g7r24_temp & (~0x10));
	g7r24_temp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, g7r24_temp | 0x10);
	cnt = 1000;
	do {
		udelay(delay);
		cnt--;
		all_ana_cal_status =
			((tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) >> 1) & 0x1);

	} while ((all_ana_cal_status == 0) && (cnt != 0));

	tc_phy_write_l_reg(FE_CAL_COMMON, 4, 23, (0x0000));
	tc_phy_write_l_reg(port_num, 4, 23, (0x0000));
	g7r24_temp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, g7r24_temp & (~0x10));

	return all_ana_cal_status;
}
+
/* Firmware-driven TX amplitude calibration for FE port @port_num
 * (MDI path).
 *
 * Puts the port in 100M mode, enables the calibration comparator path,
 * then steps the 6-bit amplitude code in local page 2 register 23 in
 * the direction indicated by the initial comparator output until the
 * comparator flips (ANACAL_FINISH), a wait times out (ANACAL_ERROR) or
 * the code saturates at 0x00/0x3f (ANACAL_SATURATION).  On success a
 * per-chip/per-port fudge is added, the result is mirrored into
 * register 24, and fe_cal_flag is set.
 * @delay: per-poll delay in microseconds for the wait helpers.
 */
void fe_cal_tx_amp(u8 port_num, u32 delay)
{
	u8 all_ana_cal_status;
	int ad_cal_comp_out_init;
	u16 l3r25_temp, l0r26_temp, l2r20_temp;
	u16 l2r23_temp = 0;
	int calibration_polarity;
	u8 tx_amp_reg_shift = 0;
	int tx_amp_temp = 0, cnt = 0, phyaddr, tx_amp_cnt = 0;
	u16 tx_amp_final;
	struct END_DEVICE *ei_local = netdev_priv(dev_raether);

	phyaddr = port_num + ephy_addr_base;
	tx_amp_temp = 0x20;	/* start from mid-scale of the 6-bit code */
	/* *** Tx Amp Cal start ********************** */

/*Set device in 100M mode*/
	tc_phy_write_l_reg(port_num, 0, 0, 0x2100);
/*TXG output DC differential 1V*/
	tc_phy_write_g_reg(port_num, 2, 25, 0x10c0);

	tc_phy_write_g_reg(port_num, 1, 26, (0x8000 | DAC_IN_2V));
	tc_phy_write_g_reg(port_num, 4, 21, (0x0800)); /* set default */
	tc_phy_write_l_reg(port_num, 0, 30, (0x02c0));
	tc_phy_write_l_reg(port_num, 4, 21, (0x0000));

	tc_phy_write_l_reg(FE_CAL_COMMON, 3, 25, (0xc800));
	tc_phy_write_l_reg(port_num, 3, 25, (0xc800));

	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, 0x7000);

	l3r25_temp = tc_phy_read_l_reg(port_num, 3, 25);
	tc_phy_write_l_reg(port_num, 3, 25, (l3r25_temp | 0x400));

	/*decide which port calibration RG_ZCALEN by port_num*/
	l3r25_temp = tc_phy_read_l_reg(port_num, 3, 25);
	l3r25_temp = l3r25_temp | 0x1000;
	l3r25_temp = l3r25_temp & ~(0x200);
	tc_phy_write_l_reg(port_num, 3, 25, l3r25_temp);

	/*DA_PGA_MDIX_STATUS_P0=0(L0R26[15:14] = 0x01*/
	l0r26_temp = tc_phy_read_l_reg(port_num, 0, 26);
	l0r26_temp = l0r26_temp & (~0xc000);
	tc_phy_write_l_reg(port_num, 0, 26, 0x5203);/* Kant */

	/*RG_RX2TX_EN_P0=0(L2R20[10] =0),*/
	l2r20_temp = tc_phy_read_l_reg(port_num, 2, 20);
	l2r20_temp = l2r20_temp & (~0x400);
	tc_phy_write_l_reg(port_num, 2, 20, l2r20_temp);
	tc_phy_write_l_reg(port_num, 2, 23, (tx_amp_temp));

	all_ana_cal_status = all_fe_ana_cal_wait_txamp(delay, port_num);

	if (all_ana_cal_status == 0) {
		all_ana_cal_status = ANACAL_ERROR;
		pr_info(" FE Tx amp AnaCal ERROR! (init) \r\n");
	}

	tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
	/* initial comparator output decides the search direction */
	ad_cal_comp_out_init = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1;

	if (ad_cal_comp_out_init == 1)
		calibration_polarity = -1;
	else
		calibration_polarity = 1;

	tx_amp_temp += calibration_polarity;
	cnt = 0;
	tx_amp_cnt = 0;
	/* step the amplitude code until the comparator output flips */
	while (all_ana_cal_status < ANACAL_ERROR) {
		tc_phy_write_l_reg(port_num, 2, 23, (tx_amp_temp));
		l2r23_temp = tc_phy_read_l_reg(port_num, 2, 23);
		cnt++;
		tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
		tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
		all_ana_cal_status = all_fe_ana_cal_wait_txamp(delay, port_num);

		if (((tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24)) & 0x1) !=
		    ad_cal_comp_out_init) {
			all_ana_cal_status = ANACAL_FINISH;
			fe_cal_flag = 1;
		}
		if (all_ana_cal_status == 0) {
			all_ana_cal_status = ANACAL_ERROR;
			pr_info(" FE Tx amp AnaCal ERROR! (%d) \r\n", cnt);
		} else if ((tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1) !=
			   ad_cal_comp_out_init) {
			tx_amp_cnt++;
			all_ana_cal_status = ANACAL_FINISH;
			tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
			tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
			ad_cal_comp_out_init =
				tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1;
		} else {
			if ((l2r23_temp == 0x3f) || (l2r23_temp == 0x00)) {
				all_ana_cal_status = ANACAL_SATURATION;
				pr_info
				    (" Tx amp Cal Saturation(%d)(%x)(%x)\r\n",
				     cnt, tc_phy_read_l_reg(0, 3, 25),
				     tc_phy_read_l_reg(1, 3, 25));
				pr_info
				    (" Tx amp Cal Saturation(%x)(%x)(%x)\r\n",
				     tc_phy_read_l_reg(2, 3, 25),
				     tc_phy_read_l_reg(3, 3, 25),
				     tc_phy_read_l_reg(0, 2, 30));
				/* tx_amp_temp += calibration_polarity; */
			} else {
				tx_amp_temp += calibration_polarity;
			}
		}
	}

	if ((all_ana_cal_status == ANACAL_ERROR) ||
	    (all_ana_cal_status == ANACAL_SATURATION)) {
		l2r23_temp = tc_phy_read_l_reg(port_num, 2, 23);
		tc_phy_write_l_reg(port_num, 2, 23,
				   ((tx_amp_temp << tx_amp_reg_shift)));
		l2r23_temp = tc_phy_read_l_reg(port_num, 2, 23);
		pr_info("[%d] %s, ANACAL_SATURATION\n", port_num, __func__);
	} else {
		/* per-chip/per-port empirical correction offsets */
		if (ei_local->chip_name == MT7622_FE) {
			if (port_num == 0)
				l2r23_temp = l2r23_temp + 10;
			else if (port_num == 1)
				l2r23_temp = l2r23_temp + 11;
			else if (port_num == 2)
				l2r23_temp = l2r23_temp + 10;
			else if (port_num == 3)
				l2r23_temp = l2r23_temp + 9;
			else if (port_num == 4)
				l2r23_temp = l2r23_temp + 10;
		} else if (ei_local->chip_name == LEOPARD_FE) {
			if (port_num == 1)
				l2r23_temp = l2r23_temp + 3;
			else if (port_num == 2)
				l2r23_temp = l2r23_temp + 3;
			else if (port_num == 3)
				l2r23_temp = l2r23_temp + 3 - 2;
			else if (port_num == 4)
				l2r23_temp = l2r23_temp + 2 - 1 + 2;
		}

		tc_phy_write_l_reg(port_num, 2, 23, ((l2r23_temp) << tx_amp_reg_shift));
		fe_cal_flag = 1;
	}

	/* mirror the final amplitude code (+15 bias) into l2r24 */
	tx_amp_final = tc_phy_read_l_reg(port_num, 2, 23) & 0x3f;
	tc_phy_write_l_reg(port_num, 2, 24, ((tx_amp_final + 15) << 8) | 0x20);

	if (ei_local->chip_name == LEOPARD_FE) {
		if (port_num == 1)
			tc_phy_write_l_reg(port_num, 2, 24, ((tx_amp_final + 15 - 4) << 8) | 0x20);
		else if (port_num == 2)
			tc_phy_write_l_reg(port_num, 2, 24, ((tx_amp_final + 15 + 2) << 8) | 0x20);
		else if (port_num == 3)
			tc_phy_write_l_reg(port_num, 2, 24, ((tx_amp_final + 15 + 4) << 8) | 0x20);
		else if (port_num == 4)
			tc_phy_write_l_reg(port_num, 2, 24, ((tx_amp_final + 15 + 4) << 8) | 0x20);
	}

	pr_info("[%d] - tx_amp_final = 0x%x\n", port_num, tx_amp_final);

	/*clear RG_CAL_CKINV/RG_ANA_CALEN/RG_TXVOS_CALEN*/
	clear_ckinv_ana_txvos();

	tc_phy_write_l_reg(port_num, 3, 25, 0x0000);
	tc_phy_write_l_reg(FE_CAL_COMMON, 3, 25, 0x0000);
	tc_phy_write_g_reg(port_num, 1, 26, 0);
	/* *** Tx Amp Cal end *** */
}
+
/* Firmware-driven TX amplitude calibration for FE port @port_num,
 * MDIX (crossover) path.
 *
 * Same binary-less linear search as fe_cal_tx_amp(), but with the
 * RX2TX loopback enabled (l2r20[10] = 1) and the amplitude code kept
 * in DA_TX_I2MPB_MDIX, local page 4 register 26 bits [5:0]; the final
 * code (+15 bias plus per-chip/per-port fudge) is mirrored into
 * register 27.  Sets fe_cal_flag_mdix on success.
 * @delay: per-poll delay in microseconds for the wait helpers.
 */
void fe_cal_tx_amp_mdix(u8 port_num, u32 delay)
{
	u8 all_ana_cal_status;
	int ad_cal_comp_out_init;
	u16 l3r25_temp, l4r26_temp, l0r26_temp;
	u16 l2r20_temp, l4r26_temp_amp;
	int calibration_polarity;
	int tx_amp_temp = 0, cnt = 0, phyaddr, tx_amp_cnt = 0;
	u16 tx_amp_mdix_final;
	struct END_DEVICE *ei_local = netdev_priv(dev_raether);

	phyaddr = port_num + ephy_addr_base;
	tx_amp_temp = 0x20;	/* start from mid-scale of the 6-bit code */
/*Set device in 100M mode*/
	tc_phy_write_l_reg(port_num, 0, 0, 0x2100);
/*TXG output DC differential 0V*/
	tc_phy_write_g_reg(port_num, 2, 25, 0x10c0);

	tc_phy_write_g_reg(port_num, 1, 26, (0x8000 | DAC_IN_2V));
	tc_phy_write_g_reg(port_num, 4, 21, (0x0800)); /* set default */
	tc_phy_write_l_reg(port_num, 0, 30, (0x02c0));/*0x3f80 // l0r30[9], [7], [6], [1]*/
	tc_phy_write_l_reg(port_num, 4, 21, (0x0000));
	tc_phy_write_l_reg(FE_CAL_COMMON, 3, 25, (0xc800));
	tc_phy_write_l_reg(port_num, 3, 25, (0xc800)); /* 0xca00 */
	/* *** Tx Amp Cal start ********************** */
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, 0x7000);
	/* pr_info(" g7r24[%d] = %x\n", port_num, tc_phy_read_g_reg(port_num, 7, 24)); */

	/*RG_TXG_CALEN =1 l3r25[10]by port number*/
	l3r25_temp = tc_phy_read_l_reg(port_num, 3, 25);
	tc_phy_write_l_reg(port_num, 3, 25, (l3r25_temp | 0x400));
	/*decide which port calibration RG_ZCALEN l3r25[12] by port_num*/
	l3r25_temp = tc_phy_read_l_reg(port_num, 3, 25);
	l3r25_temp = l3r25_temp | 0x1000;
	l3r25_temp = l3r25_temp & ~(0x200);
	tc_phy_write_l_reg(port_num, 3, 25, l3r25_temp);

	/*DA_PGA_MDIX_STATUS_P0=0(L0R26[15:14] = 0x10) & RG_RX2TX_EN_P0=0(L2R20[10] =1),*/
	l0r26_temp = tc_phy_read_l_reg(port_num, 0, 26);
	l0r26_temp = l0r26_temp & (~0xc000);
	tc_phy_write_l_reg(port_num, 0, 26, 0x9203); /* Kant */
	l2r20_temp = tc_phy_read_l_reg(port_num, 2, 20);
	l2r20_temp = l2r20_temp | 0x400;
	tc_phy_write_l_reg(port_num, 2, 20, l2r20_temp);

	l3r25_temp = tc_phy_read_l_reg(port_num, 3, 25);
	tc_phy_write_l_reg(port_num, 3, 25, (l3r25_temp | 0x0400));
/*DA_TX_I2MPB_MDIX L4R26[5:0]*/
	l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
	/* pr_info("111l4r26 =%x\n", tc_phy_read_l_reg(port_num, 4, 26)); */
	l4r26_temp = l4r26_temp & (~0x3f);
	tc_phy_write_l_reg(port_num, 4, 26, (l4r26_temp | tx_amp_temp));
	/* pr_info("222l4r26 =%x\n", tc_phy_read_l_reg(port_num, 4, 26)); */
	all_ana_cal_status = all_fe_ana_cal_wait_txamp(delay, port_num);

	if (all_ana_cal_status == 0) {
		all_ana_cal_status = ANACAL_ERROR;
		pr_info(" FE Tx amp mdix AnaCal ERROR! (init) \r\n");
	}

	tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
	tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
	/*ad_cal_comp_out_init = (tc_phy_read_l_reg(FE_CAL_COMMON, 4, 23) >> 4) & 0x1;*/
	/* initial comparator output decides the search direction */
	ad_cal_comp_out_init = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1;
	/* pr_info("mdix ad_cal_comp_out_init = %d\n", ad_cal_comp_out_init); */
	if (ad_cal_comp_out_init == 1) {
		calibration_polarity = -1;
		/* tx_amp_temp = 0x10; */
	} else {
		calibration_polarity = 1;
	}
	tx_amp_temp += calibration_polarity;
	cnt = 0;
	tx_amp_cnt = 0;
	/* step the amplitude code until the comparator output flips */
	while (all_ana_cal_status < ANACAL_ERROR) {
		l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
		l4r26_temp = l4r26_temp & (~0x3f);
		tc_phy_write_l_reg(port_num, 4, 26, (l4r26_temp | tx_amp_temp));
		l4r26_temp = (tc_phy_read_l_reg(port_num, 4, 26));
		l4r26_temp_amp = (tc_phy_read_l_reg(port_num, 4, 26)) & 0x3f;
		cnt++;

		tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
		tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
		all_ana_cal_status = all_fe_ana_cal_wait_txamp(delay, port_num);

		if (((tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24)) & 0x1) !=
		    ad_cal_comp_out_init) {
			all_ana_cal_status = ANACAL_FINISH;
			fe_cal_flag_mdix = 1;
		}
		if (all_ana_cal_status == 0) {
			all_ana_cal_status = ANACAL_ERROR;
			pr_info(" FE Tx amp mdix AnaCal ERROR! (%d) \r\n", cnt);
		} else if (((tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24)) & 0x1) !=
			   ad_cal_comp_out_init) {
			all_ana_cal_status = ANACAL_FINISH;
			tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
			tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
			ad_cal_comp_out_init =
				(tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24)) & 0x1;
		} else {
			if ((l4r26_temp_amp == 0x3f) || (l4r26_temp_amp == 0x00)) {
				all_ana_cal_status = ANACAL_SATURATION;
				pr_info
				    (" Tx amp Cal mdix Saturation(%d)(%x)(%x)\r\n",
				     cnt, tc_phy_read_l_reg(0, 3, 25),
				     tc_phy_read_l_reg(1, 3, 25));
				pr_info
				    (" Tx amp Cal mdix Saturation(%x)(%x)(%x)\r\n",
				     tc_phy_read_l_reg(2, 3, 25),
				     tc_phy_read_l_reg(3, 3, 25),
				     tc_phy_read_l_reg(0, 2, 30));
				/* tx_amp_temp += calibration_polarity; */
			} else {
				tx_amp_temp += calibration_polarity;
			}
		}
	}

	if ((all_ana_cal_status == ANACAL_ERROR) ||
	    (all_ana_cal_status == ANACAL_SATURATION)) {
		l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
		pr_info(" FE-%d Tx amp AnaCal mdix Saturation! (%d)(l4r26=0x%x) \r\n",
			phyaddr, cnt, l4r26_temp);
		tc_phy_write_l_reg(port_num, 4, 26,
				   ((l4r26_temp & (~0x3f)) | tx_amp_temp));
		l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
		pr_info(" FE-%d Tx amp AnaCal mdix Saturation! (%d)(l4r26=0x%x) \r\n",
			phyaddr, cnt, l4r26_temp);
		pr_info("[%d] %s, ANACAL_SATURATION\n", port_num, __func__);
	} else {
		/* per-chip/per-port empirical correction offsets */
		if (ei_local->chip_name == MT7622_FE) {
			if (port_num == 0) {
				l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
				l4r26_temp = l4r26_temp + 10;
			} else if (port_num == 1) {
				l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
				l4r26_temp = l4r26_temp + 11;
			} else if (port_num == 2) {
				l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
				l4r26_temp = l4r26_temp + 9;
			} else if (port_num == 3) {
				l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
				l4r26_temp = l4r26_temp + 9;
			} else if (port_num == 4) {
				l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
				l4r26_temp = l4r26_temp + 9;
			}
		} else if (ei_local->chip_name == LEOPARD_FE) {
			if (port_num == 1) {
				l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
				l4r26_temp = l4r26_temp + 4 - 2;
			} else if (port_num == 2) {
				l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
				l4r26_temp = l4r26_temp + 3 - 1;
			} else if (port_num == 3) {
				l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
				l4r26_temp = l4r26_temp + 4 - 3;
			} else if (port_num == 4) {
				l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
				l4r26_temp = l4r26_temp + 4 - 2 + 1;
			}
		}
		tc_phy_write_l_reg(port_num, 4, 26, l4r26_temp);
		fe_cal_flag_mdix = 1;
	}

	/* mirror the final amplitude code (+15 bias) into l4r27 */
	tx_amp_mdix_final = tc_phy_read_l_reg(port_num, 4, 26) & 0x3f;
	tc_phy_write_l_reg(port_num, 4, 27, ((tx_amp_mdix_final + 15) << 8) | 0x20);
	if (ei_local->chip_name == LEOPARD_FE) {
		if (port_num == 2)
			tc_phy_write_l_reg(port_num, 4, 27,
					   ((tx_amp_mdix_final + 15 + 1) << 8) | 0x20);
		else if (port_num == 3)
			tc_phy_write_l_reg(port_num, 4, 27,
					   ((tx_amp_mdix_final + 15 + 4) << 8) | 0x20);
		else if (port_num == 4)
			tc_phy_write_l_reg(port_num, 4, 27,
					   ((tx_amp_mdix_final + 15 + 4) << 8) | 0x20);
	}
	pr_info("[%d] - tx_amp_mdix_final = 0x%x\n", port_num, tx_amp_mdix_final);

	clear_ckinv_ana_txvos();
	tc_phy_write_l_reg(port_num, 3, 25, 0x0000);
	tc_phy_write_l_reg(FE_CAL_COMMON, 3, 25, 0x0000);
	tc_phy_write_g_reg(port_num, 1, 26, 0);
	/* *** Tx Amp Cal end *** */
}
+
/* Firmware-driven TX offset calibration for FE port @port_num
 * (MDI path).
 *
 * With the DAC driven to 0V, steps the 6-bit offset code in local
 * page 4 register 17 bits [13:8] in the direction indicated by the
 * initial comparator output until the comparator flips
 * (ANACAL_FINISH), a wait times out (ANACAL_ERROR), the code
 * saturates at 0x00/0x3f, or 254 iterations elapse.  On failure the
 * code is restored to TX_AMP_OFFSET_0MV; on success
 * fe_cal_tx_offset_flag is set.
 * @delay: per-poll delay in microseconds for the wait helpers.
 */
void fe_cal_tx_offset(u8 port_num, u32 delay)
{
	u8 all_ana_cal_status;
	int ad_cal_comp_out_init;
	u16 l3r25_temp, l2r20_temp;
	u16 g4r21_temp, l0r30_temp, l4r17_temp, l0r26_temp;
	int calibration_polarity, tx_offset_temp;
	int cal_temp = 0;
	u8 tx_offset_reg_shift;
	u8 cnt = 0, phyaddr, tx_amp_cnt = 0;
	u16 tx_offset_final;

	phyaddr = port_num + ephy_addr_base;
/*Set device in 100M mode*/
	tc_phy_write_l_reg(port_num, 0, 0, 0x2100);

	/*// g4r21[11]:Hw bypass tx offset cal, Fw cal*/
	g4r21_temp = tc_phy_read_g_reg(port_num, 4, 21);
	tc_phy_write_g_reg(port_num, 4, 21, (g4r21_temp | 0x0800));

	/*l0r30[9], [7], [6], [1]*/
	l0r30_temp = tc_phy_read_l_reg(port_num, 0, 30);
	tc_phy_write_l_reg(port_num, 0, 30, (l0r30_temp | 0x02c0));

	/* tx_offset_temp = TX_AMP_OFFSET_0MV; */
	tx_offset_temp = 0x20;	/* start from mid-scale of the 6-bit code */
	tx_offset_reg_shift = 8;
	tc_phy_write_g_reg(port_num, 1, 26, (0x8000 | DAC_IN_0V));

	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, 0x3000);
	/* pr_info(" g7r24[%d] = %x\n", port_num, tc_phy_read_g_reg(port_num, 7, 24)); */
	/*RG_TXG_CALEN =1 by port number*/
	l3r25_temp = tc_phy_read_l_reg(port_num, 3, 25);
	tc_phy_write_l_reg(port_num, 3, 25, (l3r25_temp | 0x400));
	/*decide which port calibration RG_ZCALEN by port_num*/
	l3r25_temp = tc_phy_read_l_reg(port_num, 3, 25);
	tc_phy_write_l_reg(port_num, 3, 25, (l3r25_temp | 0x1000));

	/*DA_PGA_MDIX_STATUS_P0=0(L0R26[15:14] = 0x01) & RG_RX2TX_EN_P0=0(L2R20[10] =0),*/
	l0r26_temp = tc_phy_read_l_reg(port_num, 0, 26);
	l0r26_temp = l0r26_temp & (~0xc000);
	/* tc_phy_write_l_reg(port_num, 0, 26, (l0r26_temp | 0x4000)); */
	tc_phy_write_l_reg(port_num, 0, 26, 0x5203);/* Kant */
	/* pr_info("l0r26[%d] = %x\n", port_num, tc_phy_read_l_reg(port_num, 0, 26)); */
	l2r20_temp = tc_phy_read_l_reg(port_num, 2, 20);
	l2r20_temp = l2r20_temp & (~0x400);
	tc_phy_write_l_reg(port_num, 2, 20, l2r20_temp);
	/* pr_info("l2r20[%d] = %x\n", port_num, tc_phy_read_l_reg(port_num, 2, 20)); */

	tc_phy_write_l_reg(port_num, 4, 17, (0x0000));
	l4r17_temp = tc_phy_read_l_reg(port_num, 4, 17);
	tc_phy_write_l_reg(port_num, 4, 17,
			   l4r17_temp |
			   (tx_offset_temp << tx_offset_reg_shift));
/*wait AD_CAL_CLK = 1*/
	all_ana_cal_status = all_fe_ana_cal_wait(delay, port_num);
	if (all_ana_cal_status == 0) {
		all_ana_cal_status = ANACAL_ERROR;
		pr_info(" FE Tx offset AnaCal ERROR! (init) \r\n");
	}

	tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
	tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
/*GET AD_CAL_COMP_OUT g724[0]*/
	/*ad_cal_comp_out_init = (tc_phy_read_l_reg(FE_CAL_COMMON, 4, 23) >> 4) & 0x1;*/
	ad_cal_comp_out_init = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1;

	/* initial comparator output decides the search direction */
	if (ad_cal_comp_out_init == 1)
		calibration_polarity = -1;
	else
		calibration_polarity = 1;
	cnt = 0;
	tx_amp_cnt = 0;
	tx_offset_temp += calibration_polarity;

	/* step the offset code until the comparator output flips */
	while ((all_ana_cal_status < ANACAL_ERROR) && (cnt < 254)) {
		cnt++;
		cal_temp = tx_offset_temp;
		tc_phy_write_l_reg(port_num, 4, 17,
				   (cal_temp << tx_offset_reg_shift));

		tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
		tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
		all_ana_cal_status = all_fe_ana_cal_wait(delay, port_num);

		if (all_ana_cal_status == 0) {
			all_ana_cal_status = ANACAL_ERROR;
			pr_info(" FE Tx offset AnaCal ERROR! (%d) \r\n", cnt);
		} else if ((tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1) !=
			   ad_cal_comp_out_init) {
			all_ana_cal_status = ANACAL_FINISH;
			tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
			tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);

			ad_cal_comp_out_init =
				tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1;
		} else {
			l4r17_temp = tc_phy_read_l_reg(port_num, 4, 17);

			if ((tx_offset_temp == 0x3f) || (tx_offset_temp == 0x00)) {
				all_ana_cal_status = ANACAL_SATURATION;
				pr_info("tx offset ANACAL_SATURATION\n");
			} else {
				tx_offset_temp += calibration_polarity;
			}
		}
	}

	if ((all_ana_cal_status == ANACAL_ERROR) ||
	    (all_ana_cal_status == ANACAL_SATURATION)) {
		/* fall back to the 0mV default code */
		tx_offset_temp = TX_AMP_OFFSET_0MV;
		l4r17_temp = tc_phy_read_l_reg(port_num, 4, 17);
		tc_phy_write_l_reg(port_num, 4, 17,
				   (l4r17_temp |
				    (tx_offset_temp << tx_offset_reg_shift)));
		pr_info("[%d] %s, ANACAL_SATURATION\n", port_num, __func__);
	} else {
		fe_cal_tx_offset_flag = 1;
	}
	tx_offset_final = (tc_phy_read_l_reg(port_num, 4, 17) & 0x3f00) >> 8;
	pr_info("[%d] - tx_offset_final = 0x%x\n", port_num, tx_offset_final);

	clear_ckinv_ana_txvos();
	tc_phy_write_l_reg(port_num, 3, 25, 0x0000);
	tc_phy_write_l_reg(FE_CAL_COMMON, 3, 25, 0x0000);
	tc_phy_write_g_reg(port_num, 1, 26, 0);
}
+
+void fe_cal_tx_offset_mdix(u8 port_num, u32 delay)
+{ /* for MT7622 */
+ u8 all_ana_cal_status;
+ int ad_cal_comp_out_init;
+ u16 l3r25_temp, l2r20_temp, l4r26_temp;
+ u16 g4r21_temp, l0r30_temp, l0r26_temp;
+ int calibration_polarity, tx_offset_temp;
+ int cal_temp = 0;
+ u8 tx_offset_reg_shift;
+ u8 cnt = 0, phyaddr;
+ u16 tx_offset_final_mdix;
+
+ phyaddr = port_num + ephy_addr_base;
+/*Set device in 100M mode*/
+ tc_phy_write_l_reg(port_num, 0, 0, 0x2100);
+
+ /*// g4r21[11]:Hw bypass tx offset cal, Fw cal*/
+ g4r21_temp = tc_phy_read_g_reg(port_num, 4, 21);
+ tc_phy_write_g_reg(port_num, 4, 21, (g4r21_temp | 0x0800));
+
+ /*l0r30[9], [7], [6], [1]*/
+ l0r30_temp = tc_phy_read_l_reg(port_num, 0, 30);
+ tc_phy_write_l_reg(port_num, 0, 30, (l0r30_temp | 0x02c0));
+
+ tx_offset_temp = 0x20;
+ tx_offset_reg_shift = 8;
+ tc_phy_write_g_reg(port_num, 1, 26, (0x8000 | DAC_IN_0V));
+
+ tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, 0x3000);
+
+ /*RG_TXG_CALEN =1 by port number*/
+ l3r25_temp = tc_phy_read_l_reg(port_num, 3, 25);
+ tc_phy_write_l_reg(port_num, 3, 25, (l3r25_temp | 0x400));
+
+ /*decide which port calibration RG_ZCALEN by port_num*/
+ l3r25_temp = tc_phy_read_l_reg(port_num, 3, 25);
+ tc_phy_write_l_reg(port_num, 3, 25, (l3r25_temp | 0x1000));
+
+ /*DA_PGA_MDIX_STASTUS_P0=0(L0R26[15:14] = 0x10) & RG_RX2TX_EN_P0=1(L2R20[10] =1),*/
+ l0r26_temp = tc_phy_read_l_reg(port_num, 0, 26);
+ l0r26_temp = l0r26_temp & (~0xc000);
+ tc_phy_write_l_reg(port_num, 0, 26, 0x9203); /* Kant */
+ l2r20_temp = tc_phy_read_l_reg(port_num, 2, 20);
+ l2r20_temp = l2r20_temp | 0x400;
+ tc_phy_write_l_reg(port_num, 2, 20, l2r20_temp);
+
+ l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
+ tc_phy_write_l_reg(port_num, 4, 26, l4r26_temp & (~0x3f00));
+ tc_phy_write_l_reg(port_num, 4, 26,
+ (l4r26_temp & ~0x3f00) | (cal_temp << tx_offset_reg_shift));
+
+ all_ana_cal_status = all_fe_ana_cal_wait(delay, port_num);
+ if (all_ana_cal_status == 0) {
+ all_ana_cal_status = ANACAL_ERROR;
+ pr_info(" FE Tx offset mdix AnaCal ERROR! (init) \r\n");
+ }
+
+ tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+ tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+
+ ad_cal_comp_out_init = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1;
+
+ if (ad_cal_comp_out_init == 1)
+ calibration_polarity = -1;
+ else
+ calibration_polarity = 1;
+
+ cnt = 0;
+ tx_offset_temp += calibration_polarity;
+ while ((all_ana_cal_status < ANACAL_ERROR) && (cnt < 254)) {
+ cnt++;
+ cal_temp = tx_offset_temp;
+ l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
+ tc_phy_write_l_reg(port_num, 4, 26,
+ (l4r26_temp & ~0x3f00) | (cal_temp << tx_offset_reg_shift));
+
+ tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+ tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+ all_ana_cal_status = all_fe_ana_cal_wait(delay, port_num);
+
+ if (all_ana_cal_status == 0) {
+ all_ana_cal_status = ANACAL_ERROR;
+ pr_info(" FE Tx offset mdix AnaCal ERROR! (%d) \r\n", cnt);
+ } else if ((tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1) !=
+ ad_cal_comp_out_init) {
+ all_ana_cal_status = ANACAL_FINISH;
+ tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+ tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+ ad_cal_comp_out_init =
+ tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1;
+ } else {
+ l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
+
+ if ((tx_offset_temp == 0x3f) || (tx_offset_temp == 0x00)) {
+ all_ana_cal_status = ANACAL_SATURATION;
+ pr_info("tx offset ANACAL_SATURATION\n");
+ } else {
+ tx_offset_temp += calibration_polarity;
+ }
+ }
+ }
+
+ if ((all_ana_cal_status == ANACAL_ERROR) ||
+ (all_ana_cal_status == ANACAL_SATURATION)) {
+ tx_offset_temp = TX_AMP_OFFSET_0MV;
+ l4r26_temp = tc_phy_read_l_reg(port_num, 4, 26);
+ tc_phy_write_l_reg(port_num, 4, 26,
+ (l4r26_temp & (~0x3f00)) | (cal_temp << tx_offset_reg_shift));
+ pr_info("[%d] %s, ANACAL_SATURATION\n", port_num, __func__);
+ } else {
+ fe_cal_tx_offset_flag_mdix = 1;
+ }
+ tx_offset_final_mdix = (tc_phy_read_l_reg(port_num, 4, 26) & 0x3f00) >> 8;
+ pr_info("[%d] - tx_offset_final_mdix = 0x%x\n", port_num, tx_offset_final_mdix);
+
+ clear_ckinv_ana_txvos();
+ tc_phy_write_l_reg(port_num, 3, 25, 0x0000);
+ tc_phy_write_l_reg(FE_CAL_COMMON, 3, 25, 0x0000);
+ tc_phy_write_g_reg(port_num, 1, 26, 0);
+}
+
+void set_r50_leopard(u8 port_num, u32 r50_cal_result)
+{
+ int rg_zcal_ctrl_tx, rg_zcal_ctrl_rx;
+ u16 l4r22_temp;
+
+ rg_zcal_ctrl_rx = 0;
+ rg_zcal_ctrl_tx = 0;
+ pr_info("r50_cal_result = 0x%x\n", r50_cal_result);
+ if (port_num == 0) {
+ rg_zcal_ctrl_tx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result)];
+ rg_zcal_ctrl_rx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result)];
+ }
+ if (port_num == 1) {
+ rg_zcal_ctrl_tx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result)] + 4;
+ rg_zcal_ctrl_rx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result)] + 4;
+ }
+ if (port_num == 2) {
+ rg_zcal_ctrl_tx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result)] + 4;
+ rg_zcal_ctrl_rx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result)] + 6;
+ }
+ if (port_num == 3) {
+ rg_zcal_ctrl_tx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result)] + 5;
+ rg_zcal_ctrl_rx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result)] + 6;
+ }
+ if (port_num == 4) {
+ rg_zcal_ctrl_tx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result)] + 4;
+ rg_zcal_ctrl_rx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result)] + 4;
+ }
+ if (rg_zcal_ctrl_tx > 0x7f)
+ rg_zcal_ctrl_tx = 0x7f;
+ if (rg_zcal_ctrl_rx > 0x7f)
+ rg_zcal_ctrl_rx = 0x7f;
+/*R50OHM_RSEL_TX= LP4R22[14:8]*/
+ tc_phy_write_l_reg(port_num, 4, 22, ((rg_zcal_ctrl_tx << 8)));
+ l4r22_temp = tc_phy_read_l_reg(port_num, 4, 22);
+/*R50OHM_RSEL_RX= LP4R22[6:0]*/
+ tc_phy_write_l_reg(port_num, 4, 22,
+ (l4r22_temp | (rg_zcal_ctrl_rx << 0)));
+ fe_cal_r50_flag = 1;
+ pr_info("[%d] - r50 final result l4r22[%d] = %x\n", port_num,
+ port_num, tc_phy_read_l_reg(port_num, 4, 22));
+}
+
+void set_r50_mt7622(u8 port_num, u32 r50_cal_result)
+{
+ int rg_zcal_ctrl_tx, rg_zcal_ctrl_rx;
+ u16 l4r22_temp;
+
+ rg_zcal_ctrl_rx = 0;
+ rg_zcal_ctrl_tx = 0;
+ pr_info("r50_cal_result = 0x%x\n", r50_cal_result);
+
+ if (port_num == 0) {
+ rg_zcal_ctrl_tx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result - 5)];
+ rg_zcal_ctrl_rx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result - 5)];
+ }
+ if (port_num == 1) {
+ rg_zcal_ctrl_tx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result - 3)];
+ rg_zcal_ctrl_rx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result - 3)];
+ }
+ if (port_num == 2) {
+ rg_zcal_ctrl_tx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result - 4)];
+ rg_zcal_ctrl_rx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result - 5)];
+ }
+ if (port_num == 3) {
+ rg_zcal_ctrl_tx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result - 4)];
+ rg_zcal_ctrl_rx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result - 3)];
+ }
+ if (port_num == 4) {
+ rg_zcal_ctrl_tx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result - 4)];
+ rg_zcal_ctrl_rx = ZCAL_TO_R50OHM_TBL_100[(r50_cal_result - 5)];
+ }
+/*R50OHM_RSEL_TX= LP4R22[14:8]*/
+ tc_phy_write_l_reg(port_num, 4, 22, ((rg_zcal_ctrl_tx << 8)));
+ l4r22_temp = tc_phy_read_l_reg(port_num, 4, 22);
+/*R50OHM_RSEL_RX= LP4R22[6:0]*/
+ tc_phy_write_l_reg(port_num, 4, 22,
+ (l4r22_temp | (rg_zcal_ctrl_rx << 0)));
+ fe_cal_r50_flag = 1;
+ pr_info("[%d] - r50 final result l4r22[%d] = %x\n", port_num,
+ port_num, tc_phy_read_l_reg(port_num, 4, 22));
+}
+
/* fe_ge_r50_common - analog setup shared by the FE and GE R50 flows.
 * @port_num: port whose local registers are prepared
 *
 * Forces 100M mode, enables the bandgap output and the analog-cal
 * clock/enable bits in g7r24, and makes sure the R50 self-cal
 * (l4r23[0]) and REXT-cal (l3r25[13]) paths are both disabled before
 * an R50 calibration run.  Register write order follows the vendor
 * bring-up sequence - do not reorder.
 */
void fe_ge_r50_common(u8 port_num)
{
	u16 l3r25_temp, g7r24_tmp, l4r23_temp;
	u8 phyaddr;

	phyaddr = port_num; /* NOTE(review): assigned but never used here */
	tc_phy_write_l_reg(port_num, 0, 0, 0x2100);
	/*g2r25[7:5]:0x110, BG voltage output*/
	tc_phy_write_g_reg(FE_CAL_COMMON, 2, 25, 0xf0c0);

	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, 0x0000);
	/*g7r24[13]:0x01, RG_ANA_CALEN_P0=1*/
	g7r24_tmp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, (g7r24_tmp | 0x2000));
	/*g7r24[14]:0x01, RG_CAL_CKINV_P0=1*/
	g7r24_tmp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, (g7r24_tmp | 0x4000));

	/*g7r24[12]:0x01, DA_TXVOS_CALEN_P0=0*/
	g7r24_tmp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, (g7r24_tmp & (~0x1000)));

	/*DA_R50OHM_CAL_EN l4r23[0] = 0*/
	l4r23_temp = tc_phy_read_l_reg(port_num, 4, 23);
	l4r23_temp = l4r23_temp & ~(0x01);
	tc_phy_write_l_reg(port_num, 4, 23, l4r23_temp);

	/*RG_REXT_CALEN l2r25[13] = 0*/
	l3r25_temp = tc_phy_read_l_reg(FE_CAL_COMMON, 3, 25);
	tc_phy_write_l_reg(FE_CAL_COMMON, 3, 25, (l3r25_temp & (~0x2000)));
}
+
/* fe_cal_r50 - calibrate the 50-ohm line termination for one FE port.
 * @port_num: FE port to calibrate
 * @delay: per-poll delay handed to all_fe_ana_cal_wait()
 *
 * Enables the bandgap reference and the analog-cal comparator, then
 * walks the 6-bit ZCAL code in g7r24[11:6] one step at a time until
 * the comparator output (g7r24[0]) flips or the code saturates at
 * 0x00/0x3f.  The raw code is handed to the chip-specific set_r50_*()
 * helper which maps it to the actual TX/RX trims.  Sets
 * fe_cal_r50_flag on success (set_r50_*() also sets it).
 */
void fe_cal_r50(u8 port_num, u32 delay)
{
	int rg_zcal_ctrl, all_ana_cal_status, rg_zcal_ctrl_tx, rg_zcal_ctrl_rx;
	int ad_cal_comp_out_init;
	u16 l3r25_temp, l0r4, g7r24_tmp, l4r23_temp;
	int calibration_polarity;
	u8 cnt = 0, phyaddr;
	struct END_DEVICE *ei_local = netdev_priv(dev_raether);

	phyaddr = port_num + ephy_addr_base;
	/* force 100M mode for the calibration run */
	tc_phy_write_l_reg(port_num, 0, 0, 0x2100);
	/*g2r25[7:5]:0x110, BG voltage output*/
	tc_phy_write_g_reg(FE_CAL_COMMON, 2, 25, 0xf0c0);

	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, 0x0000);
	/*g7r24[13]:0x01, RG_ANA_CALEN_P0=1*/
	g7r24_tmp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, (g7r24_tmp | 0x2000));
	/*g7r24[14]:0x01, RG_CAL_CKINV_P0=1*/
	g7r24_tmp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, (g7r24_tmp | 0x4000));

	/*g7r24[12]:0x01, DA_TXVOS_CALEN_P0=0*/
	g7r24_tmp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, (g7r24_tmp & (~0x1000)));

	/* pr_info("g7r24 = %x\n", g7r24_tmp); */

	/*DA_R50OHM_CAL_EN l4r23[0] = 1*/
	l4r23_temp = tc_phy_read_l_reg(port_num, 4, 23);
	tc_phy_write_l_reg(port_num, 4, 23, (l4r23_temp | (0x01)));

	/*RG_REXT_CALEN l2r25[13] = 0*/
	l3r25_temp = tc_phy_read_l_reg(FE_CAL_COMMON, 3, 25);
	tc_phy_write_l_reg(FE_CAL_COMMON, 3, 25, (l3r25_temp & (~0x2000)));

	/*decide which port calibration RG_ZCALEN by port_num*/
	l3r25_temp = tc_phy_read_l_reg(port_num, 3, 25);
	tc_phy_write_l_reg(port_num, 3, 25, (l3r25_temp | 0x1000));

	rg_zcal_ctrl = 0x20; /* start with 0 dB */
	g7r24_tmp = (tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & (~0xfc0));
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, g7r24_tmp | ((rg_zcal_ctrl & 0x3f) << 6));

	/*wait AD_CAL_COMP_OUT = 1*/
	all_ana_cal_status = all_fe_ana_cal_wait(delay, port_num);
	if (all_ana_cal_status == 0) {
		all_ana_cal_status = ANACAL_ERROR;
		pr_info(" FE R50 AnaCal ERROR! (init) \r\n");
	}

	ad_cal_comp_out_init = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1;

	/* step the ZCAL code towards the comparator trip point */
	if (ad_cal_comp_out_init == 1)
		calibration_polarity = -1;
	else
		calibration_polarity = 1;

	cnt = 0;
	while ((all_ana_cal_status < ANACAL_ERROR) && (cnt < 254)) {
		cnt++;

		rg_zcal_ctrl += calibration_polarity;
		g7r24_tmp = (tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & (~0xfc0));
		tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, g7r24_tmp | ((rg_zcal_ctrl & 0x3f) << 6));
		all_ana_cal_status = all_fe_ana_cal_wait(delay, port_num);

		if (all_ana_cal_status == 0) {
			all_ana_cal_status = ANACAL_ERROR;
			pr_info(" FE R50 AnaCal ERROR! (%d) \r\n", cnt);
		} else if ((tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1) !=
			   ad_cal_comp_out_init) {
			/* comparator flipped: converged */
			all_ana_cal_status = ANACAL_FINISH;
		} else {
			if ((rg_zcal_ctrl == 0x3F) || (rg_zcal_ctrl == 0x00)) {
				all_ana_cal_status = ANACAL_SATURATION;
				pr_info(" FE R50 AnaCal Saturation! (%d) \r\n",
					cnt);
			} else {
				/* NOTE(review): scratch read of the comparator
				 * bit - the value is never used. */
				l0r4 = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
				l0r4 = l0r4 & 0x1;
			}
		}
	}
	if (port_num == 0)
		r50_p0_cal_result = rg_zcal_ctrl;

	if ((all_ana_cal_status == ANACAL_ERROR) ||
	    (all_ana_cal_status == ANACAL_SATURATION)) {
		rg_zcal_ctrl = 0x20; /* 0 dB */
		/* NOTE(review): the two 0x7f fallback trims below are never
		 * consumed - set_r50_*() recomputes from rg_zcal_ctrl. */
		rg_zcal_ctrl_tx = 0x7f;
		rg_zcal_ctrl_rx = 0x7f;
		pr_info("[%d] %s, ANACAL_SATURATION\n", port_num, __func__);
	} else {
		fe_cal_r50_flag = 1;
	}
	if (ei_local->chip_name == MT7622_FE)
		set_r50_mt7622(port_num, rg_zcal_ctrl);
	else if (ei_local->chip_name == LEOPARD_FE)
		set_r50_leopard(port_num, rg_zcal_ctrl);

	clear_ckinv_ana_txvos();
	tc_phy_write_l_reg(port_num, 3, 25, 0x0000);
	tc_phy_write_l_reg(FE_CAL_COMMON, 3, 25, 0x0000);
}
+
/* fe_cal_vbg - bandgap / IEXT reference calibration (chip-wide).
 * @port_num: first port; RG_ZCALEN is cleared on ports port_num..4
 * @delay: per-poll delay handed to all_fe_ana_cal_wait()
 *
 * Walks the 6-bit ZCAL code in g7r24[11:6] until the comparator output
 * (g7r24[0]) flips, stores the result in iext_cal_result, mirrors it
 * into l3r26[11:6], and derives the bandgap RASEL trim (g2r22[11:9]) -
 * via ZCAL_TO_REXT_TBL[] on Leopard, or from the code's top bits on
 * MT7622.  Sets fe_cal_vbg_flag on success.
 */
void fe_cal_vbg(u8 port_num, u32 delay)
{
	int rg_zcal_ctrl, all_ana_cal_status;
	int ad_cal_comp_out_init, port_no;
	u16 l3r25_temp, l0r4, g7r24_tmp, l3r26_temp;
	int calibration_polarity;
	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
	u16 g2r22_temp, rg_bg_rasel;
	u8 cnt = 0, phyaddr;

	rg_bg_rasel = 0;
	ephy_addr_base = 0;
	phyaddr = port_num + ephy_addr_base;

	/* bandgap output + analog-cal clock/enable (g7r24[14:13]),
	 * TX-offset cal disabled (g7r24[12]) */
	tc_phy_write_g_reg(FE_CAL_COMMON, 2, 25, 0x30c0);
	tc_phy_write_g_reg(FE_CAL_COMMON, 0, 25, 0x0030);
	g7r24_tmp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, (g7r24_tmp | 0x2000));
	g7r24_tmp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, (g7r24_tmp | 0x4000));

	g7r24_tmp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, (g7r24_tmp & (~0x1000)));

	/* RG_REXT_CALEN = 1 (l3r25[13]) on the common port */
	l3r25_temp = tc_phy_read_l_reg(FE_CAL_COMMON, 3, 25);
	tc_phy_write_l_reg(FE_CAL_COMMON, 3, 25, (l3r25_temp | 0x2000));

	/* clear per-port RG_ZCALEN so only the REXT path is measured */
	for (port_no = port_num; port_no < 5; port_no++) {
		l3r25_temp = tc_phy_read_l_reg(port_no, 3, 25);
		tc_phy_write_l_reg(port_no, 3, 25, (l3r25_temp & (~0x1000)));
	}
	rg_zcal_ctrl = 0x0; /* start with 0 dB */

	g7r24_tmp = (tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & (~0xfc0));
	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, g7r24_tmp | ((rg_zcal_ctrl & 0x3f) << 6));

	all_ana_cal_status = all_fe_ana_cal_wait(delay, port_num);
	if (all_ana_cal_status == 0) {
		all_ana_cal_status = ANACAL_ERROR;
		pr_info(" fe_cal_vbg ERROR! (init) \r\n");
	}
	ad_cal_comp_out_init = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1;

	/* step the ZCAL code towards the comparator trip point */
	if (ad_cal_comp_out_init == 1)
		calibration_polarity = -1;
	else
		calibration_polarity = 1;

	cnt = 0;
	while ((all_ana_cal_status < ANACAL_ERROR) && (cnt < 254)) {
		cnt++;
		rg_zcal_ctrl += calibration_polarity;
		g7r24_tmp = (tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & (~0xfc0));
		tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, g7r24_tmp | ((rg_zcal_ctrl & 0x3f) << 6));
		all_ana_cal_status = all_fe_ana_cal_wait(delay, port_num);

		if (all_ana_cal_status == 0) {
			all_ana_cal_status = ANACAL_ERROR;
			pr_info("VBG ERROR(%d)status=%d\n", cnt, all_ana_cal_status);
		} else if ((tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1) !=
			   ad_cal_comp_out_init) {
			/* comparator flipped: converged */
			all_ana_cal_status = ANACAL_FINISH;
		} else {
			if ((rg_zcal_ctrl == 0x3F) || (rg_zcal_ctrl == 0x00)) {
				all_ana_cal_status = ANACAL_SATURATION;
				pr_info(" VBG0 AnaCal Saturation! (%d) \r\n",
					cnt);
			} else {
				/* NOTE(review): scratch read of the comparator
				 * bit - the value is never used. */
				l0r4 = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
				l0r4 = l0r4 & 0x1;
			}
		}
	}
	if ((all_ana_cal_status == ANACAL_ERROR) ||
	    (all_ana_cal_status == ANACAL_SATURATION)) {
		rg_zcal_ctrl = 0x20; /* 0 dB */
	} else {
		fe_cal_vbg_flag = 1;
	}

	/* NOTE(review): this read-back overwrites the 0x20 fallback set
	 * just above on error/saturation - confirm that is intended. */
	rg_zcal_ctrl = (tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & (0xfc0)) >> 6;
	iext_cal_result = rg_zcal_ctrl;
	pr_info("iext_cal_result = 0x%x\n", iext_cal_result);
	if (ei_local->chip_name == LEOPARD_FE)
		rg_bg_rasel = ZCAL_TO_REXT_TBL[rg_zcal_ctrl];

	/* mirror the ZCAL code into l3r26[11:6] */
	l3r26_temp = tc_phy_read_l_reg(FE_CAL_COMMON, 3, 26);
	l3r26_temp = l3r26_temp & (~0xfc0);
	tc_phy_write_l_reg(FE_CAL_COMMON, 3, 26, l3r26_temp | ((rg_zcal_ctrl & 0x3f) << 6));

	g2r22_temp = tc_phy_read_g_reg(FE_CAL_COMMON, 2, 22);
	g2r22_temp = g2r22_temp & (~0xe00);/*[11:9]*/

	if (ei_local->chip_name == LEOPARD_FE) {
		rg_bg_rasel = rg_bg_rasel & 0x7;
		tc_phy_write_g_reg(FE_CAL_COMMON, 2, 22,
				   g2r22_temp | (rg_bg_rasel << 9));
	} else if (ei_local->chip_name == MT7622_FE) {
		rg_zcal_ctrl = rg_zcal_ctrl & 0x38;
		tc_phy_write_g_reg(FE_CAL_COMMON, 2, 22,
				   g2r22_temp | (((rg_zcal_ctrl & 0x38) >> 3) << 9));
	}
	clear_ckinv_ana_txvos();

	tc_phy_write_l_reg(port_num, 3, 25, 0x0000);
	tc_phy_write_l_reg(FE_CAL_COMMON, 3, 25, 0x0000);
}
+
/* Poll delay handed to the *_ana_cal_wait() helpers (their udelay()
 * argument) for every per-port calibration step below. */
#define CALDLY 40
+
/* do_fe_phy_all_analog_cal - run the full analog calibration for one FE port.
 * @port_num: FE port to calibrate
 *
 * Sequence: vendor default register setup (plus Leopard-specific eye
 * tuning), then VBG/IEXT (only on the chip's designated "iext port"),
 * R50 termination, TX offset (MDI and MDIX) and TX amplitude (MDI and
 * MDIX).  Each step is retried up to 3 times until its global
 * completion flag is set; the flags are cleared again afterwards so the
 * next port starts fresh.  Finally the saved l0r26/l0r30 values are
 * restored, the port is put back in 0x3100 mode and flow control is
 * enabled.
 */
void do_fe_phy_all_analog_cal(u8 port_num)
{
	u16 l0r26_temp, l0r30_temp, l3r25_tmp;
	u8 cnt = 0, phyaddr, i, iext_port;
	u32 iext_s, iext_e, r50_s, r50_e, txo_s, txo_e, txa_s, txa_e;
	struct END_DEVICE *ei_local = netdev_priv(dev_raether);

	iext_port = 0;
	ephy_addr_base = 0;
	phyaddr = port_num + ephy_addr_base;
	/* save l0r26/l0r30 so they can be restored after calibration */
	l0r26_temp = tc_phy_read_l_reg(port_num, 0, 26);
	tc_phy_write_l_reg(port_num, 0, 26, 0x5600);
	tc_phy_write_l_reg(port_num, 4, 21, 0x0000);
	tc_phy_write_l_reg(port_num, 0, 0, 0x2100);

	l0r30_temp = tc_phy_read_l_reg(port_num, 0, 30);

/*eye pic.*/
	tc_phy_write_g_reg(port_num, 5, 20, 0x0170);
	tc_phy_write_g_reg(port_num, 5, 23, 0x0220);
	tc_phy_write_g_reg(port_num, 5, 24, 0x0206);
	tc_phy_write_g_reg(port_num, 5, 26, 0x0370);
	tc_phy_write_g_reg(port_num, 5, 27, 0x02f2);
	tc_phy_write_g_reg(port_num, 5, 29, 0x001b);
	tc_phy_write_g_reg(port_num, 5, 30, 0x0002);
/*Yiron default setting*/
	for (i = port_num; i < 5; i++) {
		tc_phy_write_g_reg(i, 3, 23, 0x0);
		tc_phy_write_l_reg(i, 3, 23, 0x2004);
		tc_phy_write_l_reg(i, 2, 21, 0x8551);
		tc_phy_write_l_reg(i, 4, 17, 0x2000);
		tc_phy_write_g_reg(i, 7, 20, 0x7c62);
		tc_phy_write_l_reg(i, 4, 20, 0x4444);
		tc_phy_write_l_reg(i, 2, 22, 0x1011);
		tc_phy_write_l_reg(i, 4, 28, 0x1011);
		tc_phy_write_l_reg(i, 4, 19, 0x2222);
		tc_phy_write_l_reg(i, 4, 29, 0x2222);
		tc_phy_write_l_reg(i, 2, 28, 0x3444);
		tc_phy_write_l_reg(i, 2, 29, 0x04c6);
		tc_phy_write_l_reg(i, 4, 30, 0x0006);
		tc_phy_write_l_reg(i, 5, 16, 0x04c6);
	}
	if (ei_local->chip_name == LEOPARD_FE) {
		/* clear the GE amplitude/offset trims before FE cal */
		tc_phy_write_l_reg(port_num, 0, 20, 0x0c0c);
		tc_phy_write_dev_reg(0, 0x1e, 0x017d, 0x0000);
		tc_phy_write_dev_reg(0, 0x1e, 0x017e, 0x0000);
		tc_phy_write_dev_reg(0, 0x1e, 0x017f, 0x0000);
		tc_phy_write_dev_reg(0, 0x1e, 0x0180, 0x0000);
		tc_phy_write_dev_reg(0, 0x1e, 0x0181, 0x0000);
		tc_phy_write_dev_reg(0, 0x1e, 0x0182, 0x0000);
		tc_phy_write_dev_reg(0, 0x1e, 0x0183, 0x0000);
		tc_phy_write_dev_reg(0, 0x1e, 0x0184, 0x0000);
		tc_phy_write_dev_reg(0, 0x1e, 0x00db, 0x0000);
		tc_phy_write_dev_reg(0, 0x1e, 0x00dc, 0x0000);
		tc_phy_write_dev_reg(0, 0x1e, 0x003e, 0x0000);
		tc_phy_write_dev_reg(0, 0x1e, 0x00dd, 0x0000);

		/*eye pic.*/
		tc_phy_write_g_reg(1, 5, 19, 0x0100);
		tc_phy_write_g_reg(1, 5, 20, 0x0161);
		tc_phy_write_g_reg(1, 5, 21, 0x00f0);
		tc_phy_write_g_reg(1, 5, 22, 0x0046);
		tc_phy_write_g_reg(1, 5, 23, 0x0210);
		tc_phy_write_g_reg(1, 5, 24, 0x0206);
		tc_phy_write_g_reg(1, 5, 25, 0x0238);
		tc_phy_write_g_reg(1, 5, 26, 0x0360);
		tc_phy_write_g_reg(1, 5, 27, 0x02f2);
		tc_phy_write_g_reg(1, 5, 28, 0x0240);
		tc_phy_write_g_reg(1, 5, 29, 0x0010);
		tc_phy_write_g_reg(1, 5, 30, 0x0002);
	}
	/* VBG/IEXT is chip-wide: run it only on the designated port */
	if (ei_local->chip_name == MT7622_FE)
		iext_port = 0;
	else if (ei_local->chip_name == LEOPARD_FE)
		iext_port = 1;

	if (port_num == iext_port) {
		/*****VBG & IEXT Calibration*****/
		cnt = 0;
		while ((fe_cal_vbg_flag == 0) && (cnt < 0x03)) {
			iext_s = jiffies;
			fe_cal_vbg(port_num, 1); /* allen_20160608 */
			iext_e = jiffies;
			/* NOTE(review): "* 4" presumably converts jiffies to
			 * ms assuming HZ=250 - confirm for this kernel. */
			if (show_time)
				pr_info("port[%d] fe_cal_vbg time = %u\n",
					port_num, (iext_e - iext_s) * 4);
			cnt++;
			if (fe_cal_vbg_flag == 0)
				pr_info(" FE-%d VBG wait! (%d) \r\n", phyaddr, cnt);
		}
		fe_cal_vbg_flag = 0;
		/**** VBG & IEXT Calibration end ****/
	}

	/* *** R50 Cal start *************************************** */
	cnt = 0;
	while ((fe_cal_r50_flag == 0) && (cnt < 0x03)) {
		r50_s = jiffies;

		fe_cal_r50(port_num, 1);

		r50_e = jiffies;
		if (show_time)
			pr_info("port[%d] fe_r50 time = %u\n",
				port_num, (r50_e - r50_s) * 4);
		cnt++;
		if (fe_cal_r50_flag == 0)
			pr_info(" FE-%d R50 wait! (%d) \r\n", phyaddr, cnt);
	}
	fe_cal_r50_flag = 0;
	cnt = 0;
	/* *** R50 Cal end *** */
	/* *** Tx offset Cal start ********************************* */

	cnt = 0;
	while ((fe_cal_tx_offset_flag == 0) && (cnt < 0x03)) {
		txo_s = jiffies;
		fe_cal_tx_offset(port_num, CALDLY);
		txo_e = jiffies;
		if (show_time)
			pr_info("port[%d] fe_cal_tx_offset time = %u\n",
				port_num, (txo_e - txo_s) * 4);
		cnt++;
	}
	fe_cal_tx_offset_flag = 0;
	cnt = 0;

	while ((fe_cal_tx_offset_flag_mdix == 0) && (cnt < 0x03)) {
		txo_s = jiffies;
		fe_cal_tx_offset_mdix(port_num, CALDLY);
		txo_e = jiffies;
		if (show_time)
			pr_info("port[%d] fe_cal_tx_offset_mdix time = %u\n",
				port_num, (txo_e - txo_s) * 4);
		cnt++;
	}
	fe_cal_tx_offset_flag_mdix = 0;
	cnt = 0;
	/* *** Tx offset Cal end *** */

	/* *** Tx Amp Cal start ************************************** */
	cnt = 0;
	while ((fe_cal_flag == 0) && (cnt < 0x3)) {
		txa_s = jiffies;
		fe_cal_tx_amp(port_num, CALDLY); /* allen_20160608 */
		txa_e = jiffies;
		if (show_time)
			pr_info("port[%d] fe_cal_tx_amp time = %u\n",
				port_num, (txa_e - txa_s) * 4);
		cnt++;
	}
	fe_cal_flag = 0;
	cnt = 0;
	while ((fe_cal_flag_mdix == 0) && (cnt < 0x3)) {
		txa_s = jiffies;
		fe_cal_tx_amp_mdix(port_num, CALDLY);
		txa_e = jiffies;
		if (show_time)
			pr_info("port[%d] fe_cal_tx_amp_mdix time = %u\n",
				port_num, (txa_e - txa_s) * 4);
		cnt++;
	}
	fe_cal_flag_mdix = 0;
	cnt = 0;

	/* restore saved registers and leave calibration mode */
	l3r25_tmp = tc_phy_read_l_reg(port_num, 3, 25);
	l3r25_tmp = l3r25_tmp & ~(0x1000);/*[12] RG_ZCALEN = 0*/
	tc_phy_write_l_reg(port_num, 3, 25, l3r25_tmp);
	tc_phy_write_g_reg(port_num, 1, 26, 0x0000);
	tc_phy_write_l_reg(port_num, 0, 26, l0r26_temp);
	tc_phy_write_l_reg(port_num, 0, 30, l0r30_temp);
	tc_phy_write_g_reg(port_num, 1, 26, 0x0000);
	tc_phy_write_l_reg(port_num, 0, 0, 0x3100);
	/*enable flow control*/
	tc_phy_write_g_reg(port_num, 0, 4, 0x5e1);
}
+
+u8 all_ge_ana_cal_wait(unsigned int delay, u8 port_num) /* for EN7512 */
+{
+ u8 all_ana_cal_status;
+ u16 cnt, g7r24_temp;
+
+ g7r24_temp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+ tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, g7r24_temp & (~0x10));
+ g7r24_temp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+ tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, g7r24_temp | 0x10);
+
+ cnt = 1000;
+ do {
+ udelay(delay);
+ cnt--;
+ all_ana_cal_status =
+ ((tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) >> 1) & 0x1);
+
+ } while ((all_ana_cal_status == 0) && (cnt != 0));
+ g7r24_temp = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24);
+ tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, g7r24_temp & (~0x10));
+
+ return all_ana_cal_status;
+}
+
/* ge_cal_rext - GE Iext/Rext (external resistor) calibration.
 * @phyaddr: PHY address of the GE port
 * @delay: per-poll delay handed to all_ge_ana_cal_wait()
 *
 * Walks the 6-bit ZCAL code in MMD 1e.e0[5:0] until the comparator bit
 * 1e.17a[8] flips, then mirrors the final code into 1e.e0[13:8] and its
 * top three bits into 1f.115[2:0].  Sets ge_cal_flag on success.
 * NOTE(review): unlike the FE loops this one has no iteration cap; it
 * relies on the 0x00/0x3f saturation check to terminate.
 */
void ge_cal_rext(u8 phyaddr, unsigned int delay)
{
	u8 rg_zcal_ctrl, all_ana_cal_status;
	u16 ad_cal_comp_out_init;
	u16 dev1e_e0_ana_cal_r5;
	int calibration_polarity;
	u8 cnt = 0;
	u16 dev1e_17a_tmp, dev1e_e0_tmp;

	/* *** Iext/Rext Cal start ************ */
	all_ana_cal_status = ANACAL_INIT;
	/* analog calibration enable, Rext calibration enable */
	/* 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a */
	/* 1e_dc[0]:rg_txvos_calen */
	/* 1e_e1[4]:rg_cal_refsel(0:1.2V) */
	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00db, 0x1110);
	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dc, 0x0000);
	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00e1, 0x0000);

	rg_zcal_ctrl = 0x20;/* start with 0 dB */
	/* save 1e_e0 so the final write can preserve the other bits */
	dev1e_e0_ana_cal_r5 = tc_phy_read_dev_reg(phyaddr, 0x1e, 0x00e0);
	/* 1e_e0[5:0]:rg_zcal_ctrl */
	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00e0, (rg_zcal_ctrl));
	all_ana_cal_status = all_ge_ana_cal_wait(delay, phyaddr);/* delay 20 usec */
	if (all_ana_cal_status == 0) {
		all_ana_cal_status = ANACAL_ERROR;
		pr_info(" GE Rext AnaCal ERROR! \r\n");
	}
	/* 1e_17a[8]:ad_cal_comp_out */
	ad_cal_comp_out_init = (tc_phy_read_dev_reg(phyaddr, 0x1e, 0x017a) >> 8) & 0x1;
	/* step the ZCAL code towards the comparator trip point */
	if (ad_cal_comp_out_init == 1)
		calibration_polarity = -1;
	else /* ad_cal_comp_out_init == 0 */
		calibration_polarity = 1;

	cnt = 0;
	while (all_ana_cal_status < ANACAL_ERROR) {
		cnt++;
		rg_zcal_ctrl += calibration_polarity;
		tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00e0, (rg_zcal_ctrl));
		all_ana_cal_status = all_ge_ana_cal_wait(delay, phyaddr); /* delay 20 usec */
		dev1e_17a_tmp = tc_phy_read_dev_reg(phyaddr, 0x1e, 0x017a);
		if (all_ana_cal_status == 0) {
			all_ana_cal_status = ANACAL_ERROR;
			pr_info(" GE Rext AnaCal ERROR! \r\n");
		} else if (((dev1e_17a_tmp >> 8) & 0x1) != ad_cal_comp_out_init) {
			/* comparator flipped: converged */
			all_ana_cal_status = ANACAL_FINISH;
			pr_info(" GE Rext AnaCal Done! (%d)(0x%x) \r\n", cnt, rg_zcal_ctrl);
		} else {
			dev1e_17a_tmp = tc_phy_read_dev_reg(phyaddr, 0x1e, 0x017a);
			dev1e_e0_tmp = tc_phy_read_dev_reg(phyaddr, 0x1e, 0xe0);
			if ((rg_zcal_ctrl == 0x3F) || (rg_zcal_ctrl == 0x00)) {
				all_ana_cal_status = ANACAL_SATURATION; /* need to FT(IC fail?) */
				pr_info(" GE Rext AnaCal Saturation! \r\n");
				rg_zcal_ctrl = 0x20; /* 0 dB */
			} else {
				pr_info(" GE Rxet cal (%d)(%d)(%d)(0x%x) \r\n",
					cnt, ad_cal_comp_out_init,
					((dev1e_17a_tmp >> 8) & 0x1), dev1e_e0_tmp);
			}
		}
	}

	if (all_ana_cal_status == ANACAL_ERROR) {
		rg_zcal_ctrl = 0x20; /* 0 dB */
		tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00e0, (dev1e_e0_ana_cal_r5 | rg_zcal_ctrl));
	} else {
		tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00e0, (dev1e_e0_ana_cal_r5 | rg_zcal_ctrl));
		/* duplicate the final code into 1e_e0[13:8] and [5:0] */
		tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00e0, ((rg_zcal_ctrl << 8) | rg_zcal_ctrl));
		/* **** 1f_115[2:0] = rg_zcal_ctrl[5:3] // Mog review */
		tc_phy_write_dev_reg(phyaddr, 0x1f, 0x0115, ((rg_zcal_ctrl & 0x3f) >> 3));
		pr_info(" GE Rext AnaCal Done! (%d)(0x%x) \r\n", cnt, rg_zcal_ctrl);
		ge_cal_flag = 1;
	}
	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00db, 0x0000);
	/* *** Iext/Rext Cal end *** */
}
+
+void ge_cal_r50(u8 phyaddr, unsigned int delay)
+{
+ u8 rg_zcal_ctrl, all_ana_cal_status, i;
+ u16 ad_cal_comp_out_init;
+ u16 dev1e_e0_ana_cal_r5;
+ int calibration_polarity;
+ u16 cal_pair, val_tmp, g7r24_tmp;
+ u16 dev1e_174_tmp, dev1e_175_tmp, l3r25_temp;
+ u8 rg_zcal_ctrl_filter, cnt = 0;
+
+ /* *** R50 Cal start***************** */
+ fe_ge_r50_common(phyaddr);
+ /* 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a */
+ /* 1e_dc[0]:rg_txvos_calen */
+ /*disable RG_ZCALEN*/
+ /*decide which port calibration RG_ZCALEN by port_num*/
+ for (i = 1; i <= 4; i++) {
+ l3r25_temp = tc_phy_read_l_reg(i, 3, 25);
+ l3r25_temp = l3r25_temp & ~(0x1000);
+ tc_phy_write_l_reg(i, 3, 25, l3r25_temp);
+ }
+ for (cal_pair = ANACAL_PAIR_A; cal_pair <= ANACAL_PAIR_D; cal_pair++) {
+ rg_zcal_ctrl = 0x20;/* start with 0 dB */
+ dev1e_e0_ana_cal_r5 = (tc_phy_read_dev_reg(phyaddr, 0x1e, 0x00e0) & (~0x003f));
+ /* 1e_e0[5:0]:rg_zcal_ctrl */
+ if (cal_pair == ANACAL_PAIR_A) {
+ /* 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a */
+ tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dd, 0x1000);
+ } else if (cal_pair == ANACAL_PAIR_B) {
+ /* 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a */
+ /* 1e_dc[12]:rg_zcalen_b */
+ tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dd, 0x0100);
+ } else if (cal_pair == ANACAL_PAIR_C) {
+ /* 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a */
+ /* 1e_dc[8]:rg_zcalen_c */
+ tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dd, 0x0010);
+
+ } else {/* if(cal_pair == ANACAL_PAIR_D) */
+
+ /* 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a */
+ /* 1e_dc[4]:rg_zcalen_d */
+ tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dd, 0x0001);
+ }
+ rg_zcal_ctrl = 0x20; /* start with 0 dB */
+ g7r24_tmp = (tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & (~0xfc0));
+ tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, g7r24_tmp | ((rg_zcal_ctrl & 0x3f) << 6));
+
+ /*wait AD_CAL_COMP_OUT = 1*/
+ all_ana_cal_status = all_ge_ana_cal_wait(delay, phyaddr);
+ if (all_ana_cal_status == 0) {
+ all_ana_cal_status = ANACAL_ERROR;
+ pr_info(" GE R50 AnaCal ERROR! (init) \r\n");
+ }
+ ad_cal_comp_out_init = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1;
+ if (ad_cal_comp_out_init == 1)
+ calibration_polarity = -1;
+ else
+ calibration_polarity = 1;
+
+ cnt = 0;
+ while ((all_ana_cal_status < ANACAL_ERROR) && (cnt < 254)) {
+ cnt++;
+
+ rg_zcal_ctrl += calibration_polarity;
+ g7r24_tmp = (tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & (~0xfc0));
+ tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24,
+ g7r24_tmp | ((rg_zcal_ctrl & 0x3f) << 6));
+ all_ana_cal_status = all_ge_ana_cal_wait(delay, phyaddr);
+
+ if (all_ana_cal_status == 0) {
+ all_ana_cal_status = ANACAL_ERROR;
+ pr_info(" GE R50 AnaCal ERROR! (%d) \r\n", cnt);
+ } else if ((tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1) !=
+ ad_cal_comp_out_init) {
+ all_ana_cal_status = ANACAL_FINISH;
+ } else {
+ if ((rg_zcal_ctrl == 0x3F) || (rg_zcal_ctrl == 0x00)) {
+ all_ana_cal_status = ANACAL_SATURATION;
+ pr_info(" GE R50 Cal Sat! rg_zcal_ctrl = 0x%x(%d)\n",
+ cnt, rg_zcal_ctrl);
+ }
+ }
+ }
+
+ if ((all_ana_cal_status == ANACAL_ERROR) ||
+ (all_ana_cal_status == ANACAL_SATURATION)) {
+ rg_zcal_ctrl = 0x20; /* 0 dB */
+ rg_zcal_ctrl_filter = 8; /*default value*/
+ } else {
+ /*DA_TX_R50*/
+ rg_zcal_ctrl_filter = rg_zcal_ctrl;
+ rg_zcal_ctrl = ZCAL_TO_R50ohm_GE_TBL[rg_zcal_ctrl];
+ /*DA_TX_FILTER*/
+ rg_zcal_ctrl_filter = ZCAL_TO_FILTER_TBL[rg_zcal_ctrl_filter];
+ rg_zcal_ctrl_filter = rg_zcal_ctrl_filter & 0xf;
+ rg_zcal_ctrl_filter = rg_zcal_ctrl_filter << 8 | rg_zcal_ctrl_filter;
+ }
+ if (all_ana_cal_status == ANACAL_FINISH) {
+ if (cal_pair == ANACAL_PAIR_A) {
+ dev1e_174_tmp = tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0174);
+ dev1e_174_tmp = dev1e_174_tmp & ~(0xff00);
+ if (rg_zcal_ctrl > 4) {
+ val_tmp = (((rg_zcal_ctrl - 4) << 8) & 0xff00) |
+ dev1e_174_tmp;
+ } else {
+ val_tmp = (((0) << 8) & 0xff00) | dev1e_174_tmp;
+ }
+
+ tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0174, val_tmp);
+ tc_phy_write_dev_reg(phyaddr, 0x1e, 0x03a0, rg_zcal_ctrl_filter);
+
+ pr_info("R50_PAIR_A : 1e_174 = 0x%x\n",
+ tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0174));
+ pr_info("R50_PAIR_A : 1e_3a0 = 0x%x\n",
+ tc_phy_read_dev_reg(phyaddr, 0x1e, 0x03a0));
+
+ } else if (cal_pair == ANACAL_PAIR_B) {
+ dev1e_174_tmp = tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0174);
+ dev1e_174_tmp = dev1e_174_tmp & (~0x007f);
+ if (rg_zcal_ctrl > 2) {
+ val_tmp = (((rg_zcal_ctrl - 2) << 0) & 0xff) |
+ dev1e_174_tmp;
+ } else {
+ val_tmp = (((0) << 0) & 0xff) |
+ dev1e_174_tmp;
+ }
+ tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0174, val_tmp);
+ tc_phy_write_dev_reg(phyaddr, 0x1e, 0x03a1, rg_zcal_ctrl_filter);
+ pr_info("R50_PAIR_B : 1e_174 = 0x%x\n",
+ tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0174));
+ pr_info("R50_PAIR_B : 1e_3a1 = 0x%x\n",
+ tc_phy_read_dev_reg(phyaddr, 0x1e, 0x03a1));
+ } else if (cal_pair == ANACAL_PAIR_C) {
+ dev1e_175_tmp = tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0175);
+ dev1e_175_tmp = dev1e_175_tmp & (~0x7f00);
+ if (rg_zcal_ctrl > 4) {
+ val_tmp = dev1e_175_tmp |
+ (((rg_zcal_ctrl - 4) << 8) & 0xff00);
+ } else {
+ val_tmp = dev1e_175_tmp | (((0) << 8) & 0xff00);
+ }
+ tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0175, val_tmp);
+ tc_phy_write_dev_reg(phyaddr, 0x1e, 0x03a2, rg_zcal_ctrl_filter);
+ pr_info("R50_PAIR_C : 1e_175 = 0x%x\n",
+ tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0175));
+ pr_info("R50_PAIR_C : 1e_3a2 = 0x%x\n",
+ tc_phy_read_dev_reg(phyaddr, 0x1e, 0x03a2));
+
+ } else {/* if(cal_pair == ANACAL_PAIR_D) */
+ dev1e_175_tmp = tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0175);
+ dev1e_175_tmp = dev1e_175_tmp & (~0x007f);
+ if (rg_zcal_ctrl > 6) {
+ val_tmp = dev1e_175_tmp |
+ (((rg_zcal_ctrl - 6) << 0) & 0xff);
+ } else {
+ val_tmp = dev1e_175_tmp |
+ (((0) << 0) & 0xff);
+ }
+
+ tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0175, val_tmp);
+ tc_phy_write_dev_reg(phyaddr, 0x1e, 0x03a3, rg_zcal_ctrl_filter);
+ pr_info("R50_PAIR_D : 1e_175 = 0x%x\n",
+ tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0175));
+ pr_info("R50_PAIR_D : 1e_3a3 = 0x%x\n",
+ tc_phy_read_dev_reg(phyaddr, 0x1e, 0x03a3));
+ }
+ }
+ }
+ clear_ckinv_ana_txvos();
+ tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00db, 0x0000);
+ ge_cal_r50_flag = 1;
+ /* *** R50 Cal end *** */
+}
+
+/* ge_cal_tx_amp - GE PHY Tx amplitude calibration for pairs A-D.
+ *
+ * For each pair the routine programs a starting amplitude code (0x20),
+ * waits on the analog-cal comparator via all_ge_ana_cal_wait(), picks a
+ * step direction from the initial comparator bit (g-reg 7.24[0]), and
+ * walks the 6-bit code one LSB at a time until the comparator flips
+ * (ANACAL_FINISH), the code saturates at 0x00/0x3f (ANACAL_SATURATION),
+ * or the wait times out (ANACAL_ERROR).  On success the code plus
+ * per-pair fixed offsets is written to the giga registers
+ * (1e_12/17/19/21) and the 100M registers (1e_16/18/20/22).
+ *
+ * @phyaddr: PHY address used for the per-pair accesses
+ * @delay:   settle delay forwarded to all_ge_ana_cal_wait()
+ *
+ * Side effect: sets the global ge_cal_flag to 1 when done.
+ * NOTE(review): the setup writes below use a hard-coded PHY address 0
+ * rather than @phyaddr - presumably the GE port is always PHY 0;
+ * confirm before reusing this for another port.
+ */
+void ge_cal_tx_amp(u8 phyaddr, unsigned int delay)
+{
+	u8 all_ana_cal_status;
+	u16 ad_cal_comp_out_init;
+	int calibration_polarity;
+	u16 cal_pair;
+	u8 tx_amp_reg_shift;
+	u16 reg_temp, val_tmp, l3r25_temp, val_tmp_100;
+	u8 tx_amp_temp, tx_amp_reg, cnt = 0, tx_amp_reg_100;
+
+	u16 tx_amp_temp_L, tx_amp_temp_M;
+	u16 tx_amp_L_100, tx_amp_M_100;
+	/* *** Tx Amp Cal start ***/
+	tc_phy_write_l_reg(0, 0, 0, 0x0140);
+
+	/* calibration setup: force test patterns / enable cal circuitry */
+	tc_phy_write_dev_reg(0, 0x1e, 0x3e, 0xf808);
+	tc_phy_write_dev_reg(0, 0x1e, 0x145, 0x5010);
+	tc_phy_write_dev_reg(0, 0x1e, 0x17d, 0x80f0);
+	tc_phy_write_dev_reg(0, 0x1e, 0x17e, 0x80f0);
+	tc_phy_write_dev_reg(0, 0x1e, 0x17f, 0x80f0);
+	tc_phy_write_dev_reg(0, 0x1e, 0x180, 0x80f0);
+	tc_phy_write_dev_reg(0, 0x1e, 0x181, 0x80f0);
+	tc_phy_write_dev_reg(0, 0x1e, 0x182, 0x80f0);
+	tc_phy_write_dev_reg(0, 0x1e, 0x183, 0x80f0);
+	tc_phy_write_dev_reg(0, 0x1e, 0x184, 0x80f0);
+	tc_phy_write_dev_reg(0, 0x1e, 0x00db, 0x1000);
+	tc_phy_write_dev_reg(0, 0x1e, 0x00dc, 0x0001);
+	tc_phy_write_dev_reg(0, 0x1f, 0x300, 0x4);
+	tc_phy_write_dev_reg(0, 0x1f, 0x27a, 0x33);
+	tc_phy_write_g_reg(1, 2, 25, 0xf020);
+	tc_phy_write_dev_reg(0, 0x1f, 0x300, 0x14);
+	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, 0x7000);
+	l3r25_temp = tc_phy_read_l_reg(FE_CAL_COMMON, 3, 25);
+	l3r25_temp = l3r25_temp | 0x200;
+	tc_phy_write_l_reg(FE_CAL_COMMON, 3, 25, l3r25_temp);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x11, 0xff00);
+	tc_phy_write_dev_reg(phyaddr, 0x1f, 0x273, 0);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0xc9, 0xffff);
+	tc_phy_write_g_reg(1, 2, 25, 0xb020);
+
+	for (cal_pair = ANACAL_PAIR_A; cal_pair <= ANACAL_PAIR_D; cal_pair++) {
+		tx_amp_temp = 0x20; /* start with 0 dB */
+		tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, 0x7000);
+		/* select the pair under test and the target registers:
+		 * tx_amp_reg holds the 1000M code, tx_amp_reg_100 the 100M code
+		 */
+		if (cal_pair == ANACAL_PAIR_A) {
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dd, 0x1000);
+			reg_temp = (tc_phy_read_dev_reg(phyaddr, 0x1e, 0x012) & (~0xfc00));
+			tx_amp_reg_shift = 10;
+			tx_amp_reg = 0x12;
+			tx_amp_reg_100 = 0x16;
+		} else if (cal_pair == ANACAL_PAIR_B) {
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dd, 0x0100);
+			reg_temp = (tc_phy_read_dev_reg(phyaddr, 0x1e, 0x017) & (~0x3f00));
+			tx_amp_reg_shift = 8;
+			tx_amp_reg = 0x17;
+			tx_amp_reg_100 = 0x18;
+		} else if (cal_pair == ANACAL_PAIR_C) {
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dd, 0x0010);
+			reg_temp = (tc_phy_read_dev_reg(phyaddr, 0x1e, 0x019) & (~0x3f00));
+			tx_amp_reg_shift = 8;
+			tx_amp_reg = 0x19;
+			tx_amp_reg_100 = 0x20;
+		} else {/* if(cal_pair == ANACAL_PAIR_D) */
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dd, 0x0001);
+			reg_temp = (tc_phy_read_dev_reg(phyaddr, 0x1e, 0x021) & (~0x3f00));
+			tx_amp_reg_shift = 8;
+			tx_amp_reg = 0x21;
+			tx_amp_reg_100 = 0x22;
+		}
+		/* 1e_12, 1e_17, 1e_19, 1e_21 */
+		val_tmp = tx_amp_temp | (tx_amp_temp << tx_amp_reg_shift);
+		tc_phy_write_dev_reg(phyaddr, 0x1e, tx_amp_reg, val_tmp);
+		tc_phy_write_dev_reg(phyaddr, 0x1e, tx_amp_reg_100, val_tmp);
+		all_ana_cal_status = all_ge_ana_cal_wait(delay, phyaddr);
+		if (all_ana_cal_status == 0) {
+			all_ana_cal_status = ANACAL_ERROR;
+			pr_info(" GE Tx amp AnaCal ERROR! \r\n");
+		}
+/* 1e_17a[8]:ad_cal_comp_out */
+		/* step direction follows the initial comparator reading */
+		ad_cal_comp_out_init = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1;
+		if (ad_cal_comp_out_init == 1)
+			calibration_polarity = -1;
+		else
+			calibration_polarity = 1;
+
+		cnt = 0;
+		/* linear search: step the code until the comparator flips,
+		 * an error occurs, or the 6-bit code saturates
+		 */
+		while (all_ana_cal_status < ANACAL_ERROR) {
+			cnt++;
+			tx_amp_temp += calibration_polarity;
+
+			val_tmp = (tx_amp_temp | (tx_amp_temp << tx_amp_reg_shift));
+			tc_phy_write_dev_reg(phyaddr, 0x1e, tx_amp_reg, val_tmp);
+			tc_phy_write_dev_reg(phyaddr, 0x1e, tx_amp_reg_100, val_tmp);
+			all_ana_cal_status = all_ge_ana_cal_wait(delay, phyaddr);
+			if (all_ana_cal_status == 0) {
+				all_ana_cal_status = ANACAL_ERROR;
+				pr_info(" GE Tx amp AnaCal ERROR!\n");
+			} else if ((tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1) !=
+				   ad_cal_comp_out_init) {
+				all_ana_cal_status = ANACAL_FINISH;
+			} else {
+				if ((tx_amp_temp == 0x3f) || (tx_amp_temp == 0x00)) {
+					all_ana_cal_status = ANACAL_SATURATION;
+					pr_info(" GE Tx amp AnaCal Saturation! \r\n");
+				}
+			}
+		}
+		if (all_ana_cal_status == ANACAL_ERROR) {
+			/* fall back to the mid-scale default code */
+			pr_info("ANACAL_ERROR\n");
+			tx_amp_temp = 0x20;
+			val_tmp = (reg_temp | (tx_amp_temp << tx_amp_reg_shift));
+			tc_phy_write_dev_reg(phyaddr, 0x1e, tx_amp_reg, val_tmp);
+		}
+
+		if (all_ana_cal_status == ANACAL_FINISH) {
+			/* apply per-pair fixed offsets for the 1000M codes,
+			 * clamped to the 6-bit maximum 0x3f
+			 */
+			if (cal_pair == ANACAL_PAIR_A) {
+				tx_amp_temp_M = tx_amp_temp + 9;
+				tx_amp_temp_L = tx_amp_temp + 18;
+			} else if (cal_pair == ANACAL_PAIR_B) {
+				tx_amp_temp_M = tx_amp_temp + 8;
+				tx_amp_temp_L = tx_amp_temp + 22;
+			} else if (cal_pair == ANACAL_PAIR_C) {
+				tx_amp_temp_M = tx_amp_temp + 9;
+				tx_amp_temp_L = tx_amp_temp + 9;
+			} else if (cal_pair == ANACAL_PAIR_D) {
+				tx_amp_temp_M = tx_amp_temp + 9;
+				tx_amp_temp_L = tx_amp_temp + 9;
+			}
+			if (tx_amp_temp_L >= 0x3f)
+				tx_amp_temp_L = 0x3f;
+			if (tx_amp_temp_M >= 0x3f)
+				tx_amp_temp_M = 0x3f;
+			val_tmp = ((tx_amp_temp_L) |
+				   ((tx_amp_temp_M) << tx_amp_reg_shift));
+			/* 100M codes use different per-pair offsets */
+			if (cal_pair == ANACAL_PAIR_A) {
+				if (tx_amp_temp < 6)
+					tx_amp_M_100 = 0;
+				else
+					tx_amp_M_100 = tx_amp_temp - 6;
+
+				if ((tx_amp_temp + 9) >= 0x3f)
+					tx_amp_L_100 = 0x3f;
+				else
+					tx_amp_L_100 = tx_amp_temp + 9;
+				val_tmp_100 = ((tx_amp_L_100) |
+					       ((tx_amp_M_100) << tx_amp_reg_shift));
+			} else if (cal_pair == ANACAL_PAIR_B) {
+				if (tx_amp_temp < 7)
+					tx_amp_M_100 = 0;
+				else
+					tx_amp_M_100 = tx_amp_temp - 7;
+
+				if ((tx_amp_temp + 8) >= 0x3f)
+					tx_amp_L_100 = 0x3f;
+				else
+					tx_amp_L_100 = tx_amp_temp + 8;
+				val_tmp_100 = ((tx_amp_L_100) |
+					       ((tx_amp_M_100) << tx_amp_reg_shift));
+			} else if (cal_pair == ANACAL_PAIR_C) {
+				if ((tx_amp_temp + 9) >= 0x3f)
+					tx_amp_L_100 = 0x3f;
+				else
+					tx_amp_L_100 = tx_amp_temp + 9;
+				tx_amp_M_100 = tx_amp_L_100;
+				val_tmp_100 = ((tx_amp_L_100) |
+					       ((tx_amp_M_100) << tx_amp_reg_shift));
+			} else if (cal_pair == ANACAL_PAIR_D) {
+				if ((tx_amp_temp + 9) >= 0x3f)
+					tx_amp_L_100 = 0x3f;
+				else
+					tx_amp_L_100 = tx_amp_temp + 9;
+
+				tx_amp_M_100 = tx_amp_L_100;
+				val_tmp_100 = ((tx_amp_L_100) |
+					       ((tx_amp_M_100) << tx_amp_reg_shift));
+			}
+
+			tc_phy_write_dev_reg(phyaddr, 0x1e, tx_amp_reg, val_tmp);
+			tc_phy_write_dev_reg(phyaddr, 0x1e, tx_amp_reg_100, val_tmp_100);
+
+			/* log the final per-pair register values */
+			if (cal_pair == ANACAL_PAIR_A) {
+				pr_info("TX_AMP_PAIR_A : 1e_%x = 0x%x\n",
+					tx_amp_reg,
+					tc_phy_read_dev_reg(phyaddr, 0x1e, tx_amp_reg));
+				pr_info("TX_AMP_PAIR_A : 1e_%x = 0x%x\n",
+					tx_amp_reg_100,
+					tc_phy_read_dev_reg(phyaddr, 0x1e, tx_amp_reg_100));
+			} else if (cal_pair == ANACAL_PAIR_B) {
+				pr_info("TX_AMP_PAIR_B : 1e_%x = 0x%x\n",
+					tx_amp_reg,
+					tc_phy_read_dev_reg(phyaddr, 0x1e, tx_amp_reg));
+				pr_info("TX_AMP_PAIR_B : 1e_%x = 0x%x\n",
+					tx_amp_reg_100,
+					tc_phy_read_dev_reg(phyaddr, 0x1e, tx_amp_reg_100));
+			} else if (cal_pair == ANACAL_PAIR_C) {
+				pr_info("TX_AMP_PAIR_C : 1e_%x = 0x%x\n",
+					tx_amp_reg,
+					tc_phy_read_dev_reg(phyaddr, 0x1e, tx_amp_reg));
+				pr_info("TX_AMP_PAIR_C : 1e_%x = 0x%x\n",
+					tx_amp_reg_100,
+					tc_phy_read_dev_reg(phyaddr, 0x1e, tx_amp_reg_100));
+
+			} else {/* if(cal_pair == ANACAL_PAIR_D) */
+				pr_info("TX_AMP_PAIR_D : 1e_%x = 0x%x\n",
+					tx_amp_reg,
+					tc_phy_read_dev_reg(phyaddr, 0x1e, tx_amp_reg));
+				pr_info("TX_AMP_PAIR_D : 1e_%x = 0x%x\n",
+					tx_amp_reg_100,
+					tc_phy_read_dev_reg(phyaddr, 0x1e, tx_amp_reg_100));
+			}
+		}
+	}
+
+	ge_cal_flag = 1;
+	pr_info("GE_TX_AMP END\n");
+	/* restore / disable the forced test-pattern registers */
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x017d, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x017e, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x017f, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0180, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0181, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0182, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0183, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0184, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1f, 0x273, 0x2000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0xc9, 0x0fff);
+	tc_phy_write_g_reg(1, 2, 25, 0xb020);
+	tc_phy_write_dev_reg(0, 0x1e, 0x145, 0x1000);
+
+/* disable analog calibration circuit */
+/* disable Tx offset calibration circuit */
+/* disable Tx VLD force mode */
+/* disable Tx offset/amplitude calibration circuit */
+
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00db, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dc, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x003e, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dd, 0x0000);
+	/* *** Tx Amp Cal end *** */
+}
+
+/* ge_cal_tx_offset - GE PHY Tx DC-offset calibration for pairs A-D.
+ *
+ * Uses the same search scheme as ge_cal_tx_amp(): start at code 0x20,
+ * choose a step direction from the initial comparator reading
+ * (g-reg 7.24[0]), then walk the 6-bit offset field in 1e_172/1e_173
+ * until the comparator output flips (ANACAL_FINISH), the code saturates
+ * at 0x00/0x3f, or the wait times out.  On timeout the code is reset to
+ * the mid-scale default 0x20.
+ *
+ * @phyaddr: PHY address for all register accesses
+ * @delay:   settle delay forwarded to all_ge_ana_cal_wait()
+ *
+ * Side effect: sets the global ge_cal_tx_offset_flag to 1 and restores
+ * the forced DAC/test registers on exit.
+ */
+void ge_cal_tx_offset(u8 phyaddr, unsigned int delay)
+{
+	u8 all_ana_cal_status;
+	u16 ad_cal_comp_out_init;
+	int calibration_polarity, tx_offset_temp;
+	u16 cal_pair, cal_temp;
+	u8 tx_offset_reg_shift;
+	u16 tx_offset_reg, reg_temp, val_tmp;
+	u8 cnt = 0;
+
+	tc_phy_write_l_reg(0, 0, 0, 0x2100);
+
+	/* 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a */
+	/* 1e_dc[0]:rg_txvos_calen */
+	/* 1e_96[15]:bypass_tx_offset_cal, Hw bypass, Fw cal */
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00db, 0x0100);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dc, 0x0001);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0096, 0x8000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x003e, 0xf808);/* 1e_3e */
+	tc_phy_write_g_reg(FE_CAL_COMMON, 7, 24, 0x3000);
+
+	for (cal_pair = ANACAL_PAIR_A; cal_pair <= ANACAL_PAIR_D; cal_pair++) {
+		tx_offset_temp = 0x20;
+
+		/* select the pair under test: force 0V into its DAC and pick
+		 * which 6-bit field of 1e_172/1e_173 holds its offset code
+		 */
+		if (cal_pair == ANACAL_PAIR_A) {
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x145, 0x5010);
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dd, 0x1000);
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x017d, (0x8000 | DAC_IN_0V));
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0181, (0x8000 | DAC_IN_0V));
+			reg_temp = (tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0172) & (~0x3f00));
+			tx_offset_reg_shift = 8;/* 1e_172[13:8] */
+			tx_offset_reg = 0x0172;
+
+		} else if (cal_pair == ANACAL_PAIR_B) {
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x145, 0x5018);
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dd, 0x0100);
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x017e, (0x8000 | DAC_IN_0V));
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0182, (0x8000 | DAC_IN_0V));
+			reg_temp = (tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0172) & (~0x003f));
+			tx_offset_reg_shift = 0;
+			tx_offset_reg = 0x0172;/* 1e_172[5:0] */
+		} else if (cal_pair == ANACAL_PAIR_C) {
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dd, 0x0010);
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x017f, (0x8000 | DAC_IN_0V));
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0183, (0x8000 | DAC_IN_0V));
+			reg_temp = (tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0173) & (~0x3f00));
+			tx_offset_reg_shift = 8;
+			tx_offset_reg = 0x0173;/* 1e_173[13:8] */
+		} else {/* if(cal_pair == ANACAL_PAIR_D) */
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dd, 0x0001);
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0180, (0x8000 | DAC_IN_0V));
+			tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0184, (0x8000 | DAC_IN_0V));
+			reg_temp = (tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0173) & (~0x003f));
+			tx_offset_reg_shift = 0;
+			tx_offset_reg = 0x0173;/* 1e_173[5:0] */
+		}
+		/* 1e_172, 1e_173 */
+		val_tmp = (reg_temp | (tx_offset_temp << tx_offset_reg_shift));
+		tc_phy_write_dev_reg(phyaddr, 0x1e, tx_offset_reg, val_tmp);
+
+		all_ana_cal_status = all_ge_ana_cal_wait(delay, phyaddr); /* delay 20 usec */
+		if (all_ana_cal_status == 0) {
+			all_ana_cal_status = ANACAL_ERROR;
+			pr_info(" GE Tx offset AnaCal ERROR! \r\n");
+		}
+		/* step direction follows the initial comparator reading */
+		ad_cal_comp_out_init = tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1;
+		if (ad_cal_comp_out_init == 1)
+			calibration_polarity = -1;
+		else
+			calibration_polarity = 1;
+
+		cnt = 0;
+		tx_offset_temp += calibration_polarity;
+		/* linear search until the comparator flips or the code saturates */
+		while (all_ana_cal_status < ANACAL_ERROR) {
+			cnt++;
+			cal_temp = tx_offset_temp;
+			val_tmp = (reg_temp | (cal_temp << tx_offset_reg_shift));
+			tc_phy_write_dev_reg(phyaddr, 0x1e, tx_offset_reg, val_tmp);
+
+			all_ana_cal_status = all_ge_ana_cal_wait(delay, phyaddr);
+			if (all_ana_cal_status == 0) {
+				all_ana_cal_status = ANACAL_ERROR;
+				pr_info(" GE Tx offset AnaCal ERROR! \r\n");
+			} else if ((tc_phy_read_g_reg(FE_CAL_COMMON, 7, 24) & 0x1) !=
+				   ad_cal_comp_out_init) {
+				all_ana_cal_status = ANACAL_FINISH;
+			} else {
+				if ((tx_offset_temp == 0x3f) || (tx_offset_temp == 0x00)) {
+					all_ana_cal_status = ANACAL_SATURATION;
+					pr_info("GE tx offset ANACAL_SATURATION\n");
+					/* tx_amp_temp += calibration_polarity; */
+				} else {
+					tx_offset_temp += calibration_polarity;
+				}
+			}
+		}
+		if (all_ana_cal_status == ANACAL_ERROR) {
+			/* timeout: fall back to the mid-scale default code */
+			tx_offset_temp = 0x20;
+			val_tmp = (reg_temp | (tx_offset_temp << tx_offset_reg_shift));
+			tc_phy_write_dev_reg(phyaddr, 0x1e, tx_offset_reg, val_tmp);
+		}
+
+		if (all_ana_cal_status == ANACAL_FINISH) {
+			/* log the final per-pair offset register value */
+			if (cal_pair == ANACAL_PAIR_A) {
+				pr_info("TX_OFFSET_PAIR_A : 1e_%x = 0x%x\n",
+					tx_offset_reg,
+					tc_phy_read_dev_reg(phyaddr, 0x1e, tx_offset_reg));
+			} else if (cal_pair == ANACAL_PAIR_B) {
+				pr_info("TX_OFFSET_PAIR_B : 1e_%x = 0x%x\n",
+					tx_offset_reg,
+					tc_phy_read_dev_reg(phyaddr, 0x1e, tx_offset_reg));
+			} else if (cal_pair == ANACAL_PAIR_C) {
+				pr_info("TX_OFFSET_PAIR_C : 1e_%x = 0x%x\n",
+					tx_offset_reg,
+					tc_phy_read_dev_reg(phyaddr, 0x1e, tx_offset_reg));
+
+			} else {/* if(cal_pair == ANACAL_PAIR_D) */
+				pr_info("TX_OFFSET_PAIR_D : 1e_%x = 0x%x\n",
+					tx_offset_reg,
+					tc_phy_read_dev_reg(phyaddr, 0x1e, tx_offset_reg));
+			}
+		}
+	}
+	ge_cal_tx_offset_flag = 1;
+	/* restore / disable the forced DAC and test registers */
+	clear_ckinv_ana_txvos();
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x017d, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x017e, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x017f, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0180, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0181, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0182, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0183, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0184, 0x0000);
+/* disable analog calibration circuit */
+/* disable Tx offset calibration circuit */
+/* disable Tx VLD force mode */
+/* disable Tx offset/amplitude calibration circuit */
+
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00db, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dc, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x003e, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dd, 0x0000);
+}
+
+/* do_ge_phy_all_analog_cal - run the full GE PHY analog calibration chain.
+ *
+ * Saves reg0 and dev1e_145, forces 1000M full duplex with AN disabled,
+ * then runs R50, Tx-offset and Tx-amp calibration, triggers the HW Rx
+ * offset calibration, restores the saved registers, and finally programs
+ * eye-diagram and Tx-bias tuning values.
+ *
+ * @phyaddr: PHY address of the GE port to calibrate
+ *
+ * NOTE(review): ge_cal_flag is set to 1 right before the rext loop, so
+ * ge_cal_rext() is never invoked from here.  The adjacent comment ("GE
+ * calibration not calibration") suggests this is deliberate, but confirm.
+ */
+void do_ge_phy_all_analog_cal(u8 phyaddr)
+{
+	u16 reg0_temp, dev1e_145_temp, reg_temp;
+	u16 reg_tmp;
+
+	tc_mii_write(phyaddr, 0x1f, 0x0000);/* g0 */
+	reg0_temp = tc_mii_read(phyaddr, 0x0);/* keep the default value */
+/* set [12]AN disable, [8]full duplex, [13/6]1000Mbps */
+	tc_mii_write(phyaddr, 0x0, 0x0140);
+
+	tc_phy_write_dev_reg(phyaddr, 0x1f, 0x0100, 0xc000);/* BG voltage output */
+	dev1e_145_temp = tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0145);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0145, 0x1010);/* fix mdi */
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0185, 0x0000);/* disable tx slew control */
+
+	tc_phy_write_dev_reg(phyaddr, 0x1f, 0x27c, 0x1f1f);
+	tc_phy_write_dev_reg(phyaddr, 0x1f, 0x27c, 0x3300);
+	tc_phy_write_dev_reg(phyaddr, 0x1f, 0x273, 0);
+
+	/* set 1e_11[15:12] */
+	reg_tmp = tc_phy_read_dev_reg(phyaddr, 0x1e, 0x11);
+	reg_tmp = reg_tmp | (0xf << 12);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x11, reg_tmp);
+
+	/* calibration start ============ */
+	ge_cal_flag = 1; /*GE calibration not calibration*/
+	while (ge_cal_flag == 0)
+		ge_cal_rext(phyaddr, 100);
+
+	/* *** R50 Cal start ***************************** */
+	/*phyaddress = 0*/
+	ge_cal_r50(phyaddr, CALDLY);
+	/* *** R50 Cal end *** */
+
+	/* *** Tx offset Cal start *********************** */
+	ge_cal_tx_offset(phyaddr, CALDLY);
+	/* *** Tx offset Cal end *** */
+
+	/* *** Tx Amp Cal start *** */
+	ge_cal_tx_amp(phyaddr, CALDLY);
+	/* *** Tx Amp Cal end *** */
+
+	/* *** Rx offset Cal start *************** */
+	/* 1e_96[15]:bypass_tx_offset_cal, Hw bypass, Fw cal */
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0096, 0x8000);
+	/* tx/rx_cal_criteria_value */
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0037, 0x0033);
+	/* [14]: bypass all calibration, [11]: bypass adc offset cal analog */
+	reg_temp = (tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0039) & (~0x4800));
+	/* rx offset cal by Hw setup */
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0039, reg_temp);
+	/* [12]: enable rtune calibration */
+	reg_temp = (tc_phy_read_dev_reg(phyaddr, 0x1f, 0x0107) & (~0x1000));
+	/* disable rtune calibration */
+	tc_phy_write_dev_reg(phyaddr, 0x1f, 0x0107, reg_temp);
+	/* 1e_171[8:7]: bypass tx/rx dc offset cancellation process */
+	reg_temp = (tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0171) & (~0x0180));
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0171, (reg_temp | 0x0180));
+	reg_temp = tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0039);
+	/* rx offset calibration start */
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0039, (reg_temp | 0x2000));
+	/* rx offset calibration end */
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0039, (reg_temp & (~0x2000)));
+	mdelay(10); /* mdelay for Hw calibration finish */
+	reg_temp = (tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0171) & (~0x0180));
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0171, reg_temp);
+
+	/* restore the registers saved at entry */
+	tc_mii_write(phyaddr, 0x0, reg0_temp);
+	tc_phy_write_dev_reg(phyaddr, 0x1f, 0x0100, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0145, dev1e_145_temp);
+	tc_phy_write_dev_reg(phyaddr, 0x1f, 0x273, 0x2000);
+	/* *** Rx offset Cal end *** */
+	/*eye pic*/
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x0, 0x018d);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x1, 0x01c7);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x2, 0x01c0);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x3, 0x003a);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x4, 0x0206);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x5, 0x0000);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x6, 0x038a);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x7, 0x03c8);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x8, 0x03c0);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x9, 0x0235);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0xa, 0x0008);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0xb, 0x0000);
+
+	/*tmp maybe changed*/
+	tc_phy_write_dev_reg(phyaddr, 0x1f, 0x27c, 0x1111);
+	tc_phy_write_dev_reg(phyaddr, 0x1f, 0x27b, 0x47);
+	tc_phy_write_dev_reg(phyaddr, 0x1f, 0x273, 0x2200);
+
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x3a8, 0x0810);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x3aa, 0x0008);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x3ab, 0x0810);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x3ad, 0x0008);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x3ae, 0x0106);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x3b0, 0x0001);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x3b1, 0x0106);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x3b3, 0x0001);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x18c, 0x0001);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x18d, 0x0001);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x18e, 0x0001);
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x18f, 0x0001);
+
+	/*da_tx_bias1_b_tx_standby = 5'b10 (dev1eh_reg3aah[12:8])*/
+	reg_tmp = tc_phy_read_dev_reg(phyaddr, 0x1e, 0x3aa);
+	reg_tmp = reg_tmp & ~(0x1f00);
+	reg_tmp = reg_tmp | 0x2 << 8;
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x3aa, reg_tmp);
+
+	/*da_tx_bias1_a_tx_standby = 5'b10 (dev1eh_reg3a9h[4:0])*/
+	reg_tmp = tc_phy_read_dev_reg(phyaddr, 0x1e, 0x3a9);
+	reg_tmp = reg_tmp & ~(0x1f);
+	reg_tmp = reg_tmp | 0x2;
+	tc_phy_write_dev_reg(phyaddr, 0x1e, 0x3a9, reg_tmp);
+}
+
+#if 0 /* dead code: per-chip EPHY calibration entry points, compiled out */
+/* mt7622_ephy_cal - run FE PHY analog cal on PHYs 0-4 (MT7622 variant),
+ * optionally logging elapsed jiffies scaled by 4.
+ */
+static void mt7622_ephy_cal(void)
+{
+	int i;
+	unsigned long t_s, t_e;
+
+	t_s = jiffies;
+	for (i = 0; i < 5; i++)
+		do_fe_phy_all_analog_cal(i);
+	t_e = jiffies;
+	if (show_time)
+		pr_info("cal time = %lu\n", (t_e - t_s) * 4);
+}
+
+/* leopard_ephy_cal - run FE PHY cal on PHYs 1-4 plus GE PHY cal on PHY 0.
+ * NOTE(review): if dbg were ever 0, t_s/t_e would be read uninitialized
+ * in the pr_info below - harmless today because dbg is hard-coded to 1,
+ * and this block is inside #if 0 anyway.
+ */
+static void leopard_ephy_cal(void)
+{
+	int i, dbg;
+	unsigned long t_s, t_e;
+
+	dbg = 1;
+	if (dbg) {
+		t_s = jiffies;
+		for (i = 1; i < 5; i++)
+			do_fe_phy_all_analog_cal(i);
+
+		do_ge_phy_all_analog_cal(0);
+
+		t_e = jiffies;
+	}
+	if (show_time)
+		pr_info("cal time = %lu\n", (t_e - t_s) * 4);
+}
+#endif
+/* wait_loop - settle delay implemented as 320 consecutive reads of a
+ * switch register; the fetched value is intentionally discarded
+ * (presumably each MMIO read provides the required latency - confirm).
+ */
+static void wait_loop(void)
+{
+	int pass;
+	int discard;
+
+	pass = 0;
+	while (pass < 320) {
+		discard = sys_reg_read(RALINK_ETH_SW_BASE + 0x108);
+		pass++;
+	}
+}
+
+/* trgmii_calibration_7623 - calibrate TRGMII Rx timing on the MT7623 side.
+ *
+ * Phase 1: with training mode enabled in the MT7530 peer (reg 0x7A40),
+ * repeatedly enable the per-lane edge checkers and bump the RXC delay
+ * (rxc_step_size per pass) until every RD lane reports zero errors and
+ * the expected 0x55 training word, or the delay field reads 0x68.
+ * Phase 2: per lane, sweep the RXD tap upward from the failing region to
+ * find TAP_A (first passing tap).
+ * Phase 3: continue the sweep to find TAP_B (last passing tap), then
+ * program the midpoint (TAP_A + TAP_B) / 2 as the final RXD delay.
+ * Training mode in the MT7530 is disabled at the end.
+ *
+ * NOTE(review): the TAP_B loop reuses rd_tap left over from the TAP_A
+ * loop instead of resetting it per lane - the 7530 variant below resets
+ * it to 0; confirm the asymmetry is intended.
+ */
+static void trgmii_calibration_7623(void)
+{
+	/* minimum delay for all correct */
+	unsigned int tap_a[5] = {
+		0, 0, 0, 0, 0
+	};
+	/* maximum delay for all correct */
+	unsigned int tap_b[5] = {
+		0, 0, 0, 0, 0
+	};
+	unsigned int final_tap[5];
+	unsigned int rxc_step_size;
+	unsigned int rxd_step_size;
+	unsigned int read_data;
+	unsigned int tmp;
+	unsigned int rd_wd;
+	int i;
+	unsigned int err_cnt[5];
+	unsigned int init_toggle_data;
+	unsigned int err_flag[5];
+	unsigned int err_total_flag;
+	unsigned int training_word;
+	unsigned int rd_tap;
+
+	void __iomem *TRGMII_7623_base;
+	void __iomem *TRGMII_7623_RD_0;
+	void __iomem *temp_addr;
+
+	TRGMII_7623_base = ETHDMASYS_ETH_SW_BASE + 0x0300;
+	TRGMII_7623_RD_0 = TRGMII_7623_base + 0x10;
+	rxd_step_size = 0x1;
+	rxc_step_size = 0x4;
+	init_toggle_data = 0x00000055;
+	training_word = 0x000000AC;
+
+	/* RX clock gating in MT7623 */
+	reg_bit_zero(TRGMII_7623_base + 0x04, 30, 2);
+	/* Assert RX reset in MT7623 */
+	reg_bit_one(TRGMII_7623_base + 0x00, 31, 1);
+	/* Set TX OE edge in MT7623 */
+	reg_bit_one(TRGMII_7623_base + 0x78, 13, 1);
+	/* Disable RX clock gating in MT7623 */
+	reg_bit_one(TRGMII_7623_base + 0x04, 30, 2);
+	/* Release RX reset in MT7623 */
+	reg_bit_zero(TRGMII_7623_base, 31, 1);
+
+	for (i = 0; i < 5; i++)
+		/* Set bslip_en = 1 */
+		reg_bit_one(TRGMII_7623_RD_0 + i * 8, 31, 1);
+
+	/* Enable Training Mode in MT7530 */
+	mii_mgr_read(0x1F, 0x7A40, &read_data);
+	read_data |= 0xc0000000;
+	mii_mgr_write(0x1F, 0x7A40, read_data);
+
+	/* Phase 1: step the common RXC delay until all lanes pass */
+	err_total_flag = 0;
+	read_data = 0x0;
+	while (err_total_flag == 0 && read_data != 0x68) {
+		/* Enable EDGE CHK in MT7623 */
+		for (i = 0; i < 5; i++) {
+			reg_bit_zero(TRGMII_7623_RD_0 + i * 8, 28, 4);
+			reg_bit_one(TRGMII_7623_RD_0 + i * 8, 31, 1);
+		}
+		wait_loop();
+		err_total_flag = 1;
+		for (i = 0; i < 5; i++) {
+			/* per-lane error count is bits [11:8], read word [23:16] */
+			tmp = sys_reg_read(TRGMII_7623_RD_0 + i * 8);
+			err_cnt[i] = (tmp >> 8) & 0x0000000f;
+
+			tmp = sys_reg_read(TRGMII_7623_RD_0 + i * 8);
+			rd_wd = (tmp >> 16) & 0x000000ff;
+
+			if (err_cnt[i] != 0)
+				err_flag[i] = 1;
+			else if (rd_wd != 0x55)
+				err_flag[i] = 1;
+			else
+				err_flag[i] = 0;
+			err_total_flag = err_flag[i] & err_total_flag;
+		}
+
+		/* Disable EDGE CHK in MT7623 */
+		for (i = 0; i < 5; i++) {
+			reg_bit_one(TRGMII_7623_RD_0 + i * 8, 30, 1);
+			reg_bit_zero(TRGMII_7623_RD_0 + i * 8, 28, 2);
+			reg_bit_zero(TRGMII_7623_RD_0 + i * 8, 31, 1);
+		}
+		wait_loop();
+		/* Adjust RXC delay */
+		/* RX clock gating in MT7623 */
+		reg_bit_zero(TRGMII_7623_base + 0x04, 30, 2);
+		read_data = sys_reg_read(TRGMII_7623_base);
+		if (err_total_flag == 0) {
+			/* all lanes still failing: advance the 7-bit delay
+			 * field by rxc_step_size (mirrored in both bytes)
+			 */
+			tmp = (read_data & 0x0000007f) + rxc_step_size;
+			read_data >>= 8;
+			read_data &= 0xffffff80;
+			read_data |= tmp;
+			read_data <<= 8;
+			read_data &= 0xffffff80;
+			read_data |= tmp;
+			sys_reg_write(TRGMII_7623_base, read_data);
+		} else {
+			/* all lanes passed: add a fixed 16-tap margin */
+			tmp = (read_data & 0x0000007f) + 16;
+			read_data >>= 8;
+			read_data &= 0xffffff80;
+			read_data |= tmp;
+			read_data <<= 8;
+			read_data &= 0xffffff80;
+			read_data |= tmp;
+			sys_reg_write(TRGMII_7623_base, read_data);
+		}
+		read_data &= 0x000000ff;
+		/* Disable RX clock gating in MT7623 */
+		reg_bit_one(TRGMII_7623_base + 0x04, 30, 2);
+		for (i = 0; i < 5; i++)
+			reg_bit_one(TRGMII_7623_RD_0 + i * 8, 31, 1);
+	}
+	/* Read RD_WD MT7623 */
+	/* Phase 2: per-lane sweep for TAP_A (first passing RXD tap) */
+	for (i = 0; i < 5; i++) {
+		temp_addr = TRGMII_7623_RD_0 + i * 8;
+		rd_tap = 0;
+		while (err_flag[i] != 0 && rd_tap != 128) {
+			/* Enable EDGE CHK in MT7623 */
+			tmp = sys_reg_read(temp_addr);
+			tmp |= 0x40000000;
+			reg_bit_zero(temp_addr, 28, 4);
+			reg_bit_one(temp_addr, 30, 1);
+			wait_loop();
+			read_data = sys_reg_read(temp_addr);
+			/* Read MT7623 Errcnt */
+			err_cnt[i] = (read_data >> 8) & 0x0000000f;
+			rd_wd = (read_data >> 16) & 0x000000ff;
+			if (err_cnt[i] != 0 || rd_wd != 0x55)
+				err_flag[i] = 1;
+			else
+				err_flag[i] = 0;
+			/* Disable EDGE CHK in MT7623 */
+			reg_bit_zero(temp_addr, 28, 2);
+			reg_bit_zero(temp_addr, 31, 1);
+			tmp |= 0x40000000;
+			sys_reg_write(temp_addr, tmp & 0x4fffffff);
+			wait_loop();
+			if (err_flag[i] != 0) {
+				/* Add RXD delay in MT7623 */
+				rd_tap = (read_data & 0x7f) + rxd_step_size;
+
+				read_data = (read_data & 0xffffff80) | rd_tap;
+				sys_reg_write(temp_addr, read_data);
+				tap_a[i] = rd_tap;
+			} else {
+				/* lane passes: jump ahead 48 taps before the
+				 * TAP_B sweep below
+				 */
+				rd_tap = (read_data & 0x0000007f) + 48;
+				read_data = (read_data & 0xffffff80) | rd_tap;
+				sys_reg_write(temp_addr, read_data);
+			}
+		}
+		pr_info("MT7623 %dth bit Tap_a = %d\n", i, tap_a[i]);
+	}
+	/* Phase 3: per-lane sweep for TAP_B, then program the midpoint */
+	for (i = 0; i < 5; i++) {
+		while ((err_flag[i] == 0) && (rd_tap != 128)) {
+			read_data = sys_reg_read(TRGMII_7623_RD_0 + i * 8);
+			/* Add RXD delay in MT7623 */
+			rd_tap = (read_data & 0x7f) + rxd_step_size;
+
+			read_data = (read_data & 0xffffff80) | rd_tap;
+			sys_reg_write(TRGMII_7623_RD_0 + i * 8, read_data);
+
+			/* Enable EDGE CHK in MT7623 */
+			tmp = sys_reg_read(TRGMII_7623_RD_0 + i * 8);
+			tmp |= 0x40000000;
+			sys_reg_write(TRGMII_7623_RD_0 + i * 8,
+				      (tmp & 0x4fffffff));
+			wait_loop();
+			read_data = sys_reg_read(TRGMII_7623_RD_0 + i * 8);
+			/* Read MT7623 Errcnt */
+			err_cnt[i] = (read_data >> 8) & 0xf;
+			rd_wd = (read_data >> 16) & 0x000000ff;
+			if (err_cnt[i] != 0 || rd_wd != 0x55)
+				err_flag[i] = 1;
+			else
+				err_flag[i] = 0;
+
+			/* Disable EDGE CHK in MT7623 */
+			tmp = sys_reg_read(TRGMII_7623_RD_0 + i * 8);
+			tmp |= 0x40000000;
+			sys_reg_write(TRGMII_7623_RD_0 + i * 8,
+				      (tmp & 0x4fffffff));
+			wait_loop();
+		}
+		tap_b[i] = rd_tap; /* -rxd_step_size; */
+		pr_info("MT7623 %dth bit Tap_b = %d\n", i, tap_b[i]);
+		/* Calculate RXD delay = (TAP_A + TAP_B)/2 */
+		final_tap[i] = (tap_a[i] + tap_b[i]) / 2;
+		read_data = (read_data & 0xffffff80) | final_tap[i];
+		sys_reg_write(TRGMII_7623_RD_0 + i * 8, read_data);
+	}
+
+	/* disable training mode in the MT7530 peer */
+	mii_mgr_read(0x1F, 0x7A40, &read_data);
+	read_data &= 0x3fffffff;
+	mii_mgr_write(0x1F, 0x7A40, read_data);
+}
+
+/* trgmii_calibration_7530 - calibrate TRGMII Rx timing on the MT7530 side.
+ *
+ * Mirror of trgmii_calibration_7623(), but all MT7530 registers are
+ * reached through the indirect MDIO interface (mii_mgr_read/write at
+ * PHY 0x1F) instead of MMIO.  Training is driven from the MT7623 side
+ * (force-2000/2600 mode select on TRGMII reg 0x40 per
+ * ei_local->architecture), then:
+ * Phase 1: step the MT7530 RXC delay (rxc_step_size per pass, with an
+ * RX reset around each change) until all 5 RD lanes see zero errors and
+ * the 0x55 training word.
+ * Phase 2: per lane, sweep the RXD tap up to find TAP_A (first passing
+ * tap; on the first pass a fixed +4 margin is applied).
+ * Phase 3: per lane, continue to TAP_B (last passing tap) and program
+ * the midpoint (TAP_A + TAP_B) / 2.
+ */
+static void trgmii_calibration_7530(void)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	unsigned int tap_a[5] = {
+		0, 0, 0, 0, 0
+	};
+	unsigned int tap_b[5] = {
+		0, 0, 0, 0, 0
+	};
+	unsigned int final_tap[5];
+	unsigned int rxc_step_size;
+	unsigned int rxd_step_size;
+	unsigned int read_data;
+	unsigned int tmp = 0;
+	int i;
+	unsigned int err_cnt[5];
+	unsigned int rd_wd;
+	unsigned int init_toggle_data;
+	unsigned int err_flag[5];
+	unsigned int err_total_flag;
+	unsigned int training_word;
+	unsigned int rd_tap;
+
+	void __iomem *TRGMII_7623_base;
+	u32 TRGMII_7530_RD_0;
+	u32 TRGMII_7530_base;
+	u32 TRGMII_7530_TX_base;
+
+	TRGMII_7623_base = ETHDMASYS_ETH_SW_BASE + 0x0300;
+	TRGMII_7530_base = 0x7A00;
+	TRGMII_7530_RD_0 = TRGMII_7530_base + 0x10;
+	rxd_step_size = 0x1;
+	rxc_step_size = 0x8;
+	init_toggle_data = 0x00000055;
+	training_word = 0x000000AC;
+
+	TRGMII_7530_TX_base = TRGMII_7530_base + 0x50;
+
+	reg_bit_one(TRGMII_7623_base + 0x40, 31, 1);
+	mii_mgr_read(0x1F, 0x7a10, &read_data);
+
+	/* RX clock gating in MT7530 */
+	mii_mgr_read(0x1F, TRGMII_7530_base + 0x04, &read_data);
+	read_data &= 0x3fffffff;
+	mii_mgr_write(0x1F, TRGMII_7530_base + 0x04, read_data);
+
+	/* Set TX OE edge in MT7530 */
+	mii_mgr_read(0x1F, TRGMII_7530_base + 0x78, &read_data);
+	read_data |= 0x00002000;
+	mii_mgr_write(0x1F, TRGMII_7530_base + 0x78, read_data);
+
+	/* Assert RX reset in MT7530 */
+	mii_mgr_read(0x1F, TRGMII_7530_base, &read_data);
+	read_data |= 0x80000000;
+	mii_mgr_write(0x1F, TRGMII_7530_base, read_data);
+
+	/* Release RX reset in MT7530 */
+	mii_mgr_read(0x1F, TRGMII_7530_base, &read_data);
+	read_data &= 0x7fffffff;
+	mii_mgr_write(0x1F, TRGMII_7530_base, read_data);
+
+	/* Disable RX clock gating in MT7530 */
+	mii_mgr_read(0x1F, TRGMII_7530_base + 0x04, &read_data);
+	read_data |= 0xC0000000;
+	mii_mgr_write(0x1F, TRGMII_7530_base + 0x04, read_data);
+
+	/*Enable Training Mode in MT7623 */
+	reg_bit_zero(TRGMII_7623_base + 0x40, 30, 1);
+	if (ei_local->architecture & GE1_TRGMII_FORCE_2000)
+		reg_bit_one(TRGMII_7623_base + 0x40, 30, 2);
+	else
+		reg_bit_one(TRGMII_7623_base + 0x40, 31, 1);
+	reg_bit_zero(TRGMII_7623_base + 0x78, 8, 4);
+	reg_bit_zero(TRGMII_7623_base + 0x50, 8, 4);
+	reg_bit_zero(TRGMII_7623_base + 0x58, 8, 4);
+	reg_bit_zero(TRGMII_7623_base + 0x60, 8, 4);
+	reg_bit_zero(TRGMII_7623_base + 0x68, 8, 4);
+	reg_bit_zero(TRGMII_7623_base + 0x70, 8, 4);
+	reg_bit_one(TRGMII_7623_base + 0x78, 11, 1);
+
+	/* Phase 1: step the MT7530 RXC delay until all lanes pass */
+	err_total_flag = 0;
+	read_data = 0x0;
+	while (err_total_flag == 0 && (read_data != 0x68)) {
+		/* Enable EDGE CHK in MT7530 */
+		for (i = 0; i < 5; i++) {
+			mii_mgr_read(0x1F, TRGMII_7530_RD_0 + i * 8,
+				     &read_data);
+			read_data |= 0x40000000;
+			read_data &= 0x4fffffff;
+			mii_mgr_write(0x1F, TRGMII_7530_RD_0 + i * 8,
+				      read_data);
+			wait_loop();
+			/* error count in [11:8] of the raw word, training
+			 * word in [23:16]
+			 */
+			mii_mgr_read(0x1F, TRGMII_7530_RD_0 + i * 8,
+				     &err_cnt[i]);
+			err_cnt[i] >>= 8;
+			err_cnt[i] &= 0x0000ff0f;
+			rd_wd = err_cnt[i] >> 8;
+			rd_wd &= 0x000000ff;
+			err_cnt[i] &= 0x0000000f;
+			if (err_cnt[i] != 0)
+				err_flag[i] = 1;
+			else if (rd_wd != 0x55)
+				err_flag[i] = 1;
+			else
+				err_flag[i] = 0;
+
+			if (i == 0)
+				err_total_flag = err_flag[i];
+			else
+				err_total_flag = err_flag[i] & err_total_flag;
+			/* Disable EDGE CHK in MT7530 */
+			mii_mgr_read(0x1F, TRGMII_7530_RD_0 + i * 8,
+				     &read_data);
+			read_data |= 0x40000000;
+			read_data &= 0x4fffffff;
+			mii_mgr_write(0x1F, TRGMII_7530_RD_0 + i * 8,
+				      read_data);
+			wait_loop();
+		}
+		/*Adjust RXC delay */
+		if (err_total_flag == 0) {
+			/* Assert RX reset in MT7530 */
+			mii_mgr_read(0x1F, TRGMII_7530_base, &read_data);
+			read_data |= 0x80000000;
+			mii_mgr_write(0x1F, TRGMII_7530_base, read_data);
+
+			/* RX clock gating in MT7530 */
+			mii_mgr_read(0x1F, TRGMII_7530_base + 0x04, &read_data);
+			read_data &= 0x3fffffff;
+			mii_mgr_write(0x1F, TRGMII_7530_base + 0x04, read_data);
+
+			/* advance the 7-bit RXC delay field */
+			mii_mgr_read(0x1F, TRGMII_7530_base, &read_data);
+			tmp = read_data;
+			tmp &= 0x0000007f;
+			tmp += rxc_step_size;
+			read_data &= 0xffffff80;
+			read_data |= tmp;
+			mii_mgr_write(0x1F, TRGMII_7530_base, read_data);
+			mii_mgr_read(0x1F, TRGMII_7530_base, &read_data);
+
+			/* Release RX reset in MT7530 */
+			mii_mgr_read(0x1F, TRGMII_7530_base, &read_data);
+			read_data &= 0x7fffffff;
+			mii_mgr_write(0x1F, TRGMII_7530_base, read_data);
+
+			/* Disable RX clock gating in MT7530 */
+			mii_mgr_read(0x1F, TRGMII_7530_base + 0x04, &read_data);
+			read_data |= 0xc0000000;
+			mii_mgr_write(0x1F, TRGMII_7530_base + 0x04, read_data);
+		}
+		read_data = tmp;
+	}
+	/* Read RD_WD MT7530 */
+	/* Phase 2: per-lane sweep for TAP_A (first passing RXD tap) */
+	for (i = 0; i < 5; i++) {
+		rd_tap = 0;
+		while (err_flag[i] != 0 && rd_tap != 128) {
+			/* Enable EDGE CHK in MT7530 */
+			mii_mgr_read(0x1F, TRGMII_7530_RD_0 + i * 8,
+				     &read_data);
+			read_data |= 0x40000000;
+			read_data &= 0x4fffffff;
+			mii_mgr_write(0x1F, TRGMII_7530_RD_0 + i * 8,
+				      read_data);
+			wait_loop();
+			err_cnt[i] = (read_data >> 8) & 0x0000000f;
+			rd_wd = (read_data >> 16) & 0x000000ff;
+			if (err_cnt[i] != 0 || rd_wd != 0x55)
+				err_flag[i] = 1;
+			else
+				err_flag[i] = 0;
+
+			if (err_flag[i] != 0) {
+				/* Add RXD delay in MT7530 */
+				rd_tap = (read_data & 0x7f) + rxd_step_size;
+				read_data = (read_data & 0xffffff80) | rd_tap;
+				mii_mgr_write(0x1F, TRGMII_7530_RD_0 + i * 8,
+					      read_data);
+				tap_a[i] = rd_tap;
+			} else {
+				/* Record the min delay TAP_A */
+				tap_a[i] = (read_data & 0x0000007f);
+				rd_tap = tap_a[i] + 0x4;
+				read_data = (read_data & 0xffffff80) | rd_tap;
+				mii_mgr_write(0x1F, TRGMII_7530_RD_0 + i * 8,
+					      read_data);
+			}
+
+			/* Disable EDGE CHK in MT7530 */
+			mii_mgr_read(0x1F, TRGMII_7530_RD_0 + i * 8,
+				     &read_data);
+			read_data |= 0x40000000;
+			read_data &= 0x4fffffff;
+			mii_mgr_write(0x1F, TRGMII_7530_RD_0 + i * 8,
+				      read_data);
+			wait_loop();
+		}
+		pr_info("MT7530 %dth bit Tap_a = %d\n", i, tap_a[i]);
+	}
+	/* Phase 3: per-lane sweep for TAP_B, then program the midpoint */
+	for (i = 0; i < 5; i++) {
+		rd_tap = 0;
+		while (err_flag[i] == 0 && (rd_tap != 128)) {
+			/* Enable EDGE CHK in MT7530 */
+			mii_mgr_read(0x1F, TRGMII_7530_RD_0 + i * 8,
+				     &read_data);
+			read_data |= 0x40000000;
+			read_data &= 0x4fffffff;
+			mii_mgr_write(0x1F, TRGMII_7530_RD_0 + i * 8,
+				      read_data);
+			wait_loop();
+			err_cnt[i] = (read_data >> 8) & 0x0000000f;
+			rd_wd = (read_data >> 16) & 0x000000ff;
+			if (err_cnt[i] != 0 || rd_wd != 0x55)
+				err_flag[i] = 1;
+			else
+				err_flag[i] = 0;
+
+			if (err_flag[i] == 0 && (rd_tap != 128)) {
+				/* Add RXD delay in MT7530 */
+				rd_tap = (read_data & 0x7f) + rxd_step_size;
+				read_data = (read_data & 0xffffff80) | rd_tap;
+				mii_mgr_write(0x1F, TRGMII_7530_RD_0 + i * 8,
+					      read_data);
+			}
+			/* Disable EDGE CHK in MT7530 */
+			mii_mgr_read(0x1F, TRGMII_7530_RD_0 + i * 8,
+				     &read_data);
+			read_data |= 0x40000000;
+			read_data &= 0x4fffffff;
+			mii_mgr_write(0x1F, TRGMII_7530_RD_0 + i * 8,
+				      read_data);
+			wait_loop();
+		}
+		tap_b[i] = rd_tap; /* - rxd_step_size; */
+		pr_info("MT7530 %dth bit Tap_b = %d\n", i, tap_b[i]);
+		/* Calculate RXD delay = (TAP_A + TAP_B)/2 */
+		final_tap[i] = (tap_a[i] + tap_b[i]) / 2;
+		read_data = (read_data & 0xffffff80) | final_tap[i];
+		mii_mgr_write(0x1F, TRGMII_7530_RD_0 + i * 8, read_data);
+	}
+	/* leave training mode, keeping the selected force-speed bits */
+	if (ei_local->architecture & GE1_TRGMII_FORCE_2000)
+		reg_bit_zero(TRGMII_7623_base + 0x40, 31, 1);
+	else
+		reg_bit_zero(TRGMII_7623_base + 0x40, 30, 2);
+}
+
+/* mt7530_trgmii_clock_setting - program the MT7530 TRGMII PLL and run the
+ * two-sided timing calibration.
+ *
+ * Configures the switch-side TRGMII clock PLL over indirect Clause-45
+ * MDIO (device 0x1f) according to the crystal frequency, resyncs both
+ * ends, then invokes trgmii_calibration_7623() / trgmii_calibration_7530()
+ * and finishes with an RX-reset pulse on the MT7530 (reg 0x7a00 bit 31).
+ *
+ * @xtal_mode: 1 = 25MHz crystal, 2 = 40MHz crystal (other values leave
+ *             the PLL dividers at their previous setting)
+ *
+ * Fix: restored "&reg_value" where the source had been corrupted to the
+ * mis-encoded "(R)_value" character sequence, which does not compile.
+ */
+static void mt7530_trgmii_clock_setting(u32 xtal_mode)
+{
+	u32 reg_value;
+	/* TRGMII Clock */
+	mii_mgr_write_cl45(0, 0x1f, 0x410, 0x1);
+	if (xtal_mode == 1) { /* 25MHz */
+		mii_mgr_write_cl45(0, 0x1f, 0x404, MT7530_TRGMII_PLL_25M);
+	} else if (xtal_mode == 2) { /* 40MHz */
+		mii_mgr_write_cl45(0, 0x1f, 0x404, MT7530_TRGMII_PLL_40M);
+	}
+	mii_mgr_write_cl45(0, 0x1f, 0x405, 0);
+	if (xtal_mode == 1) /* 25MHz */
+		mii_mgr_write_cl45(0, 0x1f, 0x409, 0x57);
+	else
+		mii_mgr_write_cl45(0, 0x1f, 0x409, 0x87);
+
+	if (xtal_mode == 1) /* 25MHz */
+		mii_mgr_write_cl45(0, 0x1f, 0x40a, 0x57);
+	else
+		mii_mgr_write_cl45(0, 0x1f, 0x40a, 0x87);
+
+	mii_mgr_write_cl45(0, 0x1f, 0x403, 0x1800);
+	mii_mgr_write_cl45(0, 0x1f, 0x403, 0x1c00);
+	mii_mgr_write_cl45(0, 0x1f, 0x401, 0xc020);
+	mii_mgr_write_cl45(0, 0x1f, 0x406, 0xa030);
+	mii_mgr_write_cl45(0, 0x1f, 0x406, 0xa038);
+	usleep_range(120, 130); /* for MT7623 bring up test */
+	mii_mgr_write_cl45(0, 0x1f, 0x410, 0x3);
+
+	mii_mgr_read(31, 0x7830, &reg_value);
+	reg_value &= 0xFFFFFFFC;
+	reg_value |= 0x00000001;
+	mii_mgr_write(31, 0x7830, reg_value);
+
+	mii_mgr_read(31, 0x7a40, &reg_value);
+	reg_value &= ~(0x1 << 30);
+	reg_value &= ~(0x1 << 28);
+	mii_mgr_write(31, 0x7a40, reg_value);
+
+	mii_mgr_write(31, 0x7a78, 0x55);
+	usleep_range(100, 110); /* for mt7623 bring up test */
+
+	/* Release MT7623 RXC reset */
+	reg_bit_zero(ETHDMASYS_ETH_SW_BASE + 0x0300, 31, 1);
+
+	trgmii_calibration_7623();
+	trgmii_calibration_7530();
+	/* Assert RX reset in MT7623 */
+	reg_bit_one(ETHDMASYS_ETH_SW_BASE + 0x0300, 31, 1);
+	/* Release RX reset in MT7623 */
+	reg_bit_zero(ETHDMASYS_ETH_SW_BASE + 0x0300, 31, 1);
+	/* pulse MT7530 RX reset (bit 31) and let the link settle */
+	mii_mgr_read(31, 0x7a00, &reg_value);
+	reg_value |= (0x1 << 31);
+	mii_mgr_write(31, 0x7a00, reg_value);
+	mdelay(1);
+	reg_value &= ~(0x1 << 31);
+	mii_mgr_write(31, 0x7a00, reg_value);
+	mdelay(100);
+}
+
+/* MT7621 side of TRGMII bring-up: hold FE/GMAC in reset, select the
+ * TRGMII clock source in CLK_CFG_0, then release the resets.
+ */
+void trgmii_set_7621(void)
+{
+	u32 rst_ctrl;
+	u32 clk_cfg;
+
+	/* MT7621 need to reset GMAC and FE first */
+	rst_ctrl = sys_reg_read(RSTCTRL);
+	sys_reg_write(RSTCTRL, rst_ctrl | RALINK_FE_RST | RALINK_ETH_RST);
+
+	/* set TRGMII clock: CLK_CFG_0[6:5] = 01 */
+	clk_cfg = sys_reg_read(CLK_CFG_0);
+	clk_cfg = (clk_cfg & 0xffffff9f) | (0x1 << 5);
+	sys_reg_write(CLK_CFG_0, clk_cfg);
+	mdelay(1);
+	clk_cfg = sys_reg_read(CLK_CFG_0);
+	pr_info("set CLK_CFG_0 = 0x%x!!!!!!!!!!!!!!!!!!1\n", clk_cfg);
+
+	/* Release FE and GMAC from reset */
+	sys_reg_write(RSTCTRL,
+		      (rst_ctrl | RALINK_FE_RST | RALINK_ETH_RST) &
+		      ~(RALINK_FE_RST | RALINK_ETH_RST));
+	pr_info("trgmii_set_7621 Completed!!\n");
+}
+
+/* MT7530 side of TRGMII bring-up on MT7621: program the TRGMII PLL for
+ * the detected crystal and enable the TRGMII interface.  The MMD
+ * registers are reached indirectly through CL22 registers 13/14
+ * (devad 0x1f access sequence).
+ *
+ * BUGFIX: "®Value" was a mangled "&regValue" (HTML-entity corruption)
+ * and would not compile; restored.
+ */
+void trgmii_set_7530(void)
+{
+	u32 regValue;
+
+	/* Select MMD 0x1f reg 0x404 (PLL divider) */
+	mii_mgr_write(0, 13, 0x1f);
+	mii_mgr_write(0, 14, 0x404);
+	mii_mgr_write(0, 13, 0x401f);
+	/* HWTRAP[10:9]: 0x3 = 25MHz XTAL, 0x2 = 40MHz XTAL */
+	mii_mgr_read(31, 0x7800, &regValue);
+	regValue = (regValue >> 9) & 0x3;
+	if (regValue == 0x3)
+		mii_mgr_write(0, 14, 0x0C00);/*25Mhz XTAL for 150Mhz CLK */
+	else if (regValue == 0x2)
+		mii_mgr_write(0, 14, 0x0780);/*40Mhz XTAL for 150Mhz CLK */
+
+	mdelay(1);
+
+	mii_mgr_write(0, 13, 0x1f);
+	mii_mgr_write(0, 14, 0x409);
+	mii_mgr_write(0, 13, 0x401f);
+	if (regValue == 0x3) /* 25MHz */
+		mii_mgr_write(0, 14, 0x57);
+	else
+		mii_mgr_write(0, 14, 0x87);
+	mdelay(1);
+
+	mii_mgr_write(0, 13, 0x1f);
+	mii_mgr_write(0, 14, 0x40a);
+	mii_mgr_write(0, 13, 0x401f);
+	if (regValue == 0x3) /* 25MHz */
+		mii_mgr_write(0, 14, 0x57);
+	else
+		mii_mgr_write(0, 14, 0x87);
+
+	/* PLL BIAS en */
+	mii_mgr_write(0, 13, 0x1f);
+	mii_mgr_write(0, 14, 0x403);
+	mii_mgr_write(0, 13, 0x401f);
+	mii_mgr_write(0, 14, 0x1800);
+	mdelay(1);
+
+	/* BIAS LPF en */
+	mii_mgr_write(0, 13, 0x1f);
+	mii_mgr_write(0, 14, 0x403);
+	mii_mgr_write(0, 13, 0x401f);
+	mii_mgr_write(0, 14, 0x1c00);
+
+	/* sys PLL en */
+	mii_mgr_write(0, 13, 0x1f);
+	mii_mgr_write(0, 14, 0x401);
+	mii_mgr_write(0, 13, 0x401f);
+	mii_mgr_write(0, 14, 0xc020);
+
+	/* LCDDDS PWDS */
+	mii_mgr_write(0, 13, 0x1f);
+	mii_mgr_write(0, 14, 0x406);
+	mii_mgr_write(0, 13, 0x401f);
+	mii_mgr_write(0, 14, 0xa030);
+	mdelay(1);
+
+	/* GSW_2X_CLK: switch core clock to PLL output */
+	mii_mgr_write(0, 13, 0x1f);
+	mii_mgr_write(0, 14, 0x410);
+	mii_mgr_write(0, 13, 0x401f);
+	mii_mgr_write(0, 14, 0x0003);
+	mii_mgr_write_cl45(0, 0x1f, 0x410, 0x0003);
+
+	/* enable P6: force 1000M/FD, flow control on */
+	mii_mgr_write(31, 0x3600, 0x5e33b);
+
+	/* enable TRGMII */
+	mii_mgr_write(31, 0x7830, 0x1);
+
+	pr_info("trgmii_set_7530 Completed!!\n");
+}
+
+/* Poll the MT7530 VTCR register (0x90) until the pending VLAN table
+ * command completes (bit31 clears), giving up after 20 polls of ~70ms.
+ */
+static void is_switch_vlan_table_busy(void)
+{
+	unsigned int vtcr = 0;
+	int tries;
+
+	for (tries = 0; tries < 20; tries++) {
+		mii_mgr_read(31, 0x90, &vtcr);
+		if (!(vtcr & 0x80000000))
+			break; /* table no longer busy */
+		mdelay(70);
+	}
+	if (tries == 20)
+		pr_info("set vlan timeout value=0x%x.\n", vtcr);
+}
+
+/* Configure the MT7530 LAN/WAN partition: put every port in security
+ * mode, assign per-port PVIDs and write the VLAN member tables.  The
+ * WAN port (VID 2) is either P0 (WLLLL) or P4 (LLLLW); P6 (CPU) is a
+ * member of both VLANs.
+ */
+static void lan_wan_partition(void)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	unsigned int port;
+
+	/* Set MT7530 */
+	if (ei_local->architecture & WAN_AT_P0) {
+		/* WLLLL: WAN at P0 -> PVID 2 on P0/P5, PVID 1 elsewhere */
+		static const u32 pvid_wllll[7] = {
+			0x10002, 0x10001, 0x10001, 0x10001,
+			0x10001, 0x10002, 0x10001,
+		};
+
+		pr_info("set LAN/WAN WLLLL\n");
+		/* LAN/WAN ports P0-P6 as security mode */
+		for (port = 0; port <= 6; port++)
+			mii_mgr_write(31, 0x2004 + port * 0x100, 0xff0003);
+
+		/* set PVID per port */
+		for (port = 0; port <= 6; port++)
+			mii_mgr_write(31, 0x2014 + port * 0x100,
+				      pvid_wllll[port]);
+
+		/* VLAN member tables */
+		is_switch_vlan_table_busy();
+		mii_mgr_write(31, 0x94, 0x405e0001); /* VAWD1 */
+		mii_mgr_write(31, 0x90, 0x80001001); /* VTCR, VID=1 */
+		is_switch_vlan_table_busy();
+
+		mii_mgr_write(31, 0x94, 0x40210001); /* VAWD1 */
+		mii_mgr_write(31, 0x90, 0x80001002); /* VTCR, VID=2 */
+		is_switch_vlan_table_busy();
+	}
+	if (ei_local->architecture & WAN_AT_P4) {
+		/* LLLLW: WAN at P4 -> PVID 2 on P4/P5, PVID 1 elsewhere */
+		static const u32 pvid_llllw[7] = {
+			0x10001, 0x10001, 0x10001, 0x10001,
+			0x10002, 0x10002, 0x10001,
+		};
+
+		pr_info("set LAN/WAN LLLLW\n");
+		/* LAN/WAN ports P0-P6 as security mode */
+		for (port = 0; port <= 6; port++)
+			mii_mgr_write(31, 0x2004 + port * 0x100, 0xff0003);
+
+		/* set PVID per port */
+		for (port = 0; port <= 6; port++)
+			mii_mgr_write(31, 0x2014 + port * 0x100,
+				      pvid_llllw[port]);
+
+		/* VLAN member tables */
+		is_switch_vlan_table_busy();
+		mii_mgr_write(31, 0x94, 0x404f0001); /* VAWD1 */
+		mii_mgr_write(31, 0x90, 0x80001001); /* VTCR, VID=1 */
+		is_switch_vlan_table_busy();
+		mii_mgr_write(31, 0x94, 0x40300001); /* VAWD1 */
+		mii_mgr_write(31, 0x90, 0x80001002); /* VTCR, VID=2 */
+		is_switch_vlan_table_busy();
+	}
+}
+
+/* Per-PHY tweaks for the MT7530 internal giga PHYs (addresses 0-4):
+ * disable EEE advertisement, enable HW auto-downshift and apply the
+ * vendor-recommended timing/threshold fixups.
+ *
+ * BUGFIX: "®_value" was a mangled "&reg_value" (HTML-entity
+ * corruption) and would not compile; restored.
+ */
+static void mt7530_phy_setting(void)
+{
+	u32 i;
+	u32 reg_value;
+
+	for (i = 0; i < 5; i++) {
+		/* Disable EEE (MMD 7, reg 0x3c = EEE advertisement) */
+		mii_mgr_write_cl45(i, 0x7, 0x3c, 0);
+		/* Enable HW auto downshift (page 1, reg 0x14 bit4) */
+		mii_mgr_write(i, 31, 0x1);
+		mii_mgr_read(i, 0x14, &reg_value);
+		reg_value |= (1 << 4);
+		mii_mgr_write(i, 0x14, reg_value);
+		/* Increase SlvDPSready time (token-ring page 0x52b5) */
+		mii_mgr_write(i, 31, 0x52b5);
+		mii_mgr_write(i, 16, 0xafae);
+		mii_mgr_write(i, 18, 0x2f);
+		mii_mgr_write(i, 16, 0x8fae);
+		/* Increase post_update_timer */
+		mii_mgr_write(i, 31, 0x3);
+		mii_mgr_write(i, 17, 0x4b);
+		/* Adjust 100_mse_threshold */
+		mii_mgr_write_cl45(i, 0x1e, 0x123, 0xffff);
+		/* Disable mcc */
+		mii_mgr_write_cl45(i, 0x1e, 0xa6, 0x300);
+	}
+}
+
+/* Bring up the MT7623/MT7621 GE1 path to the internal MT7530 gigabit
+ * switch: hardware-reset the switch, configure (T)RGMII clocking, the
+ * GE1/GE2 MACs, pad driving strength, the LAN/WAN partition and the
+ * internal PHYs.
+ *
+ * BUGFIX: every "&reg_value" had been mangled to "®_value"
+ * (HTML-entity corruption) and would not compile; restored.
+ */
+static void setup_internal_gsw(void)
+{
+	void __iomem *gpio_base_virt = ioremap(ETH_GPIO_BASE, 0x1000);
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	u32 reg_value;
+	u32 xtal_mode;
+	u32 i;
+
+	/* ROBUSTNESS: ioremap() can fail; the original dereferenced the
+	 * mapping unconditionally.
+	 */
+	if (!gpio_base_virt) {
+		pr_info("setup_internal_gsw: ioremap failed\n");
+		return;
+	}
+
+	if (ei_local->architecture &
+	    (GE1_TRGMII_FORCE_2000 | GE1_TRGMII_FORCE_2600))
+		reg_bit_one(RALINK_SYSCTL_BASE + 0x2c, 11, 1);
+	else
+		reg_bit_zero(RALINK_SYSCTL_BASE + 0x2c, 11, 1);
+	reg_bit_one(ETHDMASYS_ETH_SW_BASE + 0x0390, 1, 1); /* TRGMII mode */
+
+#if defined(CONFIG_GE1_RGMII_FORCE_1200)
+	if (ei_local->chip_name == MT7621_FE)
+		trgmii_set_7621();
+#endif
+
+	/* Hardware reset Switch via GPIO */
+	reg_bit_zero((void __iomem *)gpio_base_virt + 0x520, 1, 1);
+	mdelay(1);
+	reg_bit_one((void __iomem *)gpio_base_virt + 0x520, 1, 1);
+	mdelay(100);
+
+	/* Assert MT7623 RXC reset */
+	reg_bit_one(ETHDMASYS_ETH_SW_BASE + 0x0300, 31, 1);
+	/* For MT7623 reset MT7530 */
+	reg_bit_one(RALINK_SYSCTL_BASE + 0x34, 2, 1);
+	mdelay(1);
+	reg_bit_zero(RALINK_SYSCTL_BASE + 0x34, 2, 1);
+	mdelay(100);
+
+	/* Wait for Switch Reset Completed (HWTRAP reads non-zero) */
+	for (i = 0; i < 100; i++) {
+		mdelay(10);
+		mii_mgr_read(31, 0x7800, &reg_value);
+		if (reg_value != 0) {
+			pr_info("MT7530 Reset Completed!!\n");
+			break;
+		}
+		if (i == 99)
+			pr_info("MT7530 Reset Timeout!!\n");
+	}
+
+	/* Power down PHYs P0-P4 (BMCR bit11) while reconfiguring */
+	for (i = 0; i <= 4; i++) {
+		mii_mgr_read(i, 0x0, &reg_value);
+		reg_value |= (0x1 << 11);
+		mii_mgr_write(i, 0x0, reg_value);
+	}
+	mii_mgr_write(31, 0x7000, 0x3); /* reset switch */
+	usleep_range(100, 110);
+
+#if defined(CONFIG_GE1_RGMII_FORCE_1200)
+	if (ei_local->chip_name == MT7621_FE) {
+		trgmii_set_7530();
+		/* enable MDIO to control MT7530 */
+		reg_value = sys_reg_read(RALINK_SYSCTL_BASE + 0x60);
+		reg_value &= ~(0x3 << 12);
+		sys_reg_write(RALINK_SYSCTL_BASE + 0x60, reg_value);
+	}
+#endif
+
+	/* (GE1, Force 1000M/FD, FC ON) */
+	sys_reg_write(RALINK_ETH_SW_BASE + 0x100, 0x2105e33b);
+	mii_mgr_write(31, 0x3600, 0x5e33b);
+	mii_mgr_read(31, 0x3600, &reg_value);
+	/* (GE2, Link down) */
+	sys_reg_write(RALINK_ETH_SW_BASE + 0x200, 0x00008000);
+
+	mii_mgr_read(31, 0x7804, &reg_value);
+	reg_value &= ~(1 << 8); /* Enable Port 6 */
+	reg_value |= (1 << 6); /* Disable Port 5 */
+	reg_value |= (1 << 13); /* Port 5 as GMAC, no Internal PHY */
+
+	if (ei_local->architecture & GMAC2) {
+		/* RGMII2=Normal mode */
+		reg_bit_zero(RALINK_SYSCTL_BASE + 0x60, 15, 1);
+
+		/* GMAC2= RGMII mode */
+		reg_bit_zero(SYSCFG1, 14, 2);
+		if (ei_local->architecture & GE2_RGMII_AN) {
+			mii_mgr_write(31, 0x3500, 0x56300);
+			/* (GE2, auto-polling) */
+			sys_reg_write(RALINK_ETH_SW_BASE + 0x200, 0x21056300);
+			reg_value |= (1 << 6); /* disable MT7530 P5 */
+			enable_auto_negotiate(ei_local);
+
+		} else {
+			/* MT7530 P5 Force 1000 */
+			mii_mgr_write(31, 0x3500, 0x5e33b);
+			/* (GE2, Force 1000) */
+			sys_reg_write(RALINK_ETH_SW_BASE + 0x200, 0x2105e33b);
+			reg_value &= ~(1 << 6); /* enable MT7530 P5 */
+			reg_value |= ((1 << 7) | (1 << 13) | (1 << 16));
+			if (ei_local->architecture & WAN_AT_P0)
+				reg_value |= (1 << 20);
+			else
+				reg_value &= ~(1 << 20);
+		}
+	}
+	reg_value &= ~(1 << 5);
+	reg_value |= (1 << 16); /* change HW-TRAP */
+	pr_info("change HW-TRAP to 0x%x\n", reg_value);
+	mii_mgr_write(31, 0x7804, reg_value);
+
+	/* HWTRAP[10:9] encodes the crystal frequency */
+	mii_mgr_read(31, 0x7800, &reg_value);
+	reg_value = (reg_value >> 9) & 0x3;
+	if (reg_value == 0x3) { /* 25Mhz Xtal */
+		xtal_mode = 1;
+		/* Do Nothing */
+	} else if (reg_value == 0x2) { /* 40Mhz */
+		xtal_mode = 2;
+		/* disable MT7530 core clock */
+		mii_mgr_write_cl45(0, 0x1f, 0x410, 0x0);
+
+		mii_mgr_write_cl45(0, 0x1f, 0x40d, 0x2020);
+		mii_mgr_write_cl45(0, 0x1f, 0x40e, 0x119);
+		mii_mgr_write_cl45(0, 0x1f, 0x40d, 0x2820);
+		usleep_range(20, 30); /* suggest by CD */
+#if defined(CONFIG_GE1_RGMII_FORCE_1200)
+		mii_mgr_write_cl45(0, 0x1f, 0x410, 0x3);
+#else
+		mii_mgr_write_cl45(0, 0x1f, 0x410, 0x1);
+#endif
+	} else {
+		xtal_mode = 3;
+		/* TODO */
+	}
+
+	/* set MT7530 central align */
+#if !defined(CONFIG_GE1_RGMII_FORCE_1200) /* for RGMII 1000HZ */
+	mii_mgr_read(31, 0x7830, &reg_value);
+	reg_value &= ~1;
+	reg_value |= 1 << 1;
+	mii_mgr_write(31, 0x7830, reg_value);
+
+	mii_mgr_read(31, 0x7a40, &reg_value);
+	reg_value &= ~(1 << 30);
+	mii_mgr_write(31, 0x7a40, reg_value);
+
+	reg_value = 0x855;
+	mii_mgr_write(31, 0x7a78, reg_value);
+#endif
+
+	mii_mgr_write(31, 0x7b00, 0x104); /* delay setting for 10/1000M */
+	mii_mgr_write(31, 0x7b04, 0x10); /* delay setting for 10/1000M */
+
+	/* Tx Driving */
+	mii_mgr_write(31, 0x7a54, 0x88); /* lower GE1 driving */
+	mii_mgr_write(31, 0x7a5c, 0x88); /* lower GE1 driving */
+	mii_mgr_write(31, 0x7a64, 0x88); /* lower GE1 driving */
+	mii_mgr_write(31, 0x7a6c, 0x88); /* lower GE1 driving */
+	mii_mgr_write(31, 0x7a74, 0x88); /* lower GE1 driving */
+	mii_mgr_write(31, 0x7a7c, 0x88); /* lower GE1 driving */
+	mii_mgr_write(31, 0x7810, 0x11); /* lower GE2 driving */
+	/* Set MT7623 TX Driving */
+	sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x0354, 0x88);
+	sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x035c, 0x88);
+	sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x0364, 0x88);
+	sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x036c, 0x88);
+	sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x0374, 0x88);
+	sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x037c, 0x88);
+
+	/* Set GE2 driving and slew rate */
+	if (ei_local->architecture & GE2_RGMII_AN)
+		sys_reg_write((void __iomem *)gpio_base_virt + 0xf00, 0xe00);
+	else
+		sys_reg_write((void __iomem *)gpio_base_virt + 0xf00, 0xa00);
+	/* set GE2 TDSEL */
+	sys_reg_write((void __iomem *)gpio_base_virt + 0x4c0, 0x5);
+	/* set GE2 TUNE */
+	sys_reg_write((void __iomem *)gpio_base_virt + 0xed0, 0);
+
+	if (ei_local->chip_name == MT7623_FE)
+		mt7530_trgmii_clock_setting(xtal_mode);
+	if (ei_local->architecture & GE1_RGMII_FORCE_1000) {
+		sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x0350, 0x55);
+		sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x0358, 0x55);
+		sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x0360, 0x55);
+		sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x0368, 0x55);
+		sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x0370, 0x55);
+		sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x0378, 0x855);
+	}
+
+	lan_wan_partition();
+	mt7530_phy_setting();
+	/* Power the internal PHYs back up */
+	for (i = 0; i <= 4; i++) {
+		mii_mgr_read(i, 0x0, &reg_value);
+		reg_value &= ~(0x1 << 11);
+		mii_mgr_write(i, 0x0, reg_value);
+	}
+
+	mii_mgr_read(31, 0x7808, &reg_value);
+	reg_value |= (3 << 16); /* Enable INTR */
+	mii_mgr_write(31, 0x7808, reg_value);
+
+	iounmap(gpio_base_virt);
+}
+
+/* Configure the GE1+GE2 RGMII MACs for an external gigabit switch
+ * (MT7623): GE1 link-down, GE2 forced 1000M/FD with flow control.
+ */
+void setup_external_gsw(void)
+{
+	/* reduce RGMII2 PAD driving strength */
+	reg_bit_zero(PAD_RGMII2_MDIO_CFG, 4, 2);
+	/* enable MDIO */
+	reg_bit_zero(RALINK_SYSCTL_BASE + 0x60, 12, 2);
+
+	/* RGMII1=Normal mode */
+	reg_bit_zero(RALINK_SYSCTL_BASE + 0x60, 14, 1);
+	/* GMAC1= RGMII mode */
+	reg_bit_zero(SYSCFG1, 12, 2);
+
+	/* (GE1, Link down) */
+	sys_reg_write(RALINK_ETH_SW_BASE + 0x100, 0x00008000);
+
+	/* RGMII2=Normal mode */
+	reg_bit_zero(RALINK_SYSCTL_BASE + 0x60, 15, 1);
+	/* GMAC2= RGMII mode */
+	reg_bit_zero(SYSCFG1, 14, 2);
+
+	/* (GE2, Force 1000M/FD, FC ON) */
+	sys_reg_write(RALINK_ETH_SW_BASE + 0x200, 0x2105e33b);
+}
+
+/* Return 1 if the attached giga PHY identifies as Marvell (ID regs 2/3).
+ *
+ * FORMATTING FIX: this function header previously shared a source line
+ * with the closing brace of setup_external_gsw().
+ * NOTE(review): @ge is currently unused — the PHY address is chosen
+ * from the architecture flags instead; confirm against callers.
+ */
+int is_marvell_gigaphy(int ge)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	u32 phy_id0 = 0, phy_id1 = 0, phy_address;
+
+	if (ei_local->architecture & GE1_RGMII_AN)
+		phy_address = mac_to_gigaphy_mode_addr;
+	else
+		phy_address = mac_to_gigaphy_mode_addr2;
+
+	if (!mii_mgr_read(phy_address, 2, &phy_id0)) {
+		pr_info("\n Read PhyID 1 is Fail!!\n");
+		phy_id0 = 0;
+	}
+	if (!mii_mgr_read(phy_address, 3, &phy_id1)) {
+		pr_info("\n Read PhyID 1 is Fail!!\n");
+		phy_id1 = 0;
+	}
+
+	if ((phy_id0 == EV_MARVELL_PHY_ID0) && (phy_id1 == EV_MARVELL_PHY_ID1))
+		return 1;
+	return 0;
+}
+
+/* Return 1 if the attached giga PHY identifies as a Vitesse part
+ * (PHY ID registers 2 and 3 match the expected values).
+ */
+int is_vtss_gigaphy(int ge)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	u32 id_reg2 = 0, id_reg3 = 0, phy_addr;
+
+	phy_addr = (ei_local->architecture & GE1_RGMII_AN) ?
+		   mac_to_gigaphy_mode_addr : mac_to_gigaphy_mode_addr2;
+
+	if (!mii_mgr_read(phy_addr, 2, &id_reg2)) {
+		pr_info("\n Read PhyID 1 is Fail!!\n");
+		id_reg2 = 0;
+	}
+	if (!mii_mgr_read(phy_addr, 3, &id_reg3)) {
+		pr_info("\n Read PhyID 1 is Fail!!\n");
+		id_reg3 = 0;
+	}
+
+	return (id_reg2 == EV_VTSS_PHY_ID0 &&
+		id_reg3 == EV_VTSS_PHY_ID1) ? 1 : 0;
+}
+
+/* Power up the MT7530 switch before FE initialization: enable its 1.0V
+ * core supply, plus either the 3.3V rail (MCM package) or a hardware
+ * reset pulse via the reset GPIO.
+ */
+void fe_sw_preinit(struct END_DEVICE *ei_local)
+{
+	struct device_node *np = ei_local->switch_np;
+	struct platform_device *pdev = of_find_device_by_node(np);
+	struct mtk_gsw *gsw;
+	int ret;
+
+	/* ROBUSTNESS: of_find_device_by_node() may return NULL; the
+	 * original passed it straight to platform_get_drvdata().
+	 */
+	if (!pdev) {
+		pr_info("Failed to find gsw platform device\n");
+		return;
+	}
+
+	gsw = platform_get_drvdata(pdev);
+	if (!gsw) {
+		pr_info("Failed to get gsw\n");
+		return;
+	}
+
+	/* ROBUSTNESS: regulator_set_voltage() can fail; log it instead
+	 * of silently enabling the rail at an unknown voltage.
+	 */
+	ret = regulator_set_voltage(gsw->supply, 1000000, 1000000);
+	if (ret)
+		pr_info("Failed to set mt7530 core voltage: %d\n", ret);
+	ret = regulator_enable(gsw->supply);
+	if (ret)
+		pr_info("Failed to enable mt7530 power: %d\n", ret);
+
+	if (gsw->mcm) {
+		ret = regulator_set_voltage(gsw->b3v, 3300000, 3300000);
+		if (ret)
+			pr_info("Failed to set b3v voltage: %d\n", ret);
+		ret = regulator_enable(gsw->b3v);
+		if (ret)
+			dev_err(&pdev->dev, "Failed to enable b3v: %d\n", ret);
+	} else {
+		/* Pulse the switch hardware-reset GPIO: low >=1ms, then
+		 * high and wait 100ms for the switch to come up.
+		 */
+		ret = devm_gpio_request(&pdev->dev, gsw->reset_pin,
+					"mediatek,reset-pin");
+		if (ret)
+			pr_info("fail to devm_gpio_request\n");
+
+		gpio_direction_output(gsw->reset_pin, 0);
+		usleep_range(1000, 1100);
+		gpio_set_value(gsw->reset_pin, 1);
+		mdelay(100);
+		devm_gpio_free(&pdev->dev, gsw->reset_pin);
+	}
+}
+
+/* Force an SGMII port up at a fixed speed (no auto-negotiation).
+ *
+ * @port_num: SGMII port, 1 or 2
+ * @speed: value shifted into the PHYA GEN2 speed field (1 = 2.5G)
+ *
+ * BUGFIX: sgmii_reg/sgmii_reg_phya were used uninitialized (UB) when
+ * port_num was neither 1 nor 2; such calls now return early.
+ */
+void set_sgmii_force_link(int port_num, int speed)
+{
+	void __iomem *virt_addr;
+	unsigned int reg_value;
+	unsigned int sgmii_reg_phya, sgmii_reg;
+
+	if (port_num != 1 && port_num != 2)
+		return;
+
+	/* Route the ETHSYS SGMII mux to this port */
+	virt_addr = ioremap(ETHSYS_BASE, 0x20);
+	reg_value = sys_reg_read(virt_addr + 0x14);
+
+	if (port_num == 1) {
+		reg_value |= SGMII_CONFIG_0;
+		sgmii_reg_phya = SGMII_REG_PHYA_BASE0;
+		sgmii_reg = SGMII_REG_BASE0;
+		set_ge1_force_1000();
+	} else {
+		reg_value |= SGMII_CONFIG_1;
+		sgmii_reg_phya = SGMII_REG_PHYA_BASE1;
+		sgmii_reg = SGMII_REG_BASE1;
+		set_ge2_force_1000();
+	}
+
+	sys_reg_write(virt_addr + 0x14, reg_value);
+	reg_value = sys_reg_read(virt_addr + 0x14); /* post/read-back */
+	iounmap(virt_addr);
+
+	/* Set SGMII GEN2 speed(2.5G) */
+	virt_addr = ioremap(sgmii_reg_phya, 0x100);
+	reg_value = sys_reg_read(virt_addr + 0x28);
+	reg_value |= speed << 2;
+	sys_reg_write(virt_addr + 0x28, reg_value);
+	iounmap(virt_addr);
+
+	virt_addr = ioremap(sgmii_reg, 0x100);
+	/* disable SGMII AN */
+	reg_value = sys_reg_read(virt_addr);
+	reg_value &= ~(1 << 12);
+	sys_reg_write(virt_addr, reg_value);
+	/* SGMII force mode setting */
+	reg_value = sys_reg_read(virt_addr + 0x20);
+	sys_reg_write(virt_addr + 0x20, 0x31120019);
+	reg_value = sys_reg_read(virt_addr + 0x20);
+	/* Release PHYA power down state */
+	reg_value = sys_reg_read(virt_addr + 0xe8);
+	reg_value &= ~(1 << 4);
+	sys_reg_write(virt_addr + 0xe8, reg_value);
+	iounmap(virt_addr);
+}
+
+/* Enable SGMII auto-negotiation on port 1 or 2: select the SGMII mux,
+ * enable MAC auto-polling, then set the link timer and restart AN.
+ *
+ * BUGFIX: sgmii_reg was used uninitialized (UB) when port_num was
+ * neither 1 nor 2; such calls now return early.  The set-but-unused
+ * sgmii_reg_phya local was removed.
+ */
+void set_sgmii_an(int port_num)
+{
+	void __iomem *virt_addr;
+	unsigned int reg_value;
+	unsigned int sgmii_reg;
+
+	if (port_num != 1 && port_num != 2)
+		return;
+
+	virt_addr = ioremap(ETHSYS_BASE, 0x20);
+	reg_value = sys_reg_read(virt_addr + 0x14);
+
+	if (port_num == 1) {
+		reg_value |= SGMII_CONFIG_0;
+		sgmii_reg = SGMII_REG_BASE0;
+	} else {
+		reg_value |= SGMII_CONFIG_1;
+		sgmii_reg = SGMII_REG_BASE1;
+	}
+
+	sys_reg_write(virt_addr + 0x14, reg_value);
+	iounmap(virt_addr);
+
+	/* set auto polling */
+	virt_addr = ioremap(ETHSYS_MAC_BASE, 0x300);
+	sys_reg_write(virt_addr + (0x100 * port_num), 0x21056300);
+	iounmap(virt_addr);
+
+	virt_addr = ioremap(sgmii_reg, 0x100);
+	/* set link timer */
+	sys_reg_write(virt_addr + 0x18, 0x186a0);
+	/* disable remote fault */
+	reg_value = sys_reg_read(virt_addr + 0x20);
+	reg_value |= 1 << 8;
+	sys_reg_write(virt_addr + 0x20, reg_value);
+	/* restart an */
+	reg_value = sys_reg_read(virt_addr);
+	reg_value |= 1 << 9;
+	sys_reg_write(virt_addr, reg_value);
+	/* Release PHYA power down state */
+	reg_value = sys_reg_read(virt_addr + 0xe8);
+	reg_value &= ~(1 << 4);
+	sys_reg_write(virt_addr + 0xe8, reg_value);
+	iounmap(virt_addr);
+}
+
+/* Switch the MT7622 5-port embedded PHY block into GPIO mode.
+ *
+ * Pokes the PHY analog/test pages over MDIO: global pages G2/G3/G7 on
+ * PHY address 0, then the per-port L0/L3 pages of PHYs 0-4, and finally
+ * LDO and bandgap trim.  Register 31 selects the current page.
+ *
+ * NOTE(review): individual register meanings are not documented in this
+ * file; the order follows the vendor bring-up sequence and is assumed
+ * significant — do not reorder.
+ */
+static void mt7622_esw_5port_gpio(void)
+{
+ u32 ret, value, i;
+
+ mii_mgr_write(0, 31, 0x2000); /* change G2 page */
+
+ ret = mii_mgr_read(0, 31, &value);
+ pr_debug("(%d) R31: %x!\n", ret, value);
+
+ /* G2_R25: overwrite with 0xf020 (prior read value is discarded) */
+ mii_mgr_read(0, 25, &value);
+ value = 0xf020;
+ mii_mgr_write(0, 25, value);
+ mii_mgr_read(0, 25, &value);
+ pr_debug("G2_R25: %x!\n", value);
+
+ mii_mgr_write(0, 31, 0x7000); /* change G7 page */
+ mii_mgr_read(0, 22, &value);
+
+ /* Ensure G7_R22 bit15 is set */
+ if (value & 0x8000) {
+ pr_debug("G7_R22[15]: 1\n");
+ } else {
+ mii_mgr_write(0, 22, (value | (1 << 15)));
+ pr_debug("G7_R22[15]: set to 1\n");
+ }
+
+ mii_mgr_write(0, 31, 0x3000); /* change G3 page */
+ mii_mgr_read(0, 16, &value);
+ value |= (1 << 3);
+ mii_mgr_write(0, 16, value);
+
+ mii_mgr_read(0, 16, &value);
+ pr_debug("G3_R16: %x!\n", value);
+
+ mii_mgr_write(0, 31, 0x7000); /* change G7 page */
+ mii_mgr_read(0, 22, &value);
+ value |= (1 << 5);
+ mii_mgr_write(0, 22, value);
+
+ /* G7_R24: clear bit13, then set bit14 (two separate RMW cycles) */
+ mii_mgr_read(0, 24, &value);
+ value &= 0xDFFF;
+ mii_mgr_write(0, 24, value);
+
+ mii_mgr_read(0, 24, &value);
+ value |= (1 << 14);
+ mii_mgr_write(0, 24, value);
+
+ mii_mgr_read(0, 22, &value);
+ pr_debug("G7_R22: %x!\n", value);
+
+ mii_mgr_read(0, 24, &value);
+ pr_debug("G7_R24: %x!\n", value);
+
+ /* Per-port local-page tweaks for PHYs P0-P4 */
+ for (i = 0; i <= 4; i++) {
+ mii_mgr_write(i, 31, 0x8000); /* change L0 page */
+
+ mii_mgr_read(i, 30, &value);
+ value |= 0x3FFF;
+ mii_mgr_write(i, 30, value);
+ mii_mgr_read(i, 30, &value);
+ pr_debug("port %d L0_R30: %x!\n", i, value);
+
+ mii_mgr_write(i, 31, 0xB000); /* change L3 page */
+
+ mii_mgr_read(i, 26, &value);
+ value |= (1 << 12);
+ mii_mgr_write(i, 26, value);
+
+ mii_mgr_read(i, 26, &value);
+ pr_debug("port %d L3_R26: %x!\n", i, value);
+
+ mii_mgr_read(i, 25, &value);
+ value |= (1 << 8);
+ value |= (1 << 12);
+ mii_mgr_write(i, 25, value);
+
+ mii_mgr_read(i, 25, &value);
+ pr_debug("port %d L3_R25: %x!\n", i, value);
+ }
+
+ mii_mgr_write(0, 31, 0x2000); /* change G2 page */
+
+ mii_mgr_read(0, 25, &value);
+
+ pr_debug("G2_R25 before: %x!\n", value);
+ /* value &= 0xFFFF3FFF; */
+ /* G2_R25: 1020!-->0020 */
+ /* value &= 0xFFFF2FFF; */
+ value = 0x20;
+ mii_mgr_write(0, 25, value);
+
+ mii_mgr_read(0, 25, &value);
+ pr_debug("G2_R25: %x!\n", value);
+
+ /* LDO */
+ mii_mgr_write(0, 31, 0x7000); /* change G7 page */
+
+ mii_mgr_read(0, 16, &value);
+ value |= (1 << 2);
+ mii_mgr_write(0, 16, value);
+
+ mii_mgr_read(0, 16, &value);
+ pr_debug("G7_R16: %x!\n", value);
+
+ /* BG */
+ mii_mgr_write(0, 31, 0x2000); /* change G2 page */
+
+ /* Set bandgap bits [14:12], then clear bit15 in a second RMW */
+ mii_mgr_read(0, 22, &value);
+ value |= (1 << 12);
+ value |= (1 << 13);
+ value |= (1 << 14);
+ mii_mgr_write(0, 22, value);
+
+ mii_mgr_read(0, 22, &value);
+ pr_debug("G2_R22: %x!\n", value);
+
+ mii_mgr_read(0, 22, &value);
+ value &= 0x7FFF;
+ mii_mgr_write(0, 22, value);
+
+ mii_mgr_read(0, 22, &value);
+ pr_debug("G2_R22: %x!\n", value);
+}
+
+/* Route the leopard internal giga PHY to GMAC0 (GMII) or GMAC2 and
+ * program the GEPHY pad-control register.
+ *
+ * @enable: nonzero = connect gphy to GMAC0, enable port5 AN mode;
+ *          zero = connect gphy to GMAC2, fixed port5/port6 config.
+ *
+ * NOTE(review): magic register values follow the vendor bring-up
+ * sequence; individual field meanings are not documented here.
+ */
+void leopard_gmii_config(u8 enable)
+{
+ unsigned int reg_value = 0;
+ void __iomem *gpio_base_virt, *infra_base_virt;
+ struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+ /*bit[1]: gphy connect GMAC0 or GMAC2 1:GMAC0. 0:GMAC2*/
+ /*bit[0]: Co-QPHY path selection 0:U3path, 1:SGMII*/
+ infra_base_virt = ioremap(INFRA_BASE, 0x10);
+ reg_value = sys_reg_read(infra_base_virt);
+ if (enable) {
+ reg_value = reg_value | 0x02;
+ sys_reg_write(infra_base_virt, reg_value);
+
+ /* gphy sits at MDIO address 0 on this path */
+ mac_to_gigaphy_mode_addr = 0;
+ enable_auto_negotiate(ei_local);
+
+ /*port5 enable*/
+ sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x90, 0x00007f7f);
+ /*port5 an mode, port6 fix*/
+ sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0xc8, 0x20503bfa);
+ } else {
+ reg_value = reg_value & (~0x2);
+ sys_reg_write(infra_base_virt, reg_value);
+ sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x84, 0);
+ sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x90, 0x10007f7f);
+ sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0xc8, 0x05503f38);
+ }
+ /*10000710 GEPHY_CTRL0[9:6] = 0 */
+ gpio_base_virt = ioremap(GPIO_GO_BASE, 0x10);
+ reg_value = sys_reg_read(gpio_base_virt);
+ /*reg_value = reg_value & ~(0xfffff3cf);*/
+ /* NOTE(review): full overwrite, not a read-modify-write */
+ reg_value = 0x10000820;
+ sys_reg_write(gpio_base_virt, reg_value);
+ iounmap(gpio_base_virt);
+ iounmap(infra_base_virt);
+}
+
+/* Top-level front-end switch/PHY initialization.  Dispatches on the
+ * board architecture flags: external giga PHYs (GE1/GE2 RGMII AN),
+ * internal or external giga switches, the MT7622/leopard embedded
+ * switches and the SGMII force/AN paths.
+ *
+ * BUGFIX: every "&reg_value" had been mangled to "®_value"
+ * (HTML-entity corruption) and would not compile; restored.  Also adds
+ * the missing iounmap() of the ETHSYS mapping (was leaked).
+ */
+void fe_sw_init(void)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+	unsigned int reg_value = 0;
+	void __iomem *gpio_base_virt, *infra_base_virt, *ethsys_base_virt;
+	//int i;
+	//u16 r0_tmp;
+
+	/* Case1: MT7623/MT7622 GE1 + GigaPhy */
+	if (ei_local->architecture & GE1_RGMII_AN) {
+		//enable_auto_negotiate(ei_local);
+		if (is_marvell_gigaphy(1)) {
+			if (ei_local->features & FE_FPGA_MODE) {
+				mii_mgr_read(mac_to_gigaphy_mode_addr, 9,
+					     &reg_value);
+				/* turn off 1000Base-T Advertisement
+				 * (9.9=1000Full, 9.8=1000Half)
+				 */
+				reg_value &= ~(3 << 8);
+				mii_mgr_write(mac_to_gigaphy_mode_addr,
+					      9, reg_value);
+
+				/*10Mbps, debug */
+				mii_mgr_write(mac_to_gigaphy_mode_addr,
+					      4, 0x461);
+
+				mii_mgr_read(mac_to_gigaphy_mode_addr, 0,
+					     &reg_value);
+				reg_value |= 1 << 9; /* restart AN */
+				mii_mgr_write(mac_to_gigaphy_mode_addr,
+					      0, reg_value);
+			}
+		}
+		if (is_vtss_gigaphy(1)) {
+			/* Adjust RGMII clock skew in the Vitesse PHY */
+			mii_mgr_write(mac_to_gigaphy_mode_addr, 31, 1);
+			mii_mgr_read(mac_to_gigaphy_mode_addr, 28,
+				     &reg_value);
+			pr_info("Vitesse phy skew: %x --> ", reg_value);
+			reg_value |= (0x3 << 12);
+			reg_value &= ~(0x3 << 14);
+			pr_info("%x\n", reg_value);
+			mii_mgr_write(mac_to_gigaphy_mode_addr, 28,
+				      reg_value);
+			mii_mgr_write(mac_to_gigaphy_mode_addr, 31, 0);
+		}
+	}
+
+	/* Case2: RT3883/MT7621 GE2 + GigaPhy */
+	if (ei_local->architecture & GE2_RGMII_AN) {
+#if(0)
+		leopard_gmii_config(0);
+		enable_auto_negotiate(ei_local);
+		set_ge2_an();
+		set_ge2_gmii();
+		if (ei_local->chip_name == LEOPARD_FE) {
+			for (i = 1; i < 5; i++)
+				do_fe_phy_all_analog_cal(i);
+
+			do_ge_phy_all_analog_cal(0);
+		}
+#endif
+		if (is_marvell_gigaphy(2)) {
+			mii_mgr_read(mac_to_gigaphy_mode_addr2, 9,
+				     &reg_value);
+			/* turn off 1000Base-T Advertisement
+			 * (9.9=1000Full, 9.8=1000Half)
+			 */
+			reg_value &= ~(3 << 8);
+			mii_mgr_write(mac_to_gigaphy_mode_addr2, 9,
+				      reg_value);
+
+			mii_mgr_read(mac_to_gigaphy_mode_addr2, 20,
+				     &reg_value);
+			/* Add delay to RX_CLK for RXD Outputs */
+			reg_value |= 1 << 7;
+			mii_mgr_write(mac_to_gigaphy_mode_addr2, 20,
+				      reg_value);
+
+			mii_mgr_read(mac_to_gigaphy_mode_addr2, 0,
+				     &reg_value);
+			reg_value |= 1 << 15; /* PHY Software Reset */
+			mii_mgr_write(mac_to_gigaphy_mode_addr2, 0,
+				      reg_value);
+			if (ei_local->features & FE_FPGA_MODE) {
+				mii_mgr_read(mac_to_gigaphy_mode_addr2,
+					     9, &reg_value);
+				/* turn off 1000Base-T Advertisement
+				 * (9.9=1000Full, 9.8=1000Half)
+				 */
+				reg_value &= ~(3 << 8);
+				mii_mgr_write(mac_to_gigaphy_mode_addr2,
+					      9, reg_value);
+
+				/*10Mbps, debug */
+				mii_mgr_write(mac_to_gigaphy_mode_addr2,
+					      4, 0x461);
+
+				mii_mgr_read(mac_to_gigaphy_mode_addr2,
+					     0, &reg_value);
+				reg_value |= 1 << 9; /* restart AN */
+				mii_mgr_write(mac_to_gigaphy_mode_addr2,
+					      0, reg_value);
+			}
+		}
+		if (is_vtss_gigaphy(2)) {
+			mii_mgr_write(mac_to_gigaphy_mode_addr2, 31, 1);
+			mii_mgr_read(mac_to_gigaphy_mode_addr2, 28,
+				     &reg_value);
+			pr_info("Vitesse phy skew: %x --> ", reg_value);
+			reg_value |= (0x3 << 12);
+			reg_value &= ~(0x3 << 14);
+			pr_info("%x\n", reg_value);
+			mii_mgr_write(mac_to_gigaphy_mode_addr2, 28,
+				      reg_value);
+			mii_mgr_write(mac_to_gigaphy_mode_addr2, 31, 0);
+		}
+	}
+
+	/* Case3: MT7623 GE1 + Internal GigaSW */
+	if (ei_local->architecture &
+	    (GE1_RGMII_FORCE_1000 | GE1_TRGMII_FORCE_2000 |
+	     GE1_TRGMII_FORCE_2600)) {
+		if ((ei_local->chip_name == MT7623_FE) ||
+		    (ei_local->chip_name == MT7621_FE))
+			setup_internal_gsw();
+		/* TODO
+		 * else if (ei_local->features & FE_FPGA_MODE)
+		 * setup_fpga_gsw();
+		 * else
+		 * sys_reg_write(MDIO_CFG, INIT_VALUE_OF_FORCE_1000_FD);
+		 */
+	}
+
+	/* Case4: MT7623 GE2 + GigaSW */
+	if (ei_local->architecture & GE2_RGMII_FORCE_1000) {
+		set_ge2_force_1000();
+		if (ei_local->chip_name == MT7623_FE)
+			setup_external_gsw();
+	}
+	/*TODO
+	 * else
+	 * sys_reg_write(MDIO_CFG2, INIT_VALUE_OF_FORCE_1000_FD);
+	 */
+
+	/* Case5: MT7622 embedded switch */
+	if (ei_local->architecture & RAETH_ESW) {
+		/* Route traffic through the embedded switch */
+		reg_value = sys_reg_read(ETHDMASYS_ETH_MAC_BASE + 0xC);
+		reg_value = reg_value | 0x1;
+		sys_reg_write(ETHDMASYS_ETH_MAC_BASE + 0xC, reg_value);
+
+		if (ei_local->architecture & MT7622_EPHY) {
+			gpio_base_virt = ioremap(GPIO_GO_BASE, 0x100);
+			sys_reg_write(gpio_base_virt + 0xF0, 0xE0FFFFFF);
+			iounmap(gpio_base_virt);
+			gpio_base_virt = ioremap(GPIO_MODE_BASE, 0x100);
+			reg_value = sys_reg_read(gpio_base_virt + 0x90);
+			reg_value &= 0x0000ffff;
+			reg_value |= 0x22220000;
+			sys_reg_write(gpio_base_virt + 0x90, reg_value);
+			iounmap(gpio_base_virt);
+			sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x84, 0);
+			sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x90, 0x10007f7f);
+			sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0xc8, 0x05503f38);
+		} else if (ei_local->architecture & LEOPARD_EPHY) {
+			set_ge1_an();
+			/*port0 force link down*/
+			sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x84, 0x8000000);
+			sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x8c, 0x02404040);
+			sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x98, 0x00007f7f);
+			sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x04, 0xfbffffff);
+			sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x9c, 0x0008a041);
+#if(0)
+			if (ei_local->architecture & LEOPARD_EPHY_GMII) {
+				leopard_gmii_config(1);
+				set_ge0_gmii();
+			} else {
+				leopard_gmii_config(0);
+			}
+#endif
+		}
+	}
+
+	/* clear SGMII setting */
+	if ((ei_local->chip_name == LEOPARD_FE) ||
+	    (ei_local->chip_name == MT7622_FE)) {
+		ethsys_base_virt = ioremap(ETHSYS_BASE, 0x20);
+		reg_value = sys_reg_read(ethsys_base_virt + 0x14);
+		reg_value &= ~(3 << 8);
+		sys_reg_write(ethsys_base_virt + 0x14, reg_value);
+		/* BUGFIX: mapping was never released (missing iounmap) */
+		iounmap(ethsys_base_virt);
+	}
+
+	if (ei_local->architecture & GE1_SGMII_FORCE_2500)
+		set_sgmii_force_link(1, 1);
+	else if (ei_local->architecture & GE1_SGMII_AN) {
+		enable_auto_negotiate(ei_local);
+		set_sgmii_an(1);
+	}
+	if (ei_local->chip_name == LEOPARD_FE) {
+		if (ei_local->architecture & GE2_RAETH_SGMII) {
+			/*bit[1]: gphy connect GMAC0 or GMAC2 1:GMAC0. 0:GMAC2*/
+			/*bit[0]: Co-QPHY path selection 0:U3path, 1:SGMII*/
+			infra_base_virt = ioremap(INFRA_BASE, 0x10);
+			reg_value = sys_reg_read(infra_base_virt);
+			reg_value = reg_value | 0x01;
+			sys_reg_write(infra_base_virt, reg_value);
+			iounmap(infra_base_virt);
+		}
+	}
+
+	if (ei_local->architecture & GE2_SGMII_FORCE_2500)
+		set_sgmii_force_link(2, 1);
+	else if (ei_local->architecture & GE2_SGMII_AN) {
+		enable_auto_negotiate(ei_local);
+		set_sgmii_an(2);
+	}
+
+	if (ei_local->architecture & MT7622_EPHY) {
+		//mt7622_ephy_cal();
+	} else if (ei_local->architecture & LEOPARD_EPHY) {
+#if(0)
+		leopard_ephy_cal();
+		tc_phy_write_l_reg(2, 1, 18, 0x21f);
+		tc_phy_write_l_reg(2, 1, 18, 0x22f);
+		tc_phy_write_l_reg(2, 1, 18, 0x23f);
+		tc_phy_write_l_reg(2, 1, 18, 0x24f);
+		tc_phy_write_l_reg(2, 1, 18, 0x4f);
+		tc_phy_write_l_reg(4, 1, 18, 0x21f);
+		tc_phy_write_l_reg(4, 1, 18, 0x22f);
+		tc_phy_write_l_reg(4, 1, 18, 0x2f);
+		r0_tmp = tc_phy_read_l_reg(3, 0, 0);
+		r0_tmp = r0_tmp | 0x200;
+		tc_phy_write_l_reg(3, 0, 0, r0_tmp);
+#endif
+	}
+
+	if (ei_local->chip_name == MT7621_FE) {
+		clk_prepare_enable(ei_local->clks[MTK_CLK_GP0]);
+
+		/* switch to esw */
+		sys_reg_write(ETHDMASYS_ETH_MAC_BASE + 0xC, 0x1);
+
+		/* set agpio to 5-port ephy */
+		gpio_base_virt = ioremap(GPIO_GO_BASE, 0x100);
+		reg_value = sys_reg_read(gpio_base_virt + 0xF0);
+		reg_value &= 0xE0FFFFFF;
+		sys_reg_write(gpio_base_virt + 0xF0, reg_value);
+		iounmap(gpio_base_virt);
+
+		/* set ephy to 5-port gpio mode */
+		mt7622_esw_5port_gpio();
+
+		/* set agpio to 0-port ephy */
+		gpio_base_virt = ioremap(GPIO_GO_BASE, 0x100);
+		reg_value = sys_reg_read(gpio_base_virt + 0xF0);
+		reg_value |= BITS(24, 28);
+		sys_reg_write(gpio_base_virt + 0xF0, reg_value);
+		iounmap(gpio_base_virt);
+
+		/* switch back to gmac1 */
+		sys_reg_write(ETHDMASYS_ETH_MAC_BASE + 0xC, 0x0);
+
+		clk_disable_unprepare(ei_local->clks[MTK_CLK_GP0]);
+	}
+
+	if (ei_local->chip_name == MT7622_FE) {
+		if (ei_local->features & FE_GE2_SUPPORT) {
+			gpio_base_virt = ioremap(GPIO_GO_BASE + 0x100, 0x100);
+			reg_value = sys_reg_read(gpio_base_virt + 0x70);
+			reg_value = reg_value | (1 << 30);
+			sys_reg_write(gpio_base_virt + 0x70, reg_value);
+			reg_value = sys_reg_read(gpio_base_virt + 0x8c);
+			reg_value = reg_value | (1 << 24);
+			sys_reg_write(gpio_base_virt + 0x8c, reg_value);
+			iounmap(gpio_base_virt);
+		}
+	}
+}
+
+/* Undo fe_sw_preinit()/fe_sw_init(): disable the switch regulators and
+ * return the embedded-PHY pads to GPIO mode.
+ */
+void fe_sw_deinit(struct END_DEVICE *ei_local)
+{
+	struct device_node *np = ei_local->switch_np;
+	struct platform_device *pdev = of_find_device_by_node(np);
+	void __iomem *gpio_base_virt;
+	unsigned int reg_value;
+	struct mtk_gsw *gsw;
+	int ret;
+
+	/* ROBUSTNESS: of_find_device_by_node() may return NULL; the
+	 * original passed it straight to platform_get_drvdata().
+	 */
+	if (!pdev)
+		return;
+
+	gsw = platform_get_drvdata(pdev);
+	if (!gsw)
+		return;
+
+	ret = regulator_disable(gsw->supply);
+	if (ret)
+		dev_err(&pdev->dev, "Failed to disable mt7530 power: %d\n", ret);
+
+	if (gsw->mcm) {
+		ret = regulator_disable(gsw->b3v);
+		if (ret)
+			dev_err(&pdev->dev, "Failed to disable b3v: %d\n", ret);
+	}
+
+	if (ei_local->architecture & MT7622_EPHY) {
+		/* set ephy to 5-port gpio mode */
+		mt7622_esw_5port_gpio();
+
+		/* set agpio to 0-port ephy */
+		gpio_base_virt = ioremap(GPIO_GO_BASE, 0x100);
+		reg_value = sys_reg_read(gpio_base_virt + 0xF0);
+		reg_value |= BITS(24, 28);
+		sys_reg_write(gpio_base_virt + 0xF0, reg_value);
+		iounmap(gpio_base_virt);
+	} else if (ei_local->architecture & LEOPARD_EPHY) {
+		mt7622_esw_5port_gpio();
+		/*10000710 GEPHY_CTRL0[9:6] = 1 */
+		gpio_base_virt = ioremap(GPIO_GO_BASE, 0x10);
+		reg_value = sys_reg_read(gpio_base_virt);
+		reg_value = reg_value | 0x3c0;
+		sys_reg_write(gpio_base_virt, reg_value);
+		iounmap(gpio_base_virt);
+
+		gpio_base_virt = ioremap(GPIO_MODE_BASE, 0x100);
+		/*10217310 GPIO_MODE1 [31:16] = 0x0*/
+		reg_value = sys_reg_read(gpio_base_virt + 0x10);
+		/* CLEANUP: the original also ANDed with ~0xffff0000,
+		 * a no-op after masking with 0x0000ffff; removed.
+		 */
+		reg_value &= 0x0000ffff;
+		sys_reg_write(gpio_base_virt + 0x10, reg_value);
+
+		/*10217320 GPIO_MODE2(gpio17/18/21/22/23)*/
+		reg_value = sys_reg_read(gpio_base_virt + 0x20);
+		reg_value = reg_value & (~0xfff00fff);
+		sys_reg_write(gpio_base_virt + 0x20, reg_value);
+		iounmap(gpio_base_virt);
+	}
+}
+
+static void esw_link_status_changed(int port_no, void *dev_id)
+{
+ unsigned int reg_val;
+
+ /* fix: "&reg_val" had been mangled to the "(R)" glyph by an encoding pass */
+ mii_mgr_read(31, (0x3008 + (port_no * 0x100)), &reg_val);
+ if (reg_val & 0x1) /* bit 0 of the per-port MAC status register: link state */
+ pr_info("ESW: Link Status Changed - Port%d Link UP\n", port_no);
+ else
+ pr_info("ESW: Link Status Changed - Port%d Link Down\n",
+ port_no);
+}
+
+irqreturn_t gsw_interrupt(int irq, void *resv)
+{
+ unsigned long flags;
+ unsigned int reg_int_val;
+ struct net_device *dev = dev_raether;
+ struct END_DEVICE *ei_local = netdev_priv(dev);
+ void *dev_id = NULL;
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ /* fix: "&reg_int_val" had been mangled to the "(R)" glyph by an encoding pass */
+ mii_mgr_read(31, 0x700c, &reg_int_val);
+
+ if (reg_int_val & P4_LINK_CH)
+ esw_link_status_changed(4, dev_id);
+
+ if (reg_int_val & P3_LINK_CH)
+ esw_link_status_changed(3, dev_id);
+ if (reg_int_val & P2_LINK_CH)
+ esw_link_status_changed(2, dev_id);
+ if (reg_int_val & P1_LINK_CH)
+ esw_link_status_changed(1, dev_id);
+ if (reg_int_val & P0_LINK_CH)
+ esw_link_status_changed(0, dev_id);
+
+ mii_mgr_write(31, 0x700c, 0x1f); /* ack switch link change */
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+u32 phy_tr_dbg(u8 phyaddr, char *type, u32 data_addr, u8 ch_num)
+{
+ u16 page_reg = 31;
+ u32 token_ring_debug_reg = 0x52B5;
+ u32 token_ring_control_reg = 0x10;
+ u32 token_ring_low_data_reg = 0x11;
+ u32 token_ring_high_data_reg = 0x12;
+ u16 ch_addr = 0;
+ u32 node_addr = 0;
+ u32 value = 0;
+ u32 value_high = 0;
+ u32 value_low = 0;
+
+ if (strncmp(type, "DSPF", 4) == 0) {
+ /* DSP Filter Debug Node*/
+ ch_addr = 0x02;
+ node_addr = 0x0D;
+ } else if (strncmp(type, "PMA", 3) == 0) {
+ /*PMA Debug Node*/
+ ch_addr = 0x01;
+ node_addr = 0x0F;
+ } else if (strncmp(type, "TR", 2) == 0) {
+ /* Timing Recovery Debug Node */
+ ch_addr = 0x01;
+ node_addr = 0x0D;
+ } else if (strncmp(type, "PCS", 3) == 0) {
+ /* R1000PCS Debug Node */
+ ch_addr = 0x02;
+ node_addr = 0x0F;
+ } else if (strncmp(type, "FFE", 3) == 0) {
+ /* FFE Debug Node */
+ ch_addr = ch_num;
+ node_addr = 0x04;
+ } else if (strncmp(type, "ECT", 3) == 0) {
+ /* EC/Tail Debug Node - must be tested before "EC", else unreachable */
+ ch_addr = ch_num;
+ node_addr = 0x01;
+ } else if (strncmp(type, "EC", 2) == 0) {
+ /* ECC Debug Node */
+ ch_addr = ch_num;
+ node_addr = 0x00;
+ } else if (strncmp(type, "NC", 2) == 0) {
+ /* EC/NC Debug Node */
+ ch_addr = ch_num;
+ node_addr = 0x01;
+ } else if (strncmp(type, "DFEDC", 5) == 0) {
+ /* DFETail/DC Debug Node */
+ ch_addr = ch_num;
+ node_addr = 0x05;
+ } else if (strncmp(type, "DEC", 3) == 0) {
+ /* R1000DEC Debug Node */
+ ch_addr = 0x00;
+ node_addr = 0x07;
+ } else if (strncmp(type, "CRC", 3) == 0) {
+ /* R1000CRC Debug Node */
+ ch_addr = ch_num;
+ node_addr = 0x06;
+ } else if (strncmp(type, "AN", 2) == 0) {
+ /* Autoneg Debug Node */
+ ch_addr = 0x00;
+ node_addr = 0x0F;
+ } else if (strncmp(type, "CMI", 3) == 0) {
+ /* CMI Debug Node */
+ ch_addr = 0x03;
+ node_addr = 0x0F;
+ } else if (strncmp(type, "SUPV", 4) == 0) {
+ /* SUPV PHY Debug Node */
+ ch_addr = 0x00;
+ node_addr = 0x0D;
+ } else {
+ pr_info("Wrong TR register Type !");
+ return 0xFFFF;
+ }
+ data_addr = data_addr & 0x3F; /* 6-bit data address field */
+
+ tc_mii_write(phyaddr, page_reg, token_ring_debug_reg);
+ tc_mii_write(phyaddr, token_ring_control_reg,
+ (1 << 15) | (1 << 13) | (ch_addr << 11) | (node_addr << 7) | (data_addr << 1));
+
+ value_low = tc_mii_read(phyaddr, token_ring_low_data_reg);
+ value_high = tc_mii_read(phyaddr, token_ring_high_data_reg);
+ value = value_low + ((value_high & 0x00FF) << 16); /* high byte lands in bits 23:16 */
+ pr_info("*%s => Phyaddr=%d, ch_addr=%d, node_addr=0x%X, data_addr=0x%X , value=0x%X\r\n",
+ type, phyaddr, ch_addr, node_addr, data_addr, value);
+ tc_mii_write(phyaddr, page_reg, 0x00);/* V1.11 */
+
+ return value;
+}
+
+void esw_show_debug_log(u32 phy_addr)
+{
+ u32 val;
+
+ val = phy_tr_dbg(phy_addr, "PMA", 0x38, 0); /* PMA debug reg 0x38: VGA state, 5 bits per pair */
+ pr_info("VgaStateA =0x%x\n", ((val >> 4) & 0x1F));
+ pr_info("VgaStateB =0x%x\n", ((val >> 9) & 0x1F));
+ pr_info("VgaStateC =0x%x\n", ((val >> 14) & 0x1F));
+ pr_info("VgaStateD =0x%x\n", ((val >> 19) & 0x1F));
+
+ /* pairA */
+ val = tc_phy_read_dev_reg(phy_addr, 0x1E, 0x9B); /* MMD 0x1E regs 0x9B/0x9C: MSE per pair */
+ pr_info("XX0 0x1E,0x9B =0x%x\n", val);
+ val = (val >> 8) & 0xFF;
+ pr_info("AA0 lch_mse_mdcA =0x%x\r\n", val);
+
+ /* Pair B */
+ val = tc_phy_read_dev_reg(phy_addr, 0x1E, 0x9B);
+ pr_info("XX1 0x1E,0x9B =0x%x\n", val);
+ val = (val) & 0xFF; /* V1.16 */
+ pr_info("AA1 lch_mse_mdcB =0x%x\r\n", val);
+ /* Pair C */
+ val = tc_phy_read_dev_reg(phy_addr, 0x1E, 0x9C);
+ pr_info("XX2 0x1E,0x9C =0x%x\n", val);
+ val = (val >> 8) & 0xFF;
+ pr_info("AA2 lch_mse_mdcC =0x%x\r\n", val);
+
+ /* Pair D */
+ val = tc_phy_read_dev_reg(phy_addr, 0x1E, 0x9C);
+ pr_info("XX3 0x1E,0x9C =0x%x\n", val);
+ val = (val) & 0xFF; /* V1.16 */
+ pr_info("AA3 lch_mse_mdcD =0x%x\r\n", val);
+}
+
+irqreturn_t esw_interrupt(int irq, void *resv)
+{
+ unsigned long flags;
+ u32 phy_val;
+ int i;
+ static unsigned int port_status[5] = {0, 0, 0, 0, 0}; /* last seen link state per port, persists across IRQs */
+ struct net_device *dev = dev_raether;
+ struct END_DEVICE *ei_local = netdev_priv(dev);
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ /* disable irq mask and ack irq status */
+ sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x4, 0xffffffff);
+ sys_reg_write(ETHDMASYS_ETH_SW_BASE, 0x04000000);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+ for (i = 0; i < 5; i++) { /* NOTE(review): MDIO polling done outside the lock - confirm intended */
+ mii_mgr_read(i, 1, &phy_val); /* MII reg 1 (BMSR); bit 2 = link status */
+ if (port_status[i] != ((phy_val & 0x4) >> 2)) {
+ if (port_status[i] == 0) {
+ port_status[i] = 1;
+ pr_info("ESW: Link Status Changed - Port%d Link Up\n", i);
+ } else {
+ port_status[i] = 0;
+ pr_info("ESW: Link Status Changed - Port%d Link Down\n", i);
+ }
+ if (ei_local->architecture & LEOPARD_EPHY) {
+ if (i == 0)
+ esw_show_debug_log(i);/*port0 giga port*/
+ }
+ }
+ }
+ /* enable irq mask */
+ sys_reg_write(ETHDMASYS_ETH_SW_BASE + 0x4, 0xfbffffff);
+ return IRQ_HANDLED;
+}
+
+int ephy_ioctl(struct net_device *dev, struct ifreq *ifr,
+ struct ephy_ioctl_data *ioctl_data)
+{
+ int ret = 0; /* 0 on success, 1 on unknown command */
+ unsigned int cmd;
+ u8 cnt = 0; /* retry counter, capped at 3 attempts */
+ u8 port_num = 0;
+
+ cmd = ioctl_data->cmd;
+ pr_info("%s : cmd =%x\n", __func__, cmd);
+ switch (cmd) {
+ case RAETH_VBG_IEXT_CALIBRATION:
+ cnt = 0; /* NOTE(review): cnt/flag reset once for all 5 ports here, but per-port below - confirm intended */
+ fe_cal_vbg_flag = 0; /*restart calibration*/
+ for (port_num = 0; port_num < 5; port_num++) {
+ while ((fe_cal_vbg_flag == 0) && (cnt < 0x3)) {
+ fe_cal_vbg(port_num, 1);
+ cnt++;
+ if (fe_cal_vbg_flag == 0)
+ pr_info(" VBG wait! (%d)\n", cnt);
+ }
+ }
+ break;
+
+ case RAETH_TXG_R50_CALIBRATION:
+ cnt = 0; /* NOTE(review): same once-per-command reset as VBG case above */
+ fe_cal_r50_flag = 0;
+ for (port_num = 0; port_num < 5; port_num++) {
+ while ((fe_cal_r50_flag == 0) && (cnt < 0x3)) {
+ fe_cal_r50(port_num, 1);
+ cnt++;
+ if (fe_cal_r50_flag == 0)
+ pr_info(" FE R50 wait! (%d)\n", cnt);
+ }
+ }
+ break;
+
+ case RAETH_TXG_OFFSET_CALIBRATION:
+ for (port_num = 0; port_num < 5; port_num++) {
+ cnt = 0; /* per-port retry budget */
+ fe_cal_tx_offset_flag = 0;
+ while ((fe_cal_tx_offset_flag == 0) && (cnt < 0x3)) {
+ fe_cal_tx_offset(port_num, 100);
+ cnt++;
+ if (fe_cal_tx_offset_flag == 0)
+ pr_info("FeTxOffsetAnaCal wait!(%d)\n",
+ cnt);
+ }
+ cnt = 0;
+ fe_cal_tx_offset_flag_mdix = 0;
+ while ((fe_cal_tx_offset_flag_mdix == 0) && (cnt < 0x3)) {
+ fe_cal_tx_offset_mdix(port_num, 100);
+ cnt++;
+ if (fe_cal_tx_offset_flag_mdix == 0)
+ pr_info
+ ("FeTxOffsetAnaCal mdix wait!(%d)\n",
+ cnt);
+ }
+ }
+ break;
+
+ case RAETH_TXG_AMP_CALIBRATION:
+ for (port_num = 0; port_num < 5; port_num++) {
+ cnt = 0;
+ fe_cal_flag = 0;
+ while ((fe_cal_flag == 0) && (cnt < 0x3)) {
+ fe_cal_tx_amp(port_num, 300);
+ cnt++;
+ if (fe_cal_flag == 0)
+ pr_info("FETxAmpAnaCal wait!(%d)\n",
+ cnt);
+ }
+ cnt = 0;
+ fe_cal_flag_mdix = 0;
+ while ((fe_cal_flag_mdix == 0) && (cnt < 0x3)) {
+ fe_cal_tx_amp_mdix(port_num, 300);
+ cnt++;
+ if (fe_cal_flag_mdix == 0)
+ pr_info
+ ("FETxAmpAnaCal mdix wait!(%d)\n",
+ cnt);
+ }
+ }
+ break;
+
+ case GE_TXG_R50_CALIBRATION:
+ cnt = 0;
+ ge_cal_r50_flag = 0;
+ while ((ge_cal_r50_flag == 0) && (cnt < 0x3)) {
+ ge_cal_r50(0, 20); /* giga port is PHY 0 */
+ cnt++;
+ if (ge_cal_r50_flag == 0)
+ pr_info(" GE R50 wait! (%d)\n", cnt);
+ }
+ break;
+
+ case GE_TXG_OFFSET_CALIBRATION:
+ cnt = 0;
+ ge_cal_tx_offset_flag = 0;
+ while ((ge_cal_tx_offset_flag == 0) && (cnt < 0x3)) {
+ ge_cal_tx_offset(port_num, 20); /* port_num still 0 here */
+ cnt++;
+ if (ge_cal_tx_offset_flag == 0)
+ pr_info("GeTxOffsetAnaCal wait!(%d)\n",
+ cnt);
+ }
+ break;
+
+ case GE_TXG_AMP_CALIBRATION:
+ cnt = 0;
+ ge_cal_flag = 0;
+ while ((ge_cal_flag == 0) && (cnt < 0x3)) {
+ ge_cal_tx_amp(port_num, 20);
+ cnt++;
+ if (ge_cal_flag == 0)
+ pr_info("GETxAmpAnaCal wait!(%d)\n",
+ cnt);
+ }
+ break;
+ default:
+ ret = 1;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct of_device_id mediatek_gsw_match[] = { /* DT compatibles served by this driver */
+ {.compatible = "mediatek,mt7623-gsw"},
+ {.compatible = "mediatek,mt7621-gsw"},
+ {}, /* sentinel */
+};
+
+MODULE_DEVICE_TABLE(of, mediatek_gsw_match);
+
+static int mtk_gsw_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *pctl;
+ struct mtk_gsw *gsw;
+ int err;
+ const char *pm;
+
+ gsw = devm_kzalloc(&pdev->dev, sizeof(struct mtk_gsw), GFP_KERNEL);
+ if (!gsw)
+ return -ENOMEM;
+
+ gsw->dev = &pdev->dev;
+ gsw->trgmii_force = 2000;
+ gsw->irq = irq_of_parse_and_map(np, 0);
+ if (gsw->irq <= 0) /* fix: irq_of_parse_and_map() returns 0 on failure, never < 0 */
+ return -EINVAL;
+
+ err = of_property_read_string(pdev->dev.of_node, "mcm", &pm);
+ if (!err && !strcasecmp(pm, "enable")) {
+ gsw->mcm = true;
+ pr_info("== MT7530 MCM ==\n");
+ }
+
+ gsw->ethsys = syscon_regmap_lookup_by_phandle(np, "mediatek,ethsys");
+ if (IS_ERR(gsw->ethsys)) {
+ pr_err("fail at %s %d\n", __func__, __LINE__);
+ return PTR_ERR(gsw->ethsys);
+ }
+
+ if (!gsw->mcm) {
+ gsw->reset_pin = of_get_named_gpio(np, "mediatek,reset-pin", 0);
+ if (gsw->reset_pin < 0) {
+ pr_err("fail at %s %d\n", __func__, __LINE__);
+ return -1;
+ }
+ pr_debug("reset_pin_port= %d\n", gsw->reset_pin);
+
+ pctl = of_parse_phandle(np, "mediatek,pctl-regmap", 0);
+ if (!pctl) { /* fix: of_parse_phandle() returns NULL on failure, not ERR_PTR */
+ pr_err("fail at %s %d\n", __func__, __LINE__);
+ return -ENODEV;
+ }
+
+ gsw->pctl = syscon_node_to_regmap(pctl);
+ if (IS_ERR(gsw->pctl)) { /* fix: was testing pctl, letting an ERR_PTR leak into gsw->pctl */
+ pr_err("fail at %s %d\n", __func__, __LINE__);
+ return PTR_ERR(gsw->pctl);
+ }
+
+ gsw->pins = pinctrl_get(&pdev->dev);
+ if (!IS_ERR(gsw->pins)) { /* fix: pinctrl_get() returns ERR_PTR on failure, never NULL */
+ gsw->ps_reset =
+ pinctrl_lookup_state(gsw->pins, "reset");
+
+ if (IS_ERR(gsw->ps_reset)) {
+ dev_err(&pdev->dev,
+ "failed to lookup the gsw_reset state\n");
+ return PTR_ERR(gsw->ps_reset);
+ }
+ } else {
+ dev_err(&pdev->dev, "gsw get pinctrl fail\n");
+ return PTR_ERR(gsw->pins);
+ }
+ }
+
+ gsw->supply = devm_regulator_get(&pdev->dev, "mt7530");
+ if (IS_ERR(gsw->supply)) {
+ pr_info("fail at %s %d\n", __func__, __LINE__);
+ return PTR_ERR(gsw->supply);
+ }
+
+ if (gsw->mcm) {
+ gsw->b3v = devm_regulator_get(&pdev->dev, "b3v");
+ if (IS_ERR(gsw->b3v))
+ return PTR_ERR(gsw->b3v);
+ }
+
+ gsw->wllll = of_property_read_bool(np, "mediatek,wllll");
+
+ platform_set_drvdata(pdev, gsw);
+
+ return 0;
+}
+
+static int mtk_gsw_remove(struct platform_device *pdev)
+{
+ platform_set_drvdata(pdev, NULL); /* all resources are devm-managed; only drop drvdata */
+
+ return 0;
+}
+
+static struct platform_driver gsw_driver = {
+ .probe = mtk_gsw_probe,
+ .remove = mtk_gsw_remove,
+ .driver = {
+ .name = "mtk-gsw",
+ .owner = THIS_MODULE, /* NOTE(review): redundant - platform_driver_register() sets owner */
+ .of_match_table = mediatek_gsw_match,
+ },
+};
+
+module_platform_driver(gsw_driver);
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_switch.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_switch.h
new file mode 100644
index 0000000..7d3a9ee
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/ra_switch.h
@@ -0,0 +1,95 @@
+/* Copyright 2016 MediaTek Inc.
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef RA_SWITCH_H
+#define RA_SWITCH_H
+
+extern struct net_device *dev_raether;
+#define ANACAL_INIT 0x01 /* analog-calibration state codes */
+#define ANACAL_ERROR 0xFD
+#define ANACAL_SATURATION 0xFE
+#define ANACAL_FINISH 0xFF
+#define ANACAL_PAIR_A 0 /* twisted-pair indices */
+#define ANACAL_PAIR_B 1
+#define ANACAL_PAIR_C 2
+#define ANACAL_PAIR_D 3
+#define DAC_IN_0V 0x000
+#define DAC_IN_2V 0x0f0
+#define TX_AMP_OFFSET_0MV 0x20
+#define TX_AMP_OFFSET_VALID_BITS 6
+#define FE_CAL_P0 0
+#define FE_CAL_P1 1
+#if defined(CONFIG_MACH_LEOPARD)
+#define FE_CAL_COMMON 1 /* calibration results shared across FE ports on Leopard */
+#else
+#define FE_CAL_COMMON 0
+#endif
+
+void fe_sw_init(void);
+void fe_sw_preinit(struct END_DEVICE *ei_local);
+void fe_sw_deinit(struct END_DEVICE *ei_local);
+void sw_ioctl(struct ra_switch_ioctl_data *ioctl_data);
+irqreturn_t esw_interrupt(int irq, void *resv);
+irqreturn_t gsw_interrupt(int irq, void *resv);
+
+/* struct mtk_gsw - the structure that holds the SoC specific data
+ * @dev: The Device struct
+ * @base: The base address
+ * @piac_offset: The PIAC base may change depending on SoC
+ * @irq: The IRQ we are using
+ * @port4: The port4 mode on MT7620
+ * @autopoll: Is MDIO autopolling enabled
+ * @ethsys: The ethsys register map
+ * @pctl: The pin control register map
+ * @supply: The regulator powering the MT7530 core
+ */
+struct mtk_gsw {
+ struct mtk_eth *eth;
+ struct device *dev;
+ void __iomem *base;
+ u32 piac_offset;
+ int irq;
+ int port4;
+ unsigned long int autopoll;
+
+ struct regmap *ethsys;
+ struct regmap *pctl;
+
+ int trgmii_force;
+ bool wllll;
+ bool mcm; /* true when the switch is integrated (MCM), from the "mcm" DT property */
+ struct pinctrl *pins;
+ struct pinctrl_state *ps_default;
+ struct pinctrl_state *ps_reset;
+ int reset_pin; /* GPIO used to hard-reset an external MT7530 */
+ struct regulator *supply;
+ struct regulator *b3v; /* extra 3.3V rail, MCM only */
+};
+
+extern u8 fe_cal_flag; /* completion flags set by the fe_cal_* routines below */
+extern u8 fe_cal_flag_mdix;
+extern u8 fe_cal_tx_offset_flag;
+extern u8 fe_cal_tx_offset_flag_mdix;
+extern u8 fe_cal_r50_flag;
+extern u8 fe_cal_vbg_flag;
+void fe_cal_r50(u8 port_num, u32 delay);
+void fe_cal_tx_amp(u8 port_num, u32 delay);
+void fe_cal_tx_amp_mdix(u8 port_num, u32 delay);
+void fe_cal_tx_offset(u8 port_num, u32 delay);
+void fe_cal_tx_offset_mdix(u8 port_num, u32 delay);
+void fe_cal_vbg(u8 port_num, u32 delay);
+/*giga port calibration*/
+void ge_cal_r50(u8 port_num, u32 delay);
+void ge_cal_tx_amp(u8 port_num, u32 delay);
+void ge_cal_tx_offset(u8 port_num, u32 delay);
+void do_ge_phy_all_analog_cal(u8 phyaddr);
+#endif
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raeth_config.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raeth_config.h
new file mode 100644
index 0000000..428bbf7
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raeth_config.h
@@ -0,0 +1,329 @@
+/* Copyright 2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef RAETH_CONFIG_H
+#define RAETH_CONFIG_H
+
+/* compile flag for features */
+#define DELAY_INT
+
+#define CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+/*#define CONFIG_QDMA_QOS_WEB*/
+#define CONFIG_QDMA_QOS_MARK
+
+#if !defined(CONFIG_SOC_MT7621)
+#define CONFIG_RAETH_NAPI
+#define CONFIG_RAETH_TX_RX_INT_SEPARATION
+#define CONFIG_RAETH_NAPI_TX_RX
+//#define CONFIG_RAETH_NAPI_RX_ONLY
+#endif
+
+#if defined(CONFIG_SOC_MT7621)
+#define CONFIG_GE1_RGMII_FORCE_1000
+#define CONFIG_GE1_RGMII_FORCE_1200
+#define CONFIG_RA_NETWORK_TASKLET_BH
+#endif
+/*CONFIG_RA_NETWORK_TASKLET_BH*/
+/*CONFIG_RA_NETWORK_WORKQUEUE_BH*/
+/*CONFIG_RAETH_SPECIAL_TAG*/
+#define CONFIG_RAETH_CHECKSUM_OFFLOAD
+#if !defined(CONFIG_SOC_MT7621)
+//#define CONFIG_RAETH_HW_LRO
+#endif
+/* #define CONFIG_RAETH_HW_LRO_FORCE */
+/* #define CONFIG_RAETH_HW_LRO_DVT */
+//#define CONFIG_RAETH_HW_VLAN_TX
+/*CONFIG_RAETH_HW_VLAN_RX*/
+#define CONFIG_RAETH_TSO
+/*#define CONFIG_RAETH_ETHTOOL*/
+#define CONFIG_RAETH_QDMA
+/*CONFIG_RAETH_QDMATX_QDMARX*/
+/*CONFIG_HW_SFQ*/
+//#define CONFIG_RAETH_HW_IOCOHERENT
+#define CONFIG_RAETH_GMAC2
+/*#define CONFIG_RAETH_RSS_4RING*/
+/*#define CONFIG_RAETH_RSS_2RING*/
+/* definitions */
+#ifdef DELAY_INT
+#define FE_DLY_INT BIT(0)
+#else
+#define FE_DLY_INT (0)
+#endif
+#ifdef CONFIG_RAETH_HW_LRO
+#define FE_HW_LRO BIT(1)
+#else
+#define FE_HW_LRO (0)
+#endif
+#ifdef CONFIG_RAETH_HW_LRO_FORCE
+#define FE_HW_LRO_FPORT BIT(2)
+#else
+#define FE_HW_LRO_FPORT (0)
+#endif
+#ifdef CONFIG_RAETH_LRO
+#define FE_SW_LRO BIT(3)
+#else
+#define FE_SW_LRO (0)
+#endif
+#ifdef CONFIG_RAETH_QDMA
+#define FE_QDMA BIT(4)
+#else
+#define FE_QDMA (0)
+#endif
+#ifdef CONFIG_RAETH_NAPI
+#define FE_INT_NAPI BIT(5)
+#else
+#define FE_INT_NAPI (0)
+#endif
+#ifdef CONFIG_RA_NETWORK_WORKQUEUE_BH
+#define FE_INT_WORKQ BIT(6)
+#else
+#define FE_INT_WORKQ (0)
+#endif
+#ifdef CONFIG_RA_NETWORK_TASKLET_BH
+#define FE_INT_TASKLET BIT(7)
+#else
+#define FE_INT_TASKLET (0)
+#endif
+#ifdef CONFIG_RAETH_TX_RX_INT_SEPARATION
+#define FE_IRQ_SEPARATE BIT(8)
+#else
+#define FE_IRQ_SEPARATE (0)
+#endif
+#define FE_GE2_SUPPORT BIT(9)
+#ifdef CONFIG_RAETH_ETHTOOL
+#define FE_ETHTOOL BIT(10)
+#else
+#define FE_ETHTOOL (0)
+#endif
+#ifdef CONFIG_RAETH_CHECKSUM_OFFLOAD
+#define FE_CSUM_OFFLOAD BIT(11)
+#else
+#define FE_CSUM_OFFLOAD (0)
+#endif
+#ifdef CONFIG_RAETH_TSO
+#define FE_TSO BIT(12)
+#else
+#define FE_TSO (0)
+#endif
+#ifdef CONFIG_RAETH_TSOV6
+#define FE_TSO_V6 BIT(13)
+#else
+#define FE_TSO_V6 (0)
+#endif
+#ifdef CONFIG_RAETH_HW_VLAN_TX
+#define FE_HW_VLAN_TX BIT(14)
+#else
+#define FE_HW_VLAN_TX (0)
+#endif
+#ifdef CONFIG_RAETH_HW_VLAN_RX
+#define FE_HW_VLAN_RX BIT(15)
+#else
+#define FE_HW_VLAN_RX (0)
+#endif
+#ifdef CONFIG_RAETH_QDMA
+#define FE_QDMA_TX BIT(16)
+#else
+#define FE_QDMA_TX (0)
+#endif
+#ifdef CONFIG_RAETH_QDMATX_QDMARX
+#define FE_QDMA_RX BIT(17)
+#else
+#define FE_QDMA_RX (0)
+#endif
+#ifdef CONFIG_HW_SFQ
+#define FE_HW_SFQ BIT(18)
+#else
+#define FE_HW_SFQ (0)
+#endif
+#define FE_HW_IOCOHERENT BIT(19)
+
+#ifdef CONFIG_MTK_FPGA
+#define FE_FPGA_MODE BIT(20)
+#else
+#define FE_FPGA_MODE (0)
+#endif
+
+#ifdef CONFIG_RAETH_RSS_4RING
+#define FE_RSS_4RING BIT(20) /* NOTE(review): collides with FE_FPGA_MODE BIT(20) - confirm intended */
+#else
+#define FE_RSS_4RING (0)
+#endif
+
+#ifdef CONFIG_RAETH_RSS_2RING
+#define FE_RSS_2RING BIT(2) /* NOTE(review): collides with FE_HW_LRO_FPORT BIT(2) - confirm bit index */
+#else
+#define FE_RSS_2RING (0)
+#endif
+
+#ifdef CONFIG_RAETH_HW_LRO_REASON_DBG
+#define FE_HW_LRO_DBG BIT(21)
+#else
+#define FE_HW_LRO_DBG (0)
+#endif
+#ifdef CONFIG_RAETH_INT_DBG
+#define FE_RAETH_INT_DBG BIT(22)
+#else
+#define FE_RAETH_INT_DBG (0)
+#endif
+#ifdef CONFIG_USER_SNMPD
+#define USER_SNMPD BIT(23)
+#else
+#define USER_SNMPD (0)
+#endif
+#ifdef CONFIG_TASKLET_WORKQUEUE_SW
+#define TASKLET_WORKQUEUE_SW BIT(24)
+#else
+#define TASKLET_WORKQUEUE_SW (0)
+#endif
+#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+#define FE_HW_NAT BIT(25)
+#else
+#define FE_HW_NAT (0)
+#endif
+#ifdef CONFIG_RAETH_NAPI_TX_RX
+#define FE_INT_NAPI_TX_RX BIT(26)
+#else
+#define FE_INT_NAPI_TX_RX (0)
+#endif
+#ifdef CONFIG_QDMA_MQ
+#define QDMA_MQ BIT(27)
+#else
+#define QDMA_MQ (0)
+#endif
+#ifdef CONFIG_RAETH_NAPI_RX_ONLY
+#define FE_INT_NAPI_RX_ONLY BIT(28)
+#else
+#define FE_INT_NAPI_RX_ONLY (0)
+#endif
+#ifdef CONFIG_QDMA_SUPPORT_QOS
+#define FE_QDMA_FQOS BIT(29)
+#else
+#define FE_QDMA_FQOS (0)
+#endif
+
+#ifdef CONFIG_QDMA_QOS_WEB
+#define QDMA_QOS_WEB BIT(30)
+#else
+#define QDMA_QOS_WEB (0)
+#endif
+
+#ifdef CONFIG_QDMA_QOS_MARK
+#define QDMA_QOS_MARK BIT(31)
+#else
+#define QDMA_QOS_MARK (0)
+#endif
+
+#define MT7626_FE (7626) /* ei_local->chip_name identifiers */
+#define MT7623_FE (7623)
+#define MT7622_FE (7622)
+#define MT7621_FE (7621)
+#define LEOPARD_FE (1985)
+#define MT7986_FE (1985) /* NOTE(review): same value as LEOPARD_FE - alias? confirm */
+
+#define GMAC2 BIT(0)
+#define LAN_WAN_SUPPORT BIT(1)
+#define WAN_AT_P0 BIT(2)
+#define WAN_AT_P4 BIT(3)
+#if defined(CONFIG_GE1_RGMII_FORCE_1000)
+#define GE1_RGMII_FORCE_1000 BIT(4)
+#define GE1_TRGMII_FORCE_2000 (0)
+#define GE1_TRGMII_FORCE_2600 (0)
+#define MT7530_TRGMII_PLL_25M (0x0A00)
+#define MT7530_TRGMII_PLL_40M (0x0640)
+#elif defined(CONFIG_GE1_TRGMII_FORCE_2000)
+#define GE1_TRGMII_FORCE_2000 BIT(5)
+#define GE1_RGMII_FORCE_1000 (0)
+#define GE1_TRGMII_FORCE_2600 (0)
+#define MT7530_TRGMII_PLL_25M (0x1400)
+#define MT7530_TRGMII_PLL_40M (0x0C80)
+#elif defined(CONFIG_GE1_TRGMII_FORCE_2600)
+#define GE1_TRGMII_FORCE_2600 BIT(6)
+#define GE1_RGMII_FORCE_1000 (0)
+#define GE1_TRGMII_FORCE_2000 (0)
+#define MT7530_TRGMII_PLL_25M (0x1A00)
+#define MT7530_TRGMII_PLL_40M (0x1040)
+#define TRGMII
+#else
+#define GE1_RGMII_FORCE_1000 (0)
+#define GE1_TRGMII_FORCE_2000 (0)
+#define GE1_TRGMII_FORCE_2600 (0)
+#define MT7530_TRGMII_PLL_25M (0)
+#define MT7530_TRGMII_PLL_40M (0)
+#endif
+
+#define GE1_RGMII_AN BIT(7)
+#define GE1_SGMII_AN BIT(8)
+#define GE1_SGMII_FORCE_2500 BIT(9)
+#define GE1_RGMII_ONE_EPHY BIT(10)
+#define RAETH_ESW BIT(11)
+#define GE1_RGMII_NONE BIT(12)
+#define GE2_RGMII_FORCE_1000 BIT(13)
+#define GE2_RGMII_AN BIT(14)
+#define GE2_INTERNAL_GPHY BIT(15)
+#define GE2_SGMII_AN BIT(16)
+#define GE2_SGMII_FORCE_2500 BIT(17)
+#define MT7622_EPHY BIT(18)
+#define RAETH_SGMII BIT(19)
+#define GE2_RAETH_SGMII BIT(20)
+#define LEOPARD_EPHY BIT(21)
+#define SGMII_SWITCH BIT(22)
+#define LEOPARD_EPHY_GMII BIT(23)
+/* /#ifndef CONFIG_MAC_TO_GIGAPHY_MODE_ADDR */
+/* #define CONFIG_MAC_TO_GIGAPHY_MODE_ADDR (0) */
+/* #endif */
+/* #ifndef CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2 */
+/* #define CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2 (0) */
+/* #endif */
+
+/* macros */
+#define fe_features_config(end_device) \
+{ \
+end_device->features = 0; \
+end_device->features |= FE_DLY_INT; \
+end_device->features |= FE_HW_LRO; \
+end_device->features |= FE_HW_LRO_FPORT;\
+end_device->features |= FE_HW_LRO_DBG; \
+end_device->features |= FE_SW_LRO; \
+end_device->features |= FE_QDMA; \
+end_device->features |= FE_INT_NAPI; \
+end_device->features |= FE_INT_WORKQ; \
+end_device->features |= FE_INT_TASKLET; \
+end_device->features |= FE_IRQ_SEPARATE;\
+end_device->features |= FE_ETHTOOL; \
+end_device->features |= FE_CSUM_OFFLOAD;\
+end_device->features |= FE_TSO; \
+end_device->features |= FE_TSO_V6; \
+end_device->features |= FE_HW_VLAN_TX; \
+end_device->features |= FE_HW_VLAN_RX; \
+end_device->features |= FE_QDMA_TX; \
+end_device->features |= FE_QDMA_RX; \
+end_device->features |= FE_HW_SFQ; \
+end_device->features |= FE_FPGA_MODE; \
+end_device->features |= FE_HW_NAT; \
+end_device->features |= FE_INT_NAPI_TX_RX; \
+end_device->features |= FE_INT_NAPI_RX_ONLY; \
+end_device->features |= FE_QDMA_FQOS; \
+end_device->features |= QDMA_QOS_WEB; \
+end_device->features |= QDMA_QOS_MARK; \
+end_device->features |= FE_RSS_4RING; \
+end_device->features |= FE_RSS_2RING; \
+}
+
+#define fe_architecture_config(end_device) \
+{ \
+end_device->architecture = 0; \
+end_device->architecture |= GE1_TRGMII_FORCE_2000; \
+end_device->architecture |= GE1_TRGMII_FORCE_2600; \
+}
+#endif
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raeth_reg.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raeth_reg.h
new file mode 100644
index 0000000..df57115
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raeth_reg.h
@@ -0,0 +1,1366 @@
+/* Copyright 2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef RAETH_REG_H
+#define RAETH_REG_H
+
+#include <linux/mii.h> /* for struct mii_if_info in ra2882ethreg.h */
+#include <linux/version.h> /* check linux version */
+#include <linux/interrupt.h> /* for "struct tasklet_struct" */
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/workqueue.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+
+#include "raether.h"
+
+#define MAX_PACKET_SIZE 1514
+#define MIN_PACKET_SIZE 60
+#if defined(CONFIG_MACH_MT7623) || defined(CONFIG_SOC_MT7621)
+#define MAX_PTXD_LEN 0x3fff /* 16k */
+#define MAX_QTXD_LEN 0x3fff /* 16k */
+#else
+#define MAX_PTXD_LEN 0x3fff /* 16k */
+#define MAX_QTXD_LEN 0xffff
+#endif
+
+#define phys_to_bus(a) (a)
+
+extern void __iomem *ethdma_sysctl_base;
+extern void __iomem *ethdma_frame_engine_base;
+
+/* bits range: for example BITS(16,23) = 0xFF0000
+ * ==> (BIT(m)-1) = 0x0000FFFF ~(BIT(m)-1) => 0xFFFF0000
+ * ==> (BIT(n+1)-1) = 0x00FFFFFF
+ */
+#define BITS(m, n) (~(BIT(m) - 1) & ((BIT(n) - 1) | BIT(n)))
+
+#define ETHER_ADDR_LEN 6
+
+/* Phy Vender ID list */
+
+#define EV_ICPLUS_PHY_ID0 0x0243
+#define EV_ICPLUS_PHY_ID1 0x0D90
+#define EV_MARVELL_PHY_ID0 0x0141
+#define EV_MARVELL_PHY_ID1 0x0CC2
+#define EV_VTSS_PHY_ID0 0x0007
+#define EV_VTSS_PHY_ID1 0x0421
+
+#define ETHSYS_BASE 0x1b000000
+#define SGMII_CONFIG_0 BIT(9) /*SGMII path enable of GMAC1*/
+#define SGMII_CONFIG_1 BIT(8) /*SGMII path enable of GMAC1*/
+
+#if defined(CONFIG_PINCTRL_MT7622)
+#define SGMII_REG_BASE0 (0x1b128000)
+#define SGMII_REG_PHYA_BASE0 (0x1b12a000)
+#define SGMII_REG_BASE1 (0)
+#define SGMII_REG_PHYA_BASE1 (0)
+#elif defined(CONFIG_MACH_LEOPARD)
+#define SGMII_REG_BASE0 (0x1b128000)
+#define SGMII_REG_PHYA_BASE0 (0x1b128100)
+#define SGMII_REG_BASE1 (0x1b130000)
+#define SGMII_REG_PHYA_BASE1 (0x1b130100)
+#else
+#define SGMII_REG_BASE0 (0)
+#define SGMII_REG_PHYA_BASE0 (0)
+#define SGMII_REG_BASE1 (0)
+#define SGMII_REG_PHYA_BASE1 (0)
+#endif
+#define ETHSYS_MAC_BASE (0x1b110000)
+
+#if defined(CONFIG_MACH_LEOPARD)
+#define FE_RSTCTL 0x1B000034
+#define INFRA_BASE 0x1000070C
+#define GEPHY_CTRL0 0x10000710
+#define GPIO_GO_BASE GEPHY_CTRL0
+#define GPIO_MODE_BASE 0x10217300
+#else
+#define INFRA_BASE 0
+#define FE_RSTCTL 0
+#define GPIO_GO_BASE 0x10211800
+#define GPIO_MODE_BASE 0x10211300
+#endif
+
+/* ETHDMASYS base address
+ * for I2S/PCM/GDMA/HSDMA/FE/GMAC
+ */
+#define ETHDMASYS_BASE ethdma_sysctl_base
+#define ETHDMASYS_FRAME_ENGINE_BASE ethdma_frame_engine_base
+
+#define ETHDMASYS_SYSCTL_BASE ETHDMASYS_BASE
+#define ETHDMASYS_PPE_BASE (ETHDMASYS_FRAME_ENGINE_BASE + 0x0C00)
+#define ETHDMASYS_ETH_MAC_BASE (ETHDMASYS_FRAME_ENGINE_BASE + 0x10000)
+#if defined(CONFIG_MACH_MT7623) || defined(CONFIG_SOC_MT7621)
+#define ETHDMASYS_ETH_SW_BASE (ETHDMASYS_FRAME_ENGINE_BASE + 0x10000)
+#else
+#define ETHDMASYS_ETH_SW_BASE (ETHDMASYS_FRAME_ENGINE_BASE + 0x18000)
+#endif
+
+#define RALINK_FRAME_ENGINE_BASE ETHDMASYS_FRAME_ENGINE_BASE
+#define RALINK_PPE_BASE ETHDMASYS_PPE_BASE
+#define RALINK_SYSCTL_BASE ETHDMASYS_SYSCTL_BASE
+#define RALINK_ETH_MAC_BASE ETHDMASYS_ETH_MAC_BASE
+#define RALINK_ETH_SW_BASE ETHDMASYS_ETH_SW_BASE
+
+#define RSTCTL_FE_RST BIT(6)
+#define RALINK_FE_RST RSTCTL_FE_RST
+
+#define RSTCTL_ETH_RST BIT(23)
+#define RALINK_ETH_RST RSTCTL_ETH_RST
+
+/* FE_INT_STATUS */
+#define RX_COHERENT BIT(31)
+#define RX_DLY_INT BIT(30)
+#define TX_COHERENT BIT(29)
+#define TX_DLY_INT BIT(28)
+#define RING3_RX_DLY_INT BIT(27)
+#define RING2_RX_DLY_INT BIT(26)
+#define RING1_RX_DLY_INT BIT(25)
+#define RING0_RX_DLY_INT BIT(30) /* NOTE(review): same bit as RX_DLY_INT; RING1..3 pattern suggests a distinct bit - confirm vs datasheet */
+
+#define RSS_RX_INT0 (RX_DONE_INT0 | RX_DONE_INT1 | \
+ RING0_RX_DLY_INT | RING1_RX_DLY_INT)
+
+#define RSS_RX_RING0 (RX_DONE_INT0 | RING0_RX_DLY_INT)
+#define RSS_RX_RING1 (RX_DONE_INT1 | RING1_RX_DLY_INT)
+#define RSS_RX_RING2 (RX_DONE_INT2 | RING2_RX_DLY_INT)
+#define RSS_RX_RING3 (RX_DONE_INT3 | RING3_RX_DLY_INT)
+
+#define RSS_RX_INT1 (RX_DONE_INT2 | RX_DONE_INT3 | \
+ RING2_RX_DLY_INT | RING3_RX_DLY_INT)
+
+#define RSS_RX_DLY_INT0 (RING0_RX_DLY_INT | RING1_RX_DLY_INT)
+#define RSS_RX_DLY_INT1 (RING2_RX_DLY_INT | RING3_RX_DLY_INT)
+
+#define RSS_RX_DLY_INT (RING0_RX_DLY_INT | RING1_RX_DLY_INT | \
+ RING2_RX_DLY_INT | RING3_RX_DLY_INT)
+
+#define RXD_ERROR BIT(24)
+#define ALT_RPLC_INT3 BIT(23)
+#define ALT_RPLC_INT2 BIT(22)
+#define ALT_RPLC_INT1 BIT(21)
+
+#define RX_DONE_INT3 BIT(19)
+#define RX_DONE_INT2 BIT(18)
+#define RX_DONE_INT1 BIT(17)
+#define RX_DONE_INT0 BIT(16)
+
+#define TX_DONE_INT3 BIT(3)
+#define TX_DONE_INT2 BIT(2)
+#define TX_DONE_INT1 BIT(1)
+#define TX_DONE_INT0 BIT(0)
+
+#define RLS_COHERENT BIT(29)
+#define RLS_DLY_INT BIT(28)
+#define RLS_DONE_INT BIT(0)
+
+#define FE_INT_ALL (TX_DONE_INT3 | TX_DONE_INT2 | \
+ TX_DONE_INT1 | TX_DONE_INT0 | \
+ RX_DONE_INT0 | RX_DONE_INT1 | \
+ RX_DONE_INT2 | RX_DONE_INT3)
+
+#define QFE_INT_ALL (RLS_DONE_INT | RX_DONE_INT0 | \
+ RX_DONE_INT1 | RX_DONE_INT2 | RX_DONE_INT3)
+#define QFE_INT_DLY_INIT (RLS_DLY_INT | RX_DLY_INT)
+#define RX_INT_ALL (RX_DONE_INT0 | RX_DONE_INT1 | \
+ RX_DONE_INT2 | RX_DONE_INT3 | \
+ RING0_RX_DLY_INT | RING1_RX_DLY_INT | \
+ RING2_RX_DLY_INT | RING3_RX_DLY_INT | RX_DLY_INT)
+#define TX_INT_ALL (TX_DONE_INT0 | TX_DLY_INT)
+
+#define NUM_QDMA_PAGE 512
+#define QDMA_PAGE_SIZE 2048
+
+/* SW_INT_STATUS */
+#define ESW_PHY_POLLING (RALINK_ETH_MAC_BASE + 0x0000)
+#define MAC1_WOL (RALINK_ETH_SW_BASE + 0x0110)
+#define WOL_INT_CLR BIT(17)
+#define WOL_INT_EN BIT(1)
+#define WOL_EN BIT(0)
+
+#define P5_LINK_CH BIT(5)
+#define P4_LINK_CH BIT(4)
+#define P3_LINK_CH BIT(3)
+#define P2_LINK_CH BIT(2)
+#define P1_LINK_CH BIT(1)
+#define P0_LINK_CH BIT(0)
+
+#define RX_BUF_ALLOC_SIZE 2000
+#define FASTPATH_HEADROOM 64
+
+#define ETHER_BUFFER_ALIGN 32 /* /// Align on a cache line */
+
+#define ETHER_ALIGNED_RX_SKB_ADDR(addr) \
+ ((((unsigned long)(addr) + ETHER_BUFFER_ALIGN - 1) & \
+ ~(ETHER_BUFFER_ALIGN - 1)) - (unsigned long)(addr))
+
+struct PSEUDO_ADAPTER {
+ struct net_device *raeth_dev;
+ struct net_device *pseudo_dev;
+ struct net_device_stats stat;
+ struct mii_if_info mii_info;
+};
+
+#define MAX_PSEUDO_ENTRY 1
+
+/* Register Categories Definition */
+#if 0
+#define FE_PSE_OFFSET 0x0000
+#define CDMA_OFFSET 0x0400
+#define GDM1_OFFSET 0x0500
+#define ADMA_OFFSET 0x0800
+#define CDMQ_OFFSET 0x1400
+#define GDM2_OFFSET 0x1500
+#define CDM_OFFSET 0x1600
+#define QDMA_OFFSET 0x1800
+#define RSS_OFFSET 0x3000
+#define EDMA0_OFFSET 0x3800
+#define EDMA1_OFFSET 0x3C00
+#else
+#define FE_PSE_OFFSET 0x0000
+#define CDMA_OFFSET 0x0400
+#define GDM1_OFFSET 0x0500
+#define ADMA_OFFSET 0x4000
+#define CDMQ_OFFSET 0x1400
+#define GDM2_OFFSET 0x1500
+#define CDM_OFFSET 0x1600
+#define QDMA_OFFSET 0x4400
+#define RSS_OFFSET 0x2800
+#define EDMA0_OFFSET 0x3800
+#define EDMA1_OFFSET 0x3C00
+#endif
+
+/* Register Map Detail */
+/* FE/PSE */
+#define SYSCFG1 (RALINK_SYSCTL_BASE + 0x14)
+#define CLK_CFG_0 (RALINK_SYSCTL_BASE + 0x2C)
+#define PAD_RGMII2_MDIO_CFG (RALINK_SYSCTL_BASE + 0x58)
+#define FE_GLO_CFG (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x00)
+#define FE_RST_GL (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x04)
+#define FE_INT_STATUS2 (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x08)
+#define FOE_TS_T (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x10)
+#define FE_INT_ENABLE2 (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x0c)
+#define FE_INT_GRP (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x20)
+#define PSE_FQ_CFG (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x40)
+#define CDMA_FC_CFG (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x44)
+#define GDMA1_FC_CFG (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x48)
+#define GDMA2_FC_CFG (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x4C)
+#define CDMA_OQ_STA (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x50)
+#define GDMA1_OQ_STA (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x54)
+#define GDMA2_OQ_STA (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x58)
+#define PSE_IQ_STA (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x5C)
+
+#define MAC1_LINK BIT(24)
+#define MAC2_LINK BIT(25)
+#define PDMA_FC_CFG (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x100)
+#define FE_GLO_MISC (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x124)
+#define PSE_IQ_REV1 (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x140)
+#define PSE_IQ_REV2 (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x144)
+#define PSE_IQ_REV3 (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x148)
+#define PSE_IQ_REV4 (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x14C)
+#define PSE_IQ_REV5 (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x150)
+#define PSE_IQ_REV6 (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x154)
+#define PSE_IQ_REV7 (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x158)
+#define PSE_IQ_REV8 (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x15C)
+#define PSE_OQ_TH1 (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x160)
+#define PSE_OQ_TH2 (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x164)
+#define PSE_OQ_TH3 (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x168)
+#define PSE_OQ_TH4 (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x16C)
+#define PSE_OQ_TH5 (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x170)
+#define PSE_OQ_TH6 (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x174)
+#define PSE_OQ_TH7 (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x178)
+#define PSE_OQ_TH8 (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x17C)
+#define FE_PSE_FREE (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x240)
+#define FE_DROP_FQ (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x244)
+#define FE_DROP_FC (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x248)
+#define FE_DROP_PPE (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x24c)
+/* GDM1 */
+#define GDMA1_FWD_CFG (RALINK_FRAME_ENGINE_BASE + GDM1_OFFSET + 0x00)
+#define GDMA1_SHPR_CFG (RALINK_FRAME_ENGINE_BASE + GDM1_OFFSET + 0x04)
+#define GDMA1_MAC_ADRL (RALINK_FRAME_ENGINE_BASE + GDM1_OFFSET + 0x08)
+#define GDMA1_MAC_ADRH (RALINK_FRAME_ENGINE_BASE + GDM1_OFFSET + 0x0C)
+#define GDMA1_SCH_CFG GDMA1_SHPR_CFG
+/* CDMA */
+#define CDMA_CSG_CFG (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x000)
+#define CDMP_IG_CTRL (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x000)
+#define CDMP_EG_CTRL (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x004)
+#define GDMA_TX_GBCNT0 (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x300)
+#define GDMA_TX_GPCNT0 (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x304)
+#define GDMA_TX_SKIPCNT0 (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x308)
+#define GDMA_TX_COLCNT0 (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x30C)
+#define GDMA_RX_GBCNT0 (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x320)
+#define GDMA_RX_GPCNT0 (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x324)
+#define GDMA_RX_OERCNT0 (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x328)
+#define GDMA_RX_FERCNT0 (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x32C)
+#define GDMA_RX_SERCNT0 (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x330)
+#define GDMA_RX_LERCNT0 (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x334)
+#define GDMA_RX_CERCNT0 (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x338)
+#define GDMA_RX_FCCNT1 (RALINK_FRAME_ENGINE_BASE + CDMA_OFFSET + 0x33C)
+/* ADMA */
+#define TX_BASE_PTR0 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x000)
+#define TX_MAX_CNT0 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x004)
+#define TX_CTX_IDX0 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x008)
+#define TX_DTX_IDX0 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x00C)
+#define TX_BASE_PTR1 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x010)
+#define TX_MAX_CNT1 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x014)
+#define TX_CTX_IDX1 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x018)
+#define TX_DTX_IDX1 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x01C)
+#define TX_BASE_PTR2 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x020)
+#define TX_MAX_CNT2 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x024)
+#define TX_CTX_IDX2 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x028)
+#define TX_DTX_IDX2 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x02C)
+#define TX_BASE_PTR3 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x030)
+#define TX_MAX_CNT3 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x034)
+#define TX_CTX_IDX3 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x038)
+#define TX_DTX_IDX3 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x03C)
+#define RX_BASE_PTR0 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x100)
+#define RX_MAX_CNT0 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x104)
+#define RX_CALC_IDX0 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x108)
+#define RX_DRX_IDX0 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x10C)
+#define RX_BASE_PTR1 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x110)
+#define RX_MAX_CNT1 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x114)
+#define RX_CALC_IDX1 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x118)
+#define RX_DRX_IDX1 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x11C)
+#define RX_BASE_PTR2 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x120)
+#define RX_MAX_CNT2 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x124)
+#define RX_CALC_IDX2 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x128)
+#define RX_DRX_IDX2 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x12C)
+#define RX_BASE_PTR3 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x130)
+#define RX_MAX_CNT3 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x134)
+#define RX_CALC_IDX3 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x138)
+#define RX_DRX_IDX3 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x13C)
+#define PDMA_INFO (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x200)
+#define PDMA_GLO_CFG (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x204)
+#define PDMA_RST_IDX (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x208)
+#define PDMA_RST_CFG (PDMA_RST_IDX)
+#define DLY_INT_CFG (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x20C)
+#define FREEQ_THRES (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x210)
+#define INT_STATUS (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x220)
+#define FE_INT_STATUS (INT_STATUS)
+#define INT_MASK (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x228)
+#define FE_INT_ENABLE (INT_MASK)
+#define SCH_Q01_CFG (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x280)
+#define SCH_Q23_CFG (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x284)
+#define PDMA_INT_GRP1 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x250)
+#define PDMA_INT_GRP2 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x254)
+#define PDMA_INT_GRP3 (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x22c)
+/* GDM2 */
+#define GDMA2_FWD_CFG (RALINK_FRAME_ENGINE_BASE + GDM2_OFFSET + 0x00)
+#define GDMA2_SHPR_CFG (RALINK_FRAME_ENGINE_BASE + GDM2_OFFSET + 0x04)
+#define GDMA2_MAC_ADRL (RALINK_FRAME_ENGINE_BASE + GDM2_OFFSET + 0x08)
+#define GDMA2_MAC_ADRH (RALINK_FRAME_ENGINE_BASE + GDM2_OFFSET + 0x0C)
+#define GDMA2_SCH_CFG GDMA2_SHPR_CFG
+/* QDMA */
+#define QTX_CFG_0 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x000)
+#define QTX_SCH_0 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x004)
+#define QTX_HEAD_0 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x008)
+#define QTX_TAIL_0 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x00C)
+#define QTX_CFG_1 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x010)
+#define QTX_SCH_1 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x014)
+#define QTX_HEAD_1 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x018)
+#define QTX_TAIL_1 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x01C)
+#define QTX_CFG_2 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x020)
+#define QTX_SCH_2 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x024)
+#define QTX_HEAD_2 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x028)
+#define QTX_TAIL_2 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x02C)
+#define QTX_CFG_3 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x030)
+#define QTX_SCH_3 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x034)
+#define QTX_HEAD_3 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x038)
+#define QTX_TAIL_3 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x03C)
+#define QTX_CFG_4 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x040)
+#define QTX_SCH_4 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x044)
+#define QTX_HEAD_4 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x048)
+#define QTX_TAIL_4 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x04C)
+#define QTX_CFG_5 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x050)
+#define QTX_SCH_5 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x054)
+#define QTX_HEAD_5 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x058)
+#define QTX_TAIL_5 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x05C)
+#define QTX_CFG_6 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x060)
+#define QTX_SCH_6 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x064)
+#define QTX_HEAD_6 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x068)
+#define QTX_TAIL_6 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x06C)
+#define QTX_CFG_7 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x070)
+#define QTX_SCH_7 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x074)
+#define QTX_HEAD_7 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x078)
+#define QTX_TAIL_7 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x07C)
+#define QTX_CFG_8 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x080)
+#define QTX_SCH_8 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x084)
+#define QTX_HEAD_8 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x088)
+#define QTX_TAIL_8 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x08C)
+#define QTX_CFG_9 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x090)
+#define QTX_SCH_9 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x094)
+#define QTX_HEAD_9 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x098)
+#define QTX_TAIL_9 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x09C)
+#define QTX_CFG_10 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0A0)
+#define QTX_SCH_10 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0A4)
+#define QTX_HEAD_10 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0A8)
+#define QTX_TAIL_10 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0AC)
+#define QTX_CFG_11 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0B0)
+#define QTX_SCH_11 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0B4)
+#define QTX_HEAD_11 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0B8)
+#define QTX_TAIL_11 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0BC)
+#define QTX_CFG_12 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0C0)
+#define QTX_SCH_12 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0C4)
+#define QTX_HEAD_12 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0C8)
+#define QTX_TAIL_12 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0CC)
+#define QTX_CFG_13 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0D0)
+#define QTX_SCH_13 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0D4)
+#define QTX_HEAD_13 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0D8)
+#define QTX_TAIL_13 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0DC)
+#define QTX_CFG_14 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0E0)
+#define QTX_SCH_14 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0E4)
+#define QTX_HEAD_14 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0E8)
+#define QTX_TAIL_14 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0EC)
+#define QTX_CFG_15 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0F0)
+#define QTX_SCH_15 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0F4)
+#define QTX_HEAD_15 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0F8)
+#define QTX_TAIL_15 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0FC)
+#define QRX_BASE_PTR_0 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x100)
+#define QRX_MAX_CNT_0 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x104)
+#define QRX_CRX_IDX_0 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x108)
+#define QRX_DRX_IDX_0 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x10C)
+#define QRX_BASE_PTR_1 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x110)
+#define QRX_MAX_CNT_1 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x114)
+#define QRX_CRX_IDX_1 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x118)
+#define QRX_DRX_IDX_1 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x11C)
+#define VQTX_TB_BASE_0 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x180)
+#define VQTX_TB_BASE_1 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x184)
+#define VQTX_TB_BASE_2 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x188)
+#define VQTX_TB_BASE_3 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x18C)
+#define QDMA_INFO (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x200)
+#define QDMA_GLO_CFG (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x204)
+#define QDMA_RST_IDX (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x208)
+#define QDMA_RST_CFG (QDMA_RST_IDX)
+#define QDMA_DELAY_INT (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x20C)
+#define QDMA_FC_THRES (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x210)
+#define QDMA_TX_SCH (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x214)
+#define QDMA_INT_STS (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x218)
+#define QFE_INT_STATUS (QDMA_INT_STS)
+#define QDMA_INT_MASK (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x21C)
+#define QFE_INT_ENABLE (QDMA_INT_MASK)
+#define QDMA_TRTCM (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x220)
+#define QDMA_DATA0 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x224)
+#define QDMA_DATA1 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x228)
+#define QDMA_RED_THRES (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x22C)
+#define QDMA_TEST (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x230)
+#define QDMA_DMA (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x234)
+#define QDMA_BMU (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x238)
+#define QDMA_HRED1 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x240)
+#define QDMA_HRED2 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x244)
+#define QDMA_SRED1 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x248)
+#define QDMA_SRED2 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x24C)
+#define QTX_MIB_IF (RALINK_FRAME_ENGINE_BASE + 0x1abc)
+#define QTX_CTX_PTR (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x300)
+#define QTX_DTX_PTR (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x304)
+#define QTX_FWD_CNT (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x308)
+#define QTX_CRX_PTR (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x310)
+#define QTX_DRX_PTR (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x314)
+#define QTX_RLS_CNT (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x318)
+#define QDMA_FQ_HEAD (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x320)
+#define QDMA_FQ_TAIL (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x324)
+#define QDMA_FQ_CNT (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x328)
+#define QDMA_FQ_BLEN (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x32C)
+#define QTX_Q0MIN_BK (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x350)
+#define QTX_Q1MIN_BK (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x354)
+#define QTX_Q2MIN_BK (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x358)
+#define QTX_Q3MIN_BK (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x35C)
+#define QTX_Q0MAX_BK (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x360)
+#define QTX_Q1MAX_BK (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x364)
+#define QTX_Q2MAX_BK (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x368)
+#define QTX_Q3MAX_BK (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x36C)
+#define QDMA_INT_GRP1 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x220)
+#define QDMA_INT_GRP2 (RALINK_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x224)
+
+#define DELAY_INT_INIT 0x8f0f8f0f
+#define FE_INT_DLY_INIT (TX_DLY_INT | RX_DLY_INT)
+#define RSS_INT_DLY_INT_2RING (RING0_RX_DLY_INT | RING1_RX_DLY_INT)
+#define RSS_INT_DLY_INT (RING0_RX_DLY_INT | RING1_RX_DLY_INT | \
+ RING2_RX_DLY_INT | RING3_RX_DLY_INT | TX_DLY_INT)
+
+/* LRO global control */
+/* Bits [15:0]:LRO_ALT_RFSH_TIMER, Bits [20:16]:LRO_ALT_TICK_TIMER */
+#define LRO_ALT_REFRESH_TIMER (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x001C)
+
+/* LRO auto-learn table info */
+#define PDMA_FE_ALT_CF8 (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x0300)
+#define PDMA_FE_ALT_SGL_CFC (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x0304)
+#define PDMA_FE_ALT_SEQ_CFC (RALINK_FRAME_ENGINE_BASE + FE_PSE_OFFSET + 0x0308)
+
+/* LRO controls */
+#define ADMA_LRO_CTRL_OFFSET (ADMA_OFFSET + 0x180)
+/*Bit [0]:LRO_EN, Bit [1]:LRO_IPv6_EN, Bit [2]:MULTIPLE_NON_LRO_RX_RING_EN,
+ * Bit [3]:MULTIPLE_RXD_PREFETCH_EN, Bit [4]:RXD_PREFETCH_EN,
+ * Bit [5]:LRO_DLY_INT_EN, Bit [6]:LRO_CRSN_BNW, Bit [7]:L3_CKS_UPD_EN,
+ * Bit [20]:first_ineligible_pkt_redirect_en, Bit [21]:cr_lro_alt_score_mode,
+ * Bit [22]:cr_lro_alt_rplc_mode, Bit [23]:cr_lro_l4_ctrl_psh_en,
+ * Bits [28:26]:LRO_RING_RELINGUISH_REQ, Bits [31:29]:LRO_RING_RELINGUISH_DONE
+ */
+#define ADMA_LRO_CTRL_DW0 (RALINK_FRAME_ENGINE_BASE + \
+ ADMA_LRO_CTRL_OFFSET + 0x00)
+/* Bits [31:0]:LRO_CPU_REASON */
+#define ADMA_LRO_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE + \
+ ADMA_LRO_CTRL_OFFSET + 0x04)
+/* Bits [31:0]:AUTO_LEARN_LRO_ELIGIBLE_THRESHOLD */
+#define ADMA_LRO_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE + \
+ ADMA_LRO_CTRL_OFFSET + 0x08)
+/*Bits [7:0]:LRO_MAX_AGGREGATED_CNT,
+ * Bits [11:8]:LRO_VLAN_EN, Bits [13:12]:LRO_VLAN_VID_CMP_DEPTH,
+ * Bit [14]:ADMA_FW_RSTN_REQ, Bit [15]:ADMA_MODE, Bits [31:16]:LRO_MIN_RXD_SDL0
+ */
+#define ADMA_LRO_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE + \
+ ADMA_LRO_CTRL_OFFSET + 0x0C)
+
+/* LRO RX delay interrupt configurations */
+#define LRO_RX1_DLY_INT (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x0270)
+#define LRO_RX2_DLY_INT (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x0274)
+#define LRO_RX3_DLY_INT (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x0278)
+
+/* LRO auto-learn configurations */
+#define PDMA_LRO_ATL_OVERFLOW_ADJ_OFFSET (ADMA_OFFSET + 0x190)
+#define PDMA_LRO_ATL_OVERFLOW_ADJ (RALINK_FRAME_ENGINE_BASE + \
+ PDMA_LRO_ATL_OVERFLOW_ADJ_OFFSET)
+#define LRO_ALT_SCORE_DELTA (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x024c)
+
+/* LRO agg timer configurations */
+#define LRO_MAX_AGG_TIME (RALINK_FRAME_ENGINE_BASE + ADMA_OFFSET + 0x025c)
+
+/* LRO configurations of RX ring #0 */
+#define LRO_RXRING0_OFFSET (ADMA_OFFSET + 0x300)
+#define LRO_RX_RING0_DIP_DW0 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING0_OFFSET + 0x04)
+#define LRO_RX_RING0_DIP_DW1 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING0_OFFSET + 0x08)
+#define LRO_RX_RING0_DIP_DW2 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING0_OFFSET + 0x0C)
+#define LRO_RX_RING0_DIP_DW3 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING0_OFFSET + 0x10)
+#define LRO_RX_RING0_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING0_OFFSET + 0x28)
+/* Bit [8]:RING0_VLD, Bit [9]:RING0_MYIP_VLD */
+#define LRO_RX_RING0_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING0_OFFSET + 0x2C)
+#define LRO_RX_RING0_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING0_OFFSET + 0x30)
+/* LRO configurations of RX ring #1 */
+#define LRO_RXRING1_OFFSET (ADMA_OFFSET + 0x340)
+#define LRO_RX_RING1_STP_DTP_DW (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING1_OFFSET + 0x00)
+#define LRO_RX_RING1_DIP_DW0 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING1_OFFSET + 0x04)
+#define LRO_RX_RING1_DIP_DW1 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING1_OFFSET + 0x08)
+#define LRO_RX_RING1_DIP_DW2 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING1_OFFSET + 0x0C)
+#define LRO_RX_RING1_DIP_DW3 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING1_OFFSET + 0x10)
+#define LRO_RX_RING1_SIP_DW0 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING1_OFFSET + 0x14)
+#define LRO_RX_RING1_SIP_DW1 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING1_OFFSET + 0x18)
+#define LRO_RX_RING1_SIP_DW2 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING1_OFFSET + 0x1C)
+#define LRO_RX_RING1_SIP_DW3 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING1_OFFSET + 0x20)
+#define LRO_RX_RING1_CTRL_DW0 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING1_OFFSET + 0x24)
+#define LRO_RX_RING1_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING1_OFFSET + 0x28)
+#define LRO_RX_RING1_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING1_OFFSET + 0x2C)
+#define LRO_RX_RING1_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING1_OFFSET + 0x30)
+#define LRO_RXRING2_OFFSET (ADMA_OFFSET + 0x380)
+#define LRO_RX_RING2_STP_DTP_DW (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING2_OFFSET + 0x00)
+#define LRO_RX_RING2_DIP_DW0 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING2_OFFSET + 0x04)
+#define LRO_RX_RING2_DIP_DW1 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING2_OFFSET + 0x08)
+#define LRO_RX_RING2_DIP_DW2 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING2_OFFSET + 0x0C)
+#define LRO_RX_RING2_DIP_DW3 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING2_OFFSET + 0x10)
+#define LRO_RX_RING2_SIP_DW0 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING2_OFFSET + 0x14)
+#define LRO_RX_RING2_SIP_DW1 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING2_OFFSET + 0x18)
+#define LRO_RX_RING2_SIP_DW2 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING2_OFFSET + 0x1C)
+#define LRO_RX_RING2_SIP_DW3 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING2_OFFSET + 0x20)
+#define LRO_RX_RING2_CTRL_DW0 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING2_OFFSET + 0x24)
+#define LRO_RX_RING2_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING2_OFFSET + 0x28)
+#define LRO_RX_RING2_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING2_OFFSET + 0x2C)
+#define LRO_RX_RING2_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING2_OFFSET + 0x30)
+#define LRO_RXRING3_OFFSET (ADMA_OFFSET + 0x3C0)
+#define LRO_RX_RING3_STP_DTP_DW (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING3_OFFSET + 0x00)
+#define LRO_RX_RING3_DIP_DW0 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING3_OFFSET + 0x04)
+#define LRO_RX_RING3_DIP_DW1 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING3_OFFSET + 0x08)
+#define LRO_RX_RING3_DIP_DW2 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING3_OFFSET + 0x0C)
+#define LRO_RX_RING3_DIP_DW3 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING3_OFFSET + 0x10)
+#define LRO_RX_RING3_SIP_DW0 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING3_OFFSET + 0x14)
+#define LRO_RX_RING3_SIP_DW1 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING3_OFFSET + 0x18)
+#define LRO_RX_RING3_SIP_DW2 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING3_OFFSET + 0x1C)
+#define LRO_RX_RING3_SIP_DW3 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING3_OFFSET + 0x20)
+#define LRO_RX_RING3_CTRL_DW0 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING3_OFFSET + 0x24)
+#define LRO_RX_RING3_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING3_OFFSET + 0x28)
+#define LRO_RX_RING3_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING3_OFFSET + 0x2C)
+#define LRO_RX_RING3_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE + \
+ LRO_RXRING3_OFFSET + 0x30)
+
+#define ADMA_DBG_OFFSET (ADMA_OFFSET + 0x230)
+#define ADMA_TX_DBG0 (RALINK_FRAME_ENGINE_BASE + ADMA_DBG_OFFSET + 0x00)
+#define ADMA_TX_DBG1 (RALINK_FRAME_ENGINE_BASE + ADMA_DBG_OFFSET + 0x04)
+#define ADMA_RX_DBG0 (RALINK_FRAME_ENGINE_BASE + ADMA_DBG_OFFSET + 0x08)
+#define ADMA_RX_DBG1 (RALINK_FRAME_ENGINE_BASE + ADMA_DBG_OFFSET + 0x0C)
+
+/********RSS CR ************/
+#define ADMA_RSS_GLO_CFG (RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x00)
+#define ADMA_RSS_INDR_TABLE_DW0 (RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x50)
+#define ADMA_RSS_INDR_TABLE_DW1 (RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x54)
+#define ADMA_RSS_INDR_TABLE_DW2 (RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x58)
+#define ADMA_RSS_INDR_TABLE_DW3 (RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x5C)
+#define ADMA_RSS_INDR_TABLE_DW4 (RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x60)
+#define ADMA_RSS_INDR_TABLE_DW5 (RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x64)
+#define ADMA_RSS_INDR_TABLE_DW6 (RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x68)
+#define ADMA_RSS_INDR_TABLE_DW7 (RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x6C)
+
+#define ADMA_RSS_HASH_KEY_DW0 (RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x20)
+#define ADMA_RSS_HASH_KEY_DW1 (RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x24)
+#define ADMA_RSS_HASH_KEY_DW2 (RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x28)
+#define ADMA_RSS_HASH_KEY_DW3 (RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x2C)
+#define ADMA_RSS_HASH_KEY_DW4 (RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x30)
+#define ADMA_RSS_HASH_KEY_DW5 (RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x34)
+#define ADMA_RSS_HASH_KEY_DW6 (RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x38)
+#define ADMA_RSS_HASH_KEY_DW7 (RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x3C)
+#define ADMA_RSS_HASH_KEY_DW8 (RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x40)
+#define ADMA_RSS_HASH_KEY_DW9 (RALINK_FRAME_ENGINE_BASE + RSS_OFFSET + 0x44)
+/* LRO RX ring mode */
+#define PDMA_RX_NORMAL_MODE (0x0)
+#define PDMA_RX_PSE_MODE (0x1)
+#define PDMA_RX_FORCE_PORT (0x2)
+#define PDMA_RX_AUTO_LEARN (0x3)
+
+#define ADMA_RX_RING0 (0)
+#define ADMA_RX_RING1 (1)
+#define ADMA_RX_RING2 (2)
+#define ADMA_RX_RING3 (3)
+
+#define ADMA_RX_LEN0_MASK (0x3fff)
+#define ADMA_RX_LEN1_MASK (0x3)
+
+#define SET_ADMA_RX_LEN0(x) ((x) & ADMA_RX_LEN0_MASK)
+#define SET_ADMA_RX_LEN1(x) ((x) & ADMA_RX_LEN1_MASK)
+
+#define QDMA_PAGE (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x1F0)
+
+/* SFQ (per-flow queueing) registers */
+#define VQTX_TB_BASE0 (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0180)
+#define VQTX_TB_BASE1 (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0184)
+#define VQTX_TB_BASE2 (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0188)
+#define VQTX_TB_BASE3 (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x018C)
+#define VQTX_GLO (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0280)
+#define VQTX_INVLD_PTR (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x028C)
+#define VQTX_NUM (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0290)
+#define VQTX_SCH (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0298)
+#define VQTX_HASH_CFG (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x02A0)
+#define VQTX_HASH_SD (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x02A4)
+#define VQTX_VLD_CFG (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x02B0)
+#define VQTX_MIB_IF (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x02BC)
+#define VQTX_MIB_PCNT (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x02C0)
+#define VQTX_MIB_BCNT0 (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x02C4)
+#define VQTX_MIB_BCNT1 (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x02C8)
+#define VQTX_0_BIND_QID (PQ0 << 0)
+#define VQTX_1_BIND_QID (PQ1 << 8)
+#define VQTX_2_BIND_QID (PQ2 << 16)
+#define VQTX_3_BIND_QID (PQ3 << 24)
+#define VQTX_4_BIND_QID (PQ4 << 0)
+#define VQTX_5_BIND_QID (PQ5 << 8)
+#define VQTX_6_BIND_QID (PQ6 << 16)
+#define VQTX_7_BIND_QID (PQ7 << 24)
+#define VQTX_TB_BASE4 (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0190)
+#define VQTX_TB_BASE5 (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0194)
+#define VQTX_TB_BASE6 (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x0198)
+#define VQTX_TB_BASE7 (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0x019C)
+#define VQTX_0_3_BIND_QID (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0xBC0)
+#define VQTX_4_7_BIND_QID (ETHDMASYS_FRAME_ENGINE_BASE + QDMA_OFFSET + 0xBC4)
+#define PQ0 0
+#define PQ1 1
+#define PQ2 15
+#define PQ3 16
+#define PQ4 30
+#define PQ5 31
+#define PQ6 43
+#define PQ7 63
+
+#if defined(CONFIG_MACH_MT7623)
+#define VQ_NUM0 256
+#define VQ_NUM1 256
+#define VQ_NUM2 256
+#define VQ_NUM3 256
+#define VQ_NUM4 0
+#define VQ_NUM5 0
+#define VQ_NUM6 0
+#define VQ_NUM7 0
+#define VQTX_NUM_0 (4 << 0)
+#define VQTX_NUM_1 (4 << 4)
+#define VQTX_NUM_2 (4 << 8)
+#define VQTX_NUM_3 (4 << 12)
+#define VQTX_NUM_4 0
+#define VQTX_NUM_5 0
+#define VQTX_NUM_6 0
+#define VQTX_NUM_7 0
+#else
+#define VQ_NUM0 128
+#define VQ_NUM1 128
+#define VQ_NUM2 128
+#define VQ_NUM3 128
+#define VQ_NUM4 128
+#define VQ_NUM5 128
+#define VQ_NUM6 128
+#define VQ_NUM7 128
+#define VQTX_NUM_0 (3 << 0)
+#define VQTX_NUM_1 (3 << 4)
+#define VQTX_NUM_2 (3 << 8)
+#define VQTX_NUM_3 (3 << 12)
+#define VQTX_NUM_4 (3 << 16)
+#define VQTX_NUM_5 (3 << 20)
+#define VQTX_NUM_6 (3 << 24)
+#define VQTX_NUM_7 (3 << 28)
+#endif
+
+#define VQTX_MIB_EN BIT(17)
+
+/* HW IO-coherent unit base address */
+#if defined(CONFIG_MACH_LEOPARD)
+#define HW_IOC_BASE 0x1B000080
+#define IOC_OFFSET 4
+#else
+#define HW_IOC_BASE 0x1B000400
+#define IOC_OFFSET 8
+#endif
+
+/*=========================================
+ * SFQ Table Format define
+ * One SFQ_table row is eight consecutive 32-bit words; the layout
+ * appears to mirror the hardware SFQ table entry — do not reorder
+ * or repack these fields (confirm against the NETSYS datasheet).
+ *=========================================
+ */
+/* Word 1: virtual-queue head pointer */
+struct SFQ_INFO1_T {
+ unsigned int VQHPTR;
+};
+
+/* Word 2: virtual-queue tail pointer */
+struct SFQ_INFO2_T {
+ unsigned int VQTPTR;
+};
+
+/* Word 3: queue depth and scheduler deficit counter */
+struct SFQ_INFO3_T {
+ unsigned int QUE_DEPTH:16; /* bits [15:0] */
+ unsigned int DEFICIT_CNT:16; /* bits [31:16] */
+};
+
+/* Word 4: reserved */
+struct SFQ_INFO4_T {
+ unsigned int RESV;
+};
+
+/* Word 5: packet count */
+struct SFQ_INFO5_T {
+ unsigned int PKT_CNT;
+};
+
+/* Word 6: byte count */
+struct SFQ_INFO6_T {
+ unsigned int BYTE_CNT;
+};
+
+/* Word 7: NOTE(review): same field name as SFQ_INFO6_T — possibly
+ * the high word of a 64-bit byte counter; confirm with hardware doc.
+ */
+struct SFQ_INFO7_T {
+ unsigned int BYTE_CNT;
+};
+
+/* Word 8: reserved */
+struct SFQ_INFO8_T {
+ unsigned int RESV;
+};
+
+/* Complete 8-word SFQ table entry. */
+struct SFQ_table {
+ struct SFQ_INFO1_T sfq_info1;
+ struct SFQ_INFO2_T sfq_info2;
+ struct SFQ_INFO3_T sfq_info3;
+ struct SFQ_INFO4_T sfq_info4;
+ struct SFQ_INFO5_T sfq_info5;
+ struct SFQ_INFO6_T sfq_info6;
+ struct SFQ_INFO7_T sfq_info7;
+ struct SFQ_INFO8_T sfq_info8;
+};
+
+#if defined(CONFIG_RAETH_HW_LRO) || defined(CONFIG_RAETH_MULTIPLE_RX_RING)
+#define FE_GDM_RXID1_OFFSET (0x0130)
+#define FE_GDM_RXID1 (RALINK_FRAME_ENGINE_BASE + FE_GDM_RXID1_OFFSET)
+#define GDM_VLAN_PRI7_RXID_SEL BITS(30, 31)
+#define GDM_VLAN_PRI6_RXID_SEL BITS(28, 29)
+#define GDM_VLAN_PRI5_RXID_SEL BITS(26, 27)
+#define GDM_VLAN_PRI4_RXID_SEL BITS(24, 25)
+#define GDM_VLAN_PRI3_RXID_SEL BITS(22, 23)
+#define GDM_VLAN_PRI2_RXID_SEL BITS(20, 21)
+#define GDM_VLAN_PRI1_RXID_SEL BITS(18, 19)
+#define GDM_VLAN_PRI0_RXID_SEL BITS(16, 17)
+#define GDM_TCP_ACK_RXID_SEL BITS(4, 5)
+#define GDM_TCP_ACK_WZPC BIT(3)
+#define GDM_RXID_PRI_SEL BITS(0, 2)
+
+#define FE_GDM_RXID2_OFFSET (0x0134)
+#define FE_GDM_RXID2 (RALINK_FRAME_ENGINE_BASE + FE_GDM_RXID2_OFFSET)
+#define GDM_STAG7_RXID_SEL BITS(30, 31)
+#define GDM_STAG6_RXID_SEL BITS(28, 29)
+#define GDM_STAG5_RXID_SEL BITS(26, 27)
+#define GDM_STAG4_RXID_SEL BITS(24, 25)
+#define GDM_STAG3_RXID_SEL BITS(22, 23)
+#define GDM_STAG2_RXID_SEL BITS(20, 21)
+#define GDM_STAG1_RXID_SEL BITS(18, 19)
+#define GDM_STAG0_RXID_SEL BITS(16, 17)
+#define GDM_PID2_RXID_SEL BITS(2, 3)
+#define GDM_PID1_RXID_SEL BITS(0, 1)
+
+#define GDM_PRI_PID (0)
+#define GDM_PRI_VLAN_PID (1)
+#define GDM_PRI_ACK_PID (2)
+#define GDM_PRI_VLAN_ACK_PID (3)
+#define GDM_PRI_ACK_VLAN_PID (4)
+
+#define SET_GDM_VLAN_PRI_RXID_SEL(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(FE_GDM_RXID1); \
+reg_val &= ~(0x03 << (((x) << 1) + 16)); \
+reg_val |= ((y) & 0x3) << (((x) << 1) + 16); \
+sys_reg_write(FE_GDM_RXID1, reg_val); \
+}
+
+#define SET_GDM_TCP_ACK_RXID_SEL(x) \
+{ \
+unsigned int reg_val = sys_reg_read(FE_GDM_RXID1); \
+reg_val &= ~(GDM_TCP_ACK_RXID_SEL); \
+reg_val |= ((x) & 0x3) << 4; \
+sys_reg_write(FE_GDM_RXID1, reg_val); \
+}
+
+#define SET_GDM_TCP_ACK_WZPC(x) \
+{ \
+unsigned int reg_val = sys_reg_read(FE_GDM_RXID1); \
+reg_val &= ~(GDM_TCP_ACK_WZPC); \
+reg_val |= ((x) & 0x1) << 3; \
+sys_reg_write(FE_GDM_RXID1, reg_val); \
+}
+
+#define SET_GDM_RXID_PRI_SEL(x) \
+{ \
+unsigned int reg_val = sys_reg_read(FE_GDM_RXID1); \
+reg_val &= ~(GDM_RXID_PRI_SEL); \
+reg_val |= (x) & 0x7; \
+sys_reg_write(FE_GDM_RXID1, reg_val); \
+}
+
+#define GDM_STAG_RXID_SEL(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(FE_GDM_RXID2); \
+reg_val &= ~(0x03 << (((x) << 1) + 16)); \
+reg_val |= ((y) & 0x3) << (((x) << 1) + 16); \
+sys_reg_write(FE_GDM_RXID2, reg_val); \
+}
+
+#define SET_GDM_PID2_RXID_SEL(x) \
+{ \
+unsigned int reg_val = sys_reg_read(FE_GDM_RXID2); \
+reg_val &= ~(GDM_PID2_RXID_SEL); \
+reg_val |= ((x) & 0x3) << 2; \
+sys_reg_write(FE_GDM_RXID2, reg_val); \
+}
+
+#define SET_GDM_PID1_RXID_SEL(x) \
+{ \
+unsigned int reg_val = sys_reg_read(FE_GDM_RXID2); \
+reg_val &= ~(GDM_PID1_RXID_SEL); \
+reg_val |= ((x) & 0x3); \
+sys_reg_write(FE_GDM_RXID2, reg_val); \
+}
+
+#endif /* CONFIG_RAETH_HW_LRO || CONFIG_RAETH_MULTIPLE_RX_RING */
+/* Per Port Packet Counts in RT3052, added by bobtseng 2009.4.17. */
+#define PORT0_PKCOUNT (0xb01100e8)
+#define PORT1_PKCOUNT (0xb01100ec)
+#define PORT2_PKCOUNT (0xb01100f0)
+#define PORT3_PKCOUNT (0xb01100f4)
+#define PORT4_PKCOUNT (0xb01100f8)
+#define PORT5_PKCOUNT (0xb01100fc)
+
+#define sys_reg_read(phys) (__raw_readl((void __iomem *)phys))
+#define sys_reg_write(phys, val) (__raw_writel(val, (void __iomem *)phys))
+
+/* ====================================== */
+#define GDM1_DISPAD BIT(18)
+#define GDM1_DISCRC BIT(17)
+
+/* GDMA1 uni-cast frames destination port */
+#define GDM1_ICS_EN (0x1 << 22)
+#define GDM1_TCS_EN (0x1 << 21)
+#define GDM1_UCS_EN (0x1 << 20)
+#define GDM1_JMB_EN (0x1 << 19)
+#define GDM1_STRPCRC (0x1 << 16)
+#define GDM1_UFRC_P_CPU (0 << 12)
+
+/* GDMA1 broad-cast MAC address frames */
+#define GDM1_BFRC_P_CPU (0 << 8)
+
+/* GDMA1 multi-cast MAC address frames */
+#define GDM1_MFRC_P_CPU (0 << 4)
+
+/* GDMA1 other MAC address frames destination port */
+#define GDM1_OFRC_P_CPU (0 << 0)
+
+/* checksum generator registers are removed */
+#define ICS_GEN_EN (0 << 2)
+#define UCS_GEN_EN (0 << 1)
+#define TCS_GEN_EN (0 << 0)
+
+/* MDIO_CFG bit */
+#define MDIO_CFG_GP1_FC_TX BIT(11)
+#define MDIO_CFG_GP1_FC_RX BIT(10)
+
+/* ====================================== */
+/* ====================================== */
+#define GP1_LNK_DWN BIT(9)
+#define GP1_AN_FAIL BIT(8)
+/* ====================================== */
+/* ====================================== */
+#define PSE_RESET BIT(0)
+/* ====================================== */
+#define PST_DRX_IDX3 BIT(19)
+#define PST_DRX_IDX2 BIT(18)
+#define PST_DRX_IDX1 BIT(17)
+#define PST_DRX_IDX0 BIT(16)
+#define PST_DTX_IDX3 BIT(3)
+#define PST_DTX_IDX2 BIT(2)
+#define PST_DTX_IDX1 BIT(1)
+#define PST_DTX_IDX0 BIT(0)
+
+#define RX_2B_OFFSET BIT(31)
+#define CSR_CLKGATE_BYP BIT(30)
+#define MULTI_EN BIT(10)
+#define DESC_32B_EN BIT(8)
+#define TX_WB_DDONE BIT(6)
+#define RX_DMA_BUSY BIT(3)
+#define TX_DMA_BUSY BIT(1)
+#define RX_DMA_EN BIT(2)
+#define TX_DMA_EN BIT(0)
+
+#define PDMA_BT_SIZE_4DWORDS (0 << 4)
+#define PDMA_BT_SIZE_8DWORDS BIT(4)
+#define PDMA_BT_SIZE_16DWORDS (2 << 4)
+#define PDMA_BT_SIZE_32DWORDS (3 << 4)
+#define PDMA_DESC_32B_E (1 << 8)
+
+#define ADMA_RX_BT_SIZE_4DWORDS (0 << 11)
+#define ADMA_RX_BT_SIZE_8DWORDS BIT(11)
+#define ADMA_RX_BT_SIZE_16DWORDS (2 << 11)
+#define ADMA_RX_BT_SIZE_32DWORDS (3 << 11)
+
+/* Register bits.
+ */
+
+#define MACCFG_RXEN BIT(2)
+#define MACCFG_TXEN BIT(3)
+#define MACCFG_PROMISC BIT(18)
+#define MACCFG_RXMCAST BIT(19)
+#define MACCFG_FDUPLEX BIT(20)
+#define MACCFG_PORTSEL BIT(27)
+#define MACCFG_HBEATDIS BIT(28)
+
+#define DMACTL_SR BIT(1) /* Start/Stop Receive */
+#define DMACTL_ST BIT(13) /* Start/Stop Transmission Command */
+
+#define DMACFG_SWR BIT(0) /* Software Reset */
+#define DMACFG_BURST32 (32 << 8)
+
+#define DMASTAT_TS 0x00700000 /* Transmit Process State */
+#define DMASTAT_RS 0x000e0000 /* Receive Process State */
+
+#define MACCFG_INIT 0 /* (MACCFG_FDUPLEX) // | MACCFG_PORTSEL) */
+
+/* Descriptor bits.
+ */
+#define R_OWN 0x80000000 /* Own Bit */
+#define RD_RER 0x02000000 /* Receive End Of Ring */
+#define RD_LS 0x00000100 /* Last Descriptor */
+#define RD_ES 0x00008000 /* Error Summary */
+#define RD_CHAIN 0x01000000 /* Chained */
+
+/* Word 0 */
+#define T_OWN 0x80000000 /* Own Bit */
+#define TD_ES 0x00008000 /* Error Summary */
+
+/* Word 1 */
+#define TD_LS 0x40000000 /* Last Segment */
+#define TD_FS 0x20000000 /* First Segment */
+#define TD_TER 0x08000000 /* Transmit End Of Ring */
+#define TD_CHAIN 0x01000000 /* Chained */
+
+#define TD_SET 0x08000000 /* Setup Packet */
+
+#define POLL_DEMAND 1
+
+#define RSTCTL (0x34)
+#define RSTCTL_RSTENET1 BIT(19)
+#define RSTCTL_RSTENET2 BIT(20)
+
+#define INIT_VALUE_OF_RT2883_PSE_FQ_CFG 0xff908000
+#define INIT_VALUE_OF_PSE_FQFC_CFG 0x80504000
+#define INIT_VALUE_OF_FORCE_100_FD 0x1001BC01
+#define INIT_VALUE_OF_FORCE_1000_FD 0x1F01DC01
+
+/* Define Whole FE Reset Register */
+#define RSTCTRL (RALINK_SYSCTL_BASE + 0x34)
+#define RT2880_AGPIOCFG_REG (RALINK_SYSCTL_BASE + 0x3C)
+
+/*=========================================
+ * PDMA RX Descriptor Format define
+ *=========================================
+ */
+
+/* RX descriptor word 1: DMA address of the receive buffer (written by
+ * fe_rx_desc_init() in raether.c).
+ */
+struct PDMA_RXD_INFO1_T {
+	unsigned int PDP0;
+};
+
+/* RX descriptor word 2: length/status. PLEN0 is the received packet
+ * length, TAG flags a VLAN-tagged frame (VID then valid in word 3),
+ * and DDONE_bit is the ownership flag (1 = filled by DMA, owned by
+ * CPU). Bitfield order must match the hardware layout -- do not
+ * reorder.
+ */
+struct PDMA_RXD_INFO2_T {
+	unsigned int PLEN1:2;
+	unsigned int LRO_AGG_CNT:8;
+	unsigned int REV:3;
+	unsigned int FOE_ENTRY_32:1;
+	unsigned int REV1:1;
+	unsigned int TAG:1;
+	unsigned int PLEN0:14;
+	unsigned int LS0:1;
+	unsigned int DDONE_bit:1;
+};
+
+/* RX descriptor word 3: VLAN tag (VID + TPID) of the received frame. */
+struct PDMA_RXD_INFO3_T {
+	unsigned int VID:16;
+	unsigned int TPID:16;
+};
+
+/* RX descriptor word 4: parse/FOE result. SP is the source port
+ * (2 = GMAC2 in the RX path) and L4VLD means the L4 checksum was
+ * verified by hardware; the whole word is copied verbatim into the
+ * skb head/tail room for the HW-NAT module.
+ */
+struct PDMA_RXD_INFO4_T {
+	unsigned int FOE_ENTRY:14;
+	unsigned int CRSN:5;
+	unsigned int SP:4;
+	unsigned int L4F:1;
+	unsigned int L4VLD:1;
+	unsigned int TACK:1;
+	unsigned int IP4F:1;
+	unsigned int IP4:1;
+	unsigned int IP6:1;
+	unsigned int UN_USE1:3;
+};
+
+/* Complete PDMA RX descriptor (4 words, or 8 with CONFIG_32B_DESC).
+ * Field order mirrors the hardware DMA layout.
+ */
+struct PDMA_rxdesc {
+	struct PDMA_RXD_INFO1_T rxd_info1;
+	struct PDMA_RXD_INFO2_T rxd_info2;
+	struct PDMA_RXD_INFO3_T rxd_info3;
+	struct PDMA_RXD_INFO4_T rxd_info4;
+#ifdef CONFIG_32B_DESC
+	unsigned int rxd_info5;
+	unsigned int rxd_info6;
+	unsigned int rxd_info7;
+	unsigned int rxd_info8;
+#endif
+};
+
+/*=========================================
+ * PDMA TX Descriptor Format define
+ *=========================================
+ */
+
+/* TX descriptor word 1: DMA address of segment 0. */
+struct PDMA_TXD_INFO1_T {
+	unsigned int SDP0;
+};
+
+/* TX descriptor word 2: per-segment lengths and flags; DDONE_bit is
+ * the ownership flag, set by hardware when transmission completes.
+ * Bitfield order must match the hardware layout -- do not reorder.
+ */
+struct PDMA_TXD_INFO2_T {
+	unsigned int SDL1:14;
+	unsigned int LS1_bit:1;
+	unsigned int BURST_bit:1;
+	unsigned int SDL0:14;
+	unsigned int LS0_bit:1;
+	unsigned int DDONE_bit:1;
+};
+
+/* TX descriptor word 3: DMA address of segment 1. */
+struct PDMA_TXD_INFO3_T {
+	unsigned int SDP1;
+};
+
+/* TX descriptor word 4: insertion/offload controls (VLAN insert,
+ * forward port, TSO and checksum-offload flags -- field semantics per
+ * the FE datasheet).
+ */
+struct PDMA_TXD_INFO4_T {
+	unsigned int VLAN_TAG:17; /* INSV(1)+VPRI(3)+CFI(1)+VID(12) */
+	unsigned int RESV:2;
+	unsigned int UDF:5;
+	unsigned int FPORT:4;
+	unsigned int TSO:1;
+	unsigned int TUI_CO:3;
+};
+
+/* Complete PDMA TX descriptor; layout mirrors the hardware format. */
+struct PDMA_txdesc {
+	struct PDMA_TXD_INFO1_T txd_info1;
+	struct PDMA_TXD_INFO2_T txd_info2;
+	struct PDMA_TXD_INFO3_T txd_info3;
+	struct PDMA_TXD_INFO4_T txd_info4;
+#ifdef CONFIG_32B_DESC
+	unsigned int txd_info5;
+	unsigned int txd_info6;
+	unsigned int txd_info7;
+	unsigned int txd_info8;
+#endif
+};
+
+/*=========================================
+ * QDMA TX Descriptor Format define
+ *=========================================
+ */
+/* QDMA TX word 1: buffer DMA address. */
+struct QDMA_TXD_INFO1_T {
+	unsigned int SDP;
+};
+
+/* QDMA TX word 2: NDP -- presumably the next-descriptor pointer
+ * (QDMA descriptors form a chain rather than a flat ring); TODO
+ * confirm against the QDMA programming guide.
+ */
+struct QDMA_TXD_INFO2_T {
+	unsigned int NDP;
+};
+
+/* QDMA TX word 3: segment length (SDL), last-segment flag and the
+ * DDONE ownership flag. Bitfield order mirrors hardware -- do not
+ * reorder.
+ */
+struct QDMA_TXD_INFO3_T {
+	unsigned int RSV0:6;
+	unsigned int RSV1:2;
+	unsigned int SDL:16;
+	unsigned int RSV2:6;
+	unsigned int LS:1;
+	unsigned int DDONE:1;
+};
+
+/* QDMA TX word 4: forward port and TX queue selection. */
+struct QDMA_TXD_INFO4_T {
+	unsigned int RSV0:6;
+	unsigned int RSV1:2;
+	unsigned int FPORT:4;
+	unsigned int RSV2:2;
+	unsigned int RSV3:2;
+	unsigned int QID:7;
+	unsigned int RSV4:1;
+	unsigned int RSV5:6;
+	unsigned int SWC:1;
+	unsigned int BURST:1;
+};
+
+/* QDMA TX word 5: offload controls (protocol, IP offset, virtual
+ * queue, checksum/TSO flags).
+ */
+struct QDMA_TXD_INFO5_T {
+	unsigned int PROT:3;
+	unsigned int RSV0:2;
+	unsigned int IPOFST:7;
+	unsigned int RSV1:2;
+	unsigned int VQID:10;
+	unsigned int RSV2:2;
+	unsigned int VQID0:1;
+	unsigned int RSV3:1;
+	unsigned int TUI_CO:3;
+	unsigned int TSO:1;
+};
+
+/* QDMA TX word 6: inner/outer VLAN insertion enables and inner tag. */
+struct QDMA_TXD_INFO6_T {
+	unsigned int VLAN_TAG_1:16;
+	unsigned int INSV_1:1;
+	unsigned int RSV0:14;
+	unsigned int INSV_0:1;
+};
+
+/* QDMA TX word 7: outer VLAN tag and protocol ID to insert. */
+struct QDMA_TXD_INFO7_T {
+	unsigned int VLAN_TAG_0:16;
+	unsigned int VPID_0:16;
+};
+
+/* QDMA TX word 8: reserved. */
+struct QDMA_TXD_INFO8_T {
+	unsigned int RSV;
+};
+
+/* Complete 32-byte QDMA TX descriptor; layout mirrors hardware. */
+struct QDMA_txdesc {
+	struct QDMA_TXD_INFO1_T txd_info1;
+	struct QDMA_TXD_INFO2_T txd_info2;
+	struct QDMA_TXD_INFO3_T txd_info3;
+	struct QDMA_TXD_INFO4_T txd_info4;
+	struct QDMA_TXD_INFO5_T txd_info5;
+	struct QDMA_TXD_INFO6_T txd_info6;
+	struct QDMA_TXD_INFO7_T txd_info7;
+	struct QDMA_TXD_INFO8_T txd_info8;
+};
+
+#define QTXD_LEN (sizeof(struct QDMA_txdesc))
+/* PHY_CNTL_REG = 0; basic mode control bits (standard MII BMCR) */
+#define PHY_ENABLE_AUTO_NEGO 0x1000
+#define PHY_RESTART_AUTO_NEGO 0x0200
+
+/* PHY_STAT_REG = 1; */
+#define PHY_AUTO_NEGO_COMP 0x0020
+#define PHY_LINK_STATUS 0x0004
+
+/* PHY_AUTO_NEGO_REG = 4; advertised link capabilities */
+#define PHY_CAP_10_HALF 0x0020
+#define PHY_CAP_10_FULL 0x0040
+#define PHY_CAP_100_HALF 0x0080
+#define PHY_CAP_100_FULL 0x0100
+
+/* proc definition */
+
+#define PROCREG_CONTROL_FILE "/var/run/procreg_control"
+/* Directory name under /proc for all raeth debug entries. The old
+ * per-SoC name selection was dead code (#if 0) and has been removed.
+ */
+#define PROCREG_DIR "panther"
+#define PROCREG_SKBFREE "skb_free"
+#define PROCREG_TXRING "tx_ring"
+#define PROCREG_RXRING "rx_ring"
+#define PROCREG_RXRING1 "rx_ring1"
+#define PROCREG_RXRING2 "rx_ring2"
+#define PROCREG_RXRING3 "rx_ring3"
+#define PROCREG_NUM_OF_TXD "num_of_txd"
+#define PROCREG_TSO_LEN "tso_len"
+#define PROCREG_LRO_STATS "lro_stats"
+#define PROCREG_HW_LRO_STATS "hw_lro_stats"
+#define PROCREG_HW_LRO_AUTO_TLB "hw_lro_auto_tlb"
+#define PROCREG_HW_IO_COHERENT "hw_iocoherent"
+#define PROCREG_GMAC "gmac"
+#define PROCREG_GMAC2 "gmac2"
+#define PROCREG_CP0 "cp0"
+#define PROCREG_RAQOS "qos"
+#define PROCREG_READ_VAL "regread_value"
+#define PROCREG_WRITE_VAL "regwrite_value"
+#define PROCREG_ADDR "reg_addr"
+#define PROCREG_CTL "procreg_control"
+#define PROCREG_RXDONE_INTR "rxdone_intr_count"
+#define PROCREG_ESW_INTR "esw_intr_count"
+#define PROCREG_ESW_CNT "esw_cnt"
+#define PROCREG_ETH_CNT "eth_cnt"
+#define PROCREG_SNMP "snmp"
+#define PROCREG_SET_LAN_IP "set_lan_ip"
+#if defined(TASKLET_WORKQUEUE_SW)
+#define PROCREG_SCHE "schedule"
+#endif
+#define PROCREG_QDMA "qdma"
+#define PROCREG_INT_DBG "int_dbg"
+/* One register operation request -- presumably exchanged over the
+ * PROCREG_CONTROL_FILE proc interface; TODO confirm against the proc
+ * handler that consumes it.
+ */
+struct rt2880_reg_op_data {
+	char name[64];
+	unsigned int reg_addr;
+	unsigned int op;
+	unsigned int reg_value;
+};
+
+/* Software LRO bookkeeping counters. */
+struct lro_counters {
+	u32 lro_aggregated;
+	u32 lro_flushed;
+	u32 lro_no_desc;
+};
+
+/* LRO configuration; lan_ip1 is presumably the LAN IPv4 address LRO
+ * matches on (see PROCREG_SET_LAN_IP) -- TODO confirm.
+ */
+struct lro_para_struct {
+	unsigned int lan_ip1;
+};
+
+/* Parsed header fields of one Ethernet frame, collected layer by
+ * layer (L2 MACs, optional VLANs, optional PPPoE, L3 IPv4/IPv6, L4
+ * TCP/UDP). The *_gap fields are presumably byte offsets between
+ * layers -- TODO confirm against the code that fills this struct.
+ */
+struct parse_result {
+	/* layer2 header */
+	u8 dmac[6];
+	u8 smac[6];
+
+	/* vlan header */
+	u16 vlan_tag;
+	u16 vlan1_gap;
+	u16 vlan1;
+	u16 vlan2_gap;
+	u16 vlan2;
+	u16 vlan_layer;
+
+	/* pppoe header */
+	u32 pppoe_gap;
+	u16 ppp_tag;
+	u16 pppoe_sid;
+
+	/* layer3 header */
+	u16 eth_type;
+	struct iphdr iph;
+	struct ipv6hdr ip6h;
+
+	/* layer4 header */
+	struct tcphdr th;
+	struct udphdr uh;
+
+	u32 pkt_type;
+	u8 is_mcast;
+};
+
+/* Default DMA global config register is the PDMA one. */
+#define DMA_GLO_CFG PDMA_GLO_CFG
+
+/* GDMA forwarding-port value selected by the configured DMA mode
+ * (0x5555 when QDMA carries RX, else 0x0000 -- exact port encoding
+ * per the GDMA register spec; TODO confirm).
+ */
+#if defined(CONFIG_RAETH_QDMATX_QDMARX)
+#define GDMA1_FWD_PORT 0x5555
+#define GDMA2_FWD_PORT 0x5555
+#elif defined(CONFIG_RAETH_PDMATX_QDMARX)
+#define GDMA1_FWD_PORT 0x5555
+#define GDMA2_FWD_PORT 0x5555
+#else
+#define GDMA1_FWD_PORT 0x0000
+#define GDMA2_FWD_PORT 0x0000
+#endif
+
+/* RX calc-index registers: QDMA's when QDMA RX is in use, else
+ * PDMA's. Rings 2/3 only exist on PDMA.
+ */
+#if defined(CONFIG_RAETH_QDMATX_QDMARX)
+#define RAETH_RX_CALC_IDX0 QRX_CRX_IDX_0
+#define RAETH_RX_CALC_IDX1 QRX_CRX_IDX_1
+#elif defined(CONFIG_RAETH_PDMATX_QDMARX)
+#define RAETH_RX_CALC_IDX0 QRX_CRX_IDX_0
+#define RAETH_RX_CALC_IDX1 QRX_CRX_IDX_1
+#else
+#define RAETH_RX_CALC_IDX0 RX_CALC_IDX0
+#define RAETH_RX_CALC_IDX1 RX_CALC_IDX1
+#endif
+#define RAETH_RX_CALC_IDX2 RX_CALC_IDX2
+#define RAETH_RX_CALC_IDX3 RX_CALC_IDX3
+#define RAETH_FE_INT_STATUS FE_INT_STATUS
+#define RAETH_FE_INT_ALL FE_INT_ALL
+#define RAETH_FE_INT_ENABLE FE_INT_ENABLE
+#define RAETH_FE_INT_DLY_INIT FE_INT_DLY_INIT
+/* Interrupt causes serviced by the PDMA/QDMA poll paths. */
+#define RAETH_FE_INT_SETTING (RX_DONE_INT0 | RX_DONE_INT1 | \
+			      TX_DONE_INT0 | TX_DONE_INT1 | \
+			      TX_DONE_INT2 | TX_DONE_INT3)
+#define QFE_INT_SETTING (RX_DONE_INT0 | RX_DONE_INT1 | \
+			 TX_DONE_INT0 | TX_DONE_INT1 | \
+			 TX_DONE_INT2 | TX_DONE_INT3)
+#define RAETH_TX_DLY_INT TX_DLY_INT
+#define RAETH_TX_DONE_INT0 TX_DONE_INT0
+#define RAETH_DLY_INT_CFG DLY_INT_CFG
+
+/* io-coherent for ethdmasys */
+#define IOC_ETH_PDMA BIT(0)
+#define IOC_ETH_QDMA BIT(1)
+
+#endif /* RAETH_REG_H */
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether.c
new file mode 100644
index 0000000..913eb9b
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether.c
@@ -0,0 +1,3294 @@
+/* Copyright 2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "raether.h"
+#include "ra_mac.h"
+#include "ra_ioctl.h"
+#include "ra_switch.h"
+#include "raether_hwlro.h"
+#include "ra_ethtool.h"
+
+/* Remapped register bases, exported for the HW-NAT companion module. */
+void __iomem *ethdma_sysctl_base;
+#if defined(CONFIG_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+EXPORT_SYMBOL(ethdma_sysctl_base);
+#endif
+void __iomem *ethdma_frame_engine_base;
+#if defined(CONFIG_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+EXPORT_SYMBOL(ethdma_frame_engine_base);
+#endif
+/* Primary raeth net_device, exported for the HW-NAT module. */
+struct net_device *dev_raether;
+#if defined(CONFIG_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+EXPORT_SYMBOL(dev_raether);
+#endif
+void __iomem *ethdma_mac_base;
+
+static int pending_recv;
+
+/* LRO support */
+unsigned int lan_ip;
+struct lro_para_struct lro_para;
+/* Ring and queue sizing knobs, exported so companion modules share
+ * the same configuration.
+ */
+u32 gmac1_txq_num;
+EXPORT_SYMBOL(gmac1_txq_num);
+u32 gmac1_txq_txd_num;
+EXPORT_SYMBOL(gmac1_txq_txd_num);
+u32 gmac1_txd_num;
+EXPORT_SYMBOL(gmac1_txd_num);
+u32 gmac2_txq_num;
+EXPORT_SYMBOL(gmac2_txq_num);
+u32 gmac2_txq_txd_num;
+EXPORT_SYMBOL(gmac2_txq_txd_num);
+u32 gmac2_txd_num;
+EXPORT_SYMBOL(gmac2_txd_num);
+u32 num_rx_desc;
+EXPORT_SYMBOL(num_rx_desc);
+u32 num_tx_max_process;
+EXPORT_SYMBOL(num_tx_max_process);
+u32 num_tx_desc;
+EXPORT_SYMBOL(num_tx_desc);
+u32 total_txq_num;
+EXPORT_SYMBOL(total_txq_num);
+
+/* Ethernet-related clock names (presumably resolved via the clock
+ * framework elsewhere in this driver; not every SoC provides all of
+ * them -- TODO confirm).
+ */
+static const char *const mtk_clks_source_name[] = {
+	"ethif", "esw", "gp0", "gp1", "gp2",
+	"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
+	"sgmii1_tx250m", "sgmii1_rx250m", "sgmii1_cdr_ref", "sgmii1_cdr_fb",
+	"trgpll", "sgmipll", "eth1pll", "eth2pll", "eth", "sgmiitop"
+};
+
+/* reset frame engine */
+static void fe_reset(void)
+{
+ u32 val;
+
+ val = sys_reg_read(RSTCTRL);
+ val = val | RALINK_FE_RST;
+ sys_reg_write(RSTCTRL, val);
+
+ val = val & ~(RALINK_FE_RST);
+ sys_reg_write(RSTCTRL, val);
+}
+
+/* Pulse the GMAC reset bit (bit 23) in the system reset control
+ * register. Uses the RSTCTRL macro (RALINK_SYSCTL_BASE + 0x34) for
+ * consistency with fe_reset() instead of the open-coded address and
+ * magic shift the original used; commented-out code removed.
+ */
+static void fe_gmac_reset(void)
+{
+	u32 val;
+
+	val = sys_reg_read(RSTCTRL);
+	val |= BIT(23);		/* assert GMAC reset */
+	sys_reg_write(RSTCTRL, val);
+	val &= ~BIT(23);	/* de-assert */
+	sys_reg_write(RSTCTRL, val);
+}
+
+/* Set the hardware MAC address. */
+/* Set the hardware MAC address of GMAC1.
+ * @p: struct sockaddr carrying the new address.
+ * Returns 0, -EADDRNOTAVAIL for an invalid unicast address, or -EBUSY
+ * while the interface is running. The software copy in dev->dev_addr
+ * is only updated once every check has passed (the original code
+ * copied first, leaving a stale software copy on the -EBUSY path).
+ */
+static int ei_set_mac_addr(struct net_device *dev, void *p)
+{
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	/* Refuse to change the address of a running interface BEFORE
+	 * touching dev->dev_addr.
+	 */
+	if (netif_running(dev))
+		return -EBUSY;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	set_mac_address(dev->dev_addr);
+
+	return 0;
+}
+
+/* Set the hardware MAC address of GMAC2 (pseudo device).
+ * Same contract and fix as ei_set_mac_addr(): validate and check the
+ * running state before mutating dev->dev_addr, so a failed call
+ * leaves the software copy untouched.
+ */
+static int ei_set_mac2_addr(struct net_device *dev, void *p)
+{
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	if (netif_running(dev))
+		return -EBUSY;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	set_mac2_address(dev->dev_addr);
+
+	return 0;
+}
+
+/* Reset the interface statistics block to zero.
+ * A single memset replaces the original 21 field-by-field clears; it
+ * zeroes the same tx/rx/collision counters plus any remaining fields
+ * of the stats structure, which matches the intent of a full reset.
+ */
+static void ei_reset_statistics(struct END_DEVICE *ei_local)
+{
+	memset(&ei_local->stat, 0, sizeof(ei_local->stat));
+}
+
+/* Re-arm one RX descriptor with a fresh buffer: point it at dma_addr,
+ * restore the maximum buffer length and clear LS0/DDONE so the DMA
+ * engine owns it again. The caller issues the wmb() and updates the
+ * RX calc-index register afterwards.
+ */
+static inline void fe_rx_desc_init(struct PDMA_rxdesc *rx_ring,
+				   dma_addr_t dma_addr)
+{
+	rx_ring->rxd_info1.PDP0 = dma_addr;
+	rx_ring->rxd_info2.PLEN0 = MAX_RX_LENGTH;
+	rx_ring->rxd_info2.LS0 = 0;
+	rx_ring->rxd_info2.DDONE_bit = 0;
+}
+
+/* Receive path for the default single PDMA RX ring (ring 0).
+ * Drains up to @budget completed descriptors. For each one a fresh
+ * buffer is allocated and DMA-mapped first; the filled buffer is then
+ * wrapped in an skb (zero-copy via build_skb), routed to GMAC1 or the
+ * GMAC2 pseudo device based on the descriptor's source port, passed
+ * through the optional HW-NAT hook, and handed to the stack. Finally
+ * the descriptor is re-armed with the new buffer and returned to DMA.
+ * Returns the number of descriptors visited, or budget + 1 when an
+ * allocation/mapping failure forces a packet drop.
+ */
+static int rt2880_eth_recv(struct net_device *dev,
+			   struct napi_struct *napi, int budget)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	struct PSEUDO_ADAPTER *p_ad = netdev_priv(ei_local->pseudo_dev);
+	struct sk_buff *rx_skb;
+	unsigned int length = 0;
+	int rx_processed = 0;
+	struct PDMA_rxdesc *rx_ring, *rx_ring_next;
+	unsigned int rx_dma_owner_idx, rx_next_idx;
+	void *rx_data, *rx_data_next, *new_data;
+	unsigned int skb_size;
+
+	/* Start at the descriptor after the last CPU-processed index,
+	 * from the cached copy or the DMA calc-index register.
+	 */
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+	rx_dma_owner_idx = (ei_local->rx_calc_idx[0] + 1) % num_rx_desc;
+#else
+	rx_dma_owner_idx = (sys_reg_read(RAETH_RX_CALC_IDX0) + 1) % num_rx_desc;
+#endif
+	rx_ring = &ei_local->rx_ring[0][rx_dma_owner_idx];
+	rx_data = ei_local->netrx_skb_data[0][rx_dma_owner_idx];
+
+	/* Buffer size used with build_skb: payload area plus room for
+	 * struct skb_shared_info.
+	 */
+	skb_size = SKB_DATA_ALIGN(MAX_RX_LENGTH + NET_IP_ALIGN + NET_SKB_PAD) +
+		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	for (;;) {
+		dma_addr_t dma_addr;
+
+		/* Stop on budget exhaustion or when the descriptor is
+		 * still owned by the DMA engine (DDONE clear).
+		 */
+		if ((rx_processed++ > budget) ||
+		    (rx_ring->rxd_info2.DDONE_bit == 0))
+			break;
+
+		rx_next_idx = (rx_dma_owner_idx + 1) % num_rx_desc;
+		rx_ring_next = &ei_local->rx_ring[0][rx_next_idx];
+		rx_data_next = ei_local->netrx_skb_data[0][rx_next_idx];
+		prefetch(rx_ring_next);
+
+		/* Allocate the replacement buffer up front so the ring
+		 * never loses a slot when allocation fails.
+		 */
+		new_data = raeth_alloc_skb_data(skb_size, GFP_ATOMIC);
+
+		if (unlikely(!new_data)) {
+			pr_err("skb not available...\n");
+			goto skb_err;
+		}
+
+		dma_addr = dma_map_single(dev->dev.parent,
+					  new_data + NET_SKB_PAD,
+					  MAX_RX_LENGTH, DMA_FROM_DEVICE);
+
+		if (unlikely(dma_mapping_error(dev->dev.parent, dma_addr))) {
+			pr_err("[%s]dma_map_single() failed...\n", __func__);
+			raeth_free_skb_data(new_data);
+			goto skb_err;
+		}
+
+		/* Wrap the just-completed buffer in an skb (zero-copy). */
+		rx_skb = raeth_build_skb(rx_data, skb_size);
+
+		if (unlikely(!rx_skb)) {
+			put_page(virt_to_head_page(rx_data));
+			pr_err("build_skb failed\n");
+			goto skb_err;
+		}
+		skb_reserve(rx_skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+		length = rx_ring->rxd_info2.PLEN0;
+		dma_unmap_single(dev->dev.parent,
+				 rx_ring->rxd_info1.PDP0,
+				 length, DMA_FROM_DEVICE);
+
+		prefetch(rx_skb->data);
+
+		/* skb processing */
+		skb_put(rx_skb, length);
+
+		/* Source port 2 = frame arrived on GMAC2: deliver via
+		 * the pseudo device.
+		 */
+		if (rx_ring->rxd_info4.SP == 2) {
+			if (likely(ei_local->pseudo_dev)) {
+				rx_skb->dev = ei_local->pseudo_dev;
+				rx_skb->protocol =
+				    eth_type_trans(rx_skb,
+						   ei_local->pseudo_dev);
+			} else {
+				pr_err("pseudo_dev is still not initialize ");
+				pr_err("but receive packet from GMAC2\n");
+			}
+		} else {
+			rx_skb->dev = dev;
+			rx_skb->protocol = eth_type_trans(rx_skb, dev);
+		}
+
+		/* rx checksum offload: trust the HW L4 verdict */
+		if (rx_ring->rxd_info4.L4VLD)
+			rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			rx_skb->ip_summed = CHECKSUM_NONE;
+
+#if defined(CONFIG_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+		/* Stash RX descriptor word 4 (FOE info) in the skb head
+		 * and/or tail room for the HW-NAT module.
+		 */
+		if (ppe_hook_rx_eth) {
+			if (IS_SPACE_AVAILABLE_HEAD(rx_skb)) {
+				*(uint32_t *)(FOE_INFO_START_ADDR_HEAD(rx_skb)) =
+				    *(uint32_t *)&rx_ring->rxd_info4;
+				FOE_ALG_HEAD(rx_skb) = 0;
+				FOE_MAGIC_TAG_HEAD(rx_skb) = FOE_MAGIC_GE;
+				FOE_TAG_PROTECT_HEAD(rx_skb) = TAG_PROTECT;
+			}
+			if (IS_SPACE_AVAILABLE_TAIL(rx_skb)) {
+				*(uint32_t *)(FOE_INFO_START_ADDR_TAIL(rx_skb) + 2) =
+				    *(uint32_t *)&rx_ring->rxd_info4;
+				FOE_ALG_TAIL(rx_skb) = 0;
+				FOE_MAGIC_TAG_TAIL(rx_skb) = FOE_MAGIC_GE;
+				FOE_TAG_PROTECT_TAIL(rx_skb) = TAG_PROTECT;
+			}
+		}
+#endif
+
+		if (ei_local->features & FE_HW_VLAN_RX) {
+			if (rx_ring->rxd_info2.TAG)
+				__vlan_hwaccel_put_tag(rx_skb,
+						       htons(ETH_P_8021Q),
+						       rx_ring->rxd_info3.VID);
+		}
+
+/* ra_sw_nat_hook_rx return 1 --> continue
+ * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
+ */
+#if defined(CONFIG_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+		if ((!ppe_hook_rx_eth) ||
+		    (ppe_hook_rx_eth && ppe_hook_rx_eth(rx_skb))) {
+#endif
+			if (ei_local->features & FE_INT_NAPI)
+				/* napi_gro_receive(napi, rx_skb); */
+				netif_receive_skb(rx_skb);
+			else
+				netif_rx(rx_skb);
+
+#if defined(CONFIG_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+		}
+#endif
+
+		/* Per-GMAC RX statistics. */
+		if (rx_ring->rxd_info4.SP == 2) {
+			p_ad->stat.rx_packets++;
+			p_ad->stat.rx_bytes += length;
+		} else {
+			ei_local->stat.rx_packets++;
+			ei_local->stat.rx_bytes += length;
+		}
+
+		/* Re-arm the descriptor with the replacement buffer. */
+		fe_rx_desc_init(rx_ring, dma_addr);
+		ei_local->netrx_skb_data[0][rx_dma_owner_idx] = new_data;
+
+		/* make sure that all changes to the dma ring are flushed before
+		 * we continue
+		 */
+		wmb();
+
+		/* Return the descriptor to the DMA engine. */
+		sys_reg_write(RAETH_RX_CALC_IDX0, rx_dma_owner_idx);
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+		ei_local->rx_calc_idx[0] = rx_dma_owner_idx;
+#endif
+
+		/* Update to Next packet point that was received.
+		 */
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+		rx_dma_owner_idx = rx_next_idx;
+#else
+		rx_dma_owner_idx =
+		    (sys_reg_read(RAETH_RX_CALC_IDX0) + 1) % num_rx_desc;
+#endif
+
+		/* use prefetched variable */
+		rx_ring = rx_ring_next;
+		rx_data = rx_data_next;
+	} /* for */
+
+	return rx_processed;
+
+skb_err:
+	/* rx packet from GE2 */
+	if (rx_ring->rxd_info4.SP == 2)
+		p_ad->stat.rx_dropped++;
+	else
+		ei_local->stat.rx_dropped++;
+
+	/* Drop the packet: re-arm the descriptor with its OLD buffer so
+	 * the ring stays fully populated.
+	 */
+	fe_rx_desc_init(rx_ring, rx_ring->rxd_info1.PDP0);
+
+	/* make sure that all changes to the dma ring
+	 * are flushed before we continue
+	 */
+	wmb();
+
+	sys_reg_write(RAETH_RX_CALC_IDX0, rx_dma_owner_idx);
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+	ei_local->rx_calc_idx[0] = rx_dma_owner_idx;
+#endif
+
+	/* budget + 1 signals the poller that the ring was not drained. */
+	return (budget + 1);
+}
+
+/* Combined TX+RX NAPI poll handler: acks and services whichever of
+ * the TX/RX interrupt causes are pending, then completes NAPI and
+ * re-enables both interrupt masks once the RX work fits in @budget.
+ */
+static int raeth_poll_full(struct napi_struct *napi, int budget)
+{
+	struct END_DEVICE *ei_local =
+	    container_of(napi, struct END_DEVICE, napi);
+	struct net_device *netdev = ei_local->netdev;
+	unsigned long reg_int_val_rx, reg_int_val_tx;
+	unsigned long reg_int_mask_rx, reg_int_mask_tx;
+	unsigned long flags;
+	int tx_done = 0, rx_done = 0;
+
+	reg_int_val_tx = sys_reg_read(ei_local->fe_tx_int_status);
+	reg_int_val_rx = sys_reg_read(ei_local->fe_rx_int_status);
+
+	if (reg_int_val_tx & ei_local->tx_mask) {
+		/* Ack the TX causes (delay int + done-0 only), then
+		 * reap completed TX descriptors.
+		 */
+		sys_reg_write(ei_local->fe_tx_int_status, (TX_DLY_INT | TX_DONE_INT0));
+		tx_done = ei_local->ei_xmit_housekeeping(netdev,
+							 num_tx_max_process);
+	}
+
+	if (reg_int_val_rx & ei_local->rx_mask) {
+		/* Ack all RX causes, then receive up to @budget packets. */
+		sys_reg_write(ei_local->fe_rx_int_status, RX_INT_ALL);
+		rx_done = ei_local->ei_eth_recv(netdev, napi, budget);
+	}
+
+	/* More RX work pending: stay scheduled, interrupts stay masked. */
+	if (rx_done >= budget)
+		return budget;
+
+	napi_complete(napi);
+
+	spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+	/* Enable TX/RX interrupts */
+	reg_int_mask_tx = sys_reg_read(ei_local->fe_tx_int_enable);
+	sys_reg_write(ei_local->fe_tx_int_enable,
+		      reg_int_mask_tx | ei_local->tx_mask);
+	reg_int_mask_rx = sys_reg_read(ei_local->fe_rx_int_enable);
+	sys_reg_write(ei_local->fe_rx_int_enable,
+		      reg_int_mask_rx | ei_local->rx_mask);
+
+	spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+	return rx_done;
+}
+
+/* NAPI poll for RSS RX ring 0: drains up to @budget packets, then
+ * completes NAPI and re-enables this ring's delayed-RX interrupt.
+ */
+static int raeth_poll_rx_rss0(struct napi_struct *napi, int budget)
+{
+	struct END_DEVICE *ei_local =
+	    container_of(napi, struct END_DEVICE, napi_rx_rss0);
+	struct net_device *netdev = ei_local->netdev;
+	unsigned long reg_int_mask_rx;
+	unsigned long flags;
+	int rx_done = 0;
+
+	rx_done = ei_local->ei_eth_recv_rss0(netdev, napi, budget);
+	/* Ring not yet drained: stay scheduled with the IRQ masked. */
+	if (rx_done >= budget)
+		return budget;
+
+	napi_complete(napi);
+
+	spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+	/* Enable RX interrupt */
+	reg_int_mask_rx = sys_reg_read(ei_local->fe_rx_int_enable);
+	sys_reg_write(ei_local->fe_rx_int_enable,
+		      (reg_int_mask_rx | RING0_RX_DLY_INT));
+
+	spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+	return rx_done;
+}
+
+/* NAPI poll for RSS RX ring 1; mirrors raeth_poll_rx_rss0() but
+ * services ring 1 and re-enables RING1_RX_DLY_INT.
+ */
+static int raeth_poll_rx_rss1(struct napi_struct *napi, int budget)
+{
+	struct END_DEVICE *ei_local =
+	    container_of(napi, struct END_DEVICE, napi_rx_rss1);
+	struct net_device *netdev = ei_local->netdev;
+	unsigned long reg_int_mask_rx;
+	unsigned long flags;
+	int rx_done = 0;
+
+	rx_done = ei_local->ei_eth_recv_rss1(netdev, napi, budget);
+	if (rx_done >= budget)
+		return budget;
+
+	napi_complete(napi);
+
+	spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+	/* Enable RX interrupt */
+	reg_int_mask_rx = sys_reg_read(ei_local->fe_rx_int_enable);
+	sys_reg_write(ei_local->fe_rx_int_enable,
+		      (reg_int_mask_rx | RING1_RX_DLY_INT));
+
+	spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+	return rx_done;
+}
+
+/* NAPI poll for RSS RX ring 2; mirrors raeth_poll_rx_rss0() but
+ * services ring 2 and re-enables RING2_RX_DLY_INT.
+ */
+static int raeth_poll_rx_rss2(struct napi_struct *napi, int budget)
+{
+	struct END_DEVICE *ei_local =
+	    container_of(napi, struct END_DEVICE, napi_rx_rss2);
+	struct net_device *netdev = ei_local->netdev;
+	unsigned long reg_int_mask_rx;
+	unsigned long flags;
+	int rx_done = 0;
+
+	rx_done = ei_local->ei_eth_recv_rss2(netdev, napi, budget);
+	if (rx_done >= budget)
+		return budget;
+
+	napi_complete(napi);
+
+	spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+	/* Enable RX interrupt */
+	reg_int_mask_rx = sys_reg_read(ei_local->fe_rx_int_enable);
+	sys_reg_write(ei_local->fe_rx_int_enable,
+		      (reg_int_mask_rx | RING2_RX_DLY_INT));
+
+	spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+	return rx_done;
+}
+
+/* NAPI poll for RSS RX ring 3; mirrors raeth_poll_rx_rss0() but
+ * services ring 3 and re-enables RING3_RX_DLY_INT.
+ */
+static int raeth_poll_rx_rss3(struct napi_struct *napi, int budget)
+{
+	struct END_DEVICE *ei_local =
+	    container_of(napi, struct END_DEVICE, napi_rx_rss3);
+	struct net_device *netdev = ei_local->netdev;
+	unsigned long reg_int_mask_rx;
+	unsigned long flags;
+	int rx_done = 0;
+
+	rx_done = ei_local->ei_eth_recv_rss3(netdev, napi, budget);
+	if (rx_done >= budget)
+		return budget;
+
+	napi_complete(napi);
+
+	spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+	/* Enable RX interrupt */
+	reg_int_mask_rx = sys_reg_read(ei_local->fe_rx_int_enable);
+	sys_reg_write(ei_local->fe_rx_int_enable,
+		      (reg_int_mask_rx | RING3_RX_DLY_INT));
+
+	spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+	return rx_done;
+}
+
+/* RX-only NAPI poll (split TX/RX NAPI mode): drains up to @budget
+ * packets, then completes NAPI and re-enables the RX interrupt mask.
+ */
+static int raeth_poll_rx(struct napi_struct *napi, int budget)
+{
+	struct END_DEVICE *ei_local =
+	    container_of(napi, struct END_DEVICE, napi_rx);
+	struct net_device *netdev = ei_local->netdev;
+	unsigned long reg_int_mask_rx;
+	unsigned long flags;
+	int rx_done = 0;
+
+	rx_done = ei_local->ei_eth_recv(netdev, napi, budget);
+	/* Ring not yet drained: stay scheduled with the IRQ masked. */
+	if (rx_done >= budget)
+		return budget;
+
+	napi_complete(napi);
+
+	spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+	/* Enable RX interrupt */
+	reg_int_mask_rx = sys_reg_read(ei_local->fe_rx_int_enable);
+	sys_reg_write(ei_local->fe_rx_int_enable,
+		      (reg_int_mask_rx | ei_local->rx_mask));
+
+	spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+	return rx_done;
+}
+
+/* TX-only NAPI poll (split TX/RX NAPI mode): acks pending TX causes,
+ * reaps finished TX descriptors, then completes NAPI and re-enables
+ * the TX interrupt mask. Always reports 1 unit of work and completes
+ * unconditionally -- TX housekeeping is not budget-bound here.
+ */
+static int raeth_poll_tx(struct napi_struct *napi, int budget)
+{
+	struct END_DEVICE *ei_local =
+	    container_of(napi, struct END_DEVICE, napi_tx);
+	struct net_device *netdev = ei_local->netdev;
+	unsigned long reg_int_val_tx;
+	unsigned long reg_int_mask_tx;
+	unsigned long flags;
+	int tx_done = 0;
+
+	reg_int_val_tx = sys_reg_read(ei_local->fe_tx_int_status);
+
+	if (reg_int_val_tx & ei_local->tx_mask) {
+		/* Clear TX interrupt status */
+		sys_reg_write(ei_local->fe_tx_int_status, TX_INT_ALL);
+		tx_done = ei_local->ei_xmit_housekeeping(netdev,
+							 num_tx_max_process);
+	}
+
+	napi_complete(napi);
+
+	spin_lock_irqsave(&ei_local->irq_lock, flags);
+	/* Enable TX interrupts */
+	reg_int_mask_tx = sys_reg_read(ei_local->fe_tx_int_enable);
+	sys_reg_write(ei_local->fe_tx_int_enable,
+		      reg_int_mask_tx | ei_local->tx_mask);
+
+	spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+	return 1;
+}
+
+/* Bind the feature-dependent function pointers and interrupt register
+ * addresses: QDMA vs PDMA for the TX and RX paths, and the RX handler
+ * variant (HW-LRO, 4-ring RSS, 2-ring RSS, or the plain single-ring
+ * rt2880_eth_recv path).
+ */
+static void ei_func_register(struct END_DEVICE *ei_local)
+{
+	/* TX handling */
+	if (ei_local->features & FE_QDMA_TX) {
+		ei_local->ei_start_xmit = ei_qdma_start_xmit;
+		ei_local->ei_xmit_housekeeping = ei_qdma_xmit_housekeeping;
+		ei_local->fe_tx_int_status = (void __iomem *)QFE_INT_STATUS;
+		ei_local->fe_tx_int_enable = (void __iomem *)QFE_INT_ENABLE;
+	} else {
+		ei_local->ei_start_xmit = ei_pdma_start_xmit;
+		ei_local->ei_xmit_housekeeping = ei_pdma_xmit_housekeeping;
+		ei_local->fe_tx_int_status =
+		    (void __iomem *)RAETH_FE_INT_STATUS;
+		ei_local->fe_tx_int_enable =
+		    (void __iomem *)RAETH_FE_INT_ENABLE;
+	}
+
+	/* RX handling */
+	if (ei_local->features & FE_QDMA_RX) {
+		ei_local->fe_rx_int_status = (void __iomem *)QFE_INT_STATUS;
+		ei_local->fe_rx_int_enable = (void __iomem *)QFE_INT_ENABLE;
+	} else {
+		ei_local->fe_rx_int_status =
+		    (void __iomem *)RAETH_FE_INT_STATUS;
+		ei_local->fe_rx_int_enable =
+		    (void __iomem *)RAETH_FE_INT_ENABLE;
+	}
+
+	/* HW LRO handling: precedence is LRO, then 4-ring RSS, then
+	 * 2-ring RSS, then the plain single-ring receive path.
+	 */
+	if (ei_local->features & FE_HW_LRO) {
+		ei_local->ei_eth_recv = fe_hw_lro_recv;
+	} else if (ei_local->features & FE_RSS_4RING) {
+		ei_local->ei_eth_recv_rss0 = fe_rss0_recv;
+		ei_local->ei_eth_recv_rss1 = fe_rss1_recv;
+		ei_local->ei_eth_recv_rss2 = fe_rss2_recv;
+		ei_local->ei_eth_recv_rss3 = fe_rss3_recv;
+	} else if (ei_local->features & FE_RSS_2RING) {
+		ei_local->ei_eth_recv_rss0 = fe_rss0_recv;
+		ei_local->ei_eth_recv_rss1 = fe_rss1_recv;
+	} else {
+		ei_local->ei_eth_recv = rt2880_eth_recv;
+	}
+}
+
+/* One-time device setup: reset the frame engine, register the NAPI
+ * pollers matching the enabled feature set, initialize locks, bind
+ * the feature-dependent function pointers, seed the LAN IP string
+ * and, on MT7621, reset the GMAC and initialize the internal switch.
+ * Always returns 0.
+ */
+static int __init ei_init(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+
+	fe_reset();
+
+	if (ei_local->features & FE_INT_NAPI) {
+		/* we run 2 devices on the same DMA ring */
+		/* so we need a dummy device for NAPI to work */
+		init_dummy_netdev(&ei_local->dummy_dev);
+
+		if (ei_local->features & FE_INT_NAPI_TX_RX) {
+			/* Split TX and RX pollers. */
+			netif_napi_add(&ei_local->dummy_dev, &ei_local->napi_rx,
+				       raeth_poll_rx, MTK_NAPI_WEIGHT);
+			netif_napi_add(&ei_local->dummy_dev, &ei_local->napi_tx,
+				       raeth_poll_tx, MTK_NAPI_WEIGHT);
+
+		} else if (ei_local->features & FE_INT_NAPI_RX_ONLY) {
+			/* One poller per enabled RSS ring, or a single
+			 * RX poller without RSS.
+			 */
+			if (ei_local->features & FE_RSS_4RING) {
+				netif_napi_add(&ei_local->dummy_dev,
+					       &ei_local->napi_rx_rss0,
+					       raeth_poll_rx_rss0, MTK_NAPI_WEIGHT);
+				netif_napi_add(&ei_local->dummy_dev,
+					       &ei_local->napi_rx_rss1,
+					       raeth_poll_rx_rss1, MTK_NAPI_WEIGHT);
+				netif_napi_add(&ei_local->dummy_dev,
+					       &ei_local->napi_rx_rss2,
+					       raeth_poll_rx_rss2, MTK_NAPI_WEIGHT);
+				netif_napi_add(&ei_local->dummy_dev,
+					       &ei_local->napi_rx_rss3,
+					       raeth_poll_rx_rss3, MTK_NAPI_WEIGHT);
+			} else if (ei_local->features & FE_RSS_2RING) {
+				netif_napi_add(&ei_local->dummy_dev,
+					       &ei_local->napi_rx_rss0,
+					       raeth_poll_rx_rss0, MTK_NAPI_WEIGHT);
+				netif_napi_add(&ei_local->dummy_dev,
+					       &ei_local->napi_rx_rss1,
+					       raeth_poll_rx_rss1, MTK_NAPI_WEIGHT);
+			} else {
+				netif_napi_add(&ei_local->dummy_dev,
+					       &ei_local->napi_rx,
+					       raeth_poll_rx, MTK_NAPI_WEIGHT);
+			}
+		} else {
+			/* Single combined TX+RX poller. */
+			netif_napi_add(&ei_local->dummy_dev, &ei_local->napi,
+				       raeth_poll_full, MTK_NAPI_WEIGHT);
+		}
+	}
+
+	spin_lock_init(&ei_local->page_lock);
+	spin_lock_init(&ei_local->irq_lock);
+	spin_lock_init(&ei_local->mdio_lock);
+	ether_setup(dev);
+
+	ei_func_register(ei_local);
+
+	/* init my IP */
+	strncpy(ei_local->lan_ip4_addr, FE_DEFAULT_LAN_IP, IP4_ADDR_LEN);
+
+	if (ei_local->chip_name == MT7621_FE) {
+		fe_gmac_reset();
+		fe_sw_init();
+	}
+
+	return 0;
+}
+
+/* Tear down both network devices and the /proc debug entries.
+ * ei_local lives inside dev's private area, so every field needed
+ * later must be read BEFORE free_netdev(dev) releases that memory;
+ * the original code dereferenced ei_local after the free
+ * (use-after-free).
+ */
+static void ei_uninit(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	struct net_device *pseudo_dev = NULL;
+
+	/* Snapshot the pseudo device pointer while ei_local is valid. */
+	if (ei_local->features & FE_GE2_SUPPORT)
+		pseudo_dev = ei_local->pseudo_dev;
+
+	unregister_netdev(dev);
+	free_netdev(dev);
+
+	if (pseudo_dev) {
+		unregister_netdev(pseudo_dev);
+		free_netdev(pseudo_dev);
+	}
+
+	pr_info("Free ei_local and unregister netdev...\n");
+
+	debug_proc_exit();
+}
+/* Ensure the interface has a usable MAC address: generate a random
+ * one when the current address is invalid, then program it into the
+ * hardware via ei_set_mac_addr().
+ */
+static void ei_mac_addr_setting(struct net_device *dev)
+{
+	if (is_valid_ether_addr(dev->dev_addr))
+		goto program;
+
+	/* Invalid (e.g. all-zero) address: fall back to a random MAC. */
+	random_ether_addr(dev->dev_addr);
+	dev->addr_assign_type = NET_ADDR_RANDOM;
+
+program:
+	ei_set_mac_addr(dev, dev->dev_addr);
+}
+
+/* Ensure GMAC2's net_device has a usable MAC address. Unlike the
+ * GMAC1 variant this only fixes up the software copy; no hardware
+ * register is written here.
+ */
+static void ei_mac2_addr_setting(struct net_device *dev)
+{
+	if (is_valid_ether_addr(dev->dev_addr))
+		return;
+
+	/* Invalid address: fall back to a random MAC. */
+	random_ether_addr(dev->dev_addr);
+	dev->addr_assign_type = NET_ADDR_RANDOM;
+}
+
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+/* Seed the software copies of the RX calc indices from the current
+ * hardware ring pointers, so later RX processing can track them from
+ * RAM instead of re-reading the registers each time.
+ */
+static void fe_dma_rx_cal_idx_init(struct END_DEVICE *ei_local)
+{
+	if (unlikely(ei_local->features & FE_QDMA_RX)) {
+		ei_local->rx_calc_idx[0] = sys_reg_read(QRX_CRX_IDX_0);
+	} else { /* PDMA RX */
+		ei_local->rx_calc_idx[0] = sys_reg_read(RX_CALC_IDX0);
+		/* HW LRO and 4-ring RSS use four PDMA RX rings */
+		if (ei_local->features & (FE_HW_LRO | FE_RSS_4RING)) {
+			ei_local->rx_calc_idx[1] = sys_reg_read(RX_CALC_IDX1);
+			ei_local->rx_calc_idx[2] = sys_reg_read(RX_CALC_IDX2);
+			ei_local->rx_calc_idx[3] = sys_reg_read(RX_CALC_IDX3);
+		} else if (ei_local->features & FE_RSS_2RING) {
+			ei_local->rx_calc_idx[1] = sys_reg_read(RX_CALC_IDX1);
+		}
+	}
+}
+#endif
+
+/* Bring up the PDMA-TX / PDMA-RX configuration: wait for the DMA engine
+ * to go idle, initialize the RX ring(s) (plus HW LRO or RSS rings when
+ * those features are enabled), then the TX ring, and finally commit the
+ * PDMA global configuration. Returns 0 on success or the non-zero
+ * error code of the first failing init helper.
+ */
+static inline int ei_init_ptx_prx(struct net_device *dev)
+{
+	int err;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+
+	err = fe_pdma_wait_dma_idle();
+	if (err)
+		return err;
+
+	err = fe_pdma_rx_dma_init(dev);
+	if (err)
+		return err;
+
+	/* The extra RX ring setups are mutually exclusive */
+	if (ei_local->features & FE_HW_LRO) {
+		err = fe_hw_lro_init(dev);
+		if (err)
+			return err;
+	} else if (ei_local->features & FE_RSS_4RING) {
+		err = fe_rss_4ring_init(dev);
+		if (err)
+			return err;
+	} else if (ei_local->features & FE_RSS_2RING) {
+		err = fe_rss_2ring_init(dev);
+		if (err)
+			return err;
+	}
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+	fe_dma_rx_cal_idx_init(ei_local);
+#endif
+
+	err = fe_pdma_tx_dma_init(dev);
+	if (err)
+		return err;
+
+	set_fe_pdma_glo_cfg();
+
+	/* enable RXD prefetch of ADMA */
+	SET_PDMA_LRO_RXD_PREFETCH_EN(ADMA_RXD_PREFETCH_EN |
+				     ADMA_MULTI_RXD_PREFETCH_EN);
+
+	return 0;
+}
+
+/* Bring up the QDMA-TX / PDMA-RX configuration: both DMA engines must
+ * be idle, both RX paths are initialized (QDMA RX then PDMA RX, plus
+ * HW LRO or RSS rings when enabled), TX runs on QDMA, and both global
+ * configurations are committed. Returns 0 on success or the non-zero
+ * error code of the first failing init helper.
+ */
+static inline int ei_init_qtx_prx(struct net_device *dev)
+{
+	int err;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+
+	err = fe_pdma_wait_dma_idle();
+	if (err)
+		return err;
+
+	err = fe_qdma_wait_dma_idle();
+	if (err)
+		return err;
+
+	err = fe_qdma_rx_dma_init(dev);
+	if (err)
+		return err;
+
+	err = fe_pdma_rx_dma_init(dev);
+	if (err)
+		return err;
+
+	/* The extra RX ring setups are mutually exclusive */
+	if (ei_local->features & FE_HW_LRO) {
+		err = fe_hw_lro_init(dev);
+		if (err)
+			return err;
+	} else if (ei_local->features & FE_RSS_4RING) {
+		err = fe_rss_4ring_init(dev);
+		if (err)
+			return err;
+	} else if (ei_local->features & FE_RSS_2RING) {
+		err = fe_rss_2ring_init(dev);
+		if (err)
+			return err;
+	}
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+	fe_dma_rx_cal_idx_init(ei_local);
+#endif
+
+	err = fe_qdma_tx_dma_init(dev);
+	if (err)
+		return err;
+
+	set_fe_pdma_glo_cfg();
+	set_fe_qdma_glo_cfg();
+
+	/* enable RXD prefetch of ADMA */
+	SET_PDMA_LRO_RXD_PREFETCH_EN(ADMA_RXD_PREFETCH_EN |
+				     ADMA_MULTI_RXD_PREFETCH_EN);
+
+	return 0;
+}
+
+/* Bring up the QDMA-TX / QDMA-RX configuration: wait for the QDMA
+ * engine to go idle, initialize the RX and TX rings, and commit the
+ * QDMA global configuration. Returns 0 on success or the non-zero
+ * error code of the first failing init helper.
+ */
+static inline int ei_init_qtx_qrx(struct net_device *dev)
+{
+	int err;
+	/* Only referenced when CONFIG_RAETH_RW_PDMAPTR_FROM_VAR is set;
+	 * annotate to avoid an unused-variable warning otherwise.
+	 */
+	struct END_DEVICE *ei_local __maybe_unused = netdev_priv(dev);
+
+	err = fe_qdma_wait_dma_idle();
+	if (err)
+		return err;
+
+	err = fe_qdma_rx_dma_init(dev);
+	if (err)
+		return err;
+
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+	fe_dma_rx_cal_idx_init(ei_local);
+#endif
+
+	err = fe_qdma_tx_dma_init(dev);
+	if (err)
+		return err;
+
+	set_fe_qdma_glo_cfg();
+
+	return 0;
+}
+
+/* Select and run the DMA bring-up matching the enabled feature flags:
+ * QDMA TX+RX, QDMA TX with PDMA RX, or plain PDMA TX+RX.
+ */
+static int ei_init_dma(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	unsigned int qdma_tx = ei_local->features & FE_QDMA_TX;
+	unsigned int qdma_rx = ei_local->features & FE_QDMA_RX;
+
+	if (qdma_tx && qdma_rx)
+		return ei_init_qtx_qrx(dev);
+	if (qdma_tx)
+		return ei_init_qtx_prx(dev);
+	return ei_init_ptx_prx(dev);
+}
+
+/* Release the TX/RX DMA rings allocated by ei_init_dma(), mirroring
+ * its feature-based selection of QDMA vs PDMA paths and the optional
+ * LRO/RSS RX rings.
+ */
+static void ei_deinit_dma(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+
+	if (ei_local->features & FE_QDMA_TX) {
+		fe_qdma_tx_dma_deinit(dev);
+		fe_qdma_rx_dma_deinit(dev);
+	} else {
+		fe_pdma_tx_dma_deinit(dev);
+	}
+
+	/* PDMA RX was initialized in every mode except pure QDMA RX */
+	if (!(ei_local->features & FE_QDMA_RX))
+		fe_pdma_rx_dma_deinit(dev);
+
+	if (ei_local->features & FE_HW_LRO)
+		fe_hw_lro_deinit(dev);
+	else if (ei_local->features & FE_RSS_4RING)
+		fe_rss_4ring_deinit(dev);
+	else if (ei_local->features & FE_RSS_2RING)
+		fe_rss_2ring_deinit(dev);
+
+	pr_info("Free TX/RX Ring Memory!\n");
+}
+
+/* MT7623 PSE reset workaround: assert a CDM/PDMA reset, wait for the
+ * ADMA RX path to drain (polled via ADMA_RX_DBG0), then release the
+ * reset bits. Runs under page_lock with IRQs off for the whole
+ * sequence.
+ */
+void fe_do_reset(void)
+{
+	u32 adma_rx_dbg0_r = 0;
+	u32 dbg_rx_curr_state, rx_fifo_wcnt;
+	u32 dbg_cdm_lro_rinf_afifo_rempty, dbg_cdm_eof_rdy_afifo_empty;
+	u32 reg_tmp, loop_count;
+	unsigned long flags;
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	ei_local->fe_reset_times++;
+	/* do CDM/PDMA reset */
+	pr_crit("[%s] CDM/PDMA reset (%d times)!!!\n", __func__,
+		ei_local->fe_reset_times);
+	spin_lock_irqsave(&ei_local->page_lock, flags);
+	/* assert reset: FE_GLO_MISC bit 0, then ADMA_LRO_CTRL_DW3 bit 14 */
+	reg_tmp = sys_reg_read(FE_GLO_MISC);
+	reg_tmp |= 0x1;
+	sys_reg_write(FE_GLO_MISC, reg_tmp);
+	mdelay(10);
+	reg_tmp = sys_reg_read(ADMA_LRO_CTRL_DW3);
+	reg_tmp |= (0x1 << 14);
+	sys_reg_write(ADMA_LRO_CTRL_DW3, reg_tmp);
+	loop_count = 0;
+	/* Poll debug fields until the RX FIFO is empty, both CDM FIFOs
+	 * report empty, and the RX state machine is idle (0x17 or 0x00);
+	 * give up after 100 iterations (~1s).
+	 */
+	do {
+		adma_rx_dbg0_r = sys_reg_read(ADMA_RX_DBG0);
+		dbg_rx_curr_state = (adma_rx_dbg0_r >> 16) & 0x7f;
+		rx_fifo_wcnt = (adma_rx_dbg0_r >> 8) & 0x3f;
+		dbg_cdm_lro_rinf_afifo_rempty = (adma_rx_dbg0_r >> 7) & 0x1;
+		dbg_cdm_eof_rdy_afifo_empty = (adma_rx_dbg0_r >> 6) & 0x1;
+		loop_count++;
+		if (loop_count >= 100) {
+			pr_err("[%s] loop_count timeout!!!\n", __func__);
+			break;
+		}
+		mdelay(10);
+	} while (((dbg_rx_curr_state != 0x17) && (dbg_rx_curr_state != 0x00)) ||
+		 (rx_fifo_wcnt != 0) ||
+		 (!dbg_cdm_lro_rinf_afifo_rempty) ||
+		 (!dbg_cdm_eof_rdy_afifo_empty));
+	/* de-assert the reset bits in reverse order */
+	reg_tmp = sys_reg_read(ADMA_LRO_CTRL_DW3);
+	reg_tmp &= 0xffffbfff;
+	sys_reg_write(ADMA_LRO_CTRL_DW3, reg_tmp);
+	reg_tmp = sys_reg_read(FE_GLO_MISC);
+	reg_tmp &= 0xfffffffe;
+	sys_reg_write(FE_GLO_MISC, reg_tmp);
+	spin_unlock_irqrestore(&ei_local->page_lock, flags);
+}
+
+/* MT7623 PSE reset workaround: kthread that periodically samples
+ * ADMA_RX_DBG0 and triggers fe_do_reset() when the debug fields match
+ * the PSE P0 hang signature (FIFOs empty/full combination with an idle
+ * RX state machine). Polls every FE_RESET_POLLING_MS until stopped.
+ */
+static int fe_reset_thread(void *data)
+{
+	u32 adma_rx_dbg0_r = 0;
+	u32 dbg_rx_curr_state, rx_fifo_wcnt;
+	u32 dbg_cdm_lro_rinf_afifo_rempty, dbg_cdm_eof_rdy_afifo_empty;
+
+	pr_info("%s called\n", __func__);
+
+	for (;;) {
+		adma_rx_dbg0_r = sys_reg_read(ADMA_RX_DBG0);
+		dbg_rx_curr_state = (adma_rx_dbg0_r >> 16) & 0x7f;
+		rx_fifo_wcnt = (adma_rx_dbg0_r >> 8) & 0x3f;
+		dbg_cdm_lro_rinf_afifo_rempty = (adma_rx_dbg0_r >> 7) & 0x1;
+		dbg_cdm_eof_rdy_afifo_empty = (adma_rx_dbg0_r >> 6) & 0x1;
+
+		/* check if PSE P0 hang */
+		if (dbg_cdm_lro_rinf_afifo_rempty &&
+		    dbg_cdm_eof_rdy_afifo_empty &&
+		    (rx_fifo_wcnt & 0x20) &&
+		    ((dbg_rx_curr_state == 0x17) ||
+		     (dbg_rx_curr_state == 0x00))) {
+			fe_do_reset();
+		}
+
+		msleep_interruptible(FE_RESET_POLLING_MS);
+		if (kthread_should_stop())
+			break;
+	}
+
+	pr_info("%s leaved\n", __func__);
+	return 0;
+}
+
+/* Kthread that polls PHY address 0 over MDIO every PHY_POLLING_MS and
+ * mirrors the negotiated link state (flow control, duplex, speed, EEE)
+ * into the MAC control register at offset 0x200 — presumably GMAC2,
+ * given the set_ge2_force_link_down() call on link loss; confirm
+ * against the register map.
+ */
+static int phy_polling_thread(void *data)
+{
+	unsigned int link_status, link_speed, duplex;
+	unsigned int local_eee, lp_eee;
+	unsigned int fc_phy, fc_lp;
+	unsigned int val_tmp;
+
+	pr_info("%s called\n", __func__);
+	val_tmp = 1;
+	for (;;) {
+		/* BMSR bit 2: link status */
+		mii_mgr_read(0x0, 0x1, &link_status);
+		link_status = (link_status >> 2) & 0x1;
+		if (link_status) {
+			/* Pause bits (0xc00) must match in advert (reg 4)
+			 * and link-partner ability (reg 5) to enable FC
+			 */
+			mii_mgr_read(0x0, 0x4, &fc_phy);
+			mii_mgr_read(0x0, 0x5, &fc_lp);
+			if ((fc_phy & 0xc00) == (fc_lp & 0xc00))
+				val_tmp = val_tmp | 0x30;
+			else
+				val_tmp = val_tmp & (~0x30);
+			/* resolved speed/duplex from CL45 dev 0x1e reg 0xa2 */
+			mii_mgr_read_cl45(0, 0x1e, 0xa2, &link_speed);
+			duplex = link_speed & 0x20;
+			if (duplex)
+				val_tmp = val_tmp | 0x2;
+			else
+				val_tmp = val_tmp & (~0x2);
+			link_speed = link_speed & 0xe;
+			val_tmp = val_tmp & (~0xc);
+			if (link_speed == 0x04)
+				val_tmp = val_tmp | (0x4);
+			else if (link_speed == 0x08)
+				val_tmp = val_tmp | (0x8);
+			/* EEE: local advert (7.60) vs LP ability (7.61) */
+			mii_mgr_read_cl45(0, 0x7, 0x3c, &local_eee);
+			mii_mgr_read_cl45(0, 0x7, 0x3d, &lp_eee);
+			if ((local_eee & 0x4) == 4 && (lp_eee & 0x4) == 4)/*1g eee*/
+				val_tmp = val_tmp | 0x80;
+			if ((local_eee & 0x2) == 2 && ((lp_eee & 0x2) == 2))/*100m eee*/
+				val_tmp = val_tmp | 0x40;
+			val_tmp = val_tmp & 0xff;
+			sys_reg_write(ETHDMASYS_ETH_MAC_BASE + 0x200, 0x2105e300 | val_tmp);
+		} else {
+			/*force link down*/
+			set_ge2_force_link_down();
+		}
+
+		msleep_interruptible(PHY_POLLING_MS);
+		if (kthread_should_stop())
+			break;
+	}
+
+	pr_info("%s leaved\n", __func__);
+	return 0;
+}
+
+/* Dead code kept for reference: combined NAPI interrupt handlers.
+ * fe_int_enable() wires up the separate TX/RX handlers below instead;
+ * consider deleting this block once the split handlers are proven.
+ */
+#if 0
+static irqreturn_t ei_interrupt_napi_rx_only(int irq, void *dev_id)
+{
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	unsigned int reg_int_mask;
+	unsigned long flags;
+
+	if (likely(napi_schedule_prep(&ei_local->napi_rx))) {
+		spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+		/* Clear RX interrupt status */
+		sys_reg_write(ei_local->fe_rx_int_status, RX_INT_ALL);
+
+		/* Disable RX interrupt */
+		reg_int_mask = sys_reg_read(ei_local->fe_rx_int_enable);
+		sys_reg_write(ei_local->fe_rx_int_enable,
+			      reg_int_mask & ~(RX_INT_ALL));
+
+		spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+		__napi_schedule(&ei_local->napi_rx);
+	}
+
+	return IRQ_HANDLED;
+}
+static irqreturn_t ei_interrupt_napi(int irq, void *dev_id)
+{
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	unsigned long flags;
+
+	if (likely(napi_schedule_prep(&ei_local->napi))) {
+		spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+		/* Disable TX interrupt */
+		sys_reg_write(ei_local->fe_tx_int_enable, 0);
+		/* Disable RX interrupt */
+		sys_reg_write(ei_local->fe_rx_int_enable, 0);
+
+		spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+		__napi_schedule(&ei_local->napi);
+	}
+
+	return IRQ_HANDLED;
+}
+#endif
+/* Shared-IRQ handler for the split-NAPI mode: masks TX and RX
+ * interrupts independently and schedules the corresponding NAPI
+ * context; the poll routines re-enable the masks when done.
+ */
+static irqreturn_t ei_interrupt_napi_sep(int irq, void *dev_id)
+{
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	unsigned int reg_int_mask;
+	unsigned long flags;
+
+	if (likely(napi_schedule_prep(&ei_local->napi_tx))) {
+		spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+		/* Disable TX interrupt */
+		reg_int_mask = sys_reg_read(ei_local->fe_tx_int_enable);
+		sys_reg_write(ei_local->fe_tx_int_enable,
+			      reg_int_mask & ~(TX_INT_ALL));
+
+		spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+		__napi_schedule(&ei_local->napi_tx);
+	}
+
+	if (likely(napi_schedule_prep(&ei_local->napi_rx))) {
+		spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+		/* Disable RX interrupt */
+		reg_int_mask = sys_reg_read(ei_local->fe_rx_int_enable);
+		sys_reg_write(ei_local->fe_rx_int_enable,
+			      reg_int_mask & ~(RX_INT_ALL));
+
+		spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+		__napi_schedule(&ei_local->napi_rx);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* Legacy (non-NAPI) combined interrupt handler: does TX housekeeping
+ * inline, acks PDMA/QDMA status, and defers RX processing to a tasklet
+ * or workqueue with RX interrupts masked. RX work is held back while
+ * the TX ring is full (pending_recv) and resumed on a later interrupt.
+ */
+static irqreturn_t ei_interrupt(int irq, void *dev_id)
+{
+	unsigned long reg_int_val = 0;
+	unsigned long reg_int_val_p = 0;
+	unsigned long reg_int_val_q = 0;
+	unsigned long reg_int_mask = 0;
+	unsigned int recv = 0;
+
+	unsigned int transmit __maybe_unused = 0;
+	unsigned long flags;
+
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local;
+
+	/* Validate dev_id before deriving ei_local from it, matching
+	 * the other interrupt handlers in this file.
+	 */
+	if (!dev) {
+		pr_err("net_interrupt(): irq %x for unknown device.\n",
+		       IRQ_ENET0);
+		return IRQ_NONE;
+	}
+	ei_local = netdev_priv(dev);
+
+	spin_lock_irqsave(&ei_local->irq_lock, flags);
+	reg_int_val_p = sys_reg_read(RAETH_FE_INT_STATUS);
+
+	if (ei_local->features & FE_QDMA)
+		reg_int_val_q = sys_reg_read(QFE_INT_STATUS);
+	reg_int_val = reg_int_val_p | reg_int_val_q;
+
+	if (reg_int_val & ei_local->rx_mask)
+		recv = 1;
+	if (reg_int_val & ei_local->tx_mask)
+		transmit = 1;
+	/* Ack QDMA interrupt status */
+	if (ei_local->features & FE_QDMA)
+		sys_reg_write(QFE_INT_STATUS, reg_int_val_q);
+
+	ei_local->ei_xmit_housekeeping(dev, num_tx_max_process);
+
+	/* Ack PDMA interrupt status */
+	sys_reg_write(RAETH_FE_INT_STATUS, reg_int_val_p);
+
+	if (((recv == 1) || (pending_recv == 1)) &&
+	    (ei_local->tx_ring_full == 0)) {
+		/* Mask RX interrupts until the bottom half has drained */
+		reg_int_mask = sys_reg_read(RAETH_FE_INT_ENABLE);
+
+		sys_reg_write(RAETH_FE_INT_ENABLE,
+			      reg_int_mask & ~(ei_local->rx_mask));
+		/*QDMA RX*/
+		if (ei_local->features & FE_QDMA) {
+			reg_int_mask = sys_reg_read(QFE_INT_ENABLE);
+			if (ei_local->features & FE_DLY_INT)
+				sys_reg_write(QFE_INT_ENABLE,
+					      reg_int_mask & ~(RX_DLY_INT));
+			else
+				sys_reg_write(QFE_INT_ENABLE,
+					      reg_int_mask & ~(RX_DONE_INT0 |
+							       RX_DONE_INT1 |
+							       RX_DONE_INT2 |
+							       RX_DONE_INT3));
+		}
+
+		pending_recv = 0;
+
+		if (ei_local->features & FE_INT_WORKQ)
+			schedule_work(&ei_local->rx_wq);
+		else
+			tasklet_hi_schedule(&ei_local->rx_tasklet);
+	} else if (recv == 1 && ei_local->tx_ring_full == 1) {
+		/* remember the RX event; replayed once TX has space */
+		pending_recv = 1;
+	}
+	spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+/* RX IRQ handler for the single-NAPI mode: on RX work, masks both TX
+ * and RX interrupts and schedules the combined NAPI context; otherwise
+ * acks whatever non-TX status caused the interrupt.
+ */
+static irqreturn_t ei_rx_interrupt_napi(int irq, void *dev_id)
+{
+	struct net_device *netdev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local;
+	unsigned int reg_int_val;
+	unsigned long flags;
+
+	if (unlikely(!netdev)) {
+		pr_info("net_interrupt(): irq %x for unknown device.\n", irq);
+		return IRQ_NONE;
+	}
+	ei_local = netdev_priv(netdev);
+
+	reg_int_val = sys_reg_read(ei_local->fe_rx_int_status);
+	if (likely(reg_int_val & RX_INT_ALL)) {
+		if (likely(napi_schedule_prep(&ei_local->napi))) {
+			spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+			/* Disable RX interrupt */
+			sys_reg_write(ei_local->fe_rx_int_enable, 0);
+			/* Disable TX interrupt */
+			sys_reg_write(ei_local->fe_tx_int_enable, 0);
+
+			spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+			__napi_schedule(&ei_local->napi);
+		}
+	} else {
+		spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+		/* Ack other interrupt status except TX irqs */
+		reg_int_val &= ~(TX_INT_ALL);
+		sys_reg_write(ei_local->fe_rx_int_status, reg_int_val);
+
+		spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* RX IRQ handler for RSS group 0 (rings 0 and 1 sharing one IRQ line
+ * in 4-ring mode): per pending ring, masks and acks that ring's
+ * interrupt under irq_lock, then schedules the matching NAPI context.
+ */
+static irqreturn_t ei_rx_interrupt_napi_g0(int irq, void *dev_id)
+{
+	struct net_device *netdev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local;
+	unsigned int reg_int_val, reg_int_val_0, reg_int_val_1, reg_int_mask;
+	unsigned long flags;
+
+	if (unlikely(!netdev)) {
+		pr_info("net_interrupt(): irq %x for unknown device.\n", irq);
+		return IRQ_NONE;
+	}
+	ei_local = netdev_priv(netdev);
+
+	reg_int_val = sys_reg_read(ei_local->fe_rx_int_status);
+	reg_int_val_0 = reg_int_val & RSS_RX_RING0;
+	reg_int_val_1 = reg_int_val & RSS_RX_RING1;
+	if (likely(reg_int_val_0)) {
+		spin_lock_irqsave(&ei_local->irq_lock, flags);
+		/* Disable RX interrupt */
+		reg_int_mask = sys_reg_read(ei_local->fe_rx_int_enable);
+		sys_reg_write(ei_local->fe_rx_int_enable, reg_int_mask & ~(RSS_RX_RING0));
+		/* Clear RX interrupt status */
+		sys_reg_write(ei_local->fe_rx_int_status, RSS_RX_RING0);
+
+		spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+	}
+	if (likely(reg_int_val_1)) {
+		spin_lock_irqsave(&ei_local->irq_lock, flags);
+		/* Disable RX interrupt */
+		reg_int_mask = sys_reg_read(ei_local->fe_rx_int_enable);
+		sys_reg_write(ei_local->fe_rx_int_enable, reg_int_mask & ~(RSS_RX_RING1));
+		/* Clear RX interrupt status */
+		sys_reg_write(ei_local->fe_rx_int_status, RSS_RX_RING1);
+		spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+	}
+	/* Schedule NAPI after both rings are masked/acked */
+	if (likely(reg_int_val_0)) {
+		if (likely(napi_schedule_prep(&ei_local->napi_rx_rss0)))
+			__napi_schedule(&ei_local->napi_rx_rss0);
+	}
+
+	if (likely(reg_int_val_1)) {
+		if (likely(napi_schedule_prep(&ei_local->napi_rx_rss1)))
+			__napi_schedule(&ei_local->napi_rx_rss1);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* RX IRQ handler for RSS ring 0 (2-ring mode): mask and ack the ring's
+ * interrupt under irq_lock, then hand the ring to its NAPI context.
+ */
+static irqreturn_t ei_rx_interrupt_napi_rss0(int irq, void *dev_id)
+{
+	struct net_device *netdev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local;
+	unsigned int ring0_status, int_mask;
+	unsigned long flags;
+
+	if (unlikely(!netdev)) {
+		pr_info("net_interrupt(): irq %x for unknown device.\n", irq);
+		return IRQ_NONE;
+	}
+	ei_local = netdev_priv(netdev);
+
+	ring0_status = sys_reg_read(ei_local->fe_rx_int_status) & RSS_RX_RING0;
+	if (likely(ring0_status)) {
+		spin_lock_irqsave(&ei_local->irq_lock, flags);
+		/* Mask ring-0 RX interrupt until NAPI poll re-enables it */
+		int_mask = sys_reg_read(ei_local->fe_rx_int_enable);
+		sys_reg_write(ei_local->fe_rx_int_enable,
+			      int_mask & ~(RSS_RX_RING0));
+		/* Ack ring-0 RX interrupt status */
+		sys_reg_write(ei_local->fe_rx_int_status, RSS_RX_RING0);
+
+		spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+		if (likely(napi_schedule_prep(&ei_local->napi_rx_rss0)))
+			__napi_schedule(&ei_local->napi_rx_rss0);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* RX IRQ handler for RSS ring 1 (2-ring mode): mask and ack the ring's
+ * interrupt under irq_lock, then hand the ring to its NAPI context.
+ */
+static irqreturn_t ei_rx_interrupt_napi_rss1(int irq, void *dev_id)
+{
+	struct net_device *netdev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local;
+	unsigned int ring1_status, int_mask;
+	unsigned long flags;
+
+	if (unlikely(!netdev)) {
+		pr_info("net_interrupt(): irq %x for unknown device.\n", irq);
+		return IRQ_NONE;
+	}
+	ei_local = netdev_priv(netdev);
+
+	ring1_status = sys_reg_read(ei_local->fe_rx_int_status) & RSS_RX_RING1;
+	if (likely(ring1_status)) {
+		spin_lock_irqsave(&ei_local->irq_lock, flags);
+		/* Mask ring-1 RX interrupt until NAPI poll re-enables it */
+		int_mask = sys_reg_read(ei_local->fe_rx_int_enable);
+		sys_reg_write(ei_local->fe_rx_int_enable,
+			      int_mask & ~(RSS_RX_RING1));
+		/* Ack ring-1 RX interrupt status */
+		sys_reg_write(ei_local->fe_rx_int_status, RSS_RX_RING1);
+		spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+		if (likely(napi_schedule_prep(&ei_local->napi_rx_rss1)))
+			__napi_schedule(&ei_local->napi_rx_rss1);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* RX IRQ handler for RSS group 1 (rings 2 and 3 sharing one IRQ line
+ * in 4-ring mode). Note: the _0/_1 locals here track rings 2/3, not
+ * 0/1 — the names are kept symmetric with the g0 handler.
+ */
+static irqreturn_t ei_rx_interrupt_napi_g1(int irq, void *dev_id)
+{
+	struct net_device *netdev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local;
+	unsigned int reg_int_val, reg_int_val_0, reg_int_val_1, reg_int_mask;
+	unsigned long flags;
+
+	if (unlikely(!netdev)) {
+		pr_info("net_interrupt(): irq %x for unknown device.\n", irq);
+		return IRQ_NONE;
+	}
+	ei_local = netdev_priv(netdev);
+
+	reg_int_val = sys_reg_read(ei_local->fe_rx_int_status);
+	reg_int_val_0 = reg_int_val & RSS_RX_RING2;
+	reg_int_val_1 = reg_int_val & RSS_RX_RING3;
+	if (likely(reg_int_val_0)) {
+		spin_lock_irqsave(&ei_local->irq_lock, flags);
+		/* Disable RX interrupt */
+		reg_int_mask = sys_reg_read(ei_local->fe_rx_int_enable);
+		sys_reg_write(ei_local->fe_rx_int_enable, reg_int_mask & ~(RSS_RX_RING2));
+		/* Clear RX interrupt status */
+		sys_reg_write(ei_local->fe_rx_int_status, RSS_RX_RING2);
+		spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+	}
+	if (likely(reg_int_val_1)) {
+		spin_lock_irqsave(&ei_local->irq_lock, flags);
+		/* Disable RX interrupt */
+		reg_int_mask = sys_reg_read(ei_local->fe_rx_int_enable);
+		sys_reg_write(ei_local->fe_rx_int_enable, reg_int_mask & ~(RSS_RX_RING3));
+		/* Clear RX interrupt status */
+		sys_reg_write(ei_local->fe_rx_int_status, RSS_RX_RING3);
+		spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+	}
+	/* Schedule NAPI after both rings are masked/acked */
+	if (likely(reg_int_val_0)) {
+		if (likely(napi_schedule_prep(&ei_local->napi_rx_rss2)))
+			__napi_schedule(&ei_local->napi_rx_rss2);
+	}
+
+	if (likely(reg_int_val_1)) {
+		if (likely(napi_schedule_prep(&ei_local->napi_rx_rss3)))
+			__napi_schedule(&ei_local->napi_rx_rss3);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* RX IRQ handler for the split-NAPI mode: on RX work, acks and masks
+ * all RX interrupts and schedules the RX NAPI context; otherwise acks
+ * whatever non-TX status caused the interrupt.
+ */
+static irqreturn_t ei_rx_interrupt_napi_sep(int irq, void *dev_id)
+{
+	struct net_device *netdev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local;
+	unsigned int reg_int_val, reg_int_mask;
+	unsigned long flags;
+
+	if (unlikely(!netdev)) {
+		pr_info("net_interrupt(): irq %x for unknown device.\n", irq);
+		return IRQ_NONE;
+	}
+	ei_local = netdev_priv(netdev);
+
+	reg_int_val = sys_reg_read(ei_local->fe_rx_int_status);
+	if (likely(reg_int_val & RX_INT_ALL)) {
+		if (likely(napi_schedule_prep(&ei_local->napi_rx))) {
+			spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+			/* Clear RX interrupt status */
+			sys_reg_write(ei_local->fe_rx_int_status, RX_INT_ALL);
+
+			/* Disable RX interrupt */
+			reg_int_mask = sys_reg_read(ei_local->fe_rx_int_enable);
+			sys_reg_write(ei_local->fe_rx_int_enable,
+				      reg_int_mask & ~(RX_INT_ALL));
+
+			spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+			__napi_schedule(&ei_local->napi_rx);
+		}
+	} else {
+		spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+		/* Ack other interrupt status except TX irqs */
+		reg_int_val &= ~(TX_INT_ALL);
+		sys_reg_write(ei_local->fe_rx_int_status, reg_int_val);
+
+		spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* Legacy (non-NAPI) RX interrupt handler: acks RX status and defers
+ * processing to a tasklet or workqueue with RX interrupts masked.
+ * While the TX ring is full the RX event is only latched in
+ * pending_recv and replayed on a later interrupt.
+ */
+static irqreturn_t ei_rx_interrupt(int irq, void *dev_id)
+{
+	unsigned long reg_int_val;
+	unsigned long reg_int_mask;
+	unsigned int recv = 0;
+	unsigned long flags;
+
+	struct net_device *netdev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local;
+
+	if (unlikely(!netdev)) {
+		pr_info("net_interrupt(): irq %x for unknown device.\n", irq);
+		return IRQ_NONE;
+	}
+	ei_local = netdev_priv(netdev);
+
+	spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+	reg_int_val = sys_reg_read(ei_local->fe_rx_int_status);
+	if (reg_int_val & RX_INT_ALL)
+		recv = 1;
+
+	/* Clear RX interrupt status */
+	sys_reg_write(ei_local->fe_rx_int_status, RX_INT_ALL);
+
+	if (likely(((recv == 1) || (pending_recv == 1)) &&
+		   (ei_local->tx_ring_full == 0))) {
+		reg_int_mask = sys_reg_read(ei_local->fe_rx_int_enable);
+		/* Disable RX interrupt */
+		sys_reg_write(ei_local->fe_rx_int_enable,
+			      reg_int_mask & ~(RX_INT_ALL));
+		pending_recv = 0;
+
+		if (likely(ei_local->features & FE_INT_TASKLET))
+			tasklet_hi_schedule(&ei_local->rx_tasklet);
+		else
+			schedule_work(&ei_local->rx_wq);
+	} else if (recv == 1 && ei_local->tx_ring_full == 1) {
+		/* remember the RX event; replayed once TX has space */
+		pending_recv = 1;
+	}
+
+	spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+/* TX IRQ handler for the single-NAPI mode: on TX work, masks both TX
+ * and RX interrupts and schedules the combined NAPI context; otherwise
+ * acks whatever non-RX status caused the interrupt.
+ */
+static irqreturn_t ei_tx_interrupt_napi(int irq, void *dev_id)
+{
+	struct net_device *netdev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local;
+	unsigned long flags;
+	unsigned int reg_int_val;
+
+	if (unlikely(!netdev)) {
+		pr_info("net_interrupt(): irq %x for unknown device.\n", irq);
+		return IRQ_NONE;
+	}
+	ei_local = netdev_priv(netdev);
+
+	reg_int_val = sys_reg_read(ei_local->fe_tx_int_status);
+	if (likely(reg_int_val & TX_INT_ALL)) {
+		if (likely(napi_schedule_prep(&ei_local->napi))) {
+			spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+			/* Disable TX interrupt */
+			sys_reg_write(ei_local->fe_tx_int_enable, 0);
+			/* Disable RX interrupt */
+			sys_reg_write(ei_local->fe_rx_int_enable, 0);
+
+			spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+			__napi_schedule(&ei_local->napi);
+		}
+	} else {
+		spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+		/* Ack other interrupt status except RX irqs */
+		reg_int_val &= ~(RX_INT_ALL);
+		sys_reg_write(ei_local->fe_tx_int_status, reg_int_val);
+
+		spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* TX IRQ handler for the split-NAPI mode: on TX work, masks only the
+ * TX interrupts and schedules the TX NAPI context; otherwise acks
+ * whatever non-RX status caused the interrupt.
+ */
+static irqreturn_t ei_tx_interrupt_napi_sep(int irq, void *dev_id)
+{
+	struct net_device *netdev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local;
+	unsigned long flags;
+	unsigned int reg_int_val, reg_int_mask;
+
+	if (unlikely(!netdev)) {
+		pr_info("net_interrupt(): irq %x for unknown device.\n", irq);
+		return IRQ_NONE;
+	}
+	ei_local = netdev_priv(netdev);
+
+	reg_int_val = sys_reg_read(ei_local->fe_tx_int_status);
+	if (likely(reg_int_val & TX_INT_ALL)) {
+		if (likely(napi_schedule_prep(&ei_local->napi_tx))) {
+			spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+			/* Disable TX interrupt */
+			reg_int_mask = sys_reg_read(ei_local->fe_tx_int_enable);
+			sys_reg_write(ei_local->fe_tx_int_enable,
+				      reg_int_mask & ~(TX_INT_ALL));
+
+			spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+			__napi_schedule(&ei_local->napi_tx);
+		}
+	} else {
+		spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+		/* Ack other interrupt status except RX irqs */
+		reg_int_val &= ~(RX_INT_ALL);
+		sys_reg_write(ei_local->fe_tx_int_status, reg_int_val);
+
+		spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* Legacy (non-NAPI) TX interrupt handler: masks TX interrupts, acks
+ * the status, runs TX housekeeping inline under irq_lock, then
+ * re-enables the TX mask.
+ */
+static irqreturn_t ei_tx_interrupt(int irq, void *dev_id)
+{
+	struct net_device *netdev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local;
+	unsigned long flags;
+	unsigned long reg_int_val, reg_int_mask;
+
+	if (unlikely(!netdev)) {
+		pr_info("net_interrupt(): irq %x for unknown device.\n", irq);
+		return IRQ_NONE;
+	}
+	ei_local = netdev_priv(netdev);
+
+	spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+	reg_int_val = sys_reg_read(ei_local->fe_tx_int_status);
+
+	if (likely(reg_int_val & TX_INT_ALL)) {
+		/* Disable TX interrupt */
+		reg_int_mask = sys_reg_read(ei_local->fe_tx_int_enable);
+		sys_reg_write(ei_local->fe_tx_int_enable,
+			      reg_int_mask & ~(TX_INT_ALL));
+		/* Clear TX interrupt status */
+		sys_reg_write(ei_local->fe_tx_int_status, TX_INT_ALL);
+		ei_local->ei_xmit_housekeeping(netdev, num_tx_max_process);
+
+		/* Enable TX interrupt */
+		reg_int_mask = sys_reg_read(ei_local->fe_tx_int_enable);
+		sys_reg_write(ei_local->fe_tx_int_enable,
+			      reg_int_mask | ei_local->tx_mask);
+	}
+
+	spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+/* Dead code kept for reference: SGMII link-change handler applying a
+ * flow-control ECO only for 100/10 Mbps links. Its request_irq call is
+ * commented out in fe_int_enable(); consider deleting this block.
+ */
+#if 0
+static irqreturn_t ei_fe_interrupt(int irq, void *dev_id)
+{
+	struct net_device *netdev = (struct net_device *)dev_id;
+	struct END_DEVICE *ei_local;
+	unsigned long flags;
+	unsigned int reg_val;
+	unsigned int speed_mode;
+
+	if (unlikely(!netdev)) {
+		pr_info("net_interrupt(): irq %x for unknown device.\n", irq);
+		return IRQ_NONE;
+	}
+	ei_local = netdev_priv(netdev);
+
+	spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+	/* not to apply SGMII FC ECO for 100/10 */
+	if (ei_local->architecture & GE1_SGMII_AN) {
+		/* disable fe int */
+		sys_reg_write(FE_INT_ENABLE2, 0);
+		sys_reg_write(FE_INT_STATUS2, MAC1_LINK);
+		reg_val = sys_reg_read(ethdma_mac_base + 0x108);
+		if (reg_val & 0x1) {
+			speed_mode = (reg_val & 0x8) >> 3;
+			/* speed_mode: 0 for 100/10; 1 for else */
+			reg_val = sys_reg_read(ethdma_mac_base + 0x8);
+			if (speed_mode == 0)
+				reg_val |= 1 << 7;
+			else if (speed_mode == 1)
+				reg_val &= ~(1 << 7);
+			sys_reg_write(ethdma_mac_base + 0x8, reg_val);
+		}
+		sys_reg_write(FE_INT_ENABLE2, MAC1_LINK);
+	} else if (ei_local->architecture & GE2_SGMII_AN) {
+		/* disable fe int */
+		sys_reg_write(FE_INT_ENABLE2, 0);
+		sys_reg_write(FE_INT_STATUS2, MAC2_LINK);
+		reg_val = sys_reg_read(ethdma_mac_base + 0x208);
+		if (reg_val & 0x1) {
+			speed_mode = (reg_val & 0x8) >> 3;
+			/* speed_mode: 0 for 100/10; 1 for else */
+			reg_val = sys_reg_read(ethdma_mac_base + 0x8);
+			if (speed_mode == 0)
+				reg_val |= 1 << 7;
+			else if (speed_mode == 1)
+				reg_val &= ~(1 << 7);
+			sys_reg_write(ethdma_mac_base + 0x8, reg_val);
+		}
+		sys_reg_write(FE_INT_ENABLE2, MAC2_LINK);
+	}
+	spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+	return IRQ_HANDLED;
+}
+#endif
+
+/* Common RX bottom half (tasklet/workqueue body): drains up to
+ * NUM_RX_MAX_PROCESS packets, then either reschedules itself (more
+ * work pending, or the TX ring is full) or re-enables the RX interrupt
+ * mask so the next packet raises an IRQ again.
+ */
+static inline void ei_receive(void)
+{
+	struct net_device *dev = dev_raether;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	unsigned long reg_int_mask;
+	int rx_processed;
+	unsigned long flags;
+
+	if (ei_local->tx_ring_full == 0) {
+		rx_processed = ei_local->ei_eth_recv(dev, NULL,
+						     NUM_RX_MAX_PROCESS);
+		/* budget exceeded: more packets remain, go around again */
+		if (rx_processed > NUM_RX_MAX_PROCESS) {
+			if (likely(ei_local->features & FE_INT_TASKLET))
+				tasklet_hi_schedule(&ei_local->rx_tasklet);
+			else
+				schedule_work(&ei_local->rx_wq);
+		} else {
+			spin_lock_irqsave(&ei_local->irq_lock, flags);
+			/* Enable RX interrupt */
+			reg_int_mask = sys_reg_read(ei_local->fe_rx_int_enable);
+			sys_reg_write(ei_local->fe_rx_int_enable,
+				      reg_int_mask | ei_local->rx_mask);
+			spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+		}
+	} else {
+		/* TX ring full: retry later instead of consuming RX now */
+		if (likely(ei_local->features & FE_INT_TASKLET))
+			tasklet_schedule(&ei_local->rx_tasklet);
+		else
+			schedule_work(&ei_local->rx_wq);
+	}
+}
+
+/* Tasklet entry point: trampoline into the common RX bottom half. */
+static void ei_receive_tasklet(unsigned long unused)
+{
+	ei_receive();
+}
+
+/* Workqueue entry point: trampoline into the common RX bottom half. */
+static void ei_receive_workq(struct work_struct *work)
+{
+	ei_receive();
+}
+
+static int fe_int_enable(struct net_device *dev)
+{
+ struct END_DEVICE *ei_local = netdev_priv(dev);
+ //struct device_node *np = ei_local->switch_np;
+ //struct platform_device *pdev = of_find_device_by_node(np);
+ int err0 = 0, err1 = 0, err2 = 0, err3 = 0;
+ //struct mtk_gsw *gsw;
+ unsigned int reg_val = 0;
+ unsigned long flags;
+
+ pr_err("fe_int_enable\n");
+ if (ei_local->architecture & (GE1_SGMII_AN | GE2_SGMII_AN)) {
+ //err0 = request_irq(ei_local->irq0, ei_fe_interrupt,
+ // IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE, dev->name, dev);
+ } else if (ei_local->features & FE_INT_NAPI) {
+ if (ei_local->features & FE_INT_NAPI_TX_RX)
+ err0 =
+ request_irq(ei_local->irq0, ei_interrupt_napi_sep,
+ IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE, dev->name, dev);
+ } else
+ err0 =
+ request_irq(ei_local->irq0, ei_interrupt, IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE,
+ dev->name, dev);
+
+ if (ei_local->features & FE_IRQ_SEPARATE) {
+ if (ei_local->features & FE_INT_NAPI) {
+ pr_err("FE_INT_NAPI\n");
+ if (ei_local->features & FE_INT_NAPI_TX_RX) {
+ pr_err("FE_INT_NAPI_TX_RX\n");
+ err1 =
+ request_irq(ei_local->irq1,
+ ei_tx_interrupt_napi_sep,
+ IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE,
+ "eth_tx", dev);
+ err2 =
+ request_irq(ei_local->irq2,
+ ei_rx_interrupt_napi_sep,
+ IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE,
+ "eth_rx", dev);
+ } else if (ei_local->features & FE_INT_NAPI_RX_ONLY) {
+ pr_err("FE_INT_NAPI_RX_ONLY\n");
+
+
+ if (ei_local->features & FE_RSS_4RING) {
+ err2 =
+ request_irq(ei_local->irq2,
+ ei_rx_interrupt_napi_g0,
+ IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE,
+ "eth_rx_g0", dev);
+ err3 =
+ request_irq(ei_local->irq3,
+ ei_rx_interrupt_napi_g1,
+ IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE,
+ "eth_rx_g1", dev);
+ } else if (ei_local->features & FE_RSS_2RING) {
+ err2 =
+ request_irq(ei_local->irq2,
+ ei_rx_interrupt_napi_rss0,
+ IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE,
+ "eth_rx_0", dev);
+ err3 =
+ request_irq(ei_local->irq3,
+ ei_rx_interrupt_napi_rss1,
+ IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE,
+ "eth_rx_1", dev);
+ }
+ } else {
+ pr_err("else\n");
+ err1 =
+ request_irq(ei_local->irq1,
+ ei_tx_interrupt_napi,
+ IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE,
+ "eth_tx", dev);
+ err2 =
+ request_irq(ei_local->irq2,
+ ei_rx_interrupt_napi,
+ IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE,
+ "eth_rx", dev);
+ }
+ } else {
+ pr_err("not FE_INT_NAPI\n");
+ err1 =
+ request_irq(ei_local->irq1, ei_tx_interrupt,
+ IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE, "eth_tx", dev);
+ err2 =
+ request_irq(ei_local->irq2, ei_rx_interrupt,
+ IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE, "eth_rx", dev);
+ }
+ }
+ pr_info("!!!!!! request done\n");
+
+
+
+ spin_lock_irqsave(&ei_local->irq_lock, flags);
+
+ if (ei_local->features & FE_DLY_INT) {
+ ei_local->tx_mask = RLS_DLY_INT;
+
+ if (ei_local->features & FE_RSS_4RING)
+ ei_local->rx_mask = RSS_RX_DLY_INT;
+ else if (ei_local->features & FE_RSS_2RING)
+ ei_local->rx_mask = RSS_RX_DLY_INT0;
+ else
+ ei_local->rx_mask = RX_DLY_INT;
+ } else {
+ ei_local->tx_mask = TX_DONE_INT0;
+ ei_local->rx_mask = RX_DONE_INT0 | RX_DONE_INT1 | RX_DONE_INT2 | RX_DONE_INT3;
+ }
+
+ /* Enable PDMA interrupts */
+ if (ei_local->features & FE_DLY_INT) {
+ sys_reg_write(RAETH_DLY_INT_CFG, DELAY_INT_INIT);
+ if (ei_local->features & FE_RSS_4RING) {
+ sys_reg_write(LRO_RX1_DLY_INT, DELAY_INT_INIT);
+ sys_reg_write(LRO_RX2_DLY_INT, DELAY_INT_INIT);
+ sys_reg_write(LRO_RX3_DLY_INT, DELAY_INT_INIT);
+ sys_reg_write(RAETH_FE_INT_ENABLE, RSS_INT_DLY_INT);
+ } else if (ei_local->features & FE_RSS_2RING) {
+ sys_reg_write(LRO_RX1_DLY_INT, DELAY_INT_INIT);
+ sys_reg_write(RAETH_FE_INT_ENABLE, RSS_INT_DLY_INT_2RING);
+ } else {
+ sys_reg_write(RAETH_FE_INT_ENABLE, RAETH_FE_INT_DLY_INIT);
+ }
+ } else {
+ sys_reg_write(RAETH_FE_INT_ENABLE, RAETH_FE_INT_ALL);
+ }
+
+ /* Enable QDMA interrupts */
+ if (ei_local->features & FE_QDMA) {
+ if (ei_local->features & FE_DLY_INT) {
+ sys_reg_write(QDMA_DELAY_INT, DELAY_INT_INIT);
+ sys_reg_write(QFE_INT_ENABLE, QFE_INT_DLY_INIT);
+ } else {
+ sys_reg_write(QFE_INT_ENABLE, QFE_INT_ALL);
+ }
+ }
+
+ if (ei_local->chip_name == MT7622_FE || ei_local->chip_name == LEOPARD_FE) {
+ if (ei_local->architecture & GE1_SGMII_AN)
+ sys_reg_write(FE_INT_ENABLE2, MAC1_LINK);
+ else if (ei_local->architecture & GE2_SGMII_AN)
+ sys_reg_write(FE_INT_ENABLE2, MAC2_LINK);
+ }
+
+ /* IRQ separation settings */
+ if (ei_local->features & FE_IRQ_SEPARATE) {
+ if (ei_local->features & FE_DLY_INT) {
+ /* PDMA setting */
+ sys_reg_write(PDMA_INT_GRP1, TX_DLY_INT);
+
+ if (ei_local->features & FE_RSS_4RING) {
+ /* Enable multipe rx ring delay interrupt */
+ reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0);
+ reg_val |= PDMA_LRO_DLY_INT_EN;
+ sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val);
+ sys_reg_write(PDMA_INT_GRP2, (RING0_RX_DLY_INT | RING1_RX_DLY_INT));
+ sys_reg_write(PDMA_INT_GRP3, (RING2_RX_DLY_INT | RING3_RX_DLY_INT));
+
+ } else if (ei_local->features & FE_RSS_2RING) {
+ reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0);
+ reg_val |= PDMA_LRO_DLY_INT_EN;
+ sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val);
+ sys_reg_write(PDMA_INT_GRP2, RING0_RX_DLY_INT);
+ sys_reg_write(PDMA_INT_GRP3, RING1_RX_DLY_INT);
+ } else {
+ sys_reg_write(PDMA_INT_GRP2, RX_DLY_INT);
+ }
+ /* QDMA setting */
+ sys_reg_write(QDMA_INT_GRP1, RLS_DLY_INT);
+ sys_reg_write(QDMA_INT_GRP2, RX_DLY_INT);
+ } else {
+ /* PDMA setting */
+ sys_reg_write(PDMA_INT_GRP1, TX_DONE_INT0);
+
+ /* QDMA setting */
+ sys_reg_write(QDMA_INT_GRP1, RLS_DONE_INT);
+ sys_reg_write(QDMA_INT_GRP2, RX_DONE_INT0 | RX_DONE_INT1);
+
+ if (ei_local->features & FE_RSS_4RING) {
+ sys_reg_write(PDMA_INT_GRP2, (RX_DONE_INT0 | RX_DONE_INT1));
+ sys_reg_write(PDMA_INT_GRP3, (RX_DONE_INT2 | RX_DONE_INT3));
+ } else if (ei_local->features & FE_RSS_2RING) {
+ sys_reg_write(PDMA_INT_GRP2, RX_DONE_INT0);
+ sys_reg_write(PDMA_INT_GRP3, RX_DONE_INT1);
+ } else {
+ sys_reg_write(PDMA_INT_GRP2, RX_DONE_INT0 | RX_DONE_INT1 |
+ RX_DONE_INT2 | RX_DONE_INT3);
+ }
+ }
+ /*leopard fe_int[0~3][223,224,225,219]*/
+ if (ei_local->features & (FE_RSS_4RING | FE_RSS_2RING))
+ sys_reg_write(FE_INT_GRP, 0x21021030);
+ else
+ sys_reg_write(FE_INT_GRP, 0x21021000);
+ }
+
+ if (ei_local->features & FE_INT_TASKLET) {
+ tasklet_init(&ei_local->rx_tasklet, ei_receive_tasklet, 0);
+ } else if (ei_local->features & FE_INT_WORKQ) {
+ INIT_WORK(&ei_local->rx_wq, ei_receive_workq);
+ } else {
+ if (ei_local->features & FE_INT_NAPI_TX_RX) {
+ napi_enable(&ei_local->napi_tx);
+ napi_enable(&ei_local->napi_rx);
+ } else if (ei_local->features & FE_INT_NAPI_RX_ONLY) {
+ if (ei_local->features & FE_RSS_4RING) {
+ napi_enable(&ei_local->napi_rx_rss0);
+ napi_enable(&ei_local->napi_rx_rss1);
+ napi_enable(&ei_local->napi_rx_rss2);
+ napi_enable(&ei_local->napi_rx_rss3);
+ } else if (ei_local->features & FE_RSS_2RING) {
+ napi_enable(&ei_local->napi_rx_rss0);
+ napi_enable(&ei_local->napi_rx_rss1);
+ } else {
+ napi_enable(&ei_local->napi_rx);
+ }
+ } else {
+ napi_enable(&ei_local->napi);
+ }
+ }
+
+ spin_unlock_irqrestore(&ei_local->irq_lock, flags);
+
+ return 0;
+}
+
+/* fe_int_disable - undo fe_int_enable(): release all requested IRQ lines,
+ * stop deferred RX processing (workqueue or tasklet) and disable every
+ * NAPI context that the enable path started.  Always returns 0.
+ */
+static int fe_int_disable(struct net_device *dev)
+{
+ struct END_DEVICE *ei_local = netdev_priv(dev);
+
+ /*always request irq0*/
+ free_irq(ei_local->irq0, dev);
+
+ if (ei_local->features & FE_IRQ_SEPARATE) {
+ free_irq(ei_local->irq1, dev);
+ free_irq(ei_local->irq2, dev);
+ }
+
+ if (ei_local->architecture & RAETH_ESW)
+ free_irq(ei_local->esw_irq, dev);
+
+ if (ei_local->features & (FE_RSS_4RING | FE_RSS_2RING))
+ free_irq(ei_local->irq3, dev);
+
+ /* wait for any in-flight reset work before killing RX deferral */
+ cancel_work_sync(&ei_local->reset_task);
+
+ if (ei_local->features & FE_INT_WORKQ)
+ cancel_work_sync(&ei_local->rx_wq);
+ else if (ei_local->features & FE_INT_TASKLET)
+ tasklet_kill(&ei_local->rx_tasklet);
+
+ /* disable exactly the NAPI instances fe_int_enable() enabled */
+ if (ei_local->features & FE_INT_NAPI) {
+ if (ei_local->features & FE_INT_NAPI_TX_RX) {
+ napi_disable(&ei_local->napi_tx);
+ napi_disable(&ei_local->napi_rx);
+ } else if (ei_local->features & FE_INT_NAPI_RX_ONLY) {
+ if (ei_local->features & FE_RSS_4RING) {
+ napi_disable(&ei_local->napi_rx_rss0);
+ napi_disable(&ei_local->napi_rx_rss1);
+ napi_disable(&ei_local->napi_rx_rss2);
+ napi_disable(&ei_local->napi_rx_rss3);
+ } else if (ei_local->features & FE_RSS_2RING) {
+ napi_disable(&ei_local->napi_rx_rss0);
+ napi_disable(&ei_local->napi_rx_rss1);
+ } else {
+ napi_disable(&ei_local->napi_rx);
+ }
+ } else {
+ napi_disable(&ei_local->napi);
+ }
+ }
+
+ return 0;
+}
+
+/* forward_config - program GDMA/CDMA frame forwarding and checksum
+ * offload for GMAC1 (and GMAC2 when FE_GE2_SUPPORT), and mirror the
+ * resulting offload capabilities into dev->features / vlan_features.
+ * Finishes by pulsing FE_RST_GL to reset the PSE.
+ * Always returns 1 (callers ignore the value).
+ */
+int forward_config(struct net_device *dev)
+{
+ unsigned int reg_val, reg_csg;
+ struct END_DEVICE *ei_local = netdev_priv(dev);
+ unsigned int reg_val2 = 0;
+
+ if (ei_local->features & FE_HW_VLAN_TX) {
+ /*VLAN_IDX 0 = VLAN_ID 0
+ * .........
+ * VLAN_IDX 15 = VLAN ID 15
+ *
+ */
+ /* frame engine will push VLAN tag
+ * regarding to VIDX feild in Tx desc.
+ */
+ sys_reg_write(RALINK_FRAME_ENGINE_BASE + 0xa8, 0x00010000);
+ sys_reg_write(RALINK_FRAME_ENGINE_BASE + 0xac, 0x00030002);
+ sys_reg_write(RALINK_FRAME_ENGINE_BASE + 0xb0, 0x00050004);
+ sys_reg_write(RALINK_FRAME_ENGINE_BASE + 0xb4, 0x00070006);
+ sys_reg_write(RALINK_FRAME_ENGINE_BASE + 0xb8, 0x00090008);
+ sys_reg_write(RALINK_FRAME_ENGINE_BASE + 0xbc, 0x000b000a);
+ sys_reg_write(RALINK_FRAME_ENGINE_BASE + 0xc0, 0x000d000c);
+ sys_reg_write(RALINK_FRAME_ENGINE_BASE + 0xc4, 0x000f000e);
+ }
+
+ reg_val = sys_reg_read(GDMA1_FWD_CFG);
+ reg_csg = sys_reg_read(CDMA_CSG_CFG);
+
+ if (ei_local->features & FE_GE2_SUPPORT)
+ reg_val2 = sys_reg_read(GDMA2_FWD_CFG);
+
+ /* set unicast/multicast/broadcast frame to cpu */
+ reg_val &= ~0xFFFF;
+ reg_val |= GDMA1_FWD_PORT;
+ reg_csg &= ~0x7;
+
+ if (ei_local->features & FE_HW_VLAN_TX)
+ dev->features |= NETIF_F_HW_VLAN_CTAG_TX;
+
+ if (ei_local->features & FE_HW_VLAN_RX) {
+ dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+ /* enable HW VLAN RX */
+ sys_reg_write(CDMP_EG_CTRL, 1);
+ }
+ if (ei_local->features & FE_CSUM_OFFLOAD) {
+ /* enable ipv4 header checksum check */
+ reg_val |= GDM1_ICS_EN;
+ reg_csg |= ICS_GEN_EN;
+
+ /* enable tcp checksum check */
+ reg_val |= GDM1_TCS_EN;
+ reg_csg |= TCS_GEN_EN;
+
+ /* enable udp checksum check */
+ reg_val |= GDM1_UCS_EN;
+ reg_csg |= UCS_GEN_EN;
+
+ /* NOTE(review): GDM1_* checksum bits are reused for the GDMA2
+ * register - presumably the bit layout is identical; confirm
+ * against the register map.
+ */
+ if (ei_local->features & FE_GE2_SUPPORT) {
+ reg_val2 &= ~0xFFFF;
+ reg_val2 |= GDMA2_FWD_PORT;
+ reg_val2 |= GDM1_ICS_EN;
+ reg_val2 |= GDM1_TCS_EN;
+ reg_val2 |= GDM1_UCS_EN;
+ }
+
+ if (ei_local->features & FE_HW_LRO)
+ dev->features |= NETIF_F_HW_CSUM;
+ else
+ /* Can checksum TCP/UDP over IPv4 */
+ dev->features |= NETIF_F_IP_CSUM;
+
+ if (ei_local->features & FE_TSO) {
+ dev->features |= NETIF_F_SG;
+ dev->features |= NETIF_F_TSO;
+ }
+
+ if (ei_local->features & FE_TSO_V6) {
+ dev->features |= NETIF_F_TSO6;
+ /* Can checksum TCP/UDP over IPv6 */
+ dev->features |= NETIF_F_IPV6_CSUM;
+ }
+ } else { /* Checksum offload disabled */
+ /* disable ipv4 header checksum check */
+ reg_val &= ~GDM1_ICS_EN;
+ reg_csg &= ~ICS_GEN_EN;
+
+ /* disable tcp checksum check */
+ reg_val &= ~GDM1_TCS_EN;
+ reg_csg &= ~TCS_GEN_EN;
+
+ /* disable udp checksum check */
+ reg_val &= ~GDM1_UCS_EN;
+ reg_csg &= ~UCS_GEN_EN;
+
+ if (ei_local->features & FE_GE2_SUPPORT) {
+ reg_val2 &= ~GDM1_ICS_EN;
+ reg_val2 &= ~GDM1_TCS_EN;
+ reg_val2 &= ~GDM1_UCS_EN;
+ }
+
+ /* disable checksum TCP/UDP over IPv4 */
+ dev->features &= ~NETIF_F_IP_CSUM;
+ }
+
+ sys_reg_write(GDMA1_FWD_CFG, reg_val);
+ sys_reg_write(CDMA_CSG_CFG, reg_csg);
+ if (ei_local->features & FE_GE2_SUPPORT)
+ sys_reg_write(GDMA2_FWD_CFG, reg_val2);
+
+ dev->vlan_features = dev->features;
+
+ /*FE_RST_GLO register definition -
+ *Bit 0: PSE Rest
+ *Reset PSE after re-programming PSE_FQ_CFG.
+ */
+ reg_val = 0x1;
+ sys_reg_write(FE_RST_GL, reg_val);
+ sys_reg_write(FE_RST_GL, 0); /* update for RSTCTL issue */
+
+ /* NOTE(review): the reads below discard their results; they look like
+ * register read-backs to post the preceding writes - confirm before
+ * removing.
+ */
+ reg_csg = sys_reg_read(CDMA_CSG_CFG);
+ reg_val = sys_reg_read(GDMA1_FWD_CFG);
+
+ if (ei_local->features & FE_GE2_SUPPORT)
+ reg_val = sys_reg_read(GDMA2_FWD_CFG);
+
+ return 1;
+}
+
+/* virtif_setup_statistics - reset the pseudo interface's counters.
+ * @p_ad: pseudo adapter whose net_device_stats are cleared.
+ *
+ * Zero the whole stats structure in one go instead of assigning each
+ * field by hand; this also clears counters the original field list
+ * missed (e.g. multicast, rx/tx_compressed), so the interface really
+ * starts from a clean slate.
+ */
+void virtif_setup_statistics(struct PSEUDO_ADAPTER *p_ad)
+{
+	memset(&p_ad->stat, 0, sizeof(p_ad->stat));
+}
+
+/* virtualif_open - bring up the GMAC2 pseudo interface: clear its
+ * statistics, inherit the HW VLAN offload flags from the real raeth
+ * device, then start the TX queue.  Always returns 0.
+ */
+int virtualif_open(struct net_device *dev)
+{
+	struct PSEUDO_ADAPTER *pad = netdev_priv(dev);
+	struct END_DEVICE *ei = netdev_priv(pad->raeth_dev);
+
+	virtif_setup_statistics(pad);
+
+	if (ei->features & FE_HW_VLAN_TX)
+		dev->features |= NETIF_F_HW_VLAN_CTAG_TX;
+	if (ei->features & FE_HW_VLAN_RX)
+		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+
+	netif_start_queue(pad->pseudo_dev);
+
+	return 0;
+}
+
+/* virtualif_close - stop the pseudo interface's TX queue. */
+int virtualif_close(struct net_device *dev)
+{
+	struct PSEUDO_ADAPTER *pad = netdev_priv(dev);
+
+	pr_info("%s: ===> virtualif_close\n", dev->name);
+	netif_stop_queue(pad->pseudo_dev);
+
+	return 0;
+}
+
+/* virtualif_send_packets - transmit a frame from the pseudo interface
+ * by handing it to the real raeth device's xmit hook with gmac_no = 2.
+ * Frames are dropped while the real device is down.  Always returns 0.
+ */
+int virtualif_send_packets(struct sk_buff *p_skb, struct net_device *dev)
+{
+	struct PSEUDO_ADAPTER *pad = netdev_priv(dev);
+	struct net_device *real_dev = pad->raeth_dev;
+	struct END_DEVICE *ei;
+
+	if (!(real_dev->flags & IFF_UP)) {
+		dev_kfree_skb_any(p_skb);
+		return 0;
+	}
+
+	p_skb->dev = pad->pseudo_dev;
+	ei = netdev_priv(real_dev);
+	ei->ei_start_xmit(p_skb, real_dev, 2);
+
+	return 0;
+}
+
+/* virtualif_get_stats - expose the pseudo interface's counters. */
+struct net_device_stats *virtualif_get_stats(struct net_device *dev)
+{
+	struct PSEUDO_ADAPTER *pad = netdev_priv(dev);
+
+	return &pad->stat;
+}
+
+/* virtualif_ioctl - private ioctls of the pseudo interface (MII PHY
+ * register read/write).
+ *
+ * Returns 0 on success, -EFAULT when the user buffer cannot be copied,
+ * -EOPNOTSUPP for unknown commands.
+ *
+ * Fix: the results of copy_from_user()/copy_to_user() were previously
+ * ignored, so a faulting user pointer made the handler operate on (or
+ * return) uninitialised MII data.
+ */
+int virtualif_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
+{
+	struct ra_mii_ioctl_data mii;
+
+	switch (cmd) {
+	case RAETH_MII_READ:
+		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
+			return -EFAULT;
+		mii_mgr_read(mii.phy_id, mii.reg_num, &mii.val_out);
+		if (copy_to_user(ifr->ifr_data, &mii, sizeof(mii)))
+			return -EFAULT;
+		break;
+	case RAETH_MII_WRITE:
+		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
+			return -EFAULT;
+		mii_mgr_write(mii.phy_id, mii.reg_num, mii.val_in);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+/* ei_change_mtu - validate and apply a new MTU for the raeth device.
+ *
+ * The accepted range is [64, 1500].  Fix: the original also rejected
+ * new_mtu > 4096, which was dead code because the stricter 1500 test
+ * immediately followed; the two range checks are merged here.
+ *
+ * Returns 0 on success, -ENXIO if the private data is missing,
+ * -EINVAL for an out-of-range MTU.
+ */
+static int ei_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+
+	if (!ei_local) {
+		pr_emerg("%s: %s passed a non-existent private pointer from net_dev!\n",
+			 dev->name, __func__);
+		return -ENXIO;
+	}
+
+	if (new_mtu < 64 || new_mtu > 1500)
+		return -EINVAL;
+
+	dev->mtu = new_mtu;
+
+	return 0;
+}
+#if 0
+/* NOTE(review): compiled out.  ei_clock_enable() brought up the ethernet
+ * PLLs, interface clocks and (for SGMII configurations) the SerDes
+ * clocks; kept for reference as the mirror of ei_clock_disable() below.
+ */
+static int ei_clock_enable(struct END_DEVICE *ei_local)
+{
+ unsigned long rate;
+ int ret;
+ void __iomem *clk_virt_base;
+ unsigned int reg_value;
+
+ pm_runtime_enable(ei_local->dev);
+ pm_runtime_get_sync(ei_local->dev);
+
+ clk_prepare_enable(ei_local->clks[MTK_CLK_ETH1PLL]);
+ clk_prepare_enable(ei_local->clks[MTK_CLK_ETH2PLL]);
+ clk_prepare_enable(ei_local->clks[MTK_CLK_ETHIF]);
+ clk_prepare_enable(ei_local->clks[MTK_CLK_ESW]);
+ clk_prepare_enable(ei_local->clks[MTK_CLK_GP1]);
+ clk_prepare_enable(ei_local->clks[MTK_CLK_GP2]);
+ /*enable frame engine clock*/
+ if (ei_local->chip_name == LEOPARD_FE)
+ clk_prepare_enable(ei_local->clks[MTK_CLK_FE]);
+
+ if (ei_local->architecture & RAETH_ESW)
+ clk_prepare_enable(ei_local->clks[MTK_CLK_GP0]);
+
+ if (ei_local->architecture &
+ (GE1_TRGMII_FORCE_2000 | GE1_TRGMII_FORCE_2600)) {
+ ret = clk_set_rate(ei_local->clks[MTK_CLK_TRGPLL], 500000000);
+ if (ret)
+ pr_err("Failed to set mt7530 trgmii pll: %d\n", ret);
+ rate = clk_get_rate(ei_local->clks[MTK_CLK_TRGPLL]);
+ pr_info("TRGMII_PLL rate = %ld\n", rate);
+ clk_prepare_enable(ei_local->clks[MTK_CLK_TRGPLL]);
+ }
+
+ if (ei_local->architecture & RAETH_SGMII) {
+ if (ei_local->chip_name == LEOPARD_FE)
+ clk_prepare_enable(ei_local->clks[MTK_CLK_SGMII_TOP]);
+ clk_prepare_enable(ei_local->clks[MTK_CLK_SGMIPLL]);
+ clk_prepare_enable(ei_local->clks[MTK_CLK_SGMII_TX250M]);
+ clk_prepare_enable(ei_local->clks[MTK_CLK_SGMII_RX250M]);
+ clk_prepare_enable(ei_local->clks[MTK_CLK_SGMII_CDR_REF]);
+ clk_prepare_enable(ei_local->clks[MTK_CLK_SGMII_CDR_FB]);
+ }
+
+ if (ei_local->architecture & GE2_RAETH_SGMII) {
+ clk_virt_base = ioremap(0x102100C0, 0x10);
+ reg_value = sys_reg_read(clk_virt_base);
+ reg_value = reg_value & (~0x8000); /*[bit15] = 0 */
+ /*pdn_sgmii_re_1 1: Enable clock off */
+ sys_reg_write(clk_virt_base, reg_value);
+ iounmap(clk_virt_base);
+ clk_prepare_enable(ei_local->clks[MTK_CLK_SGMIPLL]);
+ clk_prepare_enable(ei_local->clks[MTK_CLK_SGMII1_TX250M]);
+ clk_prepare_enable(ei_local->clks[MTK_CLK_SGMII1_RX250M]);
+ clk_prepare_enable(ei_local->clks[MTK_CLK_SGMII1_CDR_REF]);
+ clk_prepare_enable(ei_local->clks[MTK_CLK_SGMII1_CDR_FB]);
+ }
+
+ return 0;
+}
+#endif
+/* ei_clock_disable - release the ethernet clocks (mirror of the
+ * currently compiled-out ei_clock_enable(), in roughly reverse order)
+ * and drop the runtime-PM reference.  Always returns 0.
+ */
+static int ei_clock_disable(struct END_DEVICE *ei_local)
+{
+ if (ei_local->chip_name == LEOPARD_FE)
+ clk_disable_unprepare(ei_local->clks[MTK_CLK_FE]);
+ if (ei_local->architecture & RAETH_ESW)
+ clk_disable_unprepare(ei_local->clks[MTK_CLK_GP0]);
+
+ if (ei_local->architecture &
+ (GE1_TRGMII_FORCE_2000 | GE1_TRGMII_FORCE_2600))
+ clk_disable_unprepare(ei_local->clks[MTK_CLK_TRGPLL]);
+
+ if (ei_local->architecture & RAETH_SGMII) {
+ clk_disable_unprepare(ei_local->clks[MTK_CLK_SGMII_TX250M]);
+ clk_disable_unprepare(ei_local->clks[MTK_CLK_SGMII_RX250M]);
+ clk_disable_unprepare(ei_local->clks[MTK_CLK_SGMII_CDR_REF]);
+ clk_disable_unprepare(ei_local->clks[MTK_CLK_SGMII_CDR_FB]);
+ clk_disable_unprepare(ei_local->clks[MTK_CLK_SGMIPLL]);
+ }
+
+ clk_disable_unprepare(ei_local->clks[MTK_CLK_GP2]);
+ clk_disable_unprepare(ei_local->clks[MTK_CLK_GP1]);
+ clk_disable_unprepare(ei_local->clks[MTK_CLK_ESW]);
+ clk_disable_unprepare(ei_local->clks[MTK_CLK_ETHIF]);
+ clk_disable_unprepare(ei_local->clks[MTK_CLK_ETH2PLL]);
+ clk_disable_unprepare(ei_local->clks[MTK_CLK_ETH1PLL]);
+
+ pm_runtime_put_sync(ei_local->dev);
+ pm_runtime_disable(ei_local->dev);
+
+ return 0;
+}
+
+/* ethtool hooks of the primary raeth interface.  Made const: the table
+ * is never written at runtime and dev->ethtool_ops is const-qualified.
+ */
+static const struct ethtool_ops ra_ethtool_ops = {
+	.get_link = et_get_link,
+};
+
+/* ethtool hooks of the GMAC2 pseudo interface.  Made const: the table
+ * is never written at runtime and dev->ethtool_ops is const-qualified.
+ */
+static const struct ethtool_ops ra_virt_ethtool_ops = {
+	.get_link = et_virt_get_link,
+};
+
+/* net_device operations of the GMAC2 pseudo interface. */
+static const struct net_device_ops virtualif_netdev_ops = {
+ .ndo_open = virtualif_open,
+ .ndo_stop = virtualif_close,
+ .ndo_start_xmit = virtualif_send_packets,
+ .ndo_get_stats = virtualif_get_stats,
+ .ndo_set_mac_address = ei_set_mac2_addr,
+ .ndo_change_mtu = ei_change_mtu,
+ .ndo_do_ioctl = virtualif_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/* raeth_init_pseudo - allocate and register the GMAC2 pseudo network
+ * device(s) that share the frame engine with the primary interface.
+ * @p_ad:    primary device's private data; receives the pseudo_dev pointer.
+ * @net_dev: the primary raeth net_device the pseudo device forwards to.
+ *
+ * Fix: the return value of register_netdev() was ignored; on failure
+ * the half-initialised netdev leaked and a dangling pointer was left in
+ * p_ad->pseudo_dev.  Registration failures are now logged, the device
+ * freed, and p_ad->pseudo_dev is only published on success.
+ */
+void raeth_init_pseudo(struct END_DEVICE *p_ad, struct net_device *net_dev)
+{
+	int index;
+	struct net_device *dev;
+	struct PSEUDO_ADAPTER *p_pseudo_ad;
+	struct END_DEVICE *ei_local = netdev_priv(net_dev);
+
+	for (index = 0; index < MAX_PSEUDO_ENTRY; index++) {
+		dev = alloc_etherdev_mqs(sizeof(struct PSEUDO_ADAPTER),
+					 gmac2_txq_num, 1);
+		if (!dev) {
+			pr_err("alloc_etherdev for PSEUDO_ADAPTER failed.\n");
+			return;
+		}
+		strncpy(dev->name, DEV2_NAME, sizeof(dev->name) - 1);
+		netif_set_real_num_tx_queues(dev, gmac2_txq_num);
+		netif_set_real_num_rx_queues(dev, 1);
+
+		ei_mac2_addr_setting(dev);
+		/* set my mac */
+		set_mac2_address(dev->dev_addr);
+		ether_setup(dev);
+		p_pseudo_ad = netdev_priv(dev);
+
+		p_pseudo_ad->pseudo_dev = dev;
+		p_pseudo_ad->raeth_dev = net_dev;
+
+		dev->netdev_ops = &virtualif_netdev_ops;
+
+		if (ei_local->features & FE_HW_LRO)
+			dev->features |= NETIF_F_HW_CSUM;
+		else
+			/* Can checksum TCP/UDP over IPv4 */
+			dev->features |= NETIF_F_IP_CSUM;
+
+		if (ei_local->features & FE_TSO) {
+			dev->features |= NETIF_F_SG;
+			dev->features |= NETIF_F_TSO;
+		}
+
+		if (ei_local->features & FE_TSO_V6) {
+			dev->features |= NETIF_F_TSO6;
+			/* Can checksum TCP/UDP over IPv6 */
+			dev->features |= NETIF_F_IPV6_CSUM;
+		}
+
+		dev->vlan_features = dev->features;
+
+		if (ei_local->features & FE_ETHTOOL) {
+			dev->ethtool_ops = &ra_virt_ethtool_ops;
+			ethtool_virt_init(dev);
+		}
+
+		/* Register this device */
+		if (register_netdev(dev)) {
+			pr_err("register_netdev for %s failed.\n", dev->name);
+			free_netdev(dev);
+			return;
+		}
+		/* publish only after successful registration */
+		p_ad->pseudo_dev = dev;
+	}
+}
+
+/* ei_set_pse_threshold - program the PSE input/output queue thresholds.
+ * NOTE(review): the magic values look like per-queue threshold pairs
+ * from the vendor configuration; their derivation is not documented
+ * here - consult the frame engine register map before changing them.
+ */
+void ei_set_pse_threshold(void)
+{
+
+ sys_reg_write(PSE_IQ_REV1, 0x001a000e);
+ sys_reg_write(PSE_IQ_REV2, 0x01ff001a);
+ sys_reg_write(PSE_IQ_REV3, 0x000e01ff);
+ sys_reg_write(PSE_IQ_REV4, 0x000e000e);
+ sys_reg_write(PSE_IQ_REV5, 0x000e000e);
+ sys_reg_write(PSE_IQ_REV6, 0x000e000e);
+ sys_reg_write(PSE_IQ_REV7, 0x000e000e);
+ sys_reg_write(PSE_IQ_REV8, 0x000e000e);
+ sys_reg_write(PSE_OQ_TH1, 0x000f000a);
+ sys_reg_write(PSE_OQ_TH2, 0x001a000f);
+ sys_reg_write(PSE_OQ_TH3, 0x000f001a);
+ sys_reg_write(PSE_OQ_TH4, 0x01ff000f);
+ sys_reg_write(PSE_OQ_TH5, 0x000f000f);
+ sys_reg_write(PSE_OQ_TH6, 0x0006000f);
+ sys_reg_write(PSE_OQ_TH7, 0x00060006);
+ sys_reg_write(PSE_OQ_TH8, 0x00060006);
+}
+
+/* ei_open - ndo_open of the raeth device: reset statistics, program the
+ * PSE thresholds, set up DMA rings, configure frame forwarding, start
+ * helper kthreads as needed, then enable interrupts and TX.
+ *
+ * Fix: every error return after the successful try_module_get() now
+ * drops the module reference again; previously a failed open leaked the
+ * reference and the module could never be unloaded.
+ *
+ * Returns 0 on success, a negative errno (or -1 for the module-get
+ * failure, kept for compatibility) otherwise.
+ */
+int ei_open(struct net_device *dev)
+{
+	int err;
+	struct END_DEVICE *ei_local;
+
+	ei_local = netdev_priv(dev);
+	if (!ei_local) {
+		pr_err("%s: ei_open passed a non-existent device!\n",
+		       dev->name);
+		return -ENXIO;
+	}
+
+	if (!try_module_get(THIS_MODULE)) {
+		pr_err("%s: Cannot reserve module\n", __func__);
+		return -1;
+	}
+
+	pr_info("Raeth %s (", RAETH_VERSION);
+	if (ei_local->features & FE_INT_NAPI)
+		pr_info("NAPI\n");
+	else if (ei_local->features & FE_INT_TASKLET)
+		pr_info("Tasklet");
+	else if (ei_local->features & FE_INT_WORKQ)
+		pr_info("Workqueue");
+	pr_info(")\n");
+
+	ei_reset_statistics(ei_local);
+
+	ei_set_pse_threshold();
+
+	err = ei_init_dma(dev);
+	if (err)
+		goto err_put;
+
+	if (ei_local->chip_name != MT7621_FE) {
+		fe_gmac_reset();
+		fe_sw_init();
+	}
+
+	/* initialize fe and switch register */
+	if (ei_local->chip_name != LEOPARD_FE)
+		fe_sw_preinit(ei_local);
+
+	forward_config(dev);
+
+	if ((ei_local->chip_name == MT7623_FE) &&
+	    (ei_local->features & FE_HW_LRO)) {
+		ei_local->kreset_task =
+		    kthread_create(fe_reset_thread, NULL, "FE_reset_kthread");
+		if (IS_ERR(ei_local->kreset_task)) {
+			err = PTR_ERR(ei_local->kreset_task);
+			goto err_put;
+		}
+		wake_up_process(ei_local->kreset_task);
+	}
+
+	netif_start_queue(dev);
+
+	fe_int_enable(dev);
+
+	/* set hw my mac address */
+	set_mac_address(dev->dev_addr);
+	if (ei_local->chip_name == LEOPARD_FE) {
+		/* phy led enable */
+		mii_mgr_write_cl45(0, 0x1f, 0x21, 0x8008);
+		mii_mgr_write_cl45(0, 0x1f, 0x24, 0x8007);
+		mii_mgr_write_cl45(0, 0x1f, 0x25, 0x3f);
+		if ((ei_local->architecture & GE2_RGMII_AN)) {
+			mii_mgr_write(0, 9, 0x200);
+			mii_mgr_write(0, 0, 0x1340);
+			if (mac_to_gigaphy_mode_addr2 == 0) {
+				ei_local->kphy_poll_task =
+				    kthread_create(phy_polling_thread, NULL,
+						   "phy_polling_kthread");
+				if (IS_ERR(ei_local->kphy_poll_task)) {
+					err = PTR_ERR(ei_local->kphy_poll_task);
+					goto err_put;
+				}
+				wake_up_process(ei_local->kphy_poll_task);
+			}
+		} else if (ei_local->architecture & LEOPARD_EPHY_GMII) {
+			mii_mgr_write(0, 9, 0x200);
+			mii_mgr_write(0, 0, 0x1340);
+		}
+	}
+	return 0;
+
+err_put:
+	module_put(THIS_MODULE);
+	return err;
+}
+
+/* ei_close - ndo_stop: reset the frame engine, stop helper kthreads,
+ * quiesce TX, disable interrupts, tear down DMA and release the module
+ * reference taken in ei_open().  Always returns 0.
+ *
+ * NOTE(review): ei_open() only creates kphy_poll_task when
+ * mac_to_gigaphy_mode_addr2 == 0, but kthread_stop() below runs for any
+ * LEOPARD_FE + GE2_RGMII_AN configuration - confirm the pointer is
+ * always valid in that path.
+ */
+int ei_close(struct net_device *dev)
+{
+ struct END_DEVICE *ei_local = netdev_priv(dev);
+
+ fe_reset();
+
+ if ((ei_local->chip_name == MT7623_FE) &&
+ (ei_local->features & FE_HW_LRO))
+ kthread_stop(ei_local->kreset_task);
+
+ if (ei_local->chip_name == LEOPARD_FE) {
+ if (ei_local->architecture & GE2_RGMII_AN)
+ kthread_stop(ei_local->kphy_poll_task);
+ }
+
+ netif_stop_queue(dev);
+ ra2880stop(ei_local);
+
+ fe_int_disable(dev);
+
+ if (ei_local->features & FE_GE2_SUPPORT)
+ virtualif_close(ei_local->pseudo_dev);
+
+ ei_deinit_dma(dev);
+
+ if (ei_local->chip_name != LEOPARD_FE)
+ fe_sw_deinit(ei_local);
+
+ module_put(THIS_MODULE);
+
+ return 0;
+}
+
+/* ei_start_xmit_fake - ndo_start_xmit adapter that forwards to the
+ * chip-specific xmit hook with gmac_no = 1 (primary GMAC).
+ */
+static int ei_start_xmit_fake(struct sk_buff *skb, struct net_device *dev)
+{
+	struct END_DEVICE *ei = netdev_priv(dev);
+
+	return ei->ei_start_xmit(skb, dev, 1);
+}
+
+/* ra_get_stats - ndo_get_stats: expose the device's counters. */
+struct net_device_stats *ra_get_stats(struct net_device *dev)
+{
+	struct END_DEVICE *ei = netdev_priv(dev);
+
+	return &ei->stat;
+}
+
+/* dump_phy_reg - print a range of PHY registers from one register page.
+ * @port_no:  PHY/port address to access.
+ * @from:     first register to dump (inclusive).
+ * @to:       last register to dump (inclusive).
+ * @is_local: 0 = global register page, 1 = per-port local page.
+ * @page_no:  page number (0..7), written into register 31 bits [14:12].
+ *
+ * Fixes: @from/@to were accepted but ignored (the loop was hard-coded
+ * to 16..31), and the dump loop was duplicated in both branches.  The
+ * range parameters are honoured now and the loop is shared; all
+ * existing callers pass (16, 31), so their output is unchanged.
+ */
+void dump_phy_reg(int port_no, int from, int to, int is_local, int page_no)
+{
+	int i;
+	u32 temp = 0;
+	u32 r31 = 0;
+
+	if (is_local == 0) {
+		pr_info("\n\nGlobal Register Page %d\n", page_no);
+		r31 |= 0 << 15;	/* global */
+	} else {
+		pr_info("\n\nLocal Register Port %d Page %d\n", port_no,
+			page_no);
+		r31 |= 1 << 15;	/* local */
+	}
+	pr_info("===============");
+	r31 |= ((page_no & 0x7) << 12);	/* page no */
+	mii_mgr_write(port_no, 31, r31);	/* select the page */
+
+	for (i = from; i <= to; i++) {
+		if (i % 8 == 0)
+			pr_info("\n");
+		mii_mgr_read(port_no, i, &temp);
+		pr_info("%02d: %04X ", i, temp);
+	}
+	pr_info("\n");
+}
+
+/* ei_ioctl - private ioctls of the primary raeth interface: MII/CL45
+ * PHY access, embedded-switch register access and dumps, per-port
+ * ingress/egress rate limiting, LAN IP configuration and the QDMA/EPHY
+ * sub-ioctls.
+ *
+ * Fixes: (1) the results of copy_from_user()/copy_to_user() were
+ * collected into an unused variable and ignored, so a faulting user
+ * pointer made the handler operate on uninitialised data - all copies
+ * now fail with -EFAULT; (2) several "&reg" expressions had been
+ * mangled into the "(R)" mojibake character by an HTML-entity pass and
+ * are restored.
+ *
+ * NOTE(review): copy_from_user()/copy_to_user() may fault and sleep,
+ * which is not allowed under spin_lock_irq(); the locking scheme is
+ * kept as-is here but should be revisited (copy outside the lock, or
+ * use a mutex).
+ */
+int ei_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct esw_reg reg;
+	struct esw_rate ratelimit;
+	struct qdma_ioctl_data qdma_data;
+	struct ephy_ioctl_data ephy_data;
+	unsigned int offset = 0;
+	unsigned int value = 0;
+	int ret = 0;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	struct ra_mii_ioctl_data mii;
+	char ip_tmp[IP4_ADDR_LEN];
+
+	spin_lock_irq(&ei_local->page_lock);
+
+	switch (cmd) {
+	case RAETH_MII_READ:
+		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii))) {
+			ret = -EFAULT;
+			break;
+		}
+		mii_mgr_read(mii.phy_id, mii.reg_num, &mii.val_out);
+		if (copy_to_user(ifr->ifr_data, &mii, sizeof(mii)))
+			ret = -EFAULT;
+		break;
+	case RAETH_MII_WRITE:
+		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii))) {
+			ret = -EFAULT;
+			break;
+		}
+		mii_mgr_write(mii.phy_id, mii.reg_num, mii.val_in);
+		break;
+	case RAETH_MII_READ_CL45:
+		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii))) {
+			ret = -EFAULT;
+			break;
+		}
+		mii_mgr_read_cl45(mii.port_num, mii.dev_addr, mii.reg_addr,
+				  &mii.val_out);
+		if (copy_to_user(ifr->ifr_data, &mii, sizeof(mii)))
+			ret = -EFAULT;
+		break;
+	case RAETH_MII_WRITE_CL45:
+		if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii))) {
+			ret = -EFAULT;
+			break;
+		}
+		mii_mgr_write_cl45(mii.port_num, mii.dev_addr, mii.reg_addr,
+				   mii.val_in);
+		break;
+	case RAETH_ESW_REG_READ:
+		if (copy_from_user(&reg, ifr->ifr_data, sizeof(reg))) {
+			ret = -EFAULT;
+			break;
+		}
+		if (reg.off > REG_ESW_MAX) {
+			ret = -EINVAL;
+			break;
+		}
+		reg.val = sys_reg_read(RALINK_ETH_SW_BASE + reg.off);
+		if (copy_to_user(ifr->ifr_data, &reg, sizeof(reg)))
+			ret = -EFAULT;
+		break;
+	case RAETH_ESW_REG_WRITE:
+		if (copy_from_user(&reg, ifr->ifr_data, sizeof(reg))) {
+			ret = -EFAULT;
+			break;
+		}
+		if (reg.off > REG_ESW_MAX) {
+			ret = -EINVAL;
+			break;
+		}
+		sys_reg_write(RALINK_ETH_SW_BASE + reg.off, reg.val);
+		break;
+	case RAETH_ESW_PHY_DUMP:
+		if (copy_from_user(&reg, ifr->ifr_data, sizeof(reg))) {
+			ret = -EFAULT;
+			break;
+		}
+		/* SPEC defined Register 0~15
+		 * Global Register 16~31 for each page
+		 * Local Register 16~31 for each page
+		 */
+		pr_info("SPEC defined Register");
+		if (reg.val == 32) { /* dump all phy register */
+			int i = 0;
+
+			for (i = 0; i < 5; i++) {
+				pr_info("\n[Port %d]===============", i);
+				for (offset = 0; offset < 16; offset++) {
+					if (offset % 8 == 0)
+						pr_info("\n");
+					mii_mgr_read(i, offset, &value);
+					pr_info("%02d: %04X ", offset, value);
+				}
+			}
+		} else {
+			pr_info("\n[Port %d]===============", reg.val);
+			for (offset = 0; offset < 16; offset++) {
+				if (offset % 8 == 0)
+					pr_info("\n");
+				mii_mgr_read(reg.val, offset, &value);
+				pr_info("%02d: %04X ", offset, value);
+			}
+		}
+
+		/* global register page 0~4 */
+		for (offset = 0; offset < 5; offset++) {
+			if (reg.val == 32) /* dump all phy register */
+				dump_phy_reg(0, 16, 31, 0, offset);
+			else
+				dump_phy_reg(reg.val, 16, 31, 0, offset);
+		}
+
+		if (reg.val == 32) { /* dump all phy register */
+			/* local register port 0-port4, pages 0-3 */
+			for (offset = 0; offset < 5; offset++) {
+				dump_phy_reg(offset, 16, 31, 1, 0);
+				dump_phy_reg(offset, 16, 31, 1, 1);
+				dump_phy_reg(offset, 16, 31, 1, 2);
+				dump_phy_reg(offset, 16, 31, 1, 3);
+			}
+		} else {
+			/* local pages 0-3 of the selected port */
+			dump_phy_reg(reg.val, 16, 31, 1, 0);
+			dump_phy_reg(reg.val, 16, 31, 1, 1);
+			dump_phy_reg(reg.val, 16, 31, 1, 2);
+			dump_phy_reg(reg.val, 16, 31, 1, 3);
+		}
+		break;
+	case RAETH_ESW_INGRESS_RATE:
+		if (copy_from_user(&ratelimit, ifr->ifr_data,
+				   sizeof(ratelimit))) {
+			ret = -EFAULT;
+			break;
+		}
+		offset = 0x1800 + (0x100 * ratelimit.port);
+		value = sys_reg_read(RALINK_ETH_SW_BASE + offset);
+
+		/* keep the upper half, rebuild enable bit + mantissa/exp */
+		value &= 0xffff0000;
+		if (ratelimit.on_off == 1) {
+			value |= (ratelimit.on_off << 15);
+			if (ratelimit.bw < 100) {
+				value |= (0x0 << 8);
+				value |= ratelimit.bw;
+			} else if (ratelimit.bw < 1000) {
+				value |= (0x1 << 8);
+				value |= ratelimit.bw / 10;
+			} else if (ratelimit.bw < 10000) {
+				value |= (0x2 << 8);
+				value |= ratelimit.bw / 100;
+			} else if (ratelimit.bw < 100000) {
+				value |= (0x3 << 8);
+				value |= ratelimit.bw / 1000;
+			} else {
+				value |= (0x4 << 8);
+				value |= ratelimit.bw / 10000;
+			}
+		}
+		pr_info("offset = 0x%4x value=0x%x\n\r", offset, value);
+		mii_mgr_write(0x1f, offset, value);
+		break;
+	case RAETH_ESW_EGRESS_RATE:
+		if (copy_from_user(&ratelimit, ifr->ifr_data,
+				   sizeof(ratelimit))) {
+			ret = -EFAULT;
+			break;
+		}
+		offset = 0x1040 + (0x100 * ratelimit.port);
+		value = sys_reg_read(RALINK_ETH_SW_BASE + offset);
+
+		value &= 0xffff0000;
+		if (ratelimit.on_off == 1) {
+			value |= (ratelimit.on_off << 15);
+			if (ratelimit.bw < 100) {
+				value |= (0x0 << 8);
+				value |= ratelimit.bw;
+			} else if (ratelimit.bw < 1000) {
+				value |= (0x1 << 8);
+				value |= ratelimit.bw / 10;
+			} else if (ratelimit.bw < 10000) {
+				value |= (0x2 << 8);
+				value |= ratelimit.bw / 100;
+			} else if (ratelimit.bw < 100000) {
+				value |= (0x3 << 8);
+				value |= ratelimit.bw / 1000;
+			} else {
+				value |= (0x4 << 8);
+				value |= ratelimit.bw / 10000;
+			}
+		}
+		pr_info("offset = 0x%4x value=0x%x\n\r", offset, value);
+		mii_mgr_write(0x1f, offset, value);
+		break;
+	case RAETH_SET_LAN_IP:
+		if (copy_from_user(ip_tmp, ifr->ifr_data, IP4_ADDR_LEN)) {
+			ret = -EFAULT;
+			break;
+		}
+		strncpy(ei_local->lan_ip4_addr, ip_tmp, IP4_ADDR_LEN);
+		pr_info("RAETH_SET_LAN_IP: %s\n", ei_local->lan_ip4_addr);
+
+		if (ei_local->features & FE_HW_LRO)
+			fe_set_hw_lro_my_ip(ei_local->lan_ip4_addr);
+		break;
+	case RAETH_QDMA_IOCTL:
+		if (copy_from_user(&qdma_data, ifr->ifr_data,
+				   sizeof(qdma_data))) {
+			ret = -EFAULT;
+			break;
+		}
+		ei_qdma_ioctl(dev, ifr, &qdma_data);
+		break;
+	case RAETH_EPHY_IOCTL:
+		if (copy_from_user(&ephy_data, ifr->ifr_data,
+				   sizeof(ephy_data))) {
+			ret = -EFAULT;
+			break;
+		}
+		ephy_ioctl(dev, ifr, &ephy_data);
+		break;
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+
+	spin_unlock_irq(&ei_local->page_lock);
+	return ret;
+}
+
+/* net_device operations of the primary raeth interface. */
+static const struct net_device_ops ei_netdev_ops = {
+ .ndo_init = ei_init,
+ .ndo_uninit = ei_uninit,
+ .ndo_open = ei_open,
+ .ndo_stop = ei_close,
+ .ndo_start_xmit = ei_start_xmit_fake,
+ .ndo_get_stats = ra_get_stats,
+ .ndo_set_mac_address = ei_set_mac_addr,
+ .ndo_change_mtu = ei_change_mtu,
+ .ndo_do_ioctl = ei_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = raeth_poll_full,
+#endif
+};
+
+/* raeth_setup_dev_fptable - attach the netdev (and, when FE_ETHTOOL is
+ * set, ethtool) operation tables and the TX watchdog timeout to the
+ * primary raeth device.
+ */
+void raeth_setup_dev_fptable(struct net_device *dev)
+{
+	struct END_DEVICE *ei = netdev_priv(dev);
+
+	dev->netdev_ops = &ei_netdev_ops;
+	if (ei->features & FE_ETHTOOL)
+		dev->ethtool_ops = &ra_ethtool_ops;
+
+#define TX_TIMEOUT (5 * HZ)
+	dev->watchdog_timeo = TX_TIMEOUT;
+}
+
+/* ei_ioc_setting - select coherent vs non-coherent DMA ops for the main
+ * and QDMA platform devices, depending on FE_HW_IOCOHERENT.
+ * NOTE(review): the 0x10395000 write enables the "S4 coherence
+ * function" per the original comment; the register's exact meaning is
+ * not documented here.  The commented-out ETHSYS path is kept for
+ * reference.
+ */
+void ei_ioc_setting(struct platform_device *pdev, struct END_DEVICE *ei_local)
+{
+ void __iomem *reg_virt;
+ /* unsigned int reg_val; */
+
+ if (ei_local->features & FE_HW_IOCOHERENT) {
+ pr_info("[Raether] HW IO coherent is enabled !\n");
+ /* enable S4 coherence function */
+ reg_virt = ioremap(0x10395000, 0x10);
+ sys_reg_write(reg_virt, 0x00000003);
+
+ /* Enable ETHSYS io coherence path */
+ /*reg_virt = ioremap(HW_IOC_BASE, 0x10);*/
+ /*reg_virt += IOC_OFFSET;*/
+ /*reg_val = sys_reg_read(reg_virt);*/
+
+ /*if (ei_local->features & FE_QDMA_FQOS)*/
+ /* reg_val |= IOC_ETH_PDMA;*/
+ /*else*/
+ /* reg_val |= IOC_ETH_PDMA | IOC_ETH_QDMA;*/
+
+ /*sys_reg_write(reg_virt, reg_val);*/
+ /*reg_virt -= IOC_OFFSET;*/
+ iounmap(reg_virt);
+
+ arch_setup_dma_ops(&pdev->dev, 0, 0, NULL, TRUE);
+
+ /* with QDMA FQoS the QDMA device stays non-coherent */
+ if (ei_local->features & FE_QDMA_FQOS)
+ arch_setup_dma_ops(&ei_local->qdma_pdev->dev,
+ 0, 0, NULL, FALSE);
+ else
+ arch_setup_dma_ops(&ei_local->qdma_pdev->dev,
+ 0, 0, NULL, TRUE);
+ } else {
+ pr_info("[Raether] HW IO coherent is disabled !\n");
+ arch_setup_dma_ops(&pdev->dev, 0, 0, NULL, FALSE);
+ arch_setup_dma_ops(&ei_local->qdma_pdev->dev,
+ 0, 0, NULL, FALSE);
+ }
+}
+
+/* fe_chip_name_config - derive ei_local->chip_name from the device-tree
+ * "compatible" string of the platform device.
+ *
+ * Fix: only the first comparison checked the of_property_read_string()
+ * return value; when the property was missing, the remaining
+ * strcasecmp() calls dereferenced the uninitialised @pm pointer
+ * (undefined behaviour).  Bail out early instead.
+ */
+void fe_chip_name_config(struct END_DEVICE *ei_local, struct platform_device *pdev)
+{
+	const char *pm;
+	int ret;
+
+	ret = of_property_read_string(pdev->dev.of_node, "compatible", &pm);
+	if (ret) {
+		pr_info("CHIP_ID error\n");
+		return;
+	}
+
+	if (!strcasecmp(pm, "mediatek,mt7621-eth")) {
+		ei_local->chip_name = MT7621_FE;
+		pr_info("CHIP_ID = MT7621\n");
+	} else if (!strcasecmp(pm, "mediatek,mt7622-raeth")) {
+		ei_local->chip_name = MT7622_FE;
+		pr_info("CHIP_ID = MT7622\n");
+	} else if (!strcasecmp(pm, "mediatek,mt7623-eth")) {
+		ei_local->chip_name = MT7623_FE;
+		pr_info("CHIP_ID = MT7623\n");
+	} else if (!strcasecmp(pm, "mediatek,leopard-eth")) {
+		ei_local->chip_name = LEOPARD_FE;
+		pr_info("CHIP_ID = LEOPARD_FE\n");
+	} else if (!strcasecmp(pm, "mediatek,mt7986-eth")) {
+		ei_local->chip_name = MT7986_FE;
+		pr_info("CHIP_ID = MT7986_FE\n");
+	} else {
+		pr_info("CHIP_ID error\n");
+	}
+}
+
+/* raeth_set_wol - toggle Wake-on-LAN in the MAC1_WOL register.
+ * @enable: true arms WoL (also clearing any pending WoL interrupt),
+ *          false disarms it.
+ */
+void raeth_set_wol(bool enable)
+{
+	unsigned int val = sys_reg_read(MAC1_WOL);
+
+	if (enable)
+		val |= WOL_INT_CLR | WOL_INT_EN | WOL_EN;
+	else
+		val &= ~(WOL_INT_EN | WOL_EN);
+
+	sys_reg_write(MAC1_WOL, val);
+}
+
+#if (0)
+/* NOTE(review): compiled out - WoL-based dev_pm suspend/resume hooks
+ * kept for reference; raeth_set_wol() above is still live.
+ */
+static int raeth_resume(struct device *dev)
+{
+ raeth_set_wol(false);
+ return 0;
+}
+
+static int raeth_suspend(struct device *dev)
+{
+ raeth_set_wol(true);
+ return 0;
+}
+#endif
+/* PHY addresses for GMAC1/GMAC2 when a MAC is wired to an external
+ * (Giga)PHY; filled from the device tree in raeth_arch_setting().
+ */
+u32 mac_to_gigaphy_mode_addr;
+u32 mac_to_gigaphy_mode_addr2;
+/* raeth_arch_setting - parse the device-tree properties describing the
+ * GMAC1/GMAC2 wiring (SGMII/RGMII/ESW, forced speeds, PHY addresses)
+ * into ei_local->architecture/features and the global PHY address
+ * variables.
+ *
+ * Fixes: (1) when of_property_read_string() failed, the subsequent
+ * else-if strcasecmp() calls dereferenced the uninitialised @pm pointer
+ * (undefined behaviour) - every chain is now guarded by @ret; (2) @val
+ * is zero-initialised so a failed of_property_read_u32() no longer
+ * publishes an indeterminate PHY address.
+ */
+void raeth_arch_setting(struct END_DEVICE *ei_local, struct platform_device *pdev)
+{
+	const char *pm;
+	int ret;
+	u32 val = 0;
+
+	ret = of_property_read_string(pdev->dev.of_node, "wan_at", &pm);
+	if (!ret) {
+		ei_local->architecture |= LAN_WAN_SUPPORT;
+		if (!strcasecmp(pm, "p4")) {
+			ei_local->architecture |= WAN_AT_P4;
+			pr_info("WAN at P4\n");
+		} else if (!strcasecmp(pm, "p0")) {
+			ei_local->architecture |= WAN_AT_P0;
+			pr_info("WAN at P0\n");
+		}
+	}
+
+	ret = of_property_read_string(pdev->dev.of_node, "gmac1-support", &pm);
+	if (ret) {
+		pr_info("GE1 dts parsing error\n");
+	} else if (!strcasecmp(pm, "sgmii-1")) {
+		ei_local->architecture |= RAETH_SGMII;
+		pr_info("GMAC1 support SGMII\n");
+		ret = of_property_read_string(pdev->dev.of_node,
+					      "sgmii-mode-1", &pm);
+		if (!ret) {
+			if (!strcasecmp(pm, "force-2500")) {
+				pr_info("GE1_SGMII_FORCE_2500\n");
+				ei_local->architecture |= GE1_SGMII_FORCE_2500;
+			} else if (!strcasecmp(pm, "an")) {
+				pr_info("GE1_SGMII_AN\n");
+				ei_local->architecture |= GE1_SGMII_AN;
+				of_property_read_u32(pdev->dev.of_node,
+						     "gmac1-phy-address",
+						     &val);
+				mac_to_gigaphy_mode_addr = val;
+				pr_info("mac_to_gigaphy_mode_addr = 0x%x\n",
+					mac_to_gigaphy_mode_addr);
+			}
+		}
+	} else if (!strcasecmp(pm, "rgmii-1")) {
+		pr_info("GMAC1 support rgmii\n");
+		ret = of_property_read_string(pdev->dev.of_node,
+					      "rgmii-mode-1", &pm);
+		if (!ret) {
+			if (!strcasecmp(pm, "force-1000")) {
+				pr_info("GE1_RGMII_FORCE_1000\n");
+				ei_local->architecture |= GE1_RGMII_FORCE_1000;
+			} else if (!strcasecmp(pm, "an")) {
+				pr_info("GE1_RGMII_AN\n");
+				of_property_read_u32(pdev->dev.of_node,
+						     "gmac1-phy-address",
+						     &val);
+				mac_to_gigaphy_mode_addr = val;
+				ei_local->architecture |= GE1_RGMII_AN;
+				pr_info("mac_to_gigaphy_mode_addr = 0x%x\n",
+					mac_to_gigaphy_mode_addr);
+			} else if (!strcasecmp(pm, "one-ephy")) {
+				pr_info("GE1_RGMII_ONE_EPHY\n");
+				ei_local->architecture |= GE1_RGMII_ONE_EPHY;
+			}
+		}
+	} else if (!strcasecmp(pm, "esw")) {
+		pr_info("Embedded 5-Port Switch\n");
+		ei_local->architecture |= RAETH_ESW;
+		if (ei_local->chip_name == MT7622_FE) {
+			ei_local->architecture |= MT7622_EPHY;
+		} else if (ei_local->chip_name == LEOPARD_FE) {
+			ret = of_property_read_string(pdev->dev.of_node,
+						      "gmac0", &pm);
+			if (!ret && !strcasecmp(pm, "gmii"))
+				ei_local->architecture |= LEOPARD_EPHY_GMII;
+			ei_local->architecture |= LEOPARD_EPHY;
+		}
+	} else if (!strcasecmp(pm, "none")) {
+		pr_info("GE1_RGMII_NONE\n");
+		ei_local->architecture |= GE1_RGMII_NONE;
+	} else {
+		pr_info("GE1 dts parsing error\n");
+	}
+
+	ret = of_property_read_string(pdev->dev.of_node, "gmac2-support", &pm);
+	if (ret) {
+		pr_info("GE2 no connect\n");
+		return;
+	}
+	ei_local->architecture |= GMAC2;
+	ei_local->features |= FE_GE2_SUPPORT;
+
+	if (!strcasecmp(pm, "sgmii-2")) {
+		ei_local->architecture |= GE2_RAETH_SGMII;
+		pr_info("GMAC2 support SGMII\n");
+		ret = of_property_read_string(pdev->dev.of_node,
+					      "sgmii-mode-2", &pm);
+		if (!ret) {
+			if (!strcasecmp(pm, "force-2500")) {
+				pr_info("GE2_SGMII_FORCE_2500\n");
+				ei_local->architecture |= GE2_SGMII_FORCE_2500;
+				ret = of_property_read_string(pdev->dev.of_node,
+							      "gmac2-force",
+							      &pm);
+				if (!ret && !strcasecmp(pm, "sgmii-switch")) {
+					ei_local->architecture |= SGMII_SWITCH;
+					pr_info("GE2_SGMII_FORCE LINK SWITCH\n");
+				}
+			} else if (!strcasecmp(pm, "an")) {
+				pr_info("GE2_SGMII_AN\n");
+				ei_local->architecture |= GE2_SGMII_AN;
+				of_property_read_u32(pdev->dev.of_node,
+						     "gmac2-phy-address",
+						     &val);
+				mac_to_gigaphy_mode_addr2 = val;
+			}
+		}
+	} else if (!strcasecmp(pm, "rgmii-2")) {
+		pr_info("GMAC2 support rgmii\n");
+		ret = of_property_read_string(pdev->dev.of_node,
+					      "rgmii-mode-2", &pm);
+		if (!ret) {
+			if (!strcasecmp(pm, "force-1000")) {
+				pr_info("GE2_RGMII_FORCE_1000\n");
+				ei_local->architecture |= GE2_RGMII_FORCE_1000;
+			} else if (!strcasecmp(pm, "an")) {
+				pr_info("RGMII_AN (External GigaPhy)\n");
+				of_property_read_u32(pdev->dev.of_node,
+						     "gmac2-phy-address",
+						     &val);
+				mac_to_gigaphy_mode_addr2 = val;
+				pr_info("mac_to_gigaphy_mode_addr2 = 0x%x\n",
+					mac_to_gigaphy_mode_addr2);
+				ei_local->architecture |= GE2_RGMII_AN;
+			} else if (!strcasecmp(pm, "an-internal")) {
+				pr_info("RGMII_AN (Internal GigaPhy)\n");
+				ei_local->architecture |= GE2_INTERNAL_GPHY;
+			}
+		}
+	} else {
+		pr_info("GE2 no connect\n");
+	}
+}
+
+/* Helper: read a u32 property from the ethernet DT node, returning 0 when
+ * the property is absent.  of_property_read_u32() leaves the output
+ * untouched on failure, which previously let an uninitialised stack value
+ * (or a stale value from the preceding read) flow into the global counters.
+ */
+static u32 fe_of_u32(struct platform_device *pdev, const char *prop)
+{
+	u32 val = 0;
+
+	of_property_read_u32(pdev->dev.of_node, prop, &val);
+	return val;
+}
+
+/* Parse the TX-queue/descriptor geometry for both GMACs and the RX
+ * descriptor count from the device tree, derive the global totals, and
+ * allocate the per-queue bookkeeping arrays used by the TX/RX datapath.
+ *
+ * NOTE(review): as in the original, the kmalloc_array() results are not
+ * checked here; a failed allocation is only caught when the arrays are
+ * first dereferenced.  Consider giving this function an error return.
+ */
+void fe_tx_rx_dec(struct END_DEVICE *ei_local, struct platform_device *pdev)
+{
+	u8 i;
+
+	gmac1_txq_num = fe_of_u32(pdev, "gmac1_txq_num");
+	gmac1_txq_txd_num = fe_of_u32(pdev, "gmac1_txq_txd_num");
+	gmac1_txd_num = gmac1_txq_num * gmac1_txq_txd_num;
+
+	gmac2_txq_num = fe_of_u32(pdev, "gmac2_txq_num");
+	gmac2_txq_txd_num = fe_of_u32(pdev, "gmac2_txq_txd_num");
+	gmac2_txd_num = gmac2_txq_num * gmac2_txq_txd_num;
+
+	num_tx_desc = gmac1_txd_num + gmac2_txd_num;
+	total_txq_num = gmac1_txq_num + gmac2_txq_num;
+
+	num_rx_desc = fe_of_u32(pdev, "num_rx_desc");
+	num_tx_max_process = num_tx_desc;
+
+	ei_local->free_skb = kmalloc_array(num_tx_desc,
+					   sizeof(struct sk_buff *),
+					   GFP_KERNEL);
+	ei_local->free_txd_num = kmalloc_array(total_txq_num,
+					       sizeof(atomic_t), GFP_KERNEL);
+	ei_local->free_txd_head = kmalloc_array(total_txq_num,
+						sizeof(unsigned int),
+						GFP_KERNEL);
+	ei_local->free_txd_tail = kmalloc_array(total_txq_num,
+						sizeof(unsigned int),
+						GFP_KERNEL);
+	ei_local->txd_pool_info = kmalloc_array(num_tx_desc,
+						sizeof(unsigned int),
+						GFP_KERNEL);
+	ei_local->skb_free = kmalloc_array(num_tx_desc,
+					   sizeof(struct sk_buff *),
+					   GFP_KERNEL);
+	ei_local->rls_cnt = kmalloc_array(total_txq_num,
+					  sizeof(unsigned int), GFP_KERNEL);
+
+	/* one RX buffer-pointer table per ring, plus the dedicated ring-0
+	 * table used by the legacy path
+	 */
+	for (i = 0; i < MAX_RX_RING_NUM; i++)
+		ei_local->netrx_skb_data[i] =
+		    kmalloc_array(num_rx_desc, sizeof(void *), GFP_KERNEL);
+	ei_local->netrx0_skb_data = kmalloc_array(num_rx_desc,
+						  sizeof(void *), GFP_KERNEL);
+}
+
+/* static struct wakeup_source eth_wake_lock; */
+
+/* Probe routine for the raeth platform driver.
+ *
+ * Allocates the net_device, parses the device tree (features, architecture,
+ * chip name, TX/RX geometry), maps the register windows, fetches the IRQs
+ * and finally registers the net_device (and the GE2 pseudo interface when
+ * FE_GE2_SUPPORT is set).
+ *
+ * Fixes vs. original:
+ *  - of_iomap() returns NULL on failure (not an ERR_PTR), so its result is
+ *    now tested with !ptr instead of IS_ERR();
+ *  - every failure path after alloc_etherdev_mqs() goes through
+ *    err_free_dev so the net_device is no longer leaked;
+ *  - the ethsys node reference from of_parse_phandle() is dropped with
+ *    of_node_put() once mapped, and the bare ioremap() is checked.
+ *
+ * NOTE(review): the function name looks like a typo for "raether_probe";
+ * kept as-is because raeth_driver references it.
+ */
+static int rather_probe(struct platform_device *pdev)
+{
+	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	struct END_DEVICE *ei_local;
+	struct net_device *netdev;
+	struct device_node *node;
+	int ret;
+
+	netdev = alloc_etherdev_mqs(sizeof(struct END_DEVICE), 1, 1);
+	if (!netdev)
+		return -ENOMEM;
+
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+
+	dev_raether = netdev;
+	ei_local = netdev_priv(netdev);
+	ei_local->dev = &pdev->dev;
+	ei_local->netdev = netdev;
+	fe_features_config(ei_local);
+	fe_architecture_config(ei_local);
+	fe_chip_name_config(ei_local, pdev);
+	raeth_arch_setting(ei_local, pdev);
+	fe_tx_rx_dec(ei_local, pdev);
+
+	/* "dma-coherent" in the DT enables HW IO-coherency handling */
+	if (of_property_read_bool(pdev->dev.of_node, "dma-coherent")) {
+		pr_err("HW_IOC supported\n");
+		ei_local->features |= FE_HW_IOCOHERENT;
+	}
+
+	if ((ei_local->features & FE_HW_IOCOHERENT) &&
+	    (ei_local->features & FE_QDMA_FQOS)) {
+		/* QDMA FQoS needs its own (non-coherent) DMA device */
+		pr_err("HW_IOC supported\n");
+		ei_local->qdma_pdev =
+		    platform_device_alloc("QDMA", PLATFORM_DEVID_AUTO);
+		if (!ei_local->qdma_pdev) {
+			dev_err(&pdev->dev,
+				"QDMA platform device allocate fail!\n");
+			ret = -ENOMEM;
+			goto err_free_dev;
+		}
+
+		ei_local->qdma_pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+		ei_local->qdma_pdev->dev.dma_mask =
+		    &ei_local->qdma_pdev->dev.coherent_dma_mask;
+	} else {
+		ei_local->qdma_pdev = pdev;
+	}
+
+	/* iomap registers */
+	node = of_parse_phandle(pdev->dev.of_node, "mediatek,ethsys", 0);
+	ethdma_sysctl_base = of_iomap(node, 0);
+	of_node_put(node);
+	if (!ethdma_sysctl_base) {
+		dev_err(&pdev->dev, "no ethdma_sysctl_base found\n");
+		ret = -ENOMEM;
+		goto err_free_dev;
+	}
+
+	ethdma_frame_engine_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ethdma_frame_engine_base)) {
+		dev_err(&pdev->dev, "no ethdma_frame_engine_base found\n");
+		ret = PTR_ERR(ethdma_frame_engine_base);
+		goto err_free_dev;
+	}
+
+	/* TODO(review): hard-coded MAC register window -- confirm the base
+	 * address should not come from the DT instead
+	 */
+	ethdma_mac_base = ioremap(0x15110000, 0x300);
+	if (!ethdma_mac_base) {
+		dev_err(&pdev->dev, "no ethdma_mac_base found\n");
+		ret = -ENOMEM;
+		goto err_free_dev;
+	}
+
+	/* get gsw device node */
+	ei_local->switch_np = of_parse_phandle(pdev->dev.of_node,
+					       "mediatek,switch", 0);
+
+#if 0
+	/* get MAC address */
+	{
+		const char *mac_addr = of_get_mac_address(pdev->dev.of_node);
+
+		if (mac_addr)
+			ether_addr_copy(netdev->dev_addr, mac_addr);
+	}
+#endif
+
+	/* get IRQs; MT7621 exposes only one line */
+	ei_local->irq0 = platform_get_irq(pdev, 0);
+	if (ei_local->chip_name != MT7621_FE) {
+		ei_local->irq1 = platform_get_irq(pdev, 1);
+		ei_local->irq2 = platform_get_irq(pdev, 2);
+	}
+	if (ei_local->features & (FE_RSS_4RING | FE_RSS_2RING))
+		ei_local->irq3 = platform_get_irq(pdev, 3);
+
+	pr_err("ei_local->irq0 = %d; ei_local->irq1 = %d; ei_local->irq2 = %d\n",
+	       ei_local->irq0, ei_local->irq1, ei_local->irq2);
+
+	raeth_setup_dev_fptable(netdev);
+	ei_mac_addr_setting(netdev);
+
+	strncpy(netdev->name, DEV_NAME, sizeof(netdev->name) - 1);
+	netif_set_real_num_tx_queues(netdev, gmac1_txq_num);
+	netif_set_real_num_rx_queues(netdev, 1);
+
+	netdev->addr_len = 6;
+	netdev->base_addr = (unsigned long)RALINK_FRAME_ENGINE_BASE;
+	sys_reg_write(ETHDMASYS_ETH_MAC_BASE + 0x100, 0x2105e303);
+	sys_reg_write(ETHDMASYS_ETH_MAC_BASE + 0x200, 0x2105e303);
+
+	/* net_device structure Init */
+	pr_info("%s %d rx/%d tx descriptors allocated, mtu = %d!\n",
+		RAETH_VERSION, num_rx_desc, num_tx_desc, netdev->mtu);
+
+	if (ei_local->features & FE_ETHTOOL)
+		ethtool_init(netdev);
+
+	ret = debug_proc_init();
+	if (ret) {
+		dev_err(&pdev->dev, "error set debug proc\n");
+		goto err_free_dev;
+	}
+
+	/* Register net device for the driver */
+	ret = register_netdev(netdev);
+	if (ret) {
+		dev_err(&pdev->dev, "error bringing up device\n");
+		goto err_free_dev;
+	}
+
+	/* keep ethsys power domain on */
+	device_init_wakeup(&pdev->dev, true);
+	pr_info("device_init_wakeup\n");
+
+	if (ei_local->features & FE_GE2_SUPPORT) {
+		if (!ei_local->pseudo_dev)
+			raeth_init_pseudo(ei_local, netdev);
+
+		if (!ei_local->pseudo_dev)
+			pr_info("Open pseudo_dev failed.\n");
+		else
+			virtualif_open(ei_local->pseudo_dev);
+	}
+	return 0;
+
+err_free_dev:
+	free_netdev(netdev);
+	return ret;
+}
+
+/* Platform-driver remove callback: release the dedicated QDMA FQoS device
+ * (if one was allocated in probe) and gate the ethernet clocks.
+ *
+ * NOTE(review): the qdma_pdev's ->release() is invoked directly; the usual
+ * teardown would be platform_device_put()/unregister().  When
+ * FE_HW_IOCOHERENT was not set, qdma_pdev aliases the probing pdev itself,
+ * so this would call release on the live platform device -- confirm.
+ * NOTE(review): the net_device registered in probe is neither
+ * unregister_netdev()'d nor free_netdev()'d here -- verify this happens on
+ * the module-exit path.
+ */
+static int raether_remove(struct platform_device *pdev)
+{
+ struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+ if (ei_local->features & FE_QDMA_FQOS)
+ if (ei_local->qdma_pdev)
+ ei_local->qdma_pdev->dev.release
+ (&ei_local->qdma_pdev->dev);
+
+ ei_clock_disable(ei_local);
+
+ return 0;
+}
+
+/* Suspend/resume hooks are currently compiled out. */
+#if (0)
+static const struct dev_pm_ops raeth_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(raeth_suspend, raeth_resume)
+};
+#endif
+/* Driver name reported to the platform bus. */
+static const char raeth_string[] = "RAETH_DRV";
+
+/* Device-tree compatibles this driver binds to. */
+static const struct of_device_id raether_of_ids[] = {
+ {.compatible = "mediatek,mt7623-eth"},
+ {.compatible = "mediatek,mt7622-raeth"},
+ {.compatible = "mediatek,mt7621-eth"},
+ {.compatible = "mediatek,leopard-eth"},
+ {.compatible = "mediatek,mt7986-eth"},
+ {},
+};
+
+/* Platform-driver glue; probe/remove are defined above.
+ * NOTE(review): "rather_probe" looks like a typo for "raether_probe" --
+ * harmless, but worth renaming together with its definition.
+ */
+static struct platform_driver raeth_driver = {
+ .probe = rather_probe,
+ .remove = raether_remove,
+ .driver = {
+ .name = raeth_string,
+ .owner = THIS_MODULE,
+ .of_match_table = raether_of_ids,
+ /* .pm = &raeth_pm_ops, */
+ },
+};
+
+module_platform_driver(raeth_driver);
+MODULE_LICENSE("GPL");
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether.h
new file mode 100644
index 0000000..5316905
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether.h
@@ -0,0 +1,463 @@
+/* Copyright 2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef RA2882ETHEND_H
+#define RA2882ETHEND_H
+
+#include "raeth_config.h"
+#include "raeth_reg.h"
+#include "ra_dbg_proc.h"
+#include "ra_ioctl.h"
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/if_ether.h>
+#include <linux/fs.h>
+#include <linux/mii.h>
+#include <linux/uaccess.h>
+#if defined(CONFIG_RAETH_TSO)
+#include <linux/tcp.h>
+#include <net/ipv6.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <linux/in.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_pppox.h>
+#endif
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/ppp_defs.h>
+
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_net.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/dma-mapping.h>
+
+#if defined(CONFIG_MACH_MT7623)
+#include <linux/delay.h>
+#endif
+#include <linux/kthread.h>
+#include <linux/prefetch.h>
+
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#if defined(CONFIG_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+#include <net/ra_nat.h>
+#endif
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#define ETH_GPIO_BASE 0x10005000
+
+#if defined(CONFIG_QDMA_MQ)
+#define GMAC1_TXQ_NUM 3
+#define GMAC1_TXQ_TXD_NUM 512
+#define GMAC1_TXD_NUM (GMAC1_TXQ_NUM * GMAC1_TXQ_TXD_NUM)
+#define GMAC2_TXQ_NUM 1
+#define GMAC2_TXQ_TXD_NUM 128
+#define GMAC2_TXD_NUM (GMAC2_TXQ_NUM * GMAC2_TXQ_TXD_NUM)
+#define NUM_TX_DESC (GMAC1_TXD_NUM + GMAC2_TXD_NUM)
+#define TOTAL_TXQ_NUM (GMAC1_TXQ_NUM + GMAC2_TXQ_NUM)
+#else
+#define TOTAL_TXQ_NUM 2
+#endif
+
+#if defined(CONFIG_MACH_MT7623)
+#define NUM_RX_DESC 2048
+#define NUM_QRX_DESC 16
+#define NUM_PQ_RESV 4
+#define FFA 2048
+#define QUEUE_OFFSET 0x10
+#else
+#define NUM_QRX_DESC 16
+#define NUM_PQ_RESV 4
+#define FFA 512
+#define QUEUE_OFFSET 0x10
+#endif
+
+#if defined(CONFIG_PINCTRL_MT7622)
+#define NUM_PQ 64
+#else
+#define NUM_PQ 16
+#endif
+/* #define NUM_TX_MAX_PROCESS NUM_TX_DESC */
+#define NUM_RX_MAX_PROCESS 16
+
+#define MAX_RX_RING_NUM 4
+#define NUM_LRO_RX_DESC 16
+
+#define MAX_RX_LENGTH 1536
+
+#if defined(CONFIG_SUPPORT_OPENWRT)
+#define DEV_NAME "eth0"
+#define DEV2_NAME "eth1"
+#else
+#define DEV_NAME "eth2"
+#define DEV2_NAME "eth3"
+#endif
+
+#if defined(CONFIG_MACH_MT7623)
+#define GMAC0_OFFSET 0xE000
+#define GMAC2_OFFSET 0xE006
+#else
+#define GMAC0_OFFSET 0x28
+#define GMAC2_OFFSET 0x22
+#endif
+
+#if defined(CONFIG_MACH_MT7623)
+#define IRQ_ENET0 232
+#define IRQ_ENET1 231
+#define IRQ_ENET2 230
+#else
+/* NOTE(Nelson): prom version started from 20150806 */
+#define IRQ_ENET0 255
+#define IRQ_ENET1 256
+#define IRQ_ENET2 257
+#endif
+#define MTK_NAPI_WEIGHT 64
+
+#define RAETH_VERSION "STD_v0.1"
+
+/* MT7623 PSE reset workaround */
+#define FE_RESET_POLLING_MS (5000)
+
+/*LEOPARD POLLING*/
+#define PHY_POLLING_MS (1000)
+#define FE_DEFAULT_LAN_IP "192.168.1.1"
+#define IP4_ADDR_LEN 16
+
+#if defined(CONFIG_SOC_MT7621)
+#define MT_TRIGGER_LOW 0
+#else
+#define MT_TRIGGER_LOW IRQF_TRIGGER_LOW
+#endif
+
+/* Index of each gate/PLL clock inside END_DEVICE::clks.  The order here is
+ * an implicit contract with the driver's clock-name table
+ * (mtk_clks_source_name) -- do not reorder entries.
+ */
+enum mtk_clks_map {
+ MTK_CLK_ETHIF,
+ MTK_CLK_ESW,
+ MTK_CLK_GP0,
+ MTK_CLK_GP1,
+ MTK_CLK_GP2,
+ MTK_CLK_SGMII_TX250M,
+ MTK_CLK_SGMII_RX250M,
+ MTK_CLK_SGMII_CDR_REF,
+ MTK_CLK_SGMII_CDR_FB,
+ MTK_CLK_SGMII1_TX250M,
+ MTK_CLK_SGMII1_RX250M,
+ MTK_CLK_SGMII1_CDR_REF,
+ MTK_CLK_SGMII1_CDR_FB,
+ MTK_CLK_TRGPLL,
+ MTK_CLK_SGMIPLL,
+ MTK_CLK_ETH1PLL,
+ MTK_CLK_ETH2PLL,
+ MTK_CLK_FE,
+ MTK_CLK_SGMII_TOP,
+ MTK_CLK_MAX /* array bound, not a real clock */
+};
+
+/* Per-adapter private state, stored in netdev_priv() of the GMAC1
+ * net_device.  Groups PDMA/QDMA descriptor-ring bookkeeping, NAPI
+ * contexts, IRQ numbers, clock handles and the feature/architecture
+ * flags parsed from the device tree.  The commented-out fixed-size array
+ * forms next to several members document the pre-DT static layout that
+ * the kmalloc_array() allocations in fe_tx_rx_dec() replaced.
+ */
+struct END_DEVICE {
+ struct device *dev;
+ unsigned int tx_cpu_owner_idx0;
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+ unsigned int rx_calc_idx[MAX_RX_RING_NUM];
+#endif
+ unsigned int tx_ring_full;
+ unsigned int tx_full; /* NOTE(Nelso): unused, can remove */
+
+ /* PDMA TX PTR */
+ dma_addr_t phy_tx_ring0;
+
+ /* QDMA TX PTR */
+ struct platform_device *qdma_pdev;
+ /* struct sk_buff *free_skb[NUM_TX_DESC]; */
+ struct sk_buff **free_skb;
+ unsigned int tx_dma_ptr;
+ unsigned int tx_cpu_ptr;
+ unsigned int tx_cpu_idx;
+ unsigned int rls_cpu_idx;
+ /* atomic_t free_txd_num[TOTAL_TXQ_NUM]; */
+ atomic_t *free_txd_num;
+ /* unsigned int free_txd_head[TOTAL_TXQ_NUM]; */
+ /* unsigned int free_txd_tail[TOTAL_TXQ_NUM]; */
+ unsigned int *free_txd_head;
+ unsigned int *free_txd_tail;
+ struct QDMA_txdesc *txd_pool;
+ dma_addr_t phy_txd_pool;
+ /* unsigned int txd_pool_info[NUM_TX_DESC]; */
+ unsigned int *txd_pool_info;
+ struct QDMA_txdesc *free_head;
+ unsigned int phy_free_head;
+ unsigned int *free_page_head;
+ dma_addr_t phy_free_page_head;
+ struct PDMA_rxdesc *qrx_ring;
+ dma_addr_t phy_qrx_ring;
+
+ /* TSO */
+ unsigned int skb_txd_num;
+
+ /* MT7623 workaround */
+ struct work_struct reset_task;
+
+ /* workqueue_bh */
+ struct work_struct rx_wq;
+
+ /* tasklet_bh */
+ struct tasklet_struct rx_tasklet;
+
+ /* struct sk_buff *skb_free[NUM_TX_DESC]; */
+ struct sk_buff **skb_free;
+ unsigned int free_idx;
+
+ struct net_device_stats stat; /* The new statistics table. */
+ spinlock_t page_lock; /* spin_lock for cr access critial section */
+ spinlock_t irq_lock; /* spin_lock for isr critial section */
+ spinlock_t mdio_lock; /* spin_lock for mdio reg access */
+ struct PDMA_txdesc *tx_ring0;
+ struct PDMA_rxdesc *rx_ring[MAX_RX_RING_NUM];
+ dma_addr_t phy_rx_ring[MAX_RX_RING_NUM];
+
+ /* void *netrx_skb_data[MAX_RX_RING_NUM][NUM_RX_DESC]; */
+ void **netrx_skb_data[MAX_RX_RING_NUM];
+
+ /* struct sk_buff *netrx0_skbuf[NUM_RX_DESC]; */
+ /*struct sk_buff **netrx0_skbuf;*/
+ void **netrx0_skb_data;
+ /* napi */
+ struct napi_struct napi;
+ struct napi_struct napi_rx;
+ struct napi_struct napi_rx_rss0;
+ struct napi_struct napi_rx_rss1;
+ struct napi_struct napi_rx_rss2;
+ struct napi_struct napi_rx_rss3;
+ struct napi_struct napi_tx;
+ struct net_device dummy_dev;
+
+ /* clock control */
+ struct clk *clks[MTK_CLK_MAX];
+
+ /* gsw device node */
+ struct device_node *switch_np;
+
+ /* GE1 support */
+ struct net_device *netdev;
+ /* GE2 support */
+ struct net_device *pseudo_dev;
+ unsigned int is_pseudo;
+
+ struct mii_if_info mii_info;
+ struct lro_counters lro_counters;
+ struct vlan_group *vlgrp;
+
+ /* virtual base addr from device tree */
+ void __iomem *ethdma_sysctl_base;
+
+ /* interrupt lines fetched in probe (irq1/irq2 unused on MT7621) */
+ unsigned int irq0;
+ unsigned int irq1;
+ unsigned int irq2;
+ unsigned int irq3;
+ unsigned int esw_irq;
+ void __iomem *fe_tx_int_status;
+ void __iomem *fe_tx_int_enable;
+ void __iomem *fe_rx_int_status;
+ void __iomem *fe_rx_int_enable;
+
+ /* FE_*/RAETH_* flag words filled in by the fe_*_config() parsers */
+ unsigned int features;
+ unsigned int chip_name;
+ unsigned int architecture;
+
+ /* IP address */
+ char lan_ip4_addr[IP4_ADDR_LEN];
+
+ /* Function pointers */
+ int (*ei_start_xmit)(struct sk_buff *skb, struct net_device *netdev,
+ int gmac_no);
+ int (*ei_xmit_housekeeping)(struct net_device *netdev, int budget);
+ int (*ei_eth_recv)(struct net_device *dev,
+ struct napi_struct *napi,
+ int budget);
+ int (*ei_eth_recv_rss0)(struct net_device *dev,
+ struct napi_struct *napi,
+ int budget);
+ int (*ei_eth_recv_rss1)(struct net_device *dev,
+ struct napi_struct *napi,
+ int budget);
+ int (*ei_eth_recv_rss2)(struct net_device *dev,
+ struct napi_struct *napi,
+ int budget);
+ int (*ei_eth_recv_rss3)(struct net_device *dev,
+ struct napi_struct *napi,
+ int budget);
+ int (*ei_fill_tx_desc)(struct net_device *dev,
+ unsigned long *tx_cpu_owner_idx,
+ struct sk_buff *skb, int gmac_no);
+
+ /* MT7623 PSE reset workaround */
+ struct task_struct *kreset_task;
+ struct task_struct *kphy_poll_task;
+ unsigned int fe_reset_times;
+ unsigned int tx_mask;
+ unsigned int rx_mask;
+ unsigned int *rls_cnt;
+};
+
+struct net_device_stats *ra_get_stats(struct net_device *dev);
+
+int ei_open(struct net_device *dev);
+int ei_close(struct net_device *dev);
+
+int ra2882eth_init(void);
+void ra2882eth_cleanup_module(void);
+
+u32 mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data);
+u32 mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data);
+u32 mii_mgr_cl45_set_address(u32 port_num, u32 dev_addr, u32 reg_addr);
+u32 mii_mgr_read_cl45(u32 port_num, u32 dev_addr, u32 reg_addr,
+ u32 *read_data);
+u32 mii_mgr_write_cl45(u32 port_num, u32 dev_addr, u32 reg_addr,
+ u32 write_data);
+
+/* HNAT hooks: when HW NAT is disabled (CONFIG_RA_NAT_NONE) these are
+ * file-local NULL placeholders; otherwise they are defined by the HNAT
+ * module and shared across the driver.
+ * NOTE(review): declaring them "static" in a header gives every including
+ * translation unit its own copy -- confirm this is intended.
+ */
+#if defined(CONFIG_RA_NAT_NONE)
+static int (*ppe_hook_rx_eth)(struct sk_buff *skb);
+static int (*ppe_hook_tx_eth)(struct sk_buff *skb, int gmac_no);
+#else
+extern int (*ppe_hook_rx_eth)(struct sk_buff *skb);
+extern int (*ppe_hook_tx_eth)(struct sk_buff *skb, int gmac_no);
+#endif
+
+/* PDMA functions */
+int fe_pdma_wait_dma_idle(void);
+int fe_pdma_rx_dma_init(struct net_device *dev);
+int fe_pdma_tx_dma_init(struct net_device *dev);
+void fe_pdma_rx_dma_deinit(struct net_device *dev);
+void fe_pdma_tx_dma_deinit(struct net_device *dev);
+void set_fe_pdma_glo_cfg(void);
+int ei_pdma_start_xmit(struct sk_buff *skb, struct net_device *dev,
+ int gmac_no);
+int ei_pdma_xmit_housekeeping(struct net_device *netdev,
+ int budget);
+int fe_fill_tx_desc(struct net_device *dev,
+ unsigned long *tx_cpu_owner_idx,
+ struct sk_buff *skb,
+ int gmac_no);
+int fe_fill_tx_desc_tso(struct net_device *dev,
+ unsigned long *tx_cpu_owner_idx,
+ struct sk_buff *skb,
+ int gmac_no);
+
+/* QDMA functions */
+int fe_qdma_wait_dma_idle(void);
+int fe_qdma_rx_dma_init(struct net_device *dev);
+int fe_qdma_tx_dma_init(struct net_device *dev);
+void fe_qdma_rx_dma_deinit(struct net_device *dev);
+void fe_qdma_tx_dma_deinit(struct net_device *dev);
+void set_fe_qdma_glo_cfg(void);
+int ei_qdma_start_xmit(struct sk_buff *skb, struct net_device *dev,
+ int gmac_no);
+int ei_qdma_xmit_housekeeping(struct net_device *netdev, int budget);
+int ei_qdma_ioctl(struct net_device *dev, struct ifreq *ifr,
+ struct qdma_ioctl_data *ioctl_data);
+int ephy_ioctl(struct net_device *dev, struct ifreq *ifr,
+ struct ephy_ioctl_data *ioctl_data);
+/* HW LRO functions */
+int fe_hw_lro_init(struct net_device *dev);
+void fe_hw_lro_deinit(struct net_device *dev);
+int fe_hw_lro_recv(struct net_device *dev,
+ struct napi_struct *napi,
+ int budget);
+void fe_set_hw_lro_my_ip(char *lan_ip_addr);
+
+int fe_rss_4ring_init(struct net_device *dev);
+void fe_rss_4ring_deinit(struct net_device *dev);
+int fe_rss_2ring_init(struct net_device *dev);
+void fe_rss_2ring_deinit(struct net_device *dev);
+int fe_rss0_recv(struct net_device *dev,
+ struct napi_struct *napi,
+ int budget);
+int fe_rss1_recv(struct net_device *dev,
+ struct napi_struct *napi,
+ int budget);
+int fe_rss2_recv(struct net_device *dev,
+ struct napi_struct *napi,
+ int budget);
+int fe_rss3_recv(struct net_device *dev,
+ struct napi_struct *napi,
+ int budget);
+/* Allocate backing storage for an RX buffer.  Uses the slab when
+ * CONFIG_ETH_SLAB_ALLOC_SKB is set, otherwise the page-fragment
+ * allocator (netdev_alloc_frag(), which does not take @flags).
+ */
+static inline void *raeth_alloc_skb_data(size_t size, gfp_t flags)
+{
+#ifdef CONFIG_ETH_SLAB_ALLOC_SKB
+ return kmalloc(size, flags);
+#else
+ return netdev_alloc_frag(size);
+#endif
+}
+
+/* Free a buffer obtained from raeth_alloc_skb_data(), matching the
+ * allocator selected by CONFIG_ETH_SLAB_ALLOC_SKB.
+ */
+static inline void raeth_free_skb_data(void *addr)
+{
+#ifdef CONFIG_ETH_SLAB_ALLOC_SKB
+ kfree(addr);
+#else
+ skb_free_frag(addr);
+#endif
+}
+
+/* Wrap an RX data buffer in an sk_buff without copying.  Slab buffers are
+ * passed to build_skb() with frag_size 0 (kmalloc'ed memory); fragment
+ * buffers pass their true @frag_size through.  Pairs with
+ * raeth_alloc_skb_data()/raeth_free_skb_data().
+ */
+static inline struct sk_buff *raeth_build_skb(void *data,
+ unsigned int frag_size)
+{
+#ifdef CONFIG_ETH_SLAB_ALLOC_SKB
+ return build_skb(data, 0);
+#else
+ return build_skb(data, frag_size);
+#endif
+}
+
+extern u32 gmac1_txq_num;
+extern u32 gmac1_txq_txd_num;
+extern u32 gmac1_txd_num;
+extern u32 gmac2_txq_num;
+extern u32 gmac2_txq_txd_num;
+extern u32 gmac2_txd_num;
+extern u32 num_rx_desc;
+extern u32 num_tx_max_process;
+extern u32 num_tx_desc;
+extern u32 total_txq_num;
+extern u32 mac_to_gigaphy_mode_addr;
+extern u32 mac_to_gigaphy_mode_addr2;
+#endif
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_hwlro.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_hwlro.c
new file mode 100644
index 0000000..9d76dd0
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_hwlro.c
@@ -0,0 +1,619 @@
+/* Copyright 2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "raether.h"
+#include "raether_hwlro.h"
+#include "ra_mac.h"
+
+/* HW LRO Force port */
+/* Configure LRO RX ring 1 in force-port mode: only TCP flows matching the
+ * hard-coded 4-tuple below (10.10.10.3:1122 -> 10.10.10.254:3344) are
+ * aggregated on this ring.  NOTE(review): the fixed addresses/ports look
+ * like a bring-up/test flow -- confirm before relying on this in
+ * production (FE_HW_LRO_FPORT selects this path).  Always returns 0.
+ */
+int set_fe_lro_ring1_cfg(struct net_device *dev)
+{
+ unsigned int ip;
+
+ pr_debug("set_fe_lro_ring1_cfg()\n");
+
+ /* 1. Set RX ring mode to force port */
+ SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_FORCE_PORT);
+
+ /* 2. Configure lro ring */
+ /* 2.1 set src/destination TCP ports */
+ SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING1, 1122);
+ SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING1, 3344);
+ /* 2.2 set src/destination IPs */
+ str_to_ip(&ip, "10.10.10.3");
+ sys_reg_write(LRO_RX_RING1_SIP_DW0, ip);
+ str_to_ip(&ip, "10.10.10.254");
+ sys_reg_write(LRO_RX_RING1_DIP_DW0, ip);
+ /* 2.3 IPv4 force port mode */
+ SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING1, 1);
+ /* 2.4 IPv6 force port mode */
+ SET_PDMA_RXRING_IPV6_FORCE_MODE(ADMA_RX_RING1, 1);
+
+ /* 3. Set Age timer: 10 msec. */
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, HW_LRO_AGE_TIME);
+
+ /* 4. Valid LRO ring */
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
+
+ return 0;
+}
+
+/* Configure LRO RX ring 2 in force-port mode (fixed test flow
+ * 10.10.10.3:5566 -> 10.10.10.254:7788); see set_fe_lro_ring1_cfg() for
+ * the shared sequence.  Always returns 0.
+ */
+int set_fe_lro_ring2_cfg(struct net_device *dev)
+{
+ unsigned int ip;
+
+ pr_debug("set_fe_lro_ring2_cfg()\n");
+
+ /* 1. Set RX ring mode to force port */
+ SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_FORCE_PORT);
+
+ /* 2. Configure lro ring */
+ /* 2.1 set src/destination TCP ports */
+ SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING2, 5566);
+ SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING2, 7788);
+ /* 2.2 set src/destination IPs */
+ str_to_ip(&ip, "10.10.10.3");
+ sys_reg_write(LRO_RX_RING2_SIP_DW0, ip);
+ str_to_ip(&ip, "10.10.10.254");
+ sys_reg_write(LRO_RX_RING2_DIP_DW0, ip);
+ /* 2.3 IPv4 force port mode */
+ SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING2, 1);
+ /* 2.4 IPv6 force port mode */
+ SET_PDMA_RXRING_IPV6_FORCE_MODE(ADMA_RX_RING2, 1);
+
+ /* 3. Set Age timer: 10 msec. */
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, HW_LRO_AGE_TIME);
+
+ /* 4. Valid LRO ring */
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 1);
+
+ return 0;
+}
+
+/* Configure LRO RX ring 3 in force-port mode (fixed test flow
+ * 10.10.10.3:9900 -> 10.10.10.254:99); see set_fe_lro_ring1_cfg() for
+ * the shared sequence.  Always returns 0.
+ */
+int set_fe_lro_ring3_cfg(struct net_device *dev)
+{
+ unsigned int ip;
+
+ pr_debug("set_fe_lro_ring3_cfg()\n");
+
+ /* 1. Set RX ring mode to force port */
+ SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_FORCE_PORT);
+
+ /* 2. Configure lro ring */
+ /* 2.1 set src/destination TCP ports */
+ SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING3, 9900);
+ SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING3, 99);
+ /* 2.2 set src/destination IPs */
+ str_to_ip(&ip, "10.10.10.3");
+ sys_reg_write(LRO_RX_RING3_SIP_DW0, ip);
+ str_to_ip(&ip, "10.10.10.254");
+ sys_reg_write(LRO_RX_RING3_DIP_DW0, ip);
+ /* 2.3 IPv4 force port mode */
+ SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING3, 1);
+ /* 2.4 IPv6 force port mode */
+ SET_PDMA_RXRING_IPV6_FORCE_MODE(ADMA_RX_RING3, 1);
+
+ /* 3. Set Age timer: 10 msec. */
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, HW_LRO_AGE_TIME);
+
+ /* 4. Valid LRO ring */
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 1);
+
+ return 0;
+}
+
+/* Global LRO enable for force-port mode: program aggregation limits and
+ * prefetch, wait for the LRO engine to relinquish, then set PDMA_LRO_EN.
+ * Always returns 0.
+ * NOTE(review): the relinquish poll below has no timeout and no
+ * cpu_relax(); if the hardware never clears the bit this loop spins
+ * forever in the caller's context.
+ */
+int set_fe_lro_glo_cfg(struct net_device *dev)
+{
+ unsigned int reg_val = 0;
+
+ pr_debug("set_fe_lro_glo_cfg()\n");
+
+ /* 1 Set max AGG timer: 10 msec. */
+ SET_PDMA_LRO_MAX_AGG_TIME(HW_LRO_AGG_TIME);
+
+ /* 2. Set max LRO agg count */
+ SET_PDMA_LRO_MAX_AGG_CNT(HW_LRO_MAX_AGG_CNT);
+
+ /* PDMA prefetch enable setting */
+ SET_PDMA_LRO_RXD_PREFETCH_EN(ADMA_RXD_PREFETCH_EN |
+ ADMA_MULTI_RXD_PREFETCH_EN);
+
+ /* 2.1 IPv4 checksum update enable */
+ SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
+
+ /* 3. Polling relinguish */
+ while (1) {
+ if (sys_reg_read(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH)
+ pr_warn("Polling HW LRO RELINGUISH...\n");
+ else
+ break;
+ }
+
+ /* 4. Enable LRO */
+ reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0);
+ reg_val |= PDMA_LRO_EN;
+ sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val);
+
+ return 0;
+}
+
+/* Program the LAN IPv4 address (dotted-quad string @lan_ip_addr) as the
+ * "my IP" destination match for LRO ring 0.  DW1..DW3 are zeroed, i.e.
+ * only an IPv4 address is installed, and the entry is marked valid last.
+ */
+void fe_set_hw_lro_my_ip(char *lan_ip_addr)
+{
+ unsigned int lan_ip;
+
+ str_to_ip(&lan_ip, lan_ip_addr);
+ pr_info("[%s]lan_ip_addr = %s (lan_ip = 0x%x)\n",
+ __func__, lan_ip_addr, lan_ip);
+
+ /* Set my IP_1: LAN IP */
+ sys_reg_write(LRO_RX_RING0_DIP_DW0, lan_ip);
+ sys_reg_write(LRO_RX_RING0_DIP_DW1, 0);
+ sys_reg_write(LRO_RX_RING0_DIP_DW2, 0);
+ sys_reg_write(LRO_RX_RING0_DIP_DW3, 0);
+ SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING0, 1);
+}
+
+/* HW LRO Auto-learn */
+/* Configure HW LRO in auto-learn mode: install the LAN IP as the flow
+ * match for ring 0, switch rings 1-3 to auto-learn, program timers,
+ * aggregation limits, prefetch and replacement-score parameters, then
+ * enable the engine.  Always returns 0.
+ * NOTE(review): like set_fe_lro_glo_cfg(), the relinquish poll below has
+ * no timeout -- it spins until the hardware clears the bit.
+ */
+int set_fe_lro_auto_cfg(struct net_device *dev)
+{
+ struct END_DEVICE *ei_local = netdev_priv(dev);
+ unsigned int reg_val = 0;
+
+ pr_debug("set_fe_lro_auto_cfg()\n");
+
+ fe_set_hw_lro_my_ip(ei_local->lan_ip4_addr);
+
+ /* Set RX ring1~3 to auto-learn modes */
+ SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_AUTO_LEARN);
+ SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_AUTO_LEARN);
+ SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_AUTO_LEARN);
+
+ /* Valid LRO ring */
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING0, 1);
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 1);
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 1);
+
+ /* Set AGE timer */
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, HW_LRO_AGE_TIME);
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, HW_LRO_AGE_TIME);
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, HW_LRO_AGE_TIME);
+
+ /* Set max AGG timer */
+ SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING1, HW_LRO_AGG_TIME);
+ SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING2, HW_LRO_AGG_TIME);
+ SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING3, HW_LRO_AGG_TIME);
+
+ /* Set max LRO agg count */
+ SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING1, HW_LRO_MAX_AGG_CNT);
+ SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING2, HW_LRO_MAX_AGG_CNT);
+ SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING3, HW_LRO_MAX_AGG_CNT);
+
+ /* IPv6 LRO enable */
+ SET_PDMA_LRO_IPV6_EN(1);
+
+ /* IPv4 checksum update enable */
+ SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
+
+ /* TCP push option check disable */
+ /* SET_PDMA_LRO_IPV4_CTRL_PUSH_EN(0); */
+
+ /* PDMA prefetch enable setting */
+ SET_PDMA_LRO_RXD_PREFETCH_EN(ADMA_RXD_PREFETCH_EN |
+ ADMA_MULTI_RXD_PREFETCH_EN);
+
+ /* switch priority comparison to packet count mode */
+ SET_PDMA_LRO_ALT_SCORE_MODE(PDMA_LRO_ALT_PKT_CNT_MODE);
+
+ /* bandwidth threshold setting */
+ SET_PDMA_LRO_BW_THRESHOLD(HW_LRO_BW_THRE);
+
+ /* auto-learn score delta setting */
+ sys_reg_write(LRO_ALT_SCORE_DELTA, HW_LRO_REPLACE_DELTA);
+
+ /* Set ALT timer to 20us: (unit: 20us) */
+ SET_PDMA_LRO_ALT_REFRESH_TIMER_UNIT(HW_LRO_TIMER_UNIT);
+ /* Set ALT refresh timer to 1 sec. (unit: 20us) */
+ SET_PDMA_LRO_ALT_REFRESH_TIMER(HW_LRO_REFRESH_TIME);
+
+ /* the least remaining room of SDL0 in RXD for lro aggregation */
+ SET_PDMA_LRO_MIN_RXD_SDL(HW_LRO_SDL_REMAIN_ROOM);
+
+ /* Polling relinguish */
+ while (1) {
+ if (sys_reg_read(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH)
+ pr_warn("Polling HW LRO RELINGUISH...\n");
+ else
+ break;
+ }
+
+ /* Enable HW LRO */
+ reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0);
+ reg_val |= PDMA_LRO_EN;
+
+ /*enable cpu reason black list*/
+ reg_val |= PDMA_LRO_CRSN_BNW;
+ sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val);
+
+ /*no use PPE cpu reason 0xff*/
+ sys_reg_write(ADMA_LRO_CTRL_DW1, 0xffffffff);
+
+ return 0;
+}
+
+/* Allocate and initialise HW LRO RX rings 1..3: coherent descriptor rings,
+ * per-descriptor data buffers with streaming DMA mappings, ring base/count
+ * registers, then either the force-port or the auto-learn configuration.
+ * Returns 0 on success, -ENOMEM on allocation/mapping failure.
+ *
+ * Fix vs. original: the dma_alloc_coherent() result is now checked before
+ * the descriptors are touched (a failed allocation previously caused a
+ * NULL dereference in the init loop).
+ * NOTE(review): as in the original, the no_rx_mem path does not unwind
+ * buffers/rings allocated before the failure -- full cleanup would need
+ * the dma_unmap/free sequence from fe_hw_lro_deinit().
+ */
+int fe_hw_lro_init(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	int skb_size;
+	int i, j;
+
+	skb_size = SKB_DATA_ALIGN(MAX_LRO_RX_LENGTH + NET_IP_ALIGN) +
+		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	/* Initial RX Ring 1 ~ 3 */
+	for (i = 1; i < MAX_RX_RING_NUM; i++) {
+		ei_local->rx_ring[i] =
+		    dma_alloc_coherent(dev->dev.parent,
+				       NUM_LRO_RX_DESC *
+				       sizeof(struct PDMA_rxdesc),
+				       &ei_local->phy_rx_ring[i],
+				       GFP_ATOMIC | __GFP_ZERO);
+		if (!ei_local->rx_ring[i]) {
+			pr_err("rx ring %d allocation failed!\n", i);
+			goto no_rx_mem;
+		}
+		for (j = 0; j < NUM_LRO_RX_DESC; j++) {
+			ei_local->netrx_skb_data[i][j] =
+			    raeth_alloc_skb_data(skb_size, GFP_KERNEL);
+
+			if (!ei_local->netrx_skb_data[i][j]) {
+				pr_err("rx skbuff buffer allocation failed!\n");
+				goto no_rx_mem;
+			}
+
+			memset(&ei_local->rx_ring[i][j], 0,
+			       sizeof(struct PDMA_rxdesc));
+			ei_local->rx_ring[i][j].rxd_info2.DDONE_bit = 0;
+			ei_local->rx_ring[i][j].rxd_info2.LS0 = 0;
+			ei_local->rx_ring[i][j].rxd_info2.PLEN0 =
+			    SET_ADMA_RX_LEN0(MAX_LRO_RX_LENGTH);
+			ei_local->rx_ring[i][j].rxd_info2.PLEN1 =
+			    SET_ADMA_RX_LEN1(MAX_LRO_RX_LENGTH >> 14);
+			ei_local->rx_ring[i][j].rxd_info1.PDP0 =
+			    dma_map_single(dev->dev.parent,
+					   ei_local->netrx_skb_data[i][j] +
+					   NET_SKB_PAD,
+					   MAX_LRO_RX_LENGTH,
+					   DMA_FROM_DEVICE);
+			if (unlikely
+			    (dma_mapping_error
+			     (dev->dev.parent,
+			      ei_local->rx_ring[i][j].rxd_info1.PDP0))) {
+				pr_err("[%s]dma_map_single() failed...\n",
+				       __func__);
+				goto no_rx_mem;
+			}
+		}
+		pr_info("\nphy_rx_ring[%d] = 0x%08x, rx_ring[%d] = 0x%p\n",
+			i, (unsigned int)ei_local->phy_rx_ring[i],
+			i, (void __iomem *)ei_local->rx_ring[i]);
+	}
+
+	/* publish ring bases/sizes to the PDMA and reset its indices */
+	sys_reg_write(RX_BASE_PTR3, phys_to_bus((u32)ei_local->phy_rx_ring[3]));
+	sys_reg_write(RX_MAX_CNT3, cpu_to_le32((u32)NUM_LRO_RX_DESC));
+	sys_reg_write(RX_CALC_IDX3, cpu_to_le32((u32)(NUM_LRO_RX_DESC - 1)));
+	sys_reg_write(PDMA_RST_CFG, PST_DRX_IDX3);
+	sys_reg_write(RX_BASE_PTR2, phys_to_bus((u32)ei_local->phy_rx_ring[2]));
+	sys_reg_write(RX_MAX_CNT2, cpu_to_le32((u32)NUM_LRO_RX_DESC));
+	sys_reg_write(RX_CALC_IDX2, cpu_to_le32((u32)(NUM_LRO_RX_DESC - 1)));
+	sys_reg_write(PDMA_RST_CFG, PST_DRX_IDX2);
+	sys_reg_write(RX_BASE_PTR1, phys_to_bus((u32)ei_local->phy_rx_ring[1]));
+	sys_reg_write(RX_MAX_CNT1, cpu_to_le32((u32)NUM_LRO_RX_DESC));
+	sys_reg_write(RX_CALC_IDX1, cpu_to_le32((u32)(NUM_LRO_RX_DESC - 1)));
+	sys_reg_write(PDMA_RST_CFG, PST_DRX_IDX1);
+
+	if (ei_local->features & FE_HW_LRO_FPORT) {
+		set_fe_lro_ring1_cfg(dev);
+		set_fe_lro_ring2_cfg(dev);
+		set_fe_lro_ring3_cfg(dev);
+		set_fe_lro_glo_cfg(dev);
+	} else {
+		set_fe_lro_auto_cfg(dev);
+	}
+
+	return 0;
+
+no_rx_mem:
+	return -ENOMEM;
+}
+
+/* fe_hw_lro_deinit - release the resources taken by fe_hw_lro_init().
+ * Frees the coherent descriptor rings 1..3 and their skb data buffers.
+ * NOTE(review): the streaming DMA mappings created at init time are not
+ * unmapped here -- confirm this is handled elsewhere or harmless on this
+ * platform.
+ */
+void fe_hw_lro_deinit(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	int i, j;
+
+	for (i = 1; i < MAX_RX_RING_NUM; i++) {
+		/* free RX Ring */
+		dma_free_coherent(dev->dev.parent,
+				  NUM_LRO_RX_DESC * sizeof(struct PDMA_rxdesc),
+				  ei_local->rx_ring[i],
+				  ei_local->phy_rx_ring[i]);
+		/* free RX data */
+		for (j = 0; j < NUM_LRO_RX_DESC; j++) {
+			raeth_free_skb_data(ei_local->netrx_skb_data[i][j]);
+			ei_local->netrx_skb_data[i][j] = NULL;
+		}
+	}
+}
+
+/* hw_lro_rx_desc_init - re-arm one RX descriptor with a fresh buffer.
+ * Ring 0 is the normal RX ring (single MAX_RX_LENGTH segment); rings 1..3
+ * are LRO rings whose 18-bit length is split over PLEN0/PLEN1.  DDONE is
+ * cleared last so the DMA engine only takes ownership of a descriptor
+ * that is otherwise fully initialized.
+ */
+static inline void hw_lro_rx_desc_init(struct END_DEVICE *ei_local,
+				       struct PDMA_rxdesc *rx_ring,
+				       unsigned int rx_ring_no,
+				       dma_addr_t dma_addr)
+{
+	if (rx_ring_no != 0) {
+		/* lro ring */
+		rx_ring->rxd_info2.PLEN0 =
+		    SET_ADMA_RX_LEN0(MAX_LRO_RX_LENGTH);
+		rx_ring->rxd_info2.PLEN1 =
+		    SET_ADMA_RX_LEN1(MAX_LRO_RX_LENGTH >> 14);
+	} else {
+		/* normal ring; braces added to match the if-branch
+		 * (kernel coding style: brace all branches when any
+		 * branch is braced)
+		 */
+		rx_ring->rxd_info2.PLEN0 = MAX_RX_LENGTH;
+	}
+
+	rx_ring->rxd_info1.PDP0 = dma_addr;
+	rx_ring->rxd_info2.LS0 = 0;
+	rx_ring->rxd_info2.DDONE_bit = 0;
+}
+
+/* get_hw_lro_rx_ring - pick the next RX ring with a completed descriptor.
+ * Scans rings 0..MAX_RX_RING_NUM-1 at the per-ring indexes in rx_idx[]
+ * and returns the first ring whose DDONE bit is set.  When no ring is
+ * ready it falls back to returning 0; the caller re-checks DDONE on the
+ * selected descriptor before consuming it, so the fallback is safe.
+ */
+static int get_hw_lro_rx_ring(struct END_DEVICE *ei_local,
+			      unsigned int rx_idx[])
+{
+	int i;
+
+	for (i = 0; i < MAX_RX_RING_NUM; i++)
+		if (ei_local->rx_ring[i][rx_idx[i]].rxd_info2.DDONE_bit == 1)
+			return i;
+
+	return 0;
+}
+
+/* get_rx_cal_idx_reg - CPU index register address for a given RX ring.
+ * The RX_CALC_IDX registers are spaced 0x10 apart, hence ring << 4.
+ */
+static inline void __iomem *get_rx_cal_idx_reg(unsigned int rx_ring_no)
+{
+	return (void __iomem *)(RAETH_RX_CALC_IDX0 + (rx_ring_no << 4));
+}
+
+/* fe_hw_lro_recv - RX poll loop servicing the normal ring and HW LRO rings.
+ * Each iteration: pick whichever ring has a completed descriptor
+ * (get_hw_lro_rx_ring), build an skb from the current buffer, hand it to
+ * the stack (optionally through the HW-NAT hook), re-arm the descriptor
+ * with a freshly allocated and mapped replacement buffer, and advance that
+ * ring's CPU index register.
+ * Returns the number of polls consumed, or budget + 1 on an allocation or
+ * mapping failure so the caller keeps the poll scheduled.
+ * NOTE(review): p_ad is computed from ei_local->pseudo_dev before the NULL
+ * check further down; presumably pseudo_dev is always set when SP == 2
+ * traffic can arrive -- confirm.
+ */
+int fe_hw_lro_recv(struct net_device *dev,
+		   struct napi_struct *napi,
+		   int budget)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	struct PSEUDO_ADAPTER *p_ad = netdev_priv(ei_local->pseudo_dev);
+	struct sk_buff *rx_skb;
+	struct PDMA_rxdesc *rx_ring, *rx_ring_next;
+	void *rx_data, *rx_data_next, *new_data;
+	unsigned int length = 0;
+	unsigned int rx_ring_no = 0, rx_ring_no_next = 0;
+	unsigned int rx_dma_owner_idx, rx_dma_owner_idx_next;
+	unsigned int rx_dma_owner_lro[MAX_RX_RING_NUM];
+	unsigned int skb_size, map_size;
+	void __iomem *rx_calc_idx_reg;
+	int rx_processed = 0;
+
+	/* get cpu owner indexes of rx rings */
+	rx_dma_owner_lro[0] = (ei_local->rx_calc_idx[0] + 1) % num_rx_desc;
+	rx_dma_owner_lro[1] = (ei_local->rx_calc_idx[1] + 1) % NUM_LRO_RX_DESC;
+	rx_dma_owner_lro[2] = (ei_local->rx_calc_idx[2] + 1) % NUM_LRO_RX_DESC;
+	rx_dma_owner_lro[3] = (ei_local->rx_calc_idx[3] + 1) % NUM_LRO_RX_DESC;
+
+	rx_ring_no = get_hw_lro_rx_ring(ei_local, rx_dma_owner_lro);
+	rx_dma_owner_idx = rx_dma_owner_lro[rx_ring_no];
+	rx_ring = &ei_local->rx_ring[rx_ring_no][rx_dma_owner_idx];
+	rx_data = ei_local->netrx_skb_data[rx_ring_no][rx_dma_owner_idx];
+	rx_calc_idx_reg = get_rx_cal_idx_reg(rx_ring_no);
+
+	for (;;) {
+		dma_addr_t dma_addr;
+
+		/* stop when the budget is spent or the descriptor is
+		 * still owned by the DMA engine
+		 */
+		if ((rx_processed++ > budget) ||
+		    (rx_ring->rxd_info2.DDONE_bit == 0))
+			break;
+
+		/* prefetch the next handling RXD */
+		if (rx_ring_no == 0) {
+			rx_dma_owner_lro[rx_ring_no] =
+			    (rx_dma_owner_idx + 1) % num_rx_desc;
+			skb_size =
+			    SKB_DATA_ALIGN(MAX_RX_LENGTH + NET_IP_ALIGN +
+					   NET_SKB_PAD) +
+			    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+			map_size = MAX_RX_LENGTH;
+		} else {
+			rx_dma_owner_lro[rx_ring_no] =
+			    (rx_dma_owner_idx + 1) % NUM_LRO_RX_DESC;
+			skb_size =
+			    SKB_DATA_ALIGN(MAX_LRO_RX_LENGTH + NET_IP_ALIGN +
+					   NET_SKB_PAD) +
+			    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+			map_size = MAX_LRO_RX_LENGTH;
+		}
+
+		rx_ring_no_next = get_hw_lro_rx_ring(ei_local,
+						     rx_dma_owner_lro);
+		rx_dma_owner_idx_next = rx_dma_owner_lro[rx_ring_no_next];
+		rx_ring_next =
+		    &ei_local->rx_ring
+		    [rx_ring_no_next][rx_dma_owner_idx_next];
+		rx_data_next =
+		    ei_local->netrx_skb_data
+		    [rx_ring_no_next][rx_dma_owner_idx_next];
+		prefetch(rx_ring_next);
+
+		/* We have to check the free memory size is big enough
+		 * before pass the packet to cpu
+		 */
+		new_data = raeth_alloc_skb_data(skb_size, GFP_ATOMIC);
+
+		if (unlikely(!new_data)) {
+			pr_err("skb not available...\n");
+			goto skb_err;
+		}
+
+		dma_addr = dma_map_single(dev->dev.parent,
+					  new_data + NET_SKB_PAD,
+					  map_size,
+					  DMA_FROM_DEVICE);
+
+		if (unlikely(dma_mapping_error(dev->dev.parent, dma_addr))) {
+			pr_err("[%s]dma_map_single() failed...\n", __func__);
+			raeth_free_skb_data(new_data);
+			goto skb_err;
+		}
+
+		rx_skb = raeth_build_skb(rx_data, skb_size);
+
+		if (unlikely(!rx_skb)) {
+			put_page(virt_to_head_page(rx_data));
+			pr_err("build_skb failed\n");
+			goto skb_err;
+		}
+		skb_reserve(rx_skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+		/* HW-reported payload length, 18 bits split over
+		 * PLEN1:PLEN0
+		 */
+		length = (rx_ring->rxd_info2.PLEN1 << 14) |
+		    rx_ring->rxd_info2.PLEN0;
+		dma_unmap_single(dev->dev.parent,
+				 rx_ring->rxd_info1.PDP0,
+				 length, DMA_FROM_DEVICE);
+
+		prefetch(rx_skb->data);
+
+		/* skb processing */
+		skb_put(rx_skb, length);
+
+		/* rx packet from GE2 */
+		if (rx_ring->rxd_info4.SP == 2) {
+			if (ei_local->pseudo_dev) {
+				rx_skb->dev = ei_local->pseudo_dev;
+				rx_skb->protocol =
+				    eth_type_trans(rx_skb,
+						   ei_local->pseudo_dev);
+			} else {
+				pr_err
+				    ("pseudo_dev is still not initialize ");
+				pr_err
+				    ("but receive packet from GMAC2\n");
+			}
+		} else {
+			rx_skb->dev = dev;
+			rx_skb->protocol = eth_type_trans(rx_skb, dev);
+		}
+
+		/* rx checksum offload */
+		if (likely(rx_ring->rxd_info4.L4VLD))
+			rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			rx_skb->ip_summed = CHECKSUM_NONE;
+
+#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+		/* stash the RX descriptor info where the HW-NAT (FOE)
+		 * engine expects to find it
+		 */
+		if (ppe_hook_rx_eth) {
+			if (IS_SPACE_AVAILABLE_HEAD(rx_skb)) {
+				*(uint32_t *)(FOE_INFO_START_ADDR_HEAD(rx_skb)) =
+					*(uint32_t *)&rx_ring->rxd_info4;
+				FOE_ALG_HEAD(rx_skb) = 0;
+				FOE_MAGIC_TAG_HEAD(rx_skb) = FOE_MAGIC_GE;
+				FOE_TAG_PROTECT_HEAD(rx_skb) = TAG_PROTECT;
+			}
+			if (IS_SPACE_AVAILABLE_TAIL(rx_skb)) {
+				*(uint32_t *)(FOE_INFO_START_ADDR_TAIL(rx_skb) + 2) =
+					*(uint32_t *)&rx_ring->rxd_info4;
+				FOE_ALG_TAIL(rx_skb) = 0;
+				FOE_MAGIC_TAG_TAIL(rx_skb) = FOE_MAGIC_GE;
+				FOE_TAG_PROTECT_TAIL(rx_skb) = TAG_PROTECT;
+			}
+		}
+#endif
+
+		/* HW LRO aggregation statistics */
+		if (ei_local->features & FE_HW_LRO_DBG) {
+			hw_lro_stats_update(rx_ring_no, rx_ring);
+			hw_lro_flush_stats_update(rx_ring_no, rx_ring);
+		}
+
+		if (ei_local->features & FE_HW_VLAN_RX) {
+			if (rx_ring->rxd_info2.TAG)
+				__vlan_hwaccel_put_tag(rx_skb,
+						       htons(ETH_P_8021Q),
+						       rx_ring->rxd_info3.VID);
+		}
+/* ra_sw_nat_hook_rx return 1 --> continue
+ * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
+ */
+#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+		if ((!ppe_hook_rx_eth) ||
+		    (ppe_hook_rx_eth && ppe_hook_rx_eth(rx_skb))) {
+#endif
+			if (ei_local->features & FE_INT_NAPI)
+				/* napi_gro_receive(napi, rx_skb); */
+				netif_receive_skb(rx_skb);
+			else
+				netif_rx(rx_skb);
+
+#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+		}
+#endif
+
+		if (rx_ring->rxd_info4.SP == 2) {
+			p_ad->stat.rx_packets++;
+			p_ad->stat.rx_bytes += length;
+		} else {
+			ei_local->stat.rx_packets++;
+			ei_local->stat.rx_bytes += length;
+		}
+
+		/* Init RX desc. */
+		hw_lro_rx_desc_init(ei_local,
+				    rx_ring,
+				    rx_ring_no,
+				    dma_addr);
+		ei_local->netrx_skb_data[rx_ring_no][rx_dma_owner_idx] =
+			new_data;
+
+		/* make sure that all changes to the dma ring are flushed before
+		 * we continue
+		 */
+		wmb();
+
+		sys_reg_write(rx_calc_idx_reg, rx_dma_owner_idx);
+		ei_local->rx_calc_idx[rx_ring_no] = rx_dma_owner_idx;
+
+		/* use prefetched variable */
+		rx_dma_owner_idx = rx_dma_owner_idx_next;
+		rx_ring_no = rx_ring_no_next;
+		rx_ring = rx_ring_next;
+		rx_data = rx_data_next;
+		rx_calc_idx_reg = get_rx_cal_idx_reg(rx_ring_no);
+	}			/* for */
+
+	return rx_processed;
+
+skb_err:
+	/* rx packet from GE2 */
+	if (rx_ring->rxd_info4.SP == 2)
+		p_ad->stat.rx_dropped++;
+	else
+		ei_local->stat.rx_dropped++;
+
+	/* Discard the rx packet */
+	hw_lro_rx_desc_init(ei_local,
+			    rx_ring,
+			    rx_ring_no,
+			    rx_ring->rxd_info1.PDP0);
+	sys_reg_write(rx_calc_idx_reg, rx_dma_owner_idx);
+	ei_local->rx_calc_idx[rx_ring_no] = rx_dma_owner_idx;
+
+	return (budget + 1);
+}
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_hwlro.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_hwlro.h
new file mode 100644
index 0000000..c319aca
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_hwlro.h
@@ -0,0 +1,403 @@
+/* Copyright 2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef RA_HWLRO_H
+#define RA_HWLRO_H
+
+#include "raeth_reg.h"
+
+/* HW LRO tuning parameters.  Lengths are in bytes; the various *_TIME
+ * values are programmed into the LRO timer registers below (units per the
+ * inline comments -- TODO confirm against the SoC datasheet).
+ */
+#define HW_LRO_TIMER_UNIT   1
+#define HW_LRO_REFRESH_TIME 50000
+#define HW_LRO_MAX_AGG_CNT 64
+#define HW_LRO_AGG_DELTA 1
+#define MAX_LRO_RX_LENGTH (PAGE_SIZE * 3)
+#define HW_LRO_AGG_TIME 10	/* 200us */
+#define HW_LRO_AGE_TIME 50	/* 1ms */
+#define HW_LRO_BW_THRE 3000
+#define HW_LRO_REPLACE_DELTA 1000
+#define HW_LRO_SDL_REMAIN_ROOM 1522
+
+/* Per-entry layout of the PDMA LRO auto-learn TLB: nine 32-bit words.
+ * The bitfield widths mirror the hardware layout, so member order and
+ * sizes must not be changed.
+ */
+
+/* word 0: destination / source TCP ports of the learned flow */
+struct PDMA_LRO_AUTO_TLB_INFO0_T {
+	unsigned int DTP:16;
+	unsigned int STP:16;
+};
+
+/* words 1-4: source IP address (four words to cover IPv6; IPv4 uses SIP0) */
+struct PDMA_LRO_AUTO_TLB_INFO1_T {
+	unsigned int SIP0:32;
+};
+
+struct PDMA_LRO_AUTO_TLB_INFO2_T {
+	unsigned int SIP1:32;
+};
+
+struct PDMA_LRO_AUTO_TLB_INFO3_T {
+	unsigned int SIP2:32;
+};
+
+struct PDMA_LRO_AUTO_TLB_INFO4_T {
+	unsigned int SIP3:32;
+};
+
+/* words 5-6: VLAN tags, tag-valid bits, and the aggregation counter */
+struct PDMA_LRO_AUTO_TLB_INFO5_T {
+	unsigned int VLAN_VID0:32;
+};
+
+struct PDMA_LRO_AUTO_TLB_INFO6_T {
+	unsigned int VLAN_VID1:16;
+	unsigned int VLAN_VID_VLD:4;
+	unsigned int CNT:12;
+};
+
+/* word 7: aggregated length in dwords */
+struct PDMA_LRO_AUTO_TLB_INFO7_T {
+	unsigned int DW_LEN:32;
+};
+
+/* word 8: destination-IP index, address family and entry-valid flag */
+struct PDMA_LRO_AUTO_TLB_INFO8_T {
+	unsigned int DIP_ID:2;
+	unsigned int IPV6:1;
+	unsigned int IPV4:1;
+	unsigned int RESV:27;
+	unsigned int VALID:1;
+};
+
+struct PDMA_LRO_AUTO_TLB_INFO {
+	struct PDMA_LRO_AUTO_TLB_INFO0_T auto_tlb_info0;
+	struct PDMA_LRO_AUTO_TLB_INFO1_T auto_tlb_info1;
+	struct PDMA_LRO_AUTO_TLB_INFO2_T auto_tlb_info2;
+	struct PDMA_LRO_AUTO_TLB_INFO3_T auto_tlb_info3;
+	struct PDMA_LRO_AUTO_TLB_INFO4_T auto_tlb_info4;
+	struct PDMA_LRO_AUTO_TLB_INFO5_T auto_tlb_info5;
+	struct PDMA_LRO_AUTO_TLB_INFO6_T auto_tlb_info6;
+	struct PDMA_LRO_AUTO_TLB_INFO7_T auto_tlb_info7;
+	struct PDMA_LRO_AUTO_TLB_INFO8_T auto_tlb_info8;
+};
+
+/* Bit masks (BIT()/BITS() positions) in the ADMA_LRO_CTRL_DW0..DW3 and
+ * per-ring LRO registers, followed by the shift offsets used by the
+ * SET_PDMA_* read-modify-write macros further below.
+ */
+#define PDMA_LRO_EN             BIT(0)
+#define PDMA_LRO_IPV6_EN        BIT(1)
+#define PDMA_LRO_CRSN_BNW       BIT(6)
+#define PDMA_LRO_IPV4_CSUM_UPDATE_EN BIT(7)
+#define PDMA_LRO_IPV4_CTRL_PUSH_EN BIT(23)
+#define PDMA_LRO_RXD_PREFETCH_EN   BITS(3, 4)
+#define PDMA_NON_LRO_MULTI_EN   BIT(2)
+#define PDMA_LRO_DLY_INT_EN     BIT(5)
+#define PDMA_LRO_FUSH_REQ       BITS(26, 28)
+#define PDMA_LRO_RELINGUISH     BITS(29, 31)
+#define PDMA_LRO_FREQ_PRI_ADJ   BITS(16, 19)
+#define PDMA_LRO_TPUT_PRE_ADJ   BITS(8, 11)
+#define PDMA_LRO_TPUT_PRI_ADJ   BITS(12, 15)
+#define PDMA_LRO_ALT_SCORE_MODE BIT(21)
+#define PDMA_LRO_RING_AGE1      BITS(22, 31)
+#define PDMA_LRO_RING_AGE2      BITS(0, 5)
+#define PDMA_LRO_RING_AGG       BITS(10, 25)
+#define PDMA_LRO_RING_AGG_CNT1  BITS(26, 31)
+#define PDMA_LRO_RING_AGG_CNT2  BITS(0, 1)
+#define PDMA_LRO_ALT_TICK_TIMER BITS(16, 20)
+#define PDMA_LRO_LRO_MIN_RXD_SDL0 BITS(16, 31)
+
+#define PDMA_LRO_DLY_INT_EN_OFFSET (5)
+#define PDMA_LRO_TPUT_PRE_ADJ_OFFSET (8)
+#define PDMA_LRO_FREQ_PRI_ADJ_OFFSET (16)
+#define PDMA_LRO_LRO_MIN_RXD_SDL0_OFFSET (16)
+#define PDMA_LRO_TPUT_PRI_ADJ_OFFSET (12)
+#define PDMA_LRO_ALT_SCORE_MODE_OFFSET (21)
+#define PDMA_LRO_FUSH_REQ_OFFSET (26)
+#define PDMA_NON_LRO_MULTI_EN_OFFSET (2)
+#define PDMA_LRO_IPV6_EN_OFFSET (1)
+#define PDMA_LRO_RXD_PREFETCH_EN_OFFSET (3)
+#define PDMA_LRO_IPV4_CSUM_UPDATE_EN_OFFSET (7)
+#define PDMA_LRO_IPV4_CTRL_PUSH_EN_OFFSET (23)
+#define PDMA_LRO_ALT_TICK_TIMER_OFFSET (16)
+
+#define PDMA_LRO_TPUT_OVERFLOW_ADJ  BITS(12, 31)
+#define PDMA_LRO_CNT_OVERFLOW_ADJ  BITS(0, 11)
+
+#define PDMA_LRO_TPUT_OVERFLOW_ADJ_OFFSET (12)
+#define PDMA_LRO_CNT_OVERFLOW_ADJ_OFFSET (0)
+
+/* alternative-score accounting mode selectors */
+#define PDMA_LRO_ALT_BYTE_CNT_MODE (0)
+#define PDMA_LRO_ALT_PKT_CNT_MODE (1)
+
+/* LRO_RX_RING1_CTRL_DW1 offsets */
+#define PDMA_LRO_AGE_H_OFFSET (10)
+#define PDMA_LRO_RING_AGE1_OFFSET (22)
+#define PDMA_LRO_RING_AGG_CNT1_OFFSET (26)
+/* LRO_RX_RING1_CTRL_DW2 offsets */
+#define PDMA_RX_MODE_OFFSET (6)
+#define PDMA_RX_PORT_VALID_OFFSET (8)
+#define PDMA_RX_MYIP_VALID_OFFSET (9)
+#define PDMA_LRO_RING_AGE2_OFFSET (0)
+#define PDMA_LRO_RING_AGG_OFFSET (10)
+#define PDMA_LRO_RING_AGG_CNT2_OFFSET (0)
+/* LRO_RX_RING1_CTRL_DW3 offsets */
+#define PDMA_LRO_AGG_CNT_H_OFFSET (6)
+/* LRO_RX_RING1_STP_DTP_DW offsets */
+#define PDMA_RX_TCP_SRC_PORT_OFFSET (16)
+#define PDMA_RX_TCP_DEST_PORT_OFFSET (0)
+/* LRO_RX_RING1_CTRL_DW0 offsets */
+#define PDMA_RX_IPV4_FORCE_OFFSET (1)
+#define PDMA_RX_IPV6_FORCE_OFFSET (0)
+
+#define ADMA_MULTI_RXD_PREFETCH_EN	BIT(3)
+#define ADMA_RXD_PREFETCH_EN	BIT(4)
+
+#define SET_PDMA_LRO_MAX_AGG_CNT(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW3); \
+reg_val &= ~0xff; \
+reg_val |= ((x) & 0xff); \
+sys_reg_write(ADMA_LRO_CTRL_DW3, reg_val); \
+}
+
+#define SET_PDMA_LRO_FLUSH_REQ(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_FUSH_REQ; \
+reg_val |= ((x) & 0x7) << PDMA_LRO_FUSH_REQ_OFFSET; \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_LRO_IPV6_EN(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_IPV6_EN; \
+reg_val |= ((x) & 0x1) << PDMA_LRO_IPV6_EN_OFFSET; \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_LRO_RXD_PREFETCH_EN(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_RXD_PREFETCH_EN; \
+reg_val |= (x); \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_IPV4_CSUM_UPDATE_EN; \
+reg_val |= ((x) & 0x1) << PDMA_LRO_IPV4_CSUM_UPDATE_EN_OFFSET; \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_LRO_IPV4_CTRL_PUSH_EN(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_IPV4_CTRL_PUSH_EN; \
+reg_val |= ((x) & 0x1) << PDMA_LRO_IPV4_CTRL_PUSH_EN_OFFSET; \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_NON_LRO_MULTI_EN(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~(PDMA_NON_LRO_MULTI_EN); \
+reg_val |= ((x) & 0x1) << PDMA_NON_LRO_MULTI_EN_OFFSET; \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_LRO_FREQ_PRI_ADJ(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_FREQ_PRI_ADJ; \
+reg_val |= ((x) & 0xf) << PDMA_LRO_FREQ_PRI_ADJ_OFFSET; \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_LRO_TPUT_PRE_ADJ(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_TPUT_PRE_ADJ; \
+reg_val |= ((x) & 0xf) << PDMA_LRO_TPUT_PRE_ADJ_OFFSET; \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_LRO_TPUT_PRI_ADJ(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_TPUT_PRI_ADJ; \
+reg_val |= ((x) & 0xf) << PDMA_LRO_TPUT_PRI_ADJ_OFFSET; \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_LRO_ALT_SCORE_MODE(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_ALT_SCORE_MODE; \
+reg_val |= ((x) & 0x1) << PDMA_LRO_ALT_SCORE_MODE_OFFSET; \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_LRO_DLY_INT_EN(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_DLY_INT_EN; \
+reg_val |= ((x) & 0x1) << PDMA_LRO_DLY_INT_EN_OFFSET; \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_LRO_BW_THRESHOLD(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW2); \
+reg_val = (x); \
+sys_reg_write(ADMA_LRO_CTRL_DW2, reg_val); \
+}
+
+#define SET_PDMA_LRO_MIN_RXD_SDL(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW3); \
+reg_val &= ~PDMA_LRO_LRO_MIN_RXD_SDL0; \
+reg_val |= ((x) & 0xffff) << PDMA_LRO_LRO_MIN_RXD_SDL0_OFFSET; \
+sys_reg_write(ADMA_LRO_CTRL_DW3, reg_val); \
+}
+
+#define SET_PDMA_LRO_TPUT_OVERFLOW_ADJ(x) \
+{ \
+unsigned int reg_val = sys_reg_read(PDMA_LRO_ATL_OVERFLOW_ADJ); \
+reg_val &= ~PDMA_LRO_TPUT_OVERFLOW_ADJ; \
+reg_val |= ((x) & 0xfffff) << PDMA_LRO_TPUT_OVERFLOW_ADJ_OFFSET; \
+sys_reg_write(PDMA_LRO_ATL_OVERFLOW_ADJ, reg_val); \
+}
+
+#define SET_PDMA_LRO_CNT_OVERFLOW_ADJ(x) \
+{ \
+unsigned int reg_val = sys_reg_read(PDMA_LRO_ATL_OVERFLOW_ADJ); \
+reg_val &= ~PDMA_LRO_CNT_OVERFLOW_ADJ; \
+reg_val |= ((x) & 0xfff) << PDMA_LRO_CNT_OVERFLOW_ADJ_OFFSET; \
+sys_reg_write(PDMA_LRO_ATL_OVERFLOW_ADJ, reg_val); \
+}
+
+#define SET_PDMA_LRO_ALT_REFRESH_TIMER_UNIT(x) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_ALT_REFRESH_TIMER); \
+reg_val &= ~PDMA_LRO_ALT_TICK_TIMER; \
+reg_val |= ((x) & 0x1f) << PDMA_LRO_ALT_TICK_TIMER_OFFSET; \
+sys_reg_write(LRO_ALT_REFRESH_TIMER, reg_val); \
+}
+
+#define SET_PDMA_LRO_ALT_REFRESH_TIMER(x) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_ALT_REFRESH_TIMER); \
+reg_val &= ~0xffff; \
+reg_val |= ((x) & 0xffff); \
+sys_reg_write(LRO_ALT_REFRESH_TIMER, reg_val); \
+}
+
+#define SET_PDMA_LRO_MAX_AGG_TIME(x) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_MAX_AGG_TIME); \
+reg_val &= ~0xffff; \
+reg_val |= ((x) & 0xffff); \
+sys_reg_write(LRO_MAX_AGG_TIME, reg_val); \
+}
+
+#define SET_PDMA_RXRING_MODE(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
+reg_val &= ~(0x3 << PDMA_RX_MODE_OFFSET); \
+reg_val |= (y) << PDMA_RX_MODE_OFFSET; \
+sys_reg_write(LRO_RX_RING0_CTRL_DW2 + ((x) << 6), reg_val); \
+}
+
+#define SET_PDMA_RXRING_MYIP_VALID(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
+reg_val &= ~(0x1 << PDMA_RX_MYIP_VALID_OFFSET); \
+reg_val |= ((y) & 0x1) << PDMA_RX_MYIP_VALID_OFFSET; \
+sys_reg_write(LRO_RX_RING0_CTRL_DW2 + ((x) << 6), reg_val); \
+}
+
+#define SET_PDMA_RXRING_VALID(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
+reg_val &= ~(0x1 << PDMA_RX_PORT_VALID_OFFSET); \
+reg_val |= ((y) & 0x1) << PDMA_RX_PORT_VALID_OFFSET; \
+sys_reg_write(LRO_RX_RING0_CTRL_DW2 + ((x) << 6), reg_val); \
+}
+
+#define SET_PDMA_RXRING_TCP_SRC_PORT(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_RX_RING1_STP_DTP_DW + \
+ (((x) - 1) << 6)); \
+reg_val &= ~(0xffff << PDMA_RX_TCP_SRC_PORT_OFFSET); \
+reg_val |= (y) << PDMA_RX_TCP_SRC_PORT_OFFSET; \
+sys_reg_write(LRO_RX_RING1_STP_DTP_DW + (((x) - 1) << 6), reg_val); \
+}
+
+#define SET_PDMA_RXRING_TCP_DEST_PORT(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_RX_RING1_STP_DTP_DW + \
+ (((x) - 1) << 6)); \
+reg_val &= ~(0xffff << PDMA_RX_TCP_DEST_PORT_OFFSET); \
+reg_val |= (y) << PDMA_RX_TCP_DEST_PORT_OFFSET; \
+sys_reg_write(LRO_RX_RING1_STP_DTP_DW + (((x) - 1) << 6), reg_val); \
+}
+
+#define SET_PDMA_RXRING_IPV4_FORCE_MODE(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_RX_RING1_CTRL_DW0 + (((x) - 1) << 6)); \
+reg_val &= ~(0x1 << PDMA_RX_IPV4_FORCE_OFFSET); \
+reg_val |= (y) << PDMA_RX_IPV4_FORCE_OFFSET; \
+sys_reg_write(LRO_RX_RING1_CTRL_DW0 + (((x) - 1) << 6), reg_val); \
+}
+
+#define SET_PDMA_RXRING_IPV6_FORCE_MODE(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_RX_RING1_CTRL_DW0 + (((x) - 1) << 6)); \
+reg_val &= ~(0x1 << PDMA_RX_IPV6_FORCE_OFFSET); \
+reg_val |= (y) << PDMA_RX_IPV6_FORCE_OFFSET; \
+sys_reg_write(LRO_RX_RING1_CTRL_DW0 + (((x) - 1) << 6), reg_val); \
+}
+
+#define SET_PDMA_RXRING_AGE_TIME(x, y) \
+{ \
+unsigned int reg_val1 = sys_reg_read(LRO_RX_RING0_CTRL_DW1 + ((x) << 6)); \
+unsigned int reg_val2 = sys_reg_read(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
+reg_val1 &= ~PDMA_LRO_RING_AGE1; \
+reg_val2 &= ~PDMA_LRO_RING_AGE2; \
+reg_val1 |= ((y) & 0x3ff) << PDMA_LRO_RING_AGE1_OFFSET; \
+reg_val2 |= (((y) >> PDMA_LRO_AGE_H_OFFSET) & 0x03f) << \
+ PDMA_LRO_RING_AGE2_OFFSET;\
+sys_reg_write(LRO_RX_RING0_CTRL_DW1 + ((x) << 6), reg_val1); \
+sys_reg_write(LRO_RX_RING0_CTRL_DW2 + ((x) << 6), reg_val2); \
+}
+
+#define SET_PDMA_RXRING_AGG_TIME(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
+reg_val &= ~PDMA_LRO_RING_AGG; \
+reg_val |= ((y) & 0xffff) << PDMA_LRO_RING_AGG_OFFSET; \
+sys_reg_write(LRO_RX_RING0_CTRL_DW2 + ((x) << 6), reg_val); \
+}
+
+#define SET_PDMA_RXRING_MAX_AGG_CNT(x, y) \
+{ \
+unsigned int reg_val1 = sys_reg_read(LRO_RX_RING1_CTRL_DW2 + \
+ (((x) - 1) << 6)); \
+unsigned int reg_val2 = sys_reg_read(LRO_RX_RING1_CTRL_DW3 + \
+ (((x) - 1) << 6)); \
+reg_val1 &= ~PDMA_LRO_RING_AGG_CNT1; \
+reg_val2 &= ~PDMA_LRO_RING_AGG_CNT2; \
+reg_val1 |= ((y) & 0x3f) << PDMA_LRO_RING_AGG_CNT1_OFFSET; \
+reg_val2 |= (((y) >> PDMA_LRO_AGG_CNT_H_OFFSET) & 0x03) << \
+ PDMA_LRO_RING_AGG_CNT2_OFFSET; \
+sys_reg_write(LRO_RX_RING1_CTRL_DW2 + (((x) - 1) << 6), reg_val1); \
+sys_reg_write(LRO_RX_RING1_CTRL_DW3 + (((x) - 1) << 6), reg_val2); \
+}
+
+/* HW LRO debug functions */
+/* Implemented in raether_hwlro.c; called from the RX path when
+ * FE_HW_LRO_DBG is enabled to accumulate per-ring aggregation and
+ * flush-reason statistics from the given RX descriptor.
+ */
+void hw_lro_stats_update(unsigned int ring_num,
+			 struct PDMA_rxdesc *rx_ring);
+void hw_lro_flush_stats_update(unsigned int ring_num,
+			       struct PDMA_rxdesc *rx_ring);
+
+#endif
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_pdma.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_pdma.c
new file mode 100644
index 0000000..344f3d5
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_pdma.c
@@ -0,0 +1,770 @@
+/* Copyright 2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "raether.h"
+
+/* fe_pdma_wait_dma_idle - poll PDMA_GLO_CFG until both DMA engines idle.
+ * Bounded busy-wait of ~1000 polls; returns 0 as soon as neither
+ * RX_DMA_BUSY nor TX_DMA_BUSY is set, -1 if the retry budget runs out.
+ */
+int fe_pdma_wait_dma_idle(void)
+{
+	unsigned int reg_val;
+	unsigned int loop_cnt = 0;
+
+	while (1) {
+		if (loop_cnt++ > 1000)
+			break;
+		reg_val = sys_reg_read(PDMA_GLO_CFG);
+		if ((reg_val & RX_DMA_BUSY)) {
+			pr_warn("\n RX_DMA_BUSY !!! ");
+			continue;
+		}
+		if ((reg_val & TX_DMA_BUSY)) {
+			pr_warn("\n TX_DMA_BUSY !!! ");
+			continue;
+		}
+		return 0;
+	}
+
+	return -1;
+}
+
+/* fe_pdma_rx_dma_init - allocate and program the normal RX ring (ring 0).
+ * Allocates num_rx_desc coherent descriptors plus one DMA-mapped data
+ * buffer per descriptor, then writes the ring base/count/index registers
+ * and resets the ring index.
+ * Returns 0 on success, -ENOMEM on failure.
+ * NOTE(review): buffers already acquired are not unwound on failure;
+ * presumably the caller tears down via fe_pdma_rx_dma_deinit() -- confirm.
+ */
+int fe_pdma_rx_dma_init(struct net_device *dev)
+{
+	int i;
+	unsigned int skb_size;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	dma_addr_t dma_addr;
+
+	skb_size = SKB_DATA_ALIGN(MAX_RX_LENGTH + NET_IP_ALIGN + NET_SKB_PAD) +
+	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	/* Initial RX Ring 0 */
+	ei_local->rx_ring[0] = dma_alloc_coherent(dev->dev.parent,
+						  num_rx_desc *
+						  sizeof(struct PDMA_rxdesc),
+						  &ei_local->phy_rx_ring[0],
+						  GFP_ATOMIC | __GFP_ZERO);
+	/* Bug fix: the ring allocation was previously used unchecked; a
+	 * failed dma_alloc_coherent() was dereferenced in the loop below.
+	 */
+	if (!ei_local->rx_ring[0]) {
+		pr_err("rx descriptor ring allocation failed!");
+		goto no_rx_mem;
+	}
+	pr_debug("\nphy_rx_ring[0] = 0x%08x, rx_ring[0] = 0x%p\n",
+		 (unsigned int)ei_local->phy_rx_ring[0],
+		 (void *)ei_local->rx_ring[0]);
+
+	for (i = 0; i < num_rx_desc; i++) {
+		ei_local->netrx_skb_data[0][i] =
+		    raeth_alloc_skb_data(skb_size, GFP_KERNEL);
+		if (!ei_local->netrx_skb_data[0][i]) {
+			pr_err("rx skbuff buffer allocation failed!");
+			goto no_rx_mem;
+		}
+
+		memset(&ei_local->rx_ring[0][i], 0, sizeof(struct PDMA_rxdesc));
+		ei_local->rx_ring[0][i].rxd_info2.DDONE_bit = 0;
+		ei_local->rx_ring[0][i].rxd_info2.LS0 = 0;
+		ei_local->rx_ring[0][i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
+		dma_addr = dma_map_single(dev->dev.parent,
+					  ei_local->netrx_skb_data[0][i] +
+					  NET_SKB_PAD,
+					  MAX_RX_LENGTH,
+					  DMA_FROM_DEVICE);
+		ei_local->rx_ring[0][i].rxd_info1.PDP0 = dma_addr;
+		if (unlikely
+		    (dma_mapping_error
+		     (dev->dev.parent,
+		      ei_local->rx_ring[0][i].rxd_info1.PDP0))) {
+			pr_err("[%s]dma_map_single() failed...\n", __func__);
+			goto no_rx_mem;
+		}
+	}
+
+	/* Tell the adapter where the RX rings are located. */
+	sys_reg_write(RX_BASE_PTR0, phys_to_bus((u32)ei_local->phy_rx_ring[0]));
+	sys_reg_write(RX_MAX_CNT0, cpu_to_le32((u32)num_rx_desc));
+	sys_reg_write(RX_CALC_IDX0, cpu_to_le32((u32)(num_rx_desc - 1)));
+
+	sys_reg_write(PDMA_RST_CFG, PST_DRX_IDX0);
+
+	return 0;
+
+no_rx_mem:
+	return -ENOMEM;
+}
+
+/* fe_pdma_tx_dma_init - allocate and program the TX descriptor ring.
+ * Every descriptor starts with LS0 = 1 and DDONE = 1, i.e. owned by the
+ * CPU and ready for reuse, then the ring base/count registers are written
+ * and the TX index reset.
+ * Returns 0 on success, -ENOMEM if the coherent ring allocation fails.
+ */
+int fe_pdma_tx_dma_init(struct net_device *dev)
+{
+	int i;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+
+	for (i = 0; i < num_tx_desc; i++)
+		ei_local->skb_free[i] = 0;
+
+	ei_local->tx_ring_full = 0;
+	ei_local->free_idx = 0;
+	ei_local->tx_ring0 =
+	    dma_alloc_coherent(dev->dev.parent,
+			       num_tx_desc * sizeof(struct PDMA_txdesc),
+			       &ei_local->phy_tx_ring0,
+			       GFP_ATOMIC | __GFP_ZERO);
+	/* Bug fix: the ring allocation was previously used unchecked; a
+	 * failed dma_alloc_coherent() was dereferenced in the loop below.
+	 */
+	if (!ei_local->tx_ring0) {
+		pr_err("tx descriptor ring allocation failed!");
+		return -ENOMEM;
+	}
+	pr_debug("\nphy_tx_ring = 0x%08x, tx_ring = 0x%p\n",
+		 (unsigned int)ei_local->phy_tx_ring0,
+		 (void *)ei_local->tx_ring0);
+
+	for (i = 0; i < num_tx_desc; i++) {
+		memset(&ei_local->tx_ring0[i], 0, sizeof(struct PDMA_txdesc));
+		ei_local->tx_ring0[i].txd_info2.LS0_bit = 1;
+		ei_local->tx_ring0[i].txd_info2.DDONE_bit = 1;
+	}
+
+	/* Tell the adapter where the TX rings are located. */
+	sys_reg_write(TX_BASE_PTR0, phys_to_bus((u32)ei_local->phy_tx_ring0));
+	sys_reg_write(TX_MAX_CNT0, cpu_to_le32((u32)num_tx_desc));
+	sys_reg_write(TX_CTX_IDX0, 0);
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+	ei_local->tx_cpu_owner_idx0 = 0;
+#endif
+	sys_reg_write(PDMA_RST_CFG, PST_DTX_IDX0);
+
+	return 0;
+}
+
+/* fe_pdma_rx_dma_deinit - release ring 0 resources from
+ * fe_pdma_rx_dma_init(): the coherent descriptor ring and every skb data
+ * buffer.
+ * NOTE(review): the streaming DMA mappings created at init time are not
+ * unmapped here -- confirm this is handled elsewhere or harmless.
+ */
+void fe_pdma_rx_dma_deinit(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	int i;
+
+	/* free RX Ring */
+	dma_free_coherent(dev->dev.parent,
+			  num_rx_desc * sizeof(struct PDMA_rxdesc),
+			  ei_local->rx_ring[0], ei_local->phy_rx_ring[0]);
+
+	/* free RX data */
+	for (i = 0; i < num_rx_desc; i++) {
+		raeth_free_skb_data(ei_local->netrx_skb_data[0][i]);
+		ei_local->netrx_skb_data[0][i] = NULL;
+	}
+}
+
+/* fe_pdma_tx_dma_deinit - release TX ring resources from
+ * fe_pdma_tx_dma_init(): the coherent descriptor ring plus any skbs still
+ * pending in skb_free[].  The value 0xFFFFFFFF is used elsewhere in this
+ * driver as a magic "no skb to free" sentinel, hence the second test.
+ */
+void fe_pdma_tx_dma_deinit(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	int i;
+
+	/* free TX Ring */
+	if (ei_local->tx_ring0)
+		dma_free_coherent(dev->dev.parent,
+				  num_tx_desc *
+				  sizeof(struct PDMA_txdesc),
+				  ei_local->tx_ring0,
+				  ei_local->phy_tx_ring0);
+
+	/* free TX data */
+	for (i = 0; i < num_tx_desc; i++) {
+		if ((ei_local->skb_free[i] != 0) &&
+		    (ei_local->skb_free[i] != (struct sk_buff *)0xFFFFFFFF))
+			dev_kfree_skb_any(ei_local->skb_free[i]);
+	}
+}
+
+/* set_fe_pdma_glo_cfg - program the PDMA global configuration register:
+ * enable TX/RX DMA and TX write-back-done, set burst sizes, and enable
+ * multi-ring RX (MULTI_EN).  RX_2B_OFFSET is intentionally left disabled
+ * (commented out below).
+ */
+void set_fe_pdma_glo_cfg(void)
+{
+	unsigned int dma_glo_cfg = 0;
+
+	dma_glo_cfg =
+	    (TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN | PDMA_BT_SIZE_16DWORDS |
+	     MULTI_EN | ADMA_RX_BT_SIZE_32DWORDS);
+//	dma_glo_cfg |= (RX_2B_OFFSET);
+
+	sys_reg_write(PDMA_GLO_CFG, dma_glo_cfg);
+}
+
+/* @brief cal txd number for a page
+ *
+ * @parm size
+ *
+ * @return frag_txd_num
+ *
+ * Number of TX descriptor slots needed for a buffer of @size bytes, i.e.
+ * ceil(size / MAX_PTXD_LEN), with 0 bytes needing 0 descriptors.
+ * Improvement: the former loop subtracted MAX_PTXD_LEN per iteration
+ * (O(size / MAX_PTXD_LEN)); this computes the same value in O(1) using
+ * div/mod, which cannot overflow for any unsigned int size.
+ */
+static inline unsigned int pdma_cal_frag_txd_num(unsigned int size)
+{
+	if (size == 0)
+		return 0;
+
+	return size / MAX_PTXD_LEN + ((size % MAX_PTXD_LEN) ? 1 : 0);
+}
+
+/* fe_fill_tx_desc - fill one PDMA TX descriptor for a linear skb.
+ * INFO2/INFO4 are built in stack temporaries and committed after SDP0 so
+ * the descriptor's DDONE bit is cleared only once the rest of it is
+ * complete.  Returns 0.
+ * NOTE(review): the buffer address is taken with virt_to_phys() rather
+ * than dma_map_single() -- presumably coherent on this platform; confirm.
+ */
+int fe_fill_tx_desc(struct net_device *dev,
+		    unsigned long *tx_cpu_owner_idx,
+		    struct sk_buff *skb,
+		    int gmac_no)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	struct PDMA_txdesc *tx_ring = &ei_local->tx_ring0[*tx_cpu_owner_idx];
+	/* Bug fix: zero-initialize the temporaries.  Previously any field
+	 * not explicitly assigned below -- TUI_CO / VLAN_TAG when the
+	 * corresponding feature flag is off, and the whole SDL1/LS1 half
+	 * of INFO2 -- carried uninitialized stack garbage into the
+	 * hardware descriptor.
+	 */
+	struct PDMA_TXD_INFO2_T txd_info2_tmp = { 0 };
+	struct PDMA_TXD_INFO4_T txd_info4_tmp = { 0 };
+
+	tx_ring->txd_info1.SDP0 = virt_to_phys(skb->data);
+	txd_info2_tmp.SDL0 = skb->len;
+	txd_info4_tmp.FPORT = gmac_no;
+	txd_info4_tmp.TSO = 0;
+
+	if (ei_local->features & FE_CSUM_OFFLOAD) {
+		if (skb->ip_summed == CHECKSUM_PARTIAL)
+			txd_info4_tmp.TUI_CO = 7;
+		else
+			txd_info4_tmp.TUI_CO = 0;
+	}
+
+	if (ei_local->features & FE_HW_VLAN_TX) {
+		if (skb_vlan_tag_present(skb))
+			txd_info4_tmp.VLAN_TAG =
+				0x10000 | skb_vlan_tag_get(skb);
+		else
+			txd_info4_tmp.VLAN_TAG = 0;
+	}
+#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+	/* packets tagged by the HW-NAT engine are redirected to the PPE
+	 * forward port (4) instead of the GMAC
+	 */
+	if (IS_MAGIC_TAG_PROTECT_VALID_HEAD(skb)) {
+		if (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_PPE) {
+			if (ppe_hook_rx_eth) {
+				/* PPE */
+				txd_info4_tmp.FPORT = 4;
+				FOE_MAGIC_TAG(skb) = 0;
+			}
+		}
+	} else if (IS_MAGIC_TAG_PROTECT_VALID_TAIL(skb)) {
+		if (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_PPE) {
+			if (ppe_hook_rx_eth) {
+				/* PPE */
+				txd_info4_tmp.FPORT = 4;
+				FOE_MAGIC_TAG(skb) = 0;
+			}
+		}
+	}
+#endif
+
+	txd_info2_tmp.LS0_bit = 1;
+	txd_info2_tmp.DDONE_bit = 0;
+
+	/* commit INFO4 before INFO2 so DDONE is cleared last */
+	tx_ring->txd_info4 = txd_info4_tmp;
+	tx_ring->txd_info2 = txd_info2_tmp;
+
+	return 0;
+}
+
+/* fe_fill_tx_tso_data - queue the linear (header) part of a TSO frame.
+ * The buffer is split into MAX_PTXD_LEN-sized pieces; each descriptor
+ * holds two pieces (SDP0/SDL0 and SDP1/SDL1), selected by the parity of
+ * ei_local->skb_txd_num, so a new descriptor is claimed only on even
+ * counts.  LS0/LS1 is set on the final piece only when the skb has no
+ * page frags left (nr_frags == 0).
+ * NOTE(review): the inner while busy-waits for DDONE while counting
+ * tx_errors -- it spins with no timeout; confirm this is the intended
+ * backpressure behavior.
+ * NOTE(review): frag_offset is used directly as SDP0/SDP1, so the caller
+ * presumably passes a DMA/physical address -- confirm.
+ * Always returns 0.
+ */
+static int fe_fill_tx_tso_data(struct END_DEVICE *ei_local,
+			       unsigned int frag_offset,
+			       unsigned int frag_size,
+			       unsigned long *tx_cpu_owner_idx,
+			       unsigned int nr_frags,
+			       int gmac_no)
+{
+	struct PSEUDO_ADAPTER *p_ad;
+	unsigned int size;
+	unsigned int frag_txd_num;
+	struct PDMA_txdesc *tx_ring;
+
+	frag_txd_num = pdma_cal_frag_txd_num(frag_size);
+	tx_ring = &ei_local->tx_ring0[*tx_cpu_owner_idx];
+
+	while (frag_txd_num > 0) {
+		if (frag_size < MAX_PTXD_LEN)
+			size = frag_size;
+		else
+			size = MAX_PTXD_LEN;
+
+		if (ei_local->skb_txd_num % 2 == 0) {
+			*tx_cpu_owner_idx =
+			    (*tx_cpu_owner_idx + 1) % num_tx_desc;
+			tx_ring = &ei_local->tx_ring0[*tx_cpu_owner_idx];
+
+			while (tx_ring->txd_info2.DDONE_bit == 0) {
+				if (gmac_no == 2) {
+					p_ad =
+					    netdev_priv(ei_local->pseudo_dev);
+					p_ad->stat.tx_errors++;
+				} else {
+					ei_local->stat.tx_errors++;
+				}
+			}
+			tx_ring->txd_info1.SDP0 = frag_offset;
+			tx_ring->txd_info2.SDL0 = size;
+			if (((nr_frags == 0)) && (frag_txd_num == 1))
+				tx_ring->txd_info2.LS0_bit = 1;
+			else
+				tx_ring->txd_info2.LS0_bit = 0;
+			tx_ring->txd_info2.DDONE_bit = 0;
+			tx_ring->txd_info4.FPORT = gmac_no;
+		} else {
+			tx_ring->txd_info3.SDP1 = frag_offset;
+			tx_ring->txd_info2.SDL1 = size;
+			if (((nr_frags == 0)) && (frag_txd_num == 1))
+				tx_ring->txd_info2.LS1_bit = 1;
+			else
+				tx_ring->txd_info2.LS1_bit = 0;
+		}
+		frag_offset += size;
+		frag_size -= size;
+		frag_txd_num--;
+		ei_local->skb_txd_num++;
+	}
+
+	return 0;
+}
+
+/* fe_fill_tx_tso_frag - queue every paged fragment of a TSO skb on PDMA
+ * @netdev: net device; its parent device is used for DMA mapping
+ * @skb: socket buffer whose frags[] are to be transmitted
+ * @tx_cpu_owner_idx: in/out CPU-owned TX descriptor ring index
+ * @gmac_no: destination MAC (2 = pseudo/second GMAC, else first GMAC)
+ *
+ * Two buffers share one PDMA descriptor: even-numbered buffers (counted
+ * by ei_local->skb_txd_num) advance to a fresh descriptor and fill
+ * SDP0/SDL0, odd-numbered ones fill SDP1/SDL1 of the current descriptor.
+ * Fragments longer than MAX_PTXD_LEN are split into MAX_PTXD_LEN pieces.
+ * Returns 0 on success; on a DMA mapping error, previously mapped pieces
+ * are unmapped and -1 is returned.
+ */
+static int fe_fill_tx_tso_frag(struct net_device *netdev,
+ struct sk_buff *skb,
+ unsigned long *tx_cpu_owner_idx,
+ int gmac_no)
+{
+ struct END_DEVICE *ei_local = netdev_priv(netdev);
+ struct PSEUDO_ADAPTER *p_ad;
+ unsigned int size;
+ unsigned int frag_txd_num;
+ skb_frag_t * frag;
+ unsigned int nr_frags;
+ unsigned int frag_offset, frag_size;
+ struct PDMA_txdesc *tx_ring;
+ int i = 0, j = 0, unmap_idx = 0;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ tx_ring = &ei_local->tx_ring0[*tx_cpu_owner_idx];
+
+ for (i = 0; i < nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ frag_offset = 0;
+ frag_size = skb_frag_size(frag);
+ frag_txd_num = pdma_cal_frag_txd_num(frag_size);
+
+ while (frag_txd_num > 0) {
+ /* clamp each piece to the per-buffer HW limit */
+ if (frag_size < MAX_PTXD_LEN)
+ size = frag_size;
+ else
+ size = MAX_PTXD_LEN;
+
+ if (ei_local->skb_txd_num % 2 == 0) {
+ /* even buffer: advance to a fresh TXD */
+ *tx_cpu_owner_idx =
+ (*tx_cpu_owner_idx + 1) % num_tx_desc;
+ tx_ring =
+ &ei_local->tx_ring0[*tx_cpu_owner_idx];
+
+ /* busy-wait for HW to release the slot.
+ * NOTE(review): each spin iteration bumps
+ * tx_errors, inflating the counter — confirm
+ * this accounting is intended.
+ */
+ while (tx_ring->txd_info2.DDONE_bit == 0) {
+ if (gmac_no == 2) {
+ p_ad =
+ netdev_priv
+ (ei_local->pseudo_dev);
+ p_ad->stat.tx_errors++;
+ } else {
+ ei_local->stat.tx_errors++;
+ }
+ }
+
+ tx_ring->txd_info1.SDP0 = skb_frag_dma_map(netdev->dev.parent, frag, frag_offset, size, DMA_TO_DEVICE);
+
+ if (unlikely
+ (dma_mapping_error
+ (netdev->dev.parent,
+ tx_ring->txd_info1.SDP0))) {
+ pr_err
+ ("[%s]dma_map_page() failed\n",
+ __func__);
+ goto err_dma;
+ }
+
+ tx_ring->txd_info2.SDL0 = size;
+
+ /* LS0 marks the final piece of the last frag */
+ if ((frag_txd_num == 1) &&
+ (i == (nr_frags - 1)))
+ tx_ring->txd_info2.LS0_bit = 1;
+ else
+ tx_ring->txd_info2.LS0_bit = 0;
+ tx_ring->txd_info2.DDONE_bit = 0;
+ tx_ring->txd_info4.FPORT = gmac_no;
+ } else {
+ /* odd buffer: second half of current TXD */
+ tx_ring->txd_info3.SDP1 = skb_frag_dma_map(netdev->dev.parent, frag, frag_offset, size, DMA_TO_DEVICE);
+
+ if (unlikely
+ (dma_mapping_error
+ (netdev->dev.parent,
+ tx_ring->txd_info3.SDP1))) {
+ pr_err
+ ("[%s]dma_map_page() failed\n",
+ __func__);
+ goto err_dma;
+ }
+ tx_ring->txd_info2.SDL1 = size;
+ if ((frag_txd_num == 1) &&
+ (i == (nr_frags - 1)))
+ tx_ring->txd_info2.LS1_bit = 1;
+ else
+ tx_ring->txd_info2.LS1_bit = 0;
+ }
+ frag_offset += size;
+ frag_size -= size;
+ frag_txd_num--;
+ ei_local->skb_txd_num++;
+ }
+ }
+
+ return 0;
+
+err_dma:
+ /* unmap dma */
+ /* NOTE(review): the unwind replays the descriptor walk starting
+ * from the CURRENT ring index and covers fragments 0..i-1 only;
+ * pieces of the failing fragment i that were already mapped are
+ * never unmapped — confirm whether this leak is acceptable.
+ */
+ j = *tx_cpu_owner_idx;
+ unmap_idx = i;
+ for (i = 0; i < unmap_idx; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ frag_size = skb_frag_size(frag);
+ frag_txd_num = pdma_cal_frag_txd_num(frag_size);
+
+ while (frag_txd_num > 0) {
+ if (frag_size < MAX_PTXD_LEN)
+ size = frag_size;
+ else
+ size = MAX_PTXD_LEN;
+ if (ei_local->skb_txd_num % 2 == 0) {
+ j = (j + 1) % num_tx_desc;
+ dma_unmap_page(netdev->dev.parent,
+ ei_local->tx_ring0[j].
+ txd_info1.SDP0,
+ ei_local->tx_ring0[j].
+ txd_info2.SDL0, DMA_TO_DEVICE);
+ /* reinit txd */
+ ei_local->tx_ring0[j].txd_info2.LS0_bit = 1;
+ ei_local->tx_ring0[j].txd_info2.DDONE_bit = 1;
+ } else {
+ dma_unmap_page(netdev->dev.parent,
+ ei_local->tx_ring0[j].
+ txd_info3.SDP1,
+ ei_local->tx_ring0[j].
+ txd_info2.SDL1, DMA_TO_DEVICE);
+ /* reinit txd */
+ ei_local->tx_ring0[j].txd_info2.LS1_bit = 1;
+ }
+ frag_size -= size;
+ frag_txd_num--;
+ ei_local->skb_txd_num++;
+ }
+ }
+
+ return -1;
+}
+
+/* fe_fill_tx_desc_tso - build PDMA TX descriptors for a (possibly) TSO skb
+ * @dev: transmitting net device
+ * @tx_cpu_owner_idx: in/out CPU-owned TX ring index
+ * @skb: packet to send (linear part + optional paged fragments)
+ * @gmac_no: destination MAC port (PPE may override FPORT to 4 below)
+ *
+ * Fills the head descriptor with checksum/VLAN/port flags and the linear
+ * data, delegates overflow linear data to fe_fill_tx_tso_data() and the
+ * paged fragments to fe_fill_tx_tso_frag(), then, for GSO packets, stores
+ * the MSS in the TCP checksum field as the TSO engine expects.
+ * Returns 0 on success or the negative error from the fragment path.
+ */
+int fe_fill_tx_desc_tso(struct net_device *dev,
+ unsigned long *tx_cpu_owner_idx,
+ struct sk_buff *skb,
+ int gmac_no)
+{
+ struct END_DEVICE *ei_local = netdev_priv(dev);
+ struct iphdr *iph = NULL;
+ struct ipv6hdr *ip6h = NULL;
+ struct tcphdr *th = NULL;
+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+ unsigned int len, offset;
+ int err;
+ struct PDMA_txdesc *tx_ring = &ei_local->tx_ring0[*tx_cpu_owner_idx];
+
+ tx_ring->txd_info4.FPORT = gmac_no;
+ tx_ring->txd_info4.TSO = 0;
+
+ /* TUI_CO = 7 asks HW to fill TCP/UDP/IP checksums */
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ tx_ring->txd_info4.TUI_CO = 7;
+ else
+ tx_ring->txd_info4.TUI_CO = 0;
+
+ if (ei_local->features & FE_HW_VLAN_TX) {
+ /* bit 16 = "insert tag" flag, low 16 bits = TCI */
+ if (skb_vlan_tag_present(skb))
+ tx_ring->txd_info4.VLAN_TAG =
+ 0x10000 | skb_vlan_tag_get(skb);
+ else
+ tx_ring->txd_info4.VLAN_TAG = 0;
+ }
+#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+ if (IS_MAGIC_TAG_PROTECT_VALID_HEAD(skb)) {
+ if (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_PPE) {
+ if (ppe_hook_rx_eth) {
+ /* PPE */
+ tx_ring->txd_info4.FPORT = 4;
+ FOE_MAGIC_TAG(skb) = 0;
+ }
+ }
+ } else if (IS_MAGIC_TAG_PROTECT_VALID_TAIL(skb)) {
+ if (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_PPE) {
+ if (ppe_hook_rx_eth) {
+ /* PPE */
+ tx_ring->txd_info4.FPORT = 4;
+ FOE_MAGIC_TAG(skb) = 0;
+ }
+ }
+ }
+#endif
+ /* buffer 0 (head) is consumed here; helpers continue the count */
+ ei_local->skb_txd_num = 1;
+
+ /* skb data handle */
+ len = skb->len - skb->data_len;
+ offset = virt_to_phys(skb->data);
+ tx_ring->txd_info1.SDP0 = offset;
+ if (len < MAX_PTXD_LEN) {
+ tx_ring->txd_info2.SDL0 = len;
+ tx_ring->txd_info2.LS0_bit = nr_frags ? 0 : 1;
+ len = 0;
+ } else {
+ tx_ring->txd_info2.SDL0 = MAX_PTXD_LEN;
+ tx_ring->txd_info2.LS0_bit = 0;
+ len -= MAX_PTXD_LEN;
+ offset += MAX_PTXD_LEN;
+ }
+
+ /* NOTE(review): return value of fe_fill_tx_tso_data() is ignored —
+ * confirm it cannot fail in a way that must abort the send.
+ */
+ if (len > 0)
+ fe_fill_tx_tso_data(ei_local, offset, len,
+ tx_cpu_owner_idx, nr_frags, gmac_no);
+
+ /* skb fragments handle */
+ if (nr_frags > 0) {
+ err = fe_fill_tx_tso_frag(dev, skb, tx_cpu_owner_idx, gmac_no);
+ if (unlikely(err))
+ return err;
+ }
+
+ /* fill in MSS info in tcp checksum field */
+ if (skb_shinfo(skb)->gso_segs > 1) {
+ /* TCP over IPv4 */
+ iph = (struct iphdr *)skb_network_header(skb);
+ if ((iph->version == 4) && (iph->protocol == IPPROTO_TCP)) {
+ th = (struct tcphdr *)skb_transport_header(skb);
+ tx_ring->txd_info4.TSO = 1;
+ th->check = htons(skb_shinfo(skb)->gso_size);
+ /* flush the modified header to DMA-visible memory */
+ dma_sync_single_for_device(dev->dev.parent,
+ virt_to_phys(th),
+ sizeof(struct tcphdr),
+ DMA_TO_DEVICE);
+ }
+
+ /* TCP over IPv6 */
+ if (ei_local->features & FE_TSO_V6) {
+ ip6h = (struct ipv6hdr *)skb_network_header(skb);
+ if ((ip6h->nexthdr == NEXTHDR_TCP) &&
+ (ip6h->version == 6)) {
+ th = (struct tcphdr *)skb_transport_header(skb);
+ tx_ring->txd_info4.TSO = 1;
+ th->check = htons(skb_shinfo(skb)->gso_size);
+ dma_sync_single_for_device(dev->dev.parent,
+ virt_to_phys(th),
+ sizeof(struct
+ tcphdr),
+ DMA_TO_DEVICE);
+ }
+ }
+ }
+ /* hand the head descriptor to hardware last */
+ tx_ring->txd_info2.DDONE_bit = 0;
+
+ return 0;
+}
+
+/* rt2880_pdma_eth_send - push one skb onto the PDMA TX ring
+ * @dev: transmitting net device
+ * @skb: packet to send
+ * @gmac_no: destination GMAC (2 = pseudo device stats)
+ * @num_of_frag: total buffer count; >1 selects the TSO descriptor path
+ *
+ * Busy-waits for the current and the following descriptor to be released
+ * by hardware (each spin increments tx_errors), fills the descriptor(s),
+ * then kicks the DMA by writing the new CPU index to TX_CTX_IDX0.
+ * Returns the transmitted length, or the negative fill error.
+ */
+static inline int rt2880_pdma_eth_send(struct net_device *dev,
+ struct sk_buff *skb, int gmac_no,
+ unsigned int num_of_frag)
+{
+ unsigned int length = skb->len;
+ struct END_DEVICE *ei_local = netdev_priv(dev);
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+ unsigned long tx_cpu_owner_idx0 = ei_local->tx_cpu_owner_idx0;
+#else
+ unsigned long tx_cpu_owner_idx0 = sys_reg_read(TX_CTX_IDX0);
+#endif
+ struct PSEUDO_ADAPTER *p_ad;
+ int err;
+
+ while (ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.DDONE_bit == 0) {
+ if (gmac_no == 2) {
+ if (ei_local->pseudo_dev) {
+ p_ad = netdev_priv(ei_local->pseudo_dev);
+ p_ad->stat.tx_errors++;
+ } else {
+ pr_err
+ ("pseudo_dev is still not initialize ");
+ pr_err
+ ("but receive packet from GMAC2\n");
+ }
+ } else {
+ ei_local->stat.tx_errors++;
+ }
+ }
+
+ if (num_of_frag > 1)
+ err = fe_fill_tx_desc_tso(dev, &tx_cpu_owner_idx0,
+ skb, gmac_no);
+ else
+ err = fe_fill_tx_desc(dev, &tx_cpu_owner_idx0, skb, gmac_no);
+ if (err)
+ return err;
+
+ /* also wait for the next slot so the write of the CPU index below
+ * never points hardware at a still-pending descriptor
+ */
+ tx_cpu_owner_idx0 = (tx_cpu_owner_idx0 + 1) % num_tx_desc;
+ while (ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.DDONE_bit == 0) {
+ if (gmac_no == 2) {
+ p_ad = netdev_priv(ei_local->pseudo_dev);
+ p_ad->stat.tx_errors++;
+ } else {
+ ei_local->stat.tx_errors++;
+ }
+ }
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+ ei_local->tx_cpu_owner_idx0 = tx_cpu_owner_idx0;
+#endif
+ /* make sure that all changes to the dma ring are flushed before we
+ * continue
+ */
+ wmb();
+
+ sys_reg_write(TX_CTX_IDX0, cpu_to_le32((u32)tx_cpu_owner_idx0));
+
+ if (gmac_no == 2) {
+ p_ad = netdev_priv(ei_local->pseudo_dev);
+ p_ad->stat.tx_packets++;
+ p_ad->stat.tx_bytes += length;
+ } else {
+ ei_local->stat.tx_packets++;
+ ei_local->stat.tx_bytes += length;
+ }
+
+ return length;
+}
+
+/* ei_pdma_start_xmit - PDMA hard_start_xmit entry point
+ * @skb: packet handed down by the stack
+ * @dev: transmitting net device
+ * @gmac_no: destination GMAC port
+ *
+ * Optionally diverts the packet to the HW-NAT (PPE) TX hook, computes how
+ * many descriptors the packet needs (two buffers per descriptor on the
+ * TSO path), checks ring occupancy via the skb_free[] bookkeeping and
+ * either sends the packet or drops it.  Always returns NETDEV_TX_OK
+ * (0 when consumed by the PPE hook).
+ */
+int ei_pdma_start_xmit(struct sk_buff *skb, struct net_device *dev, int gmac_no)
+{
+ struct END_DEVICE *ei_local = netdev_priv(dev);
+ unsigned long tx_cpu_owner_idx;
+ unsigned int tx_cpu_owner_idx_next, tx_cpu_owner_idx_next2;
+ unsigned int num_of_txd, num_of_frag;
+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags, i;
+ skb_frag_t * frag;
+ struct PSEUDO_ADAPTER *p_ad;
+ unsigned int tx_cpu_cal_idx;
+
+/* NOTE(review): the inner #if duplicates the enclosing guard and is
+ * redundant — harmless, but could be removed.
+ */
+#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+ if (ppe_hook_tx_eth) {
+#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+ if (FOE_MAGIC_TAG(skb) != FOE_MAGIC_PPE)
+#endif
+ if (ppe_hook_tx_eth(skb, gmac_no) != 1) {
+ dev_kfree_skb_any(skb);
+ return 0;
+ }
+ }
+#endif
+
+// dev->trans_start = jiffies; /* save the timestamp */
+ netif_trans_update(dev);
+ spin_lock(&ei_local->page_lock);
+ /* flush the linear data for DMA before descriptors are built */
+ dma_sync_single_for_device(dev->dev.parent, virt_to_phys(skb->data),
+ skb->len, DMA_TO_DEVICE);
+
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+ tx_cpu_owner_idx = ei_local->tx_cpu_owner_idx0;
+#else
+ tx_cpu_owner_idx = sys_reg_read(TX_CTX_IDX0);
+#endif
+
+ if (ei_local->features & FE_TSO) {
+ /* count buffers for linear part + every fragment, then
+ * halve (rounded up): each descriptor carries two buffers
+ */
+ num_of_txd = pdma_cal_frag_txd_num(skb->len - skb->data_len);
+ if (nr_frags != 0) {
+ for (i = 0; i < nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ num_of_txd += pdma_cal_frag_txd_num(skb_frag_size(frag));
+
+ }
+ }
+ num_of_frag = num_of_txd;
+ num_of_txd = (num_of_txd + 1) >> 1;
+ } else {
+ num_of_frag = 1;
+ num_of_txd = 1;
+ }
+
+ tx_cpu_owner_idx_next = (tx_cpu_owner_idx + num_of_txd) % num_tx_desc;
+
+ /* both the first and the one-past-last slot must be free */
+ if ((ei_local->skb_free[tx_cpu_owner_idx_next] == 0) &&
+ (ei_local->skb_free[tx_cpu_owner_idx] == 0)) {
+ if (rt2880_pdma_eth_send(dev, skb, gmac_no, num_of_frag) < 0) {
+ dev_kfree_skb_any(skb);
+ if (gmac_no == 2) {
+ p_ad = netdev_priv(ei_local->pseudo_dev);
+ p_ad->stat.tx_dropped++;
+ } else {
+ ei_local->stat.tx_dropped++;
+ }
+ goto tx_err;
+ }
+
+ tx_cpu_owner_idx_next2 =
+ (tx_cpu_owner_idx_next + 1) % num_tx_desc;
+
+ if (ei_local->skb_free[tx_cpu_owner_idx_next2] != 0)
+ ei_local->tx_ring_full = 1;
+ } else {
+ if (gmac_no == 2) {
+ p_ad = netdev_priv(ei_local->pseudo_dev);
+ p_ad->stat.tx_dropped++;
+ } else {
+ ei_local->stat.tx_dropped++;
+ }
+
+ dev_kfree_skb_any(skb);
+ spin_unlock(&ei_local->page_lock);
+ return NETDEV_TX_OK;
+ }
+
+ /* SG: use multiple TXD to send the packet (only have one skb) */
+ /* the real skb sits in the LAST slot; earlier slots get the magic
+ * marker so housekeeping frees the skb exactly once.
+ * NOTE(review): (--tx_cpu_cal_idx) wraps an unsigned through
+ * UINT_MAX when it is 0 — the % num_tx_desc result is only the
+ * intended slot if num_tx_desc is a power of two; confirm.
+ */
+ tx_cpu_cal_idx = (tx_cpu_owner_idx + num_of_txd - 1) % num_tx_desc;
+ ei_local->skb_free[tx_cpu_cal_idx] = skb;
+ while (--num_of_txd)
+ /* MAGIC ID */
+ ei_local->skb_free[(--tx_cpu_cal_idx) % num_tx_desc] =
+ (struct sk_buff *)0xFFFFFFFF;
+
+tx_err:
+ /* success path falls through here too; label only unlocks/returns */
+ spin_unlock(&ei_local->page_lock);
+ return NETDEV_TX_OK;
+}
+
+/* Reclaim completed TX descriptors.  Walks skb_free[] from free_idx,
+ * freeing real skbs (slots holding the 0xFFFFFFFF magic belong to a
+ * multi-descriptor packet and are just cleared) until the budget runs
+ * out or an unfinished descriptor is reached.  Clears the ring-full
+ * flag and returns the number of slots processed.
+ */
+int ei_pdma_xmit_housekeeping(struct net_device *netdev, int budget)
+{
+ struct END_DEVICE *ei_local = netdev_priv(netdev);
+ struct PDMA_txdesc *ring = ei_local->tx_ring0;
+ unsigned long idx = ei_local->free_idx;
+ int reclaimed = 0;
+
+ while (budget &&
+ ei_local->skb_free[idx] != 0 &&
+ ring[idx].txd_info2.DDONE_bit == 1) {
+ struct sk_buff *done_skb = ei_local->skb_free[idx];
+
+ if (done_skb != (struct sk_buff *)0xFFFFFFFF)
+ dev_kfree_skb_any(done_skb);
+
+ ei_local->skb_free[idx] = 0;
+ idx = (idx + 1) % num_tx_desc;
+ budget--;
+ reclaimed++;
+ }
+
+ ei_local->tx_ring_full = 0;
+ ei_local->free_idx = idx;
+
+ return reclaimed;
+}
+
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_qdma.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_qdma.c
new file mode 100644
index 0000000..a2414c4
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_qdma.c
@@ -0,0 +1,1509 @@
+/* Copyright 2016 MediaTek Inc.
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "raether.h"
+#include "ra_ioctl.h"
+#include "raether_qdma.h"
+
+/* base of the QDMA forwarding free-queue descriptors (see fq_qdma_init) */
+struct QDMA_txdesc *free_head;
+
+/* ioctl: skb->mark to queue mapping table */
+unsigned int M2Q_table[64] = { 0 };
+EXPORT_SYMBOL(M2Q_table);
+/* non-zero when LAN/WAN traffic use separate queue ranges (QoS web UI) */
+unsigned int lan_wan_separate;
+EXPORT_SYMBOL(lan_wan_separate);
+/* sentinel stored in skb_free[] for the extra TXDs of a multi-TXD skb */
+struct sk_buff *magic_id = (struct sk_buff *)0xFFFFFFFF;
+
+/* CONFIG_HW_SFQ: hardware smart/fair-queue support */
+unsigned int web_sfq_enable;
+#define HW_SFQ_UP 3
+#define HW_SFQ_DL 1
+
+#define sfq_debug 0
+/* virtual-queue table bases.
+ * NOTE(review): shadowed by locals in sfq_init() and never assigned
+ * there — confirm they are set elsewhere or are dead.
+ */
+struct SFQ_table *sfq0;
+struct SFQ_table *sfq1;
+struct SFQ_table *sfq2;
+struct SFQ_table *sfq3;
+
+#define KSEG1 0xa0000000
+#define PHYS_TO_VIRT(x) phys_to_virt(x)
+#define VIRT_TO_PHYS(x) virt_to_phys(x)
+/* extern void set_fe_dma_glo_cfg(void); */
+/* parsed L2/L3/L4 header snapshot shared by the SFQ TX path */
+struct parse_result sfq_parse_result;
+
+/**
+ *
+ * @brief: get the TXD index from its address
+ *
+ * @param: cpu_ptr
+ *
+ * @return: TXD index
+*/
+
+/**
+ * cal_frag_txd_num - number of QDMA TX descriptors needed for a buffer
+ * @size: buffer length in bytes
+ *
+ * Each descriptor carries at most MAX_QTXD_LEN bytes, so the answer is
+ * the ceiling of size / MAX_QTXD_LEN (0 for an empty buffer).  Computed
+ * arithmetically instead of the former loop that subtracted
+ * MAX_QTXD_LEN per iteration; the (size - 1) form avoids the unsigned
+ * overflow of the usual (size + MAX - 1) idiom for sizes near UINT_MAX.
+ */
+static inline unsigned int cal_frag_txd_num(unsigned int size)
+{
+ if (size == 0)
+ return 0;
+
+ return ((size - 1) / MAX_QTXD_LEN) + 1;
+}
+
+/**
+ * @brief get free TXD from TXD queue
+ *
+ * @param free_txd
+ *
+ * @return
+ */
+/* Pop the head TXD index off ring @ring_no's free list, relink the head
+ * to its successor and shrink the free count.  Caller must ensure the
+ * list is non-empty.
+ */
+static inline int get_free_txd(struct END_DEVICE *ei_local, int ring_no)
+{
+ unsigned int head = ei_local->free_txd_head[ring_no];
+
+ ei_local->free_txd_head[ring_no] = ei_local->txd_pool_info[head];
+ atomic_sub(1, &ei_local->free_txd_num[ring_no]);
+
+ return head;
+}
+
+/* Translate a TXD pool index into its DMA (physical) address. */
+static inline unsigned int get_phy_addr(struct END_DEVICE *ei_local,
+ unsigned int idx)
+{
+ return (idx * QTXD_LEN) + ei_local->phy_txd_pool;
+}
+
+/**
+ * @brief add free TXD into TXD queue
+ *
+ * @param free_txd
+ *
+ * @return
+ */
+/* Append @free_txd_idx to the tail of ring 0's free-TXD list. */
+static inline void put_free_txd(struct END_DEVICE *ei_local, int free_txd_idx)
+{
+ unsigned int tail = ei_local->free_txd_tail[0];
+
+ ei_local->txd_pool_info[tail] = free_txd_idx;
+ ei_local->free_txd_tail[0] = free_txd_idx;
+}
+
+/* Partition the TXD pool into per-queue free lists: GMAC1 queues own the
+ * first gmac1_txd_num descriptors in gmac1_txq_txd_num-sized slices,
+ * GMAC2 queues own the remainder in gmac2_txq_txd_num-sized slices.
+ */
+void init_pseudo_link_list(struct END_DEVICE *ei_local)
+{
+ int q;
+
+ for (q = 0; q < gmac1_txq_num; q++) {
+ ei_local->free_txd_head[q] = q * gmac1_txq_txd_num;
+ ei_local->free_txd_tail[q] = (q + 1) * gmac1_txq_txd_num - 1;
+ atomic_set(&ei_local->free_txd_num[q], gmac1_txq_txd_num);
+ }
+
+ for (q = 0; q < gmac2_txq_num; q++) {
+ int slot = q + gmac1_txq_num;
+
+ ei_local->free_txd_head[slot] =
+ gmac1_txd_num + q * gmac2_txq_txd_num;
+ ei_local->free_txd_tail[slot] =
+ gmac1_txd_num + (q + 1) * gmac2_txq_txd_num - 1;
+ atomic_set(&ei_local->free_txd_num[slot], gmac2_txq_txd_num);
+ }
+}
+
+/* ring_no_mapping - map a TXD pool index back to its owning queue number
+ * @txd_idx: descriptor index within the global TXD pool
+ *
+ * Inverse of the layout built by init_pseudo_link_list(): GMAC1 queues
+ * cover [0, gmac1_txd_num), GMAC2 queues follow.  Returns 0 and logs an
+ * error for an out-of-range index.
+ * NOTE(review): assumes gmac1_txd_num == gmac1_txq_num *
+ * gmac1_txq_txd_num (likewise for GMAC2); an inconsistent configuration
+ * would fall through the first loop and misreport — confirm invariant.
+ */
+static inline int ring_no_mapping(int txd_idx)
+{
+ int i;
+
+ if (txd_idx < gmac1_txd_num) {
+ for (i = 0; i < gmac1_txq_num; i++) {
+ if (txd_idx < (gmac1_txq_txd_num * (i + 1)))
+ return i;
+ }
+ }
+
+ txd_idx -= gmac1_txd_num;
+ for (i = 0; i < gmac2_txq_num; i++) {
+ if (txd_idx < (gmac2_txq_txd_num * (i + 1)))
+ return (i + gmac1_txq_num);
+ }
+ pr_err("txd index out of range\n");
+ return 0;
+}
+
+/*define qdma initial alloc*/
+/**
+ * @brief
+ *
+ * @param net_dev
+ *
+ * @return 0: fail
+ * 1: success
+ */
+/* qdma_tx_desc_alloc - allocate and wire up the QDMA TX descriptor pool
+ *
+ * Allocates a DMA-coherent pool of num_tx_desc descriptors, chains them
+ * into per-queue free lists, seeds the CPU/DMA head pointers with two
+ * null descriptors and programs the per-queue reservation registers.
+ *
+ * Return: 1 on success, 0 on allocation failure (inverse of sfq_init()).
+ * NOTE(review): informational messages below use pr_err — consider
+ * pr_info/pr_debug.
+ */
+bool qdma_tx_desc_alloc(void)
+{
+ struct net_device *dev = dev_raether;
+ struct END_DEVICE *ei_local = netdev_priv(dev);
+ unsigned int txd_idx;
+ int i = 0;
+
+ ei_local->txd_pool =
+ dma_alloc_coherent(&ei_local->qdma_pdev->dev,
+ QTXD_LEN * num_tx_desc,
+ &ei_local->phy_txd_pool, GFP_KERNEL);
+ pr_err("txd_pool=%p phy_txd_pool=%p\n", ei_local->txd_pool,
+ (void *)ei_local->phy_txd_pool);
+
+ if (!ei_local->txd_pool) {
+ pr_err("adapter->txd_pool allocation failed!\n");
+ return 0;
+ }
+ pr_err("ei_local->skb_free start address is 0x%p.\n",
+ ei_local->skb_free);
+ /* set all txd_pool_info to 0. */
+ /* each entry links to its successor; LS/DDONE = 1 marks it idle */
+ for (i = 0; i < num_tx_desc; i++) {
+ ei_local->skb_free[i] = 0;
+ ei_local->txd_pool_info[i] = i + 1;
+ ei_local->txd_pool[i].txd_info3.LS = 1;
+ ei_local->txd_pool[i].txd_info3.DDONE = 1;
+ }
+
+ init_pseudo_link_list(ei_local);
+
+ /* get free txd from txd pool */
+ txd_idx = get_free_txd(ei_local, 0);
+ ei_local->tx_cpu_idx = txd_idx;
+ /* add null TXD for transmit */
+ sys_reg_write(QTX_CTX_PTR, get_phy_addr(ei_local, txd_idx));
+ sys_reg_write(QTX_DTX_PTR, get_phy_addr(ei_local, txd_idx));
+
+ /* get free txd from txd pool */
+ txd_idx = get_free_txd(ei_local, 0);
+ ei_local->rls_cpu_idx = txd_idx;
+ /* add null TXD for release */
+ sys_reg_write(QTX_CRX_PTR, get_phy_addr(ei_local, txd_idx));
+ sys_reg_write(QTX_DRX_PTR, get_phy_addr(ei_local, txd_idx));
+
+ /*Reserve 4 TXD for each physical queue */
+ if (ei_local->chip_name == MT7623_FE || ei_local->chip_name == MT7621_FE ||
+ ei_local->chip_name == LEOPARD_FE) {
+ //for (i = 0; i < NUM_PQ; i++)
+ for (i = 0; i < 16; i++)
+ sys_reg_write(QTX_CFG_0 + QUEUE_OFFSET * i,
+ (NUM_PQ_RESV | (NUM_PQ_RESV << 8)));
+ }
+
+ sys_reg_write(QTX_SCH_1, 0x80000000);
+/* disabled MT7622 per-page queue-reservation programming; kept for
+ * reference
+ */
+#if 0
+ if (ei_local->chip_name == MT7622_FE) {
+ for (i = 0; i < NUM_PQ; i++) {
+ if (i <= 15) {
+ sys_reg_write(QDMA_PAGE, 0);
+ sys_reg_write(QTX_CFG_0 + QUEUE_OFFSET * i,
+ (NUM_PQ_RESV |
+ (NUM_PQ_RESV << 8)));
+ } else if (i > 15 && i <= 31) {
+ sys_reg_write(QDMA_PAGE, 1);
+ sys_reg_write(QTX_CFG_0 +
+ QUEUE_OFFSET * (i - 16),
+ (NUM_PQ_RESV |
+ (NUM_PQ_RESV << 8)));
+ } else if (i > 31 && i <= 47) {
+ sys_reg_write(QDMA_PAGE, 2);
+ sys_reg_write(QTX_CFG_0 +
+ QUEUE_OFFSET * (i - 32),
+ (NUM_PQ_RESV |
+ (NUM_PQ_RESV << 8)));
+ } else if (i > 47 && i <= 63) {
+ sys_reg_write(QDMA_PAGE, 3);
+ sys_reg_write(QTX_CFG_0 +
+ QUEUE_OFFSET * (i - 48),
+ (NUM_PQ_RESV |
+ (NUM_PQ_RESV << 8)));
+ }
+ }
+ sys_reg_write(QDMA_PAGE, 0);
+ }
+#endif
+
+ return 1;
+}
+
+/**
+ * sfq_init - set up QDMA hardware smart/fair queueing (SFQ) virtual queues
+ * @dev: unused; device state comes from the global dev_raether
+ *
+ * Enables the SFQ MIB, programs queue counts, hash and validity
+ * registers, then allocates DMA-coherent virtual-queue tables (four
+ * tables, eight on MT7622/Leopard), seeds every entry with the
+ * 0xdeadbeef empty marker and hands the table bases to the hardware.
+ *
+ * Return: 0 on success, 1 on allocation failure.  NOTE(review): this is
+ * inverted relative to qdma_tx_desc_alloc()/fq_qdma_init() (which return
+ * 1 on success) — confirm the caller checks accordingly.  Tables already
+ * allocated are not released on the failure path.
+ *
+ * Fix vs. original: every table is now NULL-checked BEFORE it is
+ * memset()/initialized; the old code wrote through the pointers first
+ * and would oops on an allocation failure.
+ */
+bool sfq_init(struct net_device *dev)
+{
+ struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+ unsigned int reg_val;
+ dma_addr_t sfq_phy0;
+ dma_addr_t sfq_phy1;
+ dma_addr_t sfq_phy2;
+ dma_addr_t sfq_phy3;
+ /* NOTE(review): these locals shadow the file-scope sfq0..sfq3 */
+ struct SFQ_table *sfq0 = NULL;
+ struct SFQ_table *sfq1 = NULL;
+ struct SFQ_table *sfq2 = NULL;
+ struct SFQ_table *sfq3 = NULL;
+
+ dma_addr_t sfq_phy4;
+ dma_addr_t sfq_phy5;
+ dma_addr_t sfq_phy6;
+ dma_addr_t sfq_phy7;
+ struct SFQ_table *sfq4 = NULL;
+ struct SFQ_table *sfq5 = NULL;
+ struct SFQ_table *sfq6 = NULL;
+ struct SFQ_table *sfq7 = NULL;
+
+ int i = 0;
+
+ reg_val = sys_reg_read(VQTX_GLO);
+ reg_val = reg_val | VQTX_MIB_EN;
+ /* Virtual table extends to 32bytes */
+ sys_reg_write(VQTX_GLO, reg_val);
+ reg_val = sys_reg_read(VQTX_GLO);
+ if (ei_local->chip_name == MT7622_FE || ei_local->chip_name == LEOPARD_FE) {
+ sys_reg_write(VQTX_NUM,
+ (VQTX_NUM_0) | (VQTX_NUM_1) | (VQTX_NUM_2) |
+ (VQTX_NUM_3) | (VQTX_NUM_4) | (VQTX_NUM_5) |
+ (VQTX_NUM_6) | (VQTX_NUM_7));
+ } else {
+ sys_reg_write(VQTX_NUM,
+ (VQTX_NUM_0) | (VQTX_NUM_1) | (VQTX_NUM_2) |
+ (VQTX_NUM_3));
+ }
+
+ /* 10 s change hash algorithm */
+ sys_reg_write(VQTX_HASH_CFG, 0xF002710);
+
+ if (ei_local->chip_name == MT7622_FE || ei_local->chip_name == LEOPARD_FE)
+ sys_reg_write(VQTX_VLD_CFG, 0xeca86420);
+ else
+ sys_reg_write(VQTX_VLD_CFG, 0xc840);
+ sys_reg_write(VQTX_HASH_SD, 0x0D);
+ sys_reg_write(QDMA_FC_THRES, 0x9b9b4444);
+ sys_reg_write(QDMA_HRED1, 0);
+ sys_reg_write(QDMA_HRED2, 0);
+ sys_reg_write(QDMA_SRED1, 0);
+ sys_reg_write(QDMA_SRED2, 0);
+ if (ei_local->chip_name == MT7622_FE || ei_local->chip_name == LEOPARD_FE) {
+ sys_reg_write(VQTX_0_3_BIND_QID,
+ (VQTX_0_BIND_QID) | (VQTX_1_BIND_QID) |
+ (VQTX_2_BIND_QID) | (VQTX_3_BIND_QID));
+ sys_reg_write(VQTX_4_7_BIND_QID,
+ (VQTX_4_BIND_QID) | (VQTX_5_BIND_QID) |
+ (VQTX_6_BIND_QID) | (VQTX_7_BIND_QID));
+ pr_err("VQTX_0_3_BIND_QID =%x\n",
+ sys_reg_read(VQTX_0_3_BIND_QID));
+ pr_err("VQTX_4_7_BIND_QID =%x\n",
+ sys_reg_read(VQTX_4_7_BIND_QID));
+ }
+
+ /* allocate all four base tables, then validate before touching them */
+ sfq0 = dma_alloc_coherent(&ei_local->qdma_pdev->dev,
+ VQ_NUM0 * sizeof(struct SFQ_table), &sfq_phy0,
+ GFP_KERNEL);
+ sfq1 = dma_alloc_coherent(&ei_local->qdma_pdev->dev,
+ VQ_NUM1 * sizeof(struct SFQ_table), &sfq_phy1,
+ GFP_KERNEL);
+ sfq2 = dma_alloc_coherent(&ei_local->qdma_pdev->dev,
+ VQ_NUM2 * sizeof(struct SFQ_table), &sfq_phy2,
+ GFP_KERNEL);
+ sfq3 = dma_alloc_coherent(&ei_local->qdma_pdev->dev,
+ VQ_NUM3 * sizeof(struct SFQ_table), &sfq_phy3,
+ GFP_KERNEL);
+ if (unlikely(!sfq0) || unlikely(!sfq1) ||
+ unlikely(!sfq2) || unlikely(!sfq3)) {
+ pr_err("QDMA SFQ0~3 VQ not available...\n");
+ return 1;
+ }
+
+ /* mark every virtual queue empty (0xdeadbeef head/tail sentinel) */
+ memset(sfq0, 0x0, VQ_NUM0 * sizeof(struct SFQ_table));
+ for (i = 0; i < VQ_NUM0; i++) {
+ sfq0[i].sfq_info1.VQHPTR = 0xdeadbeef;
+ sfq0[i].sfq_info2.VQTPTR = 0xdeadbeef;
+ }
+ memset(sfq1, 0x0, VQ_NUM1 * sizeof(struct SFQ_table));
+ for (i = 0; i < VQ_NUM1; i++) {
+ sfq1[i].sfq_info1.VQHPTR = 0xdeadbeef;
+ sfq1[i].sfq_info2.VQTPTR = 0xdeadbeef;
+ }
+ memset(sfq2, 0x0, VQ_NUM2 * sizeof(struct SFQ_table));
+ for (i = 0; i < VQ_NUM2; i++) {
+ sfq2[i].sfq_info1.VQHPTR = 0xdeadbeef;
+ sfq2[i].sfq_info2.VQTPTR = 0xdeadbeef;
+ }
+ memset(sfq3, 0x0, VQ_NUM3 * sizeof(struct SFQ_table));
+ for (i = 0; i < VQ_NUM3; i++) {
+ sfq3[i].sfq_info1.VQHPTR = 0xdeadbeef;
+ sfq3[i].sfq_info2.VQTPTR = 0xdeadbeef;
+ }
+
+ if (ei_local->chip_name == MT7622_FE || ei_local->chip_name == LEOPARD_FE) {
+ /* tables 4..7 exist only on MT7622/Leopard */
+ sfq4 = dma_alloc_coherent(&ei_local->qdma_pdev->dev,
+ VQ_NUM4 * sizeof(struct SFQ_table),
+ &sfq_phy4, GFP_KERNEL);
+ sfq5 = dma_alloc_coherent(&ei_local->qdma_pdev->dev,
+ VQ_NUM5 * sizeof(struct SFQ_table),
+ &sfq_phy5, GFP_KERNEL);
+ sfq6 = dma_alloc_coherent(&ei_local->qdma_pdev->dev,
+ VQ_NUM6 * sizeof(struct SFQ_table),
+ &sfq_phy6, GFP_KERNEL);
+ sfq7 = dma_alloc_coherent(&ei_local->qdma_pdev->dev,
+ VQ_NUM7 * sizeof(struct SFQ_table),
+ &sfq_phy7, GFP_KERNEL);
+ if (unlikely(!sfq4) || unlikely(!sfq5) ||
+ unlikely(!sfq6) || unlikely(!sfq7)) {
+ pr_err("QDMA SFQ4~7 VQ not available...\n");
+ return 1;
+ }
+
+ memset(sfq4, 0x0, VQ_NUM4 * sizeof(struct SFQ_table));
+ for (i = 0; i < VQ_NUM4; i++) {
+ sfq4[i].sfq_info1.VQHPTR = 0xdeadbeef;
+ sfq4[i].sfq_info2.VQTPTR = 0xdeadbeef;
+ }
+ memset(sfq5, 0x0, VQ_NUM5 * sizeof(struct SFQ_table));
+ for (i = 0; i < VQ_NUM5; i++) {
+ sfq5[i].sfq_info1.VQHPTR = 0xdeadbeef;
+ sfq5[i].sfq_info2.VQTPTR = 0xdeadbeef;
+ }
+ memset(sfq6, 0x0, VQ_NUM6 * sizeof(struct SFQ_table));
+ for (i = 0; i < VQ_NUM6; i++) {
+ sfq6[i].sfq_info1.VQHPTR = 0xdeadbeef;
+ sfq6[i].sfq_info2.VQTPTR = 0xdeadbeef;
+ }
+ memset(sfq7, 0x0, VQ_NUM7 * sizeof(struct SFQ_table));
+ for (i = 0; i < VQ_NUM7; i++) {
+ sfq7[i].sfq_info1.VQHPTR = 0xdeadbeef;
+ sfq7[i].sfq_info2.VQTPTR = 0xdeadbeef;
+ }
+ }
+
+ pr_err("*****sfq_phy0 is 0x%p!!!*******\n", (void *)sfq_phy0);
+ pr_err("*****sfq_phy1 is 0x%p!!!*******\n", (void *)sfq_phy1);
+ pr_err("*****sfq_phy2 is 0x%p!!!*******\n", (void *)sfq_phy2);
+ pr_err("*****sfq_phy3 is 0x%p!!!*******\n", (void *)sfq_phy3);
+ pr_err("*****sfq_virt0 is 0x%p!!!*******\n", sfq0);
+ pr_err("*****sfq_virt1 is 0x%p!!!*******\n", sfq1);
+ pr_err("*****sfq_virt2 is 0x%p!!!*******\n", sfq2);
+ pr_err("*****sfq_virt3 is 0x%p!!!*******\n", sfq3);
+ if (ei_local->chip_name == MT7622_FE || ei_local->chip_name == LEOPARD_FE) {
+ pr_err("*****sfq_phy4 is 0x%p!!!*******\n", (void *)sfq_phy4);
+ pr_err("*****sfq_phy5 is 0x%p!!!*******\n", (void *)sfq_phy5);
+ pr_err("*****sfq_phy6 is 0x%p!!!*******\n", (void *)sfq_phy6);
+ pr_err("*****sfq_phy7 is 0x%p!!!*******\n", (void *)sfq_phy7);
+ pr_err("*****sfq_virt4 is 0x%p!!!*******\n", sfq4);
+ pr_err("*****sfq_virt5 is 0x%p!!!*******\n", sfq5);
+ pr_err("*****sfq_virt6 is 0x%p!!!*******\n", sfq6);
+ pr_err("*****sfq_virt7 is 0x%p!!!*******\n", sfq7);
+ }
+
+ /* hand the table bases to the hardware */
+ sys_reg_write(VQTX_TB_BASE0, (u32)sfq_phy0);
+ sys_reg_write(VQTX_TB_BASE1, (u32)sfq_phy1);
+ sys_reg_write(VQTX_TB_BASE2, (u32)sfq_phy2);
+ sys_reg_write(VQTX_TB_BASE3, (u32)sfq_phy3);
+ if (ei_local->chip_name == MT7622_FE || ei_local->chip_name == LEOPARD_FE) {
+ sys_reg_write(VQTX_TB_BASE4, (u32)sfq_phy4);
+ sys_reg_write(VQTX_TB_BASE5, (u32)sfq_phy5);
+ sys_reg_write(VQTX_TB_BASE6, (u32)sfq_phy6);
+ sys_reg_write(VQTX_TB_BASE7, (u32)sfq_phy7);
+ }
+
+ return 0;
+}
+
+/* fq_qdma_init - build the QDMA forwarding free-queue (FQ)
+ * @dev: net device whose private data holds the QDMA state
+ *
+ * Allocates NUM_QDMA_PAGE descriptors plus one data page per descriptor,
+ * links each descriptor to its page and to the next descriptor, then
+ * programs the FQ head/tail/count/buffer-length registers.
+ *
+ * Return: 1 on success, 0 on allocation failure (note: opposite
+ * convention to sfq_init()).  NOTE(review): on the second allocation
+ * failure the already-allocated descriptor pool is not freed.
+ */
+bool fq_qdma_init(struct net_device *dev)
+{
+ struct END_DEVICE *ei_local = netdev_priv(dev);
+ /* struct QDMA_txdesc *free_head = NULL; */
+ dma_addr_t phy_free_head;
+ dma_addr_t phy_free_tail;
+ unsigned int *free_page_head = NULL;
+ dma_addr_t phy_free_page_head;
+ int i;
+
+ free_head = dma_alloc_coherent(&ei_local->qdma_pdev->dev,
+ NUM_QDMA_PAGE *
+ QTXD_LEN, &phy_free_head, GFP_KERNEL);
+
+ if (unlikely(!free_head)) {
+ pr_err("QDMA FQ decriptor not available...\n");
+ return 0;
+ }
+ memset(free_head, 0x0, QTXD_LEN * NUM_QDMA_PAGE);
+
+ free_page_head =
+ dma_alloc_coherent(&ei_local->qdma_pdev->dev,
+ NUM_QDMA_PAGE * QDMA_PAGE_SIZE,
+ &phy_free_page_head, GFP_KERNEL);
+
+ if (unlikely(!free_page_head)) {
+ pr_err("QDMA FQ page not available...\n");
+ return 0;
+ }
+ /* chain: descriptor i -> page i, and i -> descriptor i+1 */
+ for (i = 0; i < NUM_QDMA_PAGE; i++) {
+ free_head[i].txd_info1.SDP =
+ (phy_free_page_head + (i * QDMA_PAGE_SIZE));
+ if (i < (NUM_QDMA_PAGE - 1)) {
+ free_head[i].txd_info2.NDP =
+ (phy_free_head + ((i + 1) * QTXD_LEN));
+ }
+ free_head[i].txd_info3.SDL = QDMA_PAGE_SIZE;
+ }
+ phy_free_tail =
+ (phy_free_head + (u32)((NUM_QDMA_PAGE - 1) * QTXD_LEN));
+
+ pr_err("phy_free_head is 0x%p!!!\n", (void *)phy_free_head);
+ pr_err("phy_free_tail_phy is 0x%p!!!\n", (void *)phy_free_tail);
+ sys_reg_write(QDMA_FQ_HEAD, (u32)phy_free_head);
+ sys_reg_write(QDMA_FQ_TAIL, (u32)phy_free_tail);
+ sys_reg_write(QDMA_FQ_CNT, ((num_tx_desc << 16) | NUM_QDMA_PAGE));
+ sys_reg_write(QDMA_FQ_BLEN, QDMA_PAGE_SIZE << 16);
+ pr_info("gmac1_txd_num:%d; gmac2_txd_num:%d; num_tx_desc:%d\n",
+ gmac1_txd_num, gmac2_txd_num, num_tx_desc);
+ ei_local->free_head = free_head;
+ ei_local->phy_free_head = phy_free_head;
+ ei_local->free_page_head = free_page_head;
+ ei_local->phy_free_page_head = phy_free_page_head;
+ ei_local->tx_ring_full = 0;
+ return 1;
+}
+
+/* protocol class written into TXD info5.PROT by the SFQ TX path:
+ * 1 = other IPv4, 2 = IPV4_HNAPT, 3 = IPV6_3T, 4 = IPV6_5T
+ * (set by sfq_parse_layer_info)
+ */
+int sfq_prot;
+
+#if (sfq_debug)
+int udp_source_port;
+int tcp_source_port;
+int ack_packt;
+#endif
+/* sfq_parse_layer_info - snapshot L2/L3/L4 headers into sfq_parse_result
+ * @skb: packet whose headers start at skb->data
+ *
+ * Fills the global sfq_parse_result (MACs, ethertype, VLAN gap, IP and
+ * TCP/UDP headers) and sets the global sfq_prot classification used for
+ * the TXD PROT field.  Returns 1 for an IPv4 fragment, 0 otherwise.
+ * NOTE(review): the visible caller ignores the return value — confirm.
+ * NOTE(review): sfq_parse_result.ppp_tag is compared below but never
+ * assigned after the memset, so the PPPoE branches can only match if it
+ * is set elsewhere — verify.
+ * Not reentrant: writes shared globals without locking.
+ */
+int sfq_parse_layer_info(struct sk_buff *skb)
+{
+ struct vlan_hdr *vh_sfq = NULL;
+ struct ethhdr *eth_sfq = NULL;
+ struct iphdr *iph_sfq = NULL;
+ struct ipv6hdr *ip6h_sfq = NULL;
+ struct tcphdr *th_sfq = NULL;
+ struct udphdr *uh_sfq = NULL;
+
+ memset(&sfq_parse_result, 0, sizeof(sfq_parse_result));
+ eth_sfq = (struct ethhdr *)skb->data;
+ ether_addr_copy(sfq_parse_result.dmac, eth_sfq->h_dest);
+ ether_addr_copy(sfq_parse_result.smac, eth_sfq->h_source);
+ /* memcpy(sfq_parse_result.dmac, eth_sfq->h_dest, ETH_ALEN); */
+ /* memcpy(sfq_parse_result.smac, eth_sfq->h_source, ETH_ALEN); */
+ sfq_parse_result.eth_type = eth_sfq->h_proto;
+
+ /* single VLAN tag: skip it and use the encapsulated ethertype */
+ if (sfq_parse_result.eth_type == htons(ETH_P_8021Q)) {
+ sfq_parse_result.vlan1_gap = VLAN_HLEN;
+ vh_sfq = (struct vlan_hdr *)(skb->data + ETH_HLEN);
+ sfq_parse_result.eth_type = vh_sfq->h_vlan_encapsulated_proto;
+ } else {
+ sfq_parse_result.vlan1_gap = 0;
+ }
+
+ /* set layer4 start addr */
+ if ((sfq_parse_result.eth_type == htons(ETH_P_IP)) ||
+ (sfq_parse_result.eth_type == htons(ETH_P_PPP_SES) &&
+ sfq_parse_result.ppp_tag == htons(PPP_IP))) {
+ iph_sfq =
+ (struct iphdr *)(skb->data + ETH_HLEN +
+ (sfq_parse_result.vlan1_gap));
+
+ /* prepare layer3/layer4 info */
+ memcpy(&sfq_parse_result.iph, iph_sfq, sizeof(struct iphdr));
+ if (iph_sfq->protocol == IPPROTO_TCP) {
+ th_sfq =
+ (struct tcphdr *)(skb->data + ETH_HLEN +
+ (sfq_parse_result.vlan1_gap) +
+ (iph_sfq->ihl * 4));
+ memcpy(&sfq_parse_result.th, th_sfq,
+ sizeof(struct tcphdr));
+#if (sfq_debug)
+ tcp_source_port = ntohs(sfq_parse_result.th.source);
+ udp_source_port = 0;
+ /* tcp ack packet */
+ if (ntohl(sfq_parse_result.iph.saddr) == 0xa0a0a04)
+ ack_packt = 1;
+ else
+ ack_packt = 0;
+#endif
+ sfq_prot = 2; /* IPV4_HNAPT */
+ if (iph_sfq->frag_off & htons(IP_MF | IP_OFFSET))
+ return 1;
+ } else if (iph_sfq->protocol == IPPROTO_UDP) {
+ uh_sfq =
+ (struct udphdr *)(skb->data + ETH_HLEN +
+ (sfq_parse_result.vlan1_gap) +
+ iph_sfq->ihl * 4);
+ memcpy(&sfq_parse_result.uh, uh_sfq,
+ sizeof(struct udphdr));
+#if (sfq_debug)
+ udp_source_port = ntohs(sfq_parse_result.uh.source);
+ tcp_source_port = 0;
+ ack_packt = 0;
+#endif
+ sfq_prot = 2; /* IPV4_HNAPT */
+ if (iph_sfq->frag_off & htons(IP_MF | IP_OFFSET))
+ return 1;
+ } else {
+ sfq_prot = 1;
+ }
+ } else if (sfq_parse_result.eth_type == htons(ETH_P_IPV6) ||
+ (sfq_parse_result.eth_type == htons(ETH_P_PPP_SES) &&
+ sfq_parse_result.ppp_tag == htons(PPP_IPV6))) {
+ ip6h_sfq =
+ (struct ipv6hdr *)(skb->data + ETH_HLEN +
+ (sfq_parse_result.vlan1_gap));
+ if (ip6h_sfq->nexthdr == NEXTHDR_TCP) {
+ sfq_prot = 4; /* IPV6_5T */
+#if (sfq_debug)
+ if (ntohl(sfq_parse_result.ip6h.saddr.s6_addr32[3]) ==
+ 8)
+ ack_packt = 1;
+ else
+ ack_packt = 0;
+#endif
+ } else if (ip6h_sfq->nexthdr == NEXTHDR_UDP) {
+#if (sfq_debug)
+ ack_packt = 0;
+#endif
+ sfq_prot = 4; /* IPV6_5T */
+
+ } else {
+ sfq_prot = 3; /* IPV6_3T */
+ }
+ }
+ return 0;
+}
+
+/* rt2880_qdma_eth_send - enqueue one non-TSO skb on the QDMA TX queue
+ * @ei_local: driver private state
+ * @dev: transmitting net device
+ * @skb: packet to send
+ * @gmac_no: destination GMAC (2 = pseudo device accounting)
+ * @ring_no: software TX ring/queue number
+ *
+ * Builds the descriptor in an on-stack dummy, picks a free TXD for the
+ * next slot, then copies the dummy into the previous CPU descriptor
+ * under page_lock (txd_info3 with its DDONE/LS bits written last) and
+ * kicks hardware via QTX_CTX_PTR.  Returns the transmitted length.
+ * NOTE(review): get_free_txd() is called without checking
+ * free_txd_num — confirm the caller guarantees a free descriptor.
+ */
+int rt2880_qdma_eth_send(struct END_DEVICE *ei_local, struct net_device *dev,
+ struct sk_buff *skb, int gmac_no, int ring_no)
+{
+ unsigned int length = skb->len;
+ struct QDMA_txdesc *cpu_ptr, *prev_cpu_ptr;
+ struct QDMA_txdesc dummy_desc;
+ struct PSEUDO_ADAPTER *p_ad;
+ unsigned long flags;
+ unsigned int next_txd_idx, qidx;
+
+ cpu_ptr = &dummy_desc;
+ /* 2. prepare data */
+ dma_sync_single_for_device(&ei_local->qdma_pdev->dev,
+ virt_to_phys(skb->data),
+ skb->len, DMA_TO_DEVICE);
+ /* cpu_ptr->txd_info1.SDP = VIRT_TO_PHYS(skb->data); */
+ cpu_ptr->txd_info1.SDP = virt_to_phys(skb->data);
+ cpu_ptr->txd_info3.SDL = skb->len;
+ if (ei_local->features & FE_HW_SFQ) {
+ sfq_parse_layer_info(skb);
+ cpu_ptr->txd_info5.VQID0 = 1; /* 1:HW hash 0:CPU */
+ cpu_ptr->txd_info5.PROT = sfq_prot;
+ /* no vlan */
+ cpu_ptr->txd_info5.IPOFST = 14 + (sfq_parse_result.vlan1_gap);
+ }
+ cpu_ptr->txd_info4.FPORT = gmac_no;
+
+ if (ei_local->features & FE_CSUM_OFFLOAD) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ cpu_ptr->txd_info5.TUI_CO = 7;
+ else
+ cpu_ptr->txd_info5.TUI_CO = 0;
+ }
+
+ if (ei_local->features & FE_HW_VLAN_TX) {
+ if (skb_vlan_tag_present(skb)) {
+ cpu_ptr->txd_info6.INSV_1 = 1;
+ cpu_ptr->txd_info6.VLAN_TAG_1 = skb_vlan_tag_get(skb);
+ cpu_ptr->txd_info4.QID = skb_vlan_tag_get(skb);
+ } else {
+ cpu_ptr->txd_info4.QID = ring_no;
+ cpu_ptr->txd_info6.INSV_1 = 0;
+ cpu_ptr->txd_info6.VLAN_TAG_1 = 0;
+ }
+ } else {
+ cpu_ptr->txd_info6.INSV_1 = 0;
+ cpu_ptr->txd_info6.VLAN_TAG_1 = 0;
+ }
+ /* NOTE(review): this unconditionally overwrites the QID chosen in
+ * the VLAN branch above, making those assignments dead stores —
+ * confirm whether forcing queue 0 here is intentional.
+ */
+ cpu_ptr->txd_info4.QID = 0;
+ /* cpu_ptr->txd_info4.QID = ring_no; */
+
+ if ((ei_local->features & QDMA_QOS_MARK) && (skb->mark != 0)) {
+ if (skb->mark < 64) {
+ qidx = M2Q_table[skb->mark];
+ /* NOTE(review): the second store overwrites the
+ * first — looks like a leftover from writing two
+ * separate QID fields; verify against the TXD layout.
+ */
+ cpu_ptr->txd_info4.QID = ((qidx & 0x30) >> 4);
+ cpu_ptr->txd_info4.QID = (qidx & 0x0f);
+ } else {
+ pr_debug("skb->mark out of range\n");
+ cpu_ptr->txd_info4.QID = 0;
+ cpu_ptr->txd_info4.QID = 0;
+ }
+ }
+ /* QoS Web UI used */
+ if ((ei_local->features & QDMA_QOS_WEB) && (lan_wan_separate == 1)) {
+ if (web_sfq_enable == 1 && (skb->mark == 2)) {
+ if (gmac_no == 1)
+ cpu_ptr->txd_info4.QID = HW_SFQ_DL;
+ else
+ cpu_ptr->txd_info4.QID = HW_SFQ_UP;
+ } else if (gmac_no == 2) {
+ cpu_ptr->txd_info4.QID += 8;
+ }
+ }
+#if defined(CONFIG_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+ if (IS_MAGIC_TAG_PROTECT_VALID_HEAD(skb)) {
+ if (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_PPE) {
+ if (ppe_hook_rx_eth) {
+ cpu_ptr->txd_info4.FPORT = 3; /* PPE */
+ FOE_MAGIC_TAG(skb) = 0;
+ }
+ }
+ } else if (IS_MAGIC_TAG_PROTECT_VALID_TAIL(skb)) {
+ if (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_PPE) {
+ if (ppe_hook_rx_eth) {
+ cpu_ptr->txd_info4.FPORT = 3; /* PPE */
+ FOE_MAGIC_TAG(skb) = 0;
+ }
+ }
+ }
+#endif
+
+ /* dma_sync_single_for_device(NULL, virt_to_phys(skb->data), */
+ /* skb->len, DMA_TO_DEVICE); */
+ cpu_ptr->txd_info4.SWC = 1;
+
+ /* 5. move CPU_PTR to new TXD */
+ cpu_ptr->txd_info5.TSO = 0;
+ cpu_ptr->txd_info3.LS = 1;
+ cpu_ptr->txd_info3.DDONE = 0;
+ next_txd_idx = get_free_txd(ei_local, ring_no);
+ cpu_ptr->txd_info2.NDP = get_phy_addr(ei_local, next_txd_idx);
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ prev_cpu_ptr = ei_local->txd_pool + ei_local->tx_cpu_idx;
+ /* update skb_free */
+ ei_local->skb_free[ei_local->tx_cpu_idx] = skb;
+ /* update tx cpu idx */
+ ei_local->tx_cpu_idx = next_txd_idx;
+ /* update txd info */
+ /* txd_info3 (LS/DDONE) is copied last so HW never sees a
+ * half-written descriptor
+ */
+ prev_cpu_ptr->txd_info1 = dummy_desc.txd_info1;
+ prev_cpu_ptr->txd_info2 = dummy_desc.txd_info2;
+ prev_cpu_ptr->txd_info4 = dummy_desc.txd_info4;
+ prev_cpu_ptr->txd_info5 = dummy_desc.txd_info5;
+ prev_cpu_ptr->txd_info6 = dummy_desc.txd_info6;
+ prev_cpu_ptr->txd_info7 = dummy_desc.txd_info7;
+ prev_cpu_ptr->txd_info3 = dummy_desc.txd_info3;
+ /* NOTE: add memory barrier to avoid
+ * DMA access memory earlier than memory written
+ */
+ wmb();
+ /* update CPU pointer */
+ sys_reg_write(QTX_CTX_PTR,
+ get_phy_addr(ei_local, ei_local->tx_cpu_idx));
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+ if (ei_local->features & FE_GE2_SUPPORT) {
+ if (gmac_no == 2) {
+ if (ei_local->pseudo_dev) {
+ p_ad = netdev_priv(ei_local->pseudo_dev);
+ p_ad->stat.tx_packets++;
+
+ p_ad->stat.tx_bytes += length;
+ }
+ } else {
+ ei_local->stat.tx_packets++;
+ ei_local->stat.tx_bytes += skb->len;
+ }
+ } else {
+ ei_local->stat.tx_packets++;
+ ei_local->stat.tx_bytes += skb->len;
+ }
+ if (ei_local->features & FE_INT_NAPI) {
+ if (ei_local->tx_full == 1) {
+ ei_local->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+ }
+
+ return length;
+}
+
+/* rt2880_qdma_eth_send_tso - queue a scattered / TSO frame on the QDMA TX ring
+ * @ei_local: driver private state
+ * @dev: transmitting net_device (used for wake-queue on NAPI path)
+ * @skb: frame to send; may carry page fragments and GSO segments
+ * @gmac_no: egress GMAC (1 or 2), selects the descriptor forward port
+ * @ring_no: software TX ring the descriptors are taken from
+ *
+ * Builds a TX descriptor chain covering the linear part and every fragment
+ * of @skb (each piece capped at MAX_QTXD_LEN per descriptor), then commits
+ * the head descriptor and kicks QTX_CTX_PTR under page_lock.
+ * Returns skb->len.
+ */
+int rt2880_qdma_eth_send_tso(struct END_DEVICE *ei_local,
+			     struct net_device *dev, struct sk_buff *skb,
+			     int gmac_no, int ring_no)
+{
+	unsigned int length = skb->len;
+	struct QDMA_txdesc *cpu_ptr, *prev_cpu_ptr;
+	struct QDMA_txdesc dummy_desc;
+	struct QDMA_txdesc init_dummy_desc;
+	int ctx_idx;
+	struct iphdr *iph = NULL;
+	struct QDMA_txdesc *init_cpu_ptr;
+	struct tcphdr *th = NULL;
+	skb_frag_t * frag;
+	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+	unsigned int len, size, frag_txd_num, qidx;
+	dma_addr_t offset;
+	unsigned long flags;
+	int i;
+	int init_qid, init_qid1;
+	struct ipv6hdr *ip6h = NULL;
+	struct PSEUDO_ADAPTER *p_ad;
+
+	/* the head descriptor is staged in init_dummy_desc and written to the
+	 * ring only at the very end, after the rest of the chain is in place
+	 */
+	init_cpu_ptr = &init_dummy_desc;
+	cpu_ptr = &init_dummy_desc;
+
+	/* linear (non-paged) part of the skb first */
+	len = length - skb->data_len;
+	dma_sync_single_for_device(&ei_local->qdma_pdev->dev,
+				   virt_to_phys(skb->data),
+				   len,
+				   DMA_TO_DEVICE);
+	offset = virt_to_phys(skb->data);
+	cpu_ptr->txd_info1.SDP = offset;
+	if (len > MAX_QTXD_LEN) {
+		cpu_ptr->txd_info3.SDL = MAX_QTXD_LEN;
+		cpu_ptr->txd_info3.LS = 0;
+		len -= MAX_QTXD_LEN;
+		offset += MAX_QTXD_LEN;
+	} else {
+		cpu_ptr->txd_info3.SDL = len;
+		/* last segment only if no fragments follow */
+		cpu_ptr->txd_info3.LS = nr_frags ? 0 : 1;
+		len = 0;
+	}
+	if (ei_local->features & FE_HW_SFQ) {
+		sfq_parse_layer_info(skb);
+
+		cpu_ptr->txd_info5.VQID0 = 1;
+		cpu_ptr->txd_info5.PROT = sfq_prot;
+		/* no vlan */
+		cpu_ptr->txd_info5.IPOFST = 14 + (sfq_parse_result.vlan1_gap);
+	}
+	if (gmac_no == 1)
+		cpu_ptr->txd_info4.FPORT = 1;
+	else
+		cpu_ptr->txd_info4.FPORT = 2;
+
+	cpu_ptr->txd_info5.TSO = 0;
+	cpu_ptr->txd_info4.QID = 0;
+	/* cpu_ptr->txd_info4.QID = ring_no; */
+	/* map skb->mark to a HW queue via M2Q_table (marks 1..63) */
+	if ((ei_local->features & QDMA_QOS_MARK) && (skb->mark != 0)) {
+		if (skb->mark < 64) {
+			qidx = M2Q_table[skb->mark];
+			cpu_ptr->txd_info4.QID = qidx;
+
+		} else {
+			pr_debug("skb->mark out of range\n");
+			cpu_ptr->txd_info4.QID = 0;
+
+		}
+	}
+	if (ei_local->features & FE_CSUM_OFFLOAD) {
+		if (skb->ip_summed == CHECKSUM_PARTIAL)
+			cpu_ptr->txd_info5.TUI_CO = 7;
+		else
+			cpu_ptr->txd_info5.TUI_CO = 0;
+	}
+
+	if (ei_local->features & FE_HW_VLAN_TX) {
+		if (skb_vlan_tag_present(skb)) {
+			cpu_ptr->txd_info6.INSV_1 = 1;
+			cpu_ptr->txd_info6.VLAN_TAG_1 = skb_vlan_tag_get(skb);
+			cpu_ptr->txd_info4.QID = skb_vlan_tag_get(skb);
+		} else {
+			cpu_ptr->txd_info4.QID = ring_no;
+			cpu_ptr->txd_info6.INSV_1 = 0;
+			cpu_ptr->txd_info6.VLAN_TAG_1 = 0;
+		}
+	} else {
+		cpu_ptr->txd_info6.INSV_1 = 0;
+		cpu_ptr->txd_info6.VLAN_TAG_1 = 0;
+	}
+
+	/* LAN/WAN separation: WAN (gmac 2) traffic uses queues 8..15 */
+	if ((ei_local->features & FE_GE2_SUPPORT) && (lan_wan_separate == 1)) {
+		if (web_sfq_enable == 1 && (skb->mark == 2)) {
+			if (gmac_no == 1)
+				cpu_ptr->txd_info4.QID = HW_SFQ_DL;
+			else
+				cpu_ptr->txd_info4.QID = HW_SFQ_UP;
+		} else if (gmac_no == 2) {
+			cpu_ptr->txd_info4.QID += 8;
+		}
+	}
+	/*debug multi tx queue */
+	/* NOTE(review): init_qid and init_qid1 always hold the same value and
+	 * every later "QID = init_qid; QID = init_qid1;" pair is redundant —
+	 * looks like leftover debug code; confirm before cleaning up.
+	 */
+	init_qid = cpu_ptr->txd_info4.QID;
+	init_qid1 = cpu_ptr->txd_info4.QID;
+#if defined(CONFIG_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+	/* frames tagged by the HW-NAT hook are redirected to the PPE port */
+	if (IS_MAGIC_TAG_PROTECT_VALID_HEAD(skb)) {
+		if (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_PPE) {
+			if (ppe_hook_rx_eth) {
+				cpu_ptr->txd_info4.FPORT = 3;	/* PPE */
+				FOE_MAGIC_TAG(skb) = 0;
+			}
+		}
+	} else if (IS_MAGIC_TAG_PROTECT_VALID_TAIL(skb)) {
+		if (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_PPE) {
+			if (ppe_hook_rx_eth) {
+				cpu_ptr->txd_info4.FPORT = 3;	/* PPE */
+				FOE_MAGIC_TAG(skb) = 0;
+			}
+		}
+	}
+#endif
+
+	cpu_ptr->txd_info4.SWC = 1;
+
+	/* reserve the next descriptor and link the head to it */
+	ctx_idx = get_free_txd(ei_local, ring_no);
+	cpu_ptr->txd_info2.NDP = get_phy_addr(ei_local, ctx_idx);
+	/*prev_cpu_ptr->txd_info1 = dummy_desc.txd_info1;
+	 *prev_cpu_ptr->txd_info2 = dummy_desc.txd_info2;
+	 *prev_cpu_ptr->txd_info3 = dummy_desc.txd_info3;
+	 *prev_cpu_ptr->txd_info4 = dummy_desc.txd_info4;
+	 */
+	/* remaining linear data beyond MAX_QTXD_LEN: one descriptor per chunk */
+	if (len > 0) {
+		frag_txd_num = cal_frag_txd_num(len);
+		for (frag_txd_num = frag_txd_num; frag_txd_num > 0;
+		     frag_txd_num--) {
+			if (len < MAX_QTXD_LEN)
+				size = len;
+			else
+				size = MAX_QTXD_LEN;
+
+			/* stage the descriptor in dummy_desc, then copy it
+			 * back field-by-field with DDONE written last
+			 */
+			cpu_ptr = (ei_local->txd_pool + (ctx_idx));
+			dummy_desc.txd_info1 = cpu_ptr->txd_info1;
+			dummy_desc.txd_info2 = cpu_ptr->txd_info2;
+			dummy_desc.txd_info3 = cpu_ptr->txd_info3;
+			dummy_desc.txd_info4 = cpu_ptr->txd_info4;
+			dummy_desc.txd_info5 = cpu_ptr->txd_info5;
+			dummy_desc.txd_info6 = cpu_ptr->txd_info6;
+			dummy_desc.txd_info7 = cpu_ptr->txd_info7;
+			prev_cpu_ptr = cpu_ptr;
+			cpu_ptr = &dummy_desc;
+			cpu_ptr->txd_info4.QID = init_qid;
+			cpu_ptr->txd_info4.QID = init_qid1;
+			cpu_ptr->txd_info1.SDP = offset;
+			cpu_ptr->txd_info3.SDL = size;
+			if ((nr_frags == 0) && (frag_txd_num == 1))
+				cpu_ptr->txd_info3.LS = 1;
+			else
+				cpu_ptr->txd_info3.LS = 0;
+			cpu_ptr->txd_info3.DDONE = 0;
+			cpu_ptr->txd_info4.SWC = 1;
+			/* only the LS descriptor owns the skb for free-up */
+			if (cpu_ptr->txd_info3.LS == 1)
+				ei_local->skb_free[ctx_idx] = skb;
+			else
+				ei_local->skb_free[ctx_idx] = magic_id;
+			ctx_idx = get_free_txd(ei_local, ring_no);
+			cpu_ptr->txd_info2.NDP =
+			    get_phy_addr(ei_local, ctx_idx);
+			prev_cpu_ptr->txd_info1 = dummy_desc.txd_info1;
+			prev_cpu_ptr->txd_info2 = dummy_desc.txd_info2;
+			prev_cpu_ptr->txd_info3 = dummy_desc.txd_info3;
+			prev_cpu_ptr->txd_info4 = dummy_desc.txd_info4;
+			prev_cpu_ptr->txd_info5 = dummy_desc.txd_info5;
+			prev_cpu_ptr->txd_info6 = dummy_desc.txd_info6;
+			prev_cpu_ptr->txd_info7 = dummy_desc.txd_info7;
+			offset += size;
+			len -= size;
+		}
+	}
+
+	/* now one descriptor chain piece per page fragment */
+	for (i = 0; i < nr_frags; i++) {
+		/* 1. set or get init value for current fragment */
+		offset = 0;
+		frag = &skb_shinfo(skb)->frags[i];
+		len = skb_frag_size(frag);
+		frag_txd_num = cal_frag_txd_num(len);
+		for (frag_txd_num = frag_txd_num;
+		     frag_txd_num > 0; frag_txd_num--) {
+			/* 2. size will be assigned to SDL
+			 * and can't be larger than MAX_TXD_LEN
+			 */
+			if (len < MAX_QTXD_LEN)
+				size = len;
+			else
+				size = MAX_QTXD_LEN;
+
+			/* 3. Update TXD info */
+			cpu_ptr = (ei_local->txd_pool + (ctx_idx));
+			dummy_desc.txd_info1 = cpu_ptr->txd_info1;
+			dummy_desc.txd_info2 = cpu_ptr->txd_info2;
+			dummy_desc.txd_info3 = cpu_ptr->txd_info3;
+			dummy_desc.txd_info4 = cpu_ptr->txd_info4;
+			dummy_desc.txd_info5 = cpu_ptr->txd_info5;
+			dummy_desc.txd_info6 = cpu_ptr->txd_info6;
+			dummy_desc.txd_info7 = cpu_ptr->txd_info7;
+			prev_cpu_ptr = cpu_ptr;
+			cpu_ptr = &dummy_desc;
+			cpu_ptr->txd_info4.QID = init_qid;
+			cpu_ptr->txd_info4.QID = init_qid1;
+			cpu_ptr->txd_info1.SDP = skb_frag_dma_map(&ei_local->qdma_pdev->dev, frag, offset, size, DMA_TO_DEVICE);
+			if (unlikely(dma_mapping_error
+				     (&ei_local->qdma_pdev->dev,
+				      cpu_ptr->txd_info1.SDP)))
+				pr_err("[%s]dma_map_page() failed...\n",
+				       __func__);
+
+			cpu_ptr->txd_info3.SDL = size;
+
+			if ((i == (nr_frags - 1)) && (frag_txd_num == 1))
+				cpu_ptr->txd_info3.LS = 1;
+			else
+				cpu_ptr->txd_info3.LS = 0;
+			cpu_ptr->txd_info3.DDONE = 0;
+			cpu_ptr->txd_info4.SWC = 1;
+			/* 4. Update skb_free for housekeeping */
+			if (cpu_ptr->txd_info3.LS == 1)
+				ei_local->skb_free[ctx_idx] = skb;
+			else
+				ei_local->skb_free[ctx_idx] = magic_id;
+
+			/* 5. Get next TXD */
+			ctx_idx = get_free_txd(ei_local, ring_no);
+			cpu_ptr->txd_info2.NDP =
+			    get_phy_addr(ei_local, ctx_idx);
+			prev_cpu_ptr->txd_info1 = dummy_desc.txd_info1;
+			prev_cpu_ptr->txd_info2 = dummy_desc.txd_info2;
+			prev_cpu_ptr->txd_info3 = dummy_desc.txd_info3;
+			prev_cpu_ptr->txd_info4 = dummy_desc.txd_info4;
+			prev_cpu_ptr->txd_info5 = dummy_desc.txd_info5;
+			prev_cpu_ptr->txd_info6 = dummy_desc.txd_info6;
+			prev_cpu_ptr->txd_info7 = dummy_desc.txd_info7;
+			/* 6. Update offset and len. */
+			offset += size;
+			len -= size;
+		}
+	}
+
+	/* TSO: stash gso_size in the TCP checksum field for the HW engine */
+	if (skb_shinfo(skb)->gso_segs > 1) {
+		/* TsoLenUpdate(skb->len); */
+
+		/* TCP over IPv4 */
+		iph = (struct iphdr *)skb_network_header(skb);
+		if ((iph->version == 4) && (iph->protocol == IPPROTO_TCP)) {
+			th = (struct tcphdr *)skb_transport_header(skb);
+
+			init_cpu_ptr->txd_info5.TSO = 1;
+
+			th->check = htons(skb_shinfo(skb)->gso_size);
+
+			dma_sync_single_for_device(&ei_local->qdma_pdev->dev,
+						   virt_to_phys(th),
+						   sizeof(struct
+							  tcphdr),
+						   DMA_TO_DEVICE);
+		}
+		/* TCP over IPv6 */
+		if (ei_local->features & FE_TSO_V6) {
+			ip6h = (struct ipv6hdr *)skb_network_header(skb);
+			if ((ip6h->nexthdr == NEXTHDR_TCP) &&
+			    (ip6h->version == 6)) {
+				th = (struct tcphdr *)skb_transport_header(skb);
+				init_cpu_ptr->txd_info5.TSO = 1;
+				th->check = htons(skb_shinfo(skb)->gso_size);
+				dma_sync_single_for_device(&ei_local->qdma_pdev->dev,
+							   virt_to_phys(th),
+							   sizeof(struct
+								  tcphdr),
+							   DMA_TO_DEVICE);
+			}
+		}
+
+		if (ei_local->features & FE_HW_SFQ) {
+			init_cpu_ptr->txd_info5.VQID0 = 1;
+			init_cpu_ptr->txd_info5.PROT = sfq_prot;
+			/* no vlan */
+			init_cpu_ptr->txd_info5.IPOFST =
+			    14 + (sfq_parse_result.vlan1_gap);
+		}
+	}
+	/* dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE); */
+
+	/* commit the staged head descriptor into the ring and advance the
+	 * HW CPU pointer; page_lock serializes against the other send path
+	 */
+	init_cpu_ptr->txd_info3.DDONE = 0;
+	spin_lock_irqsave(&ei_local->page_lock, flags);
+	prev_cpu_ptr = ei_local->txd_pool + ei_local->tx_cpu_idx;
+	ei_local->skb_free[ei_local->tx_cpu_idx] = magic_id;
+	ei_local->tx_cpu_idx = ctx_idx;
+	prev_cpu_ptr->txd_info1 = init_dummy_desc.txd_info1;
+	prev_cpu_ptr->txd_info2 = init_dummy_desc.txd_info2;
+	prev_cpu_ptr->txd_info4 = init_dummy_desc.txd_info4;
+	prev_cpu_ptr->txd_info3 = init_dummy_desc.txd_info3;
+	prev_cpu_ptr->txd_info5 = init_dummy_desc.txd_info5;
+	prev_cpu_ptr->txd_info6 = init_dummy_desc.txd_info6;
+	prev_cpu_ptr->txd_info7 = init_dummy_desc.txd_info7;
+
+	/* NOTE: add memory barrier to avoid
+	 * DMA access memory earlier than memory written
+	 */
+	wmb();
+	sys_reg_write(QTX_CTX_PTR,
+		      get_phy_addr(ei_local, ei_local->tx_cpu_idx));
+	spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+	/* per-GMAC statistics (gmac 2 is the pseudo device) */
+	if (ei_local->features & FE_GE2_SUPPORT) {
+		if (gmac_no == 2) {
+			if (ei_local->pseudo_dev) {
+				p_ad = netdev_priv(ei_local->pseudo_dev);
+				p_ad->stat.tx_packets++;
+				p_ad->stat.tx_bytes += length;
+			}
+		} else {
+			ei_local->stat.tx_packets++;
+			ei_local->stat.tx_bytes += skb->len;
+		}
+	} else {
+		ei_local->stat.tx_packets++;
+		ei_local->stat.tx_bytes += skb->len;
+	}
+	if (ei_local->features & FE_INT_NAPI) {
+		if (ei_local->tx_full == 1) {
+			ei_local->tx_full = 0;
+			netif_wake_queue(dev);
+		}
+	}
+
+	return length;
+}
+
+/* QDMA functions */
+/* Poll QDMA_GLO_CFG until both the RX and TX DMA engines report idle.
+ *
+ * Returns 0 once idle, -1 if the engines are still busy after the retry
+ * budget is exhausted.  The original loop spun forever (while (1) +
+ * continue), which made the -1 path unreachable and could hang the caller
+ * while flooding the log; a bounded retry restores the error path.
+ */
+int fe_qdma_wait_dma_idle(void)
+{
+	unsigned int reg_val;
+	unsigned int retry;
+
+	for (retry = 0; retry < 10000; retry++) {
+		reg_val = sys_reg_read(QDMA_GLO_CFG);
+		if ((reg_val & RX_DMA_BUSY)) {
+			pr_err("\n RX_DMA_BUSY !!! ");
+			continue;
+		}
+		if ((reg_val & TX_DMA_BUSY)) {
+			pr_err("\n TX_DMA_BUSY !!! ");
+			continue;
+		}
+		return 0;
+	}
+
+	return -1;
+}
+
+/* Allocate and program the QDMA RX ring (ring 0).
+ *
+ * Allocates the coherent descriptor array, one data buffer per descriptor
+ * (DMA-mapped for device writes), then points the HW at the ring.
+ * Returns 0 on success, -ENOMEM on any allocation/mapping failure.
+ *
+ * Fix: the dma_alloc_coherent() result was previously used without a NULL
+ * check, so an allocation failure dereferenced a NULL ring pointer.
+ * NOTE(review): the error path still leaks buffers allocated before the
+ * failure — mirrors the original behavior; confirm whether unwind is needed.
+ */
+int fe_qdma_rx_dma_init(struct net_device *dev)
+{
+	int i;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	unsigned int skb_size;
+	/* Initial QDMA RX Ring */
+
+	/* buffer layout consumed by build_skb: headroom + data + shared_info */
+	skb_size = SKB_DATA_ALIGN(MAX_RX_LENGTH + NET_IP_ALIGN + NET_SKB_PAD) +
+	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	ei_local->qrx_ring =
+	    dma_alloc_coherent(&ei_local->qdma_pdev->dev,
+			       NUM_QRX_DESC * sizeof(struct PDMA_rxdesc),
+			       &ei_local->phy_qrx_ring,
+			       GFP_ATOMIC | __GFP_ZERO);
+	if (!ei_local->qrx_ring) {
+		pr_err("qrx_ring allocation failed!\n");
+		goto no_rx_mem;
+	}
+	for (i = 0; i < NUM_QRX_DESC; i++) {
+		ei_local->netrx0_skb_data[i] =
+		    raeth_alloc_skb_data(skb_size, GFP_KERNEL);
+		if (!ei_local->netrx0_skb_data[i]) {
+			pr_err("rx skbuff buffer allocation failed!");
+			goto no_rx_mem;
+		}
+
+		/* DDONE=0 hands the descriptor to hardware */
+		memset(&ei_local->qrx_ring[i], 0, sizeof(struct PDMA_rxdesc));
+		ei_local->qrx_ring[i].rxd_info2.DDONE_bit = 0;
+		ei_local->qrx_ring[i].rxd_info2.LS0 = 0;
+		ei_local->qrx_ring[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
+		ei_local->qrx_ring[i].rxd_info1.PDP0 =
+		    dma_map_single(&ei_local->qdma_pdev->dev,
+				   ei_local->netrx0_skb_data[i] +
+				   NET_SKB_PAD,
+				   MAX_RX_LENGTH,
+				   DMA_FROM_DEVICE);
+		if (unlikely
+		    (dma_mapping_error
+		     (&ei_local->qdma_pdev->dev,
+		      ei_local->qrx_ring[i].rxd_info1.PDP0))) {
+			pr_err("[%s]dma_map_single() failed...\n", __func__);
+			goto no_rx_mem;
+		}
+	}
+	pr_err("\nphy_qrx_ring = 0x%p, qrx_ring = 0x%p\n",
+	       (void *)ei_local->phy_qrx_ring, ei_local->qrx_ring);
+
+	/* Tell the adapter where the RX rings are located. */
+	sys_reg_write(QRX_BASE_PTR_0,
+		      phys_to_bus((u32)ei_local->phy_qrx_ring));
+	sys_reg_write(QRX_MAX_CNT_0, cpu_to_le32((u32)NUM_QRX_DESC));
+	sys_reg_write(QRX_CRX_IDX_0, cpu_to_le32((u32)(NUM_QRX_DESC - 1)));
+
+	sys_reg_write(QDMA_RST_CFG, PST_DRX_IDX0);
+	ei_local->rx_ring[0] = ei_local->qrx_ring;
+
+	return 0;
+
+no_rx_mem:
+	return -ENOMEM;
+}
+
+/* Initialize the QDMA TX side: optional HW-SFQ setup, TX descriptor pool
+ * allocation (adds a NULL TXD to the HW), and the forwarding free queue.
+ * Returns 0 on success, -1 if either allocation step fails.
+ * Note: features are read from the global dev_raether device, not @dev,
+ * matching the original behavior.
+ */
+int fe_qdma_tx_dma_init(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	if (ei_local->features & FE_HW_SFQ)
+		sfq_init(dev);
+
+	/* tx desc alloc, add a NULL TXD to HW */
+	if (!qdma_tx_desc_alloc())
+		return -1;
+
+	return fq_qdma_init(dev) ? 0 : -1;
+}
+
+/* Tear down the QDMA RX ring: free the coherent descriptor array, then
+ * release every per-descriptor data buffer.
+ * NOTE(review): the streaming mappings created with dma_map_single() in
+ * fe_qdma_rx_dma_init() are never dma_unmap_single()'d here — verify this
+ * does not leak mappings on platforms with an IOMMU/SWIOTLB.
+ */
+void fe_qdma_rx_dma_deinit(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	int i;
+
+	/* free RX Ring */
+	dma_free_coherent(&ei_local->qdma_pdev->dev,
+			  NUM_QRX_DESC * sizeof(struct PDMA_rxdesc),
+			  ei_local->qrx_ring, ei_local->phy_qrx_ring);
+
+	/* free RX skb */
+	for (i = 0; i < NUM_QRX_DESC; i++) {
+		raeth_free_skb_data(ei_local->netrx0_skb_data[i]);
+		ei_local->netrx0_skb_data[i] = NULL;
+	}
+}
+
+/* Tear down the QDMA TX side: the TXD pool, the HW forwarding free queue
+ * (head descriptors and their data pages), and any skbs the driver still
+ * owns.  The 0xFFFFFFFF sentinel matches the "magic_id" placeholder stored
+ * in skb_free[] for descriptors that do not own an skb.
+ */
+void fe_qdma_tx_dma_deinit(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	int i;
+
+	/* free TX Ring */
+	if (ei_local->txd_pool)
+		dma_free_coherent(&ei_local->qdma_pdev->dev,
+				  num_tx_desc * QTXD_LEN,
+				  ei_local->txd_pool, ei_local->phy_txd_pool);
+	if (ei_local->free_head)
+		dma_free_coherent(&ei_local->qdma_pdev->dev,
+				  NUM_QDMA_PAGE * QTXD_LEN,
+				  ei_local->free_head, ei_local->phy_free_head);
+	if (ei_local->free_page_head)
+		dma_free_coherent(&ei_local->qdma_pdev->dev,
+				  NUM_QDMA_PAGE * QDMA_PAGE_SIZE,
+				  ei_local->free_page_head,
+				  ei_local->phy_free_page_head);
+
+	/* free TX data: skip the magic_id sentinel and NULL entries */
+	for (i = 0; i < num_tx_desc; i++) {
+		if ((ei_local->skb_free[i] != (struct sk_buff *)0xFFFFFFFF) &&
+		    (ei_local->skb_free[i] != 0))
+			dev_kfree_skb_any(ei_local->skb_free[i]);
+	}
+}
+
+/* Program the QDMA global configuration: random-early-drop thresholds
+ * (unless HW-SFQ manages them), DMA enables / burst size, and the TX NDP
+ * coherence check.
+ */
+void set_fe_qdma_glo_cfg(void)
+{
+	unsigned int reg_val;
+	unsigned int dma_glo_cfg = 0;
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	/* keep only the low byte of the current config */
+	reg_val = sys_reg_read(QDMA_GLO_CFG);
+	reg_val &= 0x000000FF;
+
+	sys_reg_write(QDMA_GLO_CFG, reg_val);
+	reg_val = sys_reg_read(QDMA_GLO_CFG);
+
+	/* Enable randon early drop and set drop threshold automatically */
+	if (!(ei_local->features & FE_HW_SFQ))
+		sys_reg_write(QDMA_FC_THRES, 0x4444);
+	sys_reg_write(QDMA_HRED2, 0x0);
+
+	dma_glo_cfg =
+	    (TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN | PDMA_BT_SIZE_16DWORDS | PDMA_DESC_32B_E);
+	dma_glo_cfg |= (RX_2B_OFFSET);
+	sys_reg_write(QDMA_GLO_CFG, dma_glo_cfg);
+
+	pr_err("Enable QDMA TX NDP coherence check and re-read mechanism\n");
+	reg_val = sys_reg_read(QDMA_GLO_CFG);
+	reg_val = reg_val | 0x400 | 0x100000;
+	sys_reg_write(QDMA_GLO_CFG, reg_val);
+	//sys_reg_write(QDMA_GLO_CFG, 0x95404575);
+	/* NOTE(review): this hard-coded value overwrites everything written
+	 * to QDMA_GLO_CFG above (dma_glo_cfg and the coherence bits) —
+	 * confirm 0x95404475 is the intended final configuration.
+	 */
+	sys_reg_write(QDMA_GLO_CFG, 0x95404475);
+	pr_err("***********QDMA_GLO_CFG=%x\n", sys_reg_read(QDMA_GLO_CFG));
+}
+
+/* ei_qdma_start_xmit - top-level QDMA transmit entry point
+ * @skb: frame to send
+ * @dev: transmitting net_device
+ * @gmac_no: egress GMAC (1 or 2)
+ *
+ * Selects the software TX ring from the skb queue mapping, gives the
+ * HW-NAT hook first claim on the frame, estimates how many TX descriptors
+ * the frame needs, and dispatches to the simple or the scatter/TSO send
+ * path.  Drops the frame (accounting it) when the ring lacks descriptors.
+ * Always returns 0.
+ */
+int ei_qdma_start_xmit(struct sk_buff *skb, struct net_device *dev, int gmac_no)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	unsigned int num_of_txd = 0;
+	unsigned int nr_frags = skb_shinfo(skb)->nr_frags, i;
+	skb_frag_t * frag;
+	struct PSEUDO_ADAPTER *p_ad;
+	int ring_no;
+
+	/* gmac 2 queues live after gmac 1's block of rings */
+	ring_no = skb->queue_mapping + (gmac_no - 1) * gmac1_txq_num;
+
+#if defined(CONFIG_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+	/* HW-NAT may consume the frame entirely */
+	if (ppe_hook_tx_eth) {
+		if (ppe_hook_tx_eth(skb, gmac_no) != 1) {
+			dev_kfree_skb_any(skb);
+			return 0;
+		}
+	}
+#endif
+
+// dev->trans_start = jiffies; /* save the timestamp */
+	netif_trans_update(dev);
+	/*spin_lock_irqsave(&ei_local->page_lock, flags); */
+
+	/* check free_txd_num before calling rt288_eth_send() */
+
+	/* descriptor budget: linear part plus each fragment, split by
+	 * MAX_QTXD_LEN chunks (see cal_frag_txd_num)
+	 */
+	if (ei_local->features & FE_TSO) {
+		num_of_txd += cal_frag_txd_num(skb->len - skb->data_len);
+		if (nr_frags != 0) {
+			for (i = 0; i < nr_frags; i++) {
+				frag = &skb_shinfo(skb)->frags[i];
+				num_of_txd += cal_frag_txd_num(skb_frag_size(frag));
+			}
+		}
+	} else {
+		num_of_txd = 1;
+	}
+
+/* if ((ei_local->free_txd_num > num_of_txd + 1)) { */
+	if (likely(atomic_read(&ei_local->free_txd_num[ring_no]) >
+		   (num_of_txd + 1))) {
+		if (num_of_txd == 1)
+			rt2880_qdma_eth_send(ei_local, dev, skb,
+					     gmac_no, ring_no);
+		else
+			rt2880_qdma_eth_send_tso(ei_local, dev, skb,
+						 gmac_no, ring_no);
+	} else {
+		/* ring full: drop and account against the right device */
+		if (ei_local->features & FE_GE2_SUPPORT) {
+			if (gmac_no == 2) {
+				if (ei_local->pseudo_dev) {
+					p_ad =
+					    netdev_priv(ei_local->pseudo_dev);
+					p_ad->stat.tx_dropped++;
+				}
+			} else {
+				ei_local->stat.tx_dropped++;
+			}
+		} else {
+			ei_local->stat.tx_dropped++;
+		}
+		/* kfree_skb(skb); */
+		dev_kfree_skb_any(skb);
+		/* spin_unlock_irqrestore(&ei_local->page_lock, flags); */
+		return 0;
+	}
+	/* spin_unlock_irqrestore(&ei_local->page_lock, flags); */
+	return 0;
+}
+
+/* ei_qdma_xmit_housekeeping - reclaim completed QDMA TX descriptors
+ * @netdev: device whose TX rings are serviced
+ * @budget: NAPI budget (currently not enforced by the walk below)
+ *
+ * Walks the descriptor chain from the software release index up to the
+ * hardware's QTX_DRX_PTR, returning each descriptor to its ring's free
+ * list and freeing the skb attached to the chain's last segment (entries
+ * holding the magic_id sentinel own no skb).  Free counters are updated
+ * in one atomic_add per ring at the end, then the CPU release pointer is
+ * written back.  Always returns 0.
+ */
+int ei_qdma_xmit_housekeeping(struct net_device *netdev, int budget)
+{
+	struct END_DEVICE *ei_local = netdev_priv(netdev);
+
+	dma_addr_t dma_ptr;
+	struct QDMA_txdesc *cpu_ptr = NULL;
+	dma_addr_t tmp_ptr;
+	unsigned int ctx_offset = 0;
+	unsigned int dtx_offset = 0;
+	unsigned int rls_cnt[TOTAL_TXQ_NUM] = { 0 };
+	int ring_no;
+	int i;
+
+	/* convert the HW's physical release pointer to a pool index */
+	dma_ptr = (dma_addr_t)sys_reg_read(QTX_DRX_PTR);
+	ctx_offset = ei_local->rls_cpu_idx;
+	dtx_offset = (dma_ptr - ei_local->phy_txd_pool) / QTXD_LEN;
+	cpu_ptr = (ei_local->txd_pool + (ctx_offset));
+	while (ctx_offset != dtx_offset) {
+		/* 1. keep cpu next TXD */
+		tmp_ptr = (dma_addr_t)cpu_ptr->txd_info2.NDP;
+		ring_no = ring_no_mapping(ctx_offset);
+		rls_cnt[ring_no]++;
+		/* 2. release TXD */
+		ei_local->txd_pool_info[ei_local->free_txd_tail[ring_no]] =
+		    ctx_offset;
+		ei_local->free_txd_tail[ring_no] = ctx_offset;
+		/* atomic_add(1, &ei_local->free_txd_num[ring_no]); */
+		/* 3. update ctx_offset and free skb memory */
+		ctx_offset = (tmp_ptr - ei_local->phy_txd_pool) / QTXD_LEN;
+		if (ei_local->features & FE_TSO) {
+			if (ei_local->skb_free[ctx_offset] != magic_id) {
+				dev_kfree_skb_any(ei_local->skb_free
+						  [ctx_offset]);
+			}
+		} else {
+			dev_kfree_skb_any(ei_local->skb_free[ctx_offset]);
+		}
+		ei_local->skb_free[ctx_offset] = 0;
+		/* 4. update cpu_ptr */
+		cpu_ptr = (ei_local->txd_pool + ctx_offset);
+	}
+	/* publish reclaimed descriptor counts, one add per ring */
+	for (i = 0; i < TOTAL_TXQ_NUM; i++) {
+		if (rls_cnt[i] > 0)
+			atomic_add(rls_cnt[i], &ei_local->free_txd_num[i]);
+	}
+	/* atomic_add(rls_cnt, &ei_local->free_txd_num[0]); */
+	ei_local->rls_cpu_idx = ctx_offset;
+	netif_wake_queue(netdev);
+	if (ei_local->features & FE_GE2_SUPPORT)
+		netif_wake_queue(ei_local->pseudo_dev);
+	ei_local->tx_ring_full = 0;
+	sys_reg_write(QTX_CRX_PTR,
+		      (ei_local->phy_txd_pool + (ctx_offset * QTXD_LEN)));
+
+	return 0;
+}
+
+/* Map a flat QoS register offset (0x000..0x3ff) onto a QDMA register page
+ * and an in-page offset:
+ *   q16~q31: 0x100 <= off < 0x200 -> page 1
+ *   q32~q47: 0x200 <= off < 0x300 -> page 2
+ *   q48~q63: 0x300 <= off < 0x400 -> page 3
+ * Offset 0x777 is a magic value user space uses to address CR 0x1b101a14
+ * (page 0, offset 0x214).  Rewrites *off in place and returns the page.
+ * (Previously this logic was duplicated in the read and write paths.)
+ */
+static unsigned int qdma_ioctl_page_offset(unsigned int *off)
+{
+	unsigned int page = 0;
+
+	if (*off >= 0x100 && *off < 0x200) {
+		page = 1;
+		*off -= 0x100;
+	} else if (*off >= 0x200 && *off < 0x300) {
+		page = 2;
+		*off -= 0x200;
+	} else if (*off >= 0x300 && *off < 0x400) {
+		page = 3;
+		*off -= 0x300;
+	}
+	/* magic number for ioctl to identify CR 0x1b101a14 */
+	if (*off == 0x777) {
+		page = 0;
+		*off = 0x214;
+	}
+
+	return page;
+}
+
+/* QDMA private ioctl dispatcher: register read/write (with MT7622 register
+ * paging), skb->mark -> queue table updates, and SFQ web-UI enable.
+ * Returns 0 on success, a negative errno on failure, or 1 for an unknown
+ * command.
+ */
+int ei_qdma_ioctl(struct net_device *dev, struct ifreq *ifr,
+		  struct qdma_ioctl_data *data)
+{
+	int ret = 0;
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	unsigned int cmd;
+
+	cmd = data->cmd;
+
+	switch (cmd) {
+	case RAETH_QDMA_REG_READ:
+		if (data->off > REG_HQOS_MAX) {
+			ret = -EINVAL;
+			break;
+		}
+
+		if (ei_local->chip_name == MT7622_FE)
+			sys_reg_write(QDMA_PAGE,
+				      qdma_ioctl_page_offset(&data->off));
+
+		data->val = sys_reg_read(QTX_CFG_0 + data->off);
+		pr_info("read reg off:%x val:%x\n", data->off, data->val);
+		ret = copy_to_user(ifr->ifr_data, data, sizeof(*data));
+		sys_reg_write(QDMA_PAGE, 0);
+		if (ret) {
+			pr_info("ret=%d\n", ret);
+			ret = -EFAULT;
+		}
+		break;
+	case RAETH_QDMA_REG_WRITE:
+		if (data->off > REG_HQOS_MAX) {
+			ret = -EINVAL;
+			break;
+		}
+
+		if (ei_local->chip_name == MT7622_FE) {
+			/*QoS must enable QDMA drop packet policy*/
+			sys_reg_write(QDMA_FC_THRES, 0x83834444);
+			sys_reg_write(QDMA_PAGE,
+				      qdma_ioctl_page_offset(&data->off));
+			sys_reg_write(QTX_CFG_0 + data->off, data->val);
+			sys_reg_write(QDMA_PAGE, 0);
+		} else {
+			sys_reg_write(QTX_CFG_0 + data->off, data->val);
+		}
+		break;
+	case RAETH_QDMA_QUEUE_MAPPING:
+		/* bit 8 toggles LAN/WAN separation; low byte is the mark */
+		if ((data->off & 0x100) == 0x100)
+			lan_wan_separate = 1;
+		else
+			lan_wan_separate = 0;
+		data->off &= 0xff;
+		M2Q_table[data->off] = data->val;
+		break;
+	case RAETH_QDMA_SFQ_WEB_ENABLE:
+		if (ei_local->features & FE_HW_SFQ)
+			web_sfq_enable = ((data->val) == 0x1) ? 1 : 0;
+		else
+			ret = -EINVAL;
+		break;
+	default:
+		ret = 1;
+		break;
+	}
+
+	return ret;
+}
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_qdma.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_qdma.h
new file mode 100644
index 0000000..ce1af4d
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_qdma.h
@@ -0,0 +1,20 @@
+/* Copyright 2016 MediaTek Inc.
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef RAETHER_QDMA_H
+#define RAETHER_QDMA_H
+
+/* Global net_device registered by the raether driver core. */
+extern struct net_device *dev_raether;
+/* Program the frame-engine DMA global configuration registers. */
+void set_fe_dma_glo_cfg(void);
+
+#endif
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_rss.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_rss.c
new file mode 100644
index 0000000..972c4e0
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_rss.c
@@ -0,0 +1,1222 @@
+/* Copyright 2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "raether.h"
+#include "raether_rss.h"
+#include "raether_hwlro.h"
+#include "ra_mac.h"
+
+static struct proc_dir_entry *proc_rss_ring1, *proc_rss_ring2, *proc_rss_ring3;
+
+/* Initialize RSS with 4 RX rings: allocate descriptor rings 1..3 with
+ * mapped data buffers, point the PDMA at them, then configure and enable
+ * the RSS hash/indirection registers.
+ * Returns 0 on success, -ENOMEM on allocation/mapping failure.
+ *
+ * Fix: the per-ring dma_alloc_coherent() result was used without a NULL
+ * check, so an allocation failure dereferenced a NULL ring pointer.
+ * NOTE(review): the error path does not unwind earlier allocations —
+ * mirrors the original behavior; confirm whether unwind is needed.
+ */
+int fe_rss_4ring_init(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	int skb_size;
+	int i, j;
+
+	skb_size = SKB_DATA_ALIGN(MAX_RX_LENGTH + NET_IP_ALIGN) +
+	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	/* Initial RX Ring 1 ~ 3 */
+	for (i = 1; i < MAX_RX_RING_NUM; i++) {
+		ei_local->rx_ring[i] =
+		    dma_alloc_coherent(dev->dev.parent,
+				       NUM_RSS_RX_DESC *
+				       sizeof(struct PDMA_rxdesc),
+				       &ei_local->phy_rx_ring[i],
+				       GFP_ATOMIC | __GFP_ZERO);
+		if (!ei_local->rx_ring[i]) {
+			pr_info("rx ring %d allocation failed!\n", i);
+			goto no_rx_mem;
+		}
+		for (j = 0; j < NUM_RSS_RX_DESC; j++) {
+			ei_local->netrx_skb_data[i][j] =
+			    raeth_alloc_skb_data(skb_size, GFP_KERNEL);
+
+			if (!ei_local->netrx_skb_data[i][j]) {
+				pr_info("rx skbuff buffer allocation failed!\n");
+				goto no_rx_mem;
+			}
+
+			/* DDONE=0 hands the descriptor to hardware */
+			memset(&ei_local->rx_ring[i][j], 0,
+			       sizeof(struct PDMA_rxdesc));
+			ei_local->rx_ring[i][j].rxd_info2.DDONE_bit = 0;
+			ei_local->rx_ring[i][j].rxd_info2.LS0 = 0;
+			ei_local->rx_ring[i][j].rxd_info2.PLEN0 =
+			    SET_ADMA_RX_LEN0(MAX_RX_LENGTH);
+			ei_local->rx_ring[i][j].rxd_info1.PDP0 =
+			    dma_map_single(dev->dev.parent,
+					   ei_local->netrx_skb_data[i][j] +
+					   NET_SKB_PAD,
+					   MAX_RX_LENGTH, DMA_FROM_DEVICE);
+			if (unlikely
+			    (dma_mapping_error
+			     (dev->dev.parent,
+			      ei_local->rx_ring[i][j].rxd_info1.PDP0))) {
+				pr_info("[%s]dma_map_single() failed...\n",
+					__func__);
+				goto no_rx_mem;
+			}
+		}
+		pr_info("\nphy_rx_ring[%d] = 0x%08x, rx_ring[%d] = 0x%p\n",
+			i, (unsigned int)ei_local->phy_rx_ring[i],
+			i, (void __iomem *)ei_local->rx_ring[i]);
+	}
+
+	sys_reg_write(RX_BASE_PTR3, phys_to_bus((u32)ei_local->phy_rx_ring[3]));
+	sys_reg_write(RX_MAX_CNT3, cpu_to_le32((u32)NUM_RSS_RX_DESC));
+	sys_reg_write(RX_CALC_IDX3, cpu_to_le32((u32)(NUM_RSS_RX_DESC - 1)));
+	sys_reg_write(PDMA_RST_CFG, PST_DRX_IDX3);
+	sys_reg_write(RX_BASE_PTR2, phys_to_bus((u32)ei_local->phy_rx_ring[2]));
+	sys_reg_write(RX_MAX_CNT2, cpu_to_le32((u32)NUM_RSS_RX_DESC));
+	sys_reg_write(RX_CALC_IDX2, cpu_to_le32((u32)(NUM_RSS_RX_DESC - 1)));
+	sys_reg_write(PDMA_RST_CFG, PST_DRX_IDX2);
+	sys_reg_write(RX_BASE_PTR1, phys_to_bus((u32)ei_local->phy_rx_ring[1]));
+	sys_reg_write(RX_MAX_CNT1, cpu_to_le32((u32)NUM_RSS_RX_DESC));
+	sys_reg_write(RX_CALC_IDX1, cpu_to_le32((u32)(NUM_RSS_RX_DESC - 1)));
+	sys_reg_write(PDMA_RST_CFG, PST_DRX_IDX1);
+
+	/* 1. Set RX ring1~3 to pse modes */
+	SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_PSE_MODE);
+	SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_PSE_MODE);
+	SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_PSE_MODE);
+
+	/* 2. Enable non-lro multiple rx */
+	SET_PDMA_NON_LRO_MULTI_EN(1);	/* MRX EN */
+
+	/*Hash Type*/
+	SET_PDMA_RSS_IPV4_TYPE(7);
+	SET_PDMA_RSS_IPV6_TYPE(7);
+	/* 3. Select the size of indirection table */
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW0, 0x39393939);
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW1, 0x93939393);
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW2, 0x39399393);
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW3, 0x93933939);
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW4, 0x39393939);
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW5, 0x93939393);
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW6, 0x39399393);
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW7, 0x93933939);
+	/* 4. Pause */
+	SET_PDMA_RSS_CFG_REQ(1);
+
+	/* 5. Enable RSS */
+	SET_PDMA_RSS_EN(1);
+
+	/* 6. Release pause */
+	SET_PDMA_RSS_CFG_REQ(0);
+
+	return 0;
+
+no_rx_mem:
+	return -ENOMEM;
+}
+
+/* Release the RSS RX rings 1..MAX_RX_RING_NUM-1: return each ring's
+ * coherent descriptor memory, then free the per-descriptor data buffers.
+ */
+void fe_rss_4ring_deinit(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	int ring, slot;
+
+	for (ring = 1; ring < MAX_RX_RING_NUM; ring++) {
+		/* descriptor array for this ring */
+		dma_free_coherent(dev->dev.parent,
+				  NUM_RSS_RX_DESC * sizeof(struct PDMA_rxdesc),
+				  ei_local->rx_ring[ring],
+				  ei_local->phy_rx_ring[ring]);
+
+		/* backing data buffers */
+		for (slot = 0; slot < NUM_RSS_RX_DESC; slot++) {
+			raeth_free_skb_data(ei_local->netrx_skb_data[ring][slot]);
+			ei_local->netrx_skb_data[ring][slot] = NULL;
+		}
+	}
+}
+
+/* Initialize RSS with 2 RX rings: allocate descriptor ring 1 with mapped
+ * data buffers, point the PDMA at it, then configure and enable the RSS
+ * hash/indirection registers (all slots mapped to ring 1).
+ * Returns 0 on success, -ENOMEM on allocation/mapping failure.
+ *
+ * Fix: the per-ring dma_alloc_coherent() result was used without a NULL
+ * check, so an allocation failure dereferenced a NULL ring pointer.
+ * NOTE(review): the error path does not unwind earlier allocations —
+ * mirrors the original behavior; confirm whether unwind is needed.
+ */
+int fe_rss_2ring_init(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	int skb_size;
+	int i, j;
+
+	skb_size = SKB_DATA_ALIGN(MAX_RX_LENGTH + NET_IP_ALIGN) +
+	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	for (i = 1; i < MAX_RX_RING_NUM_2RING; i++) {
+		ei_local->rx_ring[i] =
+		    dma_alloc_coherent(dev->dev.parent,
+				       NUM_RSS_RX_DESC *
+				       sizeof(struct PDMA_rxdesc),
+				       &ei_local->phy_rx_ring[i],
+				       GFP_ATOMIC | __GFP_ZERO);
+		if (!ei_local->rx_ring[i]) {
+			pr_info("rx ring %d allocation failed!\n", i);
+			goto no_rx_mem;
+		}
+		for (j = 0; j < NUM_RSS_RX_DESC; j++) {
+			ei_local->netrx_skb_data[i][j] =
+			    raeth_alloc_skb_data(skb_size, GFP_KERNEL);
+
+			if (!ei_local->netrx_skb_data[i][j]) {
+				pr_info("rx skbuff buffer allocation failed!\n");
+				goto no_rx_mem;
+			}
+
+			/* DDONE=0 hands the descriptor to hardware */
+			memset(&ei_local->rx_ring[i][j], 0,
+			       sizeof(struct PDMA_rxdesc));
+			ei_local->rx_ring[i][j].rxd_info2.DDONE_bit = 0;
+			ei_local->rx_ring[i][j].rxd_info2.LS0 = 0;
+			ei_local->rx_ring[i][j].rxd_info2.PLEN0 =
+			    SET_ADMA_RX_LEN0(MAX_RX_LENGTH);
+			ei_local->rx_ring[i][j].rxd_info1.PDP0 =
+			    dma_map_single(dev->dev.parent,
+					   ei_local->netrx_skb_data[i][j] +
+					   NET_SKB_PAD,
+					   MAX_RX_LENGTH, DMA_FROM_DEVICE);
+			if (unlikely
+			    (dma_mapping_error
+			     (dev->dev.parent,
+			      ei_local->rx_ring[i][j].rxd_info1.PDP0))) {
+				pr_info("[%s]dma_map_single() failed...\n",
+					__func__);
+				goto no_rx_mem;
+			}
+		}
+		pr_info("\nphy_rx_ring[%d] = 0x%08x, rx_ring[%d] = 0x%p\n",
+			i, (unsigned int)ei_local->phy_rx_ring[i],
+			i, (void __iomem *)ei_local->rx_ring[i]);
+	}
+
+	sys_reg_write(RX_BASE_PTR1, phys_to_bus((u32)ei_local->phy_rx_ring[1]));
+	sys_reg_write(RX_MAX_CNT1, cpu_to_le32((u32)NUM_RSS_RX_DESC));
+	sys_reg_write(RX_CALC_IDX1, cpu_to_le32((u32)(NUM_RSS_RX_DESC - 1)));
+	sys_reg_write(PDMA_RST_CFG, PST_DRX_IDX1);
+
+	/* 1. Set RX ring1~3 to pse modes */
+	SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_PSE_MODE);
+
+	/* 2. Enable non-lro multiple rx */
+	SET_PDMA_NON_LRO_MULTI_EN(1);	/* MRX EN */
+
+	/*Hash Type*/
+	SET_PDMA_RSS_IPV4_TYPE(7);
+	SET_PDMA_RSS_IPV6_TYPE(7);
+	/* 3. Select the size of indirection table */
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW0, 0x44444444);
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW1, 0x44444444);
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW2, 0x44444444);
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW3, 0x44444444);
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW4, 0x44444444);
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW5, 0x44444444);
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW6, 0x44444444);
+	SET_PDMA_RSS_CR_VALUE(ADMA_RSS_INDR_TABLE_DW7, 0x44444444);
+	/* 4. Pause */
+	SET_PDMA_RSS_CFG_REQ(1);
+
+	/* 5. Enable RSS */
+	SET_PDMA_RSS_EN(1);
+
+	/* 6. Release pause */
+	SET_PDMA_RSS_CFG_REQ(0);
+
+	return 0;
+
+no_rx_mem:
+	return -ENOMEM;
+}
+
+/* Release the RSS RX rings 1..MAX_RX_RING_NUM_2RING-1: return each ring's
+ * coherent descriptor memory, then free the per-descriptor data buffers.
+ */
+void fe_rss_2ring_deinit(struct net_device *dev)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	int ring, slot;
+
+	for (ring = 1; ring < MAX_RX_RING_NUM_2RING; ring++) {
+		/* descriptor array for this ring */
+		dma_free_coherent(dev->dev.parent,
+				  NUM_RSS_RX_DESC * sizeof(struct PDMA_rxdesc),
+				  ei_local->rx_ring[ring],
+				  ei_local->phy_rx_ring[ring]);
+
+		/* backing data buffers */
+		for (slot = 0; slot < NUM_RSS_RX_DESC; slot++) {
+			raeth_free_skb_data(ei_local->netrx_skb_data[ring][slot]);
+			ei_local->netrx_skb_data[ring][slot] = NULL;
+		}
+	}
+}
+
+/* Re-arm one RSS RX descriptor with a fresh data buffer.
+ * @ei_local and @rx_ring_no are currently unused (kept for symmetry with
+ * the other descriptor-init helpers).
+ * NOTE(review): DDONE_bit is cleared last, presumably so the descriptor is
+ * handed back to hardware only after PDP0/PLEN0 are valid — confirm whether
+ * a write barrier is required before it on weakly-ordered cores.
+ */
+static inline void hw_rss_rx_desc_init(struct END_DEVICE *ei_local,
+				       struct PDMA_rxdesc *rx_ring,
+				       unsigned int rx_ring_no,
+				       dma_addr_t dma_addr)
+{
+	rx_ring->rxd_info2.PLEN0 = MAX_RX_LENGTH;
+	rx_ring->rxd_info1.PDP0 = dma_addr;
+	rx_ring->rxd_info2.LS0 = 0;
+	rx_ring->rxd_info2.DDONE_bit = 0;
+}
+
+/* Return the MMIO address of the RX CALC index register for @rx_ring_no;
+ * the per-ring registers are spaced 0x10 bytes apart.
+ */
+static inline void __iomem *get_rx_cal_idx_reg(unsigned int rx_ring_no)
+{
+	return (void __iomem *)(RAETH_RX_CALC_IDX0 + (rx_ring_no << 4));
+}
+
+/* fe_rss0_recv - NAPI RX polling for RSS ring 0 (the regular PDMA ring).
+ *
+ * Reaps up to @budget completed descriptors, builds an skb around each
+ * received buffer, refills the slot with a freshly allocated and
+ * DMA-mapped buffer, and hands the packet to the stack (or to the HW-NAT
+ * hook when CONFIG_RA_HW_NAT is enabled).
+ *
+ * Returns the number of descriptors examined, or (budget + 1) when an
+ * allocation/mapping failure forced the current packet to be dropped.
+ */
+int fe_rss0_recv(struct net_device *dev,
+		 struct napi_struct *napi,
+		 int budget)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	struct PSEUDO_ADAPTER *p_ad = netdev_priv(ei_local->pseudo_dev);
+	struct sk_buff *rx_skb;
+	struct PDMA_rxdesc *rx_ring, *rx_ring_next;
+	void *rx_data, *rx_data_next, *new_data;
+	unsigned int length = 0;
+	unsigned int rx_ring_no = 0, rx_ring_no_next = 0;
+	unsigned int rx_dma_owner_idx, rx_dma_owner_idx_next;
+	unsigned int rx_dma_owner_lro[MAX_RX_RING_NUM];
+	unsigned int skb_size, map_size;
+	int rx_processed = 0;
+
+	/* Ring 0 is the regular PDMA ring, hence num_rx_desc here rather
+	 * than NUM_RSS_RX_DESC as in rings 1-3.
+	 */
+	rx_dma_owner_lro[0] = (ei_local->rx_calc_idx[0] + 1) % num_rx_desc;
+
+	rx_ring_no = 0;
+	rx_dma_owner_idx = rx_dma_owner_lro[rx_ring_no];
+	rx_ring = &ei_local->rx_ring[rx_ring_no][rx_dma_owner_idx];
+	rx_data = ei_local->netrx_skb_data[rx_ring_no][rx_dma_owner_idx];
+
+	for (;;) {
+		dma_addr_t dma_addr;
+
+		/* stop when the budget is spent or the descriptor is still
+		 * owned by the DMA engine
+		 */
+		if ((rx_processed++ > budget) ||
+		    (rx_ring->rxd_info2.DDONE_bit == 0))
+			break;
+
+		/* prefetch the next RXD to hide descriptor-fetch latency */
+		rx_dma_owner_lro[rx_ring_no] =
+			(rx_dma_owner_idx + 1) % num_rx_desc;
+		skb_size =
+		    SKB_DATA_ALIGN(MAX_RX_LENGTH + NET_IP_ALIGN +
+				   NET_SKB_PAD) +
+		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+		map_size = MAX_RX_LENGTH;
+
+		rx_ring_no_next = rx_ring_no;
+		rx_dma_owner_idx_next = rx_dma_owner_lro[rx_ring_no_next];
+
+		rx_ring_next =
+			&ei_local->rx_ring
+			[rx_ring_no_next][rx_dma_owner_idx_next];
+		rx_data_next =
+			ei_local->netrx_skb_data
+			[rx_ring_no_next][rx_dma_owner_idx_next];
+		prefetch(rx_ring_next);
+
+		/* Allocate the replacement buffer before passing the packet
+		 * up, so the ring slot is never left without backing memory.
+		 */
+		new_data = raeth_alloc_skb_data(skb_size, GFP_ATOMIC);
+		if (unlikely(!new_data)) {
+			pr_info("skb not available...\n");
+			goto skb_err;
+		}
+
+		dma_addr = dma_map_single(dev->dev.parent,
+					  new_data + NET_SKB_PAD,
+					  map_size,
+					  DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(dev->dev.parent, dma_addr))) {
+			pr_info("[%s]dma_map_single() failed...\n", __func__);
+			raeth_free_skb_data(new_data);
+			goto skb_err;
+		}
+
+		rx_skb = raeth_build_skb(rx_data, skb_size);
+		if (unlikely(!rx_skb)) {
+			/* Keep the old (still mapped) buffer in the ring slot
+			 * and release the replacement instead.  The previous
+			 * code did put_page() on the in-ring buffer here,
+			 * leaving the descriptor pointing at freed memory and
+			 * leaking new_data together with its DMA mapping.
+			 */
+			dma_unmap_single(dev->dev.parent, dma_addr,
+					 map_size, DMA_FROM_DEVICE);
+			raeth_free_skb_data(new_data);
+			pr_info("build_skb failed\n");
+			goto skb_err;
+		}
+		skb_reserve(rx_skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+		length = rx_ring->rxd_info2.PLEN0;
+		dma_unmap_single(dev->dev.parent,
+				 rx_ring->rxd_info1.PDP0,
+				 length, DMA_FROM_DEVICE);
+
+		prefetch(rx_skb->data);
+
+		skb_put(rx_skb, length);
+
+		/* SP == 2: the packet ingressed on GMAC2 (pseudo device) */
+		if (rx_ring->rxd_info4.SP == 2) {
+			if (ei_local->pseudo_dev) {
+				rx_skb->dev = ei_local->pseudo_dev;
+				rx_skb->protocol =
+					eth_type_trans(rx_skb,
+						       ei_local->pseudo_dev);
+			} else {
+				pr_info
+				    ("pseudo_dev is still not initialize ");
+				pr_info
+				    ("but receive packet from GMAC2\n");
+			}
+		} else {
+			rx_skb->dev = dev;
+			rx_skb->protocol = eth_type_trans(rx_skb, dev);
+		}
+
+		/* rx checksum offload */
+		if (likely(rx_ring->rxd_info4.L4VLD))
+			rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			rx_skb->ip_summed = CHECKSUM_NONE;
+
+#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+		/* stash the RX descriptor info for the HW-NAT (FOE) engine */
+		if (ppe_hook_rx_eth) {
+			if (IS_SPACE_AVAILABLE_HEAD(rx_skb)) {
+				*(uint32_t *)(FOE_INFO_START_ADDR_HEAD(rx_skb)) =
+					*(uint32_t *)&rx_ring->rxd_info4;
+				FOE_ALG_HEAD(rx_skb) = 0;
+				FOE_MAGIC_TAG_HEAD(rx_skb) = FOE_MAGIC_GE;
+				FOE_TAG_PROTECT_HEAD(rx_skb) = TAG_PROTECT;
+			}
+			if (IS_SPACE_AVAILABLE_TAIL(rx_skb)) {
+				*(uint32_t *)(FOE_INFO_START_ADDR_TAIL(rx_skb) + 2) =
+					*(uint32_t *)&rx_ring->rxd_info4;
+				FOE_ALG_TAIL(rx_skb) = 0;
+				FOE_MAGIC_TAG_TAIL(rx_skb) = FOE_MAGIC_GE;
+				FOE_TAG_PROTECT_TAIL(rx_skb) = TAG_PROTECT;
+			}
+		}
+#endif
+		if (ei_local->features & FE_HW_VLAN_RX) {
+			if (rx_ring->rxd_info2.TAG)
+				__vlan_hwaccel_put_tag(rx_skb,
+						       htons(ETH_P_8021Q),
+						       rx_ring->rxd_info3.VID);
+		}
+/* ra_sw_nat_hook_rx return 1 --> continue
+ * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
+ */
+#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+		if ((!ppe_hook_rx_eth) ||
+		    (ppe_hook_rx_eth && ppe_hook_rx_eth(rx_skb))) {
+#endif
+			if (ei_local->features & FE_INT_NAPI) {
+				/* napi_gro_receive(napi, rx_skb); */
+				netif_receive_skb(rx_skb);
+			} else {
+				netif_rx(rx_skb);
+			}
+
+#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+		}
+#endif
+
+		if (rx_ring->rxd_info4.SP == 2) {
+			p_ad->stat.rx_packets++;
+			p_ad->stat.rx_bytes += length;
+		} else {
+			ei_local->stat.rx_packets++;
+			ei_local->stat.rx_bytes += length;
+		}
+
+		/* re-arm the descriptor with the replacement buffer */
+		hw_rss_rx_desc_init(ei_local,
+				    rx_ring,
+				    rx_ring_no,
+				    dma_addr);
+		ei_local->netrx_skb_data[rx_ring_no][rx_dma_owner_idx] =
+			new_data;
+
+		/* make sure that all changes to the dma ring are flushed before
+		 * we continue
+		 */
+		wmb();
+		sys_reg_write(RAETH_RX_CALC_IDX0, rx_dma_owner_idx);
+		ei_local->rx_calc_idx[rx_ring_no] = rx_dma_owner_idx;
+
+		/* use prefetched variable */
+		rx_dma_owner_idx = rx_dma_owner_idx_next;
+		rx_ring_no = rx_ring_no_next;
+		rx_ring = rx_ring_next;
+		rx_data = rx_data_next;
+	} /* for */
+
+	return rx_processed;
+
+skb_err:
+	/* rx packet from GE2 */
+	if (rx_ring->rxd_info4.SP == 2)
+		p_ad->stat.rx_dropped++;
+	else
+		ei_local->stat.rx_dropped++;
+
+	/* Discard the rx packet: recycle the slot's current buffer */
+	hw_rss_rx_desc_init(ei_local,
+			    rx_ring,
+			    rx_ring_no,
+			    rx_ring->rxd_info1.PDP0);
+	sys_reg_write(RAETH_RX_CALC_IDX0, rx_dma_owner_idx);
+	ei_local->rx_calc_idx[rx_ring_no] = rx_dma_owner_idx;
+
+	return (budget + 1);
+}
+
+/* fe_rss1_recv - NAPI RX polling for RSS ring 1.
+ *
+ * Same flow as fe_rss0_recv() but for RSS ring 1: NUM_RSS_RX_DESC
+ * descriptors and the RAETH_RX_CALC_IDX1 CPU-index register.
+ *
+ * Returns the number of descriptors examined, or (budget + 1) when an
+ * allocation/mapping failure forced the current packet to be dropped.
+ */
+int fe_rss1_recv(struct net_device *dev,
+		 struct napi_struct *napi,
+		 int budget)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	struct PSEUDO_ADAPTER *p_ad = netdev_priv(ei_local->pseudo_dev);
+	struct sk_buff *rx_skb;
+	struct PDMA_rxdesc *rx_ring, *rx_ring_next;
+	void *rx_data, *rx_data_next, *new_data;
+	unsigned int length = 0;
+	unsigned int rx_ring_no = 0, rx_ring_no_next = 0;
+	unsigned int rx_dma_owner_idx, rx_dma_owner_idx_next;
+	unsigned int rx_dma_owner_lro[MAX_RX_RING_NUM];
+	unsigned int skb_size, map_size;
+	int rx_processed = 0;
+
+	/* get cpu owner index of ring 1 */
+	rx_dma_owner_lro[1] = (ei_local->rx_calc_idx[1] + 1) % NUM_RSS_RX_DESC;
+
+	rx_ring_no = 1;
+	rx_dma_owner_idx = rx_dma_owner_lro[rx_ring_no];
+	rx_ring = &ei_local->rx_ring[rx_ring_no][rx_dma_owner_idx];
+	rx_data = ei_local->netrx_skb_data[rx_ring_no][rx_dma_owner_idx];
+
+	for (;;) {
+		dma_addr_t dma_addr;
+
+		/* budget spent, or descriptor still owned by the DMA engine */
+		if ((rx_processed++ > budget) ||
+		    (rx_ring->rxd_info2.DDONE_bit == 0))
+			break;
+
+		/* prefetch the next handling RXD */
+		rx_dma_owner_lro[rx_ring_no] =
+			(rx_dma_owner_idx + 1) % NUM_RSS_RX_DESC;
+		skb_size =
+		    SKB_DATA_ALIGN(MAX_RX_LENGTH + NET_IP_ALIGN +
+				   NET_SKB_PAD) +
+		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+		map_size = MAX_RX_LENGTH;
+
+		rx_ring_no_next = rx_ring_no;
+		rx_dma_owner_idx_next = rx_dma_owner_lro[rx_ring_no_next];
+
+		rx_ring_next =
+			&ei_local->rx_ring
+			[rx_ring_no_next][rx_dma_owner_idx_next];
+		rx_data_next =
+			ei_local->netrx_skb_data
+			[rx_ring_no_next][rx_dma_owner_idx_next];
+		prefetch(rx_ring_next);
+
+		/* allocate the replacement buffer before handing the packet
+		 * to the cpu, so the ring slot always keeps backing memory
+		 */
+		new_data = raeth_alloc_skb_data(skb_size, GFP_ATOMIC);
+		if (unlikely(!new_data)) {
+			pr_info("skb not available...\n");
+			goto skb_err;
+		}
+
+		dma_addr = dma_map_single(dev->dev.parent,
+					  new_data + NET_SKB_PAD,
+					  map_size,
+					  DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(dev->dev.parent, dma_addr))) {
+			pr_info("[%s]dma_map_single() failed...\n", __func__);
+			raeth_free_skb_data(new_data);
+			goto skb_err;
+		}
+
+		rx_skb = raeth_build_skb(rx_data, skb_size);
+		if (unlikely(!rx_skb)) {
+			/* Keep the old (still mapped) buffer in the ring and
+			 * release the replacement: freeing the in-ring page
+			 * here (as before) left the descriptor pointing at
+			 * freed memory and leaked new_data and its mapping.
+			 */
+			dma_unmap_single(dev->dev.parent, dma_addr,
+					 map_size, DMA_FROM_DEVICE);
+			raeth_free_skb_data(new_data);
+			pr_info("build_skb failed\n");
+			goto skb_err;
+		}
+		skb_reserve(rx_skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+		length = rx_ring->rxd_info2.PLEN0;
+		dma_unmap_single(dev->dev.parent,
+				 rx_ring->rxd_info1.PDP0,
+				 length, DMA_FROM_DEVICE);
+
+		prefetch(rx_skb->data);
+
+		skb_put(rx_skb, length);
+
+		/* SP == 2: the packet ingressed on GMAC2 (pseudo device) */
+		if (rx_ring->rxd_info4.SP == 2) {
+			if (ei_local->pseudo_dev) {
+				rx_skb->dev = ei_local->pseudo_dev;
+				rx_skb->protocol =
+					eth_type_trans(rx_skb,
+						       ei_local->pseudo_dev);
+			} else {
+				pr_info
+				    ("pseudo_dev is still not initialize ");
+				pr_info
+				    ("but receive packet from GMAC2\n");
+			}
+		} else {
+			rx_skb->dev = dev;
+			rx_skb->protocol = eth_type_trans(rx_skb, dev);
+		}
+
+		/* rx checksum offload */
+		if (likely(rx_ring->rxd_info4.L4VLD))
+			rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			rx_skb->ip_summed = CHECKSUM_NONE;
+
+#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+		/* stash the RX descriptor info for the HW-NAT (FOE) engine */
+		if (ppe_hook_rx_eth) {
+			*(uint32_t *)(FOE_INFO_START_ADDR_HEAD(rx_skb)) =
+				*(uint32_t *)&rx_ring->rxd_info4;
+			*(uint32_t *)(FOE_INFO_START_ADDR_TAIL(rx_skb) + 2) =
+				*(uint32_t *)&rx_ring->rxd_info4;
+			FOE_ALG_HEAD(rx_skb) = 0;
+			FOE_ALG_TAIL(rx_skb) = 0;
+			FOE_MAGIC_TAG_HEAD(rx_skb) = FOE_MAGIC_GE;
+			FOE_MAGIC_TAG_TAIL(rx_skb) = FOE_MAGIC_GE;
+			FOE_TAG_PROTECT_HEAD(rx_skb) = TAG_PROTECT;
+			FOE_TAG_PROTECT_TAIL(rx_skb) = TAG_PROTECT;
+		}
+#endif
+		if (ei_local->features & FE_HW_VLAN_RX) {
+			if (rx_ring->rxd_info2.TAG)
+				__vlan_hwaccel_put_tag(rx_skb,
+						       htons(ETH_P_8021Q),
+						       rx_ring->rxd_info3.VID);
+		}
+/* ra_sw_nat_hook_rx return 1 --> continue
+ * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
+ */
+#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+		if ((!ppe_hook_rx_eth) ||
+		    (ppe_hook_rx_eth && ppe_hook_rx_eth(rx_skb))) {
+#endif
+			if (ei_local->features & FE_INT_NAPI) {
+				/* napi_gro_receive(napi, rx_skb); */
+				netif_receive_skb(rx_skb);
+			} else {
+				netif_rx(rx_skb);
+			}
+
+#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+		}
+#endif
+
+		if (rx_ring->rxd_info4.SP == 2) {
+			p_ad->stat.rx_packets++;
+			p_ad->stat.rx_bytes += length;
+		} else {
+			ei_local->stat.rx_packets++;
+			ei_local->stat.rx_bytes += length;
+		}
+
+		/* re-arm the descriptor with the replacement buffer */
+		hw_rss_rx_desc_init(ei_local,
+				    rx_ring,
+				    rx_ring_no,
+				    dma_addr);
+		ei_local->netrx_skb_data[rx_ring_no][rx_dma_owner_idx] =
+			new_data;
+
+		/* make sure that all changes to the dma ring are flushed before
+		 * we continue
+		 */
+		wmb();
+		sys_reg_write(RAETH_RX_CALC_IDX1, rx_dma_owner_idx);
+		ei_local->rx_calc_idx[rx_ring_no] = rx_dma_owner_idx;
+
+		/* use prefetched variable */
+		rx_dma_owner_idx = rx_dma_owner_idx_next;
+		rx_ring_no = rx_ring_no_next;
+		rx_ring = rx_ring_next;
+		rx_data = rx_data_next;
+	} /* for */
+
+	return rx_processed;
+
+skb_err:
+	/* rx packet from GE2 */
+	if (rx_ring->rxd_info4.SP == 2)
+		p_ad->stat.rx_dropped++;
+	else
+		ei_local->stat.rx_dropped++;
+
+	/* Discard the rx packet: recycle the slot's current buffer */
+	hw_rss_rx_desc_init(ei_local,
+			    rx_ring,
+			    rx_ring_no,
+			    rx_ring->rxd_info1.PDP0);
+	sys_reg_write(RAETH_RX_CALC_IDX1, rx_dma_owner_idx);
+	ei_local->rx_calc_idx[rx_ring_no] = rx_dma_owner_idx;
+
+	return (budget + 1);
+}
+
+/* fe_rss2_recv - NAPI RX polling for RSS ring 2.
+ *
+ * Same flow as fe_rss0_recv() but for RSS ring 2: NUM_RSS_RX_DESC
+ * descriptors and the RAETH_RX_CALC_IDX2 CPU-index register.
+ *
+ * Returns the number of descriptors examined, or (budget + 1) when an
+ * allocation/mapping failure forced the current packet to be dropped.
+ */
+int fe_rss2_recv(struct net_device *dev,
+		 struct napi_struct *napi,
+		 int budget)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	struct PSEUDO_ADAPTER *p_ad = netdev_priv(ei_local->pseudo_dev);
+	struct sk_buff *rx_skb;
+	struct PDMA_rxdesc *rx_ring, *rx_ring_next;
+	void *rx_data, *rx_data_next, *new_data;
+	unsigned int length = 0;
+	unsigned int rx_ring_no = 0, rx_ring_no_next = 0;
+	unsigned int rx_dma_owner_idx, rx_dma_owner_idx_next;
+	unsigned int rx_dma_owner_lro[MAX_RX_RING_NUM];
+	unsigned int skb_size, map_size;
+	int rx_processed = 0;
+
+	/* get cpu owner index of ring 2 */
+	rx_dma_owner_lro[2] = (ei_local->rx_calc_idx[2] + 1) % NUM_RSS_RX_DESC;
+
+	rx_ring_no = 2;
+	rx_dma_owner_idx = rx_dma_owner_lro[rx_ring_no];
+	rx_ring = &ei_local->rx_ring[rx_ring_no][rx_dma_owner_idx];
+	rx_data = ei_local->netrx_skb_data[rx_ring_no][rx_dma_owner_idx];
+
+	for (;;) {
+		dma_addr_t dma_addr;
+
+		/* budget spent, or descriptor still owned by the DMA engine */
+		if ((rx_processed++ > budget) ||
+		    (rx_ring->rxd_info2.DDONE_bit == 0))
+			break;
+
+		/* prefetch the next handling RXD */
+		rx_dma_owner_lro[rx_ring_no] =
+			(rx_dma_owner_idx + 1) % NUM_RSS_RX_DESC;
+		skb_size =
+		    SKB_DATA_ALIGN(MAX_RX_LENGTH + NET_IP_ALIGN +
+				   NET_SKB_PAD) +
+		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+		map_size = MAX_RX_LENGTH;
+
+		rx_ring_no_next = rx_ring_no;
+		rx_dma_owner_idx_next = rx_dma_owner_lro[rx_ring_no_next];
+
+		rx_ring_next =
+			&ei_local->rx_ring
+			[rx_ring_no_next][rx_dma_owner_idx_next];
+		rx_data_next =
+			ei_local->netrx_skb_data
+			[rx_ring_no_next][rx_dma_owner_idx_next];
+		prefetch(rx_ring_next);
+
+		/* allocate the replacement buffer before handing the packet
+		 * to the cpu, so the ring slot always keeps backing memory
+		 */
+		new_data = raeth_alloc_skb_data(skb_size, GFP_ATOMIC);
+		if (unlikely(!new_data)) {
+			pr_info("skb not available...\n");
+			goto skb_err;
+		}
+
+		dma_addr = dma_map_single(dev->dev.parent,
+					  new_data + NET_SKB_PAD,
+					  map_size,
+					  DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(dev->dev.parent, dma_addr))) {
+			pr_info("[%s]dma_map_single() failed...\n", __func__);
+			raeth_free_skb_data(new_data);
+			goto skb_err;
+		}
+
+		rx_skb = raeth_build_skb(rx_data, skb_size);
+		if (unlikely(!rx_skb)) {
+			/* Keep the old (still mapped) buffer in the ring and
+			 * release the replacement: freeing the in-ring page
+			 * here (as before) left the descriptor pointing at
+			 * freed memory and leaked new_data and its mapping.
+			 */
+			dma_unmap_single(dev->dev.parent, dma_addr,
+					 map_size, DMA_FROM_DEVICE);
+			raeth_free_skb_data(new_data);
+			pr_info("build_skb failed\n");
+			goto skb_err;
+		}
+		skb_reserve(rx_skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+		length = rx_ring->rxd_info2.PLEN0;
+		dma_unmap_single(dev->dev.parent,
+				 rx_ring->rxd_info1.PDP0,
+				 length, DMA_FROM_DEVICE);
+
+		prefetch(rx_skb->data);
+
+		skb_put(rx_skb, length);
+
+		/* SP == 2: the packet ingressed on GMAC2 (pseudo device) */
+		if (rx_ring->rxd_info4.SP == 2) {
+			if (ei_local->pseudo_dev) {
+				rx_skb->dev = ei_local->pseudo_dev;
+				rx_skb->protocol =
+					eth_type_trans(rx_skb,
+						       ei_local->pseudo_dev);
+			} else {
+				pr_info
+				    ("pseudo_dev is still not initialize ");
+				pr_info
+				    ("but receive packet from GMAC2\n");
+			}
+		} else {
+			rx_skb->dev = dev;
+			rx_skb->protocol = eth_type_trans(rx_skb, dev);
+		}
+
+		/* rx checksum offload */
+		if (likely(rx_ring->rxd_info4.L4VLD))
+			rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			rx_skb->ip_summed = CHECKSUM_NONE;
+
+#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+		/* stash the RX descriptor info for the HW-NAT (FOE) engine */
+		if (ppe_hook_rx_eth) {
+			*(uint32_t *)(FOE_INFO_START_ADDR_HEAD(rx_skb)) =
+				*(uint32_t *)&rx_ring->rxd_info4;
+			*(uint32_t *)(FOE_INFO_START_ADDR_TAIL(rx_skb) + 2) =
+				*(uint32_t *)&rx_ring->rxd_info4;
+			FOE_ALG_HEAD(rx_skb) = 0;
+			FOE_ALG_TAIL(rx_skb) = 0;
+			FOE_MAGIC_TAG_HEAD(rx_skb) = FOE_MAGIC_GE;
+			FOE_MAGIC_TAG_TAIL(rx_skb) = FOE_MAGIC_GE;
+			FOE_TAG_PROTECT_HEAD(rx_skb) = TAG_PROTECT;
+			FOE_TAG_PROTECT_TAIL(rx_skb) = TAG_PROTECT;
+		}
+#endif
+		if (ei_local->features & FE_HW_VLAN_RX) {
+			if (rx_ring->rxd_info2.TAG)
+				__vlan_hwaccel_put_tag(rx_skb,
+						       htons(ETH_P_8021Q),
+						       rx_ring->rxd_info3.VID);
+		}
+/* ra_sw_nat_hook_rx return 1 --> continue
+ * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
+ */
+#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+		if ((!ppe_hook_rx_eth) ||
+		    (ppe_hook_rx_eth && ppe_hook_rx_eth(rx_skb))) {
+#endif
+			if (ei_local->features & FE_INT_NAPI) {
+				/* napi_gro_receive(napi, rx_skb); */
+				netif_receive_skb(rx_skb);
+			} else {
+				netif_rx(rx_skb);
+			}
+
+#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+		}
+#endif
+
+		if (rx_ring->rxd_info4.SP == 2) {
+			p_ad->stat.rx_packets++;
+			p_ad->stat.rx_bytes += length;
+		} else {
+			ei_local->stat.rx_packets++;
+			ei_local->stat.rx_bytes += length;
+		}
+
+		/* re-arm the descriptor with the replacement buffer */
+		hw_rss_rx_desc_init(ei_local,
+				    rx_ring,
+				    rx_ring_no,
+				    dma_addr);
+		ei_local->netrx_skb_data[rx_ring_no][rx_dma_owner_idx] =
+			new_data;
+
+		/* make sure that all changes to the dma ring are flushed before
+		 * we continue
+		 */
+		wmb();
+		sys_reg_write(RAETH_RX_CALC_IDX2, rx_dma_owner_idx);
+		ei_local->rx_calc_idx[rx_ring_no] = rx_dma_owner_idx;
+
+		/* use prefetched variable */
+		rx_dma_owner_idx = rx_dma_owner_idx_next;
+		rx_ring_no = rx_ring_no_next;
+		rx_ring = rx_ring_next;
+		rx_data = rx_data_next;
+	} /* for */
+
+	return rx_processed;
+
+skb_err:
+	/* rx packet from GE2 */
+	if (rx_ring->rxd_info4.SP == 2)
+		p_ad->stat.rx_dropped++;
+	else
+		ei_local->stat.rx_dropped++;
+
+	/* Discard the rx packet: recycle the slot's current buffer */
+	hw_rss_rx_desc_init(ei_local,
+			    rx_ring,
+			    rx_ring_no,
+			    rx_ring->rxd_info1.PDP0);
+	sys_reg_write(RAETH_RX_CALC_IDX2, rx_dma_owner_idx);
+	ei_local->rx_calc_idx[rx_ring_no] = rx_dma_owner_idx;
+
+	return (budget + 1);
+}
+
+/* fe_rss3_recv - NAPI RX polling for RSS ring 3.
+ *
+ * Same flow as fe_rss0_recv() but for RSS ring 3: NUM_RSS_RX_DESC
+ * descriptors and the RAETH_RX_CALC_IDX3 CPU-index register.
+ *
+ * Returns the number of descriptors examined, or (budget + 1) when an
+ * allocation/mapping failure forced the current packet to be dropped.
+ */
+int fe_rss3_recv(struct net_device *dev,
+		 struct napi_struct *napi,
+		 int budget)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev);
+	struct PSEUDO_ADAPTER *p_ad = netdev_priv(ei_local->pseudo_dev);
+	struct sk_buff *rx_skb;
+	struct PDMA_rxdesc *rx_ring, *rx_ring_next;
+	void *rx_data, *rx_data_next, *new_data;
+	unsigned int length = 0;
+	unsigned int rx_ring_no = 0, rx_ring_no_next = 0;
+	unsigned int rx_dma_owner_idx, rx_dma_owner_idx_next;
+	unsigned int rx_dma_owner_lro[MAX_RX_RING_NUM];
+	unsigned int skb_size, map_size;
+	int rx_processed = 0;
+
+	/* get cpu owner index of ring 3 */
+	rx_dma_owner_lro[3] = (ei_local->rx_calc_idx[3] + 1) % NUM_RSS_RX_DESC;
+
+	rx_ring_no = 3;
+	rx_dma_owner_idx = rx_dma_owner_lro[rx_ring_no];
+	rx_ring = &ei_local->rx_ring[rx_ring_no][rx_dma_owner_idx];
+	rx_data = ei_local->netrx_skb_data[rx_ring_no][rx_dma_owner_idx];
+
+	for (;;) {
+		dma_addr_t dma_addr;
+
+		/* budget spent, or descriptor still owned by the DMA engine */
+		if ((rx_processed++ > budget) ||
+		    (rx_ring->rxd_info2.DDONE_bit == 0))
+			break;
+
+		/* prefetch the next handling RXD */
+		rx_dma_owner_lro[rx_ring_no] =
+			(rx_dma_owner_idx + 1) % NUM_RSS_RX_DESC;
+		skb_size =
+		    SKB_DATA_ALIGN(MAX_RX_LENGTH + NET_IP_ALIGN +
+				   NET_SKB_PAD) +
+		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+		map_size = MAX_RX_LENGTH;
+
+		rx_ring_no_next = rx_ring_no;
+		rx_dma_owner_idx_next = rx_dma_owner_lro[rx_ring_no_next];
+
+		rx_ring_next =
+			&ei_local->rx_ring
+			[rx_ring_no_next][rx_dma_owner_idx_next];
+		rx_data_next =
+			ei_local->netrx_skb_data
+			[rx_ring_no_next][rx_dma_owner_idx_next];
+		prefetch(rx_ring_next);
+
+		/* allocate the replacement buffer before handing the packet
+		 * to the cpu, so the ring slot always keeps backing memory
+		 */
+		new_data = raeth_alloc_skb_data(skb_size, GFP_ATOMIC);
+		if (unlikely(!new_data)) {
+			pr_info("skb not available...\n");
+			goto skb_err;
+		}
+
+		dma_addr = dma_map_single(dev->dev.parent,
+					  new_data + NET_SKB_PAD,
+					  map_size,
+					  DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(dev->dev.parent, dma_addr))) {
+			pr_info("[%s]dma_map_single() failed...\n", __func__);
+			raeth_free_skb_data(new_data);
+			goto skb_err;
+		}
+
+		rx_skb = raeth_build_skb(rx_data, skb_size);
+		if (unlikely(!rx_skb)) {
+			/* Keep the old (still mapped) buffer in the ring and
+			 * release the replacement: freeing the in-ring page
+			 * here (as before) left the descriptor pointing at
+			 * freed memory and leaked new_data and its mapping.
+			 */
+			dma_unmap_single(dev->dev.parent, dma_addr,
+					 map_size, DMA_FROM_DEVICE);
+			raeth_free_skb_data(new_data);
+			pr_info("build_skb failed\n");
+			goto skb_err;
+		}
+		skb_reserve(rx_skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+		length = rx_ring->rxd_info2.PLEN0;
+		dma_unmap_single(dev->dev.parent,
+				 rx_ring->rxd_info1.PDP0,
+				 length, DMA_FROM_DEVICE);
+
+		prefetch(rx_skb->data);
+
+		skb_put(rx_skb, length);
+
+		/* SP == 2: the packet ingressed on GMAC2 (pseudo device) */
+		if (rx_ring->rxd_info4.SP == 2) {
+			if (ei_local->pseudo_dev) {
+				rx_skb->dev = ei_local->pseudo_dev;
+				rx_skb->protocol =
+					eth_type_trans(rx_skb,
+						       ei_local->pseudo_dev);
+			} else {
+				pr_info
+				    ("pseudo_dev is still not initialize ");
+				pr_info
+				    ("but receive packet from GMAC2\n");
+			}
+		} else {
+			rx_skb->dev = dev;
+			rx_skb->protocol = eth_type_trans(rx_skb, dev);
+		}
+
+		/* rx checksum offload */
+		if (likely(rx_ring->rxd_info4.L4VLD))
+			rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			rx_skb->ip_summed = CHECKSUM_NONE;
+
+#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+		/* stash the RX descriptor info for the HW-NAT (FOE) engine */
+		if (ppe_hook_rx_eth) {
+			*(uint32_t *)(FOE_INFO_START_ADDR_HEAD(rx_skb)) =
+				*(uint32_t *)&rx_ring->rxd_info4;
+			*(uint32_t *)(FOE_INFO_START_ADDR_TAIL(rx_skb) + 2) =
+				*(uint32_t *)&rx_ring->rxd_info4;
+			FOE_ALG_HEAD(rx_skb) = 0;
+			FOE_ALG_TAIL(rx_skb) = 0;
+			FOE_MAGIC_TAG_HEAD(rx_skb) = FOE_MAGIC_GE;
+			FOE_MAGIC_TAG_TAIL(rx_skb) = FOE_MAGIC_GE;
+			FOE_TAG_PROTECT_HEAD(rx_skb) = TAG_PROTECT;
+			FOE_TAG_PROTECT_TAIL(rx_skb) = TAG_PROTECT;
+		}
+#endif
+		if (ei_local->features & FE_HW_VLAN_RX) {
+			if (rx_ring->rxd_info2.TAG)
+				__vlan_hwaccel_put_tag(rx_skb,
+						       htons(ETH_P_8021Q),
+						       rx_ring->rxd_info3.VID);
+		}
+/* ra_sw_nat_hook_rx return 1 --> continue
+ * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
+ */
+#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+		if ((!ppe_hook_rx_eth) ||
+		    (ppe_hook_rx_eth && ppe_hook_rx_eth(rx_skb))) {
+#endif
+			if (ei_local->features & FE_INT_NAPI) {
+				/* napi_gro_receive(napi, rx_skb); */
+				netif_receive_skb(rx_skb);
+			} else {
+				netif_rx(rx_skb);
+			}
+
+#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+		}
+#endif
+
+		if (rx_ring->rxd_info4.SP == 2) {
+			p_ad->stat.rx_packets++;
+			p_ad->stat.rx_bytes += length;
+		} else {
+			ei_local->stat.rx_packets++;
+			ei_local->stat.rx_bytes += length;
+		}
+
+		/* re-arm the descriptor with the replacement buffer */
+		hw_rss_rx_desc_init(ei_local,
+				    rx_ring,
+				    rx_ring_no,
+				    dma_addr);
+		ei_local->netrx_skb_data[rx_ring_no][rx_dma_owner_idx] =
+			new_data;
+
+		/* make sure that all changes to the dma ring are flushed before
+		 * we continue
+		 */
+		wmb();
+		sys_reg_write(RAETH_RX_CALC_IDX3, rx_dma_owner_idx);
+		ei_local->rx_calc_idx[rx_ring_no] = rx_dma_owner_idx;
+
+		/* use prefetched variable */
+		rx_dma_owner_idx = rx_dma_owner_idx_next;
+		rx_ring_no = rx_ring_no_next;
+		rx_ring = rx_ring_next;
+		rx_data = rx_data_next;
+	} /* for */
+
+	return rx_processed;
+
+skb_err:
+	/* rx packet from GE2 */
+	if (rx_ring->rxd_info4.SP == 2)
+		p_ad->stat.rx_dropped++;
+	else
+		ei_local->stat.rx_dropped++;
+
+	/* Discard the rx packet: recycle the slot's current buffer */
+	hw_rss_rx_desc_init(ei_local,
+			    rx_ring,
+			    rx_ring_no,
+			    rx_ring->rxd_info1.PDP0);
+	sys_reg_write(RAETH_RX_CALC_IDX3, rx_dma_owner_idx);
+	ei_local->rx_calc_idx[rx_ring_no] = rx_dma_owner_idx;
+
+	return (budget + 1);
+}
+
+/* rx_rss_ring_read - dump an RSS RX descriptor ring to a seq_file.
+ *
+ * Snapshots the live ring into a temporary kernel buffer first, then
+ * prints the four 32-bit descriptor words of every entry.  Always
+ * returns 0 so the seq_file read completes even when the temporary
+ * allocation fails (the failure is reported in the output instead).
+ */
+int rx_rss_ring_read(struct seq_file *seq, void *v,
+		     struct PDMA_rxdesc *rx_ring_p)
+{
+	struct PDMA_rxdesc *rx_ring;
+	int i = 0;
+
+	rx_ring =
+	    kmalloc(sizeof(struct PDMA_rxdesc) * NUM_RSS_RX_DESC, GFP_KERNEL);
+	if (!rx_ring) {
+		seq_puts(seq, " allocate temp rx_ring fail.\n");
+		return 0;
+	}
+
+	/* one bulk copy instead of a per-descriptor memcpy loop */
+	memcpy(rx_ring, rx_ring_p,
+	       sizeof(struct PDMA_rxdesc) * NUM_RSS_RX_DESC);
+
+	for (i = 0; i < NUM_RSS_RX_DESC; i++) {
+		seq_printf(seq, "%d: %08x %08x %08x %08x\n", i,
+			   *(int *)&rx_ring[i].rxd_info1,
+			   *(int *)&rx_ring[i].rxd_info2,
+			   *(int *)&rx_ring[i].rxd_info3,
+			   *(int *)&rx_ring[i].rxd_info4);
+	}
+
+	kfree(rx_ring);
+	return 0;
+}
+
+/* seq_file show handler: dump RSS RX ring 1. */
+int rss_ring1_read(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	return rx_rss_ring_read(seq, v, ei_local->rx_ring[1]);
+}
+
+/* seq_file show handler: dump RSS RX ring 2. */
+int rss_ring2_read(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	return rx_rss_ring_read(seq, v, ei_local->rx_ring[2]);
+}
+
+/* seq_file show handler: dump RSS RX ring 3. */
+int rss_ring3_read(struct seq_file *seq, void *v)
+{
+	struct END_DEVICE *ei_local = netdev_priv(dev_raether);
+
+	return rx_rss_ring_read(seq, v, ei_local->rx_ring[3]);
+}
+
+/* proc open: bind the single_open seq handler for RSS ring 1. */
+static int rx_ring1_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rss_ring1_read, NULL);
+}
+
+/* proc open: bind the single_open seq handler for RSS ring 2. */
+static int rx_ring2_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rss_ring2_read, NULL);
+}
+
+/* proc open: bind the single_open seq handler for RSS ring 3. */
+static int rx_ring3_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rss_ring3_read, NULL);
+}
+
+/* /proc file_operations for the three RSS ring dump entries
+ * (read-only, backed by the single_open seq_file handlers above).
+ */
+static const struct file_operations rss_ring1_fops = {
+	.owner = THIS_MODULE,
+	.open = rx_ring1_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+static const struct file_operations rss_ring2_fops = {
+	.owner = THIS_MODULE,
+	.open = rx_ring2_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+static const struct file_operations rss_ring3_fops = {
+	.owner = THIS_MODULE,
+	.open = rx_ring3_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+/* rss_debug_proc_init - create the /proc debug entries for RSS rings 1-3
+ * under @proc_reg_dir.
+ *
+ * Creation failures are logged but deliberately non-fatal (the driver
+ * works without the debug entries); always returns 0.
+ */
+int rss_debug_proc_init(struct proc_dir_entry *proc_reg_dir)
+{
+	proc_rss_ring1 =
+	    proc_create(PROCREG_RXRING1, 0, proc_reg_dir, &rss_ring1_fops);
+	if (!proc_rss_ring1)
+		pr_info("!! FAIL to create %s PROC !!\n", PROCREG_RXRING1);
+
+	proc_rss_ring2 =
+	    proc_create(PROCREG_RXRING2, 0, proc_reg_dir, &rss_ring2_fops);
+	if (!proc_rss_ring2)
+		pr_info("!! FAIL to create %s PROC !!\n", PROCREG_RXRING2);
+
+	proc_rss_ring3 =
+	    proc_create(PROCREG_RXRING3, 0, proc_reg_dir, &rss_ring3_fops);
+	if (!proc_rss_ring3)
+		pr_info("!! FAIL to create %s PROC !!\n", PROCREG_RXRING3);
+
+	return 0;
+}
+EXPORT_SYMBOL(rss_debug_proc_init);
+
+/* rss_debug_proc_exit - remove the /proc entries created by
+ * rss_debug_proc_init().
+ *
+ * The cached pointers are cleared after removal so a repeated call (or
+ * a later init/exit cycle) cannot attempt to remove a stale entry.
+ */
+void rss_debug_proc_exit(struct proc_dir_entry *proc_reg_dir)
+{
+	if (proc_rss_ring1) {
+		remove_proc_entry(PROCREG_RXRING1, proc_reg_dir);
+		proc_rss_ring1 = NULL;
+	}
+	if (proc_rss_ring2) {
+		remove_proc_entry(PROCREG_RXRING2, proc_reg_dir);
+		proc_rss_ring2 = NULL;
+	}
+	if (proc_rss_ring3) {
+		remove_proc_entry(PROCREG_RXRING3, proc_reg_dir);
+		proc_rss_ring3 = NULL;
+	}
+}
+EXPORT_SYMBOL(rss_debug_proc_exit);
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_rss.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_rss.h
new file mode 100644
index 0000000..07c073f
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/raeth/raether_rss.h
@@ -0,0 +1,104 @@
+/* Copyright 2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef RA_RSS_H
+#define RA_RSS_H
+
+#include "raeth_reg.h"
+
+#define NUM_RSS_RX_DESC 1024
+#define MAX_RX_RING_NUM_2RING 2
+
+/******RSS define*******/
+#define PDMA_RSS_EN BIT(0)
+#define PDMA_RSS_BUSY BIT(1)
+#define PDMA_RSS_CFG_REQ BIT(2)
+#define PDMA_RSS_CFG_RDY BIT(3)
+#define PDMA_RSS_INDR_TBL_SIZE BITS(4, 6)
+#define PDMA_RSS_IPV6_TYPE BITS(8, 10)
+#define PDMA_RSS_IPV4_TYPE BITS(12, 14)
+#define PDMA_RSS_IPV6_TUPLE_EN BITS(16, 20)
+#define PDMA_RSS_IPV4_TUPLE_EN BITS(24, 28)
+
+#define PDMA_RSS_EN_OFFSET (0)
+#define PDMA_RSS_BUSY_OFFSET (1)
+#define PDMA_RSS_CFG_REQ_OFFSET (2)
+#define PDMA_RSS_CFG_RDY_OFFSET (3)
+#define PDMA_RSS_INDR_TBL_SIZE_OFFSET (4)
+#define PDMA_RSS_IPV6_TYPE_OFFSET (8)
+#define PDMA_RSS_IPV4_TYPE_OFFSET (12)
+#define PDMA_RSS_IPV6_TUPLE_EN_OFFSET (16)
+#define PDMA_RSS_IPV4_TUPLE_EN_OFFSET (24)
+
+#define SET_PDMA_RSS_EN(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_RSS_GLO_CFG); \
+reg_val &= ~(PDMA_RSS_EN); \
+reg_val |= ((x) & 0x1) << PDMA_RSS_EN_OFFSET; \
+sys_reg_write(ADMA_RSS_GLO_CFG, reg_val); \
+}
+
+#define SET_PDMA_RSS_CFG_REQ(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_RSS_GLO_CFG); \
+reg_val &= ~(PDMA_RSS_CFG_REQ); \
+reg_val |= ((x) & 0x1) << PDMA_RSS_CFG_REQ_OFFSET; \
+sys_reg_write(ADMA_RSS_GLO_CFG, reg_val); \
+}
+
+#define SET_PDMA_RSS_IPV4_TYPE(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_RSS_GLO_CFG); \
+reg_val &= ~(PDMA_RSS_IPV4_TYPE); \
+reg_val |= ((x) & 0x7) << PDMA_RSS_IPV4_TYPE_OFFSET; \
+sys_reg_write(ADMA_RSS_GLO_CFG, reg_val); \
+}
+
+#define SET_PDMA_RSS_IPV6_TYPE(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_RSS_GLO_CFG); \
+reg_val &= ~(PDMA_RSS_IPV6_TYPE); \
+reg_val |= ((x) & 0x7) << PDMA_RSS_IPV6_TYPE_OFFSET; \
+sys_reg_write(ADMA_RSS_GLO_CFG, reg_val); \
+}
+
+#define SET_PDMA_RSS_IPV4_TUPLE_TYPE(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_RSS_GLO_CFG); \
+reg_val &= ~(PDMA_RSS_IPV4_TYPE); \
+reg_val |= ((x) & 0x7) << PDMA_RSS_IPV4_TUPLE_EN_OFFSET; \
+sys_reg_write(ADMA_RSS_GLO_CFG, reg_val); \
+}
+
+#define SET_PDMA_RSS_IPV6_TUPLE_TYPE(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_RSS_GLO_CFG); \
+reg_val &= ~(PDMA_RSS_IPV6_TYPE); \
+reg_val |= ((x) & 0x7) << PDMA_RSS_IPV6_TUPLE_EN_OFFSET; \
+sys_reg_write(ADMA_RSS_GLO_CFG, reg_val); \
+}
+
+#define SET_PDMA_RSS_INDR_TBL_SIZE(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_RSS_GLO_CFG); \
+reg_val &= ~(PDMA_RSS_INDR_TBL_SIZE); \
+reg_val |= ((x) & 0x7) << PDMA_RSS_INDR_TBL_SIZE_OFFSET; \
+sys_reg_write(ADMA_RSS_GLO_CFG, reg_val); \
+}
+
+#define SET_PDMA_RSS_CR_VALUE(x, y) \
+{ \
+unsigned int reg_val = y; \
+sys_reg_write(x, reg_val); \
+}
+
+#endif
diff --git a/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/Makefile b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/Makefile
new file mode 100755
index 0000000..e304fcb
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for MediaTek MT753x gigabit switch
+#
+
+obj-$(CONFIG_MT753X_GSW) += mt753x.o
+
+mt753x-$(CONFIG_SWCONFIG) += mt753x_swconfig.o
+
+mt753x-y += mt753x_mdio.o mt7530.o mt7531.o \
+ mt753x_common.o mt753x_vlan.o mt753x_nl.o
+
diff --git a/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt7530.c b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt7530.c
new file mode 100755
index 0000000..7853e27
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt7530.c
@@ -0,0 +1,644 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Weijie Gao <weijie.gao@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+
+#include "mt753x.h"
+#include "mt753x_regs.h"
+
+/* MT7530 registers */
+
+/* Unique fields of PMCR for MT7530 */
+#define FORCE_MODE BIT(15)
+
+/* Unique fields of GMACCR for MT7530 */
+#define VLAN_SUPT_NO_S 14
+#define VLAN_SUPT_NO_M 0x1c000
+#define LATE_COL_DROP BIT(13)
+
+/* Unique fields of (M)HWSTRAP for MT7530 */
+#define BOND_OPTION BIT(24)
+#define P5_PHY0_SEL BIT(20)
+#define CHG_TRAP BIT(16)
+#define LOOPDET_DIS BIT(14)
+#define P5_INTF_SEL_GMAC5 BIT(13)
+#define SMI_ADDR_S 11
+#define SMI_ADDR_M 0x1800
+#define XTAL_FSEL_S 9
+#define XTAL_FSEL_M 0x600
+#define P6_INTF_DIS BIT(8)
+#define P5_INTF_MODE_RGMII BIT(7)
+#define P5_INTF_DIS_S BIT(6)
+#define C_MDIO_BPS_S BIT(5)
+#define EEPROM_EN_S BIT(4)
+
+/* PHY EEE Register bitmap of define */
+#define PHY_DEV07 0x07
+#define PHY_DEV07_REG_03C 0x3c
+
+/* PHY Extend Register 0x14 bitmap of define */
+#define PHY_EXT_REG_14 0x14
+
+/* Fields of PHY_EXT_REG_14 */
+#define PHY_EN_DOWN_SHFIT BIT(4)
+
+/* PHY Token Ring Register 0x10 bitmap of define */
+#define PHY_TR_REG_10 0x10
+
+/* PHY Token Ring Register 0x12 bitmap of define */
+#define PHY_TR_REG_12 0x12
+
+/* PHY LPI PCS/DSP Control Register bitmap of define */
+#define PHY_LPI_REG_11 0x11
+
+/* PHY DEV 0x1e Register bitmap of define */
+#define PHY_DEV1E 0x1e
+#define PHY_DEV1E_REG_123 0x123
+#define PHY_DEV1E_REG_A6 0xa6
+
+/* Values of XTAL_FSEL */
+#define XTAL_20MHZ 1
+#define XTAL_40MHZ 2
+#define XTAL_25MHZ 3
+
+/* Top single control CR define */
+#define TOP_SIG_CTRL 0x7808
+
+/* TOP_SIG_CTRL Register bitmap of define */
+#define OUTPUT_INTR_S 16
+#define OUTPUT_INTR_M 0x30000
+
+#define P6ECR 0x7830
+#define P6_INTF_MODE_TRGMII BIT(0)
+
+#define TRGMII_TXCTRL 0x7a40
+#define TRAIN_TXEN BIT(31)
+#define TXC_INV BIT(30)
+#define TX_DOEO BIT(29)
+#define TX_RST BIT(28)
+
+#define TRGMII_TD0_CTRL 0x7a50
+#define TRGMII_TD1_CTRL 0x7a58
+#define TRGMII_TD2_CTRL 0x7a60
+#define TRGMII_TD3_CTRL 0x7a68
+#define TRGMII_TXCTL_CTRL 0x7a70
+#define TRGMII_TCK_CTRL 0x7a78
+#define TRGMII_TD_CTRL(n) (0x7a50 + (n) * 8)
+#define NUM_TRGMII_CTRL 6
+#define TX_DMPEDRV BIT(31)
+#define TX_DM_SR BIT(15)
+#define TX_DMERODT BIT(14)
+#define TX_DMOECTL BIT(13)
+#define TX_TAP_S 8
+#define TX_TAP_M 0xf00
+#define TX_TRAIN_WD_S 0
+#define TX_TRAIN_WD_M 0xff
+
+#define TRGMII_TD0_ODT 0x7a54
+#define TRGMII_TD1_ODT 0x7a5c
+#define TRGMII_TD2_ODT 0x7a64
+#define TRGMII_TD3_ODT 0x7a6c
+#define TRGMII_TXCTL_ODT 0x7574
+#define TRGMII_TCK_ODT 0x757c
+#define TRGMII_TD_ODT(n) (0x7a54 + (n) * 8)
+#define NUM_TRGMII_ODT 6
+#define TX_DM_DRVN_PRE_S 30
+#define TX_DM_DRVN_PRE_M 0xc0000000
+#define TX_DM_DRVP_PRE_S 28
+#define TX_DM_DRVP_PRE_M 0x30000000
+#define TX_DM_TDSEL_S 24
+#define TX_DM_TDSEL_M 0xf000000
+#define TX_ODTEN BIT(23)
+#define TX_DME_PRE BIT(20)
+#define TX_DM_DRVNT0 BIT(19)
+#define TX_DM_DRVPT0 BIT(18)
+#define TX_DM_DRVNTE BIT(17)
+#define TX_DM_DRVPTE BIT(16)
+#define TX_DM_ODTN_S 12
+#define TX_DM_ODTN_M 0x7000
+#define TX_DM_ODTP_S 8
+#define TX_DM_ODTP_M 0x700
+#define TX_DM_DRVN_S 4
+#define TX_DM_DRVN_M 0xf0
+#define TX_DM_DRVP_S 0
+#define TX_DM_DRVP_M 0x0f
+
+#define P5RGMIIRXCR 0x7b00
+#define CSR_RGMII_RCTL_CFG_S 24
+#define CSR_RGMII_RCTL_CFG_M 0x7000000
+#define CSR_RGMII_RXD_CFG_S 16
+#define CSR_RGMII_RXD_CFG_M 0x70000
+#define CSR_RGMII_EDGE_ALIGN BIT(8)
+#define CSR_RGMII_RXC_90DEG_CFG_S 4
+#define CSR_RGMII_RXC_90DEG_CFG_M 0xf0
+#define CSR_RGMII_RXC_0DEG_CFG_S 0
+#define CSR_RGMII_RXC_0DEG_CFG_M 0x0f
+
+#define P5RGMIITXCR 0x7b04
+#define CSR_RGMII_TXEN_CFG_S 16
+#define CSR_RGMII_TXEN_CFG_M 0x70000
+#define CSR_RGMII_TXD_CFG_S 8
+#define CSR_RGMII_TXD_CFG_M 0x700
+#define CSR_RGMII_TXC_CFG_S 0
+#define CSR_RGMII_TXC_CFG_M 0x1f
+
+#define CHIP_REV 0x7ffc
+#define CHIP_NAME_S 16
+#define CHIP_NAME_M 0xffff0000
+#define CHIP_REV_S 0
+#define CHIP_REV_M 0x0f
+
+/* MMD registers */
+#define CORE_PLL_GROUP2 0x401
+#define RG_SYSPLL_EN_NORMAL BIT(15)
+#define RG_SYSPLL_VODEN BIT(14)
+#define RG_SYSPLL_POSDIV_S 5
+#define RG_SYSPLL_POSDIV_M 0x60
+
+#define CORE_PLL_GROUP4 0x403
+#define RG_SYSPLL_DDSFBK_EN BIT(12)
+#define RG_SYSPLL_BIAS_EN BIT(11)
+#define RG_SYSPLL_BIAS_LPF_EN BIT(10)
+
+#define CORE_PLL_GROUP5 0x404
+#define RG_LCDDS_PCW_NCPO1_S 0
+#define RG_LCDDS_PCW_NCPO1_M 0xffff
+
+#define CORE_PLL_GROUP6 0x405
+#define RG_LCDDS_PCW_NCPO0_S 0
+#define RG_LCDDS_PCW_NCPO0_M 0xffff
+
+#define CORE_PLL_GROUP7 0x406
+#define RG_LCDDS_PWDB BIT(15)
+#define RG_LCDDS_ISO_EN BIT(13)
+#define RG_LCCDS_C_S 4
+#define RG_LCCDS_C_M 0x70
+#define RG_LCDDS_PCW_NCPO_CHG BIT(3)
+
+#define CORE_PLL_GROUP10 0x409
+#define RG_LCDDS_SSC_DELTA_S 0
+#define RG_LCDDS_SSC_DELTA_M 0xfff
+
+#define CORE_PLL_GROUP11 0x40a
+#define RG_LCDDS_SSC_DELTA1_S 0
+#define RG_LCDDS_SSC_DELTA1_M 0xfff
+
+#define CORE_GSWPLL_GCR_1 0x040d
+#define GSWPLL_PREDIV_S 14
+#define GSWPLL_PREDIV_M 0xc000
+#define GSWPLL_POSTDIV_200M_S 12
+#define GSWPLL_POSTDIV_200M_M 0x3000
+#define GSWPLL_EN_PRE BIT(11)
+#define GSWPLL_FBKSEL BIT(10)
+#define GSWPLL_BP BIT(9)
+#define GSWPLL_BR BIT(8)
+#define GSWPLL_FBKDIV_200M_S 0
+#define GSWPLL_FBKDIV_200M_M 0xff
+
+#define CORE_GSWPLL_GCR_2 0x040e
+#define GSWPLL_POSTDIV_500M_S 8
+#define GSWPLL_POSTDIV_500M_M 0x300
+#define GSWPLL_FBKDIV_500M_S 0
+#define GSWPLL_FBKDIV_500M_M 0xff
+
+#define TRGMII_GSW_CLK_CG 0x0410
+#define TRGMIICK_EN BIT(1)
+#define GSWCK_EN BIT(0)
+
+static int mt7530_mii_read(struct gsw_mt753x *gsw, int phy, int reg) /* Clause-22 read; returns reg value or negative errno from mdiobus_read */
+{
+ if (phy < MT753X_NUM_PHYS) /* switch-internal PHY: remap through phy_base onto the host MDIO bus */
+ phy = (gsw->phy_base + phy) & MT753X_SMI_ADDR_MASK;
+
+ return mdiobus_read(gsw->host_bus, phy, reg);
+}
+
+static void mt7530_mii_write(struct gsw_mt753x *gsw, int phy, int reg, u16 val) /* Clause-22 write; mdiobus_write errors are ignored */
+{
+ if (phy < MT753X_NUM_PHYS) /* switch-internal PHY: remap through phy_base onto the host MDIO bus */
+ phy = (gsw->phy_base + phy) & MT753X_SMI_ADDR_MASK;
+
+ mdiobus_write(gsw->host_bus, phy, reg, val);
+}
+
+static int mt7530_mmd_read(struct gsw_mt753x *gsw, int addr, int devad, u16 reg) /* MMD indirect read via Clause-22 regs 13/14 */
+{
+ u16 val;
+
+ if (addr < MT753X_NUM_PHYS) /* switch-internal PHY: remap through phy_base */
+ addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK;
+
+ mutex_lock(&gsw->host_bus->mdio_lock); /* raw ->write/->read bus ops bypass mdiobus_* locking */
+
+ gsw->host_bus->write(gsw->host_bus, addr, MII_MMD_ACC_CTL_REG, /* 1) select address operation for devad */
+ (MMD_ADDR << MMD_CMD_S) |
+ ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
+
+ gsw->host_bus->write(gsw->host_bus, addr, MII_MMD_ADDR_DATA_REG, reg); /* 2) latch target register address */
+
+ gsw->host_bus->write(gsw->host_bus, addr, MII_MMD_ACC_CTL_REG, /* 3) switch to data operation */
+ (MMD_DATA << MMD_CMD_S) |
+ ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
+
+ val = gsw->host_bus->read(gsw->host_bus, addr, MII_MMD_ADDR_DATA_REG); /* 4) read the data word */
+
+ mutex_unlock(&gsw->host_bus->mdio_lock);
+
+ return val;
+}
+
+static void mt7530_mmd_write(struct gsw_mt753x *gsw, int addr, int devad, /* MMD indirect write via Clause-22 regs 13/14 */
+ u16 reg, u16 val)
+{
+ if (addr < MT753X_NUM_PHYS) /* switch-internal PHY: remap through phy_base */
+ addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK;
+
+ mutex_lock(&gsw->host_bus->mdio_lock); /* raw ->write bus ops bypass mdiobus_* locking */
+
+ gsw->host_bus->write(gsw->host_bus, addr, MII_MMD_ACC_CTL_REG, /* 1) select address operation for devad */
+ (MMD_ADDR << MMD_CMD_S) |
+ ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
+
+ gsw->host_bus->write(gsw->host_bus, addr, MII_MMD_ADDR_DATA_REG, reg); /* 2) latch target register address */
+
+ gsw->host_bus->write(gsw->host_bus, addr, MII_MMD_ACC_CTL_REG, /* 3) switch to data operation */
+ (MMD_DATA << MMD_CMD_S) |
+ ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
+
+ gsw->host_bus->write(gsw->host_bus, addr, MII_MMD_ADDR_DATA_REG, val); /* 4) write the data word */
+
+ mutex_unlock(&gsw->host_bus->mdio_lock);
+}
+
+static void mt7530_core_reg_write(struct gsw_mt753x *gsw, u32 reg, u32 val) /* core PLL/clock regs are accessed as MMD devad 0x1f of PHY 0 */
+{
+ gsw->mmd_write(gsw, 0, 0x1f, reg, val);
+}
+
+static void mt7530_trgmii_setting(struct gsw_mt753x *gsw)
+{
+ u16 i;
+
+ mt7530_core_reg_write(gsw, CORE_PLL_GROUP5, 0x0780);
+ mdelay(1);
+ mt7530_core_reg_write(gsw, CORE_PLL_GROUP6, 0);
+ mt7530_core_reg_write(gsw, CORE_PLL_GROUP10, 0x87);
+ mdelay(1);
+ mt7530_core_reg_write(gsw, CORE_PLL_GROUP11, 0x87);
+
+ /* PLL BIAS enable */
+ mt7530_core_reg_write(gsw, CORE_PLL_GROUP4,
+ RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN);
+ mdelay(1);
+
+ /* PLL LPF enable */
+ mt7530_core_reg_write(gsw, CORE_PLL_GROUP4,
+ RG_SYSPLL_DDSFBK_EN |
+ RG_SYSPLL_BIAS_EN | RG_SYSPLL_BIAS_LPF_EN);
+
+ /* sys PLL enable */
+ mt7530_core_reg_write(gsw, CORE_PLL_GROUP2,
+ RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
+ (1 << RG_SYSPLL_POSDIV_S));
+
+ /* LCDDDS PWDS */
+ mt7530_core_reg_write(gsw, CORE_PLL_GROUP7,
+ (3 << RG_LCCDS_C_S) |
+ RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
+ mdelay(1);
+
+ /* Enable MT7530 TRGMII clock */
+ mt7530_core_reg_write(gsw, TRGMII_GSW_CLK_CG, GSWCK_EN | TRGMIICK_EN);
+
+ /* lower Tx Driving */
+ for (i = 0 ; i < NUM_TRGMII_ODT; i++)
+ mt753x_reg_write(gsw, TRGMII_TD_ODT(i),
+ (4 << TX_DM_DRVP_S) | (4 << TX_DM_DRVN_S));
+}
+
+static void mt7530_rgmii_setting(struct gsw_mt753x *gsw)
+{
+ u32 val;
+
+ mt7530_core_reg_write(gsw, CORE_PLL_GROUP5, 0x0c80);
+ mdelay(1);
+ mt7530_core_reg_write(gsw, CORE_PLL_GROUP6, 0);
+ mt7530_core_reg_write(gsw, CORE_PLL_GROUP10, 0x87);
+ mdelay(1);
+ mt7530_core_reg_write(gsw, CORE_PLL_GROUP11, 0x87);
+
+ val = mt753x_reg_read(gsw, TRGMII_TXCTRL);
+ val &= ~TXC_INV;
+ mt753x_reg_write(gsw, TRGMII_TXCTRL, val);
+
+ mt753x_reg_write(gsw, TRGMII_TCK_CTRL,
+ (8 << TX_TAP_S) | (0x55 << TX_TRAIN_WD_S));
+}
+
+static int mt7530_mac_port_setup(struct gsw_mt753x *gsw)
+{
+ u32 hwstrap, p6ecr = 0, p5mcr, p6mcr, phyad;
+
+ hwstrap = mt753x_reg_read(gsw, MHWSTRAP);
+ hwstrap &= ~(P6_INTF_DIS | P5_INTF_MODE_RGMII | P5_INTF_DIS_S);
+ hwstrap |= P5_INTF_SEL_GMAC5;
+ if (!gsw->port5_cfg.enabled) {
+ p5mcr = FORCE_MODE;
+ hwstrap |= P5_INTF_DIS_S;
+ } else {
+ p5mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
+ MAC_MODE | MAC_TX_EN | MAC_RX_EN |
+ BKOFF_EN | BACKPR_EN;
+
+ if (gsw->port5_cfg.force_link) {
+ p5mcr |= FORCE_MODE | FORCE_LINK | FORCE_RX_FC |
+ FORCE_TX_FC;
+ p5mcr |= gsw->port5_cfg.speed << FORCE_SPD_S;
+
+ if (gsw->port5_cfg.duplex)
+ p5mcr |= FORCE_DPX;
+ }
+
+ switch (gsw->port5_cfg.phy_mode) {
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_GMII:
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ hwstrap |= P5_INTF_MODE_RGMII;
+ break;
+ default:
+ dev_info(gsw->dev, "%s is not supported by port5\n",
+ phy_modes(gsw->port5_cfg.phy_mode));
+ p5mcr = FORCE_MODE;
+ hwstrap |= P5_INTF_DIS_S;
+ }
+
+ /* Port5 to PHY direct mode */
+ if (of_property_read_u32(gsw->port5_cfg.np, "phy-address",
+ &phyad))
+ goto parse_p6;
+
+ if (phyad != 0 && phyad != 4) {
+ dev_info(gsw->dev,
+ "Only PHY 0/4 can be connected to Port 5\n");
+ goto parse_p6;
+ }
+
+ hwstrap &= ~P5_INTF_SEL_GMAC5;
+ if (phyad == 0)
+ hwstrap |= P5_PHY0_SEL;
+ else
+ hwstrap &= ~P5_PHY0_SEL;
+ }
+
+parse_p6:
+ if (!gsw->port6_cfg.enabled) {
+ p6mcr = FORCE_MODE;
+ hwstrap |= P6_INTF_DIS;
+ } else {
+ p6mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
+ MAC_MODE | MAC_TX_EN | MAC_RX_EN |
+ BKOFF_EN | BACKPR_EN;
+
+ if (gsw->port6_cfg.force_link) {
+ p6mcr |= FORCE_MODE | FORCE_LINK | FORCE_RX_FC |
+ FORCE_TX_FC;
+ p6mcr |= gsw->port6_cfg.speed << FORCE_SPD_S;
+
+ if (gsw->port6_cfg.duplex)
+ p6mcr |= FORCE_DPX;
+ }
+
+ switch (gsw->port6_cfg.phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ p6ecr = BIT(1);
+ break;
+ case PHY_INTERFACE_MODE_TRGMII:
+ /* set MT7530 central align */
+ p6ecr = BIT(0);
+ break;
+ default:
+ dev_info(gsw->dev, "%s is not supported by port6\n",
+ phy_modes(gsw->port6_cfg.phy_mode));
+ p6mcr = FORCE_MODE;
+ hwstrap |= P6_INTF_DIS;
+ }
+ }
+
+ mt753x_reg_write(gsw, MHWSTRAP, hwstrap);
+ mt753x_reg_write(gsw, P6ECR, p6ecr);
+
+ mt753x_reg_write(gsw, PMCR(5), p5mcr);
+ mt753x_reg_write(gsw, PMCR(6), p6mcr);
+
+ return 0;
+}
+
+static void mt7530_core_pll_setup(struct gsw_mt753x *gsw)
+{
+ u32 hwstrap;
+
+ hwstrap = mt753x_reg_read(gsw, HWSTRAP);
+
+ switch ((hwstrap & XTAL_FSEL_M) >> XTAL_FSEL_S) {
+ case XTAL_40MHZ:
+ /* Disable MT7530 core clock */
+ mt7530_core_reg_write(gsw, TRGMII_GSW_CLK_CG, 0);
+
+ /* disable MT7530 PLL */
+ mt7530_core_reg_write(gsw, CORE_GSWPLL_GCR_1,
+ (2 << GSWPLL_POSTDIV_200M_S) |
+ (32 << GSWPLL_FBKDIV_200M_S));
+
+ /* For MT7530 core clock = 500Mhz */
+ mt7530_core_reg_write(gsw, CORE_GSWPLL_GCR_2,
+ (1 << GSWPLL_POSTDIV_500M_S) |
+ (25 << GSWPLL_FBKDIV_500M_S));
+
+ /* Enable MT7530 PLL */
+ mt7530_core_reg_write(gsw, CORE_GSWPLL_GCR_1,
+ (2 << GSWPLL_POSTDIV_200M_S) |
+ (32 << GSWPLL_FBKDIV_200M_S) |
+ GSWPLL_EN_PRE);
+
+ usleep_range(20, 40);
+
+ /* Enable MT7530 core clock */
+ mt7530_core_reg_write(gsw, TRGMII_GSW_CLK_CG, GSWCK_EN);
+ break;
+ default:
+ /* TODO: PLL settings for 20/25MHz */
+ break;
+ }
+
+ hwstrap = mt753x_reg_read(gsw, HWSTRAP);
+ hwstrap |= CHG_TRAP;
+ if (gsw->direct_phy_access)
+ hwstrap &= ~C_MDIO_BPS_S;
+ else
+ hwstrap |= C_MDIO_BPS_S;
+
+ mt753x_reg_write(gsw, MHWSTRAP, hwstrap);
+
+ if (gsw->port6_cfg.enabled &&
+ gsw->port6_cfg.phy_mode == PHY_INTERFACE_MODE_TRGMII) {
+ mt7530_trgmii_setting(gsw);
+ } else {
+ /* RGMII */
+ mt7530_rgmii_setting(gsw);
+ }
+
+ /* delay setting for 10/1000M */
+ mt753x_reg_write(gsw, P5RGMIIRXCR,
+ CSR_RGMII_EDGE_ALIGN |
+ (2 << CSR_RGMII_RXC_0DEG_CFG_S));
+ mt753x_reg_write(gsw, P5RGMIITXCR, 0x14 << CSR_RGMII_TXC_CFG_S);
+}
+
+static int mt7530_sw_detect(struct gsw_mt753x *gsw, struct chip_rev *crev) /* probe CHIP_REV; 0 if an MT7530 is found, -ENODEV otherwise */
+{
+ u32 rev;
+
+ rev = mt753x_reg_read(gsw, CHIP_REV); /* [31:16] chip name, [3:0] revision */
+
+ if (((rev & CHIP_NAME_M) >> CHIP_NAME_S) == MT7530) {
+ if (crev) { /* crev is optional; fill it only when the caller wants details */
+ crev->rev = rev & CHIP_REV_M;
+ crev->name = "MT7530";
+ }
+
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static void mt7530_phy_setting(struct gsw_mt753x *gsw)
+{
+ int i;
+ u32 val;
+
+ for (i = 0; i < MT753X_NUM_PHYS; i++) {
+ /* Disable EEE */
+ gsw->mmd_write(gsw, i, PHY_DEV07, PHY_DEV07_REG_03C, 0);
+
+ /* Enable HW auto downshift */
+ gsw->mii_write(gsw, i, 0x1f, 0x1);
+ val = gsw->mii_read(gsw, i, PHY_EXT_REG_14);
+ val |= PHY_EN_DOWN_SHFIT;
+ gsw->mii_write(gsw, i, PHY_EXT_REG_14, val);
+
+ /* Increase SlvDPSready time */
+ gsw->mii_write(gsw, i, 0x1f, 0x52b5);
+ gsw->mii_write(gsw, i, PHY_TR_REG_10, 0xafae);
+ gsw->mii_write(gsw, i, PHY_TR_REG_12, 0x2f);
+ gsw->mii_write(gsw, i, PHY_TR_REG_10, 0x8fae);
+
+ /* Increase post_update_timer */
+ gsw->mii_write(gsw, i, 0x1f, 0x3);
+ gsw->mii_write(gsw, i, PHY_LPI_REG_11, 0x4b);
+ gsw->mii_write(gsw, i, 0x1f, 0);
+
+ /* Adjust 100_mse_threshold */
+ gsw->mmd_write(gsw, i, PHY_DEV1E, PHY_DEV1E_REG_123, 0xffff);
+
+ /* Disable mcc */
+ gsw->mmd_write(gsw, i, PHY_DEV1E, PHY_DEV1E_REG_A6, 0x300);
+ }
+}
+
+static inline bool get_phy_access_mode(const struct device_node *np) /* true when DT opts in to direct (bypass) PHY MDIO access */
+{
+ return of_property_read_bool(np, "mt7530,direct-phy-access");
+}
+
+static int mt7530_sw_init(struct gsw_mt753x *gsw)
+{
+ int i;
+ u32 val;
+
+ gsw->direct_phy_access = get_phy_access_mode(gsw->dev->of_node);
+
+ /* Force MT7530 to use (in)direct PHY access */
+ val = mt753x_reg_read(gsw, HWSTRAP);
+ val |= CHG_TRAP;
+ if (gsw->direct_phy_access)
+ val &= ~C_MDIO_BPS_S;
+ else
+ val |= C_MDIO_BPS_S;
+ mt753x_reg_write(gsw, MHWSTRAP, val);
+
+ /* Read PHY address base from HWSTRAP */
+ gsw->phy_base = (((val & SMI_ADDR_M) >> SMI_ADDR_S) << 3) + 8;
+ gsw->phy_base &= MT753X_SMI_ADDR_MASK;
+
+ if (gsw->direct_phy_access) {
+ gsw->mii_read = mt7530_mii_read;
+ gsw->mii_write = mt7530_mii_write;
+ gsw->mmd_read = mt7530_mmd_read;
+ gsw->mmd_write = mt7530_mmd_write;
+ } else {
+ gsw->mii_read = mt753x_mii_read;
+ gsw->mii_write = mt753x_mii_write;
+ gsw->mmd_read = mt753x_mmd_ind_read;
+ gsw->mmd_write = mt753x_mmd_ind_write;
+ }
+
+ for (i = 0; i < MT753X_NUM_PHYS; i++) {
+ val = gsw->mii_read(gsw, i, MII_BMCR);
+ val |= BMCR_PDOWN;
+ gsw->mii_write(gsw, i, MII_BMCR, val);
+ }
+
+ /* Force MAC link down before reset */
+ mt753x_reg_write(gsw, PMCR(5), FORCE_MODE);
+ mt753x_reg_write(gsw, PMCR(6), FORCE_MODE);
+
+ /* Switch soft reset */
+ /* BUG: sw reset causes gsw int flooding */
+ mt753x_reg_write(gsw, SYS_CTRL, SW_PHY_RST | SW_SYS_RST | SW_REG_RST);
+ usleep_range(10, 20);
+
+ /* global mac control settings configuration */
+ mt753x_reg_write(gsw, GMACCR,
+ LATE_COL_DROP | (15 << MTCC_LMT_S) |
+ (2 << MAX_RX_JUMBO_S) | RX_PKT_LEN_MAX_JUMBO);
+
+ /* Output INTR selected */
+ val = mt753x_reg_read(gsw, TOP_SIG_CTRL);
+ val &= ~OUTPUT_INTR_M;
+ val |= (3 << OUTPUT_INTR_S);
+ mt753x_reg_write(gsw, TOP_SIG_CTRL, val);
+
+ mt7530_core_pll_setup(gsw);
+ mt7530_mac_port_setup(gsw);
+
+ return 0;
+}
+
+static int mt7530_sw_post_init(struct gsw_mt753x *gsw)
+{
+ int i;
+ u32 val;
+
+ mt7530_phy_setting(gsw);
+
+ for (i = 0; i < MT753X_NUM_PHYS; i++) {
+ val = gsw->mii_read(gsw, i, MII_BMCR);
+ val &= ~BMCR_PDOWN;
+ gsw->mii_write(gsw, i, MII_BMCR, val);
+ }
+
+ return 0;
+}
+
+struct mt753x_sw_id mt7530_id = {
+ .model = MT7530,
+ .detect = mt7530_sw_detect,
+ .init = mt7530_sw_init,
+ .post_init = mt7530_sw_post_init
+};
diff --git a/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt7531.c b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt7531.c
new file mode 100755
index 0000000..7253042
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt7531.c
@@ -0,0 +1,1058 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Zhanguo Ju <zhanguo.ju@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/hrtimer.h>
+
+#include "mt753x.h"
+#include "mt753x_regs.h"
+
+/* MT7531 registers */
+#define SGMII_REG_BASE 0x5000
+#define SGMII_REG_PORT_BASE 0x1000
+#define SGMII_REG(p, r) (SGMII_REG_BASE + \
+ (p) * SGMII_REG_PORT_BASE + (r))
+#define PCS_CONTROL_1(p) SGMII_REG(p, 0x00)
+#define SGMII_MODE(p) SGMII_REG(p, 0x20)
+#define QPHY_PWR_STATE_CTRL(p) SGMII_REG(p, 0xe8)
+#define ANA_CKBG(p) SGMII_REG(p, 0x100)
+#define ANA_DA_FORCE_MODE1(p) SGMII_REG(p, 0x110)
+#define PHYA_CTRL_SIGNAL3(p) SGMII_REG(p, 0x128)
+#define PHYA_ANA_SYSPLL(p) SGMII_REG(p, 0x158)
+
+/* Fields of PCS_CONTROL_1 */
+#define SGMII_LINK_STATUS BIT(18)
+#define SGMII_AN_ENABLE BIT(12)
+#define SGMII_AN_RESTART BIT(9)
+
+/* Fields of SGMII_MODE */
+#define SGMII_REMOTE_FAULT_DIS BIT(8)
+#define SGMII_IF_MODE_FORCE_DUPLEX BIT(4)
+#define SGMII_IF_MODE_FORCE_SPEED_S 0x2
+#define SGMII_IF_MODE_FORCE_SPEED_M 0x0c
+#define SGMII_IF_MODE_ADVERT_AN BIT(1)
+
+/* Values of SGMII_IF_MODE_FORCE_SPEED */
+#define SGMII_IF_MODE_FORCE_SPEED_10 0
+#define SGMII_IF_MODE_FORCE_SPEED_100 1
+#define SGMII_IF_MODE_FORCE_SPEED_1000 2
+
+/* Fields of QPHY_PWR_STATE_CTRL */
+#define PHYA_PWD BIT(4)
+
+/* Fields of ANA_CKBG */
+#define SSUSB_PLL_SSC_EN BIT(21)
+
+/* Fields of ANA_DA_FORCE_MODE1 */
+#define FORCE_PLL_SSC_EN BIT(30)
+
+/* Fields of PHYA_CTRL_SIGNAL3 */
+#define RG_TPHY_SPEED_S 2
+#define RG_TPHY_SPEED_M 0x0c
+
+/* Values of RG_TPHY_SPEED */
+#define RG_TPHY_SPEED_1000 0
+#define RG_TPHY_SPEED_2500 1
+
+/* Fields of PHYA_ANA_SYSPLL */
+#define RG_VUSB10_ON BIT(29)
+
+/* Unique fields of (M)HWSTRAP for MT7531 */
+#define XTAL_FSEL_S 7
+#define XTAL_FSEL_M BIT(7)
+#define PHY_EN BIT(6)
+#define CHG_STRAP BIT(8)
+
+/* Efuse Register Define */
+#define GBE_EFUSE 0x7bc8
+#define GBE_SEL_EFUSE_EN BIT(0)
+
+/* PHY ENABLE Register bitmap define */
+#define PHY_DEV1F 0x1f
+#define PHY_DEV1F_REG_44 0x44
+#define PHY_DEV1F_REG_104 0x104
+#define PHY_DEV1F_REG_10A 0x10a
+#define PHY_DEV1F_REG_10B 0x10b
+#define PHY_DEV1F_REG_10C 0x10c
+#define PHY_DEV1F_REG_10D 0x10d
+#define PHY_DEV1F_REG_268 0x268
+#define PHY_DEV1F_REG_269 0x269
+#define PHY_DEV1F_REG_26A 0x26A
+#define PHY_DEV1F_REG_403 0x403
+
+/* Fields of PHY_DEV1F_REG_403 */
+#define GBE_EFUSE_SETTING BIT(3)
+#define PHY_EN_BYPASS_MODE BIT(4)
+#define POWER_ON_OFF BIT(5)
+#define PHY_PLL_M GENMASK(9, 8)
+#define PHY_PLL_SEL(x) (((x) << 8) & GENMASK(9, 8))
+
+/* PHY EEE Register bitmap of define */
+#define PHY_DEV07 0x07
+#define PHY_DEV07_REG_03C 0x3c
+
+/* PHY Extend Register 0x14 bitmap of define */
+#define PHY_EXT_REG_14 0x14
+
+/* Fields of PHY_EXT_REG_14 */
+#define PHY_EN_DOWN_SHFIT BIT(4)
+
+/* PHY Extend Register 0x17 bitmap of define */
+#define PHY_EXT_REG_17 0x17
+
+/* Fields of PHY_EXT_REG_17 */
+#define PHY_LINKDOWN_POWER_SAVING_EN BIT(4)
+
+/* PHY PMA Register 0x17 bitmap of define */
+#define SLV_DSP_READY_TIME_S 15
+#define SLV_DSP_READY_TIME_M (0xff << SLV_DSP_READY_TIME_S)
+
+/* PHY PMA Register 0x18 bitmap of define */
+#define ENABLE_RANDOM_UPDATE_TRIGGER BIT(8)
+
+/* PHY DEV 0x1e Register bitmap of define */
+#define PHY_DEV1E 0x1e
+#define PHY_TX_MLT3_BASE 0x0
+#define PHY_DEV1E_REG_13 0x13
+#define PHY_DEV1E_REG_14 0x14
+#define PHY_DEV1E_REG_41 0x41
+#define PHY_DEV1E_REG_A6 0xa6
+#define PHY_DEV1E_REG_0C6 0x0c6
+#define PHY_DEV1E_REG_0FE 0x0fe
+#define PHY_DEV1E_REG_123 0x123
+#define PHY_DEV1E_REG_141 0x141
+#define PHY_DEV1E_REG_189 0x189
+#define PHY_DEV1E_REG_234 0x234
+
+/* Fields of PHY_DEV1E_REG_0C6 */
+#define PHY_POWER_SAVING_S 8
+#define PHY_POWER_SAVING_M 0x300
+#define PHY_POWER_SAVING_TX 0x0
+
+/* Fields of PHY_DEV1E_REG_189 */
+#define DESCRAMBLER_CLEAR_EN 0x1
+
+/* Fields of PHY_DEV1E_REG_234 */
+#define TR_OPEN_LOOP_EN BIT(0)
+
+/* Port debug count register */
+#define DBG_CNT_BASE 0x3018
+#define DBG_CNT_PORT_BASE 0x100
+#define DBG_CNT(p) (DBG_CNT_BASE + \
+ (p) * DBG_CNT_PORT_BASE)
+#define DIS_CLR BIT(31)
+
+/* Values of XTAL_FSEL_S */
+#define XTAL_40MHZ 0
+#define XTAL_25MHZ 1
+
+#define PLLGP_EN 0x7820
+#define EN_COREPLL BIT(2)
+#define SW_CLKSW BIT(1)
+#define SW_PLLGP BIT(0)
+
+#define PLLGP_CR0 0x78a8
+#define RG_COREPLL_EN BIT(22)
+#define RG_COREPLL_POSDIV_S 23
+#define RG_COREPLL_POSDIV_M 0x3800000
+#define RG_COREPLL_SDM_PCW_S 1
+#define RG_COREPLL_SDM_PCW_M 0x3ffffe
+#define RG_COREPLL_SDM_PCW_CHG BIT(0)
+
+/* TOP Signals Status Register */
+#define TOP_SIG_SR 0x780c
+#define PAD_MCM_SMI_EN BIT(0)
+#define PAD_DUAL_SGMII_EN BIT(1)
+
+/* RGMII and SGMII PLL clock */
+#define ANA_PLLGP_CR2 0x78b0
+#define ANA_PLLGP_CR5 0x78bc
+
+/* GPIO mode define */
+#define GPIO_MODE_REGS(x) (0x7c0c + (((x) / 8) * 4))
+#define GPIO_MODE_S 4
+
+/* GPIO GROUP IOLB SMT0 Control */
+#define SMT0_IOLB 0x7f04
+#define SMT_IOLB_5_SMI_MDC_EN BIT(5)
+
+/* Unique fields of PMCR for MT7531 */
+#define FORCE_MODE_EEE1G BIT(25)
+#define FORCE_MODE_EEE100 BIT(26)
+#define FORCE_MODE_TX_FC BIT(27)
+#define FORCE_MODE_RX_FC BIT(28)
+#define FORCE_MODE_DPX BIT(29)
+#define FORCE_MODE_SPD BIT(30)
+#define FORCE_MODE_LNK BIT(31)
+#define FORCE_MODE BIT(15)
+
+#define CHIP_REV 0x781C
+#define CHIP_NAME_S 16
+#define CHIP_NAME_M 0xffff0000
+#define CHIP_REV_S 0
+#define CHIP_REV_M 0x0f
+#define CHIP_REV_E1 0x0
+
+#define CLKGEN_CTRL 0x7500
+#define CLK_SKEW_OUT_S 8
+#define CLK_SKEW_OUT_M 0x300
+#define CLK_SKEW_IN_S 6
+#define CLK_SKEW_IN_M 0xc0
+#define RXCLK_NO_DELAY BIT(5)
+#define TXCLK_NO_REVERSE BIT(4)
+#define GP_MODE_S 1
+#define GP_MODE_M 0x06
+#define GP_CLK_EN BIT(0)
+
+#define CPGC_CTRL 0xB0
+#define COL_EN BIT(0)
+#define COL_CLK_EN BIT(1)
+#define COL_RST_N BIT(2)
+#define COL_BUSY BIT(3)
+
+/* Values of GP_MODE */
+#define GP_MODE_RGMII 0
+#define GP_MODE_MII 1
+#define GP_MODE_REV_MII 2
+
+/* Values of CLK_SKEW_IN */
+#define CLK_SKEW_IN_NO_CHANGE 0
+#define CLK_SKEW_IN_DELAY_100PPS 1
+#define CLK_SKEW_IN_DELAY_200PPS 2
+#define CLK_SKEW_IN_REVERSE 3
+
+/* Values of CLK_SKEW_OUT */
+#define CLK_SKEW_OUT_NO_CHANGE 0
+#define CLK_SKEW_OUT_DELAY_100PPS 1
+#define CLK_SKEW_OUT_DELAY_200PPS 2
+#define CLK_SKEW_OUT_REVERSE 3
+
+/* Proprietory Control Register of Internal Phy device 0x1e */
+#define RXADC_CONTROL_3 0xc2
+#define RXADC_LDO_CONTROL_2 0xd3
+
+/* Proprietory Control Register of Internal Phy device 0x1f */
+#define TXVLD_DA_271 0x271
+#define TXVLD_DA_272 0x272
+#define TXVLD_DA_273 0x273
+
+/* gpio pinmux pins and functions define */
+static int gpio_int_pins[] = {0};
+static int gpio_int_funcs[] = {1};
+static int gpio_mdc_pins[] = {11, 20};
+static int gpio_mdc_funcs[] = {2, 2};
+static int gpio_mdio_pins[] = {12, 21};
+static int gpio_mdio_funcs[] = {2, 2};
+
+static int mt7531_set_port_sgmii_force_mode(struct gsw_mt753x *gsw, u32 port,
+ struct mt753x_port_cfg *port_cfg)
+{
+ u32 speed, port_base, val;
+ ktime_t timeout;
+ u32 timeout_us;
+
+ if (port < 5 || port >= MT753X_NUM_PORTS) {
+ dev_info(gsw->dev, "port %d is not a SGMII port\n", port);
+ return -EINVAL;
+ }
+
+ port_base = port - 5;
+
+ switch (port_cfg->speed) {
+ case MAC_SPD_1000:
+ speed = RG_TPHY_SPEED_1000;
+ break;
+ case MAC_SPD_2500:
+ speed = RG_TPHY_SPEED_2500;
+ break;
+ default:
+ dev_info(gsw->dev, "invalid SGMII speed idx %d for port %d\n",
+ port_cfg->speed, port);
+
+ speed = RG_TPHY_SPEED_1000;
+ }
+
+ /* Step 1: Speed select register setting */
+ val = mt753x_reg_read(gsw, PHYA_CTRL_SIGNAL3(port_base));
+ val &= ~RG_TPHY_SPEED_M;
+ val |= speed << RG_TPHY_SPEED_S;
+ mt753x_reg_write(gsw, PHYA_CTRL_SIGNAL3(port_base), val);
+
+ /* Step 2 : Disable AN */
+ val = mt753x_reg_read(gsw, PCS_CONTROL_1(port_base));
+ val &= ~SGMII_AN_ENABLE;
+ mt753x_reg_write(gsw, PCS_CONTROL_1(port_base), val);
+
+ /* Step 3: SGMII force mode setting */
+ val = mt753x_reg_read(gsw, SGMII_MODE(port_base));
+ val &= ~SGMII_IF_MODE_ADVERT_AN;
+ val &= ~SGMII_IF_MODE_FORCE_SPEED_M;
+ val |= SGMII_IF_MODE_FORCE_SPEED_1000 << SGMII_IF_MODE_FORCE_SPEED_S;
+ val |= SGMII_IF_MODE_FORCE_DUPLEX;
+ /* For sgmii force mode, 0 is full duplex and 1 is half duplex */
+ if (port_cfg->duplex)
+ val &= ~SGMII_IF_MODE_FORCE_DUPLEX;
+
+ mt753x_reg_write(gsw, SGMII_MODE(port_base), val);
+
+ /* Step 4: XXX: Disable Link partner's AN and set force mode */
+
+ /* Step 5: XXX: Special setting for PHYA ==> reserved for flexible */
+
+ /* Step 6 : Release PHYA power down state */
+ val = mt753x_reg_read(gsw, QPHY_PWR_STATE_CTRL(port_base));
+ val &= ~PHYA_PWD;
+ mt753x_reg_write(gsw, QPHY_PWR_STATE_CTRL(port_base), val);
+
+ /* Step 7 : Polling SGMII_LINK_STATUS */
+ timeout_us = 2000000;
+ timeout = ktime_add_us(ktime_get(), timeout_us);
+ while (1) {
+ val = mt753x_reg_read(gsw, PCS_CONTROL_1(port_base));
+ val &= SGMII_LINK_STATUS;
+
+ if (val)
+ break;
+
+ if (ktime_compare(ktime_get(), timeout) > 0)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int mt7531_set_port_sgmii_an_mode(struct gsw_mt753x *gsw, u32 port,
+ struct mt753x_port_cfg *port_cfg)
+{
+ u32 speed, port_base, val;
+ ktime_t timeout;
+ u32 timeout_us;
+
+ if (port < 5 || port >= MT753X_NUM_PORTS) {
+ dev_info(gsw->dev, "port %d is not a SGMII port\n", port);
+ return -EINVAL;
+ }
+
+ port_base = port - 5;
+
+ switch (port_cfg->speed) {
+ case MAC_SPD_1000:
+ speed = RG_TPHY_SPEED_1000;
+ break;
+ case MAC_SPD_2500:
+ speed = RG_TPHY_SPEED_2500;
+ break;
+ default:
+ dev_info(gsw->dev, "invalid SGMII speed idx %d for port %d\n",
+ port_cfg->speed, port);
+
+ speed = RG_TPHY_SPEED_1000;
+ }
+
+ /* Step 1: Speed select register setting */
+ val = mt753x_reg_read(gsw, PHYA_CTRL_SIGNAL3(port_base));
+ val &= ~RG_TPHY_SPEED_M;
+ val |= speed << RG_TPHY_SPEED_S;
+ mt753x_reg_write(gsw, PHYA_CTRL_SIGNAL3(port_base), val);
+
+ /* Step 2: Remote fault disable */
+ val = mt753x_reg_read(gsw, SGMII_MODE(port_base)); /* was SGMII_MODE(port): SGMII blocks are indexed by port - 5 */
+ val |= SGMII_REMOTE_FAULT_DIS;
+ mt753x_reg_write(gsw, SGMII_MODE(port_base), val);
+
+ /* Step 3: Setting Link partner's AN enable = 1 */
+
+ /* Step 4: Setting Link partner's device ability for speed/duplex */
+
+ /* Step 5: AN re-start */
+ val = mt753x_reg_read(gsw, PCS_CONTROL_1(port_base)); /* was PCS_CONTROL_1(port): Step 7 below already uses port_base */
+ val |= SGMII_AN_RESTART;
+ mt753x_reg_write(gsw, PCS_CONTROL_1(port_base), val);
+
+ /* Step 6: Special setting for PHYA ==> reserved for flexible */
+
+ /* Step 7 : Polling SGMII_LINK_STATUS */
+ timeout_us = 2000000;
+ timeout = ktime_add_us(ktime_get(), timeout_us);
+ while (1) {
+ val = mt753x_reg_read(gsw, PCS_CONTROL_1(port_base));
+ val &= SGMII_LINK_STATUS;
+
+ if (val)
+ break;
+
+ if (ktime_compare(ktime_get(), timeout) > 0)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void mt7531_sgmii_ssc(struct gsw_mt753x *gsw, u32 port, int enable) /* toggle PLL SSC for an SGMII port; no validation — caller must pass port 5 or 6 */
+{
+ u32 val;
+ u32 port_base = port - 5; /* SGMII register blocks are indexed from port 5 */
+
+ if (enable) { /* set both the analog SSC enable and its force bit */
+ val = mt753x_reg_read(gsw, ANA_CKBG(port_base));
+ val |= SSUSB_PLL_SSC_EN;
+ mt753x_reg_write(gsw, ANA_CKBG(port_base), val);
+
+ val = mt753x_reg_read(gsw, ANA_DA_FORCE_MODE1(port_base));
+ val |= FORCE_PLL_SSC_EN;
+ mt753x_reg_write(gsw, ANA_DA_FORCE_MODE1(port_base), val);
+ } else { /* mirror image: clear both bits */
+ val = mt753x_reg_read(gsw, ANA_CKBG(port_base));
+ val &= ~SSUSB_PLL_SSC_EN;
+ mt753x_reg_write(gsw, ANA_CKBG(port_base), val);
+
+ val = mt753x_reg_read(gsw, ANA_DA_FORCE_MODE1(port_base));
+ val &= ~FORCE_PLL_SSC_EN;
+ mt753x_reg_write(gsw, ANA_DA_FORCE_MODE1(port_base), val);
+ }
+}
+
+static int mt7531_set_port_rgmii(struct gsw_mt753x *gsw, u32 port) /* program CLKGEN_CTRL for RGMII; 0 on success, -EINVAL for ports other than 5 */
+{
+ u32 val;
+
+ if (port != 5) { /* RGMII mode only exists on port 5 */
+ dev_info(gsw->dev, "RGMII mode is not available for port %d\n",
+ port);
+ return -EINVAL;
+ }
+
+ val = mt753x_reg_read(gsw, CLKGEN_CTRL); /* read-modify-write: GP mode, clock enable and skew fields */
+ val |= GP_CLK_EN;
+ val &= ~GP_MODE_M;
+ val |= GP_MODE_RGMII << GP_MODE_S;
+ val |= TXCLK_NO_REVERSE;
+ val |= RXCLK_NO_DELAY;
+ val &= ~CLK_SKEW_IN_M;
+ val |= CLK_SKEW_IN_NO_CHANGE << CLK_SKEW_IN_S; /* no RX clock skew adjustment */
+ val &= ~CLK_SKEW_OUT_M;
+ val |= CLK_SKEW_OUT_NO_CHANGE << CLK_SKEW_OUT_S; /* no TX clock skew adjustment */
+ mt753x_reg_write(gsw, CLKGEN_CTRL, val);
+
+ return 0;
+}
+
+static int mt7531_mac_port_setup(struct gsw_mt753x *gsw, u32 port,
+ struct mt753x_port_cfg *port_cfg)
+{
+ u32 pmcr;
+ u32 speed;
+
+ if (port < 5 || port >= MT753X_NUM_PORTS) {
+ dev_info(gsw->dev, "port %d is not a MAC port\n", port);
+ return -EINVAL;
+ }
+
+ if (port_cfg->enabled) {
+ pmcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
+ MAC_MODE | MAC_TX_EN | MAC_RX_EN |
+ BKOFF_EN | BACKPR_EN;
+
+ if (port_cfg->force_link) {
+ /* PMCR's speed field 0x11 is reserved,
+ * sw should set 0x10
+ */
+ speed = port_cfg->speed;
+ if (port_cfg->speed == MAC_SPD_2500)
+ speed = MAC_SPD_1000;
+
+ pmcr |= FORCE_MODE_LNK | FORCE_LINK |
+ FORCE_MODE_SPD | FORCE_MODE_DPX |
+ FORCE_MODE_RX_FC | FORCE_MODE_TX_FC |
+ FORCE_RX_FC | FORCE_TX_FC |
+ (speed << FORCE_SPD_S);
+
+ if (port_cfg->duplex)
+ pmcr |= FORCE_DPX;
+ }
+ } else {
+ pmcr = FORCE_MODE_LNK;
+ }
+
+ switch (port_cfg->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ mt7531_set_port_rgmii(gsw, port);
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ if (port_cfg->force_link)
+ mt7531_set_port_sgmii_force_mode(gsw, port, port_cfg);
+ else
+ mt7531_set_port_sgmii_an_mode(gsw, port, port_cfg);
+
+ mt7531_sgmii_ssc(gsw, port, port_cfg->ssc_on);
+ break;
+ default:
+ if (port_cfg->enabled)
+ dev_info(gsw->dev, "%s is not supported by port %d\n",
+ phy_modes(port_cfg->phy_mode), port);
+
+ pmcr = FORCE_MODE_LNK;
+ }
+
+ mt753x_reg_write(gsw, PMCR(port), pmcr);
+
+ return 0;
+}
+
+static void mt7531_core_pll_setup(struct gsw_mt753x *gsw)
+{
+ u32 val;
+ u32 top_sig;
+ u32 hwstrap;
+ u32 xtal;
+
+ val = mt753x_reg_read(gsw, CHIP_REV);
+ top_sig = mt753x_reg_read(gsw, TOP_SIG_SR);
+ hwstrap = mt753x_reg_read(gsw, HWSTRAP);
+ if ((val & CHIP_REV_M) > 0)
+ xtal = (top_sig & PAD_MCM_SMI_EN) ? XTAL_40MHZ : XTAL_25MHZ;
+ else
+ xtal = (hwstrap & XTAL_FSEL_M) >> XTAL_FSEL_S;
+
+ /* dump HW strap and XTAL */
+ dev_info(gsw->dev, "HWSTRAP=0x%x XTAL=%dMHz\n", hwstrap,
+ (xtal == XTAL_25MHZ) ? 25 : 40);
+
+ /* Only BE needs additional setting */
+ if (top_sig & PAD_DUAL_SGMII_EN)
+ return;
+
+ /* Disable Port5 SGMII clearly */
+ val = mt753x_reg_read(gsw, PHYA_ANA_SYSPLL(0));
+ val &= ~RG_VUSB10_ON;
+ mt753x_reg_write(gsw, PHYA_ANA_SYSPLL(0), val);
+
+ switch (xtal) {
+ case XTAL_25MHZ:
+ /* Step 1 : Disable MT7531 COREPLL */
+ val = mt753x_reg_read(gsw, PLLGP_EN);
+ val &= ~EN_COREPLL;
+ mt753x_reg_write(gsw, PLLGP_EN, val);
+
+ /* Step 2: switch to XTAL output */
+ val = mt753x_reg_read(gsw, PLLGP_EN);
+ val |= SW_CLKSW;
+ mt753x_reg_write(gsw, PLLGP_EN, val);
+
+ val = mt753x_reg_read(gsw, PLLGP_CR0);
+ val &= ~RG_COREPLL_EN;
+ mt753x_reg_write(gsw, PLLGP_CR0, val);
+
+ /* Step 3: disable PLLGP and enable program PLLGP */
+ val = mt753x_reg_read(gsw, PLLGP_EN);
+ val |= SW_PLLGP;
+ mt753x_reg_write(gsw, PLLGP_EN, val);
+
+ /* Step 4: program COREPLL output frequency to 500MHz */
+ val = mt753x_reg_read(gsw, PLLGP_CR0);
+ val &= ~RG_COREPLL_POSDIV_M;
+ val |= 2 << RG_COREPLL_POSDIV_S;
+ mt753x_reg_write(gsw, PLLGP_CR0, val);
+ usleep_range(25, 35);
+
+ val = mt753x_reg_read(gsw, PLLGP_CR0);
+ val &= ~RG_COREPLL_SDM_PCW_M;
+ val |= 0x140000 << RG_COREPLL_SDM_PCW_S;
+ mt753x_reg_write(gsw, PLLGP_CR0, val);
+
+ /* Set feedback divide ratio update signal to high */
+ val = mt753x_reg_read(gsw, PLLGP_CR0);
+ val |= RG_COREPLL_SDM_PCW_CHG;
+ mt753x_reg_write(gsw, PLLGP_CR0, val);
+ /* Wait for at least 16 XTAL clocks */
+ usleep_range(10, 20);
+
+ /* Step 5: set feedback divide ratio update signal to low */
+ val = mt753x_reg_read(gsw, PLLGP_CR0);
+ val &= ~RG_COREPLL_SDM_PCW_CHG;
+ mt753x_reg_write(gsw, PLLGP_CR0, val);
+
+ /* Enable 325M clock for SGMII */
+ mt753x_reg_write(gsw, ANA_PLLGP_CR5, 0xad0000);
+
+ /* Enable 250SSC clock for RGMII */
+ mt753x_reg_write(gsw, ANA_PLLGP_CR2, 0x4f40000);
+
+ /* Step 6: Enable MT7531 PLL */
+ val = mt753x_reg_read(gsw, PLLGP_CR0);
+ val |= RG_COREPLL_EN;
+ mt753x_reg_write(gsw, PLLGP_CR0, val);
+
+ val = mt753x_reg_read(gsw, PLLGP_EN);
+ val |= EN_COREPLL;
+ mt753x_reg_write(gsw, PLLGP_EN, val);
+ usleep_range(25, 35);
+
+ break;
+ case XTAL_40MHZ:
+ /* Step 1 : Disable MT7531 COREPLL */
+ val = mt753x_reg_read(gsw, PLLGP_EN);
+ val &= ~EN_COREPLL;
+ mt753x_reg_write(gsw, PLLGP_EN, val);
+
+ /* Step 2: switch to XTAL output */
+ val = mt753x_reg_read(gsw, PLLGP_EN);
+ val |= SW_CLKSW;
+ mt753x_reg_write(gsw, PLLGP_EN, val);
+
+ val = mt753x_reg_read(gsw, PLLGP_CR0);
+ val &= ~RG_COREPLL_EN;
+ mt753x_reg_write(gsw, PLLGP_CR0, val);
+
+ /* Step 3: disable PLLGP and enable program PLLGP */
+ val = mt753x_reg_read(gsw, PLLGP_EN);
+ val |= SW_PLLGP;
+ mt753x_reg_write(gsw, PLLGP_EN, val);
+
+ /* Step 4: program COREPLL output frequency to 500MHz */
+ val = mt753x_reg_read(gsw, PLLGP_CR0);
+ val &= ~RG_COREPLL_POSDIV_M;
+ val |= 2 << RG_COREPLL_POSDIV_S;
+ mt753x_reg_write(gsw, PLLGP_CR0, val);
+ usleep_range(25, 35);
+
+ val = mt753x_reg_read(gsw, PLLGP_CR0);
+ val &= ~RG_COREPLL_SDM_PCW_M;
+ val |= 0x190000 << RG_COREPLL_SDM_PCW_S;
+ mt753x_reg_write(gsw, PLLGP_CR0, val);
+
+ /* Set feedback divide ratio update signal to high */
+ val = mt753x_reg_read(gsw, PLLGP_CR0);
+ val |= RG_COREPLL_SDM_PCW_CHG;
+ mt753x_reg_write(gsw, PLLGP_CR0, val);
+ /* Wait for at least 16 XTAL clocks */
+ usleep_range(10, 20);
+
+ /* Step 5: set feedback divide ratio update signal to low */
+ val = mt753x_reg_read(gsw, PLLGP_CR0);
+ val &= ~RG_COREPLL_SDM_PCW_CHG;
+ mt753x_reg_write(gsw, PLLGP_CR0, val);
+
+ /* Enable 325M clock for SGMII */
+ mt753x_reg_write(gsw, ANA_PLLGP_CR5, 0xad0000);
+
+ /* Enable 250SSC clock for RGMII */
+ mt753x_reg_write(gsw, ANA_PLLGP_CR2, 0x4f40000);
+
+ /* Step 6: Enable MT7531 PLL */
+ val = mt753x_reg_read(gsw, PLLGP_CR0);
+ val |= RG_COREPLL_EN;
+ mt753x_reg_write(gsw, PLLGP_CR0, val);
+
+ val = mt753x_reg_read(gsw, PLLGP_EN);
+ val |= EN_COREPLL;
+ mt753x_reg_write(gsw, PLLGP_EN, val);
+ usleep_range(25, 35);
+ break;
+ }
+}
+
/* Internal PHY calibration hook for MT7531.
 *
 * Currently a no-op stub: it always returns 0.  It is still invoked at
 * the end of mt7531_sw_post_init() so that a real calibration routine
 * can be dropped in later without changing the init sequence.
 */
static int mt7531_internal_phy_calibration(struct gsw_mt753x *gsw)
{
	return 0;
}
+
+static int mt7531_sw_detect(struct gsw_mt753x *gsw, struct chip_rev *crev)
+{
+ u32 rev, topsig;
+
+ rev = mt753x_reg_read(gsw, CHIP_REV);
+
+ if (((rev & CHIP_NAME_M) >> CHIP_NAME_S) == MT7531) {
+ if (crev) {
+ topsig = mt753x_reg_read(gsw, TOP_SIG_SR);
+
+ crev->rev = rev & CHIP_REV_M;
+ crev->name = topsig & PAD_DUAL_SGMII_EN ?
+ "MT7531AE" : "MT7531BE";
+ }
+
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static void pinmux_set_mux_7531(struct gsw_mt753x *gsw, u32 pin, u32 mode)
+{
+ u32 val;
+
+ val = mt753x_reg_read(gsw, GPIO_MODE_REGS(pin));
+ val &= ~(0xf << (pin & 7) * GPIO_MODE_S);
+ val |= mode << (pin & 7) * GPIO_MODE_S;
+ mt753x_reg_write(gsw, GPIO_MODE_REGS(pin), val);
+}
+
+static int mt7531_set_gpio_pinmux(struct gsw_mt753x *gsw)
+{
+ u32 group = 0;
+ struct device_node *np = gsw->dev->of_node;
+
+ /* Set GPIO 0 interrupt mode */
+ pinmux_set_mux_7531(gsw, gpio_int_pins[0], gpio_int_funcs[0]);
+
+ of_property_read_u32(np, "mediatek,mdio_master_pinmux", &group);
+
+ /* group = 0: do nothing, 1: 1st group (AE), 2: 2nd group (BE) */
+ if (group > 0 && group <= 2) {
+ group--;
+ pinmux_set_mux_7531(gsw, gpio_mdc_pins[group],
+ gpio_mdc_funcs[group]);
+ pinmux_set_mux_7531(gsw, gpio_mdio_pins[group],
+ gpio_mdio_funcs[group]);
+ }
+
+ return 0;
+}
+
+static void mt7531_phy_pll_setup(struct gsw_mt753x *gsw)
+{
+ u32 hwstrap;
+ u32 val;
+
+ val = mt753x_reg_read(gsw, CHIP_REV);
+ if ((val & CHIP_REV_M) > 0)
+ return;
+
+ hwstrap = mt753x_reg_read(gsw, HWSTRAP);
+
+ switch ((hwstrap & XTAL_FSEL_M) >> XTAL_FSEL_S) {
+ case XTAL_25MHZ:
+ /* disable pll auto calibration */
+ gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_104, 0x608);
+
+ /* change pll sel */
+ val = gsw->mmd_read(gsw, 0, PHY_DEV1F,
+ PHY_DEV1F_REG_403);
+ val &= ~(PHY_PLL_M);
+ val |= PHY_PLL_SEL(3);
+ gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_403, val);
+
+ /* set divider ratio */
+ gsw->mmd_write(gsw, 0, PHY_DEV1F,
+ PHY_DEV1F_REG_10A, 0x1009);
+
+ /* set divider ratio */
+ gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_10B, 0x7c6);
+
+ /* capacitance and resistance adjustment */
+ gsw->mmd_write(gsw, 0, PHY_DEV1F,
+ PHY_DEV1F_REG_10C, 0xa8be);
+
+ break;
+ case XTAL_40MHZ:
+ /* disable pll auto calibration */
+ gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_104, 0x608);
+
+ /* change pll sel */
+ val = gsw->mmd_read(gsw, 0, PHY_DEV1F,
+ PHY_DEV1F_REG_403);
+ val &= ~(PHY_PLL_M);
+ val |= PHY_PLL_SEL(3);
+ gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_403, val);
+
+ /* set divider ratio */
+ gsw->mmd_write(gsw, 0, PHY_DEV1F,
+ PHY_DEV1F_REG_10A, 0x1018);
+
+ /* set divider ratio */
+ gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_10B, 0xc676);
+
+ /* capacitance and resistance adjustment */
+ gsw->mmd_write(gsw, 0, PHY_DEV1F,
+ PHY_DEV1F_REG_10C, 0xd8be);
+ break;
+ }
+
+ /* power down pll. additional delay is not required via mdio access */
+ gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_10D, 0x10);
+
+ /* power up pll */
+ gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_10D, 0x14);
+}
+
+/* 12 registers for TX_MLT3 waveform tuning.
+ * 012 345 678 9ab
+ * 1 __
+ * _/ \_
+ * 0_/ \
+ * \_ _/
+ * -1 \__/
+ */
+static void mt7531_phy_100m_eye_diag_setting(struct gsw_mt753x *gsw, u32 port)
+{
+ gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x0, 0x187);
+ gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x1, 0x1c9);
+ gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x2, 0x1c6);
+ gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x3, 0x182);
+ gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x4, 0x208);
+ gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x5, 0x205);
+ gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x6, 0x384);
+ gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x7, 0x3cb);
+ gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x8, 0x3c4);
+ gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x9, 0x30a);
+ gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0xa, 0x00b);
+ gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0xb, 0x002);
+}
+
/* Tune every internal PHY: 100M TX eye diagram, HW auto downshift, DSP
 * timing-recovery tweaks, link-down power saving and asymmetric-pause
 * advertisement.
 *
 * NOTE(review): the mii_write of reg 0x1f appears to select a CL22
 * extension page and is never explicitly restored here; the subsequent
 * mt753x_tr_* accessors write the page register back to 0 as a side
 * effect — confirm this ordering is relied upon intentionally.
 */
static void mt7531_phy_setting(struct gsw_mt753x *gsw)
{
	int i;
	u32 val;

	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		mt7531_phy_100m_eye_diag_setting(gsw, i);

		/* Enable HW auto downshift */
		gsw->mii_write(gsw, i, 0x1f, 0x1);
		val = gsw->mii_read(gsw, i, PHY_EXT_REG_14);
		val |= PHY_EN_DOWN_SHFIT;
		gsw->mii_write(gsw, i, PHY_EXT_REG_14, val);

		/* Decrease SlvDPSready time */
		val = mt753x_tr_read(gsw, i, PMA_CH, PMA_NOD, PMA_17);
		val &= ~SLV_DSP_READY_TIME_M;
		val |= 0xc << SLV_DSP_READY_TIME_S;
		mt753x_tr_write(gsw, i, PMA_CH, PMA_NOD, PMA_17, val);

		/* Enable Random Update Mechanism */
		val = mt753x_tr_read(gsw, i, PMA_CH, PMA_NOD, PMA_18);
		val |= ENABLE_RANDOM_UPDATE_TRIGGER;
		mt753x_tr_write(gsw, i, PMA_CH, PMA_NOD, PMA_18, val);

		/* PHY link down power saving enable */
		val = gsw->mii_read(gsw, i, PHY_EXT_REG_17);
		val |= PHY_LINKDOWN_POWER_SAVING_EN;
		gsw->mii_write(gsw, i, PHY_EXT_REG_17, val);

		val = gsw->mmd_read(gsw, i, PHY_DEV1E, PHY_DEV1E_REG_0C6);
		val &= ~PHY_POWER_SAVING_M;
		val |= PHY_POWER_SAVING_TX << PHY_POWER_SAVING_S;
		gsw->mmd_write(gsw, i, PHY_DEV1E, PHY_DEV1E_REG_0C6, val);

		/* Timing Recovery for GbE slave mode */
		mt753x_tr_write(gsw, i, PMA_CH, PMA_NOD, PMA_01, 0x6fb90a);
		mt753x_tr_write(gsw, i, DSP_CH, DSP_NOD, DSP_06, 0x2ebaef);
		val = gsw->mmd_read(gsw, i, PHY_DEV1E, PHY_DEV1E_REG_234);
		val |= TR_OPEN_LOOP_EN;
		gsw->mmd_write(gsw, i, PHY_DEV1E, PHY_DEV1E_REG_234, val);

		/* Enable Asymmetric Pause Capability */
		val = gsw->mii_read(gsw, i, MII_ADVERTISE);
		val |= ADVERTISE_PAUSE_ASYM;
		gsw->mii_write(gsw, i, MII_ADVERTISE, val);
	}
}
+
/* Analog line-driver / RX-path tuning for one internal PHY.
 *
 * All values are vendor-calibrated magic numbers; the purpose of each
 * write is described inline.  Keep the write order as provided — it is
 * not known to be safe to reorder.
 */
static void mt7531_adjust_line_driving(struct gsw_mt753x *gsw, u32 port)
{
	/* For ADC timing margin window for LDO calibration */
	gsw->mmd_write(gsw, port, PHY_DEV1E, RXADC_LDO_CONTROL_2, 0x2222);

	/* Adjust AD sample timing */
	gsw->mmd_write(gsw, port, PHY_DEV1E, RXADC_CONTROL_3, 0x4444);

	/* Adjust Line driver current for different mode */
	gsw->mmd_write(gsw, port, PHY_DEV1F, TXVLD_DA_271, 0x2ca5);

	/* Adjust Line driver current for different mode */
	gsw->mmd_write(gsw, port, PHY_DEV1F, TXVLD_DA_272, 0xc6b);

	/* Adjust Line driver gain for 10BT from 1000BT calibration result */
	gsw->mmd_write(gsw, port, PHY_DEV1F, TXVLD_DA_273, 0x3000);

	/* Adjust RX Echo path filter */
	gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_DEV1E_REG_0FE, 0x2);

	/* Adjust RX HVGA bias current */
	gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_DEV1E_REG_41, 0x3333);

	/* Adjust TX class AB driver 1 */
	gsw->mmd_write(gsw, port, PHY_DEV1F, PHY_DEV1F_REG_268, 0x384);

	/* Adjust TX class AB driver 2 */
	gsw->mmd_write(gsw, port, PHY_DEV1F, PHY_DEV1F_REG_269, 0x1114);

	/* Adjust DAC delay for TX Pairs */
	gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_DEV1E_REG_13, 0x404);
	gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_DEV1E_REG_14, 0x404);

	/* Adjust DAC digital delay for TX Delay */
	gsw->mmd_write(gsw, port, PHY_DEV1F, PHY_DEV1F_REG_44, 0xc0);

	/* Adjust Line driver compensation cap for stability concern due to
	 * increase current.
	 */
	gsw->mmd_write(gsw, port, PHY_DEV1F, PHY_DEV1F_REG_26A, 0x3333);
}
+
/* Disable EEE (Energy Efficient Ethernet) on one internal PHY and apply
 * the related descrambler/DSP workarounds.  Values are vendor-provided
 * magic numbers.
 */
static void mt7531_eee_setting(struct gsw_mt753x *gsw, u32 port)
{
	u32 val;

	/* Disable EEE */
	gsw->mmd_write(gsw, port, PHY_DEV07, PHY_DEV07_REG_03C, 0);

	/* Disable generate signal to clear the scramble_lock when lpi mode */
	val = gsw->mmd_read(gsw, port, PHY_DEV1E, PHY_DEV1E_REG_189);
	val &= ~DESCRAMBLER_CLEAR_EN;
	gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_DEV1E_REG_189, val);

	/* Roll back EEE Slave Mode */
	gsw->mmd_write(gsw, port, 0x1e, 0x2d1, 0);
	mt753x_tr_write(gsw, port, DSP_CH, DSP_NOD, DSP_08, 0x1b);
	mt753x_tr_write(gsw, port, DSP_CH, DSP_NOD, DSP_0f, 0);
	mt753x_tr_write(gsw, port, DSP_CH, DSP_NOD, DSP_10, 0x5000);

	/* Adjust 100_mse_threshold */
	gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_DEV1E_REG_123, 0xffff);

	/* Disable mcc */
	gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_DEV1E_REG_A6, 0x300);
}
+
+static void mt7531_afifo_reset(struct gsw_mt753x *gsw, int enable)
+{
+ int p;
+ u32 val;
+
+ if (enable) {
+ for (p = 0; p < MT753X_NUM_PORTS; p++) {
+ val = mt753x_reg_read(gsw, DBG_CNT(p));
+ val &= ~DIS_CLR;
+ mt753x_reg_write(gsw, DBG_CNT(p), val);
+ }
+ } else {
+ for (p = 0; p < MT753X_NUM_PORTS; p++) {
+ val = mt753x_reg_read(gsw, DBG_CNT(p));
+ val |= DIS_CLR;
+ mt753x_reg_write(gsw, DBG_CNT(p), val);
+ }
+ }
+}
+
/* One-time hardware initialization for the MT7531 switch.
 *
 * Order matters: the internal PHYs are isolated first so no traffic
 * flows while the switch core is soft-reset and the core PLL and MAC
 * ports are configured.  The PHYs are de-isolated later in
 * mt7531_sw_post_init().
 *
 * Always returns 0 (no failure paths at present).
 */
static int mt7531_sw_init(struct gsw_mt753x *gsw)
{
	int i;
	u32 val;

	/* Internal PHYs sit at consecutive SMI addresses after the switch */
	gsw->phy_base = (gsw->smi_addr + 1) & MT753X_SMI_ADDR_MASK;

	/* MT7531 uses the direct (PHY_IAC based) accessors */
	gsw->mii_read = mt753x_mii_read;
	gsw->mii_write = mt753x_mii_write;
	gsw->mmd_read = mt753x_mmd_read;
	gsw->mmd_write = mt753x_mmd_write;

	gsw->hw_phy_cal = of_property_read_bool(gsw->dev->of_node, "mediatek,hw_phy_cal");

	/* Isolate all internal PHYs while the switch is brought up */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		val = gsw->mii_read(gsw, i, MII_BMCR);
		val |= BMCR_ISOLATE;
		gsw->mii_write(gsw, i, MII_BMCR, val);
	}

	/* Force MAC link down before reset */
	mt753x_reg_write(gsw, PMCR(5), FORCE_MODE_LNK);
	mt753x_reg_write(gsw, PMCR(6), FORCE_MODE_LNK);

	/* Switch soft reset */
	mt753x_reg_write(gsw, SYS_CTRL, SW_SYS_RST | SW_REG_RST);
	usleep_range(10, 20);

	/* Enable MDC input Schmitt Trigger */
	val = mt753x_reg_read(gsw, SMT0_IOLB);
	mt753x_reg_write(gsw, SMT0_IOLB, val | SMT_IOLB_5_SMI_MDC_EN);

	/* Set 7531 gpio pinmux */
	mt7531_set_gpio_pinmux(gsw);

	mt7531_core_pll_setup(gsw);
	mt7531_mac_port_setup(gsw, 5, &gsw->port5_cfg);
	mt7531_mac_port_setup(gsw, 6, &gsw->port6_cfg);

	/* Global mac control settings */
	mt753x_reg_write(gsw, GMACCR,
			 (15 << MTCC_LMT_S) | (15 << MAX_RX_JUMBO_S) |
			 RX_PKT_LEN_MAX_JUMBO);

	/* Enable Collision Poll: clock first, then release reset, then
	 * enable — each step committed with its own register write.
	 */
	val = mt753x_reg_read(gsw, CPGC_CTRL);
	val |= COL_CLK_EN;
	mt753x_reg_write(gsw, CPGC_CTRL, val);
	val |= COL_RST_N;
	mt753x_reg_write(gsw, CPGC_CTRL, val);
	val |= COL_EN;
	mt753x_reg_write(gsw, CPGC_CTRL, val);

	/* Disable AFIFO reset for extra short IPG */
	mt7531_afifo_reset(gsw, 0);

	return 0;
}
+
/* Second-stage MT7531 initialization: PHY PLL bring-up and per-PHY
 * tuning.
 *
 * The PHYs are forced to transmit constant data and powered off while
 * the PHY PLL is programmed, then re-enabled, tuned, de-isolated and
 * restored to normal Tx operation.
 *
 * Always returns 0 (no failure paths at present).
 */
static int mt7531_sw_post_init(struct gsw_mt753x *gsw)
{
	int i;
	u32 val;

	/* Let internal PHYs only Tx constant data in configure stage. */
	for (i = 0; i < MT753X_NUM_PHYS; i++)
		gsw->mmd_write(gsw, i, PHY_DEV1E, PHY_DEV1E_REG_141, 0x200);

	/* Internal PHYs might be enabled by HW Bootstrapping, or bootloader.
	 * Turn off PHYs before setup PHY PLL.
	 */
	val = gsw->mmd_read(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_403);
	val |= PHY_EN_BYPASS_MODE;
	val |= POWER_ON_OFF;
	gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_403, val);

	mt7531_phy_pll_setup(gsw);

	/* Enable Internal PHYs before phy setting */
	val = gsw->mmd_read(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_403);
	val |= PHY_EN_BYPASS_MODE;
	val &= ~POWER_ON_OFF;
	gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_403, val);

	mt7531_phy_setting(gsw);

	/* De-isolate the PHYs (isolated since mt7531_sw_init) */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		val = gsw->mii_read(gsw, i, MII_BMCR);
		val &= ~BMCR_ISOLATE;
		gsw->mii_write(gsw, i, MII_BMCR, val);
	}

	/* Analog line-driver and EEE adjustments, per PHY */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		mt7531_adjust_line_driving(gsw, i);
		mt7531_eee_setting(gsw, i);
	}

	/* Restore internal PHYs normal Tx function after configure stage. */
	for (i = 0; i < MT753X_NUM_PHYS; i++)
		gsw->mmd_write(gsw, i, PHY_DEV1E, PHY_DEV1E_REG_141, 0x0);

	mt7531_internal_phy_calibration(gsw);

	return 0;
}
+
/* Model hooks for the MT7531; registered in the common driver's
 * mt753x_sw_ids[] table.  detect() identifies the chip, init() runs the
 * core/MAC bring-up and post_init() finishes PHY bring-up.
 */
struct mt753x_sw_id mt7531_id = {
	.model = MT7531,
	.detect = mt7531_sw_detect,
	.init = mt7531_sw_init,
	.post_init = mt7531_sw_post_init
};
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Zhanguo Ju <zhanguo.ju@mediatek.com>");
+MODULE_DESCRIPTION("Driver for MediaTek MT753x Gigabit Switch");
diff --git a/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x.h b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x.h
new file mode 100755
index 0000000..732bda1
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Weijie Gao <weijie.gao@mediatek.com>
+ */
+
+#ifndef _MT753X_H_
+#define _MT753X_H_
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/of_mdio.h>
+#include <linux/workqueue.h>
+#include <linux/gpio/consumer.h>
+
+#ifdef CONFIG_SWCONFIG
+#include <linux/switch.h>
+#endif
+
+#include "mt753x_vlan.h"
+
+#define MT753X_DFL_CPU_PORT 6
+#define MT753X_NUM_PHYS 5
+
+#define MT753X_DFL_SMI_ADDR 0x1f
+#define MT753X_SMI_ADDR_MASK 0x1f
+
+struct gsw_mt753x;
+
/* Supported switch models; values equal the chip-name field read from
 * the CHIP_REV register (see the detect() callbacks).
 */
enum mt753x_model {
	MT7530 = 0x7530,
	MT7531 = 0x7531
};
+
/* MAC port (port 5/6) configuration, parsed from the device tree by
 * mt753x_load_port_cfg().
 */
struct mt753x_port_cfg {
	struct device_node *np;		/* the "mediatek,mt753x-port" node */
	int phy_mode;			/* result of of_get_phy_mode() */
	u32 enabled: 1;			/* node found and parsed */
	u32 force_link: 1;		/* set when a fixed-link node exists */
	u32 speed: 2;			/* MAC_SPD_* (10/100/1000/2500) */
	u32 duplex: 1;			/* 1 = full duplex */
	bool ssc_on;			/* DT "mediatek,ssc-on" */
	bool stag_on;			/* DT "mediatek,stag-on" */
};
+
/* Per-internal-PHY bookkeeping (one entry per PHY in gsw->phys[]). */
struct mt753x_phy {
	struct gsw_mt753x *gsw;		/* owning switch instance */
	/* NOTE(review): a full struct net_device is embedded by value, not
	 * a pointer — sizeable; confirm this is intentional.
	 */
	struct net_device netdev;
	struct phy_device *phydev;
};
+
/* Driver state for one MT7530/MT7531 switch instance. */
struct gsw_mt753x {
	u32 id;				/* index assigned in mt753x_add_gsw() */

	struct device *dev;
	struct mii_bus *host_bus;	/* MDIO bus the switch hangs off */
	struct mii_bus *gphy_bus;
	struct mutex mii_lock; /* MII access lock */
	u32 smi_addr;			/* switch's own SMI (MDIO) address */
	u32 phy_base;			/* SMI address of internal PHY 0 */
	int direct_phy_access;

	enum mt753x_model model;
	const char *name;		/* e.g. "MT7531AE"/"MT7531BE" */

	/* MAC port configs parsed from DT (see mt753x_load_port_cfg) */
	struct mt753x_port_cfg port5_cfg;
	struct mt753x_port_cfg port6_cfg;

	bool hw_phy_cal;		/* DT "mediatek,hw_phy_cal" */
	bool phy_status_poll;
	struct mt753x_phy phys[MT753X_NUM_PHYS];
// int phy_irqs[PHY_MAX_ADDR]; //FIXME

	int phy_link_sts;

	int irq;
	int reset_pin;
	struct work_struct irq_worker;

#ifdef CONFIG_SWCONFIG
	struct switch_dev swdev;
	u32 cpu_port;
#endif

	int global_vlan_enable;
	struct mt753x_vlan_entry vlan_entries[MT753X_NUM_VLANS];
	struct mt753x_port_entry port_entries[MT753X_NUM_PORTS];

	/* Model-specific PHY register accessors, installed by init() */
	int (*mii_read)(struct gsw_mt753x *gsw, int phy, int reg);
	void (*mii_write)(struct gsw_mt753x *gsw, int phy, int reg, u16 val);

	int (*mmd_read)(struct gsw_mt753x *gsw, int addr, int devad, u16 reg);
	void (*mmd_write)(struct gsw_mt753x *gsw, int addr, int devad, u16 reg,
			  u16 val);

	struct list_head list;		/* linked into global mt753x_devs */
};
+
/* Chip identification, filled in by a model's detect() callback. */
struct chip_rev {
	const char *name;	/* package name, e.g. "MT7531AE" */
	u32 rev;		/* CHIP_REV_M field of the CHIP_REV register */
};
+
/* Operations table implemented once per supported switch model. */
struct mt753x_sw_id {
	enum mt753x_model model;
	/* return 0 if this model is present; may fill *crev (crev may be
	 * NULL when the caller does not need revision info)
	 */
	int (*detect)(struct gsw_mt753x *gsw, struct chip_rev *crev);
	int (*init)(struct gsw_mt753x *gsw);
	int (*post_init)(struct gsw_mt753x *gsw);
};
+
+extern struct list_head mt753x_devs;
+
+struct gsw_mt753x *mt753x_get_gsw(u32 id);
+struct gsw_mt753x *mt753x_get_first_gsw(void);
+void mt753x_put_gsw(void);
+void mt753x_lock_gsw(void);
+
+u32 mt753x_reg_read(struct gsw_mt753x *gsw, u32 reg);
+void mt753x_reg_write(struct gsw_mt753x *gsw, u32 reg, u32 val);
+
+int mt753x_mii_read(struct gsw_mt753x *gsw, int phy, int reg);
+void mt753x_mii_write(struct gsw_mt753x *gsw, int phy, int reg, u16 val);
+
+int mt753x_mmd_read(struct gsw_mt753x *gsw, int addr, int devad, u16 reg);
+void mt753x_mmd_write(struct gsw_mt753x *gsw, int addr, int devad, u16 reg,
+ u16 val);
+
+int mt753x_mmd_ind_read(struct gsw_mt753x *gsw, int addr, int devad, u16 reg);
+void mt753x_mmd_ind_write(struct gsw_mt753x *gsw, int addr, int devad, u16 reg,
+ u16 val);
+
+int mt753x_tr_read(struct gsw_mt753x *gsw, int addr, u8 ch, u8 node, u8 daddr);
+void mt753x_tr_write(struct gsw_mt753x *gsw, int addr, u8 ch, u8 node, u8 daddr,
+ u32 data);
+
+void mt753x_irq_worker(struct work_struct *work);
+void mt753x_irq_enable(struct gsw_mt753x *gsw);
+
+int mt753x_phy_calibration(struct gsw_mt753x *gsw, u8 phyaddr);
+int extphy_init(struct gsw_mt753x *gsw, int addr);
+
+/* MDIO Indirect Access Registers */
+#define MII_MMD_ACC_CTL_REG 0x0d
+#define MMD_CMD_S 14
+#define MMD_CMD_M 0xc000
+#define MMD_DEVAD_S 0
+#define MMD_DEVAD_M 0x1f
+
+/* MMD_CMD: MMD commands */
+#define MMD_ADDR 0
+#define MMD_DATA 1
+
+#define MII_MMD_ADDR_DATA_REG 0x0e
+
+/* Procedure of MT753x Internal Register Access
+ *
+ * 1. Internal Register Address
+ *
+ * The MT753x has a 16-bit register address and each register is 32-bit.
+ * This means the lowest two bits are not used as the register address is
+ * 4-byte aligned.
+ *
+ * Rest of the valid bits are divided into two parts:
+ * Bit 15..6 is the Page address
+ * Bit 5..2 is the low address
+ *
+ * -------------------------------------------------------------------
+ * | 15 14 13 12 11 10 9 8 7 6 | 5 4 3 2 | 1 0 |
+ * |----------------------------------------|---------------|--------|
+ * | Page Address | Address | Unused |
+ * -------------------------------------------------------------------
+ *
+ * 2. MDIO access timing
+ *
+ * The MT753x uses the following MDIO timing for a single register read
+ *
+ * Phase 1: Write Page Address
+ * -------------------------------------------------------------------
+ * | ST | OP | PHY_ADDR | TYPE | RSVD | TA | RSVD | PAGE_ADDR |
+ * -------------------------------------------------------------------
+ * | 01 | 01 | 11111 | 1 | 1111 | xx | 00000 | REG_ADDR[15..6] |
+ * -------------------------------------------------------------------
+ *
+ * Phase 2: Write low Address & Read low word
+ * -------------------------------------------------------------------
+ * | ST | OP | PHY_ADDR | TYPE | LOW_ADDR | TA | DATA |
+ * -------------------------------------------------------------------
+ * | 01 | 10 | 11111 | 0 | REG_ADDR[5..2] | xx | DATA[15..0] |
+ * -------------------------------------------------------------------
+ *
+ * Phase 3: Read high word
+ * -------------------------------------------------------------------
+ * | ST | OP | PHY_ADDR | TYPE | RSVD | TA | DATA |
+ * -------------------------------------------------------------------
+ * | 01 | 10 | 11111 | 1 | 0000 | xx | DATA[31..16] |
+ * -------------------------------------------------------------------
+ *
+ * The MT753x uses the following MDIO timing for a single register write
+ *
+ * Phase 1: Write Page Address (The same as read)
+ *
+ * Phase 2: Write low Address and low word
+ * -------------------------------------------------------------------
+ * | ST | OP | PHY_ADDR | TYPE | LOW_ADDR | TA | DATA |
+ * -------------------------------------------------------------------
+ * | 01 | 01 | 11111 | 0 | REG_ADDR[5..2] | xx | DATA[15..0] |
+ * -------------------------------------------------------------------
+ *
+ * Phase 3: write high word
+ * -------------------------------------------------------------------
+ * | ST | OP | PHY_ADDR | TYPE | RSVD | TA | DATA |
+ * -------------------------------------------------------------------
+ * | 01 | 01 | 11111 | 1 | 0000 | xx | DATA[31..16] |
+ * -------------------------------------------------------------------
+ *
+ */
+
+/* Internal Register Address fields */
+#define MT753X_REG_PAGE_ADDR_S 6
+#define MT753X_REG_PAGE_ADDR_M 0xffc0
+#define MT753X_REG_ADDR_S 2
+#define MT753X_REG_ADDR_M 0x3c
+#endif /* _MT753X_H_ */
diff --git a/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_mdio.c b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_mdio.c
new file mode 100755
index 0000000..06a1114
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_mdio.c
@@ -0,0 +1,861 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Weijie Gao <weijie.gao@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/reset.h>
+#include <linux/hrtimer.h>
+#include <linux/mii.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/of_net.h>
+#include <linux/of_irq.h>
+#include <linux/phy.h>
+
+#include "mt753x.h"
+#include "mt753x_swconfig.h"
+#include "mt753x_regs.h"
+#include "mt753x_nl.h"
+#include "mt7530.h"
+#include "mt7531.h"
+
/* Next id to hand out to a newly probed switch (see mt753x_add_gsw) */
static u32 mt753x_id;
/* All probed switch instances; guarded by mt753x_devs_lock.
 * NOTE(review): not initialized with LIST_HEAD/INIT_LIST_HEAD here —
 * presumably done at module init elsewhere; confirm.
 */
struct list_head mt753x_devs;
static DEFINE_MUTEX(mt753x_devs_lock);

/* Known switch models, tried in order during detection */
static struct mt753x_sw_id *mt753x_sw_ids[] = {
	&mt7530_id,
	&mt7531_id,
};
+
/* Read a 32-bit switch register through three clause-22 MDIO frames:
 *   1. write reg 0x1f = page  (register address bits 15..6)
 *   2. read  in-page address  (bits 5..2)  -> low 16 bits
 *   3. read  reg 0x10                      -> high 16 bits
 *
 * The bus's mdio_lock is held across all three frames so the page/data
 * sequence cannot be interleaved with other bus users; host_bus->read/
 * write are called directly (presumably because mdiobus_read/write take
 * mdio_lock themselves — confirm against the kernel's mdiobus locking).
 */
u32 mt753x_reg_read(struct gsw_mt753x *gsw, u32 reg)
{
	u32 high, low;

	mutex_lock(&gsw->host_bus->mdio_lock);

	gsw->host_bus->write(gsw->host_bus, gsw->smi_addr, 0x1f,
		(reg & MT753X_REG_PAGE_ADDR_M) >> MT753X_REG_PAGE_ADDR_S);

	low = gsw->host_bus->read(gsw->host_bus, gsw->smi_addr,
		(reg & MT753X_REG_ADDR_M) >> MT753X_REG_ADDR_S);

	high = gsw->host_bus->read(gsw->host_bus, gsw->smi_addr, 0x10);

	mutex_unlock(&gsw->host_bus->mdio_lock);

	return (high << 16) | (low & 0xffff);
}
+
/* Write a 32-bit switch register (reverse of mt753x_reg_read): page
 * select via reg 0x1f, low 16 bits to the in-page address, high 16 bits
 * to reg 0x10 — all under the bus's mdio_lock to keep the sequence
 * atomic with respect to other bus users.
 */
void mt753x_reg_write(struct gsw_mt753x *gsw, u32 reg, u32 val)
{
	mutex_lock(&gsw->host_bus->mdio_lock);

	gsw->host_bus->write(gsw->host_bus, gsw->smi_addr, 0x1f,
		(reg & MT753X_REG_PAGE_ADDR_M) >> MT753X_REG_PAGE_ADDR_S);

	gsw->host_bus->write(gsw->host_bus, gsw->smi_addr,
		(reg & MT753X_REG_ADDR_M) >> MT753X_REG_ADDR_S, val & 0xffff);

	gsw->host_bus->write(gsw->host_bus, gsw->smi_addr, 0x10, val >> 16);

	mutex_unlock(&gsw->host_bus->mdio_lock);
}
+
+/* Indirect MDIO clause 22/45 access */
+static int mt753x_mii_rw(struct gsw_mt753x *gsw, int phy, int reg, u16 data,
+ u32 cmd, u32 st)
+{
+ ktime_t timeout;
+ u32 val, timeout_us;
+ int ret = 0;
+
+ timeout_us = 100000;
+ timeout = ktime_add_us(ktime_get(), timeout_us);
+ while (1) {
+ val = mt753x_reg_read(gsw, PHY_IAC);
+
+ if ((val & PHY_ACS_ST) == 0)
+ break;
+
+ if (ktime_compare(ktime_get(), timeout) > 0)
+ return -ETIMEDOUT;
+ }
+
+ val = (st << MDIO_ST_S) |
+ ((cmd << MDIO_CMD_S) & MDIO_CMD_M) |
+ ((phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) |
+ ((reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M);
+
+ if (cmd == MDIO_CMD_WRITE || cmd == MDIO_CMD_ADDR)
+ val |= data & MDIO_RW_DATA_M;
+
+ mt753x_reg_write(gsw, PHY_IAC, val | PHY_ACS_ST);
+
+ timeout_us = 100000;
+ timeout = ktime_add_us(ktime_get(), timeout_us);
+ while (1) {
+ val = mt753x_reg_read(gsw, PHY_IAC);
+
+ if ((val & PHY_ACS_ST) == 0)
+ break;
+
+ if (ktime_compare(ktime_get(), timeout) > 0)
+ return -ETIMEDOUT;
+ }
+
+ if (cmd == MDIO_CMD_READ || cmd == MDIO_CMD_READ_C45) {
+ val = mt753x_reg_read(gsw, PHY_IAC);
+ ret = val & MDIO_RW_DATA_M;
+ }
+
+ return ret;
+}
+
+int mt753x_mii_read(struct gsw_mt753x *gsw, int phy, int reg)
+{
+ int val;
+
+ if (phy < MT753X_NUM_PHYS)
+ phy = (gsw->phy_base + phy) & MT753X_SMI_ADDR_MASK;
+
+ mutex_lock(&gsw->mii_lock);
+ val = mt753x_mii_rw(gsw, phy, reg, 0, MDIO_CMD_READ, MDIO_ST_C22);
+ mutex_unlock(&gsw->mii_lock);
+
+ return val;
+}
+
+void mt753x_mii_write(struct gsw_mt753x *gsw, int phy, int reg, u16 val)
+{
+ if (phy < MT753X_NUM_PHYS)
+ phy = (gsw->phy_base + phy) & MT753X_SMI_ADDR_MASK;
+
+ mutex_lock(&gsw->mii_lock);
+ mt753x_mii_rw(gsw, phy, reg, val, MDIO_CMD_WRITE, MDIO_ST_C22);
+ mutex_unlock(&gsw->mii_lock);
+}
+
/* Clause-45 MMD register read: an ADDR cycle latches (devad, reg), then
 * a READ_C45 cycle returns the data.  Internal-PHY addresses
 * (< MT753X_NUM_PHYS) are translated via gsw->phy_base.
 */
int mt753x_mmd_read(struct gsw_mt753x *gsw, int addr, int devad, u16 reg)
{
	int val;

	if (addr < MT753X_NUM_PHYS)
		addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK;

	mutex_lock(&gsw->mii_lock);
	/* For C45 framing the "reg" slot of mii_rw carries the devad */
	mt753x_mii_rw(gsw, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
	val = mt753x_mii_rw(gsw, addr, devad, 0, MDIO_CMD_READ_C45,
			    MDIO_ST_C45);
	mutex_unlock(&gsw->mii_lock);

	return val;
}
+
/* Clause-45 MMD register write: ADDR cycle latches (devad, reg), then a
 * WRITE cycle carries the data.  Internal-PHY addresses are translated
 * via gsw->phy_base.
 */
void mt753x_mmd_write(struct gsw_mt753x *gsw, int addr, int devad, u16 reg,
		      u16 val)
{
	if (addr < MT753X_NUM_PHYS)
		addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK;

	mutex_lock(&gsw->mii_lock);
	/* For C45 framing the "reg" slot of mii_rw carries the devad */
	mt753x_mii_rw(gsw, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
	mt753x_mii_rw(gsw, addr, devad, val, MDIO_CMD_WRITE, MDIO_ST_C45);
	mutex_unlock(&gsw->mii_lock);
}
+
/* MMD register read via the clause-22 indirect registers 0x0d/0x0e
 * (MII_MMD_ACC_CTL_REG / MII_MMD_ADDR_DATA_REG):
 *   1. ACC_CTL  = ADDR command | devad
 *   2. ADDR_DATA = target register
 *   3. ACC_CTL  = DATA command | devad
 *   4. read ADDR_DATA -> value
 * MMD data is 16 bits wide, hence the u16 intermediate.
 */
int mt753x_mmd_ind_read(struct gsw_mt753x *gsw, int addr, int devad, u16 reg)
{
	u16 val;

	if (addr < MT753X_NUM_PHYS)
		addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK;

	mutex_lock(&gsw->mii_lock);

	mt753x_mii_rw(gsw, addr, MII_MMD_ACC_CTL_REG,
		      (MMD_ADDR << MMD_CMD_S) |
		      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M),
		      MDIO_CMD_WRITE, MDIO_ST_C22);

	mt753x_mii_rw(gsw, addr, MII_MMD_ADDR_DATA_REG, reg,
		      MDIO_CMD_WRITE, MDIO_ST_C22);

	mt753x_mii_rw(gsw, addr, MII_MMD_ACC_CTL_REG,
		      (MMD_DATA << MMD_CMD_S) |
		      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M),
		      MDIO_CMD_WRITE, MDIO_ST_C22);

	val = mt753x_mii_rw(gsw, addr, MII_MMD_ADDR_DATA_REG, 0,
			    MDIO_CMD_READ, MDIO_ST_C22);

	mutex_unlock(&gsw->mii_lock);

	return val;
}
+
/* MMD register write via the clause-22 indirect registers 0x0d/0x0e;
 * same ADDR/DATA two-phase sequence as mt753x_mmd_ind_read(), with the
 * final step writing the value instead of reading it.
 */
void mt753x_mmd_ind_write(struct gsw_mt753x *gsw, int addr, int devad, u16 reg,
			  u16 val)
{
	if (addr < MT753X_NUM_PHYS)
		addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK;

	mutex_lock(&gsw->mii_lock);

	mt753x_mii_rw(gsw, addr, MII_MMD_ACC_CTL_REG,
		      (MMD_ADDR << MMD_CMD_S) |
		      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M),
		      MDIO_CMD_WRITE, MDIO_ST_C22);

	mt753x_mii_rw(gsw, addr, MII_MMD_ADDR_DATA_REG, reg,
		      MDIO_CMD_WRITE, MDIO_ST_C22);

	mt753x_mii_rw(gsw, addr, MII_MMD_ACC_CTL_REG,
		      (MMD_DATA << MMD_CMD_S) |
		      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M),
		      MDIO_CMD_WRITE, MDIO_ST_C22);

	mt753x_mii_rw(gsw, addr, MII_MMD_ADDR_DATA_REG, val,
		      MDIO_CMD_WRITE, MDIO_ST_C22);

	mutex_unlock(&gsw->mii_lock);
}
+
/* Returns non-zero if the (fixed-link) node declares "full-duplex". */
static inline int mt753x_get_duplex(const struct device_node *np)
{
	return of_property_read_bool(np, "full-duplex");
}
+
+static void mt753x_load_port_cfg(struct gsw_mt753x *gsw)
+{
+ struct device_node *port_np;
+ struct device_node *fixed_link_node;
+ struct mt753x_port_cfg *port_cfg;
+ u32 port;
+
+ for_each_child_of_node(gsw->dev->of_node, port_np) {
+ if (!of_device_is_compatible(port_np, "mediatek,mt753x-port"))
+ continue;
+
+ if (!of_device_is_available(port_np))
+ continue;
+
+ if (of_property_read_u32(port_np, "reg", &port))
+ continue;
+
+ switch (port) {
+ case 5:
+ port_cfg = &gsw->port5_cfg;
+ break;
+ case 6:
+ port_cfg = &gsw->port6_cfg;
+ break;
+ default:
+ continue;
+ }
+
+ if (port_cfg->enabled) {
+ dev_info(gsw->dev, "duplicated node for port%d\n",
+ port_cfg->phy_mode);
+ continue;
+ }
+
+ port_cfg->np = port_np;
+
+ port_cfg->phy_mode = of_get_phy_mode(port_np);
+ if (port_cfg->phy_mode < 0) {
+ dev_info(gsw->dev, "incorrect phy-mode %d\n", port);
+ continue;
+ }
+
+ fixed_link_node = of_get_child_by_name(port_np, "fixed-link");
+ if (fixed_link_node) {
+ u32 speed;
+
+ port_cfg->force_link = 1;
+ port_cfg->duplex = mt753x_get_duplex(fixed_link_node);
+
+ if (of_property_read_u32(fixed_link_node, "speed",
+ &speed)) {
+ speed = 0;
+ continue;
+ }
+
+ of_node_put(fixed_link_node);
+
+ switch (speed) {
+ case 10:
+ port_cfg->speed = MAC_SPD_10;
+ break;
+ case 100:
+ port_cfg->speed = MAC_SPD_100;
+ break;
+ case 1000:
+ port_cfg->speed = MAC_SPD_1000;
+ break;
+ case 2500:
+ port_cfg->speed = MAC_SPD_2500;
+ break;
+ default:
+ dev_info(gsw->dev, "incorrect speed %d\n",
+ speed);
+ continue;
+ }
+ }
+
+ port_cfg->ssc_on = of_property_read_bool(port_cfg->np,
+ "mediatek,ssc-on");
+ port_cfg->stag_on = of_property_read_bool(port_cfg->np,
+ "mediatek,stag-on");
+ port_cfg->enabled = 1;
+ }
+}
+
/* Write a Token-Ring (TR) register on an internal PHY.
 *
 * TR access goes through the CL22 page-select register: switch to
 * PHY_TR_PAGE, wait for the controller (PHY_TR_PKT_XMT_STA) to be
 * ready, load the 32-bit data into the low/high data registers, issue
 * the write command with (ch, node, daddr), wait again, and restore
 * page 0.
 *
 * On timeout the write is silently abandoned (page 0 is still
 * restored via the out label).
 */
void mt753x_tr_write(struct gsw_mt753x *gsw, int addr, u8 ch, u8 node, u8 daddr,
		     u32 data)
{
	ktime_t timeout;
	u32 timeout_us;
	u32 val;

	if (addr < MT753X_NUM_PHYS)
		addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK;

	gsw->mii_write(gsw, addr, PHY_CL22_PAGE_CTRL, PHY_TR_PAGE);

	/* NOTE(review): the value of this read is unused — it is
	 * immediately overwritten inside the poll loop below.
	 */
	val = gsw->mii_read(gsw, addr, PHY_TR_CTRL);

	timeout_us = 100000;
	timeout = ktime_add_us(ktime_get(), timeout_us);
	while (1) {
		val = gsw->mii_read(gsw, addr, PHY_TR_CTRL);

		if (!!(val & PHY_TR_PKT_XMT_STA))
			break;

		if (ktime_compare(ktime_get(), timeout) > 0)
			goto out;
	}

	gsw->mii_write(gsw, addr, PHY_TR_LOW_DATA, PHY_TR_LOW_VAL(data));
	gsw->mii_write(gsw, addr, PHY_TR_HIGH_DATA, PHY_TR_HIGH_VAL(data));
	val = PHY_TR_PKT_XMT_STA | (PHY_TR_WRITE << PHY_TR_WR_S) |
	      (ch << PHY_TR_CH_ADDR_S) | (node << PHY_TR_NODE_ADDR_S) |
	      (daddr << PHY_TR_DATA_ADDR_S);
	gsw->mii_write(gsw, addr, PHY_TR_CTRL, val);

	timeout_us = 100000;
	timeout = ktime_add_us(ktime_get(), timeout_us);
	while (1) {
		val = gsw->mii_read(gsw, addr, PHY_TR_CTRL);

		if (!!(val & PHY_TR_PKT_XMT_STA))
			break;

		if (ktime_compare(ktime_get(), timeout) > 0)
			goto out;
	}
out:
	/* Always restore the default CL22 page */
	gsw->mii_write(gsw, addr, PHY_CL22_PAGE_CTRL, 0);
}
+
/* Read a Token-Ring (TR) register from an internal PHY.
 *
 * Mirrors mt753x_tr_write(): select PHY_TR_PAGE, wait until ready,
 * issue the read command, wait, then fetch the low and high data
 * registers.  Returns the combined value or -ETIMEDOUT (page 0 is
 * restored on every path).
 *
 * NOTE(review): val_h is a u8, so only 8 bits of the high data
 * register survive the assignment before the << 16 — if HIGH_DATA can
 * ever exceed 0xff the upper bits are lost.  Compare with tr_write,
 * which writes PHY_TR_HIGH_VAL(data).  Confirm the intended width.
 */
int mt753x_tr_read(struct gsw_mt753x *gsw, int addr, u8 ch, u8 node, u8 daddr)
{
	ktime_t timeout;
	u32 timeout_us;
	u32 val;
	u8 val_h;

	if (addr < MT753X_NUM_PHYS)
		addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK;

	gsw->mii_write(gsw, addr, PHY_CL22_PAGE_CTRL, PHY_TR_PAGE);

	/* NOTE(review): the value of this read is unused — it is
	 * immediately overwritten inside the poll loop below.
	 */
	val = gsw->mii_read(gsw, addr, PHY_TR_CTRL);

	timeout_us = 100000;
	timeout = ktime_add_us(ktime_get(), timeout_us);
	while (1) {
		val = gsw->mii_read(gsw, addr, PHY_TR_CTRL);

		if (!!(val & PHY_TR_PKT_XMT_STA))
			break;

		if (ktime_compare(ktime_get(), timeout) > 0) {
			gsw->mii_write(gsw, addr, PHY_CL22_PAGE_CTRL, 0);
			return -ETIMEDOUT;
		}
	}

	val = PHY_TR_PKT_XMT_STA | (PHY_TR_READ << PHY_TR_WR_S) |
	      (ch << PHY_TR_CH_ADDR_S) | (node << PHY_TR_NODE_ADDR_S) |
	      (daddr << PHY_TR_DATA_ADDR_S);
	gsw->mii_write(gsw, addr, PHY_TR_CTRL, val);

	timeout_us = 100000;
	timeout = ktime_add_us(ktime_get(), timeout_us);
	while (1) {
		val = gsw->mii_read(gsw, addr, PHY_TR_CTRL);

		if (!!(val & PHY_TR_PKT_XMT_STA))
			break;

		if (ktime_compare(ktime_get(), timeout) > 0) {
			gsw->mii_write(gsw, addr, PHY_CL22_PAGE_CTRL, 0);
			return -ETIMEDOUT;
		}
	}

	val = gsw->mii_read(gsw, addr, PHY_TR_LOW_DATA);
	val_h = gsw->mii_read(gsw, addr, PHY_TR_HIGH_DATA);
	val |= (val_h << 16);

	gsw->mii_write(gsw, addr, PHY_CL22_PAGE_CTRL, 0);

	return val;
}
+
+static void mt753x_add_gsw(struct gsw_mt753x *gsw)
+{
+ mutex_lock(&mt753x_devs_lock);
+ gsw->id = mt753x_id++;
+ INIT_LIST_HEAD(&gsw->list);
+ list_add_tail(&gsw->list, &mt753x_devs);
+ mutex_unlock(&mt753x_devs_lock);
+}
+
/* mt753x_remove_gsw - unlink a switch instance from the global device
 * list under mt753x_devs_lock (counterpart of mt753x_add_gsw)
 */
static void mt753x_remove_gsw(struct gsw_mt753x *gsw)
{
	mutex_lock(&mt753x_devs_lock);
	list_del(&gsw->list);
	mutex_unlock(&mt753x_devs_lock);
}
+
+
/* mt753x_get_gsw - look up a registered switch by device id
 *
 * NOTE(locking): on success this returns with mt753x_devs_lock HELD;
 * the caller must release it with mt753x_put_gsw(). On failure the
 * lock is dropped here and NULL is returned. Callers must not call
 * mt753x_put_gsw() after a NULL return.
 */
struct gsw_mt753x *mt753x_get_gsw(u32 id)
{
	struct gsw_mt753x *dev;

	mutex_lock(&mt753x_devs_lock);

	list_for_each_entry(dev, &mt753x_devs, list) {
		if (dev->id == id)
			return dev; /* lock intentionally kept held */
	}

	mutex_unlock(&mt753x_devs_lock);

	return NULL;
}
+
/* mt753x_get_first_gsw - fetch the first registered switch, if any
 *
 * Same locking contract as mt753x_get_gsw(): returns with
 * mt753x_devs_lock held on success (release with mt753x_put_gsw());
 * drops the lock and returns NULL when the list is empty.
 */
struct gsw_mt753x *mt753x_get_first_gsw(void)
{
	struct gsw_mt753x *dev;

	mutex_lock(&mt753x_devs_lock);

	/* returns on the first iteration with the lock still held */
	list_for_each_entry(dev, &mt753x_devs, list)
		return dev;

	mutex_unlock(&mt753x_devs_lock);

	return NULL;
}
+
/* mt753x_put_gsw - release the device list lock taken by a successful
 * mt753x_get_gsw()/mt753x_get_first_gsw() or by mt753x_lock_gsw()
 */
void mt753x_put_gsw(void)
{
	mutex_unlock(&mt753x_devs_lock);
}
+
/* mt753x_lock_gsw - take the device list lock for external iteration
 * over mt753x_devs; pair with mt753x_put_gsw()
 */
void mt753x_lock_gsw(void)
{
	mutex_lock(&mt753x_devs_lock);
}
+
+static int mt753x_hw_reset(struct gsw_mt753x *gsw)
+{
+ struct device_node *np = gsw->dev->of_node;
+ struct reset_control *rstc;
+ int mcm;
+ int ret = -EINVAL;
+
+ mcm = of_property_read_bool(np, "mediatek,mcm");
+ if (mcm) {
+ rstc = devm_reset_control_get(gsw->dev, "mcm");
+ ret = IS_ERR(rstc);
+ if (IS_ERR(rstc)) {
+ dev_err(gsw->dev, "Missing reset ctrl of switch\n");
+ return ret;
+ }
+
+ reset_control_assert(rstc);
+ msleep(30);
+ reset_control_deassert(rstc);
+
+ gsw->reset_pin = -1;
+ return 0;
+ }
+
+ gsw->reset_pin = of_get_named_gpio(np, "reset-gpios", 0);
+ if (gsw->reset_pin < 0) {
+ dev_err(gsw->dev, "Missing reset pin of switch\n");
+ return ret;
+ }
+
+ ret = devm_gpio_request(gsw->dev, gsw->reset_pin, "mt753x-reset");
+ if (ret) {
+ dev_info(gsw->dev, "Failed to request gpio %d\n",
+ gsw->reset_pin);
+ return ret;
+ }
+
+ gpio_direction_output(gsw->reset_pin, 0);
+ msleep(30);
+ gpio_set_value(gsw->reset_pin, 1);
+ msleep(500);
+
+ return 0;
+}
+#if 1 //XDXDXDXD
/* mii_bus .read callback: forward to the switch's SMI read accessor */
static int mt753x_mdio_read(struct mii_bus *bus, int addr, int reg)
{
	struct gsw_mt753x *gsw = bus->priv;

	return gsw->mii_read(gsw, addr, reg);
}
+
/* mii_bus .write callback: forward to the switch's SMI write accessor.
 * The underlying mii_write returns no status, so this always reports 0.
 */
static int mt753x_mdio_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct gsw_mt753x *gsw = bus->priv;

	gsw->mii_write(gsw, addr, reg, val);

	return 0;
}
+
/* Empty netdev ops for the dummy net_device each internal PHY is
 * attached to (the PHY core requires a net_device to connect to)
 */
static const struct net_device_ops mt753x_dummy_netdev_ops = {
};
+
/* PHY link-change callback for the internal PHYs: logs link state.
 * The port number is recovered from the phy's position in gsw->phys.
 */
static void mt753x_phy_link_handler(struct net_device *dev)
{
	struct mt753x_phy *phy = container_of(dev, struct mt753x_phy, netdev);
	struct phy_device *phydev = phy->phydev;
	struct gsw_mt753x *gsw = phy->gsw;
	u32 port = phy - gsw->phys;	/* pointer arithmetic -> port index */

	if (phydev->link) {
		dev_info(gsw->dev,
			 "Port %d Link is Up - %s/%s - flow control %s\n",
			 port, phy_speed_to_str(phydev->speed),
			 (phydev->duplex == DUPLEX_FULL) ? "Full" : "Half",
			 phydev->pause ? "rx/tx" : "off");
	} else {
		dev_info(gsw->dev, "Port %d Link is Down\n", port);
	}
}
+
/* mt753x_connect_internal_phys - attach and start every internal PHY
 * child of @mii_np
 *
 * For each child node with a valid "reg" below MT753X_NUM_PHYS and a
 * parsable phy-mode, connects a dummy net_device to the PHY so link
 * changes are reported via mt753x_phy_link_handler(). Nodes that fail
 * any step are skipped with a log message; this function cannot fail.
 */
static void mt753x_connect_internal_phys(struct gsw_mt753x *gsw,
					 struct device_node *mii_np)
{
	struct device_node *phy_np;
	struct mt753x_phy *phy;
	int phy_mode;
	u32 phyad;

	if (!mii_np)
		return;

	for_each_child_of_node(mii_np, phy_np) {
		if (of_property_read_u32(phy_np, "reg", &phyad))
			continue;

		if (phyad >= MT753X_NUM_PHYS)
			continue;

		/* 5.4-era single-argument API: returns mode or -errno */
		phy_mode = of_get_phy_mode(phy_np);
		if (phy_mode < 0) {
			dev_info(gsw->dev, "incorrect phy-mode %d for PHY %d\n",
				 phy_mode, phyad);
			continue;
		}

		phy = &gsw->phys[phyad];
		phy->gsw = gsw;

		/* The PHY core needs a net_device; use a dummy one */
		init_dummy_netdev(&phy->netdev);
		phy->netdev.netdev_ops = &mt753x_dummy_netdev_ops;

		phy->phydev = of_phy_connect(&phy->netdev, phy_np,
					     mt753x_phy_link_handler, 0,
					     phy_mode);
		if (!phy->phydev) {
			dev_info(gsw->dev, "could not connect to PHY %d\n",
				 phyad);
			continue;
		}

		phy_start(phy->phydev);
	}
}
+
+static void mt753x_disconnect_internal_phys(struct gsw_mt753x *gsw)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(gsw->phys); i++) {
+ if (gsw->phys[i].phydev) {
+ phy_stop(gsw->phys[i].phydev);
+ phy_disconnect(gsw->phys[i].phydev);
+ gsw->phys[i].phydev = NULL;
+ }
+ }
+}
+
/* mt753x_mdio_register - create and register the MDIO bus that exposes
 * the switch's internal PHYs
 *
 * Uses the optional "mdio-bus" child node for bus naming and PHY
 * enumeration. When the switch was probed with "mediatek,phy-poll",
 * also connects the internal PHYs for link reporting.
 * Returns 0 on success or a negative errno; on registration failure
 * gsw->gphy_bus is reset to NULL.
 */
static int mt753x_mdio_register(struct gsw_mt753x *gsw)
{
	struct device_node *mii_np;
	int i, ret;

	mii_np = of_get_child_by_name(gsw->dev->of_node, "mdio-bus");
	if (mii_np && !of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	gsw->gphy_bus = devm_mdiobus_alloc(gsw->dev);
	if (!gsw->gphy_bus) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	gsw->gphy_bus->name = "mt753x_mdio";
	gsw->gphy_bus->read = mt753x_mdio_read;
	gsw->gphy_bus->write = mt753x_mdio_write;
	gsw->gphy_bus->priv = gsw;
	gsw->gphy_bus->parent = gsw->dev;
	/* only addresses 0..MT753X_NUM_PHYS-1 are scanned */
	gsw->gphy_bus->phy_mask = BIT(MT753X_NUM_PHYS) - 1;
// gsw->gphy_bus->irq = gsw->phy_irqs;

	/* no dedicated PHY interrupts wired up; poll everything */
	for (i = 0; i < PHY_MAX_ADDR; i++)
		gsw->gphy_bus->irq[i] = PHY_POLL;

	if (mii_np)
		snprintf(gsw->gphy_bus->id, MII_BUS_ID_SIZE, "%s@%s",
			 mii_np->name, gsw->dev->of_node->name);
	else
		snprintf(gsw->gphy_bus->id, MII_BUS_ID_SIZE, "mdio@%s",
			 gsw->dev->of_node->name);

	ret = of_mdiobus_register(gsw->gphy_bus, mii_np);

	if (ret) {
		/* leave gphy_bus NULL so teardown can detect the failure */
		devm_mdiobus_free(gsw->dev, gsw->gphy_bus);
		gsw->gphy_bus = NULL;
	} else {
		if (gsw->phy_status_poll)
			mt753x_connect_internal_phys(gsw, mii_np);
	}

err_put_node:
	if (mii_np)
		of_node_put(mii_np);

	return ret;
}
+#endif
+
/* Hard IRQ handler: mask the line and defer all register access to the
 * irq_worker workqueue (SMI access sleeps and cannot run in IRQ
 * context). The worker is expected to re-enable the interrupt.
 */
static irqreturn_t mt753x_irq_handler(int irq, void *dev)
{
	struct gsw_mt753x *gsw = dev;

	disable_irq_nosync(gsw->irq);

	schedule_work(&gsw->irq_worker);

	return IRQ_HANDLED;
}
+
+static int mt753x_probe(struct platform_device *pdev)
+{
+ struct gsw_mt753x *gsw;
+ struct mt753x_sw_id *sw;
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *mdio;
+ struct mii_bus *mdio_bus;
+ int ret = -EINVAL;
+ struct chip_rev rev;
+ struct mt753x_mapping *map;
+ int i;
+
+ mdio = of_parse_phandle(np, "mediatek,mdio", 0);
+ if (!mdio)
+ return -EINVAL;
+
+ mdio_bus = of_mdio_find_bus(mdio);
+ if (!mdio_bus)
+ return -EPROBE_DEFER;
+
+ gsw = devm_kzalloc(&pdev->dev, sizeof(struct gsw_mt753x), GFP_KERNEL);
+ if (!gsw)
+ return -ENOMEM;
+
+ gsw->host_bus = mdio_bus;
+ gsw->dev = &pdev->dev;
+ mutex_init(&gsw->mii_lock);
+
+ /* Switch hard reset */
+ if (mt753x_hw_reset(gsw))
+ goto fail;
+
+ /* Fetch the SMI address dirst */
+ if (of_property_read_u32(np, "mediatek,smi-addr", &gsw->smi_addr))
+ gsw->smi_addr = MT753X_DFL_SMI_ADDR;
+
+ /* Get LAN/WAN port mapping */
+ map = mt753x_find_mapping(np);
+ if (map) {
+ mt753x_apply_mapping(gsw, map);
+ gsw->global_vlan_enable = 1;
+ dev_info(gsw->dev, "LAN/WAN VLAN setting=%s\n", map->name);
+ }
+
+ /* Load MAC port configurations */
+ mt753x_load_port_cfg(gsw);
+
+ /* Check for valid switch and then initialize */
+ for (i = 0; i < ARRAY_SIZE(mt753x_sw_ids); i++) {
+ if (!mt753x_sw_ids[i]->detect(gsw, &rev)) {
+ sw = mt753x_sw_ids[i];
+
+ gsw->name = rev.name;
+ gsw->model = sw->model;
+
+ dev_info(gsw->dev, "Switch is MediaTek %s rev %d",
+ gsw->name, rev.rev);
+
+ /* Initialize the switch */
+ ret = sw->init(gsw);
+ if (ret)
+ goto fail;
+
+ break;
+ }
+ }
+
+ if (i >= ARRAY_SIZE(mt753x_sw_ids)) {
+ dev_err(gsw->dev, "No mt753x switch found\n");
+ goto fail;
+ }
+
+ gsw->irq = platform_get_irq(pdev, 0);
+ if (gsw->irq >= 0) {
+ ret = devm_request_irq(gsw->dev, gsw->irq, mt753x_irq_handler,
+ 0, dev_name(gsw->dev), gsw);
+ if (ret) {
+ dev_err(gsw->dev, "Failed to request irq %d\n",
+ gsw->irq);
+ goto fail;
+ }
+
+ INIT_WORK(&gsw->irq_worker, mt753x_irq_worker);
+ }
+
+ platform_set_drvdata(pdev, gsw);
+
+ gsw->phy_status_poll = of_property_read_bool(gsw->dev->of_node,
+ "mediatek,phy-poll");
+
+ mt753x_add_gsw(gsw);
+#if 1 //XDXD
+ mt753x_mdio_register(gsw);
+#endif
+
+ mt753x_swconfig_init(gsw);
+
+ if (sw->post_init)
+ sw->post_init(gsw);
+
+ if (gsw->irq >= 0)
+ mt753x_irq_enable(gsw);
+
+ return 0;
+
+fail:
+ devm_kfree(&pdev->dev, gsw);
+
+ return ret;
+}
+
+static int mt753x_remove(struct platform_device *pdev)
+{
+ struct gsw_mt753x *gsw = platform_get_drvdata(pdev);
+
+ if (gsw->irq >= 0)
+ cancel_work_sync(&gsw->irq_worker);
+
+ if (gsw->reset_pin >= 0)
+ devm_gpio_free(&pdev->dev, gsw->reset_pin);
+
+#ifdef CONFIG_SWCONFIG
+ mt753x_swconfig_destroy(gsw);
+#endif
+
+#if 1 //XDXD
+ mt753x_disconnect_internal_phys(gsw);
+
+ mdiobus_unregister(gsw->gphy_bus);
+#endif
+
+ mt753x_remove_gsw(gsw);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
/* Device tree match table: binds this driver to "mediatek,mt753x" nodes */
static const struct of_device_id mt753x_ids[] = {
	{ .compatible = "mediatek,mt753x" },
	{ },
};

MODULE_DEVICE_TABLE(of, mt753x_ids);
+
/* Platform driver glue for the mt753x switch */
static struct platform_driver mt753x_driver = {
	.probe = mt753x_probe,
	.remove = mt753x_remove,
	.driver = {
		.name = "mt753x",
		.of_match_table = mt753x_ids,
	},
};
+
+static int __init mt753x_init(void)
+{
+ int ret;
+
+ INIT_LIST_HEAD(&mt753x_devs);
+ ret = platform_driver_register(&mt753x_driver);
+
+ mt753x_nl_init();
+
+ return ret;
+}
+module_init(mt753x_init);
+
/* mt753x_exit - module teardown: unregister netlink first, then the
 * platform driver (reverse order of mt753x_init)
 */
static void __exit mt753x_exit(void)
{
	mt753x_nl_exit();

	platform_driver_unregister(&mt753x_driver);
}
module_exit(mt753x_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Weijie Gao <weijie.gao@mediatek.com>");
+MODULE_DESCRIPTION("Driver for MediaTek MT753x Gigabit Switch");
diff --git a/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_nl.c b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_nl.c
new file mode 100755
index 0000000..a04c701
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_nl.c
@@ -0,0 +1,381 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Sirui Zhao <Sirui.Zhao@mediatek.com>
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <net/genetlink.h>
+
+#include "mt753x.h"
+#include "mt753x_nl.h"
+
/* Dispatch table entry for one netlink command */
struct mt753x_nl_cmd_item {
	enum mt753x_cmd cmd;		/* command this entry handles */
	bool require_dev;		/* must a switch dev be resolved first? */
	int (*process)(struct genl_info *info, struct gsw_mt753x *gsw);
	u32 nr_required_attrs;		/* entries in required_attrs[] */
	const enum mt753x_attr *required_attrs;	/* attrs that must be present */
};
+
+static int mt753x_nl_response(struct sk_buff *skb, struct genl_info *info);
+
/* Attribute validation policy for the mt753x genl family.
 * NOTE(review): DEV_NAME declared as NLA_S32 even though the name
 * suggests a string — verify against the userspace tool before use.
 */
static const struct nla_policy mt753x_nl_cmd_policy[] = {
	[MT753X_ATTR_TYPE_MESG] = { .type = NLA_STRING },
	[MT753X_ATTR_TYPE_PHY] = { .type = NLA_S32 },
	[MT753X_ATTR_TYPE_REG] = { .type = NLA_S32 },
	[MT753X_ATTR_TYPE_VAL] = { .type = NLA_S32 },
	[MT753X_ATTR_TYPE_DEV_NAME] = { .type = NLA_S32 },
	[MT753X_ATTR_TYPE_DEV_ID] = { .type = NLA_S32 },
	[MT753X_ATTR_TYPE_DEVAD] = { .type = NLA_S32 },
};
+
/* All three commands funnel into mt753x_nl_response(), which dispatches
 * on the command id; GENL_ADMIN_PERM restricts them to CAP_NET_ADMIN.
 * Per-op .policy is commented out because newer kernels take the policy
 * at family level instead (see mt753x_nl_family below).
 */
static const struct genl_ops mt753x_nl_ops[] = {
	{
		.cmd = MT753X_CMD_REQUEST,
		.doit = mt753x_nl_response,
//		.policy = mt753x_nl_cmd_policy,
		.flags = GENL_ADMIN_PERM,
	}, {
		.cmd = MT753X_CMD_READ,
		.doit = mt753x_nl_response,
//		.policy = mt753x_nl_cmd_policy,
		.flags = GENL_ADMIN_PERM,
	}, {
		.cmd = MT753X_CMD_WRITE,
		.doit = mt753x_nl_response,
//		.policy = mt753x_nl_cmd_policy,
		.flags = GENL_ADMIN_PERM,
	},
};
+
/* Generic netlink family exposing switch/PHY register access to
 * userspace debugging tools
 */
static struct genl_family mt753x_nl_family = {
	.name = MT753X_GENL_NAME,
	.version = MT753X_GENL_VERSION,
	.maxattr = MT753X_NR_ATTR_TYPE,
	.ops = mt753x_nl_ops,
	.n_ops = ARRAY_SIZE(mt753x_nl_ops),
	.policy = mt753x_nl_cmd_policy,
};
+
+static int mt753x_nl_list_devs(char *buff, int size)
+{
+ struct gsw_mt753x *gsw;
+ int len, total = 0;
+ char buf[80];
+
+ memset(buff, 0, size);
+
+ mt753x_lock_gsw();
+
+ list_for_each_entry(gsw, &mt753x_devs, list) {
+ len = snprintf(buf, sizeof(buf),
+ "id: %d, model: %s, node: %s\n",
+ gsw->id, gsw->name, gsw->dev->of_node->name);
+ strncat(buff, buf, size - total);
+ total += len;
+ }
+
+ mt753x_put_gsw();
+
+ return total;
+}
+
/* mt753x_nl_prepare_reply - allocate a reply skb and write its genl
 * header for command @cmd
 *
 * On success *skbp owns the new message (freed by the send path or by
 * the caller's error path via nlmsg_free). Returns 0 or -errno.
 */
static int mt753x_nl_prepare_reply(struct genl_info *info, u8 cmd,
				   struct sk_buff **skbp)
{
	struct sk_buff *msg;
	void *reply;

	if (!info)
		return -EINVAL;

	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	/* Construct send-back message header */
	reply = genlmsg_put(msg, info->snd_portid, info->snd_seq,
			    &mt753x_nl_family, 0, cmd);
	if (!reply) {
		nlmsg_free(msg);
		return -EINVAL;
	}

	*skbp = msg;
	return 0;
}
+
/* mt753x_nl_send_reply - finalize @skb and unicast it back to the
 * requester identified by @info; consumes @skb in all cases
 */
static int mt753x_nl_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	void *reply = genlmsg_data(genlhdr);

	/* Finalize a generic netlink message (update message header) */
	genlmsg_end(skb, reply);

	/* reply to a request */
	return genlmsg_reply(skb, info);
}
+
+static s32 mt753x_nl_get_s32(struct genl_info *info, enum mt753x_attr attr,
+ s32 defval)
+{
+ struct nlattr *na;
+
+ na = info->attrs[attr];
+ if (na)
+ return nla_get_s32(na);
+
+ return defval;
+}
+
+static int mt753x_nl_get_u32(struct genl_info *info, enum mt753x_attr attr,
+ u32 *val)
+{
+ struct nlattr *na;
+
+ na = info->attrs[attr];
+ if (na) {
+ *val = nla_get_u32(na);
+ return 0;
+ }
+
+ return -1;
+}
+
+static struct gsw_mt753x *mt753x_nl_parse_find_gsw(struct genl_info *info)
+{
+ struct gsw_mt753x *gsw;
+ struct nlattr *na;
+ int gsw_id;
+
+ na = info->attrs[MT753X_ATTR_TYPE_DEV_ID];
+ if (na) {
+ gsw_id = nla_get_s32(na);
+ if (gsw_id >= 0)
+ gsw = mt753x_get_gsw(gsw_id);
+ else
+ gsw = mt753x_get_first_gsw();
+ } else {
+ gsw = mt753x_get_first_gsw();
+ }
+
+ return gsw;
+}
+
/* Handler for MT753X_CMD_REQUEST: reply with a textual listing of all
 * registered switches. @gsw is unused (require_dev == false).
 */
static int mt753x_nl_get_swdevs(struct genl_info *info, struct gsw_mt753x *gsw)
{
	struct sk_buff *rep_skb = NULL;
	char dev_info[512];
	int ret;

	ret = mt753x_nl_list_devs(dev_info, sizeof(dev_info));
	if (!ret) {
		pr_info("No switch registered\n");
		return -EINVAL;
	}

	ret = mt753x_nl_prepare_reply(info, MT753X_CMD_REPLY, &rep_skb);
	if (ret < 0)
		goto err;

	ret = nla_put_string(rep_skb, MT753X_ATTR_TYPE_MESG, dev_info);
	if (ret < 0)
		goto err;

	return mt753x_nl_send_reply(rep_skb, info);

err:
	if (rep_skb)
		nlmsg_free(rep_skb);

	return ret;
}
+
+static int mt753x_nl_reply_read(struct genl_info *info, struct gsw_mt753x *gsw)
+{
+ struct sk_buff *rep_skb = NULL;
+ s32 phy, devad, reg;
+ int value;
+ int ret = 0;
+
+ phy = mt753x_nl_get_s32(info, MT753X_ATTR_TYPE_PHY, -1);
+ devad = mt753x_nl_get_s32(info, MT753X_ATTR_TYPE_DEVAD, -1);
+ reg = mt753x_nl_get_s32(info, MT753X_ATTR_TYPE_REG, -1);
+
+ if (reg < 0)
+ goto err;
+
+ ret = mt753x_nl_prepare_reply(info, MT753X_CMD_READ, &rep_skb);
+ if (ret < 0)
+ goto err;
+
+ if (phy >= 0) {
+ if (devad < 0)
+ value = gsw->mii_read(gsw, phy, reg);
+ else
+ value = gsw->mmd_read(gsw, phy, devad, reg);
+ } else {
+ value = mt753x_reg_read(gsw, reg);
+ }
+
+ ret = nla_put_s32(rep_skb, MT753X_ATTR_TYPE_REG, reg);
+ if (ret < 0)
+ goto err;
+
+ ret = nla_put_s32(rep_skb, MT753X_ATTR_TYPE_VAL, value);
+ if (ret < 0)
+ goto err;
+
+ return mt753x_nl_send_reply(rep_skb, info);
+
+err:
+ if (rep_skb)
+ nlmsg_free(rep_skb);
+
+ return ret;
+}
+
+static int mt753x_nl_reply_write(struct genl_info *info, struct gsw_mt753x *gsw)
+{
+ struct sk_buff *rep_skb = NULL;
+ s32 phy, devad, reg;
+ u32 value;
+ int ret = 0;
+
+ phy = mt753x_nl_get_s32(info, MT753X_ATTR_TYPE_PHY, -1);
+ devad = mt753x_nl_get_s32(info, MT753X_ATTR_TYPE_DEVAD, -1);
+ reg = mt753x_nl_get_s32(info, MT753X_ATTR_TYPE_REG, -1);
+
+ if (mt753x_nl_get_u32(info, MT753X_ATTR_TYPE_VAL, &value))
+ goto err;
+
+ if (reg < 0)
+ goto err;
+
+ ret = mt753x_nl_prepare_reply(info, MT753X_CMD_WRITE, &rep_skb);
+ if (ret < 0)
+ goto err;
+
+ if (phy >= 0) {
+ if (devad < 0)
+ gsw->mii_write(gsw, phy, reg, value);
+ else
+ gsw->mmd_write(gsw, phy, devad, reg, value);
+ } else {
+ mt753x_reg_write(gsw, reg, value);
+ }
+
+ ret = nla_put_s32(rep_skb, MT753X_ATTR_TYPE_REG, reg);
+ if (ret < 0)
+ goto err;
+
+ ret = nla_put_s32(rep_skb, MT753X_ATTR_TYPE_VAL, value);
+ if (ret < 0)
+ goto err;
+
+ return mt753x_nl_send_reply(rep_skb, info);
+
+err:
+ if (rep_skb)
+ nlmsg_free(rep_skb);
+
+ return ret;
+}
+
/* Mandatory attributes per command, checked by mt753x_nl_response() */
static const enum mt753x_attr mt753x_nl_cmd_read_attrs[] = {
	MT753X_ATTR_TYPE_REG
};

static const enum mt753x_attr mt753x_nl_cmd_write_attrs[] = {
	MT753X_ATTR_TYPE_REG,
	MT753X_ATTR_TYPE_VAL
};
+
/* Command dispatch table consumed by mt753x_nl_response() */
static const struct mt753x_nl_cmd_item mt753x_nl_cmds[] = {
	{
		.cmd = MT753X_CMD_REQUEST,
		.require_dev = false,	/* lists devices; no target needed */
		.process = mt753x_nl_get_swdevs
	}, {
		.cmd = MT753X_CMD_READ,
		.require_dev = true,
		.process = mt753x_nl_reply_read,
		.required_attrs = mt753x_nl_cmd_read_attrs,
		.nr_required_attrs = ARRAY_SIZE(mt753x_nl_cmd_read_attrs),
	}, {
		.cmd = MT753X_CMD_WRITE,
		.require_dev = true,
		.process = mt753x_nl_reply_write,
		.required_attrs = mt753x_nl_cmd_write_attrs,
		.nr_required_attrs = ARRAY_SIZE(mt753x_nl_cmd_write_attrs),
	}
};
+
+static int mt753x_nl_response(struct sk_buff *skb, struct genl_info *info)
+{
+ struct genlmsghdr *hdr = nlmsg_data(info->nlhdr);
+ const struct mt753x_nl_cmd_item *cmditem = NULL;
+ struct gsw_mt753x *gsw = NULL;
+ u32 sat_req_attrs = 0;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(mt753x_nl_cmds); i++) {
+ if (hdr->cmd == mt753x_nl_cmds[i].cmd) {
+ cmditem = &mt753x_nl_cmds[i];
+ break;
+ }
+ }
+
+ if (!cmditem) {
+ pr_info("mt753x-nl: unknown cmd %u\n", hdr->cmd);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < cmditem->nr_required_attrs; i++) {
+ if (info->attrs[cmditem->required_attrs[i]])
+ sat_req_attrs++;
+ }
+
+ if (sat_req_attrs != cmditem->nr_required_attrs) {
+ pr_info("mt753x-nl: missing required attr(s) for cmd %u\n",
+ hdr->cmd);
+ return -EINVAL;
+ }
+
+ if (cmditem->require_dev) {
+ gsw = mt753x_nl_parse_find_gsw(info);
+ if (!gsw) {
+ pr_info("mt753x-nl: failed to find switch dev\n");
+ return -EINVAL;
+ }
+ }
+
+ ret = cmditem->process(info, gsw);
+
+ mt753x_put_gsw();
+
+ return ret;
+}
+
+int __init mt753x_nl_init(void)
+{
+ int ret;
+
+ ret = genl_register_family(&mt753x_nl_family);
+ if (ret) {
+ pr_info("mt753x-nl: genl_register_family_with_ops failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
/* mt753x_nl_exit - unregister the genl family on module unload */
void __exit mt753x_nl_exit(void)
{
	genl_unregister_family(&mt753x_nl_family);
}
diff --git a/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_regs.h b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_regs.h
new file mode 100755
index 0000000..1784873
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_regs.h
@@ -0,0 +1,344 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Weijie Gao <weijie.gao@mediatek.com>
+ */
+
+#ifndef _MT753X_REGS_H_
+#define _MT753X_REGS_H_
+
+#include <linux/bitops.h>
+
+/* Values of Egress TAG Control */
+#define ETAG_CTRL_UNTAG 0
+#define ETAG_CTRL_TAG 2
+#define ETAG_CTRL_SWAP 1
+#define ETAG_CTRL_STACK 3
+
+#define VTCR 0x90
+#define VAWD1 0x94
+#define VAWD2 0x98
+
+/* Fields of VTCR */
+#define VTCR_BUSY BIT(31)
+#define IDX_INVLD BIT(16)
+#define VTCR_FUNC_S 12
+#define VTCR_FUNC_M 0xf000
+#define VTCR_VID_S 0
+#define VTCR_VID_M 0xfff
+
+/* Values of VTCR_FUNC */
+#define VTCR_READ_VLAN_ENTRY 0
+#define VTCR_WRITE_VLAN_ENTRY 1
+#define VTCR_INVD_VLAN_ENTRY 2
+#define VTCR_ENABLE_VLAN_ENTRY 3
+#define VTCR_READ_ACL_ENTRY 4
+#define VTCR_WRITE_ACL_ENTRY 5
+#define VTCR_READ_TRTCM_TABLE 6
+#define VTCR_WRITE_TRTCM_TABLE 7
+#define VTCR_READ_ACL_MASK_ENTRY 8
+#define VTCR_WRITE_ACL_MASK_ENTRY 9
+#define VTCR_READ_ACL_RULE_ENTRY 10
+#define VTCR_WRITE_ACL_RULE_ENTRY 11
+#define VTCR_READ_ACL_RATE_ENTRY 12
+#define VTCR_WRITE_ACL_RATE_ENTRY 13
+
+/* VLAN entry fields */
+/* VAWD1 */
+#define PORT_STAG BIT(31)
+#define IVL_MAC BIT(30)
+#define EG_CON BIT(29)
+#define VTAG_EN BIT(28)
+#define COPY_PRI BIT(27)
+#define USER_PRI_S 24
+#define USER_PRI_M 0x7000000
+#define PORT_MEM_S 16
+#define PORT_MEM_M 0xff0000
+#define S_TAG1_S 4
+#define S_TAG1_M 0xfff0
+#define FID_S 1
+#define FID_M 0x0e
+#define VENTRY_VALID BIT(0)
+
+/* VAWD2 */
+#define S_TAG2_S 16
+#define S_TAG2_M 0xffff0000
+#define PORT_ETAG_S(p) ((p) * 2)
+#define PORT_ETAG_M 0x03
+
+#define PORT_CTRL_BASE 0x2000
+#define PORT_CTRL_PORT_OFFSET 0x100
+#define PORT_CTRL_REG(p, r) (PORT_CTRL_BASE + \
+ (p) * PORT_CTRL_PORT_OFFSET + (r))
+#define CKGCR(p) PORT_CTRL_REG(p, 0x00)
+#define PCR(p) PORT_CTRL_REG(p, 0x04)
+#define PIC(p) PORT_CTRL_REG(p, 0x08)
+#define PSC(p) PORT_CTRL_REG(p, 0x0c)
+#define PVC(p) PORT_CTRL_REG(p, 0x10)
+#define PPBV1(p) PORT_CTRL_REG(p, 0x14)
+#define PPBV2(p) PORT_CTRL_REG(p, 0x18)
+#define BSR(p) PORT_CTRL_REG(p, 0x1c)
+#define STAG01 PORT_CTRL_REG(p, 0x20)
+#define STAG23 PORT_CTRL_REG(p, 0x24)
+#define STAG45 PORT_CTRL_REG(p, 0x28)
+#define STAG67 PORT_CTRL_REG(p, 0x2c)
+
+#define PPBV(p, g) (PPBV1(p) + ((g) / 2) * 4)
+
+/* Fields of PCR */
+#define MLDV2_EN BIT(30)
+#define EG_TAG_S 28
+#define EG_TAG_M 0x30000000
+#define PORT_PRI_S 24
+#define PORT_PRI_M 0x7000000
+#define PORT_MATRIX_S 16
+#define PORT_MATRIX_M 0xff0000
+#define UP2DSCP_EN BIT(12)
+#define UP2TAG_EN BIT(11)
+#define ACL_EN BIT(10)
+#define PORT_TX_MIR BIT(9)
+#define PORT_RX_MIR BIT(8)
+#define ACL_MIR BIT(7)
+#define MIS_PORT_FW_S 4
+#define MIS_PORT_FW_M 0x70
+#define VLAN_MIS BIT(2)
+#define PORT_VLAN_S 0
+#define PORT_VLAN_M 0x03
+
+/* Values of PORT_VLAN */
+#define PORT_MATRIX_MODE 0
+#define FALLBACK_MODE 1
+#define CHECK_MODE 2
+#define SECURITY_MODE 3
+
+/* Fields of PVC */
+#define STAG_VPID_S 16
+#define STAG_VPID_M 0xffff0000
+#define DIS_PVID BIT(15)
+#define FORCE_PVID BIT(14)
+#define PT_VPM BIT(12)
+#define PT_OPTION BIT(11)
+#define PVC_EG_TAG_S 8
+#define PVC_EG_TAG_M 0x700
+#define VLAN_ATTR_S 6
+#define VLAN_ATTR_M 0xc0
+#define PVC_PORT_STAG BIT(5)
+#define BC_LKYV_EN BIT(4)
+#define MC_LKYV_EN BIT(3)
+#define UC_LKYV_EN BIT(2)
+#define ACC_FRM_S 0
+#define ACC_FRM_M 0x03
+
+/* Values of VLAN_ATTR */
+#define VA_USER_PORT 0
+#define VA_STACK_PORT 1
+#define VA_TRANSLATION_PORT 2
+#define VA_TRANSPARENT_PORT 3
+
+/* Fields of PPBV */
+#define GRP_PORT_PRI_S(g) (((g) % 2) * 16 + 13)
+#define GRP_PORT_PRI_M 0x07
+#define GRP_PORT_VID_S(g) (((g) % 2) * 16)
+#define GRP_PORT_VID_M 0xfff
+
+#define PORT_MAC_CTRL_BASE 0x3000
+#define PORT_MAC_CTRL_PORT_OFFSET 0x100
+#define PORT_MAC_CTRL_REG(p, r) (PORT_MAC_CTRL_BASE + \
+ (p) * PORT_MAC_CTRL_PORT_OFFSET + (r))
+#define PMCR(p) PORT_MAC_CTRL_REG(p, 0x00)
+#define PMEEECR(p) PORT_MAC_CTRL_REG(p, 0x04)
+#define PMSR(p) PORT_MAC_CTRL_REG(p, 0x08)
+#define PINT_EN(p) PORT_MAC_CTRL_REG(p, 0x10)
+#define PINT_STS(p) PORT_MAC_CTRL_REG(p, 0x14)
+
+#define GMACCR (PORT_MAC_CTRL_BASE + 0xe0)
+#define TXCRC_EN BIT(19)
+#define RXCRC_EN BIT(18)
+#define PRMBL_LMT_EN BIT(17)
+#define MTCC_LMT_S 9
+#define MTCC_LMT_M 0x1e00
+#define MAX_RX_JUMBO_S 2
+#define MAX_RX_JUMBO_M 0x3c
+#define MAX_RX_PKT_LEN_S 0
+#define MAX_RX_PKT_LEN_M 0x3
+
+/* Values of MAX_RX_PKT_LEN */
+#define RX_PKT_LEN_1518 0
+#define RX_PKT_LEN_1536 1
+#define RX_PKT_LEN_1522 2
+#define RX_PKT_LEN_MAX_JUMBO 3
+
+/* Fields of PMCR */
+#define IPG_CFG_S 18
+#define IPG_CFG_M 0xc0000
+#define EXT_PHY BIT(17)
+#define MAC_MODE BIT(16)
+#define MAC_TX_EN BIT(14)
+#define MAC_RX_EN BIT(13)
+#define MAC_PRE BIT(11)
+#define BKOFF_EN BIT(9)
+#define BACKPR_EN BIT(8)
+#define FORCE_EEE1G BIT(7)
+#define FORCE_EEE1000 BIT(6)
+#define FORCE_RX_FC BIT(5)
+#define FORCE_TX_FC BIT(4)
+#define FORCE_SPD_S 2
+#define FORCE_SPD_M 0x0c
+#define FORCE_DPX BIT(1)
+#define FORCE_LINK BIT(0)
+
+/* Fields of PMSR */
+#define EEE1G_STS BIT(7)
+#define EEE100_STS BIT(6)
+#define RX_FC_STS BIT(5)
+#define TX_FC_STS BIT(4)
+#define MAC_SPD_STS_S 2
+#define MAC_SPD_STS_M 0x0c
+#define MAC_DPX_STS BIT(1)
+#define MAC_LNK_STS BIT(0)
+
+/* Values of MAC_SPD_STS */
+#define MAC_SPD_10 0
+#define MAC_SPD_100 1
+#define MAC_SPD_1000 2
+#define MAC_SPD_2500 3
+
+/* Values of IPG_CFG */
+#define IPG_96BIT 0
+#define IPG_96BIT_WITH_SHORT_IPG 1
+#define IPG_64BIT 2
+
+#define MIB_COUNTER_BASE 0x4000
+#define MIB_COUNTER_PORT_OFFSET 0x100
+#define MIB_COUNTER_REG(p, r) (MIB_COUNTER_BASE + \
+ (p) * MIB_COUNTER_PORT_OFFSET + (r))
+#define STATS_TDPC 0x00
+#define STATS_TCRC 0x04
+#define STATS_TUPC 0x08
+#define STATS_TMPC 0x0C
+#define STATS_TBPC 0x10
+#define STATS_TCEC 0x14
+#define STATS_TSCEC 0x18
+#define STATS_TMCEC 0x1C
+#define STATS_TDEC 0x20
+#define STATS_TLCEC 0x24
+#define STATS_TXCEC 0x28
+#define STATS_TPPC 0x2C
+#define STATS_TL64PC 0x30
+#define STATS_TL65PC 0x34
+#define STATS_TL128PC 0x38
+#define STATS_TL256PC 0x3C
+#define STATS_TL512PC 0x40
+#define STATS_TL1024PC 0x44
+#define STATS_TOC 0x48
+#define STATS_RDPC 0x60
+#define STATS_RFPC 0x64
+#define STATS_RUPC 0x68
+#define STATS_RMPC 0x6C
+#define STATS_RBPC 0x70
+#define STATS_RAEPC 0x74
+#define STATS_RCEPC 0x78
+#define STATS_RUSPC 0x7C
+#define STATS_RFEPC 0x80
+#define STATS_ROSPC 0x84
+#define STATS_RJEPC 0x88
+#define STATS_RPPC 0x8C
+#define STATS_RL64PC 0x90
+#define STATS_RL65PC 0x94
+#define STATS_RL128PC 0x98
+#define STATS_RL256PC 0x9C
+#define STATS_RL512PC 0xA0
+#define STATS_RL1024PC 0xA4
+#define STATS_ROC 0xA8
+#define STATS_RDPC_CTRL 0xB0
+#define STATS_RDPC_ING 0xB4
+#define STATS_RDPC_ARL 0xB8
+
+#define SYS_CTRL 0x7000
+#define SW_PHY_RST BIT(2)
+#define SW_SYS_RST BIT(1)
+#define SW_REG_RST BIT(0)
+
+#define SYS_INT_EN 0x7008
+#define SYS_INT_STS 0x700c
+#define MAC_PC_INT BIT(16)
+#define PHY_INT(p) BIT((p) + 8)
+#define PHY_LC_INT(p) BIT(p)
+
+#define PHY_IAC 0x701c
+#define PHY_ACS_ST BIT(31)
+#define MDIO_REG_ADDR_S 25
+#define MDIO_REG_ADDR_M 0x3e000000
+#define MDIO_PHY_ADDR_S 20
+#define MDIO_PHY_ADDR_M 0x1f00000
+#define MDIO_CMD_S 18
+#define MDIO_CMD_M 0xc0000
+#define MDIO_ST_S 16
+#define MDIO_ST_M 0x30000
+#define MDIO_RW_DATA_S 0
+#define MDIO_RW_DATA_M 0xffff
+
+/* MDIO_CMD: MDIO commands */
+#define MDIO_CMD_ADDR 0
+#define MDIO_CMD_WRITE 1
+#define MDIO_CMD_READ 2
+#define MDIO_CMD_READ_C45 3
+
+/* MDIO_ST: MDIO start field */
+#define MDIO_ST_C45 0
+#define MDIO_ST_C22 1
+
+#define HWSTRAP 0x7800
+#define MHWSTRAP 0x7804
+
+/* Internal GPHY Page Control Register */
+#define PHY_CL22_PAGE_CTRL 0x1f
+#define PHY_TR_PAGE 0x52b5
+
+/* Internal GPHY Token Ring Access Registers */
+#define PHY_TR_CTRL 0x10
+#define PHY_TR_LOW_DATA 0x11
+#define PHY_TR_HIGH_DATA 0x12
+
+/* Fields of PHY_TR_CTRL */
+#define PHY_TR_PKT_XMT_STA BIT(15)
+#define PHY_TR_WR_S 13
+#define PHY_TR_CH_ADDR_S 11
+#define PHY_TR_NODE_ADDR_S 7
+#define PHY_TR_DATA_ADDR_S 1
+
/* Direction bit written at PHY_TR_WR_S in PHY_TR_CTRL */
enum phy_tr_wr {
	PHY_TR_WRITE = 0,
	PHY_TR_READ = 1,
};
+
+/* Helper macro for GPHY Token Ring Access */
+#define PHY_TR_LOW_VAL(x) ((x) & 0xffff)
+#define PHY_TR_HIGH_VAL(x) (((x) & 0xff0000) >> 16)
+
+/* Token Ring Channels */
+#define PMA_CH 0x1
+#define DSP_CH 0x2
+
+/* Token Ring Nodes */
+#define PMA_NOD 0xf
+#define DSP_NOD 0xd
+
+/* Token Ring register range */
/* Valid Token Ring data-address range for the PMA node (selected
 * registers named; MIN/MAX bound the range)
 */
enum tr_pma_reg_addr {
	PMA_MIN = 0x0,
	PMA_01 = 0x1,
	PMA_17 = 0x17,
	PMA_18 = 0x18,
	PMA_MAX = 0x3d,
};
+
/* Valid Token Ring data-address range for the DSP node (selected
 * registers named; MIN/MAX bound the range)
 */
enum tr_dsp_reg_addr {
	DSP_MIN = 0x0,
	DSP_06 = 0x6,
	DSP_08 = 0x8,
	DSP_0f = 0xf,
	DSP_10 = 0x10,
	DSP_MAX = 0x3e,
};
+#endif /* _MT753X_REGS_H_ */
diff --git a/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_swconfig.c b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_swconfig.c
new file mode 100755
index 0000000..7a05952
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_swconfig.c
@@ -0,0 +1,517 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Weijie Gao <weijie.gao@mediatek.com>
+ */
+
+#include <linux/if.h>
+#include <linux/list.h>
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/netlink.h>
+#include <linux/bitops.h>
+#include <net/genetlink.h>
+#include <linux/delay.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/lockdep.h>
+#include <linux/workqueue.h>
+#include <linux/of_device.h>
+
+#include "mt753x.h"
+#include "mt753x_swconfig.h"
+#include "mt753x_regs.h"
+
+/* Indices of the byte counters inside mt753x_mibs[], used by
+ * mt753x_get_port_stats().
+ */
+#define MT753X_PORT_MIB_TXB_ID 18 /* TxByte */
+#define MT753X_PORT_MIB_RXB_ID 37 /* RxByte */
+
+/* Build one mt753x_mib_desc initializer: size in 32-bit words (1 or 2),
+ * register offset within a port's MIB block, and a display name.
+ */
+#define MIB_DESC(_s, _o, _n) \
+ { \
+  .size = (_s), \
+  .offset = (_o), \
+  .name = (_n), \
+ }
+
+/* Description of a single hardware MIB counter. */
+struct mt753x_mib_desc {
+ unsigned int size;   /* counter width in 32-bit words (1 or 2) */
+ unsigned int offset; /* offset within the per-port MIB region */
+ const char *name;    /* label printed by the "mib" attribute */
+};
+
+/* Table of all exported per-port MIB counters. Order matters:
+ * MT753X_PORT_MIB_TXB_ID / MT753X_PORT_MIB_RXB_ID index into this array.
+ */
+static const struct mt753x_mib_desc mt753x_mibs[] = {
+ MIB_DESC(1, STATS_TDPC, "TxDrop"),
+ MIB_DESC(1, STATS_TCRC, "TxCRC"),
+ MIB_DESC(1, STATS_TUPC, "TxUni"),
+ MIB_DESC(1, STATS_TMPC, "TxMulti"),
+ MIB_DESC(1, STATS_TBPC, "TxBroad"),
+ MIB_DESC(1, STATS_TCEC, "TxCollision"),
+ MIB_DESC(1, STATS_TSCEC, "TxSingleCol"),
+ MIB_DESC(1, STATS_TMCEC, "TxMultiCol"),
+ MIB_DESC(1, STATS_TDEC, "TxDefer"),
+ MIB_DESC(1, STATS_TLCEC, "TxLateCol"),
+ MIB_DESC(1, STATS_TXCEC, "TxExcCol"),
+ MIB_DESC(1, STATS_TPPC, "TxPause"),
+ MIB_DESC(1, STATS_TL64PC, "Tx64Byte"),
+ MIB_DESC(1, STATS_TL65PC, "Tx65Byte"),
+ MIB_DESC(1, STATS_TL128PC, "Tx128Byte"),
+ MIB_DESC(1, STATS_TL256PC, "Tx256Byte"),
+ MIB_DESC(1, STATS_TL512PC, "Tx512Byte"),
+ MIB_DESC(1, STATS_TL1024PC, "Tx1024Byte"),
+ MIB_DESC(2, STATS_TOC, "TxByte"),
+ MIB_DESC(1, STATS_RDPC, "RxDrop"),
+ MIB_DESC(1, STATS_RFPC, "RxFiltered"),
+ MIB_DESC(1, STATS_RUPC, "RxUni"),
+ MIB_DESC(1, STATS_RMPC, "RxMulti"),
+ MIB_DESC(1, STATS_RBPC, "RxBroad"),
+ MIB_DESC(1, STATS_RAEPC, "RxAlignErr"),
+ MIB_DESC(1, STATS_RCEPC, "RxCRC"),
+ MIB_DESC(1, STATS_RUSPC, "RxUnderSize"),
+ MIB_DESC(1, STATS_RFEPC, "RxFragment"),
+ MIB_DESC(1, STATS_ROSPC, "RxOverSize"),
+ MIB_DESC(1, STATS_RJEPC, "RxJabber"),
+ MIB_DESC(1, STATS_RPPC, "RxPause"),
+ MIB_DESC(1, STATS_RL64PC, "Rx64Byte"),
+ MIB_DESC(1, STATS_RL65PC, "Rx65Byte"),
+ MIB_DESC(1, STATS_RL128PC, "Rx128Byte"),
+ MIB_DESC(1, STATS_RL256PC, "Rx256Byte"),
+ MIB_DESC(1, STATS_RL512PC, "Rx512Byte"),
+ MIB_DESC(1, STATS_RL1024PC, "Rx1024Byte"),
+ MIB_DESC(2, STATS_ROC, "RxByte"),
+ MIB_DESC(1, STATS_RDPC_CTRL, "RxCtrlDrop"),
+ MIB_DESC(1, STATS_RDPC_ING, "RxIngDrop"),
+ MIB_DESC(1, STATS_RDPC_ARL, "RxARLDrop")
+};
+
+/* IDs for the swconfig global attributes registered in mt753x_global[]. */
+enum {
+ /* Global attributes. */
+ MT753X_ATTR_ENABLE_VLAN,
+};
+
+/* swconfig getter: report whether global VLAN mode is enabled. */
+static int mt753x_get_vlan_enable(struct switch_dev *dev,
+      const struct switch_attr *attr,
+      struct switch_val *val)
+{
+ struct gsw_mt753x *gsw;
+
+ gsw = container_of(dev, struct gsw_mt753x, swdev);
+ val->value.i = gsw->global_vlan_enable;
+
+ return 0;
+}
+
+/* swconfig setter: any non-zero value turns global VLAN mode on. */
+static int mt753x_set_vlan_enable(struct switch_dev *dev,
+      const struct switch_attr *attr,
+      struct switch_val *val)
+{
+ struct gsw_mt753x *gsw;
+
+ gsw = container_of(dev, struct gsw_mt753x, swdev);
+ gsw->global_vlan_enable = !!val->value.i;
+
+ return 0;
+}
+
+/* Read the live PVID of @port from the PPBV1 register.
+ * Returns 0 on success, -EINVAL for an out-of-range port.
+ */
+static int mt753x_get_port_pvid(struct switch_dev *dev, int port, int *val)
+{
+ struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
+
+ /* Also reject negative ports: a negative index would compute an
+  * unrelated register address through PPBV1(). Matches the checks
+  * in mt753x_get_port_link()/mt753x_get_port_stats().
+  */
+ if (port < 0 || port >= MT753X_NUM_PORTS)
+  return -EINVAL;
+
+ *val = mt753x_reg_read(gsw, PPBV1(port));
+ *val &= GRP_PORT_VID_M;
+
+ return 0;
+}
+
+/* Cache the PVID for @port in software state; it is written to hardware
+ * later by mt753x_apply_vlan_config(). Returns 0 or -EINVAL.
+ */
+static int mt753x_set_port_pvid(struct switch_dev *dev, int port, int pvid)
+{
+ struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
+
+ /* Also reject negative ports: a negative index would write outside
+  * gsw->port_entries[]. Matches the other per-port callbacks.
+  */
+ if (port < 0 || port >= MT753X_NUM_PORTS)
+  return -EINVAL;
+
+ if (pvid < MT753X_MIN_VID || pvid > MT753X_MAX_VID)
+  return -EINVAL;
+
+ gsw->port_entries[port].pvid = pvid;
+
+ return 0;
+}
+
+/* swconfig get_vlan_ports: read one VLAN table entry from hardware and
+ * report its member ports plus each member's egress tag mode.
+ * val->port_vlan selects the table index to read.
+ */
+static int mt753x_get_vlan_ports(struct switch_dev *dev, struct switch_val *val)
+{
+ struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
+ u32 member;
+ u32 etags;
+ int i;
+
+ val->len = 0;
+
+ if (val->port_vlan < 0 || val->port_vlan >= MT753X_NUM_VLANS)
+  return -EINVAL;
+
+ /* Latch the entry into VAWD1/VAWD2 via the VLAN table controller. */
+ mt753x_vlan_ctrl(gsw, VTCR_READ_VLAN_ENTRY, val->port_vlan);
+
+ member = mt753x_reg_read(gsw, VAWD1);
+ member &= PORT_MEM_M;
+ member >>= PORT_MEM_S;
+
+ /* VAWD2 packs a small per-port egress tag-control field. */
+ etags = mt753x_reg_read(gsw, VAWD2);
+
+ for (i = 0; i < MT753X_NUM_PORTS; i++) {
+  struct switch_port *p;
+  int etag;
+
+  if (!(member & BIT(i)))
+   continue;
+
+  p = &val->value.ports[val->len++];
+  p->id = i;
+
+  etag = (etags >> PORT_ETAG_S(i)) & PORT_ETAG_M;
+
+  if (etag == ETAG_CTRL_TAG)
+   p->flags |= BIT(SWITCH_PORT_FLAG_TAGGED);
+  else if (etag != ETAG_CTRL_UNTAG)
+   /* Other modes exist in hardware but are not modeled here. */
+   dev_info(gsw->dev,
+     "vlan egress tag control neither untag nor tag.\n");
+ }
+
+ return 0;
+}
+
+/* swconfig set_vlan_ports: cache a VLAN's member/tag port masks in
+ * software; hardware is updated later by apply_config.
+ */
+static int mt753x_set_vlan_ports(struct switch_dev *dev, struct switch_val *val)
+{
+ struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
+ u8 member_mask = 0;
+ u8 tag_mask = 0;
+ int idx;
+
+ if (val->port_vlan < 0 || val->port_vlan >= MT753X_NUM_VLANS ||
+     val->len > MT753X_NUM_PORTS)
+  return -EINVAL;
+
+ for (idx = 0; idx < val->len; idx++) {
+  const struct switch_port *sp = &val->value.ports[idx];
+
+  if (sp->id >= MT753X_NUM_PORTS)
+   return -EINVAL;
+
+  member_mask |= BIT(sp->id);
+  if (sp->flags & BIT(SWITCH_PORT_FLAG_TAGGED))
+   tag_mask |= BIT(sp->id);
+ }
+
+ gsw->vlan_entries[val->port_vlan].member = member_mask;
+ gsw->vlan_entries[val->port_vlan].etags = tag_mask;
+
+ return 0;
+}
+
+/* swconfig vlan "vid" setter: store the VID for table entry
+ * val->port_vlan after range-checking both the index and the VID.
+ */
+static int mt753x_set_vid(struct switch_dev *dev,
+     const struct switch_attr *attr,
+     struct switch_val *val)
+{
+ struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
+ int idx = val->port_vlan;
+ u16 new_vid = (u16)val->value.i;
+
+ if (idx < 0 || idx >= MT753X_NUM_VLANS)
+  return -EINVAL;
+
+ if (new_vid < MT753X_MIN_VID || new_vid > MT753X_MAX_VID)
+  return -EINVAL;
+
+ gsw->vlan_entries[idx].vid = new_vid;
+ return 0;
+}
+
+/* swconfig vlan "vid" getter.
+ * NOTE(review): this returns the table index, not vlan_entries[].vid.
+ * The two only agree while vid keeps its reset default (vid == index,
+ * seeded by mt753x_reset_switch()) -- confirm this is intended after a
+ * caller has used set_vid.
+ */
+static int mt753x_get_vid(struct switch_dev *dev,
+     const struct switch_attr *attr,
+     struct switch_val *val)
+{
+ val->value.i = val->port_vlan;
+ return 0;
+}
+
+/* swconfig get_port_link: decode link, duplex and speed for @port from
+ * its MAC status register (PMSR).
+ */
+static int mt753x_get_port_link(struct switch_dev *dev, int port,
+    struct switch_port_link *link)
+{
+ struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
+ u32 pmsr, spd_field;
+
+ if (port < 0 || port >= MT753X_NUM_PORTS)
+  return -EINVAL;
+
+ pmsr = mt753x_reg_read(gsw, PMSR(port));
+
+ link->link = pmsr & MAC_LNK_STS;
+ link->duplex = pmsr & MAC_DPX_STS;
+ spd_field = (pmsr & MAC_SPD_STS_M) >> MAC_SPD_STS_S;
+
+ if (spd_field == MAC_SPD_10)
+  link->speed = SWITCH_PORT_SPEED_10;
+ else if (spd_field == MAC_SPD_100)
+  link->speed = SWITCH_PORT_SPEED_100;
+ else if (spd_field == MAC_SPD_1000)
+  link->speed = SWITCH_PORT_SPEED_1000;
+ else if (spd_field == MAC_SPD_2500)
+  /* TODO: swconfig has no support for 2500 now */
+  link->speed = SWITCH_PORT_SPEED_UNKNOWN;
+
+ return 0;
+}
+
+/* swconfig set_port_link: delegate to the generic PHY-based helper for
+ * PHY-backed ports. Only available built-in: switch_generic_set_link()
+ * is not exported to modules, so modular builds report -ENOTSUPP.
+ */
+static int mt753x_set_port_link(struct switch_dev *dev, int port,
+    struct switch_port_link *link)
+{
+#ifndef MODULE
+ if (port >= MT753X_NUM_PHYS)
+  return -EINVAL;
+
+ return switch_generic_set_link(dev, port, link);
+#else
+ return -ENOTSUPP;
+#endif
+}
+
+/* Read MIB counter @i (index into mt753x_mibs[]) for @port.
+ * 32-bit counters are a single register read; 64-bit counters are read
+ * as two 32-bit halves with a hi/lo/hi sequence, retried until the high
+ * word is stable, so a carry between the two reads cannot yield a torn
+ * value.
+ */
+static u64 get_mib_counter(struct gsw_mt753x *gsw, int i, int port)
+{
+ unsigned int offset;
+ u64 lo, hi, hi2;
+
+ offset = mt753x_mibs[i].offset;
+
+ if (mt753x_mibs[i].size == 1)
+  return mt753x_reg_read(gsw, MIB_COUNTER_REG(port, offset));
+
+ do {
+  hi = mt753x_reg_read(gsw, MIB_COUNTER_REG(port, offset + 4));
+  lo = mt753x_reg_read(gsw, MIB_COUNTER_REG(port, offset));
+  hi2 = mt753x_reg_read(gsw, MIB_COUNTER_REG(port, offset + 4));
+ } while (hi2 != hi);
+
+ return (hi << 32) | lo;
+}
+
+/* swconfig port "mib" attribute: format all counters of a port into a
+ * text buffer. NOTE: buf is static, so concurrent readers share it --
+ * swconfig serializes attribute access, presumably; confirm.
+ */
+static int mt753x_get_port_mib(struct switch_dev *dev,
+          const struct switch_attr *attr,
+          struct switch_val *val)
+{
+ static char buf[4096];
+ struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
+ int i, len = 0;
+
+ /* Reject negative ports too, like the other per-port callbacks. */
+ if (val->port_vlan < 0 || val->port_vlan >= MT753X_NUM_PORTS)
+  return -EINVAL;
+
+ len += snprintf(buf + len, sizeof(buf) - len,
+   "Port %d MIB counters\n", val->port_vlan);
+
+ for (i = 0; i < ARRAY_SIZE(mt753x_mibs); ++i) {
+  u64 counter;
+
+  /* snprintf() returns the would-be length on truncation; clamp
+   * len so (sizeof(buf) - len) can never wrap around as size_t
+   * and write past the buffer on a later iteration.
+   */
+  if (len > (int)sizeof(buf) - 1)
+   len = (int)sizeof(buf) - 1;
+
+  len += snprintf(buf + len, sizeof(buf) - len,
+    "%-11s: ", mt753x_mibs[i].name);
+  if (len > (int)sizeof(buf) - 1)
+   len = (int)sizeof(buf) - 1;
+
+  counter = get_mib_counter(gsw, i, val->port_vlan);
+  len += snprintf(buf + len, sizeof(buf) - len, "%llu\n",
+    counter);
+ }
+
+ if (len > (int)sizeof(buf) - 1)
+  len = (int)sizeof(buf) - 1;
+
+ val->value.s = buf;
+ val->len = len;
+ return 0;
+}
+
+/* swconfig get_port_stats: expose just the TX/RX byte counters, pulled
+ * from the hardware MIB block via get_mib_counter().
+ */
+static int mt753x_get_port_stats(struct switch_dev *dev, int port,
+     struct switch_port_stats *stats)
+{
+ struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
+
+ if (port < 0 || port >= MT753X_NUM_PORTS)
+  return -EINVAL;
+
+ stats->tx_bytes = get_mib_counter(gsw, MT753X_PORT_MIB_TXB_ID, port);
+ stats->rx_bytes = get_mib_counter(gsw, MT753X_PORT_MIB_RXB_ID, port);
+
+ return 0;
+}
+
+/* Non-VLAN fallback: isolate all user ports from each other so traffic
+ * only flows between each port and the CPU port, and make ports VLAN
+ * transparent (except S-tag ports 5/6 when configured).
+ */
+static void mt753x_port_isolation(struct gsw_mt753x *gsw)
+{
+ int i;
+
+ /* Every user port may only forward to the CPU port... */
+ for (i = 0; i < MT753X_NUM_PORTS; i++)
+  mt753x_reg_write(gsw, PCR(i),
+     BIT(gsw->cpu_port) << PORT_MATRIX_S);
+
+ /* ...while the CPU port may forward to all ports. */
+ mt753x_reg_write(gsw, PCR(gsw->cpu_port), PORT_MATRIX_M);
+
+ for (i = 0; i < MT753X_NUM_PORTS; i++) {
+  /* 0x8100 is the S-tag TPID programmed into PVC. */
+  u32 pvc_mode = 0x8100 << STAG_VPID_S;
+
+  if ((gsw->port5_cfg.stag_on && i == 5) ||
+      (gsw->port6_cfg.stag_on && i == 6))
+   pvc_mode |= PVC_PORT_STAG;
+  else
+   pvc_mode |= (VA_TRANSPARENT_PORT << VLAN_ATTR_S);
+
+  mt753x_reg_write(gsw, PVC(i), pvc_mode);
+ }
+}
+
+/* swconfig apply_config: push the cached software state to hardware.
+ * With global VLAN mode off, fall back to plain port isolation;
+ * otherwise program the full VLAN table. Always succeeds.
+ */
+static int mt753x_apply_config(struct switch_dev *dev)
+{
+ struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
+
+ if (gsw->global_vlan_enable)
+  mt753x_apply_vlan_config(gsw);
+ else
+  mt753x_port_isolation(gsw);
+
+ return 0;
+}
+
+/* swconfig reset_switch: drop all cached per-port and per-vlan software
+ * state and reseed each vlan entry's vid with its own index, so entries
+ * whose vid is never set explicitly still map 1:1.
+ */
+static int mt753x_reset_switch(struct switch_dev *dev)
+{
+ struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
+ int n;
+
+ memset(gsw->port_entries, 0, sizeof(gsw->port_entries));
+ memset(gsw->vlan_entries, 0, sizeof(gsw->vlan_entries));
+
+ for (n = 0; n < MT753X_NUM_VLANS; n++)
+  gsw->vlan_entries[n].vid = n;
+
+ return 0;
+}
+
+/* swconfig phy_read16: forward a clause-22 PHY register read to the
+ * switch's MII accessor. Always reports success; errors from
+ * gsw->mii_read, if any, are not surfaced here.
+ */
+static int mt753x_phy_read16(struct switch_dev *dev, int addr, u8 reg,
+        u16 *value)
+{
+ struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
+
+ *value = gsw->mii_read(gsw, addr, reg);
+
+ return 0;
+}
+
+/* swconfig phy_write16: forward a clause-22 PHY register write to the
+ * switch's MII accessor. Always reports success.
+ */
+static int mt753x_phy_write16(struct switch_dev *dev, int addr, u8 reg,
+         u16 value)
+{
+ struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
+
+ gsw->mii_write(gsw, addr, reg, value);
+
+ return 0;
+}
+
+/* Global swconfig attributes: currently just the VLAN-mode toggle. */
+static const struct switch_attr mt753x_global[] = {
+ {
+  .type = SWITCH_TYPE_INT,
+  .name = "enable_vlan",
+  .description = "VLAN mode (1:enabled)",
+  .max = 1,
+  .id = MT753X_ATTR_ENABLE_VLAN,
+  .get = mt753x_get_vlan_enable,
+  .set = mt753x_set_vlan_enable,
+ }
+};
+
+/* Per-port swconfig attributes: read-only MIB counter dump. */
+static const struct switch_attr mt753x_port[] = {
+ {
+  .type = SWITCH_TYPE_STRING,
+  .name = "mib",
+  .description = "Get MIB counters for port",
+  .get = mt753x_get_port_mib,
+  .set = NULL,
+ },
+};
+
+/* Per-vlan swconfig attributes: the 802.1Q VID of a table entry. */
+static const struct switch_attr mt753x_vlan[] = {
+ {
+  .type = SWITCH_TYPE_INT,
+  .name = "vid",
+  .description = "VLAN ID (0-4094)",
+  .set = mt753x_set_vid,
+  .get = mt753x_get_vid,
+  .max = 4094,
+ },
+};
+
+/* swconfig operations table wiring all callbacks defined above. */
+static const struct switch_dev_ops mt753x_swdev_ops = {
+ .attr_global = {
+  .attr = mt753x_global,
+  .n_attr = ARRAY_SIZE(mt753x_global),
+ },
+ .attr_port = {
+  .attr = mt753x_port,
+  .n_attr = ARRAY_SIZE(mt753x_port),
+ },
+ .attr_vlan = {
+  .attr = mt753x_vlan,
+  .n_attr = ARRAY_SIZE(mt753x_vlan),
+ },
+ .get_vlan_ports = mt753x_get_vlan_ports,
+ .set_vlan_ports = mt753x_set_vlan_ports,
+ .get_port_pvid = mt753x_get_port_pvid,
+ .set_port_pvid = mt753x_set_port_pvid,
+ .get_port_link = mt753x_get_port_link,
+ .set_port_link = mt753x_set_port_link,
+ .get_port_stats = mt753x_get_port_stats,
+ .apply_config = mt753x_apply_config,
+ .reset_switch = mt753x_reset_switch,
+ .phy_read16 = mt753x_phy_read16,
+ .phy_write16 = mt753x_phy_write16,
+};
+
+/* Register this switch with swconfig and apply the initial (isolation
+ * or VLAN) configuration. The CPU port may be overridden from the
+ * device tree via "mediatek,cpuport"; otherwise the default is used.
+ * Returns 0 on success or the error from register_switch().
+ */
+int mt753x_swconfig_init(struct gsw_mt753x *gsw)
+{
+ struct switch_dev *swdev = &gsw->swdev;
+ struct device_node *np = gsw->dev->of_node;
+ int ret;
+
+ if (of_property_read_u32(np, "mediatek,cpuport", &gsw->cpu_port))
+  gsw->cpu_port = MT753X_DFL_CPU_PORT;
+
+ swdev->name = gsw->name;
+ swdev->alias = gsw->name;
+ swdev->cpu_port = gsw->cpu_port;
+ swdev->ports = MT753X_NUM_PORTS;
+ swdev->vlans = MT753X_NUM_VLANS;
+ swdev->ops = &mt753x_swdev_ops;
+
+ ret = register_switch(swdev, NULL);
+ if (ret) {
+  dev_notice(gsw->dev, "Failed to register switch %s\n",
+      swdev->name);
+  return ret;
+ }
+
+ /* Push the current (freshly initialized) state into hardware. */
+ mt753x_apply_config(swdev);
+
+ return 0;
+}
+
+/* Unregister the swconfig device registered by mt753x_swconfig_init(). */
+void mt753x_swconfig_destroy(struct gsw_mt753x *gsw)
+{
+ unregister_switch(&gsw->swdev);
+}
diff --git a/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_vlan.c b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_vlan.c
new file mode 100755
index 0000000..c806566
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/phy/mtk/mt753x/mt753x_vlan.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ */
+
+#include "mt753x.h"
+#include "mt753x_regs.h"
+
+/* Predefined port layouts, selected by the DT "mediatek,portmap"
+ * property (see mt753x_find_mapping). Names encode the role of ports
+ * 0-4: 'l' = LAN, 'w' = WAN. Each layout supplies per-port PVIDs and
+ * per-vlan member/etag/vid arrays (vlan 0 unused; 1 = LAN, 2 = WAN).
+ */
+struct mt753x_mapping mt753x_def_mapping[] = {
+ {
+  .name = "llllw",
+  .pvids = { 1, 1, 1, 1, 2, 2, 1 },
+  .members = { 0, 0x4f, 0x30 },
+  .etags = { 0, 0, 0 },
+  .vids = { 0, 1, 2 },
+ }, {
+  .name = "wllll",
+  .pvids = { 2, 1, 1, 1, 1, 2, 1 },
+  .members = { 0, 0x5e, 0x21 },
+  .etags = { 0, 0, 0 },
+  .vids = { 0, 1, 2 },
+ }, {
+  .name = "lwlll",
+  .pvids = { 1, 2, 1, 1, 1, 2, 1 },
+  .members = { 0, 0x5d, 0x22 },
+  .etags = { 0, 0, 0 },
+  .vids = { 0, 1, 2 },
+ },
+};
+
+/* Issue a VLAN table command through VTCR and wait for completion.
+ * @cmd: VTCR function code (e.g. VTCR_READ_VLAN_ENTRY)
+ * @val: command argument, masked to the VID field
+ * Polls up to 300 x ~1ms for VTCR_BUSY to clear; logs on timeout.
+ */
+void mt753x_vlan_ctrl(struct gsw_mt753x *gsw, u32 cmd, u32 val)
+{
+ int i;
+
+ mt753x_reg_write(gsw, VTCR,
+    VTCR_BUSY | ((cmd << VTCR_FUNC_S) & VTCR_FUNC_M) |
+    (val & VTCR_VID_M));
+
+ for (i = 0; i < 300; i++) {
+  /* Renamed from 'val': the inner variable shadowed the
+   * function parameter of the same name.
+   */
+  u32 vtcr = mt753x_reg_read(gsw, VTCR);
+
+  if ((vtcr & VTCR_BUSY) == 0)
+   break;
+
+  usleep_range(1000, 1100);
+ }
+
+ if (i == 300)
+  dev_info(gsw->dev, "vtcr timeout\n");
+}
+
+/* Program one VLAN table entry: stage membership in VAWD1 and per-port
+ * egress tag control in VAWD2, then commit via the VTCR write command.
+ * An empty @ports mask invalidates the entry (VAWD1 = 0).
+ * NOTE(review): the @vlan parameter is unused; the entry is addressed
+ * purely by @vid -- confirm whether that is intentional.
+ */
+static void mt753x_write_vlan_entry(struct gsw_mt753x *gsw, int vlan, u16 vid,
+        u8 ports, u8 etags)
+{
+ int port;
+ u32 val;
+
+ /* vlan port membership */
+ if (ports)
+  mt753x_reg_write(gsw, VAWD1,
+     IVL_MAC | VTAG_EN | VENTRY_VALID |
+     ((ports << PORT_MEM_S) & PORT_MEM_M));
+ else
+  mt753x_reg_write(gsw, VAWD1, 0);
+
+ /* egress mode */
+ val = 0;
+ for (port = 0; port < MT753X_NUM_PORTS; port++) {
+  if (etags & BIT(port))
+   val |= ETAG_CTRL_TAG << PORT_ETAG_S(port);
+  else
+   val |= ETAG_CTRL_UNTAG << PORT_ETAG_S(port);
+ }
+ mt753x_reg_write(gsw, VAWD2, val);
+
+ /* write to vlan table */
+ mt753x_vlan_ctrl(gsw, VTCR_WRITE_VLAN_ENTRY, vid);
+}
+
+/* Push the cached per-vlan/per-port software state into the hardware:
+ * security mode on all ports, per-port VLAN attributes, the full VLAN
+ * table, and finally each port's default PVID.
+ */
+void mt753x_apply_vlan_config(struct gsw_mt753x *gsw)
+{
+ int i, j;
+ u8 tag_ports;
+ u8 untag_ports;
+
+ /* set all ports as security mode */
+ for (i = 0; i < MT753X_NUM_PORTS; i++)
+  mt753x_reg_write(gsw, PCR(i),
+     PORT_MATRIX_M | SECURITY_MODE);
+
+ /* check if a port is used in tag/untag vlan egress mode */
+ tag_ports = 0;
+ untag_ports = 0;
+
+ for (i = 0; i < MT753X_NUM_VLANS; i++) {
+  u8 member = gsw->vlan_entries[i].member;
+  u8 etags = gsw->vlan_entries[i].etags;
+
+  if (!member)
+   continue;
+
+  for (j = 0; j < MT753X_NUM_PORTS; j++) {
+   if (!(member & BIT(j)))
+    continue;
+
+   if (etags & BIT(j))
+    tag_ports |= 1u << j;
+   else
+    untag_ports |= 1u << j;
+  }
+ }
+
+ /* set all untag-only ports as transparent and the rest as user port */
+ for (i = 0; i < MT753X_NUM_PORTS; i++) {
+  /* 0x8100 is the S-tag TPID programmed into PVC. */
+  u32 pvc_mode = 0x8100 << STAG_VPID_S;
+
+  if (untag_ports & BIT(i) && !(tag_ports & BIT(i)))
+   pvc_mode = (0x8100 << STAG_VPID_S) |
+       (VA_TRANSPARENT_PORT << VLAN_ATTR_S);
+
+  /* S-tag config on ports 5/6 overrides the choice above. */
+  if ((gsw->port5_cfg.stag_on && i == 5) ||
+      (gsw->port6_cfg.stag_on && i == 6))
+   pvc_mode = (0x8100 << STAG_VPID_S) | PVC_PORT_STAG;
+
+  mt753x_reg_write(gsw, PVC(i), pvc_mode);
+ }
+
+ /* first clear the switch vlan table */
+ for (i = 0; i < MT753X_NUM_VLANS; i++)
+  mt753x_write_vlan_entry(gsw, i, i, 0, 0);
+
+ /* now program only vlans with members to avoid
+  * clobbering remapped entries in later iterations
+  */
+ for (i = 0; i < MT753X_NUM_VLANS; i++) {
+  u16 vid = gsw->vlan_entries[i].vid;
+  u8 member = gsw->vlan_entries[i].member;
+  u8 etags = gsw->vlan_entries[i].etags;
+
+  if (member)
+   mt753x_write_vlan_entry(gsw, i, vid, member, etags);
+ }
+
+ /* Port Default PVID */
+ for (i = 0; i < MT753X_NUM_PORTS; i++) {
+  int vlan = gsw->port_entries[i].pvid;
+  u16 pvid = 0;
+  u32 val;
+
+  /* Only take the PVID from vlans that actually have members. */
+  if (vlan < MT753X_NUM_VLANS && gsw->vlan_entries[vlan].member)
+   pvid = gsw->vlan_entries[vlan].vid;
+
+  val = mt753x_reg_read(gsw, PPBV1(i));
+  val &= ~GRP_PORT_VID_M;
+  val |= pvid;
+  mt753x_reg_write(gsw, PPBV1(i), val);
+ }
+}
+
+/* Look up a predefined port layout by the DT "mediatek,portmap"
+ * property. Returns the matching entry of mt753x_def_mapping[], or
+ * NULL when the property is absent or names no known layout.
+ */
+struct mt753x_mapping *mt753x_find_mapping(struct device_node *np)
+{
+ const char *name;
+ size_t n;
+
+ if (of_property_read_string(np, "mediatek,portmap", &name))
+  return NULL;
+
+ for (n = 0; n < ARRAY_SIZE(mt753x_def_mapping); n++) {
+  if (strcmp(name, mt753x_def_mapping[n].name) == 0)
+   return &mt753x_def_mapping[n];
+ }
+
+ return NULL;
+}
+
+/* Copy a static port layout into the live software state: per-port
+ * PVIDs plus per-vlan member/etag/vid arrays. Hardware is updated
+ * separately by mt753x_apply_vlan_config().
+ */
+void mt753x_apply_mapping(struct gsw_mt753x *gsw, struct mt753x_mapping *map)
+{
+ int idx;
+
+ for (idx = 0; idx < MT753X_NUM_PORTS; idx++)
+  gsw->port_entries[idx].pvid = map->pvids[idx];
+
+ for (idx = 0; idx < MT753X_NUM_VLANS; idx++) {
+  gsw->vlan_entries[idx].member = map->members[idx];
+  gsw->vlan_entries[idx].etags = map->etags[idx];
+  gsw->vlan_entries[idx].vid = map->vids[idx];
+ }
+}