[][Add initial mtk feed for OpenWRT v21.02]
[Description]
Add initial mtk feed for OpenWRT v21.02
[Release-log]
N/A
Change-Id: I8051c6ba87f1ccf26c02fdd88a17d66f63c0b101
Reviewed-on: https://gerrit.mediatek.inc/c/openwrt/feeds/mtk_openwrt_feeds/+/4495320
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/Kconfig b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/Kconfig
new file mode 100755
index 0000000..b097f52
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/Kconfig
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_VENDOR_MEDIATEK
+ bool "MediaTek ethernet driver"
+ depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620
+ ---help---
+ If you have a Mediatek SoC with ethernet, say Y.
+
+if NET_VENDOR_MEDIATEK
+
+config NET_MEDIATEK_SOC
+ tristate "MediaTek SoC Gigabit Ethernet support"
+ select PHYLINK
+ ---help---
+ This driver supports the gigabit ethernet MACs in the
+ MediaTek SoC family.
+
+config MEDIATEK_NETSYS_V2
+ tristate "MediaTek Ethernet NETSYS V2 support"
+ depends on ARCH_MEDIATEK && NET_MEDIATEK_SOC
+ ---help---
+ This option enables MTK Ethernet NETSYS V2 support
+
+config NET_MEDIATEK_HNAT
+ tristate "MediaTek HW NAT support"
+ depends on NET_MEDIATEK_SOC && NF_CONNTRACK && IP_NF_NAT
+ ---help---
+ This driver supports the hardware Network Address Translation
+ in the MediaTek MT7986/MT2701/MT7622/MT7629/MT7621 chipset
+ family.
+
+config NET_MEDIATEK_HW_QOS
+ bool "Mediatek HW QoS support"
+ depends on NET_MEDIATEK_HNAT
+ default n
+ ---help---
+ This driver supports the hardware
+ quality of service (QoS) control
+ for the hardware NAT in the
+ MediaTek chipset family.
+
+endif #NET_VENDOR_MEDIATEK
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/Makefile b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/Makefile
new file mode 100755
index 0000000..f046e73
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Mediatek SoCs built-in ethernet macs
+#
+
+obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
+mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_eth_dbg.o
+obj-$(CONFIG_NET_MEDIATEK_HNAT) += mtk_hnat/
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.c
new file mode 100755
index 0000000..82aa6ca
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.c
@@ -0,0 +1,840 @@
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
+ */
+
+#include <linux/trace_seq.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/of_mdio.h>
+
+#include "mtk_eth_soc.h"
+#include "mtk_eth_dbg.h"
+
+struct mtk_eth_debug {
+ struct dentry *root;
+};
+
+struct mtk_eth *g_eth;
+
+struct mtk_eth_debug eth_debug;
+
+void mt7530_mdio_w32(struct mtk_eth *eth, u32 reg, u32 val)
+{
+ mutex_lock(ð->mii_bus->mdio_lock);
+
+ _mtk_mdio_write(eth, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
+ _mtk_mdio_write(eth, 0x1f, (reg >> 2) & 0xf, val & 0xffff);
+ _mtk_mdio_write(eth, 0x1f, 0x10, val >> 16);
+
+ mutex_unlock(ð->mii_bus->mdio_lock);
+}
+
+u32 mt7530_mdio_r32(struct mtk_eth *eth, u32 reg)
+{
+ u16 high, low;
+
+ mutex_lock(ð->mii_bus->mdio_lock);
+
+ _mtk_mdio_write(eth, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
+ low = _mtk_mdio_read(eth, 0x1f, (reg >> 2) & 0xf);
+ high = _mtk_mdio_read(eth, 0x1f, 0x10);
+
+ mutex_unlock(ð->mii_bus->mdio_lock);
+
+ return (high << 16) | (low & 0xffff);
+}
+
+void mtk_switch_w32(struct mtk_eth *eth, u32 val, unsigned reg)
+{
+ mtk_w32(eth, val, reg + 0x10000);
+}
+EXPORT_SYMBOL(mtk_switch_w32);
+
+u32 mtk_switch_r32(struct mtk_eth *eth, unsigned reg)
+{
+ return mtk_r32(eth, reg + 0x10000);
+}
+EXPORT_SYMBOL(mtk_switch_r32);
+
+static int mtketh_debug_show(struct seq_file *m, void *private)
+{
+ struct mtk_eth *eth = m->private;
+ struct mtk_mac *mac = 0;
+ u32 d;
+ int i, j = 0;
+
+ for (i = 0 ; i < MTK_MAX_DEVS ; i++) {
+ if (!eth->mac[i] ||
+ of_phy_is_fixed_link(eth->mac[i]->of_node))
+ continue;
+ mac = eth->mac[i];
+#if 0 //FIXME
+ while (j < 30) {
+ d = _mtk_mdio_read(eth, mac->phy_dev->addr, j);
+
+ seq_printf(m, "phy=%d, reg=0x%08x, data=0x%08x\n",
+ mac->phy_dev->addr, j, d);
+ j++;
+ }
+#endif
+ }
+ return 0;
+}
+
+static int mtketh_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mtketh_debug_show, inode->i_private);
+}
+
+static const struct file_operations mtketh_debug_fops = {
+ .open = mtketh_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int mtketh_mt7530sw_debug_show(struct seq_file *m, void *private)
+{
+ struct mtk_eth *eth = m->private;
+ u32 offset, data;
+ int i;
+ struct mt7530_ranges {
+ u32 start;
+ u32 end;
+ } ranges[] = {
+ {0x0, 0xac},
+ {0x1000, 0x10e0},
+ {0x1100, 0x1140},
+ {0x1200, 0x1240},
+ {0x1300, 0x1340},
+ {0x1400, 0x1440},
+ {0x1500, 0x1540},
+ {0x1600, 0x1640},
+ {0x1800, 0x1848},
+ {0x1900, 0x1948},
+ {0x1a00, 0x1a48},
+ {0x1b00, 0x1b48},
+ {0x1c00, 0x1c48},
+ {0x1d00, 0x1d48},
+ {0x1e00, 0x1e48},
+ {0x1f60, 0x1ffc},
+ {0x2000, 0x212c},
+ {0x2200, 0x222c},
+ {0x2300, 0x232c},
+ {0x2400, 0x242c},
+ {0x2500, 0x252c},
+ {0x2600, 0x262c},
+ {0x3000, 0x3014},
+ {0x30c0, 0x30f8},
+ {0x3100, 0x3114},
+ {0x3200, 0x3214},
+ {0x3300, 0x3314},
+ {0x3400, 0x3414},
+ {0x3500, 0x3514},
+ {0x3600, 0x3614},
+ {0x4000, 0x40d4},
+ {0x4100, 0x41d4},
+ {0x4200, 0x42d4},
+ {0x4300, 0x43d4},
+ {0x4400, 0x44d4},
+ {0x4500, 0x45d4},
+ {0x4600, 0x46d4},
+ {0x4f00, 0x461c},
+ {0x7000, 0x7038},
+ {0x7120, 0x7124},
+ {0x7800, 0x7804},
+ {0x7810, 0x7810},
+ {0x7830, 0x7830},
+ {0x7a00, 0x7a7c},
+ {0x7b00, 0x7b04},
+ {0x7e00, 0x7e04},
+ {0x7ffc, 0x7ffc},
+ };
+
+ if (!mt7530_exist(eth))
+ return -EOPNOTSUPP;
+
+ if ((!eth->mac[0] || !of_phy_is_fixed_link(eth->mac[0]->of_node)) &&
+ (!eth->mac[1] || !of_phy_is_fixed_link(eth->mac[1]->of_node))) {
+ seq_puts(m, "no switch found\n");
+ return 0;
+ }
+
+ for (i = 0 ; i < ARRAY_SIZE(ranges) ; i++) {
+ for (offset = ranges[i].start;
+ offset <= ranges[i].end; offset += 4) {
+ data = mt7530_mdio_r32(eth, offset);
+ seq_printf(m, "mt7530 switch reg=0x%08x, data=0x%08x\n",
+ offset, data);
+ }
+ }
+
+ return 0;
+}
+
+static int mtketh_debug_mt7530sw_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mtketh_mt7530sw_debug_show, inode->i_private);
+}
+
+static const struct file_operations mtketh_debug_mt7530sw_fops = {
+ .open = mtketh_debug_mt7530sw_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static ssize_t mtketh_mt7530sw_debugfs_write(struct file *file,
+ const char __user *ptr,
+ size_t len, loff_t *off)
+{
+ struct mtk_eth *eth = file->private_data;
+ char buf[32], *token, *p = buf;
+ u32 reg, value, phy;
+ int ret;
+
+ if (!mt7530_exist(eth))
+ return -EOPNOTSUPP;
+
+ if (*off != 0)
+ return 0;
+
+ if (len > sizeof(buf) - 1)
+ len = sizeof(buf) - 1;
+
+ ret = strncpy_from_user(buf, ptr, len);
+ if (ret < 0)
+ return ret;
+ buf[len] = '\0';
+
+ token = strsep(&p, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtoul(token, 16, (unsigned long *)&phy))
+ return -EINVAL;
+
+ token = strsep(&p, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtoul(token, 16, (unsigned long *)®))
+ return -EINVAL;
+
+ token = strsep(&p, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtoul(token, 16, (unsigned long *)&value))
+ return -EINVAL;
+
+ pr_info("%s:phy=%d, reg=0x%x, val=0x%x\n", __func__,
+ 0x1f, reg, value);
+ mt7530_mdio_w32(eth, reg, value);
+ pr_info("%s:phy=%d, reg=0x%x, val=0x%x confirm..\n", __func__,
+ 0x1f, reg, mt7530_mdio_r32(eth, reg));
+
+ return len;
+}
+
+static ssize_t mtketh_debugfs_write(struct file *file, const char __user *ptr,
+ size_t len, loff_t *off)
+{
+ struct mtk_eth *eth = file->private_data;
+ char buf[32], *token, *p = buf;
+ u32 reg, value, phy;
+ int ret;
+
+ if (*off != 0)
+ return 0;
+
+ if (len > sizeof(buf) - 1)
+ len = sizeof(buf) - 1;
+
+ ret = strncpy_from_user(buf, ptr, len);
+ if (ret < 0)
+ return ret;
+ buf[len] = '\0';
+
+ token = strsep(&p, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtoul(token, 16, (unsigned long *)&phy))
+ return -EINVAL;
+
+ token = strsep(&p, " ");
+
+ if (!token)
+ return -EINVAL;
+ if (kstrtoul(token, 16, (unsigned long *)®))
+ return -EINVAL;
+
+ token = strsep(&p, " ");
+
+ if (!token)
+ return -EINVAL;
+ if (kstrtoul(token, 16, (unsigned long *)&value))
+ return -EINVAL;
+
+ pr_info("%s:phy=%d, reg=0x%x, val=0x%x\n", __func__,
+ phy, reg, value);
+
+ _mtk_mdio_write(eth, phy, reg, value);
+
+ pr_info("%s:phy=%d, reg=0x%x, val=0x%x confirm..\n", __func__,
+ phy, reg, _mtk_mdio_read(eth, phy, reg));
+
+ return len;
+}
+
+static ssize_t mtketh_debugfs_reset(struct file *file, const char __user *ptr,
+ size_t len, loff_t *off)
+{
+ struct mtk_eth *eth = file->private_data;
+
+ schedule_work(ð->pending_work);
+ return len;
+}
+
+static const struct file_operations fops_reg_w = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = mtketh_debugfs_write,
+ .llseek = noop_llseek,
+};
+
+static const struct file_operations fops_eth_reset = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = mtketh_debugfs_reset,
+ .llseek = noop_llseek,
+};
+
+static const struct file_operations fops_mt7530sw_reg_w = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = mtketh_mt7530sw_debugfs_write,
+ .llseek = noop_llseek,
+};
+
+void mtketh_debugfs_exit(struct mtk_eth *eth)
+{
+ debugfs_remove_recursive(eth_debug.root);
+}
+
+int mtketh_debugfs_init(struct mtk_eth *eth)
+{
+ int ret = 0;
+
+ eth_debug.root = debugfs_create_dir("mtketh", NULL);
+ if (!eth_debug.root) {
+ dev_notice(eth->dev, "%s:err at %d\n", __func__, __LINE__);
+ ret = -ENOMEM;
+ }
+
+ debugfs_create_file("phy_regs", S_IRUGO,
+ eth_debug.root, eth, &mtketh_debug_fops);
+ debugfs_create_file("phy_reg_w", S_IFREG | S_IWUSR,
+ eth_debug.root, eth, &fops_reg_w);
+ debugfs_create_file("reset", S_IFREG | S_IWUSR,
+ eth_debug.root, eth, &fops_eth_reset);
+ if (mt7530_exist(eth)) {
+ debugfs_create_file("mt7530sw_regs", S_IRUGO,
+ eth_debug.root, eth,
+ &mtketh_debug_mt7530sw_fops);
+ debugfs_create_file("mt7530sw_reg_w", S_IFREG | S_IWUSR,
+ eth_debug.root, eth,
+ &fops_mt7530sw_reg_w);
+ }
+ return ret;
+}
+
+void mii_mgr_read_combine(struct mtk_eth *eth, u32 phy_addr, u32 phy_register,
+ u32 *read_data)
+{
+ if (mt7530_exist(eth) && phy_addr == 31)
+ *read_data = mt7530_mdio_r32(eth, phy_register);
+
+ else
+ *read_data = _mtk_mdio_read(eth, phy_addr, phy_register);
+}
+
+void mii_mgr_write_combine(struct mtk_eth *eth, u32 phy_addr, u32 phy_register,
+ u32 write_data)
+{
+ if (mt7530_exist(eth) && phy_addr == 31)
+ mt7530_mdio_w32(eth, phy_register, write_data);
+
+ else
+ _mtk_mdio_write(eth, phy_addr, phy_register, write_data);
+}
+
+static void mii_mgr_read_cl45(struct mtk_eth *eth, u32 port, u32 devad, u32 reg, u32 *data)
+{
+ mtk_cl45_ind_read(eth, port, devad, reg, data);
+}
+
+static void mii_mgr_write_cl45(struct mtk_eth *eth, u32 port, u32 devad, u32 reg, u32 data)
+{
+ mtk_cl45_ind_write(eth, port, devad, reg, data);
+}
+
+int mtk_do_priv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ struct mtk_mii_ioctl_data mii;
+ struct mtk_esw_reg reg;
+
+ switch (cmd) {
+ case MTKETH_MII_READ:
+ if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
+ goto err_copy;
+ mii_mgr_read_combine(eth, mii.phy_id, mii.reg_num,
+ &mii.val_out);
+ if (copy_to_user(ifr->ifr_data, &mii, sizeof(mii)))
+ goto err_copy;
+
+ return 0;
+ case MTKETH_MII_WRITE:
+ if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
+ goto err_copy;
+ mii_mgr_write_combine(eth, mii.phy_id, mii.reg_num,
+ mii.val_in);
+
+ return 0;
+ case MTKETH_MII_READ_CL45:
+ if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
+ goto err_copy;
+ mii_mgr_read_cl45(eth, mii.port_num, mii.dev_addr, mii.reg_addr,
+ &mii.val_out);
+ if (copy_to_user(ifr->ifr_data, &mii, sizeof(mii)))
+ goto err_copy;
+
+ return 0;
+ case MTKETH_MII_WRITE_CL45:
+ if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii)))
+ goto err_copy;
+ mii_mgr_write_cl45(eth, mii.port_num, mii.dev_addr, mii.reg_addr,
+ mii.val_in);
+ return 0;
+ case MTKETH_ESW_REG_READ:
+ if (!mt7530_exist(eth))
+ return -EOPNOTSUPP;
+ if (copy_from_user(®, ifr->ifr_data, sizeof(reg)))
+ goto err_copy;
+ if (reg.off > REG_ESW_MAX)
+ return -EINVAL;
+ reg.val = mtk_switch_r32(eth, reg.off);
+
+ if (copy_to_user(ifr->ifr_data, ®, sizeof(reg)))
+ goto err_copy;
+
+ return 0;
+ case MTKETH_ESW_REG_WRITE:
+ if (!mt7530_exist(eth))
+ return -EOPNOTSUPP;
+ if (copy_from_user(®, ifr->ifr_data, sizeof(reg)))
+ goto err_copy;
+ if (reg.off > REG_ESW_MAX)
+ return -EINVAL;
+ mtk_switch_w32(eth, reg.val, reg.off);
+
+ return 0;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+err_copy:
+ return -EFAULT;
+}
+
+int esw_cnt_read(struct seq_file *seq, void *v)
+{
+ unsigned int pkt_cnt = 0;
+ int i = 0;
+ struct mtk_eth *eth = g_eth;
+ unsigned int mib_base = MTK_GDM1_TX_GBCNT;
+
+ seq_puts(seq, "\n <<CPU>>\n");
+ seq_puts(seq, " |\n");
+ seq_puts(seq, "+-----------------------------------------------+\n");
+ seq_puts(seq, "| <<PSE>> |\n");
+ seq_puts(seq, "+-----------------------------------------------+\n");
+ seq_puts(seq, " |\n");
+ seq_puts(seq, "+-----------------------------------------------+\n");
+ seq_puts(seq, "| <<GDMA>> |\n");
+ seq_printf(seq, "| GDMA1_RX_GBCNT : %010u (Rx Good Bytes) |\n",
+ mtk_r32(eth, mib_base));
+ seq_printf(seq, "| GDMA1_RX_GPCNT : %010u (Rx Good Pkts) |\n",
+ mtk_r32(eth, mib_base+0x08));
+ seq_printf(seq, "| GDMA1_RX_OERCNT : %010u (overflow error) |\n",
+ mtk_r32(eth, mib_base+0x10));
+ seq_printf(seq, "| GDMA1_RX_FERCNT : %010u (FCS error) |\n",
+ mtk_r32(eth, mib_base+0x14));
+ seq_printf(seq, "| GDMA1_RX_SERCNT : %010u (too short) |\n",
+ mtk_r32(eth, mib_base+0x18));
+ seq_printf(seq, "| GDMA1_RX_LERCNT : %010u (too long) |\n",
+ mtk_r32(eth, mib_base+0x1C));
+ seq_printf(seq, "| GDMA1_RX_CERCNT : %010u (checksum error) |\n",
+ mtk_r32(eth, mib_base+0x20));
+ seq_printf(seq, "| GDMA1_RX_FCCNT : %010u (flow control) |\n",
+ mtk_r32(eth, mib_base+0x24));
+ seq_printf(seq, "| GDMA1_TX_SKIPCNT: %010u (about count) |\n",
+ mtk_r32(eth, mib_base+0x28));
+ seq_printf(seq, "| GDMA1_TX_COLCNT : %010u (collision count) |\n",
+ mtk_r32(eth, mib_base+0x2C));
+ seq_printf(seq, "| GDMA1_TX_GBCNT : %010u (Tx Good Bytes) |\n",
+ mtk_r32(eth, mib_base+0x30));
+ seq_printf(seq, "| GDMA1_TX_GPCNT : %010u (Tx Good Pkts) |\n",
+ mtk_r32(eth, mib_base+0x38));
+ seq_puts(seq, "| |\n");
+ seq_printf(seq, "| GDMA2_RX_GBCNT : %010u (Rx Good Bytes) |\n",
+ mtk_r32(eth, mib_base+0x40));
+ seq_printf(seq, "| GDMA2_RX_GPCNT : %010u (Rx Good Pkts) |\n",
+ mtk_r32(eth, mib_base+0x48));
+ seq_printf(seq, "| GDMA2_RX_OERCNT : %010u (overflow error) |\n",
+ mtk_r32(eth, mib_base+0x50));
+ seq_printf(seq, "| GDMA2_RX_FERCNT : %010u (FCS error) |\n",
+ mtk_r32(eth, mib_base+0x54));
+ seq_printf(seq, "| GDMA2_RX_SERCNT : %010u (too short) |\n",
+ mtk_r32(eth, mib_base+0x58));
+ seq_printf(seq, "| GDMA2_RX_LERCNT : %010u (too long) |\n",
+ mtk_r32(eth, mib_base+0x5C));
+ seq_printf(seq, "| GDMA2_RX_CERCNT : %010u (checksum error) |\n",
+ mtk_r32(eth, mib_base+0x60));
+ seq_printf(seq, "| GDMA2_RX_FCCNT : %010u (flow control) |\n",
+ mtk_r32(eth, mib_base+0x64));
+ seq_printf(seq, "| GDMA2_TX_SKIPCNT: %010u (skip) |\n",
+ mtk_r32(eth, mib_base+0x68));
+ seq_printf(seq, "| GDMA2_TX_COLCNT : %010u (collision) |\n",
+ mtk_r32(eth, mib_base+0x6C));
+ seq_printf(seq, "| GDMA2_TX_GBCNT : %010u (Tx Good Bytes) |\n",
+ mtk_r32(eth, mib_base+0x70));
+ seq_printf(seq, "| GDMA2_TX_GPCNT : %010u (Tx Good Pkts) |\n",
+ mtk_r32(eth, mib_base+0x78));
+ seq_puts(seq, "+-----------------------------------------------+\n");
+
+ if (!mt7530_exist(eth))
+ return 0;
+
+#define DUMP_EACH_PORT(base) \
+ do { \
+ for (i = 0; i < 7; i++) { \
+ pkt_cnt = mt7530_mdio_r32(eth, (base) + (i * 0x100));\
+ seq_printf(seq, "%8u ", pkt_cnt); \
+ } \
+ seq_puts(seq, "\n"); \
+ } while (0)
+
+ seq_printf(seq, "===================== %8s %8s %8s %8s %8s %8s %8s\n",
+ "Port0", "Port1", "Port2", "Port3", "Port4", "Port5",
+ "Port6");
+ seq_puts(seq, "Tx Drop Packet :");
+ DUMP_EACH_PORT(0x4000);
+ seq_puts(seq, "Tx CRC Error :");
+ DUMP_EACH_PORT(0x4004);
+ seq_puts(seq, "Tx Unicast Packet :");
+ DUMP_EACH_PORT(0x4008);
+ seq_puts(seq, "Tx Multicast Packet :");
+ DUMP_EACH_PORT(0x400C);
+ seq_puts(seq, "Tx Broadcast Packet :");
+ DUMP_EACH_PORT(0x4010);
+ seq_puts(seq, "Tx Collision Event :");
+ DUMP_EACH_PORT(0x4014);
+ seq_puts(seq, "Tx Pause Packet :");
+ DUMP_EACH_PORT(0x402C);
+ seq_puts(seq, "Rx Drop Packet :");
+ DUMP_EACH_PORT(0x4060);
+ seq_puts(seq, "Rx Filtering Packet :");
+ DUMP_EACH_PORT(0x4064);
+ seq_puts(seq, "Rx Unicast Packet :");
+ DUMP_EACH_PORT(0x4068);
+ seq_puts(seq, "Rx Multicast Packet :");
+ DUMP_EACH_PORT(0x406C);
+ seq_puts(seq, "Rx Broadcast Packet :");
+ DUMP_EACH_PORT(0x4070);
+ seq_puts(seq, "Rx Alignment Error :");
+ DUMP_EACH_PORT(0x4074);
+ seq_puts(seq, "Rx CRC Error :");
+ DUMP_EACH_PORT(0x4078);
+ seq_puts(seq, "Rx Undersize Error :");
+ DUMP_EACH_PORT(0x407C);
+ seq_puts(seq, "Rx Fragment Error :");
+ DUMP_EACH_PORT(0x4080);
+ seq_puts(seq, "Rx Oversize Error :");
+ DUMP_EACH_PORT(0x4084);
+ seq_puts(seq, "Rx Jabber Error :");
+ DUMP_EACH_PORT(0x4088);
+ seq_puts(seq, "Rx Pause Packet :");
+ DUMP_EACH_PORT(0x408C);
+ mt7530_mdio_w32(eth, 0x4fe0, 0xf0);
+ mt7530_mdio_w32(eth, 0x4fe0, 0x800000f0);
+
+ seq_puts(seq, "\n");
+
+ return 0;
+}
+
+static int switch_count_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, esw_cnt_read, 0);
+}
+
+static const struct file_operations switch_count_fops = {
+ .owner = THIS_MODULE,
+ .open = switch_count_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+
+static struct proc_dir_entry *proc_tx_ring, *proc_rx_ring;
+
+int tx_ring_read(struct seq_file *seq, void *v)
+{
+ struct mtk_tx_ring *ring = &g_eth->tx_ring;
+ struct mtk_tx_dma *tx_ring;
+ int i = 0;
+
+ tx_ring =
+ kmalloc(sizeof(struct mtk_tx_dma) * MTK_DMA_SIZE, GFP_KERNEL);
+ if (!tx_ring) {
+ seq_puts(seq, " allocate temp tx_ring fail.\n");
+ return 0;
+ }
+
+ for (i = 0; i < MTK_DMA_SIZE; i++)
+ tx_ring[i] = ring->dma[i];
+
+ seq_printf(seq, "free count = %d\n", (int)atomic_read(&ring->free_count));
+ seq_printf(seq, "cpu next free: %d\n", (int)(ring->next_free - ring->dma));
+ seq_printf(seq, "cpu last free: %d\n", (int)(ring->last_free - ring->dma));
+ for (i = 0; i < MTK_DMA_SIZE; i++) {
+ dma_addr_t tmp = ring->phys + i * sizeof(*tx_ring);
+
+ seq_printf(seq, "%d (%pad): %08x %08x %08x %08x", i, &tmp,
+ *(int *)&tx_ring[i].txd1, *(int *)&tx_ring[i].txd2,
+ *(int *)&tx_ring[i].txd3, *(int *)&tx_ring[i].txd4);
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+ seq_printf(seq, " %08x %08x %08x %08x",
+ *(int *)&tx_ring[i].txd5, *(int *)&tx_ring[i].txd6,
+ *(int *)&tx_ring[i].txd7, *(int *)&tx_ring[i].txd8);
+#endif
+ seq_printf(seq, "\n");
+ }
+
+ kfree(tx_ring);
+ return 0;
+}
+
+static int tx_ring_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tx_ring_read, NULL);
+}
+
+static const struct file_operations tx_ring_fops = {
+ .owner = THIS_MODULE,
+ .open = tx_ring_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+
+int rx_ring_read(struct seq_file *seq, void *v)
+{
+ struct mtk_rx_ring *ring = &g_eth->rx_ring[0];
+ struct mtk_rx_dma *rx_ring;
+
+ int i = 0;
+
+ rx_ring =
+ kmalloc(sizeof(struct mtk_rx_dma) * MTK_DMA_SIZE, GFP_KERNEL);
+ if (!rx_ring) {
+ seq_puts(seq, " allocate temp rx_ring fail.\n");
+ return 0;
+ }
+
+ for (i = 0; i < MTK_DMA_SIZE; i++)
+ rx_ring[i] = ring->dma[i];
+
+ seq_printf(seq, "next to read: %d\n",
+ NEXT_DESP_IDX(ring->calc_idx, MTK_DMA_SIZE));
+ for (i = 0; i < MTK_DMA_SIZE; i++) {
+ seq_printf(seq, "%d: %08x %08x %08x %08x", i,
+ *(int *)&rx_ring[i].rxd1, *(int *)&rx_ring[i].rxd2,
+ *(int *)&rx_ring[i].rxd3, *(int *)&rx_ring[i].rxd4);
+#ifdef CONFIG_MEDIATEK_NETSYS_RX_V2
+ seq_printf(seq, " %08x %08x %08x %08x",
+ *(int *)&rx_ring[i].rxd5, *(int *)&rx_ring[i].rxd6,
+ *(int *)&rx_ring[i].rxd7, *(int *)&rx_ring[i].rxd8);
+#endif
+ seq_printf(seq, "\n");
+ }
+
+ kfree(rx_ring);
+ return 0;
+}
+
+static int rx_ring_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, rx_ring_read, NULL);
+}
+
+static const struct file_operations rx_ring_fops = {
+ .owner = THIS_MODULE,
+ .open = rx_ring_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+
+int dbg_regs_read(struct seq_file *seq, void *v)
+{
+ struct mtk_eth *eth = g_eth;
+
+ seq_puts(seq, " <<PSE DEBUG REG DUMP>>\n");
+ seq_printf(seq, "| PSE_FQFC_CFG : %08x |\n",
+ mtk_r32(eth, MTK_PSE_FQFC_CFG));
+ seq_printf(seq, "| PSE_IQ_STA1 : %08x |\n",
+ mtk_r32(eth, MTK_PSE_IQ_STA(0)));
+ seq_printf(seq, "| PSE_IQ_STA2 : %08x |\n",
+ mtk_r32(eth, MTK_PSE_IQ_STA(1)));
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_TX_V2)) {
+ seq_printf(seq, "| PSE_IQ_STA3 : %08x |\n",
+ mtk_r32(eth, MTK_PSE_IQ_STA(2)));
+ seq_printf(seq, "| PSE_IQ_STA4 : %08x |\n",
+ mtk_r32(eth, MTK_PSE_IQ_STA(3)));
+ }
+
+ seq_printf(seq, "| PSE_OQ_STA1 : %08x |\n",
+ mtk_r32(eth, MTK_PSE_OQ_STA(0)));
+ seq_printf(seq, "| PSE_OQ_STA2 : %08x |\n",
+ mtk_r32(eth, MTK_PSE_OQ_STA(1)));
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_TX_V2)) {
+ seq_printf(seq, "| PSE_OQ_STA3 : %08x |\n",
+ mtk_r32(eth, MTK_PSE_OQ_STA(2)));
+ seq_printf(seq, "| PSE_OQ_STA4 : %08x |\n",
+ mtk_r32(eth, MTK_PSE_OQ_STA(3)));
+ }
+
+ seq_printf(seq, "| QDMA_FQ_CNT : %08x |\n",
+ mtk_r32(eth, MTK_QDMA_FQ_CNT));
+ seq_printf(seq, "| FE_PSE_FREE : %08x |\n",
+ mtk_r32(eth, MTK_FE_PSE_FREE));
+ seq_printf(seq, "| FE_DROP_FQ : %08x |\n",
+ mtk_r32(eth, MTK_FE_DROP_FQ));
+ seq_printf(seq, "| FE_DROP_FC : %08x |\n",
+ mtk_r32(eth, MTK_FE_DROP_FC));
+ seq_printf(seq, "| FE_DROP_PPE : %08x |\n",
+ mtk_r32(eth, MTK_FE_DROP_PPE));
+ seq_printf(seq, "| GDM1_IG_CTRL : %08x |\n",
+ mtk_r32(eth, MTK_GDMA_FWD_CFG(0)));
+ seq_printf(seq, "| GDM2_IG_CTRL : %08x |\n",
+ mtk_r32(eth, MTK_GDMA_FWD_CFG(1)));
+ seq_printf(seq, "| MAC_P1_MCR : %08x |\n",
+ mtk_r32(eth, MTK_MAC_MCR(0)));
+ seq_printf(seq, "| MAC_P2_MCR : %08x |\n",
+ mtk_r32(eth, MTK_MAC_MCR(1)));
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_TX_V2)) {
+ seq_printf(seq, "| FE_CDM1_FSM : %08x |\n",
+ mtk_r32(eth, MTK_FE_CDM1_FSM));
+ seq_printf(seq, "| FE_CDM2_FSM : %08x |\n",
+ mtk_r32(eth, MTK_FE_CDM2_FSM));
+ seq_printf(seq, "| FE_GDM1_FSM : %08x |\n",
+ mtk_r32(eth, MTK_FE_GDM1_FSM));
+ seq_printf(seq, "| FE_GDM2_FSM : %08x |\n",
+ mtk_r32(eth, MTK_FE_GDM2_FSM));
+ }
+
+ return 0;
+}
+
+static int dbg_regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dbg_regs_read, 0);
+}
+
+static const struct file_operations dbg_regs_fops = {
+ .owner = THIS_MODULE,
+ .open = dbg_regs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+
+#define PROCREG_ESW_CNT "esw_cnt"
+#define PROCREG_TXRING "tx_ring"
+#define PROCREG_RXRING "rx_ring"
+#define PROCREG_DIR "mtketh"
+#define PROCREG_DBG_REGS "dbg_regs"
+
+struct proc_dir_entry *proc_reg_dir;
+static struct proc_dir_entry *proc_esw_cnt, *proc_dbg_regs;
+
+int debug_proc_init(struct mtk_eth *eth)
+{
+ g_eth = eth;
+
+ if (!proc_reg_dir)
+ proc_reg_dir = proc_mkdir(PROCREG_DIR, NULL);
+
+ proc_tx_ring =
+ proc_create(PROCREG_TXRING, 0, proc_reg_dir, &tx_ring_fops);
+ if (!proc_tx_ring)
+ pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_TXRING);
+
+ proc_rx_ring =
+ proc_create(PROCREG_RXRING, 0, proc_reg_dir, &rx_ring_fops);
+ if (!proc_rx_ring)
+ pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_RXRING);
+
+ proc_esw_cnt =
+ proc_create(PROCREG_ESW_CNT, 0, proc_reg_dir, &switch_count_fops);
+ if (!proc_esw_cnt)
+ pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_ESW_CNT);
+
+ proc_dbg_regs =
+ proc_create(PROCREG_DBG_REGS, 0, proc_reg_dir, &dbg_regs_fops);
+ if (!proc_dbg_regs)
+ pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_DBG_REGS);
+
+ return 0;
+}
+
+void debug_proc_exit(void)
+{
+ if (proc_tx_ring)
+ remove_proc_entry(PROCREG_TXRING, proc_reg_dir);
+ if (proc_rx_ring)
+ remove_proc_entry(PROCREG_RXRING, proc_reg_dir);
+
+ if (proc_esw_cnt)
+ remove_proc_entry(PROCREG_ESW_CNT, proc_reg_dir);
+
+ if (proc_reg_dir)
+ remove_proc_entry(PROCREG_DIR, 0);
+
+ if (proc_dbg_regs)
+ remove_proc_entry(PROCREG_DBG_REGS, proc_reg_dir);
+}
+
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.h
new file mode 100755
index 0000000..c7924f4
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
+ */
+
+#ifndef MTK_ETH_DBG_H
+#define MTK_ETH_DBG_H
+
+/* Debug Purpose Register */
+#define MTK_PSE_FQFC_CFG 0x100
+#define MTK_FE_CDM1_FSM 0x220
+#define MTK_FE_CDM2_FSM 0x224
+#define MTK_FE_GDM1_FSM 0x228
+#define MTK_FE_GDM2_FSM 0x22C
+#define MTK_FE_PSE_FREE 0x240
+#define MTK_FE_DROP_FQ 0x244
+#define MTK_FE_DROP_FC 0x248
+#define MTK_FE_DROP_PPE 0x24C
+
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+#define MTK_PSE_IQ_STA(x) (0x180 + (x) * 0x4)
+#define MTK_PSE_OQ_STA(x) (0x1A0 + (x) * 0x4)
+#else
+#define MTK_PSE_IQ_STA(x) (0x110 + (x) * 0x4)
+#define MTK_PSE_OQ_STA(x) (0x118 + (x) * 0x4)
+#endif
+
+#define MTKETH_MII_READ 0x89F3
+#define MTKETH_MII_WRITE 0x89F4
+#define MTKETH_ESW_REG_READ 0x89F1
+#define MTKETH_ESW_REG_WRITE 0x89F2
+#define MTKETH_MII_READ_CL45 0x89FC
+#define MTKETH_MII_WRITE_CL45 0x89FD
+#define REG_ESW_MAX 0xFC
+
+struct mtk_esw_reg {
+ unsigned int off;
+ unsigned int val;
+};
+
+struct mtk_mii_ioctl_data {
+ unsigned int phy_id;
+ unsigned int reg_num;
+ unsigned int val_in;
+ unsigned int val_out;
+ unsigned int port_num;
+ unsigned int dev_addr;
+ unsigned int reg_addr;
+};
+
+#if defined(CONFIG_NET_DSA_MT7530) || defined(CONFIG_MT753X_GSW)
+static inline bool mt7530_exist(struct mtk_eth *eth)
+{
+ return true;
+}
+#else
+static inline bool mt7530_exist(struct mtk_eth *eth)
+{
+ return false;
+}
+#endif
+
+extern u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg);
+extern u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
+ u32 phy_register, u32 write_data);
+
+extern u32 mtk_cl45_ind_read(struct mtk_eth *eth, u32 port, u32 devad, u32 reg, u32 *data);
+extern u32 mtk_cl45_ind_write(struct mtk_eth *eth, u32 port, u32 devad, u32 reg, u32 data);
+
+int debug_proc_init(struct mtk_eth *eth);
+void debug_proc_exit(void);
+
+int mtketh_debugfs_init(struct mtk_eth *eth);
+void mtketh_debugfs_exit(struct mtk_eth *eth);
+int mtk_do_priv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+#endif /* MTK_ETH_DBG_H */
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_path.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_path.c
new file mode 100755
index 0000000..ef11cf3
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_path.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018-2019 MediaTek Inc.
+
+/* A library for configuring path from GMAC/GDM to target PHY
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#include <linux/phy.h>
+#include <linux/regmap.h>
+
+#include "mtk_eth_soc.h"
+
+struct mtk_eth_muxc {
+ const char *name;
+ int cap_bit;
+ int (*set_path)(struct mtk_eth *eth, int path);
+};
+
+static const char *mtk_eth_path_name(int path)
+{
+ switch (path) {
+ case MTK_ETH_PATH_GMAC1_RGMII:
+ return "gmac1_rgmii";
+ case MTK_ETH_PATH_GMAC1_TRGMII:
+ return "gmac1_trgmii";
+ case MTK_ETH_PATH_GMAC1_SGMII:
+ return "gmac1_sgmii";
+ case MTK_ETH_PATH_GMAC2_RGMII:
+ return "gmac2_rgmii";
+ case MTK_ETH_PATH_GMAC2_SGMII:
+ return "gmac2_sgmii";
+ case MTK_ETH_PATH_GMAC2_GEPHY:
+ return "gmac2_gephy";
+ case MTK_ETH_PATH_GDM1_ESW:
+ return "gdm1_esw";
+ default:
+ return "unknown path";
+ }
+}
+
+static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, int path)
+{
+ bool updated = true;
+ u32 val, mask, set;
+
+ switch (path) {
+ case MTK_ETH_PATH_GMAC1_SGMII:
+ mask = ~(u32)MTK_MUX_TO_ESW;
+ set = 0;
+ break;
+ case MTK_ETH_PATH_GDM1_ESW:
+ mask = ~(u32)MTK_MUX_TO_ESW;
+ set = MTK_MUX_TO_ESW;
+ break;
+ default:
+ updated = false;
+ break;
+ };
+
+ if (updated) {
+ val = mtk_r32(eth, MTK_MAC_MISC);
+ val = (val & mask) | set;
+ mtk_w32(eth, val, MTK_MAC_MISC);
+ }
+
+ dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+ mtk_eth_path_name(path), __func__, updated);
+
+ return 0;
+}
+
+static int set_mux_gmac2_gmac0_to_gephy(struct mtk_eth *eth, int path)
+{
+ unsigned int val = 0;
+ bool updated = true;
+
+ switch (path) {
+ case MTK_ETH_PATH_GMAC2_GEPHY:
+ val = ~(u32)GEPHY_MAC_SEL;
+ break;
+ default:
+ updated = false;
+ break;
+ }
+
+ if (updated)
+ regmap_update_bits(eth->infra, INFRA_MISC2, GEPHY_MAC_SEL, val);
+
+ dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+ mtk_eth_path_name(path), __func__, updated);
+
+ return 0;
+}
+
+static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, int path)
+{
+ unsigned int val = 0;
+ bool updated = true;
+
+ switch (path) {
+ case MTK_ETH_PATH_GMAC2_SGMII:
+ val = CO_QPHY_SEL;
+ break;
+ default:
+ updated = false;
+ break;
+ }
+
+ if (updated)
+ regmap_update_bits(eth->infra, INFRA_MISC2, CO_QPHY_SEL, val);
+
+ dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+ mtk_eth_path_name(path), __func__, updated);
+
+ return 0;
+}
+
+static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, int path)
+{
+ unsigned int val = 0;
+ bool updated = true;
+
+ switch (path) {
+ case MTK_ETH_PATH_GMAC1_SGMII:
+ val = SYSCFG0_SGMII_GMAC1;
+ break;
+ case MTK_ETH_PATH_GMAC2_SGMII:
+ val = SYSCFG0_SGMII_GMAC2;
+ break;
+ case MTK_ETH_PATH_GMAC1_RGMII:
+ case MTK_ETH_PATH_GMAC2_RGMII:
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+ val &= SYSCFG0_SGMII_MASK;
+
+ if ((path == MTK_GMAC1_RGMII && val == SYSCFG0_SGMII_GMAC1) ||
+ (path == MTK_GMAC2_RGMII && val == SYSCFG0_SGMII_GMAC2))
+ val = 0;
+ else
+ updated = false;
+ break;
+ default:
+ updated = false;
+ break;
+ };
+
+ if (updated)
+ regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_MASK, val);
+
+ dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+ mtk_eth_path_name(path), __func__, updated);
+
+ return 0;
+}
+
+static int set_mux_gmac12_to_gephy_sgmii(struct mtk_eth *eth, int path)
+{
+ unsigned int val = 0;
+ bool updated = true;
+
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+
+ switch (path) {
+ case MTK_ETH_PATH_GMAC1_SGMII:
+ val |= SYSCFG0_SGMII_GMAC1_V2;
+ break;
+ case MTK_ETH_PATH_GMAC2_GEPHY:
+ val &= ~(u32)SYSCFG0_SGMII_GMAC2_V2;
+ break;
+ case MTK_ETH_PATH_GMAC2_SGMII:
+ val |= SYSCFG0_SGMII_GMAC2_V2;
+ break;
+ default:
+ updated = false;
+ };
+
+ if (updated)
+ regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_MASK, val);
+
+ dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+ mtk_eth_path_name(path), __func__, updated);
+
+ return 0;
+}
+
+static const struct mtk_eth_muxc mtk_eth_muxc[] = {
+ {
+ .name = "mux_gdm1_to_gmac1_esw",
+ .cap_bit = MTK_ETH_MUX_GDM1_TO_GMAC1_ESW,
+ .set_path = set_mux_gdm1_to_gmac1_esw,
+ }, {
+ .name = "mux_gmac2_gmac0_to_gephy",
+ .cap_bit = MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY,
+ .set_path = set_mux_gmac2_gmac0_to_gephy,
+ }, {
+ .name = "mux_u3_gmac2_to_qphy",
+ .cap_bit = MTK_ETH_MUX_U3_GMAC2_TO_QPHY,
+ .set_path = set_mux_u3_gmac2_to_qphy,
+ }, {
+ .name = "mux_gmac1_gmac2_to_sgmii_rgmii",
+ .cap_bit = MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII,
+ .set_path = set_mux_gmac1_gmac2_to_sgmii_rgmii,
+ }, {
+ .name = "mux_gmac12_to_gephy_sgmii",
+ .cap_bit = MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII,
+ .set_path = set_mux_gmac12_to_gephy_sgmii,
+ },
+};
+
+static int mtk_eth_mux_setup(struct mtk_eth *eth, int path)
+{
+ int i, err = 0;
+
+ if (!MTK_HAS_CAPS(eth->soc->caps, path)) {
+ dev_err(eth->dev, "path %s isn't support on the SoC\n",
+ mtk_eth_path_name(path));
+ return -EINVAL;
+ }
+
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_MUX))
+ return 0;
+
+ /* Setup MUX in path fabric */
+ for (i = 0; i < ARRAY_SIZE(mtk_eth_muxc); i++) {
+ if (MTK_HAS_CAPS(eth->soc->caps, mtk_eth_muxc[i].cap_bit)) {
+ err = mtk_eth_muxc[i].set_path(eth, path);
+ if (err)
+ goto out;
+ } else {
+ dev_dbg(eth->dev, "mux %s isn't present on the SoC\n",
+ mtk_eth_muxc[i].name);
+ }
+ }
+
+out:
+ return err;
+}
+
+int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
+{
+ int err, path;
+
+ path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_SGMII :
+ MTK_ETH_PATH_GMAC2_SGMII;
+
+ /* Setup proper MUXes along the path */
+ err = mtk_eth_mux_setup(eth, path);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
+{
+ int err, path = 0;
+
+ if (mac_id == 1)
+ path = MTK_ETH_PATH_GMAC2_GEPHY;
+
+ if (!path)
+ return -EINVAL;
+
+ /* Setup proper MUXes along the path */
+ err = mtk_eth_mux_setup(eth, path);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id)
+{
+ int err, path;
+
+ path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_RGMII :
+ MTK_ETH_PATH_GMAC2_RGMII;
+
+ /* Setup proper MUXes along the path */
+ err = mtk_eth_mux_setup(eth, path);
+ if (err)
+ return err;
+
+ return 0;
+}
+
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c
new file mode 100755
index 0000000..5aa0bc0
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -0,0 +1,3465 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
+ */
+
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/if_vlan.h>
+#include <linux/reset.h>
+#include <linux/tcp.h>
+#include <linux/interrupt.h>
+#include <linux/pinctrl/devinfo.h>
+#include <linux/phylink.h>
+#include <net/dsa.h>
+
+#include "mtk_eth_soc.h"
+#include "mtk_eth_dbg.h"
+
+#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
+#include "mtk_hnat/nf_hnat_mtk.h"
+#endif
+
+static int mtk_msg_level = -1;
+module_param_named(msg_level, mtk_msg_level, int, 0);
+MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
+
+#define MTK_ETHTOOL_STAT(x) { #x, \
+ offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
+
+/* strings used by ethtool */
+static const struct mtk_ethtool_stats {
+ char str[ETH_GSTRING_LEN];
+ u32 offset;
+} mtk_ethtool_stats[] = {
+ MTK_ETHTOOL_STAT(tx_bytes),
+ MTK_ETHTOOL_STAT(tx_packets),
+ MTK_ETHTOOL_STAT(tx_skip),
+ MTK_ETHTOOL_STAT(tx_collisions),
+ MTK_ETHTOOL_STAT(rx_bytes),
+ MTK_ETHTOOL_STAT(rx_packets),
+ MTK_ETHTOOL_STAT(rx_overflow),
+ MTK_ETHTOOL_STAT(rx_fcs_errors),
+ MTK_ETHTOOL_STAT(rx_short_errors),
+ MTK_ETHTOOL_STAT(rx_long_errors),
+ MTK_ETHTOOL_STAT(rx_checksum_errors),
+ MTK_ETHTOOL_STAT(rx_flow_control_packets),
+};
+
+static const char * const mtk_clks_source_name[] = {
+ "ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
+ "sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
+ "sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
+ "sgmii_ck", "eth2pll", "wocpu0","wocpu1",
+};
+
+/* Write @val to the frame-engine register at byte offset @reg.
+ * Raw accessor: no barriers or byte swapping are applied.
+ */
+void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
+{
+ __raw_writel(val, eth->base + reg);
+}
+
+/* Read the frame-engine register at byte offset @reg.
+ * Raw accessor: no barriers or byte swapping are applied.
+ */
+u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
+{
+ return __raw_readl(eth->base + reg);
+}
+
+/* Read-modify-write: clear the bits in @mask, then set the bits in
+ * @set, in the register at offset @reg. Returns the value written.
+ */
+u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
+{
+ u32 val;
+
+ val = mtk_r32(eth, reg);
+ val &= ~mask;
+ val |= set;
+ mtk_w32(eth, val, reg);
+
+ /* Bug fix: return the written value, not the register offset. */
+ return val;
+}
+
+/* Busy-poll the PHY indirect-access controller until it goes idle.
+ * Returns 0 when idle, -1 once PHY_IAC_TIMEOUT jiffies have elapsed.
+ */
+static int mtk_mdio_busy_wait(struct mtk_eth *eth)
+{
+ unsigned long t_start = jiffies;
+
+ while (1) {
+ if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
+ return 0;
+ if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
+ break;
+ /* Sleeping poll — must not be called from atomic context. */
+ usleep_range(10, 20);
+ }
+
+ dev_err(eth->dev, "mdio: MDIO timeout\n");
+ return -1;
+}
+
+/* Issue a clause-22 MDIO write via the indirect-access controller.
+ * Returns 0 on success, (u32)-1 if the controller stays busy.
+ * Caller is responsible for serialization (see mtk_cl45_ind_write).
+ */
+u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
+ u32 phy_register, u32 write_data)
+{
+ if (mtk_mdio_busy_wait(eth))
+ return -1;
+
+ write_data &= 0xffff;
+
+ /* Compose the command: start + write opcode + reg/addr + data. */
+ mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
+ (phy_register << PHY_IAC_REG_SHIFT) |
+ (phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
+ MTK_PHY_IAC);
+
+ if (mtk_mdio_busy_wait(eth))
+ return -1;
+
+ return 0;
+}
+
+/* Issue a clause-22 MDIO read via the indirect-access controller.
+ * Returns the 16-bit register value, or 0xffff on controller timeout
+ * (indistinguishable from a genuine all-ones read).
+ */
+u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
+{
+ u32 d;
+
+ if (mtk_mdio_busy_wait(eth))
+ return 0xffff;
+
+ /* Compose the command: start + read opcode + reg/addr. */
+ mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
+ (phy_reg << PHY_IAC_REG_SHIFT) |
+ (phy_addr << PHY_IAC_ADDR_SHIFT),
+ MTK_PHY_IAC);
+
+ if (mtk_mdio_busy_wait(eth))
+ return 0xffff;
+
+ /* The result is latched in the low 16 bits of the IAC register. */
+ d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;
+
+ return d;
+}
+
+/* mii_bus .write glue: bus->priv carries the mtk_eth instance. */
+static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
+ int phy_reg, u16 val)
+{
+ return _mtk_mdio_write((struct mtk_eth *)bus->priv, phy_addr,
+ phy_reg, val);
+}
+
+/* mii_bus .read glue: bus->priv carries the mtk_eth instance. */
+static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
+{
+ return _mtk_mdio_read((struct mtk_eth *)bus->priv, phy_addr, phy_reg);
+}
+
+/* Indirect clause-45 read through the clause-22 MMD access registers,
+ * serialized against other bus users with the mii_bus mdio_lock.
+ * Always returns 0; the register value is stored in *data.
+ */
+u32 mtk_cl45_ind_read(struct mtk_eth *eth, u32 port, u32 devad, u32 reg, u32 *data)
+{
+ /* Fix mojibake: "&eth" had been corrupted to the HTML entity "ð". */
+ mutex_lock(&eth->mii_bus->mdio_lock);
+
+ /* Select device/register, then switch to data mode and read. */
+ _mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, devad);
+ _mtk_mdio_write(eth, port, MII_MMD_ADDR_DATA_REG, reg);
+ _mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, MMD_OP_MODE_DATA | devad);
+ *data = _mtk_mdio_read(eth, port, MII_MMD_ADDR_DATA_REG);
+
+ mutex_unlock(&eth->mii_bus->mdio_lock);
+
+ return 0;
+}
+
+/* Indirect clause-45 write through the clause-22 MMD access registers,
+ * serialized against other bus users with the mii_bus mdio_lock.
+ * Always returns 0.
+ */
+u32 mtk_cl45_ind_write(struct mtk_eth *eth, u32 port, u32 devad, u32 reg, u32 data)
+{
+ /* Fix mojibake: "&eth" had been corrupted to the HTML entity "ð". */
+ mutex_lock(&eth->mii_bus->mdio_lock);
+
+ /* Select device/register, then switch to data mode and write. */
+ _mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, devad);
+ _mtk_mdio_write(eth, port, MII_MMD_ADDR_DATA_REG, reg);
+ _mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, MMD_OP_MODE_DATA | devad);
+ _mtk_mdio_write(eth, port, MII_MMD_ADDR_DATA_REG, data);
+
+ mutex_unlock(&eth->mii_bus->mdio_lock);
+
+ return 0;
+}
+
+/* MT7621: select the TRGMII clock source in ETHSYS for GMAC0.
+ * TRGMII with DDR2 memory is unsupported and rejected.
+ * Returns 0 on success or -EOPNOTSUPP.
+ */
+static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
+ phy_interface_t interface)
+{
+ u32 val;
+
+ /* Check DDR memory type.
+ * Currently TRGMII mode with DDR2 memory is not supported.
+ */
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
+ if (interface == PHY_INTERFACE_MODE_TRGMII &&
+ val & SYSCFG_DRAM_TYPE_DDR2) {
+ dev_err(eth->dev,
+ "TRGMII mode with DDR2 memory is not supported!\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* TRGMII clocks from the DDR PLL; plain RGMII does not. */
+ val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
+ ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
+
+ regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
+ ETHSYS_TRGMII_MT7621_MASK, val);
+
+ return 0;
+}
+
+/* Configure GMAC0's (T)RGMII interface mode and the TRGPLL clock rate
+ * for the given link speed. clk_set_rate() failures are only logged.
+ */
+static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
+ phy_interface_t interface, int speed)
+{
+ u32 val;
+ int ret;
+
+ /* TRGMII always runs the PLL at 500 MHz regardless of speed. */
+ if (interface == PHY_INTERFACE_MODE_TRGMII) {
+ mtk_w32(eth, TRGMII_MODE, INTF_MODE);
+ val = 500000000;
+ ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
+ if (ret)
+ dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
+ return;
+ }
+
+ /* RGMII: pick the interface mode for gigabit vs 10/100. */
+ val = (speed == SPEED_1000) ?
+ INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
+ mtk_w32(eth, val, INTF_MODE);
+
+ regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
+ ETHSYS_TRGMII_CLK_SEL362_5,
+ ETHSYS_TRGMII_CLK_SEL362_5);
+
+ val = (speed == SPEED_1000) ? 250000000 : 500000000;
+ ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
+ if (ret)
+ dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
+
+ /* Per-speed receive and transmit clock control. */
+ val = (speed == SPEED_1000) ?
+ RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
+ mtk_w32(eth, val, TRGMII_RCK_CTRL);
+
+ val = (speed == SPEED_1000) ?
+ TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
+ mtk_w32(eth, val, TRGMII_TCK_CTRL);
+}
+
+/* phylink .mac_config handler: route the GMAC onto the requested PHY
+ * interface (programming the SoC path MUXes and ge_mode), set up the
+ * SGMII block when needed, and force speed/duplex/flow-control in the
+ * MAC control register.
+ */
+static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+ u32 mcr_cur, mcr_new, sid, i;
+ /* Bug fix: err must start at 0. In the SGMII branch below, neither
+ * setup call runs for plain SGMII without in-band AN, and err was
+ * previously read uninitialized at the "if (err)" check.
+ */
+ int val, ge_mode, err = 0;
+
+ /* MT76x8 has no hardware settings between for the MAC */
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
+ mac->interface != state->interface) {
+ /* Setup soc pin functions */
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_TRGMII:
+ if (mac->id)
+ goto err_phy;
+ if (!MTK_HAS_CAPS(mac->hw->soc->caps,
+ MTK_GMAC1_TRGMII))
+ goto err_phy;
+ /* fall through */
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_REVMII:
+ case PHY_INTERFACE_MODE_RMII:
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
+ err = mtk_gmac_rgmii_path_setup(eth, mac->id);
+ if (err)
+ goto init_err;
+ }
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
+ err = mtk_gmac_sgmii_path_setup(eth, mac->id);
+ if (err)
+ goto init_err;
+ }
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
+ err = mtk_gmac_gephy_path_setup(eth, mac->id);
+ if (err)
+ goto init_err;
+ }
+ break;
+ default:
+ goto err_phy;
+ }
+
+ /* Setup clock for 1st gmac */
+ if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
+ !phy_interface_mode_is_8023z(state->interface) &&
+ MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
+ if (MTK_HAS_CAPS(mac->hw->soc->caps,
+ MTK_TRGMII_MT7621_CLK)) {
+ if (mt7621_gmac0_rgmii_adjust(mac->hw,
+ state->interface))
+ goto err_phy;
+ } else {
+ mtk_gmac0_rgmii_adjust(mac->hw,
+ state->interface,
+ state->speed);
+
+ /* mt7623_pad_clk_setup */
+ for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
+ mtk_w32(mac->hw,
+ TD_DM_DRVP(8) | TD_DM_DRVN(8),
+ TRGMII_TD_ODT(i));
+
+ /* Assert/release MT7623 RXC reset */
+ mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
+ TRGMII_RCK_CTRL);
+ mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
+ }
+ }
+
+ ge_mode = 0;
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_GMII:
+ ge_mode = 1;
+ break;
+ case PHY_INTERFACE_MODE_REVMII:
+ ge_mode = 2;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ if (mac->id)
+ goto err_phy;
+ ge_mode = 3;
+ break;
+ default:
+ break;
+ }
+
+ /* put the gmac into the right mode */
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+ val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
+ val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
+ regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
+
+ mac->interface = state->interface;
+ }
+
+ /* SGMII */
+ if (state->interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_8023z(state->interface)) {
+ /* The path GMAC to SGMII will be enabled once the SGMIISYS is
+ * being setup done.
+ */
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+
+ regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_MASK,
+ ~(u32)SYSCFG0_SGMII_MASK);
+
+ /* Decide how GMAC and SGMIISYS be mapped */
+ sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
+ 0 : mac->id;
+
+ /* Setup SGMIISYS with the determined property */
+ if (state->interface != PHY_INTERFACE_MODE_SGMII)
+ err = mtk_sgmii_setup_mode_force(eth->sgmii, sid,
+ state);
+ else if (phylink_autoneg_inband(mode))
+ err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);
+
+ if (err)
+ goto init_err;
+
+ /* Restore the original SGMII enable bits saved above. */
+ regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_MASK, val);
+ } else if (phylink_autoneg_inband(mode)) {
+ dev_err(eth->dev,
+ "In-band mode not supported in non SGMII mode!\n");
+ return;
+ }
+
+ /* Setup gmac */
+ mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+ mcr_new = mcr_cur;
+ mcr_new &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
+ MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
+ MAC_MCR_FORCE_RX_FC);
+ mcr_new |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
+ MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
+
+ switch (state->speed) {
+ case SPEED_2500:
+ case SPEED_1000:
+ mcr_new |= MAC_MCR_SPEED_1000;
+ break;
+ case SPEED_100:
+ mcr_new |= MAC_MCR_SPEED_100;
+ break;
+ }
+ if (state->duplex == DUPLEX_FULL) {
+ mcr_new |= MAC_MCR_FORCE_DPX;
+ if (state->pause & MLO_PAUSE_TX)
+ mcr_new |= MAC_MCR_FORCE_TX_FC;
+ if (state->pause & MLO_PAUSE_RX)
+ mcr_new |= MAC_MCR_FORCE_RX_FC;
+ }
+
+ /* Only update control register when needed! */
+ if (mcr_new != mcr_cur)
+ mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
+
+ return;
+
+err_phy:
+ dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
+ mac->id, phy_modes(state->interface));
+ return;
+
+init_err:
+ dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
+ mac->id, phy_modes(state->interface), err);
+}
+
+/* phylink .mac_link_state handler: decode link, duplex, speed and
+ * pause state from the MAC status register.
+ */
+static int mtk_mac_link_state(struct phylink_config *config,
+ struct phylink_link_state *state)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
+
+ state->link = (pmsr & MAC_MSR_LINK);
+ /* NOTE(review): assumes MAC_MSR_DPX is bit 1 so the shift yields
+ * 0 (half) or 1 (full) — confirm against the register layout.
+ */
+ state->duplex = (pmsr & MAC_MSR_DPX) >> 1;
+
+ switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
+ case 0:
+ state->speed = SPEED_10;
+ break;
+ case MAC_MSR_SPEED_100:
+ state->speed = SPEED_100;
+ break;
+ case MAC_MSR_SPEED_1000:
+ state->speed = SPEED_1000;
+ break;
+ default:
+ state->speed = SPEED_UNKNOWN;
+ break;
+ }
+
+ state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
+ if (pmsr & MAC_MSR_RX_FC)
+ state->pause |= MLO_PAUSE_RX;
+ if (pmsr & MAC_MSR_TX_FC)
+ state->pause |= MLO_PAUSE_TX;
+
+ /* Non-zero tells phylink the state was successfully read. */
+ return 1;
+}
+
+/* phylink .mac_an_restart handler: restart SGMII autonegotiation. */
+static void mtk_mac_an_restart(struct phylink_config *config)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+
+ mtk_sgmii_restart_an(mac->hw, mac->id);
+}
+
+/* phylink .mac_link_down handler: stop the MAC's TX and RX engines. */
+static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+
+ /* Read-modify-write of MCR: clear the TX/RX enable bits. */
+ mtk_m32(mac->hw, MAC_MCR_TX_EN | MAC_MCR_RX_EN, 0,
+ MTK_MAC_MCR(mac->id));
+}
+
+/* phylink .mac_link_up handler: start the MAC's TX and RX engines. */
+static void mtk_mac_link_up(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phy)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+
+ /* Read-modify-write of MCR: set the TX/RX enable bits. */
+ mtk_m32(mac->hw, 0, MAC_MCR_TX_EN | MAC_MCR_RX_EN,
+ MTK_MAC_MCR(mac->id));
+}
+
+/* phylink .validate handler: restrict the advertised ethtool link
+ * modes to what this MAC and the SoC capabilities
+ * (TRGMII/RGMII/SGMII/GEPHY) can provide for the requested interface.
+ */
+static void mtk_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ /* Reject interfaces this SoC/MAC combination cannot drive. */
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != PHY_INTERFACE_MODE_MII &&
+ state->interface != PHY_INTERFACE_MODE_GMII &&
+ !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
+ phy_interface_mode_is_rgmii(state->interface)) &&
+ !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
+ !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
+ !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
+ (state->interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_8023z(state->interface)))) {
+ linkmode_zero(supported);
+ return;
+ }
+
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Autoneg);
+
+ /* The fallthroughs below accumulate modes: faster interfaces also
+ * gain every slower mode listed underneath them.
+ */
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_TRGMII:
+ phylink_set(mask, 1000baseT_Full);
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ phylink_set(mask, 1000baseX_Full);
+ phylink_set(mask, 2500baseX_Full);
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ phylink_set(mask, 1000baseT_Half);
+ /* fall through */
+ case PHY_INTERFACE_MODE_SGMII:
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ /* fall through */
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_REVMII:
+ case PHY_INTERFACE_MODE_NA:
+ default:
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+ break;
+ }
+
+ /* No specific interface requested: offer everything the SoC has. */
+ if (state->interface == PHY_INTERFACE_MODE_NA) {
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ phylink_set(mask, 2500baseX_Full);
+ }
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseT_Half);
+ phylink_set(mask, 1000baseX_Full);
+ }
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseT_Half);
+ }
+ }
+
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
+
+ /* We can only operate at 2500BaseX or 1000BaseX. If requested
+ * to advertise both, only report advertising at 2500BaseX.
+ */
+ phylink_helper_basex_speed(state);
+}
+
+/* MAC callbacks handed to phylink_create(). */
+static const struct phylink_mac_ops mtk_phylink_ops = {
+ .validate = mtk_validate,
+ .mac_link_state = mtk_mac_link_state,
+ .mac_an_restart = mtk_mac_an_restart,
+ .mac_config = mtk_mac_config,
+ .mac_link_down = mtk_mac_link_down,
+ .mac_link_up = mtk_mac_link_up,
+};
+
+/* Allocate and register the MDIO bus described by the "mdio-bus" child
+ * node. Returns 0 on success or a negative errno; the bus allocation
+ * is devm-managed, registration is undone by mtk_mdio_cleanup().
+ */
+static int mtk_mdio_init(struct mtk_eth *eth)
+{
+ struct device_node *mii_np;
+ int ret;
+
+ mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
+ if (!mii_np) {
+ dev_err(eth->dev, "no %s child node found", "mdio-bus");
+ return -ENODEV;
+ }
+
+ if (!of_device_is_available(mii_np)) {
+ ret = -ENODEV;
+ goto err_put_node;
+ }
+
+ eth->mii_bus = devm_mdiobus_alloc(eth->dev);
+ if (!eth->mii_bus) {
+ ret = -ENOMEM;
+ goto err_put_node;
+ }
+
+ eth->mii_bus->name = "mdio";
+ eth->mii_bus->read = mtk_mdio_read;
+ eth->mii_bus->write = mtk_mdio_write;
+ eth->mii_bus->priv = eth;
+ eth->mii_bus->parent = eth->dev;
+
+ snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
+ ret = of_mdiobus_register(eth->mii_bus, mii_np);
+
+ /* Node reference is dropped on both success and failure paths. */
+err_put_node:
+ of_node_put(mii_np);
+ return ret;
+}
+
+/* Undo mtk_mdio_init(): unregister the MDIO bus if one was set up.
+ * The bus structure itself is devm-managed and freed automatically.
+ */
+static void mtk_mdio_cleanup(struct mtk_eth *eth)
+{
+ if (eth->mii_bus)
+ mdiobus_unregister(eth->mii_bus);
+}
+
+/* Mask @mask bits in the TX interrupt mask register, under the IRQ
+ * spinlock so concurrent enable/disable calls do not lose updates.
+ * (Fixes "&eth" mojibake'd to the HTML entity "ð".)
+ */
+static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&eth->tx_irq_lock, flags);
+ val = mtk_r32(eth, eth->tx_int_mask_reg);
+ mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
+ spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
+}
+
+/* Unmask @mask bits in the TX interrupt mask register, under the IRQ
+ * spinlock so concurrent enable/disable calls do not lose updates.
+ * (Fixes "&eth" mojibake'd to the HTML entity "ð".)
+ */
+static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&eth->tx_irq_lock, flags);
+ val = mtk_r32(eth, eth->tx_int_mask_reg);
+ mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
+ spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
+}
+
+/* Mask @mask bits in the PDMA RX interrupt mask register, under the
+ * IRQ spinlock so concurrent enable/disable calls do not lose updates.
+ * (Fixes "&eth" mojibake'd to the HTML entity "ð".)
+ */
+static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&eth->rx_irq_lock, flags);
+ val = mtk_r32(eth, MTK_PDMA_INT_MASK);
+ mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
+ spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
+}
+
+/* Unmask @mask bits in the PDMA RX interrupt mask register, under the
+ * IRQ spinlock so concurrent enable/disable calls do not lose updates.
+ * (Fixes "&eth" mojibake'd to the HTML entity "ð".)
+ */
+static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&eth->rx_irq_lock, flags);
+ val = mtk_r32(eth, MTK_PDMA_INT_MASK);
+ mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
+ spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
+}
+
+/* ndo_set_mac_address: validate/copy the address via eth_mac_addr(),
+ * then program it into the MAC address registers (MT7628 uses the SDM
+ * registers, all other SoCs the per-GDMA registers).
+ * Rejected with -EBUSY while a hardware reset is in progress.
+ */
+static int mtk_set_mac_address(struct net_device *dev, void *p)
+{
+ int ret = eth_mac_addr(dev, p);
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ const char *macaddr = dev->dev_addr;
+
+ if (ret)
+ return ret;
+
+ /* NOTE(review): dev->dev_addr is already updated at this point even
+ * when we bail out with -EBUSY below — confirm this is intended.
+ */
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ return -EBUSY;
+
+ spin_lock_bh(&mac->hw->page_lock);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
+ MT7628_SDM_MAC_ADRH);
+ mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
+ (macaddr[4] << 8) | macaddr[5],
+ MT7628_SDM_MAC_ADRL);
+ } else {
+ mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
+ MTK_GDMA_MAC_ADRH(mac->id));
+ mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
+ (macaddr[4] << 8) | macaddr[5],
+ MTK_GDMA_MAC_ADRL(mac->id));
+ }
+ spin_unlock_bh(&mac->hw->page_lock);
+
+ return 0;
+}
+
+/* Fold the hardware MIB counters for one MAC into its software
+ * mtk_hw_stats accumulators, inside a u64_stats update section.
+ * Callers in this file take hw_stats->stats_lock around this —
+ * presumably required; confirm before calling from elsewhere.
+ */
+void mtk_stats_update_mac(struct mtk_mac *mac)
+{
+ struct mtk_hw_stats *hw_stats = mac->hw_stats;
+ unsigned int base = MTK_GDM1_TX_GBCNT;
+ u64 stats;
+
+ base += hw_stats->reg_offset;
+
+ u64_stats_update_begin(&hw_stats->syncp);
+
+ /* Byte counters are 64-bit, read as low word then high word. */
+ hw_stats->rx_bytes += mtk_r32(mac->hw, base);
+ stats = mtk_r32(mac->hw, base + 0x04);
+ if (stats)
+ hw_stats->rx_bytes += (stats << 32);
+ hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
+ hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
+ hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
+ hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
+ hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
+ hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
+ hw_stats->rx_flow_control_packets +=
+ mtk_r32(mac->hw, base + 0x24);
+ hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
+ hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
+ hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
+ stats = mtk_r32(mac->hw, base + 0x34);
+ if (stats)
+ hw_stats->tx_bytes += (stats << 32);
+ hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
+ u64_stats_update_end(&hw_stats->syncp);
+}
+
+/* Refresh the MIB counters of every registered MAC. A MAC whose
+ * stats_lock is contended is simply skipped — the holder is already
+ * updating it. (Fixes "&eth" mojibake'd to the HTML entity "ð".)
+ */
+static void mtk_stats_update(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->mac[i] || !eth->mac[i]->hw_stats)
+ continue;
+ if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
+ mtk_stats_update_mac(eth->mac[i]);
+ spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
+ }
+ }
+}
+
+/* ndo_get_stats64: snapshot the software MIB accumulators into
+ * @storage, using the u64_stats seqcount to get a consistent read.
+ */
+static void mtk_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_hw_stats *hw_stats = mac->hw_stats;
+ unsigned int start;
+
+ /* Opportunistically pull fresh hardware counters first. */
+ if (netif_running(dev) && netif_device_present(dev)) {
+ if (spin_trylock_bh(&hw_stats->stats_lock)) {
+ mtk_stats_update_mac(mac);
+ spin_unlock_bh(&hw_stats->stats_lock);
+ }
+ }
+
+ /* Retry loop guarantees a tear-free copy of the 64-bit counters. */
+ do {
+ start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
+ storage->rx_packets = hw_stats->rx_packets;
+ storage->tx_packets = hw_stats->tx_packets;
+ storage->rx_bytes = hw_stats->rx_bytes;
+ storage->tx_bytes = hw_stats->tx_bytes;
+ storage->collisions = hw_stats->tx_collisions;
+ storage->rx_length_errors = hw_stats->rx_short_errors +
+ hw_stats->rx_long_errors;
+ storage->rx_over_errors = hw_stats->rx_overflow;
+ storage->rx_crc_errors = hw_stats->rx_fcs_errors;
+ storage->rx_errors = hw_stats->rx_checksum_errors;
+ storage->tx_aborted_errors = hw_stats->tx_skip;
+ } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
+
+ storage->tx_errors = dev->stats.tx_errors;
+ storage->rx_dropped = dev->stats.rx_dropped;
+ storage->tx_dropped = dev->stats.tx_dropped;
+}
+
+/* Size of the RX page fragment needed to hold an @mtu-sized frame plus
+ * headroom and the shared-info trailer.
+ */
+static inline int mtk_max_frag_size(int mtu)
+{
+ /* Never size the buffer below what MTK_MAX_RX_LENGTH requires. */
+ int eff_mtu = max(mtu, MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN);
+
+ return SKB_DATA_ALIGN(MTK_RX_HLEN + eff_mtu) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+}
+
+/* Inverse of mtk_max_frag_size(): usable payload bytes of a fragment
+ * after headroom and the shared-info trailer are subtracted.
+ */
+static inline int mtk_max_buf_size(int frag_size)
+{
+ int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ WARN_ON(buf_size < MTK_MAX_RX_LENGTH);
+
+ return buf_size;
+}
+
+/* Snapshot a DMA RX descriptor into a CPU-side copy, word by word
+ * with READ_ONCE so the compiler cannot re-read hardware-owned memory.
+ * NOTE(review): the Kconfig added by this patch defines
+ * MEDIATEK_NETSYS_V2 but no ..._RX_V2 symbol — confirm where
+ * CONFIG_MEDIATEK_NETSYS_RX_V2 is declared.
+ */
+static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
+ struct mtk_rx_dma *dma_rxd)
+{
+ rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
+ rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
+ rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
+ rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
+#ifdef CONFIG_MEDIATEK_NETSYS_RX_V2
+ rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
+ rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
+#endif
+}
+
+/* the qdma core needs scratch memory to be setup */
+static int mtk_init_fq_dma(struct mtk_eth *eth)
+{
+ dma_addr_t phy_ring_tail;
+ int cnt = MTK_DMA_SIZE;
+ dma_addr_t dma_addr;
+ int i;
+
+ if (!eth->soc->has_sram) {
+ eth->scratch_ring = dma_alloc_coherent(eth->dev,
+ cnt * sizeof(struct mtk_tx_dma),
+ ð->phy_scratch_ring,
+ GFP_ATOMIC);
+ } else {
+ eth->scratch_ring = eth->base + MTK_ETH_SRAM_OFFSET;
+ }
+
+ if (unlikely(!eth->scratch_ring))
+ return -ENOMEM;
+
+ eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
+ GFP_KERNEL);
+ if (unlikely(!eth->scratch_head))
+ return -ENOMEM;
+
+ dma_addr = dma_map_single(eth->dev,
+ eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+ return -ENOMEM;
+
+ phy_ring_tail = eth->phy_scratch_ring +
+ (sizeof(struct mtk_tx_dma) * (cnt - 1));
+
+ for (i = 0; i < cnt; i++) {
+ eth->scratch_ring[i].txd1 =
+ (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
+ if (i < cnt - 1)
+ eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
+ ((i + 1) * sizeof(struct mtk_tx_dma)));
+ eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
+
+ eth->scratch_ring[i].txd4 = 0;
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+ if (eth->soc->has_sram && ((sizeof(struct mtk_tx_dma)) > 16)) {
+ eth->scratch_ring[i].txd5 = 0;
+ eth->scratch_ring[i].txd6 = 0;
+ eth->scratch_ring[i].txd7 = 0;
+ eth->scratch_ring[i].txd8 = 0;
+ }
+#endif
+ }
+
+ mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
+ mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
+ mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
+ mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
+
+ return 0;
+}
+
+/* Translate a QDMA physical descriptor address into its CPU-visible
+ * counterpart using the ring's base offset (byte arithmetic).
+ */
+static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
+{
+ return (void *)ring->dma + (desc - ring->phys);
+}
+
+/* Every TX descriptor has a parallel bookkeeping slot in ring->buf;
+ * return the slot matching @txd's position in the ring.
+ */
+static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
+ struct mtk_tx_dma *txd)
+{
+ return &ring->buf[txd - ring->dma];
+}
+
+/* Map a QDMA descriptor to the PDMA descriptor at the same index. */
+static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
+ struct mtk_tx_dma *dma)
+{
+ return &ring->dma_pdma[dma - ring->dma];
+}
+
+/* Index of descriptor @dma within the TX ring. */
+static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma)
+{
+ return dma - ring->dma;
+}
+
+/* Undo the DMA mappings recorded in @tx_buf (QDMA tracks one mapping,
+ * PDMA up to two) and release the skb unless it is the dummy marker
+ * used for continuation descriptors.
+ */
+static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
+ dma_unmap_single(eth->dev,
+ dma_unmap_addr(tx_buf, dma_addr0),
+ dma_unmap_len(tx_buf, dma_len0),
+ DMA_TO_DEVICE);
+ } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
+ dma_unmap_page(eth->dev,
+ dma_unmap_addr(tx_buf, dma_addr0),
+ dma_unmap_len(tx_buf, dma_len0),
+ DMA_TO_DEVICE);
+ }
+ } else {
+ /* PDMA descriptors carry two buffers; unmap whichever are set. */
+ if (dma_unmap_len(tx_buf, dma_len0)) {
+ dma_unmap_page(eth->dev,
+ dma_unmap_addr(tx_buf, dma_addr0),
+ dma_unmap_len(tx_buf, dma_len0),
+ DMA_TO_DEVICE);
+ }
+
+ if (dma_unmap_len(tx_buf, dma_len1)) {
+ dma_unmap_page(eth->dev,
+ dma_unmap_addr(tx_buf, dma_addr1),
+ dma_unmap_len(tx_buf, dma_len1),
+ DMA_TO_DEVICE);
+ }
+ }
+
+ tx_buf->flags = 0;
+ if (tx_buf->skb &&
+ (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
+ dev_kfree_skb_any(tx_buf->skb);
+ tx_buf->skb = NULL;
+}
+
+/* Record a DMA mapping in @tx_buf for later unmapping. On QDMA only
+ * the bookkeeping is stored; on PDMA the descriptor itself is also
+ * filled — each PDMA descriptor packs two buffers, selected by the
+ * parity of @idx.
+ */
+static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
+ struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
+ size_t size, int idx)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
+ dma_unmap_len_set(tx_buf, dma_len0, size);
+ } else {
+ if (idx & 1) {
+ /* Odd index: second buffer slot of the descriptor. */
+ txd->txd3 = mapped_addr;
+ txd->txd2 |= TX_DMA_PLEN1(size);
+ dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
+ dma_unmap_len_set(tx_buf, dma_len1, size);
+ } else {
+ /* Even index: first slot; mark skb as dummy for now. */
+ tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
+ txd->txd1 = mapped_addr;
+ txd->txd2 = TX_DMA_PLEN0(size);
+ dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
+ dma_unmap_len_set(tx_buf, dma_len0, size);
+ }
+ }
+}
+
+/* Map an skb (head + page fragments) onto TX descriptors and kick the
+ * DMA engine. Returns 0 on success or -ENOMEM when the ring is full
+ * or a DMA mapping fails; on failure every descriptor taken so far is
+ * unwound. Caller holds eth->page_lock.
+ */
+static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
+ int tx_num, struct mtk_tx_ring *ring, bool gso)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ struct mtk_tx_dma *itxd, *txd;
+ struct mtk_tx_dma *itxd_pdma, *txd_pdma;
+ struct mtk_tx_buf *itx_buf, *tx_buf;
+ dma_addr_t mapped_addr;
+ unsigned int nr_frags;
+ int i, n_desc = 1;
+ u32 txd4 = 0, fport;
+ u32 qid = 0;
+ int k = 0;
+
+ itxd = ring->next_free;
+ itxd_pdma = qdma_to_pdma(ring, itxd);
+ if (itxd == ring->last_free)
+ return -ENOMEM;
+
+ itx_buf = mtk_desc_to_tx_buf(ring, itxd);
+ memset(itx_buf, 0, sizeof(*itx_buf));
+
+ /* Map the linear head first. */
+ mapped_addr = dma_map_single(eth->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
+ return -ENOMEM;
+
+ WRITE_ONCE(itxd->txd1, mapped_addr);
+ itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
+ itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
+ MTK_TX_FLAGS_FPORT1;
+ setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb),
+ k++);
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+ qid = skb->mark & (MTK_QDMA_TX_MASK);
+#endif
+
+ /* Fill txd4/5/6 (NETSYS v2) or txd4 (v1): forward port, TSO,
+ * checksum and VLAN insertion offloads.
+ */
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_TX_V2)) {
+ u32 txd5 = 0, txd6 = 0;
+ /* set the forward port */
+ fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2;
+ txd4 |= fport;
+
+ if (gso)
+ txd5 |= TX_DMA_TSO_V2;
+
+ /* TX Checksum offload */
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ txd5 |= TX_DMA_CHKSUM_V2;
+
+ /* VLAN header offload */
+ if (skb_vlan_tag_present(skb))
+ txd6 |= TX_DMA_INS_VLAN_V2 | skb_vlan_tag_get(skb);
+
+ txd4 = txd4 | TX_DMA_SWC_V2;
+
+ WRITE_ONCE(itxd->txd3, (TX_DMA_PLEN0(skb_headlen(skb)) |
+ (!nr_frags * TX_DMA_LS0)));
+
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+ WRITE_ONCE(itxd->txd5, txd5);
+ WRITE_ONCE(itxd->txd6, txd6);
+#endif
+ } else {
+ /* set the forward port */
+ fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
+ txd4 |= fport;
+
+ if (gso)
+ txd4 |= TX_DMA_TSO;
+
+ /* TX Checksum offload */
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ txd4 |= TX_DMA_CHKSUM;
+
+ /* VLAN header offload */
+ if (skb_vlan_tag_present(skb))
+ txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
+
+ WRITE_ONCE(itxd->txd3,
+ TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
+ (!nr_frags * TX_DMA_LS0) | QID_LOW_BITS(qid));
+ }
+ /* TX SG offload */
+ txd = itxd;
+ txd_pdma = qdma_to_pdma(ring, txd);
+
+#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
+ /* HNAT-tagged packets are redirected to forward port 4 (PPE). */
+ if (HNAT_SKB_CB2(skb)->magic == 0x78681415) {
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_TX_V2)) {
+ txd4 &= ~(0xf << TX_DMA_FPORT_SHIFT_V2);
+ txd4 |= 0x4 << TX_DMA_FPORT_SHIFT_V2;
+ } else {
+ txd4 &= ~(0x7 << TX_DMA_FPORT_SHIFT);
+ txd4 |= 0x4 << TX_DMA_FPORT_SHIFT;
+ }
+ }
+
+ trace_printk("[%s] nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
+ __func__, nr_frags, HNAT_SKB_CB2(skb)->magic, txd4);
+#endif
+
+ /* Map each page fragment, splitting it into MTK_TX_DMA_BUF_LEN
+ * chunks; on PDMA two chunks share one descriptor.
+ */
+ for (i = 0; i < nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ unsigned int offset = 0;
+ int frag_size = skb_frag_size(frag);
+
+ while (frag_size) {
+ bool last_frag = false;
+ unsigned int frag_map_size;
+ bool new_desc = true;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
+ (i & 0x1)) {
+ txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
+ txd_pdma = qdma_to_pdma(ring, txd);
+ if (txd == ring->last_free)
+ goto err_dma;
+
+ n_desc++;
+ } else {
+ new_desc = false;
+ }
+
+
+ frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
+ mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
+ frag_map_size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
+ goto err_dma;
+
+ if (i == nr_frags - 1 &&
+ (frag_size - frag_map_size) == 0)
+ last_frag = true;
+
+ WRITE_ONCE(txd->txd1, mapped_addr);
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_TX_V2)) {
+ WRITE_ONCE(txd->txd3, (TX_DMA_PLEN0(frag_map_size) |
+ last_frag * TX_DMA_LS0));
+ WRITE_ONCE(txd->txd4, fport | TX_DMA_SWC_V2 |
+ QID_BITS_V2(qid));
+ } else {
+ WRITE_ONCE(txd->txd3,
+ (TX_DMA_SWC | QID_LOW_BITS(qid) |
+ TX_DMA_PLEN0(frag_map_size) |
+ last_frag * TX_DMA_LS0));
+ WRITE_ONCE(txd->txd4,
+ fport | QID_HIGH_BITS(qid));
+ }
+
+ tx_buf = mtk_desc_to_tx_buf(ring, txd);
+ if (new_desc)
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
+ tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
+ tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
+ MTK_TX_FLAGS_FPORT1;
+
+ setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr,
+ frag_map_size, k++);
+
+ frag_size -= frag_map_size;
+ offset += frag_map_size;
+ }
+ }
+
+ /* store skb to cleanup */
+ itx_buf->skb = skb;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_TX_V2))
+ WRITE_ONCE(itxd->txd4, txd4 | QID_BITS_V2(qid));
+ else
+ WRITE_ONCE(itxd->txd4, txd4 | QID_HIGH_BITS(qid));
+
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (k & 0x1)
+ txd_pdma->txd2 |= TX_DMA_LS0;
+ else
+ txd_pdma->txd2 |= TX_DMA_LS1;
+ }
+
+ netdev_sent_queue(dev, skb->len);
+ skb_tx_timestamp(skb);
+
+ ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
+ atomic_sub(n_desc, &ring->free_count);
+
+ /* make sure that all changes to the dma ring are flushed before we
+ * continue
+ */
+ wmb();
+
+ /* Notify the hardware: QDMA via the context pointer, PDMA via the
+ * ring index register.
+ */
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
+ !netdev_xmit_more())
+ mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
+ } else {
+ int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd),
+ ring->dma_size);
+ mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
+ }
+
+ return 0;
+
+err_dma:
+ /* Walk back from the first descriptor, unmapping and returning
+ * every descriptor this call consumed.
+ */
+ do {
+ tx_buf = mtk_desc_to_tx_buf(ring, itxd);
+
+ /* unmap dma */
+ mtk_tx_unmap(eth, tx_buf);
+
+ itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
+
+ itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
+ itxd_pdma = qdma_to_pdma(ring, itxd);
+ } while (itxd != txd);
+
+ return -ENOMEM;
+}
+
+/* Return the number of TX descriptors needed to transmit @skb.
+ * GSO fragments can exceed MTK_TX_DMA_BUF_LEN and then need several
+ * descriptors each; non-GSO fragments always fit in one.
+ */
+static inline int mtk_cal_txd_req(struct sk_buff *skb)
+{
+	int desc_cnt = 1;	/* head fragment */
+	int i;
+
+	if (!skb_is_gso(skb))
+		return desc_cnt + skb_shinfo(skb)->nr_frags;
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+
+		desc_cnt += DIV_ROUND_UP(skb_frag_size(f),
+					 MTK_TX_DMA_BUF_LEN);
+	}
+
+	return desc_cnt;
+}
+
+/* Return 1 if any netdev sharing this DMA ring has a stopped TX queue. */
+static int mtk_queue_stopped(struct mtk_eth *eth)
+{
+	int i;
+
+	for (i = 0; i < MTK_MAC_COUNT; i++)
+		if (eth->netdev[i] && netif_queue_stopped(eth->netdev[i]))
+			return 1;
+
+	return 0;
+}
+
+/* Restart the TX queue of every registered netdev on this ring. */
+static void mtk_wake_queue(struct mtk_eth *eth)
+{
+	int i;
+
+	for (i = 0; i < MTK_MAC_COUNT; i++)
+		if (eth->netdev[i])
+			netif_wake_queue(eth->netdev[i]);
+}
+
+/* ndo_start_xmit for both MACs. They transmit over one shared DMA
+ * ring, so ring access is serialized with eth->page_lock.
+ */
+static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+	struct mtk_tx_ring *ring = &eth->tx_ring;
+	struct net_device_stats *stats = &dev->stats;
+	bool gso = false;
+	int tx_num;
+
+	/* normally we can rely on the stack not calling this more than once,
+	 * however we have 2 queues running on the same ring so we need to lock
+	 * the ring access
+	 */
+	spin_lock(&eth->page_lock);
+
+	/* drop everything while the hardware is being reset */
+	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
+		goto drop;
+
+	tx_num = mtk_cal_txd_req(skb);
+	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
+		netif_stop_queue(dev);
+		netif_err(eth, tx_queued, dev,
+			  "Tx Ring full when queue awake!\n");
+		spin_unlock(&eth->page_lock);
+		return NETDEV_TX_BUSY;
+	}
+
+	/* TSO: fill MSS info in tcp checksum field */
+	if (skb_is_gso(skb)) {
+		if (skb_cow_head(skb, 0)) {
+			netif_warn(eth, tx_err, dev,
+				   "GSO expand head fail.\n");
+			goto drop;
+		}
+
+		if (skb_shinfo(skb)->gso_type &
+		    (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+			gso = true;
+			/* the MSS is handed to the HW through the TCP
+			 * checksum field
+			 */
+			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
+		}
+	}
+
+	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
+		goto drop;
+
+	/* stop early if a max-fragment packet might no longer fit */
+	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
+		netif_stop_queue(dev);
+
+	spin_unlock(&eth->page_lock);
+
+	return NETDEV_TX_OK;
+
+drop:
+	spin_unlock(&eth->page_lock);
+	stats->tx_dropped++;
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
+
+/* Pick an RX ring with a completed descriptor pending. Without HW LRO
+ * only ring 0 exists; with HW LRO all rings are scanned and the chosen
+ * one is flagged so its CPU index gets written back later.
+ */
+static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
+{
+	int i;
+
+	if (!eth->hwlro)
+		return &eth->rx_ring[0];
+
+	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
+		struct mtk_rx_ring *r = &eth->rx_ring[i];
+		int next = NEXT_DESP_IDX(r->calc_idx, r->dma_size);
+
+		if (r->dma[next].rxd2 & RX_DMA_DONE) {
+			r->calc_idx_update = true;
+			return r;
+		}
+	}
+
+	return NULL;
+}
+
+/* Write back the CPU index of every RX ring that consumed descriptors. */
+static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
+{
+	struct mtk_rx_ring *ring;
+	int i;
+
+	if (!eth->hwlro) {
+		ring = &eth->rx_ring[0];
+		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
+		return;
+	}
+
+	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
+		ring = &eth->rx_ring[i];
+		if (!ring->calc_idx_update)
+			continue;
+		ring->calc_idx_update = false;
+		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
+	}
+}
+
+/* NAPI RX poll body: consume up to @budget completed RX descriptors
+ * across all RX rings, refilling each ring slot with a freshly mapped
+ * frag before the descriptor is handed back to the DMA engine.
+ * Returns the number of packets delivered to the stack.
+ */
+static int mtk_poll_rx(struct napi_struct *napi, int budget,
+		       struct mtk_eth *eth)
+{
+	struct mtk_rx_ring *ring;
+	int idx;
+	struct sk_buff *skb;
+	u8 *data, *new_data;
+	struct mtk_rx_dma *rxd, trxd;
+	int done = 0;
+
+	while (done < budget) {
+		struct net_device *netdev;
+		unsigned int pktlen;
+		dma_addr_t dma_addr;
+		int mac;
+
+		ring = mtk_get_rx_ring(eth);
+		if (unlikely(!ring))
+			goto rx_done;
+
+		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
+		rxd = &ring->dma[idx];
+		data = ring->data[idx];
+
+		/* snapshot the descriptor, then test the DONE bit on the copy */
+		mtk_rx_get_desc(&trxd, rxd);
+		if (!(trxd.rxd2 & RX_DMA_DONE))
+			break;
+
+		/* find out which mac the packet come from. values start at 1 */
+		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+			mac = 0;
+		} else {
+#ifdef CONFIG_MEDIATEK_NETSYS_RX_V2
+			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
+				mac = RX_DMA_GET_SPORT(trxd.rxd5) - 1;
+			else
+#endif
+				mac = (trxd.rxd4 & RX_DMA_SPECIAL_TAG) ?
+				      0 : RX_DMA_GET_SPORT(trxd.rxd4) - 1;
+		}
+
+		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
+			     !eth->netdev[mac]))
+			goto release_desc;
+
+		netdev = eth->netdev[mac];
+
+		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
+			goto release_desc;
+
+		/* alloc new buffer */
+		new_data = napi_alloc_frag(ring->frag_size);
+		if (unlikely(!new_data)) {
+			netdev->stats.rx_dropped++;
+			goto release_desc;
+		}
+		dma_addr = dma_map_single(eth->dev,
+					  new_data + NET_SKB_PAD +
+					  eth->ip_align,
+					  ring->buf_size,
+					  DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
+			skb_free_frag(new_data);
+			netdev->stats.rx_dropped++;
+			goto release_desc;
+		}
+
+		/* receive data */
+		skb = build_skb(data, ring->frag_size);
+		if (unlikely(!skb)) {
+			skb_free_frag(new_data);
+			netdev->stats.rx_dropped++;
+			goto release_desc;
+		}
+		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+		dma_unmap_single(eth->dev, trxd.rxd1,
+				 ring->buf_size, DMA_FROM_DEVICE);
+		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
+		skb->dev = netdev;
+		skb_put(skb, pktlen);
+
+		/* L4 checksum validity is reported in rxd4 (v1) or rxd3 (v2) */
+		if ((!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2) &&
+		     (trxd.rxd4 & eth->rx_dma_l4_valid)) ||
+		    (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2) &&
+		     (trxd.rxd3 & eth->rx_dma_l4_valid)))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			skb_checksum_none_assert(skb);
+		skb->protocol = eth_type_trans(skb, netdev);
+
+		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
+				if (trxd.rxd4 & RX_DMA_VTAG_V2)
+					__vlan_hwaccel_put_tag(skb,
+					htons(RX_DMA_VPID_V2(trxd.rxd3,
+							     trxd.rxd4)),
+					RX_DMA_VID_V2(trxd.rxd4));
+			} else {
+				if (trxd.rxd2 & RX_DMA_VTAG)
+					__vlan_hwaccel_put_tag(skb,
+					htons(RX_DMA_VPID(trxd.rxd3)),
+					RX_DMA_VID(trxd.rxd3));
+			}
+
+			/* If netdev is attached to dsa switch, the special
+			 * tag inserted in VLAN field by switch hardware can
+			 * be offload by RX HW VLAN offload. Clears the VLAN
+			 * information from @skb to avoid unexpected 8021d
+			 * handler before packet enter dsa framework.
+			 */
+			if (netdev_uses_dsa(netdev))
+				__vlan_hwaccel_clear_tag(skb);
+		}
+
+#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
+		/* stash the raw rxd4/rxd5 word in the skb headroom for HNAT */
+#ifdef CONFIG_MEDIATEK_NETSYS_RX_V2
+		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
+			*(u32 *)(skb->head) = trxd.rxd5;
+		else
+#endif
+			*(u32 *)(skb->head) = trxd.rxd4;
+
+		skb_hnat_alg(skb) = 0;
+		skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
+
+		if (skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) {
+			trace_printk("[%s] reason=0x%x(force to CPU) from WAN to Ext\n",
+				     __func__, skb_hnat_reason(skb));
+			skb->pkt_type = PACKET_HOST;
+		}
+
+		trace_printk("[%s] rxd:(entry=%x,sport=%x,reason=%x,alg=%x\n",
+			     __func__, skb_hnat_entry(skb), skb_hnat_sport(skb),
+			     skb_hnat_reason(skb), skb_hnat_alg(skb));
+#endif
+
+		skb_record_rx_queue(skb, 0);
+		napi_gro_receive(napi, skb);
+
+		/* hand the slot the fresh buffer we just mapped */
+		ring->data[idx] = new_data;
+		rxd->rxd1 = (unsigned int)dma_addr;
+
+release_desc:
+		/* return the descriptor (old or refilled) to the DMA engine */
+		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+			rxd->rxd2 = RX_DMA_LSO;
+		else
+			rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
+
+		ring->calc_idx = idx;
+
+		done++;
+	}
+
+rx_done:
+	if (done) {
+		/* make sure that all changes to the dma ring are flushed before
+		 * we continue
+		 */
+		wmb();
+		mtk_update_rx_cpu_idx(eth);
+	}
+
+	return done;
+}
+
+/* Reclaim completed QDMA TX descriptors between the ring's CPU release
+ * pointer and the hardware's DMA pointer. @done/@bytes accumulate
+ * per-MAC completion counts for BQL accounting.
+ * Returns the budget left over.
+ */
+static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
+			    unsigned int *done, unsigned int *bytes)
+{
+	struct mtk_tx_ring *ring = &eth->tx_ring;
+	struct mtk_tx_dma *desc;
+	struct sk_buff *skb;
+	struct mtk_tx_buf *tx_buf;
+	u32 cpu, dma;
+
+	cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
+	dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
+
+	desc = mtk_qdma_phys_to_virt(ring, cpu);
+
+	while ((cpu != dma) && budget) {
+		u32 next_cpu = desc->txd2;
+		int mac = 0;
+
+		/* stop at the first descriptor still owned by the DMA */
+		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
+			break;
+
+		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
+
+		tx_buf = mtk_desc_to_tx_buf(ring, desc);
+		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
+			mac = 1;
+
+		skb = tx_buf->skb;
+		if (!skb)
+			break;
+
+		/* MTK_DMA_DUMMY_DESC marks a continuation fragment: it is
+		 * unmapped but not counted as a completed packet
+		 */
+		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
+			bytes[mac] += skb->len;
+			done[mac]++;
+			budget--;
+		}
+		mtk_tx_unmap(eth, tx_buf);
+
+		ring->last_free = desc;
+		atomic_inc(&ring->free_count);
+
+		cpu = next_cpu;
+	}
+
+	/* tell the hardware how far the CPU has released */
+	mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
+
+	return budget;
+}
+
+/* PDMA flavour of TX reclaim: walk from the ring's software CPU index
+ * to the hardware DTX index, unmapping buffers and counting completed
+ * packets into @done[0]/@bytes[0] (PDMA-only SoCs have one MAC port
+ * tracked here). Returns the budget left over.
+ */
+static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
+			    unsigned int *done, unsigned int *bytes)
+{
+	struct mtk_tx_ring *ring = &eth->tx_ring;
+	struct mtk_tx_dma *desc;
+	struct sk_buff *skb;
+	struct mtk_tx_buf *tx_buf;
+	u32 cpu, dma;
+
+	cpu = ring->cpu_idx;
+	dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
+
+	while ((cpu != dma) && budget) {
+		tx_buf = &ring->buf[cpu];
+		skb = tx_buf->skb;
+		if (!skb)
+			break;
+
+		/* dummy-desc entries are continuation fragments and are
+		 * not counted as completed packets
+		 */
+		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
+			bytes[0] += skb->len;
+			done[0]++;
+			budget--;
+		}
+
+		mtk_tx_unmap(eth, tx_buf);
+
+		desc = &ring->dma[cpu];
+		ring->last_free = desc;
+		atomic_inc(&ring->free_count);
+
+		cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
+	}
+
+	ring->cpu_idx = cpu;
+
+	return budget;
+}
+
+/* Reclaim completed TX descriptors (QDMA or PDMA flavour), update the
+ * BQL counters per MAC and wake stopped queues once room is available.
+ * Returns the number of completed packets.
+ */
+static int mtk_poll_tx(struct mtk_eth *eth, int budget)
+{
+	struct mtk_tx_ring *ring = &eth->tx_ring;
+	unsigned int pkts[MTK_MAX_DEVS] = {0};
+	unsigned int octets[MTK_MAX_DEVS] = {0};
+	int total = 0;
+	int i;
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+		budget = mtk_poll_tx_qdma(eth, budget, pkts, octets);
+	else
+		budget = mtk_poll_tx_pdma(eth, budget, pkts, octets);
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!eth->netdev[i] || !pkts[i])
+			continue;
+		netdev_completed_queue(eth->netdev[i], pkts[i], octets[i]);
+		total += pkts[i];
+	}
+
+	if (mtk_queue_stopped(eth) &&
+	    (atomic_read(&ring->free_count) > ring->thresh))
+		mtk_wake_queue(eth);
+
+	return total;
+}
+
+/* Ack the GDM accounting-group-full interrupts and fold the hardware
+ * counters into the software stats before they can overflow.
+ */
+static void mtk_handle_status_irq(struct mtk_eth *eth)
+{
+	u32 irq_status = mtk_r32(eth, MTK_INT_STATUS2);
+
+	if (likely(!(irq_status & (MTK_GDM1_AF | MTK_GDM2_AF))))
+		return;
+
+	mtk_stats_update(eth);
+	mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
+		MTK_INT_STATUS2);
+}
+
+/* NAPI TX completion poll. Acks the TX-done interrupt first, reclaims
+ * descriptors, and only re-enables the IRQ once no more completions
+ * are pending and budget was not exhausted.
+ */
+static int mtk_napi_tx(struct napi_struct *napi, int budget)
+{
+	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
+	u32 status, mask;
+	int tx_done = 0;
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+		mtk_handle_status_irq(eth);
+	/* ack before polling so new completions re-raise the status bit */
+	mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
+	tx_done = mtk_poll_tx(eth, budget);
+
+	if (unlikely(netif_msg_intr(eth))) {
+		status = mtk_r32(eth, eth->tx_int_status_reg);
+		mask = mtk_r32(eth, eth->tx_int_mask_reg);
+		dev_info(eth->dev,
+			 "done tx %d, intr 0x%08x/0x%x\n",
+			 tx_done, status, mask);
+	}
+
+	/* budget exhausted: stay scheduled */
+	if (tx_done == budget)
+		return budget;
+
+	/* more work showed up after the ack: stay scheduled */
+	status = mtk_r32(eth, eth->tx_int_status_reg);
+	if (status & MTK_TX_DONE_INT)
+		return budget;
+
+	napi_complete(napi);
+	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+
+	return tx_done;
+}
+
+/* NAPI RX poll. Re-polls (poll_again) while new RX-done status keeps
+ * appearing and budget remains, then completes NAPI and re-enables the
+ * RX interrupt.
+ */
+static int mtk_napi_rx(struct napi_struct *napi, int budget)
+{
+	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
+	u32 status, mask;
+	int rx_done = 0;
+	int remain_budget = budget;
+
+	mtk_handle_status_irq(eth);
+
+poll_again:
+	/* ack before polling so a fresh completion re-raises the status */
+	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
+	rx_done = mtk_poll_rx(napi, remain_budget, eth);
+
+	if (unlikely(netif_msg_intr(eth))) {
+		status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
+		mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
+		dev_info(eth->dev,
+			 "done rx %d, intr 0x%08x/0x%x\n",
+			 rx_done, status, mask);
+	}
+	/* budget exhausted: stay scheduled */
+	if (rx_done == remain_budget)
+		return budget;
+
+	status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
+	if (status & MTK_RX_DONE_INT) {
+		remain_budget -= rx_done;
+		goto poll_again;
+	}
+	napi_complete(napi);
+	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+
+	return rx_done + budget - remain_budget;
+}
+
+/* Allocate and initialize the TX descriptor ring (the QDMA linked list
+ * plus, on PDMA-only SoCs, the parallel PDMA ring) and program the ring
+ * base registers. Returns 0 or -ENOMEM.
+ */
+static int mtk_tx_alloc(struct mtk_eth *eth)
+{
+	struct mtk_tx_ring *ring = &eth->tx_ring;
+	int i, sz = sizeof(*ring->dma);
+
+	ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
+			    GFP_KERNEL);
+	if (!ring->buf)
+		goto no_tx_mem;
+
+	/* descriptors live either in coherent DRAM or in the SoC SRAM,
+	 * directly after the scratch (FQ) ring
+	 */
+	if (!eth->soc->has_sram)
+		ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+					       &ring->phys, GFP_ATOMIC);
+	else {
+		ring->dma = eth->scratch_ring + MTK_DMA_SIZE;
+		ring->phys = eth->phy_scratch_ring + MTK_DMA_SIZE * sz;
+	}
+
+	if (!ring->dma)
+		goto no_tx_mem;
+
+	for (i = 0; i < MTK_DMA_SIZE; i++) {
+		int next = (i + 1) % MTK_DMA_SIZE;
+		u32 next_ptr = ring->phys + next * sz;
+
+		/* chain descriptors into a ring, all CPU-owned initially */
+		ring->dma[i].txd2 = next_ptr;
+		ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+		ring->dma[i].txd4 = 0;
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+		if (eth->soc->has_sram && ( sz > 16)) {
+			ring->dma[i].txd5 = 0;
+			ring->dma[i].txd6 = 0;
+			ring->dma[i].txd7 = 0;
+			ring->dma[i].txd8 = 0;
+		}
+#endif
+	}
+
+	/* On MT7688 (PDMA only) this driver uses the ring->dma structs
+	 * only as the framework. The real HW descriptors are the PDMA
+	 * descriptors in ring->dma_pdma.
+	 */
+	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+		ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+						    &ring->phys_pdma,
+						    GFP_ATOMIC);
+		if (!ring->dma_pdma)
+			goto no_tx_mem;
+
+		for (i = 0; i < MTK_DMA_SIZE; i++) {
+			ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
+			ring->dma_pdma[i].txd4 = 0;
+		}
+	}
+
+	ring->dma_size = MTK_DMA_SIZE;
+	/* NOTE(review): two entries are held back from free_count —
+	 * presumably so next_free never collides with last_free; confirm
+	 * against mtk_tx_map's ring accounting.
+	 */
+	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
+	ring->next_free = &ring->dma[0];
+	ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
+	ring->thresh = MAX_SKB_FRAGS;
+
+	/* make sure that all changes to the dma ring are flushed before we
+	 * continue
+	 */
+	wmb();
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+		mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
+		mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
+		mtk_w32(eth,
+			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
+			MTK_QTX_CRX_PTR);
+		mtk_w32(eth,
+			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
+			MTK_QTX_DRX_PTR);
+		mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
+			MTK_QTX_CFG(0));
+	} else {
+		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
+		mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
+		mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
+		mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
+	}
+
+	return 0;
+
+no_tx_mem:
+	return -ENOMEM;
+}
+
+/* Free all TX ring resources: unmap every buffer, then release the
+ * descriptor arrays (the QDMA array only when it lives in DRAM — SRAM
+ * descriptors are not freed here).
+ */
+static void mtk_tx_clean(struct mtk_eth *eth)
+{
+	struct mtk_tx_ring *ring = &eth->tx_ring;
+	int i;
+
+	if (ring->buf) {
+		for (i = 0; i < MTK_DMA_SIZE; i++)
+			mtk_tx_unmap(eth, &ring->buf[i]);
+		kfree(ring->buf);
+		ring->buf = NULL;
+	}
+
+	if (ring->dma && !eth->soc->has_sram) {
+		dma_free_coherent(eth->dev,
+				  MTK_DMA_SIZE * sizeof(*ring->dma),
+				  ring->dma, ring->phys);
+		ring->dma = NULL;
+	}
+
+	if (ring->dma_pdma) {
+		dma_free_coherent(eth->dev,
+				  MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
+				  ring->dma_pdma, ring->phys_pdma);
+		ring->dma_pdma = NULL;
+	}
+}
+
+/* Allocate one RX ring (@ring_no) of the requested kind (normal,
+ * HW LRO or QDMA), populate it with DMA-mapped frags and program the
+ * ring registers. Returns 0 or a negative errno.
+ */
+static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
+{
+	struct mtk_rx_ring *ring;
+	int rx_data_len, rx_dma_size;
+	int i;
+
+	if (rx_flag == MTK_RX_FLAGS_QDMA) {
+		/* there is only a single QDMA RX ring */
+		if (ring_no)
+			return -EINVAL;
+		ring = &eth->rx_ring_qdma;
+	} else {
+		ring = &eth->rx_ring[ring_no];
+	}
+
+	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
+		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
+		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
+	} else {
+		rx_data_len = ETH_DATA_LEN;
+		rx_dma_size = MTK_DMA_SIZE;
+	}
+
+	ring->frag_size = mtk_max_frag_size(rx_data_len);
+	ring->buf_size = mtk_max_buf_size(ring->frag_size);
+	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
+			     GFP_KERNEL);
+	if (!ring->data)
+		return -ENOMEM;
+
+	for (i = 0; i < rx_dma_size; i++) {
+		ring->data[i] = netdev_alloc_frag(ring->frag_size);
+		if (!ring->data[i])
+			return -ENOMEM;
+	}
+
+	/* the normal ring reuses the SRAM region right after the TX ring
+	 * when the SoC provides one; everything else gets coherent DRAM
+	 */
+	if ((!eth->soc->has_sram) || (eth->soc->has_sram
+	    && (rx_flag != MTK_RX_FLAGS_NORMAL)))
+		ring->dma = dma_alloc_coherent(eth->dev,
+					       rx_dma_size * sizeof(*ring->dma),
+					       &ring->phys, GFP_ATOMIC);
+	else {
+		struct mtk_tx_ring *tx_ring = &eth->tx_ring;
+		ring->dma = (struct mtk_rx_dma *)(tx_ring->dma + MTK_DMA_SIZE);
+		ring->phys = tx_ring->phys + MTK_DMA_SIZE * sizeof(*tx_ring->dma);
+	}
+
+	if (!ring->dma)
+		return -ENOMEM;
+
+	for (i = 0; i < rx_dma_size; i++) {
+		dma_addr_t dma_addr = dma_map_single(eth->dev,
+				ring->data[i] + NET_SKB_PAD + eth->ip_align,
+				ring->buf_size,
+				DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+			return -ENOMEM;
+		ring->dma[i].rxd1 = (unsigned int)dma_addr;
+
+		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+			ring->dma[i].rxd2 = RX_DMA_LSO;
+		else
+			ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
+
+		ring->dma[i].rxd3 = 0;
+		ring->dma[i].rxd4 = 0;
+#if defined(CONFIG_MEDIATEK_NETSYS_RX_V2)
+		if (eth->soc->has_sram && ((sizeof(struct mtk_rx_dma)) > 16)) {
+			ring->dma[i].rxd5 = 0;
+			ring->dma[i].rxd6 = 0;
+			ring->dma[i].rxd7 = 0;
+			ring->dma[i].rxd8 = 0;
+		}
+#endif
+	}
+	ring->dma_size = rx_dma_size;
+	ring->calc_idx_update = false;
+	/* CPU index starts on the last slot: all descriptors DMA-owned */
+	ring->calc_idx = rx_dma_size - 1;
+	ring->crx_idx_reg = (rx_flag == MTK_RX_FLAGS_QDMA) ?
+			    MTK_QRX_CRX_IDX_CFG(ring_no) :
+			    MTK_PRX_CRX_IDX_CFG(ring_no);
+	/* make sure that all changes to the dma ring are flushed before we
+	 * continue
+	 */
+	wmb();
+
+	if (rx_flag == MTK_RX_FLAGS_QDMA) {
+		mtk_w32(eth, ring->phys, MTK_QRX_BASE_PTR_CFG(ring_no));
+		mtk_w32(eth, rx_dma_size, MTK_QRX_MAX_CNT_CFG(ring_no));
+		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
+		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_QDMA_RST_IDX);
+	} else {
+		mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no));
+		mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no));
+		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
+		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX);
+	}
+
+	return 0;
+}
+
+/* Release one RX ring: unmap and free all frags, then free the
+ * descriptor array unless it lives in SRAM (@in_sram).
+ */
+static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, int in_sram)
+{
+	int i;
+
+	if (ring->data && ring->dma) {
+		for (i = 0; i < ring->dma_size; i++) {
+			u8 *frag = ring->data[i];
+
+			if (!frag || !ring->dma[i].rxd1)
+				continue;
+			dma_unmap_single(eth->dev, ring->dma[i].rxd1,
+					 ring->buf_size, DMA_FROM_DEVICE);
+			skb_free_frag(frag);
+		}
+		kfree(ring->data);
+		ring->data = NULL;
+	}
+
+	/* SRAM-backed descriptor memory is not ours to free */
+	if (in_sram)
+		return;
+
+	if (ring->dma) {
+		dma_free_coherent(eth->dev,
+				  ring->dma_size * sizeof(*ring->dma),
+				  ring->dma, ring->phys);
+		ring->dma = NULL;
+	}
+}
+
+/* One-time HW LRO bring-up: put LRO rings 1..N-1 in auto-learn mode
+ * with age/aggregation limits, set the global thresholds and timers,
+ * then enable the LRO engine. Always returns 0.
+ */
+static int mtk_hwlro_rx_init(struct mtk_eth *eth)
+{
+	int i;
+	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
+	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
+
+	/* set LRO rings to auto-learn modes */
+	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
+
+	/* validate LRO ring */
+	ring_ctrl_dw2 |= MTK_RING_VLD;
+
+	/* set AGE timer (unit: 20us) */
+	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
+	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
+
+	/* set max AGG timer (unit: 20us) */
+	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
+
+	/* set max LRO AGG count */
+	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
+	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
+
+	/* ring 0 is not an LRO ring, start from ring 1 */
+	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
+		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
+		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
+		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
+	}
+
+	/* IPv4 checksum update enable */
+	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
+
+	/* switch priority comparison to packet count mode */
+	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
+
+	/* bandwidth threshold setting */
+	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
+
+	/* auto-learn score delta setting */
+	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
+
+	/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
+	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
+		MTK_PDMA_LRO_ALT_REFRESH_TIMER);
+
+	/* set HW LRO mode & the max aggregation count for rx packets */
+	lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
+
+	/* the minimal remaining room of SDL0 in RXD for lro aggregation */
+	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
+
+	/* enable HW LRO */
+	lro_ctrl_dw0 |= MTK_LRO_EN;
+
+	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
+	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
+
+	return 0;
+}
+
+/* Tear down HW LRO: flush aggregated frames, invalidate the LRO rings
+ * and disable the engine.
+ */
+static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
+{
+	int i;
+
+	/* ask the rings to relinquish, flushing pending aggregates */
+	mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
+
+	/* poll up to ~200ms for the relinquish handshake to finish */
+	for (i = 0; i < 10; i++) {
+		if (!(mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0) &
+		      MTK_LRO_RING_RELINQUISH_DONE))
+			break;
+		msleep(20);
+	}
+
+	/* invalidate lro rings */
+	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
+		mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
+
+	/* disable HW LRO */
+	mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
+}
+
+/* Program one LRO destination-IP slot and mark it valid. */
+static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
+{
+	u32 dw2 = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
+
+	/* drop the valid bit while the address is being swapped */
+	mtk_w32(eth, dw2 & ~MTK_RING_MYIP_VLD, MTK_LRO_CTRL_DW2_CFG(idx));
+
+	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
+
+	/* re-validate the slot */
+	mtk_w32(eth, dw2 | MTK_RING_MYIP_VLD, MTK_LRO_CTRL_DW2_CFG(idx));
+}
+
+/* Clear one LRO destination-IP slot and mark it invalid. */
+static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
+{
+	u32 dw2 = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
+
+	/* drop the valid bit first, then wipe the address */
+	mtk_w32(eth, dw2 & ~MTK_RING_MYIP_VLD, MTK_LRO_CTRL_DW2_CFG(idx));
+
+	mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
+}
+
+/* Count the LRO IP filter slots currently programmed on @mac. */
+static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
+{
+	int i, cnt = 0;
+
+	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++)
+		cnt += mac->hwlro_ip[i] ? 1 : 0;
+
+	return cnt;
+}
+
+/* ethtool flow-rule insert: program a TCPv4 dst-IP into one of this
+ * MAC's LRO filter slots (locations 0..1 only).
+ */
+static int mtk_hwlro_add_ipaddr(struct net_device *dev,
+				struct ethtool_rxnfc *cmd)
+{
+	struct ethtool_rx_flow_spec *fsp =
+		(struct ethtool_rx_flow_spec *)&cmd->fs;
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+	int slot;
+
+	if (fsp->flow_type != TCP_V4_FLOW ||
+	    !fsp->h_u.tcp_ip4_spec.ip4dst ||
+	    fsp->location > 1)
+		return -EINVAL;
+
+	mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
+	slot = mac->id * MTK_MAX_LRO_IP_CNT + fsp->location;
+
+	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
+
+	mtk_hwlro_val_ipaddr(eth, slot, mac->hwlro_ip[fsp->location]);
+
+	return 0;
+}
+
+/* ethtool flow-rule delete: clear one LRO dst-IP rule and invalidate
+ * its hardware slot.
+ */
+static int mtk_hwlro_del_ipaddr(struct net_device *dev,
+				struct ethtool_rxnfc *cmd)
+{
+	struct ethtool_rx_flow_spec *fsp =
+		(struct ethtool_rx_flow_spec *)&cmd->fs;
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+	int slot;
+
+	if (fsp->location > 1)
+		return -EINVAL;
+
+	mac->hwlro_ip[fsp->location] = 0;
+	slot = mac->id * MTK_MAX_LRO_IP_CNT + fsp->location;
+
+	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
+
+	mtk_hwlro_inval_ipaddr(eth, slot);
+
+	return 0;
+}
+
+/* Clear every LRO IP filter owned by @dev and invalidate the HW slots. */
+static void mtk_hwlro_netdev_disable(struct net_device *dev)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+	int base = mac->id * MTK_MAX_LRO_IP_CNT;
+	int i;
+
+	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
+		mac->hwlro_ip[i] = 0;
+		mtk_hwlro_inval_ipaddr(eth, base + i);
+	}
+
+	mac->hwlro_ip_cnt = 0;
+}
+
+/* ethtool ETHTOOL_GRXCLSRULE handler: report one LRO flow rule.
+ * Only the TCPv4 destination address is meaningful; every other field
+ * is reported as "don't care".
+ */
+static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
+				    struct ethtool_rxnfc *cmd)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct ethtool_rx_flow_spec *fsp =
+		(struct ethtool_rx_flow_spec *)&cmd->fs;
+
+	/* fsp->location comes straight from user space; reject anything
+	 * outside hwlro_ip[] to avoid an out-of-bounds read.
+	 */
+	if (fsp->location >= MTK_MAX_LRO_IP_CNT)
+		return -EINVAL;
+
+	/* only tcp dst ipv4 is meaningful, others are meaningless */
+	fsp->flow_type = TCP_V4_FLOW;
+	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
+	fsp->m_u.tcp_ip4_spec.ip4dst = 0;
+
+	fsp->h_u.tcp_ip4_spec.ip4src = 0;
+	fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
+	fsp->h_u.tcp_ip4_spec.psrc = 0;
+	fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
+	fsp->h_u.tcp_ip4_spec.pdst = 0;
+	fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
+	fsp->h_u.tcp_ip4_spec.tos = 0;
+	fsp->m_u.tcp_ip4_spec.tos = 0xff;
+
+	return 0;
+}
+
+/* Report the locations of all programmed LRO flow rules to ethtool. */
+static int mtk_hwlro_get_fdir_all(struct net_device *dev,
+				  struct ethtool_rxnfc *cmd,
+				  u32 *rule_locs)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	int i, cnt = 0;
+
+	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++)
+		if (mac->hwlro_ip[i])
+			rule_locs[cnt++] = i;
+
+	cmd->rule_cnt = cnt;
+
+	return 0;
+}
+
+/* ndo_fix_features: keep LRO on while ethtool flow rules still
+ * reference it, and refuse TX VLAN offload when a DSA switch sits
+ * behind the MAC.
+ */
+static netdev_features_t mtk_fix_features(struct net_device *dev,
+					  netdev_features_t features)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+
+	if (!(features & NETIF_F_LRO) && mtk_hwlro_get_ip_cnt(mac)) {
+		netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
+		features |= NETIF_F_LRO;
+	}
+
+	if ((features & NETIF_F_HW_VLAN_CTAG_TX) && netdev_uses_dsa(dev)) {
+		netdev_info(dev, "TX vlan offload cannot be enabled when dsa is attached.\n");
+		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+	}
+
+	return features;
+}
+
+/* ndo_set_features: react to LRO and RX VLAN offload toggles. */
+static int mtk_set_features(struct net_device *dev, netdev_features_t features)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+	netdev_features_t changed = dev->features ^ features;
+
+	if (!(changed & MTK_SET_FEATURES))
+		return 0;
+
+	if (!(features & NETIF_F_LRO))
+		mtk_hwlro_netdev_disable(dev);
+
+	/* program the CDM egress control per the RX VLAN offload state */
+	mtk_w32(eth, (features & NETIF_F_HW_VLAN_CTAG_RX) ? 1 : 0,
+		MTK_CDMP_EG_CTRL);
+
+	return 0;
+}
+
+/* wait for DMA to finish whatever it is doing before we start using it again */
+static int mtk_dma_busy_wait(struct mtk_eth *eth)
+{
+ unsigned long t_start = jiffies;
+
+ while (1) {
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
+ (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
+ return 0;
+ } else {
+ if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
+ (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
+ return 0;
+ }
+
+ if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
+ break;
+ }
+
+ dev_err(eth->dev, "DMA init timeout\n");
+ return -1;
+}
+
+/* Bring up all DMA resources: the QDMA scratch (FQ) memory, the TX
+ * ring, the QDMA and PDMA RX rings and, when enabled, the HW LRO
+ * rings. Returns 0 or a negative errno; on failure the caller is
+ * expected to clean up via mtk_dma_free() (see mtk_start_dma).
+ */
+static int mtk_dma_init(struct mtk_eth *eth)
+{
+	int err;
+	u32 i;
+
+	if (mtk_dma_busy_wait(eth))
+		return -EBUSY;
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+		/* QDMA needs scratch memory for internal reordering of the
+		 * descriptors
+		 */
+		err = mtk_init_fq_dma(eth);
+		if (err)
+			return err;
+	}
+
+	err = mtk_tx_alloc(eth);
+	if (err)
+		return err;
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+		err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
+		if (err)
+			return err;
+	}
+
+	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
+	if (err)
+		return err;
+
+	if (eth->hwlro) {
+		/* ring 0 stays the normal ring; 1..N-1 are HW LRO rings */
+		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
+			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
+			if (err)
+				return err;
+		}
+		err = mtk_hwlro_rx_init(eth);
+		if (err)
+			return err;
+	}
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+		/* Enable random early drop and set drop threshold
+		 * automatically
+		 */
+		mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
+			FC_THRES_MIN, MTK_QDMA_FC_THRES);
+		mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
+	}
+
+	return 0;
+}
+
+/* Tear down everything mtk_dma_init() set up: BQL queues, the scratch
+ * ring, the TX ring and all RX rings.
+ */
+static void mtk_dma_free(struct mtk_eth *eth)
+{
+	int i;
+
+	for (i = 0; i < MTK_MAC_COUNT; i++)
+		if (eth->netdev[i])
+			netdev_reset_queue(eth->netdev[i]);
+
+	if (!eth->soc->has_sram && eth->scratch_ring) {
+		dma_free_coherent(eth->dev,
+				  MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
+				  eth->scratch_ring,
+				  eth->phy_scratch_ring);
+		eth->scratch_ring = NULL;
+		eth->phy_scratch_ring = 0;
+	}
+	mtk_tx_clean(eth);
+	/* rx_ring[0] lives in SRAM only when the SoC provides it; on
+	 * DRAM-only SoCs its descriptors came from dma_alloc_coherent()
+	 * (see mtk_rx_alloc) and must be freed here — passing a
+	 * hard-coded 1 leaked them on every down/up cycle.
+	 */
+	mtk_rx_clean(eth, &eth->rx_ring[0], eth->soc->has_sram);
+	mtk_rx_clean(eth, &eth->rx_ring_qdma, 0);
+
+	if (eth->hwlro) {
+		mtk_hwlro_rx_uninit(eth);
+		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
+			mtk_rx_clean(eth, &eth->rx_ring[i], 0);
+	}
+
+	kfree(eth->scratch_head);
+}
+
+static void mtk_tx_timeout(struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+
+ eth->netdev[mac->id]->stats.tx_errors++;
+ netif_err(eth, tx_err, dev,
+ "transmit timed out\n");
+ schedule_work(ð->pending_work);
+}
+
+/* RX interrupt: mask further RX-done IRQs and hand off to NAPI. */
+static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
+{
+	struct mtk_eth *eth = _eth;
+
+	if (unlikely(!napi_schedule_prep(&eth->rx_napi)))
+		return IRQ_HANDLED;
+
+	__napi_schedule(&eth->rx_napi);
+	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
+
+	return IRQ_HANDLED;
+}
+
+/* TX interrupt: mask further TX-done IRQs and hand off to NAPI. */
+static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
+{
+	struct mtk_eth *eth = _eth;
+
+	if (unlikely(!napi_schedule_prep(&eth->tx_napi)))
+		return IRQ_HANDLED;
+
+	__napi_schedule(&eth->tx_napi);
+	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+
+	return IRQ_HANDLED;
+}
+
+/* Shared-line ISR (SoCs with a single ethernet interrupt): demux into
+ * the RX and TX handlers based on enabled-and-pending status bits.
+ */
+static irqreturn_t mtk_handle_irq(int irq, void *_eth)
+{
+	struct mtk_eth *eth = _eth;
+	bool rx_pending, tx_pending;
+
+	rx_pending = (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) &&
+		     (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT);
+	if (rx_pending)
+		mtk_handle_irq_rx(irq, _eth);
+
+	tx_pending = (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) &&
+		     (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT);
+	if (tx_pending)
+		mtk_handle_irq_tx(irq, _eth);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* netpoll hook: run one RX pass with the ethernet IRQs masked. */
+static void mtk_poll_controller(struct net_device *dev)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+
+	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
+	/* the handler casts its cookie to struct mtk_eth *, so it must be
+	 * passed eth, not dev (passing dev dereferenced the wrong struct)
+	 */
+	mtk_handle_irq_rx(eth->irq[2], eth);
+	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+}
+#endif
+
+/* Initialize the DMA rings and turn on the TX/RX engines with the
+ * SoC-appropriate global configuration. Returns 0 or a negative errno
+ * (resources already freed on failure).
+ */
+static int mtk_start_dma(struct mtk_eth *eth)
+{
+	u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
+	int err;
+
+	err = mtk_dma_init(eth);
+	if (err) {
+		mtk_dma_free(eth);
+		return err;
+	}
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+		/* NETSYS v2 needs additional write-back / completion flags */
+		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_TX_V2))
+			mtk_w32(eth,
+				MTK_TX_DMA_EN | MTK_RX_DMA_EN |
+				MTK_DMA_SIZE_32DWORDS | MTK_TX_WB_DDONE |
+				MTK_NDP_CO_PRO | MTK_MUTLI_CNT |
+				MTK_RESV_BUF | MTK_WCOMP_EN |
+				MTK_DMAD_WR_WDONE | MTK_CHK_DDONE_EN |
+				MTK_RX_2B_OFFSET, MTK_QDMA_GLO_CFG);
+		else
+			mtk_w32(eth,
+				MTK_TX_DMA_EN |
+				MTK_DMA_SIZE_32DWORDS | MTK_NDP_CO_PRO |
+				MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
+				MTK_RX_BT_32DWORDS,
+				MTK_QDMA_GLO_CFG);
+
+		/* PDMA still drives the RX path on QDMA SoCs */
+		mtk_w32(eth,
+			MTK_RX_DMA_EN | rx_2b_offset |
+			MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
+			MTK_PDMA_GLO_CFG);
+	} else {
+		mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
+			MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
+			MTK_PDMA_GLO_CFG);
+	}
+
+	return 0;
+}
+
+/* Program every GDM forwarding register with @config (e.g. forward to
+ * PDMA or drop-all), enable RX checksum offload, then pulse a global
+ * PSE reset. No-op on MT7628.
+ */
+static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
+{
+	int i;
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+		return;
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
+
+		/* default setup the forward port to send frame to PDMA */
+		val &= ~0xffff;
+
+		/* Enable RX checksum */
+		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
+
+		val |= config;
+
+		/* keep the DSA special tag intact through the GDM */
+		if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i]))
+			val |= MTK_GDMA_SPECIAL_TAG;
+
+		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
+	}
+	/* Reset and enable PSE */
+	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
+	mtk_w32(eth, 0, MTK_RST_GL);
+}
+
+/* ndo_open: attach the PHY and, for the first user of the shared DMA
+ * ring, bring up DMA, GDM forwarding, NAPI and the interrupts.
+ */
+static int mtk_open(struct net_device *dev)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+	int err;
+
+	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
+	if (err) {
+		netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
+			   err);
+		return err;
+	}
+
+	/* we run 2 netdevs on the same dma ring so we only bring it up once */
+	if (!refcount_read(&eth->dma_refcnt)) {
+		err = mtk_start_dma(eth);
+		if (err) {
+			/* don't leave the PHY attached on failure */
+			phylink_disconnect_phy(mac->phylink);
+			return err;
+		}
+
+		mtk_gdm_config(eth, MTK_GDMA_TO_PDMA);
+
+		/* Indicates CDM to parse the MTK special tag from CPU */
+		if (netdev_uses_dsa(dev)) {
+			u32 val;
+
+			val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
+			mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
+			val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
+			mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
+		}
+
+		napi_enable(&eth->tx_napi);
+		napi_enable(&eth->rx_napi);
+		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+		mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+		refcount_set(&eth->dma_refcnt, 1);
+	} else {
+		refcount_inc(&eth->dma_refcnt);
+	}
+
+	phylink_start(mac->phylink);
+	netif_start_queue(dev);
+	return 0;
+}
+
+/* Clear the TX/RX enable bits in @glo_cfg (a PDMA or QDMA global config
+ * register) and poll up to ~200ms for the engine's busy bits to drop.
+ * Times out silently if the hardware never goes idle.
+ */
+static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
+{
+	u32 val;
+	int i;
+
+	/* stop the dma engine */
+	spin_lock_bh(&eth->page_lock);
+	val = mtk_r32(eth, glo_cfg);
+	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
+		glo_cfg);
+	spin_unlock_bh(&eth->page_lock);
+
+	/* wait for dma stop */
+	for (i = 0; i < 10; i++) {
+		val = mtk_r32(eth, glo_cfg);
+		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
+			msleep(20);
+			continue;
+		}
+		break;
+	}
+}
+
+/* ndo_stop: stop phylink/TX for this netdev; the shared DMA engine and NAPI
+ * are only torn down when the last ring user goes away (dma_refcnt).
+ */
+static int mtk_stop(struct net_device *dev)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+
+	phylink_stop(mac->phylink);
+
+	netif_tx_disable(dev);
+
+	/* NOTE(review): mtk_uninit() and mtk_remove() also call
+	 * phylink_disconnect_phy() - confirm connect/disconnect stays
+	 * balanced across ifdown + unregister paths.
+	 */
+	phylink_disconnect_phy(mac->phylink);
+
+	/* only shutdown DMA if this is the last user */
+	if (!refcount_dec_and_test(&eth->dma_refcnt))
+		return 0;
+
+	mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
+
+	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
+	napi_disable(&eth->tx_napi);
+	napi_disable(&eth->rx_napi);
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+		mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
+	mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
+
+	mtk_dma_free(eth);
+
+	return 0;
+}
+
+/* Pulse @reset_bits in the ETHSYS reset-control register: assert for ~1ms,
+ * deassert, then wait 10ms for the blocks to come out of reset.
+ */
+static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
+{
+	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
+			   reset_bits,
+			   reset_bits);
+
+	usleep_range(1000, 1100);
+	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
+			   reset_bits,
+			   ~reset_bits);
+	mdelay(10);
+}
+
+/* Disable all driver clocks in reverse enable order.  Clocks that were not
+ * provided are NULL, which clk_disable_unprepare() tolerates.
+ */
+static void mtk_clk_disable(struct mtk_eth *eth)
+{
+	int clk;
+
+	for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
+		clk_disable_unprepare(eth->clks[clk]);
+}
+
+/* Enable every driver clock; on failure roll back the clocks already
+ * enabled and return the error.
+ */
+static int mtk_clk_enable(struct mtk_eth *eth)
+{
+	int clk, ret;
+
+	for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
+		ret = clk_prepare_enable(eth->clks[clk]);
+		if (ret)
+			goto err_disable_clks;
+	}
+
+	return 0;
+
+err_disable_clks:
+	/* unwind only the clocks enabled before the failure */
+	while (--clk >= 0)
+		clk_disable_unprepare(eth->clks[clk]);
+
+	return ret;
+}
+
+/* One-time hardware bring-up (guarded by the MTK_HW_INIT state bit):
+ * runtime PM + clocks, frame-engine/PPE reset, pin driving setup, MAC
+ * link-down defaults, interrupt delay/grouping, and (NETSYS v2) PSE queue
+ * thresholds.  MT7628 takes a short, separate path.  Returns 0 or a
+ * negative errno; on failure runtime PM is released again.
+ *
+ * Fix: dropped the unused local 'val'.
+ */
+static int mtk_hw_init(struct mtk_eth *eth)
+{
+	int i, ret;
+
+	if (test_and_set_bit(MTK_HW_INIT, &eth->state))
+		return 0;
+
+	pm_runtime_enable(eth->dev);
+	pm_runtime_get_sync(eth->dev);
+
+	ret = mtk_clk_enable(eth);
+	if (ret)
+		goto err_disable_pm;
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+		ret = device_reset(eth->dev);
+		if (ret) {
+			dev_err(eth->dev, "MAC reset failed!\n");
+			goto err_disable_pm;
+		}
+
+		/* enable interrupt delay for RX */
+		mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
+
+		/* disable delay and normal interrupt */
+		mtk_tx_irq_disable(eth, ~0);
+		mtk_rx_irq_disable(eth, ~0);
+
+		return 0;
+	}
+
+	/* Non-MT7628 handling... */
+	ethsys_reset(eth, RSTCTRL_FE);
+	ethsys_reset(eth, RSTCTRL_PPE);
+
+	/* Set FE to PDMAv2 if necessary */
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
+		mtk_w32(eth, mtk_r32(eth, MTK_FE_GLO_MISC) | MTK_PDMA_V2, MTK_FE_GLO_MISC);
+
+	if (eth->pctl) {
+		/* Set GE2 driving and slew rate */
+		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
+
+		/* set GE2 TDSEL */
+		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
+
+		/* set GE2 TUNE */
+		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
+	}
+
+	/* Set linkdown as the default for each GMAC. Its own MCR would be set
+	 * up with the more appropriate value when mtk_mac_config call is being
+	 * invoked.
+	 */
+	for (i = 0; i < MTK_MAC_COUNT; i++)
+		mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
+
+	/* Enable RX VLan Offloading */
+	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
+
+	/* enable interrupt delay for RX/TX */
+	mtk_w32(eth, 0x8f0f8f0f, MTK_PDMA_DELAY_INT);
+	mtk_w32(eth, 0x8f0f8f0f, MTK_QDMA_DELAY_INT);
+
+	mtk_tx_irq_disable(eth, ~0);
+	mtk_rx_irq_disable(eth, ~0);
+
+	/* FE int grouping */
+	mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
+	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
+	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
+	mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
+	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2)) {
+		/* PSE config input/output queue threshold */
+		mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
+		mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
+		mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
+		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
+		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
+		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
+		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
+		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
+
+		mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
+		mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
+		mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
+		mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
+		mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
+		mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
+		mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
+		mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
+	}
+
+	return 0;
+
+err_disable_pm:
+	pm_runtime_put_sync(eth->dev);
+	pm_runtime_disable(eth->dev);
+
+	return ret;
+}
+
+/* Undo mtk_hw_init(): gate the clocks and release runtime PM.  Guarded by
+ * the MTK_HW_INIT state bit so it is a no-op if init never ran.
+ */
+static int mtk_hw_deinit(struct mtk_eth *eth)
+{
+	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
+		return 0;
+
+	mtk_clk_disable(eth);
+
+	pm_runtime_put_sync(eth->dev);
+	pm_runtime_disable(eth->dev);
+
+	return 0;
+}
+
+/* ndo_init: fetch the MAC address from the device tree node; fall back to a
+ * random address (logged) when the DT has none or it is invalid.
+ */
+static int __init mtk_init(struct net_device *dev)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+	const char *mac_addr;
+
+	mac_addr = of_get_mac_address(mac->of_node);
+	if (!IS_ERR(mac_addr))
+		ether_addr_copy(dev->dev_addr, mac_addr);
+
+	/* If the mac address is invalid, use random mac address */
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		eth_hw_addr_random(dev);
+		dev_err(eth->dev, "generated random MAC address %pM\n",
+			dev->dev_addr);
+	}
+
+	return 0;
+}
+
+/* ndo_uninit: detach the PHY and mask all TX/RX interrupts.
+ * NOTE(review): mtk_stop() also calls phylink_disconnect_phy(); verify the
+ * disconnect is not done twice on a down-then-unregister sequence.
+ */
+static void mtk_uninit(struct net_device *dev)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+
+	phylink_disconnect_phy(mac->phylink);
+	mtk_tx_irq_disable(eth, ~0);
+	mtk_rx_irq_disable(eth, ~0);
+}
+
+/* ndo_do_ioctl: MII register ioctls go through phylink; anything else is
+ * forwarded to the mtk_eth_dbg private-ioctl handler.
+ *
+ * Fix: removed the unreachable 'break' after 'return' and the dead trailing
+ * 'return -EOPNOTSUPP' - every switch arm already returns.
+ */
+static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		return phylink_mii_ioctl(mac->phylink, ifr, cmd);
+	default:
+		/* default invoke the mtk_eth_dbg handler */
+		return mtk_do_priv_ioctl(dev, ifr, cmd);
+	}
+}
+
+/* Deferred reset worker: under rtnl and the MTK_RESETTING state bit, stop
+ * every running netdev, fully de-init and re-init the hardware (including
+ * pinctrl default state), then reopen the devices that were up.  Devices
+ * that fail to reopen are closed.
+ */
+static void mtk_pending_work(struct work_struct *work)
+{
+	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
+	int err, i;
+	unsigned long restart = 0;
+
+	rtnl_lock();
+
+	dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
+
+	/* spin until we own the RESETTING bit */
+	while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
+		cpu_relax();
+
+	dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
+	/* stop all devices to make sure that dma is properly shut down */
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!eth->netdev[i])
+			continue;
+		mtk_stop(eth->netdev[i]);
+		__set_bit(i, &restart);
+	}
+	dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);
+
+	/* restart underlying hardware such as power, clock, pin mux
+	 * and the connected phy
+	 */
+	mtk_hw_deinit(eth);
+
+	if (eth->dev->pins)
+		pinctrl_select_state(eth->dev->pins->p,
+				     eth->dev->pins->default_state);
+	mtk_hw_init(eth);
+
+	/* restart DMA and enable IRQs */
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!test_bit(i, &restart))
+			continue;
+		err = mtk_open(eth->netdev[i]);
+		if (err) {
+			netif_alert(eth, ifup, eth->netdev[i],
+				    "Driver up/down cycle failed, closing device.\n");
+			dev_close(eth->netdev[i]);
+		}
+	}
+
+	dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
+
+	clear_bit_unlock(MTK_RESETTING, &eth->state);
+
+	rtnl_unlock();
+}
+
+/* Free every allocated netdev (does not unregister - see mtk_unreg_dev). */
+static int mtk_free_dev(struct mtk_eth *eth)
+{
+	int i;
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!eth->netdev[i])
+			continue;
+		free_netdev(eth->netdev[i]);
+	}
+
+	return 0;
+}
+
+/* Unregister every registered netdev (freeing is done separately). */
+static int mtk_unreg_dev(struct mtk_eth *eth)
+{
+	int i;
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!eth->netdev[i])
+			continue;
+		unregister_netdev(eth->netdev[i]);
+	}
+
+	return 0;
+}
+
+/* Full teardown: unregister + free the netdevs, cancel the reset worker. */
+static int mtk_cleanup(struct mtk_eth *eth)
+{
+	mtk_unreg_dev(eth);
+	mtk_free_dev(eth);
+	cancel_work_sync(&eth->pending_work);
+
+	return 0;
+}
+
+/* ethtool get_link_ksettings: delegate to phylink; refuse while resetting. */
+static int mtk_get_link_ksettings(struct net_device *ndev,
+				  struct ethtool_link_ksettings *cmd)
+{
+	struct mtk_mac *mac = netdev_priv(ndev);
+
+	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+		return -EBUSY;
+
+	return phylink_ethtool_ksettings_get(mac->phylink, cmd);
+}
+
+/* ethtool set_link_ksettings: delegate to phylink; refuse while resetting. */
+static int mtk_set_link_ksettings(struct net_device *ndev,
+				  const struct ethtool_link_ksettings *cmd)
+{
+	struct mtk_mac *mac = netdev_priv(ndev);
+
+	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+		return -EBUSY;
+
+	return phylink_ethtool_ksettings_set(mac->phylink, cmd);
+}
+
+/* ethtool get_drvinfo: report driver name, bus info and stat count. */
+static void mtk_get_drvinfo(struct net_device *dev,
+			    struct ethtool_drvinfo *info)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+
+	strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
+	strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
+	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
+}
+
+/* ethtool msglevel accessors - one level shared by both MACs. */
+static u32 mtk_get_msglevel(struct net_device *dev)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+
+	return mac->hw->msg_enable;
+}
+
+static void mtk_set_msglevel(struct net_device *dev, u32 value)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+
+	mac->hw->msg_enable = value;
+}
+
+/* ethtool nway_reset: restart autonegotiation through phylink. */
+static int mtk_nway_reset(struct net_device *dev)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+
+	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+		return -EBUSY;
+
+	if (!mac->phylink)
+		return -ENOTSUPP;
+
+	return phylink_ethtool_nway_reset(mac->phylink);
+}
+
+/* ethtool get_strings: copy the stat names for ETH_SS_STATS. */
+static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
+			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
+			data += ETH_GSTRING_LEN;
+		}
+		break;
+	}
+}
+
+/* ethtool get_sset_count: number of stats for ETH_SS_STATS. */
+static int mtk_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(mtk_ethtool_stats);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/* ethtool get_ethtool_stats: opportunistically refresh the HW counters,
+ * then copy them out under the u64_stats seqcount so a concurrent update
+ * restarts the snapshot.  Skipped entirely while resetting.
+ */
+static void mtk_get_ethtool_stats(struct net_device *dev,
+				  struct ethtool_stats *stats, u64 *data)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_hw_stats *hwstats = mac->hw_stats;
+	u64 *data_src, *data_dst;
+	unsigned int start;
+	int i;
+
+	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+		return;
+
+	if (netif_running(dev) && netif_device_present(dev)) {
+		/* best effort: skip the refresh if the lock is contended */
+		if (spin_trylock_bh(&hwstats->stats_lock)) {
+			mtk_stats_update_mac(mac);
+			spin_unlock_bh(&hwstats->stats_lock);
+		}
+	}
+
+	data_src = (u64 *)hwstats;
+
+	do {
+		data_dst = data;
+		start = u64_stats_fetch_begin_irq(&hwstats->syncp);
+
+		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
+			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
+	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
+}
+
+/* ethtool get_rxnfc: RX flow classification queries, only supported when
+ * the device advertises HW LRO; otherwise -EOPNOTSUPP.
+ */
+static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+			 u32 *rule_locs)
+{
+	int ret = -EOPNOTSUPP;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_GRXRINGS:
+		if (dev->hw_features & NETIF_F_LRO) {
+			cmd->data = MTK_MAX_RX_RING_NUM;
+			ret = 0;
+		}
+		break;
+	case ETHTOOL_GRXCLSRLCNT:
+		if (dev->hw_features & NETIF_F_LRO) {
+			struct mtk_mac *mac = netdev_priv(dev);
+
+			cmd->rule_cnt = mac->hwlro_ip_cnt;
+			ret = 0;
+		}
+		break;
+	case ETHTOOL_GRXCLSRULE:
+		if (dev->hw_features & NETIF_F_LRO)
+			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
+		break;
+	case ETHTOOL_GRXCLSRLALL:
+		if (dev->hw_features & NETIF_F_LRO)
+			ret = mtk_hwlro_get_fdir_all(dev, cmd,
+						     rule_locs);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/* ethtool set_rxnfc: add/delete HW LRO destination-IP rules. */
+static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+	int ret = -EOPNOTSUPP;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_SRXCLSRLINS:
+		if (dev->hw_features & NETIF_F_LRO)
+			ret = mtk_hwlro_add_ipaddr(dev, cmd);
+		break;
+	case ETHTOOL_SRXCLSRLDEL:
+		if (dev->hw_features & NETIF_F_LRO)
+			ret = mtk_hwlro_del_ipaddr(dev, cmd);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/* ethtool operations exposed on both MAC netdevs. */
+static const struct ethtool_ops mtk_ethtool_ops = {
+	.get_link_ksettings	= mtk_get_link_ksettings,
+	.set_link_ksettings	= mtk_set_link_ksettings,
+	.get_drvinfo		= mtk_get_drvinfo,
+	.get_msglevel		= mtk_get_msglevel,
+	.set_msglevel		= mtk_set_msglevel,
+	.nway_reset		= mtk_nway_reset,
+	.get_link		= ethtool_op_get_link,
+	.get_strings		= mtk_get_strings,
+	.get_sset_count		= mtk_get_sset_count,
+	.get_ethtool_stats	= mtk_get_ethtool_stats,
+	.get_rxnfc		= mtk_get_rxnfc,
+	.set_rxnfc		= mtk_set_rxnfc,
+};
+
+/* net_device operations shared by both MAC netdevs. */
+static const struct net_device_ops mtk_netdev_ops = {
+	.ndo_init		= mtk_init,
+	.ndo_uninit		= mtk_uninit,
+	.ndo_open		= mtk_open,
+	.ndo_stop		= mtk_stop,
+	.ndo_start_xmit		= mtk_start_xmit,
+	.ndo_set_mac_address	= mtk_set_mac_address,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_do_ioctl		= mtk_do_ioctl,
+	.ndo_tx_timeout		= mtk_tx_timeout,
+	.ndo_get_stats64	= mtk_get_stats64,
+	.ndo_fix_features	= mtk_fix_features,
+	.ndo_set_features	= mtk_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= mtk_poll_controller,
+#endif
+};
+
+/* Create one MAC netdev from a "mediatek,eth-mac" DT child node: validate
+ * the "reg" id, allocate the netdev + per-MAC stats, create the phylink
+ * instance and fill in ops/features.  Registration happens later in probe.
+ * Returns 0 or a negative errno (the netdev is freed on failure).
+ */
+static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
+{
+	const __be32 *_id = of_get_property(np, "reg", NULL);
+	struct phylink *phylink;
+	int phy_mode, id, err;
+	struct mtk_mac *mac;
+
+	if (!_id) {
+		dev_err(eth->dev, "missing mac id\n");
+		return -EINVAL;
+	}
+
+	id = be32_to_cpup(_id);
+	if (id >= MTK_MAC_COUNT) {
+		dev_err(eth->dev, "%d is not a valid mac id\n", id);
+		return -EINVAL;
+	}
+
+	if (eth->netdev[id]) {
+		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
+		return -EINVAL;
+	}
+
+	eth->netdev[id] = alloc_etherdev(sizeof(*mac));
+	if (!eth->netdev[id]) {
+		dev_err(eth->dev, "alloc_etherdev failed\n");
+		return -ENOMEM;
+	}
+	mac = netdev_priv(eth->netdev[id]);
+	eth->mac[id] = mac;
+	mac->id = id;
+	mac->hw = eth;
+	mac->of_node = np;
+
+	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
+	mac->hwlro_ip_cnt = 0;
+
+	mac->hw_stats = devm_kzalloc(eth->dev,
+				     sizeof(*mac->hw_stats),
+				     GFP_KERNEL);
+	if (!mac->hw_stats) {
+		dev_err(eth->dev, "failed to allocate counter memory\n");
+		err = -ENOMEM;
+		goto free_netdev;
+	}
+	spin_lock_init(&mac->hw_stats->stats_lock);
+	u64_stats_init(&mac->hw_stats->syncp);
+	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
+
+	/* phylink create */
+	phy_mode = of_get_phy_mode(np);
+	if (phy_mode < 0) {
+		dev_err(eth->dev, "incorrect phy-mode\n");
+		err = -EINVAL;
+		goto free_netdev;
+	}
+
+	/* mac config is not set */
+	mac->interface = PHY_INTERFACE_MODE_NA;
+	mac->mode = MLO_AN_PHY;
+	mac->speed = SPEED_UNKNOWN;
+
+	mac->phylink_config.dev = &eth->netdev[id]->dev;
+	mac->phylink_config.type = PHYLINK_NETDEV;
+
+	phylink = phylink_create(&mac->phylink_config,
+				 of_fwnode_handle(mac->of_node),
+				 phy_mode, &mtk_phylink_ops);
+	if (IS_ERR(phylink)) {
+		err = PTR_ERR(phylink);
+		goto free_netdev;
+	}
+
+	mac->phylink = phylink;
+
+	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
+	eth->netdev[id]->watchdog_timeo = 5 * HZ;
+	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
+	eth->netdev[id]->base_addr = (unsigned long)eth->base;
+
+	eth->netdev[id]->hw_features = eth->soc->hw_features;
+	if (eth->hwlro)
+		eth->netdev[id]->hw_features |= NETIF_F_LRO;
+
+	eth->netdev[id]->vlan_features = eth->soc->hw_features &
+		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
+	eth->netdev[id]->features |= eth->soc->hw_features;
+	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
+
+	eth->netdev[id]->irq = eth->irq[0];
+	eth->netdev[id]->dev.of_node = np;
+
+	return 0;
+
+free_netdev:
+	free_netdev(eth->netdev[id]);
+	return err;
+}
+
+/* Platform probe: map registers (and optional SRAM ring area), resolve
+ * SoC-dependent register layout, look up ethsys/infracfg/sgmii/pctl
+ * syscons, acquire IRQs and clocks, init the hardware, create one netdev
+ * per DT "mediatek,eth-mac" child, request IRQ handlers, init MDIO,
+ * register the netdevs and the dummy-netdev NAPI contexts, then set up
+ * debugfs/procfs.  Unwinds in reverse order on failure.
+ */
+static int mtk_probe(struct platform_device *pdev)
+{
+	struct device_node *mac_np;
+	struct mtk_eth *eth;
+	int err, i;
+
+	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
+	if (!eth)
+		return -ENOMEM;
+
+	eth->soc = of_device_get_match_data(&pdev->dev);
+
+	eth->dev = &pdev->dev;
+	eth->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(eth->base))
+		return PTR_ERR(eth->base);
+
+	if(eth->soc->has_sram) {
+		struct resource *res;
+
+		/* descriptor rings live in on-chip SRAM at a fixed offset */
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
+	}
+
+	/* TX completion interrupts come from QDMA when available, else PDMA */
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+		eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
+		eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
+	} else {
+		eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
+		eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
+	}
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+		eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
+		eth->ip_align = NET_IP_ALIGN;
+	} else {
+		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_RX_V2))
+			eth->rx_dma_l4_valid = RX_DMA_L4_VALID_V2;
+		else
+			eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
+	}
+
+	spin_lock_init(&eth->page_lock);
+	spin_lock_init(&eth->tx_irq_lock);
+	spin_lock_init(&eth->rx_irq_lock);
+
+	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+		eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+							      "mediatek,ethsys");
+		if (IS_ERR(eth->ethsys)) {
+			dev_err(&pdev->dev, "no ethsys regmap found\n");
+			return PTR_ERR(eth->ethsys);
+		}
+	}
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
+		eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+							     "mediatek,infracfg");
+		if (IS_ERR(eth->infra)) {
+			dev_err(&pdev->dev, "no infracfg regmap found\n");
+			return PTR_ERR(eth->infra);
+		}
+	}
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
+		eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
+					  GFP_KERNEL);
+		if (!eth->sgmii)
+			return -ENOMEM;
+
+		err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
+				     eth->soc->ana_rgc3);
+
+		if (err)
+			return err;
+	}
+
+	if (eth->soc->required_pctl) {
+		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+							    "mediatek,pctl");
+		if (IS_ERR(eth->pctl)) {
+			dev_err(&pdev->dev, "no pctl regmap found\n");
+			return PTR_ERR(eth->pctl);
+		}
+	}
+
+	/* irq[0] = shared/all, irq[1] = tx, irq[2] = rx; SoCs with a single
+	 * shared line reuse irq[0] for all three slots
+	 */
+	for (i = 0; i < 3; i++) {
+		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
+			eth->irq[i] = eth->irq[0];
+		else
+			eth->irq[i] = platform_get_irq(pdev, i);
+		if (eth->irq[i] < 0) {
+			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
+			return -ENXIO;
+		}
+	}
+
+	/* optional clocks become NULL; only soc->required_clks are fatal */
+	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
+		eth->clks[i] = devm_clk_get(eth->dev,
+					    mtk_clks_source_name[i]);
+		if (IS_ERR(eth->clks[i])) {
+			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
+				return -EPROBE_DEFER;
+			if (eth->soc->required_clks & BIT(i)) {
+				dev_err(&pdev->dev, "clock %s not found\n",
+					mtk_clks_source_name[i]);
+				return -EINVAL;
+			}
+			eth->clks[i] = NULL;
+		}
+	}
+
+	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
+	INIT_WORK(&eth->pending_work, mtk_pending_work);
+
+	err = mtk_hw_init(eth);
+	if (err)
+		return err;
+
+	eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
+
+	for_each_child_of_node(pdev->dev.of_node, mac_np) {
+		if (!of_device_is_compatible(mac_np,
+					     "mediatek,eth-mac"))
+			continue;
+
+		if (!of_device_is_available(mac_np))
+			continue;
+
+		err = mtk_add_mac(eth, mac_np);
+		if (err) {
+			of_node_put(mac_np);
+			goto err_deinit_hw;
+		}
+	}
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
+		err = devm_request_irq(eth->dev, eth->irq[0],
+				       mtk_handle_irq, 0,
+				       dev_name(eth->dev), eth);
+	} else {
+		err = devm_request_irq(eth->dev, eth->irq[1],
+				       mtk_handle_irq_tx, 0,
+				       dev_name(eth->dev), eth);
+		if (err)
+			goto err_free_dev;
+
+		err = devm_request_irq(eth->dev, eth->irq[2],
+				       mtk_handle_irq_rx, 0,
+				       dev_name(eth->dev), eth);
+	}
+	if (err)
+		goto err_free_dev;
+
+	/* No MT7628/88 support yet */
+	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+		err = mtk_mdio_init(eth);
+		if (err)
+			goto err_free_dev;
+	}
+
+	/* NOTE(review): netdevs registered before a later iteration fails are
+	 * not unregistered on the err_deinit_mdio path - confirm intended.
+	 */
+	for (i = 0; i < MTK_MAX_DEVS; i++) {
+		if (!eth->netdev[i])
+			continue;
+
+		err = register_netdev(eth->netdev[i]);
+		if (err) {
+			dev_err(eth->dev, "error bringing up device\n");
+			goto err_deinit_mdio;
+		} else
+			netif_info(eth, probe, eth->netdev[i],
+				   "mediatek frame engine at 0x%08lx, irq %d\n",
+				   eth->netdev[i]->base_addr, eth->irq[0]);
+	}
+
+	/* we run 2 devices on the same DMA ring so we need a dummy device
+	 * for NAPI to work
+	 */
+	init_dummy_netdev(&eth->dummy_dev);
+	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
+		       MTK_NAPI_WEIGHT);
+	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
+		       MTK_NAPI_WEIGHT);
+
+	mtketh_debugfs_init(eth);
+	debug_proc_init(eth);
+
+	platform_set_drvdata(pdev, eth);
+
+	return 0;
+
+err_deinit_mdio:
+	mtk_mdio_cleanup(eth);
+err_free_dev:
+	mtk_free_dev(eth);
+err_deinit_hw:
+	mtk_hw_deinit(eth);
+
+	return err;
+}
+
+/* Platform remove: stop every netdev (shuts down the shared DMA), de-init
+ * the hardware, delete the NAPI contexts and tear down netdevs and MDIO.
+ * NOTE(review): mtk_stop() already calls phylink_disconnect_phy(), so the
+ * explicit call here looks like a second disconnect - confirm balance.
+ */
+static int mtk_remove(struct platform_device *pdev)
+{
+	struct mtk_eth *eth = platform_get_drvdata(pdev);
+	struct mtk_mac *mac;
+	int i;
+
+	/* stop all devices to make sure that dma is properly shut down */
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!eth->netdev[i])
+			continue;
+		mtk_stop(eth->netdev[i]);
+		mac = netdev_priv(eth->netdev[i]);
+		phylink_disconnect_phy(mac->phylink);
+	}
+
+	mtk_hw_deinit(eth);
+
+	netif_napi_del(&eth->tx_napi);
+	netif_napi_del(&eth->rx_napi);
+	mtk_cleanup(eth);
+	mtk_mdio_cleanup(eth);
+
+	return 0;
+}
+
+/* Per-SoC configuration: capability flags, advertised netdev features,
+ * required clock bitmap, whether a pctl syscon is needed, and whether the
+ * descriptor rings live in dedicated SRAM.
+ */
+static const struct mtk_soc_data mt2701_data = {
+	.caps = MT7623_CAPS | MTK_HWLRO,
+	.hw_features = MTK_HW_FEATURES,
+	.required_clks = MT7623_CLKS_BITMAP,
+	.required_pctl = true,
+	.has_sram = false,
+};
+
+static const struct mtk_soc_data mt7621_data = {
+	.caps = MT7621_CAPS,
+	.hw_features = MTK_HW_FEATURES,
+	.required_clks = MT7621_CLKS_BITMAP,
+	.required_pctl = false,
+	.has_sram = false,
+};
+
+static const struct mtk_soc_data mt7622_data = {
+	.ana_rgc3 = 0x2028,
+	.caps = MT7622_CAPS | MTK_HWLRO,
+	.hw_features = MTK_HW_FEATURES,
+	.required_clks = MT7622_CLKS_BITMAP,
+	.required_pctl = false,
+	.has_sram = false,
+};
+
+static const struct mtk_soc_data mt7623_data = {
+	.caps = MT7623_CAPS | MTK_HWLRO,
+	.hw_features = MTK_HW_FEATURES,
+	.required_clks = MT7623_CLKS_BITMAP,
+	.required_pctl = true,
+	.has_sram = false,
+};
+
+static const struct mtk_soc_data mt7629_data = {
+	.ana_rgc3 = 0x128,
+	.caps = MT7629_CAPS | MTK_HWLRO,
+	.hw_features = MTK_HW_FEATURES,
+	.required_clks = MT7629_CLKS_BITMAP,
+	.required_pctl = false,
+	.has_sram = false,
+};
+
+static const struct mtk_soc_data mt7986_data = {
+	.ana_rgc3 = 0x128,
+	.caps = MT7986_CAPS,
+	/* Fix: .hw_features was missing; without it the MT7986 netdevs
+	 * register with no offload features, unlike every other SoC entry.
+	 */
+	.hw_features = MTK_HW_FEATURES,
+	.required_clks = MT7986_CLKS_BITMAP,
+	.required_pctl = false,
+	.has_sram = true,
+};
+
+/* RT5350/MT7628 class: PDMA-only, reduced feature set. */
+static const struct mtk_soc_data rt5350_data = {
+	.caps = MT7628_CAPS,
+	.hw_features = MTK_HW_FEATURES_MT7628,
+	.required_clks = MT7628_CLKS_BITMAP,
+	.required_pctl = false,
+	.has_sram = false,
+};
+
+/* Device-tree match table binding compatibles to their SoC data. */
+const struct of_device_id of_mtk_match[] = {
+	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
+	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
+	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
+	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
+	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
+	{ .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
+	{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
+	{},
+};
+MODULE_DEVICE_TABLE(of, of_mtk_match);
+
+static struct platform_driver mtk_driver = {
+	.probe = mtk_probe,
+	.remove = mtk_remove,
+	.driver = {
+		.name = "mtk_soc_eth",
+		.of_match_table = of_mtk_match,
+	},
+};
+
+module_platform_driver(mtk_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
+MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.h
new file mode 100755
index 0000000..f240e63
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -0,0 +1,1091 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
+ */
+
+#ifndef MTK_ETH_H
+#define MTK_ETH_H
+
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/of_net.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/refcount.h>
+#include <linux/phylink.h>
+
+#define MTK_QDMA_PAGE_SIZE 2048
+#define MTK_MAX_RX_LENGTH 1536
+#define MTK_DMA_SIZE 2048
+#define MTK_NAPI_WEIGHT 256
+#define MTK_MAC_COUNT 2
+#define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
+#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
+#define MTK_DMA_DUMMY_DESC 0xffffffff
+#define MTK_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+#define MTK_HW_FEATURES (NETIF_F_IP_CSUM | \
+ NETIF_F_RXCSUM | \
+ NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_SG | NETIF_F_TSO | \
+ NETIF_F_TSO6 | \
+ NETIF_F_IPV6_CSUM)
+#define MTK_SET_FEATURES (NETIF_F_LRO | \
+ NETIF_F_HW_VLAN_CTAG_RX)
+#define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM)
+#define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
+
+#define MTK_MAX_RX_RING_NUM 4
+#define MTK_HW_LRO_DMA_SIZE 8
+
+#define MTK_MAX_LRO_RX_LENGTH (4096 * 3)
+#define MTK_MAX_LRO_IP_CNT 2
+#define MTK_HW_LRO_TIMER_UNIT 1 /* 20 us */
+#define MTK_HW_LRO_REFRESH_TIME 50000 /* 1 sec. */
+#define MTK_HW_LRO_AGG_TIME 10 /* 200us */
+#define MTK_HW_LRO_AGE_TIME 50 /* 1ms */
+#define MTK_HW_LRO_MAX_AGG_CNT 64
+#define MTK_HW_LRO_BW_THRE 3000
+#define MTK_HW_LRO_REPLACE_DELTA 1000
+#define MTK_HW_LRO_SDL_REMAIN_ROOM 1522
+
+/* Frame Engine Global Reset Register */
+#define MTK_RST_GL 0x04
+#define RST_GL_PSE BIT(0)
+
+/* Frame Engine Interrupt Status Register */
+#define MTK_INT_STATUS2 0x08
+#define MTK_GDM1_AF BIT(28)
+#define MTK_GDM2_AF BIT(29)
+
+/* PDMA HW LRO Alter Flow Timer Register */
+#define MTK_PDMA_LRO_ALT_REFRESH_TIMER 0x1c
+
+/* Frame Engine Interrupt Grouping Register */
+#define MTK_FE_INT_GRP 0x20
+
+/* CDMQ Ingress Control Register */
+#define MTK_CDMQ_IG_CTRL	0x1400
+#define MTK_CDMQ_STAG_EN	BIT(0)
+
+/* CDMP Ingress Control Register */
+#define MTK_CDMP_IG_CTRL 0x400
+#define MTK_CDMP_STAG_EN BIT(0)
+
+/* CDMP Egress Control Register */
+#define MTK_CDMP_EG_CTRL	0x404
+
+/* GDM Egress Control Register */
+#define MTK_GDMA_FWD_CFG(x)	(0x500 + (x * 0x1000))
+#define MTK_GDMA_SPECIAL_TAG	BIT(24)
+#define MTK_GDMA_ICS_EN		BIT(22)
+#define MTK_GDMA_TCS_EN		BIT(21)
+#define MTK_GDMA_UCS_EN		BIT(20)
+#define MTK_GDMA_TO_PDMA	0x0
+#define MTK_GDMA_DROP_ALL	0x7777
+
+/* Unicast Filter MAC Address Register - Low */
+#define MTK_GDMA_MAC_ADRL(x) (0x508 + (x * 0x1000))
+
+/* Unicast Filter MAC Address Register - High */
+#define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
+
+/* Internal SRAM offset */
+#define MTK_ETH_SRAM_OFFSET 0x40000
+
+/* FE global misc reg*/
+#define MTK_FE_GLO_MISC 0x124
+
+/* PSE Input Queue Reservation Register*/
+#define PSE_IQ_REV(x) (0x140 + ((x - 1) * 0x4))
+
+/* PSE Output Queue Threshold Register*/
+#define PSE_OQ_TH(x) (0x160 + ((x - 1) * 0x4))
+
+#define MTK_PDMA_V2 BIT(4)
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+#define CONFIG_MEDIATEK_NETSYS_RX_V2 1
+
+#ifdef CONFIG_MEDIATEK_NETSYS_RX_V2
+#define PDMA_BASE 0x6000
+#else
+#define PDMA_BASE 0x4000
+#endif
+
+#define QDMA_BASE 0x4400
+#else
+#define PDMA_BASE 0x0800
+#define QDMA_BASE 0x1800
+#endif
+/* PDMA RX Base Pointer Register */
+#define MTK_PRX_BASE_PTR0 (PDMA_BASE + 0x100)
+#define MTK_PRX_BASE_PTR_CFG(x) (MTK_PRX_BASE_PTR0 + (x * 0x10))
+
+/* PDMA RX Maximum Count Register */
+#define MTK_PRX_MAX_CNT0 (MTK_PRX_BASE_PTR0 + 0x04)
+#define MTK_PRX_MAX_CNT_CFG(x) (MTK_PRX_MAX_CNT0 + (x * 0x10))
+
+/* PDMA RX CPU Pointer Register */
+#define MTK_PRX_CRX_IDX0 (MTK_PRX_BASE_PTR0 + 0x08)
+#define MTK_PRX_CRX_IDX_CFG(x) (MTK_PRX_CRX_IDX0 + (x * 0x10))
+
+/* PDMA HW LRO Control Registers */
+#define MTK_PDMA_LRO_CTRL_DW0 (PDMA_BASE + 0x180)
+#define MTK_LRO_EN BIT(0)
+#define MTK_L3_CKS_UPD_EN BIT(7)
+#define MTK_LRO_ALT_PKT_CNT_MODE BIT(21)
+#define MTK_LRO_RING_RELINQUISH_REQ (0x7 << 26)
+#define MTK_LRO_RING_RELINQUISH_DONE (0x7 << 29)
+
+#define MTK_PDMA_LRO_CTRL_DW1 (MTK_PDMA_LRO_CTRL_DW0 + 0x04)
+#define MTK_PDMA_LRO_CTRL_DW2 (MTK_PDMA_LRO_CTRL_DW0 + 0x08)
+#define MTK_PDMA_LRO_CTRL_DW3 (MTK_PDMA_LRO_CTRL_DW0 + 0x0c)
+#define MTK_ADMA_MODE BIT(15)
+#define MTK_LRO_MIN_RXD_SDL (MTK_HW_LRO_SDL_REMAIN_ROOM << 16)
+
+/* PDMA Global Configuration Register */
+#define MTK_PDMA_GLO_CFG (PDMA_BASE + 0x204)
+#define MTK_MULTI_EN BIT(10)
+#define MTK_PDMA_SIZE_8DWORDS (1 << 4)
+
+/* PDMA Reset Index Register */
+#define MTK_PDMA_RST_IDX (PDMA_BASE + 0x208)
+#define MTK_PST_DRX_IDX0 BIT(16)
+#define MTK_PST_DRX_IDX_CFG(x) (MTK_PST_DRX_IDX0 << (x))
+
+/* PDMA Delay Interrupt Register */
+#define MTK_PDMA_DELAY_INT (PDMA_BASE + 0x20c)
+#define MTK_PDMA_DELAY_RX_EN BIT(15)
+#define MTK_PDMA_DELAY_RX_PINT 4
+#define MTK_PDMA_DELAY_RX_PINT_SHIFT 8
+#define MTK_PDMA_DELAY_RX_PTIME 4
+#define MTK_PDMA_DELAY_RX_DELAY \
+ (MTK_PDMA_DELAY_RX_EN | MTK_PDMA_DELAY_RX_PTIME | \
+ (MTK_PDMA_DELAY_RX_PINT << MTK_PDMA_DELAY_RX_PINT_SHIFT))
+
+/* PDMA Interrupt Status Register */
+#define MTK_PDMA_INT_STATUS (PDMA_BASE + 0x220)
+
+/* PDMA Interrupt Mask Register */
+#define MTK_PDMA_INT_MASK (PDMA_BASE + 0x228)
+
+/* PDMA HW LRO Alter Flow Delta Register */
+#define MTK_PDMA_LRO_ALT_SCORE_DELTA (PDMA_BASE + 0x24c)
+
+/* PDMA Interrupt grouping registers */
+#define MTK_PDMA_INT_GRP1 (PDMA_BASE + 0x250)
+#define MTK_PDMA_INT_GRP2 (PDMA_BASE + 0x254)
+
+/* PDMA HW LRO IP Setting Registers */
+#define MTK_LRO_RX_RING0_DIP_DW0 (PDMA_BASE + 0x304)
+#define MTK_LRO_DIP_DW0_CFG(x) (MTK_LRO_RX_RING0_DIP_DW0 + (x * 0x40))
+#define MTK_RING_MYIP_VLD BIT(9)
+
+/* PDMA HW LRO Ring Control Registers */
+#define MTK_LRO_RX_RING0_CTRL_DW1 (PDMA_BASE + 0x328)
+#define MTK_LRO_RX_RING0_CTRL_DW2 (PDMA_BASE + 0x32c)
+#define MTK_LRO_RX_RING0_CTRL_DW3 (PDMA_BASE + 0x330)
+#define MTK_LRO_CTRL_DW1_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW1 + (x * 0x40))
+#define MTK_LRO_CTRL_DW2_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW2 + (x * 0x40))
+#define MTK_LRO_CTRL_DW3_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW3 + (x * 0x40))
+#define MTK_RING_AGE_TIME_L ((MTK_HW_LRO_AGE_TIME & 0x3ff) << 22)
+#define MTK_RING_AGE_TIME_H ((MTK_HW_LRO_AGE_TIME >> 10) & 0x3f)
+#define MTK_RING_AUTO_LERAN_MODE (3 << 6)
+#define MTK_RING_VLD BIT(8)
+#define MTK_RING_MAX_AGG_TIME ((MTK_HW_LRO_AGG_TIME & 0xffff) << 10)
+#define MTK_RING_MAX_AGG_CNT_L ((MTK_HW_LRO_MAX_AGG_CNT & 0x3f) << 26)
+#define MTK_RING_MAX_AGG_CNT_H ((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3)
+
+/* QDMA TX Queue Configuration Registers */
+#define MTK_QTX_CFG(x) (QDMA_BASE + (x * 0x10))
+#define QDMA_RES_THRES 4
+
+/* QDMA TX Queue Scheduler Registers */
+#define MTK_QTX_SCH(x) (QDMA_BASE + 4 + (x * 0x10))
+
+/* QDMA RX Base Pointer Register */
+#define MTK_QRX_BASE_PTR0 (QDMA_BASE + 0x100)
+#define MTK_QRX_BASE_PTR_CFG(x) (MTK_QRX_BASE_PTR0 + ((x) * 0x10))
+
+/* QDMA RX Maximum Count Register */
+#define MTK_QRX_MAX_CNT0 (QDMA_BASE + 0x104)
+#define MTK_QRX_MAX_CNT_CFG(x) (MTK_QRX_MAX_CNT0 + ((x) * 0x10))
+
+/* QDMA RX CPU Pointer Register */
+#define MTK_QRX_CRX_IDX0 (QDMA_BASE + 0x108)
+#define MTK_QRX_CRX_IDX_CFG(x) (MTK_QRX_CRX_IDX0 + ((x) * 0x10))
+
+/* QDMA RX DMA Pointer Register */
+#define MTK_QRX_DRX_IDX0 (QDMA_BASE + 0x10c)
+
+/* QDMA Global Configuration Register */
+#define MTK_QDMA_GLO_CFG (QDMA_BASE + 0x204)
+#define MTK_RX_2B_OFFSET BIT(31)
+#define MTK_RX_BT_32DWORDS (3 << 11)
+#define MTK_NDP_CO_PRO BIT(10)
+#define MTK_TX_WB_DDONE BIT(6)
+#define MTK_DMA_SIZE_16DWORDS (2 << 4)
+#define MTK_DMA_SIZE_32DWORDS (3 << 4)
+#define MTK_RX_DMA_BUSY BIT(3)
+#define MTK_TX_DMA_BUSY BIT(1)
+#define MTK_RX_DMA_EN BIT(2)
+#define MTK_TX_DMA_EN BIT(0)
+#define MTK_DMA_BUSY_TIMEOUT HZ
+
+/* QDMA V2 Global Configuration Register */
+#define MTK_CHK_DDONE_EN BIT(28)
+#define MTK_DMAD_WR_WDONE BIT(26)
+#define MTK_WCOMP_EN BIT(24)
+#define MTK_RESV_BUF (0x40 << 16)
+#define MTK_MUTLI_CNT (0x4 << 12)
+
+/* QDMA Reset Index Register */
+#define MTK_QDMA_RST_IDX (QDMA_BASE + 0x208)
+
+/* QDMA Delay Interrupt Register */
+#define MTK_QDMA_DELAY_INT (QDMA_BASE + 0x20c)
+
+/* QDMA Flow Control Register */
+#define MTK_QDMA_FC_THRES (QDMA_BASE + 0x210)
+#define FC_THRES_DROP_MODE BIT(20)
+#define FC_THRES_DROP_EN (7 << 16)
+#define FC_THRES_MIN 0x4444
+
+/* QDMA Interrupt Status Register */
+#define MTK_QDMA_INT_STATUS (QDMA_BASE + 0x218)
+#ifdef CONFIG_MEDIATEK_NETSYS_RX_V2
+#define MTK_RX_DONE_DLY BIT(14)
+#else
+#define MTK_RX_DONE_DLY BIT(30)
+#endif
+#define MTK_RX_DONE_INT3 BIT(19)
+#define MTK_RX_DONE_INT2 BIT(18)
+#define MTK_RX_DONE_INT1 BIT(17)
+#define MTK_RX_DONE_INT0 BIT(16)
+#define MTK_TX_DONE_INT3 BIT(3)
+#define MTK_TX_DONE_INT2 BIT(2)
+#define MTK_TX_DONE_INT1 BIT(1)
+#define MTK_TX_DONE_INT0 BIT(0)
+#define MTK_RX_DONE_INT MTK_RX_DONE_DLY
+#define MTK_TX_DONE_DLY BIT(28)
+#define MTK_TX_DONE_INT MTK_TX_DONE_DLY
+
+/* QDMA Interrupt grouping registers */
+#define MTK_QDMA_INT_GRP1 (QDMA_BASE + 0x220)
+#define MTK_QDMA_INT_GRP2 (QDMA_BASE + 0x224)
+#define MTK_RLS_DONE_INT BIT(0)
+
+/* QDMA Interrupt Mask Register */
+#define MTK_QDMA_INT_MASK (QDMA_BASE + 0x21c)
+
+/* QDMA HRED2 (hardware RED) Register */
+#define MTK_QDMA_HRED2 (QDMA_BASE + 0x244)
+
+/* QDMA TX Forward CPU Pointer Register */
+#define MTK_QTX_CTX_PTR (QDMA_BASE +0x300)
+
+/* QDMA TX Forward DMA Pointer Register */
+#define MTK_QTX_DTX_PTR (QDMA_BASE +0x304)
+
+/* QDMA TX Release CPU Pointer Register */
+#define MTK_QTX_CRX_PTR (QDMA_BASE +0x310)
+
+/* QDMA TX Release DMA Pointer Register */
+#define MTK_QTX_DRX_PTR (QDMA_BASE +0x314)
+
+/* QDMA FQ Head Pointer Register */
+#define MTK_QDMA_FQ_HEAD (QDMA_BASE +0x320)
+
+/* QDMA FQ Tail Pointer Register */
+#define MTK_QDMA_FQ_TAIL (QDMA_BASE +0x324)
+
+/* QDMA FQ Free Page Counter Register */
+#define MTK_QDMA_FQ_CNT (QDMA_BASE +0x328)
+
+/* QDMA FQ Free Page Buffer Length Register */
+#define MTK_QDMA_FQ_BLEN (QDMA_BASE +0x32c)
+
+/* GDM1 Transmit Good Byte Count Register (MIB counter base) */
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+#define MTK_GDM1_TX_GBCNT 0x1C00
+#else
+#define MTK_GDM1_TX_GBCNT 0x2400
+#endif
+#define MTK_STAT_OFFSET 0x40
+
+/* QDMA TX NUM */
+#define MTK_QDMA_TX_NUM 16
+#define MTK_QDMA_TX_MASK ((MTK_QDMA_TX_NUM) - 1)
+#define QID_LOW_BITS(x) ((x) & 0xf)
+#define QID_HIGH_BITS(x) ((((x) >> 4) & 0x3) << 20)
+#define QID_BITS_V2(x) (((x) & 0x3f) << 16)
+
+/* QDMA V2 descriptor txd6 */
+#define TX_DMA_INS_VLAN_V2 BIT(16)
+
+/* QDMA V2 descriptor txd5 */
+#define TX_DMA_CHKSUM_V2 (0x7 << 28)
+#define TX_DMA_TSO_V2 BIT(31)
+
+/* QDMA V2 descriptor txd4 */
+#define TX_DMA_FPORT_SHIFT_V2 8
+#define TX_DMA_FPORT_MASK_V2 0xf
+#define TX_DMA_SWC_V2 BIT(30)
+
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+#define MTK_TX_DMA_BUF_LEN 0xffff
+#define MTK_TX_DMA_BUF_SHIFT 8
+#else
+#define MTK_TX_DMA_BUF_LEN 0x3fff
+#define MTK_TX_DMA_BUF_SHIFT 16
+#endif
+
+#ifdef CONFIG_MEDIATEK_NETSYS_RX_V2
+#define MTK_RX_DMA_BUF_LEN 0xffff
+#define MTK_RX_DMA_BUF_SHIFT 8
+#define RX_DMA_SPORT_SHIFT 26
+#define RX_DMA_SPORT_MASK 0xf
+#else
+#define MTK_RX_DMA_BUF_LEN 0x3fff
+#define MTK_RX_DMA_BUF_SHIFT 16
+#define RX_DMA_SPORT_SHIFT 19
+#define RX_DMA_SPORT_MASK 0x7
+#endif
+
+/* QDMA descriptor txd4 */
+#define TX_DMA_CHKSUM (0x7 << 29)
+#define TX_DMA_TSO BIT(28)
+#define TX_DMA_FPORT_SHIFT 25
+#define TX_DMA_FPORT_MASK 0x7
+#define TX_DMA_INS_VLAN BIT(16)
+
+/* QDMA descriptor txd3 */
+#define TX_DMA_OWNER_CPU BIT(31)
+#define TX_DMA_LS0 BIT(30)
+#define TX_DMA_PLEN0(_x) (((_x) & MTK_TX_DMA_BUF_LEN) << MTK_TX_DMA_BUF_SHIFT)
+#define TX_DMA_PLEN1(_x) ((_x) & MTK_TX_DMA_BUF_LEN)
+#define TX_DMA_SWC BIT(14)
+#define TX_DMA_SDL(_x) (TX_DMA_PLEN0(_x))
+
+/* PDMA on MT7628 */
+#define TX_DMA_DONE BIT(31)
+#define TX_DMA_LS1 BIT(14)
+#define TX_DMA_DESP2_DEF (TX_DMA_LS0 | TX_DMA_DONE)
+
+/* QDMA descriptor rxd2 */
+#define RX_DMA_DONE BIT(31)
+#define RX_DMA_LSO BIT(30)
+#define RX_DMA_PLEN0(_x) (((_x) & MTK_RX_DMA_BUF_LEN) << MTK_RX_DMA_BUF_SHIFT)
+#define RX_DMA_GET_PLEN0(_x) (((_x) >> MTK_RX_DMA_BUF_SHIFT) & MTK_RX_DMA_BUF_LEN)
+#define RX_DMA_VTAG BIT(15)
+
+/* QDMA descriptor rxd3 */
+#define RX_DMA_VID(_x) ((_x) & VLAN_VID_MASK)
+#define RX_DMA_TCI(_x) ((_x) & (VLAN_PRIO_MASK | VLAN_VID_MASK))
+#define RX_DMA_VPID(_x) (((_x) >> 16) & 0xffff)
+
+/* QDMA descriptor rxd4 */
+#define RX_DMA_L4_VALID BIT(24)
+#define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
+#define RX_DMA_SPECIAL_TAG BIT(22) /* switch header in packet */
+
+#define RX_DMA_GET_SPORT(_x) (((_x) >> RX_DMA_SPORT_SHIFT) & RX_DMA_SPORT_MASK)
+
+/* PDMA V2 descriptor rxd3 */
+#define RX_DMA_VTAG_V2 BIT(0)
+#define RX_DMA_L4_VALID_V2 BIT(2)
+
+/* PDMA V2 descriptor rxd4 */
+#define RX_DMA_VID_V2(_x) RX_DMA_VID(_x)
+#define RX_DMA_TCI_V2(_x) (((_x) >> 1) & (VLAN_PRIO_MASK | VLAN_VID_MASK))
+#define RX_DMA_VPID_V2(x3, x4) ((((x3) & 1) << 15) | (((x4) >> 17) & 0x7fff))
+
+/* PHY Indirect Access Control registers */
+#define MTK_PHY_IAC 0x10004
+#define PHY_IAC_ACCESS BIT(31)
+#define PHY_IAC_READ BIT(19)
+#define PHY_IAC_WRITE BIT(18)
+#define PHY_IAC_START BIT(16)
+#define PHY_IAC_ADDR_SHIFT 20
+#define PHY_IAC_REG_SHIFT 25
+#define PHY_IAC_TIMEOUT HZ
+
+#define MTK_MAC_MISC 0x1000c
+#define MTK_MUX_TO_ESW BIT(0)
+
+/* Mac control registers */
+#define MTK_MAC_MCR(x) (0x10100 + (x * 0x100))
+#define MAC_MCR_MAX_RX_1536 BIT(24)
+#define MAC_MCR_IPG_CFG (BIT(18) | BIT(16))
+#define MAC_MCR_FORCE_MODE BIT(15)
+#define MAC_MCR_TX_EN BIT(14)
+#define MAC_MCR_RX_EN BIT(13)
+#define MAC_MCR_BACKOFF_EN BIT(9)
+#define MAC_MCR_BACKPR_EN BIT(8)
+#define MAC_MCR_FORCE_RX_FC BIT(5)
+#define MAC_MCR_FORCE_TX_FC BIT(4)
+#define MAC_MCR_SPEED_1000 BIT(3)
+#define MAC_MCR_SPEED_100 BIT(2)
+#define MAC_MCR_FORCE_DPX BIT(1)
+#define MAC_MCR_FORCE_LINK BIT(0)
+#define MAC_MCR_FORCE_LINK_DOWN (MAC_MCR_FORCE_MODE)
+
+/* Mac status registers */
+#define MTK_MAC_MSR(x) (0x10108 + (x * 0x100))
+#define MAC_MSR_EEE1G BIT(7)
+#define MAC_MSR_EEE100M BIT(6)
+#define MAC_MSR_RX_FC BIT(5)
+#define MAC_MSR_TX_FC BIT(4)
+#define MAC_MSR_SPEED_1000 BIT(3)
+#define MAC_MSR_SPEED_100 BIT(2)
+#define MAC_MSR_SPEED_MASK (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)
+#define MAC_MSR_DPX BIT(1)
+#define MAC_MSR_LINK BIT(0)
+
+/* TRGMII RXC control register */
+#define TRGMII_RCK_CTRL 0x10300
+#define DQSI0(x) ((x << 0) & GENMASK(6, 0))
+#define DQSI1(x) ((x << 8) & GENMASK(14, 8))
+#define RXCTL_DMWTLAT(x) ((x << 16) & GENMASK(18, 16))
+#define RXC_RST BIT(31)
+#define RXC_DQSISEL BIT(30)
+#define RCK_CTRL_RGMII_1000 (RXC_DQSISEL | RXCTL_DMWTLAT(2) | DQSI1(16))
+#define RCK_CTRL_RGMII_10_100 RXCTL_DMWTLAT(2)
+
+#define NUM_TRGMII_CTRL 5
+
+/* TRGMII TXC control register */
+#define TRGMII_TCK_CTRL 0x10340
+#define TXCTL_DMWTLAT(x) ((x << 16) & GENMASK(18, 16))
+#define TXC_INV BIT(30)
+#define TCK_CTRL_RGMII_1000 TXCTL_DMWTLAT(2)
+#define TCK_CTRL_RGMII_10_100 (TXC_INV | TXCTL_DMWTLAT(2))
+
+/* TRGMII TX Drive Strength */
+#define TRGMII_TD_ODT(i) (0x10354 + 8 * (i))
+#define TD_DM_DRVP(x) ((x) & 0xf)
+#define TD_DM_DRVN(x) (((x) & 0xf) << 4)
+
+/* TRGMII Interface mode register */
+#define INTF_MODE 0x10390
+#define TRGMII_INTF_DIS BIT(0)
+#define TRGMII_MODE BIT(1)
+#define TRGMII_CENTRAL_ALIGNED BIT(2)
+#define INTF_MODE_RGMII_1000 (TRGMII_MODE | TRGMII_CENTRAL_ALIGNED)
+#define INTF_MODE_RGMII_10_100 0
+
+/* GPIO port control registers for GMAC 2*/
+#define GPIO_OD33_CTRL8 0x4c0
+#define GPIO_BIAS_CTRL 0xed0
+#define GPIO_DRV_SEL10 0xf00
+
+/* ethernet subsystem chip id register */
+#define ETHSYS_CHIPID0_3 0x0
+#define ETHSYS_CHIPID4_7 0x4
+#define MT7623_ETH 7623
+#define MT7622_ETH 7622
+#define MT7621_ETH 7621
+
+/* ethernet system control register */
+#define ETHSYS_SYSCFG 0x10
+#define SYSCFG_DRAM_TYPE_DDR2 BIT(4)
+
+/* ethernet subsystem config register */
+#define ETHSYS_SYSCFG0 0x14
+#define SYSCFG0_GE_MASK 0x3
+#define SYSCFG0_GE_MODE(x, y) (x << (12 + (y * 2)))
+#define SYSCFG0_SGMII_MASK GENMASK(9, 8)
+#define SYSCFG0_SGMII_GMAC1 ((2 << 8) & SYSCFG0_SGMII_MASK)
+#define SYSCFG0_SGMII_GMAC2 ((3 << 8) & SYSCFG0_SGMII_MASK)
+#define SYSCFG0_SGMII_GMAC1_V2 BIT(9)
+#define SYSCFG0_SGMII_GMAC2_V2 BIT(8)
+
+
+/* ethernet subsystem clock register */
+#define ETHSYS_CLKCFG0 0x2c
+#define ETHSYS_TRGMII_CLK_SEL362_5 BIT(11)
+#define ETHSYS_TRGMII_MT7621_MASK (BIT(5) | BIT(6))
+#define ETHSYS_TRGMII_MT7621_APLL BIT(6)
+#define ETHSYS_TRGMII_MT7621_DDR_PLL BIT(5)
+
+/* ethernet reset control register */
+#define ETHSYS_RSTCTRL 0x34
+#define RSTCTRL_FE BIT(6)
+#define RSTCTRL_PPE BIT(31)
+
+/* SGMII subsystem config registers */
+/* Register to auto-negotiation restart */
+#define SGMSYS_PCS_CONTROL_1 0x0
+#define SGMII_AN_RESTART BIT(9)
+#define SGMII_ISOLATE BIT(10)
+#define SGMII_AN_ENABLE BIT(12)
+#define SGMII_LINK_STATYS BIT(18)
+#define SGMII_AN_ABILITY BIT(19)
+#define SGMII_AN_COMPLETE BIT(21)
+#define SGMII_PCS_FAULT BIT(23)
+#define SGMII_AN_EXPANSION_CLR BIT(30)
+
+/* Register to programmable link timer, the unit in 2 * 8ns */
+#define SGMSYS_PCS_LINK_TIMER 0x18
+#define SGMII_LINK_TIMER_DEFAULT (0x186a0 & GENMASK(19, 0))
+
+/* Register to control remote fault */
+#define SGMSYS_SGMII_MODE 0x20
+#define SGMII_IF_MODE_BIT0 BIT(0)
+#define SGMII_SPEED_DUPLEX_AN BIT(1)
+#define SGMII_SPEED_10 0x0
+#define SGMII_SPEED_100 BIT(2)
+#define SGMII_SPEED_1000 BIT(3)
+#define SGMII_DUPLEX_FULL BIT(4)
+#define SGMII_IF_MODE_BIT5 BIT(5)
+#define SGMII_REMOTE_FAULT_DIS BIT(8)
+#define SGMII_CODE_SYNC_SET_VAL BIT(9)
+#define SGMII_CODE_SYNC_SET_EN BIT(10)
+#define SGMII_SEND_AN_ERROR_EN BIT(11)
+#define SGMII_IF_MODE_MASK GENMASK(5, 1)
+
+/* Register to set SGMII speed, ANA RG_ Control Signals III*/
+#define SGMSYS_ANA_RG_CS3 0x2028
+#define RG_PHY_SPEED_MASK (BIT(2) | BIT(3))
+#define RG_PHY_SPEED_1_25G 0x0
+#define RG_PHY_SPEED_3_125G BIT(2)
+
+/* Register to power up QPHY */
+#define SGMSYS_QPHY_PWR_STATE_CTRL 0xe8
+#define SGMII_PHYA_PWD BIT(4)
+
+/* Infrasys subsystem config registers */
+#define INFRA_MISC2 0x70c
+#define CO_QPHY_SEL BIT(0)
+#define GEPHY_MAC_SEL BIT(1)
+
+/*MDIO control*/
+#define MII_MMD_ACC_CTL_REG 0x0d
+#define MII_MMD_ADDR_DATA_REG 0x0e
+#define MMD_OP_MODE_DATA BIT(14)
+
+/* MT7628/88 specific stuff */
+#define MT7628_PDMA_OFFSET 0x0800
+#define MT7628_SDM_OFFSET 0x0c00
+
+#define MT7628_TX_BASE_PTR0 (MT7628_PDMA_OFFSET + 0x00)
+#define MT7628_TX_MAX_CNT0 (MT7628_PDMA_OFFSET + 0x04)
+#define MT7628_TX_CTX_IDX0 (MT7628_PDMA_OFFSET + 0x08)
+#define MT7628_TX_DTX_IDX0 (MT7628_PDMA_OFFSET + 0x0c)
+#define MT7628_PST_DTX_IDX0 BIT(0)
+
+#define MT7628_SDM_MAC_ADRL (MT7628_SDM_OFFSET + 0x0c)
+#define MT7628_SDM_MAC_ADRH (MT7628_SDM_OFFSET + 0x10)
+
+/* Hardware RX DMA descriptor. The field order and 4-byte packed layout
+ * mirror what the DMA engine expects - do not reorder. NETSYS RX V2 SoCs
+ * use an extended 8-word descriptor (rxd5-rxd8), selected at build time.
+ */
+struct mtk_rx_dma {
+ unsigned int rxd1;
+ unsigned int rxd2;
+ unsigned int rxd3;
+ unsigned int rxd4;
+#ifdef CONFIG_MEDIATEK_NETSYS_RX_V2
+ unsigned int rxd5;
+ unsigned int rxd6;
+ unsigned int rxd7;
+ unsigned int rxd8;
+#endif
+} __packed __aligned(4);
+
+/* Hardware TX DMA descriptor. The field order and 4-byte packed layout
+ * mirror what the DMA engine expects - do not reorder. NETSYS V2 SoCs
+ * use an extended 8-word descriptor (txd5-txd8), selected at build time.
+ */
+struct mtk_tx_dma {
+ unsigned int txd1;
+ unsigned int txd2;
+ unsigned int txd3;
+ unsigned int txd4;
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+ unsigned int txd5;
+ unsigned int txd6;
+ unsigned int txd7;
+ unsigned int txd8;
+#endif
+} __packed __aligned(4);
+
+struct mtk_eth;
+struct mtk_mac;
+
+/* struct mtk_hw_stats - the structure that holds the traffic statistics.
+ * @tx_bytes:		transmitted byte count
+ * @tx_packets:		transmitted packet count
+ * @tx_skip:		skipped/aborted transmit count
+ * @tx_collisions:	transmit collision count
+ * @rx_bytes:		received byte count
+ * @rx_packets:		received packet count
+ * @rx_overflow:	frames dropped due to RX overflow
+ * @rx_fcs_errors:	frames received with a bad FCS
+ * @rx_short_errors:	undersized frames received
+ * @rx_long_errors:	oversized frames received
+ * @rx_checksum_errors:	frames failing the HW checksum check
+ * @rx_flow_control_packets: flow-control (pause) frames received
+ * @stats_lock: make sure that stats operations are atomic
+ * @reg_offset: the status register offset of the SoC
+ * @syncp: u64_stats_sync seqcount protecting consistent 64-bit reads
+ * of the counters (not a refcount)
+ *
+ * All of the supported SoCs have hardware counters for traffic statistics.
+ * Whenever the status IRQ triggers we can read the latest stats from these
+ * counters and store them in this struct.
+ */
+struct mtk_hw_stats {
+ u64 tx_bytes;
+ u64 tx_packets;
+ u64 tx_skip;
+ u64 tx_collisions;
+ u64 rx_bytes;
+ u64 rx_packets;
+ u64 rx_overflow;
+ u64 rx_fcs_errors;
+ u64 rx_short_errors;
+ u64 rx_long_errors;
+ u64 rx_checksum_errors;
+ u64 rx_flow_control_packets;
+
+ spinlock_t stats_lock;
+ u32 reg_offset;
+ struct u64_stats_sync syncp;
+};
+
+/* Bit flags stored in mtk_tx_buf::flags. */
+enum mtk_tx_flags {
+ /* PDMA descriptor can point at 1-2 segments. This enum allows us to
+ * track how memory was allocated so that it can be freed properly.
+ */
+ MTK_TX_FLAGS_SINGLE0 = 0x01,
+ MTK_TX_FLAGS_PAGE0 = 0x02,
+
+ /* MTK_TX_FLAGS_FPORTx records which port the SKB was transmitted
+ * from, instead of looking it up in the hardware TX descriptor.
+ */
+ MTK_TX_FLAGS_FPORT0 = 0x04,
+ MTK_TX_FLAGS_FPORT1 = 0x08,
+};
+
+/* Index of each required clock in the driver's clock array
+ * (mtk_eth::clks); the order here must match that array.
+ * MTK_CLK_MAX is the array size.
+ */
+enum mtk_clks_map {
+ MTK_CLK_ETHIF,
+ MTK_CLK_SGMIITOP,
+ MTK_CLK_ESW,
+ MTK_CLK_GP0,
+ MTK_CLK_GP1,
+ MTK_CLK_GP2,
+ MTK_CLK_FE,
+ MTK_CLK_TRGPLL,
+ MTK_CLK_SGMII_TX_250M,
+ MTK_CLK_SGMII_RX_250M,
+ MTK_CLK_SGMII_CDR_REF,
+ MTK_CLK_SGMII_CDR_FB,
+ MTK_CLK_SGMII2_TX_250M,
+ MTK_CLK_SGMII2_RX_250M,
+ MTK_CLK_SGMII2_CDR_REF,
+ MTK_CLK_SGMII2_CDR_FB,
+ MTK_CLK_SGMII_CK,
+ MTK_CLK_ETH2PLL,
+ MTK_CLK_WOCPU0,
+ MTK_CLK_WOCPU1,
+ MTK_CLK_MAX
+};
+
+#define MT7623_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \
+ BIT(MTK_CLK_GP1) | BIT(MTK_CLK_GP2) | \
+ BIT(MTK_CLK_TRGPLL))
+#define MT7622_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \
+ BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \
+ BIT(MTK_CLK_GP2) | \
+ BIT(MTK_CLK_SGMII_TX_250M) | \
+ BIT(MTK_CLK_SGMII_RX_250M) | \
+ BIT(MTK_CLK_SGMII_CDR_REF) | \
+ BIT(MTK_CLK_SGMII_CDR_FB) | \
+ BIT(MTK_CLK_SGMII_CK) | \
+ BIT(MTK_CLK_ETH2PLL))
+#define MT7621_CLKS_BITMAP (0)
+#define MT7628_CLKS_BITMAP (0)
+#define MT7629_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \
+ BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \
+ BIT(MTK_CLK_GP2) | BIT(MTK_CLK_FE) | \
+ BIT(MTK_CLK_SGMII_TX_250M) | \
+ BIT(MTK_CLK_SGMII_RX_250M) | \
+ BIT(MTK_CLK_SGMII_CDR_REF) | \
+ BIT(MTK_CLK_SGMII_CDR_FB) | \
+ BIT(MTK_CLK_SGMII2_TX_250M) | \
+ BIT(MTK_CLK_SGMII2_RX_250M) | \
+ BIT(MTK_CLK_SGMII2_CDR_REF) | \
+ BIT(MTK_CLK_SGMII2_CDR_FB) | \
+ BIT(MTK_CLK_SGMII_CK) | \
+ BIT(MTK_CLK_ETH2PLL) | BIT(MTK_CLK_SGMIITOP))
+
+#define MT7986_CLKS_BITMAP (BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \
+ BIT(MTK_CLK_WOCPU1) | BIT(MTK_CLK_WOCPU0) | \
+ BIT(MTK_CLK_SGMII_TX_250M) | \
+ BIT(MTK_CLK_SGMII_RX_250M) | \
+ BIT(MTK_CLK_SGMII_CDR_REF) | \
+ BIT(MTK_CLK_SGMII_CDR_FB) | \
+ BIT(MTK_CLK_SGMII2_TX_250M) | \
+ BIT(MTK_CLK_SGMII2_RX_250M) | \
+ BIT(MTK_CLK_SGMII2_CDR_REF) | \
+ BIT(MTK_CLK_SGMII2_CDR_FB))
+
+/* Driver lifecycle flags. NOTE(review): presumably used as bit numbers in
+ * mtk_eth::state (an unsigned long) - confirm against set_bit/test_bit users.
+ */
+enum mtk_dev_state {
+ MTK_HW_INIT,
+ MTK_RESETTING
+};
+
+/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
+ * by the TX descriptors
+ * @skb: The SKB pointer of the packet being sent
+ * @flags: mtk_tx_flags bits recording how the segments were mapped
+ * and which forward port was used
+ * @dma_addr0: The base addr of the first segment
+ * @dma_len0: The length of the first segment
+ * @dma_addr1: The base addr of the second segment
+ * @dma_len1: The length of the second segment
+ */
+struct mtk_tx_buf {
+ struct sk_buff *skb;
+ u32 flags;
+ DEFINE_DMA_UNMAP_ADDR(dma_addr0);
+ DEFINE_DMA_UNMAP_LEN(dma_len0);
+ DEFINE_DMA_UNMAP_ADDR(dma_addr1);
+ DEFINE_DMA_UNMAP_LEN(dma_len1);
+};
+
+/* struct mtk_tx_ring - This struct holds info describing a TX ring
+ * @dma: The descriptor ring
+ * @buf: The memory pointed at by the ring
+ * @phys: The physical addr of tx_buf
+ * @next_free: Pointer to the next free descriptor
+ * @last_free: Pointer to the last free descriptor
+ * @thresh: The threshold of minimum amount of free descriptors
+ * @free_count: QDMA uses a linked list. Track how many free descriptors
+ * are present
+ * @dma_size: number of descriptors in the ring
+ * @dma_pdma: mirror descriptor ring for MT7628/88 PDMA handling
+ * @phys_pdma: physical address of @dma_pdma
+ * @cpu_idx: CPU index into the PDMA ring (MT7628/88 path - confirm)
+ */
+struct mtk_tx_ring {
+ struct mtk_tx_dma *dma;
+ struct mtk_tx_buf *buf;
+ dma_addr_t phys;
+ struct mtk_tx_dma *next_free;
+ struct mtk_tx_dma *last_free;
+ u16 thresh;
+ atomic_t free_count;
+ int dma_size;
+ struct mtk_tx_dma *dma_pdma; /* For MT7628/88 PDMA handling */
+ dma_addr_t phys_pdma;
+ int cpu_idx;
+};
+
+/* PDMA rx ring mode */
+enum mtk_rx_flags {
+ MTK_RX_FLAGS_NORMAL = 0, /* plain RX ring */
+ MTK_RX_FLAGS_HWLRO, /* ring used for HW LRO aggregation */
+ MTK_RX_FLAGS_QDMA, /* ring serviced via the QDMA engine */
+};
+
+/* struct mtk_rx_ring - This struct holds info describing a RX ring
+ * @dma: The descriptor ring
+ * @data: The memory pointed at by the ring
+ * @phys: The physical addr of rx_buf
+ * @frag_size: How big can each fragment be
+ * @buf_size: The size of each packet buffer
+ * @dma_size: number of descriptors in the ring
+ * @calc_idx_update: NOTE(review): presumably flags a pending write-back of
+ * @calc_idx to hardware - confirm against the RX poll path
+ * @calc_idx: The current head of ring
+ * @crx_idx_reg: register offset of this ring's CPU (CRX) index
+ */
+struct mtk_rx_ring {
+ struct mtk_rx_dma *dma;
+ u8 **data;
+ dma_addr_t phys;
+ u16 frag_size;
+ u16 buf_size;
+ u16 dma_size;
+ bool calc_idx_update;
+ u16 calc_idx;
+ u32 crx_idx_reg;
+};
+
+/* Capability / MUX / path bit numbers used to build the per-SoC capability
+ * bitmaps below (tested via MTK_HAS_CAPS).
+ * NOTE(review): the tag "mkt_eth_capabilities" looks like a typo for
+ * "mtk_..."; left unchanged since renaming could break other references.
+ */
+enum mkt_eth_capabilities {
+ MTK_RGMII_BIT = 0,
+ MTK_TRGMII_BIT,
+ MTK_SGMII_BIT,
+ MTK_ESW_BIT,
+ MTK_GEPHY_BIT,
+ MTK_MUX_BIT,
+ MTK_INFRA_BIT,
+ MTK_SHARED_SGMII_BIT,
+ MTK_HWLRO_BIT,
+ MTK_SHARED_INT_BIT,
+ MTK_TRGMII_MT7621_CLK_BIT,
+ MTK_QDMA_BIT,
+ MTK_NETSYS_TX_V2_BIT,
+ MTK_NETSYS_RX_V2_BIT,
+ MTK_SOC_MT7628_BIT,
+
+ /* MUX BITS*/
+ MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
+ MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT,
+ MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT,
+ MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT,
+ MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT,
+
+ /* PATH BITS */
+ MTK_ETH_PATH_GMAC1_RGMII_BIT,
+ MTK_ETH_PATH_GMAC1_TRGMII_BIT,
+ MTK_ETH_PATH_GMAC1_SGMII_BIT,
+ MTK_ETH_PATH_GMAC2_RGMII_BIT,
+ MTK_ETH_PATH_GMAC2_SGMII_BIT,
+ MTK_ETH_PATH_GMAC2_GEPHY_BIT,
+ MTK_ETH_PATH_GDM1_ESW_BIT,
+};
+
+/* Supported hardware group on SoCs */
+#define MTK_RGMII BIT(MTK_RGMII_BIT)
+#define MTK_TRGMII BIT(MTK_TRGMII_BIT)
+#define MTK_SGMII BIT(MTK_SGMII_BIT)
+#define MTK_ESW BIT(MTK_ESW_BIT)
+#define MTK_GEPHY BIT(MTK_GEPHY_BIT)
+#define MTK_MUX BIT(MTK_MUX_BIT)
+#define MTK_INFRA BIT(MTK_INFRA_BIT)
+#define MTK_SHARED_SGMII BIT(MTK_SHARED_SGMII_BIT)
+#define MTK_HWLRO BIT(MTK_HWLRO_BIT)
+#define MTK_SHARED_INT BIT(MTK_SHARED_INT_BIT)
+#define MTK_TRGMII_MT7621_CLK BIT(MTK_TRGMII_MT7621_CLK_BIT)
+#define MTK_QDMA BIT(MTK_QDMA_BIT)
+#define MTK_NETSYS_TX_V2 BIT(MTK_NETSYS_TX_V2_BIT)
+#define MTK_NETSYS_RX_V2 BIT(MTK_NETSYS_RX_V2_BIT)
+#define MTK_SOC_MT7628 BIT(MTK_SOC_MT7628_BIT)
+
+#define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \
+ BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
+#define MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY \
+ BIT(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT)
+#define MTK_ETH_MUX_U3_GMAC2_TO_QPHY \
+ BIT(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT)
+#define MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \
+ BIT(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT)
+#define MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII \
+ BIT(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT)
+
+/* Supported path present on SoCs */
+#define MTK_ETH_PATH_GMAC1_RGMII BIT(MTK_ETH_PATH_GMAC1_RGMII_BIT)
+#define MTK_ETH_PATH_GMAC1_TRGMII BIT(MTK_ETH_PATH_GMAC1_TRGMII_BIT)
+#define MTK_ETH_PATH_GMAC1_SGMII BIT(MTK_ETH_PATH_GMAC1_SGMII_BIT)
+#define MTK_ETH_PATH_GMAC2_RGMII BIT(MTK_ETH_PATH_GMAC2_RGMII_BIT)
+#define MTK_ETH_PATH_GMAC2_SGMII BIT(MTK_ETH_PATH_GMAC2_SGMII_BIT)
+#define MTK_ETH_PATH_GMAC2_GEPHY BIT(MTK_ETH_PATH_GMAC2_GEPHY_BIT)
+#define MTK_ETH_PATH_GDM1_ESW BIT(MTK_ETH_PATH_GDM1_ESW_BIT)
+
+#define MTK_GMAC1_RGMII (MTK_ETH_PATH_GMAC1_RGMII | MTK_RGMII)
+#define MTK_GMAC1_TRGMII (MTK_ETH_PATH_GMAC1_TRGMII | MTK_TRGMII)
+#define MTK_GMAC1_SGMII (MTK_ETH_PATH_GMAC1_SGMII | MTK_SGMII)
+#define MTK_GMAC2_RGMII (MTK_ETH_PATH_GMAC2_RGMII | MTK_RGMII)
+#define MTK_GMAC2_SGMII (MTK_ETH_PATH_GMAC2_SGMII | MTK_SGMII)
+#define MTK_GMAC2_GEPHY (MTK_ETH_PATH_GMAC2_GEPHY | MTK_GEPHY)
+#define MTK_GDM1_ESW (MTK_ETH_PATH_GDM1_ESW | MTK_ESW)
+
+/* MUXes present on SoCs */
+/* 0: GDM1 -> GMAC1, 1: GDM1 -> ESW */
+#define MTK_MUX_GDM1_TO_GMAC1_ESW (MTK_ETH_MUX_GDM1_TO_GMAC1_ESW | MTK_MUX)
+
+/* 0: GMAC2 -> GEPHY, 1: GMAC0 -> GePHY */
+#define MTK_MUX_GMAC2_GMAC0_TO_GEPHY \
+ (MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY | MTK_MUX | MTK_INFRA)
+
+/* 0: U3 -> QPHY, 1: GMAC2 -> QPHY */
+#define MTK_MUX_U3_GMAC2_TO_QPHY \
+ (MTK_ETH_MUX_U3_GMAC2_TO_QPHY | MTK_MUX | MTK_INFRA)
+
+/* 2: GMAC1 -> SGMII, 3: GMAC2 -> SGMII */
+#define MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \
+ (MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_MUX | \
+ MTK_SHARED_SGMII)
+
+/* 0: GMACx -> GEPHY, 1: GMACx -> SGMII where x is 1 or 2 */
+#define MTK_MUX_GMAC12_TO_GEPHY_SGMII \
+ (MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII | MTK_MUX)
+
+#define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x))
+
+#define MT7621_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | \
+ MTK_GMAC2_RGMII | MTK_SHARED_INT | \
+ MTK_TRGMII_MT7621_CLK | MTK_QDMA)
+
+#define MT7622_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_SGMII | MTK_GMAC2_RGMII | \
+ MTK_GMAC2_SGMII | MTK_GDM1_ESW | \
+ MTK_MUX_GDM1_TO_GMAC1_ESW | \
+ MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_QDMA)
+
+#define MT7623_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | MTK_GMAC2_RGMII | \
+ MTK_QDMA)
+
+#define MT7628_CAPS (MTK_SHARED_INT | MTK_SOC_MT7628)
+
+#define MT7629_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
+ MTK_GDM1_ESW | MTK_MUX_GDM1_TO_GMAC1_ESW | \
+ MTK_MUX_GMAC2_GMAC0_TO_GEPHY | \
+ MTK_MUX_U3_GMAC2_TO_QPHY | \
+ MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA)
+
+#ifdef CONFIG_MEDIATEK_NETSYS_RX_V2
+#define MT7986_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \
+ MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
+ MTK_NETSYS_TX_V2 | MTK_NETSYS_RX_V2)
+#else
+#define MT7986_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \
+ MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
+ MTK_NETSYS_TX_V2)
+#endif
+
+/* struct mtk_soc_data - This is the structure holding all differences
+ * among various platforms
+ * @ana_rgc3: The offset for register ANA_RGC3 related to
+ * sgmiisys syscon
+ * @caps Flags shown the extra capability for the SoC
+ * @hw_features Flags shown HW features
+ * @required_clks Flags shown the bitmap for required clocks on
+ * the target SoC
+ * @required_pctl A bool value to show whether the SoC requires
+ * the extra setup for those pins used by GMAC.
+ * @has_sram NOTE(review): presumably true when DMA rings are
+ * placed in on-chip SRAM - confirm against ring setup
+ */
+struct mtk_soc_data {
+ u32 ana_rgc3;
+ u32 caps;
+ u32 required_clks;
+ bool required_pctl;
+ netdev_features_t hw_features;
+ bool has_sram;
+};
+
+/* currently no SoC has more than 2 macs */
+#define MTK_MAX_DEVS 2
+
+#define MTK_SGMII_PHYSPEED_AN BIT(31)
+#define MTK_SGMII_PHYSPEED_MASK GENMASK(2, 0)
+#define MTK_SGMII_PHYSPEED_1000 BIT(0)
+#define MTK_SGMII_PHYSPEED_2500 BIT(1)
+#define MTK_HAS_FLAGS(flags, _x) (((flags) & (_x)) == (_x))
+
+/* struct mtk_sgmii - This is the structure holding sgmii regmap and its
+ * characteristics
+ * @regmap: The register map pointing at the range used to setup
+ * SGMII modes; one entry per SGMII instance (up to
+ * MTK_MAX_DEVS)
+ * @flags: The enum refers to which mode the sgmii wants to run on;
+ * indexed like @regmap
+ * @ana_rgc3: The offset refers to register ANA_RGC3 related to regmap
+ */
+
+struct mtk_sgmii {
+ struct regmap *regmap[MTK_MAX_DEVS];
+ u32 flags[MTK_MAX_DEVS];
+ u32 ana_rgc3;
+};
+
+/* struct mtk_eth - This is the main datastructure for holding the state
+ * of the driver
+ * @dev: The device pointer
+ * @base: The mapped register i/o base
+ * @page_lock: Make sure that register operations are atomic
+ * @tx_irq_lock: Make sure that IRQ register operations are atomic
+ * @rx_irq_lock: Make sure that IRQ register operations are atomic
+ * @dummy_dev: we run 2 netdevs on 1 physical DMA ring and need a
+ * dummy for NAPI to work
+ * @netdev: The netdev instances
+ * @mac: Each netdev is linked to a physical MAC
+ * @irq: The IRQ that we are using
+ * @msg_enable: Ethtool msg level
+ * @sysclk: NOTE(review): presumably the system clock rate in Hz -
+ * confirm against probe code
+ * @ethsys: The register map pointing at the range used to setup
+ * MII modes
+ * @infra: The register map pointing at the range used to setup
+ * SGMII and GePHY path
+ * @sgmii: SGMII subsystem state (see struct mtk_sgmii)
+ * @pctl: The register map pointing at the range used to setup
+ * GMAC port drive/slew values
+ * @hwlro: true when hardware LRO is available/enabled
+ * @dma_refcnt: track how many netdevs are using the DMA engine
+ * @tx_ring: Pointer to the memory holding info about the TX ring
+ * @rx_ring: Pointer to the memory holding info about the RX ring
+ * @rx_ring_qdma: Pointer to the memory holding info about the QDMA RX ring
+ * @tx_napi: The TX NAPI struct
+ * @rx_napi: The RX NAPI struct
+ * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
+ * @phy_scratch_ring: physical address of scratch_ring
+ * @scratch_head: The scratch memory that scratch_ring points to.
+ * @clks: clock array for all clocks required
+ * @mii_bus: If there is a bus we need to create an instance for it
+ * @pending_work: The workqueue used to reset the dma ring
+ * @state: Initialization and runtime state of the device
+ * @soc: Holding specific data among various SoCs
+ * @tx_int_mask_reg: per-SoC TX interrupt mask register offset
+ * @tx_int_status_reg: per-SoC TX interrupt status register offset
+ * @rx_dma_l4_valid: per-SoC "L4 checksum valid" descriptor bit
+ * @ip_align: extra RX byte offset used to align the IP header - confirm
+ */
+
+struct mtk_eth {
+ struct device *dev;
+ void __iomem *base;
+ spinlock_t page_lock;
+ spinlock_t tx_irq_lock;
+ spinlock_t rx_irq_lock;
+ struct net_device dummy_dev;
+ struct net_device *netdev[MTK_MAX_DEVS];
+ struct mtk_mac *mac[MTK_MAX_DEVS];
+ int irq[3];
+ u32 msg_enable;
+ unsigned long sysclk;
+ struct regmap *ethsys;
+ struct regmap *infra;
+ struct mtk_sgmii *sgmii;
+ struct regmap *pctl;
+ bool hwlro;
+ refcount_t dma_refcnt;
+ struct mtk_tx_ring tx_ring;
+ struct mtk_rx_ring rx_ring[MTK_MAX_RX_RING_NUM];
+ struct mtk_rx_ring rx_ring_qdma;
+ struct napi_struct tx_napi;
+ struct napi_struct rx_napi;
+ struct mtk_tx_dma *scratch_ring;
+ dma_addr_t phy_scratch_ring;
+ void *scratch_head;
+ struct clk *clks[MTK_CLK_MAX];
+
+ struct mii_bus *mii_bus;
+ struct work_struct pending_work;
+ unsigned long state;
+
+ const struct mtk_soc_data *soc;
+
+ u32 tx_int_mask_reg;
+ u32 tx_int_status_reg;
+ u32 rx_dma_l4_valid;
+ int ip_align;
+};
+
+/* struct mtk_mac - the structure that holds the info about the MACs of the
+ * SoC
+ * @id: The number of the MAC
+ * @interface: Interface mode kept for detecting change in hw settings
+ * @mode: phylink MLO_AN_* operating mode - confirm
+ * @speed: current link speed
+ * @of_node: Our devicetree node
+ * @phylink: phylink instance for this MAC
+ * @phylink_config: phylink configuration for this MAC
+ * @hw: Backpointer to our main datastruture
+ * @hw_stats: Packet statistics counter
+ * @hwlro_ip: destination IPs programmed for HW LRO flows
+ * @hwlro_ip_cnt: number of valid entries in @hwlro_ip
+ */
+struct mtk_mac {
+ int id;
+ phy_interface_t interface;
+ unsigned int mode;
+ int speed;
+ struct device_node *of_node;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
+ struct mtk_eth *hw;
+ struct mtk_hw_stats *hw_stats;
+ __be32 hwlro_ip[MTK_MAX_LRO_IP_CNT];
+ int hwlro_ip_cnt;
+};
+
+/* the struct describing the SoC. these are declared in the soc_xyz.c files */
+extern const struct of_device_id of_mtk_match[];
+
+/* read the hardware status register */
+void mtk_stats_update_mac(struct mtk_mac *mac);
+
+/* accessors for the memory-mapped ethernet register window */
+void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg);
+u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
+
+/* SGMII subsystem setup: init from DT, then force-mode or autoneg per port */
+int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *np,
+ u32 ana_rgc3);
+int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id);
+int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id,
+ const struct phylink_link_state *state);
+void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id);
+
+/* per-MAC mux/path setup for the SGMII / GePHY / RGMII interfaces */
+int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
+int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
+int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
+
+#endif /* MTK_ETH_H */
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/Makefile b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/Makefile
new file mode 100644
index 0000000..bf1bbcb
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/Makefile
@@ -0,0 +1,5 @@
+ccflags-y=-Werror
+
+obj-$(CONFIG_NET_MEDIATEK_HNAT) += mtkhnat.o
+mtkhnat-objs := hnat.o hnat_nf_hook.o hnat_debugfs.o hnat_mcast.o
+mtkhnat-$(CONFIG_NET_DSA_MT7530) += hnat_stag.o
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
new file mode 100644
index 0000000..3e87791
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
@@ -0,0 +1,665 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
+ * Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/if.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "nf_hnat_mtk.h"
+#include "hnat.h"
+
+/* Global driver state, allocated once in hnat_probe(). */
+struct mtk_hnat *hnat_priv;
+
+/* RX/TX fast-path hooks; installed by hnat_enable_hook() and cleared by
+ * hnat_disable_hook(). Exported so the ethernet/WHNAT modules can call them.
+ */
+int (*ra_sw_nat_hook_rx)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(ra_sw_nat_hook_rx);
+int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no) = NULL;
+EXPORT_SYMBOL(ra_sw_nat_hook_tx);
+
+/* Net-device (un)registration callbacks for PPE-capable devices; set from
+ * hnat_enable_hook() when the SoC supports WHNAT.
+ */
+void (*ppe_dev_register_hook)(struct net_device *dev) = NULL;
+EXPORT_SYMBOL(ppe_dev_register_hook);
+void (*ppe_dev_unregister_hook)(struct net_device *dev) = NULL;
+EXPORT_SYMBOL(ppe_dev_unregister_hook);
+
+/* Timer callback: re-enable hardware FOE entry building after
+ * hnat_disable_hook() parked the PPE in SMA_ONLY_FWD_CPU mode.
+ */
+static void hnat_sma_build_entry(struct timer_list *t)
+{
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, SMA, SMA_FWD_CPU_BUILD_ENTRY);
+}
+
+/* Toggle CAH_X_MODE (presumably invalidates the PPE cache lines — confirm
+ * with the datasheet), then enable (1) or disable (0) the cache.
+ */
+void hnat_cache_ebl(int enable)
+{
+ cr_set_field(hnat_priv->ppe_base + PPE_CAH_CTRL, CAH_X_MODE, 1);
+ cr_set_field(hnat_priv->ppe_base + PPE_CAH_CTRL, CAH_X_MODE, 0);
+ cr_set_field(hnat_priv->ppe_base + PPE_CAH_CTRL, CAH_EN, enable);
+}
+
+/* Timer callback (re-armed every 14400 s): zero FE register 0x0010
+ * (presumably the global time base used for FOE aging — confirm with the
+ * datasheet) and resync the time_stamp of every bound entry to it.
+ * TCP/UDP aging and the PPE cache are paused while the table is rewritten.
+ */
+static void hnat_reset_timestamp(struct timer_list *t)
+{
+ struct foe_entry *entry;
+ int hash_index;
+
+ hnat_cache_ebl(0);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, TCP_AGE, 0);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, UDP_AGE, 0);
+ writel(0, hnat_priv->fe_base + 0x0010);
+
+ for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
+ entry = hnat_priv->foe_table_cpu + hash_index;
+ if (entry->bfib1.state == BIND)
+ entry->bfib1.time_stamp =
+ readl(hnat_priv->fe_base + 0x0010) & (0xFFFF);
+ }
+
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, TCP_AGE, 1);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, UDP_AGE, 1);
+ hnat_cache_ebl(1);
+
+ mod_timer(&hnat_priv->hnat_reset_timestamp_timer, jiffies + 14400 * HZ);
+}
+
+/* Read-modify-write: set the bits of @bs in register @reg. */
+static void cr_set_bits(void __iomem *reg, u32 bs)
+{
+ writel(readl(reg) | bs, reg);
+}
+
+/* Read-modify-write: clear the bits of @bs in register @reg. */
+static void cr_clr_bits(void __iomem *reg, u32 bs)
+{
+ writel(readl(reg) & ~bs, reg);
+}
+
+/* Replace the register bit-field selected by mask @field with @val: the
+ * field is cleared, then @val is shifted up to the field's LSB
+ * (ffs(field) - 1). Note @val is NOT masked against @field, so callers
+ * must pass a value that fits the field width.
+ */
+void cr_set_field(void __iomem *reg, u32 field, u32 val)
+{
+ unsigned int tv = readl(reg);
+
+ tv &= ~field;
+ tv |= ((val) << (ffs((unsigned int)field) - 1));
+ writel(tv, reg);
+}
+
+/* Boundary entries can't be used to accelerate data flows: in every
+ * 128-entry block the eight offsets below are marked static
+ * (udib1.sta = 1) so the PPE never binds them. Applied only on
+ * MTK_HNAT_V1 hardware (see hnat_start()).
+ */
+static void exclude_boundary_entry(struct foe_entry *foe_table_cpu)
+{
+ int entry_base = 0;
+ int bad_entry, i, j;
+ struct foe_entry *foe_entry;
+ /*these entries are boundary every 128 entries*/
+ int boundary_entry_offset[8] = { 12, 25, 38, 51, 76, 89, 102, 115};
+
+ if (!foe_table_cpu)
+ return;
+
+ for (i = 0; entry_base < hnat_priv->foe_etry_num; i++) {
+ /* set boundary entries as static*/
+ for (j = 0; j < 8; j++) {
+ bad_entry = entry_base + boundary_entry_offset[j];
+ foe_entry = &foe_table_cpu[bad_entry];
+ foe_entry->udib1.sta = 1;
+ }
+ entry_base = (i + 1) * 128;
+ }
+}
+
+/* Point GDMA ingress forwarding at the PPE or back at the CPU.
+ * @id: 0 selects GDMA1, non-zero selects GDMA2.
+ * @enable: non-zero forces all frame classes to the PPE; zero restores the
+ * CPU/PDMA target — but only if the register still points at the PPE, so
+ * any unrelated forwarding configuration is left untouched.
+ */
+void set_gmac_ppe_fwd(int id, int enable)
+{
+ void __iomem *reg;
+ u32 val;
+
+ reg = hnat_priv->fe_base + (id ? GDMA2_FWD_CFG : GDMA1_FWD_CFG);
+
+ if (enable) {
+ cr_set_bits(reg, BITS_GDM_ALL_FRC_P_PPE);
+
+ return;
+ }
+
+ /*disabled */
+ val = readl(reg);
+ if ((val & GDM_ALL_FRC_MASK) == BITS_GDM_ALL_FRC_P_PPE)
+ cr_set_field(reg, GDM_ALL_FRC_MASK,
+ BITS_GDM_ALL_FRC_P_CPU_PDMA);
+}
+
+/* Allocate the FOE (and optionally MIB) tables and program the PPE:
+ * hashing, aging, keep-alive, binding rate limits and default CPU ports.
+ * The FOE entry count is halved on each failed DMA allocation until one
+ * succeeds. Partial allocations left behind on failure are released by
+ * hnat_stop() via the probe error path.
+ *
+ * Returns 0 on success or -ENOMEM when a table cannot be allocated.
+ */
+static int hnat_start(void)
+{
+ u32 foe_table_sz;
+ u32 foe_mib_tb_sz;
+ int etry_num_cfg;
+
+ /* map the FOE table */
+ for (etry_num_cfg = DEF_ETRY_NUM_CFG ; etry_num_cfg >= 0 ; etry_num_cfg--, hnat_priv->foe_etry_num /= 2) {
+ foe_table_sz = hnat_priv->foe_etry_num * sizeof(struct foe_entry);
+ hnat_priv->foe_table_cpu = dma_alloc_coherent(
+ hnat_priv->dev, foe_table_sz, &hnat_priv->foe_table_dev, GFP_KERNEL);
+
+ if (hnat_priv->foe_table_cpu)
+ break;
+ }
+
+ if (!hnat_priv->foe_table_cpu)
+ return -ENOMEM;
+ dev_info(hnat_priv->dev, "FOE entry number = %d\n", hnat_priv->foe_etry_num);
+
+ writel(hnat_priv->foe_table_dev, hnat_priv->ppe_base + PPE_TB_BASE);
+ memset(hnat_priv->foe_table_cpu, 0, foe_table_sz);
+
+ if (hnat_priv->data->version == MTK_HNAT_V1)
+ exclude_boundary_entry(hnat_priv->foe_table_cpu);
+
+ if (hnat_priv->data->per_flow_accounting) {
+ foe_mib_tb_sz = hnat_priv->foe_etry_num * sizeof(struct mib_entry);
+ hnat_priv->foe_mib_cpu = dma_alloc_coherent(hnat_priv->dev, foe_mib_tb_sz,
+ &hnat_priv->foe_mib_dev, GFP_KERNEL);
+ if (!hnat_priv->foe_mib_cpu)
+ return -ENOMEM;
+ writel(hnat_priv->foe_mib_dev, hnat_priv->ppe_base + PPE_MIB_TB_BASE);
+ memset(hnat_priv->foe_mib_cpu, 0, foe_mib_tb_sz);
+
+ /* kcalloc zeroes the array and checks the size multiplication
+ * for overflow (was kzalloc(n * size))
+ */
+ hnat_priv->acct =
+ kcalloc(hnat_priv->foe_etry_num, sizeof(struct hnat_accounting),
+ GFP_KERNEL);
+ if (!hnat_priv->acct)
+ return -ENOMEM;
+ }
+ /* setup hashing */
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, TB_ETRY_NUM, etry_num_cfg);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, HASH_MODE, HASH_MODE_1);
+ writel(HASH_SEED_KEY, hnat_priv->ppe_base + PPE_HASH_SEED);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, XMODE, 0);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, TB_ENTRY_SIZE, ENTRY_80B);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, SMA, SMA_FWD_CPU_BUILD_ENTRY);
+
+ /* set ip proto */
+ writel(0xFFFFFFFF, hnat_priv->ppe_base + PPE_IP_PROT_CHK);
+
+ /* setup caching */
+ hnat_cache_ebl(1);
+
+ /* enable FOE */
+ cr_set_bits(hnat_priv->ppe_base + PPE_FLOW_CFG,
+ BIT_UDP_IP4F_NAT_EN | BIT_IPV4_NAT_EN | BIT_IPV4_NAPT_EN |
+ BIT_IPV4_NAT_FRAG_EN | BIT_IPV4_HASH_GREK |
+ BIT_IPV4_DSL_EN | BIT_IPV6_6RD_EN |
+ BIT_IPV6_3T_ROUTE_EN | BIT_IPV6_5T_ROUTE_EN);
+
+ if (hnat_priv->data->version == MTK_HNAT_V4)
+ cr_set_bits(hnat_priv->ppe_base + PPE_FLOW_CFG,
+ BIT_IPV4_MAPE_EN | BIT_IPV4_MAPT_EN);
+
+ /* setup FOE aging */
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, NTU_AGE, 1);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, UNBD_AGE, 1);
+ cr_set_field(hnat_priv->ppe_base + PPE_UNB_AGE, UNB_MNP, 1000);
+ cr_set_field(hnat_priv->ppe_base + PPE_UNB_AGE, UNB_DLTA, 3);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, TCP_AGE, 1);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, UDP_AGE, 1);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, FIN_AGE, 1);
+ cr_set_field(hnat_priv->ppe_base + PPE_BND_AGE_0, UDP_DLTA, 12);
+ cr_set_field(hnat_priv->ppe_base + PPE_BND_AGE_0, NTU_DLTA, 1);
+ cr_set_field(hnat_priv->ppe_base + PPE_BND_AGE_1, FIN_DLTA, 1);
+ cr_set_field(hnat_priv->ppe_base + PPE_BND_AGE_1, TCP_DLTA, 7);
+
+ /* setup FOE ka */
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, SCAN_MODE, 2);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, KA_CFG, 3);
+ cr_set_field(hnat_priv->ppe_base + PPE_KA, KA_T, 1);
+ cr_set_field(hnat_priv->ppe_base + PPE_KA, TCP_KA, 1);
+ cr_set_field(hnat_priv->ppe_base + PPE_KA, UDP_KA, 1);
+ cr_set_field(hnat_priv->ppe_base + PPE_BIND_LMT_1, NTU_KA, 1);
+
+ /* setup FOE rate limit */
+ cr_set_field(hnat_priv->ppe_base + PPE_BIND_LMT_0, QURT_LMT, 16383);
+ cr_set_field(hnat_priv->ppe_base + PPE_BIND_LMT_0, HALF_LMT, 16383);
+ cr_set_field(hnat_priv->ppe_base + PPE_BIND_LMT_1, FULL_LMT, 16383);
+ /* setup binding threshold as 30 packets per second */
+ cr_set_field(hnat_priv->ppe_base + PPE_BNDR, BIND_RATE, 0x1E);
+
+ /* setup FOE cf gen */
+ cr_set_field(hnat_priv->ppe_base + PPE_GLO_CFG, PPE_EN, 1);
+ writel(0, hnat_priv->ppe_base + PPE_DFT_CPORT); /* pdma */
+ /* writel(0x55555555, hnat_priv->ppe_base + PPE_DFT_CPORT); */ /* qdma */
+ cr_set_field(hnat_priv->ppe_base + PPE_GLO_CFG, TTL0_DRP, 0);
+
+ if (hnat_priv->data->version == MTK_HNAT_V4) {
+ writel(0xcb777, hnat_priv->ppe_base + PPE_DFT_CPORT1);
+ writel(0x7f, hnat_priv->ppe_base + PPE_SBW_CTRL);
+ }
+
+ /*enable ppe mib counter*/
+ if (hnat_priv->data->per_flow_accounting) {
+ cr_set_field(hnat_priv->ppe_base + PPE_MIB_CFG, MIB_EN, 1);
+ cr_set_field(hnat_priv->ppe_base + PPE_MIB_CFG, MIB_READ_CLEAR, 1);
+ cr_set_field(hnat_priv->ppe_base + PPE_MIB_CAH_CTRL, MIB_CAH_EN, 1);
+ }
+
+ hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);
+
+ dev_info(hnat_priv->dev, "hwnat start\n");
+
+ return 0;
+}
+
+/* Poll PPE_GLO_CFG (offset 0x0) until BIT(31) — presumably a busy
+ * indication — clears; give up after roughly one second.
+ * Returns 0 when the bit cleared, -1 on timeout.
+ */
+static int ppe_busy_wait(void)
+{
+ unsigned long deadline = jiffies + HZ;
+
+ do {
+ if (!(readl(hnat_priv->ppe_base + 0x0) & BIT(31)))
+ return 0;
+ if (time_after(jiffies, deadline))
+ break;
+ usleep_range(10, 20);
+ } while (1);
+
+ dev_notice(hnat_priv->dev, "ppe:%s timeout\n", __func__);
+
+ return -1;
+}
+
+/* Quiesce and unmap the PPE: route GDMA traffic back to the CPU,
+ * invalidate every FOE entry, flush/disable the cache, disable the engine
+ * and its aging / keep-alive machinery, then free the FOE and MIB tables.
+ * A PSE reset is issued when the FE counters at 0x100/0x10c (meaning
+ * unclear from here — confirm against the datasheet) look stuck.
+ */
+static void hnat_stop(void)
+{
+ u32 foe_table_sz;
+ u32 foe_mib_tb_sz;
+ struct foe_entry *entry, *end;
+ u32 r1 = 0, r2 = 0;
+
+ /* send all traffic back to the DMA engine */
+ set_gmac_ppe_fwd(0, 0);
+ set_gmac_ppe_fwd(1, 0);
+
+ dev_info(hnat_priv->dev, "hwnat stop\n");
+
+ /* mark every entry invalid so stale flows can't be resumed */
+ if (hnat_priv->foe_table_cpu) {
+ entry = hnat_priv->foe_table_cpu;
+ end = hnat_priv->foe_table_cpu + hnat_priv->foe_etry_num;
+ while (entry < end) {
+ entry->bfib1.state = INVALID;
+ entry++;
+ }
+ }
+ /* disable caching */
+ hnat_cache_ebl(0);
+
+ /* flush cache has to be ahead of hnat disable --*/
+ cr_set_field(hnat_priv->ppe_base + PPE_GLO_CFG, PPE_EN, 0);
+
+ /* disable scan mode and keep-alive */
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, SCAN_MODE, 0);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, KA_CFG, 0);
+
+ ppe_busy_wait();
+
+ /* disable FOE */
+ cr_clr_bits(hnat_priv->ppe_base + PPE_FLOW_CFG,
+ BIT_IPV4_NAPT_EN | BIT_IPV4_NAT_EN | BIT_IPV4_NAT_FRAG_EN |
+ BIT_IPV6_HASH_GREK | BIT_IPV4_DSL_EN |
+ BIT_IPV6_6RD_EN | BIT_IPV6_3T_ROUTE_EN |
+ BIT_IPV6_5T_ROUTE_EN | BIT_FUC_FOE | BIT_FMC_FOE);
+
+ if (hnat_priv->data->version == MTK_HNAT_V4)
+ cr_clr_bits(hnat_priv->ppe_base + PPE_FLOW_CFG,
+ BIT_IPV4_MAPE_EN | BIT_IPV4_MAPT_EN);
+
+ /* disable FOE aging */
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, NTU_AGE, 0);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, UNBD_AGE, 0);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, TCP_AGE, 0);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, UDP_AGE, 0);
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, FIN_AGE, 0);
+
+ r1 = readl(hnat_priv->fe_base + 0x100);
+ r2 = readl(hnat_priv->fe_base + 0x10c);
+
+ dev_info(hnat_priv->dev, "0x100 = 0x%x, 0x10c = 0x%x\n", r1, r2);
+
+ if (((r1 & 0xff00) >> 0x8) >= (r1 & 0xff) ||
+ ((r1 & 0xff00) >> 0x8) >= (r2 & 0xff)) {
+ dev_info(hnat_priv->dev, "reset pse\n");
+ writel(0x1, hnat_priv->fe_base + 0x4);
+ }
+
+ /* free the FOE table */
+ foe_table_sz = hnat_priv->foe_etry_num * sizeof(struct foe_entry);
+ if (hnat_priv->foe_table_cpu)
+ dma_free_coherent(hnat_priv->dev, foe_table_sz, hnat_priv->foe_table_cpu,
+ hnat_priv->foe_table_dev);
+ writel(0, hnat_priv->ppe_base + PPE_TB_BASE);
+
+ if (hnat_priv->data->per_flow_accounting) {
+ foe_mib_tb_sz = hnat_priv->foe_etry_num * sizeof(struct mib_entry);
+ if (hnat_priv->foe_mib_cpu)
+ dma_free_coherent(hnat_priv->dev, foe_mib_tb_sz,
+ hnat_priv->foe_mib_cpu, hnat_priv->foe_mib_dev);
+ writel(0, hnat_priv->ppe_base + PPE_MIB_TB_BASE);
+ kfree(hnat_priv->acct);
+ }
+}
+
+/* Drop the device references taken on external (ext_if) interfaces and on
+ * the PPD device, and free the ext_if entries.
+ * NOTE(review): if ext_if_del() compacts the array, advancing i on every
+ * iteration may skip entries — confirm its semantics.
+ */
+static void hnat_release_netdev(void)
+{
+ int i;
+ struct extdev_entry *ext_entry;
+
+ for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
+ ext_entry = hnat_priv->ext_if[i];
+ if (ext_entry->dev)
+ dev_put(ext_entry->dev);
+ ext_if_del(ext_entry);
+ kfree(ext_entry);
+ }
+
+ if (hnat_priv->g_ppdev)
+ dev_put(hnat_priv->g_ppdev);
+}
+
+/* Notifier blocks: track net-device register/unregister and network events
+ * (handlers live in the nf-hook source).
+ */
+static struct notifier_block nf_hnat_netdevice_nb __read_mostly = {
+ .notifier_call = nf_hnat_netdevice_event,
+};
+
+static struct notifier_block nf_hnat_netevent_nb __read_mostly = {
+ .notifier_call = nf_hnat_netevent_handler,
+};
+
+/* Install the HNAT fast path: expose the RX/TX and device-registration
+ * hooks for the WHNAT module on SoCs that support it, then register the
+ * netfilter hooks. Returns 0 on success, -1 if hook registration fails.
+ */
+int hnat_enable_hook(void)
+{
+ /* register hook functions used by WHNAT module.
+ */
+ if (hnat_priv->data->whnat) {
+ /* the RX hook only exists on the V4 (NETSYS v2) data path */
+ ra_sw_nat_hook_rx =
+ (hnat_priv->data->version == MTK_HNAT_V4) ?
+ mtk_sw_nat_hook_rx : NULL;
+ ra_sw_nat_hook_tx = mtk_sw_nat_hook_tx;
+ ppe_dev_register_hook = mtk_ppe_dev_register_hook;
+ ppe_dev_unregister_hook = mtk_ppe_dev_unregister_hook;
+ }
+
+ if (hnat_register_nf_hooks())
+ return -1;
+
+ hook_toggle = 1;
+
+ return 0;
+}
+
+/* Tear down the fast path: clear the TX/RX hooks, unregister the netfilter
+ * hooks and invalidate every bound FOE entry so traffic falls back to the
+ * kernel stack. The PPE is parked in SMA_ONLY_FWD_CPU while the table is
+ * rewritten; the 3 s hnat_sma_build_entry timer restores hardware entry
+ * building afterwards. Always returns 0.
+ */
+int hnat_disable_hook(void)
+{
+ int hash_index;
+ struct foe_entry *entry;
+
+ ra_sw_nat_hook_tx = NULL;
+ ra_sw_nat_hook_rx = NULL;
+ hnat_unregister_nf_hooks();
+
+ cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, SMA, SMA_ONLY_FWD_CPU);
+ for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
+ entry = hnat_priv->foe_table_cpu + hash_index;
+ if (entry->bfib1.state == BIND) {
+ entry->ipv4_hnapt.udib1.state = INVALID;
+ entry->ipv4_hnapt.udib1.time_stamp =
+ readl((hnat_priv->fe_base + 0x0010)) & 0xFF;
+ }
+ }
+
+ /* clear HWNAT cache */
+ hnat_cache_ebl(1);
+
+ mod_timer(&hnat_priv->hnat_sma_build_entry_timer, jiffies + 3 * HZ);
+ hook_toggle = 0;
+
+ return 0;
+}
+
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+/* Receive handler for frames tagged with HQOS_MAGIC_TAG; registered in
+ * hnat_probe() when GMAC1 mode is active (see IS_GMAC1_MODE).
+ */
+static struct packet_type mtk_pack_type __read_mostly = {
+ .type = HQOS_MAGIC_TAG,
+ .func = mtk_hqos_ptype_cb,
+};
+#endif
+
+/* Platform probe: parse the device tree (interface names, GMAC count, DSA
+ * WAN port, external devices), map the frame-engine register window,
+ * create the debugfs entries, start the PPE and install the offload hooks.
+ * Returns 0 on success or a negative errno; partially acquired resources
+ * are released through the err_out labels.
+ */
+static int hnat_probe(struct platform_device *pdev)
+{
+ int i;
+ int err = 0;
+ int index = 0;
+ struct resource *res;
+ const char *name;
+ struct device_node *np;
+ unsigned int val;
+ struct property *prop;
+ struct extdev_entry *ext_entry;
+ const struct of_device_id *match;
+
+ hnat_priv = devm_kzalloc(&pdev->dev, sizeof(struct mtk_hnat), GFP_KERNEL);
+ if (!hnat_priv)
+ return -ENOMEM;
+
+ hnat_priv->foe_etry_num = DEF_ETRY_NUM;
+
+ /* guard against a NULL match before dereferencing ->data */
+ match = of_match_device(of_hnat_match, &pdev->dev);
+ if (!match)
+ return -EINVAL;
+ hnat_priv->data = (struct mtk_hnat_data *)match->data;
+
+ hnat_priv->dev = &pdev->dev;
+ np = hnat_priv->dev->of_node;
+
+ err = of_property_read_string(np, "mtketh-wan", &name);
+ if (err < 0)
+ return -EINVAL;
+
+ /* copy at most IFNAMSIZ - 1 bytes so the kzalloc'd NUL terminator
+ * survives over-long DT strings (strncpy does not terminate on
+ * truncation)
+ */
+ strncpy(hnat_priv->wan, (char *)name, IFNAMSIZ - 1);
+ dev_info(&pdev->dev, "wan = %s\n", hnat_priv->wan);
+
+ err = of_property_read_string(np, "mtketh-lan", &name);
+ if (err < 0)
+ strncpy(hnat_priv->lan, "eth0", IFNAMSIZ - 1);
+ else
+ strncpy(hnat_priv->lan, (char *)name, IFNAMSIZ - 1);
+ dev_info(&pdev->dev, "lan = %s\n", hnat_priv->lan);
+
+ err = of_property_read_string(np, "mtketh-ppd", &name);
+ if (err < 0)
+ strncpy(hnat_priv->ppd, "eth0", IFNAMSIZ - 1);
+ else
+ strncpy(hnat_priv->ppd, (char *)name, IFNAMSIZ - 1);
+ dev_info(&pdev->dev, "ppd = %s\n", hnat_priv->ppd);
+
+ /*get total gmac num in hnat*/
+ err = of_property_read_u32_index(np, "mtketh-max-gmac", 0, &val);
+
+ if (err < 0)
+ return -EINVAL;
+
+ hnat_priv->gmac_num = val;
+
+ dev_info(&pdev->dev, "gmac num = %d\n", hnat_priv->gmac_num);
+
+ err = of_property_read_u32_index(np, "mtkdsa-wan-port", 0, &val);
+
+ if (err < 0) {
+ hnat_priv->wan_dsa_port = NONE_DSA_PORT;
+ } else {
+ hnat_priv->wan_dsa_port = val;
+ dev_info(&pdev->dev, "wan dsa port = %d\n", hnat_priv->wan_dsa_port);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOENT;
+
+ hnat_priv->fe_base = devm_ioremap_nocache(&pdev->dev, res->start,
+ res->end - res->start + 1);
+ if (!hnat_priv->fe_base)
+ return -EADDRNOTAVAIL;
+
+ /* the PPE register block sits at a version-dependent offset inside
+ * the frame-engine window
+ */
+ hnat_priv->ppe_base = (hnat_priv->data->version == MTK_HNAT_V4) ?
+ hnat_priv->fe_base + 0x2600 : hnat_priv->fe_base + 0xe00;
+
+ err = hnat_init_debugfs(hnat_priv);
+ if (err)
+ return err;
+
+ prop = of_find_property(np, "ext-devices", NULL);
+ for (name = of_prop_next_string(prop, NULL); name;
+ name = of_prop_next_string(prop, name), index++) {
+ ext_entry = kzalloc(sizeof(*ext_entry), GFP_KERNEL);
+ if (!ext_entry) {
+ err = -ENOMEM;
+ goto err_out1;
+ }
+ strncpy(ext_entry->name, (char *)name, IFNAMSIZ - 1);
+ ext_if_add(ext_entry);
+ }
+
+ for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
+ ext_entry = hnat_priv->ext_if[i];
+ dev_info(&pdev->dev, "ext devices = %s\n", ext_entry->name);
+ }
+
+ hnat_priv->lvid = 1;
+ hnat_priv->wvid = 2;
+
+ err = hnat_start();
+ if (err)
+ goto err_out;
+
+ if (hnat_priv->data->whnat) {
+ err = whnat_adjust_nf_hooks();
+ if (err)
+ goto err_out;
+ }
+
+ err = hnat_enable_hook();
+ if (err)
+ goto err_out;
+
+ register_netdevice_notifier(&nf_hnat_netdevice_nb);
+ register_netevent_notifier(&nf_hnat_netevent_nb);
+ if (hnat_priv->data->mcast)
+ hnat_mcast_enable();
+ timer_setup(&hnat_priv->hnat_sma_build_entry_timer, hnat_sma_build_entry, 0);
+ if (hnat_priv->data->version == MTK_HNAT_V3) {
+ /* V3 re-zeroes the global timestamp periodically */
+ timer_setup(&hnat_priv->hnat_reset_timestamp_timer, hnat_reset_timestamp, 0);
+ hnat_priv->hnat_reset_timestamp_timer.expires = jiffies;
+ add_timer(&hnat_priv->hnat_reset_timestamp_timer);
+ }
+
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+ if (IS_GMAC1_MODE)
+ dev_add_pack(&mtk_pack_type);
+#endif
+
+ return 0;
+
+err_out:
+ hnat_stop();
+err_out1:
+ hnat_deinit_debugfs(hnat_priv);
+ for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
+ ext_entry = hnat_priv->ext_if[i];
+ ext_if_del(ext_entry);
+ kfree(ext_entry);
+ }
+ return err;
+}
+
+/* Driver teardown — mirror of hnat_probe(): remove notifiers and hooks,
+ * stop the PPE, release debugfs, netdev references and timers.
+ */
+static int hnat_remove(struct platform_device *pdev)
+{
+ unregister_netdevice_notifier(&nf_hnat_netdevice_nb);
+ unregister_netevent_notifier(&nf_hnat_netevent_nb);
+ hnat_disable_hook();
+
+ if (hnat_priv->data->mcast)
+ hnat_mcast_disable();
+
+ hnat_stop();
+ hnat_deinit_debugfs(hnat_priv);
+ hnat_release_netdev();
+ del_timer_sync(&hnat_priv->hnat_sma_build_entry_timer);
+ if (hnat_priv->data->version == MTK_HNAT_V3)
+ del_timer_sync(&hnat_priv->hnat_reset_timestamp_timer);
+
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+ if (IS_GMAC1_MODE)
+ dev_remove_pack(&mtk_pack_type);
+#endif
+
+ return 0;
+}
+
+/* Per-generation capability matrices: scheduler count, Wi-Fi HNAT (whnat),
+ * per-flow MIB accounting, multicast offload and PPE version.
+ */
+static const struct mtk_hnat_data hnat_data_v1 = {
+ .num_of_sch = 2,
+ .whnat = false,
+ .per_flow_accounting = false,
+ .mcast = false,
+ .version = MTK_HNAT_V1,
+};
+
+static const struct mtk_hnat_data hnat_data_v2 = {
+ .num_of_sch = 2,
+ .whnat = true,
+ .per_flow_accounting = true,
+ .mcast = false,
+ .version = MTK_HNAT_V2,
+};
+
+static const struct mtk_hnat_data hnat_data_v3 = {
+ .num_of_sch = 4,
+ .whnat = false,
+ .per_flow_accounting = false,
+ .mcast = false,
+ .version = MTK_HNAT_V3,
+};
+
+static const struct mtk_hnat_data hnat_data_v4 = {
+ .num_of_sch = 4,
+ .whnat = true,
+ .per_flow_accounting = true,
+ .mcast = false,
+ .version = MTK_HNAT_V4,
+};
+
+/* DT compatibles; note the bare "mediatek,mtk-hnat" maps to the V3 data. */
+const struct of_device_id of_hnat_match[] = {
+ { .compatible = "mediatek,mtk-hnat", .data = &hnat_data_v3 },
+ { .compatible = "mediatek,mtk-hnat_v1", .data = &hnat_data_v1 },
+ { .compatible = "mediatek,mtk-hnat_v2", .data = &hnat_data_v2 },
+ { .compatible = "mediatek,mtk-hnat_v3", .data = &hnat_data_v3 },
+ { .compatible = "mediatek,mtk-hnat_v4", .data = &hnat_data_v4 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_hnat_match);
+
+/* Platform driver glue; binding happens via the DT compatibles above. */
+static struct platform_driver hnat_driver = {
+ .probe = hnat_probe,
+ .remove = hnat_remove,
+ .driver = {
+ .name = "mediatek_soc_hnat",
+ .of_match_table = of_hnat_match,
+ },
+};
+
+module_platform_driver(hnat_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_AUTHOR("John Crispin <john@phrozen.org>");
+MODULE_DESCRIPTION("Mediatek Hardware NAT");
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
new file mode 100644
index 0000000..336b4ad
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
@@ -0,0 +1,925 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
+ * Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/string.h>
+#include <linux/if.h>
+#include <linux/if_ether.h>
+#include <net/netevent.h>
+#include <linux/mod_devicetable.h>
+#include "hnat_mcast.h"
+
+/*--------------------------------------------------------------------------*/
+/* Register Offset*/
+/*--------------------------------------------------------------------------*/
+#define PPE_GLO_CFG 0x00
+#define PPE_FLOW_CFG 0x04
+#define PPE_IP_PROT_CHK 0x08
+#define PPE_IP_PROT_0 0x0C
+#define PPE_IP_PROT_1 0x10
+#define PPE_IP_PROT_2 0x14
+#define PPE_IP_PROT_3 0x18
+#define PPE_TB_CFG 0x1C
+#define PPE_TB_BASE 0x20
+#define PPE_TB_USED 0x24
+#define PPE_BNDR 0x28
+#define PPE_BIND_LMT_0 0x2C
+#define PPE_BIND_LMT_1 0x30
+#define PPE_KA 0x34
+#define PPE_UNB_AGE 0x38
+#define PPE_BND_AGE_0 0x3C
+#define PPE_BND_AGE_1 0x40
+#define PPE_HASH_SEED 0x44
+#define PPE_DFT_CPORT 0x48
+#define PPE_DFT_CPORT1 0x4C
+#define PPE_MCAST_PPSE 0x84
+#define PPE_MCAST_L_0 0x88
+#define PPE_MCAST_H_0 0x8C
+#define PPE_MCAST_L_1 0x90
+#define PPE_MCAST_H_1 0x94
+#define PPE_MCAST_L_2 0x98
+#define PPE_MCAST_H_2 0x9C
+#define PPE_MCAST_L_3 0xA0
+#define PPE_MCAST_H_3 0xA4
+#define PPE_MCAST_L_4 0xA8
+#define PPE_MCAST_H_4 0xAC
+#define PPE_MCAST_L_5 0xB0
+#define PPE_MCAST_H_5 0xB4
+#define PPE_MCAST_L_6 0xBC
+#define PPE_MCAST_H_6 0xC0
+#define PPE_MCAST_L_7 0xC4
+#define PPE_MCAST_H_7 0xC8
+#define PPE_MCAST_L_8 0xCC
+#define PPE_MCAST_H_8 0xD0
+#define PPE_MCAST_L_9 0xD4
+#define PPE_MCAST_H_9 0xD8
+#define PPE_MCAST_L_A 0xDC
+#define PPE_MCAST_H_A 0xE0
+#define PPE_MCAST_L_B 0xE4
+#define PPE_MCAST_H_B 0xE8
+#define PPE_MCAST_L_C 0xEC
+#define PPE_MCAST_H_C 0xF0
+#define PPE_MCAST_L_D 0xF4
+#define PPE_MCAST_H_D 0xF8
+#define PPE_MCAST_L_E 0xFC
+/* NOTE(review): 0xE0 duplicates PPE_MCAST_H_A and breaks the +4 stride
+ * (0x100 would be expected, but that is PPE_MCAST_L_F) — confirm against
+ * the PPE register map before relying on multicast entry E.
+ */
+#define PPE_MCAST_H_E 0xE0
+#define PPE_MCAST_L_F 0x100
+#define PPE_MCAST_H_F 0x104
+#define PPE_MCAST_L_10 0xC00
+#define PPE_MCAST_H_10 0xC04
+#define PPE_MTU_DRP 0x108
+#define PPE_MTU_VLYR_0 0x10C
+#define PPE_MTU_VLYR_1 0x110
+#define PPE_MTU_VLYR_2 0x114
+#define PPE_VPM_TPID 0x118
+#define PPE_CAH_CTRL 0x120
+#define PPE_CAH_TAG_SRH 0x124
+#define PPE_CAH_LINE_RW 0x128
+#define PPE_CAH_WDATA 0x12C
+#define PPE_CAH_RDATA 0x130
+
+#define PPE_MIB_CFG 0X134
+#define PPE_MIB_TB_BASE 0X138
+#define PPE_MIB_SER_CR 0X13C
+#define PPE_MIB_SER_R0 0X140
+#define PPE_MIB_SER_R1 0X144
+#define PPE_MIB_SER_R2 0X148
+#define PPE_MIB_CAH_CTRL 0X150
+#define PPE_MIB_CAH_TAG_SRH 0X154
+#define PPE_MIB_CAH_LINE_RW 0X158
+#define PPE_MIB_CAH_WDATA 0X15C
+#define PPE_MIB_CAH_RDATA 0X160
+#define PPE_SBW_CTRL 0x174
+
+#define GDMA1_FWD_CFG 0x500
+#define GDMA2_FWD_CFG 0x1500
+
+#define QTX_CFG(x) (QDMA_BASE + ((x) * 0x10))
+#define QTX_SCH(x) (QDMA_BASE + 0x4 + ((x) * 0x10))
+#define QDMA_PAGE (QDMA_BASE + 0x1f0)
+#define QDMA_TX_2SCH_BASE (QDMA_BASE + 0x214)
+#define QTX_MIB_IF (QDMA_BASE + 0x2bc)
+#define QDMA_TX_4SCH_BASE(x) (QDMA_BASE + 0x398 + (((x) >> 1) * 0x4))
+
+/*--------------------------------------------------------------------------*/
+/* Register Mask*/
+/*--------------------------------------------------------------------------*/
+/* PPE_TB_CFG mask */
+#define TB_ETRY_NUM (0x7 << 0) /* RW */
+#define TB_ENTRY_SIZE (0x1 << 3) /* RW */
+#define SMA (0x3 << 4) /* RW */
+#define NTU_AGE (0x1 << 7) /* RW */
+#define UNBD_AGE (0x1 << 8) /* RW */
+#define TCP_AGE (0x1 << 9) /* RW */
+#define UDP_AGE (0x1 << 10) /* RW */
+#define FIN_AGE (0x1 << 11) /* RW */
+#define KA_CFG (0x3 << 12)
+#define HASH_MODE (0x3 << 14) /* RW */
+#define SCAN_MODE (0x3 << 16) /* RW */
+#define XMODE (0x3 << 18) /* RW */
+
+/*PPE_CAH_CTRL mask*/
+#define CAH_EN (0x1 << 0) /* RW */
+#define CAH_X_MODE (0x1 << 9) /* RW */
+
+/*PPE_UNB_AGE mask*/
+#define UNB_DLTA (0xff << 0) /* RW */
+#define UNB_MNP (0xffff << 16) /* RW */
+
+/*PPE_BND_AGE_0 mask*/
+#define UDP_DLTA (0xffff << 0) /* RW */
+#define NTU_DLTA (0xffff << 16) /* RW */
+
+/*PPE_BND_AGE_1 mask*/
+#define TCP_DLTA (0xffff << 0) /* RW */
+#define FIN_DLTA (0xffff << 16) /* RW */
+
+/*PPE_KA mask*/
+#define KA_T (0xffff << 0) /* RW */
+#define TCP_KA (0xff << 16) /* RW */
+#define UDP_KA (0xff << 24) /* RW */
+
+/*PPE_BIND_LMT_0 mask*/
+#define QURT_LMT (0x3ff << 0) /* RW */
+#define HALF_LMT (0x3ff << 16) /* RW */
+
+/*PPE_BIND_LMT_1 mask*/
+#define FULL_LMT (0x3fff << 0) /* RW */
+#define NTU_KA (0xff << 16) /* RW */
+
+/*PPE_BNDR mask*/
+#define BIND_RATE (0xffff << 0) /* RW */
+#define PBND_RD_PRD (0xffff << 16) /* RW */
+
+/*PPE_GLO_CFG mask*/
+#define PPE_EN (0x1 << 0) /* RW */
+#define TTL0_DRP (0x1 << 4) /* RW */
+#define MCAST_TB_EN (0x1 << 7) /* RW */
+#define MCAST_HASH (0x3 << 12) /* RW */
+
+#define MC_P3_PPSE (0xf << 12) /* RW */
+#define MC_P2_PPSE (0xf << 8) /* RW */
+#define MC_P1_PPSE (0xf << 4) /* RW */
+#define MC_P0_PPSE (0xf << 0) /* RW */
+
+#define MIB_EN (0x1 << 0) /* RW */
+#define MIB_READ_CLEAR (0X1 << 1) /* RW */
+#define MIB_CAH_EN (0X1 << 0) /* RW */
+
+/*GDMA_FWD_CFG mask */
+#define GDM_UFRC_MASK (0x7 << 12) /* RW */
+#define GDM_BFRC_MASK (0x7 << 8) /*RW*/
+#define GDM_MFRC_MASK (0x7 << 4) /*RW*/
+#define GDM_OFRC_MASK (0x7 << 0) /*RW*/
+#define GDM_ALL_FRC_MASK \
+ (GDM_UFRC_MASK | GDM_BFRC_MASK | GDM_MFRC_MASK | GDM_OFRC_MASK)
+
+/*QDMA_PAGE mask*/
+#define QTX_CFG_PAGE (0xf << 0) /* RW */
+
+/*QTX_MIB_IF mask*/
+#define MIB_ON_QTX_CFG (0x1 << 31) /* RW */
+#define VQTX_MIB_EN (0x1 << 28) /* RW */
+
+/*--------------------------------------------------------------------------*/
+/* Descriptor Structure */
+/*--------------------------------------------------------------------------*/
+/* FOE info-block layouts. The bit-field widths and order below mirror the
+ * PPE hardware entry format — do not reorder or repack. The layout differs
+ * when CONFIG_MEDIATEK_NETSYS_V2 is set.
+ */
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+/* first word of an entry that is not yet bound to a flow */
+struct hnat_unbind_info_blk {
+ u32 time_stamp : 8;
+ u32 sp : 4;
+ u32 pcnt : 8;
+ u32 ilgf : 1;
+ u32 mc : 1;
+ u32 preb : 1;
+ u32 pkt_type : 5;
+ u32 state : 2;
+ u32 udp : 1;
+ u32 sta : 1; /* static entry */
+} __packed;
+
+/* first word of a bound (accelerated) entry */
+struct hnat_bind_info_blk {
+ u32 time_stamp : 8;
+ u32 sp : 4;
+ u32 mc : 1;
+ u32 ka : 1; /* keep alive */
+ u32 vlan_layer : 3;
+ u32 psn : 1; /* egress packet has PPPoE session */
+ u32 vpm : 1; /* 0:ethertype remark, 1:0x8100(CR default) */
+ u32 ps : 1; /* packet sampling */
+ u32 cah : 1; /* cacheable flag */
+ u32 rmt : 1; /* remove tunnel ip header (6rd/dslite only) */
+ u32 ttl : 1;
+ u32 pkt_type : 5;
+ u32 state : 2;
+ u32 udp : 1;
+ u32 sta : 1; /* static entry */
+} __packed;
+
+/* second info word: egress port/QoS/accounting controls */
+struct hnat_info_blk2 {
+ u32 qid : 7; /* QID in Qos Port */
+ u32 port_mg : 1;
+ u32 fqos : 1; /* force to PSE QoS port */
+ u32 dp : 4; /* force to PSE port x */
+ u32 mcast : 1; /* multicast this packet to CPU */
+ u32 pcpl : 1; /* OSBN */
+ u32 mibf : 1;
+ u32 alen : 1;
+ u32 rxid : 2;
+ u32 winfoi : 1;
+ u32 port_ag : 4;
+ u32 dscp : 8; /* DSCP value */
+} __packed;
+
+/* Wi-Fi target info for offloaded wireless flows */
+struct hnat_winfo {
+ u32 bssid : 6; /* WiFi Bssidx */
+ u32 wcid : 10; /* WiFi wtable Idx */
+} __packed;
+
+#else
+/* first word of an entry that is not yet bound to a flow */
+struct hnat_unbind_info_blk {
+ u32 time_stamp : 8;
+ u32 pcnt : 16; /* packet count */
+ u32 preb : 1;
+ u32 pkt_type : 3;
+ u32 state : 2;
+ u32 udp : 1;
+ u32 sta : 1; /* static entry */
+} __packed;
+
+/* first word of a bound (accelerated) entry */
+struct hnat_bind_info_blk {
+ u32 time_stamp : 15;
+ u32 ka : 1; /* keep alive */
+ u32 vlan_layer : 3;
+ u32 psn : 1; /* egress packet has PPPoE session */
+ u32 vpm : 1; /* 0:ethertype remark, 1:0x8100(CR default) */
+ u32 ps : 1; /* packet sampling */
+ u32 cah : 1; /* cacheable flag */
+ u32 rmt : 1; /* remove tunnel ip header (6rd/dslite only) */
+ u32 ttl : 1;
+ u32 pkt_type : 3;
+ u32 state : 2;
+ u32 udp : 1;
+ u32 sta : 1; /* static entry */
+} __packed;
+
+/* second info word: egress port/QoS/accounting controls */
+struct hnat_info_blk2 {
+ u32 qid : 4; /* QID in Qos Port */
+ u32 fqos : 1; /* force to PSE QoS port */
+ u32 dp : 3; /* force to PSE port x
+ * 0:PSE,1:GSW, 2:GMAC,4:PPE,5:QDMA,7=DROP
+ */
+ u32 mcast : 1; /* multicast this packet to CPU */
+ u32 pcpl : 1; /* OSBN */
+ u32 mibf : 1; /* 0:off 1:on PPE MIB counter */
+ u32 alen : 1; /* 0:post 1:pre packet length in accounting */
+ u32 port_mg : 6; /* port meter group */
+ u32 port_ag : 6; /* port account group */
+ u32 dscp : 8; /* DSCP value */
+} __packed;
+
+/* Wi-Fi target info for offloaded wireless flows */
+struct hnat_winfo {
+ u32 bssid : 6; /* WiFi Bssidx */
+ u32 wcid : 8; /* WiFi wtable Idx */
+ u32 rxid : 2; /* WiFi Ring idx */
+} __packed;
+#endif
+
+/* info blk2 for WHNAT: same word as hnat_info_blk2 but with the queue id
+ * split across qid/qid2 and the Wi-Fi offload fields (wdmaid/winfoi) added.
+ */
+struct hnat_info_blk2_whnat {
+ u32 qid : 4; /* QID[3:0] in Qos Port */
+ u32 fqos : 1; /* force to PSE QoS port */
+ u32 dp : 3; /* force to PSE port x
+ * 0:PSE,1:GSW, 2:GMAC,4:PPE,5:QDMA,7=DROP
+ */
+ u32 mcast : 1; /* multicast this packet to CPU */
+ u32 pcpl : 1; /* OSBN */
+ u32 mibf : 1; /* 0:off 1:on PPE MIB counter */
+ u32 alen : 1; /* 0:post 1:pre packet length in accounting */
+ u32 qid2 : 2; /* QID[5:4] in Qos Port */
+ u32 resv : 2;
+ u32 wdmaid : 1; /* 0:to pcie0 dev 1:to pcie1 dev */
+ u32 winfoi : 1; /* 0:off 1:on Wi-Fi hwnat support */
+ u32 port_ag : 6; /* port account group */
+ u32 dscp : 8; /* DSCP value */
+} __packed;
+
+struct hnat_ipv4_hnapt {
+ union {
+ struct hnat_bind_info_blk bfib1;
+ struct hnat_unbind_info_blk udib1;
+ u32 info_blk1;
+ };
+ u32 sip;
+ u32 dip;
+ u16 dport;
+ u16 sport;
+ union {
+ struct hnat_info_blk2 iblk2;
+ struct hnat_info_blk2_whnat iblk2w;
+ u32 info_blk2;
+ };
+ u32 new_sip;
+ u32 new_dip;
+ u16 new_dport;
+ u16 new_sport;
+ u16 m_timestamp; /* For mcast*/
+ u16 resv1;
+ u32 resv2;
+ u32 resv3 : 26;
+ u32 act_dp : 6; /* UDF */
+ u16 vlan1;
+ u16 etype;
+ u32 dmac_hi;
+ union {
+#if !defined(CONFIG_MEDIATEK_NETSYS_V2)
+ struct hnat_winfo winfo;
+#endif
+ u16 vlan2;
+ };
+ u16 dmac_lo;
+ u32 smac_hi;
+ u16 pppoe_id;
+ u16 smac_lo;
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+ u16 minfo;
+ struct hnat_winfo winfo;
+#endif
+} __packed;
+
+struct hnat_ipv4_dslite {
+ union {
+ struct hnat_bind_info_blk bfib1;
+ struct hnat_unbind_info_blk udib1;
+ u32 info_blk1;
+ };
+ u32 sip;
+ u32 dip;
+ u16 dport;
+ u16 sport;
+
+ u32 tunnel_sipv6_0;
+ u32 tunnel_sipv6_1;
+ u32 tunnel_sipv6_2;
+ u32 tunnel_sipv6_3;
+
+ u32 tunnel_dipv6_0;
+ u32 tunnel_dipv6_1;
+ u32 tunnel_dipv6_2;
+ u32 tunnel_dipv6_3;
+
+ u8 flow_lbl[3]; /* in order to consist with Linux kernel (should be 20bits) */
+ u8 priority; /* in order to consist with Linux kernel (should be 8bits) */
+ u32 hop_limit : 8;
+ u32 resv2 : 18;
+ u32 act_dp : 6; /* UDF */
+
+ union {
+ struct hnat_info_blk2 iblk2;
+ struct hnat_info_blk2_whnat iblk2w;
+ u32 info_blk2;
+ };
+
+ u16 vlan1;
+ u16 etype;
+ u32 dmac_hi;
+ union {
+#if !defined(CONFIG_MEDIATEK_NETSYS_V2)
+ struct hnat_winfo winfo;
+#endif
+ u16 vlan2;
+ };
+ u16 dmac_lo;
+ u32 smac_hi;
+ u16 pppoe_id;
+ u16 smac_lo;
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+ u16 minfo;
+ struct hnat_winfo winfo;
+ u32 new_sip;
+ u32 new_dip;
+ u16 new_dport;
+ u16 new_sport;
+#endif
+} __packed;
+
+struct hnat_ipv6_3t_route {
+ union {
+ struct hnat_bind_info_blk bfib1;
+ struct hnat_unbind_info_blk udib1;
+ u32 info_blk1;
+ };
+ u32 ipv6_sip0;
+ u32 ipv6_sip1;
+ u32 ipv6_sip2;
+ u32 ipv6_sip3;
+ u32 ipv6_dip0;
+ u32 ipv6_dip1;
+ u32 ipv6_dip2;
+ u32 ipv6_dip3;
+ u32 prot : 8;
+ u32 resv : 24;
+
+ u32 resv1;
+ u32 resv2;
+ u32 resv3;
+ u32 resv4 : 26;
+ u32 act_dp : 6; /* UDF */
+
+ union {
+ struct hnat_info_blk2 iblk2;
+ struct hnat_info_blk2_whnat iblk2w;
+ u32 info_blk2;
+ };
+ u16 vlan1;
+ u16 etype;
+ u32 dmac_hi;
+ union {
+#if !defined(CONFIG_MEDIATEK_NETSYS_V2)
+ struct hnat_winfo winfo;
+#endif
+ u16 vlan2;
+ };
+ u16 dmac_lo;
+ u32 smac_hi;
+ u16 pppoe_id;
+ u16 smac_lo;
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+ u16 minfo;
+ struct hnat_winfo winfo;
+#endif
+} __packed;
+
+/* PPE FOE table entry: IPv6 5-tuple route layout (SIP/DIP + ports).
+ * NOTE(review): hardware-defined layout — do not reorder fields.
+ */
+struct hnat_ipv6_5t_route {
+	union {
+		struct hnat_bind_info_blk bfib1;
+		struct hnat_unbind_info_blk udib1;
+		u32 info_blk1;
+	};
+	u32 ipv6_sip0;
+	u32 ipv6_sip1;
+	u32 ipv6_sip2;
+	u32 ipv6_sip3;
+	u32 ipv6_dip0;
+	u32 ipv6_dip1;
+	u32 ipv6_dip2;
+	u32 ipv6_dip3;
+	u16 dport;	/* L4 destination port (TCP/UDP) */
+	u16 sport;	/* L4 source port */
+
+	u32 resv1;
+	u32 resv2;
+	u32 resv3;
+	u32 resv4 : 26;
+	u32 act_dp : 6; /* UDF */
+
+	union {
+		struct hnat_info_blk2 iblk2;
+		struct hnat_info_blk2_whnat iblk2w;
+		u32 info_blk2;
+	};
+
+	u16 vlan1;
+	u16 etype;
+	u32 dmac_hi;
+	union {
+#if !defined(CONFIG_MEDIATEK_NETSYS_V2)
+		/* legacy NETSYS: WiFi hook info shares the vlan2 slot */
+		struct hnat_winfo winfo;
+#endif
+		u16 vlan2;
+	};
+	u16 dmac_lo;
+	u32 smac_hi;
+	u16 pppoe_id;
+	u16 smac_lo;
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+	u16 minfo;
+	struct hnat_winfo winfo;
+#endif
+} __packed;
+
+/* PPE FOE table entry: IPv6 6RD (IPv6-in-IPv4 tunnel) layout.
+ * Carries the inner IPv6 5-tuple plus the outer IPv4 tunnel header
+ * fields the hardware rewrites on egress.
+ * NOTE(review): hardware-defined layout — do not reorder fields.
+ */
+struct hnat_ipv6_6rd {
+	union {
+		struct hnat_bind_info_blk bfib1;
+		struct hnat_unbind_info_blk udib1;
+		u32 info_blk1;
+	};
+	u32 ipv6_sip0;
+	u32 ipv6_sip1;
+	u32 ipv6_sip2;
+	u32 ipv6_sip3;
+	u32 ipv6_dip0;
+	u32 ipv6_dip1;
+	u32 ipv6_dip2;
+	u32 ipv6_dip3;
+	u16 dport;
+	u16 sport;
+
+	/* outer IPv4 tunnel endpoints and header fields */
+	u32 tunnel_sipv4;
+	u32 tunnel_dipv4;
+	u32 hdr_chksum : 16;
+	u32 dscp : 8;
+	u32 ttl : 8;
+	u32 flag : 3;
+	u32 resv1 : 13;
+	u32 per_flow_6rd_id : 1;
+	u32 resv2 : 9;
+	u32 act_dp : 6; /* UDF */
+
+	union {
+		struct hnat_info_blk2 iblk2;
+		struct hnat_info_blk2_whnat iblk2w;
+		u32 info_blk2;
+	};
+
+	u16 vlan1;
+	u16 etype;
+	u32 dmac_hi;
+	union {
+#if !defined(CONFIG_MEDIATEK_NETSYS_V2)
+		/* legacy NETSYS: WiFi hook info shares the vlan2 slot */
+		struct hnat_winfo winfo;
+#endif
+		u16 vlan2;
+	};
+	u16 dmac_lo;
+	u32 smac_hi;
+	u16 pppoe_id;
+	u16 smac_lo;
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+	u16 minfo;
+	struct hnat_winfo winfo;
+	u32 resv3;
+	u32 resv4;
+	u16 new_dport;
+	u16 new_sport;
+#endif
+} __packed;
+
+/* One slot of the FOE (flow offload entry) table: a union of every
+ * per-packet-type layout.  bfib1/udib1 overlay the common first word,
+ * so bfib1.state / bfib1.pkt_type select which member is valid (see
+ * the IS_IPV4_*/IS_IPV6_* macros below).
+ */
+struct foe_entry {
+	union {
+		struct hnat_unbind_info_blk udib1;
+		struct hnat_bind_info_blk bfib1;
+		struct hnat_ipv4_hnapt ipv4_hnapt;
+		struct hnat_ipv4_dslite ipv4_dslite;
+		struct hnat_ipv6_3t_route ipv6_3t_route;
+		struct hnat_ipv6_5t_route ipv6_5t_route;
+		struct hnat_ipv6_6rd ipv6_6rd;
+	};
+};
+
+/* If user wants to change default FOE entry number, both DEF_ETRY_NUM and
+ * DEF_ETRY_NUM_CFG need to be modified.
+ */
+#define DEF_ETRY_NUM 8192
+/* feasible values : 16384, 8192, 4096, 2048, 1024 */
+#define DEF_ETRY_NUM_CFG TABLE_8K
+/* corresponding values : TABLE_16K, TABLE_8K, TABLE_4K, TABLE_2K, TABLE_1K */
+#define MAX_EXT_DEVS (0x3fU)
+#define MAX_IF_NUM 64
+
+/* Per-flow hardware MIB counter slot: byte counter split into low 32 /
+ * high 16 bits, packet counter split into low 32 / high 8 bits.
+ * NOTE(review): hardware-defined layout (__packed) — do not reorder.
+ */
+struct mib_entry {
+	u32 byt_cnt_l;
+	u16 byt_cnt_h;
+	u32 pkt_cnt_l;
+	u8 pkt_cnt_h;
+	u8 resv0;
+	u32 resv1;
+} __packed;
+
+/* Software-side accumulated per-flow counters (fed from mib_entry). */
+struct hnat_accounting {
+	u64 bytes;
+	u64 packets;
+};
+
+enum mtk_hnat_version {
+ MTK_HNAT_V1 = 1, /* version 1: mt7621, mt7623 */
+ MTK_HNAT_V2, /* version 2: mt7622 */
+ MTK_HNAT_V3, /* version 3: mt7629 */
+ MTK_HNAT_V4, /* version 4: mt7986 */
+};
+
+/* Per-SoC capability/match data (selected via of_hnat_match). */
+struct mtk_hnat_data {
+	u8 num_of_sch;			/* number of QDMA schedulers */
+	bool whnat;			/* WiFi HNAT (WED) supported */
+	bool per_flow_accounting;	/* per-flow MIB counters supported */
+	bool mcast;			/* multicast offload supported */
+	enum mtk_hnat_version version;
+};
+
+/* Driver-private state for the HNAT/PPE engine (singleton, exported as
+ * hnat_priv below).
+ */
+struct mtk_hnat {
+	struct device *dev;
+	void __iomem *fe_base;		/* frame engine MMIO base */
+	void __iomem *ppe_base;		/* PPE MMIO base */
+	struct foe_entry *foe_table_cpu; /* CPU view of the FOE table */
+	dma_addr_t foe_table_dev;	/* DMA address of the FOE table */
+	u8 enable;
+	u8 enable1;
+	struct dentry *root;		/* debugfs root directory */
+	struct debugfs_regset32 *regset;
+
+	struct mib_entry *foe_mib_cpu;	/* CPU view of the MIB table */
+	dma_addr_t foe_mib_dev;		/* DMA address of the MIB table */
+	struct hnat_accounting *acct;	/* accumulated per-flow counters */
+	const struct mtk_hnat_data *data; /* per-SoC capabilities */
+
+	/* interfaces this driver acts on */
+	char wan[IFNAMSIZ];
+	char lan[IFNAMSIZ];
+	char ppd[IFNAMSIZ];
+	u16 lvid;			/* LAN VLAN id */
+	u16 wvid;			/* WAN VLAN id */
+
+	struct reset_control *rstc;
+
+	u8 gmac_num;
+	u8 wan_dsa_port;		/* NONE_DSA_PORT when DSA unused */
+	struct ppe_mcast_table *pmcast;
+
+	u32 foe_etry_num;		/* configured FOE table size */
+	struct net_device *g_ppdev;
+	struct net_device *wifi_hook_if[MAX_IF_NUM];
+	struct extdev_entry *ext_if[MAX_EXT_DEVS];
+	struct timer_list hnat_sma_build_entry_timer;
+	struct timer_list hnat_reset_timestamp_timer;
+	struct timer_list hnat_mcast_check_timer;
+};
+
+/* One registered external (non-GMAC, e.g. WiFi) interface. */
+struct extdev_entry {
+	char name[IFNAMSIZ];
+	struct net_device *dev;
+};
+
+/* Minimal L4 header view: just the port pair common to TCP and UDP. */
+struct tcpudphdr {
+	__be16 src;
+	__be16 dst;
+};
+
+enum FoeEntryState { INVALID = 0, UNBIND = 1, BIND = 2, FIN = 3 };
+
+/* FOE entry packet types (values match the hardware pkt_type field;
+ * note the MAP-T/MAP-E encoding differs between NETSYS generations).
+ */
+enum FoeIpAct {
+	IPV4_HNAPT = 0,
+	IPV4_HNAT = 1,
+	IPV4_DSLITE = 3,
+	IPV6_3T_ROUTE = 4,
+	IPV6_5T_ROUTE = 5,
+	IPV6_6RD = 7,
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+	IPV4_MAP_T = 8,
+	IPV4_MAP_E = 9,
+#else
+	/* legacy NETSYS: MAP-T and MAP-E share one pkt_type code */
+	IPV4_MAP_T = 6,
+	IPV4_MAP_E = 6,
+#endif
+};
+
+/*--------------------------------------------------------------------------*/
+/* Common Definition*/
+/*--------------------------------------------------------------------------*/
+
+#define HNAT_SW_VER "1.1.0"
+#define HASH_SEED_KEY 0x12345678
+
+/*PPE_TB_CFG value*/
+#define ENTRY_80B 1
+#define ENTRY_64B 0
+#define TABLE_1K 0
+#define TABLE_2K 1
+#define TABLE_4K 2
+#define TABLE_8K 3
+#define TABLE_16K 4
+#define SMA_DROP 0 /* Drop the packet */
+#define SMA_DROP2 1 /* Drop the packet */
+#define SMA_ONLY_FWD_CPU 2 /* Only Forward to CPU */
+#define SMA_FWD_CPU_BUILD_ENTRY 3 /* Forward to CPU and build new FOE entry */
+#define HASH_MODE_0 0
+#define HASH_MODE_1 1
+#define HASH_MODE_2 2
+#define HASH_MODE_3 3
+
+/*PPE_FLOW_CFG*/
+#define BIT_FUC_FOE BIT(2)
+#define BIT_FMC_FOE BIT(1)
+#define BIT_FBC_FOE BIT(0)
+#define BIT_UDP_IP4F_NAT_EN BIT(7) /*Enable IPv4 fragment + UDP packet NAT*/
+#define BIT_IPV6_3T_ROUTE_EN BIT(8)
+#define BIT_IPV6_5T_ROUTE_EN BIT(9)
+#define BIT_IPV6_6RD_EN BIT(10)
+#define BIT_IPV4_NAT_EN BIT(12)
+#define BIT_IPV4_NAPT_EN BIT(13)
+#define BIT_IPV4_DSL_EN BIT(14)
+#define BIT_MIB_BUSY BIT(16)
+#define BIT_IPV4_NAT_FRAG_EN BIT(17)
+#define BIT_IPV4_HASH_GREK BIT(19)
+#define BIT_IPV6_HASH_GREK BIT(20)
+#define BIT_IPV4_MAPE_EN BIT(21)
+#define BIT_IPV4_MAPT_EN BIT(22)
+
+/*GDMA_FWD_CFG value*/
+#define BITS_GDM_UFRC_P_PPE (NR_PPE_PORT << 12)
+#define BITS_GDM_BFRC_P_PPE (NR_PPE_PORT << 8)
+#define BITS_GDM_MFRC_P_PPE (NR_PPE_PORT << 4)
+#define BITS_GDM_OFRC_P_PPE (NR_PPE_PORT << 0)
+#define BITS_GDM_ALL_FRC_P_PPE \
+ (BITS_GDM_UFRC_P_PPE | BITS_GDM_BFRC_P_PPE | BITS_GDM_MFRC_P_PPE | \
+ BITS_GDM_OFRC_P_PPE)
+
+#define BITS_GDM_UFRC_P_CPU_PDMA (NR_PDMA_PORT << 12)
+#define BITS_GDM_BFRC_P_CPU_PDMA (NR_PDMA_PORT << 8)
+#define BITS_GDM_MFRC_P_CPU_PDMA (NR_PDMA_PORT << 4)
+#define BITS_GDM_OFRC_P_CPU_PDMA (NR_PDMA_PORT << 0)
+#define BITS_GDM_ALL_FRC_P_CPU_PDMA \
+ (BITS_GDM_UFRC_P_CPU_PDMA | BITS_GDM_BFRC_P_CPU_PDMA | \
+ BITS_GDM_MFRC_P_CPU_PDMA | BITS_GDM_OFRC_P_CPU_PDMA)
+
+#define BITS_GDM_UFRC_P_CPU_QDMA (NR_QDMA_PORT << 12)
+#define BITS_GDM_BFRC_P_CPU_QDMA (NR_QDMA_PORT << 8)
+#define BITS_GDM_MFRC_P_CPU_QDMA (NR_QDMA_PORT << 4)
+#define BITS_GDM_OFRC_P_CPU_QDMA (NR_QDMA_PORT << 0)
+#define BITS_GDM_ALL_FRC_P_CPU_QDMA \
+ (BITS_GDM_UFRC_P_CPU_QDMA | BITS_GDM_BFRC_P_CPU_QDMA | \
+ BITS_GDM_MFRC_P_CPU_QDMA | BITS_GDM_OFRC_P_CPU_QDMA)
+
+#define BITS_GDM_UFRC_P_DISCARD (NR_DISCARD << 12)
+#define BITS_GDM_BFRC_P_DISCARD (NR_DISCARD << 8)
+#define BITS_GDM_MFRC_P_DISCARD (NR_DISCARD << 4)
+#define BITS_GDM_OFRC_P_DISCARD (NR_DISCARD << 0)
+#define BITS_GDM_ALL_FRC_P_DISCARD \
+ (BITS_GDM_UFRC_P_DISCARD | BITS_GDM_BFRC_P_DISCARD | \
+ BITS_GDM_MFRC_P_DISCARD | BITS_GDM_OFRC_P_DISCARD)
+
+#define hnat_is_enabled(hnat_priv) (hnat_priv->enable)
+#define hnat_enabled(hnat_priv) (hnat_priv->enable = 1)
+#define hnat_disabled(hnat_priv) (hnat_priv->enable = 0)
+#define hnat_is_enabled1(hnat_priv) (hnat_priv->enable1)
+#define hnat_enabled1(hnat_priv) (hnat_priv->enable1 = 1)
+#define hnat_disabled1(hnat_priv) (hnat_priv->enable1 = 0)
+
+#define entry_hnat_is_bound(e) (e->bfib1.state == BIND)
+#define entry_hnat_state(e) (e->bfib1.state)
+
+#define skb_hnat_is_hashed(skb) \
+ (skb_hnat_entry(skb) != 0x3fff && skb_hnat_entry(skb) < hnat_priv->foe_etry_num)
+#define FROM_GE_LAN(skb) (skb_hnat_iface(skb) == FOE_MAGIC_GE_LAN)
+#define FROM_GE_WAN(skb) (skb_hnat_iface(skb) == FOE_MAGIC_GE_WAN)
+#define FROM_GE_PPD(skb) (skb_hnat_iface(skb) == FOE_MAGIC_GE_PPD)
+#define FROM_GE_VIRTUAL(skb) (skb_hnat_iface(skb) == FOE_MAGIC_GE_VIRTUAL)
+#define FROM_EXT(skb) (skb_hnat_iface(skb) == FOE_MAGIC_EXT)
+#define FOE_MAGIC_GE_LAN 0x1
+#define FOE_MAGIC_GE_WAN 0x2
+#define FOE_MAGIC_EXT 0x3
+#define FOE_MAGIC_GE_VIRTUAL 0x4
+#define FOE_MAGIC_GE_PPD 0x5
+#define FOE_MAGIC_WED0 0x6
+#define FOE_MAGIC_WED1 0x7
+#define FOE_INVALID 0xf
+#define index6b(i) (0x3fU - i)
+
+#define IPV4_HNAPT 0
+#define IPV4_HNAT 1
+#define IP_FORMAT(addr) \
+ (((unsigned char *)&addr)[3], ((unsigned char *)&addr)[2], \
+ ((unsigned char *)&addr)[1], ((unsigned char *)&addr)[0])
+
+/*PSE Ports*/
+#define NR_PDMA_PORT 0
+#define NR_GMAC1_PORT 1
+#define NR_GMAC2_PORT 2
+#define NR_WHNAT_WDMA_PORT 3
+#define NR_PPE_PORT 4
+#define NR_QDMA_PORT 5
+#define NR_DISCARD 7
+#define NR_WDMA0_PORT 8
+#define NR_WDMA1_PORT 9
+#define LAN_DEV_NAME hnat_priv->lan
+#define IS_WAN(dev) \
+ (!strncmp((dev)->name, hnat_priv->wan, strlen(hnat_priv->wan)))
+#define IS_LAN(dev) (!strncmp(dev->name, LAN_DEV_NAME, strlen(LAN_DEV_NAME)))
+#define IS_BR(dev) (!strncmp(dev->name, "br", 2))
+#define IS_WHNAT(dev) \
+ ((hnat_priv->data->whnat && \
+ (get_wifi_hook_if_index_from_dev(dev) != 0)) ? 1 : 0)
+#define IS_EXT(dev) ((get_index_from_dev(dev) != 0) ? 1 : 0)
+#define IS_PPD(dev) (!strcmp(dev->name, hnat_priv->ppd))
+#define IS_IPV4_HNAPT(x) (((x)->bfib1.pkt_type == IPV4_HNAPT) ? 1 : 0)
+#define IS_IPV4_HNAT(x) (((x)->bfib1.pkt_type == IPV4_HNAT) ? 1 : 0)
+#define IS_IPV4_GRP(x) (IS_IPV4_HNAPT(x) | IS_IPV4_HNAT(x))
+#define IS_IPV4_DSLITE(x) (((x)->bfib1.pkt_type == IPV4_DSLITE) ? 1 : 0)
+#define IS_IPV4_MAPE(x) (((x)->bfib1.pkt_type == IPV4_MAP_E) ? 1 : 0)
+#define IS_IPV4_MAPT(x) (((x)->bfib1.pkt_type == IPV4_MAP_T) ? 1 : 0)
+#define IS_IPV6_3T_ROUTE(x) (((x)->bfib1.pkt_type == IPV6_3T_ROUTE) ? 1 : 0)
+#define IS_IPV6_5T_ROUTE(x) (((x)->bfib1.pkt_type == IPV6_5T_ROUTE) ? 1 : 0)
+#define IS_IPV6_6RD(x) (((x)->bfib1.pkt_type == IPV6_6RD) ? 1 : 0)
+#define IS_IPV6_GRP(x) \
+ (IS_IPV6_3T_ROUTE(x) | IS_IPV6_5T_ROUTE(x) | IS_IPV6_6RD(x) | \
+ IS_IPV4_DSLITE(x) | IS_IPV4_MAPE(x) | IS_IPV4_MAPT(x))
+#define IS_BOND_MODE (!strncmp(LAN_DEV_NAME, "bond", 4))
+#define IS_GMAC1_MODE ((hnat_priv->gmac_num == 1) ? 1 : 0)
+
+#define es(entry) (entry_state[entry->bfib1.state])
+#define ei(entry, end) (hnat_priv->foe_etry_num - (int)(end - entry))
+#define pt(entry) (packet_type[entry->ipv4_hnapt.bfib1.pkt_type])
+#define ipv4_smac(mac, e) \
+ ({ \
+ mac[0] = e->ipv4_hnapt.smac_hi[3]; \
+ mac[1] = e->ipv4_hnapt.smac_hi[2]; \
+ mac[2] = e->ipv4_hnapt.smac_hi[1]; \
+ mac[3] = e->ipv4_hnapt.smac_hi[0]; \
+ mac[4] = e->ipv4_hnapt.smac_lo[1]; \
+ mac[5] = e->ipv4_hnapt.smac_lo[0]; \
+ })
+#define ipv4_dmac(mac, e) \
+ ({ \
+ mac[0] = e->ipv4_hnapt.dmac_hi[3]; \
+ mac[1] = e->ipv4_hnapt.dmac_hi[2]; \
+ mac[2] = e->ipv4_hnapt.dmac_hi[1]; \
+ mac[3] = e->ipv4_hnapt.dmac_hi[0]; \
+ mac[4] = e->ipv4_hnapt.dmac_lo[1]; \
+ mac[5] = e->ipv4_hnapt.dmac_lo[0]; \
+ })
+
+#define IS_DSA_LAN(dev) (!strncmp(dev->name, "lan", 3))
+#define IS_DSA_WAN(dev) (!strncmp(dev->name, "wan", 3))
+#define NONE_DSA_PORT 0xff
+#define MAX_CRSN_NUM 32
+#define IPV6_HDR_LEN 40
+
+/*QDMA_PAGE value*/
+#define NUM_OF_Q_PER_PAGE 16
+
+/*IPv6 Header*/
+#ifndef NEXTHDR_IPIP
+#define NEXTHDR_IPIP 4
+#endif
+
+extern const struct of_device_id of_hnat_match[];
+extern struct mtk_hnat *hnat_priv;
+
+/* DSA (MT7530 switch) integration: real implementations when the
+ * switch driver is built in, no-op stubs otherwise so callers need
+ * no #ifdefs.
+ */
+#if defined(CONFIG_NET_DSA_MT7530)
+void hnat_dsa_fill_stag(const struct net_device *netdev,
+			struct foe_entry *entry,
+			struct flow_offload_hw_path *hw_path,
+			u16 eth_proto, int mape);
+
+/* DSA is in use iff a WAN switch port was discovered at probe time. */
+static inline bool hnat_dsa_is_enable(struct mtk_hnat *priv)
+{
+	return (priv->wan_dsa_port != NONE_DSA_PORT);
+}
+#else
+static inline void hnat_dsa_fill_stag(const struct net_device *netdev,
+				      struct foe_entry *entry,
+				      struct flow_offload_hw_path *hw_path,
+				      u16 eth_proto, int mape)
+{
+}
+
+static inline bool hnat_dsa_is_enable(struct mtk_hnat *priv)
+{
+	return false;
+}
+#endif
+
+void hnat_deinit_debugfs(struct mtk_hnat *h);
+int hnat_init_debugfs(struct mtk_hnat *h);
+int hnat_register_nf_hooks(void);
+void hnat_unregister_nf_hooks(void);
+int whnat_adjust_nf_hooks(void);
+int mtk_hqos_ptype_cb(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *unused);
+extern int dbg_cpu_reason;
+extern int debug_level;
+extern int hook_toggle;
+extern int mape_toggle;
+
+int ext_if_add(struct extdev_entry *ext_entry);
+int ext_if_del(struct extdev_entry *ext_entry);
+void cr_set_field(void __iomem *reg, u32 field, u32 val);
+int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no);
+int mtk_sw_nat_hook_rx(struct sk_buff *skb);
+void mtk_ppe_dev_register_hook(struct net_device *dev);
+void mtk_ppe_dev_unregister_hook(struct net_device *dev);
+int nf_hnat_netdevice_event(struct notifier_block *unused, unsigned long event,
+ void *ptr);
+int nf_hnat_netevent_handler(struct notifier_block *unused, unsigned long event,
+ void *ptr);
+uint32_t foe_dump_pkt(struct sk_buff *skb);
+uint32_t hnat_cpu_reason_cnt(struct sk_buff *skb);
+int hnat_enable_hook(void);
+int hnat_disable_hook(void);
+void hnat_cache_ebl(int enable);
+void set_gmac_ppe_fwd(int gmac_no, int enable);
+int entry_delete(int index);
+
+/* Read the current 16-bit FOE timestamp from the frame engine.
+ *
+ * Fix: read through the passed-in instance (h) instead of the global
+ * hnat_priv — previously the parameter was dead and the helper only
+ * worked for the singleton.  Behavior is unchanged for existing
+ * callers, which pass hnat_priv.
+ */
+static inline u16 foe_timestamp(struct mtk_hnat *h)
+{
+	/* 0x0010: FE timestamp register offset — TODO confirm vs datasheet */
+	return (readl(h->fe_base + 0x0010)) & 0xffff;
+}
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_debugfs.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_debugfs.c
new file mode 100644
index 0000000..4ae9128
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_debugfs.c
@@ -0,0 +1,1952 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
+ * Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/iopoll.h>
+
+#include "hnat.h"
+#include "nf_hnat_mtk.h"
+#include "../mtk_eth_soc.h"
+
+int dbg_entry_state = BIND;
+typedef int (*debugfs_write_func)(int par1);
+int debug_level;
+int dbg_cpu_reason;
+int hook_toggle;
+int mape_toggle;
+unsigned int dbg_cpu_reason_cnt[MAX_CRSN_NUM];
+
+static const char * const entry_state[] = { "INVALID", "UNBIND", "BIND", "FIN" };
+
+static const char * const packet_type[] = {
+ "IPV4_HNAPT", "IPV4_HNAT", "IPV6_1T_ROUTE", "IPV4_DSLITE",
+ "IPV6_3T_ROUTE", "IPV6_5T_ROUTE", "REV", "IPV6_6RD",
+ "IPV4_MAP_T", "IPV4_MAP_E",
+};
+
+/* Translate the PPE "CPU reason" carried in the skb into a
+ * human-readable string for debug logging.
+ *
+ * Returns a string literal for known reasons, or a static scratch
+ * buffer for unknown ones.  NOTE: the static buffer makes the fallback
+ * path non-reentrant; acceptable for this debugfs-only helper.
+ *
+ * Fixes: bounded snprintf instead of sprintf (the entry index format
+ * could grow past the 32-byte buffer), and "hot limit" typo.
+ */
+static uint8_t *show_cpu_reason(struct sk_buff *skb)
+{
+	static u8 buf[32];
+
+	switch (skb_hnat_reason(skb)) {
+	case TTL_0:
+		return "IPv4(IPv6) TTL(hop limit)\n";
+	case HAS_OPTION_HEADER:
+		return "Ipv4(IPv6) has option(extension) header\n";
+	case NO_FLOW_IS_ASSIGNED:
+		return "No flow is assigned\n";
+	case IPV4_WITH_FRAGMENT:
+		return "IPv4 HNAT doesn't support IPv4 /w fragment\n";
+	case IPV4_HNAPT_DSLITE_WITH_FRAGMENT:
+		return "IPv4 HNAPT/DS-Lite doesn't support IPv4 /w fragment\n";
+	case IPV4_HNAPT_DSLITE_WITHOUT_TCP_UDP:
+		return "IPv4 HNAPT/DS-Lite can't find TCP/UDP sport/dport\n";
+	case IPV6_5T_6RD_WITHOUT_TCP_UDP:
+		return "IPv6 5T-route/6RD can't find TCP/UDP sport/dport\n";
+	case TCP_FIN_SYN_RST:
+		return "Ingress packet is TCP fin/syn/rst\n";
+	case UN_HIT:
+		return "FOE Un-hit\n";
+	case HIT_UNBIND:
+		return "FOE Hit unbind\n";
+	case HIT_UNBIND_RATE_REACH:
+		return "FOE Hit unbind & rate reach\n";
+	case HIT_BIND_TCP_FIN:
+		return "Hit bind PPE TCP FIN entry\n";
+	case HIT_BIND_TTL_1:
+		return "Hit bind PPE entry and TTL(hop limit) = 1 and TTL(hop limit) - 1\n";
+	case HIT_BIND_WITH_VLAN_VIOLATION:
+		return "Hit bind and VLAN replacement violation\n";
+	case HIT_BIND_KEEPALIVE_UC_OLD_HDR:
+		return "Hit bind and keep alive with unicast old-header packet\n";
+	case HIT_BIND_KEEPALIVE_MC_NEW_HDR:
+		return "Hit bind and keep alive with multicast new-header packet\n";
+	case HIT_BIND_KEEPALIVE_DUP_OLD_HDR:
+		return "Hit bind and keep alive with duplicate old-header packet\n";
+	case HIT_BIND_FORCE_TO_CPU:
+		return "FOE Hit bind & force to CPU\n";
+	case HIT_BIND_EXCEED_MTU:
+		return "Hit bind and exceed MTU\n";
+	case HIT_BIND_MULTICAST_TO_CPU:
+		return "Hit bind multicast packet to CPU\n";
+	case HIT_BIND_MULTICAST_TO_GMAC_CPU:
+		return "Hit bind multicast packet to GMAC & CPU\n";
+	case HIT_PRE_BIND:
+		return "Pre bind\n";
+	}
+
+	/* Unknown reason: bounded print, never overflow the scratch buf. */
+	snprintf(buf, sizeof(buf), "CPU Reason Error - %X\n",
+		 skb_hnat_entry(skb));
+	return buf;
+}
+
+/* Dump a received packet's FOE entry and CPU reason to the kernel log.
+ * Debug helper; always returns 1 (callers ignore the value).
+ *
+ * Fix: validate the skb's entry index BEFORE deriving a pointer into
+ * the FOE table.  The original computed the pointer first and only
+ * checked the 0x3fff magic, so indices in [foe_etry_num, 0x3ffe] read
+ * out of bounds with the default 8K table.
+ */
+uint32_t foe_dump_pkt(struct sk_buff *skb)
+{
+	struct foe_entry *entry;
+
+	pr_info("\nRx===<FOE_Entry=%d>=====\n", skb_hnat_entry(skb));
+	pr_info("RcvIF=%s\n", skb->dev->name);
+	pr_info("FOE_Entry=%d\n", skb_hnat_entry(skb));
+	pr_info("CPU Reason=%s", show_cpu_reason(skb));
+	pr_info("ALG=%d\n", skb_hnat_alg(skb));
+	pr_info("SP=%d\n", skb_hnat_sport(skb));
+
+	/* some special alert occurred, so entry_num is useless (just skip it);
+	 * also reject any index beyond the configured table size
+	 */
+	if (skb_hnat_entry(skb) == 0x3fff ||
+	    skb_hnat_entry(skb) >= hnat_priv->foe_etry_num)
+		return 1;
+
+	entry = &hnat_priv->foe_table_cpu[skb_hnat_entry(skb)];
+
+	/* PPE: IPv4 packet=IPV4_HNAT IPv6 packet=IPV6_ROUTE */
+	if (IS_IPV4_GRP(entry)) {
+		__be32 saddr = htonl(entry->ipv4_hnapt.sip);
+		__be32 daddr = htonl(entry->ipv4_hnapt.dip);
+
+		pr_info("Information Block 1=%x\n",
+			entry->ipv4_hnapt.info_blk1);
+		pr_info("SIP=%pI4\n", &saddr);
+		pr_info("DIP=%pI4\n", &daddr);
+		pr_info("SPORT=%d\n", entry->ipv4_hnapt.sport);
+		pr_info("DPORT=%d\n", entry->ipv4_hnapt.dport);
+		pr_info("Information Block 2=%x\n",
+			entry->ipv4_hnapt.info_blk2);
+		pr_info("State = %s, proto = %s\n", entry->bfib1.state == 0 ?
+			"Invalid" : entry->bfib1.state == 1 ?
+			"Unbind" : entry->bfib1.state == 2 ?
+			"BIND" : entry->bfib1.state == 3 ?
+			"FIN" : "Unknown",
+			entry->ipv4_hnapt.bfib1.udp == 0 ?
+			"TCP" : entry->ipv4_hnapt.bfib1.udp == 1 ?
+			"UDP" : "Unknown");
+	} else if (IS_IPV6_GRP(entry)) {
+		pr_info("Information Block 1=%x\n",
+			entry->ipv6_5t_route.info_blk1);
+		pr_info("IPv6_SIP=%08X:%08X:%08X:%08X\n",
+			entry->ipv6_5t_route.ipv6_sip0,
+			entry->ipv6_5t_route.ipv6_sip1,
+			entry->ipv6_5t_route.ipv6_sip2,
+			entry->ipv6_5t_route.ipv6_sip3);
+		pr_info("IPv6_DIP=%08X:%08X:%08X:%08X\n",
+			entry->ipv6_5t_route.ipv6_dip0,
+			entry->ipv6_5t_route.ipv6_dip1,
+			entry->ipv6_5t_route.ipv6_dip2,
+			entry->ipv6_5t_route.ipv6_dip3);
+		pr_info("SPORT=%d\n", entry->ipv6_5t_route.sport);
+		pr_info("DPORT=%d\n", entry->ipv6_5t_route.dport);
+		pr_info("Information Block 2=%x\n",
+			entry->ipv6_5t_route.info_blk2);
+		pr_info("State = %s, proto = %s\n", entry->bfib1.state == 0 ?
+			"Invalid" : entry->bfib1.state == 1 ?
+			"Unbind" : entry->bfib1.state == 2 ?
+			"BIND" : entry->bfib1.state == 3 ?
+			"FIN" : "Unknown",
+			entry->ipv6_5t_route.bfib1.udp == 0 ?
+			"TCP" : entry->ipv6_5t_route.bfib1.udp == 1 ?
+			"UDP" : "Unknown");
+	} else {
+		pr_info("unknown Pkt_type=%d\n", entry->bfib1.pkt_type);
+	}
+
+	pr_info("==================================\n");
+	return 1;
+}
+
+/* Count CPU-trap reasons into dbg_cpu_reason_cnt[].  The array index
+ * (0..21) follows the same ordering as the case list in
+ * show_cpu_reason() above, not the hardware reason codes themselves.
+ * Always returns 0.
+ */
+uint32_t hnat_cpu_reason_cnt(struct sk_buff *skb)
+{
+	switch (skb_hnat_reason(skb)) {
+	case TTL_0:
+		dbg_cpu_reason_cnt[0]++;
+		return 0;
+	case HAS_OPTION_HEADER:
+		dbg_cpu_reason_cnt[1]++;
+		return 0;
+	case NO_FLOW_IS_ASSIGNED:
+		dbg_cpu_reason_cnt[2]++;
+		return 0;
+	case IPV4_WITH_FRAGMENT:
+		dbg_cpu_reason_cnt[3]++;
+		return 0;
+	case IPV4_HNAPT_DSLITE_WITH_FRAGMENT:
+		dbg_cpu_reason_cnt[4]++;
+		return 0;
+	case IPV4_HNAPT_DSLITE_WITHOUT_TCP_UDP:
+		dbg_cpu_reason_cnt[5]++;
+		return 0;
+	case IPV6_5T_6RD_WITHOUT_TCP_UDP:
+		dbg_cpu_reason_cnt[6]++;
+		return 0;
+	case TCP_FIN_SYN_RST:
+		dbg_cpu_reason_cnt[7]++;
+		return 0;
+	case UN_HIT:
+		dbg_cpu_reason_cnt[8]++;
+		return 0;
+	case HIT_UNBIND:
+		dbg_cpu_reason_cnt[9]++;
+		return 0;
+	case HIT_UNBIND_RATE_REACH:
+		dbg_cpu_reason_cnt[10]++;
+		return 0;
+	case HIT_BIND_TCP_FIN:
+		dbg_cpu_reason_cnt[11]++;
+		return 0;
+	case HIT_BIND_TTL_1:
+		dbg_cpu_reason_cnt[12]++;
+		return 0;
+	case HIT_BIND_WITH_VLAN_VIOLATION:
+		dbg_cpu_reason_cnt[13]++;
+		return 0;
+	case HIT_BIND_KEEPALIVE_UC_OLD_HDR:
+		dbg_cpu_reason_cnt[14]++;
+		return 0;
+	case HIT_BIND_KEEPALIVE_MC_NEW_HDR:
+		dbg_cpu_reason_cnt[15]++;
+		return 0;
+	case HIT_BIND_KEEPALIVE_DUP_OLD_HDR:
+		dbg_cpu_reason_cnt[16]++;
+		return 0;
+	case HIT_BIND_FORCE_TO_CPU:
+		dbg_cpu_reason_cnt[17]++;
+		return 0;
+	case HIT_BIND_EXCEED_MTU:
+		dbg_cpu_reason_cnt[18]++;
+		return 0;
+	case HIT_BIND_MULTICAST_TO_CPU:
+		dbg_cpu_reason_cnt[19]++;
+		return 0;
+	case HIT_BIND_MULTICAST_TO_GMAC_CPU:
+		dbg_cpu_reason_cnt[20]++;
+		return 0;
+	case HIT_PRE_BIND:
+		dbg_cpu_reason_cnt[21]++;
+		return 0;
+	}
+
+	/* unrecognized reasons are simply not counted */
+	return 0;
+}
+
+/* debugfs "cpu_reason" type 0 handler: set debug_level and print the
+ * usage/help text for the cpu_reason interface.  Always returns 0.
+ */
+int hnat_set_usage(int level)
+{
+	debug_level = level;
+	pr_info("Read cpu_reason count: cat /sys/kernel/debug/hnat/cpu_reason\n\n");
+	pr_info("====================Advanced Settings====================\n");
+	pr_info("Usage: echo [type] [option] > /sys/kernel/debug/hnat/cpu_reason\n\n");
+	pr_info("Commands:   [type] [option]\n");
+	pr_info("              0       0~7        Set debug_level(0~7), current debug_level=%d\n",
+		debug_level);
+	pr_info("              1    cpu_reason    Track entries of the set cpu_reason\n");
+	pr_info("                                 Set type=1 will change debug_level=7\n");
+	pr_info("cpu_reason list:\n");
+	pr_info("       2     IPv4(IPv6) TTL(hop limit) = 0\n");
+	pr_info("       3     IPv4(IPv6) has option(extension) header\n");
+	pr_info("       7     No flow is assigned\n");
+	pr_info("       8     IPv4 HNAT doesn't support IPv4 /w fragment\n");
+	pr_info("       9     IPv4 HNAPT/DS-Lite doesn't support IPv4 /w fragment\n");
+	pr_info("      10     IPv4 HNAPT/DS-Lite can't find TCP/UDP sport/dport\n");
+	pr_info("      11     IPv6 5T-route/6RD can't find TCP/UDP sport/dport\n");
+	pr_info("      12     Ingress packet is TCP fin/syn/rst\n");
+	pr_info("      13     FOE Un-hit\n");
+	pr_info("      14     FOE Hit unbind\n");
+	pr_info("      15     FOE Hit unbind & rate reach\n");
+	pr_info("      16     Hit bind PPE TCP FIN entry\n");
+	pr_info("      17     Hit bind PPE entry and TTL(hop limit) = 1\n");
+	pr_info("      18     Hit bind and VLAN replacement violation\n");
+	pr_info("      19     Hit bind and keep alive with unicast old-header packet\n");
+	pr_info("      20     Hit bind and keep alive with multicast new-header packet\n");
+	pr_info("      21     Hit bind and keep alive with duplicate old-header packet\n");
+	pr_info("      22     FOE Hit bind & force to CPU\n");
+	pr_info("      23     HIT_BIND_WITH_OPTION_HEADER\n");
+	pr_info("      24     Switch clone multicast packet to CPU\n");
+	pr_info("      25     Switch clone multicast packet to GMAC1 & CPU\n");
+	pr_info("      26     HIT_PRE_BIND\n");
+	pr_info("      27     HIT_BIND_PACKET_SAMPLING\n");
+	pr_info("      28     Hit bind and exceed MTU\n");
+
+	return 0;
+}
+
+/* debugfs "cpu_reason" type 1 handler: track a specific cpu_reason and
+ * force verbose logging (debug_level = 7).  Always returns 0.
+ */
+int hnat_cpu_reason(int cpu_reason)
+{
+	dbg_cpu_reason = cpu_reason;
+	debug_level = 7;
+	pr_info("show cpu reason = %d\n", cpu_reason);
+
+	return 0;
+}
+
+/* debugfs "hnat_entry" type 0 handler: set debug_level and print the
+ * usage/help text for the hnat_entry interface.  Always returns 0.
+ */
+int entry_set_usage(int level)
+{
+	debug_level = level;
+	pr_info("Show all entries(default state=bind): cat /sys/kernel/debug/hnat/hnat_entry\n\n");
+	pr_info("====================Advanced Settings====================\n");
+	pr_info("Usage: echo [type] [option] > /sys/kernel/debug/hnat/hnat_entry\n\n");
+	pr_info("Commands:   [type] [option]\n");
+	pr_info("              0       0~7        Set debug_level(0~7), current debug_level=%d\n",
+		debug_level);
+	pr_info("              1       0~3        Change tracking state\n");
+	pr_info("                                 (0:invalid; 1:unbind; 2:bind; 3:fin)\n");
+	pr_info("              2   <entry_idx>    Show specific foe entry info. of assigned <entry_idx>\n");
+	pr_info("              3   <entry_idx>    Delete specific foe entry of assigned <entry_idx>\n");
+
+	return 0;
+}
+
+/* debugfs "hnat_entry" type 1 handler: choose which entry state
+ * (INVALID/UNBIND/BIND/FIN) subsequent dumps track.  Always returns 0.
+ */
+int entry_set_state(int state)
+{
+	dbg_entry_state = state;
+	pr_info("ENTRY STATE = %s\n", dbg_entry_state == 0 ?
+		"Invalid" : dbg_entry_state == 1 ?
+		"Unbind" : dbg_entry_state == 2 ?
+		"BIND" : dbg_entry_state == 3 ?
+		"FIN" : "Unknown");
+	return 0;
+}
+
+/* debugfs "hnat_entry" type 2 handler: pretty-print one FOE entry.
+ * Decodes the entry according to its pkt_type (HNAPT/HNAT/DS-Lite/
+ * MAP-E/IPv6 3T/5T/6RD) and then the common MAC/VLAN/state tail.
+ * NOTE(review): index is not bounds-checked here — callers come from
+ * the debugfs dispatch table; confirm validation happens upstream.
+ * Always returns 0.
+ */
+int entry_detail(int index)
+{
+	struct foe_entry *entry;
+	struct mtk_hnat *h = hnat_priv;
+	u32 *p;
+	u32 i = 0;
+	u32 print_cnt;
+	unsigned char h_dest[ETH_ALEN];
+	unsigned char h_source[ETH_ALEN];
+	__be32 saddr, daddr, nsaddr, ndaddr;
+
+	entry = h->foe_table_cpu + index;
+	/* IPv4 fields are read unconditionally here; they are only
+	 * printed when the pkt_type checks below select an IPv4 layout.
+	 */
+	saddr = htonl(entry->ipv4_hnapt.sip);
+	daddr = htonl(entry->ipv4_hnapt.dip);
+	nsaddr = htonl(entry->ipv4_hnapt.new_sip);
+	ndaddr = htonl(entry->ipv4_hnapt.new_dip);
+	p = (uint32_t *)entry;
+	pr_info("==========<Flow Table Entry=%d (%p)>===============\n", index,
+		entry);
+	if (debug_level >= 2) {
+		/* raw hex dump of the first 20 words of the entry */
+		print_cnt = 20;
+		for (i = 0; i < print_cnt; i++)
+			pr_info("%02d: %08X\n", i, *(p + i));
+	}
+	pr_info("-----------------<Flow Info>------------------\n");
+	pr_info("Information Block 1: %08X\n", entry->ipv4_hnapt.info_blk1);
+
+	if (IS_IPV4_HNAPT(entry)) {
+		pr_info("Information Block 2: %08X (FP=%d FQOS=%d QID=%d)",
+			entry->ipv4_hnapt.info_blk2,
+			entry->ipv4_hnapt.iblk2.dp,
+			entry->ipv4_hnapt.iblk2.fqos,
+			entry->ipv4_hnapt.iblk2.qid);
+		pr_info("Create IPv4 HNAPT entry\n");
+		pr_info("IPv4 Org IP/Port: %pI4:%d->%pI4:%d\n", &saddr,
+			entry->ipv4_hnapt.sport, &daddr,
+			entry->ipv4_hnapt.dport);
+		pr_info("IPv4 New IP/Port: %pI4:%d->%pI4:%d\n", &nsaddr,
+			entry->ipv4_hnapt.new_sport, &ndaddr,
+			entry->ipv4_hnapt.new_dport);
+	} else if (IS_IPV4_HNAT(entry)) {
+		pr_info("Information Block 2: %08X\n",
+			entry->ipv4_hnapt.info_blk2);
+		pr_info("Create IPv4 HNAT entry\n");
+		pr_info("IPv4 Org IP: %pI4->%pI4\n", &saddr, &daddr);
+		pr_info("IPv4 New IP: %pI4->%pI4\n", &nsaddr, &ndaddr);
+	} else if (IS_IPV4_DSLITE(entry)) {
+		pr_info("Information Block 2: %08X\n",
+			entry->ipv4_dslite.info_blk2);
+		pr_info("Create IPv4 Ds-Lite entry\n");
+		pr_info("IPv4 Ds-Lite: %pI4:%d->%pI4:%d\n", &saddr,
+			entry->ipv4_dslite.sport, &daddr,
+			entry->ipv4_dslite.dport);
+		pr_info("EG DIPv6: %08X:%08X:%08X:%08X->%08X:%08X:%08X:%08X\n",
+			entry->ipv4_dslite.tunnel_sipv6_0,
+			entry->ipv4_dslite.tunnel_sipv6_1,
+			entry->ipv4_dslite.tunnel_sipv6_2,
+			entry->ipv4_dslite.tunnel_sipv6_3,
+			entry->ipv4_dslite.tunnel_dipv6_0,
+			entry->ipv4_dslite.tunnel_dipv6_1,
+			entry->ipv4_dslite.tunnel_dipv6_2,
+			entry->ipv4_dslite.tunnel_dipv6_3);
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+	} else if (IS_IPV4_MAPE(entry)) {
+		/* MAP-E reuses the DS-Lite layout plus new IP/port fields */
+		nsaddr = htonl(entry->ipv4_dslite.new_sip);
+		ndaddr = htonl(entry->ipv4_dslite.new_dip);
+
+		pr_info("Information Block 2: %08X\n",
+			entry->ipv4_dslite.info_blk2);
+		pr_info("Create IPv4 MAP-E entry\n");
+		pr_info("IPv4 MAP-E Org IP/Port: %pI4:%d->%pI4:%d\n",
+			&saddr, entry->ipv4_dslite.sport,
+			&daddr, entry->ipv4_dslite.dport);
+		pr_info("IPv4 MAP-E New IP/Port: %pI4:%d->%pI4:%d\n",
+			&nsaddr, entry->ipv4_dslite.new_sport,
+			&ndaddr, entry->ipv4_dslite.new_dport);
+		pr_info("EG DIPv6: %08X:%08X:%08X:%08X->%08X:%08X:%08X:%08X\n",
+			entry->ipv4_dslite.tunnel_sipv6_0,
+			entry->ipv4_dslite.tunnel_sipv6_1,
+			entry->ipv4_dslite.tunnel_sipv6_2,
+			entry->ipv4_dslite.tunnel_sipv6_3,
+			entry->ipv4_dslite.tunnel_dipv6_0,
+			entry->ipv4_dslite.tunnel_dipv6_1,
+			entry->ipv4_dslite.tunnel_dipv6_2,
+			entry->ipv4_dslite.tunnel_dipv6_3);
+#endif
+	} else if (IS_IPV6_3T_ROUTE(entry)) {
+		pr_info("Information Block 2: %08X\n",
+			entry->ipv6_3t_route.info_blk2);
+		pr_info("Create IPv6 3-Tuple entry\n");
+		pr_info("ING SIPv6->DIPv6: %08X:%08X:%08X:%08X-> %08X:%08X:%08X:%08X (Prot=%d)\n",
+			entry->ipv6_3t_route.ipv6_sip0,
+			entry->ipv6_3t_route.ipv6_sip1,
+			entry->ipv6_3t_route.ipv6_sip2,
+			entry->ipv6_3t_route.ipv6_sip3,
+			entry->ipv6_3t_route.ipv6_dip0,
+			entry->ipv6_3t_route.ipv6_dip1,
+			entry->ipv6_3t_route.ipv6_dip2,
+			entry->ipv6_3t_route.ipv6_dip3,
+			entry->ipv6_3t_route.prot);
+	} else if (IS_IPV6_5T_ROUTE(entry)) {
+		pr_info("Information Block 2: %08X\n",
+			entry->ipv6_5t_route.info_blk2);
+		pr_info("Create IPv6 5-Tuple entry\n");
+		pr_info("ING SIPv6->DIPv6: %08X:%08X:%08X:%08X:%d-> %08X:%08X:%08X:%08X:%d\n",
+			entry->ipv6_5t_route.ipv6_sip0,
+			entry->ipv6_5t_route.ipv6_sip1,
+			entry->ipv6_5t_route.ipv6_sip2,
+			entry->ipv6_5t_route.ipv6_sip3,
+			entry->ipv6_5t_route.sport,
+			entry->ipv6_5t_route.ipv6_dip0,
+			entry->ipv6_5t_route.ipv6_dip1,
+			entry->ipv6_5t_route.ipv6_dip2,
+			entry->ipv6_5t_route.ipv6_dip3,
+			entry->ipv6_5t_route.dport);
+	} else if (IS_IPV6_6RD(entry)) {
+		pr_info("Information Block 2: %08X\n",
+			entry->ipv6_6rd.info_blk2);
+		pr_info("Create IPv6 6RD entry\n");
+		pr_info("ING SIPv6->DIPv6: %08X:%08X:%08X:%08X:%d-> %08X:%08X:%08X:%08X:%d\n",
+			entry->ipv6_6rd.ipv6_sip0, entry->ipv6_6rd.ipv6_sip1,
+			entry->ipv6_6rd.ipv6_sip2, entry->ipv6_6rd.ipv6_sip3,
+			entry->ipv6_6rd.sport, entry->ipv6_6rd.ipv6_dip0,
+			entry->ipv6_6rd.ipv6_dip1, entry->ipv6_6rd.ipv6_dip2,
+			entry->ipv6_6rd.ipv6_dip3, entry->ipv6_6rd.dport);
+	}
+	/* common tail: MACs stored byte-swapped in hi/lo register words */
+	if (IS_IPV4_HNAPT(entry) || IS_IPV4_HNAT(entry)) {
+		*((u32 *)h_source) = swab32(entry->ipv4_hnapt.smac_hi);
+		*((u16 *)&h_source[4]) = swab16(entry->ipv4_hnapt.smac_lo);
+		*((u32 *)h_dest) = swab32(entry->ipv4_hnapt.dmac_hi);
+		*((u16 *)&h_dest[4]) = swab16(entry->ipv4_hnapt.dmac_lo);
+		pr_info("SMAC=%pM => DMAC=%pM\n", h_source, h_dest);
+		pr_info("State = %s, ",  entry->bfib1.state == 0 ?
+			"Invalid" : entry->bfib1.state == 1 ?
+			"Unbind" : entry->bfib1.state == 2 ?
+			"BIND" : entry->bfib1.state == 3 ?
+			"FIN" : "Unknown");
+		pr_info("Vlan_Layer = %u, ", entry->bfib1.vlan_layer);
+		pr_info("Eth_type = 0x%x, Vid1 = 0x%x, Vid2 = 0x%x\n",
+			entry->ipv4_hnapt.etype, entry->ipv4_hnapt.vlan1,
+			entry->ipv4_hnapt.vlan2);
+		pr_info("multicast = %d, pppoe = %d, proto = %s\n",
+			entry->ipv4_hnapt.iblk2.mcast,
+			entry->ipv4_hnapt.bfib1.psn,
+			entry->ipv4_hnapt.bfib1.udp == 0 ?
+			"TCP" : entry->ipv4_hnapt.bfib1.udp == 1 ?
+			"UDP" : "Unknown");
+		pr_info("=========================================\n\n");
+	} else {
+		*((u32 *)h_source) = swab32(entry->ipv6_5t_route.smac_hi);
+		*((u16 *)&h_source[4]) = swab16(entry->ipv6_5t_route.smac_lo);
+		*((u32 *)h_dest) = swab32(entry->ipv6_5t_route.dmac_hi);
+		*((u16 *)&h_dest[4]) = swab16(entry->ipv6_5t_route.dmac_lo);
+		pr_info("SMAC=%pM => DMAC=%pM\n", h_source, h_dest);
+		pr_info("State = %s, ",  entry->bfib1.state == 0 ?
+			"Invalid" : entry->bfib1.state == 1 ?
+			"Unbind" : entry->bfib1.state == 2 ?
+			"BIND" : entry->bfib1.state == 3 ?
+			"FIN" : "Unknown");
+
+		pr_info("Vlan_Layer = %u, ", entry->bfib1.vlan_layer);
+		pr_info("Eth_type = 0x%x, Vid1 = 0x%x, Vid2 = 0x%x\n",
+			entry->ipv6_5t_route.etype, entry->ipv6_5t_route.vlan1,
+			entry->ipv6_5t_route.vlan2);
+		pr_info("multicast = %d, pppoe = %d, proto = %s\n",
+			entry->ipv6_5t_route.iblk2.mcast,
+			entry->ipv6_5t_route.bfib1.psn,
+			entry->ipv6_5t_route.bfib1.udp == 0 ?
+			"TCP" : entry->ipv6_5t_route.bfib1.udp == 1 ?
+			"UDP" : "Unknown");
+		pr_info("=========================================\n\n");
+	}
+	return 0;
+}
+
+/* debugfs "hnat_entry" type 3 handler: clear one FOE entry and flush
+ * the hardware cache so the PPE does not keep using the stale entry.
+ *
+ * @index: FOE table index (user-supplied via debugfs, so it is
+ *         bounds-checked here — the original cleared arbitrary memory
+ *         for out-of-range indices).
+ * Returns 0 on success, -EINVAL for an out-of-range index.
+ */
+int entry_delete(int index)
+{
+	struct foe_entry *entry;
+	struct mtk_hnat *h = hnat_priv;
+
+	if (index < 0 || (u32)index >= h->foe_etry_num) {
+		pr_info("invalid entry idx = %d\n", index);
+		return -EINVAL;
+	}
+
+	entry = h->foe_table_cpu + index;
+	memset(entry, 0, sizeof(struct foe_entry));
+
+	/* clear HWNAT cache */
+	hnat_cache_ebl(1);
+
+	pr_info("delete entry idx = %d\n", index);
+
+	return 0;
+}
+EXPORT_SYMBOL(entry_delete);
+
+/* debugfs "hnat_setting" type 0 handler: set debug_level and print the
+ * usage/help text for the hnat_setting interface.  Always returns 0.
+ */
+int cr_set_usage(int level)
+{
+	debug_level = level;
+	pr_info("Dump hnat CR: cat /sys/kernel/debug/hnat/hnat_setting\n\n");
+	pr_info("====================Advanced Settings====================\n");
+	pr_info("Usage: echo [type] [option] > /sys/kernel/debug/hnat/hnat_setting\n\n");
+	pr_info("Commands:   [type] [option]\n");
+	pr_info("              0     0~7         Set debug_level(0~7), current debug_level=%d\n",
+		debug_level);
+	pr_info("              1     0~65535     Set binding threshold\n");
+	pr_info("              2     0~65535     Set TCP bind lifetime\n");
+	pr_info("              3     0~65535     Set FIN bind lifetime\n");
+	pr_info("              4     0~65535     Set UDP bind lifetime\n");
+	pr_info("              5     0~255       Set TCP keep alive interval\n");
+	pr_info("              6     0~255       Set UDP keep alive interval\n");
+
+	return 0;
+}
+
+/* debugfs "hnat_setting" type 1: write the unbind->bind packet-rate
+ * threshold to the PPE_BNDR register.  Always returns 0.
+ */
+int binding_threshold(int threshold)
+{
+	pr_info("Binding Threshold =%d\n", threshold);
+	writel(threshold, hnat_priv->ppe_base + PPE_BNDR);
+	return 0;
+}
+
+/* debugfs "hnat_setting" type 2: set the bound-TCP entry ageout delta.
+ * Always returns 0.
+ */
+int tcp_bind_lifetime(int tcp_life)
+{
+	pr_info("tcp_life = %d\n", tcp_life);
+	/* set Delta time for aging out an bind TCP FOE entry */
+	cr_set_field(hnat_priv->ppe_base + PPE_BND_AGE_1, TCP_DLTA, tcp_life);
+
+	return 0;
+}
+
+/* Program the age-out delta for bound TCP-FIN FOE entries.
+ * @fin_life: lifetime written into the FIN_DLTA field of PPE_BND_AGE_1.
+ */
+int fin_bind_lifetime(int fin_life)
+{
+	void __iomem *reg = hnat_priv->ppe_base + PPE_BND_AGE_1;
+
+	pr_info("fin_life = %d\n", fin_life);
+	cr_set_field(reg, FIN_DLTA, fin_life);
+
+	return 0;
+}
+
+/* Program the age-out delta for bound UDP FOE entries.
+ * @udp_life: lifetime written into the UDP_DLTA field of PPE_BND_AGE_0.
+ */
+int udp_bind_lifetime(int udp_life)
+{
+	void __iomem *reg = hnat_priv->ppe_base + PPE_BND_AGE_0;
+
+	pr_info("udp_life = %d\n", udp_life);
+	cr_set_field(reg, UDP_DLTA, udp_life);
+
+	return 0;
+}
+
+/* Program the keep-alive interval for bound TCP FOE entries.
+ * The TCP_KA register field is 8 bits wide, so values above 255 are
+ * clamped (with a notice) before being written.
+ */
+int tcp_keep_alive(int tcp_interval)
+{
+	if (tcp_interval <= 255) {
+		pr_info("tcp_interval = %d\n", tcp_interval);
+	} else {
+		tcp_interval = 255;
+		pr_info("TCP keep alive max interval = 255\n");
+	}
+
+	cr_set_field(hnat_priv->ppe_base + PPE_KA, TCP_KA, tcp_interval);
+
+	return 0;
+}
+
+/* Program the keep-alive interval for bound UDP FOE entries.
+ * The UDP_KA register field is 8 bits wide, so values above 255 are
+ * clamped (with a notice) before being written.
+ */
+int udp_keep_alive(int udp_interval)
+{
+	if (udp_interval <= 255) {
+		pr_info("udp_interval = %d\n", udp_interval);
+	} else {
+		udp_interval = 255;
+		pr_info("TCP/UDP keep alive max interval = 255\n");
+	}
+
+	cr_set_field(hnat_priv->ppe_base + PPE_KA, UDP_KA, udp_interval);
+
+	return 0;
+}
+
+/* Command dispatch for writes to debugfs "cpu_reason":
+ * first token selects the handler, second token is its argument.
+ */
+static const debugfs_write_func hnat_set_func[] = {
+ [0] = hnat_set_usage,
+ [1] = hnat_cpu_reason,
+};
+
+/* Command dispatch for writes to debugfs "hnat_entry". */
+static const debugfs_write_func entry_set_func[] = {
+ [0] = entry_set_usage,
+ [1] = entry_set_state,
+ [2] = entry_detail,
+ [3] = entry_delete,
+};
+
+/* Command dispatch for writes to debugfs "hnat_setting";
+ * indices match the usage text printed by cr_set_usage().
+ */
+static const debugfs_write_func cr_set_func[] = {
+ [0] = cr_set_usage, [1] = binding_threshold,
+ [2] = tcp_bind_lifetime, [3] = fin_bind_lifetime,
+ [4] = udp_bind_lifetime, [5] = tcp_keep_alive,
+ [6] = udp_keep_alive,
+};
+
+/* Fetch and accumulate the per-flow MIB counters for FOE entry @index.
+ *
+ * Triggers a MIB read via PPE_MIB_SER_CR, polls until the busy bit
+ * clears, then folds the byte/packet deltas into h->acct[index].
+ *
+ * Returns the updated accounting record, or NULL when per-flow
+ * accounting is disabled or the MIB engine stays busy.
+ */
+static struct hnat_accounting *hnat_get_count(struct mtk_hnat *h, u32 index)
+{
+	struct hnat_accounting *acount;
+	u32 val, cnt_r0, cnt_r1, cnt_r2;
+	int ret;
+
+	if (!h->data->per_flow_accounting)
+		return NULL;
+
+	/* Kick off a MIB read for this entry and wait for completion. */
+	writel(index | (1 << 16), h->ppe_base + PPE_MIB_SER_CR);
+	ret = readx_poll_timeout_atomic(readl, h->ppe_base + PPE_MIB_SER_CR, val,
+					!(val & BIT_MIB_BUSY), 20, 10000);
+	if (ret < 0) {
+		pr_notice("mib busy,please check later\n");
+		return NULL;
+	}
+	cnt_r0 = readl(h->ppe_base + PPE_MIB_SER_R0);
+	cnt_r1 = readl(h->ppe_base + PPE_MIB_SER_R1);
+	cnt_r2 = readl(h->ppe_base + PPE_MIB_SER_R2);
+	acount = &h->acct[index];
+	/* bytes: R0 holds bits [31:0], R1[15:0] holds bits [47:32]. */
+	acount->bytes += cnt_r0 + ((u64)(cnt_r1 & 0xffff) << 32);
+	/* packets: R1[31:16] holds bits [15:0], R2[23:0] holds bits [39:16].
+	 * Widen to u64 BEFORE shifting: the previous u32 shift discarded the
+	 * top 8 bits of the 24-bit high part whenever the count exceeded
+	 * 2^32 packets.
+	 */
+	acount->packets +=
+		((cnt_r1 & 0xffff0000) >> 16) + ((u64)(cnt_r2 & 0xffffff) << 16);
+
+	return acount;
+}
+
+/* Emit "bytes=...|packets=..." when accounting data is available;
+ * @acount may be NULL (accounting disabled or MIB engine busy), in
+ * which case nothing is printed.
+ */
+#define PRINT_COUNT(m, acount) {if (acount) \
+ seq_printf(m, "bytes=%llu|packets=%llu|", \
+ acount->bytes, acount->packets); }
+/* seq_file show handler for debugfs "hnat_entry" dump: walks the whole
+ * FOE table and prints one line per in-use entry, with the field layout
+ * chosen by the entry's packet type (HNAPT, HNAT, IPv6 routes, 6RD,
+ * DS-Lite, MAP-E). MAC addresses are byte-swapped out of the two
+ * hi/lo register words into printable order.
+ */
+static int hnat_debug_show(struct seq_file *m, void *private)
+{
+ struct mtk_hnat *h = hnat_priv;
+ struct foe_entry *entry, *end;
+ unsigned char h_dest[ETH_ALEN];
+ unsigned char h_source[ETH_ALEN];
+ struct hnat_accounting *acount;
+ u32 entry_index = 0;
+
+ entry = h->foe_table_cpu;
+ end = h->foe_table_cpu + hnat_priv->foe_etry_num;
+ while (entry < end) {
+ /* Skip invalid (state == 0) slots. */
+ if (!entry->bfib1.state) {
+ entry++;
+ entry_index++;
+ continue;
+ }
+ acount = hnat_get_count(h, entry_index);
+ if (IS_IPV4_HNAPT(entry)) {
+ /* IPv4 NAPT: translated 5-tuple (addresses + ports). */
+ __be32 saddr = htonl(entry->ipv4_hnapt.sip);
+ __be32 daddr = htonl(entry->ipv4_hnapt.dip);
+ __be32 nsaddr = htonl(entry->ipv4_hnapt.new_sip);
+ __be32 ndaddr = htonl(entry->ipv4_hnapt.new_dip);
+
+ *((u32 *)h_source) = swab32(entry->ipv4_hnapt.smac_hi);
+ *((u16 *)&h_source[4]) =
+ swab16(entry->ipv4_hnapt.smac_lo);
+ *((u32 *)h_dest) = swab32(entry->ipv4_hnapt.dmac_hi);
+ *((u16 *)&h_dest[4]) =
+ swab16(entry->ipv4_hnapt.dmac_lo);
+ PRINT_COUNT(m, acount);
+ seq_printf(m,
+ "addr=0x%p|index=%d|state=%s|type=%s|%pI4:%d->%pI4:%d=>%pI4:%d->%pI4:%d|%pM=>%pM|etype=0x%04x|info1=0x%x|info2=0x%x|vlan1=%d|vlan2=%d\n",
+ entry, ei(entry, end), es(entry), pt(entry), &saddr,
+ entry->ipv4_hnapt.sport, &daddr,
+ entry->ipv4_hnapt.dport, &nsaddr,
+ entry->ipv4_hnapt.new_sport, &ndaddr,
+ entry->ipv4_hnapt.new_dport, h_source, h_dest,
+ ntohs(entry->ipv4_hnapt.etype),
+ entry->ipv4_hnapt.info_blk1,
+ entry->ipv4_hnapt.info_blk2,
+ entry->ipv4_hnapt.vlan1,
+ entry->ipv4_hnapt.vlan2);
+ } else if (IS_IPV4_HNAT(entry)) {
+ /* IPv4 NAT: address-only translation (no ports). */
+ __be32 saddr = htonl(entry->ipv4_hnapt.sip);
+ __be32 daddr = htonl(entry->ipv4_hnapt.dip);
+ __be32 nsaddr = htonl(entry->ipv4_hnapt.new_sip);
+ __be32 ndaddr = htonl(entry->ipv4_hnapt.new_dip);
+
+ *((u32 *)h_source) = swab32(entry->ipv4_hnapt.smac_hi);
+ *((u16 *)&h_source[4]) =
+ swab16(entry->ipv4_hnapt.smac_lo);
+ *((u32 *)h_dest) = swab32(entry->ipv4_hnapt.dmac_hi);
+ *((u16 *)&h_dest[4]) =
+ swab16(entry->ipv4_hnapt.dmac_lo);
+ PRINT_COUNT(m, acount);
+ seq_printf(m,
+ "addr=0x%p|index=%d|state=%s|type=%s|%pI4->%pI4=>%pI4->%pI4|%pM=>%pM|etype=0x%04x|info1=0x%x|info2=0x%x|vlan1=%d|vlan2=%d\n",
+ entry, ei(entry, end), es(entry), pt(entry), &saddr,
+ &daddr, &nsaddr, &ndaddr, h_source, h_dest,
+ ntohs(entry->ipv4_hnapt.etype),
+ entry->ipv4_hnapt.info_blk1,
+ entry->ipv4_hnapt.info_blk2,
+ entry->ipv4_hnapt.vlan1,
+ entry->ipv4_hnapt.vlan2);
+ } else if (IS_IPV6_5T_ROUTE(entry)) {
+ /* IPv6 5-tuple route (addresses read via 3t layout —
+ * the address words share the same union offsets).
+ */
+ u32 ipv6_sip0 = entry->ipv6_3t_route.ipv6_sip0;
+ u32 ipv6_sip1 = entry->ipv6_3t_route.ipv6_sip1;
+ u32 ipv6_sip2 = entry->ipv6_3t_route.ipv6_sip2;
+ u32 ipv6_sip3 = entry->ipv6_3t_route.ipv6_sip3;
+ u32 ipv6_dip0 = entry->ipv6_3t_route.ipv6_dip0;
+ u32 ipv6_dip1 = entry->ipv6_3t_route.ipv6_dip1;
+ u32 ipv6_dip2 = entry->ipv6_3t_route.ipv6_dip2;
+ u32 ipv6_dip3 = entry->ipv6_3t_route.ipv6_dip3;
+
+ *((u32 *)h_source) =
+ swab32(entry->ipv6_5t_route.smac_hi);
+ *((u16 *)&h_source[4]) =
+ swab16(entry->ipv6_5t_route.smac_lo);
+ *((u32 *)h_dest) = swab32(entry->ipv6_5t_route.dmac_hi);
+ *((u16 *)&h_dest[4]) =
+ swab16(entry->ipv6_5t_route.dmac_lo);
+ PRINT_COUNT(m, acount);
+ seq_printf(m,
+ "addr=0x%p|index=%d|state=%s|type=%s|SIP=%08x:%08x:%08x:%08x(sp=%d)->DIP=%08x:%08x:%08x:%08x(dp=%d)|%pM=>%pM|etype=0x%04x|info1=0x%x|info2=0x%x\n",
+ entry, ei(entry, end), es(entry), pt(entry), ipv6_sip0,
+ ipv6_sip1, ipv6_sip2, ipv6_sip3,
+ entry->ipv6_5t_route.sport, ipv6_dip0,
+ ipv6_dip1, ipv6_dip2, ipv6_dip3,
+ entry->ipv6_5t_route.dport, h_source, h_dest,
+ ntohs(entry->ipv6_5t_route.etype),
+ entry->ipv6_5t_route.info_blk1,
+ entry->ipv6_5t_route.info_blk2);
+ } else if (IS_IPV6_3T_ROUTE(entry)) {
+ /* IPv6 3-tuple route: no ports in the key. */
+ u32 ipv6_sip0 = entry->ipv6_3t_route.ipv6_sip0;
+ u32 ipv6_sip1 = entry->ipv6_3t_route.ipv6_sip1;
+ u32 ipv6_sip2 = entry->ipv6_3t_route.ipv6_sip2;
+ u32 ipv6_sip3 = entry->ipv6_3t_route.ipv6_sip3;
+ u32 ipv6_dip0 = entry->ipv6_3t_route.ipv6_dip0;
+ u32 ipv6_dip1 = entry->ipv6_3t_route.ipv6_dip1;
+ u32 ipv6_dip2 = entry->ipv6_3t_route.ipv6_dip2;
+ u32 ipv6_dip3 = entry->ipv6_3t_route.ipv6_dip3;
+
+ *((u32 *)h_source) =
+ swab32(entry->ipv6_5t_route.smac_hi);
+ *((u16 *)&h_source[4]) =
+ swab16(entry->ipv6_5t_route.smac_lo);
+ *((u32 *)h_dest) = swab32(entry->ipv6_5t_route.dmac_hi);
+ *((u16 *)&h_dest[4]) =
+ swab16(entry->ipv6_5t_route.dmac_lo);
+ PRINT_COUNT(m, acount);
+ seq_printf(m,
+ "addr=0x%p|index=%d|state=%s|type=%s|SIP=%08x:%08x:%08x:%08x->DIP=%08x:%08x:%08x:%08x|%pM=>%pM|etype=0x%04x|info1=0x%x|info2=0x%x\n",
+ entry, ei(entry, end), es(entry), pt(entry), ipv6_sip0,
+ ipv6_sip1, ipv6_sip2, ipv6_sip3, ipv6_dip0,
+ ipv6_dip1, ipv6_dip2, ipv6_dip3, h_source,
+ h_dest, ntohs(entry->ipv6_5t_route.etype),
+ entry->ipv6_5t_route.info_blk1,
+ entry->ipv6_5t_route.info_blk2);
+ } else if (IS_IPV6_6RD(entry)) {
+ /* IPv6 over 6RD: inner IPv6 5-tuple plus the IPv4
+ * tunnel endpoints.
+ */
+ u32 ipv6_sip0 = entry->ipv6_3t_route.ipv6_sip0;
+ u32 ipv6_sip1 = entry->ipv6_3t_route.ipv6_sip1;
+ u32 ipv6_sip2 = entry->ipv6_3t_route.ipv6_sip2;
+ u32 ipv6_sip3 = entry->ipv6_3t_route.ipv6_sip3;
+ u32 ipv6_dip0 = entry->ipv6_3t_route.ipv6_dip0;
+ u32 ipv6_dip1 = entry->ipv6_3t_route.ipv6_dip1;
+ u32 ipv6_dip2 = entry->ipv6_3t_route.ipv6_dip2;
+ u32 ipv6_dip3 = entry->ipv6_3t_route.ipv6_dip3;
+ __be32 tsaddr = htonl(entry->ipv6_6rd.tunnel_sipv4);
+ __be32 tdaddr = htonl(entry->ipv6_6rd.tunnel_dipv4);
+
+ *((u32 *)h_source) =
+ swab32(entry->ipv6_5t_route.smac_hi);
+ *((u16 *)&h_source[4]) =
+ swab16(entry->ipv6_5t_route.smac_lo);
+ *((u32 *)h_dest) = swab32(entry->ipv6_5t_route.dmac_hi);
+ *((u16 *)&h_dest[4]) =
+ swab16(entry->ipv6_5t_route.dmac_lo);
+ PRINT_COUNT(m, acount);
+ seq_printf(m,
+ "addr=0x%p|index=%d|state=%s|type=%s|SIP=%08x:%08x:%08x:%08x(sp=%d)->DIP=%08x:%08x:%08x:%08x(dp=%d)|TSIP=%pI4->TDIP=%pI4|%pM=>%pM|etype=0x%04x|info1=0x%x|info2=0x%x\n",
+ entry, ei(entry, end), es(entry), pt(entry), ipv6_sip0,
+ ipv6_sip1, ipv6_sip2, ipv6_sip3,
+ entry->ipv6_5t_route.sport, ipv6_dip0,
+ ipv6_dip1, ipv6_dip2, ipv6_dip3,
+ entry->ipv6_5t_route.dport, &tsaddr, &tdaddr,
+ h_source, h_dest,
+ ntohs(entry->ipv6_5t_route.etype),
+ entry->ipv6_5t_route.info_blk1,
+ entry->ipv6_5t_route.info_blk2);
+ } else if (IS_IPV4_DSLITE(entry)) {
+ /* DS-Lite: inner IPv4 pair plus the IPv6 tunnel ends. */
+ __be32 saddr = htonl(entry->ipv4_hnapt.sip);
+ __be32 daddr = htonl(entry->ipv4_hnapt.dip);
+ u32 ipv6_tsip0 = entry->ipv4_dslite.tunnel_sipv6_0;
+ u32 ipv6_tsip1 = entry->ipv4_dslite.tunnel_sipv6_1;
+ u32 ipv6_tsip2 = entry->ipv4_dslite.tunnel_sipv6_2;
+ u32 ipv6_tsip3 = entry->ipv4_dslite.tunnel_sipv6_3;
+ u32 ipv6_tdip0 = entry->ipv4_dslite.tunnel_dipv6_0;
+ u32 ipv6_tdip1 = entry->ipv4_dslite.tunnel_dipv6_1;
+ u32 ipv6_tdip2 = entry->ipv4_dslite.tunnel_dipv6_2;
+ u32 ipv6_tdip3 = entry->ipv4_dslite.tunnel_dipv6_3;
+
+ *((u32 *)h_source) = swab32(entry->ipv4_dslite.smac_hi);
+ *((u16 *)&h_source[4]) =
+ swab16(entry->ipv4_dslite.smac_lo);
+ *((u32 *)h_dest) = swab32(entry->ipv4_dslite.dmac_hi);
+ *((u16 *)&h_dest[4]) =
+ swab16(entry->ipv4_dslite.dmac_lo);
+ PRINT_COUNT(m, acount);
+ seq_printf(m,
+ "addr=0x%p|index=%d|state=%s|type=%s|SIP=%pI4->DIP=%pI4|TSIP=%08x:%08x:%08x:%08x->TDIP=%08x:%08x:%08x:%08x|%pM=>%pM|etype=0x%04x|info1=0x%x|info2=0x%x\n",
+ entry, ei(entry, end), es(entry), pt(entry), &saddr,
+ &daddr, ipv6_tsip0, ipv6_tsip1, ipv6_tsip2,
+ ipv6_tsip3, ipv6_tdip0, ipv6_tdip1, ipv6_tdip2,
+ ipv6_tdip3, h_source, h_dest,
+ ntohs(entry->ipv6_5t_route.etype),
+ entry->ipv6_5t_route.info_blk1,
+ entry->ipv6_5t_route.info_blk2);
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+ } else if (IS_IPV4_MAPE(entry)) {
+ /* MAP-E (NETSYS v2 only): translated IPv4 4-tuple plus
+ * the IPv6 tunnel endpoints.
+ */
+ __be32 saddr = htonl(entry->ipv4_dslite.sip);
+ __be32 daddr = htonl(entry->ipv4_dslite.dip);
+ __be32 nsaddr = htonl(entry->ipv4_dslite.new_sip);
+ __be32 ndaddr = htonl(entry->ipv4_dslite.new_dip);
+ u32 ipv6_tsip0 = entry->ipv4_dslite.tunnel_sipv6_0;
+ u32 ipv6_tsip1 = entry->ipv4_dslite.tunnel_sipv6_1;
+ u32 ipv6_tsip2 = entry->ipv4_dslite.tunnel_sipv6_2;
+ u32 ipv6_tsip3 = entry->ipv4_dslite.tunnel_sipv6_3;
+ u32 ipv6_tdip0 = entry->ipv4_dslite.tunnel_dipv6_0;
+ u32 ipv6_tdip1 = entry->ipv4_dslite.tunnel_dipv6_1;
+ u32 ipv6_tdip2 = entry->ipv4_dslite.tunnel_dipv6_2;
+ u32 ipv6_tdip3 = entry->ipv4_dslite.tunnel_dipv6_3;
+
+ *((u32 *)h_source) = swab32(entry->ipv4_dslite.smac_hi);
+ *((u16 *)&h_source[4]) =
+ swab16(entry->ipv4_dslite.smac_lo);
+ *((u32 *)h_dest) = swab32(entry->ipv4_dslite.dmac_hi);
+ *((u16 *)&h_dest[4]) =
+ swab16(entry->ipv4_dslite.dmac_lo);
+ PRINT_COUNT(m, acount);
+ seq_printf(m,
+ "addr=0x%p|index=%d|state=%s|type=%s|SIP=%pI4:%d->DIP=%pI4:%d|NSIP=%pI4:%d->NDIP=%pI4:%d|TSIP=%08x:%08x:%08x:%08x->TDIP=%08x:%08x:%08x:%08x|%pM=>%pM|etype=0x%04x|info1=0x%x|info2=0x%x\n",
+ entry, ei(entry, end), es(entry), pt(entry),
+ &saddr, entry->ipv4_dslite.sport,
+ &daddr, entry->ipv4_dslite.dport,
+ &nsaddr, entry->ipv4_dslite.new_sport,
+ &ndaddr, entry->ipv4_dslite.new_dport,
+ ipv6_tsip0, ipv6_tsip1, ipv6_tsip2,
+ ipv6_tsip3, ipv6_tdip0, ipv6_tdip1,
+ ipv6_tdip2, ipv6_tdip3, h_source, h_dest,
+ ntohs(entry->ipv6_5t_route.etype),
+ entry->ipv6_5t_route.info_blk1,
+ entry->ipv6_5t_route.info_blk2);
+#endif
+ } else
+ seq_printf(m, "addr=0x%p|index=%d state=%s\n", entry, ei(entry, end),
+ es(entry));
+ entry++;
+ entry_index++;
+ }
+
+ return 0;
+}
+
+/* debugfs open: bind hnat_debug_show as the seq_file show routine. */
+static int hnat_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hnat_debug_show, file->private_data);
+}
+
+/* Read-only debugfs node dumping the whole FOE table. */
+static const struct file_operations hnat_debug_fops = {
+ .open = hnat_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* seq_file show handler: list every registered wifi hook interface as
+ * "<slot>:<name>", skipping empty slots.
+ */
+static int hnat_whnat_show(struct seq_file *m, void *private)
+{
+	struct net_device *dev;
+	int i;
+
+	for (i = 0; i < MAX_IF_NUM; i++) {
+		dev = hnat_priv->wifi_hook_if[i];
+		if (!dev)
+			continue;
+		seq_printf(m, "%d:%s\n", i, dev->name);
+	}
+
+	return 0;
+}
+
+/* debugfs open: bind hnat_whnat_show as the seq_file show routine. */
+static int hnat_whnat_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hnat_whnat_show, file->private_data);
+}
+
+/* Read-only debugfs node listing wifi hook interfaces. */
+static const struct file_operations hnat_whnat_fops = {
+ .open = hnat_whnat_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* seq_file show handler for debugfs "cpu_reason": print (to the kernel
+ * log via pr_info, NOT to the seq_file) the per-reason counters of
+ * packets punted from PPE to CPU, then reset all counters.
+ *
+ * The "(N)" prefix in each line is the hardware CPU-reason code the
+ * counter slot tracks.
+ * NOTE(review): the "(28)" on the exceed-MTU line breaks the otherwise
+ * ascending 22,23,24,... sequence — looks like a typo for the actual
+ * reason code; confirm against the PPE CPU-reason definitions.
+ */
+int cpu_reason_read(struct seq_file *m, void *private)
+{
+ int i;
+
+ pr_info("============ CPU REASON =========\n");
+ pr_info("(2)IPv4(IPv6) TTL(hop limit) = %u\n", dbg_cpu_reason_cnt[0]);
+ pr_info("(3)Ipv4(IPv6) has option(extension) header = %u\n",
+ dbg_cpu_reason_cnt[1]);
+ pr_info("(7)No flow is assigned = %u\n", dbg_cpu_reason_cnt[2]);
+ pr_info("(8)IPv4 HNAT doesn't support IPv4 /w fragment = %u\n",
+ dbg_cpu_reason_cnt[3]);
+ pr_info("(9)IPv4 HNAPT/DS-Lite doesn't support IPv4 /w fragment = %u\n",
+ dbg_cpu_reason_cnt[4]);
+ pr_info("(10)IPv4 HNAPT/DS-Lite can't find TCP/UDP sport/dport = %u\n",
+ dbg_cpu_reason_cnt[5]);
+ pr_info("(11)IPv6 5T-route/6RD can't find TCP/UDP sport/dport = %u\n",
+ dbg_cpu_reason_cnt[6]);
+ pr_info("(12)Ingress packet is TCP fin/syn/rst = %u\n",
+ dbg_cpu_reason_cnt[7]);
+ pr_info("(13)FOE Un-hit = %u\n", dbg_cpu_reason_cnt[8]);
+ pr_info("(14)FOE Hit unbind = %u\n", dbg_cpu_reason_cnt[9]);
+ pr_info("(15)FOE Hit unbind & rate reach = %u\n",
+ dbg_cpu_reason_cnt[10]);
+ pr_info("(16)Hit bind PPE TCP FIN entry = %u\n",
+ dbg_cpu_reason_cnt[11]);
+ pr_info("(17)Hit bind PPE entry and TTL(hop limit) = 1 and TTL(hot limit) - 1 = %u\n",
+ dbg_cpu_reason_cnt[12]);
+ pr_info("(18)Hit bind and VLAN replacement violation = %u\n",
+ dbg_cpu_reason_cnt[13]);
+ pr_info("(19)Hit bind and keep alive with unicast old-header packet = %u\n",
+ dbg_cpu_reason_cnt[14]);
+ pr_info("(20)Hit bind and keep alive with multicast new-header packet = %u\n",
+ dbg_cpu_reason_cnt[15]);
+ pr_info("(21)Hit bind and keep alive with duplicate old-header packet = %u\n",
+ dbg_cpu_reason_cnt[16]);
+ pr_info("(22)FOE Hit bind & force to CPU = %u\n",
+ dbg_cpu_reason_cnt[17]);
+ pr_info("(28)Hit bind and exceed MTU =%u\n", dbg_cpu_reason_cnt[18]);
+ pr_info("(24)Hit bind multicast packet to CPU = %u\n",
+ dbg_cpu_reason_cnt[19]);
+ pr_info("(25)Hit bind multicast packet to GMAC & CPU = %u\n",
+ dbg_cpu_reason_cnt[20]);
+ pr_info("(26)Pre bind = %u\n", dbg_cpu_reason_cnt[21]);
+
+ /* Counters are read-and-clear: reset all 22 slots after dumping. */
+ for (i = 0; i < 22; i++)
+ dbg_cpu_reason_cnt[i] = 0;
+ return 0;
+}
+
+/* debugfs open: bind cpu_reason_read as the seq_file show routine. */
+static int cpu_reason_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, cpu_reason_read, file->private_data);
+}
+
+/* debugfs write handler for "cpu_reason": parse "<type> [option]" and
+ * dispatch through hnat_set_func[] (0 = usage, 1 = trace a reason).
+ *
+ * Returns the number of bytes consumed, -EINVAL on oversized or
+ * unparsable input, -EFAULT on copy failure. The original ignored the
+ * kstrtol() result (leaving the argument 0 on bad input) and returned
+ * a bare -1 instead of an errno value.
+ */
+ssize_t cpu_reason_write(struct file *file, const char __user *buffer,
+			 size_t count, loff_t *data)
+{
+	char buf[32];
+	char *p_buf;
+	size_t len = count;
+	long arg0 = 0, arg1 = 0;
+	char *p_token = NULL;
+	char *p_delimiter = " \t";
+	int ret;
+
+	/* Keep one byte free for the terminator below. */
+	if (len >= sizeof(buf)) {
+		pr_info("input handling fail!\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+
+	p_buf = buf;
+	p_token = strsep(&p_buf, p_delimiter);
+	if (p_token) {
+		ret = kstrtol(p_token, 10, &arg0);
+		if (ret)
+			return -EINVAL;
+	}
+
+	switch (arg0) {
+	case 0:
+	case 1:
+		p_token = strsep(&p_buf, p_delimiter);
+		if (p_token) {
+			ret = kstrtol(p_token, 10, &arg1);
+			if (ret)
+				return -EINVAL;
+		}
+		break;
+	default:
+		pr_info("no handler defined for command id(0x%08lx)\n\r", arg0);
+		/* Fall back to the usage handler with no argument. */
+		arg0 = 0;
+		arg1 = 0;
+		break;
+	}
+
+	(*hnat_set_func[arg0])(arg1);
+
+	return len;
+}
+
+/* Read/write debugfs node: read dumps CPU-reason counters, write
+ * selects a reason to trace (see cpu_reason_write).
+ */
+static const struct file_operations cpu_reason_fops = {
+ .open = cpu_reason_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = cpu_reason_write,
+ .release = single_release,
+};
+
+/* Print a one-line summary of FOE @entry (at table slot @index) to the
+ * seq_file, formatted per packet type (NAPT/NAT/DS-Lite/MAP-E/IPv6).
+ *
+ * NOTE(review): the ipv4_hnapt address fields are read up front even
+ * for non-IPv4 entries — harmless union reads, but the values are only
+ * meaningful in the IPv4 branches.
+ */
+void dbg_dump_entry(struct seq_file *m, struct foe_entry *entry,
+ uint32_t index)
+{
+ __be32 saddr, daddr, nsaddr, ndaddr;
+
+ saddr = htonl(entry->ipv4_hnapt.sip);
+ daddr = htonl(entry->ipv4_hnapt.dip);
+ nsaddr = htonl(entry->ipv4_hnapt.new_sip);
+ ndaddr = htonl(entry->ipv4_hnapt.new_dip);
+
+ if (IS_IPV4_HNAPT(entry)) {
+ seq_printf(m,
+ "NAPT(%d): %pI4:%d->%pI4:%d => %pI4:%d->%pI4:%d\n",
+ index, &saddr, entry->ipv4_hnapt.sport, &daddr,
+ entry->ipv4_hnapt.dport, &nsaddr,
+ entry->ipv4_hnapt.new_sport, &ndaddr,
+ entry->ipv4_hnapt.new_dport);
+ } else if (IS_IPV4_HNAT(entry)) {
+ seq_printf(m, "NAT(%d): %pI4->%pI4 => %pI4->%pI4\n",
+ index, &saddr, &daddr, &nsaddr, &ndaddr);
+ }
+
+ /* Tunnel/IPv6 types are a separate if-chain: a DS-Lite entry also
+ * matched neither branch above.
+ */
+ if (IS_IPV4_DSLITE(entry)) {
+ seq_printf(m,
+ "IPv4 Ds-Lite(%d): %pI4:%d->%pI4:%d => %08X:%08X:%08X:%08X->%08X:%08X:%08X:%08X\n",
+ index, &saddr, entry->ipv4_dslite.sport, &daddr,
+ entry->ipv4_dslite.dport,
+ entry->ipv4_dslite.tunnel_sipv6_0,
+ entry->ipv4_dslite.tunnel_sipv6_1,
+ entry->ipv4_dslite.tunnel_sipv6_2,
+ entry->ipv4_dslite.tunnel_sipv6_3,
+ entry->ipv4_dslite.tunnel_dipv6_0,
+ entry->ipv4_dslite.tunnel_dipv6_1,
+ entry->ipv4_dslite.tunnel_dipv6_2,
+ entry->ipv4_dslite.tunnel_dipv6_3);
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+ } else if (IS_IPV4_MAPE(entry)) {
+ /* MAP-E reuses the DS-Lite layout plus new_sip/new_dip. */
+ nsaddr = htonl(entry->ipv4_dslite.new_sip);
+ ndaddr = htonl(entry->ipv4_dslite.new_dip);
+
+ seq_printf(m,
+ "IPv4 MAP-E(%d): %pI4:%d->%pI4:%d => %pI4:%d->%pI4:%d | Tunnel=%08X:%08X:%08X:%08X->%08X:%08X:%08X:%08X\n",
+ index, &saddr, entry->ipv4_dslite.sport,
+ &daddr, entry->ipv4_dslite.dport,
+ &nsaddr, entry->ipv4_dslite.new_sport,
+ &ndaddr, entry->ipv4_dslite.new_dport,
+ entry->ipv4_dslite.tunnel_sipv6_0,
+ entry->ipv4_dslite.tunnel_sipv6_1,
+ entry->ipv4_dslite.tunnel_sipv6_2,
+ entry->ipv4_dslite.tunnel_sipv6_3,
+ entry->ipv4_dslite.tunnel_dipv6_0,
+ entry->ipv4_dslite.tunnel_dipv6_1,
+ entry->ipv4_dslite.tunnel_dipv6_2,
+ entry->ipv4_dslite.tunnel_dipv6_3);
+#endif
+ } else if (IS_IPV6_3T_ROUTE(entry)) {
+ seq_printf(m,
+ "IPv6_3T(%d): %08X:%08X:%08X:%08X => %08X:%08X:%08X:%08X (Prot=%d)\n",
+ index, entry->ipv6_3t_route.ipv6_sip0,
+ entry->ipv6_3t_route.ipv6_sip1,
+ entry->ipv6_3t_route.ipv6_sip2,
+ entry->ipv6_3t_route.ipv6_sip3,
+ entry->ipv6_3t_route.ipv6_dip0,
+ entry->ipv6_3t_route.ipv6_dip1,
+ entry->ipv6_3t_route.ipv6_dip2,
+ entry->ipv6_3t_route.ipv6_dip3,
+ entry->ipv6_3t_route.prot);
+ } else if (IS_IPV6_5T_ROUTE(entry)) {
+ seq_printf(m,
+ "IPv6_5T(%d): %08X:%08X:%08X:%08X:%d => %08X:%08X:%08X:%08X:%d\n",
+ index, entry->ipv6_5t_route.ipv6_sip0,
+ entry->ipv6_5t_route.ipv6_sip1,
+ entry->ipv6_5t_route.ipv6_sip2,
+ entry->ipv6_5t_route.ipv6_sip3,
+ entry->ipv6_5t_route.sport,
+ entry->ipv6_5t_route.ipv6_dip0,
+ entry->ipv6_5t_route.ipv6_dip1,
+ entry->ipv6_5t_route.ipv6_dip2,
+ entry->ipv6_5t_route.ipv6_dip3,
+ entry->ipv6_5t_route.dport);
+ } else if (IS_IPV6_6RD(entry)) {
+ seq_printf(m,
+ "IPv6_6RD(%d): %08X:%08X:%08X:%08X:%d => %08X:%08X:%08X:%08X:%d\n",
+ index, entry->ipv6_6rd.ipv6_sip0,
+ entry->ipv6_6rd.ipv6_sip1, entry->ipv6_6rd.ipv6_sip2,
+ entry->ipv6_6rd.ipv6_sip3, entry->ipv6_6rd.sport,
+ entry->ipv6_6rd.ipv6_dip0, entry->ipv6_6rd.ipv6_dip1,
+ entry->ipv6_6rd.ipv6_dip2, entry->ipv6_6rd.ipv6_dip3,
+ entry->ipv6_6rd.dport);
+ }
+}
+
+/* seq_file show handler for "hnat_entry": dump every FOE entry whose
+ * state matches the dbg_entry_state filter, then print a total count
+ * with the state rendered as a human-readable name.
+ */
+int hnat_entry_read(struct seq_file *m, void *private)
+{
+	struct mtk_hnat *h = hnat_priv;
+	struct foe_entry *entry, *end;
+	const char *state_str;
+	int hash_index = 0;
+	int cnt = 0;
+
+	entry = h->foe_table_cpu;
+	end = h->foe_table_cpu + h->foe_etry_num;
+
+	for (; entry < end; entry++, hash_index++) {
+		if (entry->bfib1.state != dbg_entry_state)
+			continue;
+		cnt++;
+		dbg_dump_entry(m, entry, hash_index);
+	}
+
+	switch (dbg_entry_state) {
+	case 0:
+		state_str = "Invalid";
+		break;
+	case 1:
+		state_str = "Unbind";
+		break;
+	case 2:
+		state_str = "BIND";
+		break;
+	case 3:
+		state_str = "FIN";
+		break;
+	default:
+		state_str = "Unknown";
+		break;
+	}
+	seq_printf(m, "Total State = %s cnt = %d\n", state_str, cnt);
+
+	return 0;
+}
+
+/* debugfs write handler for "hnat_entry": parse "<cmd> [arg]" and
+ * dispatch through entry_set_func[] (0 = usage, 1 = set state filter,
+ * 2 = detail, 3 = delete entry).
+ *
+ * Returns bytes consumed, -EINVAL on oversized or unparsable input,
+ * -EFAULT on copy failure. The original ignored the kstrtol() result
+ * and returned a bare -1 instead of an errno value.
+ */
+ssize_t hnat_entry_write(struct file *file, const char __user *buffer,
+			 size_t count, loff_t *data)
+{
+	char buf[32];
+	char *p_buf;
+	size_t len = count;
+	long arg0 = 0, arg1 = 0;
+	char *p_token = NULL;
+	char *p_delimiter = " \t";
+	int ret;
+
+	/* Keep one byte free for the terminator below. */
+	if (len >= sizeof(buf)) {
+		pr_info("input handling fail!\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+
+	p_buf = buf;
+	p_token = strsep(&p_buf, p_delimiter);
+	if (p_token) {
+		ret = kstrtol(p_token, 10, &arg0);
+		if (ret)
+			return -EINVAL;
+	}
+
+	switch (arg0) {
+	case 0:
+	case 1:
+	case 2:
+	case 3:
+		p_token = strsep(&p_buf, p_delimiter);
+		if (p_token) {
+			ret = kstrtol(p_token, 10, &arg1);
+			if (ret)
+				return -EINVAL;
+		}
+		break;
+	default:
+		pr_info("no handler defined for command id(0x%08lx)\n\r", arg0);
+		/* Fall back to the usage handler with no argument. */
+		arg0 = 0;
+		arg1 = 0;
+		break;
+	}
+
+	(*entry_set_func[arg0])(arg1);
+
+	return len;
+}
+
+/* debugfs open: bind hnat_entry_read as the seq_file show routine. */
+static int hnat_entry_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hnat_entry_read, file->private_data);
+}
+
+/* Read/write debugfs node: read dumps filtered entries, write runs
+ * entry commands (see hnat_entry_write).
+ */
+static const struct file_operations hnat_entry_fops = {
+ .open = hnat_entry_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = hnat_entry_write,
+ .release = single_release,
+};
+
+/* seq_file show handler for "hnat_setting": hex-dump the first
+ * 319*4 bytes of PPE control registers, 16 bytes per line, to the
+ * kernel log (pr_info, not the seq_file).
+ *
+ * NOTE(review): the printed address is foe_table_dev + i while the
+ * values are read from ppe_base + i — the label and the data come from
+ * different bases; confirm whether foe_table_dev is intentional here.
+ */
+int hnat_setting_read(struct seq_file *m, void *private)
+{
+ struct mtk_hnat *h = hnat_priv;
+ int i;
+ int cr_max;
+
+ cr_max = 319 * 4;
+ for (i = 0; i < cr_max; i = i + 0x10) {
+ pr_info("0x%p : 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ (void *)h->foe_table_dev + i, readl(h->ppe_base + i),
+ readl(h->ppe_base + i + 4), readl(h->ppe_base + i + 8),
+ readl(h->ppe_base + i + 0xc));
+ }
+
+ return 0;
+}
+
+/* debugfs open: bind hnat_setting_read as the seq_file show routine. */
+static int hnat_setting_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hnat_setting_read, file->private_data);
+}
+
+/* debugfs write handler for "hnat_setting": parse "<type> [option]"
+ * and dispatch through cr_set_func[] (0 = usage, 1..6 = thresholds,
+ * lifetimes and keep-alive intervals; see cr_set_usage()).
+ *
+ * Returns bytes consumed, -EINVAL on oversized or unparsable input,
+ * -EFAULT on copy failure. The original ignored the kstrtol() result
+ * and returned a bare -1 instead of an errno value.
+ */
+ssize_t hnat_setting_write(struct file *file, const char __user *buffer,
+			   size_t count, loff_t *data)
+{
+	char buf[32];
+	char *p_buf;
+	size_t len = count;
+	long arg0 = 0, arg1 = 0;
+	char *p_token = NULL;
+	char *p_delimiter = " \t";
+	int ret;
+
+	/* Keep one byte free for the terminator below. */
+	if (len >= sizeof(buf)) {
+		pr_info("input handling fail!\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+
+	p_buf = buf;
+	p_token = strsep(&p_buf, p_delimiter);
+	if (p_token) {
+		ret = kstrtol(p_token, 10, &arg0);
+		if (ret)
+			return -EINVAL;
+	}
+
+	switch (arg0) {
+	case 0:
+	case 1:
+	case 2:
+	case 3:
+	case 4:
+	case 5:
+	case 6:
+		p_token = strsep(&p_buf, p_delimiter);
+		if (p_token) {
+			ret = kstrtol(p_token, 10, &arg1);
+			if (ret)
+				return -EINVAL;
+		}
+		break;
+	default:
+		pr_info("no handler defined for command id(0x%08lx)\n\r", arg0);
+		/* Fall back to the usage handler with no argument. */
+		arg0 = 0;
+		arg1 = 0;
+		break;
+	}
+
+	(*cr_set_func[arg0])(arg1);
+
+	return len;
+}
+
+/* Read/write debugfs node: read dumps PPE registers, write applies
+ * setting commands (see hnat_setting_write).
+ */
+static const struct file_operations hnat_setting_fops = {
+ .open = hnat_setting_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = hnat_setting_write,
+ .release = single_release,
+};
+
+/* seq_file show handler for the multicast table: decode and print (to
+ * the kernel log via pr_info, not the seq_file) every hardware
+ * multicast entry — address word, VID, per-port enable masks rendered
+ * as '1'/'-' flags, QoS queue id and mc_mpre_sel.
+ *
+ * Entries 0..15 live in PPE register space; entries 16+ live in the
+ * frame-engine (fe_base) register space.
+ */
+int mcast_table_dump(struct seq_file *m, void *private)
+{
+ struct mtk_hnat *h = hnat_priv;
+ struct ppe_mcast_h mcast_h;
+ struct ppe_mcast_l mcast_l;
+ u8 i, max;
+ void __iomem *reg;
+
+ /* Nothing to dump when multicast support was not initialized. */
+ if (!h->pmcast)
+ return 0;
+
+ max = h->pmcast->max_entry;
+ pr_info("MAC | VID | PortMask | QosPortMask\n");
+ for (i = 0; i < max; i++) {
+ if (i < 0x10) {
+ reg = h->ppe_base + PPE_MCAST_H_0 + i * 8;
+ mcast_h.u.value = readl(reg);
+ reg = h->ppe_base + PPE_MCAST_L_0 + i * 8;
+ mcast_l.addr = readl(reg);
+ } else {
+ reg = h->fe_base + PPE_MCAST_H_10 + (i - 0x10) * 8;
+ mcast_h.u.value = readl(reg);
+ reg = h->fe_base + PPE_MCAST_L_10 + (i - 0x10) * 8;
+ mcast_l.addr = readl(reg);
+ }
+ /* QID is 6 bits split across mc_qos_qid (low 4) and
+ * mc_qos_qid54 (bits 5:4).
+ */
+ pr_info("%08x %d %c%c%c%c %c%c%c%c (QID=%d, mc_mpre_sel=%d)\n",
+ mcast_l.addr,
+ mcast_h.u.info.mc_vid,
+ (mcast_h.u.info.mc_px_en & 0x08) ? '1' : '-',
+ (mcast_h.u.info.mc_px_en & 0x04) ? '1' : '-',
+ (mcast_h.u.info.mc_px_en & 0x02) ? '1' : '-',
+ (mcast_h.u.info.mc_px_en & 0x01) ? '1' : '-',
+ (mcast_h.u.info.mc_px_qos_en & 0x08) ? '1' : '-',
+ (mcast_h.u.info.mc_px_qos_en & 0x04) ? '1' : '-',
+ (mcast_h.u.info.mc_px_qos_en & 0x02) ? '1' : '-',
+ (mcast_h.u.info.mc_px_qos_en & 0x01) ? '1' : '-',
+ mcast_h.u.info.mc_qos_qid +
+ ((mcast_h.u.info.mc_qos_qid54) << 4),
+ mcast_h.u.info.mc_mpre_sel);
+ }
+
+ return 0;
+}
+
+/* debugfs open: bind mcast_table_dump as the seq_file show routine. */
+static int mcast_table_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mcast_table_dump, file->private_data);
+}
+
+/* Read-only debugfs node dumping the hardware multicast table. */
+static const struct file_operations hnat_mcast_fops = {
+ .open = mcast_table_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* seq_file show handler: list the registered extension interfaces.
+ * The ext_if[] table is densely packed, so iteration stops at the
+ * first empty slot; slots without an attached net_device are skipped.
+ */
+static int hnat_ext_show(struct seq_file *m, void *private)
+{
+	struct extdev_entry *ext_entry;
+	int i;
+
+	for (i = 0; i < MAX_EXT_DEVS; i++) {
+		ext_entry = hnat_priv->ext_if[i];
+		if (!ext_entry)
+			break;
+		if (!ext_entry->dev)
+			continue;
+		seq_printf(m, "ext devices [%d] = %s (dev=%p, ifindex=%d)\n",
+			   i, ext_entry->name, ext_entry->dev,
+			   ext_entry->dev->ifindex);
+	}
+
+	return 0;
+}
+
+/* debugfs open: bind hnat_ext_show as the seq_file show routine. */
+static int hnat_ext_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hnat_ext_show, file->private_data);
+}
+
+/* Read-only debugfs node listing extension interfaces. */
+static const struct file_operations hnat_ext_fops = {
+ .open = hnat_ext_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* debugfs read handler for one QDMA TX scheduler (id stored in
+ * file->private_data): decode the 16-bit scheduler config word
+ * (enable bit 11, WRR/SP bit 15, 7-bit mantissa + 4-bit decimal
+ * exponent rate) and list the queues currently mapped to it.
+ */
+static ssize_t hnat_sched_show(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ long id = (long)file->private_data;
+ struct mtk_hnat *h = hnat_priv;
+ u32 qdma_tx_sch;
+ int enable;
+ int scheduling;
+ int max_rate;
+ char *buf;
+ unsigned int len = 0, buf_len = 1500;
+ ssize_t ret_cnt;
+ int scheduler, i;
+ u32 sch_reg;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (hnat_priv->data->num_of_sch == 4)
+ qdma_tx_sch = readl(h->fe_base + QDMA_TX_4SCH_BASE(id));
+ else
+ qdma_tx_sch = readl(h->fe_base + QDMA_TX_2SCH_BASE);
+
+ /* Odd scheduler ids live in the upper half of the register. */
+ if (id & 0x1)
+ qdma_tx_sch >>= 16;
+ qdma_tx_sch &= 0xffff;
+ enable = !!(qdma_tx_sch & BIT(11));
+ scheduling = !!(qdma_tx_sch & BIT(15));
+ /* rate = mantissa (bits 10:4) scaled by 10^exponent (bits 3:0). */
+ max_rate = ((qdma_tx_sch >> 4) & 0x7f);
+ qdma_tx_sch &= 0xf;
+ while (qdma_tx_sch--)
+ max_rate *= 10;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "EN\tScheduling\tMAX\tQueue#\n%d\t%s%16d\t", enable,
+ (scheduling == 1) ? "WRR" : "SP", max_rate);
+
+ /* Scan all TX queues and list those assigned to this scheduler;
+ * queue registers are paged via QDMA_PAGE.
+ */
+ for (i = 0; i < MTK_QDMA_TX_NUM; i++) {
+ cr_set_field(h->fe_base + QDMA_PAGE, QTX_CFG_PAGE,
+ (i / NUM_OF_Q_PER_PAGE));
+ sch_reg = readl(h->fe_base + QTX_SCH(i % NUM_OF_Q_PER_PAGE));
+ if (hnat_priv->data->num_of_sch == 4)
+ scheduler = (sch_reg >> 30) & 0x3;
+ else
+ scheduler = !!(sch_reg & BIT(31));
+ if (id == scheduler)
+ len += scnprintf(buf + len, buf_len - len, "%d ", i);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ if (len > buf_len)
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ kfree(buf);
+ return ret_cnt;
+}
+
+/* debugfs write handler for one QDMA TX scheduler: parse
+ * "<enable> <sp|wrr> <rate>" and program the scheduler's 16-bit config
+ * half-word (rate stored as 7-bit mantissa + decimal exponent).
+ *
+ * Fixes vs. the original: the length check used '>' so a write of
+ * exactly sizeof(line) bytes filled the buffer with no terminator, and
+ * the buffer was never NUL-terminated at all — sscanf()/strlen() could
+ * read past the end of the stack array. The %s conversion is also now
+ * bounded to the size of scheduling[].
+ */
+static ssize_t hnat_sched_write(struct file *file, const char __user *buf,
+				size_t length, loff_t *offset)
+{
+	long id = (long)file->private_data;
+	struct mtk_hnat *h = hnat_priv;
+	char line[64];
+	int enable, rate, exp = 0, shift = 0;
+	char scheduling[32];
+	size_t size;
+	u32 qdma_tx_sch;
+	u32 val = 0;
+
+	/* Reserve one byte for the NUL terminator. */
+	if (length >= sizeof(line))
+		return -EINVAL;
+
+	if (copy_from_user(line, buf, length))
+		return -EFAULT;
+
+	line[length] = '\0';
+
+	if (sscanf(line, "%d %31s %d", &enable, scheduling, &rate) != 3)
+		return -EFAULT;
+
+	/* Normalize the rate into mantissa (<=127) * 10^exp. */
+	while (rate > 127) {
+		rate /= 10;
+		exp++;
+	}
+
+	if (enable)
+		val |= BIT(11);
+	if (strcmp(scheduling, "sp") != 0)
+		val |= BIT(15);
+	val |= (rate & 0x7f) << 4;
+	val |= exp & 0xf;
+	/* Odd scheduler ids occupy the upper half of the register. */
+	if (id & 0x1)
+		shift = 16;
+
+	if (hnat_priv->data->num_of_sch == 4)
+		qdma_tx_sch = readl(h->fe_base + QDMA_TX_4SCH_BASE(id));
+	else
+		qdma_tx_sch = readl(h->fe_base + QDMA_TX_2SCH_BASE);
+
+	qdma_tx_sch &= ~(0xffff << shift);
+	qdma_tx_sch |= val << shift;
+	if (hnat_priv->data->num_of_sch == 4)
+		writel(qdma_tx_sch, h->fe_base + QDMA_TX_4SCH_BASE(id));
+	else
+		writel(qdma_tx_sch, h->fe_base + QDMA_TX_2SCH_BASE);
+
+	size = strlen(line);
+	*offset += size;
+
+	return length;
+}
+
+/* Per-scheduler debugfs node; the scheduler id is carried in
+ * file->private_data via simple_open.
+ */
+static const struct file_operations hnat_sched_fops = {
+ .open = simple_open,
+ .read = hnat_sched_show,
+ .write = hnat_sched_write,
+ .llseek = default_llseek,
+};
+
+/* debugfs read handler for one QDMA TX queue (id in
+ * file->private_data): decode QTX_CFG/QTX_SCH for that queue —
+ * scheduler assignment, hw/sw buffer reservations, min/max shaper
+ * (7-bit mantissa * 10^exponent) and WRR weight. On HNAT >= v2 it also
+ * flips the MIB registers into debug mode to read the queue's packet
+ * and drop counters, then restores normal mode.
+ */
+static ssize_t hnat_queue_show(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct mtk_hnat *h = hnat_priv;
+ long id = (long)file->private_data;
+ u32 qtx_sch;
+ u32 qtx_cfg;
+ int scheduler;
+ int min_rate_en;
+ int min_rate;
+ int min_rate_exp;
+ int max_rate_en;
+ int max_weight;
+ int max_rate;
+ int max_rate_exp;
+ char *buf;
+ unsigned int len = 0, buf_len = 1500;
+ ssize_t ret_cnt;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Queue registers are paged: select the page, then index within. */
+ cr_set_field(h->fe_base + QDMA_PAGE, QTX_CFG_PAGE, (id / NUM_OF_Q_PER_PAGE));
+ qtx_cfg = readl(h->fe_base + QTX_CFG(id % NUM_OF_Q_PER_PAGE));
+ qtx_sch = readl(h->fe_base + QTX_SCH(id % NUM_OF_Q_PER_PAGE));
+ if (hnat_priv->data->num_of_sch == 4)
+ scheduler = (qtx_sch >> 30) & 0x3;
+ else
+ scheduler = !!(qtx_sch & BIT(31));
+ min_rate_en = !!(qtx_sch & BIT(27));
+ min_rate = (qtx_sch >> 20) & 0x7f;
+ min_rate_exp = (qtx_sch >> 16) & 0xf;
+ max_rate_en = !!(qtx_sch & BIT(11));
+ max_weight = (qtx_sch >> 12) & 0xf;
+ max_rate = (qtx_sch >> 4) & 0x7f;
+ max_rate_exp = qtx_sch & 0xf;
+ /* Expand mantissa * 10^exponent into plain rates. */
+ while (min_rate_exp--)
+ min_rate *= 10;
+
+ while (max_rate_exp--)
+ max_rate *= 10;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "scheduler: %d\nhw resv: %d\nsw resv: %d\n", scheduler,
+ (qtx_cfg >> 8) & 0xff, qtx_cfg & 0xff);
+
+ if (hnat_priv->data->version != MTK_HNAT_V1) {
+ /* Switch to debug mode */
+ cr_set_field(h->fe_base + QTX_MIB_IF, MIB_ON_QTX_CFG, 1);
+ cr_set_field(h->fe_base + QTX_MIB_IF, VQTX_MIB_EN, 1);
+ /* In debug mode these registers return counters, not config. */
+ qtx_cfg = readl(h->fe_base + QTX_CFG(id % NUM_OF_Q_PER_PAGE));
+ qtx_sch = readl(h->fe_base + QTX_SCH(id % NUM_OF_Q_PER_PAGE));
+ len += scnprintf(buf + len, buf_len - len,
+ "packet count: %u\n", qtx_cfg);
+ len += scnprintf(buf + len, buf_len - len,
+ "packet drop: %u\n\n", qtx_sch);
+
+ /* Recover to normal mode */
+ cr_set_field(hnat_priv->fe_base + QTX_MIB_IF,
+ MIB_ON_QTX_CFG, 0);
+ cr_set_field(hnat_priv->fe_base + QTX_MIB_IF, VQTX_MIB_EN, 0);
+ }
+
+ len += scnprintf(buf + len, buf_len - len,
+ " EN RATE WEIGHT\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "----------------------------\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "max%5d%9d%9d\n", max_rate_en, max_rate, max_weight);
+ len += scnprintf(buf + len, buf_len - len,
+ "min%5d%9d -\n", min_rate_en, min_rate);
+
+ if (len > buf_len)
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ kfree(buf);
+ return ret_cnt;
+}
+
+/* debugfs write handler for one QDMA TX queue: parse
+ * "<scheduler> <min_en> <min_rate> <max_en> <max_rate> <weight> <resv>"
+ * and program the queue's QTX_SCH shaper word and QTX_CFG buffer
+ * reservations (rates stored as 7-bit mantissa + decimal exponent).
+ *
+ * Fixes vs. the original: the length check used '>' so a write of
+ * exactly sizeof(line) bytes filled the buffer with no terminator, and
+ * the buffer was never NUL-terminated — sscanf()/strlen() could read
+ * past the end of the stack array. Input is also now validated before
+ * any hardware register is touched.
+ */
+static ssize_t hnat_queue_write(struct file *file, const char __user *buf,
+				size_t length, loff_t *offset)
+{
+	long id = (long)file->private_data;
+	struct mtk_hnat *h = hnat_priv;
+	char line[64];
+	int max_enable, max_rate, max_exp = 0;
+	int min_enable, min_rate, min_exp = 0;
+	int weight;
+	int resv;
+	int scheduler;
+	size_t size;
+	u32 qtx_sch;
+
+	/* Reserve one byte for the NUL terminator. */
+	if (length >= sizeof(line))
+		return -EINVAL;
+
+	if (copy_from_user(line, buf, length))
+		return -EFAULT;
+
+	line[length] = '\0';
+
+	if (sscanf(line, "%d %d %d %d %d %d %d", &scheduler, &min_enable, &min_rate,
+		   &max_enable, &max_rate, &weight, &resv) != 7)
+		return -EFAULT;
+
+	/* Select the register page for this queue and fetch QTX_SCH only
+	 * after the input has been validated.
+	 */
+	cr_set_field(h->fe_base + QDMA_PAGE, QTX_CFG_PAGE, (id / NUM_OF_Q_PER_PAGE));
+	qtx_sch = readl(h->fe_base + QTX_SCH(id % NUM_OF_Q_PER_PAGE));
+
+	/* Normalize both rates into mantissa (<=127) * 10^exp. */
+	while (max_rate > 127) {
+		max_rate /= 10;
+		max_exp++;
+	}
+
+	while (min_rate > 127) {
+		min_rate /= 10;
+		min_exp++;
+	}
+
+	/* Keep only the scheduler-select bits, then rebuild the word. */
+	qtx_sch &= 0x70000000;
+	if (hnat_priv->data->num_of_sch == 4)
+		qtx_sch |= (scheduler & 0x3) << 30;
+	else
+		qtx_sch |= (scheduler & 0x1) << 31;
+	if (min_enable)
+		qtx_sch |= BIT(27);
+	qtx_sch |= (min_rate & 0x7f) << 20;
+	qtx_sch |= (min_exp & 0xf) << 16;
+	if (max_enable)
+		qtx_sch |= BIT(11);
+	qtx_sch |= (weight & 0xf) << 12;
+	qtx_sch |= (max_rate & 0x7f) << 4;
+	qtx_sch |= max_exp & 0xf;
+	writel(qtx_sch, h->fe_base + QTX_SCH(id % NUM_OF_Q_PER_PAGE));
+
+	/* Mirror the reservation into both hw (bits 15:8) and sw (7:0). */
+	resv &= 0xff;
+	qtx_sch = readl(h->fe_base + QTX_CFG(id % NUM_OF_Q_PER_PAGE));
+	qtx_sch &= 0xffff0000;
+	qtx_sch |= (resv << 8) | resv;
+	writel(qtx_sch, h->fe_base + QTX_CFG(id % NUM_OF_Q_PER_PAGE));
+
+	size = strlen(line);
+	*offset += size;
+
+	return length;
+}
+
+/* debugfs "qdma_txqN" ops: raw read/write handlers, queue id in private_data */
+static const struct file_operations hnat_queue_fops = {
+	.open = simple_open,
+	.read = hnat_queue_show,
+	.write = hnat_queue_write,
+	.llseek = default_llseek,
+};
+
+/* debugfs write handler: select the PPD (pingpong) net device by name. */
+static ssize_t hnat_ppd_if_write(struct file *file, const char __user *buffer,
+				 size_t count, loff_t *data)
+{
+	char name_buf[IFNAMSIZ] = {0};
+	struct net_device *ndev;
+	char *ifname, *cursor;
+
+	if (count >= IFNAMSIZ)
+		return -EFAULT;
+
+	if (copy_from_user(name_buf, buffer, count))
+		return -EFAULT;
+
+	/* strip trailing newline/space from the user-supplied name */
+	cursor = name_buf;
+	ifname = strsep(&cursor, "\n\r ");
+	ndev = dev_get_by_name(&init_net, ifname);
+	if (!ndev) {
+		pr_info("no such device!\n");
+		return count;
+	}
+
+	/* drop the reference on the previously selected device */
+	if (hnat_priv->g_ppdev)
+		dev_put(hnat_priv->g_ppdev);
+	hnat_priv->g_ppdev = ndev;
+
+	strncpy(hnat_priv->ppd, ifname, IFNAMSIZ);
+	pr_info("hnat_priv ppd = %s\n", hnat_priv->ppd);
+
+	return count;
+}
+
+/* debugfs read handler: log the currently selected PPD device. */
+static int hnat_ppd_if_read(struct seq_file *m, void *private)
+{
+	pr_info("hnat_priv ppd = %s\n", hnat_priv->ppd);
+
+	if (!hnat_priv->g_ppdev)
+		pr_info("hnat_priv g_ppdev is null!\n");
+	else
+		pr_info("hnat_priv g_ppdev name = %s\n",
+			hnat_priv->g_ppdev->name);
+
+	return 0;
+}
+
+/* seq_file open wrapper for the "hnat_ppd_if" debugfs entry. */
+static int hnat_ppd_if_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, hnat_ppd_if_read, file->private_data);
+}
+
+/* debugfs "hnat_ppd_if" ops: read logs current device, write selects one */
+static const struct file_operations hnat_ppd_if_fops = {
+	.open = hnat_ppd_if_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = hnat_ppd_if_write,
+	.release = single_release,
+};
+
+/* debugfs read handler: report whether MAP-E or DS-Lite mode is active. */
+static int hnat_mape_toggle_read(struct seq_file *m, void *private)
+{
+	const char *mode = mape_toggle ? "mape" : "ds-lite";
+
+	pr_info("value=%d, %s is enabled now!\n", mape_toggle, mode);
+
+	return 0;
+}
+
+/* seq_file open wrapper for the "mape_toggle" debugfs entry. */
+static int hnat_mape_toggle_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, hnat_mape_toggle_read, file->private_data);
+}
+
+/* debugfs write handler: '1' enables MAP-E, '0' enables DS-Lite. */
+static ssize_t hnat_mape_toggle_write(struct file *file, const char __user *buffer,
+				      size_t count, loff_t *data)
+{
+	char buf;
+
+	if (count < 1)
+		return -EINVAL;
+
+	/* Only the first character is meaningful.  The old code copied
+	 * 'count' bytes into this single-char stack variable, overflowing
+	 * the stack for any write longer than one byte.
+	 */
+	if (copy_from_user(&buf, buffer, 1))
+		return -EFAULT;
+
+	if (buf == '1' && !mape_toggle) {
+		pr_info("mape is going to be enabled, ds-lite is going to be disabled !\n");
+		mape_toggle = 1;
+	} else if (buf == '0' && mape_toggle) {
+		pr_info("ds-lite is going to be enabled, mape is going to be disabled !\n");
+		mape_toggle = 0;
+	}
+
+	return count;
+}
+
+/* debugfs "mape_toggle" ops: read reports mode, write flips MAP-E/DS-Lite */
+static const struct file_operations hnat_mape_toggle_fops = {
+	.open = hnat_mape_toggle_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = hnat_mape_toggle_write,
+	.release = single_release,
+};
+
+/* debugfs read handler: report whether the netfilter hooks are active. */
+static int hnat_hook_toggle_read(struct seq_file *m, void *private)
+{
+	const char *state = hook_toggle ? "enabled" : "disabled";
+
+	pr_info("value=%d, hook is %s now!\n", hook_toggle, state);
+
+	return 0;
+}
+
+/* seq_file open wrapper for the "hook_toggle" debugfs entry. */
+static int hnat_hook_toggle_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, hnat_hook_toggle_read, file->private_data);
+}
+
+/* debugfs write handler: '1' registers the HNAT hooks, '0' removes them. */
+static ssize_t hnat_hook_toggle_write(struct file *file, const char __user *buffer,
+				      size_t count, loff_t *data)
+{
+	char buf[8];
+
+	/* Reject empty writes (buf[0] would otherwise be read
+	 * uninitialized) as well as writes larger than the buffer.
+	 */
+	if (count < 1 || count > sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, buffer, count))
+		return -EFAULT;
+
+	if (buf[0] == '1' && !hook_toggle) {
+		pr_info("hook is going to be enabled !\n");
+		hnat_enable_hook();
+	} else if (buf[0] == '0' && hook_toggle) {
+		pr_info("hook is going to be disabled !\n");
+		hnat_disable_hook();
+	}
+
+	return count;
+}
+
+/* debugfs "hook_toggle" ops: read reports state, write enables/disables */
+static const struct file_operations hnat_hook_toggle_fops = {
+	.open = hnat_hook_toggle_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = hnat_hook_toggle_write,
+	.release = single_release,
+};
+
+/* debugfs read handler: log the driver (SW) and PPE (HW) versions. */
+static int hnat_version_read(struct seq_file *m, void *private)
+{
+	pr_info("HNAT SW version : %s\nHNAT HW version : %d\n", HNAT_SW_VER, hnat_priv->data->version);
+
+	return 0;
+}
+
+/* seq_file open wrapper for the "hnat_version" debugfs entry. */
+static int hnat_version_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, hnat_version_read, file->private_data);
+}
+
+/* debugfs "hnat_version" ops: read-only version report */
+static const struct file_operations hnat_version_fops = {
+	.open = hnat_version_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/* Fetch packet/byte counters for a bound FOE entry.
+ * Returns 0 on success, -1 if the entry has no counters or is not bound.
+ */
+int get_ppe_mib(int index, u64 *pkt_cnt, u64 *byte_cnt)
+{
+	struct hnat_accounting *acct;
+	struct foe_entry *foe;
+
+	acct = hnat_get_count(hnat_priv, index);
+	if (!acct)
+		return -1;
+
+	foe = hnat_priv->foe_table_cpu + index;
+	if (foe->bfib1.state != BIND)
+		return -1;
+
+	*pkt_cnt = acct->packets;
+	*byte_cnt = acct->bytes;
+
+	return 0;
+}
+EXPORT_SYMBOL(get_ppe_mib);
+
+/* Return non-zero when the FOE entry at 'index' is in the BIND state. */
+int is_entry_binding(int index)
+{
+	const struct foe_entry *foe = hnat_priv->foe_table_cpu + index;
+
+	return foe->bfib1.state == BIND;
+}
+EXPORT_SYMBOL(is_entry_binding);
+
+/* Map a PPE_* register offset to a named entry for the regdump regset. */
+#define dump_register(nm) \
+	{ \
+		.name = __stringify(nm), .offset = PPE_##nm, \
+	}
+
+/* Registers exposed via the debugfs "regdump" file (offsets from ppe_base). */
+static const struct debugfs_reg32 hnat_regs[] = {
+	dump_register(GLO_CFG), dump_register(FLOW_CFG),
+	dump_register(IP_PROT_CHK), dump_register(IP_PROT_0),
+	dump_register(IP_PROT_1), dump_register(IP_PROT_2),
+	dump_register(IP_PROT_3), dump_register(TB_CFG),
+	dump_register(TB_BASE), dump_register(TB_USED),
+	dump_register(BNDR), dump_register(BIND_LMT_0),
+	dump_register(BIND_LMT_1), dump_register(KA),
+	dump_register(UNB_AGE), dump_register(BND_AGE_0),
+	dump_register(BND_AGE_1), dump_register(HASH_SEED),
+	dump_register(DFT_CPORT), dump_register(MCAST_PPSE),
+	dump_register(MCAST_L_0), dump_register(MCAST_H_0),
+	dump_register(MCAST_L_1), dump_register(MCAST_H_1),
+	dump_register(MCAST_L_2), dump_register(MCAST_H_2),
+	dump_register(MCAST_L_3), dump_register(MCAST_H_3),
+	dump_register(MCAST_L_4), dump_register(MCAST_H_4),
+	dump_register(MCAST_L_5), dump_register(MCAST_H_5),
+	dump_register(MCAST_L_6), dump_register(MCAST_H_6),
+	dump_register(MCAST_L_7), dump_register(MCAST_H_7),
+	dump_register(MCAST_L_8), dump_register(MCAST_H_8),
+	dump_register(MCAST_L_9), dump_register(MCAST_H_9),
+	dump_register(MCAST_L_A), dump_register(MCAST_H_A),
+	dump_register(MCAST_L_B), dump_register(MCAST_H_B),
+	dump_register(MCAST_L_C), dump_register(MCAST_H_C),
+	dump_register(MCAST_L_D), dump_register(MCAST_H_D),
+	dump_register(MCAST_L_E), dump_register(MCAST_H_E),
+	dump_register(MCAST_L_F), dump_register(MCAST_H_F),
+	dump_register(MTU_DRP), dump_register(MTU_VLYR_0),
+	dump_register(MTU_VLYR_1), dump_register(MTU_VLYR_2),
+	/* NOTE(review): VPM_TPID is listed twice — possibly one of these
+	 * was meant to be a second TPID register; confirm against the
+	 * PPE register map.
+	 */
+	dump_register(VPM_TPID), dump_register(VPM_TPID),
+	dump_register(CAH_CTRL), dump_register(CAH_TAG_SRH),
+	dump_register(CAH_LINE_RW), dump_register(CAH_WDATA),
+	dump_register(CAH_RDATA),
+};
+
+/* Create the "hnat" debugfs directory and all of its control files.
+ * Returns 0 on success or -ENOMEM; on failure everything created so far
+ * (including the regset) is torn down again.
+ */
+int hnat_init_debugfs(struct mtk_hnat *h)
+{
+	int ret = 0;
+	struct dentry *root;
+	struct dentry *file;
+	long i;
+	char name[16];
+
+	root = debugfs_create_dir("hnat", NULL);
+	if (!root) {
+		dev_notice(h->dev, "%s:err at %d\n", __func__, __LINE__);
+		ret = -ENOMEM;
+		goto err0;
+	}
+	h->root = root;
+	h->regset = kzalloc(sizeof(*h->regset), GFP_KERNEL);
+	if (!h->regset) {
+		dev_notice(h->dev, "%s:err at %d\n", __func__, __LINE__);
+		ret = -ENOMEM;
+		goto err1;
+	}
+	h->regset->regs = hnat_regs;
+	h->regset->nregs = ARRAY_SIZE(hnat_regs);
+	h->regset->base = h->ppe_base;
+
+	file = debugfs_create_regset32("regdump", S_IRUGO, root, h->regset);
+	if (!file) {
+		dev_notice(h->dev, "%s:err at %d\n", __func__, __LINE__);
+		ret = -ENOMEM;
+		goto err1;
+	}
+	/* NOTE(review): several files below have .write handlers but are
+	 * created read-only (the original "S_IRUGO | S_IRUGO" was redundant
+	 * and equals S_IRUGO); confirm whether S_IWUSR was intended before
+	 * changing the modes.
+	 */
+	debugfs_create_file("all_entry", S_IRUGO, root, h, &hnat_debug_fops);
+	debugfs_create_file("external_interface", S_IRUGO, root, h,
+			    &hnat_ext_fops);
+	debugfs_create_file("whnat_interface", S_IRUGO, root, h,
+			    &hnat_whnat_fops);
+	debugfs_create_file("cpu_reason", S_IFREG | S_IRUGO, root, h,
+			    &cpu_reason_fops);
+	debugfs_create_file("hnat_entry", S_IRUGO, root, h,
+			    &hnat_entry_fops);
+	debugfs_create_file("hnat_setting", S_IRUGO, root, h,
+			    &hnat_setting_fops);
+	debugfs_create_file("mcast_table", S_IRUGO, root, h,
+			    &hnat_mcast_fops);
+	debugfs_create_file("hook_toggle", S_IRUGO, root, h,
+			    &hnat_hook_toggle_fops);
+	debugfs_create_file("mape_toggle", S_IRUGO, root, h,
+			    &hnat_mape_toggle_fops);
+	debugfs_create_file("hnat_version", S_IRUGO, root, h,
+			    &hnat_version_fops);
+	debugfs_create_file("hnat_ppd_if", S_IRUGO, root, h,
+			    &hnat_ppd_if_fops);
+
+	/* one control file per QDMA scheduler */
+	for (i = 0; i < hnat_priv->data->num_of_sch; i++) {
+		snprintf(name, sizeof(name), "qdma_sch%ld", i);
+		debugfs_create_file(name, S_IRUGO, root, (void *)i,
+				    &hnat_sched_fops);
+	}
+
+	/* one control file per QDMA TX queue */
+	for (i = 0; i < MTK_QDMA_TX_NUM; i++) {
+		snprintf(name, sizeof(name), "qdma_txq%ld", i);
+		debugfs_create_file(name, S_IRUGO, root, (void *)i,
+				    &hnat_queue_fops);
+	}
+
+	return 0;
+
+err1:
+	/* Remove the files first, then free the regset: the old path leaked
+	 * h->regset when debugfs_create_regset32() failed.
+	 */
+	debugfs_remove_recursive(root);
+	kfree(h->regset);
+	h->regset = NULL;
+	h->root = NULL;
+err0:
+	return ret;
+}
+
+/* Tear down the debugfs tree and release the register-dump set. */
+void hnat_deinit_debugfs(struct mtk_hnat *h)
+{
+	/* remove the files before freeing the regset they reference */
+	debugfs_remove_recursive(h->root);
+	kfree(h->regset);
+	h->root = NULL;
+}
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_mcast.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_mcast.c
new file mode 100644
index 0000000..79e4bd0
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_mcast.c
@@ -0,0 +1,347 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2014-2016 Zhiqiang Yang <zhiqiang.yang@mediatek.com>
+ */
+#include <net/sock.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_bridge.h>
+#include "hnat.h"
+
+/* *
+ * mcast_entry_get - Returns the index of the first unused entry, or of an
+ * already existing entry for (vlan_id, dst_mac), in mtbl; -1 if full.
+ */
+static int mcast_entry_get(u16 vlan_id, u32 dst_mac)
+{
+	struct ppe_mcast_group *p = hnat_priv->pmcast->mtbl;
+	u8 max = hnat_priv->pmcast->max_entry;
+	int index = -1;
+	u8 i;
+
+	/* Advance 'p' in the for-increment: the old code did "p++" at the
+	 * bottom of the body, which the "continue" after the first free
+	 * slot skipped, so all later iterations compared the same stale
+	 * entry instead of walking the table.
+	 */
+	for (i = 0; i < max; i++, p++) {
+		if (index == -1 && !p->valid) {
+			index = i; /* remember the first unused slot */
+			continue;
+		}
+		/* prefer an existing valid entry for the same group */
+		if (p->valid && p->vid == vlan_id && p->mac_hi == dst_mac) {
+			index = i;
+			break;
+		}
+	}
+	if (index == -1)
+		pr_info("%s:group table is full\n", __func__);
+
+	return index;
+}
+
+/* Derive the multicast MAC address halves from an MDB entry's group IP. */
+static void get_mac_from_mdb_entry(struct br_mdb_entry *entry,
+				   u32 *mac_hi, u16 *mac_lo)
+{
+	/* Default both outputs to zero so callers never consume
+	 * uninitialized stack values when the protocol is neither
+	 * IPv4 nor IPv6 (the old switch had no default path).
+	 */
+	*mac_hi = 0;
+	*mac_lo = 0;
+
+	switch (ntohs(entry->addr.proto)) {
+	case ETH_P_IP:
+		/* IPv4 multicast MAC: 01:00:5e + low 23 bits of group IP */
+		*mac_lo = 0x0100;
+		*mac_hi = swab32((entry->addr.u.ip4 & 0xfffffe00) + 0x5e);
+		break;
+	case ETH_P_IPV6:
+		/* IPv6 multicast MAC: 33:33 + low 32 bits of group IP */
+		*mac_lo = 0x3333;
+		*mac_hi = swab32(entry->addr.u.ip6.s6_addr32[3]);
+		break;
+	}
+	trace_printk("%s:group mac_h=0x%08x, mac_l=0x%04x\n",
+		     __func__, *mac_hi, *mac_lo);
+}
+
+/*set_hnat_mtbl - program one PPE multicast table entry (H/L register pair)*/
+static int set_hnat_mtbl(struct ppe_mcast_group *group, int index)
+{
+	struct ppe_mcast_h mcast_h;
+	struct ppe_mcast_l mcast_l;
+	u16 mac_lo = group->mac_lo;
+	u32 mac_hi = group->mac_hi;
+	u8 mc_port = group->mc_port;
+	void __iomem *reg;
+
+	mcast_h.u.value = 0;
+	mcast_l.addr = 0;
+	/* prefix select: 0 -> 01:00:5e (IPv4), 1 -> 33:33 (IPv6) */
+	if (mac_lo == 0x0100)
+		mcast_h.u.info.mc_mpre_sel = 0;
+	else if (mac_lo == 0x3333)
+		mcast_h.u.info.mc_mpre_sel = 1;
+
+	mcast_h.u.info.mc_px_en = mc_port;
+	mcast_l.addr = mac_hi;
+	mcast_h.u.info.valid = group->valid;
+	trace_printk("%s:index=%d,group info=0x%x,addr=0x%x\n",
+		     __func__, index, mcast_h.u.value, mcast_l.addr);
+	if (index < 0x10) {
+		/* entries 0..15 live in the PPE register block */
+		reg = hnat_priv->ppe_base + PPE_MCAST_H_0 + ((index) * 8);
+		writel(mcast_h.u.value, reg);
+		reg = hnat_priv->ppe_base + PPE_MCAST_L_0 + ((index) * 8);
+		writel(mcast_l.addr, reg);
+	} else {
+		/* entries 16.. live in the FE register block */
+		index = index - 0x10;
+		reg = hnat_priv->fe_base + PPE_MCAST_H_10 + ((index) * 8);
+		writel(mcast_h.u.value, reg);
+		reg = hnat_priv->fe_base + PPE_MCAST_L_10 + ((index) * 8);
+		/* write the address word here; the old code copy-pasted
+		 * mcast_h.u.value into the L register, so high-table
+		 * entries never got their MAC programmed
+		 */
+		writel(mcast_l.addr, reg);
+	}
+
+	return 0;
+}
+
+/**
+ * hnat_mcast_table_update -
+ * 1.get a valid group entry
+ * 2.update group info
+ *   a.update eif&oif count
+ *   b.eif ==0 & oif == 0,delete it from group table
+ *   c.oif != 0,set mc forward port to cpu,else do not forward to cpu
+ * 3.set the group info to ppe register
+ */
+static int hnat_mcast_table_update(int type, struct br_mdb_entry *entry)
+{
+	struct net_device *dev;
+	u32 mac_hi;
+	u16 mac_lo;
+	int index;
+	struct ppe_mcast_group *group;
+
+	rcu_read_lock();
+	dev = dev_get_by_index_rcu(&init_net, entry->ifindex);
+	if (dev)
+		dev_hold(dev); /* keep dev valid after the RCU section ends */
+	rcu_read_unlock();
+	if (!dev)
+		return -ENODEV;
+
+	get_mac_from_mdb_entry(entry, &mac_hi, &mac_lo);
+	index = mcast_entry_get(entry->vid, mac_hi);
+	if (index == -1) {
+		dev_put(dev);
+		return -1;
+	}
+
+	group = &hnat_priv->pmcast->mtbl[index];
+	group->mac_hi = mac_hi;
+	group->mac_lo = mac_lo;
+	switch (type) {
+	case RTM_NEWMDB:
+		if (IS_LAN(dev) || IS_WAN(dev))
+			group->eif++;
+		else
+			group->oif++;
+		group->vid = entry->vid;
+		group->valid = true;
+		break;
+	case RTM_DELMDB:
+		if (group->valid) {
+			if (IS_LAN(dev) || IS_WAN(dev))
+				group->eif--;
+			else
+				group->oif--;
+		}
+		break;
+	}
+	trace_printk("%s:devname=%s,eif=%d,oif=%d\n", __func__,
+		     dev->name, group->eif, group->oif);
+	if (group->valid) {
+		if (group->oif && group->eif)
+			/* eth & wifi both in group: forward to cpu & GDMA1.
+			 * The old code used logical "||", which always
+			 * evaluates to 1 (== MCAST_TO_PDMA) and never set
+			 * the GDMA1 bit.
+			 */
+			group->mc_port = (MCAST_TO_PDMA | MCAST_TO_GDMA1);
+		else if (group->oif)
+			/*only wifi in group,forward to cpu only*/
+			group->mc_port = MCAST_TO_PDMA;
+		else
+			/*only eth in group,forward to GDMA1 only*/
+			group->mc_port = MCAST_TO_GDMA1;
+		if (!group->oif && !group->eif)
+			/*nobody in this group,clear the entry*/
+			memset(group, 0, sizeof(struct ppe_mcast_group));
+		set_hnat_mtbl(group, index);
+	}
+
+	dev_put(dev);
+	return 0;
+}
+
+/* Work handler: drain the netlink socket queue and apply every MDB
+ * (bridge multicast database) add/delete message to the PPE table.
+ */
+static void hnat_mcast_nlmsg_handler(struct work_struct *work)
+{
+	struct sk_buff *skb = NULL;
+	struct nlmsghdr *nlh;
+	struct nlattr *nest, *nest2, *info;
+	struct br_port_msg *bpm;
+	struct br_mdb_entry *entry;
+	struct ppe_mcast_table *pmcast;
+	struct sock *sk;
+
+	pmcast = container_of(work, struct ppe_mcast_table, work);
+	sk = pmcast->msock->sk;
+
+	while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
+		nlh = nlmsg_hdr(skb);
+		if (!nlmsg_ok(nlh, skb->len)) {
+			kfree_skb(skb);
+			continue;
+		}
+		bpm = nlmsg_data(nlh);
+		/* Skip the br_port_msg header when looking for MDBA_MDB.
+		 * The old code passed sizeof(bpm) — the size of the
+		 * *pointer* — so the attribute search started at the
+		 * wrong offset.
+		 */
+		nest = nlmsg_find_attr(nlh, sizeof(*bpm), MDBA_MDB);
+		if (!nest) {
+			kfree_skb(skb);
+			continue;
+		}
+		nest2 = nla_find_nested(nest, MDBA_MDB_ENTRY);
+		if (nest2) {
+			info = nla_find_nested(nest2, MDBA_MDB_ENTRY_INFO);
+			if (!info) {
+				kfree_skb(skb);
+				continue;
+			}
+
+			entry = (struct br_mdb_entry *)nla_data(info);
+			trace_printk("%s:cmd=0x%2x,ifindex=0x%x,state=0x%x",
+				     __func__, nlh->nlmsg_type,
+				     entry->ifindex, entry->state);
+			trace_printk("vid=0x%x,ip=0x%x,proto=0x%x\n",
+				     entry->vid, entry->addr.u.ip4,
+				     entry->addr.proto);
+			hnat_mcast_table_update(nlh->nlmsg_type, entry);
+		}
+		kfree_skb(skb);
+	}
+}
+
+/* sk_data_ready callback: defer netlink processing to the workqueue. */
+static void hnat_mcast_nlmsg_rcv(struct sock *sk)
+{
+	struct ppe_mcast_table *pmcast = hnat_priv->pmcast;
+
+	queue_work(pmcast->queue, &pmcast->work);
+}
+
+/* Open a kernel rtnetlink socket subscribed to bridge MDB notifications.
+ * Returns the socket on success, NULL on failure.
+ */
+static struct socket *hnat_mcast_netlink_open(struct net *net)
+{
+	struct socket *sock = NULL;
+	int ret;
+	struct sockaddr_nl addr;
+
+	ret = sock_create_kern(net, PF_NETLINK, SOCK_RAW, NETLINK_ROUTE, &sock);
+	if (ret < 0)
+		goto out;
+
+	sock->sk->sk_data_ready = hnat_mcast_nlmsg_rcv;
+	/* Zero the whole sockaddr first: nl_pad (and any struct padding)
+	 * was previously handed to bind() uninitialized.
+	 */
+	memset(&addr, 0, sizeof(addr));
+	addr.nl_family = PF_NETLINK;
+	addr.nl_pid = 65536; /* FIXME: how to obtain a guaranteed-unique id? */
+	addr.nl_groups = RTMGRP_MDB;
+	ret = sock->ops->bind(sock, (struct sockaddr *)&addr, sizeof(addr));
+	if (ret < 0)
+		goto out;
+
+	return sock;
+out:
+	if (sock)
+		sock_release(sock);
+
+	return NULL;
+}
+
+/* Periodic timer (10s): age out static multicast FOE entries whose stored
+ * timestamp has drifted too far from the current PPE timestamp.
+ * Used on chips that cannot age these entries in hardware.
+ */
+static void hnat_mcast_check_timestamp(struct timer_list *t)
+{
+	struct foe_entry *entry;
+	int hash_index;
+	u16 e_ts, foe_ts;
+
+	for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
+		entry = hnat_priv->foe_table_cpu + hash_index;
+		if (entry->bfib1.sta == 1) {
+			e_ts = (entry->ipv4_hnapt.m_timestamp) & 0xffff;
+			foe_ts = foe_timestamp(hnat_priv);
+			/* assumes the 16-bit timestamp wrapped when the gap
+			 * exceeds 0x3000 — TODO confirm threshold semantics
+			 */
+			if ((foe_ts - e_ts) > 0x3000)
+				foe_ts = (~(foe_ts)) & 0xffff;
+			/* drop entries older than ~20 timestamp ticks */
+			if (abs(foe_ts - e_ts) > 20)
+				entry_delete(hash_index);
+		}
+	}
+	mod_timer(&hnat_priv->hnat_mcast_check_timer, jiffies + 10 * HZ);
+}
+
+/* Allocate the multicast table, open the MDB netlink listener and enable
+ * multicast lookup in the PPE.  Returns 0 on success, -1 on failure.
+ */
+int hnat_mcast_enable(void)
+{
+	struct ppe_mcast_table *pmcast;
+
+	pmcast = kzalloc(sizeof(*pmcast), GFP_KERNEL);
+	if (!pmcast)
+		goto err;
+
+	if (hnat_priv->data->version == MTK_HNAT_V1)
+		pmcast->max_entry = 0x10;
+	else
+		pmcast->max_entry = MAX_MCAST_ENTRY;
+
+	INIT_WORK(&pmcast->work, hnat_mcast_nlmsg_handler);
+	pmcast->queue = create_singlethread_workqueue("ppe_mcast");
+	if (!pmcast->queue)
+		goto err;
+
+	pmcast->msock = hnat_mcast_netlink_open(&init_net);
+	if (!pmcast->msock)
+		goto err;
+
+	hnat_priv->pmcast = pmcast;
+
+	/* mt7629 should checkout mcast entry life time manually */
+	if (hnat_priv->data->version == MTK_HNAT_V3) {
+		timer_setup(&hnat_priv->hnat_mcast_check_timer,
+			    hnat_mcast_check_timestamp, 0);
+		hnat_priv->hnat_mcast_check_timer.expires = jiffies;
+		add_timer(&hnat_priv->hnat_mcast_check_timer);
+	}
+
+	/* Enable multicast table lookup */
+	cr_set_field(hnat_priv->ppe_base + PPE_GLO_CFG, MCAST_TB_EN, 1);
+	/* multicast port0 map to PDMA */
+	cr_set_field(hnat_priv->ppe_base + PPE_MCAST_PPSE, MC_P0_PPSE, 0);
+	/* multicast port1 map to GMAC1 */
+	cr_set_field(hnat_priv->ppe_base + PPE_MCAST_PPSE, MC_P1_PPSE, 1);
+	/* multicast port2 map to GMAC2 */
+	cr_set_field(hnat_priv->ppe_base + PPE_MCAST_PPSE, MC_P2_PPSE, 2);
+	/* multicast port3 map to QDMA */
+	cr_set_field(hnat_priv->ppe_base + PPE_MCAST_PPSE, MC_P3_PPSE, 5);
+
+	return 0;
+err:
+	/* Guard the cleanup: the old path dereferenced pmcast even when
+	 * the allocation itself had failed (NULL pointer dereference).
+	 */
+	if (pmcast) {
+		if (pmcast->queue)
+			destroy_workqueue(pmcast->queue);
+		if (pmcast->msock)
+			sock_release(pmcast->msock);
+		kfree(pmcast);
+	}
+
+	return -1;
+}
+
+/* Stop multicast offload: cancel the ageing timer, drain the work queue
+ * and release the netlink socket and table.  Always returns 0.
+ */
+int hnat_mcast_disable(void)
+{
+	struct ppe_mcast_table *pmcast = hnat_priv->pmcast;
+
+	if (hnat_priv->data->version == MTK_HNAT_V3)
+		del_timer_sync(&hnat_priv->hnat_mcast_check_timer);
+
+	/* Only dereference pmcast after the NULL check; the old code read
+	 * pmcast->msock/->queue/->work in the declarations before testing
+	 * the pointer.
+	 */
+	if (pmcast) {
+		flush_work(&pmcast->work);
+		destroy_workqueue(pmcast->queue);
+		sock_release(pmcast->msock);
+		kfree(pmcast);
+		hnat_priv->pmcast = NULL; /* avoid a dangling pointer */
+	}
+
+	return 0;
+}
+
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_mcast.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_mcast.h
new file mode 100644
index 0000000..048bc58
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_mcast.h
@@ -0,0 +1,69 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2014-2016 Zhiqiang Yang <zhiqiang.yang@mediatek.com>
+ */
+
+#ifndef NF_HNAT_MCAST_H
+#define NF_HNAT_MCAST_H
+
+/* rtnetlink multicast group masks used when binding the MDB listener */
+#define RTMGRP_IPV4_MROUTE 0x20
+#define RTMGRP_MDB 0x2000000
+
+/* size of the software multicast group table */
+#define MAX_MCAST_ENTRY 64
+
+/* mc_port bitmask: which engine(s) a group is forwarded to */
+#define MCAST_TO_PDMA (0x1 << 0)
+#define MCAST_TO_GDMA1 (0x1 << 1)
+#define MCAST_TO_GDMA2 (0x1 << 2)
+
+/* One software multicast group tracked by the driver. */
+struct ppe_mcast_group {
+	u32 mac_hi; /*multicast mac addr*/
+	u16 mac_lo; /*multicast mac addr*/
+	u16 vid;
+	u8 mc_port; /*1:forward to cpu,2:forward to GDMA1,4:forward to GDMA2*/
+	u8 eif; /*num of eth if added to multi group. */
+	u8 oif; /* num of other if added to multi group ,ex wifi.*/
+	bool valid;
+};
+
+/* Driver-wide multicast state: netlink listener, work item and table. */
+struct ppe_mcast_table {
+	struct workqueue_struct *queue;
+	struct work_struct work;
+	struct socket *msock;
+	struct ppe_mcast_group mtbl[MAX_MCAST_ENTRY];
+	u8 max_entry;
+};
+
+/* Layout of the PPE MCAST_H_x register (high word of a table entry). */
+struct ppe_mcast_h {
+	union {
+		u32 value;
+		struct {
+			u32 mc_vid:12;
+			u32 mc_qos_qid54:2; /* mt7622 only */
+			u32 valid:1;
+			u32 rev1:1;
+			/*0:forward to cpu,1:forward to GDMA1*/
+			u32 mc_px_en:4;
+			u32 mc_mpre_sel:2; /* 0=01:00, 2=33:33 */
+			u32 mc_vid_cmp:1;
+			u32 rev2:1;
+			u32 mc_px_qos_en:4;
+			u32 mc_qos_qid:4;
+		} info;
+	} u;
+};
+
+/* Layout of the PPE MCAST_L_x register: the MAC address word. */
+struct ppe_mcast_l {
+	u32 addr;
+};
+
+int hnat_mcast_enable(void);
+int hnat_mcast_disable(void);
+
+#endif
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
new file mode 100644
index 0000000..fe495ce
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
@@ -0,0 +1,2138 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
+ * Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/netfilter_bridge.h>
+#include <linux/netfilter_ipv6.h>
+
+#include <net/arp.h>
+#include <net/neighbour.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_flow_table.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+
+#include "nf_hnat_mtk.h"
+#include "hnat.h"
+
+#include "../mtk_eth_soc.h"
+
+/* Fast-path predicate: packet arrived on a GMAC/PPD port and the PPE has
+ * already hashed it with a "force to CPU" bind reason.
+ */
+#define do_ge2ext_fast(dev, skb)                                               \
+	((IS_LAN(dev) || IS_WAN(dev) || IS_PPD(dev)) &&                        \
+	 skb_hnat_is_hashed(skb) &&                                            \
+	 skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU)
+/* Predicate: packet from PDMA/QDMA on the PPD device whose VLAN tag encodes
+ * the ifindex of a known external or WAN device (learning direction).
+ */
+#define do_ext2ge_fast_learn(dev, skb)                                         \
+	(IS_PPD(dev) &&                                                        \
+	 (skb_hnat_sport(skb) == NR_PDMA_PORT ||                               \
+	  skb_hnat_sport(skb) == NR_QDMA_PORT) &&                              \
+	 ((get_dev_from_index(skb->vlan_tci & VLAN_VID_MASK)) ||               \
+	  get_wandev_from_index(skb->vlan_tci & VLAN_VID_MASK)))
+/* Predicate: MAP-E enabled and this is WAN->LAN traffic not yet tagged. */
+#define do_mape_w2l_fast(dev, skb)                                             \
+	(mape_toggle && IS_WAN(dev) && (!is_from_mape(skb)))
+
+/* Cached IPv6 headers re-inserted on MAP-E LAN->WAN / WAN->LAN traffic. */
+static struct ipv6hdr mape_l2w_v6h;
+static struct ipv6hdr mape_w2l_v6h;
+/* Return the wifi hook slot index for 'dev', or 0 when not registered. */
+static inline uint8_t get_wifi_hook_if_index_from_dev(const struct net_device *dev)
+{
+	int idx;
+
+	for (idx = 1; idx < MAX_IF_NUM; idx++)
+		if (hnat_priv->wifi_hook_if[idx] == dev)
+			return idx;
+
+	return 0;
+}
+
+/* Count the registered external-interface entries (table is packed). */
+static inline int get_ext_device_number(void)
+{
+	int count = 0;
+
+	while (count < MAX_EXT_DEVS && hnat_priv->ext_if[count])
+		count++;
+
+	return count;
+}
+
+/* Return 1 when 'name' matches a registered external interface, else 0. */
+static inline int find_extif_from_devname(const char *name)
+{
+	struct extdev_entry *entry;
+	int idx;
+
+	for (idx = 0; idx < MAX_EXT_DEVS; idx++) {
+		entry = hnat_priv->ext_if[idx];
+		if (!entry)
+			break;
+		if (strcmp(name, entry->name) == 0)
+			return 1;
+	}
+	return 0;
+}
+
+/* Return the ifindex of 'dev' if it is a registered external device, 0 otherwise. */
+static inline int get_index_from_dev(const struct net_device *dev)
+{
+	struct extdev_entry *entry;
+	int idx;
+
+	for (idx = 0; idx < MAX_EXT_DEVS; idx++) {
+		entry = hnat_priv->ext_if[idx];
+		if (!entry)
+			break;
+		if (entry->dev == dev)
+			return entry->dev->ifindex;
+	}
+	return 0;
+}
+
+/* Look up the external net_device registered under ifindex 'index';
+ * returns NULL when no entry matches.
+ */
+static inline struct net_device *get_dev_from_index(int index)
+{
+	struct extdev_entry *entry;
+	int idx;
+
+	for (idx = 0; idx < MAX_EXT_DEVS; idx++) {
+		entry = hnat_priv->ext_if[idx];
+		if (!entry)
+			break;
+		if (entry->dev && entry->dev->ifindex == index)
+			return entry->dev;
+	}
+	return NULL;
+}
+
+/* Return the WAN net_device (with a reference held) when its ifindex
+ * matches 'index', otherwise NULL.
+ */
+static inline struct net_device *get_wandev_from_index(int index)
+{
+	struct net_device *wandev;
+
+	/* dev_get_by_name() can fail; the old code dereferenced the result
+	 * unconditionally (NULL pointer dereference).
+	 */
+	wandev = dev_get_by_name(&init_net, hnat_priv->wan);
+	if (!wandev)
+		return NULL;
+
+	if (wandev->ifindex == index)
+		return wandev;
+
+	/* drop the reference on the non-match path (was leaked before) */
+	dev_put(wandev);
+	return NULL;
+}
+
+/* Attach 'dev' to its pre-registered external-interface slot (matched by
+ * name).  Holds a reference and returns the ifindex, or -1 if no slot.
+ */
+static inline int extif_set_dev(struct net_device *dev)
+{
+	struct extdev_entry *entry;
+	int idx;
+
+	for (idx = 0; idx < MAX_EXT_DEVS; idx++) {
+		entry = hnat_priv->ext_if[idx];
+		if (!entry)
+			break;
+		if (entry->dev || strcmp(dev->name, entry->name) != 0)
+			continue;
+
+		dev_hold(dev);
+		entry->dev = dev;
+		pr_info("%s(%s)\n", __func__, dev->name);
+
+		return entry->dev->ifindex;
+	}
+
+	return -1;
+}
+
+/* Detach 'dev' from its external-interface slot and drop the reference.
+ * Returns the device's ifindex, or -1 when it was not registered.
+ */
+static inline int extif_put_dev(struct net_device *dev)
+{
+	int i;
+	struct extdev_entry *ext_entry;
+
+	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
+		ext_entry = hnat_priv->ext_if[i];
+		if (ext_entry->dev == dev) {
+			/* Capture the ifindex before clearing the slot: the
+			 * old code returned ext_entry->dev->ifindex *after*
+			 * setting ext_entry->dev = NULL, a guaranteed NULL
+			 * pointer dereference.
+			 */
+			int ifindex = dev->ifindex;
+
+			ext_entry->dev = NULL;
+			dev_put(dev);
+			pr_info("%s(%s)\n", __func__, dev->name);
+
+			return ifindex;
+		}
+	}
+
+	return -1;
+}
+
+/* Append an external-interface entry; returns the new table length.
+ * When the table is already full the entry is NOT added and the current
+ * length is returned (the old code wrote past ext_if[MAX_EXT_DEVS - 1]).
+ */
+int ext_if_add(struct extdev_entry *ext_entry)
+{
+	int len = get_ext_device_number();
+
+	if (len < MAX_EXT_DEVS)
+		hnat_priv->ext_if[len++] = ext_entry;
+
+	return len;
+}
+
+/* Remove an external-interface entry, compacting the table so it stays
+ * packed.  Returns the index the entry was found at (or MAX_EXT_DEVS).
+ */
+int ext_if_del(struct extdev_entry *ext_entry)
+{
+	int i, j;
+
+	for (i = 0; i < MAX_EXT_DEVS; i++) {
+		if (hnat_priv->ext_if[i] != ext_entry)
+			continue;
+
+		/* shift the remaining entries down over the removed slot */
+		for (j = i; j < MAX_EXT_DEVS - 1 && hnat_priv->ext_if[j]; j++)
+			hnat_priv->ext_if[j] = hnat_priv->ext_if[j + 1];
+		hnat_priv->ext_if[j] = NULL;
+		break;
+	}
+
+	return i;
+}
+
+/* Invalidate every bound FOE entry when a device the PPE forwards through
+ * (LAN/WAN/external/offload-capable) changes state, then flush the HW
+ * cache and rearm the rebuild timer.
+ */
+void foe_clear_all_bind_entries(struct net_device *dev)
+{
+	int hash_index;
+	struct foe_entry *entry;
+
+	/* ignore devices the PPE never forwards through */
+	if (!IS_LAN(dev) && !IS_WAN(dev) &&
+	    !find_extif_from_devname(dev->name) &&
+	    !dev->netdev_ops->ndo_flow_offload_check)
+		return;
+
+	/* stop hardware binding while the table is being invalidated */
+	cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, SMA, SMA_ONLY_FWD_CPU);
+	for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
+		entry = hnat_priv->foe_table_cpu + hash_index;
+		if (entry->bfib1.state == BIND) {
+			entry->ipv4_hnapt.udib1.state = INVALID;
+			/* stamp with the current FE timestamp register */
+			entry->ipv4_hnapt.udib1.time_stamp =
+				readl((hnat_priv->fe_base + 0x0010)) & 0xFF;
+		}
+	}
+
+	/* clear HWNAT cache */
+	hnat_cache_ebl(1);
+
+	/* restore normal binding mode a few seconds later */
+	mod_timer(&hnat_priv->hnat_sma_build_entry_timer, jiffies + 3 * HZ);
+}
+
+/* Point the relevant GMAC's ingress at the PPE when a LAN/WAN port comes up. */
+static void gmac_ppe_fwd_enable(struct net_device *dev)
+{
+	bool use_gmac1 = IS_LAN(dev) || IS_GMAC1_MODE;
+
+	if (use_gmac1)
+		set_gmac_ppe_fwd(0, 1);
+	else if (IS_WAN(dev))
+		set_gmac_ppe_fwd(1, 1);
+}
+
+/* Netdevice notifier: wire ports into the PPE on UP, and detach/flush
+ * bound entries when a device is going down.
+ */
+int nf_hnat_netdevice_event(struct notifier_block *unused, unsigned long event,
+			    void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+	if (event == NETDEV_UP) {
+		gmac_ppe_fwd_enable(dev);
+		extif_set_dev(dev);
+	} else if (event == NETDEV_GOING_DOWN) {
+		/* wifi-hook devices are managed separately */
+		if (!get_wifi_hook_if_index_from_dev(dev))
+			extif_put_dev(dev);
+
+		foe_clear_all_bind_entries(dev);
+	}
+
+	return NOTIFY_DONE;
+}
+
+/* Invalidate bound FOE entries whose destination IP matches the updated
+ * neighbour but whose cached destination MAC no longer agrees with it.
+ */
+void foe_clear_entry(struct neighbour *neigh)
+{
+	u32 *daddr = (u32 *)neigh->primary_key;
+	unsigned char h_dest[ETH_ALEN];
+	struct foe_entry *entry;
+	int hash_index;
+	u32 dip;
+
+	dip = (u32)(*daddr);
+
+	for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
+		entry = hnat_priv->foe_table_cpu + hash_index;
+		if (entry->bfib1.state == BIND &&
+		    entry->ipv4_hnapt.new_dip == ntohl(dip)) {
+			/* reassemble the cached MAC from the entry fields */
+			*((u32 *)h_dest) = swab32(entry->ipv4_hnapt.dmac_hi);
+			*((u16 *)&h_dest[4]) =
+				swab16(entry->ipv4_hnapt.dmac_lo);
+			/* MAC addresses are raw bytes, not C strings:
+			 * strncmp() stops at the first 0x00 octet and could
+			 * declare two different MACs equal, leaving stale
+			 * bound entries in place — compare all six bytes.
+			 */
+			if (memcmp(h_dest, neigh->ha, ETH_ALEN) != 0) {
+				pr_info("%s: state=%d\n", __func__,
+					neigh->nud_state);
+				cr_set_field(hnat_priv->ppe_base + PPE_TB_CFG, SMA,
+					     SMA_ONLY_FWD_CPU);
+
+				entry->ipv4_hnapt.udib1.state = INVALID;
+				entry->ipv4_hnapt.udib1.time_stamp =
+					readl((hnat_priv->fe_base + 0x0010)) & 0xFF;
+
+				/* clear HWNAT cache */
+				hnat_cache_ebl(1);
+
+				mod_timer(&hnat_priv->hnat_sma_build_entry_timer,
+					  jiffies + 3 * HZ);
+
+				pr_info("Delete old entry: dip =%pI4\n", &dip);
+				pr_info("Old mac= %pM\n", h_dest);
+				pr_info("New mac= %pM\n", neigh->ha);
+			}
+		}
+	}
+}
+
+/* Netevent notifier: react to neighbour (ARP) updates by invalidating
+ * FOE entries that cached a now-stale destination MAC.
+ */
+int nf_hnat_netevent_handler(struct notifier_block *unused, unsigned long event,
+			     void *ptr)
+{
+	struct neighbour *neigh;
+
+	if (event == NETEVENT_NEIGH_UPDATE) {
+		neigh = ptr;
+		if (neigh->dev)
+			foe_clear_entry(neigh);
+	}
+
+	return NOTIFY_DONE;
+}
+
+/* Prepend a cached IPv6 header to an IPv4 skb for MAP-E (4-in-6) traffic.
+ * Returns 0 on success, (unsigned)-1 when the skb cannot be expanded.
+ * NOTE(review): return type is unsigned but -1 is returned; callers test
+ * with "!mape_add_ipv6_hdr(...)", which still works, but the signed/unsigned
+ * mix deserves a second look.
+ */
+unsigned int mape_add_ipv6_hdr(struct sk_buff *skb, struct ipv6hdr mape_ip6h)
+{
+	struct ethhdr *eth = NULL;
+	struct ipv6hdr *ip6h = NULL;
+	struct iphdr *iph = NULL;
+
+	/* need IPV6_HDR_LEN of headroom and an skb we may write to */
+	if (skb_headroom(skb) < IPV6_HDR_LEN || skb_shared(skb) ||
+	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
+		return -1;
+	}
+
+	/* point to L3 */
+	/* move the ethernet header up, opening a gap for the IPv6 header;
+	 * NOTE(review): the skb_push(IPV6_HDR_LEN - ETH_HLEN) arithmetic is
+	 * delicate — confirm the resulting layout against a packet capture.
+	 */
+	memcpy(skb->data - IPV6_HDR_LEN - ETH_HLEN, skb_push(skb, ETH_HLEN), ETH_HLEN);
+	memcpy(skb_push(skb, IPV6_HDR_LEN - ETH_HLEN), &mape_ip6h, IPV6_HDR_LEN);
+
+	eth = (struct ethhdr *)(skb->data - ETH_HLEN);
+	eth->h_proto = htons(ETH_P_IPV6);
+	skb->protocol = htons(ETH_P_IPV6);
+
+	iph = (struct iphdr *)(skb->data + IPV6_HDR_LEN);
+	ip6h = (struct ipv6hdr *)(skb->data);
+	ip6h->payload_len = iph->tot_len; /* maybe different with ipv4 */
+
+	skb_set_network_header(skb, 0);
+	skb_set_transport_header(skb, iph->ihl * 4 + IPV6_HDR_LEN);
+	return 0;
+}
+
+/* Classify the skb as host/broadcast/multicast from its destination MAC. */
+static void fix_skb_packet_type(struct sk_buff *skb, struct net_device *dev,
+				struct ethhdr *eth)
+{
+	skb->pkt_type = PACKET_HOST;
+
+	if (likely(!is_multicast_ether_addr(eth->h_dest)))
+		return;
+
+	if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
+		skb->pkt_type = PACKET_BROADCAST;
+	else
+		skb->pkt_type = PACKET_MULTICAST;
+}
+
+/* Bounce a packet from an external (e.g. wifi) interface through the PPD
+ * device so the PPE can learn it.  The source ifindex is encoded in the
+ * VLAN tag for the return path.  Returns 0 on success, (unsigned)-1 on
+ * failure.
+ */
+unsigned int do_hnat_ext_to_ge(struct sk_buff *skb, const struct net_device *in,
+			       const char *func)
+{
+	if (hnat_priv->g_ppdev && hnat_priv->g_ppdev->flags & IFF_UP) {
+		u16 vlan_id = 0;
+		skb_set_network_header(skb, 0);
+		skb_push(skb, ETH_HLEN);
+		set_to_ppe(skb);
+
+		/* preserve any real VLAN tag in-band before reusing the
+		 * tci field below
+		 */
+		vlan_id = skb_vlan_tag_get_id(skb);
+		if (vlan_id) {
+			skb = vlan_insert_tag(skb, skb->vlan_proto, skb->vlan_tci);
+			if (!skb)
+				return -1;
+		}
+
+		/*set where we come from*/
+		skb->vlan_proto = htons(ETH_P_8021Q);
+		skb->vlan_tci =
+			(VLAN_CFI_MASK | (in->ifindex & VLAN_VID_MASK));
+		trace_printk(
+			"%s: vlan_prot=0x%x, vlan_tci=%x, in->name=%s, skb->dev->name=%s\n",
+			__func__, ntohs(skb->vlan_proto), skb->vlan_tci,
+			in->name, hnat_priv->g_ppdev->name);
+		skb->dev = hnat_priv->g_ppdev;
+		dev_queue_xmit(skb);
+		trace_printk("%s: called from %s successfully\n", __func__, func);
+		return 0;
+	}
+
+	trace_printk("%s: called from %s fail\n", __func__, func);
+	return -1;
+}
+
+/* Second leg of the ext->GE pingpong: decode the originating interface
+ * from the VLAN tag and re-inject the packet there, or handle the MAP-E
+ * WAN->LAN pingpong case.  Returns 0 on success, (unsigned)-1 on failure.
+ */
+unsigned int do_hnat_ext_to_ge2(struct sk_buff *skb, const char *func)
+{
+	struct ethhdr *eth = eth_hdr(skb);
+	struct net_device *dev;
+	struct foe_entry *entry;
+
+	trace_printk("%s: vlan_prot=0x%x, vlan_tci=%x\n", __func__,
+		     ntohs(skb->vlan_proto), skb->vlan_tci);
+
+	/* the VLAN id carries the source device's ifindex */
+	dev = get_dev_from_index(skb->vlan_tci & VLAN_VID_MASK);
+
+	if (dev) {
+		/*set where we to go*/
+		skb->dev = dev;
+		skb->vlan_proto = 0;
+		skb->vlan_tci = 0;
+
+		if (ntohs(eth->h_proto) == ETH_P_8021Q) {
+			skb = skb_vlan_untag(skb);
+			if (unlikely(!skb))
+				return -1;
+		}
+
+		/* spread bonded traffic using the FOE hash (when valid) */
+		if (IS_BOND_MODE &&
+		    (((hnat_priv->data->version == MTK_HNAT_V4) &&
+		      (skb_hnat_entry(skb) != 0x7fff)) ||
+		     ((hnat_priv->data->version != MTK_HNAT_V4) &&
+		      (skb_hnat_entry(skb) != 0x3fff))))
+			skb_set_hash(skb, skb_hnat_entry(skb) >> 1, PKT_HASH_TYPE_L4);
+
+		set_from_extge(skb);
+		fix_skb_packet_type(skb, skb->dev, eth);
+		netif_rx(skb);
+		trace_printk("%s: called from %s successfully\n", __func__,
+			     func);
+		return 0;
+	} else {
+		/* MapE WAN --> LAN/WLAN PingPong. */
+		dev = get_wandev_from_index(skb->vlan_tci & VLAN_VID_MASK);
+		if (mape_toggle && dev) {
+			if (!mape_add_ipv6_hdr(skb, mape_w2l_v6h)) {
+				skb_set_mac_header(skb, -ETH_HLEN);
+				skb->dev = dev;
+				set_from_mape(skb);
+				skb->vlan_proto = 0;
+				skb->vlan_tci = 0;
+				fix_skb_packet_type(skb, skb->dev, eth_hdr(skb));
+				/* downgrade the entry type for the 4-in-6 flow */
+				entry = &hnat_priv->foe_table_cpu[skb_hnat_entry(skb)];
+				entry->bfib1.pkt_type = IPV4_HNAPT;
+				netif_rx(skb);
+				return 0;
+			}
+		}
+		trace_printk("%s: called from %s fail\n", __func__, func);
+		return -1;
+	}
+}
+
+/* Forward a PPE-hashed packet from a GMAC to the external device recorded
+ * in the FOE entry's act_dp field.  Falls back to MAP-E LAN->WAN handling
+ * and invalidates the entry when the target device has gone away.
+ * Returns 0 on success, (unsigned)-1 on failure.
+ */
+unsigned int do_hnat_ge_to_ext(struct sk_buff *skb, const char *func)
+{
+	/*set where we to go*/
+	u8 index;
+	struct foe_entry *entry;
+	struct net_device *dev;
+
+	entry = &hnat_priv->foe_table_cpu[skb_hnat_entry(skb)];
+
+	/* act_dp stores the target external device's ifindex */
+	if (IS_IPV4_GRP(entry))
+		index = entry->ipv4_hnapt.act_dp;
+	else
+		index = entry->ipv6_5t_route.act_dp;
+
+	skb->dev = get_dev_from_index(index);
+
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+	/* strip the HW-QoS magic VLAN tag inserted on the TX path */
+	if (eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
+		skb = skb_unshare(skb, GFP_ATOMIC);
+		if (!skb)
+			return NF_ACCEPT;
+
+		if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
+			return NF_ACCEPT;
+
+		skb_pull_rcsum(skb, VLAN_HLEN);
+
+		memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - VLAN_HLEN,
+			2 * ETH_ALEN);
+	}
+#endif
+
+	if (skb->dev) {
+		skb_set_network_header(skb, 0);
+		skb_push(skb, ETH_HLEN);
+		dev_queue_xmit(skb);
+		trace_printk("%s: called from %s successfully\n", __func__,
+			     func);
+		return 0;
+	} else {
+		if (mape_toggle) {
+			/* Add ipv6 header mape for lan/wlan -->wan */
+			dev = get_wandev_from_index(index);
+			if (dev) {
+				if (!mape_add_ipv6_hdr(skb, mape_l2w_v6h)) {
+					skb_set_network_header(skb, 0);
+					skb_push(skb, ETH_HLEN);
+					skb_set_mac_header(skb, 0);
+					skb->dev = dev;
+					dev_queue_xmit(skb);
+					return 0;
+				}
+				trace_printk("%s: called from %s fail[MapE]\n", __func__,
+					     func);
+				return -1;
+			}
+		}
+	}
+	/*if external devices is down, invalidate related ppe entry*/
+	if (entry_hnat_is_bound(entry)) {
+		entry->bfib1.state = INVALID;
+		if (IS_IPV4_GRP(entry))
+			entry->ipv4_hnapt.act_dp = 0;
+		else
+			entry->ipv6_5t_route.act_dp = 0;
+
+		/* clear HWNAT cache */
+		hnat_cache_ebl(1);
+	}
+	trace_printk("%s: called from %s fail, index=%x\n", __func__,
+		     func, index);
+	return -1;
+}
+
+/* Dump pre-routing hook context (interfaces and FOE tag fields) to the
+ * trace buffer for debugging.
+ */
+static void pre_routing_print(struct sk_buff *skb, const struct net_device *in,
+			      const struct net_device *out, const char *func)
+{
+	u32 cb2_magic = HNAT_SKB_CB2(skb)->magic;
+
+	trace_printk("[%s]: %s(iif=0x%x CB2=0x%x)-->%s (ppe_hash=0x%x) sport=0x%x reason=0x%x alg=0x%x from %s\n",
+		     __func__, in->name, skb_hnat_iface(skb), cb2_magic,
+		     out->name, skb_hnat_entry(skb), skb_hnat_sport(skb),
+		     skb_hnat_reason(skb), skb_hnat_alg(skb), func);
+}
+
+/* Dump post-routing hook context (interfaces and FOE tag fields) to the
+ * trace buffer for debugging.
+ */
+static void post_routing_print(struct sk_buff *skb, const struct net_device *in,
+			       const struct net_device *out, const char *func)
+{
+	u32 cb2_magic = HNAT_SKB_CB2(skb)->magic;
+
+	trace_printk("[%s]: %s(iif=0x%x, CB2=0x%x)-->%s (ppe_hash=0x%x) sport=0x%x reason=0x%x alg=0x%x from %s\n",
+		     __func__, in->name, skb_hnat_iface(skb), cb2_magic,
+		     out->name, skb_hnat_entry(skb), skb_hnat_sport(skb),
+		     skb_hnat_reason(skb), skb_hnat_alg(skb), func);
+}
+
+/* Tag the skb's FOE info with the logical ingress interface class so later
+ * hooks know where the packet entered.  @val is unused; the signature
+ * matches the hnat_set_head_frags() callback.  For an unrecognized,
+ * non-bridge ingress the FOE info is invalidated and wiped.
+ */
+static inline void hnat_set_iif(const struct nf_hook_state *state,
+				struct sk_buff *skb, int val)
+{
+	if (IS_LAN(state->in)) {
+		skb_hnat_iface(skb) = FOE_MAGIC_GE_LAN;
+		return;
+	}
+
+	if (IS_PPD(state->in)) {
+		skb_hnat_iface(skb) = FOE_MAGIC_GE_PPD;
+		return;
+	}
+
+	if (IS_EXT(state->in)) {
+		skb_hnat_iface(skb) = FOE_MAGIC_EXT;
+		return;
+	}
+
+	if (IS_WAN(state->in)) {
+		skb_hnat_iface(skb) = FOE_MAGIC_GE_WAN;
+		return;
+	}
+
+	if (state->in->netdev_ops->ndo_flow_offload_check) {
+		skb_hnat_iface(skb) = FOE_MAGIC_GE_VIRTUAL;
+		return;
+	}
+
+	if (!IS_BR(state->in)) {
+		skb_hnat_iface(skb) = FOE_INVALID;
+
+		if (is_magic_tag_valid(skb) && IS_SPACE_AVAILABLE_HEAD(skb))
+			memset(skb_hnat_info(skb), 0, FOE_INFO_LEN);
+	}
+}
+
+/* Set the skb's FOE ALG flag to @val (callers pass 1 for packet types the
+ * PPE should not accelerate).  @state is unused; the signature matches the
+ * hnat_set_head_frags() callback.
+ */
+static inline void hnat_set_alg(const struct nf_hook_state *state,
+				struct sk_buff *skb, int val)
+{
+	skb_hnat_alg(skb) = val;
+}
+
+/* Apply @fn to the head skb and to every fragment hanging off its
+ * frag_list, so an entire GSO/fragment chain carries the same FOE tag.
+ */
+static inline void hnat_set_head_frags(const struct nf_hook_state *state,
+				       struct sk_buff *head_skb, int val,
+				       void (*fn)(const struct nf_hook_state *state,
+						  struct sk_buff *skb, int val))
+{
+	struct sk_buff *frag;
+
+	fn(state, head_skb, val);
+	for (frag = skb_shinfo(head_skb)->frag_list; frag; frag = frag->next)
+		fn(state, frag, val);
+}
+
+/* MapE WAN->LAN fast path: strip the outer IPv6 header from an
+ * IPv4-in-IPv6 packet and bounce it through the PPD device so the PPE can
+ * learn the inner IPv4 flow.  The original outer header is saved in
+ * mape_w2l_v6h for re-adding on the pingpong pass, and the ingress
+ * ifindex is encoded in a synthetic VLAN tag for the return path.
+ * Returns 0 when the skb was re-queued (stolen), -1 otherwise.
+ */
+unsigned int do_hnat_mape_w2l_fast(struct sk_buff *skb, const struct net_device *in,
+				   const char *func)
+{
+	struct ipv6hdr *ip6h = ipv6_hdr(skb);
+	struct iphdr _iphdr;
+	struct iphdr *iph;
+	struct ethhdr *eth;
+
+	/* WAN -> LAN/WLAN MapE. */
+	if (mape_toggle && (ip6h->nexthdr == NEXTHDR_IPIP)) {
+		iph = skb_header_pointer(skb, IPV6_HDR_LEN, sizeof(_iphdr), &_iphdr);
+		/* Truncated/malformed inner header: leave the packet alone. */
+		if (unlikely(!iph))
+			return -1;
+
+		switch (iph->protocol) {
+		case IPPROTO_UDP:
+		case IPPROTO_TCP:
+			break;
+		default:
+			return -1;
+		}
+		mape_w2l_v6h = *ip6h;
+
+		/* Remove ipv6 header. */
+		memcpy(skb->data + IPV6_HDR_LEN - ETH_HLEN,
+		       skb->data - ETH_HLEN, ETH_HLEN);
+		skb_pull(skb, IPV6_HDR_LEN - ETH_HLEN);
+		skb_set_mac_header(skb, 0);
+		skb_set_network_header(skb, ETH_HLEN);
+		skb_set_transport_header(skb, ETH_HLEN + sizeof(_iphdr));
+
+		eth = eth_hdr(skb);
+		eth->h_proto = htons(ETH_P_IP);
+		set_to_ppe(skb);
+
+		/* Encode the ingress ifindex in the VID bits so the pingpong
+		 * receiver (do_hnat_ext_to_ge2) can find the way back.
+		 */
+		skb->vlan_proto = htons(ETH_P_8021Q);
+		skb->vlan_tci =
+			(VLAN_CFI_MASK | (in->ifindex & VLAN_VID_MASK));
+
+		if (!hnat_priv->g_ppdev)
+			hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);
+
+		skb->dev = hnat_priv->g_ppdev;
+		skb->protocol = htons(ETH_P_IP);
+
+		dev_queue_xmit(skb);
+
+		return 0;
+	}
+	return -1;
+}
+
+/* Decide whether the PPE can accelerate this packet at all.
+ * Returns 1 for TCP/UDP over IPv4 or IPv6 (including IPv4-in-IPv6
+ * tunnels) and for 802.1Q-tagged frames; 0 otherwise.  Broadcast frames
+ * are never candidates.
+ */
+static unsigned int is_ppe_support_type(struct sk_buff *skb)
+{
+	struct ethhdr *eth = NULL;
+	struct iphdr *iph = NULL;
+	struct ipv6hdr *ip6h = NULL;
+	struct iphdr _iphdr;
+
+	eth = eth_hdr(skb);
+	if (is_broadcast_ether_addr(eth->h_dest))
+		return 0;
+
+	switch (ntohs(skb->protocol)) {
+	case ETH_P_IP:
+		iph = ip_hdr(skb);
+
+		/* do not accelerate non tcp/udp traffic */
+		if ((iph->protocol == IPPROTO_TCP) ||
+		    (iph->protocol == IPPROTO_UDP) ||
+		    (iph->protocol == IPPROTO_IPV6)) {
+			return 1;
+		}
+
+		break;
+	case ETH_P_IPV6:
+		ip6h = ipv6_hdr(skb);
+
+		if ((ip6h->nexthdr == NEXTHDR_TCP) ||
+		    (ip6h->nexthdr == NEXTHDR_UDP)) {
+			return 1;
+		} else if (ip6h->nexthdr == NEXTHDR_IPIP) {
+			/* IPv4-in-IPv6 (DS-Lite/MapE): classify on the inner
+			 * header.  skb_header_pointer() returns NULL for a
+			 * truncated packet; treat that as unsupported.
+			 */
+			iph = skb_header_pointer(skb, IPV6_HDR_LEN,
+						 sizeof(_iphdr), &_iphdr);
+			if (unlikely(!iph))
+				return 0;
+
+			if ((iph->protocol == IPPROTO_TCP) ||
+			    (iph->protocol == IPPROTO_UDP)) {
+				return 1;
+			}
+
+		}
+
+		break;
+	case ETH_P_8021Q:
+		return 1;
+	}
+
+	return 0;
+}
+
+/* NF_INET_PRE_ROUTING hook (IPv6 family): tag the ingress interface on the
+ * skb's FOE info and steal packets belonging to one of the HNAT fast paths
+ * (ext<->GE pingpong, MapE de-tunnel).  Everything else passes with
+ * NF_ACCEPT.  The check order below is significant.
+ */
+static unsigned int
+mtk_hnat_ipv6_nf_pre_routing(void *priv, struct sk_buff *skb,
+			     const struct nf_hook_state *state)
+{
+	if (!is_ppe_support_type(skb)) {
+		/* Unsupported type: set the ALG flag so the PPE skips it. */
+		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
+		return NF_ACCEPT;
+	}
+
+	hnat_set_head_frags(state, skb, -1, hnat_set_iif);
+
+	pre_routing_print(skb, state->in, state->out, __func__);
+
+	/* Wi-Fi offload (WED) traffic needs no software assistance here. */
+	if ((skb_hnat_iface(skb) == FOE_MAGIC_WED0) ||
+	    (skb_hnat_iface(skb) == FOE_MAGIC_WED1))
+		return NF_ACCEPT;
+
+	/* packets from external devices -> xxx ,step 1 , learning stage & bound stage*/
+	if (do_ext2ge_fast_try(state->in, skb)) {
+		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
+			return NF_STOLEN;
+		/* NOTE(review): skb is passed by value, so this NULL test
+		 * cannot observe a free by the callee, and the drop path
+		 * below dereferences skb -- confirm intent.
+		 */
+		if (!skb)
+			goto drop;
+		return NF_ACCEPT;
+	}
+
+	/* packets form ge -> external device
+	 * For standalone wan interface
+	 */
+	if (do_ge2ext_fast(state->in, skb)) {
+		if (!do_hnat_ge_to_ext(skb, __func__))
+			return NF_STOLEN;
+		goto drop;
+	}
+
+	/* MapE need remove ipv6 header and pingpong. */
+	if (do_mape_w2l_fast(state->in, skb)) {
+		if (!do_hnat_mape_w2l_fast(skb, state->in, __func__))
+			return NF_STOLEN;
+		else
+			return NF_ACCEPT;
+	}
+
+	if (is_from_mape(skb))
+		clr_from_extge(skb);
+
+	return NF_ACCEPT;
+drop:
+	printk_ratelimited(KERN_WARNING
+			   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
+			   __func__, state->in->name, skb_hnat_iface(skb),
+			   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
+			   skb_hnat_sport(skb), skb_hnat_reason(skb),
+			   skb_hnat_alg(skb));
+
+	return NF_DROP;
+}
+
+/* NF_INET_PRE_ROUTING hook (IPv4 family): tag the ingress interface on the
+ * skb's FOE info and steal packets belonging to the ext<->GE fast paths.
+ * Same structure as the IPv6 hook, minus the MapE handling.
+ */
+static unsigned int
+mtk_hnat_ipv4_nf_pre_routing(void *priv, struct sk_buff *skb,
+			     const struct nf_hook_state *state)
+{
+	if (!is_ppe_support_type(skb)) {
+		/* Unsupported type: set the ALG flag so the PPE skips it. */
+		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
+		return NF_ACCEPT;
+	}
+
+	hnat_set_head_frags(state, skb, -1, hnat_set_iif);
+
+	pre_routing_print(skb, state->in, state->out, __func__);
+
+	/* Wi-Fi offload (WED) traffic needs no software assistance here. */
+	if ((skb_hnat_iface(skb) == FOE_MAGIC_WED0) ||
+	    (skb_hnat_iface(skb) == FOE_MAGIC_WED1))
+		return NF_ACCEPT;
+
+	/* packets from external devices -> xxx ,step 1 , learning stage & bound stage*/
+	if (do_ext2ge_fast_try(state->in, skb)) {
+		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
+			return NF_STOLEN;
+		/* NOTE(review): skb is passed by value, so this NULL test
+		 * cannot observe a free by the callee, and the drop path
+		 * below dereferences skb -- confirm intent.
+		 */
+		if (!skb)
+			goto drop;
+		return NF_ACCEPT;
+	}
+
+	/* packets form ge -> external device
+	 * For standalone wan interface
+	 */
+	if (do_ge2ext_fast(state->in, skb)) {
+		if (!do_hnat_ge_to_ext(skb, __func__))
+			return NF_STOLEN;
+		goto drop;
+	}
+
+	return NF_ACCEPT;
+drop:
+	printk_ratelimited(KERN_WARNING
+			   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
+			   __func__, state->in->name, skb_hnat_iface(skb),
+			   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
+			   skb_hnat_sport(skb), skb_hnat_reason(skb),
+			   skb_hnat_alg(skb));
+
+	return NF_DROP;
+}
+
+/* NF_BR_LOCAL_IN bridge hook: the bridge-side counterpart of the
+ * pre-routing hooks.  Recovers HQoS metadata from the magic VLAN tag
+ * (warp-HNAT + HW QoS builds), runs the ext<->GE learning/forward fast
+ * paths, and handles MapE pingpong in bridge mode.
+ */
+static unsigned int
+mtk_hnat_br_nf_local_in(void *priv, struct sk_buff *skb,
+			const struct nf_hook_state *state)
+{
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+	struct vlan_ethhdr *veth;
+
+	/* The HQoS tag's VLAN TCI carries the FOE entry index; restore it
+	 * and force the bound-to-CPU reason so the fast path fires.
+	 */
+	if (hnat_priv->data->whnat) {
+		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
+
+		if (eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
+			skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
+			skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
+		}
+	}
+#endif
+
+	if (!HAS_HQOS_MAGIC_TAG(skb) && !is_ppe_support_type(skb)) {
+		/* Unsupported type: set the ALG flag so the PPE skips it. */
+		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
+		return NF_ACCEPT;
+	}
+
+	hnat_set_head_frags(state, skb, -1, hnat_set_iif);
+
+	pre_routing_print(skb, state->in, state->out, __func__);
+
+	/* Optional CPU-reason statistics / packet dump for debugging. */
+	if (unlikely(debug_level >= 7)) {
+		hnat_cpu_reason_cnt(skb);
+		if (skb_hnat_reason(skb) == dbg_cpu_reason)
+			foe_dump_pkt(skb);
+	}
+
+	/* Wi-Fi offload (WED) traffic needs no software assistance here. */
+	if ((skb_hnat_iface(skb) == FOE_MAGIC_WED0) ||
+	    (skb_hnat_iface(skb) == FOE_MAGIC_WED1))
+		return NF_ACCEPT;
+
+	/* packets from external devices -> xxx ,step 1 , learning stage & bound stage*/
+	if ((skb_hnat_iface(skb) == FOE_MAGIC_EXT) && !is_from_extge(skb) &&
+	    !is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
+		if (!hnat_priv->g_ppdev)
+			hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);
+
+		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
+			return NF_STOLEN;
+		/* NOTE(review): skb is passed by value, so this NULL test
+		 * cannot observe a free by the callee, and the drop path
+		 * below dereferences skb -- confirm intent.
+		 */
+		if (!skb)
+			goto drop;
+		return NF_ACCEPT;
+	}
+
+	if (hnat_priv->data->whnat) {
+		if (skb_hnat_iface(skb) == FOE_MAGIC_EXT)
+			clr_from_extge(skb);
+
+		/* packets from external devices -> xxx ,step 2, learning stage */
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+		if (do_ext2ge_fast_learn(state->in, skb) && (eth_hdr(skb)->h_proto != HQOS_MAGIC_TAG)) {
+#else
+		if (do_ext2ge_fast_learn(state->in, skb)) {
+#endif
+			if (!do_hnat_ext_to_ge2(skb, __func__))
+				return NF_STOLEN;
+			goto drop;
+		}
+
+		/* packets form ge -> external device */
+		if (do_ge2ext_fast(state->in, skb)) {
+			if (!do_hnat_ge_to_ext(skb, __func__))
+				return NF_STOLEN;
+			goto drop;
+		}
+	}
+
+	/* MapE need remove ipv6 header and pingpong. (bridge mode) */
+	if (do_mape_w2l_fast(state->in, skb)) {
+		if (!do_hnat_mape_w2l_fast(skb, state->in, __func__))
+			return NF_STOLEN;
+		else
+			return NF_ACCEPT;
+	}
+
+	return NF_ACCEPT;
+drop:
+	printk_ratelimited(KERN_WARNING
+			   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
+			   __func__, state->in->name, skb_hnat_iface(skb),
+			   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
+			   skb_hnat_sport(skb), skb_hnat_reason(skb),
+			   skb_hnat_alg(skb));
+
+	return NF_DROP;
+}
+
+/* Resolve the L2 next hop for an IPv6 packet leaving via @out and rewrite
+ * the Ethernet addresses in place.  PPPoE paths use the precomputed
+ * addresses from @hw_path instead of a neighbour lookup.
+ * Returns 0 on success, -1 when no (valid) neighbour entry exists.
+ */
+static unsigned int hnat_ipv6_get_nexthop(struct sk_buff *skb,
+					  const struct net_device *out,
+					  struct flow_offload_hw_path *hw_path)
+{
+	const struct in6_addr *ipv6_nexthop;
+	struct neighbour *neigh = NULL;
+	struct dst_entry *dst = skb_dst(skb);
+	struct ethhdr *eth;
+
+	if (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) {
+		memcpy(eth_hdr(skb)->h_source, hw_path->eth_src, ETH_ALEN);
+		memcpy(eth_hdr(skb)->h_dest, hw_path->eth_dest, ETH_ALEN);
+		return 0;
+	}
+
+	/* The noref neighbour lookup is only valid under RCU-bh. */
+	rcu_read_lock_bh();
+	ipv6_nexthop =
+		rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
+	neigh = __ipv6_neigh_lookup_noref(dst->dev, ipv6_nexthop);
+	if (unlikely(!neigh)) {
+		dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI6)\n", __func__,
+			   &ipv6_hdr(skb)->daddr);
+		rcu_read_unlock_bh();
+		return -1;
+	}
+
+	/* why do we get all zero ethernet address ? */
+	if (!is_valid_ether_addr(neigh->ha)) {
+		rcu_read_unlock_bh();
+		return -1;
+	}
+
+	if (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPIP) {
+		/*copy ether type for DS-Lite and MapE */
+		eth = (struct ethhdr *)(skb->data - ETH_HLEN);
+		eth->h_proto = skb->protocol;
+	} else {
+		eth = eth_hdr(skb);
+	}
+
+	ether_addr_copy(eth->h_dest, neigh->ha);
+	ether_addr_copy(eth->h_source, out->dev_addr);
+
+	rcu_read_unlock_bh();
+
+	return 0;
+}
+
+/* Resolve the L2 next hop for an IPv4 packet leaving via @out and rewrite
+ * the Ethernet addresses in place.  PPPoE paths use the precomputed
+ * addresses from @hw_path instead of a neighbour lookup.
+ * Returns 0 on success, -1 when no (valid) neighbour entry exists.
+ */
+static unsigned int hnat_ipv4_get_nexthop(struct sk_buff *skb,
+					  const struct net_device *out,
+					  struct flow_offload_hw_path *hw_path)
+{
+	struct net_device *dev = (__force struct net_device *)out;
+	struct dst_entry *dst = skb_dst(skb);
+	struct rtable *rt = (struct rtable *)dst;
+	struct neighbour *neigh;
+	u32 nexthop;
+
+	if (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) {
+		memcpy(eth_hdr(skb)->h_source, hw_path->eth_src, ETH_ALEN);
+		memcpy(eth_hdr(skb)->h_dest, hw_path->eth_dest, ETH_ALEN);
+		return 0;
+	}
+
+	/* The noref neighbour lookup is only valid under RCU-bh. */
+	rcu_read_lock_bh();
+	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
+	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
+	if (unlikely(!neigh)) {
+		dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI4)\n", __func__,
+			   &ip_hdr(skb)->daddr);
+		goto out_unlock_fail;
+	}
+
+	/* why do we get all zero ethernet address ? */
+	if (!is_valid_ether_addr(neigh->ha))
+		goto out_unlock_fail;
+
+	memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN);
+	memcpy(eth_hdr(skb)->h_source, out->dev_addr, ETH_ALEN);
+
+	rcu_read_unlock_bh();
+
+	return 0;
+
+out_unlock_fail:
+	rcu_read_unlock_bh();
+	return -1;
+}
+
+/* Compute the checksum base the PPE uses for IPv4 header rewriting: the
+ * 16-bit sum of the one's complements of the header checksum, total
+ * length and identification fields, folded twice with a 3-bit carry mask.
+ */
+static u16 ppe_get_chkbase(struct iphdr *iph)
+{
+	u16 inv_check = ~ntohs(iph->check);
+	u16 inv_len = ~ntohs(iph->tot_len);
+	u16 inv_id = ~ntohs(iph->id);
+	u32 acc;
+
+	acc = (u32)inv_check + inv_len + inv_id;
+	acc = ((acc >> 16) & 0x7) + (acc & 0xFFFF);
+	acc = ((acc >> 16) & 0x7) + (acc & 0xFFFF);
+
+	return (u16)(acc & 0xFFFF);
+}
+
+/* Copy the L2 addressing (DMAC/SMAC split into the hi/lo words the PPE
+ * expects, byte-swapped) and the PPPoE session id from the frame into
+ * @entry.  The entry is passed and returned by value.
+ * (Fix: "&eth" had been corrupted into the mojibake byte U+00F0,
+ * which does not compile.)
+ */
+struct foe_entry ppe_fill_L2_info(struct ethhdr *eth, struct foe_entry entry,
+				  struct flow_offload_hw_path *hw_path)
+{
+	switch (entry.bfib1.pkt_type) {
+	case IPV4_HNAPT:
+	case IPV4_HNAT:
+		entry.ipv4_hnapt.dmac_hi = swab32(*((u32 *)eth->h_dest));
+		entry.ipv4_hnapt.dmac_lo = swab16(*((u16 *)&eth->h_dest[4]));
+		entry.ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
+		entry.ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
+		entry.ipv4_hnapt.pppoe_id = hw_path->pppoe_sid;
+		break;
+	case IPV4_DSLITE:
+	case IPV4_MAP_E:
+	case IPV6_6RD:
+	case IPV6_5T_ROUTE:
+	case IPV6_3T_ROUTE:
+		entry.ipv6_5t_route.dmac_hi = swab32(*((u32 *)eth->h_dest));
+		entry.ipv6_5t_route.dmac_lo = swab16(*((u16 *)&eth->h_dest[4]));
+		entry.ipv6_5t_route.smac_hi = swab32(*((u32 *)eth->h_source));
+		entry.ipv6_5t_route.smac_lo =
+			swab16(*((u16 *)&eth->h_source[4]));
+		entry.ipv6_5t_route.pppoe_id = hw_path->pppoe_sid;
+		break;
+	}
+	return entry;
+}
+
+/* Fill the info-block control bits of @entry: PPPoE insertion, VLAN
+ * layering, TTL decrement, cache/keep-alive flags, hardware timestamp
+ * and multicast handling.  The entry is passed and returned by value.
+ * (Fix: "&eth" had been corrupted into the mojibake byte U+00F0,
+ * which does not compile.)
+ */
+struct foe_entry ppe_fill_info_blk(struct ethhdr *eth, struct foe_entry entry,
+				   struct flow_offload_hw_path *hw_path)
+{
+	entry.bfib1.psn = (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) ? 1 : 0;
+	entry.bfib1.vlan_layer += (hw_path->flags & FLOW_OFFLOAD_PATH_VLAN) ? 1 : 0;
+	entry.bfib1.vpm = (entry.bfib1.vlan_layer) ? 1 : 0;
+	entry.bfib1.ttl = 1;
+	entry.bfib1.cah = 1;
+	entry.bfib1.ka = 1;
+	/* V4 hardware keeps an 8-bit timestamp, earlier units 15-bit. */
+	entry.bfib1.time_stamp = (hnat_priv->data->version == MTK_HNAT_V4) ?
+		readl(hnat_priv->fe_base + 0x0010) & (0xFF) :
+		readl(hnat_priv->fe_base + 0x0010) & (0x7FFF);
+
+	switch (entry.bfib1.pkt_type) {
+	case IPV4_HNAPT:
+	case IPV4_HNAT:
+		if (is_multicast_ether_addr(&eth->h_dest[0])) {
+			entry.ipv4_hnapt.iblk2.mcast = 1;
+			if (hnat_priv->data->version == MTK_HNAT_V3) {
+				entry.bfib1.sta = 1;
+				entry.ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
+			}
+		} else {
+			entry.ipv4_hnapt.iblk2.mcast = 0;
+		}
+
+		entry.ipv4_hnapt.iblk2.port_ag =
+			(hnat_priv->data->version == MTK_HNAT_V4) ? 0x3f : 0xf;
+		break;
+	case IPV4_DSLITE:
+	case IPV4_MAP_E:
+	case IPV6_6RD:
+	case IPV6_5T_ROUTE:
+	case IPV6_3T_ROUTE:
+		if (is_multicast_ether_addr(&eth->h_dest[0])) {
+			entry.ipv6_5t_route.iblk2.mcast = 1;
+			if (hnat_priv->data->version == MTK_HNAT_V3) {
+				entry.bfib1.sta = 1;
+				/* NOTE(review): writes the ipv4_hnapt view of
+				 * the union in the IPv6 branch -- confirm the
+				 * m_timestamp fields overlay exactly.
+				 */
+				entry.ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
+			}
+		} else {
+			entry.ipv6_5t_route.iblk2.mcast = 0;
+		}
+
+		entry.ipv6_5t_route.iblk2.port_ag =
+			(hnat_priv->data->version == MTK_HNAT_V4) ? 0x3f : 0xf;
+		break;
+	}
+	return entry;
+}
+
+/* Copy the IPv6 flow-label bytes into the entry in the byte-reversed
+ * order the PPE expects.
+ */
+static void ppe_fill_flow_lbl(struct foe_entry *entry, struct ipv6hdr *ip6h)
+{
+	int i;
+
+	for (i = 0; i < 3; i++)
+		entry->ipv4_dslite.flow_lbl[i] = ip6h->flow_lbl[2 - i];
+}
+
+/* Build a complete FOE entry for @skb bound for @dev from the (unbound)
+ * hardware entry @foe, the parsed headers and the flow-offload path info,
+ * then commit it to the PPE table.  Handles plain IPv4/IPv6 NAT as well
+ * as the DS-Lite, MapE and 6RD tunnel cases, and selects the destination
+ * port (GMAC/PDMA) and QoS fields.
+ * Returns 0 on success (entry written; state set to BIND unless the
+ * Wi-Fi warp engine will finish it), -1 on unsupported/malformed input.
+ * (Fix: both skb_header_pointer() results are now NULL-checked before
+ * dereference; a truncated packet previously caused a NULL deref.)
+ */
+static unsigned int skb_to_hnat_info(struct sk_buff *skb,
+				     const struct net_device *dev,
+				     struct foe_entry *foe,
+				     struct flow_offload_hw_path *hw_path)
+{
+	struct foe_entry entry = { 0 };
+	int whnat = IS_WHNAT(dev);
+	struct ethhdr *eth;
+	struct iphdr *iph;
+	struct ipv6hdr *ip6h;
+	struct tcpudphdr _ports;
+	const struct tcpudphdr *pptr;
+	u32 gmac = NR_DISCARD;
+	int udp = 0;
+	u32 qid = 0;
+	int mape = 0;
+
+	if (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPIP)
+		/* point to ethernet header for DS-Lite and MapE */
+		eth = (struct ethhdr *)(skb->data - ETH_HLEN);
+	else
+		eth = eth_hdr(skb);
+	if (is_multicast_ether_addr(eth->h_dest)) {
+		/*do not bind multicast if PPE mcast not enable*/
+		if (!hnat_priv->pmcast)
+			return 0;
+	}
+
+	entry.bfib1.pkt_type = foe->udib1.pkt_type; /* Get packte type state*/
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+	entry.bfib1.sp = foe->udib1.sp;
+#endif
+
+	switch (ntohs(eth->h_proto)) {
+	case ETH_P_IP:
+		iph = ip_hdr(skb);
+		switch (iph->protocol) {
+		case IPPROTO_UDP:
+			udp = 1;
+			/* fallthrough */
+		case IPPROTO_TCP:
+			entry.ipv4_hnapt.etype = htons(ETH_P_IP);
+
+			/* DS-Lite WAN->LAN */
+			if (entry.ipv4_hnapt.bfib1.pkt_type == IPV4_DSLITE ||
+			    entry.ipv4_hnapt.bfib1.pkt_type == IPV4_MAP_E) {
+				entry.ipv4_dslite.sip = foe->ipv4_dslite.sip;
+				entry.ipv4_dslite.dip = foe->ipv4_dslite.dip;
+				entry.ipv4_dslite.sport =
+					foe->ipv4_dslite.sport;
+				entry.ipv4_dslite.dport =
+					foe->ipv4_dslite.dport;
+
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+				if (entry.bfib1.pkt_type == IPV4_MAP_E) {
+					pptr = skb_header_pointer(skb,
+								  iph->ihl * 4,
+								  sizeof(_ports),
+								  &_ports);
+					/* Truncated L4 header: don't bind. */
+					if (unlikely(!pptr))
+						return -1;
+
+					entry.ipv4_dslite.new_sip =
+							ntohl(iph->saddr);
+					entry.ipv4_dslite.new_dip =
+							ntohl(iph->daddr);
+					entry.ipv4_dslite.new_sport =
+							ntohs(pptr->src);
+					entry.ipv4_dslite.new_dport =
+							ntohs(pptr->dst);
+				}
+#endif
+
+				entry.ipv4_dslite.tunnel_sipv6_0 =
+					foe->ipv4_dslite.tunnel_sipv6_0;
+				entry.ipv4_dslite.tunnel_sipv6_1 =
+					foe->ipv4_dslite.tunnel_sipv6_1;
+				entry.ipv4_dslite.tunnel_sipv6_2 =
+					foe->ipv4_dslite.tunnel_sipv6_2;
+				entry.ipv4_dslite.tunnel_sipv6_3 =
+					foe->ipv4_dslite.tunnel_sipv6_3;
+
+				entry.ipv4_dslite.tunnel_dipv6_0 =
+					foe->ipv4_dslite.tunnel_dipv6_0;
+				entry.ipv4_dslite.tunnel_dipv6_1 =
+					foe->ipv4_dslite.tunnel_dipv6_1;
+				entry.ipv4_dslite.tunnel_dipv6_2 =
+					foe->ipv4_dslite.tunnel_dipv6_2;
+				entry.ipv4_dslite.tunnel_dipv6_3 =
+					foe->ipv4_dslite.tunnel_dipv6_3;
+
+				entry.ipv4_dslite.bfib1.rmt = 1;
+				entry.ipv4_dslite.iblk2.dscp = iph->tos;
+				entry.ipv4_dslite.vlan1 = hw_path->vlan_id;
+				if (hnat_priv->data->per_flow_accounting)
+					entry.ipv4_dslite.iblk2.mibf = 1;
+
+			} else {
+				entry.ipv4_hnapt.iblk2.dscp = iph->tos;
+				if (hnat_priv->data->per_flow_accounting)
+					entry.ipv4_hnapt.iblk2.mibf = 1;
+
+				entry.ipv4_hnapt.vlan1 = hw_path->vlan_id;
+
+				/* Stack an extra VLAN layer when a tagged
+				 * WAN-side flow heads to the LAN.
+				 */
+				if (skb->vlan_tci && FROM_GE_WAN(skb) && IS_LAN(dev)) {
+					entry.bfib1.vlan_layer += 1;
+
+					if (entry.ipv4_hnapt.vlan1)
+						entry.ipv4_hnapt.vlan2 = (skb->vlan_tci & VLAN_VID_MASK);
+					else
+						entry.ipv4_hnapt.vlan1 = (skb->vlan_tci & VLAN_VID_MASK);
+				}
+
+				entry.ipv4_hnapt.sip = foe->ipv4_hnapt.sip;
+				entry.ipv4_hnapt.dip = foe->ipv4_hnapt.dip;
+				entry.ipv4_hnapt.sport = foe->ipv4_hnapt.sport;
+				entry.ipv4_hnapt.dport = foe->ipv4_hnapt.dport;
+
+				entry.ipv4_hnapt.new_sip = ntohl(iph->saddr);
+				entry.ipv4_hnapt.new_dip = ntohl(iph->daddr);
+			}
+
+			entry.ipv4_hnapt.bfib1.udp = udp;
+			if (IS_IPV4_HNAPT(foe)) {
+				pptr = skb_header_pointer(skb, iph->ihl * 4,
+							  sizeof(_ports),
+							  &_ports);
+				/* Truncated L4 header: don't bind. */
+				if (unlikely(!pptr))
+					return -1;
+
+				entry.ipv4_hnapt.new_sport = ntohs(pptr->src);
+				entry.ipv4_hnapt.new_dport = ntohs(pptr->dst);
+			}
+
+			break;
+
+		default:
+			return -1;
+		}
+		trace_printk(
+			"[%s]skb->head=%p, skb->data=%p,ip_hdr=%p, skb->len=%d, skb->data_len=%d\n",
+			__func__, skb->head, skb->data, iph, skb->len,
+			skb->data_len);
+		break;
+
+	case ETH_P_IPV6:
+		ip6h = ipv6_hdr(skb);
+		switch (ip6h->nexthdr) {
+		case NEXTHDR_UDP:
+			udp = 1;
+			/* fallthrough */
+		case NEXTHDR_TCP: /* IPv6-5T or IPv6-3T */
+			entry.ipv6_5t_route.etype = htons(ETH_P_IPV6);
+
+			entry.ipv6_5t_route.vlan1 = hw_path->vlan_id;
+
+			/* Stack an extra VLAN layer when a tagged WAN-side
+			 * flow heads to the LAN.
+			 */
+			if (skb->vlan_tci && FROM_GE_WAN(skb) && IS_LAN(dev)) {
+				entry.bfib1.vlan_layer += 1;
+
+				if (entry.ipv6_5t_route.vlan1)
+					entry.ipv6_5t_route.vlan2 = (skb->vlan_tci & VLAN_VID_MASK);
+				else
+					entry.ipv6_5t_route.vlan1 = (skb->vlan_tci & VLAN_VID_MASK);
+			}
+
+			if (hnat_priv->data->per_flow_accounting)
+				entry.ipv6_5t_route.iblk2.mibf = 1;
+			entry.ipv6_5t_route.bfib1.udp = udp;
+
+			if (IS_IPV6_6RD(foe)) {
+				entry.ipv6_5t_route.bfib1.rmt = 1;
+				entry.ipv6_6rd.tunnel_sipv4 =
+					foe->ipv6_6rd.tunnel_sipv4;
+				entry.ipv6_6rd.tunnel_dipv4 =
+					foe->ipv6_6rd.tunnel_dipv4;
+			}
+
+			entry.ipv6_3t_route.ipv6_sip0 =
+				foe->ipv6_3t_route.ipv6_sip0;
+			entry.ipv6_3t_route.ipv6_sip1 =
+				foe->ipv6_3t_route.ipv6_sip1;
+			entry.ipv6_3t_route.ipv6_sip2 =
+				foe->ipv6_3t_route.ipv6_sip2;
+			entry.ipv6_3t_route.ipv6_sip3 =
+				foe->ipv6_3t_route.ipv6_sip3;
+
+			entry.ipv6_3t_route.ipv6_dip0 =
+				foe->ipv6_3t_route.ipv6_dip0;
+			entry.ipv6_3t_route.ipv6_dip1 =
+				foe->ipv6_3t_route.ipv6_dip1;
+			entry.ipv6_3t_route.ipv6_dip2 =
+				foe->ipv6_3t_route.ipv6_dip2;
+			entry.ipv6_3t_route.ipv6_dip3 =
+				foe->ipv6_3t_route.ipv6_dip3;
+
+			if (IS_IPV6_5T_ROUTE(foe) || IS_IPV6_6RD(foe)) {
+				entry.ipv6_5t_route.sport =
+					foe->ipv6_5t_route.sport;
+				entry.ipv6_5t_route.dport =
+					foe->ipv6_5t_route.dport;
+			}
+			entry.ipv6_5t_route.iblk2.dscp =
+				(ip6h->priority << 4 |
+				 (ip6h->flow_lbl[0] >> 4));
+			break;
+
+		case NEXTHDR_IPIP:
+			if ((!mape_toggle &&
+			     entry.bfib1.pkt_type == IPV4_DSLITE) ||
+			    (mape_toggle &&
+			     entry.bfib1.pkt_type == IPV4_MAP_E)) {
+				/* DS-Lite LAN->WAN */
+				entry.ipv4_dslite.bfib1.udp =
+					foe->ipv4_dslite.bfib1.udp;
+				entry.ipv4_dslite.sip = foe->ipv4_dslite.sip;
+				entry.ipv4_dslite.dip = foe->ipv4_dslite.dip;
+				entry.ipv4_dslite.sport =
+					foe->ipv4_dslite.sport;
+				entry.ipv4_dslite.dport =
+					foe->ipv4_dslite.dport;
+
+				entry.ipv4_dslite.tunnel_sipv6_0 =
+					ntohl(ip6h->saddr.s6_addr32[0]);
+				entry.ipv4_dslite.tunnel_sipv6_1 =
+					ntohl(ip6h->saddr.s6_addr32[1]);
+				entry.ipv4_dslite.tunnel_sipv6_2 =
+					ntohl(ip6h->saddr.s6_addr32[2]);
+				entry.ipv4_dslite.tunnel_sipv6_3 =
+					ntohl(ip6h->saddr.s6_addr32[3]);
+
+				entry.ipv4_dslite.tunnel_dipv6_0 =
+					ntohl(ip6h->daddr.s6_addr32[0]);
+				entry.ipv4_dslite.tunnel_dipv6_1 =
+					ntohl(ip6h->daddr.s6_addr32[1]);
+				entry.ipv4_dslite.tunnel_dipv6_2 =
+					ntohl(ip6h->daddr.s6_addr32[2]);
+				entry.ipv4_dslite.tunnel_dipv6_3 =
+					ntohl(ip6h->daddr.s6_addr32[3]);
+
+				ppe_fill_flow_lbl(&entry, ip6h);
+
+				entry.ipv4_dslite.priority = ip6h->priority;
+				entry.ipv4_dslite.hop_limit = ip6h->hop_limit;
+				entry.ipv4_dslite.vlan1 = hw_path->vlan_id;
+				if (hnat_priv->data->per_flow_accounting)
+					entry.ipv4_dslite.iblk2.mibf = 1;
+			} else if (mape_toggle &&
+				   entry.bfib1.pkt_type == IPV4_HNAPT) {
+				/* MapE LAN -> WAN */
+				mape = 1;
+				entry.ipv4_hnapt.iblk2.dscp =
+					foe->ipv4_hnapt.iblk2.dscp;
+				if (hnat_priv->data->per_flow_accounting)
+					entry.ipv4_hnapt.iblk2.mibf = 1;
+
+				entry.ipv4_hnapt.vlan1 = hw_path->vlan_id;
+
+				entry.ipv4_hnapt.sip = foe->ipv4_hnapt.sip;
+				entry.ipv4_hnapt.dip = foe->ipv4_hnapt.dip;
+				entry.ipv4_hnapt.sport = foe->ipv4_hnapt.sport;
+				entry.ipv4_hnapt.dport = foe->ipv4_hnapt.dport;
+
+				entry.ipv4_hnapt.new_sip =
+					foe->ipv4_hnapt.new_sip;
+				entry.ipv4_hnapt.new_dip =
+					foe->ipv4_hnapt.new_dip;
+				entry.ipv4_hnapt.etype = htons(ETH_P_IP);
+
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+				entry.ipv4_hnapt.iblk2.qid =
+					(hnat_priv->data->version == MTK_HNAT_V4) ?
+					 skb->mark & 0x7f : skb->mark & 0xf;
+				entry.ipv4_hnapt.iblk2.fqos = 1;
+#endif
+
+				entry.ipv4_hnapt.bfib1.udp =
+					foe->ipv4_hnapt.bfib1.udp;
+
+				entry.ipv4_hnapt.new_sport =
+					foe->ipv4_hnapt.new_sport;
+				entry.ipv4_hnapt.new_dport =
+					foe->ipv4_hnapt.new_dport;
+				/* Save the outer header for re-encap on TX. */
+				mape_l2w_v6h = *ip6h;
+			}
+			break;
+
+		default:
+			return -1;
+		}
+
+		trace_printk(
+			"[%s]skb->head=%p, skb->data=%p,ipv6_hdr=%p, skb->len=%d, skb->data_len=%d\n",
+			__func__, skb->head, skb->data, ip6h, skb->len,
+			skb->data_len);
+		break;
+
+	default:
+		ip6h = ipv6_hdr(skb);
+		iph = ip_hdr(skb);
+		switch (entry.bfib1.pkt_type) {
+		case IPV6_6RD: /* 6RD LAN->WAN */
+			entry.ipv6_6rd.ipv6_sip0 = foe->ipv6_6rd.ipv6_sip0;
+			entry.ipv6_6rd.ipv6_sip1 = foe->ipv6_6rd.ipv6_sip1;
+			entry.ipv6_6rd.ipv6_sip2 = foe->ipv6_6rd.ipv6_sip2;
+			entry.ipv6_6rd.ipv6_sip3 = foe->ipv6_6rd.ipv6_sip3;
+
+			entry.ipv6_6rd.ipv6_dip0 = foe->ipv6_6rd.ipv6_dip0;
+			entry.ipv6_6rd.ipv6_dip1 = foe->ipv6_6rd.ipv6_dip1;
+			entry.ipv6_6rd.ipv6_dip2 = foe->ipv6_6rd.ipv6_dip2;
+			entry.ipv6_6rd.ipv6_dip3 = foe->ipv6_6rd.ipv6_dip3;
+
+			entry.ipv6_6rd.sport = foe->ipv6_6rd.sport;
+			entry.ipv6_6rd.dport = foe->ipv6_6rd.dport;
+			entry.ipv6_6rd.tunnel_sipv4 = ntohl(iph->saddr);
+			entry.ipv6_6rd.tunnel_dipv4 = ntohl(iph->daddr);
+			entry.ipv6_6rd.hdr_chksum = ppe_get_chkbase(iph);
+			entry.ipv6_6rd.flag = (ntohs(iph->frag_off) >> 13);
+			entry.ipv6_6rd.ttl = iph->ttl;
+			entry.ipv6_6rd.dscp = iph->tos;
+			entry.ipv6_6rd.per_flow_6rd_id = 1;
+			entry.ipv6_6rd.vlan1 = hw_path->vlan_id;
+			if (hnat_priv->data->per_flow_accounting)
+				entry.ipv6_6rd.iblk2.mibf = 1;
+			break;
+
+		default:
+			return -1;
+		}
+	}
+
+	/* Fill Layer2 Info.*/
+	entry = ppe_fill_L2_info(eth, entry, hw_path);
+
+	/* Fill Info Blk*/
+	entry = ppe_fill_info_blk(eth, entry, hw_path);
+
+	if (IS_LAN(dev)) {
+		if (IS_DSA_LAN(dev))
+			hnat_dsa_fill_stag(dev, &entry, hw_path,
+					   ntohs(eth->h_proto), mape);
+
+		if (IS_BOND_MODE)
+			gmac = ((skb_hnat_entry(skb) >> 1) % hnat_priv->gmac_num) ?
+				 NR_GMAC2_PORT : NR_GMAC1_PORT;
+		else
+			gmac = NR_GMAC1_PORT;
+	} else if (IS_WAN(dev)) {
+		if (IS_DSA_WAN(dev))
+			hnat_dsa_fill_stag(dev, &entry, hw_path,
+					   ntohs(eth->h_proto), mape);
+		if (mape_toggle && mape == 1) {
+			gmac = NR_PDMA_PORT;
+			/* Set act_dp = wan_dev */
+			entry.ipv4_hnapt.act_dp = dev->ifindex;
+		} else {
+			gmac = (IS_GMAC1_MODE) ? NR_GMAC1_PORT : NR_GMAC2_PORT;
+		}
+	} else if (IS_EXT(dev) && (FROM_GE_PPD(skb) || FROM_GE_LAN(skb) ||
+		   FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) {
+		/* Single-GMAC builds mark the origin with VLAN 1 (LAN) or
+		 * 2 (WAN/virtual) so the ext path can route it back.
+		 */
+		if (!hnat_priv->data->whnat && IS_GMAC1_MODE) {
+			entry.bfib1.vpm = 1;
+			entry.bfib1.vlan_layer = 1;
+
+			if (FROM_GE_LAN(skb))
+				entry.ipv4_hnapt.vlan1 = 1;
+			else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
+				entry.ipv4_hnapt.vlan1 = 2;
+		}
+
+		trace_printk("learn of lan or wan(iif=%x) --> %s(ext)\n",
+			     skb_hnat_iface(skb), dev->name);
+		/* To CPU then stolen by pre-routing hant hook of LAN/WAN
+		 * Current setting is PDMA RX.
+		 */
+		gmac = NR_PDMA_PORT;
+		if (IS_IPV4_GRP(foe))
+			entry.ipv4_hnapt.act_dp = dev->ifindex;
+		else
+			entry.ipv6_5t_route.act_dp = dev->ifindex;
+	} else {
+		printk_ratelimited(KERN_WARNING
+				   "Unknown case of dp, iif=%x --> %s\n",
+				   skb_hnat_iface(skb), dev->name);
+
+		return 0;
+	}
+
+	qid = skb->mark & (MTK_QDMA_TX_MASK);
+
+	if (IS_IPV4_GRP(foe)) {
+		entry.ipv4_hnapt.iblk2.dp = gmac;
+		entry.ipv4_hnapt.iblk2.port_mg =
+			(hnat_priv->data->version == MTK_HNAT_V1) ? 0x3f : 0;
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+		if (hnat_priv->data->version == MTK_HNAT_V4) {
+			entry.ipv4_hnapt.iblk2.qid = qid & 0x7f;
+		} else {
+			/* qid[5:0]= port_mg[1:0]+ qid[3:0] */
+			entry.ipv4_hnapt.iblk2.qid = qid & 0xf;
+			if (hnat_priv->data->version != MTK_HNAT_V1)
+				entry.ipv4_hnapt.iblk2.port_mg |=
+					((qid >> 4) & 0x3);
+
+			if (((IS_EXT(dev) && (FROM_GE_LAN(skb) ||
+			      FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) ||
+			     ((mape_toggle && mape == 1) && !FROM_EXT(skb))) &&
+			    (!whnat)) {
+				entry.ipv4_hnapt.etype = htons(HQOS_MAGIC_TAG);
+				entry.ipv4_hnapt.vlan1 = skb_hnat_entry(skb);
+				entry.bfib1.vlan_layer = 1;
+			}
+		}
+
+		if (FROM_EXT(skb) || skb_hnat_sport(skb) == NR_QDMA_PORT)
+			entry.ipv4_hnapt.iblk2.fqos = 0;
+		else
+			entry.ipv4_hnapt.iblk2.fqos = 1;
+#else
+		entry.ipv4_hnapt.iblk2.fqos = 0;
+#endif
+	} else {
+		entry.ipv6_5t_route.iblk2.dp = gmac;
+		entry.ipv6_5t_route.iblk2.port_mg =
+			(hnat_priv->data->version == MTK_HNAT_V1) ? 0x3f : 0;
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+		if (hnat_priv->data->version == MTK_HNAT_V4) {
+			entry.ipv6_5t_route.iblk2.qid = qid & 0x7f;
+		} else {
+			/* qid[5:0]= port_mg[1:0]+ qid[3:0] */
+			entry.ipv6_5t_route.iblk2.qid = qid & 0xf;
+			if (hnat_priv->data->version != MTK_HNAT_V1)
+				entry.ipv6_5t_route.iblk2.port_mg |=
+					((qid >> 4) & 0x3);
+
+			if (IS_EXT(dev) && (FROM_GE_LAN(skb) ||
+			    FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb)) &&
+			    (!whnat)) {
+				entry.ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG);
+				entry.ipv6_5t_route.vlan1 = skb_hnat_entry(skb);
+				entry.bfib1.vlan_layer = 1;
+			}
+		}
+
+		if (FROM_EXT(skb))
+			entry.ipv6_5t_route.iblk2.fqos = 0;
+		else
+			entry.ipv6_5t_route.iblk2.fqos = 1;
+#else
+		entry.ipv6_5t_route.iblk2.fqos = 0;
+#endif
+	}
+
+	memcpy(foe, &entry, sizeof(entry));
+	/*reset statistic for this entry*/
+	if (hnat_priv->data->per_flow_accounting)
+		memset(&hnat_priv->acct[skb_hnat_entry(skb)], 0,
+		       sizeof(struct mib_entry));
+
+	wmb();
+	/* The INFO2.port_mg and 2nd VLAN ID fields of PPE entry are redefined
+	 * by Wi-Fi whnat engine. These data and INFO2.dp will be updated and
+	 * the entry is set to BIND state in mtk_sw_nat_hook_tx().
+	 */
+	if (!whnat)
+		foe->bfib1.state = BIND;
+
+	return 0;
+}
+
+/* TX-side hook called by the Wi-Fi warp (whnat) driver: finish binding a
+ * learning-stage FOE entry with the Wi-Fi destination info (WDMA/bss/wcid)
+ * or the GMAC port, refresh the source MAC, and set the entry to BIND.
+ * Always returns NF_ACCEPT; the packet itself is not consumed.
+ * (Fixes: "&eth" had been corrupted into the mojibake byte U+00F0, and the
+ * trace_printk arguments were in the wrong order for their labels --
+ * rxid/bssid were swapped.)
+ */
+int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no)
+{
+	struct foe_entry *entry;
+	struct ethhdr *eth;
+
+	if (skb_hnat_alg(skb) || !is_magic_tag_valid(skb) || !IS_SPACE_AVAILABLE_HEAD(skb))
+		return NF_ACCEPT;
+
+	trace_printk(
+		"[%s]entry=%x reason=%x gmac_no=%x wdmaid=%x rxid=%x wcid=%x bssid=%x\n",
+		__func__, skb_hnat_entry(skb), skb_hnat_reason(skb), gmac_no,
+		skb_hnat_wdma_id(skb), skb_hnat_rx_id(skb),
+		skb_hnat_wc_id(skb), skb_hnat_bss_id(skb));
+
+	if (!skb_hnat_is_hashed(skb))
+		return NF_ACCEPT;
+
+	entry = &hnat_priv->foe_table_cpu[skb_hnat_entry(skb)];
+	if (entry_hnat_is_bound(entry))
+		return NF_ACCEPT;
+
+	if (skb_hnat_reason(skb) != HIT_UNBIND_RATE_REACH)
+		return NF_ACCEPT;
+
+	eth = eth_hdr(skb);
+	if (is_multicast_ether_addr(eth->h_dest)) {
+		/*not bind multicast if PPE mcast not enable*/
+		if (!hnat_priv->pmcast)
+			return NF_ACCEPT;
+	}
+
+	/* Some mt_wifi virtual interfaces, such as apcli,
+	 * will change the smac for specail purpose.
+	 */
+	switch (entry->bfib1.pkt_type) {
+	case IPV4_HNAPT:
+	case IPV4_HNAT:
+		entry->ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
+		entry->ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
+		break;
+	case IPV4_DSLITE:
+	case IPV4_MAP_E:
+	case IPV6_6RD:
+	case IPV6_5T_ROUTE:
+	case IPV6_3T_ROUTE:
+		entry->ipv6_5t_route.smac_hi = swab32(*((u32 *)eth->h_source));
+		entry->ipv6_5t_route.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
+		break;
+	}
+
+	/* MT7622 wifi hw_nat not support QoS */
+	if (IS_IPV4_GRP(entry)) {
+		entry->ipv4_hnapt.iblk2.fqos = 0;
+		if (gmac_no == NR_WHNAT_WDMA_PORT) {
+			/* Destination is Wi-Fi: record the warp info. */
+			entry->ipv4_hnapt.winfo.bssid = skb_hnat_bss_id(skb);
+			entry->ipv4_hnapt.winfo.wcid = skb_hnat_wc_id(skb);
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+			entry->ipv4_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
+			entry->ipv4_hnapt.iblk2.winfoi = 1;
+#else
+			entry->ipv4_hnapt.winfo.rxid = skb_hnat_rx_id(skb);
+			entry->ipv4_hnapt.iblk2w.winfoi = 1;
+			entry->ipv4_hnapt.iblk2w.wdmaid = skb_hnat_wdma_id(skb);
+#endif
+		} else {
+			/* Single-GMAC, non-DSA: mark origin with VLAN 1/2. */
+			if (IS_GMAC1_MODE && !hnat_dsa_is_enable(hnat_priv)) {
+				entry->bfib1.vpm = 1;
+				entry->bfib1.vlan_layer = 1;
+
+				if (FROM_GE_LAN(skb))
+					entry->ipv4_hnapt.vlan1 = 1;
+				else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
+					entry->ipv4_hnapt.vlan1 = 2;
+			}
+
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+			if (FROM_GE_LAN(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb)) {
+				entry->bfib1.vpm = 0;
+				entry->bfib1.vlan_layer = 1;
+				entry->ipv4_hnapt.etype = htons(HQOS_MAGIC_TAG);
+				entry->ipv4_hnapt.vlan1 = skb_hnat_entry(skb);
+				entry->ipv4_hnapt.iblk2.fqos = 1;
+			}
+#endif
+		}
+		entry->ipv4_hnapt.iblk2.dp = gmac_no;
+	} else {
+		entry->ipv6_5t_route.iblk2.fqos = 0;
+		if (gmac_no == NR_WHNAT_WDMA_PORT) {
+			/* Destination is Wi-Fi: record the warp info. */
+			entry->ipv6_5t_route.winfo.bssid = skb_hnat_bss_id(skb);
+			entry->ipv6_5t_route.winfo.wcid = skb_hnat_wc_id(skb);
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+			entry->ipv6_5t_route.iblk2.rxid = skb_hnat_rx_id(skb);
+			entry->ipv6_5t_route.iblk2.winfoi = 1;
+#else
+			entry->ipv6_5t_route.winfo.rxid = skb_hnat_rx_id(skb);
+			entry->ipv6_5t_route.iblk2w.winfoi = 1;
+			entry->ipv6_5t_route.iblk2w.wdmaid = skb_hnat_wdma_id(skb);
+#endif
+		} else {
+			/* Single-GMAC, non-DSA: mark origin with VLAN 1/2. */
+			if (IS_GMAC1_MODE && !hnat_dsa_is_enable(hnat_priv)) {
+				entry->bfib1.vpm = 1;
+				entry->bfib1.vlan_layer = 1;
+
+				if (FROM_GE_LAN(skb))
+					entry->ipv6_5t_route.vlan1 = 1;
+				else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
+					entry->ipv6_5t_route.vlan1 = 2;
+			}
+
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+			if (FROM_GE_LAN(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb)) {
+				entry->bfib1.vpm = 0;
+				entry->bfib1.vlan_layer = 1;
+				entry->ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG);
+				entry->ipv6_5t_route.vlan1 = skb_hnat_entry(skb);
+				entry->ipv6_5t_route.iblk2.fqos = 1;
+			}
+#endif
+		}
+		entry->ipv6_5t_route.iblk2.dp = gmac_no;
+	}
+
+	entry->bfib1.state = BIND;
+
+	return NF_ACCEPT;
+}
+
+int mtk_sw_nat_hook_rx(struct sk_buff *skb)
+{
+	u32 iface;
+
+	/* Nothing to tag if the headroom cannot hold the hnat descriptor. */
+	if (!IS_SPACE_AVAILABLE_HEAD(skb))
+		return NF_ACCEPT;
+
+	/* Freshly received packet: clear the ALG flag and stamp the magic
+	 * tag so later hooks trust the descriptor in the headroom.
+	 */
+	skb_hnat_alg(skb) = 0;
+	skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
+
+	/* Record the WDMA source port for packets received through WED. */
+	iface = skb_hnat_iface(skb);
+	if (iface == FOE_MAGIC_WED0)
+		skb_hnat_sport(skb) = NR_WDMA0_PORT;
+	else if (iface == FOE_MAGIC_WED1)
+		skb_hnat_sport(skb) = NR_WDMA1_PORT;
+
+	return NF_ACCEPT;
+}
+
+/* Register a (typically wifi) net_device with the HNAT extension-device
+ * tables so traffic through it can be offloaded.  Holds a reference on
+ * @dev for each table it is inserted into; released by
+ * mtk_ppe_dev_unregister_hook().
+ */
+void mtk_ppe_dev_register_hook(struct net_device *dev)
+{
+	int i, number = 0;
+	struct extdev_entry *ext_entry;
+
+	/* WDS interfaces are not offloadable; ignore them. */
+	if (!strncmp(dev->name, "wds", 3))
+		return;
+
+	for (i = 1; i < MAX_IF_NUM; i++) {
+		if (hnat_priv->wifi_hook_if[i] == dev) {
+			pr_info("%s : %s has been registered in wifi_hook_if table[%d]\n",
+				__func__, dev->name, i);
+			return;
+		}
+		if (!hnat_priv->wifi_hook_if[i]) {
+			/* Already known by name (pre-declared ext dev):
+			 * just bind the net_device to the existing slot.
+			 */
+			if (find_extif_from_devname(dev->name)) {
+				extif_set_dev(dev);
+				goto add_wifi_hook_if;
+			}
+
+			number = get_ext_device_number();
+			if (number >= MAX_EXT_DEVS) {
+				pr_info("%s : extdev array is full. %s is not registered\n",
+					__func__, dev->name);
+				return;
+			}
+
+			ext_entry = kzalloc(sizeof(*ext_entry), GFP_KERNEL);
+			if (!ext_entry)
+				return;
+
+			strncpy(ext_entry->name, dev->name, IFNAMSIZ);
+			/* strncpy() does not NUL-terminate when the source
+			 * fills the buffer; terminate explicitly.
+			 */
+			ext_entry->name[IFNAMSIZ - 1] = '\0';
+			dev_hold(dev);
+			ext_entry->dev = dev;
+			ext_if_add(ext_entry);
+
+add_wifi_hook_if:
+			dev_hold(dev);
+			hnat_priv->wifi_hook_if[i] = dev;
+
+			break;
+		}
+	}
+	pr_info("%s : interface %s register (%d)\n", __func__, dev->name, i);
+}
+
+/* Undo mtk_ppe_dev_register_hook(): remove @dev from the wifi hook table
+ * and the extension-device table, dropping the references taken there.
+ */
+void mtk_ppe_dev_unregister_hook(struct net_device *dev)
+{
+	int i;
+
+	for (i = 1; i < MAX_IF_NUM; i++) {
+		if (hnat_priv->wifi_hook_if[i] == dev) {
+			hnat_priv->wifi_hook_if[i] = NULL;
+			dev_put(dev);
+
+			break;
+		}
+	}
+
+	/* extif_put_dev() is a no-op if @dev is not in the ext table. */
+	extif_put_dev(dev);
+	pr_info("%s : interface %s set null (%d)\n", __func__, dev->name, i);
+}
+
+/* Decide whether this flow may be hardware-accelerated.
+ * Returns 1 when binding is allowed, 0 when it must stay on the CPU.
+ */
+static unsigned int mtk_hnat_accel_type(struct sk_buff *skb)
+{
+	const struct nf_conn_help *help;
+	enum ip_conntrack_info ctinfo;
+	struct dst_entry *dst = skb_dst(skb);
+	struct nf_conn *ct;
+
+	/* Do not accelerate 1st round of xfrm flow, and 2nd round of xfrm flow
+	 * is from local_out which is also filtered in sanity check.
+	 */
+	if (dst && dst_xfrm(dst))
+		return 0;
+
+	/* Untracked traffic cannot have a conntrack helper attached. */
+	ct = nf_ct_get(skb, &ctinfo);
+	if (!ct)
+		return 1;
+
+	/* rcu_read_lock()ed by nf_hook_slow */
+	help = nfct_help(ct);
+	return (help && rcu_dereference(help->helper)) ? 0 : 1;
+}
+
+/* Common post-routing worker shared by the v4/v6/bridge output hooks.
+ * @fn is the address-family nexthop resolver (may be 0/NULL for bridge).
+ * Returns 0 to let the caller NF_ACCEPT, non-zero when the packet has
+ * been consumed/handled by the PPE and the caller should drop it.
+ */
+static unsigned int mtk_hnat_nf_post_routing(
+	struct sk_buff *skb, const struct net_device *out,
+	unsigned int (*fn)(struct sk_buff *, const struct net_device *,
+			   struct flow_offload_hw_path *),
+	const char *func)
+{
+	struct foe_entry *entry;
+	struct flow_offload_hw_path hw_path = { .dev = (struct net_device*)out,
+						.virt_dev = (struct net_device*)out };
+	const struct net_device *arp_dev = out;
+
+	/* Only packets carrying a valid hnat descriptor and not flagged
+	 * as ALG (unsupported) traffic are candidates for offload.
+	 */
+	if (skb_hnat_alg(skb) || unlikely(!is_magic_tag_valid(skb) ||
+					  !IS_SPACE_AVAILABLE_HEAD(skb)))
+		return 0;
+
+	if (unlikely(!skb_hnat_is_hashed(skb)))
+		return 0;
+
+	/* Let the egress driver validate/redirect the offload path; it may
+	 * substitute the real lower device in hw_path.
+	 */
+	if (out->netdev_ops->ndo_flow_offload_check) {
+		if (out->netdev_ops->ndo_flow_offload_check(&hw_path))
+			return 0;
+		out = (IS_GMAC1_MODE) ? hw_path.virt_dev : hw_path.dev;
+	}
+
+	if (!IS_LAN(out) && !IS_WAN(out) && !IS_EXT(out))
+		return 0;
+
+	trace_printk("[%s] case hit, %x-->%s, reason=%x\n", __func__,
+		     skb_hnat_iface(skb), out->name, skb_hnat_reason(skb));
+
+	entry = &hnat_priv->foe_table_cpu[skb_hnat_entry(skb)];
+
+	switch (skb_hnat_reason(skb)) {
+	/* Flow hit the unbind rate threshold: fill the FOE entry and bind. */
+	case HIT_UNBIND_RATE_REACH:
+		if (entry_hnat_is_bound(entry))
+			break;
+
+		if (fn && !mtk_hnat_accel_type(skb))
+			break;
+
+		if (fn && fn(skb, arp_dev, &hw_path))
+			break;
+
+		skb_to_hnat_info(skb, out, entry, &hw_path);
+		break;
+	/* Keepalive duplicate: entry already bound, packet not needed. */
+	case HIT_BIND_KEEPALIVE_DUP_OLD_HDR:
+		if (fn && !mtk_hnat_accel_type(skb))
+			break;
+
+		/* update mcast timestamp*/
+		if (hnat_priv->data->version == MTK_HNAT_V3 &&
+		    hnat_priv->data->mcast && entry->bfib1.sta == 1)
+			entry->ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
+
+		if (entry_hnat_is_bound(entry)) {
+			memset(skb_hnat_info(skb), 0, FOE_INFO_LEN);
+
+			return -1;
+		}
+		break;
+	case HIT_BIND_MULTICAST_TO_CPU:
+	case HIT_BIND_MULTICAST_TO_GMAC_CPU:
+		/*do not forward to gdma again,if ppe already done it*/
+		if (IS_LAN(out) || IS_WAN(out))
+			return -1;
+		break;
+	}
+
+	return 0;
+}
+
+/* LOCAL_OUT (IPv6) hook: for locally generated IPv6-in-IPv6 traffic,
+ * convert the FOE entry to a DS-Lite / MAP-E entry.  For MAP-E
+ * (mape_toggle set) the inner IPv4 header/ports must be recorded in the
+ * entry before it can be bound.
+ */
+static unsigned int
+mtk_hnat_ipv6_nf_local_out(void *priv, struct sk_buff *skb,
+			   const struct nf_hook_state *state)
+{
+	struct foe_entry *entry;
+	struct ipv6hdr *ip6h;
+	struct iphdr _iphdr;
+	const struct iphdr *iph;
+	struct tcpudphdr _ports;
+	const struct tcpudphdr *pptr;
+	int udp = 0;
+
+	if (unlikely(!skb_hnat_is_hashed(skb)))
+		return NF_ACCEPT;
+
+	entry = &hnat_priv->foe_table_cpu[skb_hnat_entry(skb)];
+	if (skb_hnat_reason(skb) == HIT_UNBIND_RATE_REACH) {
+		ip6h = ipv6_hdr(skb);
+		if (ip6h->nexthdr == NEXTHDR_IPIP) {
+			/* Map-E LAN->WAN: need to record orig info before fn. */
+			if (mape_toggle) {
+				iph = skb_header_pointer(skb, IPV6_HDR_LEN,
+							 sizeof(_iphdr), &_iphdr);
+				/* Truncated/malformed packet: leave unbound. */
+				if (unlikely(!iph))
+					return NF_ACCEPT;
+
+				switch (iph->protocol) {
+				case IPPROTO_UDP:
+					udp = 1;
+					/* fallthrough */
+				case IPPROTO_TCP:
+					break;
+
+				default:
+					return NF_ACCEPT;
+				}
+
+				pptr = skb_header_pointer(skb, IPV6_HDR_LEN + iph->ihl * 4,
+							  sizeof(_ports), &_ports);
+				/* No transport header available: skip. */
+				if (unlikely(!pptr))
+					return NF_ACCEPT;
+
+				entry->bfib1.udp = udp;
+
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+				entry->bfib1.pkt_type = IPV4_MAP_E;
+				entry->ipv4_dslite.iblk2.dscp = iph->tos;
+				entry->ipv4_dslite.new_sip = ntohl(iph->saddr);
+				entry->ipv4_dslite.new_dip = ntohl(iph->daddr);
+				entry->ipv4_dslite.new_sport = ntohs(pptr->src);
+				entry->ipv4_dslite.new_dport = ntohs(pptr->dst);
+#else
+				entry->ipv4_hnapt.iblk2.dscp = iph->tos;
+				entry->ipv4_hnapt.new_sip = ntohl(iph->saddr);
+				entry->ipv4_hnapt.new_dip = ntohl(iph->daddr);
+				entry->ipv4_hnapt.new_sport = ntohs(pptr->src);
+				entry->ipv4_hnapt.new_dport = ntohs(pptr->dst);
+#endif
+			} else {
+				entry->bfib1.pkt_type = IPV4_DSLITE;
+			}
+		}
+	}
+	return NF_ACCEPT;
+}
+
+/* POST_ROUTING (IPv6) hook: try to bind the flow via the common worker;
+ * drop the packet when the worker reports it was consumed by the PPE.
+ */
+static unsigned int
+mtk_hnat_ipv6_nf_post_routing(void *priv, struct sk_buff *skb,
+			      const struct nf_hook_state *state)
+{
+	post_routing_print(skb, state->in, state->out, __func__);
+
+	if (mtk_hnat_nf_post_routing(skb, state->out, hnat_ipv6_get_nexthop,
+				     __func__)) {
+		trace_printk(
+			"%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
+			__func__, skb_hnat_iface(skb), state->out->name,
+			HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
+			skb_hnat_sport(skb), skb_hnat_reason(skb),
+			skb_hnat_alg(skb));
+		return NF_DROP;
+	}
+
+	return NF_ACCEPT;
+}
+
+/* POST_ROUTING (IPv4) hook: try to bind the flow via the common worker;
+ * drop the packet when the worker reports it was consumed by the PPE.
+ */
+static unsigned int
+mtk_hnat_ipv4_nf_post_routing(void *priv, struct sk_buff *skb,
+			      const struct nf_hook_state *state)
+{
+	post_routing_print(skb, state->in, state->out, __func__);
+
+	if (mtk_hnat_nf_post_routing(skb, state->out, hnat_ipv4_get_nexthop,
+				     __func__)) {
+		trace_printk(
+			"%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
+			__func__, skb_hnat_iface(skb), state->out->name,
+			HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
+			skb_hnat_sport(skb), skb_hnat_reason(skb),
+			skb_hnat_alg(skb));
+		return NF_DROP;
+	}
+
+	return NF_ACCEPT;
+}
+
+/* Bridge PRE_ROUTING handler for ext<->GE fast paths.  Recovers the FOE
+ * entry index from HW-QoS tagged frames and performs the second learning
+ * step for traffic crossing external (wifi) devices.
+ */
+static unsigned int
+mtk_pong_hqos_handler(void *priv, struct sk_buff *skb,
+		      const struct nf_hook_state *state)
+{
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
+
+	/* h_proto is big-endian on the wire; the tag was written as
+	 * htons(HQOS_MAGIC_TAG), so the comparison must byte-swap too.
+	 */
+	if (eth_hdr(skb)->h_proto == htons(HQOS_MAGIC_TAG)) {
+		/* Lower 14 bits of the TCI carry the FOE entry index. */
+		skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
+		skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
+	}
+#endif
+
+	if (skb_hnat_iface(skb) == FOE_MAGIC_EXT)
+		clr_from_extge(skb);
+
+	/* packets from external devices -> xxx ,step 2, learning stage */
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+	if (do_ext2ge_fast_learn(state->in, skb) &&
+	    (eth_hdr(skb)->h_proto != htons(HQOS_MAGIC_TAG))) {
+#else
+	if (do_ext2ge_fast_learn(state->in, skb)) {
+#endif
+		if (!do_hnat_ext_to_ge2(skb, __func__))
+			return NF_STOLEN;
+		goto drop;
+	}
+
+	/* packets form ge -> external device */
+	if (do_ge2ext_fast(state->in, skb)) {
+		if (!do_hnat_ge_to_ext(skb, __func__))
+			return NF_STOLEN;
+		goto drop;
+	}
+
+	return NF_ACCEPT;
+drop:
+	printk_ratelimited(KERN_WARNING
+			   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
+			   __func__, state->in->name, skb_hnat_iface(skb),
+			   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
+			   skb_hnat_sport(skb), skb_hnat_reason(skb),
+			   skb_hnat_alg(skb));
+
+	return NF_DROP;
+}
+
+/* Bridge LOCAL_OUT hook: bind bridged flows.  No L3 nexthop resolver
+ * applies at bridge level, so pass NULL (not the integer 0) for fn.
+ */
+static unsigned int
+mtk_hnat_br_nf_local_out(void *priv, struct sk_buff *skb,
+			 const struct nf_hook_state *state)
+{
+	post_routing_print(skb, state->in, state->out, __func__);
+
+	if (!mtk_hnat_nf_post_routing(skb, state->out, NULL, __func__))
+		return NF_ACCEPT;
+
+	trace_printk(
+		"%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
+		__func__, skb_hnat_iface(skb), state->out->name, HNAT_SKB_CB2(skb)->magic,
+		skb_hnat_entry(skb), skb_hnat_sport(skb), skb_hnat_reason(skb),
+		skb_hnat_alg(skb));
+
+	return NF_DROP;
+}
+
+/* LOCAL_OUT (IPv4) hook: locally generated flows must not be bound, so
+ * flag the descriptor accordingly (and mark 6RD for IPv6-in-IPv4).
+ */
+static unsigned int
+mtk_hnat_ipv4_nf_local_out(void *priv, struct sk_buff *skb,
+			   const struct nf_hook_state *state)
+{
+	struct sk_buff *new_skb;
+	struct foe_entry *entry;
+	struct iphdr *iph;
+
+	if (!skb_hnat_is_hashed(skb))
+		return NF_ACCEPT;
+
+	entry = &hnat_priv->foe_table_cpu[skb_hnat_entry(skb)];
+
+	/* Ensure the headroom can hold the hnat descriptor.
+	 * FIXME(review): on the realloc path only the local 'skb' pointer
+	 * is updated; the caller still holds the old skb, which is freed
+	 * here while NF_ACCEPT is returned — this looks like a
+	 * use-after-free.  Confirm and rework (e.g. drop instead).
+	 */
+	if (unlikely(skb_headroom(skb) < FOE_INFO_LEN)) {
+		new_skb = skb_realloc_headroom(skb, FOE_INFO_LEN);
+		if (!new_skb) {
+			dev_info(hnat_priv->dev, "%s:drop\n", __func__);
+			return NF_DROP;
+		}
+		dev_kfree_skb(skb);
+		skb = new_skb;
+	}
+
+	/* Make the flow from local not be bound. */
+	iph = ip_hdr(skb);
+	if (iph->protocol == IPPROTO_IPV6) {
+		/* IPv6-in-IPv4 (6RD) tunnel traffic. */
+		entry->udib1.pkt_type = IPV6_6RD;
+		hnat_set_head_frags(state, skb, 0, hnat_set_alg);
+	} else {
+		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
+	}
+
+	return NF_ACCEPT;
+}
+
+/* Bridge FORWARD hook (used in WHNAT mode): ext-to-ext bridged traffic
+ * cannot be offloaded, so flag it as ALG to keep it on the CPU.
+ */
+static unsigned int mtk_hnat_br_nf_forward(void *priv,
+					   struct sk_buff *skb,
+					   const struct nf_hook_state *state)
+{
+	bool ext_in = IS_EXT(state->in);
+	bool ext_out = IS_EXT(state->out);
+
+	if (unlikely(ext_in && ext_out))
+		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
+
+	return NF_ACCEPT;
+}
+
+/* Netfilter hook registrations for the HNAT offload path.
+ * Note: whnat_adjust_nf_hooks() rewrites some entries before
+ * registration when the wifi-HNAT (WED) mode is in use.
+ */
+static struct nf_hook_ops mtk_hnat_nf_ops[] __read_mostly = {
+	{
+		/* learn/dispatch incoming IPv4 before routing */
+		.hook = mtk_hnat_ipv4_nf_pre_routing,
+		.pf = NFPROTO_IPV4,
+		.hooknum = NF_INET_PRE_ROUTING,
+		.priority = NF_IP_PRI_FIRST + 1,
+	},
+	{
+		/* learn/dispatch incoming IPv6 before routing */
+		.hook = mtk_hnat_ipv6_nf_pre_routing,
+		.pf = NFPROTO_IPV6,
+		.hooknum = NF_INET_PRE_ROUTING,
+		.priority = NF_IP_PRI_FIRST + 1,
+	},
+	{
+		/* bind IPv6 flows after all other post-routing hooks ran */
+		.hook = mtk_hnat_ipv6_nf_post_routing,
+		.pf = NFPROTO_IPV6,
+		.hooknum = NF_INET_POST_ROUTING,
+		.priority = NF_IP_PRI_LAST,
+	},
+	{
+		/* DS-Lite / MAP-E handling for locally generated IPv6 */
+		.hook = mtk_hnat_ipv6_nf_local_out,
+		.pf = NFPROTO_IPV6,
+		.hooknum = NF_INET_LOCAL_OUT,
+		.priority = NF_IP_PRI_LAST,
+	},
+	{
+		/* bind IPv4 flows after all other post-routing hooks ran */
+		.hook = mtk_hnat_ipv4_nf_post_routing,
+		.pf = NFPROTO_IPV4,
+		.hooknum = NF_INET_POST_ROUTING,
+		.priority = NF_IP_PRI_LAST,
+	},
+	{
+		/* keep locally generated IPv4 flows unbound */
+		.hook = mtk_hnat_ipv4_nf_local_out,
+		.pf = NFPROTO_IPV4,
+		.hooknum = NF_INET_LOCAL_OUT,
+		.priority = NF_IP_PRI_LAST,
+	},
+	{
+		.hook = mtk_hnat_br_nf_local_in,
+		.pf = NFPROTO_BRIDGE,
+		.hooknum = NF_BR_LOCAL_IN,
+		.priority = NF_BR_PRI_FIRST,
+	},
+	{
+		.hook = mtk_hnat_br_nf_local_out,
+		.pf = NFPROTO_BRIDGE,
+		.hooknum = NF_BR_LOCAL_OUT,
+		.priority = NF_BR_PRI_LAST - 1,
+	},
+	{
+		/* ext<->GE fast-path learning / HW-QoS tag recovery */
+		.hook = mtk_pong_hqos_handler,
+		.pf = NFPROTO_BRIDGE,
+		.hooknum = NF_BR_PRE_ROUTING,
+		.priority = NF_BR_PRI_FIRST,
+	},
+};
+
+/* Register all HNAT netfilter hooks in init_net.  Returns 0 or -errno. */
+int hnat_register_nf_hooks(void)
+{
+	return nf_register_net_hooks(&init_net, mtk_hnat_nf_ops, ARRAY_SIZE(mtk_hnat_nf_ops));
+}
+
+/* Unregister the HNAT netfilter hooks registered by hnat_register_nf_hooks(). */
+void hnat_unregister_nf_hooks(void)
+{
+	nf_unregister_net_hooks(&init_net, mtk_hnat_nf_ops, ARRAY_SIZE(mtk_hnat_nf_ops));
+}
+
+/* Rewire the bridge hooks for wifi-HNAT (WED) mode, before the hook
+ * table is registered: local_in/local_out move to pre/post-routing, and
+ * the HQoS handler is replaced by the ext-to-ext guard on NF_BR_FORWARD.
+ * Must be called before hnat_register_nf_hooks().  Always returns 0.
+ */
+int whnat_adjust_nf_hooks(void)
+{
+	struct nf_hook_ops *hook = mtk_hnat_nf_ops;
+	unsigned int n;
+
+	/* Note: 'hook' is the address of a static array and can never be
+	 * NULL, so no guard is needed here.
+	 */
+	for (n = 0; n < ARRAY_SIZE(mtk_hnat_nf_ops); n++) {
+		if (hook[n].hook == mtk_hnat_br_nf_local_in) {
+			hook[n].hooknum = NF_BR_PRE_ROUTING;
+		} else if (hook[n].hook == mtk_hnat_br_nf_local_out) {
+			hook[n].hooknum = NF_BR_POST_ROUTING;
+		} else if (hook[n].hook == mtk_pong_hqos_handler) {
+			hook[n].hook = mtk_hnat_br_nf_forward;
+			hook[n].hooknum = NF_BR_FORWARD;
+			hook[n].priority = NF_BR_PRI_LAST - 1;
+		}
+	}
+
+	return 0;
+}
+
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+/* packet_type callback for HW-QoS tagged frames: recover the FOE entry
+ * index from the VLAN TCI, mark the packet as force-to-CPU and push it
+ * out to the external (wifi) device.
+ */
+int mtk_hqos_ptype_cb(struct sk_buff *skb, struct net_device *dev,
+		      struct packet_type *pt, struct net_device *unused)
+{
+	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
+
+	/* Lower 14 bits of the VLAN TCI carry the FOE entry index. */
+	skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
+	skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
+
+	do_hnat_ge_to_ext(skb, __func__);
+
+	return 0;
+}
+#endif
+
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_stag.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_stag.c
new file mode 100644
index 0000000..b0fabfb
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_stag.c
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2020 MediaTek Inc.
+ * Author: Landen Chao <landen.chao@mediatek.com>
+ */
+
+#include <linux/of_device.h>
+#include <net/netfilter/nf_flow_table.h>
+#include "hnat.h"
+
+/* Fill the DSA special tag (etype field) of a FOE entry so the switch
+ * forwards offloaded traffic out of the right port.
+ * @mape: non-zero for the MAP-E LAN->WAN case, where the binding targets
+ *        the CPU and no special tag must be inserted.
+ */
+void hnat_dsa_fill_stag(const struct net_device *netdev,
+			struct foe_entry *entry,
+			struct flow_offload_hw_path *hw_path,
+			u16 eth_proto,
+			int mape)
+{
+	const struct net_device *ndev;
+	const unsigned int *port_reg;
+	int port_index;
+	u16 sp_tag;
+
+	/* For VLAN paths the DSA port is the underlying (real) device. */
+	if (hw_path->flags & FLOW_OFFLOAD_PATH_VLAN)
+		ndev = hw_path->dev;
+	else
+		ndev = netdev;
+
+	port_reg = of_get_property(ndev->dev.of_node, "reg", NULL);
+	/* Devices without a DT "reg" property are not DSA ports. */
+	if (unlikely(!port_reg))
+		return;
+
+	port_index = be32_to_cpup(port_reg);
+	sp_tag = BIT(port_index);
+
+	if (!entry->bfib1.vlan_layer) {
+		entry->bfib1.vlan_layer = 1;
+	} else {
+		/* VLAN existence indicator */
+		sp_tag |= BIT(8);
+	}
+	entry->bfib1.vpm = 0;
+
+	switch (eth_proto) {
+	case ETH_P_IP:
+		if (entry->ipv4_hnapt.bfib1.pkt_type == IPV4_DSLITE)
+			entry->ipv4_dslite.etype = sp_tag;
+		else
+			entry->ipv4_hnapt.etype = sp_tag;
+		break;
+	case ETH_P_IPV6:
+		/* In the case MAPE LAN --> WAN, binding entry is to CPU.
+		 * Do not add special tag.
+		 */
+		if (!mape)
+			/* etype offset of ipv6 entries are the same. */
+			entry->ipv6_5t_route.etype = sp_tag;
+
+		break;
+	default:
+		pr_info("DSA + HNAT unsupported protocol\n");
+	}
+}
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
new file mode 100644
index 0000000..bd857f4
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
@@ -0,0 +1,126 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
+ * Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef NF_HNAT_MTK_H
+#define NF_HNAT_MTK_H
+
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include "../mtk_eth_soc.h"
+
+/* Secondary control block stored near the tail of skb->cb (byte 44):
+ * carries a magic word tracking ext/GE traversal state between hooks.
+ */
+#define HNAT_SKB_CB2(__skb) ((struct hnat_skb_cb2 *)&((__skb)->cb[44]))
+struct hnat_skb_cb2 {
+	__u32 magic;
+};
+
+#if defined(CONFIG_MEDIATEK_NETSYS_RX_V2)
+/* Per-skb HNAT descriptor kept in the skb headroom (at skb->head).
+ * The bit layout must match what the ethernet driver writes on RX —
+ * do not reorder or resize fields.
+ */
+struct hnat_desc {
+	u32 entry : 15; /* FOE table entry index */
+	u32 resv0 : 3;
+	u32 crsn : 5; /* CPU reason (CRSN) code */
+	u32 resv1 : 3;
+	u32 sport : 4; /* PPE source port */
+	u32 resv2 : 1;
+	u32 alg : 1; /* set when flow must not be offloaded (ALG) */
+	u32 iface : 4; /* FOE_MAGIC_* ingress interface tag */
+	u32 resv3 : 4;
+	u32 magic_tag_protect : 16; /* HNAT_MAGIC_TAG validity guard */
+	u32 wdmaid : 2; /* WDMA ring id (wifi offload) */
+	u32 rxid : 2;
+	u32 wcid : 10; /* wifi station id */
+	u32 bssid : 6;
+} __packed;
+#else
+/* Legacy (pre NETSYS RX v2) descriptor layout — see note above. */
+struct hnat_desc {
+	u32 entry : 14;
+	u32 crsn : 5;
+	u32 sport : 4;
+	u32 alg : 1;
+	u32 iface : 4;
+	u32 resv : 4;
+	u32 magic_tag_protect : 16;
+	u32 wdmaid : 8;
+	u32 rxid : 2;
+	u32 wcid : 8;
+	u32 bssid : 6;
+} __packed;
+#endif
+
+#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+/* Pseudo ethertype used to carry the FOE entry index via a VLAN tag on
+ * the HW-QoS path (host byte order; swap with htons() for wire compare).
+ */
+#define HQOS_MAGIC_TAG 0x5678
+#define HAS_HQOS_MAGIC_TAG(skb) ((skb)->protocol == HQOS_MAGIC_TAG)
+#else
+#define HAS_HQOS_MAGIC_TAG(skb) (0)
+#endif
+
+/* Guard value proving the headroom descriptor was written by this driver. */
+#define HNAT_MAGIC_TAG 0x6789
+#define WIFI_INFO_LEN 3
+#define FOE_INFO_LEN (10 + WIFI_INFO_LEN)
+/* True if the skb headroom can hold a struct hnat_desc. */
+#define IS_SPACE_AVAILABLE_HEAD(skb) (skb_headroom(skb) >= FOE_INFO_LEN)
+
+/* Accessors for the hnat_desc stored in the skb headroom.  All macro
+ * arguments are fully parenthesized (macro hygiene).
+ */
+#define skb_hnat_info(skb) ((struct hnat_desc *)((skb)->head))
+#define skb_hnat_magic(skb) (((struct hnat_desc *)((skb)->head))->magic)
+#define skb_hnat_reason(skb) (((struct hnat_desc *)((skb)->head))->crsn)
+#define skb_hnat_entry(skb) (((struct hnat_desc *)((skb)->head))->entry)
+#define skb_hnat_sport(skb) (((struct hnat_desc *)((skb)->head))->sport)
+#define skb_hnat_alg(skb) (((struct hnat_desc *)((skb)->head))->alg)
+#define skb_hnat_iface(skb) (((struct hnat_desc *)((skb)->head))->iface)
+#define skb_hnat_magic_tag(skb) (((struct hnat_desc *)((skb)->head))->magic_tag_protect)
+#define skb_hnat_wdma_id(skb) (((struct hnat_desc *)((skb)->head))->wdmaid)
+#define skb_hnat_rx_id(skb) (((struct hnat_desc *)((skb)->head))->rxid)
+#define skb_hnat_wc_id(skb) (((struct hnat_desc *)((skb)->head))->wcid)
+#define skb_hnat_bss_id(skb) (((struct hnat_desc *)((skb)->head))->bssid)
+/* Traversal-state helpers built on the CB2 magic word. */
+#define do_ext2ge_fast_try(dev, skb) (IS_EXT(dev) && !is_from_extge(skb))
+#define set_from_extge(skb) (HNAT_SKB_CB2(skb)->magic = 0x78786688)
+#define clr_from_extge(skb) (HNAT_SKB_CB2(skb)->magic = 0x0)
+#define set_to_ppe(skb) (HNAT_SKB_CB2(skb)->magic = 0x78681415)
+#define is_from_extge(skb) (HNAT_SKB_CB2(skb)->magic == 0x78786688)
+#define is_magic_tag_valid(skb) (skb_hnat_magic_tag(skb) == HNAT_MAGIC_TAG)
+#define set_from_mape(skb) (HNAT_SKB_CB2(skb)->magic = 0x78787788)
+#define is_from_mape(skb) (HNAT_SKB_CB2(skb)->magic == 0x78787788)
+#define is_unreserved_port(hdr) \
+	((ntohs((hdr)->source) > 1023) && (ntohs((hdr)->dest) > 1023))
+
+/* PPE CPU reason (CRSN) codes, reported in hnat_desc.crsn. */
+#define TTL_0 0x02
+#define HAS_OPTION_HEADER 0x03
+#define NO_FLOW_IS_ASSIGNED 0x07
+#define IPV4_WITH_FRAGMENT 0x08
+#define IPV4_HNAPT_DSLITE_WITH_FRAGMENT 0x09
+#define IPV4_HNAPT_DSLITE_WITHOUT_TCP_UDP 0x0A
+#define IPV6_5T_6RD_WITHOUT_TCP_UDP 0x0B
+#define TCP_FIN_SYN_RST \
+	0x0C /* Ingress packet is TCP fin/syn/rst (for IPv4 NAPT/DS-Lite or IPv6 5T-route/6RD) */
+#define UN_HIT 0x0D /* FOE Un-hit */
+#define HIT_UNBIND 0x0E /* FOE Hit unbind */
+#define HIT_UNBIND_RATE_REACH 0x0F
+#define HIT_BIND_TCP_FIN 0x10
+#define HIT_BIND_TTL_1 0x11
+#define HIT_BIND_WITH_VLAN_VIOLATION 0x12
+#define HIT_BIND_KEEPALIVE_UC_OLD_HDR 0x13
+#define HIT_BIND_KEEPALIVE_MC_NEW_HDR 0x14
+#define HIT_BIND_KEEPALIVE_DUP_OLD_HDR 0x15
+#define HIT_BIND_FORCE_TO_CPU 0x16
+#define HIT_BIND_WITH_OPTION_HEADER 0x17
+#define HIT_BIND_MULTICAST_TO_CPU 0x18
+#define HIT_BIND_MULTICAST_TO_GMAC_CPU 0x19
+#define HIT_PRE_BIND 0x1A
+#define HIT_BIND_PACKET_SAMPLING 0x1B
+#define HIT_BIND_EXCEED_MTU 0x1C
+
+/* HNAT entry points exported to the ethernet driver. */
+u32 hnat_tx(struct sk_buff *skb);
+u32 hnat_set_skb_info(struct sk_buff *skb, u32 *rxd);
+u32 hnat_reg(struct net_device *dev, void __iomem *reg_base);
+u32 hnat_unreg(void);
+
+#endif
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_sgmii.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_sgmii.c
new file mode 100755
index 0000000..4db27df
--- /dev/null
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018-2019 MediaTek Inc.
+
+/* A library for MediaTek SGMII circuit
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include "mtk_eth_soc.h"
+
+/* Look up the SGMIISYS syscon regmaps referenced by the ethernet node.
+ * @r: the ethernet device-tree node carrying "mediatek,sgmiisys" phandles.
+ * Returns 0 on success or a negative errno from the regmap lookup.
+ */
+int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3)
+{
+	struct device_node *np;
+	int i;
+
+	ss->ana_rgc3 = ana_rgc3;
+
+	for (i = 0; i < MTK_MAX_DEVS; i++) {
+		np = of_parse_phandle(r, "mediatek,sgmiisys", i);
+		if (!np)
+			break;
+
+		ss->regmap[i] = syscon_node_to_regmap(np);
+		/* of_parse_phandle() takes a reference on the node; drop it
+		 * now that the regmap lookup is done, on both the success
+		 * and the error path (avoids an of_node refcount leak).
+		 */
+		of_node_put(np);
+		if (IS_ERR(ss->regmap[i]))
+			return PTR_ERR(ss->regmap[i]);
+	}
+
+	return 0;
+}
+
+/* Configure SGMII autonegotiation mode for unit @id.  The register
+ * programming order below follows the hardware bring-up sequence:
+ * link timer, mode, AN restart, then QPHY power-up.
+ */
+int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id)
+{
+	unsigned int val;
+
+	if (!ss->regmap[id])
+		return -EINVAL;
+
+	/* Setup the link timer and QPHY power up inside SGMIISYS */
+	regmap_write(ss->regmap[id], SGMSYS_PCS_LINK_TIMER,
+		     SGMII_LINK_TIMER_DEFAULT);
+
+	regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val);
+	val |= SGMII_REMOTE_FAULT_DIS;
+	regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val);
+
+	/* Kick off autonegotiation with the link partner. */
+	regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val);
+	val |= SGMII_AN_RESTART;
+	regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val);
+
+	/* Release the PHYA power-down state last. */
+	regmap_read(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
+	val &= ~SGMII_PHYA_PWD;
+	regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, val);
+
+	return 0;
+}
+
+/* Force SGMII speed/duplex (autonegotiation disabled) for unit @id,
+ * according to the phylink @state.  Returns 0 or -EINVAL when the
+ * regmap for @id was never initialized.
+ */
+int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id,
+			       const struct phylink_link_state *state)
+{
+	unsigned int val;
+
+	if (!ss->regmap[id])
+		return -EINVAL;
+
+	/* 2500Base-X runs the PHY at 3.125G line rate. */
+	regmap_read(ss->regmap[id], ss->ana_rgc3, &val);
+	val &= ~RG_PHY_SPEED_MASK;
+	if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+		val |= RG_PHY_SPEED_3_125G;
+	regmap_write(ss->regmap[id], ss->ana_rgc3, val);
+
+	/* Disable SGMII AN */
+	regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val);
+	val &= ~SGMII_AN_ENABLE;
+	regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val);
+
+	/* SGMII force mode setting */
+	regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val);
+	val &= ~SGMII_IF_MODE_MASK;
+
+	switch (state->speed) {
+	case SPEED_10:
+		val |= SGMII_SPEED_10;
+		break;
+	case SPEED_100:
+		val |= SGMII_SPEED_100;
+		break;
+	case SPEED_2500:
+	case SPEED_1000:
+		val |= SGMII_SPEED_1000;
+		break;
+	default:
+		/* Unknown speed: leave the IF_MODE speed bits cleared. */
+		break;
+	}
+
+	if (state->duplex == DUPLEX_FULL)
+		val |= SGMII_DUPLEX_FULL;
+
+	regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val);
+
+	/* Release PHYA power down state */
+	regmap_read(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
+	val &= ~SGMII_PHYA_PWD;
+	regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, val);
+
+	return 0;
+}
+
+void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id)
+{
+ struct mtk_sgmii *ss = eth->sgmii;
+ unsigned int val, sid;
+
+ /* Decide how GMAC and SGMIISYS be mapped */
+ sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
+ 0 : mac_id;
+
+ if (!ss->regmap[sid])
+ return;
+
+ regmap_read(ss->regmap[sid], SGMSYS_PCS_CONTROL_1, &val);
+ val |= SGMII_AN_RESTART;
+ regmap_write(ss->regmap[sid], SGMSYS_PCS_CONTROL_1, val);
+}