[Change and sync Linux version from OpenWrt]
[Description]
Change and sync Linux version from OpenWrt
1. Sync MTK kernel patch (ethernet reset support) from mtk_feed
[Release-log]
N/A
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/Makefile b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/Makefile
index f046e73..13c5b4e 100755
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/Makefile
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/Makefile
@@ -4,5 +4,5 @@
#
obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
-mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_eth_dbg.o
+mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_eth_dbg.o mtk_eth_reset.o
obj-$(CONFIG_NET_MEDIATEK_HNAT) += mtk_hnat/
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.c b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.c
index 574440d..ab411d9 100755
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.c
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.c
@@ -27,6 +27,7 @@
#include "mtk_eth_soc.h"
#include "mtk_eth_dbg.h"
+#include "mtk_eth_reset.h"
u32 hw_lro_agg_num_cnt[MTK_HW_LRO_RING_NUM][MTK_HW_LRO_MAX_AGG_CNT + 1];
u32 hw_lro_agg_size_cnt[MTK_HW_LRO_RING_NUM][16];
@@ -319,6 +320,7 @@
{
struct mtk_eth *eth = file->private_data;
+ atomic_inc(&force);
schedule_work(ð->pending_work);
return len;
}
@@ -625,7 +627,7 @@
.release = single_release
};
-static struct proc_dir_entry *proc_tx_ring, *proc_rx_ring;
+static struct proc_dir_entry *proc_tx_ring, *proc_hwtx_ring, *proc_rx_ring;
int tx_ring_read(struct seq_file *seq, void *v)
{
@@ -677,6 +679,53 @@
.release = single_release
};
+int hwtx_ring_read(struct seq_file *seq, void *v)
+{
+ struct mtk_eth *eth = g_eth;
+ struct mtk_tx_dma *hwtx_ring;
+ int i = 0;
+
+ hwtx_ring =
+ kmalloc(sizeof(struct mtk_tx_dma) * MTK_DMA_SIZE, GFP_KERNEL);
+ if (!hwtx_ring) {
+ seq_puts(seq, " allocate temp hwtx_ring fail.\n");
+ return 0;
+ }
+
+ for (i = 0; i < MTK_DMA_SIZE; i++)
+ hwtx_ring[i] = eth->scratch_ring[i];
+
+ for (i = 0; i < MTK_DMA_SIZE; i++) {
+ dma_addr_t addr = eth->phy_scratch_ring + i * sizeof(*hwtx_ring);
+
+ seq_printf(seq, "%d (%pad): %08x %08x %08x %08x", i, &addr,
+ *(int *)&hwtx_ring[i].txd1, *(int *)&hwtx_ring[i].txd2,
+ *(int *)&hwtx_ring[i].txd3, *(int *)&hwtx_ring[i].txd4);
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+ seq_printf(seq, " %08x %08x %08x %08x",
+ *(int *)&hwtx_ring[i].txd5, *(int *)&hwtx_ring[i].txd6,
+ *(int *)&hwtx_ring[i].txd7, *(int *)&hwtx_ring[i].txd8);
+#endif
+ seq_printf(seq, "\n");
+ }
+
+ kfree(hwtx_ring);
+ return 0;
+}
+
+static int hwtx_ring_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hwtx_ring_read, NULL);
+}
+
+static const struct file_operations hwtx_ring_fops = {
+ .owner = THIS_MODULE,
+ .open = hwtx_ring_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+
int rx_ring_read(struct seq_file *seq, void *v)
{
struct mtk_rx_ring *ring = &g_eth->rx_ring[0];
@@ -744,10 +793,10 @@
seq_puts(seq, " <<DEBUG REG DUMP>>\n");
seq_printf(seq, "| FE_INT_STA : %08x |\n",
- mtk_r32(eth, MTK_INT_STATUS));
+ mtk_r32(eth, MTK_FE_INT_STATUS));
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
seq_printf(seq, "| FE_INT_STA2 : %08x |\n",
- mtk_r32(eth, MTK_INT_STATUS2));
+ mtk_r32(eth, MTK_FE_INT_STATUS2));
seq_printf(seq, "| PSE_FQFC_CFG : %08x |\n",
mtk_r32(eth, MTK_PSE_FQFC_CFG));
@@ -833,9 +882,9 @@
mtk_dbg_r32(MTK_WED_RTQM_GLO_CFG));
}
- mtk_w32(eth, 0xffffffff, MTK_INT_STATUS);
+ mtk_w32(eth, 0xffffffff, MTK_FE_INT_STATUS);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
- mtk_w32(eth, 0xffffffff, MTK_INT_STATUS2);
+ mtk_w32(eth, 0xffffffff, MTK_FE_INT_STATUS2);
return 0;
}
@@ -1454,8 +1503,66 @@
.release = single_release
};
+int reset_event_read(struct seq_file *seq, void *v)
+{
+ struct mtk_eth *eth = g_eth;
+ struct mtk_reset_event reset_event = eth->reset_event;
+
+ seq_printf(seq, "[Event] [Count]\n");
+ seq_printf(seq, " FQ Empty: %d\n",
+ reset_event.count[MTK_EVENT_FQ_EMPTY]);
+ seq_printf(seq, " TSO Fail: %d\n",
+ reset_event.count[MTK_EVENT_TSO_FAIL]);
+ seq_printf(seq, " TSO Illegal: %d\n",
+ reset_event.count[MTK_EVENT_TSO_ILLEGAL]);
+ seq_printf(seq, " TSO Align: %d\n",
+ reset_event.count[MTK_EVENT_TSO_ALIGN]);
+ seq_printf(seq, " RFIFO OV: %d\n",
+ reset_event.count[MTK_EVENT_RFIFO_OV]);
+ seq_printf(seq, " RFIFO UF: %d\n",
+ reset_event.count[MTK_EVENT_RFIFO_UF]);
+ seq_printf(seq, " Force: %d\n",
+ reset_event.count[MTK_EVENT_FORCE]);
+ seq_printf(seq, "----------------------------\n");
+ seq_printf(seq, " Warm Cnt: %d\n",
+ reset_event.count[MTK_EVENT_WARM_CNT]);
+ seq_printf(seq, " Cold Cnt: %d\n",
+ reset_event.count[MTK_EVENT_COLD_CNT]);
+ seq_printf(seq, " Total Cnt: %d\n",
+ reset_event.count[MTK_EVENT_TOTAL_CNT]);
+
+ return 0;
+}
+
+static int reset_event_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, reset_event_read, 0);
+}
+
+ssize_t reset_event_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *data)
+{
+ struct mtk_eth *eth = g_eth;
+ struct mtk_reset_event *reset_event = ð->reset_event;
+
+ memset(reset_event, 0, sizeof(struct mtk_reset_event));
+ pr_info("MTK reset event counter is cleared !\n");
+
+ return count;
+}
+
+static const struct file_operations reset_event_fops = {
+ .owner = THIS_MODULE,
+ .open = reset_event_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = reset_event_write,
+ .release = single_release
+};
+
+
struct proc_dir_entry *proc_reg_dir;
-static struct proc_dir_entry *proc_esw_cnt, *proc_dbg_regs;
+static struct proc_dir_entry *proc_esw_cnt, *proc_dbg_regs, *proc_reset_event;
int debug_proc_init(struct mtk_eth *eth)
{
@@ -1469,6 +1576,11 @@
if (!proc_tx_ring)
pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_TXRING);
+ proc_hwtx_ring =
+ proc_create(PROCREG_HWTXRING, 0, proc_reg_dir, &hwtx_ring_fops);
+ if (!proc_hwtx_ring)
+ pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_HWTXRING);
+
proc_rx_ring =
proc_create(PROCREG_RXRING, 0, proc_reg_dir, &rx_ring_fops);
if (!proc_rx_ring)
@@ -1499,6 +1611,11 @@
PROCREG_HW_LRO_AUTO_TLB);
}
+ proc_reset_event =
+ proc_create(PROCREG_RESET_EVENT, 0, proc_reg_dir, &reset_event_fops);
+ if (!proc_reset_event)
+ pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_RESET_EVENT);
+
return 0;
}
@@ -1506,6 +1623,8 @@
{
if (proc_tx_ring)
remove_proc_entry(PROCREG_TXRING, proc_reg_dir);
+ if (proc_hwtx_ring)
+ remove_proc_entry(PROCREG_HWTXRING, proc_reg_dir);
if (proc_rx_ring)
remove_proc_entry(PROCREG_RXRING, proc_reg_dir);
@@ -1525,5 +1644,8 @@
if (proc_hw_lro_auto_tlb)
remove_proc_entry(PROCREG_HW_LRO_AUTO_TLB, proc_reg_dir);
}
+
+ if (proc_reset_event)
+ remove_proc_entry(PROCREG_RESET_EVENT, proc_reg_dir);
}
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.h b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.h
index ea147b7..43f4838 100755
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.h
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_dbg.h
@@ -53,11 +53,13 @@
#define PROCREG_ESW_CNT "esw_cnt"
#define PROCREG_TXRING "tx_ring"
+#define PROCREG_HWTXRING "hwtx_ring"
#define PROCREG_RXRING "rx_ring"
#define PROCREG_DIR "mtketh"
#define PROCREG_DBG_REGS "dbg_regs"
#define PROCREG_HW_LRO_STATS "hw_lro_stats"
#define PROCREG_HW_LRO_AUTO_TLB "hw_lro_auto_tlb"
+#define PROCREG_RESET_EVENT "reset_event"
/* HW LRO flush reason */
#define MTK_HW_LRO_AGG_FLUSH (1)
@@ -271,6 +273,7 @@
extern u32 mtk_cl45_ind_read(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 *data);
extern u32 mtk_cl45_ind_write(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 data);
+extern atomic_t force;
int debug_proc_init(struct mtk_eth *eth);
void debug_proc_exit(void);
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_reset.c b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_reset.c
new file mode 100644
index 0000000..13dbef4
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_reset.c
@@ -0,0 +1,397 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Henry Yen <henry.yen@mediatek.com>
+ */
+
+#include <linux/regmap.h>
+#include "mtk_eth_soc.h"
+#include "mtk_eth_dbg.h"
+#include "mtk_eth_reset.h"
+
+char* mtk_reset_event_name[32] = {
+ [MTK_EVENT_FORCE] = "Force",
+ [MTK_EVENT_WARM_CNT] = "Warm",
+ [MTK_EVENT_COLD_CNT] = "Cold",
+ [MTK_EVENT_TOTAL_CNT] = "Total",
+ [MTK_EVENT_FQ_EMPTY] = "FQ Empty",
+ [MTK_EVENT_TSO_FAIL] = "TSO Fail",
+ [MTK_EVENT_TSO_ILLEGAL] = "TSO Illegal",
+ [MTK_EVENT_TSO_ALIGN] = "TSO Align",
+ [MTK_EVENT_RFIFO_OV] = "RFIFO OV",
+ [MTK_EVENT_RFIFO_UF] = "RFIFO UF",
+};
+
+void mtk_reset_event_update(struct mtk_eth *eth, u32 id)
+{
+ struct mtk_reset_event *reset_event = ð->reset_event;
+ reset_event->count[id]++;
+}
+
+int mtk_eth_cold_reset(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | RSTCTRL_PPE0 | RSTCTRL_PPE1);
+ else
+ ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | RSTCTRL_PPE0);
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0x3ffffff);
+
+ return 0;
+}
+
+int mtk_eth_warm_reset(struct mtk_eth *eth)
+{
+ u32 reset_bits = 0, i = 0, done = 0;
+ u32 val1 = 0, val2 = 0, val3 = 0;
+
+ mdelay(100);
+
+ reset_bits |= RSTCTRL_FE;
+ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
+ reset_bits, reset_bits);
+
+ while (i < 1000) {
+ regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val1);
+ if (val1 & RSTCTRL_FE)
+ break;
+ i++;
+ udelay(1);
+ }
+
+ if (i < 1000) {
+ reset_bits = 0;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ reset_bits |= RSTCTRL_ETH | RSTCTRL_PPE0 | RSTCTRL_PPE1;
+ else
+ reset_bits |= RSTCTRL_ETH | RSTCTRL_PPE0;
+
+ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
+ reset_bits, reset_bits);
+
+ udelay(1);
+ regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val2);
+ if (!(val2 & reset_bits))
+ pr_info("[%s] error val2=0x%x reset_bits=0x%x !\n",
+ __func__, val2, reset_bits);
+ reset_bits |= RSTCTRL_FE;
+ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
+ reset_bits, ~reset_bits);
+
+ udelay(1);
+ regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val3);
+ if (val3 & reset_bits)
+ pr_info("[%s] error val3=0x%x reset_bits=0x%x !\n",
+ __func__, val3, reset_bits);
+ done = 1;
+ mtk_reset_event_update(eth, MTK_EVENT_WARM_CNT);
+ }
+
+ pr_info("[%s] reset record val1=0x%x, val2=0x%x, val3=0x%x !\n",
+ __func__, val1, val2, val3);
+
+ if (!done)
+ mtk_eth_cold_reset(eth);
+
+ return 0;
+}
+
+u32 mtk_check_reset_event(struct mtk_eth *eth, u32 status)
+{
+ u32 ret = 0, val = 0;
+
+ if ((status & MTK_FE_INT_FQ_EMPTY) ||
+ (status & MTK_FE_INT_RFIFO_UF) ||
+ (status & MTK_FE_INT_RFIFO_OV) ||
+ (status & MTK_FE_INT_TSO_FAIL) ||
+ (status & MTK_FE_INT_TSO_ALIGN) ||
+ (status & MTK_FE_INT_TSO_ILLEGAL)) {
+ while (status) {
+ val = ffs((unsigned int)status) - 1;
+ mtk_reset_event_update(eth, val);
+ status &= ~(1 << val);
+ }
+ ret = 1;
+ }
+
+ if (atomic_read(&force)) {
+ mtk_reset_event_update(eth, MTK_EVENT_FORCE);
+ ret = 1;
+ }
+
+ if (ret) {
+ mtk_reset_event_update(eth, MTK_EVENT_TOTAL_CNT);
+ mtk_dump_netsys_info(eth);
+ }
+
+ return ret;
+}
+
+irqreturn_t mtk_handle_fe_irq(int irq, void *_eth)
+{
+ struct mtk_eth *eth = _eth;
+ u32 status = 0, val = 0;
+
+ status = mtk_r32(eth, MTK_FE_INT_STATUS);
+ pr_info("[%s] Trigger FE Misc ISR: 0x%x\n", __func__, status);
+
+ while (status) {
+ val = ffs((unsigned int)status) - 1;
+ status &= ~(1 << val);
+ pr_info("[%s] Detect reset event: %s !\n", __func__,
+ mtk_reset_event_name[val]);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void mtk_dump_reg(void *_eth, char *name, u32 offset, u32 range)
+{
+ struct mtk_eth *eth = _eth;
+ u32 cur = offset;
+
+ pr_info("\n============ %s ============\n", name);
+ while(cur < offset + range) {
+ pr_info("0x%x: %08x %08x %08x %08x\n",
+ cur, mtk_r32(eth, cur), mtk_r32(eth, cur + 0x4),
+ mtk_r32(eth, cur + 0x8), mtk_r32(eth, cur + 0xc));
+ cur += 0x10;
+ }
+}
+
+void mtk_dump_netsys_info(void *_eth)
+{
+ struct mtk_eth *eth = _eth;
+
+ mtk_dump_reg(eth, "FE", 0x0, 0x500);
+ mtk_dump_reg(eth, "ADMA", PDMA_BASE, 0x300);
+ mtk_dump_reg(eth, "QDMA", QDMA_BASE, 0x400);
+ mtk_dump_reg(eth, "WDMA", WDMA_BASE(0), 0x600);
+ mtk_dump_reg(eth, "PPE", 0x2200, 0x200);
+ mtk_dump_reg(eth, "GMAC", 0x10000, 0x300);
+}
+
+void mtk_dma_monitor(struct timer_list *t)
+{
+ struct mtk_eth *eth = from_timer(eth, t, mtk_dma_monitor_timer);
+ static u32 timestamp = 0;
+ static u32 err_cnt1 = 0, err_cnt2 = 0, err_cnt3 = 0;
+ static u32 prev_wdidx = 0;
+ u32 cur_wdidx = mtk_r32(eth, MTK_WDMA_DTX_PTR(0));
+ u32 is_wtx_busy = mtk_r32(eth, MTK_WDMA_GLO_CFG(0)) & MTK_TX_DMA_BUSY;
+ u32 is_oq_free = ((mtk_r32(eth, MTK_PSE_OQ_STA(0)) & 0x01FF0000) == 0) &&
+ ((mtk_r32(eth, MTK_PSE_OQ_STA(1)) & 0x000001FF) == 0) &&
+ ((mtk_r32(eth, MTK_PSE_OQ_STA(4)) & 0x01FF0000) == 0);
+ u32 is_cdm_full =
+ !(mtk_r32(eth, MTK_WDMA_TX_DBG_MON0(0)) & MTK_CDM_TXFIFO_RDY);
+ u32 is_qfsm_hang = mtk_r32(eth, MTK_QDMA_FSM) != 0;
+ u32 is_qfwd_hang = mtk_r32(eth, MTK_QDMA_FWD_CNT) == 0;
+ u32 is_qfq_hang = mtk_r32(eth, MTK_QDMA_FQ_CNT) !=
+ ((MTK_DMA_SIZE << 16) | MTK_DMA_SIZE);
+ u32 is_oq0_stuck = (mtk_r32(eth, MTK_PSE_OQ_STA(0)) & 0x1FF) != 0;
+ u32 is_cdm1_busy = (mtk_r32(eth, MTK_FE_CDM1_FSM) & 0xFFFF0000) != 0;
+ u32 is_adma_busy = ((mtk_r32(eth, MTK_ADMA_RX_DBG0) & 0x1F) == 0) &&
+ ((mtk_r32(eth, MTK_ADMA_RX_DBG1) & 0x3F0000) == 0) &&
+ ((mtk_r32(eth, MTK_ADMA_RX_DBG1) & 0xFF) == 0);
+
+ if (cur_wdidx == prev_wdidx && is_wtx_busy &&
+ is_oq_free && is_cdm_full) {
+ err_cnt1++;
+ if (err_cnt1 == 3) {
+ pr_info("WDMA CDM Hang !\n");
+ pr_info("============== Time: %d ================\n",
+ timestamp);
+ pr_info("err_cnt1 = %d", err_cnt1);
+ pr_info("prev_wdidx = 0x%x | cur_wdidx = 0x%x\n",
+ prev_wdidx, cur_wdidx);
+ pr_info("is_wtx_busy = %d | is_oq_free = %d | is_cdm_full = %d\n",
+ is_wtx_busy, is_oq_free, is_cdm_full);
+ pr_info("-- -- -- -- -- -- --\n");
+ pr_info("WDMA_CTX_PTR = 0x%x\n", mtk_r32(eth, 0x4808));
+ pr_info("WDMA_DTX_PTR = 0x%x\n",
+ mtk_r32(eth, MTK_WDMA_DTX_PTR(0)));
+ pr_info("WDMA_GLO_CFG = 0x%x\n",
+ mtk_r32(eth, MTK_WDMA_GLO_CFG(0)));
+ pr_info("WDMA_TX_DBG_MON0 = 0x%x\n",
+ mtk_r32(eth, MTK_WDMA_TX_DBG_MON0(0)));
+ pr_info("PSE_OQ_STA1 = 0x%x\n",
+ mtk_r32(eth, MTK_PSE_OQ_STA(0)));
+ pr_info("PSE_OQ_STA2 = 0x%x\n",
+ mtk_r32(eth, MTK_PSE_OQ_STA(1)));
+ pr_info("PSE_OQ_STA5 = 0x%x\n",
+ mtk_r32(eth, MTK_PSE_OQ_STA(4)));
+ pr_info("==============================\n");
+
+ if ((atomic_read(&reset_lock) == 0) &&
+ (atomic_read(&force) == 0)){
+ atomic_inc(&force);
+ schedule_work(ð->pending_work);
+ }
+ }
+ } else if (is_qfsm_hang && is_qfwd_hang) {
+ err_cnt2++;
+ if (err_cnt2 == 3) {
+ pr_info("QDMA Tx Hang !\n");
+ pr_info("============== Time: %d ================\n",
+ timestamp);
+ pr_info("err_cnt2 = %d", err_cnt2);
+ pr_info("is_qfsm_hang = %d\n", is_qfsm_hang);
+ pr_info("is_qfwd_hang = %d\n", is_qfwd_hang);
+ pr_info("is_qfq_hang = %d\n", is_qfq_hang);
+ pr_info("-- -- -- -- -- -- --\n");
+ pr_info("MTK_QDMA_FSM = 0x%x\n",
+ mtk_r32(eth, MTK_QDMA_FSM));
+ pr_info("MTK_QDMA_FWD_CNT = 0x%x\n",
+ mtk_r32(eth, MTK_QDMA_FWD_CNT));
+ pr_info("MTK_QDMA_FQ_CNT = 0x%x\n",
+ mtk_r32(eth, MTK_QDMA_FQ_CNT));
+ pr_info("==============================\n");
+
+ if ((atomic_read(&reset_lock) == 0) &&
+ (atomic_read(&force) == 0)){
+ atomic_inc(&force);
+ schedule_work(ð->pending_work);
+ }
+ }
+ } else if (is_oq0_stuck && is_cdm1_busy && is_adma_busy) {
+ err_cnt3++;
+ if (err_cnt3 == 3) {
+ pr_info("ADMA Rx Hang !\n");
+ pr_info("============== Time: %d ================\n",
+ timestamp);
+ pr_info("err_cnt3 = %d", err_cnt3);
+ pr_info("is_oq0_stuck = %d\n", is_oq0_stuck);
+ pr_info("is_cdm1_busy = %d\n", is_cdm1_busy);
+ pr_info("is_adma_busy = %d\n", is_adma_busy);
+ pr_info("-- -- -- -- -- -- --\n");
+ pr_info("MTK_PSE_OQ_STA1 = 0x%x\n",
+ mtk_r32(eth, MTK_PSE_OQ_STA(0)));
+ pr_info("MTK_ADMA_RX_DBG0 = 0x%x\n",
+ mtk_r32(eth, MTK_ADMA_RX_DBG0));
+ pr_info("MTK_ADMA_RX_DBG1 = 0x%x\n",
+ mtk_r32(eth, MTK_ADMA_RX_DBG1));
+ pr_info("==============================\n");
+ if ((atomic_read(&reset_lock) == 0) &&
+ (atomic_read(&force) == 0)){
+ atomic_inc(&force);
+ schedule_work(ð->pending_work);
+ }
+ }
+ } else {
+ err_cnt1 = 0;
+ err_cnt2 = 0;
+ err_cnt3 = 0;
+ }
+
+ prev_wdidx = cur_wdidx;
+ mod_timer(ð->mtk_dma_monitor_timer, jiffies + 1 * HZ);
+}
+
+void mtk_prepare_reset_fe(struct mtk_eth *eth)
+{
+ u32 i = 0, val = 0;
+
+ /* Disable NETSYS Interrupt */
+ mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
+ mtk_w32(eth, 0, MTK_PDMA_INT_MASK);
+ mtk_w32(eth, 0, MTK_QDMA_INT_MASK);
+
+ /* Disable Linux netif Tx path */
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i])
+ continue;
+ netif_tx_disable(eth->netdev[i]);
+ }
+
+ /* Disable QDMA Tx */
+ val = mtk_r32(eth, MTK_QDMA_GLO_CFG);
+ mtk_w32(eth, val & ~(MTK_TX_DMA_EN), MTK_QDMA_GLO_CFG);
+
+ /* Power down sgmii */
+ regmap_read(eth->sgmii->regmap[0], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
+ val |= SGMII_PHYA_PWD;
+ regmap_write(eth->sgmii->regmap[0], SGMSYS_QPHY_PWR_STATE_CTRL, val);
+ regmap_read(eth->sgmii->regmap[1], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
+ val |= SGMII_PHYA_PWD;
+ regmap_write(eth->sgmii->regmap[1], SGMSYS_QPHY_PWR_STATE_CTRL, val);
+
+ /* Force link down GMAC */
+ val = mtk_r32(eth, MTK_MAC_MCR(0));
+ mtk_w32(eth, val & ~(MAC_MCR_FORCE_LINK), MTK_MAC_MCR(0));
+ val = mtk_r32(eth, MTK_MAC_MCR(1));
+ mtk_w32(eth, val & ~(MAC_MCR_FORCE_LINK), MTK_MAC_MCR(1));
+
+ /* Disable GMAC Rx */
+ val = mtk_r32(eth, MTK_MAC_MCR(0));
+ mtk_w32(eth, val & ~(MAC_MCR_RX_EN), MTK_MAC_MCR(0));
+ val = mtk_r32(eth, MTK_MAC_MCR(1));
+ mtk_w32(eth, val & ~(MAC_MCR_RX_EN), MTK_MAC_MCR(1));
+
+ /* Enable GDM drop */
+ mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
+
+ /* Disable ADMA Rx */
+ val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
+ mtk_w32(eth, val & ~(MTK_RX_DMA_EN), MTK_PDMA_GLO_CFG);
+}
+
+void mtk_prepare_reset_ppe(struct mtk_eth *eth, u32 ppe_id)
+{
+ u32 i = 0, poll_time = 5000, val;
+
+ /* Disable KA */
+ mtk_m32(eth, MTK_PPE_KA_CFG_MASK, 0, MTK_PPE_TB_CFG(ppe_id));
+ mtk_m32(eth, MTK_PPE_NTU_KA_MASK, 0, MTK_PPE_BIND_LMT_1(ppe_id));
+ mtk_w32(eth, 0, MTK_PPE_KA(ppe_id));
+ mdelay(10);
+
+ /* Set KA timer to maximum */
+ mtk_m32(eth, MTK_PPE_NTU_KA_MASK, (0xFF << 16), MTK_PPE_BIND_LMT_1(ppe_id));
+ mtk_w32(eth, 0xFFFFFFFF, MTK_PPE_KA(ppe_id));
+
+ /* Set KA tick select */
+ mtk_m32(eth, MTK_PPE_TICK_SEL_MASK, (0x1 << 24), MTK_PPE_TB_CFG(ppe_id));
+ mtk_m32(eth, MTK_PPE_KA_CFG_MASK, (0x3 << 12), MTK_PPE_TB_CFG(ppe_id));
+ mdelay(10);
+
+ /* Disable scan mode */
+ mtk_m32(eth, MTK_PPE_SCAN_MODE_MASK, 0, MTK_PPE_TB_CFG(ppe_id));
+ mdelay(10);
+
+ /* Check PPE idle */
+ while (i++ < poll_time) {
+ val = mtk_r32(eth, MTK_PPE_GLO_CFG(ppe_id));
+ if (!(val & MTK_PPE_BUSY))
+ break;
+ mdelay(1);
+ }
+
+ if (i >= poll_time) {
+ pr_info("[%s] PPE keeps busy !\n", __func__);
+ mtk_dump_reg(eth, "FE", 0x0, 0x500);
+ mtk_dump_reg(eth, "PPE", 0x2200, 0x200);
+ }
+}
+
+static int mtk_eth_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ switch (event) {
+ case MTK_WIFI_RESET_DONE:
+ complete(&wait_ser_done);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+struct notifier_block mtk_eth_netdevice_nb __read_mostly = {
+ .notifier_call = mtk_eth_netdevice_event,
+};
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_reset.h b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_reset.h
new file mode 100644
index 0000000..9abd2f5
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_reset.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Henry Yen <henry.yen@mediatek.com>
+ */
+
+#ifndef MTK_ETH_RESET_H
+#define MTK_ETH_RESET_H
+
+/* Frame Engine Reset FSM */
+#define MTK_FE_START_RESET 0x2000
+#define MTK_FE_RESET_DONE 0x2001
+#define MTK_WIFI_RESET_DONE 0x2002
+#define MTK_NAT_DISABLE 0x3000
+#define MTK_FE_RESET_NAT_DONE 0x4001
+
+/* ADMA Rx Debug Monitor */
+#define MTK_ADMA_RX_DBG0 (PDMA_BASE + 0x238)
+#define MTK_ADMA_RX_DBG1 (PDMA_BASE + 0x23C)
+
+/* PPE Configurations */
+#define MTK_PPE_GLO_CFG(x) (PPE_BASE(x) + 0x00)
+#define MTK_PPE_TB_CFG(x) (PPE_BASE(x) + 0x1C)
+#define MTK_PPE_BIND_LMT_1(x) (PPE_BASE(x) + 0x30)
+#define MTK_PPE_KA(x) (PPE_BASE(x) + 0x34)
+#define MTK_PPE_KA_CFG_MASK (0x3 << 12)
+#define MTK_PPE_NTU_KA_MASK (0xFF << 16)
+#define MTK_PPE_KA_T_MASK (0xFFFF << 0)
+#define MTK_PPE_TCP_KA_MASK (0xFF << 16)
+#define MTK_PPE_UDP_KA_MASK (0xFF << 24)
+#define MTK_PPE_TICK_SEL_MASK (0x1 << 24)
+#define MTK_PPE_SCAN_MODE_MASK (0x3 << 16)
+#define MTK_PPE_BUSY BIT(31)
+
+enum mtk_reset_type {
+ MTK_TYPE_COLD_RESET = 0,
+ MTK_TYPE_WARM_RESET,
+};
+
+enum mtk_reset_event_id {
+ MTK_EVENT_FORCE = 0,
+ MTK_EVENT_WARM_CNT = 1,
+ MTK_EVENT_COLD_CNT = 2,
+ MTK_EVENT_TOTAL_CNT = 3,
+ MTK_EVENT_FQ_EMPTY = 8,
+ MTK_EVENT_TSO_FAIL = 12,
+ MTK_EVENT_TSO_ILLEGAL = 13,
+ MTK_EVENT_TSO_ALIGN = 14,
+ MTK_EVENT_RFIFO_OV = 18,
+ MTK_EVENT_RFIFO_UF = 19,
+};
+
+extern struct notifier_block mtk_eth_netdevice_nb __read_mostly;
+extern struct completion wait_ser_done;
+extern char* mtk_reset_event_name[32];
+extern atomic_t reset_lock;
+
+irqreturn_t mtk_handle_fe_irq(int irq, void *_eth);
+u32 mtk_check_reset_event(struct mtk_eth *eth, u32 status);
+int mtk_eth_cold_reset(struct mtk_eth *eth);
+int mtk_eth_warm_reset(struct mtk_eth *eth);
+void mtk_reset_event_update(struct mtk_eth *eth, u32 id);
+void mtk_dump_netsys_info(void *_eth);
+void mtk_dma_monitor(struct timer_list *t);
+void mtk_prepare_reset_fe(struct mtk_eth *eth);
+void mtk_prepare_reset_ppe(struct mtk_eth *eth, u32 ppe_id);
+
+#endif /* MTK_ETH_RESET_H */
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index a50c25f..28cda15 100755
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -23,14 +23,19 @@
#include "mtk_eth_soc.h"
#include "mtk_eth_dbg.h"
+#include "mtk_eth_reset.h"
#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
#include "mtk_hnat/nf_hnat_mtk.h"
#endif
static int mtk_msg_level = -1;
+atomic_t reset_lock = ATOMIC_INIT(0);
+atomic_t force = ATOMIC_INIT(0);
+
module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
+DECLARE_COMPLETION(wait_ser_done);
#define MTK_ETHTOOL_STAT(x) { #x, \
offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
@@ -1650,12 +1655,12 @@
static void mtk_handle_status_irq(struct mtk_eth *eth)
{
- u32 status2 = mtk_r32(eth, MTK_INT_STATUS);
+ u32 status2 = mtk_r32(eth, MTK_FE_INT_STATUS);
if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
mtk_stats_update(eth);
mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
- MTK_INT_STATUS);
+ MTK_FE_INT_STATUS);
}
}
@@ -2068,7 +2073,7 @@
for (i = 0; i < 10; i++) {
val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
if (val & MTK_LRO_RING_RELINGUISH_DONE) {
- msleep(20);
+ mdelay(20);
continue;
}
break;
@@ -2493,7 +2498,9 @@
eth->netdev[mac->id]->stats.tx_errors++;
netif_err(eth, tx_err, dev,
"transmit timed out\n");
- schedule_work(ð->pending_work);
+
+ if (atomic_read(&reset_lock) == 0)
+ schedule_work(ð->pending_work);
}
static irqreturn_t mtk_handle_irq_rx(int irq, void *priv)
@@ -2600,7 +2607,7 @@
return 0;
}
-static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
+void mtk_gdm_config(struct mtk_eth *eth, u32 config)
{
int i;
@@ -2699,7 +2706,7 @@
for (i = 0; i < 10; i++) {
val = mtk_r32(eth, glo_cfg);
if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
- msleep(20);
+ mdelay(20);
continue;
}
break;
@@ -2762,16 +2769,25 @@
return 0;
}
-static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
+void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
- regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
- reset_bits,
- reset_bits);
+ u32 val = 0, i = 0;
- usleep_range(1000, 1100);
regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
- reset_bits,
- ~reset_bits);
+ reset_bits, reset_bits);
+
+ while (i++ < 5000) {
+ mdelay(1);
+ regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
+
+ if ((val & reset_bits) == reset_bits) {
+ mtk_reset_event_update(eth, MTK_EVENT_COLD_CNT);
+ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
+ reset_bits, ~reset_bits);
+ break;
+ }
+ }
+
mdelay(10);
}
@@ -2823,19 +2839,24 @@
return 0;
}
-static int mtk_hw_init(struct mtk_eth *eth)
+static int mtk_hw_init(struct mtk_eth *eth, u32 type)
{
- int i, ret;
+ int i, ret = 0;
- if (test_and_set_bit(MTK_HW_INIT, ð->state))
- return 0;
+ pr_info("[%s] reset_lock:%d, force:%d\n", __func__,
+ atomic_read(&reset_lock), atomic_read(&force));
- pm_runtime_enable(eth->dev);
- pm_runtime_get_sync(eth->dev);
+ if (atomic_read(&reset_lock) == 0) {
+ if (test_and_set_bit(MTK_HW_INIT, ð->state))
+ return 0;
- ret = mtk_clk_enable(eth);
- if (ret)
- goto err_disable_pm;
+ pm_runtime_enable(eth->dev);
+ pm_runtime_get_sync(eth->dev);
+
+ ret = mtk_clk_enable(eth);
+ if (ret)
+ goto err_disable_pm;
+ }
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
ret = device_reset(eth->dev);
@@ -2854,18 +2875,15 @@
return 0;
}
- /* Non-MT7628 handling... */
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
- regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
+ pr_info("[%s] execute fe %s reset\n", __func__,
+ (type == MTK_TYPE_WARM_RESET) ? "warm" : "cold");
- if(MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
- ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | RSTCTRL_PPE | RSTCTRL_PPE1);
+ if (type == MTK_TYPE_WARM_RESET)
+ mtk_eth_warm_reset(eth);
else
- ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | RSTCTRL_PPE);
+ mtk_eth_cold_reset(eth);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
- regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0x3ffffff);
-
/* Set FE to PDMAv2 if necessary */
mtk_w32(eth, mtk_r32(eth, MTK_FE_GLO_MISC) | MTK_PDMA_V2, MTK_FE_GLO_MISC);
}
@@ -2906,7 +2924,10 @@
mtk_w32(eth, MTK_RX_DONE_INT(0), MTK_PDMA_INT_GRP2);
mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
mtk_w32(eth, MTK_RX_DONE_INT(0), MTK_QDMA_INT_GRP2);
- mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
+ mtk_w32(eth, 0x21021003, MTK_FE_INT_GRP);
+ mtk_w32(eth, MTK_FE_INT_FQ_EMPTY | MTK_FE_INT_TSO_FAIL |
+ MTK_FE_INT_TSO_ILLEGAL | MTK_FE_INT_TSO_ALIGN |
+ MTK_FE_INT_RFIFO_OV | MTK_FE_INT_RFIFO_UF, MTK_FE_INT_ENABLE);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
/* PSE Free Queue Flow Control */
@@ -3017,17 +3038,48 @@
static void mtk_pending_work(struct work_struct *work)
{
struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
- int err, i;
+ struct device_node *phy_node = NULL;
+ struct mtk_mac *mac = NULL;
+ int err, i = 0;
unsigned long restart = 0;
+ u32 val = 0;
+
+ atomic_inc(&reset_lock);
+ val = mtk_r32(eth, MTK_FE_INT_STATUS);
+ if (!mtk_check_reset_event(eth, val)) {
+ atomic_dec(&reset_lock);
+ pr_info("[%s] No need to do FE reset !\n", __func__);
+ return;
+ }
rtnl_lock();
- dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
+	/* Disable FE P3 and P4 */
+ val = mtk_r32(eth, MTK_FE_GLO_CFG);
+ val |= MTK_FE_LINK_DOWN_P3;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ val |= MTK_FE_LINK_DOWN_P4;
+ mtk_w32(eth, val, MTK_FE_GLO_CFG);
+
+ /* Adjust PPE configurations to prepare for reset */
+ mtk_prepare_reset_ppe(eth, 0);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ mtk_prepare_reset_ppe(eth, 1);
+
+ /* Adjust FE configurations to prepare for reset */
+ mtk_prepare_reset_fe(eth);
+
+ /* Trigger Wifi SER reset */
+ call_netdevice_notifiers(MTK_FE_START_RESET, eth->netdev[0]);
+ rtnl_unlock();
+ wait_for_completion_timeout(&wait_ser_done, 5000);
+ rtnl_lock();
while (test_and_set_bit_lock(MTK_RESETTING, ð->state))
cpu_relax();
- dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
+ del_timer_sync(ð->mtk_dma_monitor_timer);
+ pr_info("[%s] mtk_stop starts !\n", __func__);
/* stop all devices to make sure that dma is properly shut down */
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!eth->netdev[i])
@@ -3035,17 +3087,16 @@
mtk_stop(eth->netdev[i]);
__set_bit(i, &restart);
}
- dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);
-
- /* restart underlying hardware such as power, clock, pin mux
- * and the connected phy
- */
- mtk_hw_deinit(eth);
+ pr_info("[%s] mtk_stop ends !\n", __func__);
+ mdelay(15);
if (eth->dev->pins)
pinctrl_select_state(eth->dev->pins->p,
eth->dev->pins->default_state);
- mtk_hw_init(eth);
+
+ pr_info("[%s] mtk_hw_init starts !\n", __func__);
+ mtk_hw_init(eth, MTK_TYPE_WARM_RESET);
+ pr_info("[%s] mtk_hw_init ends !\n", __func__);
/* restart DMA and enable IRQs */
for (i = 0; i < MTK_MAC_COUNT; i++) {
@@ -3059,8 +3110,41 @@
}
}
+ /* Set KA tick select */
+ mtk_m32(eth, MTK_PPE_TICK_SEL_MASK, 0, MTK_PPE_TB_CFG(0));
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ mtk_m32(eth, MTK_PPE_TICK_SEL_MASK, 0, MTK_PPE_TB_CFG(1));
+
+	/* Enable FE P3 and P4 */
+ val = mtk_r32(eth, MTK_FE_GLO_CFG);
+ val &= ~MTK_FE_LINK_DOWN_P3;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ val &= ~MTK_FE_LINK_DOWN_P4;
+ mtk_w32(eth, val, MTK_FE_GLO_CFG);
+
+ /* Power up sgmii */
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ mac = netdev_priv(eth->netdev[i]);
+ phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0);
+ if (!phy_node) {
+ mtk_gmac_sgmii_path_setup(eth, i);
+ regmap_write(eth->sgmii->regmap[i], SGMSYS_QPHY_PWR_STATE_CTRL, 0);
+ }
+ }
+
- dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
+ call_netdevice_notifiers(MTK_FE_RESET_NAT_DONE, eth->netdev[0]);
+ pr_info("[%s] HNAT reset done !\n", __func__);
+ call_netdevice_notifiers(MTK_FE_RESET_DONE, eth->netdev[0]);
+ pr_info("[%s] WiFi SER reset done !\n", __func__);
+
+ atomic_dec(&reset_lock);
+ if (atomic_read(&force) > 0)
+ atomic_dec(&force);
+
+ timer_setup(ð->mtk_dma_monitor_timer, mtk_dma_monitor, 0);
+ eth->mtk_dma_monitor_timer.expires = jiffies;
+ add_timer(ð->mtk_dma_monitor_timer);
clear_bit_unlock(MTK_RESETTING, ð->state);
rtnl_unlock();
@@ -3519,7 +3603,7 @@
eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
INIT_WORK(ð->pending_work, mtk_pending_work);
- err = mtk_hw_init(eth);
+ err = mtk_hw_init(eth, MTK_TYPE_COLD_RESET);
if (err)
return err;
@@ -3571,8 +3655,15 @@
if (err)
goto err_free_dev;
}
+ } else {
+ err = devm_request_irq(eth->dev, eth->irq[3],
+ mtk_handle_fe_irq, 0,
+ dev_name(eth->dev), eth);
+ if (err)
+ goto err_free_dev;
}
}
+
if (err)
goto err_free_dev;
@@ -3617,6 +3708,11 @@
platform_set_drvdata(pdev, eth);
+ register_netdevice_notifier(&mtk_eth_netdevice_nb);
+ timer_setup(ð->mtk_dma_monitor_timer, mtk_dma_monitor, 0);
+ eth->mtk_dma_monitor_timer.expires = jiffies;
+ add_timer(ð->mtk_dma_monitor_timer);
+
return 0;
err_deinit_mdio:
@@ -3656,6 +3752,8 @@
mtk_cleanup(eth);
mtk_mdio_cleanup(eth);
+ unregister_netdevice_notifier(&mtk_eth_netdevice_nb);
+ del_timer_sync(ð->mtk_dma_monitor_timer);
return 0;
}
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 4cd18bc..41e8f30 100755
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -56,13 +56,25 @@
#define MTK_HW_LRO_REPLACE_DELTA 1000
#define MTK_HW_LRO_SDL_REMAIN_ROOM 1522
+/* Frame Engine Global Configuration */
+#define MTK_FE_GLO_CFG 0x00
+#define MTK_FE_LINK_DOWN_P3 BIT(11)
+#define MTK_FE_LINK_DOWN_P4 BIT(12)
+
/* Frame Engine Global Reset Register */
#define MTK_RST_GL 0x04
#define RST_GL_PSE BIT(0)
/* Frame Engine Interrupt Status Register */
-#define MTK_INT_STATUS 0x08
-#define MTK_INT_STATUS2 0x28
+#define MTK_FE_INT_STATUS 0x08
+#define MTK_FE_INT_STATUS2 0x28
+#define MTK_FE_INT_ENABLE 0x0C
+#define MTK_FE_INT_FQ_EMPTY BIT(8)
+#define MTK_FE_INT_TSO_FAIL BIT(12)
+#define MTK_FE_INT_TSO_ILLEGAL BIT(13)
+#define MTK_FE_INT_TSO_ALIGN BIT(14)
+#define MTK_FE_INT_RFIFO_OV BIT(18)
+#define MTK_FE_INT_RFIFO_UF BIT(19)
#define MTK_GDM1_AF BIT(28)
#define MTK_GDM2_AF BIT(29)
@@ -133,9 +145,13 @@
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
#define PDMA_BASE 0x6000
#define QDMA_BASE 0x4400
+#define WDMA_BASE(x) (0x4800 + ((x) * 0x400))
+#define PPE_BASE(x) (0x2200 + ((x) * 0x400))
#else
#define PDMA_BASE 0x0800
#define QDMA_BASE 0x1800
+#define WDMA_BASE(x) (0x2800 + ((x) * 0x400))
+#define PPE_BASE(x) (0xE00 + ((x) * 0x400))
#endif
/* PDMA RX Base Pointer Register */
#define MTK_PRX_BASE_PTR0 (PDMA_BASE + 0x100)
@@ -407,6 +423,9 @@
/* QDMA Interrupt Status Register */
#define MTK_QDMA_INT_MASK (QDMA_BASE + 0x21c)
+/* QDMA DMA FSM */
+#define MTK_QDMA_FSM (QDMA_BASE + 0x234)
+
/* QDMA Interrupt Mask Register */
#define MTK_QDMA_HRED2 (QDMA_BASE + 0x244)
@@ -416,6 +435,9 @@
/* QDMA TX Forward DMA Pointer Register */
#define MTK_QTX_DTX_PTR (QDMA_BASE +0x304)
+/* QDMA TX Forward DMA Counter */
+#define MTK_QDMA_FWD_CNT (QDMA_BASE + 0x308)
+
/* QDMA TX Release CPU Pointer Register */
#define MTK_QTX_CRX_PTR (QDMA_BASE +0x310)
@@ -434,6 +456,12 @@
/* QDMA FQ Free Page Buffer Length Register */
#define MTK_QDMA_FQ_BLEN (QDMA_BASE +0x32c)
+/* WDMA Registers */
+#define MTK_WDMA_DTX_PTR(x) (WDMA_BASE(x) + 0xC)
+#define MTK_WDMA_GLO_CFG(x) (WDMA_BASE(x) + 0x204)
+#define MTK_WDMA_TX_DBG_MON0(x) (WDMA_BASE(x) + 0x230)
+#define MTK_CDM_TXFIFO_RDY BIT(7)
+
/* GMA1 Received Good Byte Count Register */
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
#define MTK_GDM1_TX_GBCNT 0x1C00
@@ -648,9 +676,14 @@
/* ethernet reset control register */
#define ETHSYS_RSTCTRL 0x34
#define RSTCTRL_FE BIT(6)
-#define RSTCTRL_PPE BIT(31)
-#define RSTCTRL_PPE1 BIT(30)
#define RSTCTRL_ETH BIT(23)
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+#define RSTCTRL_PPE0 BIT(30)
+#define RSTCTRL_PPE1 BIT(31)
+#else
+#define RSTCTRL_PPE0 BIT(31)
+#define RSTCTRL_PPE1 0
+#endif
/* ethernet reset check idle register */
#define ETHSYS_FE_RST_CHK_IDLE_EN 0x28
@@ -1097,7 +1130,7 @@
#define MT7986_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \
MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
- MTK_NETSYS_V2 | MTK_RSTCTRL_PPE1)
+ MTK_NETSYS_V2)
#define MT7981_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
@@ -1148,6 +1181,15 @@
u32 ana_rgc3;
};
+
+/* struct mtk_reset_event - This is the structure holding statistics counters
+ * for reset events
+ * @count: The counter is used to record the number of events
+ */
+struct mtk_reset_event {
+ u32 count[32];
+};
+
/* struct mtk_eth - This is the main datasructure for holding the state
* of the driver
* @dev: The device pointer
@@ -1207,6 +1249,7 @@
struct napi_struct tx_napi;
struct mtk_napi rx_napi[MTK_RX_NAPI_NUM];
struct mtk_tx_dma *scratch_ring;
+ struct mtk_reset_event reset_event;
dma_addr_t phy_scratch_ring;
void *scratch_head;
struct clk *clks[MTK_CLK_MAX];
@@ -1222,6 +1265,7 @@
u32 rx_dma_l4_valid;
int ip_align;
spinlock_t syscfg0_lock;
+ struct timer_list mtk_dma_monitor_timer;
};
/* struct mtk_mac - the structure that holds the info about the MACs of the
@@ -1255,6 +1299,7 @@
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg);
u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
+u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg);
int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *np,
u32 ana_rgc3);
@@ -1266,5 +1311,7 @@
int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
+void mtk_gdm_config(struct mtk_eth *eth, u32 config);
+void ethsys_reset(struct mtk_eth *eth, u32 reset_bits);
#endif /* MTK_ETH_H */
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
index 68aad32..ad4184a 100644
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
@@ -287,56 +287,13 @@
pr_info("hnat roaming work disable\n");
}
-static int hnat_start(u32 ppe_id)
+static int hnat_hw_init(u32 ppe_id)
{
- u32 foe_table_sz;
- u32 foe_mib_tb_sz;
- int etry_num_cfg;
-
if (ppe_id >= CFG_PPE_NUM)
return -EINVAL;
- /* mapp the FOE table */
- for (etry_num_cfg = DEF_ETRY_NUM_CFG ; etry_num_cfg >= 0 ; etry_num_cfg--, hnat_priv->foe_etry_num /= 2) {
- foe_table_sz = hnat_priv->foe_etry_num * sizeof(struct foe_entry);
- hnat_priv->foe_table_cpu[ppe_id] = dma_alloc_coherent(
- hnat_priv->dev, foe_table_sz,
- &hnat_priv->foe_table_dev[ppe_id], GFP_KERNEL);
-
- if (hnat_priv->foe_table_cpu[ppe_id])
- break;
- }
-
- if (!hnat_priv->foe_table_cpu[ppe_id])
- return -1;
- dev_info(hnat_priv->dev, "PPE%d entry number = %d\n",
- ppe_id, hnat_priv->foe_etry_num);
-
- writel(hnat_priv->foe_table_dev[ppe_id], hnat_priv->ppe_base[ppe_id] + PPE_TB_BASE);
- memset(hnat_priv->foe_table_cpu[ppe_id], 0, foe_table_sz);
-
- if (hnat_priv->data->version == MTK_HNAT_V1)
- exclude_boundary_entry(hnat_priv->foe_table_cpu[ppe_id]);
-
- if (hnat_priv->data->per_flow_accounting) {
- foe_mib_tb_sz = hnat_priv->foe_etry_num * sizeof(struct mib_entry);
- hnat_priv->foe_mib_cpu[ppe_id] =
- dma_alloc_coherent(hnat_priv->dev, foe_mib_tb_sz,
- &hnat_priv->foe_mib_dev[ppe_id], GFP_KERNEL);
- if (!hnat_priv->foe_mib_cpu[ppe_id])
- return -1;
- writel(hnat_priv->foe_mib_dev[ppe_id],
- hnat_priv->ppe_base[ppe_id] + PPE_MIB_TB_BASE);
- memset(hnat_priv->foe_mib_cpu[ppe_id], 0, foe_mib_tb_sz);
-
- hnat_priv->acct[ppe_id] =
- kzalloc(hnat_priv->foe_etry_num * sizeof(struct hnat_accounting),
- GFP_KERNEL);
- if (!hnat_priv->acct[ppe_id])
- return -1;
- }
/* setup hashing */
- cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, TB_ETRY_NUM, etry_num_cfg);
+ cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, TB_ETRY_NUM, hnat_priv->etry_num_cfg);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, HASH_MODE, HASH_MODE_1);
writel(HASH_SEED_KEY, hnat_priv->ppe_base[ppe_id] + PPE_HASH_SEED);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, XMODE, 0);
@@ -374,8 +331,16 @@
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BND_AGE_1, TCP_DLTA, 7);
/* setup FOE ka */
+ cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, KA_CFG, 0);
+ cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BIND_LMT_1, NTU_KA, 0);
+ cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_KA, KA_T, 0);
+ cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_KA, TCP_KA, 0);
+ cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_KA, UDP_KA, 0);
+ mdelay(10);
+
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, SCAN_MODE, 2);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, KA_CFG, 3);
+ cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, TICK_SEL, 0);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_KA, KA_T, 1);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_KA, TCP_KA, 1);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_KA, UDP_KA, 1);
@@ -393,6 +358,7 @@
writel(0, hnat_priv->ppe_base[ppe_id] + PPE_DFT_CPORT); /* pdma */
/* writel(0x55555555, hnat_priv->ppe_base[ppe_id] + PPE_DFT_CPORT); */ /* qdma */
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_GLO_CFG, TTL0_DRP, 0);
+ cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_GLO_CFG, MCAST_TB_EN, 1);
if (hnat_priv->data->version == MTK_HNAT_V4) {
writel(0xcb777, hnat_priv->ppe_base[ppe_id] + PPE_DFT_CPORT1);
@@ -414,6 +380,62 @@
return 0;
}
+static int hnat_start(u32 ppe_id)
+{
+ u32 foe_table_sz;
+ u32 foe_mib_tb_sz;
+	int etry_num_cfg;
+
+ if (ppe_id >= CFG_PPE_NUM)
+ return -EINVAL;
+
+ /* mapp the FOE table */
+ for (etry_num_cfg = DEF_ETRY_NUM_CFG ; etry_num_cfg >= 0 ;
+ etry_num_cfg--, hnat_priv->foe_etry_num /= 2) {
+ foe_table_sz = hnat_priv->foe_etry_num * sizeof(struct foe_entry);
+ hnat_priv->foe_table_cpu[ppe_id] = dma_alloc_coherent(
+ hnat_priv->dev, foe_table_sz,
+ &hnat_priv->foe_table_dev[ppe_id], GFP_KERNEL);
+
+ if (hnat_priv->foe_table_cpu[ppe_id])
+ break;
+ }
+
+ if (!hnat_priv->foe_table_cpu[ppe_id])
+ return -1;
+ dev_info(hnat_priv->dev, "PPE%d entry number = %d\n",
+ ppe_id, hnat_priv->foe_etry_num);
+
+ writel(hnat_priv->foe_table_dev[ppe_id], hnat_priv->ppe_base[ppe_id] + PPE_TB_BASE);
+ memset(hnat_priv->foe_table_cpu[ppe_id], 0, foe_table_sz);
+
+ if (hnat_priv->data->version == MTK_HNAT_V1)
+ exclude_boundary_entry(hnat_priv->foe_table_cpu[ppe_id]);
+
+ if (hnat_priv->data->per_flow_accounting) {
+ foe_mib_tb_sz = hnat_priv->foe_etry_num * sizeof(struct mib_entry);
+ hnat_priv->foe_mib_cpu[ppe_id] =
+ dma_alloc_coherent(hnat_priv->dev, foe_mib_tb_sz,
+ &hnat_priv->foe_mib_dev[ppe_id], GFP_KERNEL);
+ if (!hnat_priv->foe_mib_cpu[ppe_id])
+ return -1;
+ writel(hnat_priv->foe_mib_dev[ppe_id],
+ hnat_priv->ppe_base[ppe_id] + PPE_MIB_TB_BASE);
+ memset(hnat_priv->foe_mib_cpu[ppe_id], 0, foe_mib_tb_sz);
+
+ hnat_priv->acct[ppe_id] =
+ kzalloc(hnat_priv->foe_etry_num * sizeof(struct hnat_accounting),
+ GFP_KERNEL);
+ if (!hnat_priv->acct[ppe_id])
+ return -1;
+ }
+
+ hnat_priv->etry_num_cfg = etry_num_cfg;
+ hnat_hw_init(ppe_id);
+
+ return 0;
+}
+
static int ppe_busy_wait(u32 ppe_id)
{
unsigned long t_start = jiffies;
@@ -428,7 +450,7 @@
return 0;
if (time_after(jiffies, t_start + HZ))
break;
- usleep_range(10, 20);
+ mdelay(10);
}
dev_notice(hnat_priv->dev, "ppe:%s timeout\n", __func__);
@@ -441,7 +463,6 @@
u32 foe_table_sz;
u32 foe_mib_tb_sz;
struct foe_entry *entry, *end;
- u32 r1 = 0, r2 = 0;
if (ppe_id >= CFG_PPE_NUM)
return;
@@ -490,23 +511,13 @@
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, UDP_AGE, 0);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, FIN_AGE, 0);
- r1 = readl(hnat_priv->fe_base + 0x100);
- r2 = readl(hnat_priv->fe_base + 0x10c);
-
- dev_info(hnat_priv->dev, "0x100 = 0x%x, 0x10c = 0x%x\n", r1, r2);
-
- if (((r1 & 0xff00) >> 0x8) >= (r1 & 0xff) ||
- ((r1 & 0xff00) >> 0x8) >= (r2 & 0xff)) {
- dev_info(hnat_priv->dev, "reset pse\n");
- writel(0x1, hnat_priv->fe_base + 0x4);
- }
-
/* free the FOE table */
foe_table_sz = hnat_priv->foe_etry_num * sizeof(struct foe_entry);
if (hnat_priv->foe_table_cpu[ppe_id])
dma_free_coherent(hnat_priv->dev, foe_table_sz,
hnat_priv->foe_table_cpu[ppe_id],
hnat_priv->foe_table_dev[ppe_id]);
+ hnat_priv->foe_table_cpu[ppe_id] = NULL;
writel(0, hnat_priv->ppe_base[ppe_id] + PPE_TB_BASE);
if (hnat_priv->data->per_flow_accounting) {
@@ -603,6 +614,41 @@
return 0;
}
+int hnat_warm_init(void)
+{
+ u32 foe_table_sz, foe_mib_tb_sz, ppe_id = 0;
+
+ unregister_netevent_notifier(&nf_hnat_netevent_nb);
+
+ for (ppe_id = 0; ppe_id < CFG_PPE_NUM; ppe_id++) {
+ foe_table_sz =
+ hnat_priv->foe_etry_num * sizeof(struct foe_entry);
+ writel(hnat_priv->foe_table_dev[ppe_id],
+ hnat_priv->ppe_base[ppe_id] + PPE_TB_BASE);
+ memset(hnat_priv->foe_table_cpu[ppe_id], 0, foe_table_sz);
+
+ if (hnat_priv->data->version == MTK_HNAT_V1)
+ exclude_boundary_entry(hnat_priv->foe_table_cpu[ppe_id]);
+
+ if (hnat_priv->data->per_flow_accounting) {
+ foe_mib_tb_sz =
+ hnat_priv->foe_etry_num * sizeof(struct mib_entry);
+ writel(hnat_priv->foe_mib_dev[ppe_id],
+ hnat_priv->ppe_base[ppe_id] + PPE_MIB_TB_BASE);
+ memset(hnat_priv->foe_mib_cpu[ppe_id], 0,
+ foe_mib_tb_sz);
+ }
+
+ hnat_hw_init(ppe_id);
+ }
+
+ set_gmac_ppe_fwd(0, 1);
+ set_gmac_ppe_fwd(1, 1);
+ register_netevent_notifier(&nf_hnat_netevent_nb);
+
+ return 0;
+}
+
static struct packet_type mtk_pack_type __read_mostly = {
.type = HQOS_MAGIC_TAG,
.func = mtk_hqos_ptype_cb,
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
index 31a060f..f0ea6a2 100644
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
@@ -141,6 +141,8 @@
#define HASH_MODE (0x3 << 14) /* RW */
#define SCAN_MODE (0x3 << 16) /* RW */
#define XMODE (0x3 << 18) /* RW */
+#define TICK_SEL (0x1 << 24) /* RW */
+
/*PPE_CAH_CTRL mask*/
#define CAH_EN (0x1 << 0) /* RW */
@@ -670,6 +672,7 @@
struct ppe_mcast_table *pmcast;
u32 foe_etry_num;
+ u32 etry_num_cfg;
struct net_device *g_ppdev;
struct net_device *g_wandev;
struct net_device *wifi_hook_if[MAX_IF_NUM];
@@ -960,6 +963,8 @@
int entry_detail(u32 ppe_id, int index);
int entry_delete_by_mac(u8 *mac);
int entry_delete(u32 ppe_id, int index);
+int hnat_warm_init(void);
+
struct hnat_accounting *hnat_get_count(struct mtk_hnat *h, u32 ppe_id,
u32 index, struct hnat_accounting *diff);
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
index 8d199ef..2df6d14 100644
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
@@ -30,6 +30,7 @@
#include "hnat.h"
#include "../mtk_eth_soc.h"
+#include "../mtk_eth_reset.h"
#define do_ge2ext_fast(dev, skb) \
((IS_LAN(dev) || IS_WAN(dev) || IS_PPD(dev)) && \
@@ -260,6 +261,10 @@
hnat_priv->g_wandev = dev_get_by_name(&init_net, hnat_priv->wan);
break;
+ case MTK_FE_RESET_NAT_DONE:
+ pr_info("[%s] HNAT driver starts to do warm init !\n", __func__);
+ hnat_warm_init();
+ break;
default:
break;
}
@@ -278,6 +283,9 @@
dip = (u32)(*daddr);
for (i = 0; i < CFG_PPE_NUM; i++) {
+ if (!hnat_priv->foe_table_cpu[i])
+ continue;
+
for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
entry = hnat_priv->foe_table_cpu[i] + hash_index;
if (entry->bfib1.state == BIND &&