| From 67a20e07982e9c43d299679f75fd81638271fc63 Mon Sep 17 00:00:00 2001 |
| From: mtk27745 <rex.lu@mediatek.com> |
| Date: Mon, 18 Sep 2023 13:22:44 +0800 |
| Subject: [PATCH 2/6] net: ethernet: mtk_wed: add WED 3.0 SER support |
| |
| Introduce system error recovery (SER) support for WED 3.0 chipsets: |
| add WDMA v3 tx/rx reset helpers (prefetch, core and writeback FIFO |
| and ring-status clearing), hw_rro reset handling in the WED rx reset |
| path, and the new register definitions these rely on. |
| |
| --- |
| drivers/net/ethernet/mediatek/mtk_wed.c | 339 ++++++++++++++++--- |
| drivers/net/ethernet/mediatek/mtk_wed_regs.h | 68 +++- |
| include/linux/soc/mediatek/mtk_wed.h | 8 +- |
| 3 files changed, 367 insertions(+), 48 deletions(-) |
| |
| diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c |
| index 4b32a82..02c156a 100644 |
| --- a/drivers/net/ethernet/mediatek/mtk_wed.c |
| +++ b/drivers/net/ethernet/mediatek/mtk_wed.c |
| @@ -110,24 +110,88 @@ mtk_wdma_read_reset(struct mtk_wed_device *dev) |
| return wdma_r32(dev, MTK_WDMA_GLO_CFG); |
| } |
| |
| -static u32 |
| -mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) |
| +static void |
| +mtk_wdma_v3_rx_reset(struct mtk_wed_device *dev) |
| { |
| - if (wed_r32(dev, reg) & mask) |
| - return true; |
| - |
| - return false; |
| -} |
| + u32 status; |
| |
| -static int |
| -mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) |
| -{ |
| - int sleep = 1000; |
| - int timeout = 100 * sleep; |
| - u32 val; |
| + if (!mtk_wed_is_v3_or_greater(dev->hw)) |
| + return; |
| |
| - return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep, |
| - timeout, false, dev, reg, mask); |
| + wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN); |
| + wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN); |
| + |
| + if (read_poll_timeout(wdma_r32, status, |
| + !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY), |
| + 0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG)) |
| + dev_err(dev->hw->dev, "rx reset failed\n"); |
| + |
| + if (read_poll_timeout(wdma_r32, status, |
| + !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY), |
| + 0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG)) |
| + dev_err(dev->hw->dev, "rx reset failed\n"); |
| + |
| + wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN); |
| + wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN); |
| + |
| + if (read_poll_timeout(wdma_r32, status, |
| + !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY), |
| + 0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG)) |
| + dev_err(dev->hw->dev, "rx reset failed\n"); |
| + |
| + if (read_poll_timeout(wdma_r32, status, |
| + !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY), |
| + 0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG)) |
| + dev_err(dev->hw->dev, "rx reset failed\n"); |
| + |
| + /* prefetch FIFO */ |
| + wdma_w32(dev, MTK_WDMA_PREF_RX_FIFO_CFG, |
| + MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR | |
| + MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR); |
| + wdma_clr(dev, MTK_WDMA_PREF_RX_FIFO_CFG, |
| + MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR | |
| + MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR); |
| + |
| + /* core FIFO */ |
| + wdma_w32(dev, MTK_WDMA_XDMA_RX_FIFO_CFG, |
| + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR | |
| + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR | |
| + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR | |
| + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR | |
| + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR | |
| + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR | |
| + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR); |
| + wdma_clr(dev, MTK_WDMA_XDMA_RX_FIFO_CFG, |
| + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR | |
| + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR | |
| + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR | |
| + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR | |
| + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR | |
| + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR | |
| + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR); |
| + |
| + /* writeback FIFO */ |
| + wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0), |
| + MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); |
| + wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1), |
| + MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); |
| + |
| + wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0), |
| + MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); |
| + wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1), |
| + MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); |
| + |
| + /* prefetch ring status */ |
| + wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG, |
| + MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR); |
| + wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG, |
| + MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR); |
| + |
| + /* writeback ring status */ |
| + wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG, |
| + MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR); |
| + wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG, |
| + MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR); |
| } |
| |
| static int |
| @@ -142,6 +206,7 @@ mtk_wdma_rx_reset(struct mtk_wed_device *dev) |
| if (ret) |
| dev_err(dev->hw->dev, "rx reset failed \n"); |
| |
| + mtk_wdma_v3_rx_reset(dev); |
| wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); |
| wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); |
| |
| @@ -156,6 +221,101 @@ mtk_wdma_rx_reset(struct mtk_wed_device *dev) |
| return ret; |
| } |
| |
| +static u32 |
| +mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) |
| +{ |
| + return !!(wed_r32(dev, reg) & mask); |
| +} |
| + |
| +static int |
| +mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) |
| +{ |
| + int sleep = 15000; |
| + int timeout = 100 * sleep; |
| + u32 val; |
| + |
| + return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep, |
| + timeout, false, dev, reg, mask); |
| +} |
| + |
| +static void |
| +mtk_wdma_v3_tx_reset(struct mtk_wed_device *dev) |
| +{ |
| + u32 status; |
| + |
| + if (!mtk_wed_is_v3_or_greater(dev->hw)) |
| + return; |
| + |
| + wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN); |
| + wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN); |
| + |
| + if (read_poll_timeout(wdma_r32, status, |
| + !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY), |
| + 0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG)) |
| + dev_err(dev->hw->dev, "tx reset failed\n"); |
| + |
| + if (read_poll_timeout(wdma_r32, status, |
| + !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY), |
| + 0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG)) |
| + dev_err(dev->hw->dev, "tx reset failed\n"); |
| + |
| + wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN); |
| + wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN); |
| + |
| + if (read_poll_timeout(wdma_r32, status, |
| + !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY), |
| + 0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG)) |
| + dev_err(dev->hw->dev, "tx reset failed\n"); |
| + |
| + if (read_poll_timeout(wdma_r32, status, |
| + !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY), |
| + 0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG)) |
| + dev_err(dev->hw->dev, "tx reset failed\n"); |
| + |
| + /* prefetch FIFO */ |
| + wdma_w32(dev, MTK_WDMA_PREF_TX_FIFO_CFG, |
| + MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR | |
| + MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR); |
| + wdma_clr(dev, MTK_WDMA_PREF_TX_FIFO_CFG, |
| + MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR | |
| + MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR); |
| + |
| + /* core FIFO */ |
| + wdma_w32(dev, MTK_WDMA_XDMA_TX_FIFO_CFG, |
| + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR | |
| + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR | |
| + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR | |
| + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR); |
| + wdma_clr(dev, MTK_WDMA_XDMA_TX_FIFO_CFG, |
| + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR | |
| + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR | |
| + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR | |
| + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR); |
| + |
| + /* writeback FIFO */ |
| + wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0), |
| + MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); |
| + wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1), |
| + MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); |
| + |
| + wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0), |
| + MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); |
| + wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1), |
| + MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); |
| + |
| + /* prefetch ring status */ |
| + wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG, |
| + MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR); |
| + wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG, |
| + MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR); |
| + |
| + /* writeback ring status */ |
| + wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG, |
| + MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR); |
| + wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG, |
| + MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR); |
| +} |
| + |
| static void |
| mtk_wdma_tx_reset(struct mtk_wed_device *dev) |
| { |
| @@ -167,6 +327,7 @@ mtk_wdma_tx_reset(struct mtk_wed_device *dev) |
| !(status & mask), 0, 10000)) |
| dev_err(dev->hw->dev, "tx reset failed \n"); |
| |
| + mtk_wdma_v3_tx_reset(dev); |
| wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX); |
| wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); |
| |
| @@ -1389,25 +1550,6 @@ mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size, bool tx) |
| } |
| } |
| |
| -static u32 |
| -mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) |
| -{ |
| - if (wed_r32(dev, reg) & mask) |
| - return true; |
| - |
| - return false; |
| -} |
| - |
| -static int |
| -mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) |
| -{ |
| - int sleep = 1000; |
| - int timeout = 100 * sleep; |
| - u32 val; |
| - |
| - return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep, |
| - timeout, false, dev, reg, mask); |
| -} |
| |
| static int |
| mtk_wed_rx_reset(struct mtk_wed_device *dev) |
| @@ -1423,13 +1565,32 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev) |
| if (ret) |
| return ret; |
| |
| + if (dev->wlan.hw_rro) { |
| + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN); |
| + mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_HW_STS, |
| + MTK_WED_RX_IND_CMD_BUSY); |
| + mtk_wed_reset(dev, MTK_WED_RESET_RRO_RX_TO_PG); |
| + } |
| + |
| wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN); |
| ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, |
| MTK_WED_WPDMA_RX_D_RX_DRV_BUSY); |
| + if (!ret && mtk_wed_is_v3_or_greater(dev->hw)) |
| + ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG, |
| + MTK_WED_WPDMA_RX_D_PREF_BUSY); |
| if (ret) { |
| mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT); |
| mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV); |
| } else { |
| + if (mtk_wed_is_v3_or_greater(dev->hw)) { |
| + /* 1.a. Disable Prefetch HW */ |
| + wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_CFG, MTK_WED_WPDMA_RX_D_PREF_EN); |
| + mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG, |
| + MTK_WED_WPDMA_RX_D_PREF_BUSY); |
| + wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, |
| + MTK_WED_WPDMA_RX_D_RST_DRV_IDX_ALL); |
| + } |
| + |
| wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, |
| MTK_WED_WPDMA_RX_D_RST_CRX_IDX | |
| MTK_WED_WPDMA_RX_D_RST_DRV_IDX); |
| @@ -1457,15 +1618,36 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev) |
| wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0); |
| } |
| |
| + if (dev->wlan.hw_rro) { |
| + /* Disable RRO MSDU Page Drv */ |
| + wed_clr(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, MTK_WED_RRO_MSDU_PG_DRV_EN); |
| + |
| + /* Disable RRO Data Drv */ |
| + wed_clr(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN); |
| + |
| + /* RRO MSDU Page Drv Reset */ |
| + wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, MTK_WED_RRO_MSDU_PG_DRV_CLR); |
| + mtk_wed_poll_busy(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, |
| + MTK_WED_RRO_MSDU_PG_DRV_CLR); |
| + |
| + /* RRO Data Drv Reset */ |
| + wed_w32(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_CLR); |
| + mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_D_CFG(2), |
| + MTK_WED_RRO_RX_D_DRV_CLR); |
| + } |
| + |
| /* reset route qm */ |
| wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN); |
| ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL, |
| MTK_WED_CTRL_RX_ROUTE_QM_BUSY); |
| if (ret) { |
| mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM); |
| + } else if (mtk_wed_is_v3_or_greater(dev->hw)) { |
| + wed_set(dev, MTK_WED_RTQM_RST, BIT(0)); |
| + wed_clr(dev, MTK_WED_RTQM_RST, BIT(0)); |
| + mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM); |
| } else { |
| - wed_set(dev, MTK_WED_RTQM_GLO_CFG, |
| - MTK_WED_RTQM_Q_RST); |
| + wed_set(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); |
| } |
| |
| /* reset tx wdma */ |
| @@ -1473,8 +1655,12 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev) |
| |
| /* reset tx wdma drv */ |
| wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN); |
| - mtk_wed_poll_busy(dev, MTK_WED_CTRL, |
| - MTK_WED_CTRL_WDMA_INT_AGENT_BUSY); |
| + if (mtk_wed_is_v3_or_greater(dev->hw)) |
| + mtk_wed_poll_busy(dev, MTK_WED_WPDMA_STATUS, |
| + MTK_WED_WPDMA_STATUS_TX_DRV); |
| + else |
| + mtk_wed_poll_busy(dev, MTK_WED_CTRL, |
| + MTK_WED_CTRL_WDMA_INT_AGENT_BUSY); |
| mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV); |
| |
| /* reset wed rx dma */ |
| @@ -1495,6 +1681,14 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev) |
| MTK_WED_CTRL_WED_RX_BM_BUSY); |
| mtk_wed_reset(dev, MTK_WED_RESET_RX_BM); |
| |
| + if (dev->wlan.hw_rro) { |
| + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN); |
| + mtk_wed_poll_busy(dev, MTK_WED_CTRL, |
| + MTK_WED_CTRL_WED_RX_PG_BM_BUSY); |
| + wed_set(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM); |
| + wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM); |
| + } |
| + |
| /* wo change to enable state */ |
| val = WO_STATE_ENABLE; |
| ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, |
| @@ -1549,16 +1743,55 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev) |
| |
| /* 2. Reset WDMA Rx DMA/Driver_Engine */ |
| busy = !!mtk_wdma_rx_reset(dev); |
| + if (mtk_wed_is_v3_or_greater(dev->hw)) { |
| + val = MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE | |
| + wed_r32(dev, MTK_WED_WDMA_GLO_CFG); |
| + val &= ~MTK_WED_WDMA_GLO_CFG_RX_DRV_EN; |
| + wed_w32(dev, MTK_WED_WDMA_GLO_CFG, val); |
| + } else { |
| + wed_clr(dev, MTK_WED_WDMA_GLO_CFG, |
| + MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); |
| + } |
| |
| - wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); |
| if (!busy) |
| busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG, |
| MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY); |
| + if (!busy && mtk_wed_is_v3_or_greater(dev->hw)) |
| + busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG, |
| + MTK_WED_WDMA_RX_PREF_BUSY); |
| |
| if (busy) { |
| mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT); |
| mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV); |
| } else { |
| + if (mtk_wed_is_v3_or_greater(dev->hw)) { |
| + /* 1.a. Disable Prefetch HW */ |
| + wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, |
| + MTK_WED_WDMA_RX_PREF_EN); |
| + mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG, |
| + MTK_WED_WDMA_RX_PREF_BUSY); |
| + wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, |
| + MTK_WED_WDMA_RX_PREF_DDONE2_EN); |
| + |
| + /* reset prefetch index */ |
| + wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, |
| + MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR | |
| + MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR); |
| + |
| + wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, |
| + MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR | |
| + MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR); |
| + |
| + /* reset prefetch FIFO */ |
| + wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG, |
| + MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR | |
| + MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR); |
| + wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG, 0); |
| + |
| + /* 2. Reset DMA index */ |
| + wed_w32(dev, MTK_WED_WDMA_RESET_IDX, |
| + MTK_WED_WDMA_RESET_IDX_RX_ALL); |
| + } |
| wed_w32(dev, MTK_WED_WDMA_RESET_IDX, |
| MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV); |
| wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0); |
| @@ -1574,8 +1807,13 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev) |
| wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); |
| |
| for (i = 0; i < 100; i++) { |
| - val = wed_r32(dev, MTK_WED_TX_BM_INTF); |
| - if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40) |
| + if (mtk_wed_is_v1(dev->hw)) |
| + val = FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, |
| + wed_r32(dev, MTK_WED_TX_BM_INTF)); |
| + else |
| + val = FIELD_GET(MTK_WED_TX_TKID_INTF_TKFIFO_FDEP, |
| + wed_r32(dev, MTK_WED_TX_TKID_INTF)); |
| + if (val == 0x40) |
| break; |
| } |
| |
| @@ -1599,6 +1837,8 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev) |
| mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT); |
| mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV); |
| mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV); |
| + if (mtk_wed_is_v3_or_greater(dev->hw)) |
| + wed_w32(dev, MTK_WED_RX1_CTRL2, 0); |
| } else { |
| wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, |
| MTK_WED_WPDMA_RESET_IDX_TX | |
| @@ -1615,7 +1855,14 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev) |
| wed_w32(dev, MTK_WED_RESET_IDX, 0); |
| } |
| |
| - mtk_wed_rx_reset(dev); |
| + if (mtk_wed_is_v3_or_greater(dev->hw)) { |
| + /* reset amsdu engine */ |
| + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN); |
| + mtk_wed_reset(dev, MTK_WED_RESET_TX_AMSDU); |
| + } |
| + |
| + if (mtk_wed_get_rx_capa(dev)) |
| + mtk_wed_rx_reset(dev); |
| } |
| |
| static int |
| @@ -1932,7 +2179,7 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev) |
| } |
| |
| static void |
| -mtk_wed_start_hw_rro(struct mtk_wed_device *dev, u32 irq_mask) |
| +mtk_wed_start_hw_rro(struct mtk_wed_device *dev, u32 irq_mask, bool reset) |
| { |
| int i; |
| |
| @@ -1942,6 +2189,12 @@ mtk_wed_start_hw_rro(struct mtk_wed_device *dev, u32 irq_mask) |
| if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro) |
| return; |
| |
| + if (reset) { |
| + wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, |
| + MTK_WED_RRO_MSDU_PG_DRV_EN); |
| + return; |
| + } |
| + |
| wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR); |
| wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, |
| MTK_WED_RRO_MSDU_PG_DRV_CLR); |
| diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h |
| index 0af264d..1ee0fe1 100644 |
| --- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h |
| +++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h |
| @@ -36,6 +36,8 @@ struct mtk_wdma_desc { |
| #define MTK_WED_RESET 0x008 |
| #define MTK_WED_RESET_TX_BM BIT(0) |
| #define MTK_WED_RESET_RX_BM BIT(1) |
| +#define MTK_WED_RESET_RX_PG_BM BIT(2) |
| +#define MTK_WED_RESET_RRO_RX_TO_PG BIT(3) |
| #define MTK_WED_RESET_TX_FREE_AGENT BIT(4) |
| #define MTK_WED_RESET_WPDMA_TX_DRV BIT(8) |
| #define MTK_WED_RESET_WPDMA_RX_DRV BIT(9) |
| @@ -58,7 +60,7 @@ struct mtk_wdma_desc { |
| #define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3) |
| #define MTK_WED_CTRL_WED_RX_IND_CMD_EN BIT(5) |
| #define MTK_WED_CTRL_WED_RX_PG_BM_EN BIT(6) |
| -#define MTK_WED_CTRL_WED_RX_PG_BM_BUSU BIT(7) |
| +#define MTK_WED_CTRL_WED_RX_PG_BM_BUSY BIT(7) |
| #define MTK_WED_CTRL_WED_TX_BM_EN BIT(8) |
| #define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9) |
| #define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10) |
| @@ -117,6 +119,10 @@ struct mtk_wdma_desc { |
| #define MTK_WED_STATUS 0x060 |
| #define MTK_WED_STATUS_TX GENMASK(15, 8) |
| |
| +#define MTK_WED_WPDMA_STATUS 0x068 |
| +#define MTK_WED_WPDMA_STATUS_TX_DRV GENMASK(15, 8) |
| + |
| + |
| #define MTK_WED_TX_BM_CTRL 0x080 |
| #define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0) |
| #define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16) |
| @@ -154,6 +160,9 @@ struct mtk_wdma_desc { |
| #define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM_V3 GENMASK(7, 0) |
| #define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM_V3 GENMASK(23, 16) |
| |
| +#define MTK_WED_TX_TKID_INTF 0x0dc |
| +#define MTK_WED_TX_TKID_INTF_TKFIFO_FDEP GENMASK(25, 16) |
| + |
| #define MTK_WED_TX_TKID_DYN_THR 0x0e0 |
| #define MTK_WED_TX_TKID_DYN_THR_LO GENMASK(6, 0) |
| #define MTK_WED_TX_TKID_DYN_THR_HI GENMASK(22, 16) |
| @@ -205,6 +214,7 @@ struct mtk_wdma_desc { |
| #define MTK_WED_RING_RX_DATA(_n) (0x420 + (_n) * 0x10) |
| |
| #define MTK_WED_SCR0 0x3c0 |
| +#define MTK_WED_RX1_CTRL2 0x418 |
| #define MTK_WED_WPDMA_INT_TRIGGER 0x504 |
| #define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1) |
| #define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4) |
| @@ -320,6 +330,7 @@ struct mtk_wdma_desc { |
| |
| #define MTK_WED_WPDMA_RX_D_RST_IDX 0x760 |
| #define MTK_WED_WPDMA_RX_D_RST_CRX_IDX GENMASK(17, 16) |
| +#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX_ALL BIT(20) |
| #define MTK_WED_WPDMA_RX_D_RST_DRV_IDX GENMASK(25, 24) |
| |
| #define MTK_WED_WPDMA_RX_GLO_CFG 0x76c |
| @@ -336,6 +347,7 @@ struct mtk_wdma_desc { |
| |
| #define MTK_WED_WPDMA_RX_D_PREF_CFG 0x7b4 |
| #define MTK_WED_WPDMA_RX_D_PREF_EN BIT(0) |
| +#define MTK_WED_WPDMA_RX_D_PREF_BUSY BIT(1) |
| #define MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE GENMASK(12, 8) |
| #define MTK_WED_WPDMA_RX_D_PREF_LOW_THRES GENMASK(21, 16) |
| |
| @@ -357,11 +369,13 @@ struct mtk_wdma_desc { |
| |
| #define MTK_WED_WDMA_RX_PREF_CFG 0x950 |
| #define MTK_WED_WDMA_RX_PREF_EN BIT(0) |
| +#define MTK_WED_WDMA_RX_PREF_BUSY BIT(1) |
| #define MTK_WED_WDMA_RX_PREF_BURST_SIZE GENMASK(12, 8) |
| #define MTK_WED_WDMA_RX_PREF_LOW_THRES GENMASK(21, 16) |
| #define MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR BIT(24) |
| #define MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR BIT(25) |
| #define MTK_WED_WDMA_RX_PREF_DDONE2_EN BIT(26) |
| +#define MTK_WED_WDMA_RX_PREF_DDONE2_BUSY BIT(27) |
| |
| #define MTK_WED_WDMA_RX_PREF_FIFO_CFG 0x95C |
| #define MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR BIT(0) |
| @@ -390,6 +404,7 @@ struct mtk_wdma_desc { |
| |
| #define MTK_WED_WDMA_RESET_IDX 0xa08 |
| #define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16) |
| +#define MTK_WED_WDMA_RESET_IDX_RX_ALL BIT(20) |
| #define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24) |
| |
| #define MTK_WED_WDMA_INT_CLR 0xa24 |
| @@ -458,21 +473,66 @@ struct mtk_wdma_desc { |
| #define MTK_WDMA_INT_MASK_RX_DELAY BIT(30) |
| #define MTK_WDMA_INT_MASK_RX_COHERENT BIT(31) |
| |
| +#define MTK_WDMA_XDMA_TX_FIFO_CFG 0x238 |
| +#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR BIT(0) |
| +#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR BIT(4) |
| +#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR BIT(8) |
| +#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR BIT(12) |
| + |
| +#define MTK_WDMA_XDMA_RX_FIFO_CFG 0x23c |
| +#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR BIT(0) |
| +#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR BIT(4) |
| +#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR BIT(8) |
| +#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR BIT(12) |
| +#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR BIT(15) |
| +#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR BIT(18) |
| +#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR BIT(21) |
| + |
| + |
| + |
| #define MTK_WDMA_INT_GRP1 0x250 |
| #define MTK_WDMA_INT_GRP2 0x254 |
| |
| #define MTK_WDMA_PREF_TX_CFG 0x2d0 |
| #define MTK_WDMA_PREF_TX_CFG_PREF_EN BIT(0) |
| +#define MTK_WDMA_PREF_TX_CFG_PREF_BUSY BIT(1) |
| |
| #define MTK_WDMA_PREF_RX_CFG 0x2dc |
| #define MTK_WDMA_PREF_RX_CFG_PREF_EN BIT(0) |
| +#define MTK_WDMA_PREF_RX_CFG_PREF_BUSY BIT(1) |
| + |
| +#define MTK_WDMA_PREF_RX_FIFO_CFG 0x2e0 |
| +#define MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR BIT(0) |
| +#define MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR BIT(16) |
| + |
| +#define MTK_WDMA_PREF_TX_FIFO_CFG 0x2d4 |
| +#define MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR BIT(0) |
| +#define MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR BIT(16) |
| + |
| +#define MTK_WDMA_PREF_SIDX_CFG 0x2e4 |
| +#define MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR GENMASK(3, 0) |
| +#define MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR GENMASK(5, 4) |
| |
| #define MTK_WDMA_WRBK_TX_CFG 0x300 |
| +#define MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY BIT(0) |
| #define MTK_WDMA_WRBK_TX_CFG_WRBK_EN BIT(30) |
| |
| +#define MTK_WDMA_WRBK_TX_FIFO_CFG(_n) (0x304 + (_n) * 0x4) |
| +#define MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR BIT(0) |
| + |
| + |
| #define MTK_WDMA_WRBK_RX_CFG 0x344 |
| +#define MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY BIT(0) |
| #define MTK_WDMA_WRBK_RX_CFG_WRBK_EN BIT(30) |
| |
| +#define MTK_WDMA_WRBK_RX_FIFO_CFG(_n) (0x348 + (_n) * 0x4) |
| +#define MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR BIT(0) |
| + |
| + |
| +#define MTK_WDMA_WRBK_SIDX_CFG 0x388 |
| +#define MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR GENMASK(3, 0) |
| +#define MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR GENMASK(5, 4) |
| + |
| #define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0) |
| #define MTK_PCIE_MIRROR_MAP_EN BIT(0) |
| #define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1) |
| @@ -486,6 +546,9 @@ struct mtk_wdma_desc { |
| #define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5) |
| #define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20) |
| |
| +#define MTK_WED_RTQM_RST 0xb04 |
| + |
| + |
| #define MTK_WED_RTQM_IGRS0_I2HW_DMAD_CNT 0xb1c |
| #define MTK_WED_RTQM_IGRS0_I2H_DMAD_CNT(_n) (0xb20 + (_n) * 0x4) |
| #define MTK_WED_RTQM_IGRS0_I2HW_PKT_CNT 0xb28 |
| @@ -675,6 +738,9 @@ struct mtk_wdma_desc { |
| #define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR BIT(17) |
| #define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG GENMASK(22, 18) |
| |
| +#define MTK_WED_RRO_RX_HW_STS 0xf00 |
| +#define MTK_WED_RX_IND_CMD_BUSY GENMASK(31, 0) |
| + |
| #define MTK_WED_RX_IND_CMD_CNT0 0xf20 |
| #define MTK_WED_RX_IND_CMD_DBG_CNT_EN BIT(31) |
| |
| diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h |
| index e81e41f..83a4b8b 100644 |
| --- a/include/linux/soc/mediatek/mtk_wed.h |
| +++ b/include/linux/soc/mediatek/mtk_wed.h |
| @@ -222,7 +222,7 @@ struct mtk_wed_ops { |
| |
| u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask); |
| void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask); |
| - void (*start_hw_rro)(struct mtk_wed_device *dev, u32 irq_mask); |
| + void (*start_hw_rro)(struct mtk_wed_device *dev, u32 irq_mask, bool reset); |
| void (*rro_rx_ring_setup)(struct mtk_wed_device *dev, int ring, |
| void __iomem *regs); |
| void (*msdu_pg_rx_ring_setup)(struct mtk_wed_device *dev, int ring, |
| @@ -302,8 +302,8 @@ mtk_wed_is_amsdu_supported(struct mtk_wed_device *dev) |
| #define mtk_wed_device_dma_reset(_dev) (_dev)->ops->reset_dma(_dev) |
| #define mtk_wed_device_setup_tc(_dev, _ndev, _type, _data) \ |
| (_dev)->ops->setup_tc(_dev, _ndev, _type, _data) |
| -#define mtk_wed_device_start_hw_rro(_dev, _mask) \ |
| - (_dev)->ops->start_hw_rro(_dev, _mask) |
| +#define mtk_wed_device_start_hw_rro(_dev, _mask, _reset) \ |
| + (_dev)->ops->start_hw_rro(_dev, _mask, _reset) |
| #define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) \ |
| (_dev)->ops->rro_rx_ring_setup(_dev, _ring, _regs) |
| #define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) \ |
| @@ -329,7 +329,7 @@ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev) |
| #define mtk_wed_device_stop(_dev) do {} while (0) |
| #define mtk_wed_device_dma_reset(_dev) do {} while (0) |
| #define mtk_wed_device_setup_tc(_dev, _ndev, _type, _data) do {} while (0) |
| -#define mtk_wed_device_start_hw_rro(_dev, _mask) do {} while (0) |
| +#define mtk_wed_device_start_hw_rro(_dev, _mask, _reset) do {} while (0) |
| #define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) -ENODEV |
| #define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) -ENODEV |
| #define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) -ENODEV |
| -- |
| 2.18.0 |
| |