| From bc8244ada5c668374813f7f9b73d990bf2695aaf Mon Sep 17 00:00:00 2001 |
| From: Sujuan Chen <sujuan.chen@mediatek.com> |
| Date: Wed, 15 Jun 2022 14:38:54 +0800 |
| Subject: [PATCH 8/8] 9997-add-wed-rx-support-for-mt7986 |
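| |
| Add Wireless Ethernet Dispatch (WED) RX offload support for the |
| MT7986 (WED v2) SoC: the RX buffer manager and RX data rings, the |
| RRO and route QM blocks, and the control path to the WO MCU over the |
| AP2WO CCIF interface, including WO firmware download and debugfs |
| dump/control hooks. Split the ap2woccif and wocpu_dlm device tree |
| nodes into per-WED instances and reference the WO CPU memory regions |
| from each WED node. |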
| |
| Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com> |
| --- |
| arch/arm64/boot/dts/mediatek/mt7986a.dtsi | 42 +- |
| arch/arm64/boot/dts/mediatek/mt7986b.dtsi | 42 +- |
| drivers/net/ethernet/mediatek/Makefile | 2 +- |
| drivers/net/ethernet/mediatek/mtk_wed.c | 544 +++++++++++++++-- |
| drivers/net/ethernet/mediatek/mtk_wed.h | 50 ++ |
| drivers/net/ethernet/mediatek/mtk_wed_ccif.c | 121 ++++ |
| drivers/net/ethernet/mediatek/mtk_wed_ccif.h | 45 ++ |
| .../net/ethernet/mediatek/mtk_wed_debugfs.c | 90 +++ |
| drivers/net/ethernet/mediatek/mtk_wed_mcu.c | 561 ++++++++++++++++++ |
| drivers/net/ethernet/mediatek/mtk_wed_mcu.h | 125 ++++ |
| drivers/net/ethernet/mediatek/mtk_wed_regs.h | 145 ++++- |
| drivers/net/ethernet/mediatek/mtk_wed_wo.c | 548 +++++++++++++++++ |
| drivers/net/ethernet/mediatek/mtk_wed_wo.h | 334 +++++++++++ |
| include/linux/soc/mediatek/mtk_wed.h | 63 +- |
| 14 files changed, 2643 insertions(+), 69 deletions(-) |
| mode change 100644 => 100755 drivers/net/ethernet/mediatek/mtk_wed.c |
| create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_ccif.c |
| create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_ccif.h |
| create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_mcu.c |
| create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_mcu.h |
| create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_wo.c |
| create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_wo.h |
| |
| diff --git a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi |
| index 644255b35..ddcc0b809 100644 |
| --- a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi |
| +++ b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi |
| @@ -65,6 +65,12 @@ |
| interrupt-parent = <&gic>; |
| interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>; |
| mediatek,wed_pcie = <&wed_pcie>; |
| + mediatek,ap2woccif = <&ap2woccif0>; |
| + mediatek,wocpu_ilm = <&wocpu0_ilm>; |
| + mediatek,wocpu_dlm = <&wocpu0_dlm>; |
| + mediatek,wocpu_boot = <&cpu_boot>; |
| + mediatek,wocpu_emi = <&wocpu0_emi>; |
| + mediatek,wocpu_data = <&wocpu_data>; |
| }; |
| |
| wed1: wed@15011000 { |
| @@ -74,15 +80,26 @@ |
| interrupt-parent = <&gic>; |
| interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>; |
| mediatek,wed_pcie = <&wed_pcie>; |
| + mediatek,ap2woccif = <&ap2woccif1>; |
| + mediatek,wocpu_ilm = <&wocpu1_ilm>; |
| + mediatek,wocpu_dlm = <&wocpu1_dlm>; |
| + mediatek,wocpu_boot = <&cpu_boot>; |
| + mediatek,wocpu_emi = <&wocpu1_emi>; |
| + mediatek,wocpu_data = <&wocpu_data>; |
| }; |
| |
| - ap2woccif: ap2woccif@151A5000 { |
| - compatible = "mediatek,ap2woccif"; |
| - reg = <0 0x151A5000 0 0x1000>, |
| - <0 0x151AD000 0 0x1000>; |
| + ap2woccif0: ap2woccif@151A5000 { |
| + compatible = "mediatek,ap2woccif", "syscon"; |
| + reg = <0 0x151A5000 0 0x1000>; |
| interrupt-parent = <&gic>; |
| - interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>, |
| - <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>; |
| + interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>; |
| + }; |
| + |
| + ap2woccif1: ap2woccif@151AD000 { |
| + compatible = "mediatek,ap2woccif", "syscon"; |
| + reg = <0 0x151AD000 0 0x1000>; |
| + interrupt-parent = <&gic>; |
| + interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>; |
| }; |
| |
| wocpu0_ilm: wocpu0_ilm@151E0000 { |
| @@ -95,10 +112,17 @@ |
| reg = <0 0x151F0000 0 0x8000>; |
| }; |
| |
| - wocpu_dlm: wocpu_dlm@151E8000 { |
| + wocpu0_dlm: wocpu_dlm@151E8000 { |
| + compatible = "mediatek,wocpu_dlm"; |
| + reg = <0 0x151E8000 0 0x2000>; |
| + |
| + resets = <ðsysrst 0>; |
| + reset-names = "wocpu_rst"; |
| + }; |
| + |
| + wocpu1_dlm: wocpu_dlm@151F8000 { |
| compatible = "mediatek,wocpu_dlm"; |
| - reg = <0 0x151E8000 0 0x2000>, |
| - <0 0x151F8000 0 0x2000>; |
| + reg = <0 0x151F8000 0 0x2000>; |
| |
| resets = <ðsysrst 0>; |
| reset-names = "wocpu_rst"; |
| diff --git a/arch/arm64/boot/dts/mediatek/mt7986b.dtsi b/arch/arm64/boot/dts/mediatek/mt7986b.dtsi |
| index 67bf86f6a..6710b388b 100644 |
| --- a/arch/arm64/boot/dts/mediatek/mt7986b.dtsi |
| +++ b/arch/arm64/boot/dts/mediatek/mt7986b.dtsi |
| @@ -65,6 +65,12 @@ |
| interrupt-parent = <&gic>; |
| interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>; |
| mediatek,wed_pcie = <&wed_pcie>; |
| + mediatek,ap2woccif = <&ap2woccif0>; |
| + mediatek,wocpu_ilm = <&wocpu0_ilm>; |
| + mediatek,wocpu_dlm = <&wocpu0_dlm>; |
| + mediatek,wocpu_boot = <&cpu_boot>; |
| + mediatek,wocpu_emi = <&wocpu0_emi>; |
| + mediatek,wocpu_data = <&wocpu_data>; |
| }; |
| |
| wed1: wed@15011000 { |
| @@ -74,15 +80,26 @@ |
| interrupt-parent = <&gic>; |
| interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>; |
| mediatek,wed_pcie = <&wed_pcie>; |
| + mediatek,ap2woccif = <&ap2woccif1>; |
| + mediatek,wocpu_ilm = <&wocpu1_ilm>; |
| + mediatek,wocpu_dlm = <&wocpu1_dlm>; |
| + mediatek,wocpu_boot = <&cpu_boot>; |
| + mediatek,wocpu_emi = <&wocpu1_emi>; |
| + mediatek,wocpu_data = <&wocpu_data>; |
| }; |
| |
| - ap2woccif: ap2woccif@151A5000 { |
| - compatible = "mediatek,ap2woccif"; |
| - reg = <0 0x151A5000 0 0x1000>, |
| - <0 0x151AD000 0 0x1000>; |
| + ap2woccif0: ap2woccif@151A5000 { |
| + compatible = "mediatek,ap2woccif", "syscon"; |
| + reg = <0 0x151A5000 0 0x1000>; |
| interrupt-parent = <&gic>; |
| - interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>, |
| - <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>; |
| + interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>; |
| + }; |
| + |
| + ap2woccif1: ap2woccif@151AD000 { |
| + compatible = "mediatek,ap2woccif", "syscon"; |
| + reg = <0 0x151AD000 0 0x1000>; |
| + interrupt-parent = <&gic>; |
| + interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>; |
| }; |
| |
| wocpu0_ilm: wocpu0_ilm@151E0000 { |
| @@ -95,10 +112,17 @@ |
| reg = <0 0x151F0000 0 0x8000>; |
| }; |
| |
| - wocpu_dlm: wocpu_dlm@151E8000 { |
| + wocpu0_dlm: wocpu_dlm@151E8000 { |
| + compatible = "mediatek,wocpu_dlm"; |
| + reg = <0 0x151E8000 0 0x2000>; |
| + |
| + resets = <ðsysrst 0>; |
| + reset-names = "wocpu_rst"; |
| + }; |
| + |
| + wocpu1_dlm: wocpu_dlm@151F8000 { |
| compatible = "mediatek,wocpu_dlm"; |
| - reg = <0 0x151E8000 0 0x2000>, |
| - <0 0x151F8000 0 0x2000>; |
| + reg = <0 0x151F8000 0 0x2000>; |
| |
| resets = <ðsysrst 0>; |
| reset-names = "wocpu_rst"; |
| diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile |
| index 3528f1b3c..0c724a55c 100644 |
| --- a/drivers/net/ethernet/mediatek/Makefile |
| +++ b/drivers/net/ethernet/mediatek/Makefile |
| @@ -10,5 +10,5 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o |
| ifdef CONFIG_DEBUG_FS |
| mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o |
| endif |
| -obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o |
| +obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o mtk_wed_wo.o mtk_wed_mcu.o mtk_wed_ccif.o |
| obj-$(CONFIG_NET_MEDIATEK_HNAT) += mtk_hnat/ |
| diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c |
| old mode 100644 |
| new mode 100755 |
| index 48b0353bb..c4aab12b0 |
| --- a/drivers/net/ethernet/mediatek/mtk_wed.c |
| +++ b/drivers/net/ethernet/mediatek/mtk_wed.c |
| @@ -13,11 +13,19 @@ |
| #include <linux/debugfs.h> |
| #include <linux/iopoll.h> |
| #include <linux/soc/mediatek/mtk_wed.h> |
| + |
| #include "mtk_eth_soc.h" |
| #include "mtk_wed_regs.h" |
| #include "mtk_wed.h" |
| #include "mtk_ppe.h" |
| - |
| +#include "mtk_wed_mcu.h" |
| +#include "mtk_wed_wo.h" |
| + |
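| +/* ring descriptor layout passed to the WO MCU in the WO_CMD_WED_CFG |
| + * request (see mtk_wed_rro_cfg()) |
| + */ |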
| +struct wo_cmd_ring { |
| + u32 q_base; |
| + u32 cnt; |
| + u32 unit; |
| +}; |
| static struct mtk_wed_hw *hw_list[2]; |
| static DEFINE_MUTEX(hw_lock); |
| |
| @@ -51,6 +59,12 @@ wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask) |
| wdma_m32(dev, reg, 0, mask); |
| } |
| |
| +static void |
| +wdma_clr(struct mtk_wed_device *dev, u32 reg, u32 mask) |
| +{ |
| + wdma_m32(dev, reg, mask, 0); |
| +} |
| + |
| static u32 |
| mtk_wed_read_reset(struct mtk_wed_device *dev) |
| { |
| @@ -68,6 +82,48 @@ mtk_wed_reset(struct mtk_wed_device *dev, u32 mask) |
| WARN_ON_ONCE(1); |
| } |
| |
| +static void |
| +mtk_wed_wo_reset(struct mtk_wed_device *dev) |
| +{ |
| + struct mtk_wed_wo *wo = dev->hw->wed_wo; |
| + u8 state = WO_STATE_DISABLE; |
| + u8 state_done = WOIF_DISABLE_DONE; |
| + void __iomem *reg; |
| + u32 value; |
| + unsigned long timeout = jiffies + WOCPU_TIMEOUT; |
| + |
| + mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, WO_CMD_CHANGE_STATE, |
| + &state, sizeof(state), false); |
| + |
| + do { |
| + value = wed_r32(dev, MTK_WED_SCR0 + 4 * WED_DUMMY_CR_WO_STATUS); |
| + } while (value != state_done && !time_after(jiffies, timeout)); |
| + |
| + reg = ioremap(WOCPU_MCUSYS_RESET_ADDR, 4); |
| + value = readl((void *)reg); |
| + switch (dev->hw->index) { |
| + case 0: |
| + value |= WOCPU_WO0_MCUSYS_RESET_MASK; |
| + writel(value, (void *)reg); |
| + value &= ~WOCPU_WO0_MCUSYS_RESET_MASK; |
| + writel(value, (void *)reg); |
| + break; |
| + case 1: |
| + value |= WOCPU_WO1_MCUSYS_RESET_MASK; |
| + writel(value, (void *)reg); |
| + value &= ~WOCPU_WO1_MCUSYS_RESET_MASK; |
| + writel(value, (void *)reg); |
| + break; |
| + default: |
| + dev_err(dev->hw->dev, "invalid mtk_wed index %d\n", |
| + dev->hw->index); |
| + |
| + break; |
| + } |
| + |
| + iounmap((void *)reg); |
| +} |
| + |
| static struct mtk_wed_hw * |
| mtk_wed_assign(struct mtk_wed_device *dev) |
| { |
| @@ -205,6 +261,42 @@ free_pagelist: |
| kfree(page_list); |
| } |
| |
| +static int |
| +mtk_wed_rx_bm_alloc(struct mtk_wed_device *dev) |
| +{ |
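| + /* allocate the RX buffer manager descriptor ring; the WLAN driver |
| + * callback fills it with DMA-mapped rx buffers |
| + */ |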
| + struct mtk_rxbm_desc *desc; |
| + dma_addr_t desc_phys; |
| + int ring_size; |
| + |
| + ring_size = dev->wlan.rx_nbuf; |
| + dev->rx_buf_ring.size = ring_size; |
| + desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc), |
| + &desc_phys, GFP_KERNEL); |
| + if (!desc) |
| + return -ENOMEM; |
| + |
| + dev->rx_buf_ring.desc = desc; |
| + dev->rx_buf_ring.desc_phys = desc_phys; |
| + |
| + dev->wlan.init_rx_buf(dev, dev->wlan.rx_pkt); |
| + return 0; |
| +} |
| + |
| +static void |
| +mtk_wed_free_rx_bm(struct mtk_wed_device *dev) |
| +{ |
| + struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc; |
| + int ring_size = dev->rx_buf_ring.size; |
| + |
| + if (!desc) |
| + return; |
| + |
| + dev->wlan.release_rx_buf(dev); |
| + |
| + dma_free_coherent(dev->hw->dev, ring_size * sizeof(*desc), |
| + desc, dev->rx_buf_ring.desc_phys); |
| +} |
| + |
| static void |
| mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, int scale) |
| { |
| @@ -226,13 +318,22 @@ mtk_wed_free_tx_rings(struct mtk_wed_device *dev) |
| mtk_wed_free_ring(dev, &dev->tx_wdma[i], dev->ver); |
| } |
| |
| +static void |
| +mtk_wed_free_rx_rings(struct mtk_wed_device *dev) |
| +{ |
| + mtk_wed_free_rx_bm(dev); |
| + mtk_wed_free_ring(dev, &dev->rro.rro_ring, 1); |
| +} |
| + |
| static void |
| mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask) |
| { |
| u32 wdma_mask; |
| |
| wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0)); |
| - |
| + if (dev->ver > MTK_WED_V1) |
| + wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE, |
| + GENMASK(1, 0)); |
| /* wed control cr set */ |
| wed_set(dev, MTK_WED_CTRL, |
| MTK_WED_CTRL_WDMA_INT_AGENT_EN | |
| @@ -251,7 +352,7 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask) |
| wed_set(dev, MTK_WED_WPDMA_INT_CTRL, |
| MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV); |
| } else { |
| - /* initail tx interrupt trigger */ |
| + |
| wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX, |
| MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN | |
| MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR | |
| @@ -262,22 +363,30 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask) |
| FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG, |
| dev->wlan.tx_tbit[1])); |
| |
| - /* initail txfree interrupt trigger */ |
| wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE, |
| MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN | |
| MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR | |
| FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG, |
| dev->wlan.txfree_tbit)); |
| + |
| + wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX, |
| + MTK_WED_WPDMA_INT_CTRL_RX0_EN | |
| + MTK_WED_WPDMA_INT_CTRL_RX0_CLR | |
| + MTK_WED_WPDMA_INT_CTRL_RX1_EN | |
| + MTK_WED_WPDMA_INT_CTRL_RX1_CLR | |
| + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG, |
| + dev->wlan.rx_tbit[0]) | |
| + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG, |
| + dev->wlan.rx_tbit[1])); |
| } |
| - /* initail wdma interrupt agent */ |
| wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask); |
| if (dev->ver == MTK_WED_V1) { |
| wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask); |
| } else { |
| wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask); |
| wed_set(dev, MTK_WED_WDMA_INT_CTRL, |
| - FIELD_PREP(MTK_WED_WDMA_INT_POLL_SRC_SEL,dev->wdma_idx)); |
| - |
| + FIELD_PREP(MTK_WED_WDMA_INT_POLL_SRC_SEL, |
| + dev->wdma_idx)); |
| } |
| |
| wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask); |
| @@ -312,6 +421,39 @@ mtk_wed_set_512_support(struct mtk_wed_device *dev, bool en) |
| } |
| } |
| |
| +static void |
| +mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx) |
| +{ |
| +#define MTK_WFMDA_RX_DMA_EN BIT(2) |
| + |
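| + /* wait for WED to fill the WPDMA rx data ring before enabling the |
| + * rx DMA on the WLAN WFDMA side |
| + */ |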
| + int timeout = 3; |
| + u32 cur_idx, regs; |
| + |
| + do { |
| + regs = MTK_WED_WPDMA_RING_RX_DATA(idx) + |
| + MTK_WED_RING_OFS_COUNT; |
| + cur_idx = wed_r32(dev, regs); |
| + if (cur_idx == MTK_WED_RX_RING_SIZE - 1) |
| + break; |
| + |
| + usleep_range(100000, 200000); |
| + } while (--timeout > 0); |
| + |
| + if (timeout) { |
| + unsigned int val; |
| + |
| + val = wifi_r32(dev, dev->wlan.wpdma_rx_glo - |
| + dev->wlan.phy_base); |
| + val |= MTK_WFMDA_RX_DMA_EN; |
| + |
| + wifi_w32(dev, dev->wlan.wpdma_rx_glo - |
| + dev->wlan.phy_base, val); |
| + } else { |
| + dev_err(dev->hw->dev, "mtk_wed%d: rx dma enable failed!\n", |
| + dev->hw->index); |
| + } |
| +} |
| + |
| static void |
| mtk_wed_dma_enable(struct mtk_wed_device *dev) |
| { |
| @@ -336,9 +478,14 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev) |
| wdma_set(dev, MTK_WDMA_GLO_CFG, |
| MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); |
| } else { |
| + int idx = 0; |
| + |
| wed_set(dev, MTK_WED_WPDMA_CTRL, |
| MTK_WED_WPDMA_CTRL_SDL1_FIXED); |
| |
| + wed_set(dev, MTK_WED_WDMA_GLO_CFG, |
| + MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK); |
| + |
| wed_set(dev, MTK_WED_WPDMA_GLO_CFG, |
| MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC | |
| MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC); |
| @@ -346,6 +493,15 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev) |
| wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, |
| MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP | |
| MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV); |
| + |
| + wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, |
| + MTK_WED_WPDMA_RX_D_RX_DRV_EN | |
| + FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) | |
| + FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL, |
| + 0x2)); |
| + |
| + for (idx = 0; idx < MTK_WED_RX_QUEUES; idx++) |
| + mtk_wed_check_wfdma_rx_fill(dev, idx); |
| } |
| } |
| |
| @@ -363,19 +519,23 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev) |
| MTK_WED_GLO_CFG_TX_DMA_EN | |
| MTK_WED_GLO_CFG_RX_DMA_EN); |
| |
| - wdma_m32(dev, MTK_WDMA_GLO_CFG, |
| + wdma_clr(dev, MTK_WDMA_GLO_CFG, |
| MTK_WDMA_GLO_CFG_TX_DMA_EN | |
| MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | |
| - MTK_WDMA_GLO_CFG_RX_INFO2_PRERES, 0); |
| + MTK_WDMA_GLO_CFG_RX_INFO2_PRERES); |
| |
| if (dev->ver == MTK_WED_V1) { |
| regmap_write(dev->hw->mirror, dev->hw->index * 4, 0); |
| - wdma_m32(dev, MTK_WDMA_GLO_CFG, |
| - MTK_WDMA_GLO_CFG_RX_INFO3_PRERES, 0); |
| + wdma_clr(dev, MTK_WDMA_GLO_CFG, |
| + MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); |
| } else { |
| wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, |
| MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC | |
| MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC); |
| + wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, |
| + MTK_WED_WPDMA_RX_D_RX_DRV_EN); |
| + wed_clr(dev, MTK_WED_WDMA_GLO_CFG, |
| + MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK); |
| } |
| } |
| |
| @@ -395,6 +555,11 @@ mtk_wed_stop(struct mtk_wed_device *dev) |
| MTK_WED_CTRL_WED_TX_BM_EN | |
| MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); |
| |
| + if (dev->ver > MTK_WED_V1) { |
| + wed_clr(dev, MTK_WED_CTRL, |
| + MTK_WED_CTRL_WED_RX_BM_EN); |
| + } |
| + |
| wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0); |
| wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0); |
| wdma_w32(dev, MTK_WDMA_INT_MASK, 0); |
| @@ -416,9 +581,17 @@ mtk_wed_detach(struct mtk_wed_device *dev) |
| wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); |
| |
| mtk_wed_reset(dev, MTK_WED_RESET_WED); |
| + if (dev->ver > MTK_WED_V1) |
| + mtk_wed_wo_reset(dev); |
| + |
| + wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN); |
| + wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX); |
| + wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); |
| |
| mtk_wed_free_buffer(dev); |
| mtk_wed_free_tx_rings(dev); |
| + if (dev->ver > MTK_WED_V1) |
| + mtk_wed_free_rx_rings(dev); |
| |
| if (dev->wlan.bus_type == MTK_BUS_TYPE_PCIE) { |
| wlan_node = dev->wlan.pci_dev->dev.of_node; |
| @@ -477,7 +650,6 @@ mtk_wed_bus_init(struct mtk_wed_device *dev) |
| value = wed_r32(dev, MTK_WED_PCIE_CFG_INTM); |
| value = wed_r32(dev, MTK_WED_PCIE_CFG_BASE); |
| |
| - /* pcie interrupt status trigger register */ |
| wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24)); |
| wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER); |
| |
| @@ -501,6 +673,9 @@ mtk_wed_set_wpdma(struct mtk_wed_device *dev) |
| wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask); |
| wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx); |
| wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree); |
| + |
| + wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo); |
| + wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx); |
| } else { |
| wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys); |
| } |
| @@ -549,24 +722,92 @@ mtk_wed_hw_init_early(struct mtk_wed_device *dev) |
| FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL, |
| MTK_WDMA_RING_RX(0))); |
| } |
| +} |
| + |
| +static void |
| +mtk_wed_rx_bm_hw_init(struct mtk_wed_device *dev) |
| +{ |
| + wed_w32(dev, MTK_WED_RX_BM_RX_DMAD, |
| + FIELD_PREP(MTK_WED_RX_BM_RX_DMAD_SDL0, dev->wlan.rx_pkt_size)); |
| + |
| + wed_w32(dev, MTK_WED_RX_BM_BASE, dev->rx_buf_ring.desc_phys); |
| |
| + wed_w32(dev, MTK_WED_RX_BM_INIT_PTR, MTK_WED_RX_BM_INIT_SW_TAIL | |
| + FIELD_PREP(MTK_WED_RX_BM_SW_TAIL, dev->wlan.rx_pkt)); |
| + |
| + wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH, |
| + FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff)); |
| + |
| + wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN); |
| } |
| |
| static void |
| -mtk_wed_hw_init(struct mtk_wed_device *dev) |
| +mtk_wed_rro_hw_init(struct mtk_wed_device *dev) |
| +{ |
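| + /* program the MIOD/FDBK queue bases and sizes used by the RRO QM */ |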
| + wed_w32(dev, MTK_WED_RROQM_MIOD_CFG, |
| + FIELD_PREP(MTK_WED_RROQM_MIOD_MID_DW, 0x70 >> 2) | |
| + FIELD_PREP(MTK_WED_RROQM_MIOD_MOD_DW, 0x10 >> 2) | |
| + FIELD_PREP(MTK_WED_RROQM_MIOD_ENTRY_DW, |
| + MTK_WED_MIOD_ENTRY_CNT >> 2)); |
| + |
| + wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL0, dev->rro.miod_desc_phys); |
| + |
| + wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL1, |
| + FIELD_PREP(MTK_WED_RROQM_MIOD_CNT, MTK_WED_MIOD_CNT)); |
| + |
| + wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL0, dev->rro.fdbk_desc_phys); |
| + |
| + wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL1, |
| + FIELD_PREP(MTK_WED_RROQM_FDBK_CNT, MTK_WED_FB_CMD_CNT)); |
| + |
| + wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL2, 0); |
| + |
| + wed_w32(dev, MTK_WED_RROQ_BASE_L, dev->rro.rro_ring.desc_phys); |
| + |
| + wed_set(dev, MTK_WED_RROQM_RST_IDX, |
| + MTK_WED_RROQM_RST_IDX_MIOD | |
| + MTK_WED_RROQM_RST_IDX_FDBK); |
| + |
| + wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0); |
| + |
| + wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL2, MTK_WED_MIOD_CNT - 1); |
| + |
| + wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN); |
| +} |
| + |
| +static void |
| +mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev) |
| +{ |
| + wed_w32(dev, MTK_WED_RESET, MTK_WED_RESET_RX_ROUTE_QM); |
| + |
| + do { |
| + udelay(100); |
| + |
| + if (!(wed_r32(dev, MTK_WED_RESET) & MTK_WED_RESET_RX_ROUTE_QM)) |
| + break; |
| + } while (1); |
| + |
| + /* configure RX_ROUTE_QM */ |
| + wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); |
| + wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT); |
| + wed_set(dev, MTK_WED_RTQM_GLO_CFG, |
| + FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index)); |
| + wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); |
| + |
| + /* enable RX_ROUTE_QM */ |
| + wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN); |
| +} |
| + |
| +static void |
| +mtk_wed_tx_hw_init(struct mtk_wed_device *dev) |
| { |
| int size = dev->buf_ring.size; |
| int rev_size = MTK_WED_TX_RING_SIZE / 2; |
| int thr = 1; |
| |
| - if (dev->init_done) |
| - return; |
| - |
| - dev->init_done = true; |
| - mtk_wed_set_ext_int(dev, false); |
| - |
| if (dev->ver > MTK_WED_V1) { |
| - size = MTK_WED_WDMA_RING_SIZE * 2 + dev->buf_ring.size; |
| + size = MTK_WED_WDMA_RING_SIZE * ARRAY_SIZE(dev->tx_wdma) + |
| + dev->buf_ring.size; |
| rev_size = size; |
| thr = 0; |
| } |
| @@ -609,13 +852,48 @@ mtk_wed_hw_init(struct mtk_wed_device *dev) |
| } |
| |
| static void |
| -mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size, int scale) |
| +mtk_wed_rx_hw_init(struct mtk_wed_device *dev) |
| { |
| + wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, |
| + MTK_WED_WPDMA_RX_D_RST_CRX_IDX0 | |
| + MTK_WED_WPDMA_RX_D_RST_CRX_IDX1 | |
| + MTK_WED_WPDMA_RX_D_RST_DRV_IDX0 | |
| + MTK_WED_WPDMA_RX_D_RST_DRV_IDX1); |
| + |
| + wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0); |
| + |
| + mtk_wed_rx_bm_hw_init(dev); |
| + mtk_wed_rro_hw_init(dev); |
| + mtk_wed_route_qm_hw_init(dev); |
| +} |
| + |
| +static void |
| +mtk_wed_hw_init(struct mtk_wed_device *dev) |
| +{ |
| + if (dev->init_done) |
| + return; |
| + |
| + dev->init_done = true; |
| + mtk_wed_set_ext_int(dev, false); |
| + mtk_wed_tx_hw_init(dev); |
| + if (dev->ver > MTK_WED_V1) |
| + mtk_wed_rx_hw_init(dev); |
| +} |
| + |
| +static void |
| +mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size, int scale, bool tx) |
| +{ |
| + __le32 ctrl; |
| int i; |
| |
| + if (tx) |
| + ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); |
| + else |
| + ctrl = cpu_to_le32(MTK_WFDMA_DESC_CTRL_TO_HOST); |
| + |
| for (i = 0; i < size; i++) { |
| desc->buf0 = 0; |
| - desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); |
| + desc->ctrl = ctrl; |
| desc->buf1 = 0; |
| desc->info = 0; |
| desc += scale; |
| @@ -674,7 +952,7 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev) |
| if (!desc) |
| continue; |
| |
| - mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE, dev->ver); |
| + mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE, dev->ver, true); |
| } |
| |
| if (mtk_wed_poll_busy(dev)) |
| @@ -729,9 +1007,24 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev) |
| |
| } |
| |
| +static int |
| +mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, |
| + int size) |
| +{ |
| + ring->desc = dma_alloc_coherent(dev->hw->dev, |
| + size * sizeof(*ring->desc), |
| + &ring->desc_phys, GFP_KERNEL); |
| + if (!ring->desc) |
| + return -ENOMEM; |
| + |
| + ring->size = size; |
| + memset(ring->desc, 0, size * sizeof(*ring->desc)); |
| + return 0; |
| +} |
| + |
| static int |
| mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, |
| - int size, int scale) |
| + int size, int scale, bool tx) |
| { |
| ring->desc = dma_alloc_coherent(dev->hw->dev, |
| size * sizeof(*ring->desc) * scale, |
| @@ -740,17 +1033,18 @@ mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, |
| return -ENOMEM; |
| |
| ring->size = size; |
| - mtk_wed_ring_reset(ring->desc, size, scale); |
| + mtk_wed_ring_reset(ring->desc, size, scale, tx); |
| |
| return 0; |
| } |
| |
| static int |
| -mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size) |
| +mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size) |
| { |
| struct mtk_wed_ring *wdma = &dev->tx_wdma[idx]; |
| |
| - if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, dev->ver)) |
| + if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, |
| + dev->ver, true)) |
| return -ENOMEM; |
| |
| wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, |
| @@ -767,22 +1061,143 @@ mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size) |
| return 0; |
| } |
| |
| +static int |
| +mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size) |
| +{ |
| + struct mtk_wed_ring *wdma = &dev->rx_wdma[idx]; |
| + |
| + if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, |
| + dev->ver, true)) |
| + return -ENOMEM; |
| + |
| + wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE, |
| + wdma->desc_phys); |
| + wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT, |
| + size); |
| + wdma_w32(dev, |
| + MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); |
| + wdma_w32(dev, |
| + MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0); |
| + |
| + if (idx == 0) { |
| + wed_w32(dev, MTK_WED_WDMA_RING_TX |
| + + MTK_WED_RING_OFS_BASE, wdma->desc_phys); |
| + wed_w32(dev, MTK_WED_WDMA_RING_TX |
| + + MTK_WED_RING_OFS_COUNT, size); |
| + wed_w32(dev, MTK_WED_WDMA_RING_TX |
| + + MTK_WED_RING_OFS_CPU_IDX, 0); |
| + wed_w32(dev, MTK_WED_WDMA_RING_TX |
| + + MTK_WED_RING_OFS_DMA_IDX, 0); |
| + } |
| + |
| + return 0; |
| +} |
| + |
| +static int |
| +mtk_wed_rro_alloc(struct mtk_wed_device *dev) |
| +{ |
| + struct device_node *np, *node = dev->hw->node; |
| + struct mtk_wed_ring *ring; |
| + struct resource res; |
| + int ret; |
| + |
| + np = of_parse_phandle(node, "mediatek,wocpu_dlm", 0); |
| + if (!np) |
| + return -ENODEV; |
| + |
| + ret = of_address_to_resource(np, 0, &res); |
| + if (ret) |
| + return ret; |
| + |
| + dev->rro.rro_desc = ioremap(res.start, resource_size(&res)); |
| + |
| + ring = &dev->rro.rro_ring; |
| + |
| + dev->rro.miod_desc_phys = res.start; |
| + |
| + dev->rro.mcu_view_miod = MTK_WED_WOCPU_VIEW_MIOD_BASE; |
| + dev->rro.fdbk_desc_phys = MTK_WED_MIOD_ENTRY_CNT * MTK_WED_MIOD_CNT |
| + + dev->rro.miod_desc_phys; |
| + |
| + if (mtk_wed_rro_ring_alloc(dev, ring, MTK_WED_RRO_QUE_CNT)) |
| + return -ENOMEM; |
| + |
| + return 0; |
| +} |
| + |
| +static int |
| +mtk_wed_rro_cfg(struct mtk_wed_device *dev) |
| +{ |
| + struct mtk_wed_wo *wo = dev->hw->wed_wo; |
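| + /* WO_CMD_WED_CFG request: MIOD and FDBK ring layout as seen from |
| + * the WO MCU address space |
| + */ |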
| + struct { |
| + struct wo_cmd_ring ring[2]; |
| + |
| + u32 wed; |
| + u8 ver; |
| + } req = { |
| + .ring = { |
| + [0] = { |
| + .q_base = dev->rro.mcu_view_miod, |
| + .cnt = MTK_WED_MIOD_CNT, |
| + .unit = MTK_WED_MIOD_ENTRY_CNT, |
| + }, |
| + [1] = { |
| + .q_base = dev->rro.mcu_view_miod + |
| + MTK_WED_MIOD_ENTRY_CNT * |
| + MTK_WED_MIOD_CNT, |
| + .cnt = MTK_WED_FB_CMD_CNT, |
| + .unit = 4, |
| + }, |
| + }, |
| + .wed = 0, |
| + }; |
| + |
| + return mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, WO_CMD_WED_CFG, |
| + &req, sizeof(req), true); |
| +} |
| + |
| +static int |
| +mtk_wed_send_msg(struct mtk_wed_device *dev, int cmd_id, void *data, int len) |
| +{ |
| + struct mtk_wed_wo *wo = dev->hw->wed_wo; |
| + |
| + if (dev->ver == MTK_WED_V1) |
| + return 0; |
| + |
| + return mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, cmd_id, data, len, true); |
| +} |
| + |
| +static void |
| +mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb, |
| + u32 reason, u32 hash) |
| +{ |
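| + /* called from the WLAN rx path so that unbound flows are handed |
| + * back to the PPE for binding |
| + */ |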
| + int idx = dev->hw->index; |
| + struct mtk_eth *eth = dev->hw->eth; |
| + struct ethhdr *eh; |
| + |
| + if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) { |
| + if (!skb) |
| + return; |
| + |
| + skb_set_mac_header(skb, 0); |
| + eh = eth_hdr(skb); |
| + skb->protocol = eh->h_proto; |
| + mtk_ppe_check_skb(eth->ppe[idx], skb, hash); |
| + } |
| +} |
| + |
| static void |
| mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask) |
| { |
| - u32 wdma_mask; |
| - int i; |
| + int i, ret; |
| |
| for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) |
| if (!dev->tx_wdma[i].desc) |
| - mtk_wed_wdma_ring_setup(dev, i, 16); |
| - |
| + mtk_wed_wdma_rx_ring_setup(dev, i, 16); |
| |
| mtk_wed_hw_init(dev); |
| |
| mtk_wed_set_int(dev, irq_mask); |
| - |
| - |
| mtk_wed_set_ext_int(dev, true); |
| |
| if (dev->ver == MTK_WED_V1) { |
| @@ -797,6 +1212,19 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask) |
| val |= BIT(0); |
| regmap_write(dev->hw->mirror, dev->hw->index * 4, val); |
| } else { |
| + /* driver set mid ready and only once */ |
| + wed_w32(dev, MTK_WED_EXT_INT_MASK1, |
| + MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY); |
| + wed_w32(dev, MTK_WED_EXT_INT_MASK2, |
| + MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY); |
| + |
| + wed_r32(dev, MTK_WED_EXT_INT_MASK1); |
| + wed_r32(dev, MTK_WED_EXT_INT_MASK2); |
| + |
| + ret = mtk_wed_rro_cfg(dev); |
| + if (ret) |
| + return; |
| + |
| mtk_wed_set_512_support(dev, true); |
| } |
| |
| @@ -841,9 +1269,17 @@ mtk_wed_attach(struct mtk_wed_device *dev) |
| wed_r32(dev, MTK_WED_REV_ID)); |
| |
| ret = mtk_wed_buffer_alloc(dev); |
| - if (ret) { |
| - mtk_wed_detach(dev); |
| - goto out; |
| + if (ret) |
| + goto error; |
| + |
| + if (dev->ver > MTK_WED_V1) { |
| + ret = mtk_wed_rx_bm_alloc(dev); |
| + if (ret) |
| + goto error; |
| + |
| + ret = mtk_wed_rro_alloc(dev); |
| + if (ret) |
| + goto error; |
| } |
| |
| mtk_wed_hw_init_early(dev); |
| @@ -851,7 +1287,12 @@ mtk_wed_attach(struct mtk_wed_device *dev) |
| if (dev->ver == MTK_WED_V1) |
| regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, |
| BIT(hw->index), 0); |
| + else |
| + ret = mtk_wed_wo_init(hw); |
| |
| +error: |
| + if (ret) |
| + mtk_wed_detach(dev); |
| out: |
| mutex_unlock(&hw_lock); |
| |
| @@ -877,10 +1318,10 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs) |
| |
| BUG_ON(idx > ARRAY_SIZE(dev->tx_ring)); |
| |
| - if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, 1)) |
| + if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, 1, true)) |
| return -ENOMEM; |
| |
| - if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE)) |
| + if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE)) |
| return -ENOMEM; |
| |
| ring->reg_base = MTK_WED_RING_TX(idx); |
| @@ -927,6 +1368,35 @@ mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs) |
| return 0; |
| } |
| |
| +static int |
| +mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs) |
| +{ |
| + struct mtk_wed_ring *ring = &dev->rx_ring[idx]; |
| + |
| + BUG_ON(idx > ARRAY_SIZE(dev->rx_ring)); |
| + |
| + if (mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE, 1, false)) |
| + return -ENOMEM; |
| + |
| + if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE)) |
| + return -ENOMEM; |
| + |
| + ring->reg_base = MTK_WED_RING_RX_DATA(idx); |
| + ring->wpdma = regs; |
| + |
| + /* WPDMA -> WED */ |
| + wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys); |
| + wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_RX_RING_SIZE); |
| + |
| + wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_BASE, |
| + ring->desc_phys); |
| + wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_COUNT, |
| + MTK_WED_RX_RING_SIZE); |
| + |
| + return 0; |
| +} |
| + |
| static u32 |
| mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask) |
| { |
| @@ -1014,6 +1484,8 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, |
| .attach = mtk_wed_attach, |
| .tx_ring_setup = mtk_wed_tx_ring_setup, |
| .txfree_ring_setup = mtk_wed_txfree_ring_setup, |
| + .rx_ring_setup = mtk_wed_rx_ring_setup, |
| + .msg_update = mtk_wed_send_msg, |
| .start = mtk_wed_start, |
| .stop = mtk_wed_stop, |
| .reset_dma = mtk_wed_reset_dma, |
| @@ -1022,6 +1494,7 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, |
| .irq_get = mtk_wed_irq_get, |
| .irq_set_mask = mtk_wed_irq_set_mask, |
| .detach = mtk_wed_detach, |
| + .ppe_check = mtk_wed_ppe_check, |
| }; |
| struct device_node *eth_np = eth->dev->of_node; |
| struct platform_device *pdev; |
| diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h |
| index 9b17b7405..ec79b0d42 100644 |
| --- a/drivers/net/ethernet/mediatek/mtk_wed.h |
| +++ b/drivers/net/ethernet/mediatek/mtk_wed.h |
| @@ -13,6 +13,7 @@ |
| #define MTK_WED_PKT_SIZE 1900 |
| #define MTK_WED_BUF_SIZE 2048 |
| #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048) |
| +#define MTK_WED_RX_RING_SIZE 1536 |
| |
| #define MTK_WED_TX_RING_SIZE 2048 |
| #define MTK_WED_WDMA_RING_SIZE 512 |
| @@ -21,8 +22,15 @@ |
| #define MTK_WED_PER_GROUP_PKT 128 |
| |
| #define MTK_WED_FBUF_SIZE 128 |
| +#define MTK_WED_MIOD_CNT 16 |
| +#define MTK_WED_FB_CMD_CNT 1024 |
| +#define MTK_WED_RRO_QUE_CNT 8192 |
| +#define MTK_WED_MIOD_ENTRY_CNT 128 |
| + |
| +#define MODULE_ID_WO 1 |
| |
| struct mtk_eth; |
| +struct mtk_wed_wo; |
| |
| struct mtk_wed_hw { |
| struct device_node *node; |
| @@ -34,12 +42,14 @@ struct mtk_wed_hw { |
| struct regmap *mirror; |
| struct dentry *debugfs_dir; |
| struct mtk_wed_device *wed_dev; |
| + struct mtk_wed_wo *wed_wo; |
| u32 debugfs_reg; |
| u32 num_flows; |
| u32 wdma_phy; |
| char dirname[5]; |
| int irq; |
| int index; |
| + u32 ver; |
| }; |
| |
| struct mtk_wdma_info { |
| @@ -66,6 +76,18 @@ wed_r32(struct mtk_wed_device *dev, u32 reg) |
| return val; |
| } |
| |
| +static inline u32 |
| +wifi_r32(struct mtk_wed_device *dev, u32 reg) |
| +{ |
| + return readl(dev->wlan.base + reg); |
| +} |
| + |
| +static inline void |
| +wifi_w32(struct mtk_wed_device *dev, u32 reg, u32 val) |
| +{ |
| + writel(val, dev->wlan.base + reg); |
| +} |
| + |
| static inline void |
| wdma_w32(struct mtk_wed_device *dev, u32 reg, u32 val) |
| { |
| @@ -114,6 +136,23 @@ wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val) |
| writel(val, dev->txfree_ring.wpdma + reg); |
| } |
| |
| +static inline u32 |
| +wpdma_rx_r32(struct mtk_wed_device *dev, int ring, u32 reg) |
| +{ |
| + if (!dev->rx_ring[ring].wpdma) |
| + return 0; |
| + |
| + return readl(dev->rx_ring[ring].wpdma + reg); |
| +} |
| + |
| +static inline void |
| +wpdma_rx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val) |
| +{ |
| + if (!dev->rx_ring[ring].wpdma) |
| + return; |
| + |
| + writel(val, dev->rx_ring[ring].wpdma + reg); |
| +} |
| void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, |
| void __iomem *wdma, u32 wdma_phy, int index); |
| void mtk_wed_exit(void); |
| @@ -146,4 +185,15 @@ static inline void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw) |
| } |
| #endif |
| |
| +int wed_wo_hardware_init(struct mtk_wed_wo *wo, irq_handler_t isr); |
| +int wed_wo_mcu_init(struct mtk_wed_wo *wo); |
| +int mtk_wed_exception_init(struct mtk_wed_wo *wo); |
| +void mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo, struct sk_buff *skb); |
| +int mtk_wed_mcu_cmd_sanity_check(struct mtk_wed_wo *wo, struct sk_buff *skb); |
| +void wed_wo_mcu_debugfs(struct mtk_wed_hw *hw, struct dentry *dir); |
| +void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo, struct sk_buff *skb); |
| +int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int to_id, int cmd, |
| + const void *data, int len, bool wait_resp); |
| +int mtk_wed_wo_rx_poll(struct napi_struct *napi, int budget); |
| + |
| #endif |
| diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ccif.c b/drivers/net/ethernet/mediatek/mtk_wed_ccif.c |
| new file mode 100644 |
| index 000000000..732ffc8cf |
| --- /dev/null |
| +++ b/drivers/net/ethernet/mediatek/mtk_wed_ccif.c |
| @@ -0,0 +1,121 @@ |
| +// SPDX-License-Identifier: GPL-2.0-only |
| + |
| +#include <linux/soc/mediatek/mtk_wed.h> |
| +#include <linux/of_address.h> |
| +#include <linux/mfd/syscon.h> |
| +#include <linux/of_irq.h> |
| +#include "mtk_wed_ccif.h" |
| +#include "mtk_wed_regs.h" |
| +#include "mtk_wed_wo.h" |
| + |
| +static inline void woif_set_isr(struct mtk_wed_wo *wo, u32 mask) |
| +{ |
| + woccif_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask); |
| +} |
| + |
| +static inline u32 woif_get_csr(struct mtk_wed_wo *wo) |
| +{ |
| + u32 val; |
| + |
| + val = woccif_r32(wo, MTK_WED_WO_CCIF_RCHNUM); |
| + |
| + return val & MTK_WED_WO_CCIF_RCHNUM_MASK; |
| +} |
| + |
| +static inline void woif_set_ack(struct mtk_wed_wo *wo, u32 mask) |
| +{ |
| + woccif_w32(wo, MTK_WED_WO_CCIF_ACK, mask); |
| +} |
| + |
| +static inline void woif_kickout(struct mtk_wed_wo *wo) |
| +{ |
| + woccif_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM); |
| + woccif_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM); |
| +} |
| + |
| +static inline void woif_clear_int(struct mtk_wed_wo *wo, u32 mask) |
| +{ |
| + woccif_w32(wo, MTK_WED_WO_CCIF_ACK, mask); |
| + woccif_r32(wo, MTK_WED_WO_CCIF_RCHNUM); |
| +} |
| + |
| +int wed_wo_hardware_init(struct mtk_wed_wo *wo, irq_handler_t isr) |
| +{ |
| + static const struct wed_wo_drv_ops wo_drv_ops = { |
| + .kickout = woif_kickout, |
| + .set_ack = woif_set_ack, |
| + .set_isr = woif_set_isr, |
| + .get_csr = woif_get_csr, |
| + .clear_int = woif_clear_int, |
| + }; |
| + struct device_node *np, *node = wo->hw->node; |
| + struct wed_wo_queue_regs queues; |
| + struct regmap *regs; |
| + int ret; |
| + |
| + np = of_parse_phandle(node, "mediatek,ap2woccif", 0); |
| + if (!np) |
| + return -ENODEV; |
| + |
| + regs = syscon_regmap_lookup_by_phandle(np, NULL); |
| + if (IS_ERR(regs)) |
| + return PTR_ERR(regs); |
| + |
| + wo->drv_ops = &wo_drv_ops; |
| + |
| + wo->ccif.regs = regs; |
| + wo->ccif.irq = irq_of_parse_and_map(np, 0); |
| + |
| + spin_lock_init(&wo->ccif.irq_lock); |
| + |
| + ret = request_irq(wo->ccif.irq, isr, IRQF_TRIGGER_HIGH, |
| + "wo_ccif_isr", wo); |
| + if (ret) |
| + return ret; |
| + |
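| + /* tx/rx queue ring state is exchanged with the WO MCU through the |
| + * CCIF dummy/shadow registers |
| + */ |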
| + queues.desc_base = MTK_WED_WO_CCIF_DUMMY1; |
| + queues.ring_size = MTK_WED_WO_CCIF_DUMMY2; |
| + queues.cpu_idx = MTK_WED_WO_CCIF_DUMMY3; |
| + queues.dma_idx = MTK_WED_WO_CCIF_SHADOW4; |
| + |
| + ret = mtk_wed_wo_q_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE, |
| + MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM, |
| + &queues); |
| + |
| + if (ret) |
| + goto free_irq; |
| + |
| + queues.desc_base = MTK_WED_WO_CCIF_DUMMY5; |
| + queues.ring_size = MTK_WED_WO_CCIF_DUMMY6; |
| + queues.cpu_idx = MTK_WED_WO_CCIF_DUMMY7; |
| + queues.dma_idx = MTK_WED_WO_CCIF_SHADOW8; |
| + |
| + ret = mtk_wed_wo_q_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE, |
| + MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM, |
| + &queues); |
| + if (ret) |
| + goto free_irq; |
| + |
| + wo->ccif.q_int_mask = MTK_WED_WO_RXCH_INT_MASK; |
| + |
| + ret = mtk_wed_wo_q_init(wo, mtk_wed_wo_rx_poll); |
| + if (ret) |
| + goto free_irq; |
| + |
| + wo->ccif.q_exep_mask = MTK_WED_WO_EXCEPTION_INT_MASK; |
| + wo->ccif.irqmask = MTK_WED_WO_ALL_INT_MASK; |
| + |
| + /* rx queue irqmask */ |
| + wo->drv_ops->set_isr(wo, wo->ccif.irqmask); |
| + |
| + return 0; |
| + |
| +free_irq: |
| + free_irq(wo->ccif.irq, wo); |
| + |
| + return ret; |
| +} |
| + |
| +static void wed_wo_hardware_exit(struct mtk_wed_wo *wo) |
| +{ |
| +} |
| diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ccif.h b/drivers/net/ethernet/mediatek/mtk_wed_ccif.h |
| new file mode 100644 |
| index 000000000..68ade449c |
| --- /dev/null |
| +++ b/drivers/net/ethernet/mediatek/mtk_wed_ccif.h |
| @@ -0,0 +1,45 @@ |
| +/* SPDX-License-Identifier: GPL-2.0-only */ |
| + |
| +#ifndef __MTK_WED_CCIF_H |
| +#define __MTK_WED_CCIF_H |
| + |
| +#define MTK_WED_WO_RING_SIZE 256 |
| +#define MTK_WED_WO_CMD_LEN 1504 |
| + |
| +#define MTK_WED_WO_TXCH_NUM 0 |
| +#define MTK_WED_WO_RXCH_NUM 1 |
| +#define MTK_WED_WO_RXCH_WO_EXCEPTION 7 |
| + |
| +#define MTK_WED_WO_TXCH_INT_MASK BIT(0) |
| +#define MTK_WED_WO_RXCH_INT_MASK BIT(1) |
| +#define MTK_WED_WO_EXCEPTION_INT_MASK BIT(7) |
| +#define MTK_WED_WO_ALL_INT_MASK (MTK_WED_WO_RXCH_INT_MASK | \ |
| + MTK_WED_WO_EXCEPTION_INT_MASK) |
| + |
| +#define MTK_WED_WO_CCIF_BUSY 0x004 |
| +#define MTK_WED_WO_CCIF_START 0x008 |
| +#define MTK_WED_WO_CCIF_TCHNUM 0x00c |
| +#define MTK_WED_WO_CCIF_RCHNUM 0x010 |
| +#define MTK_WED_WO_CCIF_RCHNUM_MASK GENMASK(7, 0) |
| + |
| +#define MTK_WED_WO_CCIF_ACK 0x014 |
| +#define MTK_WED_WO_CCIF_IRQ0_MASK 0x018 |
| +#define MTK_WED_WO_CCIF_IRQ1_MASK 0x01c |
| +#define MTK_WED_WO_CCIF_DUMMY1 0x020 |
| +#define MTK_WED_WO_CCIF_DUMMY2 0x024 |
| +#define MTK_WED_WO_CCIF_DUMMY3 0x028 |
| +#define MTK_WED_WO_CCIF_DUMMY4 0x02c |
| +#define MTK_WED_WO_CCIF_SHADOW1 0x030 |
| +#define MTK_WED_WO_CCIF_SHADOW2 0x034 |
| +#define MTK_WED_WO_CCIF_SHADOW3 0x038 |
| +#define MTK_WED_WO_CCIF_SHADOW4 0x03c |
| +#define MTK_WED_WO_CCIF_DUMMY5 0x050 |
| +#define MTK_WED_WO_CCIF_DUMMY6 0x054 |
| +#define MTK_WED_WO_CCIF_DUMMY7 0x058 |
| +#define MTK_WED_WO_CCIF_DUMMY8 0x05c |
| +#define MTK_WED_WO_CCIF_SHADOW5 0x060 |
| +#define MTK_WED_WO_CCIF_SHADOW6 0x064 |
| +#define MTK_WED_WO_CCIF_SHADOW7 0x068 |
| +#define MTK_WED_WO_CCIF_SHADOW8 0x06c |
| + |
| +#endif |
| diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c |
| index f420f187e..fea7ae2fc 100644 |
| --- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c |
| +++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c |
| @@ -2,6 +2,7 @@ |
| /* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */ |
| |
| #include <linux/seq_file.h> |
| +#include <linux/soc/mediatek/mtk_wed.h> |
| #include "mtk_wed.h" |
| #include "mtk_wed_regs.h" |
| |
| @@ -18,6 +19,8 @@ enum { |
| DUMP_TYPE_WDMA, |
| DUMP_TYPE_WPDMA_TX, |
| DUMP_TYPE_WPDMA_TXFREE, |
| + DUMP_TYPE_WPDMA_RX, |
| + DUMP_TYPE_WED_RRO, |
| }; |
| |
| #define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING } |
| @@ -36,6 +39,10 @@ enum { |
| |
| #define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n) |
| #define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE) |
| +#define DUMP_WPDMA_RX_RING(_n) DUMP_RING("WPDMA_RX" #_n, 0, DUMP_TYPE_WPDMA_RX, _n) |
| +#define DUMP_WED_RRO_RING(_base) DUMP_RING("WED_RRO_MIOD", MTK_##_base, DUMP_TYPE_WED_RRO) |
| +#define DUMP_WED_RRO_FDBK(_base) DUMP_RING("WED_RRO_FDBK", MTK_##_base, DUMP_TYPE_WED_RRO) |
| |
| static void |
| print_reg_val(struct seq_file *s, const char *name, u32 val) |
| @@ -58,6 +65,7 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev, |
| cur->name); |
| continue; |
| case DUMP_TYPE_WED: |
| + case DUMP_TYPE_WED_RRO: |
| val = wed_r32(dev, cur->offset); |
| break; |
| case DUMP_TYPE_WDMA: |
| @@ -69,6 +77,9 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev, |
| case DUMP_TYPE_WPDMA_TXFREE: |
| val = wpdma_txfree_r32(dev, cur->offset); |
| break; |
| + case DUMP_TYPE_WPDMA_RX: |
| + val = wpdma_rx_r32(dev, cur->base, cur->offset); |
| + break; |
| } |
| print_reg_val(s, cur->name, val); |
| } |
| @@ -132,6 +143,81 @@ wed_txinfo_show(struct seq_file *s, void *data) |
| } |
| DEFINE_SHOW_ATTRIBUTE(wed_txinfo); |
| |
| +static int |
| +wed_rxinfo_show(struct seq_file *s, void *data) |
| +{ |
| + static const struct reg_dump regs[] = { |
| + DUMP_STR("WPDMA RX"), |
| + DUMP_WPDMA_RX_RING(0), |
| + DUMP_WPDMA_RX_RING(1), |
| + |
| + DUMP_STR("WPDMA RX"), |
| + DUMP_WED(WED_WPDMA_RX_D_MIB(0)), |
| + DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(0)), |
| + DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(0)), |
| + DUMP_WED(WED_WPDMA_RX_D_MIB(1)), |
| + DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(1)), |
| + DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(1)), |
| + DUMP_WED(WED_WPDMA_RX_D_COHERENT_MIB), |
| + |
| + DUMP_STR("WED RX"), |
| + DUMP_WED_RING(WED_RING_RX_DATA(0)), |
| + DUMP_WED_RING(WED_RING_RX_DATA(1)), |
| + |
| + DUMP_STR("WED RRO"), |
| + DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0), |
| + DUMP_WED(WED_RROQM_MID_MIB), |
| + DUMP_WED(WED_RROQM_MOD_MIB), |
| + DUMP_WED(WED_RROQM_MOD_COHERENT_MIB), |
| + DUMP_WED_RRO_FDBK(WED_RROQM_FDBK_CTRL0), |
| + DUMP_WED(WED_RROQM_FDBK_IND_MIB), |
| + DUMP_WED(WED_RROQM_FDBK_ENQ_MIB), |
| + DUMP_WED(WED_RROQM_FDBK_ANC_MIB), |
| + DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB), |
| + |
| + DUMP_STR("WED Route QM"), |
| + DUMP_WED(WED_RTQM_R2H_MIB(0)), |
| + DUMP_WED(WED_RTQM_R2Q_MIB(0)), |
| + DUMP_WED(WED_RTQM_Q2H_MIB(0)), |
| + DUMP_WED(WED_RTQM_R2H_MIB(1)), |
| + DUMP_WED(WED_RTQM_R2Q_MIB(1)), |
| + DUMP_WED(WED_RTQM_Q2H_MIB(1)), |
| + DUMP_WED(WED_RTQM_Q2N_MIB), |
| + DUMP_WED(WED_RTQM_Q2B_MIB), |
| + DUMP_WED(WED_RTQM_PFDBK_MIB), |
| + |
| + DUMP_STR("WED WDMA TX"), |
| + DUMP_WED(WED_WDMA_TX_MIB), |
| + DUMP_WED_RING(WED_WDMA_RING_TX), |
| + |
| + DUMP_STR("WDMA TX"), |
| + DUMP_WDMA(WDMA_GLO_CFG), |
| + DUMP_WDMA_RING(WDMA_RING_TX(0)), |
| + DUMP_WDMA_RING(WDMA_RING_TX(1)), |
| + |
| + DUMP_STR("WED RX BM"), |
| + DUMP_WED(WED_RX_BM_BASE), |
| + DUMP_WED(WED_RX_BM_RX_DMAD), |
| + DUMP_WED(WED_RX_BM_PTR), |
| + DUMP_WED(WED_RX_BM_TKID_MIB), |
| + DUMP_WED(WED_RX_BM_BLEN), |
| + DUMP_WED(WED_RX_BM_STS), |
| + DUMP_WED(WED_RX_BM_INTF2), |
| + DUMP_WED(WED_RX_BM_INTF), |
| + DUMP_WED(WED_RX_BM_ERR_STS), |
| + }; |
| + |
| + struct mtk_wed_hw *hw = s->private; |
| + struct mtk_wed_device *dev = hw->wed_dev; |
| + |
| + if (!dev) |
| + return 0; |
| + |
| + dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs)); |
| + |
| + return 0; |
| +} |
| +DEFINE_SHOW_ATTRIBUTE(wed_rxinfo); |
| |
| static int |
| mtk_wed_reg_set(void *data, u64 val) |
| @@ -175,4 +261,8 @@ void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw) |
| debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg); |
| debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval); |
| debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops); |
| + debugfs_create_file_unsafe("rxinfo", 0400, dir, hw, &wed_rxinfo_fops); |
| + if (hw->ver > MTK_WED_V1) |
| + wed_wo_mcu_debugfs(hw, dir); |
| } |
| diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c |
| new file mode 100644 |
| index 000000000..bd1ab9500 |
| --- /dev/null |
| +++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c |
| @@ -0,0 +1,561 @@ |
| +// SPDX-License-Identifier: GPL-2.0-only |
| + |
| +#include <linux/skbuff.h> |
| +#include <linux/debugfs.h> |
| +#include <linux/firmware.h> |
| +#include <linux/of_address.h> |
| +#include <linux/soc/mediatek/mtk_wed.h> |
| +#include "mtk_wed_regs.h" |
| +#include "mtk_wed_mcu.h" |
| +#include "mtk_wed_wo.h" |
| + |
| +struct sk_buff * |
| +mtk_wed_mcu_msg_alloc(struct mtk_wed_wo *wo, |
| + const void *data, int data_len) |
| +{ |
| + const struct wed_wo_mcu_ops *ops = wo->mcu_ops; |
| + int length = ops->headroom + data_len; |
| + struct sk_buff *skb; |
| + |
| + skb = alloc_skb(length, GFP_KERNEL); |
| + if (!skb) |
| + return NULL; |
| + |
| + memset(skb->head, 0, length); |
| + skb_reserve(skb, ops->headroom); |
| + |
| + if (data && data_len) |
| + skb_put_data(skb, data, data_len); |
| + |
| + return skb; |
| +} |
| + |
| +struct sk_buff * |
| +mtk_wed_mcu_get_response(struct mtk_wed_wo *wo, unsigned long expires) |
| +{ |
| + unsigned long timeout; |
| + |
| + if (!time_is_after_jiffies(expires)) |
| + return NULL; |
| + |
| + timeout = expires - jiffies; |
| + wait_event_timeout(wo->mcu.wait, |
| + (!skb_queue_empty(&wo->mcu.res_q)), |
| + timeout); |
| + |
| + return skb_dequeue(&wo->mcu.res_q); |
| +} |
| + |
| +int |
| +mtk_wed_mcu_skb_send_and_get_msg(struct mtk_wed_wo *wo, |
| + int to_id, int cmd, struct sk_buff *skb, |
| + bool wait_resp, struct sk_buff **ret_skb) |
| +{ |
| + unsigned long expires; |
| + int ret, seq; |
| + |
| + if (ret_skb) |
| + *ret_skb = NULL; |
| + |
| + mutex_lock(&wo->mcu.mutex); |
| + |
| + ret = wo->mcu_ops->mcu_skb_send_msg(wo, to_id, cmd, skb, &seq, wait_resp); |
| + if (ret < 0) |
| + goto out; |
| + |
| + if (!wait_resp) { |
| + ret = 0; |
| + goto out; |
| + } |
| + |
| + expires = jiffies + wo->mcu.timeout; |
| + |
| + do { |
| + skb = mtk_wed_mcu_get_response(wo, expires); |
| + ret = wo->mcu_ops->mcu_parse_response(wo, cmd, skb, seq); |
| + |
| + if (!ret && ret_skb) |
| + *ret_skb = skb; |
| + else |
| + dev_kfree_skb(skb); |
| + } while (ret == -EAGAIN); |
| + |
| +out: |
| + mutex_unlock(&wo->mcu.mutex); |
| + |
| + return ret; |
| +} |
| + |
| +void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo, |
| + struct sk_buff *skb) |
| +{ |
| + skb_queue_tail(&wo->mcu.res_q, skb); |
| + wake_up(&wo->mcu.wait); |
| +} |
| + |
| +static int mtk_wed_mcu_send_and_get_msg(struct mtk_wed_wo *wo, |
| + int to_id, int cmd, const void *data, int len, |
| + bool wait_resp, struct sk_buff **ret_skb) |
| +{ |
| + struct sk_buff *skb; |
| + |
| + skb = mtk_wed_mcu_msg_alloc(wo, data, len); |
| + if (!skb) |
| + return -ENOMEM; |
| + |
| + return mtk_wed_mcu_skb_send_and_get_msg(wo, to_id, cmd, skb, wait_resp, ret_skb); |
| +} |
| + |
| +int |
| +mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, |
| + int to_id, int cmd, |
| + const void *data, int len, bool wait_resp) |
| +{ |
| + struct sk_buff *skb = NULL; |
| + int ret = 0; |
| + |
| + ret = mtk_wed_mcu_send_and_get_msg(wo, to_id, cmd, data, |
| + len, wait_resp, &skb); |
| + if (skb) |
| + dev_kfree_skb(skb); |
| + |
| + return ret; |
| +} |
| + |
| +int mtk_wed_exception_init(struct mtk_wed_wo *wo) |
| +{ |
| + struct wed_wo_exception *exp = &wo->exp; |
| + struct { |
| + u32 arg0; |
| + u32 arg1; |
| + } req; |
| + |
| + exp->log_size = EXCEPTION_LOG_SIZE; |
| + exp->log = kzalloc(exp->log_size, GFP_ATOMIC); |
| + if (!exp->log) |
| + return -ENOMEM; |
| + |
| + exp->phys = dma_map_single(wo->hw->dev, exp->log, exp->log_size, |
| + DMA_FROM_DEVICE); |
| + |
| + if (unlikely(dma_mapping_error(wo->hw->dev, exp->phys))) { |
| + dev_err(wo->hw->dev, "dma map error\n"); |
| + goto free; |
| + } |
| + |
| + req.arg0 = (u32)exp->phys; |
| + req.arg1 = (u32)exp->log_size; |
| + |
| + return mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, WO_CMD_EXCEPTION_INIT, |
| + &req, sizeof(req), false); |
| + |
| +free: |
| + kfree(exp->log); |
| + return -ENOMEM; |
| +} |
| + |
| +int |
| +mtk_wed_mcu_cmd_sanity_check(struct mtk_wed_wo *wo, struct sk_buff *skb) |
| +{ |
| + struct wed_cmd_hdr *hdr = (struct wed_cmd_hdr *)skb->data; |
| + |
| + if (skb->len < sizeof(struct wed_cmd_hdr)) |
| + return WARP_INVALID_PARA_STATUS; |
| + |
| + if (hdr->ver != 0) |
| + return WARP_INVALID_PARA_STATUS; |
| + |
| + if (skb->len != hdr->length) |
| + return WARP_INVALID_PARA_STATUS; |
| + |
| + return WARP_OK_STATUS; |
| +} |
| + |
| +void |
| +mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo, struct sk_buff *skb) |
| +{ |
| + struct wed_cmd_hdr *hdr = (struct wed_cmd_hdr *)skb->data; |
| + struct wed_wo_log *record; |
| + char *msg = (char *)(skb->data + sizeof(struct wed_cmd_hdr)); |
| + u16 msg_len = skb->len - sizeof(struct wed_cmd_hdr); |
| + u32 i, cnt = 0; |
| + |
| + switch (hdr->cmd_id) { |
| + case WO_EVT_LOG_DUMP: |
| + pr_info("[WO LOG]: %s\n", msg); |
| + break; |
| + case WO_EVT_PROFILING: |
| + cnt = msg_len / (sizeof(struct wed_wo_log)); |
| + record = (struct wed_wo_log *) msg; |
| + dev_info(wo->hw->dev, "[WO Profiling]: %u reports arrived\n", cnt); |
| + |
| + for (i = 0; i < cnt; i++) { |
| + //PROFILE_STAT(wo->total, record[i].total); |
| + //PROFILE_STAT(wo->mod, record[i].mod); |
| + //PROFILE_STAT(wo->rro, record[i].rro); |
| + |
| + dev_info(wo->hw->dev, "[WO Profiling]: SN:%u with latency: total=%u, rro:%u, mod:%u\n", |
| + record[i].sn, |
| + record[i].total, |
| + record[i].rro, |
| + record[i].mod); |
| + } |
| + break; |
| + |
| + default: |
| + break; |
| + } |
| + |
| + dev_kfree_skb(skb); |
| + |
| +} |
| + |
| +static int |
| +mtk_wed_load_firmware(struct mtk_wed_wo *wo) |
| +{ |
| + struct fw_info { |
| + __le32 decomp_crc; |
| + __le32 decomp_len; |
| + __le32 decomp_blk_sz; |
| + u8 reserved[4]; |
| + __le32 addr; |
| + __le32 len; |
| + u8 feature_set; |
| + u8 reserved1[15]; |
| + } __packed *region; |
| + |
| + char *mcu; |
| + const struct mtk_wed_fw_trailer *hdr; |
| + static u8 shared[MAX_REGION_SIZE] = {0}; |
| + const struct firmware *fw; |
| + int ret, i; |
| + u32 ofs = 0; |
| + u32 boot_cr, val; |
| + |
| + mcu = wo->hw->index ? MT7986_FIRMWARE_WO_2 : MT7986_FIRMWARE_WO_1; |
| + |
| + ret = request_firmware(&fw, mcu, wo->hw->dev); |
| + if (ret) |
| + return ret; |
| + |
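| + /* the image layout is: region payloads, then one fw_info record |
| + * per region, then the trailer at the very end |
| + */ |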
| + hdr = (const struct mtk_wed_fw_trailer *)(fw->data + fw->size - |
| + sizeof(*hdr)); |
| + |
| + dev_info(wo->hw->dev, "WO Firmware Version: %.10s, Build Time: %.15s\n", |
| + hdr->fw_ver, hdr->build_date); |
| + |
| + for (i = 0; i < hdr->n_region; i++) { |
| + int j = 0; |
| + region = (struct fw_info *)(fw->data + fw->size - |
| + sizeof(*hdr) - |
| + sizeof(*region) * |
| + (hdr->n_region - i)); |
| + |
| + while (j < MAX_REGION_SIZE) { |
| + struct mtk_wed_fw_region *wo_region; |
| + |
| + wo_region = &wo->region[j]; |
| + if (!wo_region->addr) |
| + break; |
| + |
| + if (wo_region->addr_pa == region->addr) { |
| + if (!wo_region->shared) { |
| + memcpy(wo_region->addr, |
| + fw->data + ofs, region->len); |
| + } else if (!shared[j]) { |
| + memcpy(wo_region->addr, |
| + fw->data + ofs, region->len); |
| + shared[j] = true; |
| + } |
| + } |
| + j++; |
| + } |
| + |
| + if (j == __WO_REGION_MAX) { |
| + ret = -ENOENT; |
| + goto done; |
| + } |
| + ofs += region->len; |
| + } |
| + |
| + /* write the start address */ |
| + boot_cr = wo->hw->index ? |
| + WOX_MCU_CFG_LS_WA_BOOT_ADDR_ADDR : WOX_MCU_CFG_LS_WM_BOOT_ADDR_ADDR; |
| + wo_w32(wo, boot_cr, (wo->region[WO_REGION_EMI].addr_pa >> 16)); |
| + |
| + /* wo firmware reset */ |
| + wo_w32(wo, WOX_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00); |
| + |
| + val = wo_r32(wo, WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR); |
| + |
| + val |= wo->hw->index ? WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WA_CPU_RSTB_MASK : |
| + WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WM_CPU_RSTB_MASK; |
| + |
| + wo_w32(wo, WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val); |
| + |
| +done: |
| + release_firmware(fw); |
| + |
| + return ret; |
| +} |
| + |
| +static int |
| +mtk_wed_get_firmware_region(struct mtk_wed_wo *wo) |
| +{ |
| + struct device_node *node, *np = wo->hw->node; |
| + struct mtk_wed_fw_region *region; |
| + struct resource res; |
| + const char *compat; |
| + int i, ret; |
| + |
| + static const char *const wo_region_compat[__WO_REGION_MAX] = { |
| + [WO_REGION_EMI] = WOCPU_EMI_DEV_NODE, |
| + [WO_REGION_ILM] = WOCPU_ILM_DEV_NODE, |
| + [WO_REGION_DATA] = WOCPU_DATA_DEV_NODE, |
| + [WO_REGION_BOOT] = WOCPU_BOOT_DEV_NODE, |
| + }; |
| + |
| + for (i = 0; i < __WO_REGION_MAX; i++) { |
| + region = &wo->region[i]; |
| + compat = wo_region_compat[i]; |
| + |
| + node = of_parse_phandle(np, compat, 0); |
| + if (!node) |
| + return -ENODEV; |
| + |
| + ret = of_address_to_resource(node, 0, &res); |
| + if (ret) |
| + return ret; |
| + |
| + region->addr_pa = res.start; |
| + region->size = resource_size(&res); |
| + region->addr = ioremap(region->addr_pa, region->size); |
| + |
| + of_property_read_u32_index(node, "shared", 0, ®ion->shared); |
| + } |
| + |
| + return 0; |
| +} |
| + |
| +static int |
| +wo_mcu_send_message(struct mtk_wed_wo *wo, |
| + int to_id, int cmd, struct sk_buff *skb, |
| + int *wait_seq, bool wait_resp) |
| +{ |
| + struct wed_cmd_hdr *hdr; |
| + u8 seq = 0; |
| + |
| + /* TODO: make the timeout dynamic based on msg type */ |
| + wo->mcu.timeout = 20 * HZ; |
| + |
| + if (wait_resp && wait_seq) { |
| + seq = wo->mcu.msg_seq++; |
| + *wait_seq = seq; |
| + } |
| + |
| + hdr = (struct wed_cmd_hdr *)skb_push(skb, sizeof(*hdr)); |
| + |
| + hdr->cmd_id = cmd; |
| + hdr->length = cpu_to_le16(skb->len); |
| + hdr->uni_id = seq; |
| + |
| + if (to_id == MODULE_ID_WO) |
| + hdr->flag |= WARP_CMD_FLAG_FROM_TO_WO; |
| + |
| + if (wait_resp && wait_seq) |
| + hdr->flag |= WARP_CMD_FLAG_NEED_RSP; |
| + |
| + return mtk_wed_wo_q_tx_skb(wo, &wo->q_tx, skb); |
| +} |
| + |
| +static int |
| +wo_mcu_parse_response(struct mtk_wed_wo *wo, int cmd, |
| + struct sk_buff *skb, int seq) |
| +{ |
| + struct wed_cmd_hdr *hdr; |
| + |
| + if (!skb) { |
| + dev_err(wo->hw->dev, "Message %08x (seq %d) timeout\n", |
| + cmd, seq); |
| + return -ETIMEDOUT; |
| + } |
| + |
| + hdr = (struct wed_cmd_hdr *)skb->data; |
| + if (seq != hdr->uni_id) { |
| + dev_err(wo->hw->dev, "Message %08x (seq %d) does not match uid (%d)\n", |
| + cmd, seq, hdr->uni_id); |
| + return -EAGAIN; |
| + } |
| + |
| + //skb_pull(skb, sizeof(struct wed_cmd_hdr)); |
| + |
| + return 0; |
| +} |
| + |
| +int wed_wo_mcu_init(struct mtk_wed_wo *wo) |
| +{ |
| + static const struct wed_wo_mcu_ops wo_mcu_ops = { |
| + .headroom = sizeof(struct wed_cmd_hdr), |
| + .mcu_skb_send_msg = wo_mcu_send_message, |
| + .mcu_parse_response = wo_mcu_parse_response, |
| + /* TODO: .mcu_restart = wo_mcu_restart, */ |
| + }; |
| + unsigned long timeout = jiffies + FW_DL_TIMEOUT; |
| + int ret; |
| + u32 val; |
| + |
| + wo->mcu_ops = &wo_mcu_ops; |
| + |
| + ret = mtk_wed_get_firmware_region(wo); |
| + if (ret) |
| + return ret; |
| + |
| + /* set dummy cr */ |
| + wed_w32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * WED_DUMMY_CR_FWDL, |
| + wo->hw->index + 1); |
| + |
| + ret = mtk_wed_load_firmware(wo); |
| + if (ret) |
| + return ret; |
| + |
| + do { |
| + /* get dummy cr */ |
| + val = wed_r32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * WED_DUMMY_CR_FWDL); |
| + } while (val != 0 && !time_after(jiffies, timeout)); |
| + |
| + if (val) |
| + return -EBUSY; |
| + |
| + return 0; |
| +} |
| + |
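| +/* |
| + * debugfs "wo_write" handler: translate a command string (plus optional |
| + * numeric arguments) into a WO MCU message and send it to the firmware. |
| + */ |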
| +static ssize_t |
| +mtk_wed_wo_ctrl(struct file *file, |
| + const char __user *user_buf, |
| + size_t count, |
| + loff_t *ppos) |
| +{ |
| + struct mtk_wed_hw *hw = file->private_data; |
| + struct mtk_wed_wo *wo = hw->wed_wo; |
| + char buf[100], *cmd = NULL, *input[11] = {0}; |
| + char msgbuf[128] = {0}; |
| + struct wo_cmd_query *query = (struct wo_cmd_query *)msgbuf; |
| + u32 cmd_id; |
| + bool wait = false; |
| + char *sub_str = NULL; |
| + int input_idx = 0, input_total = 0, scan_num = 0; |
| + char *p; |
| + |
| + if (count >= sizeof(buf)) |
| + return -EINVAL; |
| + |
| + if (copy_from_user(buf, user_buf, count)) |
| + return -EFAULT; |
| + |
| + if (count && buf[count - 1] == '\n') |
| + buf[count - 1] = '\0'; |
| + else |
| + buf[count] = '\0'; |
| + |
| + p = buf; |
| + |
| + while (input_idx < ARRAY_SIZE(input) && (sub_str = strsep(&p, " ")) != NULL) { |
| + input[input_idx] = sub_str; |
| + input_idx++; |
| + input_total++; |
| + } |
| + cmd = input[0]; |
| + if (input_total == 1 && cmd) { |
| + if (strncmp(cmd, "bainfo", strlen(cmd)) == 0) { |
| + cmd_id = WO_CMD_BA_INFO_DUMP; |
| + } else if (strncmp(cmd, "bactrl", strlen(cmd)) == 0) { |
| + cmd_id = WO_CMD_BA_CTRL_DUMP; |
| + } else if (strncmp(cmd, "fbcmdq", strlen(cmd)) == 0) { |
| + cmd_id = WO_CMD_FBCMD_Q_DUMP; |
| + } else if (strncmp(cmd, "logflush", strlen(cmd)) == 0) { |
| + cmd_id = WO_CMD_LOG_FLUSH; |
| + } else if (strncmp(cmd, "cpustat.dump", strlen(cmd)) == 0) { |
| + cmd_id = WO_CMD_CPU_STATS_DUMP; |
| + } else if (strncmp(cmd, "state", strlen(cmd)) == 0) { |
| + cmd_id = WO_CMD_WED_RX_STAT; |
| + } else if (strncmp(cmd, "prof_hit_dump", strlen(cmd)) == 0) { |
| + //wo_profiling_report(); |
| + return count; |
| + } else if (strncmp(cmd, "rxcnt_info", strlen(cmd)) == 0) { |
| + cmd_id = WO_CMD_RXCNT_INFO; |
| + wait = true; |
| + } else { |
| + pr_info("(%s) unknown comand string(%s)!\n", __func__, cmd); |
| + return count; |
| + } |
| + } else if (input_total > 1) { |
| + for (input_idx = 1 ; input_idx < input_total ; input_idx++) { |
| + scan_num = sscanf(input[input_idx], "%u", &query->query0 + (input_idx - 1)); |
| + |
| + if (scan_num < 1) { |
| + pr_info("(%s) require more input!\n", __func__); |
| + return count; |
| + } |
| + } |
| + if(strncmp(cmd, "devinfo", strlen(cmd)) == 0) { |
| + cmd_id = WO_CMD_DEV_INFO_DUMP; |
| + } else if (strncmp(cmd, "bssinfo", strlen(cmd)) == 0) { |
| + cmd_id = WO_CMD_BSS_INFO_DUMP; |
| + } else if (strncmp(cmd, "starec", strlen(cmd)) == 0) { |
| + cmd_id = WO_CMD_STA_REC_DUMP; |
| + } else if (strncmp(cmd, "starec_ba", strlen(cmd)) == 0) { |
| + cmd_id = WO_CMD_STA_BA_DUMP; |
| + } else if (strncmp(cmd, "logctrl", strlen(cmd)) == 0) { |
| + cmd_id = WO_CMD_FW_LOG_CTRL; |
| + } else if (strncmp(cmd, "cpustat.en", strlen(cmd)) == 0) { |
| + cmd_id = WO_CMD_CPU_STATS_ENABLE; |
| + } else if (strncmp(cmd, "prof_conf", strlen(cmd)) == 0) { |
| + cmd_id = WO_CMD_PROF_CTRL; |
| + } else if (strncmp(cmd, "rxcnt_ctrl", strlen(cmd)) == 0) { |
| + cmd_id = WO_CMD_RXCNT_CTRL; |
| + } else if (strncmp(cmd, "dbg_set", strlen(cmd)) == 0) { |
| + cmd_id = WO_CMD_DBG_INFO; |
| + } else { |
| + pr_info("(%s) unknown command string (%s)!\n", __func__, cmd); |
| + return count; |
| + } |
| + } else { |
| + dev_info(hw->dev, "usage: echo cmd='cmd_str' > wo_write\n"); |
| + dev_info(hw->dev, "cmd_str value range:\n"); |
| + dev_info(hw->dev, "\tbainfo:\n"); |
| + dev_info(hw->dev, "\tbactrl:\n"); |
| + dev_info(hw->dev, "\tfbcmdq:\n"); |
| + dev_info(hw->dev, "\tlogflush:\n"); |
| + dev_info(hw->dev, "\tcpustat.dump:\n"); |
| + dev_info(hw->dev, "\tprof_hit_dump:\n"); |
| + dev_info(hw->dev, "\trxcnt_info:\n"); |
| + dev_info(hw->dev, "\tdevinfo:\n"); |
| + dev_info(hw->dev, "\tbssinfo:\n"); |
| + dev_info(hw->dev, "\tstarec:\n"); |
| + dev_info(hw->dev, "\tstarec_ba:\n"); |
| + dev_info(hw->dev, "\tlogctrl:\n"); |
| + dev_info(hw->dev, "\tcpustat.en:\n"); |
| + dev_info(hw->dev, "\tprof_conf:\n"); |
| + dev_info(hw->dev, "\trxcnt_ctrl:\n"); |
| + dev_info(hw->dev, "\tdbg_set [level] [category]:\n"); |
| + return count; |
| + } |
| + |
| + mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, cmd_id, (void *)msgbuf, sizeof(struct wo_cmd_query), wait); |
| + |
| + return count; |
| + |
| +} |
| + |
| +static const struct file_operations fops_wo_ctrl = { |
| + .write = mtk_wed_wo_ctrl, |
| + .open = simple_open, |
| + .llseek = default_llseek, |
| +}; |
| + |
| +void wed_wo_mcu_debugfs(struct mtk_wed_hw *hw, struct dentry *dir) |
| +{ |
| + if (!dir) |
| + return; |
| + |
| + debugfs_create_file("wo_write", 0600, dir, hw, &fops_wo_ctrl); |
| +} |
| + |
| diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.h b/drivers/net/ethernet/mediatek/mtk_wed_mcu.h |
| new file mode 100644 |
| index 000000000..6a5ac7672 |
| --- /dev/null |
| +++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.h |
| @@ -0,0 +1,125 @@ |
| +/* SPDX-License-Identifier: GPL-2.0-only */ |
| + |
| +#ifndef __MTK_WED_MCU_H |
| +#define __MTK_WED_MCU_H |
| + |
| +#define EXCEPTION_LOG_SIZE 32768 |
| +#define WOCPU_MCUSYS_RESET_ADDR 0x15194050 |
| +#define WOCPU_WO0_MCUSYS_RESET_MASK 0x20 |
| +#define WOCPU_WO1_MCUSYS_RESET_MASK 0x1 |
| + |
| +#define WARP_INVALID_LENGTH_STATUS (-2) |
| +#define WARP_NULL_POINTER_STATUS (-3) |
| +#define WARP_INVALID_PARA_STATUS (-4) |
| +#define WARP_NOT_HANDLE_STATUS (-5) |
| +#define WARP_FAIL_STATUS (-1) |
| +#define WARP_OK_STATUS (0) |
| +#define WARP_ALREADY_DONE_STATUS (1) |
| + |
| +#define MT7986_FIRMWARE_WO_1 "mediatek/mt7986_wo_0.bin" |
| +#define MT7986_FIRMWARE_WO_2 "mediatek/mt7986_wo_1.bin" |
| + |
| +#define WOCPU_EMI_DEV_NODE "mediatek,wocpu_emi" |
| +#define WOCPU_ILM_DEV_NODE "mediatek,wocpu_ilm" |
| +#define WOCPU_DLM_DEV_NODE "mediatek,wocpu_dlm" |
| +#define WOCPU_DATA_DEV_NODE "mediatek,wocpu_data" |
| +#define WOCPU_BOOT_DEV_NODE "mediatek,wocpu_boot" |
| + |
| +#define FW_DL_TIMEOUT ((3000 * HZ) / 1000) |
| +#define WOCPU_TIMEOUT ((1000 * HZ) / 1000) |
| + |
| +#define MAX_REGION_SIZE 3 |
| + |
| +#define WOX_MCU_CFG_LS_BASE 0 /*0x15194000*/ |
| + |
| +#define WOX_MCU_CFG_LS_HW_VER_ADDR (WOX_MCU_CFG_LS_BASE + 0x000) // 4000 |
| +#define WOX_MCU_CFG_LS_FW_VER_ADDR (WOX_MCU_CFG_LS_BASE + 0x004) // 4004 |
| +#define WOX_MCU_CFG_LS_CFG_DBG1_ADDR (WOX_MCU_CFG_LS_BASE + 0x00C) // 400C |
| +#define WOX_MCU_CFG_LS_CFG_DBG2_ADDR (WOX_MCU_CFG_LS_BASE + 0x010) // 4010 |
| +#define WOX_MCU_CFG_LS_WF_MCCR_ADDR (WOX_MCU_CFG_LS_BASE + 0x014) // 4014 |
| +#define WOX_MCU_CFG_LS_WF_MCCR_SET_ADDR (WOX_MCU_CFG_LS_BASE + 0x018) // 4018 |
| +#define WOX_MCU_CFG_LS_WF_MCCR_CLR_ADDR (WOX_MCU_CFG_LS_BASE + 0x01C) // 401C |
| +#define WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR (WOX_MCU_CFG_LS_BASE + 0x050) // 4050 |
| +#define WOX_MCU_CFG_LS_WM_BOOT_ADDR_ADDR (WOX_MCU_CFG_LS_BASE + 0x060) // 4060 |
| +#define WOX_MCU_CFG_LS_WA_BOOT_ADDR_ADDR (WOX_MCU_CFG_LS_BASE + 0x064) // 4064 |
| + |
| +#define WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WM_CPU_RSTB_MASK BIT(5) |
| +#define WOX_MCU_CFG_LS_WF_MCU_CFG_WM_WA_WA_CPU_RSTB_MASK BIT(0) |
| + |
| +enum wo_event_id { |
| + WO_EVT_LOG_DUMP = 0x1, |
| + WO_EVT_PROFILING = 0x2, |
| + WO_EVT_RXCNT_INFO = 0x3 |
| +}; |
| + |
| +enum wo_cmd_id { |
| + WO_CMD_WED_CFG = 0, |
| + WO_CMD_WED_RX_STAT, |
| + WO_CMD_RRO_SER, |
| + WO_CMD_DBG_INFO, |
| + WO_CMD_DEV_INFO, |
| + WO_CMD_BSS_INFO, |
| + WO_CMD_STA_REC, |
| + WO_CMD_DEV_INFO_DUMP, |
| + WO_CMD_BSS_INFO_DUMP, |
| + WO_CMD_STA_REC_DUMP, |
| + WO_CMD_BA_INFO_DUMP, |
| + WO_CMD_FBCMD_Q_DUMP, |
| + WO_CMD_FW_LOG_CTRL, |
| + WO_CMD_LOG_FLUSH, |
| + WO_CMD_CHANGE_STATE, |
| + WO_CMD_CPU_STATS_ENABLE, |
| + WO_CMD_CPU_STATS_DUMP, |
| + WO_CMD_EXCEPTION_INIT, |
| + WO_CMD_PROF_CTRL, |
| + WO_CMD_STA_BA_DUMP, |
| + WO_CMD_BA_CTRL_DUMP, |
| + WO_CMD_RXCNT_CTRL, |
| + WO_CMD_RXCNT_INFO, |
| + WO_CMD_SET_CAP, |
| + WO_CMD_CCIF_RING_DUMP, |
| + WO_CMD_WED_END |
| +}; |
| + |
| +enum wo_state { |
| + WO_STATE_UNDEFINED = 0x0, |
| + WO_STATE_INIT = 0x1, |
| + WO_STATE_ENABLE = 0x2, |
| + WO_STATE_DISABLE = 0x3, |
| + WO_STATE_HALT = 0x4, |
| + WO_STATE_GATING = 0x5, |
| + WO_STATE_SER_RESET = 0x6, |
| + WO_STATE_WF_RESET = 0x7, |
| + WO_STATE_END |
| +}; |
| + |
| +enum wo_done_state { |
| + WOIF_UNDEFINED = 0, |
| + WOIF_DISABLE_DONE = 1, |
| + WOIF_TRIGGER_ENABLE = 2, |
| + WOIF_ENABLE_DONE = 3, |
| + WOIF_TRIGGER_GATING = 4, |
| + WOIF_GATING_DONE = 5, |
| + WOIF_TRIGGER_HALT = 6, |
| + WOIF_HALT_DONE = 7, |
| +}; |
| + |
| +enum wed_dummy_cr_idx { |
| + WED_DUMMY_CR_FWDL = 0, |
| + WED_DUMMY_CR_WO_STATUS = 1 |
| +}; |
| + |
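| +/* firmware image metadata: chip/ECO ids, region count, version strings and CRC */ |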
| +struct mtk_wed_fw_trailer { |
| + u8 chip_id; |
| + u8 eco_code; |
| + u8 n_region; |
| + u8 format_ver; |
| + u8 format_flag; |
| + u8 reserved[2]; |
| + char fw_ver[10]; |
| + char build_date[15]; |
| + u32 crc; |
| +}; |
| + |
| +#endif |
| diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h |
| index 69f136ed4..e911b5315 100644 |
| --- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h |
| +++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h |
| @@ -4,6 +4,8 @@ |
| #ifndef __MTK_WED_REGS_H |
| #define __MTK_WED_REGS_H |
| |
| +#define MTK_WFDMA_DESC_CTRL_TO_HOST BIT(8) |
| + |
| #if defined(CONFIG_MEDIATEK_NETSYS_V2) |
| #define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(13, 0) |
| #define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(14) |
| @@ -16,6 +18,7 @@ |
| #define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16) |
| #define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30) |
| #define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31) |
| +#define MTK_WED_RX_BM_TOKEN GENMASK(31, 16) |
| |
| struct mtk_wdma_desc { |
| __le32 buf0; |
| @@ -37,6 +40,8 @@ struct mtk_wdma_desc { |
| #define MTK_WED_RESET_WED_TX_DMA BIT(12) |
| #define MTK_WED_RESET_WDMA_RX_DRV BIT(17) |
| #define MTK_WED_RESET_WDMA_INT_AGENT BIT(19) |
| +#define MTK_WED_RESET_RX_RRO_QM BIT(20) |
| +#define MTK_WED_RESET_RX_ROUTE_QM BIT(21) |
| #define MTK_WED_RESET_WED BIT(31) |
| |
| #define MTK_WED_CTRL 0x00c |
| @@ -48,8 +53,12 @@ struct mtk_wdma_desc { |
| #define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9) |
| #define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10) |
| #define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11) |
| -#define MTK_WED_CTRL_RESERVE_EN BIT(12) |
| -#define MTK_WED_CTRL_RESERVE_BUSY BIT(13) |
| +#define MTK_WED_CTRL_WED_RX_BM_EN BIT(12) |
| +#define MTK_WED_CTRL_WED_RX_BM_BUSY BIT(13) |
| +#define MTK_WED_CTRL_RX_RRO_QM_EN BIT(14) |
| +#define MTK_WED_CTRL_RX_RRO_QM_BUSY BIT(15) |
| +#define MTK_WED_CTRL_RX_ROUTE_QM_EN BIT(16) |
| +#define MTK_WED_CTRL_RX_ROUTE_QM_BUSY BIT(17) |
| #define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24) |
| #define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25) |
| #define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28) |
| @@ -64,8 +73,8 @@ struct mtk_wdma_desc { |
| #define MTK_WED_EXT_INT_STATUS_TX_TKID_LO_TH BIT(10) |
| #define MTK_WED_EXT_INT_STATUS_TX_TKID_HI_TH BIT(11) |
| #endif |
| -#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12) |
| -#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13) |
| +#define MTK_WED_EXT_INT_STATUS_RX_FREE_AT_EMPTY BIT(12) |
| +#define MTK_WED_EXT_INT_STATUS_RX_FBUF_DMAD_ER BIT(13) |
| #define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16) |
| #define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17) |
| #define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18) |
| @@ -82,8 +91,8 @@ struct mtk_wdma_desc { |
| #define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \ |
| MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \ |
| MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \ |
| - MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH | \ |
| - MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH | \ |
| + MTK_WED_EXT_INT_STATUS_RX_FREE_AT_EMPTY | \ |
| + MTK_WED_EXT_INT_STATUS_RX_FBUF_DMAD_ER | \ |
| MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \ |
| MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \ |
| MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | \ |
| @@ -92,6 +101,8 @@ struct mtk_wdma_desc { |
| MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR) |
| |
| #define MTK_WED_EXT_INT_MASK 0x028 |
| +#define MTK_WED_EXT_INT_MASK1 0x02c |
| +#define MTK_WED_EXT_INT_MASK2 0x030 |
| |
| #define MTK_WED_STATUS 0x060 |
| #define MTK_WED_STATUS_TX GENMASK(15, 8) |
| @@ -179,6 +190,9 @@ struct mtk_wdma_desc { |
| |
| #define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10) |
| |
| +#define MTK_WED_RING_RX_DATA(_n) (0x420 + (_n) * 0x10) |
| + |
| +#define MTK_WED_SCR0 0x3c0 |
| #define MTK_WED_WPDMA_INT_TRIGGER 0x504 |
| #define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1) |
| #define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4) |
| @@ -235,13 +249,19 @@ struct mtk_wdma_desc { |
| |
| #define MTK_WED_WPDMA_INT_CTRL_TX 0x530 |
| #define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN BIT(0) |
| -#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR BIT(1) |
| +#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR BIT(1) |
| #define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG GENMASK(6, 2) |
| #define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN BIT(8) |
| #define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR BIT(9) |
| #define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG GENMASK(14, 10) |
| |
| #define MTK_WED_WPDMA_INT_CTRL_RX 0x534 |
| +#define MTK_WED_WPDMA_INT_CTRL_RX0_EN BIT(0) |
| +#define MTK_WED_WPDMA_INT_CTRL_RX0_CLR BIT(1) |
| +#define MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG GENMASK(6, 2) |
| +#define MTK_WED_WPDMA_INT_CTRL_RX1_EN BIT(8) |
| +#define MTK_WED_WPDMA_INT_CTRL_RX1_CLR BIT(9) |
| +#define MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG GENMASK(14, 10) |
| |
| #define MTK_WED_WPDMA_INT_CTRL_TX_FREE 0x538 |
| #define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN BIT(0) |
| @@ -266,13 +286,43 @@ struct mtk_wdma_desc { |
| #define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4) |
| #define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4) |
| |
| +#define MTK_WED_WPDMA_RX_MIB(_n) (0x5e0 + (_n) * 4) |
| +#define MTK_WED_WPDMA_RX_COHERENT_MIB(_n) (0x5f0 + (_n) * 4) |
| + |
| #define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10) |
| #define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10) |
| +#define MTK_WED_WPDMA_RING_RX_DATA(_n) (0x730 + (_n) * 0x10) |
| + |
| +#define MTK_WED_WPDMA_RX_D_GLO_CFG 0x75c |
| +#define MTK_WED_WPDMA_RX_D_RX_DRV_EN BIT(0) |
| +#define MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL GENMASK(11, 7) |
| +#define MTK_WED_WPDMA_RX_D_RXD_READ_LEN GENMASK(31, 24) |
| + |
| +#define MTK_WED_WPDMA_RX_D_RST_IDX 0x760 |
| +#define MTK_WED_WPDMA_RX_D_RST_CRX_IDX0 BIT(16) |
| +#define MTK_WED_WPDMA_RX_D_RST_CRX_IDX1 BIT(17) |
| +#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX0 BIT(24) |
| +#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX1 BIT(25) |
| + |
| +#define MTK_WED_WPDMA_RX_GLO_CFG 0x76c |
| +#define MTK_WED_WPDMA_RX_RING 0x770 |
| + |
| +#define MTK_WED_WPDMA_RX_D_MIB(_n) (0x774 + (_n) * 4) |
| +#define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n) (0x784 + (_n) * 4) |
| +#define MTK_WED_WPDMA_RX_D_COHERENT_MIB 0x78c |
| + |
| +#define MTK_WED_WDMA_RING_TX 0x800 |
| + |
| +#define MTK_WED_WDMA_TX_MIB 0x810 |
| + |
| #define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10) |
| #define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4) |
| |
| #define MTK_WED_WDMA_GLO_CFG 0xa04 |
| #define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0) |
| +#define MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK BIT(1) |
| #define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2) |
| #define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3) |
| #define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4) |
| @@ -316,6 +366,20 @@ struct mtk_wdma_desc { |
| #define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4) |
| #define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4) |
| |
| +#define MTK_WED_RX_BM_RX_DMAD 0xd80 |
| +#define MTK_WED_RX_BM_RX_DMAD_SDL0 GENMASK(13, 0) |
| + |
| +#define MTK_WED_RX_BM_BASE 0xd84 |
| +#define MTK_WED_RX_BM_INIT_PTR 0xd88 |
| +#define MTK_WED_RX_BM_SW_TAIL GENMASK(15, 0) |
| +#define MTK_WED_RX_BM_INIT_SW_TAIL BIT(16) |
| + |
| +#define MTK_WED_RX_PTR 0xd8c |
| + |
| +#define MTK_WED_RX_BM_DYN_ALLOC_TH 0xdb4 |
| +#define MTK_WED_RX_BM_DYN_ALLOC_TH_H GENMASK(31, 16) |
| +#define MTK_WED_RX_BM_DYN_ALLOC_TH_L GENMASK(15, 0) |
| + |
| #define MTK_WED_RING_OFS_BASE 0x00 |
| #define MTK_WED_RING_OFS_COUNT 0x04 |
| #define MTK_WED_RING_OFS_CPU_IDX 0x08 |
| @@ -355,4 +419,71 @@ struct mtk_wdma_desc { |
| /* DMA channel mapping */ |
| #define HIFSYS_DMA_AG_MAP 0x008 |
| |
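| +/* WED RX route QM (RTQM) registers */ |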
| +#define MTK_WED_RTQM_GLO_CFG 0xb00 |
| +#define MTK_WED_RTQM_BUSY BIT(1) |
| +#define MTK_WED_RTQM_Q_RST BIT(2) |
| +#define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5) |
| +#define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20) |
| + |
| +#define MTK_WED_RTQM_R2H_MIB(_n) (0xb70 + (_n) * 0x4) |
| +#define MTK_WED_RTQM_R2Q_MIB(_n) (0xb78 + (_n) * 0x4) |
| +#define MTK_WED_RTQM_Q2N_MIB 0xb80 |
| +#define MTK_WED_RTQM_Q2H_MIB(_n) (0xb84 + (_n) * 0x4) |
| + |
| +#define MTK_WED_RTQM_Q2B_MIB 0xb8c |
| +#define MTK_WED_RTQM_PFDBK_MIB 0xb90 |
| + |
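| +/* WED RX RRO QM registers */ |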
| +#define MTK_WED_RROQM_GLO_CFG 0xc04 |
| +#define MTK_WED_RROQM_RST_IDX 0xc08 |
| +#define MTK_WED_RROQM_RST_IDX_MIOD BIT(0) |
| +#define MTK_WED_RROQM_RST_IDX_FDBK BIT(4) |
| + |
| +#define MTK_WED_RROQM_MIOD_CTRL0 0xc40 |
| +#define MTK_WED_RROQM_MIOD_CTRL1 0xc44 |
| +#define MTK_WED_RROQM_MIOD_CNT GENMASK(11, 0) |
| + |
| +#define MTK_WED_RROQM_MIOD_CTRL2 0xc48 |
| +#define MTK_WED_RROQM_MIOD_CTRL3 0xc4c |
| + |
| +#define MTK_WED_RROQM_FDBK_CTRL0 0xc50 |
| +#define MTK_WED_RROQM_FDBK_CTRL1 0xc54 |
| +#define MTK_WED_RROQM_FDBK_CNT GENMASK(11, 0) |
| + |
| +#define MTK_WED_RROQM_FDBK_CTRL2 0xc58 |
| + |
| +#define MTK_WED_RROQ_BASE_L 0xc80 |
| +#define MTK_WED_RROQ_BASE_H 0xc84 |
| + |
| +#define MTK_WED_RROQM_MIOD_CFG 0xc8c |
| +#define MTK_WED_RROQM_MIOD_MID_DW GENMASK(5, 0) |
| +#define MTK_WED_RROQM_MIOD_MOD_DW GENMASK(13, 8) |
| +#define MTK_WED_RROQM_MIOD_ENTRY_DW GENMASK(22, 16) |
| + |
| +#define MTK_WED_RROQM_MID_MIB 0xcc0 |
| +#define MTK_WED_RROQM_MOD_MIB 0xcc4 |
| +#define MTK_WED_RROQM_MOD_COHERENT_MIB 0xcc8 |
| +#define MTK_WED_RROQM_FDBK_MIB 0xcd0 |
| +#define MTK_WED_RROQM_FDBK_COHERENT_MIB 0xcd4 |
| +#define MTK_WED_RROQM_FDBK_IND_MIB 0xce0 |
| +#define MTK_WED_RROQM_FDBK_ENQ_MIB 0xce4 |
| +#define MTK_WED_RROQM_FDBK_ANC_MIB 0xce8 |
| +#define MTK_WED_RROQM_FDBK_ANC2H_MIB 0xcec |
| + |
| +#define MTK_WED_RX_BM_RX_DMAD 0xd80 |
| +#define MTK_WED_RX_BM_BASE 0xd84 |
| +#define MTK_WED_RX_BM_INIT_PTR 0xd88 |
| +#define MTK_WED_RX_BM_PTR 0xd8c |
| +#define MTK_WED_RX_BM_PTR_HEAD GENMASK(31, 16) |
| +#define MTK_WED_RX_BM_PTR_TAIL GENMASK(15, 0) |
| + |
| +#define MTK_WED_RX_BM_BLEN 0xd90 |
| +#define MTK_WED_RX_BM_STS 0xd94 |
| +#define MTK_WED_RX_BM_INTF2 0xd98 |
| +#define MTK_WED_RX_BM_INTF 0xd9c |
| +#define MTK_WED_RX_BM_ERR_STS 0xda8 |
| + |
| +#define MTK_WED_WOCPU_VIEW_MIOD_BASE 0x8000 |
| +#define MTK_WED_PCIE_INT_MASK 0x0 |
| + |
| #endif |
| diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c |
| new file mode 100644 |
| index 000000000..10618fc1a |
| --- /dev/null |
| +++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c |
| @@ -0,0 +1,548 @@ |
| +// SPDX-License-Identifier: GPL-2.0-only |
| + |
| +#include <linux/kernel.h> |
| +#include <linux/bitfield.h> |
| +#include <linux/dma-mapping.h> |
| +#include <linux/skbuff.h> |
| +#include <linux/of_platform.h> |
| +#include <linux/interrupt.h> |
| +#include <linux/of_address.h> |
| +#include <linux/iopoll.h> |
| +#include <linux/soc/mediatek/mtk_wed.h> |
| +#include "mtk_wed.h" |
| +#include "mtk_wed_regs.h" |
| +#include "mtk_wed_ccif.h" |
| +#include "mtk_wed_wo.h" |
| + |
| +struct wed_wo_profile_stat profile_total[6] = { |
| + {1001, 0}, |
| + {1501, 0}, |
| + {3001, 0}, |
| + {5001, 0}, |
| + {10001, 0}, |
| + {0xffffffff, 0} |
| +}; |
| + |
| +struct wed_wo_profile_stat profiling_mod[6] = { |
| + {1001, 0}, |
| + {1501, 0}, |
| + {3001, 0}, |
| + {5001, 0}, |
| + {10001, 0}, |
| + {0xffffffff, 0} |
| +}; |
| + |
| +struct wed_wo_profile_stat profiling_rro[6] = { |
| + {1001, 0}, |
| + {1501, 0}, |
| + {3001, 0}, |
| + {5001, 0}, |
| + {10001, 0}, |
| + {0xffffffff, 0} |
| +}; |
| + |
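| +/* Program the ring base and size, then resync the software head/tail to the DMA index. */ |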
| +static void |
| +woif_q_sync_idx(struct mtk_wed_wo *wo, struct wed_wo_queue *q) |
| +{ |
| + woccif_w32(wo, q->regs->desc_base, q->desc_dma); |
| + woccif_w32(wo, q->regs->ring_size, q->ndesc); |
| + |
| + /* wo fw start from 1 */ |
| + q->head = woccif_r32(wo, q->regs->dma_idx) + 1; |
| + q->tail = q->head; |
| +} |
| + |
| +static void |
| +woif_q_reset(struct mtk_wed_wo *dev, struct wed_wo_queue *q) |
| +{ |
| + if (!q || !q->ndesc) |
| + return; |
| + |
| + woccif_w32(dev, q->regs->cpu_idx, 0); |
| + |
| + woif_q_sync_idx(dev, q); |
| +} |
| + |
| +static void |
| +woif_q_kick(struct mtk_wed_wo *wo, struct wed_wo_queue *q, int offset) |
| +{ |
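| + /* descriptor updates must be visible before the CPU index write */ |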
| + wmb(); |
| + woccif_w32(wo, q->regs->cpu_idx, q->head + offset); |
| +} |
| + |
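| +/* Refill the RX ring with page-fragment buffers, keeping one descriptor unused. */ |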
| +static int |
| +woif_q_rx_fill(struct mtk_wed_wo *wo, struct wed_wo_queue *q) |
| +{ |
| + int len = q->buf_size, frames = 0; |
| + struct wed_wo_queue_entry *entry; |
| + struct wed_wo_desc *desc; |
| + dma_addr_t addr; |
| + u32 ctrl = 0; |
| + void *buf; |
| + |
| + if (!q->ndesc) |
| + return 0; |
| + |
| + spin_lock_bh(&q->lock); |
| + |
| + while (q->queued < q->ndesc - 1) { |
| + buf = page_frag_alloc(&q->rx_page, len, GFP_ATOMIC); |
| + if (!buf) |
| + break; |
| + |
| + addr = dma_map_single(wo->hw->dev, buf, len, DMA_FROM_DEVICE); |
| + if (unlikely(dma_mapping_error(wo->hw->dev, addr))) { |
| + skb_free_frag(buf); |
| + break; |
| + } |
| + dma_sync_single_for_cpu(wo->hw->dev, addr, len, |
| + DMA_FROM_DEVICE); |
| + desc = &q->desc[q->head]; |
| + entry = &q->entry[q->head]; |
| + |
| + entry->dma_addr = addr; |
| + entry->dma_len = len; |
| + |
| + ctrl = FIELD_PREP(WED_CTL_SD_LEN0, entry->dma_len); |
| + ctrl |= WED_CTL_LAST_SEC0; |
| + |
| + WRITE_ONCE(desc->buf0, cpu_to_le32(addr)); |
| + WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl)); |
| + dma_sync_single_for_device(wo->hw->dev, addr, len, |
| + DMA_FROM_DEVICE); |
| + q->queued++; |
| + q->entry[q->head].buf = buf; |
| + |
| + q->head = (q->head + 1) % q->ndesc; |
| + frames++; |
| + } |
| + |
| + spin_unlock_bh(&q->lock); |
| + |
| + return frames; |
| +} |
| + |
| +static void |
| +woif_q_rx_fill_process(struct mtk_wed_wo *wo, struct wed_wo_queue *q) |
| +{ |
| + if (woif_q_rx_fill(wo, q)) |
| + woif_q_kick(wo, q, -1); |
| +} |
| + |
| +static int |
| +woif_q_alloc(struct mtk_wed_wo *dev, struct wed_wo_queue *q, |
| + int n_desc, int bufsize, int idx, |
| + struct wed_wo_queue_regs *regs) |
| +{ |
| + struct wed_wo_queue_regs *q_regs; |
| + int size; |
| + |
| + spin_lock_init(&q->lock); |
| + spin_lock_init(&q->cleanup_lock); |
| + |
| + q_regs = devm_kzalloc(dev->hw->dev, sizeof(*q_regs), GFP_KERNEL); |
| + if (!q_regs) |
| + return -ENOMEM; |
| + |
| + q_regs->desc_base = regs->desc_base; |
| + q_regs->ring_size = regs->ring_size; |
| + q_regs->cpu_idx = regs->cpu_idx; |
| + q_regs->dma_idx = regs->dma_idx; |
| + |
| + q->regs = q_regs; |
| + q->ndesc = n_desc; |
| + q->buf_size = bufsize; |
| + |
| + size = q->ndesc * sizeof(struct wed_wo_desc); |
| + |
| + q->desc = dmam_alloc_coherent(dev->hw->dev, size, |
| + &q->desc_dma, GFP_KERNEL); |
| + if (!q->desc) |
| + return -ENOMEM; |
| + |
| + size = q->ndesc * sizeof(*q->entry); |
| + q->entry = devm_kzalloc(dev->hw->dev, size, GFP_KERNEL); |
| + if (!q->entry) |
| + return -ENOMEM; |
| + |
| + if (idx == 0) |
| + woif_q_reset(dev, &dev->q_tx); |
| + |
| + return 0; |
| +} |
| + |
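| +/* Reclaim completed TX entries up to the current DMA index (all of them when flushing). */ |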
| +static void |
| +woif_q_tx_clean(struct mtk_wed_wo *wo, struct wed_wo_queue *q, bool flush) |
| +{ |
| + int last; |
| + |
| + if (!q || !q->ndesc) |
| + return; |
| + |
| + spin_lock_bh(&q->cleanup_lock); |
| + if (flush) |
| + last = -1; |
| + else |
| + last = readl(&q->regs->dma_idx); |
| + |
| + while (q->queued > 0 && q->tail != last) { |
| + struct wed_wo_queue_entry *e; |
| + |
| + e = &q->entry[q->tail]; |
| + |
| + dma_unmap_single(wo->hw->dev, e->dma_addr, e->dma_len, |
| + DMA_TO_DEVICE); |
| + |
| + if (e->skb) |
| + dev_kfree_skb(e->skb); |
| + |
| + memset(e, 0, sizeof(*e)); |
| + |
| + spin_lock_bh(&q->lock); |
| + q->tail = (q->tail + 1) % q->ndesc; |
| + q->queued--; |
| + spin_unlock_bh(&q->lock); |
| + |
| + if (!flush && q->tail == last) |
| + last = readl(&q->regs->dma_idx); |
| + } |
| + spin_unlock_bh(&q->cleanup_lock); |
| + |
| + if (flush) { |
| + spin_lock_bh(&q->lock); |
| + woif_q_sync_idx(wo, q); |
| + woif_q_kick(wo, q, 0); |
| + spin_unlock_bh(&q->lock); |
| + } |
| +} |
| + |
| +static void |
| +woif_q_rx_clean(struct mtk_wed_wo *wo, struct wed_wo_queue *q) |
| +{ |
| +} |
| + |
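| +/* Pop one completed RX buffer from the ring, returning its length and descriptor info. */ |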
| +static void * |
| +woif_q_deq(struct mtk_wed_wo *wo, struct wed_wo_queue *q, bool flush, |
| + int *len, u32 *info, bool *more) |
| +{ |
| + int buf_len = SKB_WITH_OVERHEAD(q->buf_size); |
| + struct wed_wo_queue_entry *e; |
| + struct wed_wo_desc *desc; |
| + int idx = q->tail; |
| + void *buf; |
| + |
| + *more = false; |
| + if (!q->queued) |
| + return NULL; |
| + |
| + if (flush) |
| + q->desc[idx].ctrl |= cpu_to_le32(WED_CTL_DMA_DONE); |
| + else if (!(q->desc[idx].ctrl & cpu_to_le32(WED_CTL_DMA_DONE))) |
| + return NULL; |
| + |
| + q->tail = (q->tail + 1) % q->ndesc; |
| + q->queued--; |
| + |
| + desc = &q->desc[idx]; |
| + e = &q->entry[idx]; |
| + |
| + buf = e->buf; |
| + if (len) { |
| + u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl)); |
| + *len = FIELD_GET(WED_CTL_SD_LEN0, ctl); |
| + *more = !(ctl & WED_CTL_LAST_SEC0); |
| + } |
| + |
| + if (info) |
| + *info = le32_to_cpu(desc->info); |
| + if (buf) |
| + dma_unmap_single(wo->hw->dev, e->dma_addr, buf_len, |
| + DMA_FROM_DEVICE); |
| + e->skb = NULL; |
| + |
| + return buf; |
| +} |
| + |
| +static int |
| +woif_q_init(struct mtk_wed_wo *dev, |
| + int (*poll)(struct napi_struct *napi, int budget)) |
| +{ |
| + init_dummy_netdev(&dev->napi_dev); |
| + snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s", |
| + "woif_q"); |
| + |
| + if (dev->q_rx.ndesc) { |
| + netif_napi_add(&dev->napi_dev, &dev->napi, poll, 64); |
| + woif_q_rx_fill(dev, &dev->q_rx); |
| + woif_q_reset(dev, &dev->q_rx); |
| + napi_enable(&dev->napi); |
| + } |
| + |
| + return 0; |
| +} |
| + |
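| +/* Dispatch a WO MCU message: responses complete the pending command, the rest are unsolicited events. */ |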
| +void woif_q_rx_skb(struct mtk_wed_wo *wo, struct sk_buff *skb) |
| +{ |
| + struct wed_cmd_hdr *hdr = (struct wed_cmd_hdr *)skb->data; |
| + int ret; |
| + |
| + ret = mtk_wed_mcu_cmd_sanity_check(wo, skb); |
| + if (ret) |
| + goto free_skb; |
| + |
| + if (WED_WO_CMD_FLAG_IS_RSP(hdr)) |
| + mtk_wed_mcu_rx_event(wo, skb); |
| + else |
| + mtk_wed_mcu_rx_unsolicited_event(wo, skb); |
| + |
| + return; |
| +free_skb: |
| + dev_kfree_skb(skb); |
| +} |
| + |
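| +/* Map an skb, queue it on the WO TX ring and notify the WO MCU. */ |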
| +static int |
| +woif_q_tx_skb(struct mtk_wed_wo *wo, struct wed_wo_queue *q, |
| + struct sk_buff *skb) |
| +{ |
| + struct wed_wo_queue_entry *entry; |
| + struct wed_wo_desc *desc; |
| + int len, ret, idx = -1; |
| + dma_addr_t addr; |
| + u32 ctrl = 0; |
| + |
| + len = skb->len; |
| + addr = dma_map_single(wo->hw->dev, skb->data, len, DMA_TO_DEVICE); |
| + if (unlikely(dma_mapping_error(wo->hw->dev, addr))) |
| + goto error; |
| + |
| + /* queue is getting full: reclaim completed TX entries first */ |
| + if (q->queued + MTK_WED_WO_TXQ_FREE_THR >= q->ndesc - 1) |
| + woif_q_tx_clean(wo, q, false); |
| + |
| + if (q->queued + 1 >= q->ndesc - 1) { |
| + ret = -ENOMEM; |
| + goto error; |
| + } |
| + |
| + spin_lock_bh(&q->lock); |
| + |
| + dma_sync_single_for_device(wo->hw->dev, addr, len, |
| + DMA_TO_DEVICE); |
| + |
| + idx = q->head; |
| + |
| + desc = &q->desc[idx]; |
| + entry = &q->entry[idx]; |
| + |
| + entry->dma_addr = addr; |
| + entry->dma_len = len; |
| + |
| + ctrl = FIELD_PREP(WED_CTL_SD_LEN0, len); |
| + ctrl |= WED_CTL_LAST_SEC0; |
| + ctrl |= WED_CTL_DMA_DONE; |
| + |
| + WRITE_ONCE(desc->buf0, cpu_to_le32(addr)); |
| + WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl)); |
| + |
| + q->queued++; |
| + q->entry[idx].skb = skb; |
| + |
| + woif_q_kick(wo, q, 0); |
| + wo->drv_ops->kickout(wo); |
| + |
| + q->head = (q->head + 1) % q->ndesc; |
| + spin_unlock_bh(&q->lock); |
| + return 0; |
| + |
| +error: |
| + dev_kfree_skb(skb); |
| + return -ENOMEM; |
| +} |
| + |
| +static const struct wed_wo_queue_ops wo_queue_ops = { |
| + .init = woif_q_init, |
| + .alloc = woif_q_alloc, |
| + .reset = woif_q_reset, |
| + .tx_skb = woif_q_tx_skb, |
| + .tx_clean = woif_q_tx_clean, |
| + .rx_clean = woif_q_rx_clean, |
| + .kick = woif_q_kick, |
| +}; |
| + |
| +static int |
| +mtk_wed_wo_rx_process(struct mtk_wed_wo *wo, struct wed_wo_queue *q, |