blob: bd65fea067d77f3d962027d329a1fc212b659f50 [file] [log] [blame]
From b54ca484993804cec5941bd12c6cafc9ce51e4dc Mon Sep 17 00:00:00 2001
From: Sujuan Chen <sujuan.chen@mediatek.com>
Date: Mon, 18 Sep 2023 13:21:15 +0800
Subject: [PATCH] mtk: wed: add wed3 support
---
arch/arm64/boot/dts/mediatek/mt7988.dtsi | 152 ++-
.../dts/mediatek/mt7988a-dsa-10g-spim-nor.dts | 16 +-
.../dts/mediatek/mt7988d-dsa-10g-spim-nor.dts | 16 +-
drivers/net/ethernet/mediatek/mtk_eth_soc.c | 3 +-
drivers/net/ethernet/mediatek/mtk_eth_soc.h | 5 +-
drivers/net/ethernet/mediatek/mtk_ppe.c | 17 +-
drivers/net/ethernet/mediatek/mtk_ppe.h | 2 +-
.../net/ethernet/mediatek/mtk_ppe_offload.c | 13 +-
drivers/net/ethernet/mediatek/mtk_wed.c | 1178 +++++++++++++----
drivers/net/ethernet/mediatek/mtk_wed.h | 25 +-
.../net/ethernet/mediatek/mtk_wed_debugfs.c | 584 +++++++-
drivers/net/ethernet/mediatek/mtk_wed_mcu.c | 13 +-
drivers/net/ethernet/mediatek/mtk_wed_mcu.h | 5 +-
drivers/net/ethernet/mediatek/mtk_wed_regs.h | 338 ++++-
include/linux/netdevice.h | 7 +
include/linux/soc/mediatek/mtk_wed.h | 83 +-
16 files changed, 2069 insertions(+), 388 deletions(-)
mode change 100755 => 100644 drivers/net/ethernet/mediatek/mtk_ppe.c
diff --git a/arch/arm64/boot/dts/mediatek/mt7988.dtsi b/arch/arm64/boot/dts/mediatek/mt7988.dtsi
index 561450e..8995ea3 100644
--- a/arch/arm64/boot/dts/mediatek/mt7988.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7988.dtsi
@@ -205,44 +205,49 @@
status = "disabled";
};
- wed: wed@15010000 {
- compatible = "mediatek,wed";
- wed_num = <3>;
- /* add this property for wed get the pci slot number. */
- pci_slot_map = <0>, <1>, <2>;
- reg = <0 0x15010000 0 0x2000>,
- <0 0x15012000 0 0x2000>,
- <0 0x15014000 0 0x2000>;
+ wed0: wed@15010000 {
+ compatible = "mediatek,mt7988-wed",
+ "syscon";
+ reg = <0 0x15010000 0 0x2000>;
interrupt-parent = <&gic>;
- interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>;
- };
-
- wed2: wed2@15012000 {
- compatible = "mediatek,wed2";
- wed_num = <3>;
- /* add this property for wed get the pci slot number. */
- reg = <0 0x15010000 0 0x2000>,
- <0 0x15012000 0 0x2000>,
- <0 0x15014000 0 0x2000>;
+ interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
+ mediatek,wed_pcie = <&wed_pcie>;
+ mediatek,ap2woccif = <&ap2woccif0>;
+ mediatek,wocpu_ilm = <&wocpu0_ilm>;
+ mediatek,wocpu_dlm = <&wocpu0_dlm>;
+ mediatek,wocpu_boot = <&cpu0_boot>;
+ mediatek,wocpu_emi = <&wocpu0_emi>;
+ mediatek,wocpu_data = <&wocpu_data>;
+ };
+
+ wed1: wed@15012000 {
+ compatible = "mediatek,mt7988-wed",
+ "syscon";
+ reg = <0 0x15012000 0 0x2000>;
interrupt-parent = <&gic>;
- interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>;
- };
-
- wed3: wed3@15014000 {
- compatible = "mediatek,wed3";
- wed_num = <3>;
- /* add this property for wed get the pci slot number. */
- reg = <0 0x15010000 0 0x2000>,
- <0 0x15012000 0 0x2000>,
- <0 0x15014000 0 0x2000>;
+ interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
+ mediatek,wed_pcie = <&wed_pcie>;
+ mediatek,ap2woccif = <&ap2woccif1>;
+ mediatek,wocpu_ilm = <&wocpu1_ilm>;
+ mediatek,wocpu_dlm = <&wocpu1_dlm>;
+ mediatek,wocpu_boot = <&cpu1_boot>;
+ mediatek,wocpu_emi = <&wocpu1_emi>;
+ mediatek,wocpu_data = <&wocpu_data>;
+ };
+
+ wed2: wed@15014000 {
+ compatible = "mediatek,mt7988-wed",
+ "syscon";
+ reg = <0 0x15014000 0 0x2000>;
interrupt-parent = <&gic>;
- interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>;
+ mediatek,wed_pcie = <&wed_pcie>;
+ mediatek,ap2woccif = <&ap2woccif2>;
+ mediatek,wocpu_ilm = <&wocpu2_ilm>;
+ mediatek,wocpu_dlm = <&wocpu2_dlm>;
+ mediatek,wocpu_boot = <&cpu2_boot>;
+ mediatek,wocpu_emi = <&wocpu2_emi>;
+ mediatek,wocpu_data = <&wocpu_data>;
};
wdma: wdma@15104800 {
@@ -252,15 +257,25 @@
<0 0x15105000 0 0x400>;
};
- ap2woccif: ap2woccif@151A5000 {
- compatible = "mediatek,ap2woccif";
- reg = <0 0x151A5000 0 0x1000>,
- <0 0x152A5000 0 0x1000>,
- <0 0x153A5000 0 0x1000>;
+ ap2woccif0: ap2woccif@151A5000 {
+ compatible = "mediatek,ap2woccif", "syscon";
+ reg = <0 0x151A5000 0 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ ap2woccif1: ap2woccif@152A5000 {
+ compatible = "mediatek,ap2woccif", "syscon";
+ reg = <0 0x152A5000 0 0x1000>;
interrupt-parent = <&gic>;
- interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ ap2woccif2: ap2woccif@153A5000 {
+ compatible = "mediatek,ap2woccif", "syscon";
+ reg = <0 0x153A5000 0 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
};
wocpu0_ilm: wocpu0_ilm@151E0000 {
@@ -268,31 +283,53 @@
reg = <0 0x151E0000 0 0x8000>;
};
- wocpu1_ilm: wocpu1_ilm@152E0000 {
- compatible = "mediatek,wocpu1_ilm";
+ wocpu1_ilm: wocpu_ilm@152E0000 {
+ compatible = "mediatek,wocpu_ilm";
reg = <0 0x152E0000 0 0x8000>;
};
- wocpu2_ilm: wocpu2_ilm@153E0000 {
- compatible = "mediatek,wocpu2_ilm";
- reg = <0 0x153E0000 0 0x8000>;
+ wocpu2_ilm: wocpu_ilm@153E0000 {
+ compatible = "mediatek,wocpu_ilm";
+ reg = <0 0x153E0000 0 0x8000>;
+ };
+
+ wocpu0_dlm: wocpu_dlm@151E8000 {
+ compatible = "mediatek,wocpu_dlm";
+ reg = <0 0x151E8000 0 0x2000>;
+
+ resets = <&ethsysrst 0>;
+ reset-names = "wocpu_rst";
+ };
+
+ wocpu1_dlm: wocpu_dlm@152E8000 {
+ compatible = "mediatek,wocpu_dlm";
+ reg = <0 0x152E8000 0 0x2000>;
+
+ resets = <&ethsysrst 0>;
+ reset-names = "wocpu_rst";
};
- wocpu_dlm: wocpu_dlm@151E8000 {
+ wocpu2_dlm: wocpu_dlm@153E8000 {
compatible = "mediatek,wocpu_dlm";
- reg = <0 0x151E8000 0 0x2000>,
- <0 0x152E8000 0 0x2000>,
- <0 0x153E8000 0 0x2000>;
+ reg = <0 0x153E8000 0 0x2000>;
resets = <&ethsysrst 0>;
reset-names = "wocpu_rst";
};
- cpu_boot: wocpu_boot@15194000 {
- compatible = "mediatek,wocpu_boot";
- reg = <0 0x15194000 0 0x1000>,
- <0 0x15294000 0 0x1000>,
- <0 0x15394000 0 0x1000>;
+ cpu0_boot: wocpu_boot@15194000 {
+ compatible = "mediatek,wocpu0_boot";
+ reg = <0 0x15194000 0 0x1000>;
+ };
+
+ cpu1_boot: wocpu_boot@15294000 {
+ compatible = "mediatek,wocpu1_boot";
+ reg = <0 0x15294000 0 0x1000>;
+ };
+
+ cpu2_boot: wocpu_boot@15394000 {
+ compatible = "mediatek,wocpu2_boot";
+ reg = <0 0x15394000 0 0x1000>;
};
reserved-memory {
@@ -902,6 +939,7 @@
<&topckgen CK_TOP_CB_SGM_325M>;
mediatek,ethsys = <&ethsys>;
mediatek,sgmiisys = <&sgmiisys0>, <&sgmiisys1>;
+ mediatek,wed = <&wed0>, <&wed1>, <&wed2>;
mediatek,usxgmiisys = <&usxgmiisys0>, <&usxgmiisys1>;
mediatek,xfi_pextp = <&xfi_pextp0>, <&xfi_pextp1>;
mediatek,xfi_pll = <&xfi_pll>;
diff --git a/arch/arm64/boot/dts/mediatek/mt7988a-dsa-10g-spim-nor.dts b/arch/arm64/boot/dts/mediatek/mt7988a-dsa-10g-spim-nor.dts
index 70a7554..bed27b4 100644
--- a/arch/arm64/boot/dts/mediatek/mt7988a-dsa-10g-spim-nor.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7988a-dsa-10g-spim-nor.dts
@@ -369,9 +369,23 @@
status = "okay";
};
-&wed {
+&wed0 {
dy_txbm_enable = "true";
dy_txbm_budge = <8>;
txbm_init_sz = <10>;
status = "okay";
};
+
+&wed1 {
+ dy_txbm_enable = "true";
+ dy_txbm_budge = <8>;
+ txbm_init_sz = <10>;
+ status = "okay";
+};
+
+&wed2 {
+ dy_txbm_enable = "true";
+ dy_txbm_budge = <8>;
+ txbm_init_sz = <10>;
+ status = "okay";
+};
\ No newline at end of file
diff --git a/arch/arm64/boot/dts/mediatek/mt7988d-dsa-10g-spim-nor.dts b/arch/arm64/boot/dts/mediatek/mt7988d-dsa-10g-spim-nor.dts
index e8e3a69..5dd481b 100644
--- a/arch/arm64/boot/dts/mediatek/mt7988d-dsa-10g-spim-nor.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7988d-dsa-10g-spim-nor.dts
@@ -379,9 +379,23 @@
status = "okay";
};
-&wed {
+&wed0 {
dy_txbm_enable = "true";
dy_txbm_budge = <8>;
txbm_init_sz = <10>;
status = "okay";
};
+
+&wed1 {
+ dy_txbm_enable = "true";
+ dy_txbm_budge = <8>;
+ txbm_init_sz = <10>;
+ status = "okay";
+};
+
+&wed2 {
+ dy_txbm_enable = "true";
+ dy_txbm_budge = <8>;
+ txbm_init_sz = <10>;
+ status = "okay";
+};
\ No newline at end of file
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 8bd526a..dea66d7 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -5095,7 +5095,8 @@ static int mtk_probe(struct platform_device *pdev)
"mediatek,wed", i);
static const u32 wdma_regs[] = {
MTK_WDMA0_BASE,
- MTK_WDMA1_BASE
+ MTK_WDMA1_BASE,
+ MTK_WDMA2_BASE
};
void __iomem *wdma;
u32 wdma_phy;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index ee89b4c..8656b5f 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -613,9 +613,12 @@
#define RX_DMA_SPORT_MASK 0x7
#define RX_DMA_SPORT_MASK_V2 0xf
-#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
#define MTK_WDMA0_BASE 0x4800
#define MTK_WDMA1_BASE 0x4c00
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+#define MTK_WDMA2_BASE 0x5000
+#endif
#else
#define MTK_WDMA0_BASE 0x2800
#define MTK_WDMA1_BASE 0x2c00
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
old mode 100755
new mode 100644
index 384e811..eda23c2
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -9,6 +9,7 @@
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/dsa.h>
+#include <net/route.h>
#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"
@@ -396,7 +397,7 @@ int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
}
int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
- int bss, int wcid)
+ int bss, int wcid, bool amsdu_en)
{
struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
u32 *ib2 = mtk_foe_entry_ib2(entry);
@@ -408,6 +409,9 @@ int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+ l2->winfo_pao = FIELD_PREP(MTK_FOE_WINFO_PAO_AMSDU_EN, amsdu_en);
+#endif
#else
if (wdma_idx)
*ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
@@ -443,6 +447,17 @@ int mtk_foe_entry_set_dscp(struct mtk_foe_entry *entry, int dscp)
*ib2 &= ~MTK_FOE_IB2_DSCP;
*ib2 |= FIELD_PREP(MTK_FOE_IB2_DSCP, dscp);
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+
+ if (*ib2 & MTK_FOE_IB2_WDMA_WINFO &&
+ l2->winfo_pao & MTK_FOE_WINFO_PAO_AMSDU_EN) {
+ u8 tid = rt_tos2priority(dscp) & 0xf;
+
+ l2->winfo_pao |= FIELD_PREP(MTK_FOE_WINFO_PAO_TID, tid);
+ }
+#endif
+
return 0;
}
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index 2a8b6ef..66c7f10 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -428,7 +428,7 @@ int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port);
int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
- int bss, int wcid);
+ int bss, int wcid, bool amsdu_en);
int mtk_foe_entry_set_qid(struct mtk_foe_entry *entry, int qid);
int mtk_foe_entry_set_dscp(struct mtk_foe_entry *entry, int dscp);
bool mtk_foe_entry_match(struct mtk_foe_entry *entry, struct mtk_foe_entry *data);
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index 95174b7..eab9e9d 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -112,6 +112,7 @@ mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_i
info->queue = path.mtk_wdma.queue;
info->bss = path.mtk_wdma.bss;
info->wcid = path.mtk_wdma.wcid;
+ info->amsdu_en = path.mtk_wdma.amsdu_en;
return 0;
}
@@ -193,13 +194,15 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss,
- info.wcid);
+ info.wcid, info.amsdu_en);
pse_port = PSE_PPE0_PORT;
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
if (info.wdma_idx == 0)
pse_port = PSE_WDMA0_PORT;
else if (info.wdma_idx == 1)
pse_port = PSE_WDMA1_PORT;
+ else if (info.wdma_idx == 2)
+ pse_port = PSE_WDMA2_PORT;
else
return -EOPNOTSUPP;
#endif
@@ -490,8 +493,8 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
if (err)
return err;
- if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
- return err;
+ /*if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
+ return err;*/
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
@@ -516,8 +519,8 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
mtk_foe_entry_clear(eth->ppe[ppe_index], entry);
free:
kfree(entry);
- if (wed_index >= 0)
- mtk_wed_flow_remove(wed_index);
+ /*if (wed_index >= 0)
+ mtk_wed_flow_remove(wed_index);*/
return err;
}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 3e760f7..7b2e199 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -28,7 +28,7 @@ struct wo_cmd_ring {
u32 cnt;
u32 unit;
};
-static struct mtk_wed_hw *hw_list[2];
+static struct mtk_wed_hw *hw_list[3];
static DEFINE_MUTEX(hw_lock);
static void
@@ -73,6 +73,26 @@ mtk_wdma_read_reset(struct mtk_wed_device *dev)
return wdma_r32(dev, MTK_WDMA_GLO_CFG);
}
+static u32
+mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+ if (wed_r32(dev, reg) & mask)
+ return true;
+
+ return false;
+}
+
+static int
+mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+ int sleep = 1000;
+ int timeout = 100 * sleep;
+ u32 val;
+
+ return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
+ timeout, false, dev, reg, mask);
+}
+
static int
mtk_wdma_rx_reset(struct mtk_wed_device *dev)
{
@@ -235,6 +255,8 @@ mtk_wed_assign(struct mtk_wed_device *dev)
continue;
hw->wed_dev = dev;
+ hw->pci_base = MTK_WED_PCIE_BASE;
+
return hw;
}
@@ -242,23 +264,84 @@ mtk_wed_assign(struct mtk_wed_device *dev)
}
static int
-mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
+mtk_wed_pao_buffer_alloc(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_pao *pao;
+ int i, j;
+
+ pao = kzalloc(sizeof(struct mtk_wed_pao), GFP_KERNEL);
+ if (!pao)
+ return -ENOMEM;
+
+ dev->hw->wed_pao = pao;
+
+ for (i = 0; i < 32; i++) {
+ /* each segment is 64K*/
+ pao->hif_txd[i] = (char *)__get_free_pages(GFP_ATOMIC |
+ GFP_DMA32 |
+ __GFP_ZERO, 4);
+ if (!pao->hif_txd[i])
+ goto err;
+
+ pao->hif_txd_phys[i] = dma_map_single(dev->hw->dev,
+ pao->hif_txd[i],
+ 16 * PAGE_SIZE,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->hw->dev,
+ pao->hif_txd_phys[i])))
+ goto err;
+ }
+
+ return 0;
+
+err:
+ for (j = 0; j < i; j++)
+ dma_unmap_single(dev->hw->dev, pao->hif_txd_phys[j],
+ 16 * PAGE_SIZE, DMA_TO_DEVICE);
+
+ return -ENOMEM;
+}
+
+static int
+mtk_wed_pao_free_buffer(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_pao *pao = dev->hw->wed_pao;
+ int i;
+
+ for (i = 0; i < 32; i++) {
+ dma_unmap_single(dev->hw->dev, pao->hif_txd_phys[i],
+ 16 * PAGE_SIZE, DMA_TO_DEVICE);
+ free_pages((unsigned long)pao->hif_txd[i], 4);
+ }
+
+ return 0;
+}
+
+static int
+mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
{
struct mtk_wdma_desc *desc;
+ void *desc_ptr;
dma_addr_t desc_phys;
- void **page_list;
+ struct dma_page_info *page_list;
u32 last_seg = MTK_WDMA_DESC_CTRL_LAST_SEG1;
int token = dev->wlan.token_start;
- int ring_size, n_pages, page_idx;
- int i;
-
+ int ring_size, pkt_nums, n_pages, page_idx;
+ int i, ret = 0;
if (dev->ver == MTK_WED_V1) {
ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
- } else {
+ pkt_nums = ring_size;
+ dev->tx_buf_ring.desc_size = sizeof(struct mtk_wdma_desc);
+ } else if (dev->hw->version == 2) {
ring_size = MTK_WED_VLD_GROUP_SIZE * MTK_WED_PER_GROUP_PKT +
MTK_WED_WDMA_RING_SIZE * 2;
last_seg = MTK_WDMA_DESC_CTRL_LAST_SEG0; pkt_nums = ring_size;
+ dev->tx_buf_ring.desc_size = sizeof(struct mtk_wdma_desc);
+ } else if (dev->hw->version == 3) {
+ ring_size = MTK_WED_TX_BM_DMA_SIZE;
+ pkt_nums = MTK_WED_TX_BM_PKT_CNT;
+ dev->tx_buf_ring.desc_size = sizeof(struct mtk_rxbm_desc);
}
n_pages = ring_size / MTK_WED_BUF_PER_PAGE;
@@ -267,18 +350,20 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
if (!page_list)
return -ENOMEM;
- dev->buf_ring.size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
- dev->buf_ring.pages = page_list;
+ dev->tx_buf_ring.size = ring_size;
+ dev->tx_buf_ring.pages = page_list;
+ dev->tx_buf_ring.pkt_nums = pkt_nums;
- desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
- &desc_phys, GFP_KERNEL);
- if (!desc)
+ desc_ptr = dma_alloc_coherent(dev->hw->dev,
+ ring_size * dev->tx_buf_ring.desc_size,
+ &desc_phys, GFP_KERNEL);
+ if (!desc_ptr)
return -ENOMEM;
- dev->buf_ring.desc = desc;
- dev->buf_ring.desc_phys = desc_phys;
+ dev->tx_buf_ring.desc = desc_ptr;
+ dev->tx_buf_ring.desc_phys = desc_phys;
- for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
+ for (i = 0, page_idx = 0; i < pkt_nums; i += MTK_WED_BUF_PER_PAGE) {
dma_addr_t page_phys, buf_phys;
struct page *page;
void *buf;
@@ -295,7 +380,10 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
return -ENOMEM;
}
- page_list[page_idx++] = page;
+ page_list[page_idx].addr = page;
+ page_list[page_idx].addr_phys = page_phys;
+ page_idx++;
+
dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
DMA_BIDIRECTIONAL);
@@ -303,19 +391,23 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
buf_phys = page_phys;
for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
- u32 txd_size;
-
- txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
-
+ desc = desc_ptr;
desc->buf0 = buf_phys;
- desc->buf1 = buf_phys + txd_size;
- desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0,
- txd_size) |
- FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
- MTK_WED_BUF_SIZE - txd_size) |
- last_seg;
- desc->info = 0;
- desc++;
+ if (dev->hw->version < 3) {
+ u32 txd_size;
+
+ txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
+ desc->buf1 = buf_phys + txd_size;
+ desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0,
+ txd_size) |
+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
+ MTK_WED_BUF_SIZE - txd_size) |
+ last_seg;
+ desc->info = 0;
+ } else {
+ desc->ctrl = token << 16;
+ }
+ desc_ptr += dev->tx_buf_ring.desc_size;
buf += MTK_WED_BUF_SIZE;
buf_phys += MTK_WED_BUF_SIZE;
@@ -325,15 +417,18 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
DMA_BIDIRECTIONAL);
}
- return 0;
+ if (dev->hw->version == 3)
+ ret = mtk_wed_pao_buffer_alloc(dev);
+
+ return ret;
}
static void
-mtk_wed_free_buffer(struct mtk_wed_device *dev)
+mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
{
- struct mtk_wdma_desc *desc = dev->buf_ring.desc;
- void **page_list = dev->buf_ring.pages;
- int ring_size, page_idx;
+ struct mtk_rxbm_desc *desc = dev->tx_buf_ring.desc;
+ struct dma_page_info *page_list = dev->tx_buf_ring.pages;
+ int ring_size, page_idx, pkt_nums;
int i;
if (!page_list)
@@ -342,33 +437,33 @@ mtk_wed_free_buffer(struct mtk_wed_device *dev)
if (!desc)
goto free_pagelist;
- if (dev->ver == MTK_WED_V1) {
- ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
- } else {
- ring_size = MTK_WED_VLD_GROUP_SIZE * MTK_WED_PER_GROUP_PKT +
- MTK_WED_WDMA_RING_SIZE * 2;
+ pkt_nums = ring_size = dev->tx_buf_ring.size;
+ if (dev->hw->version == 3) {
+ mtk_wed_pao_free_buffer(dev);
+ pkt_nums = dev->tx_buf_ring.pkt_nums;
}
- for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
- void *page = page_list[page_idx++];
+ for (i = 0, page_idx = 0; i < pkt_nums; i += MTK_WED_BUF_PER_PAGE) {
+ void *page = page_list[page_idx].addr;
if (!page)
break;
- dma_unmap_page(dev->hw->dev, desc[i].buf0,
+ dma_unmap_page(dev->hw->dev, page_list[page_idx].addr_phys,
PAGE_SIZE, DMA_BIDIRECTIONAL);
__free_page(page);
+ page_idx++;
}
- dma_free_coherent(dev->hw->dev, ring_size * sizeof(*desc),
- desc, dev->buf_ring.desc_phys);
+ dma_free_coherent(dev->hw->dev, ring_size * dev->tx_buf_ring.desc_size,
+ dev->tx_buf_ring.desc, dev->tx_buf_ring.desc_phys);
free_pagelist:
kfree(page_list);
}
static int
-mtk_wed_rx_bm_alloc(struct mtk_wed_device *dev)
+mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
{
struct mtk_rxbm_desc *desc;
dma_addr_t desc_phys;
@@ -389,7 +484,7 @@ mtk_wed_rx_bm_alloc(struct mtk_wed_device *dev)
}
static void
-mtk_wed_free_rx_bm(struct mtk_wed_device *dev)
+mtk_wed_free_rx_buffer(struct mtk_wed_device *dev)
{
struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc;
int ring_size = dev->rx_buf_ring.size;
@@ -403,6 +498,113 @@ mtk_wed_free_rx_bm(struct mtk_wed_device *dev)
desc, dev->rx_buf_ring.desc_phys);
}
+/* TODO */
+static int
+mtk_wed_rx_page_buffer_alloc(struct mtk_wed_device *dev)
+{
+ int ring_size = dev->wlan.rx_nbuf, buf_num = MTK_WED_RX_PG_BM_CNT;
+ struct mtk_rxbm_desc *desc;
+ dma_addr_t desc_phys;
+ struct dma_page_info *page_list;
+ int n_pages, page_idx;
+ int i;
+
+ n_pages = buf_num / MTK_WED_RX_PAGE_BUF_PER_PAGE;
+
+ page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
+ if (!page_list)
+ return -ENOMEM;
+
+ dev->rx_page_buf_ring.size = ring_size & ~(MTK_WED_BUF_PER_PAGE - 1);
+ dev->rx_page_buf_ring.pages = page_list;
+ dev->rx_page_buf_ring.pkt_nums = buf_num;
+
+ desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
+ &desc_phys, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ dev->rx_page_buf_ring.desc = desc;
+ dev->rx_page_buf_ring.desc_phys = desc_phys;
+
+ for (i = 0, page_idx = 0; i < buf_num; i += MTK_WED_RX_PAGE_BUF_PER_PAGE) {
+ dma_addr_t page_phys, buf_phys;
+ struct page *page;
+ void *buf;
+ int s;
+
+ page = __dev_alloc_pages(GFP_KERNEL, 0);
+ if (!page)
+ return -ENOMEM;
+
+ page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev->hw->dev, page_phys)) {
+ __free_page(page);
+ return -ENOMEM;
+ }
+
+ page_list[page_idx].addr = page;
+ page_list[page_idx].addr_phys = page_phys;
+ page_idx++;
+
+ dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ buf = page_to_virt(page);
+ buf_phys = page_phys;
+
+ for (s = 0; s < MTK_WED_RX_PAGE_BUF_PER_PAGE; s++) {
+
+ desc->buf0 = cpu_to_le32(buf_phys);
+ desc++;
+
+ buf += MTK_WED_PAGE_BUF_SIZE;
+ buf_phys += MTK_WED_PAGE_BUF_SIZE;
+ }
+
+ dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ }
+
+ return 0;
+}
+
+static void
+mtk_wed_rx_page_free_buffer(struct mtk_wed_device *dev)
+{
+ struct mtk_rxbm_desc *desc = dev->rx_page_buf_ring.desc;
+ struct dma_page_info *page_list = dev->rx_page_buf_ring.pages;
+ int ring_size, page_idx;
+ int i;
+
+ if (!page_list)
+ return;
+
+ if (!desc)
+ goto free_pagelist;
+
+ ring_size = dev->rx_page_buf_ring.pkt_nums;
+
+ for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_RX_PAGE_BUF_PER_PAGE) {
+ void *page = page_list[page_idx].addr;
+
+ if (!page)
+ break;
+
+ dma_unmap_page(dev->hw->dev, page_list[page_idx].addr_phys,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ __free_page(page);
+ page_idx++;
+ }
+
+ dma_free_coherent(dev->hw->dev, dev->rx_page_buf_ring.size * sizeof(*desc),
+ desc, dev->rx_page_buf_ring.desc_phys);
+
+free_pagelist:
+ kfree(page_list);
+}
+
static void
mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, int scale)
{
@@ -416,19 +618,35 @@ mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, int sca
static void
mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
{
- int i;
+ int i, scale = dev->hw->version > 1 ? 2 : 1;
for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
- mtk_wed_free_ring(dev, &dev->tx_ring[i], 1);
+ if ((dev->tx_ring[i].flags & MTK_WED_RING_CONFIGURED))
+ mtk_wed_free_ring(dev, &dev->tx_ring[i], 1);
+
for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
- mtk_wed_free_ring(dev, &dev->tx_wdma[i], dev->ver);
+ if ((dev->tx_wdma[i].flags & MTK_WED_RING_CONFIGURED))
+ mtk_wed_free_ring(dev, &dev->tx_wdma[i], scale);
}
static void
mtk_wed_free_rx_rings(struct mtk_wed_device *dev)
{
- mtk_wed_free_rx_bm(dev);
+ int i, scale = dev->hw->version > 1 ? 2 : 1;
+
+ for (i = 0; i < ARRAY_SIZE(dev->rx_ring); i++)
+ if ((dev->rx_ring[i].flags & MTK_WED_RING_CONFIGURED))
+ mtk_wed_free_ring(dev, &dev->rx_ring[i], 1);
+
+ for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
+ if ((dev->rx_wdma[i].flags & MTK_WED_RING_CONFIGURED))
+ mtk_wed_free_ring(dev, &dev->rx_wdma[i], scale);
+
+ mtk_wed_free_rx_buffer(dev);
mtk_wed_free_ring(dev, &dev->rro.rro_ring, 1);
+
+ if (dev->wlan.hwrro)
+ mtk_wed_rx_page_free_buffer(dev);
}
static void
@@ -437,7 +655,7 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
u32 wdma_mask;
wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
- if (dev->ver > MTK_WED_V1)
+ if (mtk_wed_get_rx_capa(dev))
wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
GENMASK(1, 0));
/* wed control cr set */
@@ -447,7 +665,7 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
MTK_WED_CTRL_WED_TX_BM_EN |
MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
- if (dev->ver == MTK_WED_V1) {
+ if (dev->hw->version == 1) {
wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
MTK_WED_PCIE_INT_TRIGGER_STATUS);
@@ -458,6 +676,8 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
} else {
+ if (dev->hw->version == 3)
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_TKID_ALI_EN);
wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
@@ -475,18 +695,20 @@ mtk_wed_set_int(struct mtk_wed_device *dev, u32 irq_mask)
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
dev->wlan.txfree_tbit));
- wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
- MTK_WED_WPDMA_INT_CTRL_RX0_EN |
- MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
- MTK_WED_WPDMA_INT_CTRL_RX1_EN |
- MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
- FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
- dev->wlan.rx_tbit[0]) |
- FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
- dev->wlan.rx_tbit[1]));
+ if (mtk_wed_get_rx_capa(dev))
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
+ MTK_WED_WPDMA_INT_CTRL_RX0_EN |
+ MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
+ MTK_WED_WPDMA_INT_CTRL_RX1_EN |
+ MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
+ dev->wlan.rx_tbit[0]) |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
+ dev->wlan.rx_tbit[1]));
}
+
wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
- if (dev->ver == MTK_WED_V1) {
+ if (dev->hw->version == 1) {
wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
} else {
wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
@@ -506,6 +728,21 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
{
u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+ switch (dev->hw->version) {
+ case 1:
+ mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
+ break;
+ case 2:
+ mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH2 |
+ MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH2 |
+ MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
+ MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
+ break;
+ case 3:
+ mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT;
+ break;
+ }
+
if (!dev->hw->num_flows)
mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
@@ -514,31 +751,86 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
}
static void
-mtk_wed_set_512_support(struct mtk_wed_device *dev, bool en)
+mtk_wed_pao_init(struct mtk_wed_device *dev)
{
- if (en) {
- wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
- wed_w32(dev, MTK_WED_TXP_DW1,
- FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
- } else {
- wed_w32(dev, MTK_WED_TXP_DW1,
- FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
- wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
+ struct mtk_wed_pao *pao = dev->hw->wed_pao;
+ int i;
+
+ for (i = 0; i < 32; i++)
+ wed_w32(dev, MTK_WED_PAO_HIFTXD_BASE_L(i),
+ pao->hif_txd_phys[i]);
+
+ /* init all sta parameter */
+ wed_w32(dev, MTK_WED_PAO_STA_INFO_INIT, MTK_WED_PAO_STA_RMVL |
+ MTK_WED_PAO_STA_WTBL_HDRT_MODE |
+ FIELD_PREP(MTK_WED_PAO_STA_MAX_AMSDU_LEN,
+ dev->wlan.max_amsdu_len >> 8) |
+ FIELD_PREP(MTK_WED_PAO_STA_MAX_AMSDU_NUM,
+ dev->wlan.max_amsdu_nums));
+
+ wed_w32(dev, MTK_WED_PAO_STA_INFO, MTK_WED_PAO_STA_INFO_DO_INIT);
+
+ if (mtk_wed_poll_busy(dev, MTK_WED_PAO_STA_INFO,
+ MTK_WED_PAO_STA_INFO_DO_INIT)) {
+ dev_err(dev->hw->dev, "mtk_wed%d: pao init failed!\n",
+ dev->hw->index);
+ return;
}
+
+ /* init pao txd src */
+ wed_set(dev, MTK_WED_PAO_HIFTXD_CFG,
+ FIELD_PREP(MTK_WED_PAO_HIFTXD_SRC, dev->hw->index));
+
+ /* init qmem */
+ wed_set(dev, MTK_WED_PAO_PSE, MTK_WED_PAO_PSE_RESET);
+ if (mtk_wed_poll_busy(dev, MTK_WED_PAO_MON_QMEM_STS1, BIT(29))) {
+ pr_info("%s: init pao qmem fail\n", __func__);
+ return;
+ }
+
+ /* eagle E1 PCIE1 tx ring 22 flow control issue */
+ if (dev->wlan.chip_id == 0x7991) {
+ wed_clr(dev, MTK_WED_PAO_AMSDU_FIFO,
+ MTK_WED_PAO_AMSDU_IS_PRIOR0_RING);
+ }
+
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_PAO_EN);
+
+ return;
}
-static void
-mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
+static int
+mtk_wed_hwrro_init(struct mtk_wed_device *dev)
{
-#define MTK_WFMDA_RX_DMA_EN BIT(2)
+ if (!mtk_wed_get_rx_capa(dev))
+ return 0;
+ wed_set(dev, MTK_WED_RRO_PG_BM_RX_DMAM,
+ FIELD_PREP(MTK_WED_RRO_PG_BM_RX_SDL0, 128));
+
+ wed_w32(dev, MTK_WED_RRO_PG_BM_BASE,
+ dev->rx_page_buf_ring.desc_phys);
+
+ wed_w32(dev, MTK_WED_RRO_PG_BM_INIT_PTR,
+ MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX |
+ FIELD_PREP(MTK_WED_RRO_PG_BM_SW_TAIL_IDX,
+ MTK_WED_RX_PG_BM_CNT));
+
+ /* enable rx_page_bm to fetch dmad */
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN);
+
+ return 0;
+}
+
+static int
+mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev,
+ struct mtk_wed_ring *ring)
+{
int timeout = 3;
- u32 cur_idx, regs;
+ u32 cur_idx;
do {
- regs = MTK_WED_WPDMA_RING_RX_DATA(idx) +
- MTK_WED_RING_OFS_CPU_IDX;
- cur_idx = wed_r32(dev, regs);
+ cur_idx = readl(ring->wpdma + MTK_WED_RING_OFS_CPU_IDX);
if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
break;
@@ -546,70 +838,133 @@ mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
timeout--;
} while (timeout > 0);
- if (timeout) {
- unsigned int val;
+ return timeout;
+}
- val = wifi_r32(dev, dev->wlan.wpdma_rx_glo -
- dev->wlan.phy_base);
- val |= MTK_WFMDA_RX_DMA_EN;
- wifi_w32(dev, dev->wlan.wpdma_rx_glo -
- dev->wlan.phy_base, val);
+static void
+mtk_wed_set_512_support(struct mtk_wed_device *dev, bool en)
+{
+ if (en) {
+ wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
+ wed_w32(dev, MTK_WED_TXP_DW1,
+ FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
} else {
- dev_err(dev->hw->dev, "mtk_wed%d: rx(%d) dma enable failed!\n",
- dev->hw->index, idx);
+ wed_w32(dev, MTK_WED_TXP_DW1,
+ FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
+ wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
}
}
static void
mtk_wed_dma_enable(struct mtk_wed_device *dev)
{
- wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
- MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
+#define MTK_WFMDA_RX_DMA_EN BIT(2)
+
+ if (dev->hw->version == 1)
+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
+ MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
wed_set(dev, MTK_WED_GLO_CFG,
MTK_WED_GLO_CFG_TX_DMA_EN |
MTK_WED_GLO_CFG_RX_DMA_EN);
+
+ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG,
+ FIELD_PREP(MTK_WED_WDMA_RX_PREF_BURST_SIZE, 0x10) |
+ FIELD_PREP(MTK_WED_WDMA_RX_PREF_LOW_THRES, 0x8));
+ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
+ MTK_WED_WDMA_RX_PREF_DDONE2_EN);
+
+ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, MTK_WED_WDMA_RX_PREF_EN);
+
wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
- MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN |
+ MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR);
wed_set(dev, MTK_WED_WDMA_GLO_CFG,
MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
wdma_set(dev, MTK_WDMA_GLO_CFG,
- MTK_WDMA_GLO_CFG_TX_DMA_EN |
+ MTK_WDMA_GLO_CFG_TX_DMA_EN /*|
MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
- MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES*/);
- if (dev->ver == MTK_WED_V1) {
+ if (dev->hw->version == 1) {
wdma_set(dev, MTK_WDMA_GLO_CFG,
MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
} else {
int idx = 0;
- wed_set(dev, MTK_WED_WPDMA_CTRL,
- MTK_WED_WPDMA_CTRL_SDL1_FIXED);
-
- wed_set(dev, MTK_WED_WDMA_GLO_CFG,
- MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
- MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
+ if (mtk_wed_get_rx_capa(dev))
+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
+ if (dev->hw->version == 3) {
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST);
+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4);
+
+ wdma_set(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
+ //wdma_w32(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
+ if (mtk_wed_get_rx_capa(dev)) {
+ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
+ MTK_WED_WPDMA_RX_D_PREF_EN |
+ FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE, 0x10) |
+ FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_LOW_THRES, 0x8));
+
+ wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN);
+
+ wdma_set(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
+
+ wdma_set(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
+ }
+ }
+
wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
+ if (!mtk_wed_get_rx_capa(dev))
+ return;
+
+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RXD_READ_LEN);
wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
MTK_WED_WPDMA_RX_D_RX_DRV_EN |
FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
0x2));
- for (idx = 0; idx < dev->hw->ring_num; idx++)
- mtk_wed_check_wfdma_rx_fill(dev, idx);
+ for (idx = 0; idx < dev->hw->ring_num; idx++) {
+ struct mtk_wed_ring *ring = &dev->rx_ring[idx];
+
+ if(!(ring->flags & MTK_WED_RING_CONFIGURED))
+ continue;
+
+ if(mtk_wed_check_wfdma_rx_fill(dev, ring)) {
+ unsigned int val;
+
+ val = wifi_r32(dev, dev->wlan.wpdma_rx_glo -
+ dev->wlan.phy_base);
+ val |= MTK_WFMDA_RX_DMA_EN;
+
+ wifi_w32(dev, dev->wlan.wpdma_rx_glo -
+ dev->wlan.phy_base, val);
+
+ dev_err(dev->hw->dev, "mtk_wed%d: rx(%d) dma enable successful!\n",
+ dev->hw->index, idx);
+ } else {
+ dev_err(dev->hw->dev, "mtk_wed%d: rx(%d) dma enable failed!\n",
+ dev->hw->index, idx);
+ }
+ }
}
}
@@ -644,15 +999,20 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
MTK_WED_WPDMA_RX_D_RX_DRV_EN);
wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
- }
- mtk_wed_set_512_support(dev, false);
+ if (dev->hw->version == 3 && mtk_wed_get_rx_capa(dev)) {
+ wdma_clr(dev, MTK_WDMA_PREF_TX_CFG,
+ MTK_WDMA_PREF_TX_CFG_PREF_EN);
+ wdma_clr(dev, MTK_WDMA_PREF_RX_CFG,
+ MTK_WDMA_PREF_RX_CFG_PREF_EN);
+ }
+ }
}
static void
mtk_wed_stop(struct mtk_wed_device *dev)
{
- if (dev->ver > MTK_WED_V1) {
+ if (mtk_wed_get_rx_capa(dev)) {
wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0);
}
@@ -677,13 +1037,21 @@ mtk_wed_deinit(struct mtk_wed_device *dev)
MTK_WED_CTRL_WED_TX_BM_EN |
MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
- if (dev->hw->ver == 1)
+ if (dev->hw->version == 1)
return;
wed_clr(dev, MTK_WED_CTRL,
MTK_WED_CTRL_RX_ROUTE_QM_EN |
MTK_WED_CTRL_WED_RX_BM_EN |
MTK_WED_CTRL_RX_RRO_QM_EN);
+
+ if (dev->hw->version == 3) {
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_PAO_EN);
+ wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_TX_PAO);
+ wed_clr(dev, MTK_WED_PCIE_INT_CTRL,
+ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
+ MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER);
+ }
}
static void
@@ -702,9 +1070,9 @@ mtk_wed_detach(struct mtk_wed_device *dev)
mtk_wdma_tx_reset(dev);
- mtk_wed_free_buffer(dev);
+ mtk_wed_free_tx_buffer(dev);
mtk_wed_free_tx_rings(dev);
- if (dev->ver > MTK_WED_V1) {
+ if (mtk_wed_get_rx_capa(dev)) {
mtk_wed_wo_reset(dev);
mtk_wed_free_rx_rings(dev);
mtk_wed_wo_exit(hw);
@@ -731,24 +1099,29 @@ mtk_wed_detach(struct mtk_wed_device *dev)
static void
mtk_wed_bus_init(struct mtk_wed_device *dev)
{
-#define PCIE_BASE_ADDR0 0x11280000
+ switch (dev->wlan.bus_type) {
+ case MTK_WED_BUS_PCIE: {
+ struct device_node *np = dev->hw->eth->dev->of_node;
+ struct regmap *regs;
- if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
- struct device_node *node;
- void __iomem * base_addr;
- u32 value = 0;
+ if (dev->hw->version == 2) {
+ regs = syscon_regmap_lookup_by_phandle(np,
+ "mediatek,wed-pcie");
+ if (IS_ERR(regs))
+ break;
- node = of_parse_phandle(dev->hw->node, "mediatek,wed_pcie", 0);
- if (!node) {
- pr_err("%s: no wed_pcie node\n", __func__);
- return;
+ regmap_update_bits(regs, 0, BIT(0), BIT(0));
}
- base_addr = of_iomap(node, 0);
-
- value = readl(base_addr);
- value |= BIT(0);
- writel(value, base_addr);
+ if (dev->wlan.msi) {
+ wed_w32(dev, MTK_WED_PCIE_CFG_INTM, dev->hw->pci_base| 0xc08);
+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, dev->hw->pci_base | 0xc04);
+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(8));
+ } else {
+ wed_w32(dev, MTK_WED_PCIE_CFG_INTM, dev->hw->pci_base | 0x180);
+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, dev->hw->pci_base | 0x184);
+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
+ }
wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));
@@ -756,45 +1129,53 @@ mtk_wed_bus_init(struct mtk_wed_device *dev)
/* pcie interrupt control: pola/source selection */
wed_set(dev, MTK_WED_PCIE_INT_CTRL,
MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
- FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
- wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
-
- value = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
- value = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
- wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
- wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);
+ MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER |
+ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, dev->hw->index));
- value = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
- value = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
-
- wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
- wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
-
- /* pola setting */
- value = wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
- wed_set(dev, MTK_WED_PCIE_INT_CTRL,
- MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
- } else if (dev->wlan.bus_type == MTK_WED_BUS_AXI) {
+ break;
+ }
+ case MTK_WED_BUS_AXI:
wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
MTK_WED_WPDMA_INT_CTRL_SIG_SRC |
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0));
+ break;
+ default:
+ break;
}
+
return;
}
static void
mtk_wed_set_wpdma(struct mtk_wed_device *dev)
{
- if (dev->ver > MTK_WED_V1) {
+ if (dev->hw->version == 1) {
+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
+ } else {
+ mtk_wed_bus_init(dev);
+
wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
- wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
+ wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
- wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
- wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
- } else {
- wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
+ if (mtk_wed_get_rx_capa(dev)) {
+ int i;
+
+ wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
+ wed_w32(dev, MTK_WED_WPDMA_RX_RING0, dev->wlan.wpdma_rx[0]);
+ if (dev->wlan.wpdma_rx[1])
+ wed_w32(dev, MTK_WED_WPDMA_RX_RING1, dev->wlan.wpdma_rx[1]);
+
+ if (dev->wlan.hwrro) {
+ wed_w32(dev, MTK_WED_RRO_RX_D_CFG(0), dev->wlan.wpdma_rx_rro[0]);
+ wed_w32(dev, MTK_WED_RRO_RX_D_CFG(1), dev->wlan.wpdma_rx_rro[1]);
+ for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) {
+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING_CFG(i),
+ dev->wlan.wpdma_rx_pg + i * 0x10);
+ }
+ }
+ }
}
}
@@ -806,21 +1187,25 @@ mtk_wed_hw_init_early(struct mtk_wed_device *dev)
mtk_wed_deinit(dev);
mtk_wed_reset(dev, MTK_WED_RESET_WED);
- if (dev->ver > MTK_WED_V1)
- mtk_wed_bus_init(dev);
-
mtk_wed_set_wpdma(dev);
- mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
- MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
- MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
- set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
- MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
- MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
+ if (dev->hw->version == 3) {
+ mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE;
+ set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2);
+ } else {
+ mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
+ MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
+ set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
+ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
+ }
+
wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
- if (dev->ver == MTK_WED_V1) {
+ if (dev->hw->version == 1) {
u32 offset;
+
offset = dev->hw->index ? 0x04000400 : 0;
wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
@@ -907,11 +1292,16 @@ mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
} while (1);
/* configure RX_ROUTE_QM */
- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
- wed_set(dev, MTK_WED_RTQM_GLO_CFG,
- FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
+ if (dev->hw->version == 2) {
+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
+ wed_set(dev, MTK_WED_RTQM_GLO_CFG,
+ FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
+ } else {
+ wed_set(dev, MTK_WED_RTQM_ENQ_CFG0,
+ FIELD_PREP(MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT, 0x3 + dev->hw->index));
+ }
/* enable RX_ROUTE_QM */
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
@@ -920,23 +1310,45 @@ mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
static void
mtk_wed_tx_hw_init(struct mtk_wed_device *dev)
{
- int size = dev->buf_ring.size;
+ int size = dev->wlan.nbuf;
int rev_size = MTK_WED_TX_RING_SIZE / 2;
- int thr = 1;
+ int thr_lo = 1, thr_hi = 1;
- if (dev->ver > MTK_WED_V1) {
+ if (dev->hw->version == 1) {
+ wed_w32(dev, MTK_WED_TX_BM_CTRL,
+ MTK_WED_TX_BM_CTRL_PAUSE |
+ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, size / 128) |
+ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, rev_size / 128));
+ } else {
size = MTK_WED_WDMA_RING_SIZE * ARRAY_SIZE(dev->tx_wdma) +
- dev->buf_ring.size;
+ dev->tx_buf_ring.size;
rev_size = size;
- thr = 0;
+ thr_lo = 0;
+ thr_hi = MTK_WED_TX_BM_DYN_THR_HI;
+
+ wed_w32(dev, MTK_WED_TX_TKID_CTRL,
+ MTK_WED_TX_TKID_CTRL_PAUSE |
+ FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
+ size / 128) |
+ FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
+ size / 128));
+
+ /* return SKBID + SDP back to bm */
+ if (dev->ver == 3) {
+ wed_set(dev, MTK_WED_TX_TKID_CTRL,
+ MTK_WED_TX_TKID_CTRL_FREE_FORMAT);
+ size = dev->wlan.nbuf;
+ rev_size = size;
+ } else {
+ wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
+ MTK_WED_TX_TKID_DYN_THR_HI);
+ }
}
- wed_w32(dev, MTK_WED_TX_BM_CTRL,
- MTK_WED_TX_BM_CTRL_PAUSE |
- FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, size / 128) |
- FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, rev_size / 128));
+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
- wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);
+ wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys);
wed_w32(dev, MTK_WED_TX_BM_TKID,
FIELD_PREP(MTK_WED_TX_BM_TKID_START,
@@ -946,25 +1358,44 @@ mtk_wed_tx_hw_init(struct mtk_wed_device *dev)
wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
- wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
- FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, thr) |
- MTK_WED_TX_BM_DYN_THR_HI);
+ if (dev->hw->version < 3)
+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, thr_lo) |
+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, thr_hi));
+ else {
+ /* switch to the new buffer-management (BM) scheme (legacy BM disabled below) */
+ wed_w32(dev, MTK_WED_TX_BM_INIT_PTR, dev->tx_buf_ring.pkt_nums |
+ MTK_WED_TX_BM_INIT_SW_TAIL_IDX);
+ wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_LEGACY_EN);
+ }
- if (dev->ver > MTK_WED_V1) {
+ if (dev->hw->version != 1) {
wed_w32(dev, MTK_WED_TX_TKID_CTRL,
MTK_WED_TX_TKID_CTRL_PAUSE |
FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
- dev->buf_ring.size / 128) |
+ size / 128) |
FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
- dev->buf_ring.size / 128));
- wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
- FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
- MTK_WED_TX_TKID_DYN_THR_HI);
+ size / 128));
+
+ /* return SKBID + SDP back to bm */
+ if (dev->ver == 3)
+ wed_set(dev, MTK_WED_TX_TKID_CTRL,
+ MTK_WED_TX_TKID_CTRL_FREE_FORMAT);
+ else
+ wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
+ MTK_WED_TX_TKID_DYN_THR_HI);
}
- mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
+ wed_w32(dev, MTK_WED_TX_BM_TKID,
+ FIELD_PREP(MTK_WED_TX_BM_TKID_START,
+ dev->wlan.token_start) |
+ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
+ dev->wlan.token_start + dev->wlan.nbuf - 1));
+ wed_w32(dev, MTK_WED_TX_BM_INIT_PTR, dev->tx_buf_ring.pkt_nums |
+ MTK_WED_TX_BM_INIT_SW_TAIL_IDX);
wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
- if (dev->ver > MTK_WED_V1)
+ if (dev->hw->version != 1)
wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
}
@@ -977,7 +1408,26 @@ mtk_wed_rx_hw_init(struct mtk_wed_device *dev)
wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
+ /* reset prefetch index of ring */
+ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX,
+ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
+ wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX,
+ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
+
+ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX,
+ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
+ wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX,
+ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
+
+ /* reset prefetch FIFO of ring */
+ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG,
+ MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR |
+ MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR);
+ wed_w32(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG, 0);
+
mtk_wed_rx_bm_hw_init(dev);
+ if (dev->wlan.hwrro)
+ mtk_wed_hwrro_init(dev);
mtk_wed_rro_hw_init(dev);
mtk_wed_route_qm_hw_init(dev);
}
@@ -991,7 +1441,7 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
dev->init_done = true;
mtk_wed_set_ext_int(dev, false);
mtk_wed_tx_hw_init(dev);
- if (dev->ver > MTK_WED_V1)
+ if (mtk_wed_get_rx_capa(dev))
mtk_wed_rx_hw_init(dev);
}
@@ -1015,26 +1465,6 @@ mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size, int scale, bool tx)
}
}
-static u32
-mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
-{
- if (wed_r32(dev, reg) & mask)
- return true;
-
- return false;
-}
-
-static int
-mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
-{
- int sleep = 1000;
- int timeout = 100 * sleep;
- u32 val;
-
- return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
- timeout, false, dev, reg, mask);
-}
-
static void
mtk_wed_rx_reset(struct mtk_wed_device *dev)
{
@@ -1133,7 +1563,7 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
mtk_wed_ring_reset(desc, MTK_WED_RX_RING_SIZE, 1, false);
}
- mtk_wed_free_rx_bm(dev);
+ mtk_wed_free_rx_buffer(dev);
}
@@ -1271,12 +1701,15 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev,
int idx, int size, bool reset)
{
struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
+ int scale = dev->hw->version > 1 ? 2 : 1;
if(!reset)
if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
- dev->ver, true))
+ scale, true))
return -ENOMEM;
+ wdma->flags |= MTK_WED_RING_CONFIGURED;
+
wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
wdma->desc_phys);
wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
@@ -1296,12 +1729,33 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev,
int idx, int size, bool reset)
{
struct mtk_wed_ring *wdma = &dev->rx_wdma[idx];
+ int scale = dev->hw->version > 1 ? 2 : 1;
if (!reset)
if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
- dev->ver, true))
+ scale, true))
return -ENOMEM;
+ if (dev->hw->version == 3) {
+ struct mtk_wdma_desc *desc = wdma->desc;
+ int i;
+
+ for (i = 0; i < MTK_WED_WDMA_RING_SIZE; i++) {
+ desc->buf0 = 0;
+ desc->ctrl = MTK_WDMA_DESC_CTRL_DMA_DONE;
+ desc->buf1 = 0;
+ desc->info = MTK_WDMA_TXD0_DESC_INFO_DMA_DONE;
+ desc++;
+ desc->buf0 = 0;
+ desc->ctrl = MTK_WDMA_DESC_CTRL_DMA_DONE;
+ desc->buf1 = 0;
+ desc->info = MTK_WDMA_TXD1_DESC_INFO_DMA_DONE;
+ desc++;
+ }
+ }
+
+ wdma->flags |= MTK_WED_RING_CONFIGURED;
+
wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
wdma->desc_phys);
wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
@@ -1312,7 +1766,7 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev,
MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);
if (reset)
mtk_wed_ring_reset(wdma->desc, MTK_WED_WDMA_RING_SIZE,
- dev->ver, true);
+ scale, true);
if (idx == 0) {
wed_w32(dev, MTK_WED_WDMA_RING_TX
+ MTK_WED_RING_OFS_BASE, wdma->desc_phys);
@@ -1395,7 +1849,7 @@ mtk_wed_send_msg(struct mtk_wed_device *dev, int cmd_id, void *data, int len)
{
struct mtk_wed_wo *wo = dev->hw->wed_wo;
- if (dev->ver == MTK_WED_V1)
+ if (!mtk_wed_get_rx_capa(dev))
return 0;
return mtk_wed_mcu_send_msg(wo, MODULE_ID_WO, cmd_id, data, len, true);
@@ -1420,24 +1874,106 @@ mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb,
}
}
+static void
+mtk_wed_start_hwrro(struct mtk_wed_device *dev, u32 irq_mask)
+{
+ int idx, ret;
+
+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
+ wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
+
+ if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hwrro)
+ return;
+
+ wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR);
+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, MTK_WED_RRO_MSDU_PG_DRV_CLR);
+
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_RX,
+ MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN |
+ MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR |
+ MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN |
+ MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG,
+ dev->wlan.rro_rx_tbit[0]) |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG,
+ dev->wlan.rro_rx_tbit[1]));
+
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG,
+ MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN |
+ MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR |
+ MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN |
+ MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR |
+ MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN |
+ MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG,
+ dev->wlan.rx_pg_tbit[0]) |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG,
+ dev->wlan.rx_pg_tbit[1])|
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG,
+ dev->wlan.rx_pg_tbit[2]));
+
+ /*
+ * RRO_MSDU_PG_RING2_CFG1_FLD_DRV_EN should be enabled after
+ * WM FWDL has completed, otherwise the RRO_MSDU_PG ring may be broken
+ */
+ wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, MTK_WED_RRO_MSDU_PG_DRV_EN);
+
+ for (idx = 0; idx < MTK_WED_RX_QUEUES; idx++) {
+ struct mtk_wed_ring *ring = &dev->rx_rro_ring[idx];
+
+ if(!(ring->flags & MTK_WED_RING_CONFIGURED))
+ continue;
+
+ ret = mtk_wed_check_wfdma_rx_fill(dev, ring);
+ if (!ret)
+ dev_err(dev->hw->dev, "mtk_wed%d: rx_rro_ring(%d) init failed!\n",
+ dev->hw->index, idx);
+ }
+
+ for (idx = 0; idx < MTK_WED_RX_PAGE_QUEUES; idx++){
+ struct mtk_wed_ring *ring = &dev->rx_page_ring[idx];
+ if(!(ring->flags & MTK_WED_RING_CONFIGURED))
+ continue;
+
+ ret = mtk_wed_check_wfdma_rx_fill(dev, ring);
+ if (!ret)
+ dev_err(dev->hw->dev, "mtk_wed%d: rx_page_ring(%d) init failed!\n",
+ dev->hw->index, idx);
+ }
+}
+
static void
mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
{
int i, ret;
- if (dev->ver > MTK_WED_V1)
- ret = mtk_wed_rx_bm_alloc(dev);
+ if (mtk_wed_get_rx_capa(dev)) {
+ ret = mtk_wed_rx_buffer_alloc(dev);
+ if (ret)
+ return;
+
+ if (dev->wlan.hwrro)
+ mtk_wed_rx_page_buffer_alloc(dev);
+ }
for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
if (!dev->tx_wdma[i].desc)
mtk_wed_wdma_rx_ring_setup(dev, i, 16, false);
+ for (i = 0; i < ARRAY_SIZE(dev->rx_page_ring); i++) {
+ u32 count = MTK_WED_RRO_MSDU_PG_CTRL0(i) +
+ MTK_WED_RING_OFS_COUNT;
+
+ if (!wed_r32(dev, count))
+ wed_w32(dev, count, 1);
+ }
+
mtk_wed_hw_init(dev);
mtk_wed_set_int(dev, irq_mask);
mtk_wed_set_ext_int(dev, true);
- if (dev->ver == MTK_WED_V1) {
+ if (dev->hw->version == 1) {
u32 val;
val = dev->wlan.wpdma_phys |
@@ -1448,33 +1984,52 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
val |= BIT(1);
val |= BIT(0);
regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
- } else {
+ } else if (mtk_wed_get_rx_capa(dev)) {
/* driver set mid ready and only once */
wed_w32(dev, MTK_WED_EXT_INT_MASK1,
MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
wed_w32(dev, MTK_WED_EXT_INT_MASK2,
MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
+ if (dev->hw->version == 3)
+ wed_w32(dev, MTK_WED_EXT_INT_MASK3,
+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
wed_r32(dev, MTK_WED_EXT_INT_MASK1);
wed_r32(dev, MTK_WED_EXT_INT_MASK2);
+ if (dev->hw->version == 3)
+ wed_r32(dev, MTK_WED_EXT_INT_MASK3);
ret = mtk_wed_rro_cfg(dev);
if (ret)
return;
}
- mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
+
+ if (dev->hw->version == 2)
+ mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
+ else if (dev->hw->version == 3)
+ mtk_wed_pao_init(dev);
mtk_wed_dma_enable(dev);
dev->running = true;
}
+static int
+mtk_wed_get_pci_base(struct mtk_wed_device *dev)
+{
+ if (dev->hw->index == 0)
+ return MTK_WED_PCIE_BASE0;
+ else if (dev->hw->index == 1)
+ return MTK_WED_PCIE_BASE1;
+ else
+ return MTK_WED_PCIE_BASE2;
+}
+
static int
mtk_wed_attach(struct mtk_wed_device *dev)
__releases(RCU)
{
struct mtk_wed_hw *hw;
struct device *device;
- u16 ver;
int ret = 0;
RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
@@ -1494,34 +2049,30 @@ mtk_wed_attach(struct mtk_wed_device *dev)
goto out;
}
- device = dev->wlan.bus_type == MTK_WED_BUS_PCIE
- ? &dev->wlan.pci_dev->dev
- : &dev->wlan.platform_dev->dev;
+ device = dev->wlan.bus_type == MTK_WED_BUS_PCIE ?
+ &dev->wlan.pci_dev->dev
+ : &dev->wlan.platform_dev->dev;
dev_info(device, "attaching wed device %d version %d\n",
- hw->index, hw->ver);
+ hw->index, hw->version);
dev->hw = hw;
dev->dev = hw->dev;
dev->irq = hw->irq;
dev->wdma_idx = hw->index;
+ dev->ver = hw->version;
+
+ if (dev->hw->version == 3)
+ dev->hw->pci_base = mtk_wed_get_pci_base(dev);
if (hw->eth->dma_dev == hw->eth->dev &&
of_dma_is_coherent(hw->eth->dev->of_node))
mtk_eth_set_dma_device(hw->eth, hw->dev);
- dev->ver = FIELD_GET(MTK_WED_REV_ID_MAJOR,
- wed_r32(dev, MTK_WED_REV_ID));
- if (dev->ver > MTK_WED_V1)
- ver = FIELD_GET(MTK_WED_REV_ID_MINOR,
- wed_r32(dev, MTK_WED_REV_ID));
-
- dev->rev_id = ((dev->ver << 28) | ver << 16);
-
- ret = mtk_wed_buffer_alloc(dev);
+ ret = mtk_wed_tx_buffer_alloc(dev);
if (ret)
goto error;
- if (dev->ver > MTK_WED_V1) {
+ if (mtk_wed_get_rx_capa(dev)) {
ret = mtk_wed_rro_alloc(dev);
if (ret)
goto error;
@@ -1533,15 +2084,20 @@ mtk_wed_attach(struct mtk_wed_device *dev)
init_completion(&dev->wlan_reset_done);
atomic_set(&dev->fe_reset, 0);
- if (dev->ver == MTK_WED_V1)
+ if (dev->hw->version != 1)
+ dev->rev_id = wed_r32(dev, MTK_WED_REV_ID);
+ else
regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
BIT(hw->index), 0);
- else
+
+ if (mtk_wed_get_rx_capa(dev))
ret = mtk_wed_wo_init(hw);
error:
- if (ret)
+ if (ret) {
+ pr_info("%s: detach wed\n", __func__);
mtk_wed_detach(dev);
+ }
out:
mutex_unlock(&hw_lock);
@@ -1576,8 +2132,26 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx,
if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE, reset))
return -ENOMEM;
+ if (dev->hw->version == 3 && idx == 1) {
+ /* reset prefetch index */
+ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG,
+ MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR |
+ MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR);
+
+ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
+ MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR |
+ MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR);
+
+ /* reset prefetch FIFO */
+ wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG,
+ MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR |
+ MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR);
+ wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG, 0);
+ }
+
ring->reg_base = MTK_WED_RING_TX(idx);
ring->wpdma = regs;
+ ring->flags |= MTK_WED_RING_CONFIGURED;
/* WED -> WPDMA */
wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
@@ -1599,7 +2173,7 @@ mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
struct mtk_wed_ring *ring = &dev->txfree_ring;
int i, idx = 1;
- if(dev->ver > MTK_WED_V1)
+ if(dev->hw->version > 1)
idx = 0;
/*
@@ -1638,6 +2212,7 @@ mtk_wed_rx_ring_setup(struct mtk_wed_device *dev,
ring->reg_base = MTK_WED_RING_RX_DATA(idx);
ring->wpdma = regs;
+ ring->flags |= MTK_WED_RING_CONFIGURED;
dev->hw->ring_num = idx + 1;
/* WPDMA -> WED */
@@ -1652,6 +2227,129 @@ mtk_wed_rx_ring_setup(struct mtk_wed_device *dev,
return 0;
}
+static int
+mtk_wed_rro_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
+{
+ struct mtk_wed_ring *ring = &dev->rx_rro_ring[idx];
+
+ ring->wpdma = regs;
+
+ wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_BASE,
+ readl(regs));
+ wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_COUNT,
+ readl(regs + MTK_WED_RING_OFS_COUNT));
+
+ ring->flags |= MTK_WED_RING_CONFIGURED;
+
+ return 0;
+}
+
+static int
+mtk_wed_msdu_pg_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
+{
+ struct mtk_wed_ring *ring = &dev->rx_page_ring[idx];
+
+ ring->wpdma = regs;
+
+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_BASE,
+ readl(regs));
+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_COUNT,
+ readl(regs + MTK_WED_RING_OFS_COUNT));
+
+ ring->flags |= MTK_WED_RING_CONFIGURED;
+
+ return 0;
+}
+
+static int
+mtk_wed_ind_rx_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
+{
+ struct mtk_wed_ring *ring = &dev->ind_cmd_ring;
+ u32 val = readl(regs + MTK_WED_RING_OFS_COUNT);
+ int i = 0, cnt = 0;
+
+ ring->wpdma = regs;
+
+ if (readl(regs) & 0xf)
+ pr_info("%s(): address is not 16-byte alignment\n", __func__);
+
+ wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_BASE,
+ readl(regs) & 0xfffffff0);
+
+ wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_COUNT,
+ readl(regs + MTK_WED_RING_OFS_COUNT));
+
+ /* program the ACK SN (sequence number) address register (ack_sn_addr) */
+ wed_w32(dev, MTK_WED_RRO_CFG0, dev->wlan.phy_base +
+ dev->wlan.ind_cmd.ack_sn_addr);
+ wed_w32(dev, MTK_WED_RRO_CFG1,
+ FIELD_PREP(MTK_WED_RRO_CFG1_MAX_WIN_SZ,
+ dev->wlan.ind_cmd.win_size) |
+ FIELD_PREP(MTK_WED_RRO_CFG1_PARTICL_SE_ID,
+ dev->wlan.ind_cmd.particular_sid));
+
+ /* particular session addr element */
+ wed_w32(dev, MTK_WED_ADDR_ELEM_CFG0, dev->wlan.ind_cmd.particular_se_phys);
+
+ for (i = 0; i < dev->wlan.ind_cmd.se_group_nums; i++) {
+ wed_w32(dev, MTK_WED_RADDR_ELEM_TBL_WDATA,
+ dev->wlan.ind_cmd.addr_elem_phys[i] >> 4);
+ wed_w32(dev, MTK_WED_ADDR_ELEM_TBL_CFG,
+ MTK_WED_ADDR_ELEM_TBL_WR | (i & 0x7f));
+
+ val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
+ while (!(val & MTK_WED_ADDR_ELEM_TBL_WR_RDY) &&
+ cnt < 100) {
+ val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
+ cnt++;
+ }
+ if (cnt >= 100) {
+ dev_err(dev->hw->dev, "mtk_wed%d: write ba session base failed!\n",
+ dev->hw->index);
+ }
+ /*if (mtk_wed_poll_busy(dev, MTK_WED_ADDR_ELEM_TBL_CFG,
+ MTK_WED_ADDR_ELEM_TBL_WR_RDY)) {
+ dev_err(dev->hw->dev, "mtk_wed%d: write ba session base failed!\n",
+ dev->hw->index);
+ return -1;
+ }*/
+ }
+
+ /* pn check init */
+ for (i = 0; i < dev->wlan.ind_cmd.particular_sid; i++) {
+ wed_w32(dev, MTK_WED_PN_CHECK_WDATA_M,
+ MTK_WED_PN_CHECK_IS_FIRST);
+
+ wed_w32(dev, MTK_WED_PN_CHECK_CFG, MTK_WED_PN_CHECK_WR |
+ FIELD_PREP(MTK_WED_PN_CHECK_SE_ID, i));
+
+ cnt = 0;
+ val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
+ while (!(val & MTK_WED_PN_CHECK_WR_RDY) &&
+ cnt < 100) {
+ val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
+ cnt++;
+ }
+ if (cnt >= 100) {
+ dev_err(dev->hw->dev, "mtk_wed%d: session(%d) init failed!\n",
+ dev->hw->index, i);
+ }
+ /*if (mtk_wed_poll_busy(dev, MTK_WED_PN_CHECK_CFG,
+ MTK_WED_PN_CHECK_WR_RDY)) {
+ dev_err(dev->hw->dev, "mtk_wed%d: session(%d) init failed!\n",
+ dev->hw->index, i);
+ //return -1;
+ }*/
+ }
+
+ wed_w32(dev, MTK_WED_RX_IND_CMD_CNT0, MTK_WED_RX_IND_CMD_DBG_CNT_EN);
+
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN);
+
+ return 0;
+}
+
+
static u32
mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
{
@@ -1659,9 +2357,13 @@ mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
- val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
- if (!dev->hw->num_flows)
- val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
+ if (dev->hw->version == 3) {
+ val &= MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT;
+ } else {
+ val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+ if (!dev->hw->num_flows)
+ val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
+ }
if (val && net_ratelimit())
pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val);
@@ -1754,6 +2456,9 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
.tx_ring_setup = mtk_wed_tx_ring_setup,
.txfree_ring_setup = mtk_wed_txfree_ring_setup,
.rx_ring_setup = mtk_wed_rx_ring_setup,
+ .rro_rx_ring_setup = mtk_wed_rro_rx_ring_setup,
+ .msdu_pg_rx_ring_setup = mtk_wed_msdu_pg_rx_ring_setup,
+ .ind_rx_ring_setup = mtk_wed_ind_rx_ring_setup,
.msg_update = mtk_wed_send_msg,
.start = mtk_wed_start,
.stop = mtk_wed_stop,
@@ -1765,6 +2470,7 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
.detach = mtk_wed_detach,
.setup_tc = mtk_wed_eth_setup_tc,
.ppe_check = mtk_wed_ppe_check,
+ .start_hwrro = mtk_wed_start_hwrro,
};
struct device_node *eth_np = eth->dev->of_node;
struct platform_device *pdev;
@@ -1804,9 +2510,10 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
hw->wdma_phy = wdma_phy;
hw->index = index;
hw->irq = irq;
- hw->ver = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
+ hw->version = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3) ?
+ 3 : MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (hw->version == 1) {
hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
"mediatek,pcie-mirror");
hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
@@ -1821,7 +2528,6 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
regmap_write(hw->mirror, 0, 0);
regmap_write(hw->mirror, 4, 0);
}
- hw->ver = MTK_WED_V1;
}
mtk_wed_hw_add_debugfs(hw);
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
index 490873c..fcf7bd0 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
@@ -10,10 +10,13 @@
#include <linux/netdevice.h>
#define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000)
-#define MTK_WED_PKT_SIZE 1900
+#define MTK_WED_PKT_SIZE 1920//1900
#define MTK_WED_BUF_SIZE 2048
+#define MTK_WED_PAGE_BUF_SIZE 128
#define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
+#define MTK_WED_RX_PAGE_BUF_PER_PAGE (PAGE_SIZE / 128)
#define MTK_WED_RX_RING_SIZE 1536
+#define MTK_WED_RX_PG_BM_CNT 8192
#define MTK_WED_TX_RING_SIZE 2048
#define MTK_WED_WDMA_RING_SIZE 512
@@ -27,6 +30,9 @@
#define MTK_WED_RRO_QUE_CNT 8192
#define MTK_WED_MIOD_ENTRY_CNT 128
+#define MTK_WED_TX_BM_DMA_SIZE 65536
+#define MTK_WED_TX_BM_PKT_CNT 32768
+
#define MODULE_ID_WO 1
struct mtk_eth;
@@ -43,6 +49,8 @@ struct mtk_wed_hw {
struct dentry *debugfs_dir;
struct mtk_wed_device *wed_dev;
struct mtk_wed_wo *wed_wo;
+ struct mtk_wed_pao *wed_pao;
+ u32 pci_base;
u32 debugfs_reg;
u32 num_flows;
u32 wdma_phy;
@@ -50,7 +58,8 @@ struct mtk_wed_hw {
int ring_num;
int irq;
int index;
- u32 ver;
+ int token_id;
+ u32 version;
};
struct mtk_wdma_info {
@@ -58,6 +67,18 @@ struct mtk_wdma_info {
u8 queue;
u16 wcid;
u8 bss;
+ u32 usr_info;
+ u8 tid;
+ u8 is_fixedrate;
+ u8 is_prior;
+ u8 is_sp;
+ u8 hf;
+ u8 amsdu_en;
+};
+
+struct mtk_wed_pao {
+ char *hif_txd[32];
+ dma_addr_t hif_txd_phys[32];
};
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
index 4a9e684..51e3d7c 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
@@ -11,9 +11,11 @@ struct reg_dump {
u16 offset;
u8 type;
u8 base;
+ u32 mask;
};
enum {
+ DUMP_TYPE_END,
DUMP_TYPE_STRING,
DUMP_TYPE_WED,
DUMP_TYPE_WDMA,
@@ -23,8 +25,11 @@ enum {
DUMP_TYPE_WED_RRO,
};
+#define DUMP_END() { .type = DUMP_TYPE_END }
#define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
#define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ }
+#define DUMP_REG_MASK(_reg, _mask) { #_mask, MTK_##_reg, DUMP_TYPE_WED, 0, MTK_##_mask }
+
#define DUMP_RING(_prefix, _base, ...) \
{ _prefix " BASE", _base, __VA_ARGS__ }, \
{ _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \
@@ -32,6 +37,7 @@ enum {
{ _prefix " DIDX", _base + 0xc, __VA_ARGS__ }
#define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED)
+#define DUMP_WED_MASK(_reg, _mask) DUMP_REG_MASK(_reg, _mask)
#define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED)
#define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA)
@@ -52,36 +58,49 @@ print_reg_val(struct seq_file *s, const char *name, u32 val)
static void
dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
- const struct reg_dump *regs, int n_regs)
+ const struct reg_dump **regs)
{
- const struct reg_dump *cur;
+ const struct reg_dump **cur_o = regs, *cur;
+ bool newline = false;
u32 val;
- for (cur = regs; cur < &regs[n_regs]; cur++) {
- switch (cur->type) {
- case DUMP_TYPE_STRING:
- seq_printf(s, "%s======== %s:\n",
- cur > regs ? "\n" : "",
- cur->name);
- continue;
- case DUMP_TYPE_WED:
- case DUMP_TYPE_WED_RRO:
- val = wed_r32(dev, cur->offset);
- break;
- case DUMP_TYPE_WDMA:
- val = wdma_r32(dev, cur->offset);
- break;
- case DUMP_TYPE_WPDMA_TX:
- val = wpdma_tx_r32(dev, cur->base, cur->offset);
- break;
- case DUMP_TYPE_WPDMA_TXFREE:
- val = wpdma_txfree_r32(dev, cur->offset);
- break;
- case DUMP_TYPE_WPDMA_RX:
- val = wpdma_rx_r32(dev, cur->base, cur->offset);
- break;
+ while (*cur_o) {
+ cur = *cur_o;
+
+ while (cur->type != DUMP_TYPE_END) {
+ switch (cur->type) {
+ case DUMP_TYPE_STRING:
+ seq_printf(s, "%s======== %s:\n",
+ newline ? "\n" : "",
+ cur->name);
+ newline = true;
+ cur++;
+ continue;
+ case DUMP_TYPE_WED:
+ case DUMP_TYPE_WED_RRO:
+ val = wed_r32(dev, cur->offset);
+ break;
+ case DUMP_TYPE_WDMA:
+ val = wdma_r32(dev, cur->offset);
+ break;
+ case DUMP_TYPE_WPDMA_TX:
+ val = wpdma_tx_r32(dev, cur->base, cur->offset);
+ break;
+ case DUMP_TYPE_WPDMA_TXFREE:
+ val = wpdma_txfree_r32(dev, cur->offset);
+ break;
+ case DUMP_TYPE_WPDMA_RX:
+ val = wpdma_rx_r32(dev, cur->base, cur->offset);
+ break;
+ }
+
+ if (cur->mask)
+ val = (cur->mask & val) >> (ffs(cur->mask) - 1);
+
+ print_reg_val(s, cur->name, val);
+ cur++;
}
- print_reg_val(s, cur->name, val);
+ cur_o++;
}
}
@@ -89,7 +108,7 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
static int
wed_txinfo_show(struct seq_file *s, void *data)
{
- static const struct reg_dump regs[] = {
+ static const struct reg_dump regs_common[] = {
DUMP_STR("WED TX"),
DUMP_WED(WED_TX_MIB(0)),
DUMP_WED_RING(WED_RING_TX(0)),
@@ -128,16 +147,32 @@ wed_txinfo_show(struct seq_file *s, void *data)
DUMP_WDMA_RING(WDMA_RING_RX(0)),
DUMP_WDMA_RING(WDMA_RING_RX(1)),
- DUMP_STR("TX FREE"),
+ DUMP_STR("WED TX FREE"),
DUMP_WED(WED_RX_MIB(0)),
+ DUMP_WED_RING(WED_RING_RX(0)),
+ DUMP_WED(WED_WPDMA_RX_COHERENT_MIB(0)),
+
+ DUMP_WED(WED_RX_MIB(1)),
+ DUMP_WED_RING(WED_RING_RX(1)),
+ DUMP_WED(WED_WPDMA_RX_COHERENT_MIB(1)),
+ DUMP_STR("WED_WPDMA TX FREE"),
+ DUMP_WED_RING(WED_WPDMA_RING_RX(0)),
+ DUMP_WED_RING(WED_WPDMA_RING_RX(1)),
+ DUMP_END(),
+ };
+
+ static const struct reg_dump *regs[] = {
+ &regs_common[0],
+ NULL,
};
+
struct mtk_wed_hw *hw = s->private;
struct mtk_wed_device *dev = hw->wed_dev;
if (!dev)
return 0;
- dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
+ dump_wed_regs(s, dev, regs);
return 0;
}
@@ -146,7 +181,7 @@ DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
static int
wed_rxinfo_show(struct seq_file *s, void *data)
{
- static const struct reg_dump regs[] = {
+ static const struct reg_dump regs_common[] = {
DUMP_STR("WPDMA RX"),
DUMP_WPDMA_RX_RING(0),
DUMP_WPDMA_RX_RING(1),
@@ -164,7 +199,7 @@ wed_rxinfo_show(struct seq_file *s, void *data)
DUMP_WED_RING(WED_RING_RX_DATA(0)),
DUMP_WED_RING(WED_RING_RX_DATA(1)),
- DUMP_STR("WED RRO"),
+ DUMP_STR("WED WO RRO"),
DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0),
DUMP_WED(WED_RROQM_MID_MIB),
DUMP_WED(WED_RROQM_MOD_MIB),
@@ -175,16 +210,6 @@ wed_rxinfo_show(struct seq_file *s, void *data)
DUMP_WED(WED_RROQM_FDBK_ANC_MIB),
DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB),
- DUMP_STR("WED Route QM"),
- DUMP_WED(WED_RTQM_R2H_MIB(0)),
- DUMP_WED(WED_RTQM_R2Q_MIB(0)),
- DUMP_WED(WED_RTQM_Q2H_MIB(0)),
- DUMP_WED(WED_RTQM_R2H_MIB(1)),
- DUMP_WED(WED_RTQM_R2Q_MIB(1)),
- DUMP_WED(WED_RTQM_Q2H_MIB(1)),
- DUMP_WED(WED_RTQM_Q2N_MIB),
- DUMP_WED(WED_RTQM_Q2B_MIB),
- DUMP_WED(WED_RTQM_PFDBK_MIB),
DUMP_STR("WED WDMA TX"),
DUMP_WED(WED_WDMA_TX_MIB),
@@ -205,15 +230,99 @@ wed_rxinfo_show(struct seq_file *s, void *data)
DUMP_WED(WED_RX_BM_INTF2),
DUMP_WED(WED_RX_BM_INTF),
DUMP_WED(WED_RX_BM_ERR_STS),
+ DUMP_END()
+ };
+
+ static const struct reg_dump regs_v2[] = {
+ DUMP_STR("WED Route QM"),
+ DUMP_WED(WED_RTQM_R2H_MIB(0)),
+ DUMP_WED(WED_RTQM_R2Q_MIB(0)),
+ DUMP_WED(WED_RTQM_Q2H_MIB(0)),
+ DUMP_WED(WED_RTQM_R2H_MIB(1)),
+ DUMP_WED(WED_RTQM_R2Q_MIB(1)),
+ DUMP_WED(WED_RTQM_Q2H_MIB(1)),
+ DUMP_WED(WED_RTQM_Q2N_MIB),
+ DUMP_WED(WED_RTQM_Q2B_MIB),
+ DUMP_WED(WED_RTQM_PFDBK_MIB),
+
+ DUMP_END()
+ };
+
+ static const struct reg_dump regs_v3[] = {
+ DUMP_STR("WED RX RRO DATA"),
+ DUMP_WED_RING(WED_RRO_RX_D_RX(0)),
+ DUMP_WED_RING(WED_RRO_RX_D_RX(1)),
+
+ DUMP_STR("WED RX MSDU PAGE"),
+ DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(0)),
+ DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(1)),
+ DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(2)),
+
+ DUMP_STR("WED RX IND CMD"),
+ DUMP_WED(WED_IND_CMD_RX_CTRL1),
+ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL2, WED_IND_CMD_MAX_CNT),
+ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0, WED_IND_CMD_PROC_IDX),
+ DUMP_WED_MASK(RRO_IND_CMD_SIGNATURE, RRO_IND_CMD_DMA_IDX),
+ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0, WED_IND_CMD_MAGIC_CNT),
+ DUMP_WED_MASK(RRO_IND_CMD_SIGNATURE, RRO_IND_CMD_MAGIC_CNT),
+ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0,
+ WED_IND_CMD_PREFETCH_FREE_CNT),
+ DUMP_WED_MASK(WED_RRO_CFG1, WED_RRO_CFG1_PARTICL_SE_ID),
+
+ DUMP_STR("WED ADDR ELEM"),
+ DUMP_WED(WED_ADDR_ELEM_CFG0),
+ DUMP_WED_MASK(WED_ADDR_ELEM_CFG1,
+ WED_ADDR_ELEM_PREFETCH_FREE_CNT),
+
+ DUMP_STR("WED Route QM"),
+ DUMP_WED(WED_RTQM_ENQ_I2Q_DMAD_CNT),
+ DUMP_WED(WED_RTQM_ENQ_I2N_DMAD_CNT),
+ DUMP_WED(WED_RTQM_ENQ_I2Q_PKT_CNT),
+ DUMP_WED(WED_RTQM_ENQ_I2N_PKT_CNT),
+ DUMP_WED(WED_RTQM_ENQ_USED_ENTRY_CNT),
+ DUMP_WED(WED_RTQM_ENQ_ERR_CNT),
+
+ DUMP_WED(WED_RTQM_DEQ_DMAD_CNT),
+ DUMP_WED(WED_RTQM_DEQ_Q2I_DMAD_CNT),
+ DUMP_WED(WED_RTQM_DEQ_PKT_CNT),
+ DUMP_WED(WED_RTQM_DEQ_Q2I_PKT_CNT),
+ DUMP_WED(WED_RTQM_DEQ_USED_PFDBK_CNT),
+ DUMP_WED(WED_RTQM_DEQ_ERR_CNT),
+
+ DUMP_END()
+ };
+
+ static const struct reg_dump *regs_new_v2[] = {
+ &regs_common[0],
+ &regs_v2[0],
+ NULL,
+ };
+
+ static const struct reg_dump *regs_new_v3[] = {
+ &regs_common[0],
+ &regs_v3[0],
+ NULL,
};
struct mtk_wed_hw *hw = s->private;
struct mtk_wed_device *dev = hw->wed_dev;
+ const struct reg_dump **regs;
if (!dev)
return 0;
- dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
+	switch (dev->hw->version) {
+ case 2:
+ regs = regs_new_v2;
+ break;
+ case 3:
+ regs = regs_new_v3;
+ break;
+ default:
+ return 0;
+ }
+
+ dump_wed_regs(s, dev, regs);
return 0;
}
@@ -248,6 +357,383 @@ mtk_wed_reg_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mtk_wed_reg_get, mtk_wed_reg_set,
"0x%08llx\n");
+static int
+wed_token_txd_show(struct seq_file *s, void *data)
+{
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+ struct dma_page_info *page_list = dev->tx_buf_ring.pages;
+ int token = dev->wlan.token_start;
+ u32 val = hw->token_id, size = 1;
+ int page_idx = (val - token) / 2;
+ int i;
+
+ if (val < token) {
+ size = val;
+ page_idx = 0;
+ }
+
+ for (i = 0; i < size; i += MTK_WED_BUF_PER_PAGE) {
+ void *page = page_list[page_idx++].addr;
+ void *buf;
+ int j;
+
+ if (!page)
+ break;
+
+ buf = page_to_virt(page);
+
+ for (j = 0; j < MTK_WED_BUF_PER_PAGE; j++) {
+ printk("[TXD]:token id = %d\n", token + 2 * (page_idx - 1) + j);
+			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, (u8 *)buf, 128, false);
+ seq_printf(s, "\n");
+
+ buf += MTK_WED_BUF_SIZE;
+ }
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(wed_token_txd);
+
+static int
+wed_pao_show(struct seq_file *s, void *data)
+{
+ static const struct reg_dump regs_common[] = {
+		DUMP_STR("PAO AMSDU INFO"),
+ DUMP_WED(WED_PAO_MON_AMSDU_FIFO_DMAD),
+
+		DUMP_STR("PAO AMSDU ENG0 INFO"),
+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(0)),
+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(0)),
+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(0)),
+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(0)),
+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(0)),
+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(0),
+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(0),
+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(0),
+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(0),
+ WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(0),
+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
+
+		DUMP_STR("PAO AMSDU ENG1 INFO"),
+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(1)),
+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(1)),
+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(1)),
+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(1)),
+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(1)),
+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(1),
+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(1),
+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(1),
+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
+		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(1),
+			      WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
+		DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(1),
+			      WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
+
+		DUMP_STR("PAO AMSDU ENG2 INFO"),
+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(2)),
+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(2)),
+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(2)),
+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(2)),
+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(2)),
+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(2),
+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(2),
+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(2),
+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(2),
+ WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(2),
+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
+
+		DUMP_STR("PAO AMSDU ENG3 INFO"),
+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(3)),
+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(3)),
+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(3)),
+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(3)),
+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(3)),
+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(3),
+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(3),
+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(3),
+ WED_PAO_AMSDU_ENG_CUR_ENTRY),
+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(3),
+ WED_PAO_AMSDU_ENG_MAX_BUF_MERGED),
+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(3),
+ WED_PAO_AMSDU_ENG_MAX_MSDU_MERGED),
+
+		DUMP_STR("PAO AMSDU ENG4 INFO"),
+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_DMAD(4)),
+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QFPL(4)),
+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENI(4)),
+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_QENO(4)),
+ DUMP_WED(WED_PAO_MON_AMSDU_ENG_MERG(4)),
+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(4),
+ WED_PAO_AMSDU_ENG_MAX_PL_CNT),
+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT8(4),
+ WED_PAO_AMSDU_ENG_MAX_QGPP_CNT),
+ DUMP_WED_MASK(WED_PAO_MON_AMSDU_ENG_CNT9(4),