| From 342fdc50b761309e75974554cdcf790a2d09e134 Mon Sep 17 00:00:00 2001 |
| From: Sujuan Chen <sujuan.chen@mediatek.com> |
| Date: Thu, 2 Jun 2022 15:32:07 +0800 |
Subject: [PATCH 4/8] 9993-add-wed: add Wireless Ethernet Dispatch (WED) support

Add a driver for the Wireless Ethernet Dispatch (WED) cores found on
MT7622, which can forward flows offloaded by the PPE directly to the
WDMA rings of a PCIe WLAN card.

This adds the wed, hifsys and pcie-mirror nodes to the MT7622 device
tree, introduces a separate dma_dev pointer in struct mtk_eth so the
DMA device can be switched at runtime when WED claims coherent DMA,
reworks the PPE flow offload code around software mtk_flow_entry
objects (including L2/bridge flows tracked in an rhashtable), and
teaches the flow offload path to program WDMA destination info (wdma
index, ring, bss, wcid) into FOE entries.

| Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com> |
| --- |
| arch/arm64/boot/dts/mediatek/mt7622.dtsi | 32 +- |
| drivers/net/ethernet/mediatek/Kconfig | 4 + |
| drivers/net/ethernet/mediatek/Makefile | 5 + |
| drivers/net/ethernet/mediatek/mtk_eth_soc.c | 136 ++- |
| drivers/net/ethernet/mediatek/mtk_eth_soc.h | 14 +- |
| drivers/net/ethernet/mediatek/mtk_ppe.c | 373 +++++++- |
| drivers/net/ethernet/mediatek/mtk_ppe.h | 89 +- |
| .../net/ethernet/mediatek/mtk_ppe_debugfs.c | 4 +- |
| .../net/ethernet/mediatek/mtk_ppe_offload.c | 167 +++- |
| drivers/net/ethernet/mediatek/mtk_wed.c | 876 ++++++++++++++++++ |
| drivers/net/ethernet/mediatek/mtk_wed.h | 135 +++ |
| .../net/ethernet/mediatek/mtk_wed_debugfs.c | 175 ++++ |
| drivers/net/ethernet/mediatek/mtk_wed_ops.c | 8 + |
| drivers/net/ethernet/mediatek/mtk_wed_regs.h | 251 +++++ |
| include/linux/netdevice.h | 7 + |
| include/linux/soc/mediatek/mtk_wed.h | 131 +++ |
| net/core/dev.c | 4 + |
| 17 files changed, 2283 insertions(+), 128 deletions(-) |
| mode change 100755 => 100644 drivers/net/ethernet/mediatek/Kconfig |
| mode change 100755 => 100644 drivers/net/ethernet/mediatek/Makefile |
| mode change 100755 => 100644 drivers/net/ethernet/mediatek/mtk_eth_soc.c |
| mode change 100755 => 100644 drivers/net/ethernet/mediatek/mtk_eth_soc.h |
| mode change 100644 => 100755 drivers/net/ethernet/mediatek/mtk_ppe.c |
| create mode 100644 drivers/net/ethernet/mediatek/mtk_wed.c |
| create mode 100644 drivers/net/ethernet/mediatek/mtk_wed.h |
| create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_debugfs.c |
| create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_ops.c |
| create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_regs.h |
| create mode 100644 include/linux/soc/mediatek/mtk_wed.h |
| |
| diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi |
| index 369e01389..d0fbc367e 100644 |
| --- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi |
| +++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi |
| @@ -338,7 +338,7 @@ |
| }; |
| |
| cci_control2: slave-if@5000 { |
| - compatible = "arm,cci-400-ctrl-if"; |
| + compatible = "arm,cci-400-ctrl-if", "syscon"; |
| interface-type = "ace"; |
| reg = <0x5000 0x1000>; |
| }; |
| @@ -920,6 +920,11 @@ |
| }; |
| }; |
| |
| + hifsys: syscon@1af00000 { |
| + compatible = "mediatek,mt7622-hifsys", "syscon"; |
| + reg = <0 0x1af00000 0 0x70>; |
| + }; |
| + |
| ethsys: syscon@1b000000 { |
| compatible = "mediatek,mt7622-ethsys", |
| "syscon"; |
| @@ -938,6 +943,26 @@ |
| #dma-cells = <1>; |
| }; |
| |
| + pcie_mirror: pcie-mirror@10000400 { |
| + compatible = "mediatek,mt7622-pcie-mirror", |
| + "syscon"; |
| + reg = <0 0x10000400 0 0x10>; |
| + }; |
| + |
| + wed0: wed@1020a000 { |
| + compatible = "mediatek,mt7622-wed", |
| + "syscon"; |
| + reg = <0 0x1020a000 0 0x1000>; |
| + interrupts = <GIC_SPI 214 IRQ_TYPE_LEVEL_LOW>; |
| + }; |
| + |
| + wed1: wed@1020b000 { |
| + compatible = "mediatek,mt7622-wed", |
| + "syscon"; |
| + reg = <0 0x1020b000 0 0x1000>; |
| + interrupts = <GIC_SPI 215 IRQ_TYPE_LEVEL_LOW>; |
| + }; |
| + |
| eth: ethernet@1b100000 { |
| compatible = "mediatek,mt7622-eth", |
| "mediatek,mt2701-eth", |
| @@ -964,6 +989,11 @@ |
| power-domains = <&scpsys MT7622_POWER_DOMAIN_ETHSYS>; |
| mediatek,ethsys = <ðsys>; |
| mediatek,sgmiisys = <&sgmiisys>; |
| + mediatek,cci-control = <&cci_control2>; |
| + mediatek,wed = <&wed0>, <&wed1>; |
| + mediatek,pcie-mirror = <&pcie_mirror>; |
| + mediatek,hifsys = <&hifsys>; |
| + dma-coherent; |
| #address-cells = <1>; |
| #size-cells = <0>; |
| status = "disabled"; |
| diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig |
| old mode 100755 |
| new mode 100644 |
| index 42e6b38d2..8ab6615a3 |
| --- a/drivers/net/ethernet/mediatek/Kconfig |
| +++ b/drivers/net/ethernet/mediatek/Kconfig |
| @@ -7,6 +7,10 @@ config NET_VENDOR_MEDIATEK |
| |
| if NET_VENDOR_MEDIATEK |
| |
| +config NET_MEDIATEK_SOC_WED |
| + depends on ARCH_MEDIATEK || COMPILE_TEST |
| + def_bool NET_MEDIATEK_SOC != n |
| + |
| config NET_MEDIATEK_SOC |
| tristate "MediaTek SoC Gigabit Ethernet support" |
| select PHYLINK |
| diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile |
| old mode 100755 |
| new mode 100644 |
| index 0a6af99f1..3528f1b3c |
| --- a/drivers/net/ethernet/mediatek/Makefile |
| +++ b/drivers/net/ethernet/mediatek/Makefile |
| @@ -6,4 +6,9 @@ |
| obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o |
| mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_eth_dbg.o mtk_eth_reset.o \ |
| mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o |
| +mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o |
| +ifdef CONFIG_DEBUG_FS |
| +mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o |
| +endif |
| +obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o |
| obj-$(CONFIG_NET_MEDIATEK_HNAT) += mtk_hnat/ |
| diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c |
| old mode 100755 |
| new mode 100644 |
| index 819d8a0be..2121335a1 |
| --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c |
| +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c |
| @@ -9,6 +9,7 @@ |
| #include <linux/of_device.h> |
| #include <linux/of_mdio.h> |
| #include <linux/of_net.h> |
| +#include <linux/of_address.h> |
| #include <linux/mfd/syscon.h> |
| #include <linux/regmap.h> |
| #include <linux/clk.h> |
| @@ -19,13 +20,15 @@ |
| #include <linux/interrupt.h> |
| #include <linux/pinctrl/devinfo.h> |
| #include <linux/phylink.h> |
| #include <linux/gpio/consumer.h> |
| +#include <linux/bitfield.h> |
| #include <net/dsa.h> |
| |
| #include "mtk_eth_soc.h" |
| #include "mtk_eth_dbg.h" |
| #include "mtk_eth_reset.h" |
| #include "mtk_hnat/hnat.h" |
| +#include "mtk_wed.h" |
| |
| #if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE) |
| #include "mtk_hnat/nf_hnat_mtk.h" |
| @@ -850,7 +853,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) |
| int i; |
| |
| if (!eth->soc->has_sram) { |
| - eth->scratch_ring = dma_alloc_coherent(eth->dev, |
| + eth->scratch_ring = dma_alloc_coherent(eth->dma_dev, |
| cnt * soc->txrx.txd_size, |
| ð->phy_scratch_ring, |
| GFP_ATOMIC); |
| @@ -866,10 +869,10 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) |
| if (unlikely(!eth->scratch_head)) |
| return -ENOMEM; |
| |
| - dma_addr = dma_map_single(eth->dev, |
| + dma_addr = dma_map_single(eth->dma_dev, |
| eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE, |
| DMA_FROM_DEVICE); |
| - if (unlikely(dma_mapping_error(eth->dev, dma_addr))) |
| + if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) |
| return -ENOMEM; |
| |
| phy_ring_tail = eth->phy_scratch_ring + |
| @@ -933,26 +936,26 @@ static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, |
| { |
| if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { |
| if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { |
| - dma_unmap_single(eth->dev, |
| + dma_unmap_single(eth->dma_dev, |
| dma_unmap_addr(tx_buf, dma_addr0), |
| dma_unmap_len(tx_buf, dma_len0), |
| DMA_TO_DEVICE); |
| } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { |
| - dma_unmap_page(eth->dev, |
| + dma_unmap_page(eth->dma_dev, |
| dma_unmap_addr(tx_buf, dma_addr0), |
| dma_unmap_len(tx_buf, dma_len0), |
| DMA_TO_DEVICE); |
| } |
| } else { |
| if (dma_unmap_len(tx_buf, dma_len0)) { |
| - dma_unmap_page(eth->dev, |
| + dma_unmap_page(eth->dma_dev, |
| dma_unmap_addr(tx_buf, dma_addr0), |
| dma_unmap_len(tx_buf, dma_len0), |
| DMA_TO_DEVICE); |
| } |
| |
| if (dma_unmap_len(tx_buf, dma_len1)) { |
| - dma_unmap_page(eth->dev, |
| + dma_unmap_page(eth->dma_dev, |
| dma_unmap_addr(tx_buf, dma_addr1), |
| dma_unmap_len(tx_buf, dma_len1), |
| DMA_TO_DEVICE); |
| @@ -1017,9 +1020,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, |
| itx_buf = mtk_desc_to_tx_buf(ring, itxd); |
| memset(itx_buf, 0, sizeof(*itx_buf)); |
| |
| - txd_info.addr = dma_map_single(eth->dev, skb->data, txd_info.size, |
| + txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size, |
| DMA_TO_DEVICE); |
| - if (unlikely(dma_mapping_error(eth->dev, txd_info.addr))) |
| + if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) |
| return -ENOMEM; |
| |
| WRITE_ONCE(itxd->txd1, mapped_addr); |
| @@ -1114,10 +1117,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, |
| txd_info.qid = skb->mark & MTK_QDMA_TX_MASK; |
| txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 && |
| !(frag_size - txd_info.size); |
| - txd_info.addr = skb_frag_dma_map(eth->dev, frag, |
| + txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag, |
| offset, txd_info.size, |
| DMA_TO_DEVICE); |
| - if (unlikely(dma_mapping_error(eth->dev, txd_info.addr))) |
| + if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) |
| goto err_dma; |
| |
| mtk_tx_set_dma_desc(skb, dev, txd, &txd_info); |
| @@ -1384,6 +1387,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, |
| struct net_device *netdev; |
| unsigned int pktlen; |
| dma_addr_t dma_addr; |
| + u32 hash, reason; |
| int mac = 0; |
| |
| if (eth->hwlro) |
| @@ -1427,18 +1431,18 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, |
| netdev->stats.rx_dropped++; |
| goto release_desc; |
| } |
| - dma_addr = dma_map_single(eth->dev, |
| + dma_addr = dma_map_single(eth->dma_dev, |
| new_data + NET_SKB_PAD + |
| eth->ip_align, |
| ring->buf_size, |
| DMA_FROM_DEVICE); |
| - if (unlikely(dma_mapping_error(eth->dev, dma_addr))) { |
| + if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) { |
| skb_free_frag(new_data); |
| netdev->stats.rx_dropped++; |
| goto release_desc; |
| } |
| |
| - dma_unmap_single(eth->dev, trxd.rxd1, |
| + dma_unmap_single(eth->dma_dev, trxd.rxd1, |
| ring->buf_size, DMA_FROM_DEVICE); |
| |
| /* receive data */ |
| @@ -1463,6 +1467,17 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, |
| skb_checksum_none_assert(skb); |
| skb->protocol = eth_type_trans(skb, netdev); |
| |
| + hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY; |
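+	/* an all-ones FOE entry index in rxd4 means the PPE found no entry */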
| + if (hash != MTK_RXD4_FOE_ENTRY) { |
| + hash = jhash_1word(hash, 0); |
| + skb_set_hash(skb, hash, PKT_HASH_TYPE_L4); |
| + } |
| + |
| + reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4); |
| + if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) |
| + mtk_ppe_check_skb(eth->ppe, skb, |
| + trxd.rxd4 & MTK_RXD4_FOE_ENTRY); |
| + |
| if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { |
| if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { |
| if (trxd.rxd3 & RX_DMA_VTAG_V2) |
| @@ -1748,7 +1763,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth) |
| goto no_tx_mem; |
| |
| if (!eth->soc->has_sram) |
| - ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz, |
| + ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz, |
| &ring->phys, GFP_KERNEL); |
| else { |
| ring->dma = eth->scratch_ring + MTK_DMA_SIZE; |
| @@ -1780,6 +1795,6 @@ static int mtk_tx_alloc(struct mtk_eth *eth) |
| * descriptors in ring->dma_pdma. |
| */ |
| if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { |
| - ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz, |
| + ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz, |
| &ring->phys_pdma, GFP_KERNEL); |
| if (!ring->dma_pdma) |
| @@ -1839,6 +1854,6 @@ static void mtk_tx_clean(struct mtk_eth *eth) |
| } |
| |
| if (!eth->soc->has_sram && ring->dma) { |
| - dma_free_coherent(eth->dev, |
| + dma_free_coherent(eth->dma_dev, |
| MTK_DMA_SIZE * soc->txrx.txd_size, |
| ring->dma, ring->phys); |
| @@ -1847,6 +1862,6 @@ static void mtk_tx_clean(struct mtk_eth *eth) |
| } |
| |
| if (ring->dma_pdma) { |
| - dma_free_coherent(eth->dev, |
| + dma_free_coherent(eth->dma_dev, |
| MTK_DMA_SIZE * soc->txrx.txd_size, |
| ring->dma_pdma, ring->phys_pdma); |
| @@ -1892,7 +1907,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) |
| |
| if ((!eth->soc->has_sram) || (eth->soc->has_sram |
| && (rx_flag != MTK_RX_FLAGS_NORMAL))) |
| - ring->dma = dma_alloc_coherent(eth->dev, |
| + ring->dma = dma_alloc_coherent(eth->dma_dev, |
| rx_dma_size * eth->soc->txrx.rxd_size, |
| &ring->phys, GFP_KERNEL); |
| else { |
| @@ -1907,13 +1922,13 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) |
| return -ENOMEM; |
| |
| for (i = 0; i < rx_dma_size; i++) { |
| struct mtk_rx_dma_v2 *rxd; |
| |
| - dma_addr_t dma_addr = dma_map_single(eth->dev, |
| + dma_addr_t dma_addr = dma_map_single(eth->dma_dev, |
| ring->data[i] + NET_SKB_PAD + eth->ip_align, |
| ring->buf_size, |
| DMA_FROM_DEVICE); |
| - if (unlikely(dma_mapping_error(eth->dev, dma_addr))) |
| + if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) |
| return -ENOMEM; |
| |
| rxd = ring->dma + i * eth->soc->txrx.rxd_size; |
| @@ -1968,8 +1983,8 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, int in_s |
| rxd = ring->dma + i * eth->soc->txrx.rxd_size; |
| if (!rxd->rxd1) |
| continue; |
| |
| - dma_unmap_single(eth->dev, |
| + dma_unmap_single(eth->dma_dev, |
| rxd->rxd1, |
| ring->buf_size, |
| DMA_FROM_DEVICE); |
| @@ -1982,7 +1997,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, int in_s |
| return; |
| |
| if (ring->dma) { |
| - dma_free_coherent(eth->dev, |
| + dma_free_coherent(eth->dma_dev, |
| ring->dma_size * eth->soc->txrx.rxd_size, |
| ring->dma, |
| ring->phys); |
| @@ -2462,6 +2477,6 @@ static void mtk_dma_free(struct mtk_eth *eth) |
| if (eth->netdev[i]) |
| netdev_reset_queue(eth->netdev[i]); |
| if ( !eth->soc->has_sram && eth->scratch_ring) { |
| - dma_free_coherent(eth->dev, |
| + dma_free_coherent(eth->dma_dev, |
| MTK_DMA_SIZE * soc->txrx.txd_size, |
| eth->scratch_ring, eth->phy_scratch_ring); |
| @@ -2661,7 +2676,7 @@ static int mtk_open(struct net_device *dev) |
| if (err) |
| return err; |
| |
| - if (eth->soc->offload_version && mtk_ppe_start(ð->ppe) == 0) |
| + if (eth->soc->offload_version && mtk_ppe_start(eth->ppe) == 0) |
| gdm_config = MTK_GDMA_TO_PPE; |
| |
| mtk_gdm_config(eth, gdm_config); |
| @@ -2778,7 +2793,7 @@ static int mtk_stop(struct net_device *dev) |
| mtk_dma_free(eth); |
| |
| if (eth->soc->offload_version) |
| - mtk_ppe_stop(ð->ppe); |
| + mtk_ppe_stop(eth->ppe); |
| |
| return 0; |
| } |
| @@ -2855,6 +2870,8 @@ static int mtk_napi_init(struct mtk_eth *eth) |
| |
| static int mtk_hw_init(struct mtk_eth *eth, u32 type) |
| { |
| + u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA | |
| + ETHSYS_DMA_AG_MAP_PPE; |
| int i, ret = 0; |
| |
| pr_info("[%s] reset_lock:%d, force:%d\n", __func__, |
| @@ -2872,6 +2889,10 @@ static int mtk_hw_init(struct mtk_eth *eth, u32 type) |
| goto err_disable_pm; |
| } |
| |
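+	/* make the ethernet DMA agents (PDMA/QDMA/PPE) use coherent DMA if
+	 * and only if the current DMA device is dma-coherent: the multiply
+	 * below expands to either the full agent mask or 0
+	 */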
| + if (eth->ethsys) |
| + regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask, |
| + of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask); |
| + |
| if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { |
| ret = device_reset(eth->dev); |
| if (ret) { |
| @@ -3501,6 +3522,35 @@ free_netdev: |
| return err; |
| } |
| |
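+/* Switch the device used for DMA mapping at runtime (e.g. when WED claims
+ * coherent DMA): bring all running netdevs down, swap dma_dev, then bring
+ * them back up so rings and buffers are remapped against the new device.
+ */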
| +void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev) |
| +{ |
| + struct net_device *dev, *tmp; |
| + LIST_HEAD(dev_list); |
| + int i; |
| + |
| + rtnl_lock(); |
| + |
| + for (i = 0; i < MTK_MAC_COUNT; i++) { |
| + dev = eth->netdev[i]; |
| + |
| + if (!dev || !(dev->flags & IFF_UP)) |
| + continue; |
| + |
| + list_add_tail(&dev->close_list, &dev_list); |
| + } |
| + |
| + dev_close_many(&dev_list, false); |
| + |
| + eth->dma_dev = dma_dev; |
| + |
| + list_for_each_entry_safe(dev, tmp, &dev_list, close_list) { |
| + list_del_init(&dev->close_list); |
| + dev_open(dev, NULL); |
| + } |
| + |
| + rtnl_unlock(); |
| +} |
| + |
| static int mtk_probe(struct platform_device *pdev) |
| { |
| struct device_node *mac_np; |
| @@ -3514,6 +3564,7 @@ static int mtk_probe(struct platform_device *pdev) |
| eth->soc = of_device_get_match_data(&pdev->dev); |
| |
| eth->dev = &pdev->dev; |
| + eth->dma_dev = &pdev->dev; |
| eth->base = devm_platform_ioremap_resource(pdev, 0); |
| if (IS_ERR(eth->base)) |
| return PTR_ERR(eth->base); |
| @@ -3567,6 +3618,16 @@ static int mtk_probe(struct platform_device *pdev) |
| } |
| } |
| |
| + if (of_dma_is_coherent(pdev->dev.of_node)) { |
| + struct regmap *cci; |
| + |
| + cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, |
| + "mediatek,cci-control"); |
| + /* enable CPU/bus coherency */ |
| + if (!IS_ERR(cci)) |
| + regmap_write(cci, 0, 3); |
| + } |
| + |
| if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { |
| eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii), |
| GFP_KERNEL); |
| @@ -3589,6 +3650,22 @@ static int mtk_probe(struct platform_device *pdev) |
| } |
| } |
| |
| + for (i = 0;; i++) { |
| + struct device_node *np = of_parse_phandle(pdev->dev.of_node, |
| + "mediatek,wed", i); |
| + static const u32 wdma_regs[] = { |
| + MTK_WDMA0_BASE, |
| + MTK_WDMA1_BASE |
| + }; |
| + void __iomem *wdma; |
| + |
| + if (!np || i >= ARRAY_SIZE(wdma_regs)) |
| + break; |
| + |
| + wdma = eth->base + wdma_regs[i]; |
| + mtk_wed_add_hw(np, eth, wdma, i); |
| + } |
| + |
| for (i = 0; i < MTK_MAX_IRQ_NUM; i++) { |
| if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0) |
| eth->irq[i] = eth->irq[0]; |
| @@ -3692,10 +3769,11 @@ static int mtk_probe(struct platform_device *pdev) |
| } |
| |
| if (eth->soc->offload_version) { |
| - err = mtk_ppe_init(ð->ppe, eth->dev, |
| - eth->base + MTK_ETH_PPE_BASE, 2); |
| - if (err) |
| + eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2); |
| + if (!eth->ppe) { |
| + err = -ENOMEM; |
| goto err_free_dev; |
| + } |
| |
| err = mtk_eth_offload_init(eth); |
| if (err) |
| diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h |
| old mode 100755 |
| new mode 100644 |
| index 349f98503..b52378bd6 |
| --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h |
| +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h |
| @@ -517,6 +517,9 @@ |
| #define RX_DMA_SPORT_MASK 0x7 |
| #endif |
| |
| +#define MTK_WDMA0_BASE 0x2800 |
| +#define MTK_WDMA1_BASE 0x2c00 |
| + |
| /* QDMA descriptor txd4 */ |
| #define TX_DMA_CHKSUM (0x7 << 29) |
| #define TX_DMA_TSO BIT(28) |
| @@ -704,6 +707,12 @@ |
| #define ETHSYS_FE_RST_CHK_IDLE_EN 0x28 |
| |
| |
| +/* ethernet dma channel agent map */ |
| +#define ETHSYS_DMA_AG_MAP 0x408 |
| +#define ETHSYS_DMA_AG_MAP_PDMA BIT(0) |
| +#define ETHSYS_DMA_AG_MAP_QDMA BIT(1) |
| +#define ETHSYS_DMA_AG_MAP_PPE BIT(2) |
| + |
| /* SGMII subsystem config registers */ |
| /* Register to auto-negotiation restart */ |
| #define SGMSYS_PCS_CONTROL_1 0x0 |
| @@ -1209,6 +1218,7 @@ struct mtk_reset_event { |
| /* struct mtk_eth - This is the main datasructure for holding the state |
| * of the driver |
| * @dev: The device pointer |
+ * @dma_dev:		The device pointer used for dma mapping/alloc
| * @base: The mapped register i/o base |
| * @page_lock: Make sure that register operations are atomic |
| * @tx_irq__lock: Make sure that IRQ register operations are atomic |
| @@ -1243,6 +1253,7 @@ struct mtk_reset_event { |
| |
| struct mtk_eth { |
| struct device *dev; |
| + struct device *dma_dev; |
| void __iomem *base; |
| spinlock_t page_lock; |
| spinlock_t tx_irq_lock; |
| @@ -1283,7 +1294,7 @@ struct mtk_eth { |
| spinlock_t syscfg0_lock; |
| struct timer_list mtk_dma_monitor_timer; |
| |
| - struct mtk_ppe ppe; |
| + struct mtk_ppe *ppe; |
| struct rhashtable flow_table; |
| }; |
| |
| @@ -1336,5 +1347,6 @@ void ethsys_reset(struct mtk_eth *eth, u32 reset_bits); |
| int mtk_eth_offload_init(struct mtk_eth *eth); |
| int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type, |
| void *type_data); |
| +void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev); |
| |
| #endif /* MTK_ETH_H */ |
| diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c |
| old mode 100644 |
| new mode 100755 |
| index 66298e223..3d75c22be |
| --- a/drivers/net/ethernet/mediatek/mtk_ppe.c |
| +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c |
| @@ -6,9 +6,22 @@ |
| #include <linux/iopoll.h> |
| #include <linux/etherdevice.h> |
| #include <linux/platform_device.h> |
| +#include <linux/if_ether.h> |
| +#include <linux/if_vlan.h> |
| +#include <net/dsa.h> |
| +#include "mtk_eth_soc.h" |
| #include "mtk_ppe.h" |
| #include "mtk_ppe_regs.h" |
| |
| +static DEFINE_SPINLOCK(ppe_lock); |
| + |
| +static const struct rhashtable_params mtk_flow_l2_ht_params = { |
| + .head_offset = offsetof(struct mtk_flow_entry, l2_node), |
| + .key_offset = offsetof(struct mtk_flow_entry, data.bridge), |
| + .key_len = offsetof(struct mtk_foe_bridge, key_end), |
| + .automatic_shrinking = true, |
| +}; |
| + |
| static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val) |
| { |
| writel(val, ppe->base + reg); |
| @@ -41,6 +54,11 @@ static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val) |
| return ppe_m32(ppe, reg, val, 0); |
| } |
| |
| +static u32 mtk_eth_timestamp(struct mtk_eth *eth) |
| +{ |
| + return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP; |
| +} |
| + |
| static int mtk_ppe_wait_busy(struct mtk_ppe *ppe) |
| { |
| int ret; |
| @@ -76,13 +94,6 @@ static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e) |
| u32 hash; |
| |
| switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) { |
| - case MTK_PPE_PKT_TYPE_BRIDGE: |
| - hv1 = e->bridge.src_mac_lo; |
| - hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16); |
| - hv2 = e->bridge.src_mac_hi >> 16; |
| - hv2 ^= e->bridge.dest_mac_lo; |
| - hv3 = e->bridge.dest_mac_hi; |
| - break; |
| case MTK_PPE_PKT_TYPE_IPV4_ROUTE: |
| case MTK_PPE_PKT_TYPE_IPV4_HNAPT: |
| hv1 = e->ipv4.orig.ports; |
| @@ -122,6 +133,9 @@ mtk_foe_entry_l2(struct mtk_foe_entry *entry) |
| { |
| int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); |
| |
| + if (type == MTK_PPE_PKT_TYPE_BRIDGE) |
| + return &entry->bridge.l2; |
| + |
| if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) |
| return &entry->ipv6.l2; |
| |
| @@ -133,6 +147,9 @@ mtk_foe_entry_ib2(struct mtk_foe_entry *entry) |
| { |
| int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); |
| |
| + if (type == MTK_PPE_PKT_TYPE_BRIDGE) |
| + return &entry->bridge.ib2; |
| + |
| if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) |
| return &entry->ipv6.ib2; |
| |
| @@ -167,7 +184,12 @@ int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto, |
| if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T) |
| entry->ipv6.ports = ports_pad; |
| |
| - if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) { |
| + if (type == MTK_PPE_PKT_TYPE_BRIDGE) { |
| + ether_addr_copy(entry->bridge.src_mac, src_mac); |
| + ether_addr_copy(entry->bridge.dest_mac, dest_mac); |
| + entry->bridge.ib2 = val; |
| + l2 = &entry->bridge.l2; |
| + } else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) { |
| entry->ipv6.ib2 = val; |
| l2 = &entry->ipv6.l2; |
| } else { |
| @@ -329,32 +351,167 @@ int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid) |
| return 0; |
| } |
| |
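+/* encode the WDMA destination (device index, ring, bss, wcid) into a FOE
+ * entry so the PPE can hand matching packets straight to the WLAN WDMA
+ */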
| +int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq, |
| + int bss, int wcid) |
| +{ |
| + struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry); |
| + u32 *ib2 = mtk_foe_entry_ib2(entry); |
| + |
| + *ib2 &= ~MTK_FOE_IB2_PORT_MG; |
| + *ib2 |= MTK_FOE_IB2_WDMA_WINFO; |
| + if (wdma_idx) |
| + *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX; |
| + |
| + l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) | |
| + FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) | |
| + FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq); |
| + |
| + return 0; |
| +} |
| + |
| static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry) |
| { |
| return !(entry->ib1 & MTK_FOE_IB1_STATIC) && |
| FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND; |
| } |
| |
| -int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, |
| - u16 timestamp) |
| +static bool |
| +mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data) |
| +{ |
| + int type, len; |
| + |
| + if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP) |
| + return false; |
| + |
| + type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1); |
| + if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE) |
| + len = offsetof(struct mtk_foe_entry, ipv6._rsv); |
| + else |
| + len = offsetof(struct mtk_foe_entry, ipv4.ib2); |
| + |
| + return !memcmp(&entry->data.data, &data->data, len - 4); |
| +} |
| + |
| +static void |
| +__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) |
| +{ |
| + struct hlist_head *head; |
| + struct hlist_node *tmp; |
| + |
| + if (entry->type == MTK_FLOW_TYPE_L2) { |
| + rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node, |
| + mtk_flow_l2_ht_params); |
| + |
| + head = &entry->l2_flows; |
| + hlist_for_each_entry_safe(entry, tmp, head, l2_data.list) |
| + __mtk_foe_entry_clear(ppe, entry); |
| + return; |
| + } |
| + |
| + hlist_del_init(&entry->list); |
| + if (entry->hash != 0xffff) { |
| + ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE; |
| + ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, |
| + MTK_FOE_STATE_INVALID); |
| + dma_wmb(); |
| + } |
| + entry->hash = 0xffff; |
| + |
| + if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW) |
| + return; |
| + |
| + hlist_del_init(&entry->l2_data.list); |
| + kfree(entry); |
| +} |
| + |
| +static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1) |
| +{ |
| + u16 timestamp; |
| + u16 now; |
| + |
| + now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP; |
| + timestamp = ib1 & MTK_FOE_IB1_BIND_TIMESTAMP; |
| + |
| + if (timestamp > now) |
| + return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now; |
| + else |
| + return now - timestamp; |
| +} |
| + |
| +static void |
| +mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) |
| { |
| + struct mtk_flow_entry *cur; |
| struct mtk_foe_entry *hwe; |
| - u32 hash; |
| + struct hlist_node *tmp; |
| + int idle; |
| + |
| + idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1); |
| + hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) { |
| + int cur_idle; |
| + u32 ib1; |
| + |
| + hwe = &ppe->foe_table[cur->hash]; |
| + ib1 = READ_ONCE(hwe->ib1); |
| + |
| + if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) { |
| + cur->hash = 0xffff; |
| + __mtk_foe_entry_clear(ppe, cur); |
| + continue; |
| + } |
| + |
| + cur_idle = __mtk_foe_entry_idle_time(ppe, ib1); |
| + if (cur_idle >= idle) |
| + continue; |
| + |
| + idle = cur_idle; |
| + entry->data.ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP; |
| + entry->data.ib1 |= hwe->ib1 & MTK_FOE_IB1_BIND_TIMESTAMP; |
| + } |
| +} |
| + |
| +static void |
| +mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) |
| +{ |
| + struct mtk_foe_entry *hwe; |
| + struct mtk_foe_entry foe; |
| + |
| + spin_lock_bh(&ppe_lock); |
| + |
| + if (entry->type == MTK_FLOW_TYPE_L2) { |
| + mtk_flow_entry_update_l2(ppe, entry); |
| + goto out; |
| + } |
| + |
| + if (entry->hash == 0xffff) |
| + goto out; |
| + |
| + hwe = &ppe->foe_table[entry->hash]; |
| + memcpy(&foe, hwe, sizeof(foe)); |
| + if (!mtk_flow_entry_match(entry, &foe)) { |
| + entry->hash = 0xffff; |
| + goto out; |
| + } |
| + |
| + entry->data.ib1 = foe.ib1; |
| + |
| +out: |
| + spin_unlock_bh(&ppe_lock); |
| +} |
| + |
| +static void |
| +__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, |
| + u16 hash) |
| +{ |
| + struct mtk_foe_entry *hwe; |
| + u16 timestamp; |
| |
| + timestamp = mtk_eth_timestamp(ppe->eth); |
| timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP; |
| entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP; |
| entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp); |
| |
| - hash = mtk_ppe_hash_entry(entry); |
| hwe = &ppe->foe_table[hash]; |
| - if (!mtk_foe_entry_usable(hwe)) { |
| - hwe++; |
| - hash++; |
| - |
| - if (!mtk_foe_entry_usable(hwe)) |
| - return -ENOSPC; |
| - } |
| - |
| memcpy(&hwe->data, &entry->data, sizeof(hwe->data)); |
| wmb(); |
| hwe->ib1 = entry->ib1; |
| @@ -362,32 +519,198 @@ int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, |
| dma_wmb(); |
| |
| mtk_ppe_cache_clear(ppe); |
| +} |
| |
| - return hash; |
| +void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) |
| +{ |
| + spin_lock_bh(&ppe_lock); |
| + __mtk_foe_entry_clear(ppe, entry); |
| + spin_unlock_bh(&ppe_lock); |
| +} |
| + |
| +static int |
| +mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) |
| +{ |
| + entry->type = MTK_FLOW_TYPE_L2; |
| + |
| + return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node, |
| + mtk_flow_l2_ht_params); |
| +} |
| + |
| +int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) |
| +{ |
| + int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1); |
| + u32 hash; |
| + |
| + if (type == MTK_PPE_PKT_TYPE_BRIDGE) |
| + return mtk_foe_entry_commit_l2(ppe, entry); |
| + |
| + hash = mtk_ppe_hash_entry(&entry->data); |
| + entry->hash = 0xffff; |
| + spin_lock_bh(&ppe_lock); |
| + hlist_add_head(&entry->list, &ppe->foe_flow[hash / 4]); |
| + spin_unlock_bh(&ppe_lock); |
| + |
| + return 0; |
| +} |
| + |
| +static void |
| +mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry, |
| + u16 hash) |
| +{ |
| + struct mtk_flow_entry *flow_info; |
| + struct mtk_foe_entry foe, *hwe; |
| + struct mtk_foe_mac_info *l2; |
| + u32 ib1_mask = MTK_FOE_IB1_PACKET_TYPE | MTK_FOE_IB1_UDP; |
| + int type; |
| + |
| + flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end), |
| + GFP_ATOMIC); |
| + if (!flow_info) |
| + return; |
| + |
| + flow_info->l2_data.base_flow = entry; |
| + flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW; |
| + flow_info->hash = hash; |
| + hlist_add_head(&flow_info->list, &ppe->foe_flow[hash / 4]); |
| + hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows); |
| + |
| + hwe = &ppe->foe_table[hash]; |
| + memcpy(&foe, hwe, sizeof(foe)); |
| + foe.ib1 &= ib1_mask; |
| + foe.ib1 |= entry->data.ib1 & ~ib1_mask; |
| + |
| + l2 = mtk_foe_entry_l2(&foe); |
| + memcpy(l2, &entry->data.bridge.l2, sizeof(*l2)); |
| + |
| + type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, foe.ib1); |
| + if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT) |
| + memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new)); |
| + else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP) |
| + l2->etype = ETH_P_IPV6; |
| + |
| + *mtk_foe_entry_ib2(&foe) = entry->data.bridge.ib2; |
| + |
| + __mtk_foe_entry_commit(ppe, &foe, hash); |
| } |
| |
| -int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base, |
| +void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash) |
| +{ |
| + struct hlist_head *head = &ppe->foe_flow[hash / 4]; |
| + struct mtk_foe_entry *hwe = &ppe->foe_table[hash]; |
| + struct mtk_flow_entry *entry; |
| + struct mtk_foe_bridge key = {}; |
| + struct hlist_node *n; |
| + struct ethhdr *eh; |
| + bool found = false; |
| + u8 *tag; |
| + |
| + spin_lock_bh(&ppe_lock); |
| + |
| + if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND) |
| + goto out; |
| + |
| + hlist_for_each_entry_safe(entry, n, head, list) { |
| + if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) { |
| + if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == |
| + MTK_FOE_STATE_BIND)) |
| + continue; |
| + |
| + entry->hash = 0xffff; |
| + __mtk_foe_entry_clear(ppe, entry); |
| + continue; |
| + } |
| + |
| + if (found || !mtk_flow_entry_match(entry, hwe)) { |
| + if (entry->hash != 0xffff) |
| + entry->hash = 0xffff; |
| + continue; |
| + } |
| + |
| + entry->hash = hash; |
| + __mtk_foe_entry_commit(ppe, &entry->data, hash); |
| + found = true; |
| + } |
| + |
| + if (found) |
| + goto out; |
| + |
| + if (!skb) |
| + goto out; |
| + |
| + eh = eth_hdr(skb); |
| + ether_addr_copy(key.dest_mac, eh->h_dest); |
| + ether_addr_copy(key.src_mac, eh->h_source); |
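+	/* skb->data points at the network header here, so a possible VLAN
+	 * tag ends directly in front of it
+	 */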
| + tag = skb->data - 2; |
| + key.vlan = 0; |
| + switch (skb->protocol) { |
| +#if IS_ENABLED(CONFIG_NET_DSA) |
| + case htons(ETH_P_XDSA): |
| + if (!netdev_uses_dsa(skb->dev) || |
| + skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK) |
| + goto out; |
| + |
| + tag += 4; |
| + if (get_unaligned_be16(tag) != ETH_P_8021Q) |
| + break; |
| + |
| + fallthrough; |
| +#endif |
| + case htons(ETH_P_8021Q): |
| + key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK; |
| + break; |
| + default: |
| + break; |
| + } |
| + |
| + entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params); |
| + if (!entry) |
| + goto out; |
| + |
| + mtk_foe_entry_commit_subflow(ppe, entry, hash); |
| + |
| +out: |
| + spin_unlock_bh(&ppe_lock); |
| +} |
| + |
| +int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) |
| +{ |
| + mtk_flow_entry_update(ppe, entry); |
| + |
| + return __mtk_foe_entry_idle_time(ppe, entry->data.ib1); |
| +} |
| + |
| +struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, |
| int version) |
| { |
| + struct device *dev = eth->dev; |
| struct mtk_foe_entry *foe; |
| + struct mtk_ppe *ppe; |
| + |
| + ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL); |
| + if (!ppe) |
| + return NULL; |
| + |
| + rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params); |
| |
	/* need to allocate a separate device, since the PPE DMA access is
| * not coherent. |
| */ |
| ppe->base = base; |
| + ppe->eth = eth; |
| ppe->dev = dev; |
| ppe->version = version; |
| |
| foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe), |
| &ppe->foe_phys, GFP_KERNEL); |
| if (!foe) |
| - return -ENOMEM; |
| + return NULL; |
| |
| ppe->foe_table = foe; |
| |
| mtk_ppe_debugfs_init(ppe); |
| |
| - return 0; |
| + return ppe; |
| } |
| |
| static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe) |
| @@ -395,7 +717,7 @@ static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe) |
| static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 }; |
| int i, k; |
| |
	memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));
| |
| if (!IS_ENABLED(CONFIG_SOC_MT7621)) |
| return; |
| @@ -443,7 +765,6 @@ int mtk_ppe_start(struct mtk_ppe *ppe) |
| MTK_PPE_FLOW_CFG_IP4_NAT | |
| MTK_PPE_FLOW_CFG_IP4_NAPT | |
| MTK_PPE_FLOW_CFG_IP4_DSLITE | |
| - MTK_PPE_FLOW_CFG_L2_BRIDGE | |
| MTK_PPE_FLOW_CFG_IP4_NAT_FRAG; |
| ppe_w32(ppe, MTK_PPE_FLOW_CFG, val); |
| |
| diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h |
| index 242fb8f2a..1f5cf1c9a 100644 |
| --- a/drivers/net/ethernet/mediatek/mtk_ppe.h |
| +++ b/drivers/net/ethernet/mediatek/mtk_ppe.h |
| @@ -6,6 +6,7 @@ |
| |
| #include <linux/kernel.h> |
| #include <linux/bitfield.h> |
| +#include <linux/rhashtable.h> |
| |
| #define MTK_ETH_PPE_BASE 0xc00 |
| |
| @@ -48,9 +49,9 @@ enum { |
| #define MTK_FOE_IB2_DEST_PORT GENMASK(7, 5) |
| #define MTK_FOE_IB2_MULTICAST BIT(8) |
| |
| -#define MTK_FOE_IB2_WHNAT_QID2 GENMASK(13, 12) |
| -#define MTK_FOE_IB2_WHNAT_DEVIDX BIT(16) |
| -#define MTK_FOE_IB2_WHNAT_NAT BIT(17) |
| +#define MTK_FOE_IB2_WDMA_QID2 GENMASK(13, 12) |
| +#define MTK_FOE_IB2_WDMA_DEVIDX BIT(16) |
| +#define MTK_FOE_IB2_WDMA_WINFO BIT(17) |
| |
| #define MTK_FOE_IB2_PORT_MG GENMASK(17, 12) |
| |
| @@ -58,9 +59,9 @@ enum { |
| |
| #define MTK_FOE_IB2_DSCP GENMASK(31, 24) |
| |
-#define MTK_FOE_VLAN2_WHNAT_BSS		GENMASK(5, 0)
| -#define MTK_FOE_VLAN2_WHNAT_WCID GENMASK(13, 6) |
| -#define MTK_FOE_VLAN2_WHNAT_RING GENMASK(15, 14) |
| +#define MTK_FOE_VLAN2_WINFO_BSS GENMASK(5, 0) |
| +#define MTK_FOE_VLAN2_WINFO_WCID GENMASK(13, 6) |
| +#define MTK_FOE_VLAN2_WINFO_RING GENMASK(15, 14) |
| |
| enum { |
| MTK_FOE_STATE_INVALID, |
| @@ -84,19 +85,16 @@ struct mtk_foe_mac_info { |
| u16 src_mac_lo; |
| }; |
| |
| +/* software-only entry type */ |
| struct mtk_foe_bridge { |
| - u32 dest_mac_hi; |
| + u8 dest_mac[ETH_ALEN]; |
| + u8 src_mac[ETH_ALEN]; |
| + u16 vlan; |
| |
| - u16 src_mac_lo; |
| - u16 dest_mac_lo; |
| - |
| - u32 src_mac_hi; |
| + struct {} key_end; |
| |
| u32 ib2; |
| |
| - u32 _rsv[5]; |
| - |
| - u32 udf_tsid; |
| struct mtk_foe_mac_info l2; |
| }; |
| |
| @@ -235,7 +233,37 @@ enum { |
| MTK_PPE_CPU_REASON_INVALID = 0x1f, |
| }; |
| |
| +enum { |
| + MTK_FLOW_TYPE_L4, |
| + MTK_FLOW_TYPE_L2, |
| + MTK_FLOW_TYPE_L2_SUBFLOW, |
| +}; |
| + |
| +struct mtk_flow_entry { |
| + union { |
| + struct hlist_node list; |
| + struct { |
| + struct rhash_head l2_node; |
| + struct hlist_head l2_flows; |
| + }; |
| + }; |
| + u8 type; |
| + s8 wed_index; |
| + u16 hash; |
| + union { |
| + struct mtk_foe_entry data; |
| + struct { |
| + struct mtk_flow_entry *base_flow; |
| + struct hlist_node list; |
| + struct {} end; |
| + } l2_data; |
| + }; |
| + struct rhash_head node; |
| + unsigned long cookie; |
| +}; |
| + |
| struct mtk_ppe { |
| + struct mtk_eth *eth; |
| struct device *dev; |
| void __iomem *base; |
| int version; |
| @@ -243,19 +271,35 @@ struct mtk_ppe { |
| struct mtk_foe_entry *foe_table; |
| dma_addr_t foe_phys; |
| |
| + u16 foe_check_time[MTK_PPE_ENTRIES]; |
| + struct hlist_head foe_flow[MTK_PPE_ENTRIES / 2]; |
| + |
| + struct rhashtable l2_flows; |
| + |
| void *acct_table; |
| }; |
| |
| -int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base, |
| - int version); |
| +struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version); |
| int mtk_ppe_start(struct mtk_ppe *ppe); |
| int mtk_ppe_stop(struct mtk_ppe *ppe); |
| |
| +void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash); |
| + |
| static inline void |
| -mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash) |
| +mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash) |
| { |
| - ppe->foe_table[hash].ib1 = 0; |
| - dma_wmb(); |
| + u16 now, diff; |
| + |
| + if (!ppe) |
| + return; |
| + |
| + now = (u16)jiffies; |
| + diff = now - ppe->foe_check_time[hash]; |
| + if (diff < HZ / 10) |
| + return; |
| + |
| + ppe->foe_check_time[hash] = now; |
| + __mtk_ppe_check_skb(ppe, skb, hash); |
| } |
| |
| static inline int |
| @@ -281,8 +325,11 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry, |
| int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port); |
| int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid); |
| int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid); |
| -int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, |
| - u16 timestamp); |
| +int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq, |
| + int bss, int wcid); |
| +int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); |
| +void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); |
| +int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); |
| int mtk_ppe_debugfs_init(struct mtk_ppe *ppe); |
| |
| #endif |
| diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c |
| index d4b482340..a591ab1fd 100644 |
| --- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c |
| +++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c |
| @@ -32,7 +32,6 @@ static const char *mtk_foe_pkt_type_str(int type) |
| static const char * const type_str[] = { |
| [MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T", |
| [MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T", |
| - [MTK_PPE_PKT_TYPE_BRIDGE] = "L2", |
| [MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE", |
| [MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T", |
| [MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T", |
| @@ -207,6 +206,9 @@ int mtk_ppe_debugfs_init(struct mtk_ppe *ppe) |
| struct dentry *root; |
| |
| root = debugfs_create_dir("mtk_ppe", NULL); |
+	if (IS_ERR(root))
+		return PTR_ERR(root);
| + |
| debugfs_create_file("entries", S_IRUGO, root, ppe, &fops_all); |
| debugfs_create_file("bind", S_IRUGO, root, ppe, &fops_bind); |
| |
| diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c |
| index 4294f0c74..d4a012608 100644 |
| --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c |
| +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c |
| @@ -11,6 +11,7 @@ |
| #include <net/pkt_cls.h> |
| #include <net/dsa.h> |
| #include "mtk_eth_soc.h" |
| +#include "mtk_wed.h" |
| |
| struct mtk_flow_data { |
| struct ethhdr eth; |
| @@ -30,6 +31,8 @@ struct mtk_flow_data { |
| __be16 src_port; |
| __be16 dst_port; |
| |
| + u16 vlan_in; |
| + |
| struct { |
| u16 id; |
| __be16 proto; |
| @@ -41,12 +44,6 @@ struct mtk_flow_data { |
| } pppoe; |
| }; |
| |
| -struct mtk_flow_entry { |
| - struct rhash_head node; |
| - unsigned long cookie; |
| - u16 hash; |
| -}; |
| - |
| static const struct rhashtable_params mtk_flow_ht_params = { |
| .head_offset = offsetof(struct mtk_flow_entry, node), |
| .key_offset = offsetof(struct mtk_flow_entry, cookie), |
| @@ -54,12 +51,6 @@ static const struct rhashtable_params mtk_flow_ht_params = { |
| .automatic_shrinking = true, |
| }; |
| |
| -static u32 |
| -mtk_eth_timestamp(struct mtk_eth *eth) |
| -{ |
| - return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP; |
| -} |
| - |
| static int |
| mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data, |
| bool egress) |
| @@ -94,6 +85,35 @@ mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth) |
| memcpy(dest, src, act->mangle.mask ? 2 : 4); |
| } |
| |
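+/* resolve the WDMA forwarding info (wdma index, ring, bss, wcid) for
+ * @addr behind @dev by walking the driver's ndo_fill_forward_path
+ */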
| +static int |
| +mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info) |
| +{ |
| + struct net_device_path_ctx ctx = { |
| + .dev = dev, |
| + }; |
| + struct net_device_path path = {}; |
| + |
| + if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)) |
| + return -1; |
| + |
| + if (!dev->netdev_ops->ndo_fill_forward_path) |
| + return -1; |
| + |
| + memcpy(ctx.daddr, addr, sizeof(ctx.daddr)); |
| + if (dev->netdev_ops->ndo_fill_forward_path(&ctx, &path)) |
| + return -1; |
| + |
| + if (path.type != DEV_PATH_MTK_WDMA) |
| + return -1; |
| + |
| + info->wdma_idx = path.mtk_wdma.wdma_idx; |
| + info->queue = path.mtk_wdma.queue; |
| + info->bss = path.mtk_wdma.bss; |
| + info->wcid = path.mtk_wdma.wcid; |
| + |
| + return 0; |
| +} |
| + |
| |
| static int |
| mtk_flow_mangle_ports(const struct flow_action_entry *act, |
| @@ -163,10 +183,20 @@ mtk_flow_get_dsa_port(struct net_device **dev) |
| |
| static int |
| mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe, |
| - struct net_device *dev) |
| + struct net_device *dev, const u8 *dest_mac, |
| + int *wed_index) |
| { |
| + struct mtk_wdma_info info = {}; |
| int pse_port, dsa_port; |
| |
| + if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) { |
| + mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss, |
| + info.wcid); |
| + pse_port = 3; |
| + *wed_index = info.wdma_idx; |
| + goto out; |
| + } |
| + |
| dsa_port = mtk_flow_get_dsa_port(&dev); |
| if (dsa_port >= 0) |
| mtk_foe_entry_set_dsa(foe, dsa_port); |
| @@ -178,6 +208,7 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe, |
| else |
| return -EOPNOTSUPP; |
| |
| +out: |
| mtk_foe_entry_set_pse_port(foe, pse_port); |
| |
| return 0; |
| @@ -193,11 +224,10 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) |
| struct net_device *odev = NULL; |
| struct mtk_flow_entry *entry; |
| int offload_type = 0; |
| + int wed_index = -1; |
| u16 addr_type = 0; |
| - u32 timestamp; |
| u8 l4proto = 0; |
| int err = 0; |
| - int hash; |
| int i; |
| |
| if (rhashtable_lookup(ð->flow_table, &f->cookie, mtk_flow_ht_params)) |
| @@ -229,9 +259,45 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) |
| return -EOPNOTSUPP; |
| } |
| |
| + switch (addr_type) { |
| + case 0: |
| + offload_type = MTK_PPE_PKT_TYPE_BRIDGE; |
| + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { |
| + struct flow_match_eth_addrs match; |
| + |
| + flow_rule_match_eth_addrs(rule, &match); |
| + memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN); |
| + memcpy(data.eth.h_source, match.key->src, ETH_ALEN); |
| + } else { |
| + return -EOPNOTSUPP; |
| + } |
| + |
| + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { |
| + struct flow_match_vlan match; |
| + |
| + flow_rule_match_vlan(rule, &match); |
| + |
| + if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q)) |
| + return -EOPNOTSUPP; |
| + |
| + data.vlan_in = match.key->vlan_id; |
| + } |
| + break; |
| + case FLOW_DISSECTOR_KEY_IPV4_ADDRS: |
| + offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT; |
| + break; |
| + case FLOW_DISSECTOR_KEY_IPV6_ADDRS: |
| + offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T; |
| + break; |
| + default: |
| + return -EOPNOTSUPP; |
| + } |
| + |
| flow_action_for_each(i, act, &rule->action) { |
| switch (act->id) { |
| case FLOW_ACTION_MANGLE: |
| + if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) |
| + return -EOPNOTSUPP; |
| if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH) |
| mtk_flow_offload_mangle_eth(act, &data.eth); |
| break; |
| @@ -263,17 +329,6 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) |
| } |
| } |
| |
| - switch (addr_type) { |
| - case FLOW_DISSECTOR_KEY_IPV4_ADDRS: |
| - offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT; |
| - break; |
| - case FLOW_DISSECTOR_KEY_IPV6_ADDRS: |
| - offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T; |
| - break; |
| - default: |
| - return -EOPNOTSUPP; |
| - } |
| - |
| if (!is_valid_ether_addr(data.eth.h_source) || |
| !is_valid_ether_addr(data.eth.h_dest)) |
| return -EINVAL; |
| @@ -287,10 +342,13 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) |
| if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { |
| struct flow_match_ports ports; |
| |
| + if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) |
| + return -EOPNOTSUPP; |
| + |
| flow_rule_match_ports(rule, &ports); |
| data.src_port = ports.key->src; |
| data.dst_port = ports.key->dst; |
| - } else { |
| + } else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) { |
| return -EOPNOTSUPP; |
| } |
| |
| @@ -320,6 +378,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) |
| if (act->id != FLOW_ACTION_MANGLE) |
| continue; |
| |
| + if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) |
| + return -EOPNOTSUPP; |
| + |
| switch (act->mangle.htype) { |
| case FLOW_ACT_MANGLE_HDR_TYPE_TCP: |
| case FLOW_ACT_MANGLE_HDR_TYPE_UDP: |
| @@ -345,6 +406,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) |
| return err; |
| } |
| |
| + if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) |
| + foe.bridge.vlan = data.vlan_in; |
| + |
| if (data.vlan.num == 1) { |
| if (data.vlan.proto != htons(ETH_P_8021Q)) |
| return -EOPNOTSUPP; |
| @@ -354,33 +418,38 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) |
| if (data.pppoe.num == 1) |
| mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid); |
| |
| - err = mtk_flow_set_output_device(eth, &foe, odev); |
| + err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest, |
| + &wed_index); |
| if (err) |
| return err; |
| |
| + if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0) |
| + return err; |
| + |
| entry = kzalloc(sizeof(*entry), GFP_KERNEL); |
| if (!entry) |
| return -ENOMEM; |
| |
| entry->cookie = f->cookie; |
| - timestamp = mtk_eth_timestamp(eth); |
| - hash = mtk_foe_entry_commit(ð->ppe, &foe, timestamp); |
| - if (hash < 0) { |
| - err = hash; |
| + memcpy(&entry->data, &foe, sizeof(entry->data)); |
| + entry->wed_index = wed_index; |
| + |
+	err = mtk_foe_entry_commit(eth->ppe, entry);
+	if (err < 0)
| goto free; |
| - } |
| |
| - entry->hash = hash; |
| err = rhashtable_insert_fast(ð->flow_table, &entry->node, |
| mtk_flow_ht_params); |
| if (err < 0) |
| - goto clear_flow; |
| + goto clear; |
| |
| return 0; |
| -clear_flow: |
| - mtk_foe_entry_clear(ð->ppe, hash); |
| + |
| +clear: |
| + mtk_foe_entry_clear(eth->ppe, entry); |
| free: |
| kfree(entry); |
| + if (wed_index >= 0) |
| + mtk_wed_flow_remove(wed_index); |
| return err; |
| } |
| |
| @@ -394,9 +463,11 @@ mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f) |
| if (!entry) |
| return -ENOENT; |
| |
| - mtk_foe_entry_clear(ð->ppe, entry->hash); |
| + mtk_foe_entry_clear(eth->ppe, entry); |
| rhashtable_remove_fast(ð->flow_table, &entry->node, |
| mtk_flow_ht_params); |
| + if (entry->wed_index >= 0) |
| + mtk_wed_flow_remove(entry->wed_index); |
| kfree(entry); |
| |
| return 0; |
| @@ -406,7 +477,6 @@ static int |
| mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f) |
| { |
| struct mtk_flow_entry *entry; |
| - int timestamp; |
| u32 idle; |
| |
| entry = rhashtable_lookup(ð->flow_table, &f->cookie, |
| @@ -414,11 +484,7 @@ mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f) |
| if (!entry) |
| return -ENOENT; |
| |
| - timestamp = mtk_foe_entry_timestamp(ð->ppe, entry->hash); |
| - if (timestamp < 0) |
| - return -ETIMEDOUT; |
| - |
| - idle = mtk_eth_timestamp(eth) - timestamp; |
| + idle = mtk_foe_entry_idle_time(eth->ppe, entry); |
| f->stats.lastused = jiffies - idle * HZ; |
| |
| return 0; |
| @@ -470,7 +536,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f) |
| struct flow_block_cb *block_cb; |
| flow_setup_cb_t *cb; |
| |
| - if (!eth->ppe.foe_table) |
| + if (!eth->ppe || !eth->ppe->foe_table) |
| return -EOPNOTSUPP; |
| |
| if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) |
| @@ -511,15 +577,18 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f) |
| int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type, |
| void *type_data) |
| { |
| - if (type == TC_SETUP_FT) |
| + switch (type) { |
| + case TC_SETUP_BLOCK: |
| + case TC_SETUP_FT: |
| return mtk_eth_setup_tc_block(dev, type_data); |
| - |
| - return -EOPNOTSUPP; |
| + default: |
| + return -EOPNOTSUPP; |
| + } |
| } |
| |
| int mtk_eth_offload_init(struct mtk_eth *eth) |
| { |
| - if (!eth->ppe.foe_table) |
| + if (!eth->ppe || !eth->ppe->foe_table) |
| return 0; |
| |
| return rhashtable_init(ð->flow_table, &mtk_flow_ht_params); |
| diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c |
| new file mode 100644 |
| index 000000000..ea1cbdf1a |
| --- /dev/null |
| +++ b/drivers/net/ethernet/mediatek/mtk_wed.c |
| @@ -0,0 +1,876 @@ |
| +// SPDX-License-Identifier: GPL-2.0-only |
| +/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */ |
| + |
| +#include <linux/kernel.h> |
| +#include <linux/slab.h> |
| +#include <linux/module.h> |
| +#include <linux/bitfield.h> |
| +#include <linux/dma-mapping.h> |
| +#include <linux/skbuff.h> |
| +#include <linux/of_platform.h> |
| +#include <linux/of_address.h> |
| +#include <linux/mfd/syscon.h> |
| +#include <linux/debugfs.h> |
| +#include <linux/iopoll.h> |
| +#include <linux/soc/mediatek/mtk_wed.h> |
| +#include "mtk_eth_soc.h" |
| +#include "mtk_wed_regs.h" |
| +#include "mtk_wed.h" |
| +#include "mtk_ppe.h" |
| + |
| +#define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000) |
| + |
| +#define MTK_WED_PKT_SIZE 1900 |
| +#define MTK_WED_BUF_SIZE 2048 |
+#define MTK_WED_BUF_PER_PAGE	(PAGE_SIZE / MTK_WED_BUF_SIZE)
| + |
| +#define MTK_WED_TX_RING_SIZE 2048 |
| +#define MTK_WED_WDMA_RING_SIZE 1024 |
| + |
| +static struct mtk_wed_hw *hw_list[2]; |
| +static DEFINE_MUTEX(hw_lock); |
| + |
| +static void |
| +wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) |
| +{ |
| + regmap_update_bits(dev->hw->regs, reg, mask | val, val); |
| +} |
| + |
| +static void |
| +wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask) |
| +{ |
| + return wed_m32(dev, reg, 0, mask); |
| +} |
| + |
| +static void |
| +wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask) |
| +{ |
| + return wed_m32(dev, reg, mask, 0); |
| +} |
| + |
| +static void |
| +wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) |
| +{ |
| + wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val); |
| +} |
| + |
| +static void |
| +wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask) |
| +{ |
| + wdma_m32(dev, reg, 0, mask); |
| +} |
| + |
| +static u32 |
| +mtk_wed_read_reset(struct mtk_wed_device *dev) |
| +{ |
| + return wed_r32(dev, MTK_WED_RESET); |
| +} |
| + |
| +static void |
| +mtk_wed_reset(struct mtk_wed_device *dev, u32 mask) |
| +{ |
| + u32 status; |
| + |
| + wed_w32(dev, MTK_WED_RESET, mask); |
| + if (readx_poll_timeout(mtk_wed_read_reset, dev, status, |
| + !(status & mask), 0, 1000)) |
| + WARN_ON_ONCE(1); |
| +} |
| + |
| +static struct mtk_wed_hw * |
| +mtk_wed_assign(struct mtk_wed_device *dev) |
| +{ |
| + struct mtk_wed_hw *hw; |
| + |
| + hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)]; |
| + if (!hw || hw->wed_dev) |
| + return NULL; |
| + |
| + hw->wed_dev = dev; |
| + return hw; |
| +} |
| + |
| +static int |
| +mtk_wed_buffer_alloc(struct mtk_wed_device *dev) |
| +{ |
| + struct mtk_wdma_desc *desc; |
| + dma_addr_t desc_phys; |
| + void **page_list; |
| + int token = dev->wlan.token_start; |
| + int ring_size; |
| + int n_pages; |
| + int i, page_idx; |
| + |
| + ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1); |
| + n_pages = ring_size / MTK_WED_BUF_PER_PAGE; |
| + |
| + page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL); |
| + if (!page_list) |
| + return -ENOMEM; |
| + |
| + dev->buf_ring.size = ring_size; |
| + dev->buf_ring.pages = page_list; |
| + |
| + desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc), |
| + &desc_phys, GFP_KERNEL); |
| + if (!desc) |
| + return -ENOMEM; |
| + |
| + dev->buf_ring.desc = desc; |
| + dev->buf_ring.desc_phys = desc_phys; |
| + |
| + for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) { |
| + dma_addr_t page_phys, buf_phys; |
| + struct page *page; |
| + void *buf; |
| + int s; |
| + |
| + page = __dev_alloc_pages(GFP_KERNEL, 0); |
| + if (!page) |
| + return -ENOMEM; |
| + |
| + page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE, |
| + DMA_BIDIRECTIONAL); |
| + if (dma_mapping_error(dev->hw->dev, page_phys)) { |
| + __free_page(page); |
| + return -ENOMEM; |
| + } |
| + |
| + page_list[page_idx++] = page; |
| + dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE, |
| + DMA_BIDIRECTIONAL); |
| + |
| + buf = page_to_virt(page); |
| + buf_phys = page_phys; |
| + |
| + for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) { |
| + u32 txd_size; |
| + |
| + txd_size = dev->wlan.init_buf(buf, buf_phys, token++); |
| + |
| + desc->buf0 = buf_phys; |
| + desc->buf1 = buf_phys + txd_size; |
| + desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, |
| + txd_size) | |
| + FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1, |
| + MTK_WED_BUF_SIZE - txd_size) | |
| + MTK_WDMA_DESC_CTRL_LAST_SEG1; |
| + desc->info = 0; |
| + desc++; |
| + |
| + buf += MTK_WED_BUF_SIZE; |
| + buf_phys += MTK_WED_BUF_SIZE; |
| + } |
| + |
| + dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE, |
| + DMA_BIDIRECTIONAL); |
| + } |
| + |
| + return 0; |
| +} |
| + |
| +static void |
| +mtk_wed_free_buffer(struct mtk_wed_device *dev) |
| +{ |
| + struct mtk_wdma_desc *desc = dev->buf_ring.desc; |
| + void **page_list = dev->buf_ring.pages; |
| + int page_idx; |
| + int i; |
| + |
| + if (!page_list) |
| + return; |
| + |
| + if (!desc) |
| + goto free_pagelist; |
| + |
| + for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) { |
| + void *page = page_list[page_idx++]; |
| + |
| + if (!page) |
| + break; |
| + |
| + dma_unmap_page(dev->hw->dev, desc[i].buf0, |
| + PAGE_SIZE, DMA_BIDIRECTIONAL); |
| + __free_page(page); |
| + } |
| + |
| + dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc), |
| + desc, dev->buf_ring.desc_phys); |
| + |
| +free_pagelist: |
| + kfree(page_list); |
| +} |
| + |
| +static void |
| +mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring) |
| +{ |
| + if (!ring->desc) |
| + return; |
| + |
| + dma_free_coherent(dev->hw->dev, ring->size * sizeof(*ring->desc), |
| + ring->desc, ring->desc_phys); |
| +} |
| + |
| +static void |
| +mtk_wed_free_tx_rings(struct mtk_wed_device *dev) |
| +{ |
| + int i; |
| + |
| + for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) |
| + mtk_wed_free_ring(dev, &dev->tx_ring[i]); |
| + for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) |
| + mtk_wed_free_ring(dev, &dev->tx_wdma[i]); |
| +} |
| + |
| +static void |
| +mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en) |
| +{ |
| + u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; |
| + |
| + if (!dev->hw->num_flows) |
| + mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; |
| + |
| + wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0); |
| + wed_r32(dev, MTK_WED_EXT_INT_MASK); |
| +} |
| + |
| +static void |
| +mtk_wed_stop(struct mtk_wed_device *dev) |
| +{ |
| + regmap_write(dev->hw->mirror, dev->hw->index * 4, 0); |
| + mtk_wed_set_ext_int(dev, false); |
| + |
| + wed_clr(dev, MTK_WED_CTRL, |
| + MTK_WED_CTRL_WDMA_INT_AGENT_EN | |
| + MTK_WED_CTRL_WPDMA_INT_AGENT_EN | |
| + MTK_WED_CTRL_WED_TX_BM_EN | |
| + MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); |
| + wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0); |
| + wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0); |
| + wdma_w32(dev, MTK_WDMA_INT_MASK, 0); |
| + wdma_w32(dev, MTK_WDMA_INT_GRP2, 0); |
| + wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0); |
| + |
| + wed_clr(dev, MTK_WED_GLO_CFG, |
| + MTK_WED_GLO_CFG_TX_DMA_EN | |
| + MTK_WED_GLO_CFG_RX_DMA_EN); |
| + wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, |
| + MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | |
| + MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); |
| + wed_clr(dev, MTK_WED_WDMA_GLO_CFG, |
| + MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); |
| +} |
| + |
| +static void |
| +mtk_wed_detach(struct mtk_wed_device *dev) |
| +{ |
| + struct device_node *wlan_node = dev->wlan.pci_dev->dev.of_node; |
| + struct mtk_wed_hw *hw = dev->hw; |
| + |
| + mutex_lock(&hw_lock); |
| + |
| + mtk_wed_stop(dev); |
| + |
| + wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); |
| + wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); |
| + |
| + mtk_wed_reset(dev, MTK_WED_RESET_WED); |
| + |
| + mtk_wed_free_buffer(dev); |
| + mtk_wed_free_tx_rings(dev); |
| + |
| + if (of_dma_is_coherent(wlan_node)) |
| + regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, |
| + BIT(hw->index), BIT(hw->index)); |
| + |
| + if ((!hw_list[!hw->index] || !hw_list[!hw->index]->wed_dev) && |
| + hw->eth->dma_dev != hw->eth->dev) |
| + mtk_eth_set_dma_device(hw->eth, hw->eth->dev); |
| + |
| + memset(dev, 0, sizeof(*dev)); |
| + module_put(THIS_MODULE); |
| + |
| + hw->wed_dev = NULL; |
| + mutex_unlock(&hw_lock); |
| +} |
| + |
| +static void |
| +mtk_wed_hw_init_early(struct mtk_wed_device *dev) |
| +{ |
| + u32 mask, set; |
| + u32 offset; |
| + |
| + mtk_wed_stop(dev); |
| + mtk_wed_reset(dev, MTK_WED_RESET_WED); |
| + |
| + mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE | |
| + MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE | |
| + MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE; |
| + set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) | |
| + MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP | |
| + MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY; |
| + wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set); |
| + |
| + wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO_PRERES); |
| + |
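| + /* |
| + * Program the WDMA register window offsets for this WED instance; |
| + * the values are hardware-specific constants. |
| + */ |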
| + offset = dev->hw->index ? 0x04000400 : 0; |
| + wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset); |
| + wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset); |
| + |
| + wed_w32(dev, MTK_WED_PCIE_CFG_BASE, MTK_PCIE_BASE(dev->hw->index)); |
| + wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys); |
| +} |
| + |
| +static void |
| +mtk_wed_hw_init(struct mtk_wed_device *dev) |
| +{ |
| + if (dev->init_done) |
| + return; |
| + |
| + dev->init_done = true; |
| + mtk_wed_set_ext_int(dev, false); |
| + wed_w32(dev, MTK_WED_TX_BM_CTRL, |
| + MTK_WED_TX_BM_CTRL_PAUSE | |
| + FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, |
| + dev->buf_ring.size / 128) | |
| + FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, |
| + MTK_WED_TX_RING_SIZE / 256)); |
| + |
| + wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys); |
| + |
| + wed_w32(dev, MTK_WED_TX_BM_TKID, |
| + FIELD_PREP(MTK_WED_TX_BM_TKID_START, |
| + dev->wlan.token_start) | |
| + FIELD_PREP(MTK_WED_TX_BM_TKID_END, |
| + dev->wlan.token_start + dev->wlan.nbuf - 1)); |
| + |
| + wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE); |
| + |
| + wed_w32(dev, MTK_WED_TX_BM_DYN_THR, |
| + FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) | |
| + MTK_WED_TX_BM_DYN_THR_HI); |
| + |
| + mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); |
| + |
| + wed_set(dev, MTK_WED_CTRL, |
| + MTK_WED_CTRL_WED_TX_BM_EN | |
| + MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); |
| + |
| + wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE); |
| +} |
| + |
| +static void |
| +mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size) |
| +{ |
| + int i; |
| + |
| + for (i = 0; i < size; i++) { |
| + desc[i].buf0 = 0; |
| + desc[i].ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); |
| + desc[i].buf1 = 0; |
| + desc[i].info = 0; |
| + } |
| +} |
| + |
| +static u32 |
| +mtk_wed_check_busy(struct mtk_wed_device *dev) |
| +{ |
| + if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY) |
| + return true; |
| + |
| + if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) & |
| + MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY) |
| + return true; |
| + |
| + if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY) |
| + return true; |
| + |
| + if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) & |
| + MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY) |
| + return true; |
| + |
| + if (wdma_r32(dev, MTK_WDMA_GLO_CFG) & |
| + MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY) |
| + return true; |
| + |
| + if (wed_r32(dev, MTK_WED_CTRL) & |
| + (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY)) |
| + return true; |
| + |
| + return false; |
| +} |
| + |
| +static int |
| +mtk_wed_poll_busy(struct mtk_wed_device *dev) |
| +{ |
| + int sleep = 15000; |
| + int timeout = 100 * sleep; |
| + u32 val; |
| + |
| + return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep, |
| + timeout, false, dev); |
| +} |
| + |
| +static void |
| +mtk_wed_reset_dma(struct mtk_wed_device *dev) |
| +{ |
| + bool busy = false; |
| + u32 val; |
| + int i; |
| + |
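| + /* put all TX ring descriptors back into the DMA-done state */ |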
| + for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) { |
| + struct mtk_wdma_desc *desc = dev->tx_ring[i].desc; |
| + |
| + if (!desc) |
| + continue; |
| + |
| + mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE); |
| + } |
| + |
| + if (mtk_wed_poll_busy(dev)) |
| + busy = mtk_wed_check_busy(dev); |
| + |
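| + /* |
| + * If the DMA engines did not go idle, a full block reset is |
| + * required; otherwise resetting the ring indices is sufficient. |
| + */ |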
| + if (busy) { |
| + mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA); |
| + } else { |
| + wed_w32(dev, MTK_WED_RESET_IDX, |
| + MTK_WED_RESET_IDX_TX | |
| + MTK_WED_RESET_IDX_RX); |
| + wed_w32(dev, MTK_WED_RESET_IDX, 0); |
| + } |
| + |
| + wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); |
| + wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); |
| + |
| + if (busy) { |
| + mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT); |
| + mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV); |
| + } else { |
| + wed_w32(dev, MTK_WED_WDMA_RESET_IDX, |
| + MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV); |
| + wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0); |
| + |
| + wed_set(dev, MTK_WED_WDMA_GLO_CFG, |
| + MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE); |
| + |
| + wed_clr(dev, MTK_WED_WDMA_GLO_CFG, |
| + MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE); |
| + } |
| + |
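| + /* wait for the token FIFO depth to reach 0x40 before resetting */ |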
| + for (i = 0; i < 100; i++) { |
| + val = wed_r32(dev, MTK_WED_TX_BM_INTF); |
| + if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40) |
| + break; |
| + } |
| + |
| + mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT); |
| + mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); |
| + |
| + if (busy) { |
| + mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT); |
| + mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV); |
| + mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV); |
| + } else { |
| + wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, |
| + MTK_WED_WPDMA_RESET_IDX_TX | |
| + MTK_WED_WPDMA_RESET_IDX_RX); |
| + wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0); |
| + } |
| +} |
| + |
| +static int |
| +mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, |
| + int size) |
| +{ |
| + ring->desc = dma_alloc_coherent(dev->hw->dev, |
| + size * sizeof(*ring->desc), |
| + &ring->desc_phys, GFP_KERNEL); |
| + if (!ring->desc) |
| + return -ENOMEM; |
| + |
| + ring->size = size; |
| + mtk_wed_ring_reset(ring->desc, size); |
| + |
| + return 0; |
| +} |
| + |
| +static int |
| +mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size) |
| +{ |
| + struct mtk_wed_ring *wdma = &dev->tx_wdma[idx]; |
| + |
| + if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE)) |
| + return -ENOMEM; |
| + |
| + wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, |
| + wdma->desc_phys); |
| + wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT, |
| + size); |
| + wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); |
| + |
| + wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, |
| + wdma->desc_phys); |
| + wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT, |
| + size); |
| + |
| + return 0; |
| +} |
| + |
| +static void |
| +mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask) |
| +{ |
| + u32 wdma_mask; |
| + u32 val; |
| + int i; |
| + |
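| + /* assign a small 16-entry ring to any WDMA RX ring not set up yet */ |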
| + for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) |
| + if (!dev->tx_wdma[i].desc) |
| + mtk_wed_wdma_ring_setup(dev, i, 16); |
| + |
| + wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0)); |
| + |
| + mtk_wed_hw_init(dev); |
| + |
| + wed_set(dev, MTK_WED_CTRL, |
| + MTK_WED_CTRL_WDMA_INT_AGENT_EN | |
| + MTK_WED_CTRL_WPDMA_INT_AGENT_EN | |
| + MTK_WED_CTRL_WED_TX_BM_EN | |
| + MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); |
| + |
| + wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, MTK_WED_PCIE_INT_TRIGGER_STATUS); |
| + |
| + wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, |
| + MTK_WED_WPDMA_INT_TRIGGER_RX_DONE | |
| + MTK_WED_WPDMA_INT_TRIGGER_TX_DONE); |
| + |
| + wed_set(dev, MTK_WED_WPDMA_INT_CTRL, |
| + MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV); |
| + |
| + wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask); |
| + wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask); |
| + |
| + wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask); |
| + wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask); |
| + |
| + wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask); |
| + wed_w32(dev, MTK_WED_INT_MASK, irq_mask); |
| + |
| + wed_set(dev, MTK_WED_GLO_CFG, |
| + MTK_WED_GLO_CFG_TX_DMA_EN | |
| + MTK_WED_GLO_CFG_RX_DMA_EN); |
| + wed_set(dev, MTK_WED_WPDMA_GLO_CFG, |
| + MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | |
| + MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); |
| + wed_set(dev, MTK_WED_WDMA_GLO_CFG, |
| + MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); |
| + |
| + mtk_wed_set_ext_int(dev, true); |
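| + /* enable PCIe interrupt mirroring for this WED instance */ |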
| + val = dev->wlan.wpdma_phys | |
| + MTK_PCIE_MIRROR_MAP_EN | |
| + FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index); |
| + |
| + if (dev->hw->index) |
| + val |= BIT(1); |
| + val |= BIT(0); |
| + regmap_write(dev->hw->mirror, dev->hw->index * 4, val); |
| + |
| + dev->running = true; |
| +} |
| + |
| +static int |
| +mtk_wed_attach(struct mtk_wed_device *dev) |
| + __releases(RCU) |
| +{ |
| + struct mtk_wed_hw *hw; |
| + int ret = 0; |
| + |
| + RCU_LOCKDEP_WARN(!rcu_read_lock_held(), |
| + "mtk_wed_attach without holding the RCU read lock"); |
| + |
| + if (pci_domain_nr(dev->wlan.pci_dev->bus) > 1 || |
| + !try_module_get(THIS_MODULE)) |
| + ret = -ENODEV; |
| + |
| + rcu_read_unlock(); |
| + |
| + if (ret) |
| + return ret; |
| + |
| + mutex_lock(&hw_lock); |
| + |
| + hw = mtk_wed_assign(dev); |
| + if (!hw) { |
| + module_put(THIS_MODULE); |
| + ret = -ENODEV; |
| + goto out; |
| + } |
| + |
| + dev_info(&dev->wlan.pci_dev->dev, "attaching wed device %d\n", hw->index); |
| + |
| + dev->hw = hw; |
| + dev->dev = hw->dev; |
| + dev->irq = hw->irq; |
| + dev->wdma_idx = hw->index; |
| + |
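| + /* use the WED device for ethernet DMA mappings on DMA-coherent systems */ |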
| + if (hw->eth->dma_dev == hw->eth->dev && |
| + of_dma_is_coherent(hw->eth->dev->of_node)) |
| + mtk_eth_set_dma_device(hw->eth, hw->dev); |
| + |
| + ret = mtk_wed_buffer_alloc(dev); |
| + if (ret) { |
| + mtk_wed_detach(dev); |
| + goto out; |
| + } |
| + |
| + mtk_wed_hw_init_early(dev); |
| + regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0); |
| + |
| +out: |
| + mutex_unlock(&hw_lock); |
| + |
| + return ret; |
| +} |
| + |
| +static int |
| +mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs) |
| +{ |
| + struct mtk_wed_ring *ring = &dev->tx_ring[idx]; |
| + |
| + /* |
| + * Tx ring redirection: |
| + * Instead of configuring the WLAN PDMA TX ring directly, the DMA ring |
| + * allocated by the WLAN driver gets configured into the WED |
| + * MTK_WED_RING_TX(n) registers. |
| + * |
| + * The WED driver posts its own DMA ring as the WLAN PDMA TX ring and |
| + * configures it into the MTK_WED_WPDMA_RING_TX(n) registers. This ring |
| + * gets filled with packets picked up from the WED TX ring and from |
| + * WDMA RX. |
| + */ |
| + |
| + BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring)); |
| + |
| + if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE)) |
| + return -ENOMEM; |
| + |
| + if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE)) |
| + return -ENOMEM; |
| + |
| + ring->reg_base = MTK_WED_RING_TX(idx); |
| + ring->wpdma = regs; |
| + |
| + /* WED -> WPDMA */ |
| + wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys); |
| + wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE); |
| + wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0); |
| + |
| + wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE, |
| + ring->desc_phys); |
| + wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT, |
| + MTK_WED_TX_RING_SIZE); |
| + wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); |
| + |
| + return 0; |
| +} |
| + |
| +static int |
| +mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs) |
| +{ |
| + struct mtk_wed_ring *ring = &dev->txfree_ring; |
| + int i; |
| + |
| + /* |
| + * For txfree event handling, the same DMA ring is shared between WED |
| + * and WLAN. The WLAN driver accesses the ring index registers through |
| + * WED. |
| + */ |
| + ring->reg_base = MTK_WED_RING_RX(1); |
| + ring->wpdma = regs; |
| + |
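| + /* |
| + * Copy the BASE, COUNT and CPU index registers of the WLAN ring |
| + * into both the WED and WED WPDMA views of the ring. |
| + */ |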
| + for (i = 0; i < 12; i += 4) { |
| + u32 val = readl(regs + i); |
| + |
| + wed_w32(dev, MTK_WED_RING_RX(1) + i, val); |
| + wed_w32(dev, MTK_WED_WPDMA_RING_RX(1) + i, val); |
| + } |
| + |
| + return 0; |
| +} |
| + |
| +static u32 |
| +mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask) |
| +{ |
| + u32 val; |
| + |
| + val = wed_r32(dev, MTK_WED_EXT_INT_STATUS); |
| + wed_w32(dev, MTK_WED_EXT_INT_STATUS, val); |
| + val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK; |
| + if (!dev->hw->num_flows) |
| + val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; |
| + if (val && net_ratelimit()) |
| + pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val); |
| + |
| + val = wed_r32(dev, MTK_WED_INT_STATUS); |
| + val &= mask; |
| + wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */ |
| + |
| + return val; |
| +} |
| + |
| +static void |
| +mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask) |
| +{ |
| + if (!dev->running) |
| + return; |
| + |
| + mtk_wed_set_ext_int(dev, !!mask); |
| + wed_w32(dev, MTK_WED_INT_MASK, mask); |
| +} |
| + |
| +int mtk_wed_flow_add(int index) |
| +{ |
| + struct mtk_wed_hw *hw = hw_list[index]; |
| + int ret; |
| + |
| + if (!hw || !hw->wed_dev) |
| + return -ENODEV; |
| + |
| + if (hw->num_flows) { |
| + hw->num_flows++; |
| + return 0; |
| + } |
| + |
| + mutex_lock(&hw_lock); |
| + if (!hw->wed_dev) { |
| + ret = -ENODEV; |
| + goto out; |
| + } |
| + |
| + ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev); |
| + if (!ret) |
| + hw->num_flows++; |
| + mtk_wed_set_ext_int(hw->wed_dev, true); |
| + |
| +out: |
| + mutex_unlock(&hw_lock); |
| + |
| + return ret; |
| +} |
| + |
| +void mtk_wed_flow_remove(int index) |
| +{ |
| + struct mtk_wed_hw *hw = hw_list[index]; |
| + |
| + if (!hw) |
| + return; |
| + |
| + if (--hw->num_flows) |
| + return; |
| + |
| + mutex_lock(&hw_lock); |
| + if (!hw->wed_dev) |
| + goto out; |
| + |
| + hw->wed_dev->wlan.offload_disable(hw->wed_dev); |
| + mtk_wed_set_ext_int(hw->wed_dev, true); |
| + |
| +out: |
| + mutex_unlock(&hw_lock); |
| +} |
| + |
| +void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, |
| + void __iomem *wdma, int index) |
| +{ |
| + static const struct mtk_wed_ops wed_ops = { |
| + .attach = mtk_wed_attach, |
| + .tx_ring_setup = mtk_wed_tx_ring_setup, |
| + .txfree_ring_setup = mtk_wed_txfree_ring_setup, |
| + .start = mtk_wed_start, |
| + .stop = mtk_wed_stop, |
| + .reset_dma = mtk_wed_reset_dma, |
| + .reg_read = wed_r32, |
| + .reg_write = wed_w32, |
| + .irq_get = mtk_wed_irq_get, |
| + .irq_set_mask = mtk_wed_irq_set_mask, |
| + .detach = mtk_wed_detach, |
| + }; |
| + struct device_node *eth_np = eth->dev->of_node; |
| + struct platform_device *pdev; |
| + struct mtk_wed_hw *hw; |
| + struct regmap *regs; |
| + int irq; |
| + |
| + if (!np) |
| + return; |
| + |
| + pdev = of_find_device_by_node(np); |
| + if (!pdev) |
| + return; |
| + |
| + get_device(&pdev->dev); |
| + irq = platform_get_irq(pdev, 0); |
| + if (irq < 0) { |
| + put_device(&pdev->dev); |
| + return; |
| + } |
| + |
| + regs = syscon_regmap_lookup_by_phandle(np, NULL); |
| + if (IS_ERR(regs)) { |
| + put_device(&pdev->dev); |
| + return; |
| + } |
| + |
| + rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops); |
| + |
| + mutex_lock(&hw_lock); |
| + |
| + if (WARN_ON(hw_list[index])) |
| + goto unlock; |
| + |
| + hw = kzalloc(sizeof(*hw), GFP_KERNEL); |
| + if (!hw) |
| + goto unlock; |
| + |
| + hw->node = np; |
| + hw->regs = regs; |
| + hw->eth = eth; |
| + hw->dev = &pdev->dev; |
| + hw->wdma = wdma; |
| + hw->index = index; |
| + hw->irq = irq; |
| + hw->mirror = syscon_regmap_lookup_by_phandle(eth_np, |
| + "mediatek,pcie-mirror"); |
| + hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np, |
| + "mediatek,hifsys"); |
| + if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) { |
| + kfree(hw); |
| + goto unlock; |
| + } |
| + |
| + if (!index) { |
| + regmap_write(hw->mirror, 0, 0); |
| + regmap_write(hw->mirror, 4, 0); |
| + } |
| + mtk_wed_hw_add_debugfs(hw); |
| + |
| + hw_list[index] = hw; |
| + |
| +unlock: |
| + mutex_unlock(&hw_lock); |
| +} |
| + |
| +void mtk_wed_exit(void) |
| +{ |
| + int i; |
| + |
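| + /* unpublish the ops and wait for concurrent RCU readers to finish */ |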
| + rcu_assign_pointer(mtk_soc_wed_ops, NULL); |
| + |
| + synchronize_rcu(); |
| + |
| + for (i = 0; i < ARRAY_SIZE(hw_list); i++) { |
| + struct mtk_wed_hw *hw; |
| + |
| + hw = hw_list[i]; |
| + if (!hw) |
| + continue; |
| + |
| + hw_list[i] = NULL; |
| + debugfs_remove(hw->debugfs_dir); |
| + put_device(hw->dev); |
| + kfree(hw); |
| + } |
| +} |
| diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h |
| new file mode 100644 |
| index 000000000..981ec613f |
| --- /dev/null |
| +++ b/drivers/net/ethernet/mediatek/mtk_wed.h |
| @@ -0,0 +1,135 @@ |
| +/* SPDX-License-Identifier: GPL-2.0-only */ |
| +/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */ |
| + |
| +#ifndef __MTK_WED_PRIV_H |
| +#define __MTK_WED_PRIV_H |
| + |
| +#include <linux/soc/mediatek/mtk_wed.h> |
| +#include <linux/debugfs.h> |
| +#include <linux/regmap.h> |
| +#include <linux/netdevice.h> |
| + |
| +struct mtk_eth; |
| + |
| +struct mtk_wed_hw { |
| + struct device_node *node; |
| + struct mtk_eth *eth; |
| + struct regmap *regs; |
| + struct regmap *hifsys; |
| + struct device *dev; |
| + void __iomem *wdma; |
| + struct regmap *mirror; |
| + struct dentry *debugfs_dir; |
| + struct mtk_wed_device *wed_dev; |
| + u32 debugfs_reg; |
| + u32 num_flows; |
| + char dirname[5]; |
| + int irq; |
| + int index; |
| +}; |
| + |
| +struct mtk_wdma_info { |
| + u8 wdma_idx; |
| + u8 queue; |
| + u16 wcid; |
| + u8 bss; |
| +}; |
| + |
| +#ifdef CONFIG_NET_MEDIATEK_SOC_WED |
| +static inline void |
| +wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val) |
| +{ |
| + regmap_write(dev->hw->regs, reg, val); |
| +} |
| + |
| +static inline u32 |
| +wed_r32(struct mtk_wed_device *dev, u32 reg) |
| +{ |
| + unsigned int val; |
| + |
| + regmap_read(dev->hw->regs, reg, &val); |
| + |
| + return val; |
| +} |
| + |
| +static inline void |
| +wdma_w32(struct mtk_wed_device *dev, u32 reg, u32 val) |
| +{ |
| + writel(val, dev->hw->wdma + reg); |
| +} |
| + |
| +static inline u32 |
| +wdma_r32(struct mtk_wed_device *dev, u32 reg) |
| +{ |
| + return readl(dev->hw->wdma + reg); |
| +} |
| + |
| +static inline u32 |
| +wpdma_tx_r32(struct mtk_wed_device *dev, int ring, u32 reg) |
| +{ |
| + if (!dev->tx_ring[ring].wpdma) |
| + return 0; |
| + |
| + return readl(dev->tx_ring[ring].wpdma + reg); |
| +} |
| + |
| +static inline void |
| +wpdma_tx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val) |
| +{ |
| + if (!dev->tx_ring[ring].wpdma) |
| + return; |
| + |
| + writel(val, dev->tx_ring[ring].wpdma + reg); |
| +} |
| + |
| +static inline u32 |
| +wpdma_txfree_r32(struct mtk_wed_device *dev, u32 reg) |
| +{ |
| + if (!dev->txfree_ring.wpdma) |
| + return 0; |
| + |
| + return readl(dev->txfree_ring.wpdma + reg); |
| +} |
| + |
| +static inline void |
| +wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val) |
| +{ |
| + if (!dev->txfree_ring.wpdma) |
| + return; |
| + |
| + writel(val, dev->txfree_ring.wpdma + reg); |
| +} |
| + |
| +void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, |
| + void __iomem *wdma, int index); |
| +void mtk_wed_exit(void); |
| +int mtk_wed_flow_add(int index); |
| +void mtk_wed_flow_remove(int index); |
| +#else |
| +static inline void |
| +mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, |
| + void __iomem *wdma, int index) |
| +{ |
| +} |
| +static inline void |
| +mtk_wed_exit(void) |
| +{ |
| +} |
| +static inline int mtk_wed_flow_add(int index) |
| +{ |
| + return -EINVAL; |
| +} |
| +static inline void mtk_wed_flow_remove(int index) |
| +{ |
| +} |
| +#endif |
| + |
| +#ifdef CONFIG_DEBUG_FS |
| +void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw); |
| +#else |
| +static inline void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw) |
| +{ |
| +} |
| +#endif |
| + |
| +#endif |
| diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c |
| new file mode 100644 |
| index 000000000..a81d3fd1a |
| --- /dev/null |
| +++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c |
| @@ -0,0 +1,175 @@ |
| +// SPDX-License-Identifier: GPL-2.0-only |
| +/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */ |
| + |
| +#include <linux/seq_file.h> |
| +#include "mtk_wed.h" |
| +#include "mtk_wed_regs.h" |
| + |
| +struct reg_dump { |
| + const char *name; |
| + u16 offset; |
| + u8 type; |
| + u8 base; |
| +}; |
| + |
| +enum { |
| + DUMP_TYPE_STRING, |
| + DUMP_TYPE_WED, |
| + DUMP_TYPE_WDMA, |
| + DUMP_TYPE_WPDMA_TX, |
| + DUMP_TYPE_WPDMA_TXFREE, |
| +}; |
| + |
| +#define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING } |
| +#define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ } |
| +#define DUMP_RING(_prefix, _base, ...) \ |
| + { _prefix " BASE", _base, __VA_ARGS__ }, \ |
| + { _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \ |
| + { _prefix " CIDX", _base + 0x8, __VA_ARGS__ }, \ |
| + { _prefix " DIDX", _base + 0xc, __VA_ARGS__ } |
| + |
| +#define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED) |
| +#define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED) |
| + |
| +#define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA) |
| +#define DUMP_WDMA_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WDMA) |
| + |
| +#define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n) |
| +#define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE) |
| + |
| +static void |
| +print_reg_val(struct seq_file *s, const char *name, u32 val) |
| +{ |
| + seq_printf(s, "%-32s %08x\n", name, val); |
| +} |
| + |
| +static void |
| +dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev, |
| + const struct reg_dump *regs, int n_regs) |
| +{ |
| + const struct reg_dump *cur; |
| + u32 val; |
| + |
| + for (cur = regs; cur < ®s[n_regs]; cur++) { |
| + switch (cur->type) { |
| + case DUMP_TYPE_STRING: |
| + seq_printf(s, "%s======== %s:\n", |
| + cur > regs ? "\n" : "", |
| + cur->name); |
| + continue; |
| + case DUMP_TYPE_WED: |
| + val = wed_r32(dev, cur->offset); |
| + break; |
| + case DUMP_TYPE_WDMA: |
| + val = wdma_r32(dev, cur->offset); |
| + break; |
| + case DUMP_TYPE_WPDMA_TX: |
| + val = wpdma_tx_r32(dev, cur->base, cur->offset); |
| + break; |
| + case DUMP_TYPE_WPDMA_TXFREE: |
| + val = wpdma_txfree_r32(dev, cur->offset); |
| + break; |
| + } |
| + print_reg_val(s, cur->name, val); |
| + } |
| +} |
| + |
| +static int |
| +wed_txinfo_show(struct seq_file *s, void *data) |
| +{ |
| + static const struct reg_dump regs[] = { |
| + DUMP_STR("WED TX"), |
| + DUMP_WED(WED_TX_MIB(0)), |
| + DUMP_WED_RING(WED_RING_TX(0)), |
| + |
| + DUMP_WED(WED_TX_MIB(1)), |
| + DUMP_WED_RING(WED_RING_TX(1)), |
| + |
| + DUMP_STR("WPDMA TX"), |
| + DUMP_WED(WED_WPDMA_TX_MIB(0)), |
| + DUMP_WED_RING(WED_WPDMA_RING_TX(0)), |
| + DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(0)), |
| + |
| + DUMP_WED(WED_WPDMA_TX_MIB(1)), |
| + DUMP_WED_RING(WED_WPDMA_RING_TX(1)), |
| + DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(1)), |
| + |
| + DUMP_STR("WPDMA TX"), |
| + DUMP_WPDMA_TX_RING(0), |
| + DUMP_WPDMA_TX_RING(1), |
| + |
| + DUMP_STR("WED WDMA RX"), |
| + DUMP_WED(WED_WDMA_RX_MIB(0)), |
| + DUMP_WED_RING(WED_WDMA_RING_RX(0)), |
| + DUMP_WED(WED_WDMA_RX_THRES(0)), |
| + DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(0)), |
| + DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(0)), |
| + |
| + DUMP_WED(WED_WDMA_RX_MIB(1)), |
| + DUMP_WED_RING(WED_WDMA_RING_RX(1)), |
| + DUMP_WED(WED_WDMA_RX_THRES(1)), |
| + DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(1)), |
| + DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(1)), |
| + |
| + DUMP_STR("WDMA RX"), |
| + DUMP_WDMA(WDMA_GLO_CFG), |
| + DUMP_WDMA_RING(WDMA_RING_RX(0)), |
| + DUMP_WDMA_RING(WDMA_RING_RX(1)), |
| + }; |
| + struct mtk_wed_hw *hw = s->private; |
| + struct mtk_wed_device *dev = hw->wed_dev; |
| + |
| + if (!dev) |
| + return 0; |
| + |
| + dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs)); |
| + |
| + return 0; |
| +} |
| +DEFINE_SHOW_ATTRIBUTE(wed_txinfo); |
| + |
|