[][kernel][common][eth][Add support for coherent DMA]

[Description]
Add support for coherent DMA.

Coherent DMA improves performance by eliminating the need for a cache
flush on RX and TX.
In preparation for supporting WED (Wireless Ethernet Dispatch), also
add mtk_eth_set_dma_device(), a function for disabling coherent DMA
at runtime by switching the driver to a different DMA device.
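
As an illustration only (the caller below and its name are assumptions,
not part of this patch), a WED-style consumer whose own DMA is not
coherent would hand its struct device to the ethernet driver.
mtk_eth_set_dma_device() closes every open netdev, swaps eth->dma_dev,
and reopens them, so that all DMA rings and buffers are re-allocated
and re-mapped against the new device:

  /* Hypothetical WED attach path; only mtk_eth_set_dma_device()
   * is provided by this patch.
   */
  static int wed_attach(struct mtk_eth *eth, struct device *wed_dev)
  {
  	/* If the ethernet node is coherent but WED is not, route all
  	 * further DMA mappings/allocations through the WED device.
  	 */
  	if (eth->dma_dev == eth->dev &&
  	    of_dma_is_coherent(eth->dev->of_node))
  		mtk_eth_set_dma_device(eth, wed_dev);

  	return 0;
  }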

This modification is based on the following upstream Linux patch from
Felix Fietkau:
  - https://patchwork.kernel.org/project/linux-mediatek/patch/20220225101811.72103-2-nbd@nbd.name/
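
The coherent path is only engaged when the ethernet device tree node
is marked coherent: mtk_hw_init() programs the ETHSYS_DMA_AG_MAP agent
bits from of_dma_is_coherent(), and mtk_probe() enables bus snooping
through the optional "cci-control-port" syscon. A device tree fragment
along these lines is assumed (label names are illustrative, not taken
from this patch):

  &eth {
  	/* mark the ethernet DMA agents as cache-coherent */
  	dma-coherent;
  	/* CCI snoop control port; mtk_probe() writes 3 to its first
  	 * register to enable snoop and DVM requests
  	 */
  	cci-control-port = <&cci_control2>;
  };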

[Release-log]


Change-Id: I1dc9ec636ee0c780d8e8bd59673d36dd6bf1cbdb
Reviewed-on: https://gerrit.mediatek.inc/c/openwrt/feeds/mtk_openwrt_feeds/+/7214683
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 9836e99..2ae369f 100755
--- a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -9,6 +9,7 @@
 #include <linux/of_device.h>
 #include <linux/of_mdio.h>
 #include <linux/of_net.h>
+#include <linux/of_address.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
 #include <linux/clk.h>
@@ -1382,7 +1383,7 @@
 	int i;
 
 	if (!eth->soc->has_sram) {
-		eth->scratch_ring = dma_alloc_coherent(eth->dev,
+		eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
 					       cnt * soc->txrx.txd_size,
 					       &eth->phy_scratch_ring,
 					       GFP_KERNEL);
@@ -1400,10 +1401,10 @@
 	if (unlikely(!eth->scratch_head))
 		return -ENOMEM;
 
-	dma_addr = dma_map_single(eth->dev,
+	dma_addr = dma_map_single(eth->dma_dev,
 				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
 				  DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+	if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
 		return -ENOMEM;
 
 	phy_ring_tail = eth->phy_scratch_ring +
@@ -1467,26 +1468,26 @@
 {
 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
 		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
-			dma_unmap_single(eth->dev,
+			dma_unmap_single(eth->dma_dev,
 					 dma_unmap_addr(tx_buf, dma_addr0),
 					 dma_unmap_len(tx_buf, dma_len0),
 					 DMA_TO_DEVICE);
 		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
-			dma_unmap_page(eth->dev,
+			dma_unmap_page(eth->dma_dev,
 				       dma_unmap_addr(tx_buf, dma_addr0),
 				       dma_unmap_len(tx_buf, dma_len0),
 				       DMA_TO_DEVICE);
 		}
 	} else {
 		if (dma_unmap_len(tx_buf, dma_len0)) {
-			dma_unmap_page(eth->dev,
+			dma_unmap_page(eth->dma_dev,
 				       dma_unmap_addr(tx_buf, dma_addr0),
 				       dma_unmap_len(tx_buf, dma_len0),
 				       DMA_TO_DEVICE);
 		}
 
 		if (dma_unmap_len(tx_buf, dma_len1)) {
-			dma_unmap_page(eth->dev,
+			dma_unmap_page(eth->dma_dev,
 				       dma_unmap_addr(tx_buf, dma_addr1),
 				       dma_unmap_len(tx_buf, dma_len1),
 				       DMA_TO_DEVICE);
@@ -1727,9 +1728,9 @@
 	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
 	memset(itx_buf, 0, sizeof(*itx_buf));
 
-	txd_info.addr = dma_map_single(eth->dev, skb->data, txd_info.size,
+	txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
 				       DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(eth->dev, txd_info.addr)))
+	if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
 		return -ENOMEM;
 
 	mtk_tx_set_dma_desc(skb, dev, itxd, &txd_info);
@@ -1770,10 +1771,11 @@
 			txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
 			txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
 					!(frag_size - txd_info.size);
-			txd_info.addr = skb_frag_dma_map(eth->dev, frag,
+			txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
 							 offset, txd_info.size,
 							 DMA_TO_DEVICE);
-			if (unlikely(dma_mapping_error(eth->dev, txd_info.addr)))
+			if (unlikely(dma_mapping_error(eth->dma_dev,
+						       txd_info.addr)))
 				goto err_dma;
 
 			mtk_tx_set_dma_desc(skb, dev, txd, &txd_info);
@@ -2060,12 +2062,12 @@
 			netdev->stats.rx_dropped++;
 			goto release_desc;
 		}
-		dma_addr = dma_map_single(eth->dev,
+		dma_addr = dma_map_single(eth->dma_dev,
 					  new_data + NET_SKB_PAD +
 					  eth->ip_align,
 					  ring->buf_size,
 					  DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
+		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
 			skb_free_frag(new_data);
 			netdev->stats.rx_dropped++;
 			goto release_desc;
@@ -2074,7 +2076,7 @@
 		addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
 			  ((u64)(trxd.rxd2 & 0xf)) << 32 : 0;
 
-		dma_unmap_single(eth->dev,
+		dma_unmap_single(eth->dma_dev,
 				 (u64)(trxd.rxd1 | addr64),
 				 ring->buf_size, DMA_FROM_DEVICE);
 
@@ -2399,7 +2401,7 @@
 		goto no_tx_mem;
 
 	if (!eth->soc->has_sram)
-		ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+		ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
 					       &ring->phys, GFP_KERNEL);
 	else {
 		ring->dma =  eth->scratch_ring + MTK_DMA_SIZE * sz;
@@ -2433,7 +2435,8 @@
 	 * descriptors in ring->dma_pdma.
 	 */
 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
-		ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+		ring->dma_pdma = dma_alloc_coherent(eth->dma_dev,
+						    MTK_DMA_SIZE * sz,
 						    &ring->phys_pdma, GFP_KERNEL);
 		if (!ring->dma_pdma)
 			goto no_tx_mem;
@@ -2494,14 +2497,14 @@
 	}
 
 	if (!eth->soc->has_sram && ring->dma) {
-		dma_free_coherent(eth->dev,
+		dma_free_coherent(eth->dma_dev,
 				  MTK_DMA_SIZE * soc->txrx.txd_size,
 				  ring->dma, ring->phys);
 		ring->dma = NULL;
 	}
 
 	if (ring->dma_pdma) {
-		dma_free_coherent(eth->dev,
+		dma_free_coherent(eth->dma_dev,
 				  MTK_DMA_SIZE * soc->txrx.txd_size,
 				  ring->dma_pdma, ring->phys_pdma);
 		ring->dma_pdma = NULL;
@@ -2547,7 +2550,7 @@
 
 	if ((!eth->soc->has_sram) || (eth->soc->has_sram
 				&& (rx_flag != MTK_RX_FLAGS_NORMAL)))
-		ring->dma = dma_alloc_coherent(eth->dev,
+		ring->dma = dma_alloc_coherent(eth->dma_dev,
 					       rx_dma_size * eth->soc->txrx.rxd_size,
 					       &ring->phys, GFP_KERNEL);
 	else {
@@ -2564,11 +2567,11 @@
 	for (i = 0; i < rx_dma_size; i++) {
 		struct mtk_rx_dma_v2 *rxd;
 
-		dma_addr_t dma_addr = dma_map_single(eth->dev,
+		dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
 				ring->data[i] + NET_SKB_PAD + eth->ip_align,
 				ring->buf_size,
 				DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
 			return -ENOMEM;
 
 		rxd = ring->dma + i * eth->soc->txrx.rxd_size;
@@ -2648,7 +2651,7 @@
 					       MTK_8GB_ADDRESSING)) ?
 				  ((u64)(rxd->rxd2 & 0xf)) << 32 : 0;
 
-			dma_unmap_single(eth->dev,
+			dma_unmap_single(eth->dma_dev,
 					 (u64)(rxd->rxd1 | addr64),
 					 ring->buf_size,
 					 DMA_FROM_DEVICE);
@@ -2662,7 +2665,7 @@
 		return;
 
 	if (ring->dma) {
-		dma_free_coherent(eth->dev,
+		dma_free_coherent(eth->dma_dev,
 				  ring->dma_size * eth->soc->txrx.rxd_size,
 				  ring->dma,
 				  ring->phys);
@@ -3149,7 +3152,7 @@
 		if (eth->netdev[i])
 			netdev_reset_queue(eth->netdev[i]);
 	if ( !eth->soc->has_sram && eth->scratch_ring) {
-		dma_free_coherent(eth->dev,
+		dma_free_coherent(eth->dma_dev,
 				  MTK_DMA_SIZE * soc->txrx.txd_size,
 				  eth->scratch_ring, eth->phy_scratch_ring);
 		eth->scratch_ring = NULL;
@@ -3603,6 +3606,8 @@
 
 static int mtk_hw_init(struct mtk_eth *eth, u32 type)
 {
+	u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
+		       ETHSYS_DMA_AG_MAP_PPE;
 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
 	int i, ret = 0;
 	u32 val;
@@ -3622,6 +3627,11 @@
 			goto err_disable_pm;
 	}
 
+	if (eth->ethsys)
+		regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
+				   of_dma_is_coherent(eth->dma_dev->of_node) *
+				   dma_mask);
+
 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
 		ret = device_reset(eth->dev);
 		if (ret) {
@@ -4446,6 +4456,35 @@
 	return err;
 }
 
+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
+{
+	struct net_device *dev, *tmp;
+	LIST_HEAD(dev_list);
+	int i;
+
+	rtnl_lock();
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		dev = eth->netdev[i];
+
+		if (!dev || !(dev->flags & IFF_UP))
+			continue;
+
+		list_add_tail(&dev->close_list, &dev_list);
+	}
+
+	dev_close_many(&dev_list, false);
+
+	eth->dma_dev = dma_dev;
+
+	list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
+		list_del_init(&dev->close_list);
+		dev_open(dev, NULL);
+	}
+
+	rtnl_unlock();
+}
+
 static int mtk_probe(struct platform_device *pdev)
 {
 	struct device_node *mac_np;
@@ -4459,6 +4498,7 @@
 	eth->soc = of_device_get_match_data(&pdev->dev);
 
 	eth->dev = &pdev->dev;
+	eth->dma_dev = &pdev->dev;
 	eth->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(eth->base))
 		return PTR_ERR(eth->base);
@@ -4515,6 +4555,16 @@
 		}
 	}
 
+	if (of_dma_is_coherent(pdev->dev.of_node)) {
+		struct regmap *cci;
+
+		cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+						      "cci-control-port");
+		/* enable CPU/bus coherency */
+		if (!IS_ERR(cci))
+			regmap_write(cci, 0, 3);
+	}
+
 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
 		eth->xgmii = devm_kzalloc(eth->dev, sizeof(*eth->xgmii),
 					  GFP_KERNEL);
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 0683ad1..e1fe8a3 100755
--- a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -792,6 +792,11 @@
 /* ethernet reset check idle register */
 #define ETHSYS_FE_RST_CHK_IDLE_EN 	0x28
 
+/* ethernet dma channel agent map */
+#define ETHSYS_DMA_AG_MAP	0x408
+#define ETHSYS_DMA_AG_MAP_PDMA	BIT(0)
+#define ETHSYS_DMA_AG_MAP_QDMA	BIT(1)
+#define ETHSYS_DMA_AG_MAP_PPE	BIT(2)
 
 /* SGMII subsystem config registers */
 /* Register to auto-negotiation restart */
@@ -1624,6 +1629,7 @@
 /* struct mtk_eth -	This is the main datasructure for holding the state
  *			of the driver
  * @dev:		The device pointer
+ * @dma_dev:		The device pointer used for dma mapping/alloc
  * @base:		The mapped register i/o base
  * @page_lock:		Make sure that register operations are atomic
  * @tx_irq__lock:	Make sure that IRQ register operations are atomic
@@ -1658,6 +1664,7 @@
 
 struct mtk_eth {
 	struct device			*dev;
+	struct device			*dma_dev;
 	void __iomem			*base;
 	void __iomem			*sram_base;
 	spinlock_t			page_lock;
@@ -1766,4 +1773,6 @@
 void mtk_usxgmii_setup_phya_an_10000(struct mtk_xgmii *ss, int mac_id);
 void mtk_usxgmii_reset(struct mtk_xgmii *ss, int mac_id);
 int mtk_dump_usxgmii(struct regmap *pmap, char *name, u32 offset, u32 range);
+
+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
 #endif /* MTK_ETH_H */