blob: af32459593e0871e6d1396a4ecfd7d435f738d41 [file] [log] [blame]
From 659d8d088ee856cbc7598a26a307fd3e20a70e8e Mon Sep 17 00:00:00 2001
From: Sujuan Chen <sujuan.chen@mediatek.com>
Date: Mon, 18 Sep 2023 13:23:56 +0800
Subject: [PATCH 22/22] mtk: wed: add dma mask limitation and GFP_DMA32 for
 board >= 4GB dram

---
 drivers/net/ethernet/mediatek/mtk_wed.c     | 8 ++++++--
 drivers/net/ethernet/mediatek/mtk_wed_mcu.c | 4 ++--
 drivers/net/ethernet/mediatek/mtk_wed_wo.c  | 4 ++--
 drivers/net/ethernet/mediatek/mtk_wed_wo.h  | 1 +
 4 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 0d101d5..2ec7148 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -472,7 +472,7 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
 		void *buf;
 		int s;
 
-		page = __dev_alloc_pages(GFP_KERNEL, 0);
+		page = __dev_alloc_pages(GFP_KERNEL | GFP_DMA32, 0);
 		if (!page)
 			return -ENOMEM;
 
@@ -636,7 +636,7 @@ mtk_wed_rx_page_buffer_alloc(struct mtk_wed_device *dev)
 		void *buf;
 		int s;
 
-		page = __dev_alloc_pages(GFP_KERNEL, 0);
+		page = __dev_alloc_pages(GFP_KERNEL | GFP_DMA32, 0);
 		if (!page)
 			return -ENOMEM;
 
@@ -2249,6 +2249,10 @@ mtk_wed_attach(struct mtk_wed_device *dev)
 	dev->wdma_idx = hw->index;
 	dev->ver = hw->version;
 
+	ret = dma_set_mask_and_coherent(hw->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+
 	if (dev->hw->version == 3)
 		dev->hw->pci_base = mtk_wed_get_pci_base(dev);
 
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
index 055594d..4ed1548 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
@@ -131,7 +131,7 @@ int mtk_wed_exception_init(struct mtk_wed_wo *wo)
 	}req;
 
 	exp->log_size = EXCEPTION_LOG_SIZE;
-	exp->log = kmalloc(exp->log_size, GFP_ATOMIC);
+	exp->log = page_frag_alloc(&wo->page, exp->log_size, GFP_ATOMIC | GFP_DMA32);
 	if (!exp->log)
 		return -ENOMEM;
 
@@ -151,7 +151,7 @@ int mtk_wed_exception_init(struct mtk_wed_wo *wo)
 			     &req, sizeof(req), false);
 
 free:
-	kfree(exp->log);
+	skb_free_frag(exp->log);
 	return -ENOMEM;
 }
 
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
index 54b7787..e991d20 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
@@ -88,7 +88,7 @@ woif_q_rx_fill(struct mtk_wed_wo *wo, struct wed_wo_queue *q, bool rx)
 		page = &q->rx_page;
 
 	while (q->queued < q->ndesc) {
-		buf = page_frag_alloc(page, len, GFP_ATOMIC);
+		buf = page_frag_alloc(page, len, GFP_ATOMIC | GFP_DMA32);
 		if (!buf)
 			break;
 
@@ -555,7 +555,7 @@ void mtk_wed_wo_exit(struct mtk_wed_hw *hw)
 
 	if (wo->exp.log) {
 		dma_unmap_single(wo->hw->dev, wo->exp.phys, wo->exp.log_size, DMA_FROM_DEVICE);
-		kfree(wo->exp.log);
+		skb_free_frag(wo->exp.log);
 	}
 
 	wo->hw = NULL;
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.h b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
index 548b38e..3fd1f3f 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
@@ -193,6 +193,7 @@ struct mtk_wed_wo {
 	const struct wed_wo_drv_ops *drv_ops;
 	const struct wed_wo_mcu_ops *mcu_ops;
 	const struct wed_wo_queue_ops *queue_ops;
+	struct page_frag_cache page;
 
 	struct net_device napi_dev;
 	spinlock_t rx_lock;
-- 
2.18.0
