From 57294c4562d1f93ee2deaef67a2ad15fb925c288 Mon Sep 17 00:00:00 2001
From: "sujuan.chen" <sujuan.chen@mediatek.com>
Date: Wed, 19 Jul 2023 17:22:59 +0800
Subject: [PATCH] mtk: wed: add dma mask limitation and GFP_DMA32 for board w/
 >= 4GB dram

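The WED HW and the WO MCU hand 32-bit DMA addresses to their buffers, so on
boards with >= 4GB of DRAM an allocation can land above the range the HW can
reach. Set a 32-bit DMA mask on the WED device and allocate every HW-visible
buffer from the DMA32 zone so it stays below 4GB. As a rough sketch of the
pattern the hunks below apply (a summary only, not an extra change; hw->dev
is the WED platform device):

	/* cap the WED device to 32-bit DMA addresses */
	ret = dma_set_mask_and_coherent(hw->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/* force HW-visible pages into the DMA32 zone (below 4GB) */
	page = __dev_alloc_pages(GFP_KERNEL | GFP_DMA32, 0);
	if (!page)
		return -ENOMEM;
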
Signed-off-by: sujuan.chen <sujuan.chen@mediatek.com>
---
 drivers/net/ethernet/mediatek/mtk_wed.c     | 8 ++++++--
 drivers/net/ethernet/mediatek/mtk_wed_mcu.c | 4 ++--
 drivers/net/ethernet/mediatek/mtk_wed_wo.c  | 4 ++--
 drivers/net/ethernet/mediatek/mtk_wed_wo.h  | 1 +
 4 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 45dfa0c..d4d8658 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -472,7 +472,7 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
 void *buf;
 int s;

- page = __dev_alloc_pages(GFP_KERNEL, 0);
+ page = __dev_alloc_pages(GFP_KERNEL | GFP_DMA32, 0);
 if (!page)
 return -ENOMEM;

@@ -636,7 +636,7 @@ mtk_wed_rx_page_buffer_alloc(struct mtk_wed_device *dev)
 void *buf;
 int s;

- page = __dev_alloc_pages(GFP_KERNEL, 0);
+ page = __dev_alloc_pages(GFP_KERNEL | GFP_DMA32, 0);
 if (!page)
 return -ENOMEM;

@@ -2239,6 +2239,10 @@ mtk_wed_attach(struct mtk_wed_device *dev)
 dev->wdma_idx = hw->index;
 dev->ver = hw->version;

+ ret = dma_set_mask_and_coherent(hw->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
 if (dev->hw->version == 3)
 dev->hw->pci_base = mtk_wed_get_pci_base(dev);

diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
index 055594d..4ed1548 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
@@ -131,7 +131,7 @@ int mtk_wed_exception_init(struct mtk_wed_wo *wo)
 }req;

 exp->log_size = EXCEPTION_LOG_SIZE;
- exp->log = kmalloc(exp->log_size, GFP_ATOMIC);
+ exp->log = page_frag_alloc(&wo->page, exp->log_size, GFP_ATOMIC | GFP_DMA32);
 if (!exp->log)
 return -ENOMEM;

@@ -151,7 +151,7 @@ int mtk_wed_exception_init(struct mtk_wed_wo *wo)
 &req, sizeof(req), false);

 free:
- kfree(exp->log);
+ skb_free_frag(exp->log);
 return -ENOMEM;
 }

diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
index 54b7787..e991d20 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
@@ -88,7 +88,7 @@ woif_q_rx_fill(struct mtk_wed_wo *wo, struct wed_wo_queue *q, bool rx)
 page = &q->rx_page;

 while (q->queued < q->ndesc) {
- buf = page_frag_alloc(page, len, GFP_ATOMIC);
+ buf = page_frag_alloc(page, len, GFP_ATOMIC | GFP_DMA32);
 if (!buf)
 break;

@@ -555,7 +555,7 @@ void mtk_wed_wo_exit(struct mtk_wed_hw *hw)

 if (wo->exp.log) {
 dma_unmap_single(wo->hw->dev, wo->exp.phys, wo->exp.log_size, DMA_FROM_DEVICE);
- kfree(wo->exp.log);
+ skb_free_frag(wo->exp.log);
 }

 wo->hw = NULL;
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.h b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
index 548b38e..3fd1f3f 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
@@ -193,6 +193,7 @@ struct mtk_wed_wo {
 const struct wed_wo_drv_ops *drv_ops;
 const struct wed_wo_mcu_ops *mcu_ops;
 const struct wed_wo_queue_ops *queue_ops;
+ struct page_frag_cache page;

 struct net_device napi_dev;
 spinlock_t rx_lock;
--
2.18.0
