blob: b48ad31a536630c9d48bd215613594f8a0c7ef78 [file] [log] [blame]
From 4bf3625251b6086c3b57cbef75aa3ef6f3706fe3 Mon Sep 17 00:00:00 2001
From: "sujuan.chen" <sujuan.chen@mediatek.com>
Date: Thu, 20 Jul 2023 10:25:50 +0800
Subject: [PATCH 065/116] mtk: wifi: mt76: mt7996: add dma mask limitation

Signed-off-by: sujuan.chen <sujuan.chen@mediatek.com>
---
 dma.c | 4 ++--
 wed.c | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/dma.c b/dma.c
index 1021b3e5d..da21f6410 100644
--- a/dma.c
+++ b/dma.c
@@ -488,7 +488,7 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 	} else {
 		struct mt76_queue_buf qbuf;
 
-		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
+		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC | GFP_DMA32);
 		if (!buf)
 			return NULL;
 
@@ -711,7 +711,7 @@ int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
 		if (mt76_queue_is_wed_rro_ind(q))
 			goto done;
 
-		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
+		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC | GFP_DMA32);
 		if (!buf)
 			break;
 
diff --git a/wed.c b/wed.c
index 0a0b5c05c..1c6d53c84 100644
--- a/wed.c
+++ b/wed.c
@@ -65,14 +65,14 @@ u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 	for (i = 0; i < size; i++) {
 		struct mt76_rxwi_cache *r = mt76_get_rxwi(dev);
 		dma_addr_t addr;
-		struct page *page;
 		int token;
 		void *ptr;
 
 		if (!r)
 			goto unmap;
 
-		ptr = page_frag_alloc(&wed->rx_buf_ring.rx_page, length, GFP_ATOMIC);
+		ptr = page_frag_alloc(&wed->rx_buf_ring.rx_page, length,
+				      GFP_ATOMIC | GFP_DMA32);
 		if (!ptr) {
 			mt76_put_rxwi(dev, r);
 			goto unmap;
-- 
2.39.2
