blob: 648cf4fa83719795fadd64ca5704940894787bbf [file] [log] [blame]
developerd75d3632023-01-05 14:31:01 +08001From 4f947075bef0a413742d8f47b869b3087e242ca8 Mon Sep 17 00:00:00 2001
developer4c9435c2022-12-16 12:24:31 +08002From: Sujuan Chen <sujuan.chen@mediatek.com>
3Date: Fri, 16 Dec 2022 12:16:40 +0800
developerd75d3632023-01-05 14:31:01 +08004Subject: [PATCH 3012/3013] mt76: mt7915: wed: fix potential memory leak
developer4c9435c2022-12-16 12:24:31 +08005
6Release the rx queue pages in mt7915_mmio_wed_release_rx_buf().
7Recycle the rxwi when the mt76_dma_add_buf() call fails.
8
9Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
10---
11 dma.c | 16 ++++++++++------
12 mt7915/mmio.c | 15 +++++++++++++++
13 2 files changed, 25 insertions(+), 6 deletions(-)
14
15diff --git a/dma.c b/dma.c
developer699cda22022-12-17 15:21:57 +080016index 7ef272e..0925daf 100644
developer4c9435c2022-12-16 12:24:31 +080017--- a/dma.c
18+++ b/dma.c
19@@ -633,9 +633,11 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
20 qbuf.len = len - offset;
21 qbuf.skip_unmap = false;
22 if (mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL, r) < 0) {
23+ mt76_put_rxwi(dev, r);
24 dma_unmap_single(dev->dma_dev, addr, len,
25 DMA_FROM_DEVICE);
26- skb_free_frag(buf);
27+ if (!skip_alloc)
28+ skb_free_frag(buf);
29 break;
30 }
31 frames++;
32@@ -758,12 +760,14 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
33 } while (1);
34 spin_unlock_bh(&q->lock);
35
36- if (!q->rx_page.va)
37- return;
38+ if (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) != MT76_WED_Q_RX) {
39+ if (!q->rx_page.va)
40+ return;
41
42- page = virt_to_page(q->rx_page.va);
43- __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
44- memset(&q->rx_page, 0, sizeof(q->rx_page));
45+ page = virt_to_page(q->rx_page.va);
46+ __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
47+ memset(&q->rx_page, 0, sizeof(q->rx_page));
48+ }
49 }
50
51 static void
52diff --git a/mt7915/mmio.c b/mt7915/mmio.c
developer699cda22022-12-17 15:21:57 +080053index 09b3973..419338c 100644
developer4c9435c2022-12-16 12:24:31 +080054--- a/mt7915/mmio.c
55+++ b/mt7915/mmio.c
56@@ -621,12 +621,27 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
57
58 mt76_free_pending_rxwi(&dev->mt76);
59
60+ mt76_for_each_q_rx(&dev->mt76, i) {
61+ if (FIELD_GET(MT_QFLAG_WED_TYPE,
62+ dev->mt76.q_rx[i].flags) == MT76_WED_Q_RX) {
63+ struct mt76_queue *q = &dev->mt76.q_rx[i];
64+
65+ if (!q->rx_page.va)
66+ continue;
67+
68+ page = virt_to_page(q->rx_page.va);
69+ __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
70+ memset(&q->rx_page, 0, sizeof(q->rx_page));
71+ }
72+ }
73+
74 if (!wed->rx_buf_ring.rx_page.va)
75 return;
76
77 page = virt_to_page(wed->rx_buf_ring.rx_page.va);
78 __page_frag_cache_drain(page, wed->rx_buf_ring.rx_page.pagecnt_bias);
79 memset(&wed->rx_buf_ring.rx_page, 0, sizeof(wed->rx_buf_ring.rx_page));
80+
81 }
82
83 static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
84--
developerd75d3632023-01-05 14:31:01 +0800852.18.0
developer4c9435c2022-12-16 12:24:31 +080086