From 5f7175246f6a734b63ecd336f1b3ad0bc4f37048 Mon Sep 17 00:00:00 2001
From: Sujuan Chen <sujuan.chen@mediatek.com>
Date: Fri, 6 Jan 2023 18:25:51 +0800
Subject: [PATCH 3012/3014] mt76: mt7915: wed: fix potential memory leakage

Release rx queue pages in mt7915_mmio_wed_release_rx_buf().
Recycle the rxwi when the mt76_dma_add_buf() call fails.

Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
---
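Note: the rx page leak comes down to page_frag_cache pages that are
still referenced when their queue goes away. The drain pattern used in
the hunks below is the stock kernel one; as a standalone sketch (the
helper name drain_rx_page is illustrative, not an mt76 symbol):

	#include <linux/gfp.h>
	#include <linux/mm.h>

	/* Drop whatever the fragment cache still holds: cache->va points
	 * into the current page, and pagecnt_bias counts the references
	 * the cache itself still owns on that page.
	 */
	static void drain_rx_page(struct page_frag_cache *cache)
	{
		struct page *page;

		if (!cache->va)
			return;

		page = virt_to_page(cache->va);
		/* release all bias refs at once instead of put_page() loops */
		__page_frag_cache_drain(page, cache->pagecnt_bias);
		memset(cache, 0, sizeof(*cache));
	}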
 dma.c         | 12 +++++++-----
 mt7915/mmio.c | 15 +++++++++++++++
 2 files changed, 22 insertions(+), 5 deletions(-)

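Note: the rxwi recycling named in the commit message does not appear in
the hunks below; only the rx_page handling does. For reference, that
error path has roughly the following shape. This is a sketch, not code
from this patch: refill_one_wed_rx_buf is a hypothetical helper, and it
assumes the mt76_get_rxwi()/mt76_put_rxwi() helpers from the WED RX
support series plus the static mt76_dma_add_buf() in dma.c.

	/* On a failed mt76_dma_add_buf(), hand the rxwi token back to
	 * the free list instead of dropping it on the floor.
	 */
	static int refill_one_wed_rx_buf(struct mt76_dev *dev,
					 struct mt76_queue *q,
					 dma_addr_t addr, int len)
	{
		struct mt76_queue_buf qbuf = {
			.addr = addr,
			.len = len,
		};
		struct mt76_txwi_cache *t = mt76_get_rxwi(dev);

		if (!t)
			return -ENOMEM;

		if (mt76_dma_add_buf(dev, q, &qbuf, 1, 0, NULL, t) < 0) {
			mt76_put_rxwi(dev, t);	/* recycle, don't leak */
			return -ENOMEM;
		}

		return 0;
	}
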
diff --git a/dma.c b/dma.c
index 21f26df7..cb1fd9c6 100644
--- a/dma.c
+++ b/dma.c
@@ -795,12 +795,14 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 	} while (1);
 	spin_unlock_bh(&q->lock);
 
-	if (!q->rx_page.va)
-		return;
+	if (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) != MT76_WED_Q_RX) {
+		if (!q->rx_page.va)
+			return;
 
-	page = virt_to_page(q->rx_page.va);
-	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
-	memset(&q->rx_page, 0, sizeof(q->rx_page));
+		page = virt_to_page(q->rx_page.va);
+		__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+		memset(&q->rx_page, 0, sizeof(q->rx_page));
+	}
 }
 
 static void
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
index 09b39730..419338cc 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -621,12 +621,27 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 
 	mt76_free_pending_rxwi(&dev->mt76);
 
+	mt76_for_each_q_rx(&dev->mt76, i) {
+		if (FIELD_GET(MT_QFLAG_WED_TYPE,
+			      dev->mt76.q_rx[i].flags) == MT76_WED_Q_RX) {
+			struct mt76_queue *q = &dev->mt76.q_rx[i];
+
+			if (!q->rx_page.va)
+				continue;
+
+			page = virt_to_page(q->rx_page.va);
+			__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+			memset(&q->rx_page, 0, sizeof(q->rx_page));
+		}
+	}
+
 	if (!wed->rx_buf_ring.rx_page.va)
 		return;
 
 	page = virt_to_page(wed->rx_buf_ring.rx_page.va);
 	__page_frag_cache_drain(page, wed->rx_buf_ring.rx_page.pagecnt_bias);
 	memset(&wed->rx_buf_ring.rx_page, 0, sizeof(wed->rx_buf_ring.rx_page));
+
 }
 
 static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
-- 
2.18.0
