developer | f6ebf63 | 2023-01-06 19:15:00 +0800 | [diff] [blame] | 1 | From 5f7175246f6a734b63ecd336f1b3ad0bc4f37048 Mon Sep 17 00:00:00 2001 |
developer | 4c9435c | 2022-12-16 12:24:31 +0800 | [diff] [blame] | 2 | From: Sujuan Chen <sujuan.chen@mediatek.com> |
developer | f6ebf63 | 2023-01-06 19:15:00 +0800 | [diff] [blame] | 3 | Date: Fri, 6 Jan 2023 18:25:51 +0800 |
| 4 | Subject: [PATCH 3012/3014] mt76: mt7915: wed: fix potential memory leak |
developer | 4c9435c | 2022-12-16 12:24:31 +0800 | [diff] [blame] | 5 | |
| 6 | Release RX queue pages in mt7915_mmio_wed_release_rx_buf(). |
| 7 | Recycle the rxwi descriptor when a mt76_dma_add_buf() call fails. |
| 8 | |
| 9 | Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com> |
| 10 | --- |
developer | f6ebf63 | 2023-01-06 19:15:00 +0800 | [diff] [blame] | 11 | dma.c | 12 +++++++----- |
developer | 4c9435c | 2022-12-16 12:24:31 +0800 | [diff] [blame] | 12 | mt7915/mmio.c | 15 +++++++++++++++ |
developer | f6ebf63 | 2023-01-06 19:15:00 +0800 | [diff] [blame] | 13 | 2 files changed, 22 insertions(+), 5 deletions(-) |
developer | 4c9435c | 2022-12-16 12:24:31 +0800 | [diff] [blame] | 14 | |
| 15 | diff --git a/dma.c b/dma.c |
developer | f6ebf63 | 2023-01-06 19:15:00 +0800 | [diff] [blame] | 16 | index 21f26df7..cb1fd9c6 100644 |
developer | 4c9435c | 2022-12-16 12:24:31 +0800 | [diff] [blame] | 17 | --- a/dma.c |
| 18 | +++ b/dma.c |
developer | 28d0474 | 2023-01-18 14:02:40 +0800 | [diff] [blame] | 19 | @@ -803,12 +803,14 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q) |
developer | 4c9435c | 2022-12-16 12:24:31 +0800 | [diff] [blame] | 20 | } while (1); |
| 21 | spin_unlock_bh(&q->lock); |
| 22 | |
| 23 | - if (!q->rx_page.va) |
| 24 | - return; |
| 25 | + if (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) != MT76_WED_Q_RX) { |
| 26 | + if (!q->rx_page.va) |
| 27 | + return; |
| 28 | |
| 29 | - page = virt_to_page(q->rx_page.va); |
| 30 | - __page_frag_cache_drain(page, q->rx_page.pagecnt_bias); |
| 31 | - memset(&q->rx_page, 0, sizeof(q->rx_page)); |
| 32 | + page = virt_to_page(q->rx_page.va); |
| 33 | + __page_frag_cache_drain(page, q->rx_page.pagecnt_bias); |
| 34 | + memset(&q->rx_page, 0, sizeof(q->rx_page)); |
| 35 | + } |
| 36 | } |
| 37 | |
| 38 | static void |
| 39 | diff --git a/mt7915/mmio.c b/mt7915/mmio.c |
developer | f6ebf63 | 2023-01-06 19:15:00 +0800 | [diff] [blame] | 40 | index 09b39730..419338cc 100644 |
developer | 4c9435c | 2022-12-16 12:24:31 +0800 | [diff] [blame] | 41 | --- a/mt7915/mmio.c |
| 42 | +++ b/mt7915/mmio.c |
| 43 | @@ -621,12 +621,27 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed) |
| 44 | |
| 45 | mt76_free_pending_rxwi(&dev->mt76); |
| 46 | |
| 47 | + mt76_for_each_q_rx(&dev->mt76, i) { |
| 48 | + if (FIELD_GET(MT_QFLAG_WED_TYPE, |
| 49 | + dev->mt76.q_rx[i].flags) == MT76_WED_Q_RX) { |
| 50 | + struct mt76_queue *q = &dev->mt76.q_rx[i]; |
| 51 | + |
| 52 | + if (!q->rx_page.va) |
| 53 | + continue; |
| 54 | + |
| 55 | + page = virt_to_page(q->rx_page.va); |
| 56 | + __page_frag_cache_drain(page, q->rx_page.pagecnt_bias); |
| 57 | + memset(&q->rx_page, 0, sizeof(q->rx_page)); |
| 58 | + } |
| 59 | + } |
| 60 | + |
| 61 | if (!wed->rx_buf_ring.rx_page.va) |
| 62 | return; |
| 63 | |
| 64 | page = virt_to_page(wed->rx_buf_ring.rx_page.va); |
| 65 | __page_frag_cache_drain(page, wed->rx_buf_ring.rx_page.pagecnt_bias); |
| 66 | memset(&wed->rx_buf_ring.rx_page, 0, sizeof(wed->rx_buf_ring.rx_page)); |
| 67 | + |
| 68 | } |
| 69 | |
| 70 | static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size) |
| 71 | -- |
developer | d75d363 | 2023-01-05 14:31:01 +0800 | [diff] [blame] | 72 | 2.18.0 |
developer | 4c9435c | 2022-12-16 12:24:31 +0800 | [diff] [blame] | 73 | |