From e8340bcdb520bd5b5ddf860de81402359ad556b9 Mon Sep 17 00:00:00 2001
From: Sujuan Chen <sujuan.chen@mediatek.com>
Date: Fri, 16 Dec 2022 12:16:40 +0800
Subject: [PATCH 3013/3013] mt76: mt7915: wed: fix potential memory leak

Release the RX queue pages in mt7915_mmio_wed_release_rx_buf() and
recycle the rxwi when the mt76_dma_add_buf() call fails.

Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
---
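Note (illustration only, not applied by this patch): the dma.c change
below makes the error path of mt76_dma_rx_fill() unwind everything
acquired for the failed attempt -- the rxwi is returned to the pool via
mt76_put_rxwi(), the DMA mapping is undone, and the fragment is freed
only when skip_alloc is false, i.e. when this fill path allocated it.
A minimal userspace sketch of that unwind order follows; token_get(),
token_put(), ring_add_buf() and fill_one() are hypothetical stand-ins,
not mt76 APIs.

/* Illustrative only: models the fixed error path of mt76_dma_rx_fill(). */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct token { int id; };                /* stands in for an rxwi descriptor */

static struct token *token_get(void)    /* acquires a descriptor from a pool */
{
	return calloc(1, sizeof(struct token));
}

static void token_put(struct token *t)  /* ~ mt76_put_rxwi(): recycle it */
{
	free(t);
}

static int ring_add_buf(void *buf)      /* ~ mt76_dma_add_buf(); forced to fail */
{
	(void)buf;
	return -1;
}

static int fill_one(bool skip_alloc, void *preallocated)
{
	struct token *t = token_get();
	void *buf = skip_alloc ? preallocated : malloc(2048);

	if (!t || !buf)
		goto unwind;

	if (ring_add_buf(buf) < 0)
		goto unwind;            /* before the fix, "t" leaked here */

	return 0;

unwind:
	token_put(t);                   /* recycle the descriptor first */
	if (!skip_alloc)                /* never free a buffer this path did not allocate */
		free(buf);
	return -1;
}

int main(void)
{
	printf("fill_one: %d\n", fill_one(false, NULL));
	return 0;
}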
 dma.c         | 16 ++++++++++------
 mt7915/mmio.c | 15 +++++++++++++++
 2 files changed, 25 insertions(+), 6 deletions(-)
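
Note (illustration only, not applied by this patch): the mmio.c hunk
makes mt7915_mmio_wed_release_rx_buf() walk every RX queue and drain
the per-queue page-fragment cache of the WED RX queues itself, since
mt76_dma_rx_cleanup() now skips those queues. A rough userspace model
of that teardown pass follows; the page_cache/rx_queue types and the
drain_page_cache()/release_rx_buf() helpers are invented for
illustration and stand in for virt_to_page() +
__page_frag_cache_drain().

/* Illustrative only: models the WED RX queue teardown added in mmio.c. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct page_cache {          /* ~ q->rx_page */
	void *va;
	int pagecnt_bias;
};

struct rx_queue {
	int is_wed_rx;           /* models FIELD_GET(MT_QFLAG_WED_TYPE, ...) == MT76_WED_Q_RX */
	struct page_cache rx_page;
};

static void drain_page_cache(struct page_cache *pc)
{
	if (!pc->va)
		return;              /* nothing cached for this queue */
	/* stand-in for virt_to_page() + __page_frag_cache_drain() */
	free(pc->va);
	memset(pc, 0, sizeof(*pc)); /* zero afterwards so a second pass is a no-op */
}

static void release_rx_buf(struct rx_queue *qs, int nq)
{
	for (int i = 0; i < nq; i++)    /* ~ mt76_for_each_q_rx() */
		if (qs[i].is_wed_rx)
			drain_page_cache(&qs[i].rx_page);
}

int main(void)
{
	struct rx_queue qs[2] = {
		{ .is_wed_rx = 1, .rx_page = { .va = malloc(4096), .pagecnt_bias = 1 } },
		{ .is_wed_rx = 0 },
	};

	release_rx_buf(qs, 2);
	printf("wed queue va after drain: %p\n", qs[0].rx_page.va);
	return 0;
}
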
diff --git a/dma.c b/dma.c
index 7ef272e..0925daf 100644
--- a/dma.c
+++ b/dma.c
@@ -633,9 +633,11 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 		qbuf.len = len - offset;
 		qbuf.skip_unmap = false;
 		if (mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL, r) < 0) {
+			mt76_put_rxwi(dev, r);
 			dma_unmap_single(dev->dma_dev, addr, len,
 					 DMA_FROM_DEVICE);
-			skb_free_frag(buf);
+			if (!skip_alloc)
+				skb_free_frag(buf);
 			break;
 		}
 		frames++;
@@ -758,12 +760,14 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 	} while (1);
 	spin_unlock_bh(&q->lock);
 
-	if (!q->rx_page.va)
-		return;
+	if (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) != MT76_WED_Q_RX) {
+		if (!q->rx_page.va)
+			return;
 
-	page = virt_to_page(q->rx_page.va);
-	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
-	memset(&q->rx_page, 0, sizeof(q->rx_page));
+		page = virt_to_page(q->rx_page.va);
+		__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+		memset(&q->rx_page, 0, sizeof(q->rx_page));
+	}
 }
 
 static void
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
index 09b3973..419338c 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -621,12 +621,27 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 
 	mt76_free_pending_rxwi(&dev->mt76);
 
+	mt76_for_each_q_rx(&dev->mt76, i) {
+		if (FIELD_GET(MT_QFLAG_WED_TYPE,
+			      dev->mt76.q_rx[i].flags) == MT76_WED_Q_RX) {
+			struct mt76_queue *q = &dev->mt76.q_rx[i];
+
+			if (!q->rx_page.va)
+				continue;
+
+			page = virt_to_page(q->rx_page.va);
+			__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+			memset(&q->rx_page, 0, sizeof(q->rx_page));
+		}
+	}
+
 	if (!wed->rx_buf_ring.rx_page.va)
 		return;
 
 	page = virt_to_page(wed->rx_buf_ring.rx_page.va);
 	__page_frag_cache_drain(page, wed->rx_buf_ring.rx_page.pagecnt_bias);
 	memset(&wed->rx_buf_ring.rx_page, 0, sizeof(wed->rx_buf_ring.rx_page));
+
 }
 
 static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
-- 
2.36.1