From bd9310d73f9c7396d06e60fc10ff554a8e02817f Mon Sep 17 00:00:00 2001
From: Sujuan Chen <sujuan.chen@mediatek.com>
Date: Fri, 25 Nov 2022 12:05:06 +0800
Subject: [PATCH 3003/3011] mt76: connac: wed: add wed rx copy skb

Instead of handing the WED rx buffer itself to the network stack, copy
the payload into a freshly allocated page fragment in mt76_dma_get_buf()
and keep t->ptr attached to the rxwi, so that mt76_dma_rx_fill() can
reuse that buffer instead of allocating a new one. Also free any
pending rxwi entries in mt76_dma_cleanup().

Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
---
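Notes (illustration only, not part of the applied patch): the diff below
implements a copy-then-recycle scheme for WED rx buffers. The following
standalone sketch shows the same idea with hypothetical stand-in types
(struct rxwi_cache, BUF_SIZE) and malloc() in place of page_frag_alloc();
it is a sketch of the pattern, not kernel code.

/*
 * Standalone sketch of the copy-then-recycle rx buffer pattern.
 * struct rxwi_cache and BUF_SIZE are hypothetical stand-ins for the
 * kernel structures; malloc() stands in for page_frag_alloc().
 * Build with: cc -o rxcopy rxcopy.c
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUF_SIZE 2048

struct rxwi_cache {
	void *ptr;	/* rx buffer that stays owned by the cache entry */
};

/* Consume a filled buffer: copy the payload out, keep t->ptr around. */
static void *get_buf(struct rxwi_cache *t, size_t len)
{
	void *buf = malloc(BUF_SIZE);

	if (!buf)
		return NULL;
	memcpy(buf, t->ptr, len);
	/* t->ptr is deliberately not cleared: the refill path reuses it */
	return buf;
}

/* Refill a slot: reuse the cached buffer if present, else allocate. */
static void *rx_fill(struct rxwi_cache *t)
{
	bool skip_alloc = false;
	void *buf = NULL;

	if (t->ptr) {
		skip_alloc = true;
		buf = t->ptr;
	}
	if (!skip_alloc) {
		buf = malloc(BUF_SIZE);
		if (!buf)
			return NULL;
		t->ptr = buf;
	}
	return buf;
}

int main(void)
{
	struct rxwi_cache t = { .ptr = NULL };
	void *data;

	rx_fill(&t);			/* first fill: allocates */
	strcpy(t.ptr, "payload");	/* hardware writes into t.ptr */
	data = get_buf(&t, sizeof("payload"));
	printf("copied out: %s\n", (char *)data);
	rx_fill(&t);			/* second fill: reuses t.ptr */
	free(data);
	free(t.ptr);
	return 0;
}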
 dma.c | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)

diff --git a/dma.c b/dma.c
index dc8d8882..53c7528f 100644
--- a/dma.c
+++ b/dma.c
@@ -386,9 +386,12 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 				 SKB_WITH_OVERHEAD(q->buf_size),
 				 DMA_FROM_DEVICE);
 
-		buf = t->ptr;
+		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
+		if (!buf)
+			return NULL;
+
+		memcpy(buf, t->ptr, SKB_WITH_OVERHEAD(q->buf_size));
 		t->dma_addr = 0;
-		t->ptr = NULL;
 
 		mt76_put_rxwi(dev, t);
 
@@ -568,6 +571,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 	while (q->queued < q->ndesc - 1) {
 		struct mt76_txwi_cache *t = NULL;
 		struct mt76_queue_buf qbuf;
+		bool skip_alloc = false;
 		void *buf = NULL;
 
 		if ((q->flags & MT_QFLAG_WED) &&
@@ -575,11 +579,18 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 			t = mt76_get_rxwi(dev);
 			if (!t)
 				break;
+
+			if (t->ptr) {
+				skip_alloc = true;
+				buf = t->ptr;
+			}
 		}
 
-		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
-		if (!buf)
-			break;
+		if (!skip_alloc) {
+			buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
+			if (!buf)
+				break;
+		}
 
 		addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
 		if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
@@ -949,5 +960,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
 
 	if (mtk_wed_device_active(&dev->mmio.wed))
 		mtk_wed_device_detach(&dev->mmio.wed);
+
+	mt76_free_pending_rxwi(dev);
 }
 EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
-- 
2.25.1
