From 6e14cb1fc0fb334c92192fc7440fa8b574c0961a Mon Sep 17 00:00:00 2001
From: Sujuan Chen <sujuan.chen@mediatek.com>
Date: Fri, 25 Nov 2022 12:05:06 +0800
Subject: [PATCH 3003/3012] mt76: connac: wed: add wed rx copy skb

Instead of handing the WED-owned rx buffer straight to the network
stack, copy the received frame into a freshly allocated page fragment
in mt76_dma_get_buf() and leave t->ptr attached to its rxwi entry, so
mt76_dma_rx_fill() can repost the original buffer instead of allocating
a new one. Free any buffers still held by pending rxwi entries in
mt76_dma_cleanup().

Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
---
 dma.c | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)

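Note for reviewers (annotation only, ignored by git am): a condensed
sketch of the mt76_dma_get_buf() WED path as it looks with this patch
applied; the non-WED branch and the dma_unmap_single() call are elided,
and all names come from the diff below:

	/* the WED buffer has just been unmapped; copy the frame out so
	 * the rxwi entry keeps ownership of t->ptr for the next refill
	 */
	buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
	if (!buf)
		return NULL;	/* drop the frame; t->ptr stays valid */

	memcpy(buf, t->ptr, SKB_WITH_OVERHEAD(q->buf_size));
	t->dma_addr = 0;	/* t->ptr is deliberately not cleared */

	mt76_put_rxwi(dev, t);	/* recycle entry together with its buffer */
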
diff --git a/dma.c b/dma.c
index dc8d8882..53c7528f 100644
--- a/dma.c
+++ b/dma.c
@@ -386,9 +386,12 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 				 SKB_WITH_OVERHEAD(q->buf_size),
 				 DMA_FROM_DEVICE);
 
-		buf = t->ptr;
+		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
+		if (!buf)
+			return NULL;
+
+		memcpy(buf, t->ptr, SKB_WITH_OVERHEAD(q->buf_size));
 		t->dma_addr = 0;
-		t->ptr = NULL;
 
 		mt76_put_rxwi(dev, t);
 
@@ -568,6 +571,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 	while (q->queued < q->ndesc - 1) {
 		struct mt76_txwi_cache *t = NULL;
 		struct mt76_queue_buf qbuf;
+		bool skip_alloc = false;
 		void *buf = NULL;
 
 		if ((q->flags & MT_QFLAG_WED) &&
@@ -575,11 +579,18 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 			t = mt76_get_rxwi(dev);
 			if (!t)
 				break;
+
+			if (t->ptr) {
+				skip_alloc = true;
+				buf = t->ptr;
+			}
 		}
 
-		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
-		if (!buf)
-			break;
+		if (!skip_alloc) {
+			buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
+			if (!buf)
+				break;
+		}
 
 		addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
 		if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
@@ -949,5 +960,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
 
 	if (mtk_wed_device_active(&dev->mmio.wed))
 		mtk_wed_device_detach(&dev->mmio.wed);
+
+	mt76_free_pending_rxwi(dev);
 }
 EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
-- 
2.18.0

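Reviewer note: mt76_free_pending_rxwi() is not defined in this patch.
For context, a minimal sketch of what such a helper is assumed to do,
now that rxwi entries keep their buffers across mt76_put_rxwi() (the
loop body is an assumption, not the actual implementation):

	/* drain the rxwi cache and release the page fragments that
	 * are still attached to the recycled entries
	 */
	void mt76_free_pending_rxwi(struct mt76_dev *dev)
	{
		struct mt76_txwi_cache *t;

		while ((t = mt76_get_rxwi(dev)) != NULL) {
			if (t->ptr)
				skb_free_frag(t->ptr);
			kfree(t);
		}
	}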