From 0f7a824cab3692fe35f73f6d11e788619e02193f Mon Sep 17 00:00:00 2001
From: Sujuan Chen <sujuan.chen@mediatek.com>
Date: Thu, 5 Jan 2023 16:43:57 +0800
Subject: [PATCH 3003/3014] mt76: connac: wed: add wed rx copy skb

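Instead of handing the hardware-filled WED rxwi buffer directly to the
stack, copy the received data into a freshly allocated page fragment,
then re-map the original rxwi buffer and queue it back to the RX ring
through mt76_dma_add_rx_buf(), which now takes an existing rxwi entry
instead of always allocating a new one. Also kick WED RX queues in
mt76_dma_rx_fill() even when no new buffers were filled.
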
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
---
 dma.c | 51 ++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 40 insertions(+), 11 deletions(-)

diff --git a/dma.c b/dma.c
index f977b7f5..1082d6f1 100644
--- a/dma.c
+++ b/dma.c
@@ -207,11 +207,11 @@ mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
 
 static int
 mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
-		    struct mt76_queue_buf *buf, void *data)
+		    struct mt76_queue_buf *buf, void *data,
+		    struct mt76_txwi_cache *txwi)
 {
 	struct mt76_desc *desc = &q->desc[q->head];
 	struct mt76_queue_entry *entry = &q->entry[q->head];
-	struct mt76_txwi_cache *txwi = NULL;
 	u32 buf1 = 0, ctrl;
 	int idx = q->head;
 	int rx_token;
@@ -220,9 +220,11 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 
 	if ((q->flags & MT_QFLAG_WED) &&
 	    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
-		txwi = mt76_get_rxwi(dev);
-		if (!txwi)
-			return -ENOMEM;
+		if (!txwi) {
+			txwi = mt76_get_rxwi(dev);
+			if (!txwi)
+				return -ENOMEM;
+		}
 
 		rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
 		if (rx_token < 0) {
@@ -406,6 +408,7 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN,
 				      le32_to_cpu(desc->buf1));
 		struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);
+		struct mt76_queue_buf qbuf;
 
 		if (!t)
 			return NULL;
@@ -414,11 +417,33 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 				 SKB_WITH_OVERHEAD(q->buf_size),
 				 DMA_FROM_DEVICE);
 
-		buf = t->ptr;
-		t->dma_addr = 0;
-		t->ptr = NULL;
+		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
+		if (!buf)
+			return NULL;
+
+		memcpy(buf, t->ptr, SKB_WITH_OVERHEAD(q->buf_size));
+
+		t->dma_addr = dma_map_single(dev->dma_dev, t->ptr,
+					     SKB_WITH_OVERHEAD(q->buf_size),
+					     DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(dev->dma_dev, t->dma_addr))) {
+			skb_free_frag(t->ptr);
+			mt76_put_rxwi(dev, t);
+			return NULL;
+		}
+
+		qbuf.addr = t->dma_addr;
+		qbuf.len = SKB_WITH_OVERHEAD(q->buf_size);
+		qbuf.skip_unmap = false;
 
-		mt76_put_rxwi(dev, t);
+		if (mt76_dma_add_rx_buf(dev, q, &qbuf, t->ptr, t) < 0) {
+			dma_unmap_single(dev->dma_dev, t->dma_addr,
+					 SKB_WITH_OVERHEAD(q->buf_size),
+					 DMA_FROM_DEVICE);
+			skb_free_frag(t->ptr);
+			mt76_put_rxwi(dev, t);
+			return NULL;
+		}
 
 		if (drop) {
 			u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
@@ -587,6 +612,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 	int len = SKB_WITH_OVERHEAD(q->buf_size);
 	int frames = 0, offset = q->buf_offset;
 	dma_addr_t addr;
+	bool flags = false;
 
 	if (!q->ndesc)
 		return 0;
@@ -610,7 +636,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 		qbuf.addr = addr + offset;
 		qbuf.len = len - offset;
 		qbuf.skip_unmap = false;
-		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
+		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf, NULL) < 0) {
 			dma_unmap_single(dev->dma_dev, addr, len,
 					 DMA_FROM_DEVICE);
 			skb_free_frag(buf);
@@ -619,7 +645,10 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 		frames++;
 	}
 
-	if (frames)
+	flags = (q->flags & MT_QFLAG_WED) &&
+		FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX;
+
+	if (frames || flags)
 		mt76_dma_kick_queue(dev, q);
 
 	spin_unlock_bh(&q->lock);
-- 
2.18.0