From cf9c84fbe7863a9af60e00bbb18ebdc6a4f29020 Mon Sep 17 00:00:00 2001
From: Sujuan Chen <sujuan.chen@mediatek.com>
Date: Thu, 5 Jan 2023 16:43:57 +0800
Subject: [PATCH 3003/3014] mt76: connac: wed: add wed rx copy skb

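Instead of handing a WED rx buffer straight to the network stack, copy
the payload into a freshly allocated frag and recycle the original
buffer: remap it and re-add it to the rx ring via mt76_dma_add_rx_buf(),
which now takes an optional rxwi so a recycled buffer keeps its slot.
mt76_dma_get_buf() gains a flush argument so queue teardown still
releases buffers the old way, and mt76_dma_rx_fill() kicks WED rx queues
even when it filled no new frames, since recycled buffers are re-queued
outside of it.
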
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
---
 dma.c | 67 ++++++++++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 52 insertions(+), 15 deletions(-)

diff --git a/dma.c b/dma.c
index e05b7ca1..74e2169e 100644
--- a/dma.c
+++ b/dma.c
@@ -207,11 +207,11 @@ mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
 
 static int
 mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
-		    struct mt76_queue_buf *buf, void *data)
+		    struct mt76_queue_buf *buf, void *data,
+		    struct mt76_txwi_cache *txwi)
 {
 	struct mt76_desc *desc = &q->desc[q->head];
 	struct mt76_queue_entry *entry = &q->entry[q->head];
-	struct mt76_txwi_cache *txwi = NULL;
 	u32 buf1 = 0, ctrl;
 	int idx = q->head;
 	int rx_token;
@@ -220,9 +220,11 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 
 	if ((q->flags & MT_QFLAG_WED) &&
 	    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
-		txwi = mt76_get_rxwi(dev);
-		if (!txwi)
-			return -ENOMEM;
+		if (!txwi) {
+			txwi = mt76_get_rxwi(dev);
+			if (!txwi)
+				return -ENOMEM;
+		}
 
 		rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
 		if (rx_token < 0) {
@@ -386,7 +388,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
 
 static void *
 mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
-		 int *len, u32 *info, bool *more, bool *drop)
+		 int *len, u32 *info, bool *more, bool *drop, bool flush)
 {
 	struct mt76_queue_entry *e = &q->entry[idx];
 	struct mt76_desc *desc = &q->desc[idx];
@@ -413,12 +415,43 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 		dma_unmap_single(dev->dma_dev, t->dma_addr,
 				 SKB_WITH_OVERHEAD(q->buf_size),
 				 DMA_FROM_DEVICE);
-
-		buf = t->ptr;
-		t->dma_addr = 0;
-		t->ptr = NULL;
-
-		mt76_put_rxwi(dev, t);
+		if (flush) {
+			buf = t->ptr;
+			t->dma_addr = 0;
+			t->ptr = NULL;
+
+			mt76_put_rxwi(dev, t);
+		} else {
+			struct mt76_queue_buf qbuf;
+
+			buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
+			if (!buf)
+				return NULL;
+
+			memcpy(buf, t->ptr, SKB_WITH_OVERHEAD(q->buf_size));
+
+			t->dma_addr = dma_map_single(dev->dma_dev, t->ptr,
+						     SKB_WITH_OVERHEAD(q->buf_size),
+						     DMA_FROM_DEVICE);
+			if (unlikely(dma_mapping_error(dev->dma_dev, t->dma_addr))) {
+				skb_free_frag(t->ptr);
+				mt76_put_rxwi(dev, t);
+				return NULL;
+			}
+
+			qbuf.addr = t->dma_addr;
+			qbuf.len = SKB_WITH_OVERHEAD(q->buf_size);
+			qbuf.skip_unmap = false;
+
+			if (mt76_dma_add_rx_buf(dev, q, &qbuf, t->ptr, t) < 0) {
+				dma_unmap_single(dev->dma_dev, t->dma_addr,
+						 SKB_WITH_OVERHEAD(q->buf_size),
+						 DMA_FROM_DEVICE);
+				skb_free_frag(t->ptr);
+				mt76_put_rxwi(dev, t);
+				return NULL;
+			}
+		}
 
 		if (drop) {
 			u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
@@ -455,7 +488,7 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
 	q->tail = (q->tail + 1) % q->ndesc;
 	q->queued--;
 
-	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
+	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop, flush);
 }
 
 static int
@@ -587,6 +620,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 	int len = SKB_WITH_OVERHEAD(q->buf_size);
 	int frames = 0, offset = q->buf_offset;
 	dma_addr_t addr;
+	bool flags = false;
 
 	if (!q->ndesc)
 		return 0;
@@ -610,7 +644,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 		qbuf.addr = addr + offset;
 		qbuf.len = len - offset;
 		qbuf.skip_unmap = false;
-		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
+		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf, NULL) < 0) {
 			dma_unmap_single(dev->dma_dev, addr, len,
 					 DMA_FROM_DEVICE);
 			skb_free_frag(buf);
@@ -619,7 +653,10 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 		frames++;
 	}
 
-	if (frames)
+	flags = (q->flags & MT_QFLAG_WED) &&
+		FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX;
+
+	if (frames || flags)
 		mt76_dma_kick_queue(dev, q);
 
 	spin_unlock_bh(&q->lock);
-- 
2.18.0

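The new else branch in mt76_dma_get_buf() carries the whole idea: the
stack only ever receives a copy of the rx payload, while the ring keeps
permanent ownership of the DMA buffer, which is remapped and re-queued
with its rxwi intact. Below is a minimal userspace sketch of that
copy-and-requeue pattern. It is illustrative only: ring_slot and
slot_consume are invented names, not mt76 or kernel APIs, and plain
malloc/memcpy stand in for page_frag_alloc() and the DMA mapping calls.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SLOT_SIZE 256

struct ring_slot {
	unsigned char buf[SLOT_SIZE];	/* stands in for the DMA-mapped rx buffer */
	size_t len;			/* bytes the "hardware" wrote */
};

/*
 * Consume one filled slot: hand the caller a private copy and leave the
 * slot itself queued for reuse, the way the patch re-adds the original
 * rx buffer instead of passing it to the network stack.
 */
static unsigned char *slot_consume(struct ring_slot *slot, size_t *len)
{
	unsigned char *copy = malloc(slot->len);

	if (!copy)
		return NULL;	/* drop the frame; the slot stays usable */

	memcpy(copy, slot->buf, slot->len);
	*len = slot->len;
	slot->len = 0;		/* slot is immediately ready for the next fill */
	return copy;
}

int main(void)
{
	struct ring_slot slot = { .len = 5 };
	unsigned char *frame;
	size_t len;

	memcpy(slot.buf, "hello", 5);
	frame = slot_consume(&slot, &len);
	if (frame) {
		printf("copied %zu bytes, slot still owned by the ring\n", len);
		free(frame);
	}
	return 0;
}

The invariant mirrored here is that slot_consume() never transfers
slot->buf to the caller, so the producer can refill the slot without
allocating, just as the patch re-adds t->ptr to the rx ring instead of
letting the network stack consume the page.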