From cfad406a8e50de85347861782835596f599bfe59 Mon Sep 17 00:00:00 2001
From: Evelyn Tsai <evelyn.tsai@mediatek.com>
Date: Sun, 4 Feb 2024 17:52:44 +0800
Subject: [PATCH 2008/2015] wifi: mt76: connac: wed: add wed rx copy skb

Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
---
 dma.c | 71 +++++++++++++++++++++++++++++++++++++++++++++--------------
 wed.c | 37 ++++++++++++++++++++++---------
 2 files changed, 80 insertions(+), 28 deletions(-)

diff --git a/dma.c b/dma.c
index 9cd97d24..d17fc88c 100644
--- a/dma.c
+++ b/dma.c
@@ -225,10 +225,10 @@ void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
 
 static int
 mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
-		    struct mt76_queue_buf *buf, void *data)
+		    struct mt76_queue_buf *buf, void *data,
+		    struct mt76_rxwi_cache *rxwi)
 {
 	struct mt76_queue_entry *entry = &q->entry[q->head];
-	struct mt76_rxwi_cache *rxwi = NULL;
 	struct mt76_desc *desc;
 	int idx = q->head;
 	u32 buf1 = 0, ctrl;
@@ -249,9 +249,11 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 #endif
 
 	if (mt76_queue_is_wed_rx(q)) {
-		rxwi = mt76_get_rxwi(dev);
-		if (!rxwi)
-			return -ENOMEM;
+		if (!rxwi) {
+			rxwi = mt76_get_rxwi(dev);
+			if (!rxwi)
+				return -ENOMEM;
+		}
 
 		rx_token = mt76_rx_token_consume(dev, data, rxwi, buf->addr);
 		if (rx_token < 0) {
@@ -421,7 +423,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
 
 static void *
 mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
-		 int *len, u32 *info, bool *more, bool *drop)
+		 int *len, u32 *info, bool *more, bool *drop, bool flush)
 {
 	struct mt76_queue_entry *e = &q->entry[idx];
 	struct mt76_desc *desc = &q->desc[idx];
@@ -478,11 +480,44 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 					SKB_WITH_OVERHEAD(q->buf_size),
 					DMA_FROM_DEVICE);
 
-		buf = r->ptr;
-		r->dma_addr = 0;
-		r->ptr = NULL;
+		if (flush) {
+			buf = r->ptr;
+			r->dma_addr = 0;
+			r->ptr = NULL;
+
+			mt76_put_rxwi(dev, r);
+		} else {
+			struct mt76_queue_buf qbuf;
+
+			buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC | GFP_DMA32);
+			if (!buf)
+				return NULL;
+
+			memcpy(buf, r->ptr, SKB_WITH_OVERHEAD(q->buf_size));
+
+			r->dma_addr = dma_map_single(dev->dma_dev, r->ptr,
+						     SKB_WITH_OVERHEAD(q->buf_size),
+						     DMA_FROM_DEVICE);
+			if (unlikely(dma_mapping_error(dev->dma_dev, r->dma_addr))) {
+				skb_free_frag(r->ptr);
+				mt76_put_rxwi(dev, r);
+				return NULL;
+			}
+
+			qbuf.addr = r->dma_addr;
+			qbuf.len = SKB_WITH_OVERHEAD(q->buf_size);
+			qbuf.skip_unmap = false;
+
+			if (mt76_dma_add_rx_buf(dev, q, &qbuf, r->ptr, r) < 0) {
+				dma_unmap_single(dev->dma_dev, r->dma_addr,
+						 SKB_WITH_OVERHEAD(q->buf_size),
+						 DMA_FROM_DEVICE);
+				skb_free_frag(r->ptr);
+				mt76_put_rxwi(dev, r);
+				return NULL;
+			}
+		}
 
-		mt76_put_rxwi(dev, r);
 		if (drop)
 			*drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
 	} else {
@@ -519,7 +554,7 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
 	q->tail = (q->tail + 1) % q->ndesc;
 	q->queued--;
 
-	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
+	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop, flush);
 }
 
 static int
@@ -690,7 +725,7 @@ int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 done:
 	qbuf.len = len - offset;
 	qbuf.skip_unmap = false;
-	if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
+	if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf, NULL) < 0) {
 		dma_unmap_single(dev->dma_dev, addr, len,
 				 DMA_FROM_DEVICE);
 		skb_free_frag(buf);
@@ -791,12 +826,14 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 
 	spin_unlock_bh(&q->lock);
 
-	if (!q->rx_page.va)
-		return;
+	if (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) != MT76_WED_Q_RX) {
+		if (!q->rx_page.va)
+			return;
 
-	page = virt_to_page(q->rx_page.va);
-	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
-	memset(&q->rx_page, 0, sizeof(q->rx_page));
+		page = virt_to_page(q->rx_page.va);
+		__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+		memset(&q->rx_page, 0, sizeof(q->rx_page));
+	}
 }
 
 static void
diff --git a/wed.c b/wed.c
index c03b52f9..70e40575 100644
--- a/wed.c
+++ b/wed.c
@@ -9,12 +9,9 @@
 void mt76_wed_release_rx_buf(struct mtk_wed_device *wed)
 {
 	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
-	u32 length;
+	struct page *page;
 	int i;
 
-	length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
-				sizeof(struct skb_shared_info));
-
 	for (i = 0; i < dev->rx_token_size; i++) {
 		struct mt76_rxwi_cache *r;
 
@@ -24,13 +21,33 @@ void mt76_wed_release_rx_buf(struct mtk_wed_device *wed)
 
 		dma_unmap_single(dev->dma_dev, r->dma_addr,
 				 wed->wlan.rx_size, DMA_FROM_DEVICE);
-		__free_pages(virt_to_page(r->ptr), get_order(length));
+		skb_free_frag(r->ptr);
 		r->ptr = NULL;
 
 		mt76_put_rxwi(dev, r);
 	}
 
 	mt76_free_pending_rxwi(dev);
+
+	mt76_for_each_q_rx(dev, i) {
+		struct mt76_queue *q = &dev->q_rx[i];
+
+		if (mt76_queue_is_wed_rx(q)) {
+			if (!q->rx_page.va)
+				continue;
+
+			page = virt_to_page(q->rx_page.va);
+			__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+			memset(&q->rx_page, 0, sizeof(q->rx_page));
+		}
+	}
+
+	if (!wed->rx_buf_ring.rx_page.va)
+		return;
+
+	page = virt_to_page(wed->rx_buf_ring.rx_page.va);
+	__page_frag_cache_drain(page, wed->rx_buf_ring.rx_page.pagecnt_bias);
+	memset(&wed->rx_buf_ring.rx_page, 0, sizeof(wed->rx_buf_ring.rx_page));
 }
 EXPORT_SYMBOL_GPL(mt76_wed_release_rx_buf);
 
@@ -48,25 +65,23 @@ u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 	for (i = 0; i < size; i++) {
 		struct mt76_rxwi_cache *r = mt76_get_rxwi(dev);
 		dma_addr_t phy_addr;
-		struct page *page;
 		int token;
 		void *ptr;
 
 		if (!r)
 			goto unmap;
 
-		page = __dev_alloc_pages(GFP_KERNEL, get_order(length));
-		if (!page) {
+		ptr = page_frag_alloc(&wed->rx_buf_ring.rx_page, length, GFP_ATOMIC);
+		if (!ptr) {
 			mt76_put_rxwi(dev, r);
 			goto unmap;
 		}
 
-		ptr = page_address(page);
 		phy_addr = dma_map_single(dev->dma_dev, ptr,
 					  wed->wlan.rx_size,
 					  DMA_TO_DEVICE);
 		if (unlikely(dma_mapping_error(dev->dev, phy_addr))) {
-			__free_pages(page, get_order(length));
+			skb_free_frag(ptr);
 			mt76_put_rxwi(dev, r);
 			goto unmap;
 		}
@@ -76,7 +91,7 @@ u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 		if (token < 0) {
 			dma_unmap_single(dev->dma_dev, phy_addr,
 					 wed->wlan.rx_size, DMA_TO_DEVICE);
-			__free_pages(page, get_order(length));
+			skb_free_frag(ptr);
 			mt76_put_rxwi(dev, r);
 			goto unmap;
 		}
-- 
2.18.0
