From 8db2652823341170081e41f0fc118e92d39cc918 Mon Sep 17 00:00:00 2001
From: Evelyn Tsai <evelyn.tsai@mediatek.com>
Date: Sun, 4 Feb 2024 17:52:44 +0800
Subject: [PATCH 2008/2012] wifi: mt76: connac: wed: add wed rx copy skb

Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
---
 dma.c | 71 +++++++++++++++++++++++++++++++++++++++++++++--------------
 wed.c | 37 ++++++++++++++++++++++---------
 2 files changed, 80 insertions(+), 28 deletions(-)

diff --git a/dma.c b/dma.c
index 9cd97d24..d17fc88c 100644
--- a/dma.c
+++ b/dma.c
@@ -225,10 +225,10 @@ void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
 
 static int
 mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
-                    struct mt76_queue_buf *buf, void *data)
+                    struct mt76_queue_buf *buf, void *data,
+                    struct mt76_rxwi_cache *rxwi)
 {
         struct mt76_queue_entry *entry = &q->entry[q->head];
-        struct mt76_rxwi_cache *rxwi = NULL;
         struct mt76_desc *desc;
         int idx = q->head;
         u32 buf1 = 0, ctrl;
@@ -249,9 +249,11 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 #endif
 
         if (mt76_queue_is_wed_rx(q)) {
-                rxwi = mt76_get_rxwi(dev);
-                if (!rxwi)
-                        return -ENOMEM;
+                if (!rxwi) {
+                        rxwi = mt76_get_rxwi(dev);
+                        if (!rxwi)
+                                return -ENOMEM;
+                }
 
                 rx_token = mt76_rx_token_consume(dev, data, rxwi, buf->addr);
                 if (rx_token < 0) {
@@ -421,7 +423,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
 
 static void *
 mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
-                 int *len, u32 *info, bool *more, bool *drop)
+                 int *len, u32 *info, bool *more, bool *drop, bool flush)
 {
         struct mt76_queue_entry *e = &q->entry[idx];
         struct mt76_desc *desc = &q->desc[idx];
@@ -478,11 +480,44 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
                                         SKB_WITH_OVERHEAD(q->buf_size),
                                         DMA_FROM_DEVICE);
 
-                buf = r->ptr;
-                r->dma_addr = 0;
-                r->ptr = NULL;
+                if (flush) {
+                        buf = r->ptr;
+                        r->dma_addr = 0;
+                        r->ptr = NULL;
+
+                        mt76_put_rxwi(dev, r);
+                } else {
+                        struct mt76_queue_buf qbuf;
+
+                        buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC | GFP_DMA32);
+                        if (!buf)
+                                return NULL;
+
+                        memcpy(buf, r->ptr, SKB_WITH_OVERHEAD(q->buf_size));
+
+                        r->dma_addr = dma_map_single(dev->dma_dev, r->ptr,
+                                                     SKB_WITH_OVERHEAD(q->buf_size),
+                                                     DMA_FROM_DEVICE);
+                        if (unlikely(dma_mapping_error(dev->dma_dev, r->dma_addr))) {
+                                skb_free_frag(r->ptr);
+                                mt76_put_rxwi(dev, r);
+                                return NULL;
+                        }
+
+                        qbuf.addr = r->dma_addr;
+                        qbuf.len = SKB_WITH_OVERHEAD(q->buf_size);
+                        qbuf.skip_unmap = false;
+
+                        if (mt76_dma_add_rx_buf(dev, q, &qbuf, r->ptr, r) < 0) {
+                                dma_unmap_single(dev->dma_dev, r->dma_addr,
+                                                 SKB_WITH_OVERHEAD(q->buf_size),
+                                                 DMA_FROM_DEVICE);
+                                skb_free_frag(r->ptr);
+                                mt76_put_rxwi(dev, r);
+                                return NULL;
+                        }
+                }
 
-                mt76_put_rxwi(dev, r);
                 if (drop)
                         *drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
         } else {
@@ -519,7 +554,7 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
         q->tail = (q->tail + 1) % q->ndesc;
         q->queued--;
 
-        return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
+        return mt76_dma_get_buf(dev, q, idx, len, info, more, drop, flush);
 }
 
 static int
@@ -690,7 +725,7 @@ int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 done:
                 qbuf.len = len - offset;
                 qbuf.skip_unmap = false;
-                if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
+                if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf, NULL) < 0) {
                         dma_unmap_single(dev->dma_dev, addr, len,
                                          DMA_FROM_DEVICE);
                         skb_free_frag(buf);
@@ -791,12 +826,14 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 
         spin_unlock_bh(&q->lock);
 
-        if (!q->rx_page.va)
-                return;
+        if (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) != MT76_WED_Q_RX) {
+                if (!q->rx_page.va)
+                        return;
 
-        page = virt_to_page(q->rx_page.va);
-        __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
-        memset(&q->rx_page, 0, sizeof(q->rx_page));
+                page = virt_to_page(q->rx_page.va);
+                __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+                memset(&q->rx_page, 0, sizeof(q->rx_page));
+        }
 }
 
 static void
diff --git a/wed.c b/wed.c
index c03b52f9..70e40575 100644
--- a/wed.c
+++ b/wed.c
@@ -9,12 +9,9 @@
 void mt76_wed_release_rx_buf(struct mtk_wed_device *wed)
 {
         struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
-        u32 length;
+        struct page *page;
         int i;
 
-        length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
-                                sizeof(struct skb_shared_info));
-
         for (i = 0; i < dev->rx_token_size; i++) {
                 struct mt76_rxwi_cache *r;
 
@@ -24,13 +21,33 @@ void mt76_wed_release_rx_buf(struct mtk_wed_device *wed)
 
                 dma_unmap_single(dev->dma_dev, r->dma_addr,
                                  wed->wlan.rx_size, DMA_FROM_DEVICE);
-                __free_pages(virt_to_page(r->ptr), get_order(length));
+                skb_free_frag(r->ptr);
                 r->ptr = NULL;
 
                 mt76_put_rxwi(dev, r);
         }
 
         mt76_free_pending_rxwi(dev);
+
+        mt76_for_each_q_rx(dev, i) {
+                struct mt76_queue *q = &dev->q_rx[i];
+
+                if (mt76_queue_is_wed_rx(q)) {
+                        if (!q->rx_page.va)
+                                continue;
+
+                        page = virt_to_page(q->rx_page.va);
+                        __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+                        memset(&q->rx_page, 0, sizeof(q->rx_page));
+                }
+        }
+
+        if (!wed->rx_buf_ring.rx_page.va)
+                return;
+
+        page = virt_to_page(wed->rx_buf_ring.rx_page.va);
+        __page_frag_cache_drain(page, wed->rx_buf_ring.rx_page.pagecnt_bias);
+        memset(&wed->rx_buf_ring.rx_page, 0, sizeof(wed->rx_buf_ring.rx_page));
 }
 EXPORT_SYMBOL_GPL(mt76_wed_release_rx_buf);
 
@@ -48,25 +65,23 @@ u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
         for (i = 0; i < size; i++) {
                 struct mt76_rxwi_cache *r = mt76_get_rxwi(dev);
                 dma_addr_t phy_addr;
-                struct page *page;
                 int token;
                 void *ptr;
 
                 if (!r)
                         goto unmap;
 
-                page = __dev_alloc_pages(GFP_KERNEL, get_order(length));
-                if (!page) {
+                ptr = page_frag_alloc(&wed->rx_buf_ring.rx_page, length, GFP_ATOMIC);
+                if (!ptr) {
                         mt76_put_rxwi(dev, r);
                         goto unmap;
                 }
 
-                ptr = page_address(page);
                 phy_addr = dma_map_single(dev->dma_dev, ptr,
                                           wed->wlan.rx_size,
                                           DMA_TO_DEVICE);
                 if (unlikely(dma_mapping_error(dev->dev, phy_addr))) {
-                        __free_pages(page, get_order(length));
+                        skb_free_frag(ptr);
                         mt76_put_rxwi(dev, r);
                         goto unmap;
                 }
@@ -76,7 +91,7 @@ u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
                 if (token < 0) {
                         dma_unmap_single(dev->dma_dev, phy_addr,
                                          wed->wlan.rx_size, DMA_TO_DEVICE);
-                        __free_pages(page, get_order(length));
+                        skb_free_frag(ptr);
                         mt76_put_rxwi(dev, r);
                         goto unmap;
                 }
-- 
2.18.0
