From 2b103fe6990bafb386d6f036068ad35713903817 Mon Sep 17 00:00:00 2001
From: Bo Jiao <Bo.Jiao@mediatek.com>
Date: Mon, 6 Feb 2023 13:37:23 +0800
Subject: [PATCH 2001/2032] mtk: wifi: mt76: rework wed rx flow

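Decouple the WED rx buffer bookkeeping from the tx path:

- introduce a dedicated struct mt76_rxwi_cache and switch the rxwi
  cache, the rx token helpers and the rx queue entries over to it;
- protect the rxwi cache with dev->lock and drop dev->wed_lock;
- keep WED rx buffers resident in the ring: on dequeue, copy the
  payload into a fresh page fragment and re-map/refill the original
  buffer instead of handing it to the stack;
- allocate WED rx buffers from page_frag caches and drain those
  caches in mt76_wed_release_rx_buf();
- make the rx token count a per-driver parameter (rx_token_size),
  sized on mt7915 as MT7915_RX_TOKEN_SIZE plus the WED buffer count.
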
6Signed-off-by: Bo Jiao <Bo.Jiao@mediatek.com>
developere35b8e42023-10-16 11:04:00 +08007---
8 dma.c | 125 +++++++++++++++++++++++++++++++-----------------
9 mac80211.c | 2 +-
developerd243af02023-12-21 14:49:33 +080010 mt76.h | 25 ++++++----
developere35b8e42023-10-16 11:04:00 +080011 mt7915/mmio.c | 3 +-
12 mt7915/mt7915.h | 1 +
13 tx.c | 16 +++----
developerebda9012024-02-22 13:42:45 +080014 wed.c | 57 ++++++++++++++--------
developerd243af02023-12-21 14:49:33 +080015 7 files changed, 144 insertions(+), 85 deletions(-)
developere35b8e42023-10-16 11:04:00 +080016
diff --git a/dma.c b/dma.c
index d076faa1..f4ecd117 100644
--- a/dma.c
+++ b/dma.c
@@ -64,17 +64,17 @@ mt76_alloc_txwi(struct mt76_dev *dev)
 	return t;
 }
 
-static struct mt76_txwi_cache *
+static struct mt76_rxwi_cache *
 mt76_alloc_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
-	t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
-	if (!t)
+	r = kzalloc(L1_CACHE_ALIGN(sizeof(*r)), GFP_ATOMIC);
+	if (!r)
 		return NULL;
 
-	t->ptr = NULL;
-	return t;
+	r->ptr = NULL;
+	return r;
 }
 
 static struct mt76_txwi_cache *
@@ -93,20 +93,20 @@ __mt76_get_txwi(struct mt76_dev *dev)
 	return t;
 }
 
-static struct mt76_txwi_cache *
+static struct mt76_rxwi_cache *
 __mt76_get_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t = NULL;
+	struct mt76_rxwi_cache *r = NULL;
 
-	spin_lock_bh(&dev->wed_lock);
+	spin_lock_bh(&dev->lock);
 	if (!list_empty(&dev->rxwi_cache)) {
-		t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
+		r = list_first_entry(&dev->rxwi_cache, struct mt76_rxwi_cache,
 				     list);
-		list_del(&t->list);
+		list_del(&r->list);
 	}
-	spin_unlock_bh(&dev->wed_lock);
+	spin_unlock_bh(&dev->lock);
 
-	return t;
+	return r;
 }
 
 static struct mt76_txwi_cache *
@@ -120,13 +120,13 @@ mt76_get_txwi(struct mt76_dev *dev)
 	return mt76_alloc_txwi(dev);
 }
 
-struct mt76_txwi_cache *
+struct mt76_rxwi_cache *
 mt76_get_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);
+	struct mt76_rxwi_cache *r = __mt76_get_rxwi(dev);
 
-	if (t)
-		return t;
+	if (r)
+		return r;
 
 	return mt76_alloc_rxwi(dev);
 }
@@ -145,14 +145,14 @@ mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
 EXPORT_SYMBOL_GPL(mt76_put_txwi);
 
 void
-mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r)
 {
-	if (!t)
+	if (!r)
 		return;
 
-	spin_lock_bh(&dev->wed_lock);
-	list_add(&t->list, &dev->rxwi_cache);
-	spin_unlock_bh(&dev->wed_lock);
+	spin_lock_bh(&dev->lock);
+	list_add(&r->list, &dev->rxwi_cache);
+	spin_unlock_bh(&dev->lock);
 }
 EXPORT_SYMBOL_GPL(mt76_put_rxwi);
 
@@ -173,13 +173,13 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
 void
 mt76_free_pending_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
 	local_bh_disable();
-	while ((t = __mt76_get_rxwi(dev)) != NULL) {
-		if (t->ptr)
-			skb_free_frag(t->ptr);
-		kfree(t);
+	while ((r = __mt76_get_rxwi(dev)) != NULL) {
+		if (r->ptr)
+			skb_free_frag(r->ptr);
+		kfree(r);
 	}
 	local_bh_enable();
 }
@@ -225,10 +225,10 @@ void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
 
 static int
 mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
-		    struct mt76_queue_buf *buf, void *data)
+		    struct mt76_queue_buf *buf, void *data,
+		    struct mt76_rxwi_cache *rxwi)
 {
 	struct mt76_queue_entry *entry = &q->entry[q->head];
-	struct mt76_txwi_cache *txwi = NULL;
 	struct mt76_desc *desc;
 	int idx = q->head;
 	u32 buf1 = 0, ctrl;
@@ -249,13 +249,15 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 #endif
 
 	if (mt76_queue_is_wed_rx(q)) {
-		txwi = mt76_get_rxwi(dev);
-		if (!txwi)
-			return -ENOMEM;
+		if (!rxwi) {
+			rxwi = mt76_get_rxwi(dev);
+			if (!rxwi)
+				return -ENOMEM;
+		}
 
-		rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
+		rx_token = mt76_rx_token_consume(dev, data, rxwi, buf->addr);
 		if (rx_token < 0) {
-			mt76_put_rxwi(dev, txwi);
+			mt76_put_rxwi(dev, rxwi);
 			return -ENOMEM;
 		}
 
@@ -271,7 +273,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 done:
 	entry->dma_addr[0] = buf->addr;
 	entry->dma_len[0] = buf->len;
-	entry->txwi = txwi;
+	entry->rxwi = rxwi;
 	entry->buf = data;
 	entry->wcid = 0xffff;
 	entry->skip_buf1 = true;
@@ -420,7 +422,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
 
 static void *
 mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
-		 int *len, u32 *info, bool *more, bool *drop)
+		 int *len, u32 *info, bool *more, bool *drop, bool flush)
 {
 	struct mt76_queue_entry *e = &q->entry[idx];
 	struct mt76_desc *desc = &q->desc[idx];
@@ -445,20 +447,53 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 
 	if (mt76_queue_is_wed_rx(q)) {
 		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
-		struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);
+		struct mt76_rxwi_cache *r = mt76_rx_token_release(dev, token);
 
-		if (!t)
+		if (!r)
 			return NULL;
 
-		dma_unmap_single(dev->dma_dev, t->dma_addr,
+		dma_unmap_single(dev->dma_dev, r->dma_addr,
 				 SKB_WITH_OVERHEAD(q->buf_size),
 				 DMA_FROM_DEVICE);
 
-		buf = t->ptr;
-		t->dma_addr = 0;
-		t->ptr = NULL;
+		if (flush) {
+			buf = r->ptr;
+			r->dma_addr = 0;
+			r->ptr = NULL;
+
+			mt76_put_rxwi(dev, r);
+		} else {
+			struct mt76_queue_buf qbuf;
+
+			buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
+			if (!buf)
+				return NULL;
+
+			memcpy(buf, r->ptr, SKB_WITH_OVERHEAD(q->buf_size));
+
+			r->dma_addr = dma_map_single(dev->dma_dev, r->ptr,
+						     SKB_WITH_OVERHEAD(q->buf_size),
+						     DMA_FROM_DEVICE);
+			if (unlikely(dma_mapping_error(dev->dma_dev, r->dma_addr))) {
+				skb_free_frag(r->ptr);
+				mt76_put_rxwi(dev, r);
+				return NULL;
+			}
+
+			qbuf.addr = r->dma_addr;
+			qbuf.len = SKB_WITH_OVERHEAD(q->buf_size);
+			qbuf.skip_unmap = false;
+
+			if (mt76_dma_add_rx_buf(dev, q, &qbuf, r->ptr, r) < 0) {
+				dma_unmap_single(dev->dma_dev, r->dma_addr,
+						 SKB_WITH_OVERHEAD(q->buf_size),
+						 DMA_FROM_DEVICE);
+				skb_free_frag(r->ptr);
+				mt76_put_rxwi(dev, r);
+				return NULL;
+			}
+		}
 
-		mt76_put_rxwi(dev, t);
 		if (drop)
 			*drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
 	} else {
@@ -495,7 +530,7 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
 	q->tail = (q->tail + 1) % q->ndesc;
 	q->queued--;
 
-	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
+	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop, flush);
 }
 
 static int
@@ -666,7 +701,7 @@ int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
 done:
 	qbuf.len = len - offset;
 	qbuf.skip_unmap = false;
-	if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
+	if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf, NULL) < 0) {
 		dma_unmap_single(dev->dma_dev, addr, len,
 				 DMA_FROM_DEVICE);
 		skb_free_frag(buf);
diff --git a/mac80211.c b/mac80211.c
index 380a74e4..91e771d3 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -595,7 +595,6 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
 	spin_lock_init(&dev->lock);
 	spin_lock_init(&dev->cc_lock);
 	spin_lock_init(&dev->status_lock);
-	spin_lock_init(&dev->wed_lock);
 	mutex_init(&dev->mutex);
 	init_waitqueue_head(&dev->tx_wait);
 
@@ -628,6 +627,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
 	INIT_LIST_HEAD(&dev->txwi_cache);
 	INIT_LIST_HEAD(&dev->rxwi_cache);
 	dev->token_size = dev->drv->token_size;
+	dev->rx_token_size = dev->drv->rx_token_size;
 
 	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
 		skb_queue_head_init(&dev->rx_skb[i]);
developere35b8e42023-10-16 11:04:00 +0800281diff --git a/mt76.h b/mt76.h
developerebda9012024-02-22 13:42:45 +0800282index ce4e87b4..59cf6a16 100644
developere35b8e42023-10-16 11:04:00 +0800283--- a/mt76.h
284+++ b/mt76.h
285@@ -200,6 +200,7 @@ struct mt76_queue_entry {
286 };
287 union {
288 struct mt76_txwi_cache *txwi;
289+ struct mt76_rxwi_cache *rxwi;
290 struct urb *urb;
291 int buf_sz;
292 };
developerebda9012024-02-22 13:42:45 +0800293@@ -411,12 +412,16 @@ struct mt76_txwi_cache {
developere35b8e42023-10-16 11:04:00 +0800294 struct list_head list;
295 dma_addr_t dma_addr;
296
297- union {
298- struct sk_buff *skb;
299- void *ptr;
300- };
developerd243af02023-12-21 14:49:33 +0800301-
302 unsigned long jiffies;
303+
developere35b8e42023-10-16 11:04:00 +0800304+ struct sk_buff *skb;
305+};
306+
307+struct mt76_rxwi_cache {
308+ struct list_head list;
309+ dma_addr_t dma_addr;
310+
311+ void *ptr;
312 };
313
314 struct mt76_rx_tid {
developerebda9012024-02-22 13:42:45 +0800315@@ -504,6 +509,7 @@ struct mt76_driver_ops {
developere35b8e42023-10-16 11:04:00 +0800316 u16 txwi_size;
317 u16 token_size;
318 u8 mcs_rates;
319+ u16 rx_token_size;
320
321 void (*update_survey)(struct mt76_phy *phy);
322
developerebda9012024-02-22 13:42:45 +0800323@@ -881,7 +887,6 @@ struct mt76_dev {
developere35b8e42023-10-16 11:04:00 +0800324
325 struct ieee80211_hw *hw;
326
327- spinlock_t wed_lock;
328 spinlock_t lock;
329 spinlock_t cc_lock;
330
developerebda9012024-02-22 13:42:45 +0800331@@ -1563,8 +1568,8 @@ mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
developere35b8e42023-10-16 11:04:00 +0800332 }
333
334 void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
335-void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
336-struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
337+void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r);
338+struct mt76_rxwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
339 void mt76_free_pending_rxwi(struct mt76_dev *dev);
340 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
341 struct napi_struct *napi);
developerebda9012024-02-22 13:42:45 +0800342@@ -1743,9 +1748,9 @@ struct mt76_txwi_cache *
developere35b8e42023-10-16 11:04:00 +0800343 mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
344 int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
345 void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
346-struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
347+struct mt76_rxwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
348 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
349- struct mt76_txwi_cache *r, dma_addr_t phys);
350+ struct mt76_rxwi_cache *r, dma_addr_t phys);
351
352 static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
353 {
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
index 6004d64f..5938bd9f 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -714,7 +714,7 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
 	wed->wlan.reset = mt7915_mmio_wed_reset;
 	wed->wlan.reset_complete = mt76_wed_reset_complete;
 
-	dev->mt76.rx_token_size = wed->wlan.rx_npkt;
+	dev->mt76.rx_token_size += wed->wlan.rx_npkt;
 
 	if (mtk_wed_device_attach(wed))
 		return 0;
@@ -921,6 +921,7 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
				       SURVEY_INFO_TIME_RX |
				       SURVEY_INFO_TIME_BSS_RX,
 		.token_size = MT7915_TOKEN_SIZE,
+		.rx_token_size = MT7915_RX_TOKEN_SIZE,
 		.tx_prepare_skb = mt7915_tx_prepare_skb,
 		.tx_complete_skb = mt76_connac_tx_complete_skb,
 		.rx_skb = mt7915_queue_rx_skb,
diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
index 6e79bc65..e5a86759 100644
--- a/mt7915/mt7915.h
+++ b/mt7915/mt7915.h
@@ -62,6 +62,7 @@
 #define MT7915_EEPROM_BLOCK_SIZE	16
 #define MT7915_HW_TOKEN_SIZE		4096
 #define MT7915_TOKEN_SIZE		8192
+#define MT7915_RX_TOKEN_SIZE		4096
 
 #define MT7915_CFEND_RATE_DEFAULT	0x49	/* OFDM 24M */
 #define MT7915_CFEND_RATE_11B		0x03	/* 11B LP, 11M */
diff --git a/tx.c b/tx.c
index 4596b367..e0c3e854 100644
--- a/tx.c
+++ b/tx.c
@@ -851,16 +851,16 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
 EXPORT_SYMBOL_GPL(mt76_token_consume);
 
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
-			  struct mt76_txwi_cache *t, dma_addr_t phys)
+			  struct mt76_rxwi_cache *r, dma_addr_t phys)
 {
 	int token;
 
 	spin_lock_bh(&dev->rx_token_lock);
-	token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size,
+	token = idr_alloc(&dev->rx_token, r, 0, dev->rx_token_size,
 			  GFP_ATOMIC);
 	if (token >= 0) {
-		t->ptr = ptr;
-		t->dma_addr = phys;
+		r->ptr = ptr;
+		r->dma_addr = phys;
 	}
 	spin_unlock_bh(&dev->rx_token_lock);
 
@@ -897,15 +897,15 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
 }
 EXPORT_SYMBOL_GPL(mt76_token_release);
 
-struct mt76_txwi_cache *
+struct mt76_rxwi_cache *
 mt76_rx_token_release(struct mt76_dev *dev, int token)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
 	spin_lock_bh(&dev->rx_token_lock);
-	t = idr_remove(&dev->rx_token, token);
+	r = idr_remove(&dev->rx_token, token);
 	spin_unlock_bh(&dev->rx_token_lock);
 
-	return t;
+	return r;
 }
 EXPORT_SYMBOL_GPL(mt76_rx_token_release);
developerebda9012024-02-22 13:42:45 +0800432diff --git a/wed.c b/wed.c
433index 8eca4d81..0a0b5c05 100644
434--- a/wed.c
435+++ b/wed.c
436@@ -9,28 +9,45 @@
437 void mt76_wed_release_rx_buf(struct mtk_wed_device *wed)
438 {
439 struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
440- u32 length;
441+ struct page *page;
442 int i;
443
444- length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
445- sizeof(struct skb_shared_info));
446-
447 for (i = 0; i < dev->rx_token_size; i++) {
448- struct mt76_txwi_cache *t;
449+ struct mt76_rxwi_cache *r;
450
451- t = mt76_rx_token_release(dev, i);
452- if (!t || !t->ptr)
453+ r = mt76_rx_token_release(dev, i);
454+ if (!r || !r->ptr)
455 continue;
456
457- dma_unmap_single(dev->dma_dev, t->dma_addr,
458+ dma_unmap_single(dev->dma_dev, r->dma_addr,
459 wed->wlan.rx_size, DMA_FROM_DEVICE);
460- __free_pages(virt_to_page(t->ptr), get_order(length));
461- t->ptr = NULL;
462+ skb_free_frag(r->ptr);
463+ r->ptr = NULL;
464
465- mt76_put_rxwi(dev, t);
466+ mt76_put_rxwi(dev, r);
467 }
468
469 mt76_free_pending_rxwi(dev);
470+
471+ mt76_for_each_q_rx(dev, i) {
472+ struct mt76_queue *q = &dev->q_rx[i];
473+
474+ if (mt76_queue_is_wed_rx(q)) {
475+ if (!q->rx_page.va)
476+ continue;
477+
478+ page = virt_to_page(q->rx_page.va);
479+ __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
480+ memset(&q->rx_page, 0, sizeof(q->rx_page));
481+ }
482+ }
483+
484+ if (!wed->rx_buf_ring.rx_page.va)
485+ return;
486+
487+ page = virt_to_page(wed->rx_buf_ring.rx_page.va);
488+ __page_frag_cache_drain(page, wed->rx_buf_ring.rx_page.pagecnt_bias);
489+ memset(&wed->rx_buf_ring.rx_page, 0, sizeof(wed->rx_buf_ring.rx_page));
490 }
491 EXPORT_SYMBOL_GPL(mt76_wed_release_rx_buf);
492
493@@ -46,18 +63,18 @@ u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
494 sizeof(struct skb_shared_info));
495
496 for (i = 0; i < size; i++) {
497- struct mt76_txwi_cache *t = mt76_get_rxwi(dev);
498+ struct mt76_rxwi_cache *r = mt76_get_rxwi(dev);
499 dma_addr_t addr;
500 struct page *page;
501 int token;
502 void *ptr;
503
504- if (!t)
505+ if (!r)
506 goto unmap;
507
508- page = __dev_alloc_pages(GFP_KERNEL, get_order(length));
509- if (!page) {
510- mt76_put_rxwi(dev, t);
511+ ptr = page_frag_alloc(&wed->rx_buf_ring.rx_page, length, GFP_ATOMIC);
512+ if (!ptr) {
513+ mt76_put_rxwi(dev, r);
514 goto unmap;
515 }
516
517@@ -67,17 +84,17 @@ u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
518
519 if (unlikely(dma_mapping_error(dev->dev, addr))) {
520 skb_free_frag(ptr);
521- mt76_put_rxwi(dev, t);
522+ mt76_put_rxwi(dev, r);
523 goto unmap;
524 }
525
526 desc->buf0 = cpu_to_le32(addr);
527- token = mt76_rx_token_consume(dev, ptr, t, addr);
528+ token = mt76_rx_token_consume(dev, ptr, r, addr);
529 if (token < 0) {
530 dma_unmap_single(dev->dma_dev, addr,
531 wed->wlan.rx_size, DMA_TO_DEVICE);
532- __free_pages(page, get_order(length));
533- mt76_put_rxwi(dev, t);
534+ skb_free_frag(ptr);
535+ mt76_put_rxwi(dev, r);
536 goto unmap;
537 }
538
-- 
2.18.0
