From 48f648333182ff895faa99cbad65d1f9baeb6f55 Mon Sep 17 00:00:00 2001
From: Bo Jiao <Bo.Jiao@mediatek.com>
Date: Mon, 6 Feb 2023 13:37:23 +0800
Subject: [PATCH 062/199] mtk: mt76: rework wed rx flow

Signed-off-by: Bo Jiao <Bo.Jiao@mediatek.com>
---
 dma.c           | 125 +++++++++++++++++++++++++++++++-----------------
 mac80211.c      |   2 +-
 mt76.h          |  25 ++++++----
 mt7915/mmio.c   |   3 +-
 mt7915/mt7915.h |   1 +
 tx.c            |  16 +++----
 wed.c           |  57 ++++++++++++++--------
 7 files changed, 144 insertions(+), 85 deletions(-)

diff --git a/dma.c b/dma.c
index 33a84f5f..c54187bd 100644
--- a/dma.c
+++ b/dma.c
@@ -64,17 +64,17 @@ mt76_alloc_txwi(struct mt76_dev *dev)
 	return t;
 }
 
-static struct mt76_txwi_cache *
+static struct mt76_rxwi_cache *
 mt76_alloc_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
-	t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
-	if (!t)
+	r = kzalloc(L1_CACHE_ALIGN(sizeof(*r)), GFP_ATOMIC);
+	if (!r)
 		return NULL;
 
-	t->ptr = NULL;
-	return t;
+	r->ptr = NULL;
+	return r;
 }
 
 static struct mt76_txwi_cache *
@@ -93,20 +93,20 @@ __mt76_get_txwi(struct mt76_dev *dev)
 	return t;
 }
 
-static struct mt76_txwi_cache *
+static struct mt76_rxwi_cache *
 __mt76_get_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t = NULL;
+	struct mt76_rxwi_cache *r = NULL;
 
-	spin_lock_bh(&dev->wed_lock);
+	spin_lock_bh(&dev->lock);
 	if (!list_empty(&dev->rxwi_cache)) {
-		t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
+		r = list_first_entry(&dev->rxwi_cache, struct mt76_rxwi_cache,
 				     list);
-		list_del(&t->list);
+		list_del(&r->list);
 	}
-	spin_unlock_bh(&dev->wed_lock);
+	spin_unlock_bh(&dev->lock);
 
-	return t;
+	return r;
 }
 
 static struct mt76_txwi_cache *
@@ -120,13 +120,13 @@ mt76_get_txwi(struct mt76_dev *dev)
 	return mt76_alloc_txwi(dev);
 }
 
-struct mt76_txwi_cache *
+struct mt76_rxwi_cache *
 mt76_get_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);
+	struct mt76_rxwi_cache *r = __mt76_get_rxwi(dev);
 
-	if (t)
-		return t;
+	if (r)
+		return r;
 
 	return mt76_alloc_rxwi(dev);
 }
@@ -145,14 +145,14 @@ mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
 EXPORT_SYMBOL_GPL(mt76_put_txwi);
 
 void
-mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r)
 {
-	if (!t)
+	if (!r)
 		return;
 
-	spin_lock_bh(&dev->wed_lock);
-	list_add(&t->list, &dev->rxwi_cache);
-	spin_unlock_bh(&dev->wed_lock);
+	spin_lock_bh(&dev->lock);
+	list_add(&r->list, &dev->rxwi_cache);
+	spin_unlock_bh(&dev->lock);
 }
 EXPORT_SYMBOL_GPL(mt76_put_rxwi);
 
@@ -173,13 +173,13 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
 void
 mt76_free_pending_rxwi(struct mt76_dev *dev)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
 	local_bh_disable();
-	while ((t = __mt76_get_rxwi(dev)) != NULL) {
-		if (t->ptr)
-			skb_free_frag(t->ptr);
-		kfree(t);
+	while ((r = __mt76_get_rxwi(dev)) != NULL) {
+		if (r->ptr)
+			skb_free_frag(r->ptr);
+		kfree(r);
 	}
 	local_bh_enable();
 }
@@ -225,10 +225,10 @@ void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
 
 static int
 mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
-		    struct mt76_queue_buf *buf, void *data)
+		    struct mt76_queue_buf *buf, void *data,
+		    struct mt76_rxwi_cache *rxwi)
 {
 	struct mt76_queue_entry *entry = &q->entry[q->head];
-	struct mt76_txwi_cache *txwi = NULL;
 	struct mt76_desc *desc;
 	int idx = q->head;
 	u32 buf1 = 0, ctrl;
@@ -249,13 +249,15 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 #endif
 
 	if (mt76_queue_is_wed_rx(q)) {
-		txwi = mt76_get_rxwi(dev);
-		if (!txwi)
-			return -ENOMEM;
+		if (!rxwi) {
+			rxwi = mt76_get_rxwi(dev);
+			if (!rxwi)
+				return -ENOMEM;
+		}
 
-		rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
+		rx_token = mt76_rx_token_consume(dev, data, rxwi, buf->addr);
 		if (rx_token < 0) {
-			mt76_put_rxwi(dev, txwi);
+			mt76_put_rxwi(dev, rxwi);
 			return -ENOMEM;
 		}
 
@@ -271,7 +273,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 done:
 	entry->dma_addr[0] = buf->addr;
 	entry->dma_len[0] = buf->len;
-	entry->txwi = txwi;
+	entry->rxwi = rxwi;
 	entry->buf = data;
 	entry->wcid = 0xffff;
 	entry->skip_buf1 = true;
@@ -420,7 +422,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
 
 static void *
 mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
-		 int *len, u32 *info, bool *more, bool *drop)
+		 int *len, u32 *info, bool *more, bool *drop, bool flush)
 {
 	struct mt76_queue_entry *e = &q->entry[idx];
 	struct mt76_desc *desc = &q->desc[idx];
@@ -445,20 +447,53 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 
 	if (mt76_queue_is_wed_rx(q)) {
 		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
-		struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);
+		struct mt76_rxwi_cache *r = mt76_rx_token_release(dev, token);
 
-		if (!t)
+		if (!r)
 			return NULL;
 
-		dma_unmap_single(dev->dma_dev, t->dma_addr,
+		dma_unmap_single(dev->dma_dev, r->dma_addr,
 				 SKB_WITH_OVERHEAD(q->buf_size),
 				 DMA_FROM_DEVICE);
 
-		buf = t->ptr;
-		t->dma_addr = 0;
-		t->ptr = NULL;
+		if (flush) {
+			buf = r->ptr;
+			r->dma_addr = 0;
+			r->ptr = NULL;
+
+			mt76_put_rxwi(dev, r);
+		} else {
+			struct mt76_queue_buf qbuf;
+
+			buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
+			if (!buf)
+				return NULL;
+
+			memcpy(buf, r->ptr, SKB_WITH_OVERHEAD(q->buf_size));
+
+			r->dma_addr = dma_map_single(dev->dma_dev, r->ptr,
+						     SKB_WITH_OVERHEAD(q->buf_size),
+						     DMA_FROM_DEVICE);
+			if (unlikely(dma_mapping_error(dev->dma_dev, r->dma_addr))) {
+				skb_free_frag(r->ptr);
+				mt76_put_rxwi(dev, r);
+				return NULL;
+			}
+
+			qbuf.addr = r->dma_addr;
+			qbuf.len = SKB_WITH_OVERHEAD(q->buf_size);
+			qbuf.skip_unmap = false;
+
+			if (mt76_dma_add_rx_buf(dev, q, &qbuf, r->ptr, r) < 0) {
+				dma_unmap_single(dev->dma_dev, r->dma_addr,
+						 SKB_WITH_OVERHEAD(q->buf_size),
+						 DMA_FROM_DEVICE);
+				skb_free_frag(r->ptr);
+				mt76_put_rxwi(dev, r);
+				return NULL;
+			}
+		}
 
-		mt76_put_rxwi(dev, t);
 		if (drop)
 			*drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
 	} else {
@@ -495,7 +530,7 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
 	q->tail = (q->tail + 1) % q->ndesc;
 	q->queued--;
 
-	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
+	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop, flush);
 }
 
 static int
@@ -667,7 +702,7 @@ int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
 done:
 		qbuf.len = len - offset;
 		qbuf.skip_unmap = false;
-		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
+		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf, NULL) < 0) {
 			dma_unmap_single(dev->dma_dev, addr, len,
 					 DMA_FROM_DEVICE);
 			skb_free_frag(buf);
diff --git a/mac80211.c b/mac80211.c
index 5f85bf1d..3e054d9d 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -596,7 +596,6 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
 	spin_lock_init(&dev->lock);
 	spin_lock_init(&dev->cc_lock);
 	spin_lock_init(&dev->status_lock);
-	spin_lock_init(&dev->wed_lock);
 	mutex_init(&dev->mutex);
 	init_waitqueue_head(&dev->tx_wait);
 
@@ -629,6 +628,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
 	INIT_LIST_HEAD(&dev->txwi_cache);
 	INIT_LIST_HEAD(&dev->rxwi_cache);
 	dev->token_size = dev->drv->token_size;
+	dev->rx_token_size = dev->drv->rx_token_size;
 
 	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
 		skb_queue_head_init(&dev->rx_skb[i]);
diff --git a/mt76.h b/mt76.h
index fbcd5ea9..92b59dd6 100644
--- a/mt76.h
+++ b/mt76.h
@@ -205,6 +205,7 @@ struct mt76_queue_entry {
 	};
 	union {
 		struct mt76_txwi_cache *txwi;
+		struct mt76_rxwi_cache *rxwi;
 		struct urb *urb;
 		int buf_sz;
 	};
@@ -420,12 +421,16 @@ struct mt76_txwi_cache {
 	struct list_head list;
 	dma_addr_t dma_addr;
 
-	union {
-		struct sk_buff *skb;
-		void *ptr;
-	};
-
 	unsigned long jiffies;
+
+	struct sk_buff *skb;
+};
+
+struct mt76_rxwi_cache {
+	struct list_head list;
+	dma_addr_t dma_addr;
+
+	void *ptr;
 };
 
 struct mt76_rx_tid {
@@ -512,6 +517,7 @@ struct mt76_driver_ops {
 	u16 txwi_size;
 	u16 token_size;
 	u8 mcs_rates;
+	u16 rx_token_size;
 
 	void (*update_survey)(struct mt76_phy *phy);
 
@@ -876,7 +882,6 @@ struct mt76_dev {
 
 	struct ieee80211_hw *hw;
 
-	spinlock_t wed_lock;
 	spinlock_t lock;
 	spinlock_t cc_lock;
 
@@ -1562,8 +1567,8 @@ mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
 }
 
 void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
-void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
-struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
+void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r);
+struct mt76_rxwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
 void mt76_free_pending_rxwi(struct mt76_dev *dev);
 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
 		      struct napi_struct *napi);
@@ -1754,9 +1759,9 @@ struct mt76_txwi_cache *
 mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
 int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
 void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
-struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
+struct mt76_rxwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
-			  struct mt76_txwi_cache *r, dma_addr_t phys);
+			  struct mt76_rxwi_cache *r, dma_addr_t phys);
 
 static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
 {
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
index d6ecd698..c8511867 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -714,7 +714,7 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
 	wed->wlan.reset = mt7915_mmio_wed_reset;
 	wed->wlan.reset_complete = mt76_wed_reset_complete;
 
-	dev->mt76.rx_token_size = wed->wlan.rx_npkt;
+	dev->mt76.rx_token_size += wed->wlan.rx_npkt;
 
 	if (mtk_wed_device_attach(wed))
 		return 0;
@@ -921,6 +921,7 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
 				SURVEY_INFO_TIME_RX |
 				SURVEY_INFO_TIME_BSS_RX,
 		.token_size = MT7915_TOKEN_SIZE,
+		.rx_token_size = MT7915_RX_TOKEN_SIZE,
 		.tx_prepare_skb = mt7915_tx_prepare_skb,
 		.tx_complete_skb = mt76_connac_tx_complete_skb,
 		.rx_skb = mt7915_queue_rx_skb,
diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
index a30d08eb..f1e2c93a 100644
--- a/mt7915/mt7915.h
+++ b/mt7915/mt7915.h
@@ -62,6 +62,7 @@
 #define MT7915_EEPROM_BLOCK_SIZE	16
 #define MT7915_HW_TOKEN_SIZE		4096
 #define MT7915_TOKEN_SIZE		8192
+#define MT7915_RX_TOKEN_SIZE		4096
 
 #define MT7915_CFEND_RATE_DEFAULT	0x49	/* OFDM 24M */
 #define MT7915_CFEND_RATE_11B		0x03	/* 11B LP, 11M */
diff --git a/tx.c b/tx.c
index ab42f69b..46dae6e0 100644
--- a/tx.c
+++ b/tx.c
@@ -851,16 +851,16 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
 EXPORT_SYMBOL_GPL(mt76_token_consume);
 
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
-			  struct mt76_txwi_cache *t, dma_addr_t phys)
+			  struct mt76_rxwi_cache *r, dma_addr_t phys)
 {
 	int token;
 
 	spin_lock_bh(&dev->rx_token_lock);
-	token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size,
+	token = idr_alloc(&dev->rx_token, r, 0, dev->rx_token_size,
 			  GFP_ATOMIC);
 	if (token >= 0) {
-		t->ptr = ptr;
-		t->dma_addr = phys;
+		r->ptr = ptr;
+		r->dma_addr = phys;
 	}
 	spin_unlock_bh(&dev->rx_token_lock);
 
@@ -897,15 +897,15 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
 }
 EXPORT_SYMBOL_GPL(mt76_token_release);
 
-struct mt76_txwi_cache *
+struct mt76_rxwi_cache *
 mt76_rx_token_release(struct mt76_dev *dev, int token)
 {
-	struct mt76_txwi_cache *t;
+	struct mt76_rxwi_cache *r;
 
 	spin_lock_bh(&dev->rx_token_lock);
-	t = idr_remove(&dev->rx_token, token);
+	r = idr_remove(&dev->rx_token, token);
 	spin_unlock_bh(&dev->rx_token_lock);
 
-	return t;
+	return r;
 }
 EXPORT_SYMBOL_GPL(mt76_rx_token_release);
diff --git a/wed.c b/wed.c
index 8eca4d81..0a0b5c05 100644
--- a/wed.c
+++ b/wed.c
@@ -9,28 +9,45 @@
 void mt76_wed_release_rx_buf(struct mtk_wed_device *wed)
 {
 	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
-	u32 length;
+	struct page *page;
 	int i;
 
-	length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
-				sizeof(struct skb_shared_info));
-
 	for (i = 0; i < dev->rx_token_size; i++) {
-		struct mt76_txwi_cache *t;
+		struct mt76_rxwi_cache *r;
 
-		t = mt76_rx_token_release(dev, i);
-		if (!t || !t->ptr)
+		r = mt76_rx_token_release(dev, i);
+		if (!r || !r->ptr)
 			continue;
 
-		dma_unmap_single(dev->dma_dev, t->dma_addr,
+		dma_unmap_single(dev->dma_dev, r->dma_addr,
 				 wed->wlan.rx_size, DMA_FROM_DEVICE);
-		__free_pages(virt_to_page(t->ptr), get_order(length));
-		t->ptr = NULL;
+		skb_free_frag(r->ptr);
+		r->ptr = NULL;
 
-		mt76_put_rxwi(dev, t);
+		mt76_put_rxwi(dev, r);
 	}
 
 	mt76_free_pending_rxwi(dev);
+
+	mt76_for_each_q_rx(dev, i) {
+		struct mt76_queue *q = &dev->q_rx[i];
+
+		if (mt76_queue_is_wed_rx(q)) {
+			if (!q->rx_page.va)
+				continue;
+
+			page = virt_to_page(q->rx_page.va);
+			__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+			memset(&q->rx_page, 0, sizeof(q->rx_page));
+		}
+	}
+
+	if (!wed->rx_buf_ring.rx_page.va)
+		return;
+
+	page = virt_to_page(wed->rx_buf_ring.rx_page.va);
+	__page_frag_cache_drain(page, wed->rx_buf_ring.rx_page.pagecnt_bias);
+	memset(&wed->rx_buf_ring.rx_page, 0, sizeof(wed->rx_buf_ring.rx_page));
 }
 EXPORT_SYMBOL_GPL(mt76_wed_release_rx_buf);
 
@@ -46,18 +63,18 @@ u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 				sizeof(struct skb_shared_info));
 
 	for (i = 0; i < size; i++) {
-		struct mt76_txwi_cache *t = mt76_get_rxwi(dev);
+		struct mt76_rxwi_cache *r = mt76_get_rxwi(dev);
 		dma_addr_t addr;
 		struct page *page;
 		int token;
 		void *ptr;
 
-		if (!t)
+		if (!r)
 			goto unmap;
 
-		page = __dev_alloc_pages(GFP_KERNEL, get_order(length));
-		if (!page) {
-			mt76_put_rxwi(dev, t);
+		ptr = page_frag_alloc(&wed->rx_buf_ring.rx_page, length, GFP_ATOMIC);
+		if (!ptr) {
+			mt76_put_rxwi(dev, r);
 			goto unmap;
 		}
 
@@ -67,17 +84,17 @@ u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 
 		if (unlikely(dma_mapping_error(dev->dev, addr))) {
 			skb_free_frag(ptr);
-			mt76_put_rxwi(dev, t);
+			mt76_put_rxwi(dev, r);
 			goto unmap;
 		}
 
 		desc->buf0 = cpu_to_le32(addr);
-		token = mt76_rx_token_consume(dev, ptr, t, addr);
+		token = mt76_rx_token_consume(dev, ptr, r, addr);
 		if (token < 0) {
 			dma_unmap_single(dev->dma_dev, addr,
 					 wed->wlan.rx_size, DMA_TO_DEVICE);
-			__free_pages(page, get_order(length));
-			mt76_put_rxwi(dev, t);
+			skb_free_frag(ptr);
+			mt76_put_rxwi(dev, r);
 			goto unmap;
 		}
 
-- 
2.18.0
