From 7883011e4646c3c69b1f181d3a7de98e811c68fb Mon Sep 17 00:00:00 2001
From: Bo Jiao <Bo.Jiao@mediatek.com>
Date: Mon, 6 Feb 2023 19:49:22 +0800
Subject: [PATCH 057/116] mtk: wifi: mt76: revert page_pool for kernel 5.4

This reverts commit e8c10835cf062c577ddf426913788c39d30b4bd7.

Change-Id: I4e5764fc545087f691fb4c2f43e7a9cefd1e1657
---
 dma.c         | 75 ++++++++++++++++++++++++++-------------------------
 mac80211.c    | 56 --------------------------------------
 mt76.h        | 22 +--------------
 mt7915/main.c | 26 +++++++-----------
 usb.c         | 43 ++++++++++++++---------------
 wed.c         | 50 ++++++++++++++++++++++------------
 6 files changed, 104 insertions(+), 168 deletions(-)

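Note (illustrative, not part of the applied diff): the hunks below drop the
page_pool frag API, which is not available on kernel 5.4, and fall back to a
per-queue page_frag_cache plus explicit DMA mapping. A minimal sketch of that
fallback pattern follows, under the assumption of a driver-owned
page_frag_cache; the demo_rx_buf_get()/demo_rx_buf_put() helpers are
hypothetical names, not functions from mt76.

/* Illustrative sketch only -- not part of this patch. */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>

static void *demo_rx_buf_get(struct device *dma_dev,
			     struct page_frag_cache *cache,
			     unsigned int buf_size, dma_addr_t *addr)
{
	int len = SKB_WITH_OVERHEAD(buf_size);
	void *buf;

	/* page_frag_alloc() takes the place of mt76_get_page_pool_buf() */
	buf = page_frag_alloc(cache, buf_size, GFP_ATOMIC);
	if (!buf)
		return NULL;

	/* the driver owns the DMA mapping again instead of the page pool */
	*addr = dma_map_single(dma_dev, buf, len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dma_dev, *addr))) {
		skb_free_frag(buf);
		return NULL;
	}

	return buf;
}

static void demo_rx_buf_put(struct device *dma_dev, void *buf,
			    dma_addr_t addr, unsigned int buf_size)
{
	dma_unmap_single(dma_dev, addr, SKB_WITH_OVERHEAD(buf_size),
			 DMA_FROM_DEVICE);
	skb_free_frag(buf);	/* takes the place of mt76_put_page_pool_buf() */
}

Dropping page_pool also means losing its recycling hooks, which is why the
hunks below remove the skb_mark_for_recycle() calls as well.
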
diff --git a/dma.c b/dma.c
index 66c000ef0..33a84f5fa 100644
--- a/dma.c
+++ b/dma.c
@@ -178,7 +178,7 @@ mt76_free_pending_rxwi(struct mt76_dev *dev)
 local_bh_disable();
 while ((t = __mt76_get_rxwi(dev)) != NULL) {
 if (t->ptr)
- mt76_put_page_pool_buf(t->ptr, false);
+ skb_free_frag(t->ptr);
 kfree(t);
 }
 local_bh_enable();
@@ -450,9 +450,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 if (!t)
 return NULL;

- dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
- SKB_WITH_OVERHEAD(q->buf_size),
- page_pool_get_dma_dir(q->page_pool));
+ dma_unmap_single(dev->dma_dev, t->dma_addr,
+ SKB_WITH_OVERHEAD(q->buf_size),
+ DMA_FROM_DEVICE);

 buf = t->ptr;
 t->dma_addr = 0;
@@ -462,9 +462,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 if (drop)
 *drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
 } else {
- dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
- SKB_WITH_OVERHEAD(q->buf_size),
- page_pool_get_dma_dir(q->page_pool));
+ dma_unmap_single(dev->dma_dev, e->dma_addr[0],
+ SKB_WITH_OVERHEAD(q->buf_size),
+ DMA_FROM_DEVICE);
 }

 done:
@@ -638,7 +638,8 @@ int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
 bool allow_direct)
 {
 int len = SKB_WITH_OVERHEAD(q->buf_size);
- int frames = 0;
+ int frames = 0, offset = q->buf_offset;
+ dma_addr_t addr;

 if (!q->ndesc)
 return 0;
@@ -647,28 +648,29 @@ int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,

 while (q->queued < q->ndesc - 1) {
 struct mt76_queue_buf qbuf = {};
- enum dma_data_direction dir;
- dma_addr_t addr;
- int offset;
 void *buf = NULL;

 if (mt76_queue_is_wed_rro_ind(q))
 goto done;

- buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+ buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
 if (!buf)
 break;

- addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
- dir = page_pool_get_dma_dir(q->page_pool);
- dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
+ addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
+ skb_free_frag(buf);
+ break;
+ }

- qbuf.addr = addr + q->buf_offset;
+ qbuf.addr = addr + offset;
 done:
- qbuf.len = len - q->buf_offset;
+ qbuf.len = len - offset;
 qbuf.skip_unmap = false;
 if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
- mt76_put_page_pool_buf(buf, allow_direct);
+ dma_unmap_single(dev->dma_dev, addr, len,
+ DMA_FROM_DEVICE);
+ skb_free_frag(buf);
 break;
 }
 frames++;
@@ -722,10 +724,6 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 if (!q->entry)
 return -ENOMEM;

- ret = mt76_create_page_pool(dev, q);
- if (ret)
- return ret;
-
 ret = mt76_wed_dma_setup(dev, q, false);
 if (ret)
 return ret;
@@ -744,6 +742,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 static void
 mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 {
+ struct page *page;
 void *buf;
 bool more;

@@ -759,7 +758,7 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 break;

 if (!mt76_queue_is_wed_rro(q))
- mt76_put_page_pool_buf(buf, false);
+ skb_free_frag(buf);
 } while (1);

 spin_lock_bh(&q->lock);
@@ -769,6 +768,16 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 }

 spin_unlock_bh(&q->lock);
+
+ if (mt76_queue_is_wed_rx(q))
+ return;
+
+ if (!q->rx_page.va)
+ return;
+
+ page = virt_to_page(q->rx_page.va);
+ __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+ memset(&q->rx_page, 0, sizeof(q->rx_page));
 }

 static void
@@ -791,15 +800,10 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
 /* reset WED rx queues */
 mt76_wed_dma_setup(dev, q, true);

- if (mt76_queue_is_wed_tx_free(q))
- return;
-
- if (mtk_wed_device_active(&dev->mmio.wed) &&
- mt76_queue_is_wed_rro(q))
- return;
-
- mt76_dma_sync_idx(dev, q);
- mt76_dma_rx_fill(dev, q, false);
+ if (!mt76_queue_is_wed_tx_free(q)) {
+ mt76_dma_sync_idx(dev, q);
+ mt76_dma_rx_fill(dev, q, false);
+ }
 }

 static void
@@ -816,7 +820,7 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,

 skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
 } else {
- mt76_put_page_pool_buf(data, allow_direct);
+ skb_free_frag(data);
 }

 if (more)
@@ -891,7 +895,6 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
 goto free_frag;

 skb_reserve(skb, q->buf_offset);
- skb_mark_for_recycle(skb);

 *(u32 *)skb->cb = info;

@@ -907,7 +910,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
 continue;

 free_frag:
- mt76_put_page_pool_buf(data, allow_direct);
+ skb_free_frag(data);
 }

 mt76_dma_rx_fill(dev, q, true);
@@ -1010,8 +1013,6 @@ void mt76_dma_cleanup(struct mt76_dev *dev)

 netif_napi_del(&dev->napi[i]);
 mt76_dma_rx_cleanup(dev, q);
-
- page_pool_destroy(q->page_pool);
 }

 if (mtk_wed_device_active(&dev->mmio.wed))
diff --git a/mac80211.c b/mac80211.c
index 8091a60e0..f7b9ba6a0 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -565,47 +565,6 @@ void mt76_unregister_phy(struct mt76_phy *phy)
 }
 EXPORT_SYMBOL_GPL(mt76_unregister_phy);

-int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
-{
- struct page_pool_params pp_params = {
- .order = 0,
- .flags = PP_FLAG_PAGE_FRAG,
- .nid = NUMA_NO_NODE,
- .dev = dev->dma_dev,
- };
- int idx = q - dev->q_rx;
-
- switch (idx) {
- case MT_RXQ_MAIN:
- case MT_RXQ_BAND1:
- case MT_RXQ_BAND2:
- pp_params.pool_size = 256;
- break;
- default:
- pp_params.pool_size = 16;
- break;
- }
-
- if (mt76_is_mmio(dev)) {
- /* rely on page_pool for DMA mapping */
- pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
- pp_params.dma_dir = DMA_FROM_DEVICE;
- pp_params.max_len = PAGE_SIZE;
- pp_params.offset = 0;
- }
-
- q->page_pool = page_pool_create(&pp_params);
- if (IS_ERR(q->page_pool)) {
- int err = PTR_ERR(q->page_pool);
-
- q->page_pool = NULL;
- return err;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mt76_create_page_pool);
-
 struct mt76_dev *
 mt76_alloc_device(struct device *pdev, unsigned int size,
 const struct ieee80211_ops *ops,
@@ -1818,21 +1777,6 @@ void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
 }
 EXPORT_SYMBOL_GPL(mt76_ethtool_worker);

-void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
-{
-#ifdef CONFIG_PAGE_POOL_STATS
- struct page_pool_stats stats = {};
- int i;
-
- mt76_for_each_q_rx(dev, i)
- page_pool_get_stats(dev->q_rx[i].page_pool, &stats);
-
- page_pool_ethtool_stats_get(data, &stats);
- *index += page_pool_ethtool_stats_get_count();
-#endif
-}
-EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
-
 enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
 {
 struct ieee80211_hw *hw = phy->hw;
diff --git a/mt76.h b/mt76.h
index 11cbb2d28..e21c6537f 100644
--- a/mt76.h
+++ b/mt76.h
@@ -251,7 +251,7 @@ struct mt76_queue {

 dma_addr_t desc_dma;
 struct sk_buff *rx_head;
- struct page_pool *page_pool;
+ struct page_frag_cache rx_page;
 };

 struct mt76_mcu_ops {
@@ -1606,7 +1606,6 @@ mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
 return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
 }

-void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index);
 void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
 struct mt76_sta_stats *stats, bool eht);
 int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
@@ -1752,25 +1751,6 @@ void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
 struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
 struct mt76_txwi_cache *r, dma_addr_t phys);
-int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q);
-static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
-{
- struct page *page = virt_to_head_page(buf);
-
- page_pool_put_full_page(page->pp, page, allow_direct);
-}
-
-static inline void *
-mt76_get_page_pool_buf(struct mt76_queue *q, u32 *offset, u32 size)
-{
- struct page *page;
-
- page = page_pool_dev_alloc_frag(q->page_pool, offset, size);
- if (!page)
- return NULL;
-
- return page_address(page) + *offset;
-}

 static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
 {
diff --git a/mt7915/main.c b/mt7915/main.c
index b16a63366..ad91fc3c8 100644
--- a/mt7915/main.c
+++ b/mt7915/main.c
@@ -1402,22 +1402,19 @@ void mt7915_get_et_strings(struct ieee80211_hw *hw,
 struct ieee80211_vif *vif,
 u32 sset, u8 *data)
 {
- if (sset != ETH_SS_STATS)
- return;
-
- memcpy(data, mt7915_gstrings_stats, sizeof(mt7915_gstrings_stats));
- data += sizeof(mt7915_gstrings_stats);
- page_pool_ethtool_stats_get_strings(data);
+ if (sset == ETH_SS_STATS)
+ memcpy(data, mt7915_gstrings_stats,
+ sizeof(mt7915_gstrings_stats));
 }

 static
 int mt7915_get_et_sset_count(struct ieee80211_hw *hw,
 struct ieee80211_vif *vif, int sset)
 {
- if (sset != ETH_SS_STATS)
- return 0;
+ if (sset == ETH_SS_STATS)
+ return MT7915_SSTATS_LEN;

- return MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
+ return 0;
 }

 static void mt7915_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
@@ -1445,7 +1442,7 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
 .idx = mvif->mt76.idx,
 };
 /* See mt7915_ampdu_stat_read_phy, etc */
- int i, ei = 0, stats_size;
+ int i, ei = 0;

 mutex_lock(&dev->mt76.mutex);

@@ -1557,12 +1554,9 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
 return;

 ei += wi.worker_stat_count;
-
- mt76_ethtool_page_pool_stats(&dev->mt76, &data[ei], &ei);
-
- stats_size = MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
- if (ei != stats_size)
- dev_err(dev->mt76.dev, "ei: %d size: %d", ei, stats_size);
+ if (ei != MT7915_SSTATS_LEN)
+ dev_err(dev->mt76.dev, "ei: %d MT7915_SSTATS_LEN: %d",
+ ei, (int)MT7915_SSTATS_LEN);
 }

 static void
diff --git a/usb.c b/usb.c
index dc690d1cd..058f2d124 100644
--- a/usb.c
+++ b/usb.c
@@ -319,27 +319,29 @@ mt76u_set_endpoints(struct usb_interface *intf,

 static int
 mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
- int nsgs)
+ int nsgs, gfp_t gfp)
 {
 int i;

 for (i = 0; i < nsgs; i++) {
+ struct page *page;
 void *data;
 int offset;

- data = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+ data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
 if (!data)
 break;

- sg_set_page(&urb->sg[i], virt_to_head_page(data), q->buf_size,
- offset);
+ page = virt_to_head_page(data);
+ offset = data - page_address(page);
+ sg_set_page(&urb->sg[i], page, q->buf_size, offset);
 }

 if (i < nsgs) {
 int j;

 for (j = nsgs; j < urb->num_sgs; j++)
- mt76_put_page_pool_buf(sg_virt(&urb->sg[j]), false);
+ skb_free_frag(sg_virt(&urb->sg[j]));
 urb->num_sgs = i;
 }

@@ -352,16 +354,15 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,

 static int
 mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
- struct urb *urb, int nsgs)
+ struct urb *urb, int nsgs, gfp_t gfp)
 {
 enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
- int offset;

 if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
- return mt76u_fill_rx_sg(dev, q, urb, nsgs);
+ return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);

 urb->transfer_buffer_length = q->buf_size;
- urb->transfer_buffer = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+ urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);

 return urb->transfer_buffer ? 0 : -ENOMEM;
 }
@@ -399,7 +400,7 @@ mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
 if (err)
 return err;

- return mt76u_refill_rx(dev, q, e->urb, sg_size);
+ return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
 }

 static void mt76u_urb_free(struct urb *urb)
@@ -407,10 +408,10 @@ static void mt76u_urb_free(struct urb *urb)
 int i;

 for (i = 0; i < urb->num_sgs; i++)
- mt76_put_page_pool_buf(sg_virt(&urb->sg[i]), false);
+ skb_free_frag(sg_virt(&urb->sg[i]));

 if (urb->transfer_buffer)
- mt76_put_page_pool_buf(urb->transfer_buffer, false);
+ skb_free_frag(urb->transfer_buffer);

 usb_free_urb(urb);
 }
@@ -546,8 +547,6 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
 len -= data_len;
 nsgs++;
 }
-
- skb_mark_for_recycle(skb);
 dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb, NULL);

 return nsgs;
@@ -613,7 +612,7 @@ mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)

 count = mt76u_process_rx_entry(dev, urb, q->buf_size);
 if (count > 0) {
- err = mt76u_refill_rx(dev, q, urb, count);
+ err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
 if (err < 0)
 break;
 }
@@ -664,10 +663,6 @@ mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
 struct mt76_queue *q = &dev->q_rx[qid];
 int i, err;

- err = mt76_create_page_pool(dev, q);
- if (err)
- return err;
-
 spin_lock_init(&q->lock);
 q->entry = devm_kcalloc(dev->dev,
 MT_NUM_RX_ENTRIES, sizeof(*q->entry),
@@ -696,6 +691,7 @@ EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);
 static void
 mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 {
+ struct page *page;
 int i;

 for (i = 0; i < q->ndesc; i++) {
@@ -705,8 +701,13 @@ mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 mt76u_urb_free(q->entry[i].urb);
 q->entry[i].urb = NULL;
 }
- page_pool_destroy(q->page_pool);
- q->page_pool = NULL;
+
+ if (!q->rx_page.va)
+ return;
+
+ page = virt_to_page(q->rx_page.va);
+ __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+ memset(&q->rx_page, 0, sizeof(q->rx_page));
 }

 static void mt76u_free_rx(struct mt76_dev *dev)
diff --git a/wed.c b/wed.c
index f89e45375..8eca4d818 100644
--- a/wed.c
+++ b/wed.c
@@ -9,8 +9,12 @@
 void mt76_wed_release_rx_buf(struct mtk_wed_device *wed)
 {
 struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+ u32 length;
 int i;

+ length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
+ sizeof(struct skb_shared_info));
+
 for (i = 0; i < dev->rx_token_size; i++) {
 struct mt76_txwi_cache *t;

@@ -18,7 +22,9 @@ void mt76_wed_release_rx_buf(struct mtk_wed_device *wed)
 if (!t || !t->ptr)
 continue;

- mt76_put_page_pool_buf(t->ptr, false);
+ dma_unmap_single(dev->dma_dev, t->dma_addr,
+ wed->wlan.rx_size, DMA_FROM_DEVICE);
+ __free_pages(virt_to_page(t->ptr), get_order(length));
 t->ptr = NULL;

 mt76_put_rxwi(dev, t);
@@ -33,33 +39,45 @@ u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 {
 struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
 struct mtk_wed_bm_desc *desc = wed->rx_buf_ring.desc;
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
- int i, len = SKB_WITH_OVERHEAD(q->buf_size);
- struct mt76_txwi_cache *t = NULL;
+ u32 length;
+ int i;
+
+ length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
+ sizeof(struct skb_shared_info));

 for (i = 0; i < size; i++) {
- enum dma_data_direction dir;
+ struct mt76_txwi_cache *t = mt76_get_rxwi(dev);
 dma_addr_t addr;
- u32 offset;
+ struct page *page;
 int token;
- void *buf;
+ void *ptr;

- t = mt76_get_rxwi(dev);
 if (!t)
 goto unmap;

- buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
- if (!buf)
+ page = __dev_alloc_pages(GFP_KERNEL, get_order(length));
+ if (!page) {
+ mt76_put_rxwi(dev, t);
 goto unmap;
+ }

- addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
- dir = page_pool_get_dma_dir(q->page_pool);
- dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
+ addr = dma_map_single(dev->dma_dev, ptr,
+ wed->wlan.rx_size,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev->dev, addr))) {
+ skb_free_frag(ptr);
+ mt76_put_rxwi(dev, t);
+ goto unmap;
+ }

 desc->buf0 = cpu_to_le32(addr);
- token = mt76_rx_token_consume(dev, buf, t, addr);
+ token = mt76_rx_token_consume(dev, ptr, t, addr);
 if (token < 0) {
- mt76_put_page_pool_buf(buf, false);
+ dma_unmap_single(dev->dma_dev, addr,
+ wed->wlan.rx_size, DMA_TO_DEVICE);
+ __free_pages(page, get_order(length));
+ mt76_put_rxwi(dev, t);
 goto unmap;
 }

@@ -74,8 +92,6 @@ u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 return 0;

 unmap:
- if (t)
- mt76_put_rxwi(dev, t);
 mt76_wed_release_rx_buf(wed);

 return -ENOMEM;
--
2.39.2
