From 531fa0a7bd4865ee9e631c6cd1d5655c8e8995a6 Mon Sep 17 00:00:00 2001
From: Bo Jiao <Bo.Jiao@mediatek.com>
Date: Mon, 6 Feb 2023 19:49:22 +0800
Subject: [PATCH 2000/2032] mtk: wifi: mt76: revert page_pool for kernel 5.4

This reverts commit e8c10835cf062c577ddf426913788c39d30b4bd7.

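Kernel 5.4 does not provide the page_pool fragment API that upstream
mt76 uses for RX buffers, so this revert switches the driver back to a
per-queue struct page_frag_cache with explicit streaming DMA mappings.
The helper below is an illustrative sketch only and is not part of the
diff (rx_frag_alloc_mapped is a made-up name); it just shows the
page_frag_alloc() + dma_map_single() pattern the reverted code relies
on:

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>
    #include <linux/skbuff.h>

    /* Allocate an RX fragment and map it for device DMA; release the
     * fragment again if the mapping fails. Illustrative only.
     */
    static void *rx_frag_alloc_mapped(struct device *dma_dev,
                                      struct page_frag_cache *cache,
                                      unsigned int len, dma_addr_t *addr)
    {
            void *buf = page_frag_alloc(cache, len, GFP_ATOMIC);

            if (!buf)
                    return NULL;

            *addr = dma_map_single(dma_dev, buf, len, DMA_FROM_DEVICE);
            if (unlikely(dma_mapping_error(dma_dev, *addr))) {
                    skb_free_frag(buf);
                    return NULL;
            }

            return buf;
    }
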
---
 dma.c         | 75 ++++++++++++++++++++++++++-------------------
 mac80211.c    | 57 ---------------------------------------
 mt76.h        | 22 +--------------
 mt7915/main.c | 26 +++++++-----------
 usb.c         | 43 ++++++++++++++---------------
 wed.c         | 50 ++++++++++++++++++++++------------
 6 files changed, 104 insertions(+), 169 deletions(-)

diff --git a/dma.c b/dma.c
index 66c000ef..33a84f5f 100644
--- a/dma.c
+++ b/dma.c
@@ -178,7 +178,7 @@ mt76_free_pending_rxwi(struct mt76_dev *dev)
 local_bh_disable();
 while ((t = __mt76_get_rxwi(dev)) != NULL) {
 if (t->ptr)
- mt76_put_page_pool_buf(t->ptr, false);
+ skb_free_frag(t->ptr);
 kfree(t);
 }
 local_bh_enable();
@@ -450,9 +450,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 if (!t)
 return NULL;

- dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
- SKB_WITH_OVERHEAD(q->buf_size),
- page_pool_get_dma_dir(q->page_pool));
+ dma_unmap_single(dev->dma_dev, t->dma_addr,
+ SKB_WITH_OVERHEAD(q->buf_size),
+ DMA_FROM_DEVICE);

 buf = t->ptr;
 t->dma_addr = 0;
@@ -462,9 +462,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 if (drop)
 *drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
 } else {
- dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
- SKB_WITH_OVERHEAD(q->buf_size),
- page_pool_get_dma_dir(q->page_pool));
+ dma_unmap_single(dev->dma_dev, e->dma_addr[0],
+ SKB_WITH_OVERHEAD(q->buf_size),
+ DMA_FROM_DEVICE);
 }

 done:
@@ -638,7 +638,8 @@ int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
 bool allow_direct)
 {
 int len = SKB_WITH_OVERHEAD(q->buf_size);
- int frames = 0;
+ int frames = 0, offset = q->buf_offset;
+ dma_addr_t addr;

 if (!q->ndesc)
 return 0;
@@ -647,28 +648,29 @@ int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,

 while (q->queued < q->ndesc - 1) {
 struct mt76_queue_buf qbuf = {};
- enum dma_data_direction dir;
- dma_addr_t addr;
- int offset;
 void *buf = NULL;

 if (mt76_queue_is_wed_rro_ind(q))
 goto done;

- buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+ buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
 if (!buf)
 break;

- addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
- dir = page_pool_get_dma_dir(q->page_pool);
- dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
+ addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
+ skb_free_frag(buf);
+ break;
+ }

- qbuf.addr = addr + q->buf_offset;
+ qbuf.addr = addr + offset;
 done:
- qbuf.len = len - q->buf_offset;
+ qbuf.len = len - offset;
 qbuf.skip_unmap = false;
 if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
- mt76_put_page_pool_buf(buf, allow_direct);
+ dma_unmap_single(dev->dma_dev, addr, len,
+ DMA_FROM_DEVICE);
+ skb_free_frag(buf);
 break;
 }
 frames++;
@@ -722,10 +724,6 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 if (!q->entry)
 return -ENOMEM;

- ret = mt76_create_page_pool(dev, q);
- if (ret)
- return ret;
-
 ret = mt76_wed_dma_setup(dev, q, false);
 if (ret)
 return ret;
@@ -744,6 +742,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 static void
 mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 {
+ struct page *page;
 void *buf;
 bool more;

@@ -759,7 +758,7 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 break;

 if (!mt76_queue_is_wed_rro(q))
- mt76_put_page_pool_buf(buf, false);
+ skb_free_frag(buf);
 } while (1);

 spin_lock_bh(&q->lock);
@@ -769,6 +768,16 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 }

 spin_unlock_bh(&q->lock);
+
+ if (mt76_queue_is_wed_rx(q))
+ return;
+
+ if (!q->rx_page.va)
+ return;
+
+ page = virt_to_page(q->rx_page.va);
+ __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+ memset(&q->rx_page, 0, sizeof(q->rx_page));
 }

 static void
@@ -791,15 +800,10 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
 /* reset WED rx queues */
 mt76_wed_dma_setup(dev, q, true);

- if (mt76_queue_is_wed_tx_free(q))
- return;
-
- if (mtk_wed_device_active(&dev->mmio.wed) &&
- mt76_queue_is_wed_rro(q))
- return;
-
- mt76_dma_sync_idx(dev, q);
- mt76_dma_rx_fill(dev, q, false);
+ if (!mt76_queue_is_wed_tx_free(q)) {
+ mt76_dma_sync_idx(dev, q);
+ mt76_dma_rx_fill(dev, q, false);
+ }
 }

 static void
@@ -816,7 +820,7 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,

 skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
 } else {
- mt76_put_page_pool_buf(data, allow_direct);
+ skb_free_frag(data);
 }

 if (more)
@@ -891,7 +895,6 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
 goto free_frag;

 skb_reserve(skb, q->buf_offset);
- skb_mark_for_recycle(skb);

 *(u32 *)skb->cb = info;

@@ -907,7 +910,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
 continue;

 free_frag:
- mt76_put_page_pool_buf(data, allow_direct);
+ skb_free_frag(data);
 }

 mt76_dma_rx_fill(dev, q, true);
@@ -1010,8 +1013,6 @@ void mt76_dma_cleanup(struct mt76_dev *dev)

 netif_napi_del(&dev->napi[i]);
 mt76_dma_rx_cleanup(dev, q);
-
- page_pool_destroy(q->page_pool);
 }

 if (mtk_wed_device_active(&dev->mmio.wed))
diff --git a/mac80211.c b/mac80211.c
index f7cd47f9..380a74e4 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -4,7 +4,6 @@
 */
 #include <linux/sched.h>
 #include <linux/of.h>
-#include <net/page_pool.h>
 #include "mt76.h"

 static const struct ieee80211_channel mt76_channels_2ghz[] = {
@@ -566,47 +565,6 @@ void mt76_unregister_phy(struct mt76_phy *phy)
 }
 EXPORT_SYMBOL_GPL(mt76_unregister_phy);

-int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
-{
- struct page_pool_params pp_params = {
- .order = 0,
- .flags = PP_FLAG_PAGE_FRAG,
- .nid = NUMA_NO_NODE,
- .dev = dev->dma_dev,
- };
- int idx = q - dev->q_rx;
-
- switch (idx) {
- case MT_RXQ_MAIN:
- case MT_RXQ_BAND1:
- case MT_RXQ_BAND2:
- pp_params.pool_size = 256;
- break;
- default:
- pp_params.pool_size = 16;
- break;
- }
-
- if (mt76_is_mmio(dev)) {
- /* rely on page_pool for DMA mapping */
- pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
- pp_params.dma_dir = DMA_FROM_DEVICE;
- pp_params.max_len = PAGE_SIZE;
- pp_params.offset = 0;
- }
-
- q->page_pool = page_pool_create(&pp_params);
- if (IS_ERR(q->page_pool)) {
- int err = PTR_ERR(q->page_pool);
-
- q->page_pool = NULL;
- return err;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mt76_create_page_pool);
-
 struct mt76_dev *
 mt76_alloc_device(struct device *pdev, unsigned int size,
 const struct ieee80211_ops *ops,
@@ -1819,21 +1777,6 @@ void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
 }
 EXPORT_SYMBOL_GPL(mt76_ethtool_worker);

-void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
-{
-#ifdef CONFIG_PAGE_POOL_STATS
- struct page_pool_stats stats = {};
- int i;
-
- mt76_for_each_q_rx(dev, i)
- page_pool_get_stats(dev->q_rx[i].page_pool, &stats);
-
- page_pool_ethtool_stats_get(data, &stats);
- *index += page_pool_ethtool_stats_get_count();
-#endif
-}
-EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
-
 enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
 {
 struct ieee80211_hw *hw = phy->hw;
diff --git a/mt76.h b/mt76.h
index 543d9de5..540814f1 100644
--- a/mt76.h
+++ b/mt76.h
@@ -246,7 +246,7 @@ struct mt76_queue {

 dma_addr_t desc_dma;
 struct sk_buff *rx_head;
- struct page_pool *page_pool;
+ struct page_frag_cache rx_page;
 };

 struct mt76_mcu_ops {
@@ -1601,7 +1601,6 @@ mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
 return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
 }

-void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index);
 void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
 struct mt76_sta_stats *stats, bool eht);
 int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
@@ -1747,25 +1746,6 @@ void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
 struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
 struct mt76_txwi_cache *r, dma_addr_t phys);
-int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q);
-static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
-{
- struct page *page = virt_to_head_page(buf);
-
- page_pool_put_full_page(page->pp, page, allow_direct);
-}
-
-static inline void *
-mt76_get_page_pool_buf(struct mt76_queue *q, u32 *offset, u32 size)
-{
- struct page *page;
-
- page = page_pool_dev_alloc_frag(q->page_pool, offset, size);
- if (!page)
- return NULL;
-
- return page_address(page) + *offset;
-}

 static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
 {
diff --git a/mt7915/main.c b/mt7915/main.c
index 49d5b459..103a0709 100644
--- a/mt7915/main.c
+++ b/mt7915/main.c
@@ -1402,22 +1402,19 @@ void mt7915_get_et_strings(struct ieee80211_hw *hw,
 struct ieee80211_vif *vif,
 u32 sset, u8 *data)
 {
- if (sset != ETH_SS_STATS)
- return;
-
- memcpy(data, mt7915_gstrings_stats, sizeof(mt7915_gstrings_stats));
- data += sizeof(mt7915_gstrings_stats);
- page_pool_ethtool_stats_get_strings(data);
+ if (sset == ETH_SS_STATS)
+ memcpy(data, mt7915_gstrings_stats,
+ sizeof(mt7915_gstrings_stats));
 }

 static
 int mt7915_get_et_sset_count(struct ieee80211_hw *hw,
 struct ieee80211_vif *vif, int sset)
 {
- if (sset != ETH_SS_STATS)
- return 0;
+ if (sset == ETH_SS_STATS)
+ return MT7915_SSTATS_LEN;

- return MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
+ return 0;
 }

 static void mt7915_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
@@ -1445,7 +1442,7 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
 .idx = mvif->mt76.idx,
 };
 /* See mt7915_ampdu_stat_read_phy, etc */
- int i, ei = 0, stats_size;
+ int i, ei = 0;

 mutex_lock(&dev->mt76.mutex);

@@ -1557,12 +1554,9 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
 return;

 ei += wi.worker_stat_count;
-
- mt76_ethtool_page_pool_stats(&dev->mt76, &data[ei], &ei);
-
- stats_size = MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
- if (ei != stats_size)
- dev_err(dev->mt76.dev, "ei: %d size: %d", ei, stats_size);
+ if (ei != MT7915_SSTATS_LEN)
+ dev_err(dev->mt76.dev, "ei: %d MT7915_SSTATS_LEN: %d",
+ ei, (int)MT7915_SSTATS_LEN);
 }

 static void
diff --git a/usb.c b/usb.c
index dc690d1c..058f2d12 100644
--- a/usb.c
+++ b/usb.c
@@ -319,27 +319,29 @@ mt76u_set_endpoints(struct usb_interface *intf,

 static int
 mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
- int nsgs)
+ int nsgs, gfp_t gfp)
 {
 int i;

 for (i = 0; i < nsgs; i++) {
+ struct page *page;
 void *data;
 int offset;

- data = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+ data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
 if (!data)
 break;

- sg_set_page(&urb->sg[i], virt_to_head_page(data), q->buf_size,
- offset);
+ page = virt_to_head_page(data);
+ offset = data - page_address(page);
+ sg_set_page(&urb->sg[i], page, q->buf_size, offset);
 }

 if (i < nsgs) {
 int j;

 for (j = nsgs; j < urb->num_sgs; j++)
- mt76_put_page_pool_buf(sg_virt(&urb->sg[j]), false);
+ skb_free_frag(sg_virt(&urb->sg[j]));
 urb->num_sgs = i;
 }

@@ -352,16 +354,15 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,

 static int
 mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
- struct urb *urb, int nsgs)
+ struct urb *urb, int nsgs, gfp_t gfp)
 {
 enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
- int offset;

 if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
- return mt76u_fill_rx_sg(dev, q, urb, nsgs);
+ return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);

 urb->transfer_buffer_length = q->buf_size;
- urb->transfer_buffer = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+ urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);

 return urb->transfer_buffer ? 0 : -ENOMEM;
 }
@@ -399,7 +400,7 @@ mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
 if (err)
 return err;

- return mt76u_refill_rx(dev, q, e->urb, sg_size);
+ return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
 }

 static void mt76u_urb_free(struct urb *urb)
@@ -407,10 +408,10 @@ static void mt76u_urb_free(struct urb *urb)
 int i;

 for (i = 0; i < urb->num_sgs; i++)
- mt76_put_page_pool_buf(sg_virt(&urb->sg[i]), false);
+ skb_free_frag(sg_virt(&urb->sg[i]));

 if (urb->transfer_buffer)
- mt76_put_page_pool_buf(urb->transfer_buffer, false);
+ skb_free_frag(urb->transfer_buffer);

 usb_free_urb(urb);
 }
@@ -546,8 +547,6 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
 len -= data_len;
 nsgs++;
 }
-
- skb_mark_for_recycle(skb);
 dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb, NULL);

 return nsgs;
@@ -613,7 +612,7 @@ mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)

 count = mt76u_process_rx_entry(dev, urb, q->buf_size);
 if (count > 0) {
- err = mt76u_refill_rx(dev, q, urb, count);
+ err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
 if (err < 0)
 break;
 }
@@ -664,10 +663,6 @@ mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
 struct mt76_queue *q = &dev->q_rx[qid];
 int i, err;

- err = mt76_create_page_pool(dev, q);
- if (err)
- return err;
-
 spin_lock_init(&q->lock);
 q->entry = devm_kcalloc(dev->dev,
 MT_NUM_RX_ENTRIES, sizeof(*q->entry),
@@ -696,6 +691,7 @@ EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);
 static void
 mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 {
+ struct page *page;
 int i;

 for (i = 0; i < q->ndesc; i++) {
@@ -705,8 +701,13 @@ mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 mt76u_urb_free(q->entry[i].urb);
 q->entry[i].urb = NULL;
 }
- page_pool_destroy(q->page_pool);
- q->page_pool = NULL;
+
+ if (!q->rx_page.va)
+ return;
+
+ page = virt_to_page(q->rx_page.va);
+ __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+ memset(&q->rx_page, 0, sizeof(q->rx_page));
 }

 static void mt76u_free_rx(struct mt76_dev *dev)
diff --git a/wed.c b/wed.c
index f89e4537..8eca4d81 100644
--- a/wed.c
+++ b/wed.c
@@ -9,8 +9,12 @@
 void mt76_wed_release_rx_buf(struct mtk_wed_device *wed)
 {
 struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+ u32 length;
 int i;

+ length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
+ sizeof(struct skb_shared_info));
+
 for (i = 0; i < dev->rx_token_size; i++) {
 struct mt76_txwi_cache *t;

@@ -18,7 +22,9 @@ void mt76_wed_release_rx_buf(struct mtk_wed_device *wed)
 if (!t || !t->ptr)
 continue;

- mt76_put_page_pool_buf(t->ptr, false);
+ dma_unmap_single(dev->dma_dev, t->dma_addr,
+ wed->wlan.rx_size, DMA_FROM_DEVICE);
+ __free_pages(virt_to_page(t->ptr), get_order(length));
 t->ptr = NULL;

 mt76_put_rxwi(dev, t);
@@ -33,33 +39,45 @@ u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 {
 struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
 struct mtk_wed_bm_desc *desc = wed->rx_buf_ring.desc;
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
- int i, len = SKB_WITH_OVERHEAD(q->buf_size);
- struct mt76_txwi_cache *t = NULL;
+ u32 length;
+ int i;
+
+ length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
+ sizeof(struct skb_shared_info));

 for (i = 0; i < size; i++) {
- enum dma_data_direction dir;
+ struct mt76_txwi_cache *t = mt76_get_rxwi(dev);
 dma_addr_t addr;
- u32 offset;
+ struct page *page;
 int token;
- void *buf;
+ void *ptr;

- t = mt76_get_rxwi(dev);
 if (!t)
 goto unmap;

- buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
- if (!buf)
+ page = __dev_alloc_pages(GFP_KERNEL, get_order(length));
+ if (!page) {
+ mt76_put_rxwi(dev, t);
 goto unmap;
+ }

- addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
- dir = page_pool_get_dma_dir(q->page_pool);
- dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
+ addr = dma_map_single(dev->dma_dev, ptr,
+ wed->wlan.rx_size,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev->dev, addr))) {
+ skb_free_frag(ptr);
+ mt76_put_rxwi(dev, t);
+ goto unmap;
+ }

 desc->buf0 = cpu_to_le32(addr);
- token = mt76_rx_token_consume(dev, buf, t, addr);
+ token = mt76_rx_token_consume(dev, ptr, t, addr);
 if (token < 0) {
- mt76_put_page_pool_buf(buf, false);
+ dma_unmap_single(dev->dma_dev, addr,
+ wed->wlan.rx_size, DMA_TO_DEVICE);
+ __free_pages(page, get_order(length));
+ mt76_put_rxwi(dev, t);
 goto unmap;
 }

@@ -74,8 +92,6 @@ u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 return 0;

 unmap:
- if (t)
- mt76_put_rxwi(dev, t);
 mt76_wed_release_rx_buf(wed);

 return -ENOMEM;
-- 
2.18.0
