From cc82fae54b1efd7de07034bea7a883b9fb362799 Mon Sep 17 00:00:00 2001
From: Bo Jiao <Bo.Jiao@mediatek.com>
Date: Mon, 6 Feb 2023 19:49:22 +0800
Subject: [PATCH 64/98] wifi: mt76: revert page_pool for kernel 5.4

This reverts commit e8c10835cf062c577ddf426913788c39d30b4bd7.

Change-Id: I4e5764fc545087f691fb4c2f43e7a9cefd1e1657
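
The effect of the revert, sketched below for orientation: RX buffers go back
to being carved out of a per-queue page_frag_cache and DMA-mapped by the
driver itself, instead of coming from a page_pool that also owns the DMA
mapping (the page_pool fragment API is not available on kernel 5.4). This is
a simplified before/after excerpt of the refill path; all identifiers are
taken from the dma.c hunk below, and error handling plus the WED-specific
paths are omitted.

	/* before the revert: buffer and DMA mapping owned by q->page_pool */
	buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
	addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
	dma_sync_single_for_device(dev->dma_dev, addr, len,
				   page_pool_get_dma_dir(q->page_pool));

	/* after the revert: page_frag_cache allocation, explicit mapping */
	buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
	addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
	...
	/* and on release: */
	dma_unmap_single(dev->dma_dev, addr, len, DMA_FROM_DEVICE);
	skb_free_frag(buf);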
---
 dma.c         | 75 +++++++++++++++++++++++++++------------------------
 mac80211.c    | 57 ---------------------------------------
 mmio.c        | 56 ++++++++++++++++++++++++--------------
 mt76.h        | 22 +--------------
 mt7915/main.c | 26 +++++++-----------
 usb.c         | 43 ++++++++++++++---------------
 6 files changed, 109 insertions(+), 170 deletions(-)

diff --git a/dma.c b/dma.c
index bb995e2..06b76ea 100644
--- a/dma.c
+++ b/dma.c
@@ -178,7 +178,7 @@ mt76_free_pending_rxwi(struct mt76_dev *dev)
 	local_bh_disable();
 	while ((t = __mt76_get_rxwi(dev)) != NULL) {
 		if (t->ptr)
-			mt76_put_page_pool_buf(t->ptr, false);
+			skb_free_frag(t->ptr);
 		kfree(t);
 	}
 	local_bh_enable();
@@ -445,9 +445,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 	if (!t)
 		return NULL;
 
-	dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
-				SKB_WITH_OVERHEAD(q->buf_size),
-				page_pool_get_dma_dir(q->page_pool));
+	dma_unmap_single(dev->dma_dev, t->dma_addr,
+			 SKB_WITH_OVERHEAD(q->buf_size),
+			 DMA_FROM_DEVICE);
 
 	buf = t->ptr;
 	t->dma_addr = 0;
@@ -457,9 +457,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 		if (drop)
 			*drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
 	} else {
-		dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
-					SKB_WITH_OVERHEAD(q->buf_size),
-					page_pool_get_dma_dir(q->page_pool));
+		dma_unmap_single(dev->dma_dev, e->dma_addr[0],
+				 SKB_WITH_OVERHEAD(q->buf_size),
+				 DMA_FROM_DEVICE);
 	}
 
 done:
@@ -626,11 +626,11 @@ free_skb:
 }
 
 static int
-mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
-		 bool allow_direct)
+mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 {
 	int len = SKB_WITH_OVERHEAD(q->buf_size);
-	int frames = 0;
+	int frames = 0, offset = q->buf_offset;
+	dma_addr_t addr;
 
 	if (!q->ndesc)
 		return 0;
@@ -639,28 +639,29 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
 
 	while (q->queued < q->ndesc - 1) {
 		struct mt76_queue_buf qbuf = {};
-		enum dma_data_direction dir;
-		dma_addr_t addr;
-		int offset;
 		void *buf = NULL;
 
 		if (mt76_queue_is_wed_rro_ind(q))
 			goto done;
 
-		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
 		if (!buf)
 			break;
 
-		addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
-		dir = page_pool_get_dma_dir(q->page_pool);
-		dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
+		addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
+			skb_free_frag(buf);
+			break;
+		}
 
-		qbuf.addr = addr + q->buf_offset;
+		qbuf.addr = addr + offset;
done:
-		qbuf.len = len - q->buf_offset;
+		qbuf.len = len - offset;
 		qbuf.skip_unmap = false;
 		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
-			mt76_put_page_pool_buf(buf, allow_direct);
+			dma_unmap_single(dev->dma_dev, addr, len,
+					 DMA_FROM_DEVICE);
+			skb_free_frag(buf);
 			break;
 		}
 		frames++;
@@ -704,7 +705,7 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
 		/* WED txfree queue needs ring to be initialized before setup */
 		q->flags = 0;
 		mt76_dma_queue_reset(dev, q);
-		mt76_dma_rx_fill(dev, q, false);
+		mt76_dma_rx_fill(dev, q);
 
 		ret = mtk_wed_device_txfree_ring_setup(q->wed, q->regs);
 		if (!ret)
@@ -733,7 +734,7 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
 	case MT76_WED_RRO_Q_IND:
 		q->flags &= ~MT_QFLAG_WED;
 		mt76_dma_queue_reset(dev, q);
-		mt76_dma_rx_fill(dev, q, false);
+		mt76_dma_rx_fill(dev, q);
 		mtk_wed_device_ind_rx_ring_setup(q->wed, q->regs);
 		break;
 	default:
@@ -789,10 +790,6 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 	if (!q->entry)
 		return -ENOMEM;
 
-	ret = mt76_create_page_pool(dev, q);
-	if (ret)
-		return ret;
-
 	ret = mt76_dma_wed_setup(dev, q, false);
 	if (ret)
 		return ret;
@@ -811,6 +808,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 static void
 mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 {
+	struct page *page;
 	void *buf;
 	bool more;
 
@@ -825,7 +823,7 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 			break;
 
 		if (!mt76_queue_is_wed_rro(q))
-			mt76_put_page_pool_buf(buf, false);
+			skb_free_frag(buf);
 	} while (1);
 
 	if (q->rx_head) {
@@ -834,6 +832,16 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 	}
 
 	spin_unlock_bh(&q->lock);
+
+	if (mt76_queue_is_wed_rx(q))
+		return;
+
+	if (!q->rx_page.va)
+		return;
+
+	page = virt_to_page(q->rx_page.va);
+	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+	memset(&q->rx_page, 0, sizeof(q->rx_page));
 }
 
 static void
@@ -857,7 +865,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
 	mt76_dma_wed_setup(dev, q, true);
 	if (!mt76_queue_is_wed_tx_free(q)) {
 		mt76_dma_sync_idx(dev, q);
-		mt76_dma_rx_fill(dev, q, false);
+		mt76_dma_rx_fill(dev, q);
 	}
 }
 
@@ -875,7 +883,7 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
 
 		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
 	} else {
-		mt76_put_page_pool_buf(data, true);
+		skb_free_frag(data);
 	}
 
 	if (more)
@@ -948,7 +956,6 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
 			goto free_frag;
 
 		skb_reserve(skb, q->buf_offset);
-		skb_mark_for_recycle(skb);
 
 		*(u32 *)skb->cb = info;
 
@@ -964,10 +971,10 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
 		continue;
 
 free_frag:
-		mt76_put_page_pool_buf(data, true);
+		skb_free_frag(data);
 	}
 
-	mt76_dma_rx_fill(dev, q, true);
+	mt76_dma_rx_fill(dev, q);
 	return done;
 }
 
@@ -1012,7 +1019,7 @@ mt76_dma_init(struct mt76_dev *dev,
 
 	mt76_for_each_q_rx(dev, i) {
 		netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
-		mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
+		mt76_dma_rx_fill(dev, &dev->q_rx[i]);
 		napi_enable(&dev->napi[i]);
 	}
 
@@ -1067,8 +1074,6 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
 
 		netif_napi_del(&dev->napi[i]);
 		mt76_dma_rx_cleanup(dev, q);
-
-		page_pool_destroy(q->page_pool);
 	}
 
 	if (mtk_wed_device_active(&dev->mmio.wed))
diff --git a/mac80211.c b/mac80211.c
index 9c582cb..3715c73 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -4,7 +4,6 @@
  */
 #include <linux/sched.h>
 #include <linux/of.h>
-#include <net/page_pool.h>
 #include "mt76.h"
 
 static const struct ieee80211_channel mt76_channels_2ghz[] = {
@@ -546,47 +545,6 @@ void mt76_unregister_phy(struct mt76_phy *phy)
 }
 EXPORT_SYMBOL_GPL(mt76_unregister_phy);
 
-int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
-{
-	struct page_pool_params pp_params = {
-		.order = 0,
-		.flags = PP_FLAG_PAGE_FRAG,
-		.nid = NUMA_NO_NODE,
-		.dev = dev->dma_dev,
-	};
-	int idx = q - dev->q_rx;
-
-	switch (idx) {
-	case MT_RXQ_MAIN:
-	case MT_RXQ_BAND1:
-	case MT_RXQ_BAND2:
-		pp_params.pool_size = 256;
-		break;
-	default:
-		pp_params.pool_size = 16;
-		break;
-	}
-
-	if (mt76_is_mmio(dev)) {
-		/* rely on page_pool for DMA mapping */
-		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
-		pp_params.dma_dir = DMA_FROM_DEVICE;
-		pp_params.max_len = PAGE_SIZE;
-		pp_params.offset = 0;
-	}
-
-	q->page_pool = page_pool_create(&pp_params);
-	if (IS_ERR(q->page_pool)) {
-		int err = PTR_ERR(q->page_pool);
-
-		q->page_pool = NULL;
-		return err;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(mt76_create_page_pool);
-
 struct mt76_dev *
 mt76_alloc_device(struct device *pdev, unsigned int size,
 		  const struct ieee80211_ops *ops,
@@ -1786,21 +1744,6 @@ void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
 }
 EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
 
-void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
-{
-#ifdef CONFIG_PAGE_POOL_STATS
-	struct page_pool_stats stats = {};
-	int i;
-
-	mt76_for_each_q_rx(dev, i)
-		page_pool_get_stats(dev->q_rx[i].page_pool, &stats);
-
-	page_pool_ethtool_stats_get(data, &stats);
-	*index += page_pool_ethtool_stats_get_count();
-#endif
-}
-EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
-
 enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
 {
 	struct ieee80211_hw *hw = phy->hw;
diff --git a/mmio.c b/mmio.c
index c346249..5fb8392 100644
--- a/mmio.c
+++ b/mmio.c
@@ -89,8 +89,12 @@ EXPORT_SYMBOL_GPL(mt76_set_irq_mask);
 void mt76_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 {
 	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+	u32 length;
 	int i;
 
+	length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
+				sizeof(struct skb_shared_info));
+
 	for (i = 0; i < dev->rx_token_size; i++) {
 		struct mt76_txwi_cache *t;
 
@@ -98,7 +102,9 @@ void mt76_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 		if (!t || !t->ptr)
 			continue;
 
-		mt76_put_page_pool_buf(t->ptr, false);
+		dma_unmap_single(dev->dma_dev, t->dma_addr,
+				 wed->wlan.rx_size, DMA_FROM_DEVICE);
+		__free_pages(virt_to_page(t->ptr), get_order(length));
 		t->ptr = NULL;
 
 		mt76_put_rxwi(dev, t);
@@ -112,33 +118,45 @@ u32 mt76_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 {
 	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
 	struct mtk_wed_bm_desc *desc = wed->rx_buf_ring.desc;
-	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
-	int i, len = SKB_WITH_OVERHEAD(q->buf_size);
-	struct mt76_txwi_cache *t = NULL;
+	u32 length;
+	int i;
+
+	length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
+				sizeof(struct skb_shared_info));
 
 	for (i = 0; i < size; i++) {
-		enum dma_data_direction dir;
-		dma_addr_t addr;
-		u32 offset;
+		struct mt76_txwi_cache *t = mt76_get_rxwi(dev);
+		dma_addr_t phy_addr;
+		struct page *page;
 		int token;
-		void *buf;
+		void *ptr;
 
-		t = mt76_get_rxwi(dev);
 		if (!t)
 			goto unmap;
 
-		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
-		if (!buf)
-			goto unmap;
+		page = __dev_alloc_pages(GFP_KERNEL, get_order(length));
+		if (!page) {
+			mt76_put_rxwi(dev, t);
+			goto unmap;
+		}
 
-		addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
-		dir = page_pool_get_dma_dir(q->page_pool);
-		dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
+		phy_addr = dma_map_single(dev->dma_dev, ptr,
+					  wed->wlan.rx_size,
+					  DMA_TO_DEVICE);
 
-		desc->buf0 = cpu_to_le32(addr);
-		token = mt76_rx_token_consume(dev, buf, t, addr);
+		if (unlikely(dma_mapping_error(dev->dev, phy_addr))) {
+			skb_free_frag(ptr);
+			mt76_put_rxwi(dev, t);
+			goto unmap;
+		}
+
+		desc->buf0 = cpu_to_le32(phy_addr);
+		token = mt76_rx_token_consume(dev, ptr, t, phy_addr);
 		if (token < 0) {
-			mt76_put_page_pool_buf(buf, false);
+			dma_unmap_single(dev->dma_dev, phy_addr,
+					 wed->wlan.rx_size, DMA_TO_DEVICE);
+			__free_pages(page, get_order(length));
+			mt76_put_rxwi(dev, t);
 			goto unmap;
 		}
 
@@ -150,8 +168,6 @@ u32 mt76_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 	return 0;
 
 unmap:
-	if (t)
-		mt76_put_rxwi(dev, t);
 	mt76_mmio_wed_release_rx_buf(wed);
 
 	return -ENOMEM;
diff --git a/mt76.h b/mt76.h
index d59a1f5..3af97e5 100644
--- a/mt76.h
+++ b/mt76.h
@@ -245,7 +245,7 @@ struct mt76_queue {
 
 	dma_addr_t desc_dma;
 	struct sk_buff *rx_head;
-	struct page_pool *page_pool;
+	struct page_frag_cache rx_page;
 };
 
 struct mt76_mcu_ops {
@@ -1566,7 +1566,6 @@ mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
 	return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
 }
 
-void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index);
 void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
 			 struct mt76_sta_stats *stats, bool eht);
 int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
@@ -1707,25 +1706,6 @@ void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
 struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
 			  struct mt76_txwi_cache *r, dma_addr_t phys);
-int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q);
-static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
-{
-	struct page *page = virt_to_head_page(buf);
-
-	page_pool_put_full_page(page->pp, page, allow_direct);
-}
-
-static inline void *
-mt76_get_page_pool_buf(struct mt76_queue *q, u32 *offset, u32 size)
-{
-	struct page *page;
-
-	page = page_pool_dev_alloc_frag(q->page_pool, offset, size);
-	if (!page)
-		return NULL;
-
-	return page_address(page) + *offset;
-}
 
 static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
 {
diff --git a/mt7915/main.c b/mt7915/main.c
index ba34c8e..4c80473 100644
--- a/mt7915/main.c
+++ b/mt7915/main.c
@@ -1397,22 +1397,19 @@ void mt7915_get_et_strings(struct ieee80211_hw *hw,
 			   struct ieee80211_vif *vif,
 			   u32 sset, u8 *data)
 {
-	if (sset != ETH_SS_STATS)
-		return;
-
-	memcpy(data, mt7915_gstrings_stats, sizeof(mt7915_gstrings_stats));
-	data += sizeof(mt7915_gstrings_stats);
-	page_pool_ethtool_stats_get_strings(data);
+	if (sset == ETH_SS_STATS)
+		memcpy(data, mt7915_gstrings_stats,
+		       sizeof(mt7915_gstrings_stats));
 }
 
 static
 int mt7915_get_et_sset_count(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif, int sset)
 {
-	if (sset != ETH_SS_STATS)
-		return 0;
+	if (sset == ETH_SS_STATS)
+		return MT7915_SSTATS_LEN;
 
-	return MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
+	return 0;
 }
 
 static void mt7915_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
@@ -1440,7 +1437,7 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
 		.idx = mvif->mt76.idx,
 	};
 	/* See mt7915_ampdu_stat_read_phy, etc */
-	int i, ei = 0, stats_size;
+	int i, ei = 0;
 
 	mutex_lock(&dev->mt76.mutex);
 
@@ -1552,12 +1549,9 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
 		return;
 
 	ei += wi.worker_stat_count;
-
-	mt76_ethtool_page_pool_stats(&dev->mt76, &data[ei], &ei);
-
-	stats_size = MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
-	if (ei != stats_size)
-		dev_err(dev->mt76.dev, "ei: %d size: %d", ei, stats_size);
+	if (ei != MT7915_SSTATS_LEN)
+		dev_err(dev->mt76.dev, "ei: %d MT7915_SSTATS_LEN: %d",
+			ei, (int)MT7915_SSTATS_LEN);
 }
 
 static void
diff --git a/usb.c b/usb.c
index 5e5c7bf..3e28171 100644
--- a/usb.c
+++ b/usb.c
@@ -319,27 +319,29 @@ mt76u_set_endpoints(struct usb_interface *intf,
 
 static int
 mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
-		 int nsgs)
+		 int nsgs, gfp_t gfp)
 {
 	int i;
 
 	for (i = 0; i < nsgs; i++) {
+		struct page *page;
 		void *data;
 		int offset;
 
-		data = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+		data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
 		if (!data)
 			break;
 
-		sg_set_page(&urb->sg[i], virt_to_head_page(data), q->buf_size,
-			    offset);
+		page = virt_to_head_page(data);
+		offset = data - page_address(page);
+		sg_set_page(&urb->sg[i], page, q->buf_size, offset);
 	}
 
 	if (i < nsgs) {
 		int j;
 
 		for (j = nsgs; j < urb->num_sgs; j++)
-			mt76_put_page_pool_buf(sg_virt(&urb->sg[j]), false);
+			skb_free_frag(sg_virt(&urb->sg[j]));
 		urb->num_sgs = i;
 	}
 
@@ -352,16 +354,15 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
 
 static int
 mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
-		struct urb *urb, int nsgs)
+		struct urb *urb, int nsgs, gfp_t gfp)
 {
 	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
-	int offset;
 
 	if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
-		return mt76u_fill_rx_sg(dev, q, urb, nsgs);
+		return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
 
 	urb->transfer_buffer_length = q->buf_size;
-	urb->transfer_buffer = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+	urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
 
 	return urb->transfer_buffer ? 0 : -ENOMEM;
 }
@@ -399,7 +400,7 @@ mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
 	if (err)
 		return err;
 
-	return mt76u_refill_rx(dev, q, e->urb, sg_size);
+	return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
 }
 
 static void mt76u_urb_free(struct urb *urb)
@@ -407,10 +408,10 @@ static void mt76u_urb_free(struct urb *urb)
 	int i;
 
 	for (i = 0; i < urb->num_sgs; i++)
-		mt76_put_page_pool_buf(sg_virt(&urb->sg[i]), false);
+		skb_free_frag(sg_virt(&urb->sg[i]));
 
 	if (urb->transfer_buffer)
-		mt76_put_page_pool_buf(urb->transfer_buffer, false);
+		skb_free_frag(urb->transfer_buffer);
 
 	usb_free_urb(urb);
 }
@@ -546,8 +547,6 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
 		len -= data_len;
 		nsgs++;
 	}
-
-	skb_mark_for_recycle(skb);
 	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb, NULL);
 
 	return nsgs;
@@ -613,7 +612,7 @@ mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 
 		count = mt76u_process_rx_entry(dev, urb, q->buf_size);
 		if (count > 0) {
-			err = mt76u_refill_rx(dev, q, urb, count);
+			err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
 			if (err < 0)
 				break;
 		}
@@ -664,10 +663,6 @@ mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
 	struct mt76_queue *q = &dev->q_rx[qid];
 	int i, err;
 
-	err = mt76_create_page_pool(dev, q);
-	if (err)
-		return err;
-
 	spin_lock_init(&q->lock);
 	q->entry = devm_kcalloc(dev->dev,
 				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
@@ -696,6 +691,7 @@ EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);
 static void
 mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 {
+	struct page *page;
 	int i;
 
 	for (i = 0; i < q->ndesc; i++) {
@@ -705,8 +701,13 @@ mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 		mt76u_urb_free(q->entry[i].urb);
 		q->entry[i].urb = NULL;
 	}
-	page_pool_destroy(q->page_pool);
-	q->page_pool = NULL;
+
+	if (!q->rx_page.va)
+		return;
+
+	page = virt_to_page(q->rx_page.va);
+	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+	memset(&q->rx_page, 0, sizeof(q->rx_page));
 }
 
 static void mt76u_free_rx(struct mt76_dev *dev)
-- 
2.18.0
