From 15b04fc966aa8f30492726132a6b81466b187581 Mon Sep 17 00:00:00 2001
From: Bo Jiao <Bo.Jiao@mediatek.com>
Date: Mon, 6 Feb 2023 19:49:22 +0800
Subject: [PATCH 2001/2008] wifi: mt76: revert page_pool for kernel 5.4

This reverts commit e8c10835cf062c577ddf426913788c39d30b4bd7.

Change-Id: I4e5764fc545087f691fb4c2f43e7a9cefd1e1657
---
 dma.c         | 78 +++++++++++++++++++++++++++++----------------------
 mac80211.c    | 57 -------------------------------------
 mt76.h        | 22 +--------------
 mt7915/main.c | 26 +++++++----------
 mt7915/mmio.c | 55 ++++++++++++++++++++++--------------
 mt7921/main.c | 31 +++-----------------
 usb.c         | 43 ++++++++++++++--------------
 7 files changed, 115 insertions(+), 197 deletions(-)

diff --git a/dma.c b/dma.c
index 35db73b9..7153be47 100644
--- a/dma.c
+++ b/dma.c
@@ -173,7 +173,7 @@ mt76_free_pending_rxwi(struct mt76_dev *dev)
 	local_bh_disable();
 	while ((r = __mt76_get_rxwi(dev)) != NULL) {
 		if (r->ptr)
-			mt76_put_page_pool_buf(r->ptr, false);
+			skb_free_frag(r->ptr);
 		kfree(r);
 	}
 	local_bh_enable();
@@ -409,9 +409,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 	if (!r)
 		return NULL;
 
-	dma_sync_single_for_cpu(dev->dma_dev, r->dma_addr,
-				SKB_WITH_OVERHEAD(q->buf_size),
-				page_pool_get_dma_dir(q->page_pool));
+	dma_unmap_single(dev->dma_dev, r->dma_addr,
+			 SKB_WITH_OVERHEAD(q->buf_size),
+			 DMA_FROM_DEVICE);
 
 	buf = r->ptr;
 	r->dma_addr = 0;
@@ -430,9 +430,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 	} else {
 		buf = e->buf;
 		e->buf = NULL;
-		dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
-				SKB_WITH_OVERHEAD(q->buf_size),
-				page_pool_get_dma_dir(q->page_pool));
+		dma_unmap_single(dev->dma_dev, e->dma_addr[0],
+				 SKB_WITH_OVERHEAD(q->buf_size),
+				 DMA_FROM_DEVICE);
 	}
 
 	return buf;
@@ -592,11 +592,11 @@ free_skb:
 }
 
 static int
-mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
-		 bool allow_direct)
+mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 {
 	int len = SKB_WITH_OVERHEAD(q->buf_size);
-	int frames = 0;
+	int frames = 0, offset = q->buf_offset;
+	dma_addr_t addr;
 
 	if (!q->ndesc)
 		return 0;
@@ -604,25 +604,26 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
 	spin_lock_bh(&q->lock);
 
 	while (q->queued < q->ndesc - 1) {
-		enum dma_data_direction dir;
 		struct mt76_queue_buf qbuf;
-		dma_addr_t addr;
-		int offset;
 		void *buf;
 
-		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
 		if (!buf)
 			break;
 
-		addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
-		dir = page_pool_get_dma_dir(q->page_pool);
-		dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
+		addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
+			skb_free_frag(buf);
+			break;
+		}
 
-		qbuf.addr = addr + q->buf_offset;
-		qbuf.len = len - q->buf_offset;
+		qbuf.addr = addr + offset;
+		qbuf.len = len - offset;
 		qbuf.skip_unmap = false;
 		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
-			mt76_put_page_pool_buf(buf, allow_direct);
+			dma_unmap_single(dev->dma_dev, addr, len,
+					 DMA_FROM_DEVICE);
+			skb_free_frag(buf);
 			break;
 		}
 		frames++;
@@ -666,7 +667,7 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
 		/* WED txfree queue needs ring to be initialized before setup */
 		q->flags = 0;
 		mt76_dma_queue_reset(dev, q);
-		mt76_dma_rx_fill(dev, q, false);
+		mt76_dma_rx_fill(dev, q);
 		q->flags = flags;
 
 		ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
@@ -714,10 +715,6 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 	if (!q->entry)
 		return -ENOMEM;
 
-	ret = mt76_create_page_pool(dev, q);
-	if (ret)
-		return ret;
-
 	ret = mt76_dma_wed_setup(dev, q, false);
 	if (ret)
 		return ret;
@@ -731,6 +728,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 static void
 mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 {
+	struct page *page;
 	void *buf;
 	bool more;
 
@@ -744,7 +742,10 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 		if (!buf)
 			break;
 
-		mt76_put_page_pool_buf(buf, false);
+		if (q->flags & MT_QFLAG_RRO)
+			continue;
+
+		skb_free_frag(buf);
 	} while (1);
 
 	if (q->rx_head) {
@@ -753,6 +754,18 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 	}
 
 	spin_unlock_bh(&q->lock);
+
+	if (((q->flags & MT_QFLAG_WED) &&
+	     FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) ||
+	    (q->flags & MT_QFLAG_RRO))
+		return;
+
+	if (!q->rx_page.va)
+		return;
+
+	page = virt_to_page(q->rx_page.va);
+	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+	memset(&q->rx_page, 0, sizeof(q->rx_page));
 }
 
 static void
@@ -773,7 +786,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
 	mt76_dma_wed_setup(dev, q, true);
 	if (q->flags != MT_WED_Q_TXFREE) {
 		mt76_dma_sync_idx(dev, q);
-		mt76_dma_rx_fill(dev, q, false);
+		mt76_dma_rx_fill(dev, q);
 	}
 }
 
@@ -791,7 +804,7 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
 
 		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
 	} else {
-		mt76_put_page_pool_buf(data, true);
+		skb_free_frag(data);
 	}
 
 	if (more)
@@ -864,7 +877,6 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
 			goto free_frag;
 
 		skb_reserve(skb, q->buf_offset);
-		skb_mark_for_recycle(skb);
 
 		*(u32 *)skb->cb = info;
 
@@ -880,10 +892,10 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
 		continue;
 
 free_frag:
-		mt76_put_page_pool_buf(data, true);
+		skb_free_frag(data);
 	}
 
-	mt76_dma_rx_fill(dev, q, true);
+	mt76_dma_rx_fill(dev, q);
 	return done;
 }
 
@@ -928,7 +940,7 @@ mt76_dma_init(struct mt76_dev *dev,
 
 	mt76_for_each_q_rx(dev, i) {
 		netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
-		mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
+		mt76_dma_rx_fill(dev, &dev->q_rx[i]);
 		napi_enable(&dev->napi[i]);
 	}
 
@@ -982,8 +994,6 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
 
 		netif_napi_del(&dev->napi[i]);
 		mt76_dma_rx_cleanup(dev, q);
-
-		page_pool_destroy(q->page_pool);
 	}
 
 	if (mtk_wed_device_active(&dev->mmio.wed))
diff --git a/mac80211.c b/mac80211.c
index 5a203d31..f7578308 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -4,7 +4,6 @@
  */
 #include <linux/sched.h>
 #include <linux/of.h>
-#include <net/page_pool.h>
 #include "mt76.h"
 
 static const struct ieee80211_channel mt76_channels_2ghz[] = {
@@ -542,47 +541,6 @@ void mt76_unregister_phy(struct mt76_phy *phy)
 }
 EXPORT_SYMBOL_GPL(mt76_unregister_phy);
 
-int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
-{
-	struct page_pool_params pp_params = {
-		.order = 0,
-		.flags = PP_FLAG_PAGE_FRAG,
-		.nid = NUMA_NO_NODE,
-		.dev = dev->dma_dev,
-	};
-	int idx = q - dev->q_rx;
-
-	switch (idx) {
-	case MT_RXQ_MAIN:
-	case MT_RXQ_BAND1:
-	case MT_RXQ_BAND2:
-		pp_params.pool_size = 256;
-		break;
-	default:
-		pp_params.pool_size = 16;
-		break;
-	}
-
-	if (mt76_is_mmio(dev)) {
-		/* rely on page_pool for DMA mapping */
-		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
-		pp_params.dma_dir = DMA_FROM_DEVICE;
-		pp_params.max_len = PAGE_SIZE;
-		pp_params.offset = 0;
-	}
-
-	q->page_pool = page_pool_create(&pp_params);
-	if (IS_ERR(q->page_pool)) {
-		int err = PTR_ERR(q->page_pool);
-
-		q->page_pool = NULL;
-		return err;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(mt76_create_page_pool);
-
 struct mt76_dev *
 mt76_alloc_device(struct device *pdev, unsigned int size,
 		  const struct ieee80211_ops *ops,
@@ -1728,21 +1686,6 @@ void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
 }
 EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
 
-void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
-{
-#ifdef CONFIG_PAGE_POOL_STATS
-	struct page_pool_stats stats = {};
-	int i;
-
-	mt76_for_each_q_rx(dev, i)
-		page_pool_get_stats(dev->q_rx[i].page_pool, &stats);
-
-	page_pool_ethtool_stats_get(data, &stats);
-	*index += page_pool_ethtool_stats_get_count();
-#endif
-}
-EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
-
 enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
 {
 	struct ieee80211_hw *hw = phy->hw;
diff --git a/mt76.h b/mt76.h
index 72c3eb8f..a0c20d36 100644
--- a/mt76.h
+++ b/mt76.h
@@ -224,7 +224,7 @@ struct mt76_queue {
 
 	dma_addr_t desc_dma;
 	struct sk_buff *rx_head;
-	struct page_pool *page_pool;
+	struct page_frag_cache rx_page;
 };
 
 struct mt76_mcu_ops {
@@ -1410,7 +1410,6 @@ mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
 	return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
 }
 
-void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index);
 void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
 			 struct mt76_sta_stats *stats, bool eht);
 int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
@@ -1523,25 +1522,6 @@ void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
 struct mt76_rxwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
 			  struct mt76_rxwi_cache *r, dma_addr_t phys);
-int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q);
-static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
-{
-	struct page *page = virt_to_head_page(buf);
-
-	page_pool_put_full_page(page->pp, page, allow_direct);
-}
-
-static inline void *
-mt76_get_page_pool_buf(struct mt76_queue *q, u32 *offset, u32 size)
-{
-	struct page *page;
-
-	page = page_pool_dev_alloc_frag(q->page_pool, offset, size);
-	if (!page)
-		return NULL;
-
-	return page_address(page) + *offset;
-}
 
 static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
 {
diff --git a/mt7915/main.c b/mt7915/main.c
index 8ce7b1c5..fbff908f 100644
--- a/mt7915/main.c
+++ b/mt7915/main.c
@@ -1289,22 +1289,19 @@ void mt7915_get_et_strings(struct ieee80211_hw *hw,
 			   struct ieee80211_vif *vif,
 			   u32 sset, u8 *data)
 {
-	if (sset != ETH_SS_STATS)
-		return;
-
-	memcpy(data, *mt7915_gstrings_stats, sizeof(mt7915_gstrings_stats));
-	data += sizeof(mt7915_gstrings_stats);
-	page_pool_ethtool_stats_get_strings(data);
+	if (sset == ETH_SS_STATS)
+		memcpy(data, *mt7915_gstrings_stats,
+		       sizeof(mt7915_gstrings_stats));
 }
 
 static
 int mt7915_get_et_sset_count(struct ieee80211_hw *hw,
 			     struct ieee80211_vif *vif, int sset)
 {
-	if (sset != ETH_SS_STATS)
-		return 0;
+	if (sset == ETH_SS_STATS)
+		return MT7915_SSTATS_LEN;
 
-	return MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
+	return 0;
 }
 
 static void mt7915_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
@@ -1332,7 +1329,7 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
 	};
 	struct mib_stats *mib = &phy->mib;
 	/* See mt7915_ampdu_stat_read_phy, etc */
-	int i, ei = 0, stats_size;
+	int i, ei = 0;
 
 	mutex_lock(&dev->mt76.mutex);
 
@@ -1413,12 +1410,9 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
 		return;
 
 	ei += wi.worker_stat_count;
-
-	mt76_ethtool_page_pool_stats(&dev->mt76, &data[ei], &ei);
-
-	stats_size = MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
-	if (ei != stats_size)
-		dev_err(dev->mt76.dev, "ei: %d size: %d", ei, stats_size);
+	if (ei != MT7915_SSTATS_LEN)
+		dev_err(dev->mt76.dev, "ei: %d MT7915_SSTATS_LEN: %d",
+			ei, (int)MT7915_SSTATS_LEN);
 }
 
 static void
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
index 46256842..8ff2c70c 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -596,9 +596,13 @@ static void mt7915_mmio_wed_offload_disable(struct mtk_wed_device *wed)
 static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 {
 	struct mt7915_dev *dev;
+	u32 length;
 	int i;
 
 	dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
+	length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
+				sizeof(struct skb_shared_info));
+
 	for (i = 0; i < dev->mt76.rx_token_size; i++) {
 		struct mt76_rxwi_cache *r;
 
@@ -606,7 +610,9 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 		if (!r || !r->ptr)
 			continue;
 
-		mt76_put_page_pool_buf(r->ptr, false);
+		dma_unmap_single(dev->mt76.dma_dev, r->dma_addr,
+				 wed->wlan.rx_size, DMA_FROM_DEVICE);
+		__free_pages(virt_to_page(r->ptr), get_order(length));
 		r->ptr = NULL;
 
 		mt76_put_rxwi(&dev->mt76, r);
@@ -630,38 +636,47 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 {
 	struct mtk_rxbm_desc *desc = wed->rx_buf_ring.desc;
-	struct mt76_txwi_cache *t = NULL;
 	struct mt7915_dev *dev;
-	struct mt76_queue *q;
-	int i, len;
+	u32 length;
+	int i;
 
 	dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
-	q = &dev->mt76.q_rx[MT_RXQ_MAIN];
-	len = SKB_WITH_OVERHEAD(q->buf_size);
+	length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
+				sizeof(struct skb_shared_info));
 
 	for (i = 0; i < size; i++) {
-		enum dma_data_direction dir;
-		dma_addr_t addr;
-		u32 offset;
+		struct mt76_rxwi_cache *r = mt76_get_rxwi(&dev->mt76);
+		dma_addr_t phy_addr;
+		struct page *page;
 		int token;
-		void *buf;
+		void *ptr;
 
-		t = mt76_get_rxwi(&dev->mt76);
 		if (!t)
 			goto unmap;
 
-		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
-		if (!buf)
+		page = __dev_alloc_pages(GFP_KERNEL, get_order(length));
+		if (!page) {
+			mt76_put_rxwi(&dev->mt76, r);
 			goto unmap;
+		}
 
-		addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
-		dir = page_pool_get_dma_dir(q->page_pool);
-		dma_sync_single_for_device(dev->mt76.dma_dev, addr, len, dir);
+		ptr = page_address(page);
+		phy_addr = dma_map_single(dev->mt76.dma_dev, ptr,
+					  wed->wlan.rx_size,
+					  DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(dev->mt76.dev, phy_addr))) {
+			__free_pages(page, get_order(length));
+			mt76_put_rxwi(&dev->mt76, r);
+			goto unmap;
+		}
 
-		desc->buf0 = cpu_to_le32(addr);
-		token = mt76_rx_token_consume(&dev->mt76, buf, t, addr);
+		desc->buf0 = cpu_to_le32(phy_addr);
+		token = mt76_rx_token_consume(&dev->mt76, ptr, r, phy_addr);
 		if (token < 0) {
-			mt76_put_page_pool_buf(buf, false);
+			dma_unmap_single(dev->mt76.dma_dev, phy_addr,
+					 wed->wlan.rx_size, DMA_TO_DEVICE);
+			__free_pages(page, get_order(length));
+			mt76_put_rxwi(&dev->mt76, r);
 			goto unmap;
 		}
 
@@ -673,8 +688,6 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 	return 0;
 
 unmap:
-	if (t)
-		mt76_put_rxwi(&dev->mt76, t);
 	mt7915_mmio_wed_release_rx_buf(wed);
 	return -ENOMEM;
 }
diff --git a/mt7921/main.c b/mt7921/main.c
index 3b6adb29..47eb38e4 100644
--- a/mt7921/main.c
+++ b/mt7921/main.c
@@ -1083,34 +1083,17 @@ static void
 mt7921_get_et_strings(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		      u32 sset, u8 *data)
 {
-	struct mt7921_dev *dev = mt7921_hw_dev(hw);
-
 	if (sset != ETH_SS_STATS)
 		return;
 
 	memcpy(data, *mt7921_gstrings_stats, sizeof(mt7921_gstrings_stats));
-
-	if (mt76_is_sdio(&dev->mt76))
-		return;
-
-	data += sizeof(mt7921_gstrings_stats);
-	page_pool_ethtool_stats_get_strings(data);
 }
 
 static int
 mt7921_get_et_sset_count(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 			 int sset)
 {
-	struct mt7921_dev *dev = mt7921_hw_dev(hw);
-
-	if (sset != ETH_SS_STATS)
-		return 0;
-
-	if (mt76_is_sdio(&dev->mt76))
-		return ARRAY_SIZE(mt7921_gstrings_stats);
-
-	return ARRAY_SIZE(mt7921_gstrings_stats) +
-	       page_pool_ethtool_stats_get_count();
+	return sset == ETH_SS_STATS ? ARRAY_SIZE(mt7921_gstrings_stats) : 0;
 }
 
 static void
@@ -1130,7 +1113,6 @@ void mt7921_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 			 struct ethtool_stats *stats, u64 *data)
 {
 	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
-	int stats_size = ARRAY_SIZE(mt7921_gstrings_stats);
 	struct mt7921_phy *phy = mt7921_hw_phy(hw);
 	struct mt7921_dev *dev = phy->dev;
 	struct mib_stats *mib = &phy->mib;
@@ -1186,14 +1168,9 @@ void mt7921_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		return;
 
 	ei += wi.worker_stat_count;
-
-	if (!mt76_is_sdio(&dev->mt76)) {
-		mt76_ethtool_page_pool_stats(&dev->mt76, &data[ei], &ei);
-		stats_size += page_pool_ethtool_stats_get_count();
-	}
-
-	if (ei != stats_size)
-		dev_err(dev->mt76.dev, "ei: %d SSTATS_LEN: %d", ei, stats_size);
+	if (ei != ARRAY_SIZE(mt7921_gstrings_stats))
+		dev_err(dev->mt76.dev, "ei: %d SSTATS_LEN: %zu",
+			ei, ARRAY_SIZE(mt7921_gstrings_stats));
 }
 
 static u64
diff --git a/usb.c b/usb.c
index 5e5c7bf5..3e281715 100644
--- a/usb.c
+++ b/usb.c
@@ -319,27 +319,29 @@ mt76u_set_endpoints(struct usb_interface *intf,
 
 static int
 mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
-		 int nsgs)
+		 int nsgs, gfp_t gfp)
 {
 	int i;
 
 	for (i = 0; i < nsgs; i++) {
+		struct page *page;
 		void *data;
 		int offset;
 
-		data = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+		data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
 		if (!data)
 			break;
 
-		sg_set_page(&urb->sg[i], virt_to_head_page(data), q->buf_size,
-			    offset);
+		page = virt_to_head_page(data);
+		offset = data - page_address(page);
+		sg_set_page(&urb->sg[i], page, q->buf_size, offset);
 	}
 
 	if (i < nsgs) {
 		int j;
 
 		for (j = nsgs; j < urb->num_sgs; j++)
-			mt76_put_page_pool_buf(sg_virt(&urb->sg[j]), false);
+			skb_free_frag(sg_virt(&urb->sg[j]));
 		urb->num_sgs = i;
 	}
 
@@ -352,16 +354,15 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
 
 static int
 mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
-		struct urb *urb, int nsgs)
+		struct urb *urb, int nsgs, gfp_t gfp)
 {
 	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
-	int offset;
 
 	if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
-		return mt76u_fill_rx_sg(dev, q, urb, nsgs);
+		return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
 
 	urb->transfer_buffer_length = q->buf_size;
-	urb->transfer_buffer = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+	urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
 
 	return urb->transfer_buffer ? 0 : -ENOMEM;
 }
@@ -399,7 +400,7 @@ mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
 	if (err)
 		return err;
 
-	return mt76u_refill_rx(dev, q, e->urb, sg_size);
+	return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
 }
 
 static void mt76u_urb_free(struct urb *urb)
@@ -407,10 +408,10 @@ static void mt76u_urb_free(struct urb *urb)
 	int i;
 
 	for (i = 0; i < urb->num_sgs; i++)
-		mt76_put_page_pool_buf(sg_virt(&urb->sg[i]), false);
+		skb_free_frag(sg_virt(&urb->sg[i]));
 
 	if (urb->transfer_buffer)
-		mt76_put_page_pool_buf(urb->transfer_buffer, false);
+		skb_free_frag(urb->transfer_buffer);
 
 	usb_free_urb(urb);
 }
@@ -546,8 +547,6 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
 		len -= data_len;
 		nsgs++;
 	}
-
-	skb_mark_for_recycle(skb);
 	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb, NULL);
 
 	return nsgs;
@@ -613,7 +612,7 @@ mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 
 		count = mt76u_process_rx_entry(dev, urb, q->buf_size);
 		if (count > 0) {
-			err = mt76u_refill_rx(dev, q, urb, count);
+			err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
 			if (err < 0)
 				break;
 		}
@@ -664,10 +663,6 @@ mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
 	struct mt76_queue *q = &dev->q_rx[qid];
 	int i, err;
 
-	err = mt76_create_page_pool(dev, q);
-	if (err)
-		return err;
-
 	spin_lock_init(&q->lock);
 	q->entry = devm_kcalloc(dev->dev,
 				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
@@ -696,6 +691,7 @@ EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);
 static void
 mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 {
+	struct page *page;
 	int i;
 
 	for (i = 0; i < q->ndesc; i++) {
@@ -705,8 +701,13 @@ mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 		mt76u_urb_free(q->entry[i].urb);
 		q->entry[i].urb = NULL;
 	}
-	page_pool_destroy(q->page_pool);
-	q->page_pool = NULL;
+
+	if (!q->rx_page.va)
+		return;
+
+	page = virt_to_page(q->rx_page.va);
+	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+	memset(&q->rx_page, 0, sizeof(q->rx_page));
 }
 
 static void mt76u_free_rx(struct mt76_dev *dev)
-- 
2.39.2
